Fix striped constant pointer initializer

parent 0b30258f80
commit 29611ec3db
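For context, the compiler side of this change rejects __striped on arrays without a fixed size (the new error ERRR_STRIPE_REQUIRES_FIXED_SIZE_ARRAY below) and emits striped constant pointer initializers as split low-byte/high-byte linker references. A minimal sketch of the kind of declaration affected, assuming oscar64's __striped storage qualifier; the struct, array and data names are invented for illustration:

    // Illustration only: a striped array of structs with a pointer member.
    // __striped stores each byte column of the elements contiguously, so a
    // constant pointer initializer has to be emitted as separate low-byte and
    // high-byte references, and the array size must be known at compile time.

    struct Sprite
    {
        char        x, y;
        const char* image;
    };

    static const char img0[8] = { 0x18, 0x3c, 0x7e, 0xff, 0xff, 0x7e, 0x3c, 0x18 };

    // Accepted: fixed size array, the stripe layout can be computed.
    __striped struct Sprite sprites[4] = {
        { 0, 0, img0 }
    };

    // Rejected after this commit with "__striped requires fixed size array":
    //
    //   __striped struct Sprite open_sprites[] = { { 0, 0, img0 } };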
@@ -843,6 +843,16 @@ static char builddop_src(char ip, char shift, bool reverse)
|
|||
if (shift != 0)
|
||||
{
|
||||
ip += asm_zp(BLIT_CODE + ip, ASM_LDA, REG_S0);
|
||||
if (reverse)
|
||||
{
|
||||
if (shift > 4)
|
||||
ip += asm_ac(BLIT_CODE + ip, ASM_ASL);
|
||||
}
|
||||
else
|
||||
{
|
||||
if (shift <= 4)
|
||||
ip += asm_ac(BLIT_CODE + ip, ASM_LSR);
|
||||
}
|
||||
ip += asm_zp(BLIT_CODE + ip, ASM_STA, REG_S1);
|
||||
|
||||
ip += asm_iy(BLIT_CODE + ip, ASM_LDA, REG_SP);
|
||||
|
@@ -852,40 +862,46 @@ static char builddop_src(char ip, char shift, bool reverse)
|
|||
{
|
||||
if (shift > 4)
|
||||
{
|
||||
for(char i=shift; i<8; i++)
|
||||
for(char i=shift; i<7; i++)
|
||||
{
|
||||
ip += asm_zp(BLIT_CODE + ip, ASM_ASL, REG_S1);
|
||||
ip += asm_ac(BLIT_CODE + ip, ASM_ROL);
|
||||
ip += asm_zp(BLIT_CODE + ip, ASM_ASL, REG_S1);
|
||||
}
|
||||
ip += asm_ac(BLIT_CODE + ip, ASM_ROL);
|
||||
}
|
||||
else
|
||||
{
|
||||
for(char i=0; i<shift; i++)
|
||||
ip += asm_ac(BLIT_CODE + ip, ASM_LSR);
|
||||
for(char i=1; i<shift; i++)
|
||||
{
|
||||
ip += asm_ac(BLIT_CODE + ip, ASM_LSR);
|
||||
ip += asm_zp(BLIT_CODE + ip, ASM_ROR, REG_S1);
|
||||
ip += asm_ac(BLIT_CODE + ip, ASM_LSR);
|
||||
}
|
||||
ip += asm_zp(BLIT_CODE + ip, ASM_LDA, REG_S1);
|
||||
ip += asm_ac(BLIT_CODE + ip, ASM_ROR);
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
if (shift > 4)
|
||||
{
|
||||
for(char i=shift; i<8; i++)
|
||||
ip += asm_ac(BLIT_CODE + ip, ASM_ASL);
|
||||
for(char i=shift; i<7; i++)
|
||||
{
|
||||
ip += asm_ac(BLIT_CODE + ip, ASM_ASL);
|
||||
ip += asm_zp(BLIT_CODE + ip, ASM_ROL, REG_S1);
|
||||
ip += asm_ac(BLIT_CODE + ip, ASM_ASL);
|
||||
}
|
||||
ip += asm_zp(BLIT_CODE + ip, ASM_LDA, REG_S1);
|
||||
ip += asm_ac(BLIT_CODE + ip, ASM_ROL);
|
||||
}
|
||||
else
|
||||
{
|
||||
for(char i=0; i<shift; i++)
|
||||
for(char i=1; i<shift; i++)
|
||||
{
|
||||
ip += asm_zp(BLIT_CODE + ip, ASM_LSR, REG_S1);
|
||||
ip += asm_ac(BLIT_CODE + ip, ASM_ROR);
|
||||
ip += asm_zp(BLIT_CODE + ip, ASM_LSR, REG_S1);
|
||||
}
|
||||
ip += asm_ac(BLIT_CODE + ip, ASM_ROR);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@@ -899,27 +915,50 @@ static char builddop_src(char ip, char shift, bool reverse)
|
|||
|
||||
static AsmIns blitops_op[4] = {ASM_BRK, ASM_AND, ASM_ORA, ASM_EOR};
|
||||
|
||||
static char builddop_op(char ip, BlitOp op)
|
||||
static char builddop_op(char ip, BlitOp op, char mask)
|
||||
{
|
||||
char reg = REG_D0;
|
||||
if (op & BLIT_PATTERN)
|
||||
reg = REG_PAT;
|
||||
|
||||
BlitOp rop = op & BLIT_OP;
|
||||
|
||||
if (op & BLIT_IMM)
|
||||
{
|
||||
if (op & BLIT_INVERT)
|
||||
ip += asm_im(BLIT_CODE + ip, ASM_LDA, 0xff);
|
||||
else
|
||||
ip += asm_im(BLIT_CODE + ip, ASM_LDA, 0x00);
|
||||
if (!rop && mask)
|
||||
{
|
||||
ip += asm_iy(BLIT_CODE + ip, ASM_LDA, REG_DP);
|
||||
if (op & BLIT_INVERT)
|
||||
ip += asm_im(BLIT_CODE + ip, ASM_ORA, ~mask);
|
||||
else
|
||||
ip += asm_im(BLIT_CODE + ip, ASM_AND, mask);
|
||||
ip += asm_iy(BLIT_CODE + ip, ASM_STA, REG_DP);
|
||||
|
||||
return ip;
|
||||
}
|
||||
|
||||
ip += asm_im(BLIT_CODE + ip, ASM_LDA, (op & BLIT_INVERT) ? 0xff : 0x00);
|
||||
}
|
||||
else if (!(op & BLIT_SRC))
|
||||
ip += asm_zp(BLIT_CODE + ip, ASM_LDA, reg);
|
||||
else if (op & BLIT_INVERT)
|
||||
ip += asm_im(BLIT_CODE + ip, ASM_EOR, 0xff);
|
||||
|
||||
op &= BLIT_OP;
|
||||
if (op)
|
||||
ip += asm_zp(BLIT_CODE + ip, blitops_op[op], reg);
|
||||
if (rop)
|
||||
ip += asm_zp(BLIT_CODE + ip, blitops_op[rop], reg);
|
||||
|
||||
if (mask)
|
||||
{
|
||||
ip += asm_zp(BLIT_CODE + ip, ASM_STA, REG_S1);
|
||||
if (op & BLIT_DST)
|
||||
ip += asm_zp(BLIT_CODE + ip, ASM_EOR, REG_D0);
|
||||
else
|
||||
ip += asm_iy(BLIT_CODE + ip, ASM_EOR, REG_DP);
|
||||
ip += asm_im(BLIT_CODE + ip, ASM_AND, mask);
|
||||
ip += asm_zp(BLIT_CODE + ip, ASM_EOR, REG_S1);
|
||||
}
|
||||
|
||||
ip += asm_iy(BLIT_CODE + ip, ASM_STA, REG_DP);
|
||||
|
||||
return ip;
|
||||
}
|
||||
|
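The mask parameter added to builddop_op above implements a masked store: the computed byte is saved in REG_S1, EORed with the destination byte, ANDed with the mask, then EORed with REG_S1 again, so the bits selected by the mask are preserved from the destination. A small C sketch of that identity, with invented names, just to show the arithmetic:

    #include <stdio.h>

    /* Merge "result" into "dst", keeping the destination bits selected by
     * "mask"; the generated 6502 sequence STA S1 / EOR dst / AND #mask /
     * EOR S1 computes the same value without needing a separate ~mask. */
    static unsigned char merge(unsigned char result, unsigned char dst, unsigned char mask)
    {
        return ((result ^ dst) & mask) ^ result;  /* == (result & ~mask) | (dst & mask) */
    }

    int main(void)
    {
        /* Example: left edge of a blit, keep the two leftmost pixels of the screen byte. */
        unsigned char out = merge(0x3c /* new bits */, 0xc3 /* screen byte */, 0xc0);
        printf("%02x\n", out);  /* prints fc: 0x3c & 0x3f gives 0x3c, 0xc3 & 0xc0 gives 0xc0 */
        return 0;
    }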
@@ -933,7 +972,7 @@ static void builddop(char shift, char w, char lmask, char rmask, BlitOp op, bool
|
|||
bool usesrc = op & BLIT_SRC;
|
||||
bool usedst = op & BLIT_DST;
|
||||
|
||||
char asm_clc = ASM_CLC, asm_adc = ASM_ADC, asm_bcc = ASM_BCC, asm_inc = ASM_INC, ystart = 0;
|
||||
char asm_clc = ASM_CLC, asm_adc = ASM_ADC, asm_bcc = ASM_BCC, asm_inc = ASM_INC, ystart = 0, yinc = 8;
|
||||
if (reverse)
|
||||
{
|
||||
asm_clc = ASM_SEC;
|
||||
|
@@ -941,49 +980,74 @@ static void builddop(char shift, char w, char lmask, char rmask, BlitOp op, bool
|
|||
asm_bcc = ASM_BCS;
|
||||
asm_inc = ASM_DEC;
|
||||
ystart = 0xf8;
|
||||
}
|
||||
|
||||
ip += asm_im(BLIT_CODE + ip, ASM_LDY, ystart);
|
||||
|
||||
ip += asm_iy(BLIT_CODE + ip, ASM_LDA, REG_DP);
|
||||
ip += asm_zp(BLIT_CODE + ip, ASM_STA, REG_D0);
|
||||
|
||||
if (usesrc)
|
||||
{
|
||||
ip += asm_iy(BLIT_CODE + ip, ASM_LDA, REG_SP);
|
||||
ip += asm_zp(BLIT_CODE + ip, ASM_STA, REG_S0);
|
||||
|
||||
ip += asm_np(BLIT_CODE + ip, asm_clc);
|
||||
ip += asm_zp(BLIT_CODE + ip, ASM_LDA, REG_SP);
|
||||
ip += asm_im(BLIT_CODE + ip, asm_adc, 8);
|
||||
ip += asm_zp(BLIT_CODE + ip, ASM_STA, REG_SP);
|
||||
ip += asm_rl(BLIT_CODE + ip, asm_bcc, 2);
|
||||
ip += asm_zp(BLIT_CODE + ip, asm_inc, REG_SP + 1);
|
||||
|
||||
ip = builddop_src(ip, shift, reverse);
|
||||
yinc = 0xf8;
|
||||
}
|
||||
|
||||
if (w == 0)
|
||||
lmask &= rmask;
|
||||
|
||||
ip = builddop_op(ip, op);
|
||||
ip += asm_im(BLIT_CODE + ip, ASM_AND, lmask);
|
||||
ip += asm_zp(BLIT_CODE + ip, ASM_STA, REG_S1);
|
||||
ip += asm_zp(BLIT_CODE + ip, ASM_LDA, REG_D0);
|
||||
ip += asm_im(BLIT_CODE + ip, ASM_AND, ~lmask);
|
||||
ip += asm_zp(BLIT_CODE + ip, ASM_ORA, REG_S1);
|
||||
ip += asm_iy(BLIT_CODE + ip, ASM_STA, REG_DP);
|
||||
ip += asm_im(BLIT_CODE + ip, ASM_LDY, ystart);
|
||||
|
||||
if (lmask == 0xff)
|
||||
{
|
||||
if (usesrc && shift)
|
||||
{
|
||||
ip += asm_iy(BLIT_CODE + ip, ASM_LDA, REG_SP);
|
||||
ip += asm_zp(BLIT_CODE + ip, ASM_STA, REG_S0);
|
||||
|
||||
ip += asm_np(BLIT_CODE + ip, asm_clc);
|
||||
ip += asm_zp(BLIT_CODE + ip, ASM_LDA, REG_SP);
|
||||
ip += asm_im(BLIT_CODE + ip, asm_adc, 8);
|
||||
ip += asm_zp(BLIT_CODE + ip, ASM_STA, REG_SP);
|
||||
ip += asm_rl(BLIT_CODE + ip, asm_bcc, 2);
|
||||
ip += asm_zp(BLIT_CODE + ip, asm_inc, REG_SP + 1);
|
||||
}
|
||||
|
||||
w++;
|
||||
}
|
||||
else
|
||||
{
|
||||
if (usedst)
|
||||
{
|
||||
ip += asm_iy(BLIT_CODE + ip, ASM_LDA, REG_DP);
|
||||
ip += asm_zp(BLIT_CODE + ip, ASM_STA, REG_D0);
|
||||
}
|
||||
|
||||
if (usesrc)
|
||||
{
|
||||
if (shift)
|
||||
{
|
||||
ip += asm_iy(BLIT_CODE + ip, ASM_LDA, REG_SP);
|
||||
ip += asm_zp(BLIT_CODE + ip, ASM_STA, REG_S0);
|
||||
|
||||
ip += asm_np(BLIT_CODE + ip, asm_clc);
|
||||
ip += asm_zp(BLIT_CODE + ip, ASM_LDA, REG_SP);
|
||||
ip += asm_im(BLIT_CODE + ip, asm_adc, 8);
|
||||
ip += asm_zp(BLIT_CODE + ip, ASM_STA, REG_SP);
|
||||
ip += asm_rl(BLIT_CODE + ip, asm_bcc, 2);
|
||||
ip += asm_zp(BLIT_CODE + ip, asm_inc, REG_SP + 1);
|
||||
}
|
||||
|
||||
ip = builddop_src(ip, shift, reverse);
|
||||
}
|
||||
|
||||
ip = builddop_op(ip, op, ~lmask);
|
||||
|
||||
if (w > 0)
|
||||
{
|
||||
ystart += yinc;
|
||||
ip += asm_im(BLIT_CODE + ip, ASM_LDY, ystart);
|
||||
}
|
||||
}
|
||||
|
||||
if (w > 0)
|
||||
{
|
||||
ip += asm_np(BLIT_CODE + ip, asm_clc);
|
||||
ip += asm_np(BLIT_CODE + ip, ASM_TYA);
|
||||
ip += asm_im(BLIT_CODE + ip, asm_adc, 8);
|
||||
ip += asm_np(BLIT_CODE + ip, ASM_TAY);
|
||||
|
||||
if (w > 1)
|
||||
{
|
||||
ip += asm_im(BLIT_CODE + ip, ASM_LDX, w - 1);
|
||||
if (w > 2)
|
||||
ip += asm_im(BLIT_CODE + ip, ASM_LDX, w - 1);
|
||||
if (w <= 31 && !shift)
|
||||
ip += asm_np(BLIT_CODE + ip, asm_clc);
|
||||
char lp = ip;
|
||||
|
||||
if (usedst)
|
||||
|
@@ -997,46 +1061,54 @@ static void builddop(char shift, char w, char lmask, char rmask, BlitOp op, bool
|
|||
ip = builddop_src(ip, shift, reverse);
|
||||
}
|
||||
|
||||
ip = builddop_op(ip, op);
|
||||
ip += asm_iy(BLIT_CODE + ip, ASM_STA, REG_DP);
|
||||
ip = builddop_op(ip, op, 0);
|
||||
|
||||
ip += asm_np(BLIT_CODE + ip, asm_clc);
|
||||
ip += asm_np(BLIT_CODE + ip, ASM_TYA);
|
||||
ip += asm_im(BLIT_CODE + ip, asm_adc, 8);
|
||||
ip += asm_np(BLIT_CODE + ip, ASM_TAY);
|
||||
if (w > 31)
|
||||
if (w > 2)
|
||||
{
|
||||
if (usesrc)
|
||||
if (w > 31 || shift)
|
||||
ip += asm_np(BLIT_CODE + ip, asm_clc);
|
||||
ip += asm_np(BLIT_CODE + ip, ASM_TYA);
|
||||
ip += asm_im(BLIT_CODE + ip, asm_adc, 8);
|
||||
ip += asm_np(BLIT_CODE + ip, ASM_TAY);
|
||||
if (w > 31)
|
||||
{
|
||||
ip += asm_rl(BLIT_CODE + ip, asm_bcc, 4);
|
||||
ip += asm_zp(BLIT_CODE + ip, asm_inc, REG_SP + 1);
|
||||
if (usesrc)
|
||||
{
|
||||
ip += asm_rl(BLIT_CODE + ip, asm_bcc, 4);
|
||||
ip += asm_zp(BLIT_CODE + ip, asm_inc, REG_SP + 1);
|
||||
}
|
||||
else
|
||||
{
|
||||
ip += asm_rl(BLIT_CODE + ip, asm_bcc, 2);
|
||||
}
|
||||
ip += asm_zp(BLIT_CODE + ip, asm_inc, REG_DP + 1);
|
||||
}
|
||||
else
|
||||
{
|
||||
ip += asm_rl(BLIT_CODE + ip, asm_bcc, 2);
|
||||
}
|
||||
ip += asm_zp(BLIT_CODE + ip, asm_inc, REG_DP + 1);
|
||||
|
||||
ip += asm_np(BLIT_CODE + ip, ASM_DEX);
|
||||
ip += asm_rl(BLIT_CODE + ip, ASM_BNE, lp - ip - 2);
|
||||
}
|
||||
else
|
||||
{
|
||||
ystart += yinc;
|
||||
ip += asm_im(BLIT_CODE + ip, ASM_LDY, ystart);
|
||||
}
|
||||
}
|
||||
|
||||
if (rmask != 0)
|
||||
{
|
||||
if (usedst)
|
||||
{
|
||||
ip += asm_iy(BLIT_CODE + ip, ASM_LDA, REG_DP);
|
||||
ip += asm_zp(BLIT_CODE + ip, ASM_STA, REG_D0);
|
||||
}
|
||||
|
||||
ip += asm_np(BLIT_CODE + ip, ASM_DEX);
|
||||
ip += asm_rl(BLIT_CODE + ip, ASM_BNE, lp - ip - 2);
|
||||
if (usesrc)
|
||||
{
|
||||
ip = builddop_src(ip, shift, reverse);
|
||||
}
|
||||
|
||||
ip = builddop_op(ip, op, ~rmask);
|
||||
}
|
||||
|
||||
ip += asm_iy(BLIT_CODE + ip, ASM_LDA, REG_DP);
|
||||
ip += asm_zp(BLIT_CODE + ip, ASM_STA, REG_D0);
|
||||
|
||||
if (usesrc)
|
||||
{
|
||||
ip = builddop_src(ip, shift, reverse);
|
||||
}
|
||||
|
||||
ip = builddop_op(ip, op);
|
||||
ip += asm_im(BLIT_CODE + ip, ASM_AND, rmask);
|
||||
ip += asm_zp(BLIT_CODE + ip, ASM_STA, REG_S1);
|
||||
ip += asm_zp(BLIT_CODE + ip, ASM_LDA, REG_D0);
|
||||
ip += asm_im(BLIT_CODE + ip, ASM_AND, ~rmask);
|
||||
ip += asm_zp(BLIT_CODE + ip, ASM_ORA, REG_S1);
|
||||
ip += asm_iy(BLIT_CODE + ip, ASM_STA, REG_DP);
|
||||
}
|
||||
|
||||
ip += asm_np(BLIT_CODE + ip, ASM_RTS);
|
||||
|
@@ -1053,7 +1125,7 @@ void bmu_bitblit(const Bitmap * dbm, int dx, int dy, const Bitmap * sbm, int sx,
|
|||
|
||||
char cw = dxh1 - dxh0;
|
||||
|
||||
bool reverse = dbm == sbm && (dy > sy || dy == sy && dx > sx);
|
||||
bool reverse = dbm == sbm && (dy > sy || dy == sy && dx > sx) && (op & BLIT_SRC);
|
||||
|
||||
if (reverse)
|
||||
{
|
||||
|
@@ -1064,23 +1136,24 @@ void bmu_bitblit(const Bitmap * dbm, int dx, int dy, const Bitmap * sbm, int sx,
|
|||
char * dp = dbm->data + dbm->cwidth * (dy & ~7) + (dx & ~7) + (dy & 7);
|
||||
char * sp = sbm->data + sbm->cwidth * (sy & ~7) + (sx & ~7) + (sy & 7);
|
||||
|
||||
char shift = (dx & 7) - (sx & 7);
|
||||
if (reverse)
|
||||
{
|
||||
sp += 8 * cw + 8 - 0xf8;
|
||||
sp += 8 * cw - 0xf8;
|
||||
dp += 8 * cw - 0xf8;
|
||||
byte t = lm; lm = rm; rm = t;
|
||||
if (shift & 0x80)
|
||||
sp += 8;
|
||||
}
|
||||
else if (shift)
|
||||
{
|
||||
if (!(shift & 0x80))
|
||||
sp -= 8;
|
||||
}
|
||||
|
||||
char shift;
|
||||
if ((dx & 7) > (sx & 7))
|
||||
{
|
||||
shift = (dx & 7) - (sx & 7);
|
||||
sp -= 8;
|
||||
}
|
||||
else
|
||||
{
|
||||
shift = 8 + (dx & 7) - (sx & 7);
|
||||
}
|
||||
shift &= 7;
|
||||
|
||||
|
||||
|
||||
builddop(shift, cw, lm, rm, op, reverse);
|
||||
|
||||
|
@@ -1089,42 +1162,81 @@ void bmu_bitblit(const Bitmap * dbm, int dx, int dy, const Bitmap * sbm, int sx,
|
|||
int sstride = 8 * sbm->cwidth - 8;
|
||||
int dstride = 8 * dbm->cwidth - 8;
|
||||
|
||||
if (reverse)
|
||||
if (pattern)
|
||||
{
|
||||
sstride = -sstride;
|
||||
dstride = -dstride;
|
||||
if (reverse)
|
||||
{
|
||||
sstride = -sstride;
|
||||
dstride = -dstride;
|
||||
|
||||
for(char y=h; y>0; y--)
|
||||
{
|
||||
if (((int)sp & 7) == 0)
|
||||
sp += sstride;
|
||||
sp--;
|
||||
for(char y=h; y>0; y--)
|
||||
{
|
||||
if (((int)sp & 7) == 0)
|
||||
sp += sstride;
|
||||
sp--;
|
||||
|
||||
if (((int)dp & 7) == 0)
|
||||
dp += dstride;
|
||||
dp--;
|
||||
if (((int)dp & 7) == 0)
|
||||
dp += dstride;
|
||||
dp--;
|
||||
|
||||
char pi = (int)dp & 7;
|
||||
char pi = (int)dp & 7;
|
||||
|
||||
callddop(sp, dp, pat[pi]);
|
||||
callddop(sp, dp, pat[pi]);
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
for(char y=h; y>0; y--)
|
||||
{
|
||||
char pi = (int)dp & 7;
|
||||
|
||||
callddop(sp, dp, pat[pi]);
|
||||
|
||||
sp++;
|
||||
if (((int)sp & 7) == 0)
|
||||
sp += sstride;
|
||||
|
||||
dp++;
|
||||
if (((int)dp & 7) == 0)
|
||||
dp += dstride;
|
||||
}
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
for(char y=h; y>0; y--)
|
||||
{
|
||||
char pi = (int)dp & 7;
|
||||
if (reverse)
|
||||
{
|
||||
sstride = -sstride;
|
||||
dstride = -dstride;
|
||||
|
||||
callddop(sp, dp, pat[pi]);
|
||||
for(char y=h; y>0; y--)
|
||||
{
|
||||
if (((int)sp & 7) == 0)
|
||||
sp += sstride;
|
||||
sp--;
|
||||
|
||||
sp++;
|
||||
if (((int)sp & 7) == 0)
|
||||
sp += sstride;
|
||||
if (((int)dp & 7) == 0)
|
||||
dp += dstride;
|
||||
dp--;
|
||||
|
||||
dp++;
|
||||
if (((int)dp & 7) == 0)
|
||||
dp += dstride;
|
||||
callddop(sp, dp, 0);
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
for(char y=h; y>0; y--)
|
||||
{
|
||||
callddop(sp, dp, 0);
|
||||
|
||||
sp++;
|
||||
if (((int)sp & 7) == 0)
|
||||
sp += sstride;
|
||||
|
||||
dp++;
|
||||
if (((int)dp & 7) == 0)
|
||||
dp += dstride;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -1204,11 +1316,19 @@ inline void bm_rect_copy(const Bitmap * dbm, const ClipRect * clip, int dx, int
|
|||
|
||||
static char tworks[8];
|
||||
|
||||
int bmu_text(const Bitmap * bm, const char * str, char len)
|
||||
int bmu_text(const Bitmap * bm, char lx, const char * str, char len)
|
||||
{
|
||||
char lx = 0;
|
||||
int tw = 0;
|
||||
|
||||
tworks[0] = 0;
|
||||
tworks[1] = 0;
|
||||
tworks[2] = 0;
|
||||
tworks[3] = 0;
|
||||
tworks[4] = 0;
|
||||
tworks[5] = 0;
|
||||
tworks[6] = 0;
|
||||
tworks[7] = 0;
|
||||
|
||||
char * cp = bm->data;
|
||||
|
||||
for(char fx=0; fx<len; fx++)
|
||||
|
@@ -1347,9 +1467,9 @@ static Bitmap tbitmap = {
|
|||
|
||||
int bmu_put_chars(const Bitmap * bm, int x, int y, const char * str, char len, BlitOp op)
|
||||
{
|
||||
int tw = bmu_text(&tbitmap, str, len);
|
||||
int tw = bmu_text(&tbitmap, x & 7, str, len);
|
||||
|
||||
bmu_bitblit(bm, x, y, &tbitmap, 0, 0, tw, 8, nullptr, op);
|
||||
bmu_bitblit(bm, x, y, &tbitmap, x & 7, 0, tw, 8, nullptr, op);
|
||||
|
||||
return tw;
|
||||
}
|
||||
|
@@ -1401,9 +1521,9 @@ int bm_put_chars(const Bitmap * bm, const ClipRect * clip, int x, int y, const c
|
|||
fx++;
|
||||
}
|
||||
|
||||
int cw = bmu_text(&tbitmap, str, fx);
|
||||
int cw = bmu_text(&tbitmap, x & 7, str, fx);
|
||||
|
||||
bm_bitblit(bm, clip, x, y, &tbitmap, 0, 0, cw, 8, nullptr, op);
|
||||
bm_bitblit(bm, clip, x, y, &tbitmap, x & 7, 0, cw, 8, nullptr, op);
|
||||
|
||||
while (fx < len)
|
||||
{
|
||||
|
|
|
@@ -148,7 +148,7 @@ inline void bm_rect_copy(const Bitmap * dbm, const ClipRect * clip, int dx, int
|
|||
|
||||
|
||||
// Unclipped text rendering
|
||||
int bmu_text(const Bitmap * bm, const char * str, char len);
|
||||
int bmu_text(const Bitmap * bm, char lx, const char * str, char len);
|
||||
|
||||
// Calculate size of a char range
|
||||
int bmu_text_size(const char * str, char len);
|
||||
|
|
|
@@ -623,12 +623,15 @@ Declaration* Declaration::Clone(void)
    return ndec;
}

Declaration* Declaration::ToStriped(void)
Declaration* Declaration::ToStriped(Errors * errors)
{
    Declaration* ndec = this->Clone();

    if (mType == DT_TYPE_ARRAY)
    {
        if (mSize == 0)
            errors->Error(ndec->mLocation, ERRR_STRIPE_REQUIRES_FIXED_SIZE_ARRAY, "__striped requires fixed size array");

        ndec->mFlags |= DTF_STRIPED;
        if (mBase->mType == DT_TYPE_ARRAY)
        {
@@ -643,10 +646,12 @@ Declaration* Declaration::ToStriped(void)
            ndec->mBase = mBase->ToStriped(mSize / mBase->mSize);
        }
    }
    else
    else if (ndec->mBase)
    {
        ndec->mBase = mBase->ToStriped();
        ndec->mBase = mBase->ToStriped(errors);
    }
    else
        errors->Error(ndec->mLocation, ERRR_STRIPE_REQUIRES_FIXED_SIZE_ARRAY, "__striped requires fixed size array");

    return ndec;
}

@@ -206,7 +206,7 @@ public:

    Declaration* ToConstType(void);
    Declaration* ToStriped(int stripe);
    Declaration* ToStriped(void);
    Declaration* ToStriped(Errors* errors);
    Declaration* Clone(void);

    int Stride(void) const;

@@ -66,7 +66,7 @@ enum ErrorID
    ERRR_INVALID_STORAGE_TYPE,
    ERRR_SEMICOLON_EXPECTED,
    ERRR_USE_OF_UNINITIALIZED_VARIABLE,

    ERRR_STRIPE_REQUIRES_FIXED_SIZE_ARRAY,

    EERR_INVALID_PREPROCESSOR,
};
@@ -5593,7 +5593,7 @@ void InterCodeBasicBlock::UpdateLocalIntegerRangeSets(const GrowingVariableArray
|
|||
{
|
||||
ins->mSrc[i].mRange = mLocalValueRange[ins->mSrc[i].mTemp];
|
||||
#if 1
|
||||
if (ins->mSrc[i].mRange.mMinState == IntegerValueRange::S_BOUND && ins->mSrc[i].mRange.mMaxState == IntegerValueRange::S_BOUND && ins->mSrc[i].mRange.mMinValue == ins->mSrc[i].mRange.mMaxValue)
|
||||
if (ins->mCode != IC_ASSEMBLER && ins->mSrc[i].mRange.mMinState == IntegerValueRange::S_BOUND && ins->mSrc[i].mRange.mMaxState == IntegerValueRange::S_BOUND && ins->mSrc[i].mRange.mMinValue == ins->mSrc[i].mRange.mMaxValue)
|
||||
{
|
||||
ins->mSrc[i].mTemp = -1;
|
||||
ins->mSrc[i].mIntConst = ins->mSrc[i].mRange.mMinValue;
|
||||
|
|
|
@@ -3474,7 +3474,6 @@ void InterCodeGenerator::BuildInitializer(InterCodeModule * mod, uint8* dp, int

            ref.mRefObject = dec->mLinkerObject;
            ref.mRefOffset = 0;
            variable->mLinkerObject->AddReference(ref);
            break;
        }
        case DT_CONST_FUNCTION:
@@ -3483,13 +3482,8 @@ void InterCodeGenerator::BuildInitializer(InterCodeModule * mod, uint8* dp, int
                InterCodeProcedure* cproc = this->TranslateProcedure(mod, dec->mValue, dec);
            }

            LinkerReference ref;
            ref.mObject = variable->mLinkerObject;
            ref.mOffset = offset;
            ref.mFlags = LREF_LOWBYTE | LREF_HIGHBYTE;
            ref.mRefObject = dec->mLinkerObject;
            ref.mRefOffset = 0;
            variable->mLinkerObject->AddReference(ref);
            break;

        case DT_VARIABLE:
@@ -3499,13 +3493,8 @@ void InterCodeGenerator::BuildInitializer(InterCodeModule * mod, uint8* dp, int
                InitGlobalVariable(mod, dec);
            }

            LinkerReference ref;
            ref.mObject = variable->mLinkerObject;
            ref.mOffset = offset;
            ref.mFlags = LREF_LOWBYTE | LREF_HIGHBYTE;
            ref.mRefObject = dec->mLinkerObject;
            ref.mRefOffset = 0;
            variable->mLinkerObject->AddReference(ref);
        } break;

        case DT_VARIABLE_REF:
@@ -3515,16 +3504,22 @@ void InterCodeGenerator::BuildInitializer(InterCodeModule * mod, uint8* dp, int
                InitGlobalVariable(mod, dec->mBase);
            }

            LinkerReference ref;
            ref.mObject = variable->mLinkerObject;
            ref.mOffset = offset;
            ref.mFlags = LREF_LOWBYTE | LREF_HIGHBYTE;
            ref.mRefObject = dec->mBase->mLinkerObject;
            ref.mRefOffset = dec->mOffset;
            variable->mLinkerObject->AddReference(ref);
        } break;

        }

        if (data->mBase->mStripe == 1)
            variable->mLinkerObject->AddReference(ref);
        else
        {
            ref.mFlags = LREF_LOWBYTE;
            variable->mLinkerObject->AddReference(ref);
            ref.mFlags = LREF_HIGHBYTE;
            ref.mOffset += data->mBase->mStripe;
            variable->mLinkerObject->AddReference(ref);
        }
    }
}

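The block above splits a striped pointer initializer into an LREF_LOWBYTE reference at the element's offset and an LREF_HIGHBYTE reference at offset + stripe. A rough C model of that memory layout, assuming the stripe equals the element count (names and numbers invented for illustration):

    #include <stdint.h>
    #include <stdio.h>

    #define N 4  /* hypothetical element count; assumed to equal the stripe */

    /* Low bytes of all N pointers first, high bytes N (the stripe) bytes later. */
    static uint8_t striped_ptrs[2 * N];

    static void store_striped_ptr(unsigned index, uint16_t addr)
    {
        striped_ptrs[index]     = (uint8_t)(addr & 0xff);  /* LREF_LOWBYTE at offset */
        striped_ptrs[index + N] = (uint8_t)(addr >> 8);    /* LREF_HIGHBYTE at offset + stripe */
    }

    int main(void)
    {
        store_striped_ptr(2, 0xc010);
        printf("%02x %02x\n", striped_ptrs[2], striped_ptrs[2 + N]);  /* prints 10 c0 */
        return 0;
    }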
@@ -248,6 +248,10 @@ NativeCodeInstruction::NativeCodeInstruction(AsmInsType type, AsmInsMode mode, i
|
|||
{
|
||||
assert(address >= 0 && address < 256);
|
||||
}
|
||||
if (mode == ASMIM_ZERO_PAGE)
|
||||
{
|
||||
assert(address >= 2 && address < 256);
|
||||
}
|
||||
}
|
||||
|
||||
NativeCodeInstruction::NativeCodeInstruction(AsmInsType type, const NativeCodeInstruction& addr)
|
||||
|
@@ -6744,14 +6748,13 @@ NativeCodeBasicBlock * NativeCodeBasicBlock::CopyValue(InterCodeProcedure* proc,
|
|||
else if (ins->mSrc[0].mMemory == IM_LOCAL || ins->mSrc[0].mMemory == IM_PARAM)
|
||||
{
|
||||
int index = ins->mSrc[0].mIntConst;
|
||||
int areg = mNoFrame ? BC_REG_STACK : BC_REG_LOCALS;
|
||||
sreg = mNoFrame ? BC_REG_STACK : BC_REG_LOCALS;
|
||||
if (ins->mSrc[0].mMemory == IM_LOCAL)
|
||||
index += proc->mLocalVars[ins->mSrc[0].mVarIndex]->mOffset;
|
||||
else
|
||||
index += ins->mSrc[0].mVarIndex + proc->mLocalSize + 2;
|
||||
index += mFrameOffset;
|
||||
CheckFrameIndex(areg, index, 256, BC_REG_ACCU);
|
||||
sreg = BC_REG_ACCU;
|
||||
CheckFrameIndex(sreg, index, 256, BC_REG_ACCU);
|
||||
}
|
||||
}
|
||||
else if (ins->mSrc[0].mIntConst != 0)
|
||||
|
@@ -6799,15 +6802,14 @@ NativeCodeBasicBlock * NativeCodeBasicBlock::CopyValue(InterCodeProcedure* proc,
|
|||
}
|
||||
else if (ins->mSrc[1].mMemory == IM_LOCAL || ins->mSrc[1].mMemory == IM_PARAM)
|
||||
{
|
||||
int index = ins->mSrc[0].mIntConst;
|
||||
int areg = mNoFrame ? BC_REG_STACK : BC_REG_LOCALS;
|
||||
int index = ins->mSrc[1].mIntConst;
|
||||
dreg = mNoFrame ? BC_REG_STACK : BC_REG_LOCALS;
|
||||
if (ins->mSrc[1].mMemory == IM_LOCAL)
|
||||
index += proc->mLocalVars[ins->mSrc[1].mVarIndex]->mOffset;
|
||||
else
|
||||
index += ins->mSrc[1].mVarIndex + proc->mLocalSize + 2;
|
||||
index += mFrameOffset;
|
||||
CheckFrameIndex(areg, index, 256, BC_REG_ADDR);
|
||||
dreg = BC_REG_ADDR;
|
||||
CheckFrameIndex(dreg, index, 256, BC_REG_ADDR);
|
||||
}
|
||||
else if (ins->mSrc[1].mMemory == IM_FRAME)
|
||||
{
|
||||
|
@@ -13403,6 +13405,12 @@ bool NativeCodeBasicBlock::MoveAccuTrainsUp(void)
|
|||
wzero -= BC_REG_WORK + j;
|
||||
}
|
||||
|
||||
if (!(mIns[i].mFlags & NCIF_RUNTIME) || (mIns[i].mFlags & NCIF_FEXEC))
|
||||
{
|
||||
wzero -= BC_REG_ADDR;
|
||||
wzero -= BC_REG_ADDR + 1;
|
||||
}
|
||||
|
||||
// wzero.Clear();
|
||||
i++;
|
||||
}
|
||||
|
@@ -13446,7 +13454,7 @@ bool NativeCodeBasicBlock::AlternateXYUsage(void)
|
|||
|
||||
if (ins.mType == ASMIT_LDY)
|
||||
{
|
||||
if ((ins.mMode == ASMIM_ZERO_PAGE || ins.mMode == ASMIM_ABSOLUTE) && predYPos >= 0 && ins.SameEffectiveAddress(mIns[predYPos]))
|
||||
if ((ins.mMode == ASMIM_ZERO_PAGE || ins.mMode == ASMIM_ABSOLUTE) && predYPos >= 0 && ins.SameEffectiveAddress(mIns[predYPos]) && !(ins.mFlags & NCIF_VOLATILE))
|
||||
{
|
||||
if (CanReplaceYRegWithXReg(predYEnd, start))
|
||||
{
|
||||
|
@@ -13470,7 +13478,7 @@ bool NativeCodeBasicBlock::AlternateXYUsage(void)
|
|||
}
|
||||
else if (ins.mType == ASMIT_LDX)
|
||||
{
|
||||
if ((ins.mMode == ASMIM_ZERO_PAGE || ins.mMode == ASMIM_ABSOLUTE) && predXPos >= 0 && ins.SameEffectiveAddress(mIns[predXPos]))
|
||||
if ((ins.mMode == ASMIM_ZERO_PAGE || ins.mMode == ASMIM_ABSOLUTE) && predXPos >= 0 && ins.SameEffectiveAddress(mIns[predXPos]) && !(ins.mFlags & NCIF_VOLATILE))
|
||||
{
|
||||
if (CanReplaceXRegWithYReg(predXEnd, start))
|
||||
{
|
||||
|
@@ -16159,10 +16167,26 @@ bool NativeCodeBasicBlock::JoinTailCodeSequences(NativeCodeProcedure* proc, bool
|
|||
i++;
|
||||
if (i == mEntryBlocks.Size())
|
||||
{
|
||||
if (ins.RequiresAccu())
|
||||
mEntryRequiredRegs += CPU_REG_A;
|
||||
if (ins.RequiresYReg())
|
||||
mEntryRequiredRegs += CPU_REG_Y;
|
||||
if (ins.RequiresXReg())
|
||||
mEntryRequiredRegs += CPU_REG_X;
|
||||
|
||||
mIns.Insert(0, ins);
|
||||
|
||||
for (int i = 0; i < mEntryBlocks.Size(); i++)
|
||||
{
|
||||
NativeCodeBasicBlock* b = mEntryBlocks[i];
|
||||
|
||||
if (ins.RequiresAccu())
|
||||
b->mExitRequiredRegs += CPU_REG_A;
|
||||
if (ins.RequiresYReg())
|
||||
b->mExitRequiredRegs += CPU_REG_Y;
|
||||
if (ins.RequiresXReg())
|
||||
b->mExitRequiredRegs += CPU_REG_X;
|
||||
|
||||
b->mIns.SetSize(b->mIns.Size() - 1);
|
||||
}
|
||||
changed = true;
|
||||
|
@@ -17679,6 +17703,14 @@ bool NativeCodeBasicBlock::PatchSingleUseGlobalLoad(const NativeCodeBasicBlock*
|
|||
if (at == 0 && !mEntryRequiredRegs[reg])
|
||||
return false;
|
||||
|
||||
if (at == 0)
|
||||
{
|
||||
if (ains.RequiresXReg())
|
||||
mEntryRequiredRegs += CPU_REG_X;
|
||||
if (ains.RequiresYReg())
|
||||
mEntryRequiredRegs += CPU_REG_Y;
|
||||
}
|
||||
|
||||
while (at < mIns.Size())
|
||||
{
|
||||
NativeCodeInstruction& ins(mIns[at]);
|
||||
|
@@ -17710,6 +17742,11 @@ bool NativeCodeBasicBlock::PatchSingleUseGlobalLoad(const NativeCodeBasicBlock*
|
|||
at++;
|
||||
}
|
||||
|
||||
if (ains.RequiresXReg())
|
||||
mExitRequiredRegs += CPU_REG_X;
|
||||
if (ains.RequiresYReg())
|
||||
mExitRequiredRegs += CPU_REG_Y;
|
||||
|
||||
if (mTrueJump && mTrueJump->PatchSingleUseGlobalLoad(block, reg, 0, ains))
|
||||
changed = true;
|
||||
if (mFalseJump && mFalseJump->PatchSingleUseGlobalLoad(block, reg, 0, ains))
|
||||
|
@@ -21011,7 +21048,13 @@ bool NativeCodeBasicBlock::MoveLoadImmStoreAbsoluteUp(int at)
|
|||
if (!(sins.mLive & LIVE_CPU_REG_A))
|
||||
mIns.Remove(at);
|
||||
|
||||
if (sins.ReferencesXReg())
|
||||
sins.mLive |= LIVE_CPU_REG_X;
|
||||
if (sins.ReferencesYReg())
|
||||
sins.mLive |= LIVE_CPU_REG_Y;
|
||||
|
||||
mIns.Insert(j, sins);
|
||||
|
||||
return true;
|
||||
}
|
||||
else if (mIns[j - 1].mMode == mIns[at + 1].mMode && mIns[j - 1].mAddress == mIns[at + 1].mAddress)
|
||||
|
@@ -22531,6 +22574,8 @@ bool NativeCodeBasicBlock::RemoveSimpleLoopUnusedIndex(void)
|
|||
{
|
||||
bool changed = false;
|
||||
|
||||
assert(mEntryRequiredRegs.Size() > 0);
|
||||
|
||||
if (!mVisited)
|
||||
{
|
||||
mVisited = true;
|
||||
|
@@ -22628,6 +22673,8 @@ bool NativeCodeBasicBlock::OptimizeSimpleLoopInvariant(NativeCodeProcedure* proc
|
|||
if (mBranch == ASMIT_JMP)
|
||||
return false;
|
||||
|
||||
assert(mEntryRequiredRegs.Size());
|
||||
|
||||
CheckLive();
|
||||
|
||||
int sz = mIns.Size();
|
||||
|
@@ -22786,6 +22833,9 @@ bool NativeCodeBasicBlock::OptimizeSimpleLoopInvariant(NativeCodeProcedure* proc
|
|||
prevBlock->mIns.Push(mIns[si]);
|
||||
mIns.Remove(si);
|
||||
|
||||
mEntryRequiredRegs += CPU_REG_Y;
|
||||
mExitRequiredRegs += CPU_REG_Y;
|
||||
|
||||
CheckLive();
|
||||
|
||||
return true;
|
||||
|
@@ -22852,6 +22902,9 @@ bool NativeCodeBasicBlock::OptimizeSimpleLoopInvariant(NativeCodeProcedure* proc
|
|||
prevBlock->mIns.Push(mIns[si]);
|
||||
mIns.Remove(si);
|
||||
|
||||
mEntryRequiredRegs += CPU_REG_X;
|
||||
mExitRequiredRegs += CPU_REG_X;
|
||||
|
||||
CheckLive();
|
||||
|
||||
return true;
|
||||
|
@@ -23148,7 +23201,7 @@ bool NativeCodeBasicBlock::OptimizeSimpleLoopInvariant(NativeCodeProcedure* proc
|
|||
while (ai < mIns.Size() && !mIns[ai].ChangesAccu())
|
||||
ai++;
|
||||
|
||||
if (ai < mIns.Size())
|
||||
if (ai < mIns.Size() && !(mIns[ai].mLive & LIVE_CPU_REG_Z))
|
||||
{
|
||||
if (mIns[ai].mType == ASMIT_LDA && mIns[ai].mMode == ASMIM_IMMEDIATE)
|
||||
{
|
||||
|
@@ -23220,6 +23273,9 @@ bool NativeCodeBasicBlock::OptimizeSimpleLoopInvariant(NativeCodeProcedure* proc
|
|||
prevBlock->mIns.Push(mIns[ai]);
|
||||
mIns.Remove(ai);
|
||||
|
||||
mEntryRequiredRegs += CPU_REG_Y;
|
||||
prevBlock->mExitRequiredRegs += CPU_REG_Y;
|
||||
|
||||
changed = true;
|
||||
}
|
||||
}
|
||||
|
@@ -23261,6 +23317,11 @@ bool NativeCodeBasicBlock::OptimizeSimpleLoopInvariant(NativeCodeProcedure* proc
|
|||
|
||||
changed = true;
|
||||
|
||||
prevBlock->mExitRequiredRegs += CPU_REG_Y;
|
||||
mEntryRequiredRegs += CPU_REG_Y;
|
||||
mExitRequiredRegs += CPU_REG_Y;
|
||||
exitBlock->mEntryRequiredRegs += CPU_REG_Y;
|
||||
|
||||
prevBlock->mIns.Push(NativeCodeInstruction(ASMIT_LDY, ASMIM_ZERO_PAGE, addr));
|
||||
exitBlock->mIns.Push(NativeCodeInstruction(ASMIT_STY, ASMIM_ZERO_PAGE, addr));
|
||||
for (int i = 0; i < mIns.Size(); i++)
|
||||
|
@@ -23765,6 +23826,15 @@ bool NativeCodeBasicBlock::OptimizeSimpleLoop(NativeCodeProcedure * proc, bool f
|
|||
eblock->mTrueJump = mFalseJump;
|
||||
eblock->mFalseJump = nullptr;
|
||||
|
||||
lblock->mEntryRequiredRegs = mEntryRequiredRegs;
|
||||
lblock->mExitRequiredRegs = mExitRequiredRegs;
|
||||
eblock->mEntryRequiredRegs = mExitRequiredRegs;
|
||||
eblock->mExitRequiredRegs = mExitRequiredRegs;
|
||||
mExitRequiredRegs = mEntryRequiredRegs;
|
||||
mExitRequiredRegs += CPU_REG_Y;
|
||||
lblock->mEntryRequiredRegs += CPU_REG_Y;
|
||||
lblock->mExitRequiredRegs += CPU_REG_Y;
|
||||
eblock->mEntryRequiredRegs += CPU_REG_Y;
|
||||
|
||||
mIns.SetSize(0);
|
||||
mIns.Push(NativeCodeInstruction(ASMIT_LDY, ASMIM_ZERO_PAGE, zreg));
|
||||
|
@@ -23837,6 +23907,16 @@ bool NativeCodeBasicBlock::OptimizeSimpleLoop(NativeCodeProcedure * proc, bool f
|
|||
eblock->mTrueJump = mFalseJump;
|
||||
eblock->mFalseJump = nullptr;
|
||||
|
||||
lblock->mEntryRequiredRegs = mEntryRequiredRegs;
|
||||
lblock->mExitRequiredRegs = mExitRequiredRegs;
|
||||
eblock->mEntryRequiredRegs = mExitRequiredRegs;
|
||||
eblock->mExitRequiredRegs = mExitRequiredRegs;
|
||||
mExitRequiredRegs = mEntryRequiredRegs;
|
||||
mExitRequiredRegs += CPU_REG_X;
|
||||
lblock->mEntryRequiredRegs += CPU_REG_X;
|
||||
lblock->mExitRequiredRegs += CPU_REG_X;
|
||||
eblock->mEntryRequiredRegs += CPU_REG_X;
|
||||
|
||||
mIns.SetSize(0);
|
||||
mIns.Push(NativeCodeInstruction(ASMIT_LDX, ASMIM_ZERO_PAGE, zreg));
|
||||
mBranch = ASMIT_JMP;
|
||||
|
@@ -23945,6 +24025,16 @@ bool NativeCodeBasicBlock::OptimizeSimpleLoop(NativeCodeProcedure * proc, bool f
|
|||
eblock->mTrueJump = mFalseJump;
|
||||
eblock->mFalseJump = nullptr;
|
||||
|
||||
lblock->mEntryRequiredRegs = mEntryRequiredRegs;
|
||||
lblock->mExitRequiredRegs = mExitRequiredRegs;
|
||||
eblock->mEntryRequiredRegs = mExitRequiredRegs;
|
||||
eblock->mExitRequiredRegs = mExitRequiredRegs;
|
||||
mExitRequiredRegs = mEntryRequiredRegs;
|
||||
mExitRequiredRegs += CPU_REG_Y;
|
||||
lblock->mEntryRequiredRegs += CPU_REG_Y;
|
||||
lblock->mExitRequiredRegs += CPU_REG_Y;
|
||||
eblock->mEntryRequiredRegs += CPU_REG_Y;
|
||||
|
||||
mIns.SetSize(0);
|
||||
mIns.Push(NativeCodeInstruction(ASMIT_LDY, ASMIM_ZERO_PAGE, zreg));
|
||||
mBranch = ASMIT_JMP;
|
||||
|
@@ -24037,6 +24127,16 @@ bool NativeCodeBasicBlock::OptimizeSimpleLoop(NativeCodeProcedure * proc, bool f
|
|||
eblock->mTrueJump = mFalseJump;
|
||||
eblock->mFalseJump = nullptr;
|
||||
|
||||
lblock->mEntryRequiredRegs = mEntryRequiredRegs;
|
||||
lblock->mExitRequiredRegs = mExitRequiredRegs;
|
||||
eblock->mEntryRequiredRegs = mExitRequiredRegs;
|
||||
eblock->mExitRequiredRegs = mExitRequiredRegs;
|
||||
mExitRequiredRegs = mEntryRequiredRegs;
|
||||
mExitRequiredRegs += CPU_REG_Y;
|
||||
lblock->mEntryRequiredRegs += CPU_REG_Y;
|
||||
lblock->mExitRequiredRegs += CPU_REG_Y;
|
||||
eblock->mEntryRequiredRegs += CPU_REG_Y;
|
||||
|
||||
mIns.SetSize(0);
|
||||
mIns.Push(NativeCodeInstruction(ASMIT_LDY, ASMIM_ZERO_PAGE, zreg));
|
||||
mBranch = ASMIT_JMP;
|
||||
|
@@ -24081,6 +24181,16 @@ bool NativeCodeBasicBlock::OptimizeSimpleLoop(NativeCodeProcedure * proc, bool f
|
|||
mTrueJump = lblock;
|
||||
mFalseJump = nullptr;
|
||||
|
||||
lblock->mEntryRequiredRegs = mEntryRequiredRegs;
|
||||
lblock->mExitRequiredRegs = mExitRequiredRegs;
|
||||
eblock->mEntryRequiredRegs = mExitRequiredRegs;
|
||||
eblock->mExitRequiredRegs = mExitRequiredRegs;
|
||||
mExitRequiredRegs = mEntryRequiredRegs;
|
||||
mExitRequiredRegs += CPU_REG_X;
|
||||
lblock->mEntryRequiredRegs += CPU_REG_X;
|
||||
lblock->mExitRequiredRegs += CPU_REG_X;
|
||||
eblock->mEntryRequiredRegs += CPU_REG_X;
|
||||
|
||||
lblock->OptimizeSimpleLoopInvariant(proc, this, eblock, full);
|
||||
|
||||
lblock->CheckLive();
|
||||
|
@@ -26292,6 +26402,10 @@ bool NativeCodeBasicBlock::PeepHoleOptimizer(NativeCodeProcedure* proc, int pass
|
|||
mVisited = true;
|
||||
|
||||
CheckLive();
|
||||
if (mTrueJump)
|
||||
mTrueJump->CheckLive();
|
||||
if (mFalseJump)
|
||||
mFalseJump->CheckLive();
|
||||
|
||||
|
||||
#if 1
|
||||
|
@@ -30690,6 +30804,10 @@ bool NativeCodeBasicBlock::PeepHoleOptimizer(NativeCodeProcedure* proc, int pass
|
|||
}
|
||||
#endif
|
||||
CheckLive();
|
||||
if (mTrueJump)
|
||||
mTrueJump->CheckLive();
|
||||
if (mFalseJump)
|
||||
mFalseJump->CheckLive();
|
||||
#if 1
|
||||
if (i + 1 < mIns.Size())
|
||||
{
|
||||
|
@@ -30724,6 +30842,10 @@ bool NativeCodeBasicBlock::PeepHoleOptimizer(NativeCodeProcedure* proc, int pass
|
|||
progress = true;
|
||||
}
|
||||
CheckLive();
|
||||
if (mTrueJump)
|
||||
mTrueJump->CheckLive();
|
||||
if (mFalseJump)
|
||||
mFalseJump->CheckLive();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@@ -30746,6 +30868,10 @@ bool NativeCodeBasicBlock::PeepHoleOptimizer(NativeCodeProcedure* proc, int pass
|
|||
if (PatchSingleUseGlobalLoad(this, mIns[i + 1].mAddress, i + 2, mIns[i]))
|
||||
progress = true;
|
||||
CheckLive();
|
||||
if (mTrueJump)
|
||||
mTrueJump->CheckLive();
|
||||
if (mFalseJump)
|
||||
mFalseJump->CheckLive();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@@ -30760,11 +30886,19 @@ bool NativeCodeBasicBlock::PeepHoleOptimizer(NativeCodeProcedure* proc, int pass
|
|||
if (PatchSingleUseGlobalLoad(this, mIns[i + 0].mAddress, i + 2, mIns[i + 1]))
|
||||
progress = true;
|
||||
CheckLive();
|
||||
if (mTrueJump)
|
||||
mTrueJump->CheckLive();
|
||||
if (mFalseJump)
|
||||
mFalseJump->CheckLive();
|
||||
}
|
||||
}
|
||||
}
|
||||
#endif
|
||||
CheckLive();
|
||||
if (mTrueJump)
|
||||
mTrueJump->CheckLive();
|
||||
if (mFalseJump)
|
||||
mFalseJump->CheckLive();
|
||||
|
||||
if (i + 5 < mIns.Size())
|
||||
{
|
||||
|
@@ -31134,6 +31268,11 @@ bool NativeCodeBasicBlock::PeepHoleOptimizer(NativeCodeProcedure* proc, int pass
|
|||
mIns[i + 3].mType = ASMIT_NOP; mIns[i + 3].mMode = ASMIM_IMPLIED;
|
||||
|
||||
progress = true;
|
||||
|
||||
if (mTrueJump)
|
||||
mTrueJump->CheckLive();
|
||||
if (mFalseJump)
|
||||
mFalseJump->CheckLive();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@@ -31160,6 +31299,11 @@ bool NativeCodeBasicBlock::PeepHoleOptimizer(NativeCodeProcedure* proc, int pass
|
|||
|
||||
progress = true;
|
||||
}
|
||||
|
||||
if (mTrueJump)
|
||||
mTrueJump->CheckLive();
|
||||
if (mFalseJump)
|
||||
mFalseJump->CheckLive();
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
@@ -31188,6 +31332,11 @@ bool NativeCodeBasicBlock::PeepHoleOptimizer(NativeCodeProcedure* proc, int pass
|
|||
mIns[i + j].mType = ASMIT_NOP; mIns[i + j].mMode = ASMIM_IMPLIED;
|
||||
}
|
||||
}
|
||||
|
||||
if (mTrueJump)
|
||||
mTrueJump->CheckLive();
|
||||
if (mFalseJump)
|
||||
mFalseJump->CheckLive();
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
@@ -31217,6 +31366,11 @@ bool NativeCodeBasicBlock::PeepHoleOptimizer(NativeCodeProcedure* proc, int pass
|
|||
mIns[i + j].mType = ASMIT_NOP; mIns[i + j].mMode = ASMIM_IMPLIED;
|
||||
}
|
||||
}
|
||||
|
||||
if (mTrueJump)
|
||||
mTrueJump->CheckLive();
|
||||
if (mFalseJump)
|
||||
mFalseJump->CheckLive();
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
@@ -31240,6 +31394,11 @@ bool NativeCodeBasicBlock::PeepHoleOptimizer(NativeCodeProcedure* proc, int pass
|
|||
|
||||
mIns[i + 1].mType = ASMIT_NOP; mIns[i + 1].mMode = ASMIM_IMPLIED;
|
||||
mIns[i + 5].mAddress = mIns[i + 3].mAddress;
|
||||
|
||||
if (mTrueJump)
|
||||
mTrueJump->CheckLive();
|
||||
if (mFalseJump)
|
||||
mFalseJump->CheckLive();
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
@@ -31262,6 +31421,11 @@ bool NativeCodeBasicBlock::PeepHoleOptimizer(NativeCodeProcedure* proc, int pass
|
|||
progress = true;
|
||||
|
||||
mIns[i + 1].mType = ASMIT_NOP; mIns[i + 1].mMode = ASMIM_IMPLIED;
|
||||
|
||||
if (mTrueJump)
|
||||
mTrueJump->CheckLive();
|
||||
if (mFalseJump)
|
||||
mFalseJump->CheckLive();
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
@@ -31439,6 +31603,10 @@ bool NativeCodeBasicBlock::PeepHoleOptimizer(NativeCodeProcedure* proc, int pass
|
|||
#endif
|
||||
#endif
|
||||
CheckLive();
|
||||
if (mTrueJump)
|
||||
mTrueJump->CheckLive();
|
||||
if (mFalseJump)
|
||||
mFalseJump->CheckLive();
|
||||
}
|
||||
|
||||
if (progress)
|
||||
|
@@ -31447,6 +31615,10 @@ bool NativeCodeBasicBlock::PeepHoleOptimizer(NativeCodeProcedure* proc, int pass
|
|||
} while (progress);
|
||||
|
||||
CheckLive();
|
||||
if (mTrueJump)
|
||||
mTrueJump->CheckLive();
|
||||
if (mFalseJump)
|
||||
mFalseJump->CheckLive();
|
||||
|
||||
int sz = mIns.Size();
|
||||
#if 1
|
||||
|
@@ -31707,6 +31879,10 @@ bool NativeCodeBasicBlock::PeepHoleOptimizer(NativeCodeProcedure* proc, int pass
|
|||
#endif
|
||||
|
||||
CheckLive();
|
||||
if (mTrueJump)
|
||||
mTrueJump->CheckLive();
|
||||
if (mFalseJump)
|
||||
mFalseJump->CheckLive();
|
||||
|
||||
#if 1
|
||||
if (mTrueJump && mFalseJump && !mTrueJump->mFalseJump && !mFalseJump->mFalseJump && mTrueJump->mTrueJump == mFalseJump->mTrueJump &&
|
||||
|
@@ -31797,6 +31973,11 @@ bool NativeCodeBasicBlock::PeepHoleOptimizer(NativeCodeProcedure* proc, int pass
|
|||
#endif
|
||||
CheckLive();
|
||||
|
||||
if (mTrueJump)
|
||||
mTrueJump->CheckLive();
|
||||
if (mFalseJump)
|
||||
mFalseJump->CheckLive();
|
||||
|
||||
#endif
|
||||
assert(mIndex == 1000 || mNumEntries == mEntryBlocks.Size());
|
||||
|
||||
|
@@ -32929,7 +33110,7 @@ void NativeCodeProcedure::RebuildEntry(void)
|
|||
|
||||
void NativeCodeProcedure::Optimize(void)
|
||||
{
|
||||
CheckFunc = !strcmp(mInterProc->mIdent->mString, "bmu_line");
|
||||
CheckFunc = !strcmp(mInterProc->mIdent->mString, "main");
|
||||
|
||||
#if 1
|
||||
int step = 0;
|
||||
|
|
|
@@ -901,6 +901,7 @@ Expression* Parser::ParseInitExpression(Declaration* dtype)
    if (dtype->mType == DT_TYPE_POINTER && exp->mDecType && exp->mDecType->mType == DT_TYPE_ARRAY && exp->mType == EX_CONSTANT)
    {
        Declaration* ndec = new Declaration(exp->mDecValue->mLocation, DT_CONST_POINTER);
        ndec->mBase = dtype;
        ndec->mValue = exp;
        dec = ndec;

@@ -955,6 +956,7 @@ Expression* Parser::ParseInitExpression(Declaration* dtype)
    else if (exp->mDecValue->mType == DT_CONST_FUNCTION)
    {
        Declaration* ndec = new Declaration(exp->mDecValue->mLocation, DT_CONST_POINTER);
        ndec->mBase = dtype;
        ndec->mValue = exp;
        dec = ndec;

@@ -1083,7 +1085,7 @@ Declaration* Parser::ParseDeclaration(bool variable, bool expression)
    ndec = ReverseDeclaration(ndec, bdec);

    if (storageFlags & DTF_STRIPED)
        ndec = ndec->ToStriped();
        ndec = ndec->ToStriped(mErrors);

    Declaration* npdec = ndec;