diff options
| author | ptitSeb <sebastien.chev@gmail.com> | 2023-11-28 18:48:55 +0100 |
|---|---|---|
| committer | ptitSeb <sebastien.chev@gmail.com> | 2023-11-28 18:48:55 +0100 |
| commit | 8e5346be212f1c2114d4c513e7bb54b041c8f140 (patch) | |
| tree | 617fce9941d7e6f9faf7c26cf5d1bc7c81cdbee3 /src | |
| parent | 5f9d1f62cfc5c31e916e80e7eb3fa570d00ee3eb (diff) | |
| download | box64-8e5346be212f1c2114d4c513e7bb54b041c8f140.tar.gz box64-8e5346be212f1c2114d4c513e7bb54b041c8f140.zip | |
[ARM64_DYNAREC] A few optimisations on some F0 BTx opcodes
Diffstat (limited to 'src')
| -rw-r--r-- | src/dynarec/arm64/dynarec_arm64_f0.c | 34 |
1 file changed, 12 insertions, 22 deletions
diff --git a/src/dynarec/arm64/dynarec_arm64_f0.c b/src/dynarec/arm64/dynarec_arm64_f0.c index 384b5772..9dc48f12 100644 --- a/src/dynarec/arm64/dynarec_arm64_f0.c +++ b/src/dynarec/arm64/dynarec_arm64_f0.c @@ -200,23 +200,18 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin ORRxw_REG(ed, ed, x4); } else { // Will fetch only 1 byte, to avoid alignment issue - if(rex.w) { - ANDx_mask(x2, gd, 1, 0, 0b00010); //mask=0x0000000000000007 - } else { - ANDw_mask(x2, gd, 0, 0b00010); //mask=0x000000007 - } + ANDw_mask(x2, gd, 0, 0b00010); //mask=0x000000007 addr = geted(dyn, addr, ninst, nextop, &wback, x3, &fixedaddress, NULL, 0, 0, rex, LOCK_LOCK, 0, 0); ASRxw(x1, gd, 3); // r1 = (gd>>3) ADDx_REG_LSL(x3, wback, x1, 0); //(&ed)+=r1; ed = x1; wback = x3; + MOV32w(x5, 1); MARKLOCK; LDAXRB(ed, wback); LSRw_REG(x4, ed, x2); - ANDw_mask(x4, x4, 0, 0); //mask=1 - BFIw(xFlags, x4, F_CF, 1); - MOV32w(x4, 1); - LSLw_REG(x4, x4, x2); + BFIw(xFlags, x5, F_CF, 1); + LSLw_REG(x4, x5, x2); ORRw_REG(ed, ed, x4); STLXRB(x4, ed, wback); CBNZw_MARKLOCK(x4); @@ -382,23 +377,18 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin BICxw_REG(ed, ed, x4); } else { // Will fetch only 1 byte, to avoid alignment issue - if(rex.w) { - ANDx_mask(x2, gd, 1, 0, 0b00010); //mask=0x0000000000000007 - } else { - ANDw_mask(x2, gd, 0, 0b00010); //mask=0x000000007 - } + ANDw_mask(x2, gd, 0, 0b00010); //mask=0x000000007 addr = geted(dyn, addr, ninst, nextop, &wback, x3, &fixedaddress, NULL, 0, 0, rex, LOCK_LOCK, 0, 0); - ASRx(x1, gd, 3); // r1 = (gd>>3) + ASRx(x1, gd, 3); // r1 = (gd>>3), there might be an issue for negative 32bits values here ADDx_REG_LSL(x3, wback, x1, 0); //(&ed)+=r1; ed = x1; wback = x3; + MOV32w(x5, 1); MARKLOCK; LDAXRB(ed, wback); LSRw_REG(x4, ed, x2); - ANDw_mask(x4, x4, 0, 0); //mask=1 BFIw(xFlags, x4, F_CF, 1); - MOV32w(x4, 1); - LSLw_REG(x4, x4, x2); + LSLw_REG(x4, x5, x2); BICw_REG(ed, ed, x4); STLXRB(x4, ed, wback); 
CBNZw_MARKLOCK(x4); @@ -455,11 +445,11 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin wback = x3; } ed = x1; + MOV32w(x5, 1); MARKLOCK; LDAXRB(ed, wback); BFXILw(xFlags, ed, u8&7, 1); // inject 1 bit from u8 to F_CF (i.e. pos 0) - MOV32w(x4, 1); - BFIw(ed, x4, u8&7, 1); + BFIw(ed, x5, u8&7, 1); STLXRB(x4, ed, wback); CBNZw_MARKLOCK(x4); } @@ -512,11 +502,11 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin wback = x3; } ed = x1; + MOV32w(x5, 1); MARKLOCK; LDAXRB(ed, wback); BFXILw(xFlags, ed, u8&7, 1); // inject 1 bit from u8 to F_CF (i.e. pos 0) - MOV32w(x4, 1); - EORw_REG_LSL(ed, ed, x4, u8&7); + EORw_REG_LSL(ed, ed, x5, u8&7); STLXRB(x4, ed, wback); CBNZw_MARKLOCK(x4); } |