| field | value | date |
|---|---|---|
| author | ptitSeb <sebastien.chev@gmail.com> | 2021-03-16 16:37:37 +0100 |
| committer | ptitSeb <sebastien.chev@gmail.com> | 2021-03-16 16:37:37 +0100 |
| commit | 551542f99a2bcd690f19e1bb17ac16172feef583 (patch) | |
| tree | 2619575ab420a80d4de109833f0e1cb550d434ab /src | |
| parent | 1e5bfcdbcde5878cbe19770d5b3183e631e6c795 (diff) | |
| download | box64-551542f99a2bcd690f19e1bb17ac16172feef583.tar.gz box64-551542f99a2bcd690f19e1bb17ac16172feef583.zip | |
[DYNAREC] Added 85 opcode
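Opcode 0x85 is the x86-64 TEST Ed, Gd instruction: it ANDs the two operands, discards the result, sets ZF/SF/PF from it, and always clears CF and OF. As a reference point for the emitted code in the diff below, here is a minimal C model of those flag semantics; the names `flags_t`, `parity8` and `test32_flags` are illustrative sketches, not part of box64.

```c
#include <stdint.h>

// Hedged reference model of x86 TEST r/m32, r32 flag semantics
// (illustrative only; box64 emits equivalent ARM64 code instead).
typedef struct { int zf, sf, pf, cf, of; } flags_t;

static int parity8(uint8_t b) {         // PF is set when the low byte
    int p = 1;                          // of the result has an even
    while (b) { p ^= b & 1; b >>= 1; }  // number of 1 bits
    return p;
}

static flags_t test32_flags(uint32_t a, uint32_t b) {
    uint32_t res = a & b;               // result is computed, then discarded
    flags_t f;
    f.zf = (res == 0);
    f.sf = (res >> 31) & 1;
    f.pf = parity8((uint8_t)res);
    f.cf = 0;                           // TEST always clears CF and OF
    f.of = 0;
    return f;
}
// e.g. test32_flags(0xF0, 0x0F) -> zf=1, sf=0, pf=1, cf=0, of=0
```

The new emit_test32 below produces the same results in the emulated xFlags, using ARM64 ANDS so the Z condition can drive the ZF update via a conditional branch.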
Diffstat (limited to 'src')

| mode | file | lines |
|---|---|---|
| -rwxr-xr-x | src/dynarec/arm64_emitter.h | 1 |
| -rwxr-xr-x | src/dynarec/arm64_printer.c | 25 |
| -rwxr-xr-x | src/dynarec/dynarec_arm64_00.c | 9 |
| -rwxr-xr-x | src/dynarec/dynarec_arm64_emit_tests.c | 408 |
| -rwxr-xr-x | src/dynarec/dynarec_arm64_helper.h | 2 |

5 files changed, 444 insertions, 1 deletion
```diff
diff --git a/src/dynarec/arm64_emitter.h b/src/dynarec/arm64_emitter.h
index 60c12748..7fabde02 100755
--- a/src/dynarec/arm64_emitter.h
+++ b/src/dynarec/arm64_emitter.h
@@ -240,6 +240,7 @@
 #define ANDxw_REG(Rd, Rn, Rm)   EMIT(LOGIC_REG_gen(rex.w, 0b00, 0b00, 0, Rm, 0, Rn, Rd))
 #define ANDSx_REG(Rd, Rn, Rm)   EMIT(LOGIC_REG_gen(1, 0b11, 0b00, 0, Rm, 0, Rn, Rd))
 #define ANDSw_REG(Rd, Rn, Rm)   EMIT(LOGIC_REG_gen(0, 0b11, 0b00, 0, Rm, 0, Rn, Rd))
+#define ANDSxw_REG(Rd, Rn, Rm)  EMIT(LOGIC_REG_gen(rex.w, 0b11, 0b00, 0, Rm, 0, Rn, Rd))
 #define ORRx_REG(Rd, Rn, Rm)    EMIT(LOGIC_REG_gen(1, 0b01, 0b00, 0, Rm, 0, Rn, Rd))
 #define ORRx_REG_LSL(Rd, Rn, Rm, lsl) EMIT(LOGIC_REG_gen(1, 0b01, lsl, 0, Rm, 0, Rn, Rd))
 #define ORRxw_REG(Rd, Rn, Rm)   EMIT(LOGIC_REG_gen(rex.w, 0b01, 0b00, 0, Rm, 0, Rn, Rd))
diff --git a/src/dynarec/arm64_printer.c b/src/dynarec/arm64_printer.c
index 56dfd5c3..2a66f553 100755
--- a/src/dynarec/arm64_printer.c
+++ b/src/dynarec/arm64_printer.c
@@ -258,6 +258,31 @@ const char* arm64_print(uint32_t opcode, uintptr_t addr)
         return buff;
     }
     // ---- LOGIC
+    if(isMask(opcode, "f11100100Nrrrrrrssssssnnnnnddddd", &a)) {
+        int i = (a.N<<12)|(imms<<6)|immr;
+        if(sf==0 && a.N==1)
+            snprintf(buff, sizeof(buff), "invalid ANDS %s, %s, 0x%x", Wt[Rd], Wt[Rn], i);
+        else if(Rd==31)
+            snprintf(buff, sizeof(buff), "TST %s, 0x%x", sf?Xt[Rn]:Wt[Rn], i);
+        else
+            snprintf(buff, sizeof(buff), "ANDS %s, %s, 0x%x", sf?Xt[Rd]:Wt[Rd], sf?Xt[Rn]:Wt[Rn], i);
+        return buff;
+    }
+    if(isMask(opcode, "f1101010hh0mmmmmiiiiiinnnnnddddd", &a)) {
+        const char* shifts[] = { "LSL", "LSR", "ASR", "ROR" };
+        if(shift==0 && imm==0) {
+            if(Rd==31)
+                snprintf(buff, sizeof(buff), "TST %s, %s", sf?Xt[Rn]:Wt[Rn], sf?Xt[Rm]:Wt[Rm]);
+            else
+                snprintf(buff, sizeof(buff), "ANDS %s, %s, %s", sf?Xt[Rd]:Wt[Rd], sf?Xt[Rn]:Wt[Rn], sf?Xt[Rm]:Wt[Rm]);
+        } else {
+            if(Rd==31)
+                snprintf(buff, sizeof(buff), "TST %s, %s, %s %d", sf?Xt[Rn]:Wt[Rn], sf?Xt[Rm]:Wt[Rm], shifts[shift], imm);
+            else
+                snprintf(buff, sizeof(buff), "ANDS %s, %s, %s, %s %d", sf?Xt[Rd]:Wt[Rd], sf?Xt[Rn]:Wt[Rn], sf?Xt[Rm]:Wt[Rm], shifts[shift], imm);
+        }
+        return buff;
+    }
     // ---- SHIFT
     if(isMask(opcode, "f10100110Nrrrrrrssssssnnnnnddddd", &a)) {
diff --git a/src/dynarec/dynarec_arm64_00.c b/src/dynarec/dynarec_arm64_00.c
index 3f5e8b85..4c786ebd 100755
--- a/src/dynarec/dynarec_arm64_00.c
+++ b/src/dynarec/dynarec_arm64_00.c
@@ -136,6 +136,15 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
             }
             break;
 
+        case 0x85:
+            INST_NAME("TEST Ed, Gd");
+            SETFLAGS(X_ALL, SF_SET);
+            nextop=F8;
+            GETGD;
+            GETED(0);
+            emit_test32(dyn, ninst, rex, ed, gd, x3, x5, x6);
+            break;
+
         case 0x89:
             INST_NAME("MOV Ed, Gd");
             nextop=F8;
diff --git a/src/dynarec/dynarec_arm64_emit_tests.c b/src/dynarec/dynarec_arm64_emit_tests.c
new file mode 100755
index 00000000..cfd7f6b4
--- /dev/null
+++ b/src/dynarec/dynarec_arm64_emit_tests.c
@@ -0,0 +1,408 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <stddef.h>
+#include <pthread.h>
+#include <errno.h>
+
+#include "debug.h"
+#include "box64context.h"
+#include "dynarec.h"
+#include "emu/x64emu_private.h"
+#include "emu/x64run_private.h"
+#include "x64run.h"
+#include "x64emu.h"
+#include "box64stack.h"
+#include "callback.h"
+#include "emu/x64run_private.h"
+#include "x64trace.h"
+#include "dynarec_arm64.h"
+#include "dynarec_arm64_private.h"
+#include "arm64_printer.h"
+#include "../tools/bridge_private.h"
+
+#include "dynarec_arm64_functions.h"
+#include "dynarec_arm64_helper.h"
+
+// emit CMP32 instruction, from cmp s1 , s2, using s3 and s4 as scratch
+//void emit_cmp32(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3, int s4)
+//{
+//    IFX(X_PEND) {
+//        STR_IMM9(s1, xEmu, offsetof(x64emu_t, op1));
+//        STR_IMM9(s2, xEmu, offsetof(x64emu_t, op2));
+//        SET_DF(s4, d_cmp32);
+//    } else {
+//        SET_DFNONE(s4);
+//    }
+//    SUBS_REG_LSL_IMM5(s3, s1, s2, 0); // res = s1 - s2
+//    IFX(X_PEND) {
+//        STR_IMM9(s3, xEmu, offsetof(x64emu_t, res));
+//    }
+//    IFX(X_ZF|X_CF) {
+//        BIC_IMM8(xFlags, xFlags, (1<<F_ZF)|(1<<F_CF), 0);
+//    }
+//    IFX(X_ZF) {
+//        ORR_IMM8_COND(cEQ, xFlags, xFlags, 1<<F_ZF, 0);
+//    }
+//    IFX(X_OF) {
+//        ORR_IMM8_COND(cVS, xFlags, xFlags, 0b10, 0x0b);
+//        BIC_IMM8_COND(cVC, xFlags, xFlags, 0b10, 0x0b);
+//    }
+//    IFX(X_CF) {
+//        // reversed carry
+//        ORR_IMM8_COND(cCC, xFlags, xFlags, 1<<F_CF, 0);
+//    }
+//    IFX(X_SF) {
+//        MOV_REG_LSR_IMM5(s4, s3, 31);
+//        BFI(xFlags, s4, F_SF, 1);
+//    }
+//    // and now the tricky ones (and mostly unused), PF and AF
+//    IFX(X_AF) {
+//        // bc = (res & (~d | s)) | (~d & s)
+//        MVN_REG_LSL_IMM5(s4, s1, 0);      // s4 = ~d
+//        ORR_REG_LSL_IMM5(s4, s4, s2, 0);  // s4 = ~d | s
+//        AND_REG_LSL_IMM5(s4, s4, s3, 0);  // s4 = res & (~d | s)
+//        BIC_REG_LSL_IMM5(s3, s2, s1, 0);  // loosing res... s3 = s & ~d
+//        ORR_REG_LSL_IMM5(s3, s4, s3, 0);  // s3 = (res & (~d | s)) | (s & ~d)
+//        IFX(X_AF) {
+//            MOV_REG_LSR_IMM5(s4, s3, 3);
+//            BFI(xFlags, s4, F_AF, 1);     // AF: bc & 0x08
+//        }
+//    }
+//    IFX(X_PF) {
+//        // PF: (((emu->x64emu_parity_tab[(res) / 32] >> ((res) % 32)) & 1) == 0)
+//        IFX(X_CF|X_AF) {
+//            SUB_REG_LSL_IMM5(s3, s1, s2, 0);
+//        }
+//        AND_IMM8(s3, s3, 0xE0); // lsr 5 masking pre-applied
+//        MOV32(s4, GetParityTab());
+//        LDR_REG_LSR_IMM5(s4, s4, s3, 5-2); // x/32 and then *4 because array is integer
+//        SUB_REG_LSL_IMM5(s3, s1, s2, 0);
+//        AND_IMM8(s3, s3, 31);
+//        MVN_REG_LSR_REG(s4, s4, s3);
+//        BFI(xFlags, s4, F_PF, 1);
+//    }
+//}
+
+// emit CMP32 instruction, from cmp s1 , 0, using s3 and s4 as scratch
+//void emit_cmp32_0(dynarec_arm_t* dyn, int ninst, int s1, int s3, int s4)
+//{
+//    IFX(X_PEND) {
+//        MOVW(s4, 0);
+//        STR_IMM9(s1, xEmu, offsetof(x64emu_t, op1));
+//        STR_IMM9(s4, xEmu, offsetof(x64emu_t, op2));
+//        STR_IMM9(s1, xEmu, offsetof(x64emu_t, res));
+//        SET_DF(s4, d_cmp32);
+//    } else {
+//        SET_DFNONE(s4);
+//    }
+//    SUBS_IMM8(s3, s1, 0); // res = s1 - 0
+//    // and now the tricky ones (and mostly unused), PF and AF
+//    // bc = (res & (~d | s)) | (~d & s) => is 0 here...
+//    IFX(X_CF | X_AF | X_ZF) {
+//        BIC_IMM8(xFlags, xFlags, (1<<F_CF)|(1<<F_AF)|(1<<F_ZF), 0);
+//    }
+//    IFX(X_OF) {
+//        BFC(xFlags, F_OF, 1);
+//    }
+//    IFX(X_ZF) {
+//        ORR_IMM8_COND(cEQ, xFlags, xFlags, 1<<F_ZF, 0);
+//    }
+//    IFX(X_SF) {
+//        MOV_REG_LSR_IMM5(s4, s3, 31);
+//        BFI(xFlags, s4, F_SF, 1);
+//    }
+//    IFX(X_PF) {
+//        emit_pf(dyn, ninst, s1, s3, s4);
+//    }
+//}
+
+// emit CMP16 instruction, from cmp s1 , s2, using s3 and s4 as scratch
+//void emit_cmp16(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3, int s4)
+//{
+//    IFX(X_PEND) {
+//        STR_IMM9(s1, xEmu, offsetof(x64emu_t, op1));
+//        STR_IMM9(s2, xEmu, offsetof(x64emu_t, op2));
+//        SET_DF(s3, d_cmp16);
+//    } else {
+//        SET_DFNONE(s3);
+//    }
+//    SUB_REG_LSL_IMM5(s3, s1, s2, 0); // res = s1 - s2
+//    IFX(X_PEND) {
+//        STR_IMM9(s3, xEmu, offsetof(x64emu_t, res));
+//    }
+//    IFX(X_ZF) {
+//        MOVW(s4, 0xffff);
+//        TSTS_REG_LSL_IMM5(s3, s4, 0);
+//        ORR_IMM8_COND(cEQ, xFlags, xFlags, 1<<F_ZF, 0);
+//        BIC_IMM8_COND(cNE, xFlags, xFlags, 1<<F_ZF, 0);
+//    }
+//    IFX(X_SF) {
+//        MOV_REG_LSR_IMM5(s4, s3, 15);
+//        BFI(xFlags, s4, F_SF, 1);
+//    }
+//    // bc = (res & (~d | s)) | (~d & s)
+//    IFX(X_CF|X_AF|X_OF) {
+//        MVN_REG_LSL_IMM5(s4, s1, 0);      // s4 = ~d
+//        ORR_REG_LSL_IMM5(s4, s4, s2, 0);  // s4 = ~d | s
+//        AND_REG_LSL_IMM5(s4, s4, s3, 0);  // s4 = res & (~d | s)
+//        BIC_REG_LSL_IMM5(s3, s2, s1, 0);  // loosing res... s3 = s & ~d
+//        ORR_REG_LSL_IMM5(s3, s4, s3, 0);  // s3 = (res & (~d | s)) | (s & ~d)
+//        IFX(X_CF) {
+//            MOV_REG_LSR_IMM5(s4, s3, 15);
+//            BFI(xFlags, s4, F_CF, 1);     // CF : bc & 0x8000
+//        }
+//        IFX(X_AF) {
+//            MOV_REG_LSR_IMM5(s4, s3, 3);
+//            BFI(xFlags, s4, F_AF, 1);     // AF: bc & 0x08
+//        }
+//        IFX(X_OF) {
+//            MOV_REG_LSR_IMM5(s4, s3, 14);
+//            XOR_REG_LSR_IMM8(s4, s4, s4, 1);
+//            BFI(xFlags, s4, F_OF, 1);     // OF: ((bc >> 14) ^ ((bc>>14)>>1)) & 1
+//        }
+//    }
+//    IFX(X_PF) {
+//        IFX(X_CF|X_AF|X_OF) {
+//            SUB_REG_LSL_IMM5(s3, s1, s2, 0);
+//        }
+//        AND_IMM8(s3, s3, 0xE0); // lsr 5 masking pre-applied
+//        MOV32(s4, GetParityTab());
+//        LDR_REG_LSR_IMM5(s4, s4, s3, 5-2); // x/32 and then *4 because array is integer
+//        SUB_REG_LSL_IMM5(s3, s1, s2, 0);
+//        AND_IMM8(s3, s3, 31);
+//        MVN_REG_LSR_REG(s4, s4, s3);
+//        BFI(xFlags, s4, F_PF, 1);
+//    }
+//}
+
+// emit CMP16 instruction, from cmp s1 , #0, using s3 and s4 as scratch
+//void emit_cmp16_0(dynarec_arm_t* dyn, int ninst, int s1, int s3, int s4)
+//{
+//    IFX(X_PEND) {
+//        MOVW(s3, 0);
+//        STR_IMM9(s1, xEmu, offsetof(x64emu_t, op1));
+//        STR_IMM9(s3, xEmu, offsetof(x64emu_t, op2));
+//        STR_IMM9(s1, xEmu, offsetof(x64emu_t, res));
+//        SET_DF(s3, d_cmp16);
+//    } else {
+//        SET_DFNONE(s3);
+//    }
+//    // bc = (res & (~d | s)) | (~d & s) = 0
+//    IFX(X_CF | X_AF | X_ZF) {
+//        BIC_IMM8(xFlags, xFlags, (1<<F_CF)|(1<<F_AF)|(1<<F_ZF), 0);
+//    }
+//    IFX(X_OF) {
+//        BFC(xFlags, F_OF, 1);
+//    }
+//    IFX(X_ZF) {
+//        MOVW(s4, 0xffff);
+//        TSTS_REG_LSL_IMM5(s1, s4, 0);
+//        ORR_IMM8_COND(cEQ, xFlags, xFlags, 1<<F_ZF, 0);
+//    }
+//    IFX(X_SF) {
+//        MOV_REG_LSR_IMM5(s4, s1, 15);
+//        BFI(xFlags, s4, F_SF, 1);
+//    }
+//    IFX(X_PF) {
+//        emit_pf(dyn, ninst, s1, s3, s4);
+//    }
+//}
+// emit CMP8 instruction, from cmp s1 , s2, using s3 and s4 as scratch
+//void emit_cmp8(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3, int s4)
+//{
+//    IFX(X_PEND) {
+//        STR_IMM9(s1, xEmu, offsetof(x64emu_t, op1));
+//        STR_IMM9(s2, xEmu, offsetof(x64emu_t, op2));
+//        SET_DF(s4, d_cmp8);
+//    } else {
+//        SET_DFNONE(s4);
+//    }
+//    SUB_REG_LSL_IMM5(s3, s1, s2, 0); // res = s1 - s2
+//    IFX(X_PEND) {
+//        STR_IMM9(s3, xEmu, offsetof(x64emu_t, res));
+//    }
+//    IFX(X_ZF) {
+//        TSTS_IMM8(s3, 0xff);
+//        ORR_IMM8_COND(cEQ, xFlags, xFlags, 1<<F_ZF, 0);
+//        BIC_IMM8_COND(cNE, xFlags, xFlags, 1<<F_ZF, 0);
+//    }
+//    IFX(X_SF) {
+//        MOV_REG_LSR_IMM5(s4, s3, 7);
+//        BFI(xFlags, s4, F_SF, 1);
+//    }
+//    // bc = (res & (~d | s)) | (~d & s)
+//    IFX(X_CF|X_AF|X_OF) {
+//        MVN_REG_LSL_IMM5(s4, s1, 0);      // s4 = ~d
+//        ORR_REG_LSL_IMM5(s4, s4, s2, 0);  // s4 = ~d | s
+//        AND_REG_LSL_IMM5(s4, s4, s3, 0);  // s4 = res & (~d | s)
+//        BIC_REG_LSL_IMM5(s3, s2, s1, 0);  // loosing res... s3 = s & ~d
+//        ORR_REG_LSL_IMM5(s3, s4, s3, 0);  // s3 = (res & (~d | s)) | (s & ~d)
+//        IFX(X_CF) {
+//            MOV_REG_LSR_IMM5(s4, s3, 7);
+//            BFI(xFlags, s4, F_CF, 1);     // CF : bc & 0x80
+//        }
+//        IFX(X_AF) {
+//            MOV_REG_LSR_IMM5(s4, s3, 3);
+//            BFI(xFlags, s4, F_AF, 1);     // AF: bc & 0x08
+//        }
+//        IFX(X_OF) {
+//            MOV_REG_LSR_IMM5(s4, s3, 6);
+//            XOR_REG_LSR_IMM8(s4, s4, s4, 1);
+//            BFI(xFlags, s4, F_OF, 1);     // OF: ((bc >> 6) ^ ((bc>>6)>>1)) & 1
+//        }
+//    }
+//    IFX(X_PF) {
+//        IFX(X_CF|X_AF|X_OF) {
+//            SUB_REG_LSL_IMM5(s3, s1, s2, 0);
+//        }
+//        AND_IMM8(s3, s3, 0xE0); // lsr 5 masking pre-applied
+//        MOV32(s4, GetParityTab());
+//        LDR_REG_LSR_IMM5(s4, s4, s3, 5-2); // x/32 and then *4 because array is integer
+//        SUB_REG_LSL_IMM5(s3, s1, s2, 0);
+//        AND_IMM8(s3, s3, 31);
+//        MVN_REG_LSR_REG(s4, s4, s3);
+//        BFI(xFlags, s4, F_PF, 1);
+//    }
+//}
+// emit CMP8 instruction, from cmp s1 , 0, using s3 and s4 as scratch
+//void emit_cmp8_0(dynarec_arm_t* dyn, int ninst, int s1, int s3, int s4)
+//{
+//    IFX(X_PEND) {
+//        STR_IMM9(s1, xEmu, offsetof(x64emu_t, op1));
+//        MOVW(s4, 0);
+//        STR_IMM9(s4, xEmu, offsetof(x64emu_t, op2));
+//        STR_IMM9(s1, xEmu, offsetof(x64emu_t, res));
+//        SET_DF(s3, d_cmp8);
+//    } else {
+//        SET_DFNONE(s4);
+//    }
+//    // bc = (res & (~d | s)) | (~d & s) = 0
+//    IFX(X_CF | X_AF | X_ZF) {
+//        BIC_IMM8(xFlags, xFlags, (1<<F_CF)|(1<<F_AF)|(1<<F_ZF), 0);
+//    }
+//    IFX(X_OF) {
+//        BFC(xFlags, F_OF, 1);
+//    }
+//    IFX(X_ZF) {
+//        TSTS_IMM8(s1, 0xff);
+//        ORR_IMM8_COND(cEQ, xFlags, xFlags, 1<<F_ZF, 0);
+//    }
+//    IFX(X_SF) {
+//        MOV_REG_LSR_IMM5(s4, s1, 7);
+//        BFI(xFlags, s4, F_SF, 1);
+//    }
+//    IFX(X_PF) {
+//        emit_pf(dyn, ninst, s1, s3, s4);
+//    }
+//}
+
+// emit TEST32 instruction, from test s1 , s2, using s3 and s4 as scratch
+void emit_test32(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int s2, int s3, int s4, int s5)
+{
+    IFX(X_PEND) {
+        SET_DF(s3, d_tst32);
+    } else {
+        SET_DFNONE(s4);
+    }
+    IFX(X_ZF|X_CF|X_OF) {
+        MOVw(s5, (1<<F_ZF)|(1<<F_CF)|(1<<F_OF));
+        BICx(xFlags, xFlags, s5);
+    }
+    ANDSxw_REG(s3, s1, s2); // res = s1 & s2
+    IFX(X_PEND) {
+        STRx_U12(s3, xEmu, offsetof(x64emu_t, res));
+    }
+    IFX(X_ZF) {
+        Bcond(cNE, +4);
+        ORRw_U12(xFlags, xFlags, 1<<F_ZF);
+    }
+    IFX(X_SF) {
+        LSRxw(s4, s3, rex.w?63:31);
+        BFIx(xFlags, s4, F_SF, 1);
+    }
+    // PF: (((emu->x64emu_parity_tab[(res) / 32] >> ((res) % 32)) & 1) == 0)
+    IFX(X_PF) {
+        ANDw_U12(s3, s3, 0xE0); // lsr 5 masking pre-applied
+        LSRw(s3, s3, 5);
+        MOV64x(s4, (uintptr_t)GetParityTab());
+        LDRw_REG_LSL2(s4, s4, s3);
+        ANDw_U12(s3, s1, 31);
+        LSRw_REG(s4, s4, s3);
+        MVNx(s4, s4);
+        BFIx(xFlags, s4, F_PF, 1);
+    }
+}
+
+// emit TEST16 instruction, from test s1 , s2, using s3 and s4 as scratch
+//void emit_test16(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3, int s4)
+//{
+//    IFX(X_PEND) {
+//        SET_DF(s3, d_tst16);
+//    } else {
+//        SET_DFNONE(s4);
+//    }
+//    IFX(X_OF) {
+//        BFC(xFlags, F_OF, 1);
+//    }
+//    IFX(X_ZF|X_CF) {
+//        BIC_IMM8(xFlags, xFlags, (1<<F_ZF)|(1<<F_CF), 0);
+//    }
+//    ANDS_REG_LSL_IMM5(s3, s1, s2, 0); // res = s1 & s2
+//    IFX(X_PEND) {
+//        STR_IMM9(s3, xEmu, offsetof(x64emu_t, res));
+//    }
+//    IFX(X_ZF) {
+//        ORR_IMM8_COND(cEQ, xFlags, xFlags, 1<<F_ZF, 0);
+//    }
+//    IFX(X_SF) {
+//        MOV_REG_LSR_IMM5(s4, s3, 15);
+//        BFI(xFlags, s4, F_SF, 1);
+//    }
+//    // PF: (((emu->x64emu_parity_tab[(res) / 32] >> ((res) % 32)) & 1) == 0)
+//    IFX(X_PF) {
+//        AND_IMM8(s3, s3, 0xE0); // lsr 5 masking pre-applied
+//        MOV32(s4, GetParityTab());
+//        LDR_REG_LSR_IMM5(s4, s4, s3, 5-2); // x/32 and then *4 because array is integer
+//        AND_REG_LSL_IMM5(s3, s1, s2, 0);
+//        AND_IMM8(s3, s3, 31);
+//        MVN_REG_LSR_REG(s4, s4, s3);
+//        BFI(xFlags, s4, F_PF, 1);
+//    }
+//}
+
+// emit TEST8 instruction, from test s1 , s2, using s3 and s4 as scratch
+//void emit_test8(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3, int s4)
+//{
+//    IFX(X_PEND) {
+//        SET_DF(s3, d_tst8);
+//    } else {
+//        SET_DFNONE(s4);
+//    }
+//    IFX(X_OF) {
+//        BFC(xFlags, F_OF, 1);
+//    }
+//    IFX(X_ZF|X_CF) {
+//        BIC_IMM8(xFlags, xFlags, (1<<F_ZF)|(1<<F_CF), 0);
+//    }
+//    ANDS_REG_LSL_IMM5(s3, s1, s2, 0); // res = s1 & s2
+//    IFX(X_PEND) {
+//        STR_IMM9(s3, xEmu, offsetof(x64emu_t, res));
+//    }
+//    IFX(X_ZF) {
+//        ORR_IMM8_COND(cEQ, xFlags, xFlags, 1<<F_ZF, 0);
+//    }
+//    IFX(X_SF) {
+//        MOV_REG_LSR_IMM5(s4, s3, 7);
+//        BFI(xFlags, s4, F_SF, 1);
+//    }
+//    // PF: (((emu->x64emu_parity_tab[(res) / 32] >> ((res) % 32)) & 1) == 0)
+//    IFX(X_PF) {
+//        AND_IMM8(s3, s3, 0xE0); // lsr 5 masking pre-applied
+//        MOV32(s4, GetParityTab());
+//        LDR_REG_LSR_IMM5(s4, s4, s3, 5-2); // x/32 and then *4 because array is integer
+//        AND_REG_LSL_IMM5(s3, s1, s2, 0);
+//        AND_IMM8(s3, s3, 31);
+//        MVN_REG_LSR_REG(s4, s4, s3);
+//        BFI(xFlags, s4, F_PF, 1);
+//    }
+//}
diff --git a/src/dynarec/dynarec_arm64_helper.h b/src/dynarec/dynarec_arm64_helper.h
index 3d947a5c..4fef2acb 100755
--- a/src/dynarec/dynarec_arm64_helper.h
+++ b/src/dynarec/dynarec_arm64_helper.h
@@ -611,7 +611,7 @@ void call_c(dynarec_arm_t* dyn, int ninst, void* fnc, int reg, int ret, int save
 //void emit_cmp32_0(dynarec_arm_t* dyn, int ninst, int s1, int s3, int s4);
 //void emit_test8(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3, int s4);
 //void emit_test16(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3, int s4);
-//void emit_test32(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3, int s4);
+void emit_test32(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int s2, int s3, int s4, int s5);
 //void emit_add32(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3, int s4);
 //void emit_add32c(dynarec_arm_t* dyn, int ninst, int s1, int32_t c, int s3, int s4);
 //void emit_add8(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3, int s4, int save_s4);
```
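The one-line emitter change above adds ANDSxw_REG, the flag-setting AND whose operand width follows the x86 REX.W prefix. For reference, the A64 "logical (shifted register)" encoding that the LOGIC_REG_gen macro family produces looks like the sketch below; the field layout is from the ARMv8-A architecture, and the helper name `encode_logic_reg` is illustrative, not box64's.

```c
#include <stdint.h>

// Hedged sketch of the A64 "logical (shifted register)" instruction
// encoding: sf | opc(2) | 01010 | shift(2) | N | Rm(5) | imm6 | Rn(5) | Rd(5).
static uint32_t encode_logic_reg(int sf, int opc, int shift, int N,
                                 int Rm, int imm6, int Rn, int Rd) {
    return ((uint32_t)sf << 31) | ((uint32_t)opc << 29) | (0x0Au << 24)
         | ((uint32_t)shift << 22) | ((uint32_t)N << 21) | ((uint32_t)Rm << 16)
         | ((uint32_t)imm6 << 10) | ((uint32_t)Rn << 5) | (uint32_t)Rd;
}

// Example: ANDS w3, w1, w2 (opc=0b11 selects ANDS)
// encode_logic_reg(0, 3, 0, 0, 2, 0, 1, 3) == 0x6A020023
```

Since box64 already defines fixed-width variants (ANDSx_REG, ANDSw_REG), the new ANDSxw_REG simply plugs rex.w into the sf field, which is why the macro is a one-liner.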
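Likewise, the ANDw_U12/LSRw/LDRw_REG_LSL2/MVNx sequence in emit_test32's X_PF branch is a packed-bit parity-table lookup: the table holds one bit per byte value, 32 bits per 32-bit word, and PF is the inverted bit, per the comment in the source. A hedged C equivalent follows; building the table here is an assumption made so the example is self-contained, and `GetParityTab()` returns the real table in box64.

```c
#include <stdint.h>

// Illustrative model of the packed parity table: one bit per byte value,
// 32 bits per word, storing 1 for odd parity of the byte's set bits.
static uint32_t parity_tab[8];

static void build_parity_tab(void) {
    for (int v = 0; v < 256; v++) {
        int ones = 0;
        for (int b = v; b; b >>= 1)
            ones += b & 1;
        if (ones & 1)
            parity_tab[v / 32] |= 1u << (v % 32);
    }
}

// PF depends only on the low byte of the result and is set for even
// parity, so the stored bit is inverted (hence the MVNx in the emitter).
static int pf_from_res(uint32_t res) {
    uint8_t lo = (uint8_t)res;
    return ((parity_tab[lo / 32] >> (lo % 32)) & 1) == 0;
}
```

The emitted ARM64 sequence mirrors this lookup: the AND with 0xE0 plus LSR by 5 forms the word index, LDRw_REG_LSL2 scales it by 4 to load the 32-bit word, and the final shift, invert, and BFIx deposit the PF bit into xFlags.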