diff options
Diffstat (limited to 'src')
-rwxr-xr-x  src/dynarec/arm64_emitter.h            | 10 +
-rwxr-xr-x  src/dynarec/arm64_printer.c            | 15 +
-rwxr-xr-x  src/dynarec/dynarec_arm64_f20f.c       | 39 +-
-rwxr-xr-x  src/dynarec/dynarec_arm64_functions.c  | 14 +-
4 files changed, 50 insertions(+), 28 deletions(-)
diff --git a/src/dynarec/arm64_emitter.h b/src/dynarec/arm64_emitter.h
index 9bf56a5b..3c3a822f 100755
--- a/src/dynarec/arm64_emitter.h
+++ b/src/dynarec/arm64_emitter.h
@@ -597,4 +597,14 @@
 #define VMOVQ(Vd, Vn)           EMIT(ORR_vector(1, Vn, Vn, Vd))
 #define VMOV(Dd, Dn)            EMIT(ORR_vector(0, Dn, Dn, Dd))
 
+// ADD
+#define FADD_vector(Q, U, sz, Rm, Rn, Rd)   ((Q)<<30 | (U)<<29 | 0b01110<<24 | (sz)<<22 | 1<<21 | (Rm)<<16 | 0b11010<<11 | 1<<10 | (Rn)<<5 | (Rd))
+#define VFADDQS(Vd, Vn, Vm)     EMIT(FADD_vector(1, 0, 0, Vm, Vn, Vd))
+#define VFADDQD(Vd, Vn, Vm)     EMIT(FADD_vector(1, 0, 1, Vm, Vn, Vd))
+#define VFADDS(Dd, Dn, Dm)      EMIT(FADD_vector(0, 0, 0, Dm, Dn, Dd))
+
+#define FADD_scalar(type, Rm, op, Rn, Rd)   (0b11110<<24 | (type)<<22 | 1<<21 | (Rm)<<16 | 0b001<<13 | (op)<<12 | 0b10<<10 | (Rn)<<5 | (Rd))
+#define FADDS(Sd, Sn, Sm)       EMIT(FADD_scalar(0b00, Sm, 0, Sn, Sd))
+#define FADDD(Dd, Dn, Dm)       EMIT(FADD_scalar(0b01, Dm, 0, Dn, Dd))
+
 #endif //__ARM64_EMITTER_H__
diff --git a/src/dynarec/arm64_printer.c b/src/dynarec/arm64_printer.c
index b0b1849a..145eb1a4 100755
--- a/src/dynarec/arm64_printer.c
+++ b/src/dynarec/arm64_printer.c
@@ -786,6 +786,21 @@ const char* arm64_print(uint32_t opcode, uintptr_t addr)
         return buff;
     }
 
+    // FADD
+    if(isMask(opcode, "0Q0011100f1mmmmm110101nnnnnddddd", &a)) {
+        char s = a.Q?'V':'D';
+        char d = sf?'D':'S';
+        int n = (a.Q && !sf)?4:2;
+        snprintf(buff, sizeof(buff), "VFADD %c%d.%d%c, %c%d.%d%c, %c%d.%c%d", s, Rd, n, d, s, Rn, n, d, s, Rm, s, d);
+        return buff;
+    }
+    if(isMask(opcode, "00011110ff1mmmmm001010nnnnnddddd", &a)) {
+        char s = (sf==0)?'S':((sf==1)?'D':'?');
+        snprintf(buff, sizeof(buff), "FADD %c%d, %c%d, %c%d", s, Rd, s, Rn, s, Rm);
+        return buff;
+    }
+
+
     snprintf(buff, sizeof(buff), "%08X ???", __builtin_bswap32(opcode));
     return buff;
 }
\ No newline at end of file
diff --git a/src/dynarec/dynarec_arm64_f20f.c b/src/dynarec/dynarec_arm64_f20f.c
index e8708a12..1caa91b7 100755
--- a/src/dynarec/dynarec_arm64_f20f.c
+++ b/src/dynarec/dynarec_arm64_f20f.c
@@ -22,22 +22,21 @@
 #include "dynarec_arm64_functions.h"
 #include "dynarec_arm64_helper.h"
 
-// Get Ex as a double, not a quad (warning, x2 and x3 may get used)
-#define GETEX(a)                                                    \
-    if((nextop&0xC0)==0xC0) {                                       \
-        a = sse_get_reg(dyn, ninst, x1, nextop&7);                  \
-    } else {                                                        \
-        parity = getedparity(dyn, ninst, addr, nextop, 3);          \
-        a = fpu_get_scratch_double(dyn);                            \
-        if(parity) {                                                \
-            addr = geted(dyn, addr, ninst, nextop, &ed, x1, &fixedaddress, 1023, 3); \
-            VLDR_64(a, ed, fixedaddress);                           \
-        } else {                                                    \
-            addr = geted(dyn, addr, ninst, nextop, &ed, x1, &fixedaddress, 4095-4, 0);\
-            LDR_IMM9(x2, ed, fixedaddress+0);                       \
-            LDR_IMM9(x3, ed, fixedaddress+4);                       \
-            VMOVtoV_D(a, x2, x3);                                   \
-        }                                                           \
+// Get Ex as a double, not a quad (warning, x2 get used)
+#define GETEX(a, D)                                                 \
+    if(MODREG) {                                                    \
+        a = sse_get_reg(dyn, ninst, x1, (nextop&7)+(rex.b<<3));     \
+    } else {                                                        \
+        parity = getedparity(dyn, ninst, addr, nextop, 3);          \
+        a = fpu_get_scratch(dyn);                                   \
+        if(parity) {                                                \
+            addr = geted(dyn, addr, ninst, nextop, &ed, x1, &fixedaddress, 0xfff<<3, 3, rex, 0, D); \
+            VLDR64_U12(a, ed, fixedaddress);                        \
+        } else {                                                    \
+            addr = geted(dyn, addr, ninst, nextop, &ed, x1, &fixedaddress, 0xfff<<3, 0, rex, 0, D); \
+            LDRx_U12(x2, ed, fixedaddress+0);                       \
+            VMOVQDfrom(a, 0, x2);                                   \
+        }                                                           \
     }
 
 uintptr_t dynarec64_F20F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int ninst, rex_t rex, int* ok, int* need_epilog)
@@ -63,6 +62,14 @@ uintptr_t dynarec64_F20F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n
 
     switch(opcode) {
 
+        case 0x58:
+            INST_NAME("ADDSD Gx, Ex");
+            nextop = F8;
+            gd = ((nextop&0x38)>>3)+(rex.r<<3);
+            v0 = sse_get_reg(dyn, ninst, x1, gd);
+            GETEX(d0, 0);
+            FADDD(v0, v0, d0);
+            break;
 
         default:
             DEFAULT;
diff --git a/src/dynarec/dynarec_arm64_functions.c b/src/dynarec/dynarec_arm64_functions.c
index b6bb5e3c..24b1b55b 100755
--- a/src/dynarec/dynarec_arm64_functions.c
+++ b/src/dynarec/dynarec_arm64_functions.c
@@ -189,21 +189,11 @@ void arm_fprem1(x64emu_t* emu)
 #define EMM0        8
 #define SCRATCH0    24
 
-// Get a FPU single scratch reg
-int fpu_get_scratch_single(dynarec_arm_t* dyn)
+// Get a FPU scratch reg
+int fpu_get_scratch(dynarec_arm_t* dyn)
 {
     return SCRATCH0 + dyn->fpu_scratch++;  // return an Sx
 }
-// Get a FPU double scratch reg
-int fpu_get_scratch_double(dynarec_arm_t* dyn)
-{
-    return SCRATCH0 + dyn->fpu_scratch++;  // return an Dx (same as Sx)
-}
-// Get a FPU quad scratch reg
-int fpu_get_scratch_quad(dynarec_arm_t* dyn)
-{
-    return SCRATCH0 + dyn->fpu_scratch++;  // return an Qx (same as Dx or Sx)
-}
 // Reset scratch regs counter
 void fpu_reset_scratch(dynarec_arm_t* dyn)
 {