From a2aa5fc650f7e2d4e9400f76832b80689366f4af Mon Sep 17 00:00:00 2001
From: ptitSeb
Date: Mon, 5 Jul 2021 23:01:09 +0200
Subject: Added 66 0F 38 08/09/0A opcodes ([DYNAREC] too) (for #32 / Zoom)

---
 src/dynarec/arm64_emitter.h      | 19 +++++++++++++++++
 src/dynarec/dynarec_arm64_660f.c | 45 ++++++++++++++++++++++++++++++++++++++++
 2 files changed, 64 insertions(+)

(limited to 'src/dynarec')

diff --git a/src/dynarec/arm64_emitter.h b/src/dynarec/arm64_emitter.h
index 8b2248ce..1ceee266 100755
--- a/src/dynarec/arm64_emitter.h
+++ b/src/dynarec/arm64_emitter.h
@@ -1424,6 +1424,25 @@
 #define VCHIQQ_32(Rd, Rn, Rm)           EMIT(CMG_vector(1, 1, 0b10, 0, Rm, Rn, Rd))
 #define VCHIQQ_64(Rd, Rn, Rm)           EMIT(CMG_vector(1, 1, 0b11, 0, Rm, Rn, Rd))
 
+// Less Than 0
+#define CMLT_0_vector(Q, size, Rn, Rd)  ((Q)<<30 | 0b01110<<24 | (size)<<22 | 0b10000<<17 | 0b01010<<12 | 0b10<<10 | (Rn)<<5 | (Rd))
+#define CMLT_0_8(Rd, Rn)                EMIT(CMLT_0_vector(0, 0b00, Rn, Rd))
+#define CMLT_0_16(Rd, Rn)               EMIT(CMLT_0_vector(0, 0b01, Rn, Rd))
+#define CMLT_0_32(Rd, Rn)               EMIT(CMLT_0_vector(0, 0b10, Rn, Rd))
+#define CMLTQ_0_8(Rd, Rn)               EMIT(CMLT_0_vector(1, 0b00, Rn, Rd))
+#define CMLTQ_0_16(Rd, Rn)              EMIT(CMLT_0_vector(1, 0b01, Rn, Rd))
+#define CMLTQ_0_32(Rd, Rn)              EMIT(CMLT_0_vector(1, 0b10, Rn, Rd))
+#define CMLTQ_0_64(Rd, Rn)              EMIT(CMLT_0_vector(1, 0b11, Rn, Rd))
+// Equal 0
+#define CMEQ_0_vector(Q, size, Rn, Rd)  ((Q)<<30 | 0b01110<<24 | (size)<<22 | 0b10000<<17 | 0b0100<<13 | 1<<12 | 0b10<<10 | (Rn)<<5 | (Rd))
+#define CMEQ_0_8(Rd, Rn)                EMIT(CMEQ_0_vector(0, 0b00, Rn, Rd))
+#define CMEQ_0_16(Rd, Rn)               EMIT(CMEQ_0_vector(0, 0b01, Rn, Rd))
+#define CMEQ_0_32(Rd, Rn)               EMIT(CMEQ_0_vector(0, 0b10, Rn, Rd))
+#define CMEQQ_0_8(Rd, Rn)               EMIT(CMEQ_0_vector(1, 0b00, Rn, Rd))
+#define CMEQQ_0_16(Rd, Rn)              EMIT(CMEQ_0_vector(1, 0b01, Rn, Rd))
+#define CMEQQ_0_32(Rd, Rn)              EMIT(CMEQ_0_vector(1, 0b10, Rn, Rd))
+#define CMEQQ_0_64(Rd, Rn)              EMIT(CMEQ_0_vector(1, 0b11, Rn, Rd))
+
 // Vector Float CMP
 // EQual
 #define FCMP_vector(Q, U, E, sz, Rm, ac, Rn, Rd)    ((Q)<<30 | (U)<<29 | 0b01110<<24 | (E)<<23 | (sz)<<22 | 1<<21 | (Rm)<<16 | 0b1110<<12 | (ac)<<11 | 1<<10 | (Rn)<<5 | (Rd))
diff --git a/src/dynarec/dynarec_arm64_660f.c b/src/dynarec/dynarec_arm64_660f.c
index 19eba626..490c206d 100755
--- a/src/dynarec/dynarec_arm64_660f.c
+++ b/src/dynarec/dynarec_arm64_660f.c
@@ -256,6 +256,51 @@ uintptr_t dynarec64_660F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n
             SQXTN2_16(q0, v0);
             break;
 
+        case 0x08:
+            INST_NAME("PSIGNB Gx, Ex");
+            nextop = F8;
+            GETGX(q0);
+            GETEX(q1, 0);
+            v1 = fpu_get_scratch(dyn);
+            v0 = fpu_get_scratch(dyn);
+            NEGQ_8(v0, q0);     // get NEG
+            CMLTQ_0_8(v1, q1);  // calculate mask
+            VBICQ(q0, q0, v1);  // apply not mask on dest
+            VANDQ(v0, v0, v1);  // apply mask on src
+            VORRQ(q0, q0, v0);  // merge
+            CMEQQ_0_8(v1, q1);  // handle case where Ex is 0
+            VBICQ(q0, q0, v1);
+            break;
+        case 0x09:
+            INST_NAME("PSIGNW Gx, Ex");
+            nextop = F8;
+            GETGX(q0);
+            GETEX(q1, 0);
+            v1 = fpu_get_scratch(dyn);
+            v0 = fpu_get_scratch(dyn);
+            NEGQ_16(v0, q0);    // get NEG
+            CMLTQ_0_16(v1, q1); // calculate mask
+            VBICQ(q0, q0, v1);  // apply not mask on dest
+            VANDQ(v0, v0, v1);  // apply mask on src
+            VORRQ(q0, q0, v0);  // merge
+            CMEQQ_0_16(v1, q1); // handle case where Ex is 0
+            VBICQ(q0, q0, v1);
+            break;
+        case 0x0A:
+            INST_NAME("PSIGND Gx, Ex");
+            nextop = F8;
+            GETGX(q0);
+            GETEX(q1, 0);
+            v1 = fpu_get_scratch(dyn);
+            v0 = fpu_get_scratch(dyn);
+            NEGQ_32(v0, q0);    // get NEG
+            CMLTQ_0_32(v1, q1); // calculate mask
+            VBICQ(q0, q0, v1);  // apply not mask on dest
+            VANDQ(v0, v0, v1);  // apply mask on src
+            VORRQ(q0, q0, v0);  // merge
+            CMEQQ_0_32(v1, q1); // handle case where Ex is 0
+            VBICQ(q0, q0, v1);
+            break;
         case 0x0B:
             INST_NAME("PMULHRSW Gx,Ex");
             nextop = F8;
--
cgit 1.4.1
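
A note on the technique: the three new cases implement the SSSE3 PSIGNB/PSIGNW/PSIGND
instructions, which have no single-instruction NEON equivalent, so the dynarec composes
them from NEG, CMLT #0, CMEQ #0 and bitwise mask operations. The scalar reference below
is a minimal sketch of the per-element semantics being emulated; it is illustrative only
and not part of the patch, and the helper name psignb_ref is made up for this note.

#include <stdint.h>

/* Scalar reference for PSIGNB: for each byte lane,
 *   dst = (src < 0) ? -dst : (src == 0) ? 0 : dst
 * The NEON sequence in the patch builds the same result with masks:
 * NEGQ_8 computes -dst for every lane, CMLTQ_0_8 produces an all-ones
 * mask for lanes where src < 0 (used to select the negated value), and
 * CMEQQ_0_8 produces a mask for lanes where src == 0 (used to clear them). */
static void psignb_ref(int8_t dst[16], const int8_t src[16])
{
    for (int i = 0; i < 16; ++i) {
        if (src[i] < 0)
            dst[i] = (int8_t)-dst[i];   /* wraps: -(-128) stays -128, as on x86 */
        else if (src[i] == 0)
            dst[i] = 0;
    }
}

PSIGNW and PSIGND follow the same pattern on int16_t and int32_t lanes, which is why
the three cases differ only in the element-size suffix of the emitted instructions.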