| field | value | date |
|---|---|---|
| author | ptitSeb <sebastien.chev@gmail.com> | 2021-03-29 11:23:36 +0200 |
| committer | ptitSeb <sebastien.chev@gmail.com> | 2021-03-29 11:23:36 +0200 |
| commit | a527fdcc28844feed95c48537a45092df994cf8f (patch) | |
| tree | 6f6335586e7deedf054eb3358e1a7bf82eed7143 /src | |
| parent | 5c3a4deed8395b6c9a008421d5f70b834a50e8f6 (diff) | |
[DYNAREC] Added 0F C2 opcode
Diffstat (limited to 'src')
| mode | path | insertions |
|---|---|---|
| -rwxr-xr-x | src/dynarec/arm64_emitter.h | 19 |
| -rwxr-xr-x | src/dynarec/dynarec_arm64_0f.c | 32 |

2 files changed, 51 insertions, 0 deletions
```diff
diff --git a/src/dynarec/arm64_emitter.h b/src/dynarec/arm64_emitter.h
index 041dbe5d..6fe29a96 100755
--- a/src/dynarec/arm64_emitter.h
+++ b/src/dynarec/arm64_emitter.h
@@ -787,6 +787,9 @@
 #define VMOVHto(Wd, Vn, index)  EMIT(UMOV_gen(0, ((index)<<2) | 2, Vn, Wd))
 #define VMOVSto(Wd, Vn, index)  EMIT(UMOV_gen(0, ((index)<<3) | 4, Vn, Wd))
 
+#define MVN_vector(Q, Rn, Rd)   ((Q)<<30 | 1<<29 | 0b01110<<24 | 0b10000<<17 | 0b00101<<12 | 0b10<<10 | (Rn)<<5 | (Rd))
+#define VMVNQ(Rd, Rn)           EMIT(MVN_vector(1, Rn, Rd))
+
 // VORR
 #define ORR_vector(Q, Rm, Rn, Rd)   ((Q)<<30 | 0b01110<<24 | 0b10<<22 | 1<<21 | (Rm)<<16 | 0b00011<<11 | 1<<10 | (Rn)<<5 | (Rd))
 #define VORRQ(Vd, Vn, Vm)           EMIT(ORR_vector(1, Vm, Vn, Vd))
@@ -1198,6 +1201,22 @@
 #define VCHIQQ_32(Rd, Rn, Rm)   EMIT(CMG_vector(1, 1, 0b10, 0, Rm, Rn, Rd))
 #define VCHIQQ_64(Rd, Rn, Rm)   EMIT(CMG_vector(1, 1, 0b11, 0, Rm, Rn, Rd))
 
+// Vector Float CMP
+// EQual
+#define FCMP_vector(Q, U, E, sz, Rm, ac, Rn, Rd)    ((Q)<<30 | (U)<<29 | 0b01110<<24 | (E)<<23 | (sz)<<22 | 1<<21 | (Rm)<<16 | 0b1110<<12 | (ac)<<11 | 1<<10 | (Rn)<<5 | (Rd))
+#define FCMEQQD(Rd, Rn, Rm)     EMIT(FCMP_vector(1, 0, 0, 1, Rm, 0, Rn, Rd))
+#define FCMEQQS(Rd, Rn, Rm)     EMIT(FCMP_vector(1, 0, 0, 0, Rm, 0, Rn, Rd))
+// Greater or Equal
+#define FCMGEQD(Rd, Rn, Rm)     EMIT(FCMP_vector(1, 1, 0, 1, Rm, 0, Rn, Rd))
+#define FCMGEQS(Rd, Rn, Rm)     EMIT(FCMP_vector(1, 1, 0, 0, Rm, 0, Rn, Rd))
+#define FCMGEQD_ABS(Rd, Rn, Rm) EMIT(FCMP_vector(1, 1, 0, 1, Rm, 1, Rn, Rd))
+#define FCMGEQS_ABS(Rd, Rn, Rm) EMIT(FCMP_vector(1, 1, 0, 0, Rm, 1, Rn, Rd))
+// Greater Than
+#define FCMGTQD(Rd, Rn, Rm)     EMIT(FCMP_vector(1, 1, 1, 1, Rm, 0, Rn, Rd))
+#define FCMGTQS(Rd, Rn, Rm)     EMIT(FCMP_vector(1, 1, 1, 0, Rm, 0, Rn, Rd))
+#define FCMGTQD_ABS(Rd, Rn, Rm) EMIT(FCMP_vector(1, 1, 1, 1, Rm, 1, Rn, Rd))
+#define FCMGTQS_ABS(Rd, Rn, Rm) EMIT(FCMP_vector(1, 1, 1, 0, Rm, 1, Rn, Rd))
+
 // UMULL / SMULL
 #define MULL_vector(Q, U, size, Rm, Rn, Rd) ((Q)<<30 | (U)<<29 | 0b01110<<24 | (size)<<22 | 1<<21 | (Rm)<<16 | 0b1100<<12 | (Rn)<<5 | (Rd))
 #define VUMULL_8(Rd, Rn, Rm)    EMIT(MULL_vector(0, 1, 0b00, Rm, Rn, Rd))
diff --git a/src/dynarec/dynarec_arm64_0f.c b/src/dynarec/dynarec_arm64_0f.c
index dd6a0110..8f264f5d 100755
--- a/src/dynarec/dynarec_arm64_0f.c
+++ b/src/dynarec/dynarec_arm64_0f.c
@@ -877,6 +877,38 @@ uintptr_t dynarec64_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
             }
             break;
 
+        case 0xC2:
+            INST_NAME("CMPPS Gx, Ex, Ib");
+            nextop = F8;
+            GETGX(v0);
+            GETEX(v1, 1);
+            u8 = F8;
+            switch(u8&7) {
+                // the params are swapped in some comparisons to handle NaN the same way SSE does
+                case 0: FCMEQQS(v0, v0, v1); break;  // Equal
+                case 1: FCMGTQS(v0, v1, v0); break;  // Less than
+                case 2: FCMGEQS(v0, v1, v0); break;  // Less or equal
+                case 3: FCMEQQS(v0, v0, v0);
+                    if(v0!=v1) {
+                        q0 = fpu_get_scratch(dyn);
+                        FCMEQQS(q0, v1, v1);
+                        VANDQ(v0, v0, q0);
+                    }
+                    VMVNQ(v0, v0);
+                    break;  // NaN (NaN is not equal to itself)
+                case 4: FCMEQQS(v0, v0, v1); VMVNQ(v0, v0); break;  // Not Equal (or unordered on ARM, not on x86...)
+                case 5: FCMGTQS(v0, v1, v0); VMVNQ(v0, v0); break;  // Not less than: greater or equal, or unordered
+                case 6: FCMGEQS(v0, v1, v0); VMVNQ(v0, v0); break;  // Not less or equal: greater than, or unordered
+                case 7: FCMEQQS(v0, v0, v0);
+                    if(v0!=v1) {
+                        q0 = fpu_get_scratch(dyn);
+                        FCMEQQS(q0, v1, v1);
+                        VANDQ(v0, v0, q0);
+                    }
+                    break;  // not NaN
+            }
+            break;
+
         case 0xC6:
             INST_NAME("SHUFPS Gx, Ex, Ib");
             nextop = F8;
```
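The NaN handling that the switch comment alludes to is the crux of this opcode: SSE's CMPPS predicates 0-2 (EQ/LT/LE) are ordered compares that yield false when either lane is NaN, while predicates 4-6 (NEQ/NLT/NLE) are unordered and yield true on NaN. ARM64's FCMEQ/FCMGE/FCMGT are all ordered, so LT/LE are synthesized by swapping operands (`a < b` computed as `b > a`), and the unordered predicates by inverting the ordered result with VMVNQ, which flips NaN lanes from false to true. Below is a minimal per-lane reference model of the eight predicates in plain C; `cmpps_lane` is an illustrative helper name, not a box64 function:

```c
#include <stdio.h>
#include <stdint.h>
#include <math.h>

// Per-lane model of CMPPS (imm8 & 7). Relies on IEEE-754 semantics:
// in C, any of ==, <, <= involving a NaN evaluates to false.
static uint32_t cmpps_lane(float a, float b, int pred)
{
    int unordered = isnan(a) || isnan(b);
    int r = 0;
    switch (pred & 7) {
        case 0: r = (a == b);   break;  // EQ:  ordered,   false on NaN
        case 1: r = (a <  b);   break;  // LT:  ordered,   false on NaN
        case 2: r = (a <= b);   break;  // LE:  ordered,   false on NaN
        case 3: r = unordered;  break;  // UNORD
        case 4: r = !(a == b);  break;  // NEQ: unordered, true on NaN
        case 5: r = !(a <  b);  break;  // NLT: unordered, true on NaN
        case 6: r = !(a <= b);  break;  // NLE: unordered, true on NaN
        case 7: r = !unordered; break;  // ORD
    }
    return r ? 0xFFFFFFFFu : 0u;        // SSE writes an all-ones/all-zeros lane mask
}

int main(void)
{
    float nan = nanf("");
    // LT is false for a NaN lane but NLT must be true, which is why the
    // dynarec computes the ordered compare and then inverts it with VMVNQ.
    printf("LT(1,NaN)=%08x  NLT(1,NaN)=%08x\n",
           (unsigned)cmpps_lane(1.0f, nan, 1),
           (unsigned)cmpps_lane(1.0f, nan, 5));
    return 0;
}
```

Predicates 3 (UNORD) and 7 (ORD) have no direct ARM equivalent, so the emitted code builds them from self-comparisons: `FCMEQQS(x, x, x)` is false exactly in NaN lanes, ANDing the two self-tests gives "both operands ordered", and a final VMVNQ turns that into UNORD. As a quick sanity check on the new emitter macro, `MVN_vector(1, 0, 0)` evaluates to 0x6E205800, the base opcode of the A64 `NOT` (vector) instruction that `MVN` aliases.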