| field | value | date |
|---|---|---|
| author | ptitSeb <sebastien.chev@gmail.com> | 2021-04-11 16:23:45 +0200 |
| committer | ptitSeb <sebastien.chev@gmail.com> | 2021-04-11 16:23:45 +0200 |
| commit | ece7ea36d90c77373fe966e3d67873e112c56aa9 (patch) | |
| tree | 000c4746474c01ede88dcc8ec389b0911da9a14f /src | |
| parent | 73d27a625fee7f7cb73c8a880b4d91092ec7f66c (diff) | |
| download | box64-ece7ea36d90c77373fe966e3d67873e112c56aa9.tar.gz, box64-ece7ea36d90c77373fe966e3d67873e112c56aa9.zip | |
[DYNAREC] Added 66 0F E2 opcode
Diffstat (limited to 'src')
| mode | file | lines |
|---|---|---|
| -rwxr-xr-x | src/dynarec/arm64_emitter.h | 25 |
| -rwxr-xr-x | src/dynarec/dynarec_arm64_660f.c | 14 |
2 files changed, 39 insertions, 0 deletions
```diff
diff --git a/src/dynarec/arm64_emitter.h b/src/dynarec/arm64_emitter.h
index cb00c19e..2c30aa77 100755
--- a/src/dynarec/arm64_emitter.h
+++ b/src/dynarec/arm64_emitter.h
@@ -744,6 +744,15 @@
 #define SHL_scalar_imm(U, immh, immb, Rn, Rd)    (0b01<<30 | 0b111110<<23 | (immh)<<19 | (immb)<<16 | 0b01010<<11 | 1<<10 | (Rn)<<5 | (Rd))
 #define SHL_64(Vd, Vn, shift)    EMIT(SHL_scalar_imm(0, 0b1000 | (((shift)>>3)&7), (shift)&7, Vn, Vd))
 
+#define SHL_vector_vector(Q, U, size, Rm, R, S, Rn, Rd)    ((Q)<<30 | (U)<<29 | 0b01110<<24 | (size)<<22 | 1<<21 | (Rm)<<16 | 0b010<<13 | (R)<<12 | (S)<<11 | 1<<10 | (Rn)<<5 | (Rd))
+#define SSHL_8(Vd, Vn, Vm)     EMIT(SHL_vector_vector(0, 0, 0b00, Vm, 0, 0, Vn, Vd))
+#define SSHL_16(Vd, Vn, Vm)    EMIT(SHL_vector_vector(0, 0, 0b01, Vm, 0, 0, Vn, Vd))
+#define SSHL_32(Vd, Vn, Vm)    EMIT(SHL_vector_vector(0, 0, 0b10, Vm, 0, 0, Vn, Vd))
+#define SSHLQ_8(Vd, Vn, Vm)    EMIT(SHL_vector_vector(1, 0, 0b00, Vm, 0, 0, Vn, Vd))
+#define SSHLQ_16(Vd, Vn, Vm)   EMIT(SHL_vector_vector(1, 0, 0b01, Vm, 0, 0, Vn, Vd))
+#define SSHLQ_32(Vd, Vn, Vm)   EMIT(SHL_vector_vector(1, 0, 0b10, Vm, 0, 0, Vn, Vd))
+#define SSHLQ_64(Vd, Vn, Vm)   EMIT(SHL_vector_vector(1, 0, 0b11, Vm, 0, 0, Vn, Vd))
+
 #define SHR_vector(Q, U, immh, immb, Rn, Rd)    ((Q)<<30 | (U)<<29 | 0b011110<<23 | (immh)<<19 | (immb)<<16 | 0b00000<<11 | 1<<10 | (Rn)<<5 | (Rd))
 #define VSHRQ_8(Vd, Vn, shift)     EMIT(SHR_vector(1, 1, 0b0001, (8-(shift))&7, Vn, Vd))
 #define VSHRQ_16(Vd, Vn, shift)    EMIT(SHR_vector(1, 1, 0b0010 | (((16-(shift))>>3)&1), (16-(shift))&7, Vn, Vd))
@@ -805,6 +814,22 @@
 #define VSUB_16(Vd, Vn, Vm)    EMIT(ADDSUB_vector(0, 1, 0b01, Vm, Vn, Vd))
 #define VSUB_32(Vd, Vn, Vm)    EMIT(ADDSUB_vector(0, 1, 0b10, Vm, Vn, Vd))
 
+#define NEGABS_vector(Q, U, size, Rn, Rd)    ((Q)<<30 | (U)<<29 | 0b01110<<24 | (size)<<22 | 0b10000<<17 | 0b01011<<12 | 0b10<<10 | (Rn)<<5 | (Rd))
+#define NEG_8(Vd, Vn)      EMIT(NEGABS_vector(0, 1, 0b00, Vn, Vd))
+#define NEG_16(Vd, Vn)     EMIT(NEGABS_vector(0, 1, 0b01, Vn, Vd))
+#define NEG_32(Vd, Vn)     EMIT(NEGABS_vector(0, 1, 0b10, Vn, Vd))
+#define NEGQ_8(Vd, Vn)     EMIT(NEGABS_vector(1, 1, 0b00, Vn, Vd))
+#define NEGQ_16(Vd, Vn)    EMIT(NEGABS_vector(1, 1, 0b01, Vn, Vd))
+#define NEGQ_32(Vd, Vn)    EMIT(NEGABS_vector(1, 1, 0b10, Vn, Vd))
+#define NEGQ_64(Vd, Vn)    EMIT(NEGABS_vector(1, 1, 0b11, Vn, Vd))
+#define ABS_8(Vd, Vn)      EMIT(NEGABS_vector(0, 0, 0b00, Vn, Vd))
+#define ABS_16(Vd, Vn)     EMIT(NEGABS_vector(0, 0, 0b01, Vn, Vd))
+#define ABS_32(Vd, Vn)     EMIT(NEGABS_vector(0, 0, 0b10, Vn, Vd))
+#define ABSQ_8(Vd, Vn)     EMIT(NEGABS_vector(1, 0, 0b00, Vn, Vd))
+#define ABSQ_16(Vd, Vn)    EMIT(NEGABS_vector(1, 0, 0b01, Vn, Vd))
+#define ABSQ_32(Vd, Vn)    EMIT(NEGABS_vector(1, 0, 0b10, Vn, Vd))
+#define ABSQ_64(Vd, Vn)    EMIT(NEGABS_vector(1, 0, 0b11, Vn, Vd))
+
 // FMOV
 #define FMOV_general(sf, type, mode, opcode, Rn, Rd)    ((sf)<<31 | 0b11110<<24 | (type)<<22 | 1<<21 | (mode)<<19 | (opcode)<<16 | (Rn)<<5 | (Rd))
 // 32-bit to single-precision
diff --git a/src/dynarec/dynarec_arm64_660f.c b/src/dynarec/dynarec_arm64_660f.c
index e18d530e..cf1ac89a 100755
--- a/src/dynarec/dynarec_arm64_660f.c
+++ b/src/dynarec/dynarec_arm64_660f.c
@@ -1134,6 +1134,20 @@ uintptr_t dynarec64_660F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n
             URHADDQ_8(v0, v0, v1);
             break;
 
+        case 0xE2:
+            INST_NAME("PSRAD Gx,Ex");
+            nextop = F8;
+            GETGX(q0);
+            GETEX(q1, 0);
+            v0 = fpu_get_scratch(dyn);
+            VMOVeD(v0, 0, q1, 0);
+            VMOVeD(v0, 1, q1, 0);
+            SQXTN_32(v0, v0);   // 2*q1 in 32bits now
+            NEG_32(v0, v0);     // because we want SHR and not SHL
+            VMOVeD(v0, 1, v0, 0);
+            SSHLQ_32(q0, q0, v0);
+            break;
+
         case 0xE4:
             INST_NAME("PMULHUW Gx,Ex");
             nextop = F8;
```
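The new SHL_vector_vector() helper in arm64_emitter.h builds the 32-bit encoding of the AArch64 SSHL (signed shift left by register) instruction: Q selects the 64- or 128-bit form, U the signed/unsigned variant, and size the element width. The following standalone sanity check is not box64 code; it simply re-creates the macro and prints the word produced for SSHLQ_32, with an expected value worked out by hand from the AArch64 encoding tables.

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Copy of the new encoding helper from arm64_emitter.h (binary literals
 * are a GCC/Clang extension, as used throughout box64). */
#define SHL_vector_vector(Q, U, size, Rm, R, S, Rn, Rd)                  \
    ((Q)<<30 | (U)<<29 | 0b01110<<24 | (size)<<22 | 1<<21 | (Rm)<<16 |   \
     0b010<<13 | (R)<<12 | (S)<<11 | 1<<10 | (Rn)<<5 | (Rd))

int main(void)
{
    /* Q=1 selects the 128-bit form, U=0 the signed variant (SSHL rather
     * than USHL), size=0b10 the 32-bit lanes: together that is SSHLQ_32. */
    uint32_t opcode = SHL_vector_vector(1, 0, 0b10, 2, 0, 0, 1, 0);
    printf("SSHLQ_32(v0, v1, v2) -> 0x%08" PRIX32 "\n", opcode);
    /* expected: 0x4EA24420, i.e. "sshl v0.4s, v1.4s, v2.4s" */
    return 0;
}
```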
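In dynarec_arm64_660f.c, the new case 0xE2 implements PSRAD Gx,Ex: each signed 32-bit lane of Gx is shifted right arithmetically by the count held in the low 64 bits of Ex, and a count larger than 31 leaves each lane filled with copies of its sign bit. AArch64 has no vector "shift right by register" instruction, so the emitted sequence narrows the count with SQXTN, negates it, broadcasts it to all lanes, and relies on SSHL treating a negative per-lane count as a right shift. The sketch below is only a reference model of the intended x86 semantics; the names psrad_model and lanes are illustrative and not part of box64.

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical reference model: what PSRAD Gx,Ex is expected to compute
 * on the four 32-bit lanes of Gx, given the 64-bit count from the low
 * quadword of Ex. */
static void psrad_model(int32_t lanes[4], uint64_t count)
{
    for (int i = 0; i < 4; ++i) {
        if (count > 31)
            /* x86 fills the whole lane with its sign bit for large counts */
            lanes[i] = (lanes[i] < 0) ? -1 : 0;
        else
            /* assumes >> on signed values is an arithmetic (sign-extending)
             * shift, which GCC and Clang implement on AArch64 and x86 */
            lanes[i] >>= (int)count;
    }
}

int main(void)
{
    int32_t v[4] = { INT32_MIN, 0x40000000, -16, 15 };
    psrad_model(v, 4);
    printf("%d %d %d %d\n", v[0], v[1], v[2], v[3]);
    /* prints: -134217728 67108864 -1 0 */
    return 0;
}
```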