diff options
| author | ptitSeb <sebastien.chev@gmail.com> | 2021-03-23 17:34:32 +0100 |
|---|---|---|
| committer | ptitSeb <sebastien.chev@gmail.com> | 2021-03-23 17:34:32 +0100 |
| commit | 1d209596a34caf4dc0a0d6ac51eb4031ef2cca93 (patch) | |
| tree | b3ccaca06860c038f8dae5ee4abaf011fc95c330 /src | |
| parent | b1d5e27564f0cce7d38b3ab8167657ede4833ca8 (diff) | |
| download | box64-1d209596a34caf4dc0a0d6ac51eb4031ef2cca93.tar.gz box64-1d209596a34caf4dc0a0d6ac51eb4031ef2cca93.zip | |
[DYNAREC] Added 66 0F D4 opcode
Diffstat (limited to 'src')
| -rwxr-xr-x | src/dynarec/arm64_emitter.h | 17 |
| -rwxr-xr-x | src/dynarec/dynarec_arm64_660f.c | 9 |
2 files changed, 26 insertions, 0 deletions
diff --git a/src/dynarec/arm64_emitter.h b/src/dynarec/arm64_emitter.h index 647964e7..1baf2218 100755 --- a/src/dynarec/arm64_emitter.h +++ b/src/dynarec/arm64_emitter.h @@ -691,6 +691,23 @@ #define VSRI_16(Vd, Vn, shift) EMIT(VSRI_vector(0, 0b0010 | ((shift)>>3)&1, (shift)&7, Vn, Vd)) #define VSRI_32(Vd, Vn, shift) EMIT(VSRI_vector(0, 0b0100 | ((shift)>>3)&3, (shift)&7, Vn, Vd)) +// Integer MATH +#define ADDSUB_vector(Q, U, size, Rm, Rn, Rd) ((Q)<<30 | (U)<<29 | 0b01110<<24 | (size)<<22 | 1<<21 | (Rm)<<16 | 0b10000<<11 | 1<<10 | (Rn)<<5 | (Rd)) +#define VADDQ_8(Vd, Vn, Vm) EMIT(ADDSUB_vector(1, 0, 0b00, Vm, Vn, Vd)) +#define VADDQ_16(Vd, Vn, Vm) EMIT(ADDSUB_vector(1, 0, 0b01, Vm, Vn, Vd)) +#define VADDQ_32(Vd, Vn, Vm) EMIT(ADDSUB_vector(1, 0, 0b10, Vm, Vn, Vd)) +#define VADDQ_64(Vd, Vn, Vm) EMIT(ADDSUB_vector(1, 0, 0b11, Vm, Vn, Vd)) +#define VADD_8(Vd, Vn, Vm) EMIT(ADDSUB_vector(0, 0, 0b00, Vm, Vn, Vd)) +#define VADD_16(Vd, Vn, Vm) EMIT(ADDSUB_vector(0, 0, 0b01, Vm, Vn, Vd)) +#define VADD_32(Vd, Vn, Vm) EMIT(ADDSUB_vector(0, 0, 0b10, Vm, Vn, Vd)) +#define VSUBQ_8(Vd, Vn, Vm) EMIT(ADDSUB_vector(1, 1, 0b00, Vm, Vn, Vd)) +#define VSUBQ_16(Vd, Vn, Vm) EMIT(ADDSUB_vector(1, 1, 0b01, Vm, Vn, Vd)) +#define VSUBQ_32(Vd, Vn, Vm) EMIT(ADDSUB_vector(1, 1, 0b10, Vm, Vn, Vd)) +#define VSUBQ_64(Vd, Vn, Vm) EMIT(ADDSUB_vector(1, 1, 0b11, Vm, Vn, Vd)) +#define VSUB_8(Vd, Vn, Vm) EMIT(ADDSUB_vector(0, 1, 0b00, Vm, Vn, Vd)) +#define VSUB_16(Vd, Vn, Vm) EMIT(ADDSUB_vector(0, 1, 0b01, Vm, Vn, Vd)) +#define VSUB_32(Vd, Vn, Vm) EMIT(ADDSUB_vector(0, 1, 0b10, Vm, Vn, Vd)) + // FMOV #define FMOV_general(sf, type, mode, opcode, Rn, Rd) ((sf)<<31 | 0b11110<<24 | (type)<<22 | 1<<21 | (mode)<<19 | (opcode)<<16 | (Rn)<<5 | (Rd)) // 32-bit to single-precision diff --git a/src/dynarec/dynarec_arm64_660f.c b/src/dynarec/dynarec_arm64_660f.c index 614ccf6e..84440b08 100755 --- a/src/dynarec/dynarec_arm64_660f.c +++ b/src/dynarec/dynarec_arm64_660f.c @@ -581,6 +581,15 @@ uintptr_t 
dynarec64_660F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n BFIx(gd, x1, 0, 16); break; + + case 0xD4: + INST_NAME("PADDQ Gx,Ex"); + nextop = F8; + GETGX(v0); + GETEX(q0, 0); + VADDQ_64(v0, v0, q0); + break; + case 0xD6: INST_NAME("MOVQ Ex, Gx"); nextop = F8; |