| author | ptitSeb <sebastien.chev@gmail.com> | 2021-03-23 14:07:06 +0100 |
|---|---|---|
| committer | ptitSeb <sebastien.chev@gmail.com> | 2021-03-23 14:07:06 +0100 |
| commit | eb102f8a82a091623053acba5f8233f6a6c3f09c | |
| tree | 81bd326bb0c6c231dfb6e52a4d06bbf234a08a70 | |
| parent | e1a2439fbfbfd1b9520767ff4036998dd5591a23 | |
[DYNAREC] Added 66 0F 73 opcodes
| -rwxr-xr-x | src/dynarec/arm64_emitter.h | 48 |
| -rwxr-xr-x | src/dynarec/dynarec_arm64_660f.c | 72 |
2 files changed, 120 insertions, 0 deletions
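
Background for the diff below: 66 0F 73 is the SSE2 shift-by-immediate group (Intel's "Group 14"), where bits 5:3 of the ModRM byte select the operation: /2 = PSRLQ, /3 = PSRLDQ, /6 = PSLLQ, /7 = PSLLDQ. That is exactly what the new `switch((nextop>>3)&7)` dispatches on. A minimal standalone sketch of that decode (plain C, not box64 code; names are illustrative):

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper, mirroring the diff's switch((nextop>>3)&7):
   the ModRM reg field (bits 5:3) picks the operation within the
   66 0F 73 group. */
static const char* grp14_name(uint8_t modrm)
{
    switch ((modrm >> 3) & 7) {
        case 2: return "PSRLQ Ex, Ib";   /* logical right shift, 64-bit lanes */
        case 3: return "PSRLDQ Ex, Ib";  /* whole-register shift right, in bytes */
        case 6: return "PSLLQ Ex, Ib";   /* logical left shift, 64-bit lanes */
        case 7: return "PSLLDQ Ex, Ib";  /* whole-register shift left, in bytes */
        default: return "(invalid)";
    }
}

int main(void)
{
    printf("%s\n", grp14_name(0xD8)); /* reg field = 3 -> PSRLDQ */
    return 0;
}
```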
diff --git a/src/dynarec/arm64_emitter.h b/src/dynarec/arm64_emitter.h
index 9b347af7..709ca4e6 100755
--- a/src/dynarec/arm64_emitter.h
+++ b/src/dynarec/arm64_emitter.h
@@ -637,6 +637,54 @@
 #define VLOGIC_immediate(Q, op, abc, cmode, defgh, Rd) ((Q)<<30 | (op)<<29 | 0b0111100000<<19 | (abc)<<16 | (cmode)<<12 | 1<<10 | (defgh)<<5 | (Rd))
 //#define V
+#define SHL_vector(Q, immh, immb, Rn, Rd) ((Q)<<30 | 0b011110<<23 | (immh)<<19 | (immb)<<16 | 0b01010<<11 | 1<<10 | (Rn)<<5 | (Rd))
+#define VSHLQ_8(Vd, Vn, shift) EMIT(SHL_vector(1, 0b0001, (shift)&7, Vn, Vd))
+#define VSHLQ_16(Vd, Vn, shift) EMIT(SHL_vector(1, 0b0010 | ((shift)>>3)&1, (shift)&7, Vn, Vd))
+#define VSHLQ_32(Vd, Vn, shift) EMIT(SHL_vector(1, 0b0100 | ((shift)>>3)&3, (shift)&7, Vn, Vd))
+#define VSHLQ_64(Vd, Vn, shift) EMIT(SHL_vector(1, 0b1000 | ((shift)>>3)&7, (shift)&7, Vn, Vd))
+#define VSHL_8(Vd, Vn, shift) EMIT(SHL_vector(0, 0b0001, (shift)&7, Vn, Vd))
+#define VSHL_16(Vd, Vn, shift) EMIT(SHL_vector(0, 0b0010 | ((shift)>>3)&1, (shift)&7, Vn, Vd))
+#define VSHL_32(Vd, Vn, shift) EMIT(SHL_vector(0, 0b0100 | ((shift)>>3)&3, (shift)&7, Vn, Vd))
+
+#define SHR_vector(Q, U, immh, immb, Rn, Rd) ((Q)<<30 | (U)<<29 | 0b011110<<23 | (immh)<<19 | (immb)<<16 | 0b00000<<11 | 1<<10 | (Rn)<<5 | (Rd))
+#define VSHRQ_8(Vd, Vn, shift) EMIT(SHR_vector(1, 0, 0b0001, (shift)&7, Vn, Vd))
+#define VSHRQ_16(Vd, Vn, shift) EMIT(SHR_vector(1, 0, 0b0010 | ((shift)>>3)&1, (shift)&7, Vn, Vd))
+#define VSHRQ_32(Vd, Vn, shift) EMIT(SHR_vector(1, 0, 0b0100 | ((shift)>>3)&3, (shift)&7, Vn, Vd))
+#define VSHRQ_64(Vd, Vn, shift) EMIT(SHR_vector(1, 0, 0b1000 | ((shift)>>3)&7, (shift)&7, Vn, Vd))
+#define VSHR_8(Vd, Vn, shift) EMIT(SHR_vector(0, 0, 0b0001, (shift)&7, Vn, Vd))
+#define VSHR_16(Vd, Vn, shift) EMIT(SHR_vector(0, 0, 0b0010 | ((shift)>>3)&1, (shift)&7, Vn, Vd))
+#define VSHR_32(Vd, Vn, shift) EMIT(SHR_vector(0, 0, 0b0100 | ((shift)>>3)&3, (shift)&7, Vn, Vd))
+#define VSSHRQ_8(Vd, Vn, shift) EMIT(SHR_vector(1, 0, 0b0001, (shift)&7, Vn, Vd))
+#define VSSHRQ_16(Vd, Vn, shift) EMIT(SHR_vector(1, 0, 0b0010 | ((shift)>>3)&1, (shift)&7, Vn, Vd))
+#define VSSHRQ_32(Vd, Vn, shift) EMIT(SHR_vector(1, 0, 0b0100 | ((shift)>>3)&3, (shift)&7, Vn, Vd))
+#define VSSHRQ_64(Vd, Vn, shift) EMIT(SHR_vector(1, 0, 0b1000 | ((shift)>>3)&7, (shift)&7, Vn, Vd))
+#define VSSHR_8(Vd, Vn, shift) EMIT(SHR_vector(0, 0, 0b0001, (shift)&7, Vn, Vd))
+#define VSSHR_16(Vd, Vn, shift) EMIT(SHR_vector(0, 0, 0b0010 | ((shift)>>3)&1, (shift)&7, Vn, Vd))
+#define VSSHR_32(Vd, Vn, shift) EMIT(SHR_vector(0, 0, 0b0100 | ((shift)>>3)&3, (shift)&7, Vn, Vd))
+
+#define EXT_vector(Q, Rm, imm4, Rn, Rd) ((Q)<<30 | 0b101110<<24 | (Rm)<<16 | (imm4)<<11 | (Rn)<<5 | (Rd))
+#define VEXTQ_8(Rd, Rn, Rm, index) EMIT(EXT_vector(1, Rm, index, Rn, Rd))
+
+// Shift Left and Insert (not touching lower part of dest)
+#define SLI_vector(Q, immh, immb, Rn, Rd) ((Q)<<30 | 1<<29 | 0b011110<<23 | (immh)<<19 | (immb)<<16 | 0b01010<<11 | 1<<10 | (Rn)<<5 | (Rd))
+#define VSLIQ_8(Vd, Vn, shift) EMIT(SLI_vector(1, 0b0001, (shift)&7, Vn, Vd))
+#define VSLIQ_16(Vd, Vn, shift) EMIT(SLI_vector(1, 0b0010 | ((shift)>>3)&1, (shift)&7, Vn, Vd))
+#define VSLIQ_32(Vd, Vn, shift) EMIT(SLI_vector(1, 0b0100 | ((shift)>>3)&3, (shift)&7, Vn, Vd))
+#define VSLIQ_64(Vd, Vn, shift) EMIT(SLI_vector(1, 0b1000 | ((shift)>>3)&7, (shift)&7, Vn, Vd))
+#define VSLI_8(Vd, Vn, shift) EMIT(SLI_vector(0, 0b0001, (shift)&7, Vn, Vd))
+#define VSLI_16(Vd, Vn, shift) EMIT(SLI_vector(0, 0b0010 | ((shift)>>3)&1, (shift)&7, Vn, Vd))
+#define VSLI_32(Vd, Vn, shift) EMIT(SLI_vector(0, 0b0100 | ((shift)>>3)&3, (shift)&7, Vn, Vd))
+
+// Shift Right and Insert (not touching higher part of dest)
+#define SRI_vector(Q, immh, immb, Rn, Rd) ((Q)<<30 | 1<<29 | 0b011110<<23 | (immh)<<19 | (immb)<<16 | 0b01000<<11 | 1<<10 | (Rn)<<5 | (Rd))
+#define VSRIQ_8(Vd, Vn, shift) EMIT(SRI_vector(1, 0b0001, (shift)&7, Vn, Vd))
+#define VSRIQ_16(Vd, Vn, shift) EMIT(SRI_vector(1, 0b0010 | ((shift)>>3)&1, (shift)&7, Vn, Vd))
+#define VSRIQ_32(Vd, Vn, shift) EMIT(SRI_vector(1, 0b0100 | ((shift)>>3)&3, (shift)&7, Vn, Vd))
+#define VSRIQ_64(Vd, Vn, shift) EMIT(SRI_vector(1, 0b1000 | ((shift)>>3)&7, (shift)&7, Vn, Vd))
+#define VSRI_8(Vd, Vn, shift) EMIT(SRI_vector(0, 0b0001, (shift)&7, Vn, Vd))
+#define VSRI_16(Vd, Vn, shift) EMIT(SRI_vector(0, 0b0010 | ((shift)>>3)&1, (shift)&7, Vn, Vd))
+#define VSRI_32(Vd, Vn, shift) EMIT(SRI_vector(0, 0b0100 | ((shift)>>3)&3, (shift)&7, Vn, Vd))
+
 // FMOV
 #define FMOV_general(sf, type, mode, opcode, Rn, Rd) ((sf)<<31 | 0b11110<<24 | (type)<<22 | 1<<21 | (mode)<<19 | (opcode)<<16 | (Rn)<<5 | (Rd))
 // 32-bit to single-precision
diff --git a/src/dynarec/dynarec_arm64_660f.c b/src/dynarec/dynarec_arm64_660f.c
index 4905ee36..565a193d 100755
--- a/src/dynarec/dynarec_arm64_660f.c
+++ b/src/dynarec/dynarec_arm64_660f.c
@@ -280,6 +280,78 @@ uintptr_t dynarec64_660F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n
             }
             break;
+        case 0x73:
+            nextop = F8;
+            switch((nextop>>3)&7) {
+                case 2:
+                    INST_NAME("PSRLQ Ex, Ib");
+                    GETEX(q0, 1);
+                    u8 = F8;
+                    if(u8) {
+                        if(u8>63) {
+                            VEORQ(q0, q0, q0);
+                        } else if(u8) {
+                            VSHRQ_64(q0, q0, u8);
+                        }
+                        if(!MODREG) {
+                            VSTR128_U12(q0, ed, fixedaddress);
+                        }
+                    }
+                    break;
+                case 3:
+                    INST_NAME("PSRLDQ Ex, Ib");
+                    GETEX(q0, 1);
+                    u8 = F8;
+                    if(u8) {
+                        if(u8>15) {
+                            VEORQ(q0, q0, q0);
+                        } else {
+                            q1 = fpu_get_scratch(dyn);
+                            VEORQ(q1, q1, q1);
+                            VEXTQ_8(q0, q0, q1, u8);
+                        }
+                        if(!MODREG) {
+                            VSTR128_U12(q0, ed, fixedaddress);
+                        }
+                    }
+                    break;
+                case 6:
+                    INST_NAME("PSLLQ Ex, Ib");
+                    GETEX(q0, 1);
+                    u8 = F8;
+                    if(u8) {
+                        if(u8>63) {
+                            VEORQ(q0, q0, q0);
+                        } else {
+                            VSHLQ_64(q0, q0, u8);
+                        }
+                        if(!MODREG) {
+                            VSTR128_U12(q0, ed, fixedaddress);
+                        }
+                    }
+                    break;
+                case 7:
+                    INST_NAME("PSLLDQ Ex, Ib");
+                    GETEX(q0, 1);
+                    u8 = F8;
+                    if(u8) {
+                        if(u8>15) {
+                            VEORQ(q0, q0, q0);
+                        } else if(u8>0) {
+                            q1 = fpu_get_scratch(dyn);
+                            VEORQ(q1, q1, q1);
+                            VEXTQ_8(q0, q1, q0, 16-u8);
+                        }
+                        if(!MODREG) {
+                            VSTR128_U12(q0, ed, fixedaddress);
+                        }
+                    }
+                    break;
+                default:
+                    DEFAULT;
+            }
+            break;
+
         case 0x7E:
             INST_NAME("MOVD Ed,Gx");
             nextop = F8;
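
The notable trick in this commit is the byte-wise shifts: AArch64 has no single immediate instruction that shifts a whole 128-bit register by bytes, so PSRLDQ/PSLLDQ are synthesized with EXT against a zeroed scratch register (the `VEXTQ_8` calls above). A plain-C model of that equivalence — a sketch only, assuming little-endian lane order; `ext_16b`, `psrldq`, and `pslldq` are illustrative names, not box64 helpers:

```c
#include <assert.h>
#include <stdint.h>
#include <string.h>

/* EXT semantics: take bytes idx..idx+15 from the 32-byte concatenation
   lo:hi (lo holds the lowest-numbered bytes). */
static void ext_16b(uint8_t dst[16], const uint8_t lo[16],
                    const uint8_t hi[16], unsigned idx)
{
    uint8_t cat[32];
    memcpy(cat, lo, 16);
    memcpy(cat + 16, hi, 16);
    memcpy(dst, cat + idx, 16);
}

/* PSRLDQ: shift the full 128-bit value right by n bytes, zero fill.
   Matches the diff's VEXTQ_8(q0, q0, q1, u8) with q1 zeroed. */
static void psrldq(uint8_t x[16], unsigned n)
{
    uint8_t zero[16] = {0}, out[16];
    ext_16b(out, x, zero, n);
    memcpy(x, out, 16);
}

/* PSLLDQ: shift left by n bytes, zero fill.
   Matches the diff's VEXTQ_8(q0, q1, q0, 16-u8) with q1 zeroed. */
static void pslldq(uint8_t x[16], unsigned n)
{
    uint8_t zero[16] = {0}, out[16];
    ext_16b(out, zero, x, 16 - n);
    memcpy(x, out, 16);
}

int main(void)
{
    uint8_t v[16] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
    psrldq(v, 4);                    /* drop the low 4 bytes, zero-fill the top */
    assert(v[0] == 4 && v[15] == 0);
    pslldq(v, 4);                    /* shift back up; low 4 bytes become zero */
    assert(v[0] == 0 && v[4] == 4);
    return 0;
}
```

The guard `if(u8>15)` in the diff covers the case EXT cannot: an x86 byte shift of 16 or more clears the register, which is done directly with `VEORQ(q0, q0, q0)`.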