diff options
| author | phorcys <phorcys@126.com> | 2025-06-26 17:02:28 +0800 |
|---|---|---|
| committer | GitHub <noreply@github.com> | 2025-06-26 11:02:28 +0200 |
| commit | 42b98f77cec444deedf0c7b0c922a36800347e1d (patch) | |
| tree | f7e6d839c9aedea7c70c7b7ecd50a09648815b22 /src | |
| parent | bb253f37086d48106d59feb66b57957ba9d4715c (diff) | |
| download | box64-42b98f77cec444deedf0c7b0c922a36800347e1d.tar.gz box64-42b98f77cec444deedf0c7b0c922a36800347e1d.zip | |
[LA64_DYNAREC] Add la64 avx load/store ops part 3. (#2774)
* VEX.0f VMOVMSKPS * VEX.66.0f VMOVMSKPD/VPMOVMSKB/VMASKMOVDQU * VEX.66.0f.38 VMASKMOVPS/VMASKMOVPD/VPMASKMOVD/VPMASKMOVQ
Diffstat (limited to 'src')
| -rw-r--r-- | src/dynarec/la64/dynarec_la64_avx_0f.c | 16 | ||||
| -rw-r--r-- | src/dynarec/la64/dynarec_la64_avx_66_0f.c | 44 | ||||
| -rw-r--r-- | src/dynarec/la64/dynarec_la64_avx_66_0f38.c | 119 | ||||
| -rw-r--r-- | src/dynarec/la64/dynarec_la64_helper.h | 2 | ||||
| -rw-r--r-- | src/dynarec/la64/la64_emitter.h | 49 | ||||
| -rw-r--r-- | src/dynarec/la64/la64_printer.c | 196 |
6 files changed, 416 insertions, 10 deletions
diff --git a/src/dynarec/la64/dynarec_la64_avx_0f.c b/src/dynarec/la64/dynarec_la64_avx_0f.c index e5b3af89..89b8b243 100644 --- a/src/dynarec/la64/dynarec_la64_avx_0f.c +++ b/src/dynarec/la64/dynarec_la64_avx_0f.c @@ -216,6 +216,22 @@ uintptr_t dynarec64_AVX_0F(dynarec_la64_t* dyn, uintptr_t addr, uintptr_t ip, in SMWRITE2(); } break; + case 0x50: + nextop = F8; + INST_NAME("VMOVMSKPS Gd, Ex"); + GETEYxy(v0, 0, 0); + GETGD; + d1 = fpu_get_scratch(dyn); + if (vex.l) { + XVMSKLTZ_W(d1, v0); + VPICKVE2GR_DU(gd, d1, 0); + VPICKVE2GR_DU(x4, d1, 2); + BSTRINS_D(gd, x4, 7, 4); + } else { + VMSKLTZ_W(d1, v0); + VPICKVE2GR_DU(gd, d1, 0); + } + break; case 0x77: if (!vex.l) { INST_NAME("VZEROUPPER"); diff --git a/src/dynarec/la64/dynarec_la64_avx_66_0f.c b/src/dynarec/la64/dynarec_la64_avx_66_0f.c index 852c145b..db6f4c96 100644 --- a/src/dynarec/la64/dynarec_la64_avx_66_0f.c +++ b/src/dynarec/la64/dynarec_la64_avx_66_0f.c @@ -211,6 +211,22 @@ uintptr_t dynarec64_AVX_66_0F(dynarec_la64_t* dyn, uintptr_t addr, uintptr_t ip, SMWRITE2(); } break; + case 0x50: + nextop = F8; + INST_NAME("VMOVMSKPD Gd, Ex"); + GETEYxy(v0, 0, 0); + GETGD; + d1 = fpu_get_scratch(dyn); + if (vex.l) { + XVMSKLTZ_D(d1, v0); + VPICKVE2GR_DU(gd, d1, 0); + VPICKVE2GR_DU(x4, d1, 2); + BSTRINS_D(gd, x4, 3, 2); + } else { + VMSKLTZ_D(d1, v0); + VPICKVE2GR_DU(gd, d1, 0); + } + break; case 0x6E: INST_NAME("VMOVD Gx, Ed"); nextop = F8; @@ -302,6 +318,22 @@ uintptr_t dynarec64_AVX_66_0F(dynarec_la64_t* dyn, uintptr_t addr, uintptr_t ip, SMWRITE2(); } break; + case 0xD7: + nextop = F8; + INST_NAME("VPMOVMSKB Gd, Ex"); + GETEYxy(v0, 0, 0); + GETGD; + d1 = fpu_get_scratch(dyn); + if (vex.l) { + XVMSKLTZ_B(d1, v0); + VPICKVE2GR_DU(gd, d1, 0); + VPICKVE2GR_DU(x4, d1, 2); + BSTRINS_D(gd, x4, 31, 16); + } else { + VMSKLTZ_B(d1, v0); + VPICKVE2GR_DU(gd, d1, 0); + } + break; case 0xE7: INST_NAME("VMOVNTDQ Ex, Gx"); nextop = F8; @@ -318,6 +350,18 @@ uintptr_t dynarec64_AVX_66_0F(dynarec_la64_t* dyn, uintptr_t addr, 
uintptr_t ip, SMWRITE2(); } break; + case 0xF7: + INST_NAME("VMASKMOVDQU Gx, Ex"); + nextop = F8; + GETEYx(v1, 0, 0); + GETGYx(v0, 1); + q0 = fpu_get_scratch(dyn); + q1 = fpu_get_scratch(dyn); + VSLTI_B(q1, v1, 0); // q1 = byte selection mask + VLD(q0, xRDI, 0); + VBITSEL_V(q0, q0, v0, q1); // sel v0 if mask is 1 + VST(q0, xRDI, 0); + break; default: DEFAULT; } diff --git a/src/dynarec/la64/dynarec_la64_avx_66_0f38.c b/src/dynarec/la64/dynarec_la64_avx_66_0f38.c index 752cd308..1c2d70ad 100644 --- a/src/dynarec/la64/dynarec_la64_avx_66_0f38.c +++ b/src/dynarec/la64/dynarec_la64_avx_66_0f38.c @@ -62,9 +62,9 @@ uintptr_t dynarec64_AVX_66_0F38(dynarec_la64_t* dyn, uintptr_t addr, uintptr_t i nextop = F8; GETEYSS(q2, 0, 0); GETGYxy_empty(q0); - if(vex.l){ + if (vex.l) { XVREPLVE0_W(q0, q2); - }else{ + } else { VREPLVE_W(q0, q2, 0); } break; @@ -72,10 +72,10 @@ uintptr_t dynarec64_AVX_66_0F38(dynarec_la64_t* dyn, uintptr_t addr, uintptr_t i INST_NAME("VBROADCASTSD Gx, Ex"); nextop = F8; GETEYSD(q2, 0, 0); - GETGYxy_empty(q0); + GETGYxy_empty(q0); - if(vex.l){ + if (vex.l) { XVREPLVE0_D(q0, q2); - }else{ + } else { VREPLVE_D(q0, q2, 0); } break; @@ -85,6 +85,115 @@ uintptr_t dynarec64_AVX_66_0F38(dynarec_la64_t* dyn, uintptr_t addr, uintptr_t i GETGY_empty_EY_xy(q0, q2, 0); XVREPLVE0_Q(q0, q2); break; + case 0x2C: + INST_NAME("VMASKMOVPS Gx, Vx, Ex"); + nextop = F8; + GETGY_empty_VYEY_xy(v0, v1, v2, 0); + d0 = fpu_get_scratch(dyn); + d1 = fpu_get_scratch(dyn); + if (vex.l) { + XVXOR_V(d0, d0, d0); + XVSLTI_W(d1, v1, 0); // create all-one mask for negative element. + XVBITSEL_V(v0, d0, v2, d1); + } else { + VXOR_V(d0, d0, d0); + VSLTI_W(d1, v1, 0); // create all-one mask for negative element. 
+ VBITSEL_V(v0, d0, v2, d1); + } + break; + case 0x2D: + INST_NAME("VMASKMOVPD Gx, Vx, Ex"); + nextop = F8; + GETGY_empty_VYEY_xy(v0, v1, v2, 0); + d0 = fpu_get_scratch(dyn); + d1 = fpu_get_scratch(dyn); + if (vex.l) { + XVXOR_V(d0, d0, d0); + XVSLTI_D(d1, v1, 0); // create all-one mask for negative element. + XVBITSEL_V(v0, d0, v2, d1); + } else { + VXOR_V(d0, d0, d0); + VSLTI_D(d1, v1, 0); // create all-one mask for negative element. + VBITSEL_V(v0, d0, v2, d1); + } + break; + case 0x2E: + INST_NAME("VMASKMOVPS Ex, Gx, Vx"); + nextop = F8; + GETEY_VYGY_xy(v0, v2, v1, 0); + d0 = fpu_get_scratch(dyn); + if (vex.l) { + XVSLTI_W(d0, v1, 0); // create all-one mask for negative element. + XVBITSEL_V(v0, v0, v2, d0); + PUTEYy(v0); + } else { + VSLTI_W(d0, v1, 0); // create all-one mask for negative element. + VBITSEL_V(v0, v0, v2, d0); + PUTEYx(v0); + } + break; + case 0x2F: + INST_NAME("VMASKMOVPD Ex, Gx, Vx"); + nextop = F8; + GETEY_VYGY_xy(v0, v2, v1, 0); + d0 = fpu_get_scratch(dyn); + if (vex.l) { + XVSLTI_D(d0, v1, 0); // create all-one mask for negative element. + XVBITSEL_V(v0, v0, v2, d0); + PUTEYy(v0); + } else { + VSLTI_D(d0, v1, 0); // create all-one mask for negative element. 
+ VBITSEL_V(v0, v0, v2, d0); + PUTEYx(v0); + } + break; + case 0x8C: + INST_NAME("VPMASKMOVD/Q Gx, Vx, Ex"); + nextop = F8; + GETGY_empty_VYEY_xy(v0, v1, v2, 0); + d0 = fpu_get_scratch(dyn); + d1 = fpu_get_scratch(dyn); + if (vex.l) { + XVXOR_V(d0, d0, d0); + if (rex.w) { + XVSLTI_D(d1, v1, 0); + } else { + XVSLTI_W(d1, v1, 0); + } + XVBITSEL_V(v0, d0, v2, d1); + } else { + VXOR_V(d0, d0, d0); + if (rex.w) { + VSLTI_D(d1, v1, 0); + } else { + VSLTI_W(d1, v1, 0); + } + VBITSEL_V(v0, d0, v2, d1); + } + break; + case 0x8E: + INST_NAME("VPMASKMOVD/Q Ex, Vx, Gx"); + nextop = F8; + GETEY_VYGY_xy(v0, v2, v1, 0); + d0 = fpu_get_scratch(dyn); + if (vex.l) { + if (rex.w) { + XVSLTI_D(d0, v1, 0); + } else { + XVSLTI_W(d0, v1, 0); + } + XVBITSEL_V(v0, v0, v2, d0); + PUTEYy(v0); + } else { + if (rex.w) { + VSLTI_D(d0, v1, 0); + } else { + VSLTI_W(d0, v1, 0); + } + VBITSEL_V(v0, v0, v2, d0); + PUTEYx(v0); + } + break; default: DEFAULT; } diff --git a/src/dynarec/la64/dynarec_la64_helper.h b/src/dynarec/la64/dynarec_la64_helper.h index 7d833547..223ac151 100644 --- a/src/dynarec/la64/dynarec_la64_helper.h +++ b/src/dynarec/la64/dynarec_la64_helper.h @@ -630,7 +630,7 @@ GETGYxy(gx, 1); // Get writable EY, and non-written VY and GY -#define GETEY_VYGY_xy(gx, vx, ex, D) \ +#define GETEY_VYGY_xy(ex, vx, gx, D) \ GETVYxy(vx, 0); \ GETGYxy(gx, 0); \ GETEYxy(ex, 1, D); diff --git a/src/dynarec/la64/la64_emitter.h b/src/dynarec/la64/la64_emitter.h index 28dc5d63..349b5a48 100644 --- a/src/dynarec/la64/la64_emitter.h +++ b/src/dynarec/la64/la64_emitter.h @@ -2062,10 +2062,10 @@ LSX instruction starts with V, LASX instruction starts with XV. 
#define VNEG_W(vd, vj) EMIT(type_2R(0b0111011010011100001110, vj, vd)) #define VNEG_D(vd, vj) EMIT(type_2R(0b0111011010011100001111, vj, vd)) -#define XVLD(vd, rj, imm12) EMIT(type_2RI12(0b0010110010, imm12, rj, vd)) -#define XVST(vd, rj, imm12) EMIT(type_2RI12(0b0010110011, imm12, rj, vd)) -#define XVLDX(vd, vj, vk) EMIT(type_3R(0b00111000010010000, vk, vj, vd)) -#define XVSTX(vd, vj, vk) EMIT(type_3R(0b00111000010011000, vk, vj, vd)) +#define XVLD(vd, rj, imm12) EMIT(type_2RI12(0b0010110010, imm12, rj, vd)) +#define XVST(vd, rj, imm12) EMIT(type_2RI12(0b0010110011, imm12, rj, vd)) +#define XVLDX(vd, vj, vk) EMIT(type_3R(0b00111000010010000, vk, vj, vd)) +#define XVSTX(vd, vj, vk) EMIT(type_3R(0b00111000010011000, vk, vj, vd)) #define XVHSELI_D(vd, vj, imm5) EMIT(type_2RI5(0b01110110100111111, imm5, vj, vd)) #define XVROTRI_B(vd, vj, imm3) EMIT(type_2RI3(0b0111011010100000001, imm3, vj, vd)) @@ -2200,6 +2200,47 @@ LSX instruction starts with V, LASX instruction starts with XV. #define XVORI_B(vd, vj, imm8) EMIT(type_2RI8(0b01110111110101, imm8, vj, vd)) #define XVXORI_B(vd, vj, imm8) EMIT(type_2RI8(0b01110111110110, imm8, vj, vd)) #define XVNORI_B(vd, vj, imm8) EMIT(type_2RI8(0b01110111110111, imm8, vj, vd)) +#define XVBITSEL_V(xd, xj, xk, xa) EMIT(type_4R(0b000011010010, xa, xk, xj, xd)) +#define XVSEQI_B(xd, xj, imm5) EMIT(type_2RI5(0b01110110100000000, imm5, xj, xd)) +#define XVSEQI_H(xd, xj, imm5) EMIT(type_2RI5(0b01110110100000001, imm5, xj, xd)) +#define XVSEQI_W(xd, xj, imm5) EMIT(type_2RI5(0b01110110100000010, imm5, xj, xd)) +#define XVSEQI_D(xd, xj, imm5) EMIT(type_2RI5(0b01110110100000011, imm5, xj, xd)) +#define XVSLEI_B(xd, xj, imm5) EMIT(type_2RI5(0b01110110100000100, imm5, xj, xd)) +#define XVSLEI_H(xd, xj, imm5) EMIT(type_2RI5(0b01110110100000101, imm5, xj, xd)) +#define XVSLEI_W(xd, xj, imm5) EMIT(type_2RI5(0b01110110100000110, imm5, xj, xd)) +#define XVSLEI_D(xd, xj, imm5) EMIT(type_2RI5(0b01110110100000111, imm5, xj, xd)) +#define XVSLEI_BU(xd, 
xj, imm5) EMIT(type_2RI5(0b01110110100001000, imm5, xj, xd)) +#define XVSLEI_HU(xd, xj, imm5) EMIT(type_2RI5(0b01110110100001001, imm5, xj, xd)) +#define XVSLEI_WU(xd, xj, imm5) EMIT(type_2RI5(0b01110110100001010, imm5, xj, xd)) +#define XVSLEI_DU(xd, xj, imm5) EMIT(type_2RI5(0b01110110100001011, imm5, xj, xd)) +#define XVSLTI_B(xd, xj, imm5) EMIT(type_2RI5(0b01110110100001100, imm5, xj, xd)) +#define XVSLTI_H(xd, xj, imm5) EMIT(type_2RI5(0b01110110100001101, imm5, xj, xd)) +#define XVSLTI_W(xd, xj, imm5) EMIT(type_2RI5(0b01110110100001110, imm5, xj, xd)) +#define XVSLTI_D(xd, xj, imm5) EMIT(type_2RI5(0b01110110100001111, imm5, xj, xd)) +#define XVSLTI_BU(xd, xj, imm5) EMIT(type_2RI5(0b01110110100010000, imm5, xj, xd)) +#define XVSLTI_HU(xd, xj, imm5) EMIT(type_2RI5(0b01110110100010001, imm5, xj, xd)) +#define XVSLTI_WU(xd, xj, imm5) EMIT(type_2RI5(0b01110110100010010, imm5, xj, xd)) +#define XVSLTI_DU(xd, xj, imm5) EMIT(type_2RI5(0b01110110100010011, imm5, xj, xd)) +#define XVBSLL_V(xd, xj, imm5) EMIT(type_2RI5(0b01110110100011100, imm5, xj, xd)) +#define XVBSRL_V(xd, xj, imm5) EMIT(type_2RI5(0b01110110100011101, imm5, xj, xd)) +#define XVMAXI_B(xd, xj, imm5) EMIT(type_2RI5(0b01110110100100000, imm5, xj, xd)) +#define XVMAXI_H(xd, xj, imm5) EMIT(type_2RI5(0b01110110100100001, imm5, xj, xd)) +#define XVMAXI_W(xd, xj, imm5) EMIT(type_2RI5(0b01110110100100010, imm5, xj, xd)) +#define XVMAXI_D(xd, xj, imm5) EMIT(type_2RI5(0b01110110100100011, imm5, xj, xd)) +#define XVMINI_B(xd, xj, imm5) EMIT(type_2RI5(0b01110110100100100, imm5, xj, xd)) +#define XVMINI_H(xd, xj, imm5) EMIT(type_2RI5(0b01110110100100101, imm5, xj, xd)) +#define XVMINI_W(xd, xj, imm5) EMIT(type_2RI5(0b01110110100100110, imm5, xj, xd)) +#define XVMINI_D(xd, xj, imm5) EMIT(type_2RI5(0b01110110100100111, imm5, xj, xd)) +#define XVMAXI_BU(xd, xj, imm5) EMIT(type_2RI5(0b01110110100101000, imm5, xj, xd)) +#define XVMAXI_HU(xd, xj, imm5) EMIT(type_2RI5(0b01110110100101001, imm5, xj, xd)) +#define 
XVMAXI_WU(xd, xj, imm5) EMIT(type_2RI5(0b01110110100101010, imm5, xj, xd)) +#define XVMAXI_DU(xd, xj, imm5) EMIT(type_2RI5(0b01110110100101011, imm5, xj, xd)) +#define XVMINI_BU(xd, xj, imm5) EMIT(type_2RI5(0b01110110100101100, imm5, xj, xd)) +#define XVMINI_HU(xd, xj, imm5) EMIT(type_2RI5(0b01110110100101101, imm5, xj, xd)) +#define XVMINI_WU(xd, xj, imm5) EMIT(type_2RI5(0b01110110100101110, imm5, xj, xd)) +#define XVMINI_DU(xd, xj, imm5) EMIT(type_2RI5(0b01110110100101111, imm5, xj, xd)) +#define XVFRSTPI_B(xd, xj, imm5) EMIT(type_2RI5(0b01110110100110100, imm5, xj, xd)) +#define XVFRSTPI_H(xd, xj, imm5) EMIT(type_2RI5(0b01110110100110101, imm5, xj, xd)) #define XVFMADD_S(xd, xj, xk, xa) EMIT(type_4R(0b000010100001, xa, xk, xj, xd)) #define XVFMSUB_S(xd, xj, xk, xa) EMIT(type_4R(0b000010100101, xa, xk, xj, xd)) diff --git a/src/dynarec/la64/la64_printer.c b/src/dynarec/la64/la64_printer.c index 0c80f84a..2d494a5e 100644 --- a/src/dynarec/la64/la64_printer.c +++ b/src/dynarec/la64/la64_printer.c @@ -7248,6 +7248,202 @@ const char* la64_print(uint32_t opcode, uintptr_t addr) snprintf(buff, sizeof(buff), "%-15s %s, %s, 0x%lx", "XVPERMI.Q", XVt[Rd], XVt[Rj], imm); return buff; } + if (isMask(opcode, "000011010010aaaaakkkkkjjjjjddddd", &a)) { + snprintf(buff, sizeof(buff), "%-15s %s, %s, %s, %s", "XVBITSEL.V", XVt[Rd], XVt[Rj], XVt[Rk], XVt[Ra]); + return buff; + } + if (isMask(opcode, "01110110100000000iiiiijjjjjddddd", &a)) { + snprintf(buff, sizeof(buff), "%-15s %s, %s, 0x%lx", "XVSEQI.B", XVt[Rd], XVt[Rj], imm); + return buff; + } + if (isMask(opcode, "01110110100000001iiiiijjjjjddddd", &a)) { + snprintf(buff, sizeof(buff), "%-15s %s, %s, 0x%lx", "XVSEQI.H", XVt[Rd], XVt[Rj], imm); + return buff; + } + if (isMask(opcode, "01110110100000010iiiiijjjjjddddd", &a)) { + snprintf(buff, sizeof(buff), "%-15s %s, %s, 0x%lx", "XVSEQI.W", XVt[Rd], XVt[Rj], imm); + return buff; + } + if (isMask(opcode, "01110110100000011iiiiijjjjjddddd", &a)) { + snprintf(buff, sizeof(buff), 
"%-15s %s, %s, 0x%lx", "XVSEQI.D", XVt[Rd], XVt[Rj], imm); + return buff; + } + if (isMask(opcode, "01110110100000100iiiiijjjjjddddd", &a)) { + snprintf(buff, sizeof(buff), "%-15s %s, %s, 0x%lx", "XVSLEI.B", XVt[Rd], XVt[Rj], imm); + return buff; + } + if (isMask(opcode, "01110110100000101iiiiijjjjjddddd", &a)) { + snprintf(buff, sizeof(buff), "%-15s %s, %s, 0x%lx", "XVSLEI.H", XVt[Rd], XVt[Rj], imm); + return buff; + } + if (isMask(opcode, "01110110100000110iiiiijjjjjddddd", &a)) { + snprintf(buff, sizeof(buff), "%-15s %s, %s, 0x%lx", "XVSLEI.W", XVt[Rd], XVt[Rj], imm); + return buff; + } + if (isMask(opcode, "01110110100000111iiiiijjjjjddddd", &a)) { + snprintf(buff, sizeof(buff), "%-15s %s, %s, 0x%lx", "XVSLEI.D", XVt[Rd], XVt[Rj], imm); + return buff; + } + if (isMask(opcode, "01110110100001000iiiiijjjjjddddd", &a)) { + snprintf(buff, sizeof(buff), "%-15s %s, %s, 0x%lx", "XVSLEI.BU", XVt[Rd], XVt[Rj], imm); + return buff; + } + if (isMask(opcode, "01110110100001001iiiiijjjjjddddd", &a)) { + snprintf(buff, sizeof(buff), "%-15s %s, %s, 0x%lx", "XVSLEI.HU", XVt[Rd], XVt[Rj], imm); + return buff; + } + if (isMask(opcode, "01110110100001010iiiiijjjjjddddd", &a)) { + snprintf(buff, sizeof(buff), "%-15s %s, %s, 0x%lx", "XVSLEI.WU", XVt[Rd], XVt[Rj], imm); + return buff; + } + if (isMask(opcode, "01110110100001011iiiiijjjjjddddd", &a)) { + snprintf(buff, sizeof(buff), "%-15s %s, %s, 0x%lx", "XVSLEI.DU", XVt[Rd], XVt[Rj], imm); + return buff; + } + if (isMask(opcode, "01110110100001100iiiiijjjjjddddd", &a)) { + snprintf(buff, sizeof(buff), "%-15s %s, %s, 0x%lx", "XVSLTI.B", XVt[Rd], XVt[Rj], imm); + return buff; + } + if (isMask(opcode, "01110110100001101iiiiijjjjjddddd", &a)) { + snprintf(buff, sizeof(buff), "%-15s %s, %s, 0x%lx", "XVSLTI.H", XVt[Rd], XVt[Rj], imm); + return buff; + } + if (isMask(opcode, "01110110100001110iiiiijjjjjddddd", &a)) { + snprintf(buff, sizeof(buff), "%-15s %s, %s, 0x%lx", "XVSLTI.W", XVt[Rd], XVt[Rj], imm); + return buff; + } + if 
(isMask(opcode, "01110110100001111iiiiijjjjjddddd", &a)) { + snprintf(buff, sizeof(buff), "%-15s %s, %s, 0x%lx", "XVSLTI.D", XVt[Rd], XVt[Rj], imm); + return buff; + } + if (isMask(opcode, "01110110100010000iiiiijjjjjddddd", &a)) { + snprintf(buff, sizeof(buff), "%-15s %s, %s, 0x%lx", "XVSLTI.BU", XVt[Rd], XVt[Rj], imm); + return buff; + } + if (isMask(opcode, "01110110100010001iiiiijjjjjddddd", &a)) { + snprintf(buff, sizeof(buff), "%-15s %s, %s, 0x%lx", "XVSLTI.HU", XVt[Rd], XVt[Rj], imm); + return buff; + } + if (isMask(opcode, "01110110100010010iiiiijjjjjddddd", &a)) { + snprintf(buff, sizeof(buff), "%-15s %s, %s, 0x%lx", "XVSLTI.WU", XVt[Rd], XVt[Rj], imm); + return buff; + } + if (isMask(opcode, "01110110100010011iiiiijjjjjddddd", &a)) { + snprintf(buff, sizeof(buff), "%-15s %s, %s, 0x%lx", "XVSLTI.DU", XVt[Rd], XVt[Rj], imm); + return buff; + } + if (isMask(opcode, "01110110100010100iiiiijjjjjddddd", &a)) { + snprintf(buff, sizeof(buff), "%-15s %s, %s, 0x%lx", "XVADDI.BU", XVt[Rd], XVt[Rj], imm); + return buff; + } + if (isMask(opcode, "01110110100010101iiiiijjjjjddddd", &a)) { + snprintf(buff, sizeof(buff), "%-15s %s, %s, 0x%lx", "XVADDI.HU", XVt[Rd], XVt[Rj], imm); + return buff; + } + if (isMask(opcode, "01110110100010110iiiiijjjjjddddd", &a)) { + snprintf(buff, sizeof(buff), "%-15s %s, %s, 0x%lx", "XVADDI.WU", XVt[Rd], XVt[Rj], imm); + return buff; + } + if (isMask(opcode, "01110110100010111iiiiijjjjjddddd", &a)) { + snprintf(buff, sizeof(buff), "%-15s %s, %s, 0x%lx", "XVADDI.DU", XVt[Rd], XVt[Rj], imm); + return buff; + } + if (isMask(opcode, "01110110100011000iiiiijjjjjddddd", &a)) { + snprintf(buff, sizeof(buff), "%-15s %s, %s, 0x%lx", "XVSUBI.BU", XVt[Rd], XVt[Rj], imm); + return buff; + } + if (isMask(opcode, "01110110100011001iiiiijjjjjddddd", &a)) { + snprintf(buff, sizeof(buff), "%-15s %s, %s, 0x%lx", "XVSUBI.HU", XVt[Rd], XVt[Rj], imm); + return buff; + } + if (isMask(opcode, "01110110100011010iiiiijjjjjddddd", &a)) { + snprintf(buff, 
sizeof(buff), "%-15s %s, %s, 0x%lx", "XVSUBI.WU", XVt[Rd], XVt[Rj], imm); + return buff; + } + if (isMask(opcode, "01110110100011011iiiiijjjjjddddd", &a)) { + snprintf(buff, sizeof(buff), "%-15s %s, %s, 0x%lx", "XVSUBI.DU", XVt[Rd], XVt[Rj], imm); + return buff; + } + if (isMask(opcode, "01110110100011100iiiiijjjjjddddd", &a)) { + snprintf(buff, sizeof(buff), "%-15s %s, %s, 0x%lx", "XVBSLL.V", XVt[Rd], XVt[Rj], imm); + return buff; + } + if (isMask(opcode, "01110110100011101iiiiijjjjjddddd", &a)) { + snprintf(buff, sizeof(buff), "%-15s %s, %s, 0x%lx", "XVBSRL.V", XVt[Rd], XVt[Rj], imm); + return buff; + } + if (isMask(opcode, "01110110100100000iiiiijjjjjddddd", &a)) { + snprintf(buff, sizeof(buff), "%-15s %s, %s, 0x%lx", "XVMAXI.B", XVt[Rd], XVt[Rj], imm); + return buff; + } + if (isMask(opcode, "01110110100100001iiiiijjjjjddddd", &a)) { + snprintf(buff, sizeof(buff), "%-15s %s, %s, 0x%lx", "XVMAXI.H", XVt[Rd], XVt[Rj], imm); + return buff; + } + if (isMask(opcode, "01110110100100010iiiiijjjjjddddd", &a)) { + snprintf(buff, sizeof(buff), "%-15s %s, %s, 0x%lx", "XVMAXI.W", XVt[Rd], XVt[Rj], imm); + return buff; + } + if (isMask(opcode, "01110110100100011iiiiijjjjjddddd", &a)) { + snprintf(buff, sizeof(buff), "%-15s %s, %s, 0x%lx", "XVMAXI.D", XVt[Rd], XVt[Rj], imm); + return buff; + } + if (isMask(opcode, "01110110100100100iiiiijjjjjddddd", &a)) { + snprintf(buff, sizeof(buff), "%-15s %s, %s, 0x%lx", "XVMINI.B", XVt[Rd], XVt[Rj], imm); + return buff; + } + if (isMask(opcode, "01110110100100101iiiiijjjjjddddd", &a)) { + snprintf(buff, sizeof(buff), "%-15s %s, %s, 0x%lx", "XVMINI.H", XVt[Rd], XVt[Rj], imm); + return buff; + } + if (isMask(opcode, "01110110100100110iiiiijjjjjddddd", &a)) { + snprintf(buff, sizeof(buff), "%-15s %s, %s, 0x%lx", "XVMINI.W", XVt[Rd], XVt[Rj], imm); + return buff; + } + if (isMask(opcode, "01110110100100111iiiiijjjjjddddd", &a)) { + snprintf(buff, sizeof(buff), "%-15s %s, %s, 0x%lx", "XVMINI.D", XVt[Rd], XVt[Rj], imm); + return buff; + } + 
if (isMask(opcode, "01110110100101000iiiiijjjjjddddd", &a)) { + snprintf(buff, sizeof(buff), "%-15s %s, %s, 0x%lx", "XVMAXI.BU", XVt[Rd], XVt[Rj], imm); + return buff; + } + if (isMask(opcode, "01110110100101001iiiiijjjjjddddd", &a)) { + snprintf(buff, sizeof(buff), "%-15s %s, %s, 0x%lx", "XVMAXI.HU", XVt[Rd], XVt[Rj], imm); + return buff; + } + if (isMask(opcode, "01110110100101010iiiiijjjjjddddd", &a)) { + snprintf(buff, sizeof(buff), "%-15s %s, %s, 0x%lx", "XVMAXI.WU", XVt[Rd], XVt[Rj], imm); + return buff; + } + if (isMask(opcode, "01110110100101011iiiiijjjjjddddd", &a)) { + snprintf(buff, sizeof(buff), "%-15s %s, %s, 0x%lx", "XVMAXI.DU", XVt[Rd], XVt[Rj], imm); + return buff; + } + if (isMask(opcode, "01110110100101100iiiiijjjjjddddd", &a)) { + snprintf(buff, sizeof(buff), "%-15s %s, %s, 0x%lx", "XVMINI.BU", XVt[Rd], XVt[Rj], imm); + return buff; + } + if (isMask(opcode, "01110110100101101iiiiijjjjjddddd", &a)) { + snprintf(buff, sizeof(buff), "%-15s %s, %s, 0x%lx", "XVMINI.HU", XVt[Rd], XVt[Rj], imm); + return buff; + } + if (isMask(opcode, "01110110100101110iiiiijjjjjddddd", &a)) { + snprintf(buff, sizeof(buff), "%-15s %s, %s, 0x%lx", "XVMINI.WU", XVt[Rd], XVt[Rj], imm); + return buff; + } + if (isMask(opcode, "01110110100101111iiiiijjjjjddddd", &a)) { + snprintf(buff, sizeof(buff), "%-15s %s, %s, 0x%lx", "XVMINI.DU", XVt[Rd], XVt[Rj], imm); + return buff; + } + if (isMask(opcode, "01110110100110100iiiiijjjjjddddd", &a)) { + snprintf(buff, sizeof(buff), "%-15s %s, %s, 0x%lx", "XVFRSTPI.B", XVt[Rd], XVt[Rj], imm); + return buff; + } + if (isMask(opcode, "01110110100110101iiiiijjjjjddddd", &a)) { + snprintf(buff, sizeof(buff), "%-15s %s, %s, 0x%lx", "XVFRSTPI.H", XVt[Rd], XVt[Rj], imm); + return buff; + } snprintf(buff, sizeof(buff), "%08X ???", __builtin_bswap32(opcode)); return buff; } |