Diffstat (limited to 'src')
-rw-r--r--  src/dynarec/la64/dynarec_la64_avx_0f.c      | 15
-rw-r--r--  src/dynarec/la64/dynarec_la64_avx_66_0f.c   | 33
-rw-r--r--  src/dynarec/la64/dynarec_la64_avx_66_0f38.c | 44
-rw-r--r--  src/dynarec/la64/dynarec_la64_avx_66_0f3a.c | 70
-rw-r--r--  src/dynarec/la64/dynarec_la64_avx_f2_0f.c   | 14
-rw-r--r--  src/dynarec/la64/dynarec_la64_avx_f3_0f.c   | 14
-rw-r--r--  src/dynarec/la64/la64_emitter.h             | 98
-rw-r--r--  src/dynarec/la64/la64_printer.c             | 16
8 files changed, 288 insertions(+), 16 deletions(-)
diff --git a/src/dynarec/la64/dynarec_la64_avx_0f.c b/src/dynarec/la64/dynarec_la64_avx_0f.c
index dc97726e..3a9c9822 100644
--- a/src/dynarec/la64/dynarec_la64_avx_0f.c
+++ b/src/dynarec/la64/dynarec_la64_avx_0f.c
@@ -337,6 +337,21 @@ uintptr_t dynarec64_AVX_0F(dynarec_la64_t* dyn, uintptr_t addr, uintptr_t ip, in
                         DEFAULT;
                 }
             break;
+        case 0xC6:
+            INST_NAME("VSHUFPS Gx, Vx, Ex, Ib");
+            nextop = F8;
+            GETGY_empty_VYEY_xy(v0, v1, v2, 1);
+            u8 = F8;
+            d0 = fpu_get_scratch(dyn);
+            d1 = fpu_get_scratch(dyn);
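+            // SHUFPS: result words 0-1 come from Vx via imm[3:0], words 2-3
+            // from Ex via imm[7:4]. Shuffle each source by its own nibble,
+            // then VPICKEV.D merges the even (low) 64-bit halves.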
+            if (v1 == v2) {
+                VSHUF4Ixy(W, v0, v1, u8);
+            } else {
+                VSHUF4Ixy(W, d0, v1, u8);
+                VSHUF4Ixy(W, d1, v2, u8 >> 4);
+                VPICKEVxy(D, v0, d1, d0);
+            }
+            break;
         default:
             DEFAULT;
     }
diff --git a/src/dynarec/la64/dynarec_la64_avx_66_0f.c b/src/dynarec/la64/dynarec_la64_avx_66_0f.c
index 520cb82c..13e9a359 100644
--- a/src/dynarec/la64/dynarec_la64_avx_66_0f.c
+++ b/src/dynarec/la64/dynarec_la64_avx_66_0f.c
@@ -394,6 +394,13 @@ uintptr_t dynarec64_AVX_66_0F(dynarec_la64_t* dyn, uintptr_t addr, uintptr_t ip,
                 }
             }
             break;
+        case 0x70:
+            INST_NAME("VPSHUFD Gx, Ex, Ib");
+            nextop = F8;
+            GETGY_empty_EY_xy(v0, v1, 1);
+            u8 = F8;
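+            // VSHUF4I.W performs PSHUFD's 4-way dword shuffle per 128-bit lane.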
+            VSHUF4Ixy(W, v0, v1, u8);
+            break;
         case 0x71:
             nextop = F8;
             switch ((nextop >> 3) & 7) {
@@ -566,6 +573,32 @@ uintptr_t dynarec64_AVX_66_0F(dynarec_la64_t* dyn, uintptr_t addr, uintptr_t ip,
                 SMWRITE2();
             }
             break;
+        case 0xC6:
+            INST_NAME("VSHUFPD Gx, Vx, Ex, Ib");
+            nextop = F8;
+            GETGY_empty_VYEY_xy(v0, v1, v2, 1);
+            u8 = F8 & 0xf;
+            d0 = fpu_get_scratch(dyn);
+            d1 = fpu_get_scratch(dyn);
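+            // SHUFPD picks dest.d[0] from Vx and dest.d[1] from Ex, one
+            // selector bit per qword (only imm[3:0] matter, two bits per
+            // 128-bit lane). [X]VSHUF4I.D indexes the pair {vd, vj}: values
+            // 0/1 select vd's lanes, 2/3 select vj's, hence the 0x8 | ... imm.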
+            if (vex.l) {
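+                // Same selector bits in both lanes: a single XVSHUF4I.D,
+                // which repeats per 128-bit lane, covers the whole ymm.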
+                if ((u8 >> 2) == (u8 & 0b11)) {
+                    XVOR_V(d0, v1, v1);
+                    XVSHUF4I_D(d0, v2, 0x8 | (u8 & 1) | ((u8 & 2) << 1));
+                    XVOR_V(v0, d0, d0);
+                } else {
+                    XVOR_V(d0, v1, v1);
+                    XVOR_V(d1, v1, v1);
+                    XVSHUF4I_D(d0, v2, 0x8 | (u8 & 1) | ((u8 & 2) << 1));
+                    XVSHUF4I_D(d1, v2, 0x8 | ((u8 & 4) >> 2) | ((u8 & 8) >> 1));
+                    XVPERMI_Q(d1, d0, XVPERMI_IMM_4_0(3, 0));
+                    XVOR_V(v0, d1, d1);
+                }
+            } else {
+                VOR_V(d0, v1, v1);
+                VSHUF4I_D(d0, v2, 0x8 | (u8 & 1) | ((u8 & 2) << 1));
+                VOR_V(v0, d0, d0);
+            }
+            break;
         case 0xD1:
             INST_NAME("VPSRLW Gx, Vx, Ex");
             nextop = F8;
diff --git a/src/dynarec/la64/dynarec_la64_avx_66_0f38.c b/src/dynarec/la64/dynarec_la64_avx_66_0f38.c
index 2109223f..ceed503a 100644
--- a/src/dynarec/la64/dynarec_la64_avx_66_0f38.c
+++ b/src/dynarec/la64/dynarec_la64_avx_66_0f38.c
@@ -57,6 +57,18 @@ uintptr_t dynarec64_AVX_66_0F38(dynarec_la64_t* dyn, uintptr_t addr, uintptr_t i
     rex_t rex = vex.rex;
 
     switch (opcode) {
+        case 0x00:
+            INST_NAME("VPSHUFB Gx, Vx, Ex");
+            nextop = F8;
+            GETGY_empty_VYEY_xy(v0, v1, v2, 0);
+            q0 = fpu_get_scratch(dyn);
+            q1 = fpu_get_scratch(dyn);
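+            // PSHUFB zeroes a byte when the selector's bit 7 is set. Keep
+            // bits {7,3:0} (the 0x8f mask), then clamp with unsigned min so
+            // any selector >= 0x80 becomes 0x1f, which lands in the zero
+            // vector q1 half of VSHUF.B's 32-byte {q1:v1} table.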
+            VLDIxy(q0, 0b0000010001111); // broadcast 0b10001111 as byte
+            VAND_Vxy(q0, q0, v2);
+            VMINIxy(BU, q0, q0, 0x1f);
+            VXOR_Vxy(q1, q1, q1);
+            VSHUF_Bxy(v0, q1, v1, q0);
+            break;
         case 0x01:
             INST_NAME("VPHADDW Gx, Vx, Ex");
             nextop = F8;
@@ -171,6 +183,32 @@ uintptr_t dynarec64_AVX_66_0F38(dynarec_la64_t* dyn, uintptr_t addr, uintptr_t i
                 XVPERMI_D(v0, q0, 0b1000);
             }
             break;
+        case 0x0C:
+            INST_NAME("VPERMILPS Gx, Vx, Ex");
+            nextop = F8;
+            GETGY_empty_VYEY_xy(v0, v1, v2, 0);
+            d0 = fpu_get_scratch(dyn);
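+            // Variable PERMILPS: the low 2 bits of each dword in Ex select a
+            // word of Vx within its 128-bit lane; mask the indices, then
+            // VSHUF.W gathers from Vx.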
+            VANDIxy(d0, v2, 0b11);
+            VSHUFxy(W, d0, v1, v1);
+            VOR_Vxy(v0, d0, d0);
+            break;
+        case 0x0D:
+            INST_NAME("VPERMILPD Gx, Vx, Ex");
+            nextop = F8;
+            GETGY_empty_VYEY_xy(v0, v1, v2, 0);
+            d0 = fpu_get_scratch(dyn);
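+            // Variable PERMILPD: bit 1 of each qword in Ex is the lane
+            // selector; shift and mask it down to a 0/1 index for VSHUF.D.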
+            VSRLIxy(D, d0, v2, 0x1);
+            VANDIxy(d0, d0, 0b1);
+            VSHUFxy(D, d0, v2, v1);
+            VOR_Vxy(v0, d0, d0);
+            break;
+        case 0x16:
+            INST_NAME("VPERMPS Gx, Vx, Ex");
+            nextop = F8;
+            GETGY_empty_VYEY_xy(v0, v1, v2, 0);
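+            // XVPERM.W is a full 256-bit 8-lane word gather: Vx supplies the
+            // indices, Ex the data, matching VPERMPS directly.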
+            XVPERM_W(v0, v2, v1);
+            break;
         case 0x18:
             INST_NAME("VBROADCASTSS Gx, Ex");
             nextop = F8;
@@ -471,6 +509,12 @@ uintptr_t dynarec64_AVX_66_0F38(dynarec_la64_t* dyn, uintptr_t addr, uintptr_t i
                 VSLLWIL_DU_WU(q0, q1, 0);
             }
             break;
+        case 0x36:
+            INST_NAME("VPERMD Gx, Vx, Ex");
+            nextop = F8;
+            GETGY_empty_VYEY_xy(v0, v1, v2, 0);
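+            // Identical mapping to VPERMPS (0x16): XVPERM.W with Vx as
+            // indices and Ex as data.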
+            XVPERM_W(v0, v2, v1);
+            break;
         case 0x38:
             INST_NAME("VPMINSB Gx, Vx, Ex");
             nextop = F8;
diff --git a/src/dynarec/la64/dynarec_la64_avx_66_0f3a.c b/src/dynarec/la64/dynarec_la64_avx_66_0f3a.c
index 9e250a6f..4a5baf93 100644
--- a/src/dynarec/la64/dynarec_la64_avx_66_0f3a.c
+++ b/src/dynarec/la64/dynarec_la64_avx_66_0f3a.c
@@ -57,6 +57,19 @@ uintptr_t dynarec64_AVX_66_0F3A(dynarec_la64_t* dyn, uintptr_t addr, uintptr_t i
     rex_t rex = vex.rex;
 
     switch (opcode) {
+        case 0x00:
+        case 0x01:
+            if (opcode) {
+                INST_NAME("VPERMPD Gx, Ex, Imm8");
+            } else {
+                INST_NAME("VPERMQ Gx, Ex, Imm8");
+            }
+            nextop = F8;
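+            // VEX.256-only encodings; an all-zero word is an illegal
+            // LoongArch instruction, so EMIT(0) traps, mirroring the x86 #UD.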
+            if (!vex.l) EMIT(0);
+            GETGY_empty_EY_xy(v0, v1, 1);
+            u8 = F8;
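+            // XVPERMI.D uses the same 2-bit-per-qword immediate layout as
+            // VPERMQ/VPERMPD, so the imm passes through unchanged.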
+            XVPERMI_D(v0, v1, u8);
+            break;
         case 0x02:
         case 0x0C:
             if (opcode == 0x2) {
@@ -96,6 +109,63 @@ uintptr_t dynarec64_AVX_66_0F3A(dynarec_la64_t* dyn, uintptr_t addr, uintptr_t i
             VEXT2XV_W_B(d0, d0);
             XVBITSEL_V(v0, v1, v2, d0);
             break;
+        case 0x04:
+            INST_NAME("VPERMILPS Gx, Ex, Imm8");
+            nextop = F8;
+            GETGY_empty_EY_xy(v0, v1, 1);
+            u8 = F8;
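+            // The immediate form is exactly a per-128-bit-lane 4-way dword
+            // shuffle, which VSHUF4I.W implements directly.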
+            VSHUF4Ixy(W, v0, v1, u8);
+            break;
+        case 0x05:
+            INST_NAME("VPERMILPD Gx, Ex, Imm8");
+            nextop = F8;
+            GETGY_empty_EY_xy(v0, v1, 1);
+            u8 = F8;
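+            // Spread the four 1-bit qword selectors into XVPERMI.D's 2-bit
+            // fields; the 0b10100000 base adds 2 to the upper two fields so
+            // each 128-bit half keeps indexing its own lanes.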
+            u8 = 0b10100000 + ((u8 & 0b00001000) << 3) + ((u8 & 0b00000100) << 2) + ((u8 & 0b00000010) << 1) + (u8 & 0b00000001);
+            XVPERMI_D(v0, v1, u8);
+            break;
+        case 0x06:
+        case 0x46:
+            if (opcode == 0x06) {
+                INST_NAME("VPERM2F128 Gx, Vx, Ex, Imm8");
+            } else {
+                INST_NAME("VPERM2I128 Gx, Vx, Ex, Imm8");
+            }
+            nextop = F8;
+            if (!vex.l) EMIT(0);
+            u8 = F8;
+            GETGY_empty_VYEY_xy(v0, v1, v2, 1);
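+            // imm[1:0] / imm[5:4] choose the source 128-bit lane for the low
+            // / high half (bit 1|5: which vector, bit 0|4: which lane);
+            // imm[3] and imm[7] zero the corresponding half, so 0x88 zeroes
+            // the whole destination.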
+            if (u8 == 0x88) {
+                XVXOR_V(v0, v0, v0);
+                break;
+            }
+            d0 = fpu_get_scratch(dyn);
+            uint8_t zero_low = (u8 & 0x8) >> 3;
+            uint8_t zero_up = (u8 & 0x80) >> 7;
+            uint8_t vec_lo = (u8 & 0x2) >> 1;
+            uint8_t index_lo = u8 & 0x1;
+            uint8_t vec_hi = (u8 & 0x20) >> 5;
+            uint8_t index_hi = (u8 & 0x10) >> 4;
+            if (!zero_low && !zero_up) {
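+                // XVPERMI.Q indices: 0/1 pick xj's lanes, 2/3 the
+                // destination's own; operand order is chosen so v0 aliasing
+                // v1 or v2 stays correct.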
+                if (v0 == v1) {
+                    XVPERMI_Q(v0, v2, XVPERMI_IMM_4_0((vec_hi ? 0 : 2) | index_hi, (vec_lo ? 0 : 2) | index_lo));
+                } else if (v0 == v2) {
+                    XVPERMI_Q(v0, v1, XVPERMI_IMM_4_0((vec_hi << 1) | index_hi, ((vec_lo) << 1) | index_lo));
+                } else {
+                    XVOR_V(v0, v2, v2);
+                    XVPERMI_Q(v0, v1, XVPERMI_IMM_4_0((vec_hi << 1) | index_hi, ((vec_lo) << 1) | index_lo));
+                }
+                break;
+            }
+            XVXOR_V(d0, d0, d0);
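+            // One half is zeroed: copy the surviving source vector, then
+            // splice the zero vector into the other half via XVPERMI.Q.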
+            if (zero_low) {
+                XVORI_B(v0, vec_hi ? v2 : v1, 0);
+                XVPERMI_Q(v0, d0, XVPERMI_IMM_4_0(2 + index_hi, 0));
+            } else {
+                XVORI_B(v0, vec_lo ? v2 : v1, 0);
+                XVPERMI_Q(v0, d0, XVPERMI_IMM_4_0(0, 2 + index_lo));
+            }
+            break;
         case 0x0D:
             INST_NAME("VBLENDPD Gx, Vx, Ex, Ib");
             nextop = F8;
diff --git a/src/dynarec/la64/dynarec_la64_avx_f2_0f.c b/src/dynarec/la64/dynarec_la64_avx_f2_0f.c
index ad42f029..0b0eb1df 100644
--- a/src/dynarec/la64/dynarec_la64_avx_f2_0f.c
+++ b/src/dynarec/la64/dynarec_la64_avx_f2_0f.c
@@ -116,6 +116,20 @@ uintptr_t dynarec64_AVX_F2_0F(dynarec_la64_t* dyn, uintptr_t addr, uintptr_t ip,
                 VREPLVE_D(q0, q1, 0);
             }
             break;
+        case 0x70:
+            INST_NAME("VPSHUFLW Gx, Ex, Ib");
+            nextop = F8;
+            GETGY_empty_EY_xy(v0, v1, 1);
+            u8 = F8;
+            d0 = fpu_get_scratch(dyn);
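+            // PSHUFLW shuffles only the low four words. VSHUF4I.H reshuffles
+            // every 64-bit group, so VEXTRINS.D either restores the untouched
+            // high qword or inserts just the shuffled low qword.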
+            if (v0 != v1) {
+                VSHUF4Ixy(H, v0, v1, u8);
+                VEXTRINSxy(D, v0, v1, VEXTRINS_IMM_4_0(1, 1));
+            } else {
+                VSHUF4Ixy(H, d0, v1, u8);
+                VEXTRINSxy(D, v0, d0, VEXTRINS_IMM_4_0(0, 0));
+            }
+            break;
         case 0xF0:
             INST_NAME("VLDDQU Gx, Ex");
             nextop = F8;
diff --git a/src/dynarec/la64/dynarec_la64_avx_f3_0f.c b/src/dynarec/la64/dynarec_la64_avx_f3_0f.c
index 2415e6a5..b8332a4b 100644
--- a/src/dynarec/la64/dynarec_la64_avx_f3_0f.c
+++ b/src/dynarec/la64/dynarec_la64_avx_f3_0f.c
@@ -121,6 +121,20 @@ uintptr_t dynarec64_AVX_F3_0F(dynarec_la64_t* dyn, uintptr_t addr, uintptr_t ip,
                 }
             }
             break;
+        case 0x70:
+            INST_NAME("VPSHUFHW Gx, Ex, Ib");
+            nextop = F8;
+            GETGY_empty_EY_xy(v0, v1, 1);
+            u8 = F8;
+            d0 = fpu_get_scratch(dyn);
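+            // Mirror of VPSHUFLW: shuffle all words with VSHUF4I.H, then use
+            // VEXTRINS.D to keep the original low qword (or take only the
+            // shuffled high one when Gx aliases Ex).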
+            if (v0 != v1) {
+                VSHUF4Ixy(H, v0, v1, u8);
+                VEXTRINSxy(D, v0, v1, VEXTRINS_IMM_4_0(0, 0));
+            } else {
+                VSHUF4Ixy(H, d0, v1, u8);
+                VEXTRINSxy(D, v0, d0, VEXTRINS_IMM_4_0(1, 1));
+            }
+            break;
         case 0x7E:
             INST_NAME("VMOVD Gx, Ex");
             nextop = F8;
diff --git a/src/dynarec/la64/la64_emitter.h b/src/dynarec/la64/la64_emitter.h
index f8825d11..1bf7c79f 100644
--- a/src/dynarec/la64/la64_emitter.h
+++ b/src/dynarec/la64/la64_emitter.h
@@ -153,18 +153,18 @@ f24-f31  fs0-fs7   Static registers                Callee
 #define type_I26(opc, imm26)             ((opc) << 26 | ((imm26) & 0xFFFF) << 10 | ((imm26 >> 16) & 0x3FF))
 
 // Made-up formats not found in the spec.
-#define type_1RI13(opc, imm13, rd)     ((opc) << 18 | ((imm13) & 0x1FFFF) << 5 | (rd))
-#define type_2RI1(opc, imm1, rj, rd)   ((opc) << 11 | ((imm1) & 0x1) << 10 | (rj) << 5 | (rd))
-#define type_2RI2(opc, imm2, rj, rd)   ((opc) << 12 | ((imm2) & 0x3) << 10 | (rj) << 5 | (rd))
-#define type_2RI3(opc, imm3, rj, rd)   ((opc) << 13 | ((imm3) & 0x7) << 10 | (rj) << 5 | (rd))
-#define type_2RI4(opc, imm4, rj, rd)   ((opc) << 14 | ((imm4) & 0xF) << 10 | (rj) << 5 | (rd))
-#define type_2RI5(opc, imm5, rj, rd)   ((opc) << 15 | ((imm5) & 0x1F) << 10 | (rj) << 5 | (rd))
-#define type_2RI6(opc, imm6, rj, rd)   ((opc) << 16 | ((imm6) & 0x3F) << 10 | (rj) << 5 | (rd))
-#define type_2RI7(opc, imm7, rj, rd)   ((opc) << 17 | ((imm7) & 0x7F) << 10 | (rj) << 5 | (rd))
-#define type_2RI9(opc, imm9, rj, rd)   ((opc) << 19 | ((imm9) & 0x1FF) << 10 | (rj) << 5 | (rd))
-#define type_2RI10(opc, imm10, rj, rd) ((opc) << 20 | ((imm10) & 0x3FF) << 10 | (rj) << 5 | (rd))
-#define type_2RI11(opc, imm11, rj, rd) ((opc) << 21 | ((imm11) & 0x7FF) << 10 | (rj) << 5 | (rd))
-#define type_1RI5I5(opc, imm5, imm5_2, rd)   ((opc) << 15 | ((imm5) & 0x1F) << 10 | ((imm5_2) & 0x1F) << 5 | (rd))
+#define type_1RI13(opc, imm13, rd)         ((opc) << 18 | ((imm13) & 0x1FFFF) << 5 | (rd))
+#define type_2RI1(opc, imm1, rj, rd)       ((opc) << 11 | ((imm1) & 0x1) << 10 | (rj) << 5 | (rd))
+#define type_2RI2(opc, imm2, rj, rd)       ((opc) << 12 | ((imm2) & 0x3) << 10 | (rj) << 5 | (rd))
+#define type_2RI3(opc, imm3, rj, rd)       ((opc) << 13 | ((imm3) & 0x7) << 10 | (rj) << 5 | (rd))
+#define type_2RI4(opc, imm4, rj, rd)       ((opc) << 14 | ((imm4) & 0xF) << 10 | (rj) << 5 | (rd))
+#define type_2RI5(opc, imm5, rj, rd)       ((opc) << 15 | ((imm5) & 0x1F) << 10 | (rj) << 5 | (rd))
+#define type_2RI6(opc, imm6, rj, rd)       ((opc) << 16 | ((imm6) & 0x3F) << 10 | (rj) << 5 | (rd))
+#define type_2RI7(opc, imm7, rj, rd)       ((opc) << 17 | ((imm7) & 0x7F) << 10 | (rj) << 5 | (rd))
+#define type_2RI9(opc, imm9, rj, rd)       ((opc) << 19 | ((imm9) & 0x1FF) << 10 | (rj) << 5 | (rd))
+#define type_2RI10(opc, imm10, rj, rd)     ((opc) << 20 | ((imm10) & 0x3FF) << 10 | (rj) << 5 | (rd))
+#define type_2RI11(opc, imm11, rj, rd)     ((opc) << 21 | ((imm11) & 0x7FF) << 10 | (rj) << 5 | (rd))
+#define type_1RI5I5(opc, imm5, imm5_2, rd) ((opc) << 15 | ((imm5) & 0x1F) << 10 | ((imm5_2) & 0x1F) << 5 | (rd))
 
 // tmp = GR[rj][31:0] + GR[rk][31:0]
 // Gr[rd] = SignExtend(tmp[31:0], GRLEN)
@@ -1530,6 +1530,10 @@ LSX instruction starts with V, LASX instruction starts with XV.
 #define VFMSUB_D(vd, vj, vk, va)     EMIT(type_4R(0b000010010110, va, vk, vj, vd))
 #define VFNMADD_D(vd, vj, vk, va)    EMIT(type_4R(0b000010011010, va, vk, vj, vd))
 #define VFNMSUB_D(vd, vj, vk, va)    EMIT(type_4R(0b000010011110, va, vk, vj, vd))
+#define VANDI_B(vd, vj, imm8)        EMIT(type_2RI8(0b01110011110100, imm8, vj, vd))
+#define VORI_B(vd, vj, imm8)         EMIT(type_2RI8(0b01110011110101, imm8, vj, vd))
+#define VXORI_B(vd, vj, imm8)        EMIT(type_2RI8(0b01110011110110, imm8, vj, vd))
+#define VNORI_B(vd, vj, imm8)        EMIT(type_2RI8(0b01110011110111, imm8, vj, vd))
 
 #define XVADD_B(vd, vj, vk)          EMIT(type_3R(0b01110100000010100, vk, vj, vd))
 #define XVADD_H(vd, vj, vk)          EMIT(type_3R(0b01110100000010101, vk, vj, vd))
@@ -2251,8 +2255,8 @@ LSX instruction starts with V, LASX instruction starts with XV.
 #define XVFNMADD_D(xd, xj, xk, xa) EMIT(type_4R(0b000010101010, xa, xk, xj, xd))
 #define XVFNMSUB_D(xd, xj, xk, xa) EMIT(type_4R(0b000010101110, xa, xk, xj, xd))
 
-#define VMEPATMSK_V(vd, mode, uimm5)     EMIT(type_1RI5I5(0b01110010100110111, uimm5, mode, vd))
-#define XVMEPATMSK_V(xd, mode, uimm5)    EMIT(type_1RI5I5(0b01110110100110111, uimm5, mode, xd))
+#define VMEPATMSK_V(vd, mode, uimm5)  EMIT(type_1RI5I5(0b01110010100110111, uimm5, mode, vd))
+#define XVMEPATMSK_V(xd, mode, uimm5) EMIT(type_1RI5I5(0b01110110100110111, uimm5, mode, xd))
 
 ////////////////////////////////////////////////////////////////////////////////
 // (undocumented) LBT extension instructions
@@ -2975,7 +2979,7 @@ LSX instruction starts with V, LASX instruction starts with XV.
         if (vex.l) {                    \
             XVILVL_##width(vd, vj, vk); \
         } else {                        \
-            VILVL_##width(vd, vj, vk); \
+            VILVL_##width(vd, vj, vk);  \
         }                               \
     } while (0)
 
@@ -2984,7 +2988,7 @@ LSX instruction starts with V, LASX instruction starts with XV.
         if (vex.l) {                    \
             XVILVH_##width(vd, vj, vk); \
         } else {                        \
-            VILVH_##width(vd, vj, vk); \
+            VILVH_##width(vd, vj, vk);  \
         }                               \
     } while (0)
 
@@ -3015,4 +3019,66 @@ LSX instruction starts with V, LASX instruction starts with XV.
         }                               \
     } while (0)
 
+#define VSLTIxy(width, vd, vj, imm)      \
+    do {                                 \
+        if (vex.l) {                     \
+            XVSLTI_##width(vd, vj, imm); \
+        } else {                         \
+            VSLTI_##width(vd, vj, imm);  \
+        }                                \
+    } while (0)
+
+#define VBITSEL_Vxy(vd, vj, vk, va)     \
+    do {                                \
+        if (vex.l) {                    \
+            XVBITSEL_V(vd, vj, vk, va); \
+        } else {                        \
+            VBITSEL_V(vd, vj, vk, va);  \
+        }                               \
+    } while (0)
+
+#define VSHUF_Bxy(vd, vj, vk, va)     \
+    do {                              \
+        if (vex.l) {                  \
+            XVSHUF_B(vd, vj, vk, va); \
+        } else {                      \
+            VSHUF_B(vd, vj, vk, va);  \
+        }                             \
+    } while (0)
+
+#define VSHUFxy(width, vd, vj, vk)      \
+    do {                                \
+        if (vex.l) {                    \
+            XVSHUF_##width(vd, vj, vk); \
+        } else {                        \
+            VSHUF_##width(vd, vj, vk);  \
+        }                               \
+    } while (0)
+
+#define VSHUF4Ixy(width, vd, vj, imm)      \
+    do {                                   \
+        if (vex.l) {                       \
+            XVSHUF4I_##width(vd, vj, imm); \
+        } else {                           \
+            VSHUF4I_##width(vd, vj, imm);  \
+        }                                  \
+    } while (0)
+
+#define VEXTRINSxy(width, vd, vj, imm)      \
+    do {                                    \
+        if (vex.l) {                        \
+            XVEXTRINS_##width(vd, vj, imm); \
+        } else {                            \
+            VEXTRINS_##width(vd, vj, imm);  \
+        }                                   \
+    } while (0)
+
+#define VANDIxy(vd, vj, imm)       \
+    do {                           \
+        if (vex.l) {               \
+            XVANDI_B(vd, vj, imm); \
+        } else {                   \
+            VANDI_B(vd, vj, imm);  \
+        }                          \
+    } while (0)
 #endif //__ARM64_EMITTER_H__
diff --git a/src/dynarec/la64/la64_printer.c b/src/dynarec/la64/la64_printer.c
index 2d494a5e..60e3389c 100644
--- a/src/dynarec/la64/la64_printer.c
+++ b/src/dynarec/la64/la64_printer.c
@@ -7444,6 +7444,22 @@ const char* la64_print(uint32_t opcode, uintptr_t addr)
         snprintf(buff, sizeof(buff), "%-15s %s, %s, 0x%lx", "XVFRSTPI.H", XVt[Rd], XVt[Rj], imm);
         return buff;
     }
+    if (isMask(opcode, "01110011110100iiiiiiiijjjjjddddd", &a)) {
+        snprintf(buff, sizeof(buff), "%-15s %s, %s, 0x%lx", "VANDI.B", Vt[Rd], Vt[Rj], imm);
+        return buff;
+    }
+    if (isMask(opcode, "01110011110101iiiiiiiijjjjjddddd", &a)) {
+        snprintf(buff, sizeof(buff), "%-15s %s, %s, 0x%lx", "VORI.B", Vt[Rd], Vt[Rj], imm);
+        return buff;
+    }
+    if (isMask(opcode, "01110011110110iiiiiiiijjjjjddddd", &a)) {
+        snprintf(buff, sizeof(buff), "%-15s %s, %s, 0x%lx", "VXORI.B", Vt[Rd], Vt[Rj], imm);
+        return buff;
+    }
+    if (isMask(opcode, "01110011110111iiiiiiiijjjjjddddd", &a)) {
+        snprintf(buff, sizeof(buff), "%-15s %s, %s, 0x%lx", "VNORI.B", Vt[Rd], Vt[Rj], imm);
+        return buff;
+    }
     snprintf(buff, sizeof(buff), "%08X ???", __builtin_bswap32(opcode));
     return buff;
 }