Diffstat (limited to 'target/ppc/translate/vsx-impl.c.inc')
-rw-r--r--  target/ppc/translate/vsx-impl.c.inc  702
1 file changed, 405 insertions, 297 deletions
diff --git a/target/ppc/translate/vsx-impl.c.inc b/target/ppc/translate/vsx-impl.c.inc
index 57a7f73bba..c0e38060b4 100644
--- a/target/ppc/translate/vsx-impl.c.inc
+++ b/target/ppc/translate/vsx-impl.c.inc
@@ -1,23 +1,13 @@
 /***                           VSX extension                               ***/
 
-static inline void get_cpu_vsrh(TCGv_i64 dst, int n)
+static inline void get_cpu_vsr(TCGv_i64 dst, int n, bool high)
 {
-    tcg_gen_ld_i64(dst, cpu_env, vsr64_offset(n, true));
+    tcg_gen_ld_i64(dst, cpu_env, vsr64_offset(n, high));
 }
 
-static inline void get_cpu_vsrl(TCGv_i64 dst, int n)
+static inline void set_cpu_vsr(int n, TCGv_i64 src, bool high)
 {
-    tcg_gen_ld_i64(dst, cpu_env, vsr64_offset(n, false));
-}
-
-static inline void set_cpu_vsrh(int n, TCGv_i64 src)
-{
-    tcg_gen_st_i64(src, cpu_env, vsr64_offset(n, true));
-}
-
-static inline void set_cpu_vsrl(int n, TCGv_i64 src)
-{
-    tcg_gen_st_i64(src, cpu_env, vsr64_offset(n, false));
+    tcg_gen_st_i64(src, cpu_env, vsr64_offset(n, high));
 }
 
 static inline TCGv_ptr gen_vsr_ptr(int reg)
@@ -41,7 +31,7 @@ static void gen_##name(DisasContext *ctx)                     \
     EA = tcg_temp_new();                                      \
     gen_addr_reg_index(ctx, EA);                              \
     gen_qemu_##operation(ctx, t0, EA);                        \
-    set_cpu_vsrh(xT(ctx->opcode), t0);                        \
+    set_cpu_vsr(xT(ctx->opcode), t0, true);                   \
     /* NOTE: cpu_vsrl is undefined */                         \
     tcg_temp_free(EA);                                        \
     tcg_temp_free_i64(t0);                                    \
@@ -67,10 +57,10 @@ static void gen_lxvd2x(DisasContext *ctx)
     EA = tcg_temp_new();
     gen_addr_reg_index(ctx, EA);
     gen_qemu_ld64_i64(ctx, t0, EA);
-    set_cpu_vsrh(xT(ctx->opcode), t0);
+    set_cpu_vsr(xT(ctx->opcode), t0, true);
     tcg_gen_addi_tl(EA, EA, 8);
     gen_qemu_ld64_i64(ctx, t0, EA);
-    set_cpu_vsrl(xT(ctx->opcode), t0);
+    set_cpu_vsr(xT(ctx->opcode), t0, false);
     tcg_temp_free(EA);
     tcg_temp_free_i64(t0);
 }
@@ -109,8 +99,8 @@ static void gen_lxvw4x(DisasContext *ctx)
         tcg_gen_addi_tl(EA, EA, 8);
         tcg_gen_qemu_ld_i64(xtl, EA, ctx->mem_idx, MO_BEQ);
     }
-    set_cpu_vsrh(xT(ctx->opcode), xth);
-    set_cpu_vsrl(xT(ctx->opcode), xtl);
+    set_cpu_vsr(xT(ctx->opcode), xth, true);
+    set_cpu_vsr(xT(ctx->opcode), xtl, false);
     tcg_temp_free(EA);
     tcg_temp_free_i64(xth);
     tcg_temp_free_i64(xtl);
@@ -233,8 +223,8 @@ static void gen_lxvh8x(DisasContext *ctx)
     if (ctx->le_mode) {
         gen_bswap16x8(xth, xtl, xth, xtl);
     }
-    set_cpu_vsrh(xT(ctx->opcode), xth);
-    set_cpu_vsrl(xT(ctx->opcode), xtl);
+    set_cpu_vsr(xT(ctx->opcode), xth, true);
+    set_cpu_vsr(xT(ctx->opcode), xtl, false);
     tcg_temp_free(EA);
     tcg_temp_free_i64(xth);
     tcg_temp_free_i64(xtl);
@@ -258,121 +248,13 @@ static void gen_lxvb16x(DisasContext *ctx)
     tcg_gen_qemu_ld_i64(xth, EA, ctx->mem_idx, MO_BEQ);
     tcg_gen_addi_tl(EA, EA, 8);
     tcg_gen_qemu_ld_i64(xtl, EA, ctx->mem_idx, MO_BEQ);
-    set_cpu_vsrh(xT(ctx->opcode), xth);
-    set_cpu_vsrl(xT(ctx->opcode), xtl);
+    set_cpu_vsr(xT(ctx->opcode), xth, true);
+    set_cpu_vsr(xT(ctx->opcode), xtl, false);
     tcg_temp_free(EA);
     tcg_temp_free_i64(xth);
     tcg_temp_free_i64(xtl);
 }
 
-#define VSX_VECTOR_LOAD(name, op, indexed)                  \
-static void gen_##name(DisasContext *ctx)                   \
-{                                                           \
-    int xt;                                                 \
-    TCGv EA;                                                \
-    TCGv_i64 xth;                                           \
-    TCGv_i64 xtl;                                           \
-                                                            \
-    if (indexed) {                                          \
-        xt = xT(ctx->opcode);                               \
-    } else {                                                \
-        xt = DQxT(ctx->opcode);                             \
-    }                                                       \
-                                                            \
-    if (xt < 32) {                                          \
-        if (unlikely(!ctx->vsx_enabled)) {                  \
-            gen_exception(ctx, POWERPC_EXCP_VSXU);          \
-            return;                                         \
-        }                                                   \
-    } else {                                                \
-        if (unlikely(!ctx->altivec_enabled)) {              \
-            gen_exception(ctx, POWERPC_EXCP_VPU);           \
-            return;                                         \
-        }                                                   \
-    }                                                       \
-    xth = tcg_temp_new_i64();                               \
-    xtl = tcg_temp_new_i64();                               \
-    gen_set_access_type(ctx, ACCESS_INT);                   \
-    EA = tcg_temp_new();                                    \
-    if (indexed) {                                          \
-        gen_addr_reg_index(ctx, EA);                        \
-    } else {                                                \
-        gen_addr_imm_index(ctx, EA, 0x0F);                  \
-    }                                                       \
-    if (ctx->le_mode) {                                     \
-        tcg_gen_qemu_##op(xtl, EA, ctx->mem_idx, MO_LEQ);   \
-        set_cpu_vsrl(xt, xtl);                              \
-        tcg_gen_addi_tl(EA, EA, 8);                         \
-        tcg_gen_qemu_##op(xth, EA, ctx->mem_idx, MO_LEQ);   \
-        set_cpu_vsrh(xt, xth);                              \
-    } else {                                                \
-        tcg_gen_qemu_##op(xth, EA, ctx->mem_idx, MO_BEQ);   \
-        set_cpu_vsrh(xt, xth);                              \
-        tcg_gen_addi_tl(EA, EA, 8);                         \
-        tcg_gen_qemu_##op(xtl, EA, ctx->mem_idx, MO_BEQ);   \
-        set_cpu_vsrl(xt, xtl);                              \
-    }                                                       \
-    tcg_temp_free(EA);                                      \
-    tcg_temp_free_i64(xth);                                 \
-    tcg_temp_free_i64(xtl);                                 \
-}
-
-VSX_VECTOR_LOAD(lxv, ld_i64, 0)
-VSX_VECTOR_LOAD(lxvx, ld_i64, 1)
-
-#define VSX_VECTOR_STORE(name, op, indexed)                 \
-static void gen_##name(DisasContext *ctx)                   \
-{                                                           \
-    int xt;                                                 \
-    TCGv EA;                                                \
-    TCGv_i64 xth;                                           \
-    TCGv_i64 xtl;                                           \
-                                                            \
-    if (indexed) {                                          \
-        xt = xT(ctx->opcode);                               \
-    } else {                                                \
-        xt = DQxT(ctx->opcode);                             \
-    }                                                       \
-                                                            \
-    if (xt < 32) {                                          \
-        if (unlikely(!ctx->vsx_enabled)) {                  \
-            gen_exception(ctx, POWERPC_EXCP_VSXU);          \
-            return;                                         \
-        }                                                   \
-    } else {                                                \
-        if (unlikely(!ctx->altivec_enabled)) {              \
-            gen_exception(ctx, POWERPC_EXCP_VPU);           \
-            return;                                         \
-        }                                                   \
-    }                                                       \
-    xth = tcg_temp_new_i64();                               \
-    xtl = tcg_temp_new_i64();                               \
-    get_cpu_vsrh(xth, xt);                                  \
-    get_cpu_vsrl(xtl, xt);                                  \
-    gen_set_access_type(ctx, ACCESS_INT);                   \
-    EA = tcg_temp_new();                                    \
-    if (indexed) {                                          \
-        gen_addr_reg_index(ctx, EA);                        \
-    } else {                                                \
-        gen_addr_imm_index(ctx, EA, 0x0F);                  \
-    }                                                       \
-    if (ctx->le_mode) {                                     \
-        tcg_gen_qemu_##op(xtl, EA, ctx->mem_idx, MO_LEQ);   \
-        tcg_gen_addi_tl(EA, EA, 8);                         \
-        tcg_gen_qemu_##op(xth, EA, ctx->mem_idx, MO_LEQ);   \
-    } else {                                                \
-        tcg_gen_qemu_##op(xth, EA, ctx->mem_idx, MO_BEQ);   \
-        tcg_gen_addi_tl(EA, EA, 8);                         \
-        tcg_gen_qemu_##op(xtl, EA, ctx->mem_idx, MO_BEQ);   \
-    }                                                       \
-    tcg_temp_free(EA);                                      \
-    tcg_temp_free_i64(xth);                                 \
-    tcg_temp_free_i64(xtl);                                 \
-}
-
-VSX_VECTOR_STORE(stxv, st_i64, 0)
-VSX_VECTOR_STORE(stxvx, st_i64, 1)
-
 #ifdef TARGET_PPC64
 #define VSX_VECTOR_LOAD_STORE_LENGTH(name)                         \
 static void gen_##name(DisasContext *ctx)                          \
@@ -421,7 +303,7 @@ static void gen_##name(DisasContext *ctx)                         \
     EA = tcg_temp_new();                                          \
     gen_addr_imm_index(ctx, EA, 0x03);                            \
     gen_qemu_##operation(ctx, xth, EA);                           \
-    set_cpu_vsrh(rD(ctx->opcode) + 32, xth);                      \
+    set_cpu_vsr(rD(ctx->opcode) + 32, xth, true);                 \
     /* NOTE: cpu_vsrl is undefined */                             \
     tcg_temp_free(EA);                                            \
     tcg_temp_free_i64(xth);                                       \
@@ -443,7 +325,7 @@ static void gen_##name(DisasContext *ctx)                     \
     gen_set_access_type(ctx, ACCESS_INT);                     \
     EA = tcg_temp_new();                                      \
     gen_addr_reg_index(ctx, EA);                              \
-    get_cpu_vsrh(t0, xS(ctx->opcode));                        \
+    get_cpu_vsr(t0, xS(ctx->opcode), true);                   \
     gen_qemu_##operation(ctx, t0, EA);                        \
     tcg_temp_free(EA);                                        \
     tcg_temp_free_i64(t0);                                    \
@@ -468,10 +350,10 @@ static void gen_stxvd2x(DisasContext *ctx)
     gen_set_access_type(ctx, ACCESS_INT);
     EA = tcg_temp_new();
     gen_addr_reg_index(ctx, EA);
-    get_cpu_vsrh(t0, xS(ctx->opcode));
+    get_cpu_vsr(t0, xS(ctx->opcode), true);
     gen_qemu_st64_i64(ctx, t0, EA);
     tcg_gen_addi_tl(EA, EA, 8);
-    get_cpu_vsrl(t0, xS(ctx->opcode));
+    get_cpu_vsr(t0, xS(ctx->opcode), false);
     gen_qemu_st64_i64(ctx, t0, EA);
     tcg_temp_free(EA);
     tcg_temp_free_i64(t0);
@@ -489,8 +371,8 @@ static void gen_stxvw4x(DisasContext *ctx)
     }
     xsh = tcg_temp_new_i64();
     xsl = tcg_temp_new_i64();
-    get_cpu_vsrh(xsh, xS(ctx->opcode));
-    get_cpu_vsrl(xsl, xS(ctx->opcode));
+    get_cpu_vsr(xsh, xS(ctx->opcode), true);
+    get_cpu_vsr(xsl, xS(ctx->opcode), false);
     gen_set_access_type(ctx, ACCESS_INT);
     EA = tcg_temp_new();
     gen_addr_reg_index(ctx, EA);
@@ -529,8 +411,8 @@ static void gen_stxvh8x(DisasContext *ctx)
     }
     xsh = tcg_temp_new_i64();
     xsl = tcg_temp_new_i64();
-    get_cpu_vsrh(xsh, xS(ctx->opcode));
-    get_cpu_vsrl(xsl, xS(ctx->opcode));
+    get_cpu_vsr(xsh, xS(ctx->opcode), true);
+    get_cpu_vsr(xsl, xS(ctx->opcode), false);
     gen_set_access_type(ctx, ACCESS_INT);
     EA = tcg_temp_new();
     gen_addr_reg_index(ctx, EA);
@@ -566,8 +448,8 @@ static void gen_stxvb16x(DisasContext *ctx)
     }
     xsh = tcg_temp_new_i64();
     xsl = tcg_temp_new_i64();
-    get_cpu_vsrh(xsh, xS(ctx->opcode));
-    get_cpu_vsrl(xsl, xS(ctx->opcode));
+    get_cpu_vsr(xsh, xS(ctx->opcode), true);
+    get_cpu_vsr(xsl, xS(ctx->opcode), false);
     gen_set_access_type(ctx, ACCESS_INT);
     EA = tcg_temp_new();
     gen_addr_reg_index(ctx, EA);
@@ -590,7 +472,7 @@ static void gen_##name(DisasContext *ctx)                         \
         return;                                                   \
     }                                                             \
     xth = tcg_temp_new_i64();                                     \
-    get_cpu_vsrh(xth, rD(ctx->opcode) + 32);                      \
+    get_cpu_vsr(xth, rD(ctx->opcode) + 32, true);                 \
     gen_set_access_type(ctx, ACCESS_INT);                         \
     EA = tcg_temp_new();                                          \
     gen_addr_imm_index(ctx, EA, 0x03);                            \
@@ -618,7 +500,7 @@ static void gen_mfvsrwz(DisasContext *ctx)
     }
     TCGv_i64 tmp = tcg_temp_new_i64();
     TCGv_i64 xsh = tcg_temp_new_i64();
-    get_cpu_vsrh(xsh, xS(ctx->opcode));
+    get_cpu_vsr(xsh, xS(ctx->opcode), true);
     tcg_gen_ext32u_i64(tmp, xsh);
     tcg_gen_trunc_i64_tl(cpu_gpr[rA(ctx->opcode)], tmp);
     tcg_temp_free_i64(tmp);
@@ -642,7 +524,7 @@ static void gen_mtvsrwa(DisasContext *ctx)
     TCGv_i64 xsh = tcg_temp_new_i64();
     tcg_gen_extu_tl_i64(tmp, cpu_gpr[rA(ctx->opcode)]);
     tcg_gen_ext32s_i64(xsh, tmp);
-    set_cpu_vsrh(xT(ctx->opcode), xsh);
+    set_cpu_vsr(xT(ctx->opcode), xsh, true);
     tcg_temp_free_i64(tmp);
     tcg_temp_free_i64(xsh);
 }
@@ -664,7 +546,7 @@ static void gen_mtvsrwz(DisasContext *ctx)
     TCGv_i64 xsh = tcg_temp_new_i64();
     tcg_gen_extu_tl_i64(tmp, cpu_gpr[rA(ctx->opcode)]);
     tcg_gen_ext32u_i64(xsh, tmp);
-    set_cpu_vsrh(xT(ctx->opcode), xsh);
+    set_cpu_vsr(xT(ctx->opcode), xsh, true);
     tcg_temp_free_i64(tmp);
     tcg_temp_free_i64(xsh);
 }
@@ -685,7 +567,7 @@ static void gen_mfvsrd(DisasContext *ctx)
         }
     }
     t0 = tcg_temp_new_i64();
-    get_cpu_vsrh(t0, xS(ctx->opcode));
+    get_cpu_vsr(t0, xS(ctx->opcode), true);
     tcg_gen_mov_i64(cpu_gpr[rA(ctx->opcode)], t0);
     tcg_temp_free_i64(t0);
 }
@@ -706,7 +588,7 @@ static void gen_mtvsrd(DisasContext *ctx)
     }
     t0 = tcg_temp_new_i64();
     tcg_gen_mov_i64(t0, cpu_gpr[rA(ctx->opcode)]);
-    set_cpu_vsrh(xT(ctx->opcode), t0);
+    set_cpu_vsr(xT(ctx->opcode), t0, true);
     tcg_temp_free_i64(t0);
 }
 
@@ -725,7 +607,7 @@ static void gen_mfvsrld(DisasContext *ctx)
         }
     }
     t0 = tcg_temp_new_i64();
-    get_cpu_vsrl(t0, xS(ctx->opcode));
+    get_cpu_vsr(t0, xS(ctx->opcode), false);
     tcg_gen_mov_i64(cpu_gpr[rA(ctx->opcode)], t0);
     tcg_temp_free_i64(t0);
 }
@@ -751,10 +633,10 @@ static void gen_mtvsrdd(DisasContext *ctx)
     } else {
         tcg_gen_mov_i64(t0, cpu_gpr[rA(ctx->opcode)]);
     }
-    set_cpu_vsrh(xT(ctx->opcode), t0);
+    set_cpu_vsr(xT(ctx->opcode), t0, true);
 
     tcg_gen_mov_i64(t0, cpu_gpr[rB(ctx->opcode)]);
-    set_cpu_vsrl(xT(ctx->opcode), t0);
+    set_cpu_vsr(xT(ctx->opcode), t0, false);
     tcg_temp_free_i64(t0);
 }
 
@@ -776,8 +658,8 @@ static void gen_mtvsrws(DisasContext *ctx)
     t0 = tcg_temp_new_i64();
     tcg_gen_deposit_i64(t0, cpu_gpr[rA(ctx->opcode)],
                         cpu_gpr[rA(ctx->opcode)], 32, 32);
-    set_cpu_vsrl(xT(ctx->opcode), t0);
-    set_cpu_vsrh(xT(ctx->opcode), t0);
+    set_cpu_vsr(xT(ctx->opcode), t0, false);
+    set_cpu_vsr(xT(ctx->opcode), t0, true);
     tcg_temp_free_i64(t0);
 }
 
@@ -797,33 +679,25 @@ static void gen_xxpermdi(DisasContext *ctx)
 
     if (unlikely((xT(ctx->opcode) == xA(ctx->opcode)) ||
                  (xT(ctx->opcode) == xB(ctx->opcode)))) {
-        if ((DM(ctx->opcode) & 2) == 0) {
-            get_cpu_vsrh(xh, xA(ctx->opcode));
-        } else {
-            get_cpu_vsrl(xh, xA(ctx->opcode));
-        }
-        if ((DM(ctx->opcode) & 1) == 0) {
-            get_cpu_vsrh(xl, xB(ctx->opcode));
-        } else {
-            get_cpu_vsrl(xl, xB(ctx->opcode));
-        }
+        get_cpu_vsr(xh, xA(ctx->opcode), (DM(ctx->opcode) & 2) == 0);
+        get_cpu_vsr(xl, xB(ctx->opcode), (DM(ctx->opcode) & 1) == 0);
 
-        set_cpu_vsrh(xT(ctx->opcode), xh);
-        set_cpu_vsrl(xT(ctx->opcode), xl);
+        set_cpu_vsr(xT(ctx->opcode), xh, true);
+        set_cpu_vsr(xT(ctx->opcode), xl, false);
     } else {
         if ((DM(ctx->opcode) & 2) == 0) {
-            get_cpu_vsrh(xh, xA(ctx->opcode));
-            set_cpu_vsrh(xT(ctx->opcode), xh);
+            get_cpu_vsr(xh, xA(ctx->opcode), true);
+            set_cpu_vsr(xT(ctx->opcode), xh, true);
         } else {
-            get_cpu_vsrl(xh, xA(ctx->opcode));
-            set_cpu_vsrh(xT(ctx->opcode), xh);
+            get_cpu_vsr(xh, xA(ctx->opcode), false);
+            set_cpu_vsr(xT(ctx->opcode), xh, true);
         }
         if ((DM(ctx->opcode) & 1) == 0) {
-            get_cpu_vsrh(xl, xB(ctx->opcode));
-            set_cpu_vsrl(xT(ctx->opcode), xl);
+            get_cpu_vsr(xl, xB(ctx->opcode), true);
+            set_cpu_vsr(xT(ctx->opcode), xl, false);
         } else {
-            get_cpu_vsrl(xl, xB(ctx->opcode));
-            set_cpu_vsrl(xT(ctx->opcode), xl);
+            get_cpu_vsr(xl, xB(ctx->opcode), false);
+            set_cpu_vsr(xT(ctx->opcode), xl, false);
         }
     }
     tcg_temp_free_i64(xh);
@@ -847,7 +721,7 @@ static void glue(gen_, name)(DisasContext *ctx)                   \
         }                                                         \
         xb = tcg_temp_new_i64();                                  \
         sgm = tcg_temp_new_i64();                                 \
-        get_cpu_vsrh(xb, xB(ctx->opcode));                        \
+        get_cpu_vsr(xb, xB(ctx->opcode), true);                   \
         tcg_gen_movi_i64(sgm, sgn_mask);                          \
         switch (op) {                                             \
             case OP_ABS: {                                        \
@@ -864,7 +738,7 @@ static void glue(gen_, name)(DisasContext *ctx)                   \
             }                                                     \
             case OP_CPSGN: {                                      \
                 TCGv_i64 xa = tcg_temp_new_i64();                 \
-                get_cpu_vsrh(xa, xA(ctx->opcode));                \
+                get_cpu_vsr(xa, xA(ctx->opcode), true);           \
                 tcg_gen_and_i64(xa, xa, sgm);                     \
                 tcg_gen_andc_i64(xb, xb, sgm);                    \
                 tcg_gen_or_i64(xb, xb, xa);                       \
@@ -872,7 +746,7 @@ static void glue(gen_, name)(DisasContext *ctx)                   \
                 break;                                            \
             }                                                     \
         }                                                         \
-        set_cpu_vsrh(xT(ctx->opcode), xb);                        \
+        set_cpu_vsr(xT(ctx->opcode), xb, true);                   \
         tcg_temp_free_i64(xb);                                    \
         tcg_temp_free_i64(sgm);                                   \
     }
@@ -898,8 +772,8 @@ static void glue(gen_, name)(DisasContext *ctx)                   \
     xbl = tcg_temp_new_i64();                                     \
     sgm = tcg_temp_new_i64();                                     \
     tmp = tcg_temp_new_i64();                                     \
-    get_cpu_vsrh(xbh, xb);                                        \
-    get_cpu_vsrl(xbl, xb);                                        \
+    get_cpu_vsr(xbh, xb, true);                                   \
+    get_cpu_vsr(xbl, xb, false);                                  \
     tcg_gen_movi_i64(sgm, sgn_mask);                              \
     switch (op) {                                                 \
     case OP_ABS:                                                  \
@@ -914,15 +788,15 @@ static void glue(gen_, name)(DisasContext *ctx)                   \
     case OP_CPSGN:                                                \
         xah = tcg_temp_new_i64();                                 \
         xa = rA(ctx->opcode) + 32;                                \
-        get_cpu_vsrh(tmp, xa);                                    \
+        get_cpu_vsr(tmp, xa, true);                               \
         tcg_gen_and_i64(xah, tmp, sgm);                           \
         tcg_gen_andc_i64(xbh, xbh, sgm);                          \
         tcg_gen_or_i64(xbh, xbh, xah);                            \
         tcg_temp_free_i64(xah);                                   \
         break;                                                    \
     }                                                             \
-    set_cpu_vsrh(xt, xbh);                                        \
-    set_cpu_vsrl(xt, xbl);                                        \
+    set_cpu_vsr(xt, xbh, true);                                   \
+    set_cpu_vsr(xt, xbl, false);                                  \
     tcg_temp_free_i64(xbl);                                       \
     tcg_temp_free_i64(xbh);                                       \
     tcg_temp_free_i64(sgm);                                       \
@@ -945,8 +819,8 @@ static void glue(gen_, name)(DisasContext *ctx)                  \
         xbh = tcg_temp_new_i64();                                \
         xbl = tcg_temp_new_i64();                                \
         sgm = tcg_temp_new_i64();                                \
-        get_cpu_vsrh(xbh, xB(ctx->opcode));                      \
-        get_cpu_vsrl(xbl, xB(ctx->opcode));                      \
+        get_cpu_vsr(xbh, xB(ctx->opcode), true);                 \
+        get_cpu_vsr(xbl, xB(ctx->opcode), false);                \
         tcg_gen_movi_i64(sgm, sgn_mask);                         \
         switch (op) {                                            \
             case OP_ABS: {                                       \
@@ -967,8 +841,8 @@ static void glue(gen_, name)(DisasContext *ctx)                  \
             case OP_CPSGN: {                                     \
                 TCGv_i64 xah = tcg_temp_new_i64();               \
                 TCGv_i64 xal = tcg_temp_new_i64();               \
-                get_cpu_vsrh(xah, xA(ctx->opcode));              \
-                get_cpu_vsrl(xal, xA(ctx->opcode));              \
+                get_cpu_vsr(xah, xA(ctx->opcode), true);         \
+                get_cpu_vsr(xal, xA(ctx->opcode), false);        \
                 tcg_gen_and_i64(xah, xah, sgm);                  \
                 tcg_gen_and_i64(xal, xal, sgm);                  \
                 tcg_gen_andc_i64(xbh, xbh, sgm);                 \
@@ -980,8 +854,8 @@ static void glue(gen_, name)(DisasContext *ctx)                  \
                 break;                                           \
             }                                                    \
         }                                                        \
-        set_cpu_vsrh(xT(ctx->opcode), xbh);                      \
-        set_cpu_vsrl(xT(ctx->opcode), xbl);                      \
+        set_cpu_vsr(xT(ctx->opcode), xbh, true);                 \
+        set_cpu_vsr(xT(ctx->opcode), xbl, false);                \
         tcg_temp_free_i64(xbh);                                  \
         tcg_temp_free_i64(xbl);                                  \
         tcg_temp_free_i64(sgm);                                  \
@@ -1193,9 +1067,9 @@ static void gen_##name(DisasContext *ctx)                     \
     }                                                         \
     t0 = tcg_temp_new_i64();                                  \
     t1 = tcg_temp_new_i64();                                  \
-    get_cpu_vsrh(t0, xB(ctx->opcode));                        \
+    get_cpu_vsr(t0, xB(ctx->opcode), true);                   \
     gen_helper_##name(t1, cpu_env, t0);                       \
-    set_cpu_vsrh(xT(ctx->opcode), t1);                        \
+    set_cpu_vsr(xT(ctx->opcode), t1, true);                   \
     tcg_temp_free_i64(t0);                                    \
     tcg_temp_free_i64(t1);                                    \
 }
@@ -1390,13 +1264,13 @@ static void gen_xxbrd(DisasContext *ctx)
     xtl = tcg_temp_new_i64();
     xbh = tcg_temp_new_i64();
     xbl = tcg_temp_new_i64();
-    get_cpu_vsrh(xbh, xB(ctx->opcode));
-    get_cpu_vsrl(xbl, xB(ctx->opcode));
+    get_cpu_vsr(xbh, xB(ctx->opcode), true);
+    get_cpu_vsr(xbl, xB(ctx->opcode), false);
 
     tcg_gen_bswap64_i64(xth, xbh);
     tcg_gen_bswap64_i64(xtl, xbl);
-    set_cpu_vsrh(xT(ctx->opcode), xth);
-    set_cpu_vsrl(xT(ctx->opcode), xtl);
+    set_cpu_vsr(xT(ctx->opcode), xth, true);
+    set_cpu_vsr(xT(ctx->opcode), xtl, false);
 
     tcg_temp_free_i64(xth);
     tcg_temp_free_i64(xtl);
@@ -1419,12 +1293,12 @@ static void gen_xxbrh(DisasContext *ctx)
     xtl = tcg_temp_new_i64();
     xbh = tcg_temp_new_i64();
     xbl = tcg_temp_new_i64();
-    get_cpu_vsrh(xbh, xB(ctx->opcode));
-    get_cpu_vsrl(xbl, xB(ctx->opcode));
+    get_cpu_vsr(xbh, xB(ctx->opcode), true);
+    get_cpu_vsr(xbl, xB(ctx->opcode), false);
 
     gen_bswap16x8(xth, xtl, xbh, xbl);
-    set_cpu_vsrh(xT(ctx->opcode), xth);
-    set_cpu_vsrl(xT(ctx->opcode), xtl);
+    set_cpu_vsr(xT(ctx->opcode), xth, true);
+    set_cpu_vsr(xT(ctx->opcode), xtl, false);
 
     tcg_temp_free_i64(xth);
     tcg_temp_free_i64(xtl);
@@ -1448,15 +1322,15 @@ static void gen_xxbrq(DisasContext *ctx)
     xtl = tcg_temp_new_i64();
     xbh = tcg_temp_new_i64();
     xbl = tcg_temp_new_i64();
-    get_cpu_vsrh(xbh, xB(ctx->opcode));
-    get_cpu_vsrl(xbl, xB(ctx->opcode));
+    get_cpu_vsr(xbh, xB(ctx->opcode), true);
+    get_cpu_vsr(xbl, xB(ctx->opcode), false);
     t0 = tcg_temp_new_i64();
 
     tcg_gen_bswap64_i64(t0, xbl);
     tcg_gen_bswap64_i64(xtl, xbh);
-    set_cpu_vsrl(xT(ctx->opcode), xtl);
+    set_cpu_vsr(xT(ctx->opcode), xtl, false);
     tcg_gen_mov_i64(xth, t0);
-    set_cpu_vsrh(xT(ctx->opcode), xth);
+    set_cpu_vsr(xT(ctx->opcode), xth, true);
 
     tcg_temp_free_i64(t0);
     tcg_temp_free_i64(xth);
@@ -1480,12 +1354,12 @@ static void gen_xxbrw(DisasContext *ctx)
     xtl = tcg_temp_new_i64();
     xbh = tcg_temp_new_i64();
     xbl = tcg_temp_new_i64();
-    get_cpu_vsrh(xbh, xB(ctx->opcode));
-    get_cpu_vsrl(xbl, xB(ctx->opcode));
+    get_cpu_vsr(xbh, xB(ctx->opcode), true);
+    get_cpu_vsr(xbl, xB(ctx->opcode), false);
 
     gen_bswap32x4(xth, xtl, xbh, xbl);
-    set_cpu_vsrh(xT(ctx->opcode), xth);
-    set_cpu_vsrl(xT(ctx->opcode), xtl);
+    set_cpu_vsr(xT(ctx->opcode), xth, true);
+    set_cpu_vsr(xT(ctx->opcode), xtl, false);
 
     tcg_temp_free_i64(xth);
     tcg_temp_free_i64(xtl);
@@ -1527,23 +1401,16 @@ static void glue(gen_, name)(DisasContext *ctx)             \
         b0 = tcg_temp_new_i64();                            \
         b1 = tcg_temp_new_i64();                            \
         tmp = tcg_temp_new_i64();                           \
-        if (high) {                                         \
-            get_cpu_vsrh(a0, xA(ctx->opcode));              \
-            get_cpu_vsrh(a1, xA(ctx->opcode));              \
-            get_cpu_vsrh(b0, xB(ctx->opcode));              \
-            get_cpu_vsrh(b1, xB(ctx->opcode));              \
-        } else {                                            \
-            get_cpu_vsrl(a0, xA(ctx->opcode));              \
-            get_cpu_vsrl(a1, xA(ctx->opcode));              \
-            get_cpu_vsrl(b0, xB(ctx->opcode));              \
-            get_cpu_vsrl(b1, xB(ctx->opcode));              \
-        }                                                   \
+        get_cpu_vsr(a0, xA(ctx->opcode), high);             \
+        get_cpu_vsr(a1, xA(ctx->opcode), high);             \
+        get_cpu_vsr(b0, xB(ctx->opcode), high);             \
+        get_cpu_vsr(b1, xB(ctx->opcode), high);             \
         tcg_gen_shri_i64(a0, a0, 32);                       \
         tcg_gen_shri_i64(b0, b0, 32);                       \
         tcg_gen_deposit_i64(tmp, b0, a0, 32, 32);           \
-        set_cpu_vsrh(xT(ctx->opcode), tmp);                 \
+        set_cpu_vsr(xT(ctx->opcode), tmp, true);            \
         tcg_gen_deposit_i64(tmp, b1, a1, 32, 32);           \
-        set_cpu_vsrl(xT(ctx->opcode), tmp);                 \
+        set_cpu_vsr(xT(ctx->opcode), tmp, false);           \
         tcg_temp_free_i64(a0);                              \
         tcg_temp_free_i64(a1);                              \
         tcg_temp_free_i64(b0);                              \
@@ -1569,47 +1436,114 @@ static void gen_xxsel(DisasContext *ctx)
                         vsr_full_offset(rb), vsr_full_offset(ra), 16, 16);
 }
 
-static void gen_xxspltw(DisasContext *ctx)
+static bool trans_XXSPLTW(DisasContext *ctx, arg_XX2 *a)
 {
-    int rt = xT(ctx->opcode);
-    int rb = xB(ctx->opcode);
-    int uim = UIM(ctx->opcode);
     int tofs, bofs;
 
-    if (unlikely(!ctx->vsx_enabled)) {
-        gen_exception(ctx, POWERPC_EXCP_VSXU);
-        return;
-    }
+    REQUIRE_VSX(ctx);
 
-    tofs = vsr_full_offset(rt);
-    bofs = vsr_full_offset(rb);
-    bofs += uim << MO_32;
+    tofs = vsr_full_offset(a->xt);
+    bofs = vsr_full_offset(a->xb);
+    bofs += a->uim << MO_32;
 #ifndef HOST_WORDS_BIG_ENDIAN
     bofs ^= 8 | 4;
 #endif
 
     tcg_gen_gvec_dup_mem(MO_32, tofs, bofs, 16, 16);
+    return true;
 }
 
 #define pattern(x) (((x) & 0xff) * (~(uint64_t)0 / 0xff))
 
-static void gen_xxspltib(DisasContext *ctx)
+static bool trans_XXSPLTIB(DisasContext *ctx, arg_X_imm8 *a)
 {
-    uint8_t uim8 = IMM8(ctx->opcode);
-    int rt = xT(ctx->opcode);
+    if (a->xt < 32) {
+        REQUIRE_VSX(ctx);
+    } else {
+        REQUIRE_VECTOR(ctx);
+    }
+    tcg_gen_gvec_dup_imm(MO_8, vsr_full_offset(a->xt), 16, 16, a->imm);
+    return true;
+}
 
-    if (rt < 32) {
-        if (unlikely(!ctx->vsx_enabled)) {
-            gen_exception(ctx, POWERPC_EXCP_VSXU);
-            return;
-        }
+static bool trans_XXSPLTIW(DisasContext *ctx, arg_8RR_D *a)
+{
+    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+    REQUIRE_VSX(ctx);
+
+    tcg_gen_gvec_dup_imm(MO_32, vsr_full_offset(a->xt), 16, 16, a->si);
+
+    return true;
+}
+
+static bool trans_XXSPLTIDP(DisasContext *ctx, arg_8RR_D *a)
+{
+    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+    REQUIRE_VSX(ctx);
+
+    tcg_gen_gvec_dup_imm(MO_64, vsr_full_offset(a->xt), 16, 16,
+                         helper_todouble(a->si));
+    return true;
+}
+
+static bool trans_XXSPLTI32DX(DisasContext *ctx, arg_8RR_D_IX *a)
+{
+    TCGv_i32 imm;
+
+    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+    REQUIRE_VSX(ctx);
+
+    imm = tcg_constant_i32(a->si);
+
+    tcg_gen_st_i32(imm, cpu_env,
+        offsetof(CPUPPCState, vsr[a->xt].VsrW(0 + a->ix)));
+    tcg_gen_st_i32(imm, cpu_env,
+        offsetof(CPUPPCState, vsr[a->xt].VsrW(2 + a->ix)));
+
+    return true;
+}
+
+static bool trans_LXVKQ(DisasContext *ctx, arg_X_uim5 *a)
+{
+    static const uint64_t values[32] = {
+        0, /* Unspecified */
+        0x3FFF000000000000llu, /* QP +1.0 */
+        0x4000000000000000llu, /* QP +2.0 */
+        0x4000800000000000llu, /* QP +3.0 */
+        0x4001000000000000llu, /* QP +4.0 */
+        0x4001400000000000llu, /* QP +5.0 */
+        0x4001800000000000llu, /* QP +6.0 */
+        0x4001C00000000000llu, /* QP +7.0 */
+        0x7FFF000000000000llu, /* QP +Inf */
+        0x7FFF800000000000llu, /* QP dQNaN */
+        0, /* Unspecified */
+        0, /* Unspecified */
+        0, /* Unspecified */
+        0, /* Unspecified */
+        0, /* Unspecified */
+        0, /* Unspecified */
+        0x8000000000000000llu, /* QP -0.0 */
+        0xBFFF000000000000llu, /* QP -1.0 */
+        0xC000000000000000llu, /* QP -2.0 */
+        0xC000800000000000llu, /* QP -3.0 */
+        0xC001000000000000llu, /* QP -4.0 */
+        0xC001400000000000llu, /* QP -5.0 */
+        0xC001800000000000llu, /* QP -6.0 */
+        0xC001C00000000000llu, /* QP -7.0 */
+        0xFFFF000000000000llu, /* QP -Inf */
+    };
+
+    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+    REQUIRE_VSX(ctx);
+
+    if (values[a->uim]) {
+        set_cpu_vsr(a->xt, tcg_constant_i64(0x0), false);
+        set_cpu_vsr(a->xt, tcg_constant_i64(values[a->uim]), true);
     } else {
-        if (unlikely(!ctx->altivec_enabled)) {
-            gen_exception(ctx, POWERPC_EXCP_VPU);
-            return;
-        }
+        gen_invalid(ctx);
     }
-    tcg_gen_gvec_dup_imm(MO_8, vsr_full_offset(rt), 16, 16, uim8);
+
+    return true;
 }
 
 static void gen_xxsldwi(DisasContext *ctx)
@@ -1624,40 +1558,40 @@ static void gen_xxsldwi(DisasContext *ctx)
 
     switch (SHW(ctx->opcode)) {
         case 0: {
-            get_cpu_vsrh(xth, xA(ctx->opcode));
-            get_cpu_vsrl(xtl, xA(ctx->opcode));
+            get_cpu_vsr(xth, xA(ctx->opcode), true);
+            get_cpu_vsr(xtl, xA(ctx->opcode), false);
             break;
         }
         case 1: {
             TCGv_i64 t0 = tcg_temp_new_i64();
-            get_cpu_vsrh(xth, xA(ctx->opcode));
+            get_cpu_vsr(xth, xA(ctx->opcode), true);
             tcg_gen_shli_i64(xth, xth, 32);
-            get_cpu_vsrl(t0, xA(ctx->opcode));
+            get_cpu_vsr(t0, xA(ctx->opcode), false);
             tcg_gen_shri_i64(t0, t0, 32);
             tcg_gen_or_i64(xth, xth, t0);
-            get_cpu_vsrl(xtl, xA(ctx->opcode));
+            get_cpu_vsr(xtl, xA(ctx->opcode), false);
             tcg_gen_shli_i64(xtl, xtl, 32);
-            get_cpu_vsrh(t0, xB(ctx->opcode));
+            get_cpu_vsr(t0, xB(ctx->opcode), true);
             tcg_gen_shri_i64(t0, t0, 32);
             tcg_gen_or_i64(xtl, xtl, t0);
             tcg_temp_free_i64(t0);
             break;
         }
         case 2: {
-            get_cpu_vsrl(xth, xA(ctx->opcode));
-            get_cpu_vsrh(xtl, xB(ctx->opcode));
+            get_cpu_vsr(xth, xA(ctx->opcode), false);
+            get_cpu_vsr(xtl, xB(ctx->opcode), true);
             break;
         }
         case 3: {
             TCGv_i64 t0 = tcg_temp_new_i64();
-            get_cpu_vsrl(xth, xA(ctx->opcode));
+            get_cpu_vsr(xth, xA(ctx->opcode), false);
             tcg_gen_shli_i64(xth, xth, 32);
-            get_cpu_vsrh(t0, xB(ctx->opcode));
+            get_cpu_vsr(t0, xB(ctx->opcode), true);
             tcg_gen_shri_i64(t0, t0, 32);
             tcg_gen_or_i64(xth, xth, t0);
-            get_cpu_vsrh(xtl, xB(ctx->opcode));
+            get_cpu_vsr(xtl, xB(ctx->opcode), true);
             tcg_gen_shli_i64(xtl, xtl, 32);
-            get_cpu_vsrl(t0, xB(ctx->opcode));
+            get_cpu_vsr(t0, xB(ctx->opcode), false);
             tcg_gen_shri_i64(t0, t0, 32);
             tcg_gen_or_i64(xtl, xtl, t0);
             tcg_temp_free_i64(t0);
@@ -1665,8 +1599,8 @@ static void gen_xxsldwi(DisasContext *ctx)
         }
     }
 
-    set_cpu_vsrh(xT(ctx->opcode), xth);
-    set_cpu_vsrl(xT(ctx->opcode), xtl);
+    set_cpu_vsr(xT(ctx->opcode), xth, true);
+    set_cpu_vsr(xT(ctx->opcode), xtl, false);
 
     tcg_temp_free_i64(xth);
     tcg_temp_free_i64(xtl);
@@ -1694,8 +1628,8 @@ static void gen_##name(DisasContext *ctx)                       \
      */                                                         \
     if (uimm > 15) {                                            \
         tcg_gen_movi_i64(t1, 0);                                \
-        set_cpu_vsrh(xT(ctx->opcode), t1);                      \
-        set_cpu_vsrl(xT(ctx->opcode), t1);                      \
+        set_cpu_vsr(xT(ctx->opcode), t1, true);                 \
+        set_cpu_vsr(xT(ctx->opcode), t1, false);                \
         return;                                                 \
     }                                                           \
     tcg_gen_movi_i32(t0, uimm);                                 \
@@ -1719,7 +1653,7 @@ static void gen_xsxexpdp(DisasContext *ctx)
         return;
     }
     t0 = tcg_temp_new_i64();
-    get_cpu_vsrh(t0, xB(ctx->opcode));
+    get_cpu_vsr(t0, xB(ctx->opcode), true);
     tcg_gen_extract_i64(rt, t0, 52, 11);
     tcg_temp_free_i64(t0);
 }
@@ -1737,12 +1671,12 @@ static void gen_xsxexpqp(DisasContext *ctx)
     xth = tcg_temp_new_i64();
     xtl = tcg_temp_new_i64();
     xbh = tcg_temp_new_i64();
-    get_cpu_vsrh(xbh, rB(ctx->opcode) + 32);
+    get_cpu_vsr(xbh, rB(ctx->opcode) + 32, true);
 
     tcg_gen_extract_i64(xth, xbh, 48, 15);
-    set_cpu_vsrh(rD(ctx->opcode) + 32, xth);
+    set_cpu_vsr(rD(ctx->opcode) + 32, xth, true);
     tcg_gen_movi_i64(xtl, 0);
-    set_cpu_vsrl(rD(ctx->opcode) + 32, xtl);
+    set_cpu_vsr(rD(ctx->opcode) + 32, xtl, false);
 
     tcg_temp_free_i64(xbh);
     tcg_temp_free_i64(xth);
@@ -1766,7 +1700,7 @@ static void gen_xsiexpdp(DisasContext *ctx)
     tcg_gen_andi_i64(t0, rb, 0x7FF);
     tcg_gen_shli_i64(t0, t0, 52);
     tcg_gen_or_i64(xth, xth, t0);
-    set_cpu_vsrh(xT(ctx->opcode), xth);
+    set_cpu_vsr(xT(ctx->opcode), xth, true);
     /* dword[1] is undefined */
     tcg_temp_free_i64(t0);
     tcg_temp_free_i64(xth);
@@ -1789,19 +1723,19 @@ static void gen_xsiexpqp(DisasContext *ctx)
     xtl = tcg_temp_new_i64();
     xah = tcg_temp_new_i64();
     xal = tcg_temp_new_i64();
-    get_cpu_vsrh(xah, rA(ctx->opcode) + 32);
-    get_cpu_vsrl(xal, rA(ctx->opcode) + 32);
+    get_cpu_vsr(xah, rA(ctx->opcode) + 32, true);
+    get_cpu_vsr(xal, rA(ctx->opcode) + 32, false);
     xbh = tcg_temp_new_i64();
-    get_cpu_vsrh(xbh, rB(ctx->opcode) + 32);
+    get_cpu_vsr(xbh, rB(ctx->opcode) + 32, true);
     t0 = tcg_temp_new_i64();
 
     tcg_gen_andi_i64(xth, xah, 0x8000FFFFFFFFFFFF);
     tcg_gen_andi_i64(t0, xbh, 0x7FFF);
     tcg_gen_shli_i64(t0, t0, 48);
     tcg_gen_or_i64(xth, xth, t0);
-    set_cpu_vsrh(rD(ctx->opcode) + 32, xth);
+    set_cpu_vsr(rD(ctx->opcode) + 32, xth, true);
     tcg_gen_mov_i64(xtl, xal);
-    set_cpu_vsrl(rD(ctx->opcode) + 32, xtl);
+    set_cpu_vsr(rD(ctx->opcode) + 32, xtl, false);
 
     tcg_temp_free_i64(t0);
     tcg_temp_free_i64(xth);
@@ -1826,12 +1760,12 @@ static void gen_xsxsigdp(DisasContext *ctx)
     zr = tcg_const_i64(0);
     nan = tcg_const_i64(2047);
 
-    get_cpu_vsrh(t1, xB(ctx->opcode));
+    get_cpu_vsr(t1, xB(ctx->opcode), true);
     tcg_gen_extract_i64(exp, t1, 52, 11);
     tcg_gen_movi_i64(t0, 0x0010000000000000);
     tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, zr, zr, t0);
     tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, nan, zr, t0);
-    get_cpu_vsrh(t1, xB(ctx->opcode));
+    get_cpu_vsr(t1, xB(ctx->opcode), true);
     tcg_gen_deposit_i64(rt, t0, t1, 0, 52);
 
     tcg_temp_free_i64(t0);
@@ -1857,8 +1791,8 @@ static void gen_xsxsigqp(DisasContext *ctx)
     xtl = tcg_temp_new_i64();
     xbh = tcg_temp_new_i64();
     xbl = tcg_temp_new_i64();
-    get_cpu_vsrh(xbh, rB(ctx->opcode) + 32);
-    get_cpu_vsrl(xbl, rB(ctx->opcode) + 32);
+    get_cpu_vsr(xbh, rB(ctx->opcode) + 32, true);
+    get_cpu_vsr(xbl, rB(ctx->opcode) + 32, false);
     exp = tcg_temp_new_i64();
     t0 = tcg_temp_new_i64();
     zr = tcg_const_i64(0);
@@ -1869,9 +1803,9 @@ static void gen_xsxsigqp(DisasContext *ctx)
     tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, zr, zr, t0);
     tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, nan, zr, t0);
     tcg_gen_deposit_i64(xth, t0, xbh, 0, 48);
-    set_cpu_vsrh(rD(ctx->opcode) + 32, xth);
+    set_cpu_vsr(rD(ctx->opcode) + 32, xth, true);
     tcg_gen_mov_i64(xtl, xbl);
-    set_cpu_vsrl(rD(ctx->opcode) + 32, xtl);
+    set_cpu_vsr(rD(ctx->opcode) + 32, xtl, false);
 
     tcg_temp_free_i64(t0);
     tcg_temp_free_i64(exp);
@@ -1904,22 +1838,22 @@ static void gen_xviexpsp(DisasContext *ctx)
     xal = tcg_temp_new_i64();
     xbh = tcg_temp_new_i64();
     xbl = tcg_temp_new_i64();
-    get_cpu_vsrh(xah, xA(ctx->opcode));
-    get_cpu_vsrl(xal, xA(ctx->opcode));
-    get_cpu_vsrh(xbh, xB(ctx->opcode));
-    get_cpu_vsrl(xbl, xB(ctx->opcode));
+    get_cpu_vsr(xah, xA(ctx->opcode), true);
+    get_cpu_vsr(xal, xA(ctx->opcode), false);
+    get_cpu_vsr(xbh, xB(ctx->opcode), true);
+    get_cpu_vsr(xbl, xB(ctx->opcode), false);
     t0 = tcg_temp_new_i64();
 
     tcg_gen_andi_i64(xth, xah, 0x807FFFFF807FFFFF);
     tcg_gen_andi_i64(t0, xbh, 0xFF000000FF);
     tcg_gen_shli_i64(t0, t0, 23);
     tcg_gen_or_i64(xth, xth, t0);
-    set_cpu_vsrh(xT(ctx->opcode), xth);
+    set_cpu_vsr(xT(ctx->opcode), xth, true);
     tcg_gen_andi_i64(xtl, xal, 0x807FFFFF807FFFFF);
     tcg_gen_andi_i64(t0, xbl, 0xFF000000FF);
     tcg_gen_shli_i64(t0, t0, 23);
     tcg_gen_or_i64(xtl, xtl, t0);
-    set_cpu_vsrl(xT(ctx->opcode), xtl);
+    set_cpu_vsr(xT(ctx->opcode), xtl, false);
 
     tcg_temp_free_i64(t0);
     tcg_temp_free_i64(xth);
@@ -1949,16 +1883,16 @@ static void gen_xviexpdp(DisasContext *ctx)
     xal = tcg_temp_new_i64();
     xbh = tcg_temp_new_i64();
     xbl = tcg_temp_new_i64();
-    get_cpu_vsrh(xah, xA(ctx->opcode));
-    get_cpu_vsrl(xal, xA(ctx->opcode));
-    get_cpu_vsrh(xbh, xB(ctx->opcode));
-    get_cpu_vsrl(xbl, xB(ctx->opcode));
+    get_cpu_vsr(xah, xA(ctx->opcode), true);
+    get_cpu_vsr(xal, xA(ctx->opcode), false);
+    get_cpu_vsr(xbh, xB(ctx->opcode), true);
+    get_cpu_vsr(xbl, xB(ctx->opcode), false);
 
     tcg_gen_deposit_i64(xth, xah, xbh, 52, 11);
-    set_cpu_vsrh(xT(ctx->opcode), xth);
+    set_cpu_vsr(xT(ctx->opcode), xth, true);
 
     tcg_gen_deposit_i64(xtl, xal, xbl, 52, 11);
-    set_cpu_vsrl(xT(ctx->opcode), xtl);
+    set_cpu_vsr(xT(ctx->opcode), xtl, false);
 
     tcg_temp_free_i64(xth);
     tcg_temp_free_i64(xtl);
@@ -1983,15 +1917,15 @@ static void gen_xvxexpsp(DisasContext *ctx)
     xtl = tcg_temp_new_i64();
     xbh = tcg_temp_new_i64();
     xbl = tcg_temp_new_i64();
-    get_cpu_vsrh(xbh, xB(ctx->opcode));
-    get_cpu_vsrl(xbl, xB(ctx->opcode));
+    get_cpu_vsr(xbh, xB(ctx->opcode), true);
+    get_cpu_vsr(xbl, xB(ctx->opcode), false);
 
     tcg_gen_shri_i64(xth, xbh, 23);
     tcg_gen_andi_i64(xth, xth, 0xFF000000FF);
-    set_cpu_vsrh(xT(ctx->opcode), xth);
+    set_cpu_vsr(xT(ctx->opcode), xth, true);
     tcg_gen_shri_i64(xtl, xbl, 23);
     tcg_gen_andi_i64(xtl, xtl, 0xFF000000FF);
-    set_cpu_vsrl(xT(ctx->opcode), xtl);
+    set_cpu_vsr(xT(ctx->opcode), xtl, false);
 
     tcg_temp_free_i64(xth);
     tcg_temp_free_i64(xtl);
@@ -2014,13 +1948,13 @@ static void gen_xvxexpdp(DisasContext *ctx)
     xtl = tcg_temp_new_i64();
     xbh = tcg_temp_new_i64();
     xbl = tcg_temp_new_i64();
-    get_cpu_vsrh(xbh, xB(ctx->opcode));
-    get_cpu_vsrl(xbl, xB(ctx->opcode));
+    get_cpu_vsr(xbh, xB(ctx->opcode), true);
+    get_cpu_vsr(xbl, xB(ctx->opcode), false);
 
     tcg_gen_extract_i64(xth, xbh, 52, 11);
-    set_cpu_vsrh(xT(ctx->opcode), xth);
+    set_cpu_vsr(xT(ctx->opcode), xth, true);
     tcg_gen_extract_i64(xtl, xbl, 52, 11);
-    set_cpu_vsrl(xT(ctx->opcode), xtl);
+    set_cpu_vsr(xT(ctx->opcode), xtl, false);
 
     tcg_temp_free_i64(xth);
     tcg_temp_free_i64(xtl);
@@ -2046,8 +1980,8 @@ static void gen_xvxsigdp(DisasContext *ctx)
     xtl = tcg_temp_new_i64();
     xbh = tcg_temp_new_i64();
     xbl = tcg_temp_new_i64();
-    get_cpu_vsrh(xbh, xB(ctx->opcode));
-    get_cpu_vsrl(xbl, xB(ctx->opcode));
+    get_cpu_vsr(xbh, xB(ctx->opcode), true);
+    get_cpu_vsr(xbl, xB(ctx->opcode), false);
     exp = tcg_temp_new_i64();
     t0 = tcg_temp_new_i64();
     zr = tcg_const_i64(0);
@@ -2058,14 +1992,14 @@ static void gen_xvxsigdp(DisasContext *ctx)
     tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, zr, zr, t0);
     tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, nan, zr, t0);
     tcg_gen_deposit_i64(xth, t0, xbh, 0, 52);
-    set_cpu_vsrh(xT(ctx->opcode), xth);
+    set_cpu_vsr(xT(ctx->opcode), xth, true);
 
     tcg_gen_extract_i64(exp, xbl, 52, 11);
     tcg_gen_movi_i64(t0, 0x0010000000000000);
     tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, zr, zr, t0);
     tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, nan, zr, t0);
     tcg_gen_deposit_i64(xtl, t0, xbl, 0, 52);
-    set_cpu_vsrl(xT(ctx->opcode), xtl);
+    set_cpu_vsr(xT(ctx->opcode), xtl, false);
 
     tcg_temp_free_i64(t0);
     tcg_temp_free_i64(exp);
@@ -2077,6 +2011,180 @@ static void gen_xvxsigdp(DisasContext *ctx)
     tcg_temp_free_i64(xbl);
 }
 
+static bool do_lstxv(DisasContext *ctx, int ra, TCGv displ,
+                     int rt, bool store, bool paired)
+{
+    TCGv ea;
+    TCGv_i64 xt;
+    MemOp mop;
+    int rt1, rt2;
+
+    xt = tcg_temp_new_i64();
+
+    mop = DEF_MEMOP(MO_Q);
+
+    gen_set_access_type(ctx, ACCESS_INT);
+    ea = do_ea_calc(ctx, ra, displ);
+
+    if (paired && ctx->le_mode) {
+        rt1 = rt + 1;
+        rt2 = rt;
+    } else {
+        rt1 = rt;
+        rt2 = rt + 1;
+    }
+
+    if (store) {
+        get_cpu_vsr(xt, rt1, !ctx->le_mode);
+        tcg_gen_qemu_st_i64(xt, ea, ctx->mem_idx, mop);
+        gen_addr_add(ctx, ea, ea, 8);
+        get_cpu_vsr(xt, rt1, ctx->le_mode);
+        tcg_gen_qemu_st_i64(xt, ea, ctx->mem_idx, mop);
+        if (paired) {
+            gen_addr_add(ctx, ea, ea, 8);
+            get_cpu_vsr(xt, rt2, !ctx->le_mode);
+            tcg_gen_qemu_st_i64(xt, ea, ctx->mem_idx, mop);
+            gen_addr_add(ctx, ea, ea, 8);
+            get_cpu_vsr(xt, rt2, ctx->le_mode);
+            tcg_gen_qemu_st_i64(xt, ea, ctx->mem_idx, mop);
+        }
+    } else {
+        tcg_gen_qemu_ld_i64(xt, ea, ctx->mem_idx, mop);
+        set_cpu_vsr(rt1, xt, !ctx->le_mode);
+        gen_addr_add(ctx, ea, ea, 8);
+        tcg_gen_qemu_ld_i64(xt, ea, ctx->mem_idx, mop);
+        set_cpu_vsr(rt1, xt, ctx->le_mode);
+        if (paired) {
+            gen_addr_add(ctx, ea, ea, 8);
+            tcg_gen_qemu_ld_i64(xt, ea, ctx->mem_idx, mop);
+            set_cpu_vsr(rt2, xt, !ctx->le_mode);
+            gen_addr_add(ctx, ea, ea, 8);
+            tcg_gen_qemu_ld_i64(xt, ea, ctx->mem_idx, mop);
+            set_cpu_vsr(rt2, xt, ctx->le_mode);
+        }
+    }
+
+    tcg_temp_free(ea);
+    tcg_temp_free_i64(xt);
+    return true;
+}
+
+static bool do_lstxv_D(DisasContext *ctx, arg_D *a, bool store, bool paired)
+{
+    if (paired) {
+        REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+    } else {
+        REQUIRE_INSNS_FLAGS2(ctx, ISA300);
+    }
+
+    if (paired || a->rt >= 32) {
+        REQUIRE_VSX(ctx);
+    } else {
+        REQUIRE_VECTOR(ctx);
+    }
+
+    return do_lstxv(ctx, a->ra, tcg_constant_tl(a->si), a->rt, store, paired);
+}
+
+static bool do_lstxv_PLS_D(DisasContext *ctx, arg_PLS_D *a,
+                           bool store, bool paired)
+{
+    arg_D d;
+    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+    REQUIRE_VSX(ctx);
+
+    if (!resolve_PLS_D(ctx, &d, a)) {
+        return true;
+    }
+
+    return do_lstxv(ctx, d.ra, tcg_constant_tl(d.si), d.rt, store, paired);
+}
+
+static bool do_lstxv_X(DisasContext *ctx, arg_X *a, bool store, bool paired)
+{
+    if (paired) {
+        REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+    } else {
+        REQUIRE_INSNS_FLAGS2(ctx, ISA300);
+    }
+
+    if (paired || a->rt >= 32) {
+        REQUIRE_VSX(ctx);
+    } else {
+        REQUIRE_VECTOR(ctx);
+    }
+
+    return do_lstxv(ctx, a->ra, cpu_gpr[a->rb], a->rt, store, paired);
+}
+
+TRANS(STXV, do_lstxv_D, true, false)
+TRANS(LXV, do_lstxv_D, false, false)
+TRANS(STXVP, do_lstxv_D, true, true)
+TRANS(LXVP, do_lstxv_D, false, true)
+TRANS(STXVX, do_lstxv_X, true, false)
+TRANS(LXVX, do_lstxv_X, false, false)
+TRANS(STXVPX, do_lstxv_X, true, true)
+TRANS(LXVPX, do_lstxv_X, false, true)
+TRANS64(PSTXV, do_lstxv_PLS_D, true, false)
+TRANS64(PLXV, do_lstxv_PLS_D, false, false)
+TRANS64(PSTXVP, do_lstxv_PLS_D, true, true)
+TRANS64(PLXVP, do_lstxv_PLS_D, false, true)
+
+static void gen_xxblendv_vec(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b,
+                             TCGv_vec c)
+{
+    TCGv_vec tmp = tcg_temp_new_vec_matching(c);
+    tcg_gen_sari_vec(vece, tmp, c, (8 << vece) - 1);
+    tcg_gen_bitsel_vec(vece, t, tmp, b, a);
+    tcg_temp_free_vec(tmp);
+}
+
+static bool do_xxblendv(DisasContext *ctx, arg_XX4 *a, unsigned vece)
+{
+    static const TCGOpcode vecop_list[] = {
+        INDEX_op_sari_vec, 0
+    };
+    static const GVecGen4 ops[4] = {
+        {
+            .fniv = gen_xxblendv_vec,
+            .fno = gen_helper_XXBLENDVB,
+            .opt_opc = vecop_list,
+            .vece = MO_8
+        },
+        {
+            .fniv = gen_xxblendv_vec,
+            .fno = gen_helper_XXBLENDVH,
+            .opt_opc = vecop_list,
+            .vece = MO_16
+        },
+        {
+            .fniv = gen_xxblendv_vec,
+            .fno = gen_helper_XXBLENDVW,
+            .opt_opc = vecop_list,
+            .vece = MO_32
+        },
+        {
+            .fniv = gen_xxblendv_vec,
+            .fno = gen_helper_XXBLENDVD,
+            .opt_opc = vecop_list,
+            .vece = MO_64
+        }
+    };
+
+    REQUIRE_VSX(ctx);
+
+    tcg_gen_gvec_4(vsr_full_offset(a->xt), vsr_full_offset(a->xa),
+                   vsr_full_offset(a->xb), vsr_full_offset(a->xc),
+                   16, 16, &ops[vece]);
+
+    return true;
+}
+
+TRANS(XXBLENDVB, do_xxblendv, MO_8)
+TRANS(XXBLENDVH, do_xxblendv, MO_16)
+TRANS(XXBLENDVW, do_xxblendv, MO_32)
+TRANS(XXBLENDVD, do_xxblendv, MO_64)
+
 #undef GEN_XX2FORM
 #undef GEN_XX3FORM
 #undef GEN_XX2IFORM