author     Yang Liu <numbksco@gmail.com>   2024-07-16 14:18:14 +0800
committer  GitHub <noreply@github.com>     2024-07-16 08:18:14 +0200
commit     5c212e92c39c88d57b4ead86c4c08aaab280408b (patch)
tree       10a1202890ee9b433185dcfc65348916f81afbec /src
parent     1dde4ee9592b2a7effb8e020312e507dbebb0615 (diff)
download   box64-5c212e92c39c88d57b4ead86c4c08aaab280408b.tar.gz
           box64-5c212e92c39c88d57b4ead86c4c08aaab280408b.zip
[LA64_DYNAREC] Remove xMASK and fix the 0x86 XCHG opcode (#1683)
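
Note (illustration, not part of the original commit message): the core change is that the dynarec no longer keeps 0x00000000FFFFFFFF resident in r11 (xMASK) and ANDs with it to clear the upper 32 bits; it now uses BSTRPICK.D rd, rj, 31, 0 through the new ZEROUP2 macro, which frees r11 to become an ordinary scratch register (x7). A minimal C sketch of why the two forms produce the same result; the helper names here are hypothetical and only mirror the semantics of the emitted instructions:

/*
 * Sketch only, not box64 code: equivalence of the old and new upper-32-bit clear.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* old: AND(rd, rj, xMASK), with xMASK holding 0x00000000FFFFFFFF */
static uint64_t zeroup_with_mask(uint64_t rj)
{
    const uint64_t xMASK = 0x00000000FFFFFFFFull;
    return rj & xMASK;
}

/* new: ZEROUP2(rd, rj) == BSTRPICK.D rd, rj, 31, 0
 * (extract bit field [31:0] of rj and zero-extend into rd) */
static uint64_t zeroup_with_bstrpick(uint64_t rj)
{
    const unsigned msb = 31, lsb = 0;
    return (rj >> lsb) & ((UINT64_C(1) << (msb - lsb + 1)) - 1);
}

int main(void)
{
    uint64_t v = 0xDEADBEEFCAFEBABEull;
    assert(zeroup_with_mask(v) == zeroup_with_bstrpick(v));
    printf("%016llx\n", (unsigned long long)zeroup_with_bstrpick(v)); /* 00000000cafebabe */
    return 0;
}
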
Diffstat (limited to 'src')
-rw-r--r--  src/dynarec/la64/dynarec_la64_00.c         52
-rw-r--r--  src/dynarec/la64/dynarec_la64_0f.c          4
-rw-r--r--  src/dynarec/la64/dynarec_la64_64.c         15
-rw-r--r--  src/dynarec/la64/dynarec_la64_66.c          2
-rw-r--r--  src/dynarec/la64/dynarec_la64_67.c          2
-rw-r--r--  src/dynarec/la64/dynarec_la64_emit_math.c  24
-rw-r--r--  src/dynarec/la64/dynarec_la64_f0.c          6
-rw-r--r--  src/dynarec/la64/dynarec_la64_f30f.c        2
-rw-r--r--  src/dynarec/la64/dynarec_la64_helper.c      5
-rw-r--r--  src/dynarec/la64/dynarec_la64_helper.h      6
-rw-r--r--  src/dynarec/la64/la64_emitter.h            34
-rw-r--r--  src/dynarec/la64/la64_printer.c             2
-rw-r--r--  src/dynarec/la64/la64_prolog.S              3
13 files changed, 67 insertions, 90 deletions
diff --git a/src/dynarec/la64/dynarec_la64_00.c b/src/dynarec/la64/dynarec_la64_00.c
index e16d5974..925959f0 100644
--- a/src/dynarec/la64/dynarec_la64_00.c
+++ b/src/dynarec/la64/dynarec_la64_00.c
@@ -470,7 +470,7 @@ uintptr_t dynarec64_00(dynarec_la64_t* dyn, uintptr_t addr, uintptr_t ip, int ni
                     }
                 } else {
                     if (MODREG) { // reg <= reg
-                        AND(gd, TO_LA64((nextop & 7) + (rex.b << 3)), xMASK);
+                        ZEROUP2(gd, TO_LA64((nextop & 7) + (rex.b << 3)));
                     } else { // mem <= reg
                         SMREAD();
                         addr = geted(dyn, addr, ninst, nextop, &ed, x2, x1, &fixedaddress, rex, NULL, 1, 0);
@@ -808,7 +808,7 @@ uintptr_t dynarec64_00(dynarec_la64_t* dyn, uintptr_t addr, uintptr_t ip, int ni
                         emit_cmp32(dyn, ninst, rex, ed, x2, x3, x4, x5, x6);
                     } else {
                         if (!rex.w && MODREG) {
-                            AND(x1, ed, xMASK);
+                            ZEROUP2(x1, ed);
                             ed = x1;
                         }
                         emit_cmp32_0(dyn, ninst, rex, ed, x3, x4);
@@ -845,9 +845,10 @@ uintptr_t dynarec64_00(dynarec_la64_t* dyn, uintptr_t addr, uintptr_t ip, int ni
             } else {
                 GETGB(x3);
                 addr = geted(dyn, addr, ninst, nextop, &ed, x2, x1, &fixedaddress, rex, LOCK_LOCK, 0, 0);
-                if (la64_lam_bh)
+                if (la64_lam_bh) {
                     AMSWAP_DB_B(x1, gd, ed);
-                else {
+                    BSTRINS_D(gb1, x1, gb2 + 7, gb2);
+                } else {
                     SMDMB();
 
                     // calculate shift amount
@@ -858,32 +859,25 @@ uintptr_t dynarec64_00(dynarec_la64_t* dyn, uintptr_t addr, uintptr_t ip, int ni
                     ADDI_D(x4, xZR, 0xffc);
                     AND(x6, ed, x4);
 
-                    // load aligned data
-                    LD_WU(x5, x6, 0);
-
-                    // insert gd byte into the aligned data
+                    // prepare mask
                     ADDI_D(x4, xZR, 0xff);
                     SLL_D(x4, x4, x1);
                     NOR(x4, x4, xZR);
-                    AND(x4, x5, x4);
-                    SLL_D(x5, gd, x1);
-                    OR(x4, x4, x5);
 
-                    // do aligned ll/sc sequence
+                    SLL_D(x7, gd, x1);
+
+                    // do aligned ll/sc sequence, reusing x2 (ed might be x2 but is no longer needed)
                     MARKLOCK;
-                    LL_W(x1, x6, 0);
-                    MV(x5, x4);
+                    LL_W(x2, x6, 0);
+                    AND(x5, x2, x4);
+                    OR(x5, x5, x7);
                     SC_W(x5, x6, 0);
                     BEQZ_MARKLOCK(x5);
 
-                    // calculate shift amount again
-                    ANDI(x4, ed, 0x3);
-                    SLLI_D(x4, x4, 3);
-
                     // extract loaded byte
-                    SRL_D(x1, x1, x4);
+                    SRL_D(gd, x2, x1);
+                    BSTRINS_D(gb1, gd, gb2 + 7, gb2);
                 }
-                BSTRINS_D(gb1, x1, gb2 + 7, gb2);
             }
             break;
         case 0x87:
@@ -2054,13 +2048,13 @@ uintptr_t dynarec64_00(dynarec_la64_t* dyn, uintptr_t addr, uintptr_t ip, int ni
                         MUL_D(xRAX, xRAX, ed);
                         if (gd != xRDX) { MV(xRDX, gd); }
                     } else {
-                        AND(x3, xRAX, xMASK);
+                        ZEROUP2(x3, xRAX);
                         if (MODREG) {
-                            AND(x4, ed, xMASK);
+                            ZEROUP2(x4, ed);
                             ed = x4;
                         }
                         MUL_D(xRDX, x3, ed); // 64 <- 32x32
-                        AND(xRAX, xRDX, xMASK);
+                        ZEROUP2(xRAX, xRDX);
                         SRLI_D(xRDX, xRDX, 32);
                     }
                     UFLAG_RES(xRAX);
@@ -2082,7 +2076,7 @@ uintptr_t dynarec64_00(dynarec_la64_t* dyn, uintptr_t addr, uintptr_t ip, int ni
                     } else {
                         ADDI_W(x3, xRAX, 0); // sign extend 32bits-> 64bits
                         MUL_D(xRDX, x3, ed); // 64 <- 32x32
-                        AND(xRAX, xRDX, xMASK);
+                        ZEROUP2(xRAX, xRDX);
                         SRLI_D(xRDX, xRDX, 32);
                     }
                     UFLAG_RES(xRAX);
@@ -2096,15 +2090,15 @@ uintptr_t dynarec64_00(dynarec_la64_t* dyn, uintptr_t addr, uintptr_t ip, int ni
                         SET_DFNONE();
                         GETED(0);
                         SLLI_D(x3, xRDX, 32);
-                        AND(x2, xRAX, xMASK);
+                        ZEROUP2(x2, xRAX);
                         OR(x3, x3, x2);
                         if (MODREG) {
-                            AND(x4, ed, xMASK);
+                            ZEROUP2(x4, ed);
                             ed = x4;
                         }
                         DIV_DU(x2, x3, ed);
                         MOD_DU(xRDX, x3, ed);
-                        AND(xRAX, x2, xMASK);
+                        ZEROUP2(xRAX, x2);
                         ZEROUP(xRDX);
                     } else {
                         if (ninst
@@ -2139,11 +2133,11 @@ uintptr_t dynarec64_00(dynarec_la64_t* dyn, uintptr_t addr, uintptr_t ip, int ni
                         SET_DFNONE()
                         GETSED(0);
                         SLLI_D(x3, xRDX, 32);
-                        AND(x2, xRAX, xMASK);
+                        ZEROUP2(x2, xRAX);
                         OR(x3, x3, x2);
                         DIV_D(x2, x3, ed);
                         MOD_D(xRDX, x3, ed);
-                        AND(xRAX, x2, xMASK);
+                        ZEROUP2(xRAX, x2);
                         ZEROUP(xRDX);
                     } else {
                         if (ninst && dyn->insts
diff --git a/src/dynarec/la64/dynarec_la64_0f.c b/src/dynarec/la64/dynarec_la64_0f.c
index 2bdc22a1..3124e7ae 100644
--- a/src/dynarec/la64/dynarec_la64_0f.c
+++ b/src/dynarec/la64/dynarec_la64_0f.c
@@ -913,7 +913,7 @@ uintptr_t dynarec64_0F(dynarec_la64_t* dyn, uintptr_t addr, uintptr_t ip, int ni
             GETED(0);
             GETGD;
             if (!rex.w && MODREG) {
-                AND(x4, ed, xMASK);
+                ZEROUP2(x4, ed);
                 ed = x4;
             }
             BNE_MARK(ed, xZR);
@@ -945,7 +945,7 @@ uintptr_t dynarec64_0F(dynarec_la64_t* dyn, uintptr_t addr, uintptr_t ip, int ni
             GETED(0);
             GETGD;
             if (!rex.w && MODREG) {
-                AND(x4, ed, xMASK);
+                ZEROUP2(x4, ed);
                 ed = x4;
             }
             BNE_MARK(ed, xZR);
diff --git a/src/dynarec/la64/dynarec_la64_64.c b/src/dynarec/la64/dynarec_la64_64.c
index 892d3290..95d6566e 100644
--- a/src/dynarec/la64/dynarec_la64_64.c
+++ b/src/dynarec/la64/dynarec_la64_64.c
@@ -180,9 +180,7 @@ uintptr_t dynarec64_64(dynarec_la64_t* dyn, uintptr_t addr, uintptr_t ip, int ni
                         i64 = F32S;
                     else
                         i64 = F8S;
-                    emit_add32c(dyn, ninst, rex, ed, i64, x3, x4, x5, xMASK);
-                    IFXA (X_CF, !la64_lbt)
-                        REGENERATE_MASK(); // use xMASK as a scratch
+                    emit_add32c(dyn, ninst, rex, ed, i64, x3, x4, x5, x7);
                     WBACKO(x6);
                     break;
                 case 1:
@@ -216,10 +214,9 @@ uintptr_t dynarec64_64(dynarec_la64_t* dyn, uintptr_t addr, uintptr_t ip, int ni
                     MOV64xw(x5, i64);
                     IFXA (X_ALL, !la64_lbt)
                         ST_D(x6, xEmu, offsetof(x64emu_t, scratch));
-                    emit_adc32(dyn, ninst, rex, ed, x5, x3, x4, x6, xMASK);
+                    emit_adc32(dyn, ninst, rex, ed, x5, x3, x4, x6, x7);
                     IFXA (X_ALL, !la64_lbt) {
                         LD_D(x6, xEmu, offsetof(x64emu_t, scratch));
-                        REGENERATE_MASK(); // use xMASK as a scratch
                     }
                     WBACKO(x6);
                     break;
@@ -237,9 +234,7 @@ uintptr_t dynarec64_64(dynarec_la64_t* dyn, uintptr_t addr, uintptr_t ip, int ni
                     else
                         i64 = F8S;
                     MOV64xw(x5, i64);
-                    emit_sbb32(dyn, ninst, rex, ed, x5, x3, x4, xMASK);
-                    IFXA (X_CF, !la64_lbt)
-                        REGENERATE_MASK(); // use xMASK as a scratch
+                    emit_sbb32(dyn, ninst, rex, ed, x5, x3, x4, x7);
                     WBACKO(x6);
                     break;
                 case 4:
@@ -269,9 +264,7 @@ uintptr_t dynarec64_64(dynarec_la64_t* dyn, uintptr_t addr, uintptr_t ip, int ni
                         i64 = F32S;
                     else
                         i64 = F8S;
-                    emit_sub32c(dyn, ninst, rex, ed, i64, x3, x4, x5, xMASK);
-                    IFXA (X_CF, !la64_lbt)
-                        REGENERATE_MASK(); // use xMASK as a scratch
+                    emit_sub32c(dyn, ninst, rex, ed, i64, x3, x4, x5, x7);
                     WBACKO(x6);
                     break;
                 case 6:
diff --git a/src/dynarec/la64/dynarec_la64_66.c b/src/dynarec/la64/dynarec_la64_66.c
index 3c32462d..b5e594a1 100644
--- a/src/dynarec/la64/dynarec_la64_66.c
+++ b/src/dynarec/la64/dynarec_la64_66.c
@@ -91,7 +91,7 @@ uintptr_t dynarec64_66(dynarec_la64_t* dyn, uintptr_t addr, uintptr_t ip, int ni
             nextop = F8;
             GETGW(x2);
             GETEW(x1, 0);
-            emit_or16(dyn, ninst, x1, x2, x4, x2);
+            emit_or16(dyn, ninst, x1, x2, x4, x5);
             EWBACK;
             break;
         case 0x0B:
diff --git a/src/dynarec/la64/dynarec_la64_67.c b/src/dynarec/la64/dynarec_la64_67.c
index 242d315e..119778fd 100644
--- a/src/dynarec/la64/dynarec_la64_67.c
+++ b/src/dynarec/la64/dynarec_la64_67.c
@@ -124,7 +124,7 @@ uintptr_t dynarec64_67(dynarec_la64_t* dyn, uintptr_t addr, uintptr_t ip, int ni
                         if (gd != xRDX) MV(xRDX, gd);
                     } else {
                         MUL_D(xRDX, xRAX, ed); // 64 <- 32x32
-                        AND(xRAX, xRDX, xMASK);
+                        ZEROUP2(xRAX, xRDX);
                         SRLI_W(xRDX, xRDX, 32);
                     }
                     UFLAG_RES(xRAX);
diff --git a/src/dynarec/la64/dynarec_la64_emit_math.c b/src/dynarec/la64/dynarec_la64_emit_math.c
index 577ffc75..4abc9603 100644
--- a/src/dynarec/la64/dynarec_la64_emit_math.c
+++ b/src/dynarec/la64/dynarec_la64_emit_math.c
@@ -57,8 +57,8 @@ void emit_add32(dynarec_la64_t* dyn, int ninst, rex_t rex, int s1, int s2, int s
     IFX(X_CF)
     {
         if (rex.w) {
-            AND(s5, xMASK, s1);
-            AND(s4, xMASK, s2);
+            ZEROUP2(s5, s1);
+            ZEROUP2(s4, s2);
             ADD_D(s5, s5, s4);
             SRLI_D(s3, s1, 0x20);
             SRLI_D(s4, s2, 0x20);
@@ -69,8 +69,8 @@ void emit_add32(dynarec_la64_t* dyn, int ninst, rex_t rex, int s1, int s2, int s
             BEQZ(s5, 8);
             ORI(xFlags, xFlags, 1 << F_CF);
         } else {
-            AND(s3, s1, xMASK);
-            AND(s4, s2, xMASK);
+            ZEROUP2(s3, s1);
+            ZEROUP2(s4, s2);
             ADD_D(s5, s3, s4);
             SRLI_D(s5, s5, 0x20);
             BEQZ(s5, 8);
@@ -177,8 +177,8 @@ void emit_add32c(dynarec_la64_t* dyn, int ninst, rex_t rex, int s1, int64_t c, i
     IFX(X_CF)
     {
         if (rex.w) {
-            AND(s5, xMASK, s1);
-            AND(s4, xMASK, s2);
+            ZEROUP2(s5, s1);
+            ZEROUP2(s4, s2);
             ADD_D(s5, s5, s4);
             SRLI_D(s3, s1, 0x20);
             SRLI_D(s4, s2, 0x20);
@@ -189,8 +189,8 @@ void emit_add32c(dynarec_la64_t* dyn, int ninst, rex_t rex, int s1, int64_t c, i
             BEQZ(s5, 8);
             ORI(xFlags, xFlags, 1 << F_CF);
         } else {
-            AND(s3, s1, xMASK);
-            AND(s4, s2, xMASK);
+            ZEROUP2(s3, s1);
+            ZEROUP2(s4, s2);
             ADD_D(s5, s3, s4);
             SRLI_D(s5, s5, 0x20);
             BEQZ(s5, 8);
@@ -1059,8 +1059,8 @@ void emit_adc32(dynarec_la64_t* dyn, int ninst, rex_t rex, int s1, int s2, int s
 
     IFX (X_CF) {
         if (rex.w) {
-            AND(s5, xMASK, s1);
-            AND(s4, xMASK, s2);
+            ZEROUP2(s5, s1);
+            ZEROUP2(s4, s2);
             ADD_D(s5, s5, s4); // lo
             ANDI(s3, xFlags, 1);
             ADD_D(s5, s5, s3); // add carry
@@ -1071,8 +1071,8 @@ void emit_adc32(dynarec_la64_t* dyn, int ninst, rex_t rex, int s1, int s2, int s
             ADD_D(s5, s5, s4); // hi
             SRAI_D(s6, s5, 0x20);
         } else {
-            AND(s3, s1, xMASK);
-            AND(s4, s2, xMASK);
+            ZEROUP2(s3, s1);
+            ZEROUP2(s4, s2);
             ADD_D(s5, s3, s4);
             ANDI(s3, xFlags, 1);
             ADD_D(s5, s5, s3); // add carry
diff --git a/src/dynarec/la64/dynarec_la64_f0.c b/src/dynarec/la64/dynarec_la64_f0.c
index d1b0810c..0f7a48d8 100644
--- a/src/dynarec/la64/dynarec_la64_f0.c
+++ b/src/dynarec/la64/dynarec_la64_f0.c
@@ -239,10 +239,10 @@ uintptr_t dynarec64_F0(dynarec_la64_t* dyn, uintptr_t addr, uintptr_t ip, int ni
                                 }
                             } else {
                                 SMDMB();
-                                AND(x3, xRAX, xMASK);
+                                ZEROUP2(x3, xRAX);
                                 SLLI_D(x2, xRDX, 32);
                                 OR(x3, x3, x2);
-                                AND(x4, xRBX, xMASK);
+                                ZEROUP2(x4, xRBX);
                                 SLLI_D(x2, xRCX, 32);
                                 OR(x4, x4, x2);
                                 MARKLOCK;
@@ -260,7 +260,7 @@ uintptr_t dynarec64_F0(dynarec_la64_t* dyn, uintptr_t addr, uintptr_t ip, int ni
                                 B_MARK3_nocond;
                                 MARK;
                                 SLLI_D(xRDX, x2, 32);
-                                AND(xRAX, x2, xMASK);
+                                ZEROUP2(xRAX, x2);
                                 MARK3;
                                 SMDMB();
                             }
diff --git a/src/dynarec/la64/dynarec_la64_f30f.c b/src/dynarec/la64/dynarec_la64_f30f.c
index 333c7b62..e340474e 100644
--- a/src/dynarec/la64/dynarec_la64_f30f.c
+++ b/src/dynarec/la64/dynarec_la64_f30f.c
@@ -301,7 +301,7 @@ uintptr_t dynarec64_F30F(dynarec_la64_t* dyn, uintptr_t addr, uintptr_t ip, int
             GETED(0);
             GETGD;
             if (!rex.w && MODREG) {
-                AND(x4, ed, xMASK);
+                ZEROUP2(x4, ed);
                 ed = x4;
             }
             RESTORE_EFLAGS(x1);
diff --git a/src/dynarec/la64/dynarec_la64_helper.c b/src/dynarec/la64/dynarec_la64_helper.c
index 5c0b3879..d3954e63 100644
--- a/src/dynarec/la64/dynarec_la64_helper.c
+++ b/src/dynarec/la64/dynarec_la64_helper.c
@@ -249,7 +249,7 @@ static uintptr_t geted_32(dynarec_la64_t* dyn, uintptr_t addr, int ninst, uint8_
         } else {
             ret = TO_LA64((nextop & 7));
             if (ret == hint) {
-                AND(hint, ret, xMASK); // to clear upper part
+                ZEROUP2(hint, ret); // to clear upper part
             }
         }
     } else {
@@ -397,7 +397,7 @@ uintptr_t geted32(dynarec_la64_t* dyn, uintptr_t addr, int ninst, uint8_t nextop
         } else {
             ret = TO_LA64((nextop & 7) + (rex.b << 3));
             if (ret == hint) {
-                AND(hint, ret, xMASK); // to clear upper part
+                ZEROUP2(hint, ret); // to clear upper part
             }
         }
     } else {
@@ -685,7 +685,6 @@ void call_c(dynarec_la64_t* dyn, int ninst, void* fnc, int reg, int ret, int sav
             LD_D(xRIP, xEmu, offsetof(x64emu_t, ip));
 #undef GO
     }
-    REGENERATE_MASK();
 
     fpu_popcache(dyn, ninst, reg, 0);
     if (saveflags) {
diff --git a/src/dynarec/la64/dynarec_la64_helper.h b/src/dynarec/la64/dynarec_la64_helper.h
index 2bfff82a..573c115b 100644
--- a/src/dynarec/la64/dynarec_la64_helper.h
+++ b/src/dynarec/la64/dynarec_la64_helper.h
@@ -1141,12 +1141,6 @@ uintptr_t dynarec64_F20F(dynarec_la64_t* dyn, uintptr_t addr, uintptr_t ip, int
         }                                  \
     } while (0)
 
-#define REGENERATE_MASK()       \
-    do {                        \
-        ADDI_W(xMASK, xZR, -1); \
-        LU32I_D(xMASK, 0);      \
-    } while (0)
-
 #define PURGE_YMM()    /* TODO */
 
 #endif //__DYNAREC_LA64_HELPER_H__
diff --git a/src/dynarec/la64/la64_emitter.h b/src/dynarec/la64/la64_emitter.h
index bbc500b9..c243d026 100644
--- a/src/dynarec/la64/la64_emitter.h
+++ b/src/dynarec/la64/la64_emitter.h
@@ -79,8 +79,7 @@ f24-f31  fs0-fs7   Static registers                Callee
 #define x4      8
 #define x5      9
 #define x6      10
-// used to clear the upper 32bits
-#define xMASK   11
+#define x7      11
 // 32bits version of scratch
 #define w1      x1
 #define w2      x2
@@ -498,7 +497,8 @@ f24-f31  fs0-fs7   Static registers                Callee
 #define BSTRPICK_D(rd, rj, msbd6, lsbd6) EMIT(type_2RI12(0b0000000011, ((msbd6) & 0x3F) << 6 | ((lsbd6) & 0x3F), rj, rd))
 
 // ZERO the upper part
-#define ZEROUP(rd) BSTRINS_D(rd, xZR, 63, 32);
+#define ZEROUP(rd)      BSTRPICK_D(rd, rd, 31, 0)
+#define ZEROUP2(rd, rj) BSTRPICK_D(rd, rj, 31, 0)
 
 #define CLO_W(rd, rj)     EMIT(type_2R(0b0000000000000000000100, rj, rd))
 #define CLZ_W(rd, rj)     EMIT(type_2R(0b0000000000000000000101, rj, rd))
@@ -2025,23 +2025,23 @@ LSX instruction starts with V, LASX instruction starts with XV.
 // rd[63:0] = rj[63:0] (pseudo instruction)
 #define MV(rd, rj) ADDI_D(rd, rj, 0)
 // rd = rj (pseudo instruction)
-#define MVxw(rd, rj)            \
-    do {                        \
-        if (rex.w) {            \
-            MV(rd, rj);         \
-        } else {                \
-            AND(rd, rj, xMASK); \
-        }                       \
+#define MVxw(rd, rj)         \
+    do {                     \
+        if (rex.w) {         \
+            MV(rd, rj);      \
+        } else {             \
+            ZEROUP2(rd, rj); \
+        }                    \
     } while (0)
 
 // rd = rj (pseudo instruction)
-#define MVz(rd, rj)             \
-    do {                        \
-        if (rex.is32bits) {     \
-            AND(rd, rj, xMASK); \
-        } else {                \
-            MV(rd, rj);         \
-        }                       \
+#define MVz(rd, rj)          \
+    do {                     \
+        if (rex.is32bits) {  \
+            ZEROUP2(rd, rj); \
+        } else {             \
+            MV(rd, rj);      \
+        }                    \
     } while (0)
 
 #define ADDIxw(rd, rj, imm12)      \
diff --git a/src/dynarec/la64/la64_printer.c b/src/dynarec/la64/la64_printer.c
index ba9264fb..bc314bd6 100644
--- a/src/dynarec/la64/la64_printer.c
+++ b/src/dynarec/la64/la64_printer.c
@@ -6,7 +6,7 @@
 #include "la64_printer.h"
 #include "debug.h"
 
-static const char* Xt[] = { "xZR", "r1", "r2", "sp", "xEmu", "x1_r5", "x2_r6", "x3_r7", "x4_r8", "x5_r9", "x6_r10", "xMASK_r11", "xRAX_r12", "xRCX_r13", "xRDX_r14", "xRBX_r15", "xRSP_r16", "xRBP_r17", "xRSI_r18", "xRDI_r19", "xRIP_r20", "r21", "r22", "xR8_r23", "xR9_r24", "xR10_r25", "xR11_r26", "xR12_r27", "xR13_r28", "xR14_r29", "xR15_r30", "xFlags_r31" };
+static const char* Xt[] = { "xZR", "r1", "r2", "sp", "xEmu", "x1_r5", "x2_r6", "x3_r7", "x4_r8", "x5_r9", "x6_r10", "x7_r11", "xRAX_r12", "xRCX_r13", "xRDX_r14", "xRBX_r15", "xRSP_r16", "xRBP_r17", "xRSI_r18", "xRDI_r19", "xRIP_r20", "r21", "r22", "xR8_r23", "xR9_r24", "xR10_r25", "xR11_r26", "xR12_r27", "xR13_r28", "xR14_r29", "xR15_r30", "xFlags_r31" };
 static const char* Ft[] = { "fa0", "fa1", "fa2", "fa3", "fa4", "fa5", "fa6", "fa7", "ft0", "ft1", "ft2", "ft3", "ft4", "ft5", "ft6", "ft7", "ft8", "ft9", "ft10", "ft11", "ft12", "ft13", "ft14", "ft15", "fs0", "fs1", "fs2", "fs3", "fs4", "fs5", "fs6", "fs7" };
 static const char* Vt[] = { "vra0", "vra1", "vra2", "vra3", "vra4", "vra5", "vra6", "vra7", "vrt0", "vrt1", "vrt2", "vrt3", "vrt4", "vrt5", "vrt6", "vrt7", "vrt8", "vrt9", "vrt10", "vrt11", "vrt12", "vrt13", "vrt14", "vrt15", "vrs0", "vrs1", "vrs2", "vrs3", "vrs4", "vrs5", "vrs6", "vrs7" };
 
diff --git a/src/dynarec/la64/la64_prolog.S b/src/dynarec/la64/la64_prolog.S
index d0faa1e2..1e6263bb 100644
--- a/src/dynarec/la64/la64_prolog.S
+++ b/src/dynarec/la64/la64_prolog.S
@@ -64,8 +64,5 @@ la64_prolog:
     addi.d $sp,  $sp, -16
     // save old sp into xSavedSP
     addi.d $r22, $sp, 16
-    // setup xMASK
-    addi.w  $r11, $r0, -1
-    lu32i.d $r11, 0
     //jump to function
     jirl   $r0,  $a1, 0