about summary refs log tree commit diff stats
path: root/src
diff options
context:
space:
mode:
author    ptitSeb <sebastien.chev@gmail.com>  2021-03-18 12:14:45 +0100
committer ptitSeb <sebastien.chev@gmail.com>  2021-03-18 12:14:45 +0100
commit    f08bc7846ac9c9e5756ccaa9ef851b0dbfe13860 (patch)
tree      4bbe2c3cc8a7c5a8c4c45fc96712c9b18813b20b /src
parent    bac90cfe573465c8020b36da9dbd1665cc6ef185 (diff)
download  box64-f08bc7846ac9c9e5756ccaa9ef851b0dbfe13860.tar.gz
          box64-f08bc7846ac9c9e5756ccaa9ef851b0dbfe13860.zip
Added 31/33/35 XOR opcodes
Diffstat (limited to 'src')
-rwxr-xr-xsrc/dynarec/arm64_emitter.h1
-rwxr-xr-xsrc/dynarec/dynarec_arm64_00.c26
-rwxr-xr-xsrc/dynarec/dynarec_arm64_emit_logic.c694
-rwxr-xr-xsrc/dynarec/dynarec_arm64_emit_math.c2
-rwxr-xr-xsrc/dynarec/dynarec_arm64_helper.h12
5 files changed, 728 insertions, 7 deletions
diff --git a/src/dynarec/arm64_emitter.h b/src/dynarec/arm64_emitter.h
index 0d182425..89b32c0c 100755
--- a/src/dynarec/arm64_emitter.h
+++ b/src/dynarec/arm64_emitter.h
@@ -320,6 +320,7 @@
 #define BICxw_REG   BICxw
 #define TSTx_REG(Rn, Rm)                ANDSx_REG(xZR, Rn, Rm)
 #define TSTw_REG(Rn, Rm)                ANDSw_REG(wZR, Rn, Rm)
+#define TSTxw_REG(Rn, Rm)               ANDSxw_REG(xZR, Rn, Rm)
 
 
 // BFI
diff --git a/src/dynarec/dynarec_arm64_00.c b/src/dynarec/dynarec_arm64_00.c
index 6b5ebbf9..27964f60 100755
--- a/src/dynarec/dynarec_arm64_00.c
+++ b/src/dynarec/dynarec_arm64_00.c
@@ -109,6 +109,32 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
             emit_sub32c(dyn, ninst, rex, xRAX, i32, x3, x4, x5);
             break;
 
+        case 0x31:
+            INST_NAME("XOR Ed, Gd");
+            SETFLAGS(X_ALL, SF_SET);
+            nextop = F8;
+            GETGD;
+            GETED(0);
+            emit_xor32(dyn, ninst, rex, ed, gd, x3, x4);
+            WBACK;
+            break;
+
+        case 0x33:
+            INST_NAME("XOR Gd, Ed");
+            SETFLAGS(X_ALL, SF_SET);
+            nextop = F8;
+            GETGD;
+            GETED(0);
+            emit_xor32(dyn, ninst, rex, gd, ed, x3, x4);
+            break;
+
+        case 0x35:
+            INST_NAME("XOR EAX, Id");
+            SETFLAGS(X_ALL, SF_SET);
+            i32 = F32S;
+            emit_xor32c(dyn, ninst, rex, xRAX, i32, x3, x4);
+            break;
+
         case 0x50:
         case 0x51:
         case 0x52:
diff --git a/src/dynarec/dynarec_arm64_emit_logic.c b/src/dynarec/dynarec_arm64_emit_logic.c
new file mode 100755
index 00000000..d15d0088
--- /dev/null
+++ b/src/dynarec/dynarec_arm64_emit_logic.c
@@ -0,0 +1,694 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <stddef.h>
+#include <pthread.h>
+#include <errno.h>
+
+#include "debug.h"
+#include "box64context.h"
+#include "dynarec.h"
+#include "emu/x64emu_private.h"
+#include "emu/x64run_private.h"
+#include "x64run.h"
+#include "x64emu.h"
+#include "box64stack.h"
+#include "callback.h"
+#include "emu/x64run_private.h"
+#include "x64trace.h"
+#include "dynarec_arm64.h"
+#include "dynarec_arm64_private.h"
+#include "arm64_printer.h"
+#include "../tools/bridge_private.h"
+
+#include "dynarec_arm64_functions.h"
+#include "dynarec_arm64_helper.h"
+
+// emit OR32 instruction, from s1 , s2, store result in s1 using s3 and s4 as scratch
+void emit_or32(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int s2, int s3, int s4)
+{
+    // If flag computation is deferred (X_PEND): stash both operands and the
+    // operation tag in the emu context so flags can be recomputed later.
+    // Otherwise, if any flag is wanted now, clear the pending-flags marker.
+    IFX(X_PEND) {
+        STRxw_U12(s1, xEmu, offsetof(x64emu_t, op1));
+        STRxw_U12(s2, xEmu, offsetof(x64emu_t, op2));
+        SET_DF(s4, rex.w?d_or64:d_or32);   // tag picks 64- vs 32-bit variant per REX.W
+    } else IFX(X_ALL) {
+        SET_DFNONE(s4);
+    }
+    ORRxw_REG(s1, s1, s2);                 // the actual OR: s1 |= s2 (width follows rex.w)
+    IFX(X_PEND) {
+        STRxw_U12(s1, xEmu, offsetof(x64emu_t, res));
+    }
+    // Clear ZF/CF/AF/OF in the emulated eflags; ZF/SF/PF are re-derived from
+    // the result below (CF/OF stay cleared, AF is left cleared).
+    IFX(X_CF | X_AF | X_ZF | X_OF) {
+        MOV32w(s3, (1<<F_ZF)|(1<<F_CF)|(1<<F_AF)|(1<<F_OF));
+        BICw(xFlags, xFlags, s3);
+    }
+    IFX(X_ZF) {
+        TSTxw_REG(s1, s1);
+        Bcond(cNE, +8);                    // skip ZF set when result is non-zero
+        ORRw_mask(xFlags, xFlags, 0b011010, 0); // mask=0x40
+    }
+    IFX(X_SF) {
+        LSRxw(s3, s1, (rex.w)?63:31);      // isolate the result's sign bit
+        BFIx(xFlags, s3, F_SF, 1);
+    }
+    IFX(X_PF) {
+        emit_pf(dyn, ninst, s1, s3, s4);   // parity flag from result, via helper
+    }
+}
+
+// emit OR32 instruction, from s1 , constant c, store result in s1 using s3 and s4 as scratch
+void emit_or32c(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int64_t c, int s3, int s4)
+{
+    // Deferred-flags path: materialize the constant and stash both operands
+    // plus the operation tag; otherwise clear the pending-flags marker.
+    IFX(X_PEND) {
+        MOV64xw(s3, c);
+        STRxw_U12(s1, xEmu, offsetof(x64emu_t, op1));
+        STRxw_U12(s3, xEmu, offsetof(x64emu_t, op2));
+        SET_DF(s4, rex.w?d_or64:d_or32);   // 64- vs 32-bit variant per REX.W
+    } else IFX(X_ALL) {
+        SET_DFNONE(s4);
+    }
+    // The X_PEND block already loaded c into s3; load it only otherwise.
+    IFX(X_PEND) {} else {MOV64xw(s3, c);}
+    ORRxw_REG(s1, s1, s3);                 // s1 |= c
+    IFX(X_PEND) {
+        STRxw_U12(s1, xEmu, offsetof(x64emu_t, res));
+    }
+    // Clear ZF/CF/AF/OF; ZF/SF/PF re-derived from the result below.
+    IFX(X_CF | X_AF | X_ZF | X_OF) {
+        MOV32w(s3, (1<<F_ZF)|(1<<F_CF)|(1<<F_AF)|(1<<F_OF));
+        BICw(xFlags, xFlags, s3);
+    }
+    IFX(X_ZF) {
+        TSTxw_REG(s1, s1);
+        Bcond(cNE, +8);                    // skip ZF set when result is non-zero
+        ORRw_mask(xFlags, xFlags, 0b011010, 0); // mask=0x40
+    }
+    IFX(X_SF) {
+        LSRxw(s3, s1, (rex.w)?63:31);      // isolate the result's sign bit
+        BFIx(xFlags, s3, F_SF, 1);
+    }
+    IFX(X_PF) {
+        emit_pf(dyn, ninst, s1, s3, s4);
+    }
+}
+
+// emit XOR32 instruction, from s1 , s2, store result in s1 using s3 and s4 as scratch
+void emit_xor32(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int s2, int s3, int s4)
+{
+    // Deferred-flags path: stash operands and the op tag for later flag
+    // recomputation; otherwise clear the pending-flags marker.
+    IFX(X_PEND) {
+        STRxw_U12(s1, xEmu, offsetof(x64emu_t, op1));
+        STRxw_U12(s2, xEmu, offsetof(x64emu_t, op2));
+        SET_DF(s4, rex.w?d_xor64:d_xor32); // 64- vs 32-bit variant per REX.W
+    } else IFX(X_ALL) {
+        SET_DFNONE(s4);
+    }
+    EORxw_REG(s1, s1, s2);                 // the actual XOR: s1 ^= s2
+    IFX(X_PEND) {
+        STRxw_U12(s1, xEmu, offsetof(x64emu_t, res));
+    }
+    // Clear ZF/CF/AF/OF; ZF/SF/PF re-derived from the result below.
+    IFX(X_CF | X_AF | X_ZF | X_OF) {
+        MOV32w(s3, (1<<F_ZF)|(1<<F_CF)|(1<<F_AF)|(1<<F_OF));
+        BICw(xFlags, xFlags, s3);
+    }
+    IFX(X_ZF) {
+        TSTxw_REG(s1, s1);
+        Bcond(cNE, +8);                    // skip ZF set when result is non-zero
+        ORRw_mask(xFlags, xFlags, 0b011010, 0); // mask=0x40
+    }
+    IFX(X_SF) {
+        LSRxw(s3, s1, (rex.w)?63:31);      // isolate the result's sign bit
+        BFIx(xFlags, s3, F_SF, 1);
+    }
+    IFX(X_PF) {
+        emit_pf(dyn, ninst, s1, s3, s4);
+    }
+}
+
+// emit XOR32 instruction, from s1 , constant c, store result in s1 using s3 and s4 as scratch
+void emit_xor32c(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int64_t c, int s3, int s4)
+{
+    // Deferred-flags path: materialize the constant and stash both operands
+    // plus the operation tag; otherwise clear the pending-flags marker.
+    IFX(X_PEND) {
+        MOV64xw(s3, c);
+        STRxw_U12(s1, xEmu, offsetof(x64emu_t, op1));
+        STRxw_U12(s3, xEmu, offsetof(x64emu_t, op2));
+        SET_DF(s4, rex.w?d_xor64:d_xor32); // 64- vs 32-bit variant per REX.W
+    } else IFX(X_PEND) {} else IFX(X_ALL) {
+        SET_DFNONE(s4);
+    }
+    // Load c only if the X_PEND block above did not already do it.
+    // Fixed: use MOV64xw (width follows rex.w) instead of MOV64x, matching the
+    // X_PEND branch and the sibling emit_or32c/emit_and32c emitters.
+    IFX(X_PEND) {} else {MOV64xw(s3, c);}
+    EORxw_REG(s1, s1, s3);                 // s1 ^= c
+    IFX(X_PEND) {
+        STRxw_U12(s1, xEmu, offsetof(x64emu_t, res));
+    }
+    // Clear ZF/CF/AF/OF; ZF/SF/PF re-derived from the result below.
+    IFX(X_CF | X_AF | X_ZF | X_OF) {
+        MOV32w(s3, (1<<F_ZF)|(1<<F_CF)|(1<<F_AF)|(1<<F_OF));
+        BICw(xFlags, xFlags, s3);
+    }
+    IFX(X_ZF) {
+        TSTxw_REG(s1, s1);
+        Bcond(cNE, +8);                    // skip ZF set when result is non-zero
+        ORRw_mask(xFlags, xFlags, 0b011010, 0); // mask=0x40
+    }
+    IFX(X_SF) {
+        LSRxw(s3, s1, (rex.w)?63:31);      // isolate the result's sign bit
+        BFIx(xFlags, s3, F_SF, 1);
+    }
+    IFX(X_PF) {
+        emit_pf(dyn, ninst, s1, s3, s4);
+    }
+}
+
+// emit AND32 instruction, from s1 , s2, store result in s1 using s3 and s4 as scratch
+void emit_and32(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int s2, int s3, int s4)
+{
+    // Deferred-flags path: stash operands and the op tag for later flag
+    // recomputation; otherwise clear the pending-flags marker.
+    IFX(X_PEND) {
+        STRxw_U12(s1, xEmu, offsetof(x64emu_t, op1));
+        STRxw_U12(s2, xEmu, offsetof(x64emu_t, op2));
+        SET_DF(s4, rex.w?d_and64:d_and32); // 64- vs 32-bit variant per REX.W
+    } else IFX(X_ALL) {
+        SET_DFNONE(s4);
+    }
+    IFX(X_ALL) {
+        ANDSxw_REG(s1, s1, s2);   // flag-setting AND: NZ feeds the ZF test below
+    } else {
+        // Fixed: was ANDxw_REG(s1, s2, s2), i.e. s1 = s2 & s2, which discarded
+        // the first operand entirely. Must be s1 = s1 & s2.
+        ANDxw_REG(s1, s1, s2);
+    }
+    IFX(X_PEND) {
+        STRxw_U12(s1, xEmu, offsetof(x64emu_t, res));
+    }
+    // Clear ZF/CF/AF/OF; ZF/SF/PF re-derived from the result below.
+    // (MOV32w/BICw do not touch NZCV, so the ANDS flags survive to the Bcond.)
+    IFX(X_CF | X_AF | X_ZF | X_OF) {
+        MOV32w(s3, (1<<F_ZF)|(1<<F_CF)|(1<<F_AF)|(1<<F_OF));
+        BICw(xFlags, xFlags, s3);
+    }
+    IFX(X_ZF) {
+        // Uses NZ set by ANDSxw_REG above (X_ZF implies the X_ALL path ran).
+        Bcond(cNE, +8);           // skip ZF set when result is non-zero
+        ORRw_mask(xFlags, xFlags, 0b011010, 0); // mask=0x40
+    }
+    IFX(X_SF) {
+        LSRxw(s3, s1, (rex.w)?63:31);      // isolate the result's sign bit
+        BFIx(xFlags, s3, F_SF, 1);
+    }
+    IFX(X_PF) {
+        emit_pf(dyn, ninst, s1, s3, s4);
+    }
+}
+
+// emit AND32 instruction, from s1 , constant c, store result in s1 using s3 and s4 as scratch
+void emit_and32c(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int64_t c, int s3, int s4)
+{
+    // Deferred-flags path: materialize the constant and stash both operands
+    // plus the operation tag; otherwise clear the pending-flags marker.
+    IFX(X_PEND) {
+        MOV64xw(s3, c);
+        STRxw_U12(s1, xEmu, offsetof(x64emu_t, op1));
+        STRxw_U12(s3, xEmu, offsetof(x64emu_t, op2));
+        SET_DF(s4, rex.w?d_and64:d_and32); // 64- vs 32-bit variant per REX.W
+    } else IFX(X_ALL) {
+        SET_DFNONE(s4);
+    }
+    // Load c only if the X_PEND block above did not already do it.
+    IFX(X_PEND) {} else {MOV64xw(s3, c);}
+    IFX(X_ALL) {
+        ANDSxw_REG(s1, s1, s3);   // flag-setting AND: NZ feeds the ZF test below
+    } else {
+        ANDxw_REG(s1, s1, s3);    // plain AND when no flags are needed
+    }
+    IFX(X_PEND) {
+        STRxw_U12(s1, xEmu, offsetof(x64emu_t, res));
+    }
+    // Clear ZF/CF/AF/OF; ZF/SF/PF re-derived from the result below.
+    // (MOV32w/BICw do not touch NZCV, so the ANDS flags survive to the Bcond.)
+    IFX(X_CF | X_AF | X_ZF | X_OF) {
+        MOV32w(s3, (1<<F_ZF)|(1<<F_CF)|(1<<F_AF)|(1<<F_OF));
+        BICw(xFlags, xFlags, s3);
+    }
+    IFX(X_ZF) {
+        // Uses NZ set by ANDSxw_REG above (X_ZF implies the X_ALL path ran).
+        Bcond(cNE, +8);           // skip ZF set when result is non-zero
+        ORRw_mask(xFlags, xFlags, 0b011010, 0); // mask=0x40
+    }
+    IFX(X_SF) {
+        LSRxw(s3, s1, (rex.w)?63:31);      // isolate the result's sign bit
+        BFIx(xFlags, s3, F_SF, 1);
+    }
+    IFX(X_PF) {
+        emit_pf(dyn, ninst, s1, s3, s4);
+    }
+}
+
+// emit OR8 instruction, from s1 , s2, store result in s1 using s3 and s4 as scratch, s4 can be same as s2 (and so s2 destroyed)
+//void emit_or8(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3, int s4)
+//{
+//    IFX(X_PEND) {
+//        STR_IMM9(s1, xEmu, offsetof(x64emu_t, op1));
+//        STR_IMM9(s2, xEmu, offsetof(x64emu_t, op2));
+//        SET_DF(s3, d_or8);
+//    } else IFX(X_ALL) {
+//        SET_DFNONE(s3);
+//    }
+//    IFX(X_ALL) {
+//        ORRS_REG_LSL_IMM5(s1, s1, s2, 0);
+//    } else {
+//        ORR_REG_LSL_IMM5(s1, s1, s2, 0);
+//    }
+//    IFX(X_PEND) {
+//        STR_IMM9(s1, xEmu, offsetof(x64emu_t, res));
+//    }
+//    IFX(X_CF | X_AF | X_ZF) {
+//        BIC_IMM8(xFlags, xFlags, (1<<F_CF)|(1<<F_AF)|(1<<F_ZF), 0);
+//    }
+//    IFX(X_OF) {
+//        BIC_IMM8(xFlags, xFlags, 0b10, 0x0b);
+//    }
+//    IFX(X_ZF) {
+//        ORR_IMM8_COND(cEQ, xFlags, xFlags, 1<<F_ZF, 0);
+//    }
+//    IFX(X_SF) {
+//        MOV_REG_LSR_IMM5(s3, s1, 7);
+//        BFI(xFlags, s3, F_SF, 1);
+//    }
+//    IFX(X_PF) {
+//        emit_pf(dyn, ninst, s1, s3, s4);
+//    }
+//}
+
+// emit OR8 instruction, from s1 , constant c, store result in s1 using s3 and s4 as scratch
+//void emit_or8c(dynarec_arm_t* dyn, int ninst, int s1, int32_t c, int s3, int s4)
+//{
+//    IFX(X_PEND) {
+//        MOV32(s3, c&0xff);
+//        STR_IMM9(s1, xEmu, offsetof(x64emu_t, op1));
+//        STR_IMM9(s3, xEmu, offsetof(x64emu_t, op2));
+//        SET_DF(s4, d_or8);
+//    } else IFX(X_ALL) {
+//        SET_DFNONE(s4);
+//    }
+//    IFX(X_ALL) {
+//        ORRS_IMM8(s1, s1, c, 0);
+//    } else {
+//        ORR_IMM8(s1, s1, c, 0);
+//    }
+//    IFX(X_PEND) {
+//        STR_IMM9(s1, xEmu, offsetof(x64emu_t, res));
+//    }
+//    IFX(X_CF | X_AF | X_ZF) {
+//        BIC_IMM8(xFlags, xFlags, (1<<F_CF)|(1<<F_AF)|(1<<F_ZF), 0);
+//    }
+//    IFX(X_OF) {
+//        BIC_IMM8(xFlags, xFlags, 0b10, 0x0b);
+//    }
+//    IFX(X_ZF) {
+//        ORR_IMM8_COND(cEQ, xFlags, xFlags, 1<<F_ZF, 0);
+//    }
+//    IFX(X_SF) {
+//        MOV_REG_LSR_IMM5(s3, s1, 7);
+//        BFI(xFlags, s3, F_SF, 1);
+//    }
+//    IFX(X_PF) {
+//        emit_pf(dyn, ninst, s1, s3, s4);
+//    }
+//}
+
+// emit XOR8 instruction, from s1 , s2, store result in s1 using s3 and s4 as scratch, s4 can be same as s2 (and so s2 destroyed)
+//void emit_xor8(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3, int s4)
+//{
+//    IFX(X_PEND) {
+//        STR_IMM9(s1, xEmu, offsetof(x64emu_t, op1));
+//        STR_IMM9(s2, xEmu, offsetof(x64emu_t, op2));
+//        SET_DF(s3, d_xor8);
+//    } else IFX(X_ALL) {
+//        SET_DFNONE(s3);
+//    }
+//    IFX(X_ALL) {
+//        XORS_REG_LSL_IMM5(s1, s1, s2, 0);
+//    } else {
+//        XOR_REG_LSL_IMM5(s1, s1, s2, 0);
+//    }
+//    IFX(X_PEND) {
+//        STR_IMM9(s1, xEmu, offsetof(x64emu_t, res));
+//    }
+//    IFX(X_CF | X_AF | X_ZF) {
+//        BIC_IMM8(xFlags, xFlags, (1<<F_CF)|(1<<F_AF)|(1<<F_ZF), 0);
+//    }
+//    IFX(X_OF) {
+//        BIC_IMM8(xFlags, xFlags, 0b10, 0x0b);
+//    }
+//    IFX(X_ZF) {
+//        ORR_IMM8_COND(cEQ, xFlags, xFlags, 1<<F_ZF, 0);
+//    }
+//    IFX(X_SF) {
+//        MOV_REG_LSR_IMM5(s3, s1, 7);
+//        BFI(xFlags, s3, F_SF, 1);
+//    }
+//    IFX(X_PF) {
+//        emit_pf(dyn, ninst, s1, s3, s4);
+//    }
+//}
+
+// emit XOR8 instruction, from s1 , constant c, store result in s1 using s3 and s4 as scratch
+//void emit_xor8c(dynarec_arm_t* dyn, int ninst, int s1, int32_t c, int s3, int s4)
+//{
+//    IFX(X_PEND) {
+//        MOV32(s3, c&0xff);
+//        STR_IMM9(s1, xEmu, offsetof(x64emu_t, op1));
+//        STR_IMM9(s3, xEmu, offsetof(x64emu_t, op2));
+//        SET_DF(s4, d_xor8);
+//    } else IFX(X_ALL) {
+//        SET_DFNONE(s4);
+//    }
+//    IFX(X_ALL) {
+//        XORS_IMM8(s1, s1, c);
+//    } else {
+//        XOR_IMM8(s1, s1, c);
+//    }
+//    IFX(X_PEND) {
+//        STR_IMM9(s1, xEmu, offsetof(x64emu_t, res));
+//    }
+//    IFX(X_CF | X_AF | X_ZF) {
+//        BIC_IMM8(xFlags, xFlags, (1<<F_CF)|(1<<F_AF)|(1<<F_ZF), 0);
+//    }
+//    IFX(X_OF) {
+//        BIC_IMM8(xFlags, xFlags, 0b10, 0x0b);
+//    }
+//    IFX(X_ZF) {
+//        ORR_IMM8_COND(cEQ, xFlags, xFlags, 1<<F_ZF, 0);
+//    }
+//    IFX(X_SF) {
+//        MOV_REG_LSR_IMM5(s3, s1, 7);
+//        BFI(xFlags, s3, F_SF, 1);
+//    }
+//    IFX(X_PF) {
+//        emit_pf(dyn, ninst, s1, s3, s4);
+//    }
+//}
+
+// emit AND8 instruction, from s1 , s2, store result in s1 using s3 and s4 as scratch, s4 can be same as s2 (and so s2 destroyed)
+//void emit_and8(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3, int s4)
+//{
+//    IFX(X_PEND) {
+//        STR_IMM9(s1, xEmu, offsetof(x64emu_t, op1));
+//        STR_IMM9(s2, xEmu, offsetof(x64emu_t, op2));
+//        SET_DF(s3, d_and8);
+//    } else IFX(X_ALL) {
+//        SET_DFNONE(s3);
+//    }
+//    IFX(X_ALL) {
+//        ANDS_REG_LSL_IMM5(s1, s1, s2, 0);
+//    } else {
+//        AND_REG_LSL_IMM5(s1, s1, s2, 0);
+//    }
+//    IFX(X_PEND) {
+//        STR_IMM9(s1, xEmu, offsetof(x64emu_t, res));
+//    }
+//    IFX(X_CF | X_AF | X_ZF) {
+//        BIC_IMM8(xFlags, xFlags, (1<<F_CF)|(1<<F_AF)|(1<<F_ZF), 0);
+//    }
+//    IFX(X_OF) {
+//        BIC_IMM8(xFlags, xFlags, 0b10, 0x0b);
+//    }
+//    IFX(X_ZF) {
+//        ORR_IMM8_COND(cEQ, xFlags, xFlags, 1<<F_ZF, 0);
+//    }
+//    IFX(X_SF) {
+//        MOV_REG_LSR_IMM5(s3, s1, 7);
+//        BFI(xFlags, s3, F_SF, 1);
+//    }
+//    IFX(X_PF) {
+//        emit_pf(dyn, ninst, s1, s3, s4);
+//    }
+//}
+
+// emit AND8 instruction, from s1 , constant c, store result in s1 using s3 and s4 as scratch
+//void emit_and8c(dynarec_arm_t* dyn, int ninst, int s1, int32_t c, int s3, int s4)
+//{
+//    IFX(X_PEND) {
+//        MOV32(s3, c&0xff);
+//        STR_IMM9(s1, xEmu, offsetof(x64emu_t, op1));
+//        STR_IMM9(s3, xEmu, offsetof(x64emu_t, op2));
+//        SET_DF(s4, d_and8);
+//    } else IFX(X_ALL) {
+//        SET_DFNONE(s4);
+//    }
+//    IFX(X_ALL) {
+//        ANDS_IMM8(s1, s1, c);
+//    } else {
+//        AND_IMM8(s1, s1, c);
+//    }
+//    IFX(X_PEND) {
+//        STR_IMM9(s1, xEmu, offsetof(x64emu_t, res));
+//    }
+//    IFX(X_CF | X_AF | X_ZF) {
+//        BIC_IMM8(xFlags, xFlags, (1<<F_CF)|(1<<F_AF)|(1<<F_ZF), 0);
+//    }
+//    IFX(X_OF) {
+//        BIC_IMM8(xFlags, xFlags, 0b10, 0x0b);
+//    }
+//    IFX(X_ZF) {
+//        ORR_IMM8_COND(cEQ, xFlags, xFlags, 1<<F_ZF, 0);
+//    }
+//    IFX(X_SF) {
+//        MOV_REG_LSR_IMM5(s3, s1, 7);
+//        BFI(xFlags, s3, F_SF, 1);
+//    }
+//    IFX(X_PF) {
+//        emit_pf(dyn, ninst, s1, s3, s4);
+//    }
+//}
+
+
+// emit OR16 instruction, from s1 , s2, store result in s1 using s3 and s4 as scratch, s4 can be same as s2 (and so s2 destroyed)
+//void emit_or16(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3, int s4)
+//{
+//    IFX(X_PEND) {
+//        STR_IMM9(s1, xEmu, offsetof(x64emu_t, op1));
+//        STR_IMM9(s2, xEmu, offsetof(x64emu_t, op2));
+//        SET_DF(s3, d_or16);
+//    } else IFX(X_ALL) {
+//        SET_DFNONE(s3);
+//    }
+//    IFX(X_ALL) {
+//        ORRS_REG_LSL_IMM5(s1, s1, s2, 0);
+//    } else {
+//        ORR_REG_LSL_IMM5(s1, s1, s2, 0);
+//    }
+//    IFX(X_PEND) {
+//        STR_IMM9(s1, xEmu, offsetof(x64emu_t, res));
+//    }
+//    IFX(X_CF | X_AF | X_ZF) {
+//        BIC_IMM8(xFlags, xFlags, (1<<F_CF)|(1<<F_AF)|(1<<F_ZF), 0);
+//    }
+//    IFX(X_OF) {
+//        BIC_IMM8(xFlags, xFlags, 0b10, 0x0b);
+//    }
+//    IFX(X_ZF) {
+//        ORR_IMM8_COND(cEQ, xFlags, xFlags, 1<<F_ZF, 0);
+//    }
+//    IFX(X_SF) {
+//        MOV_REG_LSR_IMM5(s3, s1, 15);
+//        BFI(xFlags, s3, F_SF, 1);
+//    }
+//    IFX(X_PF) {
+//        emit_pf(dyn, ninst, s1, s3, s4);
+//    }
+//}
+
+// emit OR16 instruction, from s1 , constant c, store result in s1 using s3 and s4 as scratch
+//void emit_or16c(dynarec_arm_t* dyn, int ninst, int s1, int32_t c, int s3, int s4)
+//{
+//    IFX(X_PEND) {
+//        MOVW(s3, c);
+//        STR_IMM9(s1, xEmu, offsetof(x64emu_t, op1));
+//        STR_IMM9(s3, xEmu, offsetof(x64emu_t, op2));
+//        SET_DF(s4, d_or16);
+//    } else IFX(X_ALL) {
+//        SET_DFNONE(s4);
+//    }
+//    if(c>=0 && c<256) {
+//        IFX(X_ALL) {
+//            ORRS_IMM8(s1, s1, c, 0);
+//        } else {
+//            ORR_IMM8(s1, s1, c, 0);
+//        }
+//    } else {
+//        IFX(X_PEND) {} else {MOVW(s3, c);}
+//        IFX(X_ALL) {
+//            ORRS_REG_LSL_IMM5(s1, s1, s3, 0);
+//        } else {
+//            ORR_REG_LSL_IMM5(s1, s1, s3, 0);
+//        }
+//    }
+//    IFX(X_PEND) {
+//        STR_IMM9(s1, xEmu, offsetof(x64emu_t, res));
+//    }
+//    IFX(X_CF | X_AF | X_ZF) {
+//        BIC_IMM8(xFlags, xFlags, (1<<F_CF)|(1<<F_AF)|(1<<F_ZF), 0);
+//    }
+//    IFX(X_OF) {
+//        BIC_IMM8(xFlags, xFlags, 0b10, 0x0b);
+//    }
+//    IFX(X_ZF) {
+//        ORR_IMM8_COND(cEQ, xFlags, xFlags, 1<<F_ZF, 0);
+//    }
+//    IFX(X_SF) {
+//        MOV_REG_LSR_IMM5(s3, s1, 15);
+//        BFI(xFlags, s3, F_SF, 1);
+//    }
+//    IFX(X_PF) {
+//        emit_pf(dyn, ninst, s1, s3, s4);
+//    }
+//}
+
+// emit XOR16 instruction, from s1 , s2, store result in s1 using s3 and s4 as scratch, s4 can be same as s2 (and so s2 destroyed)
+//void emit_xor16(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3, int s4)
+//{
+//    IFX(X_PEND) {
+//        STR_IMM9(s1, xEmu, offsetof(x64emu_t, op1));
+//        STR_IMM9(s2, xEmu, offsetof(x64emu_t, op2));
+//        SET_DF(s3, d_xor16);
+//    } else IFX(X_ALL) {
+//        SET_DFNONE(s3);
+//    }
+//    IFX(X_ALL) {
+//        XORS_REG_LSL_IMM5(s1, s1, s2, 0);
+//    } else {
+//        XOR_REG_LSL_IMM5(s1, s1, s2, 0);
+//    }
+//    IFX(X_PEND) {
+//        STR_IMM9(s1, xEmu, offsetof(x64emu_t, res));
+//    }
+//    IFX(X_CF | X_AF | X_ZF) {
+//        BIC_IMM8(xFlags, xFlags, (1<<F_CF)|(1<<F_AF)|(1<<F_ZF), 0);
+//    }
+//    IFX(X_OF) {
+//        BIC_IMM8(xFlags, xFlags, 0b10, 0x0b);
+//    }
+//    IFX(X_ZF) {
+//        ORR_IMM8_COND(cEQ, xFlags, xFlags, 1<<F_ZF, 0);
+//    }
+//    IFX(X_SF) {
+//        MOV_REG_LSR_IMM5(s3, s1, 15);
+//        BFI(xFlags, s3, F_SF, 1);
+//    }
+//    IFX(X_PF) {
+//        emit_pf(dyn, ninst, s1, s3, s4);
+//    }
+//}
+
+// emit XOR16 instruction, from s1 , constant c, store result in s1 using s3 and s4 as scratch
+//void emit_xor16c(dynarec_arm_t* dyn, int ninst, int s1, int32_t c, int s3, int s4)
+//{
+//    IFX(X_PEND) {
+//        MOVW(s3, c);
+//        STR_IMM9(s1, xEmu, offsetof(x64emu_t, op1));
+//        STR_IMM9(s3, xEmu, offsetof(x64emu_t, op2));
+//        SET_DF(s4, d_xor16);
+//    } else IFX(X_ALL) {
+//        SET_DFNONE(s4);
+//    }
+//    if(c>=0 && c<256) {
+//        IFX(X_ALL) {
+//            XORS_IMM8(s1, s1, c);
+//        } else {
+//            XOR_IMM8(s1, s1, c);
+//        }
+//    } else {
+//        IFX(X_PEND) {} else {MOVW(s3, c);}
+//        IFX(X_ALL) {
+//            XORS_REG_LSL_IMM5(s1, s1, s3, 0);
+//        } else {
+//            XOR_REG_LSL_IMM5(s1, s1, s3, 0);
+//        }
+//    }
+//    IFX(X_PEND) {
+//        STR_IMM9(s1, xEmu, offsetof(x64emu_t, res));
+//    }
+//    IFX(X_CF | X_AF | X_ZF) {
+//        BIC_IMM8(xFlags, xFlags, (1<<F_CF)|(1<<F_AF)|(1<<F_ZF), 0);
+//    }
+//    IFX(X_OF) {
+//        BIC_IMM8(xFlags, xFlags, 0b10, 0x0b);
+//    }
+//    IFX(X_ZF) {
+//        ORR_IMM8_COND(cEQ, xFlags, xFlags, 1<<F_ZF, 0);
+//    }
+//    IFX(X_SF) {
+//        MOV_REG_LSR_IMM5(s3, s1, 15);
+//        BFI(xFlags, s3, F_SF, 1);
+//    }
+//    IFX(X_PF) {
+//        emit_pf(dyn, ninst, s1, s3, s4);
+//    }
+//}
+
+// emit AND16 instruction, from s1 , s2, store result in s1 using s3 and s4 as scratch, s4 can be same as s2 (and so s2 destroyed)
+//void emit_and16(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3, int s4)
+//{
+//    IFX(X_PEND) {
+//        STR_IMM9(s1, xEmu, offsetof(x64emu_t, op1));
+//        STR_IMM9(s2, xEmu, offsetof(x64emu_t, op2));
+//        SET_DF(s3, d_and16);
+//    } else IFX(X_ALL) {
+//        SET_DFNONE(s3);
+//    }
+//    IFX(X_ALL) {
+//        ANDS_REG_LSL_IMM5(s1, s1, s2, 0);
+//    } else {
+//        AND_REG_LSL_IMM5(s1, s1, s2, 0);
+//    }
+//    IFX(X_PEND) {
+//        STR_IMM9(s1, xEmu, offsetof(x64emu_t, res));
+//    }
+//    IFX(X_CF | X_AF | X_ZF) {
+//        BIC_IMM8(xFlags, xFlags, (1<<F_CF)|(1<<F_AF)|(1<<F_ZF), 0);
+//    }
+//    IFX(X_OF) {
+//        BIC_IMM8(xFlags, xFlags, 0b10, 0x0b);
+//    }
+//    IFX(X_ZF) {
+//        ORR_IMM8_COND(cEQ, xFlags, xFlags, 1<<F_ZF, 0);
+//    }
+//    IFX(X_SF) {
+//        MOV_REG_LSR_IMM5(s3, s1, 15);
+//        BFI(xFlags, s3, F_SF, 1);
+//    }
+//    IFX(X_PF) {
+//        emit_pf(dyn, ninst, s1, s3, s4);
+//    }
+//}
+
+// emit AND16 instruction, from s1 , constant c, store result in s1 using s3 and s4 as scratch
+//void emit_and16c(dynarec_arm_t* dyn, int ninst, int s1, int32_t c, int s3, int s4)
+//{
+//    IFX(X_PEND) {
+//        MOVW(s3, c);
+//        STR_IMM9(s1, xEmu, offsetof(x64emu_t, op1));
+//        STR_IMM9(s3, xEmu, offsetof(x64emu_t, op2));
+//        SET_DF(s4, d_and16);
+//    } else IFX(X_ALL) {
+//        SET_DFNONE(s4);
+//    }
+//    if(c>=0 && c<256) {
+//        IFX(X_ALL) {
+//            ANDS_IMM8(s1, s1, c);
+//        } else {
+//            AND_IMM8(s1, s1, c);
+//        }
+//    } else {
+//        IFX(X_PEND) {} else {MOVW(s3, c);}
+//        IFX(X_ALL) {
+//            ANDS_REG_LSL_IMM5(s1, s1, s3, 0);
+//        } else {
+//            AND_REG_LSL_IMM5(s1, s1, s3, 0);
+//        }
+//    }
+//    IFX(X_PEND) {
+//        STR_IMM9(s1, xEmu, offsetof(x64emu_t, res));
+//    }
+//    IFX(X_CF | X_AF | X_ZF) {
+//        BIC_IMM8(xFlags, xFlags, (1<<F_CF)|(1<<F_AF)|(1<<F_ZF), 0);
+//    }
+//    IFX(X_OF) {
+//        BIC_IMM8(xFlags, xFlags, 0b10, 0x0b);
+//    }
+//    IFX(X_ZF) {
+//        ORR_IMM8_COND(cEQ, xFlags, xFlags, 1<<F_ZF, 0);
+//    }
+//    IFX(X_SF) {
+//        MOV_REG_LSR_IMM5(s3, s1, 15);
+//        BFI(xFlags, s3, F_SF, 1);
+//    }
+//    IFX(X_PF) {
+//        emit_pf(dyn, ninst, s1, s3, s4);
+//    }
+//}
\ No newline at end of file
diff --git a/src/dynarec/dynarec_arm64_emit_math.c b/src/dynarec/dynarec_arm64_emit_math.c
index 0a84c676..4e172e56 100755
--- a/src/dynarec/dynarec_arm64_emit_math.c
+++ b/src/dynarec/dynarec_arm64_emit_math.c
@@ -878,7 +878,7 @@ void emit_dec32(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int s3, int s4
 {
     IFX(X_PEND) {
         STRxw_U12(s1, xEmu, offsetof(x64emu_t, op1));
-        SET_DF(s4, d_dec32);
+        SET_DF(s4, rex.w?d_dec64:d_dec32);
     } else IFX(X_ZF|X_OF|X_AF|X_SF|X_PF) {
         SET_DFNONE(s4);
     }
diff --git a/src/dynarec/dynarec_arm64_helper.h b/src/dynarec/dynarec_arm64_helper.h
index 6cad1fd9..68042470 100755
--- a/src/dynarec/dynarec_arm64_helper.h
+++ b/src/dynarec/dynarec_arm64_helper.h
@@ -630,12 +630,12 @@ void emit_sub32(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int s2, int s3
 void emit_sub32c(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int64_t c, int s3, int s4, int s5);
 //void emit_sub8(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3, int s4, int save_s4);
 //void emit_sub8c(dynarec_arm_t* dyn, int ninst, int s1, int32_t c, int s3, int s4);
-//void emit_or32(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3, int s4);
-//void emit_or32c(dynarec_arm_t* dyn, int ninst, int s1, int32_t c, int s3, int s4);
-//void emit_xor32(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3, int s4);
-//void emit_xor32c(dynarec_arm_t* dyn, int ninst, int s1, int32_t c, int s3, int s4);
-//void emit_and32(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3, int s4);
-//void emit_and32c(dynarec_arm_t* dyn, int ninst, int s1, int32_t c, int s3, int s4);
+void emit_or32(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int s2, int s3, int s4);
+void emit_or32c(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int64_t c, int s3, int s4);
+void emit_xor32(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int s2, int s3, int s4);
+void emit_xor32c(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int64_t c, int s3, int s4);
+void emit_and32(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int s2, int s3, int s4);
+void emit_and32c(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int64_t c, int s3, int s4);
 //void emit_or8(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3, int s4);
 //void emit_or8c(dynarec_arm_t* dyn, int ninst, int s1, int32_t c, int s3, int s4);
 //void emit_xor8(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3, int s4);