Diffstat (limited to 'src')
-rw-r--r--  src/dynarec/arm64/dynarec_arm64_00.c           |  74
-rw-r--r--  src/dynarec/arm64/dynarec_arm64_0f.c           |  45
-rw-r--r--  src/dynarec/arm64/dynarec_arm64_64.c           |  24
-rw-r--r--  src/dynarec/arm64/dynarec_arm64_66.c           |  15
-rw-r--r--  src/dynarec/arm64/dynarec_arm64_660f.c         |  27
-rw-r--r--  src/dynarec/arm64/dynarec_arm64_67.c           |   4
-rw-r--r--  src/dynarec/arm64/dynarec_arm64_avx_66_0f38.c  |  18
-rw-r--r--  src/dynarec/arm64/dynarec_arm64_avx_66_0f3a.c  |   4
-rw-r--r--  src/dynarec/arm64/dynarec_arm64_consts.c       | 115
-rw-r--r--  src/dynarec/arm64/dynarec_arm64_consts.h       |  96
-rw-r--r--  src/dynarec/arm64/dynarec_arm64_d9.c           |  49
-rw-r--r--  src/dynarec/arm64/dynarec_arm64_db.c           |   6
-rw-r--r--  src/dynarec/arm64/dynarec_arm64_dd.c           |  12
-rw-r--r--  src/dynarec/arm64/dynarec_arm64_df.c           |   4
-rw-r--r--  src/dynarec/arm64/dynarec_arm64_f30f.c         |   2
-rw-r--r--  src/dynarec/arm64/dynarec_arm64_helper.c       |   8
-rw-r--r--  src/dynarec/arm64/dynarec_arm64_helper.h       |   3
-rw-r--r--  src/dynarec/arm64/dynarec_arm64_private.h      |  16
-rw-r--r--  src/dynarec/dynarec_native_functions.c         |   2
-rw-r--r--  src/dynarec/rv64/dynarec_rv64_dd.c             |   1
-rw-r--r--  src/emu/x64printer.c                           |   4
-rw-r--r--  src/wrapped/generated/functions_list.txt       |   2
-rw-r--r--  src/wrapped/generated/wrapper.c                |   4
-rw-r--r--  src/wrapped/generated/wrapper.h                |   2
24 files changed, 357 insertions, 180 deletions
diff --git a/src/dynarec/arm64/dynarec_arm64_00.c b/src/dynarec/arm64/dynarec_arm64_00.c
index ff429a60..c78e4f74 100644
--- a/src/dynarec/arm64/dynarec_arm64_00.c
+++ b/src/dynarec/arm64/dynarec_arm64_00.c
@@ -443,7 +443,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                 READFLAGS(X_AF|X_CF);
                 SETFLAGS(X_ALL, SF_SET_DF);
                 UXTBx(x1, xRAX);
-                CALL_(daa8, x1, 0);
+                CALL_(const_daa8, x1, 0);
                 BFIz(xRAX, x1, 0, 8);
             } else {
                 DEFAULT;
@@ -508,7 +508,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                 READFLAGS(X_AF|X_CF);
                 SETFLAGS(X_ALL, SF_SET_DF);
                 UXTBx(x1, xRAX);
-                CALL_(das8, x1, 0);
+                CALL_(const_das8, x1, 0);
                 BFIz(xRAX, x1, 0, 8);
             } else {
                 DEFAULT;
@@ -602,7 +602,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                 READFLAGS(X_AF);
                 SETFLAGS(X_ALL, SF_SET_DF);
                 UXTHx(x1, xRAX);
-                CALL_(aaa16, x1, 0);
+                CALL_(const_aaa16, x1, 0);
                 BFIz(xRAX, x1, 0, 16);
             } else {
                 DEFAULT;
@@ -670,7 +670,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                 READFLAGS(X_AF);
                 SETFLAGS(X_ALL, SF_SET_DF);
                 UXTHw(x1, xRAX);
-                CALL_(aas16, x1, 0);
+                CALL_(const_aas16, x1, 0);
                 BFIx(xRAX, x1, 0, 16);
             } else {
                 DEFAULT;
@@ -836,7 +836,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                 B_NEXT_nocond;
                 MARK;
                 STORE_XEMU_CALL(xRIP);
-                CALL_S(native_br, -1);
+                CALL_S(const_native_br, -1);
                 LOAD_XEMU_CALL(xRIP);
             } else {
                 if(BOX64DRENV(dynarec_safeflags)>1) {
@@ -846,7 +846,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                 }
                 GETIP(ip);
                 STORE_XEMU_CALL(xRIP);
-                CALL_S(native_ud, -1);
+                CALL_S(const_native_ud, -1);
                 LOAD_XEMU_CALL(xRIP);
                 jump_to_epilog(dyn, 0, xRIP, ninst);
                 *need_epilog = 0;
@@ -1046,7 +1046,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
             }
             GETIP(ip);
             STORE_XEMU_CALL(xRIP);
-            CALL_S(native_priv, -1);
+            CALL_S(const_native_priv, -1);
             LOAD_XEMU_CALL(xRIP);
             jump_to_epilog(dyn, 0, xRIP, ninst);
             *need_epilog = 0;
@@ -1062,7 +1062,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
             }
             GETIP(ip);
             STORE_XEMU_CALL(xRIP);
-            CALL_S(native_priv, -1);
+            CALL_S(const_native_priv, -1);
             LOAD_XEMU_CALL(xRIP);
             jump_to_epilog(dyn, 0, xRIP, ninst);
             *need_epilog = 0;
@@ -2609,7 +2609,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                         STORE_XEMU_CALL(xRIP);
                         ADDx_U12(x3, xRIP, 8+8+2);    // expected return address
                         ADDx_U12(x1, xEmu, (uint32_t)offsetof(x64emu_t, ip)); // setup addr as &emu->ip
-                        CALL_(EmuInt3, -1, x3);
+                        CALL_(const_int3, -1, x3);
                         SMWRITE2();
                         LOAD_XEMU_CALL(xRIP);
                         addr+=8+8;
@@ -2634,7 +2634,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                     B_MARK(cEQ);
                     GETIP(addr);  // update RIP
                     STORE_XEMU_CALL(xRIP);
-                    CALL_S(native_int3, -1);
+                    CALL_S(const_native_int3, -1);
                     LOAD_XEMU_CALL(xRIP);
                     MARK;
                     jump_to_epilog(dyn, addr, 0, ninst);
@@ -2652,7 +2652,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
             GETIP(ip);
             STORE_XEMU_CALL(xRIP);
             MOV32w(x1, u8);
-            CALL_S(native_int, -1);
+            CALL_S(const_native_int, -1);
             LOAD_XEMU_CALL(xRIP);
             TABLE64(x3, addr); // expected return address
             CMPSx_REG(xRIP, x3);
@@ -2670,7 +2670,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                 GETIP(ip);  // priviledged instruction, IP not updated
                 STORE_XEMU_CALL(xRIP);
                 MOV32w(x1, u8);
-                CALL_S(native_int, -1);
+                CALL_S(const_native_int, -1);
                 LOAD_XEMU_CALL(xRIP);
                 LOAD_XEMU_REM();
                 TABLE64(x3, addr); // expected return address
@@ -2686,7 +2686,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                 SMEND();
                 GETIP(addr);
                 STORE_XEMU_CALL(xRIP);
-                CALL_S(EmuX86Syscall, -1);
+                CALL_S(const_x86syscall, -1);
                 LOAD_XEMU_CALL(xRIP);
                 TABLE64(x3, addr); // expected return address
                 CMPSx_REG(xRIP, x3);
@@ -2705,7 +2705,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                 }
                 GETIP(addr);
                 STORE_XEMU_CALL(xRIP);
-                CALL_S(native_int3, -1);
+                CALL_S(const_native_int3, -1);
                 LOAD_XEMU_CALL(xRIP);
                 jump_to_epilog(dyn, 0, xRIP, ninst);
                 *need_epilog = 0;
@@ -2724,7 +2724,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                 }
                 STORE_XEMU_CALL(xRIP);
                 MOV32w(x1,u8);
-                CALL_S(native_int, -1);
+                CALL_S(const_native_int, -1);
                 LOAD_XEMU_CALL(xRIP);
                 jump_to_epilog(dyn, 0, xRIP, ninst);
                 *need_epilog = 0;
@@ -2742,7 +2742,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                 TBZ_NEXT(wFlags, F_OF);
                 STORE_XEMU_CALL(xRIP);
                 MOV32w(x1,4);
-                CALL_S(native_int, -1);
+                CALL_S(const_native_int, -1);
                 LOAD_XEMU_CALL(xRIP);
             }
             break;
@@ -3140,7 +3140,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                         ANDw_mask(x2, xRCX, 0, 0b00100);  //mask=0x00000001f
                     }
                     GETEDW(x4, x1, 0);
-                    CALL_(rex.w?((void*)rcl64):((void*)rcl32), ed, x4);
+                    CALL_(rex.w?const_rcl64:const_rcl32, ed, x4);
                     WBACK;
                     break;
                 case 3:
@@ -3158,7 +3158,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                         ANDw_mask(x2, xRCX, 0, 0b00100);  //mask=0x00000001f
                     }
                     GETEDW(x4, x1, 0);
-                    CALL_(rex.w?((void*)rcr64):((void*)rcr32), ed, x4);
+                    CALL_(rex.w?const_rcr64:const_rcr32, ed, x4);
                     WBACK;
                     break;
                 case 4:
@@ -3231,7 +3231,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                 UBFXx(x1, xRAX, 0, 8);    // load AL
                 u8 = F8;
                 MOV32w(x2, u8);
-                CALL_(aam16, x1, 0);
+                CALL_(const_aam16, x1, 0);
                 BFIz(xRAX, x1, 0, 16);
             } else {
                 DEFAULT;
@@ -3244,7 +3244,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                 UBFXx(x1, xRAX, 0, 16);    // load AX
                 u8 = F8;
                 MOV32w(x2, u8);
-                CALL_(aad16, x1, 0);
+                CALL_(const_aad16, x1, 0);
                 BFIz(xRAX, x1, 0, 16);
             } else {
                 DEFAULT;
@@ -3349,7 +3349,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                 u8 = F8;
                 GETIP(ip);
                 STORE_XEMU_CALL(xRIP);
-                CALL_S(native_priv, -1);
+                CALL_S(const_native_priv, -1);
                 LOAD_XEMU_CALL(xRIP);
                 jump_to_epilog(dyn, 0, xRIP, ninst);
                 *need_epilog = 0;
@@ -3403,7 +3403,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                         GETIP_(dyn->insts[ninst].natcall); // read the 0xCC already
                         STORE_XEMU_CALL(xRIP);
                         ADDx_U12(x1, xEmu, (uint32_t)offsetof(x64emu_t, ip)); // setup addr as &emu->ip
-                        CALL_S(EmuInt3, -1);
+                        CALL_S(const_int3, -1);
                         SMWRITE2();
                         LOAD_XEMU_CALL(xRIP);
                         MOV64x(x3, dyn->insts[ninst].natcall);
@@ -3534,7 +3534,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                 }
                 GETIP(ip);
                 STORE_XEMU_CALL(xRIP);
-                CALL_S(native_priv, -1);
+                CALL_S(const_native_priv, -1);
                 LOAD_XEMU_CALL(xRIP);
                 jump_to_epilog(dyn, 0, xRIP, ninst);
                 *need_epilog = 0;
@@ -3554,7 +3554,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
             }
             GETIP(ip);
             STORE_XEMU_CALL(xRIP);
-            CALL_S(native_priv, -1);  // is that a privileged opcodes or an int 1??
+            CALL_S(const_native_priv, -1);  // is that a privileged opcodes or an int 1??
             LOAD_XEMU_CALL(xRIP);
             jump_to_epilog(dyn, 0, xRIP, ninst);
             *need_epilog = 0;
@@ -3570,7 +3570,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
             }
             GETIP(ip);
             STORE_XEMU_CALL(xRIP);
-            CALL_S(native_priv, -1);
+            CALL_S(const_native_priv, -1);
             LOAD_XEMU_CALL(xRIP);
             jump_to_epilog(dyn, 0, xRIP, ninst);
             *need_epilog = 0;
@@ -3683,7 +3683,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                         CBNZw_MARK3(ed);
                         GETIP_(ip);
                         STORE_XEMU_CALL(xRIP);
-                        CALL_S(native_div0, -1);
+                        CALL_S(const_native_div0, -1);
                         CLEARIP();
                         LOAD_XEMU_CALL(xRIP);
                         jump_to_epilog(dyn, 0, xRIP, ninst);
@@ -3717,7 +3717,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                         CBNZw_MARK3(ed);
                         GETIP_(ip);
                         STORE_XEMU_CALL(xRIP);
-                        CALL_S(native_div0, -1);
+                        CALL_S(const_native_div0, -1);
                         CLEARIP();
                         LOAD_XEMU_CALL(xRIP);
                         jump_to_epilog(dyn, 0, xRIP, ninst);
@@ -3849,14 +3849,14 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                             MESSAGE(LOG_INFO, "Divide by 0 hack\n");
                             GETIP(ip);
                             STORE_XEMU_CALL(xRIP);
-                            CALL_S(native_div0, -1);
+                            CALL_S(const_native_div0, -1);
                             LOAD_XEMU_CALL(xRIP);
                         } else {
                             if(BOX64ENV(dynarec_div0)) {
                                 CBNZx_MARK3(ed);
                                 GETIP_(ip);
                                 STORE_XEMU_CALL(xRIP);
-                                CALL_S(native_div0, -1);
+                                CALL_S(const_native_div0, -1);
                                 CLEARIP();
                                 LOAD_XEMU_CALL(xRIP);
                                 jump_to_epilog(dyn, 0, xRIP, ninst);
@@ -3883,7 +3883,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                                 CBNZx_MARK3(ed);
                                 GETIP_(ip);
                                 STORE_XEMU_CALL(xRIP);
-                                CALL_S(native_div0, -1);
+                                CALL_S(const_native_div0, -1);
                                 CLEARIP();
                                 LOAD_XEMU_CALL(xRIP);
                                 jump_to_epilog(dyn, 0, xRIP, ninst);
@@ -3898,7 +3898,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                                 CBNZx_MARK3(ed);
                                 GETIP_(ip);
                                 STORE_XEMU_CALL(xRIP);
-                                CALL_S(native_div0, -1);
+                                CALL_S(const_native_div0, -1);
                                 CLEARIP();
                                 LOAD_XEMU_CALL(xRIP);
                                 jump_to_epilog(dyn, 0, xRIP, ninst);
@@ -3906,7 +3906,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                             }
                             CBZxw_MARK(xRDX);
                             if(ed!=x1) {MOVx_REG(x1, ed);}
-                            CALL(div64, -1);
+                            CALL(const_div64, -1);
                             B_NEXT_nocond;
                             MARK;
                             UDIVx(x2, xRAX, ed);
@@ -3939,7 +3939,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                             CBNZx_MARK3(wb);
                             GETIP_(ip);
                             STORE_XEMU_CALL(xRIP);
-                            CALL_S(native_div0, -1);
+                            CALL_S(const_native_div0, -1);
                             CLEARIP();
                             LOAD_XEMU_CALL(xRIP);
                             jump_to_epilog(dyn, 0, xRIP, ninst);
@@ -3961,7 +3961,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                                 CBNZx_MARK3(ed);
                                 GETIP_(ip);
                                 STORE_XEMU_CALL(xRIP);
-                                CALL_S(native_div0, -1);
+                                CALL_S(const_native_div0, -1);
                                 CLEARIP();
                                 LOAD_XEMU_CALL(xRIP);
                                 jump_to_epilog(dyn, 0, xRIP, ninst);
@@ -3976,7 +3976,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                                 CBNZx_MARK3(ed);
                                 GETIP_(ip);
                                 STORE_XEMU_CALL(xRIP);
-                                CALL_S(native_div0, -1);
+                                CALL_S(const_native_div0, -1);
                                 CLEARIP();
                                 LOAD_XEMU_CALL(xRIP);
                                 jump_to_epilog(dyn, 0, xRIP, ninst);
@@ -3992,7 +3992,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                             TBNZ_MARK(xRAX, 63);
                             MARK3;
                             if(ed!=x1) {MOVx_REG(x1, ed);}
-                            CALL((void*)idiv64, -1);
+                            CALL(const_idiv64, -1);
                             B_NEXT_nocond;
                             MARK;
                             SDIVx(x2, xRAX, ed);
@@ -4034,7 +4034,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                 }
                 GETIP(ip);
                 STORE_XEMU_CALL(xRIP);
-                CALL_S(native_priv, -1);
+                CALL_S(const_native_priv, -1);
                 LOAD_XEMU_CALL(xRIP);
                 jump_to_epilog(dyn, 0, xRIP, ninst);
                 *need_epilog = 0;
diff --git a/src/dynarec/arm64/dynarec_arm64_0f.c b/src/dynarec/arm64/dynarec_arm64_0f.c
index de266b82..f6d16ee6 100644
--- a/src/dynarec/arm64/dynarec_arm64_0f.c
+++ b/src/dynarec/arm64/dynarec_arm64_0f.c
@@ -16,7 +16,6 @@
 #include "my_cpuid.h"

 #include "freq.h"

 #include "emu/x87emu_private.h"

-#include "emu/x64shaext.h"

 #include "arm64_printer.h"

 #include "dynarec_arm64_private.h"

 #include "dynarec_arm64_functions.h"

@@ -90,7 +89,7 @@ uintptr_t dynarec64_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                     INST_NAME("RDTSCP");

                     NOTEST(x1);

                     if(box64_rdtsc) {

-                        CALL_(ReadTSC, x1, x3);

+                        CALL_(const_readtsc, x1, x3);

                     } else {

                         MRS_cntvct_el0(x1);

                     }

@@ -99,7 +98,7 @@ uintptr_t dynarec64_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                     }

                     LSRx(xRDX, x1, 32);

                     MOVw_REG(xRAX, x1);   // wipe upper part

-                    CALL_(helper_getcpu, x1, x3);

+                    CALL_(const_helper_getcpu, x1, x3);

                     MOVw_REG(xRCX, x1);    // IA32_TSC, 0 for now

                     break;

                 default:

@@ -149,7 +148,7 @@ uintptr_t dynarec64_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
             SMEND();

             GETIP(addr);

             STORE_XEMU_CALL(xRIP);

-            CALL_S(EmuX64Syscall, -1);

+            CALL_S(const_x64syscall, -1);

             LOAD_XEMU_CALL(xRIP);

             TABLE64(x3, addr); // expected return address

             SUBx_REG(x3, x3, xRIP);

@@ -170,7 +169,7 @@ uintptr_t dynarec64_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
             }

             GETIP(ip);

             STORE_XEMU_CALL(xRIP);

-            CALL_S(native_ud, -1);

+            CALL_S(const_native_ud, -1);

             LOAD_XEMU_CALL(xRIP);

             jump_to_epilog(dyn, 0, xRIP, ninst);

             *need_epilog = 0;

@@ -186,7 +185,7 @@ uintptr_t dynarec64_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
             }

             GETIP(ip);

             STORE_XEMU_CALL(xRIP);

-            CALL_S(native_ud, -1);

+            CALL_S(const_native_ud, -1);

             LOAD_XEMU_CALL(xRIP);

             jump_to_epilog(dyn, 0, xRIP, ninst);

             *need_epilog = 0;

@@ -225,7 +224,7 @@ uintptr_t dynarec64_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
             }

             GETIP(ip);

             STORE_XEMU_CALL(xRIP);

-            CALL_S(native_ud, -1);

+            CALL_S(const_native_ud, -1);

             LOAD_XEMU_CALL(xRIP);

             jump_to_epilog(dyn, 0, xRIP, ninst);

             *need_epilog = 0;

@@ -532,7 +531,7 @@ uintptr_t dynarec64_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
             }

             GETIP(ip);

             STORE_XEMU_CALL(xRIP);

-            CALL_S(native_priv, -1);

+            CALL_S(const_native_priv, -1);

             LOAD_XEMU_CALL(xRIP);

             jump_to_epilog(dyn, 0, xRIP, ninst);

             // continue the block

@@ -541,7 +540,7 @@ uintptr_t dynarec64_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
             INST_NAME("RDTSC");

             NOTEST(x1);

             if(box64_rdtsc) {

-                CALL_(ReadTSC, x1, x3);

+                CALL_(const_readtsc, x1, x3);

             } else {

                 MRS_cntvct_el0(x1);

             }

@@ -771,7 +770,7 @@ uintptr_t dynarec64_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                         GETG;

                         sse_forget_reg(dyn, ninst, gd);

                         ADDx_U12(x1, xEmu, offsetof(x64emu_t, xmm[gd]));

-                        CALL(sha1msg2, -1);

+                        CALL(const_sha1msg2, -1);

                     }

                     break;

                 case 0xCB:

@@ -817,7 +816,7 @@ uintptr_t dynarec64_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                         sse_forget_reg(dyn, ninst, gd);

                         ADDx_U12(x1, xEmu, offsetof(x64emu_t, xmm[gd]));

                         sse_reflect_reg(dyn, ninst, 0);

-                        CALL(sha256rnds2, -1);

+                        CALL(const_sha256rnds2, -1);

                     }

                     break;

                 case 0xCC:

@@ -842,7 +841,7 @@ uintptr_t dynarec64_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                         GETG;

                         sse_forget_reg(dyn, ninst, gd);

                         ADDx_U12(x1, xEmu, offsetof(x64emu_t, xmm[gd]));

-                        CALL(sha256msg1, -1);

+                        CALL(const_sha256msg1, -1);

                     }

                     break;

                 case 0xCD:

@@ -876,7 +875,7 @@ uintptr_t dynarec64_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                         GETG;

                         sse_forget_reg(dyn, ninst, gd);

                         ADDx_U12(x1, xEmu, offsetof(x64emu_t, xmm[gd]));

-                        CALL(sha256msg2, -1);

+                        CALL(const_sha256msg2, -1);

                     }

                     break;

 

@@ -999,7 +998,7 @@ uintptr_t dynarec64_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                         sse_forget_reg(dyn, ninst, gd);

                         ADDx_U12(x1, xEmu, offsetof(x64emu_t, xmm[gd]));

                         MOV32w(x3, u8);

-                        CALL(sha1rnds4, -1);

+                        CALL(const_sha1rnds4, -1);

                     }

                     break;

 

@@ -1743,7 +1742,7 @@ uintptr_t dynarec64_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
             INST_NAME("CPUID");

             NOTEST(x1);

             MOVx_REG(x1, xRAX);

-            CALL_(my_cpuid, -1, 0);

+            CALL_(const_cpuid, -1, 0);

             break;

         case 0xA3:

             INST_NAME("BT Ed, Gd");

@@ -1925,7 +1924,7 @@ uintptr_t dynarec64_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                         fpu_purgecache(dyn, ninst, 0, x1, x2, x3);

                         addr = geted(dyn, addr, ninst, nextop, &ed, x1, &fixedaddress, NULL, 0, 0, rex, NULL, 0, 0);

                         if(ed!=x1) {MOVx_REG(x1, ed);}

-                        CALL(rex.is32bits?((void*)fpu_fxsave32):((void*)fpu_fxsave64), -1);

+                        CALL(rex.is32bits?const_fpu_fxsave32:const_fpu_fxsave64, -1);

                         break;

                     case 1:

                         INST_NAME("FXRSTOR Ed");

@@ -1933,7 +1932,7 @@ uintptr_t dynarec64_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                         fpu_purgecache(dyn, ninst, 0, x1, x2, x3);

                         addr = geted(dyn, addr, ninst, nextop, &ed, x1, &fixedaddress, NULL, 0, 0, rex, NULL, 0, 0);

                         if(ed!=x1) {MOVx_REG(x1, ed);}

-                        CALL(rex.is32bits?((void*)fpu_fxrstor32):((void*)fpu_fxrstor64), -1);

+                        CALL(rex.is32bits?const_fpu_fxrstor32:const_fpu_fxrstor64, -1);

                         break;

                     case 2:

                         INST_NAME("LDMXCSR Md");

@@ -1994,7 +1993,7 @@ uintptr_t dynarec64_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                         addr = geted(dyn, addr, ninst, nextop, &ed, x1, &fixedaddress, NULL, 0, 0, rex, NULL, 0, 0);

                         if(ed!=x1) {MOVx_REG(x1, ed);}

                         MOV32w(x2, rex.w?0:1);

-                        CALL((void*)fpu_xsave, -1);

+                        CALL(const_fpu_xsave, -1);

                         break;

                     case 5:

                         INST_NAME("XRSTOR Ed");

@@ -2003,7 +2002,7 @@ uintptr_t dynarec64_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                         addr = geted(dyn, addr, ninst, nextop, &ed, x1, &fixedaddress, NULL, 0, 0, rex, NULL, 0, 0);

                         if(ed!=x1) {MOVx_REG(x1, ed);}

                         MOV32w(x2, rex.w?0:1);

-                        CALL((void*)fpu_xrstor, -1);

+                        CALL(const_fpu_xrstor, -1);

                         break;

                     case 7:

                         INST_NAME("CLFLUSH Ed");

@@ -2012,7 +2011,7 @@ uintptr_t dynarec64_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                         if(ed!=x1) {

                             MOVx_REG(x1, ed);

                         }

-                        CALL_(native_clflush, -1, 0);

+                        CALL_(const_native_clflush, -1, 0);

                         break;

                     default:

                         DEFAULT;

@@ -2595,7 +2594,7 @@ uintptr_t dynarec64_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                     MRS_rndr(x1);

                     IFX(X_CF) { CSETw(x3, cNE); }

                 } else {

-                    CALL(rex.w?((void*)get_random64):((void*)get_random32), x1);

+                    CALL(rex.w?const_random64:const_random32, x1);

                     IFX(X_CF) { MOV32w(x3, 1); }

                 }

                 IFX(X_CF) { BFIw(xFlags, x3, F_CF, 1); }

@@ -2604,7 +2603,7 @@ uintptr_t dynarec64_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
             case 7:

                 INST_NAME("RDPID Ed");

                 GETED(0);

-                CALL_(helper_getcpu, ed, x2);

+                CALL_(const_helper_getcpu, ed, x2);

                 break;

             default:

                 DEFAULT;

@@ -2654,7 +2653,7 @@ uintptr_t dynarec64_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                     MRS_rndr(x1);

                     IFX(X_CF) { CSETw(x3, cNE); }

                 } else {

-                    CALL(rex.w?((void*)get_random64):((void*)get_random32), x1);

+                    CALL(rex.w?const_random64:const_random32, x1);

                     IFX(X_CF) { MOV32w(x3, 1); }

                 }

                 IFX(X_CF) { BFIw(xFlags, x3, F_CF, 1); }

diff --git a/src/dynarec/arm64/dynarec_arm64_64.c b/src/dynarec/arm64/dynarec_arm64_64.c
index 1bb12d62..73f0ee9a 100644
--- a/src/dynarec/arm64/dynarec_arm64_64.c
+++ b/src/dynarec/arm64/dynarec_arm64_64.c
@@ -640,7 +640,7 @@ uintptr_t dynarec64_64(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
             }
             GETIP(ip);
             STORE_XEMU_CALL(xRIP);
-            CALL_S(native_priv, -1);
+            CALL_S(const_native_priv, -1);
             LOAD_XEMU_CALL(xRIP);
             jump_to_epilog(dyn, 0, xRIP, ninst);
             *need_epilog = 0;
@@ -656,7 +656,7 @@ uintptr_t dynarec64_64(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
             }
             GETIP(ip);
             STORE_XEMU_CALL(xRIP);
-            CALL_S(native_priv, -1);
+            CALL_S(const_native_priv, -1);
             LOAD_XEMU_CALL(xRIP);
             jump_to_epilog(dyn, 0, xRIP, ninst);
             *need_epilog = 0;
@@ -1242,7 +1242,7 @@ uintptr_t dynarec64_64(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                     if(wback) {ADDx_REG(x6, x6, wback); wback=x6;}
                     if(!rex.w && MODREG) {MOVw_REG(ed, ed);}
                     CBZw_NEXT(x2);
-                    CALL_(rex.w?((void*)rcl64):((void*)rcl32), ed, x6);
+                    CALL_(rex.w?const_rcl64:const_rcl32, ed, x6);
                     WBACK;
                     break;
                 case 3:
@@ -1263,7 +1263,7 @@ uintptr_t dynarec64_64(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                     if(wback) {ADDx_REG(x6, x6, wback); wback=x6;}
                     if(!rex.w && MODREG) {MOVw_REG(ed, ed);}
                     CBZw_NEXT(x2);
-                    CALL_(rex.w?((void*)rcr64):((void*)rcr32), ed, x6);
+                    CALL_(rex.w?const_rcr64:const_rcr32, ed, x6);
                     WBACK;
                     break;
                 case 4:
@@ -1437,7 +1437,7 @@ uintptr_t dynarec64_64(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                             CBNZx_MARK3(ed);
                             GETIP_(ip);
                             STORE_XEMU_CALL(xRIP);
-                            CALL_S(native_div0, -1);
+                            CALL_S(const_native_div0, -1);
                             CLEARIP();
                             LOAD_XEMU_CALL(xRIP);
                             jump_to_epilog(dyn, 0, xRIP, ninst);
@@ -1463,7 +1463,7 @@ uintptr_t dynarec64_64(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                                 CBNZx_MARK3(ed);
                                 GETIP_(ip);
                                 STORE_XEMU_CALL(xRIP);
-                                CALL_S(native_div0, -1);
+                                CALL_S(const_native_div0, -1);
                                 CLEARIP();
                                 LOAD_XEMU_CALL(xRIP);
                                 jump_to_epilog(dyn, 0, xRIP, ninst);
@@ -1476,14 +1476,14 @@ uintptr_t dynarec64_64(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                             GETEDO(x6, 0);
                             CBZxw_MARK(xRDX);
                             if(ed!=x1) {MOVx_REG(x1, ed);}
-                            CALL(div64, -1);
+                            CALL(const_div64, -1);
                             B_NEXT_nocond;
                             MARK;
                             if(BOX64ENV(dynarec_div0)) {
                                 CBNZx_MARK3(ed);
                                 GETIP_(ip);
                                 STORE_XEMU_CALL(xRIP);
-                                CALL_S(native_div0, -1);
+                                CALL_S(const_native_div0, -1);
                                 CLEARIP();
                                 LOAD_XEMU_CALL(xRIP);
                                 jump_to_epilog(dyn, 0, xRIP, ninst);
@@ -1521,7 +1521,7 @@ uintptr_t dynarec64_64(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                             CBNZx_MARK3(wb);
                             GETIP_(ip);
                             STORE_XEMU_CALL(xRIP);
-                            CALL_S(native_div0, -1);
+                            CALL_S(const_native_div0, -1);
                             CLEARIP();
                             LOAD_XEMU_CALL(xRIP);
                             jump_to_epilog(dyn, 0, xRIP, ninst);
@@ -1541,7 +1541,7 @@ uintptr_t dynarec64_64(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                                 CBNZx_MARK3(ed);
                                 GETIP_(ip);
                                 STORE_XEMU_CALL(xRIP);
-                                CALL_S(native_div0, -1);
+                                CALL_S(const_native_div0, -1);
                                 CLEARIP();
                                 LOAD_XEMU_CALL(xRIP);
                                 jump_to_epilog(dyn, 0, xRIP, ninst);
@@ -1556,7 +1556,7 @@ uintptr_t dynarec64_64(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                                 CBNZx_MARK3(ed);
                                 GETIP_(ip);
                                 STORE_XEMU_CALL(xRIP);
-                                CALL_S(native_div0, -1);
+                                CALL_S(const_native_div0, -1);
                                 CLEARIP();
                                 LOAD_XEMU_CALL(xRIP);
                                 jump_to_epilog(dyn, 0, xRIP, ninst);
@@ -1566,7 +1566,7 @@ uintptr_t dynarec64_64(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                             MVNx_REG(x2, xRDX);
                             CBZxw_MARK(x2);
                             if(ed!=x1) {MOVx_REG(x1, ed);}
-                            CALL((void*)idiv64, -1);
+                            CALL(const_idiv64, -1);
                             B_NEXT_nocond;
                             MARK;
                             SDIVx(x2, xRAX, ed);
diff --git a/src/dynarec/arm64/dynarec_arm64_66.c b/src/dynarec/arm64/dynarec_arm64_66.c
index f654e14c..5435eafc 100644
--- a/src/dynarec/arm64/dynarec_arm64_66.c
+++ b/src/dynarec/arm64/dynarec_arm64_66.c
@@ -1285,7 +1285,7 @@ uintptr_t dynarec64_66(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                     SETFLAGS(X_OF|X_CF, SF_SET_DF);
                     ANDw_mask(x2, xRCX, 0, 0b00100);
                     GETEW(x1, 0);
-                    CALL_(rcl16, x1, x3);
+                    CALL_(const_rcl16, x1, x3);
                     EWBACK;
                     break;
                 case 3:
@@ -1299,7 +1299,7 @@ uintptr_t dynarec64_66(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                     SETFLAGS(X_OF|X_CF, SF_SET_DF);
                     ANDw_mask(x2, xRCX, 0, 0b00100);
                     GETEW(x1, 0);
-                    CALL_(rcr16, x1, x3);
+                    CALL_(const_rcr16, x1, x3);
                     EWBACK;
                     break;
                 case 4:
@@ -1365,7 +1365,7 @@ uintptr_t dynarec64_66(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                         addr = geted(dyn, addr, ninst, nextop, &ed, x1, &fixedaddress, NULL, 0, 0, rex, NULL, 0, 0);
                         if(ed!=x1) {MOVx_REG(x1, ed);}
                         MOV32w(x2, 1);
-                        CALL(fpu_savenv, -1);
+                        CALL(const_fpu_savenv, -1);
                         break;
                     default:
                         DEFAULT;
@@ -1384,7 +1384,7 @@ uintptr_t dynarec64_66(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                         fpu_purgecache(dyn, ninst, 0, x1, x2, x3);
                         addr = geted(dyn, addr, ninst, nextop, &ed, x1, &fixedaddress, NULL, 0, 0, rex, NULL, 0, 0);
                         if(ed!=x1) {MOVx_REG(x1, ed);}
-                        CALL(native_frstor16, -1);
+                        CALL(const_native_frstor16, -1);
                         break;
                     case 6:
                         INST_NAME("FNSAVE Ed");
@@ -1392,8 +1392,7 @@ uintptr_t dynarec64_66(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                         fpu_purgecache(dyn, ninst, 0, x1, x2, x3);
                         addr = geted(dyn, addr, ninst, nextop, &ed, x1, &fixedaddress, NULL, 0, 0, rex, NULL, 0, 0);
                         if(ed!=x1) {MOVx_REG(x1, ed);}
-                        CALL(native_fsave16, -1);
-                        CALL(reset_fpu, -1);
+                        CALL(const_native_fsave16, -1);
                         break;
                     default:
                         DEFAULT;
@@ -1506,7 +1505,7 @@ uintptr_t dynarec64_66(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                         CBNZw_MARK3(ed);
                         GETIP_(ip);
                         STORE_XEMU_CALL(xRIP);
-                        CALL_S(native_div0, -1);
+                        CALL_S(const_native_div0, -1);
                         CLEARIP();
                         LOAD_XEMU_CALL(xRIP);
                         jump_to_epilog(dyn, 0, xRIP, ninst);
@@ -1540,7 +1539,7 @@ uintptr_t dynarec64_66(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                         CBNZw_MARK3(ed);
                         GETIP_(ip);
                         STORE_XEMU_CALL(xRIP);
-                        CALL_S(native_div0, -1);
+                        CALL_S(const_native_div0, -1);
                         CLEARIP();
                         LOAD_XEMU_CALL(xRIP);
                         jump_to_epilog(dyn, 0, xRIP, ninst);
diff --git a/src/dynarec/arm64/dynarec_arm64_660f.c b/src/dynarec/arm64/dynarec_arm64_660f.c
index 31ca06ea..937e0872 100644
--- a/src/dynarec/arm64/dynarec_arm64_660f.c
+++ b/src/dynarec/arm64/dynarec_arm64_660f.c
@@ -18,7 +18,6 @@
 #include "dynarec_arm64_private.h"

 #include "dynarec_arm64_functions.h"

 #include "../dynarec_helper.h"

-#include "emu/x64compstrings.h"

 

 uintptr_t dynarec64_660F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int ninst, rex_t rex, int* ok, int* need_epilog)

 {

@@ -781,7 +780,7 @@ uintptr_t dynarec64_660F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n
                         }

                         sse_forget_reg(dyn, ninst, gd);

                         MOV32w(x1, gd);

-                        CALL(native_aesimc, -1);

+                        CALL(const_native_aesimc, -1);

                     }

                     break;

                 case 0xDC:

@@ -804,7 +803,7 @@ uintptr_t dynarec64_660F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n
                         } else d0 = -1;

                         sse_forget_reg(dyn, ninst, gd);

                         MOV32w(x1, gd);

-                        CALL(native_aese, -1);

+                        CALL(const_native_aese, -1);

                         GETGX(q0, 1);

                         VEORQ(q0, q0, (d0!=-1)?d0:q1);

                     }

@@ -828,7 +827,7 @@ uintptr_t dynarec64_660F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n
                         } else d0 = -1;

                         sse_forget_reg(dyn, ninst, gd);

                         MOV32w(x1, gd);

-                        CALL(native_aeselast, -1);

+                        CALL(const_native_aeselast, -1);

                         GETGX(q0, 1);

                         VEORQ(q0, q0, (d0!=-1)?d0:q1);

                     }

@@ -853,7 +852,7 @@ uintptr_t dynarec64_660F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n
                         } else d0 = -1;

                         sse_forget_reg(dyn, ninst, gd);

                         MOV32w(x1, gd);

-                        CALL(native_aesd, -1);

+                        CALL(const_native_aesd, -1);

                         GETGX(q0, 1);

                         VEORQ(q0, q0, (d0!=-1)?d0:q1);

                     }

@@ -877,7 +876,7 @@ uintptr_t dynarec64_660F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n
                         } else d0 = -1;

                         sse_forget_reg(dyn, ninst, gd);

                         MOV32w(x1, gd);

-                        CALL(native_aesdlast, -1);

+                        CALL(const_native_aesdlast, -1);

                         GETGX(q0, 1);

                         VEORQ(q0, q0, (d0!=-1)?d0:q1);

                     }

@@ -1276,7 +1275,7 @@ uintptr_t dynarec64_660F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n
                         }

                         u8 = F8;

                         MOV32w(x4, u8);

-                        CALL(native_pclmul, -1);

+                        CALL(const_native_pclmul, -1);

                     }

                     break;

 

@@ -1302,7 +1301,7 @@ uintptr_t dynarec64_660F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n
                     MOVx_REG(x4, xRAX);

                     u8 = F8;

                     MOV32w(x5, u8);

-                    CALL(sse42_compare_string_explicit_len, x1);

+                    CALL(const_sse42_compare_string_explicit_len, x1);

                     q0 = sse_get_reg_empty(dyn, ninst, x2, 0);

                     if(u8&0b1000000) {

                         q1 = fpu_get_scratch(dyn, ninst);

@@ -1471,7 +1470,7 @@ uintptr_t dynarec64_660F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n
                         MOVw_REG(x4, xRAX);

                         u8 = F8;

                         MOV32w(x5, u8);

-                        CALL(sse42_compare_string_explicit_len, x1);

+                        CALL(const_sse42_compare_string_explicit_len, x1);

                     }

                     if(u8&0b1000000) {

                         CBNZw_MARK(x1);

@@ -1507,7 +1506,7 @@ uintptr_t dynarec64_660F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n
                     }

                     u8 = F8;

                     MOV32w(x3, u8);

-                    CALL(sse42_compare_string_implicit_len, x1);

+                    CALL(const_sse42_compare_string_implicit_len, x1);

                     q0 = sse_get_reg_empty(dyn, ninst, x2, 0);

                     if(u8&0b1000000) {

                         q1 = fpu_get_scratch(dyn, ninst);

@@ -1561,7 +1560,7 @@ uintptr_t dynarec64_660F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n
                     }

                     u8 = F8;

                     MOV32w(x3, u8);

-                    CALL(sse42_compare_string_implicit_len, x1);

+                    CALL(const_sse42_compare_string_implicit_len, x1);

                     CBNZw_MARK(x1);

                     MOV32w(xRCX, (u8&1)?8:16);

                     B_NEXT_nocond;

@@ -1596,7 +1595,7 @@ uintptr_t dynarec64_660F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n
                     }

                     u8 = F8;

                     MOV32w(x4, u8);

-                    CALL(native_aeskeygenassist, -1);

+                    CALL(const_native_aeskeygenassist, -1);

                     break;

 

                 default:

@@ -2514,7 +2513,7 @@ uintptr_t dynarec64_660F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n
                         if(ed!=x1) {

                             MOVx_REG(x1, ed);

                         }

-                        CALL_(native_clflush, -1, 0);

+                        CALL_(const_native_clflush, -1, 0);

                         break;

                     case 7:

                         INST_NAME("CLFLUSHOPT Ed");

@@ -2523,7 +2522,7 @@ uintptr_t dynarec64_660F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n
                         if(ed!=x1) {

                             MOVx_REG(x1, ed);

                         }

-                        CALL_(native_clflush, -1, 0);

+                        CALL_(const_native_clflush, -1, 0);

                         break;

                     default:

                         DEFAULT;

diff --git a/src/dynarec/arm64/dynarec_arm64_67.c b/src/dynarec/arm64/dynarec_arm64_67.c
index d70da391..b32433ad 100644
--- a/src/dynarec/arm64/dynarec_arm64_67.c
+++ b/src/dynarec/arm64/dynarec_arm64_67.c
@@ -1616,7 +1616,7 @@ uintptr_t dynarec64_67(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                             GETED32H(x1, 0);  // get edd changed addr, so cannot be called 2 times for same op...
                             CBZxw_MARK(xRDX);
                             if(ed!=x1) {MOVx_REG(x1, ed);}
-                            CALL(div64, -1);
+                            CALL(const_div64, -1);
                             B_NEXT_nocond;
                             MARK;
                             UDIVx(x2, xRAX, ed);
@@ -1666,7 +1666,7 @@ uintptr_t dynarec64_67(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                             MVNx_REG(x2, xRDX);
                             CBZxw_MARK(x2);
                             if(ed!=x1) {MOVx_REG(x1, ed);}
-                            CALL((void*)idiv64, -1);
+                            CALL(const_idiv64, -1);
                             B_NEXT_nocond;
                             MARK;
                             SDIVx(x2, xRAX, ed);
diff --git a/src/dynarec/arm64/dynarec_arm64_avx_66_0f38.c b/src/dynarec/arm64/dynarec_arm64_avx_66_0f38.c
index a57d6a3e..a0208890 100644
--- a/src/dynarec/arm64/dynarec_arm64_avx_66_0f38.c
+++ b/src/dynarec/arm64/dynarec_arm64_avx_66_0f38.c
@@ -1887,7 +1887,7 @@ uintptr_t dynarec64_AVX_66_0F38(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip
                 }
                 sse_forget_reg(dyn, ninst, gd);
                 MOV32w(x1, gd);
-                CALL(native_aesimc, -1);
+                CALL(const_native_aesimc, -1);
             }
             YMM0(gd);
             break;
@@ -1922,8 +1922,8 @@ uintptr_t dynarec64_AVX_66_0F38(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip
                 }
                 sse_forget_reg(dyn, ninst, gd);
                 MOV32w(x1, gd);
-                CALL(native_aese, -1);
-                if(vex.l) {MOV32w(x1, gd); CALL(native_aese_y, -1);}
+                CALL(const_native_aese, -1);
+                if(vex.l) {MOV32w(x1, gd); CALL(const_native_aese_y, -1);}
                 GETGX(q0, 1);
                 VEORQ(q0, q0, (d0==-1)?q1:d0);
                 if(vex.l) {
@@ -1962,8 +1962,8 @@ uintptr_t dynarec64_AVX_66_0F38(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip
                 }
                 sse_forget_reg(dyn, ninst, gd);
                 MOV32w(x1, gd);
-                CALL(native_aeselast, -1);
-                if(vex.l) {MOV32w(x1, gd); CALL(native_aeselast_y, -1);}
+                CALL(const_native_aeselast, -1);
+                if(vex.l) {MOV32w(x1, gd); CALL(const_native_aeselast_y, -1);}
                 GETGX(q0, 1);
                 VEORQ(q0, q0, (d0==-1)?q1:d0);
                 if(vex.l) {
@@ -2003,8 +2003,8 @@ uintptr_t dynarec64_AVX_66_0F38(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip
                 }
                 sse_forget_reg(dyn, ninst, gd);
                 MOV32w(x1, gd);
-                CALL(native_aesd, -1);
-                if(vex.l) {MOV32w(x1, gd); CALL(native_aesd_y, -1);}
+                CALL(const_native_aesd, -1);
+                if(vex.l) {MOV32w(x1, gd); CALL(const_native_aesd_y, -1);}
                 GETGX(q0, 1);
                 VEORQ(q0, q0, (d0==-1)?q1:d0);
                 if(vex.l) {
@@ -2043,8 +2043,8 @@ uintptr_t dynarec64_AVX_66_0F38(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip
                 }
                 sse_forget_reg(dyn, ninst, gd);
                 MOV32w(x1, gd);
-                CALL(native_aesdlast, -1);
-                if(vex.l) {MOV32w(x1, gd); CALL(native_aesdlast_y, -1);}
+                CALL(const_native_aesdlast, -1);
+                if(vex.l) {MOV32w(x1, gd); CALL(const_native_aesdlast_y, -1);}
                 GETGX(q0, 1);
                 VEORQ(q0, q0, (d0==-1)?q1:d0);
                 if(vex.l) {
diff --git a/src/dynarec/arm64/dynarec_arm64_avx_66_0f3a.c b/src/dynarec/arm64/dynarec_arm64_avx_66_0f3a.c
index df345979..b0253420 100644
--- a/src/dynarec/arm64/dynarec_arm64_avx_66_0f3a.c
+++ b/src/dynarec/arm64/dynarec_arm64_avx_66_0f3a.c
@@ -706,7 +706,7 @@ uintptr_t dynarec64_AVX_66_0F3A(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip
                 }
                 u8 = F8;
                 MOV32w(x4, u8);
-                CALL_(vex.l?native_pclmul_y:native_pclmul_x, -1, x3);
+                CALL_(vex.l?const_native_pclmul_y:const_native_pclmul_x, -1, x3);
             }
             if(!vex.l) YMM0(gd);
             break;
@@ -810,7 +810,7 @@ uintptr_t dynarec64_AVX_66_0F3A(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip
             }
             u8 = F8;
             MOV32w(x4, u8);
-            CALL(native_aeskeygenassist, -1);
+            CALL(const_native_aeskeygenassist, -1);
             YMM0(gd);
             break;
 
diff --git a/src/dynarec/arm64/dynarec_arm64_consts.c b/src/dynarec/arm64/dynarec_arm64_consts.c
new file mode 100644
index 00000000..0bcfc22a
--- /dev/null
+++ b/src/dynarec/arm64/dynarec_arm64_consts.c
@@ -0,0 +1,115 @@
+#include <stdint.h>
+#include <string.h>
+
+#include "dynarec_arm64_consts.h"
+#include "debug.h"
+#include "box64context.h"
+#include "box64cpu.h"
+#include "emu/x64emu_private.h"
+#include "x64emu.h"
+#include "box64stack.h"
+#include "callback.h"
+#include "emu/x64run_private.h"
+#include "emu/x87emu_private.h"
+#include "emu/x64primop.h"
+#include "my_cpuid.h"
+#include "freq.h"
+#include "debug.h"
+#include "dynarec_arm64_functions.h"
+#include "emu/x64shaext.h"
+#include "emu/x87emu_private.h"
+#include "emu/x64compstrings.h"
+#include "x64test.h"
+
+uintptr_t getConst(arm64_consts_t which)
+{
+    switch(which) {
+        case const_none: dynarec_log(LOG_NONE, "Warning, const none used\n");
+            return 0;
+        case const_daa8: return (uintptr_t)daa8;
+        case const_das8: return (uintptr_t)das8;
+        case const_aaa16: return (uintptr_t)aaa16;
+        case const_aas16: return (uintptr_t)aas16;
+        case const_aam16: return (uintptr_t)aam16;
+        case const_aad16: return (uintptr_t)aad16;
+        case const_native_br: return (uintptr_t)native_br;
+        case const_native_ud: return (uintptr_t)native_ud;
+        case const_native_priv: return (uintptr_t)native_priv;
+        case const_native_int3: return (uintptr_t)native_int3;
+        case const_native_int: return (uintptr_t)native_int;
+        case const_native_div0: return (uintptr_t)native_div0;
+        case const_native_clflush: return (uintptr_t)native_clflush;
+        case const_native_frstor16: return (uintptr_t)native_frstor16;
+        case const_native_fsave16: return (uintptr_t)native_fsave16;
+        case const_native_fsave: return (uintptr_t)native_fsave;
+        case const_native_aesimc: return (uintptr_t)native_aesimc;
+        case const_native_aesd: return (uintptr_t)native_aesd;
+        case const_native_aesd_y: return (uintptr_t)native_aesd_y;
+        case const_native_aesdlast: return (uintptr_t)native_aesdlast;
+        case const_native_aesdlast_y: return (uintptr_t)native_aesdlast_y;
+        case const_native_aese: return (uintptr_t)native_aese;
+        case const_native_aese_y: return (uintptr_t)native_aese_y;
+        case const_native_aeselast: return (uintptr_t)native_aeselast;
+        case const_native_aeselast_y: return (uintptr_t)native_aeselast_y;
+        case const_native_aeskeygenassist: return (uintptr_t)native_aeskeygenassist;
+        case const_native_pclmul: return (uintptr_t)native_pclmul;
+        case const_native_pclmul_x: return (uintptr_t)native_pclmul_x;
+        case const_native_pclmul_y: return (uintptr_t)native_pclmul_y;
+        case const_native_f2xm1: return (uintptr_t)native_f2xm1;
+        case const_native_fyl2x: return (uintptr_t)native_fyl2x;
+        case const_native_fyl2xp1: return (uintptr_t)native_fyl2xp1;
+        case const_native_fxtract: return (uintptr_t)native_fxtract;
+        case const_native_ftan: return (uintptr_t)native_ftan;
+        case const_native_fpatan: return (uintptr_t)native_fpatan;
+        case const_native_fcos: return (uintptr_t)native_fcos;
+        case const_native_fsin: return (uintptr_t)native_fsin;
+        case const_native_fsincos: return (uintptr_t)native_fsincos;
+        case const_native_fscale: return (uintptr_t)native_fscale;
+        case const_native_fld: return (uintptr_t)native_fld;
+        case const_native_fstp: return (uintptr_t)native_fstp;
+        case const_native_frstor: return (uintptr_t)native_frstor;
+        case const_int3: return (uintptr_t)EmuInt3;
+        case const_x86syscall: return (uintptr_t)EmuX86Syscall;
+        case const_x64syscall: return (uintptr_t)EmuX64Syscall;
+        case const_rcl16: return (uintptr_t)rcl16;
+        case const_rcl32: return (uintptr_t)rcl32;
+        case const_rcl64: return (uintptr_t)rcl64;
+        case const_rcr16: return (uintptr_t)rcr16;
+        case const_rcr32: return (uintptr_t)rcr32;
+        case const_rcr64: return (uintptr_t)rcr64;
+        case const_div64: return (uintptr_t)div64;
+        case const_idiv64: return (uintptr_t)idiv64;
+        case const_random32: return (uintptr_t)get_random32;
+        case const_random64: return (uintptr_t)get_random64;
+        case const_readtsc: return (uintptr_t)ReadTSC;
+        case const_helper_getcpu: return (uintptr_t)helper_getcpu;
+        case const_cpuid: return (uintptr_t)my_cpuid;
+        case const_getsegmentbase: return (uintptr_t)GetSegmentBaseEmu;
+        case const_updateflags: return (uintptr_t)UpdateFlags;
+        case const_reset_fpu: return (uintptr_t)reset_fpu;
+        case const_sha1msg2: return (uintptr_t)sha1msg2;
+        case const_sha1rnds4: return (uintptr_t)sha1rnds4;
+        case const_sha256msg1: return (uintptr_t)sha256msg1;
+        case const_sha256msg2: return (uintptr_t)sha256msg2;
+        case const_sha256rnds2: return (uintptr_t)sha256rnds2;
+        case const_fpu_loadenv: return (uintptr_t)fpu_loadenv;
+        case const_fpu_savenv: return (uintptr_t)fpu_savenv;
+        case const_fpu_fxsave32: return (uintptr_t)fpu_fxsave32;
+        case const_fpu_fxsave64: return (uintptr_t)fpu_fxsave64;
+        case const_fpu_fxrstor32: return (uintptr_t)fpu_fxrstor32;
+        case const_fpu_fxrstor64: return (uintptr_t)fpu_fxrstor64;
+        case const_fpu_xsave: return (uintptr_t)fpu_xsave;
+        case const_fpu_xrstor: return (uintptr_t)fpu_xrstor;
+        case const_fpu_fbld: return (uintptr_t)fpu_fbld;
+        case const_fpu_fbst: return (uintptr_t)fpu_fbst;
+        case const_sse42_compare_string_explicit_len: return (uintptr_t)sse42_compare_string_explicit_len;
+        case const_sse42_compare_string_implicit_len: return (uintptr_t)sse42_compare_string_implicit_len;
+        case const_x64test_step: return (uintptr_t)x64test_step;
+        case const_printtrace: return (uintptr_t)PrintTrace;
+
+        case const_last: dynarec_log(LOG_NONE, "Warning, const last used\n");
+            return 0;
+    }
+    dynarec_log(LOG_NONE, "Warning, Unknown const %d used\n", which);
+    return 0;
+}
\ No newline at end of file
diff --git a/src/dynarec/arm64/dynarec_arm64_consts.h b/src/dynarec/arm64/dynarec_arm64_consts.h
new file mode 100644
index 00000000..6357791d
--- /dev/null
+++ b/src/dynarec/arm64/dynarec_arm64_consts.h
@@ -0,0 +1,96 @@
+#ifndef __DYNAREC_ARM64_CONSTS__
+#define __DYNAREC_ARM64_CONSTS__
+#include <stdint.h>
+
+typedef enum arm64_consts_s {
+    const_none,
+    const_daa8,
+    const_das8,
+    const_aaa16,
+    const_aas16,
+    const_aam16,
+    const_aad16,
+    const_native_br,
+    const_native_ud,
+    const_native_priv,
+    const_native_int3,
+    const_native_int,
+    const_native_div0,
+    const_native_clflush,
+    const_native_frstor16,
+    const_native_fsave16,
+    const_native_fsave,
+    const_native_aesimc,
+    const_native_aesd,
+    const_native_aesd_y,
+    const_native_aesdlast,
+    const_native_aesdlast_y,
+    const_native_aese,
+    const_native_aese_y,
+    const_native_aeselast,
+    const_native_aeselast_y,
+    const_native_aeskeygenassist,
+    const_native_pclmul,
+    const_native_pclmul_x,
+    const_native_pclmul_y,
+    const_native_f2xm1,
+    const_native_fyl2x,
+    const_native_fyl2xp1,
+    const_native_fxtract,
+    const_native_ftan,
+    const_native_fpatan,
+    const_native_fcos,
+    const_native_fsin,
+    const_native_fsincos,
+    const_native_fscale,
+    const_native_fld,
+    const_native_fstp,
+    const_native_frstor,
+    const_int3,
+    const_x86syscall,
+    const_x64syscall,
+    const_rcl16,
+    const_rcl32,
+    const_rcl64,
+    const_rcr16,
+    const_rcr32,
+    const_rcr64,
+    const_div64,
+    const_idiv64,
+    const_random32,
+    const_random64,
+    const_readtsc,
+    const_helper_getcpu,
+    const_cpuid,
+    const_getsegmentbase,
+    const_updateflags,
+    const_reset_fpu,
+    const_sha1msg2,
+    const_sha1rnds4,
+    const_sha256msg1,
+    const_sha256msg2,
+    const_sha256rnds2,
+    const_fpu_loadenv,
+    const_fpu_savenv,
+    const_fpu_fxsave32,
+    const_fpu_fxsave64,
+    const_fpu_fxrstor32,
+    const_fpu_fxrstor64,
+    const_fpu_xsave,
+    const_fpu_xrstor,
+    const_fpu_fbld,
+    const_fpu_fbst,
+    const_sse42_compare_string_explicit_len,
+    const_sse42_compare_string_implicit_len,
+    const_x64test_step,
+    const_printtrace,
+
+    const_last
+} arm64_consts_t;
+
+uintptr_t getConst(arm64_consts_t which);
+
+// temporary define...
+#define const_PrintTrace const_printtrace
+
+#endif //__DYNAREC_ARM64_CONSTS__
\ No newline at end of file
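For orientation, a minimal sketch of the indirection these two new files introduce (the helper below is hypothetical and only for illustration): dynarec call sites now pass an arm64_consts_t identifier instead of a raw host function pointer, and the address is resolved at emit time through getConst(), which is exactly what call_c() does further down with TABLE64(reg, getConst(fnc)) followed by BLR(reg).

#include <stdint.h>
#include "dynarec_arm64_consts.h"

// Illustration only: resolve a const id to the host helper it names and
// call it directly. The real emitter materializes the address with TABLE64
// and branches to it with BLR instead of making a C call.
typedef void (*emu_helper_t)(void* emu);   // assumes a helper taking only the emu pointer,
                                           // as reset_fpu() does

static void call_by_const(void* emu, arm64_consts_t which)
{
    uintptr_t addr = getConst(which);      // e.g. const_reset_fpu -> address of reset_fpu
    if (addr)
        ((emu_helper_t)addr)(emu);
}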
diff --git a/src/dynarec/arm64/dynarec_arm64_d9.c b/src/dynarec/arm64/dynarec_arm64_d9.c
index 787b597f..b8cfe6e0 100644
--- a/src/dynarec/arm64/dynarec_arm64_d9.c
+++ b/src/dynarec/arm64/dynarec_arm64_d9.c
@@ -271,7 +271,7 @@ uintptr_t dynarec64_D9(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
             MESSAGE(LOG_DUMP, "Need Optimization (F2XM1)\n");
             i1 = x87_stackcount(dyn, ninst, x1);
             x87_forget(dyn, ninst, x1, x2, 0);
-            CALL(native_f2xm1, -1);
+            CALL(const_native_f2xm1, -1);
             x87_unstackcount(dyn, ninst, x1, i1);
             break;
         case 0xF1:
@@ -280,7 +280,7 @@ uintptr_t dynarec64_D9(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
             i1 = x87_stackcount(dyn, ninst, x1);
             x87_forget(dyn, ninst, x1, x2, 0);
             x87_forget(dyn, ninst, x1, x2, 1);
-            CALL(native_fyl2x, -1);
+            CALL(const_native_fyl2x, -1);
             x87_unstackcount(dyn, ninst, x1, i1);
             X87_POP_OR_FAIL(dyn, ninst, x3);
             break;
@@ -291,7 +291,7 @@ uintptr_t dynarec64_D9(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
             x87_forget(dyn, ninst, x1, x2, 0);
             if(!BOX64ENV(dynarec_fastround))
                 u8 = x87_setround(dyn, ninst, x1, x2, x4);
-            CALL_(native_ftan, -1, BOX64ENV(dynarec_fastround) ? 0 : u8);
+            CALL_(const_native_ftan, -1, BOX64ENV(dynarec_fastround) ? 0 : u8);
             x87_unstackcount(dyn, ninst, x1, i1);
            if(!BOX64ENV(dynarec_fastround))
                 x87_restoreround(dyn, ninst, u8);
@@ -316,7 +316,7 @@ uintptr_t dynarec64_D9(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
             x87_forget(dyn, ninst, x1, x2, 1);
             if(!BOX64ENV(dynarec_fastround))
                 u8 = x87_setround(dyn, ninst, x1, x2, x4);
-            CALL_(native_fpatan, -1, BOX64ENV(dynarec_fastround) ? 0 : u8);
+            CALL_(const_native_fpatan, -1, BOX64ENV(dynarec_fastround) ? 0 : u8);
             if(!BOX64ENV(dynarec_fastround))
                 x87_restoreround(dyn, ninst, u8);
             x87_unstackcount(dyn, ninst, x1, i1);
@@ -328,19 +328,11 @@ uintptr_t dynarec64_D9(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
             X87_PUSH_EMPTY_OR_FAIL(dyn, ninst, 0);
             i1 = x87_stackcount(dyn, ninst, x1);
             x87_forget(dyn, ninst, x1, x2, 1);
-            CALL(native_fxtract, -1);
+            CALL(const_native_fxtract, -1);
             x87_unstackcount(dyn, ninst, x1, i1);
             break;
         case 0xF5:
             INST_NAME("FPREM1");
-            #if 0
-            MESSAGE(LOG_DUMP, "Need Optimization\n");
-            i1 = x87_stackcount(dyn, ninst, x1);
-            x87_forget(dyn, ninst, x1, x2, 0);
-            x87_forget(dyn, ninst, x1, x2, 1);
-            CALL(native_fprem1, -1);
-            x87_unstackcount(dyn, ninst, x1, i1);
-            #else
             v1 = x87_get_st(dyn, ninst, x1, x2, 0, NEON_CACHE_ST_D);
             v2 = x87_get_st(dyn, ninst, x1, x2, 1, NEON_CACHE_ST_D);
             s0 = fpu_get_scratch(dyn, ninst);
@@ -361,7 +353,6 @@ uintptr_t dynarec64_D9(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
             LSRx(x4, x4, 1);
             BFIw(x1, x4, 8, 1);
             STRw_U12(x1, xEmu, offsetof(x64emu_t, sw));
-            #endif
             break;
         case 0xF6:
             INST_NAME("FDECSTP");
@@ -381,14 +372,6 @@ uintptr_t dynarec64_D9(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
             break;
         case 0xF8:
             INST_NAME("FPREM");
-            #if 0
-            MESSAGE(LOG_DUMP, "Need Optimization\n");
-            i1 = x87_stackcount(dyn, ninst, x1);
-            x87_forget(dyn, ninst, x1, x2, 0);
-            x87_forget(dyn, ninst, x1, x2, 1);
-            CALL(native_fprem, -1);
-            x87_unstackcount(dyn, ninst, x1, i1);
-            #else
             v1 = x87_get_st(dyn, ninst, x1, x2, 0, NEON_CACHE_ST_D);
             v2 = x87_get_st(dyn, ninst, x1, x2, 1, NEON_CACHE_ST_D);
             s0 = fpu_get_scratch(dyn, ninst);
@@ -409,7 +392,6 @@ uintptr_t dynarec64_D9(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
             LSRx(x4, x4, 1);
             BFIw(x1, x4, 8, 1);
             STRw_U12(x1, xEmu, offsetof(x64emu_t, sw));
-            #endif
             break;
         case 0xF9:
             INST_NAME("FYL2XP1");
@@ -417,7 +399,7 @@ uintptr_t dynarec64_D9(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
             i1 = x87_stackcount(dyn, ninst, x1);
             x87_forget(dyn, ninst, x1, x2, 0);
             x87_forget(dyn, ninst, x1, x2, 1);
-            CALL(native_fyl2xp1, -1);
+            CALL(const_native_fyl2xp1, -1);
             x87_unstackcount(dyn, ninst, x1, i1);
             X87_POP_OR_FAIL(dyn, ninst, x3);
             break;
@@ -443,19 +425,13 @@ uintptr_t dynarec64_D9(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
             x87_forget(dyn, ninst, x1, x2, 1);
             if(!BOX64ENV(dynarec_fastround))
                 u8 = x87_setround(dyn, ninst, x1, x2, x4);
-            CALL_(native_fsincos, -1, BOX64ENV(dynarec_fastround) ? 0 : u8);
+            CALL_(const_native_fsincos, -1, BOX64ENV(dynarec_fastround) ? 0 : u8);
             if(!BOX64ENV(dynarec_fastround))
                 x87_restoreround(dyn, ninst, u8);
             x87_unstackcount(dyn, ninst, x1, i1);
             break;
         case 0xFC:
             INST_NAME("FRNDINT");
-            #if 0
-            MESSAGE(LOG_DUMP, "Need Optimization\n");
-            // use C helper for now, nothing staightforward is available
-            x87_forget(dyn, ninst, x1, x2, 0);
-            CALL(native_frndint, -1);
-            #else
             v1 = x87_get_st(dyn, ninst, x1, x2, 0, X87_ST0);
             u8 = x87_setround(dyn, ninst, x1, x2, x3);
             if(ST_IS_F(0)) {
@@ -464,7 +440,6 @@ uintptr_t dynarec64_D9(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                 FRINTID(v1, v1);
             }
             x87_restoreround(dyn, ninst, u8);
-            #endif
             break;
         case 0xFD:
             INST_NAME("FSCALE");
@@ -474,7 +449,7 @@ uintptr_t dynarec64_D9(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
             x87_forget(dyn, ninst, x1, x2, 1);
             if(!BOX64ENV(dynarec_fastround))
                 u8 = x87_setround(dyn, ninst, x1, x2, x4);
-            CALL_(native_fscale, -1, BOX64ENV(dynarec_fastround) ? 0 : u8);
+            CALL_(const_native_fscale, -1, BOX64ENV(dynarec_fastround) ? 0 : u8);
             if(!BOX64ENV(dynarec_fastround))
                 x87_restoreround(dyn, ninst, u8);
             x87_unstackcount(dyn, ninst, x1, i1);
@@ -486,7 +461,7 @@ uintptr_t dynarec64_D9(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
             x87_forget(dyn, ninst, x1, x2, 0);
             if(!BOX64ENV(dynarec_fastround))
                 u8 = x87_setround(dyn, ninst, x1, x2, x4);
-            CALL_(native_fsin, -1, BOX64ENV(dynarec_fastround) ? 0 : u8);
+            CALL_(const_native_fsin, -1, BOX64ENV(dynarec_fastround) ? 0 : u8);
             if(!BOX64ENV(dynarec_fastround))
                 x87_restoreround(dyn, ninst, u8);
             x87_unstackcount(dyn, ninst, x1, i1);
@@ -498,7 +473,7 @@ uintptr_t dynarec64_D9(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
             x87_forget(dyn, ninst, x1, x2, 0);
             if(!BOX64ENV(dynarec_fastround))
                 u8 = x87_setround(dyn, ninst, x1, x2, x4);
-            CALL_(native_fcos, -1, BOX64ENV(dynarec_fastround) ? 0 : u8);
+            CALL_(const_native_fcos, -1, BOX64ENV(dynarec_fastround) ? 0 : u8);
             if(!BOX64ENV(dynarec_fastround))
                 x87_restoreround(dyn, ninst, u8);
             x87_unstackcount(dyn, ninst, x1, i1);
@@ -556,7 +531,7 @@ uintptr_t dynarec64_D9(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                     MOVx_REG(x1, ed);
                 }
                 MOV32w(x2, 0);
-                CALL(fpu_loadenv, -1);
+                CALL(const_fpu_loadenv, -1);
                 NATIVE_RESTORE_X87PC();
                 break;
             case 5:
@@ -575,7 +550,7 @@ uintptr_t dynarec64_D9(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                     MOVx_REG(x1, ed);
                 }
                 MOV32w(x2, 0);
-                CALL(fpu_savenv, -1);
+                CALL(const_fpu_savenv, -1);
                 break;
             case 7:
                 INST_NAME("FNSTCW Ew");
diff --git a/src/dynarec/arm64/dynarec_arm64_db.c b/src/dynarec/arm64/dynarec_arm64_db.c
index 3a835593..e10f8e4a 100644
--- a/src/dynarec/arm64/dynarec_arm64_db.c
+++ b/src/dynarec/arm64/dynarec_arm64_db.c
@@ -133,7 +133,7 @@ uintptr_t dynarec64_DB(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
             INST_NAME("FNINIT");
             MESSAGE(LOG_DUMP, "Need Optimization (FNINIT)\n");
             x87_purgecache(dyn, ninst, 0, x1, x2, x3);
-            CALL(reset_fpu, -1);
+            CALL(const_reset_fpu, -1);
             NATIVE_RESTORE_X87PC();
             break;
         case 0xE8:
@@ -309,7 +309,7 @@ uintptr_t dynarec64_DB(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                         X87_PUSH_EMPTY_OR_FAIL(dyn, ninst, 0);
                         // sync top
                         x87_reflectcount(dyn, ninst, x3, x4);
-                        CALL(native_fld, -1);
+                        CALL(const_native_fld, -1);
                         // go back with the top & stack counter
                         x87_unreflectcount(dyn, ninst, x3, x4);
                     }
@@ -327,7 +327,7 @@ uintptr_t dynarec64_DB(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                         addr = geted(dyn, addr, ninst, nextop, &ed, x1, &fixedaddress, NULL, 0, 0, rex, NULL, 0, 0);
                         if(ed!=x1) {MOVx_REG(x1, ed);}
                         x87_reflectcount(dyn, ninst, x3, x4);
-                        CALL(native_fstp, -1);
+                        CALL(const_native_fstp, -1);
                         x87_unreflectcount(dyn, ninst, x3, x4);
                     } else {
                         // Painfully long, straight conversion from the C code, should be optimized
diff --git a/src/dynarec/arm64/dynarec_arm64_dd.c b/src/dynarec/arm64/dynarec_arm64_dd.c
index 35a442be..f88c965d 100644
--- a/src/dynarec/arm64/dynarec_arm64_dd.c
+++ b/src/dynarec/arm64/dynarec_arm64_dd.c
@@ -49,7 +49,6 @@ uintptr_t dynarec64_DD(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
         case 0xC6:
         case 0xC7:
             INST_NAME("FFREE STx");
-            #if 1
             if((nextop&7)==0 && PK(0)==0xD9 && PK(1)==0xF7) {
                 MESSAGE(LOG_DUMP, "Hack for FFREE ST0 / FINCSTP\n");
                 x87_do_pop(dyn, ninst, x1);
@@ -57,12 +56,6 @@ uintptr_t dynarec64_DD(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                 SKIPTEST(x1);
             } else
                 x87_free(dyn, ninst, x1, x2, x3, nextop&7);
-            #else
-            MESSAGE(LOG_DUMP, "Need Optimization\n");
-            x87_purgecache(dyn, ninst, 0, x1, x2, x3);
-            MOV32w(x1, nextop&7);
-            CALL(fpu_do_free, -1);
-            #endif
             break;
         case 0xD0:
         case 0xD1:
@@ -196,7 +189,7 @@ uintptr_t dynarec64_DD(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                 fpu_purgecache(dyn, ninst, 0, x1, x2, x3);
                 addr = geted(dyn, addr, ninst, nextop, &ed, x1, &fixedaddress, NULL, 0, 0, rex, NULL, 0, 0);
                 if(ed!=x1) {MOVx_REG(x1, ed);}
-                CALL(native_frstor, -1);
+                CALL(const_native_frstor, -1);
                 break;
             case 6:
                 INST_NAME("FNSAVE m108byte");
@@ -204,8 +197,7 @@ uintptr_t dynarec64_DD(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                 fpu_purgecache(dyn, ninst, 0, x1, x2, x3);
                 addr = geted(dyn, addr, ninst, nextop, &ed, x1, &fixedaddress, NULL, 0, 0, rex, NULL, 0, 0);
                 if(ed!=x1) {MOVx_REG(x1, ed);}
-                CALL(native_fsave, -1);
-                CALL(reset_fpu, -1);
+                CALL(const_native_fsave, -1);
                 NATIVE_RESTORE_X87PC();
                 break;
             case 7:
diff --git a/src/dynarec/arm64/dynarec_arm64_df.c b/src/dynarec/arm64/dynarec_arm64_df.c
index 3bf37b24..f177fef2 100644
--- a/src/dynarec/arm64/dynarec_arm64_df.c
+++ b/src/dynarec/arm64/dynarec_arm64_df.c
@@ -273,7 +273,7 @@ uintptr_t dynarec64_DF(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                 addr = geted(dyn, addr, ninst, nextop, &ed, x1, &fixedaddress, NULL, 0, 0, rex, NULL, 0, 0);
                 if(ed!=x1) {MOVx_REG(x1, ed);}
                 s0 = x87_stackcount(dyn, ninst, x3);
-                CALL(fpu_fbld, -1);
+                CALL(const_fpu_fbld, -1);
                 x87_unstackcount(dyn, ninst, x3, s0);
                 break;
             case 5:
@@ -310,7 +310,7 @@ uintptr_t dynarec64_DF(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                 x87_forget(dyn, ninst, x1, x2, 0);
                 addr = geted(dyn, addr, ninst, nextop, &ed, x1, &fixedaddress, NULL, 0, 0, rex, NULL, 0, 0);
                 if(ed!=x1) {MOVx_REG(x1, ed);}
-                CALL(fpu_fbst, -1);
+                CALL(const_fpu_fbst, -1);
                 x87_unstackcount(dyn, ninst, x1, i1);
                 X87_POP_OR_FAIL(dyn, ninst, x3);
                 break;
diff --git a/src/dynarec/arm64/dynarec_arm64_f30f.c b/src/dynarec/arm64/dynarec_arm64_f30f.c
index b8a50063..2531a8d6 100644
--- a/src/dynarec/arm64/dynarec_arm64_f30f.c
+++ b/src/dynarec/arm64/dynarec_arm64_f30f.c
@@ -6,11 +6,11 @@
 #include "debug.h"

 #include "box64context.h"

 #include "box64cpu.h"

+#include "emu/x64run_private.h"

 #include "emu/x64emu_private.h"

 #include "x64emu.h"

 #include "box64stack.h"

 #include "callback.h"

-#include "emu/x64run_private.h"

 #include "x64trace.h"

 #include "dynarec_native.h"

 

diff --git a/src/dynarec/arm64/dynarec_arm64_helper.c b/src/dynarec/arm64/dynarec_arm64_helper.c
index 4012cd2f..38a1eee4 100644
--- a/src/dynarec/arm64/dynarec_arm64_helper.c
+++ b/src/dynarec/arm64/dynarec_arm64_helper.c
@@ -757,7 +757,7 @@ void iret_to_epilog(dynarec_arm_t* dyn, uintptr_t ip, int ninst, int is32bits, i
     CLEARIP();
 }
 
-void call_c(dynarec_arm_t* dyn, int ninst, void* fnc, int reg, int ret, int saveflags, int savereg)
+void call_c(dynarec_arm_t* dyn, int ninst, arm64_consts_t fnc, int reg, int ret, int saveflags, int savereg)
 {
     MAYUSE(fnc);
     #if STEP == 0
@@ -780,7 +780,7 @@ void call_c(dynarec_arm_t* dyn, int ninst, void* fnc, int reg, int ret, int save
     #ifdef _WIN32
     LDRx_U12(xR8, xEmu, offsetof(x64emu_t, win64_teb));
     #endif
-    TABLE64(reg, (uintptr_t)fnc);
+    TABLE64(reg, getConst(fnc));
     BLR(reg);
     if(ret>=0) {
         MOVx_REG(ret, xEmu);
@@ -924,7 +924,7 @@ void grab_segdata(dynarec_arm_t* dyn, uintptr_t addr, int ninst, int reg, int se
         CBZw_MARKSEG(t2);
     }
     MOVZw(x1, segment);
-    call_c(dyn, ninst, GetSegmentBaseEmu, t2, reg, 1, 0);
+    call_c(dyn, ninst, const_getsegmentbase, t2, reg, 1, 0);
     MARKSEG;
     #endif
     MESSAGE(LOG_DUMP, "----%s Offset\n", (segment==_FS)?"FS":"GS");
@@ -2522,7 +2522,7 @@ static void flagsCacheTransform(dynarec_arm_t* dyn, int ninst, int s1)
         }
         if(dyn->insts[ninst].need_nat_flags)
             MRS_nzcv(s1);
-        CALL_(UpdateFlags, -1, s1);
+        CALL_(const_updateflags, -1, s1);
         if(dyn->insts[ninst].need_nat_flags)
             MSR_nzcv(s1);
         MARKF2;
diff --git a/src/dynarec/arm64/dynarec_arm64_helper.h b/src/dynarec/arm64/dynarec_arm64_helper.h
index b57d218b..b270343e 100644
--- a/src/dynarec/arm64/dynarec_arm64_helper.h
+++ b/src/dynarec/arm64/dynarec_arm64_helper.h
@@ -17,6 +17,7 @@
 #include "debug.h"
 #include "arm64_emitter.h"
 #include "../emu/x64primop.h"
+#include "dynarec_arm64_consts.h"
 
 #define F8      *(uint8_t*)(addr++)
 #define F8S     *(int8_t*)(addr++)
@@ -1483,7 +1484,7 @@ void jump_to_next(dynarec_arm_t* dyn, uintptr_t ip, int reg, int ninst, int is32
 void ret_to_epilog(dynarec_arm_t* dyn, uintptr_t ip, int ninst, rex_t rex);
 void retn_to_epilog(dynarec_arm_t* dyn, uintptr_t ip, int ninst, rex_t rex, int n);
 void iret_to_epilog(dynarec_arm_t* dyn, uintptr_t ip, int ninst, int is32bits, int is64bits);
-void call_c(dynarec_arm_t* dyn, int ninst, void* fnc, int reg, int ret, int saveflags, int save_reg);
+void call_c(dynarec_arm_t* dyn, int ninst, arm64_consts_t fnc, int reg, int ret, int saveflags, int save_reg);
 void call_i(dynarec_arm_t* dyn, int ninst, void* fnc);
 void call_n(dynarec_arm_t* dyn, int ninst, void* fnc, int w);
 void grab_segdata(dynarec_arm_t* dyn, uintptr_t addr, int ninst, int reg, int segment, int modreg);
diff --git a/src/dynarec/arm64/dynarec_arm64_private.h b/src/dynarec/arm64/dynarec_arm64_private.h
index af6d5bcb..211fe596 100644
--- a/src/dynarec/arm64/dynarec_arm64_private.h
+++ b/src/dynarec/arm64/dynarec_arm64_private.h
@@ -193,14 +193,14 @@ int Table64(dynarec_arm_t *dyn, uint64_t val, int pass);  // add a value to tabl
 
 void CreateJmpNext(void* addr, void* next);
 
-#define GO_TRACE(A, B, s0)  \
-    GETIP(addr);            \
-    MOVx_REG(x1, xRIP);     \
-    MRS_nzcv(s0);           \
-    STORE_XEMU_CALL(xRIP);  \
-    MOV32w(x2, B);          \
-    CALL_(A, -1, s0);       \
-    MSR_nzcv(s0);           \
+#define GO_TRACE(A, B, s0)      \
+    GETIP(addr);                \
+    MOVx_REG(x1, xRIP);         \
+    MRS_nzcv(s0);               \
+    STORE_XEMU_CALL(xRIP);      \
+    MOV32w(x2, B);              \
+    CALL_(const_##A, -1, s0);   \
+    MSR_nzcv(s0);               \
     LOAD_XEMU_CALL(xRIP)
 
 #endif //__DYNAREC_ARM_PRIVATE_H_
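One consequence of the const_##A token pasting in GO_TRACE is that the macro argument is now an enum suffix rather than a function name, which is presumably why dynarec_arm64_consts.h carries the temporary "#define const_PrintTrace const_printtrace" alias: a trace site still written with the old mixed-case name keeps compiling. A small standalone demo of the same pattern (names are illustrative, not the real dynarec macros):

#include <stdio.h>

enum consts { const_printtrace = 1 };
#define const_PrintTrace const_printtrace   // same role as the temporary alias
#define RESOLVE(A) const_##A                // same pasting as CALL_(const_##A, ...)

int main(void)
{
    // const_##PrintTrace -> const_PrintTrace -> const_printtrace on rescan
    printf("%d\n", RESOLVE(PrintTrace));    // prints 1
    return 0;
}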
diff --git a/src/dynarec/dynarec_native_functions.c b/src/dynarec/dynarec_native_functions.c
index 928ad7e1..4c87385a 100644
--- a/src/dynarec/dynarec_native_functions.c
+++ b/src/dynarec/dynarec_native_functions.c
@@ -235,6 +235,7 @@ void native_fsave(x64emu_t* emu, uint8_t* ed)
         LD2D(p, &emu->x87[7-i].d);
         p+=10;
     }
+    reset_fpu(emu);
 }
 void native_fsave16(x64emu_t* emu, uint8_t* ed)
 {
@@ -246,6 +247,7 @@ void native_fsave16(x64emu_t* emu, uint8_t* ed)
         LD2D(p, &emu->x87[7-i].d);
         p+=10;
     }
+    reset_fpu(emu);
 }
 void native_frstor(x64emu_t* emu, uint8_t* ed)
 {
diff --git a/src/dynarec/rv64/dynarec_rv64_dd.c b/src/dynarec/rv64/dynarec_rv64_dd.c
index 926ac85f..4035377a 100644
--- a/src/dynarec/rv64/dynarec_rv64_dd.c
+++ b/src/dynarec/rv64/dynarec_rv64_dd.c
@@ -161,7 +161,6 @@ uintptr_t dynarec64_DD(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int ni
                 fpu_purgecache(dyn, ninst, 0, x1, x2, x3);
                 addr = geted(dyn, addr, ninst, nextop, &ed, x4, x6, &fixedaddress, rex, NULL, 0, 0);
                 CALL(native_fsave, -1, ed, 0);
-                CALL(reset_fpu, -1, 0, 0);
                 NATIVE_RESTORE_X87PC();
                 break;
             case 7:
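The reset_fpu() moves above in native_fsave()/native_fsave16() and the dropped reset_fpu calls in the arm64 and rv64 DD handlers are two halves of the same fix: x86 FNSAVE stores the whole x87 state and then re-initializes the FPU, so the reset now lives inside the save helpers instead of being emitted separately at every call site. A standalone sketch of that contract (types and defaults simplified, not the emulator's real structures):

#include <stdint.h>
#include <string.h>

typedef struct { uint16_t cw, sw, tw; double st[8]; } x87_state_t;

static void x87_reset(x87_state_t* st)         // FNINIT-equivalent
{
    memset(st, 0, sizeof(*st));
    st->cw = 0x037F;                           // default control word
    st->tw = 0xFFFF;                           // all registers tagged empty
}

static void x87_fnsave(x87_state_t* st, uint8_t* dst)
{
    memcpy(dst, st, sizeof(*st));              // store the full state first...
    x87_reset(st);                             // ...then re-initialize, as FNSAVE does
}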
diff --git a/src/emu/x64printer.c b/src/emu/x64printer.c
index 65664426..857a57d5 100644
--- a/src/emu/x64printer.c
+++ b/src/emu/x64printer.c
@@ -6003,8 +6003,6 @@ void x64Print(x64emu_t* emu, char* buff, size_t buffsz, const char* func, int ti
         snprintf(buff, buffsz, "%04d|%p: Calling %s(%" PRIp ", %" PRIi32 ", %" PRIu64 ", %" PRIu64 ", %" PRIi32 ", %" PRIi32 ", %" PRIi32 ", %" PRIp ", %" PRIi32 ", %" PRIp ")", tid, *(void**)(R_RSP), func, (void*)R_RDI, (int32_t)R_RSI, (uintptr_t)R_RDX, (uintptr_t)R_RCX, (int32_t)R_R8, (int32_t)R_R9, *(int32_t*)(R_RSP + 8), *(void**)(R_RSP + 16), *(int32_t*)(R_RSP + 24), *(void**)(R_RSP + 32));
     } else if (w == iFpiLLdduudd) {
         snprintf(buff, buffsz, "%04d|%p: Calling %s(%" PRIp ", %" PRIi32 ", %" PRIu64 ", %" PRIu64 ", %" PRIf ", %" PRIf ", %" PRIu32 ", %" PRIu32 ", %" PRIf ", %" PRIf ")", tid, *(void**)(R_RSP), func, (void*)R_RDI, (int32_t)R_RSI, (uintptr_t)R_RDX, (uintptr_t)R_RCX, emu->xmm[0].d[0], emu->xmm[1].d[0], (uint32_t)R_R8, (uint32_t)R_R9, emu->xmm[2].d[0], emu->xmm[3].d[0]);
-    } else if (w == iFpipiipippi) {
-        snprintf(buff, buffsz, "%04d|%p: Calling %s(%" PRIp ", %" PRIi32 ", %" PRIp ", %" PRIi32 ", %" PRIi32 ", %" PRIp ", %" PRIi32 ", %" PRIp ", %" PRIp ", %" PRIi32 ")", tid, *(void**)(R_RSP), func, (void*)R_RDI, (int32_t)R_RSI, (void*)R_RDX, (int32_t)R_RCX, (int32_t)R_R8, (void*)R_R9, *(int32_t*)(R_RSP + 8), *(void**)(R_RSP + 16), *(void**)(R_RSP + 24), *(int32_t*)(R_RSP + 32));
     } else if (w == iFpCuWCCCCup) {
         snprintf(buff, buffsz, "%04d|%p: Calling %s(%" PRIp ", %" PRIu8 ", %" PRIu32 ", %" PRIu16 ", %" PRIu8 ", %" PRIu8 ", %" PRIu8 ", %" PRIu8 ", %" PRIu32 ", %" PRIp ")", tid, *(void**)(R_RSP), func, (void*)R_RDI, (uint8_t)R_RSI, (uint32_t)R_RDX, (uint16_t)R_RCX, (uint8_t)R_R8, (uint8_t)R_R9, *(uint8_t*)(R_RSP + 8), *(uint8_t*)(R_RSP + 16), *(uint32_t*)(R_RSP + 24), *(void**)(R_RSP + 32));
     } else if (w == iFpuuLiuiiLL) {
@@ -6420,6 +6418,8 @@ void x64Print(x64emu_t* emu, char* buff, size_t buffsz, const char* func, int ti
         snprintf(buff, buffsz, "%04d|%p: Calling %s(%" PRIp ", %" PRIi32 ", %" PRIp ", %" PRIp ", %" PRIp ", %" PRIu64 ")", tid, *(void**)(R_RSP), func, (void*)R_RDI, (int32_t)R_RSI, (void*)R_RDX, (void*)R_RCX, (void*)R_R8, (uintptr_t)R_R9);
     } else if (w == lFpLpdddddd) {
         snprintf(buff, buffsz, "%04d|%p: Calling %s(%" PRIp ", %" PRIu64 ", %" PRIp ", %" PRIf ", %" PRIf ", %" PRIf ", %" PRIf ", %" PRIf ", %" PRIf ")", tid, *(void**)(R_RSP), func, (void*)R_RDI, (uintptr_t)R_RSI, (void*)R_RDX, emu->xmm[0].d[0], emu->xmm[1].d[0], emu->xmm[2].d[0], emu->xmm[3].d[0], emu->xmm[4].d[0], emu->xmm[5].d[0]);
+    } else if (w == iFpipiipippi) {
+        snprintf(buff, buffsz, "%04d|%p: Calling %s(%" PRIp ", %" PRIi32 ", %" PRIp ", %" PRIi32 ", %" PRIi32 ", %" PRIp ", %" PRIi32 ", %" PRIp ", %" PRIp ", %" PRIi32 ")", tid, *(void**)(R_RSP), func, (void*)R_RDI, (int32_t)R_RSI, (void*)R_RDX, (int32_t)R_RCX, (int32_t)R_R8, (void*)R_R9, *(int32_t*)(R_RSP + 8), *(void**)(R_RSP + 16), *(void**)(R_RSP + 24), *(int32_t*)(R_RSP + 32));
 #endif
     }
     else
diff --git a/src/wrapped/generated/functions_list.txt b/src/wrapped/generated/functions_list.txt
index 79d2ea99..bf306cfa 100644
--- a/src/wrapped/generated/functions_list.txt
+++ b/src/wrapped/generated/functions_list.txt
@@ -3269,7 +3269,6 @@
 #() iFpiiLiiipip
 #() iFpiLLiiipip
 #() iFpiLLdduudd
-#() iFpipiipippi
 #() iFpCuWCCCCup
 #() iFpuuLiuiiLL
 #() iFpLLpiiuuii
@@ -3495,6 +3494,7 @@
 #!defined(STATICBUILD) iFEiipup
 #!defined(STATICBUILD) iFEpipppL
 #!defined(STATICBUILD) lFpLpdddddd
+#!defined(STATICBUILD) iFpipiipippi
 #() vFEv -> vFE
 #() iFEv -> iFE
 #() lFEv -> lFE
diff --git a/src/wrapped/generated/wrapper.c b/src/wrapped/generated/wrapper.c
index 25890534..aa30e698 100644
--- a/src/wrapped/generated/wrapper.c
+++ b/src/wrapped/generated/wrapper.c
@@ -3296,7 +3296,6 @@ typedef int32_t (*iFuiiiuuiiip_t)(uint32_t, int32_t, int32_t, int32_t, uint32_t,
 typedef int32_t (*iFpiiLiiipip_t)(void*, int32_t, int32_t, uintptr_t, int32_t, int32_t, int32_t, void*, int32_t, void*);
 typedef int32_t (*iFpiLLiiipip_t)(void*, int32_t, uintptr_t, uintptr_t, int32_t, int32_t, int32_t, void*, int32_t, void*);
 typedef int32_t (*iFpiLLdduudd_t)(void*, int32_t, uintptr_t, uintptr_t, double, double, uint32_t, uint32_t, double, double);
-typedef int32_t (*iFpipiipippi_t)(void*, int32_t, void*, int32_t, int32_t, void*, int32_t, void*, void*, int32_t);
 typedef int32_t (*iFpCuWCCCCup_t)(void*, uint8_t, uint32_t, uint16_t, uint8_t, uint8_t, uint8_t, uint8_t, uint32_t, void*);
 typedef int32_t (*iFpuuLiuiiLL_t)(void*, uint32_t, uint32_t, uintptr_t, int32_t, uint32_t, int32_t, int32_t, uintptr_t, uintptr_t);
 typedef int32_t (*iFpLLpiiuuii_t)(void*, uintptr_t, uintptr_t, void*, int32_t, int32_t, uint32_t, uint32_t, int32_t, int32_t);
@@ -3582,6 +3581,7 @@ typedef int32_t (*iFEipup_t)(x64emu_t*, int32_t, void*, uint32_t, void*);
 typedef int32_t (*iFEiipup_t)(x64emu_t*, int32_t, int32_t, void*, uint32_t, void*);
 typedef int32_t (*iFEpipppL_t)(x64emu_t*, void*, int32_t, void*, void*, void*, uintptr_t);
 typedef intptr_t (*lFpLpdddddd_t)(void*, uintptr_t, void*, double, double, double, double, double, double);
+typedef int32_t (*iFpipiipippi_t)(void*, int32_t, void*, int32_t, int32_t, void*, int32_t, void*, void*, int32_t);
 #endif
 
 void vFE(x64emu_t *emu, uintptr_t fcn) { vFE_t fn = (vFE_t)fcn; fn(emu); }
@@ -6831,7 +6831,6 @@ void iFuiiiuuiiip(x64emu_t *emu, uintptr_t fcn) { iFuiiiuuiiip_t fn = (iFuiiiuui
 void iFpiiLiiipip(x64emu_t *emu, uintptr_t fcn) { iFpiiLiiipip_t fn = (iFpiiLiiipip_t)fcn; R_RAX=(uint32_t)fn((void*)R_RDI, (int32_t)R_RSI, (int32_t)R_RDX, (uintptr_t)R_RCX, (int32_t)R_R8, (int32_t)R_R9, *(int32_t*)(R_RSP + 8), *(void**)(R_RSP + 16), *(int32_t*)(R_RSP + 24), *(void**)(R_RSP + 32)); }
 void iFpiLLiiipip(x64emu_t *emu, uintptr_t fcn) { iFpiLLiiipip_t fn = (iFpiLLiiipip_t)fcn; R_RAX=(uint32_t)fn((void*)R_RDI, (int32_t)R_RSI, (uintptr_t)R_RDX, (uintptr_t)R_RCX, (int32_t)R_R8, (int32_t)R_R9, *(int32_t*)(R_RSP + 8), *(void**)(R_RSP + 16), *(int32_t*)(R_RSP + 24), *(void**)(R_RSP + 32)); }
 void iFpiLLdduudd(x64emu_t *emu, uintptr_t fcn) { iFpiLLdduudd_t fn = (iFpiLLdduudd_t)fcn; R_RAX=(uint32_t)fn((void*)R_RDI, (int32_t)R_RSI, (uintptr_t)R_RDX, (uintptr_t)R_RCX, emu->xmm[0].d[0], emu->xmm[1].d[0], (uint32_t)R_R8, (uint32_t)R_R9, emu->xmm[2].d[0], emu->xmm[3].d[0]); }
-void iFpipiipippi(x64emu_t *emu, uintptr_t fcn) { iFpipiipippi_t fn = (iFpipiipippi_t)fcn; R_RAX=(uint32_t)fn((void*)R_RDI, (int32_t)R_RSI, (void*)R_RDX, (int32_t)R_RCX, (int32_t)R_R8, (void*)R_R9, *(int32_t*)(R_RSP + 8), *(void**)(R_RSP + 16), *(void**)(R_RSP + 24), *(int32_t*)(R_RSP + 32)); }
 void iFpCuWCCCCup(x64emu_t *emu, uintptr_t fcn) { iFpCuWCCCCup_t fn = (iFpCuWCCCCup_t)fcn; R_RAX=(uint32_t)fn((void*)R_RDI, (uint8_t)R_RSI, (uint32_t)R_RDX, (uint16_t)R_RCX, (uint8_t)R_R8, (uint8_t)R_R9, *(uint8_t*)(R_RSP + 8), *(uint8_t*)(R_RSP + 16), *(uint32_t*)(R_RSP + 24), *(void**)(R_RSP + 32)); }
 void iFpuuLiuiiLL(x64emu_t *emu, uintptr_t fcn) { iFpuuLiuiiLL_t fn = (iFpuuLiuiiLL_t)fcn; R_RAX=(uint32_t)fn((void*)R_RDI, (uint32_t)R_RSI, (uint32_t)R_RDX, (uintptr_t)R_RCX, (int32_t)R_R8, (uint32_t)R_R9, *(int32_t*)(R_RSP + 8), *(int32_t*)(R_RSP + 16), *(uintptr_t*)(R_RSP + 24), *(uintptr_t*)(R_RSP + 32)); }
 void iFpLLpiiuuii(x64emu_t *emu, uintptr_t fcn) { iFpLLpiiuuii_t fn = (iFpLLpiiuuii_t)fcn; R_RAX=(uint32_t)fn((void*)R_RDI, (uintptr_t)R_RSI, (uintptr_t)R_RDX, (void*)R_RCX, (int32_t)R_R8, (int32_t)R_R9, *(uint32_t*)(R_RSP + 8), *(uint32_t*)(R_RSP + 16), *(int32_t*)(R_RSP + 24), *(int32_t*)(R_RSP + 32)); }
@@ -7142,6 +7141,7 @@ void iFEipup(x64emu_t *emu, uintptr_t fcn) { iFEipup_t fn = (iFEipup_t)fcn; R_RA
 void iFEiipup(x64emu_t *emu, uintptr_t fcn) { iFEiipup_t fn = (iFEiipup_t)fcn; R_RAX=(uint32_t)fn(emu, (int32_t)R_RDI, (int32_t)R_RSI, (void*)R_RDX, (uint32_t)R_RCX, (void*)R_R8); }
 void iFEpipppL(x64emu_t *emu, uintptr_t fcn) { iFEpipppL_t fn = (iFEpipppL_t)fcn; R_RAX=(uint32_t)fn(emu, (void*)R_RDI, (int32_t)R_RSI, (void*)R_RDX, (void*)R_RCX, (void*)R_R8, (uintptr_t)R_R9); }
 void lFpLpdddddd(x64emu_t *emu, uintptr_t fcn) { lFpLpdddddd_t fn = (lFpLpdddddd_t)fcn; R_RAX=(intptr_t)fn((void*)R_RDI, (uintptr_t)R_RSI, (void*)R_RDX, emu->xmm[0].d[0], emu->xmm[1].d[0], emu->xmm[2].d[0], emu->xmm[3].d[0], emu->xmm[4].d[0], emu->xmm[5].d[0]); }
+void iFpipiipippi(x64emu_t *emu, uintptr_t fcn) { iFpipiipippi_t fn = (iFpipiipippi_t)fcn; R_RAX=(uint32_t)fn((void*)R_RDI, (int32_t)R_RSI, (void*)R_RDX, (int32_t)R_RCX, (int32_t)R_R8, (void*)R_R9, *(int32_t*)(R_RSP + 8), *(void**)(R_RSP + 16), *(void**)(R_RSP + 24), *(int32_t*)(R_RSP + 32)); }
 #endif
 
 void vFEv(x64emu_t *emu, uintptr_t fcn) { vFE_t fn = (vFE_t)fcn; fn(emu); }
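For readers who rarely touch the generated wrappers: the function name encodes the signature, with the first letter the return type, F a separator, and one letter per argument (judging from the typedefs above, i = int32_t, p = void*, L = uintptr_t, u = uint32_t, d = double, and a leading E marks an explicit x64emu_t* parameter). The relocated iFpipiipippi therefore decodes as below, with the first six arguments taken from registers and the last four from the guest stack, exactly as the generated body reads them:

#include <stdint.h>

// Decoded shape of iFpipiipippi (types copied from the typedef in wrapper.c):
typedef int32_t (*iFpipiipippi_decoded_t)(
    void*, int32_t, void*, int32_t, int32_t,   // RDI, RSI, RDX, RCX, R8
    void*,                                     // R9
    int32_t, void*, void*, int32_t);           // RSP+8, RSP+16, RSP+24, RSP+32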
diff --git a/src/wrapped/generated/wrapper.h b/src/wrapped/generated/wrapper.h
index c782cb40..168ddc84 100644
--- a/src/wrapped/generated/wrapper.h
+++ b/src/wrapped/generated/wrapper.h
@@ -3306,7 +3306,6 @@ void iFuiiiuuiiip(x64emu_t *emu, uintptr_t fnc);
 void iFpiiLiiipip(x64emu_t *emu, uintptr_t fnc);
 void iFpiLLiiipip(x64emu_t *emu, uintptr_t fnc);
 void iFpiLLdduudd(x64emu_t *emu, uintptr_t fnc);
-void iFpipiipippi(x64emu_t *emu, uintptr_t fnc);
 void iFpCuWCCCCup(x64emu_t *emu, uintptr_t fnc);
 void iFpuuLiuiiLL(x64emu_t *emu, uintptr_t fnc);
 void iFpLLpiiuuii(x64emu_t *emu, uintptr_t fnc);
@@ -3540,6 +3539,7 @@ void iFEipup(x64emu_t *emu, uintptr_t fnc);
 void iFEiipup(x64emu_t *emu, uintptr_t fnc);
 void iFEpipppL(x64emu_t *emu, uintptr_t fnc);
 void lFpLpdddddd(x64emu_t *emu, uintptr_t fnc);
+void iFpipiipippi(x64emu_t *emu, uintptr_t fnc);
 #endif
 
 void vFEv(x64emu_t *emu, uintptr_t fnc);