path: root/src/dynarec/arm64
Diffstat (limited to 'src/dynarec/arm64')
-rw-r--r--  src/dynarec/arm64/arm64_lock.S                  62
-rw-r--r--  src/dynarec/arm64/dynarec_arm64_00.c             8
-rw-r--r--  src/dynarec/arm64/dynarec_arm64_0f.c            20
-rw-r--r--  src/dynarec/arm64/dynarec_arm64_66.c             2
-rw-r--r--  src/dynarec/arm64/dynarec_arm64_660f.c          22
-rw-r--r--  src/dynarec/arm64/dynarec_arm64_66f0.c          30
-rw-r--r--  src/dynarec/arm64/dynarec_arm64_66f20f.c         2
-rw-r--r--  src/dynarec/arm64/dynarec_arm64_avx_66_0f.c      6
-rw-r--r--  src/dynarec/arm64/dynarec_arm64_avx_66_0f38.c   10
-rw-r--r--  src/dynarec/arm64/dynarec_arm64_avx_66_0f3a.c    2
-rw-r--r--  src/dynarec/arm64/dynarec_arm64_avx_f2_0f.c     14
-rw-r--r--  src/dynarec/arm64/dynarec_arm64_avx_f3_0f.c     14
-rw-r--r--  src/dynarec/arm64/dynarec_arm64_db.c             6
-rw-r--r--  src/dynarec/arm64/dynarec_arm64_dd.c             2
-rw-r--r--  src/dynarec/arm64/dynarec_arm64_df.c             2
-rw-r--r--  src/dynarec/arm64/dynarec_arm64_emit_logic.c     8
-rw-r--r--  src/dynarec/arm64/dynarec_arm64_emit_math.c     12
-rw-r--r--  src/dynarec/arm64/dynarec_arm64_f0.c            92
-rw-r--r--  src/dynarec/arm64/dynarec_arm64_f20f.c          18
-rw-r--r--  src/dynarec/arm64/dynarec_arm64_f30f.c          14
-rw-r--r--  src/dynarec/arm64/dynarec_arm64_helper.c         2
-rw-r--r--  src/dynarec/arm64/dynarec_arm64_helper.h         4
22 files changed, 176 insertions, 176 deletions
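
Note: this change retires the individual arm64_* feature globals (arm64_atomics, arm64_uscat, arm64_frintts, arm64_flagm, arm64_aes, arm64_pmull, arm64_sha1, arm64_sha2, arm64_crc32, arm64_rndr) in favour of fields of a single cpuext structure. A minimal C sketch of what such a structure could look like, assuming only what the assembly below implies (the atomics flag sits in bit 0 of the first 32-bit word); the real definition lives outside this directory and its exact layout is an assumption here:

    /* Hypothetical sketch only -- everything past bit 0 is an assumed ordering. */
    typedef union {
        struct {
            unsigned int atomics : 1;   /* ARMv8.1 LSE atomics; tested by "tbnz w3, #0" below */
            unsigned int uscat   : 1;   /* FEAT_LSE2, unaligned single-copy atomicity */
            unsigned int frintts : 1;   /* FRINT32x/FRINT64x */
            unsigned int flagm   : 1;   /* FEAT_FlagM (CFINV, ...) */
            unsigned int aes     : 1;
            unsigned int pmull   : 1;
            unsigned int sha1    : 1;
            unsigned int sha2    : 1;
            unsigned int crc32   : 1;
            unsigned int rndr    : 1;
        };
        unsigned int flags;             /* the word loaded by "ldr w3, [x3]" in arm64_lock.S */
    } cpuext_t;
    extern cpuext_t cpuext;
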
diff --git a/src/dynarec/arm64/arm64_lock.S b/src/dynarec/arm64/arm64_lock.S
index 74855129..2d85fe5e 100644
--- a/src/dynarec/arm64/arm64_lock.S
+++ b/src/dynarec/arm64/arm64_lock.S
@@ -5,7 +5,7 @@
 .text
 .align 4
 
-.extern arm64_atomics
+.extern cpuext
 .global arm64_lock_read_b
 .global arm64_lock_write_b
 .global arm64_lock_read_h
@@ -106,10 +106,10 @@ arm64_lock_write_dq:
 
 
 arm64_lock_xchg_dd:
-    adrp    x3, arm64_atomics
-    add     x3, x3, #:lo12:arm64_atomics
+    adrp    x3, cpuext
+    add     x3, x3, #:lo12:cpuext
     ldr     w3, [x3]
-    cbnz    w3, arm64_atomic_xchg_dd
+    tbnz    w3, #0, arm64_atomic_xchg_dd
     dmb     ish
 arm64_lock_xchg_dd_0:
     // address is x0, value is x1, return old value in x0
@@ -126,10 +126,10 @@ arm64_atomic_xchg_dd:
     ret
 
 arm64_lock_xchg_d:
-    adrp    x3, arm64_atomics
-    add     x3, x3, #:lo12:arm64_atomics
+    adrp    x3, cpuext
+    add     x3, x3, #:lo12:cpuext
     ldr     w3, [x3]
-    cbnz    w3, arm64_atomic_xchg_d
+    tbnz    w3, #0, arm64_atomic_xchg_d
     dmb     ish
 arm64_lock_xchg_d_0:
     // address is x0, value is x1, return old value in x0
@@ -146,10 +146,10 @@ arm64_atomic_xchg_d:
     ret
 
 arm64_lock_xchg_h:
-    adrp    x3, arm64_atomics
-    add     x3, x3, #:lo12:arm64_atomics
+    adrp    x3, cpuext
+    add     x3, x3, #:lo12:cpuext
     ldr     w3, [x3]
-    cbnz    w3, arm64_atomic_xchg_h
+    tbnz    w3, #0, arm64_atomic_xchg_h
     dmb     ish
 arm64_lock_xchg_h_0:
     // address is x0, value is x1, return old value in x0
@@ -166,10 +166,10 @@ arm64_atomic_xchg_h:
     ret
 
 arm64_lock_xchg_b:
-    adrp    x3, arm64_atomics
-    add     x3, x3, #:lo12:arm64_atomics
+    adrp    x3, cpuext
+    add     x3, x3, #:lo12:cpuext
     ldr     w3, [x3]
-    cbnz    w3, arm64_atomic_xchg_b
+    tbnz    w3, #0, arm64_atomic_xchg_b
     dmb     ish
 arm64_lock_xchg_b_0:
     // address is x0, value is x1, return old value in x0
@@ -186,10 +186,10 @@ arm64_atomic_xchg_b:
     ret
 
 arm64_lock_storeifnull:
-    adrp    x3, arm64_atomics
-    add     x3, x3, #:lo12:arm64_atomics
+    adrp    x3, cpuext
+    add     x3, x3, #:lo12:cpuext
     ldr     w3, [x3]
-    cbnz    w3, arm64_atomic_storeifnull
+    tbnz    w3, #0, arm64_atomic_storeifnull
     dmb     ish
 1:
     // address is x0, value is x1, x1 store to x0 only if [x0] is 0. return old [x0] value
@@ -211,10 +211,10 @@ arm64_atomic_storeifnull:
 
 
 arm64_lock_storeifnull_d:
-    adrp    x3, arm64_atomics
-    add     x3, x3, #:lo12:arm64_atomics
+    adrp    x3, cpuext
+    add     x3, x3, #:lo12:cpuext
     ldr     w3, [x3]
-    cbnz    w3, arm64_atomic_storeifnull_d
+    tbnz    w3, #0, arm64_atomic_storeifnull_d
     dmb     ish
 1:
     // address is x0, value is w1, w1 store to x0 only if [x0] is 0. return old [x0] value
@@ -236,10 +236,10 @@ arm64_atomic_storeifnull_d:
     ret
 
 arm64_lock_storeifref:
-    adrp    x3, arm64_atomics
-    add     x3, x3, #:lo12:arm64_atomics
+    adrp    x3, cpuext
+    add     x3, x3, #:lo12:cpuext
     ldr     w3, [x3]
-    cbnz    w3, arm64_atomic_storeifref
+    tbnz    w3, #0, arm64_atomic_storeifref
     dmb     ish
 1:
     // address is x0, value is x1, x1 store to x0 only if [x0] is x2. return new [x0] value (so x1 or old value)
@@ -267,10 +267,10 @@ arm64_atomic_storeifref:
     ret
 
 arm64_lock_storeifref_d:
-    adrp    x3, arm64_atomics
-    add     x3, x3, #:lo12:arm64_atomics
+    adrp    x3, cpuext
+    add     x3, x3, #:lo12:cpuext
     ldr     w3, [x3]
-    cbnz    w3, arm64_atomic_storeifref_d
+    tbnz    w3, #0, arm64_atomic_storeifref_d
     dmb     ish
 1:
     // address is x0, value is w1, w1 store to x0 only if [x0] is w2. return new [x0] value (so x1 or old value)
@@ -298,10 +298,10 @@ arm64_atomic_storeifref_d:
     ret
 
 arm64_lock_storeifref2_d:
-    adrp    x3, arm64_atomics
-    add     x3, x3, #:lo12:arm64_atomics
+    adrp    x3, cpuext
+    add     x3, x3, #:lo12:cpuext
     ldr     w3, [x3]
-    cbnz    w3, arm64_atomic_storeifref2_d
+    tbnz    w3, #0, arm64_atomic_storeifref2_d
     dmb     ish
 1:
     // address is x0, value is w1, w1 store to x0 only if [x0] is w2. return old [x0] value
@@ -352,10 +352,10 @@ arm64_lock_decifnot0:
     ret
 
 arm64_lock_incif0:
-    adrp    x3, arm64_atomics
-    add     x3, x3, #:lo12:arm64_atomics
+    adrp    x3, cpuext
+    add     x3, x3, #:lo12:cpuext
     ldr     w3, [x3]
-    cbnz    w3, arm64_atomic_incif0
+    tbnz    w3, #0, arm64_atomic_incif0
     dmb     ish
 1:
     ldaxr   w1, [x0]
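
Note on the cbnz -> tbnz change above: since cpuext packs several feature bits into one word, loading that word and branching with cbnz (taken when the whole word is non-zero) would select the LSE path whenever any extension is present. tbnz w3, #0 tests only bit 0, i.e. the atomics flag. A C sketch of the predicate the new code implements (illustrative only, not code from the repository):

    /* Sketch: the arm64_lock_* helpers now dispatch on the atomics bit alone. */
    static inline int use_lse_path(unsigned int cpuext_word)
    {
        return (cpuext_word & 1u) != 0;   /* bit 0 == cpuext.atomics */
        /* otherwise fall through to the LDAXR/STLXR loop framed by "dmb ish" */
    }
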
diff --git a/src/dynarec/arm64/dynarec_arm64_00.c b/src/dynarec/arm64/dynarec_arm64_00.c
index 443d497d..2947ee42 100644
--- a/src/dynarec/arm64/dynarec_arm64_00.c
+++ b/src/dynarec/arm64/dynarec_arm64_00.c
@@ -1336,7 +1336,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                 BFIx(eb1, x4, eb2, 8);
             } else {
                 addr = geted(dyn, addr, ninst, nextop, &ed, x2, &fixedaddress, NULL, 0, 0, rex, LOCK_LOCK, 0, 0);
-                if(arm64_atomics) {
+                if(cpuext.atomics) {
                     SWPALB(x4, x1, ed);
                 } else {
                     MARKLOCK;
@@ -1363,7 +1363,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                 GETGD;
                 addr = geted(dyn, addr, ninst, nextop, &ed, x2, &fixedaddress, NULL, 0, 0, rex, LOCK_LOCK, 0, 0);
                 if(!ALIGNED_ATOMICxw) {
-                    if(arm64_uscat) {
+                    if(cpuext.uscat) {
                         ANDx_mask(x1, ed, 1, 0, 3);  // mask = F
                         CMPSw_U12(x1, 16-(1<<(2+rex.w)));
                         B_MARK(cGT);
@@ -1372,7 +1372,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                         B_MARK(cNE);
                     }
                 }
-                if(arm64_atomics) {
+                if(cpuext.atomics) {
                     SWPALxw(gd, gd, ed);
                     if(!ALIGNED_ATOMICxw) {
                         B_NEXT_nocond;
@@ -1396,7 +1396,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                     MARK2;
                 }
                 SMDMB();
-                if(!ALIGNED_ATOMICxw || !arm64_atomics) {
+                if(!ALIGNED_ATOMICxw || !cpuext.atomics) {
                     MOVxw_REG(gd, x1);
                 }
             }
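
Note on the unaligned-XCHG guard above: with FEAT_LSE2 (cpuext.uscat) the fallback is only needed when the operand crosses a 16-byte boundary. ANDx_mask(x1, ed, 1, 0, 3) keeps the low four address bits and CMPSw_U12(x1, 16-(1<<(2+rex.w))) compares them against 12 for a 32-bit access or 8 for a 64-bit one. A sketch of the emitted test (the helper name is illustrative):

    #include <stdint.h>

    /* B_MARK(cGT) is taken -- i.e. the exclusive-load/store fallback runs --
       when the access would straddle a 16-byte line despite FEAT_LSE2. */
    static int xchg_needs_fallback(uintptr_t addr, int width /* 4 or 8 bytes */)
    {
        return (addr & 0xF) > (uintptr_t)(16 - width);
    }
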
diff --git a/src/dynarec/arm64/dynarec_arm64_0f.c b/src/dynarec/arm64/dynarec_arm64_0f.c
index fa476e6d..8842d43f 100644
--- a/src/dynarec/arm64/dynarec_arm64_0f.c
+++ b/src/dynarec/arm64/dynarec_arm64_0f.c
@@ -443,7 +443,7 @@ uintptr_t dynarec64_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
             if (BOX64ENV(dynarec_fastround)) {
                 VFCVTZSS(q0, v1);
             } else {
-                if(arm64_frintts) {
+                if(cpuext.frintts) {
                     VFRINT32ZS(q0, v1);
                     VFCVTZSS(q0, q0);
                 } else {
@@ -481,7 +481,7 @@ uintptr_t dynarec64_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                 VFCVTZSS(q0, q0);
             } else {
                 u8 = sse_setround(dyn, ninst, x1, x2, x3);
-                if(arm64_frintts) {
+                if(cpuext.frintts) {
                     VFRINT32XS(q0, v1);
                     VFCVTZSS(q0, q0);
                 } else {
@@ -707,7 +707,7 @@ uintptr_t dynarec64_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                     GETEX(q1, 0, 0);
                     v0 = fpu_get_scratch(dyn, ninst);
                     VEORQ(v0, v0, v0);
-                    if(arm64_sha1) {
+                    if(cpuext.sha1) {
                         v1 = fpu_get_scratch(dyn, ninst);
                         VMOVeS(v1, 0, q0, 3);
                         SHA1H(v1, v1);
@@ -731,7 +731,7 @@ uintptr_t dynarec64_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                 case 0xCA:
                     INST_NAME("SHA1MSG2 Gx, Ex");
                     nextop = F8;
-                    if(arm64_sha1) {
+                    if(cpuext.sha1) {
                         GETGX(q0, 1);
                         GETEX(q1, 0, 0);
                         VEXTQ_8(q0, q0, q0, 8);
@@ -773,7 +773,7 @@ uintptr_t dynarec64_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                 case 0xCB:
                     INST_NAME("SHA256RNDS2 Gx, Ex (, XMM0)");
                     nextop = F8;
-                    if(arm64_sha2) {
+                    if(cpuext.sha2) {
                         GETGX(q0, 1);
                         GETEX(q1, 0, 0);
                         d0 = sse_get_reg(dyn, ninst, x1, 0, 0);
@@ -819,7 +819,7 @@ uintptr_t dynarec64_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                 case 0xCC:
                     INST_NAME("SHA256MSG1 Gx, Ex");
                     nextop = F8;
-                    if(arm64_sha2) {
+                    if(cpuext.sha2) {
                         GETGX(q0, 1);
                         GETEX(q1, 0, 0);
                         SHA256SU0(q0, q1);
@@ -844,7 +844,7 @@ uintptr_t dynarec64_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                 case 0xCD:
                     INST_NAME("SHA256MSG2 Gx, Ex");
                     nextop = F8;
-                    if(arm64_sha2) {
+                    if(cpuext.sha2) {
                         GETGX(q0, 1);
                         GETEX(q1, 0, 0);
                         v0 = fpu_get_scratch(dyn, ninst);
@@ -931,7 +931,7 @@ uintptr_t dynarec64_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                 case 0xCC:
                     INST_NAME("SHA1RNDS4 Gx, Ex, Ib");
                     nextop = F8;
-                    if(arm64_sha1) {
+                    if(cpuext.sha1) {
                         GETGX(q0, 1);
                         GETEX(q1, 0, 1);
                         u8 = F8&3;
@@ -2587,7 +2587,7 @@ uintptr_t dynarec64_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                     MOV32w(x1, (1<<F_OF)|(1<<F_SF)|(1<<F_ZF)|(1<<F_PF)|(1<<F_AF));
                     BICw(xFlags, xFlags, x1);
                 }
-                if(arm64_rndr) {
+                if(cpuext.rndr) {
                     MRS_rndr(x1);
                     IFX(X_CF) { CSETw(x3, cNE); }
                 } else {
@@ -2646,7 +2646,7 @@ uintptr_t dynarec64_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                     MOV32w(x1, (1<<F_OF)|(1<<F_SF)|(1<<F_ZF)|(1<<F_PF)|(1<<F_AF));
                     BICw(xFlags, xFlags, x1);
                 }
-                if(arm64_rndr) {
+                if(cpuext.rndr) {
                     MRS_rndr(x1);
                     IFX(X_CF) { CSETw(x3, cNE); }
                 } else {
diff --git a/src/dynarec/arm64/dynarec_arm64_66.c b/src/dynarec/arm64/dynarec_arm64_66.c
index 5435eafc..30b4ea0e 100644
--- a/src/dynarec/arm64/dynarec_arm64_66.c
+++ b/src/dynarec/arm64/dynarec_arm64_66.c
@@ -596,7 +596,7 @@ uintptr_t dynarec64_66(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                 if(!ALIGNED_ATOMICH) {
                     TBNZ_MARK(ed, 0);
                 }
-                if(arm64_atomics) {
+                if(cpuext.atomics) {
                     SWPALH(gd, x1, ed);
                     SMDMB();
                     if(!ALIGNED_ATOMICH) {
diff --git a/src/dynarec/arm64/dynarec_arm64_660f.c b/src/dynarec/arm64/dynarec_arm64_660f.c
index 68061e6b..d1581b5d 100644
--- a/src/dynarec/arm64/dynarec_arm64_660f.c
+++ b/src/dynarec/arm64/dynarec_arm64_660f.c
@@ -229,7 +229,7 @@ uintptr_t dynarec64_660F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n
                 VFCVTZSQD(q0, v1);
                 SQXTN_32(q0, q0);
             } else {
-                if(arm64_frintts) {
+                if(cpuext.frintts) {
                     VFRINT32ZDQ(q0, v1);
                     VFCVTZSQD(q0, q0);
                     SQXTN_32(q0, q0);
@@ -269,7 +269,7 @@ uintptr_t dynarec64_660F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n
                 VFCVTZSS(q0, q0);
             } else {
                 u8 = sse_setround(dyn, ninst, x1, x2, x3);
-                if(arm64_frintts) {
+                if(cpuext.frintts) {
                     VFRINT32XDQ(q0, v1);
                     VFCVTZSQD(q0, q0);
                     SQXTN_32(q0, q0);
@@ -765,7 +765,7 @@ uintptr_t dynarec64_660F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n
                 case 0xDB:
                     INST_NAME("AESIMC Gx, Ex");  // AES-NI
                     nextop = F8;
-                    if(arm64_aes) {
+                    if(cpuext.aes) {
                         GETEX(q1, 0, 0);
                         GETGX_empty(q0);
                         AESIMC(q0, q1);
@@ -783,7 +783,7 @@ uintptr_t dynarec64_660F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n
                 case 0xDC:
                     INST_NAME("AESENC Gx, Ex");  // AES-NI
                     nextop = F8;
-                    if(arm64_aes) {
+                    if(cpuext.aes) {
                         GETEX(q1, 0, 0);
                         GETGX(q0, 1);
                         v0 = fpu_get_scratch(dyn, ninst);  // ARM64 internal operation differs a bit from x86_64
@@ -808,7 +808,7 @@ uintptr_t dynarec64_660F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n
                 case 0xDD:
                     INST_NAME("AESENCLAST Gx, Ex");  // AES-NI
                     nextop = F8;
-                    if(arm64_aes) {
+                    if(cpuext.aes) {
                         GETEX(q1, 0, 0);
                         GETGX(q0, 1);
                         v0 = fpu_get_scratch(dyn, ninst);  // ARM64 internal operation differs a bit from x86_64
@@ -832,7 +832,7 @@ uintptr_t dynarec64_660F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n
                 case 0xDE:
                     INST_NAME("AESDEC Gx, Ex");  // AES-NI
                     nextop = F8;
-                    if(arm64_aes) {
+                    if(cpuext.aes) {
                         GETEX(q1, 0, 0);
                         GETGX(q0, 1);
                         v0 = fpu_get_scratch(dyn, ninst);  // ARM64 internal operation differs a bit from x86_64
@@ -857,7 +857,7 @@ uintptr_t dynarec64_660F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n
                 case 0xDF:
                     INST_NAME("AESDECLAST Gx, Ex");  // AES-NI
                     nextop = F8;
-                    if(arm64_aes) {
+                    if(cpuext.aes) {
                         GETEX(q1, 0, 0);
                         GETGX(q0, 1);
                         v0 = fpu_get_scratch(dyn, ninst);  // ARM64 internal operation differs a bit from x86_64
@@ -919,7 +919,7 @@ uintptr_t dynarec64_660F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n
                     GETGD;
                     IFNATIVE_BEFORE(NF_CF) {
                         if(INVERTED_CARRY_BEFORE) {
-                            if(arm64_flagm)
+                            if(cpuext.flagm)
                                 CFINV();
                             else {
                                 MRS_nzcv(x3);
@@ -1234,7 +1234,7 @@ uintptr_t dynarec64_660F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n
                 case 0x44:
                     INST_NAME("PCLMULQDQ Gx, Ex, Ib");
                     nextop = F8;
-                    if(arm64_pmull) {
+                    if(cpuext.pmull) {
                         GETGX(q0, 1);
                         GETEX(q1, 0, 1);
                         u8 = F8;
@@ -1748,7 +1748,7 @@ uintptr_t dynarec64_660F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n
                 x87_restoreround(dyn, ninst, u8);
                 VFCVTZSQS(v0, v0);
             } else {
-                if(arm64_frintts) {
+                if(cpuext.frintts) {
                     u8 = sse_setround(dyn, ninst, x1, x2, x3);
                     VFRINT32XSQ(v0, v1); // handle overflow
                     VFCVTZSQS(v0, v0);
@@ -3184,7 +3184,7 @@ uintptr_t dynarec64_660F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n
                 VFCVTZSQD(v0, v1);  // convert double -> int64
                 SQXTN_32(v0, v0);   // convert int64 -> int32 with saturation in lower part, RaZ high part
             } else {
-                if(arm64_frintts) {
+                if(cpuext.frintts) {
                     VFRINT32ZDQ(v0, v1); // handle overflow
                     VFCVTZSQD(v0, v0);  // convert double -> int64
                     SQXTN_32(v0, v0);   // convert int64 -> int32 with saturation in lower part, RaZ high part
diff --git a/src/dynarec/arm64/dynarec_arm64_66f0.c b/src/dynarec/arm64/dynarec_arm64_66f0.c
index 1a636ce6..ed49e2f8 100644
--- a/src/dynarec/arm64/dynarec_arm64_66f0.c
+++ b/src/dynarec/arm64/dynarec_arm64_66f0.c
@@ -60,7 +60,7 @@ uintptr_t dynarec64_66F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n
                 BFIx(ed, x6, 0, 16);
             } else {
                 addr = geted(dyn, addr, ninst, nextop, &wback, x2, &fixedaddress, NULL, 0, 0, rex, LOCK_LOCK, 0, 0);
-                if(arm64_atomics) {
+                if(cpuext.atomics) {
                     UFLAG_IF {
                         LDADDALH(x5, x1, wback);
                         emit_add16(dyn, ninst, x1, x5, x3, x4);
@@ -90,7 +90,7 @@ uintptr_t dynarec64_66F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n
                 BFIx(ed, x6, 0, 16);
             } else {
                 addr = geted(dyn, addr, ninst, nextop, &wback, x2, &fixedaddress, NULL, 0, 0, rex, LOCK_LOCK, 0, 0);
-                if(arm64_atomics) {
+                if(cpuext.atomics) {
                     UFLAG_IF {
                         LDSETALH(x5, x1, wback);
                         emit_or16(dyn, ninst, x1, x5, x3, x4);
@@ -128,7 +128,7 @@ uintptr_t dynarec64_66F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n
                     } else {
                         addr = geted(dyn, addr, ninst, nextop, &wback, x2, &fixedaddress, NULL, 0, 0, rex, LOCK_LOCK, 0, 0);
                         if(!ALIGNED_ATOMICH) {
-                            if(arm64_uscat) {
+                            if(cpuext.uscat) {
                                 ANDx_mask(x1, wback, 1, 0, 3);  // mask = F
                                 CMPSw_U12(x1, 0xF);
                                 B_MARK3(cGE);
@@ -138,7 +138,7 @@ uintptr_t dynarec64_66F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n
                             }
                         }
                         // Aligned version
-                        if(arm64_atomics) {
+                        if(cpuext.atomics) {
                             MOVw_REG(x1, x6);
                             CASALH(x1, gd, wback);
                         } else {
@@ -187,7 +187,7 @@ uintptr_t dynarec64_66F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n
                         BFIx(ed, x5, 0, 16);
                     } else {
                         addr = geted(dyn, addr, ninst, nextop, &wback, x2, &fixedaddress, NULL, 0, 0, rex, LOCK_LOCK, 0, 0);
-                        if(arm64_atomics) {
+                        if(cpuext.atomics) {
                             LDADDALH(x5, x1, wback);
                         } else {
                             MARKLOCK;
@@ -244,7 +244,7 @@ uintptr_t dynarec64_66F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n
                 BFIx(ed, x6, 0, 16);
             } else {
                 addr = geted(dyn, addr, ninst, nextop, &wback, x2, &fixedaddress, NULL, 0, 0, rex, LOCK_LOCK, 0, 0);
-                if(arm64_atomics) {
+                if(cpuext.atomics) {
                     MVNw_REG(x3, gd);
                     UFLAG_IF {
                         LDCLRALH(x3, x1, wback);
@@ -286,7 +286,7 @@ uintptr_t dynarec64_66F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n
                         if(opcode==0x81) i16 = F16S; else i16 = F8S;
                         MOV32w(x5, i16);
                         if(!ALIGNED_ATOMICH) {
-                            if(arm64_uscat) {
+                            if(cpuext.uscat) {
                                 ANDx_mask(x1, wback, 1, 0, 3);  // mask = F
                                 CMPSw_U12(x1, 0xF);
                                 B_MARK(cGE);
@@ -295,7 +295,7 @@ uintptr_t dynarec64_66F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n
                                 B_MARK(cNE);
                             }
                         }
-                        if(arm64_atomics) {
+                        if(cpuext.atomics) {
                             UFLAG_IF {
                                 LDADDALH(x5, x1, wback);
                             } else {
@@ -341,7 +341,7 @@ uintptr_t dynarec64_66F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n
                         addr = geted(dyn, addr, ninst, nextop, &wback, x2, &fixedaddress, NULL, 0, 0, rex, LOCK_LOCK, 0, (opcode==0x81)?2:1);
                         if(opcode==0x81) i16 = F16S; else i16 = F8S;
                         if(!i64) {MOV32w(x5, i16);}
-                        if(arm64_atomics) {
+                        if(cpuext.atomics) {
                             UFLAG_IF {
                                 LDSETALH(x5, x1, wback);
                                 emit_or16c(dyn, ninst, x1, i16, x3, x4);
@@ -423,7 +423,7 @@ uintptr_t dynarec64_66F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n
                         addr = geted(dyn, addr, ninst, nextop, &wback, x2, &fixedaddress, NULL, 0, 0, rex, LOCK_LOCK, 0, (opcode==0x81)?2:1);
                         if(opcode==0x81) i16 = F16S; else i16 = F8S;
                         i64 = convert_bitmask_w(i16);
-                        if(arm64_atomics) {
+                        if(cpuext.atomics) {
                             MOV32w(x5, ~i16);
                             UFLAG_IF {
                                 LDCLRALH(x5, x1, wback);
@@ -466,7 +466,7 @@ uintptr_t dynarec64_66F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n
                         if(opcode==0x81) i16 = F16S; else i16 = F8S;
                         MOV32w(x5, i16);
                         if(!ALIGNED_ATOMICH) {
-                            if(arm64_uscat) {
+                            if(cpuext.uscat) {
                                 ANDx_mask(x1, wback, 1, 0, 3);  // mask = F
                                 CMPSw_U12(x1, 0xF);
                                 B_MARK(cGE);
@@ -475,7 +475,7 @@ uintptr_t dynarec64_66F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n
                                 B_MARK(cNE);
                             }
                         }
-                        if(arm64_atomics) {
+                        if(cpuext.atomics) {
                             NEGw_REG(x4, x5);
                             UFLAG_IF {
                                 LDADDALH(x4, x1, wback);
@@ -522,7 +522,7 @@ uintptr_t dynarec64_66F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n
                         addr = geted(dyn, addr, ninst, nextop, &wback, x2, &fixedaddress, NULL, 0, 0, rex, LOCK_LOCK, 0, (opcode==0x81)?2:1);
                         if(opcode==0x81) i16 = F16S; else i16 = F8S;
                         i64 = convert_bitmask_w(i16);
-                        if(arm64_atomics) {
+                        if(cpuext.atomics) {
                             UFLAG_IF {
                                 LDEORALH(x5, x1, wback);
                                 emit_xor16c(dyn, ninst, x1, i16, x3, x4);
@@ -574,7 +574,7 @@ uintptr_t dynarec64_66F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n
                             BFIx(ed, x6, 0, 16);
                         } else {
                             addr = geted(dyn, addr, ninst, nextop, &wback, x2, &fixedaddress, NULL, 0, 0, rex, LOCK_LOCK, 0, 0);
-                            if(arm64_atomics) {
+                            if(cpuext.atomics) {
                                 MOV32w(x3, 1);
                                 UFLAG_IF {
                                     LDADDALH(x3, x1, wback);
@@ -602,7 +602,7 @@ uintptr_t dynarec64_66F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n
                             BFIx(ed, x6, 0, 16);
                         } else {
                             addr = geted(dyn, addr, ninst, nextop, &wback, x2, &fixedaddress, NULL, 0, 0, rex, LOCK_LOCK, 0, 0);
-                            if(arm64_atomics) {
+                            if(cpuext.atomics) {
                                 MOV32w(x3, -1);
                                 UFLAG_IF {
                                     LDADDALH(x3, x1, wback);
diff --git a/src/dynarec/arm64/dynarec_arm64_66f20f.c b/src/dynarec/arm64/dynarec_arm64_66f20f.c
index 36b65c47..f4866029 100644
--- a/src/dynarec/arm64/dynarec_arm64_66f20f.c
+++ b/src/dynarec/arm64/dynarec_arm64_66f20f.c
@@ -57,7 +57,7 @@ uintptr_t dynarec64_66F20F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int
                     nextop = F8;
                     GETEW(x1, 0);
                     GETGD;
-                    if(arm64_crc32) {
+                    if(cpuext.crc32) {
                         CRC32CH(gd, gd, ed);
                     } else {
                         MOV32w(x2, 0x82f63b78);
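
Note: the fallback branch above loads 0x82f63b78, the reflected CRC-32C polynomial, and computes the checksum without the CRC32CH instruction. A C sketch of the equivalent bit-at-a-time update for the halfword case handled here (illustrative, not the emitted code):

    #include <stdint.h>

    /* CRC-32C over one 16-bit value, reflected polynomial 0x82f63b78. */
    static uint32_t crc32c_u16(uint32_t crc, uint16_t val)
    {
        crc ^= val;
        for (int i = 0; i < 16; ++i)
            crc = (crc >> 1) ^ (0x82f63b78u & (0u - (crc & 1u)));
        return crc;
    }
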
diff --git a/src/dynarec/arm64/dynarec_arm64_avx_66_0f.c b/src/dynarec/arm64/dynarec_arm64_avx_66_0f.c
index 888ff831..b5e1d362 100644
--- a/src/dynarec/arm64/dynarec_arm64_avx_66_0f.c
+++ b/src/dynarec/arm64/dynarec_arm64_avx_66_0f.c
@@ -429,7 +429,7 @@ uintptr_t dynarec64_AVX_66_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip,
             INST_NAME("VCVTPS2DQ Gx, Ex");
             nextop = F8;
             u8 = sse_setround(dyn, ninst, x1, x2, x6);
-            if(!BOX64ENV(dynarec_fastround) && !arm64_frintts) {
+            if(!BOX64ENV(dynarec_fastround) && !cpuext.frintts) {
                 d0 = fpu_get_scratch(dyn, ninst);
                 d1 = fpu_get_scratch(dyn, ninst);
                 MOVI_32_lsl(d1, 0x80, 3);
@@ -440,7 +440,7 @@ uintptr_t dynarec64_AVX_66_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip,
                     VFRINTISQ(v0, v1);
                     VFCVTZSQS(v0, v0);
                 } else {
-                    if(arm64_frintts) {
+                    if(cpuext.frintts) {
                          VFRINT32XSQ(v0, v1);
                          VFCVTZSQS(v0, v0);
                     } else {
@@ -1694,7 +1694,7 @@ uintptr_t dynarec64_AVX_66_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip,
                     else
                         SQXTN2_32(v0, d0);   // convert int64 -> int32 with saturation in higher part
                 } else {
-                    if(arm64_frintts) {
+                    if(cpuext.frintts) {
                         VFRINT32ZDQ(l?d0:v0, v1); // handle overflow
                         VFCVTZSQD(l?d0:v0, l?d0:v0);  // convert double -> int64
                         if(!l)
diff --git a/src/dynarec/arm64/dynarec_arm64_avx_66_0f38.c b/src/dynarec/arm64/dynarec_arm64_avx_66_0f38.c
index 6447eef4..e65864bf 100644
--- a/src/dynarec/arm64/dynarec_arm64_avx_66_0f38.c
+++ b/src/dynarec/arm64/dynarec_arm64_avx_66_0f38.c
@@ -1874,7 +1874,7 @@ uintptr_t dynarec64_AVX_66_0F38(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip
             INST_NAME("VAESIMC Gx, Ex");
             nextop = F8;
             GETGX_empty_EX(v0, v1, 0);
-            if(arm64_aes) {
+            if(cpuext.aes) {
                 AESIMC(v0, v1);
             } else {
                 if(v0!=v1) {
@@ -1890,7 +1890,7 @@ uintptr_t dynarec64_AVX_66_0F38(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip
         case 0xDC:
             INST_NAME("VAESENC Gx, Vx, Ex");  // AES-NI
             nextop = F8;
-            if(arm64_aes) {
+            if(cpuext.aes) {
                 d0 = fpu_get_scratch(dyn, ninst);  // ARM64 internal operation differs a bit from x86_64
                 for(int l=0; l<1+vex.l; ++l) {
                     if(!l) {GETGX_empty_VXEX(v0, v2, v1, 0);} else {GETGY_empty_VYEY(v0, v2, v1);}
@@ -1931,7 +1931,7 @@ uintptr_t dynarec64_AVX_66_0F38(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip
         case 0xDD:
             INST_NAME("VAESENCLAST Gx, Vx, Ex");  // AES-NI
             nextop = F8;
-            if(arm64_aes) {
+            if(cpuext.aes) {
                 d0 = fpu_get_scratch(dyn, ninst);  // ARM64 internal operation differs a bit from x86_64
                 for(int l=0; l<1+vex.l; ++l) {
                     if(!l) { GETGX_empty_VXEX(v0, v2, v1, 0); } else { GETGY_empty_VYEY(v0, v2, v1); }
@@ -1971,7 +1971,7 @@ uintptr_t dynarec64_AVX_66_0F38(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip
         case 0xDE:
             INST_NAME("VAESDEC Gx, Vx, Ex");  // AES-NI
             nextop = F8;
-            if(arm64_aes) {
+            if(cpuext.aes) {
                 d0 = fpu_get_scratch(dyn, ninst);  // ARM64 internal operation differs a bit from x86_64
                 for(int l=0; l<1+vex.l; ++l) {
                     if(!l) {GETGX_empty_VXEX(v0, v2, v1, 0);} else {GETGY_empty_VYEY(v0, v2, v1);}
@@ -2012,7 +2012,7 @@ uintptr_t dynarec64_AVX_66_0F38(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip
         case 0xDF:
             INST_NAME("VAESDECLAST Gx, Vx, Ex");  // AES-NI
             nextop = F8;
-            if(arm64_aes) {
+            if(cpuext.aes) {
                 d0 = fpu_get_scratch(dyn, ninst);  // ARM64 internal operation differs a bit from x86_64
                 for(int l=0; l<1+vex.l; ++l) {
                     if(!l) {GETGX_empty_VXEX(v0, v2, v1, 0);} else {GETGY_empty_VYEY(v0, v2, v1);}
diff --git a/src/dynarec/arm64/dynarec_arm64_avx_66_0f3a.c b/src/dynarec/arm64/dynarec_arm64_avx_66_0f3a.c
index b0253420..42cdb771 100644
--- a/src/dynarec/arm64/dynarec_arm64_avx_66_0f3a.c
+++ b/src/dynarec/arm64/dynarec_arm64_avx_66_0f3a.c
@@ -662,7 +662,7 @@ uintptr_t dynarec64_AVX_66_0F3A(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip
         case 0x44:
             INST_NAME("VPCLMULQDQ Gx, Vx, Ex, Ib");
             nextop = F8;
-            if(arm64_pmull) {
+            if(cpuext.pmull) {
                 d0 = fpu_get_scratch(dyn, ninst);
                 for(int l=0; l<1+vex.l; ++l) {
                     if(!l) {
diff --git a/src/dynarec/arm64/dynarec_arm64_avx_f2_0f.c b/src/dynarec/arm64/dynarec_arm64_avx_f2_0f.c
index 79a2b651..d1953a79 100644
--- a/src/dynarec/arm64/dynarec_arm64_avx_f2_0f.c
+++ b/src/dynarec/arm64/dynarec_arm64_avx_f2_0f.c
@@ -144,12 +144,12 @@ uintptr_t dynarec64_AVX_F2_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip,
             nextop = F8;
             GETGD;
             GETEXSD(q0, 0, 0);
-            if(!BOX64ENV(dynarec_fastround) && !arm64_frintts) {
+            if(!BOX64ENV(dynarec_fastround) && !cpuext.frintts) {
                 MRS_fpsr(x5);
                 BFCw(x5, FPSR_IOC, 1);   // reset IOC bit
                 MSR_fpsr(x5);
             }
-            if(!BOX64ENV(dynarec_fastround) && arm64_frintts) {
+            if(!BOX64ENV(dynarec_fastround) && cpuext.frintts) {
                 v0 = fpu_get_scratch(dyn, ninst);
                 if(rex.w) {
                     FRINT64ZD(v0, q0);
@@ -160,7 +160,7 @@ uintptr_t dynarec64_AVX_F2_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip,
             } else {
                 FCVTZSxwD(gd, q0);
             }
-            if(!BOX64ENV(dynarec_fastround) && !arm64_frintts) {
+            if(!BOX64ENV(dynarec_fastround) && !cpuext.frintts) {
                 MRS_fpsr(x5);   // get back FPSR to check the IOC bit
                 TBZ_NEXT(x5, FPSR_IOC);
                 if(rex.w) {
@@ -175,14 +175,14 @@ uintptr_t dynarec64_AVX_F2_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip,
             nextop = F8;
             GETGD;
             GETEXSD(q0, 0, 0);
-            if(!BOX64ENV(dynarec_fastround) && !arm64_frintts) {
+            if(!BOX64ENV(dynarec_fastround) && !cpuext.frintts) {
                 MRS_fpsr(x5);
                 BFCw(x5, FPSR_IOC, 1);   // reset IOC bit
                 MSR_fpsr(x5);
             }
             u8 = sse_setround(dyn, ninst, x1, x2, x3);
             d1 = fpu_get_scratch(dyn, ninst);
-            if(!BOX64ENV(dynarec_fastround) && arm64_frintts) {
+            if(!BOX64ENV(dynarec_fastround) && cpuext.frintts) {
                 if(rex.w) {
                     FRINT64XD(d1, q0);
                 } else {
@@ -193,7 +193,7 @@ uintptr_t dynarec64_AVX_F2_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip,
             }
             x87_restoreround(dyn, ninst, u8);
             FCVTZSxwD(gd, d1);
-            if(!BOX64ENV(dynarec_fastround) && !arm64_frintts) {
+            if(!BOX64ENV(dynarec_fastround) && !cpuext.frintts) {
                 MRS_fpsr(x5);   // get back FPSR to check the IOC bit
                 TBZ_NEXT(x5, FPSR_IOC);
                 if(rex.w) {
@@ -550,7 +550,7 @@ uintptr_t dynarec64_AVX_F2_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip,
                     else
                         SQXTN2_32(v0, d0);   // convert int64 -> int32 with saturation in higher part
                 } else {
-                    if(arm64_frintts) {
+                    if(cpuext.frintts) {
                         VFRINT32XDQ(l?d0:v0, v1);    // round, handling of overflow and Nan to 0x80000000
                         VFCVTNSQD(l?d0:v0, l?d0:v0);  // convert double -> int64
                         if(!l)
diff --git a/src/dynarec/arm64/dynarec_arm64_avx_f3_0f.c b/src/dynarec/arm64/dynarec_arm64_avx_f3_0f.c
index 12f11648..881fc784 100644
--- a/src/dynarec/arm64/dynarec_arm64_avx_f3_0f.c
+++ b/src/dynarec/arm64/dynarec_arm64_avx_f3_0f.c
@@ -144,12 +144,12 @@ uintptr_t dynarec64_AVX_F3_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip,
             nextop = F8;
             GETGD;
             GETEXSS(d0, 0, 0);
-            if(!BOX64ENV(dynarec_fastround) && !arm64_frintts) {
+            if(!BOX64ENV(dynarec_fastround) && !cpuext.frintts) {
                 MRS_fpsr(x5);
                 BFCw(x5, FPSR_IOC, 1);   // reset IOC bit
                 MSR_fpsr(x5);
             }
-            if(!BOX64ENV(dynarec_fastround) && arm64_frintts) {
+            if(!BOX64ENV(dynarec_fastround) && cpuext.frintts) {
                 v0 = fpu_get_scratch(dyn, ninst);
                 if(rex.w) {
                     FRINT64ZS(v0, d0);
@@ -160,7 +160,7 @@ uintptr_t dynarec64_AVX_F3_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip,
             } else {
                 FCVTZSxwS(gd, d0);
             }
-            if(!BOX64ENV(dynarec_fastround) && !arm64_frintts) {
+            if(!BOX64ENV(dynarec_fastround) && !cpuext.frintts) {
                 MRS_fpsr(x5);   // get back FPSR to check the IOC bit
                 TBZ_NEXT(x5, FPSR_IOC);
                 if(rex.w) {
@@ -175,14 +175,14 @@ uintptr_t dynarec64_AVX_F3_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip,
             nextop = F8;
             GETGD;
             GETEXSS(q0, 0, 0);
-            if(!BOX64ENV(dynarec_fastround) && !arm64_frintts) {
+            if(!BOX64ENV(dynarec_fastround) && !cpuext.frintts) {
                 MRS_fpsr(x5);
                 BFCw(x5, FPSR_IOC, 1);   // reset IOC bit
                 MSR_fpsr(x5);
             }
             u8 = sse_setround(dyn, ninst, x1, x2, x3);
             d1 = fpu_get_scratch(dyn, ninst);
-            if(!BOX64ENV(dynarec_fastround) && arm64_frintts) {
+            if(!BOX64ENV(dynarec_fastround) && cpuext.frintts) {
                 if(rex.w) {
                     FRINT64XS(d1, q0);
                 } else {
@@ -193,7 +193,7 @@ uintptr_t dynarec64_AVX_F3_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip,
             }
             x87_restoreround(dyn, ninst, u8);
             FCVTZSxwS(gd, d1);
-            if(!BOX64ENV(dynarec_fastround) && !arm64_frintts) {
+            if(!BOX64ENV(dynarec_fastround) && !cpuext.frintts) {
                 MRS_fpsr(x5);   // get back FPSR to check the IOC bit
                 TBZ_NEXT(x5, FPSR_IOC);
                 if(rex.w) {
@@ -342,7 +342,7 @@ uintptr_t dynarec64_AVX_F3_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip,
                 if(BOX64ENV(dynarec_fastround)) {
                     VFCVTZSQS(v0, v1);
                 } else {
-                    if(arm64_frintts) {
+                    if(cpuext.frintts) {
                         VFRINT32ZSQ(v0, v1);
                         VFCVTZSQS(v0, v0);
                     } else {
diff --git a/src/dynarec/arm64/dynarec_arm64_db.c b/src/dynarec/arm64/dynarec_arm64_db.c
index e10f8e4a..2d98f0cb 100644
--- a/src/dynarec/arm64/dynarec_arm64_db.c
+++ b/src/dynarec/arm64/dynarec_arm64_db.c
@@ -193,7 +193,7 @@ uintptr_t dynarec64_DB(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                 v1 = x87_get_st(dyn, ninst, x1, x2, 0, NEON_CACHE_ST_D);
                 addr = geted(dyn, addr, ninst, nextop, &wback, x2, &fixedaddress, &unscaled, 0xfff<<2, 3, rex, NULL, 0, 0);
                 s0 = fpu_get_scratch(dyn, ninst);
-                if(arm64_frintts) {
+                if(cpuext.frintts) {
                     FRINT32ZD(s0, v1);
                     FCVTZSwD(x5, s0);
                     STW(x5, wback, fixedaddress);
@@ -223,7 +223,7 @@ uintptr_t dynarec64_DB(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                 u8 = x87_setround(dyn, ninst, x1, x2, x4); // x1 have the modified RPSCR reg
                 addr = geted(dyn, addr, ninst, nextop, &wback, x2, &fixedaddress, &unscaled, 0xfff<<2, 3, rex, NULL, 0, 0);
                 s0 = fpu_get_scratch(dyn, ninst);
-                if(arm64_frintts) {
+                if(cpuext.frintts) {
                     FRINT32XD(s0, v1);
                     FCVTZSwD(x5, s0);
                     STW(x5, wback, fixedaddress);
@@ -253,7 +253,7 @@ uintptr_t dynarec64_DB(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                 u8 = x87_setround(dyn, ninst, x1, x2, x4); // x1 have the modified RPSCR reg
                 addr = geted(dyn, addr, ninst, nextop, &wback, x2, &fixedaddress, &unscaled, 0xfff<<2, 3, rex, NULL, 0, 0);
                 s0 = fpu_get_scratch(dyn, ninst);
-                if(arm64_frintts) {
+                if(cpuext.frintts) {
                     FRINT32XD(s0, v1);
                     FCVTZSwD(x5, s0);
                     STW(x5, wback, fixedaddress);
diff --git a/src/dynarec/arm64/dynarec_arm64_dd.c b/src/dynarec/arm64/dynarec_arm64_dd.c
index f88c965d..fe640ef3 100644
--- a/src/dynarec/arm64/dynarec_arm64_dd.c
+++ b/src/dynarec/arm64/dynarec_arm64_dd.c
@@ -150,7 +150,7 @@ uintptr_t dynarec64_DD(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                     VST64(v1, ed, fixedaddress);
                 } else {
                     s0 = fpu_get_scratch(dyn, ninst);
-                    if(arm64_frintts) {
+                    if(cpuext.frintts) {
                         FRINT64ZD(s0, v1);
                         VFCVTZSd(s0, s0);
                         VST64(s0, ed, fixedaddress);
diff --git a/src/dynarec/arm64/dynarec_arm64_df.c b/src/dynarec/arm64/dynarec_arm64_df.c
index f177fef2..e0e06059 100644
--- a/src/dynarec/arm64/dynarec_arm64_df.c
+++ b/src/dynarec/arm64/dynarec_arm64_df.c
@@ -354,7 +354,7 @@ uintptr_t dynarec64_DF(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                         B_MARK3(c__);
                         MARK2;
                     }
-                    if(arm64_frintts) {
+                    if(cpuext.frintts) {
                         FRINT64XD(s0, v1);
                         VFCVTZSd(s0, s0);
                         VST64(s0, wback, fixedaddress);
diff --git a/src/dynarec/arm64/dynarec_arm64_emit_logic.c b/src/dynarec/arm64/dynarec_arm64_emit_logic.c
index ac6b8a3a..404f7d6e 100644
--- a/src/dynarec/arm64/dynarec_arm64_emit_logic.c
+++ b/src/dynarec/arm64/dynarec_arm64_emit_logic.c
@@ -451,7 +451,7 @@ void emit_and8(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3, int s4)
         MOV32w(s3, (1<<F_CF)|(1<<F_AF)|(1<<F_OF));
         BICw_REG(xFlags, xFlags, s3);
     }
-    if(arm64_flagm) {
+    if(cpuext.flagm) {
         COMP_ZFSF(s1, 8)
     } else {
         IFX(X_ZF) {
@@ -496,7 +496,7 @@ void emit_and8c(dynarec_arm_t* dyn, int ninst, int s1, uint8_t c, int s3, int s4
         MOV32w(s3, (1<<F_CF)|(1<<F_AF)|(1<<F_OF));
         BICw(xFlags, xFlags, s3);
     }
-    if(arm64_flagm) {
+    if(cpuext.flagm) {
         COMP_ZFSF(s1, 8)
     } else {
         IFX(X_ZF) {
@@ -639,7 +639,7 @@ void emit_and16(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3, int s4)
         MOV32w(s3, (1<<F_CF)|(1<<F_AF)|(1<<F_OF));
         BICw_REG(xFlags, xFlags, s3);
     }
-    if(arm64_flagm) {
+    if(cpuext.flagm) {
         COMP_ZFSF(s1, 16)
     } else {
         IFX(X_ZF) {
@@ -684,7 +684,7 @@ void emit_and16c(dynarec_arm_t* dyn, int ninst, int s1, int16_t c, int s3, int s
         MOV32w(s3, (1<<F_CF)|(1<<F_AF)|(1<<F_OF));
         BICw(xFlags, xFlags, s3);
     }
-    if(arm64_flagm) {
+    if(cpuext.flagm) {
         COMP_ZFSF(s1, 16)
     } else {
         IFX(X_ZF) {
diff --git a/src/dynarec/arm64/dynarec_arm64_emit_math.c b/src/dynarec/arm64/dynarec_arm64_emit_math.c
index 76f951ed..63be6c30 100644
--- a/src/dynarec/arm64/dynarec_arm64_emit_math.c
+++ b/src/dynarec/arm64/dynarec_arm64_emit_math.c
@@ -991,7 +991,7 @@ void emit_adc32(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int s2, int s3
     }
     IFNATIVE_BEFORE(NF_CF) {
         if(INVERTED_CARRY_BEFORE) {
-            if(arm64_flagm)
+            if(cpuext.flagm)
                 CFINV();
             else {
                 MRS_nzcv(s3);
@@ -1134,7 +1134,7 @@ void emit_adc8(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3, int s4)
     }
     IFNATIVE_BEFORE(NF_CF) {
         if(INVERTED_CARRY_BEFORE) {
-            if(arm64_flagm)
+            if(cpuext.flagm)
                 CFINV();
             else {
                 MRS_nzcv(s3);
@@ -1198,7 +1198,7 @@ void emit_adc16(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3, int s4)
     }
     IFNATIVE_BEFORE(NF_CF) {
         if(INVERTED_CARRY_BEFORE) {
-            if(arm64_flagm)
+            if(cpuext.flagm)
                 CFINV();
             else {
                 MRS_nzcv(s3);
@@ -1321,7 +1321,7 @@ void emit_sbb32(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int s2, int s3
     }
     IFNATIVE_BEFORE(NF_CF) {
         if(!INVERTED_CARRY_BEFORE) {
-            if(arm64_flagm)
+            if(cpuext.flagm)
                 CFINV();
             else {
                 MRS_nzcv(s3);
@@ -1470,7 +1470,7 @@ void emit_sbb8(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3, int s4)
     }
     IFNATIVE_BEFORE(NF_CF) {
         if(!INVERTED_CARRY_BEFORE) {
-            if(arm64_flagm)
+            if(cpuext.flagm)
                 CFINV();
             else {
                 MRS_nzcv(s3);
@@ -1535,7 +1535,7 @@ void emit_sbb16(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3, int s4)
     }
     IFNATIVE_BEFORE(NF_CF) {
         if(!INVERTED_CARRY_BEFORE) {
-            if(arm64_flagm)
+            if(cpuext.flagm)
                 CFINV();
             else {
                 MRS_nzcv(s3);
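
Note on the flagm fallbacks above: when the host lacks FEAT_FlagM, the single CFINV (invert the carry flag) is replaced by reading NZCV, toggling the C bit and writing it back, which is what the MRS_nzcv(...) sequences begin. A C sketch of the bit being flipped (illustrative only):

    #include <stdint.h>

    /* NZCV layout: N = bit 31, Z = bit 30, C = bit 29, V = bit 28. */
    static uint64_t invert_carry(uint64_t nzcv)
    {
        return nzcv ^ (1ull << 29);
    }
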
diff --git a/src/dynarec/arm64/dynarec_arm64_f0.c b/src/dynarec/arm64/dynarec_arm64_f0.c
index 4a72e98e..69f9a5ef 100644
--- a/src/dynarec/arm64/dynarec_arm64_f0.c
+++ b/src/dynarec/arm64/dynarec_arm64_f0.c
@@ -72,7 +72,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                 BFIx(wback, x1, wb2*8, 8);
             } else {
                 addr = geted(dyn, addr, ninst, nextop, &wback, x3, &fixedaddress, NULL, 0, 0, rex, LOCK_LOCK, 0, 0);
-                if(arm64_atomics) {
+                if(cpuext.atomics) {
                     UFLAG_IF {
                         LDADDALB(x2, x1, wback);
                         emit_add8(dyn, ninst, x1, x2, x4, x5);
@@ -100,7 +100,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
             } else {
                 addr = geted(dyn, addr, ninst, nextop, &wback, x2, &fixedaddress, NULL, 0, 0, rex, LOCK_LOCK, 0, 0);
                 if(!ALIGNED_ATOMICxw) {
-                    if(arm64_uscat) {
+                    if(cpuext.uscat) {
                         ANDx_mask(x1, wback, 1, 0, 3);  // mask = F
                         CMPSw_U12(x1, 16-(1<<(2+rex.w)));
                         B_MARK(cGT);
@@ -109,7 +109,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                         B_MARK(cNE);
                     }
                 }
-                if(arm64_atomics) {
+                if(cpuext.atomics) {
                     UFLAG_IF {
                         LDADDALxw(gd, x1, wback);
                     } else {
@@ -163,7 +163,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                 BFIx(wback, x1, wb2*8, 8);
             } else {
                 addr = geted(dyn, addr, ninst, nextop, &wback, x3, &fixedaddress, NULL, 0, 0, rex, LOCK_LOCK, 0, 0);
-                if(arm64_atomics) {
+                if(cpuext.atomics) {
                     LDSETALB(x2, x1, wback);
                     UFLAG_IF {
                         emit_or8(dyn, ninst, x1, x2, x4, x5);
@@ -188,7 +188,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                 emit_or32(dyn, ninst, rex, ed, gd, x3, x4);
             } else {
                 addr = geted(dyn, addr, ninst, nextop, &wback, x2, &fixedaddress, NULL, 0, 0, rex, LOCK_LOCK, 0, 0);
-                if(arm64_atomics) {
+                if(cpuext.atomics) {
                     LDSETALxw(gd, x1, wback);
                     UFLAG_IF {
                         emit_or32(dyn, ninst, rex, x1, gd, x3, x4);
@@ -288,7 +288,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                                 BFIx(xRAX, x2, 0, 8);
                             } else {
                                 addr = geted(dyn, addr, ninst, nextop, &wback, x3, &fixedaddress, NULL, 0, 0, rex, LOCK_LOCK, 0, 0);
-                                if(arm64_atomics) {
+                                if(cpuext.atomics) {
                                     UFLAG_IF {
                                         MOVw_REG(x2, x6);
                                         CASALB(x6, gd, wback);
@@ -338,7 +338,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                             } else {
                                 addr = geted(dyn, addr, ninst, nextop, &wback, x2, &fixedaddress, NULL, 0, 0, rex, LOCK_LOCK, 0, 0);
                                 if(!ALIGNED_ATOMICxw) {
-                                    if(arm64_uscat) {
+                                    if(cpuext.uscat) {
                                         ANDx_mask(x1, wback, 1, 0, 3);  // mask = F
                                         CMPSw_U12(x1, 16-(1<<(2+rex.w)));
                                         B_MARK3(cGT);
@@ -348,7 +348,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                                     }
                                 }
                                 // Aligned version
-                                if(arm64_atomics) {
+                                if(cpuext.atomics) {
                                     UFLAG_IF {
                                         MOVxw_REG(x1, xRAX);
                                         CASALxw(x1, gd, wback);
@@ -395,7 +395,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                                 // Common part (and fallback for EAX != Ed)
                                 UFLAG_IF {emit_cmp32(dyn, ninst, rex, xRAX, x1, x3, x4, x5); MOVxw_REG(xRAX, x1);}
                                 else {
-                                    if(!ALIGNED_ATOMICxw || !arm64_atomics)
+                                    if(!ALIGNED_ATOMICxw || !cpuext.atomics)
                                         MOVxw_REG(xRAX, x1);    // upper par of RAX will be erase on 32bits, no mater what
                                 }
                             }
@@ -617,7 +617,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                                 EBBACK; // eb gets x1 (sum)
                             } else {
                                 addr = geted(dyn, addr, ninst, nextop, &wback, x2, &fixedaddress, NULL, 0, 0, rex, LOCK_LOCK, 0, 0);
-                                if(arm64_atomics) {
+                                if(cpuext.atomics) {
                                     UFLAG_IF {
                                         MOVxw_REG(x3, gd);
                                         LDADDALB(x3, gd, wback);
@@ -663,7 +663,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                             } else {
                                 addr = geted(dyn, addr, ninst, nextop, &wback, x2, &fixedaddress, NULL, 0, 0, rex, LOCK_LOCK, 0, 0);
                                 if(!ALIGNED_ATOMICxw) {
-                                    if(arm64_uscat) {
+                                    if(cpuext.uscat) {
                                         ANDx_mask(x1, wback, 1, 0, 3);  // mask = F
                                         CMPSw_U12(x1, 16-(1<<(2+rex.w)));
                                         B_MARK(cGT);
@@ -672,7 +672,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                                         B_MARK(cNE);    // unaligned
                                     }
                                 }
-                                if(arm64_atomics) {
+                                if(cpuext.atomics) {
                                     UFLAG_IF {
                                         LDADDALxw(gd, x1, wback);
                                     } else {
@@ -690,7 +690,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                                     UFLAG_IF {
                                         B_MARK2_nocond;
                                     } else {
-                                        if(!arm64_atomics) MOVxw_REG(gd, x1);
+                                        if(!cpuext.atomics) MOVxw_REG(gd, x1);
                                         B_NEXT_nocond;
                                     }
                                     MARK;
@@ -709,7 +709,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                                     MOVxw_REG(x3, x1);
                                     emit_add32(dyn, ninst, rex, x3, gd, x4, x5);
                                     MOVxw_REG(gd, x1);
-                                } else if(!arm64_atomics || !ALIGNED_ATOMICxw) {
+                                } else if(!cpuext.atomics || !ALIGNED_ATOMICxw) {
                                     MOVxw_REG(gd, x1);
                                 }
                             }
@@ -732,7 +732,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                         SETFLAGS(X_ZF, SF_SUBSET);
                         addr = geted(dyn, addr, ninst, nextop, &wback, x1, &fixedaddress, NULL, 0, 0, rex, LOCK_LOCK, 0, 0);
                         if(!ALIGNED_ATOMICxw) {
-                            if(arm64_uscat) {
+                            if(cpuext.uscat) {
                                 if(rex.w) {
                                     TSTx_mask(wback, 1, 0, 3);
                                     B_MARK2(cNE);
@@ -746,7 +746,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                                 B_MARK2(cNE);    // unaligned
                             }
                         }
-                        if(arm64_atomics) {
+                        if(cpuext.atomics) {
                             MOVx_REG(x2, xRAX);
                             MOVx_REG(x3, xRDX);
                             MOVx_REG(x4, xRBX);
@@ -886,7 +886,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
             } else {
                 addr = geted(dyn, addr, ninst, nextop, &wback, x2, &fixedaddress, NULL, 0, 0, rex, LOCK_LOCK, 0, 0);
                 GETGB(x5);
-                if(arm64_atomics) {
+                if(cpuext.atomics) {
                     MVNxw_REG(x1, gd);
                     UFLAG_IF {
                         LDCLRALB(x1, x1, wback);
@@ -914,7 +914,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                 emit_and32(dyn, ninst, rex, ed, gd, x3, x4);
             } else {
                 addr = geted(dyn, addr, ninst, nextop, &wback, x2, &fixedaddress, NULL, 0, 0, rex, LOCK_LOCK, 0, 0);
-                if(arm64_atomics) {
+                if(cpuext.atomics) {
                     MVNxw_REG(x1, gd);
                     UFLAG_IF {
                         LDCLRALxw(x1, x1, wback);
@@ -944,7 +944,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
             } else {
                 addr = geted(dyn, addr, ninst, nextop, &wback, x2, &fixedaddress, NULL, 0, 0, rex, LOCK_LOCK, 0, 0);
                 if(!ALIGNED_ATOMICxw) {
-                    if(arm64_uscat) {
+                    if(cpuext.uscat) {
                         ANDx_mask(x1, wback, 1, 0, 3);  // mask = F
                         CMPSw_U12(x1, 16-(1<<(2+rex.w)));
                         B_MARK(cGT);
@@ -953,7 +953,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                         B_MARK(cNE);
                     }
                 }
-                if(arm64_atomics && 0) {    // disabled because 0x80000000 has no negative
+                if(cpuext.atomics && 0) {    // disabled because 0x80000000 has no negative
                     NEGxw_REG(x1, gd);
                     UFLAG_IF {
                         LDADDALxw(x1, x1, wback);
@@ -1003,7 +1003,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
             } else {
                 addr = geted(dyn, addr, ninst, nextop, &wback, x2, &fixedaddress, NULL, 0, 0, rex, LOCK_LOCK, 0, 0);
                 if(!ALIGNED_ATOMICxw) {
-                    if(arm64_uscat) {
+                    if(cpuext.uscat) {
                         ANDx_mask(x1, wback, 1, 0, 3);  // mask = F
                         CMPSw_U12(x1, 16-(1<<(2+rex.w)));
                         B_MARK(cGT);
@@ -1012,7 +1012,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                         B_MARK(cNE);
                     }
                 }
-                if(arm64_atomics) {
+                if(cpuext.atomics) {
                     UFLAG_IF {
                         LDEORALxw(gd, x1, wback);
                     } else {
@@ -1065,7 +1065,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                         addr = geted(dyn, addr, ninst, nextop, &wback, x5, &fixedaddress, NULL, 0, 0, rex, LOCK_LOCK, 0, 1);
                         u8 = F8;
                         wb1 = 1;
-                        if(arm64_atomics) {
+                        if(cpuext.atomics) {
                             MOV32w(x2, u8);
                             UFLAG_IF {
                                 LDADDALB(x2, x1, wback);
@@ -1097,7 +1097,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                         addr = geted(dyn, addr, ninst, nextop, &wback, x5, &fixedaddress, NULL, 0, 0, rex, LOCK_LOCK, 0, 1);
                         u8 = F8;
                         wb1 = 1;
-                        if(arm64_atomics) {
+                        if(cpuext.atomics) {
                             MOV32w(x2, u8);
                             UFLAG_IF {
                                 LDSETALB(x2, x1, wback);
@@ -1172,7 +1172,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                         addr = geted(dyn, addr, ninst, nextop, &wback, x5, &fixedaddress, NULL, 0, 0, rex, LOCK_LOCK, 0, 1);
                         u8 = F8;
                         wb1 = 1;
-                        if(arm64_atomics) {
+                        if(cpuext.atomics) {
                             MOV32w(x2, ~u8);
                             UFLAG_IF {
                                 LDCLRALB(x2, x1, wback);
@@ -1203,7 +1203,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                         addr = geted(dyn, addr, ninst, nextop, &wback, x5, &fixedaddress, NULL, 0, 0, rex, LOCK_LOCK, 0, 1);
                         u8 = F8;
                         wb1 = 1;
-                        if(arm64_atomics) {
+                        if(cpuext.atomics) {
                             MOV32w(x2, -u8);
                             UFLAG_IF {
                                 LDADDALB(x2, x1, wback);
@@ -1234,7 +1234,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                         addr = geted(dyn, addr, ninst, nextop, &wback, x5, &fixedaddress, NULL, 0, 0, rex, LOCK_LOCK, 0, 1);
                         u8 = F8;
                         wb1 = 1;
-                        if(arm64_atomics) {
+                        if(cpuext.atomics) {
                             MOV32w(x2, u8);
                             UFLAG_IF {
                                 LDEORALB(x2, x1, wback);
@@ -1291,7 +1291,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                             MOV64xw(x5, i64);
                         }
                         if(!ALIGNED_ATOMICxw) {
-                            if(arm64_uscat) {
+                            if(cpuext.uscat) {
                                 ANDx_mask(x1, wback, 1, 0, 3);  // mask = F
                                 CMPSw_U12(x1, 16-(1<<(2+rex.w)));
                                 B_MARK(cGT);
@@ -1300,7 +1300,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                                 B_MARK(cNE);
                             }
                         }
-                        if(arm64_atomics) {
+                        if(cpuext.atomics) {
                             if((i64>-0x1000) && (i64<0x1000)) {
                                 MOV64xw(x5, i64);
                             }
@@ -1367,7 +1367,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                             // this is __faststorefence
                             DMB_ST();
                         } else {
-                            if(arm64_atomics) {
+                            if(cpuext.atomics) {
                                 MOV64xw(x5, i64);
                                 UFLAG_IF {
                                     LDSETALxw(x5, x1, wback);
@@ -1439,7 +1439,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                     } else {
                         addr = geted(dyn, addr, ninst, nextop, &wback, x2, &fixedaddress, NULL, 0, 0, rex, LOCK_LOCK, 0, (opcode==0x81)?4:1);
                         if(opcode==0x81) i64 = F32S; else i64 = F8S;
-                        if(arm64_atomics) {
+                        if(cpuext.atomics) {
                             MOV64xw(x5, ~i64);
                             UFLAG_IF {
                                 LDCLRALxw(x5, x1, wback);
@@ -1472,7 +1472,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                             MOV64xw(x5, i64);
                         }
                         if(!ALIGNED_ATOMICxw) {
-                            if(arm64_uscat) {
+                            if(cpuext.uscat) {
                                 ANDx_mask(x1, wback, 1, 0, 3);  // mask = F
                                 CMPSw_U12(x1, 16-(1<<(2+rex.w)));
                                 B_MARK(cGT);
@@ -1481,7 +1481,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                                 B_MARK(cNE);
                             }
                         }
-                        if(arm64_atomics) {
+                        if(cpuext.atomics) {
                             if((i64>-0x1000) && (i64<0x1000)) {
                                 MOV64xw(x5, -i64);
                             } else {
@@ -1553,7 +1553,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                     } else {
                         addr = geted(dyn, addr, ninst, nextop, &wback, x2, &fixedaddress, NULL, 0, 0, rex, LOCK_LOCK, 0, (opcode==0x81)?4:1);
                         if(opcode==0x81) i64 = F32S; else i64 = F8S;
-                        if(arm64_atomics) {
+                        if(cpuext.atomics) {
                             MOV64xw(x5, i64);
                             UFLAG_IF {
                                 LDEORALxw(x5, x1, wback);
@@ -1609,7 +1609,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
             } else {
                 GETGB(x4);
                 addr = geted(dyn, addr, ninst, nextop, &ed, x2, &fixedaddress, NULL, 0, 0, rex, LOCK_LOCK, 0, 0);
-                if(arm64_atomics) {
+                if(cpuext.atomics) {
                     SWPALB(x4, x1, ed);
                 } else {
                     MARKLOCK;
@@ -1636,7 +1636,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                 GETGD;
                 addr = geted(dyn, addr, ninst, nextop, &ed, x2, &fixedaddress, NULL, 0, 0, rex, LOCK_LOCK, 0, 0);
                 if(!ALIGNED_ATOMICxw) {
-                    if(arm64_uscat) {
+                    if(cpuext.uscat) {
                         ANDx_mask(x1, ed, 1, 0, 3);  // mask = F
                         CMPSw_U12(x1, 16-(1<<(2+rex.w)));
                         B_MARK(cGT);
@@ -1645,7 +1645,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                         B_MARK(cNE);
                     }
                 }
-                if(arm64_atomics) {
+                if(cpuext.atomics) {
                     SWPALxw(gd, gd, ed);
                     if(!ALIGNED_ATOMICxw) {
                         B_NEXT_nocond;
@@ -1671,7 +1671,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                     MARK2;
                 }
                 SMDMB();
-                if(!ALIGNED_ATOMICxw || !arm64_atomics) {
+                if(!ALIGNED_ATOMICxw || !cpuext.atomics) {
                     MOVxw_REG(gd, x1);
                 }
             }
@@ -1697,7 +1697,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                         EBBACK;
                     } else {
                         addr = geted(dyn, addr, ninst, nextop, &wback, x2, &fixedaddress, NULL, 0, 0, rex, LOCK_LOCK, 0, 0);
-                        if(arm64_atomics) {
+                        if(cpuext.atomics) {
                             MOV32w(x1, 0xff);
                             STEORLB(x1, wback);
                         } else {
@@ -1725,7 +1725,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                         EBBACK;
                     } else {
                         addr = geted(dyn, addr, ninst, nextop, &wback, x2, &fixedaddress, NULL, 0, 0, rex, LOCK_LOCK, 0, 0);
-                        if(arm64_atomics) {
+                        if(cpuext.atomics) {
                             MOV64x(x1, ~0LL);
                             STEORLxw(x1, wback);
                         } else {
@@ -1757,7 +1757,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                     } else {
                         addr = geted(dyn, addr, ninst, nextop, &wback, x2, &fixedaddress, NULL, 0, 0, rex, LOCK_LOCK, 0, 0);
                         MARKLOCK;
-                        if(arm64_atomics) {
+                        if(cpuext.atomics) {
                             MOV32w(x3, 1);
                             UFLAG_IF {
                                 LDADDALB(x3, x1, wback);
@@ -1783,7 +1783,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                         EBBACK;
                     } else {
                         addr = geted(dyn, addr, ninst, nextop, &wback, x2, &fixedaddress, NULL, 0, 0, rex, LOCK_LOCK, 0, 0);
-                        if(arm64_atomics) {
+                        if(cpuext.atomics) {
                             MOV32w(x3, -1);
                             UFLAG_IF {
                                 LDADDALB(x3, x1, wback);
@@ -1818,7 +1818,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                     } else {
                         addr = geted(dyn, addr, ninst, nextop, &wback, x2, &fixedaddress, NULL, 0, 0, rex, LOCK_LOCK, 0, 0);
                         if(!ALIGNED_ATOMICxw) {
-                            if(arm64_uscat) {
+                            if(cpuext.uscat) {
                                 ANDx_mask(x1, wback, 1, 0, 3);  // mask = F
                                 CMPSw_U12(x1, 16-(1<<(2+rex.w)));
                                 B_MARK(cGT);
@@ -1827,7 +1827,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                                 B_MARK(cNE);    // unaligned
                             }
                         }
-                        if(arm64_atomics) {
+                        if(cpuext.atomics) {
                             MOV32w(x3, 1);
                             UFLAG_IF {
                                 LDADDALxw(x3, x1, wback);
@@ -1874,7 +1874,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                     } else {
                         addr = geted(dyn, addr, ninst, nextop, &wback, x2, &fixedaddress, NULL, 0, 0, rex, LOCK_LOCK, 0, 0);
                         if(!ALIGNED_ATOMICxw) {
-                            if(arm64_uscat) {
+                            if(cpuext.uscat) {
                                 ANDx_mask(x1, wback, 1, 0, 3);  // mask = F
                                 CMPSw_U12(x1, 16-(1<<(2+rex.w)));
                                 B_MARK(cGT);
@@ -1883,7 +1883,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                                 B_MARK(cNE);    // unaligned
                             }
                         }
-                        if(arm64_atomics) {
+                        if(cpuext.atomics) {
                             MOV64xw(x3, -1);
                             UFLAG_IF {
                                 LDADDALxw(x3, x1, wback);
diff --git a/src/dynarec/arm64/dynarec_arm64_f20f.c b/src/dynarec/arm64/dynarec_arm64_f20f.c
index 8428538d..6e13def9 100644
--- a/src/dynarec/arm64/dynarec_arm64_f20f.c
+++ b/src/dynarec/arm64/dynarec_arm64_f20f.c
@@ -118,12 +118,12 @@ uintptr_t dynarec64_F20F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n
             nextop = F8;
             GETGD;
             GETEXSD(q0, 0, 0);
-            if(!BOX64ENV(dynarec_fastround) && !arm64_frintts) {
+            if(!BOX64ENV(dynarec_fastround) && !cpuext.frintts) {
                 MRS_fpsr(x5);
                 BFCw(x5, FPSR_IOC, 1);   // reset IOC bit
                 MSR_fpsr(x5);
             }
-            if(!BOX64ENV(dynarec_fastround) && arm64_frintts) {
+            if(!BOX64ENV(dynarec_fastround) && cpuext.frintts) {
                 v0 = fpu_get_scratch(dyn, ninst);
                 if(rex.w) {
                     FRINT64ZD(v0, q0);
@@ -134,7 +134,7 @@ uintptr_t dynarec64_F20F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n
             } else {
                 FCVTZSxwD(gd, q0);
             }
-            if(!BOX64ENV(dynarec_fastround) && !arm64_frintts) {
+            if(!BOX64ENV(dynarec_fastround) && !cpuext.frintts) {
                 MRS_fpsr(x5);   // get back FPSR to check the IOC bit
                 TBZ_NEXT(x5, FPSR_IOC);
                 if(rex.w) {
@@ -149,14 +149,14 @@ uintptr_t dynarec64_F20F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n
             nextop = F8;
             GETGD;
             GETEXSD(q0, 0, 0);
-            if(!BOX64ENV(dynarec_fastround) && !arm64_frintts) {
+            if(!BOX64ENV(dynarec_fastround) && !cpuext.frintts) {
                 MRS_fpsr(x5);
                 BFCw(x5, FPSR_IOC, 1);   // reset IOC bit
                 MSR_fpsr(x5);
             }
             u8 = sse_setround(dyn, ninst, x1, x2, x3);
             d1 = fpu_get_scratch(dyn, ninst);
-            if(!BOX64ENV(dynarec_fastround) && arm64_frintts) {
+            if(!BOX64ENV(dynarec_fastround) && cpuext.frintts) {
                 if(rex.w) {
                     FRINT64XD(d1, q0);
                 } else {
@@ -167,7 +167,7 @@ uintptr_t dynarec64_F20F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n
             }
             x87_restoreround(dyn, ninst, u8);
             FCVTZSxwD(gd, d1);
-            if(!BOX64ENV(dynarec_fastround) && !arm64_frintts) {
+            if(!BOX64ENV(dynarec_fastround) && !cpuext.frintts) {
                 MRS_fpsr(x5);   // get back FPSR to check the IOC bit
                 TBZ_NEXT(x5, FPSR_IOC);
                 if(rex.w) {
@@ -187,7 +187,7 @@ uintptr_t dynarec64_F20F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n
                     nextop = F8;
                     GETEB(x1, 0);
                     GETGD;
-                    if(arm64_crc32) {
+                    if(cpuext.crc32) {
                         CRC32CB(gd, gd, ed);
                     } else {
                         EORw_REG(gd, gd, ed);
@@ -204,7 +204,7 @@ uintptr_t dynarec64_F20F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n
                     nextop = F8;
                     GETED(0);
                     GETGD;
-                    if(arm64_crc32) {
+                    if(cpuext.crc32) {
                         CRC32Cxw(gd, gd, ed);
                     } else {
                         MOV32w(x2, 0x82f63b78);
@@ -551,7 +551,7 @@ uintptr_t dynarec64_F20F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n
                 SQXTN_32(v0, v0);   // convert int64 -> int32 with saturation in lower part, RaZ high part
             } else {
                 u8 = sse_setround(dyn, ninst, x1, x2, x3);
-                if(arm64_frintts) {
+                if(cpuext.frintts) {
                     VFRINT32XDQ(v0, v1);    // round, handling of overflow and Nan to 0x80000000
                     VFCVTNSQD(v0, v0);  // convert double -> int64
                     SQXTN_32(v0, v0);   // convert int64 -> int32 with saturation in lower part, RaZ high part
diff --git a/src/dynarec/arm64/dynarec_arm64_f30f.c b/src/dynarec/arm64/dynarec_arm64_f30f.c
index 2531a8d6..19d054e3 100644
--- a/src/dynarec/arm64/dynarec_arm64_f30f.c
+++ b/src/dynarec/arm64/dynarec_arm64_f30f.c
@@ -119,12 +119,12 @@ uintptr_t dynarec64_F30F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n
             nextop = F8;
             GETGD;
             GETEXSS(d0, 0, 0);
-            if(!BOX64ENV(dynarec_fastround) && !arm64_frintts) {
+            if(!BOX64ENV(dynarec_fastround) && !cpuext.frintts) {
                 MRS_fpsr(x5);
                 BFCw(x5, FPSR_IOC, 1);   // reset IOC bit
                 MSR_fpsr(x5);
             }
-            if(!BOX64ENV(dynarec_fastround) && arm64_frintts) {
+            if(!BOX64ENV(dynarec_fastround) && cpuext.frintts) {
                 v0 = fpu_get_scratch(dyn, ninst);
                 if(rex.w) {
                     FRINT64ZS(v0, d0);
@@ -135,7 +135,7 @@ uintptr_t dynarec64_F30F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n
             } else {
                 FCVTZSxwS(gd, d0);
             }
-            if(!BOX64ENV(dynarec_fastround) && !arm64_frintts) {
+            if(!BOX64ENV(dynarec_fastround) && !cpuext.frintts) {
                 MRS_fpsr(x5);   // get back FPSR to check the IOC bit
                 TBZ_NEXT(x5, FPSR_IOC);
                 if(rex.w) {
@@ -150,14 +150,14 @@ uintptr_t dynarec64_F30F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n
             nextop = F8;
             GETGD;
             GETEXSS(q0, 0, 0);
-            if(!BOX64ENV(dynarec_fastround) && !arm64_frintts) {
+            if(!BOX64ENV(dynarec_fastround) && !cpuext.frintts) {
                 MRS_fpsr(x5);
                 BFCw(x5, FPSR_IOC, 1);   // reset IOC bit
                 MSR_fpsr(x5);
             }
             u8 = sse_setround(dyn, ninst, x1, x2, x3);
             d1 = fpu_get_scratch(dyn, ninst);
-            if(!BOX64ENV(dynarec_fastround) && arm64_frintts) {
+            if(!BOX64ENV(dynarec_fastround) && cpuext.frintts) {
                 if(rex.w) {
                     FRINT64XS(d1, q0);
                 } else {
@@ -168,7 +168,7 @@ uintptr_t dynarec64_F30F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n
             }
             x87_restoreround(dyn, ninst, u8);
             FCVTZSxwS(gd, d1);
-            if(!BOX64ENV(dynarec_fastround) && !arm64_frintts) {
+            if(!BOX64ENV(dynarec_fastround) && !cpuext.frintts) {
                 MRS_fpsr(x5);   // get back FPSR to check the IOC bit
                 TBZ_NEXT(x5, FPSR_IOC);
                 if(rex.w) {
@@ -322,7 +322,7 @@ uintptr_t dynarec64_F30F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n
             if(BOX64ENV(dynarec_fastround)) {
                 VFCVTZSQS(v0, v1);
             } else {
-                if(arm64_frintts) {
+                if(cpuext.frintts) {
                     VFRINT32ZSQ(v0, v1);
                     VFCVTZSQS(v0, v0);
                 } else {
diff --git a/src/dynarec/arm64/dynarec_arm64_helper.c b/src/dynarec/arm64/dynarec_arm64_helper.c
index 0db1700b..656ee7ad 100644
--- a/src/dynarec/arm64/dynarec_arm64_helper.c
+++ b/src/dynarec/arm64/dynarec_arm64_helper.c
@@ -2593,7 +2593,7 @@ static void nativeFlagsTransform(dynarec_arm_t* dyn, int ninst, int s1, int s2)
     }
     // special case for NF_CF changing state
     if((flags_before&NF_CF) && (flags_after&NF_CF) && (nc_before!=nc_after)) {
-        if(arm64_flagm && !mrs) {
+        if(cpuext.flagm && !mrs) {
             CFINV();
         } else {
             GO_MRS(s2);
diff --git a/src/dynarec/arm64/dynarec_arm64_helper.h b/src/dynarec/arm64/dynarec_arm64_helper.h
index f0f26b8d..50c1279a 100644
--- a/src/dynarec/arm64/dynarec_arm64_helper.h
+++ b/src/dynarec/arm64/dynarec_arm64_helper.h
@@ -943,7 +943,7 @@
 #define GEN_INVERTED_CARRY()
 #endif
 #ifndef INVERT_CARRY
-#define INVERT_CARRY(A)     if(dyn->insts[ninst].normal_carry) {if(arm64_flagm) CFINV(); else {MRS_nzcv(A); EORx_mask(A, A, 1, 35, 0); MSR_nzcv(A);}}
+#define INVERT_CARRY(A)     if(dyn->insts[ninst].normal_carry) {if(cpuext.flagm) CFINV(); else {MRS_nzcv(A); EORx_mask(A, A, 1, 35, 0); MSR_nzcv(A);}}
 #endif
 
 // Generate FCOM with s1 and s2 scratch regs (the VCMP is already done)
@@ -1953,7 +1953,7 @@ uintptr_t dynarec64_AVX_F3_0F38(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip
 
 #define COMP_ZFSF(s1, A)                        \
     IFX(X_ZF|X_SF) {                            \
-        if(arm64_flagm) {                       \
+        if(cpuext.flagm) {                      \
             SETF##A(s1);                        \
             IFX(X_ZF) {                         \
                 IFNATIVE(NF_EQ) {} else {       \