Diffstat (limited to 'src/dynarec/arm64/dynarec_arm64_f0.c')
 -rw-r--r--  src/dynarec/arm64/dynarec_arm64_f0.c | 92 +++++++++++-----------
 1 file changed, 46 insertions(+), 46 deletions(-)
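This commit is a mechanical rename inside the LOCK-prefix (0xF0) opcode handler of box64's ARM64 dynarec: the old standalone feature flags arm64_atomics and arm64_uscat become fields of a cpuext aggregate (cpuext.atomics, cpuext.uscat). A minimal sketch of the shape this diff assumes follows; the field names come from the diff, but the types and anything else in the struct are guesses:

    /* Hypothetical sketch of the feature-flag struct the diff switches to;
       the real box64 struct likely carries more extension flags. */
    typedef struct cpu_ext_s {
        int atomics;  /* FEAT_LSE: LDADD/LDSET/LDCLR/LDEOR/CAS/SWP present */
        int uscat;    /* FEAT_LSE2: unaligned accesses within one 16-byte
                         granule are single-copy atomic */
    } cpu_ext_t;
    extern cpu_ext_t cpuext;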
diff --git a/src/dynarec/arm64/dynarec_arm64_f0.c b/src/dynarec/arm64/dynarec_arm64_f0.c
index 4a72e98e..69f9a5ef 100644
--- a/src/dynarec/arm64/dynarec_arm64_f0.c
+++ b/src/dynarec/arm64/dynarec_arm64_f0.c
@@ -72,7 +72,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                 BFIx(wback, x1, wb2*8, 8);
             } else {
                 addr = geted(dyn, addr, ninst, nextop, &wback, x3, &fixedaddress, NULL, 0, 0, rex, LOCK_LOCK, 0, 0);
-                if(arm64_atomics) {
+                if(cpuext.atomics) {
                     UFLAG_IF {
                         LDADDALB(x2, x1, wback);
                         emit_add8(dyn, ninst, x1, x2, x4, x5);
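The hunk above shows the two-path pattern that recurs throughout this file: with FEAT_LSE available, a LOCK ADD on a byte becomes a single LDADDALB (atomic fetch-add, returning the old value for flag computation); without it, the dynarec emits a load-exclusive/store-exclusive retry loop. A C analogue using GCC atomic builtins, purely illustrative since the dynarec emits the instructions directly (helper name is mine):

    #include <stdint.h>
    /* LOCK ADD m8, r8 — sketch. Built with LSE support this builtin
       lowers to LDADDALB; otherwise to an LDAXRB/STLXRB loop, which is
       the same split cpuext.atomics selects at runtime. */
    static uint8_t lock_add8(uint8_t *mem, uint8_t src) {
        uint8_t old = __atomic_fetch_add(mem, src, __ATOMIC_SEQ_CST);
        return old;  /* flags are then derived from old + src (emit_add8) */
    }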
@@ -100,7 +100,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
             } else {
                 addr = geted(dyn, addr, ninst, nextop, &wback, x2, &fixedaddress, NULL, 0, 0, rex, LOCK_LOCK, 0, 0);
                 if(!ALIGNED_ATOMICxw) {
-                    if(arm64_uscat) {
+                    if(cpuext.uscat) {
                         ANDx_mask(x1, wback, 1, 0, 3);  // mask = F
                         CMPSw_U12(x1, 16-(1<<(2+rex.w)));
                         B_MARK(cGT);
@@ -109,7 +109,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                         B_MARK(cNE);
                     }
                 }
-                if(arm64_atomics) {
+                if(cpuext.atomics) {
                     UFLAG_IF {
                         LDADDALxw(gd, x1, wback);
                     } else {
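These two hunks show the alignment gate in front of the fast path. With FEAT_LSE2 (cpuext.uscat), an access is still single-copy atomic as long as it does not cross a 16-byte granule, so only boundary-crossing operands take the slow route; without LSE2, roughly any misaligned operand does. A sketch of the condition the ANDx_mask/CMPSw_U12/B_MARK sequence encodes, with width = 1 << (2 + rex.w) bytes (helper name is mine; the non-LSE2 test is only partially visible in this diff):

    #include <stdint.h>
    /* Returns nonzero when the atomic fast path cannot be used. */
    static int needs_slow_path(uintptr_t addr, int width, int uscat) {
        if (uscat)
            return (addr & 0xF) > (uintptr_t)(16 - width); /* crosses 16B */
        return (addr & (width - 1)) != 0;                  /* misaligned */
    }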
@@ -163,7 +163,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                 BFIx(wback, x1, wb2*8, 8);
             } else {
                 addr = geted(dyn, addr, ninst, nextop, &wback, x3, &fixedaddress, NULL, 0, 0, rex, LOCK_LOCK, 0, 0);
-                if(arm64_atomics) {
+                if(cpuext.atomics) {
                     LDSETALB(x2, x1, wback);
                     UFLAG_IF {
                         emit_or8(dyn, ninst, x1, x2, x4, x5);
@@ -188,7 +188,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                 emit_or32(dyn, ninst, rex, ed, gd, x3, x4);
             } else {
                 addr = geted(dyn, addr, ninst, nextop, &wback, x2, &fixedaddress, NULL, 0, 0, rex, LOCK_LOCK, 0, 0);
-                if(arm64_atomics) {
+                if(cpuext.atomics) {
                     LDSETALxw(gd, x1, wback);
                     UFLAG_IF {
                         emit_or32(dyn, ninst, rex, x1, gd, x3, x4);
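LOCK OR rides LDSETAL (set the bits that are set in the source), and later hunks use LDEORAL for XOR; together with LDADD and LDCLR, these four LSE primitives cover every read-modify-write LOCK form in this file. Illustrative C equivalents (helper names are mine):

    #include <stdint.h>
    /* LOCK OR / LOCK XOR — sketches of the LDSETAL / LDEORAL mappings. */
    static uint64_t lock_or(uint64_t *mem, uint64_t gd) {
        return __atomic_fetch_or(mem, gd, __ATOMIC_SEQ_CST);
    }
    static uint64_t lock_xor(uint64_t *mem, uint64_t gd) {
        return __atomic_fetch_xor(mem, gd, __ATOMIC_SEQ_CST);
    }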
@@ -288,7 +288,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                                 BFIx(xRAX, x2, 0, 8);
                             } else {
                                 addr = geted(dyn, addr, ninst, nextop, &wback, x3, &fixedaddress, NULL, 0, 0, rex, LOCK_LOCK, 0, 0);
-                                if(arm64_atomics) {
+                                if(cpuext.atomics) {
                                     UFLAG_IF {
                                         MOVw_REG(x2, x6);
                                         CASALB(x6, gd, wback);
@@ -338,7 +338,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                             } else {
                                 addr = geted(dyn, addr, ninst, nextop, &wback, x2, &fixedaddress, NULL, 0, 0, rex, LOCK_LOCK, 0, 0);
                                 if(!ALIGNED_ATOMICxw) {
-                                    if(arm64_uscat) {
+                                    if(cpuext.uscat) {
                                         ANDx_mask(x1, wback, 1, 0, 3);  // mask = F
                                         CMPSw_U12(x1, 16-(1<<(2+rex.w)));
                                         B_MARK3(cGT);
@@ -348,7 +348,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                                     }
                                 }
                                 // Aligned version
-                                if(arm64_atomics) {
+                                if(cpuext.atomics) {
                                     UFLAG_IF {
                                         MOVxw_REG(x1, xRAX);
                                         CASALxw(x1, gd, wback);
@@ -395,7 +395,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                                 // Common part (and fallback for EAX != Ed)
                                 UFLAG_IF {emit_cmp32(dyn, ninst, rex, xRAX, x1, x3, x4, x5); MOVxw_REG(xRAX, x1);}
                                 else {
-                                    if(!ALIGNED_ATOMICxw || !arm64_atomics)
+                                    if(!ALIGNED_ATOMICxw || !cpuext.atomics)
                                        MOVxw_REG(xRAX, x1);    // upper part of RAX will be erased on 32bits, no matter what
                                 }
                             }
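This is the LOCK CMPXCHG path: CASALxw takes the expected value (copied from RAX), swaps in gd on a match, and leaves the old memory value in its first register, from which both ZF and the RAX write-back are derived. Illustrative C (helper name is mine):

    #include <stdint.h>
    /* LOCK CMPXCHG m64, r64 — sketch of the CASAL mapping. */
    static int lock_cmpxchg64(uint64_t *mem, uint64_t *rax, uint64_t gd) {
        uint64_t expected = *rax;
        int zf = __atomic_compare_exchange_n(mem, &expected, gd, 0,
                                             __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
        *rax = expected;  /* x86 puts the old value in RAX (a no-op on success) */
        return zf;        /* ZF set iff old == RAX */
    }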
@@ -617,7 +617,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                                 EBBACK; // eb gets x1 (sum)
                             } else {
                                 addr = geted(dyn, addr, ninst, nextop, &wback, x2, &fixedaddress, NULL, 0, 0, rex, LOCK_LOCK, 0, 0);
-                                if(arm64_atomics) {
+                                if(cpuext.atomics) {
                                     UFLAG_IF {
                                         MOVxw_REG(x3, gd);
                                         LDADDALB(x3, gd, wback);
@@ -663,7 +663,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                             } else {
                                 addr = geted(dyn, addr, ninst, nextop, &wback, x2, &fixedaddress, NULL, 0, 0, rex, LOCK_LOCK, 0, 0);
                                 if(!ALIGNED_ATOMICxw) {
-                                    if(arm64_uscat) {
+                                    if(cpuext.uscat) {
                                         ANDx_mask(x1, wback, 1, 0, 3);  // mask = F
                                         CMPSw_U12(x1, 16-(1<<(2+rex.w)));
                                         B_MARK(cGT);
@@ -672,7 +672,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                                         B_MARK(cNE);    // unaligned
                                     }
                                 }
-                                if(arm64_atomics) {
+                                if(cpuext.atomics) {
                                     UFLAG_IF {
                                         LDADDALxw(gd, x1, wback);
                                     } else {
@@ -690,7 +690,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                                     UFLAG_IF {
                                         B_MARK2_nocond;
                                     } else {
-                                        if(!arm64_atomics) MOVxw_REG(gd, x1);
+                                        if(!cpuext.atomics) MOVxw_REG(gd, x1);
                                         B_NEXT_nocond;
                                     }
                                     MARK;
@@ -709,7 +709,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                                     MOVxw_REG(x3, x1);
                                     emit_add32(dyn, ninst, rex, x3, gd, x4, x5);
                                     MOVxw_REG(gd, x1);
-                                } else if(!arm64_atomics || !ALIGNED_ATOMICxw) {
+                                } else if(!cpuext.atomics || !ALIGNED_ATOMICxw) {
                                     MOVxw_REG(gd, x1);
                                 }
                             }
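The XADD hunks reuse the same LDADDAL primitive, exploiting that it already returns the previous memory value — which is exactly the "exchange" half of x86 XADD. Sketch (helper name is mine):

    #include <stdint.h>
    /* LOCK XADD m, r — sketch. */
    static void lock_xadd(uint64_t *mem, uint64_t *gd) {
        uint64_t old = __atomic_fetch_add(mem, *gd, __ATOMIC_SEQ_CST);
        uint64_t sum = old + *gd;  /* flags are derived from this sum */
        (void)sum;
        *gd = old;                 /* XADD returns the old value in the reg */
    }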
@@ -732,7 +732,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                         SETFLAGS(X_ZF, SF_SUBSET);
                         addr = geted(dyn, addr, ninst, nextop, &wback, x1, &fixedaddress, NULL, 0, 0, rex, LOCK_LOCK, 0, 0);
                         if(!ALIGNED_ATOMICxw) {
-                            if(arm64_uscat) {
+                            if(cpuext.uscat) {
                                 if(rex.w) {
                                     TSTx_mask(wback, 1, 0, 3);
                                     B_MARK2(cNE);
@@ -746,7 +746,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                                 B_MARK2(cNE);    // unaligned
                             }
                         }
-                        if(arm64_atomics) {
+                        if(cpuext.atomics) {
                             MOVx_REG(x2, xRAX);
                             MOVx_REG(x3, xRDX);
                             MOVx_REG(x4, xRBX);
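These hunks gate and stage LOCK CMPXCHG8B/16B: the MOVs copy RAX/RDX/RBX into scratch registers, presumably to feed a paired compare-and-swap (CASPAL) that the visible context does not show. A sketch of the x86 semantics being emulated, purely illustrative (__atomic on __int128 may go through libatomic; helper name is mine):

    #include <stdint.h>
    typedef unsigned __int128 u128;
    /* LOCK CMPXCHG16B — expected is RDX:RAX, desired is RCX:RBX,
       ZF reports success, and the old value lands in RDX:RAX. */
    static int cmpxchg16b(u128 *mem, uint64_t *rax, uint64_t *rdx,
                          uint64_t rbx, uint64_t rcx) {
        u128 expected = ((u128)*rdx << 64) | *rax;
        u128 desired  = ((u128)rcx  << 64) | rbx;
        int zf = __atomic_compare_exchange_n(mem, &expected, desired, 0,
                                             __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
        *rax = (uint64_t)expected;
        *rdx = (uint64_t)(expected >> 64);
        return zf;
    }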
@@ -886,7 +886,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
             } else {
                 addr = geted(dyn, addr, ninst, nextop, &wback, x2, &fixedaddress, NULL, 0, 0, rex, LOCK_LOCK, 0, 0);
                 GETGB(x5);
-                if(arm64_atomics) {
+                if(cpuext.atomics) {
                     MVNxw_REG(x1, gd);
                     UFLAG_IF {
                         LDCLRALB(x1, x1, wback);
@@ -914,7 +914,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                 emit_and32(dyn, ninst, rex, ed, gd, x3, x4);
             } else {
                 addr = geted(dyn, addr, ninst, nextop, &wback, x2, &fixedaddress, NULL, 0, 0, rex, LOCK_LOCK, 0, 0);
-                if(arm64_atomics) {
+                if(cpuext.atomics) {
                     MVNxw_REG(x1, gd);
                     UFLAG_IF {
                         LDCLRALxw(x1, x1, wback);
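ARM64's LSE set has no atomic AND, only LDCLR (clear the bits that are set in the source), so LOCK AND gd is rewritten as "clear ~gd" — hence the MVNxw_REG before LDCLRALB/LDCLRALxw in these hunks. In C terms (helper name is mine):

    #include <stdint.h>
    /* LOCK AND m, r — sketch; MVN + LDCLRAL is the hardware spelling
       of fetch-and: old = LDCLRAL(~gd, mem). */
    static uint64_t lock_and(uint64_t *mem, uint64_t gd) {
        return __atomic_fetch_and(mem, gd, __ATOMIC_SEQ_CST);
    }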
@@ -944,7 +944,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
             } else {
                 addr = geted(dyn, addr, ninst, nextop, &wback, x2, &fixedaddress, NULL, 0, 0, rex, LOCK_LOCK, 0, 0);
                 if(!ALIGNED_ATOMICxw) {
-                    if(arm64_uscat) {
+                    if(cpuext.uscat) {
                         ANDx_mask(x1, wback, 1, 0, 3);  // mask = F
                         CMPSw_U12(x1, 16-(1<<(2+rex.w)));
                         B_MARK(cGT);
@@ -953,7 +953,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                         B_MARK(cNE);
                     }
                 }
-                if(arm64_atomics && 0) {    // disabled because 0x80000000 has no negative
+                if(cpuext.atomics && 0) {    // disabled because 0x80000000 has no negative
                     NEGxw_REG(x1, gd);
                     UFLAG_IF {
                         LDADDALxw(x1, x1, wback);
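The branch above is compiled out with "&& 0". The comment hints at why the LDADDAL-of-negated-operand shortcut is unsafe here: the most negative value is its own two's complement, so negation cannot produce a distinct addend for it (a guess at the author's intent, but the wrap itself is easy to verify):

    #include <assert.h>
    #include <stdint.h>
    int main(void) {
        uint32_t g = 0x80000000u;
        assert((uint32_t)-g == g);  /* negating INT32_MIN is a no-op */
        return 0;
    }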
@@ -1003,7 +1003,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
             } else {
                 addr = geted(dyn, addr, ninst, nextop, &wback, x2, &fixedaddress, NULL, 0, 0, rex, LOCK_LOCK, 0, 0);
                 if(!ALIGNED_ATOMICxw) {
-                    if(arm64_uscat) {
+                    if(cpuext.uscat) {
                         ANDx_mask(x1, wback, 1, 0, 3);  // mask = F
                         CMPSw_U12(x1, 16-(1<<(2+rex.w)));
                         B_MARK(cGT);
@@ -1012,7 +1012,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                         B_MARK(cNE);
                     }
                 }
-                if(arm64_atomics) {
+                if(cpuext.atomics) {
                     UFLAG_IF {
                         LDEORALxw(gd, x1, wback);
                     } else {
@@ -1065,7 +1065,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                         addr = geted(dyn, addr, ninst, nextop, &wback, x5, &fixedaddress, NULL, 0, 0, rex, LOCK_LOCK, 0, 1);
                         u8 = F8;
                         wb1 = 1;
-                        if(arm64_atomics) {
+                        if(cpuext.atomics) {
                             MOV32w(x2, u8);
                             UFLAG_IF {
                                 LDADDALB(x2, x1, wback);
@@ -1097,7 +1097,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                         addr = geted(dyn, addr, ninst, nextop, &wback, x5, &fixedaddress, NULL, 0, 0, rex, LOCK_LOCK, 0, 1);
                         u8 = F8;
                         wb1 = 1;
-                        if(arm64_atomics) {
+                        if(cpuext.atomics) {
                             MOV32w(x2, u8);
                             UFLAG_IF {
                                 LDSETALB(x2, x1, wback);
@@ -1172,7 +1172,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                         addr = geted(dyn, addr, ninst, nextop, &wback, x5, &fixedaddress, NULL, 0, 0, rex, LOCK_LOCK, 0, 1);
                         u8 = F8;
                         wb1 = 1;
-                        if(arm64_atomics) {
+                        if(cpuext.atomics) {
                             MOV32w(x2, ~u8);
                             UFLAG_IF {
                                 LDCLRALB(x2, x1, wback);
@@ -1203,7 +1203,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                         addr = geted(dyn, addr, ninst, nextop, &wback, x5, &fixedaddress, NULL, 0, 0, rex, LOCK_LOCK, 0, 1);
                         u8 = F8;
                         wb1 = 1;
-                        if(arm64_atomics) {
+                        if(cpuext.atomics) {
                             MOV32w(x2, -u8);
                             UFLAG_IF {
                                 LDADDALB(x2, x1, wback);
@@ -1234,7 +1234,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                         addr = geted(dyn, addr, ninst, nextop, &wback, x5, &fixedaddress, NULL, 0, 0, rex, LOCK_LOCK, 0, 1);
                         u8 = F8;
                         wb1 = 1;
-                        if(arm64_atomics) {
+                        if(cpuext.atomics) {
                             MOV32w(x2, u8);
                             UFLAG_IF {
                                 LDEORALB(x2, x1, wback);
@@ -1291,7 +1291,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                             MOV64xw(x5, i64);
                         }
                         if(!ALIGNED_ATOMICxw) {
-                            if(arm64_uscat) {
+                            if(cpuext.uscat) {
                                 ANDx_mask(x1, wback, 1, 0, 3);  // mask = F
                                 CMPSw_U12(x1, 16-(1<<(2+rex.w)));
                                 B_MARK(cGT);
@@ -1300,7 +1300,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                                 B_MARK(cNE);
                             }
                         }
-                        if(arm64_atomics) {
+                        if(cpuext.atomics) {
                             if((i64>-0x1000) && (i64<0x1000)) {
                                 MOV64xw(x5, i64);
                             }
@@ -1367,7 +1367,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                             // this is __faststorefence
                             DMB_ST();
                         } else {
-                            if(arm64_atomics) {
+                            if(cpuext.atomics) {
                                 MOV64xw(x5, i64);
                                 UFLAG_IF {
                                     LDSETALxw(x5, x1, wback);
@@ -1439,7 +1439,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                     } else {
                         addr = geted(dyn, addr, ninst, nextop, &wback, x2, &fixedaddress, NULL, 0, 0, rex, LOCK_LOCK, 0, (opcode==0x81)?4:1);
                         if(opcode==0x81) i64 = F32S; else i64 = F8S;
-                        if(arm64_atomics) {
+                        if(cpuext.atomics) {
                             MOV64xw(x5, ~i64);
                             UFLAG_IF {
                                 LDCLRALxw(x5, x1, wback);
@@ -1472,7 +1472,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                             MOV64xw(x5, i64);
                         }
                         if(!ALIGNED_ATOMICxw) {
-                            if(arm64_uscat) {
+                            if(cpuext.uscat) {
                                 ANDx_mask(x1, wback, 1, 0, 3);  // mask = F
                                 CMPSw_U12(x1, 16-(1<<(2+rex.w)));
                                 B_MARK(cGT);
@@ -1481,7 +1481,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                                 B_MARK(cNE);
                             }
                         }
-                        if(arm64_atomics) {
+                        if(cpuext.atomics) {
                             if((i64>-0x1000) && (i64<0x1000)) {
                                 MOV64xw(x5, -i64);
                             } else {
@@ -1553,7 +1553,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                     } else {
                         addr = geted(dyn, addr, ninst, nextop, &wback, x2, &fixedaddress, NULL, 0, 0, rex, LOCK_LOCK, 0, (opcode==0x81)?4:1);
                         if(opcode==0x81) i64 = F32S; else i64 = F8S;
-                        if(arm64_atomics) {
+                        if(cpuext.atomics) {
                             MOV64xw(x5, i64);
                             UFLAG_IF {
                                 LDEORALxw(x5, x1, wback);
@@ -1609,7 +1609,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
             } else {
                 GETGB(x4);
                 addr = geted(dyn, addr, ninst, nextop, &ed, x2, &fixedaddress, NULL, 0, 0, rex, LOCK_LOCK, 0, 0);
-                if(arm64_atomics) {
+                if(cpuext.atomics) {
                     SWPALB(x4, x1, ed);
                 } else {
                     MARKLOCK;
@@ -1636,7 +1636,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                 GETGD;
                 addr = geted(dyn, addr, ninst, nextop, &ed, x2, &fixedaddress, NULL, 0, 0, rex, LOCK_LOCK, 0, 0);
                 if(!ALIGNED_ATOMICxw) {
-                    if(arm64_uscat) {
+                    if(cpuext.uscat) {
                         ANDx_mask(x1, ed, 1, 0, 3);  // mask = F
                         CMPSw_U12(x1, 16-(1<<(2+rex.w)));
                         B_MARK(cGT);
@@ -1645,7 +1645,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                         B_MARK(cNE);
                     }
                 }
-                if(arm64_atomics) {
+                if(cpuext.atomics) {
                     SWPALxw(gd, gd, ed);
                     if(!ALIGNED_ATOMICxw) {
                         B_NEXT_nocond;
@@ -1671,7 +1671,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                     MARK2;
                 }
                 SMDMB();
-                if(!ALIGNED_ATOMICxw || !arm64_atomics) {
+                if(!ALIGNED_ATOMICxw || !cpuext.atomics) {
                     MOVxw_REG(gd, x1);
                 }
             }
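LOCK XCHG maps one-to-one onto SWPAL: the hunks above show SWPALB for the byte form and SWPALxw(gd, gd, ed), which stores gd and drops the old memory value into the same register. Sketch (helper name is mine):

    #include <stdint.h>
    /* LOCK XCHG m, r — sketch. */
    static void lock_xchg(uint64_t *mem, uint64_t *gd) {
        *gd = __atomic_exchange_n(mem, *gd, __ATOMIC_SEQ_CST);
    }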
@@ -1697,7 +1697,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                         EBBACK;
                     } else {
                         addr = geted(dyn, addr, ninst, nextop, &wback, x2, &fixedaddress, NULL, 0, 0, rex, LOCK_LOCK, 0, 0);
-                        if(arm64_atomics) {
+                        if(cpuext.atomics) {
                             MOV32w(x1, 0xff);
                             STEORLB(x1, wback);
                         } else {
@@ -1725,7 +1725,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                         EBBACK;
                     } else {
                         addr = geted(dyn, addr, ninst, nextop, &wback, x2, &fixedaddress, NULL, 0, 0, rex, LOCK_LOCK, 0, 0);
-                        if(arm64_atomics) {
+                        if(cpuext.atomics) {
                             MOV64x(x1, ~0LL);
                             STEORLxw(x1, wback);
                         } else {
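LOCK NOT returns no value and sets no flags, so these hunks use the store-only release form STEORLB/STEORLxw with an all-ones mask: x ^ ~0 is bitwise NOT. Sketch (helper name is mine):

    #include <stdint.h>
    /* LOCK NOT m — sketch; the fetch result is discarded, matching the
       STEORL encoding (store-release, no value loaded back). */
    static void lock_not(uint64_t *mem) {
        (void)__atomic_fetch_xor(mem, ~0ull, __ATOMIC_RELEASE);
    }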
@@ -1757,7 +1757,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                     } else {
                         addr = geted(dyn, addr, ninst, nextop, &wback, x2, &fixedaddress, NULL, 0, 0, rex, LOCK_LOCK, 0, 0);
                         MARKLOCK;
-                        if(arm64_atomics) {
+                        if(cpuext.atomics) {
                             MOV32w(x3, 1);
                             UFLAG_IF {
                                 LDADDALB(x3, x1, wback);
@@ -1783,7 +1783,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                         EBBACK;
                     } else {
                         addr = geted(dyn, addr, ninst, nextop, &wback, x2, &fixedaddress, NULL, 0, 0, rex, LOCK_LOCK, 0, 0);
-                        if(arm64_atomics) {
+                        if(cpuext.atomics) {
                             MOV32w(x3, -1);
                             UFLAG_IF {
                                 LDADDALB(x3, x1, wback);
@@ -1818,7 +1818,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                     } else {
                         addr = geted(dyn, addr, ninst, nextop, &wback, x2, &fixedaddress, NULL, 0, 0, rex, LOCK_LOCK, 0, 0);
                         if(!ALIGNED_ATOMICxw) {
-                            if(arm64_uscat) {
+                            if(cpuext.uscat) {
                                 ANDx_mask(x1, wback, 1, 0, 3);  // mask = F
                                 CMPSw_U12(x1, 16-(1<<(2+rex.w)));
                                 B_MARK(cGT);
@@ -1827,7 +1827,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                                 B_MARK(cNE);    // unaligned
                             }
                         }
-                        if(arm64_atomics) {
+                        if(cpuext.atomics) {
                             MOV32w(x3, 1);
                             UFLAG_IF {
                                 LDADDALxw(x3, x1, wback);
@@ -1874,7 +1874,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                     } else {
                         addr = geted(dyn, addr, ninst, nextop, &wback, x2, &fixedaddress, NULL, 0, 0, rex, LOCK_LOCK, 0, 0);
                         if(!ALIGNED_ATOMICxw) {
-                            if(arm64_uscat) {
+                            if(cpuext.uscat) {
                                 ANDx_mask(x1, wback, 1, 0, 3);  // mask = F
                                 CMPSw_U12(x1, 16-(1<<(2+rex.w)));
                                 B_MARK(cGT);
@@ -1883,7 +1883,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                                 B_MARK(cNE);    // unaligned
                             }
                         }
-                        if(arm64_atomics) {
+                        if(cpuext.atomics) {
                             MOV64xw(x3, -1);
                             UFLAG_IF {
                                 LDADDALxw(x3, x1, wback);
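The LOCK INC/DEC hunks at the end ride the same LDADDAL path with a constant +1 or -1 materialized in a scratch register; the x86 wrinkle is that INC/DEC leave CF untouched, which is why they get their own flag helpers rather than the plain ADD/SUB ones. Sketch (helper names are mine):

    #include <stdint.h>
    /* LOCK INC / LOCK DEC m — sketches; each returns the old value,
       from which the flags (all but CF) are recomputed. */
    static uint64_t lock_inc(uint64_t *mem) {
        return __atomic_fetch_add(mem, 1, __ATOMIC_SEQ_CST);
    }
    static uint64_t lock_dec(uint64_t *mem) {
        return __atomic_fetch_sub(mem, 1, __ATOMIC_SEQ_CST);
    }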