Diffstat (limited to 'src')
-rw-r--r--  src/dynarec/arm64/dynarec_arm64_0f.c      | 20
-rw-r--r--  src/dynarec/arm64/dynarec_arm64_avx_0f.c  | 28
2 files changed, 14 insertions(+), 34 deletions(-)
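
This change removes the BOX64ENV(dynarec_fastnan) fast path for the packed min/max opcodes, so the precise compare-and-select sequence is now emitted unconditionally. The reason, spelled out in the comments below, is that AArch64 FMIN/FMAX and x86 MINPS/MAXPS disagree on NaN handling: x86 always returns the second source when either lane is NaN (or when the lanes compare equal), while FMIN/FMAX produce a NaN instead of handing back the other source. A minimal per-lane model of the x86 rule, with illustrative helper names that are not part of box64:

    #include <math.h>   /* NAN, for exercising the NaN cases */

    /* x86 MINPS/MAXPS per lane: return src2 unless the strict compare
     * on src1 succeeds, so a NaN in either operand, or equal inputs,
     * selects src2. */
    static float x86_minps_lane(float src1, float src2) {
        return (src1 < src2) ? src1 : src2;   /* NaN or equal -> src2 */
    }
    static float x86_maxps_lane(float src1, float src2) {
        return (src1 > src2) ? src1 : src2;   /* NaN or equal -> src2 */
    }

Under this model x86_minps_lane(NAN, 5.0f) is 5.0f, whereas a VFMINQS lane would yield a NaN; that divergence is exactly what the fastnan shortcut used to tolerate.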
diff --git a/src/dynarec/arm64/dynarec_arm64_0f.c b/src/dynarec/arm64/dynarec_arm64_0f.c
index 83f7b3b4..ccff6c02 100644
--- a/src/dynarec/arm64/dynarec_arm64_0f.c
+++ b/src/dynarec/arm64/dynarec_arm64_0f.c
@@ -1233,13 +1233,9 @@ uintptr_t dynarec64_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
             GETEX(v1, 0, 0);
             // FMIN/FMAX wll not copy the value if v0[x] is NaN
             // but x86 will copy if either v0[x] or v1[x] is NaN, so lets force a copy if source is NaN
-            if(BOX64ENV(dynarec_fastnan)) {
-                VFMINQS(v0, v0, v1);
-            } else {
-                q0 = fpu_get_scratch(dyn, ninst);
-                VFCMGTQS(q0, v1, v0);   // 0 is NaN or v1 GT v0, so invert mask for copy
-                VBIFQ(v0, v1, q0);
-            }
+            q0 = fpu_get_scratch(dyn, ninst);
+            VFCMGTQS(q0, v1, v0);   // 0 is NaN or v1 GT v0, so invert mask for copy
+            VBIFQ(v0, v1, q0);
             break;
         case 0x5E:
             INST_NAME("DIVPS Gx, Ex");

@@ -1268,13 +1264,9 @@ uintptr_t dynarec64_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
             GETEX(v1, 0, 0);
             // FMIN/FMAX wll not copy the value if v0[x] is NaN
             // but x86 will copy if either v0[x] or v1[x] is NaN, or if values are equals, so lets force a copy if source is NaN
-            if(BOX64ENV(dynarec_fastnan)) {
-                VFMAXQS(v0, v0, v1);
-            } else {
-                q0 = fpu_get_scratch(dyn, ninst);
-                VFCMGTQS(q0, v0, v1);   // 0 is NaN or v0 GT v1, so invert mask for copy
-                VBIFQ(v0, v1, q0);
-            }
+            q0 = fpu_get_scratch(dyn, ninst);
+            VFCMGTQS(q0, v0, v1);   // 0 is NaN or v0 GT v1, so invert mask for copy
+            VBIFQ(v0, v1, q0);
             break;
         case 0x60:
             INST_NAME("PUNPCKLBW Gm,Em");

diff --git a/src/dynarec/arm64/dynarec_arm64_avx_0f.c b/src/dynarec/arm64/dynarec_arm64_avx_0f.c
index dc3d07ae..668ece1d 100644
--- a/src/dynarec/arm64/dynarec_arm64_avx_0f.c
+++ b/src/dynarec/arm64/dynarec_arm64_avx_0f.c
@@ -550,20 +550,14 @@ uintptr_t dynarec64_AVX_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int
         case 0x5D:
             INST_NAME("VMINPS Gx, Vx, Ex");
             nextop = F8;
-            if(!BOX64ENV(dynarec_fastnan)) {
-                q0 = fpu_get_scratch(dyn, ninst);
-            }
+            q0 = fpu_get_scratch(dyn, ninst);
             for(int l=0; l<1+vex.l; ++l) {
                 if(!l) { GETGX_empty_VXEX(v0, v2, v1, 0); } else { GETGY_empty_VYEY(v0, v2, v1); }
                 // FMIN/FMAX wll not copy a NaN if either is NaN
                 // but x86 will copy src2 if either value is NaN, so lets force a copy of Src2 (Ex) if result is NaN
-                if(BOX64ENV(dynarec_fastnan)) {
-                    VFMINQS(v0, v2, v1);
-                } else {
-                    VFCMGTQS(q0, v1, v2);   // 0 if NaN or v1 GT v2, so invert mask for copy
-                    if(v0!=v1) VBIFQ(v0, v1, q0);
-                    if(v0!=v2) VBITQ(v0, v2, q0);
-                }
+                VFCMGTQS(q0, v1, v2);   // 0 if NaN or v1 GT v2, so invert mask for copy
+                if(v0!=v1) VBIFQ(v0, v1, q0);
+                if(v0!=v2) VBITQ(v0, v2, q0);
             }
             if(!vex.l) YMM0(gd);
             break;
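
The AVX form is three-operand, so the destination Gx (v0) can alias either source and a single BIF is no longer enough; note also that the q0 scratch is now allocated unconditionally, since every path needs the mask. VBIFQ copies v1 into v0 where the mask is clear, VBITQ copies v2 into v0 where it is set, and each is skipped when v0 already holds that source. A per-lane sketch on raw bits, with illustrative names that are not box64 APIs:

    #include <stdint.h>

    static uint32_t bif_lane(uint32_t dst, uint32_t src, uint32_t mask) {
        return (dst & mask) | (src & ~mask);   /* insert src where mask clear */
    }
    static uint32_t bit_lane(uint32_t dst, uint32_t src, uint32_t mask) {
        return (dst & ~mask) | (src & mask);   /* insert src where mask set */
    }

    /* mask = FCMGT(v1, v2); the wanted result per lane is mask ? v2 : v1.
     * If v0 aliases v1, the BIT alone suffices; if it aliases v2, the BIF
     * alone; otherwise both run and fill complementary lanes. */
    static uint32_t vminps_lane(uint32_t v0, uint32_t v1, uint32_t v2,
                                uint32_t mask, int v0_is_v1, int v0_is_v2) {
        if (!v0_is_v1) v0 = bif_lane(v0, v1, mask);
        if (!v0_is_v2) v0 = bit_lane(v0, v2, mask);
        return v0;
    }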
@@ -594,20 +588,14 @@ uintptr_t dynarec64_AVX_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int
         case 0x5F:
             INST_NAME("VMAXPS Gx, Vx, Ex");
             nextop = F8;
-            if(!BOX64ENV(dynarec_fastnan)) {
-                q0 = fpu_get_scratch(dyn, ninst);
-            }
+            q0 = fpu_get_scratch(dyn, ninst);
             for(int l=0; l<1+vex.l; ++l) {
                 if(!l) { GETGX_empty_VXEX(v0, v2, v1, 0); } else { GETGY_empty_VYEY(v0, v2, v1); }
                 // FMIN/FMAX wll not copy a NaN if either is NaN
                 // but x86 will copy src2 if either value is NaN, so lets force a copy of Src2 (Ex) if result is NaN
-                if(BOX64ENV(dynarec_fastnan)) {
-                    VFMAXQS(v0, v2, v1);
-                } else {
-                    VFCMGTQS(q0, v2, v1);   // 0 if NaN or v2 GT v1, so invert mask for copy
-                    if(v0!=v1) VBIFQ(v0, v1, q0);
-                    if(v0!=v2) VBITQ(v0, v2, q0);
-                }
+                VFCMGTQS(q0, v2, v1);   // 0 if NaN or v2 GT v1, so invert mask for copy
+                if(v0!=v1) VBIFQ(v0, v1, q0);
+                if(v0!=v2) VBITQ(v0, v2, q0);
             }
             if(!vex.l) YMM0(gd);
             break;
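
A throwaway harness can confirm that the compare-and-select formulation stays bit-exact against the plain x86 rule across the awkward inputs (NaN on either side, ties, signed zeros, infinities). Everything below is hypothetical test code, not part of the change:

    #include <math.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Whole-lane select standing in for the BIF step. */
    static float sel(float keep, float ins, int mask) { return mask ? keep : ins; }
    static float min_model(float a, float b) { return sel(a, b, b > a); }
    static float max_model(float a, float b) { return sel(a, b, a > b); }
    static float x86_min(float a, float b) { return (a < b) ? a : b; }
    static float x86_max(float a, float b) { return (a > b) ? a : b; }

    static int same_bits(float x, float y) {
        uint32_t u, v;
        memcpy(&u, &x, 4);
        memcpy(&v, &y, 4);
        return u == v;
    }

    int main(void) {
        const float c[] = { 1.0f, -1.0f, 0.0f, -0.0f, NAN, INFINITY, -INFINITY };
        const int n = sizeof(c) / sizeof(c[0]);
        for (int i = 0; i < n; i++)
            for (int j = 0; j < n; j++) {
                if (!same_bits(min_model(c[i], c[j]), x86_min(c[i], c[j])))
                    printf("min mismatch: %g %g\n", c[i], c[j]);
                if (!same_bits(max_model(c[i], c[j]), x86_max(c[i], c[j])))
                    printf("max mismatch: %g %g\n", c[i], c[j]);
            }
        return 0;
    }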