Diffstat (limited to 'src/dynarec/rv64/dynarec_rv64_0f_vector.c')
-rw-r--r--  src/dynarec/rv64/dynarec_rv64_0f_vector.c | 18
1 file changed, 9 insertions, 9 deletions
diff --git a/src/dynarec/rv64/dynarec_rv64_0f_vector.c b/src/dynarec/rv64/dynarec_rv64_0f_vector.c
index b1569238..6fa4f633 100644
--- a/src/dynarec/rv64/dynarec_rv64_0f_vector.c
+++ b/src/dynarec/rv64/dynarec_rv64_0f_vector.c
@@ -101,7 +101,7 @@ uintptr_t dynarec64_0F_vector(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip,
             GETEX_vector(v1, 0, 0, VECTOR_SEW64);
             q0 = fpu_get_scratch(dyn);
             VSLIDEDOWN_VI(q0, v1, 1, VECTOR_UNMASKED);
-            if (rv64_xtheadvector) {
+            if (cpuext.xtheadvector) {
                 VECTOR_LOAD_VMASK(0b01, x4, 1);
                 VMERGE_VVM(v0, v0, q0); // implies VMASK
             } else {
@@ -127,7 +127,7 @@ uintptr_t dynarec64_0F_vector(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip,
             if (MODREG) {
                 ed = (nextop & 7) + (rex.b << 3);
                 d0 = sse_get_reg_vector(dyn, ninst, x1, ed, 1, VECTOR_SEW64);
-                if (rv64_xtheadvector) {
+                if (cpuext.xtheadvector) {
                     VECTOR_LOAD_VMASK(0b01, x4, 1);
                     VMERGE_VVM(v0, v0, v1); // implies VMASK
                 } else {
@@ -173,7 +173,7 @@ uintptr_t dynarec64_0F_vector(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip,
             v0 = fpu_get_scratch_lmul(dyn, VECTOR_LMUL2);
             v1 = fpu_get_scratch_lmul(dyn, VECTOR_LMUL2);
             d0 = fpu_get_scratch_lmul(dyn, VECTOR_LMUL2); // no more scratches!
-            if (rv64_vlen >= 256) {
+            if (cpuext.vlen >= 32) {
                 VWADDU_VX(v0, q0, xZR, VECTOR_UNMASKED);
                 VWADDU_VX(v1, q1, xZR, VECTOR_UNMASKED);
                 vector_vsetvli(dyn, ninst, x1, VECTOR_SEW32, VECTOR_LMUL2, 2);
@@ -224,7 +224,7 @@ uintptr_t dynarec64_0F_vector(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip,
             v1 = sse_get_reg_vector(dyn, ninst, x1, (nextop & 7) + (rex.b << 3), 0, VECTOR_SEW64);
             q0 = fpu_get_scratch(dyn);
             VSLIDE1DOWN_VX(q0, v0, xZR, VECTOR_UNMASKED);
-            if (rv64_xtheadvector) {
+            if (cpuext.xtheadvector) {
                 VECTOR_LOAD_VMASK(0b01, x4, 1);
                 VMERGE_VVM(v1, v1, q0); // implies VMASK
             } else {
@@ -498,7 +498,7 @@ uintptr_t dynarec64_0F_vector(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip,
             SET_ELEMENT_WIDTH(x1, VECTOR_SEW32, 1);
             GETGD;
             GETEX_vector(q0, 0, 0, VECTOR_SEW32);
-            if (rv64_xtheadvector) {
+            if (cpuext.xtheadvector) {
                 v0 = fpu_get_scratch_lmul(dyn, VECTOR_LMUL8);
                 VSRL_VI(v0, q0, 31, VECTOR_UNMASKED);
                 // Force the element width to 4bit
@@ -865,7 +865,7 @@ uintptr_t dynarec64_0F_vector(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip,
             } else {
                 SET_ELEMENT_WIDTH(x3, VECTOR_SEW32, 1);
             }
-            if (!rv64_xtheadvector) {
+            if (!cpuext.xtheadvector) {
                 VXOR_VV(v0, v0, v0, VECTOR_UNMASKED);
             }
             VMV_S_X(v0, ed);
@@ -1144,7 +1144,7 @@ uintptr_t dynarec64_0F_vector(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip,
             q0 = fpu_get_scratch(dyn);
             d0 = fpu_get_scratch(dyn);
             d1 = fpu_get_scratch(dyn);
-            if (rv64_xtheadvector) { // lack of vrgatherei16.vv
+            if (cpuext.xtheadvector) { // lack of vrgatherei16.vv
                 q1 = fpu_get_scratch(dyn);
                 vector_vsetvli(dyn, ninst, x1, VECTOR_SEW64, VECTOR_LMUL1, 1);
                 tmp64u0 = ((((uint64_t)u8 >> 2) & 3) << 32) | (u8 & 3);
@@ -1229,7 +1229,7 @@ uintptr_t dynarec64_0F_vector(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip,
             SET_ELEMENT_WIDTH(x1, VECTOR_SEW64, 1);
             GETEM_vector(q0, 0);
             SET_ELEMENT_WIDTH(x1, VECTOR_SEW8, 1);
-            if (rv64_xtheadvector) {
+            if (cpuext.xtheadvector) {
                 v0 = fpu_get_scratch_lmul(dyn, VECTOR_LMUL8);
                 VSRL_VI(v0, q0, 7, VECTOR_UNMASKED);
                 // Force the element width to 1bit
@@ -1240,7 +1240,7 @@ uintptr_t dynarec64_0F_vector(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip,
                 VMSLT_VX(VMASK, q0, xZR, VECTOR_UNMASKED);
             }
             VMV_X_S(gd, VMASK);
-            if (!rv64_xtheadvector) { ANDI(gd, gd, 0xff); }
+            if (!cpuext.xtheadvector) { ANDI(gd, gd, 0xff); }
             break;
         case 0xD8:
         case 0xD9: