| author | Yang Liu <numbksco@gmail.com> | 2023-05-10 16:06:38 +0800 |
|---|---|---|
| committer | GitHub <noreply@github.com> | 2023-05-10 10:06:38 +0200 |
| commit | 1ca4f6acf5aee62242447e34b8a243a23144f4c2 (patch) | |
| tree | fbf2e2406626c85c3cb5e55919d220784d61c9f5 /src | |
| parent | 392463b9f0a81586c71c76a5c537e4a0bbf8bebb (diff) | |
| download | box64-1ca4f6acf5aee62242447e34b8a243a23144f4c2.tar.gz box64-1ca4f6acf5aee62242447e34b8a243a23144f4c2.zip | |
[RV64_DYNAREC] Added more opcodes and a small fix (#774)
* Added 67 0F 2E,2F (U)COMISS opcodes
* Added 66 0F 3A 09 ROUNDPD opcode
* Added 66 D1,D3 SHL opcodes
* Use ZEXTH if Zbb is available (sketched below)
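The Zbb item is the broad one: rather than open-coding an SLLI/SRLI pair at every 16-bit zero-extension site, the ZEXTH macro in rv64_emitter.h now chooses the encoding itself (see the last hunk of the diff). As a minimal host-side sketch of the two lowerings (plain C standing in for the emitted RISC-V, not box64 code):

```c
#include <assert.h>
#include <stdint.h>

/* Both lowerings produce the low 16 bits of rs, zero-extended to 64 bits.
 * With Zbb the dynarec emits a single zext.h; without it, the RV64 fallback
 * is a left shift by 48 followed by a logical right shift by 48. */
static inline uint64_t zexth_zbb(uint64_t rs)      { return (uint16_t)rs; }     /* zext.h rd, rs */
static inline uint64_t zexth_fallback(uint64_t rs) { return (rs << 48) >> 48; } /* slli rd, rs, 48 ; srli rd, rd, 48 */

int main(void)
{
    assert(zexth_zbb(0xdeadbeefcafe1234ull) == 0x1234);
    assert(zexth_fallback(0xdeadbeefcafe1234ull) == 0x1234);
    return 0;
}
```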
Diffstat (limited to 'src')
| -rw-r--r-- | src/dynarec/rv64/dynarec_rv64_00_3.c | 6 |
| -rw-r--r-- | src/dynarec/rv64/dynarec_rv64_0f.c | 3 |
| -rw-r--r-- | src/dynarec/rv64/dynarec_rv64_66.c | 66 |
| -rw-r--r-- | src/dynarec/rv64/dynarec_rv64_660f.c | 43 |
| -rw-r--r-- | src/dynarec/rv64/dynarec_rv64_6664.c | 3 |
| -rw-r--r-- | src/dynarec/rv64/dynarec_rv64_66f0.c | 3 |
| -rw-r--r-- | src/dynarec/rv64/dynarec_rv64_67.c | 48 |
| -rw-r--r-- | src/dynarec/rv64/dynarec_rv64_emit_logic.c | 4 |
| -rw-r--r-- | src/dynarec/rv64/dynarec_rv64_emit_math.c | 9 |
| -rw-r--r-- | src/dynarec/rv64/dynarec_rv64_emit_tests.c | 2 |
| -rw-r--r-- | src/dynarec/rv64/dynarec_rv64_helper.h | 5 |
| -rw-r--r-- | src/dynarec/rv64/rv64_emitter.h | 2 |
12 files changed, 135 insertions, 59 deletions
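Among the new handlers in the diff below, 67 0F 2E/2F implements (U)COMISS on top of FEQ.S/FLT.S. The flag semantics it targets are fixed by x86: an unordered compare sets ZF, PF and CF; less-than sets only CF; equal sets only ZF; greater-than clears all three (COMISS and UCOMISS differ only in the invalid-operation exception on quiet NaNs, which is not modeled here). A small reference model of those results:

```c
#include <math.h>
#include <stdint.h>

/* Reference model of the EFLAGS result that the new handler reproduces with
 * FEQ.S / FLT.S and branches. Masks use the x86 EFLAGS bit positions
 * (CF = bit 0, PF = bit 2, ZF = bit 6); the function name is illustrative. */
#define FLAG_CF (1u << 0)
#define FLAG_PF (1u << 2)
#define FLAG_ZF (1u << 6)

static uint32_t comiss_flags(float gx, float ex)
{
    if (isnan(gx) || isnan(ex))         /* unordered: ZF = PF = CF = 1 */
        return FLAG_ZF | FLAG_PF | FLAG_CF;
    if (gx < ex)                        /* less than: only CF */
        return FLAG_CF;
    if (gx == ex)                       /* equal: only ZF */
        return FLAG_ZF;
    return 0;                           /* greater than: all three clear */
}
```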
diff --git a/src/dynarec/rv64/dynarec_rv64_00_3.c b/src/dynarec/rv64/dynarec_rv64_00_3.c index 4a548677..00de07fe 100644 --- a/src/dynarec/rv64/dynarec_rv64_00_3.c +++ b/src/dynarec/rv64/dynarec_rv64_00_3.c @@ -742,8 +742,7 @@ uintptr_t dynarec64_00_3(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int UFLAG_RES(x1); LUI(x2, 0xffff0); AND(xRAX, xRAX, x2); - SLLI(x1, x1, 48); - SRLI(x1, x1, 48); + ZEXTH(x1, x1); OR(xRAX, xRAX, x1); break; case 5: @@ -757,8 +756,7 @@ uintptr_t dynarec64_00_3(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int UFLAG_RES(x1); LUI(x2, 0xffff0); AND(xRAX, xRAX, x2); - SLLI(x1, x1, 48); - SRLI(x1, x1, 48); + ZEXTH(x1, x1); OR(xRAX, xRAX, x1); break; case 6: diff --git a/src/dynarec/rv64/dynarec_rv64_0f.c b/src/dynarec/rv64/dynarec_rv64_0f.c index 777d0ed2..1eab5d24 100644 --- a/src/dynarec/rv64/dynarec_rv64_0f.c +++ b/src/dynarec/rv64/dynarec_rv64_0f.c @@ -911,8 +911,7 @@ uintptr_t dynarec64_0F(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int ni GETGD; if(MODREG) { ed = xRAX+(nextop&7)+(rex.b<<3); - SLLI(gd, ed, 48); - SRLI(gd, gd, 48); + ZEXTH(gd, ed); } else { SMREAD(); addr = geted(dyn, addr, ninst, nextop, &ed, x2, x1, &fixedaddress, rex, NULL, 1, 0); diff --git a/src/dynarec/rv64/dynarec_rv64_66.c b/src/dynarec/rv64/dynarec_rv64_66.c index 4208d21a..afa95515 100644 --- a/src/dynarec/rv64/dynarec_rv64_66.c +++ b/src/dynarec/rv64/dynarec_rv64_66.c @@ -83,8 +83,7 @@ uintptr_t dynarec64_66(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int ni INST_NAME("ADD AX, Iw"); SETFLAGS(X_ALL, SF_SET_PENDING); i32 = F16; - SLLI(x1 , xRAX, 48); - SRLI(x1, x1, 48); + ZEXTH(x1 , xRAX); MOV32w(x2, i32); emit_add16(dyn, ninst, x1, x2, x3, x4, x6); LUI(x3, 0xffff0); @@ -113,8 +112,7 @@ uintptr_t dynarec64_66(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int ni INST_NAME("OR AX, Iw"); SETFLAGS(X_ALL, SF_SET_PENDING); i32 = F16; - SLLI(x1, xRAX, 48); - SRLI(x1, x1, 48); + ZEXTH(x1, xRAX); MOV32w(x2, i32); emit_or16(dyn, ninst, x1, x2, x3, x4); LUI(x3, 0xffff0); @@ -156,8 +154,7 @@ uintptr_t dynarec64_66(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int ni INST_NAME("AND AX, Iw"); SETFLAGS(X_ALL, SF_SET_PENDING); i32 = F16; - SLLI(x1, xRAX, 48); - SRLI(x1, x1, 48); + ZEXTH(x1, xRAX); MOV32w(x2, i32); emit_and16(dyn, ninst, x1, x2, x3, x4); LUI(x3, 0xffff0); @@ -186,8 +183,7 @@ uintptr_t dynarec64_66(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int ni INST_NAME("SUB AX, Iw"); SETFLAGS(X_ALL, SF_SET_PENDING); i32 = F16; - SLLI(x1, xRAX, 48); - SRLI(x1, x1, 48); + ZEXTH(x1, xRAX); MOV32w(x2, i32); emit_sub16(dyn, ninst, x1, x2, x3, x4, x5); LUI(x2, 0xffff0); @@ -216,8 +212,7 @@ uintptr_t dynarec64_66(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int ni INST_NAME("XOR AX, Iw"); SETFLAGS(X_ALL, SF_SET_PENDING); i32 = F16; - SLLI(x1, xRAX, 48); - SRLI(x1, x1, 48); + ZEXTH(x1, xRAX); MOV32w(x2, i32); emit_xor16(dyn, ninst, x1, x2, x3, x4, x5); LUI(x5, 0xffff0); @@ -244,8 +239,7 @@ uintptr_t dynarec64_66(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int ni INST_NAME("CMP AX, Iw"); SETFLAGS(X_ALL, SF_SET_PENDING); i32 = F16; - SLLI(x1, xRAX, 48); - SRLI(x1, x1, 48); + ZEXTH(x1, xRAX); if(i32) { MOV32w(x2, i32); emit_cmp16(dyn, ninst, x1, x2, x3, x4, x5, x6); @@ -276,8 +270,7 @@ uintptr_t dynarec64_66(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int ni if(opcode==0x69) i32 = F16S; else i32 = F8S; MOV32w(x2, i32); MULW(x2, x2, x1); - SLLI(x2, x2, 48); - SRLI(x2, x2, 48); + ZEXTH(x2, x2); UFLAG_RES(x2); gd=x2; GWBACK; @@ -403,8 +396,7 @@ 
uintptr_t dynarec64_66(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int ni // we don't use GETGW above, so we need let gd & 0xffff. LUI(x1, 0xffff0); AND(ed, ed, x1); - SLLI(x2, gd, 48); - SRLI(x2, x2, 48); + ZEXTH(x2, gd); OR(ed, ed, x2); } } else { @@ -422,8 +414,7 @@ uintptr_t dynarec64_66(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int ni if(ed!=gd) { LUI(x1, 0xffff0); AND(gd, gd, x1); - SLLI(x2, ed, 48); - SRLI(x2, x2, 48); + ZEXTH(x2, ed); OR(gd, gd, x2); } } else { @@ -452,13 +443,11 @@ uintptr_t dynarec64_66(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int ni // x2 <- rax MV(x2, xRAX); // rax[15:0] <- gd[15:0] - SLLI(x3, gd, 48); - SRLI(x3, x3, 48); + ZEXTH(x3, gd); AND(xRAX, xRAX, x4); OR(xRAX, xRAX, x3); // gd[15:0] <- x2[15:0] - SLLI(x2, x2, 48); - SRLI(x2, x2, 48); + ZEXTH(x2, x2); AND(gd, gd, x4); OR(gd, gd, x2); } @@ -652,8 +641,7 @@ uintptr_t dynarec64_66(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int ni UFLAG_OP12(ed, x2) SRAI(ed, ed, u8&0x1f); if(MODREG) { - SLLI(ed, ed, 48); - SRLI(ed, ed, 48); + ZEXTH(ed, ed); } EWBACK; UFLAG_RES(ed); @@ -702,6 +690,25 @@ uintptr_t dynarec64_66(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int ni UFLAG_RES(ed); UFLAG_DF(x3, d_shr16); break; + case 4: + case 6: + if(opcode==0xD1) { + INST_NAME("SHL Ew, 1"); + MOV32w(x4, 1); + } else { + INST_NAME("SHL Ew, CL"); + ANDI(x4, xRCX, 0x1f); + } + UFLAG_IF {MESSAGE(LOG_DUMP, "Need Optimization for flags\n");} + SETFLAGS(X_ALL, SF_PENDING); + GETEW(x1, 0); + UFLAG_OP12(ed, x4) + SLL(ed, ed, x4); + ZEXTH(ed, ed); + EWBACK; + UFLAG_RES(ed); + UFLAG_DF(x3, d_shl16); + break; case 7: if(opcode==0xD1) { INST_NAME("SAR Ew, 1"); @@ -713,10 +720,9 @@ uintptr_t dynarec64_66(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int ni UFLAG_IF {MESSAGE(LOG_DUMP, "Need Optimization for flags\n");} SETFLAGS(X_ALL, SF_PENDING); GETSEW(x1, 0); - UFLAG_OP12(ed, x4) + UFLAG_OP12(ed, x4); SRA(ed, ed, x4); - SLLI(ed, ed, 48); - SRLI(ed, ed, 48); + ZEXTH(ed, ed); EWBACK; UFLAG_RES(ed); UFLAG_DF(x3, d_sar16); @@ -758,9 +764,8 @@ uintptr_t dynarec64_66(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int ni INST_NAME("DIV Ew"); SETFLAGS(X_ALL, SF_SET); GETEW(x1, 0); - SLLI(x2, xRAX, 48); + ZEXTH(x2, xRAX); SLLI(x3, xRDX, 48); - SRLI(x2, x2, 48); SRLI(x3, x3, 32); OR(x2, x2, x3); DIVUW(x3, x2, ed); @@ -779,9 +784,8 @@ uintptr_t dynarec64_66(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int ni NOTEST(x1); SETFLAGS(X_ALL, SF_SET); GETSEW(x1, 0); - SLLI(x2, xRAX, 48); + ZEXTH(x2, xRAX); SLLI(x3, xRDX, 48); - SRLI(x2, x2, 48); SRLI(x3, x3, 32); OR(x2, x2, x3); DIVW(x3, x2, ed); diff --git a/src/dynarec/rv64/dynarec_rv64_660f.c b/src/dynarec/rv64/dynarec_rv64_660f.c index 0408aeaa..bd9a4544 100644 --- a/src/dynarec/rv64/dynarec_rv64_660f.c +++ b/src/dynarec/rv64/dynarec_rv64_660f.c @@ -128,8 +128,7 @@ uintptr_t dynarec64_660F(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int GETGD; \ if(MODREG) { \ ed = xRAX+(nextop&7)+(rex.b<<3); \ - SLLI(x4, ed, 48); \ - SRLI(x4, x4, 48); \ + ZEXTH(x4, ed); \ } else { \ SMREAD(); \ addr = geted(dyn, addr, ninst, nextop, &ed, x2, x4, &fixedaddress, rex, NULL, 1, 0); \ @@ -391,7 +390,7 @@ uintptr_t dynarec64_660F(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int case 0x0B: INST_NAME("ROUNDSD Gx, Ex, Ib"); nextop = F8; - GETEXSD(d0, 0); + GETEXSD(d0, 1); GETGXSD_empty(v0); d1 = fpu_get_scratch(dyn); v1 = fpu_get_scratch(dyn); @@ -419,6 +418,41 @@ uintptr_t dynarec64_660F(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int FCVTDL(v0, x5, 
RD_RTZ); } break; + case 0x09: + INST_NAME("ROUNDPD Gx, Ex, Ib"); + nextop = F8; + GETGX(x1); + GETEX(x2, 1); + u8 = F8; + d0 = fpu_get_scratch(dyn); + d1 = fpu_get_scratch(dyn); + v1 = fpu_get_scratch(dyn); + MOV64x(x3, 1ULL << __DBL_MANT_DIG__); + FCVTDL(d1, x3, RD_RTZ); + for (int i=0; i<2; ++i) { + FLD(d0, wback, fixedaddress+8*i); + FEQD(x4, d0, d0); + BNEZ_MARK(x4); + B_MARK3_nocond; + MARK; // d0 is not nan + FABSD(v1, d0); + FLTD(x4, v1, d1); + BNEZ_MARK2(x4); + B_MARK3_nocond; + MARK2; + if(u8&4) { + u8 = sse_setround(dyn, ninst, x4, x5); + FCVTLD(x5, d0, RD_DYN); + FCVTDL(d0, x5, RD_RTZ); + x87_restoreround(dyn, ninst, u8); + } else { + FCVTLD(x5, d0, round_round[u8&3]); + FCVTDL(d0, x5, RD_RTZ); + } + MARK3; + FSD(d0, gback, 8*i); + } + break; case 0x0E: INST_NAME("PBLENDW Gx, Ex, Ib"); nextop = F8; @@ -1416,8 +1450,7 @@ uintptr_t dynarec64_660F(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int GETSGW(x2); MULW(x2, x2, x1); UFLAG_RES(x2); - SLLI(x2, x2, 48); - SRLI(x2, x2, 48); + ZEXTH(x2, x2); GWBACK; break; diff --git a/src/dynarec/rv64/dynarec_rv64_6664.c b/src/dynarec/rv64/dynarec_rv64_6664.c index 569d1c9c..973b6bd1 100644 --- a/src/dynarec/rv64/dynarec_rv64_6664.c +++ b/src/dynarec/rv64/dynarec_rv64_6664.c @@ -57,8 +57,7 @@ uintptr_t dynarec64_6664(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int if(ed!=gd) { LUI(x1, 0xffff0); AND(gd, gd, x1); - SLLI(x1, ed, 48); - SRLI(x1, x1, 48); + ZEXTH(x1, ed); OR(gd, gd, x1); } } diff --git a/src/dynarec/rv64/dynarec_rv64_66f0.c b/src/dynarec/rv64/dynarec_rv64_66f0.c index c0785742..4862b89b 100644 --- a/src/dynarec/rv64/dynarec_rv64_66f0.c +++ b/src/dynarec/rv64/dynarec_rv64_66f0.c @@ -69,8 +69,7 @@ uintptr_t dynarec64_66F0(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int if(opcode==0x81) i32 = F16S; else i32 = F8S; ed = xRAX+(nextop&7)+(rex.b<<3); MOV32w(x5, i32); - SLLI(x6, ed, 48); - SRLI(x6, x6, 48); + ZEXTH(x6, ed); emit_add16(dyn, ninst, x6, x5, x3, x4, x2); SRLI(ed, ed, 16); SLLI(ed, ed, 16); diff --git a/src/dynarec/rv64/dynarec_rv64_67.c b/src/dynarec/rv64/dynarec_rv64_67.c index b1dbcfc8..14544463 100644 --- a/src/dynarec/rv64/dynarec_rv64_67.c +++ b/src/dynarec/rv64/dynarec_rv64_67.c @@ -132,7 +132,53 @@ uintptr_t dynarec64_67(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int ni case 0x0F: opcode=F8; switch(opcode) { - + case 0x2E: + // no special check... 
+ case 0x2F: + switch (rep) { + case 0: + if(opcode==0x2F) {INST_NAME("COMISS Gx, Ex");} else {INST_NAME("UCOMISS Gx, Ex");} + SETFLAGS(X_ALL, SF_SET); + nextop = F8; + GETGXSS(s0); + if(MODREG) { + v0 = sse_get_reg(dyn, ninst, x1, (nextop&7) + (rex.b<<3), 1); + } else { + v0 = fpu_get_scratch(dyn); + SMREAD(); + addr = geted32(dyn, addr, ninst, nextop, &ed, x1, x2, &fixedaddress, rex, NULL, 1, 0); + FLW(v0, ed, fixedaddress); + } + CLEAR_FLAGS(); + // if isnan(s0) || isnan(v0) + IFX(X_ZF | X_PF | X_CF) { + FEQS(x3, s0, s0); + FEQS(x2, v0, v0); + AND(x2, x2, x3); + BNE_MARK(x2, xZR); + ORI(xFlags, xFlags, (1<<F_ZF) | (1<<F_PF) | (1<<F_CF)); + B_NEXT_nocond; + } + MARK; + // else if isless(d0, v0) + IFX(X_CF) { + FLTS(x2, s0, v0); + BEQ_MARK2(x2, xZR); + ORI(xFlags, xFlags, 1<<F_CF); + B_NEXT_nocond; + } + MARK2; + // else if d0 == v0 + IFX(X_ZF) { + FEQS(x2, s0, v0); + CBZ_NEXT(x2); + ORI(xFlags, xFlags, 1<<F_ZF); + } + break; + default: + DEFAULT; + } + break; default: DEFAULT; } diff --git a/src/dynarec/rv64/dynarec_rv64_emit_logic.c b/src/dynarec/rv64/dynarec_rv64_emit_logic.c index b7187211..809620bf 100644 --- a/src/dynarec/rv64/dynarec_rv64_emit_logic.c +++ b/src/dynarec/rv64/dynarec_rv64_emit_logic.c @@ -165,7 +165,7 @@ void emit_xor16(dynarec_rv64_t* dyn, int ninst, int s1, int s2, int s3, int s4, } XOR(s1, s1, s2); - if(rv64_zbb) ZEXTH(s1, s1); else {SLLI(s1, s1, 48); SRLI(s1, s1, 48);} + ZEXTH(s1, s1); IFX(X_PEND) { SH(s1, xEmu, offsetof(x64emu_t, res)); @@ -196,7 +196,7 @@ void emit_or16(dynarec_rv64_t* dyn, int ninst, int s1, int s2, int s3, int s4) { } OR(s1, s1, s2); - if(rv64_zbb) ZEXTH(s1, s1); else {SLLI(s1, s1, 48); SRLI(s1, s1, 48);} + ZEXTH(s1, s1); IFX(X_PEND) { SD(s1, xEmu, offsetof(x64emu_t, res)); } diff --git a/src/dynarec/rv64/dynarec_rv64_emit_math.c b/src/dynarec/rv64/dynarec_rv64_emit_math.c index 0381050e..6d7f3b25 100644 --- a/src/dynarec/rv64/dynarec_rv64_emit_math.c +++ b/src/dynarec/rv64/dynarec_rv64_emit_math.c @@ -247,8 +247,7 @@ void emit_add16(dynarec_rv64_t* dyn, int ninst, int s1, int s2, int s3, int s4, ORI(xFlags, xFlags, 1 << F_CF); } - SLLI(s1, s1, 48); - SRLI(s1, s1, 48); + ZEXTH(s1, s1); IFX(X_ZF) { BNEZ(s1, 8); @@ -846,7 +845,7 @@ void emit_inc16(dynarec_rv64_t* dyn, int ninst, int s1, int s2, int s3, int s4) } } - if(rv64_zbb) ZEXTH(s1, s1); else {SLLI(s1, s1, 48); SRLI(s1, s1, 48);} + ZEXTH(s1, s1); IFX(X_ZF) { BNEZ(s1, 8); @@ -1120,7 +1119,7 @@ void emit_neg16(dynarec_rv64_t* dyn, int ninst, int s1, int s2, int s3) } NEG(s1, s1); - if(rv64_zbb) ZEXTH(s1, s1); else {SLLI(s1, s1, 48); SRLI(s1, s1, 48);} + ZEXTH(s1, s1); IFX(X_PEND) { SH(s1, xEmu, offsetof(x64emu_t, res)); } @@ -1268,7 +1267,7 @@ void emit_adc16(dynarec_rv64_t* dyn, int ninst, int s1, int s2, int s3, int s4, ORI(xFlags, xFlags, 1 << F_CF); } - if(rv64_zbb) ZEXTH(s1, s1); else {SLLI(s1, s1, 48); SRLI(s1, s1, 48);} + ZEXTH(s1, s1); IFX(X_ZF) { BNEZ(s1, 8); diff --git a/src/dynarec/rv64/dynarec_rv64_emit_tests.c b/src/dynarec/rv64/dynarec_rv64_emit_tests.c index 3be67793..76cc66d5 100644 --- a/src/dynarec/rv64/dynarec_rv64_emit_tests.c +++ b/src/dynarec/rv64/dynarec_rv64_emit_tests.c @@ -108,7 +108,7 @@ void emit_cmp16(dynarec_rv64_t* dyn, int ninst, int s1, int s2, int s3, int s4, // It's a cmp, we can't store the result back to s1. 
SUB(s6, s1, s2); IFX(X_ALL) { - if(rv64_zbb) ZEXTH(s6, s6); else {SLLI(s6, s6, 48); SRLI(s6, s6, 48);} + ZEXTH(s6, s6); } IFX_PENDOR0 { SH(s6, xEmu, offsetof(x64emu_t, res)); diff --git a/src/dynarec/rv64/dynarec_rv64_helper.h b/src/dynarec/rv64/dynarec_rv64_helper.h index ba2f426c..fd9db950 100644 --- a/src/dynarec/rv64/dynarec_rv64_helper.h +++ b/src/dynarec/rv64/dynarec_rv64_helper.h @@ -131,12 +131,11 @@ LDxw(ed, wback, fixedaddress); \ } // GETGW extract x64 register in gd, that is i -#define GETGW(i) gd = xRAX+((nextop&0x38)>>3)+(rex.r<<3); SLLI(i, gd, 48); SRLI(i, i, 48); gd = i; +#define GETGW(i) gd = xRAX+((nextop&0x38)>>3)+(rex.r<<3); ZEXTH(i, gd); gd = i; //GETEWW will use i for ed, and can use w for wback. #define GETEWW(w, i, D) if(MODREG) { \ wback = xRAX+(nextop&7)+(rex.b<<3);\ - SLLI(i, wback, 48); \ - SRLI(i, i, 48); \ + ZEXTH(i, wback); \ ed = i; \ wb1 = 0; \ } else { \ diff --git a/src/dynarec/rv64/rv64_emitter.h b/src/dynarec/rv64/rv64_emitter.h index 7ff2d4db..bf834507 100644 --- a/src/dynarec/rv64/rv64_emitter.h +++ b/src/dynarec/rv64/rv64_emitter.h @@ -571,7 +571,7 @@ f28–31 ft8–11 FP temporaries Caller // Sign-extend half-word #define SEXTH(rd, rs) EMIT(R_type(0b0110000, 0b00101, rs, 0b001, rd, 0b0010011)) // Zero-extend half-word -#define ZEXTH(rd, rs) EMIT(R_type(0b0000100, 0b00000, rs, 0b100, rd, 0b0111011)) +#define ZEXTH(rd, rs) if(rv64_zbb) EMIT(R_type(0b0000100, 0b00000, rs, 0b100, rd, 0b0111011)); else {SLLI(rd, rs, 48); SRLI(rd, rd, 48);} // Rotate left (register) #define ROL(rd, rs1, rs2) EMIT(R_type(0b0110000, rs2, rs1, 0b001, rd, 0b0110011)) // Rotate left word (register) |
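The ROUNDPD handler added above processes the two double lanes in a loop: the SSE4.1 immediate's bit 2 selects the dynamic (MXCSR) rounding mode via sse_setround, bits 1:0 otherwise pick nearest/down/up/truncate, and NaNs or values whose magnitude is at least 2^53 pass through unchanged because they are already integral, which is what the FEQ.D and FLT.D guards check. A hedged host-side sketch of what one lane computes:

```c
#include <math.h>

/* Per-lane model of ROUNDPD under the usual SSE4.1 immediate encoding.
 * Not the box64 code path: nearbyint() under the current fenv rounding mode
 * stands in for the sse_setround()/RD_DYN sequence. */
static double roundpd_lane(double x, int imm8)
{
    if (isnan(x) || fabs(x) >= 0x1p53)  /* mirrors the 1ULL << __DBL_MANT_DIG__ guard */
        return x;                       /* NaN or already integral: pass through */
    if (imm8 & 4)                       /* bit 2 set: use the dynamic (MXCSR) mode */
        return nearbyint(x);
    switch (imm8 & 3) {                 /* bits 1:0: fixed rounding mode */
        case 0:  return nearbyint(x);   /* nearest-even, assuming default fenv mode */
        case 1:  return floor(x);       /* toward -inf */
        case 2:  return ceil(x);        /* toward +inf */
        default: return trunc(x);       /* toward zero */
    }
}
```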