| field | value | date |
|---|---|---|
| author | ptitSeb <sebastien.chev@gmail.com> | 2021-03-17 17:01:00 +0100 |
| committer | ptitSeb <sebastien.chev@gmail.com> | 2021-03-17 17:01:00 +0100 |
| commit | 6a14d26c1db433706a4c9ebb8728b4fc5954c76d (patch) | |
| tree | 6a2e525daa9d01cba83fe45a70825972e79dddf7 /src | |
| parent | 477617240a968da98a4d517fc38c11c385a46a36 (diff) | |
| download | box64-6a14d26c1db433706a4c9ebb8728b4fc5954c76d.tar.gz, box64-6a14d26c1db433706a4c9ebb8728b4fc5954c76d.zip | |
[DYNAREC] Added 01/03/05/83 /0 ADD opcodes, plus various bugfixes (VVVVVV and IntoTheBreach are still broken; Thimbleweed and WorldOfGoo seem OK)
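For readers decoding the subject line: the opcode bytes refer to the following x86(_64) ADD encodings. This summary is derived from the dynarec_arm64_00.c hunks below, not from any box64 documentation:

```c
/* ADD forms wired into the ARM64 dynarec by this commit
 * (as handled in dynarec64_00(), see the diff below):
 *
 *   0x01      ADD Ed, Gd    r/m32(64) += reg       -> emit_add32()
 *   0x03      ADD Gd, Ed    reg += r/m32(64)       -> emit_add32()
 *   0x05      ADD EAX, Id   EAX/RAX += imm32       -> emit_add32c()
 *   0x81 /0   ADD Ed, Id    r/m += imm32           -> emit_add32c()
 *   0x83 /0   ADD Ed, Ib    r/m += sext(imm8)      -> emit_add32c()
 *
 * "/0" means the ModRM reg field is 0, selecting ADD within the
 * 0x81/0x83 immediate group (0x81 shares the 0x83 case label below).
 */
```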
Diffstat (limited to 'src')
| mode | file | lines changed |
|---|---|---|
| -rwxr-xr-x | src/dynarec/arm64_emitter.h | 27 |
| -rwxr-xr-x | src/dynarec/arm64_printer.c | 62 |
| -rwxr-xr-x | src/dynarec/dynarec_arm64_00.c | 52 |
| -rwxr-xr-x | src/dynarec/dynarec_arm64_emit_math.c | 287 |
| -rwxr-xr-x | src/dynarec/dynarec_arm64_emit_tests.c | 8 |
| -rwxr-xr-x | src/dynarec/dynarec_arm64_helper.c | 16 |
| -rwxr-xr-x | src/dynarec/dynarec_arm64_helper.h | 8 |
| -rwxr-xr-x | src/dynarec/dynarec_arm64_pass.c | 2 |
| -rwxr-xr-x | src/librarian/librarian.c | 3 |
9 files changed, 271 insertions, 194 deletions
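One technique worth noting before the diff itself: the rewritten emit_add32/emit_add32c below derive the x86 auxiliary flag (AF) from the carry vector bc = (op1 & op2) | ((op1 | op2) & ~res), whose bit 3 is the carry out of the low nibble. The same identity appears in the emitters' own comments. A minimal standalone C check of that identity (a sketch for illustration, not box64 code):

```c
#include <assert.h>
#include <stdint.h>

// Carry vector of a 32-bit add: bit k is set iff the addition
// produced a carry out of bit k. AF is bit 3 of this vector.
static uint32_t add_carry_vector(uint32_t op1, uint32_t op2) {
    uint32_t res = op1 + op2;
    return (op1 & op2) | ((op1 | op2) & ~res);
}

int main(void) {
    // 0x0F + 0x01 carries out of bit 3: AF must be set.
    assert((add_carry_vector(0x0F, 0x01) >> 3) & 1);
    // 0x07 + 0x01 stays inside the low nibble: AF clear.
    assert(!((add_carry_vector(0x07, 0x01) >> 3) & 1));
    return 0;
}
```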
```diff
diff --git a/src/dynarec/arm64_emitter.h b/src/dynarec/arm64_emitter.h
index f91732e2..738035f5 100755
--- a/src/dynarec/arm64_emitter.h
+++ b/src/dynarec/arm64_emitter.h
@@ -111,7 +111,7 @@
 #define MOV64x(Rd, imm64) \
     if(~((uint64_t)(imm64))<0xffff) { \
         MOVZx(Rd, (~(uint64_t)(imm64))&0xffff); \
-        MVNx(Rd, Rd); \
+        MVNx_REG(Rd, Rd); \
     } else { \
         MOVZx(Rd, ((uint64_t)(imm64))&0xffff); \
         if(((uint64_t)(imm64))&0xffff0000) {MOVKx_LSL(Rd, (((uint64_t)(imm64))>>16)&0xffff, 16);} \
@@ -130,6 +130,7 @@
 #define ADDw_REG(Rd, Rn, Rm) EMIT(ADDSUB_REG_gen(0, 0, 0, 0b00, Rm, 0, Rn, Rd))
 #define ADDSw_REG(Rd, Rn, Rm) EMIT(ADDSUB_REG_gen(0, 0, 1, 0b00, Rm, 0, Rn, Rd))
 #define ADDxw_REG(Rd, Rn, Rm) EMIT(ADDSUB_REG_gen(rex.w, 0, 0, 0b00, Rm, 0, Rn, Rd))
+#define ADDSxw_REG(Rd, Rn, Rm) EMIT(ADDSUB_REG_gen(rex.w, 0, 1, 0b00, Rm, 0, Rn, Rd))
 
 #define ADDSUB_IMM_gen(sf, op, S, shift, imm12, Rn, Rd) ((sf)<<31 | (op)<<30 | (S)<<29 | 0b10001<<24 | (shift)<<22 | (imm12)<<10 | (Rn)<<5 | (Rd))
 #define ADDx_U12(Rd, Rn, imm12) EMIT(ADDSUB_IMM_gen(1, 0, 0, 0b00, (imm12)&0xfff, Rn, Rd))
@@ -251,8 +252,8 @@
 #define ANDSw_REG(Rd, Rn, Rm) EMIT(LOGIC_REG_gen(0, 0b11, 0b00, 0, Rm, 0, Rn, Rd))
 #define ANDSxw_REG(Rd, Rn, Rm) EMIT(LOGIC_REG_gen(rex.w, 0b11, 0b00, 0, Rm, 0, Rn, Rd))
 #define ORRx_REG(Rd, Rn, Rm) EMIT(LOGIC_REG_gen(1, 0b01, 0b00, 0, Rm, 0, Rn, Rd))
-#define ORRx_REG_LSL(Rd, Rn, Rm, lsl) EMIT(LOGIC_REG_gen(1, 0b01, lsl, 0, Rm, 0, Rn, Rd))
-#define ORRw_REG_LSL(Rd, Rn, Rm, lsl) EMIT(LOGIC_REG_gen(0, 0b01, lsl, 0, Rm, 0, Rn, Rd))
+#define ORRx_REG_LSL(Rd, Rn, Rm, lsl) EMIT(LOGIC_REG_gen(1, 0b01, 0b00, 0, Rm, lsl, Rn, Rd))
+#define ORRw_REG_LSL(Rd, Rn, Rm, lsl) EMIT(LOGIC_REG_gen(0, 0b01, 0b00, 0, Rm, lsl, Rn, Rd))
 #define ORRxw_REG(Rd, Rn, Rm) EMIT(LOGIC_REG_gen(rex.w, 0b01, 0b00, 0, Rm, 0, Rn, Rd))
 #define ORRw_REG(Rd, Rn, Rm) EMIT(LOGIC_REG_gen(0, 0b01, 0b00, 0, Rm, 0, Rn, Rd))
 #define ORNx_REG(Rd, Rn, Rm) EMIT(LOGIC_REG_gen(1, 0b01, 0b00, 1, Rm, 0, Rn, Rd))
@@ -261,16 +262,16 @@
 #define EORx_REG(Rd, Rn, Rm) EMIT(LOGIC_REG_gen(1, 0b10, 0b00, 0, Rm, 0, Rn, Rd))
 #define EORw_REG(Rd, Rn, Rm) EMIT(LOGIC_REG_gen(0, 0b10, 0b00, 0, Rm, 0, Rn, Rd))
 #define EORxw_REG(Rd, Rn, Rm) EMIT(LOGIC_REG_gen(rex.w, 0b10, 0b00, 0, Rm, 0, Rn, Rd))
-#define EORx_REG_LSL(Rd, Rn, Rm, imm6) EMIT(LOGIC_REG_gen(1, 0b10, 0b00, 0, Rm, imm6, Rn, Rd))
-#define EORw_REG_LSL(Rd, Rn, Rm, imm6) EMIT(LOGIC_REG_gen(0, 0b10, 0b00, 0, Rm, imm6, Rn, Rd))
-#define EORxw_REG_LSL(Rd, Rn, Rm, imm6) EMIT(LOGIC_REG_gen(rex.w, 0b10, 0b00, 0, Rm, imm6, Rn, Rd))
-#define MOVx(Rd, Rm) ORRx_REG(Rd, xZR, Rm)
-#define MOVw(Rd, Rm) ORRw_REG(Rd, xZR, Rm)
-#define MOVxw(Rd, Rm) ORRxw_REG(Rd, xZR, Rm)
-#define MVNx(Rd, Rm) ORNx_REG(Rd, xZR, Rm)
-#define MVNx_LSL(Rd, Rm, lsl) ORNx_REG_LSL(Rd, xZR, Rm, lsl)
-#define MVNw(Rd, Rm) ORNw_REG(Rd, xZR, Rm)
-#define MVNxw(Rd, Rm) ORNxw_REG(Rd, xZR, Rm)
+#define EORx_REG_LSL(Rd, Rn, Rm, lsl) EMIT(LOGIC_REG_gen(1, 0b10, 0b00, 0, Rm, lsl, Rn, Rd))
+#define EORw_REG_LSL(Rd, Rn, Rm, lsl) EMIT(LOGIC_REG_gen(0, 0b10, 0b00, 0, Rm, lsl, Rn, Rd))
+#define EORxw_REG_LSL(Rd, Rn, Rm, lsl) EMIT(LOGIC_REG_gen(rex.w, 0b10, 0b00, 0, Rm, lsl, Rn, Rd))
+#define MOVx_REG(Rd, Rm) ORRx_REG(Rd, xZR, Rm)
+#define MOVw_REG(Rd, Rm) ORRw_REG(Rd, xZR, Rm)
+#define MOVxw_REG(Rd, Rm) ORRxw_REG(Rd, xZR, Rm)
+#define MVNx_REG(Rd, Rm) ORNx_REG(Rd, xZR, Rm)
+#define MVNx_REG_LSL(Rd, Rm, lsl) ORNx_REG_LSL(Rd, xZR, Rm, lsl)
+#define MVNw_REG(Rd, Rm) ORNw_REG(Rd, xZR, Rm)
+#define MVNxw_REG(Rd, Rm) ORNxw_REG(Rd, xZR, Rm)
 #define MOV_frmSP(Rd) ADDx_U12(Rd, xSP, 0)
 #define MOV_toSP(Rm) ADDx_U12(xSP, Rm, 0)
 #define BICx(Rd, Rn, Rm) EMIT(LOGIC_REG_gen(1, 0b00, 0b00, 1, Rm, 0, Rn, Rd))
diff --git a/src/dynarec/arm64_printer.c b/src/dynarec/arm64_printer.c
index cb9a1957..3137d33a 100755
--- a/src/dynarec/arm64_printer.c
+++ b/src/dynarec/arm64_printer.c
@@ -175,11 +175,7 @@ const char* arm64_print(uint32_t opcode, uintptr_t addr)
         snprintf(buff, sizeof(buff), "STR %s, [%s, %s, %s %d]", (size==2)?Wt[Rt]:Xt[Rt], XtSp[Rn], ((option&1)==0)?Wt[Rm]:Xt[Rm], extend[option], amount);
         return buff;
     }
-    // --- MOV
-    if(isMask(opcode, "f0101010000mmmmm00000011111ddddd", &a)) {
-        snprintf(buff, sizeof(buff), "MOV %s, %s", sf?Xt[Rd]:Wt[Rd], sf?Xt[Rm]:Wt[Rm]);
-        return buff;
-    }
+    // --- MOV (REGS: see Logic MOV==ORR, MVN==ORN)
     if(isMask(opcode, "f10100101wwiiiiiiiiiiiiiiiiddddd", &a)) {
         if(!hw)
             snprintf(buff, sizeof(buff), "MOVZ %s, 0x%x", sf?Xt[Rd]:Wt[Rd], imm);
@@ -211,7 +207,7 @@ const char* arm64_print(uint32_t opcode, uintptr_t addr)
         if(shift==0)
             snprintf(buff, sizeof(buff), "CMP %s, 0x%x", sf?XtSp[Rn]:WtSp[Rn], imm);
         else
-            snprintf(buff, sizeof(buff), "ADD %s, 0x%x", sf?XtSp[Rn]:WtSp[Rn], imm<<12);
+            snprintf(buff, sizeof(buff), "CMP %s, 0x%x", sf?XtSp[Rn]:WtSp[Rn], imm<<12);
         return buff;
     }
     if(isMask(opcode, "f1101011hh0mmmmmiiiiiinnnnn11111", &a)) {
@@ -223,7 +219,9 @@ const char* arm64_print(uint32_t opcode, uintptr_t addr)
         return buff;
     }
     if(isMask(opcode, "f0010001hhiiiiiiiiiiiinnnnnddddd", &a)) {
-        if(shift==0)
+        if((Rd==31 || Rn==31) && shift==0 && imm==0)
+            snprintf(buff, sizeof(buff), "MOV %s, %s, 0x%x", sf?XtSp[Rd]:WtSp[Rd], sf?XtSp[Rn]:WtSp[Rn], imm);
+        else if(shift==0)
             snprintf(buff, sizeof(buff), "ADD %s, %s, 0x%x", sf?XtSp[Rd]:WtSp[Rd], sf?XtSp[Rn]:WtSp[Rn], imm);
         else if (shift==1)
             snprintf(buff, sizeof(buff), "ADD %s, %s, 0x%x", sf?XtSp[Rd]:WtSp[Rd], sf?XtSp[Rn]:WtSp[Rn], imm<<12);
@@ -248,6 +246,14 @@ const char* arm64_print(uint32_t opcode, uintptr_t addr)
         snprintf(buff, sizeof(buff), "ADD %s, %s, %s %s %d", sf?Xt[Rd]:Wt[Rd], sf?Xt[Rn]:Wt[Rn], sf?Xt[Rm]:Wt[Rm], shifts[shift], imm);
         return buff;
     }
+    if(isMask(opcode, "f0101011hh0mmmmmiiiiiinnnnnddddd", &a)) {
+        const char* shifts[] = { "LSL", "LSR", "ASR", "???"};
+        if(shift==0 && imm==0)
+            snprintf(buff, sizeof(buff), "ADDS %s, %s, %s", sf?Xt[Rd]:Wt[Rd], sf?Xt[Rn]:Wt[Rn], sf?Xt[Rm]:Wt[Rm]);
+        else
+            snprintf(buff, sizeof(buff), "ADDS %s, %s, %s %s %d", sf?Xt[Rd]:Wt[Rd], sf?Xt[Rn]:Wt[Rn], sf?Xt[Rm]:Wt[Rm], shifts[shift], imm);
+        return buff;
+    }
     if(isMask(opcode, "f1010001hhiiiiiiiiiiiinnnnnddddd", &a)) {
         if(shift==0)
             snprintf(buff, sizeof(buff), "SUB %s, %s, 0x%x", sf?XtSp[Rd]:WtSp[Rd], sf?XtSp[Rn]:WtSp[Rn], imm);
@@ -274,6 +280,14 @@ const char* arm64_print(uint32_t opcode, uintptr_t addr)
         snprintf(buff, sizeof(buff), "SUB %s, %s, %s %s %d", sf?Xt[Rd]:Wt[Rd], sf?Xt[Rn]:Wt[Rn], sf?Xt[Rm]:Wt[Rm], shifts[shift], imm);
         return buff;
     }
+    if(isMask(opcode, "f1101011hh0mmmmmiiiiiinnnnnddddd", &a)) {
+        const char* shifts[] = { "LSL", "LSR", "ASR", "???"};
+        if(shift==0 && imm==0)
+            snprintf(buff, sizeof(buff), "SUBS %s, %s, %s", sf?Xt[Rd]:Wt[Rd], sf?Xt[Rn]:Wt[Rn], sf?Xt[Rm]:Wt[Rm]);
+        else
+            snprintf(buff, sizeof(buff), "SUBS %s, %s, %s %s %d", sf?Xt[Rd]:Wt[Rd], sf?Xt[Rn]:Wt[Rn], sf?Xt[Rm]:Wt[Rm], shifts[shift], imm);
+        return buff;
+    }
     // ---- LOGIC
     if(isMask(opcode, "f11100100Nrrrrrrssssssnnnnnddddd", &a)) {
         uint64_t i = DecodeBitMasks(a.N, imms, immr);
@@ -318,6 +332,40 @@ const char* arm64_print(uint32_t opcode, uintptr_t addr)
         snprintf(buff, sizeof(buff), "ORR %s, %s, 0x%lx", sf?Xt[Rd]:Wt[Rd], sf?Xt[Rn]:Wt[Rn], i);
         return buff;
     }
+    if(isMask(opcode, "f0101010hh1mmmmmiiiiiinnnnnddddd", &a)) {
+        const char* shifts[] = { "LSL", "LSR", "ASR", "ROR" };
+        if(Rn==31) {
+            if(shift==0 && imm==0)
+                snprintf(buff, sizeof(buff), "MVN %s, %s", sf?Xt[Rd]:Wt[Rd], sf?Xt[Rm]:Wt[Rm]);
+            else
+                snprintf(buff, sizeof(buff), "MVN %s, %s, %s %d", sf?Xt[Rd]:Wt[Rd], sf?Xt[Rm]:Wt[Rm], shifts[shift], imm);
+        } else if(shift==0 && imm==0)
+            snprintf(buff, sizeof(buff), "ORN %s, %s, %s", sf?Xt[Rd]:Wt[Rd], sf?Xt[Rn]:Wt[Rn], sf?Xt[Rm]:Wt[Rm]);
+        else
+            snprintf(buff, sizeof(buff), "ORN %s, %s, %s, %s %d", sf?Xt[Rd]:Wt[Rd], sf?Xt[Rn]:Wt[Rn], sf?Xt[Rm]:Wt[Rm], shifts[shift], imm);
+        return buff;
+    }
+    if(isMask(opcode, "f0101010hh0mmmmmiiiiiinnnnnddddd", &a)) {
+        const char* shifts[] = { "LSL", "LSR", "ASR", "ROR" };
+        if(Rn==31) {
+            if(shift==0 && imm==0)
+                snprintf(buff, sizeof(buff), "MOV %s, %s", sf?Xt[Rd]:Wt[Rd], sf?Xt[Rm]:Wt[Rm]);
+            else
+                snprintf(buff, sizeof(buff), "MOV %s, %s, %s %d", sf?Xt[Rd]:Wt[Rd], sf?Xt[Rm]:Wt[Rm], shifts[shift], imm);
+        } else if(shift==0 && imm==0)
+            snprintf(buff, sizeof(buff), "ORR %s, %s, %s", sf?Xt[Rd]:Wt[Rd], sf?Xt[Rn]:Wt[Rn], sf?Xt[Rm]:Wt[Rm]);
+        else
+            snprintf(buff, sizeof(buff), "ORR %s, %s, %s, %s %d", sf?Xt[Rd]:Wt[Rd], sf?Xt[Rn]:Wt[Rn], sf?Xt[Rm]:Wt[Rm], shifts[shift], imm);
+        return buff;
+    }
+    if(isMask(opcode, "f1001010hh0mmmmmiiiiiinnnnnddddd", &a)) {
+        const char* shifts[] = { "LSL", "LSR", "ASR", "ROR" };
+        if(shift==0 && imm==0)
+            snprintf(buff, sizeof(buff), "EOR %s, %s, %s", sf?Xt[Rd]:Wt[Rd], sf?Xt[Rn]:Wt[Rn], sf?Xt[Rm]:Wt[Rm]);
+        else
+            snprintf(buff, sizeof(buff), "EOR %s, %s, %s, %s %d", sf?Xt[Rd]:Wt[Rd], sf?Xt[Rn]:Wt[Rn], sf?Xt[Rm]:Wt[Rm], shifts[shift], imm);
+        return buff;
+    }
     // ---- SHIFT
     if(isMask(opcode, "f10100110Nrrrrrrssssssnnnnnddddd", &a)) {
diff --git a/src/dynarec/dynarec_arm64_00.c b/src/dynarec/dynarec_arm64_00.c
index 1bd5aca5..58207531 100755
--- a/src/dynarec/dynarec_arm64_00.c
+++ b/src/dynarec/dynarec_arm64_00.c
@@ -57,13 +57,39 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
     switch(opcode) {
 
+        case 0x01:
+            INST_NAME("ADD Ed, Gd");
+            SETFLAGS(X_ALL, SF_SET);
+            nextop = F8;
+            GETGD;
+            GETED(0);
+            emit_add32(dyn, ninst, rex, ed, gd, x3, x4);
+            WBACK;
+            break;
+
+        case 0x03:
+            INST_NAME("ADD Gd, Ed");
+            SETFLAGS(X_ALL, SF_SET);
+            nextop = F8;
+            GETGD;
+            GETED(0);
+            emit_add32(dyn, ninst, rex, gd, ed, x3, x4);
+            break;
+
+        case 0x05:
+            INST_NAME("ADD EAX, Id");
+            SETFLAGS(X_ALL, SF_SET);
+            i32 = F32S;
+            emit_add32c(dyn, ninst, rex, xRAX, i32, x3, x4, x5);
+            break;
+
         case 0x29:
             INST_NAME("SUB Ed, Gd");
             SETFLAGS(X_ALL, SF_SET);
             nextop = F8;
             GETGD;
             GETED(0);
-            emit_sub32(dyn, ninst, rex, ed, gd, x3, x4, x5);
+            emit_sub32(dyn, ninst, rex, ed, gd, x3, x4);
             WBACK;
             break;
@@ -73,7 +99,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
             nextop = F8;
             GETGD;
             GETED(0);
-            emit_sub32(dyn, ninst, rex, gd, ed, x3, x4, x5);
+            emit_sub32(dyn, ninst, rex, gd, ed, x3, x4);
             break;
 
         case 0x2D:
@@ -94,7 +120,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
             INST_NAME("PUSH reg");
             gd = xRAX+(opcode&0x07)+(rex.b<<3);
             if(gd==xRSP) {
-                MOVx(x1, gd);
+                MOVx_REG(x1, gd);
                 gd = x1;
             }
             PUSH1(gd);
@@ -111,7 +137,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
             gd = xRAX+(opcode&0x07)+(rex.b<<3);
             if(gd == xRSP) {
                 POP1(x1);
-                MOVx(gd, x1);
+                MOVx_REG(gd, x1);
             } else {
                 POP1(gd);
             }
@@ -230,6 +256,14 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
         case 0x83:
             nextop = F8;
             switch((nextop>>3)&7) {
+                case 0: //ADD
+                    if(opcode==0x81) {INST_NAME("ADD Ed, Id");} else {INST_NAME("ADD Ed, Ib");}
+                    SETFLAGS(X_ALL, SF_SET);
+                    GETED((opcode==0x81)?4:1);
+                    if(opcode==0x81) i32 = F32S; else i32 = F8S;
+                    emit_add32c(dyn, ninst, rex, ed, i32, x3, x4, x5);
+                    WBACK;
+                    break;
                 case 5: //SUB
                     if(opcode==0x81) {INST_NAME("SUB Ed, Id");} else {INST_NAME("SUB Ed, Ib");}
@@ -251,7 +285,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
            nextop=F8;
             GETGD;
             GETED(0);
-            emit_test32(dyn, ninst, rex, ed, gd, x3, x5, x6);
+            emit_test32(dyn, ninst, rex, ed, gd, x3, x5);
             break;
 
         case 0x89:
@@ -259,7 +293,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
             nextop=F8;
             GETGD;
             if(MODREG) {   // reg <= reg
-                MOVxw(xRAX+(nextop&7)+(rex.b<<3), gd);
+                MOVxw_REG(xRAX+(nextop&7)+(rex.b<<3), gd);
             } else {                    // mem <= reg
                 addr = geted(dyn, addr, ninst, nextop, &ed, x2, &fixedaddress, 0xfff<<(2+rex.w), (1<<(2+rex.w))-1, rex, 0, 0);
                 STRxw_U12(gd, ed, fixedaddress);
@@ -271,7 +305,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
             nextop=F8;
             GETGD;
             if(MODREG) {   // reg <= reg
-                MOVxw(gd, xRAX+(nextop&7)+(rex.b<<3));
+                MOVxw_REG(gd, xRAX+(nextop&7)+(rex.b<<3));
             } else {                    // mem <= reg
                 addr = geted(dyn, addr, ninst, nextop, &ed, x2, &fixedaddress, 0xfff<<(2+rex.w), (1<<(2+rex.w))-1, rex, 0, 0);
                 LDRxw_U12(gd, ed, fixedaddress);
@@ -287,7 +321,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
             } else {                    // mem <= reg
                 addr = geted(dyn, addr, ninst, nextop, &ed, gd, &fixedaddress, 0, 0, rex, 0, 0);
                 if(gd!=ed) {    // it's sometimes used as a 3 bytes NOP
-                    MOVxw(gd, ed);
+                    MOVxw_REG(gd, ed);
                 }
             }
             break;
@@ -402,8 +436,6 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
             }
             GETIP(addr);
             PUSH1(xRIP);
-            /*TABLE64(x3, addr);
-            PUSH1(x3);*/
             jump_to_next(dyn, 0, ed, ninst);
             break;
         case 4: // JMP Ed
diff --git a/src/dynarec/dynarec_arm64_emit_math.c b/src/dynarec/dynarec_arm64_emit_math.c
index d712f58a..0a84c676 100755
--- a/src/dynarec/dynarec_arm64_emit_math.c
+++ b/src/dynarec/dynarec_arm64_emit_math.c
@@ -24,139 +24,135 @@
 #include "dynarec_arm64_helper.h"
 
 // emit ADD32 instruction, from s1 , s2, store result in s1 using s3 and s4 as scratch
-//void emit_add32(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3, int s4)
-//{
-//    IFX(X_PEND) {
-//        STR_IMM9(s1, xEmu, offsetof(x64emu_t, op1));
-//        STR_IMM9(s2, xEmu, offsetof(x64emu_t, op2));
-//        SET_DF(s3, d_add32);
-//    } else IFX(X_ALL) {
-//        SET_DFNONE(s3);
-//    }
-//    IFX(X_AF) {
-//        ORR_REG_LSL_IMM5(s3, s1, s2, 0);   // s3 = op1 | op2
-//        AND_REG_LSL_IMM5(s4, s1, s2, 0);   // s4 = op1 & op2
-//    }
-//    IFX(X_ALL) {
-//        ADDS_REG_LSL_IMM5(s1, s1, s2, 0);
-//    } else {
-//        ADD_REG_LSL_IMM5(s1, s1, s2, 0);
-//    }
-//    IFX(X_PEND) {
-//        STR_IMM9(s1, xEmu, offsetof(x64emu_t, res));
-//    }
-//    IFX(X_AF) {
-//        BIC_REG_LSL_IMM5(s3, s3, s1, 0);   // s3 = (op1 | op2) & ~ res
-//        ORR_REG_LSL_IMM5(s3, s3, s4, 0);   // s4 = (op1 & op2) | ((op1 | op2) & ~ res)
-//        MOV_REG_LSR_IMM5(s4, s3, 3);
-//        BFI(xFlags, s4, F_AF, 1);    // AF: bc & 0x08
-//    }
-//    IFX(X_ZF|X_CF) {
-//        BIC_IMM8(xFlags, xFlags, (1<<F_ZF)|(1<<F_CF), 0);
-//    }
-//    IFX(X_ZF) {
-//        ORR_IMM8_COND(cEQ, xFlags, xFlags, 1<<F_ZF, 0);
-//    }
-//    IFX(X_CF) {
-//        ORR_IMM8_COND(cCS, xFlags, xFlags, 1<<F_CF, 0);
-//    }
-//    IFX(X_OF) {
-//        ORR_IMM8_COND(cVS, xFlags, xFlags, 0b10, 0x0b);
-//        BIC_IMM8_COND(cVC, xFlags, xFlags, 0b10, 0x0b);
-//    }
-//    IFX(X_SF) {
-//        MOV_REG_LSR_IMM5(s3, s1, 31);
-//        BFI(xFlags, s3, F_SF, 1);
-//    }
-//    IFX(X_PF) {
-//        emit_pf(dyn, ninst, s1, s3, s4);
-//    }
-//}
+void emit_add32(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int s2, int s3, int s4)
+{
+    IFX(X_PEND) {
+        STRxw_U12(s1, xEmu, offsetof(x64emu_t, op1));
+        STRxw_U12(s2, xEmu, offsetof(x64emu_t, op2));
+        SET_DF(s3, rex.w?d_add64:d_add32b);
+    } else IFX(X_ALL) {
+        SET_DFNONE(s3);
+    }
+    IFX(X_AF) {
+        ORRxw_REG(s3, s1, s2);      // s3 = op1 | op2
+        ANDxw_REG(s4, s1, s2);      // s4 = op1 & op2
+    }
+    IFX(X_ALL) {
+        ADDSxw_REG(s1, s1, s2);
+    } else {
+        ADDxw_REG(s1, s1, s2);
+    }
+    IFX(X_PEND) {
+        STRxw_U12(s1, xEmu, offsetof(x64emu_t, res));
+    }
+    IFX(X_AF) {
+        BICxw_REG(s3, s3, s1);      // s3 = (op1 | op2) & ~ res
+        ORRxw_REG(s3, s3, s4);      // s3 = (op1 & op2) | ((op1 | op2) & ~ res)
+        LSRxw(s4, s3, 3);
+        BFIxw(xFlags, s4, F_AF, 1); // AF: bc & 0x08
+    }
+    IFX(X_ZF|X_CF|X_OF) {
+        MOV32w(s3, (1<<F_ZF)|(1<<F_CF)|(1<<F_OF));
+        BICx(xFlags, xFlags, s3);
+    }
+    IFX(X_ZF) {
+        Bcond(cNE, +8);
+        ORRw_mask(xFlags, xFlags, 0b011010, 0); // mask=0x40
+    }
+    IFX(X_CF) {
+        Bcond(cCC, +8);
+        ORRw_mask(xFlags, xFlags, 0, 0);        // mask=0x01
+    }
+    IFX(X_OF) {
+        Bcond(cVC, +8);
+        ORRw_mask(xFlags, xFlags, 0b010101, 0); // mask=0x800
+    }
+    IFX(X_SF) {
+        LSRxw(s3, s1, (rex.w)?63:31);
+        BFIx(xFlags, s3, F_SF, 1);
+    }
+    IFX(X_PF) {
+        emit_pf(dyn, ninst, s1, s3, s4);
+    }
+}
 
 // emit ADD32 instruction, from s1 , constant c, store result in s1 using s3 and s4 as scratch
-//void emit_add32c(dynarec_arm_t* dyn, int ninst, int s1, int32_t c, int s3, int s4)
-//{
-//    if(s1==xESP && (!dyn->insts || dyn->insts[ninst].x64.need_flags==X_PEND))
-//    {
-//        // special case when doing math on ESP and only PEND is needed: ignoring it!
-//        if(c>=0 && c<256) {
-//            ADD_IMM8(s1, s1, c);
-//        } else {
-//            MOV32(s3, c);
-//            ADD_REG_LSL_IMM5(s1, s1, s3, 0);
-//        }
-//        return;
-//    }
-//    IFX(X_PEND) {
-//        MOV32(s3, c);
-//        STR_IMM9(s1, xEmu, offsetof(x64emu_t, op1));
-//        STR_IMM9(s3, xEmu, offsetof(x64emu_t, op2));
-//        SET_DF(s4, d_add32);
-//    } else IFX(X_ALL) {
-//        SET_DFNONE(s4);
-//    }
-//    IFX(X_AF) {
-//        if(c>=0 && c<256) {
-//            ORR_IMM8(s3, s1, c, 0);     // s3 = op1 | op2
-//            AND_IMM8(s4, s1, c);        // s4 = op1 & op2
-//        } else {
-//            IFX(X_PEND) {} else {MOV32(s3, c);}
-//            MOV_REG(s4, s3);
-//            ORR_REG_LSL_IMM5(s3, s1, s3, 0);
-//            AND_REG_LSL_IMM5(s4, s1, s4, 0);
-//            PUSH(xSP, 1<<s3);
-//        }
-//    }
-//    if(c>=0 && c<256) {
-//        IFX(X_ALL) {
-//            ADDS_IMM8(s1, s1, c);
-//        } else {
-//            ADD_IMM8(s1, s1, c);
-//        }
-//    } else {
-//        IFXN(X_PEND, X_AF) {} else {MOV32(s3, c);}
-//        IFX(X_ALL) {
-//            ADDS_REG_LSL_IMM5(s1, s1, s3, 0);
-//        } else {
-//            ADD_REG_LSL_IMM5(s1, s1, s3, 0);
-//        }
-//    }
-//    IFX(X_PEND) {
-//        STR_IMM9(s1, xEmu, offsetof(x64emu_t, res));
-//    }
-//    IFX(X_AF) {
-//        if(c<0 || c>=256) {
-//            POP(xSP, 1<<s3);
-//        }
-//        BIC_REG_LSL_IMM5(s3, s3, s1, 0);   // s3 = (op1 | op2) & ~ res
-//        ORR_REG_LSL_IMM5(s3, s3, s4, 0);   // s4 = (op1 & op2) | ((op1 | op2) & ~ res)
-//        MOV_REG_LSR_IMM5(s4, s3, 3);
-//        BFI(xFlags, s4, F_AF, 1);    // AF: bc & 0x08
-//    }
-//    IFX(X_ZF|X_CF) {
-//        BIC_IMM8(xFlags, xFlags, (1<<F_ZF)|(1<<F_CF), 0);
-//    }
-//    IFX(X_ZF) {
-//        ORR_IMM8_COND(cEQ, xFlags, xFlags, 1<<F_ZF, 0);
-//    }
-//    IFX(X_CF) {
-//        ORR_IMM8_COND(cCS, xFlags, xFlags, 1<<F_CF, 0);
-//    }
-//    IFX(X_OF) {
-//        ORR_IMM8_COND(cVS, xFlags, xFlags, 0b10, 0x0b);
-//        BIC_IMM8_COND(cVC, xFlags, xFlags, 0b10, 0x0b);
-//    }
-//    IFX(X_SF) {
-//        MOV_REG_LSR_IMM5(s3, s1, 31);
-//        BFI(xFlags, s3, F_SF, 1);
-//    }
-//    IFX(X_PF) {
-//        emit_pf(dyn, ninst, s1, s3, s4);
-//    }
-//}
+void emit_add32c(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int64_t c, int s3, int s4, int s5)
+{
+    if(s1==xRSP && (!dyn->insts || dyn->insts[ninst].x64.need_flags==X_PEND))
+    {
+        // special case when doing math on ESP and only PEND is needed: ignoring it!
+        if(c>=0 && c<0x1000) {
+            ADDx_U12(s1, s1, c);
+        } else {
+            MOV64x(s3, c);
+            ADDx_REG(s1, s1, s3);
+        }
+        return;
+    }
+    IFX(X_PEND) {
+        MOV64xw(s5, c);
+        STRxw_U12(s1, xEmu, offsetof(x64emu_t, op1));
+        STRxw_U12(s5, xEmu, offsetof(x64emu_t, op2));
+        SET_DF(s4, rex.w?d_add64:d_add32b);
+    } else IFX(X_ALL) {
+        SET_DFNONE(s4);
+    }
+    IFX(X_AF) {
+        IFX(X_PEND) {} else {MOV64xw(s5, c);}
+        ORRxw_REG(s3, s1, s5);      // s3 = op1 | op2
+        ANDxw_REG(s4, s1, s5);      // s4 = op1 & op2
+    }
+    if(c>=0 && c<0x1000) {
+        IFX(X_ALL) {
+            ADDSxw_U12(s1, s1, c);
+        } else {
+            ADDxw_U12(s1, s1, c);
+        }
+    } else {
+        IFX(X_PEND|X_AF) {} else {MOV64xw(s5, c);}
+        IFX(X_ALL) {
+            ADDSxw_REG(s1, s1, s5);
+        } else {
+            ADDxw_REG(s1, s1, s5);
+        }
+    }
+    IFX(X_PEND) {
+        STRxw_U12(s1, xEmu, offsetof(x64emu_t, res));
+    }
+    IFX(X_AF) {
+        BICxw_REG(s3, s3, s1);      // s3 = (op1 | op2) & ~ res
+        ORRxw_REG(s3, s3, s4);      // s3 = (op1 & op2) | ((op1 | op2) & ~ res)
+        LSRxw(s4, s3, 3);
+        BFIxw(xFlags, s4, F_AF, 1); // AF: bc & 0x08
+    }
+    IFX(X_ZF|X_CF|X_OF) {
+        MOV32w(s3, (1<<F_ZF)|(1<<F_CF)|(1<<F_OF));
+        BICx(xFlags, xFlags, s3);
+    }
+    IFX(X_ZF) {
+        Bcond(cNE, +8);
+        ORRw_mask(xFlags, xFlags, 0b011010, 0); // mask=0x40
+    }
+    IFX(X_CF) {
+        Bcond(cCC, +8);
+        ORRw_mask(xFlags, xFlags, 0, 0);        // mask=0x01
+    }
+    IFX(X_OF) {
+        Bcond(cVC, +8);
+        ORRw_mask(xFlags, xFlags, 0b010101, 0); // mask=0x800
+    }
+    IFX(X_SF) {
+        LSRxw(s3, s1, (rex.w)?63:31);
+        BFIx(xFlags, s3, F_SF, 1);
+    }
+    IFX(X_PF) {
+        emit_pf(dyn, ninst, s1, s3, s4);
+    }
+}
 
 // emit SUB32 instruction, from s1 , s2, store result in s1 using s3 and s4 as scratch
-void emit_sub32(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int s2, int s3, int s4, int s5)
+void emit_sub32(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int s2, int s3, int s4)
 {
     IFX(X_PEND) {
         STRxw_U12(s1, xEmu, offsetof(x64emu_t, op1));
@@ -166,7 +162,7 @@ void emit_sub32(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int s2, int s3
         SET_DFNONE(s3);
     }
     IFX(X_AF) {
-        MVNxw(s3, s1);
+        MVNxw_REG(s3, s1);
         ORRxw_REG(s3, s3, s2);      // s3 = ~op1 | op2
         BICxw(s4, s2, s1);          // s4 = ~op1 & op2
     }
@@ -180,13 +176,13 @@ void emit_sub32(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int s2, int s3
     }
     IFX(X_AF) {
         ANDxw_REG(s3, s3, s1);      // s3 = (~op1 | op2) & res
-        ORRxw_REG(s3, s3, s4);      // s4 = (~op1 & op2) | ((~op1 | op2) & res)
+        ORRxw_REG(s3, s3, s4);      // s3 = (~op1 & op2) | ((~op1 | op2) & res)
         LSRxw(s4, s3, 3);
         BFIx(xFlags, s4, F_AF, 1);  // AF: bc & 0x08
     }
     IFX(X_ZF|X_CF|X_OF) {
-        MOVw(s5, (1<<F_ZF)|(1<<F_CF)|(1<<F_OF));
-        BICx(xFlags, xFlags, s5);
+        MOV32w(s3, (1<<F_ZF)|(1<<F_CF)|(1<<F_OF));
+        BICx(xFlags, xFlags, s3);
     }
     IFX(X_ZF) {
         Bcond(cNE, +8);
@@ -226,16 +222,15 @@ void emit_sub32c(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int64_t c, in
     }
     IFX(X_PEND) {
         STRxw_U12(s1, xEmu, offsetof(x64emu_t, op1));
-        MOV64xw(s3, c);
-        STRxw_U12(s3, xEmu, offsetof(x64emu_t, op2));
+        MOV64xw(s5, c);
+        STRxw_U12(s5, xEmu, offsetof(x64emu_t, op2));
         SET_DF(s4, rex.w?d_sub64:d_sub32);
     } else IFX(X_ALL) {
         SET_DFNONE(s4);
     }
     IFX(X_AF) {
-        MOV64xw(s5, c);
-        MVNxw(s4, s1);
-        ORRxw_REG(s3, s4, s5);      // s3 = ~op1 | op2
+        IFX(X_PEND) {} else {MOV64xw(s5, c);}
+        ORNxw_REG(s3, s5, s1);      // s3 = ~op1 | op2
         BICxw_REG(s4, s5, s1);      // s4 = ~op1 & op2
     }
     if(c>=0 && c<0x1000) {
@@ -245,7 +240,7 @@ void emit_sub32c(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int64_t c, in
             SUBxw_U12(s1, s1, c);
         }
     } else {
-        IFXN(X_PEND, X_AF) {} else {MOV64x(s5, c);}
+        IFX(X_PEND|X_AF) {} else {MOV64xw(s5, c);}
         IFX(X_ALL) {
             SUBSxw_REG(s1, s1, s5);
         } else {
@@ -257,13 +252,13 @@ void emit_sub32c(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int64_t c, in
     }
     IFX(X_AF) {
         ANDxw_REG(s3, s3, s1);      // s3 = (~op1 | op2) & res
-        ORRxw_REG(s3, s3, s4);      // s4 = (~op1 & op2) | ((~op1 | op2) & ~ res)
+        ORRxw_REG(s3, s3, s4);      // s3 = (~op1 & op2) | ((~op1 | op2) & res)
         LSRxw(s4, s3, 3);
         BFIw(xFlags, s4, F_AF, 1);  // AF: bc & 0x08
     }
     IFX(X_ZF|X_CF|X_OF) {
-        MOV32w(s5, (1<<F_ZF)|(1<<F_CF)|(1<<F_OF));
-        BICx(xFlags, xFlags, s5);
+        MOV32w(s3, (1<<F_ZF)|(1<<F_CF)|(1<<F_OF));
+        BICx(xFlags, xFlags, s3);
     }
     IFX(X_ZF) {
         Bcond(cNE, +8);
@@ -761,12 +756,12 @@ void emit_inc32(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int s3, int s4
     }
     IFX(X_AF) {
         BICxw_REG(s3, s3, s1);      // s3 = (op1 | op2) & ~ res
-        ORRxw_REG(s3, s3, s4);      // s4 = (op1 & op2) | ((op1 | op2) & ~ res)
+        ORRxw_REG(s3, s3, s4);      // s3 = (op1 & op2) | ((op1 | op2) & ~ res)
         LSRxw(s4, s3, 3);
         BFIxw(xFlags, s4, F_AF, 1); // AF: bc & 0x08
     }
     IFX(X_ZF|X_OF) {
-        MOVw(s3, (1<<F_ZF)|(1<<F_OF));
+        MOV32w(s3, (1<<F_ZF)|(1<<F_OF));
         BICx(xFlags, xFlags, s3);
     }
     IFX(X_ZF) {
@@ -888,7 +883,7 @@ void emit_dec32(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int s3, int s4
         SET_DFNONE(s4);
     }
     IFX(X_AF) {
-        MVNxw(s3, s1);
+        MVNxw_REG(s3, s1);
         if(rex.w) {
             ANDx_mask(s4, s3, 1, 0, 0);  // s4 = ~op1 & op2
             ORRx_mask(s3, s3, 1, 0, 0);  // s3 = ~op1 | op2
@@ -912,7 +907,7 @@ void emit_dec32(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int s3, int s4
         BFIxw(xFlags, s4, F_AF, 1); // AF: bc & 0x08
     }
     IFX(X_ZF|X_OF) {
-        MOVw(s3, (1<<F_ZF)|(1<<F_OF));
+        MOV32w(s3, (1<<F_ZF)|(1<<F_OF));
         BICx(xFlags, xFlags, s3);
     }
     IFX(X_ZF) {
diff --git a/src/dynarec/dynarec_arm64_emit_tests.c b/src/dynarec/dynarec_arm64_emit_tests.c
index f8352a32..acc7455e 100755
--- a/src/dynarec/dynarec_arm64_emit_tests.c
+++ b/src/dynarec/dynarec_arm64_emit_tests.c
@@ -297,7 +297,7 @@
 //}
 
 // emit TEST32 instruction, from test s1 , s2, using s3 and s4 as scratch
-void emit_test32(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int s2, int s3, int s4, int s5)
+void emit_test32(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int s2, int s3, int s4)
 {
     IFX(X_PEND) {
         SET_DF(s3, rex.w?d_tst64:d_tst32);
@@ -305,8 +305,8 @@ void emit_test32(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int s2, int s
         SET_DFNONE(s4);
     }
     IFX(X_ZF|X_CF|X_OF) {
-        MOV32w(s5, (1<<F_ZF)|(1<<F_CF)|(1<<F_OF));
-        BICw(xFlags, xFlags, s5);
+        MOV32w(s3, (1<<F_ZF)|(1<<F_CF)|(1<<F_OF));
+        BICw(xFlags, xFlags, s3);
     }
     ANDSxw_REG(s3, s1, s2);     // res = s1 & s2
     IFX(X_PEND) {
@@ -328,7 +328,7 @@ void emit_test32(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int s2, int s
         LDRw_REG_LSL2(s4, s4, s3);
         ANDw_mask(s3, s1, 0, 0b000100);  // 0x1f
         LSRw_REG(s4, s4, s3);
-        MVNx(s4, s4);
+        MVNx_REG(s4, s4);
         BFIw(xFlags, s4, F_PF, 1);
     }
 }
diff --git a/src/dynarec/dynarec_arm64_helper.c b/src/dynarec/dynarec_arm64_helper.c
index 64ae31f7..a48bd313 100755
--- a/src/dynarec/dynarec_arm64_helper.c
+++ b/src/dynarec/dynarec_arm64_helper.c
@@ -222,7 +222,7 @@ void jump_to_epilog(dynarec_arm_t* dyn, uintptr_t ip, int reg, int ninst)
     MESSAGE(LOG_DUMP, "Jump to epilog\n");
     if(reg) {
         if(reg!=xRIP) {
-            MOVx(xRIP, reg);
+            MOVx_REG(xRIP, reg);
         }
     } else {
         GETIP_(ip);
@@ -237,7 +237,7 @@ void jump_to_next(dynarec_arm_t* dyn, uintptr_t ip, int reg, int ninst)
 
     if(reg) {
         if(reg!=xRIP) {
-            MOVx(xRIP, reg);
+            MOVx_REG(xRIP, reg);
         }
         uintptr_t tbl = getJumpTable64();
         TABLE64(x2, tbl);
@@ -256,7 +256,7 @@ void jump_to_next(dynarec_arm_t* dyn, uintptr_t ip, int reg, int ninst)
         LDRx_U12(x3, x2, 0);
     }
     if(reg!=x1) {
-        MOVx(x1, xRIP);
+        MOVx_REG(x1, xRIP);
     }
 #ifdef HAVE_TRACE
     //MOVx(x2, 15); no access to PC reg
@@ -338,7 +338,7 @@ void call_c(dynarec_arm_t* dyn, int ninst, void* fnc, int reg, int ret, int save
     BLR(reg);
     fpu_popcache(dyn, ninst, reg);
     if(ret>=0) {
-        MOVx(ret, xEmu);
+        MOVx_REG(ret, xEmu);
     }
     if(ret!=-2) {
         LDRx_S9_postindex(xEmu, xSP, 16);
@@ -721,7 +721,7 @@ int x87_setround(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3)
     MOV64x(s1, (uintptr_t)round_map);
     LDRw_REG_LSL2(s2, s1, s2);
     VMRS(s1);               // get fpscr
-    MOVx(s3, s1);
+    MOVx_REG(s3, s1);
     BFIx(s1, s2, 22, 2);    // inject new round
     VMSR(s1);               // put new fpscr
     return s3;
@@ -735,7 +735,7 @@ int sse_setround(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3)
     MOV64x(s1, (uintptr_t)round_map);
     LDRw_REG_LSL2(s2, s1, s2);
     VMRS(s1);               // get fpscr
-    MOVx(s3, s1);
+    MOVx_REG(s3, s1);
     BFIx(s1, s2, 22, 2);    // inject new round
     VMSR(s1);               // put new fpscr
     return s3;
@@ -979,6 +979,6 @@ void emit_pf(dynarec_arm_t* dyn, int ninst, int s1, int s3, int s4)
     LDRw_REG_LSL2(s4, s4, s3);
     ANDw_mask(s3, s1, 0, 0b000100);  //0x1f
     LSRw_REG(s4, s4, s3);
-    MVNx(s4, s4);
-    BFIx(xFlags, s4, F_PF, 1);
+    MVNw_REG(s4, s4);
+    BFIw(xFlags, s4, F_PF, 1);
 }
\ No newline at end of file
diff --git a/src/dynarec/dynarec_arm64_helper.h b/src/dynarec/dynarec_arm64_helper.h
index 1a576f7c..43861e39 100755
--- a/src/dynarec/dynarec_arm64_helper.h
+++ b/src/dynarec/dynarec_arm64_helper.h
@@ -621,12 +621,12 @@ void call_c(dynarec_arm_t* dyn, int ninst, void* fnc, int reg, int ret, int save
 //void emit_cmp32_0(dynarec_arm_t* dyn, int ninst, int s1, int s3, int s4);
 //void emit_test8(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3, int s4);
 //void emit_test16(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3, int s4);
-void emit_test32(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int s2, int s3, int s4, int s5);
-//void emit_add32(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3, int s4);
-//void emit_add32c(dynarec_arm_t* dyn, int ninst, int s1, int32_t c, int s3, int s4);
+void emit_test32(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int s2, int s3, int s4);
+void emit_add32(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int s2, int s3, int s4);
+void emit_add32c(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int64_t c, int s3, int s4, int s5);
 //void emit_add8(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3, int s4, int save_s4);
 //void emit_add8c(dynarec_arm_t* dyn, int ninst, int s1, int32_t c, int s3, int s4);
-void emit_sub32(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int s2, int s3, int s4, int s5);
+void emit_sub32(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int s2, int s3, int s4);
 void emit_sub32c(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int64_t c, int s3, int s4, int s5);
 //void emit_sub8(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3, int s4, int save_s4);
 //void emit_sub8c(dynarec_arm_t* dyn, int ninst, int s1, int32_t c, int s3, int s4);
diff --git a/src/dynarec/dynarec_arm64_pass.c b/src/dynarec/dynarec_arm64_pass.c
index 12f22acf..e7adb758 100755
--- a/src/dynarec/dynarec_arm64_pass.c
+++ b/src/dynarec/dynarec_arm64_pass.c
@@ -55,7 +55,7 @@ void arm_pass(dynarec_arm_t* dyn, uintptr_t addr)
             MESSAGE(LOG_DUMP, "TRACE ----\n");
             fpu_reflectcache(dyn, ninst, x1, x2, x3);
             GETIP(ip);
-            MOVx(x1, xRIP);
+            MOVx_REG(x1, xRIP);
             STORE_XEMU_REGS(xRIP);
             MOV32w(x2, 1);
             CALL(PrintTrace, -1);
diff --git a/src/librarian/librarian.c b/src/librarian/librarian.c
index 5f429c88..dcfe4754 100755
--- a/src/librarian/librarian.c
+++ b/src/librarian/librarian.c
@@ -430,7 +430,8 @@ int GetGlobalNoWeakSymbolStartEnd(lib_t *maplib, const char* name, uintptr_t* st
 
 int IsGlobalNoWeakSymbolInNative(lib_t *maplib, const char* name)
 {
-    uintptr_t start, end;
+    uintptr_t start=0;
+    uintptr_t end=0;
     for(int i=0; i<maplib->libsz; ++i)
         if(GetElfIndex(maplib->libraries[i].lib)==-1)
             if(GetLibNoWeakSymbolStartEnd(maplib->libraries[i].lib, name, &start, &end))
```
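A closing note on the ORRw_mask(xFlags, xFlags, immr, imms) calls in the flag code above: ARM64 logical-immediate operands encode a rotated run of ones, so each (immr, imms) pair decodes to a single EFLAGS bit (0x01 for CF, 0x40 for ZF, 0x800 for OF, matching the `// mask=` comments in the diff). The sketch below decodes only the simple case used here, assuming a 32-bit element size and a single run of ones; bitmask32() is a hypothetical helper written for this note, not a box64 function:

```c
#include <stdint.h>
#include <stdio.h>

// Decode the (immr, imms) pairs used above, assuming a 32-bit element:
// (imms+1) consecutive ones, rotated right by immr bits.
static uint32_t bitmask32(unsigned immr, unsigned imms) {
    uint32_t ones = (imms >= 31) ? 0xffffffffu : ((1u << (imms + 1)) - 1u);
    return (ones >> immr) | (ones << ((32u - immr) & 31u));
}

int main(void) {
    printf("CF mask: %#x\n", bitmask32(0, 0));        // 0x1
    printf("ZF mask: %#x\n", bitmask32(0b011010, 0)); // 0x40
    printf("OF mask: %#x\n", bitmask32(0b010101, 0)); // 0x800
    return 0;
}
```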