Diffstat (limited to 'src')
-rwxr-xr-x  src/dynarec/dynarec_arm64_00.c          10
-rwxr-xr-x  src/dynarec/dynarec_arm64_66.c           4
-rwxr-xr-x  src/dynarec/dynarec_arm64_emit_logic.c  78
-rwxr-xr-x  src/dynarec/dynarec_arm64_emit_math.c   26
-rwxr-xr-x  src/dynarec/dynarec_arm64_helper.h       2
-rwxr-xr-x  src/emu/x64run_private.c                21
-rwxr-xr-x  src/include/regs.h                       1
7 files changed, 78 insertions(+), 64 deletions(-)
diff --git a/src/dynarec/dynarec_arm64_00.c b/src/dynarec/dynarec_arm64_00.c
index 61a18f8f..c46d78ff 100755
--- a/src/dynarec/dynarec_arm64_00.c
+++ b/src/dynarec/dynarec_arm64_00.c
@@ -98,8 +98,8 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
             SETFLAGS(X_ALL, SF_SET);
             u8 = F8;
             UXTBw(x1, xRAX);
-            emit_add8c(dyn, ninst, x1, u8, x3, x4, x5);
-            BFIw(xRAX, x1, 0, 8);
+            emit_add8c(dyn, ninst, x1, u8, x3, x4);
+            BFIx(xRAX, x1, 0, 8);
             break;
         case 0x05:
             INST_NAME("ADD EAX, Id");
@@ -149,7 +149,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
             u8 = F8;
             UXTBw(x1, xRAX);
             emit_or8c(dyn, ninst, x1, u8, x3, x4);
-            BFIw(xRAX, x1, 0, 8);
+            BFIx(xRAX, x1, 0, 8);
             break;
         case 0x0D:
             INST_NAME("OR EAX, Id");
@@ -416,7 +416,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
             u8 = F8;
             UXTBw(x1, xRAX);
             emit_xor8c(dyn, ninst, x1, u8, x3, x4);
-            BFIw(xRAX, x1, 0, 8);
+            BFIx(xRAX, x1, 0, 8);
             break;
         case 0x35:
             INST_NAME("XOR EAX, Id");
@@ -563,7 +563,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                     SETFLAGS(X_ALL, SF_SET);
                     GETEB(x1, 1);
                     u8 = F8;
-                    emit_add8c(dyn, ninst, x1, u8, x2, x4, x5);
+                    emit_add8c(dyn, ninst, x1, u8, x2, x4);
                     EBBACK;
                     break;
                 case 1: //OR
diff --git a/src/dynarec/dynarec_arm64_66.c b/src/dynarec/dynarec_arm64_66.c
index 10c214f7..024404e0 100755
--- a/src/dynarec/dynarec_arm64_66.c
+++ b/src/dynarec/dynarec_arm64_66.c
@@ -106,7 +106,7 @@ uintptr_t dynarec64_66(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
             UXTHw(x1, xRAX);
             MOV32w(x2, i32);
             emit_or16(dyn, ninst, x1, x2, x3, x4);
-            BFIw(xRAX, x1, 0, 16);
+            BFIx(xRAX, x1, 0, 16);
             break;
 
         case 0x0F:
@@ -227,7 +227,7 @@ uintptr_t dynarec64_66(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
             UXTHw(x1, xRAX);
             MOV32w(x2, i32);
             emit_sub16(dyn, ninst, x1, x2, x3, x4);
-            BFIw(xRAX, x1, 0, 16);
+            BFIx(xRAX, x1, 0, 16);
             break;
 
         case 0x31:
diff --git a/src/dynarec/dynarec_arm64_emit_logic.c b/src/dynarec/dynarec_arm64_emit_logic.c
index 9d12d333..141322db 100755
--- a/src/dynarec/dynarec_arm64_emit_logic.c
+++ b/src/dynarec/dynarec_arm64_emit_logic.c
@@ -37,14 +37,14 @@ void emit_or32(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int s2, int s3,
     IFX(X_PEND) {
         STRxw_U12(s1, xEmu, offsetof(x64emu_t, res));
     }
-    IFX(X_CF | X_AF | X_ZF | X_OF) {
-        MOV32w(s3, (1<<F_ZF)|(1<<F_CF)|(1<<F_AF)|(1<<F_OF));
+    IFX(X_CF | X_AF | X_OF) {
+        MOV32w(s3, (1<<F_CF)|(1<<F_AF)|(1<<F_OF));
         BICw(xFlags, xFlags, s3);
     }
     IFX(X_ZF) {
         TSTxw_REG(s1, s1);
-        Bcond(cNE, +8);
-        ORRw_mask(xFlags, xFlags, 0b011010, 0);  // mask=0x40
+        CSETw(s3, cEQ);
+        BFIw(xFlags, s3, F_ZF, 1);
     }
     IFX(X_SF) {
         LSRxw(s3, s1, (rex.w)?63:31);
@@ -71,14 +71,14 @@ void emit_or32c(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int64_t c, int
     IFX(X_PEND) {
         STRxw_U12(s1, xEmu, offsetof(x64emu_t, res));
     }
-    IFX(X_CF | X_AF | X_ZF | X_OF) {
-        MOV32w(s3, (1<<F_ZF)|(1<<F_CF)|(1<<F_AF)|(1<<F_OF));
+    IFX(X_CF | X_AF | X_OF) {
+        MOV32w(s3, (1<<F_CF)|(1<<F_AF)|(1<<F_OF));
         BICw(xFlags, xFlags, s3);
     }
     IFX(X_ZF) {
         TSTxw_REG(s1, s1);
-        Bcond(cNE, +8);
-        ORRw_mask(xFlags, xFlags, 0b011010, 0);  // mask=0x40
+        CSETw(s3, cEQ);
+        BFIw(xFlags, s3, F_ZF, 1);
     }
     IFX(X_SF) {
         LSRxw(s3, s1, (rex.w)?63:31);
@@ -103,14 +103,14 @@ void emit_xor32(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int s2, int s3
     IFX(X_PEND) {
         STRxw_U12(s1, xEmu, offsetof(x64emu_t, res));
     }
-    IFX(X_CF | X_AF | X_ZF | X_OF) {
-        MOV32w(s3, (1<<F_ZF)|(1<<F_CF)|(1<<F_AF)|(1<<F_OF));
+    IFX(X_CF | X_AF | X_OF) {
+        MOV32w(s3, (1<<F_CF)|(1<<F_AF)|(1<<F_OF));
         BICw(xFlags, xFlags, s3);
     }
     IFX(X_ZF) {
         TSTxw_REG(s1, s1);
-        Bcond(cNE, +8);
-        ORRw_mask(xFlags, xFlags, 0b011010, 0);  // mask=0x40
+        CSETw(s3, cEQ);
+        BFIw(xFlags, s3, F_ZF, 1);
     }
     IFX(X_SF) {
         LSRxw(s3, s1, (rex.w)?63:31);
@@ -132,19 +132,19 @@ void emit_xor32c(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int64_t c, in
     } else IFX(X_ALL) {
         SET_DFNONE(s4);
     }
-    IFX(X_PEND) {} else {MOV64x(s3, c);}
+    IFX(X_PEND) {} else {MOV64xw(s3, c);}
     EORxw_REG(s1, s1, s3);
     IFX(X_PEND) {
         STRxw_U12(s1, xEmu, offsetof(x64emu_t, res));
     }
-    IFX(X_CF | X_AF | X_ZF | X_OF) {
-        MOV32w(s3, (1<<F_ZF)|(1<<F_CF)|(1<<F_AF)|(1<<F_OF));
+    IFX(X_CF | X_AF | X_OF) {
+        MOV32w(s3, (1<<F_CF)|(1<<F_AF)|(1<<F_OF));
         BICw(xFlags, xFlags, s3);
     }
     IFX(X_ZF) {
         TSTxw_REG(s1, s1);
-        Bcond(cNE, +8);
-        ORRw_mask(xFlags, xFlags, 0b011010, 0);  // mask=0x40
+        CSETw(s3, cEQ);
+        BFIw(xFlags, s3, F_ZF, 1);
     }
     IFX(X_SF) {
         LSRxw(s3, s1, (rex.w)?63:31);
@@ -165,7 +165,7 @@ void emit_and32(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int s2, int s3
     } else IFX(X_ALL) {
         SET_DFNONE(s4);
     }
-    IFX(X_ALL) {
+    IFX(X_ZF) {
         ANDSxw_REG(s1, s1, s2);
     } else {
         ANDxw_REG(s1, s1, s2);
@@ -173,13 +173,11 @@
     IFX(X_PEND) {
         STRxw_U12(s1, xEmu, offsetof(x64emu_t, res));
     }
-    IFX(X_CF | X_AF | X_ZF | X_OF) {
-        MOV32w(s3, (1<<F_ZF)|(1<<F_CF)|(1<<F_AF)|(1<<F_OF));
+    IFX(X_CF | X_AF | X_OF) {
+        MOV32w(s3, (1<<F_CF)|(1<<F_AF)|(1<<F_OF));
         BICw(xFlags, xFlags, s3);
     }
     IFX(X_ZF) {
-        Bcond(cNE, +8);
-        ORRw_mask(xFlags, xFlags, 0b011010, 0);  // mask=0x40
     }
     IFX(X_SF) {
         LSRxw(s3, s1, (rex.w)?63:31);
@@ -210,13 +208,13 @@
     IFX(X_PEND) {
         STRxw_U12(s1, xEmu, offsetof(x64emu_t, res));
     }
-    IFX(X_CF | X_AF | X_ZF | X_OF) {
-        MOV32w(s3, (1<<F_ZF)|(1<<F_CF)|(1<<F_AF)|(1<<F_OF));
+    IFX(X_CF | X_AF | X_OF) {
+        MOV32w(s3, (1<<F_CF)|(1<<F_AF)|(1<<F_OF));
         BICw(xFlags, xFlags, s3);
     }
     IFX(X_ZF) {
-        Bcond(cNE, +8);
-        ORRw_mask(xFlags, xFlags, 0b011010, 0);  // mask=0x40
+        CSETw(s3, cEQ);
+        BFIw(xFlags, s3, F_ZF, 1);
     }
     IFX(X_SF) {
         LSRxw(s3, s1, (rex.w)?63:31);
@@ -274,14 +272,14 @@ void emit_or8c(dynarec_arm_t* dyn, int ninst, int s1, int32_t c, int s3, int s4)
     IFX(X_PEND) {
         STRB_U12(s1, xEmu, offsetof(x64emu_t, res));
     }
-    IFX(X_CF | X_AF | X_ZF | X_OF) {
-        MOV32w(s3, (1<<F_ZF)|(1<<F_CF)|(1<<F_AF)|(1<<F_OF));
+    IFX(X_CF | X_AF | X_OF) {
+        MOV32w(s3, (1<<F_CF)|(1<<F_AF)|(1<<F_OF));
         BICw(xFlags, xFlags, s3);
     }
     IFX(X_ZF) {
         TSTw_REG(s1, s1);
-        Bcond(cNE, +8);
-        ORRw_mask(xFlags, xFlags, 0b011010, 0);  // mask=0x40
+        CSETw(s3, cEQ);
+        BFIw(xFlags, s3, F_ZF, 1);
     }
     IFX(X_SF) {
         LSRw(s3, s1, 7);
@@ -339,14 +337,14 @@ void emit_xor8c(dynarec_arm_t* dyn, int ninst, int s1, int32_t c, int s3, int s4
     IFX(X_PEND) {
         STRB_U12(s1, xEmu, offsetof(x64emu_t, res));
     }
-    IFX(X_CF | X_AF | X_ZF | X_OF) {
-        MOV32w(s3, (1<<F_ZF)|(1<<F_CF)|(1<<F_AF)|(1<<F_OF));
+    IFX(X_CF | X_AF | X_OF) {
+        MOV32w(s3, (1<<F_CF)|(1<<F_AF)|(1<<F_OF));
         BICw(xFlags, xFlags, s3);
     }
     IFX(X_ZF) {
         TSTw_REG(s1, s1);
-        Bcond(cNE, +8);
-        ORRw_mask(xFlags, xFlags, 0b011010, 0);  // mask=0x40
+        CSETw(s3, cEQ);
+        BFIw(xFlags, s3, F_ZF, 1);
     }
     IFX(X_SF) {
         LSRw(s3, s1, 7);
@@ -367,7 +365,7 @@ void emit_and8(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3, int s4)
     } else IFX(X_ALL) {
         SET_DFNONE(s3);
     }
-    IFX(X_ALL) {
+    IFX(X_ZF) {
         ANDSw_REG(s1, s1, s2);
     } else {
         ANDw_REG(s1, s1, s2);
@@ -411,13 +409,13 @@ void emit_and8c(dynarec_arm_t* dyn, int ninst, int s1, int32_t c, int s3, int s4
     IFX(X_PEND) {
         STRB_U12(s1, xEmu, offsetof(x64emu_t, res));
     }
-    IFX(X_CF | X_AF | X_ZF | X_OF) {
-        MOV32w(s3, (1<<F_ZF)|(1<<F_CF)|(1<<F_AF)|(1<<F_OF));
+    IFX(X_CF | X_AF | X_OF) {
+        MOV32w(s3, (1<<F_CF)|(1<<F_AF)|(1<<F_OF));
         BICw(xFlags, xFlags, s3);
     }
     IFX(X_ZF) {
-        Bcond(cNE, +8);
-        ORRw_mask(xFlags, xFlags, 0b011010, 0);  // mask=0x40
+        CSETw(s3, cEQ);
+        BFIw(xFlags, s3, F_ZF, 1);
     }
     IFX(X_SF) {
         LSRw(s3, s1, 7);
@@ -601,7 +599,7 @@ void emit_and16(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3, int s4)
         ANDw_REG(s1, s1, s2);
     }
     IFX(X_PEND) {
-        STRH_REG(s1, xEmu, offsetof(x64emu_t, res));
+        STRH_U12(s1, xEmu, offsetof(x64emu_t, res));
     }
     IFX(X_PEND) {
         STRB_U12(s1, xEmu, offsetof(x64emu_t, res));
diff --git a/src/dynarec/dynarec_arm64_emit_math.c b/src/dynarec/dynarec_arm64_emit_math.c
index 5bb03da2..e631305b 100755
--- a/src/dynarec/dynarec_arm64_emit_math.c
+++ b/src/dynarec/dynarec_arm64_emit_math.c
@@ -315,7 +315,7 @@ void emit_add8(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3, int s4)
         BFIw(xFlags, s3, F_CF, 1);
     }
     IFX(X_PEND) {
-        STRB_U12(s1, xEmu, offsetof(x64emu_t, res));
+        STRH_U12(s1, xEmu, offsetof(x64emu_t, res));
     }
     IFX(X_ZF) {
         ANDSw_mask(s1, s1, 0, 7);    //mask=0xff
@@ -332,20 +332,20 @@
 }
 
 // emit ADD8 instruction, from s1 , const c, store result in s1 using s3 and s4 as scratch, with save_s4 is s4 need to be saved
-void emit_add8c(dynarec_arm_t* dyn, int ninst, int s1, int c, int s3, int s4, int s5)
+void emit_add8c(dynarec_arm_t* dyn, int ninst, int s1, int c, int s3, int s4)
 {
     IFX(X_PEND) {
-        MOV32w(s5, c&0xff);
+        MOV32w(s4, c&0xff);
         STRB_U12(s1, xEmu, offsetof(x64emu_t, op1));
-        STRB_U12(s3, xEmu, offsetof(x64emu_t, op2));
+        STRB_U12(s4, xEmu, offsetof(x64emu_t, op2));
         SET_DF(s3, d_add8);
     } else IFX(X_ALL) {
         SET_DFNONE(s3);
     }
     IFX(X_AF | X_OF) {
-        if(X_PEND) {} else {MOV32w(s5, c&0xff);}
-        ORRw_REG(s3, s1, s5);      // s3 = op1 | op2
-        ANDw_REG(s4, s1, s5);      // s4 = op1 & op2
+        if(X_PEND) {} else {MOV32w(s4, c&0xff);}
+        ORRw_REG(s3, s1, s4);      // s3 = op1 | op2
+        ANDw_REG(s4, s1, s4);      // s4 = op1 & op2
     }
 
     ADDw_U12(s1, s1, c);
@@ -367,7 +367,7 @@ void emit_add8c(dynarec_arm_t* dyn, int ninst, int s1, int c, int s3, int s4, in
         BFIw(xFlags, s3, F_CF, 1);
     }
     IFX(X_PEND) {
-        STRB_U12(s1, xEmu, offsetof(x64emu_t, res));
+        STRH_U12(s1, xEmu, offsetof(x64emu_t, res));
     }
     IFX(X_ZF) {
         ANDSw_mask(s1, s1, 0, 0b000111);    //mask=000000ff
@@ -525,7 +525,7 @@ void emit_add16(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3, int s4)
         BFIw(xFlags, s3, F_CF, 1);
     }
     IFX(X_PEND) {
-        STRH_U12(s1, xEmu, offsetof(x64emu_t, res));
+        STRw_U12(s1, xEmu, offsetof(x64emu_t, res));
     }
     IFX(X_ZF) {
         ANDSw_mask(s1, s1, 0, 15);    //mask=0xffff
@@ -1015,7 +1015,7 @@ void emit_adc32(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int s2, int s3
     IFX(X_PEND) {
         STRxw_U12(s1, xEmu, offsetof(x64emu_t, op1));
         STRxw_U12(s2, xEmu, offsetof(x64emu_t, op2));
-        SET_DF(s3, rex.w?d_adc64:d_adc32);
+        SET_DF(s3, rex.w?d_adc64:d_adc32b);
     } else IFX(X_ALL) {
         SET_DFNONE(s3);
     }
@@ -1150,7 +1150,7 @@ void emit_adc8(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3, int s4)
     MSR_nzvc(s3);       // load CC into ARM CF
     ADCw_REG(s1, s1, s2);
     IFX(X_PEND) {
-        STRB_U12(s1, xEmu, offsetof(x64emu_t, res));
+        STRH_U12(s1, xEmu, offsetof(x64emu_t, res));
    }
     IFX(X_AF|X_OF) {
         ORRw_REG(s3, s4, s2);      // s3 = op1 | op2
@@ -1204,7 +1204,7 @@ void emit_adc8c(dynarec_arm_t* dyn, int ninst, int s1, int c, int s3, int s4, in
     MSR_nzvc(s3);       // load CC into ARM CF
     ADCw_REG(s1, s1, s5);
     IFX(X_PEND) {
-        STRB_U12(s1, xEmu, offsetof(x64emu_t, res));
+        STRH_U12(s1, xEmu, offsetof(x64emu_t, res));
     }
     IFX(X_AF|X_OF) {
         ORRw_REG(s3, s4, s5);      // s3 = op1 | op2
@@ -1257,7 +1257,7 @@ void emit_adc16(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3, int s4)
     MSR_nzvc(s3);       // load CC into ARM CF
     ADCw_REG(s1, s1, s2);
     IFX(X_PEND) {
-        STRH_REG(s1, xEmu, offsetof(x64emu_t, res));
+        STRw_REG(s1, xEmu, offsetof(x64emu_t, res));
     }
     IFX(X_AF|X_OF) {
         ORRw_REG(s3, s4, s2);      // s3 = op1 | op2
diff --git a/src/dynarec/dynarec_arm64_helper.h b/src/dynarec/dynarec_arm64_helper.h
index e4bc025d..5384e67a 100755
--- a/src/dynarec/dynarec_arm64_helper.h
+++ b/src/dynarec/dynarec_arm64_helper.h
@@ -643,7 +643,7 @@ void emit_test32(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int s2, int s
 void emit_add32(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int s2, int s3, int s4);
 void emit_add32c(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int64_t c, int s3, int s4, int s5);
 void emit_add8(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3, int s4);
-void emit_add8c(dynarec_arm_t* dyn, int ninst, int s1, int32_t c, int s3, int s4, int s5);
+void emit_add8c(dynarec_arm_t* dyn, int ninst, int s1, int32_t c, int s3, int s4);
 void emit_sub32(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int s2, int s3, int s4);
 void emit_sub32c(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int64_t c, int s3, int s4, int s5);
 void emit_sub8(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3, int s4);
diff --git a/src/emu/x64run_private.c b/src/emu/x64run_private.c
index b279ca1d..429f766d 100755
--- a/src/emu/x64run_private.c
+++ b/src/emu/x64run_private.c
@@ -771,7 +771,7 @@ void UpdateFlags(x64emu_t *emu)
             CLEAR_FLAG(F_CF);
             break;
         case d_adc8:
-            CONDITIONAL_SET_FLAG(emu->res.u8 & 0x100, F_CF);
+            CONDITIONAL_SET_FLAG(emu->res.u16 & 0x100, F_CF);
             CONDITIONAL_SET_FLAG((emu->res.u8 & 0xff) == 0, F_ZF);
             CONDITIONAL_SET_FLAG(emu->res.u8 & 0x80, F_SF);
             CONDITIONAL_SET_FLAG(PARITY(emu->res.u8 & 0xff), F_PF);
@@ -780,7 +780,7 @@ void UpdateFlags(x64emu_t *emu)
             CONDITIONAL_SET_FLAG(cc & 0x8, F_AF);
             break;
         case d_adc16:
-            CONDITIONAL_SET_FLAG(emu->res.u16 & 0x10000, F_CF);
+            CONDITIONAL_SET_FLAG(emu->res.u32 & 0x10000, F_CF);
             CONDITIONAL_SET_FLAG((emu->res.u16 & 0xffff) == 0, F_ZF);
             CONDITIONAL_SET_FLAG(emu->res.u16 & 0x8000, F_SF);
             CONDITIONAL_SET_FLAG(PARITY(emu->res.u16 & 0xff), F_PF);
@@ -789,7 +789,7 @@ void UpdateFlags(x64emu_t *emu)
             CONDITIONAL_SET_FLAG(cc & 0x8, F_AF);
             break;
         case d_adc32:
-            CONDITIONAL_SET_FLAG(emu->res.u32 & 0x100000000L, F_CF);
+            CONDITIONAL_SET_FLAG(emu->res.u64 & 0x100000000L, F_CF);
             CONDITIONAL_SET_FLAG((emu->res.u32 & 0xffffffff) == 0, F_ZF);
             CONDITIONAL_SET_FLAG(emu->res.u32 & 0x80000000, F_SF);
             CONDITIONAL_SET_FLAG(PARITY(emu->res.u32 & 0xff), F_PF);
@@ -797,6 +797,21 @@ void UpdateFlags(x64emu_t *emu)
             CONDITIONAL_SET_FLAG(XOR2(cc >> 30), F_OF);
             CONDITIONAL_SET_FLAG(cc & 0x8, F_AF);
             break;
+        case d_adc32b:
+            if(emu->res.u32 == (emu->op1.u32+emu->op2.u32)) {
+                lo = (emu->op1.u32 & 0xFFFF) + (emu->op2.u32 & 0xFFFF);
+            } else {
+                lo = 1 + (emu->op1.u32 & 0xFFFF) + (emu->op2.u32 & 0xFFFF);
+            }
+            hi = (lo >> 16) + (emu->op1.u32 >> 16) + (emu->op2.u32 >> 16);
+            CONDITIONAL_SET_FLAG(hi & 0x10000, F_CF);
+            CONDITIONAL_SET_FLAG(!emu->res.u32, F_ZF);
+            CONDITIONAL_SET_FLAG(emu->res.u32 & 0x80000000, F_SF);
+            CONDITIONAL_SET_FLAG(PARITY(emu->res.u64 & 0xff), F_PF);
+            cc = (emu->op2.u32 & emu->op1.u32) | ((~emu->res.u32) & (emu->op2.u32 | emu->op1.u32));
+            CONDITIONAL_SET_FLAG(XOR2(cc >> 30), F_OF);
+            CONDITIONAL_SET_FLAG(cc & 0x8, F_AF);
+            break;
         case d_adc64:
             if(emu->res.u64 == (emu->op1.u64+emu->op2.u64)) {
                 lo = (emu->op1.u64 & 0xFFFFFFFF) + (emu->op2.u64 & 0xFFFFFFFF);
diff --git a/src/include/regs.h b/src/include/regs.h
index 44cc21b8..91deee65 100755
--- a/src/include/regs.h
+++ b/src/include/regs.h
@@ -109,6 +109,7 @@ typedef enum {
     d_adc8,
     d_adc16,
     d_adc32,
+    d_adc32b,
     d_adc64,
     d_sbb8,
     d_sbb16,
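
Note on the new d_adc32b deferred-flags case above: it recovers the carry of a 32-bit ADC from op1, op2 and the stored 32-bit result alone, inferring the incoming carry bit from whether res equals op1+op2 and then replaying the addition in 16-bit halves so the carry out of bit 31 shows up in bit 16 of the high half (the same scheme the existing d_adc64 case uses for 64-bit). A minimal standalone C sketch of that reconstruction; the helper name adc32_carry and the test values are illustrative only, not taken from the box64 sources:

#include <stdint.h>
#include <stdio.h>

/* Recover CF of "res = op1 + op2 + carry_in" (32-bit ADC) when only op1, op2
 * and the truncated 32-bit result were saved, mirroring the d_adc32b logic:
 * carry_in is inferred from whether res matches op1+op2, then the addition is
 * replayed in 16-bit halves so the carry out of bit 31 lands in bit 16. */
static int adc32_carry(uint32_t op1, uint32_t op2, uint32_t res)
{
    uint32_t lo, hi;
    if(res == op1 + op2)
        lo = (op1 & 0xFFFF) + (op2 & 0xFFFF);
    else
        lo = 1 + (op1 & 0xFFFF) + (op2 & 0xFFFF);
    hi = (lo >> 16) + (op1 >> 16) + (op2 >> 16);
    return (hi & 0x10000) ? 1 : 0;
}

int main(void)
{
    /* 0xFFFFFFFF + 1 with carry-in 1 overflows 32 bits: CF must be 1. */
    printf("CF=%d\n", adc32_carry(0xFFFFFFFFu, 1u, 1u));
    /* 2 + 3 with no carry-in stays in range: CF must be 0. */
    printf("CF=%d\n", adc32_carry(2u, 3u, 5u));
    return 0;
}

Since res differs from op1+op2 exactly when the incoming carry was 1 (the two values differ by 1 modulo 2^32), the carry-in inference is unambiguous.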