| author | ptitSeb <sebastien.chev@gmail.com> | 2021-08-28 16:46:59 +0200 |
|---|---|---|
| committer | ptitSeb <sebastien.chev@gmail.com> | 2021-08-28 16:46:59 +0200 |
| commit | 3aa12a52eb7185a6675f6d59c63e31b94b4f1c0f (patch) | |
| tree | d4a7011e478dd91ef20548450443c842d2ec10e8 /src | |
| parent | 5f31e0e093f87e91dc71de7866450f0036deb91b (diff) | |
| download | box64-3aa12a52eb7185a6675f6d59c63e31b94b4f1c0f.tar.gz box64-3aa12a52eb7185a6675f6d59c63e31b94b4f1c0f.zip | |
Added AES-NI cpu extension support ([DYNAREC] too, using AES extension if available)
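In short: on AArch64 CPUs that expose the AES extension (`arm64_aes`), the dynarec now emits native AESE/AESD/AESMC/AESIMC instructions; otherwise both the dynarec and the interpreter fall back to a table-driven software implementation. The subtlety, noted in the dynarec comments below, is that the two ISAs split the round differently: ARM64's AESE folds AddRoundKey in *before* SubBytes/ShiftRows, while x86's AESENC XORs the round key in *after* MixColumns. The sketch below is illustrative only, not code from this commit; it assumes the standard ACLE NEON crypto intrinsics from `<arm_neon.h>` (compile with `-march=armv8-a+crypto`) and a hypothetical helper name, and shows the same cancellation trick as the emitted VEORQ/AESE/AESMC/VEORQ sequence:

```c
#include <arm_neon.h>

// One x86 AESENC round expressed with ARM64 AES instructions:
//   x86:   dest = MixColumns(SubBytes(ShiftRows(state))) ^ key
//   ARM64: vaeseq_u8(d, k) = SubBytes(ShiftRows(d ^ k)), vaesmcq_u8 = MixColumns
static uint8x16_t aesenc_equiv(uint8x16_t state, uint8x16_t key)
{
    // XOR the key in first so AESE's built-in AddRoundKey cancels out,
    // leaving SubBytes(ShiftRows(state)) -- the x86 ordering.
    uint8x16_t t = veorq_u8(state, key);
    t = vaeseq_u8(t, key);      // SubBytes(ShiftRows((state ^ key) ^ key))
    t = vaesmcq_u8(t);          // MixColumns
    return veorq_u8(t, key);    // x86 adds the round key last
}
```

Pre-XORing the state with the round key makes AESE's implicit AddRoundKey a no-op, so only SubBytes/ShiftRows survive. Dropping the MixColumns step gives AESENCLAST, and AESD/AESIMC cover the decrypt rounds the same way.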
Diffstat (limited to 'src')
| -rwxr-xr-x | src/dynarec/arm64_emitter.h | 9 | ||||
| -rwxr-xr-x | src/dynarec/arm64_printer.c | 9 | ||||
| -rwxr-xr-x | src/dynarec/dynarec_arm64_660f.c | 101 | ||||
| -rwxr-xr-x | src/dynarec/dynarec_arm64_functions.c | 119 | ||||
| -rwxr-xr-x | src/dynarec/dynarec_arm64_functions.h | 7 | ||||
| -rwxr-xr-x | src/dynarec/dynarec_arm64_helper.c | 15 | ||||
| -rwxr-xr-x | src/dynarec/dynarec_arm64_helper.h | 3 | ||||
| -rw-r--r-- | src/emu/x64run660f.c | 187 | ||||
| -rw-r--r-- | src/tools/my_cpuid.c | 1 | ||||
| -rwxr-xr-x | src/wrapped/wrappedlibc.c | 2 |
10 files changed, 452 insertions(+), 1 deletion(-)
```diff
diff --git a/src/dynarec/arm64_emitter.h b/src/dynarec/arm64_emitter.h
index 13ad556b..1118ec11 100755
--- a/src/dynarec/arm64_emitter.h
+++ b/src/dynarec/arm64_emitter.h
@@ -1727,4 +1727,13 @@
 #define SQDMULHQ_16(Vd, Vn, Vm) EMIT(QDMULH_vector(1, 0, 0b01, Vm, Vn, Vd))
 #define SQDMULHQ_32(Vd, Vn, Vm) EMIT(QDMULH_vector(1, 0, 0b10, Vm, Vn, Vd))
 
+// AES extensions
+#define AES_gen(D, Rn, Rd) (0b01001110<<24 | 0b00<<22 | 0b10100<<17 | 0b0010<<13 | (D)<<12 | 0b10<<10 | (Rn)<<5 | (Rd))
+#define AESD(Vd, Vn) EMIT(AES_gen(1, Vn, Vd))
+#define AESE(Vd, Vn) EMIT(AES_gen(0, Vn, Vd))
+
+#define AESMC_gen(D, Rn, Rd) (0b01001110<<24 | 0b00<<22 | 0b10100<<17 | 0b0011<<13 | (D)<<12 | 0b10<<10 | (Rn)<<5 | (Rd))
+#define AESIMC(Vd, Vn) EMIT(AESMC_gen(1, Vn, Vd))
+#define AESMC(Vd, Vn) EMIT(AESMC_gen(0, Vn, Vd))
+
 #endif //__ARM64_EMITTER_H__
diff --git a/src/dynarec/arm64_printer.c b/src/dynarec/arm64_printer.c
index b2c90dda..f65d9049 100755
--- a/src/dynarec/arm64_printer.c
+++ b/src/dynarec/arm64_printer.c
@@ -1312,6 +1312,15 @@ const char* arm64_print(uint32_t opcode, uintptr_t addr)
         return buff;
     }
 
+    // AES
+    if(isMask(opcode, "0100111000101000010f10nnnnnddddd", &a)) {
+        snprintf(buff, sizeof(buff), "AES%c V%d.16B, V%d.16B", sf?'D':'E', Rd, Rn);
+        return buff;
+    }
+    if(isMask(opcode, "0100111000101000011f10nnnnnddddd", &a)) {
+        snprintf(buff, sizeof(buff), "AES%sMC V%d.16B, V%d.16B", sf?"I":"", Rd, Rn);
+        return buff;
+    }
 
     snprintf(buff, sizeof(buff), "%08X ???", __builtin_bswap32(opcode));
diff --git a/src/dynarec/dynarec_arm64_660f.c b/src/dynarec/dynarec_arm64_660f.c
index 0cddb61f..45b8907f 100755
--- a/src/dynarec/dynarec_arm64_660f.c
+++ b/src/dynarec/dynarec_arm64_660f.c
@@ -381,6 +381,107 @@ uintptr_t dynarec64_660F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n
             SXTL_32(q0, q1); // 32bits->64bits
             break;
 
+        case 0xDB:
+            INST_NAME("AESIMC Gx, Ex"); // AES-NI
+            nextop = F8;
+            if(arm64_aes) {
+                GETEX(q1, 0);
+                GETGX_empty(q0);
+                AESIMC(q0, q1);
+            } else {
+                GETEX(q1, 0);
+                GETGX_empty(q0);
+                if(q0!=q1) {
+                    VMOVQ(q0, q1);
+                }
+                sse_forget_reg(dyn, ninst, gd);
+                MOV32w(x1, gd);
+                CALL(arm_aesimc, -1);
+            }
+            break;
+        case 0xDC:
+            INST_NAME("AESENC Gx, Ex"); // AES-NI
+            nextop = F8;
+            if(arm64_aes) {
+                GETEX(q1, 0);
+                GETGX(q0);
+                v0 = fpu_get_scratch(dyn); // ARM64 internal operation differs a bit from x86_64
+                VEORQ(v0, q0, q1);
+                AESE(v0, q1);
+                AESMC(v0, v0);
+                VEORQ(q0, v0, q1);
+            } else {
+                GETG;
+                sse_forget_reg(dyn, ninst, gd);
+                MOV32w(x1, gd);
+                CALL(arm_aese, -1);
+                GETGX(q0);
+                GETEX(q1, 0);
+                VEORQ(q0, q0, q1);
+            }
+            break;
+        case 0xDD:
+            INST_NAME("AESENCLAST Gx, Ex"); // AES-NI
+            nextop = F8;
+            if(arm64_aes) {
+                GETEX(q1, 0);
+                GETGX(q0);
+                v0 = fpu_get_scratch(dyn); // ARM64 internal operation differs a bit from x86_64
+                VEORQ(v0, q0, q1);
+                AESE(v0, q1);
+                VEORQ(q0, v0, q1);
+            } else {
+                GETG;
+                sse_forget_reg(dyn, ninst, gd);
+                MOV32w(x1, gd);
+                CALL(arm_aeselast, -1);
+                GETGX(q0);
+                GETEX(q1, 0);
+                VEORQ(q0, q0, q1);
+            }
+            break;
+        case 0xDE:
+            INST_NAME("AESDEC Gx, Ex"); // AES-NI
+            nextop = F8;
+            if(arm64_aes) {
+                GETEX(q1, 0);
+                GETGX(q0);
+                v0 = fpu_get_scratch(dyn); // ARM64 internal operation differs a bit from x86_64
+                VEORQ(v0, q0, q1);
+                AESD(v0, q1);
+                AESIMC(v0, v0);
+                VEORQ(q0, v0, q1);
+            } else {
+                GETG;
+                sse_forget_reg(dyn, ninst, gd);
+                MOV32w(x1, gd);
+                CALL(arm_aesd, -1);
+                GETGX(q0);
+                GETEX(q1, 0);
+                VEORQ(q0, q0, q1);
+            }
+            break;
+        case 0xDF:
+            INST_NAME("AESDECLAST Gx, Ex"); // AES-NI
+            nextop = F8;
+            if(arm64_aes) {
+                GETEX(q1, 0);
+                GETGX(q0);
+                v0 = fpu_get_scratch(dyn); // ARM64 internal operation differs a bit from x86_64
+                VEORQ(v0, q0, q1);
+                AESD(v0, q1);
+                VEORQ(q0, v0, q1);
+            } else {
+                GETG;
+                sse_forget_reg(dyn, ninst, gd);
+                MOV32w(x1, gd);
+                CALL(arm_aesdlast, -1);
+                GETGX(q0);
+                GETEX(q1, 0);
+                VEORQ(q0, q0, q1);
+            }
+            break;
+
         default:
             DEFAULT;
     }
diff --git a/src/dynarec/dynarec_arm64_functions.c b/src/dynarec/dynarec_arm64_functions.c
index ce8a7f55..faf09e27 100755
--- a/src/dynarec/dynarec_arm64_functions.c
+++ b/src/dynarec/dynarec_arm64_functions.c
@@ -190,6 +190,125 @@ void arm_fprem1(x64emu_t* emu)
     emu->sw.f.F87_C3 = ((tmp32s>>1)&1);
     emu->sw.f.F87_C1 = ((tmp32s>>2)&1);
 }
+
+static uint8_t ff_mult(uint8_t a, uint8_t b)
+{
+    int retval = 0;
+
+    for(int i = 0; i < 8; i++) {
+        if((b & 1) == 1)
+            retval ^= a;
+
+        if((a & 0x80)) {
+            a <<= 1;
+            a ^= 0x1b;
+        } else {
+            a <<= 1;
+        }
+
+        b >>= 1;
+    }
+
+    return retval;
+}
+
+void arm_aesimc(x64emu_t* emu, int xmm)
+{
+    sse_regs_t eax1 = emu->xmm[xmm];
+
+    for(int j=0; j<4; ++j) {
+        emu->xmm[xmm].ub[0+j*4] = ff_mult(0x0E, eax1.ub[0+j*4]) ^ ff_mult(0x0B, eax1.ub[1+j*4]) ^ ff_mult(0x0D, eax1.ub[2+j*4]) ^ ff_mult(0x09, eax1.ub[3+j*4]);
+        emu->xmm[xmm].ub[1+j*4] = ff_mult(0x09, eax1.ub[0+j*4]) ^ ff_mult(0x0E, eax1.ub[1+j*4]) ^ ff_mult(0x0B, eax1.ub[2+j*4]) ^ ff_mult(0x0D, eax1.ub[3+j*4]);
+        emu->xmm[xmm].ub[2+j*4] = ff_mult(0x0D, eax1.ub[0+j*4]) ^ ff_mult(0x09, eax1.ub[1+j*4]) ^ ff_mult(0x0E, eax1.ub[2+j*4]) ^ ff_mult(0x0B, eax1.ub[3+j*4]);
+        emu->xmm[xmm].ub[3+j*4] = ff_mult(0x0B, eax1.ub[0+j*4]) ^ ff_mult(0x0D, eax1.ub[1+j*4]) ^ ff_mult(0x09, eax1.ub[2+j*4]) ^ ff_mult(0x0E, eax1.ub[3+j*4]);
+    }
+}
+void arm_aesmc(x64emu_t* emu, int xmm)
+{
+    sse_regs_t eax1 = emu->xmm[xmm];
+
+    for(int j=0; j<4; ++j) {
+        emu->xmm[xmm].ub[0+j*4] = ff_mult(0x02, eax1.ub[0+j*4]) ^ ff_mult(0x03, eax1.ub[1+j*4]) ^ eax1.ub[2+j*4] ^ eax1.ub[3+j*4];
+        emu->xmm[xmm].ub[1+j*4] = eax1.ub[0+j*4] ^ ff_mult(0x02, eax1.ub[1+j*4]) ^ ff_mult(0x03, eax1.ub[2+j*4]) ^ eax1.ub[3+j*4];
+        emu->xmm[xmm].ub[2+j*4] = eax1.ub[0+j*4] ^ eax1.ub[1+j*4] ^ ff_mult(0x02, eax1.ub[2+j*4]) ^ ff_mult(0x03, eax1.ub[3+j*4]);
+        emu->xmm[xmm].ub[3+j*4] = ff_mult(0x03, eax1.ub[0+j*4]) ^ eax1.ub[1+j*4] ^ eax1.ub[2+j*4] ^ ff_mult(0x02, eax1.ub[3+j*4]);
+    }
+}
+void arm_aesdlast(x64emu_t* emu, int xmm)
+{
+    //                              A0 B1 C2 D3 E4 F5 G6 H7 I8 J9 Ka Lb Mc Nd Oe Pf
+    //                              A  N  K  H  E  B  O  L  I  F  C  P  M  J  G  D
+    const uint8_t invshiftrows[] = {0,13,10, 7, 4, 1,14,11, 8, 5, 2,15,12, 9, 6, 3};
+    const uint8_t invsubbytes[256] = {
+        0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38, 0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb,
+        0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87, 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb,
+        0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d, 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e,
+        0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2, 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25,
+        0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16, 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92,
+        0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda, 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84,
+        0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a, 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06,
+        0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02, 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b,
+        0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea, 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73,
+        0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85, 0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e,
+        0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89, 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b,
+        0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20, 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4,
+        0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31, 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f,
+        0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d, 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef,
+        0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0, 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61,
+        0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26, 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d,
+    };
+
+    sse_regs_t eax1;
+    for(int i=0; i<16; ++i)
+        eax1.ub[i] = emu->xmm[xmm].ub[invshiftrows[i]];
+    //STATE ← InvSubBytes( STATE );
+    for(int i=0; i<16; ++i)
+        emu->xmm[xmm].ub[i] = invsubbytes[eax1.ub[i]];
+
+}
+void arm_aeselast(x64emu_t* emu, int xmm)
+{
+    //                           A0 B1 C2 D3 E4 F5 G6 H7 I8 J9 Ka Lb Mc Nd Oe Pf
+    //                           A  F  K  P  E  J  O  D  I  N  C  H  M  B  G  L
+    const uint8_t shiftrows[] = {0, 5,10,15, 4, 9,14, 3, 8,13, 2, 7,12, 1, 6,11};
+    const uint8_t subbytes[256] = {
+        0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76,
+        0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0,
+        0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15,
+        0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75,
+        0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84,
+        0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf,
+        0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8,
+        0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2,
+        0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73,
+        0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb,
+        0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79,
+        0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08,
+        0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a,
+        0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e,
+        0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf,
+        0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16,
+    };
+
+    sse_regs_t eax1;
+    for(int i=0; i<16; ++i)
+        eax1.ub[i] = emu->xmm[xmm].ub[shiftrows[i]];
+    //STATE ← SubBytes( STATE );
+    for(int i=0; i<16; ++i)
+        emu->xmm[xmm].ub[i] = subbytes[eax1.ub[i]];
+}
+void arm_aesd(x64emu_t* emu, int xmm)
+{
+    arm_aesdlast(emu, xmm);
+    arm_aesimc(emu, xmm);
+}
+void arm_aese(x64emu_t* emu, int xmm)
+{
+    arm_aeselast(emu, xmm);
+    arm_aesmc(emu, xmm);
+}
+
+
 #define XMM0 0
 #define XMM8 16
 #define X870 8
diff --git a/src/dynarec/dynarec_arm64_functions.h b/src/dynarec/dynarec_arm64_functions.h
index 085e4c1f..d4c861c9 100755
--- a/src/dynarec/dynarec_arm64_functions.h
+++ b/src/dynarec/dynarec_arm64_functions.h
@@ -29,6 +29,13 @@
 void arm_fsave(x64emu_t* emu, uint8_t* ed);
 void arm_frstor(x64emu_t* emu, uint8_t* ed);
 void arm_fprem1(x64emu_t* emu);
+void arm_aesd(x64emu_t* emu, int xmm);
+void arm_aese(x64emu_t* emu, int xmm);
+void arm_aesdlast(x64emu_t* emu, int xmm);
+void arm_aeselast(x64emu_t* emu, int xmm);
+void arm_aesimc(x64emu_t* emu, int xmm);
+
+
 void arm_ud(x64emu_t* emu);
 
 // Get an FPU scratch reg
diff --git a/src/dynarec/dynarec_arm64_helper.c b/src/dynarec/dynarec_arm64_helper.c
index e598c47b..8c65ac32 100755
--- a/src/dynarec/dynarec_arm64_helper.c
+++ b/src/dynarec/dynarec_arm64_helper.c
@@ -1114,6 +1114,21 @@ int sse_get_reg_empty(dynarec_arm_t* dyn, int ninst, int s1, int a)
     return 0;
 #endif
 }
+// forget neon register for a SSE reg, create the entry if needed
+void sse_forget_reg(dynarec_arm_t* dyn, int ninst, int a)
+{
+    (void) ninst;
+#if STEP > 1
+    if(dyn->ssecache[a]==-1)
+        return;
+    VSTR128_U12(dyn->ssecache[a], xEmu, offsetof(x64emu_t, xmm[a]));
+    fpu_free_reg(dyn, dyn->ssecache[a]);
+    dyn->ssecache[a] = -1;
+#else
+    (void)dyn; (void)a;
+#endif
+    return;
+}
 // purge the SSE cache for XMM0..XMM7 (to use before function native call)
 void sse_purge07cache(dynarec_arm_t* dyn, int ninst, int s1)
 {
diff --git a/src/dynarec/dynarec_arm64_helper.h b/src/dynarec/dynarec_arm64_helper.h
index 27c1e9a7..a005d90d 100755
--- a/src/dynarec/dynarec_arm64_helper.h
+++ b/src/dynarec/dynarec_arm64_helper.h
@@ -744,6 +744,7 @@ void* arm64_next(x64emu_t* emu, uintptr_t addr);
 #define mmx_get_reg_empty STEPNAME(mmx_get_reg_empty)
 #define sse_get_reg STEPNAME(sse_get_reg)
 #define sse_get_reg_empty STEPNAME(sse_get_reg_empty)
+#define sse_forget_reg STEPNAME(sse_forget_reg)
 #define sse_purge07cache STEPNAME(sse_purge07cache)
 
 #define fpu_pushcache STEPNAME(fpu_pushcache)
@@ -884,6 +885,8 @@
 int mmx_get_reg_empty(dynarec_arm_t* dyn, int ninst, int s1, int a);
 int sse_get_reg(dynarec_arm_t* dyn, int ninst, int s1, int a);
 // get neon register for a SSE reg, but don't try to synch it if it needed to be created
 int sse_get_reg_empty(dynarec_arm_t* dyn, int ninst, int s1, int a);
+// forget neon register for a SSE reg, create the entry if needed
+void sse_forget_reg(dynarec_arm_t* dyn, int ninst, int a);
 // purge the XMM0..XMM7 cache (before function call)
 void sse_purge07cache(dynarec_arm_t* dyn, int ninst, int s1);
diff --git a/src/emu/x64run660f.c b/src/emu/x64run660f.c
index 5eee4f56..2c6a364c 100644
--- a/src/emu/x64run660f.c
+++ b/src/emu/x64run660f.c
@@ -22,6 +22,27 @@
 #include "modrm.h"
 
+static uint8_t ff_mult(uint8_t a, uint8_t b)
+{
+    int retval = 0;
+
+    for(int i = 0; i < 8; i++) {
+        if((b & 1) == 1)
+            retval ^= a;
+
+        if((a & 0x80)) {
+            a <<= 1;
+            a ^= 0x1b;
+        } else {
+            a <<= 1;
+        }
+
+        b >>= 1;
+    }
+
+    return retval;
+}
+
 int Run660F(x64emu_t *emu, rex_t rex)
 {
     uint8_t opcode;
@@ -36,6 +57,49 @@ int Run660F(x64emu_t *emu, rex_t rex)
     reg64_t *oped, *opgd;
     sse_regs_t *opex, *opgx, eax1, *opex2;
     mmx87_regs_t *opem, *opgm;
+    // AES opcodes constants
+    //                           A0 B1 C2 D3 E4 F5 G6 H7 I8 J9 Ka Lb Mc Nd Oe Pf
+    //                           A  F  K  P  E  J  O  D  I  N  C  H  M  B  G  L
+    const uint8_t shiftrows[] = {0, 5,10,15, 4, 9,14, 3, 8,13, 2, 7,12, 1, 6,11};
+    const uint8_t subbytes[256] = {
+        0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76,
+        0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0,
+        0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15,
+        0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75,
+        0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84,
+        0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf,
+        0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8,
+        0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2,
+        0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73,
+        0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb,
+        0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79,
+        0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08,
+        0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a,
+        0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e,
+        0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf,
+        0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16,
+    };
+    //                              A0 B1 C2 D3 E4 F5 G6 H7 I8 J9 Ka Lb Mc Nd Oe Pf
+    //                              A  N  K  H  E  B  O  L  I  F  C  P  M  J  G  D
+    const uint8_t invshiftrows[] = {0,13,10, 7, 4, 1,14,11, 8, 5, 2,15,12, 9, 6, 3};
+    const uint8_t invsubbytes[256] = {
+        0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38, 0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb,
+        0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87, 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb,
+        0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d, 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e,
+        0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2, 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25,
+        0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16, 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92,
+        0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda, 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84,
+        0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a, 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06,
+        0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02, 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b,
+        0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea, 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73,
+        0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85, 0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e,
+        0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89, 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b,
+        0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20, 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4,
+        0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31, 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f,
+        0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d, 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef,
+        0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0, 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61,
+        0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26, 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d,
+    };
 
     opcode = F8;
 
@@ -365,6 +429,108 @@ int Run660F(x64emu_t *emu, rex_t rex)
                 GX->sq[i] = EX->sd[i];
             break;
 
+        case 0xDB: /* AESIMC Gx, Ex */
+            nextop = F8;
+            GETEX(0);
+            GETGX;
+            //STATE ← InvMixColumns( STATE );
+            if (EX == GX) {
+                for(int i=0; i<16; ++i)
+                    eax1.ub[i] = EX->ub[i];
+                for(int j=0; j<4; ++j) {
+                    GX->ub[0+j*4] = ff_mult(0x0E, eax1.ub[0+j*4]) ^ ff_mult(0x0B, eax1.ub[1+j*4]) ^ ff_mult(0x0D, eax1.ub[2+j*4]) ^ ff_mult(0x09, eax1.ub[3+j*4]);
+                    GX->ub[1+j*4] = ff_mult(0x09, eax1.ub[0+j*4]) ^ ff_mult(0x0E, eax1.ub[1+j*4]) ^ ff_mult(0x0B, eax1.ub[2+j*4]) ^ ff_mult(0x0D, eax1.ub[3+j*4]);
+                    GX->ub[2+j*4] = ff_mult(0x0D, eax1.ub[0+j*4]) ^ ff_mult(0x09, eax1.ub[1+j*4]) ^ ff_mult(0x0E, eax1.ub[2+j*4]) ^ ff_mult(0x0B, eax1.ub[3+j*4]);
+                    GX->ub[3+j*4] = ff_mult(0x0B, eax1.ub[0+j*4]) ^ ff_mult(0x0D, eax1.ub[1+j*4]) ^ ff_mult(0x09, eax1.ub[2+j*4]) ^ ff_mult(0x0E, eax1.ub[3+j*4]);
+                }
+            } else {
+                for(int j=0; j<4; ++j) {
+                    GX->ub[0+j*4] = ff_mult(0x0E, EX->ub[0+j*4]) ^ ff_mult(0x0B, EX->ub[1+j*4]) ^ ff_mult(0x0D, EX->ub[2+j*4]) ^ ff_mult(0x09, EX->ub[3+j*4]);
+                    GX->ub[1+j*4] = ff_mult(0x09, EX->ub[0+j*4]) ^ ff_mult(0x0E, EX->ub[1+j*4]) ^ ff_mult(0x0B, EX->ub[2+j*4]) ^ ff_mult(0x0D, EX->ub[3+j*4]);
+                    GX->ub[2+j*4] = ff_mult(0x0D, EX->ub[0+j*4]) ^ ff_mult(0x09, EX->ub[1+j*4]) ^ ff_mult(0x0E, EX->ub[2+j*4]) ^ ff_mult(0x0B, EX->ub[3+j*4]);
+                    GX->ub[3+j*4] = ff_mult(0x0B, EX->ub[0+j*4]) ^ ff_mult(0x0D, EX->ub[1+j*4]) ^ ff_mult(0x09, EX->ub[2+j*4]) ^ ff_mult(0x0E, EX->ub[3+j*4]);
+                }
+            }
+            break;
+        case 0xDC: /* AESENC Gx, Ex */
+            nextop = F8;
+            GETEX(0);
+            GETGX;
+            //STATE ← SRC1;
+            //RoundKey ← SRC2;
+            //STATE ← ShiftRows( STATE );
+            for(int i=0; i<16; ++i)
+                eax1.ub[i] = GX->ub[shiftrows[i]];
+            //STATE ← SubBytes( STATE );
+            for(int i=0; i<16; ++i)
+                eax1.ub[i] = subbytes[eax1.ub[i]];
+            //STATE ← MixColumns( STATE );
+            for(int j=0; j<4; ++j) {
+                GX->ub[0+j*4] = ff_mult(0x02, eax1.ub[0+j*4]) ^ ff_mult(0x03, eax1.ub[1+j*4]) ^ eax1.ub[2+j*4] ^ eax1.ub[3+j*4];
+                GX->ub[1+j*4] = eax1.ub[0+j*4] ^ ff_mult(0x02, eax1.ub[1+j*4]) ^ ff_mult(0x03, eax1.ub[2+j*4]) ^ eax1.ub[3+j*4];
+                GX->ub[2+j*4] = eax1.ub[0+j*4] ^ eax1.ub[1+j*4] ^ ff_mult(0x02, eax1.ub[2+j*4]) ^ ff_mult(0x03, eax1.ub[3+j*4]);
+                GX->ub[3+j*4] = ff_mult(0x03, eax1.ub[0+j*4]) ^ eax1.ub[1+j*4] ^ eax1.ub[2+j*4] ^ ff_mult(0x02, eax1.ub[3+j*4]);
+            }
+            //DEST[127:0] ← STATE XOR RoundKey;
+            GX->q[0] ^= EX->q[0];
+            GX->q[1] ^= EX->q[1];
+            break;
+        case 0xDD: /* AESENCLAST Gx, Ex */
+            nextop = F8;
+            GETEX(0);
+            GETGX;
+            //STATE ← SRC1;
+            //RoundKey ← SRC2;
+            //STATE ← ShiftRows( STATE );
+            for(int i=0; i<16; ++i)
+                eax1.ub[i] = GX->ub[shiftrows[i]];
+            //STATE ← SubBytes( STATE );
+            for(int i=0; i<16; ++i)
+                GX->ub[i] = subbytes[eax1.ub[i]];
+            //DEST[127:0] ← STATE XOR RoundKey;
+            GX->q[0] ^= EX->q[0];
+            GX->q[1] ^= EX->q[1];
+            break;
+        case 0xDE: /* AESDEC Gx, Ex */
+            nextop = F8;
+            GETEX(0);
+            GETGX;
+            //STATE ← SRC1;
+            //RoundKey ← SRC2;
+            //STATE ← InvShiftRows( STATE );
+            for(int i=0; i<16; ++i)
+                eax1.ub[i] = GX->ub[invshiftrows[i]];
+            //STATE ← InvSubBytes( STATE );
+            for(int i=0; i<16; ++i)
+                eax1.ub[i] = invsubbytes[eax1.ub[i]];
+            //STATE ← InvMixColumns( STATE );
+            for(int j=0; j<4; ++j) {
+                GX->ub[0+j*4] = ff_mult(0x0E, eax1.ub[0+j*4]) ^ ff_mult(0x0B, eax1.ub[1+j*4]) ^ ff_mult(0x0D, eax1.ub[2+j*4]) ^ ff_mult(0x09, eax1.ub[3+j*4]);
+                GX->ub[1+j*4] = ff_mult(0x09, eax1.ub[0+j*4]) ^ ff_mult(0x0E, eax1.ub[1+j*4]) ^ ff_mult(0x0B, eax1.ub[2+j*4]) ^ ff_mult(0x0D, eax1.ub[3+j*4]);
+                GX->ub[2+j*4] = ff_mult(0x0D, eax1.ub[0+j*4]) ^ ff_mult(0x09, eax1.ub[1+j*4]) ^ ff_mult(0x0E, eax1.ub[2+j*4]) ^ ff_mult(0x0B, eax1.ub[3+j*4]);
+                GX->ub[3+j*4] = ff_mult(0x0B, eax1.ub[0+j*4]) ^ ff_mult(0x0D, eax1.ub[1+j*4]) ^ ff_mult(0x09, eax1.ub[2+j*4]) ^ ff_mult(0x0E, eax1.ub[3+j*4]);
+            }
+            //DEST[127:0] ← STATE XOR RoundKey;
+            GX->q[0] ^= EX->q[0];
+            GX->q[1] ^= EX->q[1];
+            break;
+        case 0xDF: /* AESDECLAST Gx, Ex */
+            nextop = F8;
+            GETEX(0);
+            GETGX;
+            //STATE ← SRC1;
+            //RoundKey ← SRC2;
+            //STATE ← InvShiftRows( STATE );
+            for(int i=0; i<16; ++i)
+                eax1.ub[i] = GX->ub[invshiftrows[i]];
+            //STATE ← InvSubBytes( STATE );
+            for(int i=0; i<16; ++i)
+                GX->ub[i] = invsubbytes[eax1.ub[i]];
+            //DEST[127:0] ← STATE XOR RoundKey;
+            GX->q[0] ^= EX->q[0];
+            GX->q[1] ^= EX->q[1];
+            break;
+
         default:
             return 1;
     }
@@ -476,6 +642,27 @@ int Run660F(x64emu_t *emu, rex_t rex)
                 GX->ud[i] = 0;
             break;
 
+        case 0xDF: // AESKEYGENASSIST Gx, Ex, u8
+            nextop = F8;
+            GETEX(1);
+            GETGX;
+            tmp32u = F8;
+            for (int i = 4; i < 8; ++i)
+                GX->ub[i] = subbytes[EX->ub[i]];
+            for (int i = 12; i < 16; ++i)
+                GX->ub[i] = subbytes[EX->ub[i]];
+            GX->ud[0] = GX->ud[1];
+            tmp8u = GX->ub[4];
+            GX->ud[1] = GX->ud[1] >> 8;
+            GX->ub[7] = tmp8u;
+            GX->ud[1] ^= tmp32u;
+            GX->ud[2] = GX->ud[3];
+            tmp8u = GX->ub[12];
+            GX->ud[3] = GX->ud[3] >> 8;
+            GX->ub[15] = tmp8u;
+            GX->ud[3] ^= tmp32u;
+            break;
+
         default:
             return 1;
     }
diff --git a/src/tools/my_cpuid.c b/src/tools/my_cpuid.c
index e3f716be..2cb4b92c 100644
--- a/src/tools/my_cpuid.c
+++ b/src/tools/my_cpuid.c
@@ -53,6 +53,7 @@ void my_cpuid(x64emu_t* emu, uint32_t tmp32u)
             | 1<<9  // SSSE3
             | 1<<12 // fma
             | 1<<13 // cx16 (cmpxchg16)
+            | 1<<25 // aesni
             ;
         break;
     case 0x2:   // TLB and Cache info. Sending 1st gen P4 info...
diff --git a/src/wrapped/wrappedlibc.c b/src/wrapped/wrappedlibc.c
index f668b58b..1c08dc03 100755
--- a/src/wrapped/wrappedlibc.c
+++ b/src/wrapped/wrappedlibc.c
@@ -1256,7 +1256,7 @@ void CreateCPUInfoFile(int fd)
     P;
     sprintf(buff, "bogomips\t: %g\n", bogoMips);
     P;
-    sprintf(buff, "flags\t\t: fpu cx8 sep cmov clflush mmx sse sse2 syscall tsc lahf_lm ssse3 ht tm lm fma fxsr cpuid cx16 movbe pni\n");
+    sprintf(buff, "flags\t\t: fpu cx8 sep cmov clflush mmx sse sse2 syscall tsc lahf_lm ssse3 ht tm lm fma fxsr cpuid cx16 aes movbe pni\n");
     P;
     sprintf(buff, "address sizes\t: 46 bits physical, 48 bits virtual\n");
     P;
```
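A note on the soft path: `ff_mult` is multiplication in GF(2^8) reduced by the AES polynomial x^8 + x^4 + x^3 + x + 1 (0x11B). Each left shift multiplies by x, and the conditional XOR with 0x1B performs the reduction whenever the top bit falls off. A small stand-alone check (a hypothetical test harness, not part of the commit) of the coefficients MixColumns uses:

```c
#include <stdint.h>
#include <stdio.h>

// Same operation as the commit's ff_mult: GF(2^8) multiply, AES polynomial.
static uint8_t ff_mult(uint8_t a, uint8_t b)
{
    uint8_t r = 0;
    for (int i = 0; i < 8; i++) {
        if (b & 1)
            r ^= a;             // add (XOR) the current multiple of a
        uint8_t hi = a & 0x80;
        a <<= 1;                // multiply a by x
        if (hi)
            a ^= 0x1b;          // reduce mod x^8 + x^4 + x^3 + x + 1
        b >>= 1;
    }
    return r;
}

int main(void)
{
    // 0x02 * 0x87: 0x87<<1 overflows, so reduce: 0x0E ^ 0x1B = 0x15
    printf("%02x\n", ff_mult(0x02, 0x87));                            // prints 15
    // MixColumns coefficients are 2 and 3, and 3*a == (2*a) ^ a
    printf("%d\n", ff_mult(0x03, 0x57) == (ff_mult(0x02, 0x57) ^ 0x57)); // prints 1
    return 0;
}
```

The identity 3·a = (2·a) ⊕ a is also why the 0x02/0x03 (and, for the inverse matrix, 0x09/0x0B/0x0D/0x0E) coefficient patterns appear in the MixColumns and InvMixColumns loops above.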