Diffstat (limited to 'host/include')

 host/include/aarch64/host/cpuinfo.h           |   1
 host/include/aarch64/host/crypto/aes-round.h  | 205
 host/include/generic/host/crypto/aes-round.h  |  33
 host/include/i386/host/cpuinfo.h              |   1
 host/include/i386/host/crypto/aes-round.h     | 152
 host/include/ppc/host/cpuinfo.h               |  30
 host/include/ppc/host/crypto/aes-round.h      | 182
 host/include/ppc64/host/cpuinfo.h             |   1
 host/include/ppc64/host/crypto/aes-round.h    |   1
 host/include/x86_64/host/crypto/aes-round.h   |   1
 10 files changed, 607 insertions(+), 0 deletions(-)
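
All of the headers below are written against names this patch does not itself define: AESState and AESStateVec are assumed to come from QEMU's common crypto/aes-round.h, and likely() and QEMU_ERROR() from the common compiler headers. For orientation only, here is a hypothetical stand-in for the state type; nothing on this page depends on more than a 16-byte object with a vector member .v that casts to the host vector types.

/*
 * Hypothetical stand-in, NOT part of this patch: the real definitions
 * are assumed to live in crypto/aes-round.h.  The diff below only
 * needs AESState to be 16 bytes with a vector member .v that converts
 * to uint8x16_t, __m128i, or the ppc vector type.
 */
typedef uint8_t AESStateVec __attribute__((vector_size(16)));

typedef union {
    uint8_t b[16];
    uint64_t d[2];
    AESStateVec v;
} AESState;
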
diff --git a/host/include/aarch64/host/cpuinfo.h b/host/include/aarch64/host/cpuinfo.h
index 82227890b4..05feeb4f43 100644
--- a/host/include/aarch64/host/cpuinfo.h
+++ b/host/include/aarch64/host/cpuinfo.h
@@ -9,6 +9,7 @@
 #define CPUINFO_ALWAYS    (1u << 0)  /* so cpuinfo is nonzero */
 #define CPUINFO_LSE       (1u << 1)
 #define CPUINFO_LSE2      (1u << 2)
+#define CPUINFO_AES       (1u << 3)
 
 /* Initialized with a constructor. */
 extern unsigned cpuinfo;
diff --git a/host/include/aarch64/host/crypto/aes-round.h b/host/include/aarch64/host/crypto/aes-round.h
new file mode 100644
index 0000000000..8b5f88d50c
--- /dev/null
+++ b/host/include/aarch64/host/crypto/aes-round.h
@@ -0,0 +1,205 @@
+/*
+ * AArch64 specific aes acceleration.
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef AARCH64_HOST_CRYPTO_AES_ROUND_H
+#define AARCH64_HOST_CRYPTO_AES_ROUND_H
+
+#include "host/cpuinfo.h"
+#include <arm_neon.h>
+
+#ifdef __ARM_FEATURE_AES
+# define HAVE_AES_ACCEL  true
+#else
+# define HAVE_AES_ACCEL  likely(cpuinfo & CPUINFO_AES)
+#endif
+#if !defined(__ARM_FEATURE_AES) && defined(CONFIG_ARM_AES_BUILTIN)
+# define ATTR_AES_ACCEL  __attribute__((target("+crypto")))
+#else
+# define ATTR_AES_ACCEL
+#endif
+
+static inline uint8x16_t aes_accel_bswap(uint8x16_t x)
+{
+    return vqtbl1q_u8(x, (uint8x16_t){ 15, 14, 13, 12, 11, 10, 9, 8,
+                                       7, 6, 5, 4, 3, 2, 1, 0, });
+}
+
+#ifdef CONFIG_ARM_AES_BUILTIN
+# define aes_accel_aesd            vaesdq_u8
+# define aes_accel_aese            vaeseq_u8
+# define aes_accel_aesmc           vaesmcq_u8
+# define aes_accel_aesimc          vaesimcq_u8
+# define aes_accel_aesd_imc(S, K)  vaesimcq_u8(vaesdq_u8(S, K))
+# define aes_accel_aese_mc(S, K)   vaesmcq_u8(vaeseq_u8(S, K))
+#else
+static inline uint8x16_t aes_accel_aesd(uint8x16_t d, uint8x16_t k)
+{
+    asm(".arch_extension aes\n\t"
+        "aesd %0.16b, %1.16b" : "+w"(d) : "w"(k));
+    return d;
+}
+
+static inline uint8x16_t aes_accel_aese(uint8x16_t d, uint8x16_t k)
+{
+    asm(".arch_extension aes\n\t"
+        "aese %0.16b, %1.16b" : "+w"(d) : "w"(k));
+    return d;
+}
+
+static inline uint8x16_t aes_accel_aesmc(uint8x16_t d)
+{
+    asm(".arch_extension aes\n\t"
+        "aesmc %0.16b, %1.16b" : "=w"(d) : "w"(d));
+    return d;
+}
+
+static inline uint8x16_t aes_accel_aesimc(uint8x16_t d)
+{
+    asm(".arch_extension aes\n\t"
+        "aesimc %0.16b, %1.16b" : "=w"(d) : "w"(d));
+    return d;
+}
+
+/* Most CPUs fuse AESD+AESIMC in the execution pipeline. */
+static inline uint8x16_t aes_accel_aesd_imc(uint8x16_t d, uint8x16_t k)
+{
+    asm(".arch_extension aes\n\t"
+        "aesd %0.16b, %1.16b\n\t"
+        "aesimc %0.16b, %0.16b" : "+w"(d) : "w"(k));
+    return d;
+}
+
+/* Most CPUs fuse AESE+AESMC in the execution pipeline. */
+static inline uint8x16_t aes_accel_aese_mc(uint8x16_t d, uint8x16_t k)
+{
+    asm(".arch_extension aes\n\t"
+        "aese %0.16b, %1.16b\n\t"
+        "aesmc %0.16b, %0.16b" : "+w"(d) : "w"(k));
+    return d;
+}
+#endif /* CONFIG_ARM_AES_BUILTIN */
+
+static inline void ATTR_AES_ACCEL
+aesenc_MC_accel(AESState *ret, const AESState *st, bool be)
+{
+    uint8x16_t t = (uint8x16_t)st->v;
+
+    if (be) {
+        t = aes_accel_bswap(t);
+        t = aes_accel_aesmc(t);
+        t = aes_accel_bswap(t);
+    } else {
+        t = aes_accel_aesmc(t);
+    }
+    ret->v = (AESStateVec)t;
+}
+
+static inline void ATTR_AES_ACCEL
+aesenc_SB_SR_AK_accel(AESState *ret, const AESState *st,
+                      const AESState *rk, bool be)
+{
+    uint8x16_t t = (uint8x16_t)st->v;
+    uint8x16_t z = { };
+
+    if (be) {
+        t = aes_accel_bswap(t);
+        t = aes_accel_aese(t, z);
+        t = aes_accel_bswap(t);
+    } else {
+        t = aes_accel_aese(t, z);
+    }
+    ret->v = (AESStateVec)t ^ rk->v;
+}
+
+static inline void ATTR_AES_ACCEL
+aesenc_SB_SR_MC_AK_accel(AESState *ret, const AESState *st,
+                         const AESState *rk, bool be)
+{
+    uint8x16_t t = (uint8x16_t)st->v;
+    uint8x16_t z = { };
+
+    if (be) {
+        t = aes_accel_bswap(t);
+        t = aes_accel_aese_mc(t, z);
+        t = aes_accel_bswap(t);
+    } else {
+        t = aes_accel_aese_mc(t, z);
+    }
+    ret->v = (AESStateVec)t ^ rk->v;
+}
+
+static inline void ATTR_AES_ACCEL
+aesdec_IMC_accel(AESState *ret, const AESState *st, bool be)
+{
+    uint8x16_t t = (uint8x16_t)st->v;
+
+    if (be) {
+        t = aes_accel_bswap(t);
+        t = aes_accel_aesimc(t);
+        t = aes_accel_bswap(t);
+    } else {
+        t = aes_accel_aesimc(t);
+    }
+    ret->v = (AESStateVec)t;
+}
+
+static inline void ATTR_AES_ACCEL
+aesdec_ISB_ISR_AK_accel(AESState *ret, const AESState *st,
+                        const AESState *rk, bool be)
+{
+    uint8x16_t t = (uint8x16_t)st->v;
+    uint8x16_t z = { };
+
+    if (be) {
+        t = aes_accel_bswap(t);
+        t = aes_accel_aesd(t, z);
+        t = aes_accel_bswap(t);
+    } else {
+        t = aes_accel_aesd(t, z);
+    }
+    ret->v = (AESStateVec)t ^ rk->v;
+}
+
+static inline void ATTR_AES_ACCEL
+aesdec_ISB_ISR_AK_IMC_accel(AESState *ret, const AESState *st,
+                            const AESState *rk, bool be)
+{
+    uint8x16_t t = (uint8x16_t)st->v;
+    uint8x16_t k = (uint8x16_t)rk->v;
+    uint8x16_t z = { };
+
+    if (be) {
+        t = aes_accel_bswap(t);
+        k = aes_accel_bswap(k);
+        t = aes_accel_aesd(t, z);
+        t ^= k;
+        t = aes_accel_aesimc(t);
+        t = aes_accel_bswap(t);
+    } else {
+        t = aes_accel_aesd(t, z);
+        t ^= k;
+        t = aes_accel_aesimc(t);
+    }
+    ret->v = (AESStateVec)t;
+}
+
+static inline void ATTR_AES_ACCEL
+aesdec_ISB_ISR_IMC_AK_accel(AESState *ret, const AESState *st,
+                            const AESState *rk, bool be)
+{
+    uint8x16_t t = (uint8x16_t)st->v;
+    uint8x16_t z = { };
+
+    if (be) {
+        t = aes_accel_bswap(t);
+        t = aes_accel_aesd_imc(t, z);
+        t = aes_accel_bswap(t);
+    } else {
+        t = aes_accel_aesd_imc(t, z);
+    }
+    ret->v = (AESStateVec)t ^ rk->v;
+}
+
+#endif /* AARCH64_HOST_CRYPTO_AES_ROUND_H */
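
The CPUINFO_AES bit added above is only consumed on this page; the code that sets it is elsewhere. On Linux it would presumably be derived from the AT_HWCAP auxiliary vector in the existing aarch64 cpuinfo constructor, along these lines (a sketch under that assumption; the function name is mine, not QEMU's):

#include "host/cpuinfo.h"
#include <sys/auxv.h>
#include <asm/hwcap.h>

unsigned cpuinfo;

/* Runs before main(), matching the "initialized with a constructor"
   comment in cpuinfo.h above. */
static void __attribute__((constructor)) init_cpuinfo(void)
{
    unsigned long hwcap = getauxval(AT_HWCAP);
    unsigned info = CPUINFO_ALWAYS;

    if (hwcap & HWCAP_AES) {
        info |= CPUINFO_AES;    /* FEAT_AES: AESE/AESD/AESMC/AESIMC */
    }
    cpuinfo = info;
}
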
diff --git a/host/include/generic/host/crypto/aes-round.h b/host/include/generic/host/crypto/aes-round.h
new file mode 100644
index 0000000000..1b9720f917
--- /dev/null
+++ b/host/include/generic/host/crypto/aes-round.h
@@ -0,0 +1,33 @@
+/*
+ * No host specific aes acceleration.
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef GENERIC_HOST_CRYPTO_AES_ROUND_H
+#define GENERIC_HOST_CRYPTO_AES_ROUND_H
+
+#define HAVE_AES_ACCEL  false
+#define ATTR_AES_ACCEL
+
+void aesenc_MC_accel(AESState *, const AESState *, bool)
+    QEMU_ERROR("unsupported accel");
+void aesenc_SB_SR_AK_accel(AESState *, const AESState *,
+                           const AESState *, bool)
+    QEMU_ERROR("unsupported accel");
+void aesenc_SB_SR_MC_AK_accel(AESState *, const AESState *,
+                              const AESState *, bool)
+    QEMU_ERROR("unsupported accel");
+
+void aesdec_IMC_accel(AESState *, const AESState *, bool)
+    QEMU_ERROR("unsupported accel");
+void aesdec_ISB_ISR_AK_accel(AESState *, const AESState *,
+                             const AESState *, bool)
+    QEMU_ERROR("unsupported accel");
+void aesdec_ISB_ISR_AK_IMC_accel(AESState *, const AESState *,
+                                 const AESState *, bool)
+    QEMU_ERROR("unsupported accel");
+void aesdec_ISB_ISR_IMC_AK_accel(AESState *, const AESState *,
+                                 const AESState *, bool)
+    QEMU_ERROR("unsupported accel");
+
+#endif /* GENERIC_HOST_CRYPTO_AES_ROUND_H */
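
The generic header declares the _accel functions but poisons them with QEMU_ERROR, so they may only be referenced behind a HAVE_AES_ACCEL test the compiler can fold away; here HAVE_AES_ACCEL is the constant false, so guarded calls are dead code. A minimal sketch of the assumed mechanism (QEMU_ERROR itself is not in this diff; on gcc it is expected to be the error attribute):

/* Assumed shape of QEMU_ERROR; the real macro lives in QEMU's common
   compiler header and is not shown in this diff. */
#define QEMU_ERROR(msg) __attribute__((error(msg)))

void no_accel(void) QEMU_ERROR("unsupported accel");

int main(void)
{
    if (0) {            /* stands in for HAVE_AES_ACCEL == false */
        no_accel();     /* dead code, eliminated: this compiles */
    }
    /* no_accel();         a reachable call would fail at compile time */
    return 0;
}
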
diff --git a/host/include/i386/host/cpuinfo.h b/host/include/i386/host/cpuinfo.h
index a6537123cf..073d0a426f 100644
--- a/host/include/i386/host/cpuinfo.h
+++ b/host/include/i386/host/cpuinfo.h
@@ -26,6 +26,7 @@
 #define CPUINFO_AVX512VBMI2     (1u << 15)
 #define CPUINFO_ATOMIC_VMOVDQA  (1u << 16)
 #define CPUINFO_ATOMIC_VMOVDQU  (1u << 17)
+#define CPUINFO_AES             (1u << 18)
 
 /* Initialized with a constructor. */
 extern unsigned cpuinfo;
diff --git a/host/include/i386/host/crypto/aes-round.h b/host/include/i386/host/crypto/aes-round.h
new file mode 100644
index 0000000000..59a64130f7
--- /dev/null
+++ b/host/include/i386/host/crypto/aes-round.h
@@ -0,0 +1,152 @@
+/*
+ * x86 specific aes acceleration.
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef X86_HOST_CRYPTO_AES_ROUND_H
+#define X86_HOST_CRYPTO_AES_ROUND_H
+
+#include "host/cpuinfo.h"
+#include <immintrin.h>
+
+#if defined(__AES__) && defined(__SSSE3__)
+# define HAVE_AES_ACCEL  true
+# define ATTR_AES_ACCEL
+#else
+# define HAVE_AES_ACCEL  likely(cpuinfo & CPUINFO_AES)
+# define ATTR_AES_ACCEL  __attribute__((target("aes,ssse3")))
+#endif
+
+static inline __m128i ATTR_AES_ACCEL
+aes_accel_bswap(__m128i x)
+{
+    return _mm_shuffle_epi8(x, _mm_set_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8,
+                                            9, 10, 11, 12, 13, 14, 15));
+}
+
+static inline void ATTR_AES_ACCEL
+aesenc_MC_accel(AESState *ret, const AESState *st, bool be)
+{
+    __m128i t = (__m128i)st->v;
+    __m128i z = _mm_setzero_si128();
+
+    if (be) {
+        t = aes_accel_bswap(t);
+        t = _mm_aesdeclast_si128(t, z);
+        t = _mm_aesenc_si128(t, z);
+        t = aes_accel_bswap(t);
+    } else {
+        t = _mm_aesdeclast_si128(t, z);
+        t = _mm_aesenc_si128(t, z);
+    }
+    ret->v = (AESStateVec)t;
+}
+
+static inline void ATTR_AES_ACCEL
+aesenc_SB_SR_AK_accel(AESState *ret, const AESState *st,
+                      const AESState *rk, bool be)
+{
+    __m128i t = (__m128i)st->v;
+    __m128i k = (__m128i)rk->v;
+
+    if (be) {
+        t = aes_accel_bswap(t);
+        k = aes_accel_bswap(k);
+        t = _mm_aesenclast_si128(t, k);
+        t = aes_accel_bswap(t);
+    } else {
+        t = _mm_aesenclast_si128(t, k);
+    }
+    ret->v = (AESStateVec)t;
+}
+
+static inline void ATTR_AES_ACCEL
+aesenc_SB_SR_MC_AK_accel(AESState *ret, const AESState *st,
+                         const AESState *rk, bool be)
+{
+    __m128i t = (__m128i)st->v;
+    __m128i k = (__m128i)rk->v;
+
+    if (be) {
+        t = aes_accel_bswap(t);
+        k = aes_accel_bswap(k);
+        t = _mm_aesenc_si128(t, k);
+        t = aes_accel_bswap(t);
+    } else {
+        t = _mm_aesenc_si128(t, k);
+    }
+    ret->v = (AESStateVec)t;
+}
+
+static inline void ATTR_AES_ACCEL
+aesdec_IMC_accel(AESState *ret, const AESState *st, bool be)
+{
+    __m128i t = (__m128i)st->v;
+
+    if (be) {
+        t = aes_accel_bswap(t);
+        t = _mm_aesimc_si128(t);
+        t = aes_accel_bswap(t);
+    } else {
+        t = _mm_aesimc_si128(t);
+    }
+    ret->v = (AESStateVec)t;
+}
+
+static inline void ATTR_AES_ACCEL
+aesdec_ISB_ISR_AK_accel(AESState *ret, const AESState *st,
+                        const AESState *rk, bool be)
+{
+    __m128i t = (__m128i)st->v;
+    __m128i k = (__m128i)rk->v;
+
+    if (be) {
+        t = aes_accel_bswap(t);
+        k = aes_accel_bswap(k);
+        t = _mm_aesdeclast_si128(t, k);
+        t = aes_accel_bswap(t);
+    } else {
+        t = _mm_aesdeclast_si128(t, k);
+    }
+    ret->v = (AESStateVec)t;
+}
+
+static inline void ATTR_AES_ACCEL
+aesdec_ISB_ISR_AK_IMC_accel(AESState *ret, const AESState *st,
+                            const AESState *rk, bool be)
+{
+    __m128i t = (__m128i)st->v;
+    __m128i k = (__m128i)rk->v;
+
+    if (be) {
+        t = aes_accel_bswap(t);
+        k = aes_accel_bswap(k);
+        t = _mm_aesdeclast_si128(t, k);
+        t = _mm_aesimc_si128(t);
+        t = aes_accel_bswap(t);
+    } else {
+        t = _mm_aesdeclast_si128(t, k);
+        t = _mm_aesimc_si128(t);
+    }
+    ret->v = (AESStateVec)t;
+}
+
+static inline void ATTR_AES_ACCEL
+aesdec_ISB_ISR_IMC_AK_accel(AESState *ret, const AESState *st,
+                            const AESState *rk, bool be)
+{
+    __m128i t = (__m128i)st->v;
+    __m128i k = (__m128i)rk->v;
+
+    if (be) {
+        t = aes_accel_bswap(t);
+        k = aes_accel_bswap(k);
+        t = _mm_aesdec_si128(t, k);
+        t = aes_accel_bswap(t);
+    } else {
+        t = _mm_aesdec_si128(t, k);
+    }
+    ret->v = (AESStateVec)t;
+}
+
+#endif /* X86_HOST_CRYPTO_AES_ROUND_H */
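
x86 has no bare MixColumns instruction, so aesenc_MC_accel above synthesizes it: _mm_aesdeclast_si128 with a zero key applies InvShiftRows and InvSubBytes, and _mm_aesenc_si128 with a zero key then applies ShiftRows, SubBytes and MixColumns. ShiftRows is a byte permutation and SubBytes acts per byte, so the two commute, the inverse pair cancels, and only MixColumns survives. A standalone check against a reference MixColumns (compile with -maes -mssse3; the helper names here are mine, not QEMU's):

#include <immintrin.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint8_t xtime(uint8_t x)
{
    return (x << 1) ^ (x & 0x80 ? 0x1b : 0);   /* multiply by 2 in GF(2^8) */
}

/* Reference MixColumns over the 16-byte column-major AES state. */
static void mix_columns_ref(uint8_t *s)
{
    for (int c = 0; c < 4; c++) {
        uint8_t a0 = s[4 * c], a1 = s[4 * c + 1];
        uint8_t a2 = s[4 * c + 2], a3 = s[4 * c + 3];

        s[4 * c + 0] = xtime(a0) ^ (xtime(a1) ^ a1) ^ a2 ^ a3;
        s[4 * c + 1] = a0 ^ xtime(a1) ^ (xtime(a2) ^ a2) ^ a3;
        s[4 * c + 2] = a0 ^ a1 ^ xtime(a2) ^ (xtime(a3) ^ a3);
        s[4 * c + 3] = (xtime(a0) ^ a0) ^ a1 ^ a2 ^ xtime(a3);
    }
}

int main(void)
{
    uint8_t in[16], out[16], ref[16];
    __m128i t, z = _mm_setzero_si128();

    for (int i = 0; i < 16; i++) {
        in[i] = i * 17 + 3;               /* arbitrary test pattern */
    }

    t = _mm_loadu_si128((const __m128i *)in);
    t = _mm_aesdeclast_si128(t, z);       /* InvShiftRows o InvSubBytes */
    t = _mm_aesenc_si128(t, z);           /* MixColumns o SubBytes o ShiftRows */
    _mm_storeu_si128((__m128i *)out, t);

    memcpy(ref, in, 16);
    mix_columns_ref(ref);

    printf("%s\n", memcmp(out, ref, 16) == 0 ? "match" : "MISMATCH");
    return 0;
}
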
diff --git a/host/include/ppc/host/cpuinfo.h b/host/include/ppc/host/cpuinfo.h
new file mode 100644
index 0000000000..29ee7f9ef8
--- /dev/null
+++ b/host/include/ppc/host/cpuinfo.h
@@ -0,0 +1,30 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ * Host specific cpu identification for ppc.
+ */
+
+#ifndef HOST_CPUINFO_H
+#define HOST_CPUINFO_H
+
+/* Digested version of <cpuid.h> */
+
+#define CPUINFO_ALWAYS    (1u << 0)  /* so cpuinfo is nonzero */
+#define CPUINFO_V2_06     (1u << 1)
+#define CPUINFO_V2_07     (1u << 2)
+#define CPUINFO_V3_0      (1u << 3)
+#define CPUINFO_V3_1      (1u << 4)
+#define CPUINFO_ISEL      (1u << 5)
+#define CPUINFO_ALTIVEC   (1u << 6)
+#define CPUINFO_VSX       (1u << 7)
+#define CPUINFO_CRYPTO    (1u << 8)
+
+/* Initialized with a constructor. */
+extern unsigned cpuinfo;
+
+/*
+ * We cannot rely on constructor ordering, so other constructors must
+ * use the function interface rather than the variable above.
+ */
+unsigned cpuinfo_init(void);
+
+#endif /* HOST_CPUINFO_H */
diff --git a/host/include/ppc/host/crypto/aes-round.h b/host/include/ppc/host/crypto/aes-round.h
new file mode 100644
index 0000000000..8062d2a537
--- /dev/null
+++ b/host/include/ppc/host/crypto/aes-round.h
@@ -0,0 +1,182 @@
+/*
+ * Power v2.07 specific aes acceleration.
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef PPC_HOST_CRYPTO_AES_ROUND_H
+#define PPC_HOST_CRYPTO_AES_ROUND_H
+
+#ifdef __ALTIVEC__
+#include "host/cpuinfo.h"
+
+#ifdef __CRYPTO__
+# define HAVE_AES_ACCEL  true
+#else
+# define HAVE_AES_ACCEL  likely(cpuinfo & CPUINFO_CRYPTO)
+#endif
+#define ATTR_AES_ACCEL
+
+/*
+ * While there is <altivec.h>, both gcc and clang "aid" with the
+ * endianness issues in different ways.  Just use inline asm instead.
+ */
+
+/* Bytes in memory are host-endian; bytes in register are @be. */
+static inline AESStateVec aes_accel_ld(const AESState *p, bool be)
+{
+    AESStateVec r;
+
+    if (be) {
+        asm("lvx %0, 0, %1" : "=v"(r) : "r"(p), "m"(*p));
+    } else if (HOST_BIG_ENDIAN) {
+        AESStateVec rev = {
+            15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
+        };
+        asm("lvx %0, 0, %1\n\t"
+            "vperm %0, %0, %0, %2"
+            : "=v"(r) : "r"(p), "v"(rev), "m"(*p));
+    } else {
+#ifdef __POWER9_VECTOR__
+        asm("lxvb16x %x0, 0, %1" : "=v"(r) : "r"(p), "m"(*p));
+#else
+        asm("lxvd2x %x0, 0, %1\n\t"
+            "xxpermdi %x0, %x0, %x0, 2"
+            : "=v"(r) : "r"(p), "m"(*p));
+#endif
+    }
+    return r;
+}
+
+static void aes_accel_st(AESState *p, AESStateVec r, bool be)
+{
+    if (be) {
+        asm("stvx %1, 0, %2" : "=m"(*p) : "v"(r), "r"(p));
+    } else if (HOST_BIG_ENDIAN) {
+        AESStateVec rev = {
+            15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
+        };
+        asm("vperm %1, %1, %1, %2\n\t"
+            "stvx %1, 0, %3"
+            : "=m"(*p), "+v"(r) : "v"(rev), "r"(p));
+    } else {
+#ifdef __POWER9_VECTOR__
+        asm("stxvb16x %x1, 0, %2" : "=m"(*p) : "v"(r), "r"(p));
+#else
+        asm("xxpermdi %x1, %x1, %x1, 2\n\t"
+            "stxvd2x %x1, 0, %2"
+            : "=m"(*p), "+v"(r) : "r"(p));
+#endif
+    }
+}
+
+static inline AESStateVec aes_accel_vcipher(AESStateVec d, AESStateVec k)
+{
+    asm("vcipher %0, %0, %1" : "+v"(d) : "v"(k));
+    return d;
+}
+
+static inline AESStateVec aes_accel_vncipher(AESStateVec d, AESStateVec k)
+{
+    asm("vncipher %0, %0, %1" : "+v"(d) : "v"(k));
+    return d;
+}
+
+static inline AESStateVec aes_accel_vcipherlast(AESStateVec d, AESStateVec k)
+{
+    asm("vcipherlast %0, %0, %1" : "+v"(d) : "v"(k));
+    return d;
+}
+
+static inline AESStateVec aes_accel_vncipherlast(AESStateVec d, AESStateVec k)
+{
+    asm("vncipherlast %0, %0, %1" : "+v"(d) : "v"(k));
+    return d;
+}
+
+static inline void
+aesenc_MC_accel(AESState *ret, const AESState *st, bool be)
+{
+    AESStateVec t, z = { };
+
+    t = aes_accel_ld(st, be);
+    t = aes_accel_vncipherlast(t, z);
+    t = aes_accel_vcipher(t, z);
+    aes_accel_st(ret, t, be);
+}
+
+static inline void
+aesenc_SB_SR_AK_accel(AESState *ret, const AESState *st,
+                      const AESState *rk, bool be)
+{
+    AESStateVec t, k;
+
+    t = aes_accel_ld(st, be);
+    k = aes_accel_ld(rk, be);
+    t = aes_accel_vcipherlast(t, k);
+    aes_accel_st(ret, t, be);
+}
+
+static inline void
+aesenc_SB_SR_MC_AK_accel(AESState *ret, const AESState *st,
+                         const AESState *rk, bool be)
+{
+    AESStateVec t, k;
+
+    t = aes_accel_ld(st, be);
+    k = aes_accel_ld(rk, be);
+    t = aes_accel_vcipher(t, k);
+    aes_accel_st(ret, t, be);
+}
+
+static inline void
+aesdec_IMC_accel(AESState *ret, const AESState *st, bool be)
+{
+    AESStateVec t, z = { };
+
+    t = aes_accel_ld(st, be);
+    t = aes_accel_vcipherlast(t, z);
+    t = aes_accel_vncipher(t, z);
+    aes_accel_st(ret, t, be);
+}
+
+static inline void
+aesdec_ISB_ISR_AK_accel(AESState *ret, const AESState *st,
+                        const AESState *rk, bool be)
+{
+    AESStateVec t, k;
+
+    t = aes_accel_ld(st, be);
+    k = aes_accel_ld(rk, be);
+    t = aes_accel_vncipherlast(t, k);
+    aes_accel_st(ret, t, be);
+}
+
+static inline void
+aesdec_ISB_ISR_AK_IMC_accel(AESState *ret, const AESState *st,
+                            const AESState *rk, bool be)
+{
+    AESStateVec t, k;
+
+    t = aes_accel_ld(st, be);
+    k = aes_accel_ld(rk, be);
+    t = aes_accel_vncipher(t, k);
+    aes_accel_st(ret, t, be);
+}
+
+static inline void
+aesdec_ISB_ISR_IMC_AK_accel(AESState *ret, const AESState *st,
+                            const AESState *rk, bool be)
+{
+    AESStateVec t, k, z = { };
+
+    t = aes_accel_ld(st, be);
+    k = aes_accel_ld(rk, be);
+    t = aes_accel_vncipher(t, z);
+    aes_accel_st(ret, t ^ k, be);
+}
+#else
+/* Without ALTIVEC, we can't even write inline assembly. */
+#include "host/include/generic/host/crypto/aes-round.h"
+#endif
+
+#endif /* PPC_HOST_CRYPTO_AES_ROUND_H */
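
Unlike the other hosts, the new ppc cpuinfo.h exports cpuinfo_init() because constructor ordering is unreliable. The implementation is not part of this page; under the assumption that it reads the Linux auxiliary vector, a minimal sketch might look like this (PPC_FEATURE2_VEC_CRYPTO is the documented HWCAP2 bit for the v2.07 vcipher/vncipher instructions; the real code presumably fills in the other CPUINFO_* bits too):

#include "host/cpuinfo.h"
#include <sys/auxv.h>
#include <asm/cputable.h>

unsigned cpuinfo;

unsigned cpuinfo_init(void)
{
    unsigned info = cpuinfo;

    if (info) {
        return info;            /* already initialized */
    }
    info = CPUINFO_ALWAYS;
    if (getauxval(AT_HWCAP2) & PPC_FEATURE2_VEC_CRYPTO) {
        info |= CPUINFO_CRYPTO;
    }
    cpuinfo = info;
    return info;
}
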
diff --git a/host/include/ppc64/host/cpuinfo.h b/host/include/ppc64/host/cpuinfo.h
new file mode 100644
index 0000000000..2f036a0627
--- /dev/null
+++ b/host/include/ppc64/host/cpuinfo.h
@@ -0,0 +1 @@
+#include "host/include/ppc/host/cpuinfo.h"
diff --git a/host/include/ppc64/host/crypto/aes-round.h b/host/include/ppc64/host/crypto/aes-round.h
new file mode 100644
index 0000000000..5eeba6dcb7
--- /dev/null
+++ b/host/include/ppc64/host/crypto/aes-round.h
@@ -0,0 +1 @@
+#include "host/include/ppc/host/crypto/aes-round.h"
diff --git a/host/include/x86_64/host/crypto/aes-round.h b/host/include/x86_64/host/crypto/aes-round.h
new file mode 100644
index 0000000000..2773cc9f10
--- /dev/null
+++ b/host/include/x86_64/host/crypto/aes-round.h
@@ -0,0 +1 @@
+#include "host/include/i386/host/crypto/aes-round.h"
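
Nothing on this page calls the _accel functions; the expected consumer is QEMU's common AES code, which is assumed to resolve "host/crypto/aes-round.h" to the matching per-host directory, falling back to generic/. A hedged sketch of the consuming pattern, with the wrapper and the _gen fallback names invented for illustration:

#include "host/crypto/aes-round.h"

/*
 * Hypothetical wrapper: when the compiler already guarantees the host
 * feature, HAVE_AES_ACCEL is the constant true and the fallback branch
 * is discarded; otherwise it tests one cpuinfo bit at run time.
 */
static inline void aesenc_SB_SR_MC_AK(AESState *r, const AESState *st,
                                      const AESState *rk, bool be)
{
    if (HAVE_AES_ACCEL) {
        aesenc_SB_SR_MC_AK_accel(r, st, rk, be);
    } else {
        aesenc_SB_SR_MC_AK_gen(r, st, rk, be);  /* hypothetical C fallback */
    }
}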