Diffstat (limited to 'include')
-rw-r--r--   include/crypto/aes-round.h | 164 ++++++++++++++++++++++++++++++++
-rw-r--r--   include/crypto/aes.h       |  30 ------
2 files changed, 164 insertions(+), 30 deletions(-)
diff --git a/include/crypto/aes-round.h b/include/crypto/aes-round.h
new file mode 100644
index 0000000000..854fb0966a
--- /dev/null
+++ b/include/crypto/aes-round.h
@@ -0,0 +1,164 @@
+/*
+ * AES round fragments, generic version
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * Copyright (C) 2023 Linaro, Ltd.
+ */
+
+#ifndef CRYPTO_AES_ROUND_H
+#define CRYPTO_AES_ROUND_H
+
+/* Hosts with acceleration will usually need a 16-byte vector type. */
+typedef uint8_t AESStateVec __attribute__((vector_size(16)));
+
+typedef union {
+    uint8_t b[16];
+    uint32_t w[4];
+    uint64_t d[2];
+    AESStateVec v;
+} AESState;
+
+#include "host/crypto/aes-round.h"
+
+/*
+ * Perform MixColumns.
+ */
+
+void aesenc_MC_gen(AESState *ret, const AESState *st);
+void aesenc_MC_genrev(AESState *ret, const AESState *st);
+
+static inline void aesenc_MC(AESState *r, const AESState *st, bool be)
+{
+    if (HAVE_AES_ACCEL) {
+        aesenc_MC_accel(r, st, be);
+    } else if (HOST_BIG_ENDIAN == be) {
+        aesenc_MC_gen(r, st);
+    } else {
+        aesenc_MC_genrev(r, st);
+    }
+}
+
+/*
+ * Perform SubBytes + ShiftRows + AddRoundKey.
+ */
+
+void aesenc_SB_SR_AK_gen(AESState *ret, const AESState *st,
+                         const AESState *rk);
+void aesenc_SB_SR_AK_genrev(AESState *ret, const AESState *st,
+                            const AESState *rk);
+
+static inline void aesenc_SB_SR_AK(AESState *r, const AESState *st,
+                                   const AESState *rk, bool be)
+{
+    if (HAVE_AES_ACCEL) {
+        aesenc_SB_SR_AK_accel(r, st, rk, be);
+    } else if (HOST_BIG_ENDIAN == be) {
+        aesenc_SB_SR_AK_gen(r, st, rk);
+    } else {
+        aesenc_SB_SR_AK_genrev(r, st, rk);
+    }
+}
+
+/*
+ * Perform SubBytes + ShiftRows + MixColumns + AddRoundKey.
+ */
+
+void aesenc_SB_SR_MC_AK_gen(AESState *ret, const AESState *st,
+                            const AESState *rk);
+void aesenc_SB_SR_MC_AK_genrev(AESState *ret, const AESState *st,
+                               const AESState *rk);
+
+static inline void aesenc_SB_SR_MC_AK(AESState *r, const AESState *st,
+                                      const AESState *rk, bool be)
+{
+    if (HAVE_AES_ACCEL) {
+        aesenc_SB_SR_MC_AK_accel(r, st, rk, be);
+    } else if (HOST_BIG_ENDIAN == be) {
+        aesenc_SB_SR_MC_AK_gen(r, st, rk);
+    } else {
+        aesenc_SB_SR_MC_AK_genrev(r, st, rk);
+    }
+}
+
+/*
+ * Perform InvMixColumns.
+ */
+
+void aesdec_IMC_gen(AESState *ret, const AESState *st);
+void aesdec_IMC_genrev(AESState *ret, const AESState *st);
+
+static inline void aesdec_IMC(AESState *r, const AESState *st, bool be)
+{
+    if (HAVE_AES_ACCEL) {
+        aesdec_IMC_accel(r, st, be);
+    } else if (HOST_BIG_ENDIAN == be) {
+        aesdec_IMC_gen(r, st);
+    } else {
+        aesdec_IMC_genrev(r, st);
+    }
+}
+
+/*
+ * Perform InvSubBytes + InvShiftRows + AddRoundKey.
+ */
+
+void aesdec_ISB_ISR_AK_gen(AESState *ret, const AESState *st,
+                           const AESState *rk);
+void aesdec_ISB_ISR_AK_genrev(AESState *ret, const AESState *st,
+                              const AESState *rk);
+
+static inline void aesdec_ISB_ISR_AK(AESState *r, const AESState *st,
+                                     const AESState *rk, bool be)
+{
+    if (HAVE_AES_ACCEL) {
+        aesdec_ISB_ISR_AK_accel(r, st, rk, be);
+    } else if (HOST_BIG_ENDIAN == be) {
+        aesdec_ISB_ISR_AK_gen(r, st, rk);
+    } else {
+        aesdec_ISB_ISR_AK_genrev(r, st, rk);
+    }
+}
+
+/*
+ * Perform InvSubBytes + InvShiftRows + AddRoundKey + InvMixColumns.
+ */
+
+void aesdec_ISB_ISR_AK_IMC_gen(AESState *ret, const AESState *st,
+                               const AESState *rk);
+void aesdec_ISB_ISR_AK_IMC_genrev(AESState *ret, const AESState *st,
+                                  const AESState *rk);
+
+static inline void aesdec_ISB_ISR_AK_IMC(AESState *r, const AESState *st,
+                                         const AESState *rk, bool be)
+{
+    if (HAVE_AES_ACCEL) {
+        aesdec_ISB_ISR_AK_IMC_accel(r, st, rk, be);
+    } else if (HOST_BIG_ENDIAN == be) {
+        aesdec_ISB_ISR_AK_IMC_gen(r, st, rk);
+    } else {
+        aesdec_ISB_ISR_AK_IMC_genrev(r, st, rk);
+    }
+}
+
+/*
+ * Perform InvSubBytes + InvShiftRows + InvMixColumns + AddRoundKey.
+ */
+
+void aesdec_ISB_ISR_IMC_AK_gen(AESState *ret, const AESState *st,
+                               const AESState *rk);
+void aesdec_ISB_ISR_IMC_AK_genrev(AESState *ret, const AESState *st,
+                                  const AESState *rk);
+
+static inline void aesdec_ISB_ISR_IMC_AK(AESState *r, const AESState *st,
+                                         const AESState *rk, bool be)
+{
+    if (HAVE_AES_ACCEL) {
+        aesdec_ISB_ISR_IMC_AK_accel(r, st, rk, be);
+    } else if (HOST_BIG_ENDIAN == be) {
+        aesdec_ISB_ISR_IMC_AK_gen(r, st, rk);
+    } else {
+        aesdec_ISB_ISR_IMC_AK_genrev(r, st, rk);
+    }
+}
+
+#endif /* CRYPTO_AES_ROUND_H */
diff --git a/include/crypto/aes.h b/include/crypto/aes.h
index 822d64588c..709d4d226b 100644
--- a/include/crypto/aes.h
+++ b/include/crypto/aes.h
@@ -30,34 +30,4 @@ void AES_decrypt(const unsigned char *in, unsigned char *out,
 extern const uint8_t AES_sbox[256];
 extern const uint8_t AES_isbox[256];
 
-/* AES ShiftRows and InvShiftRows */
-extern const uint8_t AES_shifts[16];
-extern const uint8_t AES_ishifts[16];
-
-/* AES InvMixColumns */
-/* AES_imc[x][0] = [x].[0e, 09, 0d, 0b]; */
-/* AES_imc[x][1] = [x].[0b, 0e, 09, 0d]; */
-/* AES_imc[x][2] = [x].[0d, 0b, 0e, 09]; */
-/* AES_imc[x][3] = [x].[09, 0d, 0b, 0e]; */
-extern const uint32_t AES_imc[256][4];
-
-/*
-AES_Te0[x] = S [x].[02, 01, 01, 03];
-AES_Te1[x] = S [x].[03, 02, 01, 01];
-AES_Te2[x] = S [x].[01, 03, 02, 01];
-AES_Te3[x] = S [x].[01, 01, 03, 02];
-AES_Te4[x] = S [x].[01, 01, 01, 01];
-
-AES_Td0[x] = Si[x].[0e, 09, 0d, 0b];
-AES_Td1[x] = Si[x].[0b, 0e, 09, 0d];
-AES_Td2[x] = Si[x].[0d, 0b, 0e, 09];
-AES_Td3[x] = Si[x].[09, 0d, 0b, 0e];
-AES_Td4[x] = Si[x].[01, 01, 01, 01];
-*/
-
-extern const uint32_t AES_Te0[256], AES_Te1[256], AES_Te2[256],
-    AES_Te3[256], AES_Te4[256];
-extern const uint32_t AES_Td0[256], AES_Td1[256], AES_Td2[256],
-    AES_Td3[256], AES_Td4[256];
-
 #endif
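For reviewers, a minimal sketch of how a caller might chain these fragments into a complete encryption. This is not part of the patch: aes_encrypt_sketch, rk, and rounds are hypothetical names, the sketch assumes rk[0..rounds] already holds an expanded key schedule laid out to match the chosen 'be' byte order (rounds == 10 for AES-128), and it assumes the helpers tolerate in-place operation (r == st).

/*
 * Hypothetical usage sketch (not part of this patch): build a full
 * AES encryption from the round fragments declared above.
 */
static inline void aes_encrypt_sketch(AESState *out, const AESState *in,
                                      const AESState *rk, int rounds,
                                      bool be)
{
    AESState t = *in;

    /* Initial whitening: AddRoundKey only. */
    t.v ^= rk[0].v;

    /* Middle rounds: SubBytes + ShiftRows + MixColumns + AddRoundKey. */
    for (int i = 1; i < rounds; i++) {
        aesenc_SB_SR_MC_AK(&t, &t, &rk[i], be);
    }

    /* Final round omits MixColumns. */
    aesenc_SB_SR_AK(&t, &t, &rk[rounds], be);

    *out = t;
}

The fragment boundaries appear chosen so that hosts with AES instructions can map each helper onto a native operation (e.g. x86 AESENC covers ShiftRows + SubBytes + MixColumns + AddRoundKey), while the _gen/_genrev fallbacks handle either host endianness in portable C.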