| field | value | date |
|---|---|---|
| author | Stefan Hajnoczi <stefanha@redhat.com> | 2023-09-18 11:04:21 -0400 |
| committer | Stefan Hajnoczi <stefanha@redhat.com> | 2023-09-18 11:04:21 -0400 |
| commit | 13d6b1608160de40ec65ae4c32419e56714bbadf (patch) | |
| tree | 1197b4557f544c172508fee58e497540ed0ecd58 /include/crypto/clmul.h | |
| parent | 005ad32358f12fe9313a4a01918a55e60d4f39e5 (diff) | |
| parent | 055c99015a4ec3c608d0260592368adc604429ea (diff) | |
Merge tag 'pull-crypto-20230915' of https://gitlab.com/rth7680/qemu into staging
Unify implementation of carry-less multiply.
Accelerate carry-less multiply for 64x64->128.

# gpg: Signature made Fri 15 Sep 2023 12:40:26 EDT
# gpg: using RSA key 7A481E78868B4DB6A85A05C064DF38E8AF7E215F
# gpg: issuer "richard.henderson@linaro.org"
# gpg: Good signature from "Richard Henderson <richard.henderson@linaro.org>" [full]
# Primary key fingerprint: 7A48 1E78 868B 4DB6 A85A 05C0 64DF 38E8 AF7E 215F

* tag 'pull-crypto-20230915' of https://gitlab.com/rth7680/qemu:
  host/include/aarch64: Implement clmul.h
  host/include/i386: Implement clmul.h
  target/ppc: Use clmul_64
  target/s390x: Use clmul_64
  target/i386: Use clmul_64
  target/arm: Use clmul_64
  crypto: Add generic 64-bit carry-less multiply routine
  target/ppc: Use clmul_32* routines
  target/s390x: Use clmul_32* routines
  target/arm: Use clmul_32* routines
  crypto: Add generic 32-bit carry-less multiply routines
  target/ppc: Use clmul_16* routines
  target/s390x: Use clmul_16* routines
  target/arm: Use clmul_16* routines
  crypto: Add generic 16-bit carry-less multiply routines
  target/ppc: Use clmul_8* routines
  target/s390x: Use clmul_8* routines
  target/arm: Use clmul_8* routines
  crypto: Add generic 8-bit carry-less multiply routines

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
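For background on the operation this series unifies (not part of the commit message): a carry-less multiply is an ordinary shift-and-add multiply with the additions replaced by XOR, i.e. polynomial multiplication over GF(2), the primitive behind GHASH and the PCLMULQDQ/PMULL instructions. A minimal illustrative sketch of one 8x8->16 product in C; the name `clmul_8x8` here is hypothetical and is not QEMU's API:

```c
#include <stdint.h>

/* Illustrative only: one 8x8->16 carry-less multiply, bit by bit.
 * Partial products are combined with XOR instead of addition, so
 * no carries propagate between bit positions. */
static uint16_t clmul_8x8(uint8_t a, uint8_t b)
{
    uint16_t r = 0;
    for (int i = 0; i < 8; i++) {
        if (b & (1u << i)) {
            r ^= (uint16_t)a << i;
        }
    }
    return r;
}
```

For example, `clmul_8x8(3, 3)` yields 5, since (x+1)*(x+1) = x^2+1 over GF(2), whereas an ordinary multiply would give 9.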
Diffstat (limited to 'include/crypto/clmul.h')
| mode | path | insertions |
|---|---|---|
| -rw-r--r-- | include/crypto/clmul.h | 83 |

1 file changed, 83 insertions, 0 deletions
```diff
diff --git a/include/crypto/clmul.h b/include/crypto/clmul.h
new file mode 100644
index 0000000000..446931fe05
--- /dev/null
+++ b/include/crypto/clmul.h
@@ -0,0 +1,83 @@
+/*
+ * Carry-less multiply operations.
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * Copyright (C) 2023 Linaro, Ltd.
+ */
+
+#ifndef CRYPTO_CLMUL_H
+#define CRYPTO_CLMUL_H
+
+#include "qemu/int128.h"
+#include "host/crypto/clmul.h"
+
+/**
+ * clmul_8x8_low:
+ *
+ * Perform eight 8x8->8 carry-less multiplies.
+ */
+uint64_t clmul_8x8_low(uint64_t, uint64_t);
+
+/**
+ * clmul_8x4_even:
+ *
+ * Perform four 8x8->16 carry-less multiplies.
+ * The odd bytes of the inputs are ignored.
+ */
+uint64_t clmul_8x4_even(uint64_t, uint64_t);
+
+/**
+ * clmul_8x4_odd:
+ *
+ * Perform four 8x8->16 carry-less multiplies.
+ * The even bytes of the inputs are ignored.
+ */
+uint64_t clmul_8x4_odd(uint64_t, uint64_t);
+
+/**
+ * clmul_8x4_packed:
+ *
+ * Perform four 8x8->16 carry-less multiplies.
+ */
+uint64_t clmul_8x4_packed(uint32_t, uint32_t);
+
+/**
+ * clmul_16x2_even:
+ *
+ * Perform two 16x16->32 carry-less multiplies.
+ * The odd words of the inputs are ignored.
+ */
+uint64_t clmul_16x2_even(uint64_t, uint64_t);
+
+/**
+ * clmul_16x2_odd:
+ *
+ * Perform two 16x16->32 carry-less multiplies.
+ * The even words of the inputs are ignored.
+ */
+uint64_t clmul_16x2_odd(uint64_t, uint64_t);
+
+/**
+ * clmul_32:
+ *
+ * Perform a 32x32->64 carry-less multiply.
+ */
+uint64_t clmul_32(uint32_t, uint32_t);
+
+/**
+ * clmul_64:
+ *
+ * Perform a 64x64->128 carry-less multiply.
+ */
+Int128 clmul_64_gen(uint64_t, uint64_t);
+
+static inline Int128 clmul_64(uint64_t a, uint64_t b)
+{
+    if (HAVE_CLMUL_ACCEL) {
+        return clmul_64_accel(a, b);
+    } else {
+        return clmul_64_gen(a, b);
+    }
+}
+
+#endif /* CRYPTO_CLMUL_H */
```
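A usage sketch for the even/odd lane convention the header documents, assuming a QEMU build tree where "crypto/clmul.h" resolves; `main()` and the sample operands are hypothetical, not part of the patch:

```c
#include <inttypes.h>
#include <stdio.h>
#include "crypto/clmul.h"   /* assumes QEMU include paths are set up */

int main(void)
{
    /* Even-lane convention: only bytes 0, 2, 4, 6 of each input are
     * multiplied; each 8x8->16 product fills the matching 16-bit lane. */
    uint64_t r = clmul_8x4_even(0x03, 0x03);
    /* (x+1)*(x+1) = x^2+1 over GF(2), so lane 0 should hold 0x0005. */
    printf("%016" PRIx64 "\n", r);
    return 0;
}
```

The `_gen`/`_accel` split visible at the bottom of the header is the point of the series: callers use `clmul_64()` unconditionally, and the host header's `HAVE_CLMUL_ACCEL` decides at compile time whether the PCLMULQDQ/PMULL-backed `clmul_64_accel()` or the portable `clmul_64_gen()` is used.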