author     Stefan Hajnoczi <stefanha@redhat.com>   2023-09-18 11:04:21 -0400
committer  Stefan Hajnoczi <stefanha@redhat.com>   2023-09-18 11:04:21 -0400
commit     13d6b1608160de40ec65ae4c32419e56714bbadf (patch)
tree       1197b4557f544c172508fee58e497540ed0ecd58 /crypto/clmul.c
parent     005ad32358f12fe9313a4a01918a55e60d4f39e5 (diff)
parent     055c99015a4ec3c608d0260592368adc604429ea (diff)
Merge tag 'pull-crypto-20230915' of https://gitlab.com/rth7680/qemu into staging
Unify implementation of carry-less multiply.
Accelerate carry-less multiply for 64x64->128.

# -----BEGIN PGP SIGNATURE-----
#
# iQFRBAABCgA7FiEEekgeeIaLTbaoWgXAZN846K9+IV8FAmUEiPodHHJpY2hhcmQu
# aGVuZGVyc29uQGxpbmFyby5vcmcACgkQZN846K9+IV/akgf/XkiIeErWJr1YXSbS
# YPQtCsDAfIrqn3RiyQ2uwSn2eeuwVqTFFPGER04YegRDK8dyO874JBfvOwmBT70J
# I/aU8Z4BbRyNu9nfaCtFMlXQH9KArAKcAds1PnshfcnI5T2yBloZ1sAU97IuJFZk
# Uuz96H60+ohc4wzaUiPqPhXQStgZeSYwwAJB0s25DhCckdea0udRCAJ1tQTVpxkM
# wIFef1SHPoM6DtMzFKHLLUH6VivSlHjqx8GqFusa7pVqfQyDzNBfwvDl1F/bkE07
# yTocQEkV3QnZvIplhqUxAaZXIFZr9BNk7bDimMjHW6z3pNPN3T8zRn4trNjxbgPV
# jqzAtg==
# =8nnk
# -----END PGP SIGNATURE-----
# gpg: Signature made Fri 15 Sep 2023 12:40:26 EDT
# gpg:                using RSA key 7A481E78868B4DB6A85A05C064DF38E8AF7E215F
# gpg:                issuer "richard.henderson@linaro.org"
# gpg: Good signature from "Richard Henderson <richard.henderson@linaro.org>" [full]
# Primary key fingerprint: 7A48 1E78 868B 4DB6 A85A  05C0 64DF 38E8 AF7E 215F

* tag 'pull-crypto-20230915' of https://gitlab.com/rth7680/qemu:
  host/include/aarch64: Implement clmul.h
  host/include/i386: Implement clmul.h
  target/ppc: Use clmul_64
  target/s390x: Use clmul_64
  target/i386: Use clmul_64
  target/arm: Use clmul_64
  crypto: Add generic 64-bit carry-less multiply routine
  target/ppc: Use clmul_32* routines
  target/s390x: Use clmul_32* routines
  target/arm: Use clmul_32* routines
  crypto: Add generic 32-bit carry-less multiply routines
  target/ppc: Use clmul_16* routines
  target/s390x: Use clmul_16* routines
  target/arm: Use clmul_16* routines
  crypto: Add generic 16-bit carry-less multiply routines
  target/ppc: Use clmul_8* routines
  target/s390x: Use clmul_8* routines
  target/arm: Use clmul_8* routines
  crypto: Add generic 8-bit carry-less multiply routines

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Diffstat (limited to 'crypto/clmul.c')
-rw-r--r--  crypto/clmul.c  111
1 file changed, 111 insertions, 0 deletions
diff --git a/crypto/clmul.c b/crypto/clmul.c
new file mode 100644
index 0000000000..9e3e61a77d
--- /dev/null
+++ b/crypto/clmul.c
@@ -0,0 +1,111 @@
+/*
+ * Carry-less multiply operations.
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * Copyright (C) 2023 Linaro, Ltd.
+ */
+
+#include "qemu/osdep.h"
+#include "crypto/clmul.h"
+
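+/*
+ * Perform eight independent 8x8->8 bit carry-less multiplies in the
+ * byte lanes of n and m, keeping only the low 8 bits of each product.
+ * The 0xfe.. mask keeps the shifts of m from crossing byte lanes.
+ */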
+uint64_t clmul_8x8_low(uint64_t n, uint64_t m)
+{
+    uint64_t r = 0;
+
+    for (int i = 0; i < 8; ++i) {
+        uint64_t mask = (n & 0x0101010101010101ull) * 0xff;
+        r ^= m & mask;
+        m = (m << 1) & 0xfefefefefefefefeull;
+        n >>= 1;
+    }
+    return r;
+}
+
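+/*
+ * Helper for the 8x4 routines.  Both operands must already have only
+ * 8 significant bits in each 16-bit lane, so the shifted copies of m
+ * and the four products stay within their lanes.
+ */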
+static uint64_t clmul_8x4_even_int(uint64_t n, uint64_t m)
+{
+    uint64_t r = 0;
+
+    for (int i = 0; i < 8; ++i) {
+        uint64_t mask = (n & 0x0001000100010001ull) * 0xffff;
+        r ^= m & mask;
+        n >>= 1;
+        m <<= 1;
+    }
+    return r;
+}
+
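+/*
+ * Perform four 8x8->16 bit carry-less multiplies on the even bytes
+ * (bytes 0, 2, 4, 6) of n and m.
+ */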
+uint64_t clmul_8x4_even(uint64_t n, uint64_t m)
+{
+    n &= 0x00ff00ff00ff00ffull;
+    m &= 0x00ff00ff00ff00ffull;
+    return clmul_8x4_even_int(n, m);
+}
+
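+/* As above, but for the odd bytes (bytes 1, 3, 5, 7) of n and m. */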
+uint64_t clmul_8x4_odd(uint64_t n, uint64_t m)
+{
+    return clmul_8x4_even(n >> 8, m >> 8);
+}
+
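+/* Zero-extend each byte of a 32-bit value into a 16-bit lane. */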
+static uint64_t unpack_8_to_16(uint64_t x)
+{
+    return  (x & 0x000000ff)
+         | ((x & 0x0000ff00) << 8)
+         | ((x & 0x00ff0000) << 16)
+         | ((x & 0xff000000) << 24);
+}
+
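+/*
+ * Perform four 8x8->16 bit carry-less multiplies on the byte pairs of
+ * two packed 32-bit operands, returning four packed 16-bit products.
+ */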
+uint64_t clmul_8x4_packed(uint32_t n, uint32_t m)
+{
+    return clmul_8x4_even_int(unpack_8_to_16(n), unpack_8_to_16(m));
+}
+
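+/*
+ * Perform two 16x16->32 bit carry-less multiplies on the even
+ * halfwords (halfwords 0 and 2) of n and m.
+ */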
+uint64_t clmul_16x2_even(uint64_t n, uint64_t m)
+{
+    uint64_t r = 0;
+
+    n &= 0x0000ffff0000ffffull;
+    m &= 0x0000ffff0000ffffull;
+
+    for (int i = 0; i < 16; ++i) {
+        uint64_t mask = (n & 0x0000000100000001ull) * 0xffffffffull;
+        r ^= m & mask;
+        n >>= 1;
+        m <<= 1;
+    }
+    return r;
+}
+
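+/* As above, but for the odd halfwords (halfwords 1 and 3). */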
+uint64_t clmul_16x2_odd(uint64_t n, uint64_t m)
+{
+    return clmul_16x2_even(n >> 16, m >> 16);
+}
+
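+/* Perform one 32x32->64 bit carry-less multiply. */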
+uint64_t clmul_32(uint32_t n, uint32_t m32)
+{
+    uint64_t r = 0;
+    uint64_t m = m32;
+
+    for (int i = 0; i < 32; ++i) {
+        r ^= n & 1 ? m : 0;
+        n >>= 1;
+        m <<= 1;
+    }
+    return r;
+}
+
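+/*
+ * Generic 64x64->128 bit carry-less multiply.  Per this pull, aarch64
+ * and i386 hosts with a native instruction supply an accelerated
+ * version through host/include/*\/clmul.h.
+ */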
+Int128 clmul_64_gen(uint64_t n, uint64_t m)
+{
+    uint64_t rl = 0, rh = 0;
+
+    /* Bit 0 can only influence the low 64-bit result.  */
+    if (n & 1) {
+        rl = m;
+    }
+
+    for (int i = 1; i < 64; ++i) {
+        uint64_t mask = -((n >> i) & 1);
+        rl ^= (m << i) & mask;
+        rh ^= (m >> (64 - i)) & mask;
+    }
+    return int128_make128(rl, rh);
+}
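
As an illustration only (not part of the series above), a minimal self-check of the generic routines might look like the sketch below. It assumes a QEMU build environment where "qemu/osdep.h", "qemu/int128.h" and the "crypto/clmul.h" declarations added by this pull are available, and that clmul_64_gen is declared there; the stand-alone test program itself is hypothetical.

/* Hypothetical stand-alone check of the generic clmul routines. */
#include "qemu/osdep.h"
#include "qemu/int128.h"
#include "crypto/clmul.h"

int main(void)
{
    /* Over GF(2), (x^2 + 1) * (x + 1) = x^3 + x^2 + x + 1. */
    g_assert(clmul_32(0x5, 0x3) == 0xf);

    /* The same product in all eight byte lanes, low 8 bits kept. */
    g_assert(clmul_8x8_low(0x0505050505050505ull, 0x0303030303030303ull) ==
             0x0f0f0f0f0f0f0f0full);

    /* Carry-less multiplication by 1 returns the operand in the low half. */
    Int128 r = clmul_64_gen(0x123456789abcdef0ull, 1);
    g_assert(int128_getlo(r) == 0x123456789abcdef0ull);
    g_assert(int128_gethi(r) == 0);

    return 0;
}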