author     Peter Maydell <peter.maydell@linaro.org>  2014-06-09 17:04:13 +0100
committer  Peter Maydell <peter.maydell@linaro.org>  2014-06-09 17:04:13 +0100
commit     7721a3044234c46cd6f5f899e7467dc9351f3c8d (patch)
tree       82162c617563aee0f9d9ca1d9bf2073733975538 /target-arm/helper-a64.c
parent     14ac57339288c07b47e7e91fa192735158aa6a1e (diff)
parent     3b1a41381254f6080b5cfeb149c28a9237d42a0b (diff)
Merge remote-tracking branch 'remotes/pmaydell/tags/pull-target-arm-20140609-1' into staging
----------------------------------------------------------------
target-arm queue:
 * support -bios option in vexpress boards
 * register the Cortex-A57 impdef system registers
 * fix handling of UXN bit in ARMv8 page tables
 * complete support of crypto insns in A32/T32
 * implement CRC and crypto insns in A64
 * fix bugs in generic timer control register

----------------------------------------------------------------

# gpg: Signature made Mon 09 Jun 2014 16:08:26 BST using RSA key ID 14360CDE
# gpg: Good signature from "Peter Maydell <peter.maydell@linaro.org>"

* remotes/pmaydell/tags/pull-target-arm-20140609-1:
  target-arm: Delete unused iwmmxt_msadb helper
  target-arm: Fix errors in writes to generic timer control registers
  target-arm: A64: Implement two-register SHA instructions
  target-arm: A64: Implement 3-register SHA instructions
  target-arm: A64: Implement AES instructions
  target-arm: A32/T32: Mask CRC value in calling code, not helper
  target-arm: A64: Implement CRC instructions
  target-arm: VFPv4 implies half-precision extension
  target-arm: Clean up handling of ARMv8 optional feature bits
  target-arm: Remove unnecessary setting of feature bits
  target-arm: arm_any_initfn() should never set ARM_FEATURE_AARCH64
  target-arm: A64: Use PMULL feature bit for PMULL
  target-arm: add support for v8 VMULL.P64 instruction
  target-arm: Allow 3reg_wide undefreq to encode more bad size options
  target-arm: add support for v8 SHA1 and SHA256 instructions
  target-arm: Correct handling of UXN bit in ARMv8 LPAE page tables
  target-arm: Prepare cpreg writefns/readfns for EL3/SecExt
  target-arm/cpu64.c: Actually register Cortex-A57 impdef registers
  vexpress: Add support for the -bios flag to provide firmware

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Diffstat (limited to 'target-arm/helper-a64.c')
-rw-r--r--  target-arm/helper-a64.c | 60
1 file changed, 30 insertions(+), 30 deletions(-)
diff --git a/target-arm/helper-a64.c b/target-arm/helper-a64.c
index cccda74113..2b4ce6ac60 100644
--- a/target-arm/helper-a64.c
+++ b/target-arm/helper-a64.c
@@ -24,6 +24,8 @@
 #include "sysemu/sysemu.h"
 #include "qemu/bitops.h"
 #include "internals.h"
+#include "qemu/crc32c.h"
+#include <zlib.h> /* For crc32 */
 
 /* C2.4.7 Multiply and divide */
 /* special cases for 0 and LLONG_MIN are mandated by the standard */
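
The trailing context above refers to the A64 integer divide helpers, whose special cases are mandated by the architecture: division by zero returns 0, and LLONG_MIN / -1 returns LLONG_MIN rather than trapping. Those helpers are not part of this hunk; the following is only a minimal sketch of how a signed-divide helper typically implements the two cases (the name sdiv64_sketch is illustrative, not taken from the diff):

#include <stdint.h>

/* Illustrative sketch only: AArch64 SDIV semantics require that
 * division by zero yields 0 and that INT64_MIN / -1 yields INT64_MIN
 * (no overflow trap), so both cases are checked before falling back
 * to the host's native division.
 */
static int64_t sdiv64_sketch(int64_t num, int64_t den)
{
    if (den == 0) {
        return 0;          /* x / 0 == 0 by architectural definition */
    }
    if (num == INT64_MIN && den == -1) {
        return INT64_MIN;  /* avoid undefined behaviour on signed overflow */
    }
    return num / den;
}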
@@ -186,36 +188,6 @@ uint64_t HELPER(simd_tbl)(CPUARMState *env, uint64_t result, uint64_t indices,
     return result;
 }
 
-/* Helper function for 64 bit polynomial multiply case:
- * perform PolynomialMult(op1, op2) and return either the top or
- * bottom half of the 128 bit result.
- */
-uint64_t HELPER(neon_pmull_64_lo)(uint64_t op1, uint64_t op2)
-{
-    int bitnum;
-    uint64_t res = 0;
-
-    for (bitnum = 0; bitnum < 64; bitnum++) {
-        if (op1 & (1ULL << bitnum)) {
-            res ^= op2 << bitnum;
-        }
-    }
-    return res;
-}
-uint64_t HELPER(neon_pmull_64_hi)(uint64_t op1, uint64_t op2)
-{
-    int bitnum;
-    uint64_t res = 0;
-
-    /* bit 0 of op1 can't influence the high 64 bits at all */
-    for (bitnum = 1; bitnum < 64; bitnum++) {
-        if (op1 & (1ULL << bitnum)) {
-            res ^= op2 >> (64 - bitnum);
-        }
-    }
-    return res;
-}
-
 /* 64bit/double versions of the neon float compare functions */
 uint64_t HELPER(neon_ceq_f64)(float64 a, float64 b, void *fpstp)
 {
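
The two helpers removed above split a 64x64 carry-less (polynomial) multiply into the low and high halves of the 128-bit PolynomialMult result; the commit list above suggests the A64 PMULL path now goes through the shared v8 VMULL.P64 support instead, making these copies redundant. As an illustration of how the two halves relate, here is a hedged sketch that computes both halves in one pass (the poly128 type and function name are hypothetical, not part of the diff):

#include <stdint.h>

/* Hypothetical container for a 128-bit carry-less product. */
typedef struct {
    uint64_t lo;
    uint64_t hi;
} poly128;

/* Sketch of PolynomialMult(op1, op2): XOR-accumulate a shifted copy of
 * op2 for each set bit of op1, tracking the bits that spill past 64.
 */
static poly128 pmull_128_sketch(uint64_t op1, uint64_t op2)
{
    poly128 r = { 0, 0 };

    for (int bit = 0; bit < 64; bit++) {
        if (op1 & (1ULL << bit)) {
            r.lo ^= op2 << bit;
            if (bit != 0) {
                /* bit 0 of op1 cannot contribute to the high half */
                r.hi ^= op2 >> (64 - bit);
            }
        }
    }
    return r;
}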
@@ -438,6 +410,34 @@ float32 HELPER(fcvtx_f64_to_f32)(float64 a, CPUARMState *env)
     return r;
 }
 
+/* 64-bit versions of the CRC helpers. Note that although the operation
+ * (and the prototypes of crc32c() and crc32()) means that only the bottom
+ * 32 bits of the accumulator and result are used, we pass and return
+ * uint64_t for convenience of the generated code. Unlike the 32-bit
+ * instruction set versions, val may genuinely have 64 bits of data in it.
+ * The upper bytes of val (above the number specified by 'bytes') must have
+ * been zeroed out by the caller.
+ */
+uint64_t HELPER(crc32_64)(uint64_t acc, uint64_t val, uint32_t bytes)
+{
+    uint8_t buf[8];
+
+    stq_le_p(buf, val);
+
+    /* zlib crc32 converts the accumulator and output to one's complement.  */
+    return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff;
+}
+
+uint64_t HELPER(crc32c_64)(uint64_t acc, uint64_t val, uint32_t bytes)
+{
+    uint8_t buf[8];
+
+    stq_le_p(buf, val);
+
+    /* Linux crc32c converts the output to one's complement.  */
+    return crc32c(acc, buf, bytes) ^ 0xffffffff;
+}
+
 /* Handle a CPU exception.  */
 void aarch64_cpu_do_interrupt(CPUState *cs)
 {
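
To make the inversion handling in the new CRC helpers concrete, here is a standalone sketch that mirrors helper_crc32_64() using only host zlib (link with -lz): the ARM CRC32X accumulator is "raw", while zlib's crc32() inverts its input accumulator and its output, so both are cancelled with an XOR against 0xffffffff. stq_le_p() from the QEMU tree is replaced by an explicit little-endian store, and the main() chaining check is purely illustrative:

#include <stdint.h>
#include <stdio.h>
#include <zlib.h>   /* host zlib, for crc32() */

/* Standalone sketch of the zlib-based helper added above: XOR on the
 * way in cancels zlib's internal inversion of the accumulator, and
 * XOR on the way out cancels its inversion of the result, leaving the
 * raw CRC value the ARM instruction expects.
 */
static uint64_t crc32_64_sketch(uint64_t acc, uint64_t val, uint32_t bytes)
{
    uint8_t buf[8];

    for (int i = 0; i < 8; i++) {
        buf[i] = (uint8_t)(val >> (i * 8));   /* little-endian byte order */
    }
    return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff;
}

int main(void)
{
    /* Feeding 8 bytes at once matches chaining two 4-byte updates,
     * which is how back-to-back CRC32W/CRC32X instructions accumulate
     * through the previous result register.
     */
    uint64_t one_shot = crc32_64_sketch(0, 0x1122334455667788ULL, 8);
    uint64_t chained  = crc32_64_sketch(crc32_64_sketch(0, 0x55667788, 4),
                                        0x11223344, 4);

    printf("%s\n", one_shot == chained ? "match" : "mismatch");
    return 0;
}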