Diffstat (limited to 'target/arm')
 -rw-r--r--  target/arm/arm-qmp-cmds.c                                   2
 -rw-r--r--  target/arm/cpu.c                                            2
 -rw-r--r--  target/arm/cpu.h                                            4
 -rw-r--r--  target/arm/cpu64.c                                         38
 -rw-r--r--  target/arm/helper.c                                       285
 -rw-r--r--  target/arm/kvm.c                                            2
 -rw-r--r--  target/arm/tcg/arith_helper.c                             296
 -rw-r--r--  target/arm/tcg/meson.build                                  1
 -rw-r--r--  target/arm/tcg/op_addsub.c.inc (renamed from target/arm/op_addsub.h)   0
9 files changed, 333 insertions, 297 deletions
diff --git a/target/arm/arm-qmp-cmds.c b/target/arm/arm-qmp-cmds.c
index 3cc8cc738b..33cea080d1 100644
--- a/target/arm/arm-qmp-cmds.c
+++ b/target/arm/arm-qmp-cmds.c
@@ -94,7 +94,7 @@ static const char *cpu_model_advertised_features[] = {
     "sve640", "sve768", "sve896", "sve1024", "sve1152", "sve1280",
     "sve1408", "sve1536", "sve1664", "sve1792", "sve1920", "sve2048",
     "kvm-no-adjvtime", "kvm-steal-time",
-    "pauth", "pauth-impdef", "pauth-qarma3",
+    "pauth", "pauth-impdef", "pauth-qarma3", "pauth-qarma5",
     NULL
 };
 
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index dcedadc89e..dc0231233a 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -2653,6 +2653,8 @@ static const Property arm_cpu_properties[] = {
     DEFINE_PROP_INT32("core-count", ARMCPU, core_count, -1),
     /* True to default to the backward-compat old CNTFRQ rather than 1Ghz */
     DEFINE_PROP_BOOL("backcompat-cntfrq", ARMCPU, backcompat_cntfrq, false),
+    DEFINE_PROP_BOOL("backcompat-pauth-default-use-qarma5", ARMCPU,
+                     backcompat_pauth_default_use_qarma5, false),
 };
 
 static const gchar *arm_gdb_arch_name(CPUState *cs)
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index 12b8466542..9a6e8e589c 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -972,6 +972,9 @@ struct ArchCPU {
     /* QOM property to indicate we should use the back-compat CNTFRQ default */
     bool backcompat_cntfrq;
 
+    /* QOM property to indicate we should use the back-compat QARMA5 default */
+    bool backcompat_pauth_default_use_qarma5;
+
     /* Specify the number of cores in this CPU cluster. Used for the L2CTLR
      * register.
      */
@@ -1062,6 +1065,7 @@ struct ArchCPU {
     bool prop_pauth;
     bool prop_pauth_impdef;
     bool prop_pauth_qarma3;
+    bool prop_pauth_qarma5;
     bool prop_lpa2;
 
     /* DCZ blocksize, in log_2(words), ie low 4 bits of DCZID_EL0 */
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
index dca83e4518..8188ede5cc 100644
--- a/target/arm/cpu64.c
+++ b/target/arm/cpu64.c
@@ -520,25 +520,40 @@ void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp)
     }
 
     if (cpu->prop_pauth) {
-        if (cpu->prop_pauth_impdef && cpu->prop_pauth_qarma3) {
+        if ((cpu->prop_pauth_impdef && cpu->prop_pauth_qarma3) ||
+            (cpu->prop_pauth_impdef && cpu->prop_pauth_qarma5) ||
+            (cpu->prop_pauth_qarma3 && cpu->prop_pauth_qarma5)) {
             error_setg(errp,
-                       "cannot enable both pauth-impdef and pauth-qarma3");
+                       "cannot enable pauth-impdef, pauth-qarma3 and "
+                       "pauth-qarma5 at the same time");
             return;
         }
 
-        if (cpu->prop_pauth_impdef) {
-            isar1 = FIELD_DP64(isar1, ID_AA64ISAR1, API, features);
-            isar1 = FIELD_DP64(isar1, ID_AA64ISAR1, GPI, 1);
+        bool use_default = !cpu->prop_pauth_qarma5 &&
+                           !cpu->prop_pauth_qarma3 &&
+                           !cpu->prop_pauth_impdef;
+
+        if (cpu->prop_pauth_qarma5 ||
+            (use_default &&
+             cpu->backcompat_pauth_default_use_qarma5)) {
+            isar1 = FIELD_DP64(isar1, ID_AA64ISAR1, APA, features);
+            isar1 = FIELD_DP64(isar1, ID_AA64ISAR1, GPA, 1);
         } else if (cpu->prop_pauth_qarma3) {
             isar2 = FIELD_DP64(isar2, ID_AA64ISAR2, APA3, features);
             isar2 = FIELD_DP64(isar2, ID_AA64ISAR2, GPA3, 1);
+        } else if (cpu->prop_pauth_impdef ||
+                   (use_default &&
+                    !cpu->backcompat_pauth_default_use_qarma5)) {
+            isar1 = FIELD_DP64(isar1, ID_AA64ISAR1, API, features);
+            isar1 = FIELD_DP64(isar1, ID_AA64ISAR1, GPI, 1);
         } else {
-            isar1 = FIELD_DP64(isar1, ID_AA64ISAR1, APA, features);
-            isar1 = FIELD_DP64(isar1, ID_AA64ISAR1, GPA, 1);
+            g_assert_not_reached();
         }
-    } else if (cpu->prop_pauth_impdef || cpu->prop_pauth_qarma3) {
-        error_setg(errp, "cannot enable pauth-impdef or "
-                   "pauth-qarma3 without pauth");
+    } else if (cpu->prop_pauth_impdef ||
+               cpu->prop_pauth_qarma3 ||
+               cpu->prop_pauth_qarma5) {
+        error_setg(errp, "cannot enable pauth-impdef, pauth-qarma3 or "
+                   "pauth-qarma5 without pauth");
         error_append_hint(errp, "Add pauth=on to the CPU property list.\n");
     }
 }
@@ -553,6 +568,8 @@ static const Property arm_cpu_pauth_impdef_property =
     DEFINE_PROP_BOOL("pauth-impdef", ARMCPU, prop_pauth_impdef, false);
 static const Property arm_cpu_pauth_qarma3_property =
     DEFINE_PROP_BOOL("pauth-qarma3", ARMCPU, prop_pauth_qarma3, false);
+static Property arm_cpu_pauth_qarma5_property =
+    DEFINE_PROP_BOOL("pauth-qarma5", ARMCPU, prop_pauth_qarma5, false);
 
 void aarch64_add_pauth_properties(Object *obj)
 {
@@ -573,6 +590,7 @@ void aarch64_add_pauth_properties(Object *obj)
     } else {
         qdev_property_add_static(DEVICE(obj), &arm_cpu_pauth_impdef_property);
         qdev_property_add_static(DEVICE(obj), &arm_cpu_pauth_qarma3_property);
+        qdev_property_add_static(DEVICE(obj), &arm_cpu_pauth_qarma5_property);
     }
 }
 
diff --git a/target/arm/helper.c b/target/arm/helper.c
index 5b595f951b..6399767851 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -17,11 +17,9 @@
 #include "qemu/main-loop.h"
 #include "qemu/timer.h"
 #include "qemu/bitops.h"
-#include "qemu/crc32c.h"
 #include "qemu/qemu-print.h"
 #include "exec/exec-all.h"
 #include "exec/translation-block.h"
-#include <zlib.h> /* for crc32 */
 #include "hw/irq.h"
 #include "system/cpu-timers.h"
 #include "system/kvm.h"
@@ -10984,289 +10982,6 @@ ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
     };
 }
 
-/*
- * Note that signed overflow is undefined in C. The following routines are
- * careful to use unsigned types where modulo arithmetic is required.
- * Failure to do so _will_ break on newer gcc.
- */
-
-/* Signed saturating arithmetic. */
-
-/* Perform 16-bit signed saturating addition. */
-static inline uint16_t add16_sat(uint16_t a, uint16_t b)
-{
-    uint16_t res;
-
-    res = a + b;
-    if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) {
-        if (a & 0x8000) {
-            res = 0x8000;
-        } else {
-            res = 0x7fff;
-        }
-    }
-    return res;
-}
-
-/* Perform 8-bit signed saturating addition. */
-static inline uint8_t add8_sat(uint8_t a, uint8_t b)
-{
-    uint8_t res;
-
-    res = a + b;
-    if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) {
-        if (a & 0x80) {
-            res = 0x80;
-        } else {
-            res = 0x7f;
-        }
-    }
-    return res;
-}
-
-/* Perform 16-bit signed saturating subtraction. */
-static inline uint16_t sub16_sat(uint16_t a, uint16_t b)
-{
-    uint16_t res;
-
-    res = a - b;
-    if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) {
-        if (a & 0x8000) {
-            res = 0x8000;
-        } else {
-            res = 0x7fff;
-        }
-    }
-    return res;
-}
-
-/* Perform 8-bit signed saturating subtraction. */
-static inline uint8_t sub8_sat(uint8_t a, uint8_t b)
-{
-    uint8_t res;
-
-    res = a - b;
-    if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) {
-        if (a & 0x80) {
-            res = 0x80;
-        } else {
-            res = 0x7f;
-        }
-    }
-    return res;
-}
-
-#define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16);
-#define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16);
-#define ADD8(a, b, n) RESULT(add8_sat(a, b), n, 8);
-#define SUB8(a, b, n) RESULT(sub8_sat(a, b), n, 8);
-#define PFX q
-
-#include "op_addsub.h"
-
-/* Unsigned saturating arithmetic. */
-static inline uint16_t add16_usat(uint16_t a, uint16_t b)
-{
-    uint16_t res;
-    res = a + b;
-    if (res < a) {
-        res = 0xffff;
-    }
-    return res;
-}
-
-static inline uint16_t sub16_usat(uint16_t a, uint16_t b)
-{
-    if (a > b) {
-        return a - b;
-    } else {
-        return 0;
-    }
-}
-
-static inline uint8_t add8_usat(uint8_t a, uint8_t b)
-{
-    uint8_t res;
-    res = a + b;
-    if (res < a) {
-        res = 0xff;
-    }
-    return res;
-}
-
-static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
-{
-    if (a > b) {
-        return a - b;
-    } else {
-        return 0;
-    }
-}
-
-#define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16);
-#define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16);
-#define ADD8(a, b, n) RESULT(add8_usat(a, b), n, 8);
-#define SUB8(a, b, n) RESULT(sub8_usat(a, b), n, 8);
-#define PFX uq
-
-#include "op_addsub.h"
-
-/* Signed modulo arithmetic. */
-#define SARITH16(a, b, n, op) do { \
-    int32_t sum; \
-    sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \
-    RESULT(sum, n, 16); \
-    if (sum >= 0) \
-        ge |= 3 << (n * 2); \
-    } while (0)
-
-#define SARITH8(a, b, n, op) do { \
-    int32_t sum; \
-    sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \
-    RESULT(sum, n, 8); \
-    if (sum >= 0) \
-        ge |= 1 << n; \
-    } while (0)
-
-
-#define ADD16(a, b, n) SARITH16(a, b, n, +)
-#define SUB16(a, b, n) SARITH16(a, b, n, -)
-#define ADD8(a, b, n) SARITH8(a, b, n, +)
-#define SUB8(a, b, n) SARITH8(a, b, n, -)
-#define PFX s
-#define ARITH_GE
-
-#include "op_addsub.h"
-
-/* Unsigned modulo arithmetic. */
-#define ADD16(a, b, n) do { \
-    uint32_t sum; \
-    sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \
-    RESULT(sum, n, 16); \
-    if ((sum >> 16) == 1) \
-        ge |= 3 << (n * 2); \
-    } while (0)
-
-#define ADD8(a, b, n) do { \
-    uint32_t sum; \
-    sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \
-    RESULT(sum, n, 8); \
-    if ((sum >> 8) == 1) \
-        ge |= 1 << n; \
-    } while (0)
-
-#define SUB16(a, b, n) do { \
-    uint32_t sum; \
-    sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
-    RESULT(sum, n, 16); \
-    if ((sum >> 16) == 0) \
-        ge |= 3 << (n * 2); \
-    } while (0)
-
-#define SUB8(a, b, n) do { \
-    uint32_t sum; \
-    sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
-    RESULT(sum, n, 8); \
-    if ((sum >> 8) == 0) \
-        ge |= 1 << n; \
-    } while (0)
-
-#define PFX u
-#define ARITH_GE
-
-#include "op_addsub.h"
-
-/* Halved signed arithmetic. */
-#define ADD16(a, b, n) \
-    RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16)
-#define SUB16(a, b, n) \
-    RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
-#define ADD8(a, b, n) \
-    RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8)
-#define SUB8(a, b, n) \
-    RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)
-#define PFX sh
-
-#include "op_addsub.h"
-
-/* Halved unsigned arithmetic. */
-#define ADD16(a, b, n) \
-    RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
-#define SUB16(a, b, n) \
-    RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
-#define ADD8(a, b, n) \
-    RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
-#define SUB8(a, b, n) \
-    RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
-#define PFX uh
-
-#include "op_addsub.h"
-
-static inline uint8_t do_usad(uint8_t a, uint8_t b)
-{
-    if (a > b) {
-        return a - b;
-    } else {
-        return b - a;
-    }
-}
-
-/* Unsigned sum of absolute byte differences. */
-uint32_t HELPER(usad8)(uint32_t a, uint32_t b)
-{
-    uint32_t sum;
-    sum = do_usad(a, b);
-    sum += do_usad(a >> 8, b >> 8);
-    sum += do_usad(a >> 16, b >> 16);
-    sum += do_usad(a >> 24, b >> 24);
-    return sum;
-}
-
-/* For ARMv6 SEL instruction. */
-uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b)
-{
-    uint32_t mask;
-
-    mask = 0;
-    if (flags & 1) {
-        mask |= 0xff;
-    }
-    if (flags & 2) {
-        mask |= 0xff00;
-    }
-    if (flags & 4) {
-        mask |= 0xff0000;
-    }
-    if (flags & 8) {
-        mask |= 0xff000000;
-    }
-    return (a & mask) | (b & ~mask);
-}
-
-/*
- * CRC helpers.
- * The upper bytes of val (above the number specified by 'bytes') must have
- * been zeroed out by the caller.
- */
-uint32_t HELPER(crc32)(uint32_t acc, uint32_t val, uint32_t bytes)
-{
-    uint8_t buf[4];
-
-    stl_le_p(buf, val);
-
-    /* zlib crc32 converts the accumulator and output to one's complement. */
-    return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff;
-}
-
-uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes)
-{
-    uint8_t buf[4];
-
-    stl_le_p(buf, val);
-
-    /* Linux crc32c converts the output to one's complement. */
-    return crc32c(acc, buf, bytes) ^ 0xffffffff;
-}
 
 /*
  * Return the exception level to which FP-disabled exceptions should
diff --git a/target/arm/kvm.c b/target/arm/kvm.c
index a9444a2c7a..da30bdbb23 100644
--- a/target/arm/kvm.c
+++ b/target/arm/kvm.c
@@ -2387,7 +2387,7 @@ void kvm_arch_on_sigbus_vcpu(CPUState *c, int code, void *addr)
      */
     if (code == BUS_MCEERR_AR) {
         kvm_cpu_synchronize_state(c);
-        if (!acpi_ghes_record_errors(ACPI_HEST_SRC_ID_SEA, paddr)) {
+        if (!acpi_ghes_memory_errors(ACPI_HEST_SRC_ID_SEA, paddr)) {
             kvm_inject_arm_sea(c);
         } else {
             error_report("failed to record the error");
diff --git a/target/arm/tcg/arith_helper.c b/target/arm/tcg/arith_helper.c
new file mode 100644
index 0000000000..9a555c7966
--- /dev/null
+++ b/target/arm/tcg/arith_helper.c
@@ -0,0 +1,296 @@
+/*
+ * ARM generic helpers for various arithmetical operations.
+ *
+ * This code is licensed under the GNU GPL v2 or later.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "qemu/crc32c.h"
+#include <zlib.h> /* for crc32 */
+
+/*
+ * Note that signed overflow is undefined in C. The following routines are
+ * careful to use unsigned types where modulo arithmetic is required.
+ * Failure to do so _will_ break on newer gcc.
+ */
+
+/* Signed saturating arithmetic. */
+
+/* Perform 16-bit signed saturating addition. */
+static inline uint16_t add16_sat(uint16_t a, uint16_t b)
+{
+    uint16_t res;
+
+    res = a + b;
+    if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) {
+        if (a & 0x8000) {
+            res = 0x8000;
+        } else {
+            res = 0x7fff;
+        }
+    }
+    return res;
+}
+
+/* Perform 8-bit signed saturating addition. */
+static inline uint8_t add8_sat(uint8_t a, uint8_t b)
+{
+    uint8_t res;
+
+    res = a + b;
+    if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) {
+        if (a & 0x80) {
+            res = 0x80;
+        } else {
+            res = 0x7f;
+        }
+    }
+    return res;
+}
+
+/* Perform 16-bit signed saturating subtraction. */
+static inline uint16_t sub16_sat(uint16_t a, uint16_t b)
+{
+    uint16_t res;
+
+    res = a - b;
+    if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) {
+        if (a & 0x8000) {
+            res = 0x8000;
+        } else {
+            res = 0x7fff;
+        }
+    }
+    return res;
+}
+
+/* Perform 8-bit signed saturating subtraction. */
+static inline uint8_t sub8_sat(uint8_t a, uint8_t b)
+{
+    uint8_t res;
+
+    res = a - b;
+    if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) {
+        if (a & 0x80) {
+            res = 0x80;
+        } else {
+            res = 0x7f;
+        }
+    }
+    return res;
+}
+
+#define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16);
+#define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16);
+#define ADD8(a, b, n) RESULT(add8_sat(a, b), n, 8);
+#define SUB8(a, b, n) RESULT(sub8_sat(a, b), n, 8);
+#define PFX q
+
+#include "op_addsub.c.inc"
+
+/* Unsigned saturating arithmetic. */
+static inline uint16_t add16_usat(uint16_t a, uint16_t b)
+{
+    uint16_t res;
+    res = a + b;
+    if (res < a) {
+        res = 0xffff;
+    }
+    return res;
+}
+
+static inline uint16_t sub16_usat(uint16_t a, uint16_t b)
+{
+    if (a > b) {
+        return a - b;
+    } else {
+        return 0;
+    }
+}
+
+static inline uint8_t add8_usat(uint8_t a, uint8_t b)
+{
+    uint8_t res;
+    res = a + b;
+    if (res < a) {
+        res = 0xff;
+    }
+    return res;
+}
+
+static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
+{
+    if (a > b) {
+        return a - b;
+    } else {
+        return 0;
+    }
+}
+
+#define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16);
+#define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16);
+#define ADD8(a, b, n) RESULT(add8_usat(a, b), n, 8);
+#define SUB8(a, b, n) RESULT(sub8_usat(a, b), n, 8);
+#define PFX uq
+
+#include "op_addsub.c.inc"
+
+/* Signed modulo arithmetic. */
+#define SARITH16(a, b, n, op) do { \
+    int32_t sum; \
+    sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \
+    RESULT(sum, n, 16); \
+    if (sum >= 0) \
+        ge |= 3 << (n * 2); \
+    } while (0)
+
+#define SARITH8(a, b, n, op) do { \
+    int32_t sum; \
+    sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \
+    RESULT(sum, n, 8); \
+    if (sum >= 0) \
+        ge |= 1 << n; \
+    } while (0)
+
+
+#define ADD16(a, b, n) SARITH16(a, b, n, +)
+#define SUB16(a, b, n) SARITH16(a, b, n, -)
+#define ADD8(a, b, n) SARITH8(a, b, n, +)
+#define SUB8(a, b, n) SARITH8(a, b, n, -)
+#define PFX s
+#define ARITH_GE
+
+#include "op_addsub.c.inc"
+
+/* Unsigned modulo arithmetic. */
+#define ADD16(a, b, n) do { \
+    uint32_t sum; \
+    sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \
+    RESULT(sum, n, 16); \
+    if ((sum >> 16) == 1) \
+        ge |= 3 << (n * 2); \
+    } while (0)
+
+#define ADD8(a, b, n) do { \
+    uint32_t sum; \
+    sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \
+    RESULT(sum, n, 8); \
+    if ((sum >> 8) == 1) \
+        ge |= 1 << n; \
+    } while (0)
+
+#define SUB16(a, b, n) do { \
+    uint32_t sum; \
+    sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
+    RESULT(sum, n, 16); \
+    if ((sum >> 16) == 0) \
+        ge |= 3 << (n * 2); \
+    } while (0)
+
+#define SUB8(a, b, n) do { \
+    uint32_t sum; \
+    sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
+    RESULT(sum, n, 8); \
+    if ((sum >> 8) == 0) \
+        ge |= 1 << n; \
+    } while (0)
+
+#define PFX u
+#define ARITH_GE
+
+#include "op_addsub.c.inc"
+
+/* Halved signed arithmetic. */
+#define ADD16(a, b, n) \
+    RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16)
+#define SUB16(a, b, n) \
+    RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
+#define ADD8(a, b, n) \
+    RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8)
+#define SUB8(a, b, n) \
+    RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)
+#define PFX sh
+
+#include "op_addsub.c.inc"
+
+/* Halved unsigned arithmetic. */
+#define ADD16(a, b, n) \
+    RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
+#define SUB16(a, b, n) \
+    RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
+#define ADD8(a, b, n) \
+    RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
+#define SUB8(a, b, n) \
+    RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
+#define PFX uh
+
+#include "op_addsub.c.inc"
+
+static inline uint8_t do_usad(uint8_t a, uint8_t b)
+{
+    if (a > b) {
+        return a - b;
+    } else {
+        return b - a;
+    }
+}
+
+/* Unsigned sum of absolute byte differences. */
+uint32_t HELPER(usad8)(uint32_t a, uint32_t b)
+{
+    uint32_t sum;
+    sum = do_usad(a, b);
+    sum += do_usad(a >> 8, b >> 8);
+    sum += do_usad(a >> 16, b >> 16);
+    sum += do_usad(a >> 24, b >> 24);
+    return sum;
+}
+
+/* For ARMv6 SEL instruction. */
+uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b)
+{
+    uint32_t mask;
+
+    mask = 0;
+    if (flags & 1) {
+        mask |= 0xff;
+    }
+    if (flags & 2) {
+        mask |= 0xff00;
+    }
+    if (flags & 4) {
+        mask |= 0xff0000;
+    }
+    if (flags & 8) {
+        mask |= 0xff000000;
+    }
+    return (a & mask) | (b & ~mask);
+}
+
+/*
+ * CRC helpers.
+ * The upper bytes of val (above the number specified by 'bytes') must have
+ * been zeroed out by the caller.
+ */
+uint32_t HELPER(crc32)(uint32_t acc, uint32_t val, uint32_t bytes)
+{
+    uint8_t buf[4];
+
+    stl_le_p(buf, val);
+
+    /* zlib crc32 converts the accumulator and output to one's complement. */
+    return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff;
+}
+
+uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes)
+{
+    uint8_t buf[4];
+
+    stl_le_p(buf, val);
+
+    /* Linux crc32c converts the output to one's complement. */
+    return crc32c(acc, buf, bytes) ^ 0xffffffff;
+}
diff --git a/target/arm/tcg/meson.build b/target/arm/tcg/meson.build
index 09238989c5..1f9077c372 100644
--- a/target/arm/tcg/meson.build
+++ b/target/arm/tcg/meson.build
@@ -40,6 +40,7 @@ arm_ss.add(files(
   'tlb_helper.c',
   'vec_helper.c',
   'tlb-insns.c',
+  'arith_helper.c',
 ))
 
 arm_ss.add(when: 'TARGET_AARCH64', if_true: files(
diff --git a/target/arm/op_addsub.h b/target/arm/tcg/op_addsub.c.inc
index ca4a1893c3..ca4a1893c3 100644
--- a/target/arm/op_addsub.h
+++ b/target/arm/tcg/op_addsub.c.inc
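For reference, the property added by the cpu64.c hunks is selected the same way as the existing pauth-impdef and pauth-qarma3 switches. A minimal TCG invocation might look like the sketch below; the 'virt' board and 'max' CPU model are only illustrative choices, not something this change requires:

    qemu-system-aarch64 -machine virt -cpu max,pauth=on,pauth-qarma5=on ...

When none of pauth-impdef, pauth-qarma3 or pauth-qarma5 is given, the finalize code above now defaults to the IMPDEF algorithm, unless backcompat-pauth-default-use-qarma5 is set, which keeps the old QARMA5 default for backward-compatible machine types.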