path: root/util/memalign.c
author     Peter Maydell <peter.maydell@linaro.org>   2022-03-08 15:26:10 +0000
committer  Peter Maydell <peter.maydell@linaro.org>   2022-03-08 15:26:10 +0000
commit     9740b907a5363c06ecf61e08b21966a81eb0dab4 (patch)
tree       85296ab4c290bfe5c1b1229e4716a31e1065bfc7 /util/memalign.c
parent     33d102e92e41a65c817d85ff8bfd5ffa2c16b1d3 (diff)
parent     0942820408dc788560f6968e9b5f011803b846c2 (diff)
Merge remote-tracking branch 'remotes/pmaydell/tags/pull-target-arm-20220307' into staging
target-arm queue:
 * cleanups of qemu_oom_check() and qemu_memalign()
 * target/arm/translate-neon: UNDEF if VLD1/VST1 stride bits are non-zero
 * target/arm/translate-neon: Simplify align field check for VLD3
 * GICv3 ITS: add more trace events
 * GICv3 ITS: implement 8-byte accesses properly
 * GICv3: fix minor issues with some trace/log messages
 * ui/cocoa: Use the standard about panel
 * target/arm: Provide cpu property for controlling FEAT_LPA2
 * hw/arm/virt: Disable LPA2 for -machine virt-6.2

# gpg: Signature made Mon 07 Mar 2022 16:46:06 GMT
# gpg:                using RSA key E1A5C593CD419DE28E8315CF3C2525ED14360CDE
# gpg:                issuer "peter.maydell@linaro.org"
# gpg: Good signature from "Peter Maydell <peter.maydell@linaro.org>" [ultimate]
# gpg:                 aka "Peter Maydell <pmaydell@gmail.com>" [ultimate]
# gpg:                 aka "Peter Maydell <pmaydell@chiark.greenend.org.uk>" [ultimate]
# Primary key fingerprint: E1A5 C593 CD41 9DE2 8E83  15CF 3C25 25ED 1436 0CDE

* remotes/pmaydell/tags/pull-target-arm-20220307:
  hw/arm/virt: Disable LPA2 for -machine virt-6.2
  target/arm: Provide cpu property for controlling FEAT_LPA2
  ui/cocoa: Use the standard about panel
  hw/intc/arm_gicv3_cpuif: Fix register names in ICV_HPPIR read trace event
  hw/intc/arm_gicv3: Fix missing spaces in error log messages
  hw/intc/arm_gicv3: Specify valid and impl in MemoryRegionOps
  hw/intc/arm_gicv3_its: Add trace events for table reads and writes
  hw/intc/arm_gicv3_its: Add trace events for commands
  target/arm/translate-neon: Simplify align field check for VLD3
  target/arm/translate-neon: UNDEF if VLD1/VST1 stride bits are non-zero
  osdep: Move memalign-related functions to their own header
  util: Put qemu_vfree() in memalign.c
  util: Use meson checks for valloc() and memalign() presence
  util: Share qemu_try_memalign() implementation between POSIX and Windows
  meson.build: Don't misdetect posix_memalign() on Windows
  util: Return valid allocation for qemu_try_memalign() with zero size
  util: Unify implementations of qemu_memalign()
  util: Make qemu_oom_check() a static function

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
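
Of the util/osdep entries above, the memalign cleanups consolidate QEMU's aligned-allocation helpers into the new util/memalign.c added below, with their declarations moved out of osdep.h into a dedicated "qemu/memalign.h" header. A minimal caller-side sketch of the consolidated interface; the helper name and sizes here are hypothetical, for illustration only:

    #include "qemu/osdep.h"
    #include "qemu/memalign.h"   /* new home of the memalign helpers */

    /* Hypothetical helper: allocate a page-aligned 64 KiB scratch buffer.
     * qemu_memalign() aborts on failure, so the result is never NULL. */
    static void *alloc_scratch(void)
    {
        return qemu_memalign(4096, 64 * 1024);
    }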
Diffstat (limited to 'util/memalign.c')
-rw-r--r--  util/memalign.c  92
1 file changed, 92 insertions, 0 deletions
diff --git a/util/memalign.c b/util/memalign.c
new file mode 100644
index 0000000000..c199ae7073
--- /dev/null
+++ b/util/memalign.c
@@ -0,0 +1,92 @@
+/*
+ * memalign.c: Allocate an aligned memory region
+ *
+ * Copyright (c) 2003-2008 Fabrice Bellard
+ * Copyright (c) 2010-2016 Red Hat, Inc.
+ * Copyright (c) 2022 Linaro Ltd
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/host-utils.h"
+#include "qemu/memalign.h"
+#include "trace.h"
+
+void *qemu_try_memalign(size_t alignment, size_t size)
+{
+    void *ptr;
+
+    if (alignment < sizeof(void*)) {
+        alignment = sizeof(void*);
+    } else {
+        g_assert(is_power_of_2(alignment));
+    }
+
+    /*
+     * Handling of 0 allocations varies among the different
+     * platform APIs (for instance _aligned_malloc() will
+     * fail) -- ensure that we always return a valid non-NULL
+     * pointer that can be freed by qemu_vfree().
+     */
+    if (size == 0) {
+        size++;
+    }
+#if defined(CONFIG_POSIX_MEMALIGN)
+    int ret;
+    ret = posix_memalign(&ptr, alignment, size);
+    if (ret != 0) {
+        errno = ret;
+        ptr = NULL;
+    }
+#elif defined(CONFIG_ALIGNED_MALLOC)
+    ptr = _aligned_malloc(size, alignment);
+#elif defined(CONFIG_VALLOC)
+    ptr = valloc(size);
+#elif defined(CONFIG_MEMALIGN)
+    ptr = memalign(alignment, size);
+#else
+    #error No function to allocate aligned memory available
+#endif
+    trace_qemu_memalign(alignment, size, ptr);
+    return ptr;
+}
+
+void *qemu_memalign(size_t alignment, size_t size)
+{
+    void *p = qemu_try_memalign(alignment, size);
+    if (p) {
+        return p;
+    }
+    fprintf(stderr,
+            "qemu_memalign: failed to allocate %zu bytes at alignment %zu: %s\n",
+            size, alignment, strerror(errno));
+    abort();
+}
+
+void qemu_vfree(void *ptr)
+{
+    trace_qemu_vfree(ptr);
+#if !defined(CONFIG_POSIX_MEMALIGN) && defined(CONFIG_ALIGNED_MALLOC)
+    /* Only Windows _aligned_malloc needs a special free function */
+    _aligned_free(ptr);
+#else
+    free(ptr);
+#endif
+}
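
Taken together, the new file gives callers a single portable interface: qemu_try_memalign() rounds the alignment up to sizeof(void *), asserts it is a power of two, and bumps zero-size requests to one byte so every platform allocator hands back a usable pointer, while qemu_vfree() is the matching release function (plain free() would be wrong on the Windows _aligned_malloc() path). A minimal caller sketch, with illustrative alignment and size values:

    #include "qemu/osdep.h"
    #include "qemu/memalign.h"

    /* Hypothetical caller: request a cache-line-aligned buffer and handle
     * allocation failure itself instead of aborting. Values are illustrative. */
    static int example(void)
    {
        void *buf = qemu_try_memalign(64, 0);   /* zero size still yields a valid pointer */

        if (!buf) {
            return -errno;                      /* e.g. ENOMEM from posix_memalign() */
        }
        /* ... use buf ... */
        qemu_vfree(buf);                        /* matching free; never plain free() */
        return 0;
    }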