author     Peter Maydell <peter.maydell@linaro.org>  2019-04-09 17:36:00 +0100
committer  Peter Maydell <peter.maydell@linaro.org>  2019-04-09 17:36:01 +0100
commit     4b9a21c3442530908b2863b924e7c5ea7496cc57 (patch)
tree       04d9620a5c3052c2be557716512153dfbc369ecc /include
parent     8cb2ca3d7479748587313f0b34034a3f8aa08c92 (diff)
parent     3e20c81ed87cb15cd04f929b075e244e0758641a (diff)
Merge remote-tracking branch 'remotes/bonzini/tags/for-upstream' into staging
* fixes for Alpine and SuSE
* fix crash when hot-plugging nvdimm on older machine types

# gpg: Signature made Tue 09 Apr 2019 17:34:27 BST
# gpg:                using RSA key BFFBD25F78C7AE83
# gpg: Good signature from "Paolo Bonzini <bonzini@gnu.org>" [full]
# gpg:                 aka "Paolo Bonzini <pbonzini@redhat.com>" [full]
# Primary key fingerprint: 46F5 9FBD 57D6 12E7 BFD4  E2F7 7E15 100C CD36 69B1
#      Subkey fingerprint: F133 3857 4B66 2389 866C  7682 BFFB D25F 78C7 AE83

* remotes/bonzini/tags/for-upstream:
  tests: Make check-block a phony target
  hw/i386/pc: Fix crash when hot-plugging nvdimm on older machine types
  include/qemu/bswap.h: Use __builtin_memcpy() in accessor functions
  roms: Allow passing configure options to the EDK2 build tools
  roms: Rename the EFIROM variable to avoid clashing with iPXE

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Diffstat (limited to 'include')
-rw-r--r--  include/qemu/bswap.h | 26
1 file changed, 16 insertions, 10 deletions
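
For context, the pattern this patch adopts can be seen in isolation in the sketch below. It is not part of the patch, and the names (example_ldl_he, buf) are illustrative only. It mirrors the host-endian 32-bit load accessor: copying through __builtin_memcpy avoids a misaligned uint32_t dereference while sidestepping any fortify-source memcpy wrapper, so the compiler remains free to lower the copy to a single native unaligned load where the target allows it.

/*
 * Standalone sketch (not from the patch): the bswap.h accessor pattern,
 * reading a 32-bit value from an unaligned offset via __builtin_memcpy.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static inline uint32_t example_ldl_he(const void *ptr)
{
    uint32_t r;
    __builtin_memcpy(&r, ptr, sizeof(r));
    return r;
}

int main(void)
{
    uint8_t buf[8] = { 0x00, 0x78, 0x56, 0x34, 0x12, 0x00, 0x00, 0x00 };

    /* buf + 1 is typically not 4-byte aligned; the accessor still
     * reads it safely, with no misaligned pointer dereference. */
    printf("0x%08" PRIx32 "\n", example_ldl_he(buf + 1));
    return 0;
}

Because the accessor is host-endian, the printed value depends on the host byte order (0x12345678 on a little-endian host).
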
diff --git a/include/qemu/bswap.h b/include/qemu/bswap.h
index 5a70f78c0b..2a9f3fe783 100644
--- a/include/qemu/bswap.h
+++ b/include/qemu/bswap.h
@@ -316,51 +316,57 @@ static inline void stb_p(void *ptr, uint8_t v)
     *(uint8_t *)ptr = v;
 }
 
-/* Any compiler worth its salt will turn these memcpy into native unaligned
-   operations.  Thus we don't need to play games with packed attributes, or
-   inline byte-by-byte stores.  */
+/*
+ * Any compiler worth its salt will turn these memcpy into native unaligned
+ * operations.  Thus we don't need to play games with packed attributes, or
+ * inline byte-by-byte stores.
+ * Some compilation environments (eg some fortify-source implementations)
+ * may intercept memcpy() in a way that defeats the compiler optimization,
+ * though, so we use __builtin_memcpy() to give ourselves the best chance
+ * of good performance.
+ */
 
 static inline int lduw_he_p(const void *ptr)
 {
     uint16_t r;
-    memcpy(&r, ptr, sizeof(r));
+    __builtin_memcpy(&r, ptr, sizeof(r));
     return r;
 }
 
 static inline int ldsw_he_p(const void *ptr)
 {
     int16_t r;
-    memcpy(&r, ptr, sizeof(r));
+    __builtin_memcpy(&r, ptr, sizeof(r));
     return r;
 }
 
 static inline void stw_he_p(void *ptr, uint16_t v)
 {
-    memcpy(ptr, &v, sizeof(v));
+    __builtin_memcpy(ptr, &v, sizeof(v));
 }
 
 static inline int ldl_he_p(const void *ptr)
 {
     int32_t r;
-    memcpy(&r, ptr, sizeof(r));
+    __builtin_memcpy(&r, ptr, sizeof(r));
     return r;
 }
 
 static inline void stl_he_p(void *ptr, uint32_t v)
 {
-    memcpy(ptr, &v, sizeof(v));
+    __builtin_memcpy(ptr, &v, sizeof(v));
 }
 
 static inline uint64_t ldq_he_p(const void *ptr)
 {
     uint64_t r;
-    memcpy(&r, ptr, sizeof(r));
+    __builtin_memcpy(&r, ptr, sizeof(r));
     return r;
 }
 
 static inline void stq_he_p(void *ptr, uint64_t v)
 {
-    memcpy(ptr, &v, sizeof(v));
+    __builtin_memcpy(ptr, &v, sizeof(v));
 }
 
 static inline int lduw_le_p(const void *ptr)