author    Richard Henderson <richard.henderson@linaro.org>    2024-11-12 11:32:01 -0800
committer Richard Henderson <richard.henderson@linaro.org>    2024-11-16 08:42:25 -0800
commit    c81d1fafa6233448bcc2d8fcd2ba63a4ae834f3a
tree      e1aad510f13fa666c422089b00c7c02c6077e37b /linux-user/elfload.c
parent    779f30a01af8566780cefc8639505b758950afb3
linux-user: Honor elf alignment when placing images
Most binaries don't actually depend on more than page alignment,
but any binary can request it.  Not honoring this was a bug.

This became obvious when gdb reported

    Failed to read a valid object file image from memory

when examining some vdsos that are marked as needing more
than page alignment.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
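
The alignment a binary requests comes from the p_align fields of its
PT_LOAD program headers; the info->alignment value handled below is the
maximum of those.  As a rough standalone sketch (not part of the patch;
plain <elf.h> types, and the helper name is hypothetical), collecting
that value amounts to:

    /*
     * Illustrative only: the alignment a binary requests is the largest
     * p_align among its PT_LOAD segments.
     */
    #include <elf.h>
    #include <stddef.h>
    #include <stdint.h>

    static uint64_t max_load_alignment(const Elf64_Phdr *phdr, size_t phnum)
    {
        uint64_t align = 1;

        for (size_t i = 0; i < phnum; i++) {
            if (phdr[i].p_type == PT_LOAD && phdr[i].p_align > align) {
                align = phdr[i].p_align;
            }
        }
        return align;
    }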
Diffstat (limited to 'linux-user/elfload.c')
-rw-r--r--  linux-user/elfload.c  |  35
1 file changed, 28 insertions(+), 7 deletions(-)
diff --git a/linux-user/elfload.c b/linux-user/elfload.c
index d6ad77d27d..90e79a01b4 100644
--- a/linux-user/elfload.c
+++ b/linux-user/elfload.c
@@ -3179,7 +3179,8 @@ static void load_elf_image(const char *image_name, const ImageSource *src,
                            char **pinterp_name)
 {
     g_autofree struct elf_phdr *phdr = NULL;
-    abi_ulong load_addr, load_bias, loaddr, hiaddr, error;
+    abi_ulong load_addr, load_bias, loaddr, hiaddr, error, align;
+    size_t reserve_size, align_size;
     int i, prot_exec;
     Error *err = NULL;
 
@@ -3263,6 +3264,9 @@ static void load_elf_image(const char *image_name, const ImageSource *src,
 
     load_addr = loaddr;
 
+    align = pow2ceil(info->alignment);
+    info->alignment = align;
+
     if (pinterp_name != NULL) {
         if (ehdr->e_type == ET_EXEC) {
             /*
@@ -3271,8 +3275,6 @@ static void load_elf_image(const char *image_name, const ImageSource *src,
              */
             probe_guest_base(image_name, loaddr, hiaddr);
         } else {
-            abi_ulong align;
-
             /*
              * The binary is dynamic, but we still need to
              * select guest_base.  In this case we pass a size.
@@ -3290,10 +3292,7 @@ static void load_elf_image(const char *image_name, const ImageSource *src,
              * Since we do not have complete control over the guest
              * address space, we prefer the kernel to choose some address
              * rather than force the use of LOAD_ADDR via MAP_FIXED.
-             * But without MAP_FIXED we cannot guarantee alignment,
-             * only suggest it.
              */
-            align = pow2ceil(info->alignment);
             if (align) {
                 load_addr &= -align;
             }
@@ -3317,13 +3316,35 @@ static void load_elf_image(const char *image_name, const ImageSource *src,
      * In both cases, we will overwrite pages in this range with mappings
      * from the executable.
      */
-    load_addr = target_mmap(load_addr, (size_t)hiaddr - loaddr + 1, PROT_NONE,
+    reserve_size = (size_t)hiaddr - loaddr + 1;
+    align_size = reserve_size;
+
+    if (ehdr->e_type != ET_EXEC && align > qemu_real_host_page_size()) {
+        align_size += align - 1;
+    }
+
+    load_addr = target_mmap(load_addr, align_size, PROT_NONE,
                             MAP_PRIVATE | MAP_ANON | MAP_NORESERVE |
                             (ehdr->e_type == ET_EXEC ? MAP_FIXED_NOREPLACE : 0),
                             -1, 0);
     if (load_addr == -1) {
         goto exit_mmap;
     }
+
+    if (align_size != reserve_size) {
+        abi_ulong align_addr = ROUND_UP(load_addr, align);
+        abi_ulong align_end = align_addr + reserve_size;
+        abi_ulong load_end = load_addr + align_size;
+
+        if (align_addr != load_addr) {
+            target_munmap(load_addr, align_addr - load_addr);
+        }
+        if (align_end != load_end) {
+            target_munmap(align_end, load_end - align_end);
+        }
+        load_addr = align_addr;
+    }
+
     load_bias = load_addr - loaddr;
 
     if (elf_is_fdpic(ehdr)) {
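
For reference, the reserve-and-trim pattern added by the final hunk can be
expressed as a standalone sketch using host mmap/munmap rather than QEMU's
target_mmap.  It assumes align is a power of two greater than the page
size; reserve_aligned is a hypothetical name, not QEMU code.

    #include <stddef.h>
    #include <stdint.h>
    #include <sys/mman.h>

    static void *reserve_aligned(size_t size, uintptr_t align)
    {
        /* Over-reserve so an aligned sub-range of 'size' bytes must exist. */
        size_t full = size + align - 1;
        uint8_t *base = mmap(NULL, full, PROT_NONE,
                             MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
        if (base == MAP_FAILED) {
            return MAP_FAILED;
        }

        uint8_t *aligned = (uint8_t *)(((uintptr_t)base + align - 1) & -align);
        uint8_t *end = aligned + size;

        /* Return the slack before and after the aligned region to the kernel. */
        if (aligned != base) {
            munmap(base, aligned - base);
        }
        if (end != base + full) {
            munmap(end, base + full - end);
        }
        return aligned;
    }

The patch follows the same shape: target_mmap reserves reserve_size plus
the extra slack, ROUND_UP picks the aligned start, and the two
target_munmap calls trim the head and tail of the reservation.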