about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorptitSeb <sebastien.chev@gmail.com>2025-09-08 20:45:12 +0200
committerptitSeb <sebastien.chev@gmail.com>2025-09-08 20:45:12 +0200
commit464fdc3ebe5816c281b0cf9e1960cb33386e29e9 (patch)
tree9edfde55580db30db5abfcbf92fb4fee81f7b940
parent6b9ed18286d9296bf91a98619e84ae1ace6a9ba8 (diff)
downloadbox64-464fdc3ebe5816c281b0cf9e1960cb33386e29e9.tar.gz
box64-464fdc3ebe5816c281b0cf9e1960cb33386e29e9.zip
[BOX32] Various small improvements on box32, mostly on memory tracking
-rw-r--r--src/custommem.c155
-rw-r--r--src/emu/x64emu.c4
-rw-r--r--src/include/custommem.h2
-rw-r--r--src/libtools/threads.c6
-rwxr-xr-xsrc/libtools/threads32.c8
-rw-r--r--src/os/backtrace.c6
-rw-r--r--src/tools/box64stack.c2
-rw-r--r--src/tools/bridge.c2
-rw-r--r--src/wrapped/wrappedlibc.c2
-rwxr-xr-xsrc/wrapped32/wrappedlibc.c2
10 files changed, 135 insertions, 54 deletions
diff --git a/src/custommem.c b/src/custommem.c
index 00fc794e..60e0549e 100644
--- a/src/custommem.c
+++ b/src/custommem.c
@@ -74,7 +74,11 @@ typedef enum {
     MEM_UNUSED = 0,
     MEM_ALLOCATED = 1,
     MEM_RESERVED = 2,
-    MEM_MMAP = 3
+    MEM_MMAP = 3,
+    MEM_BOX = 5,
+    MEM_STACK = 9,
+    MEM_EXTERNAL = 17,
+    MEM_ELF = 33
 } mem_flag_t;
 rbtree_t*  mapallmem = NULL;
 static rbtree_t*  blockstree = NULL;
@@ -122,10 +126,10 @@ typedef struct blockmark_s {
 #define LAST_BLOCK(b, s) (blockmark_t*)(((uintptr_t)(b)+(s))-sizeof(blockmark_t))
 #define SIZE_BLOCK(b) (((ssize_t)b.offs)-sizeof(blockmark_t))
 
-void printBlock(blockmark_t* b, void* start)
+void printBlock(blockmark_t* b, void* start, size_t sz)
 {
     if(!b) return;
-    printf_log(LOG_NONE, "========== Block is:\n");
+    printf_log(LOG_NONE, "========== Block is: (%p - %p)\n", b, ((void*)b)+sz);
     do {
         printf_log(LOG_NONE, "%c%p, fill=%d, size=0x%x (prev=%d/0x%x)\n", b==start?'*':' ', b, b->next.fill, SIZE_BLOCK(b->next), b->prev.fill, SIZE_BLOCK(b->prev));
         b = NEXT_BLOCK(b);
@@ -340,6 +344,7 @@ static int isBlockChainCoherent(blockmark_t* m, blockmark_t* end)
     while(m) {
         if(m>end) return 0;
         if(m==end) return 1;
+        if(m==NEXT_BLOCK(m)) return 0;
         m = NEXT_BLOCK(m);
     }
     return 0;
@@ -356,7 +361,7 @@ int printBlockCoherent(int i)
     blockmark_t* m = (blockmark_t*)p_blocks[i].block;
     if(!m) {printf_log(LOG_NONE, "Warning, block #%d is NULL\n", i); return 0;}
     // check coherency of the chained list first
-    if(!isBlockChainCoherent(m, (blockmark_t*)(p_blocks[i].block+p_blocks[i].size-sizeof(blockmark_t)))) {printf_log(LOG_NONE, "Warning, block #%d chained list is not coherent\n", i); return 0;}
+    if(!isBlockChainCoherent(m, (blockmark_t*)(p_blocks[i].block+p_blocks[i].size-sizeof(blockmark_t)))) {printf_log(LOG_NONE, "Warning, block #%d %schained list is not coherent\n", i, p_blocks[i].is32bits?"(32bits) ":""); return 0;}
     // check if first is correct
     blockmark_t* first = getNextFreeBlock(m);
     if(p_blocks[i].first && p_blocks[i].first!=first) {printf_log(LOG_NONE, "First %p and stored first %p differs for block %d\n", first, p_blocks[i].first, i); ret = 0;}
@@ -387,6 +392,21 @@ int printBlockCoherent(int i)
     return ret;
 }
 
+static char* niceSize(size_t sz)
+{
+    static int idx = 0;
+    static char rets[16][50] = {0};
+    int i = idx = (idx+1)&15;
+    const char* units[] = {"b", "kb", "Mb", "Gb"};
+    const size_t vals[] = {1, 1024, 1024*1024, 1024*1024*1024};
+    int k = 0;
+    for(int j=0; j<sizeof(vals)/sizeof(vals[0]); ++j)
+        if(vals[j]<sz)
+            k = j;
+    sprintf(rets[i], "%zd %s", sz/vals[k], units[k]);
+    return rets[i];
+}
+
 void testAllBlocks()
 {
     size_t total = 0;
@@ -402,7 +422,7 @@ void testAllBlocks()
             int is32bits = p_blocks[i].is32bits;
             if(is32bits) ++n_blocks32;
             if((p_blocks[i].type==BTYPE_LIST) && !printBlockCoherent(i))
-                printBlock(p_blocks[i].block, p_blocks[i].first);
+                printBlock(p_blocks[i].block, p_blocks[i].first, p_blocks[i].size);
             total += p_blocks[i].size;
             if(is32bits) total32 += p_blocks[i].size;
             if(p_blocks[i].type==BTYPE_LIST) {
@@ -428,9 +448,9 @@ void testAllBlocks()
             }
         }
     }
-    printf_log(LOG_NONE, "CustomMem: Total %d blocks, for %zd (0x%zx) allocated memory, max_free %zd (0x%zx), total fragmented free %zd (0x%zx)\n", n_blocks, total, total, max_free, max_free, fragmented_free, fragmented_free);
+    printf_log(LOG_NONE, "CustomMem: Total %d blocks, for %s (0x%zx) allocated memory, max_free %s (0x%zx), total fragmented free %s (0x%zx)\n", n_blocks, niceSize(total), total, niceSize(max_free), max_free, niceSize(fragmented_free), fragmented_free);
     if(box64_is32bits)
-        printf_log(LOG_NONE, "   32bits: Total %d blocks, for %zd (0x%zx) allocated memory, max_free %zd (0x%zx), total fragmented free %zd (0x%zx)\n", n_blocks32, total32, total32, max_free32, max_free32, fragmented_free32, fragmented_free32);
+        printf_log(LOG_NONE, "   32bits: Total %d blocks, for %s (0x%zx) allocated memory, max_free %s (0x%zx), total fragmented free %s (0x%zx)\n", n_blocks32, niceSize(total32), total32, niceSize(max_free32), max_free32, niceSize(fragmented_free32), fragmented_free32);
 }
 
 static size_t roundSize(size_t size)
@@ -521,16 +541,18 @@ int isCustomAddr(void* p)
 static uintptr_t    defered_prot_p = 0;
 static size_t       defered_prot_sz = 0;
 static uint32_t     defered_prot_prot = 0;
+static mem_flag_t   defered_prot_flags = MEM_ALLOCATED;
 static sigset_t     critical_prot = {0};
+static void setProtection_generic(uintptr_t addr, size_t sz, uint32_t prot, mem_flag_t flags);
 #define LOCK_PROT()         sigset_t old_sig = {0}; pthread_sigmask(SIG_BLOCK, &critical_prot, &old_sig); mutex_lock(&mutex_prot)
 #define LOCK_PROT_READ()    sigset_t old_sig = {0}; pthread_sigmask(SIG_BLOCK, &critical_prot, &old_sig); mutex_lock(&mutex_prot)
 #define LOCK_PROT_FAST()    mutex_lock(&mutex_prot)
 #define UNLOCK_PROT()       if(defered_prot_p) {                                \
-                                uintptr_t p = defered_prot_p; size_t sz = defered_prot_sz; uint32_t prot = defered_prot_prot; \
+                                uintptr_t p = defered_prot_p; size_t sz = defered_prot_sz; uint32_t prot = defered_prot_prot; mem_flag_t f = defered_prot_flags;\
                                 defered_prot_p = 0;                             \
                                 pthread_sigmask(SIG_SETMASK, &old_sig, NULL);   \
                                 mutex_unlock(&mutex_prot);                      \
-                                setProtection(p, sz, prot);                     \
+                                setProtection_generic(p, sz, prot, f);          \
                             } else {                                            \
                                 pthread_sigmask(SIG_SETMASK, &old_sig, NULL);   \
                                 mutex_unlock(&mutex_prot);                      \
@@ -642,6 +664,7 @@ void* map128_customMalloc(size_t size, int is32bits)
             defered_prot_p = (uintptr_t)p;
             defered_prot_sz = allocsize;
             defered_prot_prot = PROT_READ|PROT_WRITE;
+            defered_prot_flags = MEM_ALLOCATED;
         } else
             setProtection((uintptr_t)p, allocsize, PROT_READ | PROT_WRITE);
     }
@@ -740,6 +763,7 @@ void* map64_customMalloc(size_t size, int is32bits)
             defered_prot_p    = (uintptr_t)p;
             defered_prot_sz   = allocsize;
             defered_prot_prot = PROT_READ | PROT_WRITE;
+            defered_prot_flags = MEM_ALLOCATED;
         } else {
             setProtection((uintptr_t)p, allocsize, PROT_READ | PROT_WRITE);
         }
@@ -855,6 +879,7 @@ void* internal_customMalloc(size_t size, int is32bits)
             defered_prot_p = (uintptr_t)p;
             defered_prot_sz = allocsize;
             defered_prot_prot = PROT_READ|PROT_WRITE;
+            defered_prot_flags = MEM_ALLOCATED;
         } else
             setProtection((uintptr_t)p, allocsize, PROT_READ | PROT_WRITE);
     }
@@ -1057,9 +1082,11 @@ void* internal_customMemAligned(size_t align, size_t size, int is32bits)
                 if(rsize-size<THRESHOLD)
                     size = rsize;
                 blockmark_t* new_sub = sub;
-                if(empty_size)
+                if(empty_size) {
                     new_sub = createAlignBlock(p_blocks[i].block, sub, empty_size); // this block is a marker, between 2 free blocks
-                void* ret = allocBlock(p_blocks[i].block, new_sub, size-empty_size, &p_blocks[i].first);
+                    size -= empty_size;
+                }
+                void* ret = allocBlock(p_blocks[i].block, new_sub, size, &p_blocks[i].first);
                 if((uintptr_t)p_blocks[i].first>(uintptr_t)sub && (sub!=new_sub))
                     p_blocks[i].first = sub;
                 if(rsize==p_blocks[i].maxfree)
@@ -1086,8 +1113,8 @@ void* internal_customMemAligned(size_t align, size_t size, int is32bits)
     if(is32bits)
         mutex_unlock(&mutex_blocks);
     void* p = is32bits
-        ? mmap(NULL, allocsize, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE | MAP_32BIT, -1, 0)
-        : InternalMmap(NULL, allocsize, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+        ? box_mmap(NULL, allocsize, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE | MAP_32BIT, -1, 0)
+        : (box64_is32bits ? box32_dynarec_mmap(allocsize, -1, 0) : InternalMmap(NULL, allocsize, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0));
     if(is32bits)
         mutex_lock(&mutex_blocks);
 #ifdef TRACE_MEMSTAT
@@ -1096,6 +1123,34 @@ void* internal_customMemAligned(size_t align, size_t size, int is32bits)
     p_blocks[i].block = p;
     p_blocks[i].first = p;
     p_blocks[i].size = allocsize;
+    if(is32bits && p>(void*)0xffffffffLL) {
+        printf_log(LOG_INFO, "Warning: failed to allocate aligned 0x%x (0x%x) bytes in 32bits address space (block %d)\n", size, allocsize, i);
+        // failed to allocate memory
+        if(BOX64ENV(showbt) || BOX64ENV(showsegv)) {
+            // mask size from this block
+            p_blocks[i].size = 0;
+            mutex_unlock(&mutex_blocks);
+            ShowNativeBT(LOG_NONE);
+            testAllBlocks();
+            if(1 || BOX64ENV(log)>=LOG_DEBUG) {
+                printf_log(LOG_NONE, "Used 32bits address space map:\n");
+                uintptr_t addr = rb_get_leftmost(mapallmem);
+                while(addr<0x100000000LL) {
+                    uintptr_t bend;
+                    uint32_t val;
+                    if(rb_get_end(mapallmem, addr, &val, &bend))
+                        printf_log(LOG_NONE, "\t%p - %p (%d)\n", (void*)addr, (void*)bend, val);
+                    addr = bend;
+                }
+            }
+            p_blocks[i].size = allocsize;
+        }
+        #ifdef TRACE_MEMSTAT
+        printf_log(LOG_INFO, "Custommem: Failed to aligned alloc 32bits: allocation %p-%p for LIST Alloc p_blocks[%d]\n", p, p+allocsize, i);
+        #endif
+        p_blocks[i].maxfree = allocsize - sizeof(blockmark_t)*2;
+        return NULL;
+    }
     #ifdef TRACE_MEMSTAT
     printf_log(LOG_INFO, "Custommem: allocation %p-%p for LIST Alloc p_blocks[%d], aligned\n", p, p+allocsize, i);
     #endif
@@ -1115,14 +1170,12 @@ void* internal_customMemAligned(size_t align, size_t size, int is32bits)
         empty_size += align;
         aligned_p += align;
     }
-    void* new_sub = NULL;
     sub = p;
+    void* new_sub = sub;
     if(empty_size)
         new_sub = createAlignBlock(p_blocks[i].block, sub, empty_size);
     // alloc 1st block
     void* ret  = allocBlock(p_blocks[i].block, new_sub, size, &p_blocks[i].first);
-    if(sub!=new_sub)
-        p_blocks[i].first = sub;
     p_blocks[i].maxfree = getMaxFreeBlock(p_blocks[i].block, p_blocks[i].size, p_blocks[i].first);
     mutex_unlock(&mutex_blocks);
     if(mapallmem)
@@ -1139,6 +1192,8 @@ void* customMemAligned32(size_t align, size_t size)
     void* ret = internal_customMemAligned(align, size, 1);
     if(((uintptr_t)ret)>=0x100000000LL) {
         printf_log(LOG_NONE, "Error, customAligned32(0x%lx, 0x%lx) return 64bits point %p\n", align, size, ret);
+        ShowNativeBT(LOG_NONE);
+        testAllBlocks();
     }
     return ret;
 }
@@ -1182,9 +1237,9 @@ void* box32_dynarec_mmap(size_t size, int fd, off_t offset)
         if(rb_get_end(mapallmem, cur, &flag, &bend)) {
             if(flag == MEM_RESERVED && bend-cur>=size) {
                 void* ret = InternalMmap((void*)cur, size, PROT_READ | PROT_WRITE | PROT_EXEC, map_flags, fd, offset);
-                if(ret!=MAP_FAILED)
-                    rb_set(mapallmem, cur, cur+size, MEM_ALLOCATED);    // mark as allocated
-                else
+                if(ret!=MAP_FAILED) {
+                    //rb_set(mapallmem, cur, cur+size, MEM_BOX);    // mark as allocated by/for box
+                } else
                     printf_log(LOG_INFO, "BOX32: Error allocating Dynarec memory: %s\n", strerror(errno));
                 cur = cur+size;
                 return ret;
@@ -1294,7 +1349,7 @@ int MmaplistAddBlock(mmaplist_t* list, int fd, off_t offset, void* orig, size_t
     #ifdef MADV_HUGEPAGE
     madvise(map, size, MADV_HUGEPAGE);
     #endif
-    setProtection((uintptr_t)map, size, PROT_READ | PROT_WRITE | PROT_EXEC);
+    setProtection_box((uintptr_t)map, size, PROT_READ | PROT_WRITE | PROT_EXEC);
     list->chunks[i] = map;
     intptr_t delta = map - orig;
     // relocate the pointers
@@ -1492,7 +1547,7 @@ uintptr_t AllocDynarecMap(uintptr_t x64_addr, size_t size, int is_new)
     if(box64env.dynarec_log>LOG_INFO || box64env.dynarec_dump)
         dynarec_log(LOG_NONE, "Custommem: allocation %p-%p for Dynarec %p->chunk[%d]\n", p, p+allocsize, list, i);
 #endif
-    setProtection((uintptr_t)p, allocsize, PROT_READ | PROT_WRITE | PROT_EXEC);
+    setProtection_box((uintptr_t)p, allocsize, PROT_READ | PROT_WRITE | PROT_EXEC);
     list->chunks[i] = p;
     rb_set_64(rbt_dynmem, (uintptr_t)p, (uintptr_t)p+allocsize, (uintptr_t)list->chunks[i]);
     p = p + sizeof(blocklist_t);    // adjust pointer and size, to exclude blocklist_t itself
@@ -2235,7 +2290,7 @@ void updateProtection(uintptr_t addr, size_t size, uint32_t prot)
     LOCK_PROT();
     uintptr_t cur = addr & ~(box64_pagesize-1);
     uintptr_t end = ALIGN(cur+size);
-    rb_set(mapallmem, cur, cur+size, MEM_ALLOCATED);
+    //rb_set(mapallmem, cur, cur+size, MEM_ALLOCATED);
     while (cur < end) {
         uintptr_t bend;
         uint32_t oprot;
@@ -2260,20 +2315,7 @@ void updateProtection(uintptr_t addr, size_t size, uint32_t prot)
     UNLOCK_PROT();
 }
 
-void setProtection(uintptr_t addr, size_t size, uint32_t prot)
-{
-    size = ALIGN(size);
-    LOCK_PROT();
-    ++setting_prot;
-    uintptr_t cur = addr & ~(box64_pagesize-1);
-    uintptr_t end = ALIGN(cur+size);
-    rb_set(mapallmem, cur, end, MEM_ALLOCATED);
-    rb_set(memprot, cur, end, prot);
-    --setting_prot;
-    UNLOCK_PROT();
-}
-
-void setProtection_mmap(uintptr_t addr, size_t size, uint32_t prot)
+static void setProtection_generic(uintptr_t addr, size_t size, uint32_t prot, mem_flag_t flag)
 {
     if(!size)
         return;
@@ -2281,7 +2323,7 @@ void setProtection_mmap(uintptr_t addr, size_t size, uint32_t prot)
     size = ALIGN(size);
     if(!prot) {
         LOCK_PROT();
-        rb_set(mapallmem, addr, addr+size, MEM_MMAP);
+        rb_set(mapallmem, addr, addr+size, flag);
         rb_unset(memprot, addr, addr+size);
         UNLOCK_PROT();
     }
@@ -2290,22 +2332,51 @@ void setProtection_mmap(uintptr_t addr, size_t size, uint32_t prot)
         ++setting_prot;
         uintptr_t cur = addr & ~(box64_pagesize-1);
         uintptr_t end = ALIGN(cur+size);
-        rb_set(mapallmem, cur, end, MEM_MMAP);
+        rb_set(mapallmem, cur, end, flag);
         rb_set(memprot, cur, end, prot);
         --setting_prot;
         UNLOCK_PROT();
     }
 }
 
+
+void setProtection(uintptr_t addr, size_t size, uint32_t prot)
+{
+    size = ALIGN(size);
+    LOCK_PROT();
+    ++setting_prot;
+    uintptr_t cur = addr & ~(box64_pagesize-1);
+    uintptr_t end = ALIGN(cur+size);
+    rb_set(mapallmem, cur, end, MEM_ALLOCATED);
+    rb_set(memprot, cur, end, prot);
+    --setting_prot;
+    UNLOCK_PROT();
+}
+
+void setProtection_mmap(uintptr_t addr, size_t size, uint32_t prot)
+{
+    setProtection_generic(addr, size, prot, MEM_MMAP);
+}
+
+void setProtection_box(uintptr_t addr, size_t size, uint32_t prot)
+{
+    setProtection_generic(addr, size, prot, MEM_BOX);
+}
+
+void setProtection_stack(uintptr_t addr, size_t size, uint32_t prot)
+{
+    setProtection_generic(addr, size, prot, MEM_STACK);
+}
+
 void setProtection_elf(uintptr_t addr, size_t size, uint32_t prot)
 {
     size = ALIGN(size);
     addr &= ~(box64_pagesize-1);
     if(prot)
-        setProtection(addr, size, prot);
+        setProtection_generic(addr, size, prot, MEM_ELF);
     else {
         LOCK_PROT();
-        rb_set(mapallmem, addr, addr+size, MEM_ALLOCATED);
+        rb_set(mapallmem, addr, addr+size, MEM_ELF);
         rb_unset(memprot, addr, addr+size);
         UNLOCK_PROT();
     }
@@ -2334,7 +2405,7 @@ void allocProtection(uintptr_t addr, size_t size, uint32_t prot)
     int there = rb_get_end(mapallmem, addr, &val, &endb);
     // block is here or absent, no half-block handled..
     if(!there)
-        rb_set(mapallmem, addr, addr+size, MEM_ALLOCATED);
+        rb_set(mapallmem, addr, addr+size, MEM_EXTERNAL);
     UNLOCK_PROT();
     // don't need to add precise tracking probably
 }
@@ -2640,7 +2711,7 @@ void my_reserveHighMem()
             void* ret = InternalMmap((void*)cur, bend - cur, 0, MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0);
             printf_log(LOG_DEBUG, "Reserve %p-%p => %p (%s)\n", (void*)cur, bend, ret, (ret==MAP_FAILED)?strerror(errno):"ok");
             if(ret!=(void*)-1) {
-                rb_set(mapallmem, cur, bend, MEM_ALLOCATED);
+                rb_set(mapallmem, cur, bend, MEM_RESERVED);
             }
         }
         cur = bend;
diff --git a/src/emu/x64emu.c b/src/emu/x64emu.c
index 0c68de08..4af656f5 100644
--- a/src/emu/x64emu.c
+++ b/src/emu/x64emu.c
@@ -140,8 +140,10 @@ void SetTraceEmu(uintptr_t start, uintptr_t end)
 
 static void internalFreeX64(x64emu_t* emu)
 {
-    if(emu && emu->stack2free)
+    if(emu && emu->stack2free) {
         munmap(emu->stack2free, emu->size_stack);
+        freeProtection((uintptr_t)emu->stack2free, emu->size_stack);
+    }
     #ifdef BOX32
     if(emu->res_state_32)
         actual_free(emu->res_state_32);
diff --git a/src/include/custommem.h b/src/include/custommem.h
index 28503828..4209f827 100644
--- a/src/include/custommem.h
+++ b/src/include/custommem.h
@@ -107,6 +107,8 @@ uintptr_t getJumpAddress64(uintptr_t addr);
 void updateProtection(uintptr_t addr, size_t size, uint32_t prot);
 void setProtection(uintptr_t addr, size_t size, uint32_t prot);
 void setProtection_mmap(uintptr_t addr, size_t size, uint32_t prot);
+void setProtection_box(uintptr_t addr, size_t size, uint32_t prot);
+void setProtection_stack(uintptr_t addr, size_t size, uint32_t prot);
 void setProtection_elf(uintptr_t addr, size_t size, uint32_t prot);
 void freeProtection(uintptr_t addr, size_t size);
 void refreshProtection(uintptr_t addr);
diff --git a/src/libtools/threads.c b/src/libtools/threads.c
index aa0cbe08..71222eca 100644
--- a/src/libtools/threads.c
+++ b/src/libtools/threads.c
@@ -202,7 +202,7 @@ x64emu_t* thread_get_emu()
 		else
             stack = InternalMmap(NULL, stacksize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_GROWSDOWN, -1, 0);
 		if(stack!=MAP_FAILED)
-			setProtection((uintptr_t)stack, stacksize, PROT_READ|PROT_WRITE);
+			setProtection_stack((uintptr_t)stack, stacksize, PROT_READ|PROT_WRITE);
 		x64emu_t *emu = NewX64Emu(my_context, my_context->exit_bridge, (uintptr_t)stack, stacksize, 1);
 		SetupX64Emu(emu, NULL);
 		thread_set_emu(emu);
@@ -525,7 +525,7 @@ EXPORT int my_pthread_create(x64emu_t *emu, void* t, void* attr, void* start_rou
 	} else {
         stack = InternalMmap(NULL, stacksize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_GROWSDOWN, -1, 0);
         if(stack!=MAP_FAILED)
-	        setProtection((uintptr_t)stack, stacksize, PROT_READ|PROT_WRITE);
+	        setProtection_stack((uintptr_t)stack, stacksize, PROT_READ|PROT_WRITE);
 		own = 1;
 	}
 
@@ -554,7 +554,7 @@ void* my_prepare_thread(x64emu_t *emu, void* f, void* arg, int ssize, void** pet
 	int stacksize = (ssize)?ssize:(2*1024*1024);	//default stack size is 2Mo
     void* stack = InternalMmap(NULL, stacksize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_GROWSDOWN, -1, 0);
     if(stack!=MAP_FAILED)
-		setProtection((uintptr_t)stack, stacksize, PROT_READ|PROT_WRITE);
+		setProtection_stack((uintptr_t)stack, stacksize, PROT_READ|PROT_WRITE);
 	emuthread_t *et = (emuthread_t*)box_calloc(1, sizeof(emuthread_t));
 	x64emu_t *emuthread = NewX64Emu(emu->context, (uintptr_t)f, (uintptr_t)stack, stacksize, 1);
 	SetupX64Emu(emuthread, emu					);
diff --git a/src/libtools/threads32.c b/src/libtools/threads32.c
index 92e31240..318db0d2 100755
--- a/src/libtools/threads32.c
+++ b/src/libtools/threads32.c
@@ -206,12 +206,16 @@ EXPORT int my32_pthread_create(x64emu_t *emu, void* t, void* attr, void* start_r
 	}
 	if(!stack) {
 		//stack = malloc(stacksize);
-		stack = mmap64(NULL, stacksize, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_32BIT, -1, 0);
+		stack = box_mmap(NULL, stacksize, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_32BIT, -1, 0);
+		setProtection_stack((uintptr_t)stack, stacksize, PROT_READ|PROT_WRITE);
 		own = 1;
 	}
 
 	if((uintptr_t)stack>=0x100000000LL) {
-		if(own) munmap(stack, stacksize);
+		if(own) {
+			box_munmap(stack, stacksize);
+			freeProtection((uintptr_t)stack, stacksize);
+		}
 		return EAGAIN;
 	}
 
diff --git a/src/os/backtrace.c b/src/os/backtrace.c
index f0345cd6..2d1261d9 100644
--- a/src/os/backtrace.c
+++ b/src/os/backtrace.c
@@ -34,8 +34,10 @@ void ShowNativeBT(int log_minimum)
         for (int j = 0; j < nptrs; j++)
             printf_log(log_minimum, "NativeBT: %s\n", strings[j]);
         free(strings);
-    } else
-        printf_log(log_minimum, "NativeBT: none (%d/%s)\n", errno, strerror(errno));
+    } else {
+        for (int j = 0; j < nptrs; j++)
+            printf_log(log_minimum, "NativeBT: %p\n", buffer[j]);
+    }
     // restore modified name
     memcpy(my_context->box64path, my_context->orig_argv[0], boxpath_lenth);
 }
diff --git a/src/tools/box64stack.c b/src/tools/box64stack.c
index 4db5653e..56159264 100644
--- a/src/tools/box64stack.c
+++ b/src/tools/box64stack.c
@@ -27,7 +27,7 @@ int CalcStackSize(box64context_t *context)
         printf_log(LOG_NONE, "Cannot allocate aligned memory (0x%lx/0x%zx) for stack\n", context->stacksz, context->stackalign);
         return 1;
     } else
-        setProtection((uintptr_t)context->stack, context->stacksz, PROT_READ|PROT_WRITE);
+        setProtection_stack((uintptr_t)context->stack, context->stacksz, PROT_READ|PROT_WRITE);
     //memset(context->stack, 0, context->stacksz);
     printf_log(LOG_DEBUG, "Stack is @%p size=0x%lx align=0x%zx\n", context->stack, context->stacksz, context->stackalign);
 
diff --git a/src/tools/bridge.c b/src/tools/bridge.c
index 17496cc7..4bdeb09e 100644
--- a/src/tools/bridge.c
+++ b/src/tools/bridge.c
@@ -58,7 +58,7 @@ brick_t* NewBrick(void* old)
     if(ptr == MAP_FAILED) {
         printf_log(LOG_NONE, "Warning, cannot allocate 0x%lx aligned bytes for bridge, will probably crash later\n", NBRICK*sizeof(onebridge_t));
     }
-    setProtection_mmap((uintptr_t)ptr, NBRICK * sizeof(onebridge_t), PROT_READ | PROT_WRITE | PROT_EXEC | PROT_NOPROT);
+    setProtection_box((uintptr_t)ptr, NBRICK * sizeof(onebridge_t), PROT_READ | PROT_WRITE | PROT_EXEC | PROT_NOPROT);
     dynarec_log(LOG_INFO, "New Bridge brick at %p (size 0x%zx)\n", ptr, NBRICK*sizeof(onebridge_t));
     if(box64_is32bits) load_addr_32bits = ptr + NBRICK*sizeof(onebridge_t);
     ret->b = ptr;
diff --git a/src/wrapped/wrappedlibc.c b/src/wrapped/wrappedlibc.c
index 08e412d0..31c2f2cd 100644
--- a/src/wrapped/wrappedlibc.c
+++ b/src/wrapped/wrappedlibc.c
@@ -3082,7 +3082,7 @@ EXPORT void* my_mmap64(x64emu_t* emu, void *addr, size_t length, int prot, int f
         if(emu)
             setProtection_mmap((uintptr_t)ret, length, prot);
         else
-            setProtection((uintptr_t)ret, length, prot);
+            setProtection_box((uintptr_t)ret, length, prot);
         if(addr && ret!=addr)
             e = EEXIST;
     }
diff --git a/src/wrapped32/wrappedlibc.c b/src/wrapped32/wrappedlibc.c
index a43ca311..8665f593 100755
--- a/src/wrapped32/wrappedlibc.c
+++ b/src/wrapped32/wrappedlibc.c
@@ -321,7 +321,7 @@ static void* findftwFct(void* fct)
 static uintptr_t my32_ftw64_fct_##A = 0;                                            \
 static int my32_ftw64_##A(void* fpath, void* sb, int flag)                          \
 {                                                                                   \
-    struct i386_stat64 i386st;                                                      \
+    static struct i386_stat64 i386st;                                               \
     UnalignStat64_32(sb, &i386st);                                                  \
     return (int)RunFunctionFmt(my32_ftw64_fct_##A, "ppi", fpath, &i386st, flag);    \
 }