about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorrajdakin <rajdakin@gmail.com>2023-12-31 15:49:57 +0100
committerGitHub <noreply@github.com>2023-12-31 15:49:57 +0100
commit5e9e1faedc97194e46f3fb4b3665ec416ce7efbf (patch)
tree27d345328502d82ede6c58e3d181d1f682bab255
parentdba6a88341bacbf52d0f0c37117a04164afce9fa (diff)
downloadbox64-5e9e1faedc97194e46f3fb4b3665ec416ce7efbf.tar.gz
box64-5e9e1faedc97194e46f3fb4b3665ec416ce7efbf.zip
[MEMORY] Switched from a sparse array to a red-black tree (#1180)
* [MEMORY] Switched from a sparse array to an RB tree

* [RBTREE] Fixed the Android build
-rw-r--r--CMakeLists.txt3
-rw-r--r--src/custommem.c418
-rw-r--r--src/dynarec/dynablock.c18
-rw-r--r--src/dynarec/dynarec_native.c7
-rw-r--r--src/include/custommem.h13
-rw-r--r--src/include/debug.h2
-rw-r--r--src/include/rbtree.h13
-rw-r--r--src/libtools/signals.c18
-rw-r--r--src/main.c23
-rw-r--r--src/mallochook.c2
-rw-r--r--src/tools/rbtree.c894
-rw-r--r--src/tools/rcfile.c4
-rw-r--r--src/wrapped/wrappedlibc.c10
13 files changed, 1030 insertions, 395 deletions
diff --git a/CMakeLists.txt b/CMakeLists.txt
index ce29f7c7..3c4c6c15 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -343,9 +343,10 @@ set(ELFLOADER_SRC
     "${BOX64_ROOT}/src/tools/bridge.c"
     "${BOX64_ROOT}/src/tools/callback.c"
     "${BOX64_ROOT}/src/tools/my_cpuid.c"
+    "${BOX64_ROOT}/src/tools/fileutils.c"
     "${BOX64_ROOT}/src/tools/gtkclass.c"
     "${BOX64_ROOT}/src/tools/pathcoll.c"
-    "${BOX64_ROOT}/src/tools/fileutils.c"
+    "${BOX64_ROOT}/src/tools/rbtree.c"
     "${BOX64_ROOT}/src/tools/rcfile.c"
     "${BOX64_ROOT}/src/tools/wine_tools.c"
     "${BOX64_ROOT}/src/wrapped/generated/wrapper.c"
diff --git a/src/custommem.c b/src/custommem.c
index 2dcc2b02..ef207d30 100644
--- a/src/custommem.c
+++ b/src/custommem.c
@@ -24,6 +24,7 @@
 #include "custommem.h"
 #include "khash.h"
 #include "threads.h"
+#include "rbtree.h"
 #ifdef DYNAREC
 #include "dynablock.h"
 #include "dynarec/dynablock_private.h"
@@ -59,32 +60,8 @@ static pthread_mutex_t     mutex_blocks;
 static pthread_mutex_t     mutex_prot;
 static pthread_mutex_t     mutex_blocks;
 #endif
-#if defined(PAGE64K)
-#define MEMPROT_SHIFT 16
-#define MEMPROT_SHIFT2 (16+16)
-#elif defined(PAGE16K)
-#define MEMPROT_SHIFT 14
-#define MEMPROT_SHIFT2 (16+14)
-#elif defined(PAGE8K)
-#define MEMPROT_SHIFT 13
-#define MEMPROT_SHIFT2 (16+13)
-#else
-#define MEMPROT_SHIFT 12
-#define MEMPROT_SHIFT2 (16+12)
-#endif
-#define MEMPROT_SIZE (1<<16)
-#define MEMPROT_SIZE0 (48-MEMPROT_SHIFT2)
-typedef struct memprot_s
-{
-    uint8_t* prot;
-    uint8_t* hot;
-} memprot_t;
 //#define TRACE_MEMSTAT
-#ifdef TRACE_MEMSTAT
-static uint64_t  memprot_allocated = 0, memprot_max_allocated = 0;
-#endif
-static memprot_t memprot[1<<MEMPROT_SIZE0];    // x86_64 mem is 48bits, page is 12bits, so memory is tracked as [20][16][page protection]
-static uint8_t   memprot_default[MEMPROT_SIZE];
+rbtree* memprot = NULL;
 int have48bits = 0;
 static int inited = 0;
 
@@ -502,24 +479,6 @@ void customFree(void* p)
 #define LOCK_NODYNAREC()
 #define UNLOCK_DYNAREC()    mutex_unlock(&mutex_prot)
 #define UNLOCK_NODYNAREC()
-static uint8_t* getProtBlock(uintptr_t idx, int fill)
-{
-    uint8_t* block = (uint8_t*)native_lock_get_dd(&memprot[idx].prot);
-    if(fill && block==memprot_default) {
-        uint8_t* newblock = box_calloc(1<<16, sizeof(uint8_t));
-        if(native_lock_storeifref(&memprot[idx].prot, newblock, memprot_default)==newblock)
-        {
-            block = newblock;
-#ifdef TRACE_MEMSTAT
-            memprot_allocated += (1<<16) * sizeof(uint8_t);
-            if (memprot_allocated > memprot_max_allocated) memprot_max_allocated = memprot_allocated;
-#endif
-        } else {
-            box_free(newblock);
-        }
-    }
-    return block;
-}
 #else
 #define GET_PROT_WAIT(A, B) uint32_t A = block[B]
 #define GET_PROT(A, B)      uint32_t A = block[B]
@@ -527,19 +486,6 @@ static uint8_t* getProtBlock(uintptr_t idx, int fill)
 #define LOCK_NODYNAREC()    mutex_lock(&mutex_prot)
 #define UNLOCK_DYNAREC()
 #define UNLOCK_NODYNAREC()  mutex_unlock(&mutex_prot)
-static uint8_t* getProtBlock(uintptr_t idx, int fill)
-{
-    uint8_t* block = memprot[idx].prot;
-    if(fill && block==memprot_default) {
-        block = box_calloc(1<<16, sizeof(uint8_t));
-        memprot[idx].prot = block;
-#ifdef TRACE_MEMSTAT
-        memprot_allocated += (1<<16) * sizeof(uint8_t);
-        if (memprot_allocated > memprot_max_allocated) memprot_max_allocated = memprot_allocated;
-#endif
-    }
-    return block;
-}
 #endif
 
 #ifdef DYNAREC
@@ -1051,96 +997,93 @@ uintptr_t getJumpAddress64(uintptr_t addr)
 void protectDB(uintptr_t addr, uintptr_t size)
 {
     dynarec_log(LOG_DEBUG, "protectDB %p -> %p\n", (void*)addr, (void*)(addr+size-1));
-    uintptr_t idx = (addr>>MEMPROT_SHIFT);
-    uintptr_t end = ((addr+size-1LL)>>MEMPROT_SHIFT);
-    if(end>=(1LL<<(48-MEMPROT_SHIFT)))
-        end = (1LL<<(48-MEMPROT_SHIFT))-1;
-    if(end<idx) // memory addresses higher than 48bits are not tracked
-        return;
-    int ret;
-    uintptr_t bidx = ~1LL;
-    uint8_t* block = NULL;
-    for (uintptr_t i=idx; i<=end; ++i) {
-        if(i>>16!=bidx) {
-            bidx = i>>16;
-            block = getProtBlock(bidx, 1);
-        }
-        uint32_t prot;
-        do {
-            prot = native_lock_xchg_b(&block[i&0xffff], PROT_WAIT);
-        } while(prot==PROT_WAIT);
-        uint32_t dyn = prot&PROT_DYN;
+
+    uintptr_t cur = addr&~(box64_pagesize-1);
+    uintptr_t end = (addr+size+(box64_pagesize-1))&~(box64_pagesize-1);
+
+    mutex_lock(&mutex_prot);
+    while(cur!=end) {
+        uint8_t prot = 0, oprot;
+        uintptr_t bend = 0;
+        rb_get_end(memprot, cur, &prot, &bend);
+        if(bend>end)
+            bend = end;
+        oprot = prot;
+        uint8_t dyn = prot&PROT_DYN;
         if(!prot)
-            prot = PROT_READ | PROT_WRITE | PROT_EXEC;      // comes from malloc & co, so should not be able to execute
+            prot = PROT_READ | PROT_WRITE | PROT_EXEC;
         if(!(dyn&PROT_NOPROT)) {
             prot&=~PROT_CUSTOM;
             if(prot&PROT_WRITE) {
                 if(!dyn) 
-                    mprotect((void*)(i<<MEMPROT_SHIFT), 1<<MEMPROT_SHIFT, prot&~PROT_WRITE);
+                    mprotect((void*)cur, bend-cur, prot&~PROT_WRITE);
                 prot |= PROT_DYNAREC;
             } else 
                 prot |= PROT_DYNAREC_R;
         }
-        native_lock_storeb(&block[i&0xffff], prot);
+        if (prot != oprot) // If the node doesn't exist, then prot != 0
+            rb_set(memprot, cur, bend, prot);
+        cur = bend;
     }
+    mutex_unlock(&mutex_prot);
 }
 
 // Add the Write flag from an adress range, and mark all block as dirty
-// no log, as it can be executed inside a signal handler
 void unprotectDB(uintptr_t addr, size_t size, int mark)
 {
     dynarec_log(LOG_DEBUG, "unprotectDB %p -> %p (mark=%d)\n", (void*)addr, (void*)(addr+size-1), mark);
-    uintptr_t idx = (addr>>MEMPROT_SHIFT);
-    uintptr_t end = ((addr+size-1)>>MEMPROT_SHIFT);
-    if(end>=(1LL<<(48-MEMPROT_SHIFT)))
-        end = (1LL<<(48-MEMPROT_SHIFT))-1;
-    if(end<idx) // memory addresses higher than 48bits are not tracked
-        return;
-    for (uintptr_t i=idx; i<=end; ++i) {
-        uint8_t* block = getProtBlock(i>>16, 0);
-        if(block == memprot_default) {
-            i=(((i>>16)+1)<<16)-1;  // next block
-        } else {
-            uint32_t prot;
-            do {
-                prot = native_lock_xchg_b(&block[i&0xffff], PROT_WAIT);
-            } while(prot==PROT_WAIT);
-            if(!(prot&PROT_NOPROT)) {
-                if(prot&PROT_DYNAREC) {
-                    prot&=~PROT_DYN;
-                    if(mark)
-                        cleanDBFromAddressRange((i<<MEMPROT_SHIFT), 1<<MEMPROT_SHIFT, 0);
-                    mprotect((void*)(i<<MEMPROT_SHIFT), 1<<MEMPROT_SHIFT, prot);
-                } else if(prot&PROT_DYNAREC_R)
-                    prot &= ~PROT_CUSTOM;
+
+    uintptr_t cur = addr&~(box64_pagesize-1);
+    uintptr_t end = (addr+size+(box64_pagesize-1))&~(box64_pagesize-1);
+
+    mutex_lock(&mutex_prot);
+    while(cur!=end) {
+        uint8_t prot = 0, oprot;
+        uintptr_t bend = 0;
+        if (!rb_get_end(memprot, cur, &prot, &bend)) {
+            if(bend>=end) break;
+            else {
+                cur = bend;
+                continue;
             }
-            native_lock_storeb(&block[i&0xffff], prot);
         }
+        oprot = prot;
+        if(bend>end)
+            bend = end;
+        if(!(prot&PROT_NOPROT)) {
+            if(prot&PROT_DYNAREC) {
+                prot&=~PROT_DYN;
+                if(mark)
+                    cleanDBFromAddressRange(cur, bend-cur, 0);
+                mprotect((void*)cur, bend-cur, prot);
+            } else if(prot&PROT_DYNAREC_R)
+                prot &= ~PROT_CUSTOM;
+        }
+        if (prot != oprot)
+            rb_set(memprot, cur, bend, prot);
+        cur = bend;
     }
+    mutex_unlock(&mutex_prot);
 }
 
 int isprotectedDB(uintptr_t addr, size_t size)
 {
     dynarec_log(LOG_DEBUG, "isprotectedDB %p -> %p => ", (void*)addr, (void*)(addr+size-1));
-    uintptr_t idx = (addr>>MEMPROT_SHIFT);
-    uintptr_t end = ((addr+size-1LL)>>MEMPROT_SHIFT);
-    if(end>=(1LL<<(48-MEMPROT_SHIFT)))
-        end = (1LL<<(48-MEMPROT_SHIFT))-1LL;
-    if(end<idx) { // memory addresses higher than 48bits are not tracked
-        dynarec_log(LOG_DEBUG, "00\n");
-        return 0;
-    }
-    for (uintptr_t i=idx; i<=end; ++i) {
-        uint8_t* block = getProtBlock(i>>16, 0);
-        uint32_t prot;
-        do {
-            prot = native_lock_get_b(&block[i&0xffff]);
-        } while(prot==PROT_WAIT);
-        if(!(prot&PROT_DYN)) {
+    uintptr_t end = (addr+size+(box64_pagesize-1))&~(box64_pagesize-1);
+    addr &=~(box64_pagesize-1);
+    mutex_lock(&mutex_prot);
+    while (addr < end) {
+        uint8_t prot;
+        uintptr_t bend;
+        if (!rb_get_end(memprot, addr, &prot, &bend) || !(prot&PROT_DYN)) {
             dynarec_log(LOG_DEBUG, "0\n");
+            mutex_unlock(&mutex_prot);
             return 0;
+        } else {
+            addr = bend;
         }
     }
+    mutex_unlock(&mutex_prot);
     dynarec_log(LOG_DEBUG, "1\n");
     return 1;
 }
@@ -1245,60 +1188,44 @@ void removeMapMem(mapmem_t* mapmem, uintptr_t begin, uintptr_t end)
     }
 }
 
-void updateProtection(uintptr_t addr, size_t size, uint32_t prot)
+void updateProtection(uintptr_t addr, size_t size, uint8_t prot)
 {
-    dynarec_log(LOG_DEBUG, "updateProtection %p:%p 0x%x\n", (void*)addr, (void*)(addr+size-1), prot);
+    dynarec_log(LOG_DEBUG, "updateProtection %p:%p 0x%hhx\n", (void*)addr, (void*)(addr+size-1), prot);
     mutex_lock(&mutex_prot);
     addMapMem(mapallmem, addr, addr+size-1);
-    UNLOCK_DYNAREC();
-    uintptr_t idx = (addr>>MEMPROT_SHIFT);
-    uintptr_t end = ((addr+size-1)>>MEMPROT_SHIFT);
-    if(end>=(1LL<<(48-MEMPROT_SHIFT)))
-        end = (1LL<<(48-MEMPROT_SHIFT))-1;
-    uintptr_t bidx = ~1LL;
-    uint8_t *block = NULL;
-    for (uintptr_t i=idx; i<=end; ++i) {
-        if(bidx!=i>>16) {
-            bidx = i>>16;
-            block = getProtBlock(bidx, 1);
-        }
-        GET_PROT_WAIT(old_prot, i&0xffff);
-        uint32_t dyn=(old_prot&PROT_DYN);
+    uintptr_t cur = addr & ~(box64_pagesize-1);
+    uintptr_t end = (cur+size+(box64_pagesize-1))&~(box64_pagesize-1);
+    while (cur < end) {
+        uintptr_t bend;
+        uint8_t prot;
+        rb_get_end(memprot, cur, &prot, &bend);
+        uint8_t dyn=(prot&PROT_DYN);
         if(!(dyn&PROT_NOPROT)) {
             if(dyn && (prot&PROT_WRITE)) {   // need to remove the write protection from this block
                 dyn = PROT_DYNAREC;
-                mprotect((void*)(i<<MEMPROT_SHIFT), 1<<MEMPROT_SHIFT, prot&~PROT_WRITE);
+                mprotect((void*)cur, bend-cur, prot&~PROT_WRITE);
             } else if(dyn && !(prot&PROT_WRITE)) {
                 dyn = PROT_DYNAREC_R;
             }
         }
-        SET_PROT(i&0xffff, prot|dyn);
+        if (prot|dyn != prot)
+            rb_set(memprot, cur, bend, prot|dyn);
+        cur = bend;
     }
-    UNLOCK_NODYNAREC();
+    mutex_unlock(&mutex_prot);
 }
 
-void setProtection(uintptr_t addr, size_t size, uint32_t prot)
+void setProtection(uintptr_t addr, size_t size, uint8_t prot)
 {
     mutex_lock(&mutex_prot);
     addMapMem(mapallmem, addr, addr+size-1);
-    UNLOCK_DYNAREC();
-    uintptr_t idx = (addr>>MEMPROT_SHIFT);
-    uintptr_t end = ((addr+size-1)>>MEMPROT_SHIFT);
-    if(end>=(1LL<<(48-MEMPROT_SHIFT)))
-        end = (1LL<<(48-MEMPROT_SHIFT))-1;
-    for (uintptr_t i=(idx>>16); i<=(end>>16); ++i) {
-        uint8_t* block = getProtBlock(i, prot?1:0);
-        if(prot || block!=memprot_default) {
-            uintptr_t bstart = ((i<<16)<idx)?(idx&0xffff):0;
-            uintptr_t bend = (((i<<16)+0xffff)>end)?(end&0xffff):0xffff;
-            for (uintptr_t j=bstart; j<=bend; ++j)
-                SET_PROT(j, prot);
-        }
-    }
-    UNLOCK_NODYNAREC();
+    uintptr_t cur = addr & ~(box64_pagesize-1);
+    uintptr_t end = (cur+size+(box64_pagesize-1))&~(box64_pagesize-1);
+    rb_set(memprot, cur, end, prot);
+    mutex_unlock(&mutex_prot);
 }
 
-void setProtection_mmap(uintptr_t addr, size_t size, uint32_t prot)
+void setProtection_mmap(uintptr_t addr, size_t size, uint8_t prot)
 {
     if(!size)
         return;
@@ -1315,7 +1242,7 @@ void setProtection_mmap(uintptr_t addr, size_t size, uint32_t prot)
     }
 }
 
-void setProtection_elf(uintptr_t addr, size_t size, uint32_t prot)
+void setProtection_elf(uintptr_t addr, size_t size, uint8_t prot)
 {
     if(prot)
         setProtection(addr, size, prot);
@@ -1328,15 +1255,14 @@ void setProtection_elf(uintptr_t addr, size_t size, uint32_t prot)
 
 void refreshProtection(uintptr_t addr)
 {
-    LOCK_NODYNAREC();
-    uintptr_t idx = (addr>>MEMPROT_SHIFT);
-    uint8_t* block = getProtBlock(idx>>16, 0);
-    if(block!=memprot_default) {
-        GET_PROT(prot, idx&0xffff);
-        int ret = mprotect((void*)(idx<<MEMPROT_SHIFT), box64_pagesize, prot&~PROT_CUSTOM);
-printf_log(LOG_INFO, "refreshProtection(%p): %p/0x%x (ret=%d/%s)\n", (void*)addr, (void*)(idx<<MEMPROT_SHIFT), prot, ret, ret?strerror(errno):"ok");
+    mutex_lock(&mutex_prot);
+    uint8_t prot;
+    uintptr_t bend;
+    if (rb_get_end(memprot, addr, &prot, &bend)) {
+        int ret = mprotect((void*)(addr&~(box64_pagesize-1)), box64_pagesize, prot&~PROT_CUSTOM);
+printf_log(LOG_INFO, "refreshProtection(%p): %p/0x%x (ret=%d/%s)\n", (void*)addr, (void*)(addr&~(box64_pagesize-1)), prot, ret, ret?strerror(errno):"ok");
     }
-    UNLOCK_NODYNAREC();
+    mutex_unlock(&mutex_prot);
 }
 
 void allocProtection(uintptr_t addr, size_t size, uint32_t prot)
@@ -1348,69 +1274,6 @@ void allocProtection(uintptr_t addr, size_t size, uint32_t prot)
     // don't need to add precise tracking probably
 }
 
-#ifdef DYNAREC
-int IsInHotPage(uintptr_t addr) {
-    if(addr>=(1LL<<48))
-        return 0;
-    int idx = (addr>>MEMPROT_SHIFT)>>16;
-    uint8_t *hot = (uint8_t*)native_lock_get_dd(&memprot[idx].hot);
-    if(!hot)
-        return 0;
-    int base = (addr>>MEMPROT_SHIFT)&0xffff;
-    if(!hot[base])
-        return 0;
-    // decrement hot
-    native_lock_decifnot0b(&hot[base]);
-    return 1;
-}
-
-int AreaInHotPage(uintptr_t start, uintptr_t end_) {
-    uintptr_t idx = (start>>MEMPROT_SHIFT);
-    uintptr_t end = (end_>>MEMPROT_SHIFT);
-    if(end>=(1LL<<(48-MEMPROT_SHIFT)))
-        end = (1LL<<(48-MEMPROT_SHIFT))-1LL;
-    if(end<idx) { // memory addresses higher than 48bits are not tracked
-        return 0;
-    }
-    int ret = 0;
-    for (uintptr_t i=idx; i<=end; ++i) {
-        uint8_t *block = (uint8_t*)native_lock_get_dd(&memprot[i>>16].hot);
-        int base = i&0xffff;
-        if(block) {
-            uint32_t hot = block[base];
-            if(hot) {
-                // decrement hot
-                native_lock_decifnot0b(&block[base]);
-                ret = 1;
-            }
-        } else {
-            i+=0xffff-base;
-        }
-    }
-    if(ret && box64_dynarec_log>LOG_INFO)
-        dynarec_log(LOG_DEBUG, "BOX64: AreaInHotPage %p-%p\n", (void*)start, (void*)end_);
-    return ret;
-
-}
-
-void AddHotPage(uintptr_t addr) {
-    int idx = (addr>>MEMPROT_SHIFT)>>16;
-    int base = (addr>>MEMPROT_SHIFT)&0xffff;
-    if(!memprot[idx].hot) {
-            uint8_t* newblock = box_calloc(1<<16, sizeof(uint8_t));
-            if (native_lock_storeifnull(&memprot[idx].hot, newblock)) {
-                box_free(newblock);
-#ifdef TRACE_MEMSTAT
-            } else {
-                memprot_allocated += (1<<16) * sizeof(uint8_t);
-                if (memprot_allocated > memprot_max_allocated) memprot_max_allocated = memprot_allocated;
-#endif
-            }
-    }
-    native_lock_storeb(&memprot[idx].hot[base], box64_dynarec_hotpage);
-}
-#endif
-
 void loadProtectionFromMap()
 {
     if(box64_mapclean)
@@ -1443,91 +1306,23 @@ void loadProtectionFromMap()
     box64_mapclean = 1;
 }
 
-static int blockempty(uint8_t* mem)
-{
-    uint64_t *p8 = (uint64_t*)mem;
-    for (int i=0; i<(MEMPROT_SIZE)/8; ++i, ++p8)
-        if(*p8)
-            return 0;
-    return 1;
-}
-
 void freeProtection(uintptr_t addr, size_t size)
 {
     dynarec_log(LOG_DEBUG, "freeProtection %p:%p\n", (void*)addr, (void*)(addr+size-1));
-    uintptr_t idx = (addr>>MEMPROT_SHIFT);
-    uintptr_t end = ((addr+size-1LL)>>MEMPROT_SHIFT);
-    if(end>=(1LL<<(48-MEMPROT_SHIFT)))
-        end = (1LL<<(48-MEMPROT_SHIFT))-1;
     mutex_lock(&mutex_prot);
     removeMapMem(mapallmem, addr, addr+size-1);
     removeMapMem(mmapmem, addr, addr+size-1);
-    UNLOCK_DYNAREC();
-    for (uintptr_t i=idx; i<=end; ++i) {
-        const uint32_t key = (i>>16);
-        const uintptr_t start = i&(MEMPROT_SIZE-1);
-        const uintptr_t finish = (((i|(MEMPROT_SIZE-1))<end)?(MEMPROT_SIZE-1):end)&(MEMPROT_SIZE-1);
-        if(getProtBlock(key, 0)!=memprot_default) {
-            if(start==0 && finish==MEMPROT_SIZE-1) {
-                #ifdef DYNAREC
-                uint8_t *block = (uint8_t*)native_lock_xchg_dd(&memprot[key].prot, (uintptr_t)memprot_default);
-                #else
-                uint8_t *block = memprot[key].prot; 
-                memprot[key].prot = memprot_default;
-                #endif
-                if(block!=memprot_default) {
-                    box_free(block);
-#ifdef TRACE_MEMSTAT
-                    memprot_allocated -= (1<<16) * sizeof(uint8_t);
-#endif
-                }
-            } else {
-                #ifdef DYNAREC
-                uint8_t *block = (uint8_t*)native_lock_get_dd(&memprot[key].prot);
-                #else
-                uint8_t *block = memprot[key].prot;
-                #endif
-                memset(block+start, 0, (finish-start+1)*sizeof(uint8_t));
-                // blockempty is quite slow, so disable the free of blocks for now
-                /*else if(blockempty(block)) {
-                    memprot[key] = memprot_default;
-                    box_free(block);
-                }*/
-            }
-        }
-        #ifdef DYNAREC
-        if(native_lock_get_dd(&memprot[key].hot) && start==0 && finish==MEMPROT_SIZE-1) {
-            uint8_t *hot = (uint8_t*)native_lock_xchg_dd(&memprot[key].hot, 0);
-            if(hot) {
-                box_free(hot);
-#ifdef TRACE_MEMSTAT
-                memprot_allocated -= (1<<16) * sizeof(uint8_t);
-#endif
-            }
-        }
-        #else
-        if(memprot[key].hot && start==0 && finish==MEMPROT_SIZE-1) {
-            box_free(memprot[key].hot);
-            memprot[key].hot = NULL;
-#ifdef TRACE_MEMSTAT
-            memprot_allocated -= (1<<16) * sizeof(uint8_t);
-#endif
-        }
-        #endif
-        i+=finish-start;    // +1 from the "for" loop
-    }
-    UNLOCK_NODYNAREC();
+    rb_unset(memprot, addr, addr+size);
+    mutex_unlock(&mutex_prot);
 }
 
-uint32_t getProtection(uintptr_t addr)
+uint8_t getProtection(uintptr_t addr)
 {
     if(addr>=(1LL<<48))
         return 0;
-    LOCK_NODYNAREC();
-    const uintptr_t idx = (addr>>MEMPROT_SHIFT);
-    uint8_t *block = getProtBlock(idx>>16, 0);
-    GET_PROT(ret, idx&0xffff);
-    UNLOCK_NODYNAREC();
+    mutex_lock(&mutex_prot);
+    uint8_t ret = rb_get(memprot, addr);
+    mutex_unlock(&mutex_prot);
     return ret;
 }
 
@@ -1660,7 +1455,7 @@ int unlockCustommemMutex()
         }
     #endif
     GO(mutex_blocks, 0)
-    GO(mutex_prot, 1)
+    GO(mutex_prot, 1) // See also signals.c
     #undef GO
     return ret;
 }
@@ -1672,7 +1467,7 @@ void relockCustommemMutex(int locks)
             mutex_trylock(&A);          \
 
     GO(mutex_blocks, 0)
-    GO(mutex_prot, 1)
+    GO(mutex_prot, 1) // See also signals.c
     #undef GO
 }
 
@@ -1736,9 +1531,7 @@ void init_custommem_helper(box64context_t* ctx)
     if(inited) // already initialized
         return;
     inited = 1;
-    memset(memprot_default, 0, sizeof(memprot_default));
-    for(int i=0; i<(1<<MEMPROT_SIZE0); ++i)
-        memprot[i].prot = memprot_default;
+    memprot = init_rbtree();
     init_mutexes();
 #ifdef DYNAREC
     if(box64_dynarec) {
@@ -1771,11 +1564,6 @@ void init_custommem_helper(box64context_t* ctx)
     mmapmem = (mapmem_t*)box_calloc(1, sizeof(mapmem_t));
     mmapmem->begin = 0x0;
     mmapmem->end = (uintptr_t)box64_pagesize - 1;
-    // check if PageSize is correctly defined
-    if(box64_pagesize != (1<<MEMPROT_SHIFT)) {
-        printf_log(LOG_NONE, "Error: PageSize configuration is wrong: configured with %d, but got %zd\n", 1<<MEMPROT_SHIFT, box64_pagesize);
-        exit(-1);   // abort or let it continue?
-    }
 }
 
 void fini_custommem_helper(box64context_t *ctx)
@@ -1861,15 +1649,7 @@ void fini_custommem_helper(box64context_t *ctx)
     kh_destroy(lockaddress, lockaddress);
     lockaddress = NULL;
 #endif
-    uint8_t* m;
-    for(int i=0; i<(1<<MEMPROT_SIZE0); ++i) {
-        m = memprot[i].prot;
-        if(m!=memprot_default)
-            box_free(m);
-        m = memprot[i].hot;
-        if(m)
-            box_free(m);
-    }
+    delete_rbtree(memprot);
 
     for(int i=0; i<n_blocks; ++i)
         #ifdef USE_MMAP
diff --git a/src/dynarec/dynablock.c b/src/dynarec/dynablock.c
index 3cfd871c..c5fa1dc5 100644
--- a/src/dynarec/dynablock.c
+++ b/src/dynarec/dynablock.c
@@ -246,24 +246,6 @@ dynablock_t* DBGetBlock(x64emu_t* emu, uintptr_t addr, int create, int is32bits)
     if(db && db->done && db->block && getNeedTest(addr)) {
         if(db->always_test)
             sched_yield();  // just calm down...
-        if(AreaInHotPage((uintptr_t)db->x64_addr, (uintptr_t)db->x64_addr + db->x64_size - 1)) {
-            emu->test.test = 0;
-            if(box64_dynarec_fastpage) {
-                uint32_t hash = X31_hash_code(db->x64_addr, db->x64_size);
-                if(hash==db->hash) { // seems ok, run it without reprotecting it
-                    setJumpTableIfRef64(db->x64_addr, db->block, db->jmpnext);
-                    return db;
-                }
-                db->done = 0;   // invalidating the block, it's already not good
-                dynarec_log(LOG_DEBUG, "Invalidating block %p from %p:%p (hash:%X/%X) for %p\n", db, db->x64_addr, db->x64_addr+db->x64_size-1, hash, db->hash, (void*)addr);
-                // Free db, it's now invalid!
-                FreeDynablock(db, 1);
-                return NULL;    // not building a new one, it's still a hotpage
-            } else {
-                dynarec_log(LOG_INFO, "Not running block %p from %p:%p with for %p because it's in a hotpage\n", db, db->x64_addr, db->x64_addr+db->x64_size-1, (void*)addr);
-                return NULL;
-            }
-        }
         uint32_t hash = X31_hash_code(db->x64_addr, db->x64_size);
         int need_lock = mutex_trylock(&my_context->mutex_dyndump);
         if(hash!=db->hash) {
diff --git a/src/dynarec/dynarec_native.c b/src/dynarec/dynarec_native.c
index 94898c27..6dfc9c89 100644
--- a/src/dynarec/dynarec_native.c
+++ b/src/dynarec/dynarec_native.c
@@ -463,10 +463,6 @@ void* FillBlock64(dynablock_t* block, uintptr_t addr, int alternate, int is32bit
         B+32 .. B+32+sz : instsize (compressed array with each instruction length on x64 and native side)
 
     */
-    if(IsInHotPage(addr)) {
-        dynarec_log(LOG_DEBUG, "Canceling dynarec FillBlock on hotpage for %p\n", (void*)addr);
-        return NULL;
-    }
     if(addr>=box64_nodynarec_start && addr<box64_nodynarec_end) {
         dynarec_log(LOG_INFO, "Create empty block in no-dynarec zone\n");
         return CreateEmptyBlock(block, addr);
@@ -495,7 +491,6 @@ void* FillBlock64(dynablock_t* block, uintptr_t addr, int alternate, int is32bit
     }
     if(!isprotectedDB(addr, 1)) {
         dynarec_log(LOG_INFO, "Warning, write on current page on pass0, aborting dynablock creation (%p)\n", (void*)addr);
-        AddHotPage(addr);
         CancelBlock64(0);
         return NULL;
     }
@@ -634,7 +629,6 @@ void* FillBlock64(dynablock_t* block, uintptr_t addr, int alternate, int is32bit
     // Check if something changed, to abort if it is
     if((block->hash != hash)) {
         dynarec_log(LOG_DEBUG, "Warning, a block changed while being processed hash(%p:%ld)=%x/%x\n", block->x64_addr, block->x64_size, block->hash, hash);
-        AddHotPage(addr);
         CancelBlock64(0);
         return NULL;
     }
@@ -658,7 +652,6 @@ void* FillBlock64(dynablock_t* block, uintptr_t addr, int alternate, int is32bit
     }
     if(!isprotectedDB(addr, end-addr)) {
         dynarec_log(LOG_DEBUG, "Warning, block unprotected while being processed %p:%ld, marking as need_test\n", block->x64_addr, block->x64_size);
-        AddHotPage(addr);
         block->dirty = 1;
         //protectDB(addr, end-addr);
     }
diff --git a/src/include/custommem.h b/src/include/custommem.h
index 60fabcea..363c3274 100644
--- a/src/include/custommem.h
+++ b/src/include/custommem.h
@@ -75,22 +75,19 @@ uintptr_t getJumpAddress64(uintptr_t addr);
 #define PROT_CUSTOM     (PROT_DYNAREC | PROT_DYNAREC_R | PROT_NOPROT)
 #define PROT_WAIT       0xFF
 
-void updateProtection(uintptr_t addr, size_t size, uint32_t prot);
-void setProtection(uintptr_t addr, size_t size, uint32_t prot);
-void setProtection_mmap(uintptr_t addr, size_t size, uint32_t prot);
-void setProtection_elf(uintptr_t addr, size_t size, uint32_t prot);
+void updateProtection(uintptr_t addr, size_t size, uint8_t prot);
+void setProtection(uintptr_t addr, size_t size, uint8_t prot);
+void setProtection_mmap(uintptr_t addr, size_t size, uint8_t prot);
+void setProtection_elf(uintptr_t addr, size_t size, uint8_t prot);
 void freeProtection(uintptr_t addr, size_t size);
 void refreshProtection(uintptr_t addr);
-uint32_t getProtection(uintptr_t addr);
+uint8_t getProtection(uintptr_t addr);
 int getMmapped(uintptr_t addr);
 void loadProtectionFromMap(void);
 #ifdef DYNAREC
 void protectDB(uintptr_t addr, size_t size);
 void unprotectDB(uintptr_t addr, size_t size, int mark);    // if mark==0, the blocks are not marked as potentially dirty
 int isprotectedDB(uintptr_t addr, size_t size);
-int IsInHotPage(uintptr_t addr);
-int AreaInHotPage(uintptr_t start, uintptr_t end);
-void AddHotPage(uintptr_t addr);
 #endif
 void* find32bitBlock(size_t size);
 void* find31bitBlockNearHint(void* hint, size_t size, uintptr_t mask);
diff --git a/src/include/debug.h b/src/include/debug.h
index 4ce68cb9..08018633 100644
--- a/src/include/debug.h
+++ b/src/include/debug.h
@@ -27,8 +27,6 @@ extern int box64_dynarec_callret;
 extern int box64_dynarec_bleeding_edge;
 extern int box64_dynarec_jvm;
 extern int box64_dynarec_tbb;
-extern int box64_dynarec_hotpage;
-extern int box64_dynarec_fastpage;
 extern int box64_dynarec_wait;
 extern int box64_dynarec_missing;
 extern int box64_dynarec_aligned_atomics;
diff --git a/src/include/rbtree.h b/src/include/rbtree.h
new file mode 100644
index 00000000..a624b5da
--- /dev/null
+++ b/src/include/rbtree.h
@@ -0,0 +1,13 @@
+#include <stdint.h>
+
+typedef struct rbtree rbtree;
+
+rbtree* init_rbtree();
+void delete_rbtree(rbtree *tree);
+
+uint8_t rb_get(rbtree *tree, uintptr_t addr);
+int rb_get_end(rbtree* tree, uintptr_t addr, uint8_t* val, uintptr_t* end);
+int rb_set(rbtree *tree, uintptr_t start, uintptr_t end, uint8_t data);
+int rb_unset(rbtree *tree, uintptr_t start, uintptr_t end);
+
+void print_rbtree(const rbtree *tree);
\ No newline at end of file
diff --git a/src/libtools/signals.c b/src/libtools/signals.c
index 24b0a900..e6be8120 100644
--- a/src/libtools/signals.c
+++ b/src/libtools/signals.c
@@ -270,7 +270,8 @@ static void sigstack_key_alloc() {
     pthread_key_create(&sigstack_key, sigstack_destroy);
 }
 
-//1<<8 is mutex_dyndump
+//1<<1 is mutex_prot, 1<<8 is mutex_dyndump
+#define is_memprot_locked (1<<1)
 #define is_dyndump_locked (1<<8)
 uint64_t RunFunctionHandler(int* exit, int dynarec, x64_ucontext_t* sigcontext, uintptr_t fnc, int nargs, ...)
 {
@@ -1002,7 +1003,7 @@ void my_sigactionhandler_oldcode(int32_t sig, int simple, siginfo_t* info, void
     TRAP_x86_MCHK       = 18,  // Machine check exception
     TRAP_x86_CACHEFLT   = 19   // SIMD exception (via SIGFPE) if CPU is SSE capable otherwise Cache flush exception (via SIGSEV)
     */
-    uint32_t prot = getProtection((uintptr_t)info->si_addr);
+    uint8_t prot = getProtection((uintptr_t)info->si_addr);
     if(sig==SIGBUS)
         sigcontext->uc_mcontext.gregs[X64_TRAPNO] = 17;
     else if(sig==SIGSEGV) {
@@ -1082,7 +1083,7 @@ void my_sigactionhandler_oldcode(int32_t sig, int simple, siginfo_t* info, void
     int ret;
     int dynarec = 0;
     #ifdef DYNAREC
-    if(sig!=SIGSEGV && !(Locks&is_dyndump_locked))
+    if(sig!=SIGSEGV && !(Locks&is_dyndump_locked) && !(Locks&is_memprot_locked))
         dynarec = 1;
     #endif
     ret = RunFunctionHandler(&exits, dynarec, sigcontext, my_context->signals[info2->si_signo], 3, info2->si_signo, info2, sigcontext);
@@ -1278,12 +1279,12 @@ void my_box64signalhandler(int32_t sig, siginfo_t* info, void * ucntx)
         return;
     }
     int Locks = unlockMutex();
-    uint32_t prot = getProtection((uintptr_t)addr);
+    uint8_t prot = getProtection((uintptr_t)addr);
     #ifdef BAD_SIGNAL
     // try to see if the si_code makes sense
     // the RK3588 tend to need a special Kernel that seems to have a weird behaviour sometimes
     if((sig==SIGSEGV) && (addr) && (info->si_code == 1) && prot&(PROT_READ|PROT_WRITE|PROT_EXEC)) {
-        printf_log(LOG_DEBUG, "Workaround for suspicious si_code for %p / prot=0x%x\n", addr, prot);
+        printf_log(LOG_DEBUG, "Workaround for suspicious si_code for %p / prot=0x%hhx\n", addr, prot);
         info->si_code = 2;
     }
     #endif
@@ -1303,19 +1304,18 @@ void my_box64signalhandler(int32_t sig, siginfo_t* info, void * ucntx)
         db = FindDynablockFromNativeAddress(pc);
         db_searched = 1;
         static uintptr_t repeated_page = 0;
-        dynarec_log(LOG_DEBUG, "SIGSEGV with Access error on %p for %p , db=%p(%p), prot=0x%x (old page=%p)\n", pc, addr, db, db?((void*)db->x64_addr):NULL, prot, (void*)repeated_page);
+        dynarec_log(LOG_DEBUG, "SIGSEGV with Access error on %p for %p , db=%p(%p), prot=0x%hhx (old page=%p)\n", pc, addr, db, db?((void*)db->x64_addr):NULL, prot, (void*)repeated_page);
         static int repeated_count = 0;
         if(repeated_page == ((uintptr_t)addr&~(box64_pagesize-1))) {
            ++repeated_count;   // Access error multiple times on same page, disable dynarec on this page a few times...
             dynarec_log(LOG_DEBUG, "Detecting a Hotpage at %p (%d)\n", (void*)repeated_page, repeated_count);
-            AddHotPage(repeated_page);
         } else {
             repeated_page = (uintptr_t)addr&~(box64_pagesize-1);
             repeated_count = 0;
         }
         // access error, unprotect the block (and mark them dirty)
         unprotectDB((uintptr_t)addr, 1, 1);    // unprotect 1 byte... But then, the whole page will be unprotected
-        int db_need_test = (db && !box64_dynarec_fastpage)?getNeedTest((uintptr_t)db->x64_addr):0;
+        int db_need_test = db?getNeedTest((uintptr_t)db->x64_addr):0;
         if(db && ((addr>=db->x64_addr && addr<(db->x64_addr+db->x64_size)) || db_need_test)) {
             emu = getEmuSignal(emu, p, db);
             // dynablock got auto-dirty! need to get out of it!!!
@@ -1383,7 +1383,7 @@ void my_box64signalhandler(int32_t sig, siginfo_t* info, void * ucntx)
         cleanDBFromAddressRange(((uintptr_t)addr)&~(box64_pagesize-1), box64_pagesize, 0);
         static void* glitch_pc = NULL;
         static void* glitch_addr = NULL;
-        static int glitch_prot = 0;
+        static uint8_t glitch_prot = 0;
         if(addr && pc /*&& db*/) {
             if((glitch_pc!=pc || glitch_addr!=addr || glitch_prot!=prot)) {
                 // probably a glitch due to intensive multitask...
diff --git a/src/main.c b/src/main.c
index 67080567..49212eb2 100644
--- a/src/main.c
+++ b/src/main.c
@@ -62,8 +62,6 @@ int box64_dynarec_fastnan = 1;
 int box64_dynarec_fastround = 1;
 int box64_dynarec_safeflags = 1;
 int box64_dynarec_callret = 1;
-int box64_dynarec_hotpage = 0;
-int box64_dynarec_fastpage = 0;
 int box64_dynarec_bleeding_edge = 1;
 int box64_dynarec_jvm = 1;
 int box64_dynarec_tbb = 1;
@@ -696,27 +694,6 @@ void LoadLogEnv()
         if(!box64_dynarec_wait)
             printf_log(LOG_INFO, "Dynarec will not wait for FillBlock to ready and use Interpreter instead\n");
     }
-    p = getenv("BOX64_DYNAREC_HOTPAGE");
-    if(p) {
-        int val = -1;
-        if(sscanf(p, "%d", &val)==1) {
-            if(val>=0)
-                box64_dynarec_hotpage = val;
-        }
-        if(box64_dynarec_hotpage)
-            printf_log(LOG_INFO, "Dynarec will have HotPage tagged for %d attempts\n", box64_dynarec_hotpage);
-        else
-            printf_log(LOG_INFO, "Dynarec will not tag HotPage\n");
-    }
-    p = getenv("BOX64_DYNAREC_FASTPAGE");
-    if(p) {
-        if(strlen(p)==1) {
-            if(p[0]>='0' && p[0]<='1')
-                box64_dynarec_fastpage = p[0]-'0';
-        }
-        if(box64_dynarec_fastpage)
-            printf_log(LOG_INFO, "Dynarec will use Fast HotPage\n");
-    }
     p = getenv("BOX64_DYNAREC_ALIGNED_ATOMICS");
     if(p) {
         if(strlen(p)==1) {
diff --git a/src/mallochook.c b/src/mallochook.c
index 52a09c74..1725a374 100644
--- a/src/mallochook.c
+++ b/src/mallochook.c
@@ -134,7 +134,7 @@ typedef void* (*pFpLLp_t)(void*, size_t, size_t, void*);
 size_t(*box_malloc_usable_size)(void*) = NULL;
 
 int GetTID();
-uint32_t getProtection(uintptr_t addr);
+uint8_t getProtection(uintptr_t addr);
 // malloc_hack "2" handling
 // mmap history
 static int malloc_hack_2 = 0;
diff --git a/src/tools/rbtree.c b/src/tools/rbtree.c
new file mode 100644
index 00000000..c79d1089
--- /dev/null
+++ b/src/tools/rbtree.c
@@ -0,0 +1,894 @@
+#include <stddef.h>
+#include <stdint.h>
+#include <stdlib.h>
+
+#ifdef RBTREE_TEST
+#define customMalloc malloc
+#define customFree free
+#else
+#include "custommem.h"
+#include "debug.h"
+#include "rbtree.h"
+#endif
+
// A node of the red-black interval tree.
// Each node carries the half-open range [start, end) (see find_addr:
// start <= addr < end) plus a one-byte opaque payload `data`
// (e.g. page-protection flags in custommem.c).
typedef struct rbnode {
    struct rbnode *left, *right, *parent;
    uintptr_t start, end;   // half-open interval [start, end)
    uint8_t meta;           // tree bookkeeping: IS_LEFT / IS_BLACK bits (defined below)
    uint8_t data;           // user payload attached to this range
} rbnode;

// The tree header: root pointer plus a mutation-in-progress guard.
typedef struct rbtree {
    rbnode *root;
    int is_unstable;    // nonzero while a mutation is underway; used to warn on re-entry
} rbtree;
+
+rbtree* init_rbtree() {
+    rbtree* tree = customMalloc(sizeof(rbtree));
+    tree->root = NULL;
+    tree->is_unstable = 0;
+    return tree;
+}
+
+void delete_rbnode(rbnode *root) {
+    if (!root) return;
+    delete_rbnode(root->left);
+    delete_rbnode(root->right);
+    customFree(root);
+}
+void delete_rbtree(rbtree *tree) {
+    delete_rbnode(tree->root);
+    customFree(tree);
+}
+
+#define IS_LEFT  0x1
+#define IS_BLACK 0x2
+
// Make sure prev is either the rightmost node before start or the leftmost range after start
// Inserts [start, end):data as a new red leaf directly under `prev`, then
// restores the red-black invariants by walking upward (insert fixup with
// hand-rolled rotations; the `meta` comments show the subtree shapes).
// Returns 0 on success, -1 if the node allocation fails.
int add_range_next_to(rbtree *tree, rbnode *prev, uintptr_t start, uintptr_t end, uint8_t data) {
// printf("Adding %lX-%lX:%hhX next to %p\n", start, end, data, prev);
    rbnode *node = customMalloc(sizeof(*node));
    if (!node) return -1;
    node->start = start;
    node->end = end;
    node->data = data;
    node->left = NULL;
    node->right = NULL;

    if (tree->is_unstable) {
        printf_log(LOG_NONE, "Warning, unstable Red-Black tree; trying to add a node anyways\n");
    }
    tree->is_unstable = 1;  // mutation in progress; cleared on every exit path

    if (!tree->root) {
        // Empty tree: the new node becomes the (black) root.
        node->parent = NULL;
        node->meta = IS_BLACK;
        tree->root = node;
        tree->is_unstable = 0;
        return 0;
    }

    // Attach the new red node on the correct side of prev.
    node->parent = prev;
    if (prev->start < start) {
        prev->right = node;
        node->meta = 0;
    } else {
        prev->left = node;
        node->meta = IS_LEFT;
    }
    
    // Fixup loop: climb while `node` is red (a red-red violation may exist
    // between node and its parent).
    while (!(node->meta & IS_BLACK)) {
        if (!node->parent) {
            // Reached the top: recolor the root black and stop.
            node->meta = IS_BLACK;
            tree->root = node;
            tree->is_unstable = 0;
            return 0;
        }
        if (node->parent->meta & IS_BLACK) {
            // Red node under a black parent: all invariants hold.
            tree->is_unstable = 0;
            return 0;
        }
        if (!node->parent->parent) {
            tree->is_unstable = 0;
            return 0; // Cannot happen as the root is black, unless the tree is unstable
        }
        if (node->parent->meta & IS_LEFT) {
            if (node->parent->parent->right && !(node->parent->parent->right->meta & IS_BLACK)) {
                // Red uncle: recolor parent & uncle black, grandparent red, climb.
                node->parent->meta |= IS_BLACK;
                node = node->parent->parent;
                node->meta &= ~IS_BLACK;
                node->right->meta |= IS_BLACK;
            } else {
                if (!(node->meta & IS_LEFT)) {
                    // Inner (left-right) case: rotate at the parent first.
                    rbnode *y, *z;
                    y = node;
                    z = y->parent;
                    // ((Bz->left), Rz, ((By->left), Ry, (By->right)))
                    // y = RED, rchild of z
                    // z = RED, child of z->parent
                    // target = (((Bz->left), Rz, (By->left)), Ry, (By->right))
                    if (z->meta & IS_LEFT) {
                        z->parent->left = y;
                    } else {
                        z->parent->right = y;
                    }
                    y->meta = z->meta; // red + same side as z
                    z->meta = IS_LEFT; // red + left
                    y->parent = z->parent;
                    z->parent = y;
                    z->right = y->left;
                    y->left = z;
                    if (z->right) {
                        z->right->meta &= ~IS_LEFT;
                        z->right->parent = z;
                    }
                    node = z;
                }
                // Outer (left-left) case: rotate at the grandparent, recolor, done.
                rbnode *y, *z;
                y = node->parent;
                z = y->parent;
                // (((Rnode), Ry, (By->right)), Bz, (Bz->right))
                // node = RED, lchild of y
                // y = RED, lchild of z
                // z = BLACK, child of z->parent OR ROOT
                // target = ((Rnode), By, ((By->right), Rz, (Bz->right)))
                if (z->parent) {
                    if (z->meta & IS_LEFT) {
                        z->parent->left = y;
                    } else {
                        z->parent->right = y;
                    }
                }
                y->meta = z->meta; // black + same side as z
                z->meta = 0; // red + right
                y->parent = z->parent;
                z->parent = y;
                z->left = y->right;
                y->right = z;
                if (z->left) {
                    z->left->meta |= IS_LEFT;
                    z->left->parent = z;
                }
                if (!y->parent) tree->root = y;
                tree->is_unstable = 0;
                return 0;
            }
        } else {
            // Mirror of the branch above: parent is a right child.
            if (node->parent->parent->left && !(node->parent->parent->left->meta & IS_BLACK)) {
                node->parent->meta |= IS_BLACK;
                node = node->parent->parent;
                node->meta &= ~IS_BLACK;
                node->left->meta |= IS_BLACK;
            } else {
                if (node->meta & IS_LEFT) {
                    // Inner (right-left) case: rotate at the parent first.
                    rbnode *y, *z;
                    y = node;
                    z = y->parent;
                    // (((By->left), Ry, (By->right)), Rz, (Bz->right))
                    // y = RED, lchild of z
                    // z = RED, child of z->parent
                    // target = ((By->left), Ry, ((By->right), Rz, (Bz->right)))
                    if (z->meta & IS_LEFT) {
                        z->parent->left = y;
                    } else {
                        z->parent->right = y;
                    }
                    y->meta = z->meta; // red + same side as z
                    z->meta = 0; // red + right
                    y->parent = z->parent;
                    z->parent = y;
                    z->left = y->right;
                    y->right = z;
                    if (z->left) {
                        z->left->meta |= IS_LEFT;
                        z->left->parent = z;
                    }
                    node = z;
                }
                // Outer (right-right) case: rotate at the grandparent, recolor, done.
                rbnode *y, *z;
                y = node->parent;
                z = y->parent;
                // ((Bz->left), Bz, ((By->left), Ry, (Rnode)))
                // node = RED, rchild of y
                // y = RED, rchild of z
                // z = BLACK, child of z->parent OR ROOT
                // target = (((Bz->left), Rz, (By->left)), By, (Rnode))
                if (z->parent) {
                    if (z->meta & IS_LEFT) {
                        z->parent->left = y;
                    } else {
                        z->parent->right = y;
                    }
                }
                y->meta = z->meta; // black + same side as z
                z->meta = IS_LEFT; // red + left
                y->parent = z->parent;
                z->parent = y;
                z->right = y->left;
                y->left = z;
                if (z->right) {
                    z->right->meta &= ~IS_LEFT;
                    z->right->parent = z;
                }
                if (!y->parent) tree->root = y;
                tree->is_unstable = 0;
                return 0;
            }
        }
    }
    tree->is_unstable = 0;
    return -1; // unreachable
}
+int add_range(rbtree *tree, uintptr_t start, uintptr_t end, uint8_t data) {
+// printf("add_range\n");
+    rbnode *cur = tree->root, *prev = NULL;
+    while (cur) {
+        prev = cur;
+        if (cur->start < start) cur = cur->right;
+        else cur = cur->left;
+    }
+    return add_range_next_to(tree, prev, start, end, data);
+}
+
+rbnode *find_addr(rbtree *tree, uintptr_t addr) {
+    rbnode *node = tree->root;
+    while (node) {
+        if ((node->start <= addr) && (node->end > addr)) return node;
+        if (addr < node->start) node = node->left;
+        else node = node->right;
+    }
+    return NULL;
+}
+
+// node must be a valid node in the tree
+int remove_node(rbtree *tree, rbnode *node) {
+// printf("Removing %p\n", node); print_rbtree(tree); fflush(stdout);
+    if (tree->is_unstable) {
+        printf_log(LOG_NONE, "Warning, unstable Red-Black tree; trying to add a node anyways\n");
+    }
+    tree->is_unstable = 1;
+
+    if (node->left && node->right) {
+        // Swap node and its successor
+        // Do NOT free the successor as a reference to it can exist
+        rbnode *cur = node->right, *prev;
+        while (cur) {
+            prev = cur;
+            cur = cur->left;
+        }
+        // Swap the position of node and prev != node
+        uint8_t tmp8 = node->meta;
+        node->meta = prev->meta;
+        prev->meta = tmp8;
+        prev->left = node->left;
+        node->left = NULL;
+        if (prev->left) prev->left->parent = prev;
+        if (node->meta & IS_LEFT) {
+            cur = node->parent;
+            node->parent = prev->parent;
+            prev->parent = cur;
+            cur = node->right;
+            node->right = prev->right;
+            prev->right = cur;
+            if (cur) cur->parent = prev;
+        } else {
+            node->right = prev->right;
+            prev->right = node;
+            prev->parent = node->parent;
+            node->parent = prev;
+        }
+        if (node->right) node->right->parent = node; // Should be overriden later
+        if (!prev->parent) {
+            tree->root = prev; // prev is already black
+        } else if (prev->meta & IS_LEFT) {
+            prev->parent->left = prev;
+        } else {
+            prev->parent->right = prev;
+        }
+    }
+    rbnode *child = node->left ? node->left : node->right, *parent = node->parent;
+    if (child) {
+        child->parent = parent;
+        if (!parent) {
+            tree->root = child;
+            child->meta |= IS_BLACK; // Needs to be an or
+            tree->is_unstable = 0;
+            return 0;
+        } else if (node->meta & IS_LEFT) {
+            child->meta |= IS_LEFT;
+            parent->left = child;
+        } else {
+            child->meta &= ~IS_LEFT;
+            parent->right = child;
+        }
+    } else {
+        if (!parent) {
+            tree->root = NULL;
+            customFree(node);
+            tree->is_unstable = 0;
+            return 0;
+        } else if (node->meta & IS_LEFT) {
+            parent->left = NULL;
+        } else {
+            parent->right = NULL;
+        }
+    }
+    // Node has been removed, now to fix the tree
+    if (!(node->meta & IS_BLACK)) {
+        customFree(node);
+        tree->is_unstable = 0;
+        return 0;
+    }
+    customFree(node);
+
+    // Add a black node before child
+    // Notice that the sibling cannot be NULL.
+    while (parent && (!child || (child->meta & IS_BLACK))) {
+        if ((child && child->meta & IS_LEFT) || (!child && !parent->left)) {
+            node = parent->right;
+            if (!(node->meta & IS_BLACK)) {
+                // rotate ===
+                rbnode *y, *z;
+                y = node;
+                z = parent;
+                // ((Bchild), Bz, ((y->left), Ry, (y->right)))
+                // y = RED, rchild of z
+                // z = BLACK, child of z->parent OR ROOT
+                // target = (((Bchild), Rz, (y->left)), By, (y->right))
+                if (z->parent) {
+                    if (z->meta & IS_LEFT) {
+                        z->parent->left = y;
+                    } else {
+                        z->parent->right = y;
+                    }
+                } else {
+                    tree->root = y;
+                }
+                y->meta = z->meta; // black + same side as z
+                z->meta = IS_LEFT; // red + left
+                y->parent = z->parent;
+                z->parent = y;
+                z->right = y->left;
+                y->left = z;
+                if (z->right) {
+                    z->right->meta &= ~IS_LEFT;
+                    z->right->parent = z;
+                }
+                // ===
+                node = parent->right;
+            }
+            if (node->right && !(node->right->meta & IS_BLACK)) {
+                case4_l: {
+                rbnode *y, *z;
+                y = node;
+                z = parent;
+                // ((Bchild), ?z, ((?y->left), By, (Ry->right)))
+                // y = BLACK, rchild of z
+                // z = ?, child of z->parent OR ROOT
+                // target = (((Bchild), Bz, (?y->left)), ?y, (By->right))
+                if (z->parent) {
+                    if (z->meta & IS_LEFT) {
+                        z->parent->left = y;
+                    } else {
+                        z->parent->right = y;
+                    }
+                }
+                y->meta = z->meta; // same color as z + same side as z
+                z->meta = IS_BLACK | IS_LEFT; // black + left
+                y->parent = z->parent;
+                z->parent = y;
+                z->right = y->left;
+                y->left = z;
+                if (z->right) {
+                    z->right->meta &= ~IS_LEFT;
+                    z->right->parent = z;
+                }
+                if (!y->parent) tree->root = y;
+                node->right->meta |= IS_BLACK;
+                tree->is_unstable = 0;
+                return 0; }
+            } else if (!node->left || (node->left->meta & IS_BLACK)) {
+                // case2_l:
+                child = parent; // Remember that child can be NULL
+                parent = child->parent;
+                node->meta &= ~IS_BLACK;
+            } else {
+                // case3_l:
+                rbnode *y, *z;
+                y = node->left;
+                z = node;
+                // (((y->left), Ry, (y->right)), Bz, (Bz->right))
+                // y = RED, rchild of z
+                // z = BLACK, child of z->parent
+                // target = ((y->left), By, ((y->right), Rz, (z->right)))
+                if (z->meta & IS_LEFT) {
+                    z->parent->left = y;
+                } else {
+                    z->parent->right = y;
+                }
+                y->meta = z->meta; // black + same side as z
+                z->meta = 0; // red + right
+                y->parent = z->parent;
+                z->parent = y;
+                z->left = y->right;
+                y->right = z;
+                if (z->left) {
+                    z->left->meta |= IS_LEFT;
+                    z->left->parent = z;
+                }
+                node = y;
+                goto case4_l;
+            }
+        } else {
+            node = parent->left;
+            if (!(node->meta & IS_BLACK)) {
+                // rotate ===
+                rbnode *y, *z;
+                y = node;
+                z = parent;
+                // (((y->left), Ry, (y->right)), Bz, (Bchild))
+                // y = RED, lchild of z
+                // z = BLACK, child of z->parent OR ROOT
+                // target = ((y->left), By, ((y->right), Rz, (Bchild)))
+                if (z->parent) {
+                    if (z->meta & IS_LEFT) {
+                        z->parent->left = y;
+                    } else {
+                        z->parent->right = y;
+                    }
+                }
+                y->meta = z->meta; // black + same side as z
+                z->meta = 0; // red + right
+                y->parent = z->parent;
+                z->parent = y;
+                z->left = y->right;
+                y->right = z;
+                if (z->left) {
+                    z->left->meta |= IS_LEFT;
+                    z->left->parent = z;
+                }
+                if (!y->parent) tree->root = y;
+                // ===
+                node = parent->left;
+            }
+            if (node->left && !(node->left->meta & IS_BLACK)) {
+                case4_r: {
+                rbnode *y, *z;
+                y = node;
+                z = y->parent;
+                // (((?y->left), By, (Ry->right)), ?z, (Bchild))
+                // y = BLACK, rchild of z
+                // z = ?, child of z->parent OR ROOT
+                // target = ((?y->left), ?y, ((Ry->right), Bz, (Bchild)))
+                if (z->parent) {
+                    if (z->meta & IS_LEFT) {
+                        z->parent->left = y;
+                    } else {
+                        z->parent->right = y;
+                    }
+                }
+                y->meta = z->meta; // same color as z + same side as z
+                z->meta = IS_BLACK; // black + right
+                y->parent = z->parent;
+                z->parent = y;
+                z->left = y->right;
+                y->right = z;
+                if (z->left) {
+                    z->left->meta |= IS_LEFT;
+                    z->left->parent = z;
+                }
+                if (!y->parent) tree->root = y;
+                node->left->meta |= IS_BLACK;
+                tree->is_unstable = 0;
+                return 0; }
+            } else if (!node->right || (node->right->meta & IS_BLACK)) {
+                // case2_r:
+                child = parent;
+                parent = child->parent;
+                node->meta &= ~IS_BLACK;
+            } else {
+                // case3_r:
+                rbnode *y, *z;
+                y = node->right;
+                z = node;
+                // ((Bz->left), Bz, ((y->left), Ry, (y->right)))
+                // y = RED, rchild of z
+                // z = BLACK, child of z->parent
+                // target = (((Bz->left), Rz, (y->left)), By, (y->right))
+                if (z->meta & IS_LEFT) {
+                    z->parent->left = y;
+                } else {
+                    z->parent->right = y;
+                }
+                y->meta = z->meta; // black + same side as z
+                z->meta = IS_LEFT; // red + left
+                y->parent = z->parent;
+                z->parent = y;
+                z->right = y->left;
+                y->left = z;
+                if (z->right) {
+                    z->right->meta &= ~IS_LEFT;
+                    z->right->parent = z;
+                }
+                node = y;
+                goto case4_r;
+            }
+        }
+    }
+    if (child)
+        child->meta |= IS_BLACK;
+    tree->is_unstable = 0;
+    return 0;
+}
+
+rbnode *first_node(rbtree *tree) {
+    rbnode *node = tree->root, *prev = node;
+    while (node) {
+        prev = node;
+        node = node->left;
+    }
+    return prev;
+}
+rbnode *pred_node(rbnode *node) {
+    if (!node) return NULL;
+    if (node->left) {
+        node = node->left;
+        while (node->right) node = node->right;
+        return node;
+    } else {
+        while (node->parent && node->meta & IS_LEFT) node = node->parent;
+        return node->parent;
+    }
+}
+rbnode *succ_node(rbnode *node) {
+    if (!node) return NULL;
+    if (node->right) {
+        node = node->right;
+        while (node->left) node = node->left;
+        return node;
+    } else {
+        while (node->parent && !(node->meta & IS_LEFT)) node = node->parent;
+        return node->parent;
+    }
+}
+
+uint8_t rb_get(rbtree *tree, uintptr_t addr) {
+    rbnode *node = find_addr(tree, addr);
+    if (node) return node->data;
+    else return 0;
+}
+
+int rb_get_end(rbtree* tree, uintptr_t addr, uint8_t* val, uintptr_t* end) {
+    rbnode *node = tree->root, *next = NULL;
+    while (node) {
+        if (node->end <= addr) {
+            node = node->right;
+        } else if (node->start <= addr) {
+            *val = node->data;
+            *end = node->end;
+            return 1;
+        } else {
+            next = node;
+            node = node->left;
+        }
+    }
+    if (next) {
+        *val = next->data;
+        *end = next->start;
+    } else {
+        *val = 0;
+        *end = (uintptr_t)-1;
+    }
+    return 0;
+}
+
// Set the payload of the whole interval [start, end) to `data`, splitting,
// trimming, deleting and merging existing nodes as needed so that adjacent
// ranges with equal data stay coalesced. Returns 0 on success, non-zero if
// an internal allocation failed.
int rb_set(rbtree *tree, uintptr_t start, uintptr_t end, uint8_t data) {
// printf("rb_set( "); print_rbtree(tree); printf(" , 0x%lX, 0x%lX, %hhu);\n", start, end, data); fflush(stdout);
// dynarec_log(LOG_DEBUG, "set 0x%lX, 0x%lX, %hhu\n", start, end, data);
    if (!tree->root) {
        return add_range(tree, start, end, data);
    }
    
    // Single descent locating the three nodes of interest around `start`.
    rbnode *node = tree->root, *prev = NULL, *last = NULL;
    while (node) {
        if (node->start < start) {
            prev = node;
            node = node->right;
        } else if (node->start == start) {
            if (node->left) {
                prev = node->left;
                while (prev->right) prev = prev->right;
            }
            if (node->right) {
                last = node->right;
                while (last->left) last = last->left;
            }
            break;
        } else {
            last = node;
            node = node->left;
        }
    }

    // prev is the largest node starting strictly before start, or NULL if there is none
    // node is the node starting exactly at start, or NULL if there is none
    // last is the smallest node starting strictly after start, or NULL if there is none
    // Note that prev may contain start

    if (prev && (prev->end >= start) && (prev->data == data)) {
        // Merge with prev
        if (end <= prev->end) return 0; // Nothing to do!
        
        if (node && (node->end > end)) {
            node->start = end;
            prev->end = end;
            return 0;
        } else if (node && (node->end == end)) {
            remove_node(tree, node);
            prev->end = end;
            return 0;
        } else if (node) {
            remove_node(tree, node);
        }
        // Swallow every node fully covered by [start, end).
        while (last && (last->start < end) && (last->end <= end)) {
            // Remove the entire node
            node = last;
            last = succ_node(last);
            remove_node(tree, node);
        }
        if (last && (last->start <= end) && (last->data == data)) {
            // Merge node and last
            prev->end = last->end;
            remove_node(tree, last);
            return 0;
        }
        if (last && (last->start < end)) last->start = end;
        prev->end = end;
        return 0;
    } else if (prev && (prev->end > start)) {
        if (prev->end > end) {
            // Split in three
            // Note that here, succ(prev) = last and node = NULL
            int ret;
            ret = add_range_next_to(tree, prev->right ? last : prev, end, prev->end, prev->data);
            ret = ret ? ret : add_range_next_to(tree, prev->right ? succ_node(prev) : prev, start, end, data);
            prev->end = start;
            return ret;
        }
        // Cut prev and continue
        prev->end = start;
    }

    if (node) {
        // Change node
        if (node->end >= end) {
            if (node->data == data) return 0; // Nothing to do!
            // Cut node
            if (node->end > end) {
                int ret = add_range_next_to(tree, node->right ? last : node, end, node->end, node->data);
                node->end = end;
                node->data = data;
                return ret;
            }
            // Fallthrough
        }
        
        // Overwrite and extend node
        while (last && (last->start < end) && (last->end <= end)) {
            // Remove the entire node
            prev = last;
            last = succ_node(last);
            remove_node(tree, prev);
        }
        if (last && (last->start <= end) && (last->data == data)) {
            // Merge node and last
            remove_node(tree, node);
            last->start = start;
            return 0;
        }
        if (last && (last->start < end)) last->start = end;
        if (node->end < end) node->end = end;
        node->data = data;
        return 0;
    }

    // No node starts exactly at start: clear out fully-covered successors,
    // then extend `last` or insert a fresh node.
    while (last && (last->start < end) && (last->end <= end)) {
        // Remove the entire node
        node = last;
        last = succ_node(last);
        remove_node(tree, node);
    }
    if (!last) {
        // Add a new node next to prev, the largest node of the tree
        // It exists since the tree is nonempty
        return add_range_next_to(tree, prev, start, end, data);
    }
    if ((last->start <= end) && (last->data == data)) {
        // Extend
        last->start = start;
        return 0;
    } else if (last->start < end) {
        // Cut
        last->start = end;
    }
    // Probably 'last->left ? prev : last' is enough
    return add_range_next_to(tree, last->left ? pred_node(last) : last, start, end, data);
}
+
+int rb_unset(rbtree *tree, uintptr_t start, uintptr_t end) {
+// printf("rb_unset( "); print_rbtree(tree); printf(" , 0x%lX, 0x%lX);\n", start, end); fflush(stdout);
+// dynarec_log(LOG_DEBUG, "rb_unset(tree, 0x%lX, 0x%lX);\n", start, end);
+    if (!tree->root) return 0;
+
+    rbnode *node = tree->root, *prev = NULL, *next = NULL;
+    while (node) {
+        if (node->start < start) {
+            prev = node;
+            node = node->right;
+        } else if (node->start == start) {
+            if (node->left) {
+                prev = node->left;
+                while (prev->right) prev = prev->right;
+            }
+            if (node->right) {
+                next = node->right;
+                while (next->left) next = next->left;
+            }
+            break;
+        } else {
+            next = node;
+            node = node->left;
+        }
+    }
+
+    if (node) {
+        if (node->end > end) {
+            node->start = end;
+            return 0;
+        } else if (node->end == end) {
+            remove_node(tree, node);
+            return 0;
+        } else {
+            remove_node(tree, node);
+        }
+    } else if (prev && (prev->end > start)) {
+        if (prev->end > end) {
+            // Split prev
+            int ret = add_range_next_to(tree, prev->right ? next : prev, end, prev->end, prev->data);
+            prev->end = start;
+            return ret;
+        } else if (prev->end == end) {
+            prev->end = start;
+            return 0;
+        } // else fallthrough
+    }
+    while (next && (next->start < end) && (next->end <= end)) {
+        // Remove the entire node
+        node = next;
+        next = succ_node(next);
+        remove_node(tree, node);
+    }
+    if (next && (next->start < end)) {
+        // next->end > end: cut the node
+        next->start = end;
+    }
+    return 0;
+}
+
+#include <stdio.h>
// Debug helper: print the subtree rooted at `node` in-order while checking
// structural invariants as it goes (meta side flags, parent back-links,
// no red-red edges, start/end ordering via [minstart, maxend], and a
// uniform black depth accumulated through *bdepth). Prints an
// "<invalid ...>" marker instead of recursing when a check fails.
void print_rbnode(const rbnode *node, unsigned depth, uintptr_t minstart, uintptr_t maxend, unsigned *bdepth) {
    if (!node) {
        // NIL leaf: every leaf must sit at the same black depth.
        if (!*bdepth || *bdepth == depth + 1) {
            *bdepth = depth + 1;
            printf("[%u]", depth);
        } else
            printf("<invalid black depth %u>", depth);
        return;
    }
    if (node->start < minstart) {
        printf("<invalid start>");
        return;
    }
    if (node->end > maxend) {
        printf("<invalid end>");
        return;
    }
    printf("(");
    if (node->left && !(node->left->meta & IS_LEFT)) {
        printf("<invalid meta>");
    } else if (node->left && (node->left->parent != node)) {
        printf("<invalid parent %p instead of %p>", node->left->parent, node);
    } else if (node->left && !(node->meta & IS_BLACK) && !(node->left->meta & IS_BLACK)) {
        // Red-red violation: flag it but still descend to show the subtree.
        printf("<invalid red-red node> ");
        print_rbnode(node->left, depth + ((node->meta & IS_BLACK) ? 1 : 0), minstart, node->start, bdepth);
    } else {
        print_rbnode(node->left, depth + ((node->meta & IS_BLACK) ? 1 : 0), minstart, node->start, bdepth);
    }
    printf(", (%c/%p) %lX-%lX: %hhu, ", node->meta & IS_BLACK ? 'B' : 'R', node, node->start, node->end, node->data);
    if (node->right && (node->right->meta & IS_LEFT)) {
        printf("<invalid meta>");
    } else if (node->right && (node->right->parent != node)) {
        printf("<invalid parent %p instead of %p>", node->right->parent, node);
    } else if (node->right && !(node->meta & IS_BLACK) && !(node->right->meta & IS_BLACK)) {
        printf("<invalid red-red node> ");
        print_rbnode(node->right, depth + ((node->meta & IS_BLACK) ? 1 : 0), node->end, maxend, bdepth);
    } else {
        print_rbnode(node->right, depth + ((node->meta & IS_BLACK) ? 1 : 0), node->end, maxend, bdepth);
    }
    printf(")");
}
+void print_rbtree(const rbtree *tree) {
+    if (!tree) {
+        printf("<NULL>\n");
+        return;
+    }
+    if (tree->root && tree->root->parent) {
+        printf("Root has parent\n");
+        return;
+    }
+    if (tree->root && !(tree->root->meta & IS_BLACK)) {
+        printf("Root is red\n");
+        return;
+    }
+    unsigned bdepth = 0;
+    print_rbnode(tree->root, 0, 0, (uintptr_t)-1, &bdepth);
+    printf("\n");
+}
+
+#ifdef RBTREE_TEST
// Manual smoke test, compiled only with -DRBTREE_TEST (customMalloc/customFree
// then map to malloc/free). Exercises set/merge of adjacent and overlapping
// ranges, printing and self-checking the tree after each mutation. Earlier
// ad-hoc experiments are kept commented out for reference.
int main() {
    rbtree* tree = init_rbtree();
    print_rbtree(tree); fflush(stdout);
    /*int ret;
    ret = rb_set(tree, 0x43, 0x44, 0x01);
    printf("%d; ", ret); print_rbtree(tree); fflush(stdout);
    ret = rb_set(tree, 0x42, 0x43, 0x01);
    printf("%d; ", ret); print_rbtree(tree); fflush(stdout);
    ret = rb_set(tree, 0x41, 0x42, 0x01);
    printf("%d; ", ret); print_rbtree(tree); fflush(stdout);
    ret = rb_set(tree, 0x40, 0x41, 0x01);
    printf("%d; ", ret); print_rbtree(tree); fflush(stdout);
    ret = rb_set(tree, 0x20, 0x40, 0x03);
    printf("%d; ", ret); print_rbtree(tree); fflush(stdout);
    ret = rb_set(tree, 0x10, 0x20, 0x01);
    printf("%d; ", ret); print_rbtree(tree); fflush(stdout);

    uint8_t val = rb_get(tree, 0x33);
    printf("0x33 has attribute %hhu\n", val); fflush(stdout);*/
    /* rbnode *node = find_addr(tree, 0x33);
    printf("0x33 is at %p: ", node); print_rbnode(node, 0); printf("\n"); fflush(stdout);
    ret = remove_node(tree, node);
    printf("%d; ", ret); print_rbtree(tree); fflush(stdout);
    node = find_addr(tree, 0x20);
    printf("0x20 is at %p\n", node);
    node = find_addr(tree, 0x1F);
    printf("0x1F is at %p: ", node); print_rbnode(node, 0); printf("\n"); fflush(stdout);
    ret = remove_node(tree, node);
    printf("%d; ", ret); print_rbtree(tree); fflush(stdout); */
    /* ret = rb_set(tree, 0x15, 0x42, 0x00);
    printf("%d; ", ret); print_rbtree(tree); fflush(stdout); */
    /*rb_unset(tree, 0x15, 0x42);
    print_rbtree(tree); fflush(stdout);*/
    
    // tree->root = node27; print_rbtree(tree); fflush(stdout);
    // rb_set(tree, 2, 3, 1); print_rbtree(tree); fflush(stdout);
    // add_range_next_to(tree, node24, 0x0E7000, 0x0E8000, 69); print_rbtree(tree); fflush(stdout);
    // print_rbtree(tree); fflush(stdout);
    // uint8_t val = rb_get(tree, 0x11003000);
    // printf("0x11003000 has attribute %hhu\n", val); fflush(stdout);
    // remove_node(tree, node0); print_rbtree(tree); fflush(stdout);
    // add_range_next_to(tree, node1, 0x0E7000, 0x0E8000, 69); print_rbtree(tree); fflush(stdout);
rb_set(tree, 0x130000, 0x140000, 7);
    print_rbtree(tree); fflush(stdout);
rb_set(tree, 0x141000, 0x142000, 135);
    print_rbtree(tree); fflush(stdout);
rb_set(tree, 0x140000, 0x141000, 135);
    print_rbtree(tree); fflush(stdout);
rb_set(tree, 0x140000, 0x141000, 7);
    print_rbtree(tree); fflush(stdout);
rb_set(tree, 0x140000, 0x141000, 135);
    print_rbtree(tree); fflush(stdout);
    uint8_t val = rb_get(tree, 0x141994); printf("0x141994 has attribute %hhu\n", val); fflush(stdout);
    delete_rbtree(tree);
}
+#endif
diff --git a/src/tools/rcfile.c b/src/tools/rcfile.c
index 13e50b09..53c426ff 100644
--- a/src/tools/rcfile.c
+++ b/src/tools/rcfile.c
@@ -148,8 +148,8 @@ ENTRYBOOL(BOX64_DYNAREC_CALLRET, box64_dynarec_callret)             \
 ENTRYBOOL(BOX64_DYNAREC_BLEEDING_EDGE, box64_dynarec_bleeding_edge) \
 ENTRYBOOL(BOX64_DYNAREC_JVM, box64_dynarec_jvm)                     \
 ENTRYBOOL(BOX64_DYNAREC_TBB, box64_dynarec_tbb)                     \
-ENTRYINT(BOX64_DYNAREC_HOTPAGE, box64_dynarec_hotpage, 0, 255, 8)   \
-ENTRYBOOL(BOX64_DYNAREC_FASTPAGE, box64_dynarec_fastpage)           \
+IGNORE(BOX64_DYNAREC_HOTPAGE)                                       \
+IGNORE(BOX64_DYNAREC_FASTPAGE)                                      \
 ENTRYBOOL(BOX64_DYNAREC_ALIGNED_ATOMICS, box64_dynarec_aligned_atomics) \
 ENTRYBOOL(BOX64_DYNAREC_WAIT, box64_dynarec_wait)                   \
 ENTRYSTRING_(BOX64_NODYNAREC, box64_nodynarec)                      \
diff --git a/src/wrapped/wrappedlibc.c b/src/wrapped/wrappedlibc.c
index 9dbd6e04..3f54350c 100644
--- a/src/wrapped/wrappedlibc.c
+++ b/src/wrapped/wrappedlibc.c
@@ -2645,9 +2645,9 @@ EXPORT void* my_mmap64(x64emu_t* emu, void *addr, unsigned long length, int prot
     #endif
     if(ret!=MAP_FAILED) {
         if(emu)
-            setProtection_mmap((uintptr_t)ret, length, prot);
+            setProtection_mmap((uintptr_t)ret, length, (uint8_t)prot);
         else
-            setProtection((uintptr_t)ret, length, prot);
+            setProtection((uintptr_t)ret, length, (uint8_t)prot);
     }
     return ret;
 }
@@ -2660,7 +2660,7 @@ EXPORT void* my_mremap(x64emu_t* emu, void* old_addr, size_t old_size, size_t ne
     void* ret = mremap(old_addr, old_size, new_size, flags, new_addr);
     if(emu && (box64_log>=LOG_DEBUG || box64_dynarec_log>=LOG_DEBUG)) {printf_log(LOG_NONE, "%p\n", ret);}
     if(ret!=(void*)-1) {
-        uint32_t prot = getProtection((uintptr_t)old_addr)&~PROT_CUSTOM;
+        uint8_t prot = getProtection((uintptr_t)old_addr)&~PROT_CUSTOM;
         if(ret==old_addr) {
             if(old_size && old_size<new_size) {
                 setProtection_mmap((uintptr_t)ret+old_size, new_size-old_size, prot);
@@ -2736,11 +2736,11 @@ EXPORT int my_mprotect(x64emu_t* emu, void *addr, unsigned long len, int prot)
     #endif
     if(!ret && len) {
         if(prot)
-            updateProtection((uintptr_t)addr, len, prot);
+            updateProtection((uintptr_t)addr, len, (uint8_t)prot);
         else {
             // avoid allocating detailled protection for a no prot 0
             freeProtection((uintptr_t)addr, len);
-            setProtection_mmap((uintptr_t)addr, len, prot);
+            setProtection_mmap((uintptr_t)addr, len, (uint8_t)prot);
         }
     }
     return ret;