Diffstat (limited to 'src')
-rw-r--r--  src/core.c                                   |  22
-rw-r--r--  src/custommem.c                              | 199
-rw-r--r--  src/dynarec/arm64/dynarec_arm64_00.c         |  30
-rw-r--r--  src/dynarec/arm64/dynarec_arm64_64.c         |   2
-rw-r--r--  src/dynarec/arm64/dynarec_arm64_67.c         |   2
-rw-r--r--  src/dynarec/arm64/dynarec_arm64_avx_f2_0f.c  |   4
-rw-r--r--  src/dynarec/arm64/dynarec_arm64_consts.c     |   7
-rw-r--r--  src/dynarec/arm64/dynarec_arm64_consts.h     |   1
-rw-r--r--  src/dynarec/arm64/dynarec_arm64_f20f.c       |   4
-rw-r--r--  src/dynarec/arm64/dynarec_arm64_helper.c     |  23
-rw-r--r--  src/dynarec/arm64/dynarec_arm64_helper.h     |  57
-rw-r--r--  src/dynarec/arm64/dynarec_arm64_pass2.h      |  18
-rw-r--r--  src/dynarec/arm64/dynarec_arm64_pass3.h      |  16
-rw-r--r--  src/dynarec/dynablock.c                      |  17
-rw-r--r--  src/dynarec/dynablock_private.h              |   4
-rw-r--r--  src/dynarec/dynacache_reloc.c                |  84
-rw-r--r--  src/dynarec/dynacache_reloc.h                |   4
-rw-r--r--  src/dynarec/dynarec_native.c                 |  67
-rw-r--r--  src/dynarec/la64/dynarec_la64_00.c           |   8
-rw-r--r--  src/elfs/elfloader.c                         |  14
-rw-r--r--  src/include/custommem.h                      |   8
-rw-r--r--  src/include/dynablock.h                      |   2
-rw-r--r--  src/include/env.h                            |  16
-rw-r--r--  src/include/fileutils.h                      |   1
-rw-r--r--  src/include/os.h                             |   1
-rw-r--r--  src/libtools/signals.c                       |   4
-rw-r--r--  src/os/os_linux.c                            |  20
-rw-r--r--  src/os/os_wine.c                             |  12
-rw-r--r--  src/tools/env.c                              | 587
-rw-r--r--  src/wrapped/generated/functions_list.txt     |   1
-rw-r--r--  src/wrapped/wrappedlibc.c                    |  10
31 files changed, 1115 insertions, 130 deletions
diff --git a/src/core.c b/src/core.c
index 362ffab1..82eb20cd 100644
--- a/src/core.c
+++ b/src/core.c
@@ -235,11 +235,6 @@ static void displayMiscInfo()
 
     char* p;
 
-    // grab pagesize
-    box64_pagesize = sysconf(_SC_PAGESIZE);
-    if(!box64_pagesize)
-        box64_pagesize = 4096;
-
 #ifdef DYNAREC
     if (DetectHostCpuFeatures())
         PrintHostCpuFeatures();
@@ -356,6 +351,8 @@ void PrintHelp() {
     PrintfFtrace(0, "    '-v'|'--version' to print box64 version and quit\n");
     PrintfFtrace(0, "    '-h'|'--help' to print this and quit\n");
     PrintfFtrace(0, "    '-k'|'--kill-all' to kill all box64 instances\n");
+    PrintfFtrace(0, "    '--dynacache-list' to list DynaCache files and their validity\n");
+    PrintfFtrace(0, "    '--dynacache-clean' to remove invalid DynaCache files\n");
 }
 
 void KillAllInstances()
@@ -660,6 +657,7 @@ void endBox64()
     if(!my_context || box64_quit)
         return;
 
+    SerializeAllMapping();   // just to be safe
     // then call all the fini
     dynarec_log(LOG_DEBUG, "endBox64() called\n");
     box64_quit = 1;
@@ -683,6 +681,7 @@ void endBox64()
     #ifndef STATICBUILD
     endMallocHook();
     #endif
+    SerializeAllMapping();   // to be safe
     FreeBox64Context(&my_context);
     #ifdef DYNAREC
     // disable dynarec now
@@ -763,6 +762,11 @@ int initialize(int argc, const char **argv, char** env, x64emu_t** emulator, elf
     LoadEnvVariables();
     InitializeEnvFiles();
 
+    // grab pagesize
+    box64_pagesize = sysconf(_SC_PAGESIZE);
+    if(!box64_pagesize)
+        box64_pagesize = 4096;
+
     const char* prog = argv[1];
     int nextarg = 1;
     // check if some options are passed
@@ -779,6 +783,14 @@ int initialize(int argc, const char **argv, char** env, x64emu_t** emulator, elf
             KillAllInstances();
             exit(0);
         }
+        if(!strcmp(prog, "--dynacache-list")) {
+            DynaCacheList(argv[nextarg+1]);
+            exit(0);
+        }
+        if(!strcmp(prog, "--dynacache-clean")) {
+            DynaCacheClean();
+            exit(0);
+        }
         // other options?
         if(!strcmp(prog, "--")) {
             prog = argv[++nextarg];
diff --git a/src/custommem.c b/src/custommem.c
index f23ec5d5..19ec422c 100644
--- a/src/custommem.c
+++ b/src/custommem.c
@@ -94,6 +94,7 @@ typedef struct blocklist_s {
 #define MMAPSIZE (512*1024)     // allocate 512kb sized blocks
 #define MMAPSIZE128 (128*1024)  // allocate 128kb sized blocks for 128byte map
 #define DYNMMAPSZ (2*1024*1024) // allocate 2Mb block for dynarec
+#define DYNMMAPSZ0 (128*1024)   // allocate 128kb block for 1st page, to avoid wasting too much memory on small programs / libs
 
 static int                 n_blocks = 0;       // number of blocks for custom malloc
 static int                 c_blocks = 0;       // capacity of blocks for custom malloc
@@ -483,7 +484,7 @@ void add_blockstree(uintptr_t start, uintptr_t end, int idx)
     reent = 0;
 }
 
-void* box32_dynarec_mmap(size_t size);
+void* box32_dynarec_mmap(size_t size, int fd, off_t offset);
 #ifdef BOX32
 int isCustomAddr(void* p)
 {
@@ -577,7 +578,7 @@ void* map128_customMalloc(size_t size, int is32bits)
     if(is32bits) mutex_unlock(&mutex_blocks);   // unlocking, because mmap might use it
     void* p = is32bits
         ? box_mmap(NULL, allocsize, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE | MAP_32BIT, -1, 0)
-        : (box64_is32bits ? box32_dynarec_mmap(allocsize) : InternalMmap(NULL, allocsize, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0));
+        : (box64_is32bits ? box32_dynarec_mmap(allocsize, -1, 0) : InternalMmap(NULL, allocsize, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0));
     if(is32bits) mutex_lock(&mutex_blocks);
     #ifdef TRACE_MEMSTAT
     customMalloc_allocated += allocsize;
@@ -687,7 +688,7 @@ void* internal_customMalloc(size_t size, int is32bits)
         mutex_unlock(&mutex_blocks);
     void* p = is32bits
         ? box_mmap(NULL, allocsize, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE | MAP_32BIT, -1, 0)
-        : (box64_is32bits ? box32_dynarec_mmap(allocsize) : InternalMmap(NULL, allocsize, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0));
+        : (box64_is32bits ? box32_dynarec_mmap(allocsize, -1, 0) : InternalMmap(NULL, allocsize, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0));
     if(is32bits)
         mutex_lock(&mutex_blocks);
 #ifdef TRACE_MEMSTAT
@@ -1031,18 +1032,19 @@ size_t customGetUsableSize(void* p)
     return 0;
 }
 
-void* box32_dynarec_mmap(size_t size)
+void* box32_dynarec_mmap(size_t size, int fd, off_t offset)
 {
-#ifdef BOX32
+    #ifdef BOX32
     // find a block that was prereserve before and big enough
     size = (size+box64_pagesize-1)&~(box64_pagesize-1);
     uint32_t flag;
     static uintptr_t cur = 0x100000000LL;
     uintptr_t bend = 0;
     while(bend<0x800000000000LL) {
+        uint32_t map_flags = MAP_FIXED | ((fd==-1)?MAP_ANONYMOUS:0) | MAP_PRIVATE;
         if(rb_get_end(mapallmem, cur, &flag, &bend)) {
             if(flag == MEM_RESERVED && bend-cur>=size) {
-                void* ret = InternalMmap((void*)cur, size, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+                void* ret = InternalMmap((void*)cur, size, PROT_READ | PROT_WRITE | PROT_EXEC, map_flags, fd, offset);
                 if(ret!=MAP_FAILED)
                     rb_set(mapallmem, cur, cur+size, MEM_ALLOCATED);    // mark as allocated
                 else
@@ -1054,8 +1056,9 @@ void* box32_dynarec_mmap(size_t size)
         cur = bend;
     }
 #endif
+    uint32_t map_flags = ((fd==-1)?MAP_ANONYMOUS:0) | MAP_PRIVATE;
     //printf_log(LOG_INFO, "BOX32: Error allocating Dynarec memory: %s\n", "fallback to internal mmap");
-    return InternalMmap((void*)0x100000000LL, size, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+    return InternalMmap((void*)0x100000000LL, size, PROT_READ | PROT_WRITE | PROT_EXEC, map_flags, fd, offset);
     ;
 }
 
@@ -1065,6 +1068,7 @@ typedef struct mmaplist_s {
     int             cap;
     int             size;
     int             has_new;
+    int             dirty;
 } mmaplist_t;
 
 mmaplist_t* NewMmaplist()
@@ -1072,6 +1076,160 @@ mmaplist_t* NewMmaplist()
     return (mmaplist_t*)box_calloc(1, sizeof(mmaplist_t));
 }
 
+int MmaplistHasNew(mmaplist_t* list, int clear)
+{
+    if(!list) return 0;
+    int ret = list->has_new;
+    if(clear) list->has_new = 0;
+    return ret;
+}
+
+int MmaplistIsDirty(mmaplist_t* list)
+{
+    if(!list) return 0;
+    return list->dirty;
+}
+
+int MmaplistNBlocks(mmaplist_t* list)
+{
+    if(!list) return 0;
+    return list->size;
+}
+
+void MmaplistAddNBlocks(mmaplist_t* list, int nblocks)
+{
+    if(!list) return;
+    if(nblocks<=0) return;
+    list->cap = list->size + nblocks;
+    list->chunks = box_realloc(list->chunks, list->cap*sizeof(blocklist_t**));
+}
+
+int RelocsHaveCancel(dynablock_t* block);
+size_t MmaplistChunkGetUsedcode(blocklist_t* list)
+{
+    void* p = list->block;
+    void* end = list->block + list->size - sizeof(blockmark_t);
+    size_t total = 0;
+    while(p<end) {
+        if(((blockmark_t*)p)->next.fill) {
+            dynablock_t* b = *(dynablock_t**)((blockmark_t*)p)->mark;
+            size_t b_size = SIZE_BLOCK(((blockmark_t*)p)->next);
+            if(b->relocs && b->relocsize && RelocsHaveCancel(b))
+                b_size = 0;
+            total +=  b_size;
+        }
+        p = NEXT_BLOCK((blockmark_t*)p);
+    }
+    return total;
+}
+
+size_t MmaplistTotalAlloc(mmaplist_t* list)
+{
+    if(!list) return 0;
+    size_t total = 0;
+    for(int i=0; i<list->size; ++i)
+        total += MmaplistChunkGetUsedcode(list->chunks[i]);
+    return total;
+}
+
+int ApplyRelocs(dynablock_t* block, intptr_t delta_block, intptr_t delta_map, uintptr_t mapping_start);
+uintptr_t RelocGetNext();
+int MmaplistAddBlock(mmaplist_t* list, int fd, off_t offset, void* orig, size_t size, intptr_t delta_map, uintptr_t mapping_start)
+{
+    if(!list) return -1;
+    if(list->cap==list->size) {
+        list->cap += 4;
+        list->chunks = box_realloc(list->chunks, list->cap*sizeof(blocklist_t**));
+    }
+    int i = list->size++;
+    void* map = MAP_FAILED;
+    #ifdef BOX32
+    if(box64_is32bits)
+        map = box32_dynarec_mmap(size, fd, offset);
+    #endif
+    if(map==MAP_FAILED)
+        map = InternalMmap(NULL, size, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE, fd, offset);
+    if(map==MAP_FAILED) {
+        printf_log(LOG_INFO, "Failed to load block %d of a maplist\n", i);
+        return -3;
+    }
+    #ifdef MADV_HUGEPAGE
+    madvise(map, size, MADV_HUGEPAGE);
+    #endif
+    setProtection((uintptr_t)map, size, PROT_READ | PROT_WRITE | PROT_EXEC);
+    list->chunks[i] = map;
+    intptr_t delta = map - orig;
+    // relocate the pointers
+    if(delta) {
+        list->chunks[i]->block = ((void*)list->chunks[i]->block) + delta;
+        list->chunks[i]->first += delta;
+    }
+    // relocate all allocated dynablocks
+    void* p = list->chunks[i]->block;
+    void* end = map + size - sizeof(blockmark_t);
+    while(p<end) {
+        if(((blockmark_t*)p)->next.fill) {
+            void** b = (void**)((blockmark_t*)p)->mark;
+            // first is the address of the dynablock itself, that needs to be adjusted
+            b[0] += delta;
+            dynablock_t* bl = b[0];
+            // now relocate the dynablock fields, all of which need to be adjusted!
+            #define GO(A) if(bl->A) bl->A = ((void*)bl->A)+delta
+            GO(block);
+            GO(actual_block);
+            GO(instsize);
+            GO(arch);
+            GO(callrets);
+            GO(jmpnext);
+            GO(table64);
+            GO(relocs);
+            #undef GO
+            bl->previous = NULL;    // that seems safer that way
+            // shift the self reference to the dynablock
+            if(bl->block!=bl->jmpnext) {
+                void** db_ref = (bl->jmpnext-sizeof(void*));
+                *db_ref = (*db_ref)+delta;
+            }
+            // adjust x64_addr with delta_map
+            bl->x64_addr += delta_map;
+            *(uintptr_t*)(bl->jmpnext+2*sizeof(void*)) = RelocGetNext();
+            if(bl->relocs && bl->relocsize)
+                ApplyRelocs(bl, delta, delta_map, mapping_start);
+            ClearCache(bl->actual_block+sizeof(void*), bl->native_size);
+            //add block, as dirty for now
+            if(!addJumpTableIfDefault64(bl->x64_addr, bl->jmpnext)) {
+                // cannot add blocks?
+                printf_log(LOG_INFO, "Warning, cannot add DynaCache Block %d to JmpTable\n", i);
+            } else {
+                if(bl->x64_size) {
+                    dynarec_log(LOG_DEBUG, "Added DynaCache bl %p for %p - %p\n", bl, bl->x64_addr, bl->x64_addr+bl->x64_size);
+                    if(bl->x64_size>my_context->max_db_size) {
+                        my_context->max_db_size = bl->x64_size;
+                        dynarec_log(LOG_INFO, "BOX64 Dynarec: higher max_db=%d\n", my_context->max_db_size);
+                    }
+                    rb_inc(my_context->db_sizes, bl->x64_size, bl->x64_size+1);
+                }
+            }
+
+        }
+        p = NEXT_BLOCK((blockmark_t*)p);
+    }
+    // add new block to rbtt_dynmem
+    rb_set_64(rbt_dynmem, (uintptr_t)map, (uintptr_t)map+size, (uintptr_t)list->chunks[i]);
+
+    return 0;
+}
+
+void MmaplistFillBlocks(mmaplist_t* list, DynaCacheBlock_t* blocks)
+{
+    if(!list) return;
+    for(int i=0; i<list->size; ++i) {
+        blocks[i].block = list->chunks[i];
+        blocks[i].size = list->chunks[i]->size+sizeof(blocklist_t);
+        blocks[i].free_size = list->chunks[i]->maxfree;
+    }
+}
+
 void DelMmaplist(mmaplist_t* list)
 {
     if(!list) return;
@@ -1138,6 +1296,7 @@ uintptr_t AllocDynarecMap(uintptr_t x64_addr, size_t size, int is_new)
     if(!list)
         list = mmaplist = NewMmaplist();
     if(is_new) list->has_new = 1;
+    list->dirty = 1;
     // check if there is space in current open ones
     int idx = 0;
     uintptr_t sz = size + 2*sizeof(blockmark_t);
@@ -1162,13 +1321,13 @@ uintptr_t AllocDynarecMap(uintptr_t x64_addr, size_t size, int is_new)
     int i = list->size++;
     size_t need_sz = sz + sizeof(blocklist_t);
     // alloc a new block, aversized or not, we are at the end of the list
-    size_t allocsize = (need_sz>DYNMMAPSZ)?need_sz:DYNMMAPSZ;
+    size_t allocsize = (need_sz>(i?DYNMMAPSZ:DYNMMAPSZ0))?need_sz:(i?DYNMMAPSZ:DYNMMAPSZ0);
     // allign sz with pagesize
     allocsize = (allocsize+(box64_pagesize-1))&~(box64_pagesize-1);
     void* p=MAP_FAILED;
     #ifdef BOX32
     if(box64_is32bits)
-        p = box32_dynarec_mmap(allocsize);
+        p = box32_dynarec_mmap(allocsize, -1, 0);
     #endif
     // disabling for now. explicit hugepage needs to be enabled to be used on userspace 
     // with`/sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages` as the number of allowaed 2M huge page
@@ -2430,7 +2589,27 @@ int isLockAddress(uintptr_t addr)
     khint_t k = kh_get(lockaddress, lockaddress, addr);
     return (k==kh_end(lockaddress))?0:1;
 }
-
+int nLockAddressRange(uintptr_t start, size_t size)
+{
+    int n = 0;
+    uintptr_t end = start + size -1;
+    uintptr_t addr;
+    kh_foreach_key(lockaddress, addr,
+        if(addr>=start && addr<=end)
+            ++n;
+    );
+    return n;
+}
+void getLockAddressRange(uintptr_t start, size_t size, uintptr_t addrs[])
+{
+    int n = 0;
+    uintptr_t end = start + size -1;
+    uintptr_t addr;
+    kh_foreach_key(lockaddress, addr,
+        if(addr>=start && addr<=end)
+            addrs[n++] = addr;
+    );
+}
 #endif
 
 #ifndef MAP_FIXED_NOREPLACE
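A minimal caller sketch (hypothetical, not part of this diff) for the new lock-address range helpers above, assuming the usual box_malloc/box_free allocators and printf_log; the count-then-fill split presumably expects the caller to hold whatever lock already protects the lockaddress set between the two calls:

    // Hypothetical helper: dump every "LOCK"-able address recorded in a range.
    static void dumpLockAddresses(uintptr_t start, size_t size)
    {
        int n = nLockAddressRange(start, size);      // 1st pass: count entries in [start, start+size)
        if (n <= 0) return;
        uintptr_t* addrs = box_malloc(n * sizeof(uintptr_t));
        getLockAddressRange(start, size, addrs);     // 2nd pass: fill the array (must hold exactly n entries)
        for (int i = 0; i < n; ++i)
            printf_log(LOG_DEBUG, "LOCK address in range: %p\n", (void*)addrs[i]);
        box_free(addrs);
    }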
diff --git a/src/dynarec/arm64/dynarec_arm64_00.c b/src/dynarec/arm64/dynarec_arm64_00.c
index 75d963af..219fa44c 100644
--- a/src/dynarec/arm64/dynarec_arm64_00.c
+++ b/src/dynarec/arm64/dynarec_arm64_00.c
@@ -3365,7 +3365,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                 #endif
             }
             #if STEP < 2
-            if (!rex.is32bits && IsNativeCall(addr + i32, rex.is32bits, &dyn->insts[ninst].natcall, &dyn->insts[ninst].retn))
+            if (!rex.is32bits  && !dyn->need_reloc && IsNativeCall(addr + i32, rex.is32bits, &dyn->insts[ninst].natcall, &dyn->insts[ninst].retn))
                 tmp = dyn->insts[ninst].pass2choice = 3;
             else
                 tmp = dyn->insts[ninst].pass2choice = i32?0:1;
@@ -3377,8 +3377,14 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                     SETFLAGS(X_ALL, SF_SET_NODF);    // Hack to set flags to "dont'care" state
                     if(dyn->last_ip && (addr-dyn->last_ip<0x1000)) {
                         ADDx_U12(x2, xRIP, addr-dyn->last_ip);
+                    } else if(dyn->last_ip && (dyn->last_ip-addr<0x1000)) {
+                        SUBx_U12(x2, xRIP, dyn->last_ip-addr);
                     } else {
-                        MOV64x(x2, addr);
+                        if(dyn->need_reloc) {
+                            TABLE64(x2, addr);
+                        } else {
+                            MOV64x(x2, addr);
+                        }
                     }
                     PUSH1(x2);
                     MESSAGE(LOG_DUMP, "Native Call to %s (retn=%d)\n", GetBridgeName((void*)(dyn->insts[ninst].natcall - 1)) ?: GetNativeName(GetNativeFnc(dyn->insts[ninst].natcall - 1)), dyn->insts[ninst].retn);
@@ -3406,6 +3412,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                         CALL_S(const_int3, -1);
                         SMWRITE2();
                         LOAD_XEMU_CALL(xRIP);
+                        // in case of dyn->need_reloc, the previous GETIP_ will end up in a TABLE64 that will generate a RELOC_CANCELBLOCK as natcall will be out of the mmap space anyway
                         MOV64x(x3, dyn->insts[ninst].natcall);
                         ADDx_U12(x3, x3, 2+8+8);
                         CMPSx_REG(xRIP, x3);
@@ -3424,7 +3431,11 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                     break;
                 case 1:
                     // this is call to next step, so just push the return address to the stack
-                    MOV64x(x2, addr);
+                    if(dyn->need_reloc) {
+                        TABLE64(x2, addr);
+                    } else {
+                        MOV64x(x2, addr);
+                    }
                     PUSH1z(x2);
                     break;
                 default:
@@ -3434,7 +3445,11 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                         SETFLAGS(X_ALL, SF_SET_NODF);    // Hack to set flags to "dont'care" state
                     }
                     // regular call
-                    MOV64x(x2, addr);
+                    if(dyn->need_reloc) {
+                        TABLE64(x2, addr);
+                    } else {
+                        MOV64x(x2, addr);
+                    }
                     fpu_purgecache(dyn, ninst, 1, x1, x3, x4);
                     PUSH1z(x2);
                     if (BOX64DRENV(dynarec_callret)) {
@@ -3471,7 +3486,12 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                         // jumps out of current dynablock...
                         MARK;
                         j64 = getJumpTableAddress64(addr);
-                        MOV64x(x4, j64);
+                        if(dyn->need_reloc) {
+                            AddRelocTable64JmpTbl(dyn, ninst, addr, STEP);
+                            TABLE64_(x4, j64);
+                        } else {
+                            MOV64x(x4, j64);
+                        }
                         LDRx_U12(x4, x4, 0);
                         BR(x4);
                     }
diff --git a/src/dynarec/arm64/dynarec_arm64_64.c b/src/dynarec/arm64/dynarec_arm64_64.c
index de10a343..63f0b6af 100644
--- a/src/dynarec/arm64/dynarec_arm64_64.c
+++ b/src/dynarec/arm64/dynarec_arm64_64.c
@@ -1648,7 +1648,7 @@ uintptr_t dynarec64_64(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                         MARK;
                         j64 = getJumpTableAddress64(addr);
                         if(dyn->need_reloc) AddRelocTable64RetEndBlock(dyn, ninst, addr, STEP);
-                        TABLE64(x4, j64);
+                        TABLE64_(x4, j64);
                         LDRx_U12(x4, x4, 0);
                         BR(x4);
                     }
diff --git a/src/dynarec/arm64/dynarec_arm64_67.c b/src/dynarec/arm64/dynarec_arm64_67.c
index f44cb687..e6755147 100644
--- a/src/dynarec/arm64/dynarec_arm64_67.c
+++ b/src/dynarec/arm64/dynarec_arm64_67.c
@@ -1749,7 +1749,7 @@ uintptr_t dynarec64_67(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                         MARK;
                         j64 = getJumpTableAddress64(addr);
                         if(dyn->need_reloc) AddRelocTable64RetEndBlock(dyn, ninst, addr, STEP);
-                        TABLE64(x4, j64);
+                        TABLE64_(x4, j64);
                         LDRx_U12(x4, x4, 0);
                         BR(x4);
                     }
diff --git a/src/dynarec/arm64/dynarec_arm64_avx_f2_0f.c b/src/dynarec/arm64/dynarec_arm64_avx_f2_0f.c
index d1953a79..ab644ee8 100644
--- a/src/dynarec/arm64/dynarec_arm64_avx_f2_0f.c
+++ b/src/dynarec/arm64/dynarec_arm64_avx_f2_0f.c
@@ -509,9 +509,7 @@ uintptr_t dynarec64_AVX_F2_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip,
             INST_NAME("VADDSUBPS Gx, Vx, Ex");
             nextop = F8;
             q0 = fpu_get_scratch(dyn, ninst);
-            static float addsubps[4] = {-1.f, 1.f, -1.f, 1.f};
-            MAYUSE(addsubps);
-            MOV64x(x2, (uintptr_t)&addsubps);
+            TABLE64C(x2, const_4f_m1_1_m1_1);
             VLDR128_U12(q0, x2, 0);
             for(int l=0; l<1+vex.l; ++l) {
                 if(!l) { GETGX_empty_VXEX(v0, v2, v1, 0); } else { GETGY_empty_VYEY(v0, v2, v1); }
diff --git a/src/dynarec/arm64/dynarec_arm64_consts.c b/src/dynarec/arm64/dynarec_arm64_consts.c
index ce59c954..d30c0bd2 100644
--- a/src/dynarec/arm64/dynarec_arm64_consts.c
+++ b/src/dynarec/arm64/dynarec_arm64_consts.c
@@ -21,6 +21,7 @@
 #include "emu/x87emu_private.h"
 #include "emu/x64compstrings.h"
 #include "x64test.h"
+#include "dynarec/dynarec_next.h"
 
 static const int8_t mask_shift8[] = { -7, -6, -5, -4, -3, -2, -1, 0 };
 static const int8_t mask_string8[] = { 7, 6, 5, 4, 3, 2, 1, 0 };
@@ -30,9 +31,6 @@ static const double addsubpd[2] = {-1., 1.};
 static const float subaddps[4] = {1.f, -1.f, 1.f, -1.f};
 static const double subaddpd[2] = {1., -1.};
 
-void arm64_epilog(void);
-void* arm64_next(x64emu_t* emu, uintptr_t addr);
-
 #ifndef HAVE_TRACE
 void PrintTrace() {}
 #endif
@@ -84,6 +82,7 @@ uintptr_t getConst(arm64_consts_t which)
         case const_native_fld: return (uintptr_t)native_fld;
         case const_native_fstp: return (uintptr_t)native_fstp;
         case const_native_frstor: return (uintptr_t)native_frstor;
+        case const_native_next: return (uintptr_t)native_next;
         case const_int3: return (uintptr_t)EmuInt3;
         case const_x86syscall: return (uintptr_t)EmuX86Syscall;
         case const_x64syscall: return (uintptr_t)EmuX64Syscall;
@@ -122,7 +121,7 @@ uintptr_t getConst(arm64_consts_t which)
         case const_sse42_compare_string_implicit_len: return (uintptr_t)sse42_compare_string_implicit_len;
         case const_x64test_step: return (uintptr_t)x64test_step;
         case const_printtrace: return (uintptr_t)PrintTrace;
-        case const_epilog: return (uintptr_t)arm64_epilog;
+        case const_epilog: return (uintptr_t)native_epilog;
         case const_jmptbl32: return getJumpTable32();
         case const_jmptbl48: return getJumpTable48();
         case const_jmptbl64: return getJumpTable64();
diff --git a/src/dynarec/arm64/dynarec_arm64_consts.h b/src/dynarec/arm64/dynarec_arm64_consts.h
index 71e2bdce..9ba65aca 100644
--- a/src/dynarec/arm64/dynarec_arm64_consts.h
+++ b/src/dynarec/arm64/dynarec_arm64_consts.h
@@ -46,6 +46,7 @@ typedef enum arm64_consts_s {
     const_native_fld,
     const_native_fstp,
     const_native_frstor,
+    const_native_next,
     const_int3,
     const_x86syscall,
     const_x64syscall,
diff --git a/src/dynarec/arm64/dynarec_arm64_f20f.c b/src/dynarec/arm64/dynarec_arm64_f20f.c
index 6e13def9..e6289479 100644
--- a/src/dynarec/arm64/dynarec_arm64_f20f.c
+++ b/src/dynarec/arm64/dynarec_arm64_f20f.c
@@ -523,9 +523,7 @@ uintptr_t dynarec64_F20F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n
             GETGX(v0, 1);
             GETEX(v1, 0, 0);
             q0 = fpu_get_scratch(dyn, ninst);
-            static float addsubps[4] = {-1.f, 1.f, -1.f, 1.f};
-            MAYUSE(addsubps);
-            MOV64x(x2, (uintptr_t)&addsubps);   // no need to use table64, as box64 is loaded in low memory
+            TABLE64C(x2, const_4f_m1_1_m1_1);
             VLDR128_U12(q0, x2, 0);
             VFMLAQS(v0, v1, q0);
             break;
diff --git a/src/dynarec/arm64/dynarec_arm64_helper.c b/src/dynarec/arm64/dynarec_arm64_helper.c
index d4817803..e6ee1b70 100644
--- a/src/dynarec/arm64/dynarec_arm64_helper.c
+++ b/src/dynarec/arm64/dynarec_arm64_helper.c
@@ -101,7 +101,7 @@ uintptr_t geted(dynarec_arm_t* dyn, uintptr_t addr, int ninst, uint8_t nextop, u
             } else if(tmp<0 && tmp>-0x1000) {
                 GETIP(addr+delta);
                 SUBx_U12(ret, xRIP, -tmp);
-            } else if(tmp+addr+delta<0x1000000000000LL) {  // 3 opcodes to load immediate is cheap enough
+            } else if((tmp+addr+delta<0x1000000000000LL) && !dyn->need_reloc) {  // 3 opcodes to load immediate is cheap enough
                 MOV64x(ret, tmp+addr+delta);
             } else {
                 MOV64x(ret, tmp);
@@ -575,7 +575,11 @@ static int indirect_lookup(dynarec_arm_t* dyn, int ninst, int is32bits, int s1,
         LSRx_IMM(s1, xRIP, 48);
         CBNZw(s1, (intptr_t)dyn->jmp_next - (intptr_t)dyn->block);
         // load table
-        MOV64x(s2, getConst(const_jmptbl48));    // this is a static value, so will be a low address
+        if(dyn->need_reloc) {
+            TABLE64C(s2, const_jmptbl48);
+        } else {
+            MOV64x(s2, getConst(const_jmptbl48));    // this is a static value, so will be a low address
+        }
         #ifdef JMPTABL_SHIFT4
         UBFXx(s1, xRIP, JMPTABL_START3, JMPTABL_SHIFT3);
         LDRx_REG_LSL3(s2, s2, s1);
@@ -751,7 +755,11 @@ void iret_to_epilog(dynarec_arm_t* dyn, uintptr_t ip, int ninst, int is32bits, i
     MOVx_REG(xRSP, x3);
     MARKSEG;
     // Ret....
-    MOV64x(x2, getConst(const_epilog));  // epilog on purpose, CS might have changed!
+    // epilog on purpose, CS might have changed!
+    if(dyn->need_reloc)
+        TABLE64C(x2, const_epilog);
+    else
+        MOV64x(x2, getConst(const_epilog));
     BR(x2);
     CLEARIP();
 }
@@ -880,8 +888,13 @@ void call_n(dynarec_arm_t* dyn, int ninst, void* fnc, int w)
     MOVx_REG(x4, xR8);
     MOVx_REG(x5, xR9);
     // native call
-    // fnc is indirect, to help with relocation (but PltResolver might be an issue here)
-    TABLE64(16, *(uintptr_t*)fnc);    // using x16 as scratch regs for call address
+    if(dyn->need_reloc) {
+        // fnc is indirect, to help with relocation (but PltResolver might be an issue here)
+        TABLE64(16, (uintptr_t)fnc);
+        LDRx_U12(16, 16, 0);
+    } else {
+        TABLE64_(16, *(uintptr_t*)fnc);    // using x16 as scratch regs for call address
+    }
     BLR(16);
     // put return value in x64 regs
     if(w>0) {
diff --git a/src/dynarec/arm64/dynarec_arm64_helper.h b/src/dynarec/arm64/dynarec_arm64_helper.h
index 8806777c..63418126 100644
--- a/src/dynarec/arm64/dynarec_arm64_helper.h
+++ b/src/dynarec/arm64/dynarec_arm64_helper.h
@@ -1229,31 +1229,40 @@
 #define GETIP_(A) MOV64x(xRIP, A)
 #else
 // put value in the Table64 even if not using it for now to avoid difference between Step2 and Step3. Needs to be optimized later...
-#define GETIP(A)                                        \
-    if(dyn->last_ip && ((A)-dyn->last_ip)<0x1000) {     \
-        uint64_t _delta_ip = (A)-dyn->last_ip;          \
-        dyn->last_ip += _delta_ip;                      \
-        if(_delta_ip) {                                 \
-            ADDx_U12(xRIP, xRIP, _delta_ip);            \
-        }                                               \
-    } else {                                            \
-        dyn->last_ip = (A);                             \
-        if(dyn->need_reloc) {                           \
-            TABLE64(xRIP, dyn->last_ip);                \
-        } else {                                        \
-            MOV64x(xRIP, dyn->last_ip);                 \
-        }                                               \
+#define GETIP(A)                                            \
+    if(dyn->last_ip && ((A)-dyn->last_ip)<0x1000) {         \
+        uint64_t _delta_ip = (A)-dyn->last_ip;              \
+        dyn->last_ip += _delta_ip;                          \
+        if(_delta_ip) {                                     \
+            ADDx_U12(xRIP, xRIP, _delta_ip);                \
+        }                                                   \
+    } else if(dyn->last_ip && (dyn->last_ip-(A))<0x1000) {  \
+        uint64_t _delta_ip = dyn->last_ip-(A);              \
+        dyn->last_ip -= _delta_ip;                          \
+        if(_delta_ip) {                                     \
+            SUBx_U12(xRIP, xRIP, _delta_ip);                \
+        }                                                   \
+    } else {                                                \
+        dyn->last_ip = (A);                                 \
+        if(dyn->need_reloc) {                               \
+            TABLE64(xRIP, dyn->last_ip);                    \
+        } else {                                            \
+            MOV64x(xRIP, dyn->last_ip);                     \
+        }                                                   \
     }
-#define GETIP_(A)                                       \
-    if(dyn->last_ip && ((A)-dyn->last_ip)<0x1000) {     \
-        uint64_t _delta_ip = (A)-dyn->last_ip;          \
-        if(_delta_ip) {ADDx_U12(xRIP, xRIP, _delta_ip);}\
-    } else {                                            \
-        if(dyn->need_reloc) {                           \
-            TABLE64(xRIP, (A));                         \
-        } else {                                        \
-            MOV64x(xRIP, (A));                          \
-        }                                               \
+#define GETIP_(A)                                           \
+    if(dyn->last_ip && ((A)-dyn->last_ip)<0x1000) {         \
+        uint64_t _delta_ip = (A)-dyn->last_ip;              \
+        if(_delta_ip) {ADDx_U12(xRIP, xRIP, _delta_ip);}    \
+    } else if(dyn->last_ip && (dyn->last_ip-(A))<0x1000) {  \
+        uint64_t _delta_ip = dyn->last_ip-(A);              \
+        if(_delta_ip) {SUBx_U12(xRIP, xRIP, _delta_ip);}    \
+    } else {                                                \
+        if(dyn->need_reloc) {                               \
+            TABLE64(xRIP, (A));                             \
+        } else {                                            \
+            MOV64x(xRIP, (A));                              \
+        }                                                   \
     }
 #endif
 #define CLEARIP()   dyn->last_ip=0
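A worked example (illustrative values, not from the diff) of the new backward branch in GETIP/GETIP_: with dyn->last_ip = 0x401010 and A = 0x401004, (A)-dyn->last_ip wraps around and fails the first test, but dyn->last_ip-(A) = 0xC < 0x1000, so the macro now emits a single

    SUBx_U12(xRIP, xRIP, 0xC);

instead of falling through to a full MOV64x (or TABLE64 under dyn->need_reloc) load of the 64-bit address.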
diff --git a/src/dynarec/arm64/dynarec_arm64_pass2.h b/src/dynarec/arm64/dynarec_arm64_pass2.h
index 6bedf76a..90abb349 100644
--- a/src/dynarec/arm64/dynarec_arm64_pass2.h
+++ b/src/dynarec/arm64/dynarec_arm64_pass2.h
@@ -15,19 +15,19 @@
         }
 #define INST_EPILOG dyn->insts[ninst].epilog = dyn->native_size; 
 #define INST_NAME(name) 
-#define TABLE64(A, V)   {                                                       \
-                if(dyn->need_reloc && !isTable64(dyn, (V)))                     \
+#define TABLE64(A, V)   do {                                                    \
+                if(dyn->need_reloc)                                             \
                         AddRelocTable64Addr(dyn, ninst, (V), 2);                \
                 Table64(dyn, (V), 2); EMIT(0);                                  \
-        }
-#define TABLE64_(A, V)   {                                                      \
+        } while(0)
+#define TABLE64_(A, V)  do {                                                    \
                 Table64(dyn, (V), 2); EMIT(0);                                  \
-        }
-#define TABLE64C(A, V)  {                                                       \
-                if(dyn->need_reloc && !isTable64(dyn, getConst(V)))             \
+        } while(0)
+#define TABLE64C(A, V)  do {                                                    \
+                if(dyn->need_reloc)                                             \
                         AddRelocTable64Const(dyn, ninst, (V), 2);               \
                 Table64(dyn, getConst(V), 2); EMIT(0);                          \
-        }
-#define FTABLE64(A, V)  {mmx87_regs_t v = {.d = V}; Table64(dyn, v.q, 2); EMIT(0);}
+        } while(0)
+#define FTABLE64(A, V)  do {mmx87_regs_t v = {.d = V}; Table64(dyn, v.q, 2); EMIT(0);} while(0)
 #define CALLRET_RET()   do {dyn->callrets[dyn->callret_size].type = 0; dyn->callrets[dyn->callret_size++].offs = dyn->native_size; EMIT(ARCH_NOP); } while(0)
 #define CALLRET_LOOP()   do {dyn->callrets[dyn->callret_size].type = 1; dyn->callrets[dyn->callret_size++].offs = dyn->native_size; EMIT(ARCH_NOP); } while(0)
\ No newline at end of file
diff --git a/src/dynarec/arm64/dynarec_arm64_pass3.h b/src/dynarec/arm64/dynarec_arm64_pass3.h
index 5930c410..c59d2aee 100644
--- a/src/dynarec/arm64/dynarec_arm64_pass3.h
+++ b/src/dynarec/arm64/dynarec_arm64_pass3.h
@@ -24,22 +24,22 @@
     }
 #define INST_EPILOG
 #define INST_NAME(name) inst_name_pass3(dyn, ninst, name, rex)
-#define TABLE64(A, V)   {                                                                   \
-                if(dyn->need_reloc && !isTable64(dyn, (V)))                                 \
+#define TABLE64(A, V)  do {                                                                 \
+                if(dyn->need_reloc)                                                         \
                         AddRelocTable64Addr(dyn, ninst, (V), 3);                            \
                 int val64offset = Table64(dyn, (V), 3);                                     \
                 MESSAGE(LOG_DUMP, "  Table64: 0x%lx\n", (V)); LDRx_literal(A, val64offset); \
-            }
+            } while(0)
 #define TABLE64_(A, V)   {                                                                  \
                 int val64offset = Table64(dyn, (V), 3);                                     \
                 MESSAGE(LOG_DUMP, "  Table64: 0x%lx\n", (V)); LDRx_literal(A, val64offset); \
             }
-#define TABLE64C(A, V)  {                                                                   \
-                if(dyn->need_reloc && !isTable64(dyn, getConst(V)))                         \
+#define TABLE64C(A, V) do {                                                                 \
+                if(dyn->need_reloc)                                                         \
                         AddRelocTable64Const(dyn, ninst, (V), 3);                           \
                 int val64offset = Table64(dyn, getConst(V), 3);                             \
-                MESSAGE(LOG_DUMP, "  Table64: 0x%lx\n", (V)); LDRx_literal(A, val64offset); \
-            }
-#define FTABLE64(A, V)  {mmx87_regs_t v = {.d = V}; int val64offset = Table64(dyn, v.q, 3); MESSAGE(LOG_DUMP, "  FTable64: %g\n", v.d); VLDR64_literal(A, val64offset);}
+                MESSAGE(LOG_DUMP, "  Table64C: 0x%lx\n", (V)); LDRx_literal(A, val64offset);\
+            } while(0)
+#define FTABLE64(A, V)  do {mmx87_regs_t v = {.d = V}; int val64offset = Table64(dyn, v.q, 3); MESSAGE(LOG_DUMP, "  FTable64: %g\n", v.d); VLDR64_literal(A, val64offset);} while(0)
 #define CALLRET_RET()   do {dyn->callrets[dyn->callret_size].type = 0; dyn->callrets[dyn->callret_size++].offs = dyn->native_size; EMIT(ARCH_NOP); } while(0)
 #define CALLRET_LOOP()   do {dyn->callrets[dyn->callret_size].type = 1; dyn->callrets[dyn->callret_size++].offs = dyn->native_size; EMIT(ARCH_NOP); } while(0)
diff --git a/src/dynarec/dynablock.c b/src/dynarec/dynablock.c
index b5a81020..b804eff2 100644
--- a/src/dynarec/dynablock.c
+++ b/src/dynarec/dynablock.c
@@ -85,14 +85,15 @@ void FreeInvalidDynablock(dynablock_t* db, int need_lock)
     }
 }
 
-void FreeDynablock(dynablock_t* db, int need_lock)
+void FreeDynablock(dynablock_t* db, int need_lock, int need_remove)
 {
     if(db) {
         if(db->gone)
             return; // already in the process of deletion!
         dynarec_log(LOG_DEBUG, "FreeDynablock(%p), db->block=%p x64=%p:%p already gone=%d\n", db, db->block, db->x64_addr, db->x64_addr+db->x64_size-1, db->gone);
         // remove jumptable without waiting
-        setJumpTableDefault64(db->x64_addr);
+        if(need_remove)
+            setJumpTableDefault64(db->x64_addr);
         if(need_lock)
             mutex_lock(&my_context->mutex_dyndump);
         dynarec_log(LOG_DEBUG, " -- FreeDyrecMap(%p, %d)\n", db->actual_block, db->size);
@@ -168,7 +169,7 @@ int FreeRangeDynablock(dynablock_t* db, uintptr_t addr, uintptr_t size)
 
     int need_lock = my_context?1:0;
     if(IntervalIntersects((uintptr_t)db->x64_addr, (uintptr_t)db->x64_addr+db->x64_size-1, addr, addr+size+1)) {
-        FreeDynablock(db, need_lock);
+        FreeDynablock(db, need_lock, 1);
         return 0;
     }
     return 1;
@@ -233,7 +234,7 @@ static dynablock_t* internalDBGetBlock(x64emu_t* emu, uintptr_t addr, uintptr_t
     if(block) {
         // fill-in jumptable
         if(!addJumpTableIfDefault64(block->x64_addr, block->dirty?block->jmpnext:block->block)) {
-            FreeDynablock(block, 0);
+            FreeDynablock(block, 0, 0);
             block = getDB(addr);
             MarkDynablock(block);   // just in case...
         } else {
@@ -261,9 +262,9 @@ dynablock_t* DBGetBlock(x64emu_t* emu, uintptr_t addr, int create, int is32bits)
     if(is_inhotpage && !BOX64ENV(dynarec_dirty))
         return NULL;
     dynablock_t *db = internalDBGetBlock(emu, addr, addr, create, 1, is32bits, 1);
-    if(db && db->done && db->block && getNeedTest(addr)) {
+    if(db && db->done && db->block && (db->dirty || getNeedTest(addr))) {
         if (db->always_test) SchedYield(); // just calm down...
-        uint32_t hash = X31_hash_code(db->x64_addr, db->x64_size);
+        uint32_t hash = db->dirty?(~db->hash):X31_hash_code(db->x64_addr, db->x64_size);
         if(is_inhotpage && hash!=db->hash)
             return NULL;    // will be handle when hotpage is over
         int need_lock = mutex_trylock(&my_context->mutex_dyndump);
@@ -313,10 +314,10 @@ dynablock_t* DBAlternateBlock(x64emu_t* emu, uintptr_t addr, uintptr_t filladdr,
     dynarec_log(LOG_DEBUG, "Creating AlternateBlock at %p for %p%s\n", (void*)addr, (void*)filladdr, is32bits?" 32bits":"");
     int create = 1;
     dynablock_t *db = internalDBGetBlock(emu, addr, filladdr, create, 1, is32bits, 1);
-    if(db && db->done && db->block && getNeedTest(filladdr)) {
+    if(db && db->done && db->block && (db->dirty || getNeedTest(filladdr))) {
         if (db->always_test) SchedYield(); // just calm down...
         int need_lock = mutex_trylock(&my_context->mutex_dyndump);
-        uint32_t hash = X31_hash_code(db->x64_addr, db->x64_size);
+        uint32_t hash = db->dirty?(~db->hash):X31_hash_code(db->x64_addr, db->x64_size);
         if(hash!=db->hash) {
             db->done = 0;   // invalidating the block
             dynarec_log(LOG_DEBUG, "Invalidating alt block %p from %p:%p (hash:%X/%X) for %p\n", db, db->x64_addr, db->x64_addr+db->x64_size, hash, db->hash, (void*)addr);
diff --git a/src/dynarec/dynablock_private.h b/src/dynarec/dynablock_private.h
index f78d457c..db142c25 100644
--- a/src/dynarec/dynablock_private.h
+++ b/src/dynarec/dynablock_private.h
@@ -17,6 +17,7 @@ typedef struct dynablock_s {
     struct dynablock_s*    previous;   // a previous block that might need to be freed
     void*           x64_addr;
     uintptr_t       x64_size;
+    size_t          native_size;
     int             size;
     uint32_t        hash;
     uint8_t         done;
@@ -31,6 +32,9 @@ typedef struct dynablock_s {
     void*           arch;       // arch dependant per inst info (can be NULL)
     callret_t*      callrets;   // array of callret return, with NOP / UDF depending if the block is clean or dirty
     void*           jmpnext;    // a branch jmpnext code when block is marked
+    size_t          table64size;// to check table64
+    void*           table64;    // to relocate the table64
+    size_t          relocsize;  // size of the relocs data, in bytes (could be an int)
     void*           relocs;     // relocations, when block is loaded
     #ifdef GDBJIT
     void*           gdbjit_block;
diff --git a/src/dynarec/dynacache_reloc.c b/src/dynarec/dynacache_reloc.c
index 7692e007..b145629e 100644
--- a/src/dynarec/dynacache_reloc.c
+++ b/src/dynarec/dynacache_reloc.c
@@ -4,6 +4,7 @@
 #include "os.h"
 #include "dynarec_private.h"
 #include "emu/x64run_private.h"
+#include "dynablock_private.h"
 #include "dynarec_arch.h"
 #include "custommem.h"
 #include "debug.h"
@@ -13,7 +14,7 @@
 #define RELOC_TBL64RETENDBL 3
 #define RELOC_CANCELBLOCK   4
 #define RELOC_TBL64TBLJMPH  5
-#define RELOC_TBL64TBLJMPL  5
+#define RELOC_TBL64TBLJMPL  6
 
 typedef union reloc_s {
     uint8_t type;
@@ -48,6 +49,8 @@ void AddRelocTable64Const(dynarec_native_t* dyn, int ninst, native_consts_t C, i
 {
     if(!dyn->need_reloc)
         return;
+    if(isTable64(dyn, getConst(C)))
+        return;
     if(pass<3)  {
         dyn->reloc_size++;
         return;
@@ -117,12 +120,13 @@ void AddRelocTable64Addr(dynarec_native_t* dyn, int ninst, uintptr_t addr, int p
     }
     if(!ok) return AddRelocCancelBlock(dyn, ninst, pass);
     if(pass<3) {
-        dyn->reloc_size+=2;
+        dyn->reloc_size++;
         return;
     }
     reloc_t reloc = {0};
     reloc.type = RELOC_TBL64ADDR;
     reloc.table64addr.idx = dyn->table64size;
+    dyn->relocs[dyn->reloc_size++] = reloc.x;
 }
 
 
@@ -159,4 +163,80 @@ void AddRelocTable64JmpTbl(dynarec_native_t* dyn, int ninst, uintptr_t addr, int
     reloc.type = RELOC_TBL64TBLJMPL;
     reloc.table64jmptbll.deltal = delta&0xffffff;
     dyn->relocs[dyn->reloc_size++] = reloc.x;
+}
+
+int ApplyRelocs(dynablock_t* block, intptr_t delta_block, intptr_t delta_map, uintptr_t mapping_start)
+{
+    if(!block || !block->relocs || !block->relocsize)
+        return 0;
+    size_t reloc_size = block->relocsize / sizeof(uint32_t);
+    reloc_t *relocs = block->relocs;
+    uint64_t *table64 = block->table64;
+    int idx;
+
+    int i = 0;
+    uintptr_t addr;
+    dynarec_log(LOG_DEBUG, "Will apply %zu relocs to dynablock starting at %p - %p\n", reloc_size, block->x64_addr, block->x64_addr + block->x64_size);
+    while(i<reloc_size) {
+        idx = -1;
+        switch(relocs[i].type) {
+            case RELOC_TBL64C:
+                idx = relocs[i].table64c.idx;
+                table64[idx] = getConst(relocs[i].table64c.C);
+                dynarec_log(LOG_DEBUG, "\tApply Relocs[%d]: TABLE64[%d]=Const:%d\n", i, idx, relocs[i].table64c.C);
+                break;
+            case RELOC_TBL64ADDR:
+                idx = relocs[i].table64addr.idx;
+                table64[idx] += delta_map;
+                dynarec_log(LOG_DEBUG, "\tApply Relocs[%d]: TABLE64[%d]=Addr in Map, delta=%zd\n", i, idx, delta_map);
+                break;
+            case RELOC_TBL64RETENDBL:
+                idx = relocs[i].table64retendbl.idx;
+                addr = (uintptr_t)block->x64_addr + block->x64_size + relocs[i].table64retendbl.delta;
+                table64[idx] = getJumpTableAddress64(addr);
+                dynarec_log(LOG_DEBUG, "\tApply Relocs[%d]: TABLE64[%d]=JmpTable64(%p)\n", i, idx, (void*)addr);
+                break;
+            case RELOC_CANCELBLOCK:
+                dynarec_log(LOG_DEBUG, "\tApply Relocs[%d]: Cancel Block\n", i);
+                block->dirty = 1;
+                block->hash = 0;
+                return 0;
+            case RELOC_TBL64TBLJMPH:
+                if(relocs[i+1].type!=RELOC_TBL64TBLJMPL)
+                    return -2;  // bad sequence
+                idx = relocs[i].table64jmptblh.idx;
+                addr = relocs[i].table64jmptblh.deltah;
+                addr = mapping_start + relocs[i+1].table64jmptbll.deltal + (addr<<24);
+                table64[idx] = getJumpTableAddress64(addr);
+                dynarec_log(LOG_DEBUG, "\tApply Relocs[%d,%d]: TABLE64[%d]=JmpTable64(%p)=%p\n", i, i+1, idx, (void*)addr, getJumpTableAddress64(addr));
+                break;
+            case RELOC_TBL64TBLJMPL:
+                break;
+            default: 
+                dynarec_log(LOG_DEBUG, "\tUnknown Relocs[%d]: %d\n", i, relocs[i].type);
+                return -1;
+        }
+        if(idx!=-1) {
+            if(idx>=block->table64size) {
+                dynarec_log(LOG_NONE, "Warning: Reloc Table64 idx out of range (%d vs %d)\n", idx, block->table64size);
+            }
+        }
+        ++i;
+    }
+    return 0;
+}
+
+int RelocsHaveCancel(dynablock_t* block)
+{
+    if(!block->relocs || !block->relocsize)
+        return 0;
+    size_t reloc_size = block->relocsize/sizeof(uint32_t);
+    for(size_t i=0; i<reloc_size; ++i)
+        if(((reloc_t*)block->relocs)[i].type == RELOC_CANCELBLOCK)
+            return 1;
+    return 0;
+}
+
+uintptr_t RelocGetNext() {
+    return getConst(const_native_next);
 }
\ No newline at end of file
diff --git a/src/dynarec/dynacache_reloc.h b/src/dynarec/dynacache_reloc.h
index fed00562..dc0f63a4 100644
--- a/src/dynarec/dynacache_reloc.h
+++ b/src/dynarec/dynacache_reloc.h
@@ -17,4 +17,8 @@ void AddRelocTable64RetEndBlock(dynarec_native_t* dyn, int ninst, uintptr_t addr
 void AddRelocTable64JmpTbl(dynarec_native_t* dyn, int ninst, uintptr_t addr, int pass);
 void AddRelocCancelBlock(dynarec_native_t* dyn, int ninst, int pass);
 
+int ApplyRelocs(dynablock_t* block, intptr_t delta_block, intptr_t delta_map, uintptr_t mapping_start);
+int RelocsHaveCancel(dynablock_t* block);
+uintptr_t RelocGetNext();
+
 #endif
\ No newline at end of file
diff --git a/src/dynarec/dynarec_native.c b/src/dynarec/dynarec_native.c
index bba18978..8e142b57 100644
--- a/src/dynarec/dynarec_native.c
+++ b/src/dynarec/dynarec_native.c
@@ -23,6 +23,9 @@
 #include "dynarec_arch.h"
 #include "dynarec_next.h"
 #include "gdbjit.h"
+#include "khash.h"
+
+KHASH_MAP_INIT_INT64(table64, uint32_t)
 
 void printf_x64_instruction(dynarec_native_t* dyn, zydis_dec_t* dec, instruction_x64_t* inst, const char* name) {
     uint8_t *ip = (uint8_t*)inst->addr;
@@ -293,36 +296,51 @@ void addInst(instsize_t* insts, size_t* size, int x64_size, int native_size)
     }
 }
 
+static kh_table64_t* khtable64 = NULL;
+
 int isTable64(dynarec_native_t *dyn, uint64_t val)
 {
-    // find the value if already present
-    int idx = -1;
-    for(int i=0; i<dyn->table64size && (idx==-1); ++i)
-        if(dyn->table64[i] == val)
-            idx = i;
-    return (idx!=-1);
+    if(!khtable64)
+        return 0;
+    if(kh_get(table64, khtable64, val)==kh_end(khtable64))
+        return 0;
+    return 1;
 }
 // add a value to table64 (if needed) and gives back the imm19 to use in LDR_literal
 int Table64(dynarec_native_t *dyn, uint64_t val, int pass)
 {
+    if(!khtable64)
+        khtable64 = kh_init(table64);
     // find the value if already present
-    int idx = -1;
-    for(int i=0; i<dyn->table64size && (idx==-1); ++i)
-        if(dyn->table64[i] == val)
-            idx = i;
-    // not found, add it
-    if(idx==-1) {
+    khint_t k = kh_get(table64, khtable64, val);
+    uint32_t idx = 0;
+    if(k!=kh_end(khtable64)) {
+        idx = kh_value(khtable64, k);
+    } else {
         idx = dyn->table64size++;
-        if(idx < dyn->table64cap)
-            dyn->table64[idx] = val;
-        else if(pass==3)
-            printf_log(LOG_NONE, "Warning, table64 bigger than expected %d vs %d\n", idx, dyn->table64cap);
+        if(pass==3) {
+            if(idx < dyn->table64cap)
+                dyn->table64[idx] = val;
+            else
+                printf_log(LOG_NONE, "Warning, table64 bigger than expected %d vs %d\n", idx, dyn->table64cap);
+        }
+        int ret;
+        k = kh_put(table64, khtable64, val, &ret);
+        kh_value(khtable64, k) = idx;
     }
     // calculate offset
     int delta = dyn->tablestart + idx*sizeof(uint64_t) - (uintptr_t)dyn->block;
     return delta;
 }
 
+void ResetTable64(dynarec_native_t* dyn)
+{
+    dyn->table64size = 0;
+    if(khtable64) {
+        kh_clear(table64, khtable64);
+    }
+}
+
 static void recurse_mark_alive(dynarec_native_t* dyn, int i)
 {
     if(dyn->insts[i].x64.alive)
@@ -510,7 +528,6 @@ static void updateYmm0s(dynarec_native_t* dyn, int ninst, int max_ninst_reached)
 void* current_helper = NULL;
 static int static_jmps[MAX_INSTS+2];
 static uintptr_t static_next[MAX_INSTS+2];
-static uint64_t static_table64[(MAX_INSTS+3)/4];
 static instruction_native_t static_insts[MAX_INSTS+2] = {0};
 static callret_t static_callrets[MAX_INSTS+2] = {0};
 // TODO: ninst could be a uint16_t instead of an int, that could same some temp. memory
@@ -640,8 +657,9 @@ dynablock_t* FillBlock64(uintptr_t addr, int alternate, int is32bits, int inst_m
     helper.jmp_cap = MAX_INSTS;
     helper.next = static_next;
     helper.next_cap = MAX_INSTS;
-    helper.table64 = static_table64;
-    helper.table64cap = sizeof(static_table64)/sizeof(uint64_t);
+    helper.table64 = NULL;
+    ResetTable64(&helper);
+    helper.table64cap = 0;
     helper.end = addr + SizeFileMapped(addr);
     if(helper.end == helper.start)  // that means there is no mmap with a file associated to the memory
         helper.end = (uintptr_t)~0LL;
@@ -780,7 +798,7 @@ dynablock_t* FillBlock64(uintptr_t addr, int alternate, int is32bits, int inst_m
     // no need for next anymore
     helper.next_sz = helper.next_cap = 0;
     helper.next = NULL;
-    helper.table64size = 0;
+    ResetTable64(&helper);
     helper.reloc_size = 0;
     // pass 1, float optimizations, first pass for flags
     native_pass1(&helper, addr, alternate, is32bits, inst_max);
@@ -794,7 +812,7 @@ dynablock_t* FillBlock64(uintptr_t addr, int alternate, int is32bits, int inst_m
             helper.need_x87check = 0;
     }
     POSTUPDATE_SPECIFICS(&helper);
-    helper.table64size = 0;
+    ResetTable64(&helper);
     helper.reloc_size = 0;
     // pass 2, instruction size
     helper.callrets = static_callrets;
@@ -850,6 +868,7 @@ dynablock_t* FillBlock64(uintptr_t addr, int alternate, int is32bits, int inst_m
     block->actual_block = actual_p;
     helper.relocs = relocs;
     block->relocs = relocs;
+    block->table64size = helper.table64size;
     helper.native_start = (uintptr_t)p;
     helper.tablestart = (uintptr_t)tablestart;
     helper.jmp_next = (uintptr_t)next+sizeof(void*);
@@ -858,6 +877,7 @@ dynablock_t* FillBlock64(uintptr_t addr, int alternate, int is32bits, int inst_m
     helper.table64cap = helper.table64size;
     helper.table64 = (uint64_t*)helper.tablestart;
     helper.callrets = (callret_t*)callrets;
+    block->table64 = helper.table64;
     if(callret_size)
         memcpy(helper.callrets, static_callrets, helper.callret_size*sizeof(callret_t));
     helper.callret_size = 0;
@@ -875,7 +895,7 @@ dynablock_t* FillBlock64(uintptr_t addr, int alternate, int is32bits, int inst_m
     size_t oldinstsize = helper.insts_size;
     int oldsize= helper.size;
     helper.native_size = 0;
-    helper.table64size = 0; // reset table64 (but not the cap)
+    ResetTable64(&helper); // reset table64 (but not the cap)
     helper.insts_size = 0;  // reset
     helper.reloc_size = 0;
     native_pass3(&helper, addr, alternate, is32bits, inst_max);
@@ -899,6 +919,7 @@ dynablock_t* FillBlock64(uintptr_t addr, int alternate, int is32bits, int inst_m
     block->always_test = helper.always_test;
     block->dirty = block->always_test;
     block->is32bits = is32bits;
+    block->relocsize = helper.reloc_size*sizeof(uint32_t);
     if(arch_size) {
         block->arch_size = arch_size;
         block->arch = ARCH_FILL(&helper, arch, arch_size);
@@ -909,6 +930,7 @@ dynablock_t* FillBlock64(uintptr_t addr, int alternate, int is32bits, int inst_m
     }
     block->callret_size = helper.callret_size;
     block->callrets = helper.callrets;
+    block->native_size = native_size;
     *(dynablock_t**)next = block;
     *(void**)(next+3*sizeof(void*)) = native_next;
     CreateJmpNext(block->jmpnext, next+3*sizeof(void*));
@@ -950,6 +972,7 @@ dynablock_t* FillBlock64(uintptr_t addr, int alternate, int is32bits, int inst_m
         return NULL;
     }
     // ok, free the helper now
+    ResetTable64(&helper);
     //dynaFree(helper.insts);
     helper.insts = NULL;
     if(insts_rsize/sizeof(instsize_t)<helper.insts_size) {
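A standalone sketch (hypothetical example, assuming only klib's khash.h on the include path; none of these names come from the box64 tree) of the hash-map deduplication pattern that isTable64/Table64 switch to above, mapping each 64-bit value to its slot index so duplicates reuse the same table entry without rescanning a flat array:

    #include <stdint.h>
    #include <stdio.h>
    #include "khash.h"

    KHASH_MAP_INIT_INT64(t64, uint32_t)             // key: 64-bit constant, value: slot index

    int main(void)
    {
        kh_t64_t* h = kh_init(t64);
        uint64_t vals[] = { 0x1234, 0x5678, 0x1234 };   // last value is a duplicate
        uint32_t next_idx = 0;
        for (int i = 0; i < 3; ++i) {
            khint_t k = kh_get(t64, h, vals[i]);
            if (k == kh_end(h)) {                       // not seen yet: assign the next free slot
                int absent;
                k = kh_put(t64, h, vals[i], &absent);
                kh_value(h, k) = next_idx++;
            }
            printf("value %#llx -> slot %u\n", (unsigned long long)vals[i], kh_value(h, k));
        }
        kh_destroy(t64, h);
        return 0;
    }

The trade-off mirrors the patch: lookups drop from a linear scan of table64 to an amortized O(1) hash probe, at the cost of a static kh table that has to be cleared via ResetTable64 between passes and blocks.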
diff --git a/src/dynarec/la64/dynarec_la64_00.c b/src/dynarec/la64/dynarec_la64_00.c
index 20b7a597..43c4f1f5 100644
--- a/src/dynarec/la64/dynarec_la64_00.c
+++ b/src/dynarec/la64/dynarec_la64_00.c
@@ -2446,7 +2446,9 @@ uintptr_t dynarec64_00(dynarec_la64_t* dyn, uintptr_t addr, uintptr_t ip, int ni
                         // jumps out of current dynablock...
                         MARK;
                         j64 = getJumpTableAddress64(addr);
-                        TABLE64(x4, j64);
+                        if(dyn->need_reloc)
+                            AddRelocTable64JmpTbl(dyn, ninst, addr, STEP);
+                        TABLE64_(x4, j64);
                         LD_D(x4, x4, 0);
                         BR(x4);
                     }
@@ -2842,7 +2844,9 @@ uintptr_t dynarec64_00(dynarec_la64_t* dyn, uintptr_t addr, uintptr_t ip, int ni
                         // jumps out of current dynablock...
                         MARK;
                         j64 = getJumpTableAddress64(addr);
-                        TABLE64(x4, j64);
+                        if(dyn->need_reloc)
+                            AddRelocTable64JmpTbl(dyn, ninst, addr, STEP);
+                        TABLE64_(x4, j64);
                         LD_D(x4, x4, 0);
                         BR(x4);
                     }
diff --git a/src/elfs/elfloader.c b/src/elfs/elfloader.c
index f05dd219..14694313 100644
--- a/src/elfs/elfloader.c
+++ b/src/elfs/elfloader.c
@@ -242,10 +242,10 @@ int AllocLoadElfMemory(box64context_t* context, elfheader_t* head, int mainbin)
     void* image = NULL;
     if(!head->vaddr) {
         sz += head->align;
-        raw = mmap64((void*)offs, sz, 0, MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);
+        raw = InternalMmap((void*)offs, sz, 0, MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);
         image = (void*)(((uintptr_t)raw+max_align)&~max_align);
     } else {
-        image = raw = mmap64((void*)head->vaddr, sz, 0, MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);
+        image = raw = InternalMmap((void*)head->vaddr, sz, 0, MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);
         if(head->vaddr&(box64_pagesize-1)) {
             // load address might be lower
             if((uintptr_t)image == (head->vaddr&~(box64_pagesize-1))) {
@@ -258,7 +258,7 @@ int AllocLoadElfMemory(box64context_t* context, elfheader_t* head, int mainbin)
         printf_log(LOG_INFO, "%s: Mmap64 for (@%p 0x%zx) for elf \"%s\" returned %p(%p/0x%zx) instead\n", (((uintptr_t)image)&max_align)?"Error":"Warning", (void*)(head->vaddr?head->vaddr:offs), head->memsz, head->name, image, raw, head->align);
         offs = (uintptr_t)image;
         if(((uintptr_t)image)&max_align) {
-            munmap(raw, sz);
+            InternalMunmap(raw, sz);
             return 1;   // that's an error, alocated memory is not aligned properly
         }
     }
@@ -314,7 +314,7 @@ int AllocLoadElfMemory(box64context_t* context, elfheader_t* head, int mainbin)
                 try_mmap = 0;
             if(try_mmap) {
                 printf_dump(log_level, "Mmaping 0x%lx(0x%lx) bytes @%p for Elf \"%s\"\n", head->multiblocks[n].size, head->multiblocks[n].asize, (void*)head->multiblocks[n].paddr, head->name);
-                void* p = mmap64(
+                void* p = InternalMmap(
                     (void*)head->multiblocks[n].paddr,
                     head->multiblocks[n].size,
                     prot,
@@ -339,7 +339,7 @@ int AllocLoadElfMemory(box64context_t* context, elfheader_t* head, int mainbin)
                 void* p = MAP_FAILED;
                 if(paddr==(paddr&~(box64_pagesize-1)) && (asize==ALIGN(asize))) {
                     printf_dump(log_level, "Allocating 0x%zx (0x%zx) bytes @%p, will read 0x%zx @%p for Elf \"%s\"\n", asize, e->p_memsz, (void*)paddr, e->p_filesz, (void*)head->multiblocks[n].paddr, head->name);
-                    p = mmap64(
+                    p = InternalMmap(
                         (void*)paddr,
                         asize,
                         prot|PROT_WRITE,
@@ -362,7 +362,7 @@ int AllocLoadElfMemory(box64context_t* context, elfheader_t* head, int mainbin)
                     }
                     if(new_size>0) {
                         printf_dump(log_level, "Allocating 0x%zx (0x%zx/0x%zx) bytes @%p, will read 0x%zx @%p for Elf \"%s\"\n", ALIGN(new_size), paddr, e->p_memsz, (void*)new_addr, e->p_filesz, (void*)head->multiblocks[n].paddr, head->name);
-                        p = mmap64(
+                        p = InternalMmap(
                             (void*)new_addr,
                             ALIGN(new_size),
                             prot|PROT_WRITE,
@@ -450,7 +450,7 @@ void FreeElfMemory(elfheader_t* head)
     // we only need to free the overall mmap, no need to free individual part as they are inside the big one
     if(head->raw && head->raw_size) {
         dynarec_log(LOG_INFO, "Unmap elf memory %p-%p for %s\n", head->raw, head->raw+head->raw_size, head->path);
-        munmap(head->raw, head->raw_size);
+        InternalMunmap(head->raw, head->raw_size);
     }
     freeProtection((uintptr_t)head->raw, head->raw_size);
 }
diff --git a/src/include/custommem.h b/src/include/custommem.h
index 17acc1e9..96ebfdd0 100644
--- a/src/include/custommem.h
+++ b/src/include/custommem.h
@@ -32,11 +32,17 @@ size_t customGetUsableSize(void* p);
 #ifdef DYNAREC
 typedef struct dynablock_s dynablock_t;
 typedef struct mmaplist_s mmaplist_t;
+typedef struct DynaCacheBlock_s DynaCacheBlock_t;
 // custom protection flag to mark Page that are Write protected for Dynarec purpose
 uintptr_t AllocDynarecMap(uintptr_t x64_addr, size_t size, int is_new);
 void FreeDynarecMap(uintptr_t addr);
 mmaplist_t* NewMmaplist();
 void DelMmaplist(mmaplist_t* list);
+int MmaplistHasNew(mmaplist_t* list, int clear);
+int MmaplistNBlocks(mmaplist_t* list);
+void MmaplistFillBlocks(mmaplist_t* list, DynaCacheBlock_t* blocks);
+void MmaplistAddNBlocks(mmaplist_t* list, int nblocks);
+int MmaplistAddBlock(mmaplist_t* list, int fd, off_t offset, void* orig, size_t size, intptr_t delta_map, uintptr_t mapping_start);
 
 void addDBFromAddressRange(uintptr_t addr, size_t size);
 // Will return 1 if at least 1 db in the address range
@@ -134,6 +140,8 @@ void fini_custommem_helper(box64context_t* ctx);
 // ---- StrongMemoryModel
 void addLockAddress(uintptr_t addr);    // add an address to the list of "LOCK"able
 int isLockAddress(uintptr_t addr);  // return 1 is the address is used as a LOCK, 0 else
+int nLockAddressRange(uintptr_t start, size_t size);    // gives the number of lock address for a range
+void getLockAddressRange(uintptr_t start, size_t size, uintptr_t addrs[]);   // fill in the array with the lock addresses in the range (array must be of the correct size)
 
 void SetHotPage(uintptr_t addr);
 void CheckHotPage(uintptr_t addr);
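The two range helpers added above follow a count-then-fill pattern: the caller first asks how many LOCK addresses fall inside a range, then supplies a buffer of exactly that size. A minimal usage sketch (start and size stand for whatever mapping range the caller is working on; this snippet is illustrative, not code from the patch):

    int n = nLockAddressRange(start, size);      // first pass: count LOCK addresses in [start, start+size)
    if (n > 0) {
        uintptr_t addrs[n];                      // caller provides a buffer of exactly n entries
        getLockAddressRange(start, size, addrs); // second pass: fill it
        // addrs[0..n-1] now hold the recorded LOCK addresses for that range
    }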
diff --git a/src/include/dynablock.h b/src/include/dynablock.h
index 551a223e..650e18cd 100644
--- a/src/include/dynablock.h
+++ b/src/include/dynablock.h
@@ -5,7 +5,7 @@ typedef struct x64emu_s x64emu_t;
 typedef struct dynablock_s dynablock_t;
 
 uint32_t X31_hash_code(void* addr, int len);
-void FreeDynablock(dynablock_t* db, int need_lock);
+void FreeDynablock(dynablock_t* db, int need_lock, int need_remove);
 void MarkDynablock(dynablock_t* db);
 void MarkRangeDynablock(dynablock_t* db, uintptr_t addr, uintptr_t size);
 int FreeRangeDynablock(dynablock_t* db, uintptr_t addr, uintptr_t size);
diff --git a/src/include/env.h b/src/include/env.h
index 1c640091..6e0df8c1 100644
--- a/src/include/env.h
+++ b/src/include/env.h
@@ -127,6 +127,8 @@ extern char* ftrace_name;
     BOOLEAN(BOX64_X11THREADS, x11threads, 0, 0)                               \
     BOOLEAN(BOX64_X87_NO80BITS, x87_no80bits, 0, 1)                           \
     BOOLEAN(BOX64_DYNACACHE, dynacache, 0, 0)                                 \
+    STRING(BOX64_DYNACACHE_FOLDER, dynacache_folder, 0)                       \
+    INTEGER(BOX64_DYNACACHE_MIN, dynacache_min, 350, 0, 10240, 0)             \
 
 #ifdef ARM64
 #define ENVSUPER2() \
@@ -195,6 +197,15 @@ typedef struct box64env_s {
 } box64env_t;
 
 typedef struct mmaplist_s mmaplist_t;
+#ifdef DYNAREC
+typedef struct blocklist_s blocklist_t;
+
+typedef struct DynaCacheBlock_s {
+    blocklist_t*    block;
+    size_t          size;
+    size_t          free_size;
+} DynaCacheBlock_t;
+#endif
 
 void InitializeEnvFiles();
 void ApplyEnvFileEntry(const char* name);
@@ -203,11 +214,16 @@ void InitializeEnv();
 void LoadEnvVariables();
 void PrintEnvVariables(box64env_t* env, int level);
 void RecordEnvMappings(uintptr_t addr, size_t length, int fd);
+void WillRemoveMapping(uintptr_t addr, size_t length);
 void RemoveMapping(uintptr_t addr, size_t length);
 box64env_t* GetCurEnvByAddr(uintptr_t addr);
 int IsAddrFileMapped(uintptr_t addr, const char** filename, uintptr_t* start);
 size_t SizeFileMapped(uintptr_t addr);
 mmaplist_t* GetMmaplistByAddr(uintptr_t addr);
 int IsAddrNeedReloc(uintptr_t addr);
+void SerializeAllMapping();
+void DynaCacheList(const char* name);
+void DynaCacheClean();
+int IsAddrMappingLoadAndClean(uintptr_t addr);
 
 #endif // __ENV_H
diff --git a/src/include/fileutils.h b/src/include/fileutils.h
index 855eb8ff..042ca800 100644
--- a/src/include/fileutils.h
+++ b/src/include/fileutils.h
@@ -12,6 +12,7 @@ char* ResolveFileSoft(const char* filename, path_collection_t* paths);
 int FileIsX86ELF(const char* filename);
 int FileIsX64ELF(const char* filename);
 int FileIsShell(const char* filename);
+size_t FileSize(const char* filename);
 
 // return temp folder (will return /tmp if nothing is correct)
 const char* GetTmpDir(void);
diff --git a/src/include/os.h b/src/include/os.h
index dc3e2c33..7a6c036c 100644
--- a/src/include/os.h
+++ b/src/include/os.h
@@ -107,6 +107,7 @@ void* GetEnv(const char* name);
 
 // 0 : doesn't exist, 1: does exist.
 int FileExist(const char* filename, int flags);
+int MakeDir(const char* folder);    // return 1 for success, 0 else
 
 #ifdef _WIN32
 #define BOXFILE_BUFSIZE 4096
diff --git a/src/libtools/signals.c b/src/libtools/signals.c
index a2e56cf9..1c8247ec 100644
--- a/src/libtools/signals.c
+++ b/src/libtools/signals.c
@@ -1094,6 +1094,10 @@ void my_sigactionhandler_oldcode_64(x64emu_t* emu, int32_t sig, int simple, sigi
     }
     //TODO: SIGABRT generate what?
     printf_log((sig==10)?LOG_DEBUG:log_minimum, "Signal %d: si_addr=%p, TRAPNO=%d, ERR=%d, RIP=%p, prot=%x, mmapped:%d\n", sig, (void*)info2->si_addr, sigcontext->uc_mcontext.gregs[X64_TRAPNO], sigcontext->uc_mcontext.gregs[X64_ERR],sigcontext->uc_mcontext.gregs[X64_RIP], prot, mmapped);
+    #ifdef DYNAREC
+    if(sig==3)
+        SerializeAllMapping();  // Signal Interrupt: a good time to serialize the mappings if needed
+    #endif
     // call the signal handler
     x64_ucontext_t sigcontext_copy = *sigcontext;
     // save old value from emu
diff --git a/src/os/os_linux.c b/src/os/os_linux.c
index 29d0a1d1..d1a940b5 100644
--- a/src/os/os_linux.c
+++ b/src/os/os_linux.c
@@ -9,6 +9,7 @@
 #include <string.h>
 #include <stdarg.h>
 #include <stdlib.h>
+#include <errno.h>
 
 #include "os.h"
 #include "signals.h"
@@ -229,4 +230,23 @@ int FileExist(const char* filename, int flags)
             return 0; // nope
     }
     return 1;
+}
+
+int MakeDir(const char* folder)
+{
+    int ret = mkdir(folder, S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH);
+    if(!ret || errno==EEXIST)
+        return 1;
+    return 0;
+}
+
+size_t FileSize(const char* filename)
+{
+    struct stat sb;
+    if (stat(filename, &sb) == -1)
+        return 0;
+    // check type of file? should be executable, or folder
+    if (!S_ISREG(sb.st_mode))
+        return 0;
+    return sb.st_size;
 }
\ No newline at end of file
diff --git a/src/os/os_wine.c b/src/os/os_wine.c
index 28e8ada3..add139dc 100644
--- a/src/os/os_wine.c
+++ b/src/os/os_wine.c
@@ -236,4 +236,16 @@ int FileExist(const char* filename, int flags)
     }
 
     return 1;
+}
+
+int MakeDir(const char* folder)
+{
+    // TODO
+    return 0;
+}
+
+size_t FileSize(const char* filename)
+{
+    // TODO
+    return 0;
 }
\ No newline at end of file
diff --git a/src/tools/env.c b/src/tools/env.c
index b279a988..ec9da3c8 100644
--- a/src/tools/env.c
+++ b/src/tools/env.c
@@ -5,6 +5,10 @@
 #include <fcntl.h>
 #include <string.h>
 #include <inttypes.h>
+#if defined(DYNAREC) && !defined(WIN32)
+#include <sys/types.h>
+#include <dirent.h>
+#endif
 
 #include "os.h"
 #include "env.h"
@@ -24,6 +28,18 @@ static kh_box64env_entry_t* box64env_entries_gen = NULL;
 
 mmaplist_t* NewMmaplist();
 void DelMmaplist(mmaplist_t* list);
+#ifdef DYNAREC
+int MmaplistHasNew(mmaplist_t* list, int clear);
+int MmaplistIsDirty(mmaplist_t* list);
+int MmaplistNBlocks(mmaplist_t* list);
+size_t MmaplistTotalAlloc(mmaplist_t* list);
+void MmaplistFillBlocks(mmaplist_t* list, DynaCacheBlock_t* blocks);
+void MmaplistAddNBlocks(mmaplist_t* list, int nblocks);
+int MmaplistAddBlock(mmaplist_t* list, int fd, off_t offset, void* orig, size_t size, intptr_t delta_map, uintptr_t mapping_start);
+int nLockAddressRange(uintptr_t start, size_t size);
+void getLockAddressRange(uintptr_t start, size_t size, uintptr_t addrs[]);
+void addLockAddress(uintptr_t addr);
+#endif
 
 static rbtree_t* envmap = NULL;
 
@@ -658,6 +674,11 @@ typedef struct mapping_s {
 KHASH_MAP_INIT_STR(mapping_entry, mapping_t*);
 static kh_mapping_entry_t* mapping_entries = NULL;
 
+#if defined(DYNAREC) && !defined(WIN32)
+void MmapDynaCache(mapping_t* mapping);
+#endif
+
+
 void RecordEnvMappings(uintptr_t addr, size_t length, int fd)
 {
 #ifndef _WIN32
@@ -698,6 +719,13 @@ void RecordEnvMappings(uintptr_t addr, size_t length, int fd)
                 mapping->env = &kh_value(box64env_entries, k);
         }
         dynarec_log(LOG_INFO, "Mapping %s (%s) in %p-%p\n", fullname, lowercase_filename, (void*)addr, (void*)(addr+length));
+        #if defined(DYNAREC) && !defined(WIN32)
+        int dynacache = box64env.dynacache;
+        if(mapping->env && mapping->env->is_dynacache_overridden)
+            dynacache = mapping->env->dynacache;
+        if(dynacache)
+            MmapDynaCache(mapping);
+        #endif
     } else
         mapping = kh_value(mapping_entries, k);
 
@@ -722,6 +750,525 @@ void RecordEnvMappings(uintptr_t addr, size_t length, int fd)
 #endif
 }
 
+#ifdef DYNAREC
+const char* GetDynacacheFolder(mapping_t* mapping)
+{
+    if(mapping && mapping->env && mapping->env->is_dynacache_folder_overridden && mapping->env->dynacache_folder) {
+        if(FileExist(mapping->env->dynacache_folder, 0))
+            return mapping->env->dynacache_folder;  // folder exist
+        if(MakeDir(mapping->env->dynacache_folder))
+            return mapping->env->dynacache_folder; 
+    }
+    if(box64env.dynacache_folder) {
+        if(FileExist(box64env.dynacache_folder, 0))
+            return box64env.dynacache_folder;  // folder exist
+        if(MakeDir(box64env.dynacache_folder))
+            return box64env.dynacache_folder; 
+    }
+    static char folder[4096] = {0};
+    static int default_folder = 0;
+    if(default_folder)
+        return folder;
+    if(GetEnv("XDG_CACHE_HOME"))
+        strcpy(folder, GetEnv("XDG_CACHE_HOME"));
+    else if(GetEnv("HOME")) {
+        strcpy(folder, GetEnv("HOME"));
+        strcat(folder, PATHSEP);
+        strcat(folder, ".cache");
+        if(!FileExist(folder, 0))
+            if(!MakeDir(folder))
+                return NULL;
+    }
+    else return NULL;
+    strcat(folder, PATHSEP);
+    strcat(folder, "box64");
+    if(!FileExist(folder, 0))
+        if(!MakeDir(folder))
+            return NULL;
+    strcat(folder, PATHSEP);
+    default_folder = 1;
+    return folder;
+}
+
+/*
+    There are 3 versions to bump when evolving things, depending on what is changed:
+    1. HEADER_VERSION (stored as file_version) for the DynaCache infrastructure itself
+    2. DYNAREC_VERSION for dynablock_t changes and other global dynarec changes
+    3. ARCH_VERSION for the architecture-specific changes (there is one per arch)
+
+    An ARCH_VERSION of 0 means Unsupported and disables DynaCache.
+    DynaCache will ignore any DynaCache file not exactly matching those 3 versions.
+    `box64 --dynacache-clean` can be used from the command line to purge obsolete DynaCache files.
+*/
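A minimal illustration of the versioning scheme described above, using the SET_VERSION macro defined just below (the second value is a made-up future revision, not one used by this patch):

    // SET_VERSION(MAJ, MIN, REV) packs a version triple into one integer:
    //   SET_VERSION(0, 0, 1) == 0x000001   (the current ARM64 ARCH_VERSION and DYNAREC_VERSION)
    //   SET_VERSION(0, 1, 2) == 0x010002   (a hypothetical later revision)
    // ReadDynaCache() further down rejects a cache file unless file_version, dynarec_version
    // and arch_version all match the values compiled into the running box64.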
+
+#define HEADER_VERSION 1
+#define HEADER_SIGN "DynaCache"
+#define SET_VERSION(MAJ, MIN, REV) (((MAJ)<<24)|((MIN)<<16)|(REV))
+#ifdef ARM64
+#define ARCH_VERSION SET_VERSION(0, 0, 1)
+#elif defined(RV64)
+#define ARCH_VERSION SET_VERSION(0, 0, 0)
+#elif defined(LA64)
+#define ARCH_VERSION SET_VERSION(0, 0, 0)
+#else
+#error meh!
+#endif
+#define DYNAREC_VERSION SET_VERSION(0, 0, 1)
+
+typedef struct DynaCacheHeader_s {
+    char sign[10];  //"DynaCache\0"
+    uint64_t    file_version:16;
+    uint64_t    dynarec_version:24;
+    uint64_t    arch_version:24;
+    uint64_t    cpuext;
+    uint64_t    dynarec_settings;
+    size_t      pagesize;
+    size_t      codesize;
+    uintptr_t   map_addr;
+    size_t      map_len;
+    size_t      file_length;
+    uint32_t    filename_length;
+    uint32_t    nblocks;
+    uint32_t    nLockAddresses;
+    char        filename[];
+} DynaCacheHeader_t;
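For orientation, the file layout that SerializeMmaplist() below produces, and that ReadDynaCache() expects, can be sketched like this (reconstructed from the code in this patch):

    /*
     *  DynaCacheHeader_t  header;                          // sign "DynaCache", versions, counts
     *  char               fullname[filename_length+1];     // absolute path of the mapped file
     *  DynaCacheBlock_t   blocks[nblocks];                 // size / free_size of each native block
     *  uintptr_t          lockAddresses[nLockAddresses];   // recorded "LOCK" addresses for the range
     *  ...zero padding up to the next pagesize boundary...
     *  raw native code blocks, nblocks of them, written back to back
     */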
+
+#define DYNAREC_SETTINGS()                                              \
+    DS_GO(BOX64_DYNAREC_ALIGNED_ATOMICS, dynarec_aligned_atomics, 1)    \
+    DS_GO(BOX64_DYNAREC_BIGBLOCK, dynarec_bigblock, 2)                  \
+    DS_GO(BOX64_DYNAREC_CALLRET, dynarec_callret, 2)                    \
+    DS_GO(BOX64_DYNAREC_DF, dynarec_df, 1)                              \
+    DS_GO(BOX64_DYNAREC_DIRTY, dynarec_dirty, 2)                        \
+    DS_GO(BOX64_DYNAREC_DIV0, dynarec_div0, 1)                          \
+    DS_GO(BOX64_DYNAREC_FASTNAN, dynarec_fastnan, 1)                    \
+    DS_GO(BOX64_DYNAREC_FASTROUND, dynarec_fastround, 2)                \
+    DS_GO(BOX64_DYNAREC_FORWARD, dynarec_forward, 10)                   \
+    DS_GO(BOX64_DYNAREC_NATIVEFLAGS, dynarec_nativeflags, 1)            \
+    DS_GO(BOX64_DYNAREC_SAFEFLAGS, dynarec_safeflags, 2)                \
+    DS_GO(BOX64_DYNAREC_STRONGMEM, dynarec_strongmem, 2)                \
+    DS_GO(BOX64_DYNAREC_VOLATILE_METADATA, dynarec_volatile_metadata, 1)\
+    DS_GO(BOX64_DYNAREC_WEAKBARRIER, dynarec_weakbarrier, 2)            \
+    DS_GO(BOX64_DYNAREC_X87DOUBLE, dynarec_x87double, 2)                \
+    DS_GO(BOX64_SHAEXT, shaext, 1)                                      \
+    DS_GO(BOX64_SSE42, sse42, 1)                                        \
+    DS_GO(BOX64_AVX, avx, 2)                                            \
+    DS_GO(BOX64_X87_NO80BITS, x87_no80bits, 1)                          \
+    DS_GO(BOX64_RDTSC_1GHZ, rdtsc_1ghz, 1)                              \
+    DS_GO(BOX64_SSE_FLUSHTO0, sse_flushto0, 1)                          \
+
+#define DS_GO(A, B, C) uint64_t B:C;
+typedef union dynarec_settings_s {
+    struct {
+        DYNAREC_SETTINGS()
+    };
+    uint64_t    x;
+} dynarec_settings_t;
+#undef DS_GO
+uint64_t GetDynSetting(mapping_t* mapping)
+{
+    dynarec_settings_t settings = {0};
+    #define DS_GO(A, B, C)  settings.B = (mapping->env && mapping->env->is_##B##_overridden)?mapping->env->B:box64env.B;
+    DYNAREC_SETTINGS()
+    #undef DS_GO
+    return settings.x;
+}
+void PrintDynfSettings(int level, uint64_t s)
+{
+    dynarec_settings_t settings = {0};
+    settings.x = s;
+    #define DS_GO(A, B, C) if(settings.B) printf_log_prefix(0, level, "\t\t" #A "=%d\n", settings.B);
+    DYNAREC_SETTINGS()
+    #undef DS_GO
+}
+#undef DYNAREC_SETTINGS
+
+char* MmaplistName(const char* filename, uint64_t dynarec_settings, const char* fullname)
+{
+    // names are FOLDER/filename-YYYY-XXXXX.box64 (see the example after this function)
+    // where XXXXX is the hash of the full name
+    // and YYYY is the Dynarec settings (in hex)
+    static char mapname[4096];
+    snprintf(mapname, 4095-6, "%s-%llx-%u", filename, dynarec_settings, __ac_X31_hash_string(fullname));
+    strcat(mapname, ".box64");
+    return mapname;
+}
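As a concrete illustration of the naming scheme above (the settings value and hash below are invented for the example, not taken from a real run):

    // /usr/lib/libexample.so with dynarec settings 0x1a2b and a name hash of 305419896
    // would be cached as:   libexample.so-1a2b-305419896.box64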
+
+char* GetMmaplistName(mapping_t* mapping)
+{
+    return MmaplistName(mapping->filename+1, GetDynSetting(mapping), mapping->fullname);
+}
+
+const char* NicePrintSize(size_t sz)
+{
+    static char buf[256];
+    const char* units[] = {"", "kb", "Mb", "Gb"};
+    int idx = 0;
+    size_t ratio = 0;
+    while(idx<(int)(sizeof(units)/sizeof(units[0]))-1 && ((size_t)1<<(ratio+10))<sz) {
+        ratio+=10;
+        ++idx;
+    }
+    if(ratio && (sz>>ratio)<50) {
+        snprintf(buf, 255, "%zu.%zu%s", sz>>ratio, ((sz*10)>>ratio)%10, units[idx]);
+    } else {
+        snprintf(buf, 255, "%zu%s", sz>>ratio, units[idx]);
+    }
+    return buf;
+}
+
+void SerializeMmaplist(mapping_t* mapping)
+{
+    if(!DYNAREC_VERSION)
+        return;
+    if(!box64env.dynacache)
+        return;
+    if(mapping->env && mapping->env->is_dynacache_overridden && !mapping->env->dynacache)
+        return;
+    const char* folder = GetDynacacheFolder(mapping);
+    if(!folder) return; // no folder, no serialize...
+    const char* name = GetMmaplistName(mapping);
+    if(!name) return;
+    char mapname[strlen(folder)+strlen(name)+1];
+    strcpy(mapname, folder);
+    strcat(mapname, name);
+    size_t filesize = FileSize(mapping->fullname);
+    if(!filesize) {
+        dynarec_log(LOG_INFO, "DynaCache will not serialize cache for %s because filesize is 0\n", mapping->fullname);
+        return;   // mmapped file has a 0 size...
+    }
+    // prepare header
+    int nblocks = MmaplistNBlocks(mapping->mmaplist);
+    if(!nblocks) {
+        dynarec_log(LOG_INFO, "DynaCache will not serialize cache for %s because nblocks is 0\n", mapping->fullname);
+        return; //How???
+    }
+    size_t map_len = SizeFileMapped(mapping->start);
+    size_t nLockAddresses = nLockAddressRange(mapping->start, map_len);
+    size_t total = sizeof(DynaCacheHeader_t) + strlen(mapping->fullname) + 1 + nblocks*sizeof(DynaCacheBlock_t) + nLockAddresses*sizeof(uintptr_t);
+    total = (total + box64_pagesize-1)&~(box64_pagesize-1); // align on pagesize
+    uint8_t all_header[total];
+    memset(all_header, 0, total);
+    void* p = all_header;
+    DynaCacheHeader_t* header = p;
+    strcpy(header->sign, HEADER_SIGN);
+    header->file_version = HEADER_VERSION;
+    header->dynarec_version = DYNAREC_VERSION;
+    header->arch_version = ARCH_VERSION;
+    header->dynarec_settings = GetDynSetting(mapping);
+    header->cpuext = cpuext.x;
+    header->pagesize = box64_pagesize;
+    header->codesize = MmaplistTotalAlloc(mapping->mmaplist);
+    header->map_addr = mapping->start;
+    header->file_length = filesize;
+    header->filename_length = strlen(mapping->fullname);
+    header->nblocks = MmaplistNBlocks(mapping->mmaplist);
+    header->map_len = map_len;
+    header->nLockAddresses = nLockAddresses;
+    size_t dynacache_min = box64env.dynacache_min;
+    if(mapping->env && mapping->env->is_dynacache_min_overridden)
+        dynacache_min = mapping->env->dynacache_min;
+    if(dynacache_min*1024>header->codesize) {
+        dynarec_log(LOG_INFO, "DynaCache will not serialize cache for %s because there is not enough usefull code (%s)\n", mapping->fullname, NicePrintSize(header->codesize));
+        return; // not enugh code, do no write
+    }
+    p += sizeof(DynaCacheHeader_t); // fullname
+    strcpy(p, mapping->fullname);
+    p += strlen(p) + 1; // blocks
+    DynaCacheBlock_t* blocks = p;
+    MmaplistFillBlocks(mapping->mmaplist, blocks);
+    p += nblocks*sizeof(DynaCacheBlock_t);
+    uintptr_t* lockAddresses = p;
+    getLockAddressRange(mapping->start, map_len, lockAddresses);
+    // all done, now just create the file and write all this down...
+    #ifndef WIN32
+    unlink(mapname);
+    FILE* f = fopen(mapname, "wbx");
+    if(!f) {
+        dynarec_log(LOG_INFO, "Cannot create cache file %s\n", mapname);
+        return;
+    }
+    if(fwrite(all_header, total, 1, f)!=1) {
+        dynarec_log(LOG_INFO, "Error writing Cache file (disk full?)\n");
+        fclose(f);
+        return;
+    }
+    for(int i=0; i<nblocks; ++i) {
+        if(fwrite(blocks[i].block, blocks[i].size, 1, f)!=1) {
+            dynarec_log(LOG_INFO, "Error writing Cache file (disk full?)\n");
+            fclose(f);
+            return;
+        }
+    }
+    fclose(f);
+    #else
+    // TODO?
+    #endif
+}
+
+#define DCERR_OK            0
+#define DCERR_NEXIST        1
+#define DCERR_TOOSMALL      2
+#define DCERR_FERROR        3
+#define DCERR_BADHEADER     4
+#define DCERR_FILEVER       5
+#define DCERR_DYNVER        6
+#define DCERR_DYNARCHVER    7
+#define DCERR_PAGESIZE      8
+#define DCERR_MAPNEXIST     9
+#define DCERR_MAPCHG        10
+#define DCERR_RELOC         11
+#define DCERR_BADNAME       12
+
+#ifndef WIN32
+int ReadDynaCache(const char* folder, const char* name, mapping_t* mapping, int verbose)
+{
+    char filename[strlen(folder)+strlen(name)+1];
+    strcpy(filename, folder);
+    strcat(filename, name);
+    if(verbose) printf_log(LOG_NONE, "File %s:\t", name);
+    if(!FileExist(filename, IS_FILE)) {
+        if(verbose) printf_log_prefix(0, LOG_NONE, "Invalid file\n");
+        return DCERR_NEXIST;
+    }
+    size_t filesize = FileSize(filename);
+    if(filesize<sizeof(DynaCacheHeader_t)) {
+        if(verbose) printf_log_prefix(0, LOG_NONE, "Invalid side: %zd\n", filesize);
+        return DCERR_TOOSMALL;
+    }
+    FILE *f = fopen(filename, "rb");
+    if(!f) {
+        if(verbose) printf_log_prefix(0, LOG_NONE, "Cannot open file\n");
+        return DCERR_FERROR;
+    }
+    DynaCacheHeader_t header = {0};
+    if(fread(&header, sizeof(header), 1, f)!=1) {
+        if(verbose) printf_log_prefix(0, LOG_NONE, "Cannot read header\n");
+        fclose(f);
+        return DCERR_FERROR;
+    }
+    if(strcmp(header.sign, HEADER_SIGN)) {
+        if(verbose) printf_log_prefix(0, LOG_NONE, "Bad header\n");
+        fclose(f);
+        return DCERR_BADHEADER;
+    }
+    if(header.file_version!=HEADER_VERSION) {
+        if(verbose) printf_log_prefix(0, LOG_NONE, "Incompatible File Version\n");
+        fclose(f);
+        return DCERR_FILEVER;
+    }
+    if(header.dynarec_version!=DYNAREC_VERSION) {
+        if(verbose) printf_log_prefix(0, LOG_NONE, "Incompatible Dynarec Version\n");
+        fclose(f);
+        return DCERR_DYNVER;
+    }
+    if(header.arch_version!=ARCH_VERSION) {
+        if(verbose) printf_log_prefix(0, LOG_NONE, "Incompatible Dynarec Arch Version\n");
+        fclose(f);
+        return DCERR_DYNARCHVER;
+    }
+    if(header.pagesize!=box64_pagesize) {
+        if(verbose) printf_log_prefix(0, LOG_NONE, "Bad pagesize\n");
+        fclose(f);
+        return DCERR_PAGESIZE;
+    }
+    char map_filename[header.filename_length+1];
+    if(fread(map_filename, header.filename_length+1, 1, f)!=1) {
+        if(verbose) printf_log_prefix(0, LOG_NONE, "Cannot read filename\n");
+        fclose(f);
+        return DCERR_FERROR;
+    }
+    if(!FileExist(map_filename, IS_FILE)) {
+        if(verbose) printf_log_prefix(0, LOG_NONE, "Mapfiled does not exists\n");
+        fclose(f);
+        return DCERR_MAPNEXIST;
+    }
+    if(FileSize(map_filename)!=header.file_length) {
+        if(verbose) printf_log_prefix(0, LOG_NONE, "File changed\n");
+        fclose(f);
+        return DCERR_MAPCHG;
+    }
+    DynaCacheBlock_t blocks[header.nblocks];
+    if(fread(blocks, sizeof(DynaCacheBlock_t), header.nblocks, f)!=header.nblocks) {
+        if(verbose) printf_log_prefix(0, LOG_NONE, "Cannot read blocks\n");
+        fclose(f);
+        return DCERR_FERROR;
+    }
+    uintptr_t lockAddresses[header.nLockAddresses];
+    if(fread(lockAddresses, sizeof(uintptr_t), header.nLockAddresses, f)!=header.nLockAddresses) {
+        if(verbose) printf_log_prefix(0, LOG_NONE, "Cannot read lockAddresses\n");
+        fclose(f);
+        return DCERR_FERROR;
+    }
+    off_t p = ftell(f);
+    p = (p+box64_pagesize-1)&~(box64_pagesize-1);
+    if(fseek(f, p, SEEK_SET)<0) {
+        if(verbose) printf_log_prefix(0, LOG_NONE, "Error reading a block\n");
+        fclose(f);
+        return DCERR_FERROR;
+    }
+    if(!mapping) {
+        // check the blocks can be read without reading...
+        for(int i=0; i<header.nblocks; ++i) {
+            p+=blocks[i].size;
+            if(fseek(f, blocks[i].size, SEEK_CUR)<0 || ftell(f)!=p) {
+                if(verbose) printf_log_prefix(0, LOG_NONE, "Error reading a block\n");
+                fclose(f);
+                return DCERR_FERROR;
+            }
+        }
+        char* short_name = strrchr(map_filename, '/');
+        if(short_name)
+            ++short_name;
+        else
+            short_name = map_filename;
+        short_name = LowerCase(short_name);
+        const char* file_name = MmaplistName(short_name, header.dynarec_settings, map_filename);
+        box_free(short_name);
+        if(strcmp(file_name, name)) {
+            if(verbose) printf_log_prefix(0, LOG_NONE, "Invalid cache name\n");
+            fclose(f);
+            return DCERR_BADNAME;
+        }
+        if(verbose) {
+            // file is valid, print its information:
+            printf_log_prefix(0, LOG_NONE, "%s (%s)\n", map_filename, NicePrintSize(filesize));
+            printf_log_prefix(0, LOG_NONE, "\tDynarec Settings:\n");
+            PrintDynfSettings(LOG_NONE, header.dynarec_settings);
+            size_t total_blocks = 0, total_free = 0;
+            size_t total_code = header.codesize;
+            for(int i=0; i<header.nblocks; ++i) {
+                total_blocks += blocks[i].size;
+                total_free += blocks[i].free_size;
+            }
+            printf_log_prefix(0, LOG_NONE, "\tHas %d blocks for a total of %s", header.nblocks, NicePrintSize(total_blocks));
+            printf_log_prefix(0, LOG_NONE, " with %s still free", NicePrintSize(total_free));
+            printf_log_prefix(0, LOG_NONE, " and %s non-canceled blocks (mapped at %p-%p, with %zu lock addresses)\n", NicePrintSize(total_code), (void*)header.map_addr, (void*)header.map_addr+header.map_len, header.nLockAddresses);
+        }
+    } else {
+        // actually reading!
+        int fd = fileno(f);
+        intptr_t delta_map = mapping->start - header.map_addr;
+        dynarec_log(LOG_INFO, "Trying to load DynaCache for %s, with a delta_map=%zx\n", mapping->fullname, delta_map);
+        if(!mapping->mmaplist)
+            mapping->mmaplist = NewMmaplist();
+        MmaplistAddNBlocks(mapping->mmaplist, header.nblocks);
+        for(int i=0; i<header.nblocks; ++i) {
+            if(MmaplistAddBlock(mapping->mmaplist, fd, p, blocks[i].block, blocks[i].size, delta_map, mapping->start)) {
+                printf_log(LOG_NONE, "Error while doing relocation on a DynaCache (block %d)\n", i);
+                fclose(f);
+                return DCERR_RELOC;
+            }
+            p+=blocks[i].size;
+        }
+        for(size_t i=0; i<header.nLockAddresses; ++i)
+            addLockAddress(lockAddresses[i]+delta_map);
+        dynarec_log(LOG_INFO, "Loaded DynaCache for %s, with %d blocks\n", mapping->fullname, header.nblocks);
+    }
+    fclose(f);
+    return DCERR_OK;
+}
+#endif
+
+void DynaCacheList(const char* filter)
+{
+    #ifndef WIN32
+    const char* folder = GetDynacacheFolder(NULL);
+    if(!folder) {
+        printf_log(LOG_NONE, "DynaCache folder not found\n");
+        return;
+    }
+    DIR* dir = opendir(folder);
+    if(!dir) {
+        printf_log(LOG_NONE, "Cannot open DynaCache folder\n");
+    }
+    struct dirent* d = NULL;
+    int need_filter = (filter && strlen(filter));
+    while((d = readdir(dir))) {
+        size_t l = strlen(d->d_name);
+        if(l>6 && !strcmp(d->d_name+l-6, ".box64")) {
+            if(need_filter && !strstr(d->d_name, filter))
+                continue;
+            ReadDynaCache(folder, d->d_name, NULL, 1);
+            printf_log_prefix(0, LOG_NONE, "\n");
+        }
+    }
+    closedir(dir);
+    #endif
+}
+void DynaCacheClean()
+{
+    #ifndef WIN32
+    const char* folder = GetDynacacheFolder(NULL);
+    if(!folder) {
+        printf_log(LOG_NONE, "DynaCache folder not found\n");
+        return;
+    }
+    DIR* dir = opendir(folder);
+    if(!dir) {
+        printf_log(LOG_NONE, "Cannot open DynaCache folder\n");
+    }
+    struct dirent* d = NULL;
+    while((d = readdir(dir))) {
+        size_t l = strlen(d->d_name);
+        if(l>6 && !strcmp(d->d_name+l-6, ".box64")) {
+            int ret = ReadDynaCache(folder, d->d_name, NULL, 0);
+            if(ret) {
+                char filename[strlen(folder)+strlen(d->d_name)+1];
+                strcpy(filename, folder);
+                strcat(filename, d->d_name);
+                size_t filesize = FileSize(filename);
+                if(!unlink(filename)) {
+                    printf_log(LOG_NONE, "Removed %s for %s\n", d->d_name, NicePrintSize(filesize));
+                } else {
+                    printf_log(LOG_NONE, "Could not remove %d\n", d->d_name);
+                }
+            }
+        }
+    }
+    closedir(dir);
+    #endif
+}
+#ifndef WIN32
+void MmapDynaCache(mapping_t* mapping)
+{
+    if(!DYNAREC_VERSION)
+        return;
+    if(!box64env.dynacache)
+        return;
+    if(mapping->env && mapping->env->is_dynacache_overridden && !mapping->env->dynacache)
+        return;
+    const char* folder = GetDynacacheFolder(mapping);
+    if(!folder) return;
+    const char* name = GetMmaplistName(mapping);
+    if(!name) return;
+    dynarec_log(LOG_DEBUG, "Looking for DynaCache %s in %s\n", name, folder);
+    ReadDynaCache(folder, name, mapping, 0);
+}
+#endif
+#else
+void SerializeMmaplist(mapping_t* mapping) {}
+void DynaCacheList() { printf_log(LOG_NONE, "Dynarec not enable\n"); }
+void DynaCacheClean() {}
+#endif
+
+void WillRemoveMapping(uintptr_t addr, size_t length)
+{
+    #ifdef DYNAREC
+    if(!envmap) return;
+    mapping_t* mapping = (mapping_t*)rb_get_64(envmap, addr);
+    if(mapping) {
+        if(MmaplistHasNew(mapping->mmaplist, 1)) {
+            mutex_lock(&my_context->mutex_dyndump);
+            SerializeMmaplist(mapping);
+            mutex_unlock(&my_context->mutex_dyndump);
+        }
+    }
+    #endif
+}
+
 void RemoveMapping(uintptr_t addr, size_t length)
 {
     if(!envmap) return;
@@ -756,6 +1303,19 @@ void RemoveMapping(uintptr_t addr, size_t length)
     }
 }
 
+void SerializeAllMapping()
+{
+#ifdef DYNAREC
+    if(!mapping_entries)
+        return;
+    mapping_t* mapping;
+    mutex_lock(&my_context->mutex_dyndump);
+    kh_foreach_value(mapping_entries, mapping, 
+        if(MmaplistHasNew(mapping->mmaplist, 1))
+            SerializeMmaplist(mapping);
+    );
+    mutex_unlock(&my_context->mutex_dyndump);
+#endif
+}
+
 box64env_t* GetCurEnvByAddr(uintptr_t addr)
 {
     if (!envmap) return &box64env;
@@ -811,17 +1371,32 @@ size_t SizeFileMapped(uintptr_t addr)
 int IsAddrNeedReloc(uintptr_t addr)
 {
     box64env_t* env = GetCurEnvByAddr(addr);
-    if(env->nodynarec)
-        return 0;
-    if(!env->dynacache)
+    // TODO: this seems quite wrong and should be refactored
+    int test = env->is_dynacache_overridden?env->dynacache:box64env.dynacache;
+    if(!test)
         return 0;
-    if(env->nodynarec_end && addr>=env->nodynarec_start && addr<env->nodynarec_end)
+    uintptr_t end = env->nodynarec_end?env->nodynarec_end:box64env.nodynarec_end;
+    uintptr_t start = env->nodynarec_start?env->nodynarec_start:box64env.nodynarec_start;
+    if(end && addr>=start && addr<end)
         return 0;
     #ifdef HAVE_TRACE
-    if(env->dynarec_test_end && addr>=env->dynarec_test_start && addr<env->dynarec_test_end)
+    end = env->dynarec_test_end?env->dynarec_test_end:box64env.dynarec_test_end;
+    start = env->dynarec_test_start?env->dynarec_test_start:box64env.dynarec_test_start;
+    if(end && addr>=start && addr<end)
         return 0;
-    if(env->dynarec_trace && trace_end && addr>=trace_start && addr<trace_end)
+    test = env->is_dynarec_trace_overridden?env->dynarec_trace:box64env.dynarec_trace;
+    if(test && trace_end && addr>=trace_start && addr<trace_end)
         return 0;
     #endif
     return 1;
+}
+
+int IsAddrMappingLoadAndClean(uintptr_t addr)
+{
+    if(!envmap) return 0;
+    mapping_t* mapping = ((mapping_t*)rb_get_64(envmap, addr));
+    if(!mapping) return 0;
+    if(!mapping->mmaplist) return 0;
+    if(MmaplistIsDirty(mapping->mmaplist)) return 0;
+    return 1;
 }
\ No newline at end of file
diff --git a/src/wrapped/generated/functions_list.txt b/src/wrapped/generated/functions_list.txt
index 82b573e2..bf306cfa 100644
--- a/src/wrapped/generated/functions_list.txt
+++ b/src/wrapped/generated/functions_list.txt
@@ -3506,7 +3506,6 @@
 #() iFEpuvvppp -> iFEpuppp
 wrappedalure:
 wrappedalut:
-wrappedandroidshmem:
 wrappedanl:
 wrappedatk:
 - vFp:
diff --git a/src/wrapped/wrappedlibc.c b/src/wrapped/wrappedlibc.c
index ef9d96f3..b6e05fb3 100644
--- a/src/wrapped/wrappedlibc.c
+++ b/src/wrapped/wrappedlibc.c
@@ -3145,6 +3145,9 @@ EXPORT int my_munmap(x64emu_t* emu, void* addr, size_t length)
     int ret = box_munmap(addr, length);
     int e = errno;
     #ifdef DYNAREC
+    if(!ret) {
+        WillRemoveMapping((uintptr_t)addr, length);
+    }
     if(!ret && BOX64ENV(dynarec) && length) {
         cleanDBFromAddressRange((uintptr_t)addr, length, 1);
     }
@@ -3168,9 +3171,10 @@ EXPORT int my_mprotect(x64emu_t* emu, void *addr, unsigned long len, int prot)
     int ret = mprotect(addr, len, prot);
     #ifdef DYNAREC
     if(BOX64ENV(dynarec) && !ret && len) {
-        if(prot& PROT_EXEC)
-            addDBFromAddressRange((uintptr_t)addr, len);
-        else
+        if(prot& PROT_EXEC) {
+            if(!IsAddrMappingLoadAndClean((uintptr_t)addr))
+                addDBFromAddressRange((uintptr_t)addr, len);
+        } else
             cleanDBFromAddressRange((uintptr_t)addr, len, (!prot)?1:0);
     }
     #endif