author     ptitSeb <sebastien.chev@gmail.com>  2023-02-19 09:56:42 +0100
committer  ptitSeb <sebastien.chev@gmail.com>  2023-02-19 09:56:42 +0100
commit     3b8d6ef208d4a3f5d3402977b0978f9a294e8aae (patch)
tree       324182382ea7cf79e3474539d41a24dabe75f3f1 /src
parent     2e59b56f1691227e4a93ac1c2dd844adcc6334cd (diff)
Simplified dynablock handling (reduced memory consumption)
Diffstat (limited to 'src')
-rw-r--r--  src/custommem.c               401
-rwxr-xr-x  src/dynarec/dynablock.c        20
-rwxr-xr-x  src/dynarec/dynarec_native.c   29
-rw-r--r--  src/include/custommem.h         4
-rwxr-xr-x  src/include/dynablock.h         2
-rwxr-xr-x  src/include/dynarec_native.h    2
-rwxr-xr-x  src/libtools/signals.c         10
7 files changed, 145 insertions, 323 deletions
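
The heart of the simplification, visible in the hunks below: the per-chunk khash of dynablocks (dblist), the global dblist_oversized and the per-byte helper array are all dropped. Instead, the first word of every dynarec allocation holds a pointer back to its owning dynablock_t (FillBlock64 lays the block out that way; the store itself is outside these hunks), so FindDynablockFromNativeAddress and FreeDynarecMap only need a native address. A minimal sketch of that owner-pointer idea, with illustrative names (toy_alloc/toy_owner are not box64 functions, and the real code carves blocks out of its own chunks and walks blockmark_t headers rather than calling malloc):

    #include <stdint.h>
    #include <stdlib.h>

    typedef struct dynablock_s dynablock_t;          /* opaque here */

    /* reserve the first word of each allocation for a back-pointer to its owner */
    static void* toy_alloc(dynablock_t* owner, size_t size)
    {
        void** p = malloc(sizeof(void*) + size);
        if (!p) return NULL;
        p[0] = owner;                                /* the "self" pointer */
        return p + 1;                                /* native code is emitted right after it */
    }

    /* given the start of the emitted code, recover the owning dynablock */
    static dynablock_t* toy_owner(void* code)
    {
        return ((dynablock_t**)code)[-1];
    }
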
diff --git a/src/custommem.c b/src/custommem.c
index 9f5422a9..8b7a5a47 100644
--- a/src/custommem.c
+++ b/src/custommem.c
@@ -32,10 +32,7 @@
 //#define USE_MMAP
 
 // init inside dynablocks.c
-KHASH_MAP_INIT_INT64(dynablocks, dynablock_t*)
 static mmaplist_t          *mmaplist = NULL;
-static size_t              mmapsize = 0;
-static kh_dynablocks_t     *dblist_oversized;      // store the list of oversized dynablocks (normal sized are inside mmaplist)
 static uintptr_t***        box64_jmptbl3[1<<JMPTABL_SHIFT];
 static uintptr_t**         box64_jmptbldefault2[1<<JMPTABL_SHIFT];
 static uintptr_t*          box64_jmptbldefault1[1<<JMPTABL_SHIFT];
@@ -413,270 +410,151 @@ void customFree(void* p)
 }
 
 #ifdef DYNAREC
-typedef struct mmapchunk_s {
-    void*               block;
-    size_t              maxfree;
-    size_t              size;
-    kh_dynablocks_t*    dblist;
-    uint8_t*            helper;
-    void*               first;  // first free block, to speed up things
-    int                 lock;   // don't try to add stuff on locked block
-} mmapchunk_t;
 #define NCHUNK          64
 typedef struct mmaplist_s {
-    mmapchunk_t         chunks[NCHUNK];
+    blocklist_t         chunks[NCHUNK];
     mmaplist_t*         next;
 } mmaplist_t;
 
-mmapchunk_t* addChunk(size_t mmapsize) {
-    if(!mmaplist)
-        mmaplist = (mmaplist_t*)box_calloc(1, sizeof(mmaplist_t));
-    mmaplist_t* head = mmaplist;
-    size_t i = mmapsize;
-    while(1) {
-        if(i>=NCHUNK) {
-            i-=NCHUNK;
-            if(!head->next) {
-                head->next = (mmaplist_t*)box_calloc(1, sizeof(mmaplist_t));
-            }
-            head=head->next;
-        } else
-            return &head->chunks[i];
-    }
-}
-
-uintptr_t FindFreeDynarecMap(dynablock_t* db, size_t size)
+dynablock_t* FindDynablockFromNativeAddress(void* p)
 {
-    // look for free space
-    void* sub = NULL;
-    mmaplist_t* head = mmaplist;
-    int i = mmapsize;
-    while(head) {
-        const int n = (i>NCHUNK)?NCHUNK:i;
-        i-=n;
-        for(int i=0; i<n; ++i) {
-            mmapchunk_t* chunk = &head->chunks[i];
-            if(chunk->maxfree>=size+sizeof(blockmark_t) && !native_lock_incif0(&chunk->lock)) {
-                size_t rsize = 0;
-                sub = getFirstBlock(chunk->block, size, &rsize, chunk->first);
-                if(sub) {
-                    uintptr_t ret = (uintptr_t)allocBlock(chunk->block, sub, size, &chunk->first);
-                    if(rsize==chunk->maxfree) {
-                        chunk->maxfree = getMaxFreeBlock(chunk->block, chunk->size, chunk->first);
-                    }
-                    kh_dynablocks_t *blocks = chunk->dblist;
-                    if(!blocks) {
-                        blocks = chunk->dblist = kh_init(dynablocks);
-                        kh_resize(dynablocks, blocks, 64);
-                    }
-                    khint_t k;
-                    int r;
-                    k = kh_put(dynablocks, blocks, (uintptr_t)ret, &r);
-                    kh_value(blocks, k) = db;
-                    int size255=(size<256)?size:255;
-                    for(size_t j=0; j<size255; ++j)
-                        chunk->helper[(uintptr_t)ret-(uintptr_t)chunk->block+j] = j;
-                    if(size!=size255)
-                        memset(&chunk->helper[(uintptr_t)ret-(uintptr_t)chunk->block+256], -1, size-255);
-                    native_lock_decifnot0(&chunk->lock);
-                    return ret;
-                } else {
-                    printf_log(LOG_INFO, "BOX64: Warning, sub not found, corrupted mmaplist[%i] info?\n", i);
-                    native_lock_decifnot0(&chunk->lock);
-                    if(box64_log >= LOG_DEBUG)
-                        printBlock(chunk->block, chunk->first);
+    if(!p)
+        return NULL;
+    
+    uintptr_t addr = (uintptr_t)p;
+
+    int i= 0;
+    mmaplist_t* list = mmaplist;
+    if(!list)
+        return NULL;
+    while(list) {
+        if ((addr>(uintptr_t)list->chunks[i].block) 
+         && (addr<((uintptr_t)list->chunks[i].block+list->chunks[i].size))) {
+            blockmark_t* sub = (blockmark_t*)list->chunks[i].block;
+            while((uintptr_t)sub<addr) {
+                blockmark_t* n = NEXT_BLOCK(sub);
+                if((uintptr_t)n>addr) {
+                    // found it!
+                    // the owning dynablock_t* is stored in the first word of the block ("self")
+                    return *(dynablock_t**)((uintptr_t)sub+sizeof(blockmark_t));
                 }
+                sub = n;
             }
+            return NULL;
+        }
+        ++i;
+        if(i==NCHUNK) {
+            i = 0;
+            list = list->next;
         }
-        head = head->next;
     }
-    return 0;
+    return NULL;
 }
 
-uintptr_t AddNewDynarecMap(dynablock_t* db, size_t size)
+uintptr_t AllocDynarecMap(size_t size)
 {
-    dynarec_log(LOG_DEBUG, "Ask for DynaRec Block Alloc #%zu\n", mmapsize);
-    mmapchunk_t* chunk = addChunk(mmapsize++);
-    native_lock_incif0(&chunk->lock);
-    #ifndef USE_MMAP
-    void *p = NULL;
-    if(!(p=box_memalign(box64_pagesize, MMAPSIZE))) {
-        dynarec_log(LOG_INFO, "Cannot create memory map of %d byte for dynarec block #%zu\n", MMAPSIZE, mmapsize-1);
-        native_lock_store(&chunk->lock, 0);
-        --mmapsize;
-        return 0;
-    }
-    mprotect(p, MMAPSIZE, PROT_READ | PROT_WRITE | PROT_EXEC);
-    #else
-    void* p = mmap(NULL, MMAPSIZE, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
-    if(p==(void*)-1) {
-        dynarec_log(LOG_INFO, "Cannot create memory map of %d byte for dynarec block #%zu\n", MMAPSIZE, mmapsize-1);
-        native_lock_store(&chunk->lock, 0);
-        --mmapsize;
+    if(!size)
         return 0;
-    }
-    #endif
-    setProtection((uintptr_t)p, MMAPSIZE, PROT_READ | PROT_WRITE | PROT_EXEC);
 
-    chunk->block = p;
-    chunk->size = MMAPSIZE;
-    chunk->helper = (uint8_t*)box_calloc(1, MMAPSIZE);
-    chunk->first = p;
-    // setup marks
-    blockmark_t* m = (blockmark_t*)p;
-    m->prev.x32 = 0;
-    m->next.fill = 0;
-    m->next.size = MMAPSIZE-2*sizeof(blockmark_t);
-    blockmark_t* n = NEXT_BLOCK(m);
-    n->next.x32 = 0;
-    n->prev.fill = 0;
-    n->prev.size = m->next.size;
-    // alloc 1st block
-    uintptr_t sub  = (uintptr_t)allocBlock(chunk->block, p, size, &chunk->first);
-    chunk->maxfree = getMaxFreeBlock(chunk->block, chunk->size, chunk->first);
-    kh_dynablocks_t *blocks = chunk->dblist = kh_init(dynablocks);
-    kh_resize(dynablocks, blocks, 64);
-    khint_t k;
-    int ret;
-    k = kh_put(dynablocks, blocks, (uintptr_t)sub, &ret);
-    kh_value(blocks, k) = db;
-    for(size_t j=0; j<size; ++j)
-        chunk->helper[(uintptr_t)sub-(uintptr_t)chunk->block + j] = (j<256)?j:255;
-    native_lock_decifnot0(&chunk->lock);
-    return sub;
-}
+    size = roundSize(size);
 
-void ActuallyFreeDynarecMap(dynablock_t* db, uintptr_t addr, size_t size)
-{
-    mmaplist_t* head = mmaplist;
-    int i = mmapsize;
-    while(head) {
-        const int n = (i>NCHUNK)?NCHUNK:i;
-        i-=n;
-        for(int i=0; i<n; ++i) {
-            mmapchunk_t* chunk = &head->chunks[i];
-            if ((addr>(uintptr_t)(chunk->block)) 
-            && (addr<((uintptr_t)(chunk->block)+chunk->size))) {
-                int loopedwait = 256;
-                while (native_lock_incif0(&chunk->lock) && loopedwait) {
-                    sched_yield();
-                    --loopedwait;
-                }
-                if(!loopedwait) {
-                    printf_log(LOG_INFO, "BOX64: Warning, Free a chunk in a locked mmaplist[%d]\n", i);
-                    //arm_lock_incb(&chunk->lock);
-                    if(cycle_log)
-                        print_cycle_log(LOG_INFO);
-                }
-                void* sub = (void*)(addr-sizeof(blockmark_t));
-                size_t newfree = freeBlock(chunk->block, sub, &chunk->first);
-                if(chunk->maxfree < newfree) chunk->maxfree = newfree;
-                kh_dynablocks_t *blocks = chunk->dblist;
-                if(blocks) {
-                    khint_t k = kh_get(dynablocks, blocks, (uintptr_t)sub);
-                    if(k!=kh_end(blocks))
-                        kh_del(dynablocks, blocks, k);
-                    memset(&chunk->helper[(uintptr_t)sub-(uintptr_t)chunk->block], 0, size);
-                }
-                native_lock_decifnot0(&chunk->lock);
-                return;
+    mmaplist_t* list = mmaplist;
+    if(!list)
+        list = mmaplist = (mmaplist_t*)box_calloc(1, sizeof(mmaplist_t));
+    // check if there is space in the chunks that are already open
+    int i = 0;
+    uintptr_t sz = size + 2*sizeof(blockmark_t);
+    while(1) {
+        if(list->chunks[i].maxfree>=size) {
+            // looks free, try to alloc!
+            size_t rsize = 0;
+            void* sub = getFirstBlock(list->chunks[i].block, size, &rsize, list->chunks[i].first);
+            if(sub) {
+                void* ret = allocBlock(list->chunks[i].block, sub, size, NULL);
+                if(sub==list->chunks[i].first)
+                    list->chunks[i].first = getNextFreeBlock(sub);
+                if(rsize==list->chunks[i].maxfree)
+                    list->chunks[i].maxfree = getMaxFreeBlock(list->chunks[i].block, list->chunks[i].size, list->chunks[i].first);
+                return (uintptr_t)ret;
             }
         }
-        head = head->next;
-    }
-    if(mmapsize)
-        dynarec_log(LOG_NONE, "Warning, block %p (size %zu) not found in mmaplist for Free\n", (void*)addr, size);
-}
-
-dynablock_t* FindDynablockFromNativeAddress(void* addr)
-{
-    // look in actual list
-   mmaplist_t* head = mmaplist;
-    int i = mmapsize;
-    while(head) {
-        const int n = (i>NCHUNK)?NCHUNK:i;
-        i-=n;
-        for(int i=0; i<n; ++i) {
-            mmapchunk_t* chunk = &head->chunks[i];
-            if ((uintptr_t)addr>=(uintptr_t)chunk->block 
-            && ((uintptr_t)addr<(uintptr_t)chunk->block+chunk->size)) {
-                if(!chunk->helper)
-                    return FindDynablockDynablocklist(addr, chunk->dblist);
-                else {
-                    uintptr_t p = (uintptr_t)addr - (uintptr_t)chunk->block;
-                    while(chunk->helper[p]) p -= chunk->helper[p];
-                    khint_t k = kh_get(dynablocks, chunk->dblist, (uintptr_t)chunk->block + p);
-                    if(k!=kh_end(chunk->dblist))
-                        return kh_value(chunk->dblist, k);
-                    return NULL;
-                }
+        // check if new
+        if(!list->chunks[i].size) {
+            // alloc a new block, oversized or not; we are at the end of the list
+            size_t allocsize = (sz>MMAPSIZE)?sz:MMAPSIZE;
+            // align allocsize with the page size
+            allocsize = (allocsize+(box64_pagesize-1))&~(box64_pagesize-1);
+            #ifndef USE_MMAP
+            void *p = NULL;
+            if(!(p=box_memalign(box64_pagesize, allocsize))) {
+                dynarec_log(LOG_INFO, "Cannot create dynamic map of %zu bytes\n", allocsize);
+                return 0;
             }
+            mprotect(p, allocsize, PROT_READ | PROT_WRITE | PROT_EXEC);
+            #else
+            void* p = mmap(NULL, allocsize, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
+            if(p==(void*)-1) {
+                dynarec_log(LOG_INFO, "Cannot create dynamic map of %zu bytes\n", allocsize);
+                return 0;
+            }
+            #endif
+            setProtection((uintptr_t)p, allocsize, PROT_READ | PROT_WRITE | PROT_EXEC);
+            list->chunks[i].block = p;
+            list->chunks[i].first = p;
+            list->chunks[i].size = allocsize;
+            // setup marks
+            blockmark_t* m = (blockmark_t*)p;
+            m->prev.x32 = 0;
+            m->next.fill = 0;
+            m->next.size = allocsize-2*sizeof(blockmark_t);
+            blockmark_t* n = NEXT_BLOCK(m);
+            n->next.x32 = 0;
+            n->prev.fill = 0;
+            n->prev.size = m->next.size;
+            // alloc 1st block
+            void* ret  = allocBlock(list->chunks[i].block, p, size, NULL);
+            list->chunks[i].maxfree = getMaxFreeBlock(list->chunks[i].block, list->chunks[i].size, NULL);
+            if(list->chunks[i].maxfree)
+                list->chunks[i].first = getNextFreeBlock(m);
+            return (uintptr_t)ret;
         }
-        head = head->next;
-    }
-    // look in oversized
-    return FindDynablockDynablocklist(addr, dblist_oversized);
-}
-
-uintptr_t AllocDynarecMap(dynablock_t* db, size_t size)
-{
-    if(!size)
-        return 0;
-    if(size>MMAPSIZE-2*sizeof(blockmark_t)) {
-        #ifndef USE_MMAP
-        void *p = NULL;
-        if(!(p=box_memalign(box64_pagesize, size))) {
-            dynarec_log(LOG_INFO, "Cannot create dynamic map of %zu bytes\n", size);
-            return 0;
-        }
-        mprotect(p, size, PROT_READ | PROT_WRITE | PROT_EXEC);
-        #else
-        void* p = mmap(NULL, size, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
-        if(p==(void*)-1) {
-            dynarec_log(LOG_INFO, "Cannot create dynamic map of %zu bytes\n", size);
-            return 0;
-        }
-        #endif
-        setProtection((uintptr_t)p, size, PROT_READ | PROT_WRITE | PROT_EXEC);
-        kh_dynablocks_t *blocks = dblist_oversized;
-        if(!blocks) {
-            blocks = dblist_oversized = kh_init(dynablocks);
-            kh_resize(dynablocks, blocks, 64);
+        // next chunk...
+        ++i;
+        if(i==NCHUNK) {
+            i = 0;
+            if(!list->next)
+                list->next = (mmaplist_t*)box_calloc(1, sizeof(mmaplist_t));
+            list = list->next;
         }
-        khint_t k;
-        int ret;
-        k = kh_put(dynablocks, blocks, (uintptr_t)p, &ret);
-        kh_value(blocks, k) = db;
-        return (uintptr_t)p;
     }
-    
-
-    uintptr_t ret = FindFreeDynarecMap(db, size);
-    if(!ret)
-        ret = AddNewDynarecMap(db, size);
-
-    return ret;
 }
 
-void FreeDynarecMap(dynablock_t* db, uintptr_t addr, size_t size)
+void FreeDynarecMap(uintptr_t addr)
 {
-    if(!addr || !size)
+    if(!addr)
         return;
-    if(size>MMAPSIZE-2*sizeof(blockmark_t)) {
-        #ifndef USE_MMAP
-        box_free((void*)addr);
-        #else
-        munmap((void*)addr, size);
-        #endif
-        kh_dynablocks_t *blocks = dblist_oversized;
-        if(blocks) {
-            khint_t k = kh_get(dynablocks, blocks, addr);
-            if(k!=kh_end(blocks))
-                kh_del(dynablocks, blocks, k);
+    
+    int i= 0;
+    mmaplist_t* list = mmaplist;
+
+    while(list) {
+        if ((addr>(uintptr_t)list->chunks[i].block) 
+         && (addr<((uintptr_t)list->chunks[i].block+list->chunks[i].size))) {
+            void* sub = (void*)(addr-sizeof(blockmark_t));
+            void* n = NEXT_BLOCK((blockmark_t*)sub);
+            size_t newfree = freeBlock(list->chunks[i].block, sub, NULL);
+            if(sub<=list->chunks[i].first)
+                list->chunks[i].first = getPrevFreeBlock(n);
+            if(list->chunks[i].maxfree < newfree)
+                list->chunks[i].maxfree = newfree;
+            return;
+        }
+        ++i;
+        if(i==NCHUNK) {
+            i = 0;
+            list = list->next;
         }
-        return;
     }
-    ActuallyFreeDynarecMap(db, addr, size);
 }
 
 uintptr_t getSizeJmpDefault(uintptr_t addr, size_t maxsize)
@@ -870,38 +748,6 @@ uintptr_t getJumpTableAddress64(uintptr_t addr)
     return (uintptr_t)&box64_jmptbl3[idx3][idx2][idx1][idx0];
 }
 
-uintptr_t getJumpAddress64(uintptr_t addr)
-{
-    uintptr_t idx3, idx2, idx1, idx0;
-    idx3 = ((addr)>>48)&0xffff;
-    idx2 = ((addr)>>32)&0xffff;
-    idx1 = ((addr)>>16)&0xffff;
-    idx0 = ((addr)    )&0xffff;
-    if(box64_jmptbl3[idx3] == box64_jmptbldefault2) {
-        uintptr_t*** tbl = (uintptr_t***)box_malloc((1<<JMPTABL_SHIFT)*sizeof(uintptr_t**));
-        for(int i=0; i<(1<<JMPTABL_SHIFT); ++i)
-            tbl[i] = box64_jmptbldefault1;
-        if(native_lock_storeifref(&box64_jmptbl3[idx3], tbl, box64_jmptbldefault2)!=tbl)
-            box_free(tbl);
-    }
-    if(box64_jmptbl3[idx3][idx2] == box64_jmptbldefault1) {
-        uintptr_t** tbl = (uintptr_t**)box_malloc((1<<JMPTABL_SHIFT)*sizeof(uintptr_t*));
-        for(int i=0; i<(1<<JMPTABL_SHIFT); ++i)
-            tbl[i] = box64_jmptbldefault0;
-        if(native_lock_storeifref(&box64_jmptbl3[idx3][idx2], tbl, box64_jmptbldefault1)!=tbl)
-            box_free(tbl);
-    }
-    if(box64_jmptbl3[idx3][idx2][idx1] == box64_jmptbldefault0) {
-        uintptr_t* tbl = (uintptr_t*)box_malloc((1<<JMPTABL_SHIFT)*sizeof(uintptr_t));
-        for(int i=0; i<(1<<JMPTABL_SHIFT); ++i)
-            tbl[i] = (uintptr_t)native_next;
-        if(native_lock_storeifref(&box64_jmptbl3[idx3][idx2][idx1], tbl, box64_jmptbldefault0)!=tbl)
-            box_free(tbl);
-    }
-
-    return (uintptr_t)box64_jmptbl3[idx3][idx2][idx1][idx0];
-}
-
 dynablock_t* getDB(uintptr_t addr)
 {
     uintptr_t idx3, idx2, idx1, idx0;
@@ -1504,24 +1350,11 @@ void fini_custommem_helper(box64context_t *ctx)
                     #else
                     box_free(head->chunks[i].block);
                     #endif
-                if(head->chunks[i].dblist) {
-                    kh_destroy(dynablocks, head->chunks[i].dblist);
-                    head->chunks[i].dblist = NULL;
-                }
-                if(head->chunks[i].helper) {
-                    box_free(head->chunks[i].helper);
-                    head->chunks[i].helper = NULL;
-                }
             }
             mmaplist_t *old = head;
             head = head->next;
             free(old);
         }
-        if(dblist_oversized) {
-            kh_destroy(dynablocks, dblist_oversized);
-            dblist_oversized = NULL;
-        }
-        mmapsize = 0;
 
         box_free(mmaplist);
         for (int i3=0; i3<(1<<DYNAMAP_SHIFT); ++i3)
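
With the khash maps gone, AllocDynarecMap above walks a fixed array of NCHUNK blocklist_t entries per mmaplist_t node, first-fit guided by each chunk's cached maxfree, and any request that finds no room gets a fresh chunk of its own. The sizing rule for that fresh chunk, as a small self-contained sketch (the marks and minchunk parameters stand in for 2*sizeof(blockmark_t) and MMAPSIZE from custommem.c):

    #include <stddef.h>

    /* pick at least the minimum chunk size, more for an oversized request,
       then round up to a whole number of pages */
    static size_t chunk_alloc_size(size_t request, size_t marks, size_t minchunk, size_t pagesize)
    {
        size_t needed    = request + marks;                       /* payload + the two boundary blockmark_t */
        size_t allocsize = (needed > minchunk) ? needed : minchunk;
        return (allocsize + pagesize - 1) & ~(pagesize - 1);      /* page-align upward */
    }
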
diff --git a/src/dynarec/dynablock.c b/src/dynarec/dynablock.c
index c1cd9bd3..6ff5e53c 100755
--- a/src/dynarec/dynablock.c
+++ b/src/dynarec/dynablock.c
@@ -28,8 +28,6 @@
 #include "custommem.h"
 #include "khash.h"
 
-KHASH_MAP_INIT_INT(dynablocks, dynablock_t*)
-
 uint32_t X31_hash_code(void* addr, int len)
 {
     if(!len) return 0;
@@ -52,7 +50,7 @@ void FreeDynablock(dynablock_t* db, int need_lock)
         dynarec_log(LOG_DEBUG, " -- FreeDyrecMap(%p, %d)\n", db->actual_block, db->size);
         db->done = 0;
         db->gone = 1;
-        FreeDynarecMap(db, (uintptr_t)db->actual_block, db->size);
+        FreeDynarecMap((uintptr_t)db->actual_block);
         customFree(db);
         if(need_lock)
             mutex_unlock(&my_context->mutex_dyndump);
@@ -110,20 +108,6 @@ int FreeRangeDynablock(dynablock_t* db, uintptr_t addr, uintptr_t size)
     return 1;
 }
 
-dynablock_t* FindDynablockDynablocklist(void* addr, kh_dynablocks_t* dynablocks)
-{
-    if(!dynablocks)
-        return NULL;
-    dynablock_t* db;
-    kh_foreach_value(dynablocks, db, 
-        const uintptr_t s = (uintptr_t)db->block;
-        const uintptr_t e = (uintptr_t)db->block+db->size;
-        if((uintptr_t)addr>=s && (uintptr_t)addr<e)
-            return db;
-    )
-    return NULL;
-}
-
 dynablock_t *AddNewDynablock(uintptr_t addr)
 {
     dynablock_t* block;
@@ -201,7 +185,7 @@ static dynablock_t* internalDBGetBlock(x64emu_t* emu, uintptr_t addr, uintptr_t
             my_context->max_db_size = blocksz;
         // fill-in jumptable
         if(!addJumpTableIfDefault64(block->x64_addr, block->block)) {
-            FreeDynablock(block, 1);
+            FreeDynablock(block, 0);
             block = getDB(addr);
         } else {
             if(block->x64_size)
diff --git a/src/dynarec/dynarec_native.c b/src/dynarec/dynarec_native.c
index c2cf1387..f8d29b61 100755
--- a/src/dynarec/dynarec_native.c
+++ b/src/dynarec/dynarec_native.c
@@ -378,21 +378,28 @@ static int updateNeed(dynarec_arm_t* dyn, int ninst, uint8_t need) {
 
 void* current_helper = NULL;
 
-void CancelBlock64()
+void CancelBlock64(int need_lock)
 {
+    if(need_lock)
+        mutex_lock(&my_context->mutex_dyndump);
     dynarec_native_t* helper = (dynarec_native_t*)current_helper;
     current_helper = NULL;
-    if(!helper)
+    if(!helper) {
+        if(need_lock)
+            mutex_unlock(&my_context->mutex_dyndump);
         return;
+    }
     customFree(helper->next);
     customFree(helper->insts);
     customFree(helper->instsize);
     customFree(helper->predecessor);
     customFree(helper->table64);
     if(helper->dynablock && helper->dynablock->actual_block)
-        FreeDynarecMap(helper->dynablock, (uintptr_t)helper->dynablock->actual_block, helper->dynablock->size);
+        FreeDynarecMap((uintptr_t)helper->dynablock->actual_block);
     else if(helper->dynablock && helper->block)
-        FreeDynarecMap(helper->dynablock, (uintptr_t)helper->block-sizeof(void*), helper->dynablock->size);
+        FreeDynarecMap((uintptr_t)helper->block-sizeof(void*));
+    if(need_lock)
+        mutex_unlock(&my_context->mutex_dyndump);
 }
 
 uintptr_t native_pass0(dynarec_native_t* dyn, uintptr_t addr);
@@ -404,11 +411,11 @@ void* CreateEmptyBlock(dynablock_t* block, uintptr_t addr) {
     block->isize = 0;
     block->done = 0;
     size_t sz = 4*sizeof(void*);
-    void* actual_p = (void*)AllocDynarecMap(block, sz);
+    void* actual_p = (void*)AllocDynarecMap(sz);
     void* p = actual_p + sizeof(void*);
     if(actual_p==NULL) {
         dynarec_log(LOG_INFO, "AllocDynarecMap(%p, %zu) failed, cancelling block\n", block, sz);
-        CancelBlock64();
+        CancelBlock64(0);
         return NULL;
     }
     block->size = sz;
@@ -469,13 +476,13 @@ void* FillBlock64(dynablock_t* block, uintptr_t addr) {
     // basic checks
     if(!helper.size) {
         dynarec_log(LOG_INFO, "Warning, null-sized dynarec block (%p)\n", (void*)addr);
-        CancelBlock64();
+        CancelBlock64(0);
         return CreateEmptyBlock(block, addr);;
     }
     if(!isprotectedDB(addr, 1)) {
         dynarec_log(LOG_INFO, "Warning, write on current page on pass0, aborting dynablock creation (%p)\n", (void*)addr);
         AddHotPage(addr);
-        CancelBlock64();
+        CancelBlock64(0);
         return NULL;
     }
     // protect the block of it goes over the 1st page
@@ -531,13 +538,13 @@ void* FillBlock64(dynablock_t* block, uintptr_t addr) {
     // ok, now allocate mapped memory, with executable flag on
     size_t sz = sizeof(void*) + helper.native_size + helper.table64size*sizeof(uint64_t) + 4*sizeof(void*) + insts_rsize;
     //           dynablock_t*     block (arm insts)            table64                       jmpnext code       instsize
-    void* actual_p = (void*)AllocDynarecMap(block, sz);
+    void* actual_p = (void*)AllocDynarecMap(sz);
     void* p = actual_p + sizeof(void*);
     void* next = p + helper.native_size + helper.table64size*sizeof(uint64_t);
     void* instsize = next + 4*sizeof(void*);
     if(actual_p==NULL) {
         dynarec_log(LOG_INFO, "AllocDynarecMap(%p, %zu) failed, cancelling block\n", block, sz);
-        CancelBlock64();
+        CancelBlock64(0);
         return NULL;
     }
     helper.block = p;
@@ -603,7 +610,7 @@ void* FillBlock64(dynablock_t* block, uintptr_t addr) {
     if((block->hash != hash)) {
         dynarec_log(LOG_DEBUG, "Warning, a block changed while beeing processed hash(%p:%ld)=%x/%x\n", block->x64_addr, block->x64_size, block->hash, hash);
         AddHotPage(addr);
-        CancelBlock64();
+        CancelBlock64(0);
         return NULL;
     }
     if(!isprotectedDB(addr, end-addr)) {
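
CancelBlock64 gains a need_lock flag. The call sites in this patch follow one convention: code that already holds my_context->mutex_dyndump (the dynarec passes above, and the SIGSEGV path in signals.c that has just called relockMutex) passes 0, while the other signal-handler paths pass 1 so the function takes and releases the mutex itself. The shape of that wrapper, sketched with the body elided:

    void CancelBlock64(int need_lock)
    {
        if (need_lock)                                    /* caller does not hold mutex_dyndump */
            mutex_lock(&my_context->mutex_dyndump);
        /* ... discard the partially built dynablock, as in the hunk above ... */
        if (need_lock)
            mutex_unlock(&my_context->mutex_dyndump);
    }
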
diff --git a/src/include/custommem.h b/src/include/custommem.h
index 42f75a89..f2a4481d 100644
--- a/src/include/custommem.h
+++ b/src/include/custommem.h
@@ -19,8 +19,8 @@ void customFree(void* p);
 #ifdef DYNAREC
 typedef struct dynablock_s dynablock_t;
 // custom protection flag to mark Page that are Write protected for Dynarec purpose
-uintptr_t AllocDynarecMap(dynablock_t* db, size_t size);
-void FreeDynarecMap(dynablock_t* db, uintptr_t addr, size_t size);
+uintptr_t AllocDynarecMap(size_t size);
+void FreeDynarecMap(uintptr_t addr);
 
 void addDBFromAddressRange(uintptr_t addr, size_t size);
 void cleanDBFromAddressRange(uintptr_t addr, size_t size, int destroy);
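
The header states the new contract: neither the owning dynablock nor the size is passed any more, since both can be recovered from the allocation itself. A usage sketch mirroring dynarec_native.c above (native_size and the error handling are placeholders, not box64 identifiers):

    size_t sz = sizeof(void*) + native_size;          /* leading slot for the dynablock_t*, then the emitted code */
    uintptr_t actual = AllocDynarecMap(sz);
    if (!actual) {
        /* allocation failed: cancel the block, as FillBlock64 does */
    }
    void* code = (void*)(actual + sizeof(void*));     /* code starts right after the owner slot */
    /* ... emit code, run it, and eventually ... */
    FreeDynarecMap(actual);                           /* address only; the size comes from the block marks */
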
diff --git a/src/include/dynablock.h b/src/include/dynablock.h
index 7f28fbd7..1868deea 100755
--- a/src/include/dynablock.h
+++ b/src/include/dynablock.h
@@ -3,7 +3,6 @@
 
 typedef struct x64emu_s x64emu_t;
 typedef struct dynablock_s dynablock_t;
-typedef struct kh_dynablocks_s  kh_dynablocks_t;
 
 uint32_t X31_hash_code(void* addr, int len);
 void FreeDynablock(dynablock_t* db, int need_lock);
@@ -12,7 +11,6 @@ void MarkRangeDynablock(dynablock_t* db, uintptr_t addr, uintptr_t size);
 int FreeRangeDynablock(dynablock_t* db, uintptr_t addr, uintptr_t size);
 
 dynablock_t* FindDynablockFromNativeAddress(void* addr);    // defined in box64context.h
-dynablock_t* FindDynablockDynablocklist(void* addr, kh_dynablocks_t* dynablocks);
 
 // Handling of Dynarec block (i.e. an exectable chunk of x64 translated code)
 dynablock_t* DBGetBlock(x64emu_t* emu, uintptr_t addr, int create);   // return NULL if block is not found / cannot be created. Don't create if create==0
diff --git a/src/include/dynarec_native.h b/src/include/dynarec_native.h
index bc4cf3f6..9fe26323 100755
--- a/src/include/dynarec_native.h
+++ b/src/include/dynarec_native.h
@@ -4,7 +4,7 @@
 typedef struct dynablock_s dynablock_t;
 typedef struct x64emu_s x64emu_t;
 
-void CancelBlock64();
+void CancelBlock64(int need_lock);
 void* FillBlock64(dynablock_t* block, uintptr_t addr);
 
 #endif //__DYNAREC_ARM_H_
\ No newline at end of file
diff --git a/src/libtools/signals.c b/src/libtools/signals.c
index a3f0b796..40e9a8a7 100755
--- a/src/libtools/signals.c
+++ b/src/libtools/signals.c
@@ -751,7 +751,7 @@ void my_sigactionhandler_oldcode(int32_t sig, int simple, siginfo_t* info, void
             //relockMutex(Locks);   // do not relock mutex, because of the siglongjmp, whatever was running is canceled
             #ifdef DYNAREC
             if(Locks & is_dyndump_locked)
-                CancelBlock64();
+                CancelBlock64(1);
             #endif
             siglongjmp(ejb->jmpbuf, 1);
         }
@@ -793,7 +793,7 @@ void my_sigactionhandler_oldcode(int32_t sig, int simple, siginfo_t* info, void
         //relockMutex(Locks);   // the thread will exit, so no relock there
         #ifdef DYNAREC
         if(Locks & is_dyndump_locked)
-            CancelBlock64();
+            CancelBlock64(1);
         #endif
         exit(ret);
     }
@@ -850,7 +850,7 @@ void my_box64signalhandler(int32_t sig, siginfo_t* info, void * ucntx)
 #ifdef DYNAREC
     if((Locks & is_dyndump_locked) && (sig==SIGSEGV) && current_helper) {
         relockMutex(Locks);
-        CancelBlock64();
+        CancelBlock64(0);
         cancelFillBlock();  // Segfault inside a Fillblock, cancel it's creation...
     }
     dynablock_t* db = NULL;
@@ -901,11 +901,11 @@ void my_box64signalhandler(int32_t sig, siginfo_t* info, void * ucntx)
                     dynarec_log(LOG_INFO, "Dynablock unprotected, getting out!\n");
                 }
                 //relockMutex(Locks);
+                mutex_unlock(&mutex_dynarec_prot);
                 #ifdef DYNAREC
                 if(Locks & is_dyndump_locked)
-                    CancelBlock64();
+                    CancelBlock64(1);
                 #endif
-                mutex_unlock(&mutex_dynarec_prot);
                 siglongjmp(ejb->jmpbuf, 2);
             }
             dynarec_log(LOG_INFO, "Warning, Auto-SMC (%p for db %p/%p) detected, but jmpbuffer not ready!\n", (void*)addr, db, (void*)db->x64_addr);