From b5e405b2b447c64b9dcbcaf5fc5e0cf31de66d5e Mon Sep 17 00:00:00 2001
From: ptitSeb
Date: Wed, 4 Sep 2024 15:23:37 +0200
Subject: Added an rbtree in Custom Mem Manager to speed up customFree and
 customRealloc

---
 src/custommem.c | 53 ++++++++++++++++++++++++++++++++++++++++++-----------
 1 file changed, 42 insertions(+), 11 deletions(-)

diff --git a/src/custommem.c b/src/custommem.c
index 292d1b87..f227bbee 100644
--- a/src/custommem.c
+++ b/src/custommem.c
@@ -66,6 +66,7 @@ static int inited = 0;
 
 rbtree* mapallmem = NULL;
 static rbtree* mmapmem = NULL;
+static rbtree* blockstree = NULL;
 
 typedef struct blocklist_s {
     void* block;
@@ -359,7 +360,7 @@ void testAllBlocks()
     size_t max_free = 0;
     for(int i=0; i<n_blocks; ++i) {
[...]
+static blocklist_t* findBlock(uintptr_t addr)
+{
+    if(blockstree) {
+        uint32_t i;
+        uintptr_t end;
+        if(rb_get_end(blockstree, addr, &i, &end))
+            return &p_blocks[i];
+        return NULL;
+    }
+    for(int i=0; i<n_blocks; ++i) {
+        if((addr>=(uintptr_t)p_blocks[i].block) && (addr<=(uintptr_t)p_blocks[i].block+p_blocks[i].size))
+            return &p_blocks[i];
+    }
+    return NULL;
+}
+
 #ifdef DYNAREC
 #define GET_PROT_WAIT(A, B) \
     uint32_t A;             \
@@ -491,6 +507,8 @@ void* internal_customMalloc(size_t size, int is32bits)
     void* ret = allocBlock(p_blocks[i].block, p, size, &p_blocks[i].first);
     p_blocks[i].maxfree = getMaxFreeBlock(p_blocks[i].block, p_blocks[i].size, p_blocks[i].first);
     mutex_unlock(&mutex_blocks);
+    if(blockstree)
+        rb_set(blockstree, (uintptr_t)p, (uintptr_t)p+allocsize, i);
     if(mapallmem) { // defer the setProtection...
         //setProtection((uintptr_t)p, allocsize, PROT_READ | PROT_WRITE);
@@ -533,19 +551,23 @@ void* internal_customRealloc(void* p, size_t size, int is32bits)
     uintptr_t addr = (uintptr_t)p;
     mutex_lock(&mutex_blocks);
-    for(int i=0; i<n_blocks; ++i) {
-        if((addr>(uintptr_t)p_blocks[i].block)
-            && (addr<((uintptr_t)p_blocks[i].block+p_blocks[i].size))) {
+    blocklist_t* l = findBlock(addr);
+    if(l) {
         blockmark_t* sub = (blockmark_t*)(addr-sizeof(blockmark_t));
-        if(expandBlock(p_blocks[i].block, sub, size, &p_blocks[i].first)) {
-            p_blocks[i].maxfree = getMaxFreeBlock(p_blocks[i].block, p_blocks[i].size, p_blocks[i].first);
+        if(expandBlock(l->block, sub, size, &l->first)) {
+            l->maxfree = getMaxFreeBlock(l->block, l->size, l->first);
             mutex_unlock(&mutex_blocks);
             return p;
         }
         mutex_unlock(&mutex_blocks);
         void* newp = internal_customMalloc(size, is32bits);
         memcpy(newp, p, sizeBlock(sub));
-        size_t newfree = freeBlock(p_blocks[i].block, p_blocks[i].size, sub, &p_blocks[i].first);
-        if(p_blocks[i].maxfree < newfree) p_blocks[i].maxfree = newfree;
+        // disabling the "fast free", as the mutex has been released, so things are not guaranteed to stay as-is
+        internal_customFree(p, is32bits);
+        //mutex_lock(&mutex_blocks);
+        //size_t newfree = freeBlock(l->block, l->size, sub, &l->first);
+        //if(l->maxfree < newfree) l->maxfree = newfree;
+        //mutex_unlock(&mutex_blocks);
         return newp;
     }
-    }
@@ -573,11 +595,11 @@ void internal_customFree(void* p, int is32bits)
     uintptr_t addr = (uintptr_t)p;
     mutex_lock(&mutex_blocks);
-    for(int i=0; i<n_blocks; ++i) {
-        if((addr>(uintptr_t)p_blocks[i].block)
-            && (addr<((uintptr_t)p_blocks[i].block+p_blocks[i].size))) {
+    blocklist_t* l = findBlock(addr);
+    if(l) {
         blockmark_t* sub = (blockmark_t*)(addr-sizeof(blockmark_t));
-        size_t newfree = freeBlock(p_blocks[i].block, p_blocks[i].size, sub, &p_blocks[i].first);
-        if(p_blocks[i].maxfree < newfree) p_blocks[i].maxfree = newfree;
+        size_t newfree = freeBlock(l->block, l->size, sub, &l->first);
+        if(l->maxfree < newfree) l->maxfree = newfree;
         mutex_unlock(&mutex_blocks);
         return;
     }
-    }
@@ -705,6 +727,8 @@ void* internal_customMemAligned(size_t align, size_t size, int is32bits)
     mutex_unlock(&mutex_blocks);
     if(mapallmem)
         setProtection((uintptr_t)p, allocsize, PROT_READ | PROT_WRITE);
+    if(blockstree)
+        rb_set(blockstree, (uintptr_t)p, (uintptr_t)p+allocsize, i);
     return ret;
 }
 
 void* customMemAligned(size_t align, size_t size)
@@ -1858,6 +1882,11 @@ void init_custommem_helper(box64context_t* ctx)
     if(inited) // already initialized
         return;
     inited = 1;
+    blockstree = init_rbtree();
+    // if there are some blocks already
+    if(n_blocks)
+        for(int i=0; i<n_blocks; ++i)
+            rb_set(blockstree, (uintptr_t)p_blocks[i].block, (uintptr_t)p_blocks[i].block+p_blocks[i].size, i);
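
The idea of the change: customFree() and customRealloc() used to scan p_blocks linearly to find the block owning a pointer; the new blockstree maps each block's [start, start+size] interval to its index, kept up to date by rb_set() at every block creation, so findBlock() resolves an address in O(log n). For illustration only, here is a minimal, self-contained sketch of that interval-lookup pattern; a sorted array with binary search stands in for box64's internal rbtree (init_rbtree/rb_set/rb_get_end are internal APIs), and every name below (interval_t, interval_set, interval_get) is hypothetical, not box64 code.

/* Minimal sketch of the interval lookup behind findBlock().
   A sorted array + binary search stands in for box64's rbtree;
   all names here are illustrative, not box64's actual API. */
#include <stdint.h>
#include <stdio.h>

typedef struct {
    uintptr_t start, end;   /* maps [start, end) ... */
    int       idx;          /* ... to an index into a block table */
} interval_t;

static interval_t map[16];
static int n_intervals = 0;

/* Register a block, keeping the array sorted by start address
   (assumes non-overlapping intervals, as block mappings are). */
static void interval_set(uintptr_t start, uintptr_t end, int idx)
{
    int i = n_intervals++;
    while(i>0 && map[i-1].start>start) {
        map[i] = map[i-1];
        --i;
    }
    map[i] = (interval_t){start, end, idx};
}

/* O(log n) owner lookup: the role the rbtree query plays in findBlock(). */
static int interval_get(uintptr_t addr)
{
    int lo = 0, hi = n_intervals-1;
    while(lo<=hi) {
        int mid = (lo+hi)/2;
        if(addr<map[mid].start)
            hi = mid-1;
        else if(addr>=map[mid].end)
            lo = mid+1;
        else
            return map[mid].idx;    /* addr falls inside this block */
    }
    return -1;  /* address not managed by any block */
}

int main(void)
{
    /* Register two fake blocks, as the patch does with rb_set(). */
    interval_set(0x10000, 0x20000, 0);
    interval_set(0x40000, 0x50000, 1);
    printf("0x12345 -> block %d\n", interval_get(0x12345)); /* 0 */
    printf("0x45000 -> block %d\n", interval_get(0x45000)); /* 1 */
    printf("0x30000 -> block %d\n", interval_get(0x30000)); /* -1 */
    return 0;
}

In the patch itself the same two roles are played by rb_set() (registration at block creation and in init_custommem_helper()) and the rbtree query inside findBlock(); the old linear scan is kept only as a fallback for the window before blockstree is initialized.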