author      ptitSeb <sebastien.chev@gmail.com>    2025-06-30 16:36:38 +0200
committer   ptitSeb <sebastien.chev@gmail.com>    2025-06-30 16:36:38 +0200
commit      2278438462e6f00e10ce9ace248505a8af0808d1 (patch)
tree        6c6f380282c110fced76d307d602faa3d25467b4 /src
parent      a942cef1eed4fba6447303e9f475940a0a4c1ed5 (diff)
download    box64-2278438462e6f00e10ce9ace248505a8af0808d1.tar.gz
            box64-2278438462e6f00e10ce9ace248505a8af0808d1.zip
[DYNAREC] Refactor hotpage detection and dynarec_dirty 1 & 2. Also adjust some launcher/games flags
Diffstat (limited to 'src')
-rw-r--r--  src/custommem.c                  144
-rw-r--r--  src/dynarec/dynablock.c           82
-rw-r--r--  src/dynarec/dynablock_private.h    2
-rw-r--r--  src/dynarec/dynarec_native.c       1
-rw-r--r--  src/include/custommem.h            4
-rw-r--r--  src/libtools/signals.c             2
-rw-r--r--  src/tools/env.c                   14
7 files changed, 203 insertions, 46 deletions
diff --git a/src/custommem.c b/src/custommem.c
index 599e60e7..7d81f7ce 100644
--- a/src/custommem.c
+++ b/src/custommem.c
@@ -2065,6 +2065,36 @@ void neverprotectDB(uintptr_t addr, size_t size, int mark)
     UNLOCK_PROT();
 }
 
+// Remove the NEVERCLEAN flag for an address range
+void unneverprotectDB(uintptr_t addr, size_t size)
+{
+    dynarec_log(LOG_DEBUG, "unneverprotectDB %p -> %p\n", (void*)addr, (void*)(addr+size-1));
+
+    uintptr_t cur = addr&~(box64_pagesize-1);
+    uintptr_t end = ALIGN(addr+size);
+
+    LOCK_PROT();
+    while(cur!=end) {
+        uint32_t prot = 0, oprot;
+        uintptr_t bend = 0;
+        if (!rb_get_end(memprot, cur, &prot, &bend)) {
+            if(bend>=end) break;
+            else {
+                cur = bend;
+                continue;
+            }
+        }
+        oprot = prot;
+        if(bend>end)
+            bend = end;
+        prot &= ~PROT_NEVERCLEAN;
+        if (prot != oprot)
+            rb_set(memprot, cur, bend, prot);
+        cur = bend;
+    }
+    UNLOCK_PROT();
+}
+
 int isprotectedDB(uintptr_t addr, size_t size)
 {
     dynarec_log(LOG_DEBUG, "isprotectedDB %p -> %p => ", (void*)addr, (void*)(addr+size-1));
@@ -2087,47 +2117,107 @@ int isprotectedDB(uintptr_t addr, size_t size)
     return 1;
 }
 
-static uintptr_t hotpage = 0;
-static int hotpage_cnt = 0;
-static int repeated_count = 0;
-static uintptr_t repeated_page = 0;
+typedef union hotpage_s {
+    struct {
+        uint64_t    addr:36;
+        uint64_t    cnt:28;
+    };
+    uint64_t    x;
+} hotpage_t;
+#define HOTPAGE_MAX ((1<<28)-1)
+#define N_HOTPAGE   16
 #define HOTPAGE_MARK 64
-#define HOTPAGE_DIRTY 4
-void SetHotPage(uintptr_t addr)
-{
-    hotpage = addr&~(box64_pagesize-1);
-    hotpage_cnt = BOX64ENV(dynarec_dirty)?HOTPAGE_DIRTY:HOTPAGE_MARK;
+#define HOTPAGE_DIRTY 1024
+static hotpage_t hotpage[N_HOTPAGE] = {0};
+void SetHotPage(int idx, uintptr_t page)
+{
+    hotpage_t tmp = hotpage[idx];
+    tmp.addr = page;
+    tmp.cnt = BOX64ENV(dynarec_dirty)?HOTPAGE_DIRTY:HOTPAGE_MARK;
+    //TODO: use Atomics to update hotpage?
+    native_lock_store_dd(hotpage+idx, tmp.x);
+}
+int IdxHotPage(uintptr_t page)
+{
+    for(int i=0; i<N_HOTPAGE; ++i)
+        if(hotpage[i].addr == page)
+            return i;
+    return -1;
+}
+void CancelHotPage(uintptr_t page)
+{
+    unneverprotectDB(page<<12, box64_pagesize);
+}
+int IdxOldestHotPage(uintptr_t page)
+{
+    int best_idx = -1;
+    uint32_t best_cnt = HOTPAGE_MAX+1;
+    // value used to reset a hotpage slot with the new page...
+    hotpage_t tmp;
+    tmp.addr = page;
+    tmp.cnt = HOTPAGE_MAX;
+    for(int i=0; i<N_HOTPAGE; ++i) {
+        if(!hotpage[i].cnt) {
+            native_lock_store_dd(hotpage+i, tmp.x);
+            return i;
+        }
+        uint32_t cnt = hotpage[i].cnt;
+        if(cnt==HOTPAGE_MAX) cnt = 0;
+        if(cnt < best_cnt) {
+            best_idx = i;
+            best_cnt = cnt;
+        }
+    }
+    hotpage_t old = hotpage[best_idx];
+    native_lock_store_dd(hotpage+best_idx, tmp.x);
+    if(old.cnt && old.cnt!=HOTPAGE_MAX && BOX64ENV(dynarec_dirty)==1)
+        CancelHotPage(old.addr);
+    return best_idx;
 }
-void CheckHotPage(uintptr_t addr)
+void CheckHotPage(uintptr_t addr, uint32_t prot)
 {
-    uintptr_t page = (uintptr_t)addr&~(box64_pagesize-1);
-    if(repeated_count==1 && repeated_page==page) {
+    if(addr>=0x1000000000000LL) // more than 48bits
+        return;
+    if(prot&PROT_NEVERCLEAN && BOX64ENV(dynarec_dirty)==2)
+        return;
+    uintptr_t page = addr>>12;
+    // look for idx
+    int idx = IdxHotPage(page);
+    if(idx==-1) { IdxOldestHotPage(page); return; }
+    hotpage_t hp = hotpage[idx];
+    /*if(hp.cnt==HOTPAGE_MAX)*/ {
         if(BOX64ENV(dynarec_dirty)>1) {
-            dynarec_log(LOG_INFO, "Detecting a Hotpage at %p (%d), marking page as NEVERCLEAN\n", (void*)repeated_page, repeated_count);
-            neverprotectDB(repeated_page, box64_pagesize, 1);
+            dynarec_log(LOG_INFO, "Detecting a Hotpage at %p (idx=%d), marking page as NEVERCLEAN\n", (void*)(page<<12), idx);
+            neverprotectDB(page<<12, box64_pagesize, 1);
+            hp.cnt = 0;
+            native_lock_store_dd(hotpage+idx, hp.x);  // free slot
         } else {
-            dynarec_log(LOG_INFO, "Detecting a Hotpage at %p (%d)\n", (void*)repeated_page, repeated_count);
-            SetHotPage(repeated_page);
+            dynarec_log(LOG_INFO, "Detecting a Hotpage at %p (idx=%d)\n", (void*)(page<<12), idx);
+            SetHotPage(idx, page);
         }
-        repeated_count = 0;
-        repeated_page = 0;
-    } else {
-        repeated_count = 1;
-        repeated_page = page;
     }
 }
 int isInHotPage(uintptr_t addr)
 {
-    if(!hotpage_cnt)
+    if(addr>0x1000000000000LL) return 0;
+    uintptr_t page = addr>>12;
+    int idx = IdxHotPage(page);
+    if(idx==-1 || !hotpage[idx].cnt || (hotpage[idx].cnt==HOTPAGE_MAX))
         return 0;
-    int ret = (addr>=hotpage) && (addr<hotpage+box64_pagesize);
-    if(ret)
-        --hotpage_cnt;
-    return ret;
+    //TODO: do this with atomics instead
+    hotpage_t hp = hotpage[idx];
+    --hp.cnt;
+    native_lock_store_dd(hotpage+idx, hp.x);
+    if(!hp.cnt && BOX64ENV(dynarec_dirty)==1)
+        CancelHotPage(hp.addr);
+    return 1;
 }
 int checkInHotPage(uintptr_t addr)
 {
-    return hotpage_cnt && (addr>=hotpage) && (addr<hotpage+box64_pagesize);
+    if(addr>0x1000000000000LL) return 0;
+    uintptr_t page = addr>>12;
+    int idx = IdxHotPage(page);
+    return (idx==-1 || !hotpage[idx].cnt || (hotpage[idx].cnt==HOTPAGE_MAX))?0:1;
 }
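
A side note on the data layout: the rewritten hotpage tracking above replaces the single global page/counter pair with a 16-slot table, where each slot packs a 36-bit page index (addr>>12 of an at-most-48-bit address, hence the 0x1000000000000 guard) and a 28-bit countdown into one uint64_t, so a whole slot can be published with a single native_lock_store_dd. Below is a minimal standalone sketch of that packing; it is not part of the patch, the union layout and constants are copied from the hunk above, and the sample address is made up.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Same layout as hotpage_t in custommem.c: page index and countdown
   share one 64-bit word, so one store updates both consistently. */
typedef union hotpage_s {
    struct {
        uint64_t addr:36;
        uint64_t cnt:28;
    };
    uint64_t x;
} hotpage_t;
#define HOTPAGE_MAX ((1<<28)-1)

int main(void)
{
    uint64_t addr = 0x712345678abcULL;      /* made-up address below 2^48 */
    assert(addr < 0x1000000000000ULL);      /* CheckHotPage rejects anything above */

    hotpage_t hp = { .x = 0 };
    hp.addr = addr >> 12;                   /* 4 KB page index, fits in 36 bits */
    hp.cnt  = HOTPAGE_MAX;                  /* freshly registered candidate */

    /* The page survives the 36-bit field and the counter is intact. */
    assert(((uint64_t)hp.addr << 12) == (addr & ~(uint64_t)0xfff));
    assert(hp.cnt == HOTPAGE_MAX);
    printf("page %#llx cnt %u packed as %#llx\n",
           (unsigned long long)hp.addr, (unsigned)hp.cnt,
           (unsigned long long)hp.x);
    return 0;
}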
 
 
diff --git a/src/dynarec/dynablock.c b/src/dynarec/dynablock.c
index b804eff2..13693204 100644
--- a/src/dynarec/dynablock.c
+++ b/src/dynarec/dynablock.c
@@ -54,11 +54,11 @@ dynablock_t* InvalidDynablock(dynablock_t* db, int need_lock)
         if(db->callret_size) {
             // mark all callrets to UDF
             for(int i=0; i<db->callret_size; ++i)
-                *(uint32_t*)(db->block+db->callrets[i].offs) = ARCH_NOP;
+                *(uint32_t*)(db->block+db->callrets[i].offs) = ARCH_UDF;
             ClearCache(db->block, db->size);
         }
         #endif
-        if(db_size && my_context) {
+        if(db_size && my_context && !BOX64ENV(dynarec_dirty)) {
             uint32_t n = rb_dec(my_context->db_sizes, db_size, db_size+1);
             if(!n && (db_size >= my_context->max_db_size)) {
                 my_context->max_db_size = rb_get_rightmost(my_context->db_sizes);
@@ -71,14 +71,52 @@ dynablock_t* InvalidDynablock(dynablock_t* db, int need_lock)
     return db;
 }
 
+dynablock_t* SwitchDynablock(dynablock_t* db, int need_lock)
+{
+    if(db) {
+        if(!db->done || !db->previous || !db->previous->gone)
+            return NULL; // not a correct block!
+        dynarec_log(LOG_DEBUG, "SwitchDynablock(%p/%p), db->block=%p->%p x64=%p:%p->%p hash->%x->%x\n", db, db->previous, db->block, db->previous->block, db->x64_addr, db->x64_addr+db->x64_size-1, db->previous->x64_addr+db->previous->x64_size-1, db->hash, db->previous->hash);
+        // remove jumptable without waiting
+        dynablock_t* db_new = db->previous;
+        db->previous = NULL;
+        if(need_lock)
+            mutex_lock(&my_context->mutex_dyndump);
+        InvalidDynablock(db, 0);
+        db_new->done = 1;
+        db_new->gone = 0;
+        db_new->previous = db;
+        #ifdef ARCH_NOP
+        if(db_new->callret_size) {
+            // restore all callrets to NOP
+            for(int i=0; i<db_new->callret_size; ++i)
+                *(uint32_t*)(db_new->block+db_new->callrets[i].offs) = ARCH_NOP;
+            ClearCache(db_new->block, db_new->size);
+        }
+        #endif
+        if(need_lock)
+            mutex_unlock(&my_context->mutex_dyndump);
+        return db_new;
+    }
+    return db;
+}
+
 void FreeInvalidDynablock(dynablock_t* db, int need_lock)
 {
     if(db) {
         if(!db->gone)
             return; // already in the process of deletion!
         dynarec_log(LOG_DEBUG, "FreeInvalidDynablock(%p), db->block=%p x64=%p:%p already gone=%d\n", db, db->block, db->x64_addr, db->x64_addr+db->x64_size-1, db->gone);
+        uintptr_t db_size = db->x64_size;
         if(need_lock)
             mutex_lock(&my_context->mutex_dyndump);
+        if(db_size && my_context && BOX64ENV(dynarec_dirty)) {
+            uint32_t n = rb_dec(my_context->db_sizes, db_size, db_size+1);
+            if(!n && (db_size >= my_context->max_db_size)) {
+                my_context->max_db_size = rb_get_rightmost(my_context->db_sizes);
+                dynarec_log(LOG_INFO, "BOX64 Dynarec: lower max_db=%d\n", my_context->max_db_size);
+            }
+        }
         FreeDynarecMap((uintptr_t)db->actual_block);    // will also free db
         if(need_lock)
             mutex_unlock(&my_context->mutex_dyndump);
@@ -233,11 +271,13 @@ static dynablock_t* internalDBGetBlock(x64emu_t* emu, uintptr_t addr, uintptr_t
     // check size
     if(block) {
         // fill-in jumptable
-        if(!addJumpTableIfDefault64(block->x64_addr, block->dirty?block->jmpnext:block->block)) {
+        if(!addJumpTableIfDefault64(block->x64_addr, (block->dirty || block->always_test)?block->jmpnext:block->block)) {
             FreeDynablock(block, 0, 0);
             block = getDB(addr);
             MarkDynablock(block);   // just in case...
         } else {
+            if(block->dirty)
+                block->dirty = 0;
             if(block->x64_size) {
                 if(block->x64_size>my_context->max_db_size) {
                     my_context->max_db_size = block->x64_size;
@@ -262,15 +302,30 @@ dynablock_t* DBGetBlock(x64emu_t* emu, uintptr_t addr, int create, int is32bits)
     if(is_inhotpage && !BOX64ENV(dynarec_dirty))
         return NULL;
     dynablock_t *db = internalDBGetBlock(emu, addr, addr, create, 1, is32bits, 1);
-    if(db && db->done && db->block && (db->dirty || getNeedTest(addr))) {
-        if (db->always_test) SchedYield(); // just calm down...
-        uint32_t hash = db->dirty?(~db->hash):X31_hash_code(db->x64_addr, db->x64_size);
-        if(is_inhotpage && hash!=db->hash)
-            return NULL;    // will be handle when hotpage is over
+    if(db && db->done && db->block && getNeedTest(addr)) {
+        //if (db->always_test) SchedYield(); // just calm down...
+        uint32_t hash = X31_hash_code(db->x64_addr, db->x64_size);
         int need_lock = mutex_trylock(&my_context->mutex_dyndump);
         if(hash!=db->hash) {
+            if(is_inhotpage && db->previous) {
+                // check alternate
+                if(db->previous && !db->dirty && X31_hash_code(db->previous->x64_addr, db->previous->x64_size)==db->previous->hash) {
+                    db = SwitchDynablock(db, need_lock);
+                    if(!addJumpTableIfDefault64(db->x64_addr, (db->always_test)?db->jmpnext:db->block)) {
+                        FreeDynablock(db, 0, 0);
+                        db = getDB(addr);
+                        MarkDynablock(db);   // just in case...
+                    }
+                    if(!need_lock)
+                        mutex_unlock(&my_context->mutex_dyndump);
+                    return db;
+                }
+                if(!need_lock)
+                    mutex_unlock(&my_context->mutex_dyndump);
+                return NULL;    // will be handled when hotpage is over
+            }
             db->done = 0;   // invalidating the block
-            dynarec_log(LOG_DEBUG, "Invalidating block %p from %p:%p (hash:%X/%X, always_test:%d) for %p\n", db, db->x64_addr, db->x64_addr+db->x64_size-1, hash, db->hash, db->always_test,(void*)addr);
+            dynarec_log(LOG_DEBUG, "Invalidating block %p from %p:%p (hash:%X/%X, always_test:%d, previous=%p/hash=%X) for %p\n", db, db->x64_addr, db->x64_addr+db->x64_size-1, hash, db->hash, db->always_test,db->previous, db->previous?db->previous->hash:0,(void*)addr);
             // Free db, it's now invalid!
             dynablock_t* old = InvalidDynablock(db, need_lock);
             // start again... (will create a new block)
@@ -283,12 +338,15 @@ dynablock_t* DBGetBlock(x64emu_t* emu, uintptr_t addr, int create, int is32bits)
                 FreeInvalidDynablock(old, need_lock);
         } else {
             if(is_inhotpage) {
+                db->always_test = 2;
                 // log?
             } else {
                 dynarec_log(LOG_DEBUG, "Validating block %p from %p:%p (hash:%X, always_test:%d) for %p\n", db, db->x64_addr, db->x64_addr+db->x64_size-1, db->hash, db->always_test, (void*)addr);
-                if(db->always_test)
+                if(db->always_test) {
+                    if(db->always_test==2)
+                        db->always_test = 0;
                     protectDB((uintptr_t)db->x64_addr, db->x64_size);
-                else {
+                } else {
                     #ifdef ARCH_NOP
                     if(db->callret_size) {
                         // mark all callrets to UDF
@@ -317,7 +375,7 @@ dynablock_t* DBAlternateBlock(x64emu_t* emu, uintptr_t addr, uintptr_t filladdr,
     if(db && db->done && db->block && (db->dirty || getNeedTest(filladdr))) {
         if (db->always_test) SchedYield(); // just calm down...
         int need_lock = mutex_trylock(&my_context->mutex_dyndump);
-        uint32_t hash = db->dirty?(~db->hash):X31_hash_code(db->x64_addr, db->x64_size);
+        uint32_t hash = X31_hash_code(db->x64_addr, db->x64_size);
         if(hash!=db->hash) {
             db->done = 0;   // invalidating the block
             dynarec_log(LOG_DEBUG, "Invalidating alt block %p from %p:%p (hash:%X/%X) for %p\n", db, db->x64_addr, db->x64_addr+db->x64_size, hash, db->hash, (void*)addr);
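
The DBGetBlock change above avoids a full recompilation when self-modifying code inside a hotpage flips back to bytes that were already compiled: if the live block's hash no longer matches but its previous block hashes clean, SwitchDynablock swaps the two versions instead of invalidating. Here is a condensed, editorial sketch of that decision only, with a hypothetical block_t standing in for dynablock_t and hash_bytes standing in for X31_hash_code; the real code also handles the jump table, the dyndump mutex, and the dirty/always_test flags shown above.

#include <stddef.h>
#include <stdint.h>

/* Hypothetical, reduced stand-in for dynablock_t: only the fields the
   decision below needs. The real structure is in dynablock_private.h. */
typedef struct block_s {
    const uint8_t  *x64_addr;   /* guest code this block was built from */
    size_t          x64_size;
    uint32_t        hash;       /* hash of the guest bytes at build time */
    struct block_s *previous;   /* older compiled version, if any */
} block_t;

/* Stand-in for X31_hash_code(). */
static uint32_t hash_bytes(const uint8_t *p, size_t sz)
{
    uint32_t h = 0;
    for (size_t i = 0; i < sz; ++i)
        h = h * 31 + p[i];
    return h;
}

/* Returns the version to run: the current block if its bytes still match,
   the previous one if the guest code flipped back to that version while
   inside a hotpage (the SwitchDynablock path), or NULL meaning
   "invalidate and rebuild" / "wait until the hotpage cools down". */
static block_t *pick_block(block_t *db, int in_hotpage)
{
    uint32_t hash = hash_bytes(db->x64_addr, db->x64_size);
    if (hash == db->hash)
        return db;
    if (in_hotpage && db->previous) {
        block_t *prev = db->previous;
        if (hash_bytes(prev->x64_addr, prev->x64_size) == prev->hash)
            return prev;
        return NULL;
    }
    return NULL;
}

int main(void)
{
    uint8_t guest[4] = { 0x90, 0x90, 0x90, 0xC3 };  /* made-up guest bytes */
    block_t old_ver = { guest, sizeof guest, hash_bytes(guest, sizeof guest), NULL };
    guest[0] = 0xCC;                                /* code gets patched once... */
    block_t new_ver = { guest, sizeof guest, hash_bytes(guest, sizeof guest), &old_ver };
    guest[0] = 0x90;                                /* ...then patched back */
    /* Inside a hotpage, the old compile is reused instead of rebuilding. */
    return pick_block(&new_ver, 1) == &old_ver ? 0 : 1;
}
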
diff --git a/src/dynarec/dynablock_private.h b/src/dynarec/dynablock_private.h
index db142c25..5a7938ea 100644
--- a/src/dynarec/dynablock_private.h
+++ b/src/dynarec/dynablock_private.h
@@ -23,7 +23,7 @@ typedef struct dynablock_s {
     uint8_t         done;
     uint8_t         gone;
     uint8_t         dirty;      // if need to be tested as soon as it's created
-    uint8_t         always_test:1;
+    uint8_t         always_test:2;
     uint8_t         is32bits:1;
     int             callret_size;   // size of the array
     int             isize;
diff --git a/src/dynarec/dynarec_native.c b/src/dynarec/dynarec_native.c
index 98aa0e61..9fef7df2 100644
--- a/src/dynarec/dynarec_native.c
+++ b/src/dynarec/dynarec_native.c
@@ -992,7 +992,6 @@ dynablock_t* FillBlock64(uintptr_t addr, int alternate, int is32bits, int inst_m
         //protectDB(addr, end-addr);
     }
     if(getProtection(addr)&PROT_NEVERCLEAN) {
-        block->dirty = 1;
         block->always_test = 1;
     }
     if(block->always_test) {
diff --git a/src/include/custommem.h b/src/include/custommem.h
index 96ebfdd0..28503828 100644
--- a/src/include/custommem.h
+++ b/src/include/custommem.h
@@ -120,6 +120,7 @@ void protectDB(uintptr_t addr, size_t size);
 void protectDBJumpTable(uintptr_t addr, size_t size, void* jump, void* ref);
 void unprotectDB(uintptr_t addr, size_t size, int mark);    // if mark==0, the blocks are not marked as potentially dirty
 void neverprotectDB(uintptr_t addr, size_t size, int mark);
+void unneverprotectDB(uintptr_t addr, size_t size);
 int isprotectedDB(uintptr_t addr, size_t size);
 #endif
 void* find32bitBlock(size_t size);
@@ -143,8 +144,7 @@ int isLockAddress(uintptr_t addr);  // return 1 is the address is used as a LOCK
 int nLockAddressRange(uintptr_t start, size_t size);    // gives the number of lock address for a range
 void getLockAddressRange(uintptr_t start, size_t size, uintptr_t addrs[]);   // fill in the array with the lock addresses in the range (array must be of the correct size)
 
-void SetHotPage(uintptr_t addr);
-void CheckHotPage(uintptr_t addr);
+void CheckHotPage(uintptr_t addr, uint32_t prot);
 int isInHotPage(uintptr_t addr);
 int checkInHotPage(uintptr_t addr);
 #endif
diff --git a/src/libtools/signals.c b/src/libtools/signals.c
index 089557bc..27226e1d 100644
--- a/src/libtools/signals.c
+++ b/src/libtools/signals.c
@@ -1597,7 +1597,7 @@ void my_box64signalhandler(int32_t sig, siginfo_t* info, void * ucntx)
         }
         // access error, unprotect the block (and mark them dirty)
         unprotectDB((uintptr_t)addr, 1, 1);    // unprotect 1 byte... But then, the whole page will be unprotected
-        CheckHotPage((uintptr_t)addr);
+        CheckHotPage((uintptr_t)addr, prot);
         int db_need_test = (db && !BOX64ENV(dynarec_dirty))?getNeedTest((uintptr_t)db->x64_addr):0;
         if(db && ((addr>=db->x64_addr && addr<(db->x64_addr+db->x64_size)) || db_need_test)) {
             emu = getEmuSignal(emu, p, db);
diff --git a/src/tools/env.c b/src/tools/env.c
index 4ce600c9..1680fdfe 100644
--- a/src/tools/env.c
+++ b/src/tools/env.c
@@ -815,7 +815,7 @@ done:
 #else
 #error meh!
 #endif
-#define DYNAREC_VERSION SET_VERSION(0, 0, 2)
+#define DYNAREC_VERSION SET_VERSION(0, 0, 3)
 
 typedef struct DynaCacheHeader_s {
     char sign[10];  //"DynaCache\0"
@@ -928,6 +928,11 @@ void SerializeMmaplist(mapping_t* mapping)
         return;
     if((!mapping->env || !mapping->env->is_dynacache_overridden) && box64env.dynacache!=1)
         return;
+    // don't serialize programs that need dirty=1
+    if(mapping->env && mapping->env->is_dynarec_dirty_overridden && mapping->env->dynarec_dirty)
+        return;
+    if((!mapping->env || !mapping->env->is_dynarec_dirty_overridden) && box64env.dynarec_dirty)
+        return;
     const char* folder = GetDynacacheFolder(mapping);
     if(!folder) return; // no folder, no serialize...
     const char* name = GetMmaplistName(mapping);
@@ -1395,7 +1400,12 @@ int IsAddrNeedReloc(uintptr_t addr)
     uintptr_t start = env->nodynarec_start?env->nodynarec_start:box64env.nodynarec_start;
     if(end && addr>=start && addr<end)
         return 0;
-    #ifdef HAVE_TRACE
+    // don't serialize programs that need dirty=1 or 2 (maybe 1 is ok?)
+    if(env && env->is_dynarec_dirty_overridden && env->dynarec_dirty)
+        return 0;
+    if((!env || !env->is_dynarec_dirty_overridden) && box64env.dynarec_dirty)
+        return 0;
+    #ifdef HAVE_TRACE
     end = env->dynarec_test_end?env->dynarec_test_end:box64env.dynarec_test_end;
     start = env->dynarec_test_start?env->dynarec_test_start:box64env.dynarec_test_start;
     if(end && addr>=start && addr<end)
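
Both env.c hunks encode the same rule twice: skip DynaCache serialization (and the associated relocation tracking) when the effective dynarec_dirty, that is the per-program override if one is set, otherwise the global box64env value, is nonzero. A hedged sketch of that resolution follows, with made-up reduced structures and a hypothetical effective_dynarec_dirty helper; box64 itself keeps the two tests inline as shown above.

#include <stdio.h>

/* Reduced, made-up stand-ins for the env structures used in env.c: only
   the fields involved in the dynarec_dirty decision are modeled. */
typedef struct {
    int is_dynarec_dirty_overridden;
    int dynarec_dirty;
} prog_env_t;

typedef struct {
    int dynarec_dirty;
} global_env_t;

static global_env_t box64env_sketch = { 0 };    /* global default: dirty off */

/* Hypothetical helper: the per-program override wins when present,
   otherwise the global setting applies. The pair of early-return tests
   in SerializeMmaplist()/IsAddrNeedReloc() is equivalent to checking
   whether this value is nonzero. */
static int effective_dynarec_dirty(const prog_env_t *env)
{
    if (env && env->is_dynarec_dirty_overridden)
        return env->dynarec_dirty;
    return box64env_sketch.dynarec_dirty;
}

int main(void)
{
    prog_env_t game = { .is_dynarec_dirty_overridden = 1, .dynarec_dirty = 2 };
    /* A program forced to dirty=2 is never serialized, even though the
       global default is 0. */
    printf("serialize? %s\n", effective_dynarec_dirty(&game) ? "no" : "yes");
    return 0;
}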