Diffstat (limited to 'src')
164 files changed, 2063 insertions, 3316 deletions
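The hunks below migrate box64 from scattered per-setting globals (box64_log, cycle_log, box64_dynarec, ...) and ad-hoc getenv() parsing to a single box64env_t structure read through BOX64ENV() and written through SET_BOX64ENV(), loaded once via LoadEnvVariables()/InitializeEnvFiles() and applied per program with ApplyEnvFileEntry(). The real definitions live in the new env.h/env.c, which are not part of this excerpt; the sketch below is only an illustration of that accessor-macro pattern, with field names, defaults, and the env_int() helper inferred from the call sites visible in the diff (assumptions, not the actual box64 header).

```c
/* sketch.c - illustrative only; box64's real box64env_t/BOX64ENV live in its
 * env.h/env.c, which are outside this excerpt. Fields and defaults here are
 * assumptions based on the call sites visible in the hunks. */
#include <stdio.h>
#include <stdlib.h>

typedef struct box64env_s {
    int   log;          /* BOX64_LOG         (was global box64_log)     */
    int   dynarec;      /* BOX64_DYNAREC     (was global box64_dynarec) */
    int   rolling_log;  /* BOX64_ROLLING_LOG (was global cycle_log)     */
    char* trace_file;   /* BOX64_TRACE_FILE  (was read via getenv())    */
} box64env_t;

box64env_t box64env;    /* one shared instance, declared extern elsewhere */

/* Call sites read/write through these macros, so the parsing code and the
 * storage layout can change without touching the emulator or the dynarec. */
#define BOX64ENV(name)          (box64env.name)
#define SET_BOX64ENV(name, val) (box64env.name = (val))

/* Hypothetical helper: parse an integer env. var. with a fallback default. */
static int env_int(const char* name, int def)
{
    const char* p = getenv(name);
    return p ? atoi(p) : def;
}

int main(void)
{
    /* Centralized loading replaces the per-variable getenv() blocks that the
     * diff removes from LoadLogEnv()/LoadEnvVars(); defaults are illustrative. */
    SET_BOX64ENV(log, env_int("BOX64_LOG", 1));
    SET_BOX64ENV(dynarec, env_int("BOX64_DYNAREC", 1));
    SET_BOX64ENV(rolling_log, env_int("BOX64_ROLLING_LOG", 0));
    SET_BOX64ENV(trace_file, getenv("BOX64_TRACE_FILE"));

    /* Rule carried over from the removed code: a value of 1 means the
     * default rolling-log depth of 16 entries. */
    if (BOX64ENV(rolling_log) == 1)
        SET_BOX64ENV(rolling_log, 16);

    printf("log=%d dynarec=%d rolling_log=%d trace_file=%s\n",
           BOX64ENV(log), BOX64ENV(dynarec), BOX64ENV(rolling_log),
           BOX64ENV(trace_file) ? BOX64ENV(trace_file) : "(none)");
    return 0;
}
```

With this shape, the mechanical substitutions in the diff read naturally: a hot-path check such as `if(BOX64ENV(dynarec_safeflags)>1)` compiles to a plain struct-field load, and `SET_BOX64ENV(dynarec, 0)` replaces direct assignments like `box64_dynarec = 0`.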
diff --git a/src/box64context.c b/src/box64context.c index 94df1142..7cb2916c 100644 --- a/src/box64context.c +++ b/src/box64context.c @@ -19,7 +19,6 @@ #include "wrapper.h" #include "x64emu.h" #include "signals.h" -#include "rcfile.h" #include "gltools.h" #include "rbtree.h" #include "dynarec.h" @@ -49,7 +48,6 @@ void finiAllHelpers(box64context_t* context) static int finied = 0; if(finied) return; - DeleteParams(); fini_pthread_helper(context); fini_signal_helper(); fini_bridge_helper(); @@ -181,8 +179,8 @@ static void atfork_child_box64context(void) void freeCycleLog(box64context_t* ctx) { - if(cycle_log) { - for(int i=0; i<cycle_log; ++i) { + if(BOX64ENV(rolling_log)) { + for(int i=0; i<BOX64ENV(rolling_log); ++i) { box_free(ctx->log_call[i]); box_free(ctx->log_ret[i]); } @@ -194,10 +192,10 @@ void freeCycleLog(box64context_t* ctx) } void initCycleLog(box64context_t* context) { - if(cycle_log) { - context->log_call = (char**)box_calloc(cycle_log, sizeof(char*)); - context->log_ret = (char**)box_calloc(cycle_log, sizeof(char*)); - for(int i=0; i<cycle_log; ++i) { + if(BOX64ENV(rolling_log)) { + context->log_call = (char**)box_calloc(BOX64ENV(rolling_log), sizeof(char*)); + context->log_ret = (char**)box_calloc(BOX64ENV(rolling_log), sizeof(char*)); + for(int i=0; i<BOX64ENV(rolling_log); ++i) { context->log_call[i] = (char*)box_calloc(256, 1); context->log_ret[i] = (char*)box_calloc(128, 1); } diff --git a/src/core.c b/src/core.c index 85c60500..83f09419 100644 --- a/src/core.c +++ b/src/core.c @@ -37,75 +37,38 @@ #include "librarian.h" #include "x64run.h" #include "symbols.h" -#include "rcfile.h" #include "emu/x64run_private.h" #include "elfs/elfloader_private.h" #include "library.h" #include "core.h" +#include "env.h" box64context_t *my_context = NULL; +extern box64env_t box64env; + int box64_quit = 0; int box64_exit_code = 0; -int box64_log = LOG_INFO; //LOG_NONE; -int box64_dump = 0; -int box64_nobanner = 0; int box64_stdout_no_w = 0; -int box64_dynarec_log = LOG_NONE; uintptr_t box64_pagesize; -uintptr_t box64_load_addr = 0; -int box64_nosandbox = 0; -int box64_inprocessgpu = 0; -int box64_cefdisablegpu = 0; -int box64_cefdisablegpucompositor = 0; -int box64_malloc_hack = 0; -int box64_dynarec_test = 0; -int box64_x11sync = 0; path_collection_t box64_addlibs = {0}; -int box64_maxcpu = 0; -int box64_maxcpu_immutable = 0; int box64_is32bits = 0; -int box64_cputype = 0; -#if defined(SD845) || defined(SD888) || defined(SD8G2) || defined(TEGRAX1) -int box64_mmap32 = 1; -#else -int box64_mmap32 = 0; -#endif -int box64_ignoreint3 = 0; int box64_rdtsc = 0; -int box64_rdtsc_1ghz = 0; uint8_t box64_rdtsc_shift = 0; -char* box64_insert_args = NULL; -char* box64_new_args = NULL; +int box64_mapclean = 0; +int box64_zoom = 0; +int box64_steam = 0; +int box64_steamcmd = 0; +int box64_wine = 0; +int box64_musl = 0; +char* box64_custom_gstreamer = NULL; +int box64_tcmalloc_minimal = 0; +uintptr_t fmod_smc_start = 0; +uintptr_t fmod_smc_end = 0; +uint32_t default_gs = 0x53; +uint32_t default_fs = 0; +int box64_isglibc234 = 0; + #ifdef DYNAREC -int box64_dynarec = 1; -int box64_dynarec_dump = 0; -int box64_dynarec_forced = 0; -int box64_dynarec_bigblock = 1; -int box64_dynarec_forward = 128; -int box64_dynarec_strongmem = 0; -int box64_dynarec_weakbarrier = 1; -int box64_dynarec_pause = 0; -int box64_dynarec_x87double = 0; -int box64_dynarec_div0 = 0; -int box64_dynarec_fastnan = 1; -int box64_dynarec_fastround = 1; -int box64_dynarec_safeflags = 1; -int box64_dynarec_callret = 0; -int 
box64_dynarec_bleeding_edge = 1; -int box64_dynarec_tbb = 1; -int box64_dynarec_wait = 1; -int box64_dynarec_missing = 0; -int box64_dynarec_aligned_atomics = 0; -int box64_dynarec_nativeflags = 1; -uintptr_t box64_nodynarec_start = 0; -uintptr_t box64_nodynarec_end = 0; -uintptr_t box64_dynarec_test_start = 0; -uintptr_t box64_dynarec_test_end = 0; -int box64_dynarec_gdbjit = 0; -int box64_dynarec_df = 1; -int box64_dynarec_perf_map = 0; -int box64_dynarec_perf_map_fd = -1; -int box64_dynarec_dirty = 0; #ifdef ARM64 int arm64_asimd = 0; int arm64_aes = 0; @@ -143,73 +106,12 @@ int la64_lam_bh = 0; int la64_lamcas = 0; int la64_scq = 0; #endif -#else //DYNAREC -int box64_dynarec = 0; #endif -int box64_libcef = 1; -int box64_jvm = 1; -int box64_unityplayer = 1; -int box64_sdl2_jguid = 0; -int dlsym_error = 0; -int cycle_log = 0; + #ifdef HAVE_TRACE -int trace_xmm = 0; -int trace_emm = 0; -int trace_regsdiff = 0; -uint64_t start_cnt = 0; uintptr_t trace_start = 0, trace_end = 0; char* trace_func = NULL; -char* trace_init = NULL; -char* box64_trace = NULL; -#ifdef DYNAREC -int box64_dynarec_trace = 0; #endif -#endif -int box64_x11threads = 0; -int box64_x11glx = 1; -int allow_missing_libs = 0; -int box64_prefer_emulated = 0; -int box64_prefer_wrapped = 0; -int box64_wrap_egl = 0; -int box64_sse_flushto0 = 0; -int box64_x87_no80bits = 0; -int box64_sync_rounding = 0; -int box64_shaext = 1; -int box64_sse42 = 1; -#if defined(DYNAREC) && defined(ARM64) -int box64_avx = 1; -int box64_avx2 = 1; -#else -int box64_avx = 0; -int box64_avx2 = 0; -#endif -int fix_64bit_inodes = 0; -int box64_dummy_crashhandler = 1; -int box64_mapclean = 0; -int box64_zoom = 0; -int box64_steam = 0; -int box64_steamcmd = 0; -int box64_wine = 0; -int box64_musl = 0; -int box64_nopulse = 0; -int box64_nogtk = 0; -int box64_novulkan = 0; -int box64_showsegv = 0; -int box64_showbt = 0; -int box64_isglibc234 = 0; -#ifdef BAD_SIGNAL -int box64_futex_waitv = 0; -#else -int box64_futex_waitv = 1; -#endif -char* box64_libGL = NULL; -char* box64_custom_gstreamer = NULL; -uintptr_t fmod_smc_start = 0; -uintptr_t fmod_smc_end = 0; -uint32_t default_gs = 0x53; -uint32_t default_fs = 0; -int jit_gdb = 0; -int box64_tcmalloc_minimal = 0; FILE* ftrace = NULL; char* ftrace_name = NULL; @@ -217,7 +119,7 @@ int ftrace_has_pid = 0; void openFTrace(const char* newtrace, int reopen) { - const char* p = newtrace?newtrace:getenv("BOX64_TRACE_FILE"); + const char* p = newtrace?newtrace:BOX64ENV(trace_file); #ifndef MAX_PATH #define MAX_PATH 4096 #endif @@ -272,7 +174,7 @@ void openFTrace(const char* newtrace, int reopen) if (!reopen) ftrace_name = box_strdup(p); /*fclose(ftrace); ftrace = NULL;*/ - if(!box64_nobanner) { + if (!BOX64ENV(nobanner)) { printf("BOX64 Trace %s to \"%s\"\n", append?"appended":"redirected", p); box64_stdout_no_w = 1; } @@ -304,7 +206,7 @@ void printf_ftrace(const char* fmt, ...) 
void my_prepare_fork() { if (ftrace_has_pid && ftrace && (ftrace != stdout) && (ftrace != stderr)) { - printf_log(LOG_INFO, "%04d|Closed trace file of %s at prepare\n", GetTID(), GetLastApplyName()); + printf_log(LOG_INFO, "%04d|Closed trace file of %s at prepare\n", GetTID(), GetLastApplyEntryName()); fclose(ftrace); } } @@ -313,7 +215,7 @@ void my_parent_fork() { if (ftrace_has_pid) { openFTrace(NULL, 1); - printf_log(LOG_INFO, "%04d|Reopened trace file of %s at parent\n", GetTID(), GetLastApplyName()); + printf_log(LOG_INFO, "%04d|Reopened trace file of %s at parent\n", GetTID(), GetLastApplyEntryName()); } } @@ -321,135 +223,24 @@ void my_child_fork() { if (ftrace_has_pid) { openFTrace(NULL, 0); - printf_log(LOG_INFO, "%04d|Created trace file of %s at child\n", GetTID(), GetLastApplyName()); + printf_log(LOG_INFO, "%04d|Created trace file of %s at child\n", GetTID(), GetLastApplyEntryName()); } } const char* getCpuName(); int getNCpu(); + #ifdef DYNAREC void GatherDynarecExtensions() { #ifdef ARM64 -/* -HWCAP_FP - Functionality implied by ID_AA64PFR0_EL1.FP == 0b0000. -HWCAP_ASIMD - Functionality implied by ID_AA64PFR0_EL1.AdvSIMD == 0b0000. -HWCAP_EVTSTRM - The generic timer is configured to generate events at a frequency of - approximately 10KHz. -HWCAP_AES - Functionality implied by ID_AA64ISAR0_EL1.AES == 0b0001. => AESE, AESD, AESMC, and AESIMC instructions are implemented -HWCAP_PMULL - Functionality implied by ID_AA64ISAR0_EL1.AES == 0b0010. => AESE, AESD, AESMC, and AESIMC instructions are implemented plus PMULL/PMULL2 instructions operating on 64-bit data quantities. -HWCAP_SHA1 - Functionality implied by ID_AA64ISAR0_EL1.SHA1 == 0b0001. => SHA1C, SHA1P, SHA1M, SHA1H, SHA1SU0, and SHA1SU1 instructions implemented. -HWCAP_SHA2 - Functionality implied by ID_AA64ISAR0_EL1.SHA2 == 0b0001. => SHA256H, SHA256H2, SHA256SU0 and SHA256SU1 instructions implemented. -HWCAP_CRC32 - Functionality implied by ID_AA64ISAR0_EL1.CRC32 == 0b0001. => CRC32B, CRC32H, CRC32W, CRC32X, CRC32CB, CRC32CH, CRC32CW, and CRC32CX instructions implemented. -HWCAP_ATOMICS - Functionality implied by ID_AA64ISAR0_EL1.Atomic == 0b0010. => LDADD, LDCLR, LDEOR, LDSET, LDSMAX, LDSMIN, LDUMAX, LDUMIN, CAS, CASP, and SWP instructions implemented. -HWCAP_FPHP - Functionality implied by ID_AA64PFR0_EL1.FP == 0b0001. -HWCAP_ASIMDHP - Functionality implied by ID_AA64PFR0_EL1.AdvSIMD == 0b0001. -HWCAP_CPUID - EL0 access to certain ID registers is available. - These ID registers may imply the availability of features. -HWCAP_ASIMDRDM - Functionality implied by ID_AA64ISAR0_EL1.RDM == 0b0001. => SQRDMLAH and SQRDMLSH instructions implemented. -HWCAP_JSCVT - Functionality implied by ID_AA64ISAR1_EL1.JSCVT == 0b0001. => The FJCVTZS instruction is implemented. -HWCAP_FCMA - Functionality implied by ID_AA64ISAR1_EL1.FCMA == 0b0001. => The FCMLA and FCADD instructions are implemented. -HWCAP_LRCPC - Functionality implied by ID_AA64ISAR1_EL1.LRCPC == 0b0001. => LDAPR and variants -HWCAP_DCPOP - Functionality implied by ID_AA64ISAR1_EL1.DPB == 0b0001. -HWCAP_SHA3 - Functionality implied by ID_AA64ISAR0_EL1.SHA3 == 0b0001. => EOR3, RAX1, XAR, and BCAX instructions implemented. -HWCAP_SM3 - Functionality implied by ID_AA64ISAR0_EL1.SM3 == 0b0001. => SM3SS1, SM3TT1A, SM3TT1B, SM3TT2A, SM3TT2B, SM3PARTW1, and SM3PARTW2 instructions implemented. -HWCAP_SM4 - Functionality implied by ID_AA64ISAR0_EL1.SM4 == 0b0001. => SM4E and SM4EKEY instructions implemented. -HWCAP_ASIMDDP - Functionality implied by ID_AA64ISAR0_EL1.DP == 0b0001. 
=> UDOT and SDOT instructions implemented. -HWCAP_SHA512 - Functionality implied by ID_AA64ISAR0_EL1.SHA2 == 0b0010. => SHA512H, SHA512H2, SHA512SU0, and SHA512SU1 instructions implemented. -HWCAP_SVE - Functionality implied by ID_AA64PFR0_EL1.SVE == 0b0001. -HWCAP_ASIMDFHM - Functionality implied by ID_AA64ISAR0_EL1.FHM == 0b0001. => FMLAL and FMLSL instructions are implemented. -HWCAP_DIT - Functionality implied by ID_AA64PFR0_EL1.DIT == 0b0001. -HWCAP_USCAT - Functionality implied by ID_AA64MMFR2_EL1.AT == 0b0001. -HWCAP_ILRCPC - Functionality implied by ID_AA64ISAR1_EL1.LRCPC == 0b0010. => The LDAPUR*, STLUR*, and LDAPR* instructions are implemented. -HWCAP_FLAGM - Functionality implied by ID_AA64ISAR0_EL1.TS == 0b0001. -HWCAP_SSBS - Functionality implied by ID_AA64PFR1_EL1.SSBS == 0b0010. => AArch64 provides the PSTATE.SSBS mechanism to mark regions that are Speculative Store Bypassing Safe, and the MSR and MRS instructions to directly read and write the PSTATE.SSBS field. -HWCAP_SB - Functionality implied by ID_AA64ISAR1_EL1.SB == 0b0001. => SB instruction is implemented. -HWCAP_PACA - Functionality implied by ID_AA64ISAR1_EL1.APA == 0b0001 or - ID_AA64ISAR1_EL1.API == 0b0001. -HWCAP_PACG - Functionality implied by ID_AA64ISAR1_EL1.GPA == 0b0001 or => Generic Authentication using the QARMA algorithm is implemented. This includes the PACGA instruction. - ID_AA64ISAR1_EL1.GPI == 0b0001. -HWCAP2_DCPODP - Functionality implied by ID_AA64ISAR1_EL1.DPB == 0b0010. => DC CVAP and DC CVADP supported -HWCAP2_SVE2 - Functionality implied by ID_AA64ZFR0_EL1.SVEVer == 0b0001. -HWCAP2_SVEAES - Functionality implied by ID_AA64ZFR0_EL1.AES == 0b0001. -HWCAP2_SVEPMULL - Functionality implied by ID_AA64ZFR0_EL1.AES == 0b0010. -HWCAP2_SVEBITPERM - Functionality implied by ID_AA64ZFR0_EL1.BitPerm == 0b0001. -HWCAP2_SVESHA3 - Functionality implied by ID_AA64ZFR0_EL1.SHA3 == 0b0001. -HWCAP2_SVESM4 - Functionality implied by ID_AA64ZFR0_EL1.SM4 == 0b0001. -HWCAP2_FLAGM2 - Functionality implied by ID_AA64ISAR0_EL1.TS == 0b0010. => CFINV, RMIF, SETF16, SETF8, AXFLAG, and XAFLAG instructions are implemented. -HWCAP2_FRINT - Functionality implied by ID_AA64ISAR1_EL1.FRINTTS == 0b0001. => FRINT32Z, FRINT32X, FRINT64Z, and FRINT64X instructions are implemented. -HWCAP2_SVEI8MM - Functionality implied by ID_AA64ZFR0_EL1.I8MM == 0b0001. -HWCAP2_SVEF32MM - Functionality implied by ID_AA64ZFR0_EL1.F32MM == 0b0001. -HWCAP2_SVEF64MM - Functionality implied by ID_AA64ZFR0_EL1.F64MM == 0b0001. -HWCAP2_SVEBF16 - Functionality implied by ID_AA64ZFR0_EL1.BF16 == 0b0001 -HWCAP2_I8MM - Functionality implied by ID_AA64ISAR1_EL1.I8MM == 0b0001. => SMMLA, SUDOT, UMMLA, USMMLA, and USDOT instructions are implemented -HWCAP2_BF16 - Functionality implied by ID_AA64ISAR1_EL1.BF16 == 0b0001. => BFDOT, BFMLAL, BFMLAL2, BFMMLA, BFCVT, and BFCVT2 instructions are implemented. -HWCAP2_DGH - Functionality implied by ID_AA64ISAR1_EL1.DGH == 0b0001. => Data Gathering Hint is implemented. -HWCAP2_RNG - Functionality implied by ID_AA64ISAR0_EL1.RNDR == 0b0001. -HWCAP2_BTI - Functionality implied by ID_AA64PFR0_EL1.BT == 0b0001. -HWCAP2_MTE - Functionality implied by ID_AA64PFR1_EL1.MTE == 0b0010. => Full Memory Tagging Extension is implemented. -HWCAP2_ECV - Functionality implied by ID_AA64MMFR0_EL1.ECV == 0b0001. -HWCAP2_AFP - AFP = 0b0001 => The AArch64-FPCR.{AH, FIZ, NEP} fields are supported. (Alternate floating-point behavior) -*/ unsigned long hwcap = real_getauxval(AT_HWCAP); - if(!hwcap) // no HWCap: provide a default... 
+ if(!hwcap) hwcap = HWCAP_ASIMD; // first, check all needed extensions, lif half, edsp and fastmult if((hwcap&HWCAP_ASIMD) == 0) { printf_log(LOG_INFO, "Missing ASMID cpu support, disabling Dynarec\n"); - box64_dynarec=0; + SET_BOX64ENV(dynarec, 0); return; } if(hwcap&HWCAP_CRC32) @@ -529,7 +320,7 @@ HWCAP2_AFP printf_log(LOG_INFO, "with extension LSX LASX"); } else { printf_log(LOG_INFO, "\nMissing LSX and/or LASX extension support, disabling Dynarec\n"); - box64_dynarec = 0; + SET_BOX64ENV(dynarec, 0); return; } @@ -611,7 +402,7 @@ void computeRDTSC() printf_log(LOG_INFO, "Will use time-based emulation for RDTSC, even if hardware counters are available\n"); #endif uint64_t freq = ReadTSCFrequency(NULL); - if(freq<((box64_rdtsc_1ghz)?1000000000LL:1000000)) { + if(freq<((BOX64ENV(rdtsc_1ghz))?1000000000LL:1000000)) { box64_rdtsc = 1; if(hardware) printf_log(LOG_INFO, "Hardware counter is too slow (%d kHz), not using it\n", freq/1000); hardware = 0; @@ -642,710 +433,23 @@ void computeRDTSC() EXPORTDYN void LoadLogEnv() { - ftrace = stdout; - box64_nobanner = isatty(fileno(stdout))?0:1; - const char *p = getenv("BOX64_NOBANNER"); - if(p) { - if(strlen(p)==1) { - if(p[0]>='0' && p[0]<='1') - box64_nobanner = p[0]-'0'; - } - } - // grab BOX64_TRACE_FILE envvar, and change %pid to actual pid is present in the name + // grab BOX64ENV(trace_file), and change %pid to actual pid is present in the name openFTrace(NULL, 0); - box64_log = ftrace_name?LOG_INFO:(isatty(fileno(stdout))?LOG_INFO:LOG_NONE); //default LOG value different if stdout is redirected or not - p = getenv("BOX64_LOG"); - if(p) { - if(strlen(p)==1) { - if(p[0]>='0'+LOG_NONE && p[0]<='0'+LOG_NEVER) { - box64_log = p[0]-'0'; - if(box64_log == LOG_NEVER) { - --box64_log; - box64_dump = 1; - } - } - } else { - if(!strcasecmp(p, "NONE")) - box64_log = LOG_NONE; - else if(!strcasecmp(p, "INFO")) - box64_log = LOG_INFO; - else if(!strcasecmp(p, "DEBUG")) - box64_log = LOG_DEBUG; - else if(!strcasecmp(p, "DUMP")) { - box64_log = LOG_DEBUG; - box64_dump = 1; - } - } - if(!box64_nobanner) - printf_log(LOG_INFO, "Debug level is %d\n", box64_log); - } - if((box64_nobanner || box64_log) && ftrace==stdout) + + if ((BOX64ENV(nobanner) || BOX64ENV(log)) && ftrace==stdout) box64_stdout_no_w = 1; #if !defined(DYNAREC) && (defined(ARM64) || defined(RV64) || defined(LA64)) printf_log(LOG_INFO, "Warning: DynaRec is available on this host architecture, an interpreter-only build is probably not intended.\n"); #endif - p = getenv("BOX64_ROLLING_LOG"); - if(p) { - int cycle = 0; - if(sscanf(p, "%d", &cycle)==1) - cycle_log = cycle; - if(cycle_log==1) - cycle_log = 16; - if(cycle_log<0) - cycle_log = 0; - if(cycle_log && box64_log>LOG_INFO) { - cycle_log = 0; - printf_log(LOG_NONE, "Incompatible Rolling log and Debug Log, disabling Rolling log\n"); - } - } - if(!box64_nobanner && cycle_log) - printf_log(LOG_INFO, "Rolling log, showing last %d function call on signals\n", cycle_log); - p = getenv("BOX64_DUMP"); - if(p) { - if(strlen(p)==1) { - if(p[0]>='0' && p[0]<='1') - box64_dump = p[0]-'0'; - } - } - if(!box64_nobanner && box64_dump) - printf_log(LOG_INFO, "Elf Dump if ON\n"); -#ifdef DYNAREC - #ifdef ARM64 - // unaligned atomic (with restriction) is supported in hardware - /*if(arm64_uscat) - box64_dynarec_aligned_atomics = 1;*/ // the unaligned support is not good enough for x86 emulation, so diabling - #endif - p = getenv("BOX64_DYNAREC_DUMP"); - if(p) { - if(strlen(p)==1) { - if (p[0] >= '0' && p[0] <= '2') - box64_dynarec_dump = p[0]-'0'; - } - if 
(box64_dynarec_dump) printf_log(LOG_INFO, "Dynarec blocks are dumped%s\n", (box64_dynarec_dump>1)?" in color":""); - } - p = getenv("BOX64_DYNAREC_LOG"); - if(p) { - if(strlen(p)==1) { - if((p[0]>='0'+LOG_NONE) && (p[0]<='0'+LOG_NEVER)) - box64_dynarec_log = p[0]-'0'; - } else { - if(!strcasecmp(p, "NONE")) - box64_dynarec_log = LOG_NONE; - else if(!strcasecmp(p, "INFO")) - box64_dynarec_log = LOG_INFO; - else if(!strcasecmp(p, "DEBUG")) - box64_dynarec_log = LOG_DEBUG; - else if(!strcasecmp(p, "VERBOSE")) - box64_dynarec_log = LOG_VERBOSE; - } - printf_log(LOG_INFO, "Dynarec log level is %d\n", box64_dynarec_log); - } - p = getenv("BOX64_DYNAREC"); - if(p) { - if(strlen(p)==1) { - if(p[0]>='0' && p[0]<='1') - box64_dynarec = p[0]-'0'; - } - printf_log(LOG_INFO, "Dynarec is %s\n", box64_dynarec?"on":"off"); - } - p = getenv("BOX64_DYNAREC_FORCED"); - if(p) { - if(strlen(p)==1) { - if(p[0]>='0' && p[0]<='1') - box64_dynarec_forced = p[0]-'0'; - } - if(box64_dynarec_forced) - printf_log(LOG_INFO, "Dynarec is forced on all addresses\n"); - } - p = getenv("BOX64_DYNAREC_BIGBLOCK"); - if(p) { - if(strlen(p)==1) { - if(p[0]>='0' && p[0]<='3') - box64_dynarec_bigblock = p[0]-'0'; - } - if(!box64_dynarec_bigblock) - printf_log(LOG_INFO, "Dynarec will not try to make big block\n"); - else if (box64_dynarec_bigblock>1) - printf_log(LOG_INFO, "Dynarec will try to make bigger blocks%s\n", (box64_dynarec_bigblock>2)?" even on non-elf memory":""); + char* p; - } - p = getenv("BOX64_DYNAREC_FORWARD"); - if(p) { - int val = -1; - if(sscanf(p, "%d", &val)==1) { - if(val>=0) - box64_dynarec_forward = val; - } - if(box64_dynarec_forward) - printf_log(LOG_INFO, "Dynarec will continue block for %d bytes on forward jump\n", box64_dynarec_forward); - else - printf_log(LOG_INFO, "Dynarec will not continue block on forward jump\n"); - } - p = getenv("BOX64_DYNAREC_STRONGMEM"); - if(p) { - if(strlen(p)==1) { - if(p[0]>='0' && p[0]<='4') - box64_dynarec_strongmem = p[0]-'0'; - } - if(box64_dynarec_strongmem) - printf_log(LOG_INFO, "Dynarec will try to emulate a strong memory model%s\n", (box64_dynarec_strongmem==1)?" with limited performance loss":((box64_dynarec_strongmem>1)?" with more performance loss":"")); - } - p = getenv("BOX64_DYNAREC_WEAKBARRIER"); - if (p) { - if (strlen(p) == 1) { - if (p[0] >= '0' && p[0] <= '2') - box64_dynarec_weakbarrier = p[0] - '0'; - } - if (box64_dynarec_weakbarrier) - printf_log(LOG_INFO, "Dynarec will try to use weaker memory barriers to reduce the performance loss introduce by strong memory emulation\n"); - else - printf_log(LOG_INFO, "Dynarec will not use weakbarrier on strong memory emulation\n"); - } -#ifdef ARM64 - p = getenv("BOX64_DYNAREC_PAUSE"); - if (p) { - if (strlen(p) == 1) { - if (p[0] >= '0' && p[0] <= '3') - box64_dynarec_pause = p[0] - '0'; - } - if (box64_dynarec_pause) - printf_log(LOG_INFO, "Dynarec will use %s to emulate pause instruction\n", - box64_dynarec_pause == 1 ? "yield" : (box64_dynarec_pause == 2 ? 
"wfi" : "wfe")); - else - printf_log(LOG_INFO, "Dynarec will generate nothing for the pause instuction\n"); - } -#endif - p = getenv("BOX64_DYNAREC_X87DOUBLE"); - if(p) { - if(strlen(p)==1) { - if(p[0]>='0' && p[0]<='1') - box64_dynarec_x87double = p[0]-'0'; - } - if(box64_dynarec_x87double) - printf_log(LOG_INFO, "Dynarec will use only double for x87 emulation\n"); - } - p = getenv("BOX64_DYNAREC_DIV0"); - if(p) { - if(strlen(p)==1) { - if(p[0]>='0' && p[0]<='1') - box64_dynarec_div0 = p[0]-'0'; - } - if(box64_dynarec_div0) - printf_log(LOG_INFO, "Dynarec will check for divide by 0\n"); - } - p = getenv("BOX64_DYNAREC_FASTNAN"); - if(p) { - if(strlen(p)==1) { - if(p[0]>='0' && p[0]<='1') - box64_dynarec_fastnan = p[0]-'0'; - } - if(!box64_dynarec_fastnan) - printf_log(LOG_INFO, "Dynarec will try to normalize generated NAN\n"); - } - p = getenv("BOX64_DYNAREC_FASTROUND"); - if(p) { - if(strlen(p)==1) { - if(p[0]>='0' && p[0]<='2') - box64_dynarec_fastround = p[0]-'0'; - } - if(!box64_dynarec_fastround) - printf_log(LOG_INFO, "Dynarec will try to generate x86 precise IEEE->int rounding and and set rounding mode for computation\n"); - else if(box64_dynarec_fastround==2) - printf_log(LOG_INFO, "Dynarec will generate x86 very imprecise double->float rounding\n"); - } - p = getenv("BOX64_DYNAREC_SAFEFLAGS"); - if(p) { - if(strlen(p)==1) { - if(p[0]>='0' && p[0]<='2') - box64_dynarec_safeflags = p[0]-'0'; - } - if(!box64_dynarec_safeflags) - printf_log(LOG_INFO, "Dynarec will not play it safe with x64 flags\n"); - else - printf_log(LOG_INFO, "Dynarec will play %s safe with x64 flags\n", (box64_dynarec_safeflags==1)?"moderatly":"it"); - } - p = getenv("BOX64_DYNAREC_CALLRET"); - if(p) { - if(strlen(p)==1) { - if(p[0]>='0' && p[0]<='1') - box64_dynarec_callret = p[0]-'0'; - } - if(box64_dynarec_callret) - printf_log(LOG_INFO, "Dynarec will optimize CALL/RET\n"); - else - printf_log(LOG_INFO, "Dynarec will not optimize CALL/RET\n"); - } - p = getenv("BOX64_DYNAREC_BLEEDING_EDGE"); - if(p) { - if(strlen(p)==1) { - if(p[0]>='0' && p[0]<='1') - box64_dynarec_bleeding_edge = p[0]-'0'; - } - if(!box64_dynarec_bleeding_edge) - printf_log(LOG_INFO, "Dynarec will not detect MonoBleedingEdge\n"); - } - p = getenv("BOX64_DYNAREC_JVM"); - if(p) { - if(strlen(p)==1) { - if(p[0]>='0' && p[0]<='1') - box64_jvm = p[0]-'0'; - } - if(!box64_jvm) - printf_log(LOG_INFO, "Dynarec will not detect libjvm\n"); - } - p = getenv("BOX64_DYNAREC_TBB"); - if(p) { - if(strlen(p)==1) { - if(p[0]>='0' && p[0]<='1') - box64_dynarec_tbb = p[0]-'0'; - } - if(!box64_dynarec_tbb) - printf_log(LOG_INFO, "Dynarec will not detect libtbb\n"); - } - p = getenv("BOX64_DYNAREC_WAIT"); - if(p) { - if(strlen(p)==1) { - if(p[0]>='0' && p[0]<='1') - box64_dynarec_wait = p[0]-'0'; - } - if(!box64_dynarec_wait) - printf_log(LOG_INFO, "Dynarec will not wait for FillBlock to ready and use Interpreter instead\n"); - } - p = getenv("BOX64_DYNAREC_GDBJIT"); - if (p) { - if (strlen(p) == 1) { - if (p[0] >= '0' && p[0] <= '2') - box64_dynarec_gdbjit = p[0] - '0'; - } - if (box64_dynarec_gdbjit) - printf_log(LOG_INFO, "Dynarec will generate debuginfo for gdbjit\n"); - } - p = getenv("BOX64_DYNAREC_PERFMAP"); - if (p) { - if (strlen(p) == 1) { - if (p[0] >= '0' && p[0] <= '1') - box64_dynarec_perf_map = p[0] - '0'; - } - if (box64_dynarec_perf_map) - printf_log(LOG_INFO, "Dynarec will generate map file /tmp/perf-%d.map for Linux perf tool\n", getpid()); - } - p = getenv("BOX64_DYNAREC_DF"); - if(p) { - if(strlen(p)==1) { - if(p[0]>='0' && p[0]<='1') - 
box64_dynarec_df = p[0]-'0'; - } - if(!box64_dynarec_df) - printf_log(LOG_INFO, "Dynarec will not use/generate defered flags\n"); - } - p = getenv("BOX64_DYNAREC_DIRTY"); - if(p) { - if(strlen(p)==1) { - if(p[0]>='0' && p[0]<='1') - box64_dynarec_dirty = p[0]-'0'; - } - if(box64_dynarec_dirty) - printf_log(LOG_INFO, "Dynarec will allow dirty block to continu running\n"); - } - p = getenv("BOX64_DYNAREC_ALIGNED_ATOMICS"); - if(p) { - if(strlen(p)==1) { - if(p[0]>='0' && p[0]<='1') - box64_dynarec_aligned_atomics = p[0]-'0'; - } - if(box64_dynarec_aligned_atomics) - printf_log(LOG_INFO, "Dynarec will generate only aligned atomics code\n"); - } - p = getenv("BOX64_DYNAREC_NATIVEFLAGS"); - if(p) { - if(strlen(p)==1) { - if(p[0]>='0' && p[0]<='1') - box64_dynarec_nativeflags = p[0]-'0'; - } - if(!box64_dynarec_nativeflags) - printf_log(LOG_INFO, "Dynarec will not use native flags if possible\n"); - } - p = getenv("BOX64_DYNAREC_MISSING"); - if(p) { - if(strlen(p)==1) { - if(p[0]>='0' && p[0]<='2') - box64_dynarec_missing = p[0]-'0'; - } - if(box64_dynarec_missing==1) - printf_log(LOG_INFO, "Dynarec will print missing opcodes\n"); - else if (box64_dynarec_missing==2) - printf_log(LOG_INFO, "Dynarec will print fallback to scalar opcodes\n"); - } - p = getenv("BOX64_NODYNAREC"); - if(p) { - if (strchr(p,'-')) { - if(sscanf(p, "%ld-%ld", &box64_nodynarec_start, &box64_nodynarec_end)!=2) { - if(sscanf(p, "0x%lX-0x%lX", &box64_nodynarec_start, &box64_nodynarec_end)!=2) - sscanf(p, "%lx-%lx", &box64_nodynarec_start, &box64_nodynarec_end); - } - printf_log(LOG_INFO, "No dynablock creation that start in the range %p - %p\n", (void*)box64_nodynarec_start, (void*)box64_nodynarec_end); - } - } - p = getenv("BOX64_DYNAREC_TEST"); - if(p) { - if(strlen(p)==1) { - if(p[0]>='0' && p[0]<='1') - box64_dynarec_test = p[0]-'0'; - box64_dynarec_test_start = 0x0; - box64_dynarec_test_end = 0x0; - } else if (strchr(p,'-')) { - if(sscanf(p, "%ld-%ld", &box64_dynarec_test_start, &box64_dynarec_test_end)!=2) { - if(sscanf(p, "0x%lX-0x%lX", &box64_dynarec_test_start, &box64_dynarec_test_end)!=2) - sscanf(p, "%lx-%lx", &box64_dynarec_test_start, &box64_dynarec_test_end); - } - if(box64_dynarec_test_end>box64_dynarec_test_start) { - box64_dynarec_test = 1; - printf_log(LOG_INFO, "Dynarec test in the range %p - %p\n", (void*)box64_dynarec_test_start, (void*)box64_dynarec_test_end); - } else { - box64_dynarec_test = 0; - printf_log(LOG_INFO, "Ignoring BOX64_NODYNAREC=%s (%p-%p)\n", p, (void*)box64_dynarec_test_start, (void*)box64_dynarec_test_end); - } - } - - if(box64_dynarec_test) { - box64_dynarec_fastnan = 0; - box64_dynarec_fastround = 0; - box64_dynarec_x87double = 1; - box64_dynarec_div0 = 1; - box64_dynarec_callret = 0; - #if defined( RV64) || defined(LA64) - box64_dynarec_nativeflags = 0; - #endif - printf_log(LOG_INFO, "Dynarec will compare it's execution with the interpreter (super slow, only for testing)\n"); - } - } + if (!BOX64ENV(nobanner) && BOX64ENV(rolling_log)) + printf_log(LOG_INFO, "Rolling log, showing last %d function call on signals\n", BOX64ENV(rolling_log)); -#endif -#ifdef HAVE_TRACE - p = getenv("BOX64_TRACE_XMM"); - if(p) { - if(strlen(p)==1) { - if(p[0]>='0' && p[0]<='0'+1) - trace_xmm = p[0]-'0'; - } - } - p = getenv("BOX64_TRACE_EMM"); - if(p) { - if(strlen(p)==1) { - if(p[0]>='0' && p[0]<='0'+1) - trace_emm = p[0]-'0'; - } - } - p = getenv("BOX64_TRACE_COLOR"); - if(p) { - if(strlen(p)==1) { - if(p[0]>='0' && p[0]<='0'+1) - trace_regsdiff = p[0]-'0'; - } - } - p = getenv("BOX64_TRACE_START"); 
- if(p) { - char* p2; - start_cnt = strtoll(p, &p2, 10); - printf_log(LOG_INFO, "Will start trace only after %lu instructions\n", start_cnt); - } -#ifdef DYNAREC - p = getenv("BOX64_DYNAREC_TRACE"); - if(p) { - if(strlen(p)==1) { - if(p[0]>='0' && p[0]<='0'+1) - box64_dynarec_trace = p[0]-'0'; - if(box64_dynarec_trace) - printf_log(LOG_INFO, "Dynarec generated code will also print a trace\n"); - } - } -#endif -#endif - // Other BOX64 env. var. - p = getenv("BOX64_LIBCEF"); - if(p) { - if(strlen(p)==1) { - if(p[0]>='0' && p[0]<='1') - box64_libcef = p[0]-'0'; - } - if(!box64_libcef) - printf_log(LOG_INFO, "BOX64 will not detect libcef\n"); - } - p = getenv("BOX64_JVM"); - if(p) { - if(strlen(p)==1) { - if(p[0]>='0' && p[0]<='1') - box64_jvm = p[0]-'0'; - } - if(!box64_jvm) - printf_log(LOG_INFO, "BOX64 will not detect libjvm\n"); - } - p = getenv("BOX64_UNITYPLAYER"); - if(p) { - if(strlen(p)==1) { - if(p[0]>='0' && p[0]<='1') - box64_unityplayer = p[0]-'0'; - } - if(!box64_unityplayer) - printf_log(LOG_INFO, "BOX64 will not detect UnityPlayer.dll\n"); - } - p = getenv("BOX64_SDL2_JGUID"); - if(p) { - if(strlen(p)==1) { - if(p[0]>='0' && p[0]<='1') - box64_sdl2_jguid = p[0]-'0'; - } - if(!box64_sdl2_jguid) - printf_log(LOG_INFO, "BOX64 will workaround the use of SDL_GetJoystickGUIDInfo with 4 args instead of 5\n"); - } - p = getenv("BOX64_LOAD_ADDR"); - if(p) { - if(sscanf(p, "0x%zx", &box64_load_addr)!=1) - box64_load_addr = 0; - if(box64_load_addr) - printf_log(LOG_INFO, "Use a starting load address of %p\n", (void*)box64_load_addr); - } - p = getenv("BOX64_DLSYM_ERROR"); - if(p) { - if(strlen(p)==1) { - if(p[0]>='0' && p[0]<='0'+1) - dlsym_error = p[0]-'0'; - } - } - p = getenv("BOX64_X11THREADS"); - if(p) { - if(strlen(p)==1) { - if(p[0]>='0' && p[0]<='0'+1) - box64_x11threads = p[0]-'0'; - } - if(box64_x11threads) - printf_log(LOG_INFO, "Try to Call XInitThreads if libX11 is loaded\n"); - } - p = getenv("BOX64_X11GLX"); - if(p) { - if(strlen(p)==1) { - if(p[0]>='0' && p[0]<='0'+1) - box64_x11glx = p[0]-'0'; - } - if(box64_x11glx) - printf_log(LOG_INFO, "Hack to force libX11 GLX extension present\n"); - else - printf_log(LOG_INFO, "Disabled Hack to force libX11 GLX extension present\n"); - } - p = getenv("BOX64_LIBGL"); - if(p) - box64_libGL = box_strdup(p); - if(!box64_libGL) { - p = getenv("SDL_VIDEO_GL_DRIVER"); - if(p) - box64_libGL = box_strdup(p); - } - if(box64_libGL) { - printf_log(LOG_INFO, "BOX64 using \"%s\" as libGL.so.1\n", p); - } - p = getenv("BOX64_ALLOWMISSINGLIBS"); - if(p) { - if(strlen(p)==1) { - if(p[0]>='0' && p[0]<='0'+1) - allow_missing_libs = p[0]-'0'; - } - if(allow_missing_libs) - printf_log(LOG_INFO, "Allow missing needed libs\n"); - } - p = getenv("BOX64_CRASHHANDLER"); - if(p) { - if(strlen(p)==1) { - if(p[0]>='0' && p[0]<='0'+1) - box64_dummy_crashhandler = p[0]-'0'; - } - if(!box64_dummy_crashhandler) - printf_log(LOG_INFO, "Don't use dummy crashhandler lib\n"); - } - p = getenv("BOX64_MALLOC_HACK"); - if(p) { - if(strlen(p)==1) { - if(p[0]>='0' && p[0]<='0'+2) - box64_malloc_hack = p[0]-'0'; - } - if(!box64_malloc_hack) { - if(box64_malloc_hack==1) { - printf_log(LOG_INFO, "Malloc hook will not be redirected\n"); - } else - printf_log(LOG_INFO, "Malloc hook will check for mmap/free occurrences\n"); - } - } - p = getenv("BOX64_NOPULSE"); - if(p) { - if(strlen(p)==1) { - if(p[0]>='0' && p[0]<='0'+1) - box64_nopulse = p[0]-'0'; - } - if(box64_nopulse) - printf_log(LOG_INFO, "Disable the use of pulseaudio libs\n"); - } - p = getenv("BOX64_NOGTK"); - if(p) { 
- if(strlen(p)==1) { - if(p[0]>='0' && p[0]<='0'+1) - box64_nogtk = p[0]-'0'; - } - if(box64_nogtk) - printf_log(LOG_INFO, "Disable the use of wrapped gtk libs\n"); - } - p = getenv("BOX64_NOVULKAN"); - if(p) { - if(strlen(p)==1) { - if(p[0]>='0' && p[0]<='0'+1) - box64_novulkan = p[0]-'0'; - } - if(box64_novulkan) - printf_log(LOG_INFO, "Disable the use of wrapped vulkan libs\n"); - } - p = getenv("BOX64_FUTEX_WAITV"); - if(p) { - if(strlen(p)==1) { - if(p[0]>='0' && p[0]<='0'+1) - box64_futex_waitv = p[0]-'0'; - } - #ifdef BAD_SIGNAL - if(box64_futex_waitv) - printf_log(LOG_INFO, "Enable the use of futex waitv syscall (if available on the system\n"); - #else - if(!box64_futex_waitv) - printf_log(LOG_INFO, "Disable the use of futex waitv syscall\n"); - #endif - } - p = getenv("BOX64_SHAEXT"); - if(p) { - if(strlen(p)==1) { - if(p[0]>='0' && p[0]<='0'+1) - box64_shaext = p[0]-'0'; - } - if(!box64_shaext) - printf_log(LOG_INFO, "Do not expose SHAEXT capabilities\n"); - } - p = getenv("BOX64_SSE42"); - if(p) { - if(strlen(p)==1) { - if(p[0]>='0' && p[0]<='0'+1) - box64_sse42 = p[0]-'0'; - } - if(!box64_sse42) - printf_log(LOG_INFO, "Do not expose SSE 4.2 capabilities\n"); - } - p = getenv("BOX64_AVX"); - if(p) { - if(strlen(p)==1) { - if(p[0]>='0' && p[0]<='0'+2) - box64_avx = p[0]-'0'; - } - if(box64_avx) - printf_log(LOG_INFO, "Will expose AVX capabilities\n"); - if(box64_avx==2) { - box64_avx=1; - box64_avx2 = 1; - printf_log(LOG_INFO, "Will expose AVX2 capabilities\n"); - } - if(!box64_avx) - printf_log(LOG_INFO, "Will not expose AVX capabilities\n"); - if(!box64_avx2) - printf_log(LOG_INFO, "Will not expose AVX2 capabilities\n"); - } - p = getenv("BOX64_RDTSC_1GHZ"); - if(p) { - if(strlen(p)==1) { - if(p[0]>='0' && p[0]<='0'+1) - box64_rdtsc_1ghz = p[0]-'0'; - } - if(!box64_rdtsc_1ghz) - printf_log(LOG_INFO, "Will require a hardware counter of 1GHz minimum or will fallback to software\n"); - } - p = getenv("BOX64_FIX_64BIT_INODES"); - if(p) { - if(strlen(p)==1) { - if(p[0]>='0' && p[0]<='0'+1) - fix_64bit_inodes = p[0]-'0'; - } - if(fix_64bit_inodes) - printf_log(LOG_INFO, "Fix 64bit inodes\n"); - } - p = getenv("BOX64_JITGDB"); - if(p) { - if(strlen(p)==1) { - if(p[0]>='0' && p[0]<='0'+3) - jit_gdb = p[0]-'0'; - } - if(jit_gdb) - printf_log(LOG_INFO, "Launch %s on segfault\n", (jit_gdb==2)?"gdbserver":((jit_gdb==3)?"lldb":"gdb")); - } - p = getenv("BOX64_SHOWSEGV"); - if(p) { - if(strlen(p)==1) { - if(p[0]>='0' && p[0]<='0'+1) - box64_showsegv = p[0]-'0'; - } - if(box64_showsegv) - printf_log(LOG_INFO, "Show Segfault signal even if a signal handler is present\n"); - } - p = getenv("BOX64_SHOWBT"); - if(p) { - if(strlen(p)==1) { - if(p[0]>='0' && p[0]<='0'+1) - box64_showbt = p[0]-'0'; - } - if(box64_showbt) - printf_log(LOG_INFO, "Show a Backtrace when a Segfault signal is caught\n"); - } - p = getenv("BOX64_MAXCPU"); - if(p) { - int maxcpu = 0; - if(sscanf(p, "%d", &maxcpu)==1) - box64_maxcpu = maxcpu; - if(box64_maxcpu<0) - box64_maxcpu = 0; - if(box64_maxcpu) { - printf_log(LOG_NONE, "Will not expose more than %d cpu cores\n", box64_maxcpu); - } else { - printf_log(LOG_NONE, "Will not limit the number of cpu cores exposed\n"); - } - } - p = getenv("BOX64_MMAP32"); - if(p) { - if(strlen(p)==1) { - if(p[0]>='0' && p[0]<='0'+1) - box64_mmap32 = p[0]-'0'; - } - if(box64_mmap32) - printf_log(LOG_INFO, "Will use 32bits address in priority for external MMAP (when 32bits process are detected)\n"); - else - printf_log(LOG_INFO, "Will not use 32bits address in priority for external MMAP (when 
32bits process are detected)\n"); - } - p = getenv("BOX64_CPUTYPE"); - if(p) { - if(strlen(p)==1) { - if(p[0]>='0' && p[0]<='0'+1) - box64_cputype = p[0]-'0'; - } - printf_log(LOG_INFO, "Will emulate an %s CPU\n", box64_cputype?"AMD":"Intel"); - } - p = getenv("BOX64_IGNOREINT3"); - if(p) { - if(strlen(p)==1) { - if(p[0]>='0' && p[0]<='0'+1) - box64_ignoreint3 = p[0]-'0'; - } - if(box64_ignoreint3) - printf_log(LOG_INFO, "Will silently ignore INT3 in the code\n"); - } - p = getenv("BOX64_X11SYNC"); - if(p) { - if(strlen(p)==1) { - if(p[0]>='0' && p[0]<='0'+1) - box64_x11sync = p[0]-'0'; - } - } + if (!BOX64ENV(nobanner) && BOX64ENV(dump)) + printf_log(LOG_INFO, "Elf Dump if ON\n"); // grab pagesize box64_pagesize = sysconf(_SC_PAGESIZE); if(!box64_pagesize) @@ -1362,12 +466,10 @@ void LoadLogEnv() computeRDTSC(); } -EXPORTDYN -void LoadEnvPath(path_collection_t *col, const char* defpath, const char* env) +static void loadPath(path_collection_t *col, const char* defpath, const char* path) { - const char* p = getenv(env); - if(p) { - ParseList(p, col, 1); + if(path) { + ParseList(path, col, 1); } else { ParseList(defpath, col, 1); } @@ -1375,7 +477,7 @@ void LoadEnvPath(path_collection_t *col, const char* defpath, const char* env) void PrintCollection(path_collection_t* col, const char* env) { - if(LOG_INFO<=box64_log) { + if (LOG_INFO<=BOX64ENV(log)) { printf_log(LOG_INFO, "%s: ", env); for(int i=0; i<col->size; i++) printf_log(LOG_INFO, "%s%s", col->paths[i], (i==col->size-1)?"\n":":"); @@ -1426,60 +528,12 @@ void AddNewLibs(const char* list) printf_log(LOG_INFO, "BOX64: Adding %s to the libs\n", list); } -void PrintFlags() { - printf("Environment Variables:\n"); - printf(" BOX64_PATH is the box64 version of PATH (default is '.:bin')\n"); - printf(" BOX64_LD_LIBRARY_PATH is the box64 version LD_LIBRARY_PATH (default is '.:lib:lib64')\n"); - printf(" BOX64_LOG with 0/1/2/3 or NONE/INFO/DEBUG/DUMP to set the printed debug info (level 3 is level 2 + BOX64_DUMP)\n"); - printf(" BOX64_DUMP with 0/1 to dump elf infos\n"); - printf(" BOX64_NOBANNER with 0/1 to enable/disable the printing of box64 version and build at start\n"); -#ifdef DYNAREC - printf(" BOX64_DYNAREC_LOG with 0/1/2/3 or NONE/INFO/DEBUG/DUMP to set the printed dynarec info\n"); - printf(" BOX64_DYNAREC with 0/1 to disable or enable Dynarec (On by default)\n"); - printf(" BOX64_NODYNAREC with address interval (0x1234-0x4567) to forbid dynablock creation in the interval specified\n"); -#endif -#ifdef HAVE_TRACE - printf(" BOX64_TRACE with 1 to enable x86_64 execution trace\n"); - printf(" or with XXXXXX-YYYYYY to enable x86_64 execution trace only between address\n"); - printf(" or with FunctionName to enable x86_64 execution trace only in one specific function\n"); - printf(" use BOX64_TRACE_INIT instead of BOX64_TRACE to start trace before init of Libs and main program\n\t (function name will probably not work then)\n"); - printf(" BOX64_TRACE_EMM with 1 to enable dump of MMX registers along with regular registers\n"); - printf(" BOX64_TRACE_XMM with 1 to enable dump of SSE registers along with regular registers\n"); - printf(" BOX64_TRACE_COLOR with 1 to enable detection of changed general register values\n"); - printf(" BOX64_TRACE_START with N to enable trace after N instructions\n"); -#ifdef DYNAREC - printf(" BOX64_DYNAREC_TRACE with 0/1 to disable or enable Trace on generated code too\n"); -#endif -#endif - printf(" BOX64_TRACE_FILE with FileName to redirect logs in a file (or stderr to use stderr instead of stdout)\n"); - 
printf(" BOX64_DLSYM_ERROR with 1 to log dlsym errors\n"); - printf(" BOX64_LOAD_ADDR=0xXXXXXX try to load at 0xXXXXXX main binary (if binary is a PIE)\n"); - printf(" BOX64_NOSIGSEGV=1 to disable handling of SigSEGV\n"); - printf(" BOX64_NOSIGILL=1 to disable handling of SigILL\n"); - printf(" BOX64_SHOWSEGV=1 to show Segfault signal even if a signal handler is present\n"); - printf(" BOX64_X11THREADS=1 to call XInitThreads when loading X11 (for old Loki games with Loki_Compat lib)\n"); - printf(" BOX64_LIBGL=libXXXX set the name (and optionnally full path) for libGL.so.1\n"); - printf(" BOX64_LD_PRELOAD=XXXX[:YYYYY] force loading XXXX (and YYYY...) libraries with the binary\n"); - printf(" BOX64_ALLOWMISSINGLIBS with 1 to allow one to continue even if a lib is missing (unadvised, will probably crash later)\n"); - printf(" BOX64_PREFER_EMULATED=1 to prefer emulated libs first (execpt for glibc, alsa, pulse, GL, vulkan and X11)\n"); - printf(" BOX64_PREFER_WRAPPED if box64 will use wrapped libs even if the lib is specified with absolute path\n"); - printf(" BOX64_CRASHHANDLER=0 to not use a dummy crashhandler lib\n"); - printf(" BOX64_NOPULSE=1 to disable the loading of pulseaudio libs\n"); - printf(" BOX64_NOGTK=1 to disable the loading of wrapped gtk libs\n"); - printf(" BOX64_NOVULKAN=1 to disable the loading of wrapped vulkan libs\n"); - printf(" BOX64_ENV='XXX=yyyy' will add XXX=yyyy env. var.\n"); - printf(" BOX64_ENV1='XXX=yyyy' will add XXX=yyyy env. var. and continue with BOX86_ENV2 ... until var doesn't exist\n"); - printf(" BOX64_JITGDB with 1 to launch \"gdb\" when a segfault is trapped, attached to the offending process\n"); - printf(" BOX64_MMAP32=1 to use 32bits address space mmap in priority for external mmap as soon a 32bits process are detected (default for Snapdragon build)\n"); -} - void PrintHelp() { printf("This is Box64, the Linux x86_64 emulator with a twist.\n"); printf("\nUsage is 'box64 [options] path/to/software [args]' to launch x86_64 software.\n"); printf(" options are:\n"); printf(" '-v'|'--version' to print box64 version and quit\n"); printf(" '-h'|'--help' to print this and quit\n"); - printf(" '-f'|'--flags' to print box64 flags and quit\n"); } void addNewEnvVar(const char* s) @@ -1502,33 +556,17 @@ void addNewEnvVar(const char* s) EXPORTDYN void LoadEnvVars(box64context_t *context) { - // Check custom env. var. 
and add them if needed - { - char* p = getenv("BOX64_ENV"); - if(p) - addNewEnvVar(p); - int i = 1; - char box64_env[50]; - do { - sprintf(box64_env, "BOX64_ENV%d", i); - p = getenv(box64_env); - if(p) { - addNewEnvVar(p); - ++i; - } - } while(p); - } - - if(getenv("BOX64_EMULATED_LIBS")) { - char* p = getenv("BOX64_EMULATED_LIBS"); + if(BOX64ENV(emulated_libs)) { + char* p = BOX64ENV(emulated_libs); ParseList(p, &context->box64_emulated_libs, 0); - if (my_context->box64_emulated_libs.size && box64_log) { + if (my_context->box64_emulated_libs.size && BOX64ENV(log)) { printf_log(LOG_INFO, "BOX64 will force the used of emulated libs for "); for (int i=0; i<context->box64_emulated_libs.size; ++i) printf_log(LOG_INFO, "%s ", context->box64_emulated_libs.paths[i]); printf_log(LOG_INFO, "\n"); } } + // Add libssl and libcrypto (and a few others) to prefer the emulated version because multiple versions exist AddPath("libssl.so.1", &context->box64_emulated_libs, 0); AddPath("libssl.so.1.0.0", &context->box64_emulated_libs, 0); @@ -1541,77 +579,21 @@ void LoadEnvVars(box64context_t *context) AddPath("libtbbmalloc.so.2", &context->box64_emulated_libs, 0); AddPath("libtbbmalloc_proxy.so.2", &context->box64_emulated_libs, 0); - if(getenv("BOX64_SSE_FLUSHTO0")) { - if (strcmp(getenv("BOX64_SSE_FLUSHTO0"), "1")==0) { - box64_sse_flushto0 = 1; - printf_log(LOG_INFO, "BOX64: Direct apply of SSE Flush to 0 flag\n"); - } - } - if(getenv("BOX64_X87_NO80BITS")) { - if (strcmp(getenv("BOX64_X87_NO80BITS"), "1")==0) { - box64_x87_no80bits = 1; - printf_log(LOG_INFO, "BOX64: All 80bits x87 long double will be handle as double\n"); - } + if(BOX64ENV(nosigsegv)) { + context->no_sigsegv = 1; } - if(getenv("BOX64_SYNC_ROUNDING")) { - if (strcmp(getenv("BOX64_SYNC_ROUNDING"), "1")==0) { - box64_sync_rounding = 1; - printf_log(LOG_INFO, "BOX64: Rounding mode will be synced with fesetround/fegetround\n"); - } + if(BOX64ENV(nosigill)) { + context->no_sigill = 1; } - if(getenv("BOX64_PREFER_WRAPPED")) { - if (strcmp(getenv("BOX64_PREFER_WRAPPED"), "1")==0) { - box64_prefer_wrapped = 1; - printf_log(LOG_INFO, "BOX64: Prefering Wrapped libs\n"); - } - } - if(getenv("BOX64_PREFER_EMULATED")) { - if (strcmp(getenv("BOX64_PREFER_EMULATED"), "1")==0) { - box64_prefer_emulated = 1; - printf_log(LOG_INFO, "BOX64: Prefering Emulated libs\n"); - } + if(BOX64ENV(addlibs)) { + AddNewLibs(BOX64ENV(addlibs)); } - if(getenv("BOX64_WRAP_EGL")) { - char* p = getenv("BOX64_WRAP_EGL"); - if (*p>='0' && *p<='1') { - box64_wrap_egl = *p - '0'; - if(box64_wrap_egl) printf_log(LOG_INFO, "BOX64: Prefering Native(Wrapped) EGL/GLESv2\n"); - } - } - - if(getenv("BOX64_NOSIGSEGV")) { - if (strcmp(getenv("BOX64_NOSIGSEGV"), "1")==0) { - context->no_sigsegv = 1; - printf_log(LOG_INFO, "BOX64: Disabling handling of SigSEGV\n"); - } - } - if(getenv("BOX64_NOSIGILL")) { - if (strcmp(getenv("BOX64_NOSIGILL"), "1")==0) { - context->no_sigill = 1; - printf_log(LOG_INFO, "BOX64: Disabling handling of SigILL\n"); - } - } - if(getenv("BOX64_ADDLIBS")) { - AddNewLibs(getenv("BOX64_ADDLIBS")); - } - // check BOX64_PATH and load it - LoadEnvPath(&context->box64_path, ".:bin", "BOX64_PATH"); + loadPath(&context->box64_path, ".:bin", BOX64ENV(path)); if(getenv("PATH")) AppendList(&context->box64_path, getenv("PATH"), 1); // in case some of the path are for x86 world #ifdef HAVE_TRACE - char* p = getenv("BOX64_TRACE"); - if(p) { - if (strcmp(p, "0")) { - context->x64trace = 1; - box64_trace = p; - } - } - p = getenv("BOX64_TRACE_INIT"); - if(p) { - if (strcmp(p, 
"0")) { - context->x64trace = 1; - trace_init = p; - } + if((BOX64ENV(trace_init) && strcmp(BOX64ENV(trace_init), "0")) || (BOX64ENV(trace) && strcmp(BOX64ENV(trace), "0"))) { + context->x64trace = 1; } if(my_context->x64trace) { printf_log(LOG_INFO, "Initializing Zydis lib\n"); @@ -1626,13 +608,12 @@ void LoadEnvVars(box64context_t *context) EXPORTDYN void LoadLDPath(box64context_t *context) { - // check BOX64_LD_LIBRARY_PATH and load it #ifdef BOX32 if(box64_is32bits) - LoadEnvPath(&context->box64_ld_lib, ".:lib:i386:bin:libs", "BOX64_LD_LIBRARY_PATH"); + loadPath(&context->box64_ld_lib, ".:lib:i386:bin:libs", BOX64ENV(ld_library_path)); else #endif - LoadEnvPath(&context->box64_ld_lib, ".:lib:lib64:x86_64:bin64:libs64", "BOX64_LD_LIBRARY_PATH"); + loadPath(&context->box64_ld_lib, ".:lib:lib64:x86_64:bin64:libs64", BOX64ENV(ld_library_path)); #ifndef TERMUX if(box64_is32bits) { #ifdef BOX32 @@ -1687,7 +668,7 @@ EXPORTDYN void setupTraceInit() { #ifdef HAVE_TRACE - char* p = trace_init; + char* p = BOX64ENV(trace_init); if(p) { setbuf(stdout, NULL); uintptr_t s_trace_start=0, s_trace_end=0; @@ -1719,10 +700,8 @@ void setupTraceInit() } } } else { - p = box64_trace; - if(p) - if (strcmp(p, "0")) - SetTraceEmu(0, 1); + if(BOX64ENV(trace) && strcmp(BOX64ENV(trace), "0")) + SetTraceEmu(0, 1); } #endif } @@ -1758,7 +737,7 @@ EXPORTDYN void setupTrace() { #ifdef HAVE_TRACE - char* p = box64_trace; + char* p = BOX64ENV(trace); if(p) { setbuf(stdout, NULL); uintptr_t s_trace_start=0, s_trace_end=0; @@ -1838,11 +817,11 @@ void endBox64() FreeBox64Context(&my_context); #ifdef DYNAREC // disable dynarec now - box64_dynarec = 0; + SET_BOX64ENV(dynarec, 0); #endif - if(box64_libGL) { - box_free(box64_libGL); - box64_libGL = NULL; + if(BOX64ENV(libgl)) { + box_free(BOX64ENV(libgl)); + SET_BOX64ENV(libgl, NULL); } if(box64_custom_gstreamer) { box_free(box64_custom_gstreamer); @@ -1876,34 +855,6 @@ static void add_argv(const char* what) { } } -static void load_rcfiles() -{ - char* rcpath = getenv("BOX64_RCFILE"); - - if(rcpath && FileExist(rcpath, IS_FILE)) - LoadRCFile(rcpath); - #ifndef TERMUX - else if(FileExist("/etc/box64.box64rc", IS_FILE)) - LoadRCFile("/etc/box64.box64rc"); - else if(FileExist("/data/data/com.termux/files/usr/glibc/etc/box64.box64rc", IS_FILE)) - LoadRCFile("/data/data/com.termux/files/usr/glibc/etc/box64.box64rc"); - #else - else if(FileExist("/data/data/com.termux/files/usr/etc/box64.box64rc", IS_FILE)) - LoadRCFile("/data/data/com.termux/files/usr/etc/box64.box64rc"); - #endif - else - LoadRCFile(NULL); // load default rcfile - - char* p = getenv("HOME"); - if(p) { - char tmp[4096]; - strncpy(tmp, p, 4095); - strncat(tmp, "/.box64rc", 4095); - if(FileExist(tmp, IS_FILE)) - LoadRCFile(tmp); - } -} - #ifndef STATICBUILD void pressure_vessel(int argc, const char** argv, int nextarg, const char* prog); #endif @@ -1930,7 +881,7 @@ int initialize(int argc, const char **argv, char** env, x64emu_t** emulator, elf printf("See 'box64 --help' for more information.\n"); exit(0); } - if(argc>1 && !strcmp(argv[1], "/usr/bin/gdb") && getenv("BOX64_TRACE_FILE")) + if(argc>1 && !strcmp(argv[1], "/usr/bin/gdb") && BOX64ENV(trace_file)) exit(0); // uname -m is redirected to box64 -m if(argc==2 && (!strcmp(argv[1], "-m") || !strcmp(argv[1], "-p") || !strcmp(argv[1], "-i"))) @@ -1939,14 +890,16 @@ int initialize(int argc, const char **argv, char** env, x64emu_t** emulator, elf exit(0); } + ftrace = stdout; + + LoadEnvVariables(); + InitializeEnvFiles(); + // check BOX64_LOG debug level 
LoadLogEnv(); - if(!getenv("BOX64_NORCFILES")) { - load_rcfiles(); - } char* bashpath = NULL; { - char* p = getenv("BOX64_BASH"); + char* p = BOX64ENV(bash); if(p) { if(FileIsX64ELF(p)) { bashpath = p; @@ -1969,10 +922,6 @@ int initialize(int argc, const char **argv, char** env, x64emu_t** emulator, elf PrintHelp(); exit(0); } - if(!strcmp(prog, "-f") || !strcmp(prog, "--flags")) { - PrintFlags(); - exit(0); - } // other options? if(!strcmp(prog, "--")) { prog = argv[++nextarg]; @@ -1985,8 +934,7 @@ int initialize(int argc, const char **argv, char** env, x64emu_t** emulator, elf printf("BOX64: Nothing to run\n"); exit(0); } - if(!box64_nobanner) - PrintBox64Version(); + if (!BOX64ENV(nobanner)) PrintBox64Version(); // precheck, for win-preload const char* prog_ = strrchr(prog, '/'); if(!prog_) prog_ = prog; else ++prog_; @@ -2028,8 +976,8 @@ int initialize(int argc, const char **argv, char** env, x64emu_t** emulator, elf // special case for winedbg, doesn't work anyway if(argv[nextarg+1] && strstr(argv[nextarg+1], "winedbg")==argv[nextarg+1]) { if(getenv("BOX64_WINEDBG")) { - box64_nobanner = 1; - box64_log = 0; + SET_BOX64ENV(nobanner, 1); + BOX64ENV(log) = 0; } else { printf_log(LOG_NONE, "winedbg detected, not launching it!\n"); exit(0); // exiting, it doesn't work anyway @@ -2045,13 +993,10 @@ int initialize(int argc, const char **argv, char** env, x64emu_t** emulator, elf strcat(tmp, "/../lib64/gstreamer-1.0"); // check if it exist if(FileExist(tmp, 0)) { - //printf_log(LOG_INFO, "BOX64: Custom gstreamer detected, disable gtk wrapping\n"); - //box64_nogtk = 1; - //is_custom_gstreamer = 1; box64_custom_gstreamer = box_strdup(tmp); } } - // Try to get the name of the exe being run, to ApplyParams laters + // Try to get the name of the exe being run, to ApplyEnvFileEntry laters if(argv[nextarg+1] && argv[nextarg+1][0]!='-' && strlen(argv[nextarg+1])>4 && !strcasecmp(argv[nextarg+1]+strlen(argv[nextarg+1])-4, ".exe")) { const char* pp = strrchr(argv[nextarg+1], '/'); if(pp) @@ -2112,7 +1057,7 @@ int initialize(int argc, const char **argv, char** env, x64emu_t** emulator, elf if(getenv("BOX64_LD_PRELOAD")) { char* p = getenv("BOX64_LD_PRELOAD"); ParseList(p, &ld_preload, 0); - if (ld_preload.size && box64_log) { + if (ld_preload.size && BOX64ENV(log)) { printf_log(LOG_INFO, "BOX64 trying to Preload "); for (int i=0; i<ld_preload.size; ++i) printf_log(LOG_INFO, "%s ", ld_preload.paths[i]); @@ -2130,7 +1075,7 @@ int initialize(int argc, const char **argv, char** env, x64emu_t** emulator, elf if(strstr(p, "libasan.so")) box64_tcmalloc_minimal = 1; // it seems Address Sanitizer doesn't handle dlsym'd malloc very well AppendList(&ld_preload, p, 0); - if (ld_preload.size && box64_log) { + if (ld_preload.size && BOX64ENV(log)) { printf_log(LOG_INFO, "BOX64 trying to Preload "); for (int i=0; i<ld_preload.size; ++i) printf_log(LOG_INFO, "%s ", ld_preload.paths[i]); @@ -2148,7 +1093,7 @@ int initialize(int argc, const char **argv, char** env, x64emu_t** emulator, elf my_context->argv[0] = ResolveFileSoft(prog, &my_context->box64_path); // GatherEnv(&my_context->envv, environ?environ:env, my_context->argv[0]); - if(box64_dump || box64_log<=LOG_DEBUG) { + if (BOX64ENV(dump) || BOX64ENV(log)<=LOG_DEBUG) { for (int i=0; i<my_context->envc; ++i) printf_dump(LOG_DEBUG, " Env[%02d]: %s\n", i, my_context->envv[i]); } @@ -2186,10 +1131,10 @@ int initialize(int argc, const char **argv, char** env, x64emu_t** emulator, elf printf_log(LOG_INFO, "Zoom detected, Trying to use system libturbojpeg if possible\n"); 
box64_zoom = 1; } - // special case for bash (add BOX86_NOBANNER=1 if not there) + // special case for bash if(!strcmp(prgname, "bash") || !strcmp(prgname, "box64-bash")) { printf_log(LOG_INFO, "bash detected, disabling banner\n"); - if (!box64_nobanner) { + if (!BOX64ENV(nobanner)) { setenv("BOX86_NOBANNER", "1", 0); setenv("BOX64_NOBANNER", "1", 0); } @@ -2203,46 +1148,37 @@ int initialize(int argc, const char **argv, char** env, x64emu_t** emulator, elf if(bashpath) my_context->bashpath = box_strdup(bashpath); - /*if(strstr(prgname, "awesomium_process")==prgname) { - printf_log(LOG_INFO, "awesomium_process detected, forcing emulated libpng12\n"); - AddPath("libpng12.so.0", &my_context->box64_emulated_libs, 0); - }*/ - /*if(!strcmp(prgname, "gdb")) { - exit(-1); - }*/ - ApplyParams("*"); // [*] is a special setting for all process - ApplyParams(prgname); - if(box64_wine && wine_prog) { - ApplyParams(wine_prog); + ApplyEnvFileEntry(prgname); + if (box64_wine && wine_prog) { + ApplyEnvFileEntry(wine_prog); wine_prog = NULL; } - if(box64_wine) - box64_maxcpu_immutable = 1; // cannot change once wine is loaded + PrintEnvVariables(); for(int i=1; i<my_context->argc; ++i) { my_context->argv[i] = box_strdup(argv[i+nextarg]); printf_log(LOG_INFO, "argv[%i]=\"%s\"\n", i, my_context->argv[i]); } - if(box64_nosandbox) + if(BOX64ENV(nosandbox)) { add_argv("--no-sandbox"); } - if(box64_inprocessgpu) + if(BOX64ENV(inprocessgpu)) { add_argv("--in-process-gpu"); } - if(box64_cefdisablegpu) + if(BOX64ENV(cefdisablegpu)) { add_argv("-cef-disable-gpu"); } - if(box64_cefdisablegpucompositor) + if(BOX64ENV(cefdisablegpucompositor)) { add_argv("-cef-disable-gpu-compositor"); } // add new args only if there is no args already - if(box64_new_args) { + if(BOX64ENV(args)) { char tmp[256]; - char* p = box64_new_args; + char* p = BOX64ENV(args); int state = 0; char* p2 = p; if(my_context->argc==1 || (my_context->argc==2 && box64_wine)) @@ -2264,12 +1200,10 @@ int initialize(int argc, const char **argv, char** env, x64emu_t** emulator, elf } ++p2; } - box_free(box64_new_args); - box64_new_args = NULL; } - if(box64_insert_args) { + if(BOX64ENV(insert_args)) { char tmp[256]; - char* p = box64_insert_args; + char* p = BOX64ENV(insert_args); int state = 0; char* p2 = p; while(state>=0) { @@ -2290,8 +1224,6 @@ int initialize(int argc, const char **argv, char** env, x64emu_t** emulator, elf } ++p2; } - box_free(box64_insert_args); - box64_insert_args = NULL; } // check if file exist if(!my_context->argv[0] || !FileExist(my_context->argv[0], IS_FILE)) { @@ -2459,8 +1391,9 @@ int initialize(int argc, const char **argv, char** env, x64emu_t** emulator, elf printf_log(LOG_NONE, "Error setting process name (%s)\n", strerror(errno)); else printf_log(LOG_INFO, "Rename process to \"%s\"\n", p); - if(strcmp(prgname, p)) - ApplyParams(p); + if(strcmp(prgname, p)) { + ApplyEnvFileEntry(p); + } // and now all change the argv (so libs libs mesa find the correct program names) char* endp = (char*)argv[argc-1]; while(*endp) @@ -2567,15 +1500,6 @@ int initialize(int argc, const char **argv, char** env, x64emu_t** emulator, elf setupTrace(); *emulator = emu; - -#ifdef DYNAREC - if (box64_dynarec_perf_map) { - char pathname[32]; - snprintf(pathname, sizeof(pathname), "/tmp/perf-%d.map", getpid()); - box64_dynarec_perf_map_fd = open(pathname, O_CREAT | O_RDWR | O_APPEND, S_IRUSR | S_IWUSR); - } -#endif - return 0; } @@ -2616,11 +1540,10 @@ int emulate(x64emu_t* emu, elfheader_t* elf_header) #endif #ifdef DYNAREC - if (box64_dynarec_perf_map 
&& box64_dynarec_perf_map_fd != -1) { - close(box64_dynarec_perf_map_fd); - box64_dynarec_perf_map_fd = -1; + if (BOX64ENV(dynarec_perf_map) && BOX64ENV(dynarec_perf_map_fd) != -1) { + close(BOX64ENV(dynarec_perf_map_fd)); + SET_BOX64ENV(dynarec_perf_map_fd, -1); } #endif - return ret; } diff --git a/src/custommem.c b/src/custommem.c index 4b0f0712..cd2a743b 100644 --- a/src/custommem.c +++ b/src/custommem.c @@ -1934,7 +1934,7 @@ void reverveHigMem32(void) } } printf_log(LOG_INFO, "Memory higher than 32bits reserved\n"); - if(box64_log>=LOG_DEBUG) { + if (BOX64ENV(log)>=LOG_DEBUG) { uintptr_t start=0x100000000LL; int prot; uintptr_t bend; @@ -1963,7 +1963,7 @@ void my_reserveHighMem() size_t n = sizeof(p)/sizeof(p[0]); for(size_t i=0; i<n; ++i) p[i] = box32_malloc(SZ-128); - if(box64_log>=LOG_DEBUG) { + if (BOX64ENV(log)>=LOG_DEBUG) { printf_log(LOG_DEBUG, "Reserved %u MB of low memory [", (SZ)*n); for(size_t i=0; i<n; ++i) printf_log(LOG_DEBUG, "%p%s", p[i], (i==(n-1))?"]\n":", "); @@ -2001,14 +2001,8 @@ void my_reserveHighMem() void reserveHighMem() { - char* p = getenv("BOX64_RESERVE_HIGH"); - if(!box64_is32bits) - #if 0//def ADLINK - if(p && p[0]=='0') - #else - if(!p || p[0]=='0') - #endif - return; // don't reserve by default + if(!box64_is32bits && !BOX64ENV(reserve_high)) + return; // don't reserve by default on 64bits my_reserveHighMem(); } @@ -2028,7 +2022,7 @@ void init_custommem_helper(box64context_t* ctx) sigfillset(&critical_prot); init_mutexes(); #ifdef DYNAREC - if(box64_dynarec) { + if(BOX64ENV(dynarec)) { #ifdef JMPTABL_SHIFT4 for(int i=0; i<(1<<JMPTABL_SHIFT4); ++i) box64_jmptbl4[i] = box64_jmptbldefault3; @@ -2087,18 +2081,17 @@ void fini_custommem_helper(box64context_t *ctx) } #ifdef JMPTABL_SHIFT4 } - if(box64_log) printf("Allocation:\n- dynarec: %lld kio\n- customMalloc: %lld kio\n- jump table: %lld kio (%lld level 4, %lld level 3, %lld level 2, %lld level 1 table allocated, for %lld jumps, with at most %lld per level 1)\n", dynarec_allocated / 1024, customMalloc_allocated / 1024, jmptbl_allocated / 1024, jmptbl_allocated4, jmptbl_allocated3, jmptbl_allocated2, jmptbl_allocated1, njmps, njmps_in_lv1_max); - #else - if(box64_log) printf("Allocation:\n- dynarec: %lld kio\n- customMalloc: %lld kio\n- jump table: %lld kio (%lld level 3, %lld level 2, %lld level 1 table allocated, for %lld jumps, with at most %lld per level 1)\n", dynarec_allocated / 1024, customMalloc_allocated / 1024, jmptbl_allocated / 1024, jmptbl_allocated3, jmptbl_allocated2, jmptbl_allocated1, njmps, njmps_in_lv1_max); - #endif - if(box64_log) - testAllBlocks(); + if(BOX64ENV(log)) printf("Allocation:\n- dynarec: %lld kio\n- customMalloc: %lld kio\n- jump table: %lld kio (%lld level 4, %lld level 3, %lld level 2, %lld level 1 table allocated, for %lld jumps, with at most %lld per level 1)\n", dynarec_allocated / 1024, customMalloc_allocated / 1024, jmptbl_allocated / 1024, jmptbl_allocated4, jmptbl_allocated3, jmptbl_allocated2, jmptbl_allocated1, njmps, njmps_in_lv1_max); +#else + if(BOX64ENV(log)) printf("Allocation:\n- dynarec: %lld kio\n- customMalloc: %lld kio\n- jump table: %lld kio (%lld level 3, %lld level 2, %lld level 1 table allocated, for %lld jumps, with at most %lld per level 1)\n", dynarec_allocated / 1024, customMalloc_allocated / 1024, jmptbl_allocated / 1024, jmptbl_allocated3, jmptbl_allocated2, jmptbl_allocated1, njmps, njmps_in_lv1_max); +#endif + if(BOX64ENV(log)) testAllBlocks(); #endif if(!inited) return; inited = 0; #ifdef DYNAREC - if(box64_dynarec) { + 
if(BOX64ENV(dynarec)) { dynarec_log(LOG_DEBUG, "Free global Dynarecblocks\n"); mmaplist_t* head = mmaplist; mmaplist = NULL; diff --git a/src/custommmap.c b/src/custommmap.c index c33841f8..4f506f32 100644 --- a/src/custommmap.c +++ b/src/custommmap.c @@ -2,6 +2,8 @@ #include <unistd.h> #include <stdint.h> +#include "env.h" + #ifndef MAP_FAILED #define MAP_FAILED ((void *) -1) #endif @@ -29,12 +31,12 @@ void* box_mmap(void *addr, unsigned long length, int prot, int flags, int fd, ss void* my_mmap64(x64emu_t* emu, void *addr, unsigned long length, int prot, int flags, int fd, ssize_t offset); extern int running32bits; -extern int box64_mmap32; +extern box64env_t box64env; EXPORT void* mmap64(void *addr, unsigned long length, int prot, int flags, int fd, ssize_t offset) { void* ret; - if(!addr && ((running32bits && box64_mmap32) || (flags&MAP_32BIT) || box64_is32bits)) + if(!addr && ((running32bits && BOX64ENV(mmap32)) || (flags&MAP_32BIT) || box64_is32bits)) ret = box_mmap(addr, length, prot, flags | MAP_32BIT, fd, offset); else ret = internal_mmap(addr, length, prot, flags, fd, offset); diff --git a/src/dynarec/arm64/dynarec_arm64_00.c b/src/dynarec/arm64/dynarec_arm64_00.c index 5ab33ffe..25b70278 100644 --- a/src/dynarec/arm64/dynarec_arm64_00.c +++ b/src/dynarec/arm64/dynarec_arm64_00.c @@ -725,7 +725,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin u32 = PK(i32); i32++; } - if(!box64_dynarec_test && u32>=0x50 && u32<=0x57 && (dyn->size>(ninst+1) && dyn->insts[ninst+1].pred_sz==1) && gd != xRSP) { + if(!BOX64ENV(dynarec_test) && u32>=0x50 && u32<=0x57 && (dyn->size>(ninst+1) && dyn->insts[ninst+1].pred_sz==1) && gd != xRSP) { u32 = TO_NAT((u32 & 0x07) + (rex.b << 3)); if(u32==xRSP) { PUSH1z(gd); @@ -770,7 +770,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin u32 = PK(i32); i32++; } - if(!box64_dynarec_test && (gd!=xRSP) && u32>=0x58 && u32<=0x5f && (dyn->size>(ninst+1) && dyn->insts[ninst+1].pred_sz==1)) { + if (!BOX64ENV(dynarec_test) && (gd != xRSP) && u32 >= 0x58 && u32 <= 0x5f && (dyn->size > (ninst + 1) && dyn->insts[ninst + 1].pred_sz == 1)) { // double pop! u32 = TO_NAT((u32 & 0x07) + (rex.b << 3)); MESSAGE(LOG_DUMP, "DOUBLE POP\n"); @@ -1010,7 +1010,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin case 0x6C: case 0x6D: INST_NAME(opcode == 0x6C ? "INSB" : "INSD"); - if(box64_dynarec_safeflags>1) { + if(BOX64ENV(dynarec_safeflags)>1) { READFLAGS(X_PEND); } else { SETFLAGS(X_ALL, SF_SET_NODF); // Hack to set flags in "don't care" state @@ -1026,7 +1026,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin case 0x6E: case 0x6F: INST_NAME(opcode == 0x6C ? 
"OUTSB" : "OUTSD"); - if(box64_dynarec_safeflags>1) { + if(BOX64ENV(dynarec_safeflags)>1) { READFLAGS(X_PEND); } else { SETFLAGS(X_ALL, SF_SET_NODF); // Hack to set flags in "don't care" state @@ -1553,7 +1553,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin if(gd==xRAX) { if (rep == 2) { INST_NAME("PAUSE"); - switch (box64_dynarec_pause) { + switch (BOX64ENV(dynarec_pause)) { case 1: YIELD; break; case 2: WFI; break; case 3: @@ -1730,7 +1730,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin case 1: case 2: if(rep==1) {INST_NAME("REPNZ CMPSB");} else {INST_NAME("REPZ CMPSB");} - if(box64_dynarec_safeflags>1) + if(BOX64ENV(dynarec_safeflags)>1) MAYSETFLAGS(); SMREAD(); SETFLAGS(X_ALL, SF_SET_PENDING); @@ -1772,7 +1772,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin case 1: case 2: if(rep==1) {INST_NAME("REPNZ CMPSD");} else {INST_NAME("REPZ CMPSD");} - if(box64_dynarec_safeflags>1) + if(BOX64ENV(dynarec_safeflags)>1) MAYSETFLAGS(); SETFLAGS(X_ALL, SF_SET_PENDING); SMREAD(); @@ -1914,7 +1914,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin case 1: case 2: if(rep==1) {INST_NAME("REPNZ SCASB");} else {INST_NAME("REPZ SCASB");} - if(box64_dynarec_safeflags>1) + if(BOX64ENV(dynarec_safeflags)>1) MAYSETFLAGS(); SMREAD(); SETFLAGS(X_ALL, SF_SET_PENDING); @@ -1953,7 +1953,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin case 1: case 2: if(rep==1) {INST_NAME("REPNZ SCASD");} else {INST_NAME("REPZ SCASD");} - if(box64_dynarec_safeflags>1) + if(BOX64ENV(dynarec_safeflags)>1) MAYSETFLAGS(); SMREAD(); SETFLAGS(X_ALL, SF_SET_PENDING); @@ -2283,7 +2283,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin case 0xC2: INST_NAME("RETN"); //SETFLAGS(X_ALL, SF_SET_NODF); // Hack, set all flags (to an unknown state...) - if(box64_dynarec_safeflags) { + if(BOX64ENV(dynarec_safeflags)) { READFLAGS(X_PEND); // lets play safe here too } fpu_purgecache(dyn, ninst, 1, x1, x2, x3); // using next, even if there no next @@ -2295,7 +2295,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin case 0xC3: INST_NAME("RET"); // SETFLAGS(X_ALL, SF_SET_NODF); // Hack, set all flags (to an unknown state...) 
- if(box64_dynarec_safeflags) { + if(BOX64ENV(dynarec_safeflags)) { READFLAGS(X_PEND); // so instead, force the deferred flags, so it's not too slow, and flags are not lost } fpu_purgecache(dyn, ninst, 1, x1, x2, x3); // using next, even if there no next @@ -2500,7 +2500,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin if(isRetX87Wrapper(*(wrapper_t*)(addr))) // return value will be on the stack, so the stack depth needs to be updated x87_purgecache(dyn, ninst, 0, x3, x1, x4); - if((box64_log<2 && !cycle_log && !box64_dynarec_test) && tmp) { + if ((BOX64ENV(log)<2 && !BOX64ENV(rolling_log) && !BOX64ENV(dynarec_test)) && tmp) { //GETIP(ip+3+8+8); // read the 0xCC call_n(dyn, ninst, *(void**)(addr+8), tmp); addr+=8+8; @@ -2524,7 +2524,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin } } else { INST_NAME("INT 3"); - if(!box64_ignoreint3) { + if(!BOX64ENV(ignoreint3)) { // check if TRAP signal is handled TABLE64(x1, (uintptr_t)my_context); MOV32w(x2, offsetof(box64context_t, signals[SIGTRAP])); @@ -2573,7 +2573,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin jump_to_epilog(dyn, 0, xRIP, ninst); } else if(u8==0x03) { INST_NAME("INT 3"); - if(box64_dynarec_safeflags>1) { + if(BOX64ENV(dynarec_safeflags)>1) { READFLAGS(X_PEND); } else { SETFLAGS(X_ALL, SF_SET_NODF); // Hack to set flags in "don't care" state @@ -2587,7 +2587,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin *ok = 0; } else { INST_NAME("INT n"); - if(box64_dynarec_safeflags>1) { + if(BOX64ENV(dynarec_safeflags)>1) { READFLAGS(X_PEND); } else { SETFLAGS(X_ALL, SF_SET_NODF); // Hack to set flags in "don't care" state @@ -2748,7 +2748,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin case 0: INST_NAME("ROL Eb, CL"); SETFLAGS(X_OF|X_CF, SF_SUBSET); - if(box64_dynarec_safeflags>1) + if(BOX64ENV(dynarec_safeflags)>1) MAYSETFLAGS(); UFLAG_IF { UFLAG_DF(x2, d_none); @@ -2778,7 +2778,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin case 1: INST_NAME("ROR Eb, CL"); SETFLAGS(X_OF|X_CF, SF_SUBSET); - if(box64_dynarec_safeflags>1) + if(BOX64ENV(dynarec_safeflags)>1) MAYSETFLAGS(); UFLAG_IF { UFLAG_DF(x2, d_none); @@ -2808,7 +2808,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin INST_NAME("RCL Eb, CL"); MESSAGE(LOG_DUMP, "Need Optimization\n"); READFLAGS(X_CF); - if(box64_dynarec_safeflags>1) + if(BOX64ENV(dynarec_safeflags)>1) MAYSETFLAGS(); SETFLAGS(X_OF|X_CF, SF_SET_DF); ANDw_mask(x2, xRCX, 0, 0b00100); @@ -2820,7 +2820,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin INST_NAME("RCR Eb, CL"); MESSAGE(LOG_DUMP, "Need Optimization\n"); READFLAGS(X_CF); - if(box64_dynarec_safeflags>1) + if(BOX64ENV(dynarec_safeflags)>1) MAYSETFLAGS(); SETFLAGS(X_OF|X_CF, SF_SET_DF); ANDw_mask(x2, xRCX, 0, 0b00100); @@ -2832,7 +2832,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin case 6: INST_NAME("SHL Eb, CL"); SETFLAGS(X_ALL, SF_SET_PENDING); // some flags are left undefined - if(box64_dynarec_safeflags>1) + if(BOX64ENV(dynarec_safeflags)>1) MAYSETFLAGS(); ANDw_mask(x2, xRCX, 0, 0b00100); //mask=0x00000001f UFLAG_IF { @@ -2845,7 +2845,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin case 5: INST_NAME("SHR Eb, CL"); SETFLAGS(X_ALL, SF_SET_PENDING); // some flags are left undefined - 
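The PAUSE case earlier in this file picks how the x86 spin-wait hint is lowered according to BOX64ENV(dynarec_pause). Written as ordinary inline assembly, the two modes visible in that hunk correspond roughly to the following; the helper name is made up for illustration:

static inline void spin_pause_hint(int mode)
{
#if defined(__aarch64__)
    switch (mode) {
        case 1: __asm__ volatile("yield" ::: "memory"); break; /* cheap scheduler hint */
        case 2: __asm__ volatile("wfi"   ::: "memory"); break; /* sleep until an interrupt */
        default: break;                                        /* 0: no special hint emitted */
    }
#else
    (void)mode;
#endif
}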
if(box64_dynarec_safeflags>1) + if(BOX64ENV(dynarec_safeflags)>1) MAYSETFLAGS(); ANDw_mask(x2, xRCX, 0, 0b00100); //mask=0x00000001f UFLAG_IF { @@ -2858,7 +2858,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin case 7: INST_NAME("SAR Eb, CL"); SETFLAGS(X_ALL, SF_SET_PENDING); // some flags are left undefined - if(box64_dynarec_safeflags>1) + if(BOX64ENV(dynarec_safeflags)>1) MAYSETFLAGS(); ANDw_mask(x2, xRCX, 0, 0b00100); //mask=0x00000001f UFLAG_IF { @@ -2876,7 +2876,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin case 0: INST_NAME("ROL Ed, CL"); SETFLAGS(X_OF|X_CF, SF_SUBSET); - if(box64_dynarec_safeflags>1) + if(BOX64ENV(dynarec_safeflags)>1) MAYSETFLAGS(); if(rex.w) { ANDx_mask(x3, xRCX, 1, 0, 0b00101); //mask=0x000000000000003f @@ -2911,7 +2911,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin case 1: INST_NAME("ROR Ed, CL"); SETFLAGS(X_OF|X_CF, SF_SUBSET); - if(box64_dynarec_safeflags>1) + if(BOX64ENV(dynarec_safeflags)>1) MAYSETFLAGS(); if(rex.w) { ANDx_mask(x3, xRCX, 1, 0, 0b00101); //mask=0x000000000000003f @@ -2947,7 +2947,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin MESSAGE(LOG_DUMP, "Need Optimization\n"); READFLAGS(X_CF); SETFLAGS(X_OF|X_CF, SF_SET_DF); - if(box64_dynarec_safeflags>1) + if(BOX64ENV(dynarec_safeflags)>1) MAYSETFLAGS(); if(rex.w) { ANDx_mask(x2, xRCX, 1, 0, 0b00101); //mask=0x000000000000003f @@ -2965,7 +2965,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin MESSAGE(LOG_DUMP, "Need Optimization\n"); READFLAGS(X_CF); SETFLAGS(X_OF|X_CF, SF_SET_DF); - if(box64_dynarec_safeflags>1) + if(BOX64ENV(dynarec_safeflags)>1) MAYSETFLAGS(); if(rex.w) { ANDx_mask(x2, xRCX, 1, 0, 0b00101); //mask=0x000000000000003f @@ -2982,7 +2982,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin case 6: INST_NAME("SHL Ed, CL"); SETFLAGS(X_ALL, SF_SET_PENDING); // some flags are left undefined - if(box64_dynarec_safeflags>1) + if(BOX64ENV(dynarec_safeflags)>1) MAYSETFLAGS(); if(rex.w) { ANDx_mask(x3, xRCX, 1, 0, 0b00101); //mask=0x000000000000003f @@ -3000,7 +3000,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin case 5: INST_NAME("SHR Ed, CL"); SETFLAGS(X_ALL, SF_SET_PENDING); // some flags are left undefined - if(box64_dynarec_safeflags>1) + if(BOX64ENV(dynarec_safeflags)>1) MAYSETFLAGS(); if(rex.w) { ANDx_mask(x3, xRCX, 1, 0, 0b00101); //mask=0x000000000000003f @@ -3018,7 +3018,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin case 7: INST_NAME("SAR Ed, CL"); SETFLAGS(X_ALL, SF_SET_PENDING); - if(box64_dynarec_safeflags>1) + if(BOX64ENV(dynarec_safeflags)>1) MAYSETFLAGS(); if(rex.w) { ANDx_mask(x3, xRCX, 1, 0, 0b00101); //mask=0x000000000000003f @@ -3149,10 +3149,10 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin case 0xE6: /* OUT Ib, AL */ case 0xE7: /* OUT Ib, EAX */ INST_NAME(opcode==0xE4?"IN AL, Ib":(opcode==0xE5?"IN EAX, Ib":(opcode==0xE6?"OUT Ib, AL":"OUT Ib, EAX"))); - if (rex.is32bits && box64_ignoreint3) { + if (rex.is32bits && BOX64ENV(ignoreint3)) { F8; } else { - if(box64_dynarec_safeflags>1) { + if(BOX64ENV(dynarec_safeflags)>1) { READFLAGS(X_PEND); } else { SETFLAGS(X_ALL, SF_SET_NODF); // Hack to set flags in "don't care" state @@ -3197,14 +3197,14 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin // calling a 
native function SMEND(); sse_purge07cache(dyn, ninst, x3); - if((box64_log<2 && !cycle_log && !box64_dynarec_test) && dyn->insts[ninst].natcall) { + if ((BOX64ENV(log) < 2 && !BOX64ENV(rolling_log) && !BOX64ENV(dynarec_test)) && dyn->insts[ninst].natcall) { tmp=isSimpleWrapper(*(wrapper_t*)(dyn->insts[ninst].natcall+2)); } else tmp=0; if(dyn->insts[ninst].natcall && isRetX87Wrapper(*(wrapper_t*)(dyn->insts[ninst].natcall+2))) // return value will be on the stack, so the stack depth needs to be updated x87_purgecache(dyn, ninst, 0, x3, x1, x4); - if((box64_log<2 && !cycle_log) && dyn->insts[ninst].natcall && tmp) { + if ((BOX64ENV(log)<2 && !BOX64ENV(rolling_log)) && dyn->insts[ninst].natcall && tmp) { //GETIP(ip+3+8+8); // read the 0xCC call_n(dyn, ninst, *(void**)(dyn->insts[ninst].natcall+2+8), tmp); SMWRITE2(); @@ -3239,7 +3239,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin PUSH1z(x2); break; default: - if((box64_dynarec_safeflags>1) || (ninst && dyn->insts[ninst-1].x64.set_flags)) { + if((BOX64ENV(dynarec_safeflags)>1) || (ninst && dyn->insts[ninst-1].x64.set_flags)) { READFLAGS(X_PEND); // that's suspicious } else { SETFLAGS(X_ALL, SF_SET_NODF); // Hack to set flags to "dont'care" state @@ -3248,7 +3248,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin MOV64x(x2, addr); fpu_purgecache(dyn, ninst, 1, x1, x3, x4); PUSH1z(x2); - if(box64_dynarec_callret) { + if(BOX64ENV(dynarec_callret)) { SET_HASCALLRET(); // Push actual return address if(addr < (dyn->start+dyn->isize)) { @@ -3271,7 +3271,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin else j64 = addr+i32; jump_to_next(dyn, j64, 0, ninst, rex.is32bits); - if(box64_dynarec_callret && addr >= (dyn->start + dyn->isize)) { + if(BOX64ENV(dynarec_callret) && addr >= (dyn->start + dyn->isize)) { // jumps out of current dynablock... 
MARK; j64 = getJumpTableAddress64(addr); @@ -3329,9 +3329,9 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin case 0xEE: /* OUT DX, AL */ case 0xEF: /* OUT DX, EAX */ INST_NAME(opcode==0xEC?"IN AL, DX":(opcode==0xED?"IN EAX, DX":(opcode==0xEE?"OUT DX, AL":"OUT DX, EAX"))); - if(rex.is32bits && box64_ignoreint3) + if(rex.is32bits && BOX64ENV(ignoreint3)) {} else { - if(box64_dynarec_safeflags>1) { + if(BOX64ENV(dynarec_safeflags)>1) { READFLAGS(X_PEND); } else { SETFLAGS(X_ALL, SF_SET_NODF); // Hack to set flags in "don't care" state @@ -3351,7 +3351,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin break; case 0xF1: INST_NAME("INT1"); - if(box64_dynarec_safeflags>1) { + if(BOX64ENV(dynarec_safeflags)>1) { READFLAGS(X_PEND); } else { SETFLAGS(X_ALL, SF_SET_NODF); // Hack to set flags in "don't care" state @@ -3367,7 +3367,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin case 0xF4: INST_NAME("HLT"); - if(box64_dynarec_safeflags>1) { + if(BOX64ENV(dynarec_safeflags)>1) { READFLAGS(X_PEND); } else { SETFLAGS(X_ALL, SF_SET_NODF); // Hack to set flags in "don't care" state @@ -3437,7 +3437,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin } } IFX(X_AF | X_PF | X_ZF | X_SF) - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { // to avoid noise during test MOV32w(x3, (1<<F_ZF)|(1<<F_AF)|(1<<F_PF)|(1<<F_SF)); BICw(xFlags, xFlags, x3); @@ -3465,7 +3465,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin } } IFX(X_AF | X_PF | X_ZF | X_SF) - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { // to avoid noise during test MOV32w(x3, (1<<F_ZF)|(1<<F_AF)|(1<<F_PF)|(1<<F_SF)); BICw(xFlags, xFlags, x3); @@ -3477,7 +3477,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin SETFLAGS(X_ALL, SF_SET); GETEB(x1, 0); UXTHw(x2, xRAX); - if(box64_dynarec_div0) { + if(BOX64ENV(dynarec_div0)) { CBNZw_MARK3(ed); GETIP_(ip); STORE_XEMU_CALL(xRIP); @@ -3493,7 +3493,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin BFIx(xRAX, x4, 8, 8); SET_DFNONE(); IFX(X_AF | X_SF | X_CF | X_PF | X_ZF | X_OF) - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { MOV32w(x1, (1<<F_AF) | (1<<F_SF) | (1<<F_CF) | (1<<F_PF) | (1<<F_ZF) | (1<<F_OF)); BICw(xFlags, xFlags, x1); } @@ -3503,7 +3503,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin SKIPTEST(x1); SETFLAGS(X_ALL, SF_SET); GETSEB(x1, 0); - if(box64_dynarec_div0) { + if(BOX64ENV(dynarec_div0)) { CBNZw_MARK3(ed); GETIP_(ip); STORE_XEMU_CALL(xRIP); @@ -3520,7 +3520,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin BFIx(xRAX, x4, 8, 8); SET_DFNONE(); IFX(X_AF | X_SF | X_CF | X_PF | X_ZF | X_OF) - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { MOV32w(x1, (1<<F_AF) | (1<<F_SF) | (1<<F_CF) | (1<<F_PF) | (1<<F_ZF) | (1<<F_OF)); BICw(xFlags, xFlags, x1); } @@ -3578,7 +3578,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin } } IFX(X_AF | X_PF | X_ZF | X_SF) - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { // to avoid noise during test MOV32w(x3, (1<<F_ZF)|(1<<F_AF)|(1<<F_PF)|(1<<F_SF)); BICw(xFlags, xFlags, x3); @@ -3612,7 +3612,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin } } IFX(X_AF | X_PF | X_ZF | X_SF) - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { // 
to avoid noise during test MOV32w(x3, (1<<F_ZF)|(1<<F_AF)|(1<<F_PF)|(1<<F_SF)); BICw(xFlags, xFlags, x3); @@ -3635,7 +3635,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin CALL(native_div0, -1); LOAD_XEMU_CALL(xRIP); } else { - if(box64_dynarec_div0) { + if(BOX64ENV(dynarec_div0)) { CBNZx_MARK3(ed); GETIP_(ip); STORE_XEMU_CALL(xRIP); @@ -3662,7 +3662,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin && *(uint8_t*)(dyn->insts[ninst-1].x64.addr)==0x31 && *(uint8_t*)(dyn->insts[ninst-1].x64.addr+1)==0xD2) { GETED(0); - if(box64_dynarec_div0) { + if(BOX64ENV(dynarec_div0)) { CBNZx_MARK3(ed); GETIP_(ip); STORE_XEMU_CALL(xRIP); @@ -3677,7 +3677,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin MOVx_REG(xRAX, x2); } else { GETEDH(x1, 0); // get edd changed addr, so cannot be called 2 times for same op... - if(box64_dynarec_div0) { + if(BOX64ENV(dynarec_div0)) { CBNZx_MARK3(ed); GETIP_(ip); STORE_XEMU_CALL(xRIP); @@ -3699,7 +3699,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin } SET_DFNONE(); IFX(X_AF | X_SF | X_CF | X_PF | X_ZF | X_OF) - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { MOV32w(x1, (1<<F_AF) | (1<<F_SF) | (1<<F_CF) | (1<<F_PF) | (1<<F_ZF) | (1<<F_OF)); BICw(xFlags, xFlags, x1); } @@ -3710,7 +3710,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin SETFLAGS(X_ALL, SF_SET); if(!rex.w) { GETSEDw(0); - if(box64_dynarec_div0) { + if(BOX64ENV(dynarec_div0)) { CBNZx_MARK3(wb); GETIP_(ip); STORE_XEMU_CALL(xRIP); @@ -3732,7 +3732,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin && *(uint8_t*)(dyn->insts[ninst-1].x64.addr)==0x48 && *(uint8_t*)(dyn->insts[ninst-1].x64.addr+1)==0x99) { GETED(0); - if(box64_dynarec_div0) { + if(BOX64ENV(dynarec_div0)) { CBNZx_MARK3(ed); GETIP_(ip); STORE_XEMU_CALL(xRIP); @@ -3747,7 +3747,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin MOVx_REG(xRAX, x2); } else { GETEDH(x1, 0); // get edd changed addr, so cannot be called 2 times for same op... 
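The BOX64ENV(dynarec_div0) guards in the DIV/IDIV cases above exist because AArch64 UDIV/SDIV silently return 0 on a zero divisor, while x86 raises #DE; with the option set, the generated code branches to the signal path (native_div0) instead of executing the native divide. The rule restated in plain C, helper name illustrative:

#include <stdbool.h>
#include <stdint.h>

/* Returns false when the emulated instruction must raise #DE instead of
 * producing a result; a native AArch64 divide would otherwise just yield 0. */
static bool udiv64_checked(uint64_t dividend, uint64_t divisor,
                           uint64_t* quotient, uint64_t* remainder)
{
    if (divisor == 0)
        return false;
    *quotient  = dividend / divisor;
    *remainder = dividend % divisor;
    return true;
}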
- if(box64_dynarec_div0) { + if(BOX64ENV(dynarec_div0)) { CBNZx_MARK3(ed); GETIP_(ip); STORE_XEMU_CALL(xRIP); @@ -3777,7 +3777,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin } SET_DFNONE(); IFX(X_AF | X_SF | X_CF | X_PF | X_ZF | X_OF) - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { MOV32w(x1, (1<<F_AF) | (1<<F_SF) | (1<<F_CF) | (1<<F_PF) | (1<<F_ZF) | (1<<F_OF)); BICw(xFlags, xFlags, x1); } @@ -3799,9 +3799,9 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin case 0xFA: /* STI */ case 0xFB: /* CLI */ INST_NAME(opcode==0xFA?"CLI":"STI"); - if(rex.is32bits && box64_ignoreint3) + if(rex.is32bits && BOX64ENV(ignoreint3)) {} else { - if(box64_dynarec_safeflags>1) { + if(BOX64ENV(dynarec_safeflags)>1) { READFLAGS(X_PEND); } else { SETFLAGS(X_ALL, SF_SET_NODF); // Hack to set flags in "don't care" state @@ -3864,7 +3864,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin break; case 2: // CALL Ed INST_NAME("CALL Ed"); - PASS2IF((box64_dynarec_safeflags>1) || + PASS2IF((BOX64ENV(dynarec_safeflags)>1) || ((ninst && dyn->insts[ninst-1].x64.set_flags) || ((ninst>1) && dyn->insts[ninst-2].x64.set_flags)), 1) { @@ -3873,7 +3873,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin SETFLAGS(X_ALL, SF_SET_NODF); //Hack to put flag in "don't care" state } GETEDz(0); - if(box64_dynarec_callret && box64_dynarec_bigblock>1) { + if(BOX64ENV(dynarec_callret) && BOX64ENV(dynarec_bigblock)>1) { BARRIER(BARRIER_FULL); } else { BARRIER(BARRIER_FLOAT); @@ -3881,7 +3881,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin *ok = 0; } GETIP_(addr); - if(box64_dynarec_callret) { + if(BOX64ENV(dynarec_callret)) { SET_HASCALLRET(); // Push actual return address if(addr < (dyn->start+dyn->isize)) { @@ -3898,7 +3898,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin } PUSH1z(xRIP); jump_to_next(dyn, 0, ed, ninst, rex.is32bits); - if(box64_dynarec_callret && addr >= (dyn->start + dyn->isize)) { + if(BOX64ENV(dynarec_callret) && addr >= (dyn->start + dyn->isize)) { // jumps out of current dynablock... 
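When BOX64ENV(dynarec_callret) is set, the CALL lowering above records, next to the emulated return address pushed with PUSH1z, the native address to resume at, falling back to a jump-table entry when the return target lies outside the current block, so that a matching RET can branch straight back into generated code. The pairing can be pictured roughly like this; it is an illustration of the idea, not the JIT's actual data layout:

#include <stdint.h>

typedef struct callret_pair_s {
    uintptr_t x64_return;    /* RIP value the CALL pushed on the emulated stack */
    void*     native_return; /* generated-code address to jump to if RET matches */
} callret_pair_t;

static void* callret_resolve(const callret_pair_t* top, uintptr_t popped_rip,
                             void* jump_table_fallback)
{
    /* fast path: the address RET pops is exactly the one CALL recorded */
    if (top && top->x64_return == popped_rip)
        return top->native_return;
    /* otherwise fall back to the regular jump-table lookup */
    return jump_table_fallback;
}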
MARK; j64 = getJumpTableAddress64(addr); @@ -3922,7 +3922,7 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin LDH(x4, xEmu, offsetof(x64emu_t, segs[_CS])); GETIP_(addr); /* - if(box64_dynarec_callret) { + if(BOX64ENV(dynarec_callret)) { SET_HASCALLRET(); // Push actual return address if(addr < (dyn->start+dyn->isize)) { diff --git a/src/dynarec/arm64/dynarec_arm64_0f.c b/src/dynarec/arm64/dynarec_arm64_0f.c index fbb5d399..98f84863 100644 --- a/src/dynarec/arm64/dynarec_arm64_0f.c +++ b/src/dynarec/arm64/dynarec_arm64_0f.c @@ -165,7 +165,7 @@ uintptr_t dynarec64_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin case 0x09: INST_NAME("WBINVD"); - if(box64_dynarec_safeflags>1) { + if(BOX64ENV(dynarec_safeflags)>1) { READFLAGS(X_PEND); } else { SETFLAGS(X_ALL, SF_SET_NODF); // Hack to set flags in "don't care" state @@ -181,7 +181,7 @@ uintptr_t dynarec64_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin case 0x0B: INST_NAME("UD2"); - if(box64_dynarec_safeflags>1) { + if(BOX64ENV(dynarec_safeflags)>1) { READFLAGS(X_PEND); } else { SETFLAGS(X_ALL, SF_SET_NODF); // Hack to set flags in "don't care" state @@ -220,7 +220,7 @@ uintptr_t dynarec64_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin break; case 0x0E: INST_NAME("femms"); - if(box64_dynarec_safeflags>1) { + if(BOX64ENV(dynarec_safeflags)>1) { READFLAGS(X_PEND); } else { SETFLAGS(X_ALL, SF_SET_NODF); // Hack to set flags in "don't care" state @@ -446,7 +446,7 @@ uintptr_t dynarec64_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin nextop = F8; GETGM(q0); GETEX(v1, 0, 0); - if (box64_dynarec_fastround) { + if (BOX64ENV(dynarec_fastround)) { VFCVTZSS(q0, v1); } else { if(arm64_frintts) { @@ -480,7 +480,7 @@ uintptr_t dynarec64_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin nextop = F8; GETGM(q0); GETEX(v1, 0, 0); - if (box64_dynarec_fastround) { + if (BOX64ENV(dynarec_fastround)) { u8 = sse_setround(dyn, ninst, x1, x2, x3); VFRINTIS(q0, v1); x87_restoreround(dyn, ninst, u8); @@ -527,7 +527,7 @@ uintptr_t dynarec64_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin break; case 0x30: INST_NAME("WRMSR"); - if(box64_dynarec_safeflags>1) { + if(BOX64ENV(dynarec_safeflags)>1) { READFLAGS(X_PEND); } else { SETFLAGS(X_ALL, SF_SET_NODF); // Hack to set flags in "don't care" state @@ -1175,7 +1175,7 @@ uintptr_t dynarec64_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin // FMIN/FMAX wll not copy the value if v0[x] is NaN // but x86 will copy if either v0[x] or v1[x] is NaN, so lets force a copy if source is NaN VFMINQS(v0, v0, v1); - if(!box64_dynarec_fastnan && (v0!=v1)) { + if(!BOX64ENV(dynarec_fastnan) && (v0!=v1)) { q0 = fpu_get_scratch(dyn, ninst); VFCMEQQS(q0, v0, v0); // 0 is NaN, 1 is not NaN, so MASK for NaN VBIFQ(v0, v1, q0); // copy dest where source is NaN @@ -1196,7 +1196,7 @@ uintptr_t dynarec64_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin // FMIN/FMAX wll not copy the value if v0[x] is NaN // but x86 will copy if either v0[x] or v1[x] is NaN, so lets force a copy if source is NaN VFMAXQS(v0, v0, v1); - if(!box64_dynarec_fastnan && (v0!=v1)) { + if(!BOX64ENV(dynarec_fastnan) && (v0!=v1)) { q0 = fpu_get_scratch(dyn, ninst); VFCMEQQS(q0, v0, v0); // 0 is NaN, 1 is not NaN, so MASK for NaN VBIFQ(v0, v1, q0); // copy dest where source is NaN @@ -1693,7 +1693,7 @@ uintptr_t dynarec64_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin LSRxw_REG(x4, ed, x2); BFIw(xFlags, x4, F_CF, 1); } - 
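The repeated comment about FMIN/FMAX above boils down to this: the AArch64 instructions produce a NaN whenever either input is a NaN, while x86 MINPS/MAXPS simply return the second (source) operand in that case, and on ties as well, so the !fastnan path copies the source back over NaN lanes. Per lane, the x86 rule is the following (shown for doubles; the single-precision rule is identical):

#include <math.h>

static double x86_min_lane(double dst, double src)
{
    if (isnan(dst) || isnan(src))
        return src;                 /* x86 always hands back the source operand */
    return (dst < src) ? dst : src; /* ties also return the source */
}

static double x86_max_lane(double dst, double src)
{
    if (isnan(dst) || isnan(src))
        return src;
    return (dst > src) ? dst : src;
}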
if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { IFX(X_OF) {BFCw(xFlags, F_OF, 1);} IFX(X_SF) {BFCw(xFlags, F_SF, 1);} IFX(X_AF) {BFCw(xFlags, F_AF, 1);} @@ -1719,7 +1719,7 @@ uintptr_t dynarec64_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin nextop = F8; INST_NAME("SHLD Ed, Gd, CL"); SETFLAGS(X_ALL, SF_SET_PENDING); // some flags are left undefined - if(box64_dynarec_safeflags>1) + if(BOX64ENV(dynarec_safeflags)>1) MAYSETFLAGS(); GETGD; GETED(0); @@ -1781,7 +1781,7 @@ uintptr_t dynarec64_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin STxw(ed, wback, fixedaddress); SMWRITE(); } - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { IFX(X_OF) {BFCw(xFlags, F_OF, 1);} IFX(X_SF) {BFCw(xFlags, F_SF, 1);} IFX(X_AF) {BFCw(xFlags, F_AF, 1);} @@ -1807,7 +1807,7 @@ uintptr_t dynarec64_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin nextop = F8; INST_NAME("SHRD Ed, Gd, CL"); SETFLAGS(X_ALL, SF_SET_PENDING); // some flags are left undefined - if(box64_dynarec_safeflags>1) + if(BOX64ENV(dynarec_safeflags)>1) MAYSETFLAGS(); GETGD; GETED(0); @@ -1863,7 +1863,7 @@ uintptr_t dynarec64_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin INST_NAME("LDMXCSR Md"); GETED(0); STRw_U12(ed, xEmu, offsetof(x64emu_t, mxcsr)); - if(box64_sse_flushto0) { + if(BOX64ENV(sse_flushto0)) { MRS_fpcr(x1); // get fpscr LSRw_IMM(x3, ed, 15); // get FZ bit BFIw(x1, x3, 24, 1); // inject FZ bit @@ -1956,7 +1956,7 @@ uintptr_t dynarec64_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin } } IFX(X_AF | X_PF | X_ZF | X_SF) - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { // to avoid noise during test MOV32w(x1, (1<<F_ZF)|(1<<F_AF)|(1<<F_PF)|(1<<F_SF)); BICw(xFlags, xFlags, x1); @@ -2025,7 +2025,7 @@ uintptr_t dynarec64_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin STxw(ed, wback, fixedaddress); SMWRITE(); } - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { IFX(X_OF) {BFCw(xFlags, F_OF, 1);} IFX(X_SF) {BFCw(xFlags, F_SF, 1);} IFX(X_AF) {BFCw(xFlags, F_AF, 1);} @@ -2088,7 +2088,7 @@ uintptr_t dynarec64_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin IFX(X_CF) { BFXILxw(xFlags, ed, u8, 1); // inject 1 bit from u8 to F_CF (i.e. 
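The LDMXCSR case above forwards the x86 flush-to-zero request to hardware when BOX64ENV(sse_flushto0) is set: MXCSR bit 15 (FTZ) is copied into FPCR bit 24 (FZ). The same bit move in plain C, for reference only:

#include <stdint.h>

static uint64_t fpcr_with_ftz(uint64_t fpcr, uint32_t mxcsr)
{
    uint64_t ftz = (mxcsr >> 15) & 1u;   /* MXCSR.FTZ, bit 15 */
    fpcr &= ~(1ull << 24);               /* clear FPCR.FZ */
    fpcr |= ftz << 24;                   /* copy the request over */
    return fpcr;
}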
pos 0) } - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { IFX(X_OF) {BFCw(xFlags, F_OF, 1);} IFX(X_SF) {BFCw(xFlags, F_SF, 1);} IFX(X_AF) {BFCw(xFlags, F_AF, 1);} @@ -2119,7 +2119,7 @@ uintptr_t dynarec64_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin STxw(ed, wback, fixedaddress); SMWRITE(); } - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { IFX(X_OF) {BFCw(xFlags, F_OF, 1);} IFX(X_SF) {BFCw(xFlags, F_SF, 1);} IFX(X_AF) {BFCw(xFlags, F_AF, 1);} @@ -2149,7 +2149,7 @@ uintptr_t dynarec64_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin STxw(ed, wback, fixedaddress); SMWRITE(); } - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { IFX(X_OF) {BFCw(xFlags, F_OF, 1);} IFX(X_SF) {BFCw(xFlags, F_SF, 1);} IFX(X_AF) {BFCw(xFlags, F_AF, 1);} @@ -2180,7 +2180,7 @@ uintptr_t dynarec64_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin STxw(ed, wback, fixedaddress); SMWRITE(); } - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { IFX(X_OF) {BFCw(xFlags, F_OF, 1);} IFX(X_SF) {BFCw(xFlags, F_SF, 1);} IFX(X_AF) {BFCw(xFlags, F_AF, 1);} @@ -2226,7 +2226,7 @@ uintptr_t dynarec64_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin STxw(ed, wback, fixedaddress); SMWRITE(); } - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { IFX(X_OF) {BFCw(xFlags, F_OF, 1);} IFX(X_SF) {BFCw(xFlags, F_SF, 1);} IFX(X_AF) {BFCw(xFlags, F_AF, 1);} diff --git a/src/dynarec/arm64/dynarec_arm64_64.c b/src/dynarec/arm64/dynarec_arm64_64.c index 060da592..a0c706cd 100644 --- a/src/dynarec/arm64/dynarec_arm64_64.c +++ b/src/dynarec/arm64/dynarec_arm64_64.c @@ -320,7 +320,7 @@ uintptr_t dynarec64_64(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin } } IFX(X_AF | X_PF | X_ZF | X_SF) - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { // to avoid noise during test MOV32w(x1, (1<<F_ZF)|(1<<F_AF)|(1<<F_PF)|(1<<F_SF)); BICw(xFlags, xFlags, x1); @@ -607,7 +607,7 @@ uintptr_t dynarec64_64(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin case 0x6C: case 0x6D: INST_NAME(opcode == 0x6C ? "INSB" : "INSD"); - if(box64_dynarec_safeflags>1) { + if(BOX64ENV(dynarec_safeflags)>1) { READFLAGS(X_PEND); } else { SETFLAGS(X_ALL, SF_SET_NODF); // Hack to set flags in "don't care" state @@ -623,7 +623,7 @@ uintptr_t dynarec64_64(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin case 0x6E: case 0x6F: INST_NAME(opcode == 0x6C ? 
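Every BOX64ENV(dynarec_test) block in these hunks does the same thing: the flags it clears are architecturally undefined after the instruction, so test mode pins them to zero to allow bit-exact comparison between interpreter and dynarec runs. The MOV32w/BICw pair is just a mask-out; the bit positions below are the usual EFLAGS ones and are assumed to match box64's F_* constants:

#include <stdint.h>

enum { F_CF = 0, F_PF = 2, F_AF = 4, F_ZF = 6, F_SF = 7, F_OF = 11 };

static uint32_t clear_undefined_flags(uint32_t eflags, uint32_t undefined_mask)
{
    return eflags & ~undefined_mask;     /* same effect as the MOV32w + BICw pair */
}

/* e.g. after the shift/BCD style cases above the cleared set is ZF|AF|PF|SF */
#define UNDEF_ZAPS ((1u << F_ZF) | (1u << F_AF) | (1u << F_PF) | (1u << F_SF))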
"OUTSB" : "OUTSD"); - if(box64_dynarec_safeflags>1) { + if(BOX64ENV(dynarec_safeflags)>1) { READFLAGS(X_PEND); } else { SETFLAGS(X_ALL, SF_SET_NODF); // Hack to set flags in "don't care" state @@ -1263,7 +1263,7 @@ uintptr_t dynarec64_64(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin case 6: INST_NAME("SHL Ed, CL"); SETFLAGS(X_ALL, SF_SET_PENDING); // some flags are left undefined - if(box64_dynarec_safeflags>1) + if(BOX64ENV(dynarec_safeflags)>1) MAYSETFLAGS(); if(rex.w) { ANDx_mask(x3, xRCX, 1, 0, 0b00101); //mask=0x000000000000003f @@ -1279,7 +1279,7 @@ uintptr_t dynarec64_64(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin case 5: INST_NAME("SHR Ed, CL"); SETFLAGS(X_ALL, SF_SET_PENDING); // some flags are left undefined - if(box64_dynarec_safeflags>1) + if(BOX64ENV(dynarec_safeflags)>1) MAYSETFLAGS(); if(rex.w) { ANDx_mask(x3, xRCX, 1, 0, 0b00101); //mask=0x000000000000003f @@ -1295,7 +1295,7 @@ uintptr_t dynarec64_64(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin case 7: INST_NAME("SAR Ed, CL"); SETFLAGS(X_ALL, SF_SET_PENDING); - if(box64_dynarec_safeflags>1) + if(BOX64ENV(dynarec_safeflags)>1) MAYSETFLAGS(); if(rex.w) { ANDx_mask(x3, xRCX, 1, 0, 0b00101); //mask=0x000000000000003f @@ -1367,7 +1367,7 @@ uintptr_t dynarec64_64(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin } } IFX(X_AF | X_PF | X_ZF | X_SF) - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { // to avoid noise during test MOV32w(x3, (1<<F_ZF)|(1<<F_AF)|(1<<F_PF)|(1<<F_SF)); BICw(xFlags, xFlags, x3); @@ -1402,7 +1402,7 @@ uintptr_t dynarec64_64(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin } } IFX(X_AF | X_PF | X_ZF | X_SF) - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { // to avoid noise during test MOV32w(x3, (1<<F_ZF)|(1<<F_AF)|(1<<F_PF)|(1<<F_SF)); BICw(xFlags, xFlags, x3); @@ -1414,7 +1414,7 @@ uintptr_t dynarec64_64(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin SETFLAGS(X_ALL, SF_SET); if(!rex.w) { GETEDO(x6, 0); - if(box64_dynarec_div0) { + if(BOX64ENV(dynarec_div0)) { CBNZx_MARK3(ed); GETIP_(ip); STORE_XEMU_CALL(xRIP); @@ -1440,7 +1440,7 @@ uintptr_t dynarec64_64(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin && *(uint8_t*)(dyn->insts[ninst-1].x64.addr)==0x31 && *(uint8_t*)(dyn->insts[ninst-1].x64.addr+1)==0xD2) { GETEDO(x6, 0); - if(box64_dynarec_div0) { + if(BOX64ENV(dynarec_div0)) { CBNZx_MARK3(ed); GETIP_(ip); STORE_XEMU_CALL(xRIP); @@ -1460,7 +1460,7 @@ uintptr_t dynarec64_64(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin CALL(div64, -1); B_NEXT_nocond; MARK; - if(box64_dynarec_div0) { + if(BOX64ENV(dynarec_div0)) { CBNZx_MARK3(ed); GETIP_(ip); STORE_XEMU_CALL(xRIP); @@ -1477,7 +1477,7 @@ uintptr_t dynarec64_64(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin } SET_DFNONE(); IFX(X_AF | X_SF | X_CF | X_PF | X_ZF | X_OF) - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { MOV32w(x1, (1<<F_AF) | (1<<F_SF) | (1<<F_CF) | (1<<F_PF) | (1<<F_ZF) | (1<<F_OF)); BICw(xFlags, xFlags, x1); } @@ -1490,7 +1490,7 @@ uintptr_t dynarec64_64(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin GETSEDOw(x6, 0); MOVw_REG(x3, xRAX); ORRx_REG_LSL(x3, x3, xRDX, 32); - if(box64_dynarec_div0) { + if(BOX64ENV(dynarec_div0)) { CBNZx_MARK3(wb); GETIP_(ip); STORE_XEMU_CALL(xRIP); @@ -1510,7 +1510,7 @@ uintptr_t dynarec64_64(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin && *(uint8_t*)(dyn->insts[ninst-1].x64.addr)==0x48 && *(uint8_t*)(dyn->insts[ninst-1].x64.addr+1)==0x99) { GETEDO(x6, 0); - 
if(box64_dynarec_div0) { + if(BOX64ENV(dynarec_div0)) { CBNZx_MARK3(ed); GETIP_(ip); STORE_XEMU_CALL(xRIP); @@ -1525,7 +1525,7 @@ uintptr_t dynarec64_64(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin MOVx_REG(xRAX, x2); } else { GETEDO(x6, 0); - if(box64_dynarec_div0) { + if(BOX64ENV(dynarec_div0)) { CBNZx_MARK3(ed); GETIP_(ip); STORE_XEMU_CALL(xRIP); @@ -1549,7 +1549,7 @@ uintptr_t dynarec64_64(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin } SET_DFNONE(); IFX(X_AF | X_SF | X_CF | X_PF | X_ZF | X_OF) - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { MOV32w(x1, (1<<F_AF) | (1<<F_SF) | (1<<F_CF) | (1<<F_PF) | (1<<F_ZF) | (1<<F_OF)); BICw(xFlags, xFlags, x1); } @@ -1577,7 +1577,7 @@ uintptr_t dynarec64_64(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin break; case 2: // CALL Ed INST_NAME("CALL Ed"); - PASS2IF((box64_dynarec_safeflags>1) || + PASS2IF((BOX64ENV(dynarec_safeflags)>1) || ((ninst && dyn->insts[ninst-1].x64.set_flags) || ((ninst>1) && dyn->insts[ninst-2].x64.set_flags)), 1) { @@ -1586,7 +1586,7 @@ uintptr_t dynarec64_64(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin SETFLAGS(X_ALL, SF_SET_NODF); //Hack to put flag in "don't care" state } GETEDOz(x6, 0); - if(box64_dynarec_callret && box64_dynarec_bigblock>1) { + if(BOX64ENV(dynarec_callret) && BOX64ENV(dynarec_bigblock)>1) { BARRIER(BARRIER_FULL); } else { BARRIER(BARRIER_FLOAT); @@ -1594,7 +1594,7 @@ uintptr_t dynarec64_64(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin *ok = 0; } GETIP_(addr); - if(box64_dynarec_callret) { + if(BOX64ENV(dynarec_callret)) { SET_HASCALLRET(); // Push actual return address if(addr < (dyn->start+dyn->isize)) { @@ -1611,7 +1611,7 @@ uintptr_t dynarec64_64(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin } PUSH1z(xRIP); jump_to_next(dyn, 0, ed, ninst, rex.is32bits); - if(box64_dynarec_callret && addr >= (dyn->start + dyn->isize)) { + if(BOX64ENV(dynarec_callret) && addr >= (dyn->start + dyn->isize)) { // jumps out of current dynablock... 
MARK; j64 = getJumpTableAddress64(addr); diff --git a/src/dynarec/arm64/dynarec_arm64_66.c b/src/dynarec/arm64/dynarec_arm64_66.c index c2a92dc0..b42e14d2 100644 --- a/src/dynarec/arm64/dynarec_arm64_66.c +++ b/src/dynarec/arm64/dynarec_arm64_66.c @@ -458,7 +458,7 @@ uintptr_t dynarec64_66(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin } } IFX(X_AF | X_PF | X_ZF | X_SF) - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { // to avoid noise during test MOV32w(x3, (1<<F_ZF)|(1<<F_AF)|(1<<F_PF)|(1<<F_SF)); BICw(xFlags, xFlags, x3); @@ -836,7 +836,7 @@ uintptr_t dynarec64_66(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin case 1: case 2: if(rep==1) {INST_NAME("REPNZ CMPSW");} else {INST_NAME("REPZ CMPSW");} - if(box64_dynarec_safeflags>1) + if(BOX64ENV(dynarec_safeflags)>1) MAYSETFLAGS(); SETFLAGS(X_ALL, SF_SET_PENDING); CBZx_NEXT(xRCX); @@ -936,7 +936,7 @@ uintptr_t dynarec64_66(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin case 1: case 2: if(rep==1) {INST_NAME("REPNZ SCASW");} else {INST_NAME("REPZ SCASW");} - if(box64_dynarec_safeflags>1) + if(BOX64ENV(dynarec_safeflags)>1) MAYSETFLAGS(); SETFLAGS(X_ALL, SF_SET_PENDING); CBZx_NEXT(xRCX); @@ -1167,7 +1167,7 @@ uintptr_t dynarec64_66(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin case 0: INST_NAME("ROL Ew, CL"); SETFLAGS(X_OF|X_CF, SF_SUBSET); - if(box64_dynarec_safeflags>1) + if(BOX64ENV(dynarec_safeflags)>1) MAYSETFLAGS(); UFLAG_IF { ANDw_mask(x2, xRCX, 0, 0b00100); //mask=0x00000001f @@ -1193,7 +1193,7 @@ uintptr_t dynarec64_66(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin case 1: INST_NAME("ROR Ew, CL"); SETFLAGS(X_OF|X_CF, SF_SUBSET); - if(box64_dynarec_safeflags>1) + if(BOX64ENV(dynarec_safeflags)>1) MAYSETFLAGS(); UFLAG_IF { ANDw_mask(x2, xRCX, 0, 0b00100); //mask=0x00000001f @@ -1219,7 +1219,7 @@ uintptr_t dynarec64_66(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin INST_NAME("RCL Ew, CL"); MESSAGE(LOG_DUMP, "Need Optimization\n"); READFLAGS(X_CF); - if(box64_dynarec_safeflags>1) + if(BOX64ENV(dynarec_safeflags)>1) MAYSETFLAGS(); SETFLAGS(X_OF|X_CF, SF_SET_DF); ANDw_mask(x2, xRCX, 0, 0b00100); @@ -1231,7 +1231,7 @@ uintptr_t dynarec64_66(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin INST_NAME("RCR Ew, CL"); MESSAGE(LOG_DUMP, "Need Optimization\n"); READFLAGS(X_CF); - if(box64_dynarec_safeflags>1) + if(BOX64ENV(dynarec_safeflags)>1) MAYSETFLAGS(); SETFLAGS(X_OF|X_CF, SF_SET_DF); ANDw_mask(x2, xRCX, 0, 0b00100); @@ -1243,7 +1243,7 @@ uintptr_t dynarec64_66(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin case 6: INST_NAME("SHL Ew, CL"); SETFLAGS(X_ALL, SF_SET_PENDING); // some flags are left undefined - if(box64_dynarec_safeflags>1) + if(BOX64ENV(dynarec_safeflags)>1) MAYSETFLAGS(); ANDw_mask(x2, xRCX, 0, 0b00100); //mask=0x00000001f UFLAG_IF { @@ -1256,7 +1256,7 @@ uintptr_t dynarec64_66(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin case 5: INST_NAME("SHR Ew, CL"); SETFLAGS(X_ALL, SF_SET_PENDING); // some flags are left undefined - if(box64_dynarec_safeflags>1) + if(BOX64ENV(dynarec_safeflags)>1) MAYSETFLAGS(); ANDw_mask(x2, xRCX, 0, 0b00100); //mask=0x00000001f UFLAG_IF { @@ -1269,7 +1269,7 @@ uintptr_t dynarec64_66(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin case 7: INST_NAME("SAR Ew, CL"); SETFLAGS(X_ALL, SF_SET_PENDING); // some flags are left undefined - if(box64_dynarec_safeflags>1) + if(BOX64ENV(dynarec_safeflags)>1) MAYSETFLAGS(); ANDw_mask(x2, xRCX, 0, 0b00100); //mask=0x00000001f UFLAG_IF { @@ -1385,7 +1385,7 
@@ uintptr_t dynarec64_66(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin } } IFX(X_AF | X_PF | X_ZF | X_SF) - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { // to avoid noise during test MOV32w(x3, (1<<F_ZF)|(1<<F_AF)|(1<<F_PF)|(1<<F_SF)); BICw(xFlags, xFlags, x3); @@ -1414,7 +1414,7 @@ uintptr_t dynarec64_66(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin } } IFX(X_AF | X_PF | X_ZF | X_SF) - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { // to avoid noise during test MOV32w(x3, (1<<F_ZF)|(1<<F_AF)|(1<<F_PF)|(1<<F_SF)); BICw(xFlags, xFlags, x3); @@ -1427,7 +1427,7 @@ uintptr_t dynarec64_66(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin GETEW(x1, 0); UXTHw(x2, xRAX); BFIw(x2, xRDX, 16, 16); - if(box64_dynarec_div0) { + if(BOX64ENV(dynarec_div0)) { CBNZw_MARK3(ed); GETIP_(ip); STORE_XEMU_CALL(xRIP); @@ -1443,7 +1443,7 @@ uintptr_t dynarec64_66(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin BFIz(xRDX, x4, 0, 16); SET_DFNONE(); IFX(X_AF | X_SF | X_CF | X_PF | X_ZF | X_OF) - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { MOV32w(x1, (1<<F_AF) | (1<<F_SF) | (1<<F_CF) | (1<<F_PF) | (1<<F_ZF) | (1<<F_OF)); BICw(xFlags, xFlags, x1); } @@ -1453,7 +1453,7 @@ uintptr_t dynarec64_66(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin SKIPTEST(x1); SETFLAGS(X_ALL, SF_SET); GETSEW(x1, 0); - if(box64_dynarec_div0) { + if(BOX64ENV(dynarec_div0)) { CBNZw_MARK3(ed); GETIP_(ip); STORE_XEMU_CALL(xRIP); @@ -1471,7 +1471,7 @@ uintptr_t dynarec64_66(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin BFIz(xRDX, x4, 0, 16); SET_DFNONE(); IFX(X_AF | X_SF | X_CF | X_PF | X_ZF | X_OF) - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { MOV32w(x1, (1<<F_AF) | (1<<F_SF) | (1<<F_CF) | (1<<F_PF) | (1<<F_ZF) | (1<<F_OF)); BICw(xFlags, xFlags, x1); } diff --git a/src/dynarec/arm64/dynarec_arm64_660f.c b/src/dynarec/arm64/dynarec_arm64_660f.c index 8867b0a6..54f98516 100644 --- a/src/dynarec/arm64/dynarec_arm64_660f.c +++ b/src/dynarec/arm64/dynarec_arm64_660f.c @@ -231,7 +231,7 @@ uintptr_t dynarec64_660F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n nextop = F8; GETGM(q0); GETEX(v1, 0, 0); - if(box64_dynarec_fastround) { + if(BOX64ENV(dynarec_fastround)) { VFCVTZSQD(q0, v1); SQXTN_32(q0, q0); } else { @@ -267,7 +267,7 @@ uintptr_t dynarec64_660F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n nextop = F8; GETGM(q0); GETEX(v1, 0, 0); - if(box64_dynarec_fastround) { + if(BOX64ENV(dynarec_fastround)) { u8 = sse_setround(dyn, ninst, x1, x2, x3); VFRINTIDQ(q0, v1); FCVTXN(q0, q0); @@ -1640,7 +1640,7 @@ uintptr_t dynarec64_660F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n nextop = F8; GETEX(q0, 0, 0); GETGX_empty(q1); - if(!box64_dynarec_fastnan) { + if(!BOX64ENV(dynarec_fastnan)) { v0 = fpu_get_scratch(dyn, ninst); v1 = fpu_get_scratch(dyn, ninst); // check if any input value was NAN @@ -1695,7 +1695,7 @@ uintptr_t dynarec64_660F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n nextop = F8; GETEX(q0, 0, 0); GETGX(q1, 1); - if(!box64_dynarec_fastnan) { + if(!BOX64ENV(dynarec_fastnan)) { v0 = fpu_get_scratch(dyn, ninst); v1 = fpu_get_scratch(dyn, ninst); // check if any input value was NAN @@ -1703,7 +1703,7 @@ uintptr_t dynarec64_660F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n VFCMEQQD(v0, v0, v0); // 0 if NAN, 1 if not NAN } VFADDQD(q1, q1, q0); - if(!box64_dynarec_fastnan) { + if(!BOX64ENV(dynarec_fastnan)) { VFCMEQQD(v1, q1, q1); // 0 => out is NAN VBICQ(v1, v0, v1); // forget it in any 
input was a NAN already VSHLQ_64(v1, v1, 63); // only keep the sign bit @@ -1715,7 +1715,7 @@ uintptr_t dynarec64_660F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n nextop = F8; GETEX(q0, 0, 0); GETGX(q1, 1); - if(!box64_dynarec_fastnan) { + if(!BOX64ENV(dynarec_fastnan)) { v0 = fpu_get_scratch(dyn, ninst); v1 = fpu_get_scratch(dyn, ninst); // check if any input value was NAN @@ -1723,7 +1723,7 @@ uintptr_t dynarec64_660F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n VFCMEQQD(v0, v0, v0); // 0 if NAN, 1 if not NAN } VFMULQD(q1, q1, q0); - if(!box64_dynarec_fastnan) { + if(!BOX64ENV(dynarec_fastnan)) { VFCMEQQD(v1, q1, q1); // 0 => out is NAN VBICQ(v1, v0, v1); // forget it in any input was a NAN already VSHLQ_64(v1, v1, 63); // only keep the sign bit @@ -1735,7 +1735,7 @@ uintptr_t dynarec64_660F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n nextop = F8; GETEX(v1, 0, 0); GETGX_empty(v0); - if(box64_dynarec_fastround==2) { + if(BOX64ENV(dynarec_fastround)==2) { FCVTXN(v0, v1); } else { u8 = sse_setround(dyn, ninst, x1, x2, x3); @@ -1748,7 +1748,7 @@ uintptr_t dynarec64_660F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n nextop = F8; GETEX(v1, 0, 0); GETGX_empty(v0); - if(box64_dynarec_fastround) { + if(BOX64ENV(dynarec_fastround)) { u8 = sse_setround(dyn, ninst, x1, x2, x3); VFRINTISQ(v0, v1); x87_restoreround(dyn, ninst, u8); @@ -1783,7 +1783,7 @@ uintptr_t dynarec64_660F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n nextop = F8; GETEX(q0, 0, 0); GETGX(q1, 1); - if(!box64_dynarec_fastnan) { + if(!BOX64ENV(dynarec_fastnan)) { v0 = fpu_get_scratch(dyn, ninst); v1 = fpu_get_scratch(dyn, ninst); // check if any input value was NAN @@ -1791,7 +1791,7 @@ uintptr_t dynarec64_660F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n VFCMEQQD(v0, v0, v0); // 0 if NAN, 1 if not NAN } VFSUBQD(q1, q1, q0); - if(!box64_dynarec_fastnan) { + if(!BOX64ENV(dynarec_fastnan)) { VFCMEQQD(v1, q1, q1); // 0 => out is NAN VBICQ(v1, v0, v1); // forget it in any input was a NAN already VSHLQ_64(v1, v1, 63); // only keep the sign bit @@ -1805,7 +1805,7 @@ uintptr_t dynarec64_660F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n GETEX(v1, 0, 0); // FMIN/FMAX wll not copy the value if v0[x] is NaN // but x86 will copy if either v0[x] or v1[x] is NaN, so lets force a copy if source is NaN - if(!box64_dynarec_fastnan && v0!=v1) { + if(!BOX64ENV(dynarec_fastnan) && v0!=v1) { q0 = fpu_get_scratch(dyn, ninst); VFCMEQQD(q0, v0, v0); // 0 is NaN, 1 is not NaN, so MASK for NaN VANDQ(v0, v0, q0); @@ -1819,7 +1819,7 @@ uintptr_t dynarec64_660F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n nextop = F8; GETEX(q0, 0, 0); GETGX(q1, 1); - if(!box64_dynarec_fastnan) { + if(!BOX64ENV(dynarec_fastnan)) { v0 = fpu_get_scratch(dyn, ninst); v1 = fpu_get_scratch(dyn, ninst); // check if any input value was NAN @@ -1827,7 +1827,7 @@ uintptr_t dynarec64_660F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n VFCMEQQD(v0, v0, v0); // 0 if NAN, 1 if not NAN } VFDIVQD(q1, q1, q0); - if(!box64_dynarec_fastnan) { + if(!BOX64ENV(dynarec_fastnan)) { VFCMEQQD(v1, q1, q1); // 0 => out is NAN VBICQ(v1, v0, v1); // forget it in any input was a NAN already VSHLQ_64(v1, v1, 63); // only keep the sign bit @@ -1841,7 +1841,7 @@ uintptr_t dynarec64_660F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n GETEX(v1, 0, 0); // FMIN/FMAX wll not copy the value if v0[x] is NaN // but x86 will copy if either v0[x] or v1[x] is NaN, so lets force a copy if source is NaN - 
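The longer !BOX64ENV(dynarec_fastnan) sequences around the packed add/sub/mul/div cases fix a different mismatch than the min/max one: when an operation generates a NaN from non-NaN inputs (inf - inf, 0 * inf, ...), x86 yields the negative default QNaN 0xFFF8000000000000 while AArch64 yields the positive one, so the extra compares isolate exactly those lanes and OR the sign bit back in. A scalar C model of the fixup, names illustrative:

#include <math.h>
#include <stdint.h>
#include <string.h>

static double fix_generated_nan_sign(double a, double b, double result)
{
    if (isnan(result) && !isnan(a) && !isnan(b)) {   /* a NaN was generated here */
        uint64_t bits;
        memcpy(&bits, &result, sizeof bits);
        bits |= 1ull << 63;                          /* x86 default QNaN is negative */
        memcpy(&result, &bits, sizeof bits);
    }
    return result;
}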
if(!box64_dynarec_fastnan && v0!=v1) { + if(!BOX64ENV(dynarec_fastnan) && v0!=v1) { q0 = fpu_get_scratch(dyn, ninst); VFCMEQQD(q0, v0, v0); // 0 is NaN, 1 is not NaN, so MASK for NaN VANDQ(v0, v0, q0); @@ -2261,7 +2261,7 @@ uintptr_t dynarec64_660F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n nextop = F8; GETGX(q1, 1); GETEX(q0, 0, 0); - if(!box64_dynarec_fastnan) { + if(!BOX64ENV(dynarec_fastnan)) { v0 = fpu_get_scratch(dyn, ninst); v1 = fpu_get_scratch(dyn, ninst); // check if any input value was NAN @@ -2272,7 +2272,7 @@ uintptr_t dynarec64_660F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n VFCMEQQD(v0, v0, v0); // 0 if NAN, 1 if not NAN } VFADDPQD(q1, q1, q0); - if(!box64_dynarec_fastnan) { + if(!BOX64ENV(dynarec_fastnan)) { VFCMEQQD(v1, q1, q1); // 0 => out is NAN VBICQ(v1, v0, v1); // forget it in any input was a NAN already VSHLQ_64(v1, v1, 63); // only keep the sign bit @@ -2348,7 +2348,7 @@ uintptr_t dynarec64_660F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n LSRw_REG(x1, ed, x2); BFIw(xFlags, x1, F_CF, 1); } - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { IFX(X_OF) {BFCw(xFlags, F_OF, 1);} IFX(X_SF) {BFCw(xFlags, F_SF, 1);} IFX(X_AF) {BFCw(xFlags, F_AF, 1);} @@ -2375,7 +2375,7 @@ uintptr_t dynarec64_660F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n nextop = F8; INST_NAME("SHLD Ew, Gw, CL"); SETFLAGS(X_ALL, SF_SET_PENDING); // some flags are left undefined - if(box64_dynarec_safeflags>1) + if(BOX64ENV(dynarec_safeflags)>1) MAYSETFLAGS(); GETGW(x2); GETEW(x1, 0); @@ -2416,7 +2416,7 @@ uintptr_t dynarec64_660F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n STH(ed, wback, fixedaddress); SMWRITE(); } - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { IFX(X_OF) {BFCw(xFlags, F_OF, 1);} IFX(X_SF) {BFCw(xFlags, F_SF, 1);} IFX(X_AF) {BFCw(xFlags, F_AF, 1);} @@ -2443,7 +2443,7 @@ uintptr_t dynarec64_660F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n nextop = F8; INST_NAME("SHRD Ew, Gw, CL"); SETFLAGS(X_ALL, SF_SET_PENDING); // some flags are left undefined - if(box64_dynarec_safeflags>1) + if(BOX64ENV(dynarec_safeflags)>1) MAYSETFLAGS(); GETGW(x2); GETEW(x1, 0); @@ -2508,7 +2508,7 @@ uintptr_t dynarec64_660F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n } } IFX(X_AF | X_PF | X_ZF | X_SF) - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { // to avoid noise during test MOV32w(x3, (1<<F_ZF)|(1<<F_AF)|(1<<F_PF)|(1<<F_SF)); BICw(xFlags, xFlags, x3); @@ -2544,7 +2544,7 @@ uintptr_t dynarec64_660F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n STH(ed, wback, fixedaddress); SMWRITE(); } - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { IFX(X_OF) {BFCw(xFlags, F_OF, 1);} IFX(X_SF) {BFCw(xFlags, F_SF, 1);} IFX(X_AF) {BFCw(xFlags, F_AF, 1);} @@ -2603,7 +2603,7 @@ uintptr_t dynarec64_660F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n IFX(X_CF) { BFXILxw(xFlags, ed, u8, 1); // inject 1 bit from u8 to F_CF (i.e. 
pos 0) } - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { IFX(X_OF) {BFCw(xFlags, F_OF, 1);} IFX(X_SF) {BFCw(xFlags, F_SF, 1);} IFX(X_AF) {BFCw(xFlags, F_AF, 1);} @@ -2623,7 +2623,7 @@ uintptr_t dynarec64_660F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n mask = convert_bitmask_xw(1<<u8); ORRxw_mask(ed, ed, (mask>>12)&1, mask&0x3F, (mask>>6)&0x3F); EWBACK; - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { IFX(X_OF) {BFCw(xFlags, F_OF, 1);} IFX(X_SF) {BFCw(xFlags, F_SF, 1);} IFX(X_AF) {BFCw(xFlags, F_AF, 1);} @@ -2642,7 +2642,7 @@ uintptr_t dynarec64_660F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n } BFCxw(ed, u8, 1); EWBACK; - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { IFX(X_OF) {BFCw(xFlags, F_OF, 1);} IFX(X_SF) {BFCw(xFlags, F_SF, 1);} IFX(X_AF) {BFCw(xFlags, F_AF, 1);} @@ -2662,7 +2662,7 @@ uintptr_t dynarec64_660F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n mask = convert_bitmask_xw(1<<u8); EORxw_mask(ed, ed, (mask>>12)&1, mask&0x3F, (mask>>6)&0x3F); EWBACK; - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { IFX(X_OF) {BFCw(xFlags, F_OF, 1);} IFX(X_SF) {BFCw(xFlags, F_SF, 1);} IFX(X_AF) {BFCw(xFlags, F_AF, 1);} @@ -2703,7 +2703,7 @@ uintptr_t dynarec64_660F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n STH(ed, wback, fixedaddress); SMWRITE(); } - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { IFX(X_OF) {BFCw(xFlags, F_OF, 1);} IFX(X_SF) {BFCw(xFlags, F_SF, 1);} IFX(X_AF) {BFCw(xFlags, F_AF, 1);} @@ -3134,7 +3134,7 @@ uintptr_t dynarec64_660F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n nextop = F8; GETEX(v1, 0, 0); GETGX_empty(v0); - if(box64_dynarec_fastround) { + if(BOX64ENV(dynarec_fastround)) { VFCVTZSQD(v0, v1); // convert double -> int64 SQXTN_32(v0, v0); // convert int64 -> int32 with saturation in lower part, RaZ high part } else { diff --git a/src/dynarec/arm64/dynarec_arm64_67.c b/src/dynarec/arm64/dynarec_arm64_67.c index f7e41c3e..fbe4b4c7 100644 --- a/src/dynarec/arm64/dynarec_arm64_67.c +++ b/src/dynarec/arm64/dynarec_arm64_67.c @@ -1488,7 +1488,7 @@ uintptr_t dynarec64_67(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin } } IFX(X_AF | X_PF | X_ZF | X_SF) - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { // to avoid noise during test MOV32w(x3, (1<<F_ZF)|(1<<F_AF)|(1<<F_PF)|(1<<F_SF)); BICw(xFlags, xFlags, x3); @@ -1522,7 +1522,7 @@ uintptr_t dynarec64_67(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin } } IFX(X_AF | X_PF | X_ZF | X_SF) - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { // to avoid noise during test MOV32w(x3, (1<<F_ZF)|(1<<F_AF)|(1<<F_PF)|(1<<F_SF)); BICw(xFlags, xFlags, x3); @@ -1567,7 +1567,7 @@ uintptr_t dynarec64_67(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin } SET_DFNONE(); IFX(X_AF | X_SF | X_CF | X_PF | X_ZF | X_OF) - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { MOV32w(x1, (1<<F_AF) | (1<<F_SF) | (1<<F_CF) | (1<<F_PF) | (1<<F_ZF) | (1<<F_OF)); BICw(xFlags, xFlags, x1); } @@ -1609,7 +1609,7 @@ uintptr_t dynarec64_67(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin } SET_DFNONE(); IFX(X_AF | X_SF | X_CF | X_PF | X_ZF | X_OF) - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { MOV32w(x1, (1<<F_AF) | (1<<F_SF) | (1<<F_CF) | (1<<F_PF) | (1<<F_ZF) | (1<<F_OF)); BICw(xFlags, xFlags, x1); } @@ -1635,7 +1635,7 @@ uintptr_t dynarec64_67(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin break; case 2: // CALL Ed INST_NAME("CALL Ed"); - 
PASS2IF((box64_dynarec_safeflags>1) || + PASS2IF((BOX64ENV(dynarec_safeflags)>1) || ((ninst && dyn->insts[ninst-1].x64.set_flags) || ((ninst>1) && dyn->insts[ninst-2].x64.set_flags)), 1) { @@ -1644,7 +1644,7 @@ uintptr_t dynarec64_67(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin SETFLAGS(X_ALL, SF_SET_NODF); //Hack to put flag in "don't care" state } GETED32(0); - if(box64_dynarec_callret && box64_dynarec_bigblock>1) { + if(BOX64ENV(dynarec_callret) && BOX64ENV(dynarec_bigblock)>1) { BARRIER(BARRIER_FULL); } else { BARRIER(BARRIER_FLOAT); @@ -1652,7 +1652,7 @@ uintptr_t dynarec64_67(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin *ok = 0; } GETIP_(addr); - if(box64_dynarec_callret) { + if(BOX64ENV(dynarec_callret)) { SET_HASCALLRET(); // Push actual return address if(addr < (dyn->start+dyn->isize)) { @@ -1669,7 +1669,7 @@ uintptr_t dynarec64_67(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin } PUSH1z(xRIP); jump_to_next(dyn, 0, ed, ninst, rex.is32bits); - if(box64_dynarec_callret && addr >= (dyn->start + dyn->isize)) { + if(BOX64ENV(dynarec_callret) && addr >= (dyn->start + dyn->isize)) { // jumps out of current dynablock... MARK; j64 = getJumpTableAddress64(addr); diff --git a/src/dynarec/arm64/dynarec_arm64_67_avx.c b/src/dynarec/arm64/dynarec_arm64_67_avx.c index 6dcf16bf..dbb32abd 100644 --- a/src/dynarec/arm64/dynarec_arm64_67_avx.c +++ b/src/dynarec/arm64/dynarec_arm64_67_avx.c @@ -132,7 +132,7 @@ uintptr_t dynarec64_67_AVX(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int } else {DEFAULT;} - if((*ok==-1) && (box64_dynarec_log>=LOG_INFO || box64_dynarec_dump || box64_dynarec_missing==1)) { + if((*ok==-1) && (BOX64ENV(dynarec_log)>=LOG_INFO || BOX64ENV(dynarec_dump) || BOX64ENV(dynarec_missing)==1)) { dynarec_log(LOG_NONE, "Dynarec unimplemented AVX opcode size %d prefix %s map %s opcode %02X ", 128<<vex.l, avx_prefix_string(vex.p), avx_map_string(vex.m), opcode); } return addr; diff --git a/src/dynarec/arm64/dynarec_arm64_avx.c b/src/dynarec/arm64/dynarec_arm64_avx.c index 0fa47ec0..ad90c319 100644 --- a/src/dynarec/arm64/dynarec_arm64_avx.c +++ b/src/dynarec/arm64/dynarec_arm64_avx.c @@ -71,7 +71,7 @@ uintptr_t dynarec64_AVX(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int ni addr = dynarec64_AVX_F3_0F38(dyn, addr, ip, ninst, vex, ok, need_epilog); else {DEFAULT;} - if((*ok==-1) && (box64_dynarec_log>=LOG_INFO || box64_dynarec_dump || box64_dynarec_missing==1)) { + if((*ok==-1) && (BOX64ENV(dynarec_log)>=LOG_INFO || BOX64ENV(dynarec_dump) || BOX64ENV(dynarec_missing)==1)) { dynarec_log(LOG_NONE, "Dynarec unimplemented AVX opcode size %d prefix %s map %s opcode %02X ", 128<<vex.l, avx_prefix_string(vex.p), avx_map_string(vex.m), opcode); } return addr; diff --git a/src/dynarec/arm64/dynarec_arm64_avx_0f.c b/src/dynarec/arm64/dynarec_arm64_avx_0f.c index 2cdc6e51..8c333f7d 100644 --- a/src/dynarec/arm64/dynarec_arm64_avx_0f.c +++ b/src/dynarec/arm64/dynarec_arm64_avx_0f.c @@ -490,7 +490,7 @@ uintptr_t dynarec64_AVX_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int case 0x5D: INST_NAME("VMINPS Gx, Vx, Ex"); nextop = F8; - if(!box64_dynarec_fastnan) { + if(!BOX64ENV(dynarec_fastnan)) { q0 = fpu_get_scratch(dyn, ninst); } for(int l=0; l<1+vex.l; ++l) { @@ -498,7 +498,7 @@ uintptr_t dynarec64_AVX_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int // FMIN/FMAX wll not copy a NaN if either is NaN // but x86 will copy src2 if either value is NaN, so lets force a copy of Src2 (Ex) if result is NaN VFMINQS(v0, v2, v1); - 
if(!box64_dynarec_fastnan && (v2!=v1)) { + if(!BOX64ENV(dynarec_fastnan) && (v2!=v1)) { VFCMEQQS(q0, v0, v0); // 0 is NaN, 1 is not NaN, so MASK for NaN VBIFQ(v0, v1, q0); // copy dest where source is NaN } @@ -518,7 +518,7 @@ uintptr_t dynarec64_AVX_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int case 0x5F: INST_NAME("VMAXPS Gx, Vx, Ex"); nextop = F8; - if(!box64_dynarec_fastnan) { + if(!BOX64ENV(dynarec_fastnan)) { q0 = fpu_get_scratch(dyn, ninst); } for(int l=0; l<1+vex.l; ++l) { @@ -526,7 +526,7 @@ uintptr_t dynarec64_AVX_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int // FMIN/FMAX wll not copy a NaN if either is NaN // but x86 will copy src2 if either value is NaN, so lets force a copy of Src2 (Ex) if result is NaN VFMAXQS(v0, v2, v1); - if(!box64_dynarec_fastnan && (v2!=v1)) { + if(!BOX64ENV(dynarec_fastnan) && (v2!=v1)) { VFCMEQQS(q0, v0, v0); // 0 is NaN, 1 is not NaN, so MASK for NaN VBIFQ(v0, v1, q0); // copy dest where source is NaN } @@ -568,7 +568,7 @@ uintptr_t dynarec64_AVX_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int INST_NAME("VLDMXCSR Md"); GETED(0); STRw_U12(ed, xEmu, offsetof(x64emu_t, mxcsr)); - if(box64_sse_flushto0) { + if(BOX64ENV(sse_flushto0)) { MRS_fpcr(x1); // get fpscr LSRw_IMM(x3, ed, 15); // get FZ bit BFIw(x1, x3, 24, 1); // inject FZ bit diff --git a/src/dynarec/arm64/dynarec_arm64_avx_0f38.c b/src/dynarec/arm64/dynarec_arm64_avx_0f38.c index d07f6d6f..da34d5f7 100644 --- a/src/dynarec/arm64/dynarec_arm64_avx_0f38.c +++ b/src/dynarec/arm64/dynarec_arm64_avx_0f38.c @@ -112,7 +112,7 @@ uintptr_t dynarec64_AVX_0F38(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, i BFIw(xFlags, x3, F_SF, 1); } IFX(X_OF) BFCw(xFlags, F_OF, 1); - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { IFX(X_AF) BFCw(xFlags, F_AF, 1); IFX(X_PF) BFCw(xFlags, F_PF, 1); } diff --git a/src/dynarec/arm64/dynarec_arm64_avx_66_0f.c b/src/dynarec/arm64/dynarec_arm64_avx_66_0f.c index b9ae6152..570098e4 100644 --- a/src/dynarec/arm64/dynarec_arm64_avx_66_0f.c +++ b/src/dynarec/arm64/dynarec_arm64_avx_66_0f.c @@ -295,13 +295,13 @@ uintptr_t dynarec64_AVX_66_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, case 0x51: INST_NAME("VSQRTPD Gx, Ex"); nextop = F8; - if(!box64_dynarec_fastnan) { + if(!BOX64ENV(dynarec_fastnan)) { d0 = fpu_get_scratch(dyn, ninst); d1 = fpu_get_scratch(dyn, ninst); } for(int l=0; l<1+vex.l; ++l) { if(!l) { GETGX_empty_EX(v0, v1, 0); } else { GETGY_empty_EY(v0, v1); } - if(!box64_dynarec_fastnan) { + if(!BOX64ENV(dynarec_fastnan)) { // check if any input value was NAN VFCMEQQD(d0, v1, v1); // 0 if NAN, 1 if not NAN VFSQRTQD(v0, v1); @@ -359,19 +359,19 @@ uintptr_t dynarec64_AVX_66_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, case 0x58: INST_NAME("VADDPD Gx, Vx, Ex"); nextop = F8; - if(!box64_dynarec_fastnan) { + if(!BOX64ENV(dynarec_fastnan)) { q0 = fpu_get_scratch(dyn, ninst); q1 = fpu_get_scratch(dyn, ninst); } for(int l=0; l<1+vex.l; ++l) { if(!l) { GETGX_empty_VXEX(v0, v2, v1, 0); } else { GETGY_empty_VYEY(v0, v2, v1); } - if(!box64_dynarec_fastnan) { + if(!BOX64ENV(dynarec_fastnan)) { // check if any input value was NAN VFMAXQD(q0, v2, v1); // propagate NAN VFCMEQQD(q0, q0, q0); // 0 if NAN, 1 if not NAN } VFADDQD(v0, v2, v1); - if(!box64_dynarec_fastnan) { + if(!BOX64ENV(dynarec_fastnan)) { VFCMEQQD(q1, v0, v0); // 0 => out is NAN VBICQ(q1, q0, q1); // forget it in any input was a NAN already VSHLQ_64(q1, q1, 63); // only keep the sign bit @@ -383,19 +383,19 @@ uintptr_t dynarec64_AVX_66_0F(dynarec_arm_t* dyn, 
uintptr_t addr, uintptr_t ip, case 0x59: INST_NAME("VMULPD Gx, Vx, Ex"); nextop = F8; - if(!box64_dynarec_fastnan) { + if(!BOX64ENV(dynarec_fastnan)) { q0 = fpu_get_scratch(dyn, ninst); q1 = fpu_get_scratch(dyn, ninst); } for(int l=0; l<1+vex.l; ++l) { if(!l) { GETGX_empty_VXEX(v0, v2, v1, 0); } else { GETGY_empty_VYEY(v0, v2, v1); } - if(!box64_dynarec_fastnan) { + if(!BOX64ENV(dynarec_fastnan)) { // check if any input value was NAN VFMAXQD(q0, v2, v1); // propagate NAN VFCMEQQD(q0, q0, q0); // 0 if NAN, 1 if not NAN } VFMULQD(v0, v2, v1); - if(!box64_dynarec_fastnan) { + if(!BOX64ENV(dynarec_fastnan)) { VFCMEQQD(q1, v0, v0); // 0 => out is NAN VBICQ(q1, q0, q1); // forget it in any input was a NAN already VSHLQ_64(q1, q1, 63); // only keep the sign bit @@ -409,7 +409,7 @@ uintptr_t dynarec64_AVX_66_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, nextop = F8; GETEX(v1, 0, 0); GETGX_empty(v0); - if(box64_dynarec_fastround==2) { + if(BOX64ENV(dynarec_fastround)==2) { FCVTXN(v0, v1); } else { u8 = sse_setround(dyn, ninst, x1, x2, x4); @@ -418,7 +418,7 @@ uintptr_t dynarec64_AVX_66_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, } if(vex.l) { GETEY(v1); - if(box64_dynarec_fastround==2) { + if(BOX64ENV(dynarec_fastround)==2) { FCVTXN2(v0, v1); } else { FCVTN2(v0, v1); @@ -431,14 +431,14 @@ uintptr_t dynarec64_AVX_66_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, INST_NAME("VCVTPS2DQ Gx, Ex"); nextop = F8; u8 = sse_setround(dyn, ninst, x1, x2, x6); - if(!box64_dynarec_fastround && !arm64_frintts) { + if(!BOX64ENV(dynarec_fastround) && !arm64_frintts) { d0 = fpu_get_scratch(dyn, ninst); d1 = fpu_get_scratch(dyn, ninst); MOVI_32_lsl(d1, 0x80, 3); } for(int l=0; l<1+vex.l; ++l) { if(!l) { GETEX(v1, 0, 0); GETGX_empty(v0); } else { GETGY_empty_EY(v0, v1); } - if(box64_dynarec_fastround) { + if(BOX64ENV(dynarec_fastround)) { VFRINTISQ(v0, v1); VFCVTZSQS(v0, v0); } else { @@ -467,19 +467,19 @@ uintptr_t dynarec64_AVX_66_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, case 0x5C: INST_NAME("VSUBPD Gx, Vx, Ex"); nextop = F8; - if(!box64_dynarec_fastnan) { + if(!BOX64ENV(dynarec_fastnan)) { q0 = fpu_get_scratch(dyn, ninst); q1 = fpu_get_scratch(dyn, ninst); } for(int l=0; l<1+vex.l; ++l) { if(!l) { GETGX_empty_VXEX(v0, v2, v1, 0); } else { GETGY_empty_VYEY(v0, v2, v1); } - if(!box64_dynarec_fastnan) { + if(!BOX64ENV(dynarec_fastnan)) { // check if any input value was NAN VFMAXQD(q0, v1, v2); // propagate NAN VFCMEQQD(q0, q0, q0); // 0 if NAN, 1 if not NAN } VFSUBQD(v0, v2, v1); - if(!box64_dynarec_fastnan) { + if(!BOX64ENV(dynarec_fastnan)) { VFCMEQQD(q1, v0, v0); // 0 => out is NAN VBICQ(q1, q0, q1); // forget it in any input was a NAN already VSHLQ_64(q1, q1, 63); // only keep the sign bit @@ -491,14 +491,14 @@ uintptr_t dynarec64_AVX_66_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, case 0x5D: INST_NAME("VMINPD Gx, Vx, Ex"); nextop = F8; - if(!box64_dynarec_fastnan) + if(!BOX64ENV(dynarec_fastnan)) q0 = fpu_get_scratch(dyn, ninst); for(int l=0; l<1+vex.l; ++l) { if(!l) { GETGX_empty_VXEX(v0, v2, v1, 0); } else { GETGY_empty_VYEY(v0, v2, v1); } // FMIN/FMAX wll not copy a NaN if either is NaN // but x86 will copy src2 if either value is NaN, so lets force a copy of Src2 (Ex) if result is NaN VFMINQD(v0, v2, v1); - if(!box64_dynarec_fastnan && (v2!=v1)) { + if(!BOX64ENV(dynarec_fastnan) && (v2!=v1)) { VFCMEQQD(q0, v0, v0); // 0 is NaN, 1 is not NaN, so MASK for NaN VBIFQ(v0, v1, q0); // copy dest where source is NaN } @@ -508,19 +508,19 @@ uintptr_t 
dynarec64_AVX_66_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, case 0x5E: INST_NAME("DIVPD Gx, Ex"); nextop = F8; - if(!box64_dynarec_fastnan) { + if(!BOX64ENV(dynarec_fastnan)) { q0 = fpu_get_scratch(dyn, ninst); q1 = fpu_get_scratch(dyn, ninst); } for(int l=0; l<1+vex.l; ++l) { if(!l) { GETGX_empty_VXEX(v0, v2, v1, 0); } else { GETGY_empty_VYEY(v0, v2, v1); } - if(!box64_dynarec_fastnan) { + if(!BOX64ENV(dynarec_fastnan)) { // check if any input value was NAN VFMAXQD(q0, v2, v1); // propagate NAN VFCMEQQD(q0, q0, q0); // 0 if NAN, 1 if not NAN } VFDIVQD(v0, v2, v1); - if(!box64_dynarec_fastnan) { + if(!BOX64ENV(dynarec_fastnan)) { VFCMEQQD(q1, v0, v0); // 0 => out is NAN VBICQ(q1, q0, q1); // forget it in any input was a NAN already VSHLQ_64(q1, q1, 63); // only keep the sign bit @@ -532,7 +532,7 @@ uintptr_t dynarec64_AVX_66_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, case 0x5F: INST_NAME("VMAXPD Gx, Vx, Ex"); nextop = F8; - if(!box64_dynarec_fastnan) { + if(!BOX64ENV(dynarec_fastnan)) { q0 = fpu_get_scratch(dyn, ninst); } for(int l=0; l<1+vex.l; ++l) { @@ -540,7 +540,7 @@ uintptr_t dynarec64_AVX_66_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, // FMIN/FMAX wll not copy a NaN if either is NaN // but x86 will copy src2 if either value is NaN, so lets force a copy of Src2 (Ex) if result is NaN VFMAXQD(v0, v2, v1); - if(!box64_dynarec_fastnan && (v2!=v1)) { + if(!BOX64ENV(dynarec_fastnan) && (v2!=v1)) { VFCMEQQD(q0, v0, v0); // 0 is NaN, 1 is not NaN, so MASK for NaN VBIFQ(v0, v1, q0); // copy dest where source is NaN } @@ -1125,13 +1125,13 @@ uintptr_t dynarec64_AVX_66_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, case 0x7C: INST_NAME("VHADDPD Gx, Vx, Ex"); nextop = F8; - if(!box64_dynarec_fastnan) { + if(!BOX64ENV(dynarec_fastnan)) { q0 = fpu_get_scratch(dyn, ninst); q1 = fpu_get_scratch(dyn, ninst); } for(int l=0; l<1+vex.l; ++l) { if(!l) { GETGX_empty_VXEX(v0, v2, v1, 0); } else { GETGY_empty_VYEY(v0, v2, v1); } - if(!box64_dynarec_fastnan) { + if(!BOX64ENV(dynarec_fastnan)) { // check if any input value was NAN // but need to mix low/high part VTRNQ1_64(q0, v2, v1); @@ -1140,7 +1140,7 @@ uintptr_t dynarec64_AVX_66_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, VFCMEQQD(q0, q0, q0); // 0 if NAN, 1 if not NAN } VFADDPQD(v0, v2, v1); - if(!box64_dynarec_fastnan) { + if(!BOX64ENV(dynarec_fastnan)) { VFCMEQQD(q1, v0, v0); // 0 => out is NAN VBICQ(q1, q0, q1); // forget it in any input was a NAN already VSHLQ_64(q1, q1, 63); // only keep the sign bit @@ -1616,11 +1616,11 @@ uintptr_t dynarec64_AVX_66_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, GETEX_Y(v1, 0, 0); GETGX_empty(v0); } else { - if(box64_dynarec_fastround) + if(BOX64ENV(dynarec_fastround)) d0 = fpu_get_scratch(dyn, ninst); GETEY(v1); } - if(box64_dynarec_fastround) { + if(BOX64ENV(dynarec_fastround)) { VFCVTZSQD(l?d0:v0, v1); // convert double -> int64 if(!l) SQXTN_32(v0, v0); // convert int64 -> int32 with saturation in lower part, RaZ high part diff --git a/src/dynarec/arm64/dynarec_arm64_avx_f2_0f.c b/src/dynarec/arm64/dynarec_arm64_avx_f2_0f.c index e8858014..8ee698ab 100644 --- a/src/dynarec/arm64/dynarec_arm64_avx_f2_0f.c +++ b/src/dynarec/arm64/dynarec_arm64_avx_f2_0f.c @@ -140,13 +140,13 @@ uintptr_t dynarec64_AVX_F2_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, nextop = F8; GETGD; GETEXSD(q0, 0, 0); - if(!box64_dynarec_fastround) { + if(!BOX64ENV(dynarec_fastround)) { MRS_fpsr(x5); BFCw(x5, FPSR_IOC, 1); // reset IOC bit MSR_fpsr(x5); } FCVTZSxwD(gd, q0); - 
if(!box64_dynarec_fastround) { + if(!BOX64ENV(dynarec_fastround)) { MRS_fpsr(x5); // get back FPSR to check the IOC bit TBZ_NEXT(x5, FPSR_IOC); if(rex.w) { @@ -161,7 +161,7 @@ uintptr_t dynarec64_AVX_F2_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, nextop = F8; GETGD; GETEXSD(q0, 0, 0); - if(!box64_dynarec_fastround) { + if(!BOX64ENV(dynarec_fastround)) { MRS_fpsr(x5); BFCw(x5, FPSR_IOC, 1); // reset IOC bit MSR_fpsr(x5); @@ -171,7 +171,7 @@ uintptr_t dynarec64_AVX_F2_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, FRINTID(d1, q0); x87_restoreround(dyn, ninst, u8); FCVTZSxwD(gd, d1); - if(!box64_dynarec_fastround) { + if(!BOX64ENV(dynarec_fastround)) { MRS_fpsr(x5); // get back FPSR to check the IOC bit TBZ_NEXT(x5, FPSR_IOC); if(rex.w) { @@ -188,14 +188,14 @@ uintptr_t dynarec64_AVX_F2_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, d1 = fpu_get_scratch(dyn, ninst); GETEXSD(v1, 0, 0); GETGX_empty_VX(v0, v2); - if(!box64_dynarec_fastnan) { + if(!BOX64ENV(dynarec_fastnan)) { q0 = fpu_get_scratch(dyn, ninst); q1 = fpu_get_scratch(dyn, ninst); // check if any input value was NAN FCMEQD(q0, v1, v1); // 0 if NAN, 1 if not NAN } FSQRTD(d1, v1); - if(!box64_dynarec_fastnan) { + if(!BOX64ENV(dynarec_fastnan)) { FCMEQD(q1, d1, d1); // 0 => out is NAN VBIC(q1, q0, q1); // forget it in any input was a NAN already VSHLQ_64(q1, q1, 63); // only keep the sign bit @@ -240,7 +240,7 @@ uintptr_t dynarec64_AVX_F2_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, d1 = fpu_get_scratch(dyn, ninst); GETEXSD(v1, 0, 0); GETGX_empty_VX(v0, v2); - if(box64_dynarec_fastround==2) { + if(BOX64ENV(dynarec_fastround)==2) { FCVT_S_D(d1, v1); } else { u8 = sse_setround(dyn, ninst, x1, x2, x3); @@ -288,7 +288,7 @@ uintptr_t dynarec64_AVX_F2_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, d1 = fpu_get_scratch(dyn, ninst); GETEXSD(v1, 0, 0); GETGX_empty_VX(v0, v2); - if(!box64_dynarec_fastnan) { + if(!BOX64ENV(dynarec_fastnan)) { q0 = fpu_get_scratch(dyn, ninst); q1 = fpu_get_scratch(dyn, ninst); // check if any input value was NAN @@ -296,7 +296,7 @@ uintptr_t dynarec64_AVX_F2_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, FCMEQD(q0, q0, q0); // 0 if NAN, 1 if not NAN } FDIVD(d1, v2, v1); - if(!box64_dynarec_fastnan) { + if(!BOX64ENV(dynarec_fastnan)) { FCMEQD(q1, d1, d1); // 0 => out is NAN VBIC(q1, q0, q1); // forget it in any input was a NAN already VSHLQ_64(q1, q1, 63); // only keep the sign bit @@ -418,12 +418,12 @@ uintptr_t dynarec64_AVX_F2_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, if(!l) { GETEX_Y(v1, 0, 0); GETGX_empty(v0); - if(!box64_dynarec_fastround || vex.l) + if(!BOX64ENV(dynarec_fastround) || vex.l) d0 = fpu_get_scratch(dyn, ninst); } else { GETEY(v1); } - if(box64_dynarec_fastround) { + if(BOX64ENV(dynarec_fastround)) { VFRINTIDQ(l?d0:v0, v1); VFCVTNSQD(l?d0:v0, l?d0:v0); // convert double -> int64 if(!l) diff --git a/src/dynarec/arm64/dynarec_arm64_avx_f3_0f.c b/src/dynarec/arm64/dynarec_arm64_avx_f3_0f.c index cb7d7cdb..eed9fb59 100644 --- a/src/dynarec/arm64/dynarec_arm64_avx_f3_0f.c +++ b/src/dynarec/arm64/dynarec_arm64_avx_f3_0f.c @@ -140,13 +140,13 @@ uintptr_t dynarec64_AVX_F3_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, nextop = F8; GETGD; GETEXSS(d0, 0, 0); - if(!box64_dynarec_fastround) { + if(!BOX64ENV(dynarec_fastround)) { MRS_fpsr(x5); BFCw(x5, FPSR_IOC, 1); // reset IOC bit MSR_fpsr(x5); } FCVTZSxwS(gd, d0); - if(!box64_dynarec_fastround) { + if(!BOX64ENV(dynarec_fastround)) { MRS_fpsr(x5); // get back FPSR to check the IOC bit TBZ_NEXT(x5, 
FPSR_IOC); if(rex.w) { @@ -161,7 +161,7 @@ uintptr_t dynarec64_AVX_F3_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, nextop = F8; GETGD; GETEXSS(q0, 0, 0); - if(!box64_dynarec_fastround) { + if(!BOX64ENV(dynarec_fastround)) { MRS_fpsr(x5); BFCw(x5, FPSR_IOC, 1); // reset IOC bit MSR_fpsr(x5); @@ -171,7 +171,7 @@ uintptr_t dynarec64_AVX_F3_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, FRINTIS(d1, q0); x87_restoreround(dyn, ninst, u8); FCVTZSxwS(gd, d1); - if(!box64_dynarec_fastround) { + if(!BOX64ENV(dynarec_fastround)) { MRS_fpsr(x5); // get back FPSR to check the IOC bit TBZ_NEXT(x5, FPSR_IOC); if(rex.w) { @@ -286,13 +286,13 @@ uintptr_t dynarec64_AVX_F3_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, INST_NAME("VCVTTPS2DQ Gx, Ex"); nextop = F8; d0 = fpu_get_scratch(dyn, ninst); - if(!box64_dynarec_fastround) { + if(!BOX64ENV(dynarec_fastround)) { MRS_fpsr(x5); ORRw_mask(x4, xZR, 1, 0); //0x80000000 } for(int l=0; l<1+vex.l; ++l) { if(!l) { GETGX_empty_EX(v0, v1, 0); } else { GETGY_empty_EY(v0, v1); } - if(box64_dynarec_fastround) { + if(BOX64ENV(dynarec_fastround)) { VFCVTZSQS(v0, v1); } else { BFCw(x5, FPSR_IOC, 1); // reset IOC bit diff --git a/src/dynarec/arm64/dynarec_arm64_d8.c b/src/dynarec/arm64/dynarec_arm64_d8.c index 826cad7d..129fe86e 100644 --- a/src/dynarec/arm64/dynarec_arm64_d8.c +++ b/src/dynarec/arm64/dynarec_arm64_d8.c @@ -52,14 +52,14 @@ uintptr_t dynarec64_D8(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin INST_NAME("FADD ST0, STx"); v1 = x87_get_st(dyn, ninst, x1, x2, 0, X87_COMBINE(0, nextop&7)); v2 = x87_get_st(dyn, ninst, x1, x2, nextop&7, X87_COMBINE(0, nextop&7)); - if(!box64_dynarec_fastround) + if(!BOX64ENV(dynarec_fastround)) u8 = x87_setround(dyn, ninst, x1, x2, x4); if(ST_IS_F(0)) { FADDS(v1, v1, v2); } else { FADDD(v1, v1, v2); } - if(!box64_dynarec_fastround) + if(!BOX64ENV(dynarec_fastround)) x87_restoreround(dyn, ninst, u8); break; case 0xC8: @@ -73,14 +73,14 @@ uintptr_t dynarec64_D8(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin INST_NAME("FMUL ST0, STx"); v1 = x87_get_st(dyn, ninst, x1, x2, 0, X87_COMBINE(0, nextop&7)); v2 = x87_get_st(dyn, ninst, x1, x2, nextop&7, X87_COMBINE(0, nextop&7)); - if(!box64_dynarec_fastround) + if(!BOX64ENV(dynarec_fastround)) u8 = x87_setround(dyn, ninst, x1, x2, x4); if(ST_IS_F(0)) { FMULS(v1, v1, v2); } else { FMULD(v1, v1, v2); } - if(!box64_dynarec_fastround) + if(!BOX64ENV(dynarec_fastround)) x87_restoreround(dyn, ninst, u8); break; case 0xD0: @@ -131,14 +131,14 @@ uintptr_t dynarec64_D8(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin INST_NAME("FSUB ST0, STx"); v1 = x87_get_st(dyn, ninst, x1, x2, 0, X87_COMBINE(0, nextop&7)); v2 = x87_get_st(dyn, ninst, x1, x2, nextop&7, X87_COMBINE(0, nextop&7)); - if(!box64_dynarec_fastround) + if(!BOX64ENV(dynarec_fastround)) u8 = x87_setround(dyn, ninst, x1, x2, x4); if(ST_IS_F(0)) { FSUBS(v1, v1, v2); } else { FSUBD(v1, v1, v2); } - if(!box64_dynarec_fastround) + if(!BOX64ENV(dynarec_fastround)) x87_restoreround(dyn, ninst, u8); break; case 0xE8: @@ -152,14 +152,14 @@ uintptr_t dynarec64_D8(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin INST_NAME("FSUBR ST0, STx"); v1 = x87_get_st(dyn, ninst, x1, x2, 0, X87_COMBINE(0, nextop&7)); v2 = x87_get_st(dyn, ninst, x1, x2, nextop&7, X87_COMBINE(0, nextop&7)); - if(!box64_dynarec_fastround) + if(!BOX64ENV(dynarec_fastround)) u8 = x87_setround(dyn, ninst, x1, x2, x4); if(ST_IS_F(0)) { FSUBS(v1, v2, v1); } else { FSUBD(v1, v2, v1); } - if(!box64_dynarec_fastround) + 
if(!BOX64ENV(dynarec_fastround)) x87_restoreround(dyn, ninst, u8); break; case 0xF0: @@ -173,14 +173,14 @@ uintptr_t dynarec64_D8(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin INST_NAME("FDIV ST0, STx"); v1 = x87_get_st(dyn, ninst, x1, x2, 0, X87_COMBINE(0, nextop&7)); v2 = x87_get_st(dyn, ninst, x1, x2, nextop&7, X87_COMBINE(0, nextop&7)); - if(!box64_dynarec_fastround) + if(!BOX64ENV(dynarec_fastround)) u8 = x87_setround(dyn, ninst, x1, x2, x4); if(ST_IS_F(0)) { FDIVS(v1, v1, v2); } else { FDIVD(v1, v1, v2); } - if(!box64_dynarec_fastround) + if(!BOX64ENV(dynarec_fastround)) x87_restoreround(dyn, ninst, u8); break; case 0xF8: @@ -194,14 +194,14 @@ uintptr_t dynarec64_D8(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin INST_NAME("FDIVR ST0, STx"); v1 = x87_get_st(dyn, ninst, x1, x2, 0, X87_COMBINE(0, nextop&7)); v2 = x87_get_st(dyn, ninst, x1, x2, nextop&7, X87_COMBINE(0, nextop&7)); - if(!box64_dynarec_fastround) + if(!BOX64ENV(dynarec_fastround)) u8 = x87_setround(dyn, ninst, x1, x2, x4); if(ST_IS_F(0)) { FDIVS(v1, v2, v1); } else { FDIVD(v1, v2, v1); } - if(!box64_dynarec_fastround) + if(!BOX64ENV(dynarec_fastround)) x87_restoreround(dyn, ninst, u8); break; default: @@ -215,7 +215,7 @@ uintptr_t dynarec64_D8(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin s0 = fpu_get_scratch(dyn, ninst); addr = geted(dyn, addr, ninst, nextop, &ed, x2, &fixedaddress, &unscaled, 0xfff<<2, 3, rex, NULL, 0, 0); VLD32(s0, ed, fixedaddress); - if(!box64_dynarec_fastround) + if(!BOX64ENV(dynarec_fastround)) u8 = x87_setround(dyn, ninst, x1, x5, x4); if(ST_IS_F(0)) { FADDS(v1, v1, s0); @@ -223,7 +223,7 @@ uintptr_t dynarec64_D8(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin FCVT_D_S(s0, s0); FADDD(v1, v1, s0); } - if(!box64_dynarec_fastround) + if(!BOX64ENV(dynarec_fastround)) x87_restoreround(dyn, ninst, u8); break; case 1: @@ -232,7 +232,7 @@ uintptr_t dynarec64_D8(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin s0 = fpu_get_scratch(dyn, ninst); addr = geted(dyn, addr, ninst, nextop, &ed, x2, &fixedaddress, &unscaled, 0xfff<<2, 3, rex, NULL, 0, 0); VLD32(s0, ed, fixedaddress); - if(!box64_dynarec_fastround) + if(!BOX64ENV(dynarec_fastround)) u8 = x87_setround(dyn, ninst, x1, x5, x4); if(ST_IS_F(0)) { FMULS(v1, v1, s0); @@ -240,7 +240,7 @@ uintptr_t dynarec64_D8(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin FCVT_D_S(s0, s0); FMULD(v1, v1, s0); } - if(!box64_dynarec_fastround) + if(!BOX64ENV(dynarec_fastround)) x87_restoreround(dyn, ninst, u8); break; case 2: @@ -278,7 +278,7 @@ uintptr_t dynarec64_D8(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin s0 = fpu_get_scratch(dyn, ninst); addr = geted(dyn, addr, ninst, nextop, &ed, x2, &fixedaddress, &unscaled, 0xfff<<2, 3, rex, NULL, 0, 0); VLD32(s0, ed, fixedaddress); - if(!box64_dynarec_fastround) + if(!BOX64ENV(dynarec_fastround)) u8 = x87_setround(dyn, ninst, x1, x5, x4); if(ST_IS_F(0)) { FSUBS(v1, v1, s0); @@ -286,7 +286,7 @@ uintptr_t dynarec64_D8(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin FCVT_D_S(s0, s0); FSUBD(v1, v1, s0); } - if(!box64_dynarec_fastround) + if(!BOX64ENV(dynarec_fastround)) x87_restoreround(dyn, ninst, u8); break; case 5: @@ -295,7 +295,7 @@ uintptr_t dynarec64_D8(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin s0 = fpu_get_scratch(dyn, ninst); addr = geted(dyn, addr, ninst, nextop, &ed, x2, &fixedaddress, &unscaled, 0xfff<<2, 3, rex, NULL, 0, 0); VLD32(s0, ed, fixedaddress); - if(!box64_dynarec_fastround) + if(!BOX64ENV(dynarec_fastround)) u8 
= x87_setround(dyn, ninst, x1, x5, x4); if(ST_IS_F(0)) { FSUBS(v1, s0, v1); @@ -303,7 +303,7 @@ uintptr_t dynarec64_D8(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin FCVT_D_S(s0, s0); FSUBD(v1, s0, v1); } - if(!box64_dynarec_fastround) + if(!BOX64ENV(dynarec_fastround)) x87_restoreround(dyn, ninst, u8); break; case 6: @@ -312,7 +312,7 @@ uintptr_t dynarec64_D8(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin s0 = fpu_get_scratch(dyn, ninst); addr = geted(dyn, addr, ninst, nextop, &ed, x2, &fixedaddress, &unscaled, 0xfff<<2, 3, rex, NULL, 0, 0); VLD32(s0, ed, fixedaddress); - if(!box64_dynarec_fastround) + if(!BOX64ENV(dynarec_fastround)) u8 = x87_setround(dyn, ninst, x1, x5, x4); if(ST_IS_F(0)) { FDIVS(v1, v1, s0); @@ -320,7 +320,7 @@ uintptr_t dynarec64_D8(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin FCVT_D_S(s0, s0); FDIVD(v1, v1, s0); } - if(!box64_dynarec_fastround) + if(!BOX64ENV(dynarec_fastround)) x87_restoreround(dyn, ninst, u8); break; case 7: @@ -329,7 +329,7 @@ uintptr_t dynarec64_D8(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin s0 = fpu_get_scratch(dyn, ninst); addr = geted(dyn, addr, ninst, nextop, &ed, x2, &fixedaddress, &unscaled, 0xfff<<2, 3, rex, NULL, 0, 0); VLD32(s0, ed, fixedaddress); - if(!box64_dynarec_fastround) + if(!BOX64ENV(dynarec_fastround)) u8 = x87_setround(dyn, ninst, x1, x5, x4); if(ST_IS_F(0)) { FDIVS(v1, s0, v1); @@ -337,7 +337,7 @@ uintptr_t dynarec64_D8(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin FCVT_D_S(s0, s0); FDIVD(v1, s0, v1); } - if(!box64_dynarec_fastround) + if(!BOX64ENV(dynarec_fastround)) x87_restoreround(dyn, ninst, u8); break; default: diff --git a/src/dynarec/arm64/dynarec_arm64_d9.c b/src/dynarec/arm64/dynarec_arm64_d9.c index c1961a01..0fc54e18 100644 --- a/src/dynarec/arm64/dynarec_arm64_d9.c +++ b/src/dynarec/arm64/dynarec_arm64_d9.c @@ -291,11 +291,11 @@ uintptr_t dynarec64_D9(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin MESSAGE(LOG_DUMP, "Need Optimization\n"); i1 = x87_stackcount(dyn, ninst, x1); x87_forget(dyn, ninst, x1, x2, 0); - if(!box64_dynarec_fastround) + if(!BOX64ENV(dynarec_fastround)) u8 = x87_setround(dyn, ninst, x1, x2, x4); - CALL_(native_ftan, -1, box64_dynarec_fastround ? 0 : u8); + CALL_(native_ftan, -1, BOX64ENV(dynarec_fastround) ? 0 : u8); x87_unstackcount(dyn, ninst, x1, i1); - if(!box64_dynarec_fastround) + if(!BOX64ENV(dynarec_fastround)) x87_restoreround(dyn, ninst, u8); if(PK(0)==0xdd && PK(1)==0xd8) { MESSAGE(LOG_DUMP, "Optimized next DD D8 fstp st0, st0, not emitting 1\n"); @@ -316,10 +316,10 @@ uintptr_t dynarec64_D9(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin i1 = x87_stackcount(dyn, ninst, x1); x87_forget(dyn, ninst, x1, x2, 0); x87_forget(dyn, ninst, x1, x2, 1); - if(!box64_dynarec_fastround) + if(!BOX64ENV(dynarec_fastround)) u8 = x87_setround(dyn, ninst, x1, x2, x4); - CALL_(native_fpatan, -1, box64_dynarec_fastround ? 0 : u8); - if(!box64_dynarec_fastround) + CALL_(native_fpatan, -1, BOX64ENV(dynarec_fastround) ? 
0 : u8); + if(!BOX64ENV(dynarec_fastround)) x87_restoreround(dyn, ninst, u8); x87_unstackcount(dyn, ninst, x1, i1); X87_POP_OR_FAIL(dyn, ninst, x3); @@ -426,14 +426,14 @@ uintptr_t dynarec64_D9(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin case 0xFA: INST_NAME("FSQRT"); v1 = x87_get_st(dyn, ninst, x1, x2, 0, X87_ST0); - if(!box64_dynarec_fastround) + if(!BOX64ENV(dynarec_fastround)) u8 = x87_setround(dyn, ninst, x1, x2, x4); if(ST_IS_F(0)) { FSQRTS(v1, v1); } else { FSQRTD(v1, v1); } - if(!box64_dynarec_fastround) + if(!BOX64ENV(dynarec_fastround)) x87_restoreround(dyn, ninst, u8); break; case 0xFB: @@ -442,10 +442,10 @@ uintptr_t dynarec64_D9(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin X87_PUSH_EMPTY_OR_FAIL(dyn, ninst, 0); i1 = x87_stackcount(dyn, ninst, x1); x87_forget(dyn, ninst, x1, x2, 1); - if(!box64_dynarec_fastround) + if(!BOX64ENV(dynarec_fastround)) u8 = x87_setround(dyn, ninst, x1, x2, x4); - CALL_(native_fsincos, -1, box64_dynarec_fastround ? 0 : u8); - if(!box64_dynarec_fastround) + CALL_(native_fsincos, -1, BOX64ENV(dynarec_fastround) ? 0 : u8); + if(!BOX64ENV(dynarec_fastround)) x87_restoreround(dyn, ninst, u8); x87_unstackcount(dyn, ninst, x1, i1); break; @@ -473,10 +473,10 @@ uintptr_t dynarec64_D9(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin i1 = x87_stackcount(dyn, ninst, x1); x87_forget(dyn, ninst, x1, x2, 0); x87_forget(dyn, ninst, x1, x2, 1); - if(!box64_dynarec_fastround) + if(!BOX64ENV(dynarec_fastround)) u8 = x87_setround(dyn, ninst, x1, x2, x4); - CALL_(native_fscale, -1, box64_dynarec_fastround ? 0 : u8); - if(!box64_dynarec_fastround) + CALL_(native_fscale, -1, BOX64ENV(dynarec_fastround) ? 0 : u8); + if(!BOX64ENV(dynarec_fastround)) x87_restoreround(dyn, ninst, u8); x87_unstackcount(dyn, ninst, x1, i1); break; @@ -485,10 +485,10 @@ uintptr_t dynarec64_D9(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin MESSAGE(LOG_DUMP, "Need Optimization\n"); i1 = x87_stackcount(dyn, ninst, x1); x87_forget(dyn, ninst, x1, x2, 0); - if(!box64_dynarec_fastround) + if(!BOX64ENV(dynarec_fastround)) u8 = x87_setround(dyn, ninst, x1, x2, x4); - CALL_(native_fsin, -1, box64_dynarec_fastround ? 0 : u8); - if(!box64_dynarec_fastround) + CALL_(native_fsin, -1, BOX64ENV(dynarec_fastround) ? 0 : u8); + if(!BOX64ENV(dynarec_fastround)) x87_restoreround(dyn, ninst, u8); x87_unstackcount(dyn, ninst, x1, i1); break; @@ -497,10 +497,10 @@ uintptr_t dynarec64_D9(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin MESSAGE(LOG_DUMP, "Need Optimization\n"); i1 = x87_stackcount(dyn, ninst, x1); x87_forget(dyn, ninst, x1, x2, 0); - if(!box64_dynarec_fastround) + if(!BOX64ENV(dynarec_fastround)) u8 = x87_setround(dyn, ninst, x1, x2, x4); - CALL_(native_fcos, -1, box64_dynarec_fastround ? 0 : u8); - if(!box64_dynarec_fastround) + CALL_(native_fcos, -1, BOX64ENV(dynarec_fastround) ? 
0 : u8); + if(!BOX64ENV(dynarec_fastround)) x87_restoreround(dyn, ninst, u8); x87_unstackcount(dyn, ninst, x1, i1); break; @@ -511,7 +511,7 @@ uintptr_t dynarec64_D9(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin switch((nextop>>3)&7) { case 0: INST_NAME("FLD ST0, float[ED]"); - X87_PUSH_OR_FAIL(v1, dyn, ninst, x1, box64_dynarec_x87double?NEON_CACHE_ST_D:NEON_CACHE_ST_F); + X87_PUSH_OR_FAIL(v1, dyn, ninst, x1, BOX64ENV(dynarec_x87double)?NEON_CACHE_ST_D:NEON_CACHE_ST_F); addr = geted(dyn, addr, ninst, nextop, &ed, x2, &fixedaddress, &unscaled, 0xfff<<2, 3, rex, NULL, 0, 0); VLD32(v1, ed, fixedaddress); if(!ST_IS_F(0)) { @@ -525,10 +525,10 @@ uintptr_t dynarec64_D9(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin s0 = v1; else { s0 = fpu_get_scratch(dyn, ninst); - if(!box64_dynarec_fastround) + if(!BOX64ENV(dynarec_fastround)) u8 = x87_setround(dyn, ninst, x1, x2, x4); FCVT_S_D(s0, v1); - if(!box64_dynarec_fastround) + if(!BOX64ENV(dynarec_fastround)) x87_restoreround(dyn, ninst, u8); } addr = geted(dyn, addr, ninst, nextop, &ed, x2, &fixedaddress, &unscaled, 0xfff<<2, 3, rex, NULL, 0, 0); @@ -539,10 +539,10 @@ uintptr_t dynarec64_D9(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin v1 = x87_get_st(dyn, ninst, x1, x2, 0, NEON_CACHE_ST_F); addr = geted(dyn, addr, ninst, nextop, &ed, x2, &fixedaddress, &unscaled, 0xfff<<2, 3, rex, NULL, 0, 0); if(!ST_IS_F(0)) { - if(!box64_dynarec_fastround) + if(!BOX64ENV(dynarec_fastround)) u8 = x87_setround(dyn, ninst, x1, x5, x4); FCVT_S_D(v1, v1); - if(!box64_dynarec_fastround) + if(!BOX64ENV(dynarec_fastround)) x87_restoreround(dyn, ninst, u8); } VST32(v1, ed, fixedaddress); diff --git a/src/dynarec/arm64/dynarec_arm64_da.c b/src/dynarec/arm64/dynarec_arm64_da.c index 6e4bb528..f6668985 100644 --- a/src/dynarec/arm64/dynarec_arm64_da.c +++ b/src/dynarec/arm64/dynarec_arm64_da.c @@ -149,10 +149,10 @@ uintptr_t dynarec64_DA(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin VLD32(v2, ed, fixedaddress); SXTL_32(v2, v2); // i32 -> i64 SCVTFDD(v2, v2); // i64 -> double - if(!box64_dynarec_fastround) + if(!BOX64ENV(dynarec_fastround)) u8 = x87_setround(dyn, ninst, x1, x5, x4); FADDD(v1, v1, v2); - if(!box64_dynarec_fastround) + if(!BOX64ENV(dynarec_fastround)) x87_restoreround(dyn, ninst, u8); break; case 1: @@ -163,10 +163,10 @@ uintptr_t dynarec64_DA(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin VLD32(v2, ed, fixedaddress); SXTL_32(v2, v2); // i32 -> i64 SCVTFDD(v2, v2); // i64 -> double - if(!box64_dynarec_fastround) + if(!BOX64ENV(dynarec_fastround)) u8 = x87_setround(dyn, ninst, x1, x5, x4); FMULD(v1, v1, v2); - if(!box64_dynarec_fastround) + if(!BOX64ENV(dynarec_fastround)) x87_restoreround(dyn, ninst, u8); break; case 2: @@ -200,10 +200,10 @@ uintptr_t dynarec64_DA(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin VLD32(v2, ed, fixedaddress); SXTL_32(v2, v2); // i32 -> i64 SCVTFDD(v2, v2); // i64 -> double - if(!box64_dynarec_fastround) + if(!BOX64ENV(dynarec_fastround)) u8 = x87_setround(dyn, ninst, x1, x5, x4); FSUBD(v1, v1, v2); - if(!box64_dynarec_fastround) + if(!BOX64ENV(dynarec_fastround)) x87_restoreround(dyn, ninst, u8); break; case 5: @@ -214,10 +214,10 @@ uintptr_t dynarec64_DA(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin VLD32(v2, ed, fixedaddress); SXTL_32(v2, v2); // i32 -> i64 SCVTFDD(v2, v2); // i64 -> double - if(!box64_dynarec_fastround) + if(!BOX64ENV(dynarec_fastround)) u8 = x87_setround(dyn, ninst, x1, x5, x4); FSUBD(v1, v2, v1); - if(!box64_dynarec_fastround) + 
if(!BOX64ENV(dynarec_fastround)) x87_restoreround(dyn, ninst, u8); break; case 6: @@ -228,10 +228,10 @@ uintptr_t dynarec64_DA(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin VLD32(v2, ed, fixedaddress); SXTL_32(v2, v2); // i32 -> i64 SCVTFDD(v2, v2); // i64 -> double - if(!box64_dynarec_fastround) + if(!BOX64ENV(dynarec_fastround)) u8 = x87_setround(dyn, ninst, x1, x5, x4); FDIVD(v1, v1, v2); - if(!box64_dynarec_fastround) + if(!BOX64ENV(dynarec_fastround)) x87_restoreround(dyn, ninst, u8); break; case 7: @@ -242,10 +242,10 @@ uintptr_t dynarec64_DA(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin VLD32(v2, ed, fixedaddress); SXTL_32(v2, v2); // i32 -> i64 SCVTFDD(v2, v2); // i64 -> double - if(!box64_dynarec_fastround) + if(!BOX64ENV(dynarec_fastround)) u8 = x87_setround(dyn, ninst, x1, x5, x4); FDIVD(v1, v2, v1); - if(!box64_dynarec_fastround) + if(!BOX64ENV(dynarec_fastround)) x87_restoreround(dyn, ninst, u8); break; } diff --git a/src/dynarec/arm64/dynarec_arm64_db.c b/src/dynarec/arm64/dynarec_arm64_db.c index 8e5d55b9..1a655889 100644 --- a/src/dynarec/arm64/dynarec_arm64_db.c +++ b/src/dynarec/arm64/dynarec_arm64_db.c @@ -300,7 +300,7 @@ uintptr_t dynarec64_DB(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin STRx_U12(x5, ed, 0); STRH_U12(x6, ed, 8); } else { - if(box64_x87_no80bits) { + if(BOX64ENV(x87_no80bits)) { X87_PUSH_OR_FAIL(v1, dyn, ninst, x1, NEON_CACHE_ST_D); VLDR64_U12(v1, ed, fixedaddress); } else { @@ -318,12 +318,12 @@ uintptr_t dynarec64_DB(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin break; case 7: INST_NAME("FSTP tbyte"); - if(box64_x87_no80bits) { + if(BOX64ENV(x87_no80bits)) { v1 = x87_get_st(dyn, ninst, x1, x2, 0, NEON_CACHE_ST_D); addr = geted(dyn, addr, ninst, nextop, &wback, x2, &fixedaddress, &unscaled, 0xfff<<3, 7, rex, NULL, 0, 0); VST64(v1, wback, fixedaddress); } else { - if(!box64_dynarec_fastround) { + if(!BOX64ENV(dynarec_fastround)) { x87_forget(dyn, ninst, x1, x3, 0); addr = geted(dyn, addr, ninst, nextop, &ed, x1, &fixedaddress, NULL, 0, 0, rex, NULL, 0, 0); if(ed!=x1) {MOVx_REG(x1, ed);} diff --git a/src/dynarec/arm64/dynarec_arm64_dc.c b/src/dynarec/arm64/dynarec_arm64_dc.c index 2fbac9d8..1e068277 100644 --- a/src/dynarec/arm64/dynarec_arm64_dc.c +++ b/src/dynarec/arm64/dynarec_arm64_dc.c @@ -50,14 +50,14 @@ uintptr_t dynarec64_DC(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin INST_NAME("FADD STx, ST0"); v2 = x87_get_st(dyn, ninst, x1, x2, 0, X87_COMBINE(0, nextop&7)); v1 = x87_get_st(dyn, ninst, x1, x2, nextop&7, X87_COMBINE(0, nextop&7)); - if(!box64_dynarec_fastround) + if(!BOX64ENV(dynarec_fastround)) u8 = x87_setround(dyn, ninst, x1, x2, x4); if(ST_IS_F(0)) { FADDS(v1, v1, v2); } else { FADDD(v1, v1, v2); } - if(!box64_dynarec_fastround) + if(!BOX64ENV(dynarec_fastround)) x87_restoreround(dyn, ninst, u8); break; case 0xC8: @@ -71,14 +71,14 @@ uintptr_t dynarec64_DC(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin INST_NAME("FMUL STx, ST0"); v2 = x87_get_st(dyn, ninst, x1, x2, 0, X87_COMBINE(0, nextop&7)); v1 = x87_get_st(dyn, ninst, x1, x2, nextop&7, X87_COMBINE(0, nextop&7)); - if(!box64_dynarec_fastround) + if(!BOX64ENV(dynarec_fastround)) u8 = x87_setround(dyn, ninst, x1, x2, x4); if(ST_IS_F(0)) { FMULS(v1, v1, v2); } else { FMULD(v1, v1, v2); } - if(!box64_dynarec_fastround) + if(!BOX64ENV(dynarec_fastround)) x87_restoreround(dyn, ninst, u8); break; case 0xD0: @@ -129,14 +129,14 @@ uintptr_t dynarec64_DC(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin 
INST_NAME("FSUBR STx, ST0"); v2 = x87_get_st(dyn, ninst, x1, x2, 0, X87_COMBINE(0, nextop&7)); v1 = x87_get_st(dyn, ninst, x1, x2, nextop&7, X87_COMBINE(0, nextop&7)); - if(!box64_dynarec_fastround) + if(!BOX64ENV(dynarec_fastround)) u8 = x87_setround(dyn, ninst, x1, x2, x4); if(ST_IS_F(0)) { FSUBS(v1, v2, v1); } else { FSUBD(v1, v2, v1); } - if(!box64_dynarec_fastround) + if(!BOX64ENV(dynarec_fastround)) x87_restoreround(dyn, ninst, u8); break; case 0xE8: @@ -150,14 +150,14 @@ uintptr_t dynarec64_DC(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin INST_NAME("FSUB STx, ST0"); v2 = x87_get_st(dyn, ninst, x1, x2, 0, X87_COMBINE(0, nextop&7)); v1 = x87_get_st(dyn, ninst, x1, x2, nextop&7, X87_COMBINE(0, nextop&7)); - if(!box64_dynarec_fastround) + if(!BOX64ENV(dynarec_fastround)) u8 = x87_setround(dyn, ninst, x1, x2, x4); if(ST_IS_F(0)) { FSUBS(v1, v1, v2); } else { FSUBD(v1, v1, v2); } - if(!box64_dynarec_fastround) + if(!BOX64ENV(dynarec_fastround)) x87_restoreround(dyn, ninst, u8); break; case 0xF0: @@ -171,14 +171,14 @@ uintptr_t dynarec64_DC(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin INST_NAME("FDIVR STx, ST0"); v2 = x87_get_st(dyn, ninst, x1, x2, 0, X87_COMBINE(0, nextop&7)); v1 = x87_get_st(dyn, ninst, x1, x2, nextop&7, X87_COMBINE(0, nextop&7)); - if(!box64_dynarec_fastround) + if(!BOX64ENV(dynarec_fastround)) u8 = x87_setround(dyn, ninst, x1, x2, x4); if(ST_IS_F(0)) { FDIVS(v1, v2, v1); } else { FDIVD(v1, v2, v1); } - if(!box64_dynarec_fastround) + if(!BOX64ENV(dynarec_fastround)) x87_restoreround(dyn, ninst, u8); break; case 0xF8: @@ -192,14 +192,14 @@ uintptr_t dynarec64_DC(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin INST_NAME("FDIV STx, ST0"); v2 = x87_get_st(dyn, ninst, x1, x2, 0, X87_COMBINE(0, nextop&7)); v1 = x87_get_st(dyn, ninst, x1, x2, nextop&7, X87_COMBINE(0, nextop&7)); - if(!box64_dynarec_fastround) + if(!BOX64ENV(dynarec_fastround)) u8 = x87_setround(dyn, ninst, x1, x2, x4); if(ST_IS_F(0)) { FDIVS(v1, v1, v2); } else { FDIVD(v1, v1, v2); } - if(!box64_dynarec_fastround) + if(!BOX64ENV(dynarec_fastround)) x87_restoreround(dyn, ninst, u8); break; default: @@ -213,10 +213,10 @@ uintptr_t dynarec64_DC(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin v2 = fpu_get_scratch(dyn, ninst); addr = geted(dyn, addr, ninst, nextop, &wback, x3, &fixedaddress, &unscaled, 0xfff<<3, 7, rex, NULL, 0, 0); VLD64(v2, wback, fixedaddress); - if(!box64_dynarec_fastround) + if(!BOX64ENV(dynarec_fastround)) u8 = x87_setround(dyn, ninst, x1, x2, x4); FADDD(v1, v1, v2); - if(!box64_dynarec_fastround) + if(!BOX64ENV(dynarec_fastround)) x87_restoreround(dyn, ninst, u8); break; case 1: @@ -225,10 +225,10 @@ uintptr_t dynarec64_DC(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin v2 = fpu_get_scratch(dyn, ninst); addr = geted(dyn, addr, ninst, nextop, &wback, x3, &fixedaddress, &unscaled, 0xfff<<3, 7, rex, NULL, 0, 0); VLD64(v2, wback, fixedaddress); - if(!box64_dynarec_fastround) + if(!BOX64ENV(dynarec_fastround)) u8 = x87_setround(dyn, ninst, x1, x2, x4); FMULD(v1, v1, v2); - if(!box64_dynarec_fastround) + if(!BOX64ENV(dynarec_fastround)) x87_restoreround(dyn, ninst, u8); break; case 2: @@ -256,10 +256,10 @@ uintptr_t dynarec64_DC(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin v2 = fpu_get_scratch(dyn, ninst); addr = geted(dyn, addr, ninst, nextop, &wback, x3, &fixedaddress, &unscaled, 0xfff<<3, 7, rex, NULL, 0, 0); VLD64(v2, wback, fixedaddress); - if(!box64_dynarec_fastround) + if(!BOX64ENV(dynarec_fastround)) u8 = x87_setround(dyn, 
ninst, x1, x2, x4); FSUBD(v1, v1, v2); - if(!box64_dynarec_fastround) + if(!BOX64ENV(dynarec_fastround)) x87_restoreround(dyn, ninst, u8); break; case 5: @@ -268,10 +268,10 @@ uintptr_t dynarec64_DC(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin v2 = fpu_get_scratch(dyn, ninst); addr = geted(dyn, addr, ninst, nextop, &wback, x3, &fixedaddress, &unscaled, 0xfff<<3, 7, rex, NULL, 0, 0); VLD64(v2, wback, fixedaddress); - if(!box64_dynarec_fastround) + if(!BOX64ENV(dynarec_fastround)) u8 = x87_setround(dyn, ninst, x1, x2, x4); FSUBD(v1, v2, v1); - if(!box64_dynarec_fastround) + if(!BOX64ENV(dynarec_fastround)) x87_restoreround(dyn, ninst, u8); break; case 6: @@ -280,10 +280,10 @@ uintptr_t dynarec64_DC(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin v2 = fpu_get_scratch(dyn, ninst); addr = geted(dyn, addr, ninst, nextop, &wback, x3, &fixedaddress, &unscaled, 0xfff<<3, 7, rex, NULL, 0, 0); VLD64(v2, wback, fixedaddress); - if(!box64_dynarec_fastround) + if(!BOX64ENV(dynarec_fastround)) u8 = x87_setround(dyn, ninst, x1, x2, x4); FDIVD(v1, v1, v2); - if(!box64_dynarec_fastround) + if(!BOX64ENV(dynarec_fastround)) x87_restoreround(dyn, ninst, u8); break; case 7: @@ -292,10 +292,10 @@ uintptr_t dynarec64_DC(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin v2 = fpu_get_scratch(dyn, ninst); addr = geted(dyn, addr, ninst, nextop, &wback, x3, &fixedaddress, &unscaled, 0xfff<<3, 7, rex, NULL, 0, 0); VLD64(v2, wback, fixedaddress); - if(!box64_dynarec_fastround) + if(!BOX64ENV(dynarec_fastround)) u8 = x87_setround(dyn, ninst, x1, x2, x4); FDIVD(v1, v2, v1); - if(!box64_dynarec_fastround) + if(!BOX64ENV(dynarec_fastround)) x87_restoreround(dyn, ninst, u8); break; } diff --git a/src/dynarec/arm64/dynarec_arm64_de.c b/src/dynarec/arm64/dynarec_arm64_de.c index 8dcc16c2..9c4dcc81 100644 --- a/src/dynarec/arm64/dynarec_arm64_de.c +++ b/src/dynarec/arm64/dynarec_arm64_de.c @@ -50,14 +50,14 @@ uintptr_t dynarec64_DE(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin INST_NAME("FADDP STx, ST0"); v2 = x87_get_st(dyn, ninst, x1, x2, 0, X87_COMBINE(0, nextop&7)); v1 = x87_get_st(dyn, ninst, x1, x2, nextop&7, X87_COMBINE(0, nextop&7)); - if(!box64_dynarec_fastround) + if(!BOX64ENV(dynarec_fastround)) u8 = x87_setround(dyn, ninst, x1, x2, x4); if(ST_IS_F(0)) { FADDS(v1, v1, v2); } else { FADDD(v1, v1, v2); } - if(!box64_dynarec_fastround) + if(!BOX64ENV(dynarec_fastround)) x87_restoreround(dyn, ninst, u8); X87_POP_OR_FAIL(dyn, ninst, x3); break; @@ -72,14 +72,14 @@ uintptr_t dynarec64_DE(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin INST_NAME("FMULP STx, ST0"); v2 = x87_get_st(dyn, ninst, x1, x2, 0, X87_COMBINE(0, nextop&7)); v1 = x87_get_st(dyn, ninst, x1, x2, nextop&7, X87_COMBINE(0, nextop&7)); - if(!box64_dynarec_fastround) + if(!BOX64ENV(dynarec_fastround)) u8 = x87_setround(dyn, ninst, x1, x2, x4); if(ST_IS_F(0)) { FMULS(v1, v1, v2); } else { FMULD(v1, v1, v2); } - if(!box64_dynarec_fastround) + if(!BOX64ENV(dynarec_fastround)) x87_restoreround(dyn, ninst, u8); X87_POP_OR_FAIL(dyn, ninst, x3); break; @@ -126,14 +126,14 @@ uintptr_t dynarec64_DE(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin INST_NAME("FSUBRP STx, ST0"); v2 = x87_get_st(dyn, ninst, x1, x2, 0, X87_COMBINE(0, nextop&7)); v1 = x87_get_st(dyn, ninst, x1, x2, nextop&7, X87_COMBINE(0, nextop&7)); - if(!box64_dynarec_fastround) + if(!BOX64ENV(dynarec_fastround)) u8 = x87_setround(dyn, ninst, x1, x2, x4); if(ST_IS_F(0)) { FSUBS(v1, v2, v1); } else { FSUBD(v1, v2, v1); } - 
if(!box64_dynarec_fastround) + if(!BOX64ENV(dynarec_fastround)) x87_restoreround(dyn, ninst, u8); X87_POP_OR_FAIL(dyn, ninst, x3); break; @@ -148,14 +148,14 @@ uintptr_t dynarec64_DE(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin INST_NAME("FSUBP STx, ST0"); v2 = x87_get_st(dyn, ninst, x1, x2, 0, X87_COMBINE(0, nextop&7)); v1 = x87_get_st(dyn, ninst, x1, x2, nextop&7, X87_COMBINE(0, nextop&7)); - if(!box64_dynarec_fastround) + if(!BOX64ENV(dynarec_fastround)) u8 = x87_setround(dyn, ninst, x1, x2, x4); if(ST_IS_F(0)) { FSUBS(v1, v1, v2); } else { FSUBD(v1, v1, v2); } - if(!box64_dynarec_fastround) + if(!BOX64ENV(dynarec_fastround)) x87_restoreround(dyn, ninst, u8); X87_POP_OR_FAIL(dyn, ninst, x3); break; @@ -170,14 +170,14 @@ uintptr_t dynarec64_DE(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin INST_NAME("FDIVRP STx, ST0"); v2 = x87_get_st(dyn, ninst, x1, x2, 0, X87_COMBINE(0, nextop&7)); v1 = x87_get_st(dyn, ninst, x1, x2, nextop&7, X87_COMBINE(0, nextop&7)); - if(!box64_dynarec_fastround) + if(!BOX64ENV(dynarec_fastround)) u8 = x87_setround(dyn, ninst, x1, x2, x4); if(ST_IS_F(0)) { FDIVS(v1, v2, v1); } else { FDIVD(v1, v2, v1); } - if(!box64_dynarec_fastround) + if(!BOX64ENV(dynarec_fastround)) x87_restoreround(dyn, ninst, u8); X87_POP_OR_FAIL(dyn, ninst, x3); break; @@ -192,14 +192,14 @@ uintptr_t dynarec64_DE(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin INST_NAME("FDIVP STx, ST0"); v2 = x87_get_st(dyn, ninst, x1, x2, 0, X87_COMBINE(0, nextop&7)); v1 = x87_get_st(dyn, ninst, x1, x2, nextop&7, X87_COMBINE(0, nextop&7)); - if(!box64_dynarec_fastround) + if(!BOX64ENV(dynarec_fastround)) u8 = x87_setround(dyn, ninst, x1, x2, x4); if(ST_IS_F(0)) { FDIVS(v1, v1, v2); } else { FDIVD(v1, v1, v2); } - if(!box64_dynarec_fastround) + if(!BOX64ENV(dynarec_fastround)) x87_restoreround(dyn, ninst, u8); X87_POP_OR_FAIL(dyn, ninst, x3); break; diff --git a/src/dynarec/arm64/dynarec_arm64_emit_math.c b/src/dynarec/arm64/dynarec_arm64_emit_math.c index 34687f8d..a20af739 100644 --- a/src/dynarec/arm64/dynarec_arm64_emit_math.c +++ b/src/dynarec/arm64/dynarec_arm64_emit_math.c @@ -83,7 +83,7 @@ void emit_add32(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int s2, int s3 void emit_add32c(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int64_t c, int s3, int s4, int s5) { MAYUSE(s5); - if((s1==xRSP) && (box64_dynarec_safeflags<2) && (!dyn->insts || (dyn->insts[ninst].x64.gen_flags==X_PEND) || (!box64_dynarec_df && (dyn->insts[ninst].x64.gen_flags==X_ALL)))) + if((s1==xRSP) && (BOX64ENV(dynarec_safeflags)<2) && (!dyn->insts || (dyn->insts[ninst].x64.gen_flags==X_PEND) || (!BOX64ENV(dynarec_df) && (dyn->insts[ninst].x64.gen_flags==X_ALL)))) { // special case when doing math on ESP and only PEND is needed: ignoring it! if(c>=0 && c<0x1000) { @@ -224,7 +224,7 @@ void emit_sub32(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int s2, int s3 void emit_sub32c(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int64_t c, int s3, int s4, int s5) { MAYUSE(s5); - if(s1==xRSP && (box64_dynarec_safeflags<2) && (!dyn->insts || (dyn->insts[ninst].x64.gen_flags==X_PEND) || (!box64_dynarec_df && (dyn->insts[ninst].x64.gen_flags==X_ALL)))) + if(s1==xRSP && (BOX64ENV(dynarec_safeflags)<2) && (!dyn->insts || (dyn->insts[ninst].x64.gen_flags==X_PEND) || (!BOX64ENV(dynarec_df) && (dyn->insts[ninst].x64.gen_flags==X_ALL)))) { // special case when doing math on RSP and only PEND is needed: ignoring it! 
if(c>=0 && c<0x1000) { diff --git a/src/dynarec/arm64/dynarec_arm64_emit_shift.c b/src/dynarec/arm64/dynarec_arm64_emit_shift.c index fa779a29..058d4440 100644 --- a/src/dynarec/arm64/dynarec_arm64_emit_shift.c +++ b/src/dynarec/arm64/dynarec_arm64_emit_shift.c @@ -68,7 +68,7 @@ void emit_shl32(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int s2, int s3 BFIw(xFlags, s4, F_ZF, 1); } } - if(box64_dynarec_test) + if (BOX64ENV(dynarec_test)) IFX(X_AF) { BFCw(xFlags, F_AF, 1); } @@ -124,7 +124,7 @@ void emit_shl32c(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, uint32_t c, i BFCw(xFlags, F_OF, 1); } } - if(box64_dynarec_test) + if (BOX64ENV(dynarec_test)) IFX(X_AF) { BFCw(xFlags, F_AF, 1); } @@ -183,7 +183,7 @@ void emit_shr32(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int s2, int s3 BFIx(xFlags, s4, F_SF, 1); } } - if(box64_dynarec_test) + if (BOX64ENV(dynarec_test)) IFX(X_AF) { BFCw(xFlags, F_AF, 1); } @@ -237,7 +237,7 @@ void emit_shr32c(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, uint32_t c, i BFCw(xFlags, F_SF, 1); } } - if(box64_dynarec_test) + if (BOX64ENV(dynarec_test)) IFX(X_AF) { BFCw(xFlags, F_AF, 1); } @@ -291,7 +291,7 @@ void emit_sar32(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int s2, int s3 BFIx(xFlags, s4, F_SF, 1); } } - if(box64_dynarec_test) + if (BOX64ENV(dynarec_test)) IFX(X_AF) { BFCw(xFlags, F_AF, 1); } @@ -338,10 +338,10 @@ void emit_sar32c(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, uint32_t c, i } } IFX(X_OF) - if(c==1 || box64_dynarec_test) { + if (c == 1 || BOX64ENV(dynarec_test)) { BFCw(xFlags, F_OF, 1); - } - if(box64_dynarec_test) + } + if (BOX64ENV(dynarec_test)) IFX(X_AF) { BFCw(xFlags, F_AF, 1); } @@ -382,7 +382,7 @@ void emit_shl8(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3, int s4) BFIw(xFlags, s4, F_OF, 1); } COMP_ZFSF(s1, 8) - if(box64_dynarec_test) + if (BOX64ENV(dynarec_test)) IFX(X_AF) { BFCw(xFlags, F_AF, 1); } @@ -423,9 +423,10 @@ void emit_shl8c(dynarec_arm_t* dyn, int ninst, int s1, uint32_t c, int s3, int s BFCw(xFlags, F_OF, 1); } } - if(box64_dynarec_test) IFX(X_AF) { - BFCw(xFlags, F_AF, 1); - } + if (BOX64ENV(dynarec_test)) + IFX (X_AF) { + BFCw(xFlags, F_AF, 1); + } IFX(X_PF) { emit_pf(dyn, ninst, s1, s4); } @@ -448,9 +449,10 @@ void emit_shl8c(dynarec_arm_t* dyn, int ninst, int s1, uint32_t c, int s3, int s IFX(X_SF) { IFNATIVE(NF_SF) {} else BFCw(xFlags, F_SF, 1); } - if(box64_dynarec_test) IFX(X_AF) { - BFCw(xFlags, F_AF, 1); - } + if (BOX64ENV(dynarec_test)) + IFX (X_AF) { + BFCw(xFlags, F_AF, 1); + } IFX(X_ZF) { IFNATIVE(NF_EQ) {} else { ORRw_mask(xFlags, xFlags, 26, 0); //1<<F_ZF @@ -492,7 +494,7 @@ void emit_shr8(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3, int s4) STRB_U12(s1, xEmu, offsetof(x64emu_t, res)); } COMP_ZFSF(s1, 8) - if(box64_dynarec_test) + if (BOX64ENV(dynarec_test)) IFX(X_AF) { BFCw(xFlags, F_AF, 1); } @@ -525,7 +527,7 @@ void emit_shr8c(dynarec_arm_t* dyn, int ninst, int s1, uint32_t c, int s3, int s if(c==1) { LSRw(s4, s1, 7); BFIw(xFlags, s4, F_OF, 1); - } else if(box64_dynarec_test) { + } else if (BOX64ENV(dynarec_test)) { BFCw(xFlags, F_OF, 1); } } @@ -534,7 +536,7 @@ void emit_shr8c(dynarec_arm_t* dyn, int ninst, int s1, uint32_t c, int s3, int s STRB_U12(s1, xEmu, offsetof(x64emu_t, res)); } COMP_ZFSF(s1, 8) - if(box64_dynarec_test) + if (BOX64ENV(dynarec_test)) IFX(X_AF) { BFCw(xFlags, F_AF, 1); } @@ -568,7 +570,7 @@ void emit_sar8(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3, int s4) //CBNZw(s4, 4+4); BFCw(xFlags, F_OF, 1); } - if(box64_dynarec_test) 
+ if (BOX64ENV(dynarec_test)) IFX(X_AF) { BFCw(xFlags, F_AF, 1); } @@ -599,9 +601,9 @@ void emit_sar8c(dynarec_arm_t* dyn, int ninst, int s1, uint32_t c, int s3, int s if(c<8) { COMP_ZFSF(s1, 8) IFX(X_OF) - if((c==1) || box64_dynarec_test) { + if ((c == 1) || BOX64ENV(dynarec_test)) { BFCw(xFlags, F_OF, 1); - } + } IFX(X_PF) { emit_pf(dyn, ninst, s1, s4); } @@ -619,7 +621,7 @@ void emit_sar8c(dynarec_arm_t* dyn, int ninst, int s1, uint32_t c, int s3, int s } } } - if(box64_dynarec_test) + if (BOX64ENV(dynarec_test)) IFX(X_AF) { BFCw(xFlags, F_AF, 1); } @@ -653,7 +655,7 @@ void emit_shl16(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3, int s4) BFIw(xFlags, s4, F_OF, 1); } COMP_ZFSF(s1, 16) - if(box64_dynarec_test) + if (BOX64ENV(dynarec_test)) IFX(X_AF) { BFCw(xFlags, F_AF, 1); } @@ -690,13 +692,14 @@ void emit_shl16c(dynarec_arm_t* dyn, int ninst, int s1, uint32_t c, int s3, int IFX2(X_SF, && !arm64_flagm) {} else {LSRw(s3, s1, 15);} // use COMP_ZFSF operation EORw_REG(s4, s3, xFlags); // CF is set if OF is asked BFIw(xFlags, s4, F_OF, 1); - } else if(box64_dynarec_test) { + } else if (BOX64ENV(dynarec_test)) { BFCw(xFlags, F_OF, 1); } } - if(box64_dynarec_test) IFX(X_AF) { - BFCw(xFlags, F_AF, 1); - } + if (BOX64ENV(dynarec_test)) + IFX (X_AF) { + BFCw(xFlags, F_AF, 1); + } IFX(X_PF) { if(c>7) { // the 0xff area will be 0, so PF is known @@ -723,9 +726,10 @@ void emit_shl16c(dynarec_arm_t* dyn, int ninst, int s1, uint32_t c, int s3, int IFX(X_SF) { IFNATIVE(NF_SF) {} else BFCw(xFlags, F_SF, 1); } - if(box64_dynarec_test) IFX(X_AF) { - BFCw(xFlags, F_AF, 1); - } + if (BOX64ENV(dynarec_test)) + IFX (X_AF) { + BFCw(xFlags, F_AF, 1); + } IFX(X_ZF) { IFNATIVE(NF_EQ) {} else { ORRw_mask(xFlags, xFlags, 26, 0); //1<<F_ZF @@ -763,7 +767,7 @@ void emit_shr16(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3, int s4) STRH_U12(s1, xEmu, offsetof(x64emu_t, res)); } COMP_ZFSF(s1, 16) - if(box64_dynarec_test) + if (BOX64ENV(dynarec_test)) IFX(X_AF) { BFCw(xFlags, F_AF, 1); } @@ -797,7 +801,7 @@ void emit_shr16c(dynarec_arm_t* dyn, int ninst, int s1, uint32_t c, int s3, int if(c==1) { LSRw(s4, s1, 15); BFIw(xFlags, s4, F_OF, 1); - } else if(box64_dynarec_test) { + } else if (BOX64ENV(dynarec_test)) { BFCw(xFlags, F_OF, 1); } } @@ -806,7 +810,7 @@ void emit_shr16c(dynarec_arm_t* dyn, int ninst, int s1, uint32_t c, int s3, int STRH_U12(s1, xEmu, offsetof(x64emu_t, res)); } COMP_ZFSF(s1, 16) - if(box64_dynarec_test) + if (BOX64ENV(dynarec_test)) IFX(X_AF) { BFCw(xFlags, F_AF, 1); } @@ -840,7 +844,7 @@ void emit_sar16(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3, int s4) BFCw(xFlags, F_OF, 1); } COMP_ZFSF(s1, 16) - if(box64_dynarec_test) + if (BOX64ENV(dynarec_test)) IFX(X_AF) { BFCw(xFlags, F_AF, 1); } @@ -871,10 +875,10 @@ void emit_sar16c(dynarec_arm_t* dyn, int ninst, int s1, uint32_t c, int s3, int } COMP_ZFSF(s1, 16) IFX(X_OF) - if((c==1) || box64_dynarec_test) { + if ((c == 1) || BOX64ENV(dynarec_test)) { BFCw(xFlags, F_OF, 1); - } - if(box64_dynarec_test) + } + if (BOX64ENV(dynarec_test)) IFX(X_AF) { BFCw(xFlags, F_AF, 1); } @@ -899,7 +903,7 @@ void emit_rol32c(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, uint32_t c, i if(c==1) { EORxw_REG_LSR(s3, s1, s1, rex.w?63:31); BFIw(xFlags, s3, F_OF, 1); - } else if(box64_dynarec_test) { + } else if (BOX64ENV(dynarec_test)) { BFCw(xFlags, F_OF, 1); } } @@ -923,7 +927,7 @@ void emit_ror32c(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, uint32_t c, i LSRxw(s3, s1, rex.w?62:30); EORxw_REG_LSR(s3, s3, s3, 1); BFIw(xFlags, s3, F_OF, 
1); - } else if(box64_dynarec_test) { + } else if (BOX64ENV(dynarec_test)) { BFCw(xFlags, F_OF, 1); } } @@ -950,7 +954,7 @@ void emit_rol8c(dynarec_arm_t* dyn, int ninst, int s1, uint32_t c, int s3, int s if(c==1) { EORw_REG_LSR(s3, s1, s1, 7); BFIw(xFlags, s3, F_OF, 1); - } else if(box64_dynarec_test) { + } else if (BOX64ENV(dynarec_test)) { BFCw(xFlags, F_OF, 1); } } @@ -977,7 +981,7 @@ void emit_ror8c(dynarec_arm_t* dyn, int ninst, int s1, uint32_t c, int s3, int s LSRw(s3, s1, 6); EORw_REG_LSR(s3, s3, s3, 1); BFIw(xFlags, s3, F_OF, 1); - } else if(box64_dynarec_test) { + } else if (BOX64ENV(dynarec_test)) { BFCw(xFlags, F_OF, 1); } } @@ -1004,7 +1008,7 @@ void emit_rol16c(dynarec_arm_t* dyn, int ninst, int s1, uint32_t c, int s3, int if(c==1) { EORw_REG_LSR(s3, s1, s1, 15); BFIw(xFlags, s3, F_OF, 1); - } else if(box64_dynarec_test) { + } else if (BOX64ENV(dynarec_test)) { BFCw(xFlags, F_OF, 1); } } @@ -1031,7 +1035,7 @@ void emit_ror16c(dynarec_arm_t* dyn, int ninst, int s1, uint32_t c, int s3, int LSRw(s3, s1, 14); EORw_REG_LSR(s3, s3, s3, 1); BFIw(xFlags, s3, F_OF, 1); - } else if(box64_dynarec_test) { + } else if (BOX64ENV(dynarec_test)) { BFCw(xFlags, F_OF, 1); } } @@ -1065,7 +1069,7 @@ void emit_rcl8c(dynarec_arm_t* dyn, int ninst, int s1, uint32_t c, int s3, int s if(c==1) { EORw_REG_LSR(s3, s3, s1, 7); BFIw(xFlags, s3, F_OF, 1); - } else if(box64_dynarec_test) { + } else if (BOX64ENV(dynarec_test)) { BFCw(xFlags, F_OF, 1); } } @@ -1086,7 +1090,7 @@ void emit_rcr8c(dynarec_arm_t* dyn, int ninst, int s1, uint32_t c, int s3, int s if(c==1) { EORw_REG_LSR(s3, xFlags, s1, 7); BFIw(xFlags, s3, F_OF, 1); - } else if(box64_dynarec_test) { + } else if (BOX64ENV(dynarec_test)) { BFCw(xFlags, F_OF, 1); } } @@ -1127,7 +1131,7 @@ void emit_rcl16c(dynarec_arm_t* dyn, int ninst, int s1, uint32_t c, int s3, int if(c==1) { EORw_REG_LSR(s3, s3, s1, 15); BFIw(xFlags, s3, F_OF, 1); - } else if(box64_dynarec_test) { + } else if (BOX64ENV(dynarec_test)) { BFCw(xFlags, F_OF, 1); } } @@ -1148,7 +1152,7 @@ void emit_rcr16c(dynarec_arm_t* dyn, int ninst, int s1, uint32_t c, int s3, int if(c==1) { EORw_REG_LSR(s3, xFlags, s1, 15); BFIw(xFlags, s3, F_OF, 1); - } else if(box64_dynarec_test) { + } else if (BOX64ENV(dynarec_test)) { BFCw(xFlags, F_OF, 1); } MOVw_REG(s3, wFlags); @@ -1189,7 +1193,7 @@ void emit_rcl32c(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, uint32_t c, i if(c==1) { EORxw_REG_LSR(s3, s3, s1, rex.w?63:31); BFIw(xFlags, s3, F_OF, 1); - } else if(box64_dynarec_test) { + } else if (BOX64ENV(dynarec_test)) { BFCw(xFlags, F_OF, 1); } } @@ -1207,7 +1211,7 @@ void emit_rcr32c(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, uint32_t c, i if(c==1) { EORxw_REG_LSR(s3, xFlags, s1, rex.w?63:31); BFIw(xFlags, s3, F_OF, 1); - } else if(box64_dynarec_test) { + } else if (BOX64ENV(dynarec_test)) { BFCw(xFlags, F_OF, 1); } } @@ -1278,7 +1282,7 @@ void emit_shrd32c(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int s2, uint BFIx(xFlags, s4, F_SF, 1); } } - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { IFX(X_AF) {BFCw(xFlags, F_AF, 1);} IFX2(X_OF, && (c>1)) {BFCw(xFlags, F_OF, 1);} } @@ -1332,7 +1336,7 @@ void emit_shld32c(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int s2, uint BFIx(xFlags, s4, F_SF, 1); } } - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { IFX(X_AF) {BFCw(xFlags, F_AF, 1);} IFX2(X_OF, && (c>1)) {BFCw(xFlags, F_OF, 1);} } @@ -1373,13 +1377,13 @@ void emit_shrd32(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int s2, int s STRxw_U12(s1, xEmu, 
offsetof(x64emu_t, res)); } IFX(X_OF) { - if(box64_dynarec_test) { - CMPSw_U12(s5, 1); + if (BOX64ENV(dynarec_test)) { + CMPSw_U12(s5, 1); EORxw_REG_LSR(s3, s4, s1, rex.w?63:31); // OF is set if sign changed CSELw(s3, s3, xZR, cEQ); BFIw(xFlags, s3, F_OF, 1); } else { - //CMPSw_U12(s5, 1); + // CMPSw_U12(s5, 1); EORxw_REG_LSR(s3, s4, s1, rex.w?63:31); // OF is set if sign changed // CSELw(s3, s3, xZR, cEQ); BFIw(xFlags, s3, F_OF, 1); @@ -1401,7 +1405,7 @@ void emit_shrd32(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int s2, int s BFIx(xFlags, s4, F_SF, 1); } } - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { IFX(X_AF) {BFCw(xFlags, F_AF, 1);} } IFX(X_PF) { @@ -1439,13 +1443,13 @@ void emit_shld32(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int s2, int s STRxw_U12(s1, xEmu, offsetof(x64emu_t, res)); } IFX(X_OF) { - if(box64_dynarec_test) { - CMPSw_U12(s5, 1); + if (BOX64ENV(dynarec_test)) { + CMPSw_U12(s5, 1); EORx_REG_LSR(s3, s4, s1, rex.w?63:31); // OF is set if sign changed CSELw(s3, s3, xZR, cEQ); BFIw(xFlags, s3, F_OF, 1); } else { - //CMPSw_U12(s5, 1); + // CMPSw_U12(s5, 1); EORx_REG_LSR(s3, s4, s1, rex.w?63:31); // OF is set if sign changed // CSELw(s3, s3, xZR, cEQ); BFIw(xFlags, s3, F_OF, 1); @@ -1467,7 +1471,7 @@ void emit_shld32(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int s2, int s BFIx(xFlags, s4, F_SF, 1); } } - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { IFX(X_AF) {BFCw(xFlags, F_AF, 1);} } IFX(X_PF) { @@ -1513,7 +1517,7 @@ void emit_shrd16c(dynarec_arm_t* dyn, int ninst, int s1, int s2, uint32_t c, int BFIw(xFlags, s3, F_OF, 1); } } - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { IFX(X_AF) {BFCw(xFlags, F_AF, 1);} IFX2(X_OF, && (c>1)) {BFCw(xFlags, F_OF, 1);} } @@ -1547,13 +1551,13 @@ void emit_shrd16(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s5, int s3, STRH_U12(s1, xEmu, offsetof(x64emu_t, res)); } IFX(X_OF) { - if(box64_dynarec_test) { - CMPSw_U12(s5, 1); + if (BOX64ENV(dynarec_test)) { + CMPSw_U12(s5, 1); EORw_REG_LSR(s3, s4, s1, 15); // OF is set if sign changed CSELw(s3, s3, xZR, cEQ); BFIw(xFlags, s3, F_OF, 1); } else { - //CMPSw_U12(s5, 1); + // CMPSw_U12(s5, 1); EORw_REG_LSR(s3, s4, s1, 15); // OF is set if sign changed // CSELw(s3, s3, xZR, cEQ); BFIw(xFlags, s3, F_OF, 1); @@ -1608,7 +1612,7 @@ void emit_shld16c(dynarec_arm_t* dyn, int ninst, int s1, int s2, uint32_t c, int BFIw(xFlags, s3, F_OF, 1); } } - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { IFX(X_AF) {BFCw(xFlags, F_AF, 1);} IFX2(X_OF, && (c>1)) {BFCw(xFlags, F_OF, 1);} } @@ -1645,20 +1649,20 @@ void emit_shld16(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s5, int s3, STRH_U12(s1, xEmu, offsetof(x64emu_t, res)); } IFX(X_OF) { - if(box64_dynarec_test) { - CMPSw_U12(s5, 1); + if (BOX64ENV(dynarec_test)) { + CMPSw_U12(s5, 1); EORw_REG_LSR(s3, s4, s1, 15); // OF is set if sign changed CSELw(s3, s3, xZR, cEQ); BFIw(xFlags, s3, F_OF, 1); } else { - //CMPSw_U12(s5, 1); + // CMPSw_U12(s5, 1); EORw_REG_LSR(s3, s4, s1, 15); // OF is set if sign changed // CSELw(s3, s3, xZR, cEQ); BFIw(xFlags, s3, F_OF, 1); } } COMP_ZFSF(s1, 16) - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { IFX(X_AF) {BFCw(xFlags, F_AF, 1);} } IFX(X_PF) { diff --git a/src/dynarec/arm64/dynarec_arm64_f0.c b/src/dynarec/arm64/dynarec_arm64_f0.c index 588e898b..e0e74535 100644 --- a/src/dynarec/arm64/dynarec_arm64_f0.c +++ b/src/dynarec/arm64/dynarec_arm64_f0.c @@ -254,7 +254,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin 
CBNZw_MARKLOCK(x4); SMDMB(); } - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { IFX(X_OF) {BFCw(xFlags, F_OF, 1);} IFX(X_SF) {BFCw(xFlags, F_SF, 1);} IFX(X_AF) {BFCw(xFlags, F_AF, 1);} @@ -447,7 +447,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin CBNZw_MARKLOCK(x4); SMDMB(); } - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { IFX(X_OF) {BFCw(xFlags, F_OF, 1);} IFX(X_SF) {BFCw(xFlags, F_SF, 1);} IFX(X_AF) {BFCw(xFlags, F_AF, 1);} @@ -486,7 +486,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin BFXILxw(xFlags, x1, u8&7, 1); // inject 1 bit from u8 to F_CF (i.e. pos 0) } } - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { IFX(X_OF) {BFCw(xFlags, F_OF, 1);} IFX(X_SF) {BFCw(xFlags, F_SF, 1);} IFX(X_AF) {BFCw(xFlags, F_AF, 1);} @@ -505,7 +505,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin IFX(X_CF) { BFXILxw(xFlags, ed, u8, 1); // inject 1 bit from u8 to F_CF (i.e. pos 0) } - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { IFX(X_OF) {BFCw(xFlags, F_OF, 1);} IFX(X_SF) {BFCw(xFlags, F_SF, 1);} IFX(X_AF) {BFCw(xFlags, F_AF, 1);} @@ -527,7 +527,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin IFX(X_CF) { BFXILw(xFlags, ed, u8&7, 1); // inject 1 bit from u8 to F_CF (i.e. pos 0) } - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { IFX(X_OF) {BFCw(xFlags, F_OF, 1);} IFX(X_SF) {BFCw(xFlags, F_SF, 1);} IFX(X_AF) {BFCw(xFlags, F_AF, 1);} @@ -552,7 +552,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin IFX(X_CF) { BFXILxw(xFlags, ed, u8, 1); // inject 1 bit from u8 to F_CF (i.e. pos 0) } - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { IFX(X_OF) {BFCw(xFlags, F_OF, 1);} IFX(X_SF) {BFCw(xFlags, F_SF, 1);} IFX(X_AF) {BFCw(xFlags, F_AF, 1);} @@ -570,7 +570,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin MARKLOCK; LDAXRB(ed, wback); BFXILw(xFlags, ed, u8&7, 1); // inject 1 bit from u8 to F_CF (i.e. pos 0) - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { IFX(X_OF) {BFCw(xFlags, F_OF, 1);} IFX(X_SF) {BFCw(xFlags, F_SF, 1);} IFX(X_AF) {BFCw(xFlags, F_AF, 1);} @@ -594,7 +594,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin IFX(X_CF) { BFXILxw(xFlags, ed, u8, 1); // inject 1 bit from u8 to F_CF (i.e. pos 0) } - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { IFX(X_OF) {BFCw(xFlags, F_OF, 1);} IFX(X_SF) {BFCw(xFlags, F_SF, 1);} IFX(X_AF) {BFCw(xFlags, F_AF, 1);} @@ -615,7 +615,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin IFX(X_CF) { BFXILw(xFlags, ed, u8&7, 1); // inject 1 bit from u8 to F_CF (i.e. 
pos 0) } - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { IFX(X_OF) {BFCw(xFlags, F_OF, 1);} IFX(X_SF) {BFCw(xFlags, F_SF, 1);} IFX(X_AF) {BFCw(xFlags, F_AF, 1);} diff --git a/src/dynarec/arm64/dynarec_arm64_f20f.c b/src/dynarec/arm64/dynarec_arm64_f20f.c index 0a2c49f2..984ebc2e 100644 --- a/src/dynarec/arm64/dynarec_arm64_f20f.c +++ b/src/dynarec/arm64/dynarec_arm64_f20f.c @@ -113,13 +113,13 @@ uintptr_t dynarec64_F20F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n nextop = F8; GETGD; GETEXSD(q0, 0, 0); - if(!box64_dynarec_fastround) { + if(!BOX64ENV(dynarec_fastround)) { MRS_fpsr(x5); BFCw(x5, FPSR_IOC, 1); // reset IOC bit MSR_fpsr(x5); } FCVTZSxwD(gd, q0); - if(!box64_dynarec_fastround) { + if(!BOX64ENV(dynarec_fastround)) { MRS_fpsr(x5); // get back FPSR to check the IOC bit TBZ_NEXT(x5, FPSR_IOC); if(rex.w) { @@ -134,7 +134,7 @@ uintptr_t dynarec64_F20F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n nextop = F8; GETGD; GETEXSD(q0, 0, 0); - if(!box64_dynarec_fastround) { + if(!BOX64ENV(dynarec_fastround)) { MRS_fpsr(x5); BFCw(x5, FPSR_IOC, 1); // reset IOC bit MSR_fpsr(x5); @@ -144,7 +144,7 @@ uintptr_t dynarec64_F20F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n FRINTID(d1, q0); x87_restoreround(dyn, ninst, u8); FCVTZSxwD(gd, d1); - if(!box64_dynarec_fastround) { + if(!BOX64ENV(dynarec_fastround)) { MRS_fpsr(x5); // get back FPSR to check the IOC bit TBZ_NEXT(x5, FPSR_IOC); if(rex.w) { @@ -209,7 +209,7 @@ uintptr_t dynarec64_F20F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n GETGX(v0, 1); d1 = fpu_get_scratch(dyn, ninst); GETEXSD(d0, 0, 0); - if(!box64_dynarec_fastnan) { + if(!BOX64ENV(dynarec_fastnan)) { v1 = fpu_get_scratch(dyn, ninst); FCMLTD_0(v1, d0); SHL_64(v1, v1, 63); @@ -227,7 +227,7 @@ uintptr_t dynarec64_F20F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n GETGX(d1, 1); v1 = fpu_get_scratch(dyn, ninst); GETEXSD(d0, 0, 0); - if(!box64_dynarec_fastnan) { + if(!BOX64ENV(dynarec_fastnan)) { v0 = fpu_get_scratch(dyn, ninst); q0 = fpu_get_scratch(dyn, ninst); // check if any input value was NAN @@ -249,7 +249,7 @@ uintptr_t dynarec64_F20F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n GETGX(d1, 1); v1 = fpu_get_scratch(dyn, ninst); GETEXSD(d0, 0, 0); - if(!box64_dynarec_fastnan) { + if(!BOX64ENV(dynarec_fastnan)) { v0 = fpu_get_scratch(dyn, ninst); q0 = fpu_get_scratch(dyn, ninst); // check if any input value was NAN @@ -271,7 +271,7 @@ uintptr_t dynarec64_F20F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n GETGX(v0, 1); GETEXSD(d0, 0, 0); d1 = fpu_get_scratch(dyn, ninst); - if(box64_dynarec_fastround==2) { + if(BOX64ENV(dynarec_fastround)==2) { FCVT_S_D(d1, d0); } else { u8 = sse_setround(dyn, ninst, x1, x2, x3); @@ -287,7 +287,7 @@ uintptr_t dynarec64_F20F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n GETGX(d1, 1); v1 = fpu_get_scratch(dyn, ninst); GETEXSD(d0, 0, 0); - if(!box64_dynarec_fastnan) { + if(!BOX64ENV(dynarec_fastnan)) { v0 = fpu_get_scratch(dyn, ninst); q0 = fpu_get_scratch(dyn, ninst); // check if any input value was NAN @@ -325,7 +325,7 @@ uintptr_t dynarec64_F20F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n GETGX(v0, 1); d1 = fpu_get_scratch(dyn, ninst); GETEXSD(v1, 0, 0); - if(!box64_dynarec_fastnan) { + if(!BOX64ENV(dynarec_fastnan)) { d0 = fpu_get_scratch(dyn, ninst); q0 = fpu_get_scratch(dyn, ninst); // check if any input value was NAN @@ -515,7 +515,7 @@ uintptr_t dynarec64_F20F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n nextop = F8; GETEX(v1, 0, 0); 
GETGX_empty(v0); - if(box64_dynarec_fastround) { + if(BOX64ENV(dynarec_fastround)) { u8 = sse_setround(dyn, ninst, x1, x2, x3); VFRINTIDQ(v0, v1); x87_restoreround(dyn, ninst, u8); diff --git a/src/dynarec/arm64/dynarec_arm64_f30f.c b/src/dynarec/arm64/dynarec_arm64_f30f.c index bbe987e4..45a5c454 100644 --- a/src/dynarec/arm64/dynarec_arm64_f30f.c +++ b/src/dynarec/arm64/dynarec_arm64_f30f.c @@ -131,13 +131,13 @@ uintptr_t dynarec64_F30F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n nextop = F8; GETGD; GETEXSS(d0, 0, 0); - if(!box64_dynarec_fastround) { + if(!BOX64ENV(dynarec_fastround)) { MRS_fpsr(x5); BFCw(x5, FPSR_IOC, 1); // reset IOC bit MSR_fpsr(x5); } FCVTZSxwS(gd, d0); - if(!box64_dynarec_fastround) { + if(!BOX64ENV(dynarec_fastround)) { MRS_fpsr(x5); // get back FPSR to check the IOC bit TBZ_NEXT(x5, FPSR_IOC); if(rex.w) { @@ -152,7 +152,7 @@ uintptr_t dynarec64_F30F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n nextop = F8; GETGD; GETEXSS(q0, 0, 0); - if(!box64_dynarec_fastround) { + if(!BOX64ENV(dynarec_fastround)) { MRS_fpsr(x5); BFCw(x5, FPSR_IOC, 1); // reset IOC bit MSR_fpsr(x5); @@ -162,7 +162,7 @@ uintptr_t dynarec64_F30F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n FRINTIS(d1, q0); x87_restoreround(dyn, ninst, u8); FCVTZSxwS(gd, d1); - if(!box64_dynarec_fastround) { + if(!BOX64ENV(dynarec_fastround)) { MRS_fpsr(x5); // get back FPSR to check the IOC bit TBZ_NEXT(x5, FPSR_IOC); if(rex.w) { @@ -258,7 +258,7 @@ uintptr_t dynarec64_F30F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n GETGX(d1, 1); v1 = fpu_get_scratch(dyn, ninst); GETEXSS(d0, 0, 0); - if(!box64_dynarec_fastnan) { + if(!BOX64ENV(dynarec_fastnan)) { v0 = fpu_get_scratch(dyn, ninst); q0 = fpu_get_scratch(dyn, ninst); // check if any input value was NAN @@ -288,7 +288,7 @@ uintptr_t dynarec64_F30F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n nextop = F8; GETEX(v1, 0, 0) ; GETGX_empty(v0); - if(box64_dynarec_fastround) { + if(BOX64ENV(dynarec_fastround)) { VFCVTZSQS(v0, v1); } else { MRS_fpsr(x5); diff --git a/src/dynarec/arm64/dynarec_arm64_functions.c b/src/dynarec/arm64/dynarec_arm64_functions.c index 2a7a2a09..9d211a51 100644 --- a/src/dynarec/arm64/dynarec_arm64_functions.c +++ b/src/dynarec/arm64/dynarec_arm64_functions.c @@ -287,7 +287,7 @@ static void neoncache_promote_double_combined(dynarec_arm_t* dyn, int ninst, int } else a = dyn->insts[ninst].n.combined1; int i = neoncache_get_st_f_i64_noback(dyn, ninst, a); - //if(box64_dynarec_dump) dynarec_log(LOG_NONE, "neoncache_promote_double_combined, ninst=%d combined%c %d i=%d (stack:%d/%d)\n", ninst, (a == dyn->insts[ninst].n.combined2)?'2':'1', a ,i, dyn->insts[ninst].n.stack_push, -dyn->insts[ninst].n.stack_pop); + //if(BOX64ENV(dynarec_dump)) dynarec_log(LOG_NONE, "neoncache_promote_double_combined, ninst=%d combined%c %d i=%d (stack:%d/%d)\n", ninst, (a == dyn->insts[ninst].n.combined2)?'2':'1', a ,i, dyn->insts[ninst].n.stack_push, -dyn->insts[ninst].n.stack_pop); if(i>=0) { dyn->insts[ninst].n.neoncache[i].t = NEON_CACHE_ST_D; if(!dyn->insts[ninst].n.barrier) @@ -304,19 +304,19 @@ static void neoncache_promote_double_internal(dynarec_arm_t* dyn, int ninst, int while(ninst>=0) { a+=dyn->insts[ninst].n.stack_pop; // adjust Stack depth: add pop'd ST (going backward) int i = neoncache_get_st_f_i64(dyn, ninst, a); - //if(box64_dynarec_dump) dynarec_log(LOG_NONE, "neoncache_promote_double_internal, ninst=%d, a=%d st=%d:%d, i=%d\n", ninst, a, dyn->insts[ninst].n.stack, dyn->insts[ninst].n.stack_next, i); + 
//if(BOX64ENV(dynarec_dump)) dynarec_log(LOG_NONE, "neoncache_promote_double_internal, ninst=%d, a=%d st=%d:%d, i=%d\n", ninst, a, dyn->insts[ninst].n.stack, dyn->insts[ninst].n.stack_next, i); if(i<0) return; dyn->insts[ninst].n.neoncache[i].t = NEON_CACHE_ST_D; // check combined propagation too if(dyn->insts[ninst].n.combined1 || dyn->insts[ninst].n.combined2) { if(dyn->insts[ninst].n.swapped) { - //if(box64_dynarec_dump) dynarec_log(LOG_NONE, "neoncache_promote_double_internal, ninst=%d swapped %d/%d vs %d with st %d\n", ninst, dyn->insts[ninst].n.combined1 ,dyn->insts[ninst].n.combined2, a, dyn->insts[ninst].n.stack); + //if(BOX64ENV(dynarec_dump)) dynarec_log(LOG_NONE, "neoncache_promote_double_internal, ninst=%d swapped %d/%d vs %d with st %d\n", ninst, dyn->insts[ninst].n.combined1 ,dyn->insts[ninst].n.combined2, a, dyn->insts[ninst].n.stack); if (a==dyn->insts[ninst].n.combined1) a = dyn->insts[ninst].n.combined2; else if (a==dyn->insts[ninst].n.combined2) a = dyn->insts[ninst].n.combined1; } else { - //if(box64_dynarec_dump) dynarec_log(LOG_NONE, "neoncache_promote_double_internal, ninst=%d combined %d/%d vs %d with st %d\n", ninst, dyn->insts[ninst].n.combined1 ,dyn->insts[ninst].n.combined2, a, dyn->insts[ninst].n.stack); + //if(BOX64ENV(dynarec_dump)) dynarec_log(LOG_NONE, "neoncache_promote_double_internal, ninst=%d combined %d/%d vs %d with st %d\n", ninst, dyn->insts[ninst].n.combined1 ,dyn->insts[ninst].n.combined2, a, dyn->insts[ninst].n.stack); neoncache_promote_double_combined(dyn, ninst, maxinst, a); } } @@ -332,19 +332,19 @@ static void neoncache_promote_double_forward(dynarec_arm_t* dyn, int ninst, int while((ninst!=-1) && (ninst<maxinst) && (a>=0)) { a+=dyn->insts[ninst].n.stack_push; // // adjust Stack depth: add push'd ST (going forward) if((dyn->insts[ninst].n.combined1 || dyn->insts[ninst].n.combined2) && dyn->insts[ninst].n.swapped) { - //if(box64_dynarec_dump) dynarec_log(LOG_NONE, "neoncache_promote_double_forward, ninst=%d swapped %d/%d vs %d with st %d\n", ninst, dyn->insts[ninst].n.combined1 ,dyn->insts[ninst].n.combined2, a, dyn->insts[ninst].n.stack); + //if(BOX64ENV(dynarec_dump)) dynarec_log(LOG_NONE, "neoncache_promote_double_forward, ninst=%d swapped %d/%d vs %d with st %d\n", ninst, dyn->insts[ninst].n.combined1 ,dyn->insts[ninst].n.combined2, a, dyn->insts[ninst].n.stack); if (a==dyn->insts[ninst].n.combined1) a = dyn->insts[ninst].n.combined2; else if (a==dyn->insts[ninst].n.combined2) a = dyn->insts[ninst].n.combined1; } int i = neoncache_get_st_f_i64_noback(dyn, ninst, a); - //if(box64_dynarec_dump) dynarec_log(LOG_NONE, "neoncache_promote_double_forward, ninst=%d, a=%d st=%d:%d(%d/%d), i=%d\n", ninst, a, dyn->insts[ninst].n.stack, dyn->insts[ninst].n.stack_next, dyn->insts[ninst].n.stack_push, -dyn->insts[ninst].n.stack_pop, i); + //if(BOX64ENV(dynarec_dump)) dynarec_log(LOG_NONE, "neoncache_promote_double_forward, ninst=%d, a=%d st=%d:%d(%d/%d), i=%d\n", ninst, a, dyn->insts[ninst].n.stack, dyn->insts[ninst].n.stack_next, dyn->insts[ninst].n.stack_push, -dyn->insts[ninst].n.stack_pop, i); if(i<0) return; dyn->insts[ninst].n.neoncache[i].t = NEON_CACHE_ST_D; // check combined propagation too if((dyn->insts[ninst].n.combined1 || dyn->insts[ninst].n.combined2) && !dyn->insts[ninst].n.swapped) { - //if(box64_dynarec_dump) dynarec_log(LOG_NONE, "neoncache_promote_double_forward, ninst=%d combined %d/%d vs %d with st %d\n", ninst, dyn->insts[ninst].n.combined1 ,dyn->insts[ninst].n.combined2, a, dyn->insts[ninst].n.stack); + 
//if(BOX64ENV(dynarec_dump)) dynarec_log(LOG_NONE, "neoncache_promote_double_forward, ninst=%d combined %d/%d vs %d with st %d\n", ninst, dyn->insts[ninst].n.combined1 ,dyn->insts[ninst].n.combined2, a, dyn->insts[ninst].n.stack); neoncache_promote_double_combined(dyn, ninst, maxinst, a); } a-=dyn->insts[ninst].n.stack_pop; // adjust Stack depth: remove pop'd ST (going forward) @@ -360,20 +360,20 @@ static void neoncache_promote_double_forward(dynarec_arm_t* dyn, int ninst, int void neoncache_promote_double(dynarec_arm_t* dyn, int ninst, int a) { int i = neoncache_get_current_st_f_i64(dyn, a); - //if(box64_dynarec_dump) dynarec_log(LOG_NONE, "neoncache_promote_double, ninst=%d a=%d st=%d i=%d\n", ninst, a, dyn->n.stack, i); + //if(BOX64ENV(dynarec_dump)) dynarec_log(LOG_NONE, "neoncache_promote_double, ninst=%d a=%d st=%d i=%d\n", ninst, a, dyn->n.stack, i); if(i<0) return; dyn->n.neoncache[i].t = NEON_CACHE_ST_D; dyn->insts[ninst].n.neoncache[i].t = NEON_CACHE_ST_D; // check combined propagation too if(dyn->n.combined1 || dyn->n.combined2) { if(dyn->n.swapped) { - //if(box64_dynarec_dump) dynarec_log(LOG_NONE, "neoncache_promote_double, ninst=%d swapped! %d/%d vs %d\n", ninst, dyn->n.combined1 ,dyn->n.combined2, a); + //if(BOX64ENV(dynarec_dump)) dynarec_log(LOG_NONE, "neoncache_promote_double, ninst=%d swapped! %d/%d vs %d\n", ninst, dyn->n.combined1 ,dyn->n.combined2, a); if(dyn->n.combined1 == a) a = dyn->n.combined2; else if(dyn->n.combined2 == a) a = dyn->n.combined1; } else { - //if(box64_dynarec_dump) dynarec_log(LOG_NONE, "neoncache_promote_double, ninst=%d combined! %d/%d vs %d\n", ninst, dyn->n.combined1 ,dyn->n.combined2, a); + //if(BOX64ENV(dynarec_dump)) dynarec_log(LOG_NONE, "neoncache_promote_double, ninst=%d combined! %d/%d vs %d\n", ninst, dyn->n.combined1 ,dyn->n.combined2, a); if(dyn->n.combined1 == a) neoncache_promote_double(dyn, ninst, dyn->n.combined2); else if(dyn->n.combined2 == a) @@ -734,7 +734,7 @@ static register_mapping_t register_mappings[] = { void inst_name_pass3(dynarec_native_t* dyn, int ninst, const char* name, rex_t rex) { - if (!box64_dynarec_dump && !box64_dynarec_gdbjit && !box64_dynarec_perf_map) return; + if (!BOX64ENV(dynarec_dump) && !BOX64ENV(dynarec_gdbjit) && !BOX64ENV(dynarec_perf_map)) return; static char buf[512]; int length = sprintf(buf, "barrier=%d state=%d/%d/%d(%d:%d->%d:%d), %s=%X/%X, use=%X, need=%X/%X, sm=%d(%d/%d)", @@ -805,13 +805,13 @@ void inst_name_pass3(dynarec_native_t* dyn, int ninst, const char* name, rex_t r } } if (memcmp(dyn->insts[ninst].n.neoncache, dyn->n.neoncache, sizeof(dyn->n.neoncache))) { - length += sprintf(buf + length, " %s(Change:", (box64_dynarec_dump > 1) ? "\e[1;91m" : ""); + length += sprintf(buf + length, " %s(Change:", (BOX64ENV(dynarec_dump) > 1) ? "\e[1;91m" : ""); for (int ii = 0; ii < 32; ++ii) if (dyn->insts[ninst].n.neoncache[ii].v != dyn->n.neoncache[ii].v) { length += sprintf(buf + length, " V%d:%s", ii, getCacheName(dyn->n.neoncache[ii].t, dyn->n.neoncache[ii].n)); length += sprintf(buf + length, "->%s", getCacheName(dyn->insts[ninst].n.neoncache[ii].t, dyn->insts[ninst].n.neoncache[ii].n)); } - length += sprintf(buf + length, ")%s", (box64_dynarec_dump > 1) ? "\e[0;32m" : ""); + length += sprintf(buf + length, ")%s", (BOX64ENV(dynarec_dump) > 1) ? 
"\e[0;32m" : ""); } if (dyn->insts[ninst].n.ymm_used) { length += sprintf(buf + length, " ymmUsed=%04x", dyn->insts[ninst].n.ymm_used); @@ -828,15 +828,15 @@ void inst_name_pass3(dynarec_native_t* dyn, int ninst, const char* name, rex_t r if (dyn->insts[ninst].n.combined1 || dyn->insts[ninst].n.combined2) { length += sprintf(buf + length, " %s:%d/%d", dyn->insts[ninst].n.swapped ? "SWP" : "CMB", dyn->insts[ninst].n.combined1, dyn->insts[ninst].n.combined2); } - if (box64_dynarec_dump) { + if (BOX64ENV(dynarec_dump)) { printf_x64_instruction(rex.is32bits ? my_context->dec32 : my_context->dec, &dyn->insts[ninst].x64, name); dynarec_log(LOG_NONE, "%s%p: %d emitted opcodes, inst=%d, %s%s\n", - (box64_dynarec_dump > 1) ? "\e[32m" : "", - (void*)(dyn->native_start + dyn->insts[ninst].address), dyn->insts[ninst].size / 4, ninst, buf, (box64_dynarec_dump > 1) ? "\e[m" : ""); + (BOX64ENV(dynarec_dump) > 1) ? "\e[32m" : "", + (void*)(dyn->native_start + dyn->insts[ninst].address), dyn->insts[ninst].size / 4, ninst, buf, (BOX64ENV(dynarec_dump) > 1) ? "\e[m" : ""); } - if (box64_dynarec_gdbjit) { + if (BOX64ENV(dynarec_gdbjit)) { static char buf2[512]; - if (box64_dynarec_gdbjit > 1) { + if (BOX64ENV(dynarec_gdbjit) > 1) { sprintf(buf2, "; %d: %d opcodes, %s", ninst, dyn->insts[ninst].size / 4, buf); dyn->gdbjit_block = GdbJITBlockAddLine(dyn->gdbjit_block, (dyn->native_start + dyn->insts[ninst].address), buf2); } @@ -849,7 +849,7 @@ void inst_name_pass3(dynarec_native_t* dyn, int ninst, const char* name, rex_t r } dyn->gdbjit_block = GdbJITBlockAddLine(dyn->gdbjit_block, (dyn->native_start + dyn->insts[ninst].address), inst_name); } - if (box64_dynarec_perf_map && box64_dynarec_perf_map_fd != -1) { + if (BOX64ENV(dynarec_perf_map) && BOX64ENV(dynarec_perf_map_fd) != -1) { writePerfMap(dyn->insts[ninst].x64.addr, dyn->native_start + dyn->insts[ninst].address, dyn->insts[ninst].size / 4, name); } } @@ -1096,7 +1096,7 @@ static void propagateNativeFlags(dynarec_arm_t* dyn, int start) void updateNativeFlags(dynarec_native_t* dyn) { - if(!box64_dynarec_nativeflags) + if(!BOX64ENV(dynarec_nativeflags)) return; // forward check if native flags are used for(int ninst=0; ninst<dyn->size; ++ninst) diff --git a/src/dynarec/arm64/dynarec_arm64_helper.c b/src/dynarec/arm64/dynarec_arm64_helper.c index 7f0594f3..411afdca 100644 --- a/src/dynarec/arm64/dynarec_arm64_helper.c +++ b/src/dynarec/arm64/dynarec_arm64_helper.c @@ -626,7 +626,7 @@ void ret_to_epilog(dynarec_arm_t* dyn, int ninst, rex_t rex) POP1z(xRIP); MOVz_REG(x1, xRIP); SMEND(); - if(box64_dynarec_callret) { + if(BOX64ENV(dynarec_callret)) { // pop the actual return address for ARM stack LDPx_S7_postindex(xLR, x6, xSP, 16); SUBx_REG(x6, x6, xRIP); // is it the right address? @@ -673,7 +673,7 @@ void retn_to_epilog(dynarec_arm_t* dyn, int ninst, rex_t rex, int n) } MOVz_REG(x1, xRIP); SMEND(); - if(box64_dynarec_callret) { + if(BOX64ENV(dynarec_callret)) { // pop the actual return address for ARM stack LDPx_S7_postindex(xLR, x6, xSP, 16); SUBx_REG(x6, x6, xRIP); // is it the right address? 
@@ -2609,10 +2609,10 @@ void fpu_reset_cache(dynarec_arm_t* dyn, int ninst, int reset_n) dyn->ymm_zero = dyn->insts[reset_n].ymm0_out; #endif #if STEP == 0 - if(box64_dynarec_dump && dyn->n.x87stack) dynarec_log(LOG_NONE, "New x87stack=%d at ResetCache in inst %d with %d\n", dyn->n.x87stack, ninst, reset_n); + if(BOX64ENV(dynarec_dump) && dyn->n.x87stack) dynarec_log(LOG_NONE, "New x87stack=%d at ResetCache in inst %d with %d\n", dyn->n.x87stack, ninst, reset_n); #endif #if defined(HAVE_TRACE) && (STEP>2) - if(box64_dynarec_dump && 0) //disable for now, need more work + if(BOX64ENV(dynarec_dump) && 0) //disable for now, need more work if(memcmp(&dyn->n, &dyn->insts[reset_n].n, sizeof(neoncache_t))) { MESSAGE(LOG_DEBUG, "Warning, difference in neoncache: reset="); for(int i=0; i<32; ++i) diff --git a/src/dynarec/arm64/dynarec_arm64_helper.h b/src/dynarec/arm64/dynarec_arm64_helper.h index 7ce075be..3bf240ab 100644 --- a/src/dynarec/arm64/dynarec_arm64_helper.h +++ b/src/dynarec/arm64/dynarec_arm64_helper.h @@ -728,8 +728,8 @@ TSTw_mask(xFlags, 0b010110, 0); \ CNEGx(r, r, cNE) -#define ALIGNED_ATOMICxw ((fixedaddress && !(fixedaddress&(((1<<(2+rex.w))-1)))) || box64_dynarec_aligned_atomics) -#define ALIGNED_ATOMICH ((fixedaddress && !(fixedaddress&1)) || box64_dynarec_aligned_atomics) +#define ALIGNED_ATOMICxw ((fixedaddress && !(fixedaddress&(((1<<(2+rex.w))-1)))) || BOX64ENV(dynarec_aligned_atomics)) +#define ALIGNED_ATOMICH ((fixedaddress && !(fixedaddress&1)) || BOX64ENV(dynarec_aligned_atomics)) // CALL will use x7 for the call address. Return value can be put in ret (unless ret is -1) // R0 will not be pushed/popd if ret is -2 @@ -1058,7 +1058,7 @@ #else #define X87_PUSH_OR_FAIL(var, dyn, ninst, scratch, t) \ if ((dyn->n.x87stack==8) || (dyn->n.pushed==8)) { \ - if(box64_dynarec_dump) dynarec_log(LOG_NONE, " Warning, suspicious x87 Push, stack=%d/%d on inst %d\n", dyn->n.x87stack, dyn->n.pushed, ninst); \ + if(BOX64ENV(dynarec_dump)) dynarec_log(LOG_NONE, " Warning, suspicious x87 Push, stack=%d/%d on inst %d\n", dyn->n.x87stack, dyn->n.pushed, ninst); \ dyn->abort = 1; \ return addr; \ } \ @@ -1066,7 +1066,7 @@ #define X87_PUSH_EMPTY_OR_FAIL(dyn, ninst, scratch) \ if ((dyn->n.x87stack==8) || (dyn->n.pushed==8)) { \ - if(box64_dynarec_dump) dynarec_log(LOG_NONE, " Warning, suspicious x87 Push, stack=%d/%d on inst %d\n", dyn->n.x87stack, dyn->n.pushed, ninst); \ + if(BOX64ENV(dynarec_dump)) dynarec_log(LOG_NONE, " Warning, suspicious x87 Push, stack=%d/%d on inst %d\n", dyn->n.x87stack, dyn->n.pushed, ninst); \ dyn->abort = 1; \ return addr; \ } \ @@ -1074,7 +1074,7 @@ #define X87_POP_OR_FAIL(dyn, ninst, scratch) \ if ((dyn->n.x87stack==-8) || (dyn->n.poped==8)) { \ - if(box64_dynarec_dump) dynarec_log(LOG_NONE, " Warning, suspicious x87 Pop, stack=%d/%d on inst %d\n", dyn->n.x87stack, dyn->n.poped, ninst); \ + if(BOX64ENV(dynarec_dump)) dynarec_log(LOG_NONE, " Warning, suspicious x87 Pop, stack=%d/%d on inst %d\n", dyn->n.x87stack, dyn->n.poped, ninst); \ dyn->abort = 1; \ return addr; \ } \ @@ -1878,19 +1878,19 @@ uintptr_t dynarec64_AVX_F3_0F38(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip } \ break -#define NOTEST(s1) \ - if(box64_dynarec_test) { \ - STRw_U12(xZR, xEmu, offsetof(x64emu_t, test.test)); \ - STRw_U12(xZR, xEmu, offsetof(x64emu_t, test.clean));\ +#define NOTEST(s1) \ + if (BOX64ENV(dynarec_test)) { \ + STRw_U12(xZR, xEmu, offsetof(x64emu_t, test.test)); \ + STRw_U12(xZR, xEmu, offsetof(x64emu_t, test.clean)); \ } -#define SKIPTEST(s1) \ - if(box64_dynarec_test) { \ - 
STRw_U12(xZR, xEmu, offsetof(x64emu_t, test.clean));\ +#define SKIPTEST(s1) \ + if (BOX64ENV(dynarec_test)) { \ + STRw_U12(xZR, xEmu, offsetof(x64emu_t, test.clean)); \ } -#define GOTEST(s1, s2) \ - if(box64_dynarec_test) { \ - MOV32w(s2, 1); \ - STRw_U12(s2, xEmu, offsetof(x64emu_t, test.test)); \ +#define GOTEST(s1, s2) \ + if (BOX64ENV(dynarec_test)) { \ + MOV32w(s2, 1); \ + STRw_U12(s2, xEmu, offsetof(x64emu_t, test.test)); \ } #define GETREX() \ diff --git a/src/dynarec/arm64/dynarec_arm64_pass0.h b/src/dynarec/arm64/dynarec_arm64_pass0.h index 0f096482..3b049d79 100644 --- a/src/dynarec/arm64/dynarec_arm64_pass0.h +++ b/src/dynarec/arm64/dynarec_arm64_pass0.h @@ -9,14 +9,14 @@ #define MAYSETFLAGS() dyn->insts[ninst].x64.may_set = 1 #define READFLAGS(A) \ dyn->insts[ninst].x64.use_flags = A; dyn->f.dfnone = 1;\ - if(!box64_dynarec_df && (A)&X_PEND) dyn->insts[ninst].x64.use_flags = X_ALL; \ + if(!BOX64ENV(dynarec_df) && (A)&X_PEND) dyn->insts[ninst].x64.use_flags = X_ALL; \ dyn->f.pending=SF_SET #define SETFLAGS(A,B) \ dyn->insts[ninst].x64.set_flags = A; \ dyn->insts[ninst].x64.state_flags = (B)&~SF_DF; \ dyn->f.pending=(B)&SF_SET_PENDING; \ dyn->f.dfnone=((B)&SF_SET)?(((B)==SF_SET_NODF)?0:1):0; \ - if(!box64_dynarec_df) {dyn->f.dfnone=1; dyn->f.pending=0; if((A)==SF_PENDING){printf_log(LOG_INFO, "Warning, some opcode use SF_PENDING, forcing deferedflags ON\n"); box64_dynarec_df=1; }} + if(!BOX64ENV(dynarec_df)) {dyn->f.dfnone=1; dyn->f.pending=0; if((A)==SF_PENDING){printf_log(LOG_INFO, "Warning, some opcode use SF_PENDING, forcing deferedflags ON\n"); SET_BOX64ENV(dynarec_df, 1); }} #define EMIT(A) dyn->native_size+=4 #define JUMP(A, C) add_jump(dyn, ninst); add_next(dyn, (uintptr_t)A); SMEND(); dyn->insts[ninst].x64.jmp = A; dyn->insts[ninst].x64.jmp_cond = C; dyn->insts[ninst].x64.jmp_insts = 0 #define BARRIER(A) if(A!=BARRIER_MAYBE) {fpu_purgecache(dyn, ninst, 0, x1, x2, x3); dyn->insts[ninst].x64.barrier = A;} else dyn->insts[ninst].barrier_maybe = 1 @@ -39,7 +39,7 @@ #define DEFAULT \ --dyn->size; \ *ok = -1; \ - if(box64_dynarec_log>=LOG_INFO || box64_dynarec_dump || box64_dynarec_missing==1) {\ + if(BOX64ENV(dynarec_log)>=LOG_INFO || BOX64ENV(dynarec_dump) || BOX64ENV(dynarec_missing)==1) {\ dynarec_log(LOG_NONE, "%p: Dynarec stopped because of %sOpcode %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X", \ (void*)ip, rex.is32bits?"x86 ":"x64 ",\ PKip(0), \ @@ -67,4 +67,4 @@ #define INVERT_CARRY(A) dyn->insts[ninst].invert_carry = 1 #define INVERT_CARRY_BEFORE(A) dyn->insts[ninst].invert_carry_before = 1 // mark opcode as "unaligned" possible only if the current address is not marked as already unaligned -#define IF_UNALIGNED(A) if((dyn->insts[ninst].unaligned=(is_addr_unaligned(A)?0:1))) \ No newline at end of file +#define IF_UNALIGNED(A) if((dyn->insts[ninst].unaligned=(is_addr_unaligned(A)?0:1))) diff --git a/src/dynarec/arm64/dynarec_arm64_pass1.h b/src/dynarec/arm64/dynarec_arm64_pass1.h index f0f7019d..7e81b167 100644 --- a/src/dynarec/arm64/dynarec_arm64_pass1.h +++ b/src/dynarec/arm64/dynarec_arm64_pass1.h @@ -12,7 +12,7 @@ dyn->insts[ninst].x64.state_flags = (B)&~SF_DF; \ dyn->f.pending=(B)&SF_SET_PENDING; \ dyn->f.dfnone=((B)&SF_SET)?(((B)==SF_SET_NODF)?0:1):0; \ - if(!box64_dynarec_df) {dyn->f.dfnone=1; dyn->f.pending=0; } + if(!BOX64ENV(dynarec_df)) {dyn->f.dfnone=1; dyn->f.pending=0; } #define NEW_INST \ dyn->insts[ninst].f_entry = dyn->f; \ dyn->n.combined1 = dyn->n.combined2 = 0;\ diff --git a/src/dynarec/arm64/dynarec_arm64_pass3.h 
b/src/dynarec/arm64/dynarec_arm64_pass3.h index 260c69c4..6bfad31c 100644 --- a/src/dynarec/arm64/dynarec_arm64_pass3.h +++ b/src/dynarec/arm64/dynarec_arm64_pass3.h @@ -5,14 +5,14 @@ addInst(dyn->instsize, &dyn->insts_size, 0, 0); #define EMIT(A) \ do{ \ - if(box64_dynarec_dump) print_opcode(dyn, ninst, (uint32_t)(A)); \ + if(BOX64ENV(dynarec_dump)) print_opcode(dyn, ninst, (uint32_t)(A)); \ if((uintptr_t)dyn->block<dyn->tablestart) \ *(uint32_t*)(dyn->block) = (uint32_t)(A); \ dyn->block += 4; dyn->native_size += 4; \ dyn->insts[ninst].size2 += 4; \ }while(0) -#define MESSAGE(A, ...) if(box64_dynarec_dump) dynarec_log(LOG_NONE, __VA_ARGS__) +#define MESSAGE(A, ...) if(BOX64ENV(dynarec_dump)) dynarec_log(LOG_NONE, __VA_ARGS__) #define NEW_INST \ if(ninst) { \ if(dyn->insts[ninst].address!=(uintptr_t)dyn->block-(uintptr_t)dyn->native_start) dyn->abort = 1; \ diff --git a/src/dynarec/dynablock.c b/src/dynarec/dynablock.c index 62a63bec..517a702e 100644 --- a/src/dynarec/dynablock.c +++ b/src/dynarec/dynablock.c @@ -208,7 +208,7 @@ static dynablock_t* internalDBGetBlock(x64emu_t* emu, uintptr_t addr, uintptr_t } if(need_lock) { - if(box64_dynarec_wait) { + if(BOX64ENV(dynarec_wait)) { mutex_lock(&my_context->mutex_dyndump); } else { if(mutex_trylock(&my_context->mutex_dyndump)) // FillBlock not available for now diff --git a/src/dynarec/dynarec.c b/src/dynarec/dynarec.c index f6e61db8..ca0bd4f2 100644 --- a/src/dynarec/dynarec.c +++ b/src/dynarec/dynarec.c @@ -52,7 +52,7 @@ void* LinkNext(x64emu_t* emu, uintptr_t addr, void* x2, uintptr_t* x3) dynablock_t* block = NULL; if(hasAlternate((void*)addr)) { printf_log(LOG_DEBUG, "Jmp address has alternate: %p", (void*)addr); - if(box64_log<LOG_DEBUG) dynarec_log(LOG_INFO, "Jmp address has alternate: %p", (void*)addr); + if (BOX64ENV(log)<LOG_DEBUG) dynarec_log(LOG_INFO, "Jmp address has alternate: %p", (void*)addr); uintptr_t old_addr = addr; addr = (uintptr_t)getAlternate((void*)addr); // set new address R_RIP = addr; // but also new RIP! @@ -63,7 +63,7 @@ void* LinkNext(x64emu_t* emu, uintptr_t addr, void* x2, uintptr_t* x3) block = DBGetBlock(emu, addr, 1, is32bits); if(!block) { #ifdef HAVE_TRACE - if(LOG_INFO<=box64_dynarec_log) { + if(LOG_INFO<=BOX64ENV(dynarec_log)) { if(checkInHotPage(addr)) { dynarec_log(LOG_INFO, "Not trying to run a block from a Hotpage at %p\n", (void*)addr); } else { @@ -79,7 +79,7 @@ void* LinkNext(x64emu_t* emu, uintptr_t addr, void* x2, uintptr_t* x3) if(!block->done) { // not finished yet... leave linker #ifdef HAVE_TRACE - if(box64_dynarec_log && !block->isize) { + if(BOX64ENV(dynarec_log) && !block->isize) { dynablock_t* db = FindDynablockFromNativeAddress(x2-4); printf_log(LOG_NONE, "Warning, NULL block at %p from %p (db=%p, x64addr=%p/%s)\n", (void*)addr, x2-4, db, db?(void*)getX64Address(db, (uintptr_t)x2-4):NULL, db?getAddrFunctionName(getX64Address(db, (uintptr_t)x2-4)):"(nil)"); } @@ -172,7 +172,7 @@ void DynaRun(x64emu_t* emu) { printf_log(LOG_DEBUG, "Setjmp DynaRun, fs=0x%x\n", emu->segs[_FS]); #ifdef DYNAREC - if(box64_dynarec_test) { + if(BOX64ENV(dynarec_test)) { if(emu->test.clean) x64test_check(emu, R_RIP); emu->test.clean = 0; @@ -184,7 +184,7 @@ void DynaRun(x64emu_t* emu) emu->flags.need_jmpbuf = 0; #ifdef DYNAREC - if(!box64_dynarec) + if(!BOX64ENV(dynarec)) #endif Run(emu, 0); #ifdef DYNAREC @@ -210,18 +210,18 @@ void DynaRun(x64emu_t* emu) skip = 0; // no block, of block doesn't have DynaRec content (yet, temp is not null) // Use interpreter (should use single instruction step...) 
- if(box64_dynarec_log) { + if(BOX64ENV(dynarec_log)) { if(ACCESS_FLAG(F_TF)) dynarec_log(LOG_INFO, "%04d|Running Interpreter @%p, emu=%p because TF is on\n", GetTID(), (void*)R_RIP, emu); else dynarec_log(LOG_DEBUG, "%04d|Running Interpreter @%p, emu=%p\n", GetTID(), (void*)R_RIP, emu); } - if(box64_dynarec_test) + if (BOX64ENV(dynarec_test)) emu->test.clean = 0; Run(emu, 1); } else { dynarec_log(LOG_DEBUG, "%04d|Running DynaRec Block @%p (%p) of %d x64 insts (hash=0x%x) emu=%p\n", GetTID(), (void*)R_RIP, block->block, block->isize, block->hash, emu); - if(!box64_dynarec_df) { + if(!BOX64ENV(dynarec_df)) { CHECK_FLAGS(emu); } // block is here, let's run it! diff --git a/src/dynarec/dynarec_helper.h b/src/dynarec/dynarec_helper.h index 9997fce2..4f51c9a9 100644 --- a/src/dynarec/dynarec_helper.h +++ b/src/dynarec/dynarec_helper.h @@ -49,12 +49,12 @@ SMWRITE(); \ } while (0) -#define SMWRITE2() \ - do { \ - if (box64_dynarec_strongmem >= STRONGMEM_SIMD_WRITE) { \ - dyn->smwrite = 1; \ - dyn->insts[ninst].will_write = 2; \ - } \ +#define SMWRITE2() \ + do { \ + if (BOX64ENV(dynarec_strongmem) >= STRONGMEM_SIMD_WRITE) { \ + dyn->smwrite = 1; \ + dyn->insts[ninst].will_write = 2; \ + } \ } while (0) #define SMREAD() @@ -71,7 +71,7 @@ #define SMEND() \ do { \ /* If there is any guest memory write, which is a SEQ, then compute the last_write. */ \ - if (dyn->smwrite && (box64_dynarec_strongmem >= STRONGMEM_LAST_WRITE)) { \ + if (dyn->smwrite && (BOX64ENV(dynarec_strongmem) >= STRONGMEM_LAST_WRITE)) { \ int i = ninst; \ while (i >= 0 && !dyn->insts[i].will_write) \ --i; \ @@ -88,7 +88,7 @@ #define SMWRITE() \ do { \ /* Put a barrier at every third memory write. */ \ - if (box64_dynarec_strongmem >= STRONGMEM_SEQ_WRITE) { \ + if (BOX64ENV(dynarec_strongmem) >= STRONGMEM_SEQ_WRITE) { \ if (++dyn->smwrite >= 3 /* Every third memory write */) { \ DMB_ISH(); \ dyn->smwrite = 1; \ @@ -110,10 +110,10 @@ } while (0) // Similar to SMWRITE, but for SIMD instructions. -#define SMWRITE2() \ - do { \ - if (box64_dynarec_strongmem >= STRONGMEM_SIMD_WRITE) \ - SMWRITE(); \ +#define SMWRITE2() \ + do { \ + if (BOX64ENV(dynarec_strongmem) >= STRONGMEM_SIMD_WRITE) \ + SMWRITE(); \ } while (0) // An opcode reads guest memory, this need to be put before the LOAD instruction manually. @@ -130,22 +130,22 @@ } while (0) // An opcode will write memory, this will be put before the STORE instruction automatically. -#define WILLWRITE() \ - do { \ - if (box64_dynarec_strongmem >= dyn->insts[ninst].will_write && dyn->smwrite == 0) { \ - /* Will write but never written, this is the start of a SEQ, put a barrier. */ \ - if (box64_dynarec_weakbarrier) \ - DMB_ISHLD(); \ - else \ - DMB_ISH(); \ - } else if (box64_dynarec_strongmem >= STRONGMEM_LAST_WRITE && box64_dynarec_weakbarrier != 2 \ - && dyn->insts[ninst].last_write) { \ - /* Last write, put a barrier */ \ - if (box64_dynarec_weakbarrier) \ - DMB_ISHST(); \ - else \ - DMB_ISH(); \ - } \ +#define WILLWRITE() \ + do { \ + if (BOX64ENV(dynarec_strongmem) >= dyn->insts[ninst].will_write && dyn->smwrite == 0) { \ + /* Will write but never written, this is the start of a SEQ, put a barrier. 
*/ \ + if (BOX64ENV(dynarec_weakbarrier)) \ + DMB_ISHLD(); \ + else \ + DMB_ISH(); \ + } else if (BOX64ENV(dynarec_strongmem) >= STRONGMEM_LAST_WRITE && BOX64ENV(dynarec_weakbarrier) != 2 \ + && dyn->insts[ninst].last_write) { \ + /* Last write, put a barrier */ \ + if (BOX64ENV(dynarec_weakbarrier)) \ + DMB_ISHST(); \ + else \ + DMB_ISH(); \ + } \ } while (0) // Similar to WILLWRITE, but checks lock. @@ -167,11 +167,11 @@ // Will be put at the end of the SEQ #define SMEND() \ do { \ - if (box64_dynarec_strongmem) { \ + if (BOX64ENV(dynarec_strongmem)) { \ /* It's a SEQ, put a barrier here. */ \ if (dyn->smwrite) { \ /* Check if the next instruction has a end loop mark */ \ - if (box64_dynarec_weakbarrier) \ + if (BOX64ENV(dynarec_weakbarrier)) \ DMB_ISHST(); \ else \ DMB_ISH(); \ diff --git a/src/dynarec/dynarec_native.c b/src/dynarec/dynarec_native.c index e901e462..b096b79f 100644 --- a/src/dynarec/dynarec_native.c +++ b/src/dynarec/dynarec_native.c @@ -30,15 +30,15 @@ void printf_x64_instruction(zydis_dec_t* dec, instruction_x64_t* inst, const cha if(ip[0]==0xcc && ip[1]=='S' && ip[2]=='C') { uintptr_t a = *(uintptr_t*)(ip+3); if(a==0) { - dynarec_log(LOG_NONE, "%s%p: Exit x64emu%s\n", (box64_dynarec_dump>1)?"\e[01;33m":"", (void*)ip, (box64_dynarec_dump>1)?"\e[m":""); + dynarec_log(LOG_NONE, "%s%p: Exit x64emu%s\n", (BOX64ENV(dynarec_dump)>1)?"\e[01;33m":"", (void*)ip, (BOX64ENV(dynarec_dump)>1)?"\e[m":""); } else { - dynarec_log(LOG_NONE, "%s%p: Native call to %p%s\n", (box64_dynarec_dump>1)?"\e[01;33m":"", (void*)ip, (void*)a, (box64_dynarec_dump>1)?"\e[m":""); + dynarec_log(LOG_NONE, "%s%p: Native call to %p%s\n", (BOX64ENV(dynarec_dump)>1)?"\e[01;33m":"", (void*)ip, (void*)a, (BOX64ENV(dynarec_dump)>1)?"\e[m":""); } } else { if(dec) { - dynarec_log(LOG_NONE, "%s%p: %s", (box64_dynarec_dump > 1) ? "\e[01;33m" : "", ip, DecodeX64Trace(dec, inst->addr, 1)); + dynarec_log(LOG_NONE, "%s%p: %s", (BOX64ENV(dynarec_dump) > 1) ? "\e[01;33m" : "", ip, DecodeX64Trace(dec, inst->addr, 1)); } else { - dynarec_log(LOG_NONE, "%s%p: ", (box64_dynarec_dump>1)?"\e[01;33m":"", ip); + dynarec_log(LOG_NONE, "%s%p: ", (BOX64ENV(dynarec_dump)>1)?"\e[01;33m":"", ip); for(int i=0; i<inst->size; ++i) { dynarec_log(LOG_NONE, "%02X ", ip[i]); } @@ -55,12 +55,12 @@ void printf_x64_instruction(zydis_dec_t* dec, instruction_x64_t* inst, const cha } } // end of line and colors - dynarec_log(LOG_NONE, "%s\n", (box64_dynarec_dump>1)?"\e[m":""); + dynarec_log(LOG_NONE, "%s\n", (BOX64ENV(dynarec_dump)>1)?"\e[m":""); } } void add_next(dynarec_native_t *dyn, uintptr_t addr) { - if(!box64_dynarec_bigblock) + if(!BOX64ENV(dynarec_bigblock)) return; // exist? for(int i=0; i<dyn->next_sz; ++i) @@ -422,7 +422,7 @@ static void updateYmm0s(dynarec_native_t* dyn, int ninst, int max_ninst_reached) int can_incr = ninst == max_ninst_reached; // Are we the top-level call? int ok = 1; while ((can_incr || ok) && ninst<dyn->size) { - //if(box64_dynarec_dump) dynarec_log(LOG_NONE, "update ninst=%d (%d): can_incr=%d\n", ninst, max_ninst_reached, can_incr); + //if(BOX64ENV(dynarec_dump)) dynarec_log(LOG_NONE, "update ninst=%d (%d): can_incr=%d\n", ninst, max_ninst_reached, can_incr); uint16_t new_purge_ymm, new_ymm0_in, new_ymm0_out; if (dyn->insts[ninst].pred_sz && dyn->insts[ninst].x64.alive) { @@ -433,18 +433,18 @@ static void updateYmm0s(dynarec_native_t* dyn, int ninst, int max_ninst_reached) uint16_t ymm0_inter = (ninst && !(dyn->insts[ninst].x64.barrier & BARRIER_FLOAT)) ? 
((uint16_t)-1) : (uint16_t)0; for (int i = 0; i < dyn->insts[ninst].pred_sz; ++i) { int pred = dyn->insts[ninst].pred[i]; - //if(box64_dynarec_dump) dynarec_log(LOG_NONE, "\twith pred[%d] = %d", i, pred); + //if(BOX64ENV(dynarec_dump)) dynarec_log(LOG_NONE, "\twith pred[%d] = %d", i, pred); if (pred >= max_ninst_reached) { - //if(box64_dynarec_dump) dynarec_log(LOG_NONE, " (skipped)\n"); + //if(BOX64ENV(dynarec_dump)) dynarec_log(LOG_NONE, " (skipped)\n"); continue; } int pred_out = dyn->insts[pred].x64.has_callret ? 0 : dyn->insts[pred].ymm0_out; - //if(box64_dynarec_dump) dynarec_log(LOG_NONE, " ~> %04X\n", pred_out); + //if(BOX64ENV(dynarec_dump)) dynarec_log(LOG_NONE, " ~> %04X\n", pred_out); ymm0_union |= pred_out; ymm0_inter &= pred_out; } - //if(box64_dynarec_dump) dynarec_log(LOG_NONE, "\t=> %04X,%04X\n", ymm0_union, ymm0_inter); + //if(BOX64ENV(dynarec_dump)) dynarec_log(LOG_NONE, "\t=> %04X,%04X\n", ymm0_union, ymm0_inter); // Notice the default values yield something coherent here (if all pred are after ninst) new_purge_ymm = ymm0_union & ~ymm0_inter; new_ymm0_in = ymm0_inter; @@ -466,7 +466,7 @@ static void updateYmm0s(dynarec_native_t* dyn, int ninst, int max_ninst_reached) int jmp = (dyn->insts[ninst].x64.jmp)?dyn->insts[ninst].x64.jmp_insts:-1; if((jmp!=-1) && (jmp < max_ninst_reached)) { - //if(box64_dynarec_dump) dynarec_log(LOG_NONE, "\t! jump to %d\n", jmp); + //if(BOX64ENV(dynarec_dump)) dynarec_log(LOG_NONE, "\t! jump to %d\n", jmp); // The jump goes before the last instruction reached, update the destination // If this is the top level call, this means the jump goes backward (jmp != ninst) // Otherwise, since we don't update all instructions, we may miss the update (don't use jmp < ninst) @@ -480,7 +480,7 @@ static void updateYmm0s(dynarec_native_t* dyn, int ninst, int max_ninst_reached) // Also update jumps to before (they are skipped otherwise) int jmp = (dyn->insts[ninst].x64.jmp)?dyn->insts[ninst].x64.jmp_insts:-1; if((jmp!=-1) && (jmp < max_ninst_reached)) { - //if(box64_dynarec_dump) dynarec_log(LOG_NONE, "\t! jump to %d\n", jmp); + //if(BOX64ENV(dynarec_dump)) dynarec_log(LOG_NONE, "\t! jump to %d\n", jmp); updateYmm0s(dyn, jmp, max_ninst_reached); } } else { @@ -597,7 +597,7 @@ void* FillBlock64(dynablock_t* block, uintptr_t addr, int alternate, int is32bit C .. 
C+sz : arch: arch specific info (likes flags info) per inst (can be absent) */ - if(addr>=box64_nodynarec_start && addr<box64_nodynarec_end) { + if(addr>=BOX64ENV(nodynarec_start) && addr<BOX64ENV(nodynarec_end)) { dynarec_log(LOG_INFO, "Create empty block in no-dynarec zone\n"); return CreateEmptyBlock(block, addr, is32bits); } @@ -627,7 +627,7 @@ void* FillBlock64(dynablock_t* block, uintptr_t addr, int alternate, int is32bit // pass 0, addresses, x64 jump addresses, overall size of the block uintptr_t end = native_pass0(&helper, addr, alternate, is32bits); if(helper.abort) { - if(box64_dynarec_dump || box64_dynarec_log)dynarec_log(LOG_NONE, "Abort dynablock on pass0\n"); + if(BOX64ENV(dynarec_dump) || BOX64ENV(dynarec_log))dynarec_log(LOG_NONE, "Abort dynablock on pass0\n"); CancelBlock64(0); return NULL; } @@ -742,7 +742,7 @@ void* FillBlock64(dynablock_t* block, uintptr_t addr, int alternate, int is32bit // pass 1, float optimizations, first pass for flags native_pass1(&helper, addr, alternate, is32bits); if(helper.abort) { - if(box64_dynarec_dump || box64_dynarec_log)dynarec_log(LOG_NONE, "Abort dynablock on pass1\n"); + if(BOX64ENV(dynarec_dump) || BOX64ENV(dynarec_log))dynarec_log(LOG_NONE, "Abort dynablock on pass1\n"); CancelBlock64(0); return NULL; } @@ -750,7 +750,7 @@ void* FillBlock64(dynablock_t* block, uintptr_t addr, int alternate, int is32bit // pass 2, instruction size native_pass2(&helper, addr, alternate, is32bits); if(helper.abort) { - if(box64_dynarec_dump || box64_dynarec_log)dynarec_log(LOG_NONE, "Abort dynablock on pass2\n"); + if(BOX64ENV(dynarec_dump) || BOX64ENV(dynarec_log))dynarec_log(LOG_NONE, "Abort dynablock on pass2\n"); CancelBlock64(0); return NULL; } @@ -783,12 +783,12 @@ void* FillBlock64(dynablock_t* block, uintptr_t addr, int alternate, int is32bit helper.table64cap = helper.table64size; helper.table64 = (uint64_t*)helper.tablestart; // pass 3, emit (log emit native opcode) - if(box64_dynarec_dump) { - dynarec_log(LOG_NONE, "%s%04d|Emitting %zu bytes for %u %s bytes (native=%zu, table64=%zu, instsize=%zu, arch=%zu)", (box64_dynarec_dump>1)?"\e[01;36m":"", GetTID(), helper.native_size, helper.isize, is32bits?"x86":"x64", native_size, helper.table64size*sizeof(uint64_t), insts_rsize, arch_size); + if(BOX64ENV(dynarec_dump)) { + dynarec_log(LOG_NONE, "%s%04d|Emitting %zu bytes for %u %s bytes (native=%zu, table64=%zu, instsize=%zu, arch=%zu)", (BOX64ENV(dynarec_dump)>1)?"\e[01;36m":"", GetTID(), helper.native_size, helper.isize, is32bits?"x86":"x64", native_size, helper.table64size*sizeof(uint64_t), insts_rsize, arch_size); printFunctionAddr(helper.start, " => "); - dynarec_log(LOG_NONE, "%s\n", (box64_dynarec_dump>1)?"\e[m":""); + dynarec_log(LOG_NONE, "%s\n", (BOX64ENV(dynarec_dump)>1)?"\e[m":""); } - if (box64_dynarec_gdbjit) { + if (BOX64ENV(dynarec_gdbjit)) { GdbJITNewBlock(helper.gdbjit_block, (GDB_CORE_ADDR)block->actual_block, (GDB_CORE_ADDR)block->actual_block + native_size, helper.start); } int oldtable64size = helper.table64size; @@ -800,7 +800,7 @@ void* FillBlock64(dynablock_t* block, uintptr_t addr, int alternate, int is32bit helper.insts_size = 0; // reset native_pass3(&helper, addr, alternate, is32bits); if(helper.abort) { - if(box64_dynarec_dump || box64_dynarec_log)dynarec_log(LOG_NONE, "Abort dynablock on pass3\n"); + if(BOX64ENV(dynarec_dump) || BOX64ENV(dynarec_log))dynarec_log(LOG_NONE, "Abort dynablock on pass3\n"); CancelBlock64(0); return NULL; } @@ -833,7 +833,7 @@ void* FillBlock64(dynablock_t* block, uintptr_t addr, int 
alternate, int is32bit //block->x64_addr = (void*)start; block->x64_size = end-start; // all done... - if (box64_dynarec_gdbjit) { + if (BOX64ENV(dynarec_gdbjit)) { GdbJITBlockReady(helper.gdbjit_block); } ClearCache(actual_p+sizeof(void*), native_size); // need to clear the cache before execution... @@ -892,5 +892,5 @@ void writePerfMap(uintptr_t func_addr, uintptr_t code_addr, size_t code_size, co uintptr_t start = 0; const char* symbname = FindNearestSymbolName(FindElfAddress(my_context, func_addr), (void*)func_addr, &start, &sz); snprintf(pbuf, sizeof(pbuf), "0x%lx %ld %s:%s\n", code_addr, code_size, symbname, inst_name); - write(box64_dynarec_perf_map_fd, pbuf, strlen(pbuf)); + write(BOX64ENV(dynarec_perf_map_fd), pbuf, strlen(pbuf)); } diff --git a/src/dynarec/dynarec_native_functions.c b/src/dynarec/dynarec_native_functions.c index 18082d82..bcf06e6a 100644 --- a/src/dynarec/dynarec_native_functions.c +++ b/src/dynarec/dynarec_native_functions.c @@ -184,7 +184,7 @@ void native_fld(x64emu_t* emu, uint8_t* ed) void native_ud(x64emu_t* emu) { - if(box64_dynarec_test) + if(BOX64ENV(dynarec_test)) emu->test.test = 0; emit_signal(emu, SIGILL, (void*)R_RIP, 0); } diff --git a/src/dynarec/dynarec_native_pass.c b/src/dynarec/dynarec_native_pass.c index 50d5e231..7ab4f528 100644 --- a/src/dynarec/dynarec_native_pass.c +++ b/src/dynarec/dynarec_native_pass.c @@ -57,7 +57,7 @@ uintptr_t native_pass(dynarec_native_t* dyn, uintptr_t addr, int alternate, int ARCH_INIT(); int reset_n = -1; // -1 no reset; -2 reset to 0; else reset to the state of reset_n dyn->last_ip = (alternate || (dyn->insts && dyn->insts[0].pred_sz))?0:ip; // RIP is always set at start of block unless there is a predecessor! - int stopblock = 2+(FindElfAddress(my_context, addr)?0:1); // if block is in elf_memory, it can be extended with box64_dynarec_bigblock==2, else it needs 3 + int stopblock = 2+(FindElfAddress(my_context, addr)?0:1); // if block is in elf_memory, it can be extended with BOX64ENV(dynarec_bigblock)==2, else it needs 3 // ok, go now INIT; #if STEP == 0 @@ -126,7 +126,7 @@ uintptr_t native_pass(dynarec_native_t* dyn, uintptr_t addr, int alternate, int if((dyn->insts[ninst].x64.need_before&~X_PEND) && !dyn->insts[ninst].pred_sz) { READFLAGS(dyn->insts[ninst].x64.need_before&~X_PEND); } - if(box64_dynarec_test && (!box64_dynarec_test_end || (ip>=box64_dynarec_test_start && ip<box64_dynarec_test_end) )) { + if(BOX64ENV(dynarec_test) && (!BOX64ENV(dynarec_test_end) || (ip>=BOX64ENV(dynarec_test_start) && ip<BOX64ENV(dynarec_test_end)))) { MESSAGE(LOG_DUMP, "TEST STEP ----\n"); extcache_native_t save; fpu_save_and_unwind(dyn, ninst, &save); @@ -137,7 +137,7 @@ uintptr_t native_pass(dynarec_native_t* dyn, uintptr_t addr, int alternate, int MESSAGE(LOG_DUMP, "----------\n"); } #ifdef HAVE_TRACE - else if(my_context->dec && box64_dynarec_trace) { + else if(my_context->dec && BOX64ENV(dynarec_trace)) { if((trace_end == 0) || ((ip >= trace_start) && (ip < trace_end))) { MESSAGE(LOG_DUMP, "TRACE ----\n"); @@ -232,7 +232,7 @@ uintptr_t native_pass(dynarec_native_t* dyn, uintptr_t addr, int alternate, int #else // check if block need to be stopped, because it's a 00 00 opcode (unreadeable is already checked earlier) if((ok>0) && !dyn->forward && !(*(uint32_t*)addr)) { - if(box64_dynarec_dump) dynarec_log(LOG_NONE, "Stopping block at %p reason: %s\n", (void*)addr, "Next opcode is 00 00 00 00"); + if(BOX64ENV(dynarec_dump)) dynarec_log(LOG_NONE, "Stopping block at %p reason: %s\n", (void*)addr, "Next opcode is 00 00 00 00"); 
ok = 0; need_epilog = 1; dyn->insts[ninst].x64.need_after |= X_PEND; @@ -241,7 +241,7 @@ uintptr_t native_pass(dynarec_native_t* dyn, uintptr_t addr, int alternate, int if(dyn->forward_to == addr && !need_epilog && ok>=0) { // we made it! reset_n = get_first_jump_addr(dyn, addr); - if(box64_dynarec_dump) dynarec_log(LOG_NONE, "Forward extend block for %d bytes %s%p -> %p (ninst %d - %d)\n", dyn->forward_to-dyn->forward, dyn->insts[dyn->forward_ninst].x64.has_callret?"(opt. call) ":"", (void*)dyn->forward, (void*)dyn->forward_to, reset_n, ninst); + if(BOX64ENV(dynarec_dump)) dynarec_log(LOG_NONE, "Forward extend block for %d bytes %s%p -> %p (ninst %d - %d)\n", dyn->forward_to-dyn->forward, dyn->insts[dyn->forward_ninst].x64.has_callret?"(opt. call) ":"", (void*)dyn->forward, (void*)dyn->forward_to, reset_n, ninst); if(dyn->insts[dyn->forward_ninst].x64.has_callret && !dyn->insts[dyn->forward_ninst].x64.has_next) dyn->insts[dyn->forward_ninst].x64.has_next = 1; // this block actually continue dyn->forward = 0; @@ -251,7 +251,7 @@ uintptr_t native_pass(dynarec_native_t* dyn, uintptr_t addr, int alternate, int ok = 1; // in case it was 0 } else if ((dyn->forward_to < addr) || ok<=0) { // something when wrong! rollback - if(box64_dynarec_dump) dynarec_log(LOG_NONE, "Could not forward extend block for %d bytes %p -> %p\n", dyn->forward_to-dyn->forward, (void*)dyn->forward, (void*)dyn->forward_to); + if(BOX64ENV(dynarec_dump)) dynarec_log(LOG_NONE, "Could not forward extend block for %d bytes %p -> %p\n", dyn->forward_to-dyn->forward, (void*)dyn->forward, (void*)dyn->forward_to); ok = 0; dyn->size = dyn->forward_size; ninst = dyn->forward_ninst; @@ -262,7 +262,7 @@ uintptr_t native_pass(dynarec_native_t* dyn, uintptr_t addr, int alternate, int dyn->forward_ninst = 0; } // else just continue - } else if(!ok && !need_epilog && box64_dynarec_bigblock && (getProtection(addr+3)&~PROT_READ)) + } else if(!ok && !need_epilog && BOX64ENV(dynarec_bigblock) && (getProtection(addr+3)&~PROT_READ)) if(*(uint32_t*)addr!=0) { // check if need to continue (but is next 4 bytes are 0, stop) uintptr_t next = get_closest_next(dyn, addr); if(next && ( @@ -277,9 +277,9 @@ uintptr_t native_pass(dynarec_native_t* dyn, uintptr_t addr, int alternate, int // and pred table is not ready yet reset_n = get_first_jump(dyn, next); } - if(box64_dynarec_dump) dynarec_log(LOG_NONE, "Extend block %p, %s%p -> %p (ninst=%d, jump from %d)\n", dyn, dyn->insts[ninst].x64.has_callret?"(opt. call) ":"", (void*)addr, (void*)next, ninst+1, dyn->insts[ninst].x64.has_callret?ninst:reset_n); - } else if(next && (int)(next-addr)<box64_dynarec_forward && (getProtection(next)&PROT_READ)/*box64_dynarec_bigblock>=stopblock*/) { - if(!((box64_dynarec_bigblock<stopblock) && !isJumpTableDefault64((void*)next))) { + if(BOX64ENV(dynarec_dump)) dynarec_log(LOG_NONE, "Extend block %p, %s%p -> %p (ninst=%d, jump from %d)\n", dyn, dyn->insts[ninst].x64.has_callret?"(opt. 
call) ":"", (void*)addr, (void*)next, ninst+1, dyn->insts[ninst].x64.has_callret?ninst:reset_n); + } else if(next && (int)(next-addr)<BOX64ENV(dynarec_forward) && (getProtection(next)&PROT_READ)/*BOX64ENV(dynarec_bigblock)>=stopblock*/) { + if(!((BOX64ENV(dynarec_bigblock)<stopblock) && !isJumpTableDefault64((void*)next))) { if(dyn->forward) { if(next<dyn->forward_to) dyn->forward_to = next; @@ -327,8 +327,8 @@ uintptr_t native_pass(dynarec_native_t* dyn, uintptr_t addr, int alternate, int ++ninst; #if STEP == 0 memset(&dyn->insts[ninst], 0, sizeof(instruction_native_t)); - if((ok>0) && (((box64_dynarec_bigblock<stopblock) && !isJumpTableDefault64((void*)addr)) - || (addr>=box64_nodynarec_start && addr<box64_nodynarec_end))) + if((ok>0) && (((BOX64ENV(dynarec_bigblock)<stopblock) && !isJumpTableDefault64((void*)addr)) + || (addr>=BOX64ENV(nodynarec_start) && addr<BOX64ENV(nodynarec_end)))) #else if((ok>0) && (ninst==dyn->size)) #endif @@ -346,7 +346,7 @@ uintptr_t native_pass(dynarec_native_t* dyn, uintptr_t addr, int alternate, int } #endif MESSAGE(LOG_DEBUG, "Stopping block %p (%d / %d)\n",(void*)init_addr, ninst, dyn->size); - if(!box64_dynarec_dump && addr>=box64_nodynarec_start && addr<box64_nodynarec_end) + if(!BOX64ENV(dynarec_dump) && addr>=BOX64ENV(nodynarec_start) && addr<BOX64ENV(nodynarec_end)) dynarec_log(LOG_INFO, "Stopping block in no-dynarec zone\n"); --ninst; if(!dyn->insts[ninst].x64.barrier) { diff --git a/src/dynarec/la64/dynarec_la64_00.c b/src/dynarec/la64/dynarec_la64_00.c index a3a188a7..f9dfbc0a 100644 --- a/src/dynarec/la64/dynarec_la64_00.c +++ b/src/dynarec/la64/dynarec_la64_00.c @@ -1796,7 +1796,7 @@ uintptr_t dynarec64_00(dynarec_la64_t* dyn, uintptr_t addr, uintptr_t ip, int ni break; case 0xC2: INST_NAME("RETN"); - if (box64_dynarec_safeflags) { + if (BOX64ENV(dynarec_safeflags)) { READFLAGS(X_PEND); // lets play safe here too } fpu_purgecache(dyn, ninst, 1, x1, x2, x3); // using next, even if there no next @@ -1807,7 +1807,7 @@ uintptr_t dynarec64_00(dynarec_la64_t* dyn, uintptr_t addr, uintptr_t ip, int ni break; case 0xC3: INST_NAME("RET"); - if (box64_dynarec_safeflags) { + if (BOX64ENV(dynarec_safeflags)) { READFLAGS(X_PEND); // so instead, force the deferred flags, so it's not too slow, and flags are not lost } fpu_purgecache(dyn, ninst, 1, x1, x2, x3); // using next, even if there no next @@ -1902,7 +1902,7 @@ uintptr_t dynarec64_00(dynarec_la64_t* dyn, uintptr_t addr, uintptr_t ip, int ni } } else { INST_NAME("INT 3"); - if (!box64_ignoreint3) { + if (!BOX64ENV(ignoreint3)) { // check if TRAP signal is handled TABLE64(x1, (uintptr_t)my_context); MOV32w(x2, offsetof(box64context_t, signals[SIGTRAP])); @@ -1959,7 +1959,7 @@ uintptr_t dynarec64_00(dynarec_la64_t* dyn, uintptr_t addr, uintptr_t ip, int ni BEQ_NEXT(x2, xZR); } SETFLAGS(X_ALL, SF_SET_PENDING, NAT_FLAGS_FUSION); // some flags are left undefined - if (box64_dynarec_safeflags > 1) MAYSETFLAGS(); + if (BOX64ENV(dynarec_safeflags) > 1) MAYSETFLAGS(); emit_shl8(dyn, ninst, x1, x2, x5, x4, x6); EBBACK(); break; @@ -1975,7 +1975,7 @@ uintptr_t dynarec64_00(dynarec_la64_t* dyn, uintptr_t addr, uintptr_t ip, int ni BEQ_NEXT(x2, xZR); } SETFLAGS(X_ALL, SF_SET_PENDING, NAT_FLAGS_FUSION); // some flags are left undefined - if (box64_dynarec_safeflags > 1) MAYSETFLAGS(); + if (BOX64ENV(dynarec_safeflags) > 1) MAYSETFLAGS(); emit_shr8(dyn, ninst, x1, x2, x5, x4, x6); EBBACK(); break; @@ -2185,7 +2185,7 @@ uintptr_t dynarec64_00(dynarec_la64_t* dyn, uintptr_t addr, uintptr_t ip, int ni MESSAGE(LOG_DUMP, 
"Native Call to %s (retn=%d)\n", GetNativeName(GetNativeFnc(dyn->insts[ninst].natcall - 1)), dyn->insts[ninst].retn); // calling a native function sse_purge07cache(dyn, ninst, x3); - if ((box64_log < 2 && !cycle_log) && dyn->insts[ninst].natcall) { + if ((BOX64ENV(log) < 2 && !BOX64ENV(rolling_log)) && dyn->insts[ninst].natcall) { // FIXME: Add basic support for isSimpleWrapper tmp = 0; // isSimpleWrapper(*(wrapper_t*)(dyn->insts[ninst].natcall + 2)); } else @@ -2195,7 +2195,7 @@ uintptr_t dynarec64_00(dynarec_la64_t* dyn, uintptr_t addr, uintptr_t ip, int ni // FIXME: if (dyn->insts[ninst].natcall && isRetX87Wrapper(*(wrapper_t*)(dyn->insts[ninst].natcall + 2))) // // return value will be on the stack, so the stack depth needs to be updated // x87_purgecache(dyn, ninst, 0, x3, x1, x4); - if ((box64_log < 2 && !cycle_log) && dyn->insts[ninst].natcall && tmp) { + if ((BOX64ENV(log) < 2 && !BOX64ENV(rolling_log)) && dyn->insts[ninst].natcall && tmp) { // GETIP(ip+3+8+8); // read the 0xCC // FIXME: call_n(dyn, ninst, *(void**)(dyn->insts[ninst].natcall + 2 + 8), tmp); POP1(xRIP); // pop the return address @@ -2228,13 +2228,13 @@ uintptr_t dynarec64_00(dynarec_la64_t* dyn, uintptr_t addr, uintptr_t ip, int ni } break; default: - if ((box64_dynarec_safeflags > 1) || (ninst && dyn->insts[ninst - 1].x64.set_flags)) { + if ((BOX64ENV(dynarec_safeflags) > 1) || (ninst && dyn->insts[ninst - 1].x64.set_flags)) { READFLAGS(X_PEND); // that's suspicious } else { SETFLAGS(X_ALL, SF_SET_NODF, NAT_FLAGS_NOFUSION); // Hack to set flags to "dont'care" state } // regular call - /*if (box64_dynarec_callret && box64_dynarec_bigblock > 1) { + /*if (BOX64ENV(dynarec_callret) && BOX64ENV(dynarec_bigblock) > 1) { BARRIER(BARRIER_FULL); } else { BARRIER(BARRIER_FLOAT); @@ -2249,7 +2249,7 @@ uintptr_t dynarec64_00(dynarec_la64_t* dyn, uintptr_t addr, uintptr_t ip, int ni } fpu_purgecache(dyn, ninst, 1, x1, x3, x4); PUSH1z(x2); - if (box64_dynarec_callret) { + if (BOX64ENV(dynarec_callret)) { SET_HASCALLRET(); // Push actual return address if (addr < (dyn->start + dyn->isize)) { @@ -2276,7 +2276,7 @@ uintptr_t dynarec64_00(dynarec_la64_t* dyn, uintptr_t addr, uintptr_t ip, int ni else j64 = addr + i32; jump_to_next(dyn, j64, 0, ninst, rex.is32bits); - if (box64_dynarec_callret && addr >= (dyn->start + dyn->isize)) { + if (BOX64ENV(dynarec_callret) && addr >= (dyn->start + dyn->isize)) { // jumps out of current dynablock... 
MARK; j64 = getJumpTableAddress64(addr); @@ -2602,13 +2602,13 @@ uintptr_t dynarec64_00(dynarec_la64_t* dyn, uintptr_t addr, uintptr_t ip, int ni break; case 2: INST_NAME("CALL Ed"); - PASS2IF ((box64_dynarec_safeflags > 1) || ((ninst && dyn->insts[ninst - 1].x64.set_flags) || ((ninst > 1) && dyn->insts[ninst - 2].x64.set_flags)), 1) { + PASS2IF ((BOX64ENV(dynarec_safeflags) > 1) || ((ninst && dyn->insts[ninst - 1].x64.set_flags) || ((ninst > 1) && dyn->insts[ninst - 2].x64.set_flags)), 1) { READFLAGS(X_PEND); // that's suspicious } else { SETFLAGS(X_ALL, SF_SET_NODF, NAT_FLAGS_NOFUSION); // Hack to put flag in "don't care" state } GETEDz(0); - if (box64_dynarec_callret && box64_dynarec_bigblock > 1) { + if (BOX64ENV(dynarec_callret) && BOX64ENV(dynarec_bigblock) > 1) { BARRIER(BARRIER_FULL); } else { BARRIER(BARRIER_FLOAT); @@ -2616,7 +2616,7 @@ uintptr_t dynarec64_00(dynarec_la64_t* dyn, uintptr_t addr, uintptr_t ip, int ni *ok = 0; } GETIP_(addr); - if (box64_dynarec_callret) { + if (BOX64ENV(dynarec_callret)) { SET_HASCALLRET(); // Push actual return address if (addr < (dyn->start + dyn->isize)) { @@ -2637,7 +2637,7 @@ uintptr_t dynarec64_00(dynarec_la64_t* dyn, uintptr_t addr, uintptr_t ip, int ni } PUSH1z(xRIP); jump_to_next(dyn, 0, ed, ninst, rex.is32bits); - if (box64_dynarec_callret && addr >= (dyn->start + dyn->isize)) { + if (BOX64ENV(dynarec_callret) && addr >= (dyn->start + dyn->isize)) { // jumps out of current dynablock... MARK; j64 = getJumpTableAddress64(addr); diff --git a/src/dynarec/la64/dynarec_la64_0f.c b/src/dynarec/la64/dynarec_la64_0f.c index 313f4f07..70b20bd7 100644 --- a/src/dynarec/la64/dynarec_la64_0f.c +++ b/src/dynarec/la64/dynarec_la64_0f.c @@ -592,7 +592,7 @@ uintptr_t dynarec64_0F(dynarec_la64_t* dyn, uintptr_t addr, uintptr_t ip, int ni nextop = F8; GETGX(v0, 1); GETEX(v1, 0, 0); - if (!box64_dynarec_fastnan && v0 != v1) { + if (!BOX64ENV(dynarec_fastnan) && v0 != v1) { q0 = fpu_get_scratch(dyn); // always copy from v1 if any oprand is NaN VFCMP_S(q0, v0, v1, cUN); @@ -614,7 +614,7 @@ uintptr_t dynarec64_0F(dynarec_la64_t* dyn, uintptr_t addr, uintptr_t ip, int ni nextop = F8; GETGX(v0, 1); GETEX(v1, 0, 0); - if (!box64_dynarec_fastnan && v0 != v1) { + if (!BOX64ENV(dynarec_fastnan) && v0 != v1) { q0 = fpu_get_scratch(dyn); // always copy from v1 if any oprand is NaN VFCMP_S(q0, v0, v1, cUN); @@ -856,7 +856,7 @@ uintptr_t dynarec64_0F(dynarec_la64_t* dyn, uintptr_t addr, uintptr_t ip, int ni INST_NAME("LDMXCSR Md"); GETED(0); ST_W(ed, xEmu, offsetof(x64emu_t, mxcsr)); - if (box64_sse_flushto0) { + if (BOX64ENV(sse_flushto0)) { // TODO } break; @@ -901,7 +901,7 @@ uintptr_t dynarec64_0F(dynarec_la64_t* dyn, uintptr_t addr, uintptr_t ip, int ni nextop = F8; GETGD; GETED(0); - if (box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { // avoid noise during test CLEAR_FLAGS(x3); } diff --git a/src/dynarec/la64/dynarec_la64_66.c b/src/dynarec/la64/dynarec_la64_66.c index 27999e66..68894bf4 100644 --- a/src/dynarec/la64/dynarec_la64_66.c +++ b/src/dynarec/la64/dynarec_la64_66.c @@ -795,7 +795,7 @@ uintptr_t dynarec64_66(dynarec_la64_t* dyn, uintptr_t addr, uintptr_t ip, int ni BEQ_NEXT(x2, xZR); } SETFLAGS(X_ALL, SF_SET_PENDING, NAT_FLAGS_FUSION); // some flags are left undefined - if (box64_dynarec_safeflags > 1) MAYSETFLAGS(); + if (BOX64ENV(dynarec_safeflags) > 1) MAYSETFLAGS(); GETEW(x1, 0); emit_shr16(dyn, ninst, x1, x2, x5, x4, x6); EWBACK; @@ -811,7 +811,7 @@ uintptr_t dynarec64_66(dynarec_la64_t* dyn, uintptr_t addr, uintptr_t ip, int ni BEQ_NEXT(x2, 
xZR); } SETFLAGS(X_ALL, SF_SET_PENDING, NAT_FLAGS_FUSION); // some flags are left undefined - if (box64_dynarec_safeflags > 1) + if (BOX64ENV(dynarec_safeflags) > 1) MAYSETFLAGS(); GETEW(x1, 0); emit_shl16(dyn, ninst, x1, x2, x5, x4, x6); @@ -827,7 +827,7 @@ uintptr_t dynarec64_66(dynarec_la64_t* dyn, uintptr_t addr, uintptr_t ip, int ni BEQ_NEXT(x2, xZR); } SETFLAGS(X_ALL, SF_SET_PENDING, NAT_FLAGS_FUSION); // some flags are left undefined - if (box64_dynarec_safeflags > 1) + if (BOX64ENV(dynarec_safeflags) > 1) MAYSETFLAGS(); GETSEW(x1, 0); emit_sar16(dyn, ninst, x1, x2, x5, x4, x6); @@ -872,7 +872,7 @@ uintptr_t dynarec64_66(dynarec_la64_t* dyn, uintptr_t addr, uintptr_t ip, int ni SLLI_D(x7, xRDX, 48); SRLI_D(x7, x7, 32); OR(x2, x2, x7); - if(box64_dynarec_div0) { + if(BOX64ENV(dynarec_div0)) { BNE_MARK3(ed, xZR); GETIP_(ip); STORE_XEMU_CALL(); @@ -893,7 +893,7 @@ uintptr_t dynarec64_66(dynarec_la64_t* dyn, uintptr_t addr, uintptr_t ip, int ni SETFLAGS(X_ALL, SF_SET, NAT_FLAGS_NOFUSION); SET_DFNONE(); GETSEW(x1, 0); - if (box64_dynarec_div0) { + if (BOX64ENV(dynarec_div0)) { BNE_MARK3(ed, xZR); GETIP_(ip); STORE_XEMU_CALL(); diff --git a/src/dynarec/la64/dynarec_la64_660f.c b/src/dynarec/la64/dynarec_la64_660f.c index 5d74e154..5b6055cc 100644 --- a/src/dynarec/la64/dynarec_la64_660f.c +++ b/src/dynarec/la64/dynarec_la64_660f.c @@ -861,13 +861,13 @@ uintptr_t dynarec64_660F(dynarec_la64_t* dyn, uintptr_t addr, uintptr_t ip, int nextop = F8; GETEX(q0, 0, 0); GETGX(q1, 1); - if (!box64_dynarec_fastnan) { + if (!BOX64ENV(dynarec_fastnan)) { v0 = fpu_get_scratch(dyn); v1 = fpu_get_scratch(dyn); VFCMP_D(v0, q0, q1, cUN); } VFADD_D(q1, q1, q0); - if (!box64_dynarec_fastnan) { + if (!BOX64ENV(dynarec_fastnan)) { VFCMP_D(v1, q1, q1, cUN); VANDN_V(v0, v0, v1); VLDI(v1, 0b011111111000); // broadcast 0xFFFFFFFFFFFFFFF8 @@ -882,13 +882,13 @@ uintptr_t dynarec64_660F(dynarec_la64_t* dyn, uintptr_t addr, uintptr_t ip, int nextop = F8; GETEX(q0, 0, 0); GETGX(q1, 1); - if (!box64_dynarec_fastnan) { + if (!BOX64ENV(dynarec_fastnan)) { v0 = fpu_get_scratch(dyn); v1 = fpu_get_scratch(dyn); VFCMP_D(v0, q0, q1, cUN); } VFMUL_D(q1, q1, q0); - if (!box64_dynarec_fastnan) { + if (!BOX64ENV(dynarec_fastnan)) { VFCMP_D(v1, q1, q1, cUN); VANDN_V(v0, v0, v1); VLDI(v1, 0b011111111000); // broadcast 0xFFFFFFFFFFFFFFF8 @@ -903,7 +903,7 @@ uintptr_t dynarec64_660F(dynarec_la64_t* dyn, uintptr_t addr, uintptr_t ip, int nextop = F8; GETEX(v1, 0, 0); GETGX_empty(v0); - // TODO: !box64_dynarec_fastround + // TODO: !BOX64ENV(dynarec_fastround) q0 = fpu_get_scratch(dyn); VFCVT_S_D(q0, v1, v1); VXOR_V(v0, v0, v0); @@ -915,12 +915,12 @@ uintptr_t dynarec64_660F(dynarec_la64_t* dyn, uintptr_t addr, uintptr_t ip, int GETEX(v1, 0, 0); GETGX_empty(v0); u8 = sse_setround(dyn, ninst, x6, x4); - if (v0 == v1 && !box64_dynarec_fastround) { + if (v0 == v1 && !BOX64ENV(dynarec_fastround)) { v1 = fpu_get_scratch(dyn); VOR_V(v1, v0, v0); } VFTINT_W_S(v0, v1); - if (!box64_dynarec_fastround) { + if (!BOX64ENV(dynarec_fastround)) { q0 = fpu_get_scratch(dyn); q1 = fpu_get_scratch(dyn); d1 = fpu_get_scratch(dyn); @@ -941,13 +941,13 @@ uintptr_t dynarec64_660F(dynarec_la64_t* dyn, uintptr_t addr, uintptr_t ip, int nextop = F8; GETEX(q0, 0, 0); GETGX(q1, 1); - if (!box64_dynarec_fastnan) { + if (!BOX64ENV(dynarec_fastnan)) { v0 = fpu_get_scratch(dyn); v1 = fpu_get_scratch(dyn); VFCMP_D(v0, q0, q1, cUN); } VFSUB_D(q1, q1, q0); - if (!box64_dynarec_fastnan) { + if (!BOX64ENV(dynarec_fastnan)) { VFCMP_D(v1, q1, q1, cUN); VANDN_V(v0, v0, v1); 
VLDI(v1, 0b011111111000); // broadcast 0xFFFFFFFFFFFFFFF8 @@ -962,13 +962,13 @@ uintptr_t dynarec64_660F(dynarec_la64_t* dyn, uintptr_t addr, uintptr_t ip, int nextop = F8; GETEX(q0, 0, 0); GETGX(q1, 1); - if (!box64_dynarec_fastnan) { + if (!BOX64ENV(dynarec_fastnan)) { v0 = fpu_get_scratch(dyn); v1 = fpu_get_scratch(dyn); VFCMP_D(v0, q0, q1, cUN); } VFDIV_D(q1, q1, q0); - if (!box64_dynarec_fastnan) { + if (!BOX64ENV(dynarec_fastnan)) { VFCMP_D(v1, q1, q1, cUN); VANDN_V(v0, v0, v1); VLDI(v1, 0b011111111000); // broadcast 0xFFFFFFFFFFFFFFF8 diff --git a/src/dynarec/la64/dynarec_la64_f20f.c b/src/dynarec/la64/dynarec_la64_f20f.c index 774dcbf4..fcb32bdb 100644 --- a/src/dynarec/la64/dynarec_la64_f20f.c +++ b/src/dynarec/la64/dynarec_la64_f20f.c @@ -114,7 +114,7 @@ uintptr_t dynarec64_F20F(dynarec_la64_t* dyn, uintptr_t addr, uintptr_t ip, int nextop = F8; GETGD; GETEXSD(q0, 0, 0); - if (!box64_dynarec_fastround) { + if (!BOX64ENV(dynarec_fastround)) { MOVGR2FCSR(FCSR2, xZR); // reset all bits } d1 = fpu_get_scratch(dyn); @@ -127,7 +127,7 @@ uintptr_t dynarec64_F20F(dynarec_la64_t* dyn, uintptr_t addr, uintptr_t ip, int ZEROUP(gd); } if (!rex.w) ZEROUP(gd); - if (!box64_dynarec_fastround) { + if (!BOX64ENV(dynarec_fastround)) { MOVFCSR2GR(x5, FCSR2); // get back FPSR to check MOV32w(x3, (1 << FR_V) | (1 << FR_O)); AND(x5, x5, x3); @@ -144,7 +144,7 @@ uintptr_t dynarec64_F20F(dynarec_la64_t* dyn, uintptr_t addr, uintptr_t ip, int nextop = F8; GETGD; GETEXSD(q0, 0, 0); - if (!box64_dynarec_fastround) { + if (!BOX64ENV(dynarec_fastround)) { MOVGR2FCSR(FCSR2, xZR); // reset all bits } d1 = fpu_get_scratch(dyn); @@ -158,7 +158,7 @@ uintptr_t dynarec64_F20F(dynarec_la64_t* dyn, uintptr_t addr, uintptr_t ip, int ZEROUP(gd); } x87_restoreround(dyn, ninst, u8); - if (!box64_dynarec_fastround) { + if (!BOX64ENV(dynarec_fastround)) { MOVFCSR2GR(x5, FCSR2); // get back FPSR to check MOV32w(x3, (1 << FR_V) | (1 << FR_O)); AND(x5, x5, x3); @@ -177,7 +177,7 @@ uintptr_t dynarec64_F20F(dynarec_la64_t* dyn, uintptr_t addr, uintptr_t ip, int d1 = fpu_get_scratch(dyn); GETEXSD(d0, 0, 0); FSQRT_D(d1, d0); - if (!box64_dynarec_fastnan) { + if (!BOX64ENV(dynarec_fastnan)) { v1 = fpu_get_scratch(dyn); MOVGR2FR_D(v1, xZR); FCMP_D(fcc0, d0, v1, cLT); @@ -193,7 +193,7 @@ uintptr_t dynarec64_F20F(dynarec_la64_t* dyn, uintptr_t addr, uintptr_t ip, int GETEXSD(v1, 0, 0); d0 = fpu_get_scratch(dyn); FADD_D(d0, v0, v1); - if (!box64_dynarec_fastnan) { + if (!BOX64ENV(dynarec_fastnan)) { FCMP_D(fcc0, v0, v1, cUN); BCNEZ_MARK(fcc0); FCMP_D(fcc1, d0, d0, cOR); @@ -210,7 +210,7 @@ uintptr_t dynarec64_F20F(dynarec_la64_t* dyn, uintptr_t addr, uintptr_t ip, int GETEXSD(v1, 0, 0); d0 = fpu_get_scratch(dyn); FMUL_D(d0, v0, v1); - if (!box64_dynarec_fastnan) { + if (!BOX64ENV(dynarec_fastnan)) { FCMP_D(fcc0, v0, v1, cUN); BCNEZ_MARK(fcc0); FCMP_D(fcc1, d0, d0, cOR); @@ -236,7 +236,7 @@ uintptr_t dynarec64_F20F(dynarec_la64_t* dyn, uintptr_t addr, uintptr_t ip, int GETEXSD(v1, 0, 0); d0 = fpu_get_scratch(dyn); FSUB_D(d0, v0, v1); - if (!box64_dynarec_fastnan) { + if (!BOX64ENV(dynarec_fastnan)) { FCMP_D(fcc0, v0, v1, cUN); BCNEZ_MARK(fcc0); FCMP_D(fcc1, d0, d0, cOR); @@ -266,7 +266,7 @@ uintptr_t dynarec64_F20F(dynarec_la64_t* dyn, uintptr_t addr, uintptr_t ip, int GETEXSD(v1, 0, 0); d0 = fpu_get_scratch(dyn); FDIV_D(d0, v0, v1); - if (!box64_dynarec_fastnan) { + if (!BOX64ENV(dynarec_fastnan)) { FCMP_D(fcc0, v0, v1, cUN); BCNEZ_MARK(fcc0); FCMP_D(fcc1, d0, d0, cOR); diff --git a/src/dynarec/la64/dynarec_la64_f30f.c 
b/src/dynarec/la64/dynarec_la64_f30f.c index de8b847b..03f316cb 100644 --- a/src/dynarec/la64/dynarec_la64_f30f.c +++ b/src/dynarec/la64/dynarec_la64_f30f.c @@ -111,7 +111,7 @@ uintptr_t dynarec64_F30F(dynarec_la64_t* dyn, uintptr_t addr, uintptr_t ip, int nextop = F8; GETGD; GETEXSS(d0, 0, 0); - if (!box64_dynarec_fastround) { + if (!BOX64ENV(dynarec_fastround)) { MOVGR2FCSR(FCSR2, xZR); // reset all bits } d1 = fpu_get_scratch(dyn); @@ -123,7 +123,7 @@ uintptr_t dynarec64_F30F(dynarec_la64_t* dyn, uintptr_t addr, uintptr_t ip, int MOVFR2GR_S(gd, d1); ZEROUP(gd); } - if (!box64_dynarec_fastround) { + if (!BOX64ENV(dynarec_fastround)) { MOVFCSR2GR(x5, FCSR2); // get back FPSR to check MOV32w(x3, (1 << FR_V) | (1 << FR_O)); AND(x5, x5, x3); @@ -142,7 +142,7 @@ uintptr_t dynarec64_F30F(dynarec_la64_t* dyn, uintptr_t addr, uintptr_t ip, int nextop = F8; GETGD; GETEXSS(d0, 0, 0); - if (!box64_dynarec_fastround) { + if (!BOX64ENV(dynarec_fastround)) { MOVGR2FCSR(FCSR2, xZR); // reset all bits } u8 = sse_setround(dyn, ninst, x5, x6); @@ -156,7 +156,7 @@ uintptr_t dynarec64_F30F(dynarec_la64_t* dyn, uintptr_t addr, uintptr_t ip, int ZEROUP(gd); } x87_restoreround(dyn, ninst, u8); - if (!box64_dynarec_fastround) { + if (!BOX64ENV(dynarec_fastround)) { MOVFCSR2GR(x5, FCSR2); // get back FPSR to check MOV32w(x3, (1 << FR_V) | (1 << FR_O)); AND(x5, x5, x3); @@ -210,7 +210,7 @@ uintptr_t dynarec64_F30F(dynarec_la64_t* dyn, uintptr_t addr, uintptr_t ip, int GETEX(v1, 0, 0); GETGX_empty(v0); VFTINTRZ_W_S(v0, v1); - if (!box64_dynarec_fastround) { + if (!BOX64ENV(dynarec_fastround)) { q0 = fpu_get_scratch(dyn); q1 = fpu_get_scratch(dyn); d1 = fpu_get_scratch(dyn); diff --git a/src/dynarec/la64/dynarec_la64_functions.c b/src/dynarec/la64/dynarec_la64_functions.c index 5d8b2c77..391eac80 100644 --- a/src/dynarec/la64/dynarec_la64_functions.c +++ b/src/dynarec/la64/dynarec_la64_functions.c @@ -332,7 +332,7 @@ static register_mapping_t register_mappings[] = { void inst_name_pass3(dynarec_native_t* dyn, int ninst, const char* name, rex_t rex) { - if (!box64_dynarec_dump && !box64_dynarec_gdbjit && !box64_dynarec_perf_map) return; + if (!BOX64ENV(dynarec_dump) && !BOX64ENV(dynarec_gdbjit) && !BOX64ENV(dynarec_perf_map)) return; static char buf[512]; int length = sprintf(buf, "barrier=%d state=%d/%d(%d), %s=%X/%X, use=%X, need=%X/%X, fuse=%d, sm=%d(%d/%d)", @@ -377,15 +377,15 @@ void inst_name_pass3(dynarec_native_t* dyn, int ninst, const char* name, rex_t r if (dyn->insts[ninst].lsx.combined1 || dyn->insts[ninst].lsx.combined2) length += sprintf(buf + length, " %s:%d/%d", dyn->insts[ninst].lsx.swapped ? "SWP" : "CMB", dyn->insts[ninst].lsx.combined1, dyn->insts[ninst].lsx.combined2); - if (box64_dynarec_dump) { + if (BOX64ENV(dynarec_dump)) { printf_x64_instruction(rex.is32bits ? my_context->dec32 : my_context->dec, &dyn->insts[ninst].x64, name); dynarec_log(LOG_NONE, "%s%p: %d emitted opcodes, inst=%d, %s%s\n", - (box64_dynarec_dump > 1) ? "\e[32m" : "", - (void*)(dyn->native_start + dyn->insts[ninst].address), dyn->insts[ninst].size / 4, ninst, buf, (box64_dynarec_dump > 1) ? "\e[m" : ""); + (BOX64ENV(dynarec_dump) > 1) ? "\e[32m" : "", + (void*)(dyn->native_start + dyn->insts[ninst].address), dyn->insts[ninst].size / 4, ninst, buf, (BOX64ENV(dynarec_dump) > 1) ? 
"\e[m" : ""); } - if (box64_dynarec_gdbjit) { + if (BOX64ENV(dynarec_gdbjit)) { static char buf2[512]; - if (box64_dynarec_gdbjit > 1) { + if (BOX64ENV(dynarec_gdbjit) > 1) { sprintf(buf2, "; %d: %d opcodes, %s", ninst, dyn->insts[ninst].size / 4, buf); dyn->gdbjit_block = GdbJITBlockAddLine(dyn->gdbjit_block, (dyn->native_start + dyn->insts[ninst].address), buf2); } @@ -398,7 +398,7 @@ void inst_name_pass3(dynarec_native_t* dyn, int ninst, const char* name, rex_t r } dyn->gdbjit_block = GdbJITBlockAddLine(dyn->gdbjit_block, (dyn->native_start + dyn->insts[ninst].address), inst_name); } - if (box64_dynarec_perf_map && box64_dynarec_perf_map_fd != -1) { + if (BOX64ENV(dynarec_perf_map) && BOX64ENV(dynarec_perf_map_fd) != -1) { writePerfMap(dyn->insts[ninst].x64.addr, dyn->native_start + dyn->insts[ninst].address, dyn->insts[ninst].size / 4, name); } } @@ -493,7 +493,7 @@ void fpu_unwind_restore(dynarec_la64_t* dyn, int ninst, lsxcache_t* cache) void updateNativeFlags(dynarec_la64_t* dyn) { - if (!box64_dynarec_nativeflags) + if (!BOX64ENV(dynarec_nativeflags)) return; for (int i = 1; i < dyn->size; ++i) if (dyn->insts[i].nat_flags_fusion) { diff --git a/src/dynarec/la64/dynarec_la64_helper.c b/src/dynarec/la64/dynarec_la64_helper.c index 3e622517..af3ab1ea 100644 --- a/src/dynarec/la64/dynarec_la64_helper.c +++ b/src/dynarec/la64/dynarec_la64_helper.c @@ -582,7 +582,7 @@ void ret_to_epilog(dynarec_la64_t* dyn, int ninst, rex_t rex) POP1z(xRIP); MVz(x1, xRIP); SMEND(); - if (box64_dynarec_callret) { + if (BOX64ENV(dynarec_callret)) { // pop the actual return address from RV64 stack LD_D(xRA, xSP, 0); // native addr LD_D(x6, xSP, 8); // x86 addr @@ -627,7 +627,7 @@ void retn_to_epilog(dynarec_la64_t* dyn, int ninst, rex_t rex, int n) } MVz(x1, xRIP); SMEND(); - if (box64_dynarec_callret) { + if (BOX64ENV(dynarec_callret)) { // pop the actual return address from RV64 stack LD_D(xRA, xSP, 0); // native addr LD_D(x6, xSP, 8); // x86 addr diff --git a/src/dynarec/la64/dynarec_la64_helper.h b/src/dynarec/la64/dynarec_la64_helper.h index 38ae926a..a109812e 100644 --- a/src/dynarec/la64/dynarec_la64_helper.h +++ b/src/dynarec/la64/dynarec_la64_helper.h @@ -1180,18 +1180,18 @@ uintptr_t dynarec64_F20F(dynarec_la64_t* dyn, uintptr_t addr, uintptr_t ip, int B##COND(dyn->insts[ninst].nat_flags_op1, dyn->insts[ninst].nat_flags_op2, val); #define NOTEST(s1) \ - if (box64_dynarec_test) { \ + if (BOX64ENV(dynarec_test)) { \ ST_W(xZR, xEmu, offsetof(x64emu_t, test.test)); \ ST_W(xZR, xEmu, offsetof(x64emu_t, test.clean)); \ } #define SKIPTEST(s1) \ - if (box64_dynarec_test) { \ + if (BOX64ENV(dynarec_test)) { \ ST_W(xZR, xEmu, offsetof(x64emu_t, test.clean)); \ } #define GOTEST(s1, s2) \ - if (box64_dynarec_test) { \ + if (BOX64ENV(dynarec_test)) { \ MOV32w(s2, 1); \ ST_W(s2, xEmu, offsetof(x64emu_t, test.test)); \ } diff --git a/src/dynarec/la64/dynarec_la64_pass0.h b/src/dynarec/la64/dynarec_la64_pass0.h index a35c9669..aa7ee34e 100644 --- a/src/dynarec/la64/dynarec_la64_pass0.h +++ b/src/dynarec/la64/dynarec_la64_pass0.h @@ -12,7 +12,7 @@ dyn->f.pending = SF_SET #define READFLAGS_FUSION(A, s1, s2, s3, s4, s5) \ - if (box64_dynarec_nativeflags && ninst > 0 && !dyn->insts[ninst - 1].nat_flags_nofusion) { \ + if (BOX64ENV(dynarec_nativeflags) && ninst > 0 && !dyn->insts[ninst - 1].nat_flags_nofusion) { \ if ((A) == (X_ZF)) \ dyn->insts[ninst].nat_flags_fusion = 1; \ else if (dyn->insts[ninst - 1].nat_flags_carry && ((A) == (X_CF) || (A) == (X_CF | X_ZF))) \ @@ -54,7 +54,7 @@ #define DEFAULT \ --dyn->size; \ 
*ok = -1; \ - if (box64_dynarec_log >= LOG_INFO || box64_dynarec_dump || box64_dynarec_missing==1) { \ + if (BOX64ENV(dynarec_log) >= LOG_INFO || BOX64ENV(dynarec_dump) || BOX64ENV(dynarec_missing)==1) { \ dynarec_log(LOG_NONE, "%p: Dynarec stopped because of %sOpcode %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X", \ (void*)ip, rex.is32bits ? "32bits " : "", \ PKip(0), \ diff --git a/src/dynarec/la64/dynarec_la64_pass3.h b/src/dynarec/la64/dynarec_la64_pass3.h index 34654f99..193f33be 100644 --- a/src/dynarec/la64/dynarec_la64_pass3.h +++ b/src/dynarec/la64/dynarec_la64_pass3.h @@ -5,7 +5,7 @@ addInst(dyn->instsize, &dyn->insts_size, 0, 0); #define EMIT(A) \ do { \ - if (box64_dynarec_dump) print_opcode(dyn, ninst, (uint32_t)(A)); \ + if (BOX64ENV(dynarec_dump)) print_opcode(dyn, ninst, (uint32_t)(A)); \ if ((uintptr_t)dyn->block < dyn->tablestart) \ *(uint32_t*)(dyn->block) = (uint32_t)(A); \ dyn->block += 4; \ @@ -14,7 +14,7 @@ } while (0) #define MESSAGE(A, ...) \ - if (box64_dynarec_dump) dynarec_log(LOG_NONE, __VA_ARGS__) + if (BOX64ENV(dynarec_dump)) dynarec_log(LOG_NONE, __VA_ARGS__) #define NEW_INST \ if (ninst) { \ addInst(dyn->instsize, &dyn->insts_size, dyn->insts[ninst - 1].x64.size, dyn->insts[ninst - 1].size / 4); \ diff --git a/src/dynarec/rv64/dynarec_rv64_00_3.c b/src/dynarec/rv64/dynarec_rv64_00_3.c index 357eaddf..0e200145 100644 --- a/src/dynarec/rv64/dynarec_rv64_00_3.c +++ b/src/dynarec/rv64/dynarec_rv64_00_3.c @@ -280,7 +280,7 @@ uintptr_t dynarec64_00_3(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int case 0xC2: INST_NAME("RETN"); // SETFLAGS(X_ALL, SF_SET_NODF); // Hack, set all flags (to an unknown state...) - if (box64_dynarec_safeflags) { + if (BOX64ENV(dynarec_safeflags)) { READFLAGS(X_PEND); // lets play safe here too } fpu_purgecache(dyn, ninst, 1, x1, x2, x3); // using next, even if there no next @@ -292,7 +292,7 @@ uintptr_t dynarec64_00_3(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int case 0xC3: INST_NAME("RET"); // SETFLAGS(X_ALL, SF_SET_NODF); // Hack, set all flags (to an unknown state...) 
- if (box64_dynarec_safeflags) { + if (BOX64ENV(dynarec_safeflags)) { READFLAGS(X_PEND); // so instead, force the deferred flags, so it's not too slow, and flags are not lost } fpu_purgecache(dyn, ninst, 1, x1, x2, x3); // using next, even if there no next @@ -438,7 +438,7 @@ uintptr_t dynarec64_00_3(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int x87_purgecache(dyn, ninst, 0, x3, x1, x4); if (tmp < 0 || (tmp & 15) > 1) tmp = 0; // TODO: removed when FP is in place - if ((box64_log < 2 && !cycle_log) && tmp) { + if ((BOX64ENV(log) < 2 && !BOX64ENV(rolling_log)) && tmp) { // GETIP(ip+3+8+8); // read the 0xCC call_n(dyn, ninst, *(void**)(addr + 8), tmp); addr += 8 + 8; @@ -459,7 +459,7 @@ uintptr_t dynarec64_00_3(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int } } else { INST_NAME("INT 3"); - if (!box64_ignoreint3) { + if (!BOX64ENV(ignoreint3)) { // check if TRAP signal is handled TABLE64(x1, (uintptr_t)my_context); MOV32w(x2, offsetof(box64context_t, signals[SIGTRAP])); @@ -613,7 +613,7 @@ uintptr_t dynarec64_00_3(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int BEQ_NEXT(x2, xZR); } SETFLAGS(X_ALL, SF_SET_PENDING, NAT_FLAGS_FUSION); // some flags are left undefined - if (box64_dynarec_safeflags > 1) + if (BOX64ENV(dynarec_safeflags) > 1) MAYSETFLAGS(); emit_shl8(dyn, ninst, x1, x2, x5, x4, x6); EBBACK(x5, 0); @@ -630,7 +630,7 @@ uintptr_t dynarec64_00_3(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int BEQ_NEXT(x2, xZR); } SETFLAGS(X_ALL, SF_SET_PENDING, NAT_FLAGS_FUSION); // some flags are left undefined - if (box64_dynarec_safeflags > 1) + if (BOX64ENV(dynarec_safeflags) > 1) MAYSETFLAGS(); emit_shr8(dyn, ninst, x1, x2, x5, x4, x6); EBBACK(x5, 0); @@ -647,7 +647,7 @@ uintptr_t dynarec64_00_3(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int BEQ_NEXT(x2, xZR); } SETFLAGS(X_ALL, SF_SET_PENDING, NAT_FLAGS_FUSION); // some flags are left undefined - if (box64_dynarec_safeflags > 1) + if (BOX64ENV(dynarec_safeflags) > 1) MAYSETFLAGS(); emit_sar8(dyn, ninst, x1, x2, x5, x4, x6); EBBACK(x5, 0); @@ -927,7 +927,7 @@ uintptr_t dynarec64_00_3(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int MESSAGE(LOG_DUMP, "Native Call to %s (retn=%d)\n", GetNativeName(GetNativeFnc(dyn->insts[ninst].natcall - 1)), dyn->insts[ninst].retn); // calling a native function sse_purge07cache(dyn, ninst, x3); - if ((box64_log < 2 && !cycle_log) && dyn->insts[ninst].natcall) { + if ((BOX64ENV(log) < 2 && !BOX64ENV(rolling_log)) && dyn->insts[ninst].natcall) { // Partially support isSimpleWrapper tmp = isSimpleWrapper(*(wrapper_t*)(dyn->insts[ninst].natcall + 2)); } else @@ -937,7 +937,7 @@ uintptr_t dynarec64_00_3(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int if (dyn->insts[ninst].natcall && isRetX87Wrapper(*(wrapper_t*)(dyn->insts[ninst].natcall + 2))) // return value will be on the stack, so the stack depth needs to be updated x87_purgecache(dyn, ninst, 0, x3, x1, x4); - if ((box64_log < 2 && !cycle_log) && dyn->insts[ninst].natcall && tmp) { + if ((BOX64ENV(log) < 2 && !BOX64ENV(rolling_log)) && dyn->insts[ninst].natcall && tmp) { // GETIP(ip+3+8+8); // read the 0xCC call_n(dyn, ninst, *(void**)(dyn->insts[ninst].natcall + 2 + 8), tmp); POP1(xRIP); // pop the return address @@ -970,13 +970,13 @@ uintptr_t dynarec64_00_3(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int } break; default: - if ((box64_dynarec_safeflags > 1) || (ninst && dyn->insts[ninst - 1].x64.set_flags)) { + if ((BOX64ENV(dynarec_safeflags) > 1) || (ninst && dyn->insts[ninst - 1].x64.set_flags)) { 
READFLAGS(X_PEND); // that's suspicious } else { SETFLAGS(X_ALL, SF_SET_NODF, NAT_FLAGS_NOFUSION); // Hack to set flags to "dont'care" state } // regular call - /*if(box64_dynarec_callret && box64_dynarec_bigblock>1) { + /*if(BOX64ENV(dynarec_callret) && BOX64ENV(dynarec_bigblock)>1) { BARRIER(BARRIER_FULL); } else { BARRIER(BARRIER_FLOAT); @@ -991,7 +991,7 @@ uintptr_t dynarec64_00_3(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int } fpu_purgecache(dyn, ninst, 1, x1, x3, x4); PUSH1z(x2); - if (box64_dynarec_callret) { + if (BOX64ENV(dynarec_callret)) { SET_HASCALLRET(); // Push actual return address j64 = (dyn->insts) ? (GETMARK - (dyn->native_size)) : 0; @@ -1011,9 +1011,9 @@ uintptr_t dynarec64_00_3(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int j64 = addr + i32; jump_to_next(dyn, j64, 0, ninst, rex.is32bits); MARK; - if (box64_dynarec_callret && dyn->vector_sew != VECTOR_SEWNA) + if (BOX64ENV(dynarec_callret) && dyn->vector_sew != VECTOR_SEWNA) vector_vsetvli(dyn, ninst, x3, dyn->vector_sew, VECTOR_LMUL1, 1); - if (box64_dynarec_callret && addr >= (dyn->start + dyn->isize)) { + if (BOX64ENV(dynarec_callret) && addr >= (dyn->start + dyn->isize)) { // jumps out of current dynablock... j64 = getJumpTableAddress64(addr); TABLE64(x4, j64); @@ -1265,7 +1265,7 @@ uintptr_t dynarec64_00_3(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int CALL(native_div0, -1, 0, 0); LOAD_XEMU_CALL(); } else { - if (box64_dynarec_div0) { + if (BOX64ENV(dynarec_div0)) { BNE_MARK3(ed, xZR); GETIP_(ip); STORE_XEMU_CALL(x3); @@ -1293,7 +1293,7 @@ uintptr_t dynarec64_00_3(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int && *(uint8_t*)(dyn->insts[ninst - 1].x64.addr) == 0x31 && *(uint8_t*)(dyn->insts[ninst - 1].x64.addr + 1) == 0xD2) { GETED(0); - if (box64_dynarec_div0) { + if (BOX64ENV(dynarec_div0)) { BNE_MARK3(ed, xZR); GETIP_(ip); STORE_XEMU_CALL(x3); @@ -1308,7 +1308,7 @@ uintptr_t dynarec64_00_3(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int MV(xRAX, x2); } else { GETEDH(x1, 0); // get edd changed addr, so cannot be called 2 times for same op... - if (box64_dynarec_div0) { + if (BOX64ENV(dynarec_div0)) { BNE_MARK3(ed, xZR); GETIP_(ip); STORE_XEMU_CALL(x3); @@ -1335,7 +1335,7 @@ uintptr_t dynarec64_00_3(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int SET_DFNONE() if (!rex.w) { GETSED(0); - if (box64_dynarec_div0) { + if (BOX64ENV(dynarec_div0)) { BNE_MARK3(ed, xZR); GETIP_(ip); STORE_XEMU_CALL(x3); @@ -1358,7 +1358,7 @@ uintptr_t dynarec64_00_3(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int && *(uint8_t*)(dyn->insts[ninst - 1].x64.addr) == 0x48 && *(uint8_t*)(dyn->insts[ninst - 1].x64.addr + 1) == 0x99) { GETED(0); - if (box64_dynarec_div0) { + if (BOX64ENV(dynarec_div0)) { BNE_MARK3(ed, xZR); GETIP_(ip); STORE_XEMU_CALL(x3); @@ -1373,7 +1373,7 @@ uintptr_t dynarec64_00_3(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int MV(xRAX, x2); } else { GETEDH(x1, 0); // get edd changed addr, so cannot be called 2 times for same op... 
- if (box64_dynarec_div0) { + if (BOX64ENV(dynarec_div0)) { BNE_MARK3(ed, xZR); GETIP_(ip); STORE_XEMU_CALL(x3); @@ -1466,13 +1466,13 @@ uintptr_t dynarec64_00_3(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int break; case 2: // CALL Ed INST_NAME("CALL Ed"); - PASS2IF ((box64_dynarec_safeflags > 1) || ((ninst && dyn->insts[ninst - 1].x64.set_flags) || ((ninst > 1) && dyn->insts[ninst - 2].x64.set_flags)), 1) { + PASS2IF ((BOX64ENV(dynarec_safeflags) > 1) || ((ninst && dyn->insts[ninst - 1].x64.set_flags) || ((ninst > 1) && dyn->insts[ninst - 2].x64.set_flags)), 1) { READFLAGS(X_PEND); // that's suspicious } else { SETFLAGS(X_ALL, SF_SET_NODF, NAT_FLAGS_NOFUSION); // Hack to put flag in "don't care" state } GETEDz(0); - if (box64_dynarec_callret && box64_dynarec_bigblock > 1) { + if (BOX64ENV(dynarec_callret) && BOX64ENV(dynarec_bigblock) > 1) { BARRIER(BARRIER_FULL); } else { BARRIER(BARRIER_FLOAT); @@ -1480,7 +1480,7 @@ uintptr_t dynarec64_00_3(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int *ok = 0; } GETIP_(addr); - if (box64_dynarec_callret) { + if (BOX64ENV(dynarec_callret)) { SET_HASCALLRET(); j64 = (dyn->insts) ? (GETMARK - (dyn->native_size)) : 0; AUIPC(x4, ((j64 + 0x800) >> 12) & 0xfffff); @@ -1493,9 +1493,9 @@ uintptr_t dynarec64_00_3(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int PUSH1z(xRIP); jump_to_next(dyn, 0, ed, ninst, rex.is32bits); MARK; - if (box64_dynarec_callret && dyn->vector_sew != VECTOR_SEWNA) + if (BOX64ENV(dynarec_callret) && dyn->vector_sew != VECTOR_SEWNA) vector_vsetvli(dyn, ninst, x3, dyn->vector_sew, VECTOR_LMUL1, 1); - if (box64_dynarec_callret && addr >= (dyn->start + dyn->isize)) { + if (BOX64ENV(dynarec_callret) && addr >= (dyn->start + dyn->isize)) { // jumps out of current dynablock... j64 = getJumpTableAddress64(addr); TABLE64(x4, j64); diff --git a/src/dynarec/rv64/dynarec_rv64_0f.c b/src/dynarec/rv64/dynarec_rv64_0f.c index 16b414bf..b6fc356d 100644 --- a/src/dynarec/rv64/dynarec_rv64_0f.c +++ b/src/dynarec/rv64/dynarec_rv64_0f.c @@ -334,12 +334,12 @@ uintptr_t dynarec64_0F(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int ni GETEX(x2, 0, 4); d0 = fpu_get_scratch(dyn); for (int i = 0; i < 2; ++i) { - if (!box64_dynarec_fastround) { + if (!BOX64ENV(dynarec_fastround)) { FSFLAGSI(0); // // reset all bits } FLW(d0, wback, fixedaddress + i * 4); FCVTWS(x1, d0, RD_RTZ); - if (!box64_dynarec_fastround) { + if (!BOX64ENV(dynarec_fastround)) { FRFLAGS(x5); // get back FPSR to check the IOC bit ANDI(x5, x5, (1 << FR_NV) | (1 << FR_OF)); BEQ_MARKi(x5, xZR, i); @@ -357,12 +357,12 @@ uintptr_t dynarec64_0F(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int ni d0 = fpu_get_scratch(dyn); u8 = sse_setround(dyn, ninst, x6, x4); for (int i = 0; i < 2; ++i) { - if (!box64_dynarec_fastround) { + if (!BOX64ENV(dynarec_fastround)) { FSFLAGSI(0); // // reset all bits } FLW(d0, wback, fixedaddress + i * 4); FCVTWS(x1, d0, RD_DYN); - if (!box64_dynarec_fastround) { + if (!BOX64ENV(dynarec_fastround)) { FRFLAGS(x5); // get back FPSR to check the IOC bit ANDI(x5, x5, (1 << FR_NV) | (1 << FR_OF)); BEQ_MARKi(x5, xZR, i); @@ -962,12 +962,12 @@ uintptr_t dynarec64_0F(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int ni // do accurate computation, because riscv doesn't have rsqrt MOV32w(x3, 1); FCVTSW(s1, x3, RD_DYN); - if (!box64_dynarec_fastnan) { + if (!BOX64ENV(dynarec_fastnan)) { FCVTSW(v0, xZR, RD_DYN); } for (int i = 0; i < 4; ++i) { FLW(s0, wback, fixedaddress + i * 4); - if (!box64_dynarec_fastnan) { + if (!BOX64ENV(dynarec_fastnan)) { 
FLES(x3, v0, s0); // s0 >= 0.0f? BNEZ(x3, 6 * 4); FEQS(x3, s0, s0); // isnan(s0)? @@ -1121,7 +1121,7 @@ uintptr_t dynarec64_0F(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int ni for (int i = 0; i < 4; ++i) { FLW(s0, wback, fixedaddress + i * 4); FLW(s1, gback, gdoffset + i * 4); - if (!box64_dynarec_fastnan) { + if (!BOX64ENV(dynarec_fastnan)) { FEQS(x3, s0, s0); FEQS(x4, s1, s1); AND(x3, x3, x4); @@ -1160,7 +1160,7 @@ uintptr_t dynarec64_0F(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int ni for (int i = 0; i < 4; ++i) { FLW(s0, wback, fixedaddress + i * 4); FLW(s1, gback, gdoffset + i * 4); - if (!box64_dynarec_fastnan) { + if (!BOX64ENV(dynarec_fastnan)) { FEQS(x3, s0, s0); FEQS(x4, s1, s1); AND(x3, x3, x4); @@ -1790,7 +1790,7 @@ uintptr_t dynarec64_0F(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int ni nextop = F8; INST_NAME("SHLD Ed, Gd, CL"); SETFLAGS(X_ALL, SF_SET_PENDING, NAT_FLAGS_FUSION); // some flags are left undefined - if (box64_dynarec_safeflags > 1) + if (BOX64ENV(dynarec_safeflags) > 1) MAYSETFLAGS(); GETGD; GETED(0); @@ -1852,7 +1852,7 @@ uintptr_t dynarec64_0F(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int ni nextop = F8; INST_NAME("SHRD Ed, Gd, CL"); SETFLAGS(X_ALL, SF_SET_PENDING, NAT_FLAGS_FUSION); - if (box64_dynarec_safeflags > 1) + if (BOX64ENV(dynarec_safeflags) > 1) MAYSETFLAGS(); GETGD; GETED(0); @@ -1903,7 +1903,7 @@ uintptr_t dynarec64_0F(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int ni INST_NAME("LDMXCSR Md"); GETED(0); SW(ed, xEmu, offsetof(x64emu_t, mxcsr)); - if (box64_sse_flushto0) { + if (BOX64ENV(sse_flushto0)) { // TODO: applyFlushTo0 also needs to add RISC-V support. } break; diff --git a/src/dynarec/rv64/dynarec_rv64_0f_vector.c b/src/dynarec/rv64/dynarec_rv64_0f_vector.c index 7a966c0d..342ab55f 100644 --- a/src/dynarec/rv64/dynarec_rv64_0f_vector.c +++ b/src/dynarec/rv64/dynarec_rv64_0f_vector.c @@ -325,7 +325,7 @@ uintptr_t dynarec64_0F_vector(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, VFSQRT_V(v1, v0, VECTOR_UNMASKED); break; case 0x52: - if (!box64_dynarec_fastround) return 0; + if (!BOX64ENV(dynarec_fastround)) return 0; INST_NAME("RSQRTPS Gx, Ex"); nextop = F8; SET_ELEMENT_WIDTH(x1, VECTOR_SEW32, 1); @@ -445,13 +445,13 @@ uintptr_t dynarec64_0F_vector(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, SET_ELEMENT_WIDTH(x1, VECTOR_SEW32, 1); GETGX_vector(q0, 1, VECTOR_SEW32); GETEX_vector(q1, 0, 0, VECTOR_SEW32); - if (!box64_dynarec_fastnan) { + if (!BOX64ENV(dynarec_fastnan)) { v0 = fpu_get_scratch(dyn); VMFEQ_VV(VMASK, q0, q0, VECTOR_UNMASKED); VMFEQ_VV(v0, q1, q1, VECTOR_UNMASKED); } VFMIN_VV(q0, q0, q1, VECTOR_UNMASKED); - if (!box64_dynarec_fastnan) { + if (!BOX64ENV(dynarec_fastnan)) { VMAND_MM(VMASK, v0, VMASK); VXOR_VI(VMASK, VMASK, 0x1F, VECTOR_UNMASKED); VADD_VX(q0, q1, xZR, VECTOR_MASKED); @@ -472,12 +472,12 @@ uintptr_t dynarec64_0F_vector(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, GETGX_vector(q0, 1, VECTOR_SEW32); GETEX_vector(q1, 0, 0, VECTOR_SEW32); v0 = fpu_get_scratch(dyn); - if (!box64_dynarec_fastnan) { + if (!BOX64ENV(dynarec_fastnan)) { VMFEQ_VV(VMASK, q0, q0, VECTOR_UNMASKED); VMFEQ_VV(v0, q1, q1, VECTOR_UNMASKED); } VFMAX_VV(q0, q0, q1, VECTOR_UNMASKED); - if (!box64_dynarec_fastnan) { + if (!BOX64ENV(dynarec_fastnan)) { VMAND_MM(VMASK, v0, VMASK); VXOR_VI(VMASK, VMASK, 0x1F, VECTOR_UNMASKED); VADD_VX(q0, q1, xZR, VECTOR_MASKED); diff --git a/src/dynarec/rv64/dynarec_rv64_66.c b/src/dynarec/rv64/dynarec_rv64_66.c index 3aac15f0..5fb476d9 100644 --- 
a/src/dynarec/rv64/dynarec_rv64_66.c +++ b/src/dynarec/rv64/dynarec_rv64_66.c @@ -1302,7 +1302,7 @@ uintptr_t dynarec64_66(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int ni ANDI(x2, xRCX, 0x1f); BEQ_NEXT(x2, xZR); SETFLAGS(X_ALL, SF_SET_PENDING, NAT_FLAGS_FUSION); // some flags are left undefined - if (box64_dynarec_safeflags > 1) + if (BOX64ENV(dynarec_safeflags) > 1) MAYSETFLAGS(); GETEW(x1, 0); emit_shr16(dyn, ninst, x1, x2, x5, x4, x6); @@ -1314,7 +1314,7 @@ uintptr_t dynarec64_66(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int ni ANDI(x2, xRCX, 0x1f); BEQ_NEXT(x2, xZR); SETFLAGS(X_ALL, SF_SET_PENDING, NAT_FLAGS_FUSION); // some flags are left undefined - if (box64_dynarec_safeflags > 1) + if (BOX64ENV(dynarec_safeflags) > 1) MAYSETFLAGS(); GETEW(x1, 0); emit_shl16(dyn, ninst, x1, x2, x5, x4, x6); @@ -1325,7 +1325,7 @@ uintptr_t dynarec64_66(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int ni ANDI(x2, xRCX, 0x1f); BEQ_NEXT(x2, xZR); SETFLAGS(X_ALL, SF_SET_PENDING, NAT_FLAGS_FUSION); // some flags are left undefined - if (box64_dynarec_safeflags > 1) + if (BOX64ENV(dynarec_safeflags) > 1) MAYSETFLAGS(); GETSEW(x1, 0); emit_sar16(dyn, ninst, x1, x2, x5, x4, x6); @@ -1401,7 +1401,7 @@ uintptr_t dynarec64_66(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int ni SLLI(x7, xRDX, 48); SRLI(x7, x7, 32); OR(x2, x2, x7); - if (box64_dynarec_div0) { + if (BOX64ENV(dynarec_div0)) { BNE_MARK3(ed, xZR); GETIP_(ip); STORE_XEMU_CALL(x6); @@ -1422,7 +1422,7 @@ uintptr_t dynarec64_66(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int ni SETFLAGS(X_ALL, SF_SET, NAT_FLAGS_NOFUSION); SET_DFNONE(); GETSEW(x1, 0); - if (box64_dynarec_div0) { + if (BOX64ENV(dynarec_div0)) { BNE_MARK3(ed, xZR); GETIP_(ip); STORE_XEMU_CALL(x6); diff --git a/src/dynarec/rv64/dynarec_rv64_660f.c b/src/dynarec/rv64/dynarec_rv64_660f.c index ac6f5b24..62043219 100644 --- a/src/dynarec/rv64/dynarec_rv64_660f.c +++ b/src/dynarec/rv64/dynarec_rv64_660f.c @@ -192,12 +192,12 @@ uintptr_t dynarec64_660F(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int GETEX(x2, 0, 8); d0 = fpu_get_scratch(dyn); for (int i = 0; i < 2; ++i) { - if (!box64_dynarec_fastround) { + if (!BOX64ENV(dynarec_fastround)) { FSFLAGSI(0); // // reset all bits } FLD(d0, wback, fixedaddress + i * 8); FCVTWD(x1, d0, RD_RTZ); - if (!box64_dynarec_fastround) { + if (!BOX64ENV(dynarec_fastround)) { FRFLAGS(x5); // get back FPSR to check the IOC bit ANDI(x5, x5, (1 << FR_NV) | (1 << FR_OF)); BEQ_MARKi(x5, xZR, i); @@ -215,12 +215,12 @@ uintptr_t dynarec64_660F(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int d0 = fpu_get_scratch(dyn); u8 = sse_setround(dyn, ninst, x4, x5); for (int i = 0; i < 2; ++i) { - if (!box64_dynarec_fastround) { + if (!BOX64ENV(dynarec_fastround)) { FSFLAGSI(0); // // reset all bits } FLD(d0, wback, fixedaddress + i * 8); FCVTWD(x1, d0, RD_DYN); - if (!box64_dynarec_fastround) { + if (!BOX64ENV(dynarec_fastround)) { FRFLAGS(x5); // get back FPSR to check the IOC bit ANDI(x5, x5, (1 << FR_NV) | (1 << FR_OF)); BEQ_MARKi(x5, xZR, i); @@ -320,17 +320,17 @@ uintptr_t dynarec64_660F(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int GETGX(); GETEX(x2, 0, 8); d0 = fpu_get_scratch(dyn); - if (!box64_dynarec_fastnan) { + if (!BOX64ENV(dynarec_fastnan)) { d1 = fpu_get_scratch(dyn); FMVDX(d1, xZR); } for (int i = 0; i < 2; ++i) { FLD(d0, wback, fixedaddress + i * 8); - if (!box64_dynarec_fastnan) { + if (!BOX64ENV(dynarec_fastnan)) { FLTD(x3, d0, d1); } FSQRTD(d0, d0); - if (!box64_dynarec_fastnan) { + if 
(!BOX64ENV(dynarec_fastnan)) { BEQ(x3, xZR, 8); FNEGD(d0, d0); } @@ -371,12 +371,12 @@ uintptr_t dynarec64_660F(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int GETEX(x1, 0, 8); GETGX(); SSE_LOOP_FQ(x3, x4, { - if (!box64_dynarec_fastnan) { + if (!BOX64ENV(dynarec_fastnan)) { FEQD(x3, v0, v0); FEQD(x4, v1, v1); } FADDD(v0, v0, v1); - if (!box64_dynarec_fastnan) { + if (!BOX64ENV(dynarec_fastnan)) { AND(x3, x3, x4); BEQZ(x3, 16); FEQD(x3, v0, v0); @@ -391,12 +391,12 @@ uintptr_t dynarec64_660F(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int GETEX(x1, 0, 8); GETGX(); SSE_LOOP_FQ(x3, x4, { - if (!box64_dynarec_fastnan) { + if (!BOX64ENV(dynarec_fastnan)) { FEQD(x3, v0, v0); FEQD(x4, v1, v1); } FMULD(v0, v0, v1); - if (!box64_dynarec_fastnan) { + if (!BOX64ENV(dynarec_fastnan)) { AND(x3, x3, x4); BEQZ(x3, 16); FEQD(x3, v0, v0); @@ -445,12 +445,12 @@ uintptr_t dynarec64_660F(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int GETEX(x1, 0, 8); GETGX(); SSE_LOOP_FQ(x3, x4, { - if (!box64_dynarec_fastnan) { + if (!BOX64ENV(dynarec_fastnan)) { FEQD(x3, v0, v0); FEQD(x4, v1, v1); } FSUBD(v0, v0, v1); - if (!box64_dynarec_fastnan) { + if (!BOX64ENV(dynarec_fastnan)) { AND(x3, x3, x4); BEQZ(x3, 16); FEQD(x3, v0, v0); @@ -484,12 +484,12 @@ uintptr_t dynarec64_660F(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int GETEX(x1, 0, 8); GETGX(); SSE_LOOP_FQ(x3, x4, { - if (!box64_dynarec_fastnan) { + if (!BOX64ENV(dynarec_fastnan)) { FEQD(x3, v0, v0); FEQD(x4, v1, v1); } FDIVD(v0, v0, v1); - if (!box64_dynarec_fastnan) { + if (!BOX64ENV(dynarec_fastnan)) { AND(x3, x3, x4); BEQZ(x3, 16); FEQD(x3, v0, v0); @@ -1090,13 +1090,13 @@ uintptr_t dynarec64_660F(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int d1 = fpu_get_scratch(dyn); FLD(d0, gback, gdoffset + 0); FLD(d1, gback, gdoffset + 8); - if (!box64_dynarec_fastnan) { + if (!BOX64ENV(dynarec_fastnan)) { FEQD(x3, d0, d0); FEQD(x4, d1, d1); AND(x3, x3, x4); } FADDD(d0, d0, d1); - if (!box64_dynarec_fastnan) { + if (!BOX64ENV(dynarec_fastnan)) { FEQD(x4, d0, d0); BEQZ(x3, 12); BNEZ(x4, 8); @@ -1109,13 +1109,13 @@ uintptr_t dynarec64_660F(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int GETEX(x2, 0, 8); FLD(d0, wback, fixedaddress + 0); FLD(d1, wback, fixedaddress + 8); - if (!box64_dynarec_fastnan) { + if (!BOX64ENV(dynarec_fastnan)) { FEQD(x3, d0, d0); FEQD(x4, d1, d1); AND(x3, x3, x4); } FADDD(d0, d0, d1); - if (!box64_dynarec_fastnan) { + if (!BOX64ENV(dynarec_fastnan)) { FEQD(x4, d0, d0); BEQZ(x3, 12); BNEZ(x4, 8); @@ -1921,11 +1921,11 @@ uintptr_t dynarec64_660F(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int v1 = fpu_get_scratch(dyn); FLD(v0, wback, fixedaddress + 0); FLD(v1, wback, fixedaddress + 8); - if (!box64_dynarec_fastround) { + if (!BOX64ENV(dynarec_fastround)) { FSFLAGSI(0); // // reset all bits } FCVTWD(x3, v0, RD_RTZ); - if (!box64_dynarec_fastround) { + if (!BOX64ENV(dynarec_fastround)) { FRFLAGS(x5); // get back FPSR to check the IOC bit ANDI(x5, x5, (1 << FR_NV) | (1 << FR_OF)); BEQ_MARK(x5, xZR); @@ -1934,7 +1934,7 @@ uintptr_t dynarec64_660F(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int FSFLAGSI(0); // // reset all bits } FCVTWD(x4, v1, RD_RTZ); - if (!box64_dynarec_fastround) { + if (!BOX64ENV(dynarec_fastround)) { FRFLAGS(x5); // get back FPSR to check the IOC bit ANDI(x5, x5, (1 << FR_NV) | (1 << FR_OF)); BEQ_MARK2(x5, xZR); diff --git a/src/dynarec/rv64/dynarec_rv64_660f_vector.c b/src/dynarec/rv64/dynarec_rv64_660f_vector.c index 8eba1e38..41960c73 100644 --- 
a/src/dynarec/rv64/dynarec_rv64_660f_vector.c +++ b/src/dynarec/rv64/dynarec_rv64_660f_vector.c @@ -920,13 +920,13 @@ uintptr_t dynarec64_660F_vector(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t i SET_ELEMENT_WIDTH(x1, VECTOR_SEW64, 1); GETGX_vector(q0, 1, VECTOR_SEW64); GETEX_vector(q1, 0, 0, VECTOR_SEW64); - if (!box64_dynarec_fastnan) { + if (!BOX64ENV(dynarec_fastnan)) { v0 = fpu_get_scratch(dyn); VXOR_VV(v0, v0, v0, VECTOR_UNMASKED); VMFLT_VV(VMASK, q1, v0, VECTOR_UNMASKED); } VFSQRT_V(q0, q1, VECTOR_UNMASKED); - if (!box64_dynarec_fastnan) { + if (!BOX64ENV(dynarec_fastnan)) { VFSGNJN_VV(q0, q0, q0, VECTOR_MASKED); } break; @@ -976,13 +976,13 @@ uintptr_t dynarec64_660F_vector(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t i SET_ELEMENT_WIDTH(x1, VECTOR_SEW64, 1); GETGX_vector(q0, 1, VECTOR_SEW64); GETEX_vector(q1, 0, 0, VECTOR_SEW64); - if (!box64_dynarec_fastnan) { + if (!BOX64ENV(dynarec_fastnan)) { v0 = fpu_get_scratch(dyn); VMFEQ_VV(VMASK, q0, q0, VECTOR_UNMASKED); VMFEQ_VV(v0, q1, q1, VECTOR_UNMASKED); } VFADD_VV(q0, q0, q1, VECTOR_UNMASKED); - if (!box64_dynarec_fastnan) { + if (!BOX64ENV(dynarec_fastnan)) { VMAND_MM(VMASK, v0, VMASK); VMFEQ_VV(v0, q0, q0, VECTOR_UNMASKED); VXOR_VI(v0, v0, 0x1F, VECTOR_UNMASKED); @@ -996,13 +996,13 @@ uintptr_t dynarec64_660F_vector(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t i SET_ELEMENT_WIDTH(x1, VECTOR_SEW64, 1); GETGX_vector(q0, 1, VECTOR_SEW64); GETEX_vector(q1, 0, 0, VECTOR_SEW64); - if (!box64_dynarec_fastnan) { + if (!BOX64ENV(dynarec_fastnan)) { v0 = fpu_get_scratch(dyn); VMFEQ_VV(VMASK, q0, q0, VECTOR_UNMASKED); VMFEQ_VV(v0, q1, q1, VECTOR_UNMASKED); } VFMUL_VV(q0, q0, q1, VECTOR_UNMASKED); - if (!box64_dynarec_fastnan) { + if (!BOX64ENV(dynarec_fastnan)) { VMAND_MM(VMASK, v0, VMASK); VMFEQ_VV(v0, q0, q0, VECTOR_UNMASKED); VXOR_VI(v0, v0, 0x1F, VECTOR_UNMASKED); @@ -1031,7 +1031,7 @@ uintptr_t dynarec64_660F_vector(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t i VMV_S_X(v0, x4); break; case 0x5B: - if (!box64_dynarec_fastround) return 0; + if (!BOX64ENV(dynarec_fastround)) return 0; INST_NAME("CVTPS2DQ Gx, Ex"); nextop = F8; SET_ELEMENT_WIDTH(x1, VECTOR_SEW32, 1); @@ -1047,13 +1047,13 @@ uintptr_t dynarec64_660F_vector(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t i SET_ELEMENT_WIDTH(x1, VECTOR_SEW64, 1); GETGX_vector(q0, 1, VECTOR_SEW64); GETEX_vector(q1, 0, 0, VECTOR_SEW64); - if (!box64_dynarec_fastnan) { + if (!BOX64ENV(dynarec_fastnan)) { v0 = fpu_get_scratch(dyn); VMFEQ_VV(VMASK, q0, q0, VECTOR_UNMASKED); VMFEQ_VV(v0, q1, q1, VECTOR_UNMASKED); } VFSUB_VV(q0, q0, q1, VECTOR_UNMASKED); - if (!box64_dynarec_fastnan) { + if (!BOX64ENV(dynarec_fastnan)) { VMAND_MM(VMASK, v0, VMASK); VMFEQ_VV(v0, q0, q0, VECTOR_UNMASKED); VXOR_VI(v0, v0, 0x1F, VECTOR_UNMASKED); @@ -1068,12 +1068,12 @@ uintptr_t dynarec64_660F_vector(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t i GETGX_vector(q0, 1, VECTOR_SEW64); GETEX_vector(q1, 0, 0, VECTOR_SEW64); v0 = fpu_get_scratch(dyn); - if (!box64_dynarec_fastnan) { + if (!BOX64ENV(dynarec_fastnan)) { VMFEQ_VV(VMASK, q0, q0, VECTOR_UNMASKED); VMFEQ_VV(v0, q1, q1, VECTOR_UNMASKED); } VFMIN_VV(q0, q0, q1, VECTOR_UNMASKED); - if (!box64_dynarec_fastnan) { + if (!BOX64ENV(dynarec_fastnan)) { VMAND_MM(VMASK, v0, VMASK); VXOR_VI(VMASK, VMASK, 0x1F, VECTOR_UNMASKED); VADD_VX(q0, q1, xZR, VECTOR_MASKED); @@ -1085,13 +1085,13 @@ uintptr_t dynarec64_660F_vector(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t i SET_ELEMENT_WIDTH(x1, VECTOR_SEW64, 1); GETGX_vector(q0, 1, VECTOR_SEW64); GETEX_vector(q1, 0, 0, 
VECTOR_SEW64); - if (!box64_dynarec_fastnan) { + if (!BOX64ENV(dynarec_fastnan)) { v0 = fpu_get_scratch(dyn); VMFEQ_VV(VMASK, q0, q0, VECTOR_UNMASKED); VMFEQ_VV(v0, q1, q1, VECTOR_UNMASKED); } VFDIV_VV(q0, q0, q1, VECTOR_UNMASKED); - if (!box64_dynarec_fastnan) { + if (!BOX64ENV(dynarec_fastnan)) { VMAND_MM(VMASK, v0, VMASK); VMFEQ_VV(v0, q0, q0, VECTOR_UNMASKED); VXOR_VI(v0, v0, 0x1F, VECTOR_UNMASKED); @@ -1106,12 +1106,12 @@ uintptr_t dynarec64_660F_vector(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t i GETGX_vector(q0, 1, VECTOR_SEW64); GETEX_vector(q1, 0, 0, VECTOR_SEW64); v0 = fpu_get_scratch(dyn); - if (!box64_dynarec_fastnan) { + if (!BOX64ENV(dynarec_fastnan)) { VMFEQ_VV(VMASK, q0, q0, VECTOR_UNMASKED); VMFEQ_VV(v0, q1, q1, VECTOR_UNMASKED); } VFMAX_VV(q0, q0, q1, VECTOR_UNMASKED); - if (!box64_dynarec_fastnan) { + if (!BOX64ENV(dynarec_fastnan)) { VMAND_MM(VMASK, v0, VMASK); VXOR_VI(VMASK, VMASK, 0x1F, VECTOR_UNMASKED); VADD_VX(q0, q1, xZR, VECTOR_MASKED); @@ -1619,13 +1619,13 @@ uintptr_t dynarec64_660F_vector(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t i VXOR_VI(VMASK, VMASK, 0x1F, VECTOR_UNMASKED); VCOMPRESS_VM(d1, v0, VMASK); vector_vsetvli(dyn, ninst, x1, VECTOR_SEW64, VECTOR_LMUL1, 1); - if (!box64_dynarec_fastnan) { + if (!BOX64ENV(dynarec_fastnan)) { VMFEQ_VV(v0, d0, d0, VECTOR_UNMASKED); VMFEQ_VV(v1, d1, d1, VECTOR_UNMASKED); VMAND_MM(v0, v0, v1); } VFADD_VV(q0, d0, d1, VECTOR_UNMASKED); - if (!box64_dynarec_fastnan) { + if (!BOX64ENV(dynarec_fastnan)) { VMFEQ_VV(v1, q0, q0, VECTOR_UNMASKED); VMANDN_MM(VMASK, v0, v1); VFSGNJN_VV(q0, q0, q0, VECTOR_MASKED); @@ -2010,7 +2010,7 @@ uintptr_t dynarec64_660F_vector(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t i VMULH_VV(q0, q0, q1, VECTOR_UNMASKED); break; case 0xE6: - if (!box64_dynarec_fastround) return 0; + if (!BOX64ENV(dynarec_fastround)) return 0; INST_NAME("CVTTPD2DQ Gx, Ex"); nextop = F8; SET_ELEMENT_WIDTH(x1, VECTOR_SEW32, 1); diff --git a/src/dynarec/rv64/dynarec_rv64_avx.c b/src/dynarec/rv64/dynarec_rv64_avx.c index f8b1ce89..3008fde5 100644 --- a/src/dynarec/rv64/dynarec_rv64_avx.c +++ b/src/dynarec/rv64/dynarec_rv64_avx.c @@ -56,7 +56,7 @@ uintptr_t dynarec64_AVX(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int n DEFAULT; } - if ((*ok == -1) && (box64_dynarec_log >= LOG_INFO || box64_dynarec_dump || box64_dynarec_missing == 1)) { + if ((*ok == -1) && (BOX64ENV(dynarec_log) >= LOG_INFO || BOX64ENV(dynarec_dump) || BOX64ENV(dynarec_missing) == 1)) { dynarec_log(LOG_NONE, "Dynarec unimplemented AVX opcode size %d prefix %s map %s opcode %02X ", 128 << vex.l, avx_prefix_string(vex.p), avx_map_string(vex.m), opcode); } return addr; diff --git a/src/dynarec/rv64/dynarec_rv64_d9.c b/src/dynarec/rv64/dynarec_rv64_d9.c index 4ea79815..02cf39a5 100644 --- a/src/dynarec/rv64/dynarec_rv64_d9.c +++ b/src/dynarec/rv64/dynarec_rv64_d9.c @@ -460,7 +460,7 @@ uintptr_t dynarec64_D9(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int ni switch ((nextop >> 3) & 7) { case 0: INST_NAME("FLD ST0, float[ED]"); - X87_PUSH_OR_FAIL(v1, dyn, ninst, x1, box64_dynarec_x87double ? EXT_CACHE_ST_D : EXT_CACHE_ST_F); + X87_PUSH_OR_FAIL(v1, dyn, ninst, x1, BOX64ENV(dynarec_x87double) ? 
EXT_CACHE_ST_D : EXT_CACHE_ST_F); addr = geted(dyn, addr, ninst, nextop, &ed, x2, x1, &fixedaddress, rex, NULL, 1, 0); FLW(v1, ed, fixedaddress); if (!ST_IS_F(0)) { diff --git a/src/dynarec/rv64/dynarec_rv64_db.c b/src/dynarec/rv64/dynarec_rv64_db.c index 6943eac7..9b58410d 100644 --- a/src/dynarec/rv64/dynarec_rv64_db.c +++ b/src/dynarec/rv64/dynarec_rv64_db.c @@ -201,11 +201,11 @@ uintptr_t dynarec64_DB(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int ni INST_NAME("FISTTP Ed, ST0"); v1 = x87_get_st(dyn, ninst, x1, x2, 0, EXT_CACHE_ST_D); addr = geted(dyn, addr, ninst, nextop, &wback, x3, x4, &fixedaddress, rex, NULL, 1, 0); - if (!box64_dynarec_fastround) { + if (!BOX64ENV(dynarec_fastround)) { FSFLAGSI(0); // reset all bits } FCVTWD(x4, v1, RD_RTZ); - if (!box64_dynarec_fastround) { + if (!BOX64ENV(dynarec_fastround)) { FRFLAGS(x5); // get back FPSR to check the IOC bit ANDI(x5, x5, 1 << FR_NV); BEQZ_MARK(x5); @@ -225,12 +225,12 @@ uintptr_t dynarec64_DB(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int ni u8 = x87_setround(dyn, ninst, x1, x2); addr = geted(dyn, addr, ninst, nextop, &wback, x2, x3, &fixedaddress, rex, NULL, 1, 0); v2 = fpu_get_scratch(dyn); - if (!box64_dynarec_fastround) { + if (!BOX64ENV(dynarec_fastround)) { FSFLAGSI(0); // reset all bits } FCVTWD(x4, v1, RD_DYN); x87_restoreround(dyn, ninst, u8); - if (!box64_dynarec_fastround) { + if (!BOX64ENV(dynarec_fastround)) { FRFLAGS(x5); // get back FPSR to check the IOC bit ANDI(x5, x5, 1 << FR_NV); BEQ_MARK2(x5, xZR); @@ -260,7 +260,7 @@ uintptr_t dynarec64_DB(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int ni SD(x5, ed, fixedaddress + 0); SH(x6, ed, fixedaddress + 8); } else { - if (box64_x87_no80bits) { + if (BOX64ENV(x87_no80bits)) { X87_PUSH_OR_FAIL(v1, dyn, ninst, x1, EXT_CACHE_ST_D); FLD(v1, ed, fixedaddress); } else { @@ -274,7 +274,7 @@ uintptr_t dynarec64_DB(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int ni break; case 7: INST_NAME("FSTP tbyte"); - if (box64_x87_no80bits) { + if (BOX64ENV(x87_no80bits)) { v1 = x87_get_st(dyn, ninst, x1, x2, 0, EXT_CACHE_ST_D); addr = geted(dyn, addr, ninst, nextop, &wback, x2, x1, &fixedaddress, rex, NULL, 1, 0); FSD(v1, wback, fixedaddress); diff --git a/src/dynarec/rv64/dynarec_rv64_dd.c b/src/dynarec/rv64/dynarec_rv64_dd.c index 2d2c22d4..06e2ea98 100644 --- a/src/dynarec/rv64/dynarec_rv64_dd.c +++ b/src/dynarec/rv64/dynarec_rv64_dd.c @@ -169,11 +169,11 @@ uintptr_t dynarec64_DD(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int ni if (ST_IS_I64(0)) { FSD(v1, wback, fixedaddress); } else { - if (!box64_dynarec_fastround) { + if (!BOX64ENV(dynarec_fastround)) { FSFLAGSI(0); // reset all bits } FCVTLD(x4, v1, RD_RTZ); - if (!box64_dynarec_fastround) { + if (!BOX64ENV(dynarec_fastround)) { FRFLAGS(x5); // get back FPSR to check the IOC bit ANDI(x5, x5, 1 << FR_NV); BEQZ_MARK(x5); diff --git a/src/dynarec/rv64/dynarec_rv64_df.c b/src/dynarec/rv64/dynarec_rv64_df.c index 119a2910..9d277713 100644 --- a/src/dynarec/rv64/dynarec_rv64_df.c +++ b/src/dynarec/rv64/dynarec_rv64_df.c @@ -135,7 +135,7 @@ uintptr_t dynarec64_DF(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int ni INST_NAME("FISTTP Ew, ST0"); v1 = x87_get_st(dyn, ninst, x1, x2, 0, EXT_CACHE_ST_F); addr = geted(dyn, addr, ninst, nextop, &wback, x3, x4, &fixedaddress, rex, NULL, 1, 0); - if (!box64_dynarec_fastround) { + if (!BOX64ENV(dynarec_fastround)) { FSFLAGSI(0); // reset all bits } if (ST_IS_F(0)) { @@ -143,7 +143,7 @@ uintptr_t dynarec64_DF(dynarec_rv64_t* dyn, uintptr_t addr, 
uintptr_t ip, int ni } else { FCVTWD(x4, v1, RD_RTZ); } - if (!box64_dynarec_fastround) { + if (!BOX64ENV(dynarec_fastround)) { FRFLAGS(x5); // get back FPSR to check the IOC bit ANDI(x5, x5, 1 << FR_NV); BNEZ_MARK(x5); @@ -162,7 +162,7 @@ uintptr_t dynarec64_DF(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int ni v1 = x87_get_st(dyn, ninst, x1, x2, 0, EXT_CACHE_ST_F); u8 = x87_setround(dyn, ninst, x1, x2); addr = geted(dyn, addr, ninst, nextop, &wback, x2, x3, &fixedaddress, rex, NULL, 1, 0); - if (!box64_dynarec_fastround) { + if (!BOX64ENV(dynarec_fastround)) { FSFLAGSI(0); // reset all bits } if (ST_IS_F(0)) { @@ -171,7 +171,7 @@ uintptr_t dynarec64_DF(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int ni FCVTWD(x4, v1, RD_DYN); } x87_restoreround(dyn, ninst, u8); - if (!box64_dynarec_fastround) { + if (!BOX64ENV(dynarec_fastround)) { FRFLAGS(x5); // get back FPSR to check the IOC bit ANDI(x5, x5, 1 << FR_NV); BNEZ_MARK(x5); @@ -189,7 +189,7 @@ uintptr_t dynarec64_DF(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int ni v1 = x87_get_st(dyn, ninst, x1, x2, 0, EXT_CACHE_ST_F); u8 = x87_setround(dyn, ninst, x1, x2); addr = geted(dyn, addr, ninst, nextop, &wback, x2, x3, &fixedaddress, rex, NULL, 1, 0); - if (!box64_dynarec_fastround) { + if (!BOX64ENV(dynarec_fastround)) { FSFLAGSI(0); // reset all bits } if (ST_IS_F(0)) { @@ -198,7 +198,7 @@ uintptr_t dynarec64_DF(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int ni FCVTWD(x4, v1, RD_DYN); } x87_restoreround(dyn, ninst, u8); - if (!box64_dynarec_fastround) { + if (!BOX64ENV(dynarec_fastround)) { FRFLAGS(x5); // get back FPSR to check the IOC bit ANDI(x5, x5, 1 << FR_NV); BNEZ_MARK(x5); @@ -288,11 +288,11 @@ uintptr_t dynarec64_DF(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int ni MARK; } - if (!box64_dynarec_fastround) { + if (!BOX64ENV(dynarec_fastround)) { FSFLAGSI(0); // reset all bits } FCVTLD(x4, v1, RD_DYN); - if (!box64_dynarec_fastround) { + if (!BOX64ENV(dynarec_fastround)) { FRFLAGS(x5); // get back FPSR to check the IOC bit ANDI(x5, x5, 1 << FR_NV); BEQ_MARK2(x5, xZR); diff --git a/src/dynarec/rv64/dynarec_rv64_f20f.c b/src/dynarec/rv64/dynarec_rv64_f20f.c index 3fbac305..6692e3ce 100644 --- a/src/dynarec/rv64/dynarec_rv64_f20f.c +++ b/src/dynarec/rv64/dynarec_rv64_f20f.c @@ -105,12 +105,12 @@ uintptr_t dynarec64_F20F(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int nextop = F8; GETGD; GETEXSD(v0, 0); - if (!box64_dynarec_fastround) { + if (!BOX64ENV(dynarec_fastround)) { FSFLAGSI(0); // reset all bits } FCVTLDxw(gd, v0, RD_RTZ); if (!rex.w) ZEROUP(gd); - if (!box64_dynarec_fastround) { + if (!BOX64ENV(dynarec_fastround)) { FRFLAGS(x5); // get back FPSR to check the IOC bit ANDI(x5, x5, (1 << FR_NV) | (1 << FR_OF)); CBZ_NEXT(x5); @@ -126,14 +126,14 @@ uintptr_t dynarec64_F20F(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int nextop = F8; GETGD; GETEXSD(v0, 0); - if (!box64_dynarec_fastround) { + if (!BOX64ENV(dynarec_fastround)) { FSFLAGSI(0); // // reset all bits } u8 = sse_setround(dyn, ninst, x2, x3); FCVTLDxw(gd, v0, RD_DYN); if (!rex.w) ZEROUP(gd); x87_restoreround(dyn, ninst, u8); - if (!box64_dynarec_fastround) { + if (!BOX64ENV(dynarec_fastround)) { FRFLAGS(x5); // get back FPSR to check the IOC bit ANDI(x5, x5, (1 << FR_NV) | (1 << FR_OF)); CBZ_NEXT(x5); @@ -189,13 +189,13 @@ uintptr_t dynarec64_F20F(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int nextop = F8; GETEXSD(d0, 0); GETGXSD_empty(d1); - if (!box64_dynarec_fastnan) { + if (!BOX64ENV(dynarec_fastnan)) { v0 = 
fpu_get_scratch(dyn); // need a scratch in case d0 == d1 FMVDX(v0, xZR); FLTD(x3, d0, v0); } FSQRTD(d1, d0); - if (!box64_dynarec_fastnan) { + if (!BOX64ENV(dynarec_fastnan)) { BEQ(x3, xZR, 8); FNEGD(d1, d1); } @@ -205,12 +205,12 @@ uintptr_t dynarec64_F20F(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int nextop = F8; GETGXSD(v0); GETEXSD(v1, 0); - if (!box64_dynarec_fastnan) { + if (!BOX64ENV(dynarec_fastnan)) { FEQD(x3, v0, v0); FEQD(x4, v1, v1); } FADDD(v0, v0, v1); - if (!box64_dynarec_fastnan) { + if (!BOX64ENV(dynarec_fastnan)) { AND(x3, x3, x4); BNEZ_MARK(x3); CBNZ_NEXT(x4); @@ -227,12 +227,12 @@ uintptr_t dynarec64_F20F(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int nextop = F8; GETGXSD(v0); GETEXSD(v1, 0); - if (!box64_dynarec_fastnan) { + if (!BOX64ENV(dynarec_fastnan)) { FEQD(x3, v0, v0); FEQD(x4, v1, v1); } FMULD(v0, v0, v1); - if (!box64_dynarec_fastnan) { + if (!BOX64ENV(dynarec_fastnan)) { AND(x3, x3, x4); BNEZ_MARK(x3); CBNZ_NEXT(x4); @@ -262,12 +262,12 @@ uintptr_t dynarec64_F20F(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int nextop = F8; GETGXSD(v0); GETEXSD(v1, 0); - if (!box64_dynarec_fastnan) { + if (!BOX64ENV(dynarec_fastnan)) { FEQD(x3, v0, v0); FEQD(x4, v1, v1); } FSUBD(v0, v0, v1); - if (!box64_dynarec_fastnan) { + if (!BOX64ENV(dynarec_fastnan)) { AND(x3, x3, x4); BNEZ_MARK(x3); CBNZ_NEXT(x4); @@ -299,12 +299,12 @@ uintptr_t dynarec64_F20F(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int nextop = F8; GETGXSD(v0); GETEXSD(v1, 0); - if (!box64_dynarec_fastnan) { + if (!BOX64ENV(dynarec_fastnan)) { FEQD(x3, v0, v0); FEQD(x4, v1, v1); } FDIVD(v0, v0, v1); - if (!box64_dynarec_fastnan) { + if (!BOX64ENV(dynarec_fastnan)) { AND(x3, x3, x4); BNEZ_MARK(x3); CBNZ_NEXT(x4); diff --git a/src/dynarec/rv64/dynarec_rv64_f20f_vector.c b/src/dynarec/rv64/dynarec_rv64_f20f_vector.c index 34646907..b49cf50a 100644 --- a/src/dynarec/rv64/dynarec_rv64_f20f_vector.c +++ b/src/dynarec/rv64/dynarec_rv64_f20f_vector.c @@ -150,7 +150,7 @@ uintptr_t dynarec64_F20F_vector(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t i LD(x4, ed, fixedaddress); VMV_S_X(v0, x4); } - if (box64_dynarec_fastround) { + if (BOX64ENV(dynarec_fastround)) { VFMV_F_S(v0, v0); FCVTLDxw(gd, v0, RD_RTZ); if (!rex.w) ZEROUP(gd); @@ -183,7 +183,7 @@ uintptr_t dynarec64_F20F_vector(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t i LD(x4, ed, fixedaddress); VMV_S_X(v0, x4); } - if (box64_dynarec_fastround) { + if (BOX64ENV(dynarec_fastround)) { VFMV_F_S(v0, v0); u8 = sse_setround(dyn, ninst, x2, x3); FCVTLDxw(gd, v0, RD_DYN); @@ -223,7 +223,7 @@ uintptr_t dynarec64_F20F_vector(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t i LD(x4, ed, fixedaddress); VMV_S_X(v1, x4); } - if (box64_dynarec_fastnan) { + if (BOX64ENV(dynarec_fastnan)) { VECTOR_LOAD_VMASK(0b01, x4, 1); VFSQRT_V(v0, v1, VECTOR_MASKED); } else { @@ -243,7 +243,7 @@ uintptr_t dynarec64_F20F_vector(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t i } break; case 0x58: - if (!box64_dynarec_fastnan) return 0; + if (!BOX64ENV(dynarec_fastnan)) return 0; INST_NAME("ADDSD Gx, Ex"); nextop = F8; SET_ELEMENT_WIDTH(x1, VECTOR_SEW64, 1); @@ -262,7 +262,7 @@ uintptr_t dynarec64_F20F_vector(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t i VFADD_VV(v0, v0, v1, VECTOR_MASKED); break; case 0x59: - if (!box64_dynarec_fastnan) return 0; + if (!BOX64ENV(dynarec_fastnan)) return 0; INST_NAME("MULSD Gx, Ex"); nextop = F8; SET_ELEMENT_WIDTH(x1, VECTOR_SEW64, 1); @@ -322,7 +322,7 @@ uintptr_t dynarec64_F20F_vector(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t 
i } break; case 0x5C: - if (!box64_dynarec_fastnan) return 0; + if (!BOX64ENV(dynarec_fastnan)) return 0; INST_NAME("SUBSD Gx, Ex"); nextop = F8; SET_ELEMENT_WIDTH(x1, VECTOR_SEW64, 1); @@ -355,7 +355,7 @@ uintptr_t dynarec64_F20F_vector(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t i VMV_S_X(v1, x4); GETGX_vector(v0, 1, VECTOR_SEW64); } - if (box64_dynarec_fastnan) { + if (BOX64ENV(dynarec_fastnan)) { VECTOR_LOAD_VMASK(0b01, x4, 1); VFMIN_VV(v0, v0, v1, VECTOR_MASKED); } else { @@ -382,7 +382,7 @@ uintptr_t dynarec64_F20F_vector(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t i } break; case 0x5E: - if (!box64_dynarec_fastnan) return 0; + if (!BOX64ENV(dynarec_fastnan)) return 0; INST_NAME("DIVSD Gx, Ex"); nextop = F8; SET_ELEMENT_WIDTH(x1, VECTOR_SEW64, 1); @@ -415,7 +415,7 @@ uintptr_t dynarec64_F20F_vector(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t i VMV_S_X(v1, x4); GETGX_vector(v0, 1, VECTOR_SEW64); } - if (box64_dynarec_fastnan) { + if (BOX64ENV(dynarec_fastnan)) { VECTOR_LOAD_VMASK(0b01, x4, 1); VFMAX_VV(v0, v0, v1, VECTOR_MASKED); } else { @@ -553,7 +553,7 @@ uintptr_t dynarec64_F20F_vector(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t i } break; case 0xE6: - if (!box64_dynarec_fastround) return 0; + if (!BOX64ENV(dynarec_fastround)) return 0; INST_NAME("CVTPD2DQ Gx, Ex"); nextop = F8; SET_ELEMENT_WIDTH(x1, VECTOR_SEW64, 1); diff --git a/src/dynarec/rv64/dynarec_rv64_f30f.c b/src/dynarec/rv64/dynarec_rv64_f30f.c index 58daf454..b4645b41 100644 --- a/src/dynarec/rv64/dynarec_rv64_f30f.c +++ b/src/dynarec/rv64/dynarec_rv64_f30f.c @@ -134,13 +134,13 @@ uintptr_t dynarec64_F30F(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int nextop = F8; GETGD; GETEXSS(d0, 0); - if (!box64_dynarec_fastround) { + if (!BOX64ENV(dynarec_fastround)) { FSFLAGSI(0); // // reset all bits } FCVTSxw(gd, d0, RD_RTZ); if (!rex.w) ZEROUP(gd); - if (!box64_dynarec_fastround) { + if (!BOX64ENV(dynarec_fastround)) { FRFLAGS(x5); // get back FPSR to check the IOC bit ANDI(x5, x5, (1 << FR_NV) | (1 << FR_OF)); CBZ_NEXT(x5); @@ -156,7 +156,7 @@ uintptr_t dynarec64_F30F(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int nextop = F8; GETGD; GETEXSS(d0, 0); - if (!box64_dynarec_fastround) { + if (!BOX64ENV(dynarec_fastround)) { FSFLAGSI(0); // // reset all bits } u8 = sse_setround(dyn, ninst, x5, x6); @@ -164,7 +164,7 @@ uintptr_t dynarec64_F30F(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int x87_restoreround(dyn, ninst, u8); if (!rex.w) ZEROUP(gd); - if (!box64_dynarec_fastround) { + if (!BOX64ENV(dynarec_fastround)) { FRFLAGS(x5); // get back FPSR to check the IOC bit ANDI(x5, x5, (1 << FR_NV) | (1 << FR_OF)); CBZ_NEXT(x5); @@ -209,12 +209,12 @@ uintptr_t dynarec64_F30F(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int nextop = F8; GETGXSS(v0); GETEXSS(d0, 0); - if (!box64_dynarec_fastnan) { + if (!BOX64ENV(dynarec_fastnan)) { FEQS(x3, v0, v0); FEQS(x4, d0, d0); } FADDS(v0, v0, d0); - if (!box64_dynarec_fastnan) { + if (!BOX64ENV(dynarec_fastnan)) { AND(x3, x3, x4); BNEZ_MARK(x3); CBNZ_NEXT(x4); @@ -227,12 +227,12 @@ uintptr_t dynarec64_F30F(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int nextop = F8; GETGXSS(v0); GETEXSS(d0, 0); - if (!box64_dynarec_fastnan) { + if (!BOX64ENV(dynarec_fastnan)) { FEQS(x3, v0, v0); FEQS(x4, d0, d0); } FMULS(v0, v0, d0); - if (!box64_dynarec_fastnan) { + if (!BOX64ENV(dynarec_fastnan)) { AND(x3, x3, x4); BNEZ_MARK(x3); CBNZ_NEXT(x4); @@ -264,12 +264,12 @@ uintptr_t dynarec64_F30F(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int GETEX(x2, 0, 12); v0 
= fpu_get_scratch(dyn); for (int i = 0; i < 4; ++i) { - if (!box64_dynarec_fastround) { + if (!BOX64ENV(dynarec_fastround)) { FSFLAGSI(0); // reset all bits } FLW(v0, wback, fixedaddress + i * 4); FCVTWS(x3, v0, RD_RTZ); - if (!box64_dynarec_fastround) { + if (!BOX64ENV(dynarec_fastround)) { FRFLAGS(x5); // get back FPSR to check the IOC bit ANDI(x5, x5, (1 << FR_NV) | (1 << FR_OF)); BEQZ(x5, 8); @@ -283,12 +283,12 @@ uintptr_t dynarec64_F30F(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int nextop = F8; GETGXSS(v0); GETEXSS(d0, 0); - if (!box64_dynarec_fastnan) { + if (!BOX64ENV(dynarec_fastnan)) { FEQS(x3, v0, v0); FEQS(x4, d0, d0); } FSUBS(v0, v0, d0); - if (!box64_dynarec_fastnan) { + if (!BOX64ENV(dynarec_fastnan)) { AND(x3, x3, x4); BNEZ_MARK(x3); CBNZ_NEXT(x4); @@ -316,12 +316,12 @@ uintptr_t dynarec64_F30F(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int nextop = F8; GETGXSS(v0); GETEXSS(d0, 0); - if (!box64_dynarec_fastnan) { + if (!BOX64ENV(dynarec_fastnan)) { FEQS(x3, v0, v0); FEQS(x4, d0, d0); } FDIVS(v0, v0, d0); - if (!box64_dynarec_fastnan) { + if (!BOX64ENV(dynarec_fastnan)) { AND(x3, x3, x4); BNEZ_MARK(x3); CBNZ_NEXT(x4); diff --git a/src/dynarec/rv64/dynarec_rv64_f30f_vector.c b/src/dynarec/rv64/dynarec_rv64_f30f_vector.c index 7cd82dfa..a167c16e 100644 --- a/src/dynarec/rv64/dynarec_rv64_f30f_vector.c +++ b/src/dynarec/rv64/dynarec_rv64_f30f_vector.c @@ -173,10 +173,10 @@ uintptr_t dynarec64_F30F_vector(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t i } else { GETEXSS(d0, 0); } - if (!box64_dynarec_fastround) FSFLAGSI(0); + if (!BOX64ENV(dynarec_fastround)) FSFLAGSI(0); FCVTSxw(gd, d0, RD_RTZ); if (!rex.w) ZEROUP(gd); - if (!box64_dynarec_fastround) { + if (!BOX64ENV(dynarec_fastround)) { FRFLAGS(x5); ANDI(x5, x5, (1 << FR_NV) | (1 << FR_OF)); CBZ_NEXT(x5); @@ -199,12 +199,12 @@ uintptr_t dynarec64_F30F_vector(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t i } else { GETEXSS(d0, 0); } - if (!box64_dynarec_fastround) FSFLAGSI(0); + if (!BOX64ENV(dynarec_fastround)) FSFLAGSI(0); u8 = sse_setround(dyn, ninst, x5, x6); FCVTSxw(gd, d0, RD_DYN); x87_restoreround(dyn, ninst, u8); if (!rex.w) ZEROUP(gd); - if (!box64_dynarec_fastround) { + if (!BOX64ENV(dynarec_fastround)) { FRFLAGS(x5); ANDI(x5, x5, (1 << FR_NV) | (1 << FR_OF)); CBZ_NEXT(x5); @@ -277,7 +277,7 @@ uintptr_t dynarec64_F30F_vector(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t i VFRDIV_VF(v0, v1, v1, VECTOR_MASKED); break; case 0x58: - if (!box64_dynarec_fastnan) return 0; + if (!BOX64ENV(dynarec_fastnan)) return 0; INST_NAME("ADDSS Gx, Ex"); nextop = F8; SET_ELEMENT_WIDTH(x1, VECTOR_SEW32, 1); @@ -296,7 +296,7 @@ uintptr_t dynarec64_F30F_vector(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t i VFADD_VV(v0, v0, v1, VECTOR_MASKED); break; case 0x59: - if (!box64_dynarec_fastnan) return 0; + if (!BOX64ENV(dynarec_fastnan)) return 0; INST_NAME("MULSS Gx, Ex"); nextop = F8; SET_ELEMENT_WIDTH(x1, VECTOR_SEW32, 1); @@ -341,7 +341,7 @@ uintptr_t dynarec64_F30F_vector(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t i } break; case 0x5B: - if (!box64_dynarec_fastround) return 0; + if (!BOX64ENV(dynarec_fastround)) return 0; INST_NAME("CVTTPS2DQ Gx, Ex"); nextop = F8; SET_ELEMENT_WIDTH(x1, VECTOR_SEW32, 1); @@ -357,7 +357,7 @@ uintptr_t dynarec64_F30F_vector(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t i } break; case 0x5C: - if (!box64_dynarec_fastnan) return 0; + if (!BOX64ENV(dynarec_fastnan)) return 0; INST_NAME("SUBSS Gx, Ex"); nextop = F8; SET_ELEMENT_WIDTH(x1, VECTOR_SEW32, 1); @@ -390,7 +390,7 @@ uintptr_t 
dynarec64_F30F_vector(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t i VMV_S_X(v1, x4); GETGX_vector(v0, 1, VECTOR_SEW32); } - if (box64_dynarec_fastnan) { + if (BOX64ENV(dynarec_fastnan)) { VECTOR_LOAD_VMASK(0b0001, x4, 1); VFMIN_VV(v0, v0, v1, VECTOR_MASKED); } else { @@ -417,7 +417,7 @@ uintptr_t dynarec64_F30F_vector(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t i } break; case 0x5E: - if (!box64_dynarec_fastnan) return 0; + if (!BOX64ENV(dynarec_fastnan)) return 0; INST_NAME("DIVSS Gx, Ex"); nextop = F8; SET_ELEMENT_WIDTH(x1, VECTOR_SEW32, 1); @@ -450,7 +450,7 @@ uintptr_t dynarec64_F30F_vector(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t i VMV_S_X(v1, x4); GETGX_vector(v0, 1, VECTOR_SEW32); } - if (box64_dynarec_fastnan) { + if (BOX64ENV(dynarec_fastnan)) { VECTOR_LOAD_VMASK(0b0001, x4, 1); VFMAX_VV(v0, v0, v1, VECTOR_MASKED); } else { diff --git a/src/dynarec/rv64/dynarec_rv64_functions.c b/src/dynarec/rv64/dynarec_rv64_functions.c index 34dbeb47..0b1943d6 100644 --- a/src/dynarec/rv64/dynarec_rv64_functions.c +++ b/src/dynarec/rv64/dynarec_rv64_functions.c @@ -217,7 +217,7 @@ static void extcache_promote_double_combined(dynarec_rv64_t* dyn, int ninst, int } else a = dyn->insts[ninst].e.combined1; int i = extcache_get_st_f_i64_noback(dyn, ninst, a); - // if(box64_dynarec_dump) dynarec_log(LOG_NONE, "extcache_promote_double_combined, ninst=%d combined%c %d i=%d (stack:%d/%d)\n", ninst, (a == dyn->insts[ninst].e.combined2)?'2':'1', a ,i, dyn->insts[ninst].e.stack_push, -dyn->insts[ninst].e.stack_pop); + // if(BOX64ENV(dynarec_dump)) dynarec_log(LOG_NONE, "extcache_promote_double_combined, ninst=%d combined%c %d i=%d (stack:%d/%d)\n", ninst, (a == dyn->insts[ninst].e.combined2)?'2':'1', a ,i, dyn->insts[ninst].e.stack_push, -dyn->insts[ninst].e.stack_pop); if (i >= 0) { dyn->insts[ninst].e.extcache[i].t = EXT_CACHE_ST_D; if (!dyn->insts[ninst].e.barrier) @@ -236,19 +236,19 @@ static void extcache_promote_double_internal(dynarec_rv64_t* dyn, int ninst, int while (ninst >= 0) { a += dyn->insts[ninst].e.stack_pop; // adjust Stack depth: add pop'd ST (going backward) int i = extcache_get_st_f_i64(dyn, ninst, a); - // if(box64_dynarec_dump) dynarec_log(LOG_NONE, "extcache_promote_double_internal, ninst=%d, a=%d st=%d:%d, i=%d\n", ninst, a, dyn->insts[ninst].e.stack, dyn->insts[ninst].e.stack_next, i); + // if(BOX64ENV(dynarec_dump)) dynarec_log(LOG_NONE, "extcache_promote_double_internal, ninst=%d, a=%d st=%d:%d, i=%d\n", ninst, a, dyn->insts[ninst].e.stack, dyn->insts[ninst].e.stack_next, i); if (i < 0) return; dyn->insts[ninst].e.extcache[i].t = EXT_CACHE_ST_D; // check combined propagation too if (dyn->insts[ninst].e.combined1 || dyn->insts[ninst].e.combined2) { if (dyn->insts[ninst].e.swapped) { - // if(box64_dynarec_dump) dynarec_log(LOG_NONE, "extcache_promote_double_internal, ninst=%d swapped %d/%d vs %d with st %d\n", ninst, dyn->insts[ninst].e.combined1 ,dyn->insts[ninst].e.combined2, a, dyn->insts[ninst].e.stack); + // if(BOX64ENV(dynarec_dump)) dynarec_log(LOG_NONE, "extcache_promote_double_internal, ninst=%d swapped %d/%d vs %d with st %d\n", ninst, dyn->insts[ninst].e.combined1 ,dyn->insts[ninst].e.combined2, a, dyn->insts[ninst].e.stack); if (a == dyn->insts[ninst].e.combined1) a = dyn->insts[ninst].e.combined2; else if (a == dyn->insts[ninst].e.combined2) a = dyn->insts[ninst].e.combined1; } else { - // if(box64_dynarec_dump) dynarec_log(LOG_NONE, "extcache_promote_double_internal, ninst=%d combined %d/%d vs %d with st %d\n", ninst, dyn->insts[ninst].e.combined1 
,dyn->insts[ninst].e.combined2, a, dyn->insts[ninst].e.stack); + // if(BOX64ENV(dynarec_dump)) dynarec_log(LOG_NONE, "extcache_promote_double_internal, ninst=%d combined %d/%d vs %d with st %d\n", ninst, dyn->insts[ninst].e.combined1 ,dyn->insts[ninst].e.combined2, a, dyn->insts[ninst].e.stack); extcache_promote_double_combined(dyn, ninst, maxinst, a); } } @@ -264,19 +264,19 @@ static void extcache_promote_double_forward(dynarec_rv64_t* dyn, int ninst, int while ((ninst != -1) && (ninst < maxinst) && (a >= 0)) { a += dyn->insts[ninst].e.stack_push; // // adjust Stack depth: add push'd ST (going forward) if ((dyn->insts[ninst].e.combined1 || dyn->insts[ninst].e.combined2) && dyn->insts[ninst].e.swapped) { - // if(box64_dynarec_dump) dynarec_log(LOG_NONE, "extcache_promote_double_forward, ninst=%d swapped %d/%d vs %d with st %d\n", ninst, dyn->insts[ninst].e.combined1 ,dyn->insts[ninst].e.combined2, a, dyn->insts[ninst].e.stack); + // if(BOX64ENV(dynarec_dump)) dynarec_log(LOG_NONE, "extcache_promote_double_forward, ninst=%d swapped %d/%d vs %d with st %d\n", ninst, dyn->insts[ninst].e.combined1 ,dyn->insts[ninst].e.combined2, a, dyn->insts[ninst].e.stack); if (a == dyn->insts[ninst].e.combined1) a = dyn->insts[ninst].e.combined2; else if (a == dyn->insts[ninst].e.combined2) a = dyn->insts[ninst].e.combined1; } int i = extcache_get_st_f_i64_noback(dyn, ninst, a); - // if(box64_dynarec_dump) dynarec_log(LOG_NONE, "extcache_promote_double_forward, ninst=%d, a=%d st=%d:%d(%d/%d), i=%d\n", ninst, a, dyn->insts[ninst].e.stack, dyn->insts[ninst].e.stack_next, dyn->insts[ninst].e.stack_push, -dyn->insts[ninst].e.stack_pop, i); + // if(BOX64ENV(dynarec_dump)) dynarec_log(LOG_NONE, "extcache_promote_double_forward, ninst=%d, a=%d st=%d:%d(%d/%d), i=%d\n", ninst, a, dyn->insts[ninst].e.stack, dyn->insts[ninst].e.stack_next, dyn->insts[ninst].e.stack_push, -dyn->insts[ninst].e.stack_pop, i); if (i < 0) return; dyn->insts[ninst].e.extcache[i].t = EXT_CACHE_ST_D; // check combined propagation too if ((dyn->insts[ninst].e.combined1 || dyn->insts[ninst].e.combined2) && !dyn->insts[ninst].e.swapped) { - // if(box64_dynarec_dump) dynarec_log(LOG_NONE, "extcache_promote_double_forward, ninst=%d combined %d/%d vs %d with st %d\n", ninst, dyn->insts[ninst].e.combined1 ,dyn->insts[ninst].e.combined2, a, dyn->insts[ninst].e.stack); + // if(BOX64ENV(dynarec_dump)) dynarec_log(LOG_NONE, "extcache_promote_double_forward, ninst=%d combined %d/%d vs %d with st %d\n", ninst, dyn->insts[ninst].e.combined1 ,dyn->insts[ninst].e.combined2, a, dyn->insts[ninst].e.stack); extcache_promote_double_combined(dyn, ninst, maxinst, a); } a -= dyn->insts[ninst].e.stack_pop; // adjust Stack depth: remove pop'd ST (going forward) @@ -292,20 +292,20 @@ static void extcache_promote_double_forward(dynarec_rv64_t* dyn, int ninst, int void extcache_promote_double(dynarec_rv64_t* dyn, int ninst, int a) { int i = extcache_get_current_st_f_i64(dyn, a); - // if(box64_dynarec_dump) dynarec_log(LOG_NONE, "extcache_promote_double, ninst=%d a=%d st=%d i=%d\n", ninst, a, dyn->e.stack, i); + // if(BOX64ENV(dynarec_dump)) dynarec_log(LOG_NONE, "extcache_promote_double, ninst=%d a=%d st=%d i=%d\n", ninst, a, dyn->e.stack, i); if (i < 0) return; dyn->e.extcache[i].t = EXT_CACHE_ST_D; dyn->insts[ninst].e.extcache[i].t = EXT_CACHE_ST_D; // check combined propagation too if (dyn->e.combined1 || dyn->e.combined2) { if (dyn->e.swapped) { - // if(box64_dynarec_dump) dynarec_log(LOG_NONE, "extcache_promote_double, ninst=%d swapped! 
%d/%d vs %d\n", ninst, dyn->e.combined1 ,dyn->e.combined2, a); + // if(BOX64ENV(dynarec_dump)) dynarec_log(LOG_NONE, "extcache_promote_double, ninst=%d swapped! %d/%d vs %d\n", ninst, dyn->e.combined1 ,dyn->e.combined2, a); if (dyn->e.combined1 == a) a = dyn->e.combined2; else if (dyn->e.combined2 == a) a = dyn->e.combined1; } else { - // if(box64_dynarec_dump) dynarec_log(LOG_NONE, "extcache_promote_double, ninst=%d combined! %d/%d vs %d\n", ninst, dyn->e.combined1 ,dyn->e.combined2, a); + // if(BOX64ENV(dynarec_dump)) dynarec_log(LOG_NONE, "extcache_promote_double, ninst=%d combined! %d/%d vs %d\n", ninst, dyn->e.combined1 ,dyn->e.combined2, a); if (dyn->e.combined1 == a) extcache_promote_double(dyn, ninst, dyn->e.combined2); else if (dyn->e.combined2 == a) @@ -698,7 +698,7 @@ static register_mapping_t register_mappings[] = { void inst_name_pass3(dynarec_native_t* dyn, int ninst, const char* name, rex_t rex) { - if (!box64_dynarec_dump && !box64_dynarec_gdbjit && !box64_dynarec_perf_map) return; + if (!BOX64ENV(dynarec_dump) && !BOX64ENV(dynarec_gdbjit) && !BOX64ENV(dynarec_perf_map)) return; static char buf[512]; int length = sprintf(buf, "barrier=%d state=%d/%d(%d), %s=%X/%X, use=%X, need=%X/%X, fuse=%d, sm=%d(%d/%d), sew@entry=%d, sew@exit=%d", @@ -751,15 +751,15 @@ void inst_name_pass3(dynarec_native_t* dyn, int ninst, const char* name, rex_t r if (dyn->insts[ninst].e.combined1 || dyn->insts[ninst].e.combined2) length += sprintf(buf + length, " %s:%d/%d", dyn->insts[ninst].e.swapped ? "SWP" : "CMB", dyn->insts[ninst].e.combined1, dyn->insts[ninst].e.combined2); - if (box64_dynarec_dump) { + if (BOX64ENV(dynarec_dump)) { printf_x64_instruction(rex.is32bits ? my_context->dec32 : my_context->dec, &dyn->insts[ninst].x64, name); dynarec_log(LOG_NONE, "%s%p: %d emitted opcodes, inst=%d, %s%s\n", - (box64_dynarec_dump > 1) ? "\e[32m" : "", - (void*)(dyn->native_start + dyn->insts[ninst].address), dyn->insts[ninst].size / 4, ninst, buf, (box64_dynarec_dump > 1) ? "\e[m" : ""); + (BOX64ENV(dynarec_dump) > 1) ? "\e[32m" : "", + (void*)(dyn->native_start + dyn->insts[ninst].address), dyn->insts[ninst].size / 4, ninst, buf, (BOX64ENV(dynarec_dump) > 1) ? "\e[m" : ""); } - if (box64_dynarec_gdbjit) { + if (BOX64ENV(dynarec_gdbjit)) { static char buf2[512]; - if (box64_dynarec_gdbjit > 1) { + if (BOX64ENV(dynarec_gdbjit) > 1) { sprintf(buf2, "; %d: %d opcodes, %s", ninst, dyn->insts[ninst].size / 4, buf); dyn->gdbjit_block = GdbJITBlockAddLine(dyn->gdbjit_block, (dyn->native_start + dyn->insts[ninst].address), buf2); } @@ -772,7 +772,7 @@ void inst_name_pass3(dynarec_native_t* dyn, int ninst, const char* name, rex_t r } dyn->gdbjit_block = GdbJITBlockAddLine(dyn->gdbjit_block, (dyn->native_start + dyn->insts[ninst].address), inst_name); } - if (box64_dynarec_perf_map && box64_dynarec_perf_map_fd != -1) { + if (BOX64ENV(dynarec_perf_map) && BOX64ENV(dynarec_perf_map_fd) != -1) { writePerfMap(dyn->insts[ninst].x64.addr, dyn->native_start + dyn->insts[ninst].address, dyn->insts[ninst].size / 4, name); } } @@ -785,9 +785,9 @@ void print_opcode(dynarec_native_t* dyn, int ninst, uint32_t opcode) void print_newinst(dynarec_native_t* dyn, int ninst) { dynarec_log(LOG_NONE, "%sNew instruction %d, native=%p (0x%x)%s\n", - (box64_dynarec_dump > 1) ? "\e[4;32m" : "", + (BOX64ENV(dynarec_dump) > 1) ? "\e[4;32m" : "", ninst, dyn->block, dyn->native_size, - (box64_dynarec_dump > 1) ? "\e[m" : ""); + (BOX64ENV(dynarec_dump) > 1) ? 
"\e[m" : ""); } // x87 stuffs @@ -850,7 +850,7 @@ int fpu_is_st_freed(dynarec_rv64_t* dyn, int ninst, int st) void updateNativeFlags(dynarec_rv64_t* dyn) { - if (!box64_dynarec_nativeflags) + if (!BOX64ENV(dynarec_nativeflags)) return; for (int i = 1; i < dyn->size; ++i) if (dyn->insts[i].nat_flags_fusion) { diff --git a/src/dynarec/rv64/dynarec_rv64_helper.c b/src/dynarec/rv64/dynarec_rv64_helper.c index af44f12b..fd88271f 100644 --- a/src/dynarec/rv64/dynarec_rv64_helper.c +++ b/src/dynarec/rv64/dynarec_rv64_helper.c @@ -611,7 +611,7 @@ void ret_to_epilog(dynarec_rv64_t* dyn, int ninst, rex_t rex) POP1z(xRIP); MVz(x1, xRIP); SMEND(); - if (box64_dynarec_callret) { + if (BOX64ENV(dynarec_callret)) { // pop the actual return address from RV64 stack LD(xRA, xSP, 0); // native addr LD(x6, xSP, 8); // x86 addr @@ -680,7 +680,7 @@ void retn_to_epilog(dynarec_rv64_t* dyn, int ninst, rex_t rex, int n) } MVz(x1, xRIP); SMEND(); - if (box64_dynarec_callret) { + if (BOX64ENV(dynarec_callret)) { // pop the actual return address from RV64 stack LD(xRA, xSP, 0); // native addr LD(x6, xSP, 8); // x86 addr @@ -2891,10 +2891,10 @@ void fpu_reset_cache(dynarec_rv64_t* dyn, int ninst, int reset_n) #endif extcacheUnwind(&dyn->e); #if STEP == 0 - if (box64_dynarec_dump) dynarec_log(LOG_NONE, "New x87stack=%d\n", dyn->e.x87stack); + if (BOX64ENV(dynarec_dump)) dynarec_log(LOG_NONE, "New x87stack=%d\n", dyn->e.x87stack); #endif #if defined(HAVE_TRACE) && (STEP > 2) - if (box64_dynarec_dump) + if (BOX64ENV(dynarec_dump)) if (memcmp(&dyn->e, &dyn->insts[reset_n].e, sizeof(ext_cache_t))) { MESSAGE(LOG_DEBUG, "Warning, difference in extcache: reset="); for (int i = 0; i < 24; ++i) diff --git a/src/dynarec/rv64/dynarec_rv64_helper.h b/src/dynarec/rv64/dynarec_rv64_helper.h index 40ef0717..332009d0 100644 --- a/src/dynarec/rv64/dynarec_rv64_helper.h +++ b/src/dynarec/rv64/dynarec_rv64_helper.h @@ -963,7 +963,7 @@ #else #define X87_PUSH_OR_FAIL(var, dyn, ninst, scratch, t) \ if ((dyn->e.x87stack == 8) || (dyn->e.pushed == 8)) { \ - if (box64_dynarec_dump) dynarec_log(LOG_NONE, " Warning, suspicious x87 Push, stack=%d/%d on inst %d\n", dyn->e.x87stack, dyn->e.pushed, ninst); \ + if (BOX64ENV(dynarec_dump)) dynarec_log(LOG_NONE, " Warning, suspicious x87 Push, stack=%d/%d on inst %d\n", dyn->e.x87stack, dyn->e.pushed, ninst); \ dyn->abort = 1; \ return addr; \ } \ @@ -971,7 +971,7 @@ #define X87_PUSH_EMPTY_OR_FAIL(dyn, ninst, scratch) \ if ((dyn->e.x87stack == 8) || (dyn->e.pushed == 8)) { \ - if (box64_dynarec_dump) dynarec_log(LOG_NONE, " Warning, suspicious x87 Push, stack=%d/%d on inst %d\n", dyn->e.x87stack, dyn->e.pushed, ninst); \ + if (BOX64ENV(dynarec_dump)) dynarec_log(LOG_NONE, " Warning, suspicious x87 Push, stack=%d/%d on inst %d\n", dyn->e.x87stack, dyn->e.pushed, ninst); \ dyn->abort = 1; \ return addr; \ } \ @@ -979,7 +979,7 @@ #define X87_POP_OR_FAIL(dyn, ninst, scratch) \ if ((dyn->e.x87stack == -8) || (dyn->e.poped == 8)) { \ - if (box64_dynarec_dump) dynarec_log(LOG_NONE, " Warning, suspicious x87 Pop, stack=%d/%d on inst %d\n", dyn->e.x87stack, dyn->e.poped, ninst); \ + if (BOX64ENV(dynarec_dump)) dynarec_log(LOG_NONE, " Warning, suspicious x87 Pop, stack=%d/%d on inst %d\n", dyn->e.x87stack, dyn->e.poped, ninst); \ dyn->abort = 1; \ return addr; \ } \ @@ -1773,16 +1773,16 @@ uintptr_t dynarec64_AVX_F3_0F(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, B##COND(dyn->insts[ninst].nat_flags_op1, dyn->insts[ninst].nat_flags_op2, val); #define NOTEST(s1) \ - if (box64_dynarec_test) { \ + if 
(BOX64ENV(dynarec_test)) { \ SW(xZR, xEmu, offsetof(x64emu_t, test.test)); \ SW(xZR, xEmu, offsetof(x64emu_t, test.clean)); \ } #define SKIPTEST(s1) \ - if (box64_dynarec_test) { \ + if (BOX64ENV(dynarec_test)) { \ SW(xZR, xEmu, offsetof(x64emu_t, test.clean)); \ } #define GOTEST(s1, s2) \ - if (box64_dynarec_test) { \ + if (BOX64ENV(dynarec_test)) { \ MOV32w(s2, 1); \ SW(s2, xEmu, offsetof(x64emu_t, test.test)); \ } diff --git a/src/dynarec/rv64/dynarec_rv64_pass0.h b/src/dynarec/rv64/dynarec_rv64_pass0.h index 10dfeb0e..53630a89 100644 --- a/src/dynarec/rv64/dynarec_rv64_pass0.h +++ b/src/dynarec/rv64/dynarec_rv64_pass0.h @@ -15,7 +15,7 @@ dyn->f.pending = SF_SET #define READFLAGS_FUSION(A, s1, s2, s3, s4, s5) \ - if (box64_dynarec_nativeflags && ninst > 0 && !dyn->insts[ninst - 1].nat_flags_nofusion) { \ + if (BOX64ENV(dynarec_nativeflags) && ninst > 0 && !dyn->insts[ninst - 1].nat_flags_nofusion) { \ if ((A) == (X_ZF)) \ dyn->insts[ninst].nat_flags_fusion = 1; \ else if (dyn->insts[ninst - 1].nat_flags_carry && ((A) == (X_CF) || (A) == (X_CF | X_ZF))) \ @@ -73,7 +73,7 @@ #define DEFAULT \ --dyn->size; \ *ok = -1; \ - if (box64_dynarec_log >= LOG_INFO || box64_dynarec_dump || box64_dynarec_missing == 1) { \ + if (BOX64ENV(dynarec_log) >= LOG_INFO || BOX64ENV(dynarec_dump) || BOX64ENV(dynarec_missing) == 1) { \ dynarec_log(LOG_NONE, "%p: Dynarec stopped because of %sOpcode %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X", \ (void*)ip, rex.is32bits ? "32bits " : "", \ PKip(0), \ diff --git a/src/dynarec/rv64/dynarec_rv64_pass3.h b/src/dynarec/rv64/dynarec_rv64_pass3.h index d8a270f6..781c69e9 100644 --- a/src/dynarec/rv64/dynarec_rv64_pass3.h +++ b/src/dynarec/rv64/dynarec_rv64_pass3.h @@ -5,7 +5,7 @@ addInst(dyn->instsize, &dyn->insts_size, 0, 0); #define EMIT(A) \ do { \ - if (box64_dynarec_dump) print_opcode(dyn, ninst, (uint32_t)A); \ + if (BOX64ENV(dynarec_dump)) print_opcode(dyn, ninst, (uint32_t)A); \ *(uint32_t*)(dyn->block) = (uint32_t)(A); \ dyn->block += 4; \ dyn->native_size += 4; \ @@ -13,13 +13,13 @@ } while (0) #define MESSAGE(A, ...) \ - if (box64_dynarec_dump) dynarec_log(LOG_NONE, __VA_ARGS__) + if (BOX64ENV(dynarec_dump)) dynarec_log(LOG_NONE, __VA_ARGS__) #define NEW_INST \ dyn->vector_sew = dyn->insts[ninst].vector_sew_entry; \ dyn->inst_sew = dyn->vector_sew; \ dyn->inst_vlmul = VECTOR_LMUL1; \ dyn->inst_vl = 0; \ - if (box64_dynarec_dump) print_newinst(dyn, ninst); \ + if (BOX64ENV(dynarec_dump)) print_newinst(dyn, ninst); \ if (ninst) { \ addInst(dyn->instsize, &dyn->insts_size, dyn->insts[ninst - 1].x64.size, dyn->insts[ninst - 1].size / 4); \ dyn->insts[ninst].ymm0_pass3 = dyn->ymm_zero; \ @@ -44,7 +44,7 @@ } #define DEFAULT_VECTOR \ - if (box64_dynarec_log >= LOG_INFO || box64_dynarec_dump || box64_dynarec_missing == 2) { \ + if (BOX64ENV(dynarec_log) >= LOG_INFO || BOX64ENV(dynarec_dump) || BOX64ENV(dynarec_missing) == 2) { \ dynarec_log(LOG_NONE, "%p: Dynarec fallback to scalar version because of %sOpcode" \ " %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X", \ (void*)ip, rex.is32bits ? 
"x86 " : "x64 ", \ diff --git a/src/elfs/elfload_dump.c b/src/elfs/elfload_dump.c index 2ec0d603..fbd24eaa 100644 --- a/src/elfs/elfload_dump.c +++ b/src/elfs/elfload_dump.c @@ -256,7 +256,7 @@ void DumpMainHeader32(Elf32_Ehdr *header, elfheader_t *h) { } #endif void DumpMainHeader64(Elf64_Ehdr *header, elfheader_t *h) { - if(box64_dump) { + if(BOX64ENV(dump)) { printf_dump(LOG_NEVER, "ELF Dump main header\n"); printf_dump(LOG_NEVER, " Entry point = %p\n", (void*)header->e_entry); printf_dump(LOG_NEVER, " Program Header table offset = %p\n", (void*)header->e_phoff); @@ -284,11 +284,11 @@ void DumpSymTab32(elfheader_t *h) { } #endif void DumpSymTab64(elfheader_t *h) { - if(box64_dump && h->SymTab._64) { + if (BOX64ENV(dump) && h->SymTab._64) { const char* name = ElfName(h); printf_dump(LOG_NEVER, "ELF Dump SymTab(%zu)=\n", h->numSymTab); for (size_t i=0; i<h->numSymTab; ++i) - printf_dump(LOG_NEVER, " %s:SymTab[%zu] = \"%s\", value=%p, size=%ld, info/other=%d/%d index=%d\n", name, + printf_dump(LOG_NEVER, " %s:SymTab[%zu] = \"%s\", value=%p, size=%ld, info/other=%d/%d index=%d\n", name, i, h->StrTab+h->SymTab._64[i].st_name, (void*)h->SymTab._64[i].st_value, h->SymTab._64[i].st_size, h->SymTab._64[i].st_info, h->SymTab._64[i].st_other, h->SymTab._64[i].st_shndx); printf_dump(LOG_NEVER, "ELF Dump SymTab=====\n"); @@ -300,7 +300,7 @@ void DumpDynamicSections32(elfheader_t *h) { } #endif void DumpDynamicSections64(elfheader_t *h) { - if(box64_dump && h->Dynamic._64) { + if (BOX64ENV(dump) && h->Dynamic._64) { printf_dump(LOG_NEVER, "ELF Dump Dynamic(%zu)=\n", h->numDynamic); for (size_t i=0; i<h->numDynamic; ++i) printf_dump(LOG_NEVER, " Dynamic %04zu : %s\n", i, DumpDynamic(h->Dynamic._64+i)); @@ -313,7 +313,7 @@ void DumpDynSym32(elfheader_t *h) { } #endif void DumpDynSym64(elfheader_t *h) { - if(box64_dump && h->DynSym._64) { + if (BOX64ENV(dump) && h->DynSym._64) { const char* name = ElfName(h); printf_dump(LOG_NEVER, "ELF Dump DynSym(%zu)=\n", h->numDynSym); for (size_t i=0; i<h->numDynSym; ++i) { @@ -334,40 +334,38 @@ void DumpDynamicNeeded(elfheader_t *h) { if(box64_is32bits) DumpDynamicNeeded32(h); - else - if(box64_dump && h->DynStrTab) { - printf_dump(LOG_NEVER, "ELF Dump DT_NEEDED=====\n"); - for (size_t i=0; i<h->numDynamic; ++i) - if(h->Dynamic._64[i].d_tag==DT_NEEDED) { - printf_dump(LOG_NEVER, " Needed : %s\n", h->DynStrTab+h->Dynamic._64[i].d_un.d_val + h->delta); - } - printf_dump(LOG_NEVER, "ELF Dump DT_NEEDED=====\n"); - } + else if (BOX64ENV(dump) && h->DynStrTab) { + printf_dump(LOG_NEVER, "ELF Dump DT_NEEDED=====\n"); + for (size_t i=0; i<h->numDynamic; ++i) + if(h->Dynamic._64[i].d_tag==DT_NEEDED) { + printf_dump(LOG_NEVER, " Needed : %s\n", h->DynStrTab+h->Dynamic._64[i].d_un.d_val + h->delta); + } + printf_dump(LOG_NEVER, "ELF Dump DT_NEEDED=====\n"); + } } -void DumpDynamicRPath32(elfheader_t *h) +void DumpDynamicRPath32(elfheader_t *h) #ifndef BOX32 -{ } + {} #else - ; + ; #endif void DumpDynamicRPath(elfheader_t *h) { if(box64_is32bits) DumpDynamicRPath32(h); - else - if(box64_dump && h->DynStrTab) { - printf_dump(LOG_NEVER, "ELF Dump DT_RPATH/DT_RUNPATH=====\n"); - for (size_t i=0; i<h->numDynamic; ++i) { - if(h->Dynamic._64[i].d_tag==DT_RPATH) { - printf_dump(LOG_NEVER, " RPATH : %s\n", h->DynStrTab+h->Dynamic._64[i].d_un.d_val + h->delta); - } - if(h->Dynamic._64[i].d_tag==DT_RUNPATH) { - printf_dump(LOG_NEVER, " RUNPATH : %s\n", h->DynStrTab+h->Dynamic._64[i].d_un.d_val + h->delta); - } + else if (BOX64ENV(dump) && h->DynStrTab) { + printf_dump(LOG_NEVER, "ELF 
Dump DT_RPATH/DT_RUNPATH=====\n"); + for (size_t i=0; i<h->numDynamic; ++i) { + if(h->Dynamic._64[i].d_tag==DT_RPATH) { + printf_dump(LOG_NEVER, " RPATH : %s\n", h->DynStrTab+h->Dynamic._64[i].d_un.d_val + h->delta); + } + if(h->Dynamic._64[i].d_tag==DT_RUNPATH) { + printf_dump(LOG_NEVER, " RUNPATH : %s\n", h->DynStrTab+h->Dynamic._64[i].d_un.d_val + h->delta); } - printf_dump(LOG_NEVER, "=====ELF Dump DT_RPATH/DT_RUNPATH\n"); } + printf_dump(LOG_NEVER, "=====ELF Dump DT_RPATH/DT_RUNPATH\n"); + } } #ifndef BOX32 @@ -375,7 +373,7 @@ void DumpRelTable32(elfheader_t *h, int cnt, Elf32_Rel *rel, const char* name) { #endif void DumpRelTable64(elfheader_t *h, int cnt, Elf64_Rel *rel, const char* name) { - if(box64_dump) { + if (BOX64ENV(dump)) { const char* elfname = ElfName(h); printf_dump(LOG_NEVER, "ELF Dump %s Table(%d) @%p\n", name, cnt, rel); for (int i = 0; i<cnt; ++i) @@ -390,7 +388,7 @@ void DumpRelATable32(elfheader_t *h, int cnt, Elf32_Rela *rela, const char* name #endif void DumpRelATable64(elfheader_t *h, int cnt, Elf64_Rela *rela, const char* name) { - if(box64_dump && h->rela) { + if (BOX64ENV(dump) && h->rela) { const char* elfname = ElfName(h); printf_dump(LOG_NEVER, "ELF Dump %s Table(%d) @%p\n", name, cnt, rela); for (int i = 0; i<cnt; ++i) @@ -407,7 +405,7 @@ void DumpRelRTable32(elfheader_t *h, int cnt, Elf32_Relr *relr, const char *name #endif void DumpRelRTable64(elfheader_t *h, int cnt, Elf64_Relr *relr, const char *name) { - if(box64_dump && h->relr) { + if (BOX64ENV(dump) && h->relr) { const char* elfname = ElfName(h); printf_dump(LOG_NEVER, "ELF Dump %s Table(%d) @%p\n", name, cnt, relr); for (int i = 0; i<cnt; ++i) diff --git a/src/elfs/elfload_dump32.c b/src/elfs/elfload_dump32.c index b6d8df41..43b6b2a2 100755 --- a/src/elfs/elfload_dump32.c +++ b/src/elfs/elfload_dump32.c @@ -223,7 +223,7 @@ static const char* IdxSymName(elfheader_t *h, int sym) void DumpMainHeader32(Elf32_Ehdr *header, elfheader_t *h) { - if(box64_dump) { + if (BOX64ENV(dump)) { printf_dump(LOG_NEVER, "ELF Dump main header\n"); printf_dump(LOG_NEVER, " Entry point = %p\n", from_ptrv(header->e_entry)); printf_dump(LOG_NEVER, " Program Header table offset = %p\n", from_ptrv(header->e_phoff)); @@ -249,7 +249,7 @@ void DumpMainHeader32(Elf32_Ehdr *header, elfheader_t *h) void DumpSymTab32(elfheader_t *h) { - if(box64_dump && h->SymTab._32) { + if (BOX64ENV(dump) && h->SymTab._32) { const char* name = ElfName(h); printf_dump(LOG_NEVER, "ELF Dump SymTab(%d)=\n", h->numSymTab); for (int i=0; i<h->numSymTab; ++i) @@ -262,7 +262,7 @@ void DumpSymTab32(elfheader_t *h) void DumpDynamicSections32(elfheader_t *h) { - if(box64_dump && h->Dynamic._32) { + if (BOX64ENV(dump) && h->Dynamic._32) { printf_dump(LOG_NEVER, "ELF Dump Dynamic(%d)=\n", h->numDynamic); for (int i=0; i<h->numDynamic; ++i) printf_dump(LOG_NEVER, " Dynamic %04d : %s\n", i, DumpDynamic(h->Dynamic._32+i)); @@ -272,7 +272,7 @@ void DumpDynamicSections32(elfheader_t *h) void DumpDynSym32(elfheader_t *h) { - if(box64_dump && h->DynSym._32) { + if (BOX64ENV(dump) && h->DynSym._32) { const char* name = ElfName(h); printf_dump(LOG_NEVER, "ELF Dump DynSym(%d)=\n", h->numDynSym); for (int i=0; i<h->numDynSym; ++i) { @@ -285,7 +285,7 @@ void DumpDynSym32(elfheader_t *h) void DumpDynamicNeeded32(elfheader_t *h) { - if(box64_dump && h->DynStrTab) { + if (BOX64ENV(dump) && h->DynStrTab) { printf_dump(LOG_NEVER, "ELF Dump DT_NEEDED=====\n"); for (int i=0; i<h->numDynamic; ++i) if(h->Dynamic._32[i].d_tag==DT_NEEDED) { @@ -297,7 +297,7 @@ void 
DumpDynamicNeeded32(elfheader_t *h) void DumpDynamicRPath32(elfheader_t *h) { - if(box64_dump && h->DynStrTab) { + if (BOX64ENV(dump) && h->DynStrTab) { printf_dump(LOG_NEVER, "ELF Dump DT_RPATH/DT_RUNPATH=====\n"); for (int i=0; i<h->numDynamic; ++i) { if(h->Dynamic._32[i].d_tag==DT_RPATH) { @@ -313,7 +313,7 @@ void DumpDynamicRPath32(elfheader_t *h) void DumpRelTable32(elfheader_t *h, int cnt, Elf32_Rel *rel, const char* name) { - if(box64_dump) { + if (BOX64ENV(dump)) { const char* elfname = ElfName(h); printf_dump(LOG_NEVER, "ELF Dump %s Table(%d) @%p\n", name, cnt, rel); for (int i = 0; i<cnt; ++i) @@ -326,7 +326,7 @@ void DumpRelTable32(elfheader_t *h, int cnt, Elf32_Rel *rel, const char* name) void DumpRelATable32(elfheader_t *h, int cnt, Elf32_Rela *rela, const char* name) { - if(box64_dump && h->rela) { + if (BOX64ENV(dump) && h->rela) { const char* elfname = ElfName(h); printf_dump(LOG_NEVER, "ELF Dump %s Table(%d) @%p\n", name, cnt, rela); for (int i = 0; i<cnt; ++i) @@ -340,7 +340,7 @@ void DumpRelATable32(elfheader_t *h, int cnt, Elf32_Rela *rela, const char* name void DumpRelRTable32(elfheader_t *h, int cnt, Elf32_Relr *relr, const char* name) { - if(box64_dump && h->relr) { + if (BOX64ENV(dump) && h->relr) { const char* elfname = ElfName(h); printf_dump(LOG_NEVER, "ELF Dump %s Table(%d) @%p\n", name, cnt, relr); for (int i = 0; i<cnt; ++i) diff --git a/src/elfs/elfloader.c b/src/elfs/elfloader.c index 9d54ac10..3a979ed5 100644 --- a/src/elfs/elfloader.c +++ b/src/elfs/elfloader.c @@ -214,7 +214,7 @@ int AllocLoadElfMemory(box64context_t* context, elfheader_t* head, int mainbin) return AllocLoadElfMemory32(context, head, mainbin); uintptr_t offs = 0; loadProtectionFromMap(); - int log_level = box64_load_addr?LOG_INFO:LOG_DEBUG; + int log_level = BOX64ENV(load_addr)?LOG_INFO:LOG_DEBUG; head->multiblock_n = 0; // count PHEntrie with LOAD uintptr_t max_align = head->align-1; @@ -223,10 +223,10 @@ int AllocLoadElfMemory(box64context_t* context, elfheader_t* head, int mainbin) ++head->multiblock_n; } - if(!head->vaddr && box64_load_addr) { - offs = (uintptr_t)find47bitBlockNearHint((void*)((box64_load_addr+max_align)&~max_align), head->memsz+head->align, max_align); - box64_load_addr = offs + head->memsz; - box64_load_addr = (box64_load_addr+0x10ffffffLL)&~0xffffffLL; + if(!head->vaddr && BOX64ENV(load_addr)) { + offs = (uintptr_t)find47bitBlockNearHint((void*)((BOX64ENV(load_addr)+max_align)&~max_align), head->memsz+head->align, max_align); + BOX64ENV(load_addr) = offs + head->memsz; + BOX64ENV(load_addr) = (BOX64ENV(load_addr)+0x10ffffffLL)&~0xffffffLL; } if(!offs && !head->vaddr) offs = (uintptr_t)find47bitBlockElf(head->memsz+head->align, mainbin, max_align); // limit to 47bits... 
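
The AllocLoadElfMemory hunk just above (and its 32-bit counterpart further down) both reads and writes BOX64ENV(load_addr) on either side of an assignment, so the macro has to expand to an lvalue on the shared env structure rather than to a getter call. The definition of BOX64ENV() and the full layout of box64env_t are not part of this diff; the sketch below only assumes a plain struct-member expansion, to illustrate why "BOX64ENV(load_addr) = offs + head->memsz;" compiles, and what the follow-up "(x + 0x10ffffffLL) & ~0xffffffLL" arithmetic does: it pushes the hint at least 256MB past the end of the freshly mapped image and rounds it up to a 16MB boundary, presumably to leave a gap between successive images.

/* Illustrative sketch only -- BOX64ENV() and the box64env_t layout below are
 * assumptions, not the definitions from env.h; the diff only shows usage. */
#include <stdint.h>
#include <stdio.h>

typedef struct {
    uintptr_t load_addr;   /* next preferred mapping address (0 = disabled) */
    int       dynarec;     /* other settings elided */
} box64env_t;

static box64env_t box64env = { 0 };

/* Expanding to a struct member keeps the macro usable as an lvalue,
 * which is what the elfloader hunks rely on. */
#define BOX64ENV(name) (box64env.name)

int main(void)
{
    BOX64ENV(load_addr) = 0x40000000u;   /* pretend an ELF image was just placed here */
    uintptr_t memsz = 0x345678;          /* hypothetical image size */

    BOX64ENV(load_addr) += memsz;
    /* Same arithmetic as the loader: add 256MB+16MB-1, then clear the low
     * 24 bits, i.e. round up to a 16MB boundary at least 256MB further on. */
    BOX64ENV(load_addr) = (BOX64ENV(load_addr) + 0x10ffffffLL) & ~0xffffffLL;

    printf("next load hint: %p\n", (void*)BOX64ENV(load_addr));   /* 0x51000000 */
    return 0;
}
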
@@ -393,7 +393,7 @@ int AllocLoadElfMemory(box64context_t* context, elfheader_t* head, int mainbin) mprotect((void*)paddr, asize, prot); } #ifdef DYNAREC - if(box64_dynarec && (e->p_flags & PF_X)) { + if(BOX64ENV(dynarec) && (e->p_flags & PF_X)) { dynarec_log(LOG_DEBUG, "Add ELF eXecutable Memory %p:%p\n", head->multiblocks[n].p, (void*)head->multiblocks[n].asize); addDBFromAddressRange((uintptr_t)head->multiblocks[n].p, head->multiblocks[n].asize); } @@ -433,7 +433,7 @@ void FreeElfMemory(elfheader_t* head) #ifdef DYNAREC for(int i=0; i<head->multiblock_n; ++i) { dynarec_log(LOG_INFO, "Free DynaBlocks %p-%p for %s\n", head->multiblocks[i].p, head->multiblocks[i].p+head->multiblocks[i].asize, head->path); - if(box64_dynarec) + if(BOX64ENV(dynarec)) cleanDBFromAddressRange((uintptr_t)head->multiblocks[i].p, head->multiblocks[i].asize, 1); freeProtection((uintptr_t)head->multiblocks[i].p, head->multiblocks[i].asize); } @@ -948,7 +948,7 @@ uintptr_t GetEntryPoint(lib_t* maplib, elfheader_t* h) (void)maplib; uintptr_t ep = h->entrypoint + h->delta; printf_log(LOG_DEBUG, "Entry Point is %p\n", (void*)ep); - if(box64_dump) { + if (BOX64ENV(dump)) { printf_dump(LOG_NEVER, "(short) Dump of Entry point\n"); int sz = 64; uintptr_t lastbyte = GetLastByte(h); @@ -980,10 +980,10 @@ void AddSymbols(lib_t *maplib, elfheader_t* h) if(box64_is32bits) { AddSymbols32(maplib, h); } else { - //if(box64_dump && h->hash) old_elf_hash_dump(h); - //if(box64_dump && h->gnu_hash) new_elf_hash_dump(h); - if(box64_dump && h->DynSym._64) DumpDynSym64(h); - if(h==my_context->elfs[0]) + // if(BOX64ENV(dump) && h->hash) old_elf_hash_dump(h); + // if(BOX64ENV(dump) && h->gnu_hash) new_elf_hash_dump(h); + if (BOX64ENV(dump) && h->DynSym._64) DumpDynSym64(h); + if (h==my_context->elfs[0]) GrabX64CopyMainElfReloc(h); } #ifndef STATICBUILD @@ -1102,7 +1102,7 @@ int LoadNeededLibs(elfheader_t* h, lib_t *maplib, int local, int bindnow, int de // TODO: Add LD_LIBRARY_PATH and RPATH handling if(AddNeededLib(maplib, local, bindnow, deepbind, h->needed, h, box64, emu)) { printf_log(LOG_INFO, "Error loading one of needed lib\n"); - if(!allow_missing_libs) + if(!BOX64ENV(allow_missing_libs)) return 1; //error... 
} return 0; @@ -1460,7 +1460,7 @@ dynablock_t* GetDynablocksFromAddress(box64context_t *context, uintptr_t addr) if(ret) { return ret; }*/ - if(box64_dynarec_forced) { + if(BOX64ENV(dynarec_forced)) { addDBFromAddressRange(addr, 1); return getDB(addr); } diff --git a/src/elfs/elfloader32.c b/src/elfs/elfloader32.c index 2fbef7d5..513eec4d 100644 --- a/src/elfs/elfloader32.c +++ b/src/elfs/elfloader32.c @@ -118,9 +118,9 @@ static void GrabX32CopyMainElfReloc(elfheader_t* head) void checkHookedSymbols(elfheader_t* h); void AddSymbols32(lib_t *maplib, elfheader_t* h) { - //if(box64_dump && h->hash) old_elf_hash_dump(h); - //if(box64_dump && h->gnu_hash) new_elf_hash_dump(h); - if(box64_dump && h->DynSym._32) DumpDynSym32(h); + // if(BOX64ENV(dump) && h->hash) old_elf_hash_dump(h); + // if(BOX64ENV(dump) && h->gnu_hash) new_elf_hash_dump(h); + if (BOX64ENV(dump) && h->DynSym._32) DumpDynSym32(h); if(h==my_context->elfs[0]) GrabX32CopyMainElfReloc(h); #ifndef STATICBUILD @@ -132,7 +132,7 @@ int AllocLoadElfMemory32(box64context_t* context, elfheader_t* head, int mainbin { ptr_t offs = 0; loadProtectionFromMap(); - int log_level = box64_load_addr?LOG_INFO:LOG_DEBUG; + int log_level = BOX64ENV(load_addr)?LOG_INFO:LOG_DEBUG; head->multiblock_n = 0; // count PHEntrie with LOAD uintptr_t max_align = (box64_pagesize-1); @@ -141,10 +141,10 @@ int AllocLoadElfMemory32(box64context_t* context, elfheader_t* head, int mainbin ++head->multiblock_n; } - if(!head->vaddr && box64_load_addr) { - offs = to_ptrv(find31bitBlockNearHint((void*)box64_load_addr, head->memsz, max_align)); - box64_load_addr = offs + head->memsz; - box64_load_addr = (box64_load_addr+0x10ffffff)&~0xffffff; + if(!head->vaddr && BOX64ENV(load_addr)) { + offs = to_ptrv(find31bitBlockNearHint((void*)BOX64ENV(load_addr), head->memsz, max_align)); + BOX64ENV(load_addr) = offs + head->memsz; + BOX64ENV(load_addr) = (BOX64ENV(load_addr)+0x10ffffff)&~0xffffff; } if(!offs && !head->vaddr) offs = (uintptr_t)find31bitBlockElf(head->memsz, mainbin, max_align); @@ -311,7 +311,7 @@ int AllocLoadElfMemory32(box64context_t* context, elfheader_t* head, int mainbin mprotect((void*)paddr, asize, prot); } #ifdef DYNAREC - if(box64_dynarec && (e->p_flags & PF_X)) { + if(BOX64ENV(dynarec) && (e->p_flags & PF_X)) { dynarec_log(LOG_DEBUG, "Add ELF eXecutable Memory %p:%p\n", head->multiblocks[n].p, (void*)head->multiblocks[n].asize); addDBFromAddressRange((uintptr_t)head->multiblocks[n].p, head->multiblocks[n].asize); } diff --git a/src/elfs/elfparser.c b/src/elfs/elfparser.c index 618b55d0..7f88a8a6 100644 --- a/src/elfs/elfparser.c +++ b/src/elfs/elfparser.c @@ -188,14 +188,14 @@ elfheader_t* ParseElfHeader64(FILE* f, const char* name, int exec) FreeElfHeader(&h); return NULL; } - if(box64_dump) DumpMainHeader64(&header, h); + if (BOX64ENV(dump)) DumpMainHeader64(&header, h); LoadNamedSection(f, h->SHEntries._64, h->numSHEntries, h->SHStrTab, ".strtab", "SymTab Strings", SHT_STRTAB, (void**)&h->StrTab, NULL); LoadNamedSection(f, h->SHEntries._64, h->numSHEntries, h->SHStrTab, ".symtab", "SymTab", SHT_SYMTAB, (void**)&h->SymTab._64, &h->numSymTab); - if(box64_dump && h->SymTab._64) DumpSymTab64(h); + if (BOX64ENV(dump) && h->SymTab._64) DumpSymTab64(h); LoadNamedSection(f, h->SHEntries._64, h->numSHEntries, h->SHStrTab, ".dynamic", "Dynamic", SHT_DYNAMIC, (void**)&h->Dynamic._64, &h->numDynamic); - if(box64_dump && h->Dynamic._64) DumpDynamicSections64(h); + if (BOX64ENV(dump) && h->Dynamic._64) DumpDynamicSections64(h); // grab DT_REL & DT_RELA stuffs // also 
grab the DT_STRTAB string table { @@ -393,7 +393,7 @@ elfheader_t* ParseElfHeader64(FILE* f, const char* name, int exec) LoadNamedSection(f, h->SHEntries._64, h->numSHEntries, h->SHStrTab, ".dynstr", "DynSym Strings", SHT_STRTAB, (void**)&h->DynStr, NULL); LoadNamedSection(f, h->SHEntries._64, h->numSHEntries, h->SHStrTab, ".dynsym", "DynSym", SHT_DYNSYM, (void**)&h->DynSym, &h->numDynSym); } - + return h; } diff --git a/src/elfs/elfparser32.c b/src/elfs/elfparser32.c index c12ab40d..f8a0dca0 100755 --- a/src/elfs/elfparser32.c +++ b/src/elfs/elfparser32.c @@ -178,14 +178,14 @@ elfheader_t* ParseElfHeader32(FILE* f, const char* name, int exec) FreeElfHeader(&h); return NULL; } - if(box64_dump) DumpMainHeader32(&header, h); + if (BOX64ENV(dump)) DumpMainHeader32(&header, h); LoadNamedSection(f, h->SHEntries._32, h->numSHEntries, h->SHStrTab, ".strtab", "SymTab Strings", SHT_STRTAB, (void**)&h->StrTab, NULL); LoadNamedSection(f, h->SHEntries._32, h->numSHEntries, h->SHStrTab, ".symtab", "SymTab", SHT_SYMTAB, (void**)&h->SymTab._32, &h->numSymTab); - if(box64_dump && h->SymTab._32) DumpSymTab32(h); + if (BOX64ENV(dump) && h->SymTab._32) DumpSymTab32(h); LoadNamedSection(f, h->SHEntries._32, h->numSHEntries, h->SHStrTab, ".dynamic", "Dynamic", SHT_DYNAMIC, (void**)&h->Dynamic._32, &h->numDynamic); - if(box64_dump && h->Dynamic._32) DumpDynamicSections32(h); + if (BOX64ENV(dump) && h->Dynamic._32) DumpDynamicSections32(h); // grab DT_REL & DT_RELA stuffs // also grab the DT_STRTAB string table { diff --git a/src/emu/x64emu.c b/src/emu/x64emu.c index e35f2608..daaf3a3a 100644 --- a/src/emu/x64emu.c +++ b/src/emu/x64emu.c @@ -415,7 +415,7 @@ const char* DumpCPURegs(x64emu_t* emu, uintptr_t ip, int is32bits) char tmp[160]; buff[0] = '\0'; #ifdef HAVE_TRACE - if(trace_emm) { + if(BOX64ENV(trace_emm)) { // do emm reg if needed for(int i=0; i<8; ++i) { sprintf(tmp, "mm%d:%016lx", i, emu->mmx[i].q); @@ -423,10 +423,10 @@ const char* DumpCPURegs(x64emu_t* emu, uintptr_t ip, int is32bits) if ((i&3)==3) strcat(buff, "\n"); else strcat(buff, " "); } } - if(trace_xmm) { + if(BOX64ENV(trace_xmm)) { // do xmm reg if needed for(int i=0; i<(is32bits?8:16); ++i) { - if (trace_regsdiff && (emu->old_xmm[i].q[0] != emu->xmm[i].q[0] || emu->old_xmm[i].q[1] != emu->xmm[i].q[1])) { + if (BOX64ENV(trace_regsdiff) && (emu->old_xmm[i].q[0] != emu->xmm[i].q[0] || emu->old_xmm[i].q[1] != emu->xmm[i].q[1])) { sprintf(tmp, "\e[1;35m%02d:%016lx-%016lx\e[m", i, emu->xmm[i].q[1], emu->xmm[i].q[0]); emu->old_xmm[i].q[0] = emu->xmm[i].q[0]; emu->old_xmm[i].q[1] = emu->xmm[i].q[1]; @@ -434,8 +434,8 @@ const char* DumpCPURegs(x64emu_t* emu, uintptr_t ip, int is32bits) sprintf(tmp, "%02d:%016lx-%016lx", i, emu->xmm[i].q[1], emu->xmm[i].q[0]); } strcat(buff, tmp); - if(box64_avx) { - if (trace_regsdiff && (emu->old_ymm[i].q[0] != emu->ymm[i].q[0] || emu->old_ymm[i].q[1] != emu->ymm[i].q[1])) { + if(BOX64ENV(avx)) { + if (BOX64ENV(trace_regsdiff) && (emu->old_ymm[i].q[0] != emu->ymm[i].q[0] || emu->old_ymm[i].q[1] != emu->ymm[i].q[1])) { sprintf(tmp, "\e[1;35m-%016lx-%016lx\e[m", emu->ymm[i].q[1], emu->ymm[i].q[0]); emu->old_ymm[i].q[0] = emu->ymm[i].q[0]; emu->old_ymm[i].q[1] = emu->ymm[i].q[1]; @@ -444,7 +444,7 @@ const char* DumpCPURegs(x64emu_t* emu, uintptr_t ip, int is32bits) } strcat(buff, tmp); } - if(box64_avx) + if(BOX64ENV(avx)) if ((i&1)==1) strcat(buff, "\n"); else strcat(buff, " "); else if ((i&3)==3) strcat(buff, "\n"); else strcat(buff, " "); @@ -477,7 +477,7 @@ const char* DumpCPURegs(x64emu_t* emu, uintptr_t ip, int 
is32bits) if(is32bits) for (int i=_AX; i<=_RDI; ++i) { #ifdef HAVE_TRACE - if (trace_regsdiff && (emu->regs[i].dword[0] != emu->oldregs[i].q[0])) { + if (BOX64ENV(trace_regsdiff) && (emu->regs[i].dword[0] != emu->oldregs[i].q[0])) { sprintf(tmp, "\e[1;35m%s=%08x\e[m ", regname32[i], emu->regs[i].dword[0]); emu->oldregs[i].q[0] = emu->regs[i].dword[0]; } else { @@ -505,7 +505,7 @@ const char* DumpCPURegs(x64emu_t* emu, uintptr_t ip, int is32bits) else for (int i=_AX; i<=_R15; ++i) { #ifdef HAVE_TRACE - if (trace_regsdiff && (emu->regs[i].q[0] != emu->oldregs[i].q[0])) { + if (BOX64ENV(trace_regsdiff) && (emu->regs[i].q[0] != emu->oldregs[i].q[0])) { sprintf(tmp, "\e[1;35m%s=%016lx\e[m ", regname[i], emu->regs[i].q[0]); emu->oldregs[i].q[0] = emu->regs[i].q[0]; } else { diff --git a/src/emu/x64int3.c b/src/emu/x64int3.c index 9b468279..44d47a2f 100644 --- a/src/emu/x64int3.c +++ b/src/emu/x64int3.c @@ -110,17 +110,17 @@ void x64Int3(x64emu_t* emu, uintptr_t* addr) elfheader_t *h = FindElfAddress(my_context, *(uintptr_t*)(R_ESP)); int have_trace = 0; if(h && strstr(ElfName(h), "libMiles")) have_trace = 1;*/ - if(box64_log>=LOG_DEBUG || cycle_log) { + if(BOX64ENV(log)>=LOG_DEBUG || BOX64ENV(rolling_log)) { int tid = GetTID(); char t_buff[256] = "\0"; char buff2[64] = "\0"; char buff3[64] = "\0"; int cycle_line = my_context->current_line; - if(cycle_log) { - my_context->current_line = (my_context->current_line+1)%cycle_log; + if(BOX64ENV(rolling_log)) { + my_context->current_line = (my_context->current_line+1)%BOX64ENV(rolling_log); } - char* buff = cycle_log?my_context->log_call[cycle_line]:t_buff; - char* buffret = cycle_log?my_context->log_ret[cycle_line]:NULL; + char* buff = BOX64ENV(rolling_log)?my_context->log_call[cycle_line]:t_buff; + char* buffret = BOX64ENV(rolling_log)?my_context->log_ret[cycle_line]:NULL; if(buffret) buffret[0] = '\0'; char *tmp; int post = 0; @@ -132,7 +132,7 @@ void x64Int3(x64emu_t* emu, uintptr_t* addr) if(!s) s = GetNativeName((void*)a); if(a==(uintptr_t)PltResolver64) { - if(cycle_log) { + if(BOX64ENV(rolling_log)) { uintptr_t addr = *((uint64_t*)(R_RSP)); int slot = *((uint64_t*)(R_RSP+8)); elfheader_t *h = (elfheader_t*)addr; @@ -320,7 +320,7 @@ void x64Int3(x64emu_t* emu, uintptr_t* addr) } else { snprintf(buff, 256, "%04d|%p: Calling %s(0x%lX, 0x%lX, 0x%lX, ...)", tid, *(void**)(R_RSP), s, R_RDI, R_RSI, R_RDX); } - if(!cycle_log) { + if(!BOX64ENV(rolling_log)) { mutex_lock(&emu->context->mutex_trace); printf_log(LOG_NONE, "%s =>", buff); mutex_unlock(&emu->context->mutex_trace); @@ -372,7 +372,7 @@ void x64Int3(x64emu_t* emu, uintptr_t* addr) else if(perr==3 && (S_RAX)==-1) snprintf(buff3, 64, " (errno=%d:\"%s\")", errno, strerror(errno)); - if(cycle_log) + if(BOX64ENV(rolling_log)) snprintf(buffret, 128, "0x%lX%s%s", R_RAX, buff2, buff3); else { mutex_lock(&emu->context->mutex_trace); @@ -389,7 +389,7 @@ void x64Int3(x64emu_t* emu, uintptr_t* addr) printf_log(LOG_DEBUG, "%04d|Warning, x64int3 with no CC opcode at %p?\n", GetTID(), (void*)R_RIP); return; } - if(!box64_ignoreint3 && my_context->signals[SIGTRAP]) { + if(!BOX64ENV(ignoreint3) && my_context->signals[SIGTRAP]) { R_RIP = *addr; // update RIP emit_signal(emu, SIGTRAP, NULL, 3); } else { @@ -404,12 +404,12 @@ int GetTID() return syscall(SYS_gettid); } -void print_cycle_log(int loglevel) { - if(cycle_log) { +void print_rolling_log(int loglevel) { + if(BOX64ENV(rolling_log)) { printf_log(loglevel, "Last calls\n"); - int j = (my_context->current_line+1)%cycle_log; - for (int i=0; i<cycle_log; ++i) { 
- int k = (i+j)%cycle_log; + int j = (my_context->current_line+1)%BOX64ENV(rolling_log); + for (int i=0; i<BOX64ENV(rolling_log); ++i) { + int k = (i+j)%BOX64ENV(rolling_log); if(my_context->log_call[k][0]) { printf_log(loglevel, "%s => return %s\n", my_context->log_call[k], my_context->log_ret[k]); } @@ -423,4 +423,4 @@ void x86Int3(x64emu_t* emu, uintptr_t* addr) printf_log(LOG_NONE, "Error: Calling 32bits wrapped function without box32 support built in\n"); abort(); } -#endif \ No newline at end of file +#endif diff --git a/src/emu/x64primop.c b/src/emu/x64primop.c index 4898a0d8..129069ad 100644 --- a/src/emu/x64primop.c +++ b/src/emu/x64primop.c @@ -103,8 +103,9 @@ #include "x64emu_private.h" #include "x64run_private.h" +#include "env.h" -extern int box64_dynarec_test; +extern box64env_t box64env; /*------------------------- Global Variables ------------------------------*/ @@ -780,7 +781,7 @@ uint8_t rol8(x64emu_t *emu, uint8_t d, uint8_t s) /* OF flag is set if s == 1; OF = CF _XOR_ MSB of result */ if(s == 1) { CONDITIONAL_SET_FLAG((d + (d >> 7)) & 1, F_OF); - } else if(box64_dynarec_test) { + } else if(BOX64ENV(dynarec_test)) { CLEAR_FLAG(F_OF); } @@ -805,7 +806,7 @@ uint16_t rol16(x64emu_t *emu, uint16_t d, uint8_t s) /* OF flag is set if s == 1; OF = CF _XOR_ MSB of result */ if(s == 1) { CONDITIONAL_SET_FLAG((d + (d >> 15)) & 1, F_OF); - } else if(box64_dynarec_test) { + } else if(BOX64ENV(dynarec_test)) { CLEAR_FLAG(F_OF); } @@ -830,7 +831,7 @@ uint32_t rol32(x64emu_t *emu, uint32_t d, uint8_t s) /* OF flag is set if s == 1; OF = CF _XOR_ MSB of result */ if(s == 1) { CONDITIONAL_SET_FLAG((d + (d >> 31)) & 1, F_OF); - } else if(box64_dynarec_test) { + } else if(BOX64ENV(dynarec_test)) { CLEAR_FLAG(F_OF); } @@ -855,7 +856,7 @@ uint64_t rol64(x64emu_t *emu, uint64_t d, uint8_t s) /* OF flag is set if s == 1; OF = CF _XOR_ MSB of result */ if(s == 1) { CONDITIONAL_SET_FLAG((d + (d >> 63)) & 1, F_OF); - } else if(box64_dynarec_test) { + } else if(BOX64ENV(dynarec_test)) { CLEAR_FLAG(F_OF); } @@ -884,7 +885,7 @@ uint8_t ror8(x64emu_t *emu, uint8_t d, uint8_t s) /* OF flag is set if s == 1; OF = MSB _XOR_ (M-1)SB of result */ if(s == 1) { CONDITIONAL_SET_FLAG(XOR2(d >> 6), F_OF); - } else if(box64_dynarec_test) { + } else if(BOX64ENV(dynarec_test)) { CLEAR_FLAG(F_OF); } @@ -909,7 +910,7 @@ uint16_t ror16(x64emu_t *emu, uint16_t d, uint8_t s) /* OF flag is set if s == 1; OF = MSB _XOR_ (M-1)SB of result */ if(s == 1) { CONDITIONAL_SET_FLAG(XOR2(d >> 14), F_OF); - } else if(box64_dynarec_test) { + } else if(BOX64ENV(dynarec_test)) { CLEAR_FLAG(F_OF); } @@ -934,7 +935,7 @@ uint32_t ror32(x64emu_t *emu, uint32_t d, uint8_t s) /* OF flag is set if s == 1; OF = MSB _XOR_ (M-1)SB of result */ if(s == 1) { CONDITIONAL_SET_FLAG(XOR2(d >> 30), F_OF); - } else if(box64_dynarec_test) { + } else if(BOX64ENV(dynarec_test)) { CLEAR_FLAG(F_OF); } @@ -959,7 +960,7 @@ uint64_t ror64(x64emu_t *emu, uint64_t d, uint8_t s) /* OF flag is set if s == 1; OF = MSB _XOR_ (M-1)SB of result */ if(s == 1) { CONDITIONAL_SET_FLAG(XOR2(d >> 62), F_OF); - } else if(box64_dynarec_test) { + } else if(BOX64ENV(dynarec_test)) { CLEAR_FLAG(F_OF); } @@ -1009,7 +1010,7 @@ uint16_t shld16 (x64emu_t *emu, uint16_t d, uint16_t fill, uint8_t s) CONDITIONAL_SET_FLAG(PARITY(res & 0xff), F_PF); CLEAR_FLAG(F_OF); } - if(box64_dynarec_test) + if(BOX64ENV(dynarec_test)) CLEAR_FLAG(F_AF); return (uint16_t)res; } @@ -1038,7 +1039,7 @@ uint32_t shld32 (x64emu_t *emu, uint32_t d, uint32_t fill, uint8_t s) } else { CLEAR_FLAG(F_OF); } - 
if(box64_dynarec_test) + if(BOX64ENV(dynarec_test)) CLEAR_FLAG(F_AF); return res; } @@ -1067,7 +1068,7 @@ uint64_t shld64 (x64emu_t *emu, uint64_t d, uint64_t fill, uint8_t s) } else { CLEAR_FLAG(F_OF); } - if(box64_dynarec_test) + if(BOX64ENV(dynarec_test)) CLEAR_FLAG(F_AF); return res; } @@ -1122,7 +1123,7 @@ uint16_t shrd16 (x64emu_t *emu, uint16_t d, uint16_t fill, uint8_t s) CLEAR_FLAG(F_PF); #endif } - if(box64_dynarec_test) + if(BOX64ENV(dynarec_test)) CLEAR_FLAG(F_AF); return (uint16_t)res; } @@ -1151,7 +1152,7 @@ uint32_t shrd32 (x64emu_t *emu, uint32_t d, uint32_t fill, uint8_t s) } else { CLEAR_FLAG(F_OF); } - if(box64_dynarec_test) + if(BOX64ENV(dynarec_test)) CLEAR_FLAG(F_AF); return res; } @@ -1181,7 +1182,7 @@ uint64_t shrd64 (x64emu_t *emu, uint64_t d, uint64_t fill, uint8_t s) } else { CLEAR_FLAG(F_OF); } - if(box64_dynarec_test) + if(BOX64ENV(dynarec_test)) CLEAR_FLAG(F_AF); return res; } @@ -1345,12 +1346,12 @@ void test64(x64emu_t *emu, uint64_t d, uint64_t s) REMARKS: Implements the IDIV instruction and side effects. ****************************************************************************/ -extern int box64_dynarec_test; + void idiv8(x64emu_t *emu, uint8_t s) { int32_t dvd, quot, mod; RESET_FLAGS(emu); - if(box64_dynarec_test) { + if(BOX64ENV(dynarec_test)) { CLEAR_FLAG(F_CF); CLEAR_FLAG(F_AF); CLEAR_FLAG(F_PF); @@ -1379,7 +1380,7 @@ void idiv16(x64emu_t *emu, uint16_t s) { int32_t dvd, quot, mod; RESET_FLAGS(emu); - if(box64_dynarec_test) { + if(BOX64ENV(dynarec_test)) { CLEAR_FLAG(F_CF); CLEAR_FLAG(F_AF); CLEAR_FLAG(F_PF); @@ -1409,7 +1410,7 @@ void idiv32(x64emu_t *emu, uint32_t s) { int64_t dvd, quot, mod; RESET_FLAGS(emu); - if(box64_dynarec_test) { + if(BOX64ENV(dynarec_test)) { CLEAR_FLAG(F_CF); CLEAR_FLAG(F_AF); CLEAR_FLAG(F_PF); @@ -1439,7 +1440,7 @@ void idiv64(x64emu_t *emu, uint64_t s) { __int128 dvd, quot, mod; RESET_FLAGS(emu); - if(box64_dynarec_test) { + if(BOX64ENV(dynarec_test)) { CLEAR_FLAG(F_CF); CLEAR_FLAG(F_AF); CLEAR_FLAG(F_PF); @@ -1472,7 +1473,7 @@ void div8(x64emu_t *emu, uint8_t s) { uint32_t dvd, div, mod; RESET_FLAGS(emu); - if(box64_dynarec_test) { + if(BOX64ENV(dynarec_test)) { CLEAR_FLAG(F_CF); CLEAR_FLAG(F_AF); CLEAR_FLAG(F_PF); @@ -1500,7 +1501,7 @@ void div16(x64emu_t *emu, uint16_t s) { uint32_t dvd, div, mod; RESET_FLAGS(emu); - if(box64_dynarec_test) { + if(BOX64ENV(dynarec_test)) { CLEAR_FLAG(F_CF); CLEAR_FLAG(F_AF); CLEAR_FLAG(F_PF); @@ -1529,7 +1530,7 @@ void div32(x64emu_t *emu, uint32_t s) { uint64_t dvd, div, mod; RESET_FLAGS(emu); - if(box64_dynarec_test) { + if(BOX64ENV(dynarec_test)) { CLEAR_FLAG(F_CF); CLEAR_FLAG(F_AF); CLEAR_FLAG(F_PF); @@ -1558,7 +1559,7 @@ void div64(x64emu_t *emu, uint64_t s) { __int128 dvd, div, mod; RESET_FLAGS(emu); - if(box64_dynarec_test) { + if(BOX64ENV(dynarec_test)) { CLEAR_FLAG(F_CF); CLEAR_FLAG(F_AF); CLEAR_FLAG(F_PF); diff --git a/src/emu/x64run.c b/src/emu/x64run.c index 4a17ab55..c339dcf5 100644 --- a/src/emu/x64run.c +++ b/src/emu/x64run.c @@ -590,7 +590,7 @@ x64emurun: case 0x6E: /* OUTSB DX */ case 0x6F: /* OUTSD DX */ #ifndef TEST_INTERPRETER - if(rex.is32bits && box64_ignoreint3) + if(rex.is32bits && BOX64ENV(ignoreint3)) { } else { emit_signal(emu, SIGSEGV, (void*)R_RIP, 0xbad0); @@ -1872,7 +1872,7 @@ x64emurun: // this is a privilege opcode... 
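
The x64int3.c hunks earlier in this listing replace the cycle_log counter with BOX64ENV(rolling_log) and rename print_cycle_log to print_rolling_log, but the mechanism itself is unchanged: a fixed ring of call/return strings, written at a cursor that advances modulo the configured depth and replayed in a single pass starting just past the cursor. The standalone sketch below mirrors that indexing; LOG_DEPTH, record_call() and the array sizes are illustrative stand-ins, not box64's own structures (the real strings live in my_context->log_call / log_ret, as the hunks show).

/* Minimal standalone sketch of the rolling-log pattern; names and sizes
 * here are illustrative, not box64's. */
#include <stdio.h>

#define LOG_DEPTH 4            /* stands in for BOX64ENV(rolling_log) */

static char log_call[LOG_DEPTH][64];
static char log_ret[LOG_DEPTH][32];
static int  current_line = 0;

static void record_call(const char* call, const char* ret)
{
    int line = current_line;                          /* write at the cursor... */
    current_line = (current_line + 1) % LOG_DEPTH;    /* ...then advance, as x64Int3 does */
    snprintf(log_call[line], sizeof(log_call[line]), "%s", call);
    snprintf(log_ret[line],  sizeof(log_ret[line]),  "%s", ret);
}

static void print_rolling(void)
{
    /* Walk the ring once, starting just past the cursor and skipping empty
     * slots, exactly like print_rolling_log(). */
    int j = (current_line + 1) % LOG_DEPTH;
    for (int i = 0; i < LOG_DEPTH; ++i) {
        int k = (i + j) % LOG_DEPTH;
        if (log_call[k][0])
            printf("%s => return %s\n", log_call[k], log_ret[k]);
    }
}

int main(void)
{
    const char* calls[] = { "open(...)", "read(...)", "close(...)", "mmap(...)", "write(...)" };
    for (int i = 0; i < 5; ++i)
        record_call(calls[i], "0x0");
    print_rolling();   /* only the last LOG_DEPTH entries survive the wrap */
    return 0;
}
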
#ifndef TEST_INTERPRETER F8; - if(rex.is32bits && box64_ignoreint3) + if(rex.is32bits && BOX64ENV(ignoreint3)) {} else emit_signal(emu, SIGSEGV, (void*)R_RIP, 0xbad0); STEP; @@ -1912,7 +1912,7 @@ x64emurun: case 0xEF: /* OUT DX, EAX */ // this is a privilege opcode... #ifndef TEST_INTERPRETER - if(rex.is32bits && box64_ignoreint3) + if(rex.is32bits && BOX64ENV(ignoreint3)) {} else emit_signal(emu, SIGSEGV, (void*)R_RIP, 0xbad0); STEP; @@ -2075,14 +2075,14 @@ x64emurun: break; case 0xFA: /* CLI */ // this is a privilege opcode - if(rex.is32bits && box64_ignoreint3) + if(rex.is32bits && BOX64ENV(ignoreint3)) {} else emit_signal(emu, SIGSEGV, (void*)R_RIP, 0xbad0); STEP; break; case 0xFB: /* STI */ // this is a privilege opcode - if(rex.is32bits && box64_ignoreint3) + if(rex.is32bits && BOX64ENV(ignoreint3)) {} else emit_signal(emu, SIGSEGV, (void*)R_RIP, 0xbad0); STEP; diff --git a/src/emu/x64run0f.c b/src/emu/x64run0f.c index 911d80b1..f156b3e2 100644 --- a/src/emu/x64run0f.c +++ b/src/emu/x64run0f.c @@ -1168,7 +1168,7 @@ uintptr_t Run0F(x64emu_t *emu, rex_t rex, uintptr_t addr, int *step) else CLEAR_FLAG(F_CF); } - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { CLEAR_FLAG(F_OF); CLEAR_FLAG(F_SF); CLEAR_FLAG(F_AF); @@ -1255,7 +1255,7 @@ uintptr_t Run0F(x64emu_t *emu, rex_t rex, uintptr_t addr, int *step) if(MODREG) ED->dword[1] = 0; } - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { CLEAR_FLAG(F_OF); CLEAR_FLAG(F_SF); CLEAR_FLAG(F_AF); @@ -1314,7 +1314,7 @@ uintptr_t Run0F(x64emu_t *emu, rex_t rex, uintptr_t addr, int *step) GETED(0); emu->mxcsr.x32 = ED->dword[0]; #ifndef TEST_INTERPRETER - if(box64_sse_flushto0) + if(BOX64ENV(sse_flushto0)) applyFlushTo0(emu); #endif break; @@ -1337,7 +1337,7 @@ uintptr_t Run0F(x64emu_t *emu, rex_t rex, uintptr_t addr, int *step) case 7: /* CLFLUSH Ed */ _GETED(0); #if defined(DYNAREC) && !defined(TEST_INTERPRETER) - if(box64_dynarec) + if(BOX64ENV(dynarec)) cleanDBFromAddressRange((uintptr_t)ED, 8, 0); #endif break; @@ -1424,7 +1424,7 @@ uintptr_t Run0F(x64emu_t *emu, rex_t rex, uintptr_t addr, int *step) if(MODREG) ED->dword[1] = 0; } - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { CLEAR_FLAG(F_OF); CLEAR_FLAG(F_SF); CLEAR_FLAG(F_AF); @@ -1465,7 +1465,7 @@ uintptr_t Run0F(x64emu_t *emu, rex_t rex, uintptr_t addr, int *step) else CLEAR_FLAG(F_CF); } - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { CLEAR_FLAG(F_OF); CLEAR_FLAG(F_SF); CLEAR_FLAG(F_AF); @@ -1495,7 +1495,7 @@ uintptr_t Run0F(x64emu_t *emu, rex_t rex, uintptr_t addr, int *step) if(MODREG) ED->dword[1] = 0; } - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { CLEAR_FLAG(F_OF); CLEAR_FLAG(F_SF); CLEAR_FLAG(F_AF); @@ -1523,7 +1523,7 @@ uintptr_t Run0F(x64emu_t *emu, rex_t rex, uintptr_t addr, int *step) if(MODREG) ED->dword[1] = 0; } - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { CLEAR_FLAG(F_OF); CLEAR_FLAG(F_SF); CLEAR_FLAG(F_AF); @@ -1551,7 +1551,7 @@ uintptr_t Run0F(x64emu_t *emu, rex_t rex, uintptr_t addr, int *step) if(MODREG) ED->dword[1] = 0; } - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { CLEAR_FLAG(F_OF); CLEAR_FLAG(F_SF); CLEAR_FLAG(F_AF); @@ -1598,7 +1598,7 @@ uintptr_t Run0F(x64emu_t *emu, rex_t rex, uintptr_t addr, int *step) if(MODREG) ED->dword[1] = 0; } - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { CLEAR_FLAG(F_OF); CLEAR_FLAG(F_SF); CLEAR_FLAG(F_AF); diff --git a/src/emu/x64run64.c b/src/emu/x64run64.c index 9d8b920e..0470d92a 100644 --- a/src/emu/x64run64.c +++ b/src/emu/x64run64.c @@ -321,7 
+321,7 @@ uintptr_t Run64(x64emu_t *emu, rex_t rex, int seg, uintptr_t addr) else CLEAR_FLAG(F_CF); } - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { CLEAR_FLAG(F_OF); CLEAR_FLAG(F_SF); CLEAR_FLAG(F_AF); @@ -351,7 +351,7 @@ uintptr_t Run64(x64emu_t *emu, rex_t rex, int seg, uintptr_t addr) if(MODREG) ED->dword[1] = 0; } - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { CLEAR_FLAG(F_OF); CLEAR_FLAG(F_SF); CLEAR_FLAG(F_AF); @@ -379,7 +379,7 @@ uintptr_t Run64(x64emu_t *emu, rex_t rex, int seg, uintptr_t addr) if(MODREG) ED->dword[1] = 0; } - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { CLEAR_FLAG(F_OF); CLEAR_FLAG(F_SF); CLEAR_FLAG(F_AF); @@ -407,7 +407,7 @@ uintptr_t Run64(x64emu_t *emu, rex_t rex, int seg, uintptr_t addr) if(MODREG) ED->dword[1] = 0; } - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { CLEAR_FLAG(F_OF); CLEAR_FLAG(F_SF); CLEAR_FLAG(F_AF); diff --git a/src/emu/x64run660f.c b/src/emu/x64run660f.c index 6bd25132..3889b189 100644 --- a/src/emu/x64run660f.c +++ b/src/emu/x64run660f.c @@ -1717,7 +1717,7 @@ uintptr_t Run660F(x64emu_t *emu, rex_t rex, uintptr_t addr) case 0x78: /* EXTRQ Ex, ib, ib */ // AMD only nextop = F8; - if(!box64_cputype || (nextop&0xC0)>>3) { + if(!BOX64ENV(cputype) || (nextop&0xC0)>>3) { #ifndef TEST_INTERPRETER emit_signal(emu, SIGILL, (void*)R_RIP, 0); #endif @@ -1732,7 +1732,7 @@ uintptr_t Run660F(x64emu_t *emu, rex_t rex, uintptr_t addr) case 0x79: /* EXTRQ Ex, Gx */ // AMD only nextop = F8; - if(!box64_cputype || !(MODREG)) { + if(!BOX64ENV(cputype) || !(MODREG)) { #ifndef TEST_INTERPRETER emit_signal(emu, SIGILL, (void*)R_RIP, 0); #endif @@ -1911,7 +1911,7 @@ uintptr_t Run660F(x64emu_t *emu, rex_t rex, uintptr_t addr) // same code and CLFLUSH, is it ok? _GETED(0); #if defined(DYNAREC) && !defined(TEST_INTERPRETER) - if(box64_dynarec) + if(BOX64ENV(dynarec)) cleanDBFromAddressRange((uintptr_t)ED, 8, 0); #endif break; @@ -1919,7 +1919,7 @@ uintptr_t Run660F(x64emu_t *emu, rex_t rex, uintptr_t addr) // same code and CLFLUSH, is it ok? 
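
In the 0F and 66 0F interpreters above, CLFLUSH (and the prefixed opcode that shares its code path, hence the "is it ok?" comment) calls cleanDBFromAddressRange() whenever BOX64ENV(dynarec) is enabled: flushing a cache line over guest code is treated as a sign that the guest may have rewritten code there, so any translation covering that range should be discarded and rebuilt. The sketch below shows the general shape of such an invalidate-on-flush step; block_t, blocks[] and invalidate_range() are hypothetical stand-ins, since the hunks only show the call site, not the dynablock bookkeeping.

/* Hypothetical invalidate-on-flush bookkeeping; not box64's dynablock API. */
#include <stdint.h>
#include <stddef.h>

typedef struct {
    uintptr_t start;  /* guest address range covered by a translation */
    size_t    size;
    int       valid;  /* 0 once invalidated; host code is freed elsewhere */
} block_t;

#define MAX_BLOCKS 128
static block_t blocks[MAX_BLOCKS];

/* Drop every cached translation overlapping [addr, addr+size). */
static void invalidate_range(uintptr_t addr, size_t size)
{
    for (int i = 0; i < MAX_BLOCKS; ++i) {
        if (!blocks[i].valid)
            continue;
        uintptr_t end = blocks[i].start + blocks[i].size;
        if (addr < end && addr + size > blocks[i].start)
            blocks[i].valid = 0;  /* next execution retranslates this range */
    }
}

/* Shape of the CLFLUSH handling above: only act when the dynarec is on,
 * and invalidate a small fixed span around the flushed address. */
static void on_clflush(uintptr_t guest_addr, int dynarec_enabled)
{
    if (dynarec_enabled)
        invalidate_range(guest_addr, 8);
}

int main(void)
{
    blocks[0] = (block_t){ .start = 0x401000, .size = 64, .valid = 1 };
    on_clflush(0x401020, 1);
    return blocks[0].valid;   /* 0: the overlapping translation was dropped */
}

The same routine is also invoked from FreeElfMemory in the elfloader hunk earlier, so unmapping an executable segment drops its translations through the identical path.
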
_GETED(0); #if defined(DYNAREC) && !defined(TEST_INTERPRETER) - if(box64_dynarec) + if(BOX64ENV(dynarec)) cleanDBFromAddressRange((uintptr_t)ED, 8, 0); #endif break; diff --git a/src/emu/x64run_private.c b/src/emu/x64run_private.c index 5cfc4625..a82fc6f5 100644 --- a/src/emu/x64run_private.c +++ b/src/emu/x64run_private.c @@ -381,7 +381,7 @@ void UpdateFlags(x64emu_t *emu) SET_FLAG(F_CF); SET_FLAG(F_OF); } - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { // to avoid noise in tests CLEAR_FLAG(F_SF); CLEAR_FLAG(F_ZF); @@ -400,7 +400,7 @@ void UpdateFlags(x64emu_t *emu) SET_FLAG(F_CF); SET_FLAG(F_OF); } - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { // to avoid noise in tests CLEAR_FLAG(F_SF); CLEAR_FLAG(F_ZF); @@ -417,7 +417,7 @@ void UpdateFlags(x64emu_t *emu) SET_FLAG(F_CF); SET_FLAG(F_OF); } - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { // to avoid noise in tests CLEAR_FLAG(F_SF); CLEAR_FLAG(F_ZF); @@ -434,7 +434,7 @@ void UpdateFlags(x64emu_t *emu) SET_FLAG(F_CF); SET_FLAG(F_OF); } - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { // to avoid noise in tests CLEAR_FLAG(F_SF); CLEAR_FLAG(F_ZF); @@ -451,7 +451,7 @@ void UpdateFlags(x64emu_t *emu) SET_FLAG(F_CF); SET_FLAG(F_OF); } - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { // to avoid noise in tests CLEAR_FLAG(F_SF); CLEAR_FLAG(F_ZF); @@ -468,7 +468,7 @@ void UpdateFlags(x64emu_t *emu) SET_FLAG(F_CF); SET_FLAG(F_OF); } - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { // to avoid noise in tests CLEAR_FLAG(F_SF); CLEAR_FLAG(F_ZF); @@ -484,7 +484,7 @@ void UpdateFlags(x64emu_t *emu) SET_FLAG(F_CF); SET_FLAG(F_OF); } - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { // to avoid noise in tests CLEAR_FLAG(F_SF); CLEAR_FLAG(F_ZF); @@ -500,7 +500,7 @@ void UpdateFlags(x64emu_t *emu) SET_FLAG(F_CF); SET_FLAG(F_OF); } - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { // to avoid noise in tests CLEAR_FLAG(F_SF); CLEAR_FLAG(F_ZF); @@ -589,7 +589,7 @@ void UpdateFlags(x64emu_t *emu) } else { CLEAR_FLAG(F_OF); } - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { CLEAR_FLAG(F_AF); } } @@ -607,7 +607,7 @@ void UpdateFlags(x64emu_t *emu) } else { CLEAR_FLAG(F_OF); } - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { CLEAR_FLAG(F_AF); } } @@ -626,7 +626,7 @@ void UpdateFlags(x64emu_t *emu) } else { CLEAR_FLAG(F_OF); } - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { CLEAR_FLAG(F_AF); } } @@ -644,7 +644,7 @@ void UpdateFlags(x64emu_t *emu) } else { CLEAR_FLAG(F_OF); } - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { CLEAR_FLAG(F_AF); } } @@ -656,9 +656,9 @@ void UpdateFlags(x64emu_t *emu) CONDITIONAL_SET_FLAG(!emu->res.u8, F_ZF); CONDITIONAL_SET_FLAG(PARITY(emu->res.u8), F_PF); CONDITIONAL_SET_FLAG(emu->res.u8 & 0x80, F_SF); - if(emu->op2.u8==1 || box64_dynarec_test) + if (emu->op2.u8 == 1 || BOX64ENV(dynarec_test)) CLEAR_FLAG(F_OF); - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { CLEAR_FLAG(F_AF); } } @@ -670,9 +670,9 @@ void UpdateFlags(x64emu_t *emu) CONDITIONAL_SET_FLAG(!emu->res.u16, F_ZF); CONDITIONAL_SET_FLAG(emu->res.u16 & 0x8000, F_SF); CONDITIONAL_SET_FLAG(PARITY(emu->res.u8), F_PF); - if(emu->op2.u16==1 || box64_dynarec_test) + if (emu->op2.u16 == 1 || BOX64ENV(dynarec_test)) CLEAR_FLAG(F_OF); - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { CLEAR_FLAG(F_AF); } } @@ -684,9 +684,9 @@ void UpdateFlags(x64emu_t *emu) CONDITIONAL_SET_FLAG(!emu->res.u32, F_ZF); CONDITIONAL_SET_FLAG(emu->res.u32 & 0x80000000, 
F_SF); CONDITIONAL_SET_FLAG(PARITY(emu->res.u8), F_PF); - if(emu->op2.u32==1 || box64_dynarec_test) + if (emu->op2.u32 == 1 || BOX64ENV(dynarec_test)) CLEAR_FLAG(F_OF); - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { CLEAR_FLAG(F_AF); } } @@ -698,9 +698,9 @@ void UpdateFlags(x64emu_t *emu) CONDITIONAL_SET_FLAG(!emu->res.u64, F_ZF); CONDITIONAL_SET_FLAG(emu->res.u64 & 0x8000000000000000LL, F_SF); CONDITIONAL_SET_FLAG(PARITY(emu->res.u8), F_PF); - if(emu->op2.u64==1 || box64_dynarec_test) + if (emu->op2.u64 == 1 || BOX64ENV(dynarec_test)) CLEAR_FLAG(F_OF); - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { CLEAR_FLAG(F_AF); } } @@ -713,7 +713,7 @@ void UpdateFlags(x64emu_t *emu) CONDITIONAL_SET_FLAG(!emu->res.u8, F_ZF); CONDITIONAL_SET_FLAG(emu->res.u8 & 0x80, F_SF); CONDITIONAL_SET_FLAG(PARITY(emu->res.u8), F_PF); - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { CLEAR_FLAG(F_AF); if(cnt>1) { CLEAR_FLAG(F_OF); @@ -732,7 +732,7 @@ void UpdateFlags(x64emu_t *emu) CONDITIONAL_SET_FLAG(!emu->res.u16, F_ZF); CONDITIONAL_SET_FLAG(emu->res.u16 & 0x8000, F_SF); CONDITIONAL_SET_FLAG(PARITY(emu->res.u8), F_PF); - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { CLEAR_FLAG(F_AF); if(cnt>1) { CLEAR_FLAG(F_OF); @@ -751,7 +751,7 @@ void UpdateFlags(x64emu_t *emu) CONDITIONAL_SET_FLAG(!emu->res.u32, F_ZF); CONDITIONAL_SET_FLAG(emu->res.u32 & 0x80000000, F_SF); CONDITIONAL_SET_FLAG(PARITY(emu->res.u8), F_PF); - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { CLEAR_FLAG(F_AF); if(cnt>1) { CLEAR_FLAG(F_OF); @@ -773,7 +773,7 @@ void UpdateFlags(x64emu_t *emu) if (cnt == 1) { CONDITIONAL_SET_FLAG(emu->op1.u64 & 0x8000000000000000LL, F_OF); } - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { CLEAR_FLAG(F_AF); if(cnt>1) { CLEAR_FLAG(F_OF); @@ -1206,7 +1206,6 @@ int printFunctionAddr(uintptr_t nextaddr, const char* text) } #ifdef HAVE_TRACE -extern uint64_t start_cnt; #define PK(a) (*(uint8_t*)(ip+a)) #define PKS(a) (*(int8_t*)(ip+a)) #define PK32(a) (*(int32_t*)((uint8_t*)(ip+a))) @@ -1215,8 +1214,8 @@ extern uint64_t start_cnt; void PrintTrace(x64emu_t* emu, uintptr_t ip, int dynarec) { int is32bits = (emu->segs[_CS]==0x23); - if(start_cnt) --start_cnt; - if(!start_cnt && my_context->dec && ( + if(BOX64ENV(start_cnt)) SET_BOX64ENV(start_cnt, BOX64ENV(start_cnt)-1); + if(!BOX64ENV(start_cnt) && my_context->dec && ( (trace_end == 0) || ((ip >= trace_start) && (ip < trace_end))) ) { int tid = syscall(SYS_gettid); diff --git a/src/emu/x64runavx0f.c b/src/emu/x64runavx0f.c index 1b02989b..2b3f8189 100644 --- a/src/emu/x64runavx0f.c +++ b/src/emu/x64runavx0f.c @@ -529,7 +529,7 @@ uintptr_t RunAVX_0F(x64emu_t *emu, vex_t vex, uintptr_t addr, int *step) GETED(0); emu->mxcsr.x32 = ED->dword[0]; #ifndef TEST_INTERPRETER - if(box64_sse_flushto0) + if(BOX64ENV(sse_flushto0)) applyFlushTo0(emu); #endif break; diff --git a/src/emu/x64runf0.c b/src/emu/x64runf0.c index df381e5a..8a1c5d0a 100644 --- a/src/emu/x64runf0.c +++ b/src/emu/x64runf0.c @@ -290,7 +290,7 @@ uintptr_t RunF0(x64emu_t *emu, rex_t rex, uintptr_t addr) } pthread_mutex_unlock(&my_context->mutex_lock); #endif - if(box64_dynarec_test) { + if(BOX64ENV(dynarec_test)) { CLEAR_FLAG(F_OF); CLEAR_FLAG(F_SF); CLEAR_FLAG(F_AF); @@ -478,7 +478,7 @@ uintptr_t RunF0(x64emu_t *emu, rex_t rex, uintptr_t addr) } pthread_mutex_unlock(&my_context->mutex_lock); #endif - if(box64_dynarec_test) { + if(BOX64ENV(dynarec_test)) { CLEAR_FLAG(F_OF); CLEAR_FLAG(F_SF); CLEAR_FLAG(F_AF); @@ -506,7 +506,7 @@ uintptr_t RunF0(x64emu_t 
*emu, rex_t rex, uintptr_t addr) else CLEAR_FLAG(F_CF); } - if(box64_dynarec_test) { + if(BOX64ENV(dynarec_test)) { CLEAR_FLAG(F_OF); CLEAR_FLAG(F_SF); CLEAR_FLAG(F_AF); @@ -580,7 +580,7 @@ uintptr_t RunF0(x64emu_t *emu, rex_t rex, uintptr_t addr) } pthread_mutex_unlock(&my_context->mutex_lock); #endif - if(box64_dynarec_test) { + if(BOX64ENV(dynarec_test)) { CLEAR_FLAG(F_OF); CLEAR_FLAG(F_SF); CLEAR_FLAG(F_AF); @@ -638,7 +638,7 @@ uintptr_t RunF0(x64emu_t *emu, rex_t rex, uintptr_t addr) } pthread_mutex_unlock(&my_context->mutex_lock); #endif - if(box64_dynarec_test) { + if(BOX64ENV(dynarec_test)) { CLEAR_FLAG(F_OF); CLEAR_FLAG(F_SF); CLEAR_FLAG(F_AF); @@ -692,7 +692,7 @@ uintptr_t RunF0(x64emu_t *emu, rex_t rex, uintptr_t addr) } pthread_mutex_unlock(&my_context->mutex_lock); #endif - if(box64_dynarec_test) { + if(BOX64ENV(dynarec_test)) { CLEAR_FLAG(F_OF); CLEAR_FLAG(F_SF); CLEAR_FLAG(F_AF); diff --git a/src/emu/x64runf20f.c b/src/emu/x64runf20f.c index d9617461..c8b5e545 100644 --- a/src/emu/x64runf20f.c +++ b/src/emu/x64runf20f.c @@ -315,7 +315,7 @@ uintptr_t RunF20F(x64emu_t *emu, rex_t rex, uintptr_t addr, int *step) case 0x78: /* INSERTQ Ex, Gx, ib, ib */ // AMD only nextop = F8; - if(!box64_cputype || !(MODREG)) { + if(!BOX64ENV(cputype) || !(MODREG)) { #ifndef TEST_INTERPRETER emit_signal(emu, SIGILL, (void*)R_RIP, 0); #endif @@ -332,7 +332,7 @@ uintptr_t RunF20F(x64emu_t *emu, rex_t rex, uintptr_t addr, int *step) case 0x79: /* INSERTQ Ex, Gx */ // AMD only nextop = F8; - if(!box64_cputype || !(MODREG)) { + if(!BOX64ENV(cputype) || !(MODREG)) { #ifndef TEST_INTERPRETER emit_signal(emu, SIGILL, (void*)R_RIP, 0); #endif diff --git a/src/emu/x64syscall.c b/src/emu/x64syscall.c index a912afcb..4c20bb9d 100644 --- a/src/emu/x64syscall.c +++ b/src/emu/x64syscall.c @@ -450,14 +450,14 @@ void EXPORT x64Syscall(x64emu_t *emu) char buff2[64] = "\0"; char* buff = NULL; char* buffret = NULL; - if(box64_log>=LOG_DEBUG || cycle_log) { + if(BOX64ENV(log) >= LOG_DEBUG || BOX64ENV(rolling_log)) { log = 1; - buff = cycle_log?my_context->log_call[my_context->current_line]:t_buff; - buffret = cycle_log?my_context->log_ret[my_context->current_line]:t_buffret; - if(cycle_log) - my_context->current_line = (my_context->current_line+1)%cycle_log; + buff = BOX64ENV(rolling_log)?my_context->log_call[my_context->current_line]:t_buff; + buffret = BOX64ENV(rolling_log)?my_context->log_ret[my_context->current_line]:t_buffret; + if(BOX64ENV(rolling_log)) + my_context->current_line = (my_context->current_line+1)%BOX64ENV(rolling_log); snprintf(buff, 255, "%04d|%p: Calling syscall 0x%02X (%d) %p %p %p %p %p %p", GetTID(), (void*)R_RIP, s, s, (void*)R_RDI, (void*)R_RSI, (void*)R_RDX, (void*)R_R10, (void*)R_R8, (void*)R_R9); - if(!cycle_log) + if(!BOX64ENV(rolling_log)) printf_log(LOG_NONE, "%s", buff); } // check wrapper first @@ -480,7 +480,7 @@ void EXPORT x64Syscall(x64emu_t *emu) if(S_RAX==-1 && errno>0) S_RAX = -errno; if(log) snprintf(buffret, 127, "0x%x%s", R_EAX, buff2); - if(log && !cycle_log) printf_log(LOG_NONE, "=> %s\n", buffret); + if(log && !BOX64ENV(rolling_log)) printf_log(LOG_NONE, "=> %s\n", buffret); return; } switch (s) { @@ -838,7 +838,7 @@ void EXPORT x64Syscall(x64emu_t *emu) #endif case 449: #ifdef __NR_futex_waitv - if(box64_futex_waitv) + if(BOX64ENV(futex_waitv)) S_RAX = syscall(__NR_futex_waitv, R_RDI, R_RSI, R_RDX, R_R10, R_R8); else #endif @@ -851,7 +851,7 @@ void EXPORT x64Syscall(x64emu_t *emu) return; } if(log) snprintf(buffret, 127, "0x%lx%s", R_RAX, buff2); - if(log && 
!cycle_log) printf_log(LOG_NONE, "=> %s\n", buffret); + if(log && !BOX64ENV(rolling_log)) printf_log(LOG_NONE, "=> %s\n", buffret); } #define stack(n) (R_RSP+8+n) @@ -1116,7 +1116,7 @@ long EXPORT my_syscall(x64emu_t *emu) #endif case 449: #ifdef __NR_futex_waitv - if(box64_futex_waitv) + if(BOX64ENV(futex_waitv)) return syscall(__NR_futex_waitv, R_RSI, R_RDX, R_RCX, R_R8, R_R9); else #endif diff --git a/src/emu/x64test.c b/src/emu/x64test.c index 135a120c..b00fc060 100644 --- a/src/emu/x64test.c +++ b/src/emu/x64test.c @@ -106,7 +106,7 @@ void x64test_check(x64emu_t* ref, uintptr_t ip) BANNER; printf_log(LOG_NONE, "MXCSR: %x | %x\n", ref->mxcsr.x32, emu->mxcsr.x32); } - if(box64_avx) + if(BOX64ENV(avx)) if(memcmp(ref->ymm, emu->ymm, sizeof(emu->ymm))) { BANNER; for(int i=0; i<16; ++i) { diff --git a/src/emu/x64tls.c b/src/emu/x64tls.c index 16d4193e..4902177e 100644 --- a/src/emu/x64tls.c +++ b/src/emu/x64tls.c @@ -257,7 +257,7 @@ static int sizeDTS(box64context_t* context) } static int sizeTLSData(int s) { - uint32_t mask = 0xffff/*box64_nogtk?0xffff:0x1fff*/; // x86_64 does the mapping per 64K blocks, so it makes sense to have it this large + uint32_t mask = 0xffff/*BOX64ENV(nogtk)?0xffff:0x1fff*/; // x86_64 does the mapping per 64K blocks, so it makes sense to have it this large return (s+mask)&~mask; } diff --git a/src/emu/x86int3.c b/src/emu/x86int3.c index 72494a24..0eae5dd7 100755 --- a/src/emu/x86int3.c +++ b/src/emu/x86int3.c @@ -64,17 +64,17 @@ void x86Int3(x64emu_t* emu, uintptr_t* addr) elfheader_t *h = FindElfAddress(my_context, *(uintptr_t*)(R_ESP)); int have_trace = 0; if(h && strstr(ElfName(h), "libMiles")) have_trace = 1;*/ - if(box64_log>=LOG_DEBUG || cycle_log) { + if(BOX64ENV(log)>=LOG_DEBUG || BOX64ENV(rolling_log)) { int tid = GetTID(); char t_buff[256] = "\0"; char buff2[64] = "\0"; char buff3[64] = "\0"; int cycle_line = my_context->current_line; - if(cycle_log) { - my_context->current_line = (my_context->current_line+1)%cycle_log; + if(BOX64ENV(rolling_log)) { + my_context->current_line = (my_context->current_line+1)%BOX64ENV(rolling_log); } - char* buff = cycle_log?my_context->log_call[cycle_line]:t_buff; - char* buffret = cycle_log?my_context->log_ret[cycle_line]:NULL; + char* buff = BOX64ENV(rolling_log)?my_context->log_call[cycle_line]:t_buff; + char* buffret = BOX64ENV(rolling_log)?my_context->log_ret[cycle_line]:NULL; if(buffret) buffret[0] = '\0'; char *tmp; int post = 0; @@ -87,7 +87,7 @@ void x86Int3(x64emu_t* emu, uintptr_t* addr) if(!s) s = GetNativeName((void*)a); if(a==(uintptr_t)PltResolver32) { - if(cycle_log) { + if(BOX64ENV(rolling_log)) { ptr_t addr = *((uint32_t*)from_ptrv(R_ESP)); int slot = *((uint32_t*)from_ptrv(R_ESP+4)); elfheader_t *h = (elfheader_t*)from_ptrv(addr); @@ -399,7 +399,7 @@ void x86Int3(x64emu_t* emu, uintptr_t* addr) } else { snprintf(buff, 255, "%04d|%p: Calling %s (%08X, %08X, %08X...)", tid, from_ptriv(R_ESP), (char *)s, *(uint32_t*)from_ptr(R_ESP+4), *(uint32_t*)from_ptr(R_ESP+8), *(uint32_t*)from_ptr(R_ESP+12)); } - if(!cycle_log) { + if(!BOX64ENV(rolling_log)) { mutex_lock(&emu->context->mutex_trace); printf_log(LOG_NONE, "%s =>", buff); mutex_unlock(&emu->context->mutex_trace); @@ -462,7 +462,7 @@ void x86Int3(x64emu_t* emu, uintptr_t* addr) snprintf(buff3, 63, " (errno=%d:\"%s\")", errno, strerror(errno)); else if(perr==3 && (S_EAX)==-1) snprintf(buff3, 63, " (errno=%d:\"%s\")", errno, strerror(errno)); - if(cycle_log) { + if(BOX64ENV(rolling_log)) { if(ret_fmt==1) snprintf(buffret, 128, "%d%s%s", S_EAX, buff2, 
buff3); else @@ -480,7 +480,7 @@ void x86Int3(x64emu_t* emu, uintptr_t* addr) } return; } - if(!box64_ignoreint3 && my_context->signals[SIGTRAP]) { + if(!BOX64ENV(ignoreint3) && my_context->signals[SIGTRAP]) { R_RIP = *addr; // update RIP emit_signal(emu, SIGTRAP, NULL, 3); } else { diff --git a/src/emu/x86syscall_32.c b/src/emu/x86syscall_32.c index dc2bf76c..31f07aad 100644 --- a/src/emu/x86syscall_32.c +++ b/src/emu/x86syscall_32.c @@ -491,7 +491,7 @@ void EXPORT x86Syscall(x64emu_t *emu) break; case 449: #ifdef __NR_futex_waitv - if(box64_futex_waitv) + if(BOX64ENV(futex_waitv)) S_RAX = syscall(__NR_futex_waitv, R_EBX, R_ECX, R_EDX, R_ESI, R_EDI); else #endif @@ -715,7 +715,7 @@ uint32_t EXPORT my32_syscall(x64emu_t *emu, uint32_t s, ptr_t* b) #endif case 449: #ifdef __NR_futex_waitv - if(box64_futex_waitv) + if(BOX64ENV(futex_waitv)) return syscall(__NR_futex_waitv, u32(0), u32(4), u32(8), u32(12), u32(16)); else #endif @@ -737,4 +737,4 @@ uint32_t EXPORT my32_syscall(x64emu_t *emu, uint32_t s, ptr_t* b) } return 0; } -#endif //BOX32 \ No newline at end of file +#endif //BOX32 diff --git a/src/emu/x87emu_private.c b/src/emu/x87emu_private.c index 260a2be1..5d95a8e5 100644 --- a/src/emu/x87emu_private.c +++ b/src/emu/x87emu_private.c @@ -81,7 +81,7 @@ void fpu_fbld(x64emu_t* emu, uint8_t* s) { // long double (80bits) -> double (64bits) void LD2D(void* ld, void* d) { - if(box64_x87_no80bits) { + if(BOX64ENV(x87_no80bits)) { *(uint64_t*)d = *(uint64_t*)ld; return; } @@ -160,7 +160,7 @@ void LD2D(void* ld, void* d) // double (64bits) -> long double (80bits) void D2LD(void* d, void* ld) { - if(box64_x87_no80bits) { + if(BOX64ENV(x87_no80bits)) { *(uint64_t*)ld = *(uint64_t*)d; return; } @@ -214,7 +214,7 @@ void D2LD(void* d, void* ld) double FromLD(void* ld) { - if(box64_x87_no80bits) + if(BOX64ENV(x87_no80bits)) return *(double*)ld; double ret; // cannot add = 0; it break factorio (issue when calling fmodl) LD2D(ld, &ret); @@ -379,7 +379,7 @@ void fpu_fxrstor32(x64emu_t* emu, void* ed) emu->cw.x16 = p->ControlWord; emu->sw.x16 = p->StatusWord; emu->mxcsr.x32 = p->MxCsr; - if(box64_sse_flushto0) + if(BOX64ENV(sse_flushto0)) applyFlushTo0(emu); emu->top = emu->sw.f.F87_TOP; uint8_t tags = p->TagWord; @@ -404,7 +404,7 @@ void fpu_fxrstor64(x64emu_t* emu, void* ed) emu->cw.x16 = p->ControlWord; emu->sw.x16 = p->StatusWord; emu->mxcsr.x32 = p->MxCsr; - if(box64_sse_flushto0) + if(BOX64ENV(sse_flushto0)) applyFlushTo0(emu); emu->top = emu->sw.f.F87_TOP; uint8_t tags = p->TagWord; @@ -488,7 +488,7 @@ void fpu_xrstor(x64emu_t* emu, void* ed, int is32bits) emu->cw.x16 = p->ControlWord; emu->sw.x16 = p->StatusWord; emu->mxcsr.x32 = p->MxCsr; - if(box64_sse_flushto0) + if(BOX64ENV(sse_flushto0)) applyFlushTo0(emu); emu->top = emu->sw.f.F87_TOP; uint8_t tags = p->TagWord; diff --git a/src/include/box64context.h b/src/include/box64context.h index 6c14e219..d2c1d23a 100644 --- a/src/include/box64context.h +++ b/src/include/box64context.h @@ -259,7 +259,7 @@ void FreeBox64Context(box64context_t** context); // Cycle log handling void freeCycleLog(box64context_t* ctx); void initCycleLog(box64context_t* context); -void print_cycle_log(int loglevel); +void print_rolling_log(int loglevel); // return the index of the added header int AddElfHeader(box64context_t* ctx, elfheader_t* head); diff --git a/src/include/debug.h b/src/include/debug.h index 48843392..36a2e14a 100644 --- a/src/include/debug.h +++ b/src/include/debug.h @@ -1,52 +1,16 @@ #ifndef __DEBUG_H_ #define __DEBUG_H_ #include <stdint.h> +#include 
<env.h> typedef struct box64context_s box64context_t; -extern int box64_log; // log level -extern int box64_dump; // dump elf or not -extern int box64_dynarec_log; -extern int box64_dynarec; +extern box64env_t box64env; + extern uintptr_t box64_pagesize; -extern uintptr_t box64_load_addr; -extern int box64_dynarec_test; -extern int box64_maxcpu; -extern int box64_mmap32; -extern int box64_ignoreint3; extern int box64_rdtsc; -extern int box64_rdtsc_1ghz; extern uint8_t box64_rdtsc_shift; extern int box64_is32bits; -extern int box64_x11sync; -extern int box64_dynarec_gdbjit; -extern int box64_cputype; #ifdef DYNAREC -extern int box64_dynarec_dump; -extern int box64_dynarec_trace; -extern int box64_dynarec_forced; -extern uintptr_t box64_nodynarec_start, box64_nodynarec_end; -extern uintptr_t box64_dynarec_test_start, box64_dynarec_test_end; -extern int box64_dynarec_bigblock; -extern int box64_dynarec_forward; -extern int box64_dynarec_strongmem; -extern int box64_dynarec_weakbarrier; -extern int box64_dynarec_pause; -extern int box64_dynarec_fastnan; -extern int box64_dynarec_fastround; -extern int box64_dynarec_x87double; -extern int box64_dynarec_div0; -extern int box64_dynarec_safeflags; -extern int box64_dynarec_callret; -extern int box64_dynarec_bleeding_edge; -extern int box64_dynarec_tbb; -extern int box64_dynarec_wait; -extern int box64_dynarec_missing; -extern int box64_dynarec_aligned_atomics; -extern int box64_dynarec_nativeflags; -extern int box64_dynarec_df; -extern int box64_dynarec_perf_map; -extern int box64_dynarec_perf_map_fd; -extern int box64_dynarec_dirty; #ifdef ARM64 extern int arm64_asimd; extern int arm64_aes; @@ -84,58 +48,18 @@ extern int la64_lamcas; extern int la64_scq; #endif #endif -extern int box64_libcef; -extern int box64_jvm; -extern int box64_unityplayer; -extern int box64_sdl2_jguid; -extern int dlsym_error; // log dlsym error -extern int cycle_log; // if using rolling logs #ifdef HAVE_TRACE -extern int trace_xmm; // include XMM reg in trace? -extern int trace_emm; // include EMM reg in trace? 
-extern int trace_regsdiff; // colorize standard registers on changes extern uintptr_t trace_start, trace_end; extern char* trace_func; -extern char* trace_init; -extern char* box64_trace; -extern uint64_t start_cnt; #endif -extern int box64_nosandbox; -extern int box64_inprocessgpu; -extern int box64_cefdisablegpu; -extern int box64_cefdisablegpucompositor; -extern int box64_maxcpu_immutable; -extern int box64_malloc_hack; -extern int box64_dummy_crashhandler; -extern int box64_sse_flushto0; -extern int box64_x87_no80bits; -extern int box64_sync_rounding; -extern int box64_shaext; -extern int box64_sse42; -extern int box64_avx; -extern int box64_avx2; -extern int allow_missing_libs; extern int box64_mapclean; -extern int box64_prefer_wrapped; -extern int box64_prefer_emulated; -extern int box64_wrap_egl; extern int box64_steam; extern int box64_steamcmd; extern int box64_wine; extern int box64_musl; -extern int box64_nopulse; // disabling the use of wrapped pulseaudio -extern int box64_nogtk; // disabling the use of wrapped gtk -extern int box64_novulkan; // disabling the use of wrapped vulkan -extern int box64_showsegv; // show sigv, even if a signal handler is present -extern int box64_showbt; // show a backtrace if a signal is caught extern int box64_isglibc234; // is the program linked with glibc 2.34+ -extern int box64_futex_waitv; -extern int box64_x11threads; -extern int box64_x11glx; -extern char* box64_libGL; extern uintptr_t fmod_smc_start, fmod_smc_end; // to handle libfmod (from Unreal) SMC (self modifying code) extern uint32_t default_gs, default_fs; -extern int jit_gdb; // launch gdb when a segfault is trapped extern int box64_tcmalloc_minimal; // when using tcmalloc_minimal #define LOG_NONE 0 #define LOG_INFO 1 @@ -145,13 +69,22 @@ extern int box64_tcmalloc_minimal; // when using tcmalloc_minimal void printf_ftrace(const char* fmt, ...); -#define printf_log(L, ...) do {if((L)<=box64_log) {printf_ftrace(__VA_ARGS__);}} while(0) +#define printf_log(L, ...) \ + do { \ + if ((L) <= BOX64ENV(log)) { printf_ftrace(__VA_ARGS__); } \ + } while (0) -#define printf_dump(L, ...) do {if(box64_dump || ((L)<=box64_log)) {printf_ftrace(__VA_ARGS__);}} while(0) +#define printf_dump(L, ...) \ + do { \ + if (BOX64ENV(dump) || ((L) <= BOX64ENV(log))) { printf_ftrace(__VA_ARGS__); } \ + } while (0) -#define printf_dlsym(L, ...) do {if(dlsym_error || ((L)<=box64_log)) {printf_ftrace(__VA_ARGS__);}} while(0) +#define printf_dlsym(L, ...) \ + do { \ + if (BOX64ENV(dlsym_error) || ((L) <= BOX64ENV(log))) { printf_ftrace(__VA_ARGS__); } \ + } while (0) -#define dynarec_log(L, ...) do {if((L)<=box64_dynarec_log) {printf_ftrace(__VA_ARGS__);}} while(0) +#define dynarec_log(L, ...) do {if((L)<=BOX64ENV(dynarec_log)) {printf_ftrace(__VA_ARGS__);}} while(0) #define EXPORT __attribute__((visibility("default"))) #ifdef BUILD_DYNAMIC diff --git a/src/include/env.h b/src/include/env.h new file mode 100644 index 00000000..d5385d8a --- /dev/null +++ b/src/include/env.h @@ -0,0 +1,200 @@ +#ifndef __ENV_H +#define __ENV_H + +#include <stdint.h> +#include <unistd.h> + +#define BOX64ENV(name) (box64env.name) +#define SET_BOX64ENV(name, value) { box64env.name = (value); box64env.is_##name##_overridden = 1; } + +/* + INTEGER(NAME, name, default, min, max) + INTEGER64(NAME, name, default) + BOOLEAN(NAME, name, default) + ADDRESS(NAME, name) + STRING(NAME, name) +*/ + +extern char* ftrace_name; +#define DEFAULT_LOG_LEVEL (ftrace_name ? LOG_INFO : (isatty(fileno(stdout)) ? 
LOG_INFO : LOG_NONE)) + +#define ENVSUPER1() \ + STRING(BOX64_ADDLIBS, addlibs) \ + BOOLEAN(BOX64_ALLOWMISSINGLIBS, allow_missing_libs, 0) \ + STRING(BOX64_ARGS, args) \ + STRING(BOX64_BASH, bash) \ + BOOLEAN(BOX64_CEFDISABLEGPU, cefdisablegpu, 0) \ + BOOLEAN(BOX64_CEFDISABLEGPUCOMPOSITOR, cefdisablegpucompositor, 0) \ + INTEGER(BOX64_CPUTYPE, cputype, 0, 0, 1) \ + BOOLEAN(BOX64_CRASHHANDLER, dummy_crashhandler, 1) \ + BOOLEAN(BOX64_DLSYM_ERROR, dlsym_error, 0) \ + INTEGER(BOX64_DUMP, dump, 0, 0, 2) \ + BOOLEAN(BOX64_DYNAREC_ALIGNED_ATOMICS, dynarec_aligned_atomics, 0) \ + INTEGER(BOX64_DYNAREC_BIGBLOCK, dynarec_bigblock, 1, 0, 3) \ + BOOLEAN(BOX64_DYNAREC_BLEEDING_EDGE, dynarec_bleeding_edge, 1) \ + BOOLEAN(BOX64_DYNAREC_CALLRET, dynarec_callret, 0) \ + BOOLEAN(BOX64_DYNAREC_DF, dynarec_df, 0) \ + BOOLEAN(BOX64_DYNAREC_DIRTY, dynarec_dirty, 0) \ + BOOLEAN(BOX64_DYNAREC_DIV0, dynarec_div0, 0) \ + INTEGER(BOX64_DYNAREC_DUMP, dynarec_dump, 0, 0, 2) \ + BOOLEAN(BOX64_DYNAREC_FASTNAN, dynarec_fastnan, 1) \ + BOOLEAN(BOX64_DYNAREC_FASTROUND, dynarec_fastround, 1) \ + BOOLEAN(BOX64_DYNAREC_FORCED, dynarec_forced, 0) \ + INTEGER(BOX64_DYNAREC_FORWARD, dynarec_forward, 128, 0, 1024) \ + BOOLEAN(BOX64_DYNAREC_GDBJIT, dynarec_gdbjit, 0) \ + INTEGER(BOX64_DYNAREC_LOG, dynarec_log, 0, 0, 3) \ + BOOLEAN(BOX64_DYNAREC_MISSING, dynarec_missing, 0) \ + BOOLEAN(BOX64_DYNAREC_NATIVEFLAGS, dynarec_nativeflags, 1) \ + INTEGER(BOX64_DYNAREC_PAUSE, dynarec_pause, 0, 0, 3) \ + BOOLEAN(BOX64_DYNAREC_PERFMAP, dynarec_perf_map, 0) \ + INTEGER(BOX64_DYNAREC_SAFEFLAGS, dynarec_safeflags, 1, 0, 2) \ + BOOLEAN(BOX64_DYNAREC_STRONGMEM, dynarec_strongmem, 0) \ + BOOLEAN(BOX64_DYNAREC_TBB, dynarec_tbb, 1) \ + STRING(BOX64_DYNAREC_TEST, dynarec_test_str) \ + BOOLEAN(BOX64_DYNAREC_TRACE, dynarec_trace, 0) \ + BOOLEAN(BOX64_DYNAREC_WAIT, dynarec_wait, 1) \ + BOOLEAN(BOX64_DYNAREC_WEAKBARRIER, dynarec_weakbarrier, 1) \ + BOOLEAN(BOX64_DYNAREC_X87DOUBLE, dynarec_x87double, 0) \ + STRING(BOX64_EMULATED_LIBS, emulated_libs) \ + STRING(BOX64_ENV, env) \ + STRING(BOX64_ENV1, env1) \ + STRING(BOX64_ENV2, env2) \ + STRING(BOX64_ENV3, env3) \ + STRING(BOX64_ENV4, env4) \ + STRING(BOX64_ENV5, env5) \ + BOOLEAN(BOX64_EXIT, exit, 0) \ + BOOLEAN(BOX64_FIX_64BIT_INODES, fix_64bit_inodes, 0) \ + BOOLEAN(BOX64_IGNOREINT3, ignoreint3, 0) \ + STRING(BOX64_INSERT_ARGS, insert_args) \ + BOOLEAN(BOX64_INPROCESSGPU, inprocessgpu, 0) \ + BOOLEAN(BOX64_JITGDB, jitgdb, 0) \ + BOOLEAN(BOX64_JVM, jvm, 1) \ + STRING(BOX64_LD_LIBRARY_PATH, ld_library_path) \ + BOOLEAN(BOX64_LIBCEF, libcef, 1) \ + STRING(BOX64_LIBGL, libgl) \ + ADDRESS(BOX64_LOAD_ADDR, load_addr) \ + INTEGER(BOX64_LOG, log, DEFAULT_LOG_LEVEL, 0, 3) \ + BOOLEAN(BOX64_MALLOC_HACK, malloc_hack, 0) \ + INTEGER(BOX64_MAXCPU, new_maxcpu, 0, 0, 100) \ + BOOLEAN(BOX64_NOBANNER, nobanner, (isatty(fileno(stdout)) ? 
0 : 1)) \ + STRING(BOX64_NODYNAREC, nodynarec) \ + BOOLEAN(BOX64_NOGTK, nogtk, 0) \ + BOOLEAN(BOX64_NOPULSE, nopulse, 0) \ + BOOLEAN(BOX64_NORCFILES, noenvfiles, 0) \ + BOOLEAN(BOX64_NOSANDBOX, nosandbox, 0) \ + BOOLEAN(BOX64_NOSIGSEGV, nosigsegv, 0) \ + BOOLEAN(BOX64_NOSIGILL, nosigill, 0) \ + BOOLEAN(BOX64_NOVULKAN, novulkan, 0) \ + STRING(BOX64_PATH, path) \ + BOOLEAN(BOX64_PREFER_EMULATED, prefer_emulated, 0) \ + BOOLEAN(BOX64_PREFER_WRAPPED, prefer_wrapped, 0) \ + STRING(BOX64_RCFILE, envfile) \ + BOOLEAN(BOX64_RDTSC_1GHZ, rdtsc_1ghz, 0) \ + BOOLEAN(BOX64_RESERVE_HIGH, reserve_high, 0) \ + BOOLEAN(BOX64_ROLLING_LOG, rolling_log, 0) \ + BOOLEAN(BOX64_SDL2_JGUID, sdl2_jguid, 0) \ + BOOLEAN(BOX64_SHAEXT, shaext, 1) \ + BOOLEAN(BOX64_SHOWBT, showbt, 0) \ + BOOLEAN(BOX64_SHOWSEGV, showsegv, 0) \ + BOOLEAN(BOX64_SSE_FLUSHTO0, sse_flushto0, 0) \ + BOOLEAN(BOX64_SSE42, sse42, 1) \ + BOOLEAN(BOX64_SYNC_ROUNDING, sync_rounding, 0) \ + BOOLEAN(BOX64_TRACE_COLOR, trace_regsdiff, 0) \ + BOOLEAN(BOX64_TRACE_EMM, trace_emm, 0) \ + STRING(BOX64_TRACE_FILE, trace_file) \ + STRING(BOX64_TRACE_INIT, trace_init) \ + INTEGER64(BOX64_TRACE_START, start_cnt, 0) \ + BOOLEAN(BOX64_TRACE_XMM, trace_xmm, 0) \ + STRING(BOX64_TRACE, trace) \ + BOOLEAN(BOX64_UNITYPLAYER, unityplayer, 1) \ + BOOLEAN(BOX64_WRAP_EGL, wrap_egl, 0) \ + BOOLEAN(BOX64_X11GLX, x11glx, 1) \ + BOOLEAN(BOX64_X11SYNC, x11sync, 0) \ + BOOLEAN(BOX64_X11THREADS, x11threads, 0) \ + BOOLEAN(BOX64_X87_NO80BITS, x87_no80bits, 0) + +#ifdef ARM64 +#define ENVSUPER2() \ + INTEGER(BOX64_AVX, avx, 2, 0, 2) +#else +#define ENVSUPER2() \ + INTEGER(BOX64_AVX, avx, 0, 0, 2) +#endif + +#ifdef DYNAREC +#define ENVSUPER3() \ + BOOLEAN(BOX64_DYNAREC, dynarec, 1) +#else +#define ENVSUPER3() \ + BOOLEAN(BOX64_DYNAREC, dynarec, 0) +#endif + +#ifdef BAD_SIGNAL +#define ENVSUPER4() \ + BOOLEAN(BOX64_FUTEX_WAITV, futex_waitv, 0) +#else +#define ENVSUPER4() \ + BOOLEAN(BOX64_FUTEX_WAITV, futex_waitv, 1) +#endif + +#if defined(SD845) || defined(SD888) || defined(SD8G2) || defined(TEGRAX1) +#define ENVSUPER5() \ + BOOLEAN(BOX64_MMAP32, mmap32, 1) +#else +#define ENVSUPER5() \ + BOOLEAN(BOX64_MMAP32, mmap32, 0) +#endif + +#define ENVSUPER() \ + ENVSUPER1() \ + ENVSUPER2() \ + ENVSUPER3() \ + ENVSUPER4() \ + ENVSUPER5() + +typedef struct box64env_s { +#define INTEGER(NAME, name, default, min, max) int name; +#define INTEGER64(NAME, name, default) int64_t name; +#define BOOLEAN(NAME, name, default) int name; +#define ADDRESS(NAME, name) uintptr_t name; +#define STRING(NAME, name) char* name; + ENVSUPER() +#undef INTEGER +#undef INTEGER64 +#undef BOOLEAN +#undef ADDRESS +#undef STRING + +#define INTEGER(NAME, name, default, min, max) uint64_t is_##name##_overridden : 1; +#define INTEGER64(NAME, name, default) uint64_t is_##name##_overridden : 1; +#define BOOLEAN(NAME, name, default) uint64_t is_##name##_overridden : 1; +#define ADDRESS(NAME, name) uint64_t is_##name##_overridden : 1; +#define STRING(NAME, name) uint64_t is_##name##_overridden : 1; + ENVSUPER() +#undef INTEGER +#undef INTEGER64 +#undef BOOLEAN +#undef ADDRESS +#undef STRING + + /******** Custom ones ********/ + int maxcpu; + int dynarec_test; + int avx2; + uintptr_t dynarec_test_start; + uintptr_t dynarec_test_end; + uintptr_t nodynarec_start; + uintptr_t nodynarec_end; + int dynarec_perf_map_fd; + + uint64_t is_dynarec_perf_map_fd_overridden : 1; +} box64env_t; + +void InitializeEnvFiles(); +void ApplyEnvFileEntry(const char* name); +const char* GetLastApplyEntryName(); +void InitializeEnv(); +void 
LoadEnvVariables(); +void PrintEnvVariables(); + +#endif // __ENV_H diff --git a/src/include/rcfile.h b/src/include/rcfile.h deleted file mode 100644 index 3c8218cb..00000000 --- a/src/include/rcfile.h +++ /dev/null @@ -1,9 +0,0 @@ -#ifndef __RCFILE_H__ -#define __RCFILE_H__ - -void LoadRCFile(const char* filename); -void DeleteParams(void); -void ApplyParams(const char* name); -const char* GetLastApplyName(); - -#endif //__RCFILE_H__ \ No newline at end of file diff --git a/src/librarian/librarian.c b/src/librarian/librarian.c index 4298ee05..c130e092 100644 --- a/src/librarian/librarian.c +++ b/src/librarian/librarian.c @@ -361,7 +361,7 @@ int AddNeededLib(lib_t* maplib, int local, int bindnow, int deepbind, needed_lib DecRefCount(&needed->libs[n-i-1], emu); } // all done - if(allow_missing_libs) return 0; + if(BOX64ENV(allow_missing_libs)) return 0; return ret; } EXPORTDYN @@ -370,7 +370,7 @@ void RemoveNeededLib(lib_t* maplib, int local, needed_libs_t* needed, box64conte if(!needed) // no needed libs, no problems return; for(int i=0; i<needed->size; ++i) { - if(box64_log>=LOG_DEBUG && needed->libs[i]) + if (BOX64ENV(log)>=LOG_DEBUG && needed->libs[i]) printf_dump(LOG_DEBUG, "Will remove after failed init %s\n", needed->names[i]); AddNeededLib_remove(maplib, local, &needed->libs[i], box64, emu); } @@ -585,13 +585,13 @@ int GetGlobalSymbolStartEnd(lib_t *maplib, const char* name, uintptr_t* start, u } #ifndef STATICBUILD // some special case symbol, defined inside box64 itself - if(!strcmp(name, "gdk_display") && !box64_nogtk) { + if(!strcmp(name, "gdk_display") && !BOX64ENV(nogtk)) { *start = (uintptr_t)my_GetGTKDisplay(); *end = *start+sizeof(void*); printf_log(LOG_INFO, "Using global gdk_display for gdk-x11 (%p:%p)\n", start, *(void**)start); return 1; } - if(!strcmp(name, "g_threads_got_initialized") && !box64_nogtk) { + if(!strcmp(name, "g_threads_got_initialized") && !BOX64ENV(nogtk)) { *start = (uintptr_t)my_GetGthreadsGotInitialized(); *end = *start+sizeof(int); printf_log(LOG_INFO, "Using global g_threads_got_initialized for gthread2 (%p:%p)\n", start, *(void**)start); @@ -658,14 +658,14 @@ int GetGlobalWeakSymbolStartEnd(lib_t *maplib, const char* name, uintptr_t* star } #ifndef STATICBUILD // some special case symbol, defined inside box64 itself - if(!strcmp(name, "gdk_display") && !box64_nogtk) { + if(!strcmp(name, "gdk_display") && !BOX64ENV(nogtk)) { *start = (uintptr_t)my_GetGTKDisplay(); *end = *start+sizeof(void*); if(elfsym) *elfsym = NULL; printf_log(LOG_INFO, "Using global gdk_display for gdk-x11 (%p:%p)\n", start, *(void**)start); return 1; } - if(!strcmp(name, "g_threads_got_initialized") && !box64_nogtk) { + if(!strcmp(name, "g_threads_got_initialized") && !BOX64ENV(nogtk)) { *start = (uintptr_t)my_GetGthreadsGotInitialized(); *end = *start+sizeof(int); if(elfsym) *elfsym = NULL; diff --git a/src/librarian/library.c b/src/librarian/library.c index 76e9770f..6ad8d57b 100644 --- a/src/librarian/library.c +++ b/src/librarian/library.c @@ -357,29 +357,29 @@ static int loadEmulatedLib(const char* libname, library_t *lib, box64context_t* printf_dump(LOG_INFO, "Using emulated %s\n", libname); #ifdef DYNAREC - if(libname && box64_dynarec_bleeding_edge && strstr(libname, "libmonobdwgc-2.0.so")) { + if(libname && BOX64ENV(dynarec_bleeding_edge) && strstr(libname, "libmonobdwgc-2.0.so")) { printf_dump(LOG_INFO, "MonoBleedingEdge detected, disable Dynarec BigBlock and enable Dynarec StrongMem\n"); - box64_dynarec_bigblock = 0; - box64_dynarec_strongmem = 1; + 
SET_BOX64ENV(dynarec_bigblock, 0); + SET_BOX64ENV(dynarec_strongmem, 1); } - if(libname && box64_dynarec_tbb && strstr(libname, "libtbb.so")) { + if(libname && BOX64ENV(dynarec_tbb) && strstr(libname, "libtbb.so")) { printf_dump(LOG_INFO, "libtbb detected, enable Dynarec StrongMem\n"); - box64_dynarec_strongmem = 1; + SET_BOX64ENV(dynarec_strongmem, 1); } #endif - if(libname && box64_jvm && strstr(libname, "libjvm.so")) { + if(libname && BOX64ENV(jvm) && strstr(libname, "libjvm.so")) { #ifdef DYNAREC printf_dump(LOG_INFO, "libjvm detected, disable Dynarec BigBlock and enable Dynarec StrongMem, hide SSE 4.2\n"); - box64_dynarec_bigblock = 0; - box64_dynarec_strongmem = 1; + SET_BOX64ENV(dynarec_bigblock, 0); + SET_BOX64ENV(dynarec_strongmem, 1); #else printf_dump(LOG_INFO, "libjvm detected, hide SSE 4.2\n"); #endif - box64_sse42 = 0; + SET_BOX64ENV(sse42, 0); } - if(libname && box64_libcef && strstr(libname, "libcef.so")) { + if(libname && BOX64ENV(libcef) && strstr(libname, "libcef.so")) { printf_dump(LOG_INFO, "libcef detected, using malloc_hack_2\n"); - box64_malloc_hack = 2; + SET_BOX64ENV(malloc_hack, 2); } return 1; } @@ -442,7 +442,7 @@ static int isEssentialLib(const char* name) { for (unsigned int i=0; i<sizeof(essential_libs)/sizeof(essential_libs[0]); ++i) if(!strcmp(name, essential_libs[i])) return 1; - if(box64_wrap_egl) + if(BOX64ENV(wrap_egl)) for (unsigned int i=0; i<sizeof(essential_libs_egl)/sizeof(essential_libs_egl[0]); ++i) if(!strcmp(name, essential_libs_egl[i])) return 1; @@ -481,14 +481,14 @@ library_t *NewLibrary(const char* path, box64context_t* context, elfheader_t* ve lib->path = box_realpath(path, NULL); if(!lib->path) lib->path = box_strdup(path); - if(box64_libGL && !strcmp(path, box64_libGL)) + if(BOX64ENV(libgl) && !strcmp(path, BOX64ENV(libgl))) lib->name = box_strdup("libGL.so.1"); else lib->name = Path2Name(path); lib->nbdot = NbDot(lib->name); lib->type = LIB_UNNKNOW; printf_dump(LOG_DEBUG, "Simplified name is \"%s\"\n", lib->name); - if(box64_nopulse) { + if(BOX64ENV(nopulse)) { if(strstr(lib->name, "libpulse.so")==lib->name || strstr(lib->name, "libpulse-simple.so")==lib->name) { box_free(lib->name); box_free(lib->path); @@ -497,7 +497,7 @@ library_t *NewLibrary(const char* path, box64context_t* context, elfheader_t* ve return NULL; } } - if(box64_novulkan) { + if(BOX64ENV(novulkan)) { if(strstr(lib->name, "libvulkan.so")==lib->name) { box_free(lib->name); box_free(lib->path); @@ -508,9 +508,9 @@ library_t *NewLibrary(const char* path, box64context_t* context, elfheader_t* ve } int notwrapped = FindInCollection(lib->name, &context->box64_emulated_libs); int essential = isEssentialLib(lib->name); - if(!notwrapped && box64_prefer_emulated && !essential) + if(!notwrapped && BOX64ENV(prefer_emulated) && !essential) notwrapped = 1; - int precise = (!box64_prefer_wrapped && !essential && path && strchr(path, '/'))?1:0; + int precise = (!BOX64ENV(prefer_wrapped) && !essential && path && strchr(path, '/'))?1:0; if(!notwrapped && precise && strstr(path, "libtcmalloc_minimal.so")) precise = 0; // allow native version for tcmalloc_minimum /* diff --git a/src/libtools/signal32.c b/src/libtools/signal32.c index c2d37b3e..bebb4f2e 100644 --- a/src/libtools/signal32.c +++ b/src/libtools/signal32.c @@ -359,7 +359,7 @@ uint32_t RunFunctionHandler32(int* exit, int dynarec, i386_ucontext_t* sigcontex x64emu_t *emu = thread_get_emu(); #ifdef DYNAREC - if(box64_dynarec_test) + if (BOX64ENV(dynarec_test)) emu->test.test = 0; #endif @@ -405,11 +405,11 @@ uint32_t 
RunFunctionHandler32(int* exit, int dynarec, i386_ucontext_t* sigcontex emu->flags.quitonlongjmp = oldquitonlongjmp; #ifdef DYNAREC - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { emu->test.test = 0; emu->test.clean = 0; } - #endif +#endif if(emu->flags.longjmp) { // longjmp inside signal handler, lets grab all relevent value and do the actual longjmp in the signal handler @@ -471,7 +471,7 @@ int write_opcode(uintptr_t rip, uintptr_t native_ip, int is32bits); void my_sigactionhandler_oldcode_32(x64emu_t* emu, int32_t sig, int simple, siginfo_t* info, void * ucntx, int* old_code, void* cur_db) { int Locks = unlockMutex(); - int log_minimum = (box64_showsegv)?LOG_NONE:((sig==SIGSEGV && my_context->is_sigaction[sig])?LOG_DEBUG:LOG_INFO); + int log_minimum = (BOX64ENV(showsegv))?LOG_NONE:((sig==SIGSEGV && my_context->is_sigaction[sig])?LOG_DEBUG:LOG_INFO); printf_log(LOG_DEBUG, "Sigactionhanlder32 for signal #%d called (jump to %p/%s)\n", sig, (void*)my_context->signals[sig], GetNativeName((void*)my_context->signals[sig])); diff --git a/src/libtools/signals.c b/src/libtools/signals.c index c1c45ba4..819d89d7 100644 --- a/src/libtools/signals.c +++ b/src/libtools/signals.c @@ -323,7 +323,7 @@ uint64_t RunFunctionHandler(x64emu_t* emu, int* exit, int dynarec, x64_ucontext_ if(!emu) emu = thread_get_emu(); #ifdef DYNAREC - if(box64_dynarec_test) + if (BOX64ENV(dynarec_test)) emu->test.test = 0; #endif @@ -374,11 +374,11 @@ uint64_t RunFunctionHandler(x64emu_t* emu, int* exit, int dynarec, x64_ucontext_ emu->flags.quitonlongjmp = oldquitonlongjmp; #ifdef DYNAREC - if(box64_dynarec_test) { + if (BOX64ENV(dynarec_test)) { emu->test.test = 0; emu->test.clean = 0; } - #endif +#endif if(emu->flags.longjmp) { // longjmp inside signal handler, lets grab all relevent value and do the actual longjmp in the signal handler @@ -688,7 +688,7 @@ int mark_db_unaligned(dynablock_t* db, uintptr_t x64pc) add_unaligned_address(x64pc); db->hash++; // dirty the block MarkDynablock(db); // and mark it -if(box64_showsegv) printf_log(LOG_INFO, "Marked db %p as dirty, and address %p as needing unaligned handling\n", db, (void*)x64pc); +if(BOX64ENV(showsegv)) printf_log(LOG_INFO, "Marked db %p as dirty, and address %p as needing unaligned handling\n", db, (void*)x64pc); return 2; // marked, exit handling... } #endif @@ -1029,7 +1029,7 @@ void my_sigactionhandler_oldcode_32(x64emu_t* emu, int32_t sig, int simple, sigi void my_sigactionhandler_oldcode_64(x64emu_t* emu, int32_t sig, int simple, siginfo_t* info, void * ucntx, int* old_code, void* cur_db) { int Locks = unlockMutex(); - int log_minimum = (box64_showsegv)?LOG_NONE:LOG_DEBUG; + int log_minimum = (BOX64ENV(showsegv))?LOG_NONE:LOG_DEBUG; printf_log(LOG_DEBUG, "Sigactionhanlder for signal #%d called (jump to %p/%s)\n", sig, (void*)my_context->signals[sig], GetNativeName((void*)my_context->signals[sig])); @@ -1494,7 +1494,7 @@ extern int box64_exit_code; void my_box64signalhandler(int32_t sig, siginfo_t* info, void * ucntx) { // sig==SIGSEGV || sig==SIGBUS || sig==SIGILL || sig==SIGABRT here! 
- int log_minimum = (box64_showsegv)?LOG_NONE:((sig==SIGSEGV && my_context->is_sigaction[sig])?LOG_DEBUG:LOG_INFO); + int log_minimum = (BOX64ENV(showsegv))?LOG_NONE:((sig==SIGSEGV && my_context->is_sigaction[sig])?LOG_DEBUG:LOG_INFO); if(signal_jmpbuf_active) { signal_jmpbuf_active = 0; longjmp(SIG_JMPBUF, 1); @@ -1549,7 +1549,7 @@ void my_box64signalhandler(int32_t sig, siginfo_t* info, void * ucntx) int fixed = 0; if((fixed=sigbus_specialcases(info, ucntx, pc, fpsimd, db, x64pc))) { // special case fixed, restore everything and just continues - if(box64_log>=LOG_DEBUG || box64_showsegv) { + if (BOX64ENV(log)>=LOG_DEBUG || BOX64ENV(showsegv)) { static void* old_pc[2] = {0}; static int old_pc_i = 0; if(old_pc[0]!=pc && old_pc[1]!=pc) { @@ -1587,7 +1587,7 @@ void my_box64signalhandler(int32_t sig, siginfo_t* info, void * ucntx) int fixed = 0; if((fixed = sigbus_specialcases(info, ucntx, pc, fpsimd, db, x64pc))) { // special case fixed, restore everything and just continues - if(box64_log >= LOG_DEBUG || box64_showsegv) { + if (BOX64ENV(log) >= LOG_DEBUG || BOX64ENV(showsegv)) { static void* old_pc[2] = {0}; static int old_pc_i = 0; if(old_pc[0]!=pc && old_pc[1]!=pc) { @@ -1623,7 +1623,7 @@ void my_box64signalhandler(int32_t sig, siginfo_t* info, void * ucntx) unprotectDB((uintptr_t)addr, 1, 1); // unprotect 1 byte... But then, the whole page will be unprotected if(db) CheckHotPage((uintptr_t)addr); int db_need_test = db?getNeedTest((uintptr_t)db->x64_addr):0; - if(db && ((addr>=db->x64_addr && addr<(db->x64_addr+db->x64_size)) || (db_need_test && !box64_dynarec_dirty))) { + if(db && ((addr>=db->x64_addr && addr<(db->x64_addr+db->x64_size)) || (db_need_test && !BOX64ENV(dynarec_dirty)))) { emu = getEmuSignal(emu, p, db); // dynablock got auto-dirty! need to get out of it!!! if(emu->jmpbuf) { @@ -1666,7 +1666,7 @@ void my_box64signalhandler(int32_t sig, siginfo_t* info, void * ucntx) dynarec_log(LOG_INFO, "Warning, addr inside current dynablock!\n"); } // mark stuff as unclean - if(box64_dynarec) + if(BOX64ENV(dynarec)) cleanDBFromAddressRange(((uintptr_t)addr)&~(box64_pagesize-1), box64_pagesize, 0); static void* glitch_pc = NULL; static void* glitch_addr = NULL; @@ -1790,19 +1790,19 @@ dynarec_log(/*LOG_DEBUG*/LOG_INFO, "%04d|Repeated SIGSEGV with Access error on % old_prot = prot; const char* name = NULL; const char* x64name = NULL; - if(log_minimum<=box64_log) { - signal_jmpbuf_active = 1; - if(sigsetjmp(SIG_JMPBUF, 1)) { - // segfault while gathering function name... - name = "???"; - } else - name = GetNativeName(pc); - signal_jmpbuf_active = 0; + if (log_minimum<=BOX64ENV(log)) { + signal_jmpbuf_active = 1; + if(sigsetjmp(SIG_JMPBUF, 1)) { + // segfault while gathering function name... + name = "???"; + } else + name = GetNativeName(pc); + signal_jmpbuf_active = 0; } // Adjust RIP for special case of NULL function run if(sig==SIGSEGV && R_RIP==0x1 && (uintptr_t)info->si_addr==0x0) R_RIP = 0x0; - if(log_minimum<=box64_log) { + if(log_minimum<=BOX64ENV(log)) { elfheader_t* elf = FindElfAddress(my_context, x64pc); if(elf) { signal_jmpbuf_active = 1; @@ -1814,7 +1814,7 @@ dynarec_log(/*LOG_DEBUG*/LOG_INFO, "%04d|Repeated SIGSEGV with Access error on % signal_jmpbuf_active = 0; } } - if(jit_gdb) { + if(BOX64ENV(jitgdb)) { pid_t pid = getpid(); int v = vfork(); // is this ok in a signal handler??? 
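/* Reader annotation (not part of the patch): with BOX64_JITGDB set, the block below makes the
   faulting parent spin on the volatile `waiting` flag (clear it from the debugger with
   "set waiting=0") while the child attaches a debugger to this pid: jitgdb==2 execs gdbserver on
   127.0.0.1:1234, jitgdb==3 execs lldb, any other non-zero value execs plain gdb. Note that env.h
   above now declares BOX64_JITGDB as a BOOLEAN, so the gdbserver/lldb values may no longer be
   reachable from the environment alone. */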
if(v<0) { @@ -1822,7 +1822,7 @@ dynarec_log(/*LOG_DEBUG*/LOG_INFO, "%04d|Repeated SIGSEGV with Access error on % } else if(v) { // parent process, the one that have the segfault volatile int waiting = 1; - printf("Waiting for %s (pid %d)...\n", (jit_gdb==2)?"gdbserver":"gdb", pid); + printf("Waiting for %s (pid %d)...\n", (BOX64ENV(jitgdb)==2)?"gdbserver":"gdb", pid); while(waiting) { // using gdb, use "set waiting=0" to stop waiting... usleep(1000); @@ -1830,18 +1830,18 @@ dynarec_log(/*LOG_DEBUG*/LOG_INFO, "%04d|Repeated SIGSEGV with Access error on % } else { char myarg[50] = {0}; sprintf(myarg, "%d", pid); - if(jit_gdb==2) + if(BOX64ENV(jitgdb)==2) execlp("gdbserver", "gdbserver", "127.0.0.1:1234", "--attach", myarg, (char*)NULL); - else if(jit_gdb==3) + else if(BOX64ENV(jitgdb)==3) execlp("lldb", "lldb", "-p", myarg, (char*)NULL); else execlp("gdb", "gdb", "-pid", myarg, (char*)NULL); exit(-1); } } - print_cycle_log(log_minimum); + print_rolling_log(log_minimum); - if((box64_showbt || sig==SIGABRT) && log_minimum<=box64_log) { + if((BOX64ENV(showbt) || sig==SIGABRT) && log_minimum<=BOX64ENV(log)) { // show native bt #define BT_BUF_SIZE 100 int nptrs; @@ -1913,7 +1913,7 @@ dynarec_log(/*LOG_DEBUG*/LOG_INFO, "%04d|Repeated SIGSEGV with Access error on % #undef GO } - if(log_minimum<=box64_log) { + if(log_minimum<=BOX64ENV(log)) { static const char* reg_name[] = {"RAX", "RCX", "RDX", "RBX", "RSP", "RBP", "RSI", "RDI", " R8", " R9","R10","R11", "R12","R13","R14","R15"}; static const char* seg_name[] = {"ES", "CS", "SS", "DS", "FS", "GS"}; int shown_regs = 0; @@ -2031,14 +2031,10 @@ void my_sigactionhandler(int32_t sig, siginfo_t* info, void * ucntx) uintptr_t x64pc = R_RIP; if(db) x64pc = getX64Address(db, (uintptr_t)pc); - if(box64_showsegv) printf_log(LOG_INFO, "sigaction handler for sig %d, pc=%p, x64pc=%p, db=%p\n", sig, pc, x64pc, db); + if(BOX64ENV(showsegv)) printf_log(LOG_INFO, "sigaction handler for sig %d, pc=%p, x64pc=%p, db=%p\n", sig, pc, x64pc, db); my_sigactionhandler_oldcode(emu, sig, 0, info, ucntx, NULL, db, x64pc); } -#ifndef DYNAREC -#define box64_dynarec_dump 0 -#endif - void emit_signal(x64emu_t* emu, int sig, void* addr, int code) { siginfo_t info = {0}; @@ -2055,15 +2051,15 @@ void emit_signal(x64emu_t* emu, int sig, void* addr, int code) info.si_addr = addr; const char* x64name = NULL; const char* elfname = NULL; - if(box64_log>LOG_INFO || box64_dynarec_dump || box64_showsegv) { + if(BOX64ENV(log)>LOG_INFO || BOX64ENV(dynarec_dump) || BOX64ENV(showsegv)) { x64name = getAddrFunctionName(R_RIP); elfheader_t* elf = FindElfAddress(my_context, R_RIP); if(elf) elfname = ElfName(elf); printf_log(LOG_NONE, "Emit Signal %d at IP=%p(%s / %s) / addr=%p, code=0x%x\n", sig, (void*)R_RIP, x64name?x64name:"???", elfname?elfname:"?", addr, code); -print_cycle_log(LOG_INFO); + print_rolling_log(LOG_INFO); - if((box64_showbt || sig==SIGABRT) && box64_log>=LOG_INFO) { + if((BOX64ENV(showbt) || sig==SIGABRT) && BOX64ENV(log)>=LOG_INFO) { // show native bt #define BT_BUF_SIZE 100 int nptrs; @@ -2132,7 +2128,7 @@ void emit_interruption(x64emu_t* emu, int num, void* addr) info.si_addr = NULL;//addr; const char* x64name = NULL; const char* elfname = NULL; - if(box64_log>LOG_INFO || box64_dynarec_dump || box64_showsegv) { + if(BOX64ENV(log)>LOG_INFO || BOX64ENV(dynarec_dump) || BOX64ENV(showsegv)) { x64name = getAddrFunctionName(R_RIP); elfheader_t* elf = FindElfAddress(my_context, R_RIP); if(elf) @@ -2151,7 +2147,7 @@ void emit_div0(x64emu_t* emu, void* addr, int code) info.si_addr = 
addr; const char* x64name = NULL; const char* elfname = NULL; - if(box64_log>LOG_INFO || box64_dynarec_dump || box64_showsegv) { + if(BOX64ENV(log)>LOG_INFO || BOX64ENV(dynarec_dump) || BOX64ENV(showsegv)) { x64name = getAddrFunctionName(R_RIP); elfheader_t* elf = FindElfAddress(my_context, R_RIP); if(elf) @@ -2503,7 +2499,7 @@ EXPORT void my_makecontext(x64emu_t* emu, void* ucp, void* fnc, int32_t argc, in } void box64_abort() { - if(box64_showbt && LOG_INFO<=box64_log) { + if(BOX64ENV(showbt) && LOG_INFO<=BOX64ENV(log)) { // show native bt #define BT_BUF_SIZE 100 int nptrs; diff --git a/src/libtools/threads.c b/src/libtools/threads.c index a630a7c0..046365d2 100644 --- a/src/libtools/threads.c +++ b/src/libtools/threads.c @@ -535,7 +535,7 @@ EXPORT int my_pthread_create(x64emu_t *emu, void* t, void* attr, void* start_rou et->fnc = (uintptr_t)start_routine; et->arg = arg; #ifdef DYNAREC - if(box64_dynarec) { + if(BOX64ENV(dynarec)) { // pre-creation of the JIT code for the entry point of the thread DBGetBlock(emu, (uintptr_t)start_routine, 1, 0); // function wrapping are 64bits only on box64 } @@ -561,7 +561,7 @@ void* my_prepare_thread(x64emu_t *emu, void* f, void* arg, int ssize, void** pet et->fnc = (uintptr_t)f; et->arg = arg; #ifdef DYNAREC - if(box64_dynarec) { + if(BOX64ENV(dynarec)) { // pre-creation of the JIT code for the entry point of the thread DBGetBlock(emu, (uintptr_t)f, 1, 0); // function wrapping are 64bits only on box64 } diff --git a/src/libtools/threads32.c b/src/libtools/threads32.c index 32e81799..ac75e4c3 100755 --- a/src/libtools/threads32.c +++ b/src/libtools/threads32.c @@ -229,7 +229,7 @@ EXPORT int my32_pthread_create(x64emu_t *emu, void* t, void* attr, void* start_r et->join = 0; } #ifdef DYNAREC - if(box64_dynarec) { + if(BOX64ENV(dynarec)) { // pre-creation of the JIT code for the entry point of the thread DBGetBlock(emu, (uintptr_t)start_routine, 1, 1); } @@ -261,7 +261,7 @@ void* my32_prepare_thread(x64emu_t *emu, void* f, void* arg, int ssize, void** p et->fnc = (uintptr_t)f; et->arg = arg; #ifdef DYNAREC - if(box64_dynarec) { + if(BOX64ENV(dynarec)) { // pre-creation of the JIT code for the entry point of the thread dynablock_t *current = NULL; DBGetBlock(emu, (uintptr_t)f, 1, 1); diff --git a/src/mallochook.c b/src/mallochook.c index 8094ca37..ccc24567 100644 --- a/src/mallochook.c +++ b/src/mallochook.c @@ -844,7 +844,7 @@ void checkHookedSymbols(elfheader_t* h) { int hooked = 0; int hooked_symtab = 0; - if(box64_malloc_hack==1) + if(BOX64ENV(malloc_hack)==1) return; if(box64_is32bits) { /* TODO? 
*/ @@ -899,7 +899,7 @@ void checkHookedSymbols(elfheader_t* h) #undef GO #undef GO2 #define GO(A, B) if(!strcmp(symname, #A)) {uintptr_t alt = AddCheckBridge(my_context->system, B, A, 0, #A); printf_log(LOG_DEBUG, "Redirecting %s function from %p (%s)\n", symname, (void*)offs, ElfName(h)); addRelocJmp((void*)offs, (void*)alt, sz, #A, h, &real_##A);} - #define GO2(A, B) if(!strcmp(symname, #A) && (box64_malloc_hack>1)) {uintptr_t alt = AddCheckBridge(my_context->system, B, my_##A, 0, "my_" #A); printf_log(LOG_DEBUG, "Redirecting %s function from %p (%s)\n", symname, (void*)offs, ElfName(h)); addRelocJmp((void*)offs, (void*)alt, sz, "my_" #A, h, &real_##A);} + #define GO2(A, B) if(!strcmp(symname, #A) && (BOX64ENV(malloc_hack)>1)) {uintptr_t alt = AddCheckBridge(my_context->system, B, my_##A, 0, "my_" #A); printf_log(LOG_DEBUG, "Redirecting %s function from %p (%s)\n", symname, (void*)offs, ElfName(h)); addRelocJmp((void*)offs, (void*)alt, sz, "my_" #A, h, &real_##A);} SUPER() #undef GO #undef GO2 @@ -922,7 +922,7 @@ void checkHookedSymbols(elfheader_t* h) #undef GO #undef GO2 #define GO(A, B) if(!strcmp(symname, #A)) {uintptr_t alt = AddCheckBridge(my_context->system, B, A, 0, #A); printf_log(LOG_DEBUG, "Redirecting %s function from %p (%s)\n", symname, (void*)offs, ElfName(h)); addRelocJmp((void*)offs, (void*)alt, sz, #A, h, &real_##A);} - #define GO2(A, B) if(!strcmp(symname, #A) && (box64_malloc_hack>1)) {uintptr_t alt = AddCheckBridge(my_context->system, B, my_##A, 0, "my_" #A); printf_log(LOG_DEBUG, "Redirecting %s function from %p (%s)\n", symname, (void*)offs, ElfName(h)); addRelocJmp((void*)offs, (void*)alt, sz, "my_" #A, h, &real_##A);} + #define GO2(A, B) if(!strcmp(symname, #A) && (BOX64ENV(malloc_hack)>1)) {uintptr_t alt = AddCheckBridge(my_context->system, B, my_##A, 0, "my_" #A); printf_log(LOG_DEBUG, "Redirecting %s function from %p (%s)\n", symname, (void*)offs, ElfName(h)); addRelocJmp((void*)offs, (void*)alt, sz, "my_" #A, h, &real_##A);} SUPER() #undef GO #undef GO2 @@ -930,7 +930,7 @@ void checkHookedSymbols(elfheader_t* h) } } } - if(box64_malloc_hack==2) + if(BOX64ENV(malloc_hack)==2) h->malloc_hook_2 = 1; } diff --git a/src/steam.c b/src/steam.c index 710fb849..5ed2c39c 100644 --- a/src/steam.c +++ b/src/steam.c @@ -165,18 +165,18 @@ void pressure_vessel(int argc, const char** argv, int nextarg, const char* prog) newargv[i-nextarg] = argv[i]; } } + //setenv("BOX64_PREFER_EMULATED", "1", 1); //setenv("BOX86_PREFER_EMULATED", "1", 1); - -//setenv("BOX64_TRACE_FILE", "/home/seb/trace64-%pid.txt", 1); -//setenv("BOX86_TRACE_FILE", "/home/seb/trace86-%pid.txt", 1); -//setenv("BOX86_LOG", "1", 1); -//setenv("BOX64_LOG", "1", 1); -//setenv("BOX86_SHOWSEGV", "1", 1); -//setenv("BOX64_DLSYM_ERROR", "1", 1); -//setenv("BOX64_SHOWSEGV", "1", 1); -//setenv("BOX64_SHOWBT", "1", 1); -//setenv("BOX64_DYNAREC_LOG", "1", 1); + //setenv("BOX64_TRACE_FILE", "/home/seb/trace64-%pid.txt", 1); + //setenv("BOX86_TRACE_FILE", "/home/seb/trace86-%pid.txt", 1); + //setenv("BOX86_LOG", "1", 1); + //setenv("BOX64_LOG", "1", 1); + //setenv("BOX86_SHOWSEGV", "1", 1); + //setenv("BOX64_DLSYM_ERROR", "1", 1); + //setenv("BOX64_SHOWSEGV", "1", 1); + //setenv("BOX64_SHOWBT", "1", 1); + //setenv("BOX64_DYNAREC_LOG", "1", 1); printf_log(LOG_DEBUG, "Run %s %s and wait\n", x86?"i386":(x64?"x86_64":""), argv[nextarg]); pid_t v = vfork(); diff --git a/src/tools/env.c b/src/tools/env.c new file mode 100644 index 00000000..dc31dbe7 --- /dev/null +++ b/src/tools/env.c @@ -0,0 +1,501 @@ +#include <stdbool.h> 
+#include <stdio.h> +#include <stdlib.h> +#include <stddef.h> +#include <sys/mman.h> +#include <sys/stat.h> +#include <fcntl.h> +#include <string.h> + +#include "env.h" +#include "khash.h" +#include "debug.h" +#include "fileutils.h" +#include "box64context.h" + +box64env_t box64env = { 0 }; + +KHASH_MAP_INIT_STR(box64env_entry, box64env_t) +static kh_box64env_entry_t* box64env_entries = NULL; +static kh_box64env_entry_t* box64env_entries_gen = NULL; + + +static const char default_rcfile[] = +"[bash]\n" +"BOX64_LOG=0\n" +"\n" +"[deadcells]\n" +"BOX64_PREFER_EMULATED=1\n" +"\n" +"[dontstarve]\n" +"BOX64_EMULATED_LIBS=libSDL2-2.0.so.0\n" +"\n" +"[dota2]\n" +"BOX64_CRASHHANDLER=1\n" +"BOX64_DYNAREC_STRONGMEM=1\n" +"\n" +"[factorio]\n" +"BOX64_DYNAREC_FASTROUND=0\n" +"\n" +"[heroic]\n" +"BOX64_NOSANDBOX=1\n" +"BOX64_MALLOC_HACK=2\n" +"\n" +"[LotCG.x86_64]\n" +"BOX64_DYNAREC_FASTROUND=0\n" +"\n" +"[Mini Metro]\n" +"BOX64_ADDLIBS=stdc++.so.6\n" +"\n" +"[pressure-vessel-wrap]\n" +"BOX64_NOGTK=1\n" +"\n" +"[ShovelKnight]\n" +"BOX64_SDL2_JGUID=1\n" +"\n" +"[Soma.bin.x86_64]\n" +"BOX64_DYNAREC_FASTROUND=0\n" +"\n" +"[streaming_client]\n" +"BOX64_EMULATED_LIBS=libSDL2-2.0.so.0:libSDL2_ttf-2.0.so.0\n" +"\n" +"[steam-runtime-check-requirements]\n" +"BOX64_EXIT=1\n" +"\n" +"[steam-runtime-launcher-service]\n" +"BOX64_EXIT=1\n" +; + +static void applyCustomRules() +{ + if (BOX64ENV(log) == LOG_NEVER) { + SET_BOX64ENV(log, BOX64ENV(log) - 1); + SET_BOX64ENV(dump, 1); + } + + if (BOX64ENV(rolling_log) == 1) { + SET_BOX64ENV(rolling_log, 16); + } + if (BOX64ENV(rolling_log) && BOX64ENV(log) > LOG_INFO) { + SET_BOX64ENV(rolling_log, 0); + } + + if (box64env.is_dynarec_test_str_overridden) { + if (strlen(box64env.dynarec_test_str) == 1) { + if (box64env.dynarec_test_str[0] >= '0' && box64env.dynarec_test_str[0] <= '1') + box64env.dynarec_test = box64env.dynarec_test_str[0] - '0'; + + box64env.dynarec_test_start = 0x0; + box64env.dynarec_test_end = 0x0; + } else if (strchr(box64env.dynarec_test_str, '-')) { + if (sscanf(box64env.dynarec_test_str, "%ld-%ld", &box64env.dynarec_test_start, &box64env.dynarec_test_end) != 2) { + if (sscanf(box64env.dynarec_test_str, "0x%lX-0x%lX", &box64env.dynarec_test_start, &box64env.dynarec_test_end) != 2) + sscanf(box64env.dynarec_test_str, "%lx-%lx", &box64env.dynarec_test_start, &box64env.dynarec_test_end); + } + if (box64env.dynarec_test_end > box64env.dynarec_test_start) { + box64env.dynarec_test = 1; + } else { + box64env.dynarec_test = 0; + } + } + } + + if (box64env.is_nodynarec_overridden) { + if(box64env.nodynarec) { + if (strchr(box64env.nodynarec,'-')) { + if(sscanf(box64env.nodynarec, "%ld-%ld", &box64env.nodynarec_start, &box64env.nodynarec_end)!=2) { + if(sscanf(box64env.nodynarec, "0x%lX-0x%lX", &box64env.nodynarec_start, &box64env.nodynarec_end)!=2) + sscanf(box64env.nodynarec, "%lx-%lx", &box64env.nodynarec_start, &box64env.nodynarec_end); + } + } + } + } + + if (box64env.dynarec_test) { + SET_BOX64ENV(dynarec_fastnan, 0); + SET_BOX64ENV(dynarec_fastround, 0); + SET_BOX64ENV(dynarec_x87double, 1); + SET_BOX64ENV(dynarec_div0, 1); + SET_BOX64ENV(dynarec_callret, 0); +#if defined(RV64) || defined(LA64) + SET_BOX64ENV(dynarec_nativeflags, 0); +#endif + } + + if (box64env.maxcpu == 0 || (!box64_wine && box64env.new_maxcpu < box64env.maxcpu)) { + box64env.maxcpu = box64env.new_maxcpu; + } + + if (box64env.dynarec_perf_map) { + char pathname[32]; + snprintf(pathname, sizeof(pathname), "/tmp/perf-%d.map", getpid()); + SET_BOX64ENV(dynarec_perf_map_fd, open(pathname, 
O_CREAT | O_RDWR | O_APPEND, S_IRUSR | S_IWUSR)); + } + if (!box64env.libgl) { + const char *p = getenv("SDL_VIDEO_GL_DRIVER"); + if(p) SET_BOX64ENV(libgl, box_strdup(p)); + } + if (box64env.avx == 2) { + box64env.avx = 1; + box64env.avx2 = 1; + } + + if (box64env.exit) exit(0); + + if (box64env.env) setenv("BOX64_ENV", "1", 1); + if (box64env.env1) setenv("BOX64_ENV1", "1", 1); + if (box64env.env2) setenv("BOX64_ENV2", "1", 1); + if (box64env.env3) setenv("BOX64_ENV3", "1", 1); + if (box64env.env4) setenv("BOX64_ENV4", "1", 1); + if (box64env.env5) setenv("BOX64_ENV5", "1", 1); +} + +static void trimStringInplace(char* s) +{ + if (!s) return; + // trim right space/tab + size_t len = strlen(s); + while (len && (s[len - 1] == ' ' || s[len - 1] == '\t' || s[len - 1] == '\n')) + s[--len] = '\0'; + // trim left space/tab + while (s[0] == ' ' || s[0] == '\t') + memmove(s, s + 1, strlen(s)); +} + +static void freeEnv(box64env_t* env) +{ +#define INTEGER(NAME, name, default, min, max) +#define INTEGER64(NAME, name, default) +#define BOOLEAN(NAME, name, default) +#define ADDRESS(NAME, name) +#define STRING(NAME, name) box_free(env->name); + ENVSUPER() +#undef INTEGER +#undef INTEGER64 +#undef BOOLEAN +#undef ADDRESS +#undef STRING +} + +static void pushNewEntry(const char* name, box64env_t* env, int gen) +{ + khint_t k; + kh_box64env_entry_t* khp = gen ? box64env_entries_gen : box64env_entries; + k = kh_get(box64env_entry, khp, name); + if (k == kh_end(khp)) { + int ret; + k = kh_put(box64env_entry, khp, strdup(name), &ret); + } else { + freeEnv(&kh_value(khp, k)); + } + box64env_t* p = &kh_value(khp, k); + memcpy(p, env, sizeof(box64env_t)); +} + +#ifdef ANDROID +static int shm_open(const char *name, int oflag, mode_t mode) { + return -1; +} +static int shm_unlink(const char *name) { + return -1; +} +#endif + +static void initializeEnvFile(const char* filename) +{ + if (box64env.noenvfiles) return; + + FILE* f = NULL; + if (filename) + f = fopen(filename, "r"); + else { + #define TMP_MEMRCFILE "/box64_rcfile" + int tmp = shm_open(TMP_MEMRCFILE, O_RDWR | O_CREAT, S_IRWXU); + if(tmp<0) return; // error, bye bye + shm_unlink(TMP_MEMRCFILE); // remove the shm file, but it will still exist because it's currently in use + int dummy = write(tmp, default_rcfile, sizeof(default_rcfile)); + (void)dummy; + lseek(tmp, 0, SEEK_SET); + f = fdopen(tmp, "r"); + } + if (!f) { + printf("Error: Cannot open env file %s\n", filename); + return; + } + + if (!box64env_entries) + box64env_entries = kh_init(box64env_entry); + if (!box64env_entries_gen) + box64env_entries_gen = kh_init(box64env_entry); + + box64env_t current_env = { 0 }; + size_t linesize = 0, len = 0; + char *line = NULL, *current_name = NULL; + int ret; + bool is_wildcard_name = false; + while ((ret = getline(&line, &linesize, f)) != -1) { + // remove comments + char* p = strchr(line, '#'); + if (p) *p = '\0'; + trimStringInplace(line); + len = strlen(line); + if (line[0] == '[' && strchr(line, ']')) { + // new entry, push the previous one + if (current_name) + pushNewEntry(current_name, &current_env, is_wildcard_name); + is_wildcard_name = (line[1] == '*' && line[(intptr_t)(strchr(line, ']') - line) - 1] == '*'); + memset(&current_env, 0, sizeof(current_env)); + box_free(current_name); + current_name = LowerCase(line + (is_wildcard_name ? 2 : 1)); + *(strchr(current_name, ']') + 1 - (is_wildcard_name ?
2 : 1)) = '\0'; + trimStringInplace(current_name); + } else if (strchr(line, '=')) { + char* key = line; + char* val = strchr(key, '=') + 1; + *strchr(key, '=') = '\0'; + trimStringInplace(key); + trimStringInplace(val); +#define INTEGER(NAME, name, default, min, max) \ + else if (!strcmp(key, #NAME)) \ + { \ + int v = strtol(val, &p, 0); \ + if (p != val && v >= min && v <= max) { \ + current_env.is_##name##_overridden = 1; \ + current_env.name = v; \ + } \ + } +#define INTEGER64(NAME, name, default) \ + else if (!strcmp(key, #NAME)) \ + { \ + int64_t v = strtoll(val, &p, 0); \ + if (p != val) { \ + current_env.is_##name##_overridden = 1; \ + current_env.name = v; \ + } \ + } +#define BOOLEAN(NAME, name, default) \ + else if (!strcmp(key, #NAME)) \ + { \ + if (strcmp(val, "0")) { \ + current_env.is_##name##_overridden = 1; \ + current_env.name = 1; \ + } else { \ + current_env.is_##name##_overridden = 1; \ + current_env.name = 0; \ + } \ + } +#define ADDRESS(NAME, name) \ + else if (!strcmp(key, #NAME)) \ + { \ + uintptr_t v = (uintptr_t)strtoll(val, &p, 0); \ + if (p != val) { \ + current_env.is_##name##_overridden = 1; \ + current_env.name = v; \ + } \ + } +#define STRING(NAME, name) \ + else if (!strcmp(key, #NAME)) \ + { \ + current_env.is_##name##_overridden = 1; \ + if (current_env.name) box_free(current_env.name); \ + current_env.name = strdup(val); \ + } + if (0) + ; + ENVSUPER() + else if (len && current_name) + { + printf_log(LOG_INFO, "BOX64ENV: Warning, unsupported %s=%s for [%s] in %s\n", key, val, current_name, filename); + } +#undef INTEGER +#undef INTEGER64 +#undef BOOLEAN +#undef ADDRESS +#undef STRING + } + } + // push the last entry + if (current_name) { + pushNewEntry(current_name, &current_env, is_wildcard_name); + box_free(current_name); + } + box_free(line); + fclose(f); +} + + +void InitializeEnvFiles() +{ + if (BOX64ENV(envfile) && FileExist(BOX64ENV(envfile), IS_FILE)) + initializeEnvFile(BOX64ENV(envfile)); +#ifndef TERMUX + else if (FileExist("/etc/box64.box64rc", IS_FILE)) + initializeEnvFile("/etc/box64.box64rc"); + else if (FileExist("/data/data/com.termux/files/usr/glibc/etc/box64.box64rc", IS_FILE)) + initializeEnvFile("/data/data/com.termux/files/usr/glibc/etc/box64.box64rc"); +#else + else if (FileExist("/data/data/com.termux/files/usr/etc/box64.box64rc", IS_FILE)) + initializeEnvFile("/data/data/com.termux/files/usr/etc/box64.box64rc"); +#endif + else + initializeEnvFile(NULL); // load default rcfile + + char* p = getenv("HOME"); + if (p) { + static char tmp[4096]; + strncpy(tmp, p, 4095); + strncat(tmp, "/.box64rc", 4095); + if (FileExist(tmp, IS_FILE)) { + initializeEnvFile(tmp); + } + } +} + +static char old_entryname[256] = ""; +const char* GetLastApplyEntryName() +{ + return old_entryname; +} +static void internalEnvFileEntry(const char* entryname, const box64env_t* env) +{ +#define INTEGER(NAME, name, default, min, max) \ + if (env->is_##name##_overridden) { \ + box64env.name = env->name; \ + box64env.is_##name##_overridden = 1; \ + } +#define INTEGER64(NAME, name, default) \ + if (env->is_##name##_overridden) { \ + box64env.name = env->name; \ + box64env.is_##name##_overridden = 1; \ + } +#define BOOLEAN(NAME, name, default) \ + if (env->is_##name##_overridden) { \ + box64env.name = env->name; \ + box64env.is_##name##_overridden = 1; \ + } +#define ADDRESS(NAME, name) \ + if (env->is_##name##_overridden) { \ + box64env.name = env->name; \ + box64env.is_##name##_overridden = 1; \ + } +#define STRING(NAME, name) \ + if (env->is_##name##_overridden) { \ +
box64env.name = env->name; \ + box64env.is_##name##_overridden = 1; \ + } + ENVSUPER() +#undef INTEGER +#undef INTEGER64 +#undef BOOLEAN +#undef ADDRESS +#undef STRING +} + +void ApplyEnvFileEntry(const char* entryname) +{ + if (!entryname || !box64env_entries) return; + if (!strcasecmp(entryname, old_entryname)) return; + + strncpy(old_entryname, entryname, 255); + khint_t k1; + { + char* lowercase_entryname = LowerCase(entryname); + k1 = kh_get(box64env_entry, box64env_entries, lowercase_entryname); + box64env_t* env; + const char* k2; + kh_foreach_ref(box64env_entries_gen, k2, env, + if (strstr(lowercase_entryname, k2)) + internalEnvFileEntry(entryname, env);) + box_free(lowercase_entryname); + } + if (k1 == kh_end(box64env_entries)) return; + + box64env_t* env = &kh_value(box64env_entries, k1); + internalEnvFileEntry(entryname, env); + applyCustomRules(); +} + +void LoadEnvVariables() +{ +#define INTEGER(NAME, name, default, min, max) box64env.name = default; +#define INTEGER64(NAME, name, default) box64env.name = default; +#define BOOLEAN(NAME, name, default) box64env.name = default; +#define ADDRESS(NAME, name) box64env.name = 0; +#define STRING(NAME, name) box64env.name = NULL; + ENVSUPER() +#undef INTEGER +#undef INTEGER64 +#undef BOOLEAN +#undef ADDRESS +#undef STRING + + char* p; + // load env vars from getenv() +#define INTEGER(NAME, name, default, min, max) \ + p = getenv(#NAME); \ + if (p) { \ + box64env.name = atoi(p); \ + if (box64env.name < min || box64env.name > max) { \ + box64env.name = default; \ + } else { \ + box64env.is_##name##_overridden = 1; \ + } \ + } +#define INTEGER64(NAME, name, default) \ + p = getenv(#NAME); \ + if (p) { \ + box64env.name = atoll(p); \ + box64env.is_##name##_overridden = 1; \ + } +#define BOOLEAN(NAME, name, default) \ + p = getenv(#NAME); \ + if (p) { \ + box64env.name = p[0] != '0'; \ + box64env.is_##name##_overridden = 1; \ + } +#define ADDRESS(NAME, name) \ + p = getenv(#NAME); \ + if (p) { \ + box64env.name = (uintptr_t)atoll(p); \ + box64env.is_##name##_overridden = 1; \ + } +#define STRING(NAME, name) \ + p = getenv(#NAME); \ + if (p) { \ + box64env.name = strdup(p); \ + box64env.is_##name##_overridden = 1; \ + } + ENVSUPER() +#undef INTEGER +#undef INTEGER64 +#undef BOOLEAN +#undef ADDRESS +#undef STRING + applyCustomRules(); +} + +void PrintEnvVariables() +{ + printf_log(LOG_INFO, "BOX64ENV: Variables overridden via env and/or RC file:\n"); +#define INTEGER(NAME, name, default, min, max) \ + if (box64env.is_##name##_overridden) \ + printf_log(LOG_INFO, "\t%s=%d\n", #NAME, box64env.name); +#define INTEGER64(NAME, name, default) \ + if (box64env.is_##name##_overridden) \ + printf_log(LOG_INFO, "\t%s=%lld\n", #NAME, box64env.name); +#define BOOLEAN(NAME, name, default) \ + if (box64env.is_##name##_overridden) \ + printf_log(LOG_INFO, "\t%s=%d\n", #NAME, box64env.name); +#define ADDRESS(NAME, name) \ + if (box64env.is_##name##_overridden) \ + printf_log(LOG_INFO, "\t%s=%p\n", #NAME, (void*)box64env.name); +#define STRING(NAME, name) \ + if (box64env.is_##name##_overridden) \ + printf_log(LOG_INFO, "\t%s=%s\n", #NAME, box64env.name); + ENVSUPER() +#undef INTEGER +#undef INTEGER64 +#undef BOOLEAN +#undef ADDRESS +#undef STRING +} diff --git a/src/tools/my_cpuid.c b/src/tools/my_cpuid.c index 7316937a..b07e19c9 100644 --- a/src/tools/my_cpuid.c +++ b/src/tools/my_cpuid.c @@ -108,8 +108,8 @@ int getNCpu() { if(!nCPU) grabNCpu(); - if(box64_maxcpu && nCPU>box64_maxcpu) - return box64_maxcpu; + if(BOX64ENV(maxcpu) && nCPU>BOX64ENV(maxcpu)) + 
return BOX64ENV(maxcpu); return nCPU; } @@ -200,7 +200,7 @@ void my_cpuid(x64emu_t* emu, uint32_t tmp32u) int ncpu = getNCpu(); if(!ncpu) ncpu = 1; int ncluster = 1; - if(box64_cputype) { + if(BOX64ENV(cputype)) { while(ncpu>256) { ncluster++; // do cluster of 256 cpus... if(ncpu>=256) @@ -228,8 +228,8 @@ void my_cpuid(x64emu_t* emu, uint32_t tmp32u) switch(tmp32u) { case 0x0: // emulate a P4. TODO: Emulate a Core2? - R_EAX = box64_cputype?0x0000000f:0x0000000f;//was 0x15 before, but something seems wrong for leaf 0x15, and cpu-z take that as pure cpu speed... - if(box64_cputype) { + R_EAX = BOX64ENV(cputype)?0x0000000f:0x0000000f;//was 0x15 before, but something seems wrong for leaf 0x15, and cpu-z take that as pure cpu speed... + if(BOX64ENV(cputype)) { // return AuthenticAMD R_EBX = 0x68747541; R_ECX = 0x444d4163; @@ -242,7 +242,7 @@ void my_cpuid(x64emu_t* emu, uint32_t tmp32u) } break; case 0x1: - if(box64_cputype) { + if(BOX64ENV(cputype)) { R_EAX = (0xc<<0) | // stepping 0-3 (0x1<<4) | // base model 4-7 (0xf<<8) | // base familly 8-11 @@ -261,7 +261,7 @@ void my_cpuid(x64emu_t* emu, uint32_t tmp32u) (0x0<<20)| // extended familly 0 ; // family and all, simulating Haswell type of cpu } - R_EBX = 0 | (8<<0x8) | ((box64_cputype?0:ncluster)<<16); // Brand index, CLFlush (8), Max APIC ID (16-23), Local APIC ID (24-31) + R_EBX = 0 | (8<<0x8) | ((BOX64ENV(cputype)?0:ncluster)<<16); // Brand index, CLFlush (8), Max APIC ID (16-23), Local APIC ID (24-31) /*{ int cpu = sched_getcpu(); if(cpu<0) cpu=0; @@ -289,31 +289,31 @@ void my_cpuid(x64emu_t* emu, uint32_t tmp32u) | 1<<24 // fxsr (fxsave, fxrestore) | 1<<25 // SSE | 1<<26 // SSE2 - | (box64_cputype?0:1)<<28 // HT / Multi-core + | (BOX64ENV(cputype)?0:1)<<28 // HT / Multi-core ; R_ECX = 1<<0 // SSE3 | 1<<1 // PCLMULQDQ - | (box64_cputype?0:1)<<2 // DS 64bits + | (BOX64ENV(cputype)?0:1)<<2 // DS 64bits | 1<<3 // Monitor/MWait (priviledge instructions) - | (box64_cputype?0:1)<<5 // VMX //is that usefull + | (BOX64ENV(cputype)?0:1)<<5 // VMX //is that usefull | 1<<9 // SSSE3 - | box64_avx2<<12 // fma + | BOX64ENV(avx2)<<12 // fma | 1<<13 // cx16 (cmpxchg16) | 1<<19 // SSE4_1 - | box64_sse42<<20 // SSE4_2 can be hiden + | BOX64ENV(sse42)<<20 // SSE4_2 can be hiden | 1<<22 // MOVBE | 1<<23 // POPCOUNT | 1<<25 // aesni - | box64_avx<<26 // xsave - | box64_avx<<27 // osxsave - | box64_avx<<28 // AVX - | box64_avx<<29 // F16C - | box64_avx2<<30 // RDRAND + | BOX64ENV(avx)<<26 // xsave + | BOX64ENV(avx)<<27 // osxsave + | BOX64ENV(avx)<<28 // AVX + | BOX64ENV(avx)<<29 // F16C + | BOX64ENV(avx2)<<30 // RDRAND | 0<<31 // Hypervisor guest running ; break; case 0x2: - if(box64_cputype) { + if(BOX64ENV(cputype)) { // reserved R_EAX = R_EBX = R_ECX = R_EDX = 0 ; } else { @@ -326,7 +326,7 @@ void my_cpuid(x64emu_t* emu, uint32_t tmp32u) break; case 0x4: - if(box64_cputype) { + if(BOX64ENV(cputype)) { // reserved R_EAX = R_EBX = R_ECX = R_EDX = 0 ; } else { @@ -381,23 +381,23 @@ void my_cpuid(x64emu_t* emu, uint32_t tmp32u) if(R_ECX==0) { R_EAX = 0; R_EBX = - box64_avx<<3 | // BMI1 - box64_avx2<<5 | //AVX2 - (box64_cputype?0:1)<<6 | // FDP_EXCPTN_ONLY + BOX64ENV(avx)<<3 | // BMI1 + BOX64ENV(avx2)<<5 | //AVX2 + (BOX64ENV(cputype)?0:1)<<6 | // FDP_EXCPTN_ONLY 1<<7 | // SMEP - box64_avx2<<8 | //BMI2 - (box64_cputype?0:1)<<9 | // Enhanced REP MOVSB // is it a good idea? + BOX64ENV(avx2)<<8 | //BMI2 + (BOX64ENV(cputype)?0:1)<<9 | // Enhanced REP MOVSB // is it a good idea? 
1<<10 | //INVPCID (priviledge instruction - (box64_cputype?0:1)<<13 | // Deprecates FPU CS and FPU DS + (BOX64ENV(cputype)?0:1)<<13 | // Deprecates FPU CS and FPU DS 0<<18 | // RDSEED - box64_avx2<<19 | //ADX + BOX64ENV(avx2)<<19 | //ADX 1<<23 | // CLFLUSHOPT 1<<24 | // CLWB - box64_shaext<<29| // SHA extension + BOX64ENV(shaext)<<29| // SHA extension 0; R_RCX = - box64_avx<<9 | //VAES - box64_avx2<<10 | //VPCLMULQDQ. + BOX64ENV(avx)<<9 | //VAES + BOX64ENV(avx2)<<10 | //VPCLMULQDQ. 1<<22 | // RDPID 0; R_RDX = 0; @@ -405,7 +405,7 @@ void my_cpuid(x64emu_t* emu, uint32_t tmp32u) } else {R_EAX = R_ECX = R_EBX = R_EDX = 0;} break; case 0xB: - if(box64_cputype) { + if(BOX64ENV(cputype)) { // reserved R_EAX = R_EBX = R_ECX = R_EDX = 0 ; } else { @@ -416,7 +416,7 @@ void my_cpuid(x64emu_t* emu, uint32_t tmp32u) } break; case 0xC: - if(box64_cputype) { + if(BOX64ENV(cputype)) { // reserved R_EAX = R_EBX = R_ECX = R_EDX = 0 ; } else { @@ -453,7 +453,7 @@ void my_cpuid(x64emu_t* emu, uint32_t tmp32u) R_EAX = 0; break; case 0xF: - if(box64_cputype) { + if(BOX64ENV(cputype)) { // reserved R_EAX = R_EBX = R_ECX = R_EDX = 0 ; } else { @@ -475,7 +475,7 @@ void my_cpuid(x64emu_t* emu, uint32_t tmp32u) } break; case 0x14: - if(box64_cputype) { + if(BOX64ENV(cputype)) { // reserved R_EAX = R_EBX = R_ECX = R_EDX = 0 ; } else { @@ -492,7 +492,7 @@ void my_cpuid(x64emu_t* emu, uint32_t tmp32u) } break; case 0x15: - if(box64_cputype) { + if(BOX64ENV(cputype)) { // reserved R_EAX = R_EBX = R_ECX = R_EDX = 0 ; } else { @@ -512,7 +512,7 @@ void my_cpuid(x64emu_t* emu, uint32_t tmp32u) break; case 0x80000000: // max extended - if(box64_cputype) { + if(BOX64ENV(cputype)) { R_EAX = 0x8000001a; R_EBX = 0x68747541; R_ECX = 0x444d4163; @@ -522,7 +522,7 @@ void my_cpuid(x64emu_t* emu, uint32_t tmp32u) } break; case 0x80000001: //Extended Processor Signature and Feature Bits - if(box64_cputype) { + if(BOX64ENV(cputype)) { R_EAX = (0xc<<0) | // stepping 0-3 (0x1<<4) | // base model 4-7 (0xf<<8) | // base familly 8-11 @@ -606,7 +606,7 @@ void my_cpuid(x64emu_t* emu, uint32_t tmp32u) R_EDX = ((uint32_t*)branding)[11]; break; case 0x80000005: - if(box64_cputype) { + if(BOX64ENV(cputype)) { //L1 cache and TLB R_EAX = 0; R_EBX = 0; @@ -620,7 +620,7 @@ void my_cpuid(x64emu_t* emu, uint32_t tmp32u) } break; case 0x80000006: // L2 cache line size and associativity - if(box64_cputype) { + if(BOX64ENV(cputype)) { R_EAX = 0; R_EBX = 0; R_ECX = 64 | (0x6<<12) | (256<<16); // bits: 0-7 line size, 15-12: assoc (using special encoding), 31-16: size in K //TODO: read info from /sys/devices/system/cpu/cpuX/cache/index2 @@ -633,7 +633,7 @@ void my_cpuid(x64emu_t* emu, uint32_t tmp32u) } break; case 0x80000007: - if(box64_cputype) { + if(BOX64ENV(cputype)) { // Advanced Power Management Information R_EAX = 0; R_EBX = 0; @@ -648,7 +648,7 @@ void my_cpuid(x64emu_t* emu, uint32_t tmp32u) } break; case 0x80000008: - if(box64_cputype) { + if(BOX64ENV(cputype)) { // Address Size And Physical Core Count Information R_EAX = 0; // 23-16 guest / 15-8 linear / 7-0 phys R_EBX = 0; // reserved @@ -663,7 +663,7 @@ void my_cpuid(x64emu_t* emu, uint32_t tmp32u) } break; case 0x8000000a: - if(box64_cputype) { + if(BOX64ENV(cputype)) { // SVM Revision and Feature Identification R_EAX = 0; R_EBX = 0; @@ -678,7 +678,7 @@ void my_cpuid(x64emu_t* emu, uint32_t tmp32u) } break; case 0x8000001a: - if(box64_cputype) { + if(BOX64ENV(cputype)) { // Performance Optimization Identifiers R_EAX = 1<<0 // FP128 | 1<<1 // MOVU diff --git a/src/tools/rcfile.c 
b/src/tools/rcfile.c deleted file mode 100644 index 674ec52d..00000000 --- a/src/tools/rcfile.c +++ /dev/null @@ -1,783 +0,0 @@ -#include <stdio.h> -#include <stdlib.h> -#include <stddef.h> -#include <sys/mman.h> -#include <sys/stat.h> -#include <fcntl.h> - -#include "debug.h" -#include "rcfile.h" -#include "box64context.h" -#include "fileutils.h" -#include "pathcoll.h" -#include "x64emu.h" -#ifdef HAVE_TRACE -#include "x64trace.h" -#endif -#include "custommem.h" -#include "khash.h" - -// This file handle the box64rc files -// file are basicaly ini file, with section [XXXX] defining the name of the process -// and BOX64_XXXX=YYYY entry like the env. var. variables - -// default rcfile -static const char default_rcfile[] = -"[bash]\n" -"BOX64_LOG=0\n" -"\n" -"[deadcells]\n" -"BOX64_PREFER_EMULATED=1\n" -"\n" -"[dontstarve]\n" -"BOX64_EMULATED_LIBS=libSDL2-2.0.so.0\n" -"\n" -"[dota2]\n" -"BOX64_CRASHHANDLER=1\n" -"BOX64_DYNAREC_STRONGMEM=1\n" -"\n" -"[factorio]\n" -"BOX64_DYNAREC_FASTROUND=0\n" -"\n" -"[heroic]\n" -"BOX64_NOSANDBOX=1\n" -"BOX64_MALLOC_HACK=2\n" -"\n" -"[LotCG.x86_64]\n" -"BOX64_DYNAREC_FASTROUND=0\n" -"\n" -"[Mini Metro]\n" -"BOX64_ADDLIBS=stdc++.so.6\n" -"\n" -"[pressure-vessel-wrap]\n" -"BOX64_NOGTK=1\n" -"\n" -"[ShovelKnight]\n" -"BOX64_SDL2_JGUID=1\n" -"\n" -"[Soma.bin.x86_64]\n" -"BOX64_DYNAREC_FASTROUND=0\n" -"\n" -"[streaming_client]\n" -"BOX64_EMULATED_LIBS=libSDL2-2.0.so.0:libSDL2_ttf-2.0.so.0\n" -"\n" -"[steam-runtime-check-requirements]\n" -"BOX64_EXIT=1\n" -"\n" -"[steam-runtime-launcher-service]\n" -"BOX64_EXIT=1\n" -; - -// list of all entries -#define SUPER1() \ -ENTRYINTPOS(BOX64_ROLLING_LOG, new_cycle_log) \ -ENTRYSTRING_(BOX64_LD_LIBRARY_PATH, ld_library_path) \ -ENTRYSTRING_(BOX64_PATH, box64_path) \ -ENTRYSTRING_(BOX64_TRACE_FILE, trace_file) \ -ENTRYADDR(BOX64_LOAD_ADDR, box64_load_addr) \ -ENTRYINT(BOX64_LOG, box64_log, 0, 3, 2) \ -ENTRYBOOL(BOX64_DUMP, box64_dump) \ -ENTRYBOOL(BOX64_DLSYM_ERROR, dlsym_error) \ -CENTRYBOOL(BOX64_NOSIGSEGV, no_sigsegv) \ -CENTRYBOOL(BOX64_NOSIGILL, no_sigill) \ -ENTRYBOOL(BOX64_SHOWSEGV, box64_showsegv) \ -ENTRYBOOL(BOX64_SHOWBT, box64_showbt) \ -ENTRYBOOL(BOX64_MMAP32, box64_mmap32) \ -ENTRYBOOL(BOX64_IGNOREINT3, box64_ignoreint3) \ -IGNORE(BOX64_RDTSC) \ -ENTRYBOOL(BOX64_X11THREADS, box64_x11threads) \ -ENTRYBOOL(BOX64_X11GLX, box64_x11glx) \ -ENTRYDSTRING(BOX64_LIBGL, box64_libGL) \ -ENTRYBOOL(BOX64_SSE_FLUSHTO0, box64_sse_flushto0) \ -ENTRYBOOL(BOX64_X87_NO80BITS, box64_x87_no80bits) \ -ENTRYBOOL(BOX64_SYNC_ROUNDING, box64_sync_rounding) \ -ENTRYSTRING_(BOX64_EMULATED_LIBS, emulated_libs) \ -ENTRYBOOL(BOX64_ALLOWMISSINGLIBS, allow_missing_libs) \ -ENTRYBOOL(BOX64_PREFER_WRAPPED, box64_prefer_wrapped) \ -ENTRYBOOL(BOX64_PREFER_EMULATED, box64_prefer_emulated) \ -ENTRYBOOL(BOX64_WRAP_EGl, box64_wrap_egl) \ -ENTRYBOOL(BOX64_CRASHHANDLER, box64_dummy_crashhandler) \ -ENTRYBOOL(BOX64_NOPULSE, box64_nopulse) \ -ENTRYBOOL(BOX64_NOGTK, box64_nogtk) \ -ENTRYBOOL(BOX64_NOVULKAN, box64_novulkan) \ -ENTRYBOOL(BOX64_RDTSC_1GHZ, box64_rdtsc_1ghz) \ -ENTRYBOOL(BOX64_SHAEXT, box64_shaext) \ -ENTRYBOOL(BOX64_SSE42, box64_sse42) \ -ENTRYINT(BOX64_AVX, new_avx, 0, 2, 2) \ -ENTRYBOOL(BOX64_FUTEX_WAITV, box64_futex_waitv) \ -ENTRYSTRING_(BOX64_BASH, bash) \ -ENTRYINT(BOX64_JITGDB, jit_gdb, 0, 3, 2) \ -ENTRYBOOL(BOX64_NOSANDBOX, box64_nosandbox) \ -ENTRYBOOL(BOX64_INPROCESSGPU, box64_inprocessgpu) \ -ENTRYBOOL(BOX64_CEFDISABLEGPU, box64_cefdisablegpu) \ -ENTRYBOOL(BOX64_CEFDISABLEGPUCOMPOSITOR, box64_cefdisablegpucompositor)\ 
-ENTRYBOOL(BOX64_EXIT, want_exit) \ -ENTRYBOOL(BOX64_LIBCEF, box64_libcef) \ -ENTRYBOOL(BOX64_JVM, box64_jvm) \ -ENTRYBOOL(BOX64_UNITYPLAYER, box64_unityplayer) \ -ENTRYBOOL(BOX64_SDL2_JGUID, box64_sdl2_jguid) \ -ENTRYINT(BOX64_MALLOC_HACK, box64_malloc_hack, 0, 2, 2) \ -ENTRYINTPOS(BOX64_MAXCPU, new_maxcpu) \ -ENTRYSTRING_(BOX64_ADDLIBS, new_addlibs) \ -ENTRYSTRING_(BOX64_ENV, new_env) \ -ENTRYSTRING_(BOX64_ENV1, new_env1) \ -ENTRYSTRING_(BOX64_ENV2, new_env2) \ -ENTRYSTRING_(BOX64_ENV3, new_env3) \ -ENTRYSTRING_(BOX64_ENV4, new_env4) \ -ENTRYSTRING_(BOX64_ARGS, new_args) \ -ENTRYSTRING_(BOX64_INSERT_ARGS, insert_args) \ -ENTRYBOOL(BOX64_RESERVE_HIGH, new_reserve_high) \ -ENTRYINT(BOX64_CPUTYPE, box64_cputype, 0, 1, 1) \ - -#ifdef HAVE_TRACE -#define SUPER2() \ -ENTRYSTRING_(BOX64_TRACE, trace) \ -ENTRYULONG(BOX64_TRACE_START, start_cnt) \ -ENTRYSTRING_(BOX64_TRACE_INIT, trace_init) \ -ENTRYBOOL(BOX64_TRACE_XMM, trace_xmm) \ -ENTRYBOOL(BOX64_TRACE_EMM, trace_emm) \ -ENTRYBOOL(BOX64_TRACE_COLOR, trace_regsdiff) \ - -#else -#define SUPER2() \ -IGNORE(BOX64_TRACE) \ -IGNORE(BOX64_TRACE_START) \ -IGNORE(BOX64_TRACE_INIT) \ -IGNORE(BOX64_TRACE_XMM) \ -IGNORE(BOX64_TRACE_EMM) \ -IGNORE(BOX64_TRACE_COLOR) \ - -#endif - -#ifdef DYNAREC -#define SUPER3() \ -ENTRYBOOL(BOX64_DYNAREC, box64_dynarec) \ -ENTRYINT(BOX64_DYNAREC_DUMP, box64_dynarec_dump, 0, 2, 2) \ -ENTRYINT(BOX64_DYNAREC_LOG, box64_dynarec_log, 0, 3, 2) \ -ENTRYINT(BOX64_DYNAREC_BIGBLOCK, box64_dynarec_bigblock, 0, 3, 2) \ -ENTRYSTRING_(BOX64_DYNAREC_FORWARD, box64_dynarec_forward) \ -ENTRYINT(BOX64_DYNAREC_STRONGMEM, box64_dynarec_strongmem, 0, 3, 2) \ -ENTRYINT(BOX64_DYNAREC_WEAKBARRIER, box64_dynarec_weakbarrier, 0, 2, 2) \ -ENTRYINT(BOX64_DYNAREC_PAUSE, box64_dynarec_pause, 0, 3, 2) \ -ENTRYBOOL(BOX64_DYNAREC_X87DOUBLE, box64_dynarec_x87double) \ -ENTRYBOOL(BOX64_DYNAREC_DIV0, box64_dynarec_div0) \ -ENTRYBOOL(BOX64_DYNAREC_FASTNAN, box64_dynarec_fastnan) \ -ENTRYINT(BOX64_DYNAREC_FASTROUND, box64_dynarec_fastround, 0, 2, 2) \ -ENTRYINT(BOX64_DYNAREC_SAFEFLAGS, box64_dynarec_safeflags, 0, 2, 2) \ -ENTRYBOOL(BOX64_DYNAREC_CALLRET, box64_dynarec_callret) \ -ENTRYBOOL(BOX64_DYNAREC_BLEEDING_EDGE, box64_dynarec_bleeding_edge) \ -ENTRYBOOL(BOX64_DYNAREC_JVM, box64_dynarec_jvm) \ -ENTRYBOOL(BOX64_DYNAREC_TBB, box64_dynarec_tbb) \ -IGNORE(BOX64_DYNAREC_HOTPAGE) \ -IGNORE(BOX64_DYNAREC_FASTPAGE) \ -ENTRYBOOL(BOX64_DYNAREC_ALIGNED_ATOMICS, box64_dynarec_aligned_atomics) \ -ENTRYBOOL(BOX64_DYNAREC_NATIVEFLAGS, box64_dynarec_nativeflags) \ -ENTRYBOOL(BOX64_DYNAREC_WAIT, box64_dynarec_wait) \ -ENTRYSTRING_(BOX64_NODYNAREC, box64_nodynarec) \ -ENTRYSTRING_(BOX64_DYNAREC_TEST, box64_dynarec_test) \ -ENTRYBOOL(BOX64_DYNAREC_MISSING, box64_dynarec_missing) \ -ENTRYBOOL(BOX64_DYNAREC_DF, box64_dynarec_df) \ -ENTRYBOOL(BOX64_DYNAREC_DIRTY, box64_dynarec_dirty) \ - -#else -#define SUPER3() \ -IGNORE(BOX64_DYNAREC) \ -IGNORE(BOX64_DYNAREC_DUMP) \ -IGNORE(BOX64_DYNAREC_LOG) \ -IGNORE(BOX64_DYNAREC_BIGBLOCK) \ -IGNORE(BOX64_DYNAREC_FORWARD) \ -IGNORE(BOX64_DYNAREC_STRONGMEM) \ -IGNORE(BOX64_DYNAREC_WEAKBARRIER) \ -IGNORE(BOX64_DYNAREC_PAUSE) \ -IGNORE(BOX64_DYNAREC_X87DOUBLE) \ -IGNORE(BOX64_DYNAREC_DIV0) \ -IGNORE(BOX64_DYNAREC_FASTNAN) \ -IGNORE(BOX64_DYNAREC_FASTROUND) \ -IGNORE(BOX64_DYNAREC_SAFEFLAGS) \ -IGNORE(BOX64_DYNAREC_CALLRET) \ -IGNORE(BOX64_DYNAREC_BLEEDING_EDGE) \ -IGNORE(BOX64_DYNAREC_JVM) \ -IGNORE(BOX64_DYNAREC_TBB) \ -IGNORE(BOX64_DYNAREC_HOTPAGE) \ -IGNORE(BOX64_DYNAREC_FASTPAGE) \ -IGNORE(BOX64_DYNAREC_ALIGNED_ATOMICS) \ 
-IGNORE(BOX64_DYNAREC_NATIVEFLAGS) \ -IGNORE(BOX64_DYNAREC_WAIT) \ -IGNORE(BOX64_NODYNAREC) \ -IGNORE(BOX64_DYNAREC_TEST) \ -IGNORE(BOX64_DYNAREC_MISSING) \ -IGNORE(BOX64_DYNAREC_DF) \ -IGNORE(BOX64_DYNAREC_DIRTY) \ - -#endif - -#if defined(HAVE_TRACE) && defined(DYNAREC) -#define SUPER4() \ -ENTRYBOOL(BOX64_DYNAREC_TRACE, box64_dynarec_trace) \ - -#else -#define SUPER4() \ -IGNORE(BOX64_DYNAREC_TRACE) \ - -#endif - -#define SUPER() \ -SUPER1() \ -SUPER2() \ -SUPER3() \ -SUPER4() - -typedef struct my_params_s { -// is present part -#define ENTRYBOOL(NAME, name) uint8_t is_##name##_present:1; -#define CENTRYBOOL(NAME, name) uint8_t is_##name##_present:1; -#define ENTRYINT(NAME, name, minval, maxval, bits) uint8_t is_##name##_present:1; -#define ENTRYINTPOS(NAME, name) uint8_t is_##name##_present:1; -#define ENTRYSTRING(NAME, name) uint8_t is_##name##_present:1; -#define ENTRYSTRING_(NAME, name) uint8_t is_##name##_present:1; -#define ENTRYDSTRING(NAME, name) uint8_t is_##name##_present:1; -#define ENTRYADDR(NAME, name) uint8_t is_##name##_present:1; -#define ENTRYULONG(NAME, name) uint8_t is_##name##_present:1; -#define IGNORE(NAME) -SUPER() -// done -#undef ENTRYBOOL -#undef CENTRYBOOL -#undef ENTRYINT -#undef ENTRYINTPOS -#undef ENTRYSTRING -#undef ENTRYSTRING_ -#undef ENTRYDSTRING -#undef ENTRYADDR -#undef ENTRYULONG -// the actual fields, in two steps to regroup bit fields together -#define ENTRYBOOL(NAME, name) uint8_t name:1; -#define CENTRYBOOL(NAME, name) uint8_t name:1; -#define ENTRYINT(NAME, name, minval, maxval, bits) uint8_t name:bits; -#define ENTRYINTPOS(NAME, name) -#define ENTRYSTRING(NAME, name) -#define ENTRYSTRING_(NAME, name) -#define ENTRYDSTRING(NAME, name) -#define ENTRYADDR(NAME, name) -#define ENTRYULONG(NAME, name) -SUPER() -// done -#undef ENTRYBOOL -#undef CENTRYBOOL -#undef ENTRYINT -#undef ENTRYINTPOS -#undef ENTRYSTRING -#undef ENTRYSTRING_ -#undef ENTRYDSTRING -#undef ENTRYADDR -#undef ENTRYULONG -#define ENTRYBOOL(NAME, name) -#define CENTRYBOOL(NAME, name) -#define ENTRYINT(NAME, name, minval, maxval, bits) -#define ENTRYINTPOS(NAME, name) uint32_t name; -#define ENTRYSTRING(NAME, name) char* name; -#define ENTRYSTRING_(NAME, name) char* name; -#define ENTRYDSTRING(NAME, name) char* name; -#define ENTRYADDR(NAME, name) uintptr_t name; -#define ENTRYULONG(NAME, name) uint64_t name; -SUPER() -// done -#undef ENTRYBOOL -#undef CENTRYBOOL -#undef ENTRYINT -#undef ENTRYINTPOS -#undef ENTRYSTRING -#undef ENTRYSTRING_ -#undef ENTRYDSTRING -#undef ENTRYADDR -#undef ENTRYULONG -} my_params_t; - -KHASH_MAP_INIT_STR(params, my_params_t) - -static kh_params_t *params = NULL; -static kh_params_t *params_gen = NULL; - -static void clearParam(my_params_t* param) -{ - #define ENTRYBOOL(NAME, name) - #define CENTRYBOOL(NAME, name) - #define ENTRYINT(NAME, name, minval, maxval, bits) - #define ENTRYINTPOS(NAME, name) - #define ENTRYSTRING(NAME, name) box_free(param->name); - #define ENTRYSTRING_(NAME, name) box_free(param->name); - #define ENTRYDSTRING(NAME, name) box_free(param->name); - #define ENTRYADDR(NAME, name) - #define ENTRYULONG(NAME, name) - SUPER() - #undef ENTRYBOOL - #undef CENTRYBOOL - #undef ENTRYINT - #undef ENTRYINTPOS - #undef ENTRYSTRING - #undef ENTRYSTRING_ - #undef ENTRYDSTRING - #undef ENTRYADDR - #undef ENTRYULONG -} - -static void addParam(const char* name, my_params_t* param, int gen) -{ - khint_t k; - kh_params_t* khp = gen?params_gen:params; - k = kh_get(params, khp, name); - if(k==kh_end(khp)) { - int ret; - k = kh_put(params, khp, 
box_strdup(name), &ret); - } else { - clearParam(&kh_value(khp, k)); - } - my_params_t *p = &kh_value(khp, k); - memcpy(p, param, sizeof(my_params_t)); -} - -static void trimString(char* s) -{ - if(!s) - return; - // trim right space/tab - size_t len = strlen(s); - while(len && (s[len-1]==' ' || s[len-1]=='\t' || s[len-1]=='\n')) - s[--len] = '\0'; - // trim left space/tab - while(s[0]==' ' || s[0]=='\t') - memmove(s, s+1, strlen(s)); -} - -#ifdef ANDROID -static int shm_open(const char *name, int oflag, mode_t mode) { - return -1; -} -static int shm_unlink(const char *name) { - return -1; -} -#endif - -void LoadRCFile(const char* filename) -{ - FILE *f = NULL; - if(filename) - f = fopen(filename, "r"); - else { - #define TMP_MEMRCFILE "/box64_rcfile" - int tmp = shm_open(TMP_MEMRCFILE, O_RDWR | O_CREAT, S_IRWXU); - if(tmp<0) return; // error, bye bye - shm_unlink(TMP_MEMRCFILE); // remove the shm file, but it will still exist because it's currently in use - int dummy = write(tmp, default_rcfile, sizeof(default_rcfile)); - (void)dummy; - lseek(tmp, 0, SEEK_SET); - f = fdopen(tmp, "r"); - } - if(!f) { - printf_log(LOG_INFO, "Cannot open RC file %s\n", filename); - return; - } - // init the hash table if needed - if(!params) - params = kh_init(params); - if(!params_gen) - params_gen = kh_init(params); - // prepare to parse the file - char* line = NULL; - size_t lsize = 0; - my_params_t current_param = {0}; - char* current_name = NULL; - int dummy; - size_t len; - char* p; - int decor = 1; - // parsing - while ((dummy = getline(&line, &lsize, f)) != -1) { - // remove comments - if((p=strchr(line, '#'))) - *p = '\0'; - trimString(line); - len = strlen(line); - // check the line content - if(line[0]=='[' && strchr(line, ']')) { - // new entry, will need to add current one - if(current_name) - addParam(current_name, ¤t_param, (decor==2)); - if(line[1]=='*' && line[(intptr_t)(strchr(line, ']')-line)-1]=='*') - decor = 2; - else - decor = 1; - // prepare a new entry - memset(¤t_param, 0, sizeof(current_param)); - box_free(current_name); - current_name = LowerCase(line+decor); - *(strchr(current_name, ']')+1-decor) = '\0'; - trimString(current_name); - } else if(strchr(line, '=')) { - // actual parameters - //get the key and val - char* key = line; - char* val = strchr(key, '=')+1; - *strchr(key, '=') = '\0'; - trimString(key); - trimString(val); - // extract, check and set arg - #define ENTRYINT(NAME, name, minval, maxval, bits) \ - else if(!strcmp(key, #NAME)) { \ - int v = strtol(val, &p, 0); \ - if(p!=val && v>=minval && v<=maxval) { \ - current_param.is_##name##_present = 1; \ - current_param.name = v; \ - } \ - } - #define ENTRYBOOL(NAME, name) ENTRYINT(NAME, name, 0, 1, 1) - #define CENTRYBOOL(NAME, name) ENTRYBOOL(NAME, name) - #define ENTRYINTPOS(NAME, name) \ - else if(!strcmp(key, #NAME)) { \ - int v = strtol(val, &p, 0); \ - if(p!=val) { \ - current_param.is_##name##_present = 1; \ - current_param.name = v; \ - } \ - } - #define ENTRYSTRING(NAME, name) \ - else if(!strcmp(key, #NAME)) { \ - current_param.is_##name##_present = 1; \ - if(current_param.name) box_free(current_param.name);\ - current_param.name = box_strdup(val); \ - } - #define ENTRYSTRING_(NAME, name) ENTRYSTRING(NAME, name) - #define ENTRYDSTRING(NAME, name) ENTRYSTRING(NAME, name) - #define ENTRYADDR(NAME, name) \ - else if(!strcmp(key, #NAME)) { \ - uintptr_t v = strtoul(val, &p, 0); \ - if(p!=val) { \ - current_param.is_##name##_present = 1; \ - current_param.name = v; \ - } \ - } - #define ENTRYULONG(NAME, name) \ - 
else if(!strcmp(key, #NAME)) { \ - uint64_t v = strtoull(val, &p, 0); \ - if(p!=val) { \ - current_param.is_##name##_present = 1; \ - current_param.name = v; \ - } \ - } - #undef IGNORE - #define IGNORE(NAME) else if(!strcmp(key, #NAME)) ; - if(0) ; - SUPER() - else if(len && current_name) { - printf_log(LOG_INFO, "Warning, unsupported %s=%s for [%s] in %s\n", key, val, current_name, filename); - } - #undef ENTRYBOOL - #undef CENTRYBOOL - #undef ENTRYINT - #undef ENTRYINTPOS - #undef ENTRYSTRING - #undef ENTRYSTRING_ - #undef ENTRYDSTRING - #undef ENTRYADDR - #undef ENTRYULONG - #undef IGNORE - #define IGNORE(NAME) - } - } - // last entry to be pushed too - if(current_name) { - addParam(current_name, ¤t_param, (decor==2)); - box_free(current_name); - } - box_free(line); - fclose(f); - printf_log(LOG_INFO, "Params database has %d entries\n", kh_size(params)); -} - -void DeleteParams() -{ - if(!params) - return; - - // free strings - my_params_t* p; - // need to free duplicated strings - kh_foreach_value_ref(params, p, clearParam(p)); - const char* key; - kh_foreach_key(params, key, box_free((void*)key)); - // free the hash itself - kh_destroy(params, params); - params = NULL; -} - -extern int ftrace_has_pid; -extern FILE* ftrace; -extern char* ftrace_name; -extern char* box64_new_args; -extern char* box64_insert_args; -void openFTrace(const char* newtrace, int reopen); -void addNewEnvVar(const char* s); -void AddNewLibs(const char* libs); -void computeRDTSC(); -void my_reserveHighMem(); -#ifdef DYNAREC -void GatherDynarecExtensions(); -#endif -#ifdef HAVE_TRACE -void setupTraceInit(); -void setupTrace(); -#endif -static char old_name[256] = ""; -const char* GetLastApplyName() -{ - return old_name; -} -void internal_ApplyParams(const char* name, const my_params_t* param); -void ApplyParams(const char* name) -{ - if(!name || !params) - return; - if(!strcasecmp(name, old_name)) { - return; - } - strncpy(old_name, name, 255); - khint_t k1; - { - char* lname = LowerCase(name); - k1 = kh_get(params, params, lname); - my_params_t* param; - const char* k2; - kh_foreach_ref(params_gen, k2, param, - if(strstr(lname, k2)) - internal_ApplyParams(name, param); - ) - box_free(lname); - } - if(k1 == kh_end(params)) - return; - my_params_t* param = &kh_value(params, k1); - internal_ApplyParams(name, param); -} - -void internal_ApplyParams(const char* name, const my_params_t* param) { - int new_cycle_log = cycle_log; - int new_maxcpu = box64_maxcpu; - int new_avx = box64_avx2?2:box64_avx; - int box64_dynarec_jvm = box64_jvm; - int new_reserve_high = 0; - int want_exit = 0; - #ifdef DYNAREC - int olddynarec = box64_dynarec; - #endif - printf_log(LOG_INFO, "Apply RC params for %s\n", name); - #define ENTRYINT(NAME, name, minval, maxval, bits) if(param->is_##name##_present) {printf_log(LOG_INFO, "Applying %s=%d\n", #NAME, param->name); name = param->name;} - #define ENTRYBOOL(NAME, name) ENTRYINT(NAME, name, 0, 1, 1) - #define CENTRYBOOL(NAME, name) if(param->is_##name##_present) {printf_log(LOG_INFO, "Applying %s=%d\n", #NAME, param->name); my_context->name = param->name;} - #define ENTRYINTPOS(NAME, name) if(param->is_##name##_present) {printf_log(LOG_INFO, "Applying %s=%d\n", #NAME, param->name); name = param->name;} - #define ENTRYSTRING(NAME, name) if(param->is_##name##_present) {printf_log(LOG_INFO, "Applying %s=%s\n", #NAME, param->name); name = param->name;} - #define ENTRYSTRING_(NAME, name) - #define ENTRYDSTRING(NAME, name) if(param->is_##name##_present) {printf_log(LOG_INFO, "Applying %s=%s\n", 
#NAME, param->name); if(name) box_free(name); name = box_strdup(param->name);} - #define ENTRYADDR(NAME, name) if(param->is_##name##_present) {printf_log(LOG_INFO, "Applying %s=%zd\n", #NAME, param->name); name = param->name;} - #define ENTRYULONG(NAME, name) if(param->is_##name##_present) {printf_log(LOG_INFO, "Applying %s=%lld\n", #NAME, param->name); name = param->name;} - SUPER() - #undef ENTRYBOOL - #undef CENTRYBOOL - #undef ENTRYINT - #undef ENTRYINTPOS - #undef ENTRYSTRING - #undef ENTRYSTRING_ - #undef ENTRYDSTRING - #undef ENTRYADDR - #undef ENTRYULONG - // now handle the manuel entry (the one with ending underscore) - if(want_exit) - exit(0); - if(new_cycle_log==1) - new_cycle_log = 16; - if(new_cycle_log!=cycle_log) { - freeCycleLog(my_context); - cycle_log = new_cycle_log; - initCycleLog(my_context); - } - if(new_reserve_high) - my_reserveHighMem(); - if(param->is_new_avx_present) { - if(!new_avx) { - printf_log(LOG_INFO, "Hiding AVX extension\n"); - box64_avx = 0; box64_avx2 = 0; - } else if(new_avx==1) { - printf_log(LOG_INFO, "Exposing AVX extension\n"); - box64_avx = 1; box64_avx2 = 0; - } else if(new_avx==2) { - printf_log(LOG_INFO, "Exposing AVX/AVX2 extensions\n"); - box64_avx = 1; box64_avx2 = 1; - } - } - if(param->is_box64_rdtsc_1ghz_present) - computeRDTSC(); - #ifdef DYNAREC - if(param->is_box64_dynarec_jvm_present && !param->is_box64_jvm_present) - box64_jvm = box64_dynarec_jvm; - #endif - if(!box64_maxcpu_immutable) { - if(new_maxcpu!=box64_maxcpu && box64_maxcpu && box64_maxcpu<new_maxcpu) { - printf_log(LOG_INFO, "Not applying BOX64_MAXCPU=%d because a lesser value is already active: %d\n", new_maxcpu, box64_maxcpu); - } else - box64_maxcpu = new_maxcpu; - } else if(new_maxcpu!=box64_maxcpu) - printf_log(LOG_INFO, "Not applying BOX64_MAXCPU=%d because it's too late\n", new_maxcpu); - if(param->is_ld_library_path_present) AppendList(&my_context->box64_ld_lib, param->ld_library_path, 1); - if(param->is_box64_path_present) AppendList(&my_context->box64_path, param->box64_path, 1); - if(param->is_trace_file_present) { - // open a new ftrace... 
- printf_log(LOG_INFO, "Applying %s=%s\n", "BOX64_TRACE_FILE", param->trace_file); - if(ftrace_name) { - fclose(ftrace); - } - openFTrace(param->trace_file, 0); - } - if(param->is_emulated_libs_present) { - printf_log(LOG_INFO, "Applying %s=%s\n", "BOX64_EMULATED_LIBS", param->emulated_libs); - AppendList(&my_context->box64_emulated_libs, param->emulated_libs, 0); - } - if(param->is_new_addlibs_present) { - AddNewLibs(param->new_addlibs); - } - if(param->is_new_env_present) { - printf_log(LOG_INFO, "Applying %s=%s\n", "BOX64_ENV", param->new_env); - addNewEnvVar(param->new_env); - } - if(param->is_new_env1_present) { - printf_log(LOG_INFO, "Applying %s=%s\n", "BOX64_ENV1", param->new_env1); - addNewEnvVar(param->new_env1); - } - if(param->is_new_env2_present) { - printf_log(LOG_INFO, "Applying %s=%s\n", "BOX64_ENV2", param->new_env2); - addNewEnvVar(param->new_env2); - } - if(param->is_new_env3_present) { - printf_log(LOG_INFO, "Applying %s=%s\n", "BOX64_ENV3", param->new_env3); - addNewEnvVar(param->new_env3); - } - if(param->is_new_env4_present) { - printf_log(LOG_INFO, "Applying %s=%s\n", "BOX64_ENV4", param->new_env4); - addNewEnvVar(param->new_env4); - } - if(param->is_new_args_present) { - printf_log(LOG_INFO, "Adding \"%s\" arguments to command line\n", param->new_args); - if(box64_new_args) - box_free(box64_new_args); - box64_new_args = box_strdup(param->new_args); - } - if(param->is_insert_args_present) { - printf_log(LOG_INFO, "Adding \"%s\" arguments to command line\n", param->insert_args); - if(box64_insert_args) - box_free(box64_insert_args); - box64_insert_args = box_strdup(param->insert_args); - } - if(param->is_bash_present && FileIsX64ELF(param->bash)) { - printf_log(LOG_INFO, "Applying %s=%s\n", "BOX64_BASH", param->bash); - if(my_context->bashpath) - box_free(my_context->bashpath); - my_context->bashpath = box_strdup(param->bash); - } - #ifdef HAVE_TRACE - int old_x64trace = my_context->x64trace; - if(param->is_trace_present) { - char*p = param->trace; - if (strcmp(p, "0")) { - my_context->x64trace = 1; - box64_trace = p; - } - printf_log(LOG_INFO, "Applying %s=%s\n", "BOX64_TRACE", param->trace); - } - if(param->is_trace_init_present) { - char* p = param->trace_init; - if (strcmp(p, "0")) { - my_context->x64trace = 1; - trace_init = p; - } - printf_log(LOG_INFO, "Applying %s=%s\n", "BOX64_TRACE_INIT", param->trace_init); - } - if(my_context->x64trace && !old_x64trace) { - printf_log(LOG_INFO, "Initializing Zydis lib\n"); - if(InitX64Trace(my_context)) { - printf_log(LOG_INFO, "Zydis init failed, no x86 trace activated\n"); - my_context->x64trace = 0; - } - } - if(param->is_trace_init_present) - setupTraceInit(); - if(param->is_trace_present) - setupTrace(); - #endif - #ifdef DYNAREC - if(param->is_box64_nodynarec_present) { - uintptr_t no_start = 0, no_end = 0; - int ok = 0; - if(sscanf(param->box64_nodynarec, "0x%lX-0x%lX", &no_start, &no_end)==2) - ok = 1; - if(!ok && sscanf(param->box64_nodynarec, "%lx-%lx", &no_start, &no_end)==2) - ok = 1; - if(!ok && sscanf(param->box64_nodynarec, "%ld-%ld", &no_start, &no_end)==2) - ok = 1; - if(ok && no_end>no_start) { - box64_nodynarec_start = no_start; - box64_nodynarec_end = no_end; - printf_log(LOG_INFO, "Appling BOX64_NODYNAREC=%p-%p\n", (void*)box64_nodynarec_start, (void*)box64_nodynarec_end); - } else { - printf_log(LOG_INFO, "Ignoring BOX64_NODYNAREC=%s (%p-%p)\n", param->box64_nodynarec, (void*)box64_nodynarec_start, (void*)box64_nodynarec_end); - } - } - if(param->is_box64_dynarec_test_present) { - uintptr_t no_start 
= 0, no_end = 0; - if(strlen(param->box64_dynarec_test)==1) { - box64_dynarec_test = param->box64_dynarec_test[0]-'0'; - box64_dynarec_test_start = 0x0; - box64_dynarec_test_end = 0x0; - if(box64_dynarec_test>2) box64_dynarec_test = 0; - printf_log(LOG_INFO, "Appling BOX64_DYNAREC_TEST=%d\n", box64_dynarec_test); - } else { - int ok = 0; - if(sscanf(param->box64_dynarec_test, "0x%lX-0x%lX", &no_start, &no_end)==2) - ok = 1; - if(!ok && sscanf(param->box64_dynarec_test, "%lx-%lx", &no_start, &no_end)==2) - ok = 1; - if(!ok && sscanf(param->box64_dynarec_test, "%ld-%ld", &no_start, &no_end)==2) - ok = 1; - if(ok && no_end>no_start) { - box64_dynarec_test = 1; - box64_dynarec_test_start = no_start; - box64_dynarec_test_end = no_end; - printf_log(LOG_INFO, "Appling BOX64_DYNAREC_TEST=%p-%p\n", (void*)box64_dynarec_test_start, (void*)box64_dynarec_test_end); - } else { - box64_dynarec_test = 0; - printf_log(LOG_INFO, "Ignoring BOX64_DYNAREC_TEST=%s (%p-%p)\n", param->box64_dynarec_test, (void*)box64_dynarec_test_start, (void*)box64_dynarec_test_end); - } - } - } - if(param->is_box64_dynarec_forward_present) { - int forward = 0; - if(sscanf(param->box64_dynarec_forward, "%d", &forward)==1) { - printf_log(LOG_INFO, "Appling BOX64_DYNAREC_FORWARD=%d\n", box64_dynarec_forward); - box64_dynarec_forward = forward; - } - } - if(!olddynarec && box64_dynarec) - GatherDynarecExtensions(); - if(param->is_box64_dynarec_test_present && box64_dynarec_test) { - box64_dynarec_fastnan = 0; - box64_dynarec_fastround = 0; - box64_dynarec_x87double = 1; - box64_dynarec_div0 = 1; - box64_dynarec_callret = 0; - #if defined(RV64) || defined(LA64) - box64_dynarec_nativeflags = 0; - #endif - } - #endif - if(box64_log==3) { - box64_log = 2; - box64_dump = 1; - } -} diff --git a/src/wrapped/wrappedatk.c b/src/wrapped/wrappedatk.c index c45d89e3..4a072e0a 100644 --- a/src/wrapped/wrappedatk.c +++ b/src/wrapped/wrappedatk.c @@ -151,7 +151,7 @@ EXPORT uint32_t my_atk_add_global_event_listener(x64emu_t* emu, void* f, void* p } #define PRE_INIT \ - if(box64_nogtk) \ + if(BOX64ENV(nogtk)) \ return -1; #define CUSTOM_INIT \ diff --git a/src/wrapped/wrappedatkbridge.c b/src/wrapped/wrappedatkbridge.c index a4f0aec0..43a8b4e0 100644 --- a/src/wrapped/wrappedatkbridge.c +++ b/src/wrapped/wrappedatkbridge.c @@ -21,7 +21,7 @@ EXPORT int my_atk_bridge_adaptor_init(void* argc, void** argv) } #define PRE_INIT \ - if(box64_nogtk) \ + if(BOX64ENV(nogtk)) \ return -1; #define NEEDED_LIBS "libatk-1.0.so.0", "libSM.so.6", "libICE.so.6", "libXau.so.6", "libxcb.so.1" diff --git a/src/wrapped/wrappedatspi.c b/src/wrapped/wrappedatspi.c index 2557a609..025176b6 100644 --- a/src/wrapped/wrappedatspi.c +++ b/src/wrapped/wrappedatspi.c @@ -88,7 +88,7 @@ EXPORT void* my_atspi_event_listener_new(x64emu_t* emu, void* f, void* data, voi } #define PRE_INIT \ - if(box64_nogtk) \ + if(BOX64ENV(nogtk)) \ return -1; #include "wrappedlib_init.h" diff --git a/src/wrapped/wrappedcrashhandler.c b/src/wrapped/wrappedcrashhandler.c index fe0840bc..603d18d2 100644 --- a/src/wrapped/wrappedcrashhandler.c +++ b/src/wrapped/wrappedcrashhandler.c @@ -21,7 +21,7 @@ const char* crashhandlerName = "crashhandler.so"; #define LIBNAME crashhandler #define PRE_INIT \ - if(!box64_dummy_crashhandler) \ + if(!BOX64ENV(dummy_crashhandler)) \ return -1; \ if(1) \ lib->w.lib = dlopen(NULL, RTLD_LAZY | RTLD_GLOBAL);\ diff --git a/src/wrapped/wrappeddbusglib1.c b/src/wrapped/wrappeddbusglib1.c index 5218882c..26c11c6b 100644 --- a/src/wrapped/wrappeddbusglib1.c +++ 
b/src/wrapped/wrappeddbusglib1.c @@ -222,7 +222,7 @@ EXPORT void my_dbus_g_proxy_disconnect_signal(x64emu_t* emu, void* proxy, void* } #define PRE_INIT \ - if(box64_nogtk) \ + if(BOX64ENV(nogtk)) \ return -1; #include "wrappedlib_init.h" diff --git a/src/wrapped/wrappeddbusmenuglib.c b/src/wrapped/wrappeddbusmenuglib.c index 2f0696ed..290be612 100644 --- a/src/wrapped/wrappeddbusmenuglib.c +++ b/src/wrapped/wrappeddbusmenuglib.c @@ -100,7 +100,7 @@ EXPORT void my_dbusmenu_menuitem_send_about_to_show(x64emu_t* emu, void* mi, voi } #define PRE_INIT \ - if(box64_nogtk) \ + if(BOX64ENV(nogtk)) \ return -1; #include "wrappedlib_init.h" diff --git a/src/wrapped/wrappedgconf2.c b/src/wrapped/wrappedgconf2.c index eefd223c..62bc5283 100644 --- a/src/wrapped/wrappedgconf2.c +++ b/src/wrapped/wrappedgconf2.c @@ -85,7 +85,7 @@ EXPORT uint32_t my_gconf_client_notify_add(x64emu_t* emu, void* client, void* se } #define PRE_INIT \ - if(box64_nogtk) \ + if(BOX64ENV(nogtk)) \ return -1; #include "wrappedlib_init.h" diff --git a/src/wrapped/wrappedgdk3.c b/src/wrapped/wrappedgdk3.c index d8c6e7c7..8c15d8c3 100644 --- a/src/wrapped/wrappedgdk3.c +++ b/src/wrapped/wrappedgdk3.c @@ -199,7 +199,7 @@ EXPORT void my3_gdk_threads_set_lock_functions(x64emu_t* emu, void* enter_fn, vo } #define PRE_INIT \ - if(box64_nogtk) \ + if(BOX64ENV(nogtk)) \ return -1; #define ALTMY my3_ diff --git a/src/wrapped/wrappedgdkpixbuf2.c b/src/wrapped/wrappedgdkpixbuf2.c index 645dbb2c..eef1b157 100644 --- a/src/wrapped/wrappedgdkpixbuf2.c +++ b/src/wrapped/wrappedgdkpixbuf2.c @@ -63,7 +63,7 @@ EXPORT void* my_gdk_pixbuf_new_from_data(x64emu_t* emu, void* data, int32_t colo } #define PRE_INIT \ - if(box64_nogtk) \ + if(BOX64ENV(nogtk)) \ return -1; #include "wrappedlib_init.h" diff --git a/src/wrapped/wrappedgdkx112.c b/src/wrapped/wrappedgdkx112.c index 82b10cfe..b81cc3e0 100644 --- a/src/wrapped/wrappedgdkx112.c +++ b/src/wrapped/wrappedgdkx112.c @@ -195,7 +195,7 @@ EXPORT uint32_t my_gdk_threads_add_timeout_full(x64emu_t* emu, int priotity, uin } #define PRE_INIT \ - if(box64_nogtk) \ + if(BOX64ENV(nogtk)) \ return -1; #define CUSTOM_INIT \ diff --git a/src/wrapped/wrappedgio2.c b/src/wrapped/wrappedgio2.c index 4a139824..ca5ee25e 100644 --- a/src/wrapped/wrappedgio2.c +++ b/src/wrapped/wrappedgio2.c @@ -624,7 +624,7 @@ EXPORT void my_g_input_stream_read_async(x64emu_t* emu, void* stream, void* buff } #define PRE_INIT \ - if(box64_nogtk) \ + if(BOX64ENV(nogtk)) \ return -1; #define CUSTOM_INIT \ diff --git a/src/wrapped/wrappedglib2.c b/src/wrapped/wrappedglib2.c index cd0b4d64..7a24b5f4 100644 --- a/src/wrapped/wrappedglib2.c +++ b/src/wrapped/wrappedglib2.c @@ -1537,7 +1537,7 @@ EXPORT void my_g_queue_insert_sorted(x64emu_t* emu, void* queue, void* data, voi } #define PRE_INIT \ - if(box64_nogtk) \ + if(BOX64ENV(nogtk)) \ return -1; #include "wrappedlib_init.h" diff --git a/src/wrapped/wrappedgmodule2.c b/src/wrapped/wrappedgmodule2.c index cc71d2ef..1039627e 100644 --- a/src/wrapped/wrappedgmodule2.c +++ b/src/wrapped/wrappedgmodule2.c @@ -18,7 +18,7 @@ const char* gmodule2Name = "libgmodule-2.0.so.0"; #define LIBNAME gmodule2 #define PRE_INIT \ - if(box64_nogtk) \ + if(BOX64ENV(nogtk)) \ return -1; #define NEEDED_LIBS "libglib-2.0.so.0" diff --git a/src/wrapped/wrappedgobject2.c b/src/wrapped/wrappedgobject2.c index 13e81a93..0a2893c9 100644 --- a/src/wrapped/wrappedgobject2.c +++ b/src/wrapped/wrappedgobject2.c @@ -839,7 +839,7 @@ EXPORT void* my_g_type_value_table_peek(x64emu_t* emu, size_t type) } #define PRE_INIT \ - 
if(box64_nogtk) \ + if(BOX64ENV(nogtk)) \ return -1; #define CUSTOM_INIT \ diff --git a/src/wrapped/wrappedgstallocators.c b/src/wrapped/wrappedgstallocators.c index 74cd1f3b..6036e2c1 100644 --- a/src/wrapped/wrappedgstallocators.c +++ b/src/wrapped/wrappedgstallocators.c @@ -18,7 +18,7 @@ const char* gstallocatorsName = "libgstallocators-1.0.so.0"; #define LIBNAME gstallocators #define PRE_INIT \ - if(box64_nogtk) \ + if(BOX64ENV(nogtk)) \ return -1; #include "wrappedlib_init.h" diff --git a/src/wrapped/wrappedgstapp.c b/src/wrapped/wrappedgstapp.c index 3815c347..a9dd8347 100644 --- a/src/wrapped/wrappedgstapp.c +++ b/src/wrapped/wrappedgstapp.c @@ -18,7 +18,7 @@ const char* gstappName = "libgstapp-1.0.so.0"; #define LIBNAME gstapp #define PRE_INIT \ - if(box64_nogtk) \ + if(BOX64ENV(nogtk)) \ return -1; #include "wrappedlib_init.h" diff --git a/src/wrapped/wrappedgstaudio.c b/src/wrapped/wrappedgstaudio.c index 9e52556e..5471b286 100644 --- a/src/wrapped/wrappedgstaudio.c +++ b/src/wrapped/wrappedgstaudio.c @@ -36,7 +36,7 @@ typedef size_t (*LFv_t)(); #include "wrappercallback.h" #define PRE_INIT \ - if(box64_nogtk) \ + if(BOX64ENV(nogtk)) \ return -1; #define CUSTOM_INIT \ diff --git a/src/wrapped/wrappedgstbase.c b/src/wrapped/wrappedgstbase.c index 786b706e..3d143af7 100644 --- a/src/wrapped/wrappedgstbase.c +++ b/src/wrapped/wrappedgstbase.c @@ -223,7 +223,7 @@ EXPORT void my_gst_collect_pads_set_buffer_function(x64emu_t* emu, void* pads, v } #define PRE_INIT \ - if(box64_nogtk) \ + if(BOX64ENV(nogtk)) \ return -1; #define CUSTOM_INIT \ diff --git a/src/wrapped/wrappedgstcheck.c b/src/wrapped/wrappedgstcheck.c index 9092d25b..c84bf454 100644 --- a/src/wrapped/wrappedgstcheck.c +++ b/src/wrapped/wrappedgstcheck.c @@ -18,7 +18,7 @@ const char* gstcheckName = "libgstcheck-1.0.so.0"; #define LIBNAME gstcheck #define PRE_INIT \ - if(box64_nogtk) \ + if(BOX64ENV(nogtk)) \ return -1; #include "wrappedlib_init.h" diff --git a/src/wrapped/wrappedgstcontroller.c b/src/wrapped/wrappedgstcontroller.c index 2f630154..414af7b1 100644 --- a/src/wrapped/wrappedgstcontroller.c +++ b/src/wrapped/wrappedgstcontroller.c @@ -18,7 +18,7 @@ const char* gstcontrollerName = "libgstcontroller-1.0.so.0"; #define LIBNAME gstcontroller #define PRE_INIT \ - if(box64_nogtk) \ + if(BOX64ENV(nogtk)) \ return -1; #include "wrappedlib_init.h" diff --git a/src/wrapped/wrappedgstfft.c b/src/wrapped/wrappedgstfft.c index afb8b159..068fc4ce 100644 --- a/src/wrapped/wrappedgstfft.c +++ b/src/wrapped/wrappedgstfft.c @@ -18,7 +18,7 @@ const char* gstfftName = "libgstfft-1.0.so.0"; #define LIBNAME gstfft #define PRE_INIT \ - if(box64_nogtk) \ + if(BOX64ENV(nogtk)) \ return -1; #include "wrappedlib_init.h" diff --git a/src/wrapped/wrappedgstgl.c b/src/wrapped/wrappedgstgl.c index 3b3aad55..f932ff47 100644 --- a/src/wrapped/wrappedgstgl.c +++ b/src/wrapped/wrappedgstgl.c @@ -265,7 +265,7 @@ EXPORT void my_gst_gl_window_set_resize_callback(x64emu_t* emu, void* window, vo } #define PRE_INIT \ - if(box64_nogtk) \ + if(BOX64ENV(nogtk)) \ return -1; #define CUSTOM_INIT \ diff --git a/src/wrapped/wrappedgstnet.c b/src/wrapped/wrappedgstnet.c index 7cdcfb76..01276587 100644 --- a/src/wrapped/wrappedgstnet.c +++ b/src/wrapped/wrappedgstnet.c @@ -18,7 +18,7 @@ const char* gstnetName = "libgstnet-1.0.so.0"; #define LIBNAME gstnet #define PRE_INIT \ - if(box64_nogtk) \ + if(BOX64ENV(nogtk)) \ return -1; #include "wrappedlib_init.h" diff --git a/src/wrapped/wrappedgstpbutils.c b/src/wrapped/wrappedgstpbutils.c index d9dc054d..f41931eb 
100644 --- a/src/wrapped/wrappedgstpbutils.c +++ b/src/wrapped/wrappedgstpbutils.c @@ -18,7 +18,7 @@ const char* gstpbutilsName = "libgstpbutils-1.0.so.0"; #define LIBNAME gstpbutils #define PRE_INIT \ - if(box64_nogtk) \ + if(BOX64ENV(nogtk)) \ return -1; #include "wrappedlib_init.h" diff --git a/src/wrapped/wrappedgstreamer.c b/src/wrapped/wrappedgstreamer.c index f82dd429..9e5b8198 100644 --- a/src/wrapped/wrappedgstreamer.c +++ b/src/wrapped/wrappedgstreamer.c @@ -1229,7 +1229,7 @@ EXPORT void my_gst_mini_object_init(x64emu_t* emu, void* obj, uint32_t flags, si } #define PRE_INIT \ - if(box64_nogtk) \ + if(BOX64ENV(nogtk)) \ return -1; #define CUSTOM_INIT \ diff --git a/src/wrapped/wrappedgstriff.c b/src/wrapped/wrappedgstriff.c index 472db7ce..aa75a2b4 100644 --- a/src/wrapped/wrappedgstriff.c +++ b/src/wrapped/wrappedgstriff.c @@ -18,7 +18,7 @@ const char* gstriffName = "libgstriff-1.0.so.0"; #define LIBNAME gstriff #define PRE_INIT \ - if(box64_nogtk) \ + if(BOX64ENV(nogtk)) \ return -1; #include "wrappedlib_init.h" diff --git a/src/wrapped/wrappedgstrtp.c b/src/wrapped/wrappedgstrtp.c index 00f3d020..cee7214e 100644 --- a/src/wrapped/wrappedgstrtp.c +++ b/src/wrapped/wrappedgstrtp.c @@ -18,7 +18,7 @@ const char* gstrtpName = "libgstrtp-1.0.so.0"; #define LIBNAME gstrtp #define PRE_INIT \ - if(box64_nogtk) \ + if(BOX64ENV(nogtk)) \ return -1; #include "wrappedlib_init.h" diff --git a/src/wrapped/wrappedgstrtsp.c b/src/wrapped/wrappedgstrtsp.c index ff3f0cbd..8b6526e1 100644 --- a/src/wrapped/wrappedgstrtsp.c +++ b/src/wrapped/wrappedgstrtsp.c @@ -18,7 +18,7 @@ const char* gstrtspName = "libgstrtsp-1.0.so.0"; #define LIBNAME gstrtsp #define PRE_INIT \ - if(box64_nogtk) \ + if(BOX64ENV(nogtk)) \ return -1; #include "wrappedlib_init.h" diff --git a/src/wrapped/wrappedgstsdp.c b/src/wrapped/wrappedgstsdp.c index 6edcee42..d752a9ff 100644 --- a/src/wrapped/wrappedgstsdp.c +++ b/src/wrapped/wrappedgstsdp.c @@ -18,7 +18,7 @@ const char* gstsdpName = "libgstsdp-1.0.so.0"; #define LIBNAME gstsdp #define PRE_INIT \ - if(box64_nogtk) \ + if(BOX64ENV(nogtk)) \ return -1; #include "wrappedlib_init.h" diff --git a/src/wrapped/wrappedgsttag.c b/src/wrapped/wrappedgsttag.c index 1f02560a..bb10df4e 100644 --- a/src/wrapped/wrappedgsttag.c +++ b/src/wrapped/wrappedgsttag.c @@ -18,7 +18,7 @@ const char* gsttagName = "libgsttag-1.0.so.0"; #define LIBNAME gsttag #define PRE_INIT \ - if(box64_nogtk) \ + if(BOX64ENV(nogtk)) \ return -1; #include "wrappedlib_init.h" diff --git a/src/wrapped/wrappedgstvideo.c b/src/wrapped/wrappedgstvideo.c index d7fa4d60..0a836297 100644 --- a/src/wrapped/wrappedgstvideo.c +++ b/src/wrapped/wrappedgstvideo.c @@ -41,7 +41,7 @@ typedef size_t (*LFv_t)(); #include "wrappercallback.h" #define PRE_INIT \ - if(box64_nogtk) \ + if(BOX64ENV(nogtk)) \ return -1; #define CUSTOM_INIT \ diff --git a/src/wrapped/wrappedgthread2.c b/src/wrapped/wrappedgthread2.c index 66b64623..7c77c9f0 100644 --- a/src/wrapped/wrappedgthread2.c +++ b/src/wrapped/wrappedgthread2.c @@ -97,7 +97,7 @@ void** my_GetGthreadsGotInitialized() } #define PRE_INIT \ - if(box64_nogtk) \ + if(BOX64ENV(nogtk)) \ return -1; #include "wrappedlib_init.h" diff --git a/src/wrapped/wrappedgtk3.c b/src/wrapped/wrappedgtk3.c index 21ff1d38..1ae74486 100644 --- a/src/wrapped/wrappedgtk3.c +++ b/src/wrapped/wrappedgtk3.c @@ -825,7 +825,7 @@ EXPORT void my3_gtk_list_box_set_header_func(x64emu_t* emu, void* box, void* f, #define PRE_INIT \ - if(box64_nogtk) \ + if(BOX64ENV(nogtk)) \ return -1; #define ALTMY my3_ diff --git 
a/src/wrapped/wrappedgtkx112.c b/src/wrapped/wrappedgtkx112.c index 0d13ce1a..859484fc 100644 --- a/src/wrapped/wrappedgtkx112.c +++ b/src/wrapped/wrappedgtkx112.c @@ -1277,7 +1277,7 @@ static void addGtk2Alternate(library_t* lib) } #define PRE_INIT \ - if(box64_nogtk) \ + if(BOX64ENV(nogtk)) \ return -1; #define CUSTOM_INIT \ diff --git a/src/wrapped/wrappedlibc.c b/src/wrapped/wrappedlibc.c index 79dcca62..2b4293cd 100644 --- a/src/wrapped/wrappedlibc.c +++ b/src/wrapped/wrappedlibc.c @@ -64,7 +64,7 @@ #include "elfloader.h" #include "bridge.h" #include "globalsymbols.h" -#include "rcfile.h" +#include "env.h" #ifndef LOG_INFO #define LOG_INFO 1 #endif @@ -465,8 +465,8 @@ void EXPORT my___stack_chk_fail(x64emu_t* emu) #else sprintf(buff, "%p: Stack is corrupted, aborting\n", (void*)emu->old_ip); #endif - if(cycle_log) { - print_cycle_log(LOG_INFO); + if(BOX64ENV(rolling_log)) { + print_rolling_log(LOG_INFO); } StopEmu(emu, buff, emu->segs[_CS]==0x23); } @@ -1598,7 +1598,7 @@ struct i386_dirent { EXPORT void* my_readdir(x64emu_t* emu, void* dirp) { - if (fix_64bit_inodes) + if (BOX64ENV(fix_64bit_inodes)) { struct dirent64 *dp64 = readdir64((DIR *)dirp); if (!dp64) return NULL; @@ -1626,7 +1626,7 @@ EXPORT void* my_readdir(x64emu_t* emu, void* dirp) EXPORT int32_t my_readdir_r(x64emu_t* emu, void* dirp, void* entry, void** result) { struct dirent64 d64, *dp64; - if (fix_64bit_inodes && (sizeof(d64.d_name) > 1)) + if (BOX64ENV(fix_64bit_inodes) && (sizeof(d64.d_name) > 1)) { static iFppp_t f = NULL; if(!f) { @@ -1780,11 +1780,11 @@ void CreateCPUInfoFile(int fd) P; sprintf(buff, "flags\t\t: fpu cx8 sep ht cmov clflush mmx sse sse2 syscall tsc lahf_lm ssse3 ht tm lm fxsr cpuid pclmulqdq cx16 aes movbe pni "\ "sse4_1%s%s%s lzcnt popcnt%s%s%s%s%s%s%s%s%s\n", - box64_sse42?" sse4_2":"", box64_avx?" avx":"", box64_shaext?"sha_ni":"", - box64_avx?" bmi1":"", box64_avx2?" avx2":"", box64_avx?" bmi2":"", - box64_avx2?" vaes":"", box64_avx2?" fma":"", - box64_avx?" xsave":"", box64_avx?" f16c":"", box64_avx2?" randr":"", - box64_avx2?" adx":"" + BOX64ENV(sse42)?" sse4_2":"", BOX64ENV(avx)?" avx":"", BOX64ENV(shaext)?"sha_ni":"", + BOX64ENV(avx)?" bmi1":"", BOX64ENV(avx2)?" avx2":"", BOX64ENV(avx)?" bmi2":"", + BOX64ENV(avx2)?" vaes":"", BOX64ENV(avx2)?" fma":"", + BOX64ENV(avx)?" xsave":"", BOX64ENV(avx)?" f16c":"", BOX64ENV(avx2)?" randr":"", + BOX64ENV(avx2)?" 
adx":"" ); P; sprintf(buff, "address sizes\t: 48 bits physical, 48 bits virtual\n"); @@ -2021,7 +2021,7 @@ EXPORT int32_t my_open64(x64emu_t* emu, void* pathname, int32_t flags, uint32_t lseek(tmp, 0, SEEK_SET); return tmp; } - if(box64_maxcpu && (!strcmp(pathname, "/sys/devices/system/cpu/present") || !strcmp(pathname, "/sys/devices/system/cpu/online")) && (getNCpu()>=box64_maxcpu)) { + if(BOX64ENV(maxcpu) && (!strcmp(pathname, "/sys/devices/system/cpu/present") || !strcmp(pathname, "/sys/devices/system/cpu/online")) && (getNCpu()>=BOX64ENV(maxcpu))) { // special case for cpu present (to limit to 64 cores) int tmp = shm_open(TMP_CPUPRESENT, O_RDWR | O_CREAT, S_IRWXU); if(tmp<0) return open64(pathname, mode); // error fallback @@ -2092,7 +2092,7 @@ EXPORT FILE* my_fopen64(x64emu_t* emu, const char* path, const char* mode) lseek(tmp, 0, SEEK_SET); return fdopen(tmp, mode); } - if(box64_maxcpu && (!strcmp(path, "/sys/devices/system/cpu/present") || !strcmp(path, "/sys/devices/system/cpu/online")) && (getNCpu()>=box64_maxcpu)) { + if(BOX64ENV(maxcpu) && (!strcmp(path, "/sys/devices/system/cpu/present") || !strcmp(path, "/sys/devices/system/cpu/online")) && (getNCpu()>=BOX64ENV(maxcpu))) { // special case for cpu present (to limit to 64 cores) int tmp = shm_open(TMP_CPUPRESENT, O_RDWR | O_CREAT, S_IRWXU); if(tmp<0) return fopen64(path, mode); // error fallback @@ -2272,14 +2272,7 @@ EXPORT int32_t my_execv(x64emu_t* emu, const char* path, char* const argv[]) if(my_environ!=my_context->envv) envv = my_environ; if(my__environ!=my_context->envv) envv = my__environ; if(my___environ!=my_context->envv) envv = my___environ; -/*if(!envv && n>2 && strstr(newargv[2], "fxc.exe")) { -setenv("BOX64_LOG", "2", 1); -setenv("BOX64_TRACE_FILE", "/home/seb/trace-%pid.txt", 1); -setenv("BOX64_TRACE","server_init_process_done", 1); -setenv("BOX64_DYNAREC", "0", 1); -setenv("WINEDEBUG", "+server", 1); -//setenv("BOX64_DYNAREC", "0", 1); -}*/ + int ret; if(envv) ret = execve(newargv[0], (char* const*)newargv, envv); @@ -2987,10 +2980,10 @@ EXPORT void* my_mmap64(x64emu_t* emu, void *addr, size_t length, int prot, int f (void)emu; void* ret = box_mmap(addr, length, prot, flags, fd, offset); int e = errno; - if((ret==MAP_FAILED && (emu || box64_is32bits)) && (box64_log>=LOG_DEBUG || box64_dynarec_log>=LOG_DEBUG)) {printf_log(LOG_NONE, "%s (%d)\n", strerror(errno), errno);} - if(((ret!=MAP_FAILED) && (emu || box64_is32bits)) && (box64_log>=LOG_DEBUG || box64_dynarec_log>=LOG_DEBUG)) {printf_log(LOG_NONE, "%p\n", ret);} + if((ret==MAP_FAILED && (emu || box64_is32bits)) && (BOX64ENV(log)>=LOG_DEBUG || BOX64ENV(dynarec_log)>=LOG_DEBUG)) {printf_log(LOG_NONE, "%s (%d)\n", strerror(errno), errno);} + if(((ret!=MAP_FAILED) && (emu || box64_is32bits)) && (BOX64ENV(log)>=LOG_DEBUG || BOX64ENV(dynarec_log)>=LOG_DEBUG)) {printf_log(LOG_NONE, "%p\n", ret);} #ifdef DYNAREC - if(box64_dynarec && ret!=MAP_FAILED) { + if(BOX64ENV(dynarec) && ret!=MAP_FAILED) { /*if(flags&0x100000 && addr!=ret) { // program used MAP_FIXED_NOREPLACE but the host linux didn't support it @@ -3007,12 +3000,12 @@ EXPORT void* my_mmap64(x64emu_t* emu, void *addr, size_t length, int prot, int f if((flags&MAP_SHARED) && (fd>0)) { uint32_t flags = fcntl(fd, F_GETFL); if((flags&O_ACCMODE)==O_RDWR) { - if((box64_log>=LOG_DEBUG || box64_dynarec_log>=LOG_DEBUG)) {printf_log(LOG_NONE, "Note: Marking the region (%p-%p prot=%x) as NEVERCLEAN because fd have O_RDWR attribute\n", ret, ret+length, prot);} + if((BOX64ENV(log)>=LOG_DEBUG || 
BOX64ENV(dynarec_log)>=LOG_DEBUG)) {printf_log(LOG_NONE, "Note: Marking the region (%p-%p prot=%x) as NEVERCLEAN because fd have O_RDWR attribute\n", ret, ret+length, prot);}
             prot |= PROT_NEVERCLEAN;
         }
     }
     static int unityplayer_detected = 0;
-    if(fd>0 && box64_unityplayer && !unityplayer_detected) {
+    if(fd>0 && BOX64ENV(unityplayer) && !unityplayer_detected) {
         char filename[4096];
         char buf[128];
         sprintf(buf, "/proc/self/fd/%d", fd);
@@ -3021,8 +3014,7 @@ EXPORT void* my_mmap64(x64emu_t* emu, void *addr, size_t length, int prot, int f
         if(r>0 && strlen(filename)>strlen("UnityPlayer.dll") && !strcasecmp(filename+strlen(filename)-strlen("UnityPlayer.dll"), "UnityPlayer.dll")) {
             printf_log(LOG_INFO, "BOX64: Detected UnityPlayer.dll\n");
             #ifdef DYNAREC
-            if(!box64_dynarec_strongmem)
-                box64_dynarec_strongmem = 1;
+            if(!BOX64ENV(dynarec_strongmem)) SET_BOX64ENV(dynarec_strongmem, 1);
             #endif
             unityplayer_detected = 1;
         }
@@ -3042,28 +3034,28 @@ EXPORT void* my_mmap(x64emu_t* emu, void *addr, size_t length, int prot, int fla
 EXPORT void* my_mremap(x64emu_t* emu, void* old_addr, size_t old_size, size_t new_size, int flags, void* new_addr)
 {
     (void)emu;
-    if((emu || box64_is32bits) && (box64_log>=LOG_DEBUG || box64_dynarec_log>=LOG_DEBUG)) {printf_log(LOG_NONE, "mremap(%p, %lu, %lu, %d, %p)=>", old_addr, old_size, new_size, flags, new_addr);}
+    if((emu || box64_is32bits) && (BOX64ENV(log)>=LOG_DEBUG || BOX64ENV(dynarec_log)>=LOG_DEBUG)) {printf_log(LOG_NONE, "mremap(%p, %lu, %lu, %d, %p)=>", old_addr, old_size, new_size, flags, new_addr);}
     void* ret = mremap(old_addr, old_size, new_size, flags, new_addr);
-    if((emu || box64_is32bits) && (box64_log>=LOG_DEBUG || box64_dynarec_log>=LOG_DEBUG)) {printf_log(LOG_NONE, "%p\n", ret);}
+    if((emu || box64_is32bits) && (BOX64ENV(log)>=LOG_DEBUG || BOX64ENV(dynarec_log)>=LOG_DEBUG)) {printf_log(LOG_NONE, "%p\n", ret);}
     if(ret!=(void*)-1) {
         uint32_t prot = getProtection((uintptr_t)old_addr)&~PROT_CUSTOM;
         if(ret==old_addr) {
             if(old_size && old_size<new_size) {
                 setProtection_mmap((uintptr_t)ret+old_size, new_size-old_size, prot);
                 #ifdef DYNAREC
-                if(box64_dynarec)
+                if(BOX64ENV(dynarec))
                     addDBFromAddressRange((uintptr_t)ret+old_size, new_size-old_size);
                 #endif
             } else if(old_size && new_size<old_size) {
                 freeProtection((uintptr_t)ret+new_size, old_size-new_size);
                 #ifdef DYNAREC
-                if(box64_dynarec)
+                if(BOX64ENV(dynarec))
                     cleanDBFromAddressRange((uintptr_t)ret+new_size, old_size-new_size, 1);
                 #endif
             } else if(!old_size) {
                 setProtection_mmap((uintptr_t)ret, new_size, prot);
                 #ifdef DYNAREC
-                if(box64_dynarec)
+                if(BOX64ENV(dynarec))
                     addDBFromAddressRange((uintptr_t)ret, new_size);
                 #endif
             }
@@ -3075,13 +3067,13 @@ EXPORT void* my_mremap(x64emu_t* emu, void* old_addr, size_t old_size, size_t ne
             ) {
                 freeProtection((uintptr_t)old_addr, old_size);
                 #ifdef DYNAREC
-                if(box64_dynarec)
+                if(BOX64ENV(dynarec))
                     cleanDBFromAddressRange((uintptr_t)old_addr, old_size, 1);
                 #endif
             }
             setProtection_mmap((uintptr_t)ret, new_size, prot); // should copy the protection from old block
             #ifdef DYNAREC
-            if(box64_dynarec)
+            if(BOX64ENV(dynarec))
                 addDBFromAddressRange((uintptr_t)ret, new_size);
             #endif
         }
@@ -3092,11 +3084,11 @@ EXPORT void* my_mremap(x64emu_t* emu, void* old_addr, size_t old_size, size_t ne
 EXPORT int my_munmap(x64emu_t* emu, void* addr, size_t length)
 {
     (void)emu;
-    if((emu || box64_is32bits) && (box64_log>=LOG_DEBUG || box64_dynarec_log>=LOG_DEBUG)) {printf_log(LOG_NONE, "munmap(%p, 0x%lx)\n", addr, length);}
+    if((emu || box64_is32bits) && (BOX64ENV(log)>=LOG_DEBUG || BOX64ENV(dynarec_log)>=LOG_DEBUG)) {printf_log(LOG_NONE, "munmap(%p, 0x%lx)\n", addr, length);}
     int ret = box_munmap(addr, length);
     int e = errno;
 #ifdef DYNAREC
-    if(!ret && box64_dynarec && length) {
+    if(!ret && BOX64ENV(dynarec) && length) {
         cleanDBFromAddressRange((uintptr_t)addr, length, 1);
     }
 #endif
@@ -3110,12 +3102,12 @@ EXPORT int my_munmap(x64emu_t* emu, void* addr, size_t length)
 EXPORT int my_mprotect(x64emu_t* emu, void *addr, unsigned long len, int prot)
 {
     (void)emu;
-    if(emu && (box64_log>=LOG_DEBUG || box64_dynarec_log>=LOG_DEBUG)) {printf_log(LOG_NONE, "mprotect(%p, 0x%lx, 0x%x)\n", addr, len, prot);}
+    if(emu && (BOX64ENV(log)>=LOG_DEBUG || BOX64ENV(dynarec_log)>=LOG_DEBUG)) {printf_log(LOG_NONE, "mprotect(%p, 0x%lx, 0x%x)\n", addr, len, prot);}
     if(prot&PROT_WRITE) prot|=PROT_READ;    // PROT_READ is implicit with PROT_WRITE on x86_64
     int ret = mprotect(addr, len, prot);
 #ifdef DYNAREC
-    if(box64_dynarec && !ret && len) {
+    if(BOX64ENV(dynarec) && !ret && len) {
         if(prot& PROT_EXEC)
             addDBFromAddressRange((uintptr_t)addr, len);
         else
@@ -3422,7 +3414,7 @@ EXPORT int my_semctl(int semid, int semnum, int cmd, union semun b)
 }
 EXPORT int64_t userdata_sign = 0x1234598765ABCEF0;
-EXPORT uint32_t userdata[1024];
+EXPORT uint32_t userdata[1024];
 EXPORT long my_ptrace(x64emu_t* emu, int request, pid_t pid, void* addr, uint32_t* data)
 {
@@ -3785,7 +3777,7 @@ EXPORT int my_prctl(x64emu_t* emu, int option, unsigned long arg2, unsigned long
 {
     if(option==PR_SET_NAME) {
         printf_log(LOG_DEBUG, "BOX64: set process name to \"%s\"\n", (char*)arg2);
-        ApplyParams((char*)arg2);
+        ApplyEnvFileEntry((char*)arg2);
         size_t l = strlen((char*)arg2);
         if(l>4 && !strcasecmp((char*)arg2+l-4, ".exe")) {
             printf_log(LOG_DEBUG, "BOX64: hacking orig command line to \"%s\"\n", (char*)arg2);
@@ -3801,10 +3793,10 @@ EXPORT int my_prctl(x64emu_t* emu, int option, unsigned long arg2, unsigned long
 #ifndef _SC_NPROCESSORS_ONLN
 #define _SC_NPROCESSORS_ONLN 84
-#endif
+#endif
 #ifndef _SC_NPROCESSORS_CONF
 #define _SC_NPROCESSORS_CONF 83
-#endif
+#endif
 EXPORT long my_sysconf(x64emu_t* emu, int what) {
     if(what==_SC_NPROCESSORS_ONLN) {
         return getNCpu();
diff --git a/src/wrapped/wrappedlibdl.c b/src/wrapped/wrappedlibdl.c
index 2687b7ad..deb73827 100644
--- a/src/wrapped/wrappedlibdl.c
+++ b/src/wrapped/wrappedlibdl.c
@@ -354,7 +354,7 @@ void* my_dlsym(x64emu_t* emu, void *handle, void *symbol)
     if(box64_is32bits && handle==(void*)0xffffffff)
         handle = (void*)~0LL;
     CLEARERR
-    printf_dlsym(LOG_DEBUG, "%04d|Call to dlsym(%p, \"%s\")%s", GetTID(), handle, rsymbol, dlsym_error?"":"\n");
+    printf_dlsym(LOG_DEBUG, "%04d|Call to dlsym(%p, \"%s\")%s", GetTID(), handle, rsymbol, BOX64ENV(dlsym_error)?"":"\n");
     if(handle==NULL) {
         // special case, look globably
         if(GetGlobalSymbolStartEnd(my_context->maplib, rsymbol, &start, &end, NULL, -1, NULL, 0, NULL)) {
@@ -522,7 +522,7 @@ void* my_dlvsym(x64emu_t* emu, void *handle, void *symbol, const char *vername)
     if(box64_is32bits && handle==(void*)0xffffffff)
         handle = (void*)~0LL;
     CLEARERR
-    printf_dlsym(LOG_DEBUG, "Call to dlvsym(%p, \"%s\", %s)%s", handle, rsymbol, vername?vername:"(nil)", dlsym_error?"":"\n");
+    printf_dlsym(LOG_DEBUG, "Call to dlvsym(%p, \"%s\", %s)%s", handle, rsymbol, vername?vername:"(nil)", BOX64ENV(dlsym_error)?"":"\n");
     if(handle==NULL) {
         // special case, look globably
         if(GetGlobalSymbolStartEnd(my_context->maplib, rsymbol, &start, &end, NULL, version, vername, 0, NULL)) {
diff --git a/src/wrapped/wrappedlibfuse.c b/src/wrapped/wrappedlibfuse.c
index bca9c4cc..bd281d6c 100644
--- a/src/wrapped/wrappedlibfuse.c
+++ b/src/wrapped/wrappedlibfuse.c
@@ -2312,8 +2312,8 @@ EXPORT size_t my_fuse_add_direntry(x64emu_t* emu, void* req, char *buf, size_t b
 EXPORT int my_fuse_main_real(x64emu_t* emu, int argc, void* argv, const fuse_operations_t* op, size_t op_size, void* data)
 {
     static fuse_operations_t o_ = {0};
-box64_log=2;
-box64_showsegv=1;
+    SET_BOX64ENV(log, 2);
+    SET_BOX64ENV(showsegv, 1);
     size_t cvt = 0;
     #define GO(A) if(cvt<op_size) {o_.A = find_##A##_Fct(op->A); cvt+=sizeof(void*); if(o_.A) printf_log(LOG_DEBUG, "fuse: %s is present\n", #A);}
     // size is aligned in GOS
diff --git a/src/wrapped/wrappedlibgl.c b/src/wrapped/wrappedlibgl.c
index 65f06be7..8f854e58 100644
--- a/src/wrapped/wrappedlibgl.c
+++ b/src/wrapped/wrappedlibgl.c
@@ -412,9 +412,9 @@ static void* find_glGetVkProcAddrNV_Fct(void* fct)
 #undef SUPER
 #define PRE_INIT \
-    if(box64_libGL) { \
-        lib->w.lib = dlopen(box64_libGL, RTLD_LAZY | RTLD_GLOBAL); \
-        lib->path = strdup(box64_libGL); \
+    if(BOX64ENV(libgl)) { \
+        lib->w.lib = dlopen(BOX64ENV(libgl), RTLD_LAZY | RTLD_GLOBAL); \
+        lib->path = strdup(BOX64ENV(libgl)); \
     } else if(strstr(lib->name, "libGLX_nvidia.so.0")) { \
         lib->w.lib = dlopen("libGLX_nvidia.so.0", RTLD_LAZY | RTLD_GLOBAL); \
         if(lib->w.lib) lib->path = strdup("libGLX_nvidia.so.0"); \
diff --git a/src/wrapped/wrappedlibibus.c b/src/wrapped/wrappedlibibus.c
index f1ee4262..b0bb6ab2 100644
--- a/src/wrapped/wrappedlibibus.c
+++ b/src/wrapped/wrappedlibibus.c
@@ -114,7 +114,7 @@ EXPORT void my_ibus_input_context_process_key_event_async(x64emu_t* emu, void* b
 }
 #define PRE_INIT \
-    if(box64_nogtk) \
+    if(BOX64ENV(nogtk)) \
         return -1;
 #define NEEDED_LIBS "libgio-2.0.so.0", "libgobject-2.0.so.0", "libglib-2.0.so.0"
diff --git a/src/wrapped/wrappedlibm.c b/src/wrapped/wrappedlibm.c
index e190f9ed..8a7273ec 100644
--- a/src/wrapped/wrappedlibm.c
+++ b/src/wrapped/wrappedlibm.c
@@ -130,7 +130,7 @@ F2D(fmod)
 // See https://github.com/bminor/glibc/blob/master/sysdeps/x86_64/fpu/fesetround.c
 EXPORT int my_fesetround(x64emu_t* emu, int round)
 {
-    if (box64_sync_rounding) {
+    if (BOX64ENV(sync_rounding)) {
         if ((round & ~0xc00) != 0)
             // round is not valid.
            return 1;
@@ -150,7 +150,7 @@ EXPORT int my_fesetround(x64emu_t* emu, int round)
 // See https://github.com/bminor/glibc/blob/master/sysdeps/x86_64/fpu/fegetround.c
 EXPORT int my_fegetround(x64emu_t* emu)
 {
-    if (box64_sync_rounding) {
+    if (BOX64ENV(sync_rounding)) {
         return emu->cw.x16 & 0xc00;
     } else {
         return fegetround();
@@ -160,7 +160,7 @@ EXPORT int my_fegetround(x64emu_t* emu)
 #define FROUND(N, T, R) \
 EXPORT R my_##N(x64emu_t* emu, T val) \
 { \
-    if (box64_sync_rounding) { \
+    if (BOX64ENV(sync_rounding)) { \
         int round = emu->cw.x16 & 0xc00; \
         fesetround(TO_NATIVE(round)); \
     } \
@@ -180,7 +180,7 @@ FROUND(llrintl, long double, long double)
 #else
 EXPORT double my_llrintl(x64emu_t* emu, double val)
 {
-    if (box64_sync_rounding) {
+    if (BOX64ENV(sync_rounding)) {
         int round = emu->cw.x16 & 0xc00;
         fesetround(TO_NATIVE(round));
     }
diff --git a/src/wrapped/wrappedlibx11.c b/src/wrapped/wrappedlibx11.c
index fba0fa72..bd1ac4e7 100644
--- a/src/wrapped/wrappedlibx11.c
+++ b/src/wrapped/wrappedlibx11.c
@@ -1361,7 +1361,7 @@ EXPORT int my_XUnregisterIMInstantiateCallback(x64emu_t* emu, void* d, void* db,
 EXPORT int my_XQueryExtension(x64emu_t* emu, void* display, char* name, int* major, int* first_event, int* first_error)
 {
     int ret = my->XQueryExtension(display, name, major, first_event, first_error);
-    if(!ret && name && !strcmp(name, "GLX") && box64_x11glx) {
+    if(!ret && name && !strcmp(name, "GLX") && BOX64ENV(x11glx)) {
         // hack to force GLX to be accepted, even if not present
         // left major and first_XXX to default...
         ret = 1;
@@ -1552,7 +1552,7 @@ EXPORT void* my_XOpenDisplay(x64emu_t* emu, void* d)
     if(!ret)
         return ret;
-    if(box64_x11sync) {my->XSynchronize(ret, 1); printf_log(LOG_INFO, "Forcing Syncronized opration on Display %p\n", ret);}
+    if(BOX64ENV(x11sync)) {my->XSynchronize(ret, 1); printf_log(LOG_INFO, "Forcing Syncronized opration on Display %p\n", ret);}
     bridge_t* system = emu->context->system;
 #define GO(A, W)\
@@ -1619,7 +1619,7 @@ EXPORT void* my__XGetRequest(x64emu_t* emu, my_XDisplay_t* dpy, uint8_t type, si
 #define CUSTOM_INIT \
     AddAutomaticBridge(lib->w.bridge, vFp, *(void**)dlsym(lib->w.lib, "_XLockMutex_fn"), 0, "_XLockMutex_fn"); \
     AddAutomaticBridge(lib->w.bridge, vFp, *(void**)dlsym(lib->w.lib, "_XUnlockMutex_fn"), 0, "_XUnlockMutex_fn"); \
-    if(box64_x11threads) my->XInitThreads();
+    if(BOX64ENV(x11threads)) my->XInitThreads();
 #define NEEDED_LIBS "libxcb.so.1"
diff --git a/src/wrapped/wrappednotify.c b/src/wrapped/wrappednotify.c
index 1a5e0d8a..028a6664 100644
--- a/src/wrapped/wrappednotify.c
+++ b/src/wrapped/wrappednotify.c
@@ -24,7 +24,7 @@ const char* notifyName = "libnotify.so.4";
 #define PRE_INIT \
-    if(box64_nogtk) \
+    if(BOX64ENV(nogtk)) \
         return -1;
 #include "wrappedlib_init.h"
diff --git a/src/wrapped/wrappedpango.c b/src/wrapped/wrappedpango.c
index 5051084a..6b58f167 100644
--- a/src/wrapped/wrappedpango.c
+++ b/src/wrapped/wrappedpango.c
@@ -94,7 +94,7 @@ EXPORT void my_pango_attribute_init(x64emu_t* emu, void* attr, my_PangoAttrClass
 }
 #define PRE_INIT \
-    if(box64_nogtk) \
+    if(BOX64ENV(nogtk)) \
         return -1;
 #define NEEDED_LIBS "libgobject-2.0.so.0", "libglib-2.0.so.0"
diff --git a/src/wrapped/wrappedpangocairo.c b/src/wrapped/wrappedpangocairo.c
index 8bc12819..83a082a5 100644
--- a/src/wrapped/wrappedpangocairo.c
+++ b/src/wrapped/wrappedpangocairo.c
@@ -18,7 +18,7 @@ const char* pangocairoName = "libpangocairo-1.0.so.0";
 #define LIBNAME pangocairo
 #define PRE_INIT \
-    if(box64_nogtk) \
+    if(BOX64ENV(nogtk)) \
         return -1;
diff --git a/src/wrapped/wrappedpangoft2.c b/src/wrapped/wrappedpangoft2.c
index 4734196c..3acfc082 100644
--- a/src/wrapped/wrappedpangoft2.c
+++ b/src/wrapped/wrappedpangoft2.c
@@ -15,7 +15,7 @@ const char* pangoft2Name = "libpangoft2-1.0.so.0";
 #define LIBNAME pangoft2
 #define PRE_INIT \
-    if(box64_nogtk) \
+    if(BOX64ENV(nogtk)) \
         return -1;
 #define NEEDED_LIBS "libfontconfig.so.1", "libfreetype.so.6"
diff --git a/src/wrapped/wrappedpulse.c b/src/wrapped/wrappedpulse.c
index d902a538..3caf5256 100644
--- a/src/wrapped/wrappedpulse.c
+++ b/src/wrapped/wrappedpulse.c
@@ -1578,7 +1578,7 @@ void my_autobridge_mainloop_api(x64emu_t* emu, void* api)
 }
 #define PRE_INIT \
-    if(box64_nopulse) \
+    if(BOX64ENV(nopulse)) \
         return -1;
 #include "wrappedlib_init.h"
diff --git a/src/wrapped/wrappedpulsemainloopglib.c b/src/wrapped/wrappedpulsemainloopglib.c
index 6bf20107..58fcedd4 100644
--- a/src/wrapped/wrappedpulsemainloopglib.c
+++ b/src/wrapped/wrappedpulsemainloopglib.c
@@ -35,7 +35,7 @@ EXPORT void* my_pa_glib_mainloop_get_api(x64emu_t* emu, void* mainloop)
 }
 #define PRE_INIT \
-    if(box64_nopulse) \
+    if(BOX64ENV(nopulse)) \
         return -1;
 #define NEEDED_LIBS "libpulse.so.0"
diff --git a/src/wrapped/wrappedpulsesimple.c b/src/wrapped/wrappedpulsesimple.c
index 0b99b8e4..d4e24c7b 100644
--- a/src/wrapped/wrappedpulsesimple.c
+++ b/src/wrapped/wrappedpulsesimple.c
@@ -22,8 +22,8 @@ const char* pulsesimpleName = "libpulse-simple.so.0";
 #define LIBNAME pulsesimple
-#define PRE_INIT \
-    if(box64_nopulse) \
+#define PRE_INIT \
+    if(BOX64ENV(nopulse)) \
         return -1;
 #define NEEDED_LIBS "libpulse.so.0"
diff --git a/src/wrapped/wrappedsdl2.c b/src/wrapped/wrappedsdl2.c
index 199e2dba..483bb51e 100644
--- a/src/wrapped/wrappedsdl2.c
+++ b/src/wrapped/wrappedsdl2.c
@@ -44,13 +44,13 @@ int EXPORT my2_SDL_HasSSE2(void) __attribute__((alias("sdl_Yes")));
 int EXPORT my2_SDL_HasSSE3(void) __attribute__((alias("sdl_Yes")));
 int EXPORT my2_SDL_HasSSE41(void) __attribute__((alias("sdl_Yes")));
 int EXPORT my2_SDL_HasSSE42(void) {
-    return box64_sse42?1:0;
+    return BOX64ENV(sse42)?1:0;
 }
 int EXPORT my2_SDL_HasAVX(void) {
-    return box64_avx?1:0;
+    return BOX64ENV(avx)?1:0;
 }
 int EXPORT my2_SDL_HasAVX2(void) {
-    return box64_avx2?1:0;
+    return BOX64ENV(avx2)?1:0;
 }
 int EXPORT my2_SDL_HasAVX512F(void) __attribute__((alias("sdl_No")));
@@ -657,9 +657,9 @@ EXPORT void* my2_SDL_GL_GetProcAddress(x64emu_t* emu, void* name)
     if(!lib_checked) {
         lib_checked = 1;
         // check if libGL is loaded, load it if not (helps some Haxe games, like DeadCells or Nuclear Blaze)
-        if(!my_glhandle && !GetLibInternal(box64_libGL?box64_libGL:"libGL.so.1"))
+        if(!my_glhandle && !GetLibInternal(BOX64ENV(libgl)?BOX64ENV(libgl):"libGL.so.1"))
             // use a my_dlopen to actually open that lib, like SDL2 is doing...
-            my_glhandle = my_dlopen(emu, box64_libGL?box64_libGL:"libGL.so.1", RTLD_LAZY|RTLD_GLOBAL);
+            my_glhandle = my_dlopen(emu, BOX64ENV(libgl)?BOX64ENV(libgl):"libGL.so.1", RTLD_LAZY|RTLD_GLOBAL);
     }
     return getGLProcAddress(emu, (glprocaddress_t)my->SDL_GL_GetProcAddress, rname);
 }
@@ -806,7 +806,7 @@ EXPORT void my2_SDL_GetJoystickGUIDInfo(SDL_JoystickGUID guid, uint16_t *vend, u
 {
     uint16_t dummy = 0;
     if(my->SDL_GetJoystickGUIDInfo)
-        my->SDL_GetJoystickGUIDInfo(guid, vend, prod, ver, box64_sdl2_jguid?(&dummy):crc16);
+        my->SDL_GetJoystickGUIDInfo(guid, vend, prod, ver, BOX64ENV(sdl2_jguid)?(&dummy):crc16);
     // fallback
     else {
         uint16_t *guid16 = (uint16_t *)guid.data;
@@ -837,8 +837,8 @@ EXPORT unsigned long my2_SDL_GetThreadID(x64emu_t* emu, void* thread)
 EXPORT int my2_SDL_GetCPUCount(x64emu_t* emu)
 {
     int ret = my->SDL_GetCPUCount();
-    if(box64_maxcpu && ret>box64_maxcpu)
-        ret = box64_maxcpu;
+    if(BOX64ENV(maxcpu) && ret>BOX64ENV(maxcpu))
+        ret = BOX64ENV(maxcpu);
     return ret;
 }
diff --git a/src/wrapped/wrappedvulkan.c b/src/wrapped/wrappedvulkan.c
index 28bcb260..7fa89794 100644
--- a/src/wrapped/wrappedvulkan.c
+++ b/src/wrapped/wrappedvulkan.c
@@ -404,8 +404,8 @@ static void* find_DebugUtilsMessengerCallback_Fct(void* fct)
 //#define PRE_INIT if(libGL) {lib->w.lib = dlopen(libGL, RTLD_LAZY | RTLD_GLOBAL); lib->path = box_strdup(libGL);} else
-#define PRE_INIT \
-    if(box64_novulkan) \
+#define PRE_INIT \
+    if(BOX64ENV(novulkan)) \
         return -1;
 #define CUSTOM_INIT \
diff --git a/src/wrapped32/wrappedcrashhandler.c b/src/wrapped32/wrappedcrashhandler.c
index 8e8e68fa..430c5a5d 100755
--- a/src/wrapped32/wrappedcrashhandler.c
+++ b/src/wrapped32/wrappedcrashhandler.c
@@ -21,7 +21,7 @@ static const char* crashhandlerName = "crashhandler.so";
 #define LIBNAME crashhandler
 #define PRE_INIT \
-    if(!box64_dummy_crashhandler) \
+    if(!BOX64ENV(dummy_crashhandler)) \
        return -1; \
     if(1) \
         lib->w.lib = dlopen(NULL, RTLD_LAZY | RTLD_GLOBAL);\
diff --git a/src/wrapped32/wrappedlibc.c b/src/wrapped32/wrappedlibc.c
index 9f42db10..e1fedcf8 100755
--- a/src/wrapped32/wrappedlibc.c
+++ b/src/wrapped32/wrappedlibc.c
@@ -21,7 +21,7 @@
 #include <poll.h>
 #include <sys/epoll.h>
 #include <ftw.h>
-#include <sys/syscall.h>
+#include <sys/syscall.h>
 #include <sys/utsname.h>
 #include <sys/mman.h>
 #include <sys/ipc.h>
@@ -128,7 +128,6 @@ static const char* libcName =
 #endif
     ;
-extern int fix_64bit_inodes;
 typedef int32_t (*iFiiV_t)(int32_t, int32_t, ...);
 typedef int32_t (*iFpipp_t)(void*, int32_t, void*, void*);
 #if 0
@@ -518,7 +517,7 @@ void EXPORT my32___stack_chk_fail(x64emu_t* emu)
 #else
     sprintf(buff, "%p: Stack is corrupted, aborting ESP=0x%x %s\n", addr, R_ESP, name);
 #endif
-    print_cycle_log(LOG_INFO);
+    print_rolling_log(LOG_INFO);
     StopEmu(emu, buff, 1);
 }
 int EXPORT my32___xmknod(x64emu_t* emu, int ver, const char* path, mode_t mode, dev_t* dev)
@@ -1119,7 +1118,7 @@ static int FillStatFromStat64(int vers, const struct stat64 *st64, void *st32)
         i386st->st_dev = st64->st_dev;
         i386st->__pad1 = 0;
-        if (fix_64bit_inodes)
+        if (BOX64ENV(fix_64bit_inodes))
         {
             i386st->st_ino = st64->st_ino ^ (st64->st_ino >> 32);
         }
@@ -1436,7 +1435,7 @@ EXPORT void* my32_readdir(x64emu_t* emu, void* dirp)
 EXPORT int32_t my32_readdir_r(x64emu_t* emu, void* dirp, void* entry, void** result)
 {
     struct dirent64 d64, *dp64;
-    if (fix_64bit_inodes && (sizeof(d64.d_name) > 1))
+    if (BOX64ENV(fix_64bit_inodes) && (sizeof(d64.d_name) > 1))
     {
         static iFppp_t f = NULL;
         if(!f) {
@@ -1530,7 +1529,7 @@ EXPORT ssize_t my32_read(int fd, void* buf, size_t count)
 {
     int ret = read(fd, buf, count);
 #ifdef DYNAREC
-    if(ret!=count && ret>0 && box64_dynarec) {
+    if(ret!=count && ret>0 && BOX64ENV(dynarec)) {
         // continue reading...
        void* p = buf+ret;
        if(hasDBFromAddress((uintptr_t)p)) {
diff --git a/src/wrapped32/wrappedlibgl.c b/src/wrapped32/wrappedlibgl.c
index 25d9ac00..1e191df3 100644
--- a/src/wrapped32/wrappedlibgl.c
+++ b/src/wrapped32/wrappedlibgl.c
@@ -890,9 +890,9 @@ static void* find_glVDPAUUnmapSurfacesNV_Fct(void* fct)
 #undef SUPER
 #define PRE_INIT \
-    if(box64_libGL) { \
-        lib->w.lib = dlopen(box64_libGL, RTLD_LAZY | RTLD_GLOBAL); \
-        lib->path = strdup(box64_libGL); \
+    if(BOX64ENV(libgl)) { \
+        lib->w.lib = dlopen(BOX64ENV(libgl), RTLD_LAZY | RTLD_GLOBAL); \
+        lib->path = strdup(BOX64ENV(libgl)); \
     } else if(strstr(lib->name, "libGLX_nvidia.so.0")) { \
         lib->w.lib = dlopen("libGLX_nvidia.so.0", RTLD_LAZY | RTLD_GLOBAL); \
         if(lib->w.lib) lib->path = strdup("libGLX_nvidia.so.0"); \
diff --git a/src/wrapped32/wrappedlibx11.c b/src/wrapped32/wrappedlibx11.c
index 4018febc..acae4c87 100644
--- a/src/wrapped32/wrappedlibx11.c
+++ b/src/wrapped32/wrappedlibx11.c
@@ -1655,7 +1655,7 @@ EXPORT int my32_XUnregisterIMInstantiateCallback(x64emu_t* emu, void* d, void* d
 EXPORT int my32_XQueryExtension(x64emu_t* emu, void* display, char* name, int* major, int* first_event, int* first_error)
 {
     int ret = my->XQueryExtension(display, name, major, first_event, first_error);
-    if(!ret && name && !strcmp(name, "GLX") && box64_x11glx) {
+    if(!ret && name && !strcmp(name, "GLX") && BOX64ENV(x11glx)) {
         // hack to force GLX to be accepted, even if not present
         // left major and first_XXX to default...
         ret = 1;
@@ -1688,7 +1688,7 @@ EXPORT void* my32_XSynchronize(x64emu_t* emu, void* display, int onoff)
 EXPORT void* my32_XOpenDisplay(void* name)
 {
     void* ret = my->XOpenDisplay(name);
-    if(ret && box64_x11sync) {my->XSynchronize(ret, 1); printf_log(LOG_INFO, "Forcing Syncronized operation on Display %p\n", ret);}
+    if(ret && BOX64ENV(x11sync)) {my->XSynchronize(ret, 1); printf_log(LOG_INFO, "Forcing Syncronized operation on Display %p\n", ret);}
     return ret;
 }
@@ -2670,7 +2670,7 @@ EXPORT int my32__XReply(x64emu_t* emu, void* dpy, void* rep, int extra, int disc
 #define CUSTOM_INIT \
     AddAutomaticBridge(lib->w.bridge, vFp_32, *(void**)dlsym(lib->w.lib, "_XLockMutex_fn"), 0, "_XLockMutex_fn"); \
     AddAutomaticBridge(lib->w.bridge, vFp_32, *(void**)dlsym(lib->w.lib, "_XUnlockMutex_fn"), 0, "_XUnlockMutex_fn"); \
-    if(box64_x11threads) my->XInitThreads(); \
+    if(BOX64ENV(x11threads)) my->XInitThreads(); \
     my_context->libx11 = lib;
 #define CUSTOM_FINI \
diff --git a/src/wrapped32/wrappedsdl2.c b/src/wrapped32/wrappedsdl2.c
index 3b043431..9046b634 100644
--- a/src/wrapped32/wrappedsdl2.c
+++ b/src/wrapped32/wrappedsdl2.c
@@ -783,8 +783,8 @@ EXPORT unsigned long my32_2_SDL_GetThreadID(x64emu_t* emu, void* thread)
 EXPORT int my32_2_SDL_GetCPUCount(x64emu_t* emu)
 {
     int ret = my->SDL_GetCPUCount();
-    if(box64_maxcpu && ret>box64_maxcpu)
-        ret = box64_maxcpu;
+    if(BOX64ENV(maxcpu) && ret>BOX64ENV(maxcpu))
+        ret = BOX64ENV(maxcpu);
     return ret;
 }
@@ -803,4 +803,4 @@ EXPORT int my32_2_SDL_GetCPUCount(x64emu_t* emu)
     my_context->sdl2freerw = NULL;
-#include "wrappedlib_init32.h"
\ No newline at end of file
+#include "wrappedlib_init32.h"
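Note on the pattern used throughout the hunks above: scattered box64_* globals are replaced by the BOX64ENV()/SET_BOX64ENV() accessors over the box64env_t settings structure declared in env.h. The following is a minimal, self-contained sketch of that accessor style; the field names and macro bodies here are illustrative assumptions, not copied from box64's actual env.h.

/* Sketch only: assumed shapes, not box64's real definitions. One settings
 * struct plus read/write macros, mirroring the accessor style this diff
 * migrates the call sites to. */
#include <stdio.h>

typedef struct box64env_s {
    int log;                /* stands in for the old box64_log global       */
    int dynarec;            /* stands in for box64_dynarec                  */
    int dynarec_strongmem;  /* stands in for box64_dynarec_strongmem        */
} box64env_t;

static box64env_t box64env = {0};

#define BOX64ENV(name)        (box64env.name)
#define SET_BOX64ENV(name, v) (box64env.name = (v))

int main(void)
{
    SET_BOX64ENV(dynarec, 1);
    /* same shape as the UnityPlayer.dll path above: flip a setting at runtime */
    if (BOX64ENV(dynarec) && !BOX64ENV(dynarec_strongmem))
        SET_BOX64ENV(dynarec_strongmem, 1);
    printf("dynarec=%d strongmem=%d\n", BOX64ENV(dynarec), BOX64ENV(dynarec_strongmem));
    return 0;
}

Keeping the settings in one structure presumably lets a single loader fill them in one place, which fits the prctl(PR_SET_NAME) hunk above where per-process settings are re-applied via ApplyEnvFileEntry().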