| field | value |
|---|---|
| author | ptitSeb <sebastien.chev@gmail.com>, 2021-03-01 15:52:05 +0100 |
| committer | ptitSeb <sebastien.chev@gmail.com>, 2021-03-01 15:52:05 +0100 |
| commit | 311842a43aa34276dc41da6e2f3a63ac80d4849c (patch) |
| tree | 4da4eeeb2c3c4de462389e06b6e1ed504dfea6e9 /src |
| parent | 4ea8ebb3367810a1a789acdfedb2d346491f603a (diff) |
| download | box64-311842a43aa34276dc41da6e2f3a63ac80d4849c.tar.gz, box64-311842a43aa34276dc41da6e2f3a63ac80d4849c.zip |
Read elf header of launched executable
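The loader code added below (elfparser.c / elfloader.c) starts by reading and sanity-checking the ELF header of the program being launched. As a rough sketch of that first step (illustrative only: the helper name `check_elf64_header` is made up here, and the commit's actual parsing is done by ParseElfHeader() in src/elfs/elfparser.c, which is not fully shown on this page), validating an x86_64 ELF header amounts to:

```c
#include <elf.h>
#include <stdio.h>
#include <string.h>

/* Illustrative only: read the ELF header from an already-opened file and
 * check it is a 64-bit, little-endian x86_64 executable or shared object.
 * The real work in this commit is done by ParseElfHeader() in elfparser.c. */
static int check_elf64_header(FILE *f, Elf64_Ehdr *hdr)
{
    if (fseek(f, 0, SEEK_SET) != 0 || fread(hdr, sizeof(*hdr), 1, f) != 1)
        return -1;                                  /* too short to be an ELF file */
    if (memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0)
        return -1;                                  /* bad magic */
    if (hdr->e_ident[EI_CLASS] != ELFCLASS64 ||
        hdr->e_ident[EI_DATA]  != ELFDATA2LSB)
        return -1;                                  /* box64 only handles little-endian 64-bit */
    if (hdr->e_machine != EM_X86_64)
        return -1;                                  /* not an x86_64 binary */
    if (hdr->e_type != ET_EXEC && hdr->e_type != ET_DYN)
        return -1;                                  /* neither an executable nor a PIE/.so */
    return 0;
}
```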
Diffstat (limited to 'src')
| mode | file | lines |
|---|---|---|
| -rw-r--r-- | src/custommem.c | 748 |
| -rwxr-xr-x | src/elfs/elfload_dump.c | 366 |
| -rwxr-xr-x | src/elfs/elfloader.c | 1278 |
| -rwxr-xr-x | src/elfs/elfloader_private.h | 109 |
| -rwxr-xr-x | src/elfs/elfparser.c | 313 |
| -rwxr-xr-x | src/include/box64context.h | 6 |
| -rw-r--r-- | src/include/custommem.h | 52 |
| -rwxr-xr-x | src/include/elfload_dump.h | 24 |
| -rwxr-xr-x | src/include/elfloader.h | 59 |
| -rwxr-xr-x | src/main.c | 6 |
10 files changed, 2959 insertions, 2 deletions
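Most of the new src/custommem.c in the diff below is block-allocator and dynarec-mapping code carried over from box86 but left commented out; only the setProtection()/getProtection() stubs and the init/fini helpers are live. The commented-out design keeps one byte of protection flags per 4 KB page in a flat array indexed by `addr >> MEMPROT_SHIFT`. A minimal standalone sketch of that idea follows (assuming the 32-bit-sized table of the original box86 code, which does not scale to a full 64-bit address space — presumably why box64 stubs it out in this commit):

```c
#include <stdint.h>
#include <pthread.h>

/* Hypothetical, simplified version of the commented-out scheme: one byte of
 * protection flags per 4 KB page, covering a 4 GB address space (the box86
 * layout). A 64-bit address space would need a sparser structure. */
#define MEMPROT_SHIFT 12
#define MEMPROT_SIZE  (1u << (32 - MEMPROT_SHIFT))

static uint8_t         memprot[MEMPROT_SIZE];   /* protection flags per 4 KB page */
static pthread_mutex_t mutex_prot = PTHREAD_MUTEX_INITIALIZER;

void setProtection(uintptr_t addr, uintptr_t size, uint32_t prot)
{
    const uintptr_t idx = addr >> MEMPROT_SHIFT;
    const uintptr_t end = (addr + size - 1) >> MEMPROT_SHIFT;
    pthread_mutex_lock(&mutex_prot);
    for (uintptr_t i = idx; i <= end; ++i)
        memprot[i] = (uint8_t)prot;             /* remember PROT_READ/WRITE/EXEC bits */
    pthread_mutex_unlock(&mutex_prot);
}

uint32_t getProtection(uintptr_t addr)
{
    pthread_mutex_lock(&mutex_prot);
    uint32_t ret = memprot[addr >> MEMPROT_SHIFT];
    pthread_mutex_unlock(&mutex_prot);
    return ret;
}
```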
diff --git a/src/custommem.c b/src/custommem.c new file mode 100644 index 00000000..00453f9c --- /dev/null +++ b/src/custommem.c @@ -0,0 +1,748 @@ +#define _GNU_SOURCE /* See feature_test_macros(7) */ +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <dlfcn.h> +#include <signal.h> + +#include "box64context.h" +#include "elfloader.h" +#include "debug.h" +//#include "x86trace.h" +//#include "x86emu.h" +//#include "librarian.h" +//#include "bridge.h" +//#include "library.h" +//#include "callback.h" +//#include "wrapper.h" +//#include "myfts.h" +//#include "threads.h" +//#include "x86trace.h" +//#include "signals.h" +#include <sys/mman.h> +#include "custommem.h" +#ifdef DYNAREC +//#include "dynablock.h" +//#include "dynarec/arm_lock_helper.h" +//#include "khash.h" + +//#define USE_MMAP + +// init inside dynablocks.c +//KHASH_MAP_INIT_INT(dynablocks, dynablock_t*) +//static dynablocklist_t* dynmap[DYNAMAP_SIZE]; // 4G of memory mapped by 4K block +//static pthread_mutex_t mutex_mmap; +//static mmaplist_t *mmaplist; +//static int mmapsize; +//static kh_dynablocks_t *dblist_oversized; // store the list of oversized dynablocks (normal sized are inside mmaplist) +//static uintptr_t *box86_jumptable[JMPTABL_SIZE]; +//static uintptr_t box86_jmptbl_default[1<<JMPTABL_SHIFT]; +#endif +//#define MEMPROT_SHIFT 12 +//#define MEMPROT_SIZE (1<<(32-MEMPROT_SHIFT)) +//static pthread_mutex_t mutex_prot; +//static uint8_t memprot[MEMPROT_SIZE] = {0}; // protection flags by 4K block +static int inited = 0; + +//typedef struct blocklist_s { +// void* block; +// int maxfree; +// size_t size; +//} blocklist_t; + +//#define MMAPSIZE (256*1024) // allocate 256kb sized blocks + +//static pthread_mutex_t mutex_blocks = PTHREAD_MUTEX_INITIALIZER; +//static int n_blocks = 0; // number of blocks for custom malloc +//static blocklist_t* p_blocks = NULL; // actual blocks for custom malloc + +//typedef union mark_s { +// struct { +// unsigned int fill:1; +// unsigned int size:31; +// }; +// uint32_t x32; +//} mark_t; +//typedef struct blockmark_s { +// mark_t prev; +// mark_t next; +//} blockmark_t; + + +// get first subblock free in block, stating at start (from block). return NULL if no block, else first subblock free (mark included), filling size +//static void* getFirstBlock(void* block, int maxsize, int* size) +//{ +// // get start of block +// blockmark_t *m = (blockmark_t*)block; +// while(m->next.x32) { // while there is a subblock +// if(!m->next.fill && m->next.size>=maxsize+sizeof(blockmark_t)) { +// *size = m->next.size; +// return m; +// } +// m = (blockmark_t*)((uintptr_t)m + m->next.size); +// } +// +// return NULL; +//} + +//static int getMaxFreeBlock(void* block, size_t block_size) +//{ +// // get start of block +// blockmark_t *m = (blockmark_t*)((uintptr_t)block+block_size-sizeof(blockmark_t)); // styart with the end +// int maxsize = 0; +// while(m->prev.x32) { // while there is a subblock +// if(!m->prev.fill && m->prev.size>maxsize) { +// maxsize = m->prev.size; +// if((uintptr_t)block+maxsize>(uintptr_t)m) +// return maxsize; // no block large enough left... 
+// } +// m = (blockmark_t*)((uintptr_t)m - m->prev.size); +// } +// return maxsize; +//} + +//static void* allocBlock(void* block, void *sub, int size) +//{ +// blockmark_t *s = (blockmark_t*)sub; +// blockmark_t *n = (blockmark_t*)((uintptr_t)s + s->next.size); +// +// s->next.fill = 1; +// s->next.size = size+sizeof(blockmark_t); +// blockmark_t *m = (blockmark_t*)((uintptr_t)s + s->next.size); // this is new n +// m->prev.fill = 1; +// m->prev.size = s->next.size; +// if(n!=m) { +// // new mark +// m->prev.fill = 1; +// m->prev.size = s->next.size; +// m->next.fill = 0; +// m->next.size = (uintptr_t)n - (uintptr_t)m; +// n->prev.fill = 0; +// n->prev.size = m->next.size; +// } +// +// return (void*)((uintptr_t)sub + sizeof(blockmark_t)); +//} +//static void freeBlock(void *block, void* sub) +//{ +// blockmark_t *m = (blockmark_t*)block; +// blockmark_t *s = (blockmark_t*)sub; +// blockmark_t *n = (blockmark_t*)((uintptr_t)s + s->next.size); +// if(block!=sub) +// m = (blockmark_t*)((uintptr_t)s - s->prev.size); +// s->next.fill = 0; +// n->prev.fill = 0; +// // check if merge with previous +// if (s->prev.x32 && !s->prev.fill) { +// // remove s... +// m->next.size += s->next.size; +// n->prev.size = m->next.size; +// s = m; +// } +// // check if merge with next +// if(n->next.x32 && !n->next.fill) { +// blockmark_t *n2 = (blockmark_t*)((uintptr_t)n + n->next.size); +// //remove n +// s->next.size += n->next.size; +// n2->prev.size = s->next.size; +// } +//} +// return 1 if block has been expanded to new size, 0 if not +//static int expandBlock(void* block, void* sub, int newsize) +//{ +// newsize = (newsize+3)&~3; +// blockmark_t *s = (blockmark_t*)sub; +// blockmark_t *n = (blockmark_t*)((uintptr_t)s + s->next.size); +// if(s->next.fill) +// return 0; // next block is filled +// if(s->next.size + n->next.size < newsize) +// return 0; // free space too short +// // ok, doing the alloc! 
+// s->next.size = newsize+sizeof(blockmark_t); +// blockmark_t *m = (blockmark_t*)((uintptr_t)s + s->next.size); // this is new n +// m->prev.fill = 1; +// m->prev.size = s->next.size; +// if(n!=m) { +// // new mark +// m->prev.fill = 1; +// m->prev.size = s->next.size; +// m->next.fill = 0; +// m->next.size = (uintptr_t)n - (uintptr_t)m; +// n->prev.fill = 0; +// n->prev.size = m->next.size; +// } +// return 1; +//} +// return size of block +//static int sizeBlock(void* sub) +//{ +// blockmark_t *s = (blockmark_t*)sub; +// return s->next.size; +//} + +//void* customMalloc(size_t size) +//{ +// // look for free space +// void* sub = NULL; +// pthread_mutex_lock(&mutex_blocks); +// for(int i=0; i<n_blocks; ++i) { +// if(p_blocks[i].maxfree>=size) { +// int rsize = 0; +// sub = getFirstBlock(p_blocks[i].block, size, &rsize); +// if(sub) { +// void* ret = allocBlock(p_blocks[i].block, sub, size); +// if(rsize==p_blocks[i].maxfree) +// p_blocks[i].maxfree = getMaxFreeBlock(p_blocks[i].block, p_blocks[i].size); +// pthread_mutex_unlock(&mutex_blocks); +// return ret; +// } +// } +// } +// // add a new block +// int i = n_blocks++; +// p_blocks = (blocklist_t*)realloc(p_blocks, n_blocks*sizeof(blocklist_t)); +// size_t allocsize = MMAPSIZE; +// if(size+2*sizeof(blockmark_t)>allocsize) +// allocsize = size+2*sizeof(blockmark_t); +// #ifdef USE_MMAP +// void* p = mmap(NULL, allocsize, PROT_READ|PROT_WRITE, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0); +// memset(p, 0, allocsize); +// #else +// void* p = calloc(1, allocsize); +// #endif +// p_blocks[i].block = p; +// p_blocks[i].size = allocsize; +// // setup marks +// blockmark_t* m = (blockmark_t*)p; +// m->prev.x32 = 0; +// m->next.fill = 0; +// m->next.size = allocsize-sizeof(blockmark_t); +// m = (blockmark_t*)(p+allocsize-sizeof(blockmark_t)); +// m->next.x32 = 0; +// m->prev.fill = 0; +// m->prev.size = allocsize-sizeof(blockmark_t); +// // alloc 1st block +// void* ret = allocBlock(p_blocks[i].block, p, size); +// p_blocks[i].maxfree = getMaxFreeBlock(p_blocks[i].block, p_blocks[i].size); +// pthread_mutex_unlock(&mutex_blocks); +// return ret; +//} +//void* customCalloc(size_t n, size_t size) +//{ +// size_t newsize = n*size; +// void* ret = customMalloc(newsize); +// memset(ret, 0, newsize); +// return ret; +//} +//void* customRealloc(void* p, size_t size) +//{ +// if(!p) +// return customMalloc(size); +// uintptr_t addr = (uintptr_t)p; +// pthread_mutex_lock(&mutex_blocks); +// for(int i=0; i<n_blocks; ++i) { +// if ((addr>(uintptr_t)p_blocks[i].block) +// && (addr<((uintptr_t)p_blocks[i].block+p_blocks[i].size))) { +// void* sub = (void*)(addr-sizeof(blockmark_t)); +// if(expandBlock(p_blocks[i].block, sub, size)) { +// p_blocks[i].maxfree = getMaxFreeBlock(p_blocks[i].block, p_blocks[i].size); +// pthread_mutex_unlock(&mutex_blocks); +// return p; +// } +// pthread_mutex_unlock(&mutex_blocks); +// void* newp = customMalloc(size); +// memcpy(newp, p, sizeBlock(sub)); +// customFree(p); +// return newp; +// +// } +// } +// pthread_mutex_unlock(&mutex_blocks); +// if(n_blocks) +// dynarec_log(LOG_NONE, "Warning, block %p not found in p_blocks for Realloc, Malloc'ng again without free\n", (void*)addr); +// return customMalloc(size); +//} +//void customFree(void* p) +//{ +// if(!p) +// return; +// uintptr_t addr = (uintptr_t)p; +// pthread_mutex_lock(&mutex_blocks); +// for(int i=0; i<n_blocks; ++i) { +// if ((addr>(uintptr_t)p_blocks[i].block) +// && (addr<((uintptr_t)p_blocks[i].block+p_blocks[i].size))) { +// void* sub = 
(void*)(addr-sizeof(blockmark_t)); +// freeBlock(p_blocks[i].block, sub); +// p_blocks[i].maxfree = getMaxFreeBlock(p_blocks[i].block, p_blocks[i].size); +// pthread_mutex_unlock(&mutex_blocks); +// return; +// } +// } +// pthread_mutex_unlock(&mutex_blocks); +// if(n_blocks) +// dynarec_log(LOG_NONE, "Warning, block %p not found in p_blocks for Free\n", (void*)addr); +//} + +#ifdef DYNAREC +//typedef struct mmaplist_s { +// void* block; +// int maxfree; +// size_t size; +// kh_dynablocks_t* dblist; +// uint8_t* helper; +//} mmaplist_t; + +//uintptr_t FindFreeDynarecMap(dynablock_t* db, int size) +//{ +// // look for free space +// void* sub = NULL; +// for(int i=0; i<mmapsize; ++i) { +// if(mmaplist[i].maxfree>=size) { +// int rsize = 0; +// sub = getFirstBlock(mmaplist[i].block, size, &rsize); +// if(sub) { +// uintptr_t ret = (uintptr_t)allocBlock(mmaplist[i].block, sub, size); +// if(rsize==mmaplist[i].maxfree) +// mmaplist[i].maxfree = getMaxFreeBlock(mmaplist[i].block, mmaplist[i].size); +// kh_dynablocks_t *blocks = mmaplist[i].dblist; +// if(!blocks) { +// blocks = mmaplist[i].dblist = kh_init(dynablocks); +// kh_resize(dynablocks, blocks, 64); +// } +// khint_t k; +// int r; +// k = kh_put(dynablocks, blocks, (uintptr_t)ret, &r); +// kh_value(blocks, k) = db; +// for(int j=0; j<size; ++j) +// mmaplist[i].helper[(uintptr_t)ret-(uintptr_t)mmaplist[i].block+j] = (j<256)?j:255; +// return ret; +// } +// } +// } +// return 0; +//} + +//uintptr_t AddNewDynarecMap(dynablock_t* db, int size) +//{ +// int i = mmapsize++; // yeah, useful post incrementation +// dynarec_log(LOG_DEBUG, "Ask for DynaRec Block Alloc #%d\n", mmapsize); +// mmaplist = (mmaplist_t*)realloc(mmaplist, mmapsize*sizeof(mmaplist_t)); +// #ifndef USE_MMAP +// void *p = NULL; +// if(posix_memalign(&p, box86_pagesize, MMAPSIZE)) { +// dynarec_log(LOG_INFO, "Cannot create memory map of %d byte for dynarec block #%d\n", MMAPSIZE, i); +// --mmapsize; +// return 0; +// } +// mprotect(p, MMAPSIZE, PROT_READ | PROT_WRITE | PROT_EXEC); +// #else +// void* p = mmap(NULL, MMAPSIZE, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); +// if(p==(void*)-1) { +// dynarec_log(LOG_INFO, "Cannot create memory map of %d byte for dynarec block #%d\n", MMAPSIZE, i); +// --mmapsize; +// return 0; +// } +// #endif +// setProtection((uintptr_t)p, MMAPSIZE, PROT_READ | PROT_WRITE | PROT_EXEC); +// +// mmaplist[i].block = p; +// mmaplist[i].size = MMAPSIZE; +// mmaplist[i].helper = (uint8_t*)calloc(1, MMAPSIZE); +// // setup marks +// blockmark_t* m = (blockmark_t*)p; +// m->prev.x32 = 0; +// m->next.fill = 0; +// m->next.size = MMAPSIZE-sizeof(blockmark_t); +// m = (blockmark_t*)(p+MMAPSIZE-sizeof(blockmark_t)); +// m->next.x32 = 0; +// m->prev.fill = 0; +// m->prev.size = MMAPSIZE-sizeof(blockmark_t); +// // alloc 1st block +// uintptr_t sub = (uintptr_t)allocBlock(mmaplist[i].block, p, size); +// mmaplist[i].maxfree = getMaxFreeBlock(mmaplist[i].block, mmaplist[i].size); +// kh_dynablocks_t *blocks = mmaplist[i].dblist = kh_init(dynablocks); +// kh_resize(dynablocks, blocks, 64); +// khint_t k; +// int ret; +// k = kh_put(dynablocks, blocks, (uintptr_t)sub, &ret); +// kh_value(blocks, k) = db; +// for(int j=0; j<size; ++j) +// mmaplist[i].helper[(uintptr_t)sub-(uintptr_t)mmaplist[i].block + j] = (j<256)?j:255; +// return sub; +//} + +//void ActuallyFreeDynarecMap(dynablock_t* db, uintptr_t addr, int size) +//{ +// if(!addr || !size) +// return; +// for(int i=0; i<mmapsize; ++i) { +// if ((addr>(uintptr_t)mmaplist[i].block) +// 
&& (addr<((uintptr_t)mmaplist[i].block+mmaplist[i].size))) { +// void* sub = (void*)(addr-sizeof(blockmark_t)); +// freeBlock(mmaplist[i].block, sub); +// mmaplist[i].maxfree = getMaxFreeBlock(mmaplist[i].block, mmaplist[i].size); +// kh_dynablocks_t *blocks = mmaplist[i].dblist; +// if(blocks) { +// khint_t k = kh_get(dynablocks, blocks, (uintptr_t)sub); +// if(k!=kh_end(blocks)) +// kh_del(dynablocks, blocks, k); +// for(int j=0; j<size; ++j) +// mmaplist[i].helper[(uintptr_t)sub-(uintptr_t)mmaplist[i].block+j] = 0; +// } +// return; +// } +// } +// if(mmapsize) +// dynarec_log(LOG_NONE, "Warning, block %p (size %d) not found in mmaplist for Free\n", (void*)addr, size); +//} + +//dynablock_t* FindDynablockFromNativeAddress(void* addr) +//{ +// // look in actual list +// for(int i=0; i<mmapsize; ++i) { +// if ((uintptr_t)addr>=(uintptr_t)mmaplist[i].block +// && ((uintptr_t)addr<(uintptr_t)mmaplist[i].block+mmaplist[i].size)) { +// if(!mmaplist[i].helper) +// return FindDynablockDynablocklist(addr, mmaplist[i].dblist); +// else { +// uintptr_t p = (uintptr_t)addr - (uintptr_t)mmaplist[i].block; +// while(mmaplist[i].helper[p]) p -= mmaplist[i].helper[p]; +// khint_t k = kh_get(dynablocks, mmaplist[i].dblist, (uintptr_t)mmaplist[i].block + p); +// if(k!=kh_end(mmaplist[i].dblist)) +// return kh_value(mmaplist[i].dblist, k); +// return NULL; +// } +// } +// } +// // look in oversized +// return FindDynablockDynablocklist(addr, dblist_oversized); +//} + +//uintptr_t AllocDynarecMap(dynablock_t* db, int size) +//{ +// if(!size) +// return 0; +// if(size>MMAPSIZE-2*sizeof(blockmark_t)) { +// #ifndef USE_MMAP +// void *p = NULL; +// if(posix_memalign(&p, box86_pagesize, size)) { +// dynarec_log(LOG_INFO, "Cannot create dynamic map of %d bytes\n", size); +// return 0; +// } +// mprotect(p, size, PROT_READ | PROT_WRITE | PROT_EXEC); +// #else +// void* p = mmap(NULL, size, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0); +// if(p==(void*)-1) { +// dynarec_log(LOG_INFO, "Cannot create dynamic map of %d bytes\n", size); +// return 0; +// } +// #endif +// setProtection((uintptr_t)p, size, PROT_READ | PROT_WRITE | PROT_EXEC); +// kh_dynablocks_t *blocks = dblist_oversized; +// if(!blocks) { +// blocks = dblist_oversized = kh_init(dynablocks); +// kh_resize(dynablocks, blocks, 64); +// } +// khint_t k; +// int ret; +// k = kh_put(dynablocks, blocks, (uintptr_t)p, &ret); +// kh_value(blocks, k) = db; +// return (uintptr_t)p; +// } +// +// if(pthread_mutex_trylock(&mutex_mmap)) { +// sched_yield(); // give it a chance +// if(pthread_mutex_trylock(&mutex_mmap)) +// return 0; // cannot lock, baillout +// } +// +// uintptr_t ret = FindFreeDynarecMap(db, size); +// if(!ret) +// ret = AddNewDynarecMap(db, size); +// +// pthread_mutex_unlock(&mutex_mmap); +// +// return ret; +//} + +//void FreeDynarecMap(dynablock_t* db, uintptr_t addr, uint32_t size) +//{ +// if(size>MMAPSIZE-2*sizeof(blockmark_t)) { +// #ifndef USE_MMAP +// free((void*)addr); +// #else +// munmap((void*)addr, size); +// #endif +// kh_dynablocks_t *blocks = dblist_oversized; +// if(blocks) { +// khint_t k = kh_get(dynablocks, blocks, addr); +// if(k!=kh_end(blocks)) +// kh_del(dynablocks, blocks, k); +// } +// return; +// } +// pthread_mutex_lock(&mutex_mmap); +// ActuallyFreeDynarecMap(db, addr, size); +// pthread_mutex_unlock(&mutex_mmap); +//} + +//dynablocklist_t* getDB(uintptr_t idx) +//{ +// return dynmap[idx]; +//} + +// each dynmap is 64k of size + +//void addDBFromAddressRange(uintptr_t addr, uintptr_t size) +//{ 
+// dynarec_log(LOG_DEBUG, "addDBFromAddressRange %p -> %p\n", (void*)addr, (void*)(addr+size-1)); +// uintptr_t idx = (addr>>DYNAMAP_SHIFT); +// uintptr_t end = ((addr+size-1)>>DYNAMAP_SHIFT); +// for (uintptr_t i=idx; i<=end; ++i) { +// if(!dynmap[i]) { +// dynmap[i] = NewDynablockList(i<<DYNAMAP_SHIFT, 1<<DYNAMAP_SHIFT, 0); +// } +// } +//} + +//void cleanDBFromAddressRange(uintptr_t addr, uintptr_t size, int destroy) +//{ +// dynarec_log(LOG_DEBUG, "cleanDBFromAddressRange %p -> %p %s\n", (void*)addr, (void*)(addr+size-1), destroy?"destroy":"mark"); +// uintptr_t idx = (addr>>DYNAMAP_SHIFT); +// uintptr_t end = ((addr+size-1)>>DYNAMAP_SHIFT); +// for (uintptr_t i=idx; i<=end; ++i) { +// dynablocklist_t* dblist = dynmap[i]; +// if(dblist) { +// if(destroy) +// FreeRangeDynablock(dblist, addr, size); +// else +// MarkRangeDynablock(dblist, addr, size); +// } +// } +//} + +#ifdef ARM +//void arm_next(void); +#endif + +//void addJumpTableIfDefault(void* addr, void* jmp) +//{ +// const uintptr_t idx = ((uintptr_t)addr>>JMPTABL_SHIFT); +// if(box86_jumptable[idx] == box86_jmptbl_default) { +// uintptr_t* tbl = (uintptr_t*)malloc((1<<JMPTABL_SHIFT)*sizeof(uintptr_t)); +// for(int i=0; i<(1<<JMPTABL_SHIFT); ++i) +// tbl[i] = (uintptr_t)arm_next; +// box86_jumptable[idx] = tbl; +// } +// const uintptr_t off = (uintptr_t)addr&((1<<JMPTABL_SHIFT)-1); +// if(box86_jumptable[idx][off]==(uintptr_t)arm_next) +// box86_jumptable[idx][off] = (uintptr_t)jmp; +//} +//void setJumpTableDefault(void* addr) +//{ +// const uintptr_t idx = ((uintptr_t)addr>>JMPTABL_SHIFT); +// if(box86_jumptable[idx] == box86_jmptbl_default) { +// return; +// } +// const uintptr_t off = (uintptr_t)addr&((1<<JMPTABL_SHIFT)-1); +// box86_jumptable[idx][off] = (uintptr_t)arm_next; +//} +//uintptr_t getJumpTable() +//{ +// return (uintptr_t)box86_jumptable; +//} + +//uintptr_t getJumpTableAddress(uintptr_t addr) +//{ +// const uintptr_t idx = ((uintptr_t)addr>>JMPTABL_SHIFT); +// if(box86_jumptable[idx] == box86_jmptbl_default) { +// uintptr_t* tbl = (uintptr_t*)malloc((1<<JMPTABL_SHIFT)*sizeof(uintptr_t)); +// for(int i=0; i<(1<<JMPTABL_SHIFT); ++i) +// tbl[i] = (uintptr_t)arm_next; +// box86_jumptable[idx] = tbl; +// } +// const uintptr_t off = (uintptr_t)addr&((1<<JMPTABL_SHIFT)-1); +// return (uintptr_t)&box86_jumptable[idx][off]; +//} + +// Remove the Write flag from an adress range, so DB can be executed +// no log, as it can be executed inside a signal handler +//void protectDB(uintptr_t addr, uintptr_t size) +//{ +// dynarec_log(LOG_DEBUG, "protectDB %p -> %p\n", (void*)addr, (void*)(addr+size-1)); +// uintptr_t idx = (addr>>MEMPROT_SHIFT); +// uintptr_t end = ((addr+size-1)>>MEMPROT_SHIFT); +// pthread_mutex_lock(&mutex_prot); +// for (uintptr_t i=idx; i<=end; ++i) { +// uint32_t prot = memprot[i]; +// if(!prot) +// prot = PROT_READ | PROT_WRITE; // comes from malloc & co, so should not be able to execute +// memprot[i] = prot|PROT_DYNAREC; +// if(!(prot&PROT_DYNAREC)) +// mprotect((void*)(i<<MEMPROT_SHIFT), 1<<MEMPROT_SHIFT, prot&~PROT_WRITE); +// } +// pthread_mutex_unlock(&mutex_prot); +//} + +//void protectDBnolock(uintptr_t addr, uintptr_t size) +//{ +// dynarec_log(LOG_DEBUG, "protectDB %p -> %p\n", (void*)addr, (void*)(addr+size-1)); +// uintptr_t idx = (addr>>MEMPROT_SHIFT); +// uintptr_t end = ((addr+size-1)>>MEMPROT_SHIFT); +// for (uintptr_t i=idx; i<=end; ++i) { +// uint32_t prot = memprot[i]; +// if(!prot) +// prot = PROT_READ | PROT_WRITE; // comes from malloc & co, so should not be able to execute +// 
memprot[i] = prot|PROT_DYNAREC; +// if(!(prot&PROT_DYNAREC)) +// mprotect((void*)(i<<MEMPROT_SHIFT), 1<<MEMPROT_SHIFT, prot&~PROT_WRITE); +// } +//} + +//void lockDB() +//{ +// pthread_mutex_lock(&mutex_prot); +//} + +//void unlockDB() +//{ +// pthread_mutex_unlock(&mutex_prot); +//} + +// Add the Write flag from an adress range, and mark all block as dirty +// no log, as it can be executed inside a signal handler +//void unprotectDB(uintptr_t addr, uintptr_t size) +//{ +// dynarec_log(LOG_DEBUG, "unprotectDB %p -> %p\n", (void*)addr, (void*)(addr+size-1)); +// uintptr_t idx = (addr>>MEMPROT_SHIFT); +// uintptr_t end = ((addr+size-1)>>MEMPROT_SHIFT); +// pthread_mutex_lock(&mutex_prot); +// for (uintptr_t i=idx; i<=end; ++i) { +// uint32_t prot = memprot[i]; +// memprot[i] = prot&~PROT_DYNAREC; +// if(prot&PROT_DYNAREC) { +// mprotect((void*)(i<<MEMPROT_SHIFT), 1<<MEMPROT_SHIFT, prot&~PROT_DYNAREC); +// cleanDBFromAddressRange((i<<MEMPROT_SHIFT), 1<<MEMPROT_SHIFT, 0); +// } +// } +// pthread_mutex_unlock(&mutex_prot); +//} + +#endif + +void updateProtection(uintptr_t addr, uintptr_t size, uint32_t prot) +{ +// const uintptr_t idx = (addr>>MEMPROT_SHIFT); +// const uintptr_t end = ((addr+size-1)>>MEMPROT_SHIFT); +// pthread_mutex_lock(&mutex_prot); +// for (uintptr_t i=idx; i<=end; ++i) { +// uint32_t dyn=(memprot[i]&PROT_DYNAREC); +// if(dyn && (prot&PROT_WRITE)) // need to remove the write protection from this block +// mprotect((void*)(i<<MEMPROT_SHIFT), 1<<MEMPROT_SHIFT, prot&~PROT_WRITE); +// memprot[i] = prot|dyn; +// } +// pthread_mutex_unlock(&mutex_prot); +} + +void setProtection(uintptr_t addr, uintptr_t size, uint32_t prot) +{ +// const uintptr_t idx = (addr>>MEMPROT_SHIFT); +// const uintptr_t end = ((addr+size-1)>>MEMPROT_SHIFT); +// pthread_mutex_lock(&mutex_prot); +// for (uintptr_t i=idx; i<=end; ++i) { +// memprot[i] = prot; +// } +// pthread_mutex_unlock(&mutex_prot); +} + +uint32_t getProtection(uintptr_t addr) +{ +// const uintptr_t idx = (addr>>MEMPROT_SHIFT); +// pthread_mutex_lock(&mutex_prot); +// uint32_t ret = memprot[idx]; +// pthread_mutex_unlock(&mutex_prot); +// return ret; + return 0; +} + +void init_custommem_helper(box64context_t* ctx) +{ + if(inited) // already initialized + return; + inited = 1; +// pthread_mutex_init(&mutex_prot, NULL); +#ifdef DYNAREC +// pthread_mutex_init(&mutex_mmap, NULL); +#ifdef ARM +// for(int i=0; i<(1<<JMPTABL_SHIFT); ++i) +// box86_jmptbl_default[i] = (uintptr_t)arm_next; +// for(int i=0; i<JMPTABL_SIZE; ++i) +// box86_jumptable[i] = box86_jmptbl_default; +#else +#error Unsupported architecture! 
+#endif +#endif +} + +void fini_custommem_helper(box64context_t *ctx) +{ + if(!inited) + return; + inited = 0; +#ifdef DYNAREC +// dynarec_log(LOG_DEBUG, "Free global Dynarecblocks\n"); +// for (int i=0; i<mmapsize; ++i) { +// if(mmaplist[i].block) +// #ifdef USE_MMAP +// munmap(mmaplist[i].block, mmaplist[i].size); +// #else +// free(mmaplist[i].block); +// #endif +// if(mmaplist[i].dblist) { +// kh_destroy(dynablocks, mmaplist[i].dblist); +// mmaplist[i].dblist = NULL; +// } +// if(mmaplist[i].helper) { +// free(mmaplist[i].helper); +// mmaplist[i].helper = NULL; +// } +// } +// if(dblist_oversized) { +// kh_destroy(dynablocks, dblist_oversized); +// dblist_oversized = NULL; +// } +// mmapsize = 0; +// dynarec_log(LOG_DEBUG, "Free dynamic Dynarecblocks\n"); +// uintptr_t idx = 0; +// uintptr_t end = ((0xFFFFFFFF)>>DYNAMAP_SHIFT); +// for (uintptr_t i=idx; i<=end; ++i) { +// dynablocklist_t* dblist = dynmap[i]; +// if(dblist) { +// uintptr_t startdb = StartDynablockList(dblist); +// uintptr_t enddb = EndDynablockList(dblist); +// uintptr_t startaddr = 0; +// if(startaddr<startdb) startaddr = startdb; +// uintptr_t endaddr = 0xFFFFFFFF; +// if(endaddr>enddb) endaddr = enddb; +// FreeRangeDynablock(dblist, startaddr, endaddr-startaddr+1); +// } +// } +// for (uintptr_t i=idx; i<=end; ++i) +// if(dynmap[i]) +// FreeDynablockList(&dynmap[i]); +// pthread_mutex_destroy(&mutex_mmap); +// free(mmaplist); +// for (int i=0; i<DYNAMAP_SIZE; ++i) +// if(box86_jumptable[i]!=box86_jmptbl_default) +// free(box86_jumptable[i]); +#endif +// for(int i=0; i<n_blocks; ++i) +// #ifdef USE_MMAP +// munmap(p_blocks[i].block, p_blocks[i].size); +// #else +// free(p_blocks[i].block); +// #endif +// free(p_blocks); +// pthread_mutex_destroy(&mutex_prot); +// pthread_mutex_destroy(&mutex_blocks); +} diff --git a/src/elfs/elfload_dump.c b/src/elfs/elfload_dump.c new file mode 100755 index 00000000..937d0082 --- /dev/null +++ b/src/elfs/elfload_dump.c @@ -0,0 +1,366 @@ +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <elf.h> + +#include "box64version.h" +#include "elfloader.h" +#include "debug.h" +#include "elfload_dump.h" +#include "elfloader_private.h" + +const char* DumpSection(Elf64_Shdr *s, char* SST) { + static char buff[400]; + switch (s->sh_type) { + case SHT_NULL: + return "SHT_NULL"; + #define GO(A) \ + case A: \ + sprintf(buff, #A " Name=\"%s\"(%d) off=0x%X, size=%d, attr=0x%04X, addr=%p(%02X), link/info=%d/%d", \ + SST+s->sh_name, s->sh_name, s->sh_offset, s->sh_size, s->sh_flags, (void*)s->sh_addr, s->sh_addralign, s->sh_link, s->sh_info); \ + break + GO(SHT_PROGBITS); + GO(SHT_SYMTAB); + GO(SHT_STRTAB); + GO(SHT_RELA); + GO(SHT_HASH); + GO(SHT_DYNAMIC); + GO(SHT_NOTE); + GO(SHT_NOBITS); + GO(SHT_REL); + GO(SHT_SHLIB); + GO(SHT_DYNSYM); + GO(SHT_INIT_ARRAY); + GO(SHT_FINI_ARRAY); + GO(SHT_PREINIT_ARRAY); + GO(SHT_GROUP); + GO(SHT_SYMTAB_SHNDX); + GO(SHT_NUM); + GO(SHT_LOPROC); + GO(SHT_HIPROC); + GO(SHT_LOUSER); + GO(SHT_HIUSER); + #if defined(SHT_GNU_versym) && defined(SHT_GNU_ATTRIBUTES) + GO(SHT_GNU_versym); + GO(SHT_GNU_ATTRIBUTES); + GO(SHT_GNU_HASH); + GO(SHT_GNU_LIBLIST); + GO(SHT_CHECKSUM); + GO(SHT_LOSUNW); + //GO(SHT_SUNW_move); + GO(SHT_SUNW_COMDAT); + GO(SHT_SUNW_syminfo); + GO(SHT_GNU_verdef); + GO(SHT_GNU_verneed); + #endif + #undef GO + default: + sprintf(buff, "0x%X unknown type", s->sh_type); + } + return buff; +} + +const char* DumpDynamic(Elf64_Dyn *s) { + static char buff[200]; + switch (s->d_tag) { + case DT_NULL: + return "DT_NULL: End Dynamic Section"; + 
#define GO(A, Add) \ + case A: \ + sprintf(buff, #A " %s=0x%X", (Add)?"Addr":"Val", (Add)?s->d_un.d_ptr:s->d_un.d_val); \ + break + GO(DT_NEEDED, 0); + GO(DT_PLTRELSZ, 0); + GO(DT_PLTGOT, 1); + GO(DT_HASH, 1); + GO(DT_STRTAB, 1); + GO(DT_SYMTAB, 1); + GO(DT_RELA, 1); + GO(DT_RELASZ, 0); + GO(DT_RELAENT, 0); + GO(DT_STRSZ, 0); + GO(DT_SYMENT, 0); + GO(DT_INIT, 1); + GO(DT_FINI, 1); + GO(DT_SONAME, 0); + GO(DT_RPATH, 0); + GO(DT_SYMBOLIC, 0); + GO(DT_REL, 1); + GO(DT_RELSZ, 0); + GO(DT_RELENT, 0); + GO(DT_PLTREL, 0); + GO(DT_DEBUG, 0); + GO(DT_TEXTREL, 0); + GO(DT_JMPREL, 1); + GO(DT_BIND_NOW, 1); + GO(DT_INIT_ARRAY, 1); + GO(DT_FINI_ARRAY, 1); + GO(DT_INIT_ARRAYSZ, 0); + GO(DT_FINI_ARRAYSZ, 0); + GO(DT_RUNPATH, 0); + GO(DT_FLAGS, 0); + GO(DT_ENCODING, 0); + #if defined(DT_NUM) && defined(DT_TLSDESC_PLT) + GO(DT_NUM, 0); + GO(DT_VALRNGLO, 0); + GO(DT_GNU_PRELINKED, 0); + GO(DT_GNU_CONFLICTSZ, 0); + GO(DT_GNU_LIBLISTSZ, 0); + GO(DT_CHECKSUM, 0); + GO(DT_PLTPADSZ, 0); + GO(DT_MOVEENT, 0); + GO(DT_MOVESZ, 0); + GO(DT_FEATURE_1, 0); + GO(DT_POSFLAG_1, 0); + GO(DT_SYMINSZ, 0); + GO(DT_SYMINENT, 0); + GO(DT_ADDRRNGLO, 0); + GO(DT_GNU_HASH, 0); + GO(DT_TLSDESC_PLT, 0); + GO(DT_TLSDESC_GOT, 0); + GO(DT_GNU_CONFLICT, 0); + GO(DT_GNU_LIBLIST, 0); + GO(DT_CONFIG, 0); + GO(DT_DEPAUDIT, 0); + GO(DT_AUDIT, 0); + GO(DT_PLTPAD, 0); + GO(DT_MOVETAB, 0); + GO(DT_SYMINFO, 0); + GO(DT_VERSYM, 0); + GO(DT_RELACOUNT, 0); + GO(DT_RELCOUNT, 0); + GO(DT_FLAGS_1, 0); + GO(DT_VERDEF, 0); + GO(DT_VERDEFNUM, 0); + GO(DT_VERNEED, 0); + GO(DT_VERNEEDNUM, 0); + GO(DT_AUXILIARY, 0); + GO(DT_FILTER, 0); + #endif + #undef GO + default: + sprintf(buff, "0x%X unknown type", s->d_tag); + } + return buff; +} + +const char* DumpPHEntry(Elf64_Phdr *e) +{ + static char buff[500]; + memset(buff, 0, sizeof(buff)); + switch(e->p_type) { + case PT_NULL: sprintf(buff, "type: %s", "PT_NULL"); break; + #define GO(T) case T: sprintf(buff, "type: %s, Off=%x vaddr=%p paddr=%p filesz=%u memsz=%u flags=%x align=%u", #T, e->p_offset, (void*)e->p_vaddr, (void*)e->p_paddr, e->p_filesz, e->p_memsz, e->p_flags, e->p_align); break + GO(PT_LOAD); + GO(PT_DYNAMIC); + GO(PT_INTERP); + GO(PT_NOTE); + GO(PT_SHLIB); + GO(PT_PHDR); + GO(PT_TLS); + #ifdef PT_NUM + GO(PT_NUM); + GO(PT_LOOS); + GO(PT_GNU_EH_FRAME); + GO(PT_GNU_STACK); + GO(PT_GNU_RELRO); + #endif + #undef GO + default: sprintf(buff, "type: %x, Off=%x vaddr=%p paddr=%p filesz=%u memsz=%u flags=%x align=%u", e->p_type, e->p_offset, (void*)e->p_vaddr, (void*)e->p_paddr, e->p_filesz, e->p_memsz, e->p_flags, e->p_align); break; + } + return buff; +} + +const char* DumpRelType(int t) +{ + static char buff[50]; + memset(buff, 0, sizeof(buff)); + switch(t) { + #define GO(T) case T: sprintf(buff, "type: %s", #T); break + GO(R_X86_64_NONE); + GO(R_X86_64_64); + GO(R_X86_64_PC32); + GO(R_X86_64_GOT32); + GO(R_X86_64_PLT32); + GO(R_X86_64_COPY); + GO(R_X86_64_GLOB_DAT); + GO(R_X86_64_JUMP_SLOT); + GO(R_X86_64_RELATIVE); + GO(R_X86_64_GOTPCREL); + GO(R_X86_64_32); + GO(R_X86_64_32S); + GO(R_X86_64_16); + GO(R_X86_64_PC16); + GO(R_X86_64_8); + GO(R_X86_64_PC8); + GO(R_X86_64_PC64); + #undef GO + default: sprintf(buff, "type: 0x%x (unknown)", t); break; + } + return buff; +} + +const char* DumpSym(elfheader_t *h, Elf64_Sym* sym) +{ + static char buff[4096]; + memset(buff, 0, sizeof(buff)); + sprintf(buff, "\"%s\", value=%p, size=%d, info/other=%d/%d index=%d", + h->DynStr+sym->st_name, (void*)sym->st_value, sym->st_size, + sym->st_info, sym->st_other, sym->st_shndx); + return buff; +} + +const char* 
SymName(elfheader_t *h, Elf64_Sym* sym) +{ + return h->DynStr+sym->st_name; +} +const char* IdxSymName(elfheader_t *h, int sym) +{ + return h->DynStr+h->DynSym[sym].st_name; +} + +void DumpMainHeader(Elf64_Ehdr *header, elfheader_t *h) +{ + if(box64_log>=LOG_DUMP) { + printf_log(LOG_DUMP, "ELF Dump main header\n"); + printf_log(LOG_DUMP, " Entry point = %p\n", (void*)header->e_entry); + printf_log(LOG_DUMP, " Program Header table offset = %p\n", (void*)header->e_phoff); + printf_log(LOG_DUMP, " Section Header table offset = %p\n", (void*)header->e_shoff); + printf_log(LOG_DUMP, " Flags = 0x%X\n", header->e_flags); + printf_log(LOG_DUMP, " ELF Header size = %d\n", header->e_ehsize); + printf_log(LOG_DUMP, " Program Header Entry num/size = %d(%d)/%d\n", h->numPHEntries, header->e_phnum, header->e_phentsize); + printf_log(LOG_DUMP, " Section Header Entry num/size = %d(%d)/%d\n", h->numSHEntries, header->e_shnum, header->e_shentsize); + printf_log(LOG_DUMP, " Section Header index num = %d(%d)\n", h->SHIdx, header->e_shstrndx); + printf_log(LOG_DUMP, "ELF Dump ==========\n"); + + printf_log(LOG_DUMP, "ELF Dump PEntries (%d)\n", h->numSHEntries); + for (int i=0; i<h->numPHEntries; ++i) + printf_log(LOG_DUMP, " PHEntry %04d : %s\n", i, DumpPHEntry(h->PHEntries+i)); + printf_log(LOG_DUMP, "ELF Dump PEntries ====\n"); + + printf_log(LOG_DUMP, "ELF Dump Sections (%d)\n", h->numSHEntries); + for (int i=0; i<h->numSHEntries; ++i) + printf_log(LOG_DUMP, " Section %04d : %s\n", i, DumpSection(h->SHEntries+i, h->SHStrTab)); + printf_log(LOG_DUMP, "ELF Dump Sections ====\n"); + } +} + +void DumpSymTab(elfheader_t *h) +{ + if(box64_log>=LOG_DUMP && h->SymTab) { + const char* name = ElfName(h); + printf_log(LOG_DUMP, "ELF Dump SymTab(%d)=\n", h->numSymTab); + for (int i=0; i<h->numSymTab; ++i) + printf_log(LOG_DUMP, " %s:SymTab[%d] = \"%s\", value=%p, size=%d, info/other=%d/%d index=%d\n", name, + i, h->StrTab+h->SymTab[i].st_name, (void*)h->SymTab[i].st_value, h->SymTab[i].st_size, + h->SymTab[i].st_info, h->SymTab[i].st_other, h->SymTab[i].st_shndx); + printf_log(LOG_DUMP, "ELF Dump SymTab=====\n"); + } +} + +void DumpDynamicSections(elfheader_t *h) +{ + if(box64_log>=LOG_DUMP && h->Dynamic) { + printf_log(LOG_DUMP, "ELF Dump Dynamic(%d)=\n", h->numDynamic); + for (int i=0; i<h->numDynamic; ++i) + printf_log(LOG_DUMP, " Dynamic %04d : %s\n", i, DumpDynamic(h->Dynamic+i)); + printf_log(LOG_DUMP, "ELF Dump Dynamic=====\n"); + } +} + +void DumpDynSym(elfheader_t *h) +{ + if(box64_log>=LOG_DUMP && h->DynSym) { + const char* name = ElfName(h); + printf_log(LOG_DUMP, "ELF Dump DynSym(%d)=\n", h->numDynSym); + for (int i=0; i<h->numDynSym; ++i) + printf_log(LOG_DUMP, " %s:DynSym[%d] = %s\n", name, i, DumpSym(h, h->DynSym+i)); + printf_log(LOG_DUMP, "ELF Dump DynSym=====\n"); + } +} + +void DumpDynamicNeeded(elfheader_t *h) +{ + if(box64_log>=LOG_DUMP && h->DynStrTab) { + printf_log(LOG_DUMP, "ELF Dump DT_NEEDED=====\n"); + for (int i=0; i<h->numDynamic; ++i) + if(h->Dynamic[i].d_tag==DT_NEEDED) { + printf_log(LOG_DUMP, " Needed : %s\n", h->DynStrTab+h->Dynamic[i].d_un.d_val + h->delta); + } + printf_log(LOG_DUMP, "ELF Dump DT_NEEDED=====\n"); + } +} + +void DumpDynamicRPath(elfheader_t *h) +{ + if(box64_log>=LOG_DUMP && h->DynStrTab) { + printf_log(LOG_DUMP, "ELF Dump DT_RPATH/DT_RUNPATH=====\n"); + for (int i=0; i<h->numDynamic; ++i) { + if(h->Dynamic[i].d_tag==DT_RPATH) { + printf_log(LOG_DUMP, " RPATH : %s\n", h->DynStrTab+h->Dynamic[i].d_un.d_val + h->delta); + } + if(h->Dynamic[i].d_tag==DT_RUNPATH) { + 
printf_log(LOG_DUMP, " RUNPATH : %s\n", h->DynStrTab+h->Dynamic[i].d_un.d_val + h->delta); + } + } + printf_log(LOG_DUMP, "=====ELF Dump DT_RPATH/DT_RUNPATH\n"); + } +} + +void DumpRelTable(elfheader_t *h, int cnt, Elf64_Rel *rel, const char* name) +{ + if(box64_log>=LOG_DUMP) { + const char* elfname = ElfName(h); + printf_log(LOG_DUMP, "ELF Dump %s Table(%d) @%p\n", name, cnt, rel); + for (int i = 0; i<cnt; ++i) + printf_log(LOG_DUMP, " %s:Rel[%d] = %p (0x%X: %s, sym=0x%0X/%s)\n", elfname, + i, (void*)rel[i].r_offset, rel[i].r_info, DumpRelType(ELF64_R_TYPE(rel[i].r_info)), + ELF64_R_SYM(rel[i].r_info), IdxSymName(h, ELF64_R_SYM(rel[i].r_info))); + printf_log(LOG_DUMP, "ELF Dump Rel Table=====\n"); + } +} + +void DumpRelATable(elfheader_t *h, int cnt, Elf64_Rela *rela, const char* name) +{ + if(box64_log>=LOG_DUMP && h->rela) { + const char* elfname = ElfName(h); + printf_log(LOG_DUMP, "ELF Dump %s Table(%d) @%p\n", name, cnt, rela); + for (int i = 0; i<cnt; ++i) + printf_log(LOG_DUMP, " %s:RelA[%d] = %p (0x%X: %s, sym=0x%X/%s) Addend=%d\n", elfname, + i, (void*)rela[i].r_offset, rela[i].r_info, DumpRelType(ELF64_R_TYPE(rela[i].r_info)), + ELF64_R_SYM(rela[i].r_info), IdxSymName(h, ELF64_R_SYM(rela[i].r_info)), + rela[i].r_addend); + printf_log(LOG_DUMP, "ELF Dump RelA Table=====\n"); + } +} + +void DumpBinary(char* p, int sz) +{ + // dump p as + // PPPPPPPP XX XX XX ... XX | 0123456789ABCDEF + unsigned char* d = (unsigned char*)p; + int delta = ((uintptr_t)p)&0xf; + for (int i = 0; sz; ++i) { + printf_log(LOG_DUMP, "%p ", (void*)(((uintptr_t)d)&~0xf)); + int n = 16 - delta; + if (n>sz) n = sz; + int fill = 16-sz; + for (int j = 0; j<delta; ++j) + printf_log(LOG_DUMP, " "); + for (int j = 0; j<n; ++j) + printf_log(LOG_DUMP, "%02X ", d[j]); + for (int j = 0; j<fill; ++j) + printf_log(LOG_DUMP, " "); + printf_log(LOG_DUMP, " | "); + for (int j = 0; j<delta; ++j) + printf_log(LOG_DUMP, " "); + for (int j = 0; j<n; ++j) + printf_log(LOG_DUMP, "%c", (d[j]<32 || d[j]>127)?'.':d[j]); + for (int j = 0; j<fill; ++j) + printf_log(LOG_DUMP, " "); + printf_log(LOG_DUMP, "\n"); + d+=n; + sz-=n; + delta=0; + } +} diff --git a/src/elfs/elfloader.c b/src/elfs/elfloader.c new file mode 100755 index 00000000..a8a3c928 --- /dev/null +++ b/src/elfs/elfloader.c @@ -0,0 +1,1278 @@ +#define _GNU_SOURCE +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <elf.h> +#include <sys/mman.h> +#include <sys/stat.h> +#include <sys/sysmacros.h> +#include <sys/types.h> +#include <link.h> +#include <unistd.h> +#include <errno.h> + +#include "box64version.h" +#include "elfloader.h" +#include "debug.h" +#include "elfload_dump.h" +#include "elfloader_private.h" +//#include "librarian.h" +//#include "x86run.h" +//#include "bridge.h" +//#include "wrapper.h" +#include "box64context.h" +//#include "library.h" +//#include "x86emu.h" +//#include "box86stack.h" +//#include "callback.h" +//#include "dynarec.h" +//#include "box86stack.h" +#include "custommem.h" +#include "wine_tools.h" +//#ifdef DYNAREC +//#include "dynablock.h" +//#endif +//#include "../emu/x86emu_private.h" +//#include "x86tls.h" + +void* my__IO_2_1_stderr_ = NULL; +void* my__IO_2_1_stdin_ = NULL; +void* my__IO_2_1_stdout_ = NULL; + +// return the index of header (-1 if it doesn't exist) +int getElfIndex(box64context_t* ctx, elfheader_t* head) { + for (int i=0; i<ctx->elfsize; ++i) + if(ctx->elfs[i]==head) + return i; + return -1; +} + +elfheader_t* LoadAndCheckElfHeader(FILE* f, const char* name, int exec) +{ + elfheader_t *h = ParseElfHeader(f, 
name, exec); + if(!h) + return NULL; + + if ((h->path = realpath(name, NULL)) == NULL) { + h->path = (char*)malloc(1); + h->path[0] = '\0'; + } + return h; +} + +void FreeElfHeader(elfheader_t** head) +{ + if(!head || !*head) + return; + elfheader_t *h = *head; +#ifdef DYNAREC + /*if(h->text) { + dynarec_log(LOG_INFO, "Free Dynarec block for %s\n", h->path); + cleanDBFromAddressRange(my_context, h->text, h->textsz, 1); + }*/ // will be free at the end, no need to free it now +#endif + free(h->name); + free(h->path); + free(h->PHEntries); + free(h->SHEntries); + free(h->SHStrTab); + free(h->StrTab); + free(h->Dynamic); + free(h->DynStr); + free(h->SymTab); + free(h->DynSym); + + FreeElfMemory(h); + free(h); + + *head = NULL; +} + +int CalcLoadAddr(elfheader_t* head) +{ + head->memsz = 0; + head->paddr = head->vaddr = ~(uintptr_t)0; + head->align = 1; + for (int i=0; i<head->numPHEntries; ++i) + if(head->PHEntries[i].p_type == PT_LOAD) { + if(head->paddr > (uintptr_t)head->PHEntries[i].p_paddr) + head->paddr = (uintptr_t)head->PHEntries[i].p_paddr; + if(head->vaddr > (uintptr_t)head->PHEntries[i].p_vaddr) + head->vaddr = (uintptr_t)head->PHEntries[i].p_vaddr; + } + + if(head->vaddr==~(uintptr_t)0 || head->paddr==~(uintptr_t)0) { + printf_log(LOG_NONE, "Error: v/p Addr for Elf Load not set\n"); + return 1; + } + + head->stacksz = 1024*1024; //1M stack size default? + head->stackalign = 4; // default align for stack + for (int i=0; i<head->numPHEntries; ++i) { + if(head->PHEntries[i].p_type == PT_LOAD) { + uintptr_t phend = head->PHEntries[i].p_vaddr - head->vaddr + head->PHEntries[i].p_memsz; + if(phend > head->memsz) + head->memsz = phend; + if(head->PHEntries[i].p_align > head->align) + head->align = head->PHEntries[i].p_align; + } + if(head->PHEntries[i].p_type == PT_GNU_STACK) { + if(head->stacksz < head->PHEntries[i].p_memsz) + head->stacksz = head->PHEntries[i].p_memsz; + if(head->stackalign < head->PHEntries[i].p_align) + head->stackalign = head->PHEntries[i].p_align; + } + if(head->PHEntries[i].p_type == PT_TLS) { + head->tlssize = head->PHEntries[i].p_memsz; + head->tlsalign = head->PHEntries[i].p_align; + // force alignement... 
+ if(head->tlsalign>1) + while(head->tlssize&(head->tlsalign-1)) + head->tlssize++; + } + } + printf_log(LOG_DEBUG, "Elf Addr(v/p)=%p/%p Memsize=0x%x (align=0x%x)\n", (void*)head->vaddr, (void*)head->paddr, head->memsz, head->align); + printf_log(LOG_DEBUG, "Elf Stack Memsize=%u (align=%u)\n", head->stacksz, head->stackalign); + printf_log(LOG_DEBUG, "Elf TLS Memsize=%u (align=%u)\n", head->tlssize, head->tlsalign); + + return 0; +} + +const char* ElfName(elfheader_t* head) +{ + if(!head) + return "(noelf)"; + return head->name; +} +int AllocElfMemory(box64context_t* context, elfheader_t* head, int mainbin) +{ + uintptr_t offs = 0; + if(mainbin && head->vaddr==0) { + char* load_addr = getenv("BOX86_LOAD_ADDR"); + if(load_addr) + if(sscanf(load_addr, "0x%x", &offs)!=1) + offs = 0; + } + if(!offs) + offs = head->vaddr; + if(head->vaddr) { + head->multiblock_n = 0; // count PHEntrie with LOAD + for (int i=0; i<head->numPHEntries; ++i) + if(head->PHEntries[i].p_type == PT_LOAD && head->PHEntries[i].p_flags) + ++head->multiblock_n; + head->multiblock_size = (uint64_t*)calloc(head->multiblock_n, sizeof(uint64_t)); + head->multiblock_offs = (uintptr_t*)calloc(head->multiblock_n, sizeof(uintptr_t)); + head->multiblock = (void**)calloc(head->multiblock_n, sizeof(void*)); + // and now, create all individual blocks + head->memory = (char*)0xffffffffffffffff; + int n = 0; + for (int i=0; i<head->numPHEntries; ++i) + if(head->PHEntries[i].p_type == PT_LOAD && head->PHEntries[i].p_flags) { + Elf64_Phdr * e = &head->PHEntries[i]; + uintptr_t bstart = e->p_vaddr; + uint32_t bsize = e->p_memsz; + uintptr_t balign = e->p_align; + if (balign) balign = balign-1; else balign = 1; + if(balign<4095) balign = 4095; + uintptr_t bend = (bstart + bsize + balign)&(~balign); + bstart &= ~balign; + int ok = 0; + for (int j=0; !ok && j<n; ++j) { + uintptr_t start = head->multiblock_offs[j]; + uintptr_t end = head->multiblock_offs[j] + head->multiblock_size[j]; + start &= ~balign; + if( (head->e_type == ET_DYN) || + (((bstart>=start) && (bstart<=end)) || ((bend>=start) && (bend<=end)) || ((bstart<start) && (bend>end)))) + { + // merge + ok = 1; + if(bstart<start) + head->multiblock_offs[j] = bstart; + head->multiblock_size[j] = ((bend>end)?bend:end) - head->multiblock_offs[j]; + --head->multiblock_n; + } + } + if(!ok) { + head->multiblock_offs[n] = bstart; + head->multiblock_size[n] = bend - head->multiblock_offs[n]; + ++n; + } + } + head->multiblock_n = n; // might be less in fact + for (int i=0; i<head->multiblock_n; ++i) { + + printf_log(LOG_DEBUG, "Allocating 0x%x memory @%p for Elf \"%s\"\n", head->multiblock_size[i], (void*)head->multiblock_offs[i], head->name); + void* p = mmap((void*)head->multiblock_offs[i], head->multiblock_size[i] + , PROT_READ | PROT_WRITE | PROT_EXEC + , MAP_PRIVATE | MAP_ANONYMOUS /*| ((wine_preloaded)?MAP_FIXED:0)*/ + , -1, 0); + if(p==MAP_FAILED) { + printf_log(LOG_NONE, "Cannot create memory map (@%p 0x%x/0x%x) for elf \"%s\"\n", (void*)head->multiblock_offs[i], head->multiblock_size[i], head->align, head->name); + return 1; + } + if(head->multiblock_offs[i] &&( p!=(void*)head->multiblock_offs[i])) { + if((head->e_type!=ET_DYN)) { + printf_log(LOG_NONE, "Error, memory map (@%p 0x%x/0x%x) for elf \"%s\" allocated @%p\n", (void*)head->multiblock_offs[i], head->multiblock_size[i], head->align, head->name, p); + return 1; + } else { + printf_log(LOG_INFO, "Allocated memory is not at hinted %p but %p (size %p) \"%s\"\n", (void*)head->multiblock_offs[i], p, (void*)head->multiblock_size[i], 
head->name); + // need to adjust vaddr! + for (int i=0; i<head->numPHEntries; ++i) + if(head->PHEntries[i].p_type == PT_LOAD) { + Elf64_Phdr * e = &head->PHEntries[i]; + if(e->p_vaddr>=head->multiblock_offs[i] && e->p_vaddr<(head->multiblock_offs[i]+head->multiblock_size[i])) { + e->p_vaddr = e->p_vaddr - head->multiblock_offs[i] + (uintptr_t)p; + if(!head->delta) head->delta = (intptr_t)p - (intptr_t)head->multiblock_offs[i]; + } + } + } + } + setProtection((uintptr_t)p, head->multiblock_size[i], PROT_READ | PROT_WRITE | PROT_EXEC); + head->multiblock[i] = p; + if(p<(void*)head->memory) + head->memory = (char*)p; + } + } else { + // vaddr is 0, load everything has a One block + printf_log(LOG_DEBUG, "Allocating 0x%x memory @%p for Elf \"%s\"\n", head->memsz, (void*)offs, head->name); + void* p = mmap((void*)offs, head->memsz + , PROT_READ | PROT_WRITE | PROT_EXEC + , MAP_PRIVATE | MAP_ANONYMOUS /*| (((offs&&wine_preloaded)?MAP_FIXED:0))*/ + , -1, 0); + if(p==MAP_FAILED) { + printf_log(LOG_NONE, "Cannot create memory map (@%p 0x%x/0x%x) for elf \"%s\"\n", (void*)offs, head->memsz, head->align, head->name); + return 1; + } + if(offs && (p!=(void*)offs) && (head->e_type!=ET_DYN)) { + printf_log(LOG_NONE, "Error, memory map (@%p 0x%x/0x%x) for elf \"%s\" allocated @%p\n", (void*)offs, head->memsz, head->align, head->name, p); + return 1; + } + setProtection((uintptr_t)p, head->memsz, PROT_READ | PROT_WRITE | PROT_EXEC); + head->memory = p; + memset(p, 0, head->memsz); + head->delta = (intptr_t)p - (intptr_t)head->vaddr; + printf_log(LOG_DEBUG, "Got %p (delta=%p)\n", p, (void*)head->delta); + + head->multiblock_n = 1; + head->multiblock_size = (uint64_t*)calloc(head->multiblock_n, sizeof(uint64_t)); + head->multiblock_offs = (uintptr_t*)calloc(head->multiblock_n, sizeof(uintptr_t)); + head->multiblock = (void**)calloc(head->multiblock_n, sizeof(void*)); + head->multiblock_size[0] = head->memsz; + head->multiblock_offs[0] = (uintptr_t)p; + head->multiblock[0] = p; + } + + head->tlsbase = AddTLSPartition(context, head->tlssize); + + return 0; +} + +void FreeElfMemory(elfheader_t* head) +{ + if(head->multiblock_n) { + for(int i=0; i<head->multiblock_n; ++i) + munmap(head->multiblock[i], head->multiblock_size[i]); + free(head->multiblock); + free(head->multiblock_size); + free(head->multiblock_offs); + } +} + +#if 0 +int LoadElfMemory(FILE* f, box64context_t* context, elfheader_t* head) +{ + for (int i=0; i<head->numPHEntries; ++i) { + if(head->PHEntries[i].p_type == PT_LOAD) { + Elf64_Phdr * e = &head->PHEntries[i]; + char* dest = (char*)e->p_paddr + head->delta; + printf_log(LOG_DEBUG, "Loading block #%i @%p (0x%x/0x%x)\n", i, dest, e->p_filesz, e->p_memsz); + fseeko64(f, e->p_offset, SEEK_SET); + if(e->p_filesz) { + if(fread(dest, e->p_filesz, 1, f)!=1) { + printf_log(LOG_NONE, "Fail to read PT_LOAD part #%d (size=%d)\n", i, e->p_filesz); + return 1; + } + } +#ifdef DYNAREC + if(e->p_flags & PF_X) { + dynarec_log(LOG_DEBUG, "Add ELF eXecutable Memory %p:%p\n", dest, (void*)e->p_memsz); + addDBFromAddressRange((uintptr_t)dest, e->p_memsz); + } +#endif + + // zero'd difference between filesz and memsz + /*if(e->p_filesz != e->p_memsz) + memset(dest+e->p_filesz, 0, e->p_memsz - e->p_filesz);*/ //block is already 0'd at creation + } + if(head->PHEntries[i].p_type == PT_TLS) { + Elf64_Phdr * e = &head->PHEntries[i]; + char* dest = (char*)(context->tlsdata+context->tlssize+head->tlsbase); + printf_log(LOG_DEBUG, "Loading TLS block #%i @%p (0x%x/0x%x)\n", i, dest, e->p_filesz, e->p_memsz); + 
if(e->p_filesz) { + fseeko64(f, e->p_offset, SEEK_SET); + if(fread(dest, e->p_filesz, 1, f)!=1) { + printf_log(LOG_NONE, "Fail to read PT_TLS part #%d (size=%d)\n", i, e->p_filesz); + return 1; + } + } + // zero'd difference between filesz and memsz + if(e->p_filesz != e->p_memsz) + memset(dest+e->p_filesz, 0, e->p_memsz - e->p_filesz); + } + } + return 0; +} + +int ReloadElfMemory(FILE* f, box64context_t* context, elfheader_t* head) +{ + for (int i=0; i<head->numPHEntries; ++i) { + if(head->PHEntries[i].p_type == PT_LOAD) { + Elf64_Phdr * e = &head->PHEntries[i]; + char* dest = (char*)e->p_paddr + head->delta; + printf_log(LOG_DEBUG, "Re-loading block #%i @%p (0x%x/0x%x)\n", i, dest, e->p_filesz, e->p_memsz); + int ret = fseeko64(f, e->p_offset, SEEK_SET); + if(ret==-1) {printf_log(LOG_NONE, "Fail to (re)seek PT_LOAD part #%d (offset=%d, errno=%d/%s)\n", i, e->p_offset, errno, strerror(errno)); return 1;} + if(e->p_filesz) { + ssize_t r = -1; + #ifdef DYNAREC + unprotectDB((uintptr_t)dest, e->p_memsz); + #endif + if((r=fread(dest, e->p_filesz, 1, f))!=1) { + printf_log(LOG_NONE, "Fail to (re)read PT_LOAD part #%d (dest=%p, size=%d, return=%d, feof=%d/ferror=%d/%s)\n", i, dest, e->p_filesz, r, feof(f), ferror(f), strerror(ferror(f))); + return 1; + } + } + // zero'd difference between filesz and memsz + if(e->p_filesz != e->p_memsz) + memset(dest+e->p_filesz, 0, e->p_memsz - e->p_filesz); + } + } + // TLS data are just a copy, no need to re-load it + return 0; +} +int FindR386COPYRel(elfheader_t* h, const char* name, uintptr_t *offs, uint32_t** p) +{ + if(!h) + return 0; + Elf64_Rel * rel = (Elf64_Rel *)(h->rel + h->delta); + if(!h->rel) + return 0; + int cnt = h->relsz / h->relent; + for (int i=0; i<cnt; ++i) { + int t = ELF32_R_TYPE(rel[i].r_info); + Elf64_Sym *sym = &h->DynSym[ELF32_R_SYM(rel[i].r_info)]; + const char* symname = SymName(h, sym); + if(!strcmp(symname, name) && t==R_386_COPY) { + *offs = sym->st_value + h->delta; + *p = (uint32_t*)(rel[i].r_offset + h->delta); + return 1; + } + } + return 0; +} + +int RelocateElfREL(lib_t *maplib, lib_t *local_maplib, elfheader_t* head, int cnt, Elf64_Rel *rel) +{ + for (int i=0; i<cnt; ++i) { + int t = ELF32_R_TYPE(rel[i].r_info); + Elf64_Sym *sym = &head->DynSym[ELF32_R_SYM(rel[i].r_info)]; + int bind = ELF32_ST_BIND(sym->st_info); + const char* symname = SymName(head, sym); + uint32_t ndx = sym->st_shndx; + uint32_t *p = (uint32_t*)(rel[i].r_offset + head->delta); + uintptr_t offs = 0; + uintptr_t end = 0; + uintptr_t tmp = 0; + elfheader_t* h_tls = head; + if(bind==STB_LOCAL) { + offs = sym->st_value + head->delta; + end = offs + sym->st_size; + } else { + // this is probably very very wrong. A proprer way to get reloc need to be writen, but this hack seems ok for now + // at least it work for half-life, unreal, ut99, zsnes, Undertale, ColinMcRae Remake, FTL, ShovelKnight... 
+ if(bind==STB_GLOBAL && (ndx==10 || ndx==19) && t!=R_386_GLOB_DAT) { + offs = sym->st_value + head->delta; + end = offs + sym->st_size; + } + // so weak symbol are the one left + if(!offs && !end) { + h_tls = NULL; + if(local_maplib) + GetGlobalSymbolStartEnd(local_maplib, symname, &offs, &end); + if(!offs && !end) + GetGlobalSymbolStartEnd(maplib, symname, &offs, &end); + } + } + uintptr_t globoffs, globend; + uint32_t* globp; + int delta; + switch(t) { + case R_386_NONE: + // can be ignored + printf_log(LOG_DUMP, "Ignoring %s @%p (%p)\n", DumpRelType(t), p, (void*)(p?(*p):0)); + break; + case R_386_TLS_TPOFF: + // Negated offset in static TLS block + { + if(h_tls) + offs = sym->st_value; + else { + if(local_maplib) + h_tls = GetGlobalSymbolElf(local_maplib, symname); + if(!h_tls) + h_tls = GetGlobalSymbolElf(maplib, symname); + } + if(h_tls) { + delta = *(int*)p; + printf_log(LOG_DUMP, "Applying %s %s on %s @%p (%d -> %d)\n", (bind==STB_LOCAL)?"Local":"Global", DumpRelType(t), symname, p, delta, (int32_t)offs + h_tls->tlsbase); + *p = (uint32_t)((int32_t)offs + h_tls->tlsbase); + } else { + printf_log(LOG_INFO, "Warning, cannot apply %s %s on %s @%p (%d), no elf_header found\n", (bind==STB_LOCAL)?"Local":"Global", DumpRelType(t), symname, p, (int32_t)offs); + } + } + break; + case R_386_PC32: + if (!offs) { + printf_log(LOG_NONE, "Error: Global Symbol %s not found, cannot apply R_386_PC32 @%p (%p) in %s\n", symname, p, *(void**)p, head->name); + } + offs = (offs - (uintptr_t)p); + if(!offs) + printf_log(LOG_DUMP, "Apply %s R_386_PC32 @%p with sym=%s (%p -> %p)\n", (bind==STB_LOCAL)?"Local":"Global", p, symname, *(void**)p, (void*)(*(uintptr_t*)p+offs)); + *p += offs; + break; + case R_386_GLOB_DAT: + if(head!=my_context->elfs[0] && !IsGlobalNoWeakSymbolInNative(maplib, symname) && FindR386COPYRel(my_context->elfs[0], symname, &globoffs, &globp)) { + // set global offs / size for the symbol + offs = sym->st_value + head->delta; + end = offs + sym->st_size; + printf_log(LOG_DUMP, "Apply %s R_386_GLOB_DAT with R_386_COPY @%p/%p (%p/%p -> %p/%p) size=%d on sym=%s \n", (bind==STB_LOCAL)?"Local":"Global", p, globp, (void*)(p?(*p):0), (void*)(globp?(*globp):0), (void*)offs, (void*)globoffs, sym->st_size, symname); + *p = globoffs; + AddWeakSymbol(GetGlobalData(maplib), symname, offs, end-offs+1); + } else { + // Look for same symbol already loaded but not in self (so no need for local_maplib here) + if (GetGlobalNoWeakSymbolStartEnd(maplib, symname, &globoffs, &globend)) { + offs = globoffs; + end = globend; + } + if (!offs) { + if(strcmp(symname, "__gmon_start__")) + printf_log(LOG_NONE, "Error: Global Symbol %s not found, cannot apply R_386_GLOB_DAT @%p (%p) in %s\n", symname, p, *(void**)p, head->name); + } else { + printf_log(LOG_DUMP, "Apply %s R_386_GLOB_DAT @%p (%p -> %p) on sym=%s\n", (bind==STB_LOCAL)?"Local":"Global", p, (void*)(p?(*p):0), (void*)offs, symname); + *p = offs; + } + } + break; + case R_386_COPY: + if(offs) { + uintptr_t old_offs = offs; + uintptr_t old_end = end; + offs = 0; + GetSymbolStartEnd(GetGlobalData(maplib), symname, &offs, &end); // try globaldata symbols first + if(offs==0) { + if(local_maplib) + GetNoSelfSymbolStartEnd(local_maplib, symname, &offs, &end, head); + if(!offs) + GetNoSelfSymbolStartEnd(maplib, symname, &offs, &end, head); // get original copy if any + } + if(!offs) { + offs = old_offs; + end = old_end; + } + printf_log(LOG_DUMP, "Apply %s R_386_COPY @%p with sym=%s, @%p size=%d (", (bind==STB_LOCAL)?"Local":"Global", p, symname, (void*)offs, 
sym->st_size); + memmove(p, (void*)offs, sym->st_size); + if(LOG_DUMP<=box86_log) { + uint32_t*k = (uint32_t*)p; + for (int i=0; i<((sym->st_size>128)?128:sym->st_size); i+=4, ++k) + printf_log(LOG_DUMP, "%s0x%08X", i?" ":"", *k); + printf_log(LOG_DUMP, "%s)\n", (sym->st_size>128)?" ...":""); + } + } else { + printf_log(LOG_NONE, "Error: Symbol %s not found, cannot apply R_386_COPY @%p (%p) in %s\n", symname, p, *(void**)p, head->name); + } + break; + case R_386_RELATIVE: + printf_log(LOG_DUMP, "Apply %s R_386_RELATIVE @%p (%p -> %p)\n", (bind==STB_LOCAL)?"Local":"Global", p, *(void**)p, (void*)((*p)+head->delta)); + *p += head->delta; + break; + case R_386_32: + if (!offs) { + printf_log(LOG_NONE, "Error: Symbol %s not found, cannot apply R_386_32 @%p (%p) in %s\n", symname, p, *(void**)p, head->name); +// return -1; + } else { + printf_log(LOG_DUMP, "Apply %s R_386_32 @%p with sym=%s (%p -> %p)\n", (bind==STB_LOCAL)?"Local":"Global", p, symname, *(void**)p, (void*)(offs+*(uint32_t*)p)); + *p += offs; + } + break; + case R_386_TLS_DTPMOD32: + // ID of module containing symbol + if(!symname || symname[0]=='\0' || bind==STB_LOCAL) + offs = getElfIndex(my_context, head); + else { + if(!h_tls) { + if(local_maplib) + h_tls = GetGlobalSymbolElf(local_maplib, symname); + if(!h_tls) + h_tls = GetGlobalSymbolElf(maplib, symname); + } + offs = getElfIndex(my_context, h_tls); + } + if(p) { + printf_log(LOG_DUMP, "Apply %s %s @%p with sym=%s (%p -> %p)\n", "R_386_TLS_DTPMOD32", (bind==STB_LOCAL)?"Local":"Global", p, symname, *(void**)p, (void*)offs); + *p = offs; + } else { + printf_log(LOG_NONE, "Warning, Symbol %s or Elf not found, but R_386_TLS_DTPMOD32 Slot Offset is NULL \n", symname); + } + break; + case R_386_TLS_DTPOFF32: + // Offset in TLS block + if (!offs && !end) { + if(bind==STB_WEAK) { + printf_log(LOG_INFO, "Warning: Weak Symbol %s not found, cannot apply R_386_TLS_DTPOFF32 @%p (%p)\n", symname, p, *(void**)p); + } else { + printf_log(LOG_NONE, "Error: Symbol %s not found, cannot apply R_386_TLS_DTPOFF32 @%p (%p) in %s\n", symname, p, *(void**)p, head->name); + } +// return -1; + } else { + if(h_tls) + offs = sym->st_value; + if(p) { + int tlsoffset = offs; // it's not an offset in elf memory + printf_log(LOG_DUMP, "Apply %s R_386_TLS_DTPOFF32 @%p with sym=%s (%p -> %p)\n", (bind==STB_LOCAL)?"Local":"Global", p, symname, (void*)tlsoffset, (void*)offs); + *p = tlsoffset; + } else { + printf_log(LOG_NONE, "Warning, Symbol %s found, but R_386_TLS_DTPOFF32 Slot Offset is NULL \n", symname); + } + } + break; + case R_386_JMP_SLOT: + // apply immediatly for gobject closure marshal or for LOCAL binding. 
Also, apply immediatly if it doesn't jump in the got + tmp = (uintptr_t)(*p); + if (bind==STB_LOCAL + || ((symname && strstr(symname, "g_cclosure_marshal_")==symname)) + || !tmp + || !((tmp>=head->plt && tmp<head->plt_end) || (tmp>=head->gotplt && tmp<head->gotplt_end)) + ) { + if (!offs) { + if(bind==STB_WEAK) { + printf_log(LOG_INFO, "Warning: Weak Symbol %s not found, cannot apply R_386_JMP_SLOT @%p (%p)\n", symname, p, *(void**)p); + } else { + printf_log(LOG_NONE, "Error: Symbol %s not found, cannot apply R_386_JMP_SLOT @%p (%p) in %s\n", symname, p, *(void**)p, head->name); + } + // return -1; + } else { + if(p) { + printf_log(LOG_DUMP, "Apply %s R_386_JMP_SLOT @%p with sym=%s (%p -> %p)\n", (bind==STB_LOCAL)?"Local":"Global", p, symname, *(void**)p, (void*)offs); + *p = offs; + } else { + printf_log(LOG_NONE, "Warning, Symbol %s found, but Jump Slot Offset is NULL \n", symname); + } + } + } else { + printf_log(LOG_DUMP, "Preparing (if needed) %s R_386_JMP_SLOT @%p (0x%x->0x%0x) with sym=%s to be apply later\n", (bind==STB_LOCAL)?"Local":"Global", p, *p, *p+head->delta, symname); + *p += head->delta; + } + break; + default: + printf_log(LOG_INFO, "Warning, don't know of to handle rel #%d %s (%p)\n", i, DumpRelType(ELF32_R_TYPE(rel[i].r_info)), p); + } + } + return 0; +} + +int RelocateElfRELA(lib_t *maplib, lib_t *local_maplib, elfheader_t* head, int cnt, Elf64_Rela *rela) +{ + for (int i=0; i<cnt; ++i) { + Elf64_Sym *sym = &head->DynSym[ELF32_R_SYM(rela[i].r_info)]; + const char* symname = SymName(head, sym); + uint32_t *p = (uint32_t*)(rela[i].r_offset + head->delta); + uintptr_t offs = 0; + uintptr_t end = 0; + switch(ELF32_R_TYPE(rela[i].r_info)) { + case R_386_NONE: + case R_386_PC32: + // can be ignored + break; + case R_386_COPY: + if(local_maplib) + GetNoSelfSymbolStartEnd(local_maplib, symname, &offs, &end, head); + if(!offs) + GetNoSelfSymbolStartEnd(maplib, symname, &offs, &end, head); + if(offs) { + // add r_addend to p? 
+ printf_log(LOG_DUMP, "Apply R_386_COPY @%p with sym=%s, @%p size=%d\n", p, symname, (void*)offs, sym->st_size); + memcpy(p, (void*)(offs+rela[i].r_addend), sym->st_size); + } else { + printf_log(LOG_NONE, "Error: Symbol %s not found, cannot apply RELA R_386_COPY @%p (%p) in %s\n", symname, p, *(void**)p, head->name); + } + break; + default: + printf_log(LOG_INFO, "Warning, don't know of to handle rela #%d %s on %s\n", i, DumpRelType(ELF32_R_TYPE(rela[i].r_info)), symname); + } + } + return 0; +} +int RelocateElf(lib_t *maplib, lib_t *local_maplib, elfheader_t* head) +{ + if(head->rel) { + int cnt = head->relsz / head->relent; + DumpRelTable(head, cnt, (Elf64_Rel *)(head->rel + head->delta), "Rel"); + printf_log(LOG_DEBUG, "Applying %d Relocation(s) for %s\n", cnt, head->name); + if(RelocateElfREL(maplib, local_maplib, head, cnt, (Elf64_Rel *)(head->rel + head->delta))) + return -1; + } + if(head->rela) { + int cnt = head->relasz / head->relaent; + DumpRelATable(head, cnt, (Elf64_Rela *)(head->rela + head->delta), "RelA"); + printf_log(LOG_DEBUG, "Applying %d Relocation(s) with Addend for %s\n", cnt, head->name); + if(RelocateElfRELA(maplib, local_maplib, head, cnt, (Elf64_Rela *)(head->rela + head->delta))) + return -1; + } + + return 0; +} + +int RelocateElfPlt(lib_t *maplib, lib_t *local_maplib, elfheader_t* head) +{ + if(head->pltrel) { + int cnt = head->pltsz / head->pltent; + if(head->pltrel==DT_REL) { + DumpRelTable(head, cnt, (Elf64_Rel *)(head->jmprel + head->delta), "PLT"); + printf_log(LOG_DEBUG, "Applying %d PLT Relocation(s) for %s\n", cnt, head->name); + if(RelocateElfREL(maplib, local_maplib, head, cnt, (Elf64_Rel *)(head->jmprel + head->delta))) + return -1; + } else if(head->pltrel==DT_RELA) { + DumpRelATable(head, cnt, (Elf64_Rela *)(head->jmprel + head->delta), "PLT"); + printf_log(LOG_DEBUG, "Applying %d PLT Relocation(s) with Addend for %s\n", cnt, head->name); + if(RelocateElfRELA(maplib, local_maplib, head, cnt, (Elf64_Rela *)(head->jmprel + head->delta))) + return -1; + } + if(pltResolver==~0) { + pltResolver = AddBridge(my_context->system, vFE, PltResolver, 0); + } + if(head->pltgot) { + *(uintptr_t*)(head->pltgot+head->delta+8) = pltResolver; + *(uintptr_t*)(head->pltgot+head->delta+4) = (uintptr_t)head; + printf_log(LOG_DEBUG, "PLT Resolver injected in plt.got at %p\n", (void*)(head->pltgot+head->delta+8)); + } else if(head->got) { + *(uintptr_t*)(head->got+head->delta+8) = pltResolver; + *(uintptr_t*)(head->got+head->delta+4) = (uintptr_t)head; + printf_log(LOG_DEBUG, "PLT Resolver injected in got at %p\n", (void*)(head->got+head->delta+8)); + } + } + + return 0; +} + +void CalcStack(elfheader_t* elf, uint32_t* stacksz, int* stackalign) +{ + if(*stacksz < elf->stacksz) + *stacksz = elf->stacksz; + if(*stackalign < elf->stackalign) + *stackalign = elf->stackalign; +} + +Elf64_Sym* GetFunction(elfheader_t* h, const char* name) +{ + // TODO: create a hash on named to avoid this loop + for (int i=0; i<h->numSymTab; ++i) { + int type = ELF32_ST_TYPE(h->SymTab[i].st_info); + if(/*h->SymTab[i].st_info == 18*/type==STT_FUNC) { // TODO: this "18" is probably defined somewhere + const char * symname = h->StrTab+h->SymTab[i].st_name; + if(strcmp(symname, name)==0) { + return h->SymTab+i; + } + } + } + return NULL; +} + +Elf64_Sym* GetElfObject(elfheader_t* h, const char* name) +{ + for (int i=0; i<h->numSymTab; ++i) { + int type = ELF32_ST_TYPE(h->SymTab[i].st_info); + if(/*h->SymTab[i].st_info == 16*/type==STT_OBJECT) { + const char * symname = 
h->StrTab+h->SymTab[i].st_name; + if(strcmp(symname, name)==0) { + return h->SymTab+i; + } + } + } + return NULL; +} + + +uintptr_t GetFunctionAddress(elfheader_t* h, const char* name) +{ + Elf64_Sym* sym = GetFunction(h, name); + if(sym) return sym->st_value; + return 0; +} + +uintptr_t GetEntryPoint(lib_t* maplib, elfheader_t* h) +{ + uintptr_t ep = h->entrypoint + h->delta; + printf_log(LOG_DEBUG, "Entry Point is %p\n", (void*)ep); + if(box86_log>=LOG_DUMP) { + printf_log(LOG_DUMP, "(short) Dump of Entry point\n"); + int sz = 64; + uintptr_t lastbyte = GetLastByte(h); + if (ep + sz > lastbyte) + sz = lastbyte - ep; + DumpBinary((char*)ep, sz); + } + /* + // but instead of regular entrypoint, lets grab "main", it will be easier to manage I guess + uintptr_t m = FindSymbol(maplib, "main"); + if(m) { + ep = m; + printf_log(LOG_DEBUG, "Using \"main\" as Entry Point @%p\n", ep); + if(box86_log>=LOG_DUMP) { + printf_log(LOG_DUMP, "(short) Dump of Entry point\n"); + int sz = 64; + uintptr_t lastbyte = GetLastByte(h); + if (ep + sz > lastbyte) + sz = lastbyte - ep; + DumpBinary((char*)ep, sz); + } + } + */ + return ep; +} + +uintptr_t GetLastByte(elfheader_t* h) +{ + return (uintptr_t)h->memory/* + h->delta*/ + h->memsz; +} + +void AddSymbols(lib_t *maplib, kh_mapsymbols_t* mapsymbols, kh_mapsymbols_t* weaksymbols, kh_mapsymbols_t* localsymbols, elfheader_t* h) +{ + printf_log(LOG_DUMP, "Will look for Symbol to add in SymTable(%d)\n", h->numSymTab); + for (int i=0; i<h->numSymTab; ++i) { + const char * symname = h->StrTab+h->SymTab[i].st_name; + int bind = ELF32_ST_BIND(h->SymTab[i].st_info); + int type = ELF32_ST_TYPE(h->SymTab[i].st_info); + int vis = h->SymTab[i].st_other&0x3; + if((type==STT_OBJECT || type==STT_FUNC || type==STT_COMMON || type==STT_TLS || type==STT_NOTYPE) + && (vis==STV_DEFAULT || vis==STV_PROTECTED) && (h->SymTab[i].st_shndx!=0)) { + if((bind==10/*STB_GNU_UNIQUE*/ || (bind==STB_GLOBAL && type==STT_FUNC)) && FindGlobalSymbol(maplib, symname)) + continue; + uintptr_t offs = (type==STT_TLS)?h->SymTab[i].st_value:(h->SymTab[i].st_value + h->delta); + uint32_t sz = h->SymTab[i].st_size; + printf_log(LOG_DUMP, "Adding Symbol(bind=%s) \"%s\" with offset=%p sz=%d\n", (bind==STB_LOCAL)?"LOCAL":((bind==STB_WEAK)?"WEAK":"GLOBAL"), symname, (void*)offs, sz); + if(bind==STB_LOCAL) + AddSymbol(localsymbols, symname, offs, sz); + else // add in local and global map + if(bind==STB_WEAK) { + AddSymbol(weaksymbols, symname, offs, sz); + } else { + AddSymbol(mapsymbols, symname, offs, sz); + } + } + } + + printf_log(LOG_DUMP, "Will look for Symbol to add in DynSym (%d)\n", h->numDynSym); + for (int i=0; i<h->numDynSym; ++i) { + const char * symname = h->DynStr+h->DynSym[i].st_name; + int bind = ELF32_ST_BIND(h->DynSym[i].st_info); + int type = ELF32_ST_TYPE(h->DynSym[i].st_info); + int vis = h->DynSym[i].st_other&0x3; + //st_shndx==65521 means ABS value + if((type==STT_OBJECT || type==STT_FUNC || type==STT_COMMON || type==STT_TLS || type==STT_NOTYPE) + && (vis==STV_DEFAULT || vis==STV_PROTECTED) && (h->DynSym[i].st_shndx!=0 && h->DynSym[i].st_shndx<=65521)) { + if((bind==10/*STB_GNU_UNIQUE*/ || (bind==STB_GLOBAL && type==STT_FUNC)) && FindGlobalSymbol(maplib, symname)) + continue; + uintptr_t offs = (type==STT_TLS)?h->DynSym[i].st_value:(h->DynSym[i].st_value + h->delta); + uint32_t sz = h->DynSym[i].st_size; + printf_log(LOG_DUMP, "Adding Symbol(bind=%s) \"%s\" with offset=%p sz=%d\n", (bind==STB_LOCAL)?"LOCAL":((bind==STB_WEAK)?"WEAK":"GLOBAL"), symname, (void*)offs, sz); + 
if(bind==STB_LOCAL) + AddSymbol(localsymbols, symname, offs, sz); + else // add in local and global map + if(bind==STB_WEAK) { + AddSymbol(weaksymbols, symname, offs, sz); + } else { + AddSymbol(mapsymbols, symname, offs, sz); + } + } + } + +} + +/* +$ORIGIN – Provides the directory the object was loaded from. This token is typical +used for locating dependencies in unbundled packages. For more details of this +token expansion, see “Locating Associated Dependencies” +$OSNAME – Expands to the name of the operating system (see the uname(1) man +page description of the -s option). For more details of this token expansion, see +“System Specific Shared Objects” +$OSREL – Expands to the operating system release level (see the uname(1) man +page description of the -r option). For more details of this token expansion, see +“System Specific Shared Objects” +$PLATFORM – Expands to the processor type of the current machine (see the +uname(1) man page description of the -i option). For more details of this token +expansion, see “System Specific Shared Objects” +*/ +int LoadNeededLibs(elfheader_t* h, lib_t *maplib, needed_libs_t* neededlibs, int local, box64context_t *box86, x86emu_t* emu) +{ + DumpDynamicRPath(h); + // update RPATH first + for (int i=0; i<h->numDynamic; ++i) + if(h->Dynamic[i].d_tag==DT_RPATH || h->Dynamic[i].d_tag==DT_RUNPATH) { + char *rpathref = h->DynStrTab+h->delta+h->Dynamic[i].d_un.d_val; + char* rpath = rpathref; + while(strstr(rpath, "$ORIGIN")) { + char* origin = strdup(h->path); + char* p = strrchr(origin, '/'); + if(p) *p = '\0'; // remove file name to have only full path, without last '/' + char* tmp = (char*)calloc(1, strlen(rpath)-strlen("$ORIGIN")+strlen(origin)+1); + p = strstr(rpath, "$ORIGIN"); + memcpy(tmp, rpath, p-rpath); + strcat(tmp, origin); + strcat(tmp, p+strlen("$ORIGIN")); + if(rpath!=rpathref) + free(rpath); + rpath = tmp; + free(origin); + } + while(strstr(rpath, "${ORIGIN}")) { + char* origin = strdup(h->path); + char* p = strrchr(origin, '/'); + if(p) *p = '\0'; // remove file name to have only full path, without last '/' + char* tmp = (char*)calloc(1, strlen(rpath)-strlen("${ORIGIN}")+strlen(origin)+1); + p = strstr(rpath, "${ORIGIN}"); + memcpy(tmp, rpath, p-rpath); + strcat(tmp, origin); + strcat(tmp, p+strlen("${ORIGIN}")); + if(rpath!=rpathref) + free(rpath); + rpath = tmp; + free(origin); + } + if(strchr(rpath, '$')) { + printf_log(LOG_INFO, "BOX86: Warning, RPATH with $ variable not supported yet (%s)\n", rpath); + } else { + printf_log(LOG_DEBUG, "Prepending path \"%s\" to BOX86_LD_LIBRARY_PATH\n", rpath); + PrependList(&box86->box86_ld_lib, rpath, 1); + } + if(rpath!=rpathref) + free(rpath); + } + + if(!h->neededlibs && neededlibs) + h->neededlibs = neededlibs; + + DumpDynamicNeeded(h); + for (int i=0; i<h->numDynamic; ++i) + if(h->Dynamic[i].d_tag==DT_NEEDED) { + char *needed = h->DynStrTab+h->delta+h->Dynamic[i].d_un.d_val; + // TODO: Add LD_LIBRARY_PATH and RPATH Handling + if(AddNeededLib(maplib, neededlibs, local, needed, box86, emu)) { + printf_log(LOG_INFO, "Error loading needed lib: \"%s\"\n", needed); + if(!allow_missing_libs) + return 1; //error... 
+ } + } + return 0; +} + +int ElfCheckIfUseTCMallocMinimal(elfheader_t* h) +{ + if(!h) + return 0; + for (int i=0; i<h->numDynamic; ++i) + if(h->Dynamic[i].d_tag==DT_NEEDED) { + char *needed = h->DynStrTab+h->delta+h->Dynamic[i].d_un.d_val; + if(!strcmp(needed, "libtcmalloc_minimal.so.4")) // tcmalloc needs to be the 1st lib loaded + return 1; + else + return 0; + } + return 0; +} + +void RunElfInit(elfheader_t* h, x86emu_t *emu) +{ + if(!h || h->init_done) + return; + // reset Segs Cache + memset(emu->segs_serial, 0, sizeof(emu->segs_serial)); + uintptr_t p = h->initentry + h->delta; + box64context_t* context = GetEmuContext(emu); + if(context->deferedInit) { + if(context->deferedInitSz==context->deferedInitCap) { + context->deferedInitCap += 4; + context->deferedInitList = (elfheader_t**)realloc(context->deferedInitList, context->deferedInitCap*sizeof(elfheader_t*)); + } + context->deferedInitList[context->deferedInitSz++] = h; + return; + } + printf_log(LOG_DEBUG, "Calling Init for %s @%p\n", ElfName(h), (void*)p); + if(h->initentry) + RunFunctionWithEmu(emu, 0, p, 3, context->argc, context->argv, context->envv); + printf_log(LOG_DEBUG, "Done Init for %s\n", ElfName(h)); + // and check init array now + Elf64_Addr *addr = (Elf64_Addr*)(h->initarray + h->delta); + for (int i=0; i<h->initarray_sz; ++i) { + printf_log(LOG_DEBUG, "Calling Init[%d] for %s @%p\n", i, ElfName(h), (void*)addr[i]); + RunFunctionWithEmu(emu, 0, (uintptr_t)addr[i], 3, context->argc, context->argv, context->envv); + } + + h->init_done = 1; + h->fini_done = 0; // can be fini'd now (in case it was re-inited) + printf_log(LOG_DEBUG, "All Init Done for %s\n", ElfName(h)); + return; +} + +EXPORTDYN +void RunDeferedElfInit(x86emu_t *emu) +{ + box64context_t* context = GetEmuContext(emu); + if(!context->deferedInit) + return; + context->deferedInit = 0; + if(!context->deferedInitList) + return; + for (int i=0; i<context->deferedInitSz; ++i) + RunElfInit(context->deferedInitList[i], emu); + free(context->deferedInitList); + context->deferedInitList = NULL; + context->deferedInitCap = context->deferedInitSz = 0; +} + +void RunElfFini(elfheader_t* h, x86emu_t *emu) +{ + if(!h || h->fini_done) + return; + h->fini_done = 1; + // first check fini array + Elf64_Addr *addr = (Elf64_Addr*)(h->finiarray + h->delta); + for (int i=0; i<h->finiarray_sz; ++i) { + printf_log(LOG_DEBUG, "Calling Fini[%d] for %s @%p\n", i, ElfName(h), (void*)addr[i]); + RunFunctionWithEmu(emu, 0, (uintptr_t)addr[i], 0); + } + // then the "old-style" fini + if(h->finientry) { + uintptr_t p = h->finientry + h->delta; + printf_log(LOG_DEBUG, "Calling Fini for %s @%p\n", ElfName(h), (void*)p); + RunFunctionWithEmu(emu, 0, p, 0); + } + h->init_done = 0; // can be re-inited again... 
+ return; +} + +uintptr_t GetElfInit(elfheader_t* h) +{ + return h->initentry + h->delta; +} +uintptr_t GetElfFini(elfheader_t* h) +{ + return h->finientry + h->delta; +} + +void* GetBaseAddress(elfheader_t* h) +{ + return h->memory; +} + +void* GetElfDelta(elfheader_t* h) +{ + return (void*)h->delta; +} + +uint32_t GetBaseSize(elfheader_t* h) +{ + return h->memsz; +} + +int IsAddressInElfSpace(elfheader_t* h, uintptr_t addr) +{ + if(!h) + return 0; + for(int i=0; i<h->multiblock_n; ++i) { + uintptr_t base = h->multiblock_offs[i]; + uintptr_t end = h->multiblock_offs[i] + h->multiblock_size[i] - 1; + if(addr>=base && addr<=end) + return 1; + + } + return 0; +} +elfheader_t* FindElfAddress(box64context_t *context, uintptr_t addr) +{ + for (int i=0; i<context->elfsize; ++i) + if(IsAddressInElfSpace(context->elfs[i], addr)) + return context->elfs[i]; + + return NULL; +} + +const char* FindNearestSymbolName(elfheader_t* h, void* p, uintptr_t* start, uint32_t* sz) +{ + uintptr_t addr = (uintptr_t)p; + + uint32_t distance = 0x7fffffff; + const char* ret = NULL; + uintptr_t s = 0; + uint32_t size = 0; + if(!h) + return ret; + + for (int i=0; i<h->numSymTab && distance!=0; ++i) { + const char * symname = h->StrTab+h->SymTab[i].st_name; + uintptr_t offs = h->SymTab[i].st_value + h->delta; + + if(offs<=addr) { + if(distance>addr-offs) { + distance = addr-offs; + ret = symname; + s = offs; + size = h->SymTab[i].st_size; + } + } + } + for (int i=0; i<h->numDynSym && distance!=0; ++i) { + const char * symname = h->DynStr+h->DynSym[i].st_name; + uintptr_t offs = h->DynSym[i].st_value + h->delta; + + if(offs<=addr) { + if(distance>addr-offs) { + distance = addr-offs; + ret = symname; + s = offs; + size = h->DynSym[i].st_size; + } + } + } + + if(start) + *start = s; + if(sz) + *sz = size; + + return ret; +} + +void* GetDTatOffset(box64context_t* context, int index, int offset) +{ + return (void*)((char*)GetTLSPointer(context, context->elfs[index])+offset); +} + +int32_t GetTLSBase(elfheader_t* h) +{ + return h->tlsbase; +} + +uint32_t GetTLSSize(elfheader_t* h) +{ + return h->tlssize; +} + +void* GetTLSPointer(box64context_t* context, elfheader_t* h) +{ + if(!h->tlssize) + return NULL; + tlsdatasize_t* ptr; + if ((ptr = (tlsdatasize_t*)pthread_getspecific(context->tlskey)) == NULL) { + ptr = (tlsdatasize_t*)fillTLSData(context); + } + if(ptr->tlssize != context->tlssize) + ptr = (tlsdatasize_t*)resizeTLSData(context, ptr); + return ptr->tlsdata+(ptr->tlssize+h->tlsbase); +} + +#ifdef DYNAREC +dynablocklist_t* GetDynablocksFromAddress(box64context_t *context, uintptr_t addr) +{ + // if we are here, the there is not block in standard "space" + /*dynablocklist_t* ret = getDBFromAddress(addr); + if(ret) { + return ret; + }*/ + if(box86_dynarec_forced) { + addDBFromAddressRange(addr, 1); + return getDB(addr>>DYNAMAP_SHIFT); + } + //check if address is in an elf... 
if yes, grant a block (should I warn) + Dl_info info; + if(dladdr((void*)addr, &info)) { + dynarec_log(LOG_INFO, "Address %p is in a native Elf memory space (function \"%s\" in %s)\n", (void*)addr, info.dli_sname, info.dli_fname); + return NULL; + } + dynarec_log(LOG_INFO, "Address %p not found in Elf memory and is not a native call wrapper\n", (void*)addr); + return NULL; +} +#endif + +typedef struct my_dl_phdr_info_s { + void* dlpi_addr; + const char* dlpi_name; + Elf64_Phdr* dlpi_phdr; + int dlpi_phnum; +} my_dl_phdr_info_t; + +static int dl_iterate_phdr_callback(x86emu_t *emu, void* F, my_dl_phdr_info_t *info, size_t size, void* data) +{ + int ret = RunFunctionWithEmu(emu, 0, (uintptr_t)F, 3, info, size, data); + return ret; +} + +#define SUPER() \ +GO(0) \ +GO(1) \ +GO(2) \ +GO(3) \ +GO(4) + +// dl_iterate_phdr ... +#define GO(A) \ +static uintptr_t my_dl_iterate_phdr_fct_##A = 0; \ +static int my_dl_iterate_phdr_##A(struct dl_phdr_info* a, size_t b, void* c)\ +{ \ + if(!a->dlpi_name) \ + return 0; \ + if(!a->dlpi_name[0]) /*don't send informations about box86 itself*/ \ + return 0; \ + return RunFunction(my_context, my_dl_iterate_phdr_fct_##A, 3, a, b, c); \ +} +SUPER() +#undef GO +static void* find_dl_iterate_phdr_Fct(void* fct) +{ + if(!fct) return fct; + if(GetNativeFnc((uintptr_t)fct)) return GetNativeFnc((uintptr_t)fct); + #define GO(A) if(my_dl_iterate_phdr_fct_##A == (uintptr_t)fct) return my_dl_iterate_phdr_##A; + SUPER() + #undef GO + #define GO(A) if(my_dl_iterate_phdr_fct_##A == 0) {my_dl_iterate_phdr_fct_##A = (uintptr_t)fct; return my_dl_iterate_phdr_##A; } + SUPER() + #undef GO + printf_log(LOG_NONE, "Warning, no more slot for elfloader dl_iterate_phdr callback\n"); + return NULL; +} +#undef SUPER + +EXPORT int my_dl_iterate_phdr(x86emu_t *emu, void* F, void *data) { + printf_log(LOG_INFO, "Warning: call to partially implemented dl_iterate_phdr(%p, %p)\n", F, data); + box64context_t *context = GetEmuContext(emu); + const char* empty = ""; + int ret = 0; + for (int idx=0; idx<context->elfsize; ++idx) { + my_dl_phdr_info_t info; + info.dlpi_addr = GetElfDelta(context->elfs[idx]); + info.dlpi_name = idx?context->elfs[idx]->name:empty; //1st elf is program, and this one doesn't get a name + info.dlpi_phdr = context->elfs[idx]->PHEntries; + info.dlpi_phnum = context->elfs[idx]->numPHEntries; + if((ret = dl_iterate_phdr_callback(emu, F, &info, sizeof(info), data))) { + return ret; + } + } + // and now, go on native version + ret = dl_iterate_phdr(find_dl_iterate_phdr_Fct(F), data); + return ret; +} + +void ResetSpecialCaseMainElf(elfheader_t* h) +{ + Elf64_Sym *sym = NULL; + for (int i=0; i<h->numDynSym; ++i) { + if(h->DynSym[i].st_info == 17) { + sym = h->DynSym+i; + const char * symname = h->DynStr+sym->st_name; + if(strcmp(symname, "_IO_2_1_stderr_")==0 && ((void*)sym->st_value+h->delta)) { + memcpy((void*)sym->st_value+h->delta, stderr, sym->st_size); + my__IO_2_1_stderr_ = (void*)sym->st_value+h->delta; + printf_log(LOG_DEBUG, "BOX86: Set @_IO_2_1_stderr_ to %p\n", my__IO_2_1_stderr_); + } else + if(strcmp(symname, "_IO_2_1_stdin_")==0 && ((void*)sym->st_value+h->delta)) { + memcpy((void*)sym->st_value+h->delta, stdin, sym->st_size); + my__IO_2_1_stdin_ = (void*)sym->st_value+h->delta; + printf_log(LOG_DEBUG, "BOX86: Set @_IO_2_1_stdin_ to %p\n", my__IO_2_1_stdin_); + } else + if(strcmp(symname, "_IO_2_1_stdout_")==0 && ((void*)sym->st_value+h->delta)) { + memcpy((void*)sym->st_value+h->delta, stdout, sym->st_size); + my__IO_2_1_stdout_ = (void*)sym->st_value+h->delta; + 
printf_log(LOG_DEBUG, "BOX86: Set @_IO_2_1_stdout_ to %p\n", my__IO_2_1_stdout_); + } + } + } +} + + +void CreateMemorymapFile(box64context_t* context, int fd) +{ + char buff[1024]; + struct stat st; + int dummy; + (void)dummy; + + elfheader_t *h = context->elfs[0]; + + if (stat(h->path, &st)) { + printf_log(LOG_INFO, "Failed to stat file %s (creating memory maps \"file\")!", h->path); + // Some constants, to have "valid" values + st.st_dev = makedev(0x03, 0x00); + st.st_ino = 0; + } + + for (int i=0; i<h->numPHEntries; ++i) { + if (h->PHEntries[i].p_memsz == 0) continue; + + sprintf(buff, "%08x-%08x %c%c%c%c %08x %02x:%02x %ld %s\n", (uintptr_t)h->PHEntries[i].p_vaddr + h->delta, + (uintptr_t)h->PHEntries[i].p_vaddr + h->PHEntries[i].p_memsz + h->delta, + (h->PHEntries[i].p_type & (PF_R|PF_X) ? 'r':'-'), (h->PHEntries[i].p_type & PF_W ? 'w':'-'), + (h->PHEntries[i].p_type & PF_X ? 'x':'-'), 'p', // p for private or s for shared + (uintptr_t)h->PHEntries[i].p_offset, + major(st.st_dev), minor(st.st_dev), st.st_ino, h->path); + + dummy = write(fd, buff, strlen(buff)); + } +} + +void ElfAttachLib(elfheader_t* head, library_t* lib) +{ + if(!head) + return; + head->lib = lib; +} + +uintptr_t pltResolver = ~0; +EXPORT void PltResolver(x86emu_t* emu) +{ + uintptr_t addr = Pop32(emu); + int slot = (int)Pop32(emu); + elfheader_t *h = (elfheader_t*)addr; + printf_log(LOG_DEBUG, "PltResolver: Addr=%p, Slot=%d Return=%p: elf is %s\n", (void*)addr, slot, *(void**)(R_ESP), h->name); + + Elf64_Rel * rel = (Elf64_Rel *)(h->jmprel + h->delta + slot); + + Elf64_Sym *sym = &h->DynSym[ELF32_R_SYM(rel->r_info)]; + int bind = ELF32_ST_BIND(sym->st_info); + const char* symname = SymName(h, sym); + uint32_t *p = (uint32_t*)(rel->r_offset + h->delta); + uintptr_t offs = 0; + uintptr_t end = 0; + + library_t* lib = h->lib; + lib_t* local_maplib = GetMaplib(lib); + if(local_maplib) + GetGlobalSymbolStartEnd(local_maplib, symname, &offs, &end); + if(!offs && !end) + GetGlobalSymbolStartEnd(my_context->maplib, symname, &offs, &end); + + if (!offs) { + printf_log(LOG_NONE, "Error: PltReolver: Symbol %s not found, cannot apply R_386_JMP_SLOT @%p (%p) in %s\n", symname, p, *(void**)p, h->name); + emu->quit = 1; + return; + } else { + if(p) { + printf_log(LOG_DEBUG, "PltReolver: Apply %s R_386_JMP_SLOT @%p with sym=%s (%p -> %p / %s)\n", (bind==STB_LOCAL)?"Local":"Global", p, symname, *(void**)p, (void*)offs, ElfName(FindElfAddress(my_context, offs))); + *p = offs; + } else { + printf_log(LOG_NONE, "PltReolver: Warning, Symbol %s found, but Jump Slot Offset is NULL \n", symname); + } + } + + // jmp to function + R_EIP = offs; +} +#endif \ No newline at end of file diff --git a/src/elfs/elfloader_private.h b/src/elfs/elfloader_private.h new file mode 100755 index 00000000..8de85633 --- /dev/null +++ b/src/elfs/elfloader_private.h @@ -0,0 +1,109 @@ +#ifndef __ELFLOADER_PRIVATE_H_ +#define __ELFLOADER_PRIVATE_H_ + +//#ifdef DYNAREC +//typedef struct dynablocklist_s dynablocklist_t; +//#endif + +//typedef struct library_s library_t; +//typedef struct needed_libs_s needed_libs_t; + +#include <pthread.h> + +struct elfheader_s { + char* name; + char* path; // Resolved path to file + int numPHEntries; + Elf64_Phdr *PHEntries; + int numSHEntries; + Elf64_Shdr *SHEntries; + int SHIdx; + int numSST; + char* SHStrTab; + char* StrTab; + Elf64_Sym* SymTab; + int numSymTab; + char* DynStr; + Elf64_Sym* DynSym; + int numDynSym; + Elf64_Dyn* Dynamic; + int numDynamic; + char* DynStrTab; + int szDynStrTab; + int e_type; + + intptr_t 
delta; // should be 0 + + uintptr_t entrypoint; + uintptr_t initentry; + uintptr_t initarray; + int initarray_sz; + uintptr_t finientry; + uintptr_t finiarray; + int finiarray_sz; + + uintptr_t rel; + int relsz; + int relent; + uintptr_t rela; + int relasz; + int relaent; + uintptr_t jmprel; + int pltsz; + int pltent; + uint64_t pltrel; + uintptr_t gotplt; + uintptr_t gotplt_end; + uintptr_t pltgot; + uintptr_t got; + uintptr_t got_end; + uintptr_t plt; + uintptr_t plt_end; + uintptr_t text; + int textsz; + + uintptr_t paddr; + uintptr_t vaddr; + int align; + uint64_t memsz; + uint64_t stacksz; + int stackalign; + uint32_t tlssize; + int tlsalign; + + int32_t tlsbase; // the base of the tlsdata in the global tlsdata (always negative) + + int init_done; + int fini_done; + + char* memory; // char* and not void* to allow math on memory pointer + void** multiblock; + uintptr_t* multiblock_offs; + uint64_t* multiblock_size; + int multiblock_n; + + //library_t *lib; + //needed_libs_t *neededlibs; +}; + +#define R_X86_64_NONE 0 /* No reloc */ +#define R_X86_64_64 1 /* Direct 64 bit */ +#define R_X86_64_PC32 2 /* PC relative 32 bit signed */ +#define R_X86_64_GOT32 3 /* 32 bit GOT entry */ +#define R_X86_64_PLT32 4 /* 32 bit PLT address */ +#define R_X86_64_COPY 5 /* Copy symbol at runtime */ +#define R_X86_64_GLOB_DAT 6 /* Create GOT entry */ +#define R_X86_64_JUMP_SLOT 7 /* Create PLT entry */ +#define R_X86_64_RELATIVE 8 /* Adjust by program base */ +#define R_X86_64_GOTPCREL 9 /* 32 bit signed pc relative offset to GOT */ +#define R_X86_64_32 10 /* Direct 32 bit zero extended */ +#define R_X86_64_32S 11 /* Direct 32 bit sign extended */ +#define R_X86_64_16 12 /* Direct 16 bit zero extended */ +#define R_X86_64_PC16 13 /* 16 bit sign extended pc relative */ +#define R_X86_64_8 14 /* Direct 8 bit sign extended */ +#define R_X86_64_PC8 15 /* 8 bit sign extended pc relative */ +#define R_X86_64_PC64 24 /* Place relative 64-bit signed */ + +elfheader_t* ParseElfHeader(FILE* f, const char* name, int exec); + +#endif //__ELFLOADER_PRIVATE_H_ \ No newline at end of file diff --git a/src/elfs/elfparser.c b/src/elfs/elfparser.c new file mode 100755 index 00000000..3951f2d6 --- /dev/null +++ b/src/elfs/elfparser.c @@ -0,0 +1,313 @@ +#define _GNU_SOURCE +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <elf.h> + +#include "box64version.h" +#include "elfloader.h" +#include "debug.h" +#include "elfload_dump.h" +#include "elfloader_private.h" + +#ifndef PN_XNUM +#define PN_XNUM (0xffff) +#endif + +int LoadSH(FILE *f, Elf64_Shdr *s, void** SH, const char* name, uint32_t type) +{ + if(type && (s->sh_type != type)) { + printf_log(LOG_INFO, "Section Header \"%s\" (off=%d, size=%d) has incorect type (%d != %d)\n", name, s->sh_offset, s->sh_size, s->sh_type, type); + return -1; + } + if (type==SHT_SYMTAB && s->sh_size%sizeof(Elf64_Sym)) { + printf_log(LOG_INFO, "Section Header \"%s\" (off=%d, size=%d) has size (not multiple of %d)\n", name, s->sh_offset, s->sh_size, sizeof(Elf64_Sym)); + } + *SH = calloc(1, s->sh_size); + fseeko64(f, s->sh_offset ,SEEK_SET); + if(fread(*SH, s->sh_size, 1, f)!=1) { + printf_log(LOG_INFO, "Cannot read Section Header \"%s\" (off=%d, size=%d)\n", name, s->sh_offset, s->sh_size); + return -1; + } + + return 0; +} + +int FindSection(Elf64_Shdr *s, int n, char* SHStrTab, const char* name) +{ + for (int i=0; i<n; ++i) { + if(s[i].sh_type!=SHT_NULL) + if(!strcmp(SHStrTab+s[i].sh_name, name)) + return i; + } + return 0; +} + +void LoadNamedSection(FILE *f, Elf64_Shdr 
*s, int size, char* SHStrTab, const char* name, const char* clearname, uint32_t type, void** what, int* num) +{ + int n = FindSection(s, size, SHStrTab, name); + printf_log(LOG_DEBUG, "Loading %s (idx = %d)\n", clearname, n); + if(n) + LoadSH(f, s+n, what, name, type); + if(type==SHT_SYMTAB || type==SHT_DYNSYM) { + if(*what && num) + *num = s[n].sh_size / sizeof(Elf64_Sym); + } else if(type==SHT_DYNAMIC) { + if(*what && num) + *num = s[n].sh_size / sizeof(Elf64_Dyn); + } +} + +elfheader_t* ParseElfHeader(FILE* f, const char* name, int exec) +{ + Elf64_Ehdr header; + int level = (exec)?LOG_INFO:LOG_DEBUG; + if(fread(&header, sizeof(Elf64_Ehdr), 1, f)!=1) { + printf_log(level, "Cannot read ELF Header\n"); + return NULL; + } + if(memcmp(header.e_ident, ELFMAG, SELFMAG)!=0) { + printf_log(LOG_INFO, "Not an ELF file (sign=%c%c%c%c)\n", header.e_ident[0], header.e_ident[1], header.e_ident[2], header.e_ident[3]); + return NULL; + } + if(header.e_ident[EI_CLASS]!=ELFCLASS64) { + if(header.e_ident[EI_CLASS]==ELFCLASS32) { + printf_log(LOG_INFO, "This is a 64bits ELF! box64 can only run 64bits ELF!\n"); + } else { + printf_log(LOG_INFO, "Not a 64bits ELF (%d)\n", header.e_ident[EI_CLASS]); + } + return NULL; + } + if(header.e_ident[EI_DATA]!=ELFDATA2LSB) { + printf_log(LOG_INFO, "Not a LittleEndian ELF (%d)\n", header.e_ident[EI_DATA]); + return NULL; + } + if(header.e_ident[EI_VERSION]!=EV_CURRENT) { + printf_log(LOG_INFO, "Incorrect ELF version (%d)\n", header.e_ident[EI_VERSION]); + return NULL; + } + if(header.e_ident[EI_OSABI]!=ELFOSABI_LINUX && header.e_ident[EI_OSABI]!=ELFOSABI_NONE && header.e_ident[EI_OSABI]!=ELFOSABI_SYSV) { + printf_log(LOG_INFO, "Not a Linux ELF (%d)\n",header.e_ident[EI_OSABI]); + return NULL; + } + + if(header.e_type != ET_EXEC && header.e_type != ET_DYN) { + printf_log(LOG_INFO, "Not an Executable (%d)\n", header.e_type); + return NULL; + } + + if(header.e_machine != EM_X86_64) { + printf_log(level, "Not an x86_64 ELF (%d)\n", header.e_machine); + return NULL; + } + + if(header.e_entry == 0 && exec) { + printf_log(LOG_INFO, "No entry point in ELF\n"); + return NULL; + } + if(header.e_phentsize != sizeof(Elf64_Phdr)) { + printf_log(LOG_INFO, "Program Header Entry size incorrect (%d != %d)\n", header.e_phentsize, sizeof(Elf64_Phdr)); + return NULL; + } + if(header.e_shentsize != sizeof(Elf64_Shdr) && header.e_shentsize != 0) { + printf_log(LOG_INFO, "Section Header Entry size incorrect (%d != %d)\n", header.e_shentsize, sizeof(Elf64_Shdr)); + return NULL; + } + + elfheader_t *h = calloc(1, sizeof(elfheader_t)); + h->name = strdup(name); + h->entrypoint = header.e_entry; + h->numPHEntries = header.e_phnum; + h->numSHEntries = header.e_shnum; + h->SHIdx = header.e_shstrndx; + h->e_type = header.e_type; + if(header.e_shentsize && header.e_shnum) { + // special cases for nums + if(h->numSHEntries == 0) { + printf_log(LOG_DEBUG, "Read number of Sections in 1st Section\n"); + // read 1st section header and grab actual number from here + fseeko64(f, header.e_shoff, SEEK_SET); + Elf64_Shdr section; + if(fread(&section, sizeof(Elf64_Shdr), 1, f)!=1) { + free(h); + printf_log(LOG_INFO, "Cannot read Initial Section Header\n"); + return NULL; + } + h->numSHEntries = section.sh_size; + } + // now read all section headers + printf_log(LOG_DEBUG, "Read %d Section header\n", h->numSHEntries); + h->SHEntries = (Elf64_Shdr*)calloc(h->numSHEntries, sizeof(Elf64_Shdr)); + fseeko64(f, header.e_shoff ,SEEK_SET); + if(fread(h->SHEntries, sizeof(Elf64_Shdr), h->numSHEntries, f)!=h->numSHEntries) { 
+ FreeElfHeader(&h); + printf_log(LOG_INFO, "Cannot read all Section Header\n"); + return NULL; + } + + if(h->numPHEntries == PN_XNUM) { + printf_log(LOG_DEBUG, "Read number of Program Header in 1st Section\n"); + // read 1st section header and grab actual number from here + h->numPHEntries = h->SHEntries[0].sh_info; + } + } + + printf_log(LOG_DEBUG, "Read %d Program header\n", h->numPHEntries); + h->PHEntries = (Elf64_Phdr*)calloc(h->numPHEntries, sizeof(Elf64_Phdr)); + fseeko64(f, header.e_phoff ,SEEK_SET); + if(fread(h->PHEntries, sizeof(Elf64_Phdr), h->numPHEntries, f)!=h->numPHEntries) { + FreeElfHeader(&h); + printf_log(LOG_INFO, "Cannot read all Program Header\n"); + return NULL; + } + + if(header.e_shentsize && header.e_shnum) { + if(h->SHIdx == SHN_XINDEX) { + printf_log(LOG_DEBUG, "Read number of String Table in 1st Section\n"); + h->SHIdx = h->SHEntries[0].sh_link; + } + if(h->SHIdx > h->numSHEntries) { + printf_log(LOG_INFO, "Incoherent Section String Table Index : %d / %d\n", h->SHIdx, h->numSHEntries); + FreeElfHeader(&h); + return NULL; + } + // load Section table + printf_log(LOG_DEBUG, "Loading Sections Table String (idx = %d)\n", h->SHIdx); + if(LoadSH(f, h->SHEntries+h->SHIdx, (void*)&h->SHStrTab, ".shstrtab", SHT_STRTAB)) { + FreeElfHeader(&h); + return NULL; + } + if(box64_log>=LOG_DUMP) DumpMainHeader(&header, h); + + LoadNamedSection(f, h->SHEntries, h->numSHEntries, h->SHStrTab, ".strtab", "SymTab Strings", SHT_STRTAB, (void**)&h->StrTab, NULL); + LoadNamedSection(f, h->SHEntries, h->numSHEntries, h->SHStrTab, ".symtab", "SymTab", SHT_SYMTAB, (void**)&h->SymTab, &h->numSymTab); + if(box64_log>=LOG_DUMP && h->SymTab) DumpSymTab(h); + + LoadNamedSection(f, h->SHEntries, h->numSHEntries, h->SHStrTab, ".dynamic", "Dynamic", SHT_DYNAMIC, (void**)&h->Dynamic, &h->numDynamic); + if(box64_log>=LOG_DUMP && h->Dynamic) DumpDynamicSections(h); + // grab DT_REL & DT_RELA stuffs + // also grab the DT_STRTAB string table + { + for (int i=0; i<h->numDynamic; ++i) { + if(h->Dynamic[i].d_tag == DT_REL) + h->rel = h->Dynamic[i].d_un.d_ptr; + else if(h->Dynamic[i].d_tag == DT_RELSZ) + h->relsz = h->Dynamic[i].d_un.d_val; + else if(h->Dynamic[i].d_tag == DT_RELENT) + h->relent = h->Dynamic[i].d_un.d_val; + else if(h->Dynamic[i].d_tag == DT_RELA) + h->rela = h->Dynamic[i].d_un.d_ptr; + else if(h->Dynamic[i].d_tag == DT_RELASZ) + h->relasz = h->Dynamic[i].d_un.d_val; + else if(h->Dynamic[i].d_tag == DT_RELAENT) + h->relaent = h->Dynamic[i].d_un.d_val; + else if(h->Dynamic[i].d_tag == DT_PLTGOT) + h->pltgot = h->Dynamic[i].d_un.d_val; + else if(h->Dynamic[i].d_tag == DT_PLTREL) + h->pltrel = h->Dynamic[i].d_un.d_val; + else if(h->Dynamic[i].d_tag == DT_PLTRELSZ) + h->pltsz = h->Dynamic[i].d_un.d_val; + else if(h->Dynamic[i].d_tag == DT_JMPREL) + h->jmprel = h->Dynamic[i].d_un.d_val; + else if(h->Dynamic[i].d_tag == DT_STRTAB) + h->DynStrTab = (char*)(h->Dynamic[i].d_un.d_ptr); + else if(h->Dynamic[i].d_tag == DT_STRSZ) + h->szDynStrTab = h->Dynamic[i].d_un.d_val; + } + if(h->rel) { + if(h->relent != sizeof(Elf64_Rel)) { + printf_log(LOG_NONE, "Rel Table Entry size invalid (0x%x should be 0x%x)\n", h->relent, sizeof(Elf64_Rel)); + FreeElfHeader(&h); + return NULL; + } + printf_log(LOG_DEBUG, "Rel Table @%p (0x%x/0x%x)\n", (void*)h->rel, h->relsz, h->relent); + } + if(h->rela) { + if(h->relaent != sizeof(Elf64_Rela)) { + printf_log(LOG_NONE, "RelA Table Entry size invalid (0x%x should be 0x%x)\n", h->relaent, sizeof(Elf64_Rela)); + FreeElfHeader(&h); + return NULL; + } + 
printf_log(LOG_DEBUG, "RelA Table @%p (0x%x/0x%x)\n", (void*)h->rela, h->relasz, h->relaent); + } + if(h->jmprel) { + if(h->pltrel == DT_REL) { + h->pltent = sizeof(Elf64_Rel); + } else if(h->pltrel == DT_RELA) { + h->pltent = sizeof(Elf64_Rela); + } else { + printf_log(LOG_NONE, "PLT Table type is unknown (size = 0x%x, type=%d)\n", h->pltsz, h->pltrel); + FreeElfHeader(&h); + return NULL; + } + if((h->pltsz / h->pltent)*h->pltent != h->pltsz) { + printf_log(LOG_NONE, "PLT Table Entry size invalid (0x%x, ent=0x%x, type=%d)\n", h->pltsz, h->pltent, h->pltrel); + FreeElfHeader(&h); + return NULL; + } + printf_log(LOG_DEBUG, "PLT Table @%p (type=%d 0x%x/0x%0x)\n", (void*)h->jmprel, h->pltrel, h->pltsz, h->pltent); + } + if(h->DynStrTab && h->szDynStrTab) { + //DumpDynamicNeeded(h); cannot dump now, it's not loaded yet + } + } + // look for PLT Offset + int ii = FindSection(h->SHEntries, h->numSHEntries, h->SHStrTab, ".got.plt"); + if(ii) { + h->gotplt = h->SHEntries[ii].sh_addr; + h->gotplt_end = h->gotplt + h->SHEntries[ii].sh_size; + printf_log(LOG_DEBUG, "The GOT.PLT Table is at address %p\n", (void*)h->gotplt); + } + ii = FindSection(h->SHEntries, h->numSHEntries, h->SHStrTab, ".got"); + if(ii) { + h->got = h->SHEntries[ii].sh_addr; + h->got_end = h->got + h->SHEntries[ii].sh_size; + printf_log(LOG_DEBUG, "The GOT Table is at address %p..%p\n", (void*)h->got, (void*)h->got_end); + } + ii = FindSection(h->SHEntries, h->numSHEntries, h->SHStrTab, ".plt"); + if(ii) { + h->plt = h->SHEntries[ii].sh_addr; + h->plt_end = h->plt + h->SHEntries[ii].sh_size; + printf_log(LOG_DEBUG, "The PLT Table is at address %p..%p\n", (void*)h->plt, (void*)h->plt_end); + } + // look for .init entry point + ii = FindSection(h->SHEntries, h->numSHEntries, h->SHStrTab, ".init"); + if(ii) { + h->initentry = h->SHEntries[ii].sh_addr; + printf_log(LOG_DEBUG, "The .init is at address %p\n", (void*)h->initentry); + } + // and .init_array + ii = FindSection(h->SHEntries, h->numSHEntries, h->SHStrTab, ".init_array"); + if(ii) { + h->initarray_sz = h->SHEntries[ii].sh_size / sizeof(Elf64_Addr); + h->initarray = (uintptr_t)(h->SHEntries[ii].sh_addr); + printf_log(LOG_DEBUG, "The .init_array is at address %p, and have %d elements\n", (void*)h->initarray, h->initarray_sz); + } + // look for .fini entry point + ii = FindSection(h->SHEntries, h->numSHEntries, h->SHStrTab, ".fini"); + if(ii) { + h->finientry = h->SHEntries[ii].sh_addr; + printf_log(LOG_DEBUG, "The .fini is at address %p\n", (void*)h->finientry); + } + // and .fini_array + ii = FindSection(h->SHEntries, h->numSHEntries, h->SHStrTab, ".fini_array"); + if(ii) { + h->finiarray_sz = h->SHEntries[ii].sh_size / sizeof(Elf64_Addr); + h->finiarray = (uintptr_t)(h->SHEntries[ii].sh_addr); + printf_log(LOG_DEBUG, "The .fini_array is at address %p, and have %d elements\n", (void*)h->finiarray, h->finiarray_sz); + } + // grab .text for main code + ii = FindSection(h->SHEntries, h->numSHEntries, h->SHStrTab, ".text"); + if(ii) { + h->text = (uintptr_t)(h->SHEntries[ii].sh_addr); + h->textsz = h->SHEntries[ii].sh_size; + printf_log(LOG_DEBUG, "The .text is at address %p, and is %d big\n", (void*)h->text, h->textsz); + } + + LoadNamedSection(f, h->SHEntries, h->numSHEntries, h->SHStrTab, ".dynstr", "DynSym Strings", SHT_STRTAB, (void**)&h->DynStr, NULL); + LoadNamedSection(f, h->SHEntries, h->numSHEntries, h->SHStrTab, ".dynsym", "DynSym", SHT_DYNSYM, (void**)&h->DynSym, &h->numDynSym); + if(box64_log>=LOG_DUMP && h->DynSym) DumpDynSym(h); + } + + return h; +} diff --git 
a/src/include/box64context.h b/src/include/box64context.h index 680b2d5e..0697e16a 100755 --- a/src/include/box64context.h +++ b/src/include/box64context.h @@ -4,6 +4,8 @@ #include <pthread.h> #include "pathcoll.h" +typedef struct elfheader_s elfheader_t; + typedef void* (*procaddess_t)(const char* name); typedef void* (*vkprocaddess_t)(void* instance, const char* name); @@ -43,6 +45,10 @@ typedef struct box64context_s { int stackalign; void* stack; // alocated stack + elfheader_t **elfs; // elf headers and memory + int elfcap; + int elfsize; // number of elf loaded + int deferedInit; pthread_key_t tlskey; // then tls key to have actual tlsdata diff --git a/src/include/custommem.h b/src/include/custommem.h new file mode 100644 index 00000000..d6bca59c --- /dev/null +++ b/src/include/custommem.h @@ -0,0 +1,52 @@ +#ifndef __CUSTOM_MEM__H_ +#define __CUSTOM_MEM__H_ +#include <unistd.h> +#include <stdint.h> + + +typedef struct box64context_s box64context_t; + +//void* customMalloc(size_t size); +//void* customCalloc(size_t n, size_t size); +//void* customRealloc(void* p, size_t size); +//void customFree(void* p); + +//#define kcalloc customCalloc +//#define kmalloc customMalloc +//#define krealloc customRealloc +//#define kfree customFree + +#ifdef DYNAREC +//typedef struct dynablock_s dynablock_t; +//typedef struct dynablocklist_s dynablocklist_t; +//// custom protection flag to mark Page that are Write protected for Dynarec purpose +//uintptr_t AllocDynarecMap(dynablock_t* db, int size); +//void FreeDynarecMap(dynablock_t* db, uintptr_t addr, uint32_t size); + +//void addDBFromAddressRange(uintptr_t addr, uintptr_t size); +//void cleanDBFromAddressRange(uintptr_t addr, uintptr_t size, int destroy); + +//dynablocklist_t* getDB(uintptr_t idx); +//void addJumpTableIfDefault(void* addr, void* jmp); +//void setJumpTableDefault(void* addr); +//uintptr_t getJumpTable(); +//uintptr_t getJumpTableAddress(uintptr_t addr); +#endif + +#define PROT_DYNAREC 0x80 +void updateProtection(uintptr_t addr, uintptr_t size, uint32_t prot); +void setProtection(uintptr_t addr, uintptr_t size, uint32_t prot); +uint32_t getProtection(uintptr_t addr); +#ifdef DYNAREC +//void protectDB(uintptr_t addr, uintptr_t size); +//void protectDBnolock(uintptr_t addr, uintptr_t size); +//void unprotectDB(uintptr_t addr, uintptr_t size); +//void lockDB(); +//void unlockDB(); +#endif + + +void init_custommem_helper(box64context_t* ctx); +void fini_custommem_helper(box64context_t* ctx); + +#endif //__CUSTOM_MEM__H_ \ No newline at end of file diff --git a/src/include/elfload_dump.h b/src/include/elfload_dump.h new file mode 100755 index 00000000..251ea3b8 --- /dev/null +++ b/src/include/elfload_dump.h @@ -0,0 +1,24 @@ +#ifndef ELFLOADER_DUMP_H +#define ELFLOADER_DUMP_H + +typedef struct elfheader_s elfheader_t; + +const char* DumpSection(Elf64_Shdr *s, char* SST); +const char* DumpDynamic(Elf64_Dyn *s); +const char* DumpPHEntry(Elf64_Phdr *e); +const char* DumpSym(elfheader_t *h, Elf64_Sym* sym); +const char* DumpRelType(int t); +const char* SymName(elfheader_t *h, Elf64_Sym* sym); +const char* IdxSymName(elfheader_t *h, int sym); +void DumpMainHeader(Elf64_Ehdr *header, elfheader_t *h); +void DumpSymTab(elfheader_t *h); +void DumpDynamicSections(elfheader_t *h); +void DumpDynamicNeeded(elfheader_t *h); +void DumpDynamicRPath(elfheader_t *h); +void DumpDynSym(elfheader_t *h); +void DumpRelTable(elfheader_t *h, int cnt, Elf64_Rel *rel, const char* name); +void DumpRelATable(elfheader_t *h, int cnt, Elf64_Rela *rela, const char* 
name); + +void DumpBinary(char* p, int sz); + +#endif //ELFLOADER_DUMP_H \ No newline at end of file diff --git a/src/include/elfloader.h b/src/include/elfloader.h new file mode 100755 index 00000000..56a51c51 --- /dev/null +++ b/src/include/elfloader.h @@ -0,0 +1,59 @@ +#ifndef __ELF_LOADER_H_ +#define __ELF_LOADER_H_ +#include <stdio.h> + +typedef struct elfheader_s elfheader_t; +//typedef struct lib_s lib_t; +//typedef struct library_s library_t; +//typedef struct kh_mapsymbols_s kh_mapsymbols_t; +typedef struct box64context_s box64context_t; +//typedef struct x86emu_s x86emu_t; +//typedef struct needed_libs_s needed_libs_t; +//#ifdef DYNAREC +//typedef struct dynablocklist_s dynablocklist_t; +//#endif + +elfheader_t* LoadAndCheckElfHeader(FILE* f, const char* name, int exec); // exec : 0 = lib, 1 = exec +void FreeElfHeader(elfheader_t** head); +const char* ElfName(elfheader_t* head); +//void ElfAttachLib(elfheader_t* head, library_t* lib); + +// return 0 if OK +int CalcLoadAddr(elfheader_t* head); +int AllocElfMemory(box64context_t* context, elfheader_t* head, int mainbin); +void FreeElfMemory(elfheader_t* head); +//int LoadElfMemory(FILE* f, box64context_t* context, elfheader_t* head); +//int ReloadElfMemory(FILE* f, box64context_t* context, elfheader_t* head); +//int RelocateElf(lib_t *maplib, lib_t* local_maplib, elfheader_t* head); +//int RelocateElfPlt(lib_t *maplib, lib_t* local_maplib, elfheader_t* head); +//void CalcStack(elfheader_t* h, uint32_t* stacksz, int* stackalign); +//uintptr_t GetEntryPoint(lib_t* maplib, elfheader_t* h); +//uintptr_t GetLastByte(elfheader_t* h); +//void AddSymbols(lib_t *maplib, kh_mapsymbols_t* mapsymbols, kh_mapsymbols_t* weaksymbols, kh_mapsymbols_t* localsymbols, elfheader_t* h); +//int LoadNeededLibs(elfheader_t* h, lib_t *maplib, needed_libs_t* neededlibs, int local, box64context_t *box86, x86emu_t* emu); +//uintptr_t GetElfInit(elfheader_t* h); +//uintptr_t GetElfFini(elfheader_t* h); +//void RunElfInit(elfheader_t* h, x86emu_t *emu); +//void RunElfFini(elfheader_t* h, x86emu_t *emu); +//void RunDeferedElfInit(x86emu_t *emu); +//void* GetBaseAddress(elfheader_t* h); +//void* GetElfDelta(elfheader_t* h); +//uint32_t GetBaseSize(elfheader_t* h); +//int IsAddressInElfSpace(elfheader_t* h, uintptr_t addr); +//elfheader_t* FindElfAddress(box64context_t *context, uintptr_t addr); +//const char* FindNearestSymbolName(elfheader_t* h, void* p, uintptr_t* start, uint32_t* sz); +//int32_t GetTLSBase(elfheader_t* h); +//uint32_t GetTLSSize(elfheader_t* h); +//void* GetTLSPointer(box64context_t* context, elfheader_t* h); +//void* GetDTatOffset(box64context_t* context, int index, int offset); +//#ifdef DYNAREC +//dynablocklist_t* GetDynablocksFromAddress(box64context_t *context, uintptr_t addr); +//dynablocklist_t* GetDynablocksFromElf(elfheader_t* h); +//#endif +//void ResetSpecialCaseMainElf(elfheader_t* h); +//void CreateMemorymapFile(box64context_t* context, int fd); + +//int ElfCheckIfUseTCMallocMinimal(elfheader_t* h); // return 1 if tcmalloc is used + + +#endif //__ELF_LOADER_H_ \ No newline at end of file diff --git a/src/main.c b/src/main.c index 4858e797..3d4e4741 100755 --- a/src/main.c +++ b/src/main.c @@ -14,6 +14,8 @@ #include "fileutils.h" #include "box64context.h" #include "wine_tools.h" +#include "elfloader.h" +#include "custommem.h" box64context_t *my_context = NULL; int box64_log = LOG_NONE; @@ -720,7 +722,7 @@ int main(int argc, const char **argv, const char **env) { FreeCollection(&ld_preload); return -1; } - /*elfheader_t *elf_header = 
LoadAndCheckElfHeader(f, my_context->argv[0], 1); + elfheader_t *elf_header = LoadAndCheckElfHeader(f, my_context->argv[0], 1); if(!elf_header) { printf_log(LOG_NONE, "Error: reading elf header of %s, try to launch natively instead\n", my_context->argv[0]); fclose(f); @@ -729,6 +731,6 @@ int main(int argc, const char **argv, const char **env) { FreeCollection(&ld_preload); return execvp(argv[1], (char * const*)(argv+1)); } - AddElfHeader(my_context, elf_header);*/ + /*AddElfHeader(my_context, elf_header);*/ return 0; } |
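Note for readers tracing the relocation code in this commit: elfloader_private.h defines the full set of R_X86_64_* constants, but RelocateElfRELA only handles the copy relocation so far, and both REL/RELA handlers still use the ELF32_R_SYM/ELF32_R_TYPE accessors, presumably carried over from box86 (the ELF64 variants split r_info at bit 32 instead of bit 8). Below is a minimal, illustrative sketch of how Elf64_Rela entries are conventionally applied for the most common x86-64 relocation types; apply_rela_sketch and the resolve callback are hypothetical names, not functions from this commit, and a real implementation also needs weak-symbol and TLS handling.

    /* Illustrative only -- not part of this commit. */
    #include <elf.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uintptr_t (*resolve_fn)(const char* symname); /* stands in for the maplib symbol lookup */

    static int apply_rela_sketch(uintptr_t delta, Elf64_Sym* dynsym, const char* dynstr,
                                 Elf64_Rela* rela, size_t cnt, resolve_fn resolve)
    {
        for (size_t i=0; i<cnt; ++i) {
            Elf64_Sym* sym = &dynsym[ELF64_R_SYM(rela[i].r_info)];
            const char* symname = dynstr + sym->st_name;
            uint64_t* p = (uint64_t*)(rela[i].r_offset + delta);
            switch (ELF64_R_TYPE(rela[i].r_info)) {
                case R_X86_64_RELATIVE:   /* B + A: just rebase, no symbol lookup needed */
                    *p = delta + rela[i].r_addend;
                    break;
                case R_X86_64_GLOB_DAT:   /* S: fill a GOT entry */
                case R_X86_64_JUMP_SLOT:  /* S: fill a PLT slot (eager binding) */
                    *p = resolve(symname);
                    break;
                case R_X86_64_64:         /* S + A: direct 64bit address */
                    *p = resolve(symname) + rela[i].r_addend;
                    break;
                default:
                    fprintf(stderr, "unhandled rela type %lu\n",
                            (unsigned long)ELF64_R_TYPE(rela[i].r_info));
                    return -1;
            }
        }
        return 0;
    }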
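On the lazy-binding path, RelocateElfPlt stores pltResolver and the elfheader_t pointer into the GOT so that unresolved PLT calls land in PltResolver. For reference, the x86-64 System V convention reserves GOT[1] (byte offset 8 of .got.plt) for an opaque handle handed back to the resolver and GOT[2] (offset 16) for the resolver address; the offsets 4 and 8 written in this commit appear to be carried over from the 32-bit box86 layout. A hypothetical helper following the 64-bit convention would look like this (names are illustrative, not from this commit):

    #include <stdint.h>

    /* Illustrative only: install a private resolver using the x86-64 GOT layout. */
    static void inject_resolver64(uintptr_t gotplt, intptr_t delta, void* handle, uintptr_t resolver)
    {
        uintptr_t* got = (uintptr_t*)(gotplt + delta);
        got[1] = (uintptr_t)handle;  /* e.g. the elfheader_t* of the object, read back by the resolver */
        got[2] = resolver;           /* address the first PLT stub jumps to */
    }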
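LoadNeededLibs expands $ORIGIN (and ${ORIGIN}) in DT_RPATH/DT_RUNPATH entries with the directory of the loaded object before prepending the result to BOX86_LD_LIBRARY_PATH. The same substitution, factored into a standalone helper purely for illustration (expand_origin is a hypothetical name, only the bare $ORIGIN form is handled, and the returned string must be freed by the caller):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Illustrative only -- replace every "$ORIGIN" in rpath with the directory of elfpath. */
    static char* expand_origin(const char* rpath, const char* elfpath)
    {
        char* origin = strdup(elfpath);
        char* slash = strrchr(origin, '/');
        if (slash) *slash = '\0';                    /* keep only the directory part */

        char* out = strdup(rpath);
        char* token;
        while ((token = strstr(out, "$ORIGIN"))) {
            size_t toklen = strlen("$ORIGIN");
            char* tmp = calloc(1, strlen(out) - toklen + strlen(origin) + 1);
            memcpy(tmp, out, (size_t)(token - out)); /* prefix before $ORIGIN */
            strcat(tmp, origin);                     /* the expansion */
            strcat(tmp, token + toklen);             /* suffix after $ORIGIN */
            free(out);
            out = tmp;
        }
        free(origin);
        return out;
    }

    int main(void)
    {
        char* r = expand_origin("$ORIGIN/../lib", "/opt/game/bin/game.x86_64");
        printf("%s\n", r);                           /* prints /opt/game/bin/../lib */
        free(r);
        return 0;
    }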