Diffstat (limited to 'miasm2/jitter')
-rw-r--r--   miasm2/jitter/bn.c               |   6
-rw-r--r--   miasm2/jitter/bn.h               |   4
-rw-r--r--   miasm2/jitter/jitcore.py         |  12
-rw-r--r--   miasm2/jitter/jitcore_python.py  | 209
-rw-r--r--   miasm2/jitter/llvmconvert.py     |   8
-rw-r--r--   miasm2/jitter/op_semantics.c     |  18
-rw-r--r--   miasm2/jitter/op_semantics.h     |  24
7 files changed, 171 insertions, 110 deletions
diff --git a/miasm2/jitter/bn.c b/miasm2/jitter/bn.c
index 96e66f4d..c621d102 100644
--- a/miasm2/jitter/bn.c
+++ b/miasm2/jitter/bn.c
@@ -796,7 +796,7 @@ int bignum_cnttrailzeros(bn_t n, int size)
-bn_t bignum_idiv(bn_t a, bn_t b, int size)
+bn_t bignum_sdiv(bn_t a, bn_t b, int size)
 {
 	require(size, "size must be greater than 0");
 	require(size <= BN_BIT_SIZE, "size must be below bignum max size");
@@ -832,14 +832,14 @@ bn_t bignum_idiv(bn_t a, bn_t b, int size)
-bn_t bignum_imod(bn_t a, bn_t b, int size)
+bn_t bignum_smod(bn_t a, bn_t b, int size)
 {
 	require(size, "size must be greater than 0");
 	require(size <= BN_BIT_SIZE, "size must be below bignum max size");
 	bn_t c;
-	c = bignum_idiv(a, b, size);
+	c = bignum_sdiv(a, b, size);
 	c = bignum_mul(c, b);
 	c = bignum_sub(a, c);
 	c = bignum_mask(c, size);
diff --git a/miasm2/jitter/bn.h b/miasm2/jitter/bn.h
index 67d20a77..f0a13b53 100644
--- a/miasm2/jitter/bn.h
+++ b/miasm2/jitter/bn.h
@@ -116,8 +116,8 @@ _MIASM_EXPORT bn_t bignum_sub(bn_t a, bn_t b); /* c = a - b */
 _MIASM_EXPORT bn_t bignum_mul(bn_t a, bn_t b); /* c = a * b */
 _MIASM_EXPORT bn_t bignum_udiv(bn_t a, bn_t b); /* c = a / b */
 _MIASM_EXPORT bn_t bignum_umod(bn_t a, bn_t b); /* c = a % b */
-_MIASM_EXPORT bn_t bignum_idiv(bn_t a, bn_t b, int size);
-_MIASM_EXPORT bn_t bignum_imod(bn_t a, bn_t b, int size);
+_MIASM_EXPORT bn_t bignum_sdiv(bn_t a, bn_t b, int size);
+_MIASM_EXPORT bn_t bignum_smod(bn_t a, bn_t b, int size);
 
 //void bignum_udivmod(struct bn* a, struct bn* b, struct bn* c, struct bn* d); /* c = a/b, d = a%b */
diff --git a/miasm2/jitter/jitcore.py b/miasm2/jitter/jitcore.py
index fc5cf35e..78e27244 100644
--- a/miasm2/jitter/jitcore.py
+++ b/miasm2/jitter/jitcore.py
@@ -114,21 +114,11 @@ class JitCore(object):
         for a, b in self.blocks_mem_interval:
             vm.add_code_bloc(a, b + 1)
 
-    def jit_irblocks(self, label, irblocks):
-        """JiT a group of irblocks.
-        @label: the label of the irblocks
-        @irblocks: a group of irblocks
-        """
-
-        raise NotImplementedError("Abstract class")
-
     def add_block(self, block):
         """Add a block to JiT and JiT it.
         @block: asm_bloc to add
         """
-        irblocks = self.ir_arch.add_asmblock_to_ircfg(block, self.ircfg, gen_pc_updt = True)
-        block.blocks = irblocks
-        self.jit_irblocks(block.loc_key, irblocks)
+        raise NotImplementedError("Abstract class")
 
     def disasm_and_jit_block(self, addr, vm):
         """Disassemble a new block and JiT it
diff --git a/miasm2/jitter/jitcore_python.py b/miasm2/jitter/jitcore_python.py
index 0b1f5809..fdd5c2ae 100644
--- a/miasm2/jitter/jitcore_python.py
+++ b/miasm2/jitter/jitcore_python.py
@@ -1,5 +1,5 @@
 import miasm2.jitter.jitcore as jitcore
-import miasm2.expression.expression as m2_expr
+from miasm2.expression.expression import ExprInt, ExprLoc
 import miasm2.jitter.csts as csts
 from miasm2.expression.simplifications import expr_simp_explicit
 from miasm2.jitter.emulatedsymbexec import EmulatedSymbExec
@@ -36,12 +36,38 @@ class JitCore_Python(jitcore.JitCore):
         "Preload symbols according to current architecture"
         self.symbexec.reset_regs()
 
-    def jit_irblocks(self, loc_key, irblocks):
-        """Create a python function corresponding to an irblocks' group.
-        @loc_key: the loc_key of the irblocks
-        @irblocks: a group of irblocks
+    def arch_specific(self):
+        """Return arch specific information for the current architecture"""
+        arch = self.ir_arch.arch
+        has_delayslot = False
+        if arch.name == "mips32":
+            from miasm2.arch.mips32.jit import mipsCGen
+            cgen_class = mipsCGen
+            has_delayslot = True
+        elif arch.name == "arm":
+            from miasm2.arch.arm.jit import arm_CGen
+            cgen_class = arm_CGen
+        else:
+            from miasm2.jitter.codegen import CGen
+            cgen_class = CGen
+        return cgen_class(self.ir_arch), has_delayslot
+
+    def add_block(self, asmblock):
+        """Create a python function corresponding to an AsmBlock
+        @asmblock: AsmBlock
         """
+        # TODO: merge duplicate code with CGen, llvmconvert
+        codegen, has_delayslot = self.arch_specific()
+        irblocks_list = codegen.block2assignblks(asmblock)
+        instr_offsets = [line.offset for line in asmblock.lines]
+
+        loc_db = self.ir_arch.loc_db
+        local_loc_keys = []
+        for irblocks in irblocks_list:
+            for irblock in irblocks:
+                local_loc_keys.append(irblock.loc_key)
+
         def myfunc(cpu):
             """Execute the function according to cpu and vmmngr states
             @cpu: JitCpu instance
@@ -49,86 +75,131 @@ class JitCore_Python(jitcore.JitCore):
             # Get virtual memory handler
             vmmngr = cpu.vmmngr
 
-            # Keep current location in irblocks
-            cur_loc_key = loc_key
-
-            # Required to detect new instructions
-            offsets_jitted = set()
-
-            # Get exec engine
+            # Get execution engine (EmulatedSymbExec instance)
             exec_engine = self.symbexec
-            expr_simp = exec_engine.expr_simp
 
-            # For each irbloc inside irblocks
-            while True:
-                # Get the current bloc
-                for irb in irblocks:
-                    if irb.loc_key == cur_loc_key:
-                        break
+            # Refresh CPU values according to @cpu instance
+            exec_engine.update_engine_from_cpu()
 
-                else:
-                    raise RuntimeError("Irblocks must end with returning an "
-                                       "ExprInt instance")
-
-                # Refresh CPU values according to @cpu instance
-                exec_engine.update_engine_from_cpu()
-
-                # Execute current ir bloc
-                for assignblk in irb:
-                    instr = assignblk.instr
-                    # For each new instruction (in assembly)
-                    if instr is not None and instr.offset not in offsets_jitted:
-                        # Test exceptions
-                        vmmngr.check_invalid_code_blocs()
-                        vmmngr.check_memory_breakpoint()
-                        if vmmngr.get_exception():
-                            exec_engine.update_cpu_from_engine()
-                            return instr.offset
+            # Get initial loc_key
+            cur_loc_key = asmblock.loc_key
 
-                        offsets_jitted.add(instr.offset)
+            # Update PC helper
+            update_pc = lambda value: setattr(cpu, self.ir_arch.pc.name, value)
 
-                        # Log registers values
-                        if self.log_regs:
-                            exec_engine.update_cpu_from_engine()
-                            exec_engine.cpu.dump_gpregs_with_attrib(self.ir_arch.attrib)
+            while True:
+                # Retrieve the expected irblock
+                for instr, irblocks in zip(asmblock.lines, irblocks_list):
+                    for index, irblock in enumerate(irblocks):
+                        if irblock.loc_key == cur_loc_key:
+                            break
+                    else:
+                        continue
+                    break
+                else:
+                    raise RuntimeError("Unable to find the block for %r" % cur_loc_key)
+
+                instr_attrib, irblocks_attributes = codegen.get_attributes(
+                    instr, irblocks, self.log_mn, self.log_regs
+                )
+                irblock_attributes = irblocks_attributes[index]
+
+                # Do IRBlock
+                new_irblock = self.ir_arch.irbloc_fix_regs_for_mode(
+                    irblock, self.ir_arch.attrib
+                )
+                if index == 0:
+                    # Pre code
+                    if instr_attrib.log_mn:
+                        print "%.8X %s" % (
+                            instr_attrib.instr.offset,
+                            instr_attrib.instr.to_string(loc_db)
+                        )
+
+                # Exec IRBlock
+                instr = instr_attrib.instr
+
+                for index, assignblk in enumerate(irblock):
+                    attributes = irblock_attributes[index]
 
-                        # Log instruction
-                        if self.log_mn:
-                            print "%08x %s" % (instr.offset, instr)
+                    # Eval current instruction (in IR)
+                    exec_engine.eval_updt_assignblk(assignblk)
 
-                        # Check for exception
-                        if (vmmngr.get_exception() != 0 or
-                            cpu.get_exception() != 0):
-                            exec_engine.update_cpu_from_engine()
+                    # Check memory access / write exception
+                    # TODO: insert a check between memory reads and writes
+                    if attributes.mem_read or attributes.mem_write:
+                        # Restricted exception
+                        flag = ~csts.EXCEPT_CODE_AUTOMOD & csts.EXCEPT_DO_NOT_UPDATE_PC
+                        if (vmmngr.get_exception() & flag != 0):
+                            # Do not update registers
+                            update_pc(instr.offset)
                             return instr.offset
 
-                    # Eval current instruction (in IR)
-                    exec_engine.eval_updt_assignblk(assignblk)
-                    # Check for exceptions which do not update PC
+                # Update registers values
                 exec_engine.update_cpu_from_engine()
-                    if (vmmngr.get_exception() & csts.EXCEPT_DO_NOT_UPDATE_PC != 0 or
-                        cpu.get_exception() > csts.EXCEPT_NUM_UPDT_EIP):
-                        return instr.offset
-
-                vmmngr.check_invalid_code_blocs()
-                vmmngr.check_memory_breakpoint()
 
-                # Get next bloc address
-                ad = expr_simp(exec_engine.eval_expr(self.ir_arch.IRDst))
+                # Check post assignblk exception flags
+                if attributes.set_exception:
+                    # Restricted exception
+                    if cpu.get_exception() > csts.EXCEPT_NUM_UPDT_EIP:
+                        # Update PC
+                        update_pc(instr.offset)
+                        return instr.offset
 
-                # Updates @cpu instance according to new CPU values
-                exec_engine.update_cpu_from_engine()
+                dst = exec_engine.eval_expr(self.ir_arch.IRDst)
+                if dst.is_int():
+                    loc_key = loc_db.get_or_create_offset_location(int(dst))
+                    dst = ExprLoc(loc_key, dst.size)
+
+                assert dst.is_loc()
+                loc_key = dst.loc_key
+                offset = loc_db.get_location_offset(loc_key)
+                if offset is None:
+                    # Avoid checks on generated label
+                    cur_loc_key = loc_key
+                    continue
+
+                if instr_attrib.log_regs:
+                    update_pc(offset)
+                    cpu.dump_gpregs_with_attrib(self.ir_arch.attrib)
+
+                # Post-instr checks
+                if instr_attrib.mem_read | instr_attrib.mem_write:
+                    vmmngr.check_memory_breakpoint()
+                    vmmngr.check_invalid_code_blocs()
+                    if vmmngr.get_exception():
+                        update_pc(offset)
+                        return offset
+
+                if instr_attrib.set_exception:
+                    if cpu.get_exception():
+                        update_pc(offset)
+                        return offset
+
+                if instr_attrib.mem_read | instr_attrib.mem_write:
+                    vmmngr.reset_memory_access()
 
                 # Manage resulting address
-                if isinstance(ad, m2_expr.ExprInt):
-                    return ad.arg.arg
-                elif isinstance(ad, m2_expr.ExprLoc):
-                    cur_loc_key = ad.loc_key
-                else:
-                    raise NotImplementedError("Type not handled: %s" % ad)
+                if (loc_key in local_loc_keys and
+                    offset > instr.offset):
+                    # Forward local jump
+                    # Note: a backward local jump has to be promoted to extern,
+                    # for max_exec_per_call support
+                    cur_loc_key = loc_key
+                    continue
+
+                # Delay slot
+                if has_delayslot:
+                    delay_slot_set = exec_engine.eval_expr(codegen.delay_slot_set)
+                    if delay_slot_set.is_int() and int(delay_slot_set) != 0:
+                        return int(exec_engine.eval_expr(codegen.delay_slot_dst))
+
+                # Extern of asmblock, must have an offset
+                assert offset is not None
+                return offset
 
         # Associate myfunc with current loc_key
-        offset = self.ir_arch.loc_db.get_location_offset(loc_key)
+        offset = loc_db.get_location_offset(asmblock.loc_key)
         assert offset is not None
         self.offset_to_jitted_func[offset] = myfunc
diff --git a/miasm2/jitter/llvmconvert.py b/miasm2/jitter/llvmconvert.py
index 37ce8d52..41461c3a 100644
--- a/miasm2/jitter/llvmconvert.py
+++ b/miasm2/jitter/llvmconvert.py
@@ -874,15 +874,15 @@ class LLVMFunction(object):
             self.update_cache(expr, ret)
             return ret
 
-        if op in ["imod", "idiv", "umod", "udiv"]:
+        if op in ["smod", "sdiv", "umod", "udiv"]:
             assert len(expr.args) == 2
             arg_b = self.add_ir(expr.args[1])
             arg_a = self.add_ir(expr.args[0])
-            if op == "imod":
+            if op == "smod":
                 callback = builder.srem
-            elif op == "idiv":
+            elif op == "sdiv":
                 callback = builder.sdiv
             elif op == "umod":
                 callback = builder.urem
@@ -1297,7 +1297,7 @@ class LLVMFunction(object):
         # Update PC for dump_gpregs
         PC = self.llvm_context.PC
         t_size = LLVMType.IntType(PC.size)
-        dst = self.builder.zext(t_size(pc_value), LLVMType.IntType(PC.size))
+        dst = self.builder.zext(t_size(pc_value), t_size)
         self.affect(dst, PC)
 
         fc_ptr = self.mod.get_global(self.llvm_context.logging_func)
diff --git a/miasm2/jitter/op_semantics.c b/miasm2/jitter/op_semantics.c
index 091da87f..46e6cca1 100644
--- a/miasm2/jitter/op_semantics.c
+++ b/miasm2/jitter/op_semantics.c
@@ -738,12 +738,12 @@ UMOD(16)
 UMOD(32)
 UMOD(64)
 
-IDIV(8)
-IDIV(16)
-IDIV(32)
-IDIV(64)
-
-IMOD(8)
-IMOD(16)
-IMOD(32)
-IMOD(64)
+SDIV(8)
+SDIV(16)
+SDIV(32)
+SDIV(64)
+
+SMOD(8)
+SMOD(16)
+SMOD(32)
+SMOD(64)
diff --git a/miasm2/jitter/op_semantics.h b/miasm2/jitter/op_semantics.h
index 921c9b9e..690cfb35 100644
--- a/miasm2/jitter/op_semantics.h
+++ b/miasm2/jitter/op_semantics.h
@@ -66,8 +66,8 @@ _MIASM_EXPORT unsigned int cnttrailzeros(uint64_t size, uint64_t src);
 	}
 
-#define IDIV(sizeA) \
-	int ## sizeA ## _t idiv ## sizeA (int ## sizeA ## _t a, int ## sizeA ## _t b) \
+#define SDIV(sizeA) \
+	int ## sizeA ## _t sdiv ## sizeA (int ## sizeA ## _t a, int ## sizeA ## _t b) \
 	{ \
 		int ## sizeA ## _t r; \
 		if (b == 0) { \
@@ -79,8 +79,8 @@ _MIASM_EXPORT unsigned int cnttrailzeros(uint64_t size, uint64_t src);
 	}
 
-#define IMOD(sizeA) \
-	int ## sizeA ## _t imod ## sizeA (int ## sizeA ## _t a, int ## sizeA ## _t b) \
+#define SMOD(sizeA) \
+	int ## sizeA ## _t smod ## sizeA (int ## sizeA ## _t a, int ## sizeA ## _t b) \
 	{ \
 		int ## sizeA ## _t r; \
 		if (b == 0) { \
@@ -93,23 +93,23 @@ _MIASM_EXPORT unsigned int cnttrailzeros(uint64_t size, uint64_t src);
 
 _MIASM_EXPORT uint64_t udiv64(uint64_t a, uint64_t b);
 _MIASM_EXPORT uint64_t umod64(uint64_t a, uint64_t b);
-_MIASM_EXPORT int64_t idiv64(int64_t a, int64_t b);
-_MIASM_EXPORT int64_t imod64(int64_t a, int64_t b);
+_MIASM_EXPORT int64_t sdiv64(int64_t a, int64_t b);
+_MIASM_EXPORT int64_t smod64(int64_t a, int64_t b);
 _MIASM_EXPORT uint32_t udiv32(uint32_t a, uint32_t b);
 _MIASM_EXPORT uint32_t umod32(uint32_t a, uint32_t b);
-_MIASM_EXPORT int32_t idiv32(int32_t a, int32_t b);
-_MIASM_EXPORT int32_t imod32(int32_t a, int32_t b);
+_MIASM_EXPORT int32_t sdiv32(int32_t a, int32_t b);
+_MIASM_EXPORT int32_t smod32(int32_t a, int32_t b);
 _MIASM_EXPORT uint16_t udiv16(uint16_t a, uint16_t b);
 _MIASM_EXPORT uint16_t umod16(uint16_t a, uint16_t b);
-_MIASM_EXPORT int16_t idiv16(int16_t a, int16_t b);
-_MIASM_EXPORT int16_t imod16(int16_t a, int16_t b);
+_MIASM_EXPORT int16_t sdiv16(int16_t a, int16_t b);
+_MIASM_EXPORT int16_t smod16(int16_t a, int16_t b);
 _MIASM_EXPORT uint8_t udiv8(uint8_t a, uint8_t b);
 _MIASM_EXPORT uint8_t umod8(uint8_t a, uint8_t b);
-_MIASM_EXPORT int8_t idiv8(int8_t a, int8_t b);
-_MIASM_EXPORT int8_t imod8(int8_t a, int8_t b);
+_MIASM_EXPORT int8_t sdiv8(int8_t a, int8_t b);
+_MIASM_EXPORT int8_t smod8(int8_t a, int8_t b);
 
 _MIASM_EXPORT unsigned int x86_cpuid(unsigned int a, unsigned int reg_num);