Diffstat (limited to 'target/i386/tcg/seg_helper.c')
| -rw-r--r-- | target/i386/tcg/seg_helper.c | 662 |
1 file changed, 359 insertions, 303 deletions
diff --git a/target/i386/tcg/seg_helper.c b/target/i386/tcg/seg_helper.c index aee3d19f29..aac092a356 100644 --- a/target/i386/tcg/seg_helper.c +++ b/target/i386/tcg/seg_helper.c @@ -27,6 +27,70 @@ #include "exec/log.h" #include "helper-tcg.h" #include "seg_helper.h" +#include "access.h" + +#ifdef TARGET_X86_64 +#define SET_ESP(val, sp_mask) \ + do { \ + if ((sp_mask) == 0xffff) { \ + env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | \ + ((val) & 0xffff); \ + } else if ((sp_mask) == 0xffffffffLL) { \ + env->regs[R_ESP] = (uint32_t)(val); \ + } else { \ + env->regs[R_ESP] = (val); \ + } \ + } while (0) +#else +#define SET_ESP(val, sp_mask) \ + do { \ + env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) | \ + ((val) & (sp_mask)); \ + } while (0) +#endif + +/* XXX: use mmu_index to have proper DPL support */ +typedef struct StackAccess +{ + CPUX86State *env; + uintptr_t ra; + target_ulong ss_base; + target_ulong sp; + target_ulong sp_mask; + int mmu_index; +} StackAccess; + +static void pushw(StackAccess *sa, uint16_t val) +{ + sa->sp -= 2; + cpu_stw_mmuidx_ra(sa->env, sa->ss_base + (sa->sp & sa->sp_mask), + val, sa->mmu_index, sa->ra); +} + +static void pushl(StackAccess *sa, uint32_t val) +{ + sa->sp -= 4; + cpu_stl_mmuidx_ra(sa->env, sa->ss_base + (sa->sp & sa->sp_mask), + val, sa->mmu_index, sa->ra); +} + +static uint16_t popw(StackAccess *sa) +{ + uint16_t ret = cpu_lduw_mmuidx_ra(sa->env, + sa->ss_base + (sa->sp & sa->sp_mask), + sa->mmu_index, sa->ra); + sa->sp += 2; + return ret; +} + +static uint32_t popl(StackAccess *sa) +{ + uint32_t ret = cpu_ldl_mmuidx_ra(sa->env, + sa->ss_base + (sa->sp & sa->sp_mask), + sa->mmu_index, sa->ra); + sa->sp += 4; + return ret; +} int get_pg_mode(CPUX86State *env) { @@ -250,14 +314,15 @@ static int switch_tss_ra(CPUX86State *env, int tss_selector, uint32_t e1, uint32_t e2, int source, uint32_t next_eip, uintptr_t retaddr) { - int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i; + int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, i; target_ulong tss_base; uint32_t new_regs[8], new_segs[6]; uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap; uint32_t old_eflags, eflags_mask; SegmentCache *dt; - int index; + int mmu_index, index; target_ulong ptr; + X86Access old, new; type = (e2 >> DESC_TYPE_SHIFT) & 0xf; LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, @@ -306,35 +371,86 @@ static int switch_tss_ra(CPUX86State *env, int tss_selector, old_tss_limit_max = 43; } + /* new TSS must be busy iff the source is an IRET instruction */ + if (!!(e2 & DESC_TSS_BUSY_MASK) != (source == SWITCH_TSS_IRET)) { + raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr); + } + + /* X86Access avoids memory exceptions during the task switch */ + mmu_index = cpu_mmu_index_kernel(env); + access_prepare_mmu(&old, env, env->tr.base, old_tss_limit_max, + MMU_DATA_STORE, mmu_index, retaddr); + + if (source == SWITCH_TSS_CALL) { + /* Probe for future write of parent task */ + probe_access(env, tss_base, 2, MMU_DATA_STORE, + mmu_index, retaddr); + } + access_prepare_mmu(&new, env, tss_base, tss_limit, + MMU_DATA_LOAD, mmu_index, retaddr); + + /* save the current state in the old TSS */ + old_eflags = cpu_compute_eflags(env); + if (old_type & 8) { + /* 32 bit */ + access_stl(&old, env->tr.base + 0x20, next_eip); + access_stl(&old, env->tr.base + 0x24, old_eflags); + access_stl(&old, env->tr.base + (0x28 + 0 * 4), env->regs[R_EAX]); + access_stl(&old, env->tr.base + (0x28 + 1 * 4), env->regs[R_ECX]); + 
access_stl(&old, env->tr.base + (0x28 + 2 * 4), env->regs[R_EDX]); + access_stl(&old, env->tr.base + (0x28 + 3 * 4), env->regs[R_EBX]); + access_stl(&old, env->tr.base + (0x28 + 4 * 4), env->regs[R_ESP]); + access_stl(&old, env->tr.base + (0x28 + 5 * 4), env->regs[R_EBP]); + access_stl(&old, env->tr.base + (0x28 + 6 * 4), env->regs[R_ESI]); + access_stl(&old, env->tr.base + (0x28 + 7 * 4), env->regs[R_EDI]); + for (i = 0; i < 6; i++) { + access_stw(&old, env->tr.base + (0x48 + i * 4), + env->segs[i].selector); + } + } else { + /* 16 bit */ + access_stw(&old, env->tr.base + 0x0e, next_eip); + access_stw(&old, env->tr.base + 0x10, old_eflags); + access_stw(&old, env->tr.base + (0x12 + 0 * 2), env->regs[R_EAX]); + access_stw(&old, env->tr.base + (0x12 + 1 * 2), env->regs[R_ECX]); + access_stw(&old, env->tr.base + (0x12 + 2 * 2), env->regs[R_EDX]); + access_stw(&old, env->tr.base + (0x12 + 3 * 2), env->regs[R_EBX]); + access_stw(&old, env->tr.base + (0x12 + 4 * 2), env->regs[R_ESP]); + access_stw(&old, env->tr.base + (0x12 + 5 * 2), env->regs[R_EBP]); + access_stw(&old, env->tr.base + (0x12 + 6 * 2), env->regs[R_ESI]); + access_stw(&old, env->tr.base + (0x12 + 7 * 2), env->regs[R_EDI]); + for (i = 0; i < 4; i++) { + access_stw(&old, env->tr.base + (0x22 + i * 2), + env->segs[i].selector); + } + } + /* read all the registers from the new TSS */ if (type & 8) { /* 32 bit */ - new_cr3 = cpu_ldl_kernel_ra(env, tss_base + 0x1c, retaddr); - new_eip = cpu_ldl_kernel_ra(env, tss_base + 0x20, retaddr); - new_eflags = cpu_ldl_kernel_ra(env, tss_base + 0x24, retaddr); + new_cr3 = access_ldl(&new, tss_base + 0x1c); + new_eip = access_ldl(&new, tss_base + 0x20); + new_eflags = access_ldl(&new, tss_base + 0x24); for (i = 0; i < 8; i++) { - new_regs[i] = cpu_ldl_kernel_ra(env, tss_base + (0x28 + i * 4), - retaddr); + new_regs[i] = access_ldl(&new, tss_base + (0x28 + i * 4)); } for (i = 0; i < 6; i++) { - new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x48 + i * 4), - retaddr); + new_segs[i] = access_ldw(&new, tss_base + (0x48 + i * 4)); } - new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x60, retaddr); - new_trap = cpu_ldl_kernel_ra(env, tss_base + 0x64, retaddr); + new_ldt = access_ldw(&new, tss_base + 0x60); + new_trap = access_ldl(&new, tss_base + 0x64); } else { /* 16 bit */ new_cr3 = 0; - new_eip = cpu_lduw_kernel_ra(env, tss_base + 0x0e, retaddr); - new_eflags = cpu_lduw_kernel_ra(env, tss_base + 0x10, retaddr); + new_eip = access_ldw(&new, tss_base + 0x0e); + new_eflags = access_ldw(&new, tss_base + 0x10); for (i = 0; i < 8; i++) { - new_regs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x12 + i * 2), retaddr); + new_regs[i] = access_ldw(&new, tss_base + (0x12 + i * 2)); } for (i = 0; i < 4; i++) { - new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x22 + i * 2), - retaddr); + new_segs[i] = access_ldw(&new, tss_base + (0x22 + i * 2)); } - new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x2a, retaddr); + new_ldt = access_ldw(&new, tss_base + 0x2a); new_segs[R_FS] = 0; new_segs[R_GS] = 0; new_trap = 0; @@ -344,65 +460,26 @@ static int switch_tss_ra(CPUX86State *env, int tss_selector, chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */ (void)new_trap; - /* NOTE: we must avoid memory exceptions during the task switch, - so we make dummy accesses before */ - /* XXX: it can still fail in some cases, so a bigger hack is - necessary to valid the TLB after having done the accesses */ - - v1 = cpu_ldub_kernel_ra(env, env->tr.base, retaddr); - v2 = cpu_ldub_kernel_ra(env, env->tr.base + old_tss_limit_max, 
retaddr); - cpu_stb_kernel_ra(env, env->tr.base, v1, retaddr); - cpu_stb_kernel_ra(env, env->tr.base + old_tss_limit_max, v2, retaddr); - /* clear busy bit (it is restartable) */ if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) { tss_set_busy(env, env->tr.selector, 0, retaddr); } - old_eflags = cpu_compute_eflags(env); + if (source == SWITCH_TSS_IRET) { old_eflags &= ~NT_MASK; + if (old_type & 8) { + access_stl(&old, env->tr.base + 0x24, old_eflags); + } else { + access_stw(&old, env->tr.base + 0x10, old_eflags); + } } - /* save the current state in the old TSS */ - if (old_type & 8) { - /* 32 bit */ - cpu_stl_kernel_ra(env, env->tr.base + 0x20, next_eip, retaddr); - cpu_stl_kernel_ra(env, env->tr.base + 0x24, old_eflags, retaddr); - cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 0 * 4), env->regs[R_EAX], retaddr); - cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 1 * 4), env->regs[R_ECX], retaddr); - cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 2 * 4), env->regs[R_EDX], retaddr); - cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 3 * 4), env->regs[R_EBX], retaddr); - cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 4 * 4), env->regs[R_ESP], retaddr); - cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 5 * 4), env->regs[R_EBP], retaddr); - cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 6 * 4), env->regs[R_ESI], retaddr); - cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 7 * 4), env->regs[R_EDI], retaddr); - for (i = 0; i < 6; i++) { - cpu_stw_kernel_ra(env, env->tr.base + (0x48 + i * 4), - env->segs[i].selector, retaddr); - } - } else { - /* 16 bit */ - cpu_stw_kernel_ra(env, env->tr.base + 0x0e, next_eip, retaddr); - cpu_stw_kernel_ra(env, env->tr.base + 0x10, old_eflags, retaddr); - cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 0 * 2), env->regs[R_EAX], retaddr); - cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 1 * 2), env->regs[R_ECX], retaddr); - cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 2 * 2), env->regs[R_EDX], retaddr); - cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 3 * 2), env->regs[R_EBX], retaddr); - cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 4 * 2), env->regs[R_ESP], retaddr); - cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 5 * 2), env->regs[R_EBP], retaddr); - cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 6 * 2), env->regs[R_ESI], retaddr); - cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 7 * 2), env->regs[R_EDI], retaddr); - for (i = 0; i < 4; i++) { - cpu_stw_kernel_ra(env, env->tr.base + (0x22 + i * 2), - env->segs[i].selector, retaddr); - } - } - - /* now if an exception occurs, it will occurs in the next task - context */ - if (source == SWITCH_TSS_CALL) { - cpu_stw_kernel_ra(env, tss_base, env->tr.selector, retaddr); + /* + * Thanks to the probe_access above, we know the first two + * bytes addressed by &new are writable too. 
+ */ + access_stw(&new, tss_base, env->tr.selector); new_eflags |= NT_MASK; } @@ -412,7 +489,9 @@ static int switch_tss_ra(CPUX86State *env, int tss_selector, } /* set the new CPU state */ - /* from this point, any exception which occurs can give problems */ + + /* now if an exception occurs, it will occur in the next task context */ + env->cr[0] |= CR0_TS_MASK; env->hflags |= HF_TS_MASK; env->tr.selector = tss_selector; @@ -559,72 +638,19 @@ int exception_has_error_code(int intno) return 0; } -#ifdef TARGET_X86_64 -#define SET_ESP(val, sp_mask) \ - do { \ - if ((sp_mask) == 0xffff) { \ - env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | \ - ((val) & 0xffff); \ - } else if ((sp_mask) == 0xffffffffLL) { \ - env->regs[R_ESP] = (uint32_t)(val); \ - } else { \ - env->regs[R_ESP] = (val); \ - } \ - } while (0) -#else -#define SET_ESP(val, sp_mask) \ - do { \ - env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) | \ - ((val) & (sp_mask)); \ - } while (0) -#endif - -/* in 64-bit machines, this can overflow. So this segment addition macro - * can be used to trim the value to 32-bit whenever needed */ -#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask)))) - -/* XXX: add a is_user flag to have proper security support */ -#define PUSHW_RA(ssp, sp, sp_mask, val, ra) \ - { \ - sp -= 2; \ - cpu_stw_kernel_ra(env, (ssp) + (sp & (sp_mask)), (val), ra); \ - } - -#define PUSHL_RA(ssp, sp, sp_mask, val, ra) \ - { \ - sp -= 4; \ - cpu_stl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val), ra); \ - } - -#define POPW_RA(ssp, sp, sp_mask, val, ra) \ - { \ - val = cpu_lduw_kernel_ra(env, (ssp) + (sp & (sp_mask)), ra); \ - sp += 2; \ - } - -#define POPL_RA(ssp, sp, sp_mask, val, ra) \ - { \ - val = (uint32_t)cpu_ldl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), ra); \ - sp += 4; \ - } - -#define PUSHW(ssp, sp, sp_mask, val) PUSHW_RA(ssp, sp, sp_mask, val, 0) -#define PUSHL(ssp, sp, sp_mask, val) PUSHL_RA(ssp, sp, sp_mask, val, 0) -#define POPW(ssp, sp, sp_mask, val) POPW_RA(ssp, sp, sp_mask, val, 0) -#define POPL(ssp, sp, sp_mask, val) POPL_RA(ssp, sp, sp_mask, val, 0) - /* protected mode interrupt */ static void do_interrupt_protected(CPUX86State *env, int intno, int is_int, int error_code, unsigned int next_eip, int is_hw) { SegmentCache *dt; - target_ulong ptr, ssp; + target_ulong ptr; int type, dpl, selector, ss_dpl, cpl; int has_error_code, new_stack, shift; - uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0; - uint32_t old_eip, sp_mask, eflags; + uint32_t e1, e2, offset, ss = 0, ss_e1 = 0, ss_e2 = 0; + uint32_t old_eip, eflags; int vm86 = env->eflags & VM_MASK; + StackAccess sa; bool set_rf; has_error_code = 0; @@ -666,6 +692,10 @@ static void do_interrupt_protected(CPUX86State *env, int intno, int is_int, raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2); } + sa.env = env; + sa.ra = 0; + sa.mmu_index = cpu_mmu_index_kernel(env); + if (type == 5) { /* task gate */ /* must do that check here to return the correct error code */ @@ -674,22 +704,20 @@ static void do_interrupt_protected(CPUX86State *env, int intno, int is_int, } shift = switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip); if (has_error_code) { - uint32_t mask; - /* push the error code */ if (env->segs[R_SS].flags & DESC_B_MASK) { - mask = 0xffffffff; + sa.sp_mask = 0xffffffff; } else { - mask = 0xffff; + sa.sp_mask = 0xffff; } - esp = (env->regs[R_ESP] - (2 << shift)) & mask; - ssp = env->segs[R_SS].base + esp; + sa.sp = env->regs[R_ESP]; + sa.ss_base = env->segs[R_SS].base; if (shift) { - 
cpu_stl_kernel(env, ssp, error_code); + pushl(&sa, error_code); } else { - cpu_stw_kernel(env, ssp, error_code); + pushw(&sa, error_code); } - SET_ESP(esp, mask); + SET_ESP(sa.sp, sa.sp_mask); } return; } @@ -723,6 +751,7 @@ static void do_interrupt_protected(CPUX86State *env, int intno, int is_int, } if (dpl < cpl) { /* to inner privilege */ + uint32_t esp; get_ss_esp_from_tss(env, &ss, &esp, dpl, 0); if ((ss & 0xfffc) == 0) { raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc); @@ -746,17 +775,18 @@ static void do_interrupt_protected(CPUX86State *env, int intno, int is_int, raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc); } new_stack = 1; - sp_mask = get_sp_mask(ss_e2); - ssp = get_seg_base(ss_e1, ss_e2); + sa.sp = esp; + sa.sp_mask = get_sp_mask(ss_e2); + sa.ss_base = get_seg_base(ss_e1, ss_e2); } else { /* to same privilege */ if (vm86) { raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); } new_stack = 0; - sp_mask = get_sp_mask(env->segs[R_SS].flags); - ssp = env->segs[R_SS].base; - esp = env->regs[R_ESP]; + sa.sp = env->regs[R_ESP]; + sa.sp_mask = get_sp_mask(env->segs[R_SS].flags); + sa.ss_base = env->segs[R_SS].base; } shift = type >> 3; @@ -781,36 +811,36 @@ static void do_interrupt_protected(CPUX86State *env, int intno, int is_int, if (shift == 1) { if (new_stack) { if (vm86) { - PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector); - PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector); - PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector); - PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector); + pushl(&sa, env->segs[R_GS].selector); + pushl(&sa, env->segs[R_FS].selector); + pushl(&sa, env->segs[R_DS].selector); + pushl(&sa, env->segs[R_ES].selector); } - PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector); - PUSHL(ssp, esp, sp_mask, env->regs[R_ESP]); + pushl(&sa, env->segs[R_SS].selector); + pushl(&sa, env->regs[R_ESP]); } - PUSHL(ssp, esp, sp_mask, eflags); - PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector); - PUSHL(ssp, esp, sp_mask, old_eip); + pushl(&sa, eflags); + pushl(&sa, env->segs[R_CS].selector); + pushl(&sa, old_eip); if (has_error_code) { - PUSHL(ssp, esp, sp_mask, error_code); + pushl(&sa, error_code); } } else { if (new_stack) { if (vm86) { - PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector); - PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector); - PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector); - PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector); + pushw(&sa, env->segs[R_GS].selector); + pushw(&sa, env->segs[R_FS].selector); + pushw(&sa, env->segs[R_DS].selector); + pushw(&sa, env->segs[R_ES].selector); } - PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector); - PUSHW(ssp, esp, sp_mask, env->regs[R_ESP]); + pushw(&sa, env->segs[R_SS].selector); + pushw(&sa, env->regs[R_ESP]); } - PUSHW(ssp, esp, sp_mask, eflags); - PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector); - PUSHW(ssp, esp, sp_mask, old_eip); + pushw(&sa, eflags); + pushw(&sa, env->segs[R_CS].selector); + pushw(&sa, old_eip); if (has_error_code) { - PUSHW(ssp, esp, sp_mask, error_code); + pushw(&sa, error_code); } } @@ -828,10 +858,10 @@ static void do_interrupt_protected(CPUX86State *env, int intno, int is_int, cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0); } ss = (ss & ~3) | dpl; - cpu_x86_load_seg_cache(env, R_SS, ss, - ssp, get_seg_limit(ss_e1, ss_e2), ss_e2); + cpu_x86_load_seg_cache(env, R_SS, ss, sa.ss_base, + get_seg_limit(ss_e1, ss_e2), ss_e2); } - SET_ESP(esp, sp_mask); + SET_ESP(sa.sp, sa.sp_mask); selector = (selector & ~3) | dpl; cpu_x86_load_seg_cache(env, R_CS, selector, @@ -843,20 
+873,18 @@ static void do_interrupt_protected(CPUX86State *env, int intno, int is_int, #ifdef TARGET_X86_64 -#define PUSHQ_RA(sp, val, ra) \ - { \ - sp -= 8; \ - cpu_stq_kernel_ra(env, sp, (val), ra); \ - } - -#define POPQ_RA(sp, val, ra) \ - { \ - val = cpu_ldq_kernel_ra(env, sp, ra); \ - sp += 8; \ - } +static void pushq(StackAccess *sa, uint64_t val) +{ + sa->sp -= 8; + cpu_stq_mmuidx_ra(sa->env, sa->sp, val, sa->mmu_index, sa->ra); +} -#define PUSHQ(sp, val) PUSHQ_RA(sp, val, 0) -#define POPQ(sp, val) POPQ_RA(sp, val, 0) +static uint64_t popq(StackAccess *sa) +{ + uint64_t ret = cpu_ldq_mmuidx_ra(sa->env, sa->sp, sa->mmu_index, sa->ra); + sa->sp += 8; + return ret; +} static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level) { @@ -899,8 +927,9 @@ static void do_interrupt64(CPUX86State *env, int intno, int is_int, int type, dpl, selector, cpl, ist; int has_error_code, new_stack; uint32_t e1, e2, e3, ss, eflags; - target_ulong old_eip, esp, offset; + target_ulong old_eip, offset; bool set_rf; + StackAccess sa; has_error_code = 0; if (!is_int && !is_hw) { @@ -968,10 +997,16 @@ static void do_interrupt64(CPUX86State *env, int intno, int is_int, if (e2 & DESC_C_MASK) { dpl = cpl; } + + sa.env = env; + sa.ra = 0; + sa.mmu_index = cpu_mmu_index_kernel(env); + sa.sp_mask = -1; + sa.ss_base = 0; if (dpl < cpl || ist != 0) { /* to inner privilege */ new_stack = 1; - esp = get_rsp_from_tss(env, ist != 0 ? ist + 3 : dpl); + sa.sp = get_rsp_from_tss(env, ist != 0 ? ist + 3 : dpl); ss = 0; } else { /* to same privilege */ @@ -979,9 +1014,9 @@ static void do_interrupt64(CPUX86State *env, int intno, int is_int, raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); } new_stack = 0; - esp = env->regs[R_ESP]; + sa.sp = env->regs[R_ESP]; } - esp &= ~0xfLL; /* align stack */ + sa.sp &= ~0xfLL; /* align stack */ /* See do_interrupt_protected. */ eflags = cpu_compute_eflags(env); @@ -989,13 +1024,13 @@ static void do_interrupt64(CPUX86State *env, int intno, int is_int, eflags |= RF_MASK; } - PUSHQ(esp, env->segs[R_SS].selector); - PUSHQ(esp, env->regs[R_ESP]); - PUSHQ(esp, eflags); - PUSHQ(esp, env->segs[R_CS].selector); - PUSHQ(esp, old_eip); + pushq(&sa, env->segs[R_SS].selector); + pushq(&sa, env->regs[R_ESP]); + pushq(&sa, eflags); + pushq(&sa, env->segs[R_CS].selector); + pushq(&sa, old_eip); if (has_error_code) { - PUSHQ(esp, error_code); + pushq(&sa, error_code); } /* interrupt gate clear IF mask */ @@ -1008,7 +1043,7 @@ static void do_interrupt64(CPUX86State *env, int intno, int is_int, ss = 0 | dpl; cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, dpl << DESC_DPL_SHIFT); } - env->regs[R_ESP] = esp; + env->regs[R_ESP] = sa.sp; selector = (selector & ~3) | dpl; cpu_x86_load_seg_cache(env, R_CS, selector, @@ -1080,10 +1115,11 @@ static void do_interrupt_real(CPUX86State *env, int intno, int is_int, int error_code, unsigned int next_eip) { SegmentCache *dt; - target_ulong ptr, ssp; + target_ulong ptr; int selector; - uint32_t offset, esp; + uint32_t offset; uint32_t old_cs, old_eip; + StackAccess sa; /* real mode (simpler!) 
*/ dt = &env->idt; @@ -1093,8 +1129,14 @@ static void do_interrupt_real(CPUX86State *env, int intno, int is_int, ptr = dt->base + intno * 4; offset = cpu_lduw_kernel(env, ptr); selector = cpu_lduw_kernel(env, ptr + 2); - esp = env->regs[R_ESP]; - ssp = env->segs[R_SS].base; + + sa.env = env; + sa.ra = 0; + sa.sp = env->regs[R_ESP]; + sa.sp_mask = 0xffff; + sa.ss_base = env->segs[R_SS].base; + sa.mmu_index = cpu_mmu_index_kernel(env); + if (is_int) { old_eip = next_eip; } else { @@ -1102,12 +1144,12 @@ static void do_interrupt_real(CPUX86State *env, int intno, int is_int, } old_cs = env->segs[R_CS].selector; /* XXX: use SS segment size? */ - PUSHW(ssp, esp, 0xffff, cpu_compute_eflags(env)); - PUSHW(ssp, esp, 0xffff, old_cs); - PUSHW(ssp, esp, 0xffff, old_eip); + pushw(&sa, cpu_compute_eflags(env)); + pushw(&sa, old_cs); + pushw(&sa, old_eip); /* update processor state */ - env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | (esp & 0xffff); + SET_ESP(sa.sp, sa.sp_mask); env->eip = offset; env->segs[R_CS].selector = selector; env->segs[R_CS].base = (selector << 4); @@ -1550,21 +1592,24 @@ void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip, void helper_lcall_real(CPUX86State *env, uint32_t new_cs, uint32_t new_eip, int shift, uint32_t next_eip) { - uint32_t esp, esp_mask; - target_ulong ssp; + StackAccess sa; + + sa.env = env; + sa.ra = GETPC(); + sa.sp = env->regs[R_ESP]; + sa.sp_mask = get_sp_mask(env->segs[R_SS].flags); + sa.ss_base = env->segs[R_SS].base; + sa.mmu_index = cpu_mmu_index_kernel(env); - esp = env->regs[R_ESP]; - esp_mask = get_sp_mask(env->segs[R_SS].flags); - ssp = env->segs[R_SS].base; if (shift) { - PUSHL_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC()); - PUSHL_RA(ssp, esp, esp_mask, next_eip, GETPC()); + pushl(&sa, env->segs[R_CS].selector); + pushl(&sa, next_eip); } else { - PUSHW_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC()); - PUSHW_RA(ssp, esp, esp_mask, next_eip, GETPC()); + pushw(&sa, env->segs[R_CS].selector); + pushw(&sa, next_eip); } - SET_ESP(esp, esp_mask); + SET_ESP(sa.sp, sa.sp_mask); env->eip = new_eip; env->segs[R_CS].selector = new_cs; env->segs[R_CS].base = (new_cs << 4); @@ -1576,9 +1621,10 @@ void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip, { int new_stack, i; uint32_t e1, e2, cpl, dpl, rpl, selector, param_count; - uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, type, ss_dpl, sp_mask; + uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, type, ss_dpl; uint32_t val, limit, old_sp_mask; - target_ulong ssp, old_ssp, offset, sp; + target_ulong old_ssp, offset; + StackAccess sa; LOG_PCALL("lcall %04x:" TARGET_FMT_lx " s=%d\n", new_cs, new_eip, shift); LOG_PCALL_STATE(env_cpu(env)); @@ -1590,6 +1636,11 @@ void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip, } cpl = env->hflags & HF_CPL_MASK; LOG_PCALL("desc=%08x:%08x\n", e1, e2); + + sa.env = env; + sa.ra = GETPC(); + sa.mmu_index = cpu_mmu_index_kernel(env); + if (e2 & DESC_S_MASK) { if (!(e2 & DESC_CS_MASK)) { raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); @@ -1617,14 +1668,14 @@ void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip, #ifdef TARGET_X86_64 /* XXX: check 16/32 bit cases in long mode */ if (shift == 2) { - target_ulong rsp; - /* 64 bit case */ - rsp = env->regs[R_ESP]; - PUSHQ_RA(rsp, env->segs[R_CS].selector, GETPC()); - PUSHQ_RA(rsp, next_eip, GETPC()); + sa.sp = env->regs[R_ESP]; + sa.sp_mask = -1; + sa.ss_base = 0; + pushq(&sa, env->segs[R_CS].selector); + 
pushq(&sa, next_eip); /* from this point, not restartable */ - env->regs[R_ESP] = rsp; + env->regs[R_ESP] = sa.sp; cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl, get_seg_base(e1, e2), get_seg_limit(e1, e2), e2); @@ -1632,15 +1683,15 @@ void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip, } else #endif { - sp = env->regs[R_ESP]; - sp_mask = get_sp_mask(env->segs[R_SS].flags); - ssp = env->segs[R_SS].base; + sa.sp = env->regs[R_ESP]; + sa.sp_mask = get_sp_mask(env->segs[R_SS].flags); + sa.ss_base = env->segs[R_SS].base; if (shift) { - PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC()); - PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC()); + pushl(&sa, env->segs[R_CS].selector); + pushl(&sa, next_eip); } else { - PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC()); - PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC()); + pushw(&sa, env->segs[R_CS].selector); + pushw(&sa, next_eip); } limit = get_seg_limit(e1, e2); @@ -1648,7 +1699,7 @@ void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip, raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); } /* from this point, not restartable */ - SET_ESP(sp, sp_mask); + SET_ESP(sa.sp, sa.sp_mask); cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl, get_seg_base(e1, e2), limit, e2); env->eip = new_eip; @@ -1743,13 +1794,13 @@ void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip, /* to inner privilege */ #ifdef TARGET_X86_64 if (shift == 2) { - sp = get_rsp_from_tss(env, dpl); ss = dpl; /* SS = NULL selector with RPL = new CPL */ new_stack = 1; - sp_mask = 0; - ssp = 0; /* SS base is always zero in IA-32e mode */ + sa.sp = get_rsp_from_tss(env, dpl); + sa.sp_mask = -1; + sa.ss_base = 0; /* SS base is always zero in IA-32e mode */ LOG_PCALL("new ss:rsp=%04x:%016llx env->regs[R_ESP]=" - TARGET_FMT_lx "\n", ss, sp, env->regs[R_ESP]); + TARGET_FMT_lx "\n", ss, sa.sp, env->regs[R_ESP]); } else #endif { @@ -1758,7 +1809,6 @@ void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip, LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]=" TARGET_FMT_lx "\n", ss, sp32, param_count, env->regs[R_ESP]); - sp = sp32; if ((ss & 0xfffc) == 0) { raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC()); } @@ -1781,63 +1831,64 @@ void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip, raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC()); } - sp_mask = get_sp_mask(ss_e2); - ssp = get_seg_base(ss_e1, ss_e2); + sa.sp = sp32; + sa.sp_mask = get_sp_mask(ss_e2); + sa.ss_base = get_seg_base(ss_e1, ss_e2); } /* push_size = ((param_count * 2) + 8) << shift; */ - old_sp_mask = get_sp_mask(env->segs[R_SS].flags); old_ssp = env->segs[R_SS].base; + #ifdef TARGET_X86_64 if (shift == 2) { /* XXX: verify if new stack address is canonical */ - PUSHQ_RA(sp, env->segs[R_SS].selector, GETPC()); - PUSHQ_RA(sp, env->regs[R_ESP], GETPC()); + pushq(&sa, env->segs[R_SS].selector); + pushq(&sa, env->regs[R_ESP]); /* parameters aren't supported for 64-bit call gates */ } else #endif if (shift == 1) { - PUSHL_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC()); - PUSHL_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC()); + pushl(&sa, env->segs[R_SS].selector); + pushl(&sa, env->regs[R_ESP]); for (i = param_count - 1; i >= 0; i--) { - val = cpu_ldl_kernel_ra(env, old_ssp + - ((env->regs[R_ESP] + i * 4) & - old_sp_mask), GETPC()); - PUSHL_RA(ssp, sp, sp_mask, val, GETPC()); + val = cpu_ldl_data_ra(env, + old_ssp + 
((env->regs[R_ESP] + i * 4) & old_sp_mask), + GETPC()); + pushl(&sa, val); } } else { - PUSHW_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC()); - PUSHW_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC()); + pushw(&sa, env->segs[R_SS].selector); + pushw(&sa, env->regs[R_ESP]); for (i = param_count - 1; i >= 0; i--) { - val = cpu_lduw_kernel_ra(env, old_ssp + - ((env->regs[R_ESP] + i * 2) & - old_sp_mask), GETPC()); - PUSHW_RA(ssp, sp, sp_mask, val, GETPC()); + val = cpu_lduw_data_ra(env, + old_ssp + ((env->regs[R_ESP] + i * 2) & old_sp_mask), + GETPC()); + pushw(&sa, val); } } new_stack = 1; } else { /* to same privilege */ - sp = env->regs[R_ESP]; - sp_mask = get_sp_mask(env->segs[R_SS].flags); - ssp = env->segs[R_SS].base; + sa.sp = env->regs[R_ESP]; + sa.sp_mask = get_sp_mask(env->segs[R_SS].flags); + sa.ss_base = env->segs[R_SS].base; /* push_size = (4 << shift); */ new_stack = 0; } #ifdef TARGET_X86_64 if (shift == 2) { - PUSHQ_RA(sp, env->segs[R_CS].selector, GETPC()); - PUSHQ_RA(sp, next_eip, GETPC()); + pushq(&sa, env->segs[R_CS].selector); + pushq(&sa, next_eip); } else #endif if (shift == 1) { - PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC()); - PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC()); + pushl(&sa, env->segs[R_CS].selector); + pushl(&sa, next_eip); } else { - PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC()); - PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC()); + pushw(&sa, env->segs[R_CS].selector); + pushw(&sa, next_eip); } /* from this point, not restartable */ @@ -1851,7 +1902,7 @@ void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip, { ss = (ss & ~3) | dpl; cpu_x86_load_seg_cache(env, R_SS, ss, - ssp, + sa.ss_base, get_seg_limit(ss_e1, ss_e2), ss_e2); } @@ -1862,7 +1913,7 @@ void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip, get_seg_base(e1, e2), get_seg_limit(e1, e2), e2); - SET_ESP(sp, sp_mask); + SET_ESP(sa.sp, sa.sp_mask); env->eip = offset; } } @@ -1870,26 +1921,29 @@ void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip, /* real and vm86 mode iret */ void helper_iret_real(CPUX86State *env, int shift) { - uint32_t sp, new_cs, new_eip, new_eflags, sp_mask; - target_ulong ssp; + uint32_t new_cs, new_eip, new_eflags; int eflags_mask; + StackAccess sa; + + sa.env = env; + sa.ra = GETPC(); + sa.mmu_index = x86_mmu_index_pl(env, 0); + sa.sp_mask = 0xffff; /* XXXX: use SS segment size? */ + sa.sp = env->regs[R_ESP]; + sa.ss_base = env->segs[R_SS].base; - sp_mask = 0xffff; /* XXXX: use SS segment size? 
*/ - sp = env->regs[R_ESP]; - ssp = env->segs[R_SS].base; if (shift == 1) { /* 32 bits */ - POPL_RA(ssp, sp, sp_mask, new_eip, GETPC()); - POPL_RA(ssp, sp, sp_mask, new_cs, GETPC()); - new_cs &= 0xffff; - POPL_RA(ssp, sp, sp_mask, new_eflags, GETPC()); + new_eip = popl(&sa); + new_cs = popl(&sa) & 0xffff; + new_eflags = popl(&sa); } else { /* 16 bits */ - POPW_RA(ssp, sp, sp_mask, new_eip, GETPC()); - POPW_RA(ssp, sp, sp_mask, new_cs, GETPC()); - POPW_RA(ssp, sp, sp_mask, new_eflags, GETPC()); + new_eip = popw(&sa); + new_cs = popw(&sa); + new_eflags = popw(&sa); } - env->regs[R_ESP] = (env->regs[R_ESP] & ~sp_mask) | (sp & sp_mask); + SET_ESP(sa.sp, sa.sp_mask); env->segs[R_CS].selector = new_cs; env->segs[R_CS].base = (new_cs << 4); env->eip = new_eip; @@ -1942,47 +1996,52 @@ static inline void helper_ret_protected(CPUX86State *env, int shift, uint32_t new_es, new_ds, new_fs, new_gs; uint32_t e1, e2, ss_e1, ss_e2; int cpl, dpl, rpl, eflags_mask, iopl; - target_ulong ssp, sp, new_eip, new_esp, sp_mask; + target_ulong new_eip, new_esp; + StackAccess sa; + + cpl = env->hflags & HF_CPL_MASK; + + sa.env = env; + sa.ra = retaddr; + sa.mmu_index = x86_mmu_index_pl(env, cpl); #ifdef TARGET_X86_64 if (shift == 2) { - sp_mask = -1; + sa.sp_mask = -1; } else #endif { - sp_mask = get_sp_mask(env->segs[R_SS].flags); + sa.sp_mask = get_sp_mask(env->segs[R_SS].flags); } - sp = env->regs[R_ESP]; - ssp = env->segs[R_SS].base; + sa.sp = env->regs[R_ESP]; + sa.ss_base = env->segs[R_SS].base; new_eflags = 0; /* avoid warning */ #ifdef TARGET_X86_64 if (shift == 2) { - POPQ_RA(sp, new_eip, retaddr); - POPQ_RA(sp, new_cs, retaddr); - new_cs &= 0xffff; + new_eip = popq(&sa); + new_cs = popq(&sa) & 0xffff; if (is_iret) { - POPQ_RA(sp, new_eflags, retaddr); + new_eflags = popq(&sa); } } else #endif { if (shift == 1) { /* 32 bits */ - POPL_RA(ssp, sp, sp_mask, new_eip, retaddr); - POPL_RA(ssp, sp, sp_mask, new_cs, retaddr); - new_cs &= 0xffff; + new_eip = popl(&sa); + new_cs = popl(&sa) & 0xffff; if (is_iret) { - POPL_RA(ssp, sp, sp_mask, new_eflags, retaddr); + new_eflags = popl(&sa); if (new_eflags & VM_MASK) { goto return_to_vm86; } } } else { /* 16 bits */ - POPW_RA(ssp, sp, sp_mask, new_eip, retaddr); - POPW_RA(ssp, sp, sp_mask, new_cs, retaddr); + new_eip = popw(&sa); + new_cs = popw(&sa); if (is_iret) { - POPW_RA(ssp, sp, sp_mask, new_eflags, retaddr); + new_eflags = popw(&sa); } } } @@ -1999,7 +2058,6 @@ static inline void helper_ret_protected(CPUX86State *env, int shift, !(e2 & DESC_CS_MASK)) { raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr); } - cpl = env->hflags & HF_CPL_MASK; rpl = new_cs & 3; if (rpl < cpl) { raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr); @@ -2018,7 +2076,7 @@ static inline void helper_ret_protected(CPUX86State *env, int shift, raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, retaddr); } - sp += addend; + sa.sp += addend; if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) || ((env->hflags & HF_CS64_MASK) && !is_iret))) { /* return to same privilege level */ @@ -2030,21 +2088,19 @@ static inline void helper_ret_protected(CPUX86State *env, int shift, /* return to different privilege level */ #ifdef TARGET_X86_64 if (shift == 2) { - POPQ_RA(sp, new_esp, retaddr); - POPQ_RA(sp, new_ss, retaddr); - new_ss &= 0xffff; + new_esp = popq(&sa); + new_ss = popq(&sa) & 0xffff; } else #endif { if (shift == 1) { /* 32 bits */ - POPL_RA(ssp, sp, sp_mask, new_esp, retaddr); - POPL_RA(ssp, sp, sp_mask, new_ss, retaddr); - new_ss &= 0xffff; + new_esp = 
popl(&sa); + new_ss = popl(&sa) & 0xffff; } else { /* 16 bits */ - POPW_RA(ssp, sp, sp_mask, new_esp, retaddr); - POPW_RA(ssp, sp, sp_mask, new_ss, retaddr); + new_esp = popw(&sa); + new_ss = popw(&sa); } } LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n", @@ -2094,14 +2150,14 @@ static inline void helper_ret_protected(CPUX86State *env, int shift, get_seg_base(e1, e2), get_seg_limit(e1, e2), e2); - sp = new_esp; + sa.sp = new_esp; #ifdef TARGET_X86_64 if (env->hflags & HF_CS64_MASK) { - sp_mask = -1; + sa.sp_mask = -1; } else #endif { - sp_mask = get_sp_mask(ss_e2); + sa.sp_mask = get_sp_mask(ss_e2); } /* validate data segments */ @@ -2110,9 +2166,9 @@ static inline void helper_ret_protected(CPUX86State *env, int shift, validate_seg(env, R_FS, rpl); validate_seg(env, R_GS, rpl); - sp += addend; + sa.sp += addend; } - SET_ESP(sp, sp_mask); + SET_ESP(sa.sp, sa.sp_mask); env->eip = new_eip; if (is_iret) { /* NOTE: 'cpl' is the _old_ CPL */ @@ -2132,12 +2188,12 @@ static inline void helper_ret_protected(CPUX86State *env, int shift, return; return_to_vm86: - POPL_RA(ssp, sp, sp_mask, new_esp, retaddr); - POPL_RA(ssp, sp, sp_mask, new_ss, retaddr); - POPL_RA(ssp, sp, sp_mask, new_es, retaddr); - POPL_RA(ssp, sp, sp_mask, new_ds, retaddr); - POPL_RA(ssp, sp, sp_mask, new_fs, retaddr); - POPL_RA(ssp, sp, sp_mask, new_gs, retaddr); + new_esp = popl(&sa); + new_ss = popl(&sa); + new_es = popl(&sa); + new_ds = popl(&sa); + new_fs = popl(&sa); + new_gs = popl(&sa); /* modify processor state */ cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK | |
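The central move of this patch is replacing the open-coded PUSHW/PUSHL/POPW/POPL macros with the StackAccess structure and its pushw()/pushl()/popw()/popl()/pushq()/popq() helpers, so that the stack segment base, the stack pointer, the 16/32-bit mask and the MMU index travel together and SET_ESP writes the masked result back once. The sketch below is not part of the patch: it is a minimal standalone illustration of that addressing scheme, with invented names (DemoStackAccess, demo_pushw, demo_popw, guest_ram) and a flat byte buffer standing in for QEMU's cpu_st*_mmuidx_ra()/cpu_ld*_mmuidx_ra() accessors.

```c
/*
 * Standalone sketch, not part of the patch: DemoStackAccess, demo_pushw,
 * demo_popw and guest_ram are invented for illustration.  The real helpers
 * go through cpu_st*_mmuidx_ra()/cpu_ld*_mmuidx_ra() with an MMU index and
 * a return address; here a plain byte buffer stands in for guest memory.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint8_t guest_ram[0x20000];      /* pretend guest memory */

typedef struct DemoStackAccess {
    uint32_t ss_base;                   /* SS segment base */
    uint32_t sp;                        /* current stack pointer value */
    uint32_t sp_mask;                   /* 0xffff (16-bit SS) or 0xffffffff */
} DemoStackAccess;

static void demo_pushw(DemoStackAccess *sa, uint16_t val)
{
    sa->sp -= 2;
    /* only the bits selected by sp_mask take part in the address */
    memcpy(&guest_ram[sa->ss_base + (sa->sp & sa->sp_mask)], &val, sizeof(val));
}

static uint16_t demo_popw(DemoStackAccess *sa)
{
    uint16_t val;

    memcpy(&val, &guest_ram[sa->ss_base + (sa->sp & sa->sp_mask)], sizeof(val));
    sa->sp += 2;
    return val;
}

int main(void)
{
    /* 16-bit stack segment: push offsets wrap within the low 16 bits */
    DemoStackAccess sa = {
        .ss_base = 0x10000,
        .sp = 0x0002,
        .sp_mask = 0xffff,
    };
    uint32_t esp = 0xdeadbeef;

    demo_pushw(&sa, 0x1234);            /* stored at segment offset 0x0000 */
    demo_pushw(&sa, 0xabcd);            /* masked offset wraps to 0xfffe */

    printf("pop 0x%04x\n", (unsigned)demo_popw(&sa));   /* 0xabcd */
    printf("pop 0x%04x\n", (unsigned)demo_popw(&sa));   /* 0x1234 */

    /* the same masking SET_ESP applies when writing the result back */
    esp = (esp & ~sa.sp_mask) | (sa.sp & sa.sp_mask);
    printf("ESP 0x%08" PRIx32 "\n", esp);   /* 0xdead0002: upper half untouched */
    return 0;
}
```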