Diffstat (limited to 'target-i386')
-rw-r--r--  target-i386/cpu.h         45
-rw-r--r--  target-i386/cpuid.c        2
-rw-r--r--  target-i386/helper.c      73
-rw-r--r--  target-i386/kvm.c        106
-rw-r--r--  target-i386/machine.c    180
-rw-r--r--  target-i386/op_helper.c   35
-rw-r--r--  target-i386/translate.c   82
7 files changed, 286 insertions, 237 deletions
diff --git a/target-i386/cpu.h b/target-i386/cpu.h
index 37dde79581..a1ed3e72ea 100644
--- a/target-i386/cpu.h
+++ b/target-i386/cpu.h
@@ -42,7 +42,7 @@
 #define ELF_MACHINE	EM_386
 #endif
 
-#define CPUState struct CPUX86State
+#define CPUArchState struct CPUX86State
 
 #include "cpu-defs.h"
 
@@ -241,6 +241,7 @@
 #define PG_DIRTY_MASK	 (1 << PG_DIRTY_BIT)
 #define PG_PSE_MASK	 (1 << PG_PSE_BIT)
 #define PG_GLOBAL_MASK	 (1 << PG_GLOBAL_BIT)
+#define PG_HI_USER_MASK  0x7ff0000000000000LL
 #define PG_NX_MASK	 (1LL << PG_NX_BIT)
 
 #define PG_ERROR_W_BIT     1
@@ -482,6 +483,7 @@
 #define CPU_INTERRUPT_VIRQ      CPU_INTERRUPT_TGT_INT_0
 #define CPU_INTERRUPT_INIT      CPU_INTERRUPT_TGT_INT_1
 #define CPU_INTERRUPT_SIPI      CPU_INTERRUPT_TGT_INT_2
+#define CPU_INTERRUPT_TPR       CPU_INTERRUPT_TGT_INT_3
 
 
 enum {
@@ -613,6 +615,11 @@ typedef struct {
 
 #define NB_MMU_MODES 2
 
+typedef enum TPRAccess {
+    TPR_ACCESS_READ,
+    TPR_ACCESS_WRITE,
+} TPRAccess;
+
 typedef struct CPUX86State {
     /* standard registers */
     target_ulong regs[CPU_NB_REGS];
@@ -772,6 +779,8 @@ typedef struct CPUX86State {
     XMMReg ymmh_regs[CPU_NB_REGS];
 
     uint64_t xcr0;
+
+    TPRAccess tpr_access_type;
 } CPUX86State;
 
 CPUX86State *cpu_x86_init(const char *cpu_model);
@@ -779,7 +788,7 @@ int cpu_x86_exec(CPUX86State *s);
 void cpu_x86_close(CPUX86State *s);
 void x86_cpu_list (FILE *f, fprintf_function cpu_fprintf, const char *optarg);
 void x86_cpudef_setup(void);
-int cpu_x86_support_mca_broadcast(CPUState *env);
+int cpu_x86_support_mca_broadcast(CPUX86State *env);
 
 int cpu_get_pic_interrupt(CPUX86State *s);
 /* MSDOS compatibility mode FPU exception support */
@@ -961,7 +970,7 @@ uint64_t cpu_get_tsc(CPUX86State *env);
 #define MMU_MODE0_SUFFIX _kernel
 #define MMU_MODE1_SUFFIX _user
 #define MMU_USER_IDX 1
-static inline int cpu_mmu_index (CPUState *env)
+static inline int cpu_mmu_index (CPUX86State *env)
 {
     return (env->hflags & HF_CPL_MASK) == 3 ? 1 : 0;
 }
@@ -1000,7 +1009,7 @@ static inline int cpu_mmu_index (CPUState *env)
 void optimize_flags_init(void);
 
 #if defined(CONFIG_USER_ONLY)
-static inline void cpu_clone_regs(CPUState *env, target_ulong newsp)
+static inline void cpu_clone_regs(CPUX86State *env, target_ulong newsp)
 {
     if (newsp)
         env->regs[R_ESP] = newsp;
@@ -1015,7 +1024,7 @@ static inline void cpu_clone_regs(CPUState *env, target_ulong newsp)
 #include "hw/apic.h"
 #endif
 
-static inline bool cpu_has_work(CPUState *env)
+static inline bool cpu_has_work(CPUX86State *env)
 {
     return ((env->interrupt_request & CPU_INTERRUPT_HARD) &&
             (env->eflags & IF_MASK)) ||
@@ -1027,12 +1036,12 @@ static inline bool cpu_has_work(CPUState *env)
 
 #include "exec-all.h"
 
-static inline void cpu_pc_from_tb(CPUState *env, TranslationBlock *tb)
+static inline void cpu_pc_from_tb(CPUX86State *env, TranslationBlock *tb)
 {
     env->eip = tb->pc - tb->cs_base;
 }
 
-static inline void cpu_get_tb_cpu_state(CPUState *env, target_ulong *pc,
+static inline void cpu_get_tb_cpu_state(CPUX86State *env, target_ulong *pc,
                                         target_ulong *cs_base, int *flags)
 {
     *cs_base = env->segs[R_CS].base;
@@ -1041,27 +1050,29 @@ static inline void cpu_get_tb_cpu_state(CPUState *env, target_ulong *pc,
         (env->eflags & (IOPL_MASK | TF_MASK | RF_MASK | VM_MASK));
 }
 
-void do_cpu_init(CPUState *env);
-void do_cpu_sipi(CPUState *env);
+void do_cpu_init(CPUX86State *env);
+void do_cpu_sipi(CPUX86State *env);
 
 #define MCE_INJECT_BROADCAST    1
 #define MCE_INJECT_UNCOND_AO    2
 
-void cpu_x86_inject_mce(Monitor *mon, CPUState *cenv, int bank,
+void cpu_x86_inject_mce(Monitor *mon, CPUX86State *cenv, int bank,
                         uint64_t status, uint64_t mcg_status, uint64_t addr,
                         uint64_t misc, int flags);
 
 /* op_helper.c */
-void do_interrupt(CPUState *env);
-void do_interrupt_x86_hardirq(CPUState *env, int intno, int is_hw);
-void QEMU_NORETURN raise_exception_env(int exception_index, CPUState *nenv);
-void QEMU_NORETURN raise_exception_err_env(CPUState *nenv, int exception_index,
+void do_interrupt(CPUX86State *env);
+void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw);
+void QEMU_NORETURN raise_exception_env(int exception_index, CPUX86State *nenv);
+void QEMU_NORETURN raise_exception_err_env(CPUX86State *nenv, int exception_index,
                                            int error_code);
 
-void do_smm_enter(CPUState *env1);
+void do_smm_enter(CPUX86State *env1);
+
+void svm_check_intercept(CPUX86State *env1, uint32_t type);
 
-void svm_check_intercept(CPUState *env1, uint32_t type);
+uint32_t cpu_cc_compute_all(CPUX86State *env1, int op);
 
-uint32_t cpu_cc_compute_all(CPUState *env1, int op);
+void cpu_report_tpr_access(CPUX86State *env, TPRAccess access);
 
 #endif /* CPU_I386_H */
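
The cpu.h hunks above do two things: they retire the per-target `CPUState` alias in favour of `CPUArchState` (spelling out `CPUX86State` in every x86-specific prototype), and they add the `TPRAccess` enum, the `tpr_access_type` field and the `CPU_INTERRUPT_TPR` bit used for TPR access reporting. A minimal, hypothetical sketch of the aliasing pattern; everything except the names `CPUArchState` and `CPUX86State` is invented for illustration:

    #include <stdio.h>

    typedef struct CPUX86State {
        unsigned long eip;                    /* stand-in for the real register file */
    } CPUX86State;

    /* Per-target definition: generic code sees one name for the state struct. */
    #define CPUArchState CPUX86State

    /* Target-independent code is written against CPUArchState... */
    static void generic_dump(CPUArchState *env)
    {
        printf("eip=%#lx\n", env->eip);
    }

    /* ...while target code keeps using the concrete x86 type. */
    static void x86_set_eip(CPUX86State *env, unsigned long pc)
    {
        env->eip = pc;
    }

    int main(void)
    {
        CPUX86State env = { 0 };
        x86_set_eip(&env, 0xfff0);
        generic_dump(&env);
        return 0;
    }
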
diff --git a/target-i386/cpuid.c b/target-i386/cpuid.c
index c2edb646fe..465ea15f45 100644
--- a/target-i386/cpuid.c
+++ b/target-i386/cpuid.c
@@ -50,7 +50,7 @@ static const char *ext_feature_name[] = {
     "fma", "cx16", "xtpr", "pdcm",
     NULL, NULL, "dca", "sse4.1|sse4_1",
     "sse4.2|sse4_2", "x2apic", "movbe", "popcnt",
-    NULL, "aes", "xsave", "osxsave",
+    "tsc-deadline", "aes", "xsave", "osxsave",
     "avx", NULL, NULL, "hypervisor",
 };
 static const char *ext2_feature_name[] = {
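
The cpuid.c change only names a previously reserved feature slot: bit 24 of CPUID.01H:ECX, the TSC-deadline APIC timer mode, becomes selectable as "tsc-deadline". A small host-side check for that bit, assuming a GCC/Clang toolchain with <cpuid.h>:

    #include <stdio.h>
    #include <cpuid.h>

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx;

        if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx)) {
            puts("CPUID leaf 1 not available");
            return 1;
        }
        /* CPUID.01H:ECX bit 24 advertises the TSC-deadline timer mode. */
        printf("tsc-deadline: %s\n", (ecx & (1u << 24)) ? "yes" : "no");
        return 0;
    }
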
diff --git a/target-i386/helper.c b/target-i386/helper.c
index 2586aff700..83122bf617 100644
--- a/target-i386/helper.c
+++ b/target-i386/helper.c
@@ -16,14 +16,8 @@
  * You should have received a copy of the GNU Lesser General Public
  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
  */
-#include <stdarg.h>
-#include <stdlib.h>
-#include <stdio.h>
-#include <string.h>
-#include <inttypes.h>
 
 #include "cpu.h"
-#include "qemu-common.h"
 #include "kvm.h"
 #ifndef CONFIG_USER_ONLY
 #include "sysemu.h"
@@ -33,7 +27,7 @@
 //#define DEBUG_MMU
 
 /* NOTE: must be called outside the CPU execute loop */
-void cpu_reset(CPUX86State *env)
+void cpu_state_reset(CPUX86State *env)
 {
     int i;
 
@@ -112,7 +106,7 @@ void cpu_x86_close(CPUX86State *env)
     g_free(env);
 }
 
-static void cpu_x86_version(CPUState *env, int *family, int *model)
+static void cpu_x86_version(CPUX86State *env, int *family, int *model)
 {
     int cpuver = env->cpuid_version;
 
@@ -125,7 +119,7 @@ static void cpu_x86_version(CPUState *env, int *family, int *model)
 }
 
 /* Broadcast MCA signal for processor version 06H_EH and above */
-int cpu_x86_support_mca_broadcast(CPUState *env)
+int cpu_x86_support_mca_broadcast(CPUX86State *env)
 {
     int family = 0;
     int model = 0;
@@ -197,7 +191,7 @@ static const char *cc_op_str[] = {
 };
 
 static void
-cpu_x86_dump_seg_cache(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
+cpu_x86_dump_seg_cache(CPUX86State *env, FILE *f, fprintf_function cpu_fprintf,
                        const char *name, struct SegmentCache *sc)
 {
 #ifdef TARGET_X86_64
@@ -254,7 +248,7 @@ done:
 #define DUMP_CODE_BYTES_TOTAL    50
 #define DUMP_CODE_BYTES_BACKWARD 20
 
-void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
+void cpu_dump_state(CPUX86State *env, FILE *f, fprintf_function cpu_fprintf,
                     int flags)
 {
     int eflags, i, nb;
@@ -863,7 +857,7 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
     return 1;
 }
 
-target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
+target_phys_addr_t cpu_get_phys_page_debug(CPUX86State *env, target_ulong addr)
 {
     target_ulong pde_addr, pte_addr;
     uint64_t pte;
@@ -891,8 +885,8 @@ target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
             if (!(pml4e & PG_PRESENT_MASK))
                 return -1;
 
-            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
-                env->a20_mask;
+            pdpe_addr = ((pml4e & ~0xfff & ~(PG_NX_MASK | PG_HI_USER_MASK)) +
+                         (((addr >> 30) & 0x1ff) << 3)) & env->a20_mask;
             pdpe = ldq_phys(pdpe_addr);
             if (!(pdpe & PG_PRESENT_MASK))
                 return -1;
@@ -906,8 +900,8 @@ target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
                 return -1;
         }
 
-        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
-            env->a20_mask;
+        pde_addr = ((pdpe & ~0xfff & ~(PG_NX_MASK | PG_HI_USER_MASK)) +
+                    (((addr >> 21) & 0x1ff) << 3)) & env->a20_mask;
         pde = ldq_phys(pde_addr);
         if (!(pde & PG_PRESENT_MASK)) {
             return -1;
@@ -918,11 +912,12 @@ target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
             pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
         } else {
             /* 4 KB page */
-            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
-                env->a20_mask;
+            pte_addr = ((pde & ~0xfff & ~(PG_NX_MASK | PG_HI_USER_MASK)) +
+                        (((addr >> 12) & 0x1ff) << 3)) & env->a20_mask;
             page_size = 4096;
             pte = ldq_phys(pte_addr);
         }
+        pte &= ~(PG_NX_MASK | PG_HI_USER_MASK);
         if (!(pte & PG_PRESENT_MASK))
             return -1;
     } else {
@@ -957,7 +952,7 @@ target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
     return paddr;
 }
 
-void hw_breakpoint_insert(CPUState *env, int index)
+void hw_breakpoint_insert(CPUX86State *env, int index)
 {
     int type, err = 0;
 
@@ -985,7 +980,7 @@ void hw_breakpoint_insert(CPUState *env, int index)
         env->cpu_breakpoint[index] = NULL;
 }
 
-void hw_breakpoint_remove(CPUState *env, int index)
+void hw_breakpoint_remove(CPUX86State *env, int index)
 {
     if (!env->cpu_breakpoint[index])
         return;
@@ -1004,7 +999,7 @@ void hw_breakpoint_remove(CPUState *env, int index)
     }
 }
 
-int check_hw_breakpoints(CPUState *env, int force_dr6_update)
+int check_hw_breakpoints(CPUX86State *env, int force_dr6_update)
 {
     target_ulong dr6;
     int reg, type;
@@ -1028,7 +1023,7 @@ int check_hw_breakpoints(CPUState *env, int force_dr6_update)
 
 static CPUDebugExcpHandler *prev_debug_excp_handler;
 
-static void breakpoint_handler(CPUState *env)
+static void breakpoint_handler(CPUX86State *env)
 {
     CPUBreakpoint *bp;
 
@@ -1056,7 +1051,7 @@ static void breakpoint_handler(CPUState *env)
 
 typedef struct MCEInjectionParams {
     Monitor *mon;
-    CPUState *env;
+    CPUX86State *env;
     int bank;
     uint64_t status;
     uint64_t mcg_status;
@@ -1068,7 +1063,7 @@ typedef struct MCEInjectionParams {
 static void do_inject_x86_mce(void *data)
 {
     MCEInjectionParams *params = data;
-    CPUState *cenv = params->env;
+    CPUX86State *cenv = params->env;
     uint64_t *banks = cenv->mce_banks + 4 * params->bank;
 
     cpu_synchronize_state(cenv);
@@ -1138,7 +1133,7 @@ static void do_inject_x86_mce(void *data)
     }
 }
 
-void cpu_x86_inject_mce(Monitor *mon, CPUState *cenv, int bank,
+void cpu_x86_inject_mce(Monitor *mon, CPUX86State *cenv, int bank,
                         uint64_t status, uint64_t mcg_status, uint64_t addr,
                         uint64_t misc, int flags)
 {
@@ -1153,7 +1148,7 @@ void cpu_x86_inject_mce(Monitor *mon, CPUState *cenv, int bank,
         .flags = flags,
     };
     unsigned bank_num = cenv->mcg_cap & 0xff;
-    CPUState *env;
+    CPUX86State *env;
 
     if (!cenv->mcg_cap) {
         monitor_printf(mon, "MCE injection not supported\n");
@@ -1189,6 +1184,22 @@ void cpu_x86_inject_mce(Monitor *mon, CPUState *cenv, int bank,
         }
     }
 }
+
+void cpu_report_tpr_access(CPUX86State *env, TPRAccess access)
+{
+    TranslationBlock *tb;
+
+    if (kvm_enabled()) {
+        env->tpr_access_type = access;
+
+        cpu_interrupt(env, CPU_INTERRUPT_TPR);
+    } else {
+        tb = tb_find_pc(env->mem_io_pc);
+        cpu_restore_state(tb, env, env->mem_io_pc);
+
+        apic_handle_tpr_access_report(env->apic_state, env->eip, access);
+    }
+}
 #endif /* !CONFIG_USER_ONLY */
 
 static void mce_init(CPUX86State *cenv)
@@ -1266,27 +1277,27 @@ CPUX86State *cpu_x86_init(const char *cpu_model)
 }
 
 #if !defined(CONFIG_USER_ONLY)
-void do_cpu_init(CPUState *env)
+void do_cpu_init(CPUX86State *env)
 {
     int sipi = env->interrupt_request & CPU_INTERRUPT_SIPI;
     uint64_t pat = env->pat;
 
-    cpu_reset(env);
+    cpu_state_reset(env);
     env->interrupt_request = sipi;
     env->pat = pat;
     apic_init_reset(env->apic_state);
     env->halted = !cpu_is_bsp(env);
 }
 
-void do_cpu_sipi(CPUState *env)
+void do_cpu_sipi(CPUX86State *env)
 {
     apic_sipi(env->apic_state);
 }
 #else
-void do_cpu_init(CPUState *env)
+void do_cpu_init(CPUX86State *env)
 {
 }
-void do_cpu_sipi(CPUState *env)
+void do_cpu_sipi(CPUX86State *env)
 {
 }
 #endif
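
In helper.c, besides the cpu_reset -> cpu_state_reset rename and the type updates, the page walk in cpu_get_phys_page_debug now strips PG_NX_MASK and the new PG_HI_USER_MASK before using a 64-bit PTE as the address of the next table, and cpu_report_tpr_access is added to either queue a CPU_INTERRUPT_TPR (KVM) or report the access immediately at the restored EIP (TCG). A standalone sketch of the masking step, reusing the constants from cpu.h; the helper name and test value are invented:

    #include <stdint.h>
    #include <stdio.h>
    #include <inttypes.h>

    #define PG_PRESENT_MASK  (1ULL << 0)
    #define PG_NX_MASK       (1ULL << 63)
    #define PG_HI_USER_MASK  0x7ff0000000000000ULL

    static uint64_t pte_to_next_table(uint64_t pte)
    {
        /* Drop the flag bits 0-11 and the non-address bits 52-63; what is
         * left is usable as the physical address of the next-level table. */
        return pte & ~0xfffULL & ~(PG_NX_MASK | PG_HI_USER_MASK);
    }

    int main(void)
    {
        uint64_t pte = PG_NX_MASK | 0x7000000000000000ULL /* software bits */
                     | 0x12345000ULL | PG_PRESENT_MASK;
        printf("next table at 0x%" PRIx64 "\n", pte_to_next_table(pte));
        return 0;
    }
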
diff --git a/target-i386/kvm.c b/target-i386/kvm.c
index 981192ddf8..e74a9e4641 100644
--- a/target-i386/kvm.c
+++ b/target-i386/kvm.c
@@ -221,7 +221,7 @@ static int kvm_get_mce_cap_supported(KVMState *s, uint64_t *mce_cap,
     return -ENOSYS;
 }
 
-static void kvm_mce_inject(CPUState *env, target_phys_addr_t paddr, int code)
+static void kvm_mce_inject(CPUX86State *env, target_phys_addr_t paddr, int code)
 {
     uint64_t status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN |
                       MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S;
@@ -246,7 +246,7 @@ static void hardware_memory_error(void)
     exit(1);
 }
 
-int kvm_arch_on_sigbus_vcpu(CPUState *env, int code, void *addr)
+int kvm_arch_on_sigbus_vcpu(CPUX86State *env, int code, void *addr)
 {
     ram_addr_t ram_addr;
     target_phys_addr_t paddr;
@@ -306,7 +306,7 @@ int kvm_arch_on_sigbus(int code, void *addr)
     return 0;
 }
 
-static int kvm_inject_mce_oldstyle(CPUState *env)
+static int kvm_inject_mce_oldstyle(CPUX86State *env)
 {
     if (!kvm_has_vcpu_events() && env->exception_injected == EXCP12_MCHK) {
         unsigned int bank, bank_num = env->mcg_cap & 0xff;
@@ -338,14 +338,14 @@ static int kvm_inject_mce_oldstyle(CPUState *env)
 
 static void cpu_update_state(void *opaque, int running, RunState state)
 {
-    CPUState *env = opaque;
+    CPUX86State *env = opaque;
 
     if (running) {
         env->tsc_valid = false;
     }
 }
 
-int kvm_arch_init_vcpu(CPUState *env)
+int kvm_arch_init_vcpu(CPUX86State *env)
 {
     struct {
         struct kvm_cpuid2 cpuid;
@@ -555,6 +555,7 @@ int kvm_arch_init_vcpu(CPUState *env)
 
     qemu_add_vm_change_state_handler(cpu_update_state, env);
 
+    cpuid_data.cpuid.padding = 0;
     r = kvm_vcpu_ioctl(env, KVM_SET_CPUID2, &cpuid_data);
     if (r) {
         return r;
@@ -576,7 +577,7 @@ int kvm_arch_init_vcpu(CPUState *env)
     return 0;
 }
 
-void kvm_arch_reset_vcpu(CPUState *env)
+void kvm_arch_reset_vcpu(CPUX86State *env)
 {
     env->exception_injected = -1;
     env->interrupt_injected = -1;
@@ -740,6 +741,7 @@ static void set_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
     lhs->g = (flags & DESC_G_MASK) != 0;
     lhs->avl = (flags & DESC_AVL_MASK) != 0;
     lhs->unusable = 0;
+    lhs->padding = 0;
 }
 
 static void get_seg(SegmentCache *lhs, const struct kvm_segment *rhs)
@@ -766,7 +768,7 @@ static void kvm_getput_reg(__u64 *kvm_reg, target_ulong *qemu_reg, int set)
     }
 }
 
-static int kvm_getput_regs(CPUState *env, int set)
+static int kvm_getput_regs(CPUX86State *env, int set)
 {
     struct kvm_regs regs;
     int ret = 0;
@@ -807,7 +809,7 @@ static int kvm_getput_regs(CPUState *env, int set)
     return ret;
 }
 
-static int kvm_put_fpu(CPUState *env)
+static int kvm_put_fpu(CPUX86State *env)
 {
     struct kvm_fpu fpu;
     int i;
@@ -839,7 +841,7 @@ static int kvm_put_fpu(CPUState *env)
 #define XSAVE_XSTATE_BV   128
 #define XSAVE_YMMH_SPACE  144
 
-static int kvm_put_xsave(CPUState *env)
+static int kvm_put_xsave(CPUX86State *env)
 {
     struct kvm_xsave* xsave = env->kvm_xsave_buf;
     uint16_t cwd, swd, twd;
@@ -873,7 +875,7 @@ static int kvm_put_xsave(CPUState *env)
     return r;
 }
 
-static int kvm_put_xcrs(CPUState *env)
+static int kvm_put_xcrs(CPUX86State *env)
 {
     struct kvm_xcrs xcrs;
 
@@ -888,7 +890,7 @@ static int kvm_put_xcrs(CPUState *env)
     return kvm_vcpu_ioctl(env, KVM_SET_XCRS, &xcrs);
 }
 
-static int kvm_put_sregs(CPUState *env)
+static int kvm_put_sregs(CPUX86State *env)
 {
     struct kvm_sregs sregs;
 
@@ -919,8 +921,10 @@ static int kvm_put_sregs(CPUState *env)
 
     sregs.idt.limit = env->idt.limit;
     sregs.idt.base = env->idt.base;
+    memset(sregs.idt.padding, 0, sizeof sregs.idt.padding);
     sregs.gdt.limit = env->gdt.limit;
     sregs.gdt.base = env->gdt.base;
+    memset(sregs.gdt.padding, 0, sizeof sregs.gdt.padding);
 
     sregs.cr0 = env->cr[0];
     sregs.cr2 = env->cr[2];
@@ -942,7 +946,7 @@ static void kvm_msr_entry_set(struct kvm_msr_entry *entry,
     entry->data = value;
 }
 
-static int kvm_put_msrs(CPUState *env, int level)
+static int kvm_put_msrs(CPUX86State *env, int level)
 {
     struct {
         struct kvm_msrs info;
@@ -1025,7 +1029,7 @@ static int kvm_put_msrs(CPUState *env, int level)
 }
 
 
-static int kvm_get_fpu(CPUState *env)
+static int kvm_get_fpu(CPUX86State *env)
 {
     struct kvm_fpu fpu;
     int i, ret;
@@ -1051,7 +1055,7 @@ static int kvm_get_fpu(CPUState *env)
     return 0;
 }
 
-static int kvm_get_xsave(CPUState *env)
+static int kvm_get_xsave(CPUX86State *env)
 {
     struct kvm_xsave* xsave = env->kvm_xsave_buf;
     int ret, i;
@@ -1089,7 +1093,7 @@ static int kvm_get_xsave(CPUState *env)
     return 0;
 }
 
-static int kvm_get_xcrs(CPUState *env)
+static int kvm_get_xcrs(CPUX86State *env)
 {
     int i, ret;
     struct kvm_xcrs xcrs;
@@ -1113,7 +1117,7 @@ static int kvm_get_xcrs(CPUState *env)
     return 0;
 }
 
-static int kvm_get_sregs(CPUState *env)
+static int kvm_get_sregs(CPUX86State *env)
 {
     struct kvm_sregs sregs;
     uint32_t hflags;
@@ -1197,7 +1201,7 @@ static int kvm_get_sregs(CPUState *env)
     return 0;
 }
 
-static int kvm_get_msrs(CPUState *env)
+static int kvm_get_msrs(CPUX86State *env)
 {
     struct {
         struct kvm_msrs info;
@@ -1327,14 +1331,14 @@ static int kvm_get_msrs(CPUState *env)
     return 0;
 }
 
-static int kvm_put_mp_state(CPUState *env)
+static int kvm_put_mp_state(CPUX86State *env)
 {
     struct kvm_mp_state mp_state = { .mp_state = env->mp_state };
 
     return kvm_vcpu_ioctl(env, KVM_SET_MP_STATE, &mp_state);
 }
 
-static int kvm_get_mp_state(CPUState *env)
+static int kvm_get_mp_state(CPUX86State *env)
 {
     struct kvm_mp_state mp_state;
     int ret;
@@ -1350,7 +1354,7 @@ static int kvm_get_mp_state(CPUState *env)
     return 0;
 }
 
-static int kvm_get_apic(CPUState *env)
+static int kvm_get_apic(CPUX86State *env)
 {
     DeviceState *apic = env->apic_state;
     struct kvm_lapic_state kapic;
@@ -1367,7 +1371,7 @@ static int kvm_get_apic(CPUState *env)
     return 0;
 }
 
-static int kvm_put_apic(CPUState *env)
+static int kvm_put_apic(CPUX86State *env)
 {
     DeviceState *apic = env->apic_state;
     struct kvm_lapic_state kapic;
@@ -1380,7 +1384,7 @@ static int kvm_put_apic(CPUState *env)
     return 0;
 }
 
-static int kvm_put_vcpu_events(CPUState *env, int level)
+static int kvm_put_vcpu_events(CPUX86State *env, int level)
 {
     struct kvm_vcpu_events events;
 
@@ -1392,6 +1396,7 @@ static int kvm_put_vcpu_events(CPUState *env, int level)
     events.exception.nr = env->exception_injected;
     events.exception.has_error_code = env->has_error_code;
     events.exception.error_code = env->error_code;
+    events.exception.pad = 0;
 
     events.interrupt.injected = (env->interrupt_injected >= 0);
     events.interrupt.nr = env->interrupt_injected;
@@ -1400,6 +1405,7 @@ static int kvm_put_vcpu_events(CPUState *env, int level)
     events.nmi.injected = env->nmi_injected;
     events.nmi.pending = env->nmi_pending;
     events.nmi.masked = !!(env->hflags2 & HF2_NMI_MASK);
+    events.nmi.pad = 0;
 
     events.sipi_vector = env->sipi_vector;
 
@@ -1412,7 +1418,7 @@ static int kvm_put_vcpu_events(CPUState *env, int level)
     return kvm_vcpu_ioctl(env, KVM_SET_VCPU_EVENTS, &events);
 }
 
-static int kvm_get_vcpu_events(CPUState *env)
+static int kvm_get_vcpu_events(CPUX86State *env)
 {
     struct kvm_vcpu_events events;
     int ret;
@@ -1447,7 +1453,7 @@ static int kvm_get_vcpu_events(CPUState *env)
     return 0;
 }
 
-static int kvm_guest_debug_workarounds(CPUState *env)
+static int kvm_guest_debug_workarounds(CPUX86State *env)
 {
     int ret = 0;
     unsigned long reinject_trap = 0;
@@ -1476,7 +1482,7 @@ static int kvm_guest_debug_workarounds(CPUState *env)
     return ret;
 }
 
-static int kvm_put_debugregs(CPUState *env)
+static int kvm_put_debugregs(CPUX86State *env)
 {
     struct kvm_debugregs dbgregs;
     int i;
@@ -1495,7 +1501,7 @@ static int kvm_put_debugregs(CPUState *env)
     return kvm_vcpu_ioctl(env, KVM_SET_DEBUGREGS, &dbgregs);
 }
 
-static int kvm_get_debugregs(CPUState *env)
+static int kvm_get_debugregs(CPUX86State *env)
 {
     struct kvm_debugregs dbgregs;
     int i, ret;
@@ -1517,7 +1523,7 @@ static int kvm_get_debugregs(CPUState *env)
     return 0;
 }
 
-int kvm_arch_put_registers(CPUState *env, int level)
+int kvm_arch_put_registers(CPUX86State *env, int level)
 {
     int ret;
 
@@ -1574,7 +1580,7 @@ int kvm_arch_put_registers(CPUState *env, int level)
     return 0;
 }
 
-int kvm_arch_get_registers(CPUState *env)
+int kvm_arch_get_registers(CPUX86State *env)
 {
     int ret;
 
@@ -1619,7 +1625,7 @@ int kvm_arch_get_registers(CPUState *env)
     return 0;
 }
 
-void kvm_arch_pre_run(CPUState *env, struct kvm_run *run)
+void kvm_arch_pre_run(CPUX86State *env, struct kvm_run *run)
 {
     int ret;
 
@@ -1635,8 +1641,10 @@ void kvm_arch_pre_run(CPUState *env, struct kvm_run *run)
     }
 
     if (!kvm_irqchip_in_kernel()) {
-        /* Force the VCPU out of its inner loop to process the INIT request */
-        if (env->interrupt_request & CPU_INTERRUPT_INIT) {
+        /* Force the VCPU out of its inner loop to process any INIT requests
+         * or pending TPR access reports. */
+        if (env->interrupt_request &
+            (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
             env->exit_request = 1;
         }
 
@@ -1677,7 +1685,7 @@ void kvm_arch_pre_run(CPUState *env, struct kvm_run *run)
     }
 }
 
-void kvm_arch_post_run(CPUState *env, struct kvm_run *run)
+void kvm_arch_post_run(CPUX86State *env, struct kvm_run *run)
 {
     if (run->if_flag) {
         env->eflags |= IF_MASK;
@@ -1688,7 +1696,7 @@ void kvm_arch_post_run(CPUState *env, struct kvm_run *run)
     cpu_set_apic_base(env->apic_state, run->apic_base);
 }
 
-int kvm_arch_process_async_events(CPUState *env)
+int kvm_arch_process_async_events(CPUX86State *env)
 {
     if (env->interrupt_request & CPU_INTERRUPT_MCE) {
         /* We must not raise CPU_INTERRUPT_MCE if it's not supported. */
@@ -1730,11 +1738,17 @@ int kvm_arch_process_async_events(CPUState *env)
         kvm_cpu_synchronize_state(env);
         do_cpu_sipi(env);
     }
+    if (env->interrupt_request & CPU_INTERRUPT_TPR) {
+        env->interrupt_request &= ~CPU_INTERRUPT_TPR;
+        kvm_cpu_synchronize_state(env);
+        apic_handle_tpr_access_report(env->apic_state, env->eip,
+                                      env->tpr_access_type);
+    }
 
     return env->halted;
 }
 
-static int kvm_handle_halt(CPUState *env)
+static int kvm_handle_halt(CPUX86State *env)
 {
     if (!((env->interrupt_request & CPU_INTERRUPT_HARD) &&
           (env->eflags & IF_MASK)) &&
@@ -1746,7 +1760,17 @@ static int kvm_handle_halt(CPUState *env)
     return 0;
 }
 
-int kvm_arch_insert_sw_breakpoint(CPUState *env, struct kvm_sw_breakpoint *bp)
+static int kvm_handle_tpr_access(CPUX86State *env)
+{
+    struct kvm_run *run = env->kvm_run;
+
+    apic_handle_tpr_access_report(env->apic_state, run->tpr_access.rip,
+                                  run->tpr_access.is_write ? TPR_ACCESS_WRITE
+                                                           : TPR_ACCESS_READ);
+    return 1;
+}
+
+int kvm_arch_insert_sw_breakpoint(CPUX86State *env, struct kvm_sw_breakpoint *bp)
 {
     static const uint8_t int3 = 0xcc;
 
@@ -1757,7 +1781,7 @@ int kvm_arch_insert_sw_breakpoint(CPUState *env, struct kvm_sw_breakpoint *bp)
     return 0;
 }
 
-int kvm_arch_remove_sw_breakpoint(CPUState *env, struct kvm_sw_breakpoint *bp)
+int kvm_arch_remove_sw_breakpoint(CPUX86State *env, struct kvm_sw_breakpoint *bp)
 {
     uint8_t int3;
 
@@ -1900,7 +1924,7 @@ static int kvm_handle_debug(struct kvm_debug_exit_arch *arch_info)
     return ret;
 }
 
-void kvm_arch_update_guest_debug(CPUState *env, struct kvm_guest_debug *dbg)
+void kvm_arch_update_guest_debug(CPUX86State *env, struct kvm_guest_debug *dbg)
 {
     const uint8_t type_code[] = {
         [GDB_BREAKPOINT_HW] = 0x0,
@@ -1937,7 +1961,7 @@ static bool host_supports_vmx(void)
 
 #define VMX_INVALID_GUEST_STATE 0x80000021
 
-int kvm_arch_handle_exit(CPUState *env, struct kvm_run *run)
+int kvm_arch_handle_exit(CPUX86State *env, struct kvm_run *run)
 {
     uint64_t code;
     int ret;
@@ -1950,6 +1974,9 @@ int kvm_arch_handle_exit(CPUState *env, struct kvm_run *run)
     case KVM_EXIT_SET_TPR:
         ret = 0;
         break;
+    case KVM_EXIT_TPR_ACCESS:
+        ret = kvm_handle_tpr_access(env);
+        break;
     case KVM_EXIT_FAIL_ENTRY:
         code = run->fail_entry.hardware_entry_failure_reason;
         fprintf(stderr, "KVM: entry failed, hardware error 0x%" PRIx64 "\n",
@@ -1985,8 +2012,9 @@ int kvm_arch_handle_exit(CPUState *env, struct kvm_run *run)
     return ret;
 }
 
-bool kvm_arch_stop_on_emulation_error(CPUState *env)
+bool kvm_arch_stop_on_emulation_error(CPUX86State *env)
 {
+    kvm_cpu_synchronize_state(env);
     return !(env->cr[0] & CR0_PE_MASK) ||
            ((env->segs[R_CS].selector  & 3) != 3);
 }
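
The kvm.c changes wire up TPR access reporting (KVM_EXIT_TPR_ACCESS, CPU_INTERRUPT_TPR handling in kvm_arch_pre_run and kvm_arch_process_async_events) and also zero several padding fields before structures are handed to the kernel. A minimal sketch of why that zeroing matters, using an invented stand-in struct rather than the real kvm_segment and omitting the ioctl itself:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Hypothetical ABI struct with an explicit padding byte; bytes the caller
     * never writes would otherwise carry whatever was on the stack. */
    struct fake_segment {
        uint64_t base;
        uint32_t limit;
        uint8_t  type, present, dpl, db, s, l, g, avl;
        uint8_t  unusable;
        uint8_t  padding;                 /* must not leak stack garbage */
    };

    static void fill_segment(struct fake_segment *seg, uint64_t base, uint32_t limit)
    {
        memset(seg, 0, sizeof(*seg));     /* clears padding along with the fields */
        seg->base = base;
        seg->limit = limit;
        seg->present = 1;
    }

    int main(void)
    {
        struct fake_segment seg;
        fill_segment(&seg, 0xffff0000u, 0xffffu);
        printf("base=%#llx limit=%#x padding=%u\n",
               (unsigned long long)seg.base, seg.limit, seg.padding);
        return 0;
    }
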
diff --git a/target-i386/machine.c b/target-i386/machine.c
index d6e98ff37b..a8be058d2d 100644
--- a/target-i386/machine.c
+++ b/target-i386/machine.c
@@ -171,14 +171,14 @@ static const VMStateInfo vmstate_fpreg_1_no_mmx = {
 
 static bool fpregs_is_0(void *opaque, int version_id)
 {
-    CPUState *env = opaque;
+    CPUX86State *env = opaque;
 
     return (env->fpregs_format_vmstate == 0);
 }
 
 static bool fpregs_is_1_mmx(void *opaque, int version_id)
 {
-    CPUState *env = opaque;
+    CPUX86State *env = opaque;
     int guess_mmx;
 
     guess_mmx = ((env->fptag_vmstate == 0xff) &&
@@ -188,7 +188,7 @@ static bool fpregs_is_1_mmx(void *opaque, int version_id)
 
 static bool fpregs_is_1_no_mmx(void *opaque, int version_id)
 {
-    CPUState *env = opaque;
+    CPUX86State *env = opaque;
     int guess_mmx;
 
     guess_mmx = ((env->fptag_vmstate == 0xff) &&
@@ -237,7 +237,7 @@ static const VMStateInfo vmstate_hack_uint64_as_uint32 = {
 
 static void cpu_pre_save(void *opaque)
 {
-    CPUState *env = opaque;
+    CPUX86State *env = opaque;
     int i;
 
     /* FPU */
@@ -252,7 +252,7 @@ static void cpu_pre_save(void *opaque)
 
 static int cpu_post_load(void *opaque, int version_id)
 {
-    CPUState *env = opaque;
+    CPUX86State *env = opaque;
     int i;
 
     /* XXX: restore FPU round state */
@@ -274,7 +274,7 @@ static int cpu_post_load(void *opaque, int version_id)
 
 static bool async_pf_msr_needed(void *opaque)
 {
-    CPUState *cpu = opaque;
+    CPUX86State *cpu = opaque;
 
     return cpu->async_pf_en_msr != 0;
 }
@@ -285,14 +285,14 @@ static const VMStateDescription vmstate_async_pf_msr = {
     .minimum_version_id = 1,
     .minimum_version_id_old = 1,
     .fields      = (VMStateField []) {
-        VMSTATE_UINT64(async_pf_en_msr, CPUState),
+        VMSTATE_UINT64(async_pf_en_msr, CPUX86State),
         VMSTATE_END_OF_LIST()
     }
 };
 
 static bool fpop_ip_dp_needed(void *opaque)
 {
-    CPUState *env = opaque;
+    CPUX86State *env = opaque;
 
     return env->fpop != 0 || env->fpip != 0 || env->fpdp != 0;
 }
@@ -303,16 +303,16 @@ static const VMStateDescription vmstate_fpop_ip_dp = {
     .minimum_version_id = 1,
     .minimum_version_id_old = 1,
     .fields      = (VMStateField []) {
-        VMSTATE_UINT16(fpop, CPUState),
-        VMSTATE_UINT64(fpip, CPUState),
-        VMSTATE_UINT64(fpdp, CPUState),
+        VMSTATE_UINT16(fpop, CPUX86State),
+        VMSTATE_UINT64(fpip, CPUX86State),
+        VMSTATE_UINT64(fpdp, CPUX86State),
         VMSTATE_END_OF_LIST()
     }
 };
 
 static bool tscdeadline_needed(void *opaque)
 {
-    CPUState *env = opaque;
+    CPUX86State *env = opaque;
 
     return env->tsc_deadline != 0;
 }
@@ -323,14 +323,14 @@ static const VMStateDescription vmstate_msr_tscdeadline = {
     .minimum_version_id = 1,
     .minimum_version_id_old = 1,
     .fields      = (VMStateField []) {
-        VMSTATE_UINT64(tsc_deadline, CPUState),
+        VMSTATE_UINT64(tsc_deadline, CPUX86State),
         VMSTATE_END_OF_LIST()
     }
 };
 
 static bool misc_enable_needed(void *opaque)
 {
-    CPUState *env = opaque;
+    CPUX86State *env = opaque;
 
     return env->msr_ia32_misc_enable != MSR_IA32_MISC_ENABLE_DEFAULT;
 }
@@ -341,7 +341,7 @@ static const VMStateDescription vmstate_msr_ia32_misc_enable = {
     .minimum_version_id = 1,
     .minimum_version_id_old = 1,
     .fields      = (VMStateField []) {
-        VMSTATE_UINT64(msr_ia32_misc_enable, CPUState),
+        VMSTATE_UINT64(msr_ia32_misc_enable, CPUX86State),
         VMSTATE_END_OF_LIST()
     }
 };
@@ -354,98 +354,98 @@ static const VMStateDescription vmstate_cpu = {
     .pre_save = cpu_pre_save,
     .post_load = cpu_post_load,
     .fields      = (VMStateField []) {
-        VMSTATE_UINTTL_ARRAY(regs, CPUState, CPU_NB_REGS),
-        VMSTATE_UINTTL(eip, CPUState),
-        VMSTATE_UINTTL(eflags, CPUState),
-        VMSTATE_UINT32(hflags, CPUState),
+        VMSTATE_UINTTL_ARRAY(regs, CPUX86State, CPU_NB_REGS),
+        VMSTATE_UINTTL(eip, CPUX86State),
+        VMSTATE_UINTTL(eflags, CPUX86State),
+        VMSTATE_UINT32(hflags, CPUX86State),
         /* FPU */
-        VMSTATE_UINT16(fpuc, CPUState),
-        VMSTATE_UINT16(fpus_vmstate, CPUState),
-        VMSTATE_UINT16(fptag_vmstate, CPUState),
-        VMSTATE_UINT16(fpregs_format_vmstate, CPUState),
-        VMSTATE_FP_REGS(fpregs, CPUState, 8),
-
-        VMSTATE_SEGMENT_ARRAY(segs, CPUState, 6),
-        VMSTATE_SEGMENT(ldt, CPUState),
-        VMSTATE_SEGMENT(tr, CPUState),
-        VMSTATE_SEGMENT(gdt, CPUState),
-        VMSTATE_SEGMENT(idt, CPUState),
-
-        VMSTATE_UINT32(sysenter_cs, CPUState),
+        VMSTATE_UINT16(fpuc, CPUX86State),
+        VMSTATE_UINT16(fpus_vmstate, CPUX86State),
+        VMSTATE_UINT16(fptag_vmstate, CPUX86State),
+        VMSTATE_UINT16(fpregs_format_vmstate, CPUX86State),
+        VMSTATE_FP_REGS(fpregs, CPUX86State, 8),
+
+        VMSTATE_SEGMENT_ARRAY(segs, CPUX86State, 6),
+        VMSTATE_SEGMENT(ldt, CPUX86State),
+        VMSTATE_SEGMENT(tr, CPUX86State),
+        VMSTATE_SEGMENT(gdt, CPUX86State),
+        VMSTATE_SEGMENT(idt, CPUX86State),
+
+        VMSTATE_UINT32(sysenter_cs, CPUX86State),
 #ifdef TARGET_X86_64
         /* Hack: In v7 size changed from 32 to 64 bits on x86_64 */
-        VMSTATE_HACK_UINT32(sysenter_esp, CPUState, less_than_7),
-        VMSTATE_HACK_UINT32(sysenter_eip, CPUState, less_than_7),
-        VMSTATE_UINTTL_V(sysenter_esp, CPUState, 7),
-        VMSTATE_UINTTL_V(sysenter_eip, CPUState, 7),
+        VMSTATE_HACK_UINT32(sysenter_esp, CPUX86State, less_than_7),
+        VMSTATE_HACK_UINT32(sysenter_eip, CPUX86State, less_than_7),
+        VMSTATE_UINTTL_V(sysenter_esp, CPUX86State, 7),
+        VMSTATE_UINTTL_V(sysenter_eip, CPUX86State, 7),
 #else
-        VMSTATE_UINTTL(sysenter_esp, CPUState),
-        VMSTATE_UINTTL(sysenter_eip, CPUState),
+        VMSTATE_UINTTL(sysenter_esp, CPUX86State),
+        VMSTATE_UINTTL(sysenter_eip, CPUX86State),
 #endif
 
-        VMSTATE_UINTTL(cr[0], CPUState),
-        VMSTATE_UINTTL(cr[2], CPUState),
-        VMSTATE_UINTTL(cr[3], CPUState),
-        VMSTATE_UINTTL(cr[4], CPUState),
-        VMSTATE_UINTTL_ARRAY(dr, CPUState, 8),
+        VMSTATE_UINTTL(cr[0], CPUX86State),
+        VMSTATE_UINTTL(cr[2], CPUX86State),
+        VMSTATE_UINTTL(cr[3], CPUX86State),
+        VMSTATE_UINTTL(cr[4], CPUX86State),
+        VMSTATE_UINTTL_ARRAY(dr, CPUX86State, 8),
         /* MMU */
-        VMSTATE_INT32(a20_mask, CPUState),
+        VMSTATE_INT32(a20_mask, CPUX86State),
         /* XMM */
-        VMSTATE_UINT32(mxcsr, CPUState),
-        VMSTATE_XMM_REGS(xmm_regs, CPUState, CPU_NB_REGS),
+        VMSTATE_UINT32(mxcsr, CPUX86State),
+        VMSTATE_XMM_REGS(xmm_regs, CPUX86State, CPU_NB_REGS),
 
 #ifdef TARGET_X86_64
-        VMSTATE_UINT64(efer, CPUState),
-        VMSTATE_UINT64(star, CPUState),
-        VMSTATE_UINT64(lstar, CPUState),
-        VMSTATE_UINT64(cstar, CPUState),
-        VMSTATE_UINT64(fmask, CPUState),
-        VMSTATE_UINT64(kernelgsbase, CPUState),
+        VMSTATE_UINT64(efer, CPUX86State),
+        VMSTATE_UINT64(star, CPUX86State),
+        VMSTATE_UINT64(lstar, CPUX86State),
+        VMSTATE_UINT64(cstar, CPUX86State),
+        VMSTATE_UINT64(fmask, CPUX86State),
+        VMSTATE_UINT64(kernelgsbase, CPUX86State),
 #endif
-        VMSTATE_UINT32_V(smbase, CPUState, 4),
-
-        VMSTATE_UINT64_V(pat, CPUState, 5),
-        VMSTATE_UINT32_V(hflags2, CPUState, 5),
-
-        VMSTATE_UINT32_TEST(halted, CPUState, version_is_5),
-        VMSTATE_UINT64_V(vm_hsave, CPUState, 5),
-        VMSTATE_UINT64_V(vm_vmcb, CPUState, 5),
-        VMSTATE_UINT64_V(tsc_offset, CPUState, 5),
-        VMSTATE_UINT64_V(intercept, CPUState, 5),
-        VMSTATE_UINT16_V(intercept_cr_read, CPUState, 5),
-        VMSTATE_UINT16_V(intercept_cr_write, CPUState, 5),
-        VMSTATE_UINT16_V(intercept_dr_read, CPUState, 5),
-        VMSTATE_UINT16_V(intercept_dr_write, CPUState, 5),
-        VMSTATE_UINT32_V(intercept_exceptions, CPUState, 5),
-        VMSTATE_UINT8_V(v_tpr, CPUState, 5),
+        VMSTATE_UINT32_V(smbase, CPUX86State, 4),
+
+        VMSTATE_UINT64_V(pat, CPUX86State, 5),
+        VMSTATE_UINT32_V(hflags2, CPUX86State, 5),
+
+        VMSTATE_UINT32_TEST(halted, CPUX86State, version_is_5),
+        VMSTATE_UINT64_V(vm_hsave, CPUX86State, 5),
+        VMSTATE_UINT64_V(vm_vmcb, CPUX86State, 5),
+        VMSTATE_UINT64_V(tsc_offset, CPUX86State, 5),
+        VMSTATE_UINT64_V(intercept, CPUX86State, 5),
+        VMSTATE_UINT16_V(intercept_cr_read, CPUX86State, 5),
+        VMSTATE_UINT16_V(intercept_cr_write, CPUX86State, 5),
+        VMSTATE_UINT16_V(intercept_dr_read, CPUX86State, 5),
+        VMSTATE_UINT16_V(intercept_dr_write, CPUX86State, 5),
+        VMSTATE_UINT32_V(intercept_exceptions, CPUX86State, 5),
+        VMSTATE_UINT8_V(v_tpr, CPUX86State, 5),
         /* MTRRs */
-        VMSTATE_UINT64_ARRAY_V(mtrr_fixed, CPUState, 11, 8),
-        VMSTATE_UINT64_V(mtrr_deftype, CPUState, 8),
-        VMSTATE_MTRR_VARS(mtrr_var, CPUState, 8, 8),
+        VMSTATE_UINT64_ARRAY_V(mtrr_fixed, CPUX86State, 11, 8),
+        VMSTATE_UINT64_V(mtrr_deftype, CPUX86State, 8),
+        VMSTATE_MTRR_VARS(mtrr_var, CPUX86State, 8, 8),
         /* KVM-related states */
-        VMSTATE_INT32_V(interrupt_injected, CPUState, 9),
-        VMSTATE_UINT32_V(mp_state, CPUState, 9),
-        VMSTATE_UINT64_V(tsc, CPUState, 9),
-        VMSTATE_INT32_V(exception_injected, CPUState, 11),
-        VMSTATE_UINT8_V(soft_interrupt, CPUState, 11),
-        VMSTATE_UINT8_V(nmi_injected, CPUState, 11),
-        VMSTATE_UINT8_V(nmi_pending, CPUState, 11),
-        VMSTATE_UINT8_V(has_error_code, CPUState, 11),
-        VMSTATE_UINT32_V(sipi_vector, CPUState, 11),
+        VMSTATE_INT32_V(interrupt_injected, CPUX86State, 9),
+        VMSTATE_UINT32_V(mp_state, CPUX86State, 9),
+        VMSTATE_UINT64_V(tsc, CPUX86State, 9),
+        VMSTATE_INT32_V(exception_injected, CPUX86State, 11),
+        VMSTATE_UINT8_V(soft_interrupt, CPUX86State, 11),
+        VMSTATE_UINT8_V(nmi_injected, CPUX86State, 11),
+        VMSTATE_UINT8_V(nmi_pending, CPUX86State, 11),
+        VMSTATE_UINT8_V(has_error_code, CPUX86State, 11),
+        VMSTATE_UINT32_V(sipi_vector, CPUX86State, 11),
         /* MCE */
-        VMSTATE_UINT64_V(mcg_cap, CPUState, 10),
-        VMSTATE_UINT64_V(mcg_status, CPUState, 10),
-        VMSTATE_UINT64_V(mcg_ctl, CPUState, 10),
-        VMSTATE_UINT64_ARRAY_V(mce_banks, CPUState, MCE_BANKS_DEF *4, 10),
+        VMSTATE_UINT64_V(mcg_cap, CPUX86State, 10),
+        VMSTATE_UINT64_V(mcg_status, CPUX86State, 10),
+        VMSTATE_UINT64_V(mcg_ctl, CPUX86State, 10),
+        VMSTATE_UINT64_ARRAY_V(mce_banks, CPUX86State, MCE_BANKS_DEF *4, 10),
         /* rdtscp */
-        VMSTATE_UINT64_V(tsc_aux, CPUState, 11),
+        VMSTATE_UINT64_V(tsc_aux, CPUX86State, 11),
         /* KVM pvclock msr */
-        VMSTATE_UINT64_V(system_time_msr, CPUState, 11),
-        VMSTATE_UINT64_V(wall_clock_msr, CPUState, 11),
+        VMSTATE_UINT64_V(system_time_msr, CPUX86State, 11),
+        VMSTATE_UINT64_V(wall_clock_msr, CPUX86State, 11),
         /* XSAVE related fields */
-        VMSTATE_UINT64_V(xcr0, CPUState, 12),
-        VMSTATE_UINT64_V(xstate_bv, CPUState, 12),
-        VMSTATE_YMMH_REGS_VARS(ymmh_regs, CPUState, CPU_NB_REGS, 12),
+        VMSTATE_UINT64_V(xcr0, CPUX86State, 12),
+        VMSTATE_UINT64_V(xstate_bv, CPUX86State, 12),
+        VMSTATE_YMMH_REGS_VARS(ymmh_regs, CPUX86State, CPU_NB_REGS, 12),
         VMSTATE_END_OF_LIST()
         /* The above list is not sorted /wrt version numbers, watch out! */
     },
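
machine.c is a mechanical rename: every VMState field descriptor names the containing struct, so switching from CPUState to CPUX86State touches each line without changing the migration format. A toy sketch, not QEMU's actual macros, of why the type name appears in every descriptor:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef struct CPUX86State {
        uint64_t tsc;
        uint32_t mp_state;
    } CPUX86State;

    typedef struct Field {
        const char *name;
        size_t offset;
        size_t size;
    } Field;

    /* The descriptor records the field's offset inside the named type, so a
     * rename of the type has to be repeated on every field entry. */
    #define FIELD(f, type) { #f, offsetof(type, f), sizeof(((type *)0)->f) }

    static const Field cpu_fields[] = {
        FIELD(tsc, CPUX86State),
        FIELD(mp_state, CPUX86State),
    };

    int main(void)
    {
        for (size_t i = 0; i < sizeof(cpu_fields) / sizeof(cpu_fields[0]); i++) {
            printf("%-10s offset=%zu size=%zu\n",
                   cpu_fields[i].name, cpu_fields[i].offset, cpu_fields[i].size);
        }
        return 0;
    }
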
diff --git a/target-i386/op_helper.c b/target-i386/op_helper.c
index 2aea71bf85..c04ae4464f 100644
--- a/target-i386/op_helper.c
+++ b/target-i386/op_helper.c
@@ -22,7 +22,6 @@
 #include "dyngen-exec.h"
 #include "host-utils.h"
 #include "ioport.h"
-#include "qemu-common.h"
 #include "qemu-log.h"
 #include "cpu-defs.h"
 #include "helper.h"
@@ -126,7 +125,7 @@ static inline void load_eflags(int eflags, int update_mask)
 
 /* load efer and update the corresponding hflags. XXX: do consistency
    checks with cpuid bits ? */
-static inline void cpu_load_efer(CPUState *env, uint64_t val)
+static inline void cpu_load_efer(CPUX86State *env, uint64_t val)
 {
     env->efer = val;
     env->hflags &= ~(HF_LMA_MASK | HF_SVME_MASK);
@@ -1377,9 +1376,9 @@ static void do_interrupt_all(int intno, int is_int, int error_code,
 #endif
 }
 
-void do_interrupt(CPUState *env1)
+void do_interrupt(CPUX86State *env1)
 {
-    CPUState *saved_env;
+    CPUX86State *saved_env;
 
     saved_env = env;
     env = env1;
@@ -1407,9 +1406,9 @@ void do_interrupt(CPUState *env1)
     env = saved_env;
 }
 
-void do_interrupt_x86_hardirq(CPUState *env1, int intno, int is_hw)
+void do_interrupt_x86_hardirq(CPUX86State *env1, int intno, int is_hw)
 {
-    CPUState *saved_env;
+    CPUX86State *saved_env;
 
     saved_env = env;
     env = env1;
@@ -1493,7 +1492,7 @@ static void QEMU_NORETURN raise_exception_err(int exception_index,
     raise_interrupt(exception_index, 0, error_code, 0);
 }
 
-void raise_exception_err_env(CPUState *nenv, int exception_index,
+void raise_exception_err_env(CPUX86State *nenv, int exception_index,
                              int error_code)
 {
     env = nenv;
@@ -1505,7 +1504,7 @@ static void QEMU_NORETURN raise_exception(int exception_index)
     raise_interrupt(exception_index, 0, 0, 0);
 }
 
-void raise_exception_env(int exception_index, CPUState *nenv)
+void raise_exception_env(int exception_index, CPUX86State *nenv)
 {
     env = nenv;
     raise_exception(exception_index);
@@ -1514,7 +1513,7 @@ void raise_exception_env(int exception_index, CPUState *nenv)
 
 #if defined(CONFIG_USER_ONLY)
 
-void do_smm_enter(CPUState *env1)
+void do_smm_enter(CPUX86State *env1)
 {
 }
 
@@ -1530,12 +1529,12 @@ void helper_rsm(void)
 #define SMM_REVISION_ID 0x00020000
 #endif
 
-void do_smm_enter(CPUState *env1)
+void do_smm_enter(CPUX86State *env1)
 {
     target_ulong sm_state;
     SegmentCache *dt;
     int i, offset;
-    CPUState *saved_env;
+    CPUX86State *saved_env;
 
     saved_env = env;
     env = env1;
@@ -5003,7 +5002,7 @@ void helper_boundl(target_ulong a0, int v)
    NULL, it means that the function was called in C code (i.e. not
    from generated code or from helper.c) */
 /* XXX: fix it to restore all registers */
-void tlb_fill(CPUState *env1, target_ulong addr, int is_write, int mmu_idx,
+void tlb_fill(CPUX86State *env1, target_ulong addr, int is_write, int mmu_idx,
               void *retaddr)
 {
     TranslationBlock *tb;
@@ -5067,7 +5066,7 @@ void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
 {
 }
 
-void svm_check_intercept(CPUState *env1, uint32_t type)
+void svm_check_intercept(CPUX86State *env1, uint32_t type)
 {
 }
 
@@ -5102,7 +5101,7 @@ static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
 }
 
 static inline void svm_load_seg_cache(target_phys_addr_t addr, 
-                                      CPUState *env, int seg_reg)
+                                      CPUX86State *env, int seg_reg)
 {
     SegmentCache sc1, *sc = &sc1;
     svm_load_seg(addr, sc);
@@ -5461,9 +5460,9 @@ void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
     }
 }
 
-void svm_check_intercept(CPUState *env1, uint32_t type)
+void svm_check_intercept(CPUX86State *env1, uint32_t type)
 {
-    CPUState *saved_env;
+    CPUX86State *saved_env;
 
     saved_env = env;
     env = env1;
@@ -5841,9 +5840,9 @@ uint32_t helper_cc_compute_all(int op)
     }
 }
 
-uint32_t cpu_cc_compute_all(CPUState *env1, int op)
+uint32_t cpu_cc_compute_all(CPUX86State *env1, int op)
 {
-    CPUState *saved_env;
+    CPUX86State *saved_env;
     uint32_t ret;
 
     saved_env = env;
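
op_helper.c keeps the dyngen-era convention of helpers that operate on a global env; the public wrappers shown above swap the caller's CPUX86State in and restore the previous pointer afterwards. A self-contained sketch of that save/restore pattern, with invented types and helper names:

    #include <stdio.h>

    typedef struct CPUX86State { int eip; } CPUX86State;

    static CPUX86State *env;                /* global, as in the old helpers */

    static void helper_do_work(void)        /* only sees the global env */
    {
        env->eip += 1;
    }

    void cpu_do_work(CPUX86State *env1)     /* externally callable wrapper */
    {
        CPUX86State *saved_env = env;
        env = env1;                         /* make env1 the current CPU */
        helper_do_work();
        env = saved_env;                    /* restore whatever was current */
    }

    int main(void)
    {
        CPUX86State cpu = { .eip = 100 };
        cpu_do_work(&cpu);
        printf("eip=%d\n", cpu.eip);
        return 0;
    }
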
diff --git a/target-i386/translate.c b/target-i386/translate.c
index 860b4a325f..c1ede1a756 100644
--- a/target-i386/translate.c
+++ b/target-i386/translate.c
@@ -388,7 +388,7 @@ static inline void gen_op_addl_T0_T1(void)
 
 static inline void gen_op_jmp_T0(void)
 {
-    tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUState, eip));
+    tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, eip));
 }
 
 static inline void gen_op_add_reg_im(int size, int reg, int32_t val)
@@ -453,12 +453,12 @@ static inline void gen_op_addl_A0_reg_sN(int shift, int reg)
 
 static inline void gen_op_movl_A0_seg(int reg)
 {
-    tcg_gen_ld32u_tl(cpu_A0, cpu_env, offsetof(CPUState, segs[reg].base) + REG_L_OFFSET);
+    tcg_gen_ld32u_tl(cpu_A0, cpu_env, offsetof(CPUX86State, segs[reg].base) + REG_L_OFFSET);
 }
 
 static inline void gen_op_addl_A0_seg(int reg)
 {
-    tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, segs[reg].base));
+    tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, segs[reg].base));
     tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
 #ifdef TARGET_X86_64
     tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
@@ -468,12 +468,12 @@ static inline void gen_op_addl_A0_seg(int reg)
 #ifdef TARGET_X86_64
 static inline void gen_op_movq_A0_seg(int reg)
 {
-    tcg_gen_ld_tl(cpu_A0, cpu_env, offsetof(CPUState, segs[reg].base));
+    tcg_gen_ld_tl(cpu_A0, cpu_env, offsetof(CPUX86State, segs[reg].base));
 }
 
 static inline void gen_op_addq_A0_seg(int reg)
 {
-    tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, segs[reg].base));
+    tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, segs[reg].base));
     tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
 }
 
@@ -583,7 +583,7 @@ static inline void gen_op_st_T1_A0(int idx)
 static inline void gen_jmp_im(target_ulong pc)
 {
     tcg_gen_movi_tl(cpu_tmp0, pc);
-    tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUState, eip));
+    tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, eip));
 }
 
 static inline void gen_string_movl_A0_ESI(DisasContext *s)
@@ -644,7 +644,7 @@ static inline void gen_string_movl_A0_EDI(DisasContext *s)
 
 static inline void gen_op_movl_T0_Dshift(int ot) 
 {
-    tcg_gen_ld32s_tl(cpu_T[0], cpu_env, offsetof(CPUState, df));
+    tcg_gen_ld32s_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, df));
     tcg_gen_shli_tl(cpu_T[0], cpu_T[0], ot);
 };
 
@@ -6466,11 +6466,11 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
         break;
     case 0xfc: /* cld */
         tcg_gen_movi_i32(cpu_tmp2_i32, 1);
-        tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUState, df));
+        tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
         break;
     case 0xfd: /* std */
         tcg_gen_movi_i32(cpu_tmp2_i32, -1);
-        tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUState, df));
+        tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
         break;
 
         /************************/
@@ -7645,64 +7645,64 @@ void optimize_flags_init(void)
 {
     cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
     cpu_cc_op = tcg_global_mem_new_i32(TCG_AREG0,
-                                       offsetof(CPUState, cc_op), "cc_op");
-    cpu_cc_src = tcg_global_mem_new(TCG_AREG0, offsetof(CPUState, cc_src),
+                                       offsetof(CPUX86State, cc_op), "cc_op");
+    cpu_cc_src = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_src),
                                     "cc_src");
-    cpu_cc_dst = tcg_global_mem_new(TCG_AREG0, offsetof(CPUState, cc_dst),
+    cpu_cc_dst = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_dst),
                                     "cc_dst");
-    cpu_cc_tmp = tcg_global_mem_new(TCG_AREG0, offsetof(CPUState, cc_tmp),
+    cpu_cc_tmp = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_tmp),
                                     "cc_tmp");
 
 #ifdef TARGET_X86_64
     cpu_regs[R_EAX] = tcg_global_mem_new_i64(TCG_AREG0,
-                                             offsetof(CPUState, regs[R_EAX]), "rax");
+                                             offsetof(CPUX86State, regs[R_EAX]), "rax");
     cpu_regs[R_ECX] = tcg_global_mem_new_i64(TCG_AREG0,
-                                             offsetof(CPUState, regs[R_ECX]), "rcx");
+                                             offsetof(CPUX86State, regs[R_ECX]), "rcx");
     cpu_regs[R_EDX] = tcg_global_mem_new_i64(TCG_AREG0,
-                                             offsetof(CPUState, regs[R_EDX]), "rdx");
+                                             offsetof(CPUX86State, regs[R_EDX]), "rdx");
     cpu_regs[R_EBX] = tcg_global_mem_new_i64(TCG_AREG0,
-                                             offsetof(CPUState, regs[R_EBX]), "rbx");
+                                             offsetof(CPUX86State, regs[R_EBX]), "rbx");
     cpu_regs[R_ESP] = tcg_global_mem_new_i64(TCG_AREG0,
-                                             offsetof(CPUState, regs[R_ESP]), "rsp");
+                                             offsetof(CPUX86State, regs[R_ESP]), "rsp");
     cpu_regs[R_EBP] = tcg_global_mem_new_i64(TCG_AREG0,
-                                             offsetof(CPUState, regs[R_EBP]), "rbp");
+                                             offsetof(CPUX86State, regs[R_EBP]), "rbp");
     cpu_regs[R_ESI] = tcg_global_mem_new_i64(TCG_AREG0,
-                                             offsetof(CPUState, regs[R_ESI]), "rsi");
+                                             offsetof(CPUX86State, regs[R_ESI]), "rsi");
     cpu_regs[R_EDI] = tcg_global_mem_new_i64(TCG_AREG0,
-                                             offsetof(CPUState, regs[R_EDI]), "rdi");
+                                             offsetof(CPUX86State, regs[R_EDI]), "rdi");
     cpu_regs[8] = tcg_global_mem_new_i64(TCG_AREG0,
-                                         offsetof(CPUState, regs[8]), "r8");
+                                         offsetof(CPUX86State, regs[8]), "r8");
     cpu_regs[9] = tcg_global_mem_new_i64(TCG_AREG0,
-                                          offsetof(CPUState, regs[9]), "r9");
+                                          offsetof(CPUX86State, regs[9]), "r9");
     cpu_regs[10] = tcg_global_mem_new_i64(TCG_AREG0,
-                                          offsetof(CPUState, regs[10]), "r10");
+                                          offsetof(CPUX86State, regs[10]), "r10");
     cpu_regs[11] = tcg_global_mem_new_i64(TCG_AREG0,
-                                          offsetof(CPUState, regs[11]), "r11");
+                                          offsetof(CPUX86State, regs[11]), "r11");
     cpu_regs[12] = tcg_global_mem_new_i64(TCG_AREG0,
-                                          offsetof(CPUState, regs[12]), "r12");
+                                          offsetof(CPUX86State, regs[12]), "r12");
     cpu_regs[13] = tcg_global_mem_new_i64(TCG_AREG0,
-                                          offsetof(CPUState, regs[13]), "r13");
+                                          offsetof(CPUX86State, regs[13]), "r13");
     cpu_regs[14] = tcg_global_mem_new_i64(TCG_AREG0,
-                                          offsetof(CPUState, regs[14]), "r14");
+                                          offsetof(CPUX86State, regs[14]), "r14");
     cpu_regs[15] = tcg_global_mem_new_i64(TCG_AREG0,
-                                          offsetof(CPUState, regs[15]), "r15");
+                                          offsetof(CPUX86State, regs[15]), "r15");
 #else
     cpu_regs[R_EAX] = tcg_global_mem_new_i32(TCG_AREG0,
-                                             offsetof(CPUState, regs[R_EAX]), "eax");
+                                             offsetof(CPUX86State, regs[R_EAX]), "eax");
     cpu_regs[R_ECX] = tcg_global_mem_new_i32(TCG_AREG0,
-                                             offsetof(CPUState, regs[R_ECX]), "ecx");
+                                             offsetof(CPUX86State, regs[R_ECX]), "ecx");
     cpu_regs[R_EDX] = tcg_global_mem_new_i32(TCG_AREG0,
-                                             offsetof(CPUState, regs[R_EDX]), "edx");
+                                             offsetof(CPUX86State, regs[R_EDX]), "edx");
     cpu_regs[R_EBX] = tcg_global_mem_new_i32(TCG_AREG0,
-                                             offsetof(CPUState, regs[R_EBX]), "ebx");
+                                             offsetof(CPUX86State, regs[R_EBX]), "ebx");
     cpu_regs[R_ESP] = tcg_global_mem_new_i32(TCG_AREG0,
-                                             offsetof(CPUState, regs[R_ESP]), "esp");
+                                             offsetof(CPUX86State, regs[R_ESP]), "esp");
     cpu_regs[R_EBP] = tcg_global_mem_new_i32(TCG_AREG0,
-                                             offsetof(CPUState, regs[R_EBP]), "ebp");
+                                             offsetof(CPUX86State, regs[R_EBP]), "ebp");
     cpu_regs[R_ESI] = tcg_global_mem_new_i32(TCG_AREG0,
-                                             offsetof(CPUState, regs[R_ESI]), "esi");
+                                             offsetof(CPUX86State, regs[R_ESI]), "esi");
     cpu_regs[R_EDI] = tcg_global_mem_new_i32(TCG_AREG0,
-                                             offsetof(CPUState, regs[R_EDI]), "edi");
+                                             offsetof(CPUX86State, regs[R_EDI]), "edi");
 #endif
 
     /* register helpers */
@@ -7713,7 +7713,7 @@ void optimize_flags_init(void)
 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
    basic block 'tb'. If search_pc is TRUE, also generate PC
    information for each intermediate instruction. */
-static inline void gen_intermediate_code_internal(CPUState *env,
+static inline void gen_intermediate_code_internal(CPUX86State *env,
                                                   TranslationBlock *tb,
                                                   int search_pc)
 {
@@ -7890,17 +7890,17 @@ static inline void gen_intermediate_code_internal(CPUState *env,
     }
 }
 
-void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
+void gen_intermediate_code(CPUX86State *env, TranslationBlock *tb)
 {
     gen_intermediate_code_internal(env, tb, 0);
 }
 
-void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
+void gen_intermediate_code_pc(CPUX86State *env, TranslationBlock *tb)
 {
     gen_intermediate_code_internal(env, tb, 1);
 }
 
-void restore_state_to_opc(CPUState *env, TranslationBlock *tb, int pc_pos)
+void restore_state_to_opc(CPUX86State *env, TranslationBlock *tb, int pc_pos)
 {
     int cc_op;
 #ifdef DEBUG_DISAS