Diffstat (limited to 'target/i386')
 target/i386/Makefile.objs           |    7
 target/i386/TODO                    |   31
 target/i386/arch_dump.c             |  453
 target/i386/arch_memory_mapping.c   |  281
 target/i386/bpt_helper.c            |  328
 target/i386/cc_helper.c             |  385
 target/i386/cc_helper_template.h    |  242
 target/i386/cpu-qom.h               |   77
 target/i386/cpu.c                   | 3755
 target/i386/cpu.h                   | 1656
 target/i386/excp_helper.c           |  137
 target/i386/fpu_helper.c            | 1625
 target/i386/gdbstub.c               |  234
 target/i386/helper.c                | 1446
 target/i386/helper.h                |  230
 target/i386/hyperv.c                |  140
 target/i386/hyperv.h                |   42
 target/i386/int_helper.c            |  483
 target/i386/kvm-stub.c              |   42
 target/i386/kvm.c                   | 3536
 target/i386/kvm_i386.h              |   48
 target/i386/machine.c               | 1047
 target/i386/mem_helper.c            |  215
 target/i386/misc_helper.c           |  639
 target/i386/monitor.c               |  513
 target/i386/mpx_helper.c            |  168
 target/i386/ops_sse.h               | 2296
 target/i386/ops_sse_header.h        |  360
 target/i386/seg_helper.c            | 2568
 target/i386/shift_helper_template.h |  108
 target/i386/smm_helper.c            |  342
 target/i386/svm.h                   |  222
 target/i386/svm_helper.c            |  774
 target/i386/trace-events            |    7
 target/i386/translate.c             | 8502
 35 files changed, 32939 insertions(+), 0 deletions(-)
diff --git a/target/i386/Makefile.objs b/target/i386/Makefile.objs
new file mode 100644
index 0000000000..b223d7932b
--- /dev/null
+++ b/target/i386/Makefile.objs
@@ -0,0 +1,7 @@
+obj-y += translate.o helper.o cpu.o bpt_helper.o
+obj-y += excp_helper.o fpu_helper.o cc_helper.o int_helper.o svm_helper.o
+obj-y += smm_helper.o misc_helper.o mem_helper.o seg_helper.o mpx_helper.o
+obj-y += gdbstub.o
+obj-$(CONFIG_SOFTMMU) += machine.o arch_memory_mapping.o arch_dump.o monitor.o
+obj-$(CONFIG_KVM) += kvm.o hyperv.o
+obj-$(call lnot,$(CONFIG_KVM)) += kvm-stub.o
diff --git a/target/i386/TODO b/target/i386/TODO
new file mode 100644
index 0000000000..a8d69cf87f
--- /dev/null
+++ b/target/i386/TODO
@@ -0,0 +1,31 @@
+Correctness issues:
+
+- some EFLAGS manipulations incorrectly reset bit 0x2.
+- SVM: test, cpu save/restore, SMM save/restore.
+- x86_64: lcall/ljmp Intel/AMD differences?
+- better code fetch (different exception handling + CS.limit support)
+- user/kernel PUSHL/POPL in helper.c
+- add missing cpuid tests
+- raise a #UD exception if the LOCK prefix is incorrectly used
+- test LDT limit < 7?
+- fix some 16-bit SP push/pop overflows (pusha/popa, lcall, lret)
+- full support of segment limits/rights
+- full x87 exception support
+- improve x87 bit exactness (use Bochs code?)
+- DRx register support
+- CR0.AC emulation
+- SSE alignment checks
+
+Optimizations/Features:
+
+- add SVM nested paging support
+- add VMX support
+- add AVX support
+- add SSE5 support
+- fxsave/fxrstor AMD extensions
+- improve monitor/mwait support
+- faster EFLAGS updates: consider that SZAP, C and O can be updated
+  separately, with a bit field in CC_OP and more state variables.
+- evaluate x87 stack pointer statically
+- find a way to avoid translating the same TB several times depending
+  on whether CR0.TS is set.
diff --git a/target/i386/arch_dump.c b/target/i386/arch_dump.c
new file mode 100644
index 0000000000..5a2e4be5d0
--- /dev/null
+++ b/target/i386/arch_dump.c
@@ -0,0 +1,453 @@
+/*
+ * i386 memory mapping
+ *
+ * Copyright Fujitsu, Corp. 2011, 2012
+ *
+ * Authors:
+ *     Wen Congyang <wency@cn.fujitsu.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/cpu-all.h"
+#include "sysemu/dump.h"
+#include "elf.h"
+#include "sysemu/memory_mapping.h"
+
+#ifdef TARGET_X86_64
+typedef struct {
+    target_ulong r15, r14, r13, r12, rbp, rbx, r11, r10;
+    target_ulong r9, r8, rax, rcx, rdx, rsi, rdi, orig_rax;
+    target_ulong rip, cs, eflags;
+    target_ulong rsp, ss;
+    target_ulong fs_base, gs_base;
+    target_ulong ds, es, fs, gs;
+} x86_64_user_regs_struct;
+
+typedef struct {
+    char pad1[32];
+    uint32_t pid;
+    char pad2[76];
+    x86_64_user_regs_struct regs;
+    char pad3[8];
+} x86_64_elf_prstatus;
+
+static int x86_64_write_elf64_note(WriteCoreDumpFunction f,
+                                   CPUX86State *env, int id,
+                                   void *opaque)
+{
+    x86_64_user_regs_struct regs;
+    Elf64_Nhdr *note;
+    char *buf;
+    int descsz, note_size, name_size = 5;
+    const char *name = "CORE";
+    int ret;
+
+    regs.r15 = env->regs[15];
+    regs.r14 = env->regs[14];
+    regs.r13 = env->regs[13];
+    regs.r12 = env->regs[12];
+    regs.r11 = env->regs[11];
+    regs.r10 = env->regs[10];
+    regs.r9  = env->regs[9];
+    regs.r8  = env->regs[8];
+    regs.rbp = env->regs[R_EBP];
+    regs.rsp = env->regs[R_ESP];
+    regs.rdi = env->regs[R_EDI];
+    regs.rsi = env->regs[R_ESI];
+    regs.rdx = env->regs[R_EDX];
+    regs.rcx = env->regs[R_ECX];
+    regs.rbx = env->regs[R_EBX];
+    regs.rax = env->regs[R_EAX];
+    regs.rip = env->eip;
+    regs.eflags = env->eflags;
+
+    regs.orig_rax = 0; /* FIXME */
+    regs.cs = env->segs[R_CS].selector;
+    regs.ss = env->segs[R_SS].selector;
+    regs.fs_base = env->segs[R_FS].base;
+    regs.gs_base = env->segs[R_GS].base;
+    regs.ds = env->segs[R_DS].selector;
+    regs.es = env->segs[R_ES].selector;
+    regs.fs = env->segs[R_FS].selector;
+    regs.gs = env->segs[R_GS].selector;
+
+    descsz = sizeof(x86_64_elf_prstatus);
+    note_size = ((sizeof(Elf64_Nhdr) + 3) / 4 + (name_size + 3) / 4 +
+                (descsz + 3) / 4) * 4;
+    note = g_malloc0(note_size);
+    note->n_namesz = cpu_to_le32(name_size);
+    note->n_descsz = cpu_to_le32(descsz);
+    note->n_type = cpu_to_le32(NT_PRSTATUS);
+    buf = (char *)note;
+    buf += ((sizeof(Elf64_Nhdr) + 3) / 4) * 4;
+    memcpy(buf, name, name_size);
+    buf += ((name_size + 3) / 4) * 4;
+    memcpy(buf + 32, &id, 4); /* pr_pid */
+    buf += descsz - sizeof(x86_64_user_regs_struct) - sizeof(target_ulong);
+    memcpy(buf, &regs, sizeof(x86_64_user_regs_struct));
+
+    ret = f(note, note_size, opaque);
+    g_free(note);
+    if (ret < 0) {
+        return -1;
+    }
+
+    return 0;
+}
+#endif
+
+typedef struct {
+    uint32_t ebx, ecx, edx, esi, edi, ebp, eax;
+    unsigned short ds, __ds, es, __es;
+    unsigned short fs, __fs, gs, __gs;
+    uint32_t orig_eax, eip;
+    unsigned short cs, __cs;
+    uint32_t eflags, esp;
+    unsigned short ss, __ss;
+} x86_user_regs_struct;
+
+typedef struct {
+    char pad1[24];
+    uint32_t pid;
+    char pad2[44];
+    x86_user_regs_struct regs;
+    char pad3[4];
+} x86_elf_prstatus;
+
+static void x86_fill_elf_prstatus(x86_elf_prstatus *prstatus, CPUX86State *env,
+                                  int id)
+{
+    memset(prstatus, 0, sizeof(x86_elf_prstatus));
+    prstatus->regs.ebp = env->regs[R_EBP] & 0xffffffff;
+    prstatus->regs.esp = env->regs[R_ESP] & 0xffffffff;
+    prstatus->regs.edi = env->regs[R_EDI] & 0xffffffff;
+    prstatus->regs.esi = env->regs[R_ESI] & 0xffffffff;
+    prstatus->regs.edx = env->regs[R_EDX] & 0xffffffff;
+    prstatus->regs.ecx = env->regs[R_ECX] & 0xffffffff;
+    prstatus->regs.ebx = env->regs[R_EBX] & 0xffffffff;
+    prstatus->regs.eax = env->regs[R_EAX] & 0xffffffff;
+    prstatus->regs.eip = env->eip & 0xffffffff;
+    prstatus->regs.eflags = env->eflags & 0xffffffff;
+
+    prstatus->regs.cs = env->segs[R_CS].selector;
+    prstatus->regs.ss = env->segs[R_SS].selector;
+    prstatus->regs.ds = env->segs[R_DS].selector;
+    prstatus->regs.es = env->segs[R_ES].selector;
+    prstatus->regs.fs = env->segs[R_FS].selector;
+    prstatus->regs.gs = env->segs[R_GS].selector;
+
+    prstatus->pid = id;
+}
+
+static int x86_write_elf64_note(WriteCoreDumpFunction f, CPUX86State *env,
+                                int id, void *opaque)
+{
+    x86_elf_prstatus prstatus;
+    Elf64_Nhdr *note;
+    char *buf;
+    int descsz, note_size, name_size = 5;
+    const char *name = "CORE";
+    int ret;
+
+    x86_fill_elf_prstatus(&prstatus, env, id);
+    descsz = sizeof(x86_elf_prstatus);
+    note_size = ((sizeof(Elf64_Nhdr) + 3) / 4 + (name_size + 3) / 4 +
+                (descsz + 3) / 4) * 4;
+    note = g_malloc0(note_size);
+    note->n_namesz = cpu_to_le32(name_size);
+    note->n_descsz = cpu_to_le32(descsz);
+    note->n_type = cpu_to_le32(NT_PRSTATUS);
+    buf = (char *)note;
+    buf += ((sizeof(Elf64_Nhdr) + 3) / 4) * 4;
+    memcpy(buf, name, name_size);
+    buf += ((name_size + 3) / 4) * 4;
+    memcpy(buf, &prstatus, sizeof(prstatus));
+
+    ret = f(note, note_size, opaque);
+    g_free(note);
+    if (ret < 0) {
+        return -1;
+    }
+
+    return 0;
+}
+
+int x86_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs,
+                             int cpuid, void *opaque)
+{
+    X86CPU *cpu = X86_CPU(cs);
+    int ret;
+#ifdef TARGET_X86_64
+    X86CPU *first_x86_cpu = X86_CPU(first_cpu);
+    bool lma = !!(first_x86_cpu->env.hflags & HF_LMA_MASK);
+
+    if (lma) {
+        ret = x86_64_write_elf64_note(f, &cpu->env, cpuid, opaque);
+    } else {
+#endif
+        ret = x86_write_elf64_note(f, &cpu->env, cpuid, opaque);
+#ifdef TARGET_X86_64
+    }
+#endif
+
+    return ret;
+}
+
+int x86_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs,
+                             int cpuid, void *opaque)
+{
+    X86CPU *cpu = X86_CPU(cs);
+    x86_elf_prstatus prstatus;
+    Elf32_Nhdr *note;
+    char *buf;
+    int descsz, note_size, name_size = 5;
+    const char *name = "CORE";
+    int ret;
+
+    x86_fill_elf_prstatus(&prstatus, &cpu->env, cpuid);
+    descsz = sizeof(x86_elf_prstatus);
+    note_size = ((sizeof(Elf32_Nhdr) + 3) / 4 + (name_size + 3) / 4 +
+                (descsz + 3) / 4) * 4;
+    note = g_malloc0(note_size);
+    note->n_namesz = cpu_to_le32(name_size);
+    note->n_descsz = cpu_to_le32(descsz);
+    note->n_type = cpu_to_le32(NT_PRSTATUS);
+    buf = (char *)note;
+    buf += ((sizeof(Elf32_Nhdr) + 3) / 4) * 4;
+    memcpy(buf, name, name_size);
+    buf += ((name_size + 3) / 4) * 4;
+    memcpy(buf, &prstatus, sizeof(prstatus));
+
+    ret = f(note, note_size, opaque);
+    g_free(note);
+    if (ret < 0) {
+        return -1;
+    }
+
+    return 0;
+}
+
+/*
+ * Please increment QEMUCPUSTATE_VERSION whenever the definition of
+ * QEMUCPUState changes, and update the tools using this information.
+ */
+#define QEMUCPUSTATE_VERSION (1)
+
+struct QEMUCPUSegment {
+    uint32_t selector;
+    uint32_t limit;
+    uint32_t flags;
+    uint32_t pad;
+    uint64_t base;
+};
+
+typedef struct QEMUCPUSegment QEMUCPUSegment;
+
+struct QEMUCPUState {
+    uint32_t version;
+    uint32_t size;
+    uint64_t rax, rbx, rcx, rdx, rsi, rdi, rsp, rbp;
+    uint64_t r8, r9, r10, r11, r12, r13, r14, r15;
+    uint64_t rip, rflags;
+    QEMUCPUSegment cs, ds, es, fs, gs, ss;
+    QEMUCPUSegment ldt, tr, gdt, idt;
+    uint64_t cr[5];
+};
+
+typedef struct QEMUCPUState QEMUCPUState;
+
+static void copy_segment(QEMUCPUSegment *d, SegmentCache *s)
+{
+    d->pad = 0;
+    d->selector = s->selector;
+    d->limit = s->limit;
+    d->flags = s->flags;
+    d->base = s->base;
+}
+
+static void qemu_get_cpustate(QEMUCPUState *s, CPUX86State *env)
+{
+    memset(s, 0, sizeof(QEMUCPUState));
+
+    s->version = QEMUCPUSTATE_VERSION;
+    s->size = sizeof(QEMUCPUState);
+
+    s->rax = env->regs[R_EAX];
+    s->rbx = env->regs[R_EBX];
+    s->rcx = env->regs[R_ECX];
+    s->rdx = env->regs[R_EDX];
+    s->rsi = env->regs[R_ESI];
+    s->rdi = env->regs[R_EDI];
+    s->rsp = env->regs[R_ESP];
+    s->rbp = env->regs[R_EBP];
+#ifdef TARGET_X86_64
+    s->r8  = env->regs[8];
+    s->r9  = env->regs[9];
+    s->r10 = env->regs[10];
+    s->r11 = env->regs[11];
+    s->r12 = env->regs[12];
+    s->r13 = env->regs[13];
+    s->r14 = env->regs[14];
+    s->r15 = env->regs[15];
+#endif
+    s->rip = env->eip;
+    s->rflags = env->eflags;
+
+    copy_segment(&s->cs, &env->segs[R_CS]);
+    copy_segment(&s->ds, &env->segs[R_DS]);
+    copy_segment(&s->es, &env->segs[R_ES]);
+    copy_segment(&s->fs, &env->segs[R_FS]);
+    copy_segment(&s->gs, &env->segs[R_GS]);
+    copy_segment(&s->ss, &env->segs[R_SS]);
+    copy_segment(&s->ldt, &env->ldt);
+    copy_segment(&s->tr, &env->tr);
+    copy_segment(&s->gdt, &env->gdt);
+    copy_segment(&s->idt, &env->idt);
+
+    s->cr[0] = env->cr[0];
+    s->cr[1] = env->cr[1];
+    s->cr[2] = env->cr[2];
+    s->cr[3] = env->cr[3];
+    s->cr[4] = env->cr[4];
+}
+
+static inline int cpu_write_qemu_note(WriteCoreDumpFunction f,
+                                      CPUX86State *env,
+                                      void *opaque,
+                                      int type)
+{
+    QEMUCPUState state;
+    Elf64_Nhdr *note64;
+    Elf32_Nhdr *note32;
+    void *note;
+    char *buf;
+    int descsz, note_size, name_size = 5, note_head_size;
+    const char *name = "QEMU";
+    int ret;
+
+    qemu_get_cpustate(&state, env);
+
+    descsz = sizeof(state);
+    if (type == 0) {
+        note_head_size = sizeof(Elf32_Nhdr);
+    } else {
+        note_head_size = sizeof(Elf64_Nhdr);
+    }
+    note_size = ((note_head_size + 3) / 4 + (name_size + 3) / 4 +
+                (descsz + 3) / 4) * 4;
+    note = g_malloc0(note_size);
+    if (type == 0) {
+        note32 = note;
+        note32->n_namesz = cpu_to_le32(name_size);
+        note32->n_descsz = cpu_to_le32(descsz);
+        note32->n_type = 0;
+    } else {
+        note64 = note;
+        note64->n_namesz = cpu_to_le32(name_size);
+        note64->n_descsz = cpu_to_le32(descsz);
+        note64->n_type = 0;
+    }
+    buf = note;
+    buf += ((note_head_size + 3) / 4) * 4;
+    memcpy(buf, name, name_size);
+    buf += ((name_size + 3) / 4) * 4;
+    memcpy(buf, &state, sizeof(state));
+
+    ret = f(note, note_size, opaque);
+    g_free(note);
+    if (ret < 0) {
+        return -1;
+    }
+
+    return 0;
+}
+
+int x86_cpu_write_elf64_qemunote(WriteCoreDumpFunction f, CPUState *cs,
+                                 void *opaque)
+{
+    X86CPU *cpu = X86_CPU(cs);
+
+    return cpu_write_qemu_note(f, &cpu->env, opaque, 1);
+}
+
+int x86_cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cs,
+                                 void *opaque)
+{
+    X86CPU *cpu = X86_CPU(cs);
+
+    return cpu_write_qemu_note(f, &cpu->env, opaque, 0);
+}
+
+int cpu_get_dump_info(ArchDumpInfo *info,
+                      const GuestPhysBlockList *guest_phys_blocks)
+{
+    bool lma = false;
+    GuestPhysBlock *block;
+
+#ifdef TARGET_X86_64
+    X86CPU *first_x86_cpu = X86_CPU(first_cpu);
+
+    lma = !!(first_x86_cpu->env.hflags & HF_LMA_MASK);
+#endif
+
+    if (lma) {
+        info->d_machine = EM_X86_64;
+    } else {
+        info->d_machine = EM_386;
+    }
+    info->d_endian = ELFDATA2LSB;
+
+    if (lma) {
+        info->d_class = ELFCLASS64;
+    } else {
+        info->d_class = ELFCLASS32;
+
+        QTAILQ_FOREACH(block, &guest_phys_blocks->head, next) {
+            if (block->target_end > UINT_MAX) {
+                /* The memory size is greater than 4G */
+                info->d_class = ELFCLASS64;
+                break;
+            }
+        }
+    }
+
+    return 0;
+}
+
+ssize_t cpu_get_note_size(int class, int machine, int nr_cpus)
+{
+    int name_size = 5; /* "CORE" or "QEMU" */
+    size_t elf_note_size = 0;
+    size_t qemu_note_size = 0;
+    int elf_desc_size = 0;
+    int qemu_desc_size = 0;
+    int note_head_size;
+
+    if (class == ELFCLASS32) {
+        note_head_size = sizeof(Elf32_Nhdr);
+    } else {
+        note_head_size = sizeof(Elf64_Nhdr);
+    }
+
+    if (machine == EM_386) {
+        elf_desc_size = sizeof(x86_elf_prstatus);
+    }
+#ifdef TARGET_X86_64
+    else {
+        elf_desc_size = sizeof(x86_64_elf_prstatus);
+    }
+#endif
+    qemu_desc_size = sizeof(QEMUCPUState);
+
+    elf_note_size = ((note_head_size + 3) / 4 + (name_size + 3) / 4 +
+                     (elf_desc_size + 3) / 4) * 4;
+    qemu_note_size = ((note_head_size + 3) / 4 + (name_size + 3) / 4 +
+                      (qemu_desc_size + 3) / 4) * 4;
+
+    return (elf_note_size + qemu_note_size) * nr_cpus;
+}
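Every note-building function above sizes its buffer with the same rounding pattern: the ELF note header, the name ("CORE" or "QEMU"), and the descriptor are each padded up to 4-byte alignment, as the ELF note format requires. A minimal standalone sketch of that computation (helper names here are invented; the x86_64_elf_prstatus above works out to 336 bytes, used as the example descriptor):

#include <stddef.h>
#include <stdio.h>

/* Round x up to the next multiple of 4, as ELF note fields require. */
static size_t align4(size_t x)
{
    return ((x + 3) / 4) * 4;
}

/* Total size of one note: padded header + padded name + padded desc.
   This mirrors the "((a + 3) / 4 + (b + 3) / 4 + (c + 3) / 4) * 4"
   expressions used throughout arch_dump.c. */
static size_t elf_note_size(size_t head, size_t name, size_t desc)
{
    return align4(head) + align4(name) + align4(desc);
}

int main(void)
{
    /* 12-byte Elf64_Nhdr, 5-byte name "CORE", 336-byte prstatus */
    printf("%zu bytes\n", elf_note_size(12, 5, 336)); /* 356 bytes */
    return 0;
}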
diff --git a/target/i386/arch_memory_mapping.c b/target/i386/arch_memory_mapping.c
new file mode 100644
index 0000000000..88f341e1bb
--- /dev/null
+++ b/target/i386/arch_memory_mapping.c
@@ -0,0 +1,281 @@
+/*
+ * i386 memory mapping
+ *
+ * Copyright Fujitsu, Corp. 2011, 2012
+ *
+ * Authors:
+ *     Wen Congyang <wency@cn.fujitsu.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/cpu-all.h"
+#include "sysemu/memory_mapping.h"
+
+/* PAE Paging or IA-32e Paging */
+static void walk_pte(MemoryMappingList *list, AddressSpace *as,
+                     hwaddr pte_start_addr,
+                     int32_t a20_mask, target_ulong start_line_addr)
+{
+    hwaddr pte_addr, start_paddr;
+    uint64_t pte;
+    target_ulong start_vaddr;
+    int i;
+
+    for (i = 0; i < 512; i++) {
+        pte_addr = (pte_start_addr + i * 8) & a20_mask;
+        pte = address_space_ldq(as, pte_addr, MEMTXATTRS_UNSPECIFIED, NULL);
+        if (!(pte & PG_PRESENT_MASK)) {
+            /* not present */
+            continue;
+        }
+
+        start_paddr = (pte & ~0xfff) & ~(0x1ULL << 63);
+        if (cpu_physical_memory_is_io(start_paddr)) {
+            /* I/O region */
+            continue;
+        }
+
+        start_vaddr = start_line_addr | ((i & 0x1ff) << 12);
+        memory_mapping_list_add_merge_sorted(list, start_paddr,
+                                             start_vaddr, 1 << 12);
+    }
+}
+
+/* 32-bit Paging */
+static void walk_pte2(MemoryMappingList *list, AddressSpace *as,
+                      hwaddr pte_start_addr, int32_t a20_mask,
+                      target_ulong start_line_addr)
+{
+    hwaddr pte_addr, start_paddr;
+    uint32_t pte;
+    target_ulong start_vaddr;
+    int i;
+
+    for (i = 0; i < 1024; i++) {
+        pte_addr = (pte_start_addr + i * 4) & a20_mask;
+        pte = address_space_ldl(as, pte_addr, MEMTXATTRS_UNSPECIFIED, NULL);
+        if (!(pte & PG_PRESENT_MASK)) {
+            /* not present */
+            continue;
+        }
+
+        start_paddr = pte & ~0xfff;
+        if (cpu_physical_memory_is_io(start_paddr)) {
+            /* I/O region */
+            continue;
+        }
+
+        start_vaddr = start_line_addr | ((i & 0x3ff) << 12);
+        memory_mapping_list_add_merge_sorted(list, start_paddr,
+                                             start_vaddr, 1 << 12);
+    }
+}
+
+/* PAE Paging or IA-32e Paging */
+#define PLM4_ADDR_MASK 0xffffffffff000ULL /* selects bits 51:12 */
+
+static void walk_pde(MemoryMappingList *list, AddressSpace *as,
+                     hwaddr pde_start_addr,
+                     int32_t a20_mask, target_ulong start_line_addr)
+{
+    hwaddr pde_addr, pte_start_addr, start_paddr;
+    uint64_t pde;
+    target_ulong line_addr, start_vaddr;
+    int i;
+
+    for (i = 0; i < 512; i++) {
+        pde_addr = (pde_start_addr + i * 8) & a20_mask;
+        pde = address_space_ldq(as, pde_addr, MEMTXATTRS_UNSPECIFIED, NULL);
+        if (!(pde & PG_PRESENT_MASK)) {
+            /* not present */
+            continue;
+        }
+
+        line_addr = start_line_addr | ((i & 0x1ff) << 21);
+        if (pde & PG_PSE_MASK) {
+            /* 2 MB page */
+            start_paddr = (pde & ~0x1fffff) & ~(0x1ULL << 63);
+            if (cpu_physical_memory_is_io(start_paddr)) {
+                /* I/O region */
+                continue;
+            }
+            start_vaddr = line_addr;
+            memory_mapping_list_add_merge_sorted(list, start_paddr,
+                                                 start_vaddr, 1 << 21);
+            continue;
+        }
+
+        pte_start_addr = (pde & PLM4_ADDR_MASK) & a20_mask;
+        walk_pte(list, as, pte_start_addr, a20_mask, line_addr);
+    }
+}
+
+/* 32-bit Paging */
+static void walk_pde2(MemoryMappingList *list, AddressSpace *as,
+                      hwaddr pde_start_addr, int32_t a20_mask,
+                      bool pse)
+{
+    hwaddr pde_addr, pte_start_addr, start_paddr, high_paddr;
+    uint32_t pde;
+    target_ulong line_addr, start_vaddr;
+    int i;
+
+    for (i = 0; i < 1024; i++) {
+        pde_addr = (pde_start_addr + i * 4) & a20_mask;
+        pde = address_space_ldl(as, pde_addr, MEMTXATTRS_UNSPECIFIED, NULL);
+        if (!(pde & PG_PRESENT_MASK)) {
+            /* not present */
+            continue;
+        }
+
+        line_addr = (((unsigned int)i & 0x3ff) << 22);
+        if ((pde & PG_PSE_MASK) && pse) {
+            /*
+             * 4 MB page:
+             * bits 39:32 are bits 20:13 of the PDE
+             * bits 31:22 are bits 31:22 of the PDE
+             */
+            high_paddr = ((hwaddr)(pde & 0x1fe000) << 19);
+            start_paddr = (pde & ~0x3fffff) | high_paddr;
+            if (cpu_physical_memory_is_io(start_paddr)) {
+                /* I/O region */
+                continue;
+            }
+            start_vaddr = line_addr;
+            memory_mapping_list_add_merge_sorted(list, start_paddr,
+                                                 start_vaddr, 1 << 22);
+            continue;
+        }
+
+        pte_start_addr = (pde & ~0xfff) & a20_mask;
+        walk_pte2(list, as, pte_start_addr, a20_mask, line_addr);
+    }
+}
+
+/* PAE Paging */
+static void walk_pdpe2(MemoryMappingList *list, AddressSpace *as,
+                       hwaddr pdpe_start_addr, int32_t a20_mask)
+{
+    hwaddr pdpe_addr, pde_start_addr;
+    uint64_t pdpe;
+    target_ulong line_addr;
+    int i;
+
+    for (i = 0; i < 4; i++) {
+        pdpe_addr = (pdpe_start_addr + i * 8) & a20_mask;
+        pdpe = address_space_ldq(as, pdpe_addr, MEMTXATTRS_UNSPECIFIED, NULL);
+        if (!(pdpe & PG_PRESENT_MASK)) {
+            /* not present */
+            continue;
+        }
+
+        line_addr = (((unsigned int)i & 0x3) << 30);
+        pde_start_addr = (pdpe & ~0xfff) & a20_mask;
+        walk_pde(list, as, pde_start_addr, a20_mask, line_addr);
+    }
+}
+
+#ifdef TARGET_X86_64
+/* IA-32e Paging */
+static void walk_pdpe(MemoryMappingList *list, AddressSpace *as,
+                      hwaddr pdpe_start_addr, int32_t a20_mask,
+                      target_ulong start_line_addr)
+{
+    hwaddr pdpe_addr, pde_start_addr, start_paddr;
+    uint64_t pdpe;
+    target_ulong line_addr, start_vaddr;
+    int i;
+
+    for (i = 0; i < 512; i++) {
+        pdpe_addr = (pdpe_start_addr + i * 8) & a20_mask;
+        pdpe = address_space_ldq(as, pdpe_addr, MEMTXATTRS_UNSPECIFIED, NULL);
+        if (!(pdpe & PG_PRESENT_MASK)) {
+            /* not present */
+            continue;
+        }
+
+        line_addr = start_line_addr | ((i & 0x1ffULL) << 30);
+        if (pdpe & PG_PSE_MASK) {
+            /* 1 GB page */
+            start_paddr = (pdpe & ~0x3fffffff) & ~(0x1ULL << 63);
+            if (cpu_physical_memory_is_io(start_paddr)) {
+                /* I/O region */
+                continue;
+            }
+            start_vaddr = line_addr;
+            memory_mapping_list_add_merge_sorted(list, start_paddr,
+                                                 start_vaddr, 1 << 30);
+            continue;
+        }
+
+        pde_start_addr = (pdpe & PLM4_ADDR_MASK) & a20_mask;
+        walk_pde(list, as, pde_start_addr, a20_mask, line_addr);
+    }
+}
+
+/* IA-32e Paging */
+static void walk_pml4e(MemoryMappingList *list, AddressSpace *as,
+                       hwaddr pml4e_start_addr, int32_t a20_mask)
+{
+    hwaddr pml4e_addr, pdpe_start_addr;
+    uint64_t pml4e;
+    target_ulong line_addr;
+    int i;
+
+    for (i = 0; i < 512; i++) {
+        pml4e_addr = (pml4e_start_addr + i * 8) & a20_mask;
+        pml4e = address_space_ldq(as, pml4e_addr, MEMTXATTRS_UNSPECIFIED,
+                                  NULL);
+        if (!(pml4e & PG_PRESENT_MASK)) {
+            /* not present */
+            continue;
+        }
+
+        line_addr = ((i & 0x1ffULL) << 39) | (0xffffULL << 48);
+        pdpe_start_addr = (pml4e & PLM4_ADDR_MASK) & a20_mask;
+        walk_pdpe(list, as, pdpe_start_addr, a20_mask, line_addr);
+    }
+}
+#endif
+
+void x86_cpu_get_memory_mapping(CPUState *cs, MemoryMappingList *list,
+                                Error **errp)
+{
+    X86CPU *cpu = X86_CPU(cs);
+    CPUX86State *env = &cpu->env;
+
+    if (!cpu_paging_enabled(cs)) {
+        /* paging is disabled */
+        return;
+    }
+
+    if (env->cr[4] & CR4_PAE_MASK) {
+#ifdef TARGET_X86_64
+        if (env->hflags & HF_LMA_MASK) {
+            hwaddr pml4e_addr;
+
+            pml4e_addr = (env->cr[3] & PLM4_ADDR_MASK) & env->a20_mask;
+            walk_pml4e(list, cs->as, pml4e_addr, env->a20_mask);
+        } else
+#endif
+        {
+            hwaddr pdpe_addr;
+
+            pdpe_addr = (env->cr[3] & ~0x1f) & env->a20_mask;
+            walk_pdpe2(list, cs->as, pdpe_addr, env->a20_mask);
+        }
+    } else {
+        hwaddr pde_addr;
+        bool pse;
+
+        pde_addr = (env->cr[3] & ~0xfff) & env->a20_mask;
+        pse = !!(env->cr[4] & CR4_PSE_MASK);
+        walk_pde2(list, cs->as, pde_addr, env->a20_mask, pse);
+    }
+}
+
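The walkers above rebuild virtual addresses from table indices as they descend: under 32-bit paging, bits 31:22 of a virtual address select the page-directory entry and bits 21:12 select the page-table entry, which is exactly what the line_addr computations in walk_pde2() and walk_pte2() encode. A small sketch of that mapping (plain C, no QEMU types assumed):

#include <stdint.h>
#include <stdio.h>

/* Virtual address covered by PDE index i and PTE index j under
   32-bit paging: i supplies bits 31:22, j supplies bits 21:12. */
static uint32_t vaddr_from_indices(unsigned i, unsigned j)
{
    return ((uint32_t)(i & 0x3ff) << 22) | ((uint32_t)(j & 0x3ff) << 12);
}

int main(void)
{
    /* PDE 1, PTE 2 -> 0x00402000, matching the start_vaddr logic above */
    printf("0x%08x\n", vaddr_from_indices(1, 2));
    return 0;
}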
diff --git a/target/i386/bpt_helper.c b/target/i386/bpt_helper.c
new file mode 100644
index 0000000000..6fd7fe04a0
--- /dev/null
+++ b/target/i386/bpt_helper.c
@@ -0,0 +1,328 @@
+/*
+ *  i386 breakpoint helpers
+ *
+ *  Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "exec/helper-proto.h"
+
+
+#ifndef CONFIG_USER_ONLY
+static inline bool hw_local_breakpoint_enabled(unsigned long dr7, int index)
+{
+    return (dr7 >> (index * 2)) & 1;
+}
+
+static inline bool hw_global_breakpoint_enabled(unsigned long dr7, int index)
+{
+    return (dr7 >> (index * 2)) & 2;
+}
+
+static inline bool hw_breakpoint_enabled(unsigned long dr7, int index)
+{
+    return hw_global_breakpoint_enabled(dr7, index) ||
+           hw_local_breakpoint_enabled(dr7, index);
+}
+
+static inline int hw_breakpoint_type(unsigned long dr7, int index)
+{
+    return (dr7 >> (DR7_TYPE_SHIFT + (index * 4))) & 3;
+}
+
+static inline int hw_breakpoint_len(unsigned long dr7, int index)
+{
+    int len = ((dr7 >> (DR7_LEN_SHIFT + (index * 4))) & 3);
+    return (len == 2) ? 8 : len + 1;
+}
+
+static int hw_breakpoint_insert(CPUX86State *env, int index)
+{
+    CPUState *cs = CPU(x86_env_get_cpu(env));
+    target_ulong dr7 = env->dr[7];
+    target_ulong drN = env->dr[index];
+    int err = 0;
+
+    switch (hw_breakpoint_type(dr7, index)) {
+    case DR7_TYPE_BP_INST:
+        if (hw_breakpoint_enabled(dr7, index)) {
+            err = cpu_breakpoint_insert(cs, drN, BP_CPU,
+                                        &env->cpu_breakpoint[index]);
+        }
+        break;
+
+    case DR7_TYPE_IO_RW:
+        /* Notice when we should enable calls to bpt_io.  */
+        return hw_breakpoint_enabled(env->dr[7], index)
+               ? HF_IOBPT_MASK : 0;
+
+    case DR7_TYPE_DATA_WR:
+        if (hw_breakpoint_enabled(dr7, index)) {
+            err = cpu_watchpoint_insert(cs, drN,
+                                        hw_breakpoint_len(dr7, index),
+                                        BP_CPU | BP_MEM_WRITE,
+                                        &env->cpu_watchpoint[index]);
+        }
+        break;
+
+    case DR7_TYPE_DATA_RW:
+        if (hw_breakpoint_enabled(dr7, index)) {
+            err = cpu_watchpoint_insert(cs, drN,
+                                        hw_breakpoint_len(dr7, index),
+                                        BP_CPU | BP_MEM_ACCESS,
+                                        &env->cpu_watchpoint[index]);
+        }
+        break;
+    }
+    if (err) {
+        env->cpu_breakpoint[index] = NULL;
+    }
+    return 0;
+}
+
+static void hw_breakpoint_remove(CPUX86State *env, int index)
+{
+    CPUState *cs = CPU(x86_env_get_cpu(env));
+
+    switch (hw_breakpoint_type(env->dr[7], index)) {
+    case DR7_TYPE_BP_INST:
+        if (env->cpu_breakpoint[index]) {
+            cpu_breakpoint_remove_by_ref(cs, env->cpu_breakpoint[index]);
+            env->cpu_breakpoint[index] = NULL;
+        }
+        break;
+
+    case DR7_TYPE_DATA_WR:
+    case DR7_TYPE_DATA_RW:
+        if (env->cpu_breakpoint[index]) {
+            cpu_watchpoint_remove_by_ref(cs, env->cpu_watchpoint[index]);
+            env->cpu_breakpoint[index] = NULL;
+        }
+        break;
+
+    case DR7_TYPE_IO_RW:
+        /* HF_IOBPT_MASK cleared elsewhere.  */
+        break;
+    }
+}
+
+void cpu_x86_update_dr7(CPUX86State *env, uint32_t new_dr7)
+{
+    target_ulong old_dr7 = env->dr[7];
+    int iobpt = 0;
+    int i;
+
+    new_dr7 |= DR7_FIXED_1;
+
+    /* If nothing is changing except the global/local enable bits,
+       then we can make the change more efficient.  */
+    if (((old_dr7 ^ new_dr7) & ~0xff) == 0) {
+        /* Fold the global and local enable bits together into the
+           global fields, then xor to show which registers have
+           changed collective enable state.  */
+        int mod = ((old_dr7 | old_dr7 * 2) ^ (new_dr7 | new_dr7 * 2)) & 0xff;
+
+        for (i = 0; i < DR7_MAX_BP; i++) {
+            if ((mod & (2 << i * 2)) && !hw_breakpoint_enabled(new_dr7, i)) {
+                hw_breakpoint_remove(env, i);
+            }
+        }
+        env->dr[7] = new_dr7;
+        for (i = 0; i < DR7_MAX_BP; i++) {
+            if (mod & (2 << i * 2) && hw_breakpoint_enabled(new_dr7, i)) {
+                iobpt |= hw_breakpoint_insert(env, i);
+            } else if (hw_breakpoint_type(new_dr7, i) == DR7_TYPE_IO_RW
+                       && hw_breakpoint_enabled(new_dr7, i)) {
+                iobpt |= HF_IOBPT_MASK;
+            }
+        }
+    } else {
+        for (i = 0; i < DR7_MAX_BP; i++) {
+            hw_breakpoint_remove(env, i);
+        }
+        env->dr[7] = new_dr7;
+        for (i = 0; i < DR7_MAX_BP; i++) {
+            iobpt |= hw_breakpoint_insert(env, i);
+        }
+    }
+
+    env->hflags = (env->hflags & ~HF_IOBPT_MASK) | iobpt;
+}
+
+static bool check_hw_breakpoints(CPUX86State *env, bool force_dr6_update)
+{
+    target_ulong dr6;
+    int reg;
+    bool hit_enabled = false;
+
+    dr6 = env->dr[6] & ~0xf;
+    for (reg = 0; reg < DR7_MAX_BP; reg++) {
+        bool bp_match = false;
+        bool wp_match = false;
+
+        switch (hw_breakpoint_type(env->dr[7], reg)) {
+        case DR7_TYPE_BP_INST:
+            if (env->dr[reg] == env->eip) {
+                bp_match = true;
+            }
+            break;
+        case DR7_TYPE_DATA_WR:
+        case DR7_TYPE_DATA_RW:
+            if (env->cpu_watchpoint[reg] &&
+                env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT) {
+                wp_match = true;
+            }
+            break;
+        case DR7_TYPE_IO_RW:
+            break;
+        }
+        if (bp_match || wp_match) {
+            dr6 |= 1 << reg;
+            if (hw_breakpoint_enabled(env->dr[7], reg)) {
+                hit_enabled = true;
+            }
+        }
+    }
+
+    if (hit_enabled || force_dr6_update) {
+        env->dr[6] = dr6;
+    }
+
+    return hit_enabled;
+}
+
+void breakpoint_handler(CPUState *cs)
+{
+    X86CPU *cpu = X86_CPU(cs);
+    CPUX86State *env = &cpu->env;
+    CPUBreakpoint *bp;
+
+    if (cs->watchpoint_hit) {
+        if (cs->watchpoint_hit->flags & BP_CPU) {
+            cs->watchpoint_hit = NULL;
+            if (check_hw_breakpoints(env, false)) {
+                raise_exception(env, EXCP01_DB);
+            } else {
+                cpu_loop_exit_noexc(cs);
+            }
+        }
+    } else {
+        QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
+            if (bp->pc == env->eip) {
+                if (bp->flags & BP_CPU) {
+                    check_hw_breakpoints(env, true);
+                    raise_exception(env, EXCP01_DB);
+                }
+                break;
+            }
+        }
+    }
+}
+#endif
+
+void helper_single_step(CPUX86State *env)
+{
+#ifndef CONFIG_USER_ONLY
+    check_hw_breakpoints(env, true);
+    env->dr[6] |= DR6_BS;
+#endif
+    raise_exception(env, EXCP01_DB);
+}
+
+void helper_set_dr(CPUX86State *env, int reg, target_ulong t0)
+{
+#ifndef CONFIG_USER_ONLY
+    switch (reg) {
+    case 0: case 1: case 2: case 3:
+        if (hw_breakpoint_enabled(env->dr[7], reg)
+            && hw_breakpoint_type(env->dr[7], reg) != DR7_TYPE_IO_RW) {
+            hw_breakpoint_remove(env, reg);
+            env->dr[reg] = t0;
+            hw_breakpoint_insert(env, reg);
+        } else {
+            env->dr[reg] = t0;
+        }
+        return;
+    case 4:
+        if (env->cr[4] & CR4_DE_MASK) {
+            break;
+        }
+        /* fallthru */
+    case 6:
+        env->dr[6] = t0 | DR6_FIXED_1;
+        return;
+    case 5:
+        if (env->cr[4] & CR4_DE_MASK) {
+            break;
+        }
+        /* fallthru */
+    case 7:
+        cpu_x86_update_dr7(env, t0);
+        return;
+    }
+    raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
+#endif
+}
+
+target_ulong helper_get_dr(CPUX86State *env, int reg)
+{
+    switch (reg) {
+    case 0: case 1: case 2: case 3: case 6: case 7:
+        return env->dr[reg];
+    case 4:
+        if (env->cr[4] & CR4_DE_MASK) {
+            break;
+        } else {
+            return env->dr[6];
+        }
+    case 5:
+        if (env->cr[4] & CR4_DE_MASK) {
+            break;
+        } else {
+            return env->dr[7];
+        }
+    }
+    raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
+}
+
+/* Check if Port I/O is trapped by a breakpoint.  */
+void helper_bpt_io(CPUX86State *env, uint32_t port,
+                   uint32_t size, target_ulong next_eip)
+{
+#ifndef CONFIG_USER_ONLY
+    target_ulong dr7 = env->dr[7];
+    int i, hit = 0;
+
+    for (i = 0; i < DR7_MAX_BP; ++i) {
+        if (hw_breakpoint_type(dr7, i) == DR7_TYPE_IO_RW
+            && hw_breakpoint_enabled(dr7, i)) {
+            int bpt_len = hw_breakpoint_len(dr7, i);
+            if (port + size - 1 >= env->dr[i]
+                && port <= env->dr[i] + bpt_len - 1) {
+                hit |= 1 << i;
+            }
+        }
+    }
+
+    if (hit) {
+        env->dr[6] = (env->dr[6] & ~0xf) | hit;
+        env->eip = next_eip;
+        raise_exception(env, EXCP01_DB);
+    }
+#endif
+}
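cpu_x86_update_dr7() relies on a compact bit trick: in DR7 the local-enable bit for breakpoint i sits at bit 2*i and the global-enable bit at 2*i+1, so "dr7 | dr7 * 2" shifts every local bit up onto its global neighbour, and a single test of bit 2*i+1 then answers "enabled either way". A standalone illustration of just that fold (it mirrors only the enable-bit handling, not the full update logic):

#include <stdio.h>

/* After folding, bit (2*i + 1) is set iff breakpoint i is enabled
   locally or globally, as in cpu_x86_update_dr7() above. */
static unsigned fold_enables(unsigned dr7)
{
    return (dr7 | dr7 * 2) & 0xff;
}

int main(void)
{
    unsigned dr7 = 0x1 | 0x8;   /* L0 set (bit 0), G1 set (bit 3) */
    unsigned f = fold_enables(dr7);

    printf("bp0 enabled: %u\n", (f >> 1) & 1);  /* 1, via L0 */
    printf("bp1 enabled: %u\n", (f >> 3) & 1);  /* 1, via G1 */
    printf("bp2 enabled: %u\n", (f >> 5) & 1);  /* 0 */
    return 0;
}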
diff --git a/target/i386/cc_helper.c b/target/i386/cc_helper.c
new file mode 100644
index 0000000000..83af223c9f
--- /dev/null
+++ b/target/i386/cc_helper.c
@@ -0,0 +1,385 @@
+/*
+ *  x86 condition code helpers
+ *
+ *  Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+
+const uint8_t parity_table[256] = {
+    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
+    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
+    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
+    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
+    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
+    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
+    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
+    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
+    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
+    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
+    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
+    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
+    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
+    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
+    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
+    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
+    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
+    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
+    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
+    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
+    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
+    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
+    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
+    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
+    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
+    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
+    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
+    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
+    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
+    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
+    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
+    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
+};
+
+#define SHIFT 0
+#include "cc_helper_template.h"
+#undef SHIFT
+
+#define SHIFT 1
+#include "cc_helper_template.h"
+#undef SHIFT
+
+#define SHIFT 2
+#include "cc_helper_template.h"
+#undef SHIFT
+
+#ifdef TARGET_X86_64
+
+#define SHIFT 3
+#include "cc_helper_template.h"
+#undef SHIFT
+
+#endif
+
+static target_ulong compute_all_adcx(target_ulong dst, target_ulong src1,
+                                     target_ulong src2)
+{
+    return (src1 & ~CC_C) | (dst * CC_C);
+}
+
+static target_ulong compute_all_adox(target_ulong dst, target_ulong src1,
+                                     target_ulong src2)
+{
+    return (src1 & ~CC_O) | (src2 * CC_O);
+}
+
+static target_ulong compute_all_adcox(target_ulong dst, target_ulong src1,
+                                      target_ulong src2)
+{
+    return (src1 & ~(CC_C | CC_O)) | (dst * CC_C) | (src2 * CC_O);
+}
+
+target_ulong helper_cc_compute_all(target_ulong dst, target_ulong src1,
+                                   target_ulong src2, int op)
+{
+    switch (op) {
+    default: /* should never happen */
+        return 0;
+
+    case CC_OP_EFLAGS:
+        return src1;
+    case CC_OP_CLR:
+        return CC_Z | CC_P;
+
+    case CC_OP_MULB:
+        return compute_all_mulb(dst, src1);
+    case CC_OP_MULW:
+        return compute_all_mulw(dst, src1);
+    case CC_OP_MULL:
+        return compute_all_mull(dst, src1);
+
+    case CC_OP_ADDB:
+        return compute_all_addb(dst, src1);
+    case CC_OP_ADDW:
+        return compute_all_addw(dst, src1);
+    case CC_OP_ADDL:
+        return compute_all_addl(dst, src1);
+
+    case CC_OP_ADCB:
+        return compute_all_adcb(dst, src1, src2);
+    case CC_OP_ADCW:
+        return compute_all_adcw(dst, src1, src2);
+    case CC_OP_ADCL:
+        return compute_all_adcl(dst, src1, src2);
+
+    case CC_OP_SUBB:
+        return compute_all_subb(dst, src1);
+    case CC_OP_SUBW:
+        return compute_all_subw(dst, src1);
+    case CC_OP_SUBL:
+        return compute_all_subl(dst, src1);
+
+    case CC_OP_SBBB:
+        return compute_all_sbbb(dst, src1, src2);
+    case CC_OP_SBBW:
+        return compute_all_sbbw(dst, src1, src2);
+    case CC_OP_SBBL:
+        return compute_all_sbbl(dst, src1, src2);
+
+    case CC_OP_LOGICB:
+        return compute_all_logicb(dst, src1);
+    case CC_OP_LOGICW:
+        return compute_all_logicw(dst, src1);
+    case CC_OP_LOGICL:
+        return compute_all_logicl(dst, src1);
+
+    case CC_OP_INCB:
+        return compute_all_incb(dst, src1);
+    case CC_OP_INCW:
+        return compute_all_incw(dst, src1);
+    case CC_OP_INCL:
+        return compute_all_incl(dst, src1);
+
+    case CC_OP_DECB:
+        return compute_all_decb(dst, src1);
+    case CC_OP_DECW:
+        return compute_all_decw(dst, src1);
+    case CC_OP_DECL:
+        return compute_all_decl(dst, src1);
+
+    case CC_OP_SHLB:
+        return compute_all_shlb(dst, src1);
+    case CC_OP_SHLW:
+        return compute_all_shlw(dst, src1);
+    case CC_OP_SHLL:
+        return compute_all_shll(dst, src1);
+
+    case CC_OP_SARB:
+        return compute_all_sarb(dst, src1);
+    case CC_OP_SARW:
+        return compute_all_sarw(dst, src1);
+    case CC_OP_SARL:
+        return compute_all_sarl(dst, src1);
+
+    case CC_OP_BMILGB:
+        return compute_all_bmilgb(dst, src1);
+    case CC_OP_BMILGW:
+        return compute_all_bmilgw(dst, src1);
+    case CC_OP_BMILGL:
+        return compute_all_bmilgl(dst, src1);
+
+    case CC_OP_ADCX:
+        return compute_all_adcx(dst, src1, src2);
+    case CC_OP_ADOX:
+        return compute_all_adox(dst, src1, src2);
+    case CC_OP_ADCOX:
+        return compute_all_adcox(dst, src1, src2);
+
+#ifdef TARGET_X86_64
+    case CC_OP_MULQ:
+        return compute_all_mulq(dst, src1);
+    case CC_OP_ADDQ:
+        return compute_all_addq(dst, src1);
+    case CC_OP_ADCQ:
+        return compute_all_adcq(dst, src1, src2);
+    case CC_OP_SUBQ:
+        return compute_all_subq(dst, src1);
+    case CC_OP_SBBQ:
+        return compute_all_sbbq(dst, src1, src2);
+    case CC_OP_LOGICQ:
+        return compute_all_logicq(dst, src1);
+    case CC_OP_INCQ:
+        return compute_all_incq(dst, src1);
+    case CC_OP_DECQ:
+        return compute_all_decq(dst, src1);
+    case CC_OP_SHLQ:
+        return compute_all_shlq(dst, src1);
+    case CC_OP_SARQ:
+        return compute_all_sarq(dst, src1);
+    case CC_OP_BMILGQ:
+        return compute_all_bmilgq(dst, src1);
+#endif
+    }
+}
+
+uint32_t cpu_cc_compute_all(CPUX86State *env, int op)
+{
+    return helper_cc_compute_all(CC_DST, CC_SRC, CC_SRC2, op);
+}
+
+target_ulong helper_cc_compute_c(target_ulong dst, target_ulong src1,
+                                 target_ulong src2, int op)
+{
+    switch (op) {
+    default: /* should never happen */
+    case CC_OP_LOGICB:
+    case CC_OP_LOGICW:
+    case CC_OP_LOGICL:
+    case CC_OP_LOGICQ:
+    case CC_OP_CLR:
+        return 0;
+
+    case CC_OP_EFLAGS:
+    case CC_OP_SARB:
+    case CC_OP_SARW:
+    case CC_OP_SARL:
+    case CC_OP_SARQ:
+    case CC_OP_ADOX:
+        return src1 & 1;
+
+    case CC_OP_INCB:
+    case CC_OP_INCW:
+    case CC_OP_INCL:
+    case CC_OP_INCQ:
+    case CC_OP_DECB:
+    case CC_OP_DECW:
+    case CC_OP_DECL:
+    case CC_OP_DECQ:
+        return src1;
+
+    case CC_OP_MULB:
+    case CC_OP_MULW:
+    case CC_OP_MULL:
+    case CC_OP_MULQ:
+        return src1 != 0;
+
+    case CC_OP_ADCX:
+    case CC_OP_ADCOX:
+        return dst;
+
+    case CC_OP_ADDB:
+        return compute_c_addb(dst, src1);
+    case CC_OP_ADDW:
+        return compute_c_addw(dst, src1);
+    case CC_OP_ADDL:
+        return compute_c_addl(dst, src1);
+
+    case CC_OP_ADCB:
+        return compute_c_adcb(dst, src1, src2);
+    case CC_OP_ADCW:
+        return compute_c_adcw(dst, src1, src2);
+    case CC_OP_ADCL:
+        return compute_c_adcl(dst, src1, src2);
+
+    case CC_OP_SUBB:
+        return compute_c_subb(dst, src1);
+    case CC_OP_SUBW:
+        return compute_c_subw(dst, src1);
+    case CC_OP_SUBL:
+        return compute_c_subl(dst, src1);
+
+    case CC_OP_SBBB:
+        return compute_c_sbbb(dst, src1, src2);
+    case CC_OP_SBBW:
+        return compute_c_sbbw(dst, src1, src2);
+    case CC_OP_SBBL:
+        return compute_c_sbbl(dst, src1, src2);
+
+    case CC_OP_SHLB:
+        return compute_c_shlb(dst, src1);
+    case CC_OP_SHLW:
+        return compute_c_shlw(dst, src1);
+    case CC_OP_SHLL:
+        return compute_c_shll(dst, src1);
+
+    case CC_OP_BMILGB:
+        return compute_c_bmilgb(dst, src1);
+    case CC_OP_BMILGW:
+        return compute_c_bmilgw(dst, src1);
+    case CC_OP_BMILGL:
+        return compute_c_bmilgl(dst, src1);
+
+#ifdef TARGET_X86_64
+    case CC_OP_ADDQ:
+        return compute_c_addq(dst, src1);
+    case CC_OP_ADCQ:
+        return compute_c_adcq(dst, src1, src2);
+    case CC_OP_SUBQ:
+        return compute_c_subq(dst, src1);
+    case CC_OP_SBBQ:
+        return compute_c_sbbq(dst, src1, src2);
+    case CC_OP_SHLQ:
+        return compute_c_shlq(dst, src1);
+    case CC_OP_BMILGQ:
+        return compute_c_bmilgq(dst, src1);
+#endif
+    }
+}
+
+void helper_write_eflags(CPUX86State *env, target_ulong t0,
+                         uint32_t update_mask)
+{
+    cpu_load_eflags(env, t0, update_mask);
+}
+
+target_ulong helper_read_eflags(CPUX86State *env)
+{
+    uint32_t eflags;
+
+    eflags = cpu_cc_compute_all(env, CC_OP);
+    eflags |= (env->df & DF_MASK);
+    eflags |= env->eflags & ~(VM_MASK | RF_MASK);
+    return eflags;
+}
+
+void helper_clts(CPUX86State *env)
+{
+    env->cr[0] &= ~CR0_TS_MASK;
+    env->hflags &= ~HF_TS_MASK;
+}
+
+void helper_reset_rf(CPUX86State *env)
+{
+    env->eflags &= ~RF_MASK;
+}
+
+void helper_cli(CPUX86State *env)
+{
+    env->eflags &= ~IF_MASK;
+}
+
+void helper_sti(CPUX86State *env)
+{
+    env->eflags |= IF_MASK;
+}
+
+void helper_clac(CPUX86State *env)
+{
+    env->eflags &= ~AC_MASK;
+}
+
+void helper_stac(CPUX86State *env)
+{
+    env->eflags |= AC_MASK;
+}
+
+#if 0
+/* vm86plus instructions */
+void helper_cli_vm(CPUX86State *env)
+{
+    env->eflags &= ~VIF_MASK;
+}
+
+void helper_sti_vm(CPUX86State *env)
+{
+    env->eflags |= VIF_MASK;
+    if (env->eflags & VIP_MASK) {
+        raise_exception_ra(env, EXCP0D_GPF, GETPC());
+    }
+}
+#endif
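This whole file implements lazy condition-code evaluation: rather than computing EFLAGS after every arithmetic instruction, the translator records which operation ran (CC_OP) together with its operands (CC_DST, CC_SRC, CC_SRC2), and helper_cc_compute_all()/helper_cc_compute_c() reconstruct the flags only when something actually reads them. A much-reduced sketch of the idea, shrunk to ZF and CF for an 8-bit add (names here are illustrative, not QEMU's):

#include <stdint.h>
#include <stdio.h>

enum cc_op { OP_ADDB };          /* one operation tag shown */

struct lazy_cc {
    enum cc_op op;               /* which operation produced the flags */
    uint8_t dst, src;            /* its result and one operand */
};

/* Record the operation instead of computing flags eagerly. */
static uint8_t addb(struct lazy_cc *cc, uint8_t a, uint8_t b)
{
    cc->op = OP_ADDB;
    cc->src = a;
    cc->dst = a + b;
    return cc->dst;
}

/* Materialize flags only when a consumer (jcc, pushf, ...) asks. */
static void compute_flags(const struct lazy_cc *cc, int *zf, int *cf)
{
    switch (cc->op) {
    case OP_ADDB:
        *zf = (cc->dst == 0);
        *cf = (cc->dst < cc->src);   /* unsigned wrap, as in the template */
        break;
    }
}

int main(void)
{
    struct lazy_cc cc;
    int zf, cf;

    addb(&cc, 0x80, 0x80);           /* wraps to 0 */
    compute_flags(&cc, &zf, &cf);
    printf("ZF=%d CF=%d\n", zf, cf); /* ZF=1 CF=1 */
    return 0;
}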
diff --git a/target/i386/cc_helper_template.h b/target/i386/cc_helper_template.h
new file mode 100644
index 0000000000..607311f195
--- /dev/null
+++ b/target/i386/cc_helper_template.h
@@ -0,0 +1,242 @@
+/*
+ *  x86 condition code helpers
+ *
+ *  Copyright (c) 2008 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define DATA_BITS (1 << (3 + SHIFT))
+
+#if DATA_BITS == 8
+#define SUFFIX b
+#define DATA_TYPE uint8_t
+#elif DATA_BITS == 16
+#define SUFFIX w
+#define DATA_TYPE uint16_t
+#elif DATA_BITS == 32
+#define SUFFIX l
+#define DATA_TYPE uint32_t
+#elif DATA_BITS == 64
+#define SUFFIX q
+#define DATA_TYPE uint64_t
+#else
+#error unhandled operand size
+#endif
+
+#define SIGN_MASK (((DATA_TYPE)1) << (DATA_BITS - 1))
+
+/* dynamic flags computation */
+
+static int glue(compute_all_add, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
+{
+    int cf, pf, af, zf, sf, of;
+    DATA_TYPE src2 = dst - src1;
+
+    cf = dst < src1;
+    pf = parity_table[(uint8_t)dst];
+    af = (dst ^ src1 ^ src2) & CC_A;
+    zf = (dst == 0) * CC_Z;
+    sf = lshift(dst, 8 - DATA_BITS) & CC_S;
+    of = lshift((src1 ^ src2 ^ -1) & (src1 ^ dst), 12 - DATA_BITS) & CC_O;
+    return cf | pf | af | zf | sf | of;
+}
+
+static int glue(compute_c_add, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
+{
+    return dst < src1;
+}
+
+static int glue(compute_all_adc, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1,
+                                         DATA_TYPE src3)
+{
+    int cf, pf, af, zf, sf, of;
+    DATA_TYPE src2 = dst - src1 - src3;
+
+    cf = (src3 ? dst <= src1 : dst < src1);
+    pf = parity_table[(uint8_t)dst];
+    af = (dst ^ src1 ^ src2) & 0x10;
+    zf = (dst == 0) << 6;
+    sf = lshift(dst, 8 - DATA_BITS) & 0x80;
+    of = lshift((src1 ^ src2 ^ -1) & (src1 ^ dst), 12 - DATA_BITS) & CC_O;
+    return cf | pf | af | zf | sf | of;
+}
+
+static int glue(compute_c_adc, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1,
+                                       DATA_TYPE src3)
+{
+    return src3 ? dst <= src1 : dst < src1;
+}
+
+static int glue(compute_all_sub, SUFFIX)(DATA_TYPE dst, DATA_TYPE src2)
+{
+    int cf, pf, af, zf, sf, of;
+    DATA_TYPE src1 = dst + src2;
+
+    cf = src1 < src2;
+    pf = parity_table[(uint8_t)dst];
+    af = (dst ^ src1 ^ src2) & CC_A;
+    zf = (dst == 0) * CC_Z;
+    sf = lshift(dst, 8 - DATA_BITS) & CC_S;
+    of = lshift((src1 ^ src2) & (src1 ^ dst), 12 - DATA_BITS) & CC_O;
+    return cf | pf | af | zf | sf | of;
+}
+
+static int glue(compute_c_sub, SUFFIX)(DATA_TYPE dst, DATA_TYPE src2)
+{
+    DATA_TYPE src1 = dst + src2;
+
+    return src1 < src2;
+}
+
+static int glue(compute_all_sbb, SUFFIX)(DATA_TYPE dst, DATA_TYPE src2,
+                                         DATA_TYPE src3)
+{
+    int cf, pf, af, zf, sf, of;
+    DATA_TYPE src1 = dst + src2 + src3;
+
+    cf = (src3 ? src1 <= src2 : src1 < src2);
+    pf = parity_table[(uint8_t)dst];
+    af = (dst ^ src1 ^ src2) & 0x10;
+    zf = (dst == 0) << 6;
+    sf = lshift(dst, 8 - DATA_BITS) & 0x80;
+    of = lshift((src1 ^ src2) & (src1 ^ dst), 12 - DATA_BITS) & CC_O;
+    return cf | pf | af | zf | sf | of;
+}
+
+static int glue(compute_c_sbb, SUFFIX)(DATA_TYPE dst, DATA_TYPE src2,
+                                       DATA_TYPE src3)
+{
+    DATA_TYPE src1 = dst + src2 + src3;
+
+    return (src3 ? src1 <= src2 : src1 < src2);
+}
+
+static int glue(compute_all_logic, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
+{
+    int cf, pf, af, zf, sf, of;
+
+    cf = 0;
+    pf = parity_table[(uint8_t)dst];
+    af = 0;
+    zf = (dst == 0) * CC_Z;
+    sf = lshift(dst, 8 - DATA_BITS) & CC_S;
+    of = 0;
+    return cf | pf | af | zf | sf | of;
+}
+
+static int glue(compute_all_inc, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
+{
+    int cf, pf, af, zf, sf, of;
+    DATA_TYPE src2;
+
+    cf = src1;
+    src1 = dst - 1;
+    src2 = 1;
+    pf = parity_table[(uint8_t)dst];
+    af = (dst ^ src1 ^ src2) & CC_A;
+    zf = (dst == 0) * CC_Z;
+    sf = lshift(dst, 8 - DATA_BITS) & CC_S;
+    of = (dst == SIGN_MASK) * CC_O;
+    return cf | pf | af | zf | sf | of;
+}
+
+static int glue(compute_all_dec, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
+{
+    int cf, pf, af, zf, sf, of;
+    DATA_TYPE src2;
+
+    cf = src1;
+    src1 = dst + 1;
+    src2 = 1;
+    pf = parity_table[(uint8_t)dst];
+    af = (dst ^ src1 ^ src2) & CC_A;
+    zf = (dst == 0) * CC_Z;
+    sf = lshift(dst, 8 - DATA_BITS) & CC_S;
+    of = (dst == SIGN_MASK - 1) * CC_O;
+    return cf | pf | af | zf | sf | of;
+}
+
+static int glue(compute_all_shl, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
+{
+    int cf, pf, af, zf, sf, of;
+
+    cf = (src1 >> (DATA_BITS - 1)) & CC_C;
+    pf = parity_table[(uint8_t)dst];
+    af = 0; /* undefined */
+    zf = (dst == 0) * CC_Z;
+    sf = lshift(dst, 8 - DATA_BITS) & CC_S;
+    /* of is defined iff shift count == 1 */
+    of = lshift(src1 ^ dst, 12 - DATA_BITS) & CC_O;
+    return cf | pf | af | zf | sf | of;
+}
+
+static int glue(compute_c_shl, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
+{
+    return (src1 >> (DATA_BITS - 1)) & CC_C;
+}
+
+static int glue(compute_all_sar, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
+{
+    int cf, pf, af, zf, sf, of;
+
+    cf = src1 & 1;
+    pf = parity_table[(uint8_t)dst];
+    af = 0; /* undefined */
+    zf = (dst == 0) * CC_Z;
+    sf = lshift(dst, 8 - DATA_BITS) & CC_S;
+    /* of is defined iff shift count == 1 */
+    of = lshift(src1 ^ dst, 12 - DATA_BITS) & CC_O;
+    return cf | pf | af | zf | sf | of;
+}
+
+/* NOTE: we compute the flags like the P4. On older CPUs, only OF and
+   CF are modified, and emulating that is slower.  Note as well that we
+   don't truncate SRC1 to DATA_TYPE when computing the carry.  */
+static int glue(compute_all_mul, SUFFIX)(DATA_TYPE dst, target_long src1)
+{
+    int cf, pf, af, zf, sf, of;
+
+    cf = (src1 != 0);
+    pf = parity_table[(uint8_t)dst];
+    af = 0; /* undefined */
+    zf = (dst == 0) * CC_Z;
+    sf = lshift(dst, 8 - DATA_BITS) & CC_S;
+    of = cf * CC_O;
+    return cf | pf | af | zf | sf | of;
+}
+
+static int glue(compute_all_bmilg, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
+{
+    int cf, pf, af, zf, sf, of;
+
+    cf = (src1 == 0);
+    pf = 0; /* undefined */
+    af = 0; /* undefined */
+    zf = (dst == 0) * CC_Z;
+    sf = lshift(dst, 8 - DATA_BITS) & CC_S;
+    of = 0;
+    return cf | pf | af | zf | sf | of;
+}
+
+static int glue(compute_c_bmilg, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
+{
+    return src1 == 0;
+}
+
+#undef DATA_BITS
+#undef SIGN_MASK
+#undef DATA_TYPE
+#undef DATA_MASK
+#undef SUFFIX
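cc_helper_template.h is a classic multiple-inclusion template: cc_helper.c defines SHIFT, includes the header, and glue() pastes the size suffix (b/w/l/q) onto every function name, so one body generates helpers for all operand widths. A reduced, self-contained sketch of the same pattern (compressed here into a macro instead of repeated #include, purely for brevity):

#include <stdint.h>
#include <stdio.h>

#define xglue(a, b) a##b
#define glue(a, b) xglue(a, b)

/* One template body, instantiated per operand type/suffix, in the
   spirit of "#define SHIFT n" + "#include cc_helper_template.h". */
#define DEFINE_IS_ZERO(TYPE, SUFFIX)           \
    static int glue(is_zero_, SUFFIX)(TYPE x)  \
    {                                          \
        return x == 0;                         \
    }

DEFINE_IS_ZERO(uint8_t, b)
DEFINE_IS_ZERO(uint16_t, w)
DEFINE_IS_ZERO(uint32_t, l)

int main(void)
{
    printf("%d %d %d\n", is_zero_b(0), is_zero_w(1), is_zero_l(5)); /* 1 0 0 */
    return 0;
}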
diff --git a/target/i386/cpu-qom.h b/target/i386/cpu-qom.h
new file mode 100644
index 0000000000..7c9a07ae65
--- /dev/null
+++ b/target/i386/cpu-qom.h
@@ -0,0 +1,77 @@
+/*
+ * QEMU x86 CPU
+ *
+ * Copyright (c) 2012 SUSE LINUX Products GmbH
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+#ifndef QEMU_I386_CPU_QOM_H
+#define QEMU_I386_CPU_QOM_H
+
+#include "qom/cpu.h"
+#include "qemu/notify.h"
+
+#ifdef TARGET_X86_64
+#define TYPE_X86_CPU "x86_64-cpu"
+#else
+#define TYPE_X86_CPU "i386-cpu"
+#endif
+
+#define X86_CPU_CLASS(klass) \
+    OBJECT_CLASS_CHECK(X86CPUClass, (klass), TYPE_X86_CPU)
+#define X86_CPU(obj) \
+    OBJECT_CHECK(X86CPU, (obj), TYPE_X86_CPU)
+#define X86_CPU_GET_CLASS(obj) \
+    OBJECT_GET_CLASS(X86CPUClass, (obj), TYPE_X86_CPU)
+
+/**
+ * X86CPUDefinition:
+ *
+ * CPU model definition data that has not yet been converted to QOM
+ * per-subclass property defaults.
+ */
+typedef struct X86CPUDefinition X86CPUDefinition;
+
+/**
+ * X86CPUClass:
+ * @cpu_def: CPU model definition
+ * @kvm_required: Whether CPU model requires KVM to be enabled.
+ * @parent_realize: The parent class' realize handler.
+ * @parent_reset: The parent class' reset handler.
+ *
+ * An x86 CPU model or family.
+ */
+typedef struct X86CPUClass {
+    /*< private >*/
+    CPUClass parent_class;
+    /*< public >*/
+
+    /* Should be eventually replaced by subclass-specific property defaults. */
+    X86CPUDefinition *cpu_def;
+
+    bool kvm_required;
+
+    /* Optional description of CPU model.
+     * If unavailable, cpu_def->model_id is used */
+    const char *model_description;
+
+    DeviceRealize parent_realize;
+    DeviceUnrealize parent_unrealize;
+    void (*parent_reset)(CPUState *cpu);
+} X86CPUClass;
+
+typedef struct X86CPU X86CPU;
+
+#endif
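The X86_CPU()/X86_CPU_CLASS()/X86_CPU_GET_CLASS() macros above wrap QOM's checked casts: every object carries a pointer to its type information, and the cast verifies the type before converting the pointer, so a miscast fails loudly instead of silently corrupting memory. A heavily simplified sketch of that idea (this is not QOM's real implementation; all struct and function names below are invented):

#include <assert.h>
#include <stdio.h>
#include <string.h>

struct TypeInfoLite { const char *name; };
struct ObjectLite   { const struct TypeInfoLite *type; };

struct X86CPULite {
    struct ObjectLite parent;    /* base object must come first */
    int family;
};

/* Checked downcast: verify the dynamic type name before converting. */
static void *object_check(void *obj, const char *name)
{
    struct ObjectLite *o = obj;
    assert(strcmp(o->type->name, name) == 0);
    return obj;
}

#define X86_CPU_LITE(obj) \
    ((struct X86CPULite *)object_check((obj), "x86_64-cpu"))

int main(void)
{
    static const struct TypeInfoLite x86_type = { "x86_64-cpu" };
    struct X86CPULite cpu = { { &x86_type }, 6 };
    struct ObjectLite *base = &cpu.parent;   /* generic handle */

    printf("family %d\n", X86_CPU_LITE(base)->family);
    return 0;
}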
diff --git a/target/i386/cpu.c b/target/i386/cpu.c
new file mode 100644
index 0000000000..de1f30eeda
--- /dev/null
+++ b/target/i386/cpu.c
@@ -0,0 +1,3755 @@
+/*
+ *  i386 CPUID helper functions
+ *
+ *  Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+#include "qemu/cutils.h"
+
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "sysemu/kvm.h"
+#include "sysemu/cpus.h"
+#include "kvm_i386.h"
+
+#include "qemu/error-report.h"
+#include "qemu/option.h"
+#include "qemu/config-file.h"
+#include "qapi/qmp/qerror.h"
+
+#include "qapi-types.h"
+#include "qapi-visit.h"
+#include "qapi/visitor.h"
+#include "sysemu/arch_init.h"
+
+#if defined(CONFIG_KVM)
+#include <linux/kvm_para.h>
+#endif
+
+#include "sysemu/sysemu.h"
+#include "hw/qdev-properties.h"
+#include "hw/i386/topology.h"
+#ifndef CONFIG_USER_ONLY
+#include "exec/address-spaces.h"
+#include "hw/hw.h"
+#include "hw/xen/xen.h"
+#include "hw/i386/apic_internal.h"
+#endif
+
+
+/* Cache topology CPUID constants: */
+
+/* CPUID Leaf 2 Descriptors */
+
+#define CPUID_2_L1D_32KB_8WAY_64B 0x2c
+#define CPUID_2_L1I_32KB_8WAY_64B 0x30
+#define CPUID_2_L2_2MB_8WAY_64B   0x7d
+#define CPUID_2_L3_16MB_16WAY_64B 0x4d
+
+
+/* CPUID Leaf 4 constants: */
+
+/* EAX: */
+#define CPUID_4_TYPE_DCACHE  1
+#define CPUID_4_TYPE_ICACHE  2
+#define CPUID_4_TYPE_UNIFIED 3
+
+#define CPUID_4_LEVEL(l)          ((l) << 5)
+
+#define CPUID_4_SELF_INIT_LEVEL (1 << 8)
+#define CPUID_4_FULLY_ASSOC     (1 << 9)
+
+/* EDX: */
+#define CPUID_4_NO_INVD_SHARING (1 << 0)
+#define CPUID_4_INCLUSIVE       (1 << 1)
+#define CPUID_4_COMPLEX_IDX     (1 << 2)
+
+#define ASSOC_FULL 0xFF
+
+/* AMD associativity encoding used on CPUID Leaf 0x80000006: */
+#define AMD_ENC_ASSOC(a) ((a) <=   1 ? (a) : \
+                          (a) ==   2 ? 0x2 : \
+                          (a) ==   4 ? 0x4 : \
+                          (a) ==   8 ? 0x6 : \
+                          (a) ==  16 ? 0x8 : \
+                          (a) ==  32 ? 0xA : \
+                          (a) ==  48 ? 0xB : \
+                          (a) ==  64 ? 0xC : \
+                          (a) ==  96 ? 0xD : \
+                          (a) == 128 ? 0xE : \
+                          (a) == ASSOC_FULL ? 0xF : \
+                          0 /* invalid value */)
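+/* For example, AMD_ENC_ASSOC(8) yields 0x6 and AMD_ENC_ASSOC(ASSOC_FULL)
+ * yields 0xF (fully associative), matching the L2/L3 associativity
+ * encoding documented for CPUID leaf 0x80000006. */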
+
+
+/* Definitions of the hardcoded cache entries we expose: */
+
+/* L1 data cache: */
+#define L1D_LINE_SIZE         64
+#define L1D_ASSOCIATIVITY      8
+#define L1D_SETS              64
+#define L1D_PARTITIONS         1
+/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
+#define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
+/* FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
+#define L1D_LINES_PER_TAG      1
+#define L1D_SIZE_KB_AMD       64
+#define L1D_ASSOCIATIVITY_AMD  2
+
+/* L1 instruction cache: */
+#define L1I_LINE_SIZE         64
+#define L1I_ASSOCIATIVITY      8
+#define L1I_SETS              64
+#define L1I_PARTITIONS         1
+/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
+#define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
+/* FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
+#define L1I_LINES_PER_TAG      1
+#define L1I_SIZE_KB_AMD       64
+#define L1I_ASSOCIATIVITY_AMD  2
+
+/* Level 2 unified cache: */
+#define L2_LINE_SIZE          64
+#define L2_ASSOCIATIVITY      16
+#define L2_SETS             4096
+#define L2_PARTITIONS          1
+/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
+/* FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
+#define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
+/* FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
+#define L2_LINES_PER_TAG       1
+#define L2_SIZE_KB_AMD       512
+
+/* Level 3 unified cache: */
+#define L3_SIZE_KB             0 /* disabled */
+#define L3_ASSOCIATIVITY       0 /* disabled */
+#define L3_LINES_PER_TAG       0 /* disabled */
+#define L3_LINE_SIZE           0 /* disabled */
+#define L3_N_LINE_SIZE         64
+#define L3_N_ASSOCIATIVITY     16
+#define L3_N_SETS           16384
+#define L3_N_PARTITIONS         1
+#define L3_N_DESCRIPTOR CPUID_2_L3_16MB_16WAY_64B
+#define L3_N_LINES_PER_TAG      1
+#define L3_N_SIZE_KB_AMD    16384
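+/* Sanity check of the L3_N_* values above:
+ * 64 bytes/line * 16 ways * 16384 sets * 1 partition = 16 MiB,
+ * which matches L3_N_SIZE_KB_AMD (16384 KiB). */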
+
+/* TLB definitions: */
+
+#define L1_DTLB_2M_ASSOC       1
+#define L1_DTLB_2M_ENTRIES   255
+#define L1_DTLB_4K_ASSOC       1
+#define L1_DTLB_4K_ENTRIES   255
+
+#define L1_ITLB_2M_ASSOC       1
+#define L1_ITLB_2M_ENTRIES   255
+#define L1_ITLB_4K_ASSOC       1
+#define L1_ITLB_4K_ENTRIES   255
+
+#define L2_DTLB_2M_ASSOC       0 /* disabled */
+#define L2_DTLB_2M_ENTRIES     0 /* disabled */
+#define L2_DTLB_4K_ASSOC       4
+#define L2_DTLB_4K_ENTRIES   512
+
+#define L2_ITLB_2M_ASSOC       0 /* disabled */
+#define L2_ITLB_2M_ENTRIES     0 /* disabled */
+#define L2_ITLB_4K_ASSOC       4
+#define L2_ITLB_4K_ENTRIES   512
+
+
+
+static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
+                                     uint32_t vendor2, uint32_t vendor3)
+{
+    int i;
+    for (i = 0; i < 4; i++) {
+        dst[i] = vendor1 >> (8 * i);
+        dst[i + 4] = vendor2 >> (8 * i);
+        dst[i + 8] = vendor3 >> (8 * i);
+    }
+    dst[CPUID_VENDOR_SZ] = '\0';
+}
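+/* Example: CPUID.0 on an Intel CPU returns EBX=0x756e6547, EDX=0x49656e69
+ * and ECX=0x6c65746e, so x86_cpu_vendor_words2str(dst, ebx, edx, ecx)
+ * writes the 12-character string "GenuineIntel" into dst. */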
+
+#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
+#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
+          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
+#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
+          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
+          CPUID_PSE36 | CPUID_FXSR)
+#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
+#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
+          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
+          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
+          CPUID_PAE | CPUID_SEP | CPUID_APIC)
+
+#define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
+          CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
+          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
+          CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
+          CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
+          /* partly implemented:
+          CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
+          /* missing:
+          CPUID_VME, CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
+#define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
+          CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
+          CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
+          CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */   \
+          CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
+          /* missing:
+          CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
+          CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
+          CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
+          CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
+          CPUID_EXT_F16C, CPUID_EXT_RDRAND */
+
+#ifdef TARGET_X86_64
+#define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
+#else
+#define TCG_EXT2_X86_64_FEATURES 0
+#endif
+
+#define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
+          CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
+          CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
+          TCG_EXT2_X86_64_FEATURES)
+#define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
+          CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
+#define TCG_EXT4_FEATURES 0
+#define TCG_SVM_FEATURES 0
+#define TCG_KVM_FEATURES 0
+#define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
+          CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
+          CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT |            \
+          CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
+          CPUID_7_0_EBX_ERMS)
+          /* missing:
+          CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
+          CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
+          CPUID_7_0_EBX_RDSEED */
+#define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_OSPKE)
+#define TCG_7_0_EDX_FEATURES 0
+#define TCG_APM_FEATURES 0
+#define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
+#define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
+          /* missing:
+          CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
+
+typedef struct FeatureWordInfo {
+    /* feature flag names are taken from "Intel Processor Identification and
+     * the CPUID Instruction" and AMD's "CPUID Specification".
+     * In cases of disagreement between feature naming conventions,
+     * aliases may be added.
+     */
+    const char *feat_names[32];
+    uint32_t cpuid_eax;   /* Input EAX for CPUID */
+    bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
+    uint32_t cpuid_ecx;   /* Input ECX value for CPUID */
+    int cpuid_reg;        /* output register (R_* constant) */
+    uint32_t tcg_features; /* Feature flags supported by TCG */
+    uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
+    uint32_t migratable_flags; /* Feature flags known to be migratable */
+} FeatureWordInfo;
+
+static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
+    [FEAT_1_EDX] = {
+        .feat_names = {
+            "fpu", "vme", "de", "pse",
+            "tsc", "msr", "pae", "mce",
+            "cx8", "apic", NULL, "sep",
+            "mtrr", "pge", "mca", "cmov",
+            "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
+            NULL, "ds" /* Intel dts */, "acpi", "mmx",
+            "fxsr", "sse", "sse2", "ss",
+            "ht" /* Intel htt */, "tm", "ia64", "pbe",
+        },
+        .cpuid_eax = 1, .cpuid_reg = R_EDX,
+        .tcg_features = TCG_FEATURES,
+    },
+    [FEAT_1_ECX] = {
+        .feat_names = {
+            "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor",
+            "ds-cpl", "vmx", "smx", "est",
+            "tm2", "ssse3", "cid", NULL,
+            "fma", "cx16", "xtpr", "pdcm",
+            NULL, "pcid", "dca", "sse4.1",
+            "sse4.2", "x2apic", "movbe", "popcnt",
+            "tsc-deadline", "aes", "xsave", "osxsave",
+            "avx", "f16c", "rdrand", "hypervisor",
+        },
+        .cpuid_eax = 1, .cpuid_reg = R_ECX,
+        .tcg_features = TCG_EXT_FEATURES,
+    },
+    /* Feature names that are already defined for FEAT_1_EDX but are also
+     * set in CPUID[8000_0001].EDX on AMD CPUs are not repeated in
+     * feat_names below. Those flags are copied automatically to
+     * features[FEAT_8000_0001_EDX] if and only if the CPU vendor is AMD.
+     */
+    [FEAT_8000_0001_EDX] = {
+        .feat_names = {
+            NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
+            NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
+            NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
+            NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
+            NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
+            "nx", NULL, "mmxext", NULL /* mmx */,
+            NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp",
+            NULL, "lm", "3dnowext", "3dnow",
+        },
+        .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
+        .tcg_features = TCG_EXT2_FEATURES,
+    },
+    [FEAT_8000_0001_ECX] = {
+        .feat_names = {
+            "lahf-lm", "cmp-legacy", "svm", "extapic",
+            "cr8legacy", "abm", "sse4a", "misalignsse",
+            "3dnowprefetch", "osvw", "ibs", "xop",
+            "skinit", "wdt", NULL, "lwp",
+            "fma4", "tce", NULL, "nodeid-msr",
+            NULL, "tbm", "topoext", "perfctr-core",
+            "perfctr-nb", NULL, NULL, NULL,
+            NULL, NULL, NULL, NULL,
+        },
+        .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
+        .tcg_features = TCG_EXT3_FEATURES,
+    },
+    [FEAT_C000_0001_EDX] = {
+        .feat_names = {
+            NULL, NULL, "xstore", "xstore-en",
+            NULL, NULL, "xcrypt", "xcrypt-en",
+            "ace2", "ace2-en", "phe", "phe-en",
+            "pmm", "pmm-en", NULL, NULL,
+            NULL, NULL, NULL, NULL,
+            NULL, NULL, NULL, NULL,
+            NULL, NULL, NULL, NULL,
+            NULL, NULL, NULL, NULL,
+        },
+        .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
+        .tcg_features = TCG_EXT4_FEATURES,
+    },
+    [FEAT_KVM] = {
+        .feat_names = {
+            "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock",
+            "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt",
+            NULL, NULL, NULL, NULL,
+            NULL, NULL, NULL, NULL,
+            NULL, NULL, NULL, NULL,
+            NULL, NULL, NULL, NULL,
+            "kvmclock-stable-bit", NULL, NULL, NULL,
+            NULL, NULL, NULL, NULL,
+        },
+        .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
+        .tcg_features = TCG_KVM_FEATURES,
+    },
+    [FEAT_HYPERV_EAX] = {
+        .feat_names = {
+            NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
+            NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
+            NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
+            NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
+            NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
+            NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
+            NULL, NULL, NULL, NULL,
+            NULL, NULL, NULL, NULL,
+            NULL, NULL, NULL, NULL,
+            NULL, NULL, NULL, NULL,
+            NULL, NULL, NULL, NULL,
+        },
+        .cpuid_eax = 0x40000003, .cpuid_reg = R_EAX,
+    },
+    [FEAT_HYPERV_EBX] = {
+        .feat_names = {
+            NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
+            NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
+            NULL /* hv_post_messages */, NULL /* hv_signal_events */,
+            NULL /* hv_create_port */, NULL /* hv_connect_port */,
+            NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
+            NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
+            NULL, NULL,
+            NULL, NULL, NULL, NULL,
+            NULL, NULL, NULL, NULL,
+            NULL, NULL, NULL, NULL,
+            NULL, NULL, NULL, NULL,
+        },
+        .cpuid_eax = 0x40000003, .cpuid_reg = R_EBX,
+    },
+    [FEAT_HYPERV_EDX] = {
+        .feat_names = {
+            NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
+            NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
+            NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
+            NULL, NULL,
+            NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
+            NULL, NULL, NULL, NULL,
+            NULL, NULL, NULL, NULL,
+            NULL, NULL, NULL, NULL,
+            NULL, NULL, NULL, NULL,
+            NULL, NULL, NULL, NULL,
+        },
+        .cpuid_eax = 0x40000003, .cpuid_reg = R_EDX,
+    },
+    [FEAT_SVM] = {
+        .feat_names = {
+            "npt", "lbrv", "svm-lock", "nrip-save",
+            "tsc-scale", "vmcb-clean",  "flushbyasid", "decodeassists",
+            NULL, NULL, "pause-filter", NULL,
+            "pfthreshold", NULL, NULL, NULL,
+            NULL, NULL, NULL, NULL,
+            NULL, NULL, NULL, NULL,
+            NULL, NULL, NULL, NULL,
+            NULL, NULL, NULL, NULL,
+        },
+        .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
+        .tcg_features = TCG_SVM_FEATURES,
+    },
+    [FEAT_7_0_EBX] = {
+        .feat_names = {
+            "fsgsbase", "tsc-adjust", NULL, "bmi1",
+            "hle", "avx2", NULL, "smep",
+            "bmi2", "erms", "invpcid", "rtm",
+            NULL, NULL, "mpx", NULL,
+            "avx512f", "avx512dq", "rdseed", "adx",
+            "smap", "avx512ifma", "pcommit", "clflushopt",
+            "clwb", NULL, "avx512pf", "avx512er",
+            "avx512cd", NULL, "avx512bw", "avx512vl",
+        },
+        .cpuid_eax = 7,
+        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
+        .cpuid_reg = R_EBX,
+        .tcg_features = TCG_7_0_EBX_FEATURES,
+    },
+    [FEAT_7_0_ECX] = {
+        .feat_names = {
+            NULL, "avx512vbmi", "umip", "pku",
+            "ospke", NULL, NULL, NULL,
+            NULL, NULL, NULL, NULL,
+            NULL, NULL, NULL, NULL,
+            NULL, NULL, NULL, NULL,
+            NULL, NULL, "rdpid", NULL,
+            NULL, NULL, NULL, NULL,
+            NULL, NULL, NULL, NULL,
+        },
+        .cpuid_eax = 7,
+        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
+        .cpuid_reg = R_ECX,
+        .tcg_features = TCG_7_0_ECX_FEATURES,
+    },
+    [FEAT_7_0_EDX] = {
+        .feat_names = {
+            NULL, NULL, "avx512-4vnniw", "avx512-4fmaps",
+            NULL, NULL, NULL, NULL,
+            NULL, NULL, NULL, NULL,
+            NULL, NULL, NULL, NULL,
+            NULL, NULL, NULL, NULL,
+            NULL, NULL, NULL, NULL,
+            NULL, NULL, NULL, NULL,
+            NULL, NULL, NULL, NULL,
+        },
+        .cpuid_eax = 7,
+        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
+        .cpuid_reg = R_EDX,
+        .tcg_features = TCG_7_0_EDX_FEATURES,
+    },
+    [FEAT_8000_0007_EDX] = {
+        .feat_names = {
+            NULL, NULL, NULL, NULL,
+            NULL, NULL, NULL, NULL,
+            "invtsc", NULL, NULL, NULL,
+            NULL, NULL, NULL, NULL,
+            NULL, NULL, NULL, NULL,
+            NULL, NULL, NULL, NULL,
+            NULL, NULL, NULL, NULL,
+            NULL, NULL, NULL, NULL,
+        },
+        .cpuid_eax = 0x80000007,
+        .cpuid_reg = R_EDX,
+        .tcg_features = TCG_APM_FEATURES,
+        .unmigratable_flags = CPUID_APM_INVTSC,
+    },
+    [FEAT_XSAVE] = {
+        .feat_names = {
+            "xsaveopt", "xsavec", "xgetbv1", "xsaves",
+            NULL, NULL, NULL, NULL,
+            NULL, NULL, NULL, NULL,
+            NULL, NULL, NULL, NULL,
+            NULL, NULL, NULL, NULL,
+            NULL, NULL, NULL, NULL,
+            NULL, NULL, NULL, NULL,
+            NULL, NULL, NULL, NULL,
+        },
+        .cpuid_eax = 0xd,
+        .cpuid_needs_ecx = true, .cpuid_ecx = 1,
+        .cpuid_reg = R_EAX,
+        .tcg_features = TCG_XSAVE_FEATURES,
+    },
+    [FEAT_6_EAX] = {
+        .feat_names = {
+            NULL, NULL, "arat", NULL,
+            NULL, NULL, NULL, NULL,
+            NULL, NULL, NULL, NULL,
+            NULL, NULL, NULL, NULL,
+            NULL, NULL, NULL, NULL,
+            NULL, NULL, NULL, NULL,
+            NULL, NULL, NULL, NULL,
+            NULL, NULL, NULL, NULL,
+        },
+        .cpuid_eax = 6, .cpuid_reg = R_EAX,
+        .tcg_features = TCG_6_EAX_FEATURES,
+    },
+    [FEAT_XSAVE_COMP_LO] = {
+        .cpuid_eax = 0xD,
+        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
+        .cpuid_reg = R_EAX,
+        .tcg_features = ~0U,
+        .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK |
+            XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
+            XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK |
+            XSTATE_PKRU_MASK,
+    },
+    [FEAT_XSAVE_COMP_HI] = {
+        .cpuid_eax = 0xD,
+        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
+        .cpuid_reg = R_EDX,
+        .tcg_features = ~0U,
+    },
+};
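+/* A minimal sketch of how a FeatureWordInfo entry drives a host CPUID
+ * query; example_read_feature_word() is a hypothetical helper shown for
+ * illustration only and therefore kept compiled out:
+ */
+#if 0
+static uint32_t example_read_feature_word(FeatureWord w)
+{
+    FeatureWordInfo *wi = &feature_word_info[w];
+    uint32_t regs[4];
+
+    /* Query the leaf (and subleaf, when required) named by the table */
+    host_cpuid(wi->cpuid_eax, wi->cpuid_needs_ecx ? wi->cpuid_ecx : 0,
+               &regs[R_EAX], &regs[R_EBX], &regs[R_ECX], &regs[R_EDX]);
+    /* Return the output register selected by the table entry */
+    return regs[wi->cpuid_reg];
+}
+#endif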
+
+typedef struct X86RegisterInfo32 {
+    /* Name of register */
+    const char *name;
+    /* QAPI enum value register */
+    X86CPURegister32 qapi_enum;
+} X86RegisterInfo32;
+
+#define REGISTER(reg) \
+    [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
+static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
+    REGISTER(EAX),
+    REGISTER(ECX),
+    REGISTER(EDX),
+    REGISTER(EBX),
+    REGISTER(ESP),
+    REGISTER(EBP),
+    REGISTER(ESI),
+    REGISTER(EDI),
+};
+#undef REGISTER
+
+typedef struct ExtSaveArea {
+    uint32_t feature, bits;
+    uint32_t offset, size;
+} ExtSaveArea;
+
+static const ExtSaveArea x86_ext_save_areas[] = {
+    [XSTATE_FP_BIT] = {
+        /* x87 FP state component is always enabled if XSAVE is supported */
+        .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
+        /* x87 state is in the legacy region of the XSAVE area */
+        .offset = 0,
+        .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
+    },
+    [XSTATE_SSE_BIT] = {
+        /* SSE state component is always enabled if XSAVE is supported */
+        .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
+        /* SSE state is in the legacy region of the XSAVE area */
+        .offset = 0,
+        .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
+    },
+    [XSTATE_YMM_BIT] =
+          { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
+            .offset = offsetof(X86XSaveArea, avx_state),
+            .size = sizeof(XSaveAVX) },
+    [XSTATE_BNDREGS_BIT] =
+          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
+            .offset = offsetof(X86XSaveArea, bndreg_state),
+            .size = sizeof(XSaveBNDREG) },
+    [XSTATE_BNDCSR_BIT] =
+          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
+            .offset = offsetof(X86XSaveArea, bndcsr_state),
+            .size = sizeof(XSaveBNDCSR) },
+    [XSTATE_OPMASK_BIT] =
+          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
+            .offset = offsetof(X86XSaveArea, opmask_state),
+            .size = sizeof(XSaveOpmask) },
+    [XSTATE_ZMM_Hi256_BIT] =
+          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
+            .offset = offsetof(X86XSaveArea, zmm_hi256_state),
+            .size = sizeof(XSaveZMM_Hi256) },
+    [XSTATE_Hi16_ZMM_BIT] =
+          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
+            .offset = offsetof(X86XSaveArea, hi16_zmm_state),
+            .size = sizeof(XSaveHi16_ZMM) },
+    [XSTATE_PKRU_BIT] =
+          { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
+            .offset = offsetof(X86XSaveArea, pkru_state),
+            .size = sizeof(XSavePKRU) },
+};
+
+static uint32_t xsave_area_size(uint64_t mask)
+{
+    int i;
+    uint64_t ret = 0;
+
+    for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
+        const ExtSaveArea *esa = &x86_ext_save_areas[i];
+        if ((mask >> i) & 1) {
+            ret = MAX(ret, esa->offset + esa->size);
+        }
+    }
+    return ret;
+}
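+/* For example, with mask == XSTATE_FP_MASK | XSTATE_SSE_MASK both
+ * components live at offset 0 in the legacy region, so the function
+ * returns sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader). */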
+
+static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu)
+{
+    return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 |
+           cpu->env.features[FEAT_XSAVE_COMP_LO];
+}
+
+const char *get_register_name_32(unsigned int reg)
+{
+    if (reg >= CPU_NB_REGS32) {
+        return NULL;
+    }
+    return x86_reg_info_32[reg].name;
+}
+
+/*
+ * Returns the set of feature flags that are supported and migratable by
+ * QEMU, for a given FeatureWord.
+ */
+static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
+{
+    FeatureWordInfo *wi = &feature_word_info[w];
+    uint32_t r = 0;
+    int i;
+
+    for (i = 0; i < 32; i++) {
+        uint32_t f = 1U << i;
+
+        /* If the feature name is known, it is implicitly considered migratable,
+         * unless it is explicitly set in unmigratable_flags */
+        if ((wi->migratable_flags & f) ||
+            (wi->feat_names[i] && !(wi->unmigratable_flags & f))) {
+            r |= f;
+        }
+    }
+    return r;
+}
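+/* For example, "invtsc" has a name in FEAT_8000_0007_EDX but is listed in
+ * that word's unmigratable_flags, so it is not reported as migratable. */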
+
+void host_cpuid(uint32_t function, uint32_t count,
+                uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
+{
+    uint32_t vec[4];
+
+#ifdef __x86_64__
+    asm volatile("cpuid"
+                 : "=a"(vec[0]), "=b"(vec[1]),
+                   "=c"(vec[2]), "=d"(vec[3])
+                 : "0"(function), "c"(count) : "cc");
+#elif defined(__i386__)
+    asm volatile("pusha \n\t"
+                 "cpuid \n\t"
+                 "mov %%eax, 0(%2) \n\t"
+                 "mov %%ebx, 4(%2) \n\t"
+                 "mov %%ecx, 8(%2) \n\t"
+                 "mov %%edx, 12(%2) \n\t"
+                 "popa"
+                 : : "a"(function), "c"(count), "S"(vec)
+                 : "memory", "cc");
+#else
+    abort();
+#endif
+
+    if (eax) {
+        *eax = vec[0];
+    }
+    if (ebx) {
+        *ebx = vec[1];
+    }
+    if (ecx) {
+        *ecx = vec[2];
+    }
+    if (edx) {
+        *edx = vec[3];
+    }
+}
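+/* Typical use: host_cpuid(0, 0, &eax, &ebx, &ecx, &edx) fetches the vendor
+ * string registers; any output pointer the caller does not need may be
+ * NULL. */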
+
+/* CPU class name definitions: */
+
+#define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
+#define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)
+
+/* Return the QOM type name for a given CPU model name.
+ * The caller is responsible for freeing the returned string.
+ */
+static char *x86_cpu_type_name(const char *model_name)
+{
+    return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
+}
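+/* Example: on an x86_64 build, x86_cpu_type_name("qemu64") returns
+ * "qemu64-x86_64-cpu", which the caller releases with g_free(). */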
+
+static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
+{
+    ObjectClass *oc;
+    char *typename;
+
+    if (cpu_model == NULL) {
+        return NULL;
+    }
+
+    typename = x86_cpu_type_name(cpu_model);
+    oc = object_class_by_name(typename);
+    g_free(typename);
+    return oc;
+}
+
+static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
+{
+    const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
+    assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
+    return g_strndup(class_name,
+                     strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
+}
+
+struct X86CPUDefinition {
+    const char *name;
+    uint32_t level;
+    uint32_t xlevel;
+    /* vendor is a zero-terminated, 12-character ASCII string */
+    char vendor[CPUID_VENDOR_SZ + 1];
+    int family;
+    int model;
+    int stepping;
+    FeatureWordArray features;
+    char model_id[48];
+};
+
+static X86CPUDefinition builtin_x86_defs[] = {
+    {
+        .name = "qemu64",
+        .level = 0xd,
+        .vendor = CPUID_VENDOR_AMD,
+        .family = 6,
+        .model = 6,
+        .stepping = 3,
+        .features[FEAT_1_EDX] =
+            PPRO_FEATURES |
+            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
+            CPUID_PSE36,
+        .features[FEAT_1_ECX] =
+            CPUID_EXT_SSE3 | CPUID_EXT_CX16,
+        .features[FEAT_8000_0001_EDX] =
+            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
+        .features[FEAT_8000_0001_ECX] =
+            CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
+        .xlevel = 0x8000000A,
+        .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
+    },
+    {
+        .name = "phenom",
+        .level = 5,
+        .vendor = CPUID_VENDOR_AMD,
+        .family = 16,
+        .model = 2,
+        .stepping = 3,
+        /* Missing: CPUID_HT */
+        .features[FEAT_1_EDX] =
+            PPRO_FEATURES |
+            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
+            CPUID_PSE36 | CPUID_VME,
+        .features[FEAT_1_ECX] =
+            CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
+            CPUID_EXT_POPCNT,
+        .features[FEAT_8000_0001_EDX] =
+            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
+            CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
+            CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
+        /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
+                    CPUID_EXT3_CR8LEG,
+                    CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
+                    CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
+        .features[FEAT_8000_0001_ECX] =
+            CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
+            CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
+        /* Missing: CPUID_SVM_LBRV */
+        .features[FEAT_SVM] =
+            CPUID_SVM_NPT,
+        .xlevel = 0x8000001A,
+        .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
+    },
+    {
+        .name = "core2duo",
+        .level = 10,
+        .vendor = CPUID_VENDOR_INTEL,
+        .family = 6,
+        .model = 15,
+        .stepping = 11,
+        /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
+        .features[FEAT_1_EDX] =
+            PPRO_FEATURES |
+            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
+            CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
+        /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
+         * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
+        .features[FEAT_1_ECX] =
+            CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
+            CPUID_EXT_CX16,
+        .features[FEAT_8000_0001_EDX] =
+            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
+        .features[FEAT_8000_0001_ECX] =
+            CPUID_EXT3_LAHF_LM,
+        .xlevel = 0x80000008,
+        .model_id = "Intel(R) Core(TM)2 Duo CPU     T7700  @ 2.40GHz",
+    },
+    {
+        .name = "kvm64",
+        .level = 0xd,
+        .vendor = CPUID_VENDOR_INTEL,
+        .family = 15,
+        .model = 6,
+        .stepping = 1,
+        /* Missing: CPUID_HT */
+        .features[FEAT_1_EDX] =
+            PPRO_FEATURES | CPUID_VME |
+            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
+            CPUID_PSE36,
+        /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
+        .features[FEAT_1_ECX] =
+            CPUID_EXT_SSE3 | CPUID_EXT_CX16,
+        /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
+        .features[FEAT_8000_0001_EDX] =
+            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
+        /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
+                    CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
+                    CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
+                    CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
+        .features[FEAT_8000_0001_ECX] =
+            0,
+        .xlevel = 0x80000008,
+        .model_id = "Common KVM processor"
+    },
+    {
+        .name = "qemu32",
+        .level = 4,
+        .vendor = CPUID_VENDOR_INTEL,
+        .family = 6,
+        .model = 6,
+        .stepping = 3,
+        .features[FEAT_1_EDX] =
+            PPRO_FEATURES,
+        .features[FEAT_1_ECX] =
+            CPUID_EXT_SSE3,
+        .xlevel = 0x80000004,
+        .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
+    },
+    {
+        .name = "kvm32",
+        .level = 5,
+        .vendor = CPUID_VENDOR_INTEL,
+        .family = 15,
+        .model = 6,
+        .stepping = 1,
+        .features[FEAT_1_EDX] =
+            PPRO_FEATURES | CPUID_VME |
+            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
+        .features[FEAT_1_ECX] =
+            CPUID_EXT_SSE3,
+        .features[FEAT_8000_0001_ECX] =
+            0,
+        .xlevel = 0x80000008,
+        .model_id = "Common 32-bit KVM processor"
+    },
+    {
+        .name = "coreduo",
+        .level = 10,
+        .vendor = CPUID_VENDOR_INTEL,
+        .family = 6,
+        .model = 14,
+        .stepping = 8,
+        /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
+        .features[FEAT_1_EDX] =
+            PPRO_FEATURES | CPUID_VME |
+            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
+            CPUID_SS,
+        /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
+         * CPUID_EXT_PDCM, CPUID_EXT_VMX */
+        .features[FEAT_1_ECX] =
+            CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
+        .features[FEAT_8000_0001_EDX] =
+            CPUID_EXT2_NX,
+        .xlevel = 0x80000008,
+        .model_id = "Genuine Intel(R) CPU           T2600  @ 2.16GHz",
+    },
+    {
+        .name = "486",
+        .level = 1,
+        .vendor = CPUID_VENDOR_INTEL,
+        .family = 4,
+        .model = 8,
+        .stepping = 0,
+        .features[FEAT_1_EDX] =
+            I486_FEATURES,
+        .xlevel = 0,
+    },
+    {
+        .name = "pentium",
+        .level = 1,
+        .vendor = CPUID_VENDOR_INTEL,
+        .family = 5,
+        .model = 4,
+        .stepping = 3,
+        .features[FEAT_1_EDX] =
+            PENTIUM_FEATURES,
+        .xlevel = 0,
+    },
+    {
+        .name = "pentium2",
+        .level = 2,
+        .vendor = CPUID_VENDOR_INTEL,
+        .family = 6,
+        .model = 5,
+        .stepping = 2,
+        .features[FEAT_1_EDX] =
+            PENTIUM2_FEATURES,
+        .xlevel = 0,
+    },
+    {
+        .name = "pentium3",
+        .level = 3,
+        .vendor = CPUID_VENDOR_INTEL,
+        .family = 6,
+        .model = 7,
+        .stepping = 3,
+        .features[FEAT_1_EDX] =
+            PENTIUM3_FEATURES,
+        .xlevel = 0,
+    },
+    {
+        .name = "athlon",
+        .level = 2,
+        .vendor = CPUID_VENDOR_AMD,
+        .family = 6,
+        .model = 2,
+        .stepping = 3,
+        .features[FEAT_1_EDX] =
+            PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
+            CPUID_MCA,
+        .features[FEAT_8000_0001_EDX] =
+            CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
+        .xlevel = 0x80000008,
+        .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
+    },
+    {
+        .name = "n270",
+        .level = 10,
+        .vendor = CPUID_VENDOR_INTEL,
+        .family = 6,
+        .model = 28,
+        .stepping = 2,
+        /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
+        .features[FEAT_1_EDX] =
+            PPRO_FEATURES |
+            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
+            CPUID_ACPI | CPUID_SS,
+            /* Some CPUs lack CPUID_SEP */
+        /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
+         * CPUID_EXT_XTPR */
+        .features[FEAT_1_ECX] =
+            CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
+            CPUID_EXT_MOVBE,
+        .features[FEAT_8000_0001_EDX] =
+            CPUID_EXT2_NX,
+        .features[FEAT_8000_0001_ECX] =
+            CPUID_EXT3_LAHF_LM,
+        .xlevel = 0x80000008,
+        .model_id = "Intel(R) Atom(TM) CPU N270   @ 1.60GHz",
+    },
+    {
+        .name = "Conroe",
+        .level = 10,
+        .vendor = CPUID_VENDOR_INTEL,
+        .family = 6,
+        .model = 15,
+        .stepping = 3,
+        .features[FEAT_1_EDX] =
+            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
+            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
+            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
+            CPUID_DE | CPUID_FP87,
+        .features[FEAT_1_ECX] =
+            CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
+        .features[FEAT_8000_0001_EDX] =
+            CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
+        .features[FEAT_8000_0001_ECX] =
+            CPUID_EXT3_LAHF_LM,
+        .xlevel = 0x80000008,
+        .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
+    },
+    {
+        .name = "Penryn",
+        .level = 10,
+        .vendor = CPUID_VENDOR_INTEL,
+        .family = 6,
+        .model = 23,
+        .stepping = 3,
+        .features[FEAT_1_EDX] =
+            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
+            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
+            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
+            CPUID_DE | CPUID_FP87,
+        .features[FEAT_1_ECX] =
+            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
+            CPUID_EXT_SSE3,
+        .features[FEAT_8000_0001_EDX] =
+            CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
+        .features[FEAT_8000_0001_ECX] =
+            CPUID_EXT3_LAHF_LM,
+        .xlevel = 0x80000008,
+        .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
+    },
+    {
+        .name = "Nehalem",
+        .level = 11,
+        .vendor = CPUID_VENDOR_INTEL,
+        .family = 6,
+        .model = 26,
+        .stepping = 3,
+        .features[FEAT_1_EDX] =
+            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
+            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
+            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
+            CPUID_DE | CPUID_FP87,
+        .features[FEAT_1_ECX] =
+            CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
+            CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
+        .features[FEAT_8000_0001_EDX] =
+            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
+        .features[FEAT_8000_0001_ECX] =
+            CPUID_EXT3_LAHF_LM,
+        .xlevel = 0x80000008,
+        .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
+    },
+    {
+        .name = "Westmere",
+        .level = 11,
+        .vendor = CPUID_VENDOR_INTEL,
+        .family = 6,
+        .model = 44,
+        .stepping = 1,
+        .features[FEAT_1_EDX] =
+            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
+            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
+            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
+            CPUID_DE | CPUID_FP87,
+        .features[FEAT_1_ECX] =
+            CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
+            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
+            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
+        .features[FEAT_8000_0001_EDX] =
+            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
+        .features[FEAT_8000_0001_ECX] =
+            CPUID_EXT3_LAHF_LM,
+        .features[FEAT_6_EAX] =
+            CPUID_6_EAX_ARAT,
+        .xlevel = 0x80000008,
+        .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
+    },
+    {
+        .name = "SandyBridge",
+        .level = 0xd,
+        .vendor = CPUID_VENDOR_INTEL,
+        .family = 6,
+        .model = 42,
+        .stepping = 1,
+        .features[FEAT_1_EDX] =
+            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
+            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
+            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
+            CPUID_DE | CPUID_FP87,
+        .features[FEAT_1_ECX] =
+            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
+            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
+            CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
+            CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
+            CPUID_EXT_SSE3,
+        .features[FEAT_8000_0001_EDX] =
+            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
+            CPUID_EXT2_SYSCALL,
+        .features[FEAT_8000_0001_ECX] =
+            CPUID_EXT3_LAHF_LM,
+        .features[FEAT_XSAVE] =
+            CPUID_XSAVE_XSAVEOPT,
+        .features[FEAT_6_EAX] =
+            CPUID_6_EAX_ARAT,
+        .xlevel = 0x80000008,
+        .model_id = "Intel Xeon E312xx (Sandy Bridge)",
+    },
+    {
+        .name = "IvyBridge",
+        .level = 0xd,
+        .vendor = CPUID_VENDOR_INTEL,
+        .family = 6,
+        .model = 58,
+        .stepping = 9,
+        .features[FEAT_1_EDX] =
+            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
+            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
+            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
+            CPUID_DE | CPUID_FP87,
+        .features[FEAT_1_ECX] =
+            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
+            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
+            CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
+            CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
+            CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
+        .features[FEAT_7_0_EBX] =
+            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
+            CPUID_7_0_EBX_ERMS,
+        .features[FEAT_8000_0001_EDX] =
+            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
+            CPUID_EXT2_SYSCALL,
+        .features[FEAT_8000_0001_ECX] =
+            CPUID_EXT3_LAHF_LM,
+        .features[FEAT_XSAVE] =
+            CPUID_XSAVE_XSAVEOPT,
+        .features[FEAT_6_EAX] =
+            CPUID_6_EAX_ARAT,
+        .xlevel = 0x80000008,
+        .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
+    },
+    {
+        .name = "Haswell-noTSX",
+        .level = 0xd,
+        .vendor = CPUID_VENDOR_INTEL,
+        .family = 6,
+        .model = 60,
+        .stepping = 1,
+        .features[FEAT_1_EDX] =
+            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
+            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
+            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
+            CPUID_DE | CPUID_FP87,
+        .features[FEAT_1_ECX] =
+            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
+            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
+            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
+            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
+            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
+            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
+        .features[FEAT_8000_0001_EDX] =
+            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
+            CPUID_EXT2_SYSCALL,
+        .features[FEAT_8000_0001_ECX] =
+            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
+        .features[FEAT_7_0_EBX] =
+            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
+            CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
+            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
+        .features[FEAT_XSAVE] =
+            CPUID_XSAVE_XSAVEOPT,
+        .features[FEAT_6_EAX] =
+            CPUID_6_EAX_ARAT,
+        .xlevel = 0x80000008,
+        .model_id = "Intel Core Processor (Haswell, no TSX)",
+    },
+    {
+        .name = "Haswell",
+        .level = 0xd,
+        .vendor = CPUID_VENDOR_INTEL,
+        .family = 6,
+        .model = 60,
+        .stepping = 1,
+        .features[FEAT_1_EDX] =
+            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
+            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
+            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
+            CPUID_DE | CPUID_FP87,
+        .features[FEAT_1_ECX] =
+            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
+            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
+            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
+            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
+            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
+            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
+        .features[FEAT_8000_0001_EDX] =
+            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
+            CPUID_EXT2_SYSCALL,
+        .features[FEAT_8000_0001_ECX] =
+            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
+        .features[FEAT_7_0_EBX] =
+            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
+            CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
+            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
+            CPUID_7_0_EBX_RTM,
+        .features[FEAT_XSAVE] =
+            CPUID_XSAVE_XSAVEOPT,
+        .features[FEAT_6_EAX] =
+            CPUID_6_EAX_ARAT,
+        .xlevel = 0x80000008,
+        .model_id = "Intel Core Processor (Haswell)",
+    },
+    {
+        .name = "Broadwell-noTSX",
+        .level = 0xd,
+        .vendor = CPUID_VENDOR_INTEL,
+        .family = 6,
+        .model = 61,
+        .stepping = 2,
+        .features[FEAT_1_EDX] =
+            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
+            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
+            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
+            CPUID_DE | CPUID_FP87,
+        .features[FEAT_1_ECX] =
+            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
+            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
+            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
+            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
+            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
+            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
+        .features[FEAT_8000_0001_EDX] =
+            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
+            CPUID_EXT2_SYSCALL,
+        .features[FEAT_8000_0001_ECX] =
+            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
+        .features[FEAT_7_0_EBX] =
+            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
+            CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
+            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
+            CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
+            CPUID_7_0_EBX_SMAP,
+        .features[FEAT_XSAVE] =
+            CPUID_XSAVE_XSAVEOPT,
+        .features[FEAT_6_EAX] =
+            CPUID_6_EAX_ARAT,
+        .xlevel = 0x80000008,
+        .model_id = "Intel Core Processor (Broadwell, no TSX)",
+    },
+    {
+        .name = "Broadwell",
+        .level = 0xd,
+        .vendor = CPUID_VENDOR_INTEL,
+        .family = 6,
+        .model = 61,
+        .stepping = 2,
+        .features[FEAT_1_EDX] =
+            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
+            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
+            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
+            CPUID_DE | CPUID_FP87,
+        .features[FEAT_1_ECX] =
+            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
+            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
+            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
+            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
+            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
+            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
+        .features[FEAT_8000_0001_EDX] =
+            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
+            CPUID_EXT2_SYSCALL,
+        .features[FEAT_8000_0001_ECX] =
+            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
+        .features[FEAT_7_0_EBX] =
+            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
+            CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
+            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
+            CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
+            CPUID_7_0_EBX_SMAP,
+        .features[FEAT_XSAVE] =
+            CPUID_XSAVE_XSAVEOPT,
+        .features[FEAT_6_EAX] =
+            CPUID_6_EAX_ARAT,
+        .xlevel = 0x80000008,
+        .model_id = "Intel Core Processor (Broadwell)",
+    },
+    {
+        .name = "Skylake-Client",
+        .level = 0xd,
+        .vendor = CPUID_VENDOR_INTEL,
+        .family = 6,
+        .model = 94,
+        .stepping = 3,
+        .features[FEAT_1_EDX] =
+            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
+            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
+            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
+            CPUID_DE | CPUID_FP87,
+        .features[FEAT_1_ECX] =
+            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
+            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
+            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
+            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
+            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
+            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
+        .features[FEAT_8000_0001_EDX] =
+            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
+            CPUID_EXT2_SYSCALL,
+        .features[FEAT_8000_0001_ECX] =
+            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
+        .features[FEAT_7_0_EBX] =
+            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
+            CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
+            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
+            CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
+            CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
+        /* Missing: XSAVES (not supported by some Linux versions,
+         * including v4.1 to v4.6).
+         * KVM doesn't yet expose any XSAVES state save component,
+         * and the only one defined in Skylake (processor tracing)
+         * probably will block migration anyway.
+         */
+        .features[FEAT_XSAVE] =
+            CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
+            CPUID_XSAVE_XGETBV1,
+        .features[FEAT_6_EAX] =
+            CPUID_6_EAX_ARAT,
+        .xlevel = 0x80000008,
+        .model_id = "Intel Core Processor (Skylake)",
+    },
+    {
+        .name = "Opteron_G1",
+        .level = 5,
+        .vendor = CPUID_VENDOR_AMD,
+        .family = 15,
+        .model = 6,
+        .stepping = 1,
+        .features[FEAT_1_EDX] =
+            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
+            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
+            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
+            CPUID_DE | CPUID_FP87,
+        .features[FEAT_1_ECX] =
+            CPUID_EXT_SSE3,
+        .features[FEAT_8000_0001_EDX] =
+            CPUID_EXT2_LM | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
+            CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
+            CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
+            CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
+            CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
+            CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
+        .xlevel = 0x80000008,
+        .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
+    },
+    {
+        .name = "Opteron_G2",
+        .level = 5,
+        .vendor = CPUID_VENDOR_AMD,
+        .family = 15,
+        .model = 6,
+        .stepping = 1,
+        .features[FEAT_1_EDX] =
+            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
+            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
+            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
+            CPUID_DE | CPUID_FP87,
+        .features[FEAT_1_ECX] =
+            CPUID_EXT_CX16 | CPUID_EXT_SSE3,
+        /* Missing: CPUID_EXT2_RDTSCP */
+        .features[FEAT_8000_0001_EDX] =
+            CPUID_EXT2_LM | CPUID_EXT2_FXSR |
+            CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
+            CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
+            CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
+            CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
+            CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
+            CPUID_EXT2_DE | CPUID_EXT2_FPU,
+        .features[FEAT_8000_0001_ECX] =
+            CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
+        .xlevel = 0x80000008,
+        .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
+    },
+    {
+        .name = "Opteron_G3",
+        .level = 5,
+        .vendor = CPUID_VENDOR_AMD,
+        .family = 16,
+        .model = 2,
+        .stepping = 3,
+        .features[FEAT_1_EDX] =
+            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
+            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
+            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
+            CPUID_DE | CPUID_FP87,
+        .features[FEAT_1_ECX] =
+            CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
+            CPUID_EXT_SSE3,
+        /* Missing: CPUID_EXT2_RDTSCP */
+        .features[FEAT_8000_0001_EDX] =
+            CPUID_EXT2_LM | CPUID_EXT2_FXSR |
+            CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
+            CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
+            CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
+            CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
+            CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
+            CPUID_EXT2_DE | CPUID_EXT2_FPU,
+        .features[FEAT_8000_0001_ECX] =
+            CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
+            CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
+        .xlevel = 0x80000008,
+        .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
+    },
+    {
+        .name = "Opteron_G4",
+        .level = 0xd,
+        .vendor = CPUID_VENDOR_AMD,
+        .family = 21,
+        .model = 1,
+        .stepping = 2,
+        .features[FEAT_1_EDX] =
+            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
+            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
+            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
+            CPUID_DE | CPUID_FP87,
+        .features[FEAT_1_ECX] =
+            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
+            CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
+            CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
+            CPUID_EXT_SSE3,
+        /* Missing: CPUID_EXT2_RDTSCP */
+        .features[FEAT_8000_0001_EDX] =
+            CPUID_EXT2_LM |
+            CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
+            CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
+            CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
+            CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
+            CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
+            CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
+        .features[FEAT_8000_0001_ECX] =
+            CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
+            CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
+            CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
+            CPUID_EXT3_LAHF_LM,
+        /* no xsaveopt! */
+        .xlevel = 0x8000001A,
+        .model_id = "AMD Opteron 62xx class CPU",
+    },
+    {
+        .name = "Opteron_G5",
+        .level = 0xd,
+        .vendor = CPUID_VENDOR_AMD,
+        .family = 21,
+        .model = 2,
+        .stepping = 0,
+        .features[FEAT_1_EDX] =
+            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
+            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
+            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
+            CPUID_DE | CPUID_FP87,
+        .features[FEAT_1_ECX] =
+            CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
+            CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
+            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
+            CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
+        /* Missing: CPUID_EXT2_RDTSCP */
+        .features[FEAT_8000_0001_EDX] =
+            CPUID_EXT2_LM |
+            CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
+            CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
+            CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
+            CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
+            CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
+            CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
+        .features[FEAT_8000_0001_ECX] =
+            CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
+            CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
+            CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
+            CPUID_EXT3_LAHF_LM,
+        /* no xsaveopt! */
+        .xlevel = 0x8000001A,
+        .model_id = "AMD Opteron 63xx class CPU",
+    },
+};
+
+typedef struct PropValue {
+    const char *prop, *value;
+} PropValue;
+
+/* KVM-specific features that are automatically added/removed
+ * from all CPU models when KVM is enabled.
+ */
+static PropValue kvm_default_props[] = {
+    { "kvmclock", "on" },
+    { "kvm-nopiodelay", "on" },
+    { "kvm-asyncpf", "on" },
+    { "kvm-steal-time", "on" },
+    { "kvm-pv-eoi", "on" },
+    { "kvmclock-stable-bit", "on" },
+    { "x2apic", "on" },
+    { "acpi", "off" },
+    { "monitor", "off" },
+    { "svm", "off" },
+    { NULL, NULL },
+};
+
+/* TCG-specific defaults that override all CPU models when using TCG
+ */
+static PropValue tcg_default_props[] = {
+    { "vme", "off" },
+    { NULL, NULL },
+};
+
+void x86_cpu_change_kvm_default(const char *prop, const char *value)
+{
+    PropValue *pv;
+    for (pv = kvm_default_props; pv->prop; pv++) {
+        if (!strcmp(pv->prop, prop)) {
+            pv->value = value;
+            break;
+        }
+    }
+
+    /* It is valid to call this function only for properties that
+     * are already present in the kvm_default_props table.
+     */
+    assert(pv->prop);
+}
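+
+/* Illustrative use (hypothetical caller, not from this patch): machine
+ * compat code that wants to stop applying one of the defaults above can
+ * clear its value, e.g.:
+ *
+ *     x86_cpu_change_kvm_default("kvm-steal-time", NULL);
+ *
+ * A NULL value makes x86_cpu_apply_props() skip the entry, so the
+ * property keeps whatever value the CPU model or the user sets.
+ */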
+
+static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
+                                                   bool migratable_only);
+
+#ifdef CONFIG_KVM
+
+static bool lmce_supported(void)
+{
+    uint64_t mce_cap;
+
+    if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
+        return false;
+    }
+
+    return !!(mce_cap & MCG_LMCE_P);
+}
+
+static int cpu_x86_fill_model_id(char *str)
+{
+    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
+    int i;
+
+    for (i = 0; i < 3; i++) {
+        host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
+        memcpy(str + i * 16 +  0, &eax, 4);
+        memcpy(str + i * 16 +  4, &ebx, 4);
+        memcpy(str + i * 16 +  8, &ecx, 4);
+        memcpy(str + i * 16 + 12, &edx, 4);
+    }
+    return 0;
+}
+
+static X86CPUDefinition host_cpudef;
+
+static Property host_x86_cpu_properties[] = {
+    DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
+    DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
+    DEFINE_PROP_END_OF_LIST()
+};
+
+/* class_init for the "host" CPU model
+ *
+ * This function may be called before KVM is initialized.
+ */
+static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(oc);
+    X86CPUClass *xcc = X86_CPU_CLASS(oc);
+    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
+
+    xcc->kvm_required = true;
+
+    host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
+    x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);
+
+    host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
+    host_cpudef.family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
+    host_cpudef.model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
+    host_cpudef.stepping = eax & 0x0F;
+
+    cpu_x86_fill_model_id(host_cpudef.model_id);
+
+    xcc->cpu_def = &host_cpudef;
+    xcc->model_description =
+        "KVM processor with all supported host features "
+        "(only available in KVM mode)";
+
+    /* level, xlevel, xlevel2, and the feature words are initialized in
+     * instance_init, because they require KVM to be initialized.
+     */
+
+    dc->props = host_x86_cpu_properties;
+    /* Reason: host_x86_cpu_initfn() dies when !kvm_enabled() */
+    dc->cannot_destroy_with_object_finalize_yet = true;
+}
+
+static void host_x86_cpu_initfn(Object *obj)
+{
+    X86CPU *cpu = X86_CPU(obj);
+    CPUX86State *env = &cpu->env;
+    KVMState *s = kvm_state;
+
+    /* We can't fill the features array here because we don't know yet if
+     * "migratable" is true or false.
+     */
+    cpu->host_features = true;
+
+    /* If KVM is disabled, x86_cpu_realizefn() will report an error later */
+    if (kvm_enabled()) {
+        env->cpuid_min_level =
+            kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
+        env->cpuid_min_xlevel =
+            kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
+        env->cpuid_min_xlevel2 =
+            kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
+
+        if (lmce_supported()) {
+            object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
+        }
+    }
+
+    object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
+}
+
+static const TypeInfo host_x86_cpu_type_info = {
+    .name = X86_CPU_TYPE_NAME("host"),
+    .parent = TYPE_X86_CPU,
+    .instance_init = host_x86_cpu_initfn,
+    .class_init = host_x86_cpu_class_init,
+};
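+
+/* Usage sketch: the type registered above backs "-cpu host", whose qdev
+ * properties can be set on the command line, e.g. (assuming a KVM host):
+ *
+ *     qemu-system-x86_64 -enable-kvm -cpu host,migratable=off
+ *
+ * With migratable=off, x86_cpu_get_supported_feature_word() stops
+ * filtering out the non-migratable feature flags.
+ */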
+
+#endif
+
+static void report_unavailable_features(FeatureWord w, uint32_t mask)
+{
+    FeatureWordInfo *f = &feature_word_info[w];
+    int i;
+
+    for (i = 0; i < 32; ++i) {
+        if ((1UL << i) & mask) {
+            const char *reg = get_register_name_32(f->cpuid_reg);
+            assert(reg);
+            fprintf(stderr, "warning: %s doesn't support requested feature: "
+                "CPUID.%02XH:%s%s%s [bit %d]\n",
+                kvm_enabled() ? "host" : "TCG",
+                f->cpuid_eax, reg,
+                f->feat_names[i] ? "." : "",
+                f->feat_names[i] ? f->feat_names[i] : "", i);
+        }
+    }
+}
+
+static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
+                                         const char *name, void *opaque,
+                                         Error **errp)
+{
+    X86CPU *cpu = X86_CPU(obj);
+    CPUX86State *env = &cpu->env;
+    int64_t value;
+
+    value = (env->cpuid_version >> 8) & 0xf;
+    if (value == 0xf) {
+        value += (env->cpuid_version >> 20) & 0xff;
+    }
+    visit_type_int(v, name, &value, errp);
+}
+
+static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
+                                         const char *name, void *opaque,
+                                         Error **errp)
+{
+    X86CPU *cpu = X86_CPU(obj);
+    CPUX86State *env = &cpu->env;
+    const int64_t min = 0;
+    const int64_t max = 0xff + 0xf;
+    Error *local_err = NULL;
+    int64_t value;
+
+    visit_type_int(v, name, &value, &local_err);
+    if (local_err) {
+        error_propagate(errp, local_err);
+        return;
+    }
+    if (value < min || value > max) {
+        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
+                   name ? name : "null", value, min, max);
+        return;
+    }
+
+    env->cpuid_version &= ~0xff00f00;
+    if (value > 0x0f) {
+        env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
+    } else {
+        env->cpuid_version |= value << 8;
+    }
+}
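+
+/* Worked example for the encoding above: family 21 (0x15, as used by the
+ * Opteron_G4/G5 models) exceeds 0x0f, so the base family field (bits
+ * 11:8) is set to 0xf and the extended family field (bits 27:20) to
+ * 21 - 15 = 6, i.e. cpuid_version gains the bits 0x00600f00.
+ */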
+
+static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
+                                        const char *name, void *opaque,
+                                        Error **errp)
+{
+    X86CPU *cpu = X86_CPU(obj);
+    CPUX86State *env = &cpu->env;
+    int64_t value;
+
+    value = (env->cpuid_version >> 4) & 0xf;
+    value |= ((env->cpuid_version >> 16) & 0xf) << 4;
+    visit_type_int(v, name, &value, errp);
+}
+
+static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
+                                        const char *name, void *opaque,
+                                        Error **errp)
+{
+    X86CPU *cpu = X86_CPU(obj);
+    CPUX86State *env = &cpu->env;
+    const int64_t min = 0;
+    const int64_t max = 0xff;
+    Error *local_err = NULL;
+    int64_t value;
+
+    visit_type_int(v, name, &value, &local_err);
+    if (local_err) {
+        error_propagate(errp, local_err);
+        return;
+    }
+    if (value < min || value > max) {
+        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
+                   name ? name : "null", value, min, max);
+        return;
+    }
+
+    env->cpuid_version &= ~0xf00f0;
+    env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
+}
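+
+/* Worked example: model 0x3a is stored with its low nibble in bits 7:4
+ * and its high nibble in the extended model field (bits 19:16), so the
+ * getter above reassembles ((ver >> 16) & 0xf) << 4 | ((ver >> 4) & 0xf)
+ * back into 0x3a.
+ */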
+
+static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
+                                           const char *name, void *opaque,
+                                           Error **errp)
+{
+    X86CPU *cpu = X86_CPU(obj);
+    CPUX86State *env = &cpu->env;
+    int64_t value;
+
+    value = env->cpuid_version & 0xf;
+    visit_type_int(v, name, &value, errp);
+}
+
+static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
+                                           const char *name, void *opaque,
+                                           Error **errp)
+{
+    X86CPU *cpu = X86_CPU(obj);
+    CPUX86State *env = &cpu->env;
+    const int64_t min = 0;
+    const int64_t max = 0xf;
+    Error *local_err = NULL;
+    int64_t value;
+
+    visit_type_int(v, name, &value, &local_err);
+    if (local_err) {
+        error_propagate(errp, local_err);
+        return;
+    }
+    if (value < min || value > max) {
+        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
+                   name ? name : "null", value, min, max);
+        return;
+    }
+
+    env->cpuid_version &= ~0xf;
+    env->cpuid_version |= value & 0xf;
+}
+
+static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
+{
+    X86CPU *cpu = X86_CPU(obj);
+    CPUX86State *env = &cpu->env;
+    char *value;
+
+    value = g_malloc(CPUID_VENDOR_SZ + 1);
+    x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
+                             env->cpuid_vendor3);
+    return value;
+}
+
+static void x86_cpuid_set_vendor(Object *obj, const char *value,
+                                 Error **errp)
+{
+    X86CPU *cpu = X86_CPU(obj);
+    CPUX86State *env = &cpu->env;
+    int i;
+
+    if (strlen(value) != CPUID_VENDOR_SZ) {
+        error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
+        return;
+    }
+
+    env->cpuid_vendor1 = 0;
+    env->cpuid_vendor2 = 0;
+    env->cpuid_vendor3 = 0;
+    for (i = 0; i < 4; i++) {
+        env->cpuid_vendor1 |= ((uint8_t)value[i    ]) << (8 * i);
+        env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
+        env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
+    }
+}
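+
+/* Example of the packing above: for the 12-byte string "AuthenticAMD",
+ * cpuid_vendor1 holds "Auth" (with 'A' in the low byte), cpuid_vendor2
+ * "enti" and cpuid_vendor3 "cAMD", matching the EBX/EDX/ECX layout that
+ * CPUID leaf 0 returns below in cpu_x86_cpuid().
+ */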
+
+static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
+{
+    X86CPU *cpu = X86_CPU(obj);
+    CPUX86State *env = &cpu->env;
+    char *value;
+    int i;
+
+    value = g_malloc(48 + 1);
+    for (i = 0; i < 48; i++) {
+        value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
+    }
+    value[48] = '\0';
+    return value;
+}
+
+static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
+                                   Error **errp)
+{
+    X86CPU *cpu = X86_CPU(obj);
+    CPUX86State *env = &cpu->env;
+    int c, len, i;
+
+    if (model_id == NULL) {
+        model_id = "";
+    }
+    len = strlen(model_id);
+    memset(env->cpuid_model, 0, 48);
+    for (i = 0; i < 48; i++) {
+        if (i >= len) {
+            c = '\0';
+        } else {
+            c = (uint8_t)model_id[i];
+        }
+        env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
+    }
+}
+
+static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
+                                   void *opaque, Error **errp)
+{
+    X86CPU *cpu = X86_CPU(obj);
+    int64_t value;
+
+    value = cpu->env.tsc_khz * 1000;
+    visit_type_int(v, name, &value, errp);
+}
+
+static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
+                                   void *opaque, Error **errp)
+{
+    X86CPU *cpu = X86_CPU(obj);
+    const int64_t min = 0;
+    const int64_t max = INT64_MAX;
+    Error *local_err = NULL;
+    int64_t value;
+
+    visit_type_int(v, name, &value, &local_err);
+    if (local_err) {
+        error_propagate(errp, local_err);
+        return;
+    }
+    if (value < min || value > max) {
+        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
+                   name ? name : "null", value, min, max);
+        return;
+    }
+
+    cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
+}
+
+/* Generic getter for "feature-words" and "filtered-features" properties */
+static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
+                                      const char *name, void *opaque,
+                                      Error **errp)
+{
+    uint32_t *array = (uint32_t *)opaque;
+    FeatureWord w;
+    X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
+    X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
+    X86CPUFeatureWordInfoList *list = NULL;
+
+    for (w = 0; w < FEATURE_WORDS; w++) {
+        FeatureWordInfo *wi = &feature_word_info[w];
+        X86CPUFeatureWordInfo *qwi = &word_infos[w];
+        qwi->cpuid_input_eax = wi->cpuid_eax;
+        qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
+        qwi->cpuid_input_ecx = wi->cpuid_ecx;
+        qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
+        qwi->features = array[w];
+
+        /* List will be in reverse order, but order shouldn't matter */
+        list_entries[w].next = list;
+        list_entries[w].value = &word_infos[w];
+        list = &list_entries[w];
+    }
+
+    visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
+}
+
+static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
+                                 void *opaque, Error **errp)
+{
+    X86CPU *cpu = X86_CPU(obj);
+    int64_t value = cpu->hyperv_spinlock_attempts;
+
+    visit_type_int(v, name, &value, errp);
+}
+
+static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
+                                 void *opaque, Error **errp)
+{
+    const int64_t min = 0xFFF;
+    const int64_t max = UINT_MAX;
+    X86CPU *cpu = X86_CPU(obj);
+    Error *err = NULL;
+    int64_t value;
+
+    visit_type_int(v, name, &value, &err);
+    if (err) {
+        error_propagate(errp, err);
+        return;
+    }
+
+    if (value < min || value > max) {
+        error_setg(errp, "Property %s.%s doesn't take value %" PRId64
+                   " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
+                   object_get_typename(obj), name ? name : "null",
+                   value, min, max);
+        return;
+    }
+    cpu->hyperv_spinlock_attempts = value;
+}
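+
+/* Usage sketch (property name assumed; it is registered elsewhere in
+ * this file): the retry count is set from the command line, e.g.
+ *
+ *     -cpu SomeModel,hv-spinlocks=0x1fff
+ *
+ * Values below the 0xFFF minimum checked above are rejected.
+ */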
+
+static PropertyInfo qdev_prop_spinlocks = {
+    .name  = "int",
+    .get   = x86_get_hv_spinlocks,
+    .set   = x86_set_hv_spinlocks,
+};
+
+/* Convert all '_' in a feature string option name to '-', so that feature
+ * names conform to the QOM property naming rule, which uses '-' instead
+ * of '_'.
+ */
+static inline void feat2prop(char *s)
+{
+    while ((s = strchr(s, '_'))) {
+        *s = '-';
+    }
+}
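+
+/* e.g. feat2prop() turns the legacy spelling "lahf_lm" into the QOM
+ * property name "lahf-lm" before the string is used for lookups. */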
+
+/* Return the feature property name for a feature flag bit */
+static const char *x86_cpu_feature_name(FeatureWord w, int bitnr)
+{
+    /* XSAVE components are automatically enabled by other features,
+     * so return the original feature name instead
+     */
+    if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) {
+        int comp = (w == FEAT_XSAVE_COMP_HI) ? bitnr + 32 : bitnr;
+
+        if (comp < ARRAY_SIZE(x86_ext_save_areas) &&
+            x86_ext_save_areas[comp].bits) {
+            w = x86_ext_save_areas[comp].feature;
+            bitnr = ctz32(x86_ext_save_areas[comp].bits);
+        }
+    }
+
+    assert(bitnr < 32);
+    assert(w < FEATURE_WORDS);
+    return feature_word_info[w].feat_names[bitnr];
+}
+
+/* Compatibility hack to maintain the legacy +-feat semantics, where
+ * +-feat overwrites any feature set by feat=on|off, even if the latter
+ * is parsed after +-feat
+ * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled)
+ */
+static GList *plus_features, *minus_features;
+
+static gint compare_string(gconstpointer a, gconstpointer b)
+{
+    return g_strcmp0(a, b);
+}
+
+/* Parse "+feature,-feature,feature=foo" CPU feature string
+ */
+static void x86_cpu_parse_featurestr(const char *typename, char *features,
+                                     Error **errp)
+{
+    char *featurestr; /* Single "key=value" string being parsed */
+    static bool cpu_globals_initialized;
+    bool ambiguous = false;
+
+    if (cpu_globals_initialized) {
+        return;
+    }
+    cpu_globals_initialized = true;
+
+    if (!features) {
+        return;
+    }
+
+    for (featurestr = strtok(features, ",");
+         featurestr;
+         featurestr = strtok(NULL, ",")) {
+        const char *name;
+        const char *val = NULL;
+        char *eq = NULL;
+        char num[32];
+        GlobalProperty *prop;
+
+        /* Compatibility syntax: */
+        if (featurestr[0] == '+') {
+            plus_features = g_list_append(plus_features,
+                                          g_strdup(featurestr + 1));
+            continue;
+        } else if (featurestr[0] == '-') {
+            minus_features = g_list_append(minus_features,
+                                           g_strdup(featurestr + 1));
+            continue;
+        }
+
+        eq = strchr(featurestr, '=');
+        if (eq) {
+            *eq++ = 0;
+            val = eq;
+        } else {
+            val = "on";
+        }
+
+        feat2prop(featurestr);
+        name = featurestr;
+
+        if (g_list_find_custom(plus_features, name, compare_string)) {
+            error_report("warning: Ambiguous CPU model string. "
+                         "Don't mix both \"+%s\" and \"%s=%s\"",
+                         name, name, val);
+            ambiguous = true;
+        }
+        if (g_list_find_custom(minus_features, name, compare_string)) {
+            error_report("warning: Ambiguous CPU model string. "
+                         "Don't mix both \"-%s\" and \"%s=%s\"",
+                         name, name, val);
+            ambiguous = true;
+        }
+
+        /* Special case: */
+        if (!strcmp(name, "tsc-freq")) {
+            int64_t tsc_freq;
+            char *err;
+
+            tsc_freq = qemu_strtosz_suffix_unit(val, &err,
+                                           QEMU_STRTOSZ_DEFSUFFIX_B, 1000);
+            if (tsc_freq < 0 || *err) {
+                error_setg(errp, "bad numerical value %s", val);
+                return;
+            }
+            snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
+            val = num;
+            name = "tsc-frequency";
+        }
+
+        prop = g_new0(typeof(*prop), 1);
+        prop->driver = typename;
+        prop->property = g_strdup(name);
+        prop->value = g_strdup(val);
+        prop->errp = &error_fatal;
+        qdev_prop_register_global(prop);
+    }
+
+    if (ambiguous) {
+        error_report("warning: Compatibility of ambiguous CPU model "
+                     "strings won't be kept on future QEMU versions");
+    }
+}
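+
+/* Sketch of accepted input for the parser above: with
+ * "-cpu Opteron_G5,+avx,-svm,tsc-freq=2.5G", "+avx" and "-svm" go
+ * through the legacy plus/minus lists, "tsc-freq=2.5G" is rewritten to
+ * a "tsc-frequency=2500000000" setting (the suffix is parsed with a
+ * 1000-based unit), and each remaining key=value pair is registered as
+ * a global property on the CPU type.
+ */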
+
+static void x86_cpu_load_features(X86CPU *cpu, Error **errp);
+static int x86_cpu_filter_features(X86CPU *cpu);
+
+/* Check for missing features that may prevent the CPU class from
+ * running using the current machine and accelerator.
+ */
+static void x86_cpu_class_check_missing_features(X86CPUClass *xcc,
+                                                 strList **missing_feats)
+{
+    X86CPU *xc;
+    FeatureWord w;
+    Error *err = NULL;
+    strList **next = missing_feats;
+
+    if (xcc->kvm_required && !kvm_enabled()) {
+        strList *new = g_new0(strList, 1);
+        new->value = g_strdup("kvm");
+        *missing_feats = new;
+        return;
+    }
+
+    xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
+
+    x86_cpu_load_features(xc, &err);
+    if (err) {
+        /* Errors from x86_cpu_load_features should never happen, but in
+         * case they do, just report the model as not runnable at all,
+         * using the "type" property.
+         */
+        strList *new = g_new0(strList, 1);
+        new->value = g_strdup("type");
+        *next = new;
+        next = &new->next;
+    }
+
+    x86_cpu_filter_features(xc);
+
+    for (w = 0; w < FEATURE_WORDS; w++) {
+        uint32_t filtered = xc->filtered_features[w];
+        int i;
+        for (i = 0; i < 32; i++) {
+            if (filtered & (1UL << i)) {
+                strList *new = g_new0(strList, 1);
+                new->value = g_strdup(x86_cpu_feature_name(w, i));
+                *next = new;
+                next = &new->next;
+            }
+        }
+    }
+
+    object_unref(OBJECT(xc));
+}
+
+/* Print all cpuid feature names in featureset
+ */
+static void listflags(FILE *f, fprintf_function print, const char **featureset)
+{
+    int bit;
+    bool first = true;
+
+    for (bit = 0; bit < 32; bit++) {
+        if (featureset[bit]) {
+            print(f, "%s%s", first ? "" : " ", featureset[bit]);
+            first = false;
+        }
+    }
+}
+
+/* Sort alphabetically by type name, listing kvm_required models last. */
+static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b)
+{
+    ObjectClass *class_a = (ObjectClass *)a;
+    ObjectClass *class_b = (ObjectClass *)b;
+    X86CPUClass *cc_a = X86_CPU_CLASS(class_a);
+    X86CPUClass *cc_b = X86_CPU_CLASS(class_b);
+    const char *name_a, *name_b;
+
+    if (cc_a->kvm_required != cc_b->kvm_required) {
+        /* kvm_required items go last */
+        return cc_a->kvm_required ? 1 : -1;
+    } else {
+        name_a = object_class_get_name(class_a);
+        name_b = object_class_get_name(class_b);
+        return strcmp(name_a, name_b);
+    }
+}
+
+static GSList *get_sorted_cpu_model_list(void)
+{
+    GSList *list = object_class_get_list(TYPE_X86_CPU, false);
+    list = g_slist_sort(list, x86_cpu_list_compare);
+    return list;
+}
+
+static void x86_cpu_list_entry(gpointer data, gpointer user_data)
+{
+    ObjectClass *oc = data;
+    X86CPUClass *cc = X86_CPU_CLASS(oc);
+    CPUListState *s = user_data;
+    char *name = x86_cpu_class_get_model_name(cc);
+    const char *desc = cc->model_description;
+    if (!desc) {
+        desc = cc->cpu_def->model_id;
+    }
+
+    (*s->cpu_fprintf)(s->file, "x86 %16s  %-48s\n",
+                      name, desc);
+    g_free(name);
+}
+
+/* list available CPU models and flags */
+void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
+{
+    int i;
+    CPUListState s = {
+        .file = f,
+        .cpu_fprintf = cpu_fprintf,
+    };
+    GSList *list;
+
+    (*cpu_fprintf)(f, "Available CPUs:\n");
+    list = get_sorted_cpu_model_list();
+    g_slist_foreach(list, x86_cpu_list_entry, &s);
+    g_slist_free(list);
+
+    (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
+    for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
+        FeatureWordInfo *fw = &feature_word_info[i];
+
+        (*cpu_fprintf)(f, "  ");
+        listflags(f, cpu_fprintf, fw->feat_names);
+        (*cpu_fprintf)(f, "\n");
+    }
+}
+
+static void x86_cpu_definition_entry(gpointer data, gpointer user_data)
+{
+    ObjectClass *oc = data;
+    X86CPUClass *cc = X86_CPU_CLASS(oc);
+    CpuDefinitionInfoList **cpu_list = user_data;
+    CpuDefinitionInfoList *entry;
+    CpuDefinitionInfo *info;
+
+    info = g_malloc0(sizeof(*info));
+    info->name = x86_cpu_class_get_model_name(cc);
+    x86_cpu_class_check_missing_features(cc, &info->unavailable_features);
+    info->has_unavailable_features = true;
+
+    entry = g_malloc0(sizeof(*entry));
+    entry->value = info;
+    entry->next = *cpu_list;
+    *cpu_list = entry;
+}
+
+CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
+{
+    CpuDefinitionInfoList *cpu_list = NULL;
+    GSList *list = get_sorted_cpu_model_list();
+    g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list);
+    g_slist_free(list);
+    return cpu_list;
+}
+
+static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
+                                                   bool migratable_only)
+{
+    FeatureWordInfo *wi = &feature_word_info[w];
+    uint32_t r;
+
+    if (kvm_enabled()) {
+        r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
+                                                    wi->cpuid_ecx,
+                                                    wi->cpuid_reg);
+    } else if (tcg_enabled()) {
+        r = wi->tcg_features;
+    } else {
+        return ~0;
+    }
+    if (migratable_only) {
+        r &= x86_cpu_get_migratable_flags(w);
+    }
+    return r;
+}
+
+/*
+ * Filters CPU feature words based on host availability of each feature.
+ *
+ * Returns: 0 if all flags are supported by the host, non-zero otherwise.
+ */
+static int x86_cpu_filter_features(X86CPU *cpu)
+{
+    CPUX86State *env = &cpu->env;
+    FeatureWord w;
+    int rv = 0;
+
+    for (w = 0; w < FEATURE_WORDS; w++) {
+        uint32_t host_feat =
+            x86_cpu_get_supported_feature_word(w, false);
+        uint32_t requested_features = env->features[w];
+        env->features[w] &= host_feat;
+        cpu->filtered_features[w] = requested_features & ~env->features[w];
+        if (cpu->filtered_features[w]) {
+            rv = 1;
+        }
+    }
+
+    return rv;
+}
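+
+/* Worked example of the filtering above: if a model requests
+ * CPUID_EXT_AVX in FEAT_1_ECX but the host (or TCG) feature word lacks
+ * that bit, env->features[FEAT_1_ECX] loses the bit,
+ * filtered_features[FEAT_1_ECX] records it, and the non-zero return
+ * value lets the caller warn (check) or fail (enforce).
+ */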
+
+static void x86_cpu_report_filtered_features(X86CPU *cpu)
+{
+    FeatureWord w;
+
+    for (w = 0; w < FEATURE_WORDS; w++) {
+        report_unavailable_features(w, cpu->filtered_features[w]);
+    }
+}
+
+static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
+{
+    PropValue *pv;
+    for (pv = props; pv->prop; pv++) {
+        if (!pv->value) {
+            continue;
+        }
+        object_property_parse(OBJECT(cpu), pv->value, pv->prop,
+                              &error_abort);
+    }
+}
+
+/* Load data from X86CPUDefinition
+ */
+static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
+{
+    CPUX86State *env = &cpu->env;
+    const char *vendor;
+    char host_vendor[CPUID_VENDOR_SZ + 1];
+    FeatureWord w;
+
+    /* CPU models only set _minimum_ values for level/xlevel: */
+    object_property_set_int(OBJECT(cpu), def->level, "min-level", errp);
+    object_property_set_int(OBJECT(cpu), def->xlevel, "min-xlevel", errp);
+
+    object_property_set_int(OBJECT(cpu), def->family, "family", errp);
+    object_property_set_int(OBJECT(cpu), def->model, "model", errp);
+    object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
+    object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
+    for (w = 0; w < FEATURE_WORDS; w++) {
+        env->features[w] = def->features[w];
+    }
+
+    /* Special cases not set in the X86CPUDefinition structs: */
+    if (kvm_enabled()) {
+        if (!kvm_irqchip_in_kernel()) {
+            x86_cpu_change_kvm_default("x2apic", "off");
+        }
+
+        x86_cpu_apply_props(cpu, kvm_default_props);
+    } else if (tcg_enabled()) {
+        x86_cpu_apply_props(cpu, tcg_default_props);
+    }
+
+    env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
+
+    /* sysenter isn't supported in compatibility mode on AMD, and
+     * syscall isn't supported in compatibility mode on Intel.
+     * Normally we advertise the actual CPU vendor, but you can
+     * override this using the 'vendor' property if you want to use
+     * KVM's sysenter/syscall emulation in compatibility mode and
+     * when doing cross-vendor migration.
+     */
+    vendor = def->vendor;
+    if (kvm_enabled()) {
+        uint32_t ebx = 0, ecx = 0, edx = 0;
+        host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
+        x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
+        vendor = host_vendor;
+    }
+
+    object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
+}
+
+X86CPU *cpu_x86_init(const char *cpu_model)
+{
+    return X86_CPU(cpu_generic_init(TYPE_X86_CPU, cpu_model));
+}
+
+static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
+{
+    X86CPUDefinition *cpudef = data;
+    X86CPUClass *xcc = X86_CPU_CLASS(oc);
+
+    xcc->cpu_def = cpudef;
+}
+
+static void x86_register_cpudef_type(X86CPUDefinition *def)
+{
+    char *typename = x86_cpu_type_name(def->name);
+    TypeInfo ti = {
+        .name = typename,
+        .parent = TYPE_X86_CPU,
+        .class_init = x86_cpu_cpudef_class_init,
+        .class_data = def,
+    };
+
+    type_register(&ti);
+    g_free(typename);
+}
+
+#if !defined(CONFIG_USER_ONLY)
+
+void cpu_clear_apic_feature(CPUX86State *env)
+{
+    env->features[FEAT_1_EDX] &= ~CPUID_APIC;
+}
+
+#endif /* !CONFIG_USER_ONLY */
+
+void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
+                   uint32_t *eax, uint32_t *ebx,
+                   uint32_t *ecx, uint32_t *edx)
+{
+    X86CPU *cpu = x86_env_get_cpu(env);
+    CPUState *cs = CPU(cpu);
+    uint32_t pkg_offset;
+
+    /* test if maximum index reached */
+    if (index & 0x80000000) {
+        if (index > env->cpuid_xlevel) {
+            if (env->cpuid_xlevel2 > 0) {
+                /* Handle the Centaur's CPUID instruction. */
+                if (index > env->cpuid_xlevel2) {
+                    index = env->cpuid_xlevel2;
+                } else if (index < 0xC0000000) {
+                    index = env->cpuid_xlevel;
+                }
+            } else {
+                /* Intel documentation states that invalid EAX input will
+                 * return the same information as EAX=cpuid_level
+                 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
+                 */
+                index = env->cpuid_level;
+            }
+        }
+    } else {
+        if (index > env->cpuid_level) {
+            index = env->cpuid_level;
+        }
+    }
+
+    switch (index) {
+    case 0:
+        *eax = env->cpuid_level;
+        *ebx = env->cpuid_vendor1;
+        *edx = env->cpuid_vendor2;
+        *ecx = env->cpuid_vendor3;
+        break;
+    case 1:
+        *eax = env->cpuid_version;
+        *ebx = (cpu->apic_id << 24) |
+               8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
+        *ecx = env->features[FEAT_1_ECX];
+        if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
+            *ecx |= CPUID_EXT_OSXSAVE;
+        }
+        *edx = env->features[FEAT_1_EDX];
+        if (cs->nr_cores * cs->nr_threads > 1) {
+            *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
+            *edx |= CPUID_HT;
+        }
+        break;
+    case 2:
+        /* cache info: needed for Pentium Pro compatibility */
+        if (cpu->cache_info_passthrough) {
+            host_cpuid(index, 0, eax, ebx, ecx, edx);
+            break;
+        }
+        *eax = 1; /* Number of CPUID[EAX=2] calls required */
+        *ebx = 0;
+        if (!cpu->enable_l3_cache) {
+            *ecx = 0;
+        } else {
+            *ecx = L3_N_DESCRIPTOR;
+        }
+        *edx = (L1D_DESCRIPTOR << 16) | \
+               (L1I_DESCRIPTOR <<  8) | \
+               (L2_DESCRIPTOR);
+        break;
+    case 4:
+        /* cache info: needed for Core compatibility */
+        if (cpu->cache_info_passthrough) {
+            host_cpuid(index, count, eax, ebx, ecx, edx);
+            *eax &= ~0xFC000000;
+        } else {
+            *eax = 0;
+            switch (count) {
+            case 0: /* L1 dcache info */
+                *eax |= CPUID_4_TYPE_DCACHE | \
+                        CPUID_4_LEVEL(1) | \
+                        CPUID_4_SELF_INIT_LEVEL;
+                *ebx = (L1D_LINE_SIZE - 1) | \
+                       ((L1D_PARTITIONS - 1) << 12) | \
+                       ((L1D_ASSOCIATIVITY - 1) << 22);
+                *ecx = L1D_SETS - 1;
+                *edx = CPUID_4_NO_INVD_SHARING;
+                break;
+            case 1: /* L1 icache info */
+                *eax |= CPUID_4_TYPE_ICACHE | \
+                        CPUID_4_LEVEL(1) | \
+                        CPUID_4_SELF_INIT_LEVEL;
+                *ebx = (L1I_LINE_SIZE - 1) | \
+                       ((L1I_PARTITIONS - 1) << 12) | \
+                       ((L1I_ASSOCIATIVITY - 1) << 22);
+                *ecx = L1I_SETS - 1;
+                *edx = CPUID_4_NO_INVD_SHARING;
+                break;
+            case 2: /* L2 cache info */
+                *eax |= CPUID_4_TYPE_UNIFIED | \
+                        CPUID_4_LEVEL(2) | \
+                        CPUID_4_SELF_INIT_LEVEL;
+                if (cs->nr_threads > 1) {
+                    *eax |= (cs->nr_threads - 1) << 14;
+                }
+                *ebx = (L2_LINE_SIZE - 1) | \
+                       ((L2_PARTITIONS - 1) << 12) | \
+                       ((L2_ASSOCIATIVITY - 1) << 22);
+                *ecx = L2_SETS - 1;
+                *edx = CPUID_4_NO_INVD_SHARING;
+                break;
+            case 3: /* L3 cache info */
+                if (!cpu->enable_l3_cache) {
+                    *eax = 0;
+                    *ebx = 0;
+                    *ecx = 0;
+                    *edx = 0;
+                    break;
+                }
+                *eax |= CPUID_4_TYPE_UNIFIED | \
+                        CPUID_4_LEVEL(3) | \
+                        CPUID_4_SELF_INIT_LEVEL;
+                pkg_offset = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
+                *eax |= ((1 << pkg_offset) - 1) << 14;
+                *ebx = (L3_N_LINE_SIZE - 1) | \
+                       ((L3_N_PARTITIONS - 1) << 12) | \
+                       ((L3_N_ASSOCIATIVITY - 1) << 22);
+                *ecx = L3_N_SETS - 1;
+                *edx = CPUID_4_INCLUSIVE | CPUID_4_COMPLEX_IDX;
+                break;
+            default: /* end of info */
+                *eax = 0;
+                *ebx = 0;
+                *ecx = 0;
+                *edx = 0;
+                break;
+            }
+        }
+
+        /* QEMU gives out its own APIC IDs, never pass down bits 31..26.  */
+        if ((*eax & 31) && cs->nr_cores > 1) {
+            *eax |= (cs->nr_cores - 1) << 26;
+        }
+        break;
+    case 5:
+        /* mwait info: needed for Core compatibility */
+        *eax = 0; /* Smallest monitor-line size in bytes */
+        *ebx = 0; /* Largest monitor-line size in bytes */
+        *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
+        *edx = 0;
+        break;
+    case 6:
+        /* Thermal and Power Leaf */
+        *eax = env->features[FEAT_6_EAX];
+        *ebx = 0;
+        *ecx = 0;
+        *edx = 0;
+        break;
+    case 7:
+        /* Structured Extended Feature Flags Enumeration Leaf */
+        if (count == 0) {
+            *eax = 0; /* Maximum ECX value for sub-leaves */
+            *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
+            *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
+            if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
+                *ecx |= CPUID_7_0_ECX_OSPKE;
+            }
+            *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */
+        } else {
+            *eax = 0;
+            *ebx = 0;
+            *ecx = 0;
+            *edx = 0;
+        }
+        break;
+    case 9:
+        /* Direct Cache Access Information Leaf */
+        *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
+        *ebx = 0;
+        *ecx = 0;
+        *edx = 0;
+        break;
+    case 0xA:
+        /* Architectural Performance Monitoring Leaf */
+        if (kvm_enabled() && cpu->enable_pmu) {
+            KVMState *s = cs->kvm_state;
+
+            *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
+            *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
+            *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
+            *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
+        } else {
+            *eax = 0;
+            *ebx = 0;
+            *ecx = 0;
+            *edx = 0;
+        }
+        break;
+    case 0xB:
+        /* Extended Topology Enumeration Leaf */
+        if (!cpu->enable_cpuid_0xb) {
+            *eax = *ebx = *ecx = *edx = 0;
+            break;
+        }
+
+        *ecx = count & 0xff;
+        *edx = cpu->apic_id;
+
+        switch (count) {
+        case 0:
+            *eax = apicid_core_offset(cs->nr_cores, cs->nr_threads);
+            *ebx = cs->nr_threads;
+            *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
+            break;
+        case 1:
+            *eax = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
+            *ebx = cs->nr_cores * cs->nr_threads;
+            *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
+            break;
+        default:
+            *eax = 0;
+            *ebx = 0;
+            *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
+        }
+
+        assert(!(*eax & ~0x1f));
+        *ebx &= 0xffff; /* The count doesn't need to be reliable. */
+        break;
+    case 0xD: {
+        /* Processor Extended State */
+        *eax = 0;
+        *ebx = 0;
+        *ecx = 0;
+        *edx = 0;
+        if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
+            break;
+        }
+
+        if (count == 0) {
+            *ecx = xsave_area_size(x86_cpu_xsave_components(cpu));
+            *eax = env->features[FEAT_XSAVE_COMP_LO];
+            *edx = env->features[FEAT_XSAVE_COMP_HI];
+            *ebx = *ecx;
+        } else if (count == 1) {
+            *eax = env->features[FEAT_XSAVE];
+        } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
+            if ((x86_cpu_xsave_components(cpu) >> count) & 1) {
+                const ExtSaveArea *esa = &x86_ext_save_areas[count];
+                *eax = esa->size;
+                *ebx = esa->offset;
+            }
+        }
+        break;
+    }
+    case 0x80000000:
+        *eax = env->cpuid_xlevel;
+        *ebx = env->cpuid_vendor1;
+        *edx = env->cpuid_vendor2;
+        *ecx = env->cpuid_vendor3;
+        break;
+    case 0x80000001:
+        *eax = env->cpuid_version;
+        *ebx = 0;
+        *ecx = env->features[FEAT_8000_0001_ECX];
+        *edx = env->features[FEAT_8000_0001_EDX];
+
+        /* The Linux kernel checks for the CMPLegacy bit and
+         * discards multiple thread information if it is set.
+         * So don't set it here for Intel to make Linux guests happy.
+         */
+        if (cs->nr_cores * cs->nr_threads > 1) {
+            if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
+                env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
+                env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
+                *ecx |= 1 << 1;    /* CmpLegacy bit */
+            }
+        }
+        break;
+    case 0x80000002:
+    case 0x80000003:
+    case 0x80000004:
+        *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
+        *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
+        *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
+        *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
+        break;
+    case 0x80000005:
+        /* cache info (L1 cache) */
+        if (cpu->cache_info_passthrough) {
+            host_cpuid(index, 0, eax, ebx, ecx, edx);
+            break;
+        }
+        *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
+               (L1_ITLB_2M_ASSOC <<  8) | (L1_ITLB_2M_ENTRIES);
+        *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
+               (L1_ITLB_4K_ASSOC <<  8) | (L1_ITLB_4K_ENTRIES);
+        *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
+               (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
+        *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
+               (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
+        break;
+    case 0x80000006:
+        /* cache info (L2 cache) */
+        if (cpu->cache_info_passthrough) {
+            host_cpuid(index, 0, eax, ebx, ecx, edx);
+            break;
+        }
+        *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
+               (L2_DTLB_2M_ENTRIES << 16) | \
+               (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
+               (L2_ITLB_2M_ENTRIES);
+        *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
+               (L2_DTLB_4K_ENTRIES << 16) | \
+               (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
+               (L2_ITLB_4K_ENTRIES);
+        *ecx = (L2_SIZE_KB_AMD << 16) | \
+               (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
+               (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
+        if (!cpu->enable_l3_cache) {
+            *edx = ((L3_SIZE_KB / 512) << 18) | \
+                   (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
+                   (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
+        } else {
+            *edx = ((L3_N_SIZE_KB_AMD / 512) << 18) | \
+                   (AMD_ENC_ASSOC(L3_N_ASSOCIATIVITY) << 12) | \
+                   (L3_N_LINES_PER_TAG << 8) | (L3_N_LINE_SIZE);
+        }
+        break;
+    case 0x80000007:
+        *eax = 0;
+        *ebx = 0;
+        *ecx = 0;
+        *edx = env->features[FEAT_8000_0007_EDX];
+        break;
+    case 0x80000008:
+        /* virtual & phys address size in low 2 bytes. */
+        if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
+            /* 64 bit processor, 48 bits virtual, configurable
+             * physical bits.
+             */
+            *eax = 0x00003000 + cpu->phys_bits;
+        } else {
+            *eax = cpu->phys_bits;
+        }
+        *ebx = 0;
+        *ecx = 0;
+        *edx = 0;
+        if (cs->nr_cores * cs->nr_threads > 1) {
+            *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
+        }
+        break;
+    case 0x8000000A:
+        if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
+            *eax = 0x00000001; /* SVM Revision */
+            *ebx = 0x00000010; /* nr of ASIDs */
+            *ecx = 0;
+            *edx = env->features[FEAT_SVM]; /* optional features */
+        } else {
+            *eax = 0;
+            *ebx = 0;
+            *ecx = 0;
+            *edx = 0;
+        }
+        break;
+    case 0xC0000000:
+        *eax = env->cpuid_xlevel2;
+        *ebx = 0;
+        *ecx = 0;
+        *edx = 0;
+        break;
+    case 0xC0000001:
+        /* Support for VIA CPU's CPUID instruction */
+        *eax = env->cpuid_version;
+        *ebx = 0;
+        *ecx = 0;
+        *edx = env->features[FEAT_C000_0001_EDX];
+        break;
+    case 0xC0000002:
+    case 0xC0000003:
+    case 0xC0000004:
+        /* Reserved for the future, and now filled with zero */
+        *eax = 0;
+        *ebx = 0;
+        *ecx = 0;
+        *edx = 0;
+        break;
+    default:
+        /* reserved values: zero */
+        *eax = 0;
+        *ebx = 0;
+        *ecx = 0;
+        *edx = 0;
+        break;
+    }
+}
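+
+/* Minimal usage sketch (hypothetical caller): reading the vendor string
+ * back out of CPUID leaf 0:
+ *
+ *     uint32_t eax, ebx, ecx, edx;
+ *     char vendor[CPUID_VENDOR_SZ + 1];
+ *
+ *     cpu_x86_cpuid(env, 0, 0, &eax, &ebx, &ecx, &edx);
+ *     x86_cpu_vendor_words2str(vendor, ebx, edx, ecx);
+ *
+ * Note the EBX/EDX/ECX argument order, matching how case 0 above fills
+ * the registers.
+ */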
+
+/* CPUClass::reset() */
+static void x86_cpu_reset(CPUState *s)
+{
+    X86CPU *cpu = X86_CPU(s);
+    X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
+    CPUX86State *env = &cpu->env;
+    target_ulong cr4;
+    uint64_t xcr0;
+    int i;
+
+    xcc->parent_reset(s);
+
+    memset(env, 0, offsetof(CPUX86State, end_reset_fields));
+
+    tlb_flush(s, 1);
+
+    env->old_exception = -1;
+
+    /* init to reset state */
+
+    env->hflags2 |= HF2_GIF_MASK;
+
+    cpu_x86_update_cr0(env, 0x60000010);
+    env->a20_mask = ~0x0;
+    env->smbase = 0x30000;
+
+    env->idt.limit = 0xffff;
+    env->gdt.limit = 0xffff;
+    env->ldt.limit = 0xffff;
+    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
+    env->tr.limit = 0xffff;
+    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
+
+    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
+                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
+                           DESC_R_MASK | DESC_A_MASK);
+    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
+                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
+                           DESC_A_MASK);
+    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
+                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
+                           DESC_A_MASK);
+    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
+                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
+                           DESC_A_MASK);
+    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
+                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
+                           DESC_A_MASK);
+    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
+                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
+                           DESC_A_MASK);
+
+    env->eip = 0xfff0;
+    env->regs[R_EDX] = env->cpuid_version;
+
+    env->eflags = 0x2;
+
+    /* FPU init */
+    for (i = 0; i < 8; i++) {
+        env->fptags[i] = 1;
+    }
+    cpu_set_fpuc(env, 0x37f);
+
+    env->mxcsr = 0x1f80;
+    /* All units are in INIT state.  */
+    env->xstate_bv = 0;
+
+    env->pat = 0x0007040600070406ULL;
+    env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
+
+    memset(env->dr, 0, sizeof(env->dr));
+    env->dr[6] = DR6_FIXED_1;
+    env->dr[7] = DR7_FIXED_1;
+    cpu_breakpoint_remove_all(s, BP_CPU);
+    cpu_watchpoint_remove_all(s, BP_CPU);
+
+    cr4 = 0;
+    xcr0 = XSTATE_FP_MASK;
+
+#ifdef CONFIG_USER_ONLY
+    /* Enable all the features for user-mode.  */
+    if (env->features[FEAT_1_EDX] & CPUID_SSE) {
+        xcr0 |= XSTATE_SSE_MASK;
+    }
+    for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
+        const ExtSaveArea *esa = &x86_ext_save_areas[i];
+        if (env->features[esa->feature] & esa->bits) {
+            xcr0 |= 1ull << i;
+        }
+    }
+
+    if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
+        cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
+    }
+    if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
+        cr4 |= CR4_FSGSBASE_MASK;
+    }
+#endif
+
+    env->xcr0 = xcr0;
+    cpu_x86_update_cr4(env, cr4);
+
+    /*
+     * SDM 11.11.5 requires:
+     *  - IA32_MTRR_DEF_TYPE MSR.E = 0
+     *  - IA32_MTRR_PHYSMASKn.V = 0
+     * All other bits are undefined.  For simplification, zero it all.
+     */
+    env->mtrr_deftype = 0;
+    memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
+    memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
+
+#if !defined(CONFIG_USER_ONLY)
+    /* We hard-wire the BSP to the first CPU. */
+    apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);
+
+    s->halted = !cpu_is_bsp(cpu);
+
+    if (kvm_enabled()) {
+        kvm_arch_reset_vcpu(cpu);
+    }
+#endif
+}
+
+#ifndef CONFIG_USER_ONLY
+bool cpu_is_bsp(X86CPU *cpu)
+{
+    return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
+}
+
+/* TODO: remove me, when reset over QOM tree is implemented */
+static void x86_cpu_machine_reset_cb(void *opaque)
+{
+    X86CPU *cpu = opaque;
+    cpu_reset(CPU(cpu));
+}
+#endif
+
+static void mce_init(X86CPU *cpu)
+{
+    CPUX86State *cenv = &cpu->env;
+    unsigned int bank;
+
+    if (((cenv->cpuid_version >> 8) & 0xf) >= 6
+        && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
+            (CPUID_MCE | CPUID_MCA)) {
+        cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
+                        (cpu->enable_lmce ? MCG_LMCE_P : 0);
+        cenv->mcg_ctl = ~(uint64_t)0;
+        for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
+            cenv->mce_banks[bank * 4] = ~(uint64_t)0;
+        }
+    }
+}
+
+#ifndef CONFIG_USER_ONLY
+APICCommonClass *apic_get_class(void)
+{
+    const char *apic_type = "apic";
+
+    if (kvm_apic_in_kernel()) {
+        apic_type = "kvm-apic";
+    } else if (xen_enabled()) {
+        apic_type = "xen-apic";
+    }
+
+    return APIC_COMMON_CLASS(object_class_by_name(apic_type));
+}
+
+static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
+{
+    APICCommonState *apic;
+    ObjectClass *apic_class = OBJECT_CLASS(apic_get_class());
+
+    cpu->apic_state = DEVICE(object_new(object_class_get_name(apic_class)));
+
+    object_property_add_child(OBJECT(cpu), "lapic",
+                              OBJECT(cpu->apic_state), &error_abort);
+    object_unref(OBJECT(cpu->apic_state));
+
+    qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id);
+    /* TODO: convert to link<> */
+    apic = APIC_COMMON(cpu->apic_state);
+    apic->cpu = cpu;
+    apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
+}
+
+static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
+{
+    APICCommonState *apic;
+    static bool apic_mmio_map_once;
+
+    if (cpu->apic_state == NULL) {
+        return;
+    }
+    object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
+                             errp);
+
+    /* Map APIC MMIO area */
+    apic = APIC_COMMON(cpu->apic_state);
+    if (!apic_mmio_map_once) {
+        memory_region_add_subregion_overlap(get_system_memory(),
+                                            apic->apicbase &
+                                            MSR_IA32_APICBASE_BASE,
+                                            &apic->io_memory,
+                                            0x1000);
+        apic_mmio_map_once = true;
+    }
+}
+
+static void x86_cpu_machine_done(Notifier *n, void *unused)
+{
+    X86CPU *cpu = container_of(n, X86CPU, machine_done);
+    MemoryRegion *smram =
+        (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
+
+    if (smram) {
+        cpu->smram = g_new(MemoryRegion, 1);
+        memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
+                                 smram, 0, 1ull << 32);
+        memory_region_set_enabled(cpu->smram, false);
+        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
+    }
+}
+#else
+static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
+{
+}
+#endif
+
+/* Note: Only safe for use on x86(-64) hosts */
+static uint32_t x86_host_phys_bits(void)
+{
+    uint32_t eax;
+    uint32_t host_phys_bits;
+
+    host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
+    if (eax >= 0x80000008) {
+        host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
+        /* Note: According to AMD doc 25481 rev 2.34, they have a field
+         * at 23:16 that can specify the maximum physical address bits
+         * for the guest, which can override this value; but I've not
+         * seen anything with that set.
+         */
+        host_phys_bits = eax & 0xff;
+    } else {
+        /* It's an odd 64 bit machine that doesn't have the leaf for
+         * physical address bits; fall back to 36, which matches most
+         * older Intel parts.
+         */
+        host_phys_bits = 36;
+    }
+
+    return host_phys_bits;
+}
+
+static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
+{
+    if (*min < value) {
+        *min = value;
+    }
+}
+
+/* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */
+static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
+{
+    CPUX86State *env = &cpu->env;
+    FeatureWordInfo *fi = &feature_word_info[w];
+    uint32_t eax = fi->cpuid_eax;
+    uint32_t region = eax & 0xF0000000;
+
+    if (!env->features[w]) {
+        return;
+    }
+
+    switch (region) {
+    case 0x00000000:
+        x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
+        break;
+    case 0x80000000:
+        x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
+        break;
+    case 0xC0000000:
+        x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
+        break;
+    }
+}
+
+/* Calculate XSAVE components based on the configured CPU feature flags */
+static void x86_cpu_enable_xsave_components(X86CPU *cpu)
+{
+    CPUX86State *env = &cpu->env;
+    int i;
+    uint64_t mask;
+
+    if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
+        return;
+    }
+
+    mask = 0;
+    for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
+        const ExtSaveArea *esa = &x86_ext_save_areas[i];
+        if (env->features[esa->feature] & esa->bits) {
+            mask |= (1ULL << i);
+        }
+    }
+
+    env->features[FEAT_XSAVE_COMP_LO] = mask;
+    env->features[FEAT_XSAVE_COMP_HI] = mask >> 32;
+}
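+
+/* Example of the mask computed above, assuming the conventional
+ * x86_ext_save_areas layout (FP = bit 0, SSE = bit 1, AVX/YMM = bit 2):
+ * with XSAVE and AVX enabled, the scan sets at least bits 0-2, so
+ * FEAT_XSAVE_COMP_LO reports 0x7 via CPUID[0xD, 0].EAX.
+ */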
+
+/* Load CPUID data based on configured features */
+static void x86_cpu_load_features(X86CPU *cpu, Error **errp)
+{
+    CPUX86State *env = &cpu->env;
+    FeatureWord w;
+    GList *l;
+    Error *local_err = NULL;
+
+    /* TODO: cpu->host_features incorrectly overwrites features
+     * set using "feat=on|off". Once we fix this, we can convert
+     * plus_features & minus_features to global properties
+     * inside x86_cpu_parse_featurestr() too.
+     */
+    if (cpu->host_features) {
+        for (w = 0; w < FEATURE_WORDS; w++) {
+            env->features[w] =
+                x86_cpu_get_supported_feature_word(w, cpu->migratable);
+        }
+    }
+
+    for (l = plus_features; l; l = l->next) {
+        const char *prop = l->data;
+        object_property_set_bool(OBJECT(cpu), true, prop, &local_err);
+        if (local_err) {
+            goto out;
+        }
+    }
+
+    for (l = minus_features; l; l = l->next) {
+        const char *prop = l->data;
+        object_property_set_bool(OBJECT(cpu), false, prop, &local_err);
+        if (local_err) {
+            goto out;
+        }
+    }
+
+    if (!kvm_enabled() || !cpu->expose_kvm) {
+        env->features[FEAT_KVM] = 0;
+    }
+
+    x86_cpu_enable_xsave_components(cpu);
+
+    /* CPUID[EAX=7,ECX=0].EBX always increases the level automatically: */
+    x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
+    if (cpu->full_cpuid_auto_level) {
+        x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
+        x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
+        x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
+        x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
+        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
+        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
+        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
+        x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
+        x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
+        x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);
+        /* SVM requires CPUID[0x8000000A] */
+        if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
+            x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
+        }
+    }
+
+    /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
+    if (env->cpuid_level == UINT32_MAX) {
+        env->cpuid_level = env->cpuid_min_level;
+    }
+    if (env->cpuid_xlevel == UINT32_MAX) {
+        env->cpuid_xlevel = env->cpuid_min_xlevel;
+    }
+    if (env->cpuid_xlevel2 == UINT32_MAX) {
+        env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
+    }
+
+out:
+    if (local_err != NULL) {
+        error_propagate(errp, local_err);
+    }
+}
+
+#define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
+                           (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
+                           (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
+#define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
+                         (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
+                         (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
+static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
+{
+    CPUState *cs = CPU(dev);
+    X86CPU *cpu = X86_CPU(dev);
+    X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
+    CPUX86State *env = &cpu->env;
+    Error *local_err = NULL;
+    static bool ht_warned;
+
+    if (xcc->kvm_required && !kvm_enabled()) {
+        char *name = x86_cpu_class_get_model_name(xcc);
+        error_setg(&local_err, "CPU model '%s' requires KVM", name);
+        g_free(name);
+        goto out;
+    }
+
+    if (cpu->apic_id == UNASSIGNED_APIC_ID) {
+        error_setg(errp, "apic-id property was not initialized properly");
+        return;
+    }
+
+    x86_cpu_load_features(cpu, &local_err);
+    if (local_err) {
+        goto out;
+    }
+
+    if (x86_cpu_filter_features(cpu) &&
+        (cpu->check_cpuid || cpu->enforce_cpuid)) {
+        x86_cpu_report_filtered_features(cpu);
+        if (cpu->enforce_cpuid) {
+            error_setg(&local_err,
+                       kvm_enabled() ?
+                           "Host doesn't support requested features" :
+                           "TCG doesn't support requested features");
+            goto out;
+        }
+    }
+
+    /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
+     * CPUID[1].EDX.
+     */
+    if (IS_AMD_CPU(env)) {
+        env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
+        env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
+           & CPUID_EXT2_AMD_ALIASES);
+    }
+
+    /* For 64-bit systems, think about the number of physical bits to
+     * present. Ideally this should be the same as the host; anything
+     * other than matching the host can cause incorrect guest behaviour.
+     * QEMU used to pick the magic value of 40 bits, which corresponds
+     * to consumer AMD devices but nothing else.
+     */
+    if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
+        if (kvm_enabled()) {
+            uint32_t host_phys_bits = x86_host_phys_bits();
+            static bool warned;
+
+            if (cpu->host_phys_bits) {
+                /* The user asked for us to use the host physical bits */
+                cpu->phys_bits = host_phys_bits;
+            }
+
+            /* Print a warning if the user set it to a value that's not the
+             * host value.
+             */
+            if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
+                !warned) {
+                error_report("Warning: Host physical bits (%u)"
+                                 " does not match phys-bits property (%u)",
+                                 host_phys_bits, cpu->phys_bits);
+                warned = true;
+            }
+
+            if (cpu->phys_bits &&
+                (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
+                cpu->phys_bits < 32)) {
+                error_setg(errp, "phys-bits should be between 32 and %u "
+                                 " (but is %u)",
+                                 TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
+                return;
+            }
+        } else {
+            if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
+                error_setg(errp, "TCG only supports phys-bits=%u",
+                                  TCG_PHYS_ADDR_BITS);
+                return;
+            }
+        }
+        /* 0 means it was not explicitly set by the user (or by machine
+         * compat_props or by the host code above). In this case, the default
+         * is the value used by TCG (40).
+         */
+        if (cpu->phys_bits == 0) {
+            cpu->phys_bits = TCG_PHYS_ADDR_BITS;
+        }
+    } else {
+        /* For 32 bit systems, don't use the user-set value, but keep
+         * phys_bits consistent with what we tell the guest.
+         */
+        if (cpu->phys_bits != 0) {
+            error_setg(errp, "phys-bits is not user-configurable in 32 bit");
+            return;
+        }
+
+        if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
+            cpu->phys_bits = 36;
+        } else {
+            cpu->phys_bits = 32;
+        }
+    }
+    cpu_exec_realizefn(cs, &local_err);
+    if (local_err != NULL) {
+        error_propagate(errp, local_err);
+        return;
+    }
+
+    if (tcg_enabled()) {
+        tcg_x86_init();
+    }
+
+#ifndef CONFIG_USER_ONLY
+    qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
+
+    if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
+        x86_cpu_apic_create(cpu, &local_err);
+        if (local_err != NULL) {
+            goto out;
+        }
+    }
+#endif
+
+    mce_init(cpu);
+
+#ifndef CONFIG_USER_ONLY
+    if (tcg_enabled()) {
+        AddressSpace *newas = g_new(AddressSpace, 1);
+
+        cpu->cpu_as_mem = g_new(MemoryRegion, 1);
+        cpu->cpu_as_root = g_new(MemoryRegion, 1);
+
+        /* Outer container... */
+        memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
+        memory_region_set_enabled(cpu->cpu_as_root, true);
+
+        /* ... with two regions inside: normal system memory with low
+         * priority, and...
+         */
+        memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
+                                 get_system_memory(), 0, ~0ull);
+        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
+        memory_region_set_enabled(cpu->cpu_as_mem, true);
+        address_space_init(newas, cpu->cpu_as_root, "CPU");
+        cs->num_ases = 1;
+        cpu_address_space_init(cs, newas, 0);
+
+        /* ... SMRAM with higher priority, linked from /machine/smram.  */
+        cpu->machine_done.notify = x86_cpu_machine_done;
+        qemu_add_machine_init_done_notifier(&cpu->machine_done);
+    }
+#endif
+
+    qemu_init_vcpu(cs);
+
+    /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
+     * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
+     * based on inputs (sockets, cores, threads), it is still better to give
+     * users a warning.
+     *
+     * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
+     * cs->nr_threads hasn't been populated yet and the check is incorrect.
+     */
+    if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
+        error_report("AMD CPU doesn't support hyperthreading. Please configure"
+                     " -smp options properly.");
+        ht_warned = true;
+    }
+
+    x86_cpu_apic_realize(cpu, &local_err);
+    if (local_err != NULL) {
+        goto out;
+    }
+    cpu_reset(cs);
+
+    xcc->parent_realize(dev, &local_err);
+
+out:
+    if (local_err != NULL) {
+        error_propagate(errp, local_err);
+        return;
+    }
+}
+
+static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp)
+{
+    X86CPU *cpu = X86_CPU(dev);
+    X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
+    Error *local_err = NULL;
+
+#ifndef CONFIG_USER_ONLY
+    cpu_remove_sync(CPU(dev));
+    qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
+#endif
+
+    if (cpu->apic_state) {
+        object_unparent(OBJECT(cpu->apic_state));
+        cpu->apic_state = NULL;
+    }
+
+    xcc->parent_unrealize(dev, &local_err);
+    if (local_err != NULL) {
+        error_propagate(errp, local_err);
+        return;
+    }
+}
+
+typedef struct BitProperty {
+    uint32_t *ptr;
+    uint32_t mask;
+} BitProperty;
+
+static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
+                                 void *opaque, Error **errp)
+{
+    BitProperty *fp = opaque;
+    bool value = (*fp->ptr & fp->mask) == fp->mask;
+    visit_type_bool(v, name, &value, errp);
+}
+
+static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
+                                 void *opaque, Error **errp)
+{
+    DeviceState *dev = DEVICE(obj);
+    BitProperty *fp = opaque;
+    Error *local_err = NULL;
+    bool value;
+
+    if (dev->realized) {
+        qdev_prop_set_after_realize(dev, name, errp);
+        return;
+    }
+
+    visit_type_bool(v, name, &value, &local_err);
+    if (local_err) {
+        error_propagate(errp, local_err);
+        return;
+    }
+
+    if (value) {
+        *fp->ptr |= fp->mask;
+    } else {
+        *fp->ptr &= ~fp->mask;
+    }
+}
+
+static void x86_cpu_release_bit_prop(Object *obj, const char *name,
+                                     void *opaque)
+{
+    BitProperty *prop = opaque;
+    g_free(prop);
+}
+
+/* Register a boolean property to get/set a single bit in a uint32_t field.
+ *
+ * The same property name can be registered multiple times to make it affect
+ * multiple bits in the same FeatureWord. In that case, the getter will return
+ * true only if all bits are set.
+ */
+static void x86_cpu_register_bit_prop(X86CPU *cpu,
+                                      const char *prop_name,
+                                      uint32_t *field,
+                                      int bitnr)
+{
+    BitProperty *fp;
+    ObjectProperty *op;
+    uint32_t mask = (1UL << bitnr);
+
+    op = object_property_find(OBJECT(cpu), prop_name, NULL);
+    if (op) {
+        fp = op->opaque;
+        assert(fp->ptr == field);
+        fp->mask |= mask;
+    } else {
+        fp = g_new0(BitProperty, 1);
+        fp->ptr = field;
+        fp->mask = mask;
+        object_property_add(OBJECT(cpu), prop_name, "bool",
+                            x86_cpu_get_bit_prop,
+                            x86_cpu_set_bit_prop,
+                            x86_cpu_release_bit_prop, fp, &error_abort);
+    }
+}
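+
+/* Usage sketch (illustrative, with assumed values): registering "avx2"
+ * against &env->features[FEAT_7_0_EBX] with bitnr 5 means that
+ *
+ *     object_property_set_bool(OBJECT(cpu), true, "avx2", &err);
+ *
+ * sets CPUID_7_0_EBX_AVX2, which is ultimately what "-cpu ...,avx2=on"
+ * does via the property machinery. */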
+
+static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
+                                               FeatureWord w,
+                                               int bitnr)
+{
+    FeatureWordInfo *fi = &feature_word_info[w];
+    const char *name = fi->feat_names[bitnr];
+
+    if (!name) {
+        return;
+    }
+
+    /* Property names should use "-" instead of "_".
+     * Old names containing underscores are registered as aliases
+     * using object_property_add_alias()
+     */
+    assert(!strchr(name, '_'));
+    /* aliases don't use "|" delimiters anymore, they are registered
+     * manually using object_property_add_alias() */
+    assert(!strchr(name, '|'));
+    x86_cpu_register_bit_prop(cpu, name, &cpu->env.features[w], bitnr);
+}
+
+static void x86_cpu_initfn(Object *obj)
+{
+    CPUState *cs = CPU(obj);
+    X86CPU *cpu = X86_CPU(obj);
+    X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
+    CPUX86State *env = &cpu->env;
+    FeatureWord w;
+
+    cs->env_ptr = env;
+
+    object_property_add(obj, "family", "int",
+                        x86_cpuid_version_get_family,
+                        x86_cpuid_version_set_family, NULL, NULL, NULL);
+    object_property_add(obj, "model", "int",
+                        x86_cpuid_version_get_model,
+                        x86_cpuid_version_set_model, NULL, NULL, NULL);
+    object_property_add(obj, "stepping", "int",
+                        x86_cpuid_version_get_stepping,
+                        x86_cpuid_version_set_stepping, NULL, NULL, NULL);
+    object_property_add_str(obj, "vendor",
+                            x86_cpuid_get_vendor,
+                            x86_cpuid_set_vendor, NULL);
+    object_property_add_str(obj, "model-id",
+                            x86_cpuid_get_model_id,
+                            x86_cpuid_set_model_id, NULL);
+    object_property_add(obj, "tsc-frequency", "int",
+                        x86_cpuid_get_tsc_freq,
+                        x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
+    object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
+                        x86_cpu_get_feature_words,
+                        NULL, NULL, (void *)env->features, NULL);
+    object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
+                        x86_cpu_get_feature_words,
+                        NULL, NULL, (void *)cpu->filtered_features, NULL);
+
+    cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;
+
+    for (w = 0; w < FEATURE_WORDS; w++) {
+        int bitnr;
+
+        for (bitnr = 0; bitnr < 32; bitnr++) {
+            x86_cpu_register_feature_bit_props(cpu, w, bitnr);
+        }
+    }
+
+    object_property_add_alias(obj, "sse3", obj, "pni", &error_abort);
+    object_property_add_alias(obj, "pclmuldq", obj, "pclmulqdq", &error_abort);
+    object_property_add_alias(obj, "sse4-1", obj, "sse4.1", &error_abort);
+    object_property_add_alias(obj, "sse4-2", obj, "sse4.2", &error_abort);
+    object_property_add_alias(obj, "xd", obj, "nx", &error_abort);
+    object_property_add_alias(obj, "ffxsr", obj, "fxsr-opt", &error_abort);
+    object_property_add_alias(obj, "i64", obj, "lm", &error_abort);
+
+    object_property_add_alias(obj, "ds_cpl", obj, "ds-cpl", &error_abort);
+    object_property_add_alias(obj, "tsc_adjust", obj, "tsc-adjust", &error_abort);
+    object_property_add_alias(obj, "fxsr_opt", obj, "fxsr-opt", &error_abort);
+    object_property_add_alias(obj, "lahf_lm", obj, "lahf-lm", &error_abort);
+    object_property_add_alias(obj, "cmp_legacy", obj, "cmp-legacy", &error_abort);
+    object_property_add_alias(obj, "nodeid_msr", obj, "nodeid-msr", &error_abort);
+    object_property_add_alias(obj, "perfctr_core", obj, "perfctr-core", &error_abort);
+    object_property_add_alias(obj, "perfctr_nb", obj, "perfctr-nb", &error_abort);
+    object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay", &error_abort);
+    object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu", &error_abort);
+    object_property_add_alias(obj, "kvm_asyncpf", obj, "kvm-asyncpf", &error_abort);
+    object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time", &error_abort);
+    object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi", &error_abort);
+    object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt", &error_abort);
+    object_property_add_alias(obj, "svm_lock", obj, "svm-lock", &error_abort);
+    object_property_add_alias(obj, "nrip_save", obj, "nrip-save", &error_abort);
+    object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale", &error_abort);
+    object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean", &error_abort);
+    object_property_add_alias(obj, "pause_filter", obj, "pause-filter", &error_abort);
+    object_property_add_alias(obj, "sse4_1", obj, "sse4.1", &error_abort);
+    object_property_add_alias(obj, "sse4_2", obj, "sse4.2", &error_abort);
+
+    x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
+}
+
+static int64_t x86_cpu_get_arch_id(CPUState *cs)
+{
+    X86CPU *cpu = X86_CPU(cs);
+
+    return cpu->apic_id;
+}
+
+static bool x86_cpu_get_paging_enabled(const CPUState *cs)
+{
+    X86CPU *cpu = X86_CPU(cs);
+
+    return cpu->env.cr[0] & CR0_PG_MASK;
+}
+
+static void x86_cpu_set_pc(CPUState *cs, vaddr value)
+{
+    X86CPU *cpu = X86_CPU(cs);
+
+    cpu->env.eip = value;
+}
+
+static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
+{
+    X86CPU *cpu = X86_CPU(cs);
+
+    cpu->env.eip = tb->pc - tb->cs_base;
+}
+
+static bool x86_cpu_has_work(CPUState *cs)
+{
+    X86CPU *cpu = X86_CPU(cs);
+    CPUX86State *env = &cpu->env;
+
+    return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
+                                      CPU_INTERRUPT_POLL)) &&
+            (env->eflags & IF_MASK)) ||
+           (cs->interrupt_request & (CPU_INTERRUPT_NMI |
+                                     CPU_INTERRUPT_INIT |
+                                     CPU_INTERRUPT_SIPI |
+                                     CPU_INTERRUPT_MCE)) ||
+           ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
+            !(env->hflags & HF_SMM_MASK));
+}
+
+static Property x86_cpu_properties[] = {
+#ifdef CONFIG_USER_ONLY
+    /* apic_id = 0 by default for *-user, see commit 9886e834 */
+    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
+    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
+    DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
+    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
+#else
+    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
+    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
+    DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
+    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
+#endif
+    DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
+    { .name  = "hv-spinlocks", .info  = &qdev_prop_spinlocks },
+    DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
+    DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
+    DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
+    DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
+    DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
+    DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
+    DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
+    DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
+    DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
+    DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
+    DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
+    DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
+    DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
+    DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
+    DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
+    DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
+    DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
+    DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
+    DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
+    DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
+    DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
+    DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
+    DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
+    DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
+    DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
+    DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
+    DEFINE_PROP_END_OF_LIST()
+};
+
+static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
+{
+    X86CPUClass *xcc = X86_CPU_CLASS(oc);
+    CPUClass *cc = CPU_CLASS(oc);
+    DeviceClass *dc = DEVICE_CLASS(oc);
+
+    xcc->parent_realize = dc->realize;
+    xcc->parent_unrealize = dc->unrealize;
+    dc->realize = x86_cpu_realizefn;
+    dc->unrealize = x86_cpu_unrealizefn;
+    dc->props = x86_cpu_properties;
+
+    xcc->parent_reset = cc->reset;
+    cc->reset = x86_cpu_reset;
+    cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
+
+    cc->class_by_name = x86_cpu_class_by_name;
+    cc->parse_features = x86_cpu_parse_featurestr;
+    cc->has_work = x86_cpu_has_work;
+    cc->do_interrupt = x86_cpu_do_interrupt;
+    cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
+    cc->dump_state = x86_cpu_dump_state;
+    cc->set_pc = x86_cpu_set_pc;
+    cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
+    cc->gdb_read_register = x86_cpu_gdb_read_register;
+    cc->gdb_write_register = x86_cpu_gdb_write_register;
+    cc->get_arch_id = x86_cpu_get_arch_id;
+    cc->get_paging_enabled = x86_cpu_get_paging_enabled;
+#ifdef CONFIG_USER_ONLY
+    cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
+#else
+    cc->get_memory_mapping = x86_cpu_get_memory_mapping;
+    cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
+    cc->write_elf64_note = x86_cpu_write_elf64_note;
+    cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
+    cc->write_elf32_note = x86_cpu_write_elf32_note;
+    cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
+    cc->vmsd = &vmstate_x86_cpu;
+#endif
+    /* CPU_NB_REGS * 2 = general regs + xmm regs
+     * 25 = eip, eflags, 6 seg regs, st[0-7], fctrl,...,fop, mxcsr.
+     */
+    cc->gdb_num_core_regs = CPU_NB_REGS * 2 + 25;
+#ifndef CONFIG_USER_ONLY
+    cc->debug_excp_handler = breakpoint_handler;
+#endif
+    cc->cpu_exec_enter = x86_cpu_exec_enter;
+    cc->cpu_exec_exit = x86_cpu_exec_exit;
+
+    dc->cannot_instantiate_with_device_add_yet = false;
+}
+
+static const TypeInfo x86_cpu_type_info = {
+    .name = TYPE_X86_CPU,
+    .parent = TYPE_CPU,
+    .instance_size = sizeof(X86CPU),
+    .instance_init = x86_cpu_initfn,
+    .abstract = true,
+    .class_size = sizeof(X86CPUClass),
+    .class_init = x86_cpu_common_class_init,
+};
+
+static void x86_cpu_register_types(void)
+{
+    int i;
+
+    type_register_static(&x86_cpu_type_info);
+    for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
+        x86_register_cpudef_type(&builtin_x86_defs[i]);
+    }
+#ifdef CONFIG_KVM
+    type_register_static(&host_x86_cpu_type_info);
+#endif
+}
+
+type_init(x86_cpu_register_types)
diff --git a/target/i386/cpu.h b/target/i386/cpu.h
new file mode 100644
index 0000000000..c605724022
--- /dev/null
+++ b/target/i386/cpu.h
@@ -0,0 +1,1656 @@
+/*
+ * i386 virtual CPU header
+ *
+ *  Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef I386_CPU_H
+#define I386_CPU_H
+
+#include "qemu-common.h"
+#include "cpu-qom.h"
+#include "standard-headers/asm-x86/hyperv.h"
+
+#ifdef TARGET_X86_64
+#define TARGET_LONG_BITS 64
+#else
+#define TARGET_LONG_BITS 32
+#endif
+
+/* Maximum instruction code size */
+#define TARGET_MAX_INSN_SIZE 16
+
+/* support for self-modifying code even if the modified instruction is
+   close to the modifying instruction */
+#define TARGET_HAS_PRECISE_SMC
+
+#ifdef TARGET_X86_64
+#define I386_ELF_MACHINE  EM_X86_64
+#define ELF_MACHINE_UNAME "x86_64"
+#else
+#define I386_ELF_MACHINE  EM_386
+#define ELF_MACHINE_UNAME "i686"
+#endif
+
+#define CPUArchState struct CPUX86State
+
+#include "exec/cpu-defs.h"
+
+#include "fpu/softfloat.h"
+
+#define R_EAX 0
+#define R_ECX 1
+#define R_EDX 2
+#define R_EBX 3
+#define R_ESP 4
+#define R_EBP 5
+#define R_ESI 6
+#define R_EDI 7
+
+#define R_AL 0
+#define R_CL 1
+#define R_DL 2
+#define R_BL 3
+#define R_AH 4
+#define R_CH 5
+#define R_DH 6
+#define R_BH 7
+
+#define R_ES 0
+#define R_CS 1
+#define R_SS 2
+#define R_DS 3
+#define R_FS 4
+#define R_GS 5
+
+/* segment descriptor fields */
+#define DESC_G_MASK     (1 << 23)
+#define DESC_B_SHIFT    22
+#define DESC_B_MASK     (1 << DESC_B_SHIFT)
+#define DESC_L_SHIFT    21 /* x86_64 only : 64 bit code segment */
+#define DESC_L_MASK     (1 << DESC_L_SHIFT)
+#define DESC_AVL_MASK   (1 << 20)
+#define DESC_P_MASK     (1 << 15)
+#define DESC_DPL_SHIFT  13
+#define DESC_DPL_MASK   (3 << DESC_DPL_SHIFT)
+#define DESC_S_MASK     (1 << 12)
+#define DESC_TYPE_SHIFT 8
+#define DESC_TYPE_MASK  (15 << DESC_TYPE_SHIFT)
+#define DESC_A_MASK     (1 << 8)
+
+#define DESC_CS_MASK    (1 << 11) /* 1=code segment 0=data segment */
+#define DESC_C_MASK     (1 << 10) /* code: conforming */
+#define DESC_R_MASK     (1 << 9)  /* code: readable */
+
+#define DESC_E_MASK     (1 << 10) /* data: expansion direction */
+#define DESC_W_MASK     (1 << 9)  /* data: writable */
+
+#define DESC_TSS_BUSY_MASK (1 << 9)
+
+/* eflags masks */
+#define CC_C    0x0001
+#define CC_P    0x0004
+#define CC_A    0x0010
+#define CC_Z    0x0040
+#define CC_S    0x0080
+#define CC_O    0x0800
+
+#define TF_SHIFT   8
+#define IOPL_SHIFT 12
+#define VM_SHIFT   17
+
+#define TF_MASK                 0x00000100
+#define IF_MASK                 0x00000200
+#define DF_MASK                 0x00000400
+#define IOPL_MASK               0x00003000
+#define NT_MASK                 0x00004000
+#define RF_MASK                 0x00010000
+#define VM_MASK                 0x00020000
+#define AC_MASK                 0x00040000
+#define VIF_MASK                0x00080000
+#define VIP_MASK                0x00100000
+#define ID_MASK                 0x00200000
+
+/* hidden flags - used internally by QEMU to represent additional CPU
+   states. Only the INHIBIT_IRQ, SMM and SVMI flags are not redundant. The
+   IOPL_MASK, TF_MASK, VM_MASK and AC_MASK bit positions are kept
+   identical to their eflags counterparts to ease ORing with eflags. */
+/* current cpl */
+#define HF_CPL_SHIFT         0
+/* true if hardware interrupts must be disabled for next instruction */
+#define HF_INHIBIT_IRQ_SHIFT 3
+/* 16 or 32 segments */
+#define HF_CS32_SHIFT        4
+#define HF_SS32_SHIFT        5
+/* zero base for DS, ES and SS : can be '0' only in 32 bit CS segment */
+#define HF_ADDSEG_SHIFT      6
+/* copy of CR0.PE (protected mode) */
+#define HF_PE_SHIFT          7
+#define HF_TF_SHIFT          8 /* must be same as eflags */
+#define HF_MP_SHIFT          9 /* the order must be MP, EM, TS */
+#define HF_EM_SHIFT         10
+#define HF_TS_SHIFT         11
+#define HF_IOPL_SHIFT       12 /* must be same as eflags */
+#define HF_LMA_SHIFT        14 /* only used on x86_64: long mode active */
+#define HF_CS64_SHIFT       15 /* only used on x86_64: 64 bit code segment  */
+#define HF_RF_SHIFT         16 /* must be same as eflags */
+#define HF_VM_SHIFT         17 /* must be same as eflags */
+#define HF_AC_SHIFT         18 /* must be same as eflags */
+#define HF_SMM_SHIFT        19 /* CPU in SMM mode */
+#define HF_SVME_SHIFT       20 /* SVME enabled (copy of EFER.SVME) */
+#define HF_SVMI_SHIFT       21 /* SVM intercepts are active */
+#define HF_OSFXSR_SHIFT     22 /* CR4.OSFXSR */
+#define HF_SMAP_SHIFT       23 /* CR4.SMAP */
+#define HF_IOBPT_SHIFT      24 /* an io breakpoint enabled */
+#define HF_MPX_EN_SHIFT     25 /* MPX Enabled (CR4+XCR0+BNDCFGx) */
+#define HF_MPX_IU_SHIFT     26 /* BND registers in-use */
+
+#define HF_CPL_MASK          (3 << HF_CPL_SHIFT)
+#define HF_INHIBIT_IRQ_MASK  (1 << HF_INHIBIT_IRQ_SHIFT)
+#define HF_CS32_MASK         (1 << HF_CS32_SHIFT)
+#define HF_SS32_MASK         (1 << HF_SS32_SHIFT)
+#define HF_ADDSEG_MASK       (1 << HF_ADDSEG_SHIFT)
+#define HF_PE_MASK           (1 << HF_PE_SHIFT)
+#define HF_TF_MASK           (1 << HF_TF_SHIFT)
+#define HF_MP_MASK           (1 << HF_MP_SHIFT)
+#define HF_EM_MASK           (1 << HF_EM_SHIFT)
+#define HF_TS_MASK           (1 << HF_TS_SHIFT)
+#define HF_IOPL_MASK         (3 << HF_IOPL_SHIFT)
+#define HF_LMA_MASK          (1 << HF_LMA_SHIFT)
+#define HF_CS64_MASK         (1 << HF_CS64_SHIFT)
+#define HF_RF_MASK           (1 << HF_RF_SHIFT)
+#define HF_VM_MASK           (1 << HF_VM_SHIFT)
+#define HF_AC_MASK           (1 << HF_AC_SHIFT)
+#define HF_SMM_MASK          (1 << HF_SMM_SHIFT)
+#define HF_SVME_MASK         (1 << HF_SVME_SHIFT)
+#define HF_SVMI_MASK         (1 << HF_SVMI_SHIFT)
+#define HF_OSFXSR_MASK       (1 << HF_OSFXSR_SHIFT)
+#define HF_SMAP_MASK         (1 << HF_SMAP_SHIFT)
+#define HF_IOBPT_MASK        (1 << HF_IOBPT_SHIFT)
+#define HF_MPX_EN_MASK       (1 << HF_MPX_EN_SHIFT)
+#define HF_MPX_IU_MASK       (1 << HF_MPX_IU_SHIFT)
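+
+/* Illustrative sketch: because the TF/IOPL/RF/VM/AC bit positions match
+ * eflags, such hflags bits can be refreshed with plain masking, e.g.
+ *
+ *     env->hflags = (env->hflags & ~HF_TF_MASK) | (env->eflags & TF_MASK);
+ *
+ * (a hypothetical example; the real update sites live elsewhere). */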
+
+/* hflags2 */
+
+#define HF2_GIF_SHIFT            0 /* if set CPU takes interrupts */
+#define HF2_HIF_SHIFT            1 /* value of IF_MASK when entering SVM */
+#define HF2_NMI_SHIFT            2 /* CPU serving NMI */
+#define HF2_VINTR_SHIFT          3 /* value of V_INTR_MASKING bit */
+#define HF2_SMM_INSIDE_NMI_SHIFT 4 /* CPU serving SMI nested inside NMI */
+#define HF2_MPX_PR_SHIFT         5 /* BNDCFGx.BNDPRESERVE */
+
+#define HF2_GIF_MASK            (1 << HF2_GIF_SHIFT)
+#define HF2_HIF_MASK            (1 << HF2_HIF_SHIFT)
+#define HF2_NMI_MASK            (1 << HF2_NMI_SHIFT)
+#define HF2_VINTR_MASK          (1 << HF2_VINTR_SHIFT)
+#define HF2_SMM_INSIDE_NMI_MASK (1 << HF2_SMM_INSIDE_NMI_SHIFT)
+#define HF2_MPX_PR_MASK         (1 << HF2_MPX_PR_SHIFT)
+
+#define CR0_PE_SHIFT 0
+#define CR0_MP_SHIFT 1
+
+#define CR0_PE_MASK  (1U << 0)
+#define CR0_MP_MASK  (1U << 1)
+#define CR0_EM_MASK  (1U << 2)
+#define CR0_TS_MASK  (1U << 3)
+#define CR0_ET_MASK  (1U << 4)
+#define CR0_NE_MASK  (1U << 5)
+#define CR0_WP_MASK  (1U << 16)
+#define CR0_AM_MASK  (1U << 18)
+#define CR0_PG_MASK  (1U << 31)
+
+#define CR4_VME_MASK  (1U << 0)
+#define CR4_PVI_MASK  (1U << 1)
+#define CR4_TSD_MASK  (1U << 2)
+#define CR4_DE_MASK   (1U << 3)
+#define CR4_PSE_MASK  (1U << 4)
+#define CR4_PAE_MASK  (1U << 5)
+#define CR4_MCE_MASK  (1U << 6)
+#define CR4_PGE_MASK  (1U << 7)
+#define CR4_PCE_MASK  (1U << 8)
+#define CR4_OSFXSR_SHIFT 9
+#define CR4_OSFXSR_MASK (1U << CR4_OSFXSR_SHIFT)
+#define CR4_OSXMMEXCPT_MASK  (1U << 10)
+#define CR4_VMXE_MASK   (1U << 13)
+#define CR4_SMXE_MASK   (1U << 14)
+#define CR4_FSGSBASE_MASK (1U << 16)
+#define CR4_PCIDE_MASK  (1U << 17)
+#define CR4_OSXSAVE_MASK (1U << 18)
+#define CR4_SMEP_MASK   (1U << 20)
+#define CR4_SMAP_MASK   (1U << 21)
+#define CR4_PKE_MASK   (1U << 22)
+
+#define DR6_BD          (1 << 13)
+#define DR6_BS          (1 << 14)
+#define DR6_BT          (1 << 15)
+#define DR6_FIXED_1     0xffff0ff0
+
+#define DR7_GD          (1 << 13)
+#define DR7_TYPE_SHIFT  16
+#define DR7_LEN_SHIFT   18
+#define DR7_FIXED_1     0x00000400
+#define DR7_GLOBAL_BP_MASK   0xaa
+#define DR7_LOCAL_BP_MASK    0x55
+#define DR7_MAX_BP           4
+#define DR7_TYPE_BP_INST     0x0
+#define DR7_TYPE_DATA_WR     0x1
+#define DR7_TYPE_IO_RW       0x2
+#define DR7_TYPE_DATA_RW     0x3
+
+#define PG_PRESENT_BIT  0
+#define PG_RW_BIT       1
+#define PG_USER_BIT     2
+#define PG_PWT_BIT      3
+#define PG_PCD_BIT      4
+#define PG_ACCESSED_BIT 5
+#define PG_DIRTY_BIT    6
+#define PG_PSE_BIT      7
+#define PG_GLOBAL_BIT   8
+#define PG_PSE_PAT_BIT  12
+#define PG_PKRU_BIT     59
+#define PG_NX_BIT       63
+
+#define PG_PRESENT_MASK  (1 << PG_PRESENT_BIT)
+#define PG_RW_MASK       (1 << PG_RW_BIT)
+#define PG_USER_MASK     (1 << PG_USER_BIT)
+#define PG_PWT_MASK      (1 << PG_PWT_BIT)
+#define PG_PCD_MASK      (1 << PG_PCD_BIT)
+#define PG_ACCESSED_MASK (1 << PG_ACCESSED_BIT)
+#define PG_DIRTY_MASK    (1 << PG_DIRTY_BIT)
+#define PG_PSE_MASK      (1 << PG_PSE_BIT)
+#define PG_GLOBAL_MASK   (1 << PG_GLOBAL_BIT)
+#define PG_PSE_PAT_MASK  (1 << PG_PSE_PAT_BIT)
+#define PG_ADDRESS_MASK  0x000ffffffffff000LL
+#define PG_HI_RSVD_MASK  (PG_ADDRESS_MASK & ~PHYS_ADDR_MASK)
+#define PG_HI_USER_MASK  0x7ff0000000000000LL
+#define PG_PKRU_MASK     (15ULL << PG_PKRU_BIT)
+#define PG_NX_MASK       (1ULL << PG_NX_BIT)
+
+#define PG_ERROR_W_BIT     1
+
+#define PG_ERROR_P_MASK    0x01
+#define PG_ERROR_W_MASK    (1 << PG_ERROR_W_BIT)
+#define PG_ERROR_U_MASK    0x04
+#define PG_ERROR_RSVD_MASK 0x08
+#define PG_ERROR_I_D_MASK  0x10
+#define PG_ERROR_PK_MASK   0x20
+
+#define MCG_CTL_P       (1ULL<<8)   /* MCG_CTL register available */
+#define MCG_SER_P       (1ULL<<24) /* MCA recovery/new status bits */
+#define MCG_LMCE_P      (1ULL<<27) /* Local Machine Check Supported */
+
+#define MCE_CAP_DEF     (MCG_CTL_P|MCG_SER_P)
+#define MCE_BANKS_DEF   10
+
+#define MCG_CAP_BANKS_MASK 0xff
+
+#define MCG_STATUS_RIPV (1ULL<<0)   /* restart ip valid */
+#define MCG_STATUS_EIPV (1ULL<<1)   /* ip points to correct instruction */
+#define MCG_STATUS_MCIP (1ULL<<2)   /* machine check in progress */
+#define MCG_STATUS_LMCE (1ULL<<3)   /* Local MCE signaled */
+
+#define MCG_EXT_CTL_LMCE_EN (1ULL<<0) /* Local MCE enabled */
+
+#define MCI_STATUS_VAL   (1ULL<<63)  /* valid error */
+#define MCI_STATUS_OVER  (1ULL<<62)  /* previous errors lost */
+#define MCI_STATUS_UC    (1ULL<<61)  /* uncorrected error */
+#define MCI_STATUS_EN    (1ULL<<60)  /* error enabled */
+#define MCI_STATUS_MISCV (1ULL<<59)  /* misc error reg. valid */
+#define MCI_STATUS_ADDRV (1ULL<<58)  /* addr reg. valid */
+#define MCI_STATUS_PCC   (1ULL<<57)  /* processor context corrupt */
+#define MCI_STATUS_S     (1ULL<<56)  /* Signaled machine check */
+#define MCI_STATUS_AR    (1ULL<<55)  /* Action required */
+
+/* MISC register defines */
+#define MCM_ADDR_SEGOFF  0      /* segment offset */
+#define MCM_ADDR_LINEAR  1      /* linear address */
+#define MCM_ADDR_PHYS    2      /* physical address */
+#define MCM_ADDR_MEM     3      /* memory address */
+#define MCM_ADDR_GENERIC 7      /* generic */
+
+#define MSR_IA32_TSC                    0x10
+#define MSR_IA32_APICBASE               0x1b
+#define MSR_IA32_APICBASE_BSP           (1<<8)
+#define MSR_IA32_APICBASE_ENABLE        (1<<11)
+#define MSR_IA32_APICBASE_EXTD          (1 << 10)
+#define MSR_IA32_APICBASE_BASE          (0xfffffU<<12)
+#define MSR_IA32_FEATURE_CONTROL        0x0000003a
+#define MSR_TSC_ADJUST                  0x0000003b
+#define MSR_IA32_TSCDEADLINE            0x6e0
+
+#define FEATURE_CONTROL_LOCKED                    (1<<0)
+#define FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX (1<<2)
+#define FEATURE_CONTROL_LMCE                      (1<<20)
+
+#define MSR_P6_PERFCTR0                 0xc1
+
+#define MSR_IA32_SMBASE                 0x9e
+#define MSR_MTRRcap                     0xfe
+#define MSR_MTRRcap_VCNT                8
+#define MSR_MTRRcap_FIXRANGE_SUPPORT    (1 << 8)
+#define MSR_MTRRcap_WC_SUPPORTED        (1 << 10)
+
+#define MSR_IA32_SYSENTER_CS            0x174
+#define MSR_IA32_SYSENTER_ESP           0x175
+#define MSR_IA32_SYSENTER_EIP           0x176
+
+#define MSR_MCG_CAP                     0x179
+#define MSR_MCG_STATUS                  0x17a
+#define MSR_MCG_CTL                     0x17b
+#define MSR_MCG_EXT_CTL                 0x4d0
+
+#define MSR_P6_EVNTSEL0                 0x186
+
+#define MSR_IA32_PERF_STATUS            0x198
+
+#define MSR_IA32_MISC_ENABLE            0x1a0
+/* Indicates good rep/movs microcode on some processors: */
+#define MSR_IA32_MISC_ENABLE_DEFAULT    1
+
+#define MSR_MTRRphysBase(reg)           (0x200 + 2 * (reg))
+#define MSR_MTRRphysMask(reg)           (0x200 + 2 * (reg) + 1)
+
+#define MSR_MTRRphysIndex(addr)         ((((addr) & ~1u) - 0x200) / 2)
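+
+/* Worked example of the macros above: MSR_MTRRphysBase(0) == 0x200,
+ * MSR_MTRRphysMask(0) == 0x201, and MSR_MTRRphysIndex() maps both 0x200
+ * and 0x201 back to variable-range register 0. */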
+
+#define MSR_MTRRfix64K_00000            0x250
+#define MSR_MTRRfix16K_80000            0x258
+#define MSR_MTRRfix16K_A0000            0x259
+#define MSR_MTRRfix4K_C0000             0x268
+#define MSR_MTRRfix4K_C8000             0x269
+#define MSR_MTRRfix4K_D0000             0x26a
+#define MSR_MTRRfix4K_D8000             0x26b
+#define MSR_MTRRfix4K_E0000             0x26c
+#define MSR_MTRRfix4K_E8000             0x26d
+#define MSR_MTRRfix4K_F0000             0x26e
+#define MSR_MTRRfix4K_F8000             0x26f
+
+#define MSR_PAT                         0x277
+
+#define MSR_MTRRdefType                 0x2ff
+
+#define MSR_CORE_PERF_FIXED_CTR0        0x309
+#define MSR_CORE_PERF_FIXED_CTR1        0x30a
+#define MSR_CORE_PERF_FIXED_CTR2        0x30b
+#define MSR_CORE_PERF_FIXED_CTR_CTRL    0x38d
+#define MSR_CORE_PERF_GLOBAL_STATUS     0x38e
+#define MSR_CORE_PERF_GLOBAL_CTRL       0x38f
+#define MSR_CORE_PERF_GLOBAL_OVF_CTRL   0x390
+
+#define MSR_MC0_CTL                     0x400
+#define MSR_MC0_STATUS                  0x401
+#define MSR_MC0_ADDR                    0x402
+#define MSR_MC0_MISC                    0x403
+
+#define MSR_EFER                        0xc0000080
+
+#define MSR_EFER_SCE   (1 << 0)
+#define MSR_EFER_LME   (1 << 8)
+#define MSR_EFER_LMA   (1 << 10)
+#define MSR_EFER_NXE   (1 << 11)
+#define MSR_EFER_SVME  (1 << 12)
+#define MSR_EFER_FFXSR (1 << 14)
+
+#define MSR_STAR                        0xc0000081
+#define MSR_LSTAR                       0xc0000082
+#define MSR_CSTAR                       0xc0000083
+#define MSR_FMASK                       0xc0000084
+#define MSR_FSBASE                      0xc0000100
+#define MSR_GSBASE                      0xc0000101
+#define MSR_KERNELGSBASE                0xc0000102
+#define MSR_TSC_AUX                     0xc0000103
+
+#define MSR_VM_HSAVE_PA                 0xc0010117
+
+#define MSR_IA32_BNDCFGS                0x00000d90
+#define MSR_IA32_XSS                    0x00000da0
+
+#define XSTATE_FP_BIT                   0
+#define XSTATE_SSE_BIT                  1
+#define XSTATE_YMM_BIT                  2
+#define XSTATE_BNDREGS_BIT              3
+#define XSTATE_BNDCSR_BIT               4
+#define XSTATE_OPMASK_BIT               5
+#define XSTATE_ZMM_Hi256_BIT            6
+#define XSTATE_Hi16_ZMM_BIT             7
+#define XSTATE_PKRU_BIT                 9
+
+#define XSTATE_FP_MASK                  (1ULL << XSTATE_FP_BIT)
+#define XSTATE_SSE_MASK                 (1ULL << XSTATE_SSE_BIT)
+#define XSTATE_YMM_MASK                 (1ULL << XSTATE_YMM_BIT)
+#define XSTATE_BNDREGS_MASK             (1ULL << XSTATE_BNDREGS_BIT)
+#define XSTATE_BNDCSR_MASK              (1ULL << XSTATE_BNDCSR_BIT)
+#define XSTATE_OPMASK_MASK              (1ULL << XSTATE_OPMASK_BIT)
+#define XSTATE_ZMM_Hi256_MASK           (1ULL << XSTATE_ZMM_Hi256_BIT)
+#define XSTATE_Hi16_ZMM_MASK            (1ULL << XSTATE_Hi16_ZMM_BIT)
+#define XSTATE_PKRU_MASK                (1ULL << XSTATE_PKRU_BIT)
+
+/* CPUID feature words */
+typedef enum FeatureWord {
+    FEAT_1_EDX,         /* CPUID[1].EDX */
+    FEAT_1_ECX,         /* CPUID[1].ECX */
+    FEAT_7_0_EBX,       /* CPUID[EAX=7,ECX=0].EBX */
+    FEAT_7_0_ECX,       /* CPUID[EAX=7,ECX=0].ECX */
+    FEAT_7_0_EDX,       /* CPUID[EAX=7,ECX=0].EDX */
+    FEAT_8000_0001_EDX, /* CPUID[8000_0001].EDX */
+    FEAT_8000_0001_ECX, /* CPUID[8000_0001].ECX */
+    FEAT_8000_0007_EDX, /* CPUID[8000_0007].EDX */
+    FEAT_C000_0001_EDX, /* CPUID[C000_0001].EDX */
+    FEAT_KVM,           /* CPUID[4000_0001].EAX (KVM_CPUID_FEATURES) */
+    FEAT_HYPERV_EAX,    /* CPUID[4000_0003].EAX */
+    FEAT_HYPERV_EBX,    /* CPUID[4000_0003].EBX */
+    FEAT_HYPERV_EDX,    /* CPUID[4000_0003].EDX */
+    FEAT_SVM,           /* CPUID[8000_000A].EDX */
+    FEAT_XSAVE,         /* CPUID[EAX=0xd,ECX=1].EAX */
+    FEAT_6_EAX,         /* CPUID[6].EAX */
+    FEAT_XSAVE_COMP_LO, /* CPUID[EAX=0xd,ECX=0].EAX */
+    FEAT_XSAVE_COMP_HI, /* CPUID[EAX=0xd,ECX=0].EDX */
+    FEATURE_WORDS,
+} FeatureWord;
+
+typedef uint32_t FeatureWordArray[FEATURE_WORDS];
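+
+/* Usage sketch: a FeatureWord indexes directly into env->features, e.g.
+ *
+ *     if (env->features[FEAT_1_EDX] & CPUID_SSE2) { ... }
+ *
+ * tests the SSE2 bit of CPUID[1].EDX (CPUID_SSE2 is defined below). */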
+
+/* cpuid_features bits */
+#define CPUID_FP87 (1U << 0)
+#define CPUID_VME  (1U << 1)
+#define CPUID_DE   (1U << 2)
+#define CPUID_PSE  (1U << 3)
+#define CPUID_TSC  (1U << 4)
+#define CPUID_MSR  (1U << 5)
+#define CPUID_PAE  (1U << 6)
+#define CPUID_MCE  (1U << 7)
+#define CPUID_CX8  (1U << 8)
+#define CPUID_APIC (1U << 9)
+#define CPUID_SEP  (1U << 11) /* sysenter/sysexit */
+#define CPUID_MTRR (1U << 12)
+#define CPUID_PGE  (1U << 13)
+#define CPUID_MCA  (1U << 14)
+#define CPUID_CMOV (1U << 15)
+#define CPUID_PAT  (1U << 16)
+#define CPUID_PSE36   (1U << 17)
+#define CPUID_PN   (1U << 18)
+#define CPUID_CLFLUSH (1U << 19)
+#define CPUID_DTS (1U << 21)
+#define CPUID_ACPI (1U << 22)
+#define CPUID_MMX  (1U << 23)
+#define CPUID_FXSR (1U << 24)
+#define CPUID_SSE  (1U << 25)
+#define CPUID_SSE2 (1U << 26)
+#define CPUID_SS (1U << 27)
+#define CPUID_HT (1U << 28)
+#define CPUID_TM (1U << 29)
+#define CPUID_IA64 (1U << 30)
+#define CPUID_PBE (1U << 31)
+
+#define CPUID_EXT_SSE3     (1U << 0)
+#define CPUID_EXT_PCLMULQDQ (1U << 1)
+#define CPUID_EXT_DTES64   (1U << 2)
+#define CPUID_EXT_MONITOR  (1U << 3)
+#define CPUID_EXT_DSCPL    (1U << 4)
+#define CPUID_EXT_VMX      (1U << 5)
+#define CPUID_EXT_SMX      (1U << 6)
+#define CPUID_EXT_EST      (1U << 7)
+#define CPUID_EXT_TM2      (1U << 8)
+#define CPUID_EXT_SSSE3    (1U << 9)
+#define CPUID_EXT_CID      (1U << 10)
+#define CPUID_EXT_FMA      (1U << 12)
+#define CPUID_EXT_CX16     (1U << 13)
+#define CPUID_EXT_XTPR     (1U << 14)
+#define CPUID_EXT_PDCM     (1U << 15)
+#define CPUID_EXT_PCID     (1U << 17)
+#define CPUID_EXT_DCA      (1U << 18)
+#define CPUID_EXT_SSE41    (1U << 19)
+#define CPUID_EXT_SSE42    (1U << 20)
+#define CPUID_EXT_X2APIC   (1U << 21)
+#define CPUID_EXT_MOVBE    (1U << 22)
+#define CPUID_EXT_POPCNT   (1U << 23)
+#define CPUID_EXT_TSC_DEADLINE_TIMER (1U << 24)
+#define CPUID_EXT_AES      (1U << 25)
+#define CPUID_EXT_XSAVE    (1U << 26)
+#define CPUID_EXT_OSXSAVE  (1U << 27)
+#define CPUID_EXT_AVX      (1U << 28)
+#define CPUID_EXT_F16C     (1U << 29)
+#define CPUID_EXT_RDRAND   (1U << 30)
+#define CPUID_EXT_HYPERVISOR  (1U << 31)
+
+#define CPUID_EXT2_FPU     (1U << 0)
+#define CPUID_EXT2_VME     (1U << 1)
+#define CPUID_EXT2_DE      (1U << 2)
+#define CPUID_EXT2_PSE     (1U << 3)
+#define CPUID_EXT2_TSC     (1U << 4)
+#define CPUID_EXT2_MSR     (1U << 5)
+#define CPUID_EXT2_PAE     (1U << 6)
+#define CPUID_EXT2_MCE     (1U << 7)
+#define CPUID_EXT2_CX8     (1U << 8)
+#define CPUID_EXT2_APIC    (1U << 9)
+#define CPUID_EXT2_SYSCALL (1U << 11)
+#define CPUID_EXT2_MTRR    (1U << 12)
+#define CPUID_EXT2_PGE     (1U << 13)
+#define CPUID_EXT2_MCA     (1U << 14)
+#define CPUID_EXT2_CMOV    (1U << 15)
+#define CPUID_EXT2_PAT     (1U << 16)
+#define CPUID_EXT2_PSE36   (1U << 17)
+#define CPUID_EXT2_MP      (1U << 19)
+#define CPUID_EXT2_NX      (1U << 20)
+#define CPUID_EXT2_MMXEXT  (1U << 22)
+#define CPUID_EXT2_MMX     (1U << 23)
+#define CPUID_EXT2_FXSR    (1U << 24)
+#define CPUID_EXT2_FFXSR   (1U << 25)
+#define CPUID_EXT2_PDPE1GB (1U << 26)
+#define CPUID_EXT2_RDTSCP  (1U << 27)
+#define CPUID_EXT2_LM      (1U << 29)
+#define CPUID_EXT2_3DNOWEXT (1U << 30)
+#define CPUID_EXT2_3DNOW   (1U << 31)
+
+/* CPUID[8000_0001].EDX bits that are aliases of CPUID[1].EDX bits on AMD CPUs */
+#define CPUID_EXT2_AMD_ALIASES (CPUID_EXT2_FPU | CPUID_EXT2_VME | \
+                                CPUID_EXT2_DE | CPUID_EXT2_PSE | \
+                                CPUID_EXT2_TSC | CPUID_EXT2_MSR | \
+                                CPUID_EXT2_PAE | CPUID_EXT2_MCE | \
+                                CPUID_EXT2_CX8 | CPUID_EXT2_APIC | \
+                                CPUID_EXT2_MTRR | CPUID_EXT2_PGE | \
+                                CPUID_EXT2_MCA | CPUID_EXT2_CMOV | \
+                                CPUID_EXT2_PAT | CPUID_EXT2_PSE36 | \
+                                CPUID_EXT2_MMX | CPUID_EXT2_FXSR)
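+
+/* These are the bits that x86_cpu_realizefn() copies from FEAT_1_EDX
+ * into FEAT_8000_0001_EDX for AMD-vendor CPUs (see the IS_AMD_CPU block
+ * in cpu.c earlier in this patch). */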
+
+#define CPUID_EXT3_LAHF_LM (1U << 0)
+#define CPUID_EXT3_CMP_LEG (1U << 1)
+#define CPUID_EXT3_SVM     (1U << 2)
+#define CPUID_EXT3_EXTAPIC (1U << 3)
+#define CPUID_EXT3_CR8LEG  (1U << 4)
+#define CPUID_EXT3_ABM     (1U << 5)
+#define CPUID_EXT3_SSE4A   (1U << 6)
+#define CPUID_EXT3_MISALIGNSSE (1U << 7)
+#define CPUID_EXT3_3DNOWPREFETCH (1U << 8)
+#define CPUID_EXT3_OSVW    (1U << 9)
+#define CPUID_EXT3_IBS     (1U << 10)
+#define CPUID_EXT3_XOP     (1U << 11)
+#define CPUID_EXT3_SKINIT  (1U << 12)
+#define CPUID_EXT3_WDT     (1U << 13)
+#define CPUID_EXT3_LWP     (1U << 15)
+#define CPUID_EXT3_FMA4    (1U << 16)
+#define CPUID_EXT3_TCE     (1U << 17)
+#define CPUID_EXT3_NODEID  (1U << 19)
+#define CPUID_EXT3_TBM     (1U << 21)
+#define CPUID_EXT3_TOPOEXT (1U << 22)
+#define CPUID_EXT3_PERFCORE (1U << 23)
+#define CPUID_EXT3_PERFNB  (1U << 24)
+
+#define CPUID_SVM_NPT          (1U << 0)
+#define CPUID_SVM_LBRV         (1U << 1)
+#define CPUID_SVM_SVMLOCK      (1U << 2)
+#define CPUID_SVM_NRIPSAVE     (1U << 3)
+#define CPUID_SVM_TSCSCALE     (1U << 4)
+#define CPUID_SVM_VMCBCLEAN    (1U << 5)
+#define CPUID_SVM_FLUSHASID    (1U << 6)
+#define CPUID_SVM_DECODEASSIST (1U << 7)
+#define CPUID_SVM_PAUSEFILTER  (1U << 10)
+#define CPUID_SVM_PFTHRESHOLD  (1U << 12)
+
+#define CPUID_7_0_EBX_FSGSBASE (1U << 0)
+#define CPUID_7_0_EBX_BMI1     (1U << 3)
+#define CPUID_7_0_EBX_HLE      (1U << 4)
+#define CPUID_7_0_EBX_AVX2     (1U << 5)
+#define CPUID_7_0_EBX_SMEP     (1U << 7)
+#define CPUID_7_0_EBX_BMI2     (1U << 8)
+#define CPUID_7_0_EBX_ERMS     (1U << 9)
+#define CPUID_7_0_EBX_INVPCID  (1U << 10)
+#define CPUID_7_0_EBX_RTM      (1U << 11)
+#define CPUID_7_0_EBX_MPX      (1U << 14)
+#define CPUID_7_0_EBX_AVX512F  (1U << 16) /* AVX-512 Foundation */
+#define CPUID_7_0_EBX_AVX512DQ (1U << 17) /* AVX-512 Doubleword & Quadword Instrs */
+#define CPUID_7_0_EBX_RDSEED   (1U << 18)
+#define CPUID_7_0_EBX_ADX      (1U << 19)
+#define CPUID_7_0_EBX_SMAP     (1U << 20)
+#define CPUID_7_0_EBX_AVX512IFMA (1U << 21) /* AVX-512 Integer Fused Multiply Add */
+#define CPUID_7_0_EBX_PCOMMIT  (1U << 22) /* Persistent Commit */
+#define CPUID_7_0_EBX_CLFLUSHOPT (1U << 23) /* Flush a Cache Line Optimized */
+#define CPUID_7_0_EBX_CLWB     (1U << 24) /* Cache Line Write Back */
+#define CPUID_7_0_EBX_AVX512PF (1U << 26) /* AVX-512 Prefetch */
+#define CPUID_7_0_EBX_AVX512ER (1U << 27) /* AVX-512 Exponential and Reciprocal */
+#define CPUID_7_0_EBX_AVX512CD (1U << 28) /* AVX-512 Conflict Detection */
+#define CPUID_7_0_EBX_AVX512BW (1U << 30) /* AVX-512 Byte and Word Instructions */
+#define CPUID_7_0_EBX_AVX512VL (1U << 31) /* AVX-512 Vector Length Extensions */
+
+#define CPUID_7_0_ECX_VBMI     (1U << 1)  /* AVX-512 Vector Byte Manipulation Instrs */
+#define CPUID_7_0_ECX_UMIP     (1U << 2)
+#define CPUID_7_0_ECX_PKU      (1U << 3)
+#define CPUID_7_0_ECX_OSPKE    (1U << 4)
+#define CPUID_7_0_ECX_RDPID    (1U << 22)
+
+#define CPUID_7_0_EDX_AVX512_4VNNIW (1U << 2) /* AVX512 Neural Network Instructions */
+#define CPUID_7_0_EDX_AVX512_4FMAPS (1U << 3) /* AVX512 Multiply Accumulation Single Precision */
+
+#define CPUID_XSAVE_XSAVEOPT   (1U << 0)
+#define CPUID_XSAVE_XSAVEC     (1U << 1)
+#define CPUID_XSAVE_XGETBV1    (1U << 2)
+#define CPUID_XSAVE_XSAVES     (1U << 3)
+
+#define CPUID_6_EAX_ARAT       (1U << 2)
+
+/* CPUID[0x80000007].EDX flags: */
+#define CPUID_APM_INVTSC       (1U << 8)
+
+#define CPUID_VENDOR_SZ      12
+
+#define CPUID_VENDOR_INTEL_1 0x756e6547 /* "Genu" */
+#define CPUID_VENDOR_INTEL_2 0x49656e69 /* "ineI" */
+#define CPUID_VENDOR_INTEL_3 0x6c65746e /* "ntel" */
+#define CPUID_VENDOR_INTEL "GenuineIntel"
+
+#define CPUID_VENDOR_AMD_1   0x68747541 /* "Auth" */
+#define CPUID_VENDOR_AMD_2   0x69746e65 /* "enti" */
+#define CPUID_VENDOR_AMD_3   0x444d4163 /* "cAMD" */
+#define CPUID_VENDOR_AMD   "AuthenticAMD"
+
+#define CPUID_VENDOR_VIA   "CentaurHauls"
+
+#define CPUID_MWAIT_IBE     (1U << 1) /* Interrupts can exit capability */
+#define CPUID_MWAIT_EMX     (1U << 0) /* enumeration supported */
+
+/* CPUID[0xB].ECX level types */
+#define CPUID_TOPOLOGY_LEVEL_INVALID  (0U << 8)
+#define CPUID_TOPOLOGY_LEVEL_SMT      (1U << 8)
+#define CPUID_TOPOLOGY_LEVEL_CORE     (2U << 8)
+
+#ifndef HYPERV_SPINLOCK_NEVER_RETRY
+#define HYPERV_SPINLOCK_NEVER_RETRY             0xFFFFFFFF
+#endif
+
+#define EXCP00_DIVZ	0
+#define EXCP01_DB	1
+#define EXCP02_NMI	2
+#define EXCP03_INT3	3
+#define EXCP04_INTO	4
+#define EXCP05_BOUND	5
+#define EXCP06_ILLOP	6
+#define EXCP07_PREX	7
+#define EXCP08_DBLE	8
+#define EXCP09_XERR	9
+#define EXCP0A_TSS	10
+#define EXCP0B_NOSEG	11
+#define EXCP0C_STACK	12
+#define EXCP0D_GPF	13
+#define EXCP0E_PAGE	14
+#define EXCP10_COPR	16
+#define EXCP11_ALGN	17
+#define EXCP12_MCHK	18
+
+#define EXCP_SYSCALL    0x100 /* only happens in user-only emulation
+                                 for the syscall instruction */
+
+/* i386-specific interrupt pending bits.  */
+#define CPU_INTERRUPT_POLL      CPU_INTERRUPT_TGT_EXT_1
+#define CPU_INTERRUPT_SMI       CPU_INTERRUPT_TGT_EXT_2
+#define CPU_INTERRUPT_NMI       CPU_INTERRUPT_TGT_EXT_3
+#define CPU_INTERRUPT_MCE       CPU_INTERRUPT_TGT_EXT_4
+#define CPU_INTERRUPT_VIRQ      CPU_INTERRUPT_TGT_INT_0
+#define CPU_INTERRUPT_SIPI      CPU_INTERRUPT_TGT_INT_1
+#define CPU_INTERRUPT_TPR       CPU_INTERRUPT_TGT_INT_2
+
+/* Use a clearer name for this.  */
+#define CPU_INTERRUPT_INIT      CPU_INTERRUPT_RESET
+
+/* Instead of computing the condition codes after each x86 instruction,
+ * QEMU just stores one operand (called CC_SRC), the result
+ * (called CC_DST) and the type of operation (called CC_OP). When the
+ * condition codes are needed, they can be calculated using this
+ * information. Condition codes are not generated if they
+ * are only needed for conditional branches.
+ */
+typedef enum {
+    CC_OP_DYNAMIC, /* must use dynamic code to get cc_op */
+    CC_OP_EFLAGS,  /* all cc are explicitly computed, CC_SRC = flags */
+
+    CC_OP_MULB, /* modify all flags, C, O = (CC_SRC != 0) */
+    CC_OP_MULW,
+    CC_OP_MULL,
+    CC_OP_MULQ,
+
+    CC_OP_ADDB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
+    CC_OP_ADDW,
+    CC_OP_ADDL,
+    CC_OP_ADDQ,
+
+    CC_OP_ADCB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
+    CC_OP_ADCW,
+    CC_OP_ADCL,
+    CC_OP_ADCQ,
+
+    CC_OP_SUBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
+    CC_OP_SUBW,
+    CC_OP_SUBL,
+    CC_OP_SUBQ,
+
+    CC_OP_SBBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
+    CC_OP_SBBW,
+    CC_OP_SBBL,
+    CC_OP_SBBQ,
+
+    CC_OP_LOGICB, /* modify all flags, CC_DST = res */
+    CC_OP_LOGICW,
+    CC_OP_LOGICL,
+    CC_OP_LOGICQ,
+
+    CC_OP_INCB, /* modify all flags except C, CC_DST = res, CC_SRC = C */
+    CC_OP_INCW,
+    CC_OP_INCL,
+    CC_OP_INCQ,
+
+    CC_OP_DECB, /* modify all flags except C, CC_DST = res, CC_SRC = C */
+    CC_OP_DECW,
+    CC_OP_DECL,
+    CC_OP_DECQ,
+
+    CC_OP_SHLB, /* modify all flags, CC_DST = res, CC_SRC.msb = C */
+    CC_OP_SHLW,
+    CC_OP_SHLL,
+    CC_OP_SHLQ,
+
+    CC_OP_SARB, /* modify all flags, CC_DST = res, CC_SRC.lsb = C */
+    CC_OP_SARW,
+    CC_OP_SARL,
+    CC_OP_SARQ,
+
+    CC_OP_BMILGB, /* Z,S via CC_DST, C = SRC==0; O=0; P,A undefined */
+    CC_OP_BMILGW,
+    CC_OP_BMILGL,
+    CC_OP_BMILGQ,
+
+    CC_OP_ADCX, /* CC_DST = C, CC_SRC = rest.  */
+    CC_OP_ADOX, /* CC_DST = O, CC_SRC = rest.  */
+    CC_OP_ADCOX, /* CC_DST = C, CC_SRC2 = O, CC_SRC = rest.  */
+
+    CC_OP_CLR, /* Z set, all other flags clear.  */
+
+    CC_OP_NB,
+} CCOp;
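+
+/* Illustrative sketch of the lazy scheme (the real code lives in the
+ * cc_helper files): for CC_OP_ADDL, with CC_DST = res and CC_SRC = src1,
+ * the carry can be recovered on demand as
+ *
+ *     cf = (uint32_t)CC_DST < (uint32_t)CC_SRC;
+ *
+ * so no flags work happens unless something actually reads the flags. */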
+
+typedef struct SegmentCache {
+    uint32_t selector;
+    target_ulong base;
+    uint32_t limit;
+    uint32_t flags;
+} SegmentCache;
+
+#define MMREG_UNION(n, bits)        \
+    union n {                       \
+        uint8_t  _b_##n[(bits)/8];  \
+        uint16_t _w_##n[(bits)/16]; \
+        uint32_t _l_##n[(bits)/32]; \
+        uint64_t _q_##n[(bits)/64]; \
+        float32  _s_##n[(bits)/32]; \
+        float64  _d_##n[(bits)/64]; \
+    }
+
+typedef MMREG_UNION(ZMMReg, 512) ZMMReg;
+typedef MMREG_UNION(MMXReg, 64)  MMXReg;
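+
+/* For example, MMREG_UNION(MMXReg, 64) expands to a union with
+ * _b_MMXReg[8], _w_MMXReg[4], _l_MMXReg[2] and _q_MMXReg[1], plus
+ * float32/float64 views of the same 8 bytes. */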
+
+typedef struct BNDReg {
+    uint64_t lb;
+    uint64_t ub;
+} BNDReg;
+
+typedef struct BNDCSReg {
+    uint64_t cfgu;
+    uint64_t sts;
+} BNDCSReg;
+
+#define BNDCFG_ENABLE       1ULL
+#define BNDCFG_BNDPRESERVE  2ULL
+#define BNDCFG_BDIR_MASK    TARGET_PAGE_MASK
+
+#ifdef HOST_WORDS_BIGENDIAN
+#define ZMM_B(n) _b_ZMMReg[63 - (n)]
+#define ZMM_W(n) _w_ZMMReg[31 - (n)]
+#define ZMM_L(n) _l_ZMMReg[15 - (n)]
+#define ZMM_S(n) _s_ZMMReg[15 - (n)]
+#define ZMM_Q(n) _q_ZMMReg[7 - (n)]
+#define ZMM_D(n) _d_ZMMReg[7 - (n)]
+
+#define MMX_B(n) _b_MMXReg[7 - (n)]
+#define MMX_W(n) _w_MMXReg[3 - (n)]
+#define MMX_L(n) _l_MMXReg[1 - (n)]
+#define MMX_S(n) _s_MMXReg[1 - (n)]
+#else
+#define ZMM_B(n) _b_ZMMReg[n]
+#define ZMM_W(n) _w_ZMMReg[n]
+#define ZMM_L(n) _l_ZMMReg[n]
+#define ZMM_S(n) _s_ZMMReg[n]
+#define ZMM_Q(n) _q_ZMMReg[n]
+#define ZMM_D(n) _d_ZMMReg[n]
+
+#define MMX_B(n) _b_MMXReg[n]
+#define MMX_W(n) _w_MMXReg[n]
+#define MMX_L(n) _l_MMXReg[n]
+#define MMX_S(n) _s_MMXReg[n]
+#endif
+#define MMX_Q(n) _q_MMXReg[n]
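+
+/* Note on the macros above: the reversed indices on big-endian hosts keep
+ * element numbering in guest order, so e.g. ZMM_L(0) is meant to name the
+ * least-significant doubleword regardless of host byte order. */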
+
+typedef union {
+    floatx80 d __attribute__((aligned(16)));
+    MMXReg mmx;
+} FPReg;
+
+typedef struct {
+    uint64_t base;
+    uint64_t mask;
+} MTRRVar;
+
+#define CPU_NB_REGS64 16
+#define CPU_NB_REGS32 8
+
+#ifdef TARGET_X86_64
+#define CPU_NB_REGS CPU_NB_REGS64
+#else
+#define CPU_NB_REGS CPU_NB_REGS32
+#endif
+
+#define MAX_FIXED_COUNTERS 3
+#define MAX_GP_COUNTERS    (MSR_IA32_PERF_STATUS - MSR_P6_EVNTSEL0)
+
+#define NB_MMU_MODES 3
+#define TARGET_INSN_START_EXTRA_WORDS 1
+
+#define NB_OPMASK_REGS 8
+
+/* A CPU can't have APIC ID 0xFFFFFFFF, so use that value to indicate
+ * that the APIC ID hasn't been set yet
+ */
+#define UNASSIGNED_APIC_ID 0xFFFFFFFF
+
+typedef union X86LegacyXSaveArea {
+    struct {
+        uint16_t fcw;
+        uint16_t fsw;
+        uint8_t ftw;
+        uint8_t reserved;
+        uint16_t fpop;
+        uint64_t fpip;
+        uint64_t fpdp;
+        uint32_t mxcsr;
+        uint32_t mxcsr_mask;
+        FPReg fpregs[8];
+        uint8_t xmm_regs[16][16];
+    };
+    uint8_t data[512];
+} X86LegacyXSaveArea;
+
+typedef struct X86XSaveHeader {
+    uint64_t xstate_bv;
+    uint64_t xcomp_bv;
+    uint64_t reserve0;
+    uint8_t reserved[40];
+} X86XSaveHeader;
+
+/* Ext. save area 2: AVX State */
+typedef struct XSaveAVX {
+    uint8_t ymmh[16][16];
+} XSaveAVX;
+
+/* Ext. save area 3: BNDREG */
+typedef struct XSaveBNDREG {
+    BNDReg bnd_regs[4];
+} XSaveBNDREG;
+
+/* Ext. save area 4: BNDCSR */
+typedef union XSaveBNDCSR {
+    BNDCSReg bndcsr;
+    uint8_t data[64];
+} XSaveBNDCSR;
+
+/* Ext. save area 5: Opmask */
+typedef struct XSaveOpmask {
+    uint64_t opmask_regs[NB_OPMASK_REGS];
+} XSaveOpmask;
+
+/* Ext. save area 6: ZMM_Hi256 */
+typedef struct XSaveZMM_Hi256 {
+    uint8_t zmm_hi256[16][32];
+} XSaveZMM_Hi256;
+
+/* Ext. save area 7: Hi16_ZMM */
+typedef struct XSaveHi16_ZMM {
+    uint8_t hi16_zmm[16][64];
+} XSaveHi16_ZMM;
+
+/* Ext. save area 9: PKRU state */
+typedef struct XSavePKRU {
+    uint32_t pkru;
+    uint32_t padding;
+} XSavePKRU;
+
+typedef struct X86XSaveArea {
+    X86LegacyXSaveArea legacy;
+    X86XSaveHeader header;
+
+    /* Extended save areas: */
+
+    /* AVX State: */
+    XSaveAVX avx_state;
+    uint8_t padding[960 - 576 - sizeof(XSaveAVX)];
+    /* MPX State: */
+    XSaveBNDREG bndreg_state;
+    XSaveBNDCSR bndcsr_state;
+    /* AVX-512 State: */
+    XSaveOpmask opmask_state;
+    XSaveZMM_Hi256 zmm_hi256_state;
+    XSaveHi16_ZMM hi16_zmm_state;
+    /* PKRU State: */
+    XSavePKRU pkru_state;
+} X86XSaveArea;
+
+QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, avx_state) != 0x240);
+QEMU_BUILD_BUG_ON(sizeof(XSaveAVX) != 0x100);
+QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, bndreg_state) != 0x3c0);
+QEMU_BUILD_BUG_ON(sizeof(XSaveBNDREG) != 0x40);
+QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, bndcsr_state) != 0x400);
+QEMU_BUILD_BUG_ON(sizeof(XSaveBNDCSR) != 0x40);
+QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, opmask_state) != 0x440);
+QEMU_BUILD_BUG_ON(sizeof(XSaveOpmask) != 0x40);
+QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, zmm_hi256_state) != 0x480);
+QEMU_BUILD_BUG_ON(sizeof(XSaveZMM_Hi256) != 0x200);
+QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, hi16_zmm_state) != 0x680);
+QEMU_BUILD_BUG_ON(sizeof(XSaveHi16_ZMM) != 0x400);
+QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, pkru_state) != 0xA80);
+QEMU_BUILD_BUG_ON(sizeof(XSavePKRU) != 0x8);
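+
+/* The offsets pinned above appear to match the standard (non-compacted)
+ * XSAVE layout advertised by CPUID.(EAX=0DH,ECX=n).EBX, presumably so the
+ * structure can be exchanged with the kernel/KVM verbatim. */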
+
+typedef enum TPRAccess {
+    TPR_ACCESS_READ,
+    TPR_ACCESS_WRITE,
+} TPRAccess;
+
+typedef struct CPUX86State {
+    /* standard registers */
+    target_ulong regs[CPU_NB_REGS];
+    target_ulong eip;
+    target_ulong eflags; /* eflags register. During CPU emulation, CC
+                        flags and DF are set to zero because they are
+                        stored elsewhere */
+
+    /* emulator internal eflags handling */
+    target_ulong cc_dst;
+    target_ulong cc_src;
+    target_ulong cc_src2;
+    uint32_t cc_op;
+    int32_t df; /* D flag : 1 if D = 0, -1 if D = 1 */
+    uint32_t hflags; /* TB flags, see HF_xxx constants. These flags
+                        are known at translation time. */
+    uint32_t hflags2; /* various other flags, see HF2_xxx constants. */
+
+    /* segments */
+    SegmentCache segs[6]; /* selector values */
+    SegmentCache ldt;
+    SegmentCache tr;
+    SegmentCache gdt; /* only base and limit are used */
+    SegmentCache idt; /* only base and limit are used */
+
+    target_ulong cr[5]; /* NOTE: cr1 is unused */
+    int32_t a20_mask;
+
+    BNDReg bnd_regs[4];
+    BNDCSReg bndcs_regs;
+    uint64_t msr_bndcfgs;
+    uint64_t efer;
+
+    /* Beginning of state preserved by INIT (dummy marker).  */
+    struct {} start_init_save;
+
+    /* FPU state */
+    unsigned int fpstt; /* top of stack index */
+    uint16_t fpus;
+    uint16_t fpuc;
+    uint8_t fptags[8];   /* 0 = valid, 1 = empty */
+    FPReg fpregs[8];
+    /* KVM-only so far */
+    uint16_t fpop;
+    uint64_t fpip;
+    uint64_t fpdp;
+
+    /* emulator internal variables */
+    float_status fp_status;
+    floatx80 ft0;
+
+    float_status mmx_status; /* for 3DNow! float ops */
+    float_status sse_status;
+    uint32_t mxcsr;
+    ZMMReg xmm_regs[CPU_NB_REGS == 8 ? 8 : 32];
+    ZMMReg xmm_t0;
+    MMXReg mmx_t0;
+
+    uint64_t opmask_regs[NB_OPMASK_REGS];
+
+    /* sysenter registers */
+    uint32_t sysenter_cs;
+    target_ulong sysenter_esp;
+    target_ulong sysenter_eip;
+    uint64_t star;
+
+    uint64_t vm_hsave;
+
+#ifdef TARGET_X86_64
+    target_ulong lstar;
+    target_ulong cstar;
+    target_ulong fmask;
+    target_ulong kernelgsbase;
+#endif
+
+    uint64_t tsc;
+    uint64_t tsc_adjust;
+    uint64_t tsc_deadline;
+    uint64_t tsc_aux;
+
+    uint64_t xcr0;
+
+    uint64_t mcg_status;
+    uint64_t msr_ia32_misc_enable;
+    uint64_t msr_ia32_feature_control;
+
+    uint64_t msr_fixed_ctr_ctrl;
+    uint64_t msr_global_ctrl;
+    uint64_t msr_global_status;
+    uint64_t msr_global_ovf_ctrl;
+    uint64_t msr_fixed_counters[MAX_FIXED_COUNTERS];
+    uint64_t msr_gp_counters[MAX_GP_COUNTERS];
+    uint64_t msr_gp_evtsel[MAX_GP_COUNTERS];
+
+    uint64_t pat;
+    uint32_t smbase;
+
+    uint32_t pkru;
+
+    /* End of state preserved by INIT (dummy marker).  */
+    struct {} end_init_save;
+
+    uint64_t system_time_msr;
+    uint64_t wall_clock_msr;
+    uint64_t steal_time_msr;
+    uint64_t async_pf_en_msr;
+    uint64_t pv_eoi_en_msr;
+
+    uint64_t msr_hv_hypercall;
+    uint64_t msr_hv_guest_os_id;
+    uint64_t msr_hv_vapic;
+    uint64_t msr_hv_tsc;
+    uint64_t msr_hv_crash_params[HV_X64_MSR_CRASH_PARAMS];
+    uint64_t msr_hv_runtime;
+    uint64_t msr_hv_synic_control;
+    uint64_t msr_hv_synic_version;
+    uint64_t msr_hv_synic_evt_page;
+    uint64_t msr_hv_synic_msg_page;
+    uint64_t msr_hv_synic_sint[HV_SYNIC_SINT_COUNT];
+    uint64_t msr_hv_stimer_config[HV_SYNIC_STIMER_COUNT];
+    uint64_t msr_hv_stimer_count[HV_SYNIC_STIMER_COUNT];
+
+    /* exception/interrupt handling */
+    int error_code;
+    int exception_is_int;
+    target_ulong exception_next_eip;
+    target_ulong dr[8]; /* debug registers; note dr4 and dr5 are unused */
+    union {
+        struct CPUBreakpoint *cpu_breakpoint[4];
+        struct CPUWatchpoint *cpu_watchpoint[4];
+    }; /* break/watchpoints for dr[0..3] */
+    int old_exception;  /* exception in flight */
+
+    uint64_t vm_vmcb;
+    uint64_t tsc_offset;
+    uint64_t intercept;
+    uint16_t intercept_cr_read;
+    uint16_t intercept_cr_write;
+    uint16_t intercept_dr_read;
+    uint16_t intercept_dr_write;
+    uint32_t intercept_exceptions;
+    uint8_t v_tpr;
+
+    /* KVM states, automatically cleared on reset */
+    uint8_t nmi_injected;
+    uint8_t nmi_pending;
+
+    CPU_COMMON
+
+    /* Fields from here on are preserved across CPU reset. */
+    struct {} end_reset_fields;
+
+    /* processor features (e.g. for CPUID insn) */
+    /* Minimum level/xlevel/xlevel2, based on CPU model + features */
+    uint32_t cpuid_min_level, cpuid_min_xlevel, cpuid_min_xlevel2;
+    /* Maximum level/xlevel/xlevel2 value for auto-assignment: */
+    uint32_t cpuid_max_level, cpuid_max_xlevel, cpuid_max_xlevel2;
+    /* Actual level/xlevel/xlevel2 value: */
+    uint32_t cpuid_level, cpuid_xlevel, cpuid_xlevel2;
+    uint32_t cpuid_vendor1;
+    uint32_t cpuid_vendor2;
+    uint32_t cpuid_vendor3;
+    uint32_t cpuid_version;
+    FeatureWordArray features;
+    uint32_t cpuid_model[12];
+
+    /* MTRRs */
+    uint64_t mtrr_fixed[11];
+    uint64_t mtrr_deftype;
+    MTRRVar mtrr_var[MSR_MTRRcap_VCNT];
+
+    /* For KVM */
+    uint32_t mp_state;
+    int32_t exception_injected;
+    int32_t interrupt_injected;
+    uint8_t soft_interrupt;
+    uint8_t has_error_code;
+    uint32_t sipi_vector;
+    bool tsc_valid;
+    int64_t tsc_khz;
+    int64_t user_tsc_khz; /* for sanity check only */
+    void *kvm_xsave_buf;
+
+    uint64_t mcg_cap;
+    uint64_t mcg_ctl;
+    uint64_t mcg_ext_ctl;
+    uint64_t mce_banks[MCE_BANKS_DEF*4];
+    uint64_t xstate_bv;
+
+    /* vmstate */
+    uint16_t fpus_vmstate;
+    uint16_t fptag_vmstate;
+    uint16_t fpregs_format_vmstate;
+
+    uint64_t xss;
+
+    TPRAccess tpr_access_type;
+} CPUX86State;
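+
+/* Lazy EFLAGS illustration: after an emulated 32-bit ADD the translator
+   typically leaves cc_dst = result and cc_src = one operand, with
+   cc_op = CC_OP_ADDL; cpu_cc_compute_all(env, CC_OP) then reconstructs
+   CF/ZF/SF/OF on demand instead of updating eflags after every
+   instruction. */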
+
+struct kvm_msrs;
+
+/**
+ * X86CPU:
+ * @env: #CPUX86State
+ * @migratable: If set, only migratable flags will be accepted when "enforce"
+ * mode is used, and only migratable flags will be included in the "host"
+ * CPU model.
+ *
+ * An x86 CPU.
+ */
+struct X86CPU {
+    /*< private >*/
+    CPUState parent_obj;
+    /*< public >*/
+
+    CPUX86State env;
+
+    bool hyperv_vapic;
+    bool hyperv_relaxed_timing;
+    int hyperv_spinlock_attempts;
+    char *hyperv_vendor_id;
+    bool hyperv_time;
+    bool hyperv_crash;
+    bool hyperv_reset;
+    bool hyperv_vpindex;
+    bool hyperv_runtime;
+    bool hyperv_synic;
+    bool hyperv_stimer;
+    bool check_cpuid;
+    bool enforce_cpuid;
+    bool expose_kvm;
+    bool migratable;
+    bool host_features;
+    uint32_t apic_id;
+
+    /* if true, the CPUID code directly forwards host cache leaves to the guest */
+    bool cache_info_passthrough;
+
+    /* Features that were filtered out because of missing host capabilities */
+    uint32_t filtered_features[FEATURE_WORDS];
+
+    /* Enable PMU CPUID bits. This can't be enabled by default yet because
+     * it doesn't have ABI stability guarantees, as it passes all PMU CPUID
+     * bits returned by GET_SUPPORTED_CPUID (that depend on host CPU and kernel
+     * capabilities) directly to the guest.
+     */
+    bool enable_pmu;
+
+    /* LMCE support can be enabled/disabled via cpu option 'lmce=on/off'. It is
+     * disabled by default to avoid breaking migration between QEMU with
+     * different LMCE configurations.
+     */
+    bool enable_lmce;
+
+    /* Compatibility bits for old machine types.
+     * If true, a virtual L3 cache is presented to the VM; the vCPUs in the
+     * same virtual socket share a virtual L3 cache.
+     */
+    bool enable_l3_cache;
+
+    /* Compatibility bits for old machine types: */
+    bool enable_cpuid_0xb;
+
+    /* Enable auto level-increase for all CPUID leaves */
+    bool full_cpuid_auto_level;
+
+    /* if true, fill the top bits of the MTRR_PHYSMASKn variable-range masks */
+    bool fill_mtrr_mask;
+
+    /* if true, override the phys_bits value with a value read from the host */
+    bool host_phys_bits;
+
+    /* Number of physical address bits supported */
+    uint32_t phys_bits;
+
+    /* in order to simplify APIC support, we leave this pointer to be
+       set up by the board/APIC code */
+    struct DeviceState *apic_state;
+    struct MemoryRegion *cpu_as_root, *cpu_as_mem, *smram;
+    Notifier machine_done;
+
+    struct kvm_msrs *kvm_msr_buf;
+
+    int32_t socket_id;
+    int32_t core_id;
+    int32_t thread_id;
+};
+
+static inline X86CPU *x86_env_get_cpu(CPUX86State *env)
+{
+    return container_of(env, X86CPU, env);
+}
+
+#define ENV_GET_CPU(e) CPU(x86_env_get_cpu(e))
+
+#define ENV_OFFSET offsetof(X86CPU, env)
+
+#ifndef CONFIG_USER_ONLY
+extern struct VMStateDescription vmstate_x86_cpu;
+#endif
+
+/**
+ * x86_cpu_do_interrupt:
+ * @cpu: vCPU the interrupt is to be handled by.
+ */
+void x86_cpu_do_interrupt(CPUState *cpu);
+bool x86_cpu_exec_interrupt(CPUState *cpu, int int_req);
+
+int x86_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cpu,
+                             int cpuid, void *opaque);
+int x86_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cpu,
+                             int cpuid, void *opaque);
+int x86_cpu_write_elf64_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
+                                 void *opaque);
+int x86_cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
+                                 void *opaque);
+
+void x86_cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list,
+                                Error **errp);
+
+void x86_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
+                        int flags);
+
+hwaddr x86_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
+
+int x86_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
+int x86_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
+
+void x86_cpu_exec_enter(CPUState *cpu);
+void x86_cpu_exec_exit(CPUState *cpu);
+
+X86CPU *cpu_x86_init(const char *cpu_model);
+void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf);
+int cpu_x86_support_mca_broadcast(CPUX86State *env);
+
+int cpu_get_pic_interrupt(CPUX86State *s);
+/* MSDOS compatibility mode FPU exception support */
+void cpu_set_ferr(CPUX86State *s);
+
+/* this function must always be used to load data in the segment
+   cache: it synchronizes the hflags with the segment cache values */
+static inline void cpu_x86_load_seg_cache(CPUX86State *env,
+                                          int seg_reg, unsigned int selector,
+                                          target_ulong base,
+                                          unsigned int limit,
+                                          unsigned int flags)
+{
+    SegmentCache *sc;
+    unsigned int new_hflags;
+
+    sc = &env->segs[seg_reg];
+    sc->selector = selector;
+    sc->base = base;
+    sc->limit = limit;
+    sc->flags = flags;
+
+    /* update the hidden flags */
+    {
+        if (seg_reg == R_CS) {
+#ifdef TARGET_X86_64
+            if ((env->hflags & HF_LMA_MASK) && (flags & DESC_L_MASK)) {
+                /* long mode */
+                env->hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
+                env->hflags &= ~(HF_ADDSEG_MASK);
+            } else
+#endif
+            {
+                /* legacy / compatibility case */
+                new_hflags = (env->segs[R_CS].flags & DESC_B_MASK)
+                    >> (DESC_B_SHIFT - HF_CS32_SHIFT);
+                env->hflags = (env->hflags & ~(HF_CS32_MASK | HF_CS64_MASK)) |
+                    new_hflags;
+            }
+        }
+        if (seg_reg == R_SS) {
+            int cpl = (flags >> DESC_DPL_SHIFT) & 3;
+#if HF_CPL_MASK != 3
+#error HF_CPL_MASK is hardcoded
+#endif
+            env->hflags = (env->hflags & ~HF_CPL_MASK) | cpl;
+        }
+        new_hflags = (env->segs[R_SS].flags & DESC_B_MASK)
+            >> (DESC_B_SHIFT - HF_SS32_SHIFT);
+        if (env->hflags & HF_CS64_MASK) {
+            /* zero base assumed for DS, ES and SS in long mode */
+        } else if (!(env->cr[0] & CR0_PE_MASK) ||
+                   (env->eflags & VM_MASK) ||
+                   !(env->hflags & HF_CS32_MASK)) {
+            /* XXX: try to avoid this test. The problem comes from the
+               fact that in real mode or vm86 mode we only modify the
+               'base' and 'selector' fields of the segment cache to go
+               faster. A solution may be to force addseg to one in
+               translate.c. */
+            new_hflags |= HF_ADDSEG_MASK;
+        } else {
+            new_hflags |= ((env->segs[R_DS].base |
+                            env->segs[R_ES].base |
+                            env->segs[R_SS].base) != 0) <<
+                HF_ADDSEG_SHIFT;
+        }
+        env->hflags = (env->hflags &
+                       ~(HF_SS32_MASK | HF_ADDSEG_MASK)) | new_hflags;
+    }
+}
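+
+/* Illustrative use (names from this header): a flat 32-bit code segment
+   might be loaded as
+       cpu_x86_load_seg_cache(env, R_CS, 0x08, 0, 0xffffffff,
+                              DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
+                              DESC_S_MASK | DESC_CS_MASK | DESC_R_MASK);
+   which also refreshes HF_CS32/HF_SS32/HF_ADDSEG in env->hflags. */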
+
+static inline void cpu_x86_load_seg_cache_sipi(X86CPU *cpu,
+                                               uint8_t sipi_vector)
+{
+    CPUState *cs = CPU(cpu);
+    CPUX86State *env = &cpu->env;
+
+    env->eip = 0;
+    cpu_x86_load_seg_cache(env, R_CS, sipi_vector << 8,
+                           sipi_vector << 12,
+                           env->segs[R_CS].limit,
+                           env->segs[R_CS].flags);
+    cs->halted = 0;
+}
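+
+/* For example, a SIPI vector of 0x9a loads CS.selector = 0x9a00 and
+   CS.base = 0x9a000, so the AP starts executing at physical address
+   0x9a000 (CS.base + eip, with eip == 0). */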
+
+int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
+                            target_ulong *base, unsigned int *limit,
+                            unsigned int *flags);
+
+/* op_helper.c */
+/* used for debug or cpu save/restore */
+void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, floatx80 f);
+floatx80 cpu_set_fp80(uint64_t mant, uint16_t upper);
+
+/* cpu-exec.c */
+/* the following helpers are only usable in user mode simulation as
+   they can trigger unexpected exceptions */
+void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector);
+void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32);
+void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32);
+
+/* You can call this signal handler from your SIGBUS and SIGSEGV
+   signal handlers to inform the virtual CPU of exceptions. A non-zero
+   value is returned if the signal was handled by the virtual CPU.  */
+int cpu_x86_signal_handler(int host_signum, void *pinfo,
+                           void *puc);
+
+/* cpu.c */
+void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
+                   uint32_t *eax, uint32_t *ebx,
+                   uint32_t *ecx, uint32_t *edx);
+void cpu_clear_apic_feature(CPUX86State *env);
+void host_cpuid(uint32_t function, uint32_t count,
+                uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx);
+
+/* helper.c */
+int x86_cpu_handle_mmu_fault(CPUState *cpu, vaddr addr,
+                             int is_write, int mmu_idx);
+void x86_cpu_set_a20(X86CPU *cpu, int a20_state);
+
+#ifndef CONFIG_USER_ONLY
+uint8_t x86_ldub_phys(CPUState *cs, hwaddr addr);
+uint32_t x86_lduw_phys(CPUState *cs, hwaddr addr);
+uint32_t x86_ldl_phys(CPUState *cs, hwaddr addr);
+uint64_t x86_ldq_phys(CPUState *cs, hwaddr addr);
+void x86_stb_phys(CPUState *cs, hwaddr addr, uint8_t val);
+void x86_stl_phys_notdirty(CPUState *cs, hwaddr addr, uint32_t val);
+void x86_stw_phys(CPUState *cs, hwaddr addr, uint32_t val);
+void x86_stl_phys(CPUState *cs, hwaddr addr, uint32_t val);
+void x86_stq_phys(CPUState *cs, hwaddr addr, uint64_t val);
+#endif
+
+void breakpoint_handler(CPUState *cs);
+
+/* will be removed */
+void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
+void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
+void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);
+void cpu_x86_update_dr7(CPUX86State *env, uint32_t new_dr7);
+
+/* hw/pc.c */
+uint64_t cpu_get_tsc(CPUX86State *env);
+
+#define TARGET_PAGE_BITS 12
+
+#ifdef TARGET_X86_64
+#define TARGET_PHYS_ADDR_SPACE_BITS 52
+/* ??? This is really 48 bits, sign-extended, but the only thing
+   accessible to userland with bit 48 set is the VSYSCALL, and that
+   is handled via other mechanisms.  */
+#define TARGET_VIRT_ADDR_SPACE_BITS 47
+#else
+#define TARGET_PHYS_ADDR_SPACE_BITS 36
+#define TARGET_VIRT_ADDR_SPACE_BITS 32
+#endif
+
+/* XXX: This value should match the one returned by CPUID
+ * and the one used in exec.c */
+#if defined(TARGET_X86_64)
+#define TCG_PHYS_ADDR_BITS 40
+#else
+#define TCG_PHYS_ADDR_BITS 36
+#endif
+
+#define PHYS_ADDR_MASK MAKE_64BIT_MASK(0, TCG_PHYS_ADDR_BITS)
+
+#define cpu_init(cpu_model) CPU(cpu_x86_init(cpu_model))
+
+#define cpu_signal_handler cpu_x86_signal_handler
+#define cpu_list x86_cpu_list
+
+/* MMU modes definitions */
+#define MMU_MODE0_SUFFIX _ksmap
+#define MMU_MODE1_SUFFIX _user
+#define MMU_MODE2_SUFFIX _knosmap /* SMAP disabled or CPL<3 && AC=1 */
+#define MMU_KSMAP_IDX   0
+#define MMU_USER_IDX    1
+#define MMU_KNOSMAP_IDX 2
+static inline int cpu_mmu_index(CPUX86State *env, bool ifetch)
+{
+    return (env->hflags & HF_CPL_MASK) == 3 ? MMU_USER_IDX :
+        (!(env->hflags & HF_SMAP_MASK) || (env->eflags & AC_MASK))
+        ? MMU_KNOSMAP_IDX : MMU_KSMAP_IDX;
+}
+
+static inline int cpu_mmu_index_kernel(CPUX86State *env)
+{
+    return !(env->hflags & HF_SMAP_MASK) ? MMU_KNOSMAP_IDX :
+        ((env->hflags & HF_CPL_MASK) < 3 && (env->eflags & AC_MASK))
+        ? MMU_KNOSMAP_IDX : MMU_KSMAP_IDX;
+}
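+
+/* Illustration: with SMAP active (HF_SMAP_MASK set) a CPL < 3 access uses
+   MMU_KSMAP_IDX unless EFLAGS.AC is set, which selects MMU_KNOSMAP_IDX;
+   CPL == 3 always selects MMU_USER_IDX in cpu_mmu_index(). */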
+
+#define CC_DST  (env->cc_dst)
+#define CC_SRC  (env->cc_src)
+#define CC_SRC2 (env->cc_src2)
+#define CC_OP   (env->cc_op)
+
+/* n must be a constant to be efficient */
+static inline target_long lshift(target_long x, int n)
+{
+    if (n >= 0) {
+        return x << n;
+    } else {
+        return x >> (-n);
+    }
+}
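+
+/* e.g. lshift(x, 3) == x << 3, while lshift(x, -3) == x >> 3 (an
+   arithmetic shift, since target_long is signed). */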
+
+/* float macros */
+#define FT0    (env->ft0)
+#define ST0    (env->fpregs[env->fpstt].d)
+#define ST(n)  (env->fpregs[(env->fpstt + (n)) & 7].d)
+#define ST1    ST(1)
+
+/* translate.c */
+void tcg_x86_init(void);
+
+#include "exec/cpu-all.h"
+#include "svm.h"
+
+#if !defined(CONFIG_USER_ONLY)
+#include "hw/i386/apic.h"
+#endif
+
+static inline void cpu_get_tb_cpu_state(CPUX86State *env, target_ulong *pc,
+                                        target_ulong *cs_base, uint32_t *flags)
+{
+    *cs_base = env->segs[R_CS].base;
+    *pc = *cs_base + env->eip;
+    *flags = env->hflags |
+        (env->eflags & (IOPL_MASK | TF_MASK | RF_MASK | VM_MASK | AC_MASK));
+}
+
+void do_cpu_init(X86CPU *cpu);
+void do_cpu_sipi(X86CPU *cpu);
+
+#define MCE_INJECT_BROADCAST    1
+#define MCE_INJECT_UNCOND_AO    2
+
+void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
+                        uint64_t status, uint64_t mcg_status, uint64_t addr,
+                        uint64_t misc, int flags);
+
+/* excp_helper.c */
+void QEMU_NORETURN raise_exception(CPUX86State *env, int exception_index);
+void QEMU_NORETURN raise_exception_ra(CPUX86State *env, int exception_index,
+                                      uintptr_t retaddr);
+void QEMU_NORETURN raise_exception_err(CPUX86State *env, int exception_index,
+                                       int error_code);
+void QEMU_NORETURN raise_exception_err_ra(CPUX86State *env, int exception_index,
+                                          int error_code, uintptr_t retaddr);
+void QEMU_NORETURN raise_interrupt(CPUX86State *env, int intno, int is_int,
+                                   int error_code, int next_eip_addend);
+
+/* cc_helper.c */
+extern const uint8_t parity_table[256];
+uint32_t cpu_cc_compute_all(CPUX86State *env1, int op);
+void update_fp_status(CPUX86State *env);
+
+static inline uint32_t cpu_compute_eflags(CPUX86State *env)
+{
+    return env->eflags | cpu_cc_compute_all(env, CC_OP) | (env->df & DF_MASK);
+}
+
+/* NOTE: the translator must set DisasContext.cc_op to CC_OP_EFLAGS
+ * after generating a call to a helper that uses this.
+ */
+static inline void cpu_load_eflags(CPUX86State *env, int eflags,
+                                   int update_mask)
+{
+    CC_SRC = eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
+    CC_OP = CC_OP_EFLAGS;
+    env->df = 1 - (2 * ((eflags >> 10) & 1));
+    env->eflags = (env->eflags & ~update_mask) |
+        (eflags & update_mask) | 0x2;
+}
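+
+/* For example, loading an eflags value with DF (bit 10) set yields
+   env->df == -1, while a clear DF yields env->df == 1; the arithmetic
+   flags land in CC_SRC with CC_OP_EFLAGS, so cpu_cc_compute_all() can
+   hand them back unchanged. */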
+
+/* load efer and update the corresponding hflags. XXX: do consistency
+   checks with cpuid bits? */
+static inline void cpu_load_efer(CPUX86State *env, uint64_t val)
+{
+    env->efer = val;
+    env->hflags &= ~(HF_LMA_MASK | HF_SVME_MASK);
+    if (env->efer & MSR_EFER_LMA) {
+        env->hflags |= HF_LMA_MASK;
+    }
+    if (env->efer & MSR_EFER_SVME) {
+        env->hflags |= HF_SVME_MASK;
+    }
+}
+
+static inline MemTxAttrs cpu_get_mem_attrs(CPUX86State *env)
+{
+    return ((MemTxAttrs) { .secure = (env->hflags & HF_SMM_MASK) != 0 });
+}
+
+/* fpu_helper.c */
+void cpu_set_mxcsr(CPUX86State *env, uint32_t val);
+void cpu_set_fpuc(CPUX86State *env, uint16_t val);
+
+/* mem_helper.c */
+void helper_lock_init(void);
+
+/* svm_helper.c */
+void cpu_svm_check_intercept_param(CPUX86State *env1, uint32_t type,
+                                   uint64_t param);
+void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1);
+
+/* seg_helper.c */
+void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw);
+
+/* smm_helper.c */
+void do_smm_enter(X86CPU *cpu);
+void cpu_smm_update(X86CPU *cpu);
+
+/* apic.c */
+void cpu_report_tpr_access(CPUX86State *env, TPRAccess access);
+void apic_handle_tpr_access_report(DeviceState *d, target_ulong ip,
+                                   TPRAccess access);
+
+
+/* Change the value of a KVM-specific default
+ *
+ * If value is NULL, no default will be set and the original
+ * value from the CPU model table will be kept.
+ *
+ * It is valid to call this function only for properties that
+ * are already present in the kvm_default_props table.
+ */
+void x86_cpu_change_kvm_default(const char *prop, const char *value);
+
+/* mpx_helper.c */
+void cpu_sync_bndcs_hflags(CPUX86State *env);
+
+/* Return name of 32-bit register, from an R_* constant */
+const char *get_register_name_32(unsigned int reg);
+
+void enable_compat_apic_id_mode(void);
+
+#define APIC_DEFAULT_ADDRESS 0xfee00000
+#define APIC_SPACE_SIZE      0x100000
+
+void x86_cpu_dump_local_apic_state(CPUState *cs, FILE *f,
+                                   fprintf_function cpu_fprintf, int flags);
+
+/* cpu.c */
+bool cpu_is_bsp(X86CPU *cpu);
+
+#endif /* I386_CPU_H */
diff --git a/target/i386/excp_helper.c b/target/i386/excp_helper.c
new file mode 100644
index 0000000000..f0dc4996c1
--- /dev/null
+++ b/target/i386/excp_helper.c
@@ -0,0 +1,137 @@
+/*
+ *  x86 exception helpers
+ *
+ *  Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "qemu/log.h"
+#include "sysemu/sysemu.h"
+#include "exec/helper-proto.h"
+
+void helper_raise_interrupt(CPUX86State *env, int intno, int next_eip_addend)
+{
+    raise_interrupt(env, intno, 1, 0, next_eip_addend);
+}
+
+void helper_raise_exception(CPUX86State *env, int exception_index)
+{
+    raise_exception(env, exception_index);
+}
+
+/*
+ * Check nested exceptions and change to double or triple fault if
+ * needed. It should only be called if this is not an interrupt.
+ * Returns the new exception number.
+ */
+static int check_exception(CPUX86State *env, int intno, int *error_code)
+{
+    int first_contributory = env->old_exception == 0 ||
+                              (env->old_exception >= 10 &&
+                               env->old_exception <= 13);
+    int second_contributory = intno == 0 ||
+                               (intno >= 10 && intno <= 13);
+
+    qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
+                env->old_exception, intno);
+
+#if !defined(CONFIG_USER_ONLY)
+    if (env->old_exception == EXCP08_DBLE) {
+        if (env->hflags & HF_SVMI_MASK) {
+            cpu_vmexit(env, SVM_EXIT_SHUTDOWN, 0); /* does not return */
+        }
+
+        qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
+
+        qemu_system_reset_request();
+        return EXCP_HLT;
+    }
+#endif
+
+    if ((first_contributory && second_contributory)
+        || (env->old_exception == EXCP0E_PAGE &&
+            (second_contributory || (intno == EXCP0E_PAGE)))) {
+        intno = EXCP08_DBLE;
+        *error_code = 0;
+    }
+
+    if (second_contributory || (intno == EXCP0E_PAGE) ||
+        (intno == EXCP08_DBLE)) {
+        env->old_exception = intno;
+    }
+
+    return intno;
+}
+
+/*
+ * Signal an interrupt or exception. It is executed in the main CPU loop.
+ * is_int is TRUE if coming from the int instruction. next_eip_addend is
+ * added to env->eip to obtain the eip value AFTER the interrupt
+ * instruction; it is only relevant if is_int is TRUE.
+ */
+static void QEMU_NORETURN raise_interrupt2(CPUX86State *env, int intno,
+                                           int is_int, int error_code,
+                                           int next_eip_addend,
+                                           uintptr_t retaddr)
+{
+    CPUState *cs = CPU(x86_env_get_cpu(env));
+
+    if (!is_int) {
+        cpu_svm_check_intercept_param(env, SVM_EXIT_EXCP_BASE + intno,
+                                      error_code);
+        intno = check_exception(env, intno, &error_code);
+    } else {
+        cpu_svm_check_intercept_param(env, SVM_EXIT_SWINT, 0);
+    }
+
+    cs->exception_index = intno;
+    env->error_code = error_code;
+    env->exception_is_int = is_int;
+    env->exception_next_eip = env->eip + next_eip_addend;
+    cpu_loop_exit_restore(cs, retaddr);
+}
+
+/* shortcuts to generate exceptions */
+
+void QEMU_NORETURN raise_interrupt(CPUX86State *env, int intno, int is_int,
+                                   int error_code, int next_eip_addend)
+{
+    raise_interrupt2(env, intno, is_int, error_code, next_eip_addend, 0);
+}
+
+void raise_exception_err(CPUX86State *env, int exception_index,
+                         int error_code)
+{
+    raise_interrupt2(env, exception_index, 0, error_code, 0, 0);
+}
+
+void raise_exception_err_ra(CPUX86State *env, int exception_index,
+                            int error_code, uintptr_t retaddr)
+{
+    raise_interrupt2(env, exception_index, 0, error_code, 0, retaddr);
+}
+
+void raise_exception(CPUX86State *env, int exception_index)
+{
+    raise_interrupt2(env, exception_index, 0, 0, 0, 0);
+}
+
+void raise_exception_ra(CPUX86State *env, int exception_index, uintptr_t retaddr)
+{
+    raise_interrupt2(env, exception_index, 0, 0, 0, retaddr);
+}
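+
+/* Typical use, for illustration: a general protection fault with an error
+   code can be raised from a helper as
+       raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
+   which funnels through raise_interrupt2() and does not return. */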
diff --git a/target/i386/fpu_helper.c b/target/i386/fpu_helper.c
new file mode 100644
index 0000000000..2049a8c01d
--- /dev/null
+++ b/target/i386/fpu_helper.c
@@ -0,0 +1,1625 @@
+/*
+ *  x86 FPU, MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4/PNI helpers
+ *
+ *  Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include <math.h>
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "qemu/host-utils.h"
+#include "exec/exec-all.h"
+#include "exec/cpu_ldst.h"
+
+#define FPU_RC_MASK         0xc00
+#define FPU_RC_NEAR         0x000
+#define FPU_RC_DOWN         0x400
+#define FPU_RC_UP           0x800
+#define FPU_RC_CHOP         0xc00
+
+#define MAXTAN 9223372036854775808.0
+
+/* the following deal with x86 long double-precision numbers */
+#define MAXEXPD 0x7fff
+#define EXPBIAS 16383
+#define EXPD(fp)        (fp.l.upper & 0x7fff)
+#define SIGND(fp)       ((fp.l.upper) & 0x8000)
+#define MANTD(fp)       (fp.l.lower)
+#define BIASEXPONENT(fp) fp.l.upper = (fp.l.upper & ~(0x7fff)) | EXPBIAS
+
+#define FPUS_IE (1 << 0)
+#define FPUS_DE (1 << 1)
+#define FPUS_ZE (1 << 2)
+#define FPUS_OE (1 << 3)
+#define FPUS_UE (1 << 4)
+#define FPUS_PE (1 << 5)
+#define FPUS_SF (1 << 6)
+#define FPUS_SE (1 << 7)
+#define FPUS_B  (1 << 15)
+
+#define FPUC_EM 0x3f
+
+#define floatx80_lg2 make_floatx80(0x3ffd, 0x9a209a84fbcff799LL)
+#define floatx80_l2e make_floatx80(0x3fff, 0xb8aa3b295c17f0bcLL)
+#define floatx80_l2t make_floatx80(0x4000, 0xd49a784bcd1b8afeLL)
+
+static inline void fpush(CPUX86State *env)
+{
+    env->fpstt = (env->fpstt - 1) & 7;
+    env->fptags[env->fpstt] = 0; /* validate stack entry */
+}
+
+static inline void fpop(CPUX86State *env)
+{
+    env->fptags[env->fpstt] = 1; /* invalidate stack entry */
+    env->fpstt = (env->fpstt + 1) & 7;
+}
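+
+/* The x87 stack grows downward through the eight physical registers:
+   e.g. with fpstt == 0, fpush() makes register 7 the new top and marks
+   it valid, while fpop() re-invalidates the current top and wraps back
+   (mod 8). */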
+
+static inline floatx80 helper_fldt(CPUX86State *env, target_ulong ptr,
+                                   uintptr_t retaddr)
+{
+    CPU_LDoubleU temp;
+
+    temp.l.lower = cpu_ldq_data_ra(env, ptr, retaddr);
+    temp.l.upper = cpu_lduw_data_ra(env, ptr + 8, retaddr);
+    return temp.d;
+}
+
+static inline void helper_fstt(CPUX86State *env, floatx80 f, target_ulong ptr,
+                               uintptr_t retaddr)
+{
+    CPU_LDoubleU temp;
+
+    temp.d = f;
+    cpu_stq_data_ra(env, ptr, temp.l.lower, retaddr);
+    cpu_stw_data_ra(env, ptr + 8, temp.l.upper, retaddr);
+}
+
+/* x87 FPU helpers */
+
+static inline double floatx80_to_double(CPUX86State *env, floatx80 a)
+{
+    union {
+        float64 f64;
+        double d;
+    } u;
+
+    u.f64 = floatx80_to_float64(a, &env->fp_status);
+    return u.d;
+}
+
+static inline floatx80 double_to_floatx80(CPUX86State *env, double a)
+{
+    union {
+        float64 f64;
+        double d;
+    } u;
+
+    u.d = a;
+    return float64_to_floatx80(u.f64, &env->fp_status);
+}
+
+static void fpu_set_exception(CPUX86State *env, int mask)
+{
+    env->fpus |= mask;
+    if (env->fpus & (~env->fpuc & FPUC_EM)) {
+        env->fpus |= FPUS_SE | FPUS_B;
+    }
+}
+
+static inline floatx80 helper_fdiv(CPUX86State *env, floatx80 a, floatx80 b)
+{
+    if (floatx80_is_zero(b)) {
+        fpu_set_exception(env, FPUS_ZE);
+    }
+    return floatx80_div(a, b, &env->fp_status);
+}
+
+static void fpu_raise_exception(CPUX86State *env, uintptr_t retaddr)
+{
+    if (env->cr[0] & CR0_NE_MASK) {
+        raise_exception_ra(env, EXCP10_COPR, retaddr);
+    }
+#if !defined(CONFIG_USER_ONLY)
+    else {
+        cpu_set_ferr(env);
+    }
+#endif
+}
+
+void helper_flds_FT0(CPUX86State *env, uint32_t val)
+{
+    union {
+        float32 f;
+        uint32_t i;
+    } u;
+
+    u.i = val;
+    FT0 = float32_to_floatx80(u.f, &env->fp_status);
+}
+
+void helper_fldl_FT0(CPUX86State *env, uint64_t val)
+{
+    union {
+        float64 f;
+        uint64_t i;
+    } u;
+
+    u.i = val;
+    FT0 = float64_to_floatx80(u.f, &env->fp_status);
+}
+
+void helper_fildl_FT0(CPUX86State *env, int32_t val)
+{
+    FT0 = int32_to_floatx80(val, &env->fp_status);
+}
+
+void helper_flds_ST0(CPUX86State *env, uint32_t val)
+{
+    int new_fpstt;
+    union {
+        float32 f;
+        uint32_t i;
+    } u;
+
+    new_fpstt = (env->fpstt - 1) & 7;
+    u.i = val;
+    env->fpregs[new_fpstt].d = float32_to_floatx80(u.f, &env->fp_status);
+    env->fpstt = new_fpstt;
+    env->fptags[new_fpstt] = 0; /* validate stack entry */
+}
+
+void helper_fldl_ST0(CPUX86State *env, uint64_t val)
+{
+    int new_fpstt;
+    union {
+        float64 f;
+        uint64_t i;
+    } u;
+
+    new_fpstt = (env->fpstt - 1) & 7;
+    u.i = val;
+    env->fpregs[new_fpstt].d = float64_to_floatx80(u.f, &env->fp_status);
+    env->fpstt = new_fpstt;
+    env->fptags[new_fpstt] = 0; /* validate stack entry */
+}
+
+void helper_fildl_ST0(CPUX86State *env, int32_t val)
+{
+    int new_fpstt;
+
+    new_fpstt = (env->fpstt - 1) & 7;
+    env->fpregs[new_fpstt].d = int32_to_floatx80(val, &env->fp_status);
+    env->fpstt = new_fpstt;
+    env->fptags[new_fpstt] = 0; /* validate stack entry */
+}
+
+void helper_fildll_ST0(CPUX86State *env, int64_t val)
+{
+    int new_fpstt;
+
+    new_fpstt = (env->fpstt - 1) & 7;
+    env->fpregs[new_fpstt].d = int64_to_floatx80(val, &env->fp_status);
+    env->fpstt = new_fpstt;
+    env->fptags[new_fpstt] = 0; /* validate stack entry */
+}
+
+uint32_t helper_fsts_ST0(CPUX86State *env)
+{
+    union {
+        float32 f;
+        uint32_t i;
+    } u;
+
+    u.f = floatx80_to_float32(ST0, &env->fp_status);
+    return u.i;
+}
+
+uint64_t helper_fstl_ST0(CPUX86State *env)
+{
+    union {
+        float64 f;
+        uint64_t i;
+    } u;
+
+    u.f = floatx80_to_float64(ST0, &env->fp_status);
+    return u.i;
+}
+
+int32_t helper_fist_ST0(CPUX86State *env)
+{
+    int32_t val;
+
+    val = floatx80_to_int32(ST0, &env->fp_status);
+    if (val != (int16_t)val) {
+        val = -32768;
+    }
+    return val;
+}
+
+int32_t helper_fistl_ST0(CPUX86State *env)
+{
+    int32_t val;
+    signed char old_exp_flags;
+
+    old_exp_flags = get_float_exception_flags(&env->fp_status);
+    set_float_exception_flags(0, &env->fp_status);
+
+    val = floatx80_to_int32(ST0, &env->fp_status);
+    if (get_float_exception_flags(&env->fp_status) & float_flag_invalid) {
+        val = 0x80000000;
+    }
+    set_float_exception_flags(get_float_exception_flags(&env->fp_status)
+                                | old_exp_flags, &env->fp_status);
+    return val;
+}
+
+int64_t helper_fistll_ST0(CPUX86State *env)
+{
+    int64_t val;
+    signed char old_exp_flags;
+
+    old_exp_flags = get_float_exception_flags(&env->fp_status);
+    set_float_exception_flags(0, &env->fp_status);
+
+    val = floatx80_to_int64(ST0, &env->fp_status);
+    if (get_float_exception_flags(&env->fp_status) & float_flag_invalid) {
+        val = 0x8000000000000000ULL;
+    }
+    set_float_exception_flags(get_float_exception_flags(&env->fp_status)
+                                | old_exp_flags, &env->fp_status);
+    return val;
+}
+
+int32_t helper_fistt_ST0(CPUX86State *env)
+{
+    int32_t val;
+
+    val = floatx80_to_int32_round_to_zero(ST0, &env->fp_status);
+    if (val != (int16_t)val) {
+        val = -32768;
+    }
+    return val;
+}
+
+int32_t helper_fisttl_ST0(CPUX86State *env)
+{
+    return floatx80_to_int32_round_to_zero(ST0, &env->fp_status);
+}
+
+int64_t helper_fisttll_ST0(CPUX86State *env)
+{
+    return floatx80_to_int64_round_to_zero(ST0, &env->fp_status);
+}
+
+void helper_fldt_ST0(CPUX86State *env, target_ulong ptr)
+{
+    int new_fpstt;
+
+    new_fpstt = (env->fpstt - 1) & 7;
+    env->fpregs[new_fpstt].d = helper_fldt(env, ptr, GETPC());
+    env->fpstt = new_fpstt;
+    env->fptags[new_fpstt] = 0; /* validate stack entry */
+}
+
+void helper_fstt_ST0(CPUX86State *env, target_ulong ptr)
+{
+    helper_fstt(env, ST0, ptr, GETPC());
+}
+
+void helper_fpush(CPUX86State *env)
+{
+    fpush(env);
+}
+
+void helper_fpop(CPUX86State *env)
+{
+    fpop(env);
+}
+
+void helper_fdecstp(CPUX86State *env)
+{
+    env->fpstt = (env->fpstt - 1) & 7;
+    env->fpus &= ~0x4700;
+}
+
+void helper_fincstp(CPUX86State *env)
+{
+    env->fpstt = (env->fpstt + 1) & 7;
+    env->fpus &= ~0x4700;
+}
+
+/* FPU move */
+
+void helper_ffree_STN(CPUX86State *env, int st_index)
+{
+    env->fptags[(env->fpstt + st_index) & 7] = 1;
+}
+
+void helper_fmov_ST0_FT0(CPUX86State *env)
+{
+    ST0 = FT0;
+}
+
+void helper_fmov_FT0_STN(CPUX86State *env, int st_index)
+{
+    FT0 = ST(st_index);
+}
+
+void helper_fmov_ST0_STN(CPUX86State *env, int st_index)
+{
+    ST0 = ST(st_index);
+}
+
+void helper_fmov_STN_ST0(CPUX86State *env, int st_index)
+{
+    ST(st_index) = ST0;
+}
+
+void helper_fxchg_ST0_STN(CPUX86State *env, int st_index)
+{
+    floatx80 tmp;
+
+    tmp = ST(st_index);
+    ST(st_index) = ST0;
+    ST0 = tmp;
+}
+
+/* FPU operations */
+
+static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
+
+void helper_fcom_ST0_FT0(CPUX86State *env)
+{
+    int ret;
+
+    ret = floatx80_compare(ST0, FT0, &env->fp_status);
+    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
+}
+
+void helper_fucom_ST0_FT0(CPUX86State *env)
+{
+    int ret;
+
+    ret = floatx80_compare_quiet(ST0, FT0, &env->fp_status);
+    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
+}
+
+static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
+
+void helper_fcomi_ST0_FT0(CPUX86State *env)
+{
+    int eflags;
+    int ret;
+
+    ret = floatx80_compare(ST0, FT0, &env->fp_status);
+    eflags = cpu_cc_compute_all(env, CC_OP);
+    eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
+    CC_SRC = eflags;
+}
+
+void helper_fucomi_ST0_FT0(CPUX86State *env)
+{
+    int eflags;
+    int ret;
+
+    ret = floatx80_compare_quiet(ST0, FT0, &env->fp_status);
+    eflags = cpu_cc_compute_all(env, CC_OP);
+    eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
+    CC_SRC = eflags;
+}
+
+void helper_fadd_ST0_FT0(CPUX86State *env)
+{
+    ST0 = floatx80_add(ST0, FT0, &env->fp_status);
+}
+
+void helper_fmul_ST0_FT0(CPUX86State *env)
+{
+    ST0 = floatx80_mul(ST0, FT0, &env->fp_status);
+}
+
+void helper_fsub_ST0_FT0(CPUX86State *env)
+{
+    ST0 = floatx80_sub(ST0, FT0, &env->fp_status);
+}
+
+void helper_fsubr_ST0_FT0(CPUX86State *env)
+{
+    ST0 = floatx80_sub(FT0, ST0, &env->fp_status);
+}
+
+void helper_fdiv_ST0_FT0(CPUX86State *env)
+{
+    ST0 = helper_fdiv(env, ST0, FT0);
+}
+
+void helper_fdivr_ST0_FT0(CPUX86State *env)
+{
+    ST0 = helper_fdiv(env, FT0, ST0);
+}
+
+/* fp operations between STN and ST0 */
+
+void helper_fadd_STN_ST0(CPUX86State *env, int st_index)
+{
+    ST(st_index) = floatx80_add(ST(st_index), ST0, &env->fp_status);
+}
+
+void helper_fmul_STN_ST0(CPUX86State *env, int st_index)
+{
+    ST(st_index) = floatx80_mul(ST(st_index), ST0, &env->fp_status);
+}
+
+void helper_fsub_STN_ST0(CPUX86State *env, int st_index)
+{
+    ST(st_index) = floatx80_sub(ST(st_index), ST0, &env->fp_status);
+}
+
+void helper_fsubr_STN_ST0(CPUX86State *env, int st_index)
+{
+    ST(st_index) = floatx80_sub(ST0, ST(st_index), &env->fp_status);
+}
+
+void helper_fdiv_STN_ST0(CPUX86State *env, int st_index)
+{
+    floatx80 *p;
+
+    p = &ST(st_index);
+    *p = helper_fdiv(env, *p, ST0);
+}
+
+void helper_fdivr_STN_ST0(CPUX86State *env, int st_index)
+{
+    floatx80 *p;
+
+    p = &ST(st_index);
+    *p = helper_fdiv(env, ST0, *p);
+}
+
+/* misc FPU operations */
+void helper_fchs_ST0(CPUX86State *env)
+{
+    ST0 = floatx80_chs(ST0);
+}
+
+void helper_fabs_ST0(CPUX86State *env)
+{
+    ST0 = floatx80_abs(ST0);
+}
+
+void helper_fld1_ST0(CPUX86State *env)
+{
+    ST0 = floatx80_one;
+}
+
+void helper_fldl2t_ST0(CPUX86State *env)
+{
+    ST0 = floatx80_l2t;
+}
+
+void helper_fldl2e_ST0(CPUX86State *env)
+{
+    ST0 = floatx80_l2e;
+}
+
+void helper_fldpi_ST0(CPUX86State *env)
+{
+    ST0 = floatx80_pi;
+}
+
+void helper_fldlg2_ST0(CPUX86State *env)
+{
+    ST0 = floatx80_lg2;
+}
+
+void helper_fldln2_ST0(CPUX86State *env)
+{
+    ST0 = floatx80_ln2;
+}
+
+void helper_fldz_ST0(CPUX86State *env)
+{
+    ST0 = floatx80_zero;
+}
+
+void helper_fldz_FT0(CPUX86State *env)
+{
+    FT0 = floatx80_zero;
+}
+
+uint32_t helper_fnstsw(CPUX86State *env)
+{
+    return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
+}
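+
+/* e.g. with fpus == 0 and fpstt == 3 this returns 0x1800: the TOP field
+   occupies bits 13..11 of the FPU status word. */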
+
+uint32_t helper_fnstcw(CPUX86State *env)
+{
+    return env->fpuc;
+}
+
+void update_fp_status(CPUX86State *env)
+{
+    int rnd_type;
+
+    /* set rounding mode */
+    switch (env->fpuc & FPU_RC_MASK) {
+    default:
+    case FPU_RC_NEAR:
+        rnd_type = float_round_nearest_even;
+        break;
+    case FPU_RC_DOWN:
+        rnd_type = float_round_down;
+        break;
+    case FPU_RC_UP:
+        rnd_type = float_round_up;
+        break;
+    case FPU_RC_CHOP:
+        rnd_type = float_round_to_zero;
+        break;
+    }
+    set_float_rounding_mode(rnd_type, &env->fp_status);
+    switch ((env->fpuc >> 8) & 3) {
+    case 0:
+        rnd_type = 32;
+        break;
+    case 2:
+        rnd_type = 64;
+        break;
+    case 3:
+    default:
+        rnd_type = 80;
+        break;
+    }
+    set_floatx80_rounding_precision(rnd_type, &env->fp_status);
+}
+
+void helper_fldcw(CPUX86State *env, uint32_t val)
+{
+    cpu_set_fpuc(env, val);
+}
+
+void helper_fclex(CPUX86State *env)
+{
+    env->fpus &= 0x7f00;
+}
+
+void helper_fwait(CPUX86State *env)
+{
+    if (env->fpus & FPUS_SE) {
+        fpu_raise_exception(env, GETPC());
+    }
+}
+
+void helper_fninit(CPUX86State *env)
+{
+    env->fpus = 0;
+    env->fpstt = 0;
+    cpu_set_fpuc(env, 0x37f);
+    env->fptags[0] = 1;
+    env->fptags[1] = 1;
+    env->fptags[2] = 1;
+    env->fptags[3] = 1;
+    env->fptags[4] = 1;
+    env->fptags[5] = 1;
+    env->fptags[6] = 1;
+    env->fptags[7] = 1;
+}
+
+/* BCD ops */
+
+void helper_fbld_ST0(CPUX86State *env, target_ulong ptr)
+{
+    floatx80 tmp;
+    uint64_t val;
+    unsigned int v;
+    int i;
+
+    val = 0;
+    for (i = 8; i >= 0; i--) {
+        v = cpu_ldub_data_ra(env, ptr + i, GETPC());
+        val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
+    }
+    tmp = int64_to_floatx80(val, &env->fp_status);
+    if (cpu_ldub_data_ra(env, ptr + 9, GETPC()) & 0x80) {
+        tmp = floatx80_chs(tmp);
+    }
+    fpush(env);
+    ST0 = tmp;
+}
+
+void helper_fbst_ST0(CPUX86State *env, target_ulong ptr)
+{
+    int v;
+    target_ulong mem_ref, mem_end;
+    int64_t val;
+
+    val = floatx80_to_int64(ST0, &env->fp_status);
+    mem_ref = ptr;
+    mem_end = mem_ref + 9;
+    if (val < 0) {
+        cpu_stb_data_ra(env, mem_end, 0x80, GETPC());
+        val = -val;
+    } else {
+        cpu_stb_data_ra(env, mem_end, 0x00, GETPC());
+    }
+    while (mem_ref < mem_end) {
+        if (val == 0) {
+            break;
+        }
+        v = val % 100;
+        val = val / 100;
+        v = ((v / 10) << 4) | (v % 10);
+        cpu_stb_data_ra(env, mem_ref++, v, GETPC());
+    }
+    while (mem_ref < mem_end) {
+        cpu_stb_data_ra(env, mem_ref++, 0, GETPC());
+    }
+}
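+
+/* Packed-BCD layout, for illustration: storing +1234 writes 0x34 then
+   0x12 at ptr (two decimal digits per byte, least-significant pair
+   first), zero-fills the remaining digit bytes, and writes 0x00 to the
+   sign byte at ptr + 9. */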
+
+void helper_f2xm1(CPUX86State *env)
+{
+    double val = floatx80_to_double(env, ST0);
+
+    val = pow(2.0, val) - 1.0;
+    ST0 = double_to_floatx80(env, val);
+}
+
+void helper_fyl2x(CPUX86State *env)
+{
+    double fptemp = floatx80_to_double(env, ST0);
+
+    if (fptemp > 0.0) {
+        fptemp = log(fptemp) / log(2.0); /* log2(ST) */
+        fptemp *= floatx80_to_double(env, ST1);
+        ST1 = double_to_floatx80(env, fptemp);
+        fpop(env);
+    } else {
+        env->fpus &= ~0x4700;
+        env->fpus |= 0x400;
+    }
+}
+
+void helper_fptan(CPUX86State *env)
+{
+    double fptemp = floatx80_to_double(env, ST0);
+
+    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
+        env->fpus |= 0x400;
+    } else {
+        fptemp = tan(fptemp);
+        ST0 = double_to_floatx80(env, fptemp);
+        fpush(env);
+        ST0 = floatx80_one;
+        env->fpus &= ~0x400; /* C2 <-- 0 */
+        /* the above code is for |arg| < 2**52 only */
+    }
+}
+
+void helper_fpatan(CPUX86State *env)
+{
+    double fptemp, fpsrcop;
+
+    fpsrcop = floatx80_to_double(env, ST1);
+    fptemp = floatx80_to_double(env, ST0);
+    ST1 = double_to_floatx80(env, atan2(fpsrcop, fptemp));
+    fpop(env);
+}
+
+void helper_fxtract(CPUX86State *env)
+{
+    CPU_LDoubleU temp;
+
+    temp.d = ST0;
+
+    if (floatx80_is_zero(ST0)) {
+        /* Easy way to generate -inf and raise the division-by-zero exception */
+        ST0 = floatx80_div(floatx80_chs(floatx80_one), floatx80_zero,
+                           &env->fp_status);
+        fpush(env);
+        ST0 = temp.d;
+    } else {
+        int expdif;
+
+        expdif = EXPD(temp) - EXPBIAS;
+        /* DP exponent bias */
+        ST0 = int32_to_floatx80(expdif, &env->fp_status);
+        fpush(env);
+        BIASEXPONENT(temp);
+        ST0 = temp.d;
+    }
+}
+
+void helper_fprem1(CPUX86State *env)
+{
+    double st0, st1, dblq, fpsrcop, fptemp;
+    CPU_LDoubleU fpsrcop1, fptemp1;
+    int expdif;
+    signed long long int q;
+
+    st0 = floatx80_to_double(env, ST0);
+    st1 = floatx80_to_double(env, ST1);
+
+    if (isinf(st0) || isnan(st0) || isnan(st1) || (st1 == 0.0)) {
+        ST0 = double_to_floatx80(env, 0.0 / 0.0); /* NaN */
+        env->fpus &= ~0x4700; /* (C3,C2,C1,C0) <-- 0000 */
+        return;
+    }
+
+    fpsrcop = st0;
+    fptemp = st1;
+    fpsrcop1.d = ST0;
+    fptemp1.d = ST1;
+    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
+
+    if (expdif < 0) {
+        /* optimisation? taken from the AMD docs */
+        env->fpus &= ~0x4700; /* (C3,C2,C1,C0) <-- 0000 */
+        /* ST0 is unchanged */
+        return;
+    }
+
+    if (expdif < 53) {
+        dblq = fpsrcop / fptemp;
+        /* round dblq towards nearest integer */
+        dblq = rint(dblq);
+        st0 = fpsrcop - fptemp * dblq;
+
+        /* convert dblq to q by truncating towards zero */
+        if (dblq < 0.0) {
+            q = (signed long long int)(-dblq);
+        } else {
+            q = (signed long long int)dblq;
+        }
+
+        env->fpus &= ~0x4700; /* (C3,C2,C1,C0) <-- 0000 */
+        /* (C0,C3,C1) <-- (q2,q1,q0) */
+        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
+        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
+        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
+    } else {
+        env->fpus |= 0x400;  /* C2 <-- 1 */
+        fptemp = pow(2.0, expdif - 50);
+        fpsrcop = (st0 / st1) / fptemp;
+        /* fpsrcop = integer obtained by chopping */
+        fpsrcop = (fpsrcop < 0.0) ?
+                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
+        st0 -= (st1 * fpsrcop * fptemp);
+    }
+    ST0 = double_to_floatx80(env, st0);
+}
+
+void helper_fprem(CPUX86State *env)
+{
+    double st0, st1, dblq, fpsrcop, fptemp;
+    CPU_LDoubleU fpsrcop1, fptemp1;
+    int expdif;
+    signed long long int q;
+
+    st0 = floatx80_to_double(env, ST0);
+    st1 = floatx80_to_double(env, ST1);
+
+    if (isinf(st0) || isnan(st0) || isnan(st1) || (st1 == 0.0)) {
+        ST0 = double_to_floatx80(env, 0.0 / 0.0); /* NaN */
+        env->fpus &= ~0x4700; /* (C3,C2,C1,C0) <-- 0000 */
+        return;
+    }
+
+    fpsrcop = st0;
+    fptemp = st1;
+    fpsrcop1.d = ST0;
+    fptemp1.d = ST1;
+    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
+
+    if (expdif < 0) {
+        /* optimisation? taken from the AMD docs */
+        env->fpus &= ~0x4700; /* (C3,C2,C1,C0) <-- 0000 */
+        /* ST0 is unchanged */
+        return;
+    }
+
+    if (expdif < 53) {
+        dblq = fpsrcop / fptemp; /* ST0 / ST1 */
+        /* round dblq towards zero */
+        dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
+        st0 = fpsrcop - fptemp * dblq; /* fpsrcop is ST0 */
+
+        /* convert dblq to q by truncating towards zero */
+        if (dblq < 0.0) {
+            q = (signed long long int)(-dblq);
+        } else {
+            q = (signed long long int)dblq;
+        }
+
+        env->fpus &= ~0x4700; /* (C3,C2,C1,C0) <-- 0000 */
+        /* (C0,C3,C1) <-- (q2,q1,q0) */
+        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
+        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
+        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
+    } else {
+        int N = 32 + (expdif % 32); /* as per AMD docs */
+
+        env->fpus |= 0x400;  /* C2 <-- 1 */
+        fptemp = pow(2.0, (double)(expdif - N));
+        fpsrcop = (st0 / st1) / fptemp;
+        /* fpsrcop = integer obtained by chopping */
+        fpsrcop = (fpsrcop < 0.0) ?
+                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
+        st0 -= (st1 * fpsrcop * fptemp);
+    }
+    ST0 = double_to_floatx80(env, st0);
+}
+
+void helper_fyl2xp1(CPUX86State *env)
+{
+    double fptemp = floatx80_to_double(env, ST0);
+
+    if ((fptemp + 1.0) > 0.0) {
+        fptemp = log(fptemp + 1.0) / log(2.0); /* log2(ST + 1.0) */
+        fptemp *= floatx80_to_double(env, ST1);
+        ST1 = double_to_floatx80(env, fptemp);
+        fpop(env);
+    } else {
+        env->fpus &= ~0x4700;
+        env->fpus |= 0x400;
+    }
+}
+
+void helper_fsqrt(CPUX86State *env)
+{
+    if (floatx80_is_neg(ST0)) {
+        env->fpus &= ~0x4700;  /* (C3,C2,C1,C0) <-- 0000 */
+        env->fpus |= 0x400;
+    }
+    ST0 = floatx80_sqrt(ST0, &env->fp_status);
+}
+
+void helper_fsincos(CPUX86State *env)
+{
+    double fptemp = floatx80_to_double(env, ST0);
+
+    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
+        env->fpus |= 0x400;
+    } else {
+        ST0 = double_to_floatx80(env, sin(fptemp));
+        fpush(env);
+        ST0 = double_to_floatx80(env, cos(fptemp));
+        env->fpus &= ~0x400;  /* C2 <-- 0 */
+        /* the above code is for |arg| < 2**63 only */
+    }
+}
+
+void helper_frndint(CPUX86State *env)
+{
+    ST0 = floatx80_round_to_int(ST0, &env->fp_status);
+}
+
+void helper_fscale(CPUX86State *env)
+{
+    if (floatx80_is_any_nan(ST1)) {
+        ST0 = ST1;
+    } else {
+        int n = floatx80_to_int32_round_to_zero(ST1, &env->fp_status);
+        ST0 = floatx80_scalbn(ST0, n, &env->fp_status);
+    }
+}
+
+void helper_fsin(CPUX86State *env)
+{
+    double fptemp = floatx80_to_double(env, ST0);
+
+    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
+        env->fpus |= 0x400;
+    } else {
+        ST0 = double_to_floatx80(env, sin(fptemp));
+        env->fpus &= ~0x400;  /* C2 <-- 0 */
+        /* the above code is for |arg| < 2**53 only */
+    }
+}
+
+void helper_fcos(CPUX86State *env)
+{
+    double fptemp = floatx80_to_double(env, ST0);
+
+    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
+        env->fpus |= 0x400;
+    } else {
+        ST0 = double_to_floatx80(env, cos(fptemp));
+        env->fpus &= ~0x400;  /* C2 <-- 0 */
+        /* the above code is for |arg| < 2**63 only */
+    }
+}
+
+void helper_fxam_ST0(CPUX86State *env)
+{
+    CPU_LDoubleU temp;
+    int expdif;
+
+    temp.d = ST0;
+
+    env->fpus &= ~0x4700; /* (C3,C2,C1,C0) <-- 0000 */
+    if (SIGND(temp)) {
+        env->fpus |= 0x200; /* C1 <-- 1 */
+    }
+
+    /* XXX: test fptags too */
+    expdif = EXPD(temp);
+    if (expdif == MAXEXPD) {
+        if (MANTD(temp) == 0x8000000000000000ULL) {
+            env->fpus |= 0x500; /* Infinity */
+        } else {
+            env->fpus |= 0x100; /* NaN */
+        }
+    } else if (expdif == 0) {
+        if (MANTD(temp) == 0) {
+            env->fpus |= 0x4000; /* Zero */
+        } else {
+            env->fpus |= 0x4400; /* Denormal */
+        }
+    } else {
+        env->fpus |= 0x400;
+    }
+}
+
+static void do_fstenv(CPUX86State *env, target_ulong ptr, int data32,
+                      uintptr_t retaddr)
+{
+    int fpus, fptag, exp, i;
+    uint64_t mant;
+    CPU_LDoubleU tmp;
+
+    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
+    fptag = 0;
+    for (i = 7; i >= 0; i--) {
+        fptag <<= 2;
+        if (env->fptags[i]) {
+            fptag |= 3;
+        } else {
+            tmp.d = env->fpregs[i].d;
+            exp = EXPD(tmp);
+            mant = MANTD(tmp);
+            if (exp == 0 && mant == 0) {
+                /* zero */
+                fptag |= 1;
+            } else if (exp == 0 || exp == MAXEXPD
+                       || (mant & (1LL << 63)) == 0) {
+                /* NaNs, infinity, denormal */
+                fptag |= 2;
+            }
+        }
+    }
+    if (data32) {
+        /* 32 bit */
+        cpu_stl_data_ra(env, ptr, env->fpuc, retaddr);
+        cpu_stl_data_ra(env, ptr + 4, fpus, retaddr);
+        cpu_stl_data_ra(env, ptr + 8, fptag, retaddr);
+        cpu_stl_data_ra(env, ptr + 12, 0, retaddr); /* fpip */
+        cpu_stl_data_ra(env, ptr + 16, 0, retaddr); /* fpcs */
+        cpu_stl_data_ra(env, ptr + 20, 0, retaddr); /* fpoo */
+        cpu_stl_data_ra(env, ptr + 24, 0, retaddr); /* fpos */
+    } else {
+        /* 16 bit */
+        cpu_stw_data_ra(env, ptr, env->fpuc, retaddr);
+        cpu_stw_data_ra(env, ptr + 2, fpus, retaddr);
+        cpu_stw_data_ra(env, ptr + 4, fptag, retaddr);
+        cpu_stw_data_ra(env, ptr + 6, 0, retaddr);
+        cpu_stw_data_ra(env, ptr + 8, 0, retaddr);
+        cpu_stw_data_ra(env, ptr + 10, 0, retaddr);
+        cpu_stw_data_ra(env, ptr + 12, 0, retaddr);
+    }
+}
+
+void helper_fstenv(CPUX86State *env, target_ulong ptr, int data32)
+{
+    do_fstenv(env, ptr, data32, GETPC());
+}
+
+static void do_fldenv(CPUX86State *env, target_ulong ptr, int data32,
+                      uintptr_t retaddr)
+{
+    int i, fpus, fptag;
+
+    if (data32) {
+        cpu_set_fpuc(env, cpu_lduw_data_ra(env, ptr, retaddr));
+        fpus = cpu_lduw_data_ra(env, ptr + 4, retaddr);
+        fptag = cpu_lduw_data_ra(env, ptr + 8, retaddr);
+    } else {
+        cpu_set_fpuc(env, cpu_lduw_data_ra(env, ptr, retaddr));
+        fpus = cpu_lduw_data_ra(env, ptr + 2, retaddr);
+        fptag = cpu_lduw_data_ra(env, ptr + 4, retaddr);
+    }
+    env->fpstt = (fpus >> 11) & 7;
+    env->fpus = fpus & ~0x3800;
+    for (i = 0; i < 8; i++) {
+        env->fptags[i] = ((fptag & 3) == 3);
+        fptag >>= 2;
+    }
+}
+
+void helper_fldenv(CPUX86State *env, target_ulong ptr, int data32)
+{
+    do_fldenv(env, ptr, data32, GETPC());
+}
+
+void helper_fsave(CPUX86State *env, target_ulong ptr, int data32)
+{
+    floatx80 tmp;
+    int i;
+
+    do_fstenv(env, ptr, data32, GETPC());
+
+    ptr += (14 << data32);
+    for (i = 0; i < 8; i++) {
+        tmp = ST(i);
+        helper_fstt(env, tmp, ptr, GETPC());
+        ptr += 10;
+    }
+
+    /* fninit */
+    env->fpus = 0;
+    env->fpstt = 0;
+    cpu_set_fpuc(env, 0x37f);
+    env->fptags[0] = 1;
+    env->fptags[1] = 1;
+    env->fptags[2] = 1;
+    env->fptags[3] = 1;
+    env->fptags[4] = 1;
+    env->fptags[5] = 1;
+    env->fptags[6] = 1;
+    env->fptags[7] = 1;
+}
+
+void helper_frstor(CPUX86State *env, target_ulong ptr, int data32)
+{
+    floatx80 tmp;
+    int i;
+
+    do_fldenv(env, ptr, data32, GETPC());
+    ptr += (14 << data32);
+
+    for (i = 0; i < 8; i++) {
+        tmp = helper_fldt(env, ptr, GETPC());
+        ST(i) = tmp;
+        ptr += 10;
+    }
+}
+
+#if defined(CONFIG_USER_ONLY)
+void cpu_x86_fsave(CPUX86State *env, target_ulong ptr, int data32)
+{
+    helper_fsave(env, ptr, data32);
+}
+
+void cpu_x86_frstor(CPUX86State *env, target_ulong ptr, int data32)
+{
+    helper_frstor(env, ptr, data32);
+}
+#endif
+
+#define XO(X)  offsetof(X86XSaveArea, X)
+
+static void do_xsave_fpu(CPUX86State *env, target_ulong ptr, uintptr_t ra)
+{
+    int fpus, fptag, i;
+    target_ulong addr;
+
+    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
+    fptag = 0;
+    for (i = 0; i < 8; i++) {
+        fptag |= (env->fptags[i] << i);
+    }
+
+    cpu_stw_data_ra(env, ptr + XO(legacy.fcw), env->fpuc, ra);
+    cpu_stw_data_ra(env, ptr + XO(legacy.fsw), fpus, ra);
+    cpu_stw_data_ra(env, ptr + XO(legacy.ftw), fptag ^ 0xff, ra);
+
+    /* In 32-bit mode this is eip, sel, dp, sel.
+       In 64-bit mode this is rip, rdp.
+       But in either case we don't write actual data, just zeros.  */
+    cpu_stq_data_ra(env, ptr + XO(legacy.fpip), 0, ra); /* eip+sel; rip */
+    cpu_stq_data_ra(env, ptr + XO(legacy.fpdp), 0, ra); /* edp+sel; rdp */
+
+    addr = ptr + XO(legacy.fpregs);
+    for (i = 0; i < 8; i++) {
+        floatx80 tmp = ST(i);
+        helper_fstt(env, tmp, addr, ra);
+        addr += 16;
+    }
+}
+
+static void do_xsave_mxcsr(CPUX86State *env, target_ulong ptr, uintptr_t ra)
+{
+    cpu_stl_data_ra(env, ptr + XO(legacy.mxcsr), env->mxcsr, ra);
+    cpu_stl_data_ra(env, ptr + XO(legacy.mxcsr_mask), 0x0000ffff, ra);
+}
+
+static void do_xsave_sse(CPUX86State *env, target_ulong ptr, uintptr_t ra)
+{
+    int i, nb_xmm_regs;
+    target_ulong addr;
+
+    if (env->hflags & HF_CS64_MASK) {
+        nb_xmm_regs = 16;
+    } else {
+        nb_xmm_regs = 8;
+    }
+
+    addr = ptr + XO(legacy.xmm_regs);
+    for (i = 0; i < nb_xmm_regs; i++) {
+        cpu_stq_data_ra(env, addr, env->xmm_regs[i].ZMM_Q(0), ra);
+        cpu_stq_data_ra(env, addr + 8, env->xmm_regs[i].ZMM_Q(1), ra);
+        addr += 16;
+    }
+}
+
+static void do_xsave_bndregs(CPUX86State *env, target_ulong ptr, uintptr_t ra)
+{
+    target_ulong addr = ptr + offsetof(XSaveBNDREG, bnd_regs);
+    int i;
+
+    for (i = 0; i < 4; i++, addr += 16) {
+        cpu_stq_data_ra(env, addr, env->bnd_regs[i].lb, ra);
+        cpu_stq_data_ra(env, addr + 8, env->bnd_regs[i].ub, ra);
+    }
+}
+
+static void do_xsave_bndcsr(CPUX86State *env, target_ulong ptr, uintptr_t ra)
+{
+    cpu_stq_data_ra(env, ptr + offsetof(XSaveBNDCSR, bndcsr.cfgu),
+                    env->bndcs_regs.cfgu, ra);
+    cpu_stq_data_ra(env, ptr + offsetof(XSaveBNDCSR, bndcsr.sts),
+                    env->bndcs_regs.sts, ra);
+}
+
+static void do_xsave_pkru(CPUX86State *env, target_ulong ptr, uintptr_t ra)
+{
+    cpu_stq_data_ra(env, ptr, env->pkru, ra);
+}
+
+void helper_fxsave(CPUX86State *env, target_ulong ptr)
+{
+    uintptr_t ra = GETPC();
+
+    /* The operand must be 16 byte aligned */
+    if (ptr & 0xf) {
+        raise_exception_ra(env, EXCP0D_GPF, ra);
+    }
+
+    do_xsave_fpu(env, ptr, ra);
+
+    if (env->cr[4] & CR4_OSFXSR_MASK) {
+        do_xsave_mxcsr(env, ptr, ra);
+        /* Fast FXSAVE leaves out the XMM registers */
+        if (!(env->efer & MSR_EFER_FFXSR)
+            || (env->hflags & HF_CPL_MASK)
+            || !(env->hflags & HF_LMA_MASK)) {
+            do_xsave_sse(env, ptr, ra);
+        }
+    }
+}
+
+static uint64_t get_xinuse(CPUX86State *env)
+{
+    uint64_t inuse = -1;
+
+    /* For the most part, we don't track XINUSE.  We could calculate it
+       here for all components, but it's probably less work to simply
+       indicate that everything is in use.  That said, the state of
+       BNDREGS is important enough to track in HFLAGS, so we might as
+       well use that here.  */
+    if ((env->hflags & HF_MPX_IU_MASK) == 0) {
+        inuse &= ~XSTATE_BNDREGS_MASK;
+    }
+    return inuse;
+}
+
+static void do_xsave(CPUX86State *env, target_ulong ptr, uint64_t rfbm,
+                     uint64_t inuse, uint64_t opt, uintptr_t ra)
+{
+    uint64_t old_bv, new_bv;
+
+    /* The OS must have enabled XSAVE.  */
+    if (!(env->cr[4] & CR4_OSXSAVE_MASK)) {
+        raise_exception_ra(env, EXCP06_ILLOP, ra);
+    }
+
+    /* The operand must be 64 byte aligned.  */
+    if (ptr & 63) {
+        raise_exception_ra(env, EXCP0D_GPF, ra);
+    }
+
+    /* Never save anything not enabled by XCR0.  */
+    rfbm &= env->xcr0;
+    opt &= rfbm;
+
+    if (opt & XSTATE_FP_MASK) {
+        do_xsave_fpu(env, ptr, ra);
+    }
+    if (rfbm & XSTATE_SSE_MASK) {
+        /* Note that saving MXCSR is not suppressed by XSAVEOPT.  */
+        do_xsave_mxcsr(env, ptr, ra);
+    }
+    if (opt & XSTATE_SSE_MASK) {
+        do_xsave_sse(env, ptr, ra);
+    }
+    if (opt & XSTATE_BNDREGS_MASK) {
+        do_xsave_bndregs(env, ptr + XO(bndreg_state), ra);
+    }
+    if (opt & XSTATE_BNDCSR_MASK) {
+        do_xsave_bndcsr(env, ptr + XO(bndcsr_state), ra);
+    }
+    if (opt & XSTATE_PKRU_MASK) {
+        do_xsave_pkru(env, ptr + XO(pkru_state), ra);
+    }
+
+    /* Update the XSTATE_BV field.  */
+    old_bv = cpu_ldq_data_ra(env, ptr + XO(header.xstate_bv), ra);
+    new_bv = (old_bv & ~rfbm) | (inuse & rfbm);
+    cpu_stq_data_ra(env, ptr + XO(header.xstate_bv), new_bv, ra);
+}
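+
+/* Worked example: with env->xcr0 == (XSTATE_FP_MASK | XSTATE_SSE_MASK),
+   an XSAVE with rfbm == -1 writes at most the x87 and SSE areas, and the
+   updated XSTATE_BV keeps its old bits outside rfbm while taking the
+   in-use bits inside it. */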
+
+void helper_xsave(CPUX86State *env, target_ulong ptr, uint64_t rfbm)
+{
+    do_xsave(env, ptr, rfbm, get_xinuse(env), -1, GETPC());
+}
+
+void helper_xsaveopt(CPUX86State *env, target_ulong ptr, uint64_t rfbm)
+{
+    uint64_t inuse = get_xinuse(env);
+    do_xsave(env, ptr, rfbm, inuse, inuse, GETPC());
+}
+
+static void do_xrstor_fpu(CPUX86State *env, target_ulong ptr, uintptr_t ra)
+{
+    int i, fpuc, fpus, fptag;
+    target_ulong addr;
+
+    fpuc = cpu_lduw_data_ra(env, ptr + XO(legacy.fcw), ra);
+    fpus = cpu_lduw_data_ra(env, ptr + XO(legacy.fsw), ra);
+    fptag = cpu_lduw_data_ra(env, ptr + XO(legacy.ftw), ra);
+    cpu_set_fpuc(env, fpuc);
+    env->fpstt = (fpus >> 11) & 7;
+    env->fpus = fpus & ~0x3800;
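+    /* The saved tag word is abridged: one bit per register, set when the
+       register is valid.  env->fptags uses 1 for empty, hence the
+       inversion below.  */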
+    fptag ^= 0xff;
+    for (i = 0; i < 8; i++) {
+        env->fptags[i] = ((fptag >> i) & 1);
+    }
+
+    addr = ptr + XO(legacy.fpregs);
+    for (i = 0; i < 8; i++) {
+        floatx80 tmp = helper_fldt(env, addr, ra);
+        ST(i) = tmp;
+        addr += 16;
+    }
+}
+
+static void do_xrstor_mxcsr(CPUX86State *env, target_ulong ptr, uintptr_t ra)
+{
+    cpu_set_mxcsr(env, cpu_ldl_data_ra(env, ptr + XO(legacy.mxcsr), ra));
+}
+
+static void do_xrstor_sse(CPUX86State *env, target_ulong ptr, uintptr_t ra)
+{
+    int i, nb_xmm_regs;
+    target_ulong addr;
+
+    if (env->hflags & HF_CS64_MASK) {
+        nb_xmm_regs = 16;
+    } else {
+        nb_xmm_regs = 8;
+    }
+
+    addr = ptr + XO(legacy.xmm_regs);
+    for (i = 0; i < nb_xmm_regs; i++) {
+        env->xmm_regs[i].ZMM_Q(0) = cpu_ldq_data_ra(env, addr, ra);
+        env->xmm_regs[i].ZMM_Q(1) = cpu_ldq_data_ra(env, addr + 8, ra);
+        addr += 16;
+    }
+}
+
+static void do_xrstor_bndregs(CPUX86State *env, target_ulong ptr, uintptr_t ra)
+{
+    target_ulong addr = ptr + offsetof(XSaveBNDREG, bnd_regs);
+    int i;
+
+    for (i = 0; i < 4; i++, addr += 16) {
+        env->bnd_regs[i].lb = cpu_ldq_data_ra(env, addr, ra);
+        env->bnd_regs[i].ub = cpu_ldq_data_ra(env, addr + 8, ra);
+    }
+}
+
+static void do_xrstor_bndcsr(CPUX86State *env, target_ulong ptr, uintptr_t ra)
+{
+    /* FIXME: Extend highest implemented bit of linear address.  */
+    env->bndcs_regs.cfgu
+        = cpu_ldq_data_ra(env, ptr + offsetof(XSaveBNDCSR, bndcsr.cfgu), ra);
+    env->bndcs_regs.sts
+        = cpu_ldq_data_ra(env, ptr + offsetof(XSaveBNDCSR, bndcsr.sts), ra);
+}
+
+static void do_xrstor_pkru(CPUX86State *env, target_ulong ptr, uintptr_t ra)
+{
+    env->pkru = cpu_ldq_data_ra(env, ptr, ra);
+}
+
+void helper_fxrstor(CPUX86State *env, target_ulong ptr)
+{
+    uintptr_t ra = GETPC();
+
+    /* The operand must be 16 byte aligned */
+    if (ptr & 0xf) {
+        raise_exception_ra(env, EXCP0D_GPF, ra);
+    }
+
+    do_xrstor_fpu(env, ptr, ra);
+
+    if (env->cr[4] & CR4_OSFXSR_MASK) {
+        do_xrstor_mxcsr(env, ptr, ra);
+        /* Fast FXRSTOR leaves out the XMM registers */
+        if (!(env->efer & MSR_EFER_FFXSR)
+            || (env->hflags & HF_CPL_MASK)
+            || !(env->hflags & HF_LMA_MASK)) {
+            do_xrstor_sse(env, ptr, ra);
+        }
+    }
+}
+
+void helper_xrstor(CPUX86State *env, target_ulong ptr, uint64_t rfbm)
+{
+    uintptr_t ra = GETPC();
+    uint64_t xstate_bv, xcomp_bv, reserve0;
+
+    rfbm &= env->xcr0;
+
+    /* The OS must have enabled XSAVE.  */
+    if (!(env->cr[4] & CR4_OSXSAVE_MASK)) {
+        raise_exception_ra(env, EXCP06_ILLOP, ra);
+    }
+
+    /* The operand must be 64 byte aligned.  */
+    if (ptr & 63) {
+        raise_exception_ra(env, EXCP0D_GPF, ra);
+    }
+
+    xstate_bv = cpu_ldq_data_ra(env, ptr + XO(header.xstate_bv), ra);
+
+    if ((int64_t)xstate_bv < 0) {
+        /* FIXME: Compact form.  */
+        raise_exception_ra(env, EXCP0D_GPF, ra);
+    }
+
+    /* Standard form.  */
+
+    /* The XSTATE_BV field must not set bits not present in XCR0.  */
+    if (xstate_bv & ~env->xcr0) {
+        raise_exception_ra(env, EXCP0D_GPF, ra);
+    }
+
+    /* The XCOMP_BV field must be zero.  Note that, as of the April 2016
+       revision, the description of the XSAVE Header (Vol 1, Sec 13.4.2)
+       describes only XCOMP_BV, but the description of the standard form
+       of XRSTOR (Vol 1, Sec 13.8.1) checks bytes 23:8 for zero, which
+       includes the next 64-bit field.  */
+    xcomp_bv = cpu_ldq_data_ra(env, ptr + XO(header.xcomp_bv), ra);
+    reserve0 = cpu_ldq_data_ra(env, ptr + XO(header.reserve0), ra);
+    if (xcomp_bv || reserve0) {
+        raise_exception_ra(env, EXCP0D_GPF, ra);
+    }
+
+    if (rfbm & XSTATE_FP_MASK) {
+        if (xstate_bv & XSTATE_FP_MASK) {
+            do_xrstor_fpu(env, ptr, ra);
+        } else {
+            helper_fninit(env);
+            memset(env->fpregs, 0, sizeof(env->fpregs));
+        }
+    }
+    if (rfbm & XSTATE_SSE_MASK) {
+        /* Note that the standard form of XRSTOR loads MXCSR from memory
+           whether or not the XSTATE_BV bit is set.  */
+        do_xrstor_mxcsr(env, ptr, ra);
+        if (xstate_bv & XSTATE_SSE_MASK) {
+            do_xrstor_sse(env, ptr, ra);
+        } else {
+            /* ??? When AVX is implemented, we may have to be more
+               selective in the clearing.  */
+            memset(env->xmm_regs, 0, sizeof(env->xmm_regs));
+        }
+    }
+    if (rfbm & XSTATE_BNDREGS_MASK) {
+        if (xstate_bv & XSTATE_BNDREGS_MASK) {
+            do_xrstor_bndregs(env, ptr + XO(bndreg_state), ra);
+            env->hflags |= HF_MPX_IU_MASK;
+        } else {
+            memset(env->bnd_regs, 0, sizeof(env->bnd_regs));
+            env->hflags &= ~HF_MPX_IU_MASK;
+        }
+    }
+    if (rfbm & XSTATE_BNDCSR_MASK) {
+        if (xstate_bv & XSTATE_BNDCSR_MASK) {
+            do_xrstor_bndcsr(env, ptr + XO(bndcsr_state), ra);
+        } else {
+            memset(&env->bndcs_regs, 0, sizeof(env->bndcs_regs));
+        }
+        cpu_sync_bndcs_hflags(env);
+    }
+    if (rfbm & XSTATE_PKRU_MASK) {
+        uint64_t old_pkru = env->pkru;
+        if (xstate_bv & XSTATE_PKRU_MASK) {
+            do_xrstor_pkru(env, ptr + XO(pkru_state), ra);
+        } else {
+            env->pkru = 0;
+        }
+        if (env->pkru != old_pkru) {
+            CPUState *cs = CPU(x86_env_get_cpu(env));
+            tlb_flush(cs, 1);
+        }
+    }
+}
+
+#undef XO
+
+uint64_t helper_xgetbv(CPUX86State *env, uint32_t ecx)
+{
+    /* The OS must have enabled XSAVE.  */
+    if (!(env->cr[4] & CR4_OSXSAVE_MASK)) {
+        raise_exception_ra(env, EXCP06_ILLOP, GETPC());
+    }
+
+    switch (ecx) {
+    case 0:
+        return env->xcr0;
+    case 1:
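+        /* XGETBV with ECX=1 returns XCR0 & XINUSE, gated on the
+           CPUID XGETBV1 feature bit.  */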
+        if (env->features[FEAT_XSAVE] & CPUID_XSAVE_XGETBV1) {
+            return env->xcr0 & get_xinuse(env);
+        }
+        break;
+    }
+    raise_exception_ra(env, EXCP0D_GPF, GETPC());
+}
+
+void helper_xsetbv(CPUX86State *env, uint32_t ecx, uint64_t mask)
+{
+    uint32_t dummy, ena_lo, ena_hi;
+    uint64_t ena;
+
+    /* The OS must have enabled XSAVE.  */
+    if (!(env->cr[4] & CR4_OSXSAVE_MASK)) {
+        raise_exception_ra(env, EXCP06_ILLOP, GETPC());
+    }
+
+    /* Only XCR0 is defined at present; the FPU may not be disabled.  */
+    if (ecx != 0 || (mask & XSTATE_FP_MASK) == 0) {
+        goto do_gpf;
+    }
+
+    /* Disallow enabling unimplemented features.  */
+    cpu_x86_cpuid(env, 0x0d, 0, &ena_lo, &dummy, &dummy, &ena_hi);
+    ena = ((uint64_t)ena_hi << 32) | ena_lo;
+    if (mask & ~ena) {
+        goto do_gpf;
+    }
+
+    /* Disallow enabling only half of MPX.  */
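+    /* Multiplying by BNDCSR/BNDREGS shifts the BNDREGS bit into the
+       BNDCSR position; the XOR is then set in the BNDCSR bit iff
+       exactly one of the two MPX bits is set.  */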
+    if ((mask ^ (mask * (XSTATE_BNDCSR_MASK / XSTATE_BNDREGS_MASK)))
+        & XSTATE_BNDCSR_MASK) {
+        goto do_gpf;
+    }
+
+    env->xcr0 = mask;
+    cpu_sync_bndcs_hflags(env);
+    return;
+
+ do_gpf:
+    raise_exception_ra(env, EXCP0D_GPF, GETPC());
+}
+
+void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, floatx80 f)
+{
+    CPU_LDoubleU temp;
+
+    temp.d = f;
+    *pmant = temp.l.lower;
+    *pexp = temp.l.upper;
+}
+
+floatx80 cpu_set_fp80(uint64_t mant, uint16_t upper)
+{
+    CPU_LDoubleU temp;
+
+    temp.l.upper = upper;
+    temp.l.lower = mant;
+    return temp.d;
+}
+
+/* MMX/SSE */
+/* XXX: optimize by storing fptt and fptags in the static cpu state */
+
+#define SSE_DAZ             0x0040
+#define SSE_RC_MASK         0x6000
+#define SSE_RC_NEAR         0x0000
+#define SSE_RC_DOWN         0x2000
+#define SSE_RC_UP           0x4000
+#define SSE_RC_CHOP         0x6000
+#define SSE_FZ              0x8000
+
+void cpu_set_mxcsr(CPUX86State *env, uint32_t mxcsr)
+{
+    int rnd_type;
+
+    env->mxcsr = mxcsr;
+
+    /* set rounding mode */
+    switch (mxcsr & SSE_RC_MASK) {
+    default:
+    case SSE_RC_NEAR:
+        rnd_type = float_round_nearest_even;
+        break;
+    case SSE_RC_DOWN:
+        rnd_type = float_round_down;
+        break;
+    case SSE_RC_UP:
+        rnd_type = float_round_up;
+        break;
+    case SSE_RC_CHOP:
+        rnd_type = float_round_to_zero;
+        break;
+    }
+    set_float_rounding_mode(rnd_type, &env->sse_status);
+
+    /* Set denormals-are-zero (DAZ).  */
+    set_flush_inputs_to_zero((mxcsr & SSE_DAZ) ? 1 : 0, &env->sse_status);
+
+    /* Set flush-to-zero (FZ).  MXCSR governs SSE arithmetic, so this
+       belongs to sse_status rather than the x87 fp_status.  */
+    set_flush_to_zero((mxcsr & SSE_FZ) ? 1 : 0, &env->sse_status);
+}
+
+void cpu_set_fpuc(CPUX86State *env, uint16_t val)
+{
+    env->fpuc = val;
+    update_fp_status(env);
+}
+
+void helper_ldmxcsr(CPUX86State *env, uint32_t val)
+{
+    cpu_set_mxcsr(env, val);
+}
+
+void helper_enter_mmx(CPUX86State *env)
+{
+    env->fpstt = 0;
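+    /* fptags holds one byte per register (1 = empty), so each 32-bit
+       store updates four tags at once.  */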
+    *(uint32_t *)(env->fptags) = 0;
+    *(uint32_t *)(env->fptags + 4) = 0;
+}
+
+void helper_emms(CPUX86State *env)
+{
+    /* set to empty state */
+    *(uint32_t *)(env->fptags) = 0x01010101;
+    *(uint32_t *)(env->fptags + 4) = 0x01010101;
+}
+
+/* XXX: remove this helper */
+void helper_movq(CPUX86State *env, void *d, void *s)
+{
+    *(uint64_t *)d = *(uint64_t *)s;
+}
+
+#define SHIFT 0
+#include "ops_sse.h"
+
+#define SHIFT 1
+#include "ops_sse.h"
diff --git a/target/i386/gdbstub.c b/target/i386/gdbstub.c
new file mode 100644
index 0000000000..c494535df1
--- /dev/null
+++ b/target/i386/gdbstub.c
@@ -0,0 +1,234 @@
+/*
+ * x86 gdb server stub
+ *
+ * Copyright (c) 2003-2005 Fabrice Bellard
+ * Copyright (c) 2013 SUSE LINUX Products GmbH
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "cpu.h"
+#include "exec/gdbstub.h"
+
+#ifdef TARGET_X86_64
+static const int gpr_map[16] = {
+    R_EAX, R_EBX, R_ECX, R_EDX, R_ESI, R_EDI, R_EBP, R_ESP,
+    8, 9, 10, 11, 12, 13, 14, 15
+};
+#else
+#define gpr_map gpr_map32
+#endif
+static const int gpr_map32[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
+
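+/* GDB register numbering: the GPRs come first, followed by the PC and
+   EFLAGS, six segment selectors, the x87 state (eight data registers
+   plus eight control/status registers), the XMM registers and MXCSR.  */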
+#define IDX_IP_REG      CPU_NB_REGS
+#define IDX_FLAGS_REG   (IDX_IP_REG + 1)
+#define IDX_SEG_REGS    (IDX_FLAGS_REG + 1)
+#define IDX_FP_REGS     (IDX_SEG_REGS + 6)
+#define IDX_XMM_REGS    (IDX_FP_REGS + 16)
+#define IDX_MXCSR_REG   (IDX_XMM_REGS + CPU_NB_REGS)
+
+int x86_cpu_gdb_read_register(CPUState *cs, uint8_t *mem_buf, int n)
+{
+    X86CPU *cpu = X86_CPU(cs);
+    CPUX86State *env = &cpu->env;
+
+    if (n < CPU_NB_REGS) {
+        if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
+            return gdb_get_reg64(mem_buf, env->regs[gpr_map[n]]);
+        } else if (n < CPU_NB_REGS32) {
+            return gdb_get_reg32(mem_buf, env->regs[gpr_map32[n]]);
+        }
+    } else if (n >= IDX_FP_REGS && n < IDX_FP_REGS + 8) {
+#ifdef USE_X86LDOUBLE
+        /* FIXME: byteswap float values - after fixing fpregs layout. */
+        memcpy(mem_buf, &env->fpregs[n - IDX_FP_REGS], 10);
+#else
+        memset(mem_buf, 0, 10);
+#endif
+        return 10;
+    } else if (n >= IDX_XMM_REGS && n < IDX_XMM_REGS + CPU_NB_REGS) {
+        n -= IDX_XMM_REGS;
+        if (n < CPU_NB_REGS32 ||
+            (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK)) {
+            stq_p(mem_buf, env->xmm_regs[n].ZMM_Q(0));
+            stq_p(mem_buf + 8, env->xmm_regs[n].ZMM_Q(1));
+            return 16;
+        }
+    } else {
+        switch (n) {
+        case IDX_IP_REG:
+            if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
+                return gdb_get_reg64(mem_buf, env->eip);
+            } else {
+                return gdb_get_reg32(mem_buf, env->eip);
+            }
+        case IDX_FLAGS_REG:
+            return gdb_get_reg32(mem_buf, env->eflags);
+
+        case IDX_SEG_REGS:
+            return gdb_get_reg32(mem_buf, env->segs[R_CS].selector);
+        case IDX_SEG_REGS + 1:
+            return gdb_get_reg32(mem_buf, env->segs[R_SS].selector);
+        case IDX_SEG_REGS + 2:
+            return gdb_get_reg32(mem_buf, env->segs[R_DS].selector);
+        case IDX_SEG_REGS + 3:
+            return gdb_get_reg32(mem_buf, env->segs[R_ES].selector);
+        case IDX_SEG_REGS + 4:
+            return gdb_get_reg32(mem_buf, env->segs[R_FS].selector);
+        case IDX_SEG_REGS + 5:
+            return gdb_get_reg32(mem_buf, env->segs[R_GS].selector);
+
+        case IDX_FP_REGS + 8:
+            return gdb_get_reg32(mem_buf, env->fpuc);
+        case IDX_FP_REGS + 9:
+            return gdb_get_reg32(mem_buf, (env->fpus & ~0x3800) |
+                                          (env->fpstt & 0x7) << 11);
+        case IDX_FP_REGS + 10:
+            return gdb_get_reg32(mem_buf, 0); /* ftag */
+        case IDX_FP_REGS + 11:
+            return gdb_get_reg32(mem_buf, 0); /* fiseg */
+        case IDX_FP_REGS + 12:
+            return gdb_get_reg32(mem_buf, 0); /* fioff */
+        case IDX_FP_REGS + 13:
+            return gdb_get_reg32(mem_buf, 0); /* foseg */
+        case IDX_FP_REGS + 14:
+            return gdb_get_reg32(mem_buf, 0); /* fooff */
+        case IDX_FP_REGS + 15:
+            return gdb_get_reg32(mem_buf, 0); /* fop */
+
+        case IDX_MXCSR_REG:
+            return gdb_get_reg32(mem_buf, env->mxcsr);
+        }
+    }
+    return 0;
+}
+
+static int x86_cpu_gdb_load_seg(X86CPU *cpu, int sreg, uint8_t *mem_buf)
+{
+    CPUX86State *env = &cpu->env;
+    uint16_t selector = ldl_p(mem_buf);
+
+    if (selector != env->segs[sreg].selector) {
+#if defined(CONFIG_USER_ONLY)
+        cpu_x86_load_seg(env, sreg, selector);
+#else
+        unsigned int limit, flags;
+        target_ulong base;
+
+        if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
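+            /* Real or VM86 mode: synthesize a flat descriptor with
+               base = selector << 4 and a 64 KiB limit.  */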
+            int dpl = (env->eflags & VM_MASK) ? 3 : 0;
+            base = selector << 4;
+            limit = 0xffff;
+            flags = DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
+                    DESC_A_MASK | (dpl << DESC_DPL_SHIFT);
+        } else {
+            if (!cpu_x86_get_descr_debug(env, selector, &base, &limit,
+                                         &flags)) {
+                return 4;
+            }
+        }
+        cpu_x86_load_seg_cache(env, sreg, selector, base, limit, flags);
+#endif
+    }
+    return 4;
+}
+
+int x86_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
+{
+    X86CPU *cpu = X86_CPU(cs);
+    CPUX86State *env = &cpu->env;
+    uint32_t tmp;
+
+    if (n < CPU_NB_REGS) {
+        if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
+            env->regs[gpr_map[n]] = ldtul_p(mem_buf);
+            return sizeof(target_ulong);
+        } else if (n < CPU_NB_REGS32) {
+            n = gpr_map32[n];
+            env->regs[n] &= ~0xffffffffUL;
+            env->regs[n] |= (uint32_t)ldl_p(mem_buf);
+            return 4;
+        }
+    } else if (n >= IDX_FP_REGS && n < IDX_FP_REGS + 8) {
+#ifdef USE_X86LDOUBLE
+        /* FIXME: byteswap float values - after fixing fpregs layout. */
+        memcpy(&env->fpregs[n - IDX_FP_REGS], mem_buf, 10);
+#endif
+        return 10;
+    } else if (n >= IDX_XMM_REGS && n < IDX_XMM_REGS + CPU_NB_REGS) {
+        n -= IDX_XMM_REGS;
+        if (n < CPU_NB_REGS32 ||
+            (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK)) {
+            env->xmm_regs[n].ZMM_Q(0) = ldq_p(mem_buf);
+            env->xmm_regs[n].ZMM_Q(1) = ldq_p(mem_buf + 8);
+            return 16;
+        }
+    } else {
+        switch (n) {
+        case IDX_IP_REG:
+            if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
+                env->eip = ldq_p(mem_buf);
+                return 8;
+            } else {
+                env->eip &= ~0xffffffffUL;
+                env->eip |= (uint32_t)ldl_p(mem_buf);
+                return 4;
+            }
+        case IDX_FLAGS_REG:
+            env->eflags = ldl_p(mem_buf);
+            return 4;
+
+        case IDX_SEG_REGS:
+            return x86_cpu_gdb_load_seg(cpu, R_CS, mem_buf);
+        case IDX_SEG_REGS + 1:
+            return x86_cpu_gdb_load_seg(cpu, R_SS, mem_buf);
+        case IDX_SEG_REGS + 2:
+            return x86_cpu_gdb_load_seg(cpu, R_DS, mem_buf);
+        case IDX_SEG_REGS + 3:
+            return x86_cpu_gdb_load_seg(cpu, R_ES, mem_buf);
+        case IDX_SEG_REGS + 4:
+            return x86_cpu_gdb_load_seg(cpu, R_FS, mem_buf);
+        case IDX_SEG_REGS + 5:
+            return x86_cpu_gdb_load_seg(cpu, R_GS, mem_buf);
+
+        case IDX_FP_REGS + 8:
+            cpu_set_fpuc(env, ldl_p(mem_buf));
+            return 4;
+        case IDX_FP_REGS + 9:
+            tmp = ldl_p(mem_buf);
+            env->fpstt = (tmp >> 11) & 7;
+            env->fpus = tmp & ~0x3800;
+            return 4;
+        case IDX_FP_REGS + 10: /* ftag */
+            return 4;
+        case IDX_FP_REGS + 11: /* fiseg */
+            return 4;
+        case IDX_FP_REGS + 12: /* fioff */
+            return 4;
+        case IDX_FP_REGS + 13: /* foseg */
+            return 4;
+        case IDX_FP_REGS + 14: /* fooff */
+            return 4;
+        case IDX_FP_REGS + 15: /* fop */
+            return 4;
+
+        case IDX_MXCSR_REG:
+            cpu_set_mxcsr(env, ldl_p(mem_buf));
+            return 4;
+        }
+    }
+    /* Unrecognised register.  */
+    return 0;
+}
diff --git a/target/i386/helper.c b/target/i386/helper.c
new file mode 100644
index 0000000000..4ecc0912a4
--- /dev/null
+++ b/target/i386/helper.c
@@ -0,0 +1,1446 @@
+/*
+ *  i386 helpers (without register variable usage)
+ *
+ *  Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "sysemu/kvm.h"
+#include "kvm_i386.h"
+#ifndef CONFIG_USER_ONLY
+#include "sysemu/sysemu.h"
+#include "monitor/monitor.h"
+#include "hw/i386/apic_internal.h"
+#endif
+
+static void cpu_x86_version(CPUX86State *env, int *family, int *model)
+{
+    int cpuver = env->cpuid_version;
+
+    if (family == NULL || model == NULL) {
+        return;
+    }
+
+    *family = (cpuver >> 8) & 0x0f;
+    *model = ((cpuver >> 12) & 0xf0) + ((cpuver >> 4) & 0x0f);
+}
+
+/* Broadcast MCA signal for processor family 06H, model 0EH (06_0EH) and above */
+int cpu_x86_support_mca_broadcast(CPUX86State *env)
+{
+    int family = 0;
+    int model = 0;
+
+    cpu_x86_version(env, &family, &model);
+    if ((family == 6 && model >= 14) || family > 6) {
+        return 1;
+    }
+
+    return 0;
+}
+
+/***********************************************************/
+/* x86 debug */
+
+static const char *cc_op_str[CC_OP_NB] = {
+    "DYNAMIC",
+    "EFLAGS",
+
+    "MULB",
+    "MULW",
+    "MULL",
+    "MULQ",
+
+    "ADDB",
+    "ADDW",
+    "ADDL",
+    "ADDQ",
+
+    "ADCB",
+    "ADCW",
+    "ADCL",
+    "ADCQ",
+
+    "SUBB",
+    "SUBW",
+    "SUBL",
+    "SUBQ",
+
+    "SBBB",
+    "SBBW",
+    "SBBL",
+    "SBBQ",
+
+    "LOGICB",
+    "LOGICW",
+    "LOGICL",
+    "LOGICQ",
+
+    "INCB",
+    "INCW",
+    "INCL",
+    "INCQ",
+
+    "DECB",
+    "DECW",
+    "DECL",
+    "DECQ",
+
+    "SHLB",
+    "SHLW",
+    "SHLL",
+    "SHLQ",
+
+    "SARB",
+    "SARW",
+    "SARL",
+    "SARQ",
+
+    "BMILGB",
+    "BMILGW",
+    "BMILGL",
+    "BMILGQ",
+
+    "ADCX",
+    "ADOX",
+    "ADCOX",
+
+    "CLR",
+};
+
+static void
+cpu_x86_dump_seg_cache(CPUX86State *env, FILE *f, fprintf_function cpu_fprintf,
+                       const char *name, struct SegmentCache *sc)
+{
+#ifdef TARGET_X86_64
+    if (env->hflags & HF_CS64_MASK) {
+        cpu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name,
+                    sc->selector, sc->base, sc->limit, sc->flags & 0x00ffff00);
+    } else
+#endif
+    {
+        cpu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector,
+                    (uint32_t)sc->base, sc->limit, sc->flags & 0x00ffff00);
+    }
+
+    if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK)) {
+        goto done;
+    }
+
+    cpu_fprintf(f, " DPL=%d ", (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT);
+    if (sc->flags & DESC_S_MASK) {
+        if (sc->flags & DESC_CS_MASK) {
+            cpu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" :
+                           ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16"));
+            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-',
+                        (sc->flags & DESC_R_MASK) ? 'R' : '-');
+        } else {
+            cpu_fprintf(f,
+                        (sc->flags & DESC_B_MASK || env->hflags & HF_LMA_MASK)
+                        ? "DS  " : "DS16");
+            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-',
+                        (sc->flags & DESC_W_MASK) ? 'W' : '-');
+        }
+        cpu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-');
+    } else {
+        static const char *sys_type_name[2][16] = {
+            { /* 32 bit mode */
+                "Reserved", "TSS16-avl", "LDT", "TSS16-busy",
+                "CallGate16", "TaskGate", "IntGate16", "TrapGate16",
+                "Reserved", "TSS32-avl", "Reserved", "TSS32-busy",
+                "CallGate32", "Reserved", "IntGate32", "TrapGate32"
+            },
+            { /* 64 bit mode */
+                "<hiword>", "Reserved", "LDT", "Reserved", "Reserved",
+                "Reserved", "Reserved", "Reserved", "Reserved",
+                "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64",
+                "Reserved", "IntGate64", "TrapGate64"
+            }
+        };
+        cpu_fprintf(f, "%s",
+                    sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0]
+                                 [(sc->flags & DESC_TYPE_MASK)
+                                  >> DESC_TYPE_SHIFT]);
+    }
+done:
+    cpu_fprintf(f, "\n");
+}
+
+#ifndef CONFIG_USER_ONLY
+
+/* No ARRAY_SIZE check is required because the delivery mode (dm)
+ * field is only 3 bits wide.
+ */
+static inline const char *dm2str(uint32_t dm)
+{
+    static const char *str[] = {
+        "Fixed",
+        "...",
+        "SMI",
+        "...",
+        "NMI",
+        "INIT",
+        "...",
+        "ExtINT"
+    };
+    return str[dm];
+}
+
+static void dump_apic_lvt(FILE *f, fprintf_function cpu_fprintf,
+                          const char *name, uint32_t lvt, bool is_timer)
+{
+    uint32_t dm = (lvt & APIC_LVT_DELIV_MOD) >> APIC_LVT_DELIV_MOD_SHIFT;
+    cpu_fprintf(f,
+                "%s\t 0x%08x %s %-5s %-6s %-7s %-12s %-6s",
+                name, lvt,
+                lvt & APIC_LVT_INT_POLARITY ? "active-lo" : "active-hi",
+                lvt & APIC_LVT_LEVEL_TRIGGER ? "level" : "edge",
+                lvt & APIC_LVT_MASKED ? "masked" : "",
+                lvt & APIC_LVT_DELIV_STS ? "pending" : "",
+                !is_timer ?
+                    "" : lvt & APIC_LVT_TIMER_PERIODIC ?
+                            "periodic" : lvt & APIC_LVT_TIMER_TSCDEADLINE ?
+                                            "tsc-deadline" : "one-shot",
+                dm2str(dm));
+    if (dm != APIC_DM_NMI) {
+        cpu_fprintf(f, " (vec %u)\n", lvt & APIC_VECTOR_MASK);
+    } else {
+        cpu_fprintf(f, "\n");
+    }
+}
+
+/* No ARRAY_SIZE check is required because the destination shorthand
+ * field is only 2 bits wide.
+ */
+static inline const char *shorthand2str(uint32_t shorthand)
+{
+    static const char *str[] = {
+        "no-shorthand", "self", "all-self", "all"
+    };
+    return str[shorthand];
+}
+
+static inline uint8_t divider_conf(uint32_t divide_conf)
+{
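+    /* The DCR encodes the timer divider in bits 3, 1 and 0; the value 7
+       selects divide-by-1, any other value divide-by-2^(value+1).  */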
+    uint8_t divide_val = ((divide_conf & 0x8) >> 1) | (divide_conf & 0x3);
+
+    return divide_val == 7 ? 1 : 2 << divide_val;
+}
+
+static inline void mask2str(char *str, uint32_t val, uint8_t size)
+{
+    while (size--) {
+        *str++ = (val >> size) & 1 ? '1' : '0';
+    }
+    *str = 0;
+}
+
+#define MAX_LOGICAL_APIC_ID_MASK_SIZE 16
+
+static void dump_apic_icr(FILE *f, fprintf_function cpu_fprintf,
+                          APICCommonState *s, CPUX86State *env)
+{
+    uint32_t icr = s->icr[0], icr2 = s->icr[1];
+    uint8_t dest_shorthand =
+        (icr & APIC_ICR_DEST_SHORT) >> APIC_ICR_DEST_SHORT_SHIFT;
+    bool logical_mod = icr & APIC_ICR_DEST_MOD;
+    char apic_id_str[MAX_LOGICAL_APIC_ID_MASK_SIZE + 1];
+    uint32_t dest_field;
+    bool x2apic;
+
+    cpu_fprintf(f, "ICR\t 0x%08x %s %s %s %s\n",
+                icr,
+                logical_mod ? "logical" : "physical",
+                icr & APIC_ICR_TRIGGER_MOD ? "level" : "edge",
+                icr & APIC_ICR_LEVEL ? "assert" : "de-assert",
+                shorthand2str(dest_shorthand));
+
+    cpu_fprintf(f, "ICR2\t 0x%08x", icr2);
+    if (dest_shorthand != 0) {
+        cpu_fprintf(f, "\n");
+        return;
+    }
+    x2apic = env->features[FEAT_1_ECX] & CPUID_EXT_X2APIC;
+    dest_field = x2apic ? icr2 : icr2 >> APIC_ICR_DEST_SHIFT;
+
+    if (!logical_mod) {
+        if (x2apic) {
+            cpu_fprintf(f, " cpu %u (X2APIC ID)\n", dest_field);
+        } else {
+            cpu_fprintf(f, " cpu %u (APIC ID)\n",
+                        dest_field & APIC_LOGDEST_XAPIC_ID);
+        }
+        return;
+    }
+
+    if (s->dest_mode == 0xf) { /* flat mode */
+        mask2str(apic_id_str, icr2 >> APIC_ICR_DEST_SHIFT, 8);
+        cpu_fprintf(f, " mask %s (APIC ID)\n", apic_id_str);
+    } else if (s->dest_mode == 0) { /* cluster mode */
+        if (x2apic) {
+            mask2str(apic_id_str, dest_field & APIC_LOGDEST_X2APIC_ID, 16);
+            cpu_fprintf(f, " cluster %u mask %s (X2APIC ID)\n",
+                        dest_field >> APIC_LOGDEST_X2APIC_SHIFT, apic_id_str);
+        } else {
+            mask2str(apic_id_str, dest_field & APIC_LOGDEST_XAPIC_ID, 4);
+            cpu_fprintf(f, " cluster %u mask %s (APIC ID)\n",
+                        dest_field >> APIC_LOGDEST_XAPIC_SHIFT, apic_id_str);
+        }
+    }
+}
+
+static void dump_apic_interrupt(FILE *f, fprintf_function cpu_fprintf,
+                                const char *name, uint32_t *ireg_tab,
+                                uint32_t *tmr_tab)
+{
+    int i, empty = true;
+
+    cpu_fprintf(f, "%s\t ", name);
+    for (i = 0; i < 256; i++) {
+        if (apic_get_bit(ireg_tab, i)) {
+            cpu_fprintf(f, "%u%s ", i,
+                        apic_get_bit(tmr_tab, i) ? "(level)" : "");
+            empty = false;
+        }
+    }
+    cpu_fprintf(f, "%s\n", empty ? "(none)" : "");
+}
+
+void x86_cpu_dump_local_apic_state(CPUState *cs, FILE *f,
+                                   fprintf_function cpu_fprintf, int flags)
+{
+    X86CPU *cpu = X86_CPU(cs);
+    APICCommonState *s = APIC_COMMON(cpu->apic_state);
+    uint32_t *lvt = s->lvt;
+
+    cpu_fprintf(f, "dumping local APIC state for CPU %-2u\n\n",
+                CPU(cpu)->cpu_index);
+    dump_apic_lvt(f, cpu_fprintf, "LVT0", lvt[APIC_LVT_LINT0], false);
+    dump_apic_lvt(f, cpu_fprintf, "LVT1", lvt[APIC_LVT_LINT1], false);
+    dump_apic_lvt(f, cpu_fprintf, "LVTPC", lvt[APIC_LVT_PERFORM], false);
+    dump_apic_lvt(f, cpu_fprintf, "LVTERR", lvt[APIC_LVT_ERROR], false);
+    dump_apic_lvt(f, cpu_fprintf, "LVTTHMR", lvt[APIC_LVT_THERMAL], false);
+    dump_apic_lvt(f, cpu_fprintf, "LVTT", lvt[APIC_LVT_TIMER], true);
+
+    cpu_fprintf(f, "Timer\t DCR=0x%x (divide by %u) initial_count = %u\n",
+                s->divide_conf & APIC_DCR_MASK,
+                divider_conf(s->divide_conf),
+                s->initial_count);
+
+    cpu_fprintf(f, "SPIV\t 0x%08x APIC %s, focus=%s, spurious vec %u\n",
+                s->spurious_vec,
+                s->spurious_vec & APIC_SPURIO_ENABLED ? "enabled" : "disabled",
+                s->spurious_vec & APIC_SPURIO_FOCUS ? "on" : "off",
+                s->spurious_vec & APIC_VECTOR_MASK);
+
+    dump_apic_icr(f, cpu_fprintf, s, &cpu->env);
+
+    cpu_fprintf(f, "ESR\t 0x%08x\n", s->esr);
+
+    dump_apic_interrupt(f, cpu_fprintf, "ISR", s->isr, s->tmr);
+    dump_apic_interrupt(f, cpu_fprintf, "IRR", s->irr, s->tmr);
+
+    cpu_fprintf(f, "\nAPR 0x%02x TPR 0x%02x DFR 0x%02x LDR 0x%02x",
+                s->arb_id, s->tpr, s->dest_mode, s->log_dest);
+    if (s->dest_mode == 0) {
+        cpu_fprintf(f, "(cluster %u: id %u)",
+                    s->log_dest >> APIC_LOGDEST_XAPIC_SHIFT,
+                    s->log_dest & APIC_LOGDEST_XAPIC_ID);
+    }
+    cpu_fprintf(f, " PPR 0x%02x\n", apic_get_ppr(s));
+}
+#else
+void x86_cpu_dump_local_apic_state(CPUState *cs, FILE *f,
+                                   fprintf_function cpu_fprintf, int flags)
+{
+}
+#endif /* !CONFIG_USER_ONLY */
+
+#define DUMP_CODE_BYTES_TOTAL    50
+#define DUMP_CODE_BYTES_BACKWARD 20
+
+void x86_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
+                        int flags)
+{
+    X86CPU *cpu = X86_CPU(cs);
+    CPUX86State *env = &cpu->env;
+    int eflags, i, nb;
+    char cc_op_name[32];
+    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };
+
+    eflags = cpu_compute_eflags(env);
+#ifdef TARGET_X86_64
+    if (env->hflags & HF_CS64_MASK) {
+        cpu_fprintf(f,
+                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
+                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
+                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
+                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
+                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
+                    env->regs[R_EAX],
+                    env->regs[R_EBX],
+                    env->regs[R_ECX],
+                    env->regs[R_EDX],
+                    env->regs[R_ESI],
+                    env->regs[R_EDI],
+                    env->regs[R_EBP],
+                    env->regs[R_ESP],
+                    env->regs[8],
+                    env->regs[9],
+                    env->regs[10],
+                    env->regs[11],
+                    env->regs[12],
+                    env->regs[13],
+                    env->regs[14],
+                    env->regs[15],
+                    env->eip, eflags,
+                    eflags & DF_MASK ? 'D' : '-',
+                    eflags & CC_O ? 'O' : '-',
+                    eflags & CC_S ? 'S' : '-',
+                    eflags & CC_Z ? 'Z' : '-',
+                    eflags & CC_A ? 'A' : '-',
+                    eflags & CC_P ? 'P' : '-',
+                    eflags & CC_C ? 'C' : '-',
+                    env->hflags & HF_CPL_MASK,
+                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
+                    (env->a20_mask >> 20) & 1,
+                    (env->hflags >> HF_SMM_SHIFT) & 1,
+                    cs->halted);
+    } else
+#endif
+    {
+        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
+                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
+                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
+                    (uint32_t)env->regs[R_EAX],
+                    (uint32_t)env->regs[R_EBX],
+                    (uint32_t)env->regs[R_ECX],
+                    (uint32_t)env->regs[R_EDX],
+                    (uint32_t)env->regs[R_ESI],
+                    (uint32_t)env->regs[R_EDI],
+                    (uint32_t)env->regs[R_EBP],
+                    (uint32_t)env->regs[R_ESP],
+                    (uint32_t)env->eip, eflags,
+                    eflags & DF_MASK ? 'D' : '-',
+                    eflags & CC_O ? 'O' : '-',
+                    eflags & CC_S ? 'S' : '-',
+                    eflags & CC_Z ? 'Z' : '-',
+                    eflags & CC_A ? 'A' : '-',
+                    eflags & CC_P ? 'P' : '-',
+                    eflags & CC_C ? 'C' : '-',
+                    env->hflags & HF_CPL_MASK,
+                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
+                    (env->a20_mask >> 20) & 1,
+                    (env->hflags >> HF_SMM_SHIFT) & 1,
+                    cs->halted);
+    }
+
+    for (i = 0; i < 6; i++) {
+        cpu_x86_dump_seg_cache(env, f, cpu_fprintf, seg_name[i],
+                               &env->segs[i]);
+    }
+    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "LDT", &env->ldt);
+    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "TR", &env->tr);
+
+#ifdef TARGET_X86_64
+    if (env->hflags & HF_LMA_MASK) {
+        cpu_fprintf(f, "GDT=     %016" PRIx64 " %08x\n",
+                    env->gdt.base, env->gdt.limit);
+        cpu_fprintf(f, "IDT=     %016" PRIx64 " %08x\n",
+                    env->idt.base, env->idt.limit);
+        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
+                    (uint32_t)env->cr[0],
+                    env->cr[2],
+                    env->cr[3],
+                    (uint32_t)env->cr[4]);
+        for (i = 0; i < 4; i++) {
+            cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
+        }
+        cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
+                    env->dr[6], env->dr[7]);
+    } else
+#endif
+    {
+        cpu_fprintf(f, "GDT=     %08x %08x\n",
+                    (uint32_t)env->gdt.base, env->gdt.limit);
+        cpu_fprintf(f, "IDT=     %08x %08x\n",
+                    (uint32_t)env->idt.base, env->idt.limit);
+        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
+                    (uint32_t)env->cr[0],
+                    (uint32_t)env->cr[2],
+                    (uint32_t)env->cr[3],
+                    (uint32_t)env->cr[4]);
+        for (i = 0; i < 4; i++) {
+            cpu_fprintf(f, "DR%d=" TARGET_FMT_lx " ", i, env->dr[i]);
+        }
+        cpu_fprintf(f, "\nDR6=" TARGET_FMT_lx " DR7=" TARGET_FMT_lx "\n",
+                    env->dr[6], env->dr[7]);
+    }
+    if (flags & CPU_DUMP_CCOP) {
+        if ((unsigned)env->cc_op < CC_OP_NB) {
+            snprintf(cc_op_name, sizeof(cc_op_name), "%s",
+                     cc_op_str[env->cc_op]);
+        } else {
+            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
+        }
+#ifdef TARGET_X86_64
+        if (env->hflags & HF_CS64_MASK) {
+            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
+                        env->cc_src, env->cc_dst,
+                        cc_op_name);
+        } else
+#endif
+        {
+            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
+                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
+                        cc_op_name);
+        }
+    }
+    cpu_fprintf(f, "EFER=%016" PRIx64 "\n", env->efer);
+    if (flags & CPU_DUMP_FPU) {
+        int fptag = 0;
+
+        for (i = 0; i < 8; i++) {
+            fptag |= ((!env->fptags[i]) << i);
+        }
+        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
+                    env->fpuc,
+                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
+                    env->fpstt,
+                    fptag,
+                    env->mxcsr);
+        for (i = 0; i < 8; i++) {
+            CPU_LDoubleU u;
+            u.d = env->fpregs[i].d;
+            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
+                        i, u.l.lower, u.l.upper);
+            if ((i & 1) == 1) {
+                cpu_fprintf(f, "\n");
+            } else {
+                cpu_fprintf(f, " ");
+            }
+        }
+        if (env->hflags & HF_CS64_MASK) {
+            nb = 16;
+        } else {
+            nb = 8;
+        }
+        for (i = 0; i < nb; i++) {
+            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
+                        i,
+                        env->xmm_regs[i].ZMM_L(3),
+                        env->xmm_regs[i].ZMM_L(2),
+                        env->xmm_regs[i].ZMM_L(1),
+                        env->xmm_regs[i].ZMM_L(0));
+            if ((i & 1) == 1) {
+                cpu_fprintf(f, "\n");
+            } else {
+                cpu_fprintf(f, " ");
+            }
+        }
+    }
+    if (flags & CPU_DUMP_CODE) {
+        target_ulong base = env->segs[R_CS].base + env->eip;
+        target_ulong offs = MIN(env->eip, DUMP_CODE_BYTES_BACKWARD);
+        uint8_t code;
+        char codestr[3];
+
+        cpu_fprintf(f, "Code=");
+        for (i = 0; i < DUMP_CODE_BYTES_TOTAL; i++) {
+            if (cpu_memory_rw_debug(cs, base - offs + i, &code, 1, 0) == 0) {
+                snprintf(codestr, sizeof(codestr), "%02x", code);
+            } else {
+                snprintf(codestr, sizeof(codestr), "??");
+            }
+            cpu_fprintf(f, "%s%s%s%s", i > 0 ? " " : "",
+                        i == offs ? "<" : "", codestr, i == offs ? ">" : "");
+        }
+        cpu_fprintf(f, "\n");
+    }
+}
+
+/***********************************************************/
+/* x86 mmu */
+/* XXX: add PGE support */
+
+void x86_cpu_set_a20(X86CPU *cpu, int a20_state)
+{
+    CPUX86State *env = &cpu->env;
+
+    a20_state = (a20_state != 0);
+    if (a20_state != ((env->a20_mask >> 20) & 1)) {
+        CPUState *cs = CPU(cpu);
+
+        qemu_log_mask(CPU_LOG_MMU, "A20 update: a20=%d\n", a20_state);
+        /* If the CPU is currently executing code, we must unlink it and
+           all the potentially executing TBs.  */
+        cpu_interrupt(cs, CPU_INTERRUPT_EXITTB);
+
+        /* when a20 is changed, all the MMU mappings are invalid, so
+           we must flush everything */
+        tlb_flush(cs, 1);
+        env->a20_mask = ~(1 << 20) | (a20_state << 20);
+    }
+}
+
+void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
+{
+    X86CPU *cpu = x86_env_get_cpu(env);
+    int pe_state;
+
+    qemu_log_mask(CPU_LOG_MMU, "CR0 update: CR0=0x%08x\n", new_cr0);
+    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
+        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
+        tlb_flush(CPU(cpu), 1);
+    }
+
+#ifdef TARGET_X86_64
+    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
+        (env->efer & MSR_EFER_LME)) {
+        /* enter in long mode */
+        /* XXX: generate an exception */
+        if (!(env->cr[4] & CR4_PAE_MASK)) {
+            return;
+        }
+        env->efer |= MSR_EFER_LMA;
+        env->hflags |= HF_LMA_MASK;
+    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
+               (env->efer & MSR_EFER_LMA)) {
+        /* exit long mode */
+        env->efer &= ~MSR_EFER_LMA;
+        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
+        env->eip &= 0xffffffff;
+    }
+#endif
+    env->cr[0] = new_cr0 | CR0_ET_MASK;
+
+    /* update PE flag in hidden flags */
+    pe_state = (env->cr[0] & CR0_PE_MASK);
+    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
+    /* ensure that ADDSEG is always set in real mode */
+    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
+    /* update FPU flags */
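+    /* CR0.MP/EM/TS occupy bits 1-3 and HF_MP/HF_EM/HF_TS are likewise
+       consecutive in hflags, so a single shift copies all three.  */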
+    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
+        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
+}
+
+/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
+   the PDPT */
+void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
+{
+    X86CPU *cpu = x86_env_get_cpu(env);
+
+    env->cr[3] = new_cr3;
+    if (env->cr[0] & CR0_PG_MASK) {
+        qemu_log_mask(CPU_LOG_MMU,
+                        "CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
+        tlb_flush(CPU(cpu), 0);
+    }
+}
+
+void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
+{
+    X86CPU *cpu = x86_env_get_cpu(env);
+    uint32_t hflags;
+
+#if defined(DEBUG_MMU)
+    printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
+#endif
+    if ((new_cr4 ^ env->cr[4]) &
+        (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK |
+         CR4_SMEP_MASK | CR4_SMAP_MASK)) {
+        tlb_flush(CPU(cpu), 1);
+    }
+
+    /* Clear bits we're going to recompute.  */
+    hflags = env->hflags & ~(HF_OSFXSR_MASK | HF_SMAP_MASK);
+
+    /* SSE handling */
+    if (!(env->features[FEAT_1_EDX] & CPUID_SSE)) {
+        new_cr4 &= ~CR4_OSFXSR_MASK;
+    }
+    if (new_cr4 & CR4_OSFXSR_MASK) {
+        hflags |= HF_OSFXSR_MASK;
+    }
+
+    if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SMAP)) {
+        new_cr4 &= ~CR4_SMAP_MASK;
+    }
+    if (new_cr4 & CR4_SMAP_MASK) {
+        hflags |= HF_SMAP_MASK;
+    }
+
+    if (!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_PKU)) {
+        new_cr4 &= ~CR4_PKE_MASK;
+    }
+
+    env->cr[4] = new_cr4;
+    env->hflags = hflags;
+
+    cpu_sync_bndcs_hflags(env);
+}
+
+#if defined(CONFIG_USER_ONLY)
+
+int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
+                             int is_write, int mmu_idx)
+{
+    X86CPU *cpu = X86_CPU(cs);
+    CPUX86State *env = &cpu->env;
+
+    /* user mode only emulation */
+    is_write &= 1;
+    env->cr[2] = addr;
+    env->error_code = (is_write << PG_ERROR_W_BIT);
+    env->error_code |= PG_ERROR_U_MASK;
+    cs->exception_index = EXCP0E_PAGE;
+    env->exception_is_int = 0;
+    env->exception_next_eip = -1;
+    return 1;
+}
+
+#else
+
+/* return value:
+ * -1 = cannot handle fault
+ * 0  = nothing more to do
+ * 1  = generate PF fault
+ */
+int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
+                             int is_write1, int mmu_idx)
+{
+    X86CPU *cpu = X86_CPU(cs);
+    CPUX86State *env = &cpu->env;
+    uint64_t ptep, pte;
+    target_ulong pde_addr, pte_addr;
+    int error_code = 0;
+    int is_dirty, prot, page_size, is_write, is_user;
+    hwaddr paddr;
+    uint64_t rsvd_mask = PG_HI_RSVD_MASK;
+    uint32_t page_offset;
+    target_ulong vaddr;
+
+    is_user = mmu_idx == MMU_USER_IDX;
+#if defined(DEBUG_MMU)
+    printf("MMU fault: addr=%" VADDR_PRIx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
+           addr, is_write1, is_user, env->eip);
+#endif
+    is_write = is_write1 & 1;
+
+    if (!(env->cr[0] & CR0_PG_MASK)) {
+        pte = addr;
+#ifdef TARGET_X86_64
+        if (!(env->hflags & HF_LMA_MASK)) {
+            /* Outside of long mode, addresses are truncated to 32 bits
+               when paging is off */
+            pte = (uint32_t)pte;
+        }
+#endif
+        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+        page_size = 4096;
+        goto do_mapping;
+    }
+
+    if (!(env->efer & MSR_EFER_NXE)) {
+        rsvd_mask |= PG_NX_MASK;
+    }
+
+    if (env->cr[4] & CR4_PAE_MASK) {
+        uint64_t pde, pdpe;
+        target_ulong pdpe_addr;
+
+#ifdef TARGET_X86_64
+        if (env->hflags & HF_LMA_MASK) {
+            uint64_t pml4e_addr, pml4e;
+            int32_t sext;
+
+            /* test virtual address sign extension */
+            sext = (int64_t)addr >> 47;
+            if (sext != 0 && sext != -1) {
+                env->error_code = 0;
+                cs->exception_index = EXCP0D_GPF;
+                return 1;
+            }
+
+            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
+                env->a20_mask;
+            pml4e = x86_ldq_phys(cs, pml4e_addr);
+            if (!(pml4e & PG_PRESENT_MASK)) {
+                goto do_fault;
+            }
+            if (pml4e & (rsvd_mask | PG_PSE_MASK)) {
+                goto do_fault_rsvd;
+            }
+            if (!(pml4e & PG_ACCESSED_MASK)) {
+                pml4e |= PG_ACCESSED_MASK;
+                x86_stl_phys_notdirty(cs, pml4e_addr, pml4e);
+            }
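+            /* Protections are accumulated by ANDing the entries of each
+               level; NX is inverted here so that "allowed" is uniformly
+               the set state, and flipped back after the walk.  */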
+            ptep = pml4e ^ PG_NX_MASK;
+            pdpe_addr = ((pml4e & PG_ADDRESS_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
+                env->a20_mask;
+            pdpe = x86_ldq_phys(cs, pdpe_addr);
+            if (!(pdpe & PG_PRESENT_MASK)) {
+                goto do_fault;
+            }
+            if (pdpe & rsvd_mask) {
+                goto do_fault_rsvd;
+            }
+            ptep &= pdpe ^ PG_NX_MASK;
+            if (!(pdpe & PG_ACCESSED_MASK)) {
+                pdpe |= PG_ACCESSED_MASK;
+                x86_stl_phys_notdirty(cs, pdpe_addr, pdpe);
+            }
+            if (pdpe & PG_PSE_MASK) {
+                /* 1 GB page */
+                page_size = 1024 * 1024 * 1024;
+                pte_addr = pdpe_addr;
+                pte = pdpe;
+                goto do_check_protect;
+            }
+        } else
+#endif
+        {
+            /* XXX: load them when cr3 is loaded ? */
+            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
+                env->a20_mask;
+            pdpe = x86_ldq_phys(cs, pdpe_addr);
+            if (!(pdpe & PG_PRESENT_MASK)) {
+                goto do_fault;
+            }
+            rsvd_mask |= PG_HI_USER_MASK;
+            if (pdpe & (rsvd_mask | PG_NX_MASK)) {
+                goto do_fault_rsvd;
+            }
+            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
+        }
+
+        pde_addr = ((pdpe & PG_ADDRESS_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
+            env->a20_mask;
+        pde = x86_ldq_phys(cs, pde_addr);
+        if (!(pde & PG_PRESENT_MASK)) {
+            goto do_fault;
+        }
+        if (pde & rsvd_mask) {
+            goto do_fault_rsvd;
+        }
+        ptep &= pde ^ PG_NX_MASK;
+        if (pde & PG_PSE_MASK) {
+            /* 2 MB page */
+            page_size = 2048 * 1024;
+            pte_addr = pde_addr;
+            pte = pde;
+            goto do_check_protect;
+        }
+        /* 4 KB page */
+        if (!(pde & PG_ACCESSED_MASK)) {
+            pde |= PG_ACCESSED_MASK;
+            x86_stl_phys_notdirty(cs, pde_addr, pde);
+        }
+        pte_addr = ((pde & PG_ADDRESS_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
+            env->a20_mask;
+        pte = x86_ldq_phys(cs, pte_addr);
+        if (!(pte & PG_PRESENT_MASK)) {
+            goto do_fault;
+        }
+        if (pte & rsvd_mask) {
+            goto do_fault_rsvd;
+        }
+        /* combine pde and pte nx, user and rw protections */
+        ptep &= pte ^ PG_NX_MASK;
+        page_size = 4096;
+    } else {
+        uint32_t pde;
+
+        /* page directory entry */
+        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
+            env->a20_mask;
+        pde = x86_ldl_phys(cs, pde_addr);
+        if (!(pde & PG_PRESENT_MASK)) {
+            goto do_fault;
+        }
+        ptep = pde | PG_NX_MASK;
+
+        /* if PSE bit is set, then we use a 4MB page */
+        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
+            page_size = 4096 * 1024;
+            pte_addr = pde_addr;
+
+            /* Bits 20-13 provide bits 39-32 of the address, bit 21 is reserved.
+             * Leave bits 20-13 in place for setting accessed/dirty bits below.
+             */
+            pte = pde | ((pde & 0x1fe000LL) << (32 - 13));
+            rsvd_mask = 0x200000;
+            goto do_check_protect_pse36;
+        }
+
+        if (!(pde & PG_ACCESSED_MASK)) {
+            pde |= PG_ACCESSED_MASK;
+            x86_stl_phys_notdirty(cs, pde_addr, pde);
+        }
+
+        /* page table entry */
+        pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
+            env->a20_mask;
+        pte = x86_ldl_phys(cs, pte_addr);
+        if (!(pte & PG_PRESENT_MASK)) {
+            goto do_fault;
+        }
+        /* combine pde and pte user and rw protections */
+        ptep &= pte | PG_NX_MASK;
+        page_size = 4096;
+        rsvd_mask = 0;
+    }
+
+do_check_protect:
+    rsvd_mask |= (page_size - 1) & PG_ADDRESS_MASK & ~PG_PSE_PAT_MASK;
+do_check_protect_pse36:
+    if (pte & rsvd_mask) {
+        goto do_fault_rsvd;
+    }
+    ptep ^= PG_NX_MASK;
+
+    /* Can the page be put in the TLB?  prot will tell us.  */
+    if (is_user && !(ptep & PG_USER_MASK)) {
+        goto do_fault_protect;
+    }
+
+    prot = 0;
+    if (mmu_idx != MMU_KSMAP_IDX || !(ptep & PG_USER_MASK)) {
+        prot |= PAGE_READ;
+        if ((ptep & PG_RW_MASK) || (!is_user && !(env->cr[0] & CR0_WP_MASK))) {
+            prot |= PAGE_WRITE;
+        }
+    }
+    if (!(ptep & PG_NX_MASK) &&
+        (mmu_idx == MMU_USER_IDX ||
+         !((env->cr[4] & CR4_SMEP_MASK) && (ptep & PG_USER_MASK)))) {
+        prot |= PAGE_EXEC;
+    }
+    if ((env->cr[4] & CR4_PKE_MASK) && (env->hflags & HF_LMA_MASK) &&
+        (ptep & PG_USER_MASK) && env->pkru) {
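+        /* PTE bits 62:59 select one of 16 protection keys; PKRU holds
+           an access-disable and a write-disable bit for each key.  */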
+        uint32_t pk = (pte & PG_PKRU_MASK) >> PG_PKRU_BIT;
+        uint32_t pkru_ad = (env->pkru >> pk * 2) & 1;
+        uint32_t pkru_wd = (env->pkru >> pk * 2) & 2;
+        uint32_t pkru_prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+
+        if (pkru_ad) {
+            pkru_prot &= ~(PAGE_READ | PAGE_WRITE);
+        } else if (pkru_wd && (is_user || env->cr[0] & CR0_WP_MASK)) {
+            pkru_prot &= ~PAGE_WRITE;
+        }
+
+        prot &= pkru_prot;
+        if ((pkru_prot & (1 << is_write1)) == 0) {
+            assert(is_write1 != 2);
+            error_code |= PG_ERROR_PK_MASK;
+            goto do_fault_protect;
+        }
+    }
+
+    if ((prot & (1 << is_write1)) == 0) {
+        goto do_fault_protect;
+    }
+
+    /* yes, it can! */
+    is_dirty = is_write && !(pte & PG_DIRTY_MASK);
+    if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
+        pte |= PG_ACCESSED_MASK;
+        if (is_dirty) {
+            pte |= PG_DIRTY_MASK;
+        }
+        x86_stl_phys_notdirty(cs, pte_addr, pte);
+    }
+
+    if (!(pte & PG_DIRTY_MASK)) {
+        /* only set write access if already dirty... otherwise wait
+           for dirty access */
+        assert(!is_write);
+        prot &= ~PAGE_WRITE;
+    }
+
+ do_mapping:
+    pte = pte & env->a20_mask;
+
+    /* align to page_size */
+    pte &= PG_ADDRESS_MASK & ~(page_size - 1);
+
+    /* Even with 4 MB pages, we map only one 4 KB page at a time in the
+       TLB, to avoid filling it too quickly */
+    vaddr = addr & TARGET_PAGE_MASK;
+    page_offset = vaddr & (page_size - 1);
+    paddr = pte + page_offset;
+
+    assert(prot & (1 << is_write1));
+    tlb_set_page_with_attrs(cs, vaddr, paddr, cpu_get_mem_attrs(env),
+                            prot, mmu_idx, page_size);
+    return 0;
+ do_fault_rsvd:
+    error_code |= PG_ERROR_RSVD_MASK;
+ do_fault_protect:
+    error_code |= PG_ERROR_P_MASK;
+ do_fault:
+    error_code |= (is_write << PG_ERROR_W_BIT);
+    if (is_user) {
+        error_code |= PG_ERROR_U_MASK;
+    }
+    if (is_write1 == 2 &&
+        (((env->efer & MSR_EFER_NXE) &&
+          (env->cr[4] & CR4_PAE_MASK)) ||
+         (env->cr[4] & CR4_SMEP_MASK))) {
+        error_code |= PG_ERROR_I_D_MASK;
+    }
+    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
+        /* cr2 is not modified in case of exceptions */
+        x86_stq_phys(cs,
+                 env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
+                 addr);
+    } else {
+        env->cr[2] = addr;
+    }
+    env->error_code = error_code;
+    cs->exception_index = EXCP0E_PAGE;
+    return 1;
+}
+
+hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
+{
+    X86CPU *cpu = X86_CPU(cs);
+    CPUX86State *env = &cpu->env;
+    target_ulong pde_addr, pte_addr;
+    uint64_t pte;
+    uint32_t page_offset;
+    int page_size;
+
+    if (!(env->cr[0] & CR0_PG_MASK)) {
+        pte = addr & env->a20_mask;
+        page_size = 4096;
+    } else if (env->cr[4] & CR4_PAE_MASK) {
+        target_ulong pdpe_addr;
+        uint64_t pde, pdpe;
+
+#ifdef TARGET_X86_64
+        if (env->hflags & HF_LMA_MASK) {
+            uint64_t pml4e_addr, pml4e;
+            int32_t sext;
+
+            /* test virtual address sign extension */
+            sext = (int64_t)addr >> 47;
+            if (sext != 0 && sext != -1) {
+                return -1;
+            }
+            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
+                env->a20_mask;
+            pml4e = x86_ldq_phys(cs, pml4e_addr);
+            if (!(pml4e & PG_PRESENT_MASK)) {
+                return -1;
+            }
+            pdpe_addr = ((pml4e & PG_ADDRESS_MASK) +
+                         (((addr >> 30) & 0x1ff) << 3)) & env->a20_mask;
+            pdpe = x86_ldq_phys(cs, pdpe_addr);
+            if (!(pdpe & PG_PRESENT_MASK)) {
+                return -1;
+            }
+            if (pdpe & PG_PSE_MASK) {
+                page_size = 1024 * 1024 * 1024;
+                pte = pdpe;
+                goto out;
+            }
+
+        } else
+#endif
+        {
+            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
+                env->a20_mask;
+            pdpe = x86_ldq_phys(cs, pdpe_addr);
+            if (!(pdpe & PG_PRESENT_MASK)) {
+                return -1;
+            }
+        }
+
+        pde_addr = ((pdpe & PG_ADDRESS_MASK) +
+                    (((addr >> 21) & 0x1ff) << 3)) & env->a20_mask;
+        pde = x86_ldq_phys(cs, pde_addr);
+        if (!(pde & PG_PRESENT_MASK)) {
+            return -1;
+        }
+        if (pde & PG_PSE_MASK) {
+            /* 2 MB page */
+            page_size = 2048 * 1024;
+            pte = pde;
+        } else {
+            /* 4 KB page */
+            pte_addr = ((pde & PG_ADDRESS_MASK) +
+                        (((addr >> 12) & 0x1ff) << 3)) & env->a20_mask;
+            page_size = 4096;
+            pte = x86_ldq_phys(cs, pte_addr);
+        }
+        if (!(pte & PG_PRESENT_MASK)) {
+            return -1;
+        }
+    } else {
+        uint32_t pde;
+
+        /* page directory entry */
+        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
+        pde = x86_ldl_phys(cs, pde_addr);
+        if (!(pde & PG_PRESENT_MASK)) {
+            return -1;
+        }
+        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
+            pte = pde | ((pde & 0x1fe000LL) << (32 - 13));
+            page_size = 4096 * 1024;
+        } else {
+            /* page table entry */
+            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
+            pte = x86_ldl_phys(cs, pte_addr);
+            if (!(pte & PG_PRESENT_MASK)) {
+                return -1;
+            }
+            page_size = 4096;
+        }
+        pte = pte & env->a20_mask;
+    }
+
+#ifdef TARGET_X86_64
+out:
+#endif
+    pte &= PG_ADDRESS_MASK & ~(page_size - 1);
+    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
+    return pte | page_offset;
+}
+
+typedef struct MCEInjectionParams {
+    Monitor *mon;
+    int bank;
+    uint64_t status;
+    uint64_t mcg_status;
+    uint64_t addr;
+    uint64_t misc;
+    int flags;
+} MCEInjectionParams;
+
+static void do_inject_x86_mce(CPUState *cs, run_on_cpu_data data)
+{
+    MCEInjectionParams *params = data.host_ptr;
+    X86CPU *cpu = X86_CPU(cs);
+    CPUX86State *cenv = &cpu->env;
+    uint64_t *banks = cenv->mce_banks + 4 * params->bank;
+
+    cpu_synchronize_state(cs);
+
+    /*
+     * If there is an MCE exception being processed, ignore this SRAO MCE
+     * unless unconditional injection was requested.
+     */
+    if (!(params->flags & MCE_INJECT_UNCOND_AO)
+        && !(params->status & MCI_STATUS_AR)
+        && (cenv->mcg_status & MCG_STATUS_MCIP)) {
+        return;
+    }
+
+    if (params->status & MCI_STATUS_UC) {
+        /*
+         * if MSR_MCG_CTL is not all 1s, the uncorrected error
+         * reporting is disabled
+         */
+        if ((cenv->mcg_cap & MCG_CTL_P) && cenv->mcg_ctl != ~(uint64_t)0) {
+            monitor_printf(params->mon,
+                           "CPU %d: Uncorrected error reporting disabled\n",
+                           cs->cpu_index);
+            return;
+        }
+
+        /*
+         * If MSR_MCi_CTL is not all 1s, uncorrected error
+         * reporting is disabled for this bank.
+         */
+        if (banks[0] != ~(uint64_t)0) {
+            monitor_printf(params->mon,
+                           "CPU %d: Uncorrected error reporting disabled for"
+                           " bank %d\n",
+                           cs->cpu_index, params->bank);
+            return;
+        }
+
+        if ((cenv->mcg_status & MCG_STATUS_MCIP) ||
+            !(cenv->cr[4] & CR4_MCE_MASK)) {
+            monitor_printf(params->mon,
+                           "CPU %d: Previous MCE still in progress, raising"
+                           " triple fault\n",
+                           cs->cpu_index);
+            qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
+            qemu_system_reset_request();
+            return;
+        }
+        if (banks[1] & MCI_STATUS_VAL) {
+            params->status |= MCI_STATUS_OVER;
+        }
+        banks[2] = params->addr;
+        banks[3] = params->misc;
+        cenv->mcg_status = params->mcg_status;
+        banks[1] = params->status;
+        cpu_interrupt(cs, CPU_INTERRUPT_MCE);
+    } else if (!(banks[1] & MCI_STATUS_VAL)
+               || !(banks[1] & MCI_STATUS_UC)) {
+        if (banks[1] & MCI_STATUS_VAL) {
+            params->status |= MCI_STATUS_OVER;
+        }
+        banks[2] = params->addr;
+        banks[3] = params->misc;
+        banks[1] = params->status;
+    } else {
+        banks[1] |= MCI_STATUS_OVER;
+    }
+}
+
+void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
+                        uint64_t status, uint64_t mcg_status, uint64_t addr,
+                        uint64_t misc, int flags)
+{
+    CPUState *cs = CPU(cpu);
+    CPUX86State *cenv = &cpu->env;
+    MCEInjectionParams params = {
+        .mon = mon,
+        .bank = bank,
+        .status = status,
+        .mcg_status = mcg_status,
+        .addr = addr,
+        .misc = misc,
+        .flags = flags,
+    };
+    unsigned bank_num = cenv->mcg_cap & 0xff;
+
+    if (!cenv->mcg_cap) {
+        monitor_printf(mon, "MCE injection not supported\n");
+        return;
+    }
+    if (bank >= bank_num) {
+        monitor_printf(mon, "Invalid MCE bank number\n");
+        return;
+    }
+    if (!(status & MCI_STATUS_VAL)) {
+        monitor_printf(mon, "Invalid MCE status code\n");
+        return;
+    }
+    if ((flags & MCE_INJECT_BROADCAST)
+        && !cpu_x86_support_mca_broadcast(cenv)) {
+        monitor_printf(mon, "Guest CPU does not support MCA broadcast\n");
+        return;
+    }
+
+    run_on_cpu(cs, do_inject_x86_mce, RUN_ON_CPU_HOST_PTR(&params));
+    if (flags & MCE_INJECT_BROADCAST) {
+        CPUState *other_cs;
+
+        params.bank = 1;
+        params.status = MCI_STATUS_VAL | MCI_STATUS_UC;
+        params.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV;
+        params.addr = 0;
+        params.misc = 0;
+        CPU_FOREACH(other_cs) {
+            if (other_cs == cs) {
+                continue;
+            }
+            run_on_cpu(other_cs, do_inject_x86_mce, RUN_ON_CPU_HOST_PTR(&params));
+        }
+    }
+}
+
+void cpu_report_tpr_access(CPUX86State *env, TPRAccess access)
+{
+    X86CPU *cpu = x86_env_get_cpu(env);
+    CPUState *cs = CPU(cpu);
+
+    if (kvm_enabled()) {
+        env->tpr_access_type = access;
+
+        cpu_interrupt(cs, CPU_INTERRUPT_TPR);
+    } else {
+        cpu_restore_state(cs, cs->mem_io_pc);
+
+        apic_handle_tpr_access_report(cpu->apic_state, env->eip, access);
+    }
+}
+#endif /* !CONFIG_USER_ONLY */
+
+int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
+                            target_ulong *base, unsigned int *limit,
+                            unsigned int *flags)
+{
+    X86CPU *cpu = x86_env_get_cpu(env);
+    CPUState *cs = CPU(cpu);
+    SegmentCache *dt;
+    target_ulong ptr;
+    uint32_t e1, e2;
+    int index;
+
+    if (selector & 0x4) {
+        dt = &env->ldt;
+    } else {
+        dt = &env->gdt;
+    }
+    index = selector & ~7;
+    ptr = dt->base + index;
+    if ((index + 7) > dt->limit
+        || cpu_memory_rw_debug(cs, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0
+        || cpu_memory_rw_debug(cs, ptr+4, (uint8_t *)&e2, sizeof(e2), 0) != 0) {
+        return 0;
+    }
+
+    *base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
+    *limit = (e1 & 0xffff) | (e2 & 0x000f0000);
+    if (e2 & DESC_G_MASK) {
+        *limit = (*limit << 12) | 0xfff;
+    }
+    *flags = e2;
+
+    return 1;
+}
+
+#if !defined(CONFIG_USER_ONLY)
+void do_cpu_init(X86CPU *cpu)
+{
+    CPUState *cs = CPU(cpu);
+    CPUX86State *env = &cpu->env;
+    CPUX86State *save = g_new(CPUX86State, 1);
+    int sipi = cs->interrupt_request & CPU_INTERRUPT_SIPI;
+
+    *save = *env;
+
+    cpu_reset(cs);
+    cs->interrupt_request = sipi;
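+    /* Restore the fields between start_init_save and end_init_save,
+     * which are intended to survive INIT. */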
+    memcpy(&env->start_init_save, &save->start_init_save,
+           offsetof(CPUX86State, end_init_save) -
+           offsetof(CPUX86State, start_init_save));
+    g_free(save);
+
+    if (kvm_enabled()) {
+        kvm_arch_do_init_vcpu(cpu);
+    }
+    apic_init_reset(cpu->apic_state);
+}
+
+void do_cpu_sipi(X86CPU *cpu)
+{
+    apic_sipi(cpu->apic_state);
+}
+#else
+void do_cpu_init(X86CPU *cpu)
+{
+}
+void do_cpu_sipi(X86CPU *cpu)
+{
+}
+#endif
+
+/* Frob eflags into and out of the CPU temporary format.  */
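+/* On entry the arithmetic flags move into the lazy-flags fields (CC_SRC
+ * plus CC_OP_EFLAGS) and DF is expanded to env->df = 1 or -1 so that
+ * string ops can add it directly; on exit cpu_compute_eflags() folds
+ * everything back into env->eflags. */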
+
+void x86_cpu_exec_enter(CPUState *cs)
+{
+    X86CPU *cpu = X86_CPU(cs);
+    CPUX86State *env = &cpu->env;
+
+    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
+    env->df = 1 - (2 * ((env->eflags >> 10) & 1));
+    CC_OP = CC_OP_EFLAGS;
+    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
+}
+
+void x86_cpu_exec_exit(CPUState *cs)
+{
+    X86CPU *cpu = X86_CPU(cs);
+    CPUX86State *env = &cpu->env;
+
+    env->eflags = cpu_compute_eflags(env);
+}
+
+#ifndef CONFIG_USER_ONLY
+uint8_t x86_ldub_phys(CPUState *cs, hwaddr addr)
+{
+    X86CPU *cpu = X86_CPU(cs);
+    CPUX86State *env = &cpu->env;
+
+    return address_space_ldub(cs->as, addr,
+                              cpu_get_mem_attrs(env),
+                              NULL);
+}
+
+uint32_t x86_lduw_phys(CPUState *cs, hwaddr addr)
+{
+    X86CPU *cpu = X86_CPU(cs);
+    CPUX86State *env = &cpu->env;
+
+    return address_space_lduw(cs->as, addr,
+                              cpu_get_mem_attrs(env),
+                              NULL);
+}
+
+uint32_t x86_ldl_phys(CPUState *cs, hwaddr addr)
+{
+    X86CPU *cpu = X86_CPU(cs);
+    CPUX86State *env = &cpu->env;
+
+    return address_space_ldl(cs->as, addr,
+                             cpu_get_mem_attrs(env),
+                             NULL);
+}
+
+uint64_t x86_ldq_phys(CPUState *cs, hwaddr addr)
+{
+    X86CPU *cpu = X86_CPU(cs);
+    CPUX86State *env = &cpu->env;
+
+    return address_space_ldq(cs->as, addr,
+                             cpu_get_mem_attrs(env),
+                             NULL);
+}
+
+void x86_stb_phys(CPUState *cs, hwaddr addr, uint8_t val)
+{
+    X86CPU *cpu = X86_CPU(cs);
+    CPUX86State *env = &cpu->env;
+
+    address_space_stb(cs->as, addr, val,
+                      cpu_get_mem_attrs(env),
+                      NULL);
+}
+
+void x86_stl_phys_notdirty(CPUState *cs, hwaddr addr, uint32_t val)
+{
+    X86CPU *cpu = X86_CPU(cs);
+    CPUX86State *env = &cpu->env;
+
+    address_space_stl_notdirty(cs->as, addr, val,
+                               cpu_get_mem_attrs(env),
+                               NULL);
+}
+
+void x86_stw_phys(CPUState *cs, hwaddr addr, uint32_t val)
+{
+    X86CPU *cpu = X86_CPU(cs);
+    CPUX86State *env = &cpu->env;
+
+    address_space_stw(cs->as, addr, val,
+                      cpu_get_mem_attrs(env),
+                      NULL);
+}
+
+void x86_stl_phys(CPUState *cs, hwaddr addr, uint32_t val)
+{
+    X86CPU *cpu = X86_CPU(cs);
+    CPUX86State *env = &cpu->env;
+
+    address_space_stl(cs->as, addr, val,
+                      cpu_get_mem_attrs(env),
+                      NULL);
+}
+
+void x86_stq_phys(CPUState *cs, hwaddr addr, uint64_t val)
+{
+    X86CPU *cpu = X86_CPU(cs);
+    CPUX86State *env = &cpu->env;
+
+    address_space_stq(cs->as, addr, val,
+                      cpu_get_mem_attrs(env),
+                      NULL);
+}
+#endif
diff --git a/target/i386/helper.h b/target/i386/helper.h
new file mode 100644
index 0000000000..4e859eba9d
--- /dev/null
+++ b/target/i386/helper.h
@@ -0,0 +1,230 @@
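+/* TCG helper declarations.  DEF_HELPER_<n>(name, ret, args...) expands
+ * to the prototype of helper_<name>(); "tl" is target_ulong and "env"
+ * is CPUX86State *.  The _FLAGS_ variants attach TCG call flags, e.g.
+ * TCG_CALL_NO_RWG_SE marks a helper that neither reads nor writes TCG
+ * globals and has no side effects. */
+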
+DEF_HELPER_FLAGS_4(cc_compute_all, TCG_CALL_NO_RWG_SE, tl, tl, tl, tl, int)
+DEF_HELPER_FLAGS_4(cc_compute_c, TCG_CALL_NO_RWG_SE, tl, tl, tl, tl, int)
+
+DEF_HELPER_3(write_eflags, void, env, tl, i32)
+DEF_HELPER_1(read_eflags, tl, env)
+DEF_HELPER_2(divb_AL, void, env, tl)
+DEF_HELPER_2(idivb_AL, void, env, tl)
+DEF_HELPER_2(divw_AX, void, env, tl)
+DEF_HELPER_2(idivw_AX, void, env, tl)
+DEF_HELPER_2(divl_EAX, void, env, tl)
+DEF_HELPER_2(idivl_EAX, void, env, tl)
+#ifdef TARGET_X86_64
+DEF_HELPER_2(divq_EAX, void, env, tl)
+DEF_HELPER_2(idivq_EAX, void, env, tl)
+#endif
+DEF_HELPER_FLAGS_2(cr4_testbit, TCG_CALL_NO_WG, void, env, i32)
+
+DEF_HELPER_FLAGS_2(bndck, TCG_CALL_NO_WG, void, env, i32)
+DEF_HELPER_FLAGS_3(bndldx32, TCG_CALL_NO_WG, i64, env, tl, tl)
+DEF_HELPER_FLAGS_3(bndldx64, TCG_CALL_NO_WG, i64, env, tl, tl)
+DEF_HELPER_FLAGS_5(bndstx32, TCG_CALL_NO_WG, void, env, tl, tl, i64, i64)
+DEF_HELPER_FLAGS_5(bndstx64, TCG_CALL_NO_WG, void, env, tl, tl, i64, i64)
+DEF_HELPER_1(bnd_jmp, void, env)
+
+DEF_HELPER_2(aam, void, env, int)
+DEF_HELPER_2(aad, void, env, int)
+DEF_HELPER_1(aaa, void, env)
+DEF_HELPER_1(aas, void, env)
+DEF_HELPER_1(daa, void, env)
+DEF_HELPER_1(das, void, env)
+
+DEF_HELPER_2(lsl, tl, env, tl)
+DEF_HELPER_2(lar, tl, env, tl)
+DEF_HELPER_2(verr, void, env, tl)
+DEF_HELPER_2(verw, void, env, tl)
+DEF_HELPER_2(lldt, void, env, int)
+DEF_HELPER_2(ltr, void, env, int)
+DEF_HELPER_3(load_seg, void, env, int, int)
+DEF_HELPER_4(ljmp_protected, void, env, int, tl, tl)
+DEF_HELPER_5(lcall_real, void, env, int, tl, int, int)
+DEF_HELPER_5(lcall_protected, void, env, int, tl, int, tl)
+DEF_HELPER_2(iret_real, void, env, int)
+DEF_HELPER_3(iret_protected, void, env, int, int)
+DEF_HELPER_3(lret_protected, void, env, int, int)
+DEF_HELPER_2(read_crN, tl, env, int)
+DEF_HELPER_3(write_crN, void, env, int, tl)
+DEF_HELPER_2(lmsw, void, env, tl)
+DEF_HELPER_1(clts, void, env)
+DEF_HELPER_FLAGS_3(set_dr, TCG_CALL_NO_WG, void, env, int, tl)
+DEF_HELPER_FLAGS_2(get_dr, TCG_CALL_NO_WG, tl, env, int)
+DEF_HELPER_2(invlpg, void, env, tl)
+
+DEF_HELPER_1(sysenter, void, env)
+DEF_HELPER_2(sysexit, void, env, int)
+#ifdef TARGET_X86_64
+DEF_HELPER_2(syscall, void, env, int)
+DEF_HELPER_2(sysret, void, env, int)
+#endif
+DEF_HELPER_2(hlt, void, env, int)
+DEF_HELPER_2(monitor, void, env, tl)
+DEF_HELPER_2(mwait, void, env, int)
+DEF_HELPER_2(pause, void, env, int)
+DEF_HELPER_1(debug, void, env)
+DEF_HELPER_1(reset_rf, void, env)
+DEF_HELPER_3(raise_interrupt, void, env, int, int)
+DEF_HELPER_2(raise_exception, void, env, int)
+DEF_HELPER_1(cli, void, env)
+DEF_HELPER_1(sti, void, env)
+DEF_HELPER_1(clac, void, env)
+DEF_HELPER_1(stac, void, env)
+DEF_HELPER_3(boundw, void, env, tl, int)
+DEF_HELPER_3(boundl, void, env, tl, int)
+DEF_HELPER_1(rsm, void, env)
+DEF_HELPER_2(into, void, env, int)
+DEF_HELPER_2(cmpxchg8b_unlocked, void, env, tl)
+DEF_HELPER_2(cmpxchg8b, void, env, tl)
+#ifdef TARGET_X86_64
+DEF_HELPER_2(cmpxchg16b_unlocked, void, env, tl)
+DEF_HELPER_2(cmpxchg16b, void, env, tl)
+#endif
+DEF_HELPER_1(single_step, void, env)
+DEF_HELPER_1(cpuid, void, env)
+DEF_HELPER_1(rdtsc, void, env)
+DEF_HELPER_1(rdtscp, void, env)
+DEF_HELPER_1(rdpmc, void, env)
+DEF_HELPER_1(rdmsr, void, env)
+DEF_HELPER_1(wrmsr, void, env)
+
+DEF_HELPER_2(check_iob, void, env, i32)
+DEF_HELPER_2(check_iow, void, env, i32)
+DEF_HELPER_2(check_iol, void, env, i32)
+DEF_HELPER_3(outb, void, env, i32, i32)
+DEF_HELPER_2(inb, tl, env, i32)
+DEF_HELPER_3(outw, void, env, i32, i32)
+DEF_HELPER_2(inw, tl, env, i32)
+DEF_HELPER_3(outl, void, env, i32, i32)
+DEF_HELPER_2(inl, tl, env, i32)
+DEF_HELPER_FLAGS_4(bpt_io, TCG_CALL_NO_WG, void, env, i32, i32, tl)
+
+DEF_HELPER_3(svm_check_intercept_param, void, env, i32, i64)
+DEF_HELPER_3(vmexit, void, env, i32, i64)
+DEF_HELPER_4(svm_check_io, void, env, i32, i32, i32)
+DEF_HELPER_3(vmrun, void, env, int, int)
+DEF_HELPER_1(vmmcall, void, env)
+DEF_HELPER_2(vmload, void, env, int)
+DEF_HELPER_2(vmsave, void, env, int)
+DEF_HELPER_1(stgi, void, env)
+DEF_HELPER_1(clgi, void, env)
+DEF_HELPER_1(skinit, void, env)
+DEF_HELPER_2(invlpga, void, env, int)
+
+/* x86 FPU */
+
+DEF_HELPER_2(flds_FT0, void, env, i32)
+DEF_HELPER_2(fldl_FT0, void, env, i64)
+DEF_HELPER_2(fildl_FT0, void, env, s32)
+DEF_HELPER_2(flds_ST0, void, env, i32)
+DEF_HELPER_2(fldl_ST0, void, env, i64)
+DEF_HELPER_2(fildl_ST0, void, env, s32)
+DEF_HELPER_2(fildll_ST0, void, env, s64)
+DEF_HELPER_1(fsts_ST0, i32, env)
+DEF_HELPER_1(fstl_ST0, i64, env)
+DEF_HELPER_1(fist_ST0, s32, env)
+DEF_HELPER_1(fistl_ST0, s32, env)
+DEF_HELPER_1(fistll_ST0, s64, env)
+DEF_HELPER_1(fistt_ST0, s32, env)
+DEF_HELPER_1(fisttl_ST0, s32, env)
+DEF_HELPER_1(fisttll_ST0, s64, env)
+DEF_HELPER_2(fldt_ST0, void, env, tl)
+DEF_HELPER_2(fstt_ST0, void, env, tl)
+DEF_HELPER_1(fpush, void, env)
+DEF_HELPER_1(fpop, void, env)
+DEF_HELPER_1(fdecstp, void, env)
+DEF_HELPER_1(fincstp, void, env)
+DEF_HELPER_2(ffree_STN, void, env, int)
+DEF_HELPER_1(fmov_ST0_FT0, void, env)
+DEF_HELPER_2(fmov_FT0_STN, void, env, int)
+DEF_HELPER_2(fmov_ST0_STN, void, env, int)
+DEF_HELPER_2(fmov_STN_ST0, void, env, int)
+DEF_HELPER_2(fxchg_ST0_STN, void, env, int)
+DEF_HELPER_1(fcom_ST0_FT0, void, env)
+DEF_HELPER_1(fucom_ST0_FT0, void, env)
+DEF_HELPER_1(fcomi_ST0_FT0, void, env)
+DEF_HELPER_1(fucomi_ST0_FT0, void, env)
+DEF_HELPER_1(fadd_ST0_FT0, void, env)
+DEF_HELPER_1(fmul_ST0_FT0, void, env)
+DEF_HELPER_1(fsub_ST0_FT0, void, env)
+DEF_HELPER_1(fsubr_ST0_FT0, void, env)
+DEF_HELPER_1(fdiv_ST0_FT0, void, env)
+DEF_HELPER_1(fdivr_ST0_FT0, void, env)
+DEF_HELPER_2(fadd_STN_ST0, void, env, int)
+DEF_HELPER_2(fmul_STN_ST0, void, env, int)
+DEF_HELPER_2(fsub_STN_ST0, void, env, int)
+DEF_HELPER_2(fsubr_STN_ST0, void, env, int)
+DEF_HELPER_2(fdiv_STN_ST0, void, env, int)
+DEF_HELPER_2(fdivr_STN_ST0, void, env, int)
+DEF_HELPER_1(fchs_ST0, void, env)
+DEF_HELPER_1(fabs_ST0, void, env)
+DEF_HELPER_1(fxam_ST0, void, env)
+DEF_HELPER_1(fld1_ST0, void, env)
+DEF_HELPER_1(fldl2t_ST0, void, env)
+DEF_HELPER_1(fldl2e_ST0, void, env)
+DEF_HELPER_1(fldpi_ST0, void, env)
+DEF_HELPER_1(fldlg2_ST0, void, env)
+DEF_HELPER_1(fldln2_ST0, void, env)
+DEF_HELPER_1(fldz_ST0, void, env)
+DEF_HELPER_1(fldz_FT0, void, env)
+DEF_HELPER_1(fnstsw, i32, env)
+DEF_HELPER_1(fnstcw, i32, env)
+DEF_HELPER_2(fldcw, void, env, i32)
+DEF_HELPER_1(fclex, void, env)
+DEF_HELPER_1(fwait, void, env)
+DEF_HELPER_1(fninit, void, env)
+DEF_HELPER_2(fbld_ST0, void, env, tl)
+DEF_HELPER_2(fbst_ST0, void, env, tl)
+DEF_HELPER_1(f2xm1, void, env)
+DEF_HELPER_1(fyl2x, void, env)
+DEF_HELPER_1(fptan, void, env)
+DEF_HELPER_1(fpatan, void, env)
+DEF_HELPER_1(fxtract, void, env)
+DEF_HELPER_1(fprem1, void, env)
+DEF_HELPER_1(fprem, void, env)
+DEF_HELPER_1(fyl2xp1, void, env)
+DEF_HELPER_1(fsqrt, void, env)
+DEF_HELPER_1(fsincos, void, env)
+DEF_HELPER_1(frndint, void, env)
+DEF_HELPER_1(fscale, void, env)
+DEF_HELPER_1(fsin, void, env)
+DEF_HELPER_1(fcos, void, env)
+DEF_HELPER_3(fstenv, void, env, tl, int)
+DEF_HELPER_3(fldenv, void, env, tl, int)
+DEF_HELPER_3(fsave, void, env, tl, int)
+DEF_HELPER_3(frstor, void, env, tl, int)
+DEF_HELPER_FLAGS_2(fxsave, TCG_CALL_NO_WG, void, env, tl)
+DEF_HELPER_FLAGS_2(fxrstor, TCG_CALL_NO_WG, void, env, tl)
+DEF_HELPER_FLAGS_3(xsave, TCG_CALL_NO_WG, void, env, tl, i64)
+DEF_HELPER_FLAGS_3(xsaveopt, TCG_CALL_NO_WG, void, env, tl, i64)
+DEF_HELPER_FLAGS_3(xrstor, TCG_CALL_NO_WG, void, env, tl, i64)
+DEF_HELPER_FLAGS_2(xgetbv, TCG_CALL_NO_WG, i64, env, i32)
+DEF_HELPER_FLAGS_3(xsetbv, TCG_CALL_NO_WG, void, env, i32, i64)
+DEF_HELPER_FLAGS_2(rdpkru, TCG_CALL_NO_WG, i64, env, i32)
+DEF_HELPER_FLAGS_3(wrpkru, TCG_CALL_NO_WG, void, env, i32, i64)
+
+DEF_HELPER_FLAGS_1(clz, TCG_CALL_NO_RWG_SE, tl, tl)
+DEF_HELPER_FLAGS_1(ctz, TCG_CALL_NO_RWG_SE, tl, tl)
+DEF_HELPER_FLAGS_2(pdep, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_2(pext, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+
+/* MMX/SSE */
+
+DEF_HELPER_2(ldmxcsr, void, env, i32)
+DEF_HELPER_1(enter_mmx, void, env)
+DEF_HELPER_1(emms, void, env)
+DEF_HELPER_3(movq, void, env, ptr, ptr)
+
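+/* ops_sse_header.h is included twice: SHIFT 0 declares the 64-bit MMX
+ * variants of the vector helpers, SHIFT 1 the 128-bit SSE variants. */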
+#define SHIFT 0
+#include "ops_sse_header.h"
+#define SHIFT 1
+#include "ops_sse_header.h"
+
+DEF_HELPER_3(rclb, tl, env, tl, tl)
+DEF_HELPER_3(rclw, tl, env, tl, tl)
+DEF_HELPER_3(rcll, tl, env, tl, tl)
+DEF_HELPER_3(rcrb, tl, env, tl, tl)
+DEF_HELPER_3(rcrw, tl, env, tl, tl)
+DEF_HELPER_3(rcrl, tl, env, tl, tl)
+#ifdef TARGET_X86_64
+DEF_HELPER_3(rclq, tl, env, tl, tl)
+DEF_HELPER_3(rcrq, tl, env, tl, tl)
+#endif
diff --git a/target/i386/hyperv.c b/target/i386/hyperv.c
new file mode 100644
index 0000000000..39a230f119
--- /dev/null
+++ b/target/i386/hyperv.c
@@ -0,0 +1,140 @@
+/*
+ * QEMU KVM Hyper-V support
+ *
+ * Copyright (C) 2015 Andrey Smetanin <asmetanin@virtuozzo.com>
+ *
+ * Authors:
+ *  Andrey Smetanin <asmetanin@virtuozzo.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "hyperv.h"
+#include "standard-headers/asm-x86/hyperv.h"
+
+int kvm_hv_handle_exit(X86CPU *cpu, struct kvm_hyperv_exit *exit)
+{
+    CPUX86State *env = &cpu->env;
+
+    switch (exit->type) {
+    case KVM_EXIT_HYPERV_SYNIC:
+        if (!cpu->hyperv_synic) {
+            return -1;
+        }
+
+        /*
+         * For now just track changes to the SynIC control and msg/evt
+         * page MSRs.  Once SynIC message/event processing is added,
+         * this is where message queues will be flushed and pages
+         * remapped.
+         */
+        switch (exit->u.synic.msr) {
+        case HV_X64_MSR_SCONTROL:
+            env->msr_hv_synic_control = exit->u.synic.control;
+            break;
+        case HV_X64_MSR_SIMP:
+            env->msr_hv_synic_msg_page = exit->u.synic.msg_page;
+            break;
+        case HV_X64_MSR_SIEFP:
+            env->msr_hv_synic_evt_page = exit->u.synic.evt_page;
+            break;
+        default:
+            return -1;
+        }
+        return 0;
+    case KVM_EXIT_HYPERV_HCALL: {
+        uint16_t code;
+
+        code  = exit->u.hcall.input & 0xffff;
+        switch (code) {
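+        /* No hypercalls are actually handled yet: POST_MESSAGE and
+         * SIGNAL_EVENT deliberately fall through to the failure
+         * result. */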
+        case HVCALL_POST_MESSAGE:
+        case HVCALL_SIGNAL_EVENT:
+        default:
+            exit->u.hcall.result = HV_STATUS_INVALID_HYPERCALL_CODE;
+            return 0;
+        }
+    }
+    default:
+        return -1;
+    }
+}
+
+static void kvm_hv_sint_ack_handler(EventNotifier *notifier)
+{
+    HvSintRoute *sint_route = container_of(notifier, HvSintRoute,
+                                           sint_ack_notifier);
+    event_notifier_test_and_clear(notifier);
+    if (sint_route->sint_ack_clb) {
+        sint_route->sint_ack_clb(sint_route);
+    }
+}
+
+HvSintRoute *kvm_hv_sint_route_create(uint32_t vcpu_id, uint32_t sint,
+                                      HvSintAckClb sint_ack_clb)
+{
+    HvSintRoute *sint_route;
+    int r, gsi;
+
+    sint_route = g_malloc0(sizeof(*sint_route));
+    r = event_notifier_init(&sint_route->sint_set_notifier, false);
+    if (r) {
+        goto err;
+    }
+
+    r = event_notifier_init(&sint_route->sint_ack_notifier, false);
+    if (r) {
+        goto err_sint_set_notifier;
+    }
+
+    event_notifier_set_handler(&sint_route->sint_ack_notifier, false,
+                               kvm_hv_sint_ack_handler);
+
+    gsi = kvm_irqchip_add_hv_sint_route(kvm_state, vcpu_id, sint);
+    if (gsi < 0) {
+        goto err_gsi;
+    }
+
+    r = kvm_irqchip_add_irqfd_notifier_gsi(kvm_state,
+                                           &sint_route->sint_set_notifier,
+                                           &sint_route->sint_ack_notifier, gsi);
+    if (r) {
+        goto err_irqfd;
+    }
+    sint_route->gsi = gsi;
+    sint_route->sint_ack_clb = sint_ack_clb;
+    sint_route->vcpu_id = vcpu_id;
+    sint_route->sint = sint;
+
+    return sint_route;
+
+err_irqfd:
+    kvm_irqchip_release_virq(kvm_state, gsi);
+err_gsi:
+    event_notifier_set_handler(&sint_route->sint_ack_notifier, false, NULL);
+    event_notifier_cleanup(&sint_route->sint_ack_notifier);
+err_sint_set_notifier:
+    event_notifier_cleanup(&sint_route->sint_set_notifier);
+err:
+    g_free(sint_route);
+
+    return NULL;
+}
+
+void kvm_hv_sint_route_destroy(HvSintRoute *sint_route)
+{
+    kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state,
+                                          &sint_route->sint_set_notifier,
+                                          sint_route->gsi);
+    kvm_irqchip_release_virq(kvm_state, sint_route->gsi);
+    event_notifier_set_handler(&sint_route->sint_ack_notifier, false, NULL);
+    event_notifier_cleanup(&sint_route->sint_ack_notifier);
+    event_notifier_cleanup(&sint_route->sint_set_notifier);
+    g_free(sint_route);
+}
+
+int kvm_hv_sint_route_set_sint(HvSintRoute *sint_route)
+{
+    return event_notifier_set(&sint_route->sint_set_notifier);
+}
diff --git a/target/i386/hyperv.h b/target/i386/hyperv.h
new file mode 100644
index 0000000000..0c3b562018
--- /dev/null
+++ b/target/i386/hyperv.h
@@ -0,0 +1,42 @@
+/*
+ * QEMU KVM Hyper-V support
+ *
+ * Copyright (C) 2015 Andrey Smetanin <asmetanin@virtuozzo.com>
+ *
+ * Authors:
+ *  Andrey Smetanin <asmetanin@virtuozzo.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef TARGET_I386_HYPERV_H
+#define TARGET_I386_HYPERV_H
+
+#include "cpu.h"
+#include "sysemu/kvm.h"
+#include "qemu/event_notifier.h"
+
+typedef struct HvSintRoute HvSintRoute;
+typedef void (*HvSintAckClb)(HvSintRoute *sint_route);
+
+struct HvSintRoute {
+    uint32_t sint;
+    uint32_t vcpu_id;
+    int gsi;
+    EventNotifier sint_set_notifier;
+    EventNotifier sint_ack_notifier;
+    HvSintAckClb sint_ack_clb;
+};
+
+int kvm_hv_handle_exit(X86CPU *cpu, struct kvm_hyperv_exit *exit);
+
+HvSintRoute *kvm_hv_sint_route_create(uint32_t vcpu_id, uint32_t sint,
+                                      HvSintAckClb sint_ack_clb);
+
+void kvm_hv_sint_route_destroy(HvSintRoute *sint_route);
+
+int kvm_hv_sint_route_set_sint(HvSintRoute *sint_route);
+
+#endif
diff --git a/target/i386/int_helper.c b/target/i386/int_helper.c
new file mode 100644
index 0000000000..9e873ac150
--- /dev/null
+++ b/target/i386/int_helper.c
@@ -0,0 +1,483 @@
+/*
+ *  x86 integer helpers
+ *
+ *  Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "qemu/host-utils.h"
+#include "exec/helper-proto.h"
+
+//#define DEBUG_MULDIV
+
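+/* RCL rotates through CF, so an 8-bit RCL is a 9-bit rotate and a
+ * 16-bit RCL a 17-bit one; these tables reduce shift counts modulo
+ * 9 and 17 respectively. */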
+/* modulo 9 table */
+static const uint8_t rclb_table[32] = {
+    0, 1, 2, 3, 4, 5, 6, 7,
+    8, 0, 1, 2, 3, 4, 5, 6,
+    7, 8, 0, 1, 2, 3, 4, 5,
+    6, 7, 8, 0, 1, 2, 3, 4,
+};
+
+/* modulo 17 table */
+static const uint8_t rclw_table[32] = {
+    0, 1, 2, 3, 4, 5, 6, 7,
+    8, 9, 10, 11, 12, 13, 14, 15,
+    16, 0, 1, 2, 3, 4, 5, 6,
+    7, 8, 9, 10, 11, 12, 13, 14,
+};
+
+/* division, flags are undefined */
+
+void helper_divb_AL(CPUX86State *env, target_ulong t0)
+{
+    unsigned int num, den, q, r;
+
+    num = (env->regs[R_EAX] & 0xffff);
+    den = (t0 & 0xff);
+    if (den == 0) {
+        raise_exception_ra(env, EXCP00_DIVZ, GETPC());
+    }
+    q = (num / den);
+    if (q > 0xff) {
+        raise_exception_ra(env, EXCP00_DIVZ, GETPC());
+    }
+    q &= 0xff;
+    r = (num % den) & 0xff;
+    env->regs[R_EAX] = (env->regs[R_EAX] & ~0xffff) | (r << 8) | q;
+}
+
+void helper_idivb_AL(CPUX86State *env, target_ulong t0)
+{
+    int num, den, q, r;
+
+    num = (int16_t)env->regs[R_EAX];
+    den = (int8_t)t0;
+    if (den == 0) {
+        raise_exception_ra(env, EXCP00_DIVZ, GETPC());
+    }
+    q = (num / den);
+    if (q != (int8_t)q) {
+        raise_exception_ra(env, EXCP00_DIVZ, GETPC());
+    }
+    q &= 0xff;
+    r = (num % den) & 0xff;
+    env->regs[R_EAX] = (env->regs[R_EAX] & ~0xffff) | (r << 8) | q;
+}
+
+void helper_divw_AX(CPUX86State *env, target_ulong t0)
+{
+    unsigned int num, den, q, r;
+
+    num = (env->regs[R_EAX] & 0xffff) | ((env->regs[R_EDX] & 0xffff) << 16);
+    den = (t0 & 0xffff);
+    if (den == 0) {
+        raise_exception_ra(env, EXCP00_DIVZ, GETPC());
+    }
+    q = (num / den);
+    if (q > 0xffff) {
+        raise_exception_ra(env, EXCP00_DIVZ, GETPC());
+    }
+    q &= 0xffff;
+    r = (num % den) & 0xffff;
+    env->regs[R_EAX] = (env->regs[R_EAX] & ~0xffff) | q;
+    env->regs[R_EDX] = (env->regs[R_EDX] & ~0xffff) | r;
+}
+
+void helper_idivw_AX(CPUX86State *env, target_ulong t0)
+{
+    int num, den, q, r;
+
+    num = (env->regs[R_EAX] & 0xffff) | ((env->regs[R_EDX] & 0xffff) << 16);
+    den = (int16_t)t0;
+    if (den == 0) {
+        raise_exception_ra(env, EXCP00_DIVZ, GETPC());
+    }
+    q = (num / den);
+    if (q != (int16_t)q) {
+        raise_exception_ra(env, EXCP00_DIVZ, GETPC());
+    }
+    q &= 0xffff;
+    r = (num % den) & 0xffff;
+    env->regs[R_EAX] = (env->regs[R_EAX] & ~0xffff) | q;
+    env->regs[R_EDX] = (env->regs[R_EDX] & ~0xffff) | r;
+}
+
+void helper_divl_EAX(CPUX86State *env, target_ulong t0)
+{
+    unsigned int den, r;
+    uint64_t num, q;
+
+    num = ((uint32_t)env->regs[R_EAX]) |
+          ((uint64_t)((uint32_t)env->regs[R_EDX]) << 32);
+    den = t0;
+    if (den == 0) {
+        raise_exception_ra(env, EXCP00_DIVZ, GETPC());
+    }
+    q = (num / den);
+    r = (num % den);
+    if (q > 0xffffffff) {
+        raise_exception_ra(env, EXCP00_DIVZ, GETPC());
+    }
+    env->regs[R_EAX] = (uint32_t)q;
+    env->regs[R_EDX] = (uint32_t)r;
+}
+
+void helper_idivl_EAX(CPUX86State *env, target_ulong t0)
+{
+    int den, r;
+    int64_t num, q;
+
+    num = ((uint32_t)env->regs[R_EAX]) |
+          ((uint64_t)((uint32_t)env->regs[R_EDX]) << 32);
+    den = t0;
+    if (den == 0) {
+        raise_exception_ra(env, EXCP00_DIVZ, GETPC());
+    }
+    q = (num / den);
+    r = (num % den);
+    if (q != (int32_t)q) {
+        raise_exception_ra(env, EXCP00_DIVZ, GETPC());
+    }
+    env->regs[R_EAX] = (uint32_t)q;
+    env->regs[R_EDX] = (uint32_t)r;
+}
+
+/* bcd */
+
+/* XXX: exception */
+void helper_aam(CPUX86State *env, int base)
+{
+    int al, ah;
+
+    al = env->regs[R_EAX] & 0xff;
+    ah = al / base;
+    al = al % base;
+    env->regs[R_EAX] = (env->regs[R_EAX] & ~0xffff) | al | (ah << 8);
+    CC_DST = al;
+}
+
+void helper_aad(CPUX86State *env, int base)
+{
+    int al, ah;
+
+    al = env->regs[R_EAX] & 0xff;
+    ah = (env->regs[R_EAX] >> 8) & 0xff;
+    al = ((ah * base) + al) & 0xff;
+    env->regs[R_EAX] = (env->regs[R_EAX] & ~0xffff) | al;
+    CC_DST = al;
+}
+
+void helper_aaa(CPUX86State *env)
+{
+    int icarry;
+    int al, ah, af;
+    int eflags;
+
+    eflags = cpu_cc_compute_all(env, CC_OP);
+    af = eflags & CC_A;
+    al = env->regs[R_EAX] & 0xff;
+    ah = (env->regs[R_EAX] >> 8) & 0xff;
+
+    icarry = (al > 0xf9);
+    if (((al & 0x0f) > 9) || af) {
+        al = (al + 6) & 0x0f;
+        ah = (ah + 1 + icarry) & 0xff;
+        eflags |= CC_C | CC_A;
+    } else {
+        eflags &= ~(CC_C | CC_A);
+        al &= 0x0f;
+    }
+    env->regs[R_EAX] = (env->regs[R_EAX] & ~0xffff) | al | (ah << 8);
+    CC_SRC = eflags;
+}
+
+void helper_aas(CPUX86State *env)
+{
+    int icarry;
+    int al, ah, af;
+    int eflags;
+
+    eflags = cpu_cc_compute_all(env, CC_OP);
+    af = eflags & CC_A;
+    al = env->regs[R_EAX] & 0xff;
+    ah = (env->regs[R_EAX] >> 8) & 0xff;
+
+    icarry = (al < 6);
+    if (((al & 0x0f) > 9) || af) {
+        al = (al - 6) & 0x0f;
+        ah = (ah - 1 - icarry) & 0xff;
+        eflags |= CC_C | CC_A;
+    } else {
+        eflags &= ~(CC_C | CC_A);
+        al &= 0x0f;
+    }
+    env->regs[R_EAX] = (env->regs[R_EAX] & ~0xffff) | al | (ah << 8);
+    CC_SRC = eflags;
+}
+
+void helper_daa(CPUX86State *env)
+{
+    int old_al, al, af, cf;
+    int eflags;
+
+    eflags = cpu_cc_compute_all(env, CC_OP);
+    cf = eflags & CC_C;
+    af = eflags & CC_A;
+    old_al = al = env->regs[R_EAX] & 0xff;
+
+    eflags = 0;
+    if (((al & 0x0f) > 9) || af) {
+        al = (al + 6) & 0xff;
+        eflags |= CC_A;
+    }
+    if ((old_al > 0x99) || cf) {
+        al = (al + 0x60) & 0xff;
+        eflags |= CC_C;
+    }
+    env->regs[R_EAX] = (env->regs[R_EAX] & ~0xff) | al;
+    /* well, speed is not an issue here, so we compute the flags by hand */
+    eflags |= (al == 0) << 6; /* zf */
+    eflags |= parity_table[al]; /* pf */
+    eflags |= (al & 0x80); /* sf */
+    CC_SRC = eflags;
+}
+
+void helper_das(CPUX86State *env)
+{
+    int al, al1, af, cf;
+    int eflags;
+
+    eflags = cpu_cc_compute_all(env, CC_OP);
+    cf = eflags & CC_C;
+    af = eflags & CC_A;
+    al = env->regs[R_EAX] & 0xff;
+
+    eflags = 0;
+    al1 = al;
+    if (((al & 0x0f) > 9) || af) {
+        eflags |= CC_A;
+        if (al < 6 || cf) {
+            eflags |= CC_C;
+        }
+        al = (al - 6) & 0xff;
+    }
+    if ((al1 > 0x99) || cf) {
+        al = (al - 0x60) & 0xff;
+        eflags |= CC_C;
+    }
+    env->regs[R_EAX] = (env->regs[R_EAX] & ~0xff) | al;
+    /* well, speed is not an issue here, so we compute the flags by hand */
+    eflags |= (al == 0) << 6; /* zf */
+    eflags |= parity_table[al]; /* pf */
+    eflags |= (al & 0x80); /* sf */
+    CC_SRC = eflags;
+}
+
+#ifdef TARGET_X86_64
+static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
+{
+    *plow += a;
+    /* carry test */
+    if (*plow < a) {
+        (*phigh)++;
+    }
+    *phigh += b;
+}
+
+static void neg128(uint64_t *plow, uint64_t *phigh)
+{
+    *plow = ~*plow;
+    *phigh = ~*phigh;
+    add128(plow, phigh, 1, 0);
+}
+
+/* return TRUE if overflow */
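+/* 128/64 -> 64 bit unsigned division by shift-and-subtract, one quotient
+ * bit per iteration; overflows when the high half is >= the divisor. */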
+static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
+{
+    uint64_t q, r, a1, a0;
+    int i, qb, ab;
+
+    a0 = *plow;
+    a1 = *phigh;
+    if (a1 == 0) {
+        q = a0 / b;
+        r = a0 % b;
+        *plow = q;
+        *phigh = r;
+    } else {
+        if (a1 >= b) {
+            return 1;
+        }
+        /* XXX: use a better algorithm */
+        for (i = 0; i < 64; i++) {
+            ab = a1 >> 63;
+            a1 = (a1 << 1) | (a0 >> 63);
+            if (ab || a1 >= b) {
+                a1 -= b;
+                qb = 1;
+            } else {
+                qb = 0;
+            }
+            a0 = (a0 << 1) | qb;
+        }
+#if defined(DEBUG_MULDIV)
+        printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64
+               ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
+               *phigh, *plow, b, a0, a1);
+#endif
+        *plow = a0;
+        *phigh = a1;
+    }
+    return 0;
+}
+
+/* return TRUE if overflow */
+static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
+{
+    int sa, sb;
+
+    sa = ((int64_t)*phigh < 0);
+    if (sa) {
+        neg128(plow, phigh);
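+        /* MCA broadcast: the other CPUs see a dummy uncorrected error
+         * in bank 1 so that they enter their machine-check handlers
+         * as well. */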
+    }
+    sb = (b < 0);
+    if (sb) {
+        b = -b;
+    }
+    if (div64(plow, phigh, b) != 0) {
+        return 1;
+    }
+    if (sa ^ sb) {
+        if (*plow > (1ULL << 63)) {
+            return 1;
+        }
+        *plow = -*plow;
+    } else {
+        if (*plow >= (1ULL << 63)) {
+            return 1;
+        }
+    }
+    if (sa) {
+        *phigh = -*phigh;
+    }
+    return 0;
+}
+
+void helper_divq_EAX(CPUX86State *env, target_ulong t0)
+{
+    uint64_t r0, r1;
+
+    if (t0 == 0) {
+        raise_exception_ra(env, EXCP00_DIVZ, GETPC());
+    }
+    r0 = env->regs[R_EAX];
+    r1 = env->regs[R_EDX];
+    if (div64(&r0, &r1, t0)) {
+        raise_exception_ra(env, EXCP00_DIVZ, GETPC());
+    }
+    env->regs[R_EAX] = r0;
+    env->regs[R_EDX] = r1;
+}
+
+void helper_idivq_EAX(CPUX86State *env, target_ulong t0)
+{
+    uint64_t r0, r1;
+
+    if (t0 == 0) {
+        raise_exception_ra(env, EXCP00_DIVZ, GETPC());
+    }
+    r0 = env->regs[R_EAX];
+    r1 = env->regs[R_EDX];
+    if (idiv64(&r0, &r1, t0)) {
+        raise_exception_ra(env, EXCP00_DIVZ, GETPC());
+    }
+    env->regs[R_EAX] = r0;
+    env->regs[R_EDX] = r1;
+}
+#endif
+
+#if TARGET_LONG_BITS == 32
+# define ctztl  ctz32
+# define clztl  clz32
+#else
+# define ctztl  ctz64
+# define clztl  clz64
+#endif
+
+/* bit operations */
+target_ulong helper_ctz(target_ulong t0)
+{
+    return ctztl(t0);
+}
+
+target_ulong helper_clz(target_ulong t0)
+{
+    return clztl(t0);
+}
+
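+/* BMI2 PDEP scatters the low bits of src to the set-bit positions of
+ * mask, e.g. pdep(0b101, 0b11010) == 0b10010; PEXT is the inverse
+ * gather operation. */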
+target_ulong helper_pdep(target_ulong src, target_ulong mask)
+{
+    target_ulong dest = 0;
+    int i, o;
+
+    for (i = 0; mask != 0; i++) {
+        o = ctztl(mask);
+        mask &= mask - 1;
+        dest |= ((src >> i) & 1) << o;
+    }
+    return dest;
+}
+
+target_ulong helper_pext(target_ulong src, target_ulong mask)
+{
+    target_ulong dest = 0;
+    int i, o;
+
+    for (o = 0; mask != 0; o++) {
+        i = ctztl(mask);
+        mask &= mask - 1;
+        dest |= ((src >> i) & 1) << o;
+    }
+    return dest;
+}
+
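+/* Instantiate the rcl/rcr helpers for each operand size: SHIFT 0/1/2/3
+ * select the 8/16/32/64-bit versions of shift_helper_template.h. */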
+#define SHIFT 0
+#include "shift_helper_template.h"
+#undef SHIFT
+
+#define SHIFT 1
+#include "shift_helper_template.h"
+#undef SHIFT
+
+#define SHIFT 2
+#include "shift_helper_template.h"
+#undef SHIFT
+
+#ifdef TARGET_X86_64
+#define SHIFT 3
+#include "shift_helper_template.h"
+#undef SHIFT
+#endif
+
+/* Test that BIT is enabled in CR4.  If not, raise an illegal opcode
+   exception.  This reduces the requirements for rare CR4 bits being
+   mapped into HFLAGS.  */
+void helper_cr4_testbit(CPUX86State *env, uint32_t bit)
+{
+    if (unlikely((env->cr[4] & bit) == 0)) {
+        raise_exception_ra(env, EXCP06_ILLOP, GETPC());
+    }
+}
diff --git a/target/i386/kvm-stub.c b/target/i386/kvm-stub.c
new file mode 100644
index 0000000000..bda4dc2f0c
--- /dev/null
+++ b/target/i386/kvm-stub.c
@@ -0,0 +1,42 @@
+/*
+ * QEMU KVM x86 specific function stubs
+ *
+ * Copyright Linaro Limited 2012
+ *
+ * Author: Peter Maydell <peter.maydell@linaro.org>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "cpu.h"
+#include "kvm_i386.h"
+
+bool kvm_allows_irq0_override(void)
+{
+    return true;
+}
+
+#ifndef __OPTIMIZE__
+bool kvm_has_smm(void)
+{
+    return true;
+}
+
+bool kvm_enable_x2apic(void)
+{
+    return false;
+}
+
+/* This function is only called inside conditionals which we
+ * rely on the compiler to optimize out when CONFIG_KVM is not
+ * defined.
+ */
+uint32_t kvm_arch_get_supported_cpuid(KVMState *s, uint32_t function,
+                                      uint32_t index, int reg)
+{
+    abort();
+}
+#endif
diff --git a/target/i386/kvm.c b/target/i386/kvm.c
new file mode 100644
index 0000000000..f62264a7a8
--- /dev/null
+++ b/target/i386/kvm.c
@@ -0,0 +1,3536 @@
+/*
+ * QEMU KVM support
+ *
+ * Copyright (C) 2006-2008 Qumranet Technologies
+ * Copyright IBM, Corp. 2008
+ *
+ * Authors:
+ *  Anthony Liguori   <aliguori@us.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include <sys/ioctl.h>
+#include <sys/utsname.h>
+
+#include <linux/kvm.h>
+#include <linux/kvm_para.h>
+
+#include "qemu-common.h"
+#include "cpu.h"
+#include "sysemu/sysemu.h"
+#include "sysemu/kvm_int.h"
+#include "kvm_i386.h"
+#include "hyperv.h"
+
+#include "exec/gdbstub.h"
+#include "qemu/host-utils.h"
+#include "qemu/config-file.h"
+#include "qemu/error-report.h"
+#include "hw/i386/pc.h"
+#include "hw/i386/apic.h"
+#include "hw/i386/apic_internal.h"
+#include "hw/i386/apic-msidef.h"
+#include "hw/i386/intel_iommu.h"
+#include "hw/i386/x86-iommu.h"
+
+#include "exec/ioport.h"
+#include "standard-headers/asm-x86/hyperv.h"
+#include "hw/pci/pci.h"
+#include "hw/pci/msi.h"
+#include "migration/migration.h"
+#include "exec/memattrs.h"
+#include "trace.h"
+
+//#define DEBUG_KVM
+
+#ifdef DEBUG_KVM
+#define DPRINTF(fmt, ...) \
+    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
+#else
+#define DPRINTF(fmt, ...) \
+    do { } while (0)
+#endif
+
+#define MSR_KVM_WALL_CLOCK  0x11
+#define MSR_KVM_SYSTEM_TIME 0x12
+
+/* A 4096-byte buffer can hold the 8-byte kvm_msrs header, plus
+ * 255 kvm_msr_entry structs */
+#define MSR_BUF_SIZE 4096
+
+#ifndef BUS_MCEERR_AR
+#define BUS_MCEERR_AR 4
+#endif
+#ifndef BUS_MCEERR_AO
+#define BUS_MCEERR_AO 5
+#endif
+
+const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
+    KVM_CAP_INFO(SET_TSS_ADDR),
+    KVM_CAP_INFO(EXT_CPUID),
+    KVM_CAP_INFO(MP_STATE),
+    KVM_CAP_LAST_INFO
+};
+
+static bool has_msr_star;
+static bool has_msr_hsave_pa;
+static bool has_msr_tsc_aux;
+static bool has_msr_tsc_adjust;
+static bool has_msr_tsc_deadline;
+static bool has_msr_feature_control;
+static bool has_msr_misc_enable;
+static bool has_msr_smbase;
+static bool has_msr_bndcfgs;
+static int lm_capable_kernel;
+static bool has_msr_hv_hypercall;
+static bool has_msr_hv_crash;
+static bool has_msr_hv_reset;
+static bool has_msr_hv_vpindex;
+static bool has_msr_hv_runtime;
+static bool has_msr_hv_synic;
+static bool has_msr_hv_stimer;
+static bool has_msr_xss;
+
+static bool has_msr_architectural_pmu;
+static uint32_t num_architectural_pmu_counters;
+
+static int has_xsave;
+static int has_xcrs;
+static int has_pit_state2;
+
+static bool has_msr_mcg_ext_ctl;
+
+static struct kvm_cpuid2 *cpuid_cache;
+
+int kvm_has_pit_state2(void)
+{
+    return has_pit_state2;
+}
+
+bool kvm_has_smm(void)
+{
+    return kvm_check_extension(kvm_state, KVM_CAP_X86_SMM);
+}
+
+bool kvm_allows_irq0_override(void)
+{
+    return !kvm_irqchip_in_kernel() || kvm_has_gsi_routing();
+}
+
+static bool kvm_x2apic_api_set_flags(uint64_t flags)
+{
+    KVMState *s = KVM_STATE(current_machine->accelerator);
+
+    return !kvm_vm_enable_cap(s, KVM_CAP_X2APIC_API, 0, flags);
+}
+
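+/* Evaluate fn once and cache the value in _result; later calls return
+ * the cached value.  Note the embedded "return": the macro can only be
+ * used as the body of a function whose return type matches _result. */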
+#define MEMORIZE(fn, _result) \
+    ({ \
+        static bool _memorized; \
+        \
+        if (_memorized) { \
+            return _result; \
+        } \
+        _memorized = true; \
+        _result = fn; \
+    })
+
+static bool has_x2apic_api;
+
+bool kvm_has_x2apic_api(void)
+{
+    return has_x2apic_api;
+}
+
+bool kvm_enable_x2apic(void)
+{
+    return MEMORIZE(
+             kvm_x2apic_api_set_flags(KVM_X2APIC_API_USE_32BIT_IDS |
+                                      KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK),
+             has_x2apic_api);
+}
+
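+/* KVM_GET_MSRS takes a struct kvm_msrs header followed by a flexible
+ * array of kvm_msr_entry; the anonymous struct below lays out the
+ * header plus a single entry contiguously on the stack. */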
+static int kvm_get_tsc(CPUState *cs)
+{
+    X86CPU *cpu = X86_CPU(cs);
+    CPUX86State *env = &cpu->env;
+    struct {
+        struct kvm_msrs info;
+        struct kvm_msr_entry entries[1];
+    } msr_data;
+    int ret;
+
+    if (env->tsc_valid) {
+        return 0;
+    }
+
+    msr_data.info.nmsrs = 1;
+    msr_data.entries[0].index = MSR_IA32_TSC;
+    env->tsc_valid = !runstate_is_running();
+
+    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, &msr_data);
+    if (ret < 0) {
+        return ret;
+    }
+
+    assert(ret == 1);
+    env->tsc = msr_data.entries[0].data;
+    return 0;
+}
+
+static inline void do_kvm_synchronize_tsc(CPUState *cpu, run_on_cpu_data arg)
+{
+    kvm_get_tsc(cpu);
+}
+
+void kvm_synchronize_all_tsc(void)
+{
+    CPUState *cpu;
+
+    if (kvm_enabled()) {
+        CPU_FOREACH(cpu) {
+            run_on_cpu(cpu, do_kvm_synchronize_tsc, RUN_ON_CPU_NULL);
+        }
+    }
+}
+
+static struct kvm_cpuid2 *try_get_cpuid(KVMState *s, int max)
+{
+    struct kvm_cpuid2 *cpuid;
+    int r, size;
+
+    size = sizeof(*cpuid) + max * sizeof(*cpuid->entries);
+    cpuid = g_malloc0(size);
+    cpuid->nent = max;
+    r = kvm_ioctl(s, KVM_GET_SUPPORTED_CPUID, cpuid);
+    if (r == 0 && cpuid->nent >= max) {
+        r = -E2BIG;
+    }
+    if (r < 0) {
+        if (r == -E2BIG) {
+            g_free(cpuid);
+            return NULL;
+        } else {
+            fprintf(stderr, "KVM_GET_SUPPORTED_CPUID failed: %s\n",
+                    strerror(-r));
+            exit(1);
+        }
+    }
+    return cpuid;
+}
+
+/* Run KVM_GET_SUPPORTED_CPUID ioctl(), allocating a buffer large enough
+ * for all entries.
+ */
+static struct kvm_cpuid2 *get_supported_cpuid(KVMState *s)
+{
+    struct kvm_cpuid2 *cpuid;
+    int max = 1;
+
+    if (cpuid_cache != NULL) {
+        return cpuid_cache;
+    }
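+    /* Double the buffer until all entries fit; try_get_cpuid() returns
+     * NULL while it is still too small. */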
+    while ((cpuid = try_get_cpuid(s, max)) == NULL) {
+        max *= 2;
+    }
+    cpuid_cache = cpuid;
+    return cpuid;
+}
+
+static const struct kvm_para_features {
+    int cap;
+    int feature;
+} para_features[] = {
+    { KVM_CAP_CLOCKSOURCE, KVM_FEATURE_CLOCKSOURCE },
+    { KVM_CAP_NOP_IO_DELAY, KVM_FEATURE_NOP_IO_DELAY },
+    { KVM_CAP_PV_MMU, KVM_FEATURE_MMU_OP },
+    { KVM_CAP_ASYNC_PF, KVM_FEATURE_ASYNC_PF },
+};
+
+static int get_para_features(KVMState *s)
+{
+    int i, features = 0;
+
+    for (i = 0; i < ARRAY_SIZE(para_features); i++) {
+        if (kvm_check_extension(s, para_features[i].cap)) {
+            features |= (1 << para_features[i].feature);
+        }
+    }
+
+    return features;
+}
+
+
+/* Return the value of a specific register of the given cpuid entry.
+ */
+static uint32_t cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry, int reg)
+{
+    uint32_t ret = 0;
+    switch (reg) {
+    case R_EAX:
+        ret = entry->eax;
+        break;
+    case R_EBX:
+        ret = entry->ebx;
+        break;
+    case R_ECX:
+        ret = entry->ecx;
+        break;
+    case R_EDX:
+        ret = entry->edx;
+        break;
+    }
+    return ret;
+}
+
+/* Find the entry matching function/index in a kvm_cpuid2 struct.
+ */
+static struct kvm_cpuid_entry2 *cpuid_find_entry(struct kvm_cpuid2 *cpuid,
+                                                 uint32_t function,
+                                                 uint32_t index)
+{
+    int i;
+    for (i = 0; i < cpuid->nent; ++i) {
+        if (cpuid->entries[i].function == function &&
+            cpuid->entries[i].index == index) {
+            return &cpuid->entries[i];
+        }
+    }
+    /* not found: */
+    return NULL;
+}
+
+uint32_t kvm_arch_get_supported_cpuid(KVMState *s, uint32_t function,
+                                      uint32_t index, int reg)
+{
+    struct kvm_cpuid2 *cpuid;
+    uint32_t ret = 0;
+    uint32_t cpuid_1_edx;
+    bool found = false;
+
+    cpuid = get_supported_cpuid(s);
+
+    struct kvm_cpuid_entry2 *entry = cpuid_find_entry(cpuid, function, index);
+    if (entry) {
+        found = true;
+        ret = cpuid_entry_get_reg(entry, reg);
+    }
+
+    /* Fixups for the data returned by KVM, below */
+
+    if (function == 1 && reg == R_EDX) {
+        /* KVM before 2.6.30 misreports the following features */
+        ret |= CPUID_MTRR | CPUID_PAT | CPUID_MCE | CPUID_MCA;
+    } else if (function == 1 && reg == R_ECX) {
+        /* We can set the hypervisor flag, even if KVM does not return it on
+         * GET_SUPPORTED_CPUID
+         */
+        ret |= CPUID_EXT_HYPERVISOR;
+        /* tsc-deadline flag is not returned by GET_SUPPORTED_CPUID, but it
+         * can be enabled if the kernel has KVM_CAP_TSC_DEADLINE_TIMER,
+         * and the irqchip is in the kernel.
+         */
+        if (kvm_irqchip_in_kernel() &&
+                kvm_check_extension(s, KVM_CAP_TSC_DEADLINE_TIMER)) {
+            ret |= CPUID_EXT_TSC_DEADLINE_TIMER;
+        }
+
+        /* x2apic is reported by GET_SUPPORTED_CPUID, but it can't be enabled
+         * without the in-kernel irqchip
+         */
+        if (!kvm_irqchip_in_kernel()) {
+            ret &= ~CPUID_EXT_X2APIC;
+        }
+    } else if (function == 6 && reg == R_EAX) {
+        ret |= CPUID_6_EAX_ARAT; /* safe to allow because of emulated APIC */
+    } else if (function == 0x80000001 && reg == R_EDX) {
+        /* On Intel, kvm returns cpuid according to the Intel spec,
+         * so add missing bits according to the AMD spec:
+         */
+        cpuid_1_edx = kvm_arch_get_supported_cpuid(s, 1, 0, R_EDX);
+        ret |= cpuid_1_edx & CPUID_EXT2_AMD_ALIASES;
+    } else if (function == KVM_CPUID_FEATURES && reg == R_EAX) {
+        /* kvm_pv_unhalt is reported by GET_SUPPORTED_CPUID, but it can't
+         * be enabled without the in-kernel irqchip
+         */
+        if (!kvm_irqchip_in_kernel()) {
+            ret &= ~(1U << KVM_FEATURE_PV_UNHALT);
+        }
+    }
+
+    /* fallback for older kernels */
+    if ((function == KVM_CPUID_FEATURES) && !found) {
+        ret = get_para_features(s);
+    }
+
+    return ret;
+}
+
+typedef struct HWPoisonPage {
+    ram_addr_t ram_addr;
+    QLIST_ENTRY(HWPoisonPage) list;
+} HWPoisonPage;
+
+static QLIST_HEAD(, HWPoisonPage) hwpoison_page_list =
+    QLIST_HEAD_INITIALIZER(hwpoison_page_list);
+
+static void kvm_unpoison_all(void *param)
+{
+    HWPoisonPage *page, *next_page;
+
+    QLIST_FOREACH_SAFE(page, &hwpoison_page_list, list, next_page) {
+        QLIST_REMOVE(page, list);
+        qemu_ram_remap(page->ram_addr, TARGET_PAGE_SIZE);
+        g_free(page);
+    }
+}
+
+static void kvm_hwpoison_page_add(ram_addr_t ram_addr)
+{
+    HWPoisonPage *page;
+
+    QLIST_FOREACH(page, &hwpoison_page_list, list) {
+        if (page->ram_addr == ram_addr) {
+            return;
+        }
+    }
+    page = g_new(HWPoisonPage, 1);
+    page->ram_addr = ram_addr;
+    QLIST_INSERT_HEAD(&hwpoison_page_list, page, list);
+}
+
+static int kvm_get_mce_cap_supported(KVMState *s, uint64_t *mce_cap,
+                                     int *max_banks)
+{
+    int r;
+
+    r = kvm_check_extension(s, KVM_CAP_MCE);
+    if (r > 0) {
+        *max_banks = r;
+        return kvm_ioctl(s, KVM_X86_GET_MCE_CAP_SUPPORTED, mce_cap);
+    }
+    return -ENOSYS;
+}
+
+static void kvm_mce_inject(X86CPU *cpu, hwaddr paddr, int code)
+{
+    CPUState *cs = CPU(cpu);
+    CPUX86State *env = &cpu->env;
+    uint64_t status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN |
+                      MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S;
+    uint64_t mcg_status = MCG_STATUS_MCIP;
+    int flags = 0;
+
+    if (code == BUS_MCEERR_AR) {
+        status |= MCI_STATUS_AR | 0x134;
+        mcg_status |= MCG_STATUS_EIPV;
+    } else {
+        status |= 0xc0;
+        mcg_status |= MCG_STATUS_RIPV;
+    }
+
+    flags = cpu_x86_support_mca_broadcast(env) ? MCE_INJECT_BROADCAST : 0;
+    /* Read the value of MSR_EXT_MCG_CTL that was set by the guest
+     * kernel back into env->mcg_ext_ctl.
+     */
+    cpu_synchronize_state(cs);
+    if (env->mcg_ext_ctl & MCG_EXT_CTL_LMCE_EN) {
+        mcg_status |= MCG_STATUS_LMCE;
+        flags = 0;
+    }
+
+    cpu_x86_inject_mce(NULL, cpu, 9, status, mcg_status, paddr,
+                       (MCM_ADDR_PHYS << 6) | 0xc, flags);
+}
+
+static void hardware_memory_error(void)
+{
+    fprintf(stderr, "Hardware memory error!\n");
+    exit(1);
+}
+
+int kvm_arch_on_sigbus_vcpu(CPUState *c, int code, void *addr)
+{
+    X86CPU *cpu = X86_CPU(c);
+    CPUX86State *env = &cpu->env;
+    ram_addr_t ram_addr;
+    hwaddr paddr;
+
+    if ((env->mcg_cap & MCG_SER_P) && addr
+        && (code == BUS_MCEERR_AR || code == BUS_MCEERR_AO)) {
+        ram_addr = qemu_ram_addr_from_host(addr);
+        if (ram_addr == RAM_ADDR_INVALID ||
+            !kvm_physical_memory_addr_from_host(c->kvm_state, addr, &paddr)) {
+            fprintf(stderr, "Hardware memory error for memory used by "
+                    "QEMU itself instead of guest system!\n");
+            /* Hope we are lucky for AO MCE */
+            if (code == BUS_MCEERR_AO) {
+                return 0;
+            } else {
+                hardware_memory_error();
+            }
+        }
+        kvm_hwpoison_page_add(ram_addr);
+        kvm_mce_inject(cpu, paddr, code);
+    } else {
+        if (code == BUS_MCEERR_AO) {
+            return 0;
+        } else if (code == BUS_MCEERR_AR) {
+            hardware_memory_error();
+        } else {
+            return 1;
+        }
+    }
+    return 0;
+}
+
+int kvm_arch_on_sigbus(int code, void *addr)
+{
+    X86CPU *cpu = X86_CPU(first_cpu);
+
+    if ((cpu->env.mcg_cap & MCG_SER_P) && addr && code == BUS_MCEERR_AO) {
+        ram_addr_t ram_addr;
+        hwaddr paddr;
+
+        /* Hope we are lucky for AO MCE */
+        ram_addr = qemu_ram_addr_from_host(addr);
+        if (ram_addr == RAM_ADDR_INVALID ||
+            !kvm_physical_memory_addr_from_host(first_cpu->kvm_state,
+                                                addr, &paddr)) {
+            fprintf(stderr, "Hardware memory error for memory used by "
+                    "QEMU itself instead of guest system!: %p\n", addr);
+            return 0;
+        }
+        kvm_hwpoison_page_add(ram_addr);
+        kvm_mce_inject(X86_CPU(first_cpu), paddr, code);
+    } else {
+        if (code == BUS_MCEERR_AO) {
+            return 0;
+        } else if (code == BUS_MCEERR_AR) {
+            hardware_memory_error();
+        } else {
+            return 1;
+        }
+    }
+    return 0;
+}
+
+static int kvm_inject_mce_oldstyle(X86CPU *cpu)
+{
+    CPUX86State *env = &cpu->env;
+
+    if (!kvm_has_vcpu_events() && env->exception_injected == EXCP12_MCHK) {
+        unsigned int bank, bank_num = env->mcg_cap & 0xff;
+        struct kvm_x86_mce mce;
+
+        env->exception_injected = -1;
+
+        /*
+         * There must be at least one bank in use if an MCE is pending.
+         * Find it and use its values for the event injection.
+         */
+        for (bank = 0; bank < bank_num; bank++) {
+            if (env->mce_banks[bank * 4 + 1] & MCI_STATUS_VAL) {
+                break;
+            }
+        }
+        assert(bank < bank_num);
+
+        mce.bank = bank;
+        mce.status = env->mce_banks[bank * 4 + 1];
+        mce.mcg_status = env->mcg_status;
+        mce.addr = env->mce_banks[bank * 4 + 2];
+        mce.misc = env->mce_banks[bank * 4 + 3];
+
+        return kvm_vcpu_ioctl(CPU(cpu), KVM_X86_SET_MCE, &mce);
+    }
+    return 0;
+}
+
+static void cpu_update_state(void *opaque, int running, RunState state)
+{
+    CPUX86State *env = opaque;
+
+    if (running) {
+        env->tsc_valid = false;
+    }
+}
+
+unsigned long kvm_arch_vcpu_id(CPUState *cs)
+{
+    X86CPU *cpu = X86_CPU(cs);
+    return cpu->apic_id;
+}
+
+#ifndef KVM_CPUID_SIGNATURE_NEXT
+#define KVM_CPUID_SIGNATURE_NEXT                0x40000100
+#endif
+
+static bool hyperv_hypercall_available(X86CPU *cpu)
+{
+    return cpu->hyperv_vapic ||
+           (cpu->hyperv_spinlock_attempts != HYPERV_SPINLOCK_NEVER_RETRY);
+}
+
+static bool hyperv_enabled(X86CPU *cpu)
+{
+    CPUState *cs = CPU(cpu);
+    return kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV) > 0 &&
+           (hyperv_hypercall_available(cpu) ||
+            cpu->hyperv_time  ||
+            cpu->hyperv_relaxed_timing ||
+            cpu->hyperv_crash ||
+            cpu->hyperv_reset ||
+            cpu->hyperv_vpindex ||
+            cpu->hyperv_runtime ||
+            cpu->hyperv_synic ||
+            cpu->hyperv_stimer);
+}
+
+static int kvm_arch_set_tsc_khz(CPUState *cs)
+{
+    X86CPU *cpu = X86_CPU(cs);
+    CPUX86State *env = &cpu->env;
+    int r;
+
+    if (!env->tsc_khz) {
+        return 0;
+    }
+
+    r = kvm_check_extension(cs->kvm_state, KVM_CAP_TSC_CONTROL) ?
+        kvm_vcpu_ioctl(cs, KVM_SET_TSC_KHZ, env->tsc_khz) :
+        -ENOTSUP;
+    if (r < 0) {
+        /* When KVM_SET_TSC_KHZ fails, it's an error only if the current
+         * TSC frequency doesn't match the one we want.
+         */
+        int cur_freq = kvm_check_extension(cs->kvm_state, KVM_CAP_GET_TSC_KHZ) ?
+                       kvm_vcpu_ioctl(cs, KVM_GET_TSC_KHZ) :
+                       -ENOTSUP;
+        if (cur_freq <= 0 || cur_freq != env->tsc_khz) {
+            error_report("warning: TSC frequency mismatch between "
+                         "VM (%" PRId64 " kHz) and host (%d kHz), "
+                         "and TSC scaling unavailable",
+                         env->tsc_khz, cur_freq);
+            return r;
+        }
+    }
+
+    return 0;
+}
+
+static int hyperv_handle_properties(CPUState *cs)
+{
+    X86CPU *cpu = X86_CPU(cs);
+    CPUX86State *env = &cpu->env;
+
+    if (cpu->hyperv_time &&
+            kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV_TIME) <= 0) {
+        cpu->hyperv_time = false;
+    }
+
+    if (cpu->hyperv_relaxed_timing) {
+        env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_HYPERCALL_AVAILABLE;
+    }
+    if (cpu->hyperv_vapic) {
+        env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_HYPERCALL_AVAILABLE;
+        env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_APIC_ACCESS_AVAILABLE;
+    }
+    if (cpu->hyperv_time) {
+        env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_HYPERCALL_AVAILABLE;
+        env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_TIME_REF_COUNT_AVAILABLE;
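+        /* 0x200 == HV_X64_MSR_REFERENCE_TSC_AVAILABLE (EAX bit 9). */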
+        env->features[FEAT_HYPERV_EAX] |= 0x200;
+    }
+    if (cpu->hyperv_crash && has_msr_hv_crash) {
+        env->features[FEAT_HYPERV_EDX] |= HV_X64_GUEST_CRASH_MSR_AVAILABLE;
+    }
+    env->features[FEAT_HYPERV_EDX] |= HV_X64_CPU_DYNAMIC_PARTITIONING_AVAILABLE;
+    if (cpu->hyperv_reset && has_msr_hv_reset) {
+        env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_RESET_AVAILABLE;
+    }
+    if (cpu->hyperv_vpindex && has_msr_hv_vpindex) {
+        env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_VP_INDEX_AVAILABLE;
+    }
+    if (cpu->hyperv_runtime && has_msr_hv_runtime) {
+        env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_VP_RUNTIME_AVAILABLE;
+    }
+    if (cpu->hyperv_synic) {
+        int sint;
+
+        if (!has_msr_hv_synic ||
+            kvm_vcpu_enable_cap(cs, KVM_CAP_HYPERV_SYNIC, 0)) {
+            fprintf(stderr, "Hyper-V SynIC is not supported by kernel\n");
+            return -ENOSYS;
+        }
+
+        env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_SYNIC_AVAILABLE;
+        env->msr_hv_synic_version = HV_SYNIC_VERSION_1;
+        for (sint = 0; sint < ARRAY_SIZE(env->msr_hv_synic_sint); sint++) {
+            env->msr_hv_synic_sint[sint] = HV_SYNIC_SINT_MASKED;
+        }
+    }
+    if (cpu->hyperv_stimer) {
+        if (!has_msr_hv_stimer) {
+            fprintf(stderr, "Hyper-V timers aren't supported by kernel\n");
+            return -ENOSYS;
+        }
+        env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_SYNTIMER_AVAILABLE;
+    }
+    return 0;
+}
+
+static Error *invtsc_mig_blocker;
+
+#define KVM_MAX_CPUID_ENTRIES  100
+
+int kvm_arch_init_vcpu(CPUState *cs)
+{
+    struct {
+        struct kvm_cpuid2 cpuid;
+        struct kvm_cpuid_entry2 entries[KVM_MAX_CPUID_ENTRIES];
+    } QEMU_PACKED cpuid_data;
+    X86CPU *cpu = X86_CPU(cs);
+    CPUX86State *env = &cpu->env;
+    uint32_t limit, i, j, cpuid_i;
+    uint32_t unused;
+    struct kvm_cpuid_entry2 *c;
+    uint32_t signature[3];
+    int kvm_base = KVM_CPUID_SIGNATURE;
+    int r;
+
+    memset(&cpuid_data, 0, sizeof(cpuid_data));
+
+    cpuid_i = 0;
+
+    /* Paravirtualization CPUIDs */
+    if (hyperv_enabled(cpu)) {
+        c = &cpuid_data.entries[cpuid_i++];
+        c->function = HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS;
+        if (!cpu->hyperv_vendor_id) {
+            memcpy(signature, "Microsoft Hv", 12);
+        } else {
+            size_t len = strlen(cpu->hyperv_vendor_id);
+
+            if (len > 12) {
+                error_report("hv-vendor-id truncated to 12 characters");
+                len = 12;
+            }
+            memset(signature, 0, 12);
+            memcpy(signature, cpu->hyperv_vendor_id, len);
+        }
+        c->eax = HYPERV_CPUID_MIN;
+        c->ebx = signature[0];
+        c->ecx = signature[1];
+        c->edx = signature[2];
+
+        c = &cpuid_data.entries[cpuid_i++];
+        c->function = HYPERV_CPUID_INTERFACE;
+        memcpy(signature, "Hv#1\0\0\0\0\0\0\0\0", 12);
+        c->eax = signature[0];
+        c->ebx = 0;
+        c->ecx = 0;
+        c->edx = 0;
+
+        c = &cpuid_data.entries[cpuid_i++];
+        c->function = HYPERV_CPUID_VERSION;
+        c->eax = 0x00001bbc;
+        c->ebx = 0x00060001;
+
+        c = &cpuid_data.entries[cpuid_i++];
+        c->function = HYPERV_CPUID_FEATURES;
+        r = hyperv_handle_properties(cs);
+        if (r) {
+            return r;
+        }
+        c->eax = env->features[FEAT_HYPERV_EAX];
+        c->ebx = env->features[FEAT_HYPERV_EBX];
+        c->edx = env->features[FEAT_HYPERV_EDX];
+
+        c = &cpuid_data.entries[cpuid_i++];
+        c->function = HYPERV_CPUID_ENLIGHTMENT_INFO;
+        if (cpu->hyperv_relaxed_timing) {
+            c->eax |= HV_X64_RELAXED_TIMING_RECOMMENDED;
+        }
+        if (cpu->hyperv_vapic) {
+            c->eax |= HV_X64_APIC_ACCESS_RECOMMENDED;
+        }
+        c->ebx = cpu->hyperv_spinlock_attempts;
+
+        c = &cpuid_data.entries[cpuid_i++];
+        c->function = HYPERV_CPUID_IMPLEMENT_LIMITS;
+        c->eax = 0x40;
+        c->ebx = 0x40;
+
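+        /* The Hyper-V leaves occupy the 0x40000000 range, so the KVM
+         * signature is pushed up to the next 0x100-aligned base. */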
+        kvm_base = KVM_CPUID_SIGNATURE_NEXT;
+        has_msr_hv_hypercall = true;
+    }
+
+    if (cpu->expose_kvm) {
+        memcpy(signature, "KVMKVMKVM\0\0\0", 12);
+        c = &cpuid_data.entries[cpuid_i++];
+        c->function = KVM_CPUID_SIGNATURE | kvm_base;
+        c->eax = KVM_CPUID_FEATURES | kvm_base;
+        c->ebx = signature[0];
+        c->ecx = signature[1];
+        c->edx = signature[2];
+
+        c = &cpuid_data.entries[cpuid_i++];
+        c->function = KVM_CPUID_FEATURES | kvm_base;
+        c->eax = env->features[FEAT_KVM];
+    }
+
+    cpu_x86_cpuid(env, 0, 0, &limit, &unused, &unused, &unused);
+
+    for (i = 0; i <= limit; i++) {
+        if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
+            fprintf(stderr, "unsupported level value: 0x%x\n", limit);
+            abort();
+        }
+        c = &cpuid_data.entries[cpuid_i++];
+
+        switch (i) {
+        case 2: {
+            /* Keep reading function 2 until all the entries have been
+             * consumed; EAX[7:0] of the first read gives the required
+             * number of reads. */
+            int times;
+
+            c->function = i;
+            c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC |
+                       KVM_CPUID_FLAG_STATE_READ_NEXT;
+            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
+            times = c->eax & 0xff;
+
+            for (j = 1; j < times; ++j) {
+                if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
+                    fprintf(stderr, "cpuid_data is full, no space for "
+                            "cpuid(eax:2):eax & 0xf = 0x%x\n", times);
+                    abort();
+                }
+                c = &cpuid_data.entries[cpuid_i++];
+                c->function = i;
+                c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC;
+                cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
+            }
+            break;
+        }
+        case 4:
+        case 0xb:
+        case 0xd:
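+            /* Subleaf-indexed functions: iterate ECX until the leaf's
+             * terminator (EAX == 0 for leaf 4, level type ECX[15:8] == 0
+             * for leaf 0xb, at most 64 subleaves for leaf 0xd). */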
+            for (j = 0; ; j++) {
+                if (i == 0xd && j == 64) {
+                    break;
+                }
+                c->function = i;
+                c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
+                c->index = j;
+                cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
+
+                if (i == 4 && c->eax == 0) {
+                    break;
+                }
+                if (i == 0xb && !(c->ecx & 0xff00)) {
+                    break;
+                }
+                if (i == 0xd && c->eax == 0) {
+                    continue;
+                }
+                if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
+                    fprintf(stderr, "cpuid_data is full, no space for "
+                            "cpuid(eax:0x%x,ecx:0x%x)\n", i, j);
+                    abort();
+                }
+                c = &cpuid_data.entries[cpuid_i++];
+            }
+            break;
+        default:
+            c->function = i;
+            c->flags = 0;
+            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
+            break;
+        }
+    }
+
+    if (limit >= 0x0a) {
+        uint32_t ver;
+
+        cpu_x86_cpuid(env, 0x0a, 0, &ver, &unused, &unused, &unused);
+        if ((ver & 0xff) > 0) {
+            has_msr_architectural_pmu = true;
+            num_architectural_pmu_counters = (ver & 0xff00) >> 8;
+
+            /* Shouldn't be more than 32, since that's the number of bits
+             * available in EBX to tell us _which_ counters are available.
+             * Play it safe.
+             */
+            if (num_architectural_pmu_counters > MAX_GP_COUNTERS) {
+                num_architectural_pmu_counters = MAX_GP_COUNTERS;
+            }
+        }
+    }
+
+    cpu_x86_cpuid(env, 0x80000000, 0, &limit, &unused, &unused, &unused);
+
+    for (i = 0x80000000; i <= limit; i++) {
+        if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
+            fprintf(stderr, "unsupported xlevel value: 0x%x\n", limit);
+            abort();
+        }
+        c = &cpuid_data.entries[cpuid_i++];
+
+        c->function = i;
+        c->flags = 0;
+        cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
+    }
+
+    /* Call Centaur's CPUID instructions if they are supported. */
+    if (env->cpuid_xlevel2 > 0) {
+        cpu_x86_cpuid(env, 0xC0000000, 0, &limit, &unused, &unused, &unused);
+
+        for (i = 0xC0000000; i <= limit; i++) {
+            if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
+                fprintf(stderr, "unsupported xlevel2 value: 0x%x\n", limit);
+                abort();
+            }
+            c = &cpuid_data.entries[cpuid_i++];
+
+            c->function = i;
+            c->flags = 0;
+            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
+        }
+    }
+
+    cpuid_data.cpuid.nent = cpuid_i;
+
+    if (((env->cpuid_version >> 8) & 0xF) >= 6
+        && (env->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
+           (CPUID_MCE | CPUID_MCA)
+        && kvm_check_extension(cs->kvm_state, KVM_CAP_MCE) > 0) {
+        uint64_t mcg_cap, unsupported_caps;
+        int banks;
+        int ret;
+
+        ret = kvm_get_mce_cap_supported(cs->kvm_state, &mcg_cap, &banks);
+        if (ret < 0) {
+            fprintf(stderr, "kvm_get_mce_cap_supported: %s", strerror(-ret));
+            return ret;
+        }
+
+        if (banks < (env->mcg_cap & MCG_CAP_BANKS_MASK)) {
+            error_report("kvm: Unsupported MCE bank count (QEMU = %d, KVM = %d)",
+                         (int)(env->mcg_cap & MCG_CAP_BANKS_MASK), banks);
+            return -ENOTSUP;
+        }
+
+        unsupported_caps = env->mcg_cap & ~(mcg_cap | MCG_CAP_BANKS_MASK);
+        if (unsupported_caps) {
+            if (unsupported_caps & MCG_LMCE_P) {
+                error_report("kvm: LMCE not supported");
+                return -ENOTSUP;
+            }
+            error_report("warning: Unsupported MCG_CAP bits: 0x%" PRIx64,
+                         unsupported_caps);
+        }
+
+        env->mcg_cap &= mcg_cap | MCG_CAP_BANKS_MASK;
+        ret = kvm_vcpu_ioctl(cs, KVM_X86_SETUP_MCE, &env->mcg_cap);
+        if (ret < 0) {
+            fprintf(stderr, "KVM_X86_SETUP_MCE: %s", strerror(-ret));
+            return ret;
+        }
+    }
+
+    qemu_add_vm_change_state_handler(cpu_update_state, env);
+
+    c = cpuid_find_entry(&cpuid_data.cpuid, 1, 0);
+    if (c) {
+        has_msr_feature_control = !!(c->ecx & CPUID_EXT_VMX) ||
+                                  !!(c->ecx & CPUID_EXT_SMX);
+    }
+
+    if (env->mcg_cap & MCG_LMCE_P) {
+        has_msr_mcg_ext_ctl = has_msr_feature_control = true;
+    }
+
+    c = cpuid_find_entry(&cpuid_data.cpuid, 0x80000007, 0);
+    if (c && (c->edx & (1 << 8)) && invtsc_mig_blocker == NULL) {
+        /* for migration */
+        error_setg(&invtsc_mig_blocker,
+                   "State blocked by non-migratable CPU device"
+                   " (invtsc flag)");
+        migrate_add_blocker(invtsc_mig_blocker);
+        /* for savevm */
+        vmstate_x86_cpu.unmigratable = 1;
+    }
+
+    cpuid_data.cpuid.padding = 0;
+    r = kvm_vcpu_ioctl(cs, KVM_SET_CPUID2, &cpuid_data);
+    if (r) {
+        return r;
+    }
+
+    r = kvm_arch_set_tsc_khz(cs);
+    if (r < 0) {
+        return r;
+    }
+
+    /* The vCPU's TSC frequency is either specified by the user, or follows
+     * the value used by KVM if the former is not present. In the latter
+     * case, we query it from KVM and record it in env->tsc_khz, so that
+     * the vCPU's TSC frequency can be migrated later via this field.
+     */
+    if (!env->tsc_khz) {
+        r = kvm_check_extension(cs->kvm_state, KVM_CAP_GET_TSC_KHZ) ?
+            kvm_vcpu_ioctl(cs, KVM_GET_TSC_KHZ) :
+            -ENOTSUP;
+        if (r > 0) {
+            env->tsc_khz = r;
+        }
+    }
+
+    if (has_xsave) {
+        env->kvm_xsave_buf = qemu_memalign(4096, sizeof(struct kvm_xsave));
+    }
+    cpu->kvm_msr_buf = g_malloc0(MSR_BUF_SIZE);
+
+    if (!(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_RDTSCP)) {
+        has_msr_tsc_aux = false;
+    }
+
+    return 0;
+}
+
+void kvm_arch_reset_vcpu(X86CPU *cpu)
+{
+    CPUX86State *env = &cpu->env;
+
+    env->exception_injected = -1;
+    env->interrupt_injected = -1;
+    env->xcr0 = 1;
+    if (kvm_irqchip_in_kernel()) {
+        env->mp_state = cpu_is_bsp(cpu) ? KVM_MP_STATE_RUNNABLE :
+                                          KVM_MP_STATE_UNINITIALIZED;
+    } else {
+        env->mp_state = KVM_MP_STATE_RUNNABLE;
+    }
+}
+
+void kvm_arch_do_init_vcpu(X86CPU *cpu)
+{
+    CPUX86State *env = &cpu->env;
+
+    /* APs get directly into wait-for-SIPI state.  */
+    if (env->mp_state == KVM_MP_STATE_UNINITIALIZED) {
+        env->mp_state = KVM_MP_STATE_INIT_RECEIVED;
+    }
+}
+
+static int kvm_get_supported_msrs(KVMState *s)
+{
+    static int kvm_supported_msrs;
+    int ret = 0;
+
+    /* first time */
+    if (kvm_supported_msrs == 0) {
+        struct kvm_msr_list msr_list, *kvm_msr_list;
+
+        kvm_supported_msrs = -1;
+
+        /* Obtain MSR list from KVM.  These are the MSRs that we must
+         * save/restore */
+        msr_list.nmsrs = 0;
+        ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, &msr_list);
+        if (ret < 0 && ret != -E2BIG) {
+            return ret;
+        }
+        /* Old kernel modules had a bug and could write beyond the provided
+           memory. Allocate at least a safe amount of 1K to work around it. */
+        kvm_msr_list = g_malloc0(MAX(1024, sizeof(msr_list) +
+                                              msr_list.nmsrs *
+                                              sizeof(msr_list.indices[0])));
+
+        kvm_msr_list->nmsrs = msr_list.nmsrs;
+        ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, kvm_msr_list);
+        if (ret >= 0) {
+            int i;
+
+            for (i = 0; i < kvm_msr_list->nmsrs; i++) {
+                if (kvm_msr_list->indices[i] == MSR_STAR) {
+                    has_msr_star = true;
+                    continue;
+                }
+                if (kvm_msr_list->indices[i] == MSR_VM_HSAVE_PA) {
+                    has_msr_hsave_pa = true;
+                    continue;
+                }
+                if (kvm_msr_list->indices[i] == MSR_TSC_AUX) {
+                    has_msr_tsc_aux = true;
+                    continue;
+                }
+                if (kvm_msr_list->indices[i] == MSR_TSC_ADJUST) {
+                    has_msr_tsc_adjust = true;
+                    continue;
+                }
+                if (kvm_msr_list->indices[i] == MSR_IA32_TSCDEADLINE) {
+                    has_msr_tsc_deadline = true;
+                    continue;
+                }
+                if (kvm_msr_list->indices[i] == MSR_IA32_SMBASE) {
+                    has_msr_smbase = true;
+                    continue;
+                }
+                if (kvm_msr_list->indices[i] == MSR_IA32_MISC_ENABLE) {
+                    has_msr_misc_enable = true;
+                    continue;
+                }
+                if (kvm_msr_list->indices[i] == MSR_IA32_BNDCFGS) {
+                    has_msr_bndcfgs = true;
+                    continue;
+                }
+                if (kvm_msr_list->indices[i] == MSR_IA32_XSS) {
+                    has_msr_xss = true;
+                    continue;
+                }
+                if (kvm_msr_list->indices[i] == HV_X64_MSR_CRASH_CTL) {
+                    has_msr_hv_crash = true;
+                    continue;
+                }
+                if (kvm_msr_list->indices[i] == HV_X64_MSR_RESET) {
+                    has_msr_hv_reset = true;
+                    continue;
+                }
+                if (kvm_msr_list->indices[i] == HV_X64_MSR_VP_INDEX) {
+                    has_msr_hv_vpindex = true;
+                    continue;
+                }
+                if (kvm_msr_list->indices[i] == HV_X64_MSR_VP_RUNTIME) {
+                    has_msr_hv_runtime = true;
+                    continue;
+                }
+                if (kvm_msr_list->indices[i] == HV_X64_MSR_SCONTROL) {
+                    has_msr_hv_synic = true;
+                    continue;
+                }
+                if (kvm_msr_list->indices[i] == HV_X64_MSR_STIMER0_CONFIG) {
+                    has_msr_hv_stimer = true;
+                    continue;
+                }
+            }
+        }
+
+        g_free(kvm_msr_list);
+    }
+
+    return ret;
+}
+
+static Notifier smram_machine_done;
+static KVMMemoryListener smram_listener;
+static AddressSpace smram_address_space;
+static MemoryRegion smram_as_root;
+static MemoryRegion smram_as_mem;
+
+static void register_smram_listener(Notifier *n, void *unused)
+{
+    MemoryRegion *smram =
+        (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
+
+    /* Outer container... */
+    memory_region_init(&smram_as_root, OBJECT(kvm_state), "mem-container-smram", ~0ull);
+    memory_region_set_enabled(&smram_as_root, true);
+
+    /* ... with two regions inside: normal system memory with low
+     * priority, and...
+     */
+    memory_region_init_alias(&smram_as_mem, OBJECT(kvm_state), "mem-smram",
+                             get_system_memory(), 0, ~0ull);
+    memory_region_add_subregion_overlap(&smram_as_root, 0, &smram_as_mem, 0);
+    memory_region_set_enabled(&smram_as_mem, true);
+
+    if (smram) {
+        /* ... SMRAM with higher priority */
+        memory_region_add_subregion_overlap(&smram_as_root, 0, smram, 10);
+        memory_region_set_enabled(smram, true);
+    }
+
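+    /* Address-space slot 1 is the one KVM uses for SMM-mode accesses. */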
+    address_space_init(&smram_address_space, &smram_as_root, "KVM-SMRAM");
+    kvm_memory_listener_register(kvm_state, &smram_listener,
+                                 &smram_address_space, 1);
+}
+
+int kvm_arch_init(MachineState *ms, KVMState *s)
+{
+    uint64_t identity_base = 0xfffbc000;
+    uint64_t shadow_mem;
+    int ret;
+    struct utsname utsname;
+
+#ifdef KVM_CAP_XSAVE
+    has_xsave = kvm_check_extension(s, KVM_CAP_XSAVE);
+#endif
+
+#ifdef KVM_CAP_XCRS
+    has_xcrs = kvm_check_extension(s, KVM_CAP_XCRS);
+#endif
+
+#ifdef KVM_CAP_PIT_STATE2
+    has_pit_state2 = kvm_check_extension(s, KVM_CAP_PIT_STATE2);
+#endif
+
+    ret = kvm_get_supported_msrs(s);
+    if (ret < 0) {
+        return ret;
+    }
+
+    uname(&utsname);
+    lm_capable_kernel = strcmp(utsname.machine, "x86_64") == 0;
+
+    /*
+     * On older Intel CPUs, KVM uses vm86 mode to emulate 16-bit code directly.
+     * In order to use vm86 mode, an EPT identity map and a TSS are needed.
+     * Since these must be part of guest physical memory, we need to allocate
+     * them, both by setting their start addresses in the kernel and by
+     * creating a corresponding e820 entry. We need 4 pages before the BIOS.
+     *
+     * Older KVM versions may not support setting the identity map base. In
+     * that case we need to stick with the default, i.e. a 256K maximum BIOS
+     * size.
+     */
+    if (kvm_check_extension(s, KVM_CAP_SET_IDENTITY_MAP_ADDR)) {
+        /* Allows up to 16M BIOSes. */
+        identity_base = 0xfeffc000;
+
+        ret = kvm_vm_ioctl(s, KVM_SET_IDENTITY_MAP_ADDR, &identity_base);
+        if (ret < 0) {
+            return ret;
+        }
+    }
+
+    /* Set TSS base one page after EPT identity map. */
+    ret = kvm_vm_ioctl(s, KVM_SET_TSS_ADDR, identity_base + 0x1000);
+    if (ret < 0) {
+        return ret;
+    }
+
+    /* Tell fw_cfg to notify the BIOS to reserve the range. */
+    ret = e820_add_entry(identity_base, 0x4000, E820_RESERVED);
+    if (ret < 0) {
+        fprintf(stderr, "e820_add_entry() table is full\n");
+        return ret;
+    }
+    qemu_register_reset(kvm_unpoison_all, NULL);
+
+    shadow_mem = machine_kvm_shadow_mem(ms);
+    if (shadow_mem != -1) {
+        shadow_mem /= 4096;
+        ret = kvm_vm_ioctl(s, KVM_SET_NR_MMU_PAGES, shadow_mem);
+        if (ret < 0) {
+            return ret;
+        }
+    }
+
+    if (kvm_check_extension(s, KVM_CAP_X86_SMM)) {
+        smram_machine_done.notify = register_smram_listener;
+        qemu_add_machine_init_done_notifier(&smram_machine_done);
+    }
+    return 0;
+}
+
+static void set_v8086_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
+{
+    lhs->selector = rhs->selector;
+    lhs->base = rhs->base;
+    lhs->limit = rhs->limit;
+    lhs->type = 3;
+    lhs->present = 1;
+    lhs->dpl = 3;
+    lhs->db = 0;
+    lhs->s = 1;
+    lhs->l = 0;
+    lhs->g = 0;
+    lhs->avl = 0;
+    lhs->unusable = 0;
+}
+
+static void set_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
+{
+    unsigned flags = rhs->flags;
+    lhs->selector = rhs->selector;
+    lhs->base = rhs->base;
+    lhs->limit = rhs->limit;
+    lhs->type = (flags >> DESC_TYPE_SHIFT) & 15;
+    lhs->present = (flags & DESC_P_MASK) != 0;
+    lhs->dpl = (flags >> DESC_DPL_SHIFT) & 3;
+    lhs->db = (flags >> DESC_B_SHIFT) & 1;
+    lhs->s = (flags & DESC_S_MASK) != 0;
+    lhs->l = (flags >> DESC_L_SHIFT) & 1;
+    lhs->g = (flags & DESC_G_MASK) != 0;
+    lhs->avl = (flags & DESC_AVL_MASK) != 0;
+    lhs->unusable = !lhs->present;
+    lhs->padding = 0;
+}
+
+static void get_seg(SegmentCache *lhs, const struct kvm_segment *rhs)
+{
+    lhs->selector = rhs->selector;
+    lhs->base = rhs->base;
+    lhs->limit = rhs->limit;
+    if (rhs->unusable) {
+        lhs->flags = 0;
+    } else {
+        lhs->flags = (rhs->type << DESC_TYPE_SHIFT) |
+                     (rhs->present * DESC_P_MASK) |
+                     (rhs->dpl << DESC_DPL_SHIFT) |
+                     (rhs->db << DESC_B_SHIFT) |
+                     (rhs->s * DESC_S_MASK) |
+                     (rhs->l << DESC_L_SHIFT) |
+                     (rhs->g * DESC_G_MASK) |
+                     (rhs->avl * DESC_AVL_MASK);
+    }
+}
+
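+/* Copy one register in the direction selected by 'set': QEMU to KVM when
+ * setting the state, KVM to QEMU when getting it. */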
+static void kvm_getput_reg(__u64 *kvm_reg, target_ulong *qemu_reg, int set)
+{
+    if (set) {
+        *kvm_reg = *qemu_reg;
+    } else {
+        *qemu_reg = *kvm_reg;
+    }
+}
+
+static int kvm_getput_regs(X86CPU *cpu, int set)
+{
+    CPUX86State *env = &cpu->env;
+    struct kvm_regs regs;
+    int ret = 0;
+
+    if (!set) {
+        ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_REGS, &regs);
+        if (ret < 0) {
+            return ret;
+        }
+    }
+
+    kvm_getput_reg(&regs.rax, &env->regs[R_EAX], set);
+    kvm_getput_reg(&regs.rbx, &env->regs[R_EBX], set);
+    kvm_getput_reg(&regs.rcx, &env->regs[R_ECX], set);
+    kvm_getput_reg(&regs.rdx, &env->regs[R_EDX], set);
+    kvm_getput_reg(&regs.rsi, &env->regs[R_ESI], set);
+    kvm_getput_reg(&regs.rdi, &env->regs[R_EDI], set);
+    kvm_getput_reg(&regs.rsp, &env->regs[R_ESP], set);
+    kvm_getput_reg(&regs.rbp, &env->regs[R_EBP], set);
+#ifdef TARGET_X86_64
+    kvm_getput_reg(&regs.r8, &env->regs[8], set);
+    kvm_getput_reg(&regs.r9, &env->regs[9], set);
+    kvm_getput_reg(&regs.r10, &env->regs[10], set);
+    kvm_getput_reg(&regs.r11, &env->regs[11], set);
+    kvm_getput_reg(&regs.r12, &env->regs[12], set);
+    kvm_getput_reg(&regs.r13, &env->regs[13], set);
+    kvm_getput_reg(&regs.r14, &env->regs[14], set);
+    kvm_getput_reg(&regs.r15, &env->regs[15], set);
+#endif
+
+    kvm_getput_reg(&regs.rflags, &env->eflags, set);
+    kvm_getput_reg(&regs.rip, &env->eip, set);
+
+    if (set) {
+        ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_REGS, &regs);
+    }
+
+    return ret;
+}
+
+static int kvm_put_fpu(X86CPU *cpu)
+{
+    CPUX86State *env = &cpu->env;
+    struct kvm_fpu fpu;
+    int i;
+
+    memset(&fpu, 0, sizeof fpu);
+    fpu.fsw = env->fpus & ~(7 << 11);
+    fpu.fsw |= (env->fpstt & 7) << 11;
+    fpu.fcw = env->fpuc;
+    fpu.last_opcode = env->fpop;
+    fpu.last_ip = env->fpip;
+    fpu.last_dp = env->fpdp;
+    for (i = 0; i < 8; ++i) {
+        fpu.ftwx |= (!env->fptags[i]) << i;
+    }
+    memcpy(fpu.fpr, env->fpregs, sizeof env->fpregs);
+    for (i = 0; i < CPU_NB_REGS; i++) {
+        stq_p(&fpu.xmm[i][0], env->xmm_regs[i].ZMM_Q(0));
+        stq_p(&fpu.xmm[i][8], env->xmm_regs[i].ZMM_Q(1));
+    }
+    fpu.mxcsr = env->mxcsr;
+
+    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_FPU, &fpu);
+}
+
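+/* Offsets of the XSAVE components inside struct kvm_xsave, counted in
+ * 32-bit words (the element size of its region[] array).  The
+ * ASSERT_OFFSET checks below pin QEMU's X86XSaveArea to the same layout. */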
+#define XSAVE_FCW_FSW     0
+#define XSAVE_FTW_FOP     1
+#define XSAVE_CWD_RIP     2
+#define XSAVE_CWD_RDP     4
+#define XSAVE_MXCSR       6
+#define XSAVE_ST_SPACE    8
+#define XSAVE_XMM_SPACE   40
+#define XSAVE_XSTATE_BV   128
+#define XSAVE_YMMH_SPACE  144
+#define XSAVE_BNDREGS     240
+#define XSAVE_BNDCSR      256
+#define XSAVE_OPMASK      272
+#define XSAVE_ZMM_Hi256   288
+#define XSAVE_Hi16_ZMM    416
+#define XSAVE_PKRU        672
+
+#define XSAVE_BYTE_OFFSET(word_offset) \
+    ((word_offset) * sizeof(((struct kvm_xsave *)0)->region[0]))
+
+#define ASSERT_OFFSET(word_offset, field) \
+    QEMU_BUILD_BUG_ON(XSAVE_BYTE_OFFSET(word_offset) != \
+                      offsetof(X86XSaveArea, field))
+
+ASSERT_OFFSET(XSAVE_FCW_FSW, legacy.fcw);
+ASSERT_OFFSET(XSAVE_FTW_FOP, legacy.ftw);
+ASSERT_OFFSET(XSAVE_CWD_RIP, legacy.fpip);
+ASSERT_OFFSET(XSAVE_CWD_RDP, legacy.fpdp);
+ASSERT_OFFSET(XSAVE_MXCSR, legacy.mxcsr);
+ASSERT_OFFSET(XSAVE_ST_SPACE, legacy.fpregs);
+ASSERT_OFFSET(XSAVE_XMM_SPACE, legacy.xmm_regs);
+ASSERT_OFFSET(XSAVE_XSTATE_BV, header.xstate_bv);
+ASSERT_OFFSET(XSAVE_YMMH_SPACE, avx_state);
+ASSERT_OFFSET(XSAVE_BNDREGS, bndreg_state);
+ASSERT_OFFSET(XSAVE_BNDCSR, bndcsr_state);
+ASSERT_OFFSET(XSAVE_OPMASK, opmask_state);
+ASSERT_OFFSET(XSAVE_ZMM_Hi256, zmm_hi256_state);
+ASSERT_OFFSET(XSAVE_Hi16_ZMM, hi16_zmm_state);
+ASSERT_OFFSET(XSAVE_PKRU, pkru_state);
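+/* As a worked example, ASSERT_OFFSET(XSAVE_PKRU, pkru_state) expands to
+ * QEMU_BUILD_BUG_ON(672 * 4 != offsetof(X86XSaveArea, pkru_state)),
+ * i.e. the PKRU component must sit at byte offset 2688. */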
+
+static int kvm_put_xsave(X86CPU *cpu)
+{
+    CPUX86State *env = &cpu->env;
+    X86XSaveArea *xsave = env->kvm_xsave_buf;
+    uint16_t cwd, swd, twd;
+    int i;
+
+    if (!has_xsave) {
+        return kvm_put_fpu(cpu);
+    }
+
+    memset(xsave, 0, sizeof(struct kvm_xsave));
+    twd = 0;
+    swd = env->fpus & ~(7 << 11);
+    swd |= (env->fpstt & 7) << 11;
+    cwd = env->fpuc;
+    for (i = 0; i < 8; ++i) {
+        twd |= (!env->fptags[i]) << i;
+    }
+    xsave->legacy.fcw = cwd;
+    xsave->legacy.fsw = swd;
+    xsave->legacy.ftw = twd;
+    xsave->legacy.fpop = env->fpop;
+    xsave->legacy.fpip = env->fpip;
+    xsave->legacy.fpdp = env->fpdp;
+    memcpy(&xsave->legacy.fpregs, env->fpregs,
+            sizeof env->fpregs);
+    xsave->legacy.mxcsr = env->mxcsr;
+    xsave->header.xstate_bv = env->xstate_bv;
+    memcpy(&xsave->bndreg_state.bnd_regs, env->bnd_regs,
+            sizeof env->bnd_regs);
+    xsave->bndcsr_state.bndcsr = env->bndcs_regs;
+    memcpy(&xsave->opmask_state.opmask_regs, env->opmask_regs,
+            sizeof env->opmask_regs);
+
+    for (i = 0; i < CPU_NB_REGS; i++) {
+        uint8_t *xmm = xsave->legacy.xmm_regs[i];
+        uint8_t *ymmh = xsave->avx_state.ymmh[i];
+        uint8_t *zmmh = xsave->zmm_hi256_state.zmm_hi256[i];
+        stq_p(xmm,     env->xmm_regs[i].ZMM_Q(0));
+        stq_p(xmm+8,   env->xmm_regs[i].ZMM_Q(1));
+        stq_p(ymmh,    env->xmm_regs[i].ZMM_Q(2));
+        stq_p(ymmh+8,  env->xmm_regs[i].ZMM_Q(3));
+        stq_p(zmmh,    env->xmm_regs[i].ZMM_Q(4));
+        stq_p(zmmh+8,  env->xmm_regs[i].ZMM_Q(5));
+        stq_p(zmmh+16, env->xmm_regs[i].ZMM_Q(6));
+        stq_p(zmmh+24, env->xmm_regs[i].ZMM_Q(7));
+    }
+
+#ifdef TARGET_X86_64
+    memcpy(&xsave->hi16_zmm_state.hi16_zmm, &env->xmm_regs[16],
+            16 * sizeof env->xmm_regs[16]);
+    memcpy(&xsave->pkru_state, &env->pkru, sizeof env->pkru);
+#endif
+    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_XSAVE, xsave);
+}
+
+static int kvm_put_xcrs(X86CPU *cpu)
+{
+    CPUX86State *env = &cpu->env;
+    struct kvm_xcrs xcrs = {};
+
+    if (!has_xcrs) {
+        return 0;
+    }
+
+    xcrs.nr_xcrs = 1;
+    xcrs.flags = 0;
+    xcrs.xcrs[0].xcr = 0;
+    xcrs.xcrs[0].value = env->xcr0;
+    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_XCRS, &xcrs);
+}
+
+static int kvm_put_sregs(X86CPU *cpu)
+{
+    CPUX86State *env = &cpu->env;
+    struct kvm_sregs sregs;
+
+    memset(sregs.interrupt_bitmap, 0, sizeof(sregs.interrupt_bitmap));
+    if (env->interrupt_injected >= 0) {
+        sregs.interrupt_bitmap[env->interrupt_injected / 64] |=
+                (uint64_t)1 << (env->interrupt_injected % 64);
+    }
+
+    if ((env->eflags & VM_MASK)) {
+        set_v8086_seg(&sregs.cs, &env->segs[R_CS]);
+        set_v8086_seg(&sregs.ds, &env->segs[R_DS]);
+        set_v8086_seg(&sregs.es, &env->segs[R_ES]);
+        set_v8086_seg(&sregs.fs, &env->segs[R_FS]);
+        set_v8086_seg(&sregs.gs, &env->segs[R_GS]);
+        set_v8086_seg(&sregs.ss, &env->segs[R_SS]);
+    } else {
+        set_seg(&sregs.cs, &env->segs[R_CS]);
+        set_seg(&sregs.ds, &env->segs[R_DS]);
+        set_seg(&sregs.es, &env->segs[R_ES]);
+        set_seg(&sregs.fs, &env->segs[R_FS]);
+        set_seg(&sregs.gs, &env->segs[R_GS]);
+        set_seg(&sregs.ss, &env->segs[R_SS]);
+    }
+
+    set_seg(&sregs.tr, &env->tr);
+    set_seg(&sregs.ldt, &env->ldt);
+
+    sregs.idt.limit = env->idt.limit;
+    sregs.idt.base = env->idt.base;
+    memset(sregs.idt.padding, 0, sizeof sregs.idt.padding);
+    sregs.gdt.limit = env->gdt.limit;
+    sregs.gdt.base = env->gdt.base;
+    memset(sregs.gdt.padding, 0, sizeof sregs.gdt.padding);
+
+    sregs.cr0 = env->cr[0];
+    sregs.cr2 = env->cr[2];
+    sregs.cr3 = env->cr[3];
+    sregs.cr4 = env->cr[4];
+
+    sregs.cr8 = cpu_get_apic_tpr(cpu->apic_state);
+    sregs.apic_base = cpu_get_apic_base(cpu->apic_state);
+
+    sregs.efer = env->efer;
+
+    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_SREGS, &sregs);
+}
+
+static void kvm_msr_buf_reset(X86CPU *cpu)
+{
+    memset(cpu->kvm_msr_buf, 0, MSR_BUF_SIZE);
+}
+
+static void kvm_msr_entry_add(X86CPU *cpu, uint32_t index, uint64_t value)
+{
+    struct kvm_msrs *msrs = cpu->kvm_msr_buf;
+    void *limit = ((void *)msrs) + MSR_BUF_SIZE;
+    struct kvm_msr_entry *entry = &msrs->entries[msrs->nmsrs];
+
+    assert((void *)(entry + 1) <= limit);
+
+    entry->index = index;
+    entry->reserved = 0;
+    entry->data = value;
+    msrs->nmsrs++;
+}
+
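+/* KVM_SET_MSRS returns the number of MSR entries it processed, so a
+ * successful write of a single-entry buffer yields 1; the callers below
+ * assert on that to catch truncated writes. */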
+static int kvm_put_one_msr(X86CPU *cpu, int index, uint64_t value)
+{
+    kvm_msr_buf_reset(cpu);
+    kvm_msr_entry_add(cpu, index, value);
+
+    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf);
+}
+
+void kvm_put_apicbase(X86CPU *cpu, uint64_t value)
+{
+    int ret;
+
+    ret = kvm_put_one_msr(cpu, MSR_IA32_APICBASE, value);
+    assert(ret == 1);
+}
+
+static int kvm_put_tscdeadline_msr(X86CPU *cpu)
+{
+    CPUX86State *env = &cpu->env;
+    int ret;
+
+    if (!has_msr_tsc_deadline) {
+        return 0;
+    }
+
+    ret = kvm_put_one_msr(cpu, MSR_IA32_TSCDEADLINE, env->tsc_deadline);
+    if (ret < 0) {
+        return ret;
+    }
+
+    assert(ret == 1);
+    return 0;
+}
+
+/*
+ * Provide a separate write service for the feature control MSR in order to
+ * kick the VCPU out of VMXON or even guest mode on reset. This has to be done
+ * before writing any other state because forcibly leaving nested mode
+ * invalidates the VCPU state.
+ */
+static int kvm_put_msr_feature_control(X86CPU *cpu)
+{
+    int ret;
+
+    if (!has_msr_feature_control) {
+        return 0;
+    }
+
+    ret = kvm_put_one_msr(cpu, MSR_IA32_FEATURE_CONTROL,
+                          cpu->env.msr_ia32_feature_control);
+    if (ret < 0) {
+        return ret;
+    }
+
+    assert(ret == 1);
+    return 0;
+}
+
+static int kvm_put_msrs(X86CPU *cpu, int level)
+{
+    CPUX86State *env = &cpu->env;
+    int i;
+    int ret;
+
+    kvm_msr_buf_reset(cpu);
+
+    kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_CS, env->sysenter_cs);
+    kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_ESP, env->sysenter_esp);
+    kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_EIP, env->sysenter_eip);
+    kvm_msr_entry_add(cpu, MSR_PAT, env->pat);
+    if (has_msr_star) {
+        kvm_msr_entry_add(cpu, MSR_STAR, env->star);
+    }
+    if (has_msr_hsave_pa) {
+        kvm_msr_entry_add(cpu, MSR_VM_HSAVE_PA, env->vm_hsave);
+    }
+    if (has_msr_tsc_aux) {
+        kvm_msr_entry_add(cpu, MSR_TSC_AUX, env->tsc_aux);
+    }
+    if (has_msr_tsc_adjust) {
+        kvm_msr_entry_add(cpu, MSR_TSC_ADJUST, env->tsc_adjust);
+    }
+    if (has_msr_misc_enable) {
+        kvm_msr_entry_add(cpu, MSR_IA32_MISC_ENABLE,
+                          env->msr_ia32_misc_enable);
+    }
+    if (has_msr_smbase) {
+        kvm_msr_entry_add(cpu, MSR_IA32_SMBASE, env->smbase);
+    }
+    if (has_msr_bndcfgs) {
+        kvm_msr_entry_add(cpu, MSR_IA32_BNDCFGS, env->msr_bndcfgs);
+    }
+    if (has_msr_xss) {
+        kvm_msr_entry_add(cpu, MSR_IA32_XSS, env->xss);
+    }
+#ifdef TARGET_X86_64
+    if (lm_capable_kernel) {
+        kvm_msr_entry_add(cpu, MSR_CSTAR, env->cstar);
+        kvm_msr_entry_add(cpu, MSR_KERNELGSBASE, env->kernelgsbase);
+        kvm_msr_entry_add(cpu, MSR_FMASK, env->fmask);
+        kvm_msr_entry_add(cpu, MSR_LSTAR, env->lstar);
+    }
+#endif
+    /*
+     * The following MSRs have side effects on the guest or are too heavy
+     * for normal writeback. Limit them to reset or full state updates.
+     */
+    if (level >= KVM_PUT_RESET_STATE) {
+        kvm_msr_entry_add(cpu, MSR_IA32_TSC, env->tsc);
+        kvm_msr_entry_add(cpu, MSR_KVM_SYSTEM_TIME, env->system_time_msr);
+        kvm_msr_entry_add(cpu, MSR_KVM_WALL_CLOCK, env->wall_clock_msr);
+        if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF)) {
+            kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_EN, env->async_pf_en_msr);
+        }
+        if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_PV_EOI)) {
+            kvm_msr_entry_add(cpu, MSR_KVM_PV_EOI_EN, env->pv_eoi_en_msr);
+        }
+        if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_STEAL_TIME)) {
+            kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, env->steal_time_msr);
+        }
+        if (has_msr_architectural_pmu) {
+            /* Stop the counter.  */
+            kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
+            kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0);
+
+            /* Set the counter values.  */
+            for (i = 0; i < MAX_FIXED_COUNTERS; i++) {
+                kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR0 + i,
+                                  env->msr_fixed_counters[i]);
+            }
+            for (i = 0; i < num_architectural_pmu_counters; i++) {
+                kvm_msr_entry_add(cpu, MSR_P6_PERFCTR0 + i,
+                                  env->msr_gp_counters[i]);
+                kvm_msr_entry_add(cpu, MSR_P6_EVNTSEL0 + i,
+                                  env->msr_gp_evtsel[i]);
+            }
+            kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS,
+                              env->msr_global_status);
+            kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL,
+                              env->msr_global_ovf_ctrl);
+
+            /* Now start the PMU.  */
+            kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL,
+                              env->msr_fixed_ctr_ctrl);
+            kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL,
+                              env->msr_global_ctrl);
+        }
+        if (has_msr_hv_hypercall) {
+            kvm_msr_entry_add(cpu, HV_X64_MSR_GUEST_OS_ID,
+                              env->msr_hv_guest_os_id);
+            kvm_msr_entry_add(cpu, HV_X64_MSR_HYPERCALL,
+                              env->msr_hv_hypercall);
+        }
+        if (cpu->hyperv_vapic) {
+            kvm_msr_entry_add(cpu, HV_X64_MSR_APIC_ASSIST_PAGE,
+                              env->msr_hv_vapic);
+        }
+        if (cpu->hyperv_time) {
+            kvm_msr_entry_add(cpu, HV_X64_MSR_REFERENCE_TSC, env->msr_hv_tsc);
+        }
+        if (has_msr_hv_crash) {
+            int j;
+
+            for (j = 0; j < HV_X64_MSR_CRASH_PARAMS; j++) {
+                kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_P0 + j,
+                                  env->msr_hv_crash_params[j]);
+            }
+
+            kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_CTL,
+                              HV_X64_MSR_CRASH_CTL_NOTIFY);
+        }
+        if (has_msr_hv_runtime) {
+            kvm_msr_entry_add(cpu, HV_X64_MSR_VP_RUNTIME, env->msr_hv_runtime);
+        }
+        if (cpu->hyperv_synic) {
+            int j;
+
+            kvm_msr_entry_add(cpu, HV_X64_MSR_SCONTROL,
+                              env->msr_hv_synic_control);
+            kvm_msr_entry_add(cpu, HV_X64_MSR_SVERSION,
+                              env->msr_hv_synic_version);
+            kvm_msr_entry_add(cpu, HV_X64_MSR_SIEFP,
+                              env->msr_hv_synic_evt_page);
+            kvm_msr_entry_add(cpu, HV_X64_MSR_SIMP,
+                              env->msr_hv_synic_msg_page);
+
+            for (j = 0; j < ARRAY_SIZE(env->msr_hv_synic_sint); j++) {
+                kvm_msr_entry_add(cpu, HV_X64_MSR_SINT0 + j,
+                                  env->msr_hv_synic_sint[j]);
+            }
+        }
+        if (has_msr_hv_stimer) {
+            int j;
+
+            for (j = 0; j < ARRAY_SIZE(env->msr_hv_stimer_config); j++) {
+                kvm_msr_entry_add(cpu, HV_X64_MSR_STIMER0_CONFIG + j * 2,
+                                env->msr_hv_stimer_config[j]);
+            }
+
+            for (j = 0; j < ARRAY_SIZE(env->msr_hv_stimer_count); j++) {
+                kvm_msr_entry_add(cpu, HV_X64_MSR_STIMER0_COUNT + j * 2,
+                                env->msr_hv_stimer_count[j]);
+            }
+        }
+        if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
+            uint64_t phys_mask = MAKE_64BIT_MASK(0, cpu->phys_bits);
+
+            kvm_msr_entry_add(cpu, MSR_MTRRdefType, env->mtrr_deftype);
+            kvm_msr_entry_add(cpu, MSR_MTRRfix64K_00000, env->mtrr_fixed[0]);
+            kvm_msr_entry_add(cpu, MSR_MTRRfix16K_80000, env->mtrr_fixed[1]);
+            kvm_msr_entry_add(cpu, MSR_MTRRfix16K_A0000, env->mtrr_fixed[2]);
+            kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C0000, env->mtrr_fixed[3]);
+            kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C8000, env->mtrr_fixed[4]);
+            kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D0000, env->mtrr_fixed[5]);
+            kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D8000, env->mtrr_fixed[6]);
+            kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E0000, env->mtrr_fixed[7]);
+            kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E8000, env->mtrr_fixed[8]);
+            kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F0000, env->mtrr_fixed[9]);
+            kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F8000, env->mtrr_fixed[10]);
+            for (i = 0; i < MSR_MTRRcap_VCNT; i++) {
+                /* The CPU GPs if we write to a bit above the physical limit
+                 * of the host CPU (and KVM emulates that).
+                 */
+                uint64_t mask = env->mtrr_var[i].mask;
+                mask &= phys_mask;
+
+                kvm_msr_entry_add(cpu, MSR_MTRRphysBase(i),
+                                  env->mtrr_var[i].base);
+                kvm_msr_entry_add(cpu, MSR_MTRRphysMask(i), mask);
+            }
+        }
+
+        /* Note: MSR_IA32_FEATURE_CONTROL is written separately, see
+         *       kvm_put_msr_feature_control. */
+    }
+    if (env->mcg_cap) {
+        int i;
+
+        kvm_msr_entry_add(cpu, MSR_MCG_STATUS, env->mcg_status);
+        kvm_msr_entry_add(cpu, MSR_MCG_CTL, env->mcg_ctl);
+        if (has_msr_mcg_ext_ctl) {
+            kvm_msr_entry_add(cpu, MSR_MCG_EXT_CTL, env->mcg_ext_ctl);
+        }
+        for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
+            kvm_msr_entry_add(cpu, MSR_MC0_CTL + i, env->mce_banks[i]);
+        }
+    }
+
+    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf);
+    if (ret < 0) {
+        return ret;
+    }
+
+    assert(ret == cpu->kvm_msr_buf->nmsrs);
+    return 0;
+}
+
+static int kvm_get_fpu(X86CPU *cpu)
+{
+    CPUX86State *env = &cpu->env;
+    struct kvm_fpu fpu;
+    int i, ret;
+
+    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_FPU, &fpu);
+    if (ret < 0) {
+        return ret;
+    }
+
+    env->fpstt = (fpu.fsw >> 11) & 7;
+    env->fpus = fpu.fsw;
+    env->fpuc = fpu.fcw;
+    env->fpop = fpu.last_opcode;
+    env->fpip = fpu.last_ip;
+    env->fpdp = fpu.last_dp;
+    for (i = 0; i < 8; ++i) {
+        env->fptags[i] = !((fpu.ftwx >> i) & 1);
+    }
+    memcpy(env->fpregs, fpu.fpr, sizeof env->fpregs);
+    for (i = 0; i < CPU_NB_REGS; i++) {
+        env->xmm_regs[i].ZMM_Q(0) = ldq_p(&fpu.xmm[i][0]);
+        env->xmm_regs[i].ZMM_Q(1) = ldq_p(&fpu.xmm[i][8]);
+    }
+    env->mxcsr = fpu.mxcsr;
+
+    return 0;
+}
+
+static int kvm_get_xsave(X86CPU *cpu)
+{
+    CPUX86State *env = &cpu->env;
+    X86XSaveArea *xsave = env->kvm_xsave_buf;
+    int ret, i;
+    uint16_t cwd, swd, twd;
+
+    if (!has_xsave) {
+        return kvm_get_fpu(cpu);
+    }
+
+    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_XSAVE, xsave);
+    if (ret < 0) {
+        return ret;
+    }
+
+    cwd = xsave->legacy.fcw;
+    swd = xsave->legacy.fsw;
+    twd = xsave->legacy.ftw;
+    env->fpop = xsave->legacy.fpop;
+    env->fpstt = (swd >> 11) & 7;
+    env->fpus = swd;
+    env->fpuc = cwd;
+    for (i = 0; i < 8; ++i) {
+        env->fptags[i] = !((twd >> i) & 1);
+    }
+    env->fpip = xsave->legacy.fpip;
+    env->fpdp = xsave->legacy.fpdp;
+    env->mxcsr = xsave->legacy.mxcsr;
+    memcpy(env->fpregs, &xsave->legacy.fpregs,
+            sizeof env->fpregs);
+    env->xstate_bv = xsave->header.xstate_bv;
+    memcpy(env->bnd_regs, &xsave->bndreg_state.bnd_regs,
+            sizeof env->bnd_regs);
+    env->bndcs_regs = xsave->bndcsr_state.bndcsr;
+    memcpy(env->opmask_regs, &xsave->opmask_state.opmask_regs,
+            sizeof env->opmask_regs);
+
+    for (i = 0; i < CPU_NB_REGS; i++) {
+        uint8_t *xmm = xsave->legacy.xmm_regs[i];
+        uint8_t *ymmh = xsave->avx_state.ymmh[i];
+        uint8_t *zmmh = xsave->zmm_hi256_state.zmm_hi256[i];
+        env->xmm_regs[i].ZMM_Q(0) = ldq_p(xmm);
+        env->xmm_regs[i].ZMM_Q(1) = ldq_p(xmm+8);
+        env->xmm_regs[i].ZMM_Q(2) = ldq_p(ymmh);
+        env->xmm_regs[i].ZMM_Q(3) = ldq_p(ymmh+8);
+        env->xmm_regs[i].ZMM_Q(4) = ldq_p(zmmh);
+        env->xmm_regs[i].ZMM_Q(5) = ldq_p(zmmh+8);
+        env->xmm_regs[i].ZMM_Q(6) = ldq_p(zmmh+16);
+        env->xmm_regs[i].ZMM_Q(7) = ldq_p(zmmh+24);
+    }
+
+#ifdef TARGET_X86_64
+    memcpy(&env->xmm_regs[16], &xsave->hi16_zmm_state.hi16_zmm,
+           16 * sizeof env->xmm_regs[16]);
+    memcpy(&env->pkru, &xsave->pkru_state, sizeof env->pkru);
+#endif
+    return 0;
+}
+
+static int kvm_get_xcrs(X86CPU *cpu)
+{
+    CPUX86State *env = &cpu->env;
+    int i, ret;
+    struct kvm_xcrs xcrs;
+
+    if (!has_xcrs) {
+        return 0;
+    }
+
+    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_XCRS, &xcrs);
+    if (ret < 0) {
+        return ret;
+    }
+
+    for (i = 0; i < xcrs.nr_xcrs; i++) {
+        /* Only support xcr0 now */
+        if (xcrs.xcrs[i].xcr == 0) {
+            env->xcr0 = xcrs.xcrs[i].value;
+            break;
+        }
+    }
+    return 0;
+}
+
+static int kvm_get_sregs(X86CPU *cpu)
+{
+    CPUX86State *env = &cpu->env;
+    struct kvm_sregs sregs;
+    uint32_t hflags;
+    int bit, i, ret;
+
+    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs);
+    if (ret < 0) {
+        return ret;
+    }
+
+    /* There can only be one pending IRQ set in the bitmap at a time, so try
+       to find it and save its number instead (-1 for none). */
+    env->interrupt_injected = -1;
+    for (i = 0; i < ARRAY_SIZE(sregs.interrupt_bitmap); i++) {
+        if (sregs.interrupt_bitmap[i]) {
+            bit = ctz64(sregs.interrupt_bitmap[i]);
+            env->interrupt_injected = i * 64 + bit;
+            break;
+        }
+    }
+
+    get_seg(&env->segs[R_CS], &sregs.cs);
+    get_seg(&env->segs[R_DS], &sregs.ds);
+    get_seg(&env->segs[R_ES], &sregs.es);
+    get_seg(&env->segs[R_FS], &sregs.fs);
+    get_seg(&env->segs[R_GS], &sregs.gs);
+    get_seg(&env->segs[R_SS], &sregs.ss);
+
+    get_seg(&env->tr, &sregs.tr);
+    get_seg(&env->ldt, &sregs.ldt);
+
+    env->idt.limit = sregs.idt.limit;
+    env->idt.base = sregs.idt.base;
+    env->gdt.limit = sregs.gdt.limit;
+    env->gdt.base = sregs.gdt.base;
+
+    env->cr[0] = sregs.cr0;
+    env->cr[2] = sregs.cr2;
+    env->cr[3] = sregs.cr3;
+    env->cr[4] = sregs.cr4;
+
+    env->efer = sregs.efer;
+
+    /* changes to apic base and cr8/tpr are read back via kvm_arch_post_run */
+
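+/* The flags listed inside the ~() are recomputed below from the sregs
+ * just fetched; the mask preserves every other hflags bit unchanged. */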
+#define HFLAG_COPY_MASK \
+    ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
+       HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
+       HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
+       HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)
+
+    hflags = env->hflags & HFLAG_COPY_MASK;
+    hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
+    hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
+    hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
+                (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
+    hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));
+
+    if (env->cr[4] & CR4_OSFXSR_MASK) {
+        hflags |= HF_OSFXSR_MASK;
+    }
+
+    if (env->efer & MSR_EFER_LMA) {
+        hflags |= HF_LMA_MASK;
+    }
+
+    if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
+        hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
+    } else {
+        hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
+                    (DESC_B_SHIFT - HF_CS32_SHIFT);
+        hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
+                    (DESC_B_SHIFT - HF_SS32_SHIFT);
+        if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) ||
+            !(hflags & HF_CS32_MASK)) {
+            hflags |= HF_ADDSEG_MASK;
+        } else {
+            hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base |
+                        env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
+        }
+    }
+    env->hflags = hflags;
+
+    return 0;
+}
+
+static int kvm_get_msrs(X86CPU *cpu)
+{
+    CPUX86State *env = &cpu->env;
+    struct kvm_msr_entry *msrs = cpu->kvm_msr_buf->entries;
+    int ret, i;
+    uint64_t mtrr_top_bits;
+
+    kvm_msr_buf_reset(cpu);
+
+    kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_CS, 0);
+    kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_ESP, 0);
+    kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_EIP, 0);
+    kvm_msr_entry_add(cpu, MSR_PAT, 0);
+    if (has_msr_star) {
+        kvm_msr_entry_add(cpu, MSR_STAR, 0);
+    }
+    if (has_msr_hsave_pa) {
+        kvm_msr_entry_add(cpu, MSR_VM_HSAVE_PA, 0);
+    }
+    if (has_msr_tsc_aux) {
+        kvm_msr_entry_add(cpu, MSR_TSC_AUX, 0);
+    }
+    if (has_msr_tsc_adjust) {
+        kvm_msr_entry_add(cpu, MSR_TSC_ADJUST, 0);
+    }
+    if (has_msr_tsc_deadline) {
+        kvm_msr_entry_add(cpu, MSR_IA32_TSCDEADLINE, 0);
+    }
+    if (has_msr_misc_enable) {
+        kvm_msr_entry_add(cpu, MSR_IA32_MISC_ENABLE, 0);
+    }
+    if (has_msr_smbase) {
+        kvm_msr_entry_add(cpu, MSR_IA32_SMBASE, 0);
+    }
+    if (has_msr_feature_control) {
+        kvm_msr_entry_add(cpu, MSR_IA32_FEATURE_CONTROL, 0);
+    }
+    if (has_msr_bndcfgs) {
+        kvm_msr_entry_add(cpu, MSR_IA32_BNDCFGS, 0);
+    }
+    if (has_msr_xss) {
+        kvm_msr_entry_add(cpu, MSR_IA32_XSS, 0);
+    }
+
+    if (!env->tsc_valid) {
+        kvm_msr_entry_add(cpu, MSR_IA32_TSC, 0);
+        env->tsc_valid = !runstate_is_running();
+    }
+
+#ifdef TARGET_X86_64
+    if (lm_capable_kernel) {
+        kvm_msr_entry_add(cpu, MSR_CSTAR, 0);
+        kvm_msr_entry_add(cpu, MSR_KERNELGSBASE, 0);
+        kvm_msr_entry_add(cpu, MSR_FMASK, 0);
+        kvm_msr_entry_add(cpu, MSR_LSTAR, 0);
+    }
+#endif
+    kvm_msr_entry_add(cpu, MSR_KVM_SYSTEM_TIME, 0);
+    kvm_msr_entry_add(cpu, MSR_KVM_WALL_CLOCK, 0);
+    if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF)) {
+        kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_EN, 0);
+    }
+    if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_PV_EOI)) {
+        kvm_msr_entry_add(cpu, MSR_KVM_PV_EOI_EN, 0);
+    }
+    if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_STEAL_TIME)) {
+        kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, 0);
+    }
+    if (has_msr_architectural_pmu) {
+        kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
+        kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0);
+        kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS, 0);
+        kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL, 0);
+        for (i = 0; i < MAX_FIXED_COUNTERS; i++) {
+            kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR0 + i, 0);
+        }
+        for (i = 0; i < num_architectural_pmu_counters; i++) {
+            kvm_msr_entry_add(cpu, MSR_P6_PERFCTR0 + i, 0);
+            kvm_msr_entry_add(cpu, MSR_P6_EVNTSEL0 + i, 0);
+        }
+    }
+
+    if (env->mcg_cap) {
+        kvm_msr_entry_add(cpu, MSR_MCG_STATUS, 0);
+        kvm_msr_entry_add(cpu, MSR_MCG_CTL, 0);
+        if (has_msr_mcg_ext_ctl) {
+            kvm_msr_entry_add(cpu, MSR_MCG_EXT_CTL, 0);
+        }
+        for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
+            kvm_msr_entry_add(cpu, MSR_MC0_CTL + i, 0);
+        }
+    }
+
+    if (has_msr_hv_hypercall) {
+        kvm_msr_entry_add(cpu, HV_X64_MSR_HYPERCALL, 0);
+        kvm_msr_entry_add(cpu, HV_X64_MSR_GUEST_OS_ID, 0);
+    }
+    if (cpu->hyperv_vapic) {
+        kvm_msr_entry_add(cpu, HV_X64_MSR_APIC_ASSIST_PAGE, 0);
+    }
+    if (cpu->hyperv_time) {
+        kvm_msr_entry_add(cpu, HV_X64_MSR_REFERENCE_TSC, 0);
+    }
+    if (has_msr_hv_crash) {
+        int j;
+
+        for (j = 0; j < HV_X64_MSR_CRASH_PARAMS; j++) {
+            kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_P0 + j, 0);
+        }
+    }
+    if (has_msr_hv_runtime) {
+        kvm_msr_entry_add(cpu, HV_X64_MSR_VP_RUNTIME, 0);
+    }
+    if (cpu->hyperv_synic) {
+        uint32_t msr;
+
+        kvm_msr_entry_add(cpu, HV_X64_MSR_SCONTROL, 0);
+        kvm_msr_entry_add(cpu, HV_X64_MSR_SVERSION, 0);
+        kvm_msr_entry_add(cpu, HV_X64_MSR_SIEFP, 0);
+        kvm_msr_entry_add(cpu, HV_X64_MSR_SIMP, 0);
+        for (msr = HV_X64_MSR_SINT0; msr <= HV_X64_MSR_SINT15; msr++) {
+            kvm_msr_entry_add(cpu, msr, 0);
+        }
+    }
+    if (has_msr_hv_stimer) {
+        uint32_t msr;
+
+        for (msr = HV_X64_MSR_STIMER0_CONFIG; msr <= HV_X64_MSR_STIMER3_COUNT;
+             msr++) {
+            kvm_msr_entry_add(cpu, msr, 0);
+        }
+    }
+    if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
+        kvm_msr_entry_add(cpu, MSR_MTRRdefType, 0);
+        kvm_msr_entry_add(cpu, MSR_MTRRfix64K_00000, 0);
+        kvm_msr_entry_add(cpu, MSR_MTRRfix16K_80000, 0);
+        kvm_msr_entry_add(cpu, MSR_MTRRfix16K_A0000, 0);
+        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C0000, 0);
+        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C8000, 0);
+        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D0000, 0);
+        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D8000, 0);
+        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E0000, 0);
+        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E8000, 0);
+        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F0000, 0);
+        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F8000, 0);
+        for (i = 0; i < MSR_MTRRcap_VCNT; i++) {
+            kvm_msr_entry_add(cpu, MSR_MTRRphysBase(i), 0);
+            kvm_msr_entry_add(cpu, MSR_MTRRphysMask(i), 0);
+        }
+    }
+
+    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, cpu->kvm_msr_buf);
+    if (ret < 0) {
+        return ret;
+    }
+
+    assert(ret == cpu->kvm_msr_buf->nmsrs);
+    /*
+     * MTRR masks: Each mask consists of 5 parts
+     * a  10..0  : must be zero
+     * b  11     : valid bit
+     * c  n-1..12: actual mask bits
+     * d  51..n  : reserved, must be zero
+     * e  63..52 : reserved, must be zero
+     *
+     * 'n' is the number of physical bits supported by the CPU and is
+     * apparently always <= 52. We know our 'n' but don't know what
+     * the destination's 'n' is; it might be smaller, in which case
+     * it masks (c) on loading. It might be larger, in which case
+     * we fill 'd' so that d..c is consistent irrespective of the 'n'
+     * we're migrating to.
+     */
+
+    if (cpu->fill_mtrr_mask) {
+        QEMU_BUILD_BUG_ON(TARGET_PHYS_ADDR_SPACE_BITS > 52);
+        assert(cpu->phys_bits <= TARGET_PHYS_ADDR_SPACE_BITS);
+        mtrr_top_bits = MAKE_64BIT_MASK(cpu->phys_bits, 52 - cpu->phys_bits);
+    } else {
+        mtrr_top_bits = 0;
+    }
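+    /* For example, a CPU with phys_bits == 40 yields
+     * mtrr_top_bits == MAKE_64BIT_MASK(40, 12) == 0x000fff0000000000,
+     * i.e. reserved mask bits 51..40 get filled on the way out. */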
+
+    for (i = 0; i < ret; i++) {
+        uint32_t index = msrs[i].index;
+        switch (index) {
+        case MSR_IA32_SYSENTER_CS:
+            env->sysenter_cs = msrs[i].data;
+            break;
+        case MSR_IA32_SYSENTER_ESP:
+            env->sysenter_esp = msrs[i].data;
+            break;
+        case MSR_IA32_SYSENTER_EIP:
+            env->sysenter_eip = msrs[i].data;
+            break;
+        case MSR_PAT:
+            env->pat = msrs[i].data;
+            break;
+        case MSR_STAR:
+            env->star = msrs[i].data;
+            break;
+#ifdef TARGET_X86_64
+        case MSR_CSTAR:
+            env->cstar = msrs[i].data;
+            break;
+        case MSR_KERNELGSBASE:
+            env->kernelgsbase = msrs[i].data;
+            break;
+        case MSR_FMASK:
+            env->fmask = msrs[i].data;
+            break;
+        case MSR_LSTAR:
+            env->lstar = msrs[i].data;
+            break;
+#endif
+        case MSR_IA32_TSC:
+            env->tsc = msrs[i].data;
+            break;
+        case MSR_TSC_AUX:
+            env->tsc_aux = msrs[i].data;
+            break;
+        case MSR_TSC_ADJUST:
+            env->tsc_adjust = msrs[i].data;
+            break;
+        case MSR_IA32_TSCDEADLINE:
+            env->tsc_deadline = msrs[i].data;
+            break;
+        case MSR_VM_HSAVE_PA:
+            env->vm_hsave = msrs[i].data;
+            break;
+        case MSR_KVM_SYSTEM_TIME:
+            env->system_time_msr = msrs[i].data;
+            break;
+        case MSR_KVM_WALL_CLOCK:
+            env->wall_clock_msr = msrs[i].data;
+            break;
+        case MSR_MCG_STATUS:
+            env->mcg_status = msrs[i].data;
+            break;
+        case MSR_MCG_CTL:
+            env->mcg_ctl = msrs[i].data;
+            break;
+        case MSR_MCG_EXT_CTL:
+            env->mcg_ext_ctl = msrs[i].data;
+            break;
+        case MSR_IA32_MISC_ENABLE:
+            env->msr_ia32_misc_enable = msrs[i].data;
+            break;
+        case MSR_IA32_SMBASE:
+            env->smbase = msrs[i].data;
+            break;
+        case MSR_IA32_FEATURE_CONTROL:
+            env->msr_ia32_feature_control = msrs[i].data;
+            break;
+        case MSR_IA32_BNDCFGS:
+            env->msr_bndcfgs = msrs[i].data;
+            break;
+        case MSR_IA32_XSS:
+            env->xss = msrs[i].data;
+            break;
+        default:
+            if (msrs[i].index >= MSR_MC0_CTL &&
+                msrs[i].index < MSR_MC0_CTL + (env->mcg_cap & 0xff) * 4) {
+                env->mce_banks[msrs[i].index - MSR_MC0_CTL] = msrs[i].data;
+            }
+            break;
+        case MSR_KVM_ASYNC_PF_EN:
+            env->async_pf_en_msr = msrs[i].data;
+            break;
+        case MSR_KVM_PV_EOI_EN:
+            env->pv_eoi_en_msr = msrs[i].data;
+            break;
+        case MSR_KVM_STEAL_TIME:
+            env->steal_time_msr = msrs[i].data;
+            break;
+        case MSR_CORE_PERF_FIXED_CTR_CTRL:
+            env->msr_fixed_ctr_ctrl = msrs[i].data;
+            break;
+        case MSR_CORE_PERF_GLOBAL_CTRL:
+            env->msr_global_ctrl = msrs[i].data;
+            break;
+        case MSR_CORE_PERF_GLOBAL_STATUS:
+            env->msr_global_status = msrs[i].data;
+            break;
+        case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
+            env->msr_global_ovf_ctrl = msrs[i].data;
+            break;
+        case MSR_CORE_PERF_FIXED_CTR0 ... MSR_CORE_PERF_FIXED_CTR0 + MAX_FIXED_COUNTERS - 1:
+            env->msr_fixed_counters[index - MSR_CORE_PERF_FIXED_CTR0] = msrs[i].data;
+            break;
+        case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR0 + MAX_GP_COUNTERS - 1:
+            env->msr_gp_counters[index - MSR_P6_PERFCTR0] = msrs[i].data;
+            break;
+        case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL0 + MAX_GP_COUNTERS - 1:
+            env->msr_gp_evtsel[index - MSR_P6_EVNTSEL0] = msrs[i].data;
+            break;
+        case HV_X64_MSR_HYPERCALL:
+            env->msr_hv_hypercall = msrs[i].data;
+            break;
+        case HV_X64_MSR_GUEST_OS_ID:
+            env->msr_hv_guest_os_id = msrs[i].data;
+            break;
+        case HV_X64_MSR_APIC_ASSIST_PAGE:
+            env->msr_hv_vapic = msrs[i].data;
+            break;
+        case HV_X64_MSR_REFERENCE_TSC:
+            env->msr_hv_tsc = msrs[i].data;
+            break;
+        case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
+            env->msr_hv_crash_params[index - HV_X64_MSR_CRASH_P0] = msrs[i].data;
+            break;
+        case HV_X64_MSR_VP_RUNTIME:
+            env->msr_hv_runtime = msrs[i].data;
+            break;
+        case HV_X64_MSR_SCONTROL:
+            env->msr_hv_synic_control = msrs[i].data;
+            break;
+        case HV_X64_MSR_SVERSION:
+            env->msr_hv_synic_version = msrs[i].data;
+            break;
+        case HV_X64_MSR_SIEFP:
+            env->msr_hv_synic_evt_page = msrs[i].data;
+            break;
+        case HV_X64_MSR_SIMP:
+            env->msr_hv_synic_msg_page = msrs[i].data;
+            break;
+        case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
+            env->msr_hv_synic_sint[index - HV_X64_MSR_SINT0] = msrs[i].data;
+            break;
+        case HV_X64_MSR_STIMER0_CONFIG:
+        case HV_X64_MSR_STIMER1_CONFIG:
+        case HV_X64_MSR_STIMER2_CONFIG:
+        case HV_X64_MSR_STIMER3_CONFIG:
+            env->msr_hv_stimer_config[(index - HV_X64_MSR_STIMER0_CONFIG)/2] =
+                                msrs[i].data;
+            break;
+        case HV_X64_MSR_STIMER0_COUNT:
+        case HV_X64_MSR_STIMER1_COUNT:
+        case HV_X64_MSR_STIMER2_COUNT:
+        case HV_X64_MSR_STIMER3_COUNT:
+            env->msr_hv_stimer_count[(index - HV_X64_MSR_STIMER0_COUNT)/2] =
+                                msrs[i].data;
+            break;
+        case MSR_MTRRdefType:
+            env->mtrr_deftype = msrs[i].data;
+            break;
+        case MSR_MTRRfix64K_00000:
+            env->mtrr_fixed[0] = msrs[i].data;
+            break;
+        case MSR_MTRRfix16K_80000:
+            env->mtrr_fixed[1] = msrs[i].data;
+            break;
+        case MSR_MTRRfix16K_A0000:
+            env->mtrr_fixed[2] = msrs[i].data;
+            break;
+        case MSR_MTRRfix4K_C0000:
+            env->mtrr_fixed[3] = msrs[i].data;
+            break;
+        case MSR_MTRRfix4K_C8000:
+            env->mtrr_fixed[4] = msrs[i].data;
+            break;
+        case MSR_MTRRfix4K_D0000:
+            env->mtrr_fixed[5] = msrs[i].data;
+            break;
+        case MSR_MTRRfix4K_D8000:
+            env->mtrr_fixed[6] = msrs[i].data;
+            break;
+        case MSR_MTRRfix4K_E0000:
+            env->mtrr_fixed[7] = msrs[i].data;
+            break;
+        case MSR_MTRRfix4K_E8000:
+            env->mtrr_fixed[8] = msrs[i].data;
+            break;
+        case MSR_MTRRfix4K_F0000:
+            env->mtrr_fixed[9] = msrs[i].data;
+            break;
+        case MSR_MTRRfix4K_F8000:
+            env->mtrr_fixed[10] = msrs[i].data;
+            break;
+        case MSR_MTRRphysBase(0) ... MSR_MTRRphysMask(MSR_MTRRcap_VCNT - 1):
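+            /* Variable MTRRs come in base/mask pairs: even MSR indices
+             * are MTRRphysBase, odd ones MTRRphysMask. */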
+            if (index & 1) {
+                env->mtrr_var[MSR_MTRRphysIndex(index)].mask = msrs[i].data |
+                                                               mtrr_top_bits;
+            } else {
+                env->mtrr_var[MSR_MTRRphysIndex(index)].base = msrs[i].data;
+            }
+            break;
+        }
+    }
+
+    return 0;
+}
+
+static int kvm_put_mp_state(X86CPU *cpu)
+{
+    struct kvm_mp_state mp_state = { .mp_state = cpu->env.mp_state };
+
+    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state);
+}
+
+static int kvm_get_mp_state(X86CPU *cpu)
+{
+    CPUState *cs = CPU(cpu);
+    CPUX86State *env = &cpu->env;
+    struct kvm_mp_state mp_state;
+    int ret;
+
+    ret = kvm_vcpu_ioctl(cs, KVM_GET_MP_STATE, &mp_state);
+    if (ret < 0) {
+        return ret;
+    }
+    env->mp_state = mp_state.mp_state;
+    if (kvm_irqchip_in_kernel()) {
+        cs->halted = (mp_state.mp_state == KVM_MP_STATE_HALTED);
+    }
+    return 0;
+}
+
+static int kvm_get_apic(X86CPU *cpu)
+{
+    DeviceState *apic = cpu->apic_state;
+    struct kvm_lapic_state kapic;
+    int ret;
+
+    if (apic && kvm_irqchip_in_kernel()) {
+        ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_LAPIC, &kapic);
+        if (ret < 0) {
+            return ret;
+        }
+
+        kvm_get_apic_state(apic, &kapic);
+    }
+    return 0;
+}
+
+static int kvm_put_vcpu_events(X86CPU *cpu, int level)
+{
+    CPUState *cs = CPU(cpu);
+    CPUX86State *env = &cpu->env;
+    struct kvm_vcpu_events events = {};
+
+    if (!kvm_has_vcpu_events()) {
+        return 0;
+    }
+
+    events.exception.injected = (env->exception_injected >= 0);
+    events.exception.nr = env->exception_injected;
+    events.exception.has_error_code = env->has_error_code;
+    events.exception.error_code = env->error_code;
+    events.exception.pad = 0;
+
+    events.interrupt.injected = (env->interrupt_injected >= 0);
+    events.interrupt.nr = env->interrupt_injected;
+    events.interrupt.soft = env->soft_interrupt;
+
+    events.nmi.injected = env->nmi_injected;
+    events.nmi.pending = env->nmi_pending;
+    events.nmi.masked = !!(env->hflags2 & HF2_NMI_MASK);
+    events.nmi.pad = 0;
+
+    events.sipi_vector = env->sipi_vector;
+    events.flags = 0;
+
+    if (has_msr_smbase) {
+        events.smi.smm = !!(env->hflags & HF_SMM_MASK);
+        events.smi.smm_inside_nmi = !!(env->hflags2 & HF2_SMM_INSIDE_NMI_MASK);
+        if (kvm_irqchip_in_kernel()) {
+            /* As soon as these are moved to the kernel, remove them
+             * from cs->interrupt_request.
+             */
+            events.smi.pending = cs->interrupt_request & CPU_INTERRUPT_SMI;
+            events.smi.latched_init = cs->interrupt_request & CPU_INTERRUPT_INIT;
+            cs->interrupt_request &= ~(CPU_INTERRUPT_INIT | CPU_INTERRUPT_SMI);
+        } else {
+            /* Keep these in cs->interrupt_request.  */
+            events.smi.pending = 0;
+            events.smi.latched_init = 0;
+        }
+        events.flags |= KVM_VCPUEVENT_VALID_SMM;
+    }
+
+    if (level >= KVM_PUT_RESET_STATE) {
+        events.flags |=
+            KVM_VCPUEVENT_VALID_NMI_PENDING | KVM_VCPUEVENT_VALID_SIPI_VECTOR;
+    }
+
+    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_VCPU_EVENTS, &events);
+}
+
+static int kvm_get_vcpu_events(X86CPU *cpu)
+{
+    CPUX86State *env = &cpu->env;
+    struct kvm_vcpu_events events;
+    int ret;
+
+    if (!kvm_has_vcpu_events()) {
+        return 0;
+    }
+
+    memset(&events, 0, sizeof(events));
+    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_VCPU_EVENTS, &events);
+    if (ret < 0) {
+        return ret;
+    }
+    env->exception_injected =
+        events.exception.injected ? events.exception.nr : -1;
+    env->has_error_code = events.exception.has_error_code;
+    env->error_code = events.exception.error_code;
+
+    env->interrupt_injected =
+        events.interrupt.injected ? events.interrupt.nr : -1;
+    env->soft_interrupt = events.interrupt.soft;
+
+    env->nmi_injected = events.nmi.injected;
+    env->nmi_pending = events.nmi.pending;
+    if (events.nmi.masked) {
+        env->hflags2 |= HF2_NMI_MASK;
+    } else {
+        env->hflags2 &= ~HF2_NMI_MASK;
+    }
+
+    if (events.flags & KVM_VCPUEVENT_VALID_SMM) {
+        if (events.smi.smm) {
+            env->hflags |= HF_SMM_MASK;
+        } else {
+            env->hflags &= ~HF_SMM_MASK;
+        }
+        if (events.smi.pending) {
+            cpu_interrupt(CPU(cpu), CPU_INTERRUPT_SMI);
+        } else {
+            cpu_reset_interrupt(CPU(cpu), CPU_INTERRUPT_SMI);
+        }
+        if (events.smi.smm_inside_nmi) {
+            env->hflags2 |= HF2_SMM_INSIDE_NMI_MASK;
+        } else {
+            env->hflags2 &= ~HF2_SMM_INSIDE_NMI_MASK;
+        }
+        if (events.smi.latched_init) {
+            cpu_interrupt(CPU(cpu), CPU_INTERRUPT_INIT);
+        } else {
+            cpu_reset_interrupt(CPU(cpu), CPU_INTERRUPT_INIT);
+        }
+    }
+
+    env->sipi_vector = events.sipi_vector;
+
+    return 0;
+}
+
+static int kvm_guest_debug_workarounds(X86CPU *cpu)
+{
+    CPUState *cs = CPU(cpu);
+    CPUX86State *env = &cpu->env;
+    int ret = 0;
+    unsigned long reinject_trap = 0;
+
+    if (!kvm_has_vcpu_events()) {
+        if (env->exception_injected == 1) {
+            reinject_trap = KVM_GUESTDBG_INJECT_DB;
+        } else if (env->exception_injected == 3) {
+            reinject_trap = KVM_GUESTDBG_INJECT_BP;
+        }
+        env->exception_injected = -1;
+    }
+
+    /*
+     * Kernels before KVM_CAP_X86_ROBUST_SINGLESTEP overwrote flags.TF
+     * injected via SET_GUEST_DEBUG while updating GP regs. Work around this
+     * by updating the debug state once again if single-stepping is on.
+     * Another reason to call kvm_update_guest_debug here is a pending debug
+     * trap raised by the guest. On kernels without SET_VCPU_EVENTS we have
+     * to reinject it via SET_GUEST_DEBUG.
+     */
+    if (reinject_trap ||
+        (!kvm_has_robust_singlestep() && cs->singlestep_enabled)) {
+        ret = kvm_update_guest_debug(cs, reinject_trap);
+    }
+    return ret;
+}
+
+static int kvm_put_debugregs(X86CPU *cpu)
+{
+    CPUX86State *env = &cpu->env;
+    struct kvm_debugregs dbgregs;
+    int i;
+
+    if (!kvm_has_debugregs()) {
+        return 0;
+    }
+
+    for (i = 0; i < 4; i++) {
+        dbgregs.db[i] = env->dr[i];
+    }
+    dbgregs.dr6 = env->dr[6];
+    dbgregs.dr7 = env->dr[7];
+    dbgregs.flags = 0;
+
+    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_DEBUGREGS, &dbgregs);
+}
+
+static int kvm_get_debugregs(X86CPU *cpu)
+{
+    CPUX86State *env = &cpu->env;
+    struct kvm_debugregs dbgregs;
+    int i, ret;
+
+    if (!kvm_has_debugregs()) {
+        return 0;
+    }
+
+    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_DEBUGREGS, &dbgregs);
+    if (ret < 0) {
+        return ret;
+    }
+    for (i = 0; i < 4; i++) {
+        env->dr[i] = dbgregs.db[i];
+    }
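+    /* DR4 and DR5 architecturally alias DR6 and DR7 (when CR4.DE is
+     * clear), so mirror the values into both register slots.
+     */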
+    env->dr[4] = env->dr[6] = dbgregs.dr6;
+    env->dr[5] = env->dr[7] = dbgregs.dr7;
+
+    return 0;
+}
+
+int kvm_arch_put_registers(CPUState *cpu, int level)
+{
+    X86CPU *x86_cpu = X86_CPU(cpu);
+    int ret;
+
+    assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu));
+
+    if (level >= KVM_PUT_RESET_STATE) {
+        ret = kvm_put_msr_feature_control(x86_cpu);
+        if (ret < 0) {
+            return ret;
+        }
+    }
+
+    if (level == KVM_PUT_FULL_STATE) {
+        /* We don't check for kvm_arch_set_tsc_khz() errors here,
+         * because a TSC frequency mismatch shouldn't abort migration,
+         * unless the user explicitly asked for a stricter TSC setting
+         * (e.g. using an explicit "tsc-freq" option).
+         */
+        kvm_arch_set_tsc_khz(cpu);
+    }
+
+    ret = kvm_getput_regs(x86_cpu, 1);
+    if (ret < 0) {
+        return ret;
+    }
+    ret = kvm_put_xsave(x86_cpu);
+    if (ret < 0) {
+        return ret;
+    }
+    ret = kvm_put_xcrs(x86_cpu);
+    if (ret < 0) {
+        return ret;
+    }
+    ret = kvm_put_sregs(x86_cpu);
+    if (ret < 0) {
+        return ret;
+    }
+    /* must be before kvm_put_msrs */
+    ret = kvm_inject_mce_oldstyle(x86_cpu);
+    if (ret < 0) {
+        return ret;
+    }
+    ret = kvm_put_msrs(x86_cpu, level);
+    if (ret < 0) {
+        return ret;
+    }
+    if (level >= KVM_PUT_RESET_STATE) {
+        ret = kvm_put_mp_state(x86_cpu);
+        if (ret < 0) {
+            return ret;
+        }
+    }
+
+    ret = kvm_put_tscdeadline_msr(x86_cpu);
+    if (ret < 0) {
+        return ret;
+    }
+
+    ret = kvm_put_vcpu_events(x86_cpu, level);
+    if (ret < 0) {
+        return ret;
+    }
+    ret = kvm_put_debugregs(x86_cpu);
+    if (ret < 0) {
+        return ret;
+    }
+    /* must be last */
+    ret = kvm_guest_debug_workarounds(x86_cpu);
+    if (ret < 0) {
+        return ret;
+    }
+    return 0;
+}
+
+int kvm_arch_get_registers(CPUState *cs)
+{
+    X86CPU *cpu = X86_CPU(cs);
+    int ret;
+
+    assert(cpu_is_stopped(cs) || qemu_cpu_is_self(cs));
+
+    ret = kvm_getput_regs(cpu, 0);
+    if (ret < 0) {
+        goto out;
+    }
+    ret = kvm_get_xsave(cpu);
+    if (ret < 0) {
+        goto out;
+    }
+    ret = kvm_get_xcrs(cpu);
+    if (ret < 0) {
+        goto out;
+    }
+    ret = kvm_get_sregs(cpu);
+    if (ret < 0) {
+        goto out;
+    }
+    ret = kvm_get_msrs(cpu);
+    if (ret < 0) {
+        goto out;
+    }
+    ret = kvm_get_mp_state(cpu);
+    if (ret < 0) {
+        goto out;
+    }
+    ret = kvm_get_apic(cpu);
+    if (ret < 0) {
+        goto out;
+    }
+    ret = kvm_get_vcpu_events(cpu);
+    if (ret < 0) {
+        goto out;
+    }
+    ret = kvm_get_debugregs(cpu);
+    if (ret < 0) {
+        goto out;
+    }
+    ret = 0;
+ out:
+    cpu_sync_bndcs_hflags(&cpu->env);
+    return ret;
+}
+
+void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
+{
+    X86CPU *x86_cpu = X86_CPU(cpu);
+    CPUX86State *env = &x86_cpu->env;
+    int ret;
+
+    /* Inject NMI or SMI */
+    if (cpu->interrupt_request & (CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) {
+        if (cpu->interrupt_request & CPU_INTERRUPT_NMI) {
+            qemu_mutex_lock_iothread();
+            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
+            qemu_mutex_unlock_iothread();
+            DPRINTF("injected NMI\n");
+            ret = kvm_vcpu_ioctl(cpu, KVM_NMI);
+            if (ret < 0) {
+                fprintf(stderr, "KVM: injection failed, NMI lost (%s)\n",
+                        strerror(-ret));
+            }
+        }
+        if (cpu->interrupt_request & CPU_INTERRUPT_SMI) {
+            qemu_mutex_lock_iothread();
+            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
+            qemu_mutex_unlock_iothread();
+            DPRINTF("injected SMI\n");
+            ret = kvm_vcpu_ioctl(cpu, KVM_SMI);
+            if (ret < 0) {
+                fprintf(stderr, "KVM: injection failed, SMI lost (%s)\n",
+                        strerror(-ret));
+            }
+        }
+    }
+
+    if (!kvm_pic_in_kernel()) {
+        qemu_mutex_lock_iothread();
+    }
+
+    /* Force the VCPU out of its inner loop to process any INIT requests
+     * or, for the userspace APIC, pending TPR access reports (it is cheap
+     * to combine the two checks here).
+     */
+    if (cpu->interrupt_request & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
+        if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) &&
+            !(env->hflags & HF_SMM_MASK)) {
+            cpu->exit_request = 1;
+        }
+        if (cpu->interrupt_request & CPU_INTERRUPT_TPR) {
+            cpu->exit_request = 1;
+        }
+    }
+
+    if (!kvm_pic_in_kernel()) {
+        /* Try to inject an interrupt if the guest can accept it */
+        if (run->ready_for_interrupt_injection &&
+            (cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
+            (env->eflags & IF_MASK)) {
+            int irq;
+
+            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
+            irq = cpu_get_pic_interrupt(env);
+            if (irq >= 0) {
+                struct kvm_interrupt intr;
+
+                intr.irq = irq;
+                DPRINTF("injected interrupt %d\n", irq);
+                ret = kvm_vcpu_ioctl(cpu, KVM_INTERRUPT, &intr);
+                if (ret < 0) {
+                    fprintf(stderr,
+                            "KVM: injection failed, interrupt lost (%s)\n",
+                            strerror(-ret));
+                }
+            }
+        }
+
+        /* If we have a pending interrupt but the guest is not yet ready
+         * to receive it, request an interrupt window exit.  This will
+         * cause a return to userspace as soon as the guest is ready to
+         * receive interrupts. */
+        if ((cpu->interrupt_request & CPU_INTERRUPT_HARD)) {
+            run->request_interrupt_window = 1;
+        } else {
+            run->request_interrupt_window = 0;
+        }
+
+        DPRINTF("setting tpr\n");
+        run->cr8 = cpu_get_apic_tpr(x86_cpu->apic_state);
+
+        qemu_mutex_unlock_iothread();
+    }
+}
+
+MemTxAttrs kvm_arch_post_run(CPUState *cpu, struct kvm_run *run)
+{
+    X86CPU *x86_cpu = X86_CPU(cpu);
+    CPUX86State *env = &x86_cpu->env;
+
+    if (run->flags & KVM_RUN_X86_SMM) {
+        env->hflags |= HF_SMM_MASK;
+    } else {
+        env->hflags &= ~HF_SMM_MASK;
+    }
+    if (run->if_flag) {
+        env->eflags |= IF_MASK;
+    } else {
+        env->eflags &= ~IF_MASK;
+    }
+
+    /* We need to protect the apic state against concurrent accesses from
+     * different threads in case the userspace irqchip is used. */
+    if (!kvm_irqchip_in_kernel()) {
+        qemu_mutex_lock_iothread();
+    }
+    cpu_set_apic_tpr(x86_cpu->apic_state, run->cr8);
+    cpu_set_apic_base(x86_cpu->apic_state, run->apic_base);
+    if (!kvm_irqchip_in_kernel()) {
+        qemu_mutex_unlock_iothread();
+    }
+    return cpu_get_mem_attrs(env);
+}
+
+int kvm_arch_process_async_events(CPUState *cs)
+{
+    X86CPU *cpu = X86_CPU(cs);
+    CPUX86State *env = &cpu->env;
+
+    if (cs->interrupt_request & CPU_INTERRUPT_MCE) {
+        /* We must not raise CPU_INTERRUPT_MCE if it's not supported. */
+        assert(env->mcg_cap);
+
+        cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
+
+        kvm_cpu_synchronize_state(cs);
+
+        if (env->exception_injected == EXCP08_DBLE) {
+            /* this means triple fault */
+            qemu_system_reset_request();
+            cs->exit_request = 1;
+            return 0;
+        }
+        env->exception_injected = EXCP12_MCHK;
+        env->has_error_code = 0;
+
+        cs->halted = 0;
+        if (kvm_irqchip_in_kernel() && env->mp_state == KVM_MP_STATE_HALTED) {
+            env->mp_state = KVM_MP_STATE_RUNNABLE;
+        }
+    }
+
+    if ((cs->interrupt_request & CPU_INTERRUPT_INIT) &&
+        !(env->hflags & HF_SMM_MASK)) {
+        kvm_cpu_synchronize_state(cs);
+        do_cpu_init(cpu);
+    }
+
+    if (kvm_irqchip_in_kernel()) {
+        return 0;
+    }
+
+    if (cs->interrupt_request & CPU_INTERRUPT_POLL) {
+        cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
+        apic_poll_irq(cpu->apic_state);
+    }
+    if (((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
+         (env->eflags & IF_MASK)) ||
+        (cs->interrupt_request & CPU_INTERRUPT_NMI)) {
+        cs->halted = 0;
+    }
+    if (cs->interrupt_request & CPU_INTERRUPT_SIPI) {
+        kvm_cpu_synchronize_state(cs);
+        do_cpu_sipi(cpu);
+    }
+    if (cs->interrupt_request & CPU_INTERRUPT_TPR) {
+        cs->interrupt_request &= ~CPU_INTERRUPT_TPR;
+        kvm_cpu_synchronize_state(cs);
+        apic_handle_tpr_access_report(cpu->apic_state, env->eip,
+                                      env->tpr_access_type);
+    }
+
+    return cs->halted;
+}
+
+static int kvm_handle_halt(X86CPU *cpu)
+{
+    CPUState *cs = CPU(cpu);
+    CPUX86State *env = &cpu->env;
+
+    if (!((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
+          (env->eflags & IF_MASK)) &&
+        !(cs->interrupt_request & CPU_INTERRUPT_NMI)) {
+        cs->halted = 1;
+        return EXCP_HLT;
+    }
+
+    return 0;
+}
+
+static int kvm_handle_tpr_access(X86CPU *cpu)
+{
+    CPUState *cs = CPU(cpu);
+    struct kvm_run *run = cs->kvm_run;
+
+    apic_handle_tpr_access_report(cpu->apic_state, run->tpr_access.rip,
+                                  run->tpr_access.is_write ? TPR_ACCESS_WRITE
+                                                           : TPR_ACCESS_READ);
+    return 1;
+}
+
+int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
+{
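+    /* 0xcc is the one-byte INT3 instruction used for software
+     * breakpoints.
+     */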
+    static const uint8_t int3 = 0xcc;
+
+    if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 1, 0) ||
+        cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&int3, 1, 1)) {
+        return -EINVAL;
+    }
+    return 0;
+}
+
+int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
+{
+    uint8_t int3;
+
+    if (cpu_memory_rw_debug(cs, bp->pc, &int3, 1, 0) || int3 != 0xcc ||
+        cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 1, 1)) {
+        return -EINVAL;
+    }
+    return 0;
+}
+
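+/* x86 provides four debug address registers (DR0-DR3), so at most four
+ * hardware breakpoints/watchpoints can be armed at any time.
+ */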
+static struct {
+    target_ulong addr;
+    int len;
+    int type;
+} hw_breakpoint[4];
+
+static int nb_hw_breakpoint;
+
+static int find_hw_breakpoint(target_ulong addr, int len, int type)
+{
+    int n;
+
+    for (n = 0; n < nb_hw_breakpoint; n++) {
+        if (hw_breakpoint[n].addr == addr && hw_breakpoint[n].type == type &&
+            (hw_breakpoint[n].len == len || len == -1)) {
+            return n;
+        }
+    }
+    return -1;
+}
+
+int kvm_arch_insert_hw_breakpoint(target_ulong addr,
+                                  target_ulong len, int type)
+{
+    switch (type) {
+    case GDB_BREAKPOINT_HW:
+        len = 1;
+        break;
+    case GDB_WATCHPOINT_WRITE:
+    case GDB_WATCHPOINT_ACCESS:
+        switch (len) {
+        case 1:
+            break;
+        case 2:
+        case 4:
+        case 8:
+            if (addr & (len - 1)) {
+                return -EINVAL;
+            }
+            break;
+        default:
+            return -EINVAL;
+        }
+        break;
+    default:
+        return -ENOSYS;
+    }
+
+    if (nb_hw_breakpoint == 4) {
+        return -ENOBUFS;
+    }
+    if (find_hw_breakpoint(addr, len, type) >= 0) {
+        return -EEXIST;
+    }
+    hw_breakpoint[nb_hw_breakpoint].addr = addr;
+    hw_breakpoint[nb_hw_breakpoint].len = len;
+    hw_breakpoint[nb_hw_breakpoint].type = type;
+    nb_hw_breakpoint++;
+
+    return 0;
+}
+
+int kvm_arch_remove_hw_breakpoint(target_ulong addr,
+                                  target_ulong len, int type)
+{
+    int n;
+
+    n = find_hw_breakpoint(addr, (type == GDB_BREAKPOINT_HW) ? 1 : len, type);
+    if (n < 0) {
+        return -ENOENT;
+    }
+    nb_hw_breakpoint--;
+    hw_breakpoint[n] = hw_breakpoint[nb_hw_breakpoint];
+
+    return 0;
+}
+
+void kvm_arch_remove_all_hw_breakpoints(void)
+{
+    nb_hw_breakpoint = 0;
+}
+
+static CPUWatchpoint hw_watchpoint;
+
+static int kvm_handle_debug(X86CPU *cpu,
+                            struct kvm_debug_exit_arch *arch_info)
+{
+    CPUState *cs = CPU(cpu);
+    CPUX86State *env = &cpu->env;
+    int ret = 0;
+    int n;
+
+    if (arch_info->exception == 1) {
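+        /* DR6 bit 14 (BS) flags a single-step trap; bits 0-3 report
+         * which hardware breakpoint slot fired.
+         */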
+        if (arch_info->dr6 & (1 << 14)) {
+            if (cs->singlestep_enabled) {
+                ret = EXCP_DEBUG;
+            }
+        } else {
+            for (n = 0; n < 4; n++) {
+                if (arch_info->dr6 & (1 << n)) {
+                    switch ((arch_info->dr7 >> (16 + n*4)) & 0x3) {
+                    case 0x0:
+                        ret = EXCP_DEBUG;
+                        break;
+                    case 0x1:
+                        ret = EXCP_DEBUG;
+                        cs->watchpoint_hit = &hw_watchpoint;
+                        hw_watchpoint.vaddr = hw_breakpoint[n].addr;
+                        hw_watchpoint.flags = BP_MEM_WRITE;
+                        break;
+                    case 0x3:
+                        ret = EXCP_DEBUG;
+                        cs->watchpoint_hit = &hw_watchpoint;
+                        hw_watchpoint.vaddr = hw_breakpoint[n].addr;
+                        hw_watchpoint.flags = BP_MEM_ACCESS;
+                        break;
+                    }
+                }
+            }
+        }
+    } else if (kvm_find_sw_breakpoint(cs, arch_info->pc)) {
+        ret = EXCP_DEBUG;
+    }
+    if (ret == 0) {
+        cpu_synchronize_state(cs);
+        assert(env->exception_injected == -1);
+
+        /* pass to guest */
+        env->exception_injected = arch_info->exception;
+        env->has_error_code = 0;
+    }
+
+    return ret;
+}
+
+void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg)
+{
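+    /* DR7 R/W field encodings: 00 execute, 01 data write, 11 data
+     * read/write.  DR7 LEN field encodings: 00 = 1 byte, 01 = 2 bytes,
+     * 11 = 4 bytes, 10 = 8 bytes (the last only in long mode).
+     */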
+    const uint8_t type_code[] = {
+        [GDB_BREAKPOINT_HW] = 0x0,
+        [GDB_WATCHPOINT_WRITE] = 0x1,
+        [GDB_WATCHPOINT_ACCESS] = 0x3
+    };
+    const uint8_t len_code[] = {
+        [1] = 0x0, [2] = 0x1, [4] = 0x3, [8] = 0x2
+    };
+    int n;
+
+    if (kvm_sw_breakpoints_active(cpu)) {
+        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
+    }
+    if (nb_hw_breakpoint > 0) {
+        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
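+        /* 0x0600: DR7 bit 10 is reserved and must be set; bit 9 (GE)
+         * requests exact data breakpoint matching.  The loop below sets
+         * the Gn (global enable) bit plus the R/W and LEN fields for
+         * each armed slot.
+         */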
+        dbg->arch.debugreg[7] = 0x0600;
+        for (n = 0; n < nb_hw_breakpoint; n++) {
+            dbg->arch.debugreg[n] = hw_breakpoint[n].addr;
+            dbg->arch.debugreg[7] |= (2 << (n * 2)) |
+                (type_code[hw_breakpoint[n].type] << (16 + n*4)) |
+                ((uint32_t)len_code[hw_breakpoint[n].len] << (18 + n*4));
+        }
+    }
+}
+
+static bool host_supports_vmx(void)
+{
+    uint32_t ecx, unused;
+
+    host_cpuid(1, 0, &unused, &unused, &ecx, &unused);
+    return ecx & CPUID_EXT_VMX;
+}
+
+#define VMX_INVALID_GUEST_STATE 0x80000021
+
+int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
+{
+    X86CPU *cpu = X86_CPU(cs);
+    uint64_t code;
+    int ret;
+
+    switch (run->exit_reason) {
+    case KVM_EXIT_HLT:
+        DPRINTF("handle_hlt\n");
+        qemu_mutex_lock_iothread();
+        ret = kvm_handle_halt(cpu);
+        qemu_mutex_unlock_iothread();
+        break;
+    case KVM_EXIT_SET_TPR:
+        ret = 0;
+        break;
+    case KVM_EXIT_TPR_ACCESS:
+        qemu_mutex_lock_iothread();
+        ret = kvm_handle_tpr_access(cpu);
+        qemu_mutex_unlock_iothread();
+        break;
+    case KVM_EXIT_FAIL_ENTRY:
+        code = run->fail_entry.hardware_entry_failure_reason;
+        fprintf(stderr, "KVM: entry failed, hardware error 0x%" PRIx64 "\n",
+                code);
+        if (host_supports_vmx() && code == VMX_INVALID_GUEST_STATE) {
+            fprintf(stderr,
+                    "\nIf you're running a guest on an Intel machine without "
+                        "unrestricted mode\n"
+                    "support, the failure can be most likely due to the guest "
+                        "entering an invalid\n"
+                    "state for Intel VT. For example, the guest maybe running "
+                        "in big real mode\n"
+                    "which is not supported on less recent Intel processors."
+                        "\n\n");
+        }
+        ret = -1;
+        break;
+    case KVM_EXIT_EXCEPTION:
+        fprintf(stderr, "KVM: exception %d exit (error code 0x%x)\n",
+                run->ex.exception, run->ex.error_code);
+        ret = -1;
+        break;
+    case KVM_EXIT_DEBUG:
+        DPRINTF("kvm_exit_debug\n");
+        qemu_mutex_lock_iothread();
+        ret = kvm_handle_debug(cpu, &run->debug.arch);
+        qemu_mutex_unlock_iothread();
+        break;
+    case KVM_EXIT_HYPERV:
+        ret = kvm_hv_handle_exit(cpu, &run->hyperv);
+        break;
+    case KVM_EXIT_IOAPIC_EOI:
+        ioapic_eoi_broadcast(run->eoi.vector);
+        ret = 0;
+        break;
+    default:
+        fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
+        ret = -1;
+        break;
+    }
+
+    return ret;
+}
+
+bool kvm_arch_stop_on_emulation_error(CPUState *cs)
+{
+    X86CPU *cpu = X86_CPU(cs);
+    CPUX86State *env = &cpu->env;
+
+    kvm_cpu_synchronize_state(cs);
+    return !(env->cr[0] & CR0_PE_MASK) ||
+           ((env->segs[R_CS].selector & 3) != 3);
+}
+
+void kvm_arch_init_irq_routing(KVMState *s)
+{
+    if (!kvm_check_extension(s, KVM_CAP_IRQ_ROUTING)) {
+        /* If the kernel can't do IRQ routing, the interrupt source
+         * override 0->2 required by HPET cannot be set up, so we
+         * have to disable HPET.
+         */
+        no_hpet = 1;
+    }
+    /* We know at this point that we're using the in-kernel
+     * irqchip, so we can use irqfds, and on x86 we know
+     * we can use msi via irqfd and GSI routing.
+     */
+    kvm_msi_via_irqfd_allowed = true;
+    kvm_gsi_routing_allowed = true;
+
+    if (kvm_irqchip_is_split()) {
+        int i;
+
+        /* If the ioapic is in QEMU and the lapics are in KVM, reserve
+           MSI routes for signaling interrupts to the local apics. */
+        for (i = 0; i < IOAPIC_NUM_PINS; i++) {
+            if (kvm_irqchip_add_msi_route(s, 0, NULL) < 0) {
+                error_report("Could not enable split IRQ mode.");
+                exit(1);
+            }
+        }
+    }
+}
+
+int kvm_arch_irqchip_create(MachineState *ms, KVMState *s)
+{
+    int ret;
+    if (machine_kernel_irqchip_split(ms)) {
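+        /* The capability argument is the number of IRQ routes reserved
+         * for the userspace IOAPIC; 24 matches IOAPIC_NUM_PINS.
+         */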
+        ret = kvm_vm_enable_cap(s, KVM_CAP_SPLIT_IRQCHIP, 0, 24);
+        if (ret) {
+            error_report("Could not enable split irqchip mode: %s",
+                         strerror(-ret));
+            exit(1);
+        } else {
+            DPRINTF("Enabled KVM_CAP_SPLIT_IRQCHIP\n");
+            kvm_split_irqchip = true;
+            return 1;
+        }
+    } else {
+        return 0;
+    }
+}
+
+/* Classic KVM device assignment interface. Will remain x86 only. */
+int kvm_device_pci_assign(KVMState *s, PCIHostDeviceAddress *dev_addr,
+                          uint32_t flags, uint32_t *dev_id)
+{
+    struct kvm_assigned_pci_dev dev_data = {
+        .segnr = dev_addr->domain,
+        .busnr = dev_addr->bus,
+        .devfn = PCI_DEVFN(dev_addr->slot, dev_addr->function),
+        .flags = flags,
+    };
+    int ret;
+
+    dev_data.assigned_dev_id =
+        (dev_addr->domain << 16) | (dev_addr->bus << 8) | dev_data.devfn;
+
+    ret = kvm_vm_ioctl(s, KVM_ASSIGN_PCI_DEVICE, &dev_data);
+    if (ret < 0) {
+        return ret;
+    }
+
+    *dev_id = dev_data.assigned_dev_id;
+
+    return 0;
+}
+
+int kvm_device_pci_deassign(KVMState *s, uint32_t dev_id)
+{
+    struct kvm_assigned_pci_dev dev_data = {
+        .assigned_dev_id = dev_id,
+    };
+
+    return kvm_vm_ioctl(s, KVM_DEASSIGN_PCI_DEVICE, &dev_data);
+}
+
+static int kvm_assign_irq_internal(KVMState *s, uint32_t dev_id,
+                                   uint32_t irq_type, uint32_t guest_irq)
+{
+    struct kvm_assigned_irq assigned_irq = {
+        .assigned_dev_id = dev_id,
+        .guest_irq = guest_irq,
+        .flags = irq_type,
+    };
+
+    if (kvm_check_extension(s, KVM_CAP_ASSIGN_DEV_IRQ)) {
+        return kvm_vm_ioctl(s, KVM_ASSIGN_DEV_IRQ, &assigned_irq);
+    } else {
+        return kvm_vm_ioctl(s, KVM_ASSIGN_IRQ, &assigned_irq);
+    }
+}
+
+int kvm_device_intx_assign(KVMState *s, uint32_t dev_id, bool use_host_msi,
+                           uint32_t guest_irq)
+{
+    uint32_t irq_type = KVM_DEV_IRQ_GUEST_INTX |
+        (use_host_msi ? KVM_DEV_IRQ_HOST_MSI : KVM_DEV_IRQ_HOST_INTX);
+
+    return kvm_assign_irq_internal(s, dev_id, irq_type, guest_irq);
+}
+
+int kvm_device_intx_set_mask(KVMState *s, uint32_t dev_id, bool masked)
+{
+    struct kvm_assigned_pci_dev dev_data = {
+        .assigned_dev_id = dev_id,
+        .flags = masked ? KVM_DEV_ASSIGN_MASK_INTX : 0,
+    };
+
+    return kvm_vm_ioctl(s, KVM_ASSIGN_SET_INTX_MASK, &dev_data);
+}
+
+static int kvm_deassign_irq_internal(KVMState *s, uint32_t dev_id,
+                                     uint32_t type)
+{
+    struct kvm_assigned_irq assigned_irq = {
+        .assigned_dev_id = dev_id,
+        .flags = type,
+    };
+
+    return kvm_vm_ioctl(s, KVM_DEASSIGN_DEV_IRQ, &assigned_irq);
+}
+
+int kvm_device_intx_deassign(KVMState *s, uint32_t dev_id, bool use_host_msi)
+{
+    return kvm_deassign_irq_internal(s, dev_id, KVM_DEV_IRQ_GUEST_INTX |
+        (use_host_msi ? KVM_DEV_IRQ_HOST_MSI : KVM_DEV_IRQ_HOST_INTX));
+}
+
+int kvm_device_msi_assign(KVMState *s, uint32_t dev_id, int virq)
+{
+    return kvm_assign_irq_internal(s, dev_id, KVM_DEV_IRQ_HOST_MSI |
+                                              KVM_DEV_IRQ_GUEST_MSI, virq);
+}
+
+int kvm_device_msi_deassign(KVMState *s, uint32_t dev_id)
+{
+    return kvm_deassign_irq_internal(s, dev_id, KVM_DEV_IRQ_GUEST_MSI |
+                                                KVM_DEV_IRQ_HOST_MSI);
+}
+
+bool kvm_device_msix_supported(KVMState *s)
+{
+    /* The kernel lacks a corresponding KVM_CAP, so we probe by calling
+     * KVM_ASSIGN_SET_MSIX_NR with an invalid parameter. */
+    return kvm_vm_ioctl(s, KVM_ASSIGN_SET_MSIX_NR, NULL) == -EFAULT;
+}
+
+int kvm_device_msix_init_vectors(KVMState *s, uint32_t dev_id,
+                                 uint32_t nr_vectors)
+{
+    struct kvm_assigned_msix_nr msix_nr = {
+        .assigned_dev_id = dev_id,
+        .entry_nr = nr_vectors,
+    };
+
+    return kvm_vm_ioctl(s, KVM_ASSIGN_SET_MSIX_NR, &msix_nr);
+}
+
+int kvm_device_msix_set_vector(KVMState *s, uint32_t dev_id, uint32_t vector,
+                               int virq)
+{
+    struct kvm_assigned_msix_entry msix_entry = {
+        .assigned_dev_id = dev_id,
+        .gsi = virq,
+        .entry = vector,
+    };
+
+    return kvm_vm_ioctl(s, KVM_ASSIGN_SET_MSIX_ENTRY, &msix_entry);
+}
+
+int kvm_device_msix_assign(KVMState *s, uint32_t dev_id)
+{
+    return kvm_assign_irq_internal(s, dev_id, KVM_DEV_IRQ_HOST_MSIX |
+                                              KVM_DEV_IRQ_GUEST_MSIX, 0);
+}
+
+int kvm_device_msix_deassign(KVMState *s, uint32_t dev_id)
+{
+    return kvm_deassign_irq_internal(s, dev_id, KVM_DEV_IRQ_GUEST_MSIX |
+                                                KVM_DEV_IRQ_HOST_MSIX);
+}
+
+int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
+                             uint64_t address, uint32_t data, PCIDevice *dev)
+{
+    X86IOMMUState *iommu = x86_iommu_get_default();
+
+    if (iommu) {
+        int ret;
+        MSIMessage src, dst;
+        X86IOMMUClass *class = X86_IOMMU_GET_CLASS(iommu);
+
+        src.address = route->u.msi.address_hi;
+        src.address <<= VTD_MSI_ADDR_HI_SHIFT;
+        src.address |= route->u.msi.address_lo;
+        src.data = route->u.msi.data;
+
+        ret = class->int_remap(iommu, &src, &dst, dev ?
+                               pci_requester_id(dev) :
+                               X86_IOMMU_SID_INVALID);
+        if (ret) {
+            trace_kvm_x86_fixup_msi_error(route->gsi);
+            return 1;
+        }
+
+        route->u.msi.address_hi = dst.address >> VTD_MSI_ADDR_HI_SHIFT;
+        route->u.msi.address_lo = dst.address & VTD_MSI_ADDR_LO_MASK;
+        route->u.msi.data = dst.data;
+    }
+
+    return 0;
+}
+
+typedef struct MSIRouteEntry MSIRouteEntry;
+
+struct MSIRouteEntry {
+    PCIDevice *dev;             /* Device pointer */
+    int vector;                 /* MSI/MSIX vector index */
+    int virq;                   /* Virtual IRQ index */
+    QLIST_ENTRY(MSIRouteEntry) list;
+};
+
+/* List of used GSI routes */
+static QLIST_HEAD(, MSIRouteEntry) msi_route_list =
+    QLIST_HEAD_INITIALIZER(msi_route_list);
+
+static void kvm_update_msi_routes_all(void *private, bool global,
+                                      uint32_t index, uint32_t mask)
+{
+    int cnt = 0;
+    MSIRouteEntry *entry;
+    MSIMessage msg;
+    /* TODO: explicit route update */
+    QLIST_FOREACH(entry, &msi_route_list, list) {
+        cnt++;
+        msg = pci_get_msi_message(entry->dev, entry->vector);
+        kvm_irqchip_update_msi_route(kvm_state, entry->virq,
+                                     msg, entry->dev);
+    }
+    kvm_irqchip_commit_routes(kvm_state);
+    trace_kvm_x86_update_msi_routes(cnt);
+}
+
+int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
+                                int vector, PCIDevice *dev)
+{
+    static bool notify_list_inited = false;
+    MSIRouteEntry *entry;
+
+    if (!dev) {
+        /* Routes without a device are (possibly) IOAPIC routes, used
+         * only in split kernel irqchip mode; we only track routes for
+         * PCI devices here. */
+        return 0;
+    }
+
+    entry = g_new0(MSIRouteEntry, 1);
+    entry->dev = dev;
+    entry->vector = vector;
+    entry->virq = route->gsi;
+    QLIST_INSERT_HEAD(&msi_route_list, entry, list);
+
+    trace_kvm_x86_add_msi_route(route->gsi);
+
+    if (!notify_list_inited) {
+        /* The first time a route is added, register ourselves on the
+         * IOMMU's IEC notifier list if needed. */
+        X86IOMMUState *iommu = x86_iommu_get_default();
+        if (iommu) {
+            x86_iommu_iec_register_notifier(iommu,
+                                            kvm_update_msi_routes_all,
+                                            NULL);
+        }
+        notify_list_inited = true;
+    }
+    return 0;
+}
+
+int kvm_arch_release_virq_post(int virq)
+{
+    MSIRouteEntry *entry, *next;
+    QLIST_FOREACH_SAFE(entry, &msi_route_list, list, next) {
+        if (entry->virq == virq) {
+            trace_kvm_x86_remove_msi_route(virq);
+            QLIST_REMOVE(entry, list);
+            break;
+        }
+    }
+    return 0;
+}
+
+int kvm_arch_msi_data_to_gsi(uint32_t data)
+{
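+    /* Should be unreachable on x86: GSI routing is always available
+     * (see kvm_arch_init_irq_routing), so MSI data never needs a direct
+     * translation to a GSI number.
+     */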
+    abort();
+}
diff --git a/target/i386/kvm_i386.h b/target/i386/kvm_i386.h
new file mode 100644
index 0000000000..76079295b2
--- /dev/null
+++ b/target/i386/kvm_i386.h
@@ -0,0 +1,48 @@
+/*
+ * QEMU KVM support -- x86 specific functions.
+ *
+ * Copyright (c) 2012 Linaro Limited
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef QEMU_KVM_I386_H
+#define QEMU_KVM_I386_H
+
+#include "sysemu/kvm.h"
+
+#define kvm_apic_in_kernel() (kvm_irqchip_in_kernel())
+
+bool kvm_allows_irq0_override(void);
+bool kvm_has_smm(void);
+void kvm_synchronize_all_tsc(void);
+void kvm_arch_reset_vcpu(X86CPU *cs);
+void kvm_arch_do_init_vcpu(X86CPU *cs);
+
+int kvm_device_pci_assign(KVMState *s, PCIHostDeviceAddress *dev_addr,
+                          uint32_t flags, uint32_t *dev_id);
+int kvm_device_pci_deassign(KVMState *s, uint32_t dev_id);
+
+int kvm_device_intx_assign(KVMState *s, uint32_t dev_id,
+                           bool use_host_msi, uint32_t guest_irq);
+int kvm_device_intx_set_mask(KVMState *s, uint32_t dev_id, bool masked);
+int kvm_device_intx_deassign(KVMState *s, uint32_t dev_id, bool use_host_msi);
+
+int kvm_device_msi_assign(KVMState *s, uint32_t dev_id, int virq);
+int kvm_device_msi_deassign(KVMState *s, uint32_t dev_id);
+
+bool kvm_device_msix_supported(KVMState *s);
+int kvm_device_msix_init_vectors(KVMState *s, uint32_t dev_id,
+                                 uint32_t nr_vectors);
+int kvm_device_msix_set_vector(KVMState *s, uint32_t dev_id, uint32_t vector,
+                               int virq);
+int kvm_device_msix_assign(KVMState *s, uint32_t dev_id);
+int kvm_device_msix_deassign(KVMState *s, uint32_t dev_id);
+
+void kvm_put_apicbase(X86CPU *cpu, uint64_t value);
+
+bool kvm_enable_x2apic(void);
+bool kvm_has_x2apic_api(void);
+#endif /* QEMU_KVM_I386_H */
diff --git a/target/i386/machine.c b/target/i386/machine.c
new file mode 100644
index 0000000000..760f82b6c7
--- /dev/null
+++ b/target/i386/machine.c
@@ -0,0 +1,1047 @@
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "hw/hw.h"
+#include "hw/boards.h"
+#include "hw/i386/pc.h"
+#include "hw/isa/isa.h"
+#include "migration/cpu.h"
+
+#include "sysemu/kvm.h"
+
+#include "qemu/error-report.h"
+
+static const VMStateDescription vmstate_segment = {
+    .name = "segment",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT32(selector, SegmentCache),
+        VMSTATE_UINTTL(base, SegmentCache),
+        VMSTATE_UINT32(limit, SegmentCache),
+        VMSTATE_UINT32(flags, SegmentCache),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+#define VMSTATE_SEGMENT(_field, _state) {                            \
+    .name       = (stringify(_field)),                               \
+    .size       = sizeof(SegmentCache),                              \
+    .vmsd       = &vmstate_segment,                                  \
+    .flags      = VMS_STRUCT,                                        \
+    .offset     = offsetof(_state, _field)                           \
+            + type_check(SegmentCache, typeof_field(_state, _field)) \
+}
+
+#define VMSTATE_SEGMENT_ARRAY(_field, _state, _n)                    \
+    VMSTATE_STRUCT_ARRAY(_field, _state, _n, 0, vmstate_segment, SegmentCache)
+
+static const VMStateDescription vmstate_xmm_reg = {
+    .name = "xmm_reg",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT64(ZMM_Q(0), ZMMReg),
+        VMSTATE_UINT64(ZMM_Q(1), ZMMReg),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+#define VMSTATE_XMM_REGS(_field, _state, _start)                         \
+    VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, 0,     \
+                             vmstate_xmm_reg, ZMMReg)
+
+/* YMMH format is the same as XMM, but for bits 128-255 */
+static const VMStateDescription vmstate_ymmh_reg = {
+    .name = "ymmh_reg",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT64(ZMM_Q(2), ZMMReg),
+        VMSTATE_UINT64(ZMM_Q(3), ZMMReg),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+#define VMSTATE_YMMH_REGS_VARS(_field, _state, _start, _v)               \
+    VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, _v,    \
+                             vmstate_ymmh_reg, ZMMReg)
+
+static const VMStateDescription vmstate_zmmh_reg = {
+    .name = "zmmh_reg",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT64(ZMM_Q(4), ZMMReg),
+        VMSTATE_UINT64(ZMM_Q(5), ZMMReg),
+        VMSTATE_UINT64(ZMM_Q(6), ZMMReg),
+        VMSTATE_UINT64(ZMM_Q(7), ZMMReg),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+#define VMSTATE_ZMMH_REGS_VARS(_field, _state, _start)                   \
+    VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, 0,     \
+                             vmstate_zmmh_reg, ZMMReg)
+
+#ifdef TARGET_X86_64
+static const VMStateDescription vmstate_hi16_zmm_reg = {
+    .name = "hi16_zmm_reg",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT64(ZMM_Q(0), ZMMReg),
+        VMSTATE_UINT64(ZMM_Q(1), ZMMReg),
+        VMSTATE_UINT64(ZMM_Q(2), ZMMReg),
+        VMSTATE_UINT64(ZMM_Q(3), ZMMReg),
+        VMSTATE_UINT64(ZMM_Q(4), ZMMReg),
+        VMSTATE_UINT64(ZMM_Q(5), ZMMReg),
+        VMSTATE_UINT64(ZMM_Q(6), ZMMReg),
+        VMSTATE_UINT64(ZMM_Q(7), ZMMReg),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+#define VMSTATE_Hi16_ZMM_REGS_VARS(_field, _state, _start)               \
+    VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, 0,     \
+                             vmstate_hi16_zmm_reg, ZMMReg)
+#endif
+
+static const VMStateDescription vmstate_bnd_regs = {
+    .name = "bnd_regs",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT64(lb, BNDReg),
+        VMSTATE_UINT64(ub, BNDReg),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+#define VMSTATE_BND_REGS(_field, _state, _n)          \
+    VMSTATE_STRUCT_ARRAY(_field, _state, _n, 0, vmstate_bnd_regs, BNDReg)
+
+static const VMStateDescription vmstate_mtrr_var = {
+    .name = "mtrr_var",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT64(base, MTRRVar),
+        VMSTATE_UINT64(mask, MTRRVar),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+#define VMSTATE_MTRR_VARS(_field, _state, _n, _v)                    \
+    VMSTATE_STRUCT_ARRAY(_field, _state, _n, _v, vmstate_mtrr_var, MTRRVar)
+
+static void put_fpreg_error(QEMUFile *f, void *opaque, size_t size)
+{
+    fprintf(stderr, "put_fpreg() called with invalid arguments\n");
+    exit(1);
+}
+
+/* XXX: move this into a generic FPU layer */
+struct x86_longdouble {
+    uint64_t mant;
+    uint16_t exp;
+};
+
+#define MANTD1(fp)	(fp & ((1LL << 52) - 1))
+#define EXPBIAS1 1023
+#define EXPD1(fp)	((fp >> 52) & 0x7FF)
+#define SIGND1(fp)	((fp >> 32) & 0x80000000)
+
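+/* Convert an IEEE 754 double (52-bit mantissa with an implicit integer
+   bit, exponent bias 1023) into x87 80-bit extended precision (64-bit
+   mantissa with an explicit integer bit, exponent bias 16383). */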
+static void fp64_to_fp80(struct x86_longdouble *p, uint64_t temp)
+{
+    int e;
+    /* mantissa */
+    p->mant = (MANTD1(temp) << 11) | (1LL << 63);
+    /* exponent + sign */
+    e = EXPD1(temp) - EXPBIAS1 + 16383;
+    e |= SIGND1(temp) >> 16;
+    p->exp = e;
+}
+
+static int get_fpreg(QEMUFile *f, void *opaque, size_t size)
+{
+    FPReg *fp_reg = opaque;
+    uint64_t mant;
+    uint16_t exp;
+
+    qemu_get_be64s(f, &mant);
+    qemu_get_be16s(f, &exp);
+    fp_reg->d = cpu_set_fp80(mant, exp);
+    return 0;
+}
+
+static void put_fpreg(QEMUFile *f, void *opaque, size_t size)
+{
+    FPReg *fp_reg = opaque;
+    uint64_t mant;
+    uint16_t exp;
+    /* We save the real CPU data (in case of MMX usage, only 'mant'
+       contains the MMX register). */
+    cpu_get_fp80(&mant, &exp, fp_reg->d);
+    qemu_put_be64s(f, &mant);
+    qemu_put_be16s(f, &exp);
+}
+
+static const VMStateInfo vmstate_fpreg = {
+    .name = "fpreg",
+    .get  = get_fpreg,
+    .put  = put_fpreg,
+};
+
+static int get_fpreg_1_mmx(QEMUFile *f, void *opaque, size_t size)
+{
+    struct x86_longdouble *p = opaque;
+    uint64_t mant;
+
+    qemu_get_be64s(f, &mant);
+    p->mant = mant;
+    p->exp = 0xffff;
+    return 0;
+}
+
+static const VMStateInfo vmstate_fpreg_1_mmx = {
+    .name = "fpreg_1_mmx",
+    .get  = get_fpreg_1_mmx,
+    .put  = put_fpreg_error,
+};
+
+static int get_fpreg_1_no_mmx(QEMUFile *f, void *opaque, size_t size)
+{
+    struct x86_longdouble *p = opaque;
+    uint64_t mant;
+
+    qemu_get_be64s(f, &mant);
+    fp64_to_fp80(p, mant);
+    return 0;
+}
+
+static const VMStateInfo vmstate_fpreg_1_no_mmx = {
+    .name = "fpreg_1_no_mmx",
+    .get  = get_fpreg_1_no_mmx,
+    .put  = put_fpreg_error,
+};
+
+static bool fpregs_is_0(void *opaque, int version_id)
+{
+    X86CPU *cpu = opaque;
+    CPUX86State *env = &cpu->env;
+
+    return (env->fpregs_format_vmstate == 0);
+}
+
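+/* Heuristic for format-1 streams: if all eight registers are valid
+   (fptag == 0xff) and the FPU top-of-stack is zero, the register file
+   most likely holds MMX state rather than x87 values. */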
+static bool fpregs_is_1_mmx(void *opaque, int version_id)
+{
+    X86CPU *cpu = opaque;
+    CPUX86State *env = &cpu->env;
+    int guess_mmx;
+
+    guess_mmx = ((env->fptag_vmstate == 0xff) &&
+                 (env->fpus_vmstate & 0x3800) == 0);
+    return (guess_mmx && (env->fpregs_format_vmstate == 1));
+}
+
+static bool fpregs_is_1_no_mmx(void *opaque, int version_id)
+{
+    X86CPU *cpu = opaque;
+    CPUX86State *env = &cpu->env;
+    int guess_mmx;
+
+    guess_mmx = ((env->fptag_vmstate == 0xff) &&
+                 (env->fpus_vmstate & 0x3800) == 0);
+    return (!guess_mmx && (env->fpregs_format_vmstate == 1));
+}
+
+#define VMSTATE_FP_REGS(_field, _state, _n)                               \
+    VMSTATE_ARRAY_TEST(_field, _state, _n, fpregs_is_0, vmstate_fpreg, FPReg), \
+    VMSTATE_ARRAY_TEST(_field, _state, _n, fpregs_is_1_mmx, vmstate_fpreg_1_mmx, FPReg), \
+    VMSTATE_ARRAY_TEST(_field, _state, _n, fpregs_is_1_no_mmx, vmstate_fpreg_1_no_mmx, FPReg)
+
+static bool version_is_5(void *opaque, int version_id)
+{
+    return version_id == 5;
+}
+
+#ifdef TARGET_X86_64
+static bool less_than_7(void *opaque, int version_id)
+{
+    return version_id < 7;
+}
+
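+/* Migration compatibility hack: these fields were widened to 64 bits,
+   but older stream versions still store them as 32-bit big-endian
+   values. */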
+static int get_uint64_as_uint32(QEMUFile *f, void *pv, size_t size)
+{
+    uint64_t *v = pv;
+    *v = qemu_get_be32(f);
+    return 0;
+}
+
+static void put_uint64_as_uint32(QEMUFile *f, void *pv, size_t size)
+{
+    uint64_t *v = pv;
+    qemu_put_be32(f, *v);
+}
+
+static const VMStateInfo vmstate_hack_uint64_as_uint32 = {
+    .name = "uint64_as_uint32",
+    .get  = get_uint64_as_uint32,
+    .put  = put_uint64_as_uint32,
+};
+
+#define VMSTATE_HACK_UINT32(_f, _s, _t)                                  \
+    VMSTATE_SINGLE_TEST(_f, _s, _t, 0, vmstate_hack_uint64_as_uint32, uint64_t)
+#endif
+
+static void cpu_pre_save(void *opaque)
+{
+    X86CPU *cpu = opaque;
+    CPUX86State *env = &cpu->env;
+    int i;
+
+    /* FPU */
+    env->fpus_vmstate = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
+    env->fptag_vmstate = 0;
+    for (i = 0; i < 8; i++) {
+        env->fptag_vmstate |= ((!env->fptags[i]) << i);
+    }
+
+    env->fpregs_format_vmstate = 0;
+
+    /*
+     * The DPL of real mode guest segment registers should be zero.
+     * Older KVM versions were setting it wrongly.
+     * Fixing it allows live migration to a host with unrestricted guest
+     * support (otherwise migration fails with an invalid guest state
+     * error).
+     */
+    if (!(env->cr[0] & CR0_PE_MASK) &&
+        (env->segs[R_CS].flags >> DESC_DPL_SHIFT & 3) != 0) {
+        env->segs[R_CS].flags &= ~DESC_DPL_MASK;
+        env->segs[R_DS].flags &= ~DESC_DPL_MASK;
+        env->segs[R_ES].flags &= ~DESC_DPL_MASK;
+        env->segs[R_FS].flags &= ~DESC_DPL_MASK;
+        env->segs[R_GS].flags &= ~DESC_DPL_MASK;
+        env->segs[R_SS].flags &= ~DESC_DPL_MASK;
+    }
+}
+
+static int cpu_post_load(void *opaque, int version_id)
+{
+    X86CPU *cpu = opaque;
+    CPUState *cs = CPU(cpu);
+    CPUX86State *env = &cpu->env;
+    int i;
+
+    if (env->tsc_khz && env->user_tsc_khz &&
+        env->tsc_khz != env->user_tsc_khz) {
+        error_report("Mismatch between user-specified TSC frequency and "
+                     "migrated TSC frequency");
+        return -EINVAL;
+    }
+
+    /*
+     * The DPL of real mode guest segment registers should be zero.
+     * Older KVM versions were setting it wrongly.
+     * Fixing it allows live migration from hosts without unrestricted
+     * guest support to hosts that have it (otherwise migration fails
+     * with an invalid guest state error).
+     */
+    if (!(env->cr[0] & CR0_PE_MASK) &&
+        (env->segs[R_CS].flags >> DESC_DPL_SHIFT & 3) != 0) {
+        env->segs[R_CS].flags &= ~DESC_DPL_MASK;
+        env->segs[R_DS].flags &= ~DESC_DPL_MASK;
+        env->segs[R_ES].flags &= ~DESC_DPL_MASK;
+        env->segs[R_FS].flags &= ~DESC_DPL_MASK;
+        env->segs[R_GS].flags &= ~DESC_DPL_MASK;
+        env->segs[R_SS].flags &= ~DESC_DPL_MASK;
+    }
+
+    /* Older versions of QEMU incorrectly used CS.DPL as the CPL when
+     * running under KVM.  This is wrong for conforming code segments.
+     * Luckily, in our implementation the CPL field of hflags is redundant
+     * and we can get the right value from the SS descriptor privilege level.
+     */
+    env->hflags &= ~HF_CPL_MASK;
+    env->hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
+
+    env->fpstt = (env->fpus_vmstate >> 11) & 7;
+    env->fpus = env->fpus_vmstate & ~0x3800;
+    env->fptag_vmstate ^= 0xff;
+    for (i = 0; i < 8; i++) {
+        env->fptags[i] = (env->fptag_vmstate >> i) & 1;
+    }
+    update_fp_status(env);
+
+    cpu_breakpoint_remove_all(cs, BP_CPU);
+    cpu_watchpoint_remove_all(cs, BP_CPU);
+    {
+        /* Indicate all breakpoints disabled, as they are, then
+           let the helper re-enable them.  */
+        target_ulong dr7 = env->dr[7];
+        env->dr[7] = dr7 & ~(DR7_GLOBAL_BP_MASK | DR7_LOCAL_BP_MASK);
+        cpu_x86_update_dr7(env, dr7);
+    }
+    tlb_flush(cs, 1);
+
+    if (tcg_enabled()) {
+        cpu_smm_update(cpu);
+    }
+    return 0;
+}
+
+static bool async_pf_msr_needed(void *opaque)
+{
+    X86CPU *cpu = opaque;
+
+    return cpu->env.async_pf_en_msr != 0;
+}
+
+static bool pv_eoi_msr_needed(void *opaque)
+{
+    X86CPU *cpu = opaque;
+
+    return cpu->env.pv_eoi_en_msr != 0;
+}
+
+static bool steal_time_msr_needed(void *opaque)
+{
+    X86CPU *cpu = opaque;
+
+    return cpu->env.steal_time_msr != 0;
+}
+
+static const VMStateDescription vmstate_steal_time_msr = {
+    .name = "cpu/steal_time_msr",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .needed = steal_time_msr_needed,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT64(env.steal_time_msr, X86CPU),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+static const VMStateDescription vmstate_async_pf_msr = {
+    .name = "cpu/async_pf_msr",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .needed = async_pf_msr_needed,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT64(env.async_pf_en_msr, X86CPU),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+static const VMStateDescription vmstate_pv_eoi_msr = {
+    .name = "cpu/async_pv_eoi_msr",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .needed = pv_eoi_msr_needed,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT64(env.pv_eoi_en_msr, X86CPU),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+static bool fpop_ip_dp_needed(void *opaque)
+{
+    X86CPU *cpu = opaque;
+    CPUX86State *env = &cpu->env;
+
+    return env->fpop != 0 || env->fpip != 0 || env->fpdp != 0;
+}
+
+static const VMStateDescription vmstate_fpop_ip_dp = {
+    .name = "cpu/fpop_ip_dp",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .needed = fpop_ip_dp_needed,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT16(env.fpop, X86CPU),
+        VMSTATE_UINT64(env.fpip, X86CPU),
+        VMSTATE_UINT64(env.fpdp, X86CPU),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+static bool tsc_adjust_needed(void *opaque)
+{
+    X86CPU *cpu = opaque;
+    CPUX86State *env = &cpu->env;
+
+    return env->tsc_adjust != 0;
+}
+
+static const VMStateDescription vmstate_msr_tsc_adjust = {
+    .name = "cpu/msr_tsc_adjust",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .needed = tsc_adjust_needed,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT64(env.tsc_adjust, X86CPU),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+static bool tscdeadline_needed(void *opaque)
+{
+    X86CPU *cpu = opaque;
+    CPUX86State *env = &cpu->env;
+
+    return env->tsc_deadline != 0;
+}
+
+static const VMStateDescription vmstate_msr_tscdeadline = {
+    .name = "cpu/msr_tscdeadline",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .needed = tscdeadline_needed,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT64(env.tsc_deadline, X86CPU),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+static bool misc_enable_needed(void *opaque)
+{
+    X86CPU *cpu = opaque;
+    CPUX86State *env = &cpu->env;
+
+    return env->msr_ia32_misc_enable != MSR_IA32_MISC_ENABLE_DEFAULT;
+}
+
+static bool feature_control_needed(void *opaque)
+{
+    X86CPU *cpu = opaque;
+    CPUX86State *env = &cpu->env;
+
+    return env->msr_ia32_feature_control != 0;
+}
+
+static const VMStateDescription vmstate_msr_ia32_misc_enable = {
+    .name = "cpu/msr_ia32_misc_enable",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .needed = misc_enable_needed,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT64(env.msr_ia32_misc_enable, X86CPU),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+static const VMStateDescription vmstate_msr_ia32_feature_control = {
+    .name = "cpu/msr_ia32_feature_control",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .needed = feature_control_needed,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT64(env.msr_ia32_feature_control, X86CPU),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+static bool pmu_enable_needed(void *opaque)
+{
+    X86CPU *cpu = opaque;
+    CPUX86State *env = &cpu->env;
+    int i;
+
+    if (env->msr_fixed_ctr_ctrl || env->msr_global_ctrl ||
+        env->msr_global_status || env->msr_global_ovf_ctrl) {
+        return true;
+    }
+    for (i = 0; i < MAX_FIXED_COUNTERS; i++) {
+        if (env->msr_fixed_counters[i]) {
+            return true;
+        }
+    }
+    for (i = 0; i < MAX_GP_COUNTERS; i++) {
+        if (env->msr_gp_counters[i] || env->msr_gp_evtsel[i]) {
+            return true;
+        }
+    }
+
+    return false;
+}
+
+static const VMStateDescription vmstate_msr_architectural_pmu = {
+    .name = "cpu/msr_architectural_pmu",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .needed = pmu_enable_needed,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT64(env.msr_fixed_ctr_ctrl, X86CPU),
+        VMSTATE_UINT64(env.msr_global_ctrl, X86CPU),
+        VMSTATE_UINT64(env.msr_global_status, X86CPU),
+        VMSTATE_UINT64(env.msr_global_ovf_ctrl, X86CPU),
+        VMSTATE_UINT64_ARRAY(env.msr_fixed_counters, X86CPU, MAX_FIXED_COUNTERS),
+        VMSTATE_UINT64_ARRAY(env.msr_gp_counters, X86CPU, MAX_GP_COUNTERS),
+        VMSTATE_UINT64_ARRAY(env.msr_gp_evtsel, X86CPU, MAX_GP_COUNTERS),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+static bool mpx_needed(void *opaque)
+{
+    X86CPU *cpu = opaque;
+    CPUX86State *env = &cpu->env;
+    unsigned int i;
+
+    for (i = 0; i < 4; i++) {
+        if (env->bnd_regs[i].lb || env->bnd_regs[i].ub) {
+            return true;
+        }
+    }
+
+    if (env->bndcs_regs.cfgu || env->bndcs_regs.sts) {
+        return true;
+    }
+
+    return !!env->msr_bndcfgs;
+}
+
+static const VMStateDescription vmstate_mpx = {
+    .name = "cpu/mpx",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .needed = mpx_needed,
+    .fields = (VMStateField[]) {
+        VMSTATE_BND_REGS(env.bnd_regs, X86CPU, 4),
+        VMSTATE_UINT64(env.bndcs_regs.cfgu, X86CPU),
+        VMSTATE_UINT64(env.bndcs_regs.sts, X86CPU),
+        VMSTATE_UINT64(env.msr_bndcfgs, X86CPU),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+static bool hyperv_hypercall_enable_needed(void *opaque)
+{
+    X86CPU *cpu = opaque;
+    CPUX86State *env = &cpu->env;
+
+    return env->msr_hv_hypercall != 0 || env->msr_hv_guest_os_id != 0;
+}
+
+static const VMStateDescription vmstate_msr_hypercall_hypercall = {
+    .name = "cpu/msr_hyperv_hypercall",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .needed = hyperv_hypercall_enable_needed,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT64(env.msr_hv_guest_os_id, X86CPU),
+        VMSTATE_UINT64(env.msr_hv_hypercall, X86CPU),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+static bool hyperv_vapic_enable_needed(void *opaque)
+{
+    X86CPU *cpu = opaque;
+    CPUX86State *env = &cpu->env;
+
+    return env->msr_hv_vapic != 0;
+}
+
+static const VMStateDescription vmstate_msr_hyperv_vapic = {
+    .name = "cpu/msr_hyperv_vapic",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .needed = hyperv_vapic_enable_needed,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT64(env.msr_hv_vapic, X86CPU),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+static bool hyperv_time_enable_needed(void *opaque)
+{
+    X86CPU *cpu = opaque;
+    CPUX86State *env = &cpu->env;
+
+    return env->msr_hv_tsc != 0;
+}
+
+static const VMStateDescription vmstate_msr_hyperv_time = {
+    .name = "cpu/msr_hyperv_time",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .needed = hyperv_time_enable_needed,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT64(env.msr_hv_tsc, X86CPU),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+static bool hyperv_crash_enable_needed(void *opaque)
+{
+    X86CPU *cpu = opaque;
+    CPUX86State *env = &cpu->env;
+    int i;
+
+    for (i = 0; i < HV_X64_MSR_CRASH_PARAMS; i++) {
+        if (env->msr_hv_crash_params[i]) {
+            return true;
+        }
+    }
+    return false;
+}
+
+static const VMStateDescription vmstate_msr_hyperv_crash = {
+    .name = "cpu/msr_hyperv_crash",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .needed = hyperv_crash_enable_needed,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT64_ARRAY(env.msr_hv_crash_params,
+                             X86CPU, HV_X64_MSR_CRASH_PARAMS),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+static bool hyperv_runtime_enable_needed(void *opaque)
+{
+    X86CPU *cpu = opaque;
+    CPUX86State *env = &cpu->env;
+
+    if (!cpu->hyperv_runtime) {
+        return false;
+    }
+
+    return env->msr_hv_runtime != 0;
+}
+
+static const VMStateDescription vmstate_msr_hyperv_runtime = {
+    .name = "cpu/msr_hyperv_runtime",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .needed = hyperv_runtime_enable_needed,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT64(env.msr_hv_runtime, X86CPU),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+static bool hyperv_synic_enable_needed(void *opaque)
+{
+    X86CPU *cpu = opaque;
+    CPUX86State *env = &cpu->env;
+    int i;
+
+    if (env->msr_hv_synic_control != 0 ||
+        env->msr_hv_synic_evt_page != 0 ||
+        env->msr_hv_synic_msg_page != 0) {
+        return true;
+    }
+
+    for (i = 0; i < ARRAY_SIZE(env->msr_hv_synic_sint); i++) {
+        if (env->msr_hv_synic_sint[i] != 0) {
+            return true;
+        }
+    }
+
+    return false;
+}
+
+static const VMStateDescription vmstate_msr_hyperv_synic = {
+    .name = "cpu/msr_hyperv_synic",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .needed = hyperv_synic_enable_needed,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT64(env.msr_hv_synic_control, X86CPU),
+        VMSTATE_UINT64(env.msr_hv_synic_evt_page, X86CPU),
+        VMSTATE_UINT64(env.msr_hv_synic_msg_page, X86CPU),
+        VMSTATE_UINT64_ARRAY(env.msr_hv_synic_sint, X86CPU,
+                             HV_SYNIC_SINT_COUNT),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+static bool hyperv_stimer_enable_needed(void *opaque)
+{
+    X86CPU *cpu = opaque;
+    CPUX86State *env = &cpu->env;
+    int i;
+
+    for (i = 0; i < ARRAY_SIZE(env->msr_hv_stimer_config); i++) {
+        if (env->msr_hv_stimer_config[i] || env->msr_hv_stimer_count[i]) {
+            return true;
+        }
+    }
+    return false;
+}
+
+static const VMStateDescription vmstate_msr_hyperv_stimer = {
+    .name = "cpu/msr_hyperv_stimer",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .needed = hyperv_stimer_enable_needed,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT64_ARRAY(env.msr_hv_stimer_config,
+                             X86CPU, HV_SYNIC_STIMER_COUNT),
+        VMSTATE_UINT64_ARRAY(env.msr_hv_stimer_count,
+                             X86CPU, HV_SYNIC_STIMER_COUNT),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
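+/* The AVX-512 subsection carries state only if some opmask register is
+   non-zero, some upper ZMM half (quads 4-7 of ZMM0-15) is non-zero, or,
+   on x86_64, any of ZMM16-31 is live; otherwise it is omitted. */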
+static bool avx512_needed(void *opaque)
+{
+    X86CPU *cpu = opaque;
+    CPUX86State *env = &cpu->env;
+    unsigned int i;
+
+    for (i = 0; i < NB_OPMASK_REGS; i++) {
+        if (env->opmask_regs[i]) {
+            return true;
+        }
+    }
+
+    for (i = 0; i < CPU_NB_REGS; i++) {
+#define ENV_XMM(reg, field) (env->xmm_regs[reg].ZMM_Q(field))
+        if (ENV_XMM(i, 4) || ENV_XMM(i, 6) ||
+            ENV_XMM(i, 5) || ENV_XMM(i, 7)) {
+            return true;
+        }
+#ifdef TARGET_X86_64
+        if (ENV_XMM(i + 16, 0) || ENV_XMM(i + 16, 1) ||
+            ENV_XMM(i + 16, 2) || ENV_XMM(i + 16, 3) ||
+            ENV_XMM(i + 16, 4) || ENV_XMM(i + 16, 5) ||
+            ENV_XMM(i + 16, 6) || ENV_XMM(i + 16, 7)) {
+            return true;
+        }
+#endif
+    }
+
+    return false;
+}
+
+static const VMStateDescription vmstate_avx512 = {
+    .name = "cpu/avx512",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .needed = avx512_needed,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT64_ARRAY(env.opmask_regs, X86CPU, NB_OPMASK_REGS),
+        VMSTATE_ZMMH_REGS_VARS(env.xmm_regs, X86CPU, 0),
+#ifdef TARGET_X86_64
+        VMSTATE_Hi16_ZMM_REGS_VARS(env.xmm_regs, X86CPU, 16),
+#endif
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+static bool xss_needed(void *opaque)
+{
+    X86CPU *cpu = opaque;
+    CPUX86State *env = &cpu->env;
+
+    return env->xss != 0;
+}
+
+static const VMStateDescription vmstate_xss = {
+    .name = "cpu/xss",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .needed = xss_needed,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT64(env.xss, X86CPU),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+#ifdef TARGET_X86_64
+static bool pkru_needed(void *opaque)
+{
+    X86CPU *cpu = opaque;
+    CPUX86State *env = &cpu->env;
+
+    return env->pkru != 0;
+}
+
+static const VMStateDescription vmstate_pkru = {
+    .name = "cpu/pkru",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .needed = pkru_needed,
+    .fields = (VMStateField[]){
+        VMSTATE_UINT32(env.pkru, X86CPU),
+        VMSTATE_END_OF_LIST()
+    }
+};
+#endif
+
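+/* The TSC frequency subsection is gated on the machine type: only
+   machine types with save_tsc_khz set migrate it, which keeps streams
+   to older machine types compatible. */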
+static bool tsc_khz_needed(void *opaque)
+{
+    X86CPU *cpu = opaque;
+    CPUX86State *env = &cpu->env;
+    MachineClass *mc = MACHINE_GET_CLASS(qdev_get_machine());
+    PCMachineClass *pcmc = PC_MACHINE_CLASS(mc);
+    return env->tsc_khz && pcmc->save_tsc_khz;
+}
+
+static const VMStateDescription vmstate_tsc_khz = {
+    .name = "cpu/tsc_khz",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .needed = tsc_khz_needed,
+    .fields = (VMStateField[]) {
+        VMSTATE_INT64(env.tsc_khz, X86CPU),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+static bool mcg_ext_ctl_needed(void *opaque)
+{
+    X86CPU *cpu = opaque;
+    CPUX86State *env = &cpu->env;
+    return cpu->enable_lmce && env->mcg_ext_ctl;
+}
+
+static const VMStateDescription vmstate_mcg_ext_ctl = {
+    .name = "cpu/mcg_ext_ctl",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .needed = mcg_ext_ctl_needed,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT64(env.mcg_ext_ctl, X86CPU),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+VMStateDescription vmstate_x86_cpu = {
+    .name = "cpu",
+    .version_id = 12,
+    .minimum_version_id = 3,
+    .pre_save = cpu_pre_save,
+    .post_load = cpu_post_load,
+    .fields = (VMStateField[]) {
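+        /*
+         * Fields tagged with _V(n) (or guarded by a version test) are
+         * only present in streams with format version >= n; note the
+         * list is not in version order (see the comment at the end).
+         */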
+        VMSTATE_UINTTL_ARRAY(env.regs, X86CPU, CPU_NB_REGS),
+        VMSTATE_UINTTL(env.eip, X86CPU),
+        VMSTATE_UINTTL(env.eflags, X86CPU),
+        VMSTATE_UINT32(env.hflags, X86CPU),
+        /* FPU */
+        VMSTATE_UINT16(env.fpuc, X86CPU),
+        VMSTATE_UINT16(env.fpus_vmstate, X86CPU),
+        VMSTATE_UINT16(env.fptag_vmstate, X86CPU),
+        VMSTATE_UINT16(env.fpregs_format_vmstate, X86CPU),
+        VMSTATE_FP_REGS(env.fpregs, X86CPU, 8),
+
+        VMSTATE_SEGMENT_ARRAY(env.segs, X86CPU, 6),
+        VMSTATE_SEGMENT(env.ldt, X86CPU),
+        VMSTATE_SEGMENT(env.tr, X86CPU),
+        VMSTATE_SEGMENT(env.gdt, X86CPU),
+        VMSTATE_SEGMENT(env.idt, X86CPU),
+
+        VMSTATE_UINT32(env.sysenter_cs, X86CPU),
+#ifdef TARGET_X86_64
+        /* Hack: In v7 size changed from 32 to 64 bits on x86_64 */
+        VMSTATE_HACK_UINT32(env.sysenter_esp, X86CPU, less_than_7),
+        VMSTATE_HACK_UINT32(env.sysenter_eip, X86CPU, less_than_7),
+        VMSTATE_UINTTL_V(env.sysenter_esp, X86CPU, 7),
+        VMSTATE_UINTTL_V(env.sysenter_eip, X86CPU, 7),
+#else
+        VMSTATE_UINTTL(env.sysenter_esp, X86CPU),
+        VMSTATE_UINTTL(env.sysenter_eip, X86CPU),
+#endif
+
+        VMSTATE_UINTTL(env.cr[0], X86CPU),
+        VMSTATE_UINTTL(env.cr[2], X86CPU),
+        VMSTATE_UINTTL(env.cr[3], X86CPU),
+        VMSTATE_UINTTL(env.cr[4], X86CPU),
+        VMSTATE_UINTTL_ARRAY(env.dr, X86CPU, 8),
+        /* MMU */
+        VMSTATE_INT32(env.a20_mask, X86CPU),
+        /* XMM */
+        VMSTATE_UINT32(env.mxcsr, X86CPU),
+        VMSTATE_XMM_REGS(env.xmm_regs, X86CPU, 0),
+
+#ifdef TARGET_X86_64
+        VMSTATE_UINT64(env.efer, X86CPU),
+        VMSTATE_UINT64(env.star, X86CPU),
+        VMSTATE_UINT64(env.lstar, X86CPU),
+        VMSTATE_UINT64(env.cstar, X86CPU),
+        VMSTATE_UINT64(env.fmask, X86CPU),
+        VMSTATE_UINT64(env.kernelgsbase, X86CPU),
+#endif
+        VMSTATE_UINT32_V(env.smbase, X86CPU, 4),
+
+        VMSTATE_UINT64_V(env.pat, X86CPU, 5),
+        VMSTATE_UINT32_V(env.hflags2, X86CPU, 5),
+
+        VMSTATE_UINT32_TEST(parent_obj.halted, X86CPU, version_is_5),
+        VMSTATE_UINT64_V(env.vm_hsave, X86CPU, 5),
+        VMSTATE_UINT64_V(env.vm_vmcb, X86CPU, 5),
+        VMSTATE_UINT64_V(env.tsc_offset, X86CPU, 5),
+        VMSTATE_UINT64_V(env.intercept, X86CPU, 5),
+        VMSTATE_UINT16_V(env.intercept_cr_read, X86CPU, 5),
+        VMSTATE_UINT16_V(env.intercept_cr_write, X86CPU, 5),
+        VMSTATE_UINT16_V(env.intercept_dr_read, X86CPU, 5),
+        VMSTATE_UINT16_V(env.intercept_dr_write, X86CPU, 5),
+        VMSTATE_UINT32_V(env.intercept_exceptions, X86CPU, 5),
+        VMSTATE_UINT8_V(env.v_tpr, X86CPU, 5),
+        /* MTRRs */
+        VMSTATE_UINT64_ARRAY_V(env.mtrr_fixed, X86CPU, 11, 8),
+        VMSTATE_UINT64_V(env.mtrr_deftype, X86CPU, 8),
+        VMSTATE_MTRR_VARS(env.mtrr_var, X86CPU, MSR_MTRRcap_VCNT, 8),
+        /* KVM-related states */
+        VMSTATE_INT32_V(env.interrupt_injected, X86CPU, 9),
+        VMSTATE_UINT32_V(env.mp_state, X86CPU, 9),
+        VMSTATE_UINT64_V(env.tsc, X86CPU, 9),
+        VMSTATE_INT32_V(env.exception_injected, X86CPU, 11),
+        VMSTATE_UINT8_V(env.soft_interrupt, X86CPU, 11),
+        VMSTATE_UINT8_V(env.nmi_injected, X86CPU, 11),
+        VMSTATE_UINT8_V(env.nmi_pending, X86CPU, 11),
+        VMSTATE_UINT8_V(env.has_error_code, X86CPU, 11),
+        VMSTATE_UINT32_V(env.sipi_vector, X86CPU, 11),
+        /* MCE */
+        VMSTATE_UINT64_V(env.mcg_cap, X86CPU, 10),
+        VMSTATE_UINT64_V(env.mcg_status, X86CPU, 10),
+        VMSTATE_UINT64_V(env.mcg_ctl, X86CPU, 10),
+        VMSTATE_UINT64_ARRAY_V(env.mce_banks, X86CPU, MCE_BANKS_DEF * 4, 10),
+        /* rdtscp */
+        VMSTATE_UINT64_V(env.tsc_aux, X86CPU, 11),
+        /* KVM pvclock msr */
+        VMSTATE_UINT64_V(env.system_time_msr, X86CPU, 11),
+        VMSTATE_UINT64_V(env.wall_clock_msr, X86CPU, 11),
+        /* XSAVE related fields */
+        VMSTATE_UINT64_V(env.xcr0, X86CPU, 12),
+        VMSTATE_UINT64_V(env.xstate_bv, X86CPU, 12),
+        VMSTATE_YMMH_REGS_VARS(env.xmm_regs, X86CPU, 0, 12),
+        VMSTATE_END_OF_LIST()
+        /* The above list is not sorted w.r.t. version numbers, watch out! */
+    },
+    .subsections = (const VMStateDescription*[]) {
+        &vmstate_async_pf_msr,
+        &vmstate_pv_eoi_msr,
+        &vmstate_steal_time_msr,
+        &vmstate_fpop_ip_dp,
+        &vmstate_msr_tsc_adjust,
+        &vmstate_msr_tscdeadline,
+        &vmstate_msr_ia32_misc_enable,
+        &vmstate_msr_ia32_feature_control,
+        &vmstate_msr_architectural_pmu,
+        &vmstate_mpx,
+        &vmstate_msr_hypercall_hypercall,
+        &vmstate_msr_hyperv_vapic,
+        &vmstate_msr_hyperv_time,
+        &vmstate_msr_hyperv_crash,
+        &vmstate_msr_hyperv_runtime,
+        &vmstate_msr_hyperv_synic,
+        &vmstate_msr_hyperv_stimer,
+        &vmstate_avx512,
+        &vmstate_xss,
+        &vmstate_tsc_khz,
+#ifdef TARGET_X86_64
+        &vmstate_pkru,
+#endif
+        &vmstate_mcg_ext_ctl,
+        NULL
+    }
+};
diff --git a/target/i386/mem_helper.c b/target/i386/mem_helper.c
new file mode 100644
index 0000000000..70f67668ab
--- /dev/null
+++ b/target/i386/mem_helper.c
@@ -0,0 +1,215 @@
+/*
+ *  x86 memory access helpers
+ *
+ *  Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "exec/exec-all.h"
+#include "exec/cpu_ldst.h"
+#include "qemu/int128.h"
+#include "tcg.h"
+
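+/*
+ * CMPXCHG8B compares EDX:EAX with the 64-bit memory operand; on a match
+ * it stores ECX:EBX and sets ZF, otherwise it loads the old value into
+ * EDX:EAX and clears ZF.  Roughly (illustrative pseudocode only):
+ *
+ *     if (mem64 == EDX:EAX) { mem64 = ECX:EBX; ZF = 1; }
+ *     else                  { EDX:EAX = mem64; ZF = 0; }
+ *
+ * The unlocked variant below still performs a store on mismatch (it
+ * rewrites the old value), since the instruction architecturally
+ * performs a write cycle even when the compare fails.
+ */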
+void helper_cmpxchg8b_unlocked(CPUX86State *env, target_ulong a0)
+{
+    uintptr_t ra = GETPC();
+    uint64_t oldv, cmpv, newv;
+    int eflags;
+
+    eflags = cpu_cc_compute_all(env, CC_OP);
+
+    cmpv = deposit64(env->regs[R_EAX], 32, 32, env->regs[R_EDX]);
+    newv = deposit64(env->regs[R_EBX], 32, 32, env->regs[R_ECX]);
+
+    oldv = cpu_ldq_data_ra(env, a0, ra);
+    newv = (cmpv == oldv ? newv : oldv);
+    /* always do the store */
+    cpu_stq_data_ra(env, a0, newv, ra);
+
+    if (oldv == cmpv) {
+        eflags |= CC_Z;
+    } else {
+        env->regs[R_EAX] = (uint32_t)oldv;
+        env->regs[R_EDX] = (uint32_t)(oldv >> 32);
+        eflags &= ~CC_Z;
+    }
+    CC_SRC = eflags;
+}
+
+void helper_cmpxchg8b(CPUX86State *env, target_ulong a0)
+{
+#ifdef CONFIG_ATOMIC64
+    uint64_t oldv, cmpv, newv;
+    int eflags;
+
+    eflags = cpu_cc_compute_all(env, CC_OP);
+
+    cmpv = deposit64(env->regs[R_EAX], 32, 32, env->regs[R_EDX]);
+    newv = deposit64(env->regs[R_EBX], 32, 32, env->regs[R_ECX]);
+
+#ifdef CONFIG_USER_ONLY
+    {
+        uint64_t *haddr = g2h(a0);
+        cmpv = cpu_to_le64(cmpv);
+        newv = cpu_to_le64(newv);
+        oldv = atomic_cmpxchg__nocheck(haddr, cmpv, newv);
+        oldv = le64_to_cpu(oldv);
+    }
+#else
+    {
+        uintptr_t ra = GETPC();
+        int mem_idx = cpu_mmu_index(env, false);
+        TCGMemOpIdx oi = make_memop_idx(MO_TEQ, mem_idx);
+        oldv = helper_atomic_cmpxchgq_le_mmu(env, a0, cmpv, newv, oi, ra);
+    }
+#endif
+
+    if (oldv == cmpv) {
+        eflags |= CC_Z;
+    } else {
+        env->regs[R_EAX] = (uint32_t)oldv;
+        env->regs[R_EDX] = (uint32_t)(oldv >> 32);
+        eflags &= ~CC_Z;
+    }
+    CC_SRC = eflags;
+#else
+    cpu_loop_exit_atomic(ENV_GET_CPU(env), GETPC());
+#endif /* CONFIG_ATOMIC64 */
+}
+
+#ifdef TARGET_X86_64
+void helper_cmpxchg16b_unlocked(CPUX86State *env, target_ulong a0)
+{
+    uintptr_t ra = GETPC();
+    Int128 oldv, cmpv, newv;
+    uint64_t o0, o1;
+    int eflags;
+    bool success;
+
+    if ((a0 & 0xf) != 0) {
+        raise_exception_ra(env, EXCP0D_GPF, ra);
+    }
+    eflags = cpu_cc_compute_all(env, CC_OP);
+
+    cmpv = int128_make128(env->regs[R_EAX], env->regs[R_EDX]);
+    newv = int128_make128(env->regs[R_EBX], env->regs[R_ECX]);
+
+    o0 = cpu_ldq_data_ra(env, a0 + 0, ra);
+    o1 = cpu_ldq_data_ra(env, a0 + 8, ra);
+
+    oldv = int128_make128(o0, o1);
+    success = int128_eq(oldv, cmpv);
+    if (!success) {
+        newv = oldv;
+    }
+
+    cpu_stq_data_ra(env, a0 + 0, int128_getlo(newv), ra);
+    cpu_stq_data_ra(env, a0 + 8, int128_gethi(newv), ra);
+
+    if (success) {
+        eflags |= CC_Z;
+    } else {
+        env->regs[R_EAX] = int128_getlo(oldv);
+        env->regs[R_EDX] = int128_gethi(oldv);
+        eflags &= ~CC_Z;
+    }
+    CC_SRC = eflags;
+}
+
+void helper_cmpxchg16b(CPUX86State *env, target_ulong a0)
+{
+    uintptr_t ra = GETPC();
+
+    if ((a0 & 0xf) != 0) {
+        raise_exception_ra(env, EXCP0D_GPF, ra);
+    } else {
+#ifndef CONFIG_ATOMIC128
+        cpu_loop_exit_atomic(ENV_GET_CPU(env), ra);
+#else
+        int eflags = cpu_cc_compute_all(env, CC_OP);
+
+        Int128 cmpv = int128_make128(env->regs[R_EAX], env->regs[R_EDX]);
+        Int128 newv = int128_make128(env->regs[R_EBX], env->regs[R_ECX]);
+
+        int mem_idx = cpu_mmu_index(env, false);
+        TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
+        Int128 oldv = helper_atomic_cmpxchgo_le_mmu(env, a0, cmpv,
+                                                    newv, oi, ra);
+
+        if (int128_eq(oldv, cmpv)) {
+            eflags |= CC_Z;
+        } else {
+            env->regs[R_EAX] = int128_getlo(oldv);
+            env->regs[R_EDX] = int128_gethi(oldv);
+            eflags &= ~CC_Z;
+        }
+        CC_SRC = eflags;
+#endif
+    }
+}
+#endif
+
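+/*
+ * BOUND checks a signed array index against a lower/upper pair at the
+ * memory operand (a0/a0+2 for the 16-bit form, a0/a0+4 for 32-bit) and
+ * raises #BR (vector 5) when the index falls outside [lower, upper].
+ */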
+void helper_boundw(CPUX86State *env, target_ulong a0, int v)
+{
+    int low, high;
+
+    low = cpu_ldsw_data_ra(env, a0, GETPC());
+    high = cpu_ldsw_data_ra(env, a0 + 2, GETPC());
+    v = (int16_t)v;
+    if (v < low || v > high) {
+        if (env->hflags & HF_MPX_EN_MASK) {
+            env->bndcs_regs.sts = 0;
+        }
+        raise_exception_ra(env, EXCP05_BOUND, GETPC());
+    }
+}
+
+void helper_boundl(CPUX86State *env, target_ulong a0, int v)
+{
+    int low, high;
+
+    low = cpu_ldl_data_ra(env, a0, GETPC());
+    high = cpu_ldl_data_ra(env, a0 + 4, GETPC());
+    if (v < low || v > high) {
+        if (env->hflags & HF_MPX_EN_MASK) {
+            env->bndcs_regs.sts = 0;
+        }
+        raise_exception_ra(env, EXCP05_BOUND, GETPC());
+    }
+}
+
+#if !defined(CONFIG_USER_ONLY)
+/* Try to fill the TLB and raise an exception on failure.  If retaddr is
+ * NULL, the function was called from C code (i.e. not from generated
+ * code or from helper.c).
+ */
+/* XXX: fix it to restore all registers */
+void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
+              int mmu_idx, uintptr_t retaddr)
+{
+    int ret;
+
+    ret = x86_cpu_handle_mmu_fault(cs, addr, access_type, mmu_idx);
+    if (ret) {
+        X86CPU *cpu = X86_CPU(cs);
+        CPUX86State *env = &cpu->env;
+
+        raise_exception_err_ra(env, cs->exception_index, env->error_code, retaddr);
+    }
+}
+#endif
diff --git a/target/i386/misc_helper.c b/target/i386/misc_helper.c
new file mode 100644
index 0000000000..3f666b4b87
--- /dev/null
+++ b/target/i386/misc_helper.c
@@ -0,0 +1,639 @@
+/*
+ *  x86 misc helpers
+ *
+ *  Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "exec/exec-all.h"
+#include "exec/cpu_ldst.h"
+#include "exec/address-spaces.h"
+
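+/*
+ * Port I/O helpers: in softmmu builds these forward to the dedicated
+ * I/O address space; user-mode emulation has no I/O space, so accesses
+ * are merely logged to stderr and reads return 0.
+ */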
+void helper_outb(CPUX86State *env, uint32_t port, uint32_t data)
+{
+#ifdef CONFIG_USER_ONLY
+    fprintf(stderr, "outb: port=0x%04x, data=%02x\n", port, data);
+#else
+    address_space_stb(&address_space_io, port, data,
+                      cpu_get_mem_attrs(env), NULL);
+#endif
+}
+
+target_ulong helper_inb(CPUX86State *env, uint32_t port)
+{
+#ifdef CONFIG_USER_ONLY
+    fprintf(stderr, "inb: port=0x%04x\n", port);
+    return 0;
+#else
+    return address_space_ldub(&address_space_io, port,
+                              cpu_get_mem_attrs(env), NULL);
+#endif
+}
+
+void helper_outw(CPUX86State *env, uint32_t port, uint32_t data)
+{
+#ifdef CONFIG_USER_ONLY
+    fprintf(stderr, "outw: port=0x%04x, data=%04x\n", port, data);
+#else
+    address_space_stw(&address_space_io, port, data,
+                      cpu_get_mem_attrs(env), NULL);
+#endif
+}
+
+target_ulong helper_inw(CPUX86State *env, uint32_t port)
+{
+#ifdef CONFIG_USER_ONLY
+    fprintf(stderr, "inw: port=0x%04x\n", port);
+    return 0;
+#else
+    return address_space_lduw(&address_space_io, port,
+                              cpu_get_mem_attrs(env), NULL);
+#endif
+}
+
+void helper_outl(CPUX86State *env, uint32_t port, uint32_t data)
+{
+#ifdef CONFIG_USER_ONLY
+    fprintf(stderr, "outw: port=0x%04x, data=%08x\n", port, data);
+#else
+    address_space_stl(&address_space_io, port, data,
+                      cpu_get_mem_attrs(env), NULL);
+#endif
+}
+
+target_ulong helper_inl(CPUX86State *env, uint32_t port)
+{
+#ifdef CONFIG_USER_ONLY
+    fprintf(stderr, "inl: port=0x%04x\n", port);
+    return 0;
+#else
+    return address_space_ldl(&address_space_io, port,
+                             cpu_get_mem_attrs(env), NULL);
+#endif
+}
+
+void helper_into(CPUX86State *env, int next_eip_addend)
+{
+    int eflags;
+
+    eflags = cpu_cc_compute_all(env, CC_OP);
+    if (eflags & CC_O) {
+        raise_interrupt(env, EXCP04_INTO, 1, 0, next_eip_addend);
+    }
+}
+
+void helper_cpuid(CPUX86State *env)
+{
+    uint32_t eax, ebx, ecx, edx;
+
+    cpu_svm_check_intercept_param(env, SVM_EXIT_CPUID, 0);
+
+    cpu_x86_cpuid(env, (uint32_t)env->regs[R_EAX], (uint32_t)env->regs[R_ECX],
+                  &eax, &ebx, &ecx, &edx);
+    env->regs[R_EAX] = eax;
+    env->regs[R_EBX] = ebx;
+    env->regs[R_ECX] = ecx;
+    env->regs[R_EDX] = edx;
+}
+
+#if defined(CONFIG_USER_ONLY)
+target_ulong helper_read_crN(CPUX86State *env, int reg)
+{
+    return 0;
+}
+
+void helper_write_crN(CPUX86State *env, int reg, target_ulong t0)
+{
+}
+#else
+target_ulong helper_read_crN(CPUX86State *env, int reg)
+{
+    target_ulong val;
+
+    cpu_svm_check_intercept_param(env, SVM_EXIT_READ_CR0 + reg, 0);
+    switch (reg) {
+    default:
+        val = env->cr[reg];
+        break;
+    case 8:
+        if (!(env->hflags2 & HF2_VINTR_MASK)) {
+            val = cpu_get_apic_tpr(x86_env_get_cpu(env)->apic_state);
+        } else {
+            val = env->v_tpr;
+        }
+        break;
+    }
+    return val;
+}
+
+void helper_write_crN(CPUX86State *env, int reg, target_ulong t0)
+{
+    cpu_svm_check_intercept_param(env, SVM_EXIT_WRITE_CR0 + reg, 0);
+    switch (reg) {
+    case 0:
+        cpu_x86_update_cr0(env, t0);
+        break;
+    case 3:
+        cpu_x86_update_cr3(env, t0);
+        break;
+    case 4:
+        cpu_x86_update_cr4(env, t0);
+        break;
+    case 8:
+        if (!(env->hflags2 & HF2_VINTR_MASK)) {
+            cpu_set_apic_tpr(x86_env_get_cpu(env)->apic_state, t0);
+        }
+        env->v_tpr = t0 & 0x0f;
+        break;
+    default:
+        env->cr[reg] = t0;
+        break;
+    }
+}
+#endif
+
+void helper_lmsw(CPUX86State *env, target_ulong t0)
+{
+    /* Only the four low bits of CR0 are modified; PE cannot be
+       cleared once it has been set. */
+    t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
+    helper_write_crN(env, 0, t0);
+}
+
+void helper_invlpg(CPUX86State *env, target_ulong addr)
+{
+    X86CPU *cpu = x86_env_get_cpu(env);
+
+    cpu_svm_check_intercept_param(env, SVM_EXIT_INVLPG, 0);
+    tlb_flush_page(CPU(cpu), addr);
+}
+
+void helper_rdtsc(CPUX86State *env)
+{
+    uint64_t val;
+
+    if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
+        raise_exception_ra(env, EXCP0D_GPF, GETPC());
+    }
+    cpu_svm_check_intercept_param(env, SVM_EXIT_RDTSC, 0);
+
+    val = cpu_get_tsc(env) + env->tsc_offset;
+    env->regs[R_EAX] = (uint32_t)(val);
+    env->regs[R_EDX] = (uint32_t)(val >> 32);
+}
+
+void helper_rdtscp(CPUX86State *env)
+{
+    helper_rdtsc(env);
+    env->regs[R_ECX] = (uint32_t)(env->tsc_aux);
+}
+
+void helper_rdpmc(CPUX86State *env)
+{
+    if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
+        raise_exception_ra(env, EXCP0D_GPF, GETPC());
+    }
+    cpu_svm_check_intercept_param(env, SVM_EXIT_RDPMC, 0);
+
+    /* currently unimplemented */
+    qemu_log_mask(LOG_UNIMP, "x86: unimplemented rdpmc\n");
+    raise_exception_err(env, EXCP06_ILLOP, 0);
+}
+
+#if defined(CONFIG_USER_ONLY)
+void helper_wrmsr(CPUX86State *env)
+{
+}
+
+void helper_rdmsr(CPUX86State *env)
+{
+}
+#else
+void helper_wrmsr(CPUX86State *env)
+{
+    uint64_t val;
+
+    cpu_svm_check_intercept_param(env, SVM_EXIT_MSR, 1);
+
+    val = ((uint32_t)env->regs[R_EAX]) |
+        ((uint64_t)((uint32_t)env->regs[R_EDX]) << 32);
+
+    switch ((uint32_t)env->regs[R_ECX]) {
+    case MSR_IA32_SYSENTER_CS:
+        env->sysenter_cs = val & 0xffff;
+        break;
+    case MSR_IA32_SYSENTER_ESP:
+        env->sysenter_esp = val;
+        break;
+    case MSR_IA32_SYSENTER_EIP:
+        env->sysenter_eip = val;
+        break;
+    case MSR_IA32_APICBASE:
+        cpu_set_apic_base(x86_env_get_cpu(env)->apic_state, val);
+        break;
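+    /*
+     * For EFER, only bits whose controlling CPUID feature is exposed to
+     * the guest are writable; all other bits keep their current value.
+     */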
+    case MSR_EFER:
+        {
+            uint64_t update_mask;
+
+            update_mask = 0;
+            if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_SYSCALL) {
+                update_mask |= MSR_EFER_SCE;
+            }
+            if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
+                update_mask |= MSR_EFER_LME;
+            }
+            if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_FFXSR) {
+                update_mask |= MSR_EFER_FFXSR;
+            }
+            if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_NX) {
+                update_mask |= MSR_EFER_NXE;
+            }
+            if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
+                update_mask |= MSR_EFER_SVME;
+            }
+            cpu_load_efer(env, (env->efer & ~update_mask) |
+                          (val & update_mask));
+        }
+        break;
+    case MSR_STAR:
+        env->star = val;
+        break;
+    case MSR_PAT:
+        env->pat = val;
+        break;
+    case MSR_VM_HSAVE_PA:
+        env->vm_hsave = val;
+        break;
+#ifdef TARGET_X86_64
+    case MSR_LSTAR:
+        env->lstar = val;
+        break;
+    case MSR_CSTAR:
+        env->cstar = val;
+        break;
+    case MSR_FMASK:
+        env->fmask = val;
+        break;
+    case MSR_FSBASE:
+        env->segs[R_FS].base = val;
+        break;
+    case MSR_GSBASE:
+        env->segs[R_GS].base = val;
+        break;
+    case MSR_KERNELGSBASE:
+        env->kernelgsbase = val;
+        break;
+#endif
+    case MSR_MTRRphysBase(0):
+    case MSR_MTRRphysBase(1):
+    case MSR_MTRRphysBase(2):
+    case MSR_MTRRphysBase(3):
+    case MSR_MTRRphysBase(4):
+    case MSR_MTRRphysBase(5):
+    case MSR_MTRRphysBase(6):
+    case MSR_MTRRphysBase(7):
+        env->mtrr_var[((uint32_t)env->regs[R_ECX] -
+                       MSR_MTRRphysBase(0)) / 2].base = val;
+        break;
+    case MSR_MTRRphysMask(0):
+    case MSR_MTRRphysMask(1):
+    case MSR_MTRRphysMask(2):
+    case MSR_MTRRphysMask(3):
+    case MSR_MTRRphysMask(4):
+    case MSR_MTRRphysMask(5):
+    case MSR_MTRRphysMask(6):
+    case MSR_MTRRphysMask(7):
+        env->mtrr_var[((uint32_t)env->regs[R_ECX] -
+                       MSR_MTRRphysMask(0)) / 2].mask = val;
+        break;
+    case MSR_MTRRfix64K_00000:
+        env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
+                        MSR_MTRRfix64K_00000] = val;
+        break;
+    case MSR_MTRRfix16K_80000:
+    case MSR_MTRRfix16K_A0000:
+        env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
+                        MSR_MTRRfix16K_80000 + 1] = val;
+        break;
+    case MSR_MTRRfix4K_C0000:
+    case MSR_MTRRfix4K_C8000:
+    case MSR_MTRRfix4K_D0000:
+    case MSR_MTRRfix4K_D8000:
+    case MSR_MTRRfix4K_E0000:
+    case MSR_MTRRfix4K_E8000:
+    case MSR_MTRRfix4K_F0000:
+    case MSR_MTRRfix4K_F8000:
+        env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
+                        MSR_MTRRfix4K_C0000 + 3] = val;
+        break;
+    case MSR_MTRRdefType:
+        env->mtrr_deftype = val;
+        break;
+    case MSR_MCG_STATUS:
+        env->mcg_status = val;
+        break;
+    case MSR_MCG_CTL:
+        if ((env->mcg_cap & MCG_CTL_P)
+            && (val == 0 || val == ~(uint64_t)0)) {
+            env->mcg_ctl = val;
+        }
+        break;
+    case MSR_TSC_AUX:
+        env->tsc_aux = val;
+        break;
+    case MSR_IA32_MISC_ENABLE:
+        env->msr_ia32_misc_enable = val;
+        break;
+    case MSR_IA32_BNDCFGS:
+        /* FIXME: #GP if reserved bits are set.  */
+        /* FIXME: Extend highest implemented bit of linear address.  */
+        env->msr_bndcfgs = val;
+        cpu_sync_bndcs_hflags(env);
+        break;
+    default:
+        if ((uint32_t)env->regs[R_ECX] >= MSR_MC0_CTL
+            && (uint32_t)env->regs[R_ECX] < MSR_MC0_CTL +
+            (4 * (env->mcg_cap & 0xff))) {
+            uint32_t offset = (uint32_t)env->regs[R_ECX] - MSR_MC0_CTL;
+            if ((offset & 0x3) != 0
+                || (val == 0 || val == ~(uint64_t)0)) {
+                env->mce_banks[offset] = val;
+            }
+            break;
+        }
+        /* XXX: exception? */
+        break;
+    }
+}
+
+void helper_rdmsr(CPUX86State *env)
+{
+    uint64_t val;
+
+    cpu_svm_check_intercept_param(env, SVM_EXIT_MSR, 0);
+
+    switch ((uint32_t)env->regs[R_ECX]) {
+    case MSR_IA32_SYSENTER_CS:
+        val = env->sysenter_cs;
+        break;
+    case MSR_IA32_SYSENTER_ESP:
+        val = env->sysenter_esp;
+        break;
+    case MSR_IA32_SYSENTER_EIP:
+        val = env->sysenter_eip;
+        break;
+    case MSR_IA32_APICBASE:
+        val = cpu_get_apic_base(x86_env_get_cpu(env)->apic_state);
+        break;
+    case MSR_EFER:
+        val = env->efer;
+        break;
+    case MSR_STAR:
+        val = env->star;
+        break;
+    case MSR_PAT:
+        val = env->pat;
+        break;
+    case MSR_VM_HSAVE_PA:
+        val = env->vm_hsave;
+        break;
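+    /* Synthesized value: 1000 TSC increments per bus tick in the low
+       word and a hard-coded bus multiplier of 4 starting at bit 40. */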
+    case MSR_IA32_PERF_STATUS:
+        /* tsc_increment_by_tick */
+        val = 1000ULL;
+        /* CPU multiplier */
+        val |= (((uint64_t)4ULL) << 40);
+        break;
+#ifdef TARGET_X86_64
+    case MSR_LSTAR:
+        val = env->lstar;
+        break;
+    case MSR_CSTAR:
+        val = env->cstar;
+        break;
+    case MSR_FMASK:
+        val = env->fmask;
+        break;
+    case MSR_FSBASE:
+        val = env->segs[R_FS].base;
+        break;
+    case MSR_GSBASE:
+        val = env->segs[R_GS].base;
+        break;
+    case MSR_KERNELGSBASE:
+        val = env->kernelgsbase;
+        break;
+    case MSR_TSC_AUX:
+        val = env->tsc_aux;
+        break;
+#endif
+    case MSR_MTRRphysBase(0):
+    case MSR_MTRRphysBase(1):
+    case MSR_MTRRphysBase(2):
+    case MSR_MTRRphysBase(3):
+    case MSR_MTRRphysBase(4):
+    case MSR_MTRRphysBase(5):
+    case MSR_MTRRphysBase(6):
+    case MSR_MTRRphysBase(7):
+        val = env->mtrr_var[((uint32_t)env->regs[R_ECX] -
+                             MSR_MTRRphysBase(0)) / 2].base;
+        break;
+    case MSR_MTRRphysMask(0):
+    case MSR_MTRRphysMask(1):
+    case MSR_MTRRphysMask(2):
+    case MSR_MTRRphysMask(3):
+    case MSR_MTRRphysMask(4):
+    case MSR_MTRRphysMask(5):
+    case MSR_MTRRphysMask(6):
+    case MSR_MTRRphysMask(7):
+        val = env->mtrr_var[((uint32_t)env->regs[R_ECX] -
+                             MSR_MTRRphysMask(0)) / 2].mask;
+        break;
+    case MSR_MTRRfix64K_00000:
+        val = env->mtrr_fixed[0];
+        break;
+    case MSR_MTRRfix16K_80000:
+    case MSR_MTRRfix16K_A0000:
+        val = env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
+                              MSR_MTRRfix16K_80000 + 1];
+        break;
+    case MSR_MTRRfix4K_C0000:
+    case MSR_MTRRfix4K_C8000:
+    case MSR_MTRRfix4K_D0000:
+    case MSR_MTRRfix4K_D8000:
+    case MSR_MTRRfix4K_E0000:
+    case MSR_MTRRfix4K_E8000:
+    case MSR_MTRRfix4K_F0000:
+    case MSR_MTRRfix4K_F8000:
+        val = env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
+                              MSR_MTRRfix4K_C0000 + 3];
+        break;
+    case MSR_MTRRdefType:
+        val = env->mtrr_deftype;
+        break;
+    case MSR_MTRRcap:
+        if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
+            val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT |
+                MSR_MTRRcap_WC_SUPPORTED;
+        } else {
+            /* XXX: exception? */
+            val = 0;
+        }
+        break;
+    case MSR_MCG_CAP:
+        val = env->mcg_cap;
+        break;
+    case MSR_MCG_CTL:
+        if (env->mcg_cap & MCG_CTL_P) {
+            val = env->mcg_ctl;
+        } else {
+            val = 0;
+        }
+        break;
+    case MSR_MCG_STATUS:
+        val = env->mcg_status;
+        break;
+    case MSR_IA32_MISC_ENABLE:
+        val = env->msr_ia32_misc_enable;
+        break;
+    case MSR_IA32_BNDCFGS:
+        val = env->msr_bndcfgs;
+        break;
+    default:
+        if ((uint32_t)env->regs[R_ECX] >= MSR_MC0_CTL
+            && (uint32_t)env->regs[R_ECX] < MSR_MC0_CTL +
+            (4 * (env->mcg_cap & 0xff))) {
+            uint32_t offset = (uint32_t)env->regs[R_ECX] - MSR_MC0_CTL;
+            val = env->mce_banks[offset];
+            break;
+        }
+        /* XXX: exception? */
+        val = 0;
+        break;
+    }
+    env->regs[R_EAX] = (uint32_t)(val);
+    env->regs[R_EDX] = (uint32_t)(val >> 32);
+}
+#endif
+
+static void do_pause(X86CPU *cpu)
+{
+    CPUState *cs = CPU(cpu);
+
+    /* Just let another CPU run.  */
+    cs->exception_index = EXCP_INTERRUPT;
+    cpu_loop_exit(cs);
+}
+
+static void do_hlt(X86CPU *cpu)
+{
+    CPUState *cs = CPU(cpu);
+    CPUX86State *env = &cpu->env;
+
+    env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
+    cs->halted = 1;
+    cs->exception_index = EXCP_HLT;
+    cpu_loop_exit(cs);
+}
+
+void helper_hlt(CPUX86State *env, int next_eip_addend)
+{
+    X86CPU *cpu = x86_env_get_cpu(env);
+
+    cpu_svm_check_intercept_param(env, SVM_EXIT_HLT, 0);
+    env->eip += next_eip_addend;
+
+    do_hlt(cpu);
+}
+
+void helper_monitor(CPUX86State *env, target_ulong ptr)
+{
+    if ((uint32_t)env->regs[R_ECX] != 0) {
+        raise_exception_ra(env, EXCP0D_GPF, GETPC());
+    }
+    /* XXX: store address? */
+    cpu_svm_check_intercept_param(env, SVM_EXIT_MONITOR, 0);
+}
+
+void helper_mwait(CPUX86State *env, int next_eip_addend)
+{
+    CPUState *cs;
+    X86CPU *cpu;
+
+    if ((uint32_t)env->regs[R_ECX] != 0) {
+        raise_exception_ra(env, EXCP0D_GPF, GETPC());
+    }
+    cpu_svm_check_intercept_param(env, SVM_EXIT_MWAIT, 0);
+    env->eip += next_eip_addend;
+
+    cpu = x86_env_get_cpu(env);
+    cs = CPU(cpu);
+    /* XXX: not complete but not completely erroneous */
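+    /* With more than one CPU online, treat MWAIT like PAUSE so sibling
+       CPUs can run; on a single-CPU machine it degrades to HLT. */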
+    if (cs->cpu_index != 0 || CPU_NEXT(cs) != NULL) {
+        do_pause(cpu);
+    } else {
+        do_hlt(cpu);
+    }
+}
+
+void helper_pause(CPUX86State *env, int next_eip_addend)
+{
+    X86CPU *cpu = x86_env_get_cpu(env);
+
+    cpu_svm_check_intercept_param(env, SVM_EXIT_PAUSE, 0);
+    env->eip += next_eip_addend;
+
+    do_pause(cpu);
+}
+
+void helper_debug(CPUX86State *env)
+{
+    CPUState *cs = CPU(x86_env_get_cpu(env));
+
+    cs->exception_index = EXCP_DEBUG;
+    cpu_loop_exit(cs);
+}
+
+uint64_t helper_rdpkru(CPUX86State *env, uint32_t ecx)
+{
+    if ((env->cr[4] & CR4_PKE_MASK) == 0) {
+        raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
+    }
+    if (ecx != 0) {
+        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
+    }
+
+    return env->pkru;
+}
+
+void helper_wrpkru(CPUX86State *env, uint32_t ecx, uint64_t val)
+{
+    CPUState *cs = CPU(x86_env_get_cpu(env));
+
+    if ((env->cr[4] & CR4_PKE_MASK) == 0) {
+        raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
+    }
+    if (ecx != 0 || (val & 0xFFFFFFFF00000000ull)) {
+        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
+    }
+
+    env->pkru = val;
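+    /* PKRU changes the effective permissions of user pages, so cached
+       translations are stale and the whole TLB must be flushed. */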
+    tlb_flush(cs, 1);
+}
diff --git a/target/i386/monitor.c b/target/i386/monitor.c
new file mode 100644
index 0000000000..9a3b4d746e
--- /dev/null
+++ b/target/i386/monitor.c
@@ -0,0 +1,513 @@
+/*
+ * QEMU monitor
+ *
+ * Copyright (c) 2003-2004 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "monitor/monitor.h"
+#include "monitor/hmp-target.h"
+#include "hw/i386/pc.h"
+#include "sysemu/kvm.h"
+#include "hmp.h"
+
+
+static void print_pte(Monitor *mon, hwaddr addr,
+                      hwaddr pte,
+                      hwaddr mask)
+{
+#ifdef TARGET_X86_64
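+    /* Sign-extend bit 47 so a 48-bit virtual address is shown in its
+       canonical 64-bit form. */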
+    if (addr & (1ULL << 47)) {
+        addr |= -1LL << 48;
+    }
+#endif
+    monitor_printf(mon, TARGET_FMT_plx ": " TARGET_FMT_plx
+                   " %c%c%c%c%c%c%c%c%c\n",
+                   addr,
+                   pte & mask,
+                   pte & PG_NX_MASK ? 'X' : '-',
+                   pte & PG_GLOBAL_MASK ? 'G' : '-',
+                   pte & PG_PSE_MASK ? 'P' : '-',
+                   pte & PG_DIRTY_MASK ? 'D' : '-',
+                   pte & PG_ACCESSED_MASK ? 'A' : '-',
+                   pte & PG_PCD_MASK ? 'C' : '-',
+                   pte & PG_PWT_MASK ? 'T' : '-',
+                   pte & PG_USER_MASK ? 'U' : '-',
+                   pte & PG_RW_MASK ? 'W' : '-');
+}
+
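+/* Dump present mappings by walking the classic two-level 32-bit page
+   table: 1024 PDEs covering 4 MiB each, with non-PSE PDEs expanded into
+   1024 4 KiB PTEs. */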
+static void tlb_info_32(Monitor *mon, CPUArchState *env)
+{
+    unsigned int l1, l2;
+    uint32_t pgd, pde, pte;
+
+    pgd = env->cr[3] & ~0xfff;
+    for (l1 = 0; l1 < 1024; l1++) {
+        cpu_physical_memory_read(pgd + l1 * 4, &pde, 4);
+        pde = le32_to_cpu(pde);
+        if (pde & PG_PRESENT_MASK) {
+            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
+                /* 4M pages */
+                print_pte(mon, (l1 << 22), pde, ~((1 << 21) - 1));
+            } else {
+                for (l2 = 0; l2 < 1024; l2++) {
+                    cpu_physical_memory_read((pde & ~0xfff) + l2 * 4, &pte, 4);
+                    pte = le32_to_cpu(pte);
+                    if (pte & PG_PRESENT_MASK) {
+                        print_pte(mon, (l1 << 22) + (l2 << 12),
+                                  pte & ~PG_PSE_MASK,
+                                  ~0xfff);
+                    }
+                }
+            }
+        }
+    }
+}
+
+static void tlb_info_pae32(Monitor *mon, CPUArchState *env)
+{
+    unsigned int l1, l2, l3;
+    uint64_t pdpe, pde, pte;
+    uint64_t pdp_addr, pd_addr, pt_addr;
+
+    pdp_addr = env->cr[3] & ~0x1f;
+    for (l1 = 0; l1 < 4; l1++) {
+        cpu_physical_memory_read(pdp_addr + l1 * 8, &pdpe, 8);
+        pdpe = le64_to_cpu(pdpe);
+        if (pdpe & PG_PRESENT_MASK) {
+            pd_addr = pdpe & 0x3fffffffff000ULL;
+            for (l2 = 0; l2 < 512; l2++) {
+                cpu_physical_memory_read(pd_addr + l2 * 8, &pde, 8);
+                pde = le64_to_cpu(pde);
+                if (pde & PG_PRESENT_MASK) {
+                    if (pde & PG_PSE_MASK) {
+                        /* 2M pages with PAE, CR4.PSE is ignored */
+                        print_pte(mon, (l1 << 30) + (l2 << 21), pde,
+                                  ~((hwaddr)(1 << 20) - 1));
+                    } else {
+                        pt_addr = pde & 0x3fffffffff000ULL;
+                        for (l3 = 0; l3 < 512; l3++) {
+                            cpu_physical_memory_read(pt_addr + l3 * 8, &pte, 8);
+                            pte = le64_to_cpu(pte);
+                            if (pte & PG_PRESENT_MASK) {
+                                print_pte(mon, (l1 << 30) + (l2 << 21)
+                                          + (l3 << 12),
+                                          pte & ~PG_PSE_MASK,
+                                          ~(hwaddr)0xfff);
+                            }
+                        }
+                    }
+                }
+            }
+        }
+    }
+}
+
+#ifdef TARGET_X86_64
+static void tlb_info_64(Monitor *mon, CPUArchState *env)
+{
+    uint64_t l1, l2, l3, l4;
+    uint64_t pml4e, pdpe, pde, pte;
+    uint64_t pml4_addr, pdp_addr, pd_addr, pt_addr;
+
+    pml4_addr = env->cr[3] & 0x3fffffffff000ULL;
+    for (l1 = 0; l1 < 512; l1++) {
+        cpu_physical_memory_read(pml4_addr + l1 * 8, &pml4e, 8);
+        pml4e = le64_to_cpu(pml4e);
+        if (pml4e & PG_PRESENT_MASK) {
+            pdp_addr = pml4e & 0x3fffffffff000ULL;
+            for (l2 = 0; l2 < 512; l2++) {
+                cpu_physical_memory_read(pdp_addr + l2 * 8, &pdpe, 8);
+                pdpe = le64_to_cpu(pdpe);
+                if (pdpe & PG_PRESENT_MASK) {
+                    if (pdpe & PG_PSE_MASK) {
+                        /* 1G pages, CR4.PSE is ignored */
+                        print_pte(mon, (l1 << 39) + (l2 << 30), pdpe,
+                                  0x3ffffc0000000ULL);
+                    } else {
+                        pd_addr = pdpe & 0x3fffffffff000ULL;
+                        for (l3 = 0; l3 < 512; l3++) {
+                            cpu_physical_memory_read(pd_addr + l3 * 8, &pde, 8);
+                            pde = le64_to_cpu(pde);
+                            if (pde & PG_PRESENT_MASK) {
+                                if (pde & PG_PSE_MASK) {
+                                    /* 2M pages, CR4.PSE is ignored */
+                                    print_pte(mon, (l1 << 39) + (l2 << 30) +
+                                              (l3 << 21), pde,
+                                              0x3ffffffe00000ULL);
+                                } else {
+                                    pt_addr = pde & 0x3fffffffff000ULL;
+                                    for (l4 = 0; l4 < 512; l4++) {
+                                        cpu_physical_memory_read(pt_addr
+                                                                 + l4 * 8,
+                                                                 &pte, 8);
+                                        pte = le64_to_cpu(pte);
+                                        if (pte & PG_PRESENT_MASK) {
+                                            print_pte(mon, (l1 << 39) +
+                                                      (l2 << 30) +
+                                                      (l3 << 21) + (l4 << 12),
+                                                      pte & ~PG_PSE_MASK,
+                                                      0x3fffffffff000ULL);
+                                        }
+                                    }
+                                }
+                            }
+                        }
+                    }
+                }
+            }
+        }
+    }
+}
+#endif /* TARGET_X86_64 */
+
+void hmp_info_tlb(Monitor *mon, const QDict *qdict)
+{
+    CPUArchState *env;
+
+    env = mon_get_cpu_env();
+
+    if (!(env->cr[0] & CR0_PG_MASK)) {
+        monitor_printf(mon, "PG disabled\n");
+        return;
+    }
+    if (env->cr[4] & CR4_PAE_MASK) {
+#ifdef TARGET_X86_64
+        if (env->hflags & HF_LMA_MASK) {
+            tlb_info_64(mon, env);
+        } else
+#endif
+        {
+            tlb_info_pae32(mon, env);
+        }
+    } else {
+        tlb_info_32(mon, env);
+    }
+}
+
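+/* Coalesce the walk into ranges: a line is printed only when the
+   protection changes, covering [*pstart, end) with the previous
+   protection; prot == 0 ends the current range. */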
+static void mem_print(Monitor *mon, hwaddr *pstart,
+                      int *plast_prot,
+                      hwaddr end, int prot)
+{
+    int prot1;
+    prot1 = *plast_prot;
+    if (prot != prot1) {
+        if (*pstart != -1) {
+            monitor_printf(mon, TARGET_FMT_plx "-" TARGET_FMT_plx " "
+                           TARGET_FMT_plx " %c%c%c\n",
+                           *pstart, end, end - *pstart,
+                           prot1 & PG_USER_MASK ? 'u' : '-',
+                           'r',
+                           prot1 & PG_RW_MASK ? 'w' : '-');
+        }
+        if (prot != 0)
+            *pstart = end;
+        else
+            *pstart = -1;
+        *plast_prot = prot;
+    }
+}
+
+static void mem_info_32(Monitor *mon, CPUArchState *env)
+{
+    unsigned int l1, l2;
+    int prot, last_prot;
+    uint32_t pgd, pde, pte;
+    hwaddr start, end;
+
+    pgd = env->cr[3] & ~0xfff;
+    last_prot = 0;
+    start = -1;
+    for (l1 = 0; l1 < 1024; l1++) {
+        cpu_physical_memory_read(pgd + l1 * 4, &pde, 4);
+        pde = le32_to_cpu(pde);
+        end = l1 << 22;
+        if (pde & PG_PRESENT_MASK) {
+            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
+                prot = pde & (PG_USER_MASK | PG_RW_MASK | PG_PRESENT_MASK);
+                mem_print(mon, &start, &last_prot, end, prot);
+            } else {
+                for (l2 = 0; l2 < 1024; l2++) {
+                    cpu_physical_memory_read((pde & ~0xfff) + l2 * 4, &pte, 4);
+                    pte = le32_to_cpu(pte);
+                    end = (l1 << 22) + (l2 << 12);
+                    if (pte & PG_PRESENT_MASK) {
+                        prot = pte & pde &
+                            (PG_USER_MASK | PG_RW_MASK | PG_PRESENT_MASK);
+                    } else {
+                        prot = 0;
+                    }
+                    mem_print(mon, &start, &last_prot, end, prot);
+                }
+            }
+        } else {
+            prot = 0;
+            mem_print(mon, &start, &last_prot, end, prot);
+        }
+    }
+    /* Flush last range */
+    mem_print(mon, &start, &last_prot, (hwaddr)1 << 32, 0);
+}
+
+static void mem_info_pae32(Monitor *mon, CPUArchState *env)
+{
+    unsigned int l1, l2, l3;
+    int prot, last_prot;
+    uint64_t pdpe, pde, pte;
+    uint64_t pdp_addr, pd_addr, pt_addr;
+    hwaddr start, end;
+
+    pdp_addr = env->cr[3] & ~0x1f;
+    last_prot = 0;
+    start = -1;
+    for (l1 = 0; l1 < 4; l1++) {
+        cpu_physical_memory_read(pdp_addr + l1 * 8, &pdpe, 8);
+        pdpe = le64_to_cpu(pdpe);
+        end = l1 << 30;
+        if (pdpe & PG_PRESENT_MASK) {
+            pd_addr = pdpe & 0x3fffffffff000ULL;
+            for (l2 = 0; l2 < 512; l2++) {
+                cpu_physical_memory_read(pd_addr + l2 * 8, &pde, 8);
+                pde = le64_to_cpu(pde);
+                end = (l1 << 30) + (l2 << 21);
+                if (pde & PG_PRESENT_MASK) {
+                    if (pde & PG_PSE_MASK) {
+                        prot = pde & (PG_USER_MASK | PG_RW_MASK |
+                                      PG_PRESENT_MASK);
+                        mem_print(mon, &start, &last_prot, end, prot);
+                    } else {
+                        pt_addr = pde & 0x3fffffffff000ULL;
+                        for (l3 = 0; l3 < 512; l3++) {
+                            cpu_physical_memory_read(pt_addr + l3 * 8, &pte, 8);
+                            pte = le64_to_cpu(pte);
+                            end = (l1 << 30) + (l2 << 21) + (l3 << 12);
+                            if (pte & PG_PRESENT_MASK) {
+                                prot = pte & pde & (PG_USER_MASK | PG_RW_MASK |
+                                                    PG_PRESENT_MASK);
+                            } else {
+                                prot = 0;
+                            }
+                            mem_print(mon, &start, &last_prot, end, prot);
+                        }
+                    }
+                } else {
+                    prot = 0;
+                    mem_print(mon, &start, &last_prot, end, prot);
+                }
+            }
+        } else {
+            prot = 0;
+            mem_print(mon, &start, &last_prot, end, prot);
+        }
+    }
+    /* Flush last range */
+    mem_print(mon, &start, &last_prot, (hwaddr)1 << 32, 0);
+}
+
+
+#ifdef TARGET_X86_64
+static void mem_info_64(Monitor *mon, CPUArchState *env)
+{
+    int prot, last_prot;
+    uint64_t l1, l2, l3, l4;
+    uint64_t pml4e, pdpe, pde, pte;
+    uint64_t pml4_addr, pdp_addr, pd_addr, pt_addr, start, end;
+
+    pml4_addr = env->cr[3] & 0x3fffffffff000ULL;
+    last_prot = 0;
+    start = -1;
+    for (l1 = 0; l1 < 512; l1++) {
+        cpu_physical_memory_read(pml4_addr + l1 * 8, &pml4e, 8);
+        pml4e = le64_to_cpu(pml4e);
+        end = l1 << 39;
+        if (pml4e & PG_PRESENT_MASK) {
+            pdp_addr = pml4e & 0x3fffffffff000ULL;
+            for (l2 = 0; l2 < 512; l2++) {
+                cpu_physical_memory_read(pdp_addr + l2 * 8, &pdpe, 8);
+                pdpe = le64_to_cpu(pdpe);
+                end = (l1 << 39) + (l2 << 30);
+                if (pdpe & PG_PRESENT_MASK) {
+                    if (pdpe & PG_PSE_MASK) {
+                        prot = pdpe & (PG_USER_MASK | PG_RW_MASK |
+                                       PG_PRESENT_MASK);
+                        prot &= pml4e;
+                        mem_print(mon, &start, &last_prot, end, prot);
+                    } else {
+                        pd_addr = pdpe & 0x3fffffffff000ULL;
+                        for (l3 = 0; l3 < 512; l3++) {
+                            cpu_physical_memory_read(pd_addr + l3 * 8, &pde, 8);
+                            pde = le64_to_cpu(pde);
+                            end = (l1 << 39) + (l2 << 30) + (l3 << 21);
+                            if (pde & PG_PRESENT_MASK) {
+                                if (pde & PG_PSE_MASK) {
+                                    prot = pde & (PG_USER_MASK | PG_RW_MASK |
+                                                  PG_PRESENT_MASK);
+                                    prot &= pml4e & pdpe;
+                                    mem_print(mon, &start, &last_prot, end, prot);
+                                } else {
+                                    pt_addr = pde & 0x3fffffffff000ULL;
+                                    for (l4 = 0; l4 < 512; l4++) {
+                                        cpu_physical_memory_read(pt_addr
+                                                                 + l4 * 8,
+                                                                 &pte, 8);
+                                        pte = le64_to_cpu(pte);
+                                        end = (l1 << 39) + (l2 << 30) +
+                                            (l3 << 21) + (l4 << 12);
+                                        if (pte & PG_PRESENT_MASK) {
+                                            prot = pte & (PG_USER_MASK | PG_RW_MASK |
+                                                          PG_PRESENT_MASK);
+                                            prot &= pml4e & pdpe & pde;
+                                        } else {
+                                            prot = 0;
+                                        }
+                                        mem_print(mon, &start, &last_prot, end, prot);
+                                    }
+                                }
+                            } else {
+                                prot = 0;
+                                mem_print(mon, &start, &last_prot, end, prot);
+                            }
+                        }
+                    }
+                } else {
+                    prot = 0;
+                    mem_print(mon, &start, &last_prot, end, prot);
+                }
+            }
+        } else {
+            prot = 0;
+            mem_print(mon, &start, &last_prot, end, prot);
+        }
+    }
+    /* Flush last range */
+    mem_print(mon, &start, &last_prot, (hwaddr)1 << 48, 0);
+}
+#endif /* TARGET_X86_64 */
+
+void hmp_info_mem(Monitor *mon, const QDict *qdict)
+{
+    CPUArchState *env;
+
+    env = mon_get_cpu_env();
+
+    if (!(env->cr[0] & CR0_PG_MASK)) {
+        monitor_printf(mon, "PG disabled\n");
+        return;
+    }
+    if (env->cr[4] & CR4_PAE_MASK) {
+#ifdef TARGET_X86_64
+        if (env->hflags & HF_LMA_MASK) {
+            mem_info_64(mon, env);
+        } else
+#endif
+        {
+            mem_info_pae32(mon, env);
+        }
+    } else {
+        mem_info_32(mon, env);
+    }
+}
+
+void hmp_mce(Monitor *mon, const QDict *qdict)
+{
+    X86CPU *cpu;
+    CPUState *cs;
+    int cpu_index = qdict_get_int(qdict, "cpu_index");
+    int bank = qdict_get_int(qdict, "bank");
+    uint64_t status = qdict_get_int(qdict, "status");
+    uint64_t mcg_status = qdict_get_int(qdict, "mcg_status");
+    uint64_t addr = qdict_get_int(qdict, "addr");
+    uint64_t misc = qdict_get_int(qdict, "misc");
+    int flags = MCE_INJECT_UNCOND_AO;
+
+    if (qdict_get_try_bool(qdict, "broadcast", false)) {
+        flags |= MCE_INJECT_BROADCAST;
+    }
+    cs = qemu_get_cpu(cpu_index);
+    if (cs != NULL) {
+        cpu = X86_CPU(cs);
+        cpu_x86_inject_mce(mon, cpu, bank, status, mcg_status, addr, misc,
+                           flags);
+    }
+}
+
+static target_long monitor_get_pc(const struct MonitorDef *md, int val)
+{
+    CPUArchState *env = mon_get_cpu_env();
+    return env->eip + env->segs[R_CS].base;
+}
+
+const MonitorDef monitor_defs[] = {
+#define SEG(name, seg) \
+    { name, offsetof(CPUX86State, segs[seg].selector), NULL, MD_I32 },\
+    { name ".base", offsetof(CPUX86State, segs[seg].base) },\
+    { name ".limit", offsetof(CPUX86State, segs[seg].limit), NULL, MD_I32 },
+
+    { "eax", offsetof(CPUX86State, regs[0]) },
+    { "ecx", offsetof(CPUX86State, regs[1]) },
+    { "edx", offsetof(CPUX86State, regs[2]) },
+    { "ebx", offsetof(CPUX86State, regs[3]) },
+    { "esp|sp", offsetof(CPUX86State, regs[4]) },
+    { "ebp|fp", offsetof(CPUX86State, regs[5]) },
+    { "esi", offsetof(CPUX86State, regs[6]) },
+    { "edi", offsetof(CPUX86State, regs[7]) },
+#ifdef TARGET_X86_64
+    { "r8", offsetof(CPUX86State, regs[8]) },
+    { "r9", offsetof(CPUX86State, regs[9]) },
+    { "r10", offsetof(CPUX86State, regs[10]) },
+    { "r11", offsetof(CPUX86State, regs[11]) },
+    { "r12", offsetof(CPUX86State, regs[12]) },
+    { "r13", offsetof(CPUX86State, regs[13]) },
+    { "r14", offsetof(CPUX86State, regs[14]) },
+    { "r15", offsetof(CPUX86State, regs[15]) },
+#endif
+    { "eflags", offsetof(CPUX86State, eflags) },
+    { "eip", offsetof(CPUX86State, eip) },
+    SEG("cs", R_CS)
+    SEG("ds", R_DS)
+    SEG("es", R_ES)
+    SEG("ss", R_SS)
+    SEG("fs", R_FS)
+    SEG("gs", R_GS)
+    { "pc", 0, monitor_get_pc, },
+    { NULL },
+};
+
+const MonitorDef *target_monitor_defs(void)
+{
+    return monitor_defs;
+}
+
+void hmp_info_local_apic(Monitor *mon, const QDict *qdict)
+{
+    x86_cpu_dump_local_apic_state(mon_get_cpu(), (FILE *)mon, monitor_fprintf,
+                                  CPU_DUMP_FPU);
+}
+
+void hmp_info_io_apic(Monitor *mon, const QDict *qdict)
+{
+    if (kvm_irqchip_in_kernel() &&
+        !kvm_irqchip_is_split()) {
+        kvm_ioapic_dump_state(mon, qdict);
+    } else {
+        ioapic_dump_state(mon, qdict);
+    }
+}
diff --git a/target/i386/mpx_helper.c b/target/i386/mpx_helper.c
new file mode 100644
index 0000000000..7e44820659
--- /dev/null
+++ b/target/i386/mpx_helper.c
@@ -0,0 +1,168 @@
+/*
+ *  x86 MPX helpers
+ *
+ *  Copyright (c) 2015 Red Hat, Inc.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "exec/cpu_ldst.h"
+#include "exec/exec-all.h"
+
+
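+/*
+ * MPX is considered enabled only when CR4.OSXSAVE is set, XCR0 enables
+ * the BNDCSR state component, and the configuration register active at
+ * the current privilege level (BNDCFGU in ring 3, otherwise
+ * IA32_BNDCFGS) has its enable bit set.  The result is cached in
+ * hflags/hflags2 so translated code can test a single bit.
+ */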
+void cpu_sync_bndcs_hflags(CPUX86State *env)
+{
+    uint32_t hflags = env->hflags;
+    uint32_t hflags2 = env->hflags2;
+    uint32_t bndcsr;
+
+    if ((hflags & HF_CPL_MASK) == 3) {
+        bndcsr = env->bndcs_regs.cfgu;
+    } else {
+        bndcsr = env->msr_bndcfgs;
+    }
+
+    if ((env->cr[4] & CR4_OSXSAVE_MASK)
+        && (env->xcr0 & XSTATE_BNDCSR_MASK)
+        && (bndcsr & BNDCFG_ENABLE)) {
+        hflags |= HF_MPX_EN_MASK;
+    } else {
+        hflags &= ~HF_MPX_EN_MASK;
+    }
+
+    if (bndcsr & BNDCFG_BNDPRESERVE) {
+        hflags2 |= HF2_MPX_PR_MASK;
+    } else {
+        hflags2 &= ~HF2_MPX_PR_MASK;
+    }
+
+    env->hflags = hflags;
+    env->hflags2 = hflags2;
+}
+
+void helper_bndck(CPUX86State *env, uint32_t fail)
+{
+    if (unlikely(fail)) {
+        env->bndcs_regs.sts = 1;
+        raise_exception_ra(env, EXCP05_BOUND, GETPC());
+    }
+}
+
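+/*
+ * MPX bounds live in a two-level structure rooted at the bound
+ * directory base held in BNDCFGU/BNDCFGS.  For the 64-bit format the
+ * decomposition below is, illustratively (mirroring the extract64
+ * calls):
+ *
+ *     bde = bnd_dir_base + (ptr[47:20] << 3);    -- directory entry
+ *     bte = (*bde & ~7)  + (ptr[19:3]  << 5);    -- table entry
+ *
+ * A clear valid bit (bit 0) in the directory entry raises #BR, with the
+ * entry address and error code recorded in BNDSTATUS.
+ */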
+static uint64_t lookup_bte64(CPUX86State *env, uint64_t base, uintptr_t ra)
+{
+    uint64_t bndcsr, bde, bt;
+
+    if ((env->hflags & HF_CPL_MASK) == 3) {
+        bndcsr = env->bndcs_regs.cfgu;
+    } else {
+        bndcsr = env->msr_bndcfgs;
+    }
+
+    bde = (extract64(base, 20, 28) << 3) + (extract64(bndcsr, 20, 44) << 12);
+    bt = cpu_ldq_data_ra(env, bde, ra);
+    if ((bt & 1) == 0) {
+        env->bndcs_regs.sts = bde | 2;
+        raise_exception_ra(env, EXCP05_BOUND, ra);
+    }
+
+    return (extract64(base, 3, 17) << 5) + (bt & ~7);
+}
+
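+/*
+ * 32-bit mode equivalent: address bits 12..31 index the directory
+ * (4-byte entries) and bits 2..11 the table of 16-byte entries.
+ */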
+static uint32_t lookup_bte32(CPUX86State *env, uint32_t base, uintptr_t ra)
+{
+    uint32_t bndcsr, bde, bt;
+
+    if ((env->hflags & HF_CPL_MASK) == 3) {
+        bndcsr = env->bndcs_regs.cfgu;
+    } else {
+        bndcsr = env->msr_bndcfgs;
+    }
+
+    bde = (extract32(base, 12, 20) << 2) + (bndcsr & TARGET_PAGE_MASK);
+    bt = cpu_ldl_data_ra(env, bde, ra);
+    if ((bt & 1) == 0) {
+        env->bndcs_regs.sts = bde | 2;
+        raise_exception_ra(env, EXCP05_BOUND, ra);
+    }
+
+    return (extract32(base, 2, 10) << 4) + (bt & ~3);
+}
+
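+/*
+ * BNDLDX: the lower bound is returned directly; the upper bound is
+ * handed back to the translator through the mmx_t0 scratch register.
+ * If the stored pointer no longer matches, the bounds load as zero
+ * (i.e. INIT state).
+ */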
+uint64_t helper_bndldx64(CPUX86State *env, target_ulong base, target_ulong ptr)
+{
+    uintptr_t ra = GETPC();
+    uint64_t bte, lb, ub, pt;
+
+    bte = lookup_bte64(env, base, ra);
+    lb = cpu_ldq_data_ra(env, bte, ra);
+    ub = cpu_ldq_data_ra(env, bte + 8, ra);
+    pt = cpu_ldq_data_ra(env, bte + 16, ra);
+
+    if (pt != ptr) {
+        lb = ub = 0;
+    }
+    env->mmx_t0.MMX_Q(0) = ub;
+    return lb;
+}
+
+uint64_t helper_bndldx32(CPUX86State *env, target_ulong base, target_ulong ptr)
+{
+    uintptr_t ra = GETPC();
+    uint32_t bte, lb, ub, pt;
+
+    bte = lookup_bte32(env, base, ra);
+    lb = cpu_ldl_data_ra(env, bte, ra);
+    ub = cpu_ldl_data_ra(env, bte + 4, ra);
+    pt = cpu_ldl_data_ra(env, bte + 8, ra);
+
+    if (pt != ptr) {
+        lb = ub = 0;
+    }
+    return ((uint64_t)ub << 32) | lb;
+}
+
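+/*
+ * BNDSTX: store lower bound, upper bound and the pointer value that
+ * guards them into the entry found by the table walk.
+ */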
+void helper_bndstx64(CPUX86State *env, target_ulong base, target_ulong ptr,
+                     uint64_t lb, uint64_t ub)
+{
+    uintptr_t ra = GETPC();
+    uint64_t bte;
+
+    bte = lookup_bte64(env, base, ra);
+    cpu_stq_data_ra(env, bte, lb, ra);
+    cpu_stq_data_ra(env, bte + 8, ub, ra);
+    cpu_stq_data_ra(env, bte + 16, ptr, ra);
+}
+
+void helper_bndstx32(CPUX86State *env, target_ulong base, target_ulong ptr,
+                     uint64_t lb, uint64_t ub)
+{
+    uintptr_t ra = GETPC();
+    uint32_t bte;
+
+    bte = lookup_bte32(env, base, ra);
+    cpu_stl_data_ra(env, bte, lb, ra);
+    cpu_stl_data_ra(env, bte + 4, ub, ra);
+    cpu_stl_data_ra(env, bte + 8, ptr, ra);
+}
+
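+/*
+ * Bound-register reset at a branch: unless BNDPRESERVE is set, a
+ * branch without a BND prefix resets all four bound registers to
+ * INIT (all zeroes).
+ */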
+void helper_bnd_jmp(CPUX86State *env)
+{
+    if (!(env->hflags2 & HF2_MPX_PR_MASK)) {
+        memset(env->bnd_regs, 0, sizeof(env->bnd_regs));
+        env->hflags &= ~HF_MPX_IU_MASK;
+    }
+}
diff --git a/target/i386/ops_sse.h b/target/i386/ops_sse.h
new file mode 100644
index 0000000000..7a98f53864
--- /dev/null
+++ b/target/i386/ops_sse.h
@@ -0,0 +1,2296 @@
+/*
+ *  MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4/PNI support
+ *
+ *  Copyright (c) 2005 Fabrice Bellard
+ *  Copyright (c) 2008 Intel Corporation  <andrew.zaborowski@intel.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "crypto/aes.h"
+
+#if SHIFT == 0
+#define Reg MMXReg
+#define XMM_ONLY(...)
+#define B(n) MMX_B(n)
+#define W(n) MMX_W(n)
+#define L(n) MMX_L(n)
+#define Q(n) MMX_Q(n)
+#define SUFFIX _mmx
+#else
+#define Reg ZMMReg
+#define XMM_ONLY(...) __VA_ARGS__
+#define B(n) ZMM_B(n)
+#define W(n) ZMM_W(n)
+#define L(n) ZMM_L(n)
+#define Q(n) ZMM_Q(n)
+#define SUFFIX _xmm
+#endif
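+
+/*
+ * This file is included twice: once with SHIFT == 0 to generate the
+ * 64-bit MMX forms (suffix _mmx) and once with SHIFT == 1 for the
+ * 128-bit XMM forms (suffix _xmm).  (8 << SHIFT) is thus the register
+ * width in bytes, and XMM_ONLY() expands only on the SSE pass.
+ */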
+
+void glue(helper_psrlw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+    int shift;
+
+    if (s->Q(0) > 15) {
+        d->Q(0) = 0;
+#if SHIFT == 1
+        d->Q(1) = 0;
+#endif
+    } else {
+        shift = s->B(0);
+        d->W(0) >>= shift;
+        d->W(1) >>= shift;
+        d->W(2) >>= shift;
+        d->W(3) >>= shift;
+#if SHIFT == 1
+        d->W(4) >>= shift;
+        d->W(5) >>= shift;
+        d->W(6) >>= shift;
+        d->W(7) >>= shift;
+#endif
+    }
+}
+
+void glue(helper_psraw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+    int shift;
+
+    if (s->Q(0) > 15) {
+        shift = 15;
+    } else {
+        shift = s->B(0);
+    }
+    d->W(0) = (int16_t)d->W(0) >> shift;
+    d->W(1) = (int16_t)d->W(1) >> shift;
+    d->W(2) = (int16_t)d->W(2) >> shift;
+    d->W(3) = (int16_t)d->W(3) >> shift;
+#if SHIFT == 1
+    d->W(4) = (int16_t)d->W(4) >> shift;
+    d->W(5) = (int16_t)d->W(5) >> shift;
+    d->W(6) = (int16_t)d->W(6) >> shift;
+    d->W(7) = (int16_t)d->W(7) >> shift;
+#endif
+}
+
+void glue(helper_psllw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+    int shift;
+
+    if (s->Q(0) > 15) {
+        d->Q(0) = 0;
+#if SHIFT == 1
+        d->Q(1) = 0;
+#endif
+    } else {
+        shift = s->B(0);
+        d->W(0) <<= shift;
+        d->W(1) <<= shift;
+        d->W(2) <<= shift;
+        d->W(3) <<= shift;
+#if SHIFT == 1
+        d->W(4) <<= shift;
+        d->W(5) <<= shift;
+        d->W(6) <<= shift;
+        d->W(7) <<= shift;
+#endif
+    }
+}
+
+void glue(helper_psrld, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+    int shift;
+
+    if (s->Q(0) > 31) {
+        d->Q(0) = 0;
+#if SHIFT == 1
+        d->Q(1) = 0;
+#endif
+    } else {
+        shift = s->B(0);
+        d->L(0) >>= shift;
+        d->L(1) >>= shift;
+#if SHIFT == 1
+        d->L(2) >>= shift;
+        d->L(3) >>= shift;
+#endif
+    }
+}
+
+void glue(helper_psrad, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+    int shift;
+
+    if (s->Q(0) > 31) {
+        shift = 31;
+    } else {
+        shift = s->B(0);
+    }
+    d->L(0) = (int32_t)d->L(0) >> shift;
+    d->L(1) = (int32_t)d->L(1) >> shift;
+#if SHIFT == 1
+    d->L(2) = (int32_t)d->L(2) >> shift;
+    d->L(3) = (int32_t)d->L(3) >> shift;
+#endif
+}
+
+void glue(helper_pslld, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+    int shift;
+
+    if (s->Q(0) > 31) {
+        d->Q(0) = 0;
+#if SHIFT == 1
+        d->Q(1) = 0;
+#endif
+    } else {
+        shift = s->B(0);
+        d->L(0) <<= shift;
+        d->L(1) <<= shift;
+#if SHIFT == 1
+        d->L(2) <<= shift;
+        d->L(3) <<= shift;
+#endif
+    }
+}
+
+void glue(helper_psrlq, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+    int shift;
+
+    if (s->Q(0) > 63) {
+        d->Q(0) = 0;
+#if SHIFT == 1
+        d->Q(1) = 0;
+#endif
+    } else {
+        shift = s->B(0);
+        d->Q(0) >>= shift;
+#if SHIFT == 1
+        d->Q(1) >>= shift;
+#endif
+    }
+}
+
+void glue(helper_psllq, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+    int shift;
+
+    if (s->Q(0) > 63) {
+        d->Q(0) = 0;
+#if SHIFT == 1
+        d->Q(1) = 0;
+#endif
+    } else {
+        shift = s->B(0);
+        d->Q(0) <<= shift;
+#if SHIFT == 1
+        d->Q(1) <<= shift;
+#endif
+    }
+}
+
+#if SHIFT == 1
+void glue(helper_psrldq, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+    int shift, i;
+
+    shift = s->L(0);
+    if (shift > 16) {
+        shift = 16;
+    }
+    for (i = 0; i < 16 - shift; i++) {
+        d->B(i) = d->B(i + shift);
+    }
+    for (i = 16 - shift; i < 16; i++) {
+        d->B(i) = 0;
+    }
+}
+
+void glue(helper_pslldq, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+    int shift, i;
+
+    shift = s->L(0);
+    if (shift > 16) {
+        shift = 16;
+    }
+    for (i = 15; i >= shift; i--) {
+        d->B(i) = d->B(i - shift);
+    }
+    for (i = 0; i < shift; i++) {
+        d->B(i) = 0;
+    }
+}
+#endif
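+
+/*
+ * For all the vector shifts above, the count comes from the low
+ * quadword of the source.  Counts of the element width or more clear
+ * the destination, except for the arithmetic right shifts, which
+ * clamp the count to width - 1 so every lane fills with its sign bit.
+ * PSRLDQ/PSLLDQ shift the whole 128-bit register by bytes.
+ */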
+
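+/*
+ * SSE_HELPER_B/W/L/Q instantiate an element-wise binary operation over
+ * every lane of the current register width, e.g.
+ * SSE_HELPER_B(helper_paddb, FADD) emits helper_paddb_mmx over 8 bytes
+ * and helper_paddb_xmm over 16.
+ */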
+#define SSE_HELPER_B(name, F)                                   \
+    void glue(name, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)   \
+    {                                                           \
+        d->B(0) = F(d->B(0), s->B(0));                          \
+        d->B(1) = F(d->B(1), s->B(1));                          \
+        d->B(2) = F(d->B(2), s->B(2));                          \
+        d->B(3) = F(d->B(3), s->B(3));                          \
+        d->B(4) = F(d->B(4), s->B(4));                          \
+        d->B(5) = F(d->B(5), s->B(5));                          \
+        d->B(6) = F(d->B(6), s->B(6));                          \
+        d->B(7) = F(d->B(7), s->B(7));                          \
+        XMM_ONLY(                                               \
+                 d->B(8) = F(d->B(8), s->B(8));                 \
+                 d->B(9) = F(d->B(9), s->B(9));                 \
+                 d->B(10) = F(d->B(10), s->B(10));              \
+                 d->B(11) = F(d->B(11), s->B(11));              \
+                 d->B(12) = F(d->B(12), s->B(12));              \
+                 d->B(13) = F(d->B(13), s->B(13));              \
+                 d->B(14) = F(d->B(14), s->B(14));              \
+                 d->B(15) = F(d->B(15), s->B(15));              \
+                                                        )       \
+            }
+
+#define SSE_HELPER_W(name, F)                                   \
+    void glue(name, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)   \
+    {                                                           \
+        d->W(0) = F(d->W(0), s->W(0));                          \
+        d->W(1) = F(d->W(1), s->W(1));                          \
+        d->W(2) = F(d->W(2), s->W(2));                          \
+        d->W(3) = F(d->W(3), s->W(3));                          \
+        XMM_ONLY(                                               \
+                 d->W(4) = F(d->W(4), s->W(4));                 \
+                 d->W(5) = F(d->W(5), s->W(5));                 \
+                 d->W(6) = F(d->W(6), s->W(6));                 \
+                 d->W(7) = F(d->W(7), s->W(7));                 \
+                                                        )       \
+            }
+
+#define SSE_HELPER_L(name, F)                                   \
+    void glue(name, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)   \
+    {                                                           \
+        d->L(0) = F(d->L(0), s->L(0));                          \
+        d->L(1) = F(d->L(1), s->L(1));                          \
+        XMM_ONLY(                                               \
+                 d->L(2) = F(d->L(2), s->L(2));                 \
+                 d->L(3) = F(d->L(3), s->L(3));                 \
+                                                        )       \
+            }
+
+#define SSE_HELPER_Q(name, F)                                   \
+    void glue(name, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)   \
+    {                                                           \
+        d->Q(0) = F(d->Q(0), s->Q(0));                          \
+        XMM_ONLY(                                               \
+                 d->Q(1) = F(d->Q(1), s->Q(1));                 \
+                                                        )       \
+            }
+
+#if SHIFT == 0
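+/* Saturation helpers, shared by both passes; defined once, on the
+ * MMX (SHIFT == 0) pass. */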
+static inline int satub(int x)
+{
+    if (x < 0) {
+        return 0;
+    } else if (x > 255) {
+        return 255;
+    } else {
+        return x;
+    }
+}
+
+static inline int satuw(int x)
+{
+    if (x < 0) {
+        return 0;
+    } else if (x > 65535) {
+        return 65535;
+    } else {
+        return x;
+    }
+}
+
+static inline int satsb(int x)
+{
+    if (x < -128) {
+        return -128;
+    } else if (x > 127) {
+        return 127;
+    } else {
+        return x;
+    }
+}
+
+static inline int satsw(int x)
+{
+    if (x < -32768) {
+        return -32768;
+    } else if (x > 32767) {
+        return 32767;
+    } else {
+        return x;
+    }
+}
+
+#define FADD(a, b) ((a) + (b))
+#define FADDUB(a, b) satub((a) + (b))
+#define FADDUW(a, b) satuw((a) + (b))
+#define FADDSB(a, b) satsb((int8_t)(a) + (int8_t)(b))
+#define FADDSW(a, b) satsw((int16_t)(a) + (int16_t)(b))
+
+#define FSUB(a, b) ((a) - (b))
+#define FSUBUB(a, b) satub((a) - (b))
+#define FSUBUW(a, b) satuw((a) - (b))
+#define FSUBSB(a, b) satsb((int8_t)(a) - (int8_t)(b))
+#define FSUBSW(a, b) satsw((int16_t)(a) - (int16_t)(b))
+#define FMINUB(a, b) (((a) < (b)) ? (a) : (b))
+#define FMINSW(a, b) (((int16_t)(a) < (int16_t)(b)) ? (a) : (b))
+#define FMAXUB(a, b) (((a) > (b)) ? (a) : (b))
+#define FMAXSW(a, b) (((int16_t)(a) > (int16_t)(b)) ? (a) : (b))
+
+#define FAND(a, b) ((a) & (b))
+#define FANDN(a, b) ((~(a)) & (b))
+#define FOR(a, b) ((a) | (b))
+#define FXOR(a, b) ((a) ^ (b))
+
+#define FCMPGTB(a, b) ((int8_t)(a) > (int8_t)(b) ? -1 : 0)
+#define FCMPGTW(a, b) ((int16_t)(a) > (int16_t)(b) ? -1 : 0)
+#define FCMPGTL(a, b) ((int32_t)(a) > (int32_t)(b) ? -1 : 0)
+#define FCMPEQ(a, b) ((a) == (b) ? -1 : 0)
+
+#define FMULLW(a, b) ((a) * (b))
+#define FMULHRW(a, b) (((int16_t)(a) * (int16_t)(b) + 0x8000) >> 16)
+#define FMULHUW(a, b) (((a) * (b)) >> 16)
+#define FMULHW(a, b) (((int16_t)(a) * (int16_t)(b)) >> 16)
+
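+/* FAVG rounds upward (note the + 1 before the shift), matching the
+ * PAVGB/PAVGW definition. */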
+#define FAVG(a, b) (((a) + (b) + 1) >> 1)
+#endif
+
+SSE_HELPER_B(helper_paddb, FADD)
+SSE_HELPER_W(helper_paddw, FADD)
+SSE_HELPER_L(helper_paddl, FADD)
+SSE_HELPER_Q(helper_paddq, FADD)
+
+SSE_HELPER_B(helper_psubb, FSUB)
+SSE_HELPER_W(helper_psubw, FSUB)
+SSE_HELPER_L(helper_psubl, FSUB)
+SSE_HELPER_Q(helper_psubq, FSUB)
+
+SSE_HELPER_B(helper_paddusb, FADDUB)
+SSE_HELPER_B(helper_paddsb, FADDSB)
+SSE_HELPER_B(helper_psubusb, FSUBUB)
+SSE_HELPER_B(helper_psubsb, FSUBSB)
+
+SSE_HELPER_W(helper_paddusw, FADDUW)
+SSE_HELPER_W(helper_paddsw, FADDSW)
+SSE_HELPER_W(helper_psubusw, FSUBUW)
+SSE_HELPER_W(helper_psubsw, FSUBSW)
+
+SSE_HELPER_B(helper_pminub, FMINUB)
+SSE_HELPER_B(helper_pmaxub, FMAXUB)
+
+SSE_HELPER_W(helper_pminsw, FMINSW)
+SSE_HELPER_W(helper_pmaxsw, FMAXSW)
+
+SSE_HELPER_Q(helper_pand, FAND)
+SSE_HELPER_Q(helper_pandn, FANDN)
+SSE_HELPER_Q(helper_por, FOR)
+SSE_HELPER_Q(helper_pxor, FXOR)
+
+SSE_HELPER_B(helper_pcmpgtb, FCMPGTB)
+SSE_HELPER_W(helper_pcmpgtw, FCMPGTW)
+SSE_HELPER_L(helper_pcmpgtl, FCMPGTL)
+
+SSE_HELPER_B(helper_pcmpeqb, FCMPEQ)
+SSE_HELPER_W(helper_pcmpeqw, FCMPEQ)
+SSE_HELPER_L(helper_pcmpeql, FCMPEQ)
+
+SSE_HELPER_W(helper_pmullw, FMULLW)
+#if SHIFT == 0
+SSE_HELPER_W(helper_pmulhrw, FMULHRW)
+#endif
+SSE_HELPER_W(helper_pmulhuw, FMULHUW)
+SSE_HELPER_W(helper_pmulhw, FMULHW)
+
+SSE_HELPER_B(helper_pavgb, FAVG)
+SSE_HELPER_W(helper_pavgw, FAVG)
+
+void glue(helper_pmuludq, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+    d->Q(0) = (uint64_t)s->L(0) * (uint64_t)d->L(0);
+#if SHIFT == 1
+    d->Q(1) = (uint64_t)s->L(2) * (uint64_t)d->L(2);
+#endif
+}
+
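+/*
+ * PMADDWD: multiply signed 16-bit lanes pairwise and add each adjacent
+ * pair of 32-bit products into one 32-bit lane.
+ */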
+void glue(helper_pmaddwd, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+    int i;
+
+    for (i = 0; i < (2 << SHIFT); i++) {
+        d->L(i) = (int16_t)s->W(2 * i) * (int16_t)d->W(2 * i) +
+            (int16_t)s->W(2 * i + 1) * (int16_t)d->W(2 * i + 1);
+    }
+}
+
+#if SHIFT == 0
+static inline int abs1(int a)
+{
+    if (a < 0) {
+        return -a;
+    } else {
+        return a;
+    }
+}
+#endif
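+
+/*
+ * PSADBW: sum of absolute byte differences, one 16-bit total per
+ * 64-bit half, zero-extended into the quadword.
+ */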
+void glue(helper_psadbw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+    unsigned int val;
+
+    val = 0;
+    val += abs1(d->B(0) - s->B(0));
+    val += abs1(d->B(1) - s->B(1));
+    val += abs1(d->B(2) - s->B(2));
+    val += abs1(d->B(3) - s->B(3));
+    val += abs1(d->B(4) - s->B(4));
+    val += abs1(d->B(5) - s->B(5));
+    val += abs1(d->B(6) - s->B(6));
+    val += abs1(d->B(7) - s->B(7));
+    d->Q(0) = val;
+#if SHIFT == 1
+    val = 0;
+    val += abs1(d->B(8) - s->B(8));
+    val += abs1(d->B(9) - s->B(9));
+    val += abs1(d->B(10) - s->B(10));
+    val += abs1(d->B(11) - s->B(11));
+    val += abs1(d->B(12) - s->B(12));
+    val += abs1(d->B(13) - s->B(13));
+    val += abs1(d->B(14) - s->B(14));
+    val += abs1(d->B(15) - s->B(15));
+    d->Q(1) = val;
+#endif
+}
+
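+/*
+ * MASKMOV: store only the bytes whose selector byte has its top bit
+ * set; passing GETPC() lets a faulting store unwind precisely to the
+ * guest instruction.
+ */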
+void glue(helper_maskmov, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
+                                  target_ulong a0)
+{
+    int i;
+
+    for (i = 0; i < (8 << SHIFT); i++) {
+        if (s->B(i) & 0x80) {
+            cpu_stb_data_ra(env, a0 + i, d->B(i), GETPC());
+        }
+    }
+}
+
+void glue(helper_movl_mm_T0, SUFFIX)(Reg *d, uint32_t val)
+{
+    d->L(0) = val;
+    d->L(1) = 0;
+#if SHIFT == 1
+    d->Q(1) = 0;
+#endif
+}
+
+#ifdef TARGET_X86_64
+void glue(helper_movq_mm_T0, SUFFIX)(Reg *d, uint64_t val)
+{
+    d->Q(0) = val;
+#if SHIFT == 1
+    d->Q(1) = 0;
+#endif
+}
+#endif
+
+#if SHIFT == 0
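+/* PSHUFW: each 2-bit field of 'order' selects one source word, e.g.
+ * order == 0x1b picks words 3,2,1,0 and so reverses the register. */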
+void glue(helper_pshufw, SUFFIX)(Reg *d, Reg *s, int order)
+{
+    Reg r;
+
+    r.W(0) = s->W(order & 3);
+    r.W(1) = s->W((order >> 2) & 3);
+    r.W(2) = s->W((order >> 4) & 3);
+    r.W(3) = s->W((order >> 6) & 3);
+    *d = r;
+}
+#else
+void helper_shufps(Reg *d, Reg *s, int order)
+{
+    Reg r;
+
+    r.L(0) = d->L(order & 3);
+    r.L(1) = d->L((order >> 2) & 3);
+    r.L(2) = s->L((order >> 4) & 3);
+    r.L(3) = s->L((order >> 6) & 3);
+    *d = r;
+}
+
+void helper_shufpd(Reg *d, Reg *s, int order)
+{
+    Reg r;
+
+    r.Q(0) = d->Q(order & 1);
+    r.Q(1) = s->Q((order >> 1) & 1);
+    *d = r;
+}
+
+void glue(helper_pshufd, SUFFIX)(Reg *d, Reg *s, int order)
+{
+    Reg r;
+
+    r.L(0) = s->L(order & 3);
+    r.L(1) = s->L((order >> 2) & 3);
+    r.L(2) = s->L((order >> 4) & 3);
+    r.L(3) = s->L((order >> 6) & 3);
+    *d = r;
+}
+
+void glue(helper_pshuflw, SUFFIX)(Reg *d, Reg *s, int order)
+{
+    Reg r;
+
+    r.W(0) = s->W(order & 3);
+    r.W(1) = s->W((order >> 2) & 3);
+    r.W(2) = s->W((order >> 4) & 3);
+    r.W(3) = s->W((order >> 6) & 3);
+    r.Q(1) = s->Q(1);
+    *d = r;
+}
+
+void glue(helper_pshufhw, SUFFIX)(Reg *d, Reg *s, int order)
+{
+    Reg r;
+
+    r.Q(0) = s->Q(0);
+    r.W(4) = s->W(4 + (order & 3));
+    r.W(5) = s->W(4 + ((order >> 2) & 3));
+    r.W(6) = s->W(4 + ((order >> 4) & 3));
+    r.W(7) = s->W(4 + ((order >> 6) & 3));
+    *d = r;
+}
+#endif
+
+#if SHIFT == 1
+/* FPU ops */
+/* XXX: not bit-exact with real hardware in all corner cases */
+
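+/*
+ * SSE_HELPER_S(name, F) emits the four usual variants of an SSE float
+ * op: packed/scalar times single/double (ps, ss, pd, sd), all routed
+ * through env->sse_status, whose rounding mode is set from the guest
+ * MXCSR.
+ */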
+#define SSE_HELPER_S(name, F)                                           \
+    void helper_ ## name ## ps(CPUX86State *env, Reg *d, Reg *s)        \
+    {                                                                   \
+        d->ZMM_S(0) = F(32, d->ZMM_S(0), s->ZMM_S(0));                  \
+        d->ZMM_S(1) = F(32, d->ZMM_S(1), s->ZMM_S(1));                  \
+        d->ZMM_S(2) = F(32, d->ZMM_S(2), s->ZMM_S(2));                  \
+        d->ZMM_S(3) = F(32, d->ZMM_S(3), s->ZMM_S(3));                  \
+    }                                                                   \
+                                                                        \
+    void helper_ ## name ## ss(CPUX86State *env, Reg *d, Reg *s)        \
+    {                                                                   \
+        d->ZMM_S(0) = F(32, d->ZMM_S(0), s->ZMM_S(0));                  \
+    }                                                                   \
+                                                                        \
+    void helper_ ## name ## pd(CPUX86State *env, Reg *d, Reg *s)        \
+    {                                                                   \
+        d->ZMM_D(0) = F(64, d->ZMM_D(0), s->ZMM_D(0));                  \
+        d->ZMM_D(1) = F(64, d->ZMM_D(1), s->ZMM_D(1));                  \
+    }                                                                   \
+                                                                        \
+    void helper_ ## name ## sd(CPUX86State *env, Reg *d, Reg *s)        \
+    {                                                                   \
+        d->ZMM_D(0) = F(64, d->ZMM_D(0), s->ZMM_D(0));                  \
+    }
+
+#define FPU_ADD(size, a, b) float ## size ## _add(a, b, &env->sse_status)
+#define FPU_SUB(size, a, b) float ## size ## _sub(a, b, &env->sse_status)
+#define FPU_MUL(size, a, b) float ## size ## _mul(a, b, &env->sse_status)
+#define FPU_DIV(size, a, b) float ## size ## _div(a, b, &env->sse_status)
+#define FPU_SQRT(size, a, b) float ## size ## _sqrt(b, &env->sse_status)
+
+/* Note that the choice of comparison op here is important to get the
+ * special cases right: for min and max Intel specifies that (-0,0),
+ * (NaN, anything) and (anything, NaN) return the second argument.
+ */
+#define FPU_MIN(size, a, b)                                     \
+    (float ## size ## _lt(a, b, &env->sse_status) ? (a) : (b))
+#define FPU_MAX(size, a, b)                                     \
+    (float ## size ## _lt(b, a, &env->sse_status) ? (a) : (b))
+
+SSE_HELPER_S(add, FPU_ADD)
+SSE_HELPER_S(sub, FPU_SUB)
+SSE_HELPER_S(mul, FPU_MUL)
+SSE_HELPER_S(div, FPU_DIV)
+SSE_HELPER_S(min, FPU_MIN)
+SSE_HELPER_S(max, FPU_MAX)
+SSE_HELPER_S(sqrt, FPU_SQRT)
+
+
+/* float to float conversions */
+void helper_cvtps2pd(CPUX86State *env, Reg *d, Reg *s)
+{
+    float32 s0, s1;
+
+    s0 = s->ZMM_S(0);
+    s1 = s->ZMM_S(1);
+    d->ZMM_D(0) = float32_to_float64(s0, &env->sse_status);
+    d->ZMM_D(1) = float32_to_float64(s1, &env->sse_status);
+}
+
+void helper_cvtpd2ps(CPUX86State *env, Reg *d, Reg *s)
+{
+    d->ZMM_S(0) = float64_to_float32(s->ZMM_D(0), &env->sse_status);
+    d->ZMM_S(1) = float64_to_float32(s->ZMM_D(1), &env->sse_status);
+    d->Q(1) = 0;
+}
+
+void helper_cvtss2sd(CPUX86State *env, Reg *d, Reg *s)
+{
+    d->ZMM_D(0) = float32_to_float64(s->ZMM_S(0), &env->sse_status);
+}
+
+void helper_cvtsd2ss(CPUX86State *env, Reg *d, Reg *s)
+{
+    d->ZMM_S(0) = float64_to_float32(s->ZMM_D(0), &env->sse_status);
+}
+
+/* integer to float */
+void helper_cvtdq2ps(CPUX86State *env, Reg *d, Reg *s)
+{
+    d->ZMM_S(0) = int32_to_float32(s->ZMM_L(0), &env->sse_status);
+    d->ZMM_S(1) = int32_to_float32(s->ZMM_L(1), &env->sse_status);
+    d->ZMM_S(2) = int32_to_float32(s->ZMM_L(2), &env->sse_status);
+    d->ZMM_S(3) = int32_to_float32(s->ZMM_L(3), &env->sse_status);
+}
+
+void helper_cvtdq2pd(CPUX86State *env, Reg *d, Reg *s)
+{
+    int32_t l0, l1;
+
+    l0 = (int32_t)s->ZMM_L(0);
+    l1 = (int32_t)s->ZMM_L(1);
+    d->ZMM_D(0) = int32_to_float64(l0, &env->sse_status);
+    d->ZMM_D(1) = int32_to_float64(l1, &env->sse_status);
+}
+
+void helper_cvtpi2ps(CPUX86State *env, ZMMReg *d, MMXReg *s)
+{
+    d->ZMM_S(0) = int32_to_float32(s->MMX_L(0), &env->sse_status);
+    d->ZMM_S(1) = int32_to_float32(s->MMX_L(1), &env->sse_status);
+}
+
+void helper_cvtpi2pd(CPUX86State *env, ZMMReg *d, MMXReg *s)
+{
+    d->ZMM_D(0) = int32_to_float64(s->MMX_L(0), &env->sse_status);
+    d->ZMM_D(1) = int32_to_float64(s->MMX_L(1), &env->sse_status);
+}
+
+void helper_cvtsi2ss(CPUX86State *env, ZMMReg *d, uint32_t val)
+{
+    d->ZMM_S(0) = int32_to_float32(val, &env->sse_status);
+}
+
+void helper_cvtsi2sd(CPUX86State *env, ZMMReg *d, uint32_t val)
+{
+    d->ZMM_D(0) = int32_to_float64(val, &env->sse_status);
+}
+
+#ifdef TARGET_X86_64
+void helper_cvtsq2ss(CPUX86State *env, ZMMReg *d, uint64_t val)
+{
+    d->ZMM_S(0) = int64_to_float32(val, &env->sse_status);
+}
+
+void helper_cvtsq2sd(CPUX86State *env, ZMMReg *d, uint64_t val)
+{
+    d->ZMM_D(0) = int64_to_float64(val, &env->sse_status);
+}
+#endif
+
+/* float to integer */
+void helper_cvtps2dq(CPUX86State *env, ZMMReg *d, ZMMReg *s)
+{
+    d->ZMM_L(0) = float32_to_int32(s->ZMM_S(0), &env->sse_status);
+    d->ZMM_L(1) = float32_to_int32(s->ZMM_S(1), &env->sse_status);
+    d->ZMM_L(2) = float32_to_int32(s->ZMM_S(2), &env->sse_status);
+    d->ZMM_L(3) = float32_to_int32(s->ZMM_S(3), &env->sse_status);
+}
+
+void helper_cvtpd2dq(CPUX86State *env, ZMMReg *d, ZMMReg *s)
+{
+    d->ZMM_L(0) = float64_to_int32(s->ZMM_D(0), &env->sse_status);
+    d->ZMM_L(1) = float64_to_int32(s->ZMM_D(1), &env->sse_status);
+    d->ZMM_Q(1) = 0;
+}
+
+void helper_cvtps2pi(CPUX86State *env, MMXReg *d, ZMMReg *s)
+{
+    d->MMX_L(0) = float32_to_int32(s->ZMM_S(0), &env->sse_status);
+    d->MMX_L(1) = float32_to_int32(s->ZMM_S(1), &env->sse_status);
+}
+
+void helper_cvtpd2pi(CPUX86State *env, MMXReg *d, ZMMReg *s)
+{
+    d->MMX_L(0) = float64_to_int32(s->ZMM_D(0), &env->sse_status);
+    d->MMX_L(1) = float64_to_int32(s->ZMM_D(1), &env->sse_status);
+}
+
+int32_t helper_cvtss2si(CPUX86State *env, ZMMReg *s)
+{
+    return float32_to_int32(s->ZMM_S(0), &env->sse_status);
+}
+
+int32_t helper_cvtsd2si(CPUX86State *env, ZMMReg *s)
+{
+    return float64_to_int32(s->ZMM_D(0), &env->sse_status);
+}
+
+#ifdef TARGET_X86_64
+int64_t helper_cvtss2sq(CPUX86State *env, ZMMReg *s)
+{
+    return float32_to_int64(s->ZMM_S(0), &env->sse_status);
+}
+
+int64_t helper_cvtsd2sq(CPUX86State *env, ZMMReg *s)
+{
+    return float64_to_int64(s->ZMM_D(0), &env->sse_status);
+}
+#endif
+
+/* float to integer truncated */
+void helper_cvttps2dq(CPUX86State *env, ZMMReg *d, ZMMReg *s)
+{
+    d->ZMM_L(0) = float32_to_int32_round_to_zero(s->ZMM_S(0), &env->sse_status);
+    d->ZMM_L(1) = float32_to_int32_round_to_zero(s->ZMM_S(1), &env->sse_status);
+    d->ZMM_L(2) = float32_to_int32_round_to_zero(s->ZMM_S(2), &env->sse_status);
+    d->ZMM_L(3) = float32_to_int32_round_to_zero(s->ZMM_S(3), &env->sse_status);
+}
+
+void helper_cvttpd2dq(CPUX86State *env, ZMMReg *d, ZMMReg *s)
+{
+    d->ZMM_L(0) = float64_to_int32_round_to_zero(s->ZMM_D(0), &env->sse_status);
+    d->ZMM_L(1) = float64_to_int32_round_to_zero(s->ZMM_D(1), &env->sse_status);
+    d->ZMM_Q(1) = 0;
+}
+
+void helper_cvttps2pi(CPUX86State *env, MMXReg *d, ZMMReg *s)
+{
+    d->MMX_L(0) = float32_to_int32_round_to_zero(s->ZMM_S(0), &env->sse_status);
+    d->MMX_L(1) = float32_to_int32_round_to_zero(s->ZMM_S(1), &env->sse_status);
+}
+
+void helper_cvttpd2pi(CPUX86State *env, MMXReg *d, ZMMReg *s)
+{
+    d->MMX_L(0) = float64_to_int32_round_to_zero(s->ZMM_D(0), &env->sse_status);
+    d->MMX_L(1) = float64_to_int32_round_to_zero(s->ZMM_D(1), &env->sse_status);
+}
+
+int32_t helper_cvttss2si(CPUX86State *env, ZMMReg *s)
+{
+    return float32_to_int32_round_to_zero(s->ZMM_S(0), &env->sse_status);
+}
+
+int32_t helper_cvttsd2si(CPUX86State *env, ZMMReg *s)
+{
+    return float64_to_int32_round_to_zero(s->ZMM_D(0), &env->sse_status);
+}
+
+#ifdef TARGET_X86_64
+int64_t helper_cvttss2sq(CPUX86State *env, ZMMReg *s)
+{
+    return float32_to_int64_round_to_zero(s->ZMM_S(0), &env->sse_status);
+}
+
+int64_t helper_cvttsd2sq(CPUX86State *env, ZMMReg *s)
+{
+    return float64_to_int64_round_to_zero(s->ZMM_D(0), &env->sse_status);
+}
+#endif
+
+void helper_rsqrtps(CPUX86State *env, ZMMReg *d, ZMMReg *s)
+{
+    d->ZMM_S(0) = float32_div(float32_one,
+                              float32_sqrt(s->ZMM_S(0), &env->sse_status),
+                              &env->sse_status);
+    d->ZMM_S(1) = float32_div(float32_one,
+                              float32_sqrt(s->ZMM_S(1), &env->sse_status),
+                              &env->sse_status);
+    d->ZMM_S(2) = float32_div(float32_one,
+                              float32_sqrt(s->ZMM_S(2), &env->sse_status),
+                              &env->sse_status);
+    d->ZMM_S(3) = float32_div(float32_one,
+                              float32_sqrt(s->ZMM_S(3), &env->sse_status),
+                              &env->sse_status);
+}
+
+void helper_rsqrtss(CPUX86State *env, ZMMReg *d, ZMMReg *s)
+{
+    d->ZMM_S(0) = float32_div(float32_one,
+                              float32_sqrt(s->ZMM_S(0), &env->sse_status),
+                              &env->sse_status);
+}
+
+void helper_rcpps(CPUX86State *env, ZMMReg *d, ZMMReg *s)
+{
+    d->ZMM_S(0) = float32_div(float32_one, s->ZMM_S(0), &env->sse_status);
+    d->ZMM_S(1) = float32_div(float32_one, s->ZMM_S(1), &env->sse_status);
+    d->ZMM_S(2) = float32_div(float32_one, s->ZMM_S(2), &env->sse_status);
+    d->ZMM_S(3) = float32_div(float32_one, s->ZMM_S(3), &env->sse_status);
+}
+
+void helper_rcpss(CPUX86State *env, ZMMReg *d, ZMMReg *s)
+{
+    d->ZMM_S(0) = float32_div(float32_one, s->ZMM_S(0), &env->sse_status);
+}
+
+static inline uint64_t helper_extrq(uint64_t src, int shift, int len)
+{
+    uint64_t mask;
+
+    if (len == 0) {
+        mask = ~0LL;
+    } else {
+        mask = (1ULL << len) - 1;
+    }
+    return (src >> shift) & mask;
+}
+
+void helper_extrq_r(CPUX86State *env, ZMMReg *d, ZMMReg *s)
+{
+    d->ZMM_Q(0) = helper_extrq(d->ZMM_Q(0), s->ZMM_B(1), s->ZMM_B(0));
+}
+
+void helper_extrq_i(CPUX86State *env, ZMMReg *d, int index, int length)
+{
+    d->ZMM_Q(0) = helper_extrq(d->ZMM_Q(0), index, length);
+}
+
+static inline uint64_t helper_insertq(uint64_t src, int shift, int len)
+{
+    uint64_t mask;
+
+    if (len == 0) {
+        mask = ~0ULL;
+    } else {
+        mask = (1ULL << len) - 1;
+    }
+    return (src & ~(mask << shift)) | ((src & mask) << shift);
+}
+
+void helper_insertq_r(CPUX86State *env, ZMMReg *d, ZMMReg *s)
+{
+    d->ZMM_Q(0) = helper_insertq(s->ZMM_Q(0), s->ZMM_B(9), s->ZMM_B(8));
+}
+
+void helper_insertq_i(CPUX86State *env, ZMMReg *d, int index, int length)
+{
+    d->ZMM_Q(0) = helper_insertq(d->ZMM_Q(0), index, length);
+}
+
+void helper_haddps(CPUX86State *env, ZMMReg *d, ZMMReg *s)
+{
+    ZMMReg r;
+
+    r.ZMM_S(0) = float32_add(d->ZMM_S(0), d->ZMM_S(1), &env->sse_status);
+    r.ZMM_S(1) = float32_add(d->ZMM_S(2), d->ZMM_S(3), &env->sse_status);
+    r.ZMM_S(2) = float32_add(s->ZMM_S(0), s->ZMM_S(1), &env->sse_status);
+    r.ZMM_S(3) = float32_add(s->ZMM_S(2), s->ZMM_S(3), &env->sse_status);
+    *d = r;
+}
+
+void helper_haddpd(CPUX86State *env, ZMMReg *d, ZMMReg *s)
+{
+    ZMMReg r;
+
+    r.ZMM_D(0) = float64_add(d->ZMM_D(0), d->ZMM_D(1), &env->sse_status);
+    r.ZMM_D(1) = float64_add(s->ZMM_D(0), s->ZMM_D(1), &env->sse_status);
+    *d = r;
+}
+
+void helper_hsubps(CPUX86State *env, ZMMReg *d, ZMMReg *s)
+{
+    ZMMReg r;
+
+    r.ZMM_S(0) = float32_sub(d->ZMM_S(0), d->ZMM_S(1), &env->sse_status);
+    r.ZMM_S(1) = float32_sub(d->ZMM_S(2), d->ZMM_S(3), &env->sse_status);
+    r.ZMM_S(2) = float32_sub(s->ZMM_S(0), s->ZMM_S(1), &env->sse_status);
+    r.ZMM_S(3) = float32_sub(s->ZMM_S(2), s->ZMM_S(3), &env->sse_status);
+    *d = r;
+}
+
+void helper_hsubpd(CPUX86State *env, ZMMReg *d, ZMMReg *s)
+{
+    ZMMReg r;
+
+    r.ZMM_D(0) = float64_sub(d->ZMM_D(0), d->ZMM_D(1), &env->sse_status);
+    r.ZMM_D(1) = float64_sub(s->ZMM_D(0), s->ZMM_D(1), &env->sse_status);
+    *d = r;
+}
+
+void helper_addsubps(CPUX86State *env, ZMMReg *d, ZMMReg *s)
+{
+    d->ZMM_S(0) = float32_sub(d->ZMM_S(0), s->ZMM_S(0), &env->sse_status);
+    d->ZMM_S(1) = float32_add(d->ZMM_S(1), s->ZMM_S(1), &env->sse_status);
+    d->ZMM_S(2) = float32_sub(d->ZMM_S(2), s->ZMM_S(2), &env->sse_status);
+    d->ZMM_S(3) = float32_add(d->ZMM_S(3), s->ZMM_S(3), &env->sse_status);
+}
+
+void helper_addsubpd(CPUX86State *env, ZMMReg *d, ZMMReg *s)
+{
+    d->ZMM_D(0) = float64_sub(d->ZMM_D(0), s->ZMM_D(0), &env->sse_status);
+    d->ZMM_D(1) = float64_add(d->ZMM_D(1), s->ZMM_D(1), &env->sse_status);
+}
+
+/* XXX: unordered */
+#define SSE_HELPER_CMP(name, F)                                         \
+    void helper_ ## name ## ps(CPUX86State *env, Reg *d, Reg *s)        \
+    {                                                                   \
+        d->ZMM_L(0) = F(32, d->ZMM_S(0), s->ZMM_S(0));                  \
+        d->ZMM_L(1) = F(32, d->ZMM_S(1), s->ZMM_S(1));                  \
+        d->ZMM_L(2) = F(32, d->ZMM_S(2), s->ZMM_S(2));                  \
+        d->ZMM_L(3) = F(32, d->ZMM_S(3), s->ZMM_S(3));                  \
+    }                                                                   \
+                                                                        \
+    void helper_ ## name ## ss(CPUX86State *env, Reg *d, Reg *s)        \
+    {                                                                   \
+        d->ZMM_L(0) = F(32, d->ZMM_S(0), s->ZMM_S(0));                  \
+    }                                                                   \
+                                                                        \
+    void helper_ ## name ## pd(CPUX86State *env, Reg *d, Reg *s)        \
+    {                                                                   \
+        d->ZMM_Q(0) = F(64, d->ZMM_D(0), s->ZMM_D(0));                  \
+        d->ZMM_Q(1) = F(64, d->ZMM_D(1), s->ZMM_D(1));                  \
+    }                                                                   \
+                                                                        \
+    void helper_ ## name ## sd(CPUX86State *env, Reg *d, Reg *s)        \
+    {                                                                   \
+        d->ZMM_Q(0) = F(64, d->ZMM_D(0), s->ZMM_D(0));                  \
+    }
+
+#define FPU_CMPEQ(size, a, b)                                           \
+    (float ## size ## _eq_quiet(a, b, &env->sse_status) ? -1 : 0)
+#define FPU_CMPLT(size, a, b)                                           \
+    (float ## size ## _lt(a, b, &env->sse_status) ? -1 : 0)
+#define FPU_CMPLE(size, a, b)                                           \
+    (float ## size ## _le(a, b, &env->sse_status) ? -1 : 0)
+#define FPU_CMPUNORD(size, a, b)                                        \
+    (float ## size ## _unordered_quiet(a, b, &env->sse_status) ? -1 : 0)
+#define FPU_CMPNEQ(size, a, b)                                          \
+    (float ## size ## _eq_quiet(a, b, &env->sse_status) ? 0 : -1)
+#define FPU_CMPNLT(size, a, b)                                          \
+    (float ## size ## _lt(a, b, &env->sse_status) ? 0 : -1)
+#define FPU_CMPNLE(size, a, b)                                          \
+    (float ## size ## _le(a, b, &env->sse_status) ? 0 : -1)
+#define FPU_CMPORD(size, a, b)                                          \
+    (float ## size ## _unordered_quiet(a, b, &env->sse_status) ? 0 : -1)
+
+SSE_HELPER_CMP(cmpeq, FPU_CMPEQ)
+SSE_HELPER_CMP(cmplt, FPU_CMPLT)
+SSE_HELPER_CMP(cmple, FPU_CMPLE)
+SSE_HELPER_CMP(cmpunord, FPU_CMPUNORD)
+SSE_HELPER_CMP(cmpneq, FPU_CMPNEQ)
+SSE_HELPER_CMP(cmpnlt, FPU_CMPNLT)
+SSE_HELPER_CMP(cmpnle, FPU_CMPNLE)
+SSE_HELPER_CMP(cmpord, FPU_CMPORD)
+
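+/*
+ * COMIS/UCOMIS flag mapping: the softfloat compare returns -1/0/1/2
+ * for less/equal/greater/unordered, so ret + 1 indexes this table:
+ * less -> CF, equal -> ZF, greater -> none, unordered -> ZF|PF|CF.
+ */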
+static const int comis_eflags[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
+
+void helper_ucomiss(CPUX86State *env, Reg *d, Reg *s)
+{
+    int ret;
+    float32 s0, s1;
+
+    s0 = d->ZMM_S(0);
+    s1 = s->ZMM_S(0);
+    ret = float32_compare_quiet(s0, s1, &env->sse_status);
+    CC_SRC = comis_eflags[ret + 1];
+}
+
+void helper_comiss(CPUX86State *env, Reg *d, Reg *s)
+{
+    int ret;
+    float32 s0, s1;
+
+    s0 = d->ZMM_S(0);
+    s1 = s->ZMM_S(0);
+    ret = float32_compare(s0, s1, &env->sse_status);
+    CC_SRC = comis_eflags[ret + 1];
+}
+
+void helper_ucomisd(CPUX86State *env, Reg *d, Reg *s)
+{
+    int ret;
+    float64 d0, d1;
+
+    d0 = d->ZMM_D(0);
+    d1 = s->ZMM_D(0);
+    ret = float64_compare_quiet(d0, d1, &env->sse_status);
+    CC_SRC = comis_eflags[ret + 1];
+}
+
+void helper_comisd(CPUX86State *env, Reg *d, Reg *s)
+{
+    int ret;
+    float64 d0, d1;
+
+    d0 = d->ZMM_D(0);
+    d1 = s->ZMM_D(0);
+    ret = float64_compare(d0, d1, &env->sse_status);
+    CC_SRC = comis_eflags[ret + 1];
+}
+
+uint32_t helper_movmskps(CPUX86State *env, Reg *s)
+{
+    int b0, b1, b2, b3;
+
+    b0 = s->ZMM_L(0) >> 31;
+    b1 = s->ZMM_L(1) >> 31;
+    b2 = s->ZMM_L(2) >> 31;
+    b3 = s->ZMM_L(3) >> 31;
+    return b0 | (b1 << 1) | (b2 << 2) | (b3 << 3);
+}
+
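+/* MOVMSKPD: the sign of each double is bit 31 of its high 32-bit
+ * word, i.e. L(1) and L(3). */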
+uint32_t helper_movmskpd(CPUX86State *env, Reg *s)
+{
+    int b0, b1;
+
+    b0 = s->ZMM_L(1) >> 31;
+    b1 = s->ZMM_L(3) >> 31;
+    return b0 | (b1 << 1);
+}
+
+#endif
+
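+/* PMOVMSKB gathers the sign bit of every byte into the low bits of
+ * the result, byte 0 ending up in bit 0. */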
+uint32_t glue(helper_pmovmskb, SUFFIX)(CPUX86State *env, Reg *s)
+{
+    uint32_t val;
+
+    val = 0;
+    val |= (s->B(0) >> 7);
+    val |= (s->B(1) >> 6) & 0x02;
+    val |= (s->B(2) >> 5) & 0x04;
+    val |= (s->B(3) >> 4) & 0x08;
+    val |= (s->B(4) >> 3) & 0x10;
+    val |= (s->B(5) >> 2) & 0x20;
+    val |= (s->B(6) >> 1) & 0x40;
+    val |= (s->B(7)) & 0x80;
+#if SHIFT == 1
+    val |= (s->B(8) << 1) & 0x0100;
+    val |= (s->B(9) << 2) & 0x0200;
+    val |= (s->B(10) << 3) & 0x0400;
+    val |= (s->B(11) << 4) & 0x0800;
+    val |= (s->B(12) << 5) & 0x1000;
+    val |= (s->B(13) << 6) & 0x2000;
+    val |= (s->B(14) << 7) & 0x4000;
+    val |= (s->B(15) << 8) & 0x8000;
+#endif
+    return val;
+}
+
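+/* The pack helpers narrow with saturation; the low half of the result
+ * comes from d, the high half from s. */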
+void glue(helper_packsswb, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+    Reg r;
+
+    r.B(0) = satsb((int16_t)d->W(0));
+    r.B(1) = satsb((int16_t)d->W(1));
+    r.B(2) = satsb((int16_t)d->W(2));
+    r.B(3) = satsb((int16_t)d->W(3));
+#if SHIFT == 1
+    r.B(4) = satsb((int16_t)d->W(4));
+    r.B(5) = satsb((int16_t)d->W(5));
+    r.B(6) = satsb((int16_t)d->W(6));
+    r.B(7) = satsb((int16_t)d->W(7));
+#endif
+    r.B((4 << SHIFT) + 0) = satsb((int16_t)s->W(0));
+    r.B((4 << SHIFT) + 1) = satsb((int16_t)s->W(1));
+    r.B((4 << SHIFT) + 2) = satsb((int16_t)s->W(2));
+    r.B((4 << SHIFT) + 3) = satsb((int16_t)s->W(3));
+#if SHIFT == 1
+    r.B(12) = satsb((int16_t)s->W(4));
+    r.B(13) = satsb((int16_t)s->W(5));
+    r.B(14) = satsb((int16_t)s->W(6));
+    r.B(15) = satsb((int16_t)s->W(7));
+#endif
+    *d = r;
+}
+
+void glue(helper_packuswb, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+    Reg r;
+
+    r.B(0) = satub((int16_t)d->W(0));
+    r.B(1) = satub((int16_t)d->W(1));
+    r.B(2) = satub((int16_t)d->W(2));
+    r.B(3) = satub((int16_t)d->W(3));
+#if SHIFT == 1
+    r.B(4) = satub((int16_t)d->W(4));
+    r.B(5) = satub((int16_t)d->W(5));
+    r.B(6) = satub((int16_t)d->W(6));
+    r.B(7) = satub((int16_t)d->W(7));
+#endif
+    r.B((4 << SHIFT) + 0) = satub((int16_t)s->W(0));
+    r.B((4 << SHIFT) + 1) = satub((int16_t)s->W(1));
+    r.B((4 << SHIFT) + 2) = satub((int16_t)s->W(2));
+    r.B((4 << SHIFT) + 3) = satub((int16_t)s->W(3));
+#if SHIFT == 1
+    r.B(12) = satub((int16_t)s->W(4));
+    r.B(13) = satub((int16_t)s->W(5));
+    r.B(14) = satub((int16_t)s->W(6));
+    r.B(15) = satub((int16_t)s->W(7));
+#endif
+    *d = r;
+}
+
+void glue(helper_packssdw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+    Reg r;
+
+    r.W(0) = satsw(d->L(0));
+    r.W(1) = satsw(d->L(1));
+#if SHIFT == 1
+    r.W(2) = satsw(d->L(2));
+    r.W(3) = satsw(d->L(3));
+#endif
+    r.W((2 << SHIFT) + 0) = satsw(s->L(0));
+    r.W((2 << SHIFT) + 1) = satsw(s->L(1));
+#if SHIFT == 1
+    r.W(6) = satsw(s->L(2));
+    r.W(7) = satsw(s->L(3));
+#endif
+    *d = r;
+}
+
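+/*
+ * UNPCK_OP(base_name, base): base 0 interleaves the low halves of d
+ * and s (the punpckl forms), base 1 the high halves (punpckh),
+ * alternating one element from d with one from s.
+ */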
+#define UNPCK_OP(base_name, base)                                       \
+                                                                        \
+    void glue(helper_punpck ## base_name ## bw, SUFFIX)(CPUX86State *env,\
+                                                        Reg *d, Reg *s) \
+    {                                                                   \
+        Reg r;                                                          \
+                                                                        \
+        r.B(0) = d->B((base << (SHIFT + 2)) + 0);                       \
+        r.B(1) = s->B((base << (SHIFT + 2)) + 0);                       \
+        r.B(2) = d->B((base << (SHIFT + 2)) + 1);                       \
+        r.B(3) = s->B((base << (SHIFT + 2)) + 1);                       \
+        r.B(4) = d->B((base << (SHIFT + 2)) + 2);                       \
+        r.B(5) = s->B((base << (SHIFT + 2)) + 2);                       \
+        r.B(6) = d->B((base << (SHIFT + 2)) + 3);                       \
+        r.B(7) = s->B((base << (SHIFT + 2)) + 3);                       \
+        XMM_ONLY(                                                       \
+                 r.B(8) = d->B((base << (SHIFT + 2)) + 4);              \
+                 r.B(9) = s->B((base << (SHIFT + 2)) + 4);              \
+                 r.B(10) = d->B((base << (SHIFT + 2)) + 5);             \
+                 r.B(11) = s->B((base << (SHIFT + 2)) + 5);             \
+                 r.B(12) = d->B((base << (SHIFT + 2)) + 6);             \
+                 r.B(13) = s->B((base << (SHIFT + 2)) + 6);             \
+                 r.B(14) = d->B((base << (SHIFT + 2)) + 7);             \
+                 r.B(15) = s->B((base << (SHIFT + 2)) + 7);             \
+                                                                      ) \
+            *d = r;                                                     \
+    }                                                                   \
+                                                                        \
+    void glue(helper_punpck ## base_name ## wd, SUFFIX)(CPUX86State *env,\
+                                                        Reg *d, Reg *s) \
+    {                                                                   \
+        Reg r;                                                          \
+                                                                        \
+        r.W(0) = d->W((base << (SHIFT + 1)) + 0);                       \
+        r.W(1) = s->W((base << (SHIFT + 1)) + 0);                       \
+        r.W(2) = d->W((base << (SHIFT + 1)) + 1);                       \
+        r.W(3) = s->W((base << (SHIFT + 1)) + 1);                       \
+        XMM_ONLY(                                                       \
+                 r.W(4) = d->W((base << (SHIFT + 1)) + 2);              \
+                 r.W(5) = s->W((base << (SHIFT + 1)) + 2);              \
+                 r.W(6) = d->W((base << (SHIFT + 1)) + 3);              \
+                 r.W(7) = s->W((base << (SHIFT + 1)) + 3);              \
+                                                                      ) \
+            *d = r;                                                     \
+    }                                                                   \
+                                                                        \
+    void glue(helper_punpck ## base_name ## dq, SUFFIX)(CPUX86State *env,\
+                                                        Reg *d, Reg *s) \
+    {                                                                   \
+        Reg r;                                                          \
+                                                                        \
+        r.L(0) = d->L((base << SHIFT) + 0);                             \
+        r.L(1) = s->L((base << SHIFT) + 0);                             \
+        XMM_ONLY(                                                       \
+                 r.L(2) = d->L((base << SHIFT) + 1);                    \
+                 r.L(3) = s->L((base << SHIFT) + 1);                    \
+                                                                      ) \
+            *d = r;                                                     \
+    }                                                                   \
+                                                                        \
+    XMM_ONLY(                                                           \
+             void glue(helper_punpck ## base_name ## qdq, SUFFIX)(CPUX86State \
+                                                                  *env, \
+                                                                  Reg *d, \
+                                                                  Reg *s) \
+             {                                                          \
+                 Reg r;                                                 \
+                                                                        \
+                 r.Q(0) = d->Q(base);                                   \
+                 r.Q(1) = s->Q(base);                                   \
+                 *d = r;                                                \
+             }                                                          \
+                                                                        )
+
+UNPCK_OP(l, 0)
+UNPCK_OP(h, 1)
+
+/* 3DNow! float ops */
+#if SHIFT == 0
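+/* 3DNow! treats an MMX register as two packed floats and keeps its
+ * own softfloat state in env->mmx_status. */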
+void helper_pi2fd(CPUX86State *env, MMXReg *d, MMXReg *s)
+{
+    d->MMX_S(0) = int32_to_float32(s->MMX_L(0), &env->mmx_status);
+    d->MMX_S(1) = int32_to_float32(s->MMX_L(1), &env->mmx_status);
+}
+
+void helper_pi2fw(CPUX86State *env, MMXReg *d, MMXReg *s)
+{
+    d->MMX_S(0) = int32_to_float32((int16_t)s->MMX_W(0), &env->mmx_status);
+    d->MMX_S(1) = int32_to_float32((int16_t)s->MMX_W(2), &env->mmx_status);
+}
+
+void helper_pf2id(CPUX86State *env, MMXReg *d, MMXReg *s)
+{
+    d->MMX_L(0) = float32_to_int32_round_to_zero(s->MMX_S(0), &env->mmx_status);
+    d->MMX_L(1) = float32_to_int32_round_to_zero(s->MMX_S(1), &env->mmx_status);
+}
+
+void helper_pf2iw(CPUX86State *env, MMXReg *d, MMXReg *s)
+{
+    d->MMX_L(0) = satsw(float32_to_int32_round_to_zero(s->MMX_S(0),
+                                                       &env->mmx_status));
+    d->MMX_L(1) = satsw(float32_to_int32_round_to_zero(s->MMX_S(1),
+                                                       &env->mmx_status));
+}
+
+void helper_pfacc(CPUX86State *env, MMXReg *d, MMXReg *s)
+{
+    MMXReg r;
+
+    r.MMX_S(0) = float32_add(d->MMX_S(0), d->MMX_S(1), &env->mmx_status);
+    r.MMX_S(1) = float32_add(s->MMX_S(0), s->MMX_S(1), &env->mmx_status);
+    *d = r;
+}
+
+void helper_pfadd(CPUX86State *env, MMXReg *d, MMXReg *s)
+{
+    d->MMX_S(0) = float32_add(d->MMX_S(0), s->MMX_S(0), &env->mmx_status);
+    d->MMX_S(1) = float32_add(d->MMX_S(1), s->MMX_S(1), &env->mmx_status);
+}
+
+void helper_pfcmpeq(CPUX86State *env, MMXReg *d, MMXReg *s)
+{
+    d->MMX_L(0) = float32_eq_quiet(d->MMX_S(0), s->MMX_S(0),
+                                   &env->mmx_status) ? -1 : 0;
+    d->MMX_L(1) = float32_eq_quiet(d->MMX_S(1), s->MMX_S(1),
+                                   &env->mmx_status) ? -1 : 0;
+}
+
+void helper_pfcmpge(CPUX86State *env, MMXReg *d, MMXReg *s)
+{
+    d->MMX_L(0) = float32_le(s->MMX_S(0), d->MMX_S(0),
+                             &env->mmx_status) ? -1 : 0;
+    d->MMX_L(1) = float32_le(s->MMX_S(1), d->MMX_S(1),
+                             &env->mmx_status) ? -1 : 0;
+}
+
+void helper_pfcmpgt(CPUX86State *env, MMXReg *d, MMXReg *s)
+{
+    d->MMX_L(0) = float32_lt(s->MMX_S(0), d->MMX_S(0),
+                             &env->mmx_status) ? -1 : 0;
+    d->MMX_L(1) = float32_lt(s->MMX_S(1), d->MMX_S(1),
+                             &env->mmx_status) ? -1 : 0;
+}
+
+void helper_pfmax(CPUX86State *env, MMXReg *d, MMXReg *s)
+{
+    if (float32_lt(d->MMX_S(0), s->MMX_S(0), &env->mmx_status)) {
+        d->MMX_S(0) = s->MMX_S(0);
+    }
+    if (float32_lt(d->MMX_S(1), s->MMX_S(1), &env->mmx_status)) {
+        d->MMX_S(1) = s->MMX_S(1);
+    }
+}
+
+void helper_pfmin(CPUX86State *env, MMXReg *d, MMXReg *s)
+{
+    if (float32_lt(s->MMX_S(0), d->MMX_S(0), &env->mmx_status)) {
+        d->MMX_S(0) = s->MMX_S(0);
+    }
+    if (float32_lt(s->MMX_S(1), d->MMX_S(1), &env->mmx_status)) {
+        d->MMX_S(1) = s->MMX_S(1);
+    }
+}
+
+void helper_pfmul(CPUX86State *env, MMXReg *d, MMXReg *s)
+{
+    d->MMX_S(0) = float32_mul(d->MMX_S(0), s->MMX_S(0), &env->mmx_status);
+    d->MMX_S(1) = float32_mul(d->MMX_S(1), s->MMX_S(1), &env->mmx_status);
+}
+
+void helper_pfnacc(CPUX86State *env, MMXReg *d, MMXReg *s)
+{
+    MMXReg r;
+
+    r.MMX_S(0) = float32_sub(d->MMX_S(0), d->MMX_S(1), &env->mmx_status);
+    r.MMX_S(1) = float32_sub(s->MMX_S(0), s->MMX_S(1), &env->mmx_status);
+    *d = r;
+}
+
+void helper_pfpnacc(CPUX86State *env, MMXReg *d, MMXReg *s)
+{
+    MMXReg r;
+
+    r.MMX_S(0) = float32_sub(d->MMX_S(0), d->MMX_S(1), &env->mmx_status);
+    r.MMX_S(1) = float32_add(s->MMX_S(0), s->MMX_S(1), &env->mmx_status);
+    *d = r;
+}
+
+void helper_pfrcp(CPUX86State *env, MMXReg *d, MMXReg *s)
+{
+    d->MMX_S(0) = float32_div(float32_one, s->MMX_S(0), &env->mmx_status);
+    d->MMX_S(1) = d->MMX_S(0);
+}
+
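+/* PFRSQRT: 1/sqrt of the low lane's magnitude with the source's sign
+ * bit reapplied, broadcast to both lanes. */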
+void helper_pfrsqrt(CPUX86State *env, MMXReg *d, MMXReg *s)
+{
+    d->MMX_L(1) = s->MMX_L(0) & 0x7fffffff;
+    d->MMX_S(1) = float32_div(float32_one,
+                              float32_sqrt(d->MMX_S(1), &env->mmx_status),
+                              &env->mmx_status);
+    d->MMX_L(1) |= s->MMX_L(0) & 0x80000000;
+    d->MMX_L(0) = d->MMX_L(1);
+}
+
+void helper_pfsub(CPUX86State *env, MMXReg *d, MMXReg *s)
+{
+    d->MMX_S(0) = float32_sub(d->MMX_S(0), s->MMX_S(0), &env->mmx_status);
+    d->MMX_S(1) = float32_sub(d->MMX_S(1), s->MMX_S(1), &env->mmx_status);
+}
+
+void helper_pfsubr(CPUX86State *env, MMXReg *d, MMXReg *s)
+{
+    d->MMX_S(0) = float32_sub(s->MMX_S(0), d->MMX_S(0), &env->mmx_status);
+    d->MMX_S(1) = float32_sub(s->MMX_S(1), d->MMX_S(1), &env->mmx_status);
+}
+
+void helper_pswapd(CPUX86State *env, MMXReg *d, MMXReg *s)
+{
+    MMXReg r;
+
+    r.MMX_L(0) = s->MMX_L(1);
+    r.MMX_L(1) = s->MMX_L(0);
+    *d = r;
+}
+#endif
+
+/* SSSE3 op helpers */
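+/* PSHUFB: each selector byte of s indexes a byte of d; a set top bit
+ * zeroes that lane instead. */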
+void glue(helper_pshufb, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+    int i;
+    Reg r;
+
+    for (i = 0; i < (8 << SHIFT); i++) {
+        r.B(i) = (s->B(i) & 0x80) ? 0 : (d->B(s->B(i) & ((8 << SHIFT) - 1)));
+    }
+
+    *d = r;
+}
+
+void glue(helper_phaddw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+    d->W(0) = (int16_t)d->W(0) + (int16_t)d->W(1);
+    d->W(1) = (int16_t)d->W(2) + (int16_t)d->W(3);
+    XMM_ONLY(d->W(2) = (int16_t)d->W(4) + (int16_t)d->W(5));
+    XMM_ONLY(d->W(3) = (int16_t)d->W(6) + (int16_t)d->W(7));
+    d->W((2 << SHIFT) + 0) = (int16_t)s->W(0) + (int16_t)s->W(1);
+    d->W((2 << SHIFT) + 1) = (int16_t)s->W(2) + (int16_t)s->W(3);
+    XMM_ONLY(d->W(6) = (int16_t)s->W(4) + (int16_t)s->W(5));
+    XMM_ONLY(d->W(7) = (int16_t)s->W(6) + (int16_t)s->W(7));
+}
+
+void glue(helper_phaddd, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+    d->L(0) = (int32_t)d->L(0) + (int32_t)d->L(1);
+    XMM_ONLY(d->L(1) = (int32_t)d->L(2) + (int32_t)d->L(3));
+    d->L((1 << SHIFT) + 0) = (int32_t)s->L(0) + (int32_t)s->L(1);
+    XMM_ONLY(d->L(3) = (int32_t)s->L(2) + (int32_t)s->L(3));
+}
+
+void glue(helper_phaddsw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+    d->W(0) = satsw((int16_t)d->W(0) + (int16_t)d->W(1));
+    d->W(1) = satsw((int16_t)d->W(2) + (int16_t)d->W(3));
+    XMM_ONLY(d->W(2) = satsw((int16_t)d->W(4) + (int16_t)d->W(5)));
+    XMM_ONLY(d->W(3) = satsw((int16_t)d->W(6) + (int16_t)d->W(7)));
+    d->W((2 << SHIFT) + 0) = satsw((int16_t)s->W(0) + (int16_t)s->W(1));
+    d->W((2 << SHIFT) + 1) = satsw((int16_t)s->W(2) + (int16_t)s->W(3));
+    XMM_ONLY(d->W(6) = satsw((int16_t)s->W(4) + (int16_t)s->W(5)));
+    XMM_ONLY(d->W(7) = satsw((int16_t)s->W(6) + (int16_t)s->W(7)));
+}
+
+void glue(helper_pmaddubsw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+    d->W(0) = satsw((int8_t)s->B(0) * (uint8_t)d->B(0) +
+                    (int8_t)s->B(1) * (uint8_t)d->B(1));
+    d->W(1) = satsw((int8_t)s->B(2) * (uint8_t)d->B(2) +
+                    (int8_t)s->B(3) * (uint8_t)d->B(3));
+    d->W(2) = satsw((int8_t)s->B(4) * (uint8_t)d->B(4) +
+                    (int8_t)s->B(5) * (uint8_t)d->B(5));
+    d->W(3) = satsw((int8_t)s->B(6) * (uint8_t)d->B(6) +
+                    (int8_t)s->B(7) * (uint8_t)d->B(7));
+#if SHIFT == 1
+    d->W(4) = satsw((int8_t)s->B(8) * (uint8_t)d->B(8) +
+                    (int8_t)s->B(9) * (uint8_t)d->B(9));
+    d->W(5) = satsw((int8_t)s->B(10) * (uint8_t)d->B(10) +
+                    (int8_t)s->B(11) * (uint8_t)d->B(11));
+    d->W(6) = satsw((int8_t)s->B(12) * (uint8_t)d->B(12) +
+                    (int8_t)s->B(13) * (uint8_t)d->B(13));
+    d->W(7) = satsw((int8_t)s->B(14) * (uint8_t)d->B(14) +
+                    (int8_t)s->B(15) * (uint8_t)d->B(15));
+#endif
+}
+
+void glue(helper_phsubw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+    d->W(0) = (int16_t)d->W(0) - (int16_t)d->W(1);
+    d->W(1) = (int16_t)d->W(2) - (int16_t)d->W(3);
+    XMM_ONLY(d->W(2) = (int16_t)d->W(4) - (int16_t)d->W(5));
+    XMM_ONLY(d->W(3) = (int16_t)d->W(6) - (int16_t)d->W(7));
+    d->W((2 << SHIFT) + 0) = (int16_t)s->W(0) - (int16_t)s->W(1);
+    d->W((2 << SHIFT) + 1) = (int16_t)s->W(2) - (int16_t)s->W(3);
+    XMM_ONLY(d->W(6) = (int16_t)s->W(4) - (int16_t)s->W(5));
+    XMM_ONLY(d->W(7) = (int16_t)s->W(6) - (int16_t)s->W(7));
+}
+
+void glue(helper_phsubd, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+    d->L(0) = (int32_t)d->L(0) - (int32_t)d->L(1);
+    XMM_ONLY(d->L(1) = (int32_t)d->L(2) - (int32_t)d->L(3));
+    d->L((1 << SHIFT) + 0) = (int32_t)s->L(0) - (int32_t)s->L(1);
+    XMM_ONLY(d->L(3) = (int32_t)s->L(2) - (int32_t)s->L(3));
+}
+
+void glue(helper_phsubsw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+    d->W(0) = satsw((int16_t)d->W(0) - (int16_t)d->W(1));
+    d->W(1) = satsw((int16_t)d->W(2) - (int16_t)d->W(3));
+    XMM_ONLY(d->W(2) = satsw((int16_t)d->W(4) - (int16_t)d->W(5)));
+    XMM_ONLY(d->W(3) = satsw((int16_t)d->W(6) - (int16_t)d->W(7)));
+    d->W((2 << SHIFT) + 0) = satsw((int16_t)s->W(0) - (int16_t)s->W(1));
+    d->W((2 << SHIFT) + 1) = satsw((int16_t)s->W(2) - (int16_t)s->W(3));
+    XMM_ONLY(d->W(6) = satsw((int16_t)s->W(4) - (int16_t)s->W(5)));
+    XMM_ONLY(d->W(7) = satsw((int16_t)s->W(6) - (int16_t)s->W(7)));
+}
+
+#define FABSB(_, x) ((x) > INT8_MAX  ? -(int8_t)(x) : (x))
+#define FABSW(_, x) ((x) > INT16_MAX ? -(int16_t)(x) : (x))
+#define FABSL(_, x) ((x) > INT32_MAX ? -(int32_t)(x) : (x))
+SSE_HELPER_B(helper_pabsb, FABSB)
+SSE_HELPER_W(helper_pabsw, FABSW)
+SSE_HELPER_L(helper_pabsd, FABSL)
+
+#define FMULHRSW(d, s) (((int16_t)(d) * (int16_t)(s) + 0x4000) >> 15)
+SSE_HELPER_W(helper_pmulhrsw, FMULHRSW)
+
+#define FSIGNB(d, s) ((s) <= INT8_MAX  ? ((s) ? (d) : 0) : -(int8_t)(d))
+#define FSIGNW(d, s) ((s) <= INT16_MAX ? ((s) ? (d) : 0) : -(int16_t)(d))
+#define FSIGNL(d, s) ((s) <= INT32_MAX ? ((s) ? (d) : 0) : -(int32_t)(d))
+SSE_HELPER_B(helper_psignb, FSIGNB)
+SSE_HELPER_W(helper_psignw, FSIGNW)
+SSE_HELPER_L(helper_psignd, FSIGNL)
+
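+/* PALIGNR: concatenate d above s and shift the pair right by 'shift'
+ * bytes, keeping the low register-width bytes. */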
+void glue(helper_palignr, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
+                                  int32_t shift)
+{
+    Reg r;
+
+    /* XXX could be checked during translation */
+    if (shift >= (16 << SHIFT)) {
+        r.Q(0) = 0;
+        XMM_ONLY(r.Q(1) = 0);
+    } else {
+        shift <<= 3;
+#define SHR(v, i) (i < 64 && i > -64 ? i > 0 ? v >> (i) : (v << -(i)) : 0)
+#if SHIFT == 0
+        r.Q(0) = SHR(s->Q(0), shift - 0) |
+            SHR(d->Q(0), shift -  64);
+#else
+        r.Q(0) = SHR(s->Q(0), shift - 0) |
+            SHR(s->Q(1), shift -  64) |
+            SHR(d->Q(0), shift - 128) |
+            SHR(d->Q(1), shift - 192);
+        r.Q(1) = SHR(s->Q(0), shift + 64) |
+            SHR(s->Q(1), shift -   0) |
+            SHR(d->Q(0), shift -  64) |
+            SHR(d->Q(1), shift - 128);
+#endif
+#undef SHR
+    }
+
+    *d = r;
+}
+
+#define XMM0 (env->xmm_regs[0])
+
+#if SHIFT == 1
+#define SSE_HELPER_V(name, elem, num, F)                                \
+    void glue(name, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)           \
+    {                                                                   \
+        d->elem(0) = F(d->elem(0), s->elem(0), XMM0.elem(0));           \
+        d->elem(1) = F(d->elem(1), s->elem(1), XMM0.elem(1));           \
+        if (num > 2) {                                                  \
+            d->elem(2) = F(d->elem(2), s->elem(2), XMM0.elem(2));       \
+            d->elem(3) = F(d->elem(3), s->elem(3), XMM0.elem(3));       \
+            if (num > 4) {                                              \
+                d->elem(4) = F(d->elem(4), s->elem(4), XMM0.elem(4));   \
+                d->elem(5) = F(d->elem(5), s->elem(5), XMM0.elem(5));   \
+                d->elem(6) = F(d->elem(6), s->elem(6), XMM0.elem(6));   \
+                d->elem(7) = F(d->elem(7), s->elem(7), XMM0.elem(7));   \
+                if (num > 8) {                                          \
+                    d->elem(8) = F(d->elem(8), s->elem(8), XMM0.elem(8)); \
+                    d->elem(9) = F(d->elem(9), s->elem(9), XMM0.elem(9)); \
+                    d->elem(10) = F(d->elem(10), s->elem(10), XMM0.elem(10)); \
+                    d->elem(11) = F(d->elem(11), s->elem(11), XMM0.elem(11)); \
+                    d->elem(12) = F(d->elem(12), s->elem(12), XMM0.elem(12)); \
+                    d->elem(13) = F(d->elem(13), s->elem(13), XMM0.elem(13)); \
+                    d->elem(14) = F(d->elem(14), s->elem(14), XMM0.elem(14)); \
+                    d->elem(15) = F(d->elem(15), s->elem(15), XMM0.elem(15)); \
+                }                                                       \
+            }                                                           \
+        }                                                               \
+    }
+
+#define SSE_HELPER_I(name, elem, num, F)                                \
+    void glue(name, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, uint32_t imm) \
+    {                                                                   \
+        d->elem(0) = F(d->elem(0), s->elem(0), ((imm >> 0) & 1));       \
+        d->elem(1) = F(d->elem(1), s->elem(1), ((imm >> 1) & 1));       \
+        if (num > 2) {                                                  \
+            d->elem(2) = F(d->elem(2), s->elem(2), ((imm >> 2) & 1));   \
+            d->elem(3) = F(d->elem(3), s->elem(3), ((imm >> 3) & 1));   \
+            if (num > 4) {                                              \
+                d->elem(4) = F(d->elem(4), s->elem(4), ((imm >> 4) & 1)); \
+                d->elem(5) = F(d->elem(5), s->elem(5), ((imm >> 5) & 1)); \
+                d->elem(6) = F(d->elem(6), s->elem(6), ((imm >> 6) & 1)); \
+                d->elem(7) = F(d->elem(7), s->elem(7), ((imm >> 7) & 1)); \
+                if (num > 8) {                                          \
+                    d->elem(8) = F(d->elem(8), s->elem(8), ((imm >> 8) & 1)); \
+                    d->elem(9) = F(d->elem(9), s->elem(9), ((imm >> 9) & 1)); \
+                    d->elem(10) = F(d->elem(10), s->elem(10),           \
+                                    ((imm >> 10) & 1));                 \
+                    d->elem(11) = F(d->elem(11), s->elem(11),           \
+                                    ((imm >> 11) & 1));                 \
+                    d->elem(12) = F(d->elem(12), s->elem(12),           \
+                                    ((imm >> 12) & 1));                 \
+                    d->elem(13) = F(d->elem(13), s->elem(13),           \
+                                    ((imm >> 13) & 1));                 \
+                    d->elem(14) = F(d->elem(14), s->elem(14),           \
+                                    ((imm >> 14) & 1));                 \
+                    d->elem(15) = F(d->elem(15), s->elem(15),           \
+                                    ((imm >> 15) & 1));                 \
+                }                                                       \
+            }                                                           \
+        }                                                               \
+    }
+
+/* SSE4.1 op helpers */
+#define FBLENDVB(d, s, m) ((m & 0x80) ? s : d)
+#define FBLENDVPS(d, s, m) ((m & 0x80000000) ? s : d)
+#define FBLENDVPD(d, s, m) ((m & 0x8000000000000000LL) ? s : d)
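+/* The variable blends pick each destination element from s or d based
+   on the sign bit of the corresponding element of the implicit mask
+   register XMM0. */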
+SSE_HELPER_V(helper_pblendvb, B, 16, FBLENDVB)
+SSE_HELPER_V(helper_blendvps, L, 4, FBLENDVPS)
+SSE_HELPER_V(helper_blendvpd, Q, 2, FBLENDVPD)
+
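+/* PTEST is a non-destructive AND test: ZF is set when s & d is all
+   zeroes and CF when s & ~d is all zeroes, both reported via CC_SRC. */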
+void glue(helper_ptest, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+    uint64_t zf = (s->Q(0) &  d->Q(0)) | (s->Q(1) &  d->Q(1));
+    uint64_t cf = (s->Q(0) & ~d->Q(0)) | (s->Q(1) & ~d->Q(1));
+
+    CC_SRC = (zf ? 0 : CC_Z) | (cf ? 0 : CC_C);
+}
+
+#define SSE_HELPER_F(name, elem, num, F)        \
+    void glue(name, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)     \
+    {                                           \
+        d->elem(0) = F(0);                      \
+        d->elem(1) = F(1);                      \
+        if (num > 2) {                          \
+            d->elem(2) = F(2);                  \
+            d->elem(3) = F(3);                  \
+            if (num > 4) {                      \
+                d->elem(4) = F(4);              \
+                d->elem(5) = F(5);              \
+                d->elem(6) = F(6);              \
+                d->elem(7) = F(7);              \
+            }                                   \
+        }                                       \
+    }
+
+SSE_HELPER_F(helper_pmovsxbw, W, 8, (int8_t) s->B)
+SSE_HELPER_F(helper_pmovsxbd, L, 4, (int8_t) s->B)
+SSE_HELPER_F(helper_pmovsxbq, Q, 2, (int8_t) s->B)
+SSE_HELPER_F(helper_pmovsxwd, L, 4, (int16_t) s->W)
+SSE_HELPER_F(helper_pmovsxwq, Q, 2, (int16_t) s->W)
+SSE_HELPER_F(helper_pmovsxdq, Q, 2, (int32_t) s->L)
+SSE_HELPER_F(helper_pmovzxbw, W, 8, s->B)
+SSE_HELPER_F(helper_pmovzxbd, L, 4, s->B)
+SSE_HELPER_F(helper_pmovzxbq, Q, 2, s->B)
+SSE_HELPER_F(helper_pmovzxwd, L, 4, s->W)
+SSE_HELPER_F(helper_pmovzxwq, Q, 2, s->W)
+SSE_HELPER_F(helper_pmovzxdq, Q, 2, s->L)
+
+void glue(helper_pmuldq, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+    d->Q(0) = (int64_t)(int32_t) d->L(0) * (int32_t) s->L(0);
+    d->Q(1) = (int64_t)(int32_t) d->L(2) * (int32_t) s->L(2);
+}
+
+#define FCMPEQQ(d, s) (d == s ? -1 : 0)
+SSE_HELPER_Q(helper_pcmpeqq, FCMPEQQ)
+
+void glue(helper_packusdw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+    d->W(0) = satuw((int32_t) d->L(0));
+    d->W(1) = satuw((int32_t) d->L(1));
+    d->W(2) = satuw((int32_t) d->L(2));
+    d->W(3) = satuw((int32_t) d->L(3));
+    d->W(4) = satuw((int32_t) s->L(0));
+    d->W(5) = satuw((int32_t) s->L(1));
+    d->W(6) = satuw((int32_t) s->L(2));
+    d->W(7) = satuw((int32_t) s->L(3));
+}
+
+#define FMINSB(d, s) MIN((int8_t)d, (int8_t)s)
+#define FMINSD(d, s) MIN((int32_t)d, (int32_t)s)
+#define FMAXSB(d, s) MAX((int8_t)d, (int8_t)s)
+#define FMAXSD(d, s) MAX((int32_t)d, (int32_t)s)
+SSE_HELPER_B(helper_pminsb, FMINSB)
+SSE_HELPER_L(helper_pminsd, FMINSD)
+SSE_HELPER_W(helper_pminuw, MIN)
+SSE_HELPER_L(helper_pminud, MIN)
+SSE_HELPER_B(helper_pmaxsb, FMAXSB)
+SSE_HELPER_L(helper_pmaxsd, FMAXSD)
+SSE_HELPER_W(helper_pmaxuw, MAX)
+SSE_HELPER_L(helper_pmaxud, MAX)
+
+#define FMULLD(d, s) ((int32_t)d * (int32_t)s)
+SSE_HELPER_L(helper_pmulld, FMULLD)
+
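+/* Horizontal minimum of the eight unsigned source words: word 0 of the
+   destination receives the smallest value, word 1 its index, and the
+   remaining lanes are zeroed. */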
+void glue(helper_phminposuw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+    int idx = 0;
+
+    if (s->W(1) < s->W(idx)) {
+        idx = 1;
+    }
+    if (s->W(2) < s->W(idx)) {
+        idx = 2;
+    }
+    if (s->W(3) < s->W(idx)) {
+        idx = 3;
+    }
+    if (s->W(4) < s->W(idx)) {
+        idx = 4;
+    }
+    if (s->W(5) < s->W(idx)) {
+        idx = 5;
+    }
+    if (s->W(6) < s->W(idx)) {
+        idx = 6;
+    }
+    if (s->W(7) < s->W(idx)) {
+        idx = 7;
+    }
+
+    d->Q(1) = 0;
+    d->L(1) = 0;
+    d->W(1) = idx;
+    d->W(0) = s->W(idx);
+}
+
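+/* For the ROUNDxx helpers, immediate bit 2 keeps the current MXCSR
+   rounding mode; otherwise bits 1:0 select round-to-nearest-even,
+   down, up or truncate.  Bit 3 would suppress precision exceptions,
+   see the #if 0 blocks below. */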
+void glue(helper_roundps, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
+                                  uint32_t mode)
+{
+    signed char prev_rounding_mode;
+
+    prev_rounding_mode = env->sse_status.float_rounding_mode;
+    if (!(mode & (1 << 2))) {
+        switch (mode & 3) {
+        case 0:
+            set_float_rounding_mode(float_round_nearest_even, &env->sse_status);
+            break;
+        case 1:
+            set_float_rounding_mode(float_round_down, &env->sse_status);
+            break;
+        case 2:
+            set_float_rounding_mode(float_round_up, &env->sse_status);
+            break;
+        case 3:
+            set_float_rounding_mode(float_round_to_zero, &env->sse_status);
+            break;
+        }
+    }
+
+    d->ZMM_S(0) = float32_round_to_int(s->ZMM_S(0), &env->sse_status);
+    d->ZMM_S(1) = float32_round_to_int(s->ZMM_S(1), &env->sse_status);
+    d->ZMM_S(2) = float32_round_to_int(s->ZMM_S(2), &env->sse_status);
+    d->ZMM_S(3) = float32_round_to_int(s->ZMM_S(3), &env->sse_status);
+
+#if 0 /* TODO */
+    if (mode & (1 << 3)) {
+        set_float_exception_flags(get_float_exception_flags(&env->sse_status) &
+                                  ~float_flag_inexact,
+                                  &env->sse_status);
+    }
+#endif
+    env->sse_status.float_rounding_mode = prev_rounding_mode;
+}
+
+void glue(helper_roundpd, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
+                                  uint32_t mode)
+{
+    signed char prev_rounding_mode;
+
+    prev_rounding_mode = env->sse_status.float_rounding_mode;
+    if (!(mode & (1 << 2))) {
+        switch (mode & 3) {
+        case 0:
+            set_float_rounding_mode(float_round_nearest_even, &env->sse_status);
+            break;
+        case 1:
+            set_float_rounding_mode(float_round_down, &env->sse_status);
+            break;
+        case 2:
+            set_float_rounding_mode(float_round_up, &env->sse_status);
+            break;
+        case 3:
+            set_float_rounding_mode(float_round_to_zero, &env->sse_status);
+            break;
+        }
+    }
+
+    d->ZMM_D(0) = float64_round_to_int(s->ZMM_D(0), &env->sse_status);
+    d->ZMM_D(1) = float64_round_to_int(s->ZMM_D(1), &env->sse_status);
+
+#if 0 /* TODO */
+    if (mode & (1 << 3)) {
+        set_float_exception_flags(get_float_exception_flags(&env->sse_status) &
+                                  ~float_flag_inexact,
+                                  &env->sse_status);
+    }
+#endif
+    env->sse_status.float_rounding_mode = prev_rounding_mode;
+}
+
+void glue(helper_roundss, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
+                                  uint32_t mode)
+{
+    signed char prev_rounding_mode;
+
+    prev_rounding_mode = env->sse_status.float_rounding_mode;
+    if (!(mode & (1 << 2))) {
+        switch (mode & 3) {
+        case 0:
+            set_float_rounding_mode(float_round_nearest_even, &env->sse_status);
+            break;
+        case 1:
+            set_float_rounding_mode(float_round_down, &env->sse_status);
+            break;
+        case 2:
+            set_float_rounding_mode(float_round_up, &env->sse_status);
+            break;
+        case 3:
+            set_float_rounding_mode(float_round_to_zero, &env->sse_status);
+            break;
+        }
+    }
+
+    d->ZMM_S(0) = float32_round_to_int(s->ZMM_S(0), &env->sse_status);
+
+#if 0 /* TODO */
+    if (mode & (1 << 3)) {
+        set_float_exception_flags(get_float_exception_flags(&env->sse_status) &
+                                  ~float_flag_inexact,
+                                  &env->sse_status);
+    }
+#endif
+    env->sse_status.float_rounding_mode = prev_rounding_mode;
+}
+
+void glue(helper_roundsd, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
+                                  uint32_t mode)
+{
+    signed char prev_rounding_mode;
+
+    prev_rounding_mode = env->sse_status.float_rounding_mode;
+    if (!(mode & (1 << 2))) {
+        switch (mode & 3) {
+        case 0:
+            set_float_rounding_mode(float_round_nearest_even, &env->sse_status);
+            break;
+        case 1:
+            set_float_rounding_mode(float_round_down, &env->sse_status);
+            break;
+        case 2:
+            set_float_rounding_mode(float_round_up, &env->sse_status);
+            break;
+        case 3:
+            set_float_rounding_mode(float_round_to_zero, &env->sse_status);
+            break;
+        }
+    }
+
+    d->ZMM_D(0) = float64_round_to_int(s->ZMM_D(0), &env->sse_status);
+
+#if 0 /* TODO */
+    if (mode & (1 << 3)) {
+        set_float_exception_flags(get_float_exception_flags(&env->sse_status) &
+                                  ~float_flag_inexact,
+                                  &env->sse_status);
+    }
+#endif
+    env->sse_status.float_rounding_mode = prev_rounding_mode;
+}
+
+#define FBLENDP(d, s, m) (m ? s : d)
+SSE_HELPER_I(helper_blendps, L, 4, FBLENDP)
+SSE_HELPER_I(helper_blendpd, Q, 2, FBLENDP)
+SSE_HELPER_I(helper_pblendw, W, 8, FBLENDP)
+
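+/* DPPS: the high nibble of the mask selects which per-lane products
+   enter the dot product, the low nibble selects which result lanes
+   receive it; e.g. mask 0xf1 sums all four products into lane 0 and
+   zeroes lanes 1-3. */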
+void glue(helper_dpps, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, uint32_t mask)
+{
+    float32 iresult = float32_zero;
+
+    if (mask & (1 << 4)) {
+        iresult = float32_add(iresult,
+                              float32_mul(d->ZMM_S(0), s->ZMM_S(0),
+                                          &env->sse_status),
+                              &env->sse_status);
+    }
+    if (mask & (1 << 5)) {
+        iresult = float32_add(iresult,
+                              float32_mul(d->ZMM_S(1), s->ZMM_S(1),
+                                          &env->sse_status),
+                              &env->sse_status);
+    }
+    if (mask & (1 << 6)) {
+        iresult = float32_add(iresult,
+                              float32_mul(d->ZMM_S(2), s->ZMM_S(2),
+                                          &env->sse_status),
+                              &env->sse_status);
+    }
+    if (mask & (1 << 7)) {
+        iresult = float32_add(iresult,
+                              float32_mul(d->ZMM_S(3), s->ZMM_S(3),
+                                          &env->sse_status),
+                              &env->sse_status);
+    }
+    d->ZMM_S(0) = (mask & (1 << 0)) ? iresult : float32_zero;
+    d->ZMM_S(1) = (mask & (1 << 1)) ? iresult : float32_zero;
+    d->ZMM_S(2) = (mask & (1 << 2)) ? iresult : float32_zero;
+    d->ZMM_S(3) = (mask & (1 << 3)) ? iresult : float32_zero;
+}
+
+void glue(helper_dppd, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, uint32_t mask)
+{
+    float64 iresult = float64_zero;
+
+    if (mask & (1 << 4)) {
+        iresult = float64_add(iresult,
+                              float64_mul(d->ZMM_D(0), s->ZMM_D(0),
+                                          &env->sse_status),
+                              &env->sse_status);
+    }
+    if (mask & (1 << 5)) {
+        iresult = float64_add(iresult,
+                              float64_mul(d->ZMM_D(1), s->ZMM_D(1),
+                                          &env->sse_status),
+                              &env->sse_status);
+    }
+    d->ZMM_D(0) = (mask & (1 << 0)) ? iresult : float64_zero;
+    d->ZMM_D(1) = (mask & (1 << 1)) ? iresult : float64_zero;
+}
+
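+/* MPSADBW: offset bits 1:0 pick a 4-byte group in s (s0 = 0, 4, 8 or
+   12) and bit 2 picks the start byte in d; each of the eight result
+   words is the sum of absolute differences between that group and a
+   sliding 4-byte window of d. */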
+void glue(helper_mpsadbw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
+                                  uint32_t offset)
+{
+    int s0 = (offset & 3) << 2;
+    int d0 = (offset & 4) << 0;
+    int i;
+    Reg r;
+
+    for (i = 0; i < 8; i++, d0++) {
+        r.W(i) = 0;
+        r.W(i) += abs1(d->B(d0 + 0) - s->B(s0 + 0));
+        r.W(i) += abs1(d->B(d0 + 1) - s->B(s0 + 1));
+        r.W(i) += abs1(d->B(d0 + 2) - s->B(s0 + 2));
+        r.W(i) += abs1(d->B(d0 + 3) - s->B(s0 + 3));
+    }
+
+    *d = r;
+}
+
+/* SSE4.2 op helpers */
+#define FCMPGTQ(d, s) ((int64_t)d > (int64_t)s ? -1 : 0)
+SSE_HELPER_Q(helper_pcmpgtq, FCMPGTQ)
+
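+/* PCMPxSTRx length helpers: the explicit-length forms (pcmp_elen)
+   clamp |RAX|/|RDX| to the 16-byte or 8-word vector capacity, while
+   the implicit-length forms (pcmp_ilen) scan for the first zero
+   element. */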
+static inline int pcmp_elen(CPUX86State *env, int reg, uint32_t ctrl)
+{
+    int val;
+
+    /* The presence of REX.W is indicated by a set bit above bit 7 of ctrl */
+    if (ctrl >> 8) {
+        val = abs1((int64_t)env->regs[reg]);
+    } else {
+        val = abs1((int32_t)env->regs[reg]);
+    }
+
+    if (ctrl & 1) {
+        if (val > 8) {
+            return 8;
+        }
+    } else {
+        if (val > 16) {
+            return 16;
+        }
+    }
+    return val;
+}
+
+static inline int pcmp_ilen(Reg *r, uint8_t ctrl)
+{
+    int val = 0;
+
+    if (ctrl & 1) {
+        while (val < 8 && r->W(val)) {
+            val++;
+        }
+    } else {
+        while (val < 16 && r->B(val)) {
+            val++;
+        }
+    }
+
+    return val;
+}
+
+static inline int pcmp_val(Reg *r, uint8_t ctrl, int i)
+{
+    switch ((ctrl >> 0) & 3) {
+    case 0:
+        return r->B(i);
+    case 1:
+        return r->W(i);
+    case 2:
+        return (int8_t)r->B(i);
+    case 3:
+    default:
+        return (int16_t)r->W(i);
+    }
+}
+
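+/* Common PCMPxSTRx aggregation: ctrl bits 1:0 give the element format,
+   bits 3:2 the aggregation mode (0 = equal any, 1 = ranges, 2 = equal
+   each, 3 = equal ordered) and bits 5:4 the result polarity; ZF/SF
+   report whether either string ends before the vector does. */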
+static inline unsigned pcmpxstrx(CPUX86State *env, Reg *d, Reg *s,
+                                 int8_t ctrl, int valids, int validd)
+{
+    unsigned int res = 0;
+    int v;
+    int j, i;
+    int upper = (ctrl & 1) ? 7 : 15;
+
+    valids--;
+    validd--;
+
+    CC_SRC = (valids < upper ? CC_Z : 0) | (validd < upper ? CC_S : 0);
+
+    switch ((ctrl >> 2) & 3) {
+    case 0:
+        for (j = valids; j >= 0; j--) {
+            res <<= 1;
+            v = pcmp_val(s, ctrl, j);
+            for (i = validd; i >= 0; i--) {
+                res |= (v == pcmp_val(d, ctrl, i));
+            }
+        }
+        break;
+    case 1:
+        for (j = valids; j >= 0; j--) {
+            res <<= 1;
+            v = pcmp_val(s, ctrl, j);
+            for (i = ((validd - 1) | 1); i >= 0; i -= 2) {
+                res |= (pcmp_val(d, ctrl, i - 0) >= v &&
+                        pcmp_val(d, ctrl, i - 1) <= v);
+            }
+        }
+        break;
+    case 2:
+        res = (1 << (upper - MAX(valids, validd))) - 1;
+        res <<= MAX(valids, validd) - MIN(valids, validd);
+        for (i = MIN(valids, validd); i >= 0; i--) {
+            res <<= 1;
+            v = pcmp_val(s, ctrl, i);
+            res |= (v == pcmp_val(d, ctrl, i));
+        }
+        break;
+    case 3:
+        for (j = valids; j >= 0; j--) {
+            res <<= 1;
+            v = 1;
+            for (i = MIN(valids - j, validd); i >= 0; i--) {
+                v &= (pcmp_val(s, ctrl, i + j) == pcmp_val(d, ctrl, i));
+            }
+            res |= v;
+        }
+        break;
+    }
+
+    switch ((ctrl >> 4) & 3) {
+    case 1:
+        res ^= (2 << upper) - 1;
+        break;
+    case 3:
+        res ^= (1 << (valids + 1)) - 1;
+        break;
+    }
+
+    if (res) {
+        CC_SRC |= CC_C;
+    }
+    if (res & 1) {
+        CC_SRC |= CC_O;
+    }
+
+    return res;
+}
+
+void glue(helper_pcmpestri, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
+                                    uint32_t ctrl)
+{
+    unsigned int res = pcmpxstrx(env, d, s, ctrl,
+                                 pcmp_elen(env, R_EDX, ctrl),
+                                 pcmp_elen(env, R_EAX, ctrl));
+
+    if (res) {
+        env->regs[R_ECX] = (ctrl & (1 << 6)) ? 31 - clz32(res) : ctz32(res);
+    } else {
+        env->regs[R_ECX] = 16 >> (ctrl & (1 << 0));
+    }
+}
+
+void glue(helper_pcmpestrm, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
+                                    uint32_t ctrl)
+{
+    int i;
+    unsigned int res = pcmpxstrx(env, d, s, ctrl,
+                                 pcmp_elen(env, R_EDX, ctrl),
+                                 pcmp_elen(env, R_EAX, ctrl));
+
+    if ((ctrl >> 6) & 1) {
+        if (ctrl & 1) {
+            for (i = 0; i < 8; i++, res >>= 1) {
+                env->xmm_regs[0].W(i) = (res & 1) ? ~0 : 0;
+            }
+        } else {
+            for (i = 0; i < 16; i++, res >>= 1) {
+                env->xmm_regs[0].B(i) = (res & 1) ? ~0 : 0;
+            }
+        }
+    } else {
+        env->xmm_regs[0].Q(1) = 0;
+        env->xmm_regs[0].Q(0) = res;
+    }
+}
+
+void glue(helper_pcmpistri, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
+                                    uint32_t ctrl)
+{
+    unsigned int res = pcmpxstrx(env, d, s, ctrl,
+                                 pcmp_ilen(s, ctrl),
+                                 pcmp_ilen(d, ctrl));
+
+    if (res) {
+        env->regs[R_ECX] = (ctrl & (1 << 6)) ? 31 - clz32(res) : ctz32(res);
+    } else {
+        env->regs[R_ECX] = 16 >> (ctrl & (1 << 0));
+    }
+}
+
+void glue(helper_pcmpistrm, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
+                                    uint32_t ctrl)
+{
+    int i;
+    unsigned int res = pcmpxstrx(env, d, s, ctrl,
+                                 pcmp_ilen(s, ctrl),
+                                 pcmp_ilen(d, ctrl));
+
+    if ((ctrl >> 6) & 1) {
+        if (ctrl & 1) {
+            for (i = 0; i < 8; i++, res >>= 1) {
+                env->xmm_regs[0].W(i) = (res & 1) ? ~0 : 0;
+            }
+        } else {
+            for (i = 0; i < 16; i++, res >>= 1) {
+                env->xmm_regs[0].B(i) = (res & 1) ? ~0 : 0;
+            }
+        }
+    } else {
+        env->xmm_regs[0].Q(1) = 0;
+        env->xmm_regs[0].Q(0) = res;
+    }
+}
+
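+/* 0x1edc6f41 is the CRC-32C (Castagnoli) polynomial used by the SSE4.2
+   CRC32 instruction; the right-shifting loop below works with its
+   bit-reversed form. */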
+#define CRCPOLY        0x1edc6f41
+#define CRCPOLY_BITREV 0x82f63b78
+target_ulong helper_crc32(uint32_t crc1, target_ulong msg, uint32_t len)
+{
+    target_ulong crc = (msg & ((target_ulong) -1 >>
+                               (TARGET_LONG_BITS - len))) ^ crc1;
+
+    while (len--) {
+        crc = (crc >> 1) ^ ((crc & 1) ? CRCPOLY_BITREV : 0);
+    }
+
+    return crc;
+}
+
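+/* Classic SWAR population count: POPMASK(i) expands to the alternating
+   masks 0x5555... (i = 0), 0x3333... (i = 1), 0x0f0f... (i = 2), etc.,
+   and each POPCOUNT step adds adjacent 2^i-bit counters in parallel. */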
+#define POPMASK(i)     ((target_ulong) -1 / ((1LL << (1 << i)) + 1))
+#define POPCOUNT(n, i) ((n & POPMASK(i)) + ((n >> (1 << i)) & POPMASK(i)))
+target_ulong helper_popcnt(CPUX86State *env, target_ulong n, uint32_t type)
+{
+    CC_SRC = n ? 0 : CC_Z;
+
+    n = POPCOUNT(n, 0);
+    n = POPCOUNT(n, 1);
+    n = POPCOUNT(n, 2);
+    n = POPCOUNT(n, 3);
+    if (type == 1) {
+        return n & 0xff;
+    }
+
+    n = POPCOUNT(n, 4);
+#ifndef TARGET_X86_64
+    return n;
+#else
+    if (type == 2) {
+        return n & 0xff;
+    }
+
+    return POPCOUNT(n, 5);
+#endif
+}
+
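+/* Carry-less (GF(2)) multiplication of the quadwords selected by ctrl
+   bits 0 and 4, computed as a 64x64 -> 128 bit shift-and-XOR loop;
+   e.g. clmul(0x3, 0x3) = 0x5, since (x + 1)^2 = x^2 + 1 over GF(2). */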
+void glue(helper_pclmulqdq, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
+                                    uint32_t ctrl)
+{
+    uint64_t ah, al, b, resh, resl;
+
+    ah = 0;
+    al = d->Q((ctrl & 1) != 0);
+    b = s->Q((ctrl & 16) != 0);
+    resh = resl = 0;
+
+    while (b) {
+        if (b & 1) {
+            resl ^= al;
+            resh ^= ah;
+        }
+        ah = (ah << 1) | (al >> 63);
+        al <<= 1;
+        b >>= 1;
+    }
+
+    d->Q(0) = resl;
+    d->Q(1) = resh;
+}
+
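+/* The AES round helpers use the standard T-table formulation: the
+   AES_Td and AES_Te tables combine (Inv)SubBytes with (Inv)MixColumns,
+   the AES_ishifts/AES_shifts indirection performs (Inv)ShiftRows, the
+   final XOR is AddRoundKey, and the "last" variants use the plain
+   S-boxes because the final round omits MixColumns. */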
+void glue(helper_aesdec, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+    int i;
+    Reg st = *d;
+    Reg rk = *s;
+
+    for (i = 0 ; i < 4 ; i++) {
+        d->L(i) = rk.L(i) ^ bswap32(AES_Td0[st.B(AES_ishifts[4*i+0])] ^
+                                    AES_Td1[st.B(AES_ishifts[4*i+1])] ^
+                                    AES_Td2[st.B(AES_ishifts[4*i+2])] ^
+                                    AES_Td3[st.B(AES_ishifts[4*i+3])]);
+    }
+}
+
+void glue(helper_aesdeclast, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+    int i;
+    Reg st = *d;
+    Reg rk = *s;
+
+    for (i = 0; i < 16; i++) {
+        d->B(i) = rk.B(i) ^ (AES_isbox[st.B(AES_ishifts[i])]);
+    }
+}
+
+void glue(helper_aesenc, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+    int i;
+    Reg st = *d;
+    Reg rk = *s;
+
+    for (i = 0 ; i < 4 ; i++) {
+        d->L(i) = rk.L(i) ^ bswap32(AES_Te0[st.B(AES_shifts[4*i+0])] ^
+                                    AES_Te1[st.B(AES_shifts[4*i+1])] ^
+                                    AES_Te2[st.B(AES_shifts[4*i+2])] ^
+                                    AES_Te3[st.B(AES_shifts[4*i+3])]);
+    }
+}
+
+void glue(helper_aesenclast, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+    int i;
+    Reg st = *d;
+    Reg rk = *s;
+
+    for (i = 0; i < 16; i++) {
+        d->B(i) = rk.B(i) ^ (AES_sbox[st.B(AES_shifts[i])]);
+    }
+}
+
+void glue(helper_aesimc, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+    int i;
+    Reg tmp = *s;
+
+    for (i = 0 ; i < 4 ; i++) {
+        d->L(i) = bswap32(AES_imc[tmp.B(4*i+0)][0] ^
+                          AES_imc[tmp.B(4*i+1)][1] ^
+                          AES_imc[tmp.B(4*i+2)][2] ^
+                          AES_imc[tmp.B(4*i+3)][3]);
+    }
+}
+
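+/* AESKEYGENASSIST: words 0 and 2 of the result are SubWord of source
+   words 1 and 3; words 1 and 3 additionally rotate that value and XOR
+   in the round constant passed in ctrl, as needed by AES key
+   expansion. */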
+void glue(helper_aeskeygenassist, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
+                                          uint32_t ctrl)
+{
+    int i;
+    Reg tmp = *s;
+
+    for (i = 0 ; i < 4 ; i++) {
+        d->B(i) = AES_sbox[tmp.B(i + 4)];
+        d->B(i + 8) = AES_sbox[tmp.B(i + 12)];
+    }
+    d->L(1) = (d->L(0) << 24 | d->L(0) >> 8) ^ ctrl;
+    d->L(3) = (d->L(2) << 24 | d->L(2) >> 8) ^ ctrl;
+}
+#endif
+
+#undef SHIFT
+#undef XMM_ONLY
+#undef Reg
+#undef B
+#undef W
+#undef L
+#undef Q
+#undef SUFFIX
diff --git a/target/i386/ops_sse_header.h b/target/i386/ops_sse_header.h
new file mode 100644
index 0000000000..64c5857cf4
--- /dev/null
+++ b/target/i386/ops_sse_header.h
@@ -0,0 +1,360 @@
+/*
+ *  MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4/PNI support
+ *
+ *  Copyright (c) 2005 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#if SHIFT == 0
+#define Reg MMXReg
+#define SUFFIX _mmx
+#else
+#define Reg ZMMReg
+#define SUFFIX _xmm
+#endif
+
+#define dh_alias_Reg ptr
+#define dh_alias_ZMMReg ptr
+#define dh_alias_MMXReg ptr
+#define dh_ctype_Reg Reg *
+#define dh_ctype_ZMMReg ZMMReg *
+#define dh_ctype_MMXReg MMXReg *
+#define dh_is_signed_Reg dh_is_signed_ptr
+#define dh_is_signed_ZMMReg dh_is_signed_ptr
+#define dh_is_signed_MMXReg dh_is_signed_ptr
+
+DEF_HELPER_3(glue(psrlw, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(psraw, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(psllw, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(psrld, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(psrad, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pslld, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(psrlq, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(psllq, SUFFIX), void, env, Reg, Reg)
+
+#if SHIFT == 1
+DEF_HELPER_3(glue(psrldq, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pslldq, SUFFIX), void, env, Reg, Reg)
+#endif
+
+#define SSE_HELPER_B(name, F)\
+    DEF_HELPER_3(glue(name, SUFFIX), void, env, Reg, Reg)
+
+#define SSE_HELPER_W(name, F)\
+    DEF_HELPER_3(glue(name, SUFFIX), void, env, Reg, Reg)
+
+#define SSE_HELPER_L(name, F)\
+    DEF_HELPER_3(glue(name, SUFFIX), void, env, Reg, Reg)
+
+#define SSE_HELPER_Q(name, F)\
+    DEF_HELPER_3(glue(name, SUFFIX), void, env, Reg, Reg)
+
+SSE_HELPER_B(paddb, FADD)
+SSE_HELPER_W(paddw, FADD)
+SSE_HELPER_L(paddl, FADD)
+SSE_HELPER_Q(paddq, FADD)
+
+SSE_HELPER_B(psubb, FSUB)
+SSE_HELPER_W(psubw, FSUB)
+SSE_HELPER_L(psubl, FSUB)
+SSE_HELPER_Q(psubq, FSUB)
+
+SSE_HELPER_B(paddusb, FADDUB)
+SSE_HELPER_B(paddsb, FADDSB)
+SSE_HELPER_B(psubusb, FSUBUB)
+SSE_HELPER_B(psubsb, FSUBSB)
+
+SSE_HELPER_W(paddusw, FADDUW)
+SSE_HELPER_W(paddsw, FADDSW)
+SSE_HELPER_W(psubusw, FSUBUW)
+SSE_HELPER_W(psubsw, FSUBSW)
+
+SSE_HELPER_B(pminub, FMINUB)
+SSE_HELPER_B(pmaxub, FMAXUB)
+
+SSE_HELPER_W(pminsw, FMINSW)
+SSE_HELPER_W(pmaxsw, FMAXSW)
+
+SSE_HELPER_Q(pand, FAND)
+SSE_HELPER_Q(pandn, FANDN)
+SSE_HELPER_Q(por, FOR)
+SSE_HELPER_Q(pxor, FXOR)
+
+SSE_HELPER_B(pcmpgtb, FCMPGTB)
+SSE_HELPER_W(pcmpgtw, FCMPGTW)
+SSE_HELPER_L(pcmpgtl, FCMPGTL)
+
+SSE_HELPER_B(pcmpeqb, FCMPEQ)
+SSE_HELPER_W(pcmpeqw, FCMPEQ)
+SSE_HELPER_L(pcmpeql, FCMPEQ)
+
+SSE_HELPER_W(pmullw, FMULLW)
+#if SHIFT == 0
+SSE_HELPER_W(pmulhrw, FMULHRW)
+#endif
+SSE_HELPER_W(pmulhuw, FMULHUW)
+SSE_HELPER_W(pmulhw, FMULHW)
+
+SSE_HELPER_B(pavgb, FAVG)
+SSE_HELPER_W(pavgw, FAVG)
+
+DEF_HELPER_3(glue(pmuludq, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pmaddwd, SUFFIX), void, env, Reg, Reg)
+
+DEF_HELPER_3(glue(psadbw, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_4(glue(maskmov, SUFFIX), void, env, Reg, Reg, tl)
+DEF_HELPER_2(glue(movl_mm_T0, SUFFIX), void, Reg, i32)
+#ifdef TARGET_X86_64
+DEF_HELPER_2(glue(movq_mm_T0, SUFFIX), void, Reg, i64)
+#endif
+
+#if SHIFT == 0
+DEF_HELPER_3(glue(pshufw, SUFFIX), void, Reg, Reg, int)
+#else
+DEF_HELPER_3(shufps, void, Reg, Reg, int)
+DEF_HELPER_3(shufpd, void, Reg, Reg, int)
+DEF_HELPER_3(glue(pshufd, SUFFIX), void, Reg, Reg, int)
+DEF_HELPER_3(glue(pshuflw, SUFFIX), void, Reg, Reg, int)
+DEF_HELPER_3(glue(pshufhw, SUFFIX), void, Reg, Reg, int)
+#endif
+
+#if SHIFT == 1
+/* FPU ops */
+/* XXX: not accurate */
+
+#define SSE_HELPER_S(name, F)                            \
+    DEF_HELPER_3(name ## ps, void, env, Reg, Reg)        \
+    DEF_HELPER_3(name ## ss, void, env, Reg, Reg)        \
+    DEF_HELPER_3(name ## pd, void, env, Reg, Reg)        \
+    DEF_HELPER_3(name ## sd, void, env, Reg, Reg)
+
+SSE_HELPER_S(add, FPU_ADD)
+SSE_HELPER_S(sub, FPU_SUB)
+SSE_HELPER_S(mul, FPU_MUL)
+SSE_HELPER_S(div, FPU_DIV)
+SSE_HELPER_S(min, FPU_MIN)
+SSE_HELPER_S(max, FPU_MAX)
+SSE_HELPER_S(sqrt, FPU_SQRT)
+
+DEF_HELPER_3(cvtps2pd, void, env, Reg, Reg)
+DEF_HELPER_3(cvtpd2ps, void, env, Reg, Reg)
+DEF_HELPER_3(cvtss2sd, void, env, Reg, Reg)
+DEF_HELPER_3(cvtsd2ss, void, env, Reg, Reg)
+DEF_HELPER_3(cvtdq2ps, void, env, Reg, Reg)
+DEF_HELPER_3(cvtdq2pd, void, env, Reg, Reg)
+DEF_HELPER_3(cvtpi2ps, void, env, ZMMReg, MMXReg)
+DEF_HELPER_3(cvtpi2pd, void, env, ZMMReg, MMXReg)
+DEF_HELPER_3(cvtsi2ss, void, env, ZMMReg, i32)
+DEF_HELPER_3(cvtsi2sd, void, env, ZMMReg, i32)
+
+#ifdef TARGET_X86_64
+DEF_HELPER_3(cvtsq2ss, void, env, ZMMReg, i64)
+DEF_HELPER_3(cvtsq2sd, void, env, ZMMReg, i64)
+#endif
+
+DEF_HELPER_3(cvtps2dq, void, env, ZMMReg, ZMMReg)
+DEF_HELPER_3(cvtpd2dq, void, env, ZMMReg, ZMMReg)
+DEF_HELPER_3(cvtps2pi, void, env, MMXReg, ZMMReg)
+DEF_HELPER_3(cvtpd2pi, void, env, MMXReg, ZMMReg)
+DEF_HELPER_2(cvtss2si, s32, env, ZMMReg)
+DEF_HELPER_2(cvtsd2si, s32, env, ZMMReg)
+#ifdef TARGET_X86_64
+DEF_HELPER_2(cvtss2sq, s64, env, ZMMReg)
+DEF_HELPER_2(cvtsd2sq, s64, env, ZMMReg)
+#endif
+
+DEF_HELPER_3(cvttps2dq, void, env, ZMMReg, ZMMReg)
+DEF_HELPER_3(cvttpd2dq, void, env, ZMMReg, ZMMReg)
+DEF_HELPER_3(cvttps2pi, void, env, MMXReg, ZMMReg)
+DEF_HELPER_3(cvttpd2pi, void, env, MMXReg, ZMMReg)
+DEF_HELPER_2(cvttss2si, s32, env, ZMMReg)
+DEF_HELPER_2(cvttsd2si, s32, env, ZMMReg)
+#ifdef TARGET_X86_64
+DEF_HELPER_2(cvttss2sq, s64, env, ZMMReg)
+DEF_HELPER_2(cvttsd2sq, s64, env, ZMMReg)
+#endif
+
+DEF_HELPER_3(rsqrtps, void, env, ZMMReg, ZMMReg)
+DEF_HELPER_3(rsqrtss, void, env, ZMMReg, ZMMReg)
+DEF_HELPER_3(rcpps, void, env, ZMMReg, ZMMReg)
+DEF_HELPER_3(rcpss, void, env, ZMMReg, ZMMReg)
+DEF_HELPER_3(extrq_r, void, env, ZMMReg, ZMMReg)
+DEF_HELPER_4(extrq_i, void, env, ZMMReg, int, int)
+DEF_HELPER_3(insertq_r, void, env, ZMMReg, ZMMReg)
+DEF_HELPER_4(insertq_i, void, env, ZMMReg, int, int)
+DEF_HELPER_3(haddps, void, env, ZMMReg, ZMMReg)
+DEF_HELPER_3(haddpd, void, env, ZMMReg, ZMMReg)
+DEF_HELPER_3(hsubps, void, env, ZMMReg, ZMMReg)
+DEF_HELPER_3(hsubpd, void, env, ZMMReg, ZMMReg)
+DEF_HELPER_3(addsubps, void, env, ZMMReg, ZMMReg)
+DEF_HELPER_3(addsubpd, void, env, ZMMReg, ZMMReg)
+
+#define SSE_HELPER_CMP(name, F)                           \
+    DEF_HELPER_3(name ## ps, void, env, Reg, Reg)         \
+    DEF_HELPER_3(name ## ss, void, env, Reg, Reg)         \
+    DEF_HELPER_3(name ## pd, void, env, Reg, Reg)         \
+    DEF_HELPER_3(name ## sd, void, env, Reg, Reg)
+
+SSE_HELPER_CMP(cmpeq, FPU_CMPEQ)
+SSE_HELPER_CMP(cmplt, FPU_CMPLT)
+SSE_HELPER_CMP(cmple, FPU_CMPLE)
+SSE_HELPER_CMP(cmpunord, FPU_CMPUNORD)
+SSE_HELPER_CMP(cmpneq, FPU_CMPNEQ)
+SSE_HELPER_CMP(cmpnlt, FPU_CMPNLT)
+SSE_HELPER_CMP(cmpnle, FPU_CMPNLE)
+SSE_HELPER_CMP(cmpord, FPU_CMPORD)
+
+DEF_HELPER_3(ucomiss, void, env, Reg, Reg)
+DEF_HELPER_3(comiss, void, env, Reg, Reg)
+DEF_HELPER_3(ucomisd, void, env, Reg, Reg)
+DEF_HELPER_3(comisd, void, env, Reg, Reg)
+DEF_HELPER_2(movmskps, i32, env, Reg)
+DEF_HELPER_2(movmskpd, i32, env, Reg)
+#endif
+
+DEF_HELPER_2(glue(pmovmskb, SUFFIX), i32, env, Reg)
+DEF_HELPER_3(glue(packsswb, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(packuswb, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(packssdw, SUFFIX), void, env, Reg, Reg)
+#define UNPCK_OP(base_name, base)                                       \
+    DEF_HELPER_3(glue(punpck ## base_name ## bw, SUFFIX), void, env, Reg, Reg) \
+    DEF_HELPER_3(glue(punpck ## base_name ## wd, SUFFIX), void, env, Reg, Reg) \
+    DEF_HELPER_3(glue(punpck ## base_name ## dq, SUFFIX), void, env, Reg, Reg)
+
+UNPCK_OP(l, 0)
+UNPCK_OP(h, 1)
+
+#if SHIFT == 1
+DEF_HELPER_3(glue(punpcklqdq, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(punpckhqdq, SUFFIX), void, env, Reg, Reg)
+#endif
+
+/* 3DNow! float ops */
+#if SHIFT == 0
+DEF_HELPER_3(pi2fd, void, env, MMXReg, MMXReg)
+DEF_HELPER_3(pi2fw, void, env, MMXReg, MMXReg)
+DEF_HELPER_3(pf2id, void, env, MMXReg, MMXReg)
+DEF_HELPER_3(pf2iw, void, env, MMXReg, MMXReg)
+DEF_HELPER_3(pfacc, void, env, MMXReg, MMXReg)
+DEF_HELPER_3(pfadd, void, env, MMXReg, MMXReg)
+DEF_HELPER_3(pfcmpeq, void, env, MMXReg, MMXReg)
+DEF_HELPER_3(pfcmpge, void, env, MMXReg, MMXReg)
+DEF_HELPER_3(pfcmpgt, void, env, MMXReg, MMXReg)
+DEF_HELPER_3(pfmax, void, env, MMXReg, MMXReg)
+DEF_HELPER_3(pfmin, void, env, MMXReg, MMXReg)
+DEF_HELPER_3(pfmul, void, env, MMXReg, MMXReg)
+DEF_HELPER_3(pfnacc, void, env, MMXReg, MMXReg)
+DEF_HELPER_3(pfpnacc, void, env, MMXReg, MMXReg)
+DEF_HELPER_3(pfrcp, void, env, MMXReg, MMXReg)
+DEF_HELPER_3(pfrsqrt, void, env, MMXReg, MMXReg)
+DEF_HELPER_3(pfsub, void, env, MMXReg, MMXReg)
+DEF_HELPER_3(pfsubr, void, env, MMXReg, MMXReg)
+DEF_HELPER_3(pswapd, void, env, MMXReg, MMXReg)
+#endif
+
+/* SSSE3 op helpers */
+DEF_HELPER_3(glue(phaddw, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(phaddd, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(phaddsw, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(phsubw, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(phsubd, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(phsubsw, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pabsb, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pabsw, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pabsd, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pmaddubsw, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pmulhrsw, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pshufb, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(psignb, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(psignw, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(psignd, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_4(glue(palignr, SUFFIX), void, env, Reg, Reg, s32)
+
+/* SSE4.1 op helpers */
+#if SHIFT == 1
+DEF_HELPER_3(glue(pblendvb, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(blendvps, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(blendvpd, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(ptest, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pmovsxbw, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pmovsxbd, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pmovsxbq, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pmovsxwd, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pmovsxwq, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pmovsxdq, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pmovzxbw, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pmovzxbd, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pmovzxbq, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pmovzxwd, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pmovzxwq, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pmovzxdq, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pmuldq, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pcmpeqq, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(packusdw, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pminsb, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pminsd, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pminuw, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pminud, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pmaxsb, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pmaxsd, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pmaxuw, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pmaxud, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pmulld, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(phminposuw, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_4(glue(roundps, SUFFIX), void, env, Reg, Reg, i32)
+DEF_HELPER_4(glue(roundpd, SUFFIX), void, env, Reg, Reg, i32)
+DEF_HELPER_4(glue(roundss, SUFFIX), void, env, Reg, Reg, i32)
+DEF_HELPER_4(glue(roundsd, SUFFIX), void, env, Reg, Reg, i32)
+DEF_HELPER_4(glue(blendps, SUFFIX), void, env, Reg, Reg, i32)
+DEF_HELPER_4(glue(blendpd, SUFFIX), void, env, Reg, Reg, i32)
+DEF_HELPER_4(glue(pblendw, SUFFIX), void, env, Reg, Reg, i32)
+DEF_HELPER_4(glue(dpps, SUFFIX), void, env, Reg, Reg, i32)
+DEF_HELPER_4(glue(dppd, SUFFIX), void, env, Reg, Reg, i32)
+DEF_HELPER_4(glue(mpsadbw, SUFFIX), void, env, Reg, Reg, i32)
+#endif
+
+/* SSE4.2 op helpers */
+#if SHIFT == 1
+DEF_HELPER_3(glue(pcmpgtq, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_4(glue(pcmpestri, SUFFIX), void, env, Reg, Reg, i32)
+DEF_HELPER_4(glue(pcmpestrm, SUFFIX), void, env, Reg, Reg, i32)
+DEF_HELPER_4(glue(pcmpistri, SUFFIX), void, env, Reg, Reg, i32)
+DEF_HELPER_4(glue(pcmpistrm, SUFFIX), void, env, Reg, Reg, i32)
+DEF_HELPER_3(crc32, tl, i32, tl, i32)
+DEF_HELPER_3(popcnt, tl, env, tl, i32)
+#endif
+
+/* AES-NI op helpers */
+#if SHIFT == 1
+DEF_HELPER_3(glue(aesdec, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(aesdeclast, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(aesenc, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(aesenclast, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(aesimc, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_4(glue(aeskeygenassist, SUFFIX), void, env, Reg, Reg, i32)
+DEF_HELPER_4(glue(pclmulqdq, SUFFIX), void, env, Reg, Reg, i32)
+#endif
+
+#undef SHIFT
+#undef Reg
+#undef SUFFIX
+
+#undef SSE_HELPER_B
+#undef SSE_HELPER_W
+#undef SSE_HELPER_L
+#undef SSE_HELPER_Q
+#undef SSE_HELPER_S
+#undef SSE_HELPER_CMP
+#undef UNPCK_OP
diff --git a/target/i386/seg_helper.c b/target/i386/seg_helper.c
new file mode 100644
index 0000000000..fb79f3180d
--- /dev/null
+++ b/target/i386/seg_helper.c
@@ -0,0 +1,2568 @@
+/*
+ *  x86 segmentation related helpers:
+ *  TSS, interrupts, system calls, jumps and call/task gates, descriptors
+ *
+ *  Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "qemu/log.h"
+#include "exec/helper-proto.h"
+#include "exec/exec-all.h"
+#include "exec/cpu_ldst.h"
+#include "exec/log.h"
+
+//#define DEBUG_PCALL
+
+#ifdef DEBUG_PCALL
+# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
+# define LOG_PCALL_STATE(cpu)                                  \
+    log_cpu_state_mask(CPU_LOG_PCALL, (cpu), CPU_DUMP_CCOP)
+#else
+# define LOG_PCALL(...) do { } while (0)
+# define LOG_PCALL_STATE(cpu) do { } while (0)
+#endif
+
+#ifdef CONFIG_USER_ONLY
+#define MEMSUFFIX _kernel
+#define DATA_SIZE 1
+#include "exec/cpu_ldst_useronly_template.h"
+
+#define DATA_SIZE 2
+#include "exec/cpu_ldst_useronly_template.h"
+
+#define DATA_SIZE 4
+#include "exec/cpu_ldst_useronly_template.h"
+
+#define DATA_SIZE 8
+#include "exec/cpu_ldst_useronly_template.h"
+#undef MEMSUFFIX
+#else
+#define CPU_MMU_INDEX (cpu_mmu_index_kernel(env))
+#define MEMSUFFIX _kernel
+#define DATA_SIZE 1
+#include "exec/cpu_ldst_template.h"
+
+#define DATA_SIZE 2
+#include "exec/cpu_ldst_template.h"
+
+#define DATA_SIZE 4
+#include "exec/cpu_ldst_template.h"
+
+#define DATA_SIZE 8
+#include "exec/cpu_ldst_template.h"
+#undef CPU_MMU_INDEX
+#undef MEMSUFFIX
+#endif
+
+/* return non-zero on error */
+static inline int load_segment_ra(CPUX86State *env, uint32_t *e1_ptr,
+                               uint32_t *e2_ptr, int selector,
+                               uintptr_t retaddr)
+{
+    SegmentCache *dt;
+    int index;
+    target_ulong ptr;
+
+    if (selector & 0x4) {
+        dt = &env->ldt;
+    } else {
+        dt = &env->gdt;
+    }
+    index = selector & ~7;
+    if ((index + 7) > dt->limit) {
+        return -1;
+    }
+    ptr = dt->base + index;
+    *e1_ptr = cpu_ldl_kernel_ra(env, ptr, retaddr);
+    *e2_ptr = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
+    return 0;
+}
+
+static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr,
+                               uint32_t *e2_ptr, int selector)
+{
+    return load_segment_ra(env, e1_ptr, e2_ptr, selector, 0);
+}
+
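+/* Decode a descriptor's 20-bit limit from e1[15:0] and e2[19:16]; with
+   the granularity bit set it is scaled to 4 KiB units, so a raw limit
+   of 0xfffff expands to 0xffffffff. */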
+static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
+{
+    unsigned int limit;
+
+    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
+    if (e2 & DESC_G_MASK) {
+        limit = (limit << 12) | 0xfff;
+    }
+    return limit;
+}
+
+static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
+{
+    return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
+}
+
+static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
+                                         uint32_t e2)
+{
+    sc->base = get_seg_base(e1, e2);
+    sc->limit = get_seg_limit(e1, e2);
+    sc->flags = e2;
+}
+
+/* init the segment cache in vm86 mode
+   (real-mode semantics: base = selector << 4) */
+static inline void load_seg_vm(CPUX86State *env, int seg, int selector)
+{
+    selector &= 0xffff;
+
+    cpu_x86_load_seg_cache(env, seg, selector, (selector << 4), 0xffff,
+                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
+                           DESC_A_MASK | (3 << DESC_DPL_SHIFT));
+}
+
+static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr,
+                                       uint32_t *esp_ptr, int dpl,
+                                       uintptr_t retaddr)
+{
+    X86CPU *cpu = x86_env_get_cpu(env);
+    int type, index, shift;
+
+#if 0
+    {
+        int i;
+        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
+        for (i = 0; i < env->tr.limit; i++) {
+            printf("%02x ", env->tr.base[i]);
+            if ((i & 7) == 7) {
+                printf("\n");
+            }
+        }
+        printf("\n");
+    }
+#endif
+
+    if (!(env->tr.flags & DESC_P_MASK)) {
+        cpu_abort(CPU(cpu), "invalid tss");
+    }
+    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
+    if ((type & 7) != 1) {
+        cpu_abort(CPU(cpu), "invalid tss type");
+    }
+    shift = type >> 3;
+    index = (dpl * 4 + 2) << shift;
+    if (index + (4 << shift) - 1 > env->tr.limit) {
+        raise_exception_err_ra(env, EXCP0A_TSS, env->tr.selector & 0xfffc, retaddr);
+    }
+    if (shift == 0) {
+        *esp_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index, retaddr);
+        *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 2, retaddr);
+    } else {
+        *esp_ptr = cpu_ldl_kernel_ra(env, env->tr.base + index, retaddr);
+        *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 4, retaddr);
+    }
+}
+
+static void tss_load_seg(CPUX86State *env, int seg_reg, int selector, int cpl,
+                         uintptr_t retaddr)
+{
+    uint32_t e1, e2;
+    int rpl, dpl;
+
+    if ((selector & 0xfffc) != 0) {
+        if (load_segment_ra(env, &e1, &e2, selector, retaddr) != 0) {
+            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
+        }
+        if (!(e2 & DESC_S_MASK)) {
+            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
+        }
+        rpl = selector & 3;
+        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+        if (seg_reg == R_CS) {
+            if (!(e2 & DESC_CS_MASK)) {
+                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
+            }
+            if (dpl != rpl) {
+                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
+            }
+        } else if (seg_reg == R_SS) {
+            /* SS must be writable data */
+            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
+                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
+            }
+            if (dpl != cpl || dpl != rpl) {
+                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
+            }
+        } else {
+            /* reject code segments that are not readable */
+            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) {
+                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
+            }
+            /* for data or non-conforming code segments, check the
+               privilege rights */
+            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
+                if (dpl < cpl || dpl < rpl) {
+                    raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
+                }
+            }
+        }
+        if (!(e2 & DESC_P_MASK)) {
+            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, retaddr);
+        }
+        cpu_x86_load_seg_cache(env, seg_reg, selector,
+                               get_seg_base(e1, e2),
+                               get_seg_limit(e1, e2),
+                               e2);
+    } else {
+        if (seg_reg == R_SS || seg_reg == R_CS) {
+            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
+        }
+    }
+}
+
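+/* Task-switch sources: JMP clears the outgoing TSS busy bit and sets
+   the incoming one; CALL keeps the old busy bit set, sets the new one,
+   stores a back link and sets NT; IRET clears the old busy bit and
+   clears NT in the saved flags. */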
+#define SWITCH_TSS_JMP  0
+#define SWITCH_TSS_IRET 1
+#define SWITCH_TSS_CALL 2
+
+/* XXX: restore CPU state in registers (PowerPC case) */
+static void switch_tss_ra(CPUX86State *env, int tss_selector,
+                          uint32_t e1, uint32_t e2, int source,
+                          uint32_t next_eip, uintptr_t retaddr)
+{
+    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
+    target_ulong tss_base;
+    uint32_t new_regs[8], new_segs[6];
+    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
+    uint32_t old_eflags, eflags_mask;
+    SegmentCache *dt;
+    int index;
+    target_ulong ptr;
+
+    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
+    LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type,
+              source);
+
+    /* if it is a task gate, read and load the TSS segment it points to */
+    if (type == 5) {
+        if (!(e2 & DESC_P_MASK)) {
+            raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
+        }
+        tss_selector = e1 >> 16;
+        if (tss_selector & 4) {
+            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
+        }
+        if (load_segment_ra(env, &e1, &e2, tss_selector, retaddr) != 0) {
+            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
+        }
+        if (e2 & DESC_S_MASK) {
+            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
+        }
+        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
+        if ((type & 7) != 1) {
+            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
+        }
+    }
+
+    if (!(e2 & DESC_P_MASK)) {
+        raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
+    }
+
+    if (type & 8) {
+        tss_limit_max = 103;
+    } else {
+        tss_limit_max = 43;
+    }
+    tss_limit = get_seg_limit(e1, e2);
+    tss_base = get_seg_base(e1, e2);
+    if ((tss_selector & 4) != 0 ||
+        tss_limit < tss_limit_max) {
+        raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
+    }
+    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
+    if (old_type & 8) {
+        old_tss_limit_max = 103;
+    } else {
+        old_tss_limit_max = 43;
+    }
+
+    /* read all the registers from the new TSS */
+    if (type & 8) {
+        /* 32 bit */
+        new_cr3 = cpu_ldl_kernel_ra(env, tss_base + 0x1c, retaddr);
+        new_eip = cpu_ldl_kernel_ra(env, tss_base + 0x20, retaddr);
+        new_eflags = cpu_ldl_kernel_ra(env, tss_base + 0x24, retaddr);
+        for (i = 0; i < 8; i++) {
+            new_regs[i] = cpu_ldl_kernel_ra(env, tss_base + (0x28 + i * 4),
+                                            retaddr);
+        }
+        for (i = 0; i < 6; i++) {
+            new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x48 + i * 4),
+                                             retaddr);
+        }
+        new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x60, retaddr);
+        new_trap = cpu_ldl_kernel_ra(env, tss_base + 0x64, retaddr);
+    } else {
+        /* 16 bit */
+        new_cr3 = 0;
+        new_eip = cpu_lduw_kernel_ra(env, tss_base + 0x0e, retaddr);
+        new_eflags = cpu_lduw_kernel_ra(env, tss_base + 0x10, retaddr);
+        for (i = 0; i < 8; i++) {
+            new_regs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x12 + i * 2),
+                                             retaddr) | 0xffff0000;
+        }
+        for (i = 0; i < 4; i++) {
+            new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x22 + i * 4),
+                                             retaddr);
+        }
+        new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x2a, retaddr);
+        new_segs[R_FS] = 0;
+        new_segs[R_GS] = 0;
+        new_trap = 0;
+    }
+    /* XXX: avoid a compiler warning; see chapters 12.2.5 and 13.2.4 of
+       http://support.amd.com/us/Processor_TechDocs/24593.pdf
+       for how to implement the TSS trap bit */
+    (void)new_trap;
+
+    /* NOTE: we must avoid memory exceptions during the task switch,
+       so we make dummy accesses beforehand */
+    /* XXX: this can still fail in some cases, so a bigger hack would be
+       necessary to validate the TLB after the accesses have been done */
+
+    v1 = cpu_ldub_kernel_ra(env, env->tr.base, retaddr);
+    v2 = cpu_ldub_kernel_ra(env, env->tr.base + old_tss_limit_max, retaddr);
+    cpu_stb_kernel_ra(env, env->tr.base, v1, retaddr);
+    cpu_stb_kernel_ra(env, env->tr.base + old_tss_limit_max, v2, retaddr);
+
+    /* clear the busy bit (the task switch is restartable) */
+    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
+        target_ulong ptr;
+        uint32_t e2;
+
+        ptr = env->gdt.base + (env->tr.selector & ~7);
+        e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
+        e2 &= ~DESC_TSS_BUSY_MASK;
+        cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
+    }
+    old_eflags = cpu_compute_eflags(env);
+    if (source == SWITCH_TSS_IRET) {
+        old_eflags &= ~NT_MASK;
+    }
+
+    /* save the current state in the old TSS */
+    if (type & 8) {
+        /* 32 bit */
+        cpu_stl_kernel_ra(env, env->tr.base + 0x20, next_eip, retaddr);
+        cpu_stl_kernel_ra(env, env->tr.base + 0x24, old_eflags, retaddr);
+        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 0 * 4), env->regs[R_EAX], retaddr);
+        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 1 * 4), env->regs[R_ECX], retaddr);
+        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 2 * 4), env->regs[R_EDX], retaddr);
+        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 3 * 4), env->regs[R_EBX], retaddr);
+        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 4 * 4), env->regs[R_ESP], retaddr);
+        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 5 * 4), env->regs[R_EBP], retaddr);
+        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 6 * 4), env->regs[R_ESI], retaddr);
+        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 7 * 4), env->regs[R_EDI], retaddr);
+        for (i = 0; i < 6; i++) {
+            cpu_stw_kernel_ra(env, env->tr.base + (0x48 + i * 4),
+                              env->segs[i].selector, retaddr);
+        }
+    } else {
+        /* 16 bit */
+        cpu_stw_kernel_ra(env, env->tr.base + 0x0e, next_eip, retaddr);
+        cpu_stw_kernel_ra(env, env->tr.base + 0x10, old_eflags, retaddr);
+        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 0 * 2), env->regs[R_EAX], retaddr);
+        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 1 * 2), env->regs[R_ECX], retaddr);
+        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 2 * 2), env->regs[R_EDX], retaddr);
+        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 3 * 2), env->regs[R_EBX], retaddr);
+        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 4 * 2), env->regs[R_ESP], retaddr);
+        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 5 * 2), env->regs[R_EBP], retaddr);
+        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 6 * 2), env->regs[R_ESI], retaddr);
+        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 7 * 2), env->regs[R_EDI], retaddr);
+        for (i = 0; i < 4; i++) {
+            cpu_stw_kernel_ra(env, env->tr.base + (0x22 + i * 4),
+                              env->segs[i].selector, retaddr);
+        }
+    }
+
+    /* from now on, if an exception occurs, it occurs in the next
+       task's context */
+
+    if (source == SWITCH_TSS_CALL) {
+        cpu_stw_kernel_ra(env, tss_base, env->tr.selector, retaddr);
+        new_eflags |= NT_MASK;
+    }
+
+    /* set busy bit */
+    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
+        target_ulong ptr;
+        uint32_t e2;
+
+        ptr = env->gdt.base + (tss_selector & ~7);
+        e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
+        e2 |= DESC_TSS_BUSY_MASK;
+        cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
+    }
+
+    /* set the new CPU state */
+    /* from this point on, any exception that occurs can cause problems */
+    env->cr[0] |= CR0_TS_MASK;
+    env->hflags |= HF_TS_MASK;
+    env->tr.selector = tss_selector;
+    env->tr.base = tss_base;
+    env->tr.limit = tss_limit;
+    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
+
+    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
+        cpu_x86_update_cr3(env, new_cr3);
+    }
+
+    /* first load all registers without raising exceptions, then reload
+       the ones that may fault */
+    env->eip = new_eip;
+    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
+        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
+    if (!(type & 8)) {
+        eflags_mask &= 0xffff;
+    }
+    cpu_load_eflags(env, new_eflags, eflags_mask);
+    /* XXX: what to do in 16 bit case? */
+    env->regs[R_EAX] = new_regs[0];
+    env->regs[R_ECX] = new_regs[1];
+    env->regs[R_EDX] = new_regs[2];
+    env->regs[R_EBX] = new_regs[3];
+    env->regs[R_ESP] = new_regs[4];
+    env->regs[R_EBP] = new_regs[5];
+    env->regs[R_ESI] = new_regs[6];
+    env->regs[R_EDI] = new_regs[7];
+    if (new_eflags & VM_MASK) {
+        for (i = 0; i < 6; i++) {
+            load_seg_vm(env, i, new_segs[i]);
+        }
+    } else {
+        /* load just the selectors first, since loading the full
+           descriptors may trigger exceptions */
+        for (i = 0; i < 6; i++) {
+            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
+        }
+    }
+
+    env->ldt.selector = new_ldt & ~4;
+    env->ldt.base = 0;
+    env->ldt.limit = 0;
+    env->ldt.flags = 0;
+
+    /* load the LDT */
+    if (new_ldt & 4) {
+        raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
+    }
+
+    if ((new_ldt & 0xfffc) != 0) {
+        dt = &env->gdt;
+        index = new_ldt & ~7;
+        if ((index + 7) > dt->limit) {
+            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
+        }
+        ptr = dt->base + index;
+        e1 = cpu_ldl_kernel_ra(env, ptr, retaddr);
+        e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
+        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
+            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
+        }
+        if (!(e2 & DESC_P_MASK)) {
+            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
+        }
+        load_seg_cache_raw_dt(&env->ldt, e1, e2);
+    }
+
+    /* load the segments */
+    if (!(new_eflags & VM_MASK)) {
+        int cpl = new_segs[R_CS] & 3;
+        tss_load_seg(env, R_CS, new_segs[R_CS], cpl, retaddr);
+        tss_load_seg(env, R_SS, new_segs[R_SS], cpl, retaddr);
+        tss_load_seg(env, R_ES, new_segs[R_ES], cpl, retaddr);
+        tss_load_seg(env, R_DS, new_segs[R_DS], cpl, retaddr);
+        tss_load_seg(env, R_FS, new_segs[R_FS], cpl, retaddr);
+        tss_load_seg(env, R_GS, new_segs[R_GS], cpl, retaddr);
+    }
+
+    /* check that env->eip is within the CS segment limit */
+    if (new_eip > env->segs[R_CS].limit) {
+        /* XXX: different exception if CALL? */
+        raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
+    }
+
+#ifndef CONFIG_USER_ONLY
+    /* reset local breakpoints */
+    if (env->dr[7] & DR7_LOCAL_BP_MASK) {
+        cpu_x86_update_dr7(env, env->dr[7] & ~DR7_LOCAL_BP_MASK);
+    }
+#endif
+}
+
+static void switch_tss(CPUX86State *env, int tss_selector,
+                       uint32_t e1, uint32_t e2, int source,
+                        uint32_t next_eip)
+{
+    switch_tss_ra(env, tss_selector, e1, e2, source, next_eip, 0);
+}
+
+static inline unsigned int get_sp_mask(unsigned int e2)
+{
+    if (e2 & DESC_B_MASK) {
+        return 0xffffffff;
+    } else {
+        return 0xffff;
+    }
+}
+
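+/* Exceptions that push an error code: #DF (8), #TS (10), #NP (11),
+   #SS (12), #GP (13), #PF (14) and #AC (17). */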
+static int exception_has_error_code(int intno)
+{
+    switch (intno) {
+    case 8:
+    case 10:
+    case 11:
+    case 12:
+    case 13:
+    case 14:
+    case 17:
+        return 1;
+    }
+    return 0;
+}
+
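+/* SET_ESP writes back only the bits selected by the stack-size mask,
+   so e.g. a 16-bit stack (sp_mask == 0xffff) leaves the upper bits of
+   ESP/RSP untouched. */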
+#ifdef TARGET_X86_64
+#define SET_ESP(val, sp_mask)                                   \
+    do {                                                        \
+        if ((sp_mask) == 0xffff) {                              \
+            env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) |   \
+                ((val) & 0xffff);                               \
+        } else if ((sp_mask) == 0xffffffffLL) {                 \
+            env->regs[R_ESP] = (uint32_t)(val);                 \
+        } else {                                                \
+            env->regs[R_ESP] = (val);                           \
+        }                                                       \
+    } while (0)
+#else
+#define SET_ESP(val, sp_mask)                                   \
+    do {                                                        \
+        env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) |    \
+            ((val) & (sp_mask));                                \
+    } while (0)
+#endif
+
+/* On 64-bit machines the segment base plus offset can overflow, so this
+ * segment addition macro trims the value to 32 bits whenever needed. */
+#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + ((sp) & (sp_mask))))
+
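+/* The _RA variants carry a host return address down to the memory
+ * accessors so that a fault can unwind back to the guest instruction;
+ * the plain wrappers below pass 0, meaning no translated-code caller. */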
+/* XXX: add an is_user flag to have proper security support */
+#define PUSHW_RA(ssp, sp, sp_mask, val, ra)                      \
+    {                                                            \
+        sp -= 2;                                                 \
+        cpu_stw_kernel_ra(env, (ssp) + (sp & (sp_mask)), (val), ra); \
+    }
+
+#define PUSHL_RA(ssp, sp, sp_mask, val, ra)                             \
+    {                                                                   \
+        sp -= 4;                                                        \
+        cpu_stl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val), ra); \
+    }
+
+#define POPW_RA(ssp, sp, sp_mask, val, ra)                       \
+    {                                                            \
+        val = cpu_lduw_kernel_ra(env, (ssp) + (sp & (sp_mask)), ra); \
+        sp += 2;                                                 \
+    }
+
+#define POPL_RA(ssp, sp, sp_mask, val, ra)                              \
+    {                                                                   \
+        val = (uint32_t)cpu_ldl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), ra); \
+        sp += 4;                                                        \
+    }
+
+#define PUSHW(ssp, sp, sp_mask, val) PUSHW_RA(ssp, sp, sp_mask, val, 0)
+#define PUSHL(ssp, sp, sp_mask, val) PUSHL_RA(ssp, sp, sp_mask, val, 0)
+#define POPW(ssp, sp, sp_mask, val) POPW_RA(ssp, sp, sp_mask, val, 0)
+#define POPL(ssp, sp, sp_mask, val) POPL_RA(ssp, sp, sp_mask, val, 0)
+
+/* protected mode interrupt */
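+/*
+ * The frame pushed below, from higher to lower addresses (16-bit gates
+ * push words instead of longs):
+ *   GS FS DS ES      -- vm86 only
+ *   SS ESP           -- only on a privilege change
+ *   EFLAGS CS EIP
+ *   error code       -- only when the exception defines one
+ */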
+static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
+                                   int error_code, unsigned int next_eip,
+                                   int is_hw)
+{
+    SegmentCache *dt;
+    target_ulong ptr, ssp;
+    int type, dpl, selector, ss_dpl, cpl;
+    int has_error_code, new_stack, shift;
+    uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
+    uint32_t old_eip, sp_mask;
+    int vm86 = env->eflags & VM_MASK;
+
+    has_error_code = 0;
+    if (!is_int && !is_hw) {
+        has_error_code = exception_has_error_code(intno);
+    }
+    if (is_int) {
+        old_eip = next_eip;
+    } else {
+        old_eip = env->eip;
+    }
+
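+    /* error codes for IDT-indexed faults are (vector << 3) | 2, bit 1
+       being the IDT flag */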
+    dt = &env->idt;
+    if (intno * 8 + 7 > dt->limit) {
+        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
+    }
+    ptr = dt->base + intno * 8;
+    e1 = cpu_ldl_kernel(env, ptr);
+    e2 = cpu_ldl_kernel(env, ptr + 4);
+    /* check gate type */
+    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
+    switch (type) {
+    case 5: /* task gate */
+        /* must do this check here to return the correct error code */
+        if (!(e2 & DESC_P_MASK)) {
+            raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
+        }
+        switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
+        if (has_error_code) {
+            int type;
+            uint32_t mask;
+
+            /* push the error code */
+            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
+            shift = type >> 3;
+            if (env->segs[R_SS].flags & DESC_B_MASK) {
+                mask = 0xffffffff;
+            } else {
+                mask = 0xffff;
+            }
+            esp = (env->regs[R_ESP] - (2 << shift)) & mask;
+            ssp = env->segs[R_SS].base + esp;
+            if (shift) {
+                cpu_stl_kernel(env, ssp, error_code);
+            } else {
+                cpu_stw_kernel(env, ssp, error_code);
+            }
+            SET_ESP(esp, mask);
+        }
+        return;
+    case 6: /* 286 interrupt gate */
+    case 7: /* 286 trap gate */
+    case 14: /* 386 interrupt gate */
+    case 15: /* 386 trap gate */
+        break;
+    default:
+        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
+        break;
+    }
+    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+    cpl = env->hflags & HF_CPL_MASK;
+    /* check privilege if software int */
+    if (is_int && dpl < cpl) {
+        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
+    }
+    /* check valid bit */
+    if (!(e2 & DESC_P_MASK)) {
+        raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
+    }
+    selector = e1 >> 16;
+    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
+    if ((selector & 0xfffc) == 0) {
+        raise_exception_err(env, EXCP0D_GPF, 0);
+    }
+    if (load_segment(env, &e1, &e2, selector) != 0) {
+        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
+    }
+    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
+        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
+    }
+    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+    if (dpl > cpl) {
+        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
+    }
+    if (!(e2 & DESC_P_MASK)) {
+        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
+    }
+    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
+        /* to inner privilege */
+        get_ss_esp_from_tss(env, &ss, &esp, dpl, 0);
+        if ((ss & 0xfffc) == 0) {
+            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
+        }
+        if ((ss & 3) != dpl) {
+            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
+        }
+        if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
+            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
+        }
+        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
+        if (ss_dpl != dpl) {
+            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
+        }
+        if (!(ss_e2 & DESC_S_MASK) ||
+            (ss_e2 & DESC_CS_MASK) ||
+            !(ss_e2 & DESC_W_MASK)) {
+            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
+        }
+        if (!(ss_e2 & DESC_P_MASK)) {
+            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
+        }
+        new_stack = 1;
+        sp_mask = get_sp_mask(ss_e2);
+        ssp = get_seg_base(ss_e1, ss_e2);
+    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
+        /* to same privilege */
+        if (vm86) {
+            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
+        }
+        new_stack = 0;
+        sp_mask = get_sp_mask(env->segs[R_SS].flags);
+        ssp = env->segs[R_SS].base;
+        esp = env->regs[R_ESP];
+        dpl = cpl;
+    } else {
+        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
+        new_stack = 0; /* avoid warning */
+        sp_mask = 0; /* avoid warning */
+        ssp = 0; /* avoid warning */
+        esp = 0; /* avoid warning */
+    }
+
+    shift = type >> 3;
+
+#if 0
+    /* XXX: check that enough room is available */
+    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
+    if (vm86) {
+        push_size += 8;
+    }
+    push_size <<= shift;
+#endif
+    if (shift == 1) {
+        if (new_stack) {
+            if (vm86) {
+                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
+                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
+                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
+                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
+            }
+            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
+            PUSHL(ssp, esp, sp_mask, env->regs[R_ESP]);
+        }
+        PUSHL(ssp, esp, sp_mask, cpu_compute_eflags(env));
+        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
+        PUSHL(ssp, esp, sp_mask, old_eip);
+        if (has_error_code) {
+            PUSHL(ssp, esp, sp_mask, error_code);
+        }
+    } else {
+        if (new_stack) {
+            if (vm86) {
+                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
+                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
+                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
+                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
+            }
+            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
+            PUSHW(ssp, esp, sp_mask, env->regs[R_ESP]);
+        }
+        PUSHW(ssp, esp, sp_mask, cpu_compute_eflags(env));
+        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
+        PUSHW(ssp, esp, sp_mask, old_eip);
+        if (has_error_code) {
+            PUSHW(ssp, esp, sp_mask, error_code);
+        }
+    }
+
+    /* interrupt gates clear the IF mask; trap gates leave it set */
+    if ((type & 1) == 0) {
+        env->eflags &= ~IF_MASK;
+    }
+    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
+
+    if (new_stack) {
+        if (vm86) {
+            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
+            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
+            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
+            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
+        }
+        ss = (ss & ~3) | dpl;
+        cpu_x86_load_seg_cache(env, R_SS, ss,
+                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
+    }
+    SET_ESP(esp, sp_mask);
+
+    selector = (selector & ~3) | dpl;
+    cpu_x86_load_seg_cache(env, R_CS, selector,
+                   get_seg_base(e1, e2),
+                   get_seg_limit(e1, e2),
+                   e2);
+    env->eip = offset;
+}
+
+#ifdef TARGET_X86_64
+
+#define PUSHQ_RA(sp, val, ra)                   \
+    {                                           \
+        sp -= 8;                                \
+        cpu_stq_kernel_ra(env, sp, (val), ra);  \
+    }
+
+#define POPQ_RA(sp, val, ra)                    \
+    {                                           \
+        val = cpu_ldq_kernel_ra(env, sp, ra);   \
+        sp += 8;                                \
+    }
+
+#define PUSHQ(sp, val) PUSHQ_RA(sp, val, 0)
+#define POPQ(sp, val) POPQ_RA(sp, val, 0)
+
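+/* In the 64-bit TSS, RSP0..RSP2 sit at offset 4 + 8 * n and the IST slots
+ * follow immediately, so IST i can be fetched as level i + 3. */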
+static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level)
+{
+    X86CPU *cpu = x86_env_get_cpu(env);
+    int index;
+
+#if 0
+    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
+           env->tr.base, env->tr.limit);
+#endif
+
+    if (!(env->tr.flags & DESC_P_MASK)) {
+        cpu_abort(CPU(cpu), "invalid tss");
+    }
+    index = 8 * level + 4;
+    if ((index + 7) > env->tr.limit) {
+        raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
+    }
+    return cpu_ldq_kernel(env, env->tr.base + index);
+}
+
+/* 64-bit interrupt */
+static void do_interrupt64(CPUX86State *env, int intno, int is_int,
+                           int error_code, target_ulong next_eip, int is_hw)
+{
+    SegmentCache *dt;
+    target_ulong ptr;
+    int type, dpl, selector, cpl, ist;
+    int has_error_code, new_stack;
+    uint32_t e1, e2, e3, ss;
+    target_ulong old_eip, esp, offset;
+
+    has_error_code = 0;
+    if (!is_int && !is_hw) {
+        has_error_code = exception_has_error_code(intno);
+    }
+    if (is_int) {
+        old_eip = next_eip;
+    } else {
+        old_eip = env->eip;
+    }
+
+    dt = &env->idt;
+    if (intno * 16 + 15 > dt->limit) {
+        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
+    }
+    ptr = dt->base + intno * 16;
+    e1 = cpu_ldl_kernel(env, ptr);
+    e2 = cpu_ldl_kernel(env, ptr + 4);
+    e3 = cpu_ldl_kernel(env, ptr + 8);
+    /* check gate type */
+    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
+    switch (type) {
+    case 14: /* 386 interrupt gate */
+    case 15: /* 386 trap gate */
+        break;
+    default:
+        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
+        break;
+    }
+    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+    cpl = env->hflags & HF_CPL_MASK;
+    /* check privilege if software int */
+    if (is_int && dpl < cpl) {
+        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
+    }
+    /* check valid bit */
+    if (!(e2 & DESC_P_MASK)) {
+        raise_exception_err(env, EXCP0B_NOSEG, intno * 16 + 2);
+    }
+    selector = e1 >> 16;
+    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
+    ist = e2 & 7;
+    if ((selector & 0xfffc) == 0) {
+        raise_exception_err(env, EXCP0D_GPF, 0);
+    }
+
+    if (load_segment(env, &e1, &e2, selector) != 0) {
+        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
+    }
+    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
+        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
+    }
+    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+    if (dpl > cpl) {
+        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
+    }
+    if (!(e2 & DESC_P_MASK)) {
+        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
+    }
+    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) {
+        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
+    }
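+    /* a non-zero IST index forces a stack switch even without a
+       privilege change */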
+    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
+        /* to inner privilege */
+        new_stack = 1;
+        esp = get_rsp_from_tss(env, ist != 0 ? ist + 3 : dpl);
+        ss = 0;
+    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
+        /* to same privilege */
+        if (env->eflags & VM_MASK) {
+            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
+        }
+        new_stack = 0;
+        esp = env->regs[R_ESP];
+        dpl = cpl;
+    } else {
+        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
+        new_stack = 0; /* avoid warning */
+        esp = 0; /* avoid warning */
+    }
+    esp &= ~0xfLL; /* align stack */
+
+    PUSHQ(esp, env->segs[R_SS].selector);
+    PUSHQ(esp, env->regs[R_ESP]);
+    PUSHQ(esp, cpu_compute_eflags(env));
+    PUSHQ(esp, env->segs[R_CS].selector);
+    PUSHQ(esp, old_eip);
+    if (has_error_code) {
+        PUSHQ(esp, error_code);
+    }
+
+    /* interrupt gates clear the IF mask; trap gates leave it set */
+    if ((type & 1) == 0) {
+        env->eflags &= ~IF_MASK;
+    }
+    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
+
+    if (new_stack) {
+        ss = 0 | dpl;
+        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
+    }
+    env->regs[R_ESP] = esp;
+
+    selector = (selector & ~3) | dpl;
+    cpu_x86_load_seg_cache(env, R_CS, selector,
+                   get_seg_base(e1, e2),
+                   get_seg_limit(e1, e2),
+                   e2);
+    env->eip = offset;
+}
+#endif
+
+#ifdef TARGET_X86_64
+#if defined(CONFIG_USER_ONLY)
+void helper_syscall(CPUX86State *env, int next_eip_addend)
+{
+    CPUState *cs = CPU(x86_env_get_cpu(env));
+
+    cs->exception_index = EXCP_SYSCALL;
+    env->exception_next_eip = env->eip + next_eip_addend;
+    cpu_loop_exit(cs);
+}
+#else
+void helper_syscall(CPUX86State *env, int next_eip_addend)
+{
+    int selector;
+
+    if (!(env->efer & MSR_EFER_SCE)) {
+        raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
+    }
+    selector = (env->star >> 32) & 0xffff;
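+    /* STAR[47:32] is the SYSCALL CS selector; SS is implicitly CS + 8 */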
+    if (env->hflags & HF_LMA_MASK) {
+        int code64;
+
+        env->regs[R_ECX] = env->eip + next_eip_addend;
+        env->regs[11] = cpu_compute_eflags(env);
+
+        code64 = env->hflags & HF_CS64_MASK;
+
+        env->eflags &= ~env->fmask;
+        cpu_load_eflags(env, env->eflags, 0);
+        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
+                           0, 0xffffffff,
+                               DESC_G_MASK | DESC_P_MASK |
+                               DESC_S_MASK |
+                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
+                               DESC_L_MASK);
+        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
+                               0, 0xffffffff,
+                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
+                               DESC_S_MASK |
+                               DESC_W_MASK | DESC_A_MASK);
+        if (code64) {
+            env->eip = env->lstar;
+        } else {
+            env->eip = env->cstar;
+        }
+    } else {
+        env->regs[R_ECX] = (uint32_t)(env->eip + next_eip_addend);
+
+        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
+        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
+                           0, 0xffffffff,
+                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
+                               DESC_S_MASK |
+                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
+        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
+                               0, 0xffffffff,
+                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
+                               DESC_S_MASK |
+                               DESC_W_MASK | DESC_A_MASK);
+        env->eip = (uint32_t)env->star;
+    }
+}
+#endif
+#endif
+
+#ifdef TARGET_X86_64
+void helper_sysret(CPUX86State *env, int dflag)
+{
+    int cpl, selector;
+
+    if (!(env->efer & MSR_EFER_SCE)) {
+        raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
+    }
+    cpl = env->hflags & HF_CPL_MASK;
+    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
+        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
+    }
+    selector = (env->star >> 48) & 0xffff;
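+    /* STAR[63:48] is the SYSRET selector base: CS is base + 16 for a
+       64-bit return, the base itself for 32-bit, and SS is base + 8 */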
+    if (env->hflags & HF_LMA_MASK) {
+        cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK
+                        | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK |
+                        NT_MASK);
+        if (dflag == 2) {
+            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
+                                   0, 0xffffffff,
+                                   DESC_G_MASK | DESC_P_MASK |
+                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
+                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
+                                   DESC_L_MASK);
+            env->eip = env->regs[R_ECX];
+        } else {
+            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
+                                   0, 0xffffffff,
+                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
+                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
+                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
+            env->eip = (uint32_t)env->regs[R_ECX];
+        }
+        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
+                               0, 0xffffffff,
+                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
+                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
+                               DESC_W_MASK | DESC_A_MASK);
+    } else {
+        env->eflags |= IF_MASK;
+        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
+                               0, 0xffffffff,
+                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
+                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
+                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
+        env->eip = (uint32_t)env->regs[R_ECX];
+        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
+                               0, 0xffffffff,
+                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
+                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
+                               DESC_W_MASK | DESC_A_MASK);
+    }
+}
+#endif
+
+/* real mode interrupt */
+static void do_interrupt_real(CPUX86State *env, int intno, int is_int,
+                              int error_code, unsigned int next_eip)
+{
+    SegmentCache *dt;
+    target_ulong ptr, ssp;
+    int selector;
+    uint32_t offset, esp;
+    uint32_t old_cs, old_eip;
+
+    /* real mode (simpler!) */
+    dt = &env->idt;
+    if (intno * 4 + 3 > dt->limit) {
+        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
+    }
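+    /* real mode IVT entries are 4 bytes: a 16-bit offset followed by a
+       16-bit segment */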
+    ptr = dt->base + intno * 4;
+    offset = cpu_lduw_kernel(env, ptr);
+    selector = cpu_lduw_kernel(env, ptr + 2);
+    esp = env->regs[R_ESP];
+    ssp = env->segs[R_SS].base;
+    if (is_int) {
+        old_eip = next_eip;
+    } else {
+        old_eip = env->eip;
+    }
+    old_cs = env->segs[R_CS].selector;
+    /* XXX: use SS segment size? */
+    PUSHW(ssp, esp, 0xffff, cpu_compute_eflags(env));
+    PUSHW(ssp, esp, 0xffff, old_cs);
+    PUSHW(ssp, esp, 0xffff, old_eip);
+
+    /* update processor state */
+    env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | (esp & 0xffff);
+    env->eip = offset;
+    env->segs[R_CS].selector = selector;
+    env->segs[R_CS].base = (selector << 4);
+    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
+}
+
+#if defined(CONFIG_USER_ONLY)
+/* fake user mode interrupt. is_int is TRUE if coming from the int
+ * instruction. next_eip is the env->eip value AFTER the interrupt
+ * instruction. It is only relevant if is_int is TRUE or if intno
+ * is EXCP_SYSCALL.
+ */
+static void do_interrupt_user(CPUX86State *env, int intno, int is_int,
+                              int error_code, target_ulong next_eip)
+{
+    if (is_int) {
+        SegmentCache *dt;
+        target_ulong ptr;
+        int dpl, cpl, shift;
+        uint32_t e2;
+
+        dt = &env->idt;
+        if (env->hflags & HF_LMA_MASK) {
+            shift = 4;
+        } else {
+            shift = 3;
+        }
+        ptr = dt->base + (intno << shift);
+        e2 = cpu_ldl_kernel(env, ptr + 4);
+
+        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+        cpl = env->hflags & HF_CPL_MASK;
+        /* check privilege if software int */
+        if (dpl < cpl) {
+            raise_exception_err(env, EXCP0D_GPF, (intno << shift) + 2);
+        }
+    }
+
+    /* Since we emulate only user space, we cannot do more than exit the
+       emulation with the suitable exception and error code, so update
+       EIP for INT 0x80 and EXCP_SYSCALL. */
+    if (is_int || intno == EXCP_SYSCALL) {
+        env->eip = next_eip;
+    }
+}
+
+#else
+
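+/* Under SVM, record the event being delivered in the VMCB event_inj
+ * field; do_interrupt_all() clears the valid bit again once delivery
+ * completes. */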
+static void handle_even_inj(CPUX86State *env, int intno, int is_int,
+                            int error_code, int is_hw, int rm)
+{
+    CPUState *cs = CPU(x86_env_get_cpu(env));
+    uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
+                                                          control.event_inj));
+
+    if (!(event_inj & SVM_EVTINJ_VALID)) {
+        int type;
+
+        if (is_int) {
+            type = SVM_EVTINJ_TYPE_SOFT;
+        } else {
+            type = SVM_EVTINJ_TYPE_EXEPT;
+        }
+        event_inj = intno | type | SVM_EVTINJ_VALID;
+        if (!rm && exception_has_error_code(intno)) {
+            event_inj |= SVM_EVTINJ_VALID_ERR;
+            x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
+                                             control.event_inj_err),
+                     error_code);
+        }
+        x86_stl_phys(cs,
+                 env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
+                 event_inj);
+    }
+}
+#endif
+
+/*
+ * Begin delivery of an interrupt. is_int is TRUE if coming from
+ * the int instruction. next_eip is the env->eip value AFTER the interrupt
+ * instruction. It is only relevant if is_int is TRUE.
+ */
+static void do_interrupt_all(X86CPU *cpu, int intno, int is_int,
+                             int error_code, target_ulong next_eip, int is_hw)
+{
+    CPUX86State *env = &cpu->env;
+
+    if (qemu_loglevel_mask(CPU_LOG_INT)) {
+        if ((env->cr[0] & CR0_PE_MASK)) {
+            static int count;
+
+            qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
+                     " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
+                     count, intno, error_code, is_int,
+                     env->hflags & HF_CPL_MASK,
+                     env->segs[R_CS].selector, env->eip,
+                     (int)env->segs[R_CS].base + env->eip,
+                     env->segs[R_SS].selector, env->regs[R_ESP]);
+            if (intno == 0x0e) {
+                qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
+            } else {
+                qemu_log(" env->regs[R_EAX]=" TARGET_FMT_lx, env->regs[R_EAX]);
+            }
+            qemu_log("\n");
+            log_cpu_state(CPU(cpu), CPU_DUMP_CCOP);
+#if 0
+            {
+                int i;
+                target_ulong ptr;
+
+                qemu_log("       code=");
+                ptr = env->segs[R_CS].base + env->eip;
+                for (i = 0; i < 16; i++) {
+                    qemu_log(" %02x", ldub(ptr + i));
+                }
+                qemu_log("\n");
+            }
+#endif
+            count++;
+        }
+    }
+    if (env->cr[0] & CR0_PE_MASK) {
+#if !defined(CONFIG_USER_ONLY)
+        if (env->hflags & HF_SVMI_MASK) {
+            handle_even_inj(env, intno, is_int, error_code, is_hw, 0);
+        }
+#endif
+#ifdef TARGET_X86_64
+        if (env->hflags & HF_LMA_MASK) {
+            do_interrupt64(env, intno, is_int, error_code, next_eip, is_hw);
+        } else
+#endif
+        {
+            do_interrupt_protected(env, intno, is_int, error_code, next_eip,
+                                   is_hw);
+        }
+    } else {
+#if !defined(CONFIG_USER_ONLY)
+        if (env->hflags & HF_SVMI_MASK) {
+            handle_even_inj(env, intno, is_int, error_code, is_hw, 1);
+        }
+#endif
+        do_interrupt_real(env, intno, is_int, error_code, next_eip);
+    }
+
+#if !defined(CONFIG_USER_ONLY)
+    if (env->hflags & HF_SVMI_MASK) {
+        CPUState *cs = CPU(cpu);
+        uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb +
+                                      offsetof(struct vmcb,
+                                               control.event_inj));
+
+        x86_stl_phys(cs,
+                 env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
+                 event_inj & ~SVM_EVTINJ_VALID);
+    }
+#endif
+}
+
+void x86_cpu_do_interrupt(CPUState *cs)
+{
+    X86CPU *cpu = X86_CPU(cs);
+    CPUX86State *env = &cpu->env;
+
+#if defined(CONFIG_USER_ONLY)
+    /* in user-mode emulation, we simulate a fake exception which is
+       handled outside the cpu execution loop */
+    do_interrupt_user(env, cs->exception_index,
+                      env->exception_is_int,
+                      env->error_code,
+                      env->exception_next_eip);
+    /* successfully delivered */
+    env->old_exception = -1;
+#else
+    /* simulate a real cpu exception. On i386, it can
+       trigger new exceptions, but we do not handle
+       double or triple faults yet. */
+    do_interrupt_all(cpu, cs->exception_index,
+                     env->exception_is_int,
+                     env->error_code,
+                     env->exception_next_eip, 0);
+    /* successfully delivered */
+    env->old_exception = -1;
+#endif
+}
+
+void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw)
+{
+    do_interrupt_all(x86_env_get_cpu(env), intno, 0, 0, 0, is_hw);
+}
+
+bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
+{
+    X86CPU *cpu = X86_CPU(cs);
+    CPUX86State *env = &cpu->env;
+    bool ret = false;
+
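+    /* Pending events are serviced in decreasing priority order: APIC poll
+       and SIPI first, then (only while GIF is set) SMI, NMI, MCE, external
+       interrupts and finally virtual interrupts. */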
+#if !defined(CONFIG_USER_ONLY)
+    if (interrupt_request & CPU_INTERRUPT_POLL) {
+        cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
+        apic_poll_irq(cpu->apic_state);
+        /* Don't process multiple interrupt requests in a single call.
+           This is required to make icount-driven execution deterministic. */
+        return true;
+    }
+#endif
+    if (interrupt_request & CPU_INTERRUPT_SIPI) {
+        do_cpu_sipi(cpu);
+    } else if (env->hflags2 & HF2_GIF_MASK) {
+        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
+            !(env->hflags & HF_SMM_MASK)) {
+            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0);
+            cs->interrupt_request &= ~CPU_INTERRUPT_SMI;
+            do_smm_enter(cpu);
+            ret = true;
+        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
+                   !(env->hflags2 & HF2_NMI_MASK)) {
+            cs->interrupt_request &= ~CPU_INTERRUPT_NMI;
+            env->hflags2 |= HF2_NMI_MASK;
+            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
+            ret = true;
+        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
+            cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
+            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
+            ret = true;
+        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
+                   (((env->hflags2 & HF2_VINTR_MASK) &&
+                     (env->hflags2 & HF2_HIF_MASK)) ||
+                    (!(env->hflags2 & HF2_VINTR_MASK) &&
+                     (env->eflags & IF_MASK &&
+                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
+            int intno;
+            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0);
+            cs->interrupt_request &= ~(CPU_INTERRUPT_HARD |
+                                       CPU_INTERRUPT_VIRQ);
+            intno = cpu_get_pic_interrupt(env);
+            qemu_log_mask(CPU_LOG_TB_IN_ASM,
+                          "Servicing hardware INT=0x%02x\n", intno);
+            do_interrupt_x86_hardirq(env, intno, 1);
+            /* ensure that no TB jump will be modified, as the
+               program flow was changed */
+            ret = true;
+#if !defined(CONFIG_USER_ONLY)
+        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
+                   (env->eflags & IF_MASK) &&
+                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
+            int intno;
+            /* FIXME: this should respect TPR */
+            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR, 0);
+            intno = x86_ldl_phys(cs, env->vm_vmcb
+                             + offsetof(struct vmcb, control.int_vector));
+            qemu_log_mask(CPU_LOG_TB_IN_ASM,
+                          "Servicing virtual hardware INT=0x%02x\n", intno);
+            do_interrupt_x86_hardirq(env, intno, 1);
+            cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
+            ret = true;
+#endif
+        }
+    }
+
+    return ret;
+}
+
+void helper_lldt(CPUX86State *env, int selector)
+{
+    SegmentCache *dt;
+    uint32_t e1, e2;
+    int index, entry_limit;
+    target_ulong ptr;
+
+    selector &= 0xffff;
+    if ((selector & 0xfffc) == 0) {
+        /* XXX: NULL selector case: invalid LDT */
+        env->ldt.base = 0;
+        env->ldt.limit = 0;
+    } else {
+        if (selector & 0x4) {
+            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
+        }
+        dt = &env->gdt;
+        index = selector & ~7;
+#ifdef TARGET_X86_64
+        if (env->hflags & HF_LMA_MASK) {
+            entry_limit = 15;
+        } else
+#endif
+        {
+            entry_limit = 7;
+        }
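+        /* in long mode, LDT and TSS descriptors are 16 bytes */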
+        if ((index + entry_limit) > dt->limit) {
+            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
+        }
+        ptr = dt->base + index;
+        e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
+        e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
+        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
+            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
+        }
+        if (!(e2 & DESC_P_MASK)) {
+            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
+        }
+#ifdef TARGET_X86_64
+        if (env->hflags & HF_LMA_MASK) {
+            uint32_t e3;
+
+            e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
+            load_seg_cache_raw_dt(&env->ldt, e1, e2);
+            env->ldt.base |= (target_ulong)e3 << 32;
+        } else
+#endif
+        {
+            load_seg_cache_raw_dt(&env->ldt, e1, e2);
+        }
+    }
+    env->ldt.selector = selector;
+}
+
+void helper_ltr(CPUX86State *env, int selector)
+{
+    SegmentCache *dt;
+    uint32_t e1, e2;
+    int index, type, entry_limit;
+    target_ulong ptr;
+
+    selector &= 0xffff;
+    if ((selector & 0xfffc) == 0) {
+        /* NULL selector case: invalid TR */
+        env->tr.base = 0;
+        env->tr.limit = 0;
+        env->tr.flags = 0;
+    } else {
+        if (selector & 0x4) {
+            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
+        }
+        dt = &env->gdt;
+        index = selector & ~7;
+#ifdef TARGET_X86_64
+        if (env->hflags & HF_LMA_MASK) {
+            entry_limit = 15;
+        } else
+#endif
+        {
+            entry_limit = 7;
+        }
+        if ((index + entry_limit) > dt->limit) {
+            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
+        }
+        ptr = dt->base + index;
+        e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
+        e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
+        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
+        if ((e2 & DESC_S_MASK) ||
+            (type != 1 && type != 9)) {
+            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
+        }
+        if (!(e2 & DESC_P_MASK)) {
+            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
+        }
+#ifdef TARGET_X86_64
+        if (env->hflags & HF_LMA_MASK) {
+            uint32_t e3, e4;
+
+            e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
+            e4 = cpu_ldl_kernel_ra(env, ptr + 12, GETPC());
+            if ((e4 >> DESC_TYPE_SHIFT) & 0xf) {
+                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
+            }
+            load_seg_cache_raw_dt(&env->tr, e1, e2);
+            env->tr.base |= (target_ulong)e3 << 32;
+        } else
+#endif
+        {
+            load_seg_cache_raw_dt(&env->tr, e1, e2);
+        }
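+        /* mark the descriptor busy in the GDT (type available -> busy) */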
+        e2 |= DESC_TSS_BUSY_MASK;
+        cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
+    }
+    env->tr.selector = selector;
+}
+
+/* only works in protected mode and not VM86; seg_reg must not be R_CS */
+void helper_load_seg(CPUX86State *env, int seg_reg, int selector)
+{
+    uint32_t e1, e2;
+    int cpl, dpl, rpl;
+    SegmentCache *dt;
+    int index;
+    target_ulong ptr;
+
+    selector &= 0xffff;
+    cpl = env->hflags & HF_CPL_MASK;
+    if ((selector & 0xfffc) == 0) {
+        /* null selector case: valid for SS only in 64-bit mode at CPL != 3 */
+        if (seg_reg == R_SS
+#ifdef TARGET_X86_64
+            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
+#endif
+            ) {
+            raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
+        }
+        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
+    } else {
+        if (selector & 0x4) {
+            dt = &env->ldt;
+        } else {
+            dt = &env->gdt;
+        }
+        index = selector & ~7;
+        if ((index + 7) > dt->limit) {
+            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
+        }
+        ptr = dt->base + index;
+        e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
+        e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
+
+        if (!(e2 & DESC_S_MASK)) {
+            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
+        }
+        rpl = selector & 3;
+        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+        if (seg_reg == R_SS) {
+            /* must be writable segment */
+            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
+                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
+            }
+            if (rpl != cpl || dpl != cpl) {
+                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
+            }
+        } else {
+            /* must be readable segment */
+            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
+                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
+            }
+
+            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
+                /* if not conforming code, test rights */
+                if (dpl < cpl || dpl < rpl) {
+                    raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
+                }
+            }
+        }
+
+        if (!(e2 & DESC_P_MASK)) {
+            if (seg_reg == R_SS) {
+                raise_exception_err_ra(env, EXCP0C_STACK, selector & 0xfffc, GETPC());
+            } else {
+                raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
+            }
+        }
+
+        /* set the access bit if not already set */
+        if (!(e2 & DESC_A_MASK)) {
+            e2 |= DESC_A_MASK;
+            cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
+        }
+
+        cpu_x86_load_seg_cache(env, seg_reg, selector,
+                       get_seg_base(e1, e2),
+                       get_seg_limit(e1, e2),
+                       e2);
+#if 0
+        qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
+                selector, (unsigned long)sc->base, sc->limit, sc->flags);
+#endif
+    }
+}
+
+/* protected mode jump */
+void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
+                           target_ulong next_eip)
+{
+    int gate_cs, type;
+    uint32_t e1, e2, cpl, dpl, rpl, limit;
+
+    if ((new_cs & 0xfffc) == 0) {
+        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
+    }
+    if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
+        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
+    }
+    cpl = env->hflags & HF_CPL_MASK;
+    if (e2 & DESC_S_MASK) {
+        if (!(e2 & DESC_CS_MASK)) {
+            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
+        }
+        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+        if (e2 & DESC_C_MASK) {
+            /* conforming code segment */
+            if (dpl > cpl) {
+                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
+            }
+        } else {
+            /* non-conforming code segment */
+            rpl = new_cs & 3;
+            if (rpl > cpl) {
+                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
+            }
+            if (dpl != cpl) {
+                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
+            }
+        }
+        if (!(e2 & DESC_P_MASK)) {
+            raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
+        }
+        limit = get_seg_limit(e1, e2);
+        if (new_eip > limit &&
+            !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK)) {
+            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
+        }
+        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
+                       get_seg_base(e1, e2), limit, e2);
+        env->eip = new_eip;
+    } else {
+        /* jump to call or task gate */
+        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+        rpl = new_cs & 3;
+        cpl = env->hflags & HF_CPL_MASK;
+        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
+        switch (type) {
+        case 1: /* 286 TSS */
+        case 9: /* 386 TSS */
+        case 5: /* task gate */
+            if (dpl < cpl || dpl < rpl) {
+                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
+            }
+            switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip, GETPC());
+            break;
+        case 4: /* 286 call gate */
+        case 12: /* 386 call gate */
+            if ((dpl < cpl) || (dpl < rpl)) {
+                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
+            }
+            if (!(e2 & DESC_P_MASK)) {
+                raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
+            }
+            gate_cs = e1 >> 16;
+            new_eip = (e1 & 0xffff);
+            if (type == 12) {
+                new_eip |= (e2 & 0xffff0000);
+            }
+            if (load_segment_ra(env, &e1, &e2, gate_cs, GETPC()) != 0) {
+                raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
+            }
+            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+            /* must be code segment */
+            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
+                 (DESC_S_MASK | DESC_CS_MASK))) {
+                raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
+            }
+            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
+                (!(e2 & DESC_C_MASK) && (dpl != cpl))) {
+                raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
+            }
+            if (!(e2 & DESC_P_MASK)) {
+                raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
+            }
+            limit = get_seg_limit(e1, e2);
+            if (new_eip > limit) {
+                raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
+            }
+            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
+                                   get_seg_base(e1, e2), limit, e2);
+            env->eip = new_eip;
+            break;
+        default:
+            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
+            break;
+        }
+    }
+}
+
+/* real mode call */
+void helper_lcall_real(CPUX86State *env, int new_cs, target_ulong new_eip1,
+                       int shift, int next_eip)
+{
+    int new_eip;
+    uint32_t esp, esp_mask;
+    target_ulong ssp;
+
+    new_eip = new_eip1;
+    esp = env->regs[R_ESP];
+    esp_mask = get_sp_mask(env->segs[R_SS].flags);
+    ssp = env->segs[R_SS].base;
+    if (shift) {
+        PUSHL_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
+        PUSHL_RA(ssp, esp, esp_mask, next_eip, GETPC());
+    } else {
+        PUSHW_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
+        PUSHW_RA(ssp, esp, esp_mask, next_eip, GETPC());
+    }
+
+    SET_ESP(esp, esp_mask);
+    env->eip = new_eip;
+    env->segs[R_CS].selector = new_cs;
+    env->segs[R_CS].base = (new_cs << 4);
+}
+
+/* protected mode call */
+void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
+                            int shift, target_ulong next_eip)
+{
+    int new_stack, i;
+    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
+    uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
+    uint32_t val, limit, old_sp_mask;
+    target_ulong ssp, old_ssp;
+
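+    /* A call through a call gate to a more privileged level switches to
+     * the stack given by the TSS and copies param_count entries (words for
+     * a 286 gate, longs for a 386 gate) from the old stack to the new. */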
+    LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
+    LOG_PCALL_STATE(CPU(x86_env_get_cpu(env)));
+    if ((new_cs & 0xfffc) == 0) {
+        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
+    }
+    if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
+        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
+    }
+    cpl = env->hflags & HF_CPL_MASK;
+    LOG_PCALL("desc=%08x:%08x\n", e1, e2);
+    if (e2 & DESC_S_MASK) {
+        if (!(e2 & DESC_CS_MASK)) {
+            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
+        }
+        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+        if (e2 & DESC_C_MASK) {
+            /* conforming code segment */
+            if (dpl > cpl) {
+                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
+            }
+        } else {
+            /* non-conforming code segment */
+            rpl = new_cs & 3;
+            if (rpl > cpl) {
+                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
+            }
+            if (dpl != cpl) {
+                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
+            }
+        }
+        if (!(e2 & DESC_P_MASK)) {
+            raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
+        }
+
+#ifdef TARGET_X86_64
+        /* XXX: check 16/32 bit cases in long mode */
+        if (shift == 2) {
+            target_ulong rsp;
+
+            /* 64 bit case */
+            rsp = env->regs[R_ESP];
+            PUSHQ_RA(rsp, env->segs[R_CS].selector, GETPC());
+            PUSHQ_RA(rsp, next_eip, GETPC());
+            /* from this point, not restartable */
+            env->regs[R_ESP] = rsp;
+            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
+                                   get_seg_base(e1, e2),
+                                   get_seg_limit(e1, e2), e2);
+            env->eip = new_eip;
+        } else
+#endif
+        {
+            sp = env->regs[R_ESP];
+            sp_mask = get_sp_mask(env->segs[R_SS].flags);
+            ssp = env->segs[R_SS].base;
+            if (shift) {
+                PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
+                PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
+            } else {
+                PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
+                PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
+            }
+
+            limit = get_seg_limit(e1, e2);
+            if (new_eip > limit) {
+                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
+            }
+            /* from this point, not restartable */
+            SET_ESP(sp, sp_mask);
+            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
+                                   get_seg_base(e1, e2), limit, e2);
+            env->eip = new_eip;
+        }
+    } else {
+        /* check gate type */
+        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
+        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+        rpl = new_cs & 3;
+        switch (type) {
+        case 1: /* available 286 TSS */
+        case 9: /* available 386 TSS */
+        case 5: /* task gate */
+            if (dpl < cpl || dpl < rpl) {
+                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
+            }
+            switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip, GETPC());
+            return;
+        case 4: /* 286 call gate */
+        case 12: /* 386 call gate */
+            break;
+        default:
+            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
+            break;
+        }
+        shift = type >> 3;
+
+        if (dpl < cpl || dpl < rpl) {
+            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
+        }
+        /* check valid bit */
+        if (!(e2 & DESC_P_MASK)) {
+            raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
+        }
+        selector = e1 >> 16;
+        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
+        param_count = e2 & 0x1f;
+        if ((selector & 0xfffc) == 0) {
+            raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
+        }
+
+        if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
+            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
+        }
+        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
+            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
+        }
+        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+        if (dpl > cpl) {
+            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
+        }
+        if (!(e2 & DESC_P_MASK)) {
+            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
+        }
+
+        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
+            /* to inner privilege */
+            get_ss_esp_from_tss(env, &ss, &sp, dpl, GETPC());
+            LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]="
+                      TARGET_FMT_lx "\n", ss, sp, param_count,
+                      env->regs[R_ESP]);
+            if ((ss & 0xfffc) == 0) {
+                raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
+            }
+            if ((ss & 3) != dpl) {
+                raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
+            }
+            if (load_segment_ra(env, &ss_e1, &ss_e2, ss, GETPC()) != 0) {
+                raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
+            }
+            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
+            if (ss_dpl != dpl) {
+                raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
+            }
+            if (!(ss_e2 & DESC_S_MASK) ||
+                (ss_e2 & DESC_CS_MASK) ||
+                !(ss_e2 & DESC_W_MASK)) {
+                raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
+            }
+            if (!(ss_e2 & DESC_P_MASK)) {
+                raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
+            }
+
+            /* push_size = ((param_count * 2) + 8) << shift; */
+
+            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
+            old_ssp = env->segs[R_SS].base;
+
+            sp_mask = get_sp_mask(ss_e2);
+            ssp = get_seg_base(ss_e1, ss_e2);
+            if (shift) {
+                PUSHL_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
+                PUSHL_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
+                for (i = param_count - 1; i >= 0; i--) {
+                    val = cpu_ldl_kernel_ra(env, old_ssp +
+                                            ((env->regs[R_ESP] + i * 4) &
+                                             old_sp_mask), GETPC());
+                    PUSHL_RA(ssp, sp, sp_mask, val, GETPC());
+                }
+            } else {
+                PUSHW_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
+                PUSHW_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
+                for (i = param_count - 1; i >= 0; i--) {
+                    val = cpu_lduw_kernel_ra(env, old_ssp +
+                                             ((env->regs[R_ESP] + i * 2) &
+                                              old_sp_mask), GETPC());
+                    PUSHW_RA(ssp, sp, sp_mask, val, GETPC());
+                }
+            }
+            new_stack = 1;
+        } else {
+            /* to same privilege */
+            sp = env->regs[R_ESP];
+            sp_mask = get_sp_mask(env->segs[R_SS].flags);
+            ssp = env->segs[R_SS].base;
+            /* push_size = (4 << shift); */
+            new_stack = 0;
+        }
+
+        if (shift) {
+            PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
+            PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
+        } else {
+            PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
+            PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
+        }
+
+        /* from this point, not restartable */
+
+        if (new_stack) {
+            ss = (ss & ~3) | dpl;
+            cpu_x86_load_seg_cache(env, R_SS, ss,
+                                   ssp,
+                                   get_seg_limit(ss_e1, ss_e2),
+                                   ss_e2);
+        }
+
+        selector = (selector & ~3) | dpl;
+        cpu_x86_load_seg_cache(env, R_CS, selector,
+                       get_seg_base(e1, e2),
+                       get_seg_limit(e1, e2),
+                       e2);
+        SET_ESP(sp, sp_mask);
+        env->eip = offset;
+    }
+}
+
+/* real and vm86 mode iret */
+void helper_iret_real(CPUX86State *env, int shift)
+{
+    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
+    target_ulong ssp;
+    int eflags_mask;
+
+    sp_mask = 0xffff; /* XXX: use SS segment size? */
+    sp = env->regs[R_ESP];
+    ssp = env->segs[R_SS].base;
+    if (shift == 1) {
+        /* 32 bits */
+        POPL_RA(ssp, sp, sp_mask, new_eip, GETPC());
+        POPL_RA(ssp, sp, sp_mask, new_cs, GETPC());
+        new_cs &= 0xffff;
+        POPL_RA(ssp, sp, sp_mask, new_eflags, GETPC());
+    } else {
+        /* 16 bits */
+        POPW_RA(ssp, sp, sp_mask, new_eip, GETPC());
+        POPW_RA(ssp, sp, sp_mask, new_cs, GETPC());
+        POPW_RA(ssp, sp, sp_mask, new_eflags, GETPC());
+    }
+    env->regs[R_ESP] = (env->regs[R_ESP] & ~sp_mask) | (sp & sp_mask);
+    env->segs[R_CS].selector = new_cs;
+    env->segs[R_CS].base = (new_cs << 4);
+    env->eip = new_eip;
+    if (env->eflags & VM_MASK) {
+        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK |
+            NT_MASK;
+    } else {
+        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK |
+            RF_MASK | NT_MASK;
+    }
+    if (shift == 0) {
+        eflags_mask &= 0xffff;
+    }
+    cpu_load_eflags(env, new_eflags, eflags_mask);
+    env->hflags2 &= ~HF2_NMI_MASK;
+}
+
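+/* On a return to an outer privilege level, data segment registers that
+ * are not accessible at the new CPL must be reloaded with the null
+ * selector. */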
+static inline void validate_seg(CPUX86State *env, int seg_reg, int cpl)
+{
+    int dpl;
+    uint32_t e2;
+
+    /* XXX: on x86_64, we do not want to nullify FS and GS because
+       they may still contain a valid base. I would be interested to
+       know how a real x86_64 CPU behaves */
+    if ((seg_reg == R_FS || seg_reg == R_GS) &&
+        (env->segs[seg_reg].selector & 0xfffc) == 0) {
+        return;
+    }
+
+    e2 = env->segs[seg_reg].flags;
+    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
+        /* data or non-conforming code segment */
+        if (dpl < cpl) {
+            cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
+        }
+    }
+}
+
+/* protected mode iret and lret */
+static inline void helper_ret_protected(CPUX86State *env, int shift,
+                                        int is_iret, int addend,
+                                        uintptr_t retaddr)
+{
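+    /* shift selects the operand size (0 = 16-bit, 1 = 32-bit, 2 = 64-bit);
+       addend is the extra stack adjustment requested by "lret imm16" */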
+    uint32_t new_cs, new_eflags, new_ss;
+    uint32_t new_es, new_ds, new_fs, new_gs;
+    uint32_t e1, e2, ss_e1, ss_e2;
+    int cpl, dpl, rpl, eflags_mask, iopl;
+    target_ulong ssp, sp, new_eip, new_esp, sp_mask;
+
+#ifdef TARGET_X86_64
+    if (shift == 2) {
+        sp_mask = -1;
+    } else
+#endif
+    {
+        sp_mask = get_sp_mask(env->segs[R_SS].flags);
+    }
+    sp = env->regs[R_ESP];
+    ssp = env->segs[R_SS].base;
+    new_eflags = 0; /* avoid warning */
+#ifdef TARGET_X86_64
+    if (shift == 2) {
+        POPQ_RA(sp, new_eip, retaddr);
+        POPQ_RA(sp, new_cs, retaddr);
+        new_cs &= 0xffff;
+        if (is_iret) {
+            POPQ_RA(sp, new_eflags, retaddr);
+        }
+    } else
+#endif
+    {
+        if (shift == 1) {
+            /* 32 bits */
+            POPL_RA(ssp, sp, sp_mask, new_eip, retaddr);
+            POPL_RA(ssp, sp, sp_mask, new_cs, retaddr);
+            new_cs &= 0xffff;
+            if (is_iret) {
+                POPL_RA(ssp, sp, sp_mask, new_eflags, retaddr);
+                if (new_eflags & VM_MASK) {
+                    goto return_to_vm86;
+                }
+            }
+        } else {
+            /* 16 bits */
+            POPW_RA(ssp, sp, sp_mask, new_eip, retaddr);
+            POPW_RA(ssp, sp, sp_mask, new_cs, retaddr);
+            if (is_iret) {
+                POPW_RA(ssp, sp, sp_mask, new_eflags, retaddr);
+            }
+        }
+    }
+    LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
+              new_cs, new_eip, shift, addend);
+    LOG_PCALL_STATE(CPU(x86_env_get_cpu(env)));
+    if ((new_cs & 0xfffc) == 0) {
+        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
+    }
+    if (load_segment_ra(env, &e1, &e2, new_cs, retaddr) != 0) {
+        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
+    }
+    if (!(e2 & DESC_S_MASK) ||
+        !(e2 & DESC_CS_MASK)) {
+        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
+    }
+    cpl = env->hflags & HF_CPL_MASK;
+    rpl = new_cs & 3;
+    if (rpl < cpl) {
+        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
+    }
+    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+    if (e2 & DESC_C_MASK) {
+        if (dpl > rpl) {
+            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
+        }
+    } else {
+        if (dpl != rpl) {
+            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
+        }
+    }
+    if (!(e2 & DESC_P_MASK)) {
+        raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, retaddr);
+    }
+
+    sp += addend;
+    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
+                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
+        /* return to same privilege level */
+        cpu_x86_load_seg_cache(env, R_CS, new_cs,
+                       get_seg_base(e1, e2),
+                       get_seg_limit(e1, e2),
+                       e2);
+    } else {
+        /* return to different privilege level */
+#ifdef TARGET_X86_64
+        if (shift == 2) {
+            POPQ_RA(sp, new_esp, retaddr);
+            POPQ_RA(sp, new_ss, retaddr);
+            new_ss &= 0xffff;
+        } else
+#endif
+        {
+            if (shift == 1) {
+                /* 32 bits */
+                POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
+                POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
+                new_ss &= 0xffff;
+            } else {
+                /* 16 bits */
+                POPW_RA(ssp, sp, sp_mask, new_esp, retaddr);
+                POPW_RA(ssp, sp, sp_mask, new_ss, retaddr);
+            }
+        }
+        LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
+                  new_ss, new_esp);
+        if ((new_ss & 0xfffc) == 0) {
+#ifdef TARGET_X86_64
+            /* NULL ss is allowed in long mode if the new CPL
+               (i.e. the CS RPL) != 3 */
+            /* XXX: test CS64? */
+            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
+                cpu_x86_load_seg_cache(env, R_SS, new_ss,
+                                       0, 0xffffffff,
+                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
+                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
+                                       DESC_W_MASK | DESC_A_MASK);
+                ss_e2 = DESC_B_MASK; /* XXX: should not be needed? */
+            } else
+#endif
+            {
+                raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
+            }
+        } else {
+            if ((new_ss & 3) != rpl) {
+                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
+            }
+            if (load_segment_ra(env, &ss_e1, &ss_e2, new_ss, retaddr) != 0) {
+                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
+            }
+            if (!(ss_e2 & DESC_S_MASK) ||
+                (ss_e2 & DESC_CS_MASK) ||
+                !(ss_e2 & DESC_W_MASK)) {
+                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
+            }
+            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
+            if (dpl != rpl) {
+                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
+            }
+            if (!(ss_e2 & DESC_P_MASK)) {
+                raise_exception_err_ra(env, EXCP0B_NOSEG, new_ss & 0xfffc, retaddr);
+            }
+            cpu_x86_load_seg_cache(env, R_SS, new_ss,
+                                   get_seg_base(ss_e1, ss_e2),
+                                   get_seg_limit(ss_e1, ss_e2),
+                                   ss_e2);
+        }
+
+        cpu_x86_load_seg_cache(env, R_CS, new_cs,
+                       get_seg_base(e1, e2),
+                       get_seg_limit(e1, e2),
+                       e2);
+        sp = new_esp;
+#ifdef TARGET_X86_64
+        if (env->hflags & HF_CS64_MASK) {
+            sp_mask = -1;
+        } else
+#endif
+        {
+            sp_mask = get_sp_mask(ss_e2);
+        }
+
+        /* validate data segments */
+        validate_seg(env, R_ES, rpl);
+        validate_seg(env, R_DS, rpl);
+        validate_seg(env, R_FS, rpl);
+        validate_seg(env, R_GS, rpl);
+
+        sp += addend;
+    }
+    SET_ESP(sp, sp_mask);
+    env->eip = new_eip;
+    if (is_iret) {
+        /* NOTE: 'cpl' is the _old_ CPL */
+        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
+        if (cpl == 0) {
+            eflags_mask |= IOPL_MASK;
+        }
+        iopl = (env->eflags >> IOPL_SHIFT) & 3;
+        if (cpl <= iopl) {
+            eflags_mask |= IF_MASK;
+        }
+        if (shift == 0) {
+            eflags_mask &= 0xffff;
+        }
+        cpu_load_eflags(env, new_eflags, eflags_mask);
+    }
+    return;
+
+ return_to_vm86:
+    POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
+    POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
+    POPL_RA(ssp, sp, sp_mask, new_es, retaddr);
+    POPL_RA(ssp, sp, sp_mask, new_ds, retaddr);
+    POPL_RA(ssp, sp, sp_mask, new_fs, retaddr);
+    POPL_RA(ssp, sp, sp_mask, new_gs, retaddr);
+
+    /* modify processor state */
+    cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK |
+                    IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK |
+                    VIP_MASK);
+    load_seg_vm(env, R_CS, new_cs & 0xffff);
+    load_seg_vm(env, R_SS, new_ss & 0xffff);
+    load_seg_vm(env, R_ES, new_es & 0xffff);
+    load_seg_vm(env, R_DS, new_ds & 0xffff);
+    load_seg_vm(env, R_FS, new_fs & 0xffff);
+    load_seg_vm(env, R_GS, new_gs & 0xffff);
+
+    env->eip = new_eip & 0xffff;
+    env->regs[R_ESP] = new_esp;
+}
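For reference, the stack frame the shift == 1 paths above consume, as a sketch (offsets relative to ESP on entry; shift == 0 halves every slot to 16 bits, shift == 2 widens them to 64):

#include <stdint.h>

struct iret32_frame {            /* illustrative layout, not QEMU code */
    uint32_t eip;                /* +0  always popped                  */
    uint32_t cs;                 /* +4  only low 16 bits are used      */
    uint32_t eflags;             /* +8  popped only when is_iret       */
    uint32_t esp;                /* +12 only on return to outer ring   */
    uint32_t ss;                 /* +16 only on return to outer ring   */
};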
+
+void helper_iret_protected(CPUX86State *env, int shift, int next_eip)
+{
+    int tss_selector, type;
+    uint32_t e1, e2;
+
+    /* specific case for TSS */
+    if (env->eflags & NT_MASK) {
+#ifdef TARGET_X86_64
+        if (env->hflags & HF_LMA_MASK) {
+            raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
+        }
+#endif
+        tss_selector = cpu_lduw_kernel_ra(env, env->tr.base + 0, GETPC());
+        if (tss_selector & 4) {
+            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
+        }
+        if (load_segment_ra(env, &e1, &e2, tss_selector, GETPC()) != 0) {
+            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
+        }
+        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
+        /* NOTE: the 0x17 mask tests the S bit together with the low
+           type bits, so only a busy TSS (16- or 32-bit) is accepted */
+        if (type != 3) {
+            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
+        }
+        switch_tss_ra(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip, GETPC());
+    } else {
+        helper_ret_protected(env, shift, 1, 0, GETPC());
+    }
+    env->hflags2 &= ~HF2_NMI_MASK;
+}
+
+void helper_lret_protected(CPUX86State *env, int shift, int addend)
+{
+    helper_ret_protected(env, shift, 0, addend, GETPC());
+}
+
+void helper_sysenter(CPUX86State *env)
+{
+    if (env->sysenter_cs == 0) {
+        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
+    }
+    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
+
+#ifdef TARGET_X86_64
+    if (env->hflags & HF_LMA_MASK) {
+        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
+                               0, 0xffffffff,
+                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
+                               DESC_S_MASK |
+                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
+                               DESC_L_MASK);
+    } else
+#endif
+    {
+        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
+                               0, 0xffffffff,
+                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
+                               DESC_S_MASK |
+                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
+    }
+    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
+                           0, 0xffffffff,
+                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
+                           DESC_S_MASK |
+                           DESC_W_MASK | DESC_A_MASK);
+    env->regs[R_ESP] = env->sysenter_esp;
+    env->eip = env->sysenter_eip;
+}
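All four flat selectors are derived from the single IA32_SYSENTER_CS MSR. A worked example, assuming a hypothetical sysenter_cs value of 0x0060:

/* CS = 0x0060 & 0xfffc       = 0x0060  (ring-0 code)  */
/* SS = (0x0060 + 8) & 0xfffc = 0x0068  (ring-0 stack) */
/* ESP and EIP come from IA32_SYSENTER_ESP/EIP verbatim. */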
+
+void helper_sysexit(CPUX86State *env, int dflag)
+{
+    int cpl;
+
+    cpl = env->hflags & HF_CPL_MASK;
+    if (env->sysenter_cs == 0 || cpl != 0) {
+        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
+    }
+#ifdef TARGET_X86_64
+    if (dflag == 2) {
+        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) |
+                               3, 0, 0xffffffff,
+                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
+                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
+                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
+                               DESC_L_MASK);
+        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) |
+                               3, 0, 0xffffffff,
+                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
+                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
+                               DESC_W_MASK | DESC_A_MASK);
+    } else
+#endif
+    {
+        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) |
+                               3, 0, 0xffffffff,
+                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
+                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
+                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
+        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) |
+                               3, 0, 0xffffffff,
+                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
+                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
+                               DESC_W_MASK | DESC_A_MASK);
+    }
+    env->regs[R_ESP] = env->regs[R_ECX];
+    env->eip = env->regs[R_EDX];
+}
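The SYSEXIT selector arithmetic mirrors SYSENTER but skips past the ring-0 descriptors; continuing the hypothetical sysenter_cs = 0x0060 example:

/* 32-bit: CS = ((0x0060 + 16) & 0xfffc) | 3 = 0x0073, SS = ((0x0060 + 24) & 0xfffc) | 3 = 0x007b */
/* 64-bit: CS = ((0x0060 + 32) & 0xfffc) | 3 = 0x0083, SS = ((0x0060 + 40) & 0xfffc) | 3 = 0x008b */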
+
+target_ulong helper_lsl(CPUX86State *env, target_ulong selector1)
+{
+    unsigned int limit;
+    uint32_t e1, e2, eflags, selector;
+    int rpl, dpl, cpl, type;
+
+    selector = selector1 & 0xffff;
+    eflags = cpu_cc_compute_all(env, CC_OP);
+    if ((selector & 0xfffc) == 0) {
+        goto fail;
+    }
+    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
+        goto fail;
+    }
+    rpl = selector & 3;
+    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+    cpl = env->hflags & HF_CPL_MASK;
+    if (e2 & DESC_S_MASK) {
+        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
+            /* conforming */
+        } else {
+            if (dpl < cpl || dpl < rpl) {
+                goto fail;
+            }
+        }
+    } else {
+        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
+        switch (type) {
+        case 1:
+        case 2:
+        case 3:
+        case 9:
+        case 11:
+            break;
+        default:
+            goto fail;
+        }
+        if (dpl < cpl || dpl < rpl) {
+        fail:
+            CC_SRC = eflags & ~CC_Z;
+            return 0;
+        }
+    }
+    limit = get_seg_limit(e1, e2);
+    CC_SRC = eflags | CC_Z;
+    return limit;
+}
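The helper reports success purely through ZF (folded into CC_SRC), which is how guest code consumes LSL. A sketch of the guest-side idiom, assuming GCC 6+ flag-output constraints (on failure the destination register is architecturally unchanged, so *limit is only meaningful when the function returns true):

#include <stdbool.h>
#include <stdint.h>

static inline bool lsl_ok(uint32_t sel, uint32_t *limit)
{
    bool zf;
    asm("lsl %2, %1" : "=@ccz"(zf), "=r"(*limit) : "r"(sel));
    return zf;    /* ZF set means the selector and limit are valid */
}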
+
+target_ulong helper_lar(CPUX86State *env, target_ulong selector1)
+{
+    uint32_t e1, e2, eflags, selector;
+    int rpl, dpl, cpl, type;
+
+    selector = selector1 & 0xffff;
+    eflags = cpu_cc_compute_all(env, CC_OP);
+    if ((selector & 0xfffc) == 0) {
+        goto fail;
+    }
+    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
+        goto fail;
+    }
+    rpl = selector & 3;
+    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+    cpl = env->hflags & HF_CPL_MASK;
+    if (e2 & DESC_S_MASK) {
+        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
+            /* conforming */
+        } else {
+            if (dpl < cpl || dpl < rpl) {
+                goto fail;
+            }
+        }
+    } else {
+        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
+        switch (type) {
+        case 1:
+        case 2:
+        case 3:
+        case 4:
+        case 5:
+        case 9:
+        case 11:
+        case 12:
+            break;
+        default:
+            goto fail;
+        }
+        if (dpl < cpl || dpl < rpl) {
+        fail:
+            CC_SRC = eflags & ~CC_Z;
+            return 0;
+        }
+    }
+    CC_SRC = eflags | CC_Z;
+    return e2 & 0x00f0ff00;
+}
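The 0x00f0ff00 mask keeps exactly the access-rights bits of the descriptor high dword: bits 8-15 (type, S, DPL, P) and bits 20-23 (AVL, L, D/B, G), dropping the base and limit fields. A worked example for a flat ring-3 32-bit code descriptor:

/* e2 = 0x00cffa00; LAR result = 0x00cffa00 & 0x00f0ff00 = 0x00c0fa00 */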
+
+void helper_verr(CPUX86State *env, target_ulong selector1)
+{
+    uint32_t e1, e2, eflags, selector;
+    int rpl, dpl, cpl;
+
+    selector = selector1 & 0xffff;
+    eflags = cpu_cc_compute_all(env, CC_OP);
+    if ((selector & 0xfffc) == 0) {
+        goto fail;
+    }
+    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
+        goto fail;
+    }
+    if (!(e2 & DESC_S_MASK)) {
+        goto fail;
+    }
+    rpl = selector & 3;
+    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+    cpl = env->hflags & HF_CPL_MASK;
+    if (e2 & DESC_CS_MASK) {
+        if (!(e2 & DESC_R_MASK)) {
+            goto fail;
+        }
+        if (!(e2 & DESC_C_MASK)) {
+            if (dpl < cpl || dpl < rpl) {
+                goto fail;
+            }
+        }
+    } else {
+        if (dpl < cpl || dpl < rpl) {
+        fail:
+            CC_SRC = eflags & ~CC_Z;
+            return;
+        }
+    }
+    CC_SRC = eflags | CC_Z;
+}
+
+void helper_verw(CPUX86State *env, target_ulong selector1)
+{
+    uint32_t e1, e2, eflags, selector;
+    int rpl, dpl, cpl;
+
+    selector = selector1 & 0xffff;
+    eflags = cpu_cc_compute_all(env, CC_OP);
+    if ((selector & 0xfffc) == 0) {
+        goto fail;
+    }
+    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
+        goto fail;
+    }
+    if (!(e2 & DESC_S_MASK)) {
+        goto fail;
+    }
+    rpl = selector & 3;
+    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+    cpl = env->hflags & HF_CPL_MASK;
+    if (e2 & DESC_CS_MASK) {
+        goto fail;
+    } else {
+        if (dpl < cpl || dpl < rpl) {
+            goto fail;
+        }
+        if (!(e2 & DESC_W_MASK)) {
+        fail:
+            CC_SRC = eflags & ~CC_Z;
+            return;
+        }
+    }
+    CC_SRC = eflags | CC_Z;
+}
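The two helpers share one rule set; condensed into a single predicate for clarity (a hypothetical standalone model, not part of the patch, with DESC_* bit positions written out):

#include <stdbool.h>
#include <stdint.h>

static bool seg_accessible(uint32_t e2, int cpl, int rpl, bool for_write)
{
    int dpl = (e2 >> 13) & 3;                     /* DESC_DPL_SHIFT */

    if (!(e2 & (1u << 12))) {                     /* DESC_S_MASK: system */
        return false;
    }
    if (e2 & (1u << 11)) {                        /* DESC_CS_MASK: code */
        if (for_write || !(e2 & (1u << 9))) {     /* VERW fails; VERR needs R */
            return false;
        }
        if (e2 & (1u << 10)) {                    /* DESC_C_MASK: conforming */
            return true;                          /* no privilege test */
        }
    } else if (for_write && !(e2 & (1u << 9))) {  /* DESC_W_MASK */
        return false;
    }
    return dpl >= cpl && dpl >= rpl;
}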
+
+#if defined(CONFIG_USER_ONLY)
+void cpu_x86_load_seg(CPUX86State *env, int seg_reg, int selector)
+{
+    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
+        int dpl = (env->eflags & VM_MASK) ? 3 : 0;
+        selector &= 0xffff;
+        cpu_x86_load_seg_cache(env, seg_reg, selector,
+                               (selector << 4), 0xffff,
+                               DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
+                               DESC_A_MASK | (dpl << DESC_DPL_SHIFT));
+    } else {
+        helper_load_seg(env, seg_reg, selector);
+    }
+}
+#endif
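The real-mode/VM86 path above computes the base with the classic selector << 4 rule; e.g. selector 0xb800 yields base 0xb8000, so 0xb800:0x0010 addresses linear 0xb8010:

#include <stdint.h>

static inline uint32_t real_mode_linear(uint16_t sel, uint16_t off)
{
    return ((uint32_t)sel << 4) + off;   /* 0xb800:0x0010 -> 0xb8010 */
}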
+
+/* check whether port I/O is allowed by the TSS I/O permission bitmap */
+static inline void check_io(CPUX86State *env, int addr, int size,
+                            uintptr_t retaddr)
+{
+    int io_offset, val, mask;
+
+    /* TSS must be a valid 32-bit one */
+    if (!(env->tr.flags & DESC_P_MASK) ||
+        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
+        env->tr.limit < 103) {
+        goto fail;
+    }
+    io_offset = cpu_lduw_kernel_ra(env, env->tr.base + 0x66, retaddr);
+    io_offset += (addr >> 3);
+    /* Note: the bitmap test can straddle a byte boundary, so both
+       bytes must lie within the TSS limit */
+    if ((io_offset + 1) > env->tr.limit) {
+        goto fail;
+    }
+    val = cpu_lduw_kernel_ra(env, env->tr.base + io_offset, retaddr);
+    val >>= (addr & 7);
+    mask = (1 << size) - 1;
+    /* all bits must be zero to allow the I/O */
+    if ((val & mask) != 0) {
+    fail:
+        raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
+    }
+}
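A worked lookup for `out 0x3f9, %al` (addr = 0x3f9, size = 1), illustrating the arithmetic above:

/* io_offset = lduw(tr.base + 0x66) + (0x3f9 >> 3)   bitmap byte 0x7f  */
/* val >>= 0x3f9 & 7                                 shift right by 1  */
/* mask = (1 << 1) - 1 = 1                           test a single bit */
/* the access traps with #GP(0) unless that bit is 0                   */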
+
+void helper_check_iob(CPUX86State *env, uint32_t t0)
+{
+    check_io(env, t0, 1, GETPC());
+}
+
+void helper_check_iow(CPUX86State *env, uint32_t t0)
+{
+    check_io(env, t0, 2, GETPC());
+}
+
+void helper_check_iol(CPUX86State *env, uint32_t t0)
+{
+    check_io(env, t0, 4, GETPC());
+}
diff --git a/target/i386/shift_helper_template.h b/target/i386/shift_helper_template.h
new file mode 100644
index 0000000000..cf91a2d284
--- /dev/null
+++ b/target/i386/shift_helper_template.h
@@ -0,0 +1,108 @@
+/*
+ *  x86 shift helpers
+ *
+ *  Copyright (c) 2008 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define DATA_BITS (1 << (3 + SHIFT))
+#define SHIFT_MASK (DATA_BITS - 1)
+#if DATA_BITS <= 32
+#define SHIFT1_MASK 0x1f
+#else
+#define SHIFT1_MASK 0x3f
+#endif
+
+#if DATA_BITS == 8
+#define SUFFIX b
+#define DATA_MASK 0xff
+#elif DATA_BITS == 16
+#define SUFFIX w
+#define DATA_MASK 0xffff
+#elif DATA_BITS == 32
+#define SUFFIX l
+#define DATA_MASK 0xffffffff
+#elif DATA_BITS == 64
+#define SUFFIX q
+#define DATA_MASK 0xffffffffffffffffULL
+#else
+#error unhandled operand size
+#endif
+
+target_ulong glue(helper_rcl, SUFFIX)(CPUX86State *env, target_ulong t0,
+                                      target_ulong t1)
+{
+    int count, eflags;
+    target_ulong src;
+    target_long res;
+
+    count = t1 & SHIFT1_MASK;
+#if DATA_BITS == 16
+    count = rclw_table[count];
+#elif DATA_BITS == 8
+    count = rclb_table[count];
+#endif
+    if (count) {
+        eflags = env->cc_src;
+        t0 &= DATA_MASK;
+        src = t0;
+        res = (t0 << count) | ((target_ulong)(eflags & CC_C) << (count - 1));
+        if (count > 1) {
+            res |= t0 >> (DATA_BITS + 1 - count);
+        }
+        t0 = res;
+        env->cc_src = (eflags & ~(CC_C | CC_O)) |
+            (lshift(src ^ t0, 11 - (DATA_BITS - 1)) & CC_O) |
+            ((src >> (DATA_BITS - count)) & CC_C);
+    }
+    return t0;
+}
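An 8-bit worked example of the rotate-through-carry above (t0 = 0x81, CF = 0, count = 2; rclb_table maps 2 to 2):

/* res  = (0x81 << 2) | (0 << 1)    = 0x204                     */
/* res |= 0x81 >> (8 + 1 - 2)       = 0x204 | 1 = 0x205 -> 0x05 */
/* new CF = (0x81 >> (8 - 2)) & 1   = 0                         */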
+
+target_ulong glue(helper_rcr, SUFFIX)(CPUX86State *env, target_ulong t0,
+                                      target_ulong t1)
+{
+    int count, eflags;
+    target_ulong src;
+    target_long res;
+
+    count = t1 & SHIFT1_MASK;
+#if DATA_BITS == 16
+    count = rclw_table[count];
+#elif DATA_BITS == 8
+    count = rclb_table[count];
+#endif
+    if (count) {
+        eflags = env->cc_src;
+        t0 &= DATA_MASK;
+        src = t0;
+        res = (t0 >> count) |
+            ((target_ulong)(eflags & CC_C) << (DATA_BITS - count));
+        if (count > 1) {
+            res |= t0 << (DATA_BITS + 1 - count);
+        }
+        t0 = res;
+        env->cc_src = (eflags & ~(CC_C | CC_O)) |
+            (lshift(src ^ t0, 11 - (DATA_BITS - 1)) & CC_O) |
+            ((src >> (count - 1)) & CC_C);
+    }
+    return t0;
+}
+
+#undef DATA_BITS
+#undef SHIFT_MASK
+#undef SHIFT1_MASK
+#undef DATA_MASK
+#undef SUFFIX
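This template is multiply-included, once per operand size, by a translation unit that defines SHIFT first. A sketch of the pattern (the actual include site in QEMU may differ):

#define SHIFT 0                          /* 8-bit:  helper_rclb/rcrb */
#include "shift_helper_template.h"
#undef SHIFT
#define SHIFT 1                          /* 16-bit: helper_rclw/rcrw */
#include "shift_helper_template.h"
#undef SHIFT
#define SHIFT 2                          /* 32-bit: helper_rcll/rcrl */
#include "shift_helper_template.h"
#undef SHIFT
#ifdef TARGET_X86_64
#define SHIFT 3                          /* 64-bit: helper_rclq/rcrq */
#include "shift_helper_template.h"
#undef SHIFT
#endif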
diff --git a/target/i386/smm_helper.c b/target/i386/smm_helper.c
new file mode 100644
index 0000000000..4dd6a2c544
--- /dev/null
+++ b/target/i386/smm_helper.c
@@ -0,0 +1,342 @@
+/*
+ *  x86 SMM helpers
+ *
+ *  Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "exec/log.h"
+
+/* SMM support */
+
+#if defined(CONFIG_USER_ONLY)
+
+void do_smm_enter(X86CPU *cpu)
+{
+}
+
+void helper_rsm(CPUX86State *env)
+{
+}
+
+#else
+
+#ifdef TARGET_X86_64
+#define SMM_REVISION_ID 0x00020064
+#else
+#define SMM_REVISION_ID 0x00020000
+#endif
+
+void cpu_smm_update(X86CPU *cpu)
+{
+    CPUX86State *env = &cpu->env;
+    bool smm_enabled = (env->hflags & HF_SMM_MASK);
+
+    if (cpu->smram) {
+        memory_region_set_enabled(cpu->smram, smm_enabled);
+    }
+}
+
+void do_smm_enter(X86CPU *cpu)
+{
+    CPUX86State *env = &cpu->env;
+    CPUState *cs = CPU(cpu);
+    target_ulong sm_state;
+    SegmentCache *dt;
+    int i, offset;
+
+    qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
+    log_cpu_state_mask(CPU_LOG_INT, CPU(cpu), CPU_DUMP_CCOP);
+
+    env->hflags |= HF_SMM_MASK;
+    if (env->hflags2 & HF2_NMI_MASK) {
+        env->hflags2 |= HF2_SMM_INSIDE_NMI_MASK;
+    } else {
+        env->hflags2 |= HF2_NMI_MASK;
+    }
+    cpu_smm_update(cpu);
+
+    sm_state = env->smbase + 0x8000;
+
+#ifdef TARGET_X86_64
+    for (i = 0; i < 6; i++) {
+        dt = &env->segs[i];
+        offset = 0x7e00 + i * 16;
+        x86_stw_phys(cs, sm_state + offset, dt->selector);
+        x86_stw_phys(cs, sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
+        x86_stl_phys(cs, sm_state + offset + 4, dt->limit);
+        x86_stq_phys(cs, sm_state + offset + 8, dt->base);
+    }
+
+    x86_stq_phys(cs, sm_state + 0x7e68, env->gdt.base);
+    x86_stl_phys(cs, sm_state + 0x7e64, env->gdt.limit);
+
+    x86_stw_phys(cs, sm_state + 0x7e70, env->ldt.selector);
+    x86_stq_phys(cs, sm_state + 0x7e78, env->ldt.base);
+    x86_stl_phys(cs, sm_state + 0x7e74, env->ldt.limit);
+    x86_stw_phys(cs, sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
+
+    x86_stq_phys(cs, sm_state + 0x7e88, env->idt.base);
+    x86_stl_phys(cs, sm_state + 0x7e84, env->idt.limit);
+
+    x86_stw_phys(cs, sm_state + 0x7e90, env->tr.selector);
+    x86_stq_phys(cs, sm_state + 0x7e98, env->tr.base);
+    x86_stl_phys(cs, sm_state + 0x7e94, env->tr.limit);
+    x86_stw_phys(cs, sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
+
+    /* ??? Vol 1, 16.5.6 Intel MPX and SMM says that IA32_BNDCFGS
+       is saved at offset 7ED0.  Vol 3, 34.4.1.1, Table 32-2, has
+       7EA0-7ED7 as "reserved".  What's this, and what's really
+       supposed to happen?  */
+    x86_stq_phys(cs, sm_state + 0x7ed0, env->efer);
+
+    x86_stq_phys(cs, sm_state + 0x7ff8, env->regs[R_EAX]);
+    x86_stq_phys(cs, sm_state + 0x7ff0, env->regs[R_ECX]);
+    x86_stq_phys(cs, sm_state + 0x7fe8, env->regs[R_EDX]);
+    x86_stq_phys(cs, sm_state + 0x7fe0, env->regs[R_EBX]);
+    x86_stq_phys(cs, sm_state + 0x7fd8, env->regs[R_ESP]);
+    x86_stq_phys(cs, sm_state + 0x7fd0, env->regs[R_EBP]);
+    x86_stq_phys(cs, sm_state + 0x7fc8, env->regs[R_ESI]);
+    x86_stq_phys(cs, sm_state + 0x7fc0, env->regs[R_EDI]);
+    for (i = 8; i < 16; i++) {
+        x86_stq_phys(cs, sm_state + 0x7ff8 - i * 8, env->regs[i]);
+    }
+    x86_stq_phys(cs, sm_state + 0x7f78, env->eip);
+    x86_stl_phys(cs, sm_state + 0x7f70, cpu_compute_eflags(env));
+    x86_stl_phys(cs, sm_state + 0x7f68, env->dr[6]);
+    x86_stl_phys(cs, sm_state + 0x7f60, env->dr[7]);
+
+    x86_stl_phys(cs, sm_state + 0x7f48, env->cr[4]);
+    x86_stq_phys(cs, sm_state + 0x7f50, env->cr[3]);
+    x86_stl_phys(cs, sm_state + 0x7f58, env->cr[0]);
+
+    x86_stl_phys(cs, sm_state + 0x7efc, SMM_REVISION_ID);
+    x86_stl_phys(cs, sm_state + 0x7f00, env->smbase);
+#else
+    x86_stl_phys(cs, sm_state + 0x7ffc, env->cr[0]);
+    x86_stl_phys(cs, sm_state + 0x7ff8, env->cr[3]);
+    x86_stl_phys(cs, sm_state + 0x7ff4, cpu_compute_eflags(env));
+    x86_stl_phys(cs, sm_state + 0x7ff0, env->eip);
+    x86_stl_phys(cs, sm_state + 0x7fec, env->regs[R_EDI]);
+    x86_stl_phys(cs, sm_state + 0x7fe8, env->regs[R_ESI]);
+    x86_stl_phys(cs, sm_state + 0x7fe4, env->regs[R_EBP]);
+    x86_stl_phys(cs, sm_state + 0x7fe0, env->regs[R_ESP]);
+    x86_stl_phys(cs, sm_state + 0x7fdc, env->regs[R_EBX]);
+    x86_stl_phys(cs, sm_state + 0x7fd8, env->regs[R_EDX]);
+    x86_stl_phys(cs, sm_state + 0x7fd4, env->regs[R_ECX]);
+    x86_stl_phys(cs, sm_state + 0x7fd0, env->regs[R_EAX]);
+    x86_stl_phys(cs, sm_state + 0x7fcc, env->dr[6]);
+    x86_stl_phys(cs, sm_state + 0x7fc8, env->dr[7]);
+
+    x86_stl_phys(cs, sm_state + 0x7fc4, env->tr.selector);
+    x86_stl_phys(cs, sm_state + 0x7f64, env->tr.base);
+    x86_stl_phys(cs, sm_state + 0x7f60, env->tr.limit);
+    x86_stl_phys(cs, sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
+
+    x86_stl_phys(cs, sm_state + 0x7fc0, env->ldt.selector);
+    x86_stl_phys(cs, sm_state + 0x7f80, env->ldt.base);
+    x86_stl_phys(cs, sm_state + 0x7f7c, env->ldt.limit);
+    x86_stl_phys(cs, sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
+
+    x86_stl_phys(cs, sm_state + 0x7f74, env->gdt.base);
+    x86_stl_phys(cs, sm_state + 0x7f70, env->gdt.limit);
+
+    x86_stl_phys(cs, sm_state + 0x7f58, env->idt.base);
+    x86_stl_phys(cs, sm_state + 0x7f54, env->idt.limit);
+
+    for (i = 0; i < 6; i++) {
+        dt = &env->segs[i];
+        if (i < 3) {
+            offset = 0x7f84 + i * 12;
+        } else {
+            offset = 0x7f2c + (i - 3) * 12;
+        }
+        x86_stl_phys(cs, sm_state + 0x7fa8 + i * 4, dt->selector);
+        x86_stl_phys(cs, sm_state + offset + 8, dt->base);
+        x86_stl_phys(cs, sm_state + offset + 4, dt->limit);
+        x86_stl_phys(cs, sm_state + offset, (dt->flags >> 8) & 0xf0ff);
+    }
+    x86_stl_phys(cs, sm_state + 0x7f14, env->cr[4]);
+
+    x86_stl_phys(cs, sm_state + 0x7efc, SMM_REVISION_ID);
+    x86_stl_phys(cs, sm_state + 0x7ef8, env->smbase);
+#endif
+    /* init SMM cpu state */
+
+#ifdef TARGET_X86_64
+    cpu_load_efer(env, 0);
+#endif
+    cpu_load_eflags(env, 0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C |
+                              DF_MASK));
+    env->eip = 0x00008000;
+    cpu_x86_update_cr0(env,
+                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK |
+                                      CR0_PG_MASK));
+    cpu_x86_update_cr4(env, 0);
+    env->dr[7] = 0x00000400;
+
+    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
+                           0xffffffff,
+                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
+                           DESC_G_MASK | DESC_A_MASK);
+    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff,
+                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
+                           DESC_G_MASK | DESC_A_MASK);
+    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff,
+                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
+                           DESC_G_MASK | DESC_A_MASK);
+    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff,
+                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
+                           DESC_G_MASK | DESC_A_MASK);
+    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff,
+                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
+                           DESC_G_MASK | DESC_A_MASK);
+    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff,
+                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
+                           DESC_G_MASK | DESC_A_MASK);
+}
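A worked entry-state example, assuming the conventional reset default smbase = 0x30000 (the actual value is machine-dependent):

/* sm_state       = 0x30000 + 0x8000 = 0x38000                          */
/* saved fields   = sm_state + 0x7exx/0x7fxx = 0x3fexx/0x3ffxx          */
/* CS.selector    = 0x30000 >> 4 = 0x3000, CS.base = 0x30000            */
/* first handler insn: CS.base + EIP = 0x30000 + 0x8000 = 0x38000       */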
+
+void helper_rsm(CPUX86State *env)
+{
+    X86CPU *cpu = x86_env_get_cpu(env);
+    CPUState *cs = CPU(cpu);
+    target_ulong sm_state;
+    int i, offset;
+    uint32_t val;
+
+    sm_state = env->smbase + 0x8000;
+#ifdef TARGET_X86_64
+    cpu_load_efer(env, x86_ldq_phys(cs, sm_state + 0x7ed0));
+
+    env->gdt.base = x86_ldq_phys(cs, sm_state + 0x7e68);
+    env->gdt.limit = x86_ldl_phys(cs, sm_state + 0x7e64);
+
+    env->ldt.selector = x86_lduw_phys(cs, sm_state + 0x7e70);
+    env->ldt.base = x86_ldq_phys(cs, sm_state + 0x7e78);
+    env->ldt.limit = x86_ldl_phys(cs, sm_state + 0x7e74);
+    env->ldt.flags = (x86_lduw_phys(cs, sm_state + 0x7e72) & 0xf0ff) << 8;
+
+    env->idt.base = x86_ldq_phys(cs, sm_state + 0x7e88);
+    env->idt.limit = x86_ldl_phys(cs, sm_state + 0x7e84);
+
+    env->tr.selector = x86_lduw_phys(cs, sm_state + 0x7e90);
+    env->tr.base = x86_ldq_phys(cs, sm_state + 0x7e98);
+    env->tr.limit = x86_ldl_phys(cs, sm_state + 0x7e94);
+    env->tr.flags = (x86_lduw_phys(cs, sm_state + 0x7e92) & 0xf0ff) << 8;
+
+    env->regs[R_EAX] = x86_ldq_phys(cs, sm_state + 0x7ff8);
+    env->regs[R_ECX] = x86_ldq_phys(cs, sm_state + 0x7ff0);
+    env->regs[R_EDX] = x86_ldq_phys(cs, sm_state + 0x7fe8);
+    env->regs[R_EBX] = x86_ldq_phys(cs, sm_state + 0x7fe0);
+    env->regs[R_ESP] = x86_ldq_phys(cs, sm_state + 0x7fd8);
+    env->regs[R_EBP] = x86_ldq_phys(cs, sm_state + 0x7fd0);
+    env->regs[R_ESI] = x86_ldq_phys(cs, sm_state + 0x7fc8);
+    env->regs[R_EDI] = x86_ldq_phys(cs, sm_state + 0x7fc0);
+    for (i = 8; i < 16; i++) {
+        env->regs[i] = x86_ldq_phys(cs, sm_state + 0x7ff8 - i * 8);
+    }
+    env->eip = x86_ldq_phys(cs, sm_state + 0x7f78);
+    cpu_load_eflags(env, x86_ldl_phys(cs, sm_state + 0x7f70),
+                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
+    env->dr[6] = x86_ldl_phys(cs, sm_state + 0x7f68);
+    env->dr[7] = x86_ldl_phys(cs, sm_state + 0x7f60);
+
+    cpu_x86_update_cr4(env, x86_ldl_phys(cs, sm_state + 0x7f48));
+    cpu_x86_update_cr3(env, x86_ldq_phys(cs, sm_state + 0x7f50));
+    cpu_x86_update_cr0(env, x86_ldl_phys(cs, sm_state + 0x7f58));
+
+    for (i = 0; i < 6; i++) {
+        offset = 0x7e00 + i * 16;
+        cpu_x86_load_seg_cache(env, i,
+                               x86_lduw_phys(cs, sm_state + offset),
+                               x86_ldq_phys(cs, sm_state + offset + 8),
+                               x86_ldl_phys(cs, sm_state + offset + 4),
+                               (x86_lduw_phys(cs, sm_state + offset + 2) &
+                                0xf0ff) << 8);
+    }
+
+    val = x86_ldl_phys(cs, sm_state + 0x7efc); /* revision ID */
+    if (val & 0x20000) {
+        env->smbase = x86_ldl_phys(cs, sm_state + 0x7f00);
+    }
+#else
+    cpu_x86_update_cr0(env, x86_ldl_phys(cs, sm_state + 0x7ffc));
+    cpu_x86_update_cr3(env, x86_ldl_phys(cs, sm_state + 0x7ff8));
+    cpu_load_eflags(env, x86_ldl_phys(cs, sm_state + 0x7ff4),
+                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
+    env->eip = x86_ldl_phys(cs, sm_state + 0x7ff0);
+    env->regs[R_EDI] = x86_ldl_phys(cs, sm_state + 0x7fec);
+    env->regs[R_ESI] = x86_ldl_phys(cs, sm_state + 0x7fe8);
+    env->regs[R_EBP] = x86_ldl_phys(cs, sm_state + 0x7fe4);
+    env->regs[R_ESP] = x86_ldl_phys(cs, sm_state + 0x7fe0);
+    env->regs[R_EBX] = x86_ldl_phys(cs, sm_state + 0x7fdc);
+    env->regs[R_EDX] = x86_ldl_phys(cs, sm_state + 0x7fd8);
+    env->regs[R_ECX] = x86_ldl_phys(cs, sm_state + 0x7fd4);
+    env->regs[R_EAX] = x86_ldl_phys(cs, sm_state + 0x7fd0);
+    env->dr[6] = x86_ldl_phys(cs, sm_state + 0x7fcc);
+    env->dr[7] = x86_ldl_phys(cs, sm_state + 0x7fc8);
+
+    env->tr.selector = x86_ldl_phys(cs, sm_state + 0x7fc4) & 0xffff;
+    env->tr.base = x86_ldl_phys(cs, sm_state + 0x7f64);
+    env->tr.limit = x86_ldl_phys(cs, sm_state + 0x7f60);
+    env->tr.flags = (x86_ldl_phys(cs, sm_state + 0x7f5c) & 0xf0ff) << 8;
+
+    env->ldt.selector = x86_ldl_phys(cs, sm_state + 0x7fc0) & 0xffff;
+    env->ldt.base = x86_ldl_phys(cs, sm_state + 0x7f80);
+    env->ldt.limit = x86_ldl_phys(cs, sm_state + 0x7f7c);
+    env->ldt.flags = (x86_ldl_phys(cs, sm_state + 0x7f78) & 0xf0ff) << 8;
+
+    env->gdt.base = x86_ldl_phys(cs, sm_state + 0x7f74);
+    env->gdt.limit = x86_ldl_phys(cs, sm_state + 0x7f70);
+
+    env->idt.base = x86_ldl_phys(cs, sm_state + 0x7f58);
+    env->idt.limit = x86_ldl_phys(cs, sm_state + 0x7f54);
+
+    for (i = 0; i < 6; i++) {
+        if (i < 3) {
+            offset = 0x7f84 + i * 12;
+        } else {
+            offset = 0x7f2c + (i - 3) * 12;
+        }
+        cpu_x86_load_seg_cache(env, i,
+                               x86_ldl_phys(cs,
+                                        sm_state + 0x7fa8 + i * 4) & 0xffff,
+                               x86_ldl_phys(cs, sm_state + offset + 8),
+                               x86_ldl_phys(cs, sm_state + offset + 4),
+                               (x86_ldl_phys(cs,
+                                         sm_state + offset) & 0xf0ff) << 8);
+    }
+    cpu_x86_update_cr4(env, x86_ldl_phys(cs, sm_state + 0x7f14));
+
+    val = x86_ldl_phys(cs, sm_state + 0x7efc); /* revision ID */
+    if (val & 0x20000) {
+        env->smbase = x86_ldl_phys(cs, sm_state + 0x7ef8);
+    }
+#endif
+    if ((env->hflags2 & HF2_SMM_INSIDE_NMI_MASK) == 0) {
+        env->hflags2 &= ~HF2_NMI_MASK;
+    }
+    env->hflags2 &= ~HF2_SMM_INSIDE_NMI_MASK;
+    env->hflags &= ~HF_SMM_MASK;
+    cpu_smm_update(cpu);
+
+    qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
+    log_cpu_state_mask(CPU_LOG_INT, CPU(cpu), CPU_DUMP_CCOP);
+}
+
+#endif /* !CONFIG_USER_ONLY */
diff --git a/target/i386/svm.h b/target/i386/svm.h
new file mode 100644
index 0000000000..922c8fd39c
--- /dev/null
+++ b/target/i386/svm.h
@@ -0,0 +1,222 @@
+#ifndef SVM_H
+#define SVM_H
+
+#define TLB_CONTROL_DO_NOTHING 0
+#define TLB_CONTROL_FLUSH_ALL_ASID 1
+
+#define V_TPR_MASK 0x0f
+
+#define V_IRQ_SHIFT 8
+#define V_IRQ_MASK (1 << V_IRQ_SHIFT)
+
+#define V_INTR_PRIO_SHIFT 16
+#define V_INTR_PRIO_MASK (0x0f << V_INTR_PRIO_SHIFT)
+
+#define V_IGN_TPR_SHIFT 20
+#define V_IGN_TPR_MASK (1 << V_IGN_TPR_SHIFT)
+
+#define V_INTR_MASKING_SHIFT 24
+#define V_INTR_MASKING_MASK (1 << V_INTR_MASKING_SHIFT)
+
+#define SVM_INTERRUPT_SHADOW_MASK 1
+
+#define SVM_IOIO_STR_SHIFT 2
+#define SVM_IOIO_REP_SHIFT 3
+#define SVM_IOIO_SIZE_SHIFT 4
+#define SVM_IOIO_ASIZE_SHIFT 7
+
+#define SVM_IOIO_TYPE_MASK 1
+#define SVM_IOIO_STR_MASK (1 << SVM_IOIO_STR_SHIFT)
+#define SVM_IOIO_REP_MASK (1 << SVM_IOIO_REP_SHIFT)
+#define SVM_IOIO_SIZE_MASK (7 << SVM_IOIO_SIZE_SHIFT)
+#define SVM_IOIO_ASIZE_MASK (7 << SVM_IOIO_ASIZE_SHIFT)
+
+#define SVM_EVTINJ_VEC_MASK 0xff
+
+#define SVM_EVTINJ_TYPE_SHIFT 8
+#define SVM_EVTINJ_TYPE_MASK (7 << SVM_EVTINJ_TYPE_SHIFT)
+
+#define SVM_EVTINJ_TYPE_INTR (0 << SVM_EVTINJ_TYPE_SHIFT)
+#define SVM_EVTINJ_TYPE_NMI (2 << SVM_EVTINJ_TYPE_SHIFT)
+#define SVM_EVTINJ_TYPE_EXEPT (3 << SVM_EVTINJ_TYPE_SHIFT)
+#define SVM_EVTINJ_TYPE_SOFT (4 << SVM_EVTINJ_TYPE_SHIFT)
+
+#define SVM_EVTINJ_VALID (1 << 31)
+#define SVM_EVTINJ_VALID_ERR (1 << 11)
+
+#define SVM_EXITINTINFO_VEC_MASK SVM_EVTINJ_VEC_MASK
+
+#define	SVM_EXITINTINFO_TYPE_INTR SVM_EVTINJ_TYPE_INTR
+#define	SVM_EXITINTINFO_TYPE_NMI SVM_EVTINJ_TYPE_NMI
+#define	SVM_EXITINTINFO_TYPE_EXEPT SVM_EVTINJ_TYPE_EXEPT
+#define	SVM_EXITINTINFO_TYPE_SOFT SVM_EVTINJ_TYPE_SOFT
+
+#define SVM_EXITINTINFO_VALID SVM_EVTINJ_VALID
+#define SVM_EXITINTINFO_VALID_ERR SVM_EVTINJ_VALID_ERR
+
+#define	SVM_EXIT_READ_CR0 	0x000
+#define	SVM_EXIT_READ_CR3 	0x003
+#define	SVM_EXIT_READ_CR4 	0x004
+#define	SVM_EXIT_READ_CR8 	0x008
+#define	SVM_EXIT_WRITE_CR0 	0x010
+#define	SVM_EXIT_WRITE_CR3 	0x013
+#define	SVM_EXIT_WRITE_CR4 	0x014
+#define	SVM_EXIT_WRITE_CR8 	0x018
+#define	SVM_EXIT_READ_DR0 	0x020
+#define	SVM_EXIT_READ_DR1 	0x021
+#define	SVM_EXIT_READ_DR2 	0x022
+#define	SVM_EXIT_READ_DR3 	0x023
+#define	SVM_EXIT_READ_DR4 	0x024
+#define	SVM_EXIT_READ_DR5 	0x025
+#define	SVM_EXIT_READ_DR6 	0x026
+#define	SVM_EXIT_READ_DR7 	0x027
+#define	SVM_EXIT_WRITE_DR0 	0x030
+#define	SVM_EXIT_WRITE_DR1 	0x031
+#define	SVM_EXIT_WRITE_DR2 	0x032
+#define	SVM_EXIT_WRITE_DR3 	0x033
+#define	SVM_EXIT_WRITE_DR4 	0x034
+#define	SVM_EXIT_WRITE_DR5 	0x035
+#define	SVM_EXIT_WRITE_DR6 	0x036
+#define	SVM_EXIT_WRITE_DR7 	0x037
+#define SVM_EXIT_EXCP_BASE      0x040
+#define SVM_EXIT_INTR		0x060
+#define SVM_EXIT_NMI		0x061
+#define SVM_EXIT_SMI		0x062
+#define SVM_EXIT_INIT		0x063
+#define SVM_EXIT_VINTR		0x064
+#define SVM_EXIT_CR0_SEL_WRITE	0x065
+#define SVM_EXIT_IDTR_READ	0x066
+#define SVM_EXIT_GDTR_READ	0x067
+#define SVM_EXIT_LDTR_READ	0x068
+#define SVM_EXIT_TR_READ	0x069
+#define SVM_EXIT_IDTR_WRITE	0x06a
+#define SVM_EXIT_GDTR_WRITE	0x06b
+#define SVM_EXIT_LDTR_WRITE	0x06c
+#define SVM_EXIT_TR_WRITE	0x06d
+#define SVM_EXIT_RDTSC		0x06e
+#define SVM_EXIT_RDPMC		0x06f
+#define SVM_EXIT_PUSHF		0x070
+#define SVM_EXIT_POPF		0x071
+#define SVM_EXIT_CPUID		0x072
+#define SVM_EXIT_RSM		0x073
+#define SVM_EXIT_IRET		0x074
+#define SVM_EXIT_SWINT		0x075
+#define SVM_EXIT_INVD		0x076
+#define SVM_EXIT_PAUSE		0x077
+#define SVM_EXIT_HLT		0x078
+#define SVM_EXIT_INVLPG		0x079
+#define SVM_EXIT_INVLPGA	0x07a
+#define SVM_EXIT_IOIO		0x07b
+#define SVM_EXIT_MSR		0x07c
+#define SVM_EXIT_TASK_SWITCH	0x07d
+#define SVM_EXIT_FERR_FREEZE	0x07e
+#define SVM_EXIT_SHUTDOWN	0x07f
+#define SVM_EXIT_VMRUN		0x080
+#define SVM_EXIT_VMMCALL	0x081
+#define SVM_EXIT_VMLOAD		0x082
+#define SVM_EXIT_VMSAVE		0x083
+#define SVM_EXIT_STGI		0x084
+#define SVM_EXIT_CLGI		0x085
+#define SVM_EXIT_SKINIT		0x086
+#define SVM_EXIT_RDTSCP		0x087
+#define SVM_EXIT_ICEBP		0x088
+#define SVM_EXIT_WBINVD		0x089
+/* listed only in the documentation; possibly wrong */
+#define SVM_EXIT_MONITOR	0x08a
+#define SVM_EXIT_MWAIT		0x08b
+#define SVM_EXIT_NPF  		0x400
+
+#define SVM_EXIT_ERR		-1
+
+#define SVM_CR0_SELECTIVE_MASK ((1 << 3) | (1 << 1)) /* TS and MP */
+
+struct QEMU_PACKED vmcb_control_area {
+	uint16_t intercept_cr_read;
+	uint16_t intercept_cr_write;
+	uint16_t intercept_dr_read;
+	uint16_t intercept_dr_write;
+	uint32_t intercept_exceptions;
+	uint64_t intercept;
+	uint8_t reserved_1[44];
+	uint64_t iopm_base_pa;
+	uint64_t msrpm_base_pa;
+	uint64_t tsc_offset;
+	uint32_t asid;
+	uint8_t tlb_ctl;
+	uint8_t reserved_2[3];
+	uint32_t int_ctl;
+	uint32_t int_vector;
+	uint32_t int_state;
+	uint8_t reserved_3[4];
+	uint64_t exit_code;
+	uint64_t exit_info_1;
+	uint64_t exit_info_2;
+	uint32_t exit_int_info;
+	uint32_t exit_int_info_err;
+	uint64_t nested_ctl;
+	uint8_t reserved_4[16];
+	uint32_t event_inj;
+	uint32_t event_inj_err;
+	uint64_t nested_cr3;
+	uint64_t lbr_ctl;
+	uint8_t reserved_5[832];
+};
+
+struct QEMU_PACKED vmcb_seg {
+	uint16_t selector;
+	uint16_t attrib;
+	uint32_t limit;
+	uint64_t base;
+};
+
+struct QEMU_PACKED vmcb_save_area {
+	struct vmcb_seg es;
+	struct vmcb_seg cs;
+	struct vmcb_seg ss;
+	struct vmcb_seg ds;
+	struct vmcb_seg fs;
+	struct vmcb_seg gs;
+	struct vmcb_seg gdtr;
+	struct vmcb_seg ldtr;
+	struct vmcb_seg idtr;
+	struct vmcb_seg tr;
+	uint8_t reserved_1[43];
+	uint8_t cpl;
+	uint8_t reserved_2[4];
+	uint64_t efer;
+	uint8_t reserved_3[112];
+	uint64_t cr4;
+	uint64_t cr3;
+	uint64_t cr0;
+	uint64_t dr7;
+	uint64_t dr6;
+	uint64_t rflags;
+	uint64_t rip;
+	uint8_t reserved_4[88];
+	uint64_t rsp;
+	uint8_t reserved_5[24];
+	uint64_t rax;
+	uint64_t star;
+	uint64_t lstar;
+	uint64_t cstar;
+	uint64_t sfmask;
+	uint64_t kernel_gs_base;
+	uint64_t sysenter_cs;
+	uint64_t sysenter_esp;
+	uint64_t sysenter_eip;
+	uint64_t cr2;
+	uint8_t reserved_6[32];
+	uint64_t g_pat;
+	uint64_t dbgctl;
+	uint64_t br_from;
+	uint64_t br_to;
+	uint64_t last_excp_from;
+	uint64_t last_excp_to;
+};
+
+struct QEMU_PACKED vmcb {
+	struct vmcb_control_area control;
+	struct vmcb_save_area save;
+};
+
+#endif
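The reserved_* padding above is sized so the structures match the AMD APM layout; a hedged compile-time check one could add (not in this patch, C11 static_assert assumed):

#include <assert.h>

static_assert(sizeof(struct vmcb_control_area) == 1024,
              "control area must span the VMCB's first KiB");
static_assert(sizeof(struct vmcb) <= 4096,
              "a VMCB must fit in one 4 KiB page");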
diff --git a/target/i386/svm_helper.c b/target/i386/svm_helper.c
new file mode 100644
index 0000000000..782b3f12f0
--- /dev/null
+++ b/target/i386/svm_helper.c
@@ -0,0 +1,774 @@
+/*
+ *  x86 SVM helpers
+ *
+ *  Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/cpu-all.h"
+#include "exec/helper-proto.h"
+#include "exec/exec-all.h"
+#include "exec/cpu_ldst.h"
+
+/* Secure Virtual Machine helpers */
+
+#if defined(CONFIG_USER_ONLY)
+
+void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
+{
+}
+
+void helper_vmmcall(CPUX86State *env)
+{
+}
+
+void helper_vmload(CPUX86State *env, int aflag)
+{
+}
+
+void helper_vmsave(CPUX86State *env, int aflag)
+{
+}
+
+void helper_stgi(CPUX86State *env)
+{
+}
+
+void helper_clgi(CPUX86State *env)
+{
+}
+
+void helper_skinit(CPUX86State *env)
+{
+}
+
+void helper_invlpga(CPUX86State *env, int aflag)
+{
+}
+
+void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
+{
+}
+
+void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
+{
+}
+
+void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
+                                      uint64_t param)
+{
+}
+
+void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
+                                   uint64_t param)
+{
+}
+
+void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
+                         uint32_t next_eip_addend)
+{
+}
+#else
+
+static inline void svm_save_seg(CPUX86State *env, hwaddr addr,
+                                const SegmentCache *sc)
+{
+    CPUState *cs = CPU(x86_env_get_cpu(env));
+
+    x86_stw_phys(cs, addr + offsetof(struct vmcb_seg, selector),
+             sc->selector);
+    x86_stq_phys(cs, addr + offsetof(struct vmcb_seg, base),
+             sc->base);
+    x86_stl_phys(cs, addr + offsetof(struct vmcb_seg, limit),
+             sc->limit);
+    x86_stw_phys(cs, addr + offsetof(struct vmcb_seg, attrib),
+             ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
+}
+
+static inline void svm_load_seg(CPUX86State *env, hwaddr addr,
+                                SegmentCache *sc)
+{
+    CPUState *cs = CPU(x86_env_get_cpu(env));
+    unsigned int flags;
+
+    sc->selector = x86_lduw_phys(cs,
+                             addr + offsetof(struct vmcb_seg, selector));
+    sc->base = x86_ldq_phys(cs, addr + offsetof(struct vmcb_seg, base));
+    sc->limit = x86_ldl_phys(cs, addr + offsetof(struct vmcb_seg, limit));
+    flags = x86_lduw_phys(cs, addr + offsetof(struct vmcb_seg, attrib));
+    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
+}
+
+static inline void svm_load_seg_cache(CPUX86State *env, hwaddr addr,
+                                      int seg_reg)
+{
+    SegmentCache sc1, *sc = &sc1;
+
+    svm_load_seg(env, addr, sc);
+    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
+                           sc->base, sc->limit, sc->flags);
+}
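svm_save_seg() and svm_load_seg() are inverses over the attribute bits; a round-trip worked example for the flat ring-3 code flags 0x00cffa00:

/* save: ((0x00cffa00 >> 8) & 0xff) | ((0x00cffa00 >> 12) & 0x0f00) = 0x0cfa */
/* load: ((0x0cfa & 0xff) << 8) | ((0x0cfa & 0x0f00) << 12) = 0x00c0fa00     */
/* bits 16-19 (the limit-high nibble) are intentionally not round-tripped    */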
+
+void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
+{
+    CPUState *cs = CPU(x86_env_get_cpu(env));
+    target_ulong addr;
+    uint32_t event_inj;
+    uint32_t int_ctl;
+
+    cpu_svm_check_intercept_param(env, SVM_EXIT_VMRUN, 0);
+
+    if (aflag == 2) {
+        addr = env->regs[R_EAX];
+    } else {
+        addr = (uint32_t)env->regs[R_EAX];
+    }
+
+    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);
+
+    env->vm_vmcb = addr;
+
+    /* save the current CPU state in the hsave page */
+    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.base),
+             env->gdt.base);
+    x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit),
+             env->gdt.limit);
+
+    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.base),
+             env->idt.base);
+    x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.limit),
+             env->idt.limit);
+
+    x86_stq_phys(cs,
+             env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
+    x86_stq_phys(cs,
+             env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
+    x86_stq_phys(cs,
+             env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
+    x86_stq_phys(cs,
+             env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
+    x86_stq_phys(cs,
+             env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
+    x86_stq_phys(cs,
+             env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
+
+    x86_stq_phys(cs,
+             env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
+    x86_stq_phys(cs,
+             env->vm_hsave + offsetof(struct vmcb, save.rflags),
+             cpu_compute_eflags(env));
+
+    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.es),
+                 &env->segs[R_ES]);
+    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
+                 &env->segs[R_CS]);
+    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
+                 &env->segs[R_SS]);
+    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
+                 &env->segs[R_DS]);
+
+    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.rip),
+             env->eip + next_eip_addend);
+    x86_stq_phys(cs,
+             env->vm_hsave + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
+    x86_stq_phys(cs,
+             env->vm_hsave + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);
+
+    /* load the interception bitmaps so we do not need to access the
+       vmcb in svm mode */
+    env->intercept = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
+                                                      control.intercept));
+    env->intercept_cr_read = x86_lduw_phys(cs, env->vm_vmcb +
+                                       offsetof(struct vmcb,
+                                                control.intercept_cr_read));
+    env->intercept_cr_write = x86_lduw_phys(cs, env->vm_vmcb +
+                                        offsetof(struct vmcb,
+                                                 control.intercept_cr_write));
+    env->intercept_dr_read = x86_lduw_phys(cs, env->vm_vmcb +
+                                       offsetof(struct vmcb,
+                                                control.intercept_dr_read));
+    env->intercept_dr_write = x86_lduw_phys(cs, env->vm_vmcb +
+                                        offsetof(struct vmcb,
+                                                 control.intercept_dr_write));
+    env->intercept_exceptions = x86_ldl_phys(cs, env->vm_vmcb +
+                                         offsetof(struct vmcb,
+                                                  control.intercept_exceptions
+                                                  ));
+
+    /* enable intercepts */
+    env->hflags |= HF_SVMI_MASK;
+
+    env->tsc_offset = x86_ldq_phys(cs, env->vm_vmcb +
+                               offsetof(struct vmcb, control.tsc_offset));
+
+    env->gdt.base  = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
+                                                      save.gdtr.base));
+    env->gdt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
+                                                      save.gdtr.limit));
+
+    env->idt.base  = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
+                                                      save.idtr.base));
+    env->idt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
+                                                      save.idtr.limit));
+
+    /* clear exit_info_2 so we behave like the real hardware */
+    x86_stq_phys(cs,
+             env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
+
+    cpu_x86_update_cr0(env, x86_ldq_phys(cs,
+                                     env->vm_vmcb + offsetof(struct vmcb,
+                                                             save.cr0)));
+    cpu_x86_update_cr4(env, x86_ldq_phys(cs,
+                                     env->vm_vmcb + offsetof(struct vmcb,
+                                                             save.cr4)));
+    cpu_x86_update_cr3(env, x86_ldq_phys(cs,
+                                     env->vm_vmcb + offsetof(struct vmcb,
+                                                             save.cr3)));
+    env->cr[2] = x86_ldq_phys(cs,
+                          env->vm_vmcb + offsetof(struct vmcb, save.cr2));
+    int_ctl = x86_ldl_phys(cs,
+                       env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
+    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
+    if (int_ctl & V_INTR_MASKING_MASK) {
+        env->v_tpr = int_ctl & V_TPR_MASK;
+        env->hflags2 |= HF2_VINTR_MASK;
+        if (env->eflags & IF_MASK) {
+            env->hflags2 |= HF2_HIF_MASK;
+        }
+    }
+
+    cpu_load_efer(env,
+                  x86_ldq_phys(cs,
+                           env->vm_vmcb + offsetof(struct vmcb, save.efer)));
+    env->eflags = 0;
+    cpu_load_eflags(env, x86_ldq_phys(cs,
+                                  env->vm_vmcb + offsetof(struct vmcb,
+                                                          save.rflags)),
+                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
+
+    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
+                       R_ES);
+    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
+                       R_CS);
+    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
+                       R_SS);
+    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
+                       R_DS);
+
+    env->eip = x86_ldq_phys(cs,
+                        env->vm_vmcb + offsetof(struct vmcb, save.rip));
+
+    env->regs[R_ESP] = x86_ldq_phys(cs,
+                                env->vm_vmcb + offsetof(struct vmcb, save.rsp));
+    env->regs[R_EAX] = x86_ldq_phys(cs,
+                                env->vm_vmcb + offsetof(struct vmcb, save.rax));
+    env->dr[7] = x86_ldq_phys(cs,
+                          env->vm_vmcb + offsetof(struct vmcb, save.dr7));
+    env->dr[6] = x86_ldq_phys(cs,
+                          env->vm_vmcb + offsetof(struct vmcb, save.dr6));
+
+    /* FIXME: guest state consistency checks */
+
+    switch (x86_ldub_phys(cs,
+                      env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
+    case TLB_CONTROL_DO_NOTHING:
+        break;
+    case TLB_CONTROL_FLUSH_ALL_ASID:
+        /* FIXME: this is not 100% correct but should work for now */
+        tlb_flush(cs, 1);
+        break;
+    }
+
+    env->hflags2 |= HF2_GIF_MASK;
+
+    if (int_ctl & V_IRQ_MASK) {
+        /* cs already points at this CPU; no need to re-fetch it */
+        cs->interrupt_request |= CPU_INTERRUPT_VIRQ;
+    }
+
+    /* maybe we need to inject an event */
+    event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
+                                                 control.event_inj));
+    if (event_inj & SVM_EVTINJ_VALID) {
+        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
+        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
+        uint32_t event_inj_err = x86_ldl_phys(cs, env->vm_vmcb +
+                                          offsetof(struct vmcb,
+                                                   control.event_inj_err));
+
+        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
+        /* FIXME: need to implement valid_err */
+        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
+        case SVM_EVTINJ_TYPE_INTR:
+            cs->exception_index = vector;
+            env->error_code = event_inj_err;
+            env->exception_is_int = 0;
+            env->exception_next_eip = -1;
+            qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
+            /* XXX: is it always correct? */
+            do_interrupt_x86_hardirq(env, vector, 1);
+            break;
+        case SVM_EVTINJ_TYPE_NMI:
+            cs->exception_index = EXCP02_NMI;
+            env->error_code = event_inj_err;
+            env->exception_is_int = 0;
+            env->exception_next_eip = env->eip;
+            qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
+            cpu_loop_exit(cs);
+            break;
+        case SVM_EVTINJ_TYPE_EXEPT:
+            cs->exception_index = vector;
+            env->error_code = event_inj_err;
+            env->exception_is_int = 0;
+            env->exception_next_eip = -1;
+            qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
+            cpu_loop_exit(cs);
+            break;
+        case SVM_EVTINJ_TYPE_SOFT:
+            cs->exception_index = vector;
+            env->error_code = event_inj_err;
+            env->exception_is_int = 1;
+            env->exception_next_eip = env->eip;
+            qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
+            cpu_loop_exit(cs);
+            break;
+        }
+        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", cs->exception_index,
+                      env->error_code);
+    }
+}
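For reference, the EVENTINJ encoding the injection switch above decodes; e.g. a hypervisor injecting #PF (vector 14) with error code 2 would have written:

/* event_inj     = 14 | SVM_EVTINJ_TYPE_EXEPT | SVM_EVTINJ_VALID
 *                    | SVM_EVTINJ_VALID_ERR              = 0x80000b0e */
/* event_inj_err = 0x2                                                 */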
+
+void helper_vmmcall(CPUX86State *env)
+{
+    cpu_svm_check_intercept_param(env, SVM_EXIT_VMMCALL, 0);
+    raise_exception(env, EXCP06_ILLOP);
+}
+
+void helper_vmload(CPUX86State *env, int aflag)
+{
+    CPUState *cs = CPU(x86_env_get_cpu(env));
+    target_ulong addr;
+
+    cpu_svm_check_intercept_param(env, SVM_EXIT_VMLOAD, 0);
+
+    if (aflag == 2) {
+        addr = env->regs[R_EAX];
+    } else {
+        addr = (uint32_t)env->regs[R_EAX];
+    }
+
+    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx
+                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
+                  addr, x86_ldq_phys(cs, addr + offsetof(struct vmcb,
+                                                          save.fs.base)),
+                  env->segs[R_FS].base);
+
+    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.fs), R_FS);
+    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.gs), R_GS);
+    svm_load_seg(env, addr + offsetof(struct vmcb, save.tr), &env->tr);
+    svm_load_seg(env, addr + offsetof(struct vmcb, save.ldtr), &env->ldt);
+
+#ifdef TARGET_X86_64
+    env->kernelgsbase = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
+                                                 save.kernel_gs_base));
+    env->lstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.lstar));
+    env->cstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.cstar));
+    env->fmask = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.sfmask));
+#endif
+    env->star = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.star));
+    env->sysenter_cs = x86_ldq_phys(cs,
+                                addr + offsetof(struct vmcb, save.sysenter_cs));
+    env->sysenter_esp = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
+                                                 save.sysenter_esp));
+    env->sysenter_eip = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
+                                                 save.sysenter_eip));
+}
+
+void helper_vmsave(CPUX86State *env, int aflag)
+{
+    CPUState *cs = CPU(x86_env_get_cpu(env));
+    target_ulong addr;
+
+    cpu_svm_check_intercept_param(env, SVM_EXIT_VMSAVE, 0);
+
+    if (aflag == 2) {
+        addr = env->regs[R_EAX];
+    } else {
+        addr = (uint32_t)env->regs[R_EAX];
+    }
+
+    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx
+                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
+                  addr, x86_ldq_phys(cs,
+                                 addr + offsetof(struct vmcb, save.fs.base)),
+                  env->segs[R_FS].base);
+
+    svm_save_seg(env, addr + offsetof(struct vmcb, save.fs),
+                 &env->segs[R_FS]);
+    svm_save_seg(env, addr + offsetof(struct vmcb, save.gs),
+                 &env->segs[R_GS]);
+    svm_save_seg(env, addr + offsetof(struct vmcb, save.tr),
+                 &env->tr);
+    svm_save_seg(env, addr + offsetof(struct vmcb, save.ldtr),
+                 &env->ldt);
+
+#ifdef TARGET_X86_64
+    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.kernel_gs_base),
+             env->kernelgsbase);
+    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.lstar), env->lstar);
+    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.cstar), env->cstar);
+    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sfmask), env->fmask);
+#endif
+    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.star), env->star);
+    x86_stq_phys(cs,
+             addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
+    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_esp),
+             env->sysenter_esp);
+    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_eip),
+             env->sysenter_eip);
+}
+
+void helper_stgi(CPUX86State *env)
+{
+    cpu_svm_check_intercept_param(env, SVM_EXIT_STGI, 0);
+    env->hflags2 |= HF2_GIF_MASK;
+}
+
+void helper_clgi(CPUX86State *env)
+{
+    cpu_svm_check_intercept_param(env, SVM_EXIT_CLGI, 0);
+    env->hflags2 &= ~HF2_GIF_MASK;
+}
+
+void helper_skinit(CPUX86State *env)
+{
+    cpu_svm_check_intercept_param(env, SVM_EXIT_SKINIT, 0);
+    /* XXX: not implemented */
+    raise_exception(env, EXCP06_ILLOP);
+}
+
+void helper_invlpga(CPUX86State *env, int aflag)
+{
+    X86CPU *cpu = x86_env_get_cpu(env);
+    target_ulong addr;
+
+    cpu_svm_check_intercept_param(env, SVM_EXIT_INVLPGA, 0);
+
+    if (aflag == 2) {
+        addr = env->regs[R_EAX];
+    } else {
+        addr = (uint32_t)env->regs[R_EAX];
+    }
+
+    /* XXX: could use the ASID to decide whether the flush is actually
+       needed */
+    tlb_flush_page(CPU(cpu), addr);
+}
+
+void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
+                                      uint64_t param)
+{
+    CPUState *cs = CPU(x86_env_get_cpu(env));
+
+    if (likely(!(env->hflags & HF_SVMI_MASK))) {
+        return;
+    }
+    switch (type) {
+    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
+        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
+            helper_vmexit(env, type, param);
+        }
+        break;
+    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
+        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
+            helper_vmexit(env, type, param);
+        }
+        break;
+    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
+        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
+            helper_vmexit(env, type, param);
+        }
+        break;
+    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
+        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
+            helper_vmexit(env, type, param);
+        }
+        break;
+    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
+        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
+            helper_vmexit(env, type, param);
+        }
+        break;
+    case SVM_EXIT_MSR:
+        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
+            /* FIXME: this should be read in at vmrun (faster this way?) */
+            uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
+                                     offsetof(struct vmcb,
+                                              control.msrpm_base_pa));
+            uint32_t t0, t1;
+
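+            /* The MSR permission map holds two bits per MSR (read
+               intercept, then write intercept) in three 2 KB regions
+               covering MSRs 0-0x1fff, 0xc0000000-0xc0001fff and
+               0xc0010000-0xc0011fff.  t1 becomes the byte offset into
+               the map and t0 the bit offset within that byte; param
+               selects the read (0) or write (1) bit.  */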
+            switch ((uint32_t)env->regs[R_ECX]) {
+            case 0 ... 0x1fff:
+                t0 = (env->regs[R_ECX] * 2) % 8;
+                t1 = (env->regs[R_ECX] * 2) / 8;
+                break;
+            case 0xc0000000 ... 0xc0001fff:
+                t0 = (8192 + env->regs[R_ECX] - 0xc0000000) * 2;
+                t1 = (t0 / 8);
+                t0 %= 8;
+                break;
+            case 0xc0010000 ... 0xc0011fff:
+                t0 = (16384 + env->regs[R_ECX] - 0xc0010000) * 2;
+                t1 = (t0 / 8);
+                t0 %= 8;
+                break;
+            default:
+                helper_vmexit(env, type, param);
+                t0 = 0;
+                t1 = 0;
+                break;
+            }
+            if (x86_ldub_phys(cs, addr + t1) & ((1 << param) << t0)) {
+                helper_vmexit(env, type, param);
+            }
+        }
+        break;
+    default:
+        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
+            helper_vmexit(env, type, param);
+        }
+        break;
+    }
+}
+
+void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
+                                   uint64_t param)
+{
+    helper_svm_check_intercept_param(env, type, param);
+}
+
+void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
+                         uint32_t next_eip_addend)
+{
+    CPUState *cs = CPU(x86_env_get_cpu(env));
+
+    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
+        /* FIXME: this should be read in at vmrun (faster this way?) */
+        uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
+                                 offsetof(struct vmcb, control.iopm_base_pa));
+        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
+
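+        /* The I/O permission map holds one intercept bit per port; bits
+           4..6 of param encode the access size in bytes, so mask covers
+           every port touched by the access.  */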
+        if (x86_lduw_phys(cs, addr + port / 8) & (mask << (port & 7))) {
+            /* next env->eip */
+            x86_stq_phys(cs,
+                     env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
+                     env->eip + next_eip_addend);
+            helper_vmexit(env, SVM_EXIT_IOIO, param | (port << 16));
+        }
+    }
+}
+
+/* Note: currently only 32 bits of exit_code are used */
+void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
+{
+    CPUState *cs = CPU(x86_env_get_cpu(env));
+    uint32_t int_ctl;
+
+    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016"
+                  PRIx64 ", " TARGET_FMT_lx ")!\n",
+                  exit_code, exit_info_1,
+                  x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
+                                                   control.exit_info_2)),
+                  env->eip);
+
+    if (env->hflags & HF_INHIBIT_IRQ_MASK) {
+        x86_stl_phys(cs,
+                 env->vm_vmcb + offsetof(struct vmcb, control.int_state),
+                 SVM_INTERRUPT_SHADOW_MASK);
+        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
+    } else {
+        x86_stl_phys(cs,
+                 env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
+    }
+
+    /* Save the VM state in the vmcb */
+    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
+                 &env->segs[R_ES]);
+    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
+                 &env->segs[R_CS]);
+    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
+                 &env->segs[R_SS]);
+    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
+                 &env->segs[R_DS]);
+
+    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base),
+             env->gdt.base);
+    x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit),
+             env->gdt.limit);
+
+    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.base),
+             env->idt.base);
+    x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit),
+             env->idt.limit);
+
+    x86_stq_phys(cs,
+             env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
+    x86_stq_phys(cs,
+             env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
+    x86_stq_phys(cs,
+             env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
+    x86_stq_phys(cs,
+             env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
+    x86_stq_phys(cs,
+             env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
+
+    int_ctl = x86_ldl_phys(cs,
+                       env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
+    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
+    int_ctl |= env->v_tpr & V_TPR_MASK;
+    if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
+        int_ctl |= V_IRQ_MASK;
+    }
+    x86_stl_phys(cs,
+             env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
+
+    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rflags),
+             cpu_compute_eflags(env));
+    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rip),
+             env->eip);
+    x86_stq_phys(cs,
+             env->vm_vmcb + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
+    x86_stq_phys(cs,
+             env->vm_vmcb + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);
+    x86_stq_phys(cs,
+             env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
+    x86_stq_phys(cs,
+             env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
+    x86_stb_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cpl),
+             env->hflags & HF_CPL_MASK);
+
+    /* Reload the host state from vm_hsave */
+    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
+    env->hflags &= ~HF_SVMI_MASK;
+    env->intercept = 0;
+    env->intercept_exceptions = 0;
+    cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
+    env->tsc_offset = 0;
+
+    env->gdt.base  = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
+                                                       save.gdtr.base));
+    env->gdt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
+                                                       save.gdtr.limit));
+
+    env->idt.base  = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
+                                                       save.idtr.base));
+    env->idt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
+                                                       save.idtr.limit));
+
+    cpu_x86_update_cr0(env, x86_ldq_phys(cs,
+                                     env->vm_hsave + offsetof(struct vmcb,
+                                                              save.cr0)) |
+                       CR0_PE_MASK);
+    cpu_x86_update_cr4(env, x86_ldq_phys(cs,
+                                     env->vm_hsave + offsetof(struct vmcb,
+                                                              save.cr4)));
+    cpu_x86_update_cr3(env, x86_ldq_phys(cs,
+                                     env->vm_hsave + offsetof(struct vmcb,
+                                                              save.cr3)));
+    /* we need to set the efer after the crs so the hidden flags get
+       set properly */
+    cpu_load_efer(env, x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
+                                                         save.efer)));
+    env->eflags = 0;
+    cpu_load_eflags(env, x86_ldq_phys(cs,
+                                  env->vm_hsave + offsetof(struct vmcb,
+                                                           save.rflags)),
+                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK |
+                      VM_MASK));
+
+    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.es),
+                       R_ES);
+    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
+                       R_CS);
+    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
+                       R_SS);
+    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
+                       R_DS);
+
+    env->eip = x86_ldq_phys(cs,
+                        env->vm_hsave + offsetof(struct vmcb, save.rip));
+    env->regs[R_ESP] = x86_ldq_phys(cs, env->vm_hsave +
+                                offsetof(struct vmcb, save.rsp));
+    env->regs[R_EAX] = x86_ldq_phys(cs, env->vm_hsave +
+                                offsetof(struct vmcb, save.rax));
+
+    env->dr[6] = x86_ldq_phys(cs,
+                          env->vm_hsave + offsetof(struct vmcb, save.dr6));
+    env->dr[7] = x86_ldq_phys(cs,
+                          env->vm_hsave + offsetof(struct vmcb, save.dr7));
+
+    /* other setups */
+    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_code),
+             exit_code);
+    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1),
+             exit_info_1);
+
+    x86_stl_phys(cs,
+             env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
+             x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
+                                              control.event_inj)));
+    x86_stl_phys(cs,
+             env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
+             x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
+                                              control.event_inj_err)));
+    x86_stl_phys(cs,
+             env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);
+
+    env->hflags2 &= ~HF2_GIF_MASK;
+    /* FIXME: Resets the current ASID register to zero (host ASID). */
+
+    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
+
+    /* Clears the TSC_OFFSET inside the processor. */
+
+    /* If the host is in PAE mode, the processor reloads the host's PDPEs
+       from the page table indicated by the host's CR3.  If the PDPEs
+       contain illegal state, the processor causes a shutdown. */
+
+    /* Disables all breakpoints in the host DR7 register. */
+
+    /* Checks the reloaded host state for consistency. */
+
+    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
+       host's code segment or non-canonical (in the case of long mode), a
+       #GP fault is delivered inside the host. */
+
+    /* remove any pending exception */
+    cs->exception_index = -1;
+    env->error_code = 0;
+    env->old_exception = -1;
+
+    cpu_loop_exit(cs);
+}
+
+void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
+{
+    helper_vmexit(env, exit_code, exit_info_1);
+}
+
+#endif
diff --git a/target/i386/trace-events b/target/i386/trace-events
new file mode 100644
index 0000000000..de6a1cf0cb
--- /dev/null
+++ b/target/i386/trace-events
@@ -0,0 +1,7 @@
+# See docs/tracing.txt for syntax documentation.
+
+# target/i386/kvm.c
+kvm_x86_fixup_msi_error(uint32_t gsi) "VT-d failed to remap interrupt for GSI %" PRIu32
+kvm_x86_add_msi_route(int virq) "Adding route entry for virq %d"
+kvm_x86_remove_msi_route(int virq) "Removing route entry for virq %d"
+kvm_x86_update_msi_routes(int num) "Updated %d MSI routes"
diff --git a/target/i386/translate.c b/target/i386/translate.c
new file mode 100644
index 0000000000..324103c885
--- /dev/null
+++ b/target/i386/translate.c
@@ -0,0 +1,8502 @@
+/*
+ *  i386 translation
+ *
+ *  Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+
+#include "qemu/host-utils.h"
+#include "cpu.h"
+#include "disas/disas.h"
+#include "exec/exec-all.h"
+#include "tcg-op.h"
+#include "exec/cpu_ldst.h"
+
+#include "exec/helper-proto.h"
+#include "exec/helper-gen.h"
+
+#include "trace-tcg.h"
+#include "exec/log.h"
+
+
+#define PREFIX_REPZ   0x01
+#define PREFIX_REPNZ  0x02
+#define PREFIX_LOCK   0x04
+#define PREFIX_DATA   0x08
+#define PREFIX_ADR    0x10
+#define PREFIX_VEX    0x20
+
+#ifdef TARGET_X86_64
+#define CODE64(s) ((s)->code64)
+#define REX_X(s) ((s)->rex_x)
+#define REX_B(s) ((s)->rex_b)
+#else
+#define CODE64(s) 0
+#define REX_X(s) 0
+#define REX_B(s) 0
+#endif
+
+#ifdef TARGET_X86_64
+# define ctztl  ctz64
+# define clztl  clz64
+#else
+# define ctztl  ctz32
+# define clztl  clz32
+#endif
+
+/* For a switch indexed by MODRM, match all memory operands for a given OP.  */
+#define CASE_MODRM_MEM_OP(OP) \
+    case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
+    case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
+    case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7
+
+#define CASE_MODRM_OP(OP) \
+    case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
+    case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
+    case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7: \
+    case (3 << 6) | (OP << 3) | 0 ... (3 << 6) | (OP << 3) | 7
+
+//#define MACRO_TEST   1
+
+/* global register indexes */
+static TCGv_env cpu_env;
+static TCGv cpu_A0;
+static TCGv cpu_cc_dst, cpu_cc_src, cpu_cc_src2, cpu_cc_srcT;
+static TCGv_i32 cpu_cc_op;
+static TCGv cpu_regs[CPU_NB_REGS];
+static TCGv cpu_seg_base[6];
+static TCGv_i64 cpu_bndl[4];
+static TCGv_i64 cpu_bndu[4];
+/* local temps */
+static TCGv cpu_T0, cpu_T1;
+/* local register indexes (only used inside old micro ops) */
+static TCGv cpu_tmp0, cpu_tmp4;
+static TCGv_ptr cpu_ptr0, cpu_ptr1;
+static TCGv_i32 cpu_tmp2_i32, cpu_tmp3_i32;
+static TCGv_i64 cpu_tmp1_i64;
+
+#include "exec/gen-icount.h"
+
+#ifdef TARGET_X86_64
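+/* Nonzero once a REX prefix has been seen, so that byte-register
+   encodings 4..7 name SPL..DIL rather than AH..BH (see byte_reg_is_xH).  */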
+static int x86_64_hregs;
+#endif
+
+typedef struct DisasContext {
+    /* current insn context */
+    int override; /* -1 if no override */
+    int prefix;
+    TCGMemOp aflag;
+    TCGMemOp dflag;
+    target_ulong pc_start;
+    target_ulong pc; /* pc = eip + cs_base */
+    int is_jmp; /* 1 = means jump (stop translation), 2 means CPU
+                   static state change (stop translation) */
+    /* current block context */
+    target_ulong cs_base; /* base of CS segment */
+    int pe;     /* protected mode */
+    int code32; /* 32 bit code segment */
+#ifdef TARGET_X86_64
+    int lma;    /* long mode active */
+    int code64; /* 64 bit code segment */
+    int rex_x, rex_b;
+#endif
+    int vex_l;  /* vex vector length */
+    int vex_v;  /* vex vvvv register, without 1's compliment.  */
+    int ss32;   /* 32 bit stack segment */
+    CCOp cc_op;  /* current CC operation */
+    bool cc_op_dirty;
+    int addseg; /* non zero if either DS/ES/SS have a non zero base */
+    int f_st;   /* currently unused */
+    int vm86;   /* vm86 mode */
+    int cpl;
+    int iopl;
+    int tf;     /* TF cpu flag */
+    int singlestep_enabled; /* "hardware" single step enabled */
+    int jmp_opt; /* use direct block chaining for direct jumps */
+    int repz_opt; /* optimize jumps within repz instructions */
+    int mem_index; /* select memory access functions */
+    uint64_t flags; /* all execution flags */
+    struct TranslationBlock *tb;
+    int popl_esp_hack; /* for correct popl with esp base handling */
+    int rip_offset; /* only used in x86_64, but left for simplicity */
+    int cpuid_features;
+    int cpuid_ext_features;
+    int cpuid_ext2_features;
+    int cpuid_ext3_features;
+    int cpuid_7_0_ebx_features;
+    int cpuid_xsave_features;
+} DisasContext;
+
+static void gen_eob(DisasContext *s);
+static void gen_jmp(DisasContext *s, target_ulong eip);
+static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num);
+static void gen_op(DisasContext *s1, int op, TCGMemOp ot, int d);
+
+/* i386 arith/logic operations */
+enum {
+    OP_ADDL,
+    OP_ORL,
+    OP_ADCL,
+    OP_SBBL,
+    OP_ANDL,
+    OP_SUBL,
+    OP_XORL,
+    OP_CMPL,
+};
+
+/* i386 shift ops */
+enum {
+    OP_ROL,
+    OP_ROR,
+    OP_RCL,
+    OP_RCR,
+    OP_SHL,
+    OP_SHR,
+    OP_SHL1, /* undocumented */
+    OP_SAR = 7,
+};
+
+enum {
+    JCC_O,
+    JCC_B,
+    JCC_Z,
+    JCC_BE,
+    JCC_S,
+    JCC_P,
+    JCC_L,
+    JCC_LE,
+};
+
+enum {
+    /* I386 int registers */
+    OR_EAX,   /* MUST be even numbered */
+    OR_ECX,
+    OR_EDX,
+    OR_EBX,
+    OR_ESP,
+    OR_EBP,
+    OR_ESI,
+    OR_EDI,
+
+    OR_TMP0 = 16,    /* temporary operand register */
+    OR_TMP1,
+    OR_A0, /* temporary register used when doing address evaluation */
+};
+
+enum {
+    USES_CC_DST  = 1,
+    USES_CC_SRC  = 2,
+    USES_CC_SRC2 = 4,
+    USES_CC_SRCT = 8,
+};
+
+/* Bit set if the global variable is live after setting CC_OP to X.  */
+static const uint8_t cc_op_live[CC_OP_NB] = {
+    [CC_OP_DYNAMIC] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
+    [CC_OP_EFLAGS] = USES_CC_SRC,
+    [CC_OP_MULB ... CC_OP_MULQ] = USES_CC_DST | USES_CC_SRC,
+    [CC_OP_ADDB ... CC_OP_ADDQ] = USES_CC_DST | USES_CC_SRC,
+    [CC_OP_ADCB ... CC_OP_ADCQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
+    [CC_OP_SUBB ... CC_OP_SUBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRCT,
+    [CC_OP_SBBB ... CC_OP_SBBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
+    [CC_OP_LOGICB ... CC_OP_LOGICQ] = USES_CC_DST,
+    [CC_OP_INCB ... CC_OP_INCQ] = USES_CC_DST | USES_CC_SRC,
+    [CC_OP_DECB ... CC_OP_DECQ] = USES_CC_DST | USES_CC_SRC,
+    [CC_OP_SHLB ... CC_OP_SHLQ] = USES_CC_DST | USES_CC_SRC,
+    [CC_OP_SARB ... CC_OP_SARQ] = USES_CC_DST | USES_CC_SRC,
+    [CC_OP_BMILGB ... CC_OP_BMILGQ] = USES_CC_DST | USES_CC_SRC,
+    [CC_OP_ADCX] = USES_CC_DST | USES_CC_SRC,
+    [CC_OP_ADOX] = USES_CC_SRC | USES_CC_SRC2,
+    [CC_OP_ADCOX] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
+    [CC_OP_CLR] = 0,
+};
+
+static void set_cc_op(DisasContext *s, CCOp op)
+{
+    int dead;
+
+    if (s->cc_op == op) {
+        return;
+    }
+
+    /* Discard CC computation that will no longer be used.  */
+    dead = cc_op_live[s->cc_op] & ~cc_op_live[op];
+    if (dead & USES_CC_DST) {
+        tcg_gen_discard_tl(cpu_cc_dst);
+    }
+    if (dead & USES_CC_SRC) {
+        tcg_gen_discard_tl(cpu_cc_src);
+    }
+    if (dead & USES_CC_SRC2) {
+        tcg_gen_discard_tl(cpu_cc_src2);
+    }
+    if (dead & USES_CC_SRCT) {
+        tcg_gen_discard_tl(cpu_cc_srcT);
+    }
+
+    if (op == CC_OP_DYNAMIC) {
+        /* The DYNAMIC setting is translator only, and should never be
+           stored.  Thus we always consider it clean.  */
+        s->cc_op_dirty = false;
+    } else {
+        /* Discard any computed CC_OP value (see shifts).  */
+        if (s->cc_op == CC_OP_DYNAMIC) {
+            tcg_gen_discard_i32(cpu_cc_op);
+        }
+        s->cc_op_dirty = true;
+    }
+    s->cc_op = op;
+}
+
+static void gen_update_cc_op(DisasContext *s)
+{
+    if (s->cc_op_dirty) {
+        tcg_gen_movi_i32(cpu_cc_op, s->cc_op);
+        s->cc_op_dirty = false;
+    }
+}
+
+#ifdef TARGET_X86_64
+
+#define NB_OP_SIZES 4
+
+#else /* !TARGET_X86_64 */
+
+#define NB_OP_SIZES 3
+
+#endif /* !TARGET_X86_64 */
+
+#if defined(HOST_WORDS_BIGENDIAN)
+#define REG_B_OFFSET (sizeof(target_ulong) - 1)
+#define REG_H_OFFSET (sizeof(target_ulong) - 2)
+#define REG_W_OFFSET (sizeof(target_ulong) - 2)
+#define REG_L_OFFSET (sizeof(target_ulong) - 4)
+#define REG_LH_OFFSET (sizeof(target_ulong) - 8)
+#else
+#define REG_B_OFFSET 0
+#define REG_H_OFFSET 1
+#define REG_W_OFFSET 0
+#define REG_L_OFFSET 0
+#define REG_LH_OFFSET 4
+#endif
+
+/* In instruction encodings for byte register accesses the
+ * register number usually indicates "low 8 bits of register N";
+ * however there are some special cases where N 4..7 indicates
+ * [AH, CH, DH, BH], i.e. "bits 15..8 of register N-4". Return
+ * true for this special case, false otherwise.
+ */
+static inline bool byte_reg_is_xH(int reg)
+{
+    if (reg < 4) {
+        return false;
+    }
+#ifdef TARGET_X86_64
+    if (reg >= 8 || x86_64_hregs) {
+        return false;
+    }
+#endif
+    return true;
+}
+
+/* Select the size of a push/pop operation.  */
+static inline TCGMemOp mo_pushpop(DisasContext *s, TCGMemOp ot)
+{
+    if (CODE64(s)) {
+        return ot == MO_16 ? MO_16 : MO_64;
+    } else {
+        return ot;
+    }
+}
+
+/* Select the size of the stack pointer.  */
+static inline TCGMemOp mo_stacksize(DisasContext *s)
+{
+    return CODE64(s) ? MO_64 : s->ss32 ? MO_32 : MO_16;
+}
+
+/* Select only size 64 else 32.  Used for SSE operand sizes.  */
+static inline TCGMemOp mo_64_32(TCGMemOp ot)
+{
+#ifdef TARGET_X86_64
+    return ot == MO_64 ? MO_64 : MO_32;
+#else
+    return MO_32;
+#endif
+}
+
+/* Select size 8 if lsb of B is clear, else OT.  Used for decoding
+   byte vs word opcodes.  */
+static inline TCGMemOp mo_b_d(int b, TCGMemOp ot)
+{
+    return b & 1 ? ot : MO_8;
+}
+
+/* Select size 8 if lsb of B is clear, else OT capped at 32.
+   Used for decoding operand size of port opcodes.  */
+static inline TCGMemOp mo_b_d32(int b, TCGMemOp ot)
+{
+    return b & 1 ? (ot == MO_16 ? MO_16 : MO_32) : MO_8;
+}
+
+static void gen_op_mov_reg_v(TCGMemOp ot, int reg, TCGv t0)
+{
+    switch (ot) {
+    case MO_8:
+        if (!byte_reg_is_xH(reg)) {
+            tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 8);
+        } else {
+            tcg_gen_deposit_tl(cpu_regs[reg - 4], cpu_regs[reg - 4], t0, 8, 8);
+        }
+        break;
+    case MO_16:
+        tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 16);
+        break;
+    case MO_32:
+        /* For x86_64, this sets the higher half of the register to zero.
+           For i386, this is equivalent to a mov. */
+        tcg_gen_ext32u_tl(cpu_regs[reg], t0);
+        break;
+#ifdef TARGET_X86_64
+    case MO_64:
+        tcg_gen_mov_tl(cpu_regs[reg], t0);
+        break;
+#endif
+    default:
+        tcg_abort();
+    }
+}
+
+static inline void gen_op_mov_v_reg(TCGMemOp ot, TCGv t0, int reg)
+{
+    if (ot == MO_8 && byte_reg_is_xH(reg)) {
+        tcg_gen_shri_tl(t0, cpu_regs[reg - 4], 8);
+        tcg_gen_ext8u_tl(t0, t0);
+    } else {
+        tcg_gen_mov_tl(t0, cpu_regs[reg]);
+    }
+}
+
+static void gen_add_A0_im(DisasContext *s, int val)
+{
+    tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
+    if (!CODE64(s)) {
+        tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
+    }
+}
+
+static inline void gen_op_jmp_v(TCGv dest)
+{
+    tcg_gen_st_tl(dest, cpu_env, offsetof(CPUX86State, eip));
+}
+
+static inline void gen_op_add_reg_im(TCGMemOp size, int reg, int32_t val)
+{
+    tcg_gen_addi_tl(cpu_tmp0, cpu_regs[reg], val);
+    gen_op_mov_reg_v(size, reg, cpu_tmp0);
+}
+
+static inline void gen_op_add_reg_T0(TCGMemOp size, int reg)
+{
+    tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], cpu_T0);
+    gen_op_mov_reg_v(size, reg, cpu_tmp0);
+}
+
+static inline void gen_op_ld_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
+{
+    tcg_gen_qemu_ld_tl(t0, a0, s->mem_index, idx | MO_LE);
+}
+
+static inline void gen_op_st_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
+{
+    tcg_gen_qemu_st_tl(t0, a0, s->mem_index, idx | MO_LE);
+}
+
+static inline void gen_op_st_rm_T0_A0(DisasContext *s, int idx, int d)
+{
+    if (d == OR_TMP0) {
+        gen_op_st_v(s, idx, cpu_T0, cpu_A0);
+    } else {
+        gen_op_mov_reg_v(idx, d, cpu_T0);
+    }
+}
+
+static inline void gen_jmp_im(target_ulong pc)
+{
+    tcg_gen_movi_tl(cpu_tmp0, pc);
+    gen_op_jmp_v(cpu_tmp0);
+}
+
+/* Compute SEG:REG into A0.  SEG is selected from the override segment
+   (OVR_SEG) and the default segment (DEF_SEG).  OVR_SEG may be -1 to
+   indicate no override.  */
+static void gen_lea_v_seg(DisasContext *s, TCGMemOp aflag, TCGv a0,
+                          int def_seg, int ovr_seg)
+{
+    switch (aflag) {
+#ifdef TARGET_X86_64
+    case MO_64:
+        if (ovr_seg < 0) {
+            tcg_gen_mov_tl(cpu_A0, a0);
+            return;
+        }
+        break;
+#endif
+    case MO_32:
+        /* 32 bit address */
+        if (ovr_seg < 0 && s->addseg) {
+            ovr_seg = def_seg;
+        }
+        if (ovr_seg < 0) {
+            tcg_gen_ext32u_tl(cpu_A0, a0);
+            return;
+        }
+        break;
+    case MO_16:
+        /* 16 bit address */
+        tcg_gen_ext16u_tl(cpu_A0, a0);
+        a0 = cpu_A0;
+        if (ovr_seg < 0) {
+            if (s->addseg) {
+                ovr_seg = def_seg;
+            } else {
+                return;
+            }
+        }
+        break;
+    default:
+        tcg_abort();
+    }
+
+    if (ovr_seg >= 0) {
+        TCGv seg = cpu_seg_base[ovr_seg];
+
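+        /* In 64-bit mode only FS and GS have a nonzero base.  With a
+           32-bit address size the address is zero-extended before the
+           base is added; legacy modes instead truncate after adding.  */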
+        if (aflag == MO_64) {
+            tcg_gen_add_tl(cpu_A0, a0, seg);
+        } else if (CODE64(s)) {
+            tcg_gen_ext32u_tl(cpu_A0, a0);
+            tcg_gen_add_tl(cpu_A0, cpu_A0, seg);
+        } else {
+            tcg_gen_add_tl(cpu_A0, a0, seg);
+            tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
+        }
+    }
+}
+
+static inline void gen_string_movl_A0_ESI(DisasContext *s)
+{
+    gen_lea_v_seg(s, s->aflag, cpu_regs[R_ESI], R_DS, s->override);
+}
+
+static inline void gen_string_movl_A0_EDI(DisasContext *s)
+{
+    gen_lea_v_seg(s, s->aflag, cpu_regs[R_EDI], R_ES, -1);
+}
+
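+/* Load DF (kept as 1 or -1 in CPUX86State) scaled by the operand size
+   into T0: the per-iteration stride of the string operations.  */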
+static inline void gen_op_movl_T0_Dshift(TCGMemOp ot)
+{
+    tcg_gen_ld32s_tl(cpu_T0, cpu_env, offsetof(CPUX86State, df));
+    tcg_gen_shli_tl(cpu_T0, cpu_T0, ot);
+}
+
+static TCGv gen_ext_tl(TCGv dst, TCGv src, TCGMemOp size, bool sign)
+{
+    switch (size) {
+    case MO_8:
+        if (sign) {
+            tcg_gen_ext8s_tl(dst, src);
+        } else {
+            tcg_gen_ext8u_tl(dst, src);
+        }
+        return dst;
+    case MO_16:
+        if (sign) {
+            tcg_gen_ext16s_tl(dst, src);
+        } else {
+            tcg_gen_ext16u_tl(dst, src);
+        }
+        return dst;
+#ifdef TARGET_X86_64
+    case MO_32:
+        if (sign) {
+            tcg_gen_ext32s_tl(dst, src);
+        } else {
+            tcg_gen_ext32u_tl(dst, src);
+        }
+        return dst;
+#endif
+    default:
+        return src;
+    }
+}
+
+static void gen_extu(TCGMemOp ot, TCGv reg)
+{
+    gen_ext_tl(reg, reg, ot, false);
+}
+
+static void gen_exts(TCGMemOp ot, TCGv reg)
+{
+    gen_ext_tl(reg, reg, ot, true);
+}
+
+static inline void gen_op_jnz_ecx(TCGMemOp size, TCGLabel *label1)
+{
+    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
+    gen_extu(size, cpu_tmp0);
+    tcg_gen_brcondi_tl(TCG_COND_NE, cpu_tmp0, 0, label1);
+}
+
+static inline void gen_op_jz_ecx(TCGMemOp size, TCGLabel *label1)
+{
+    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
+    gen_extu(size, cpu_tmp0);
+    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
+}
+
+static void gen_helper_in_func(TCGMemOp ot, TCGv v, TCGv_i32 n)
+{
+    switch (ot) {
+    case MO_8:
+        gen_helper_inb(v, cpu_env, n);
+        break;
+    case MO_16:
+        gen_helper_inw(v, cpu_env, n);
+        break;
+    case MO_32:
+        gen_helper_inl(v, cpu_env, n);
+        break;
+    default:
+        tcg_abort();
+    }
+}
+
+static void gen_helper_out_func(TCGMemOp ot, TCGv_i32 v, TCGv_i32 n)
+{
+    switch (ot) {
+    case MO_8:
+        gen_helper_outb(cpu_env, v, n);
+        break;
+    case MO_16:
+        gen_helper_outw(cpu_env, v, n);
+        break;
+    case MO_32:
+        gen_helper_outl(cpu_env, v, n);
+        break;
+    default:
+        tcg_abort();
+    }
+}
+
+static void gen_check_io(DisasContext *s, TCGMemOp ot, target_ulong cur_eip,
+                         uint32_t svm_flags)
+{
+    target_ulong next_eip;
+
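+    /* With CPL > IOPL, or in vm86 mode, port access is gated by the TSS
+       I/O permission bitmap; the check helpers raise #GP(0) on refusal.  */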
+    if (s->pe && (s->cpl > s->iopl || s->vm86)) {
+        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
+        switch (ot) {
+        case MO_8:
+            gen_helper_check_iob(cpu_env, cpu_tmp2_i32);
+            break;
+        case MO_16:
+            gen_helper_check_iow(cpu_env, cpu_tmp2_i32);
+            break;
+        case MO_32:
+            gen_helper_check_iol(cpu_env, cpu_tmp2_i32);
+            break;
+        default:
+            tcg_abort();
+        }
+    }
+    if (s->flags & HF_SVMI_MASK) {
+        gen_update_cc_op(s);
+        gen_jmp_im(cur_eip);
+        svm_flags |= (1 << (4 + ot));
+        next_eip = s->pc - s->cs_base;
+        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
+        gen_helper_svm_check_io(cpu_env, cpu_tmp2_i32,
+                                tcg_const_i32(svm_flags),
+                                tcg_const_i32(next_eip - cur_eip));
+    }
+}
+
+static inline void gen_movs(DisasContext *s, TCGMemOp ot)
+{
+    gen_string_movl_A0_ESI(s);
+    gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
+    gen_string_movl_A0_EDI(s);
+    gen_op_st_v(s, ot, cpu_T0, cpu_A0);
+    gen_op_movl_T0_Dshift(ot);
+    gen_op_add_reg_T0(s->aflag, R_ESI);
+    gen_op_add_reg_T0(s->aflag, R_EDI);
+}
+
+static void gen_op_update1_cc(void)
+{
+    tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
+}
+
+static void gen_op_update2_cc(void)
+{
+    tcg_gen_mov_tl(cpu_cc_src, cpu_T1);
+    tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
+}
+
+static void gen_op_update3_cc(TCGv reg)
+{
+    tcg_gen_mov_tl(cpu_cc_src2, reg);
+    tcg_gen_mov_tl(cpu_cc_src, cpu_T1);
+    tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
+}
+
+static inline void gen_op_testl_T0_T1_cc(void)
+{
+    tcg_gen_and_tl(cpu_cc_dst, cpu_T0, cpu_T1);
+}
+
+static void gen_op_update_neg_cc(void)
+{
+    tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
+    tcg_gen_neg_tl(cpu_cc_src, cpu_T0);
+    tcg_gen_movi_tl(cpu_cc_srcT, 0);
+}
+
+/* compute all eflags to cc_src */
+static void gen_compute_eflags(DisasContext *s)
+{
+    TCGv zero, dst, src1, src2;
+    int live, dead;
+
+    if (s->cc_op == CC_OP_EFLAGS) {
+        return;
+    }
+    if (s->cc_op == CC_OP_CLR) {
+        tcg_gen_movi_tl(cpu_cc_src, CC_Z | CC_P);
+        set_cc_op(s, CC_OP_EFLAGS);
+        return;
+    }
+
+    TCGV_UNUSED(zero);
+    dst = cpu_cc_dst;
+    src1 = cpu_cc_src;
+    src2 = cpu_cc_src2;
+
+    /* Take care to not read values that are not live.  */
+    live = cc_op_live[s->cc_op] & ~USES_CC_SRCT;
+    dead = live ^ (USES_CC_DST | USES_CC_SRC | USES_CC_SRC2);
+    if (dead) {
+        zero = tcg_const_tl(0);
+        if (dead & USES_CC_DST) {
+            dst = zero;
+        }
+        if (dead & USES_CC_SRC) {
+            src1 = zero;
+        }
+        if (dead & USES_CC_SRC2) {
+            src2 = zero;
+        }
+    }
+
+    gen_update_cc_op(s);
+    gen_helper_cc_compute_all(cpu_cc_src, dst, src1, src2, cpu_cc_op);
+    set_cc_op(s, CC_OP_EFLAGS);
+
+    if (dead) {
+        tcg_temp_free(zero);
+    }
+}
+
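+/* Describes how to evaluate a condition code: COND compares REG (after
+   masking with MASK) against REG2 when USE_REG2 is set, else against the
+   immediate IMM.  NO_SETCOND means REG already holds the 0/1 result and
+   needs at most an inversion (TCG_COND_EQ).  */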
+typedef struct CCPrepare {
+    TCGCond cond;
+    TCGv reg;
+    TCGv reg2;
+    target_ulong imm;
+    target_ulong mask;
+    bool use_reg2;
+    bool no_setcond;
+} CCPrepare;
+
+/* compute eflags.C to reg */
+static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg)
+{
+    TCGv t0, t1;
+    int size, shift;
+
+    switch (s->cc_op) {
+    case CC_OP_SUBB ... CC_OP_SUBQ:
+        /* (DATA_TYPE)CC_SRCT < (DATA_TYPE)CC_SRC */
+        size = s->cc_op - CC_OP_SUBB;
+        t1 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
+        /* If no temporary was used, be careful not to alias t1 and t0.  */
+        t0 = TCGV_EQUAL(t1, cpu_cc_src) ? cpu_tmp0 : reg;
+        tcg_gen_mov_tl(t0, cpu_cc_srcT);
+        gen_extu(size, t0);
+        goto add_sub;
+
+    case CC_OP_ADDB ... CC_OP_ADDQ:
+        /* (DATA_TYPE)CC_DST < (DATA_TYPE)CC_SRC */
+        size = s->cc_op - CC_OP_ADDB;
+        t1 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
+        t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
+    add_sub:
+        return (CCPrepare) { .cond = TCG_COND_LTU, .reg = t0,
+                             .reg2 = t1, .mask = -1, .use_reg2 = true };
+
+    case CC_OP_LOGICB ... CC_OP_LOGICQ:
+    case CC_OP_CLR:
+        return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
+
+    case CC_OP_INCB ... CC_OP_INCQ:
+    case CC_OP_DECB ... CC_OP_DECQ:
+        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
+                             .mask = -1, .no_setcond = true };
+
+    case CC_OP_SHLB ... CC_OP_SHLQ:
+        /* (CC_SRC >> (DATA_BITS - 1)) & 1 */
+        size = s->cc_op - CC_OP_SHLB;
+        shift = (8 << size) - 1;
+        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
+                             .mask = (target_ulong)1 << shift };
+
+    case CC_OP_MULB ... CC_OP_MULQ:
+        return (CCPrepare) { .cond = TCG_COND_NE,
+                             .reg = cpu_cc_src, .mask = -1 };
+
+    case CC_OP_BMILGB ... CC_OP_BMILGQ:
+        size = s->cc_op - CC_OP_BMILGB;
+        t0 = gen_ext_tl(reg, cpu_cc_src, size, false);
+        return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };
+
+    case CC_OP_ADCX:
+    case CC_OP_ADCOX:
+        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_dst,
+                             .mask = -1, .no_setcond = true };
+
+    case CC_OP_EFLAGS:
+    case CC_OP_SARB ... CC_OP_SARQ:
+        /* CC_SRC & 1 */
+        return (CCPrepare) { .cond = TCG_COND_NE,
+                             .reg = cpu_cc_src, .mask = CC_C };
+
+    default:
+       /* The need to compute only C from CC_OP_DYNAMIC is important
+          in efficiently implementing e.g. INC at the start of a TB.  */
+       gen_update_cc_op(s);
+       gen_helper_cc_compute_c(reg, cpu_cc_dst, cpu_cc_src,
+                               cpu_cc_src2, cpu_cc_op);
+       return (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
+                            .mask = -1, .no_setcond = true };
+    }
+}
+
+/* compute eflags.P to reg */
+static CCPrepare gen_prepare_eflags_p(DisasContext *s, TCGv reg)
+{
+    gen_compute_eflags(s);
+    return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
+                         .mask = CC_P };
+}
+
+/* compute eflags.S to reg */
+static CCPrepare gen_prepare_eflags_s(DisasContext *s, TCGv reg)
+{
+    switch (s->cc_op) {
+    case CC_OP_DYNAMIC:
+        gen_compute_eflags(s);
+        /* FALLTHRU */
+    case CC_OP_EFLAGS:
+    case CC_OP_ADCX:
+    case CC_OP_ADOX:
+    case CC_OP_ADCOX:
+        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
+                             .mask = CC_S };
+    case CC_OP_CLR:
+        return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
+    default:
+        {
+            TCGMemOp size = (s->cc_op - CC_OP_ADDB) & 3;
+            TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, true);
+            return (CCPrepare) { .cond = TCG_COND_LT, .reg = t0, .mask = -1 };
+        }
+    }
+}
+
+/* compute eflags.O to reg */
+static CCPrepare gen_prepare_eflags_o(DisasContext *s, TCGv reg)
+{
+    switch (s->cc_op) {
+    case CC_OP_ADOX:
+    case CC_OP_ADCOX:
+        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src2,
+                             .mask = -1, .no_setcond = true };
+    case CC_OP_CLR:
+        return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
+    default:
+        gen_compute_eflags(s);
+        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
+                             .mask = CC_O };
+    }
+}
+
+/* compute eflags.Z to reg */
+static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg)
+{
+    switch (s->cc_op) {
+    case CC_OP_DYNAMIC:
+        gen_compute_eflags(s);
+        /* FALLTHRU */
+    case CC_OP_EFLAGS:
+    case CC_OP_ADCX:
+    case CC_OP_ADOX:
+    case CC_OP_ADCOX:
+        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
+                             .mask = CC_Z };
+    case CC_OP_CLR:
+        return (CCPrepare) { .cond = TCG_COND_ALWAYS, .mask = -1 };
+    default:
+        {
+            TCGMemOp size = (s->cc_op - CC_OP_ADDB) & 3;
+            TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
+            return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };
+        }
+    }
+}
+
+/* perform a conditional store into register 'reg' according to jump opcode
+   value 'b'. In the fast case, T0 is guaranteed not to be used. */
+static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg)
+{
+    int inv, jcc_op, cond;
+    TCGMemOp size;
+    CCPrepare cc;
+    TCGv t0;
+
+    inv = b & 1;
+    jcc_op = (b >> 1) & 7;
+
+    switch (s->cc_op) {
+    case CC_OP_SUBB ... CC_OP_SUBQ:
+        /* We optimize relational operators for the cmp/jcc case.  */
+        size = s->cc_op - CC_OP_SUBB;
+        switch (jcc_op) {
+        case JCC_BE:
+            tcg_gen_mov_tl(cpu_tmp4, cpu_cc_srcT);
+            gen_extu(size, cpu_tmp4);
+            t0 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
+            cc = (CCPrepare) { .cond = TCG_COND_LEU, .reg = cpu_tmp4,
+                               .reg2 = t0, .mask = -1, .use_reg2 = true };
+            break;
+
+        case JCC_L:
+            cond = TCG_COND_LT;
+            goto fast_jcc_l;
+        case JCC_LE:
+            cond = TCG_COND_LE;
+        fast_jcc_l:
+            tcg_gen_mov_tl(cpu_tmp4, cpu_cc_srcT);
+            gen_exts(size, cpu_tmp4);
+            t0 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, true);
+            cc = (CCPrepare) { .cond = cond, .reg = cpu_tmp4,
+                               .reg2 = t0, .mask = -1, .use_reg2 = true };
+            break;
+
+        default:
+            goto slow_jcc;
+        }
+        break;
+
+    default:
+    slow_jcc:
+        /* This actually generates good code for JC, JZ and JS.  */
+        switch (jcc_op) {
+        case JCC_O:
+            cc = gen_prepare_eflags_o(s, reg);
+            break;
+        case JCC_B:
+            cc = gen_prepare_eflags_c(s, reg);
+            break;
+        case JCC_Z:
+            cc = gen_prepare_eflags_z(s, reg);
+            break;
+        case JCC_BE:
+            gen_compute_eflags(s);
+            cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
+                               .mask = CC_Z | CC_C };
+            break;
+        case JCC_S:
+            cc = gen_prepare_eflags_s(s, reg);
+            break;
+        case JCC_P:
+            cc = gen_prepare_eflags_p(s, reg);
+            break;
+        case JCC_L:
+            gen_compute_eflags(s);
+            if (TCGV_EQUAL(reg, cpu_cc_src)) {
+                reg = cpu_tmp0;
+            }
+            tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */
+            tcg_gen_xor_tl(reg, reg, cpu_cc_src);
+            cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
+                               .mask = CC_S };
+            break;
+        default:
+        case JCC_LE:
+            gen_compute_eflags(s);
+            if (TCGV_EQUAL(reg, cpu_cc_src)) {
+                reg = cpu_tmp0;
+            }
+            tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */
+            tcg_gen_xor_tl(reg, reg, cpu_cc_src);
+            cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
+                               .mask = CC_S | CC_Z };
+            break;
+        }
+        break;
+    }
+
+    if (inv) {
+        cc.cond = tcg_invert_cond(cc.cond);
+    }
+    return cc;
+}
+
+static void gen_setcc1(DisasContext *s, int b, TCGv reg)
+{
+    CCPrepare cc = gen_prepare_cc(s, b, reg);
+
+    if (cc.no_setcond) {
+        if (cc.cond == TCG_COND_EQ) {
+            tcg_gen_xori_tl(reg, cc.reg, 1);
+        } else {
+            tcg_gen_mov_tl(reg, cc.reg);
+        }
+        return;
+    }
+
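+    /* Testing a single bit of cc.reg for non-zero reduces to a shift
+       plus mask, avoiding a setcond.  */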
+    if (cc.cond == TCG_COND_NE && !cc.use_reg2 && cc.imm == 0 &&
+        cc.mask != 0 && (cc.mask & (cc.mask - 1)) == 0) {
+        tcg_gen_shri_tl(reg, cc.reg, ctztl(cc.mask));
+        tcg_gen_andi_tl(reg, reg, 1);
+        return;
+    }
+    if (cc.mask != -1) {
+        tcg_gen_andi_tl(reg, cc.reg, cc.mask);
+        cc.reg = reg;
+    }
+    if (cc.use_reg2) {
+        tcg_gen_setcond_tl(cc.cond, reg, cc.reg, cc.reg2);
+    } else {
+        tcg_gen_setcondi_tl(cc.cond, reg, cc.reg, cc.imm);
+    }
+}
+
+static inline void gen_compute_eflags_c(DisasContext *s, TCGv reg)
+{
+    gen_setcc1(s, JCC_B << 1, reg);
+}
+
+/* generate a conditional jump to label 'l1' according to jump opcode
+   value 'b'. In the fast case, T0 is guaranteed not to be used. */
+static inline void gen_jcc1_noeob(DisasContext *s, int b, TCGLabel *l1)
+{
+    CCPrepare cc = gen_prepare_cc(s, b, cpu_T0);
+
+    if (cc.mask != -1) {
+        tcg_gen_andi_tl(cpu_T0, cc.reg, cc.mask);
+        cc.reg = cpu_T0;
+    }
+    if (cc.use_reg2) {
+        tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
+    } else {
+        tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
+    }
+}
+
+/* Generate a conditional jump to label 'l1' according to jump opcode
+   value 'b'. In the fast case, T0 is guaranteed not to be used.
+   A translation block must end soon.  */
+static inline void gen_jcc1(DisasContext *s, int b, TCGLabel *l1)
+{
+    CCPrepare cc = gen_prepare_cc(s, b, cpu_T0);
+
+    gen_update_cc_op(s);
+    if (cc.mask != -1) {
+        tcg_gen_andi_tl(cpu_T0, cc.reg, cc.mask);
+        cc.reg = cpu_T0;
+    }
+    set_cc_op(s, CC_OP_DYNAMIC);
+    if (cc.use_reg2) {
+        tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
+    } else {
+        tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
+    }
+}
+
+/* XXX: does not work with gdbstub "ice" single step - not a
+   serious problem */
+static TCGLabel *gen_jz_ecx_string(DisasContext *s, target_ulong next_eip)
+{
+    TCGLabel *l1 = gen_new_label();
+    TCGLabel *l2 = gen_new_label();
+    gen_op_jnz_ecx(s->aflag, l1);
+    gen_set_label(l2);
+    gen_jmp_tb(s, next_eip, 1);
+    gen_set_label(l1);
+    return l2;
+}
+
+static inline void gen_stos(DisasContext *s, TCGMemOp ot)
+{
+    gen_op_mov_v_reg(MO_32, cpu_T0, R_EAX);
+    gen_string_movl_A0_EDI(s);
+    gen_op_st_v(s, ot, cpu_T0, cpu_A0);
+    gen_op_movl_T0_Dshift(ot);
+    gen_op_add_reg_T0(s->aflag, R_EDI);
+}
+
+static inline void gen_lods(DisasContext *s, TCGMemOp ot)
+{
+    gen_string_movl_A0_ESI(s);
+    gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
+    gen_op_mov_reg_v(ot, R_EAX, cpu_T0);
+    gen_op_movl_T0_Dshift(ot);
+    gen_op_add_reg_T0(s->aflag, R_ESI);
+}
+
+static inline void gen_scas(DisasContext *s, TCGMemOp ot)
+{
+    gen_string_movl_A0_EDI(s);
+    gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
+    gen_op(s, OP_CMPL, ot, R_EAX);
+    gen_op_movl_T0_Dshift(ot);
+    gen_op_add_reg_T0(s->aflag, R_EDI);
+}
+
+static inline void gen_cmps(DisasContext *s, TCGMemOp ot)
+{
+    gen_string_movl_A0_EDI(s);
+    gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
+    gen_string_movl_A0_ESI(s);
+    gen_op(s, OP_CMPL, ot, OR_TMP0);
+    gen_op_movl_T0_Dshift(ot);
+    gen_op_add_reg_T0(s->aflag, R_ESI);
+    gen_op_add_reg_T0(s->aflag, R_EDI);
+}
+
+static void gen_bpt_io(DisasContext *s, TCGv_i32 t_port, int ot)
+{
+    if (s->flags & HF_IOBPT_MASK) {
+        TCGv_i32 t_size = tcg_const_i32(1 << ot);
+        TCGv t_next = tcg_const_tl(s->pc - s->cs_base);
+
+        gen_helper_bpt_io(cpu_env, t_port, t_size, t_next);
+        tcg_temp_free_i32(t_size);
+        tcg_temp_free(t_next);
+    }
+}
+
+
+static inline void gen_ins(DisasContext *s, TCGMemOp ot)
+{
+    if (s->tb->cflags & CF_USE_ICOUNT) {
+        gen_io_start();
+    }
+    gen_string_movl_A0_EDI(s);
+    /* Note: we must do this dummy write first to be restartable in
+       case of page fault. */
+    tcg_gen_movi_tl(cpu_T0, 0);
+    gen_op_st_v(s, ot, cpu_T0, cpu_A0);
+    tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_EDX]);
+    tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
+    gen_helper_in_func(ot, cpu_T0, cpu_tmp2_i32);
+    gen_op_st_v(s, ot, cpu_T0, cpu_A0);
+    gen_op_movl_T0_Dshift(ot);
+    gen_op_add_reg_T0(s->aflag, R_EDI);
+    gen_bpt_io(s, cpu_tmp2_i32, ot);
+    if (s->tb->cflags & CF_USE_ICOUNT) {
+        gen_io_end();
+    }
+}
+
+static inline void gen_outs(DisasContext *s, TCGMemOp ot)
+{
+    if (s->tb->cflags & CF_USE_ICOUNT) {
+        gen_io_start();
+    }
+    gen_string_movl_A0_ESI(s);
+    gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
+
+    tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_EDX]);
+    tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
+    tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T0);
+    gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
+    gen_op_movl_T0_Dshift(ot);
+    gen_op_add_reg_T0(s->aflag, R_ESI);
+    gen_bpt_io(s, cpu_tmp2_i32, ot);
+    if (s->tb->cflags & CF_USE_ICOUNT) {
+        gen_io_end();
+    }
+}
+
+/* same method as Valgrind: we generate jumps to the current or next
+   instruction */
+#define GEN_REPZ(op)                                                          \
+static inline void gen_repz_ ## op(DisasContext *s, TCGMemOp ot,              \
+                                 target_ulong cur_eip, target_ulong next_eip) \
+{                                                                             \
+    TCGLabel *l2;                                                             \
+    gen_update_cc_op(s);                                                      \
+    l2 = gen_jz_ecx_string(s, next_eip);                                      \
+    gen_ ## op(s, ot);                                                        \
+    gen_op_add_reg_im(s->aflag, R_ECX, -1);                                   \
+    /* a loop would cause two single step exceptions if ECX = 1               \
+       before rep string_insn */                                              \
+    if (s->repz_opt)                                                          \
+        gen_op_jz_ecx(s->aflag, l2);                                          \
+    gen_jmp(s, cur_eip);                                                      \
+}
+
+#define GEN_REPZ2(op)                                                         \
+static inline void gen_repz_ ## op(DisasContext *s, TCGMemOp ot,              \
+                                   target_ulong cur_eip,                      \
+                                   target_ulong next_eip,                     \
+                                   int nz)                                    \
+{                                                                             \
+    TCGLabel *l2;                                                             \
+    gen_update_cc_op(s);                                                      \
+    l2 = gen_jz_ecx_string(s, next_eip);                                      \
+    gen_ ## op(s, ot);                                                        \
+    gen_op_add_reg_im(s->aflag, R_ECX, -1);                                   \
+    gen_update_cc_op(s);                                                      \
+    gen_jcc1(s, (JCC_Z << 1) | (nz ^ 1), l2);                                 \
+    if (s->repz_opt)                                                          \
+        gen_op_jz_ecx(s->aflag, l2);                                          \
+    gen_jmp(s, cur_eip);                                                      \
+}
+
+GEN_REPZ(movs)
+GEN_REPZ(stos)
+GEN_REPZ(lods)
+GEN_REPZ(ins)
+GEN_REPZ(outs)
+GEN_REPZ2(scas)
+GEN_REPZ2(cmps)
+
+static void gen_helper_fp_arith_ST0_FT0(int op)
+{
+    switch (op) {
+    case 0:
+        gen_helper_fadd_ST0_FT0(cpu_env);
+        break;
+    case 1:
+        gen_helper_fmul_ST0_FT0(cpu_env);
+        break;
+    case 2:
+        gen_helper_fcom_ST0_FT0(cpu_env);
+        break;
+    case 3:
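+        /* FCOMP: the compare itself is identical to FCOM; the stack pop
+           is handled by the caller.  */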
+        gen_helper_fcom_ST0_FT0(cpu_env);
+        break;
+    case 4:
+        gen_helper_fsub_ST0_FT0(cpu_env);
+        break;
+    case 5:
+        gen_helper_fsubr_ST0_FT0(cpu_env);
+        break;
+    case 6:
+        gen_helper_fdiv_ST0_FT0(cpu_env);
+        break;
+    case 7:
+        gen_helper_fdivr_ST0_FT0(cpu_env);
+        break;
+    }
+}
+
+/* NOTE: the "r" (reversed) sub/div ops are swapped here relative to the
+   ST0,FT0 forms above */
+static void gen_helper_fp_arith_STN_ST0(int op, int opreg)
+{
+    TCGv_i32 tmp = tcg_const_i32(opreg);
+    switch (op) {
+    case 0:
+        gen_helper_fadd_STN_ST0(cpu_env, tmp);
+        break;
+    case 1:
+        gen_helper_fmul_STN_ST0(cpu_env, tmp);
+        break;
+    case 4:
+        gen_helper_fsubr_STN_ST0(cpu_env, tmp);
+        break;
+    case 5:
+        gen_helper_fsub_STN_ST0(cpu_env, tmp);
+        break;
+    case 6:
+        gen_helper_fdivr_STN_ST0(cpu_env, tmp);
+        break;
+    case 7:
+        gen_helper_fdiv_STN_ST0(cpu_env, tmp);
+        break;
+    }
+}
+
+/* if d == OR_TMP0, it means memory operand (address in A0) */
+static void gen_op(DisasContext *s1, int op, TCGMemOp ot, int d)
+{
+    if (d != OR_TMP0) {
+        gen_op_mov_v_reg(ot, cpu_T0, d);
+    } else if (!(s1->prefix & PREFIX_LOCK)) {
+        gen_op_ld_v(s1, ot, cpu_T0, cpu_A0);
+    }
+    switch (op) {
+    case OP_ADCL:
+        gen_compute_eflags_c(s1, cpu_tmp4);
+        if (s1->prefix & PREFIX_LOCK) {
+            tcg_gen_add_tl(cpu_T0, cpu_tmp4, cpu_T1);
+            tcg_gen_atomic_add_fetch_tl(cpu_T0, cpu_A0, cpu_T0,
+                                        s1->mem_index, ot | MO_LE);
+        } else {
+            tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_T1);
+            tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_tmp4);
+            gen_op_st_rm_T0_A0(s1, ot, d);
+        }
+        gen_op_update3_cc(cpu_tmp4);
+        set_cc_op(s1, CC_OP_ADCB + ot);
+        break;
+    case OP_SBBL:
+        gen_compute_eflags_c(s1, cpu_tmp4);
+        if (s1->prefix & PREFIX_LOCK) {
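+            /* There is no atomic subtract, so add -(src + carry)
+               instead.  */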
+            tcg_gen_add_tl(cpu_T0, cpu_T1, cpu_tmp4);
+            tcg_gen_neg_tl(cpu_T0, cpu_T0);
+            tcg_gen_atomic_add_fetch_tl(cpu_T0, cpu_A0, cpu_T0,
+                                        s1->mem_index, ot | MO_LE);
+        } else {
+            tcg_gen_sub_tl(cpu_T0, cpu_T0, cpu_T1);
+            tcg_gen_sub_tl(cpu_T0, cpu_T0, cpu_tmp4);
+            gen_op_st_rm_T0_A0(s1, ot, d);
+        }
+        gen_op_update3_cc(cpu_tmp4);
+        set_cc_op(s1, CC_OP_SBBB + ot);
+        break;
+    case OP_ADDL:
+        if (s1->prefix & PREFIX_LOCK) {
+            tcg_gen_atomic_add_fetch_tl(cpu_T0, cpu_A0, cpu_T1,
+                                        s1->mem_index, ot | MO_LE);
+        } else {
+            tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_T1);
+            gen_op_st_rm_T0_A0(s1, ot, d);
+        }
+        gen_op_update2_cc();
+        set_cc_op(s1, CC_OP_ADDB + ot);
+        break;
+    case OP_SUBL:
+        if (s1->prefix & PREFIX_LOCK) {
+            tcg_gen_neg_tl(cpu_T0, cpu_T1);
+            tcg_gen_atomic_fetch_add_tl(cpu_cc_srcT, cpu_A0, cpu_T0,
+                                        s1->mem_index, ot | MO_LE);
+            tcg_gen_sub_tl(cpu_T0, cpu_cc_srcT, cpu_T1);
+        } else {
+            tcg_gen_mov_tl(cpu_cc_srcT, cpu_T0);
+            tcg_gen_sub_tl(cpu_T0, cpu_T0, cpu_T1);
+            gen_op_st_rm_T0_A0(s1, ot, d);
+        }
+        gen_op_update2_cc();
+        set_cc_op(s1, CC_OP_SUBB + ot);
+        break;
+    default:
+    case OP_ANDL:
+        if (s1->prefix & PREFIX_LOCK) {
+            tcg_gen_atomic_and_fetch_tl(cpu_T0, cpu_A0, cpu_T1,
+                                        s1->mem_index, ot | MO_LE);
+        } else {
+            tcg_gen_and_tl(cpu_T0, cpu_T0, cpu_T1);
+            gen_op_st_rm_T0_A0(s1, ot, d);
+        }
+        gen_op_update1_cc();
+        set_cc_op(s1, CC_OP_LOGICB + ot);
+        break;
+    case OP_ORL:
+        if (s1->prefix & PREFIX_LOCK) {
+            tcg_gen_atomic_or_fetch_tl(cpu_T0, cpu_A0, cpu_T1,
+                                       s1->mem_index, ot | MO_LE);
+        } else {
+            tcg_gen_or_tl(cpu_T0, cpu_T0, cpu_T1);
+            gen_op_st_rm_T0_A0(s1, ot, d);
+        }
+        gen_op_update1_cc();
+        set_cc_op(s1, CC_OP_LOGICB + ot);
+        break;
+    case OP_XORL:
+        if (s1->prefix & PREFIX_LOCK) {
+            tcg_gen_atomic_xor_fetch_tl(cpu_T0, cpu_A0, cpu_T1,
+                                        s1->mem_index, ot | MO_LE);
+        } else {
+            tcg_gen_xor_tl(cpu_T0, cpu_T0, cpu_T1);
+            gen_op_st_rm_T0_A0(s1, ot, d);
+        }
+        gen_op_update1_cc();
+        set_cc_op(s1, CC_OP_LOGICB + ot);
+        break;
+    case OP_CMPL:
+        tcg_gen_mov_tl(cpu_cc_src, cpu_T1);
+        tcg_gen_mov_tl(cpu_cc_srcT, cpu_T0);
+        tcg_gen_sub_tl(cpu_cc_dst, cpu_T0, cpu_T1);
+        set_cc_op(s1, CC_OP_SUBB + ot);
+        break;
+    }
+}
+
+/* if d == OR_TMP0, it means memory operand (address in A0) */
+static void gen_inc(DisasContext *s1, TCGMemOp ot, int d, int c)
+{
+    if (s1->prefix & PREFIX_LOCK) {
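+        /* With LOCK, INC/DEC becomes an atomic add of +1 or -1.  */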
+        tcg_gen_movi_tl(cpu_T0, c > 0 ? 1 : -1);
+        tcg_gen_atomic_add_fetch_tl(cpu_T0, cpu_A0, cpu_T0,
+                                    s1->mem_index, ot | MO_LE);
+    } else {
+        if (d != OR_TMP0) {
+            gen_op_mov_v_reg(ot, cpu_T0, d);
+        } else {
+            gen_op_ld_v(s1, ot, cpu_T0, cpu_A0);
+        }
+        tcg_gen_addi_tl(cpu_T0, cpu_T0, (c > 0 ? 1 : -1));
+        gen_op_st_rm_T0_A0(s1, ot, d);
+    }
+
+    gen_compute_eflags_c(s1, cpu_cc_src);
+    tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
+    set_cc_op(s1, (c > 0 ? CC_OP_INCB : CC_OP_DECB) + ot);
+}
+
+static void gen_shift_flags(DisasContext *s, TCGMemOp ot, TCGv result,
+                            TCGv shm1, TCGv count, bool is_right)
+{
+    TCGv_i32 z32, s32, oldop;
+    TCGv z_tl;
+
+    /* Store the results into the CC variables.  If we know that the
+       variable must be dead, store unconditionally.  Otherwise we'll
+       need to not disrupt the current contents.  */
+    z_tl = tcg_const_tl(0);
+    if (cc_op_live[s->cc_op] & USES_CC_DST) {
+        tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_dst, count, z_tl,
+                           result, cpu_cc_dst);
+    } else {
+        tcg_gen_mov_tl(cpu_cc_dst, result);
+    }
+    if (cc_op_live[s->cc_op] & USES_CC_SRC) {
+        tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_src, count, z_tl,
+                           shm1, cpu_cc_src);
+    } else {
+        tcg_gen_mov_tl(cpu_cc_src, shm1);
+    }
+    tcg_temp_free(z_tl);
+
+    /* Get the two potential CC_OP values into temporaries.  */
+    tcg_gen_movi_i32(cpu_tmp2_i32, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
+    if (s->cc_op == CC_OP_DYNAMIC) {
+        oldop = cpu_cc_op;
+    } else {
+        tcg_gen_movi_i32(cpu_tmp3_i32, s->cc_op);
+        oldop = cpu_tmp3_i32;
+    }
+
+    /* Conditionally store the CC_OP value.  */
+    z32 = tcg_const_i32(0);
+    s32 = tcg_temp_new_i32();
+    tcg_gen_trunc_tl_i32(s32, count);
+    tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, s32, z32, cpu_tmp2_i32, oldop);
+    tcg_temp_free_i32(z32);
+    tcg_temp_free_i32(s32);
+
+    /* The CC_OP value is no longer predictable.  */
+    set_cc_op(s, CC_OP_DYNAMIC);
+}
+
+static void gen_shift_rm_T1(DisasContext *s, TCGMemOp ot, int op1,
+                            int is_right, int is_arith)
+{
+    target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f);
+
+    /* load */
+    if (op1 == OR_TMP0) {
+        gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
+    } else {
+        gen_op_mov_v_reg(ot, cpu_T0, op1);
+    }
+
+    tcg_gen_andi_tl(cpu_T1, cpu_T1, mask);
+    tcg_gen_subi_tl(cpu_tmp0, cpu_T1, 1);
+
+    if (is_right) {
+        if (is_arith) {
+            gen_exts(ot, cpu_T0);
+            tcg_gen_sar_tl(cpu_tmp0, cpu_T0, cpu_tmp0);
+            tcg_gen_sar_tl(cpu_T0, cpu_T0, cpu_T1);
+        } else {
+            gen_extu(ot, cpu_T0);
+            tcg_gen_shr_tl(cpu_tmp0, cpu_T0, cpu_tmp0);
+            tcg_gen_shr_tl(cpu_T0, cpu_T0, cpu_T1);
+        }
+    } else {
+        tcg_gen_shl_tl(cpu_tmp0, cpu_T0, cpu_tmp0);
+        tcg_gen_shl_tl(cpu_T0, cpu_T0, cpu_T1);
+    }
+
+    /* store */
+    gen_op_st_rm_T0_A0(s, ot, op1);
+
+    gen_shift_flags(s, ot, cpu_T0, cpu_tmp0, cpu_T1, is_right);
+}
+
+static void gen_shift_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2,
+                            int is_right, int is_arith)
+{
+    int mask = (ot == MO_64 ? 0x3f : 0x1f);
+
+    /* load */
+    if (op1 == OR_TMP0) {
+        gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
+    } else {
+        gen_op_mov_v_reg(ot, cpu_T0, op1);
+    }
+
+    op2 &= mask;
+    if (op2 != 0) {
+        if (is_right) {
+            if (is_arith) {
+                gen_exts(ot, cpu_T0);
+                tcg_gen_sari_tl(cpu_tmp4, cpu_T0, op2 - 1);
+                tcg_gen_sari_tl(cpu_T0, cpu_T0, op2);
+            } else {
+                gen_extu(ot, cpu_T0);
+                tcg_gen_shri_tl(cpu_tmp4, cpu_T0, op2 - 1);
+                tcg_gen_shri_tl(cpu_T0, cpu_T0, op2);
+            }
+        } else {
+            tcg_gen_shli_tl(cpu_tmp4, cpu_T0, op2 - 1);
+            tcg_gen_shli_tl(cpu_T0, cpu_T0, op2);
+        }
+    }
+
+    /* store */
+    gen_op_st_rm_T0_A0(s, ot, op1);
+
+    /* update eflags if non-zero shift */
+    if (op2 != 0) {
+        tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
+        tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
+        set_cc_op(s, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
+    }
+}
+
+static void gen_rot_rm_T1(DisasContext *s, TCGMemOp ot, int op1, int is_right)
+{
+    target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f);
+    TCGv_i32 t0, t1;
+
+    /* load */
+    if (op1 == OR_TMP0) {
+        gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
+    } else {
+        gen_op_mov_v_reg(ot, cpu_T0, op1);
+    }
+
+    tcg_gen_andi_tl(cpu_T1, cpu_T1, mask);
+
+    switch (ot) {
+    case MO_8:
+        /* Replicate the 8-bit input so that a 32-bit rotate works.  */
+        tcg_gen_ext8u_tl(cpu_T0, cpu_T0);
+        tcg_gen_muli_tl(cpu_T0, cpu_T0, 0x01010101);
+        goto do_long;
+    case MO_16:
+        /* Replicate the 16-bit input so that a 32-bit rotate works.  */
+        tcg_gen_deposit_tl(cpu_T0, cpu_T0, cpu_T0, 16, 16);
+        goto do_long;
+    do_long:
+#ifdef TARGET_X86_64
+    case MO_32:
+        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
+        tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T1);
+        if (is_right) {
+            tcg_gen_rotr_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
+        } else {
+            tcg_gen_rotl_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
+        }
+        tcg_gen_extu_i32_tl(cpu_T0, cpu_tmp2_i32);
+        break;
+#endif
+    default:
+        if (is_right) {
+            tcg_gen_rotr_tl(cpu_T0, cpu_T0, cpu_T1);
+        } else {
+            tcg_gen_rotl_tl(cpu_T0, cpu_T0, cpu_T1);
+        }
+        break;
+    }
+
+    /* store */
+    gen_op_st_rm_T0_A0(s, ot, op1);
+
+    /* We'll need the flags computed into CC_SRC.  */
+    gen_compute_eflags(s);
+
+    /* The value that was "rotated out" is now present at the other end
+       of the word.  Compute C into CC_DST and O into CC_SRC2.  Note that
+       since we've computed the flags into CC_SRC, these variables are
+       currently dead.  */
+    if (is_right) {
+        tcg_gen_shri_tl(cpu_cc_src2, cpu_T0, mask - 1);
+        tcg_gen_shri_tl(cpu_cc_dst, cpu_T0, mask);
+        tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1);
+    } else {
+        tcg_gen_shri_tl(cpu_cc_src2, cpu_T0, mask);
+        tcg_gen_andi_tl(cpu_cc_dst, cpu_T0, 1);
+    }
+    tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
+    tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst);
+
+    /* Now conditionally store the new CC_OP value.  If the shift count
+       is 0 we keep the CC_OP_EFLAGS setting so that only CC_SRC is live.
+       Otherwise reuse CC_OP_ADCOX, which has the C and O flags split out
+       exactly as we computed above.  */
+    t0 = tcg_const_i32(0);
+    t1 = tcg_temp_new_i32();
+    tcg_gen_trunc_tl_i32(t1, cpu_T1);
+    tcg_gen_movi_i32(cpu_tmp2_i32, CC_OP_ADCOX);
+    tcg_gen_movi_i32(cpu_tmp3_i32, CC_OP_EFLAGS);
+    tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, t1, t0,
+                        cpu_tmp2_i32, cpu_tmp3_i32);
+    tcg_temp_free_i32(t0);
+    tcg_temp_free_i32(t1);
+
+    /* The CC_OP value is no longer predictable.  */
+    set_cc_op(s, CC_OP_DYNAMIC);
+}
+
+static void gen_rot_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2,
+                          int is_right)
+{
+    int mask = (ot == MO_64 ? 0x3f : 0x1f);
+    int shift;
+
+    /* load */
+    if (op1 == OR_TMP0) {
+        gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
+    } else {
+        gen_op_mov_v_reg(ot, cpu_T0, op1);
+    }
+
+    op2 &= mask;
+    if (op2 != 0) {
+        switch (ot) {
+#ifdef TARGET_X86_64
+        case MO_32:
+            tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
+            if (is_right) {
+                tcg_gen_rotri_i32(cpu_tmp2_i32, cpu_tmp2_i32, op2);
+            } else {
+                tcg_gen_rotli_i32(cpu_tmp2_i32, cpu_tmp2_i32, op2);
+            }
+            tcg_gen_extu_i32_tl(cpu_T0, cpu_tmp2_i32);
+            break;
+#endif
+        default:
+            if (is_right) {
+                tcg_gen_rotri_tl(cpu_T0, cpu_T0, op2);
+            } else {
+                tcg_gen_rotli_tl(cpu_T0, cpu_T0, op2);
+            }
+            break;
+        case MO_8:
+            mask = 7;
+            goto do_shifts;
+        case MO_16:
+            mask = 15;
+        do_shifts:
+            shift = op2 & mask;
+            if (is_right) {
+                shift = mask + 1 - shift;
+            }
+            gen_extu(ot, cpu_T0);
+            tcg_gen_shli_tl(cpu_tmp0, cpu_T0, shift);
+            tcg_gen_shri_tl(cpu_T0, cpu_T0, mask + 1 - shift);
+            tcg_gen_or_tl(cpu_T0, cpu_T0, cpu_tmp0);
+            break;
+        }
+    }
+
+    /* store */
+    gen_op_st_rm_T0_A0(s, ot, op1);
+
+    if (op2 != 0) {
+        /* Compute the flags into CC_SRC.  */
+        gen_compute_eflags(s);
+
+        /* The value that was "rotated out" is now present at the other end
+           of the word.  Compute C into CC_DST and O into CC_SRC2.  Note that
+           since we've computed the flags into CC_SRC, these variables are
+           currently dead.  */
+        if (is_right) {
+            tcg_gen_shri_tl(cpu_cc_src2, cpu_T0, mask - 1);
+            tcg_gen_shri_tl(cpu_cc_dst, cpu_T0, mask);
+            tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1);
+        } else {
+            tcg_gen_shri_tl(cpu_cc_src2, cpu_T0, mask);
+            tcg_gen_andi_tl(cpu_cc_dst, cpu_T0, 1);
+        }
+        tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
+        tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst);
+        set_cc_op(s, CC_OP_ADCOX);
+    }
+}
+
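+/* Rotate through carry (RCL/RCR).  These are implemented with helpers
+   that operate on the carry flag directly, so the flags are first
+   forced into canonical CC_OP_EFLAGS form.  */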
+/* XXX: add faster immediate = 1 case */
+static void gen_rotc_rm_T1(DisasContext *s, TCGMemOp ot, int op1,
+                           int is_right)
+{
+    gen_compute_eflags(s);
+    assert(s->cc_op == CC_OP_EFLAGS);
+
+    /* load */
+    if (op1 == OR_TMP0) {
+        gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
+    } else {
+        gen_op_mov_v_reg(ot, cpu_T0, op1);
+    }
+
+    if (is_right) {
+        switch (ot) {
+        case MO_8:
+            gen_helper_rcrb(cpu_T0, cpu_env, cpu_T0, cpu_T1);
+            break;
+        case MO_16:
+            gen_helper_rcrw(cpu_T0, cpu_env, cpu_T0, cpu_T1);
+            break;
+        case MO_32:
+            gen_helper_rcrl(cpu_T0, cpu_env, cpu_T0, cpu_T1);
+            break;
+#ifdef TARGET_X86_64
+        case MO_64:
+            gen_helper_rcrq(cpu_T0, cpu_env, cpu_T0, cpu_T1);
+            break;
+#endif
+        default:
+            tcg_abort();
+        }
+    } else {
+        switch (ot) {
+        case MO_8:
+            gen_helper_rclb(cpu_T0, cpu_env, cpu_T0, cpu_T1);
+            break;
+        case MO_16:
+            gen_helper_rclw(cpu_T0, cpu_env, cpu_T0, cpu_T1);
+            break;
+        case MO_32:
+            gen_helper_rcll(cpu_T0, cpu_env, cpu_T0, cpu_T1);
+            break;
+#ifdef TARGET_X86_64
+        case MO_64:
+            gen_helper_rclq(cpu_T0, cpu_env, cpu_T0, cpu_T1);
+            break;
+#endif
+        default:
+            tcg_abort();
+        }
+    }
+    /* store */
+    gen_op_st_rm_T0_A0(s, ot, op1);
+}
+
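+/* Double-width shifts (SHLD/SHRD).  Where possible the two operands
+   are combined into a single double-width quantity so that one wide
+   shift produces both the result and the bit shifted out.  */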
+/* XXX: add faster immediate case */
+static void gen_shiftd_rm_T1(DisasContext *s, TCGMemOp ot, int op1,
+                             bool is_right, TCGv count_in)
+{
+    target_ulong mask = (ot == MO_64 ? 63 : 31);
+    TCGv count;
+
+    /* load */
+    if (op1 == OR_TMP0) {
+        gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
+    } else {
+        gen_op_mov_v_reg(ot, cpu_T0, op1);
+    }
+
+    count = tcg_temp_new();
+    tcg_gen_andi_tl(count, count_in, mask);
+
+    switch (ot) {
+    case MO_16:
+        /* Note: we implement the Intel behaviour for shift count > 16.
+           This means "shrdw C, B, A" shifts A:B:A >> C.  Build the B:A
+           portion by constructing it as a 32-bit value.  */
+        if (is_right) {
+            tcg_gen_deposit_tl(cpu_tmp0, cpu_T0, cpu_T1, 16, 16);
+            tcg_gen_mov_tl(cpu_T1, cpu_T0);
+            tcg_gen_mov_tl(cpu_T0, cpu_tmp0);
+        } else {
+            tcg_gen_deposit_tl(cpu_T1, cpu_T0, cpu_T1, 16, 16);
+        }
+        /* FALLTHRU */
+#ifdef TARGET_X86_64
+    case MO_32:
+        /* Concatenate the two 32-bit values and use a 64-bit shift.  */
+        tcg_gen_subi_tl(cpu_tmp0, count, 1);
+        if (is_right) {
+            tcg_gen_concat_tl_i64(cpu_T0, cpu_T0, cpu_T1);
+            tcg_gen_shr_i64(cpu_tmp0, cpu_T0, cpu_tmp0);
+            tcg_gen_shr_i64(cpu_T0, cpu_T0, count);
+        } else {
+            tcg_gen_concat_tl_i64(cpu_T0, cpu_T1, cpu_T0);
+            tcg_gen_shl_i64(cpu_tmp0, cpu_T0, cpu_tmp0);
+            tcg_gen_shl_i64(cpu_T0, cpu_T0, count);
+            tcg_gen_shri_i64(cpu_tmp0, cpu_tmp0, 32);
+            tcg_gen_shri_i64(cpu_T0, cpu_T0, 32);
+        }
+        break;
+#endif
+    default:
+        tcg_gen_subi_tl(cpu_tmp0, count, 1);
+        if (is_right) {
+            tcg_gen_shr_tl(cpu_tmp0, cpu_T0, cpu_tmp0);
+
+            tcg_gen_subfi_tl(cpu_tmp4, mask + 1, count);
+            tcg_gen_shr_tl(cpu_T0, cpu_T0, count);
+            tcg_gen_shl_tl(cpu_T1, cpu_T1, cpu_tmp4);
+        } else {
+            tcg_gen_shl_tl(cpu_tmp0, cpu_T0, cpu_tmp0);
+            if (ot == MO_16) {
+                /* Only needed if count > 16, for Intel behaviour.  */
+                tcg_gen_subfi_tl(cpu_tmp4, 33, count);
+                tcg_gen_shr_tl(cpu_tmp4, cpu_T1, cpu_tmp4);
+                tcg_gen_or_tl(cpu_tmp0, cpu_tmp0, cpu_tmp4);
+            }
+
+            tcg_gen_subfi_tl(cpu_tmp4, mask + 1, count);
+            tcg_gen_shl_tl(cpu_T0, cpu_T0, count);
+            tcg_gen_shr_tl(cpu_T1, cpu_T1, cpu_tmp4);
+        }
+        tcg_gen_movi_tl(cpu_tmp4, 0);
+        tcg_gen_movcond_tl(TCG_COND_EQ, cpu_T1, count, cpu_tmp4,
+                           cpu_tmp4, cpu_T1);
+        tcg_gen_or_tl(cpu_T0, cpu_T0, cpu_T1);
+        break;
+    }
+
+    /* store */
+    gen_op_st_rm_T0_A0(s, ot, op1);
+
+    gen_shift_flags(s, ot, cpu_T0, cpu_tmp0, count, is_right);
+    tcg_temp_free(count);
+}
+
+static void gen_shift(DisasContext *s1, int op, TCGMemOp ot, int d, int s)
+{
+    if (s != OR_TMP1) {
+        gen_op_mov_v_reg(ot, cpu_T1, s);
+    }
+    switch (op) {
+    case OP_ROL:
+        gen_rot_rm_T1(s1, ot, d, 0);
+        break;
+    case OP_ROR:
+        gen_rot_rm_T1(s1, ot, d, 1);
+        break;
+    case OP_SHL:
+    case OP_SHL1:
+        gen_shift_rm_T1(s1, ot, d, 0, 0);
+        break;
+    case OP_SHR:
+        gen_shift_rm_T1(s1, ot, d, 1, 0);
+        break;
+    case OP_SAR:
+        gen_shift_rm_T1(s1, ot, d, 1, 1);
+        break;
+    case OP_RCL:
+        gen_rotc_rm_T1(s1, ot, d, 0);
+        break;
+    case OP_RCR:
+        gen_rotc_rm_T1(s1, ot, d, 1);
+        break;
+    }
+}
+
+static void gen_shifti(DisasContext *s1, int op, TCGMemOp ot, int d, int c)
+{
+    switch (op) {
+    case OP_ROL:
+        gen_rot_rm_im(s1, ot, d, c, 0);
+        break;
+    case OP_ROR:
+        gen_rot_rm_im(s1, ot, d, c, 1);
+        break;
+    case OP_SHL:
+    case OP_SHL1:
+        gen_shift_rm_im(s1, ot, d, c, 0, 0);
+        break;
+    case OP_SHR:
+        gen_shift_rm_im(s1, ot, d, c, 1, 0);
+        break;
+    case OP_SAR:
+        gen_shift_rm_im(s1, ot, d, c, 1, 1);
+        break;
+    default:
+        /* currently not optimized */
+        tcg_gen_movi_tl(cpu_T1, c);
+        gen_shift(s1, op, ot, d, OR_TMP1);
+        break;
+    }
+}
+
+/* Decompose an address.  */
+
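+/* Decoded modrm address: base and index hold register numbers, with
+   -1 meaning "absent".  In 64-bit mode a RIP-relative address is
+   marked with base == -2, the target having already been folded into
+   disp.  */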
+typedef struct AddressParts {
+    int def_seg;
+    int base;
+    int index;
+    int scale;
+    target_long disp;
+} AddressParts;
+
+static AddressParts gen_lea_modrm_0(CPUX86State *env, DisasContext *s,
+                                    int modrm)
+{
+    int def_seg, base, index, scale, mod, rm;
+    target_long disp;
+    bool havesib;
+
+    def_seg = R_DS;
+    index = -1;
+    scale = 0;
+    disp = 0;
+
+    mod = (modrm >> 6) & 3;
+    rm = modrm & 7;
+    base = rm | REX_B(s);
+
+    if (mod == 3) {
+        /* Normally filtered out earlier, but including this path
+           simplifies multi-byte nop, as well as bndcl, bndcu, bndcn.  */
+        goto done;
+    }
+
+    switch (s->aflag) {
+    case MO_64:
+    case MO_32:
+        havesib = 0;
+        if (rm == 4) {
+            int code = cpu_ldub_code(env, s->pc++);
+            scale = (code >> 6) & 3;
+            index = ((code >> 3) & 7) | REX_X(s);
+            if (index == 4) {
+                index = -1;  /* no index */
+            }
+            base = (code & 7) | REX_B(s);
+            havesib = 1;
+        }
+
+        switch (mod) {
+        case 0:
+            if ((base & 7) == 5) {
+                base = -1;
+                disp = (int32_t)cpu_ldl_code(env, s->pc);
+                s->pc += 4;
+                if (CODE64(s) && !havesib) {
+                    base = -2;
+                    disp += s->pc + s->rip_offset;
+                }
+            }
+            break;
+        case 1:
+            disp = (int8_t)cpu_ldub_code(env, s->pc++);
+            break;
+        default:
+        case 2:
+            disp = (int32_t)cpu_ldl_code(env, s->pc);
+            s->pc += 4;
+            break;
+        }
+
+        /* For correct popl handling with esp.  */
+        if (base == R_ESP && s->popl_esp_hack) {
+            disp += s->popl_esp_hack;
+        }
+        if (base == R_EBP || base == R_ESP) {
+            def_seg = R_SS;
+        }
+        break;
+
+    case MO_16:
+        if (mod == 0) {
+            if (rm == 6) {
+                base = -1;
+                disp = cpu_lduw_code(env, s->pc);
+                s->pc += 2;
+                break;
+            }
+        } else if (mod == 1) {
+            disp = (int8_t)cpu_ldub_code(env, s->pc++);
+        } else {
+            disp = (int16_t)cpu_lduw_code(env, s->pc);
+            s->pc += 2;
+        }
+
+        switch (rm) {
+        case 0:
+            base = R_EBX;
+            index = R_ESI;
+            break;
+        case 1:
+            base = R_EBX;
+            index = R_EDI;
+            break;
+        case 2:
+            base = R_EBP;
+            index = R_ESI;
+            def_seg = R_SS;
+            break;
+        case 3:
+            base = R_EBP;
+            index = R_EDI;
+            def_seg = R_SS;
+            break;
+        case 4:
+            base = R_ESI;
+            break;
+        case 5:
+            base = R_EDI;
+            break;
+        case 6:
+            base = R_EBP;
+            def_seg = R_SS;
+            break;
+        default:
+        case 7:
+            base = R_EBX;
+            break;
+        }
+        break;
+
+    default:
+        tcg_abort();
+    }
+
+ done:
+    return (AddressParts){ def_seg, base, index, scale, disp };
+}
+
+/* Compute the address, with a minimum number of TCG ops.  */
+static TCGv gen_lea_modrm_1(AddressParts a)
+{
+    TCGv ea;
+
+    TCGV_UNUSED(ea);
+    if (a.index >= 0) {
+        if (a.scale == 0) {
+            ea = cpu_regs[a.index];
+        } else {
+            tcg_gen_shli_tl(cpu_A0, cpu_regs[a.index], a.scale);
+            ea = cpu_A0;
+        }
+        if (a.base >= 0) {
+            tcg_gen_add_tl(cpu_A0, ea, cpu_regs[a.base]);
+            ea = cpu_A0;
+        }
+    } else if (a.base >= 0) {
+        ea = cpu_regs[a.base];
+    }
+    if (TCGV_IS_UNUSED(ea)) {
+        tcg_gen_movi_tl(cpu_A0, a.disp);
+        ea = cpu_A0;
+    } else if (a.disp != 0) {
+        tcg_gen_addi_tl(cpu_A0, ea, a.disp);
+        ea = cpu_A0;
+    }
+
+    return ea;
+}
+
+static void gen_lea_modrm(CPUX86State *env, DisasContext *s, int modrm)
+{
+    AddressParts a = gen_lea_modrm_0(env, s, modrm);
+    TCGv ea = gen_lea_modrm_1(a);
+    gen_lea_v_seg(s, s->aflag, ea, a.def_seg, s->override);
+}
+
+static void gen_nop_modrm(CPUX86State *env, DisasContext *s, int modrm)
+{
+    (void)gen_lea_modrm_0(env, s, modrm);
+}
+
+/* Used for BNDCL, BNDCU, BNDCN.  */
+static void gen_bndck(CPUX86State *env, DisasContext *s, int modrm,
+                      TCGCond cond, TCGv_i64 bndv)
+{
+    TCGv ea = gen_lea_modrm_1(gen_lea_modrm_0(env, s, modrm));
+
+    tcg_gen_extu_tl_i64(cpu_tmp1_i64, ea);
+    if (!CODE64(s)) {
+        tcg_gen_ext32u_i64(cpu_tmp1_i64, cpu_tmp1_i64);
+    }
+    tcg_gen_setcond_i64(cond, cpu_tmp1_i64, cpu_tmp1_i64, bndv);
+    tcg_gen_extrl_i64_i32(cpu_tmp2_i32, cpu_tmp1_i64);
+    gen_helper_bndck(cpu_env, cpu_tmp2_i32);
+}
+
+/* used for LEA and MOV AX, mem */
+static void gen_add_A0_ds_seg(DisasContext *s)
+{
+    gen_lea_v_seg(s, s->aflag, cpu_A0, R_DS, s->override);
+}
+
+/* generate modrm memory load or store of 'reg'. TMP0 is used if reg ==
+   OR_TMP0 */
+static void gen_ldst_modrm(CPUX86State *env, DisasContext *s, int modrm,
+                           TCGMemOp ot, int reg, int is_store)
+{
+    int mod, rm;
+
+    mod = (modrm >> 6) & 3;
+    rm = (modrm & 7) | REX_B(s);
+    if (mod == 3) {
+        if (is_store) {
+            if (reg != OR_TMP0) {
+                gen_op_mov_v_reg(ot, cpu_T0, reg);
+            }
+            gen_op_mov_reg_v(ot, rm, cpu_T0);
+        } else {
+            gen_op_mov_v_reg(ot, cpu_T0, rm);
+            if (reg != OR_TMP0) {
+                gen_op_mov_reg_v(ot, reg, cpu_T0);
+            }
+        }
+    } else {
+        gen_lea_modrm(env, s, modrm);
+        if (is_store) {
+            if (reg != OR_TMP0) {
+                gen_op_mov_v_reg(ot, cpu_T0, reg);
+            }
+            gen_op_st_v(s, ot, cpu_T0, cpu_A0);
+        } else {
+            gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
+            if (reg != OR_TMP0) {
+                gen_op_mov_reg_v(ot, reg, cpu_T0);
+            }
+        }
+    }
+}
+
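+/* Fetch an immediate operand of size "ot" from the instruction
+   stream.  Note that MO_64 still fetches only 32 bits: the common
+   64-bit forms take a sign-extended 32-bit immediate, and the caller
+   performs the extension.  */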
+static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, TCGMemOp ot)
+{
+    uint32_t ret;
+
+    switch (ot) {
+    case MO_8:
+        ret = cpu_ldub_code(env, s->pc);
+        s->pc++;
+        break;
+    case MO_16:
+        ret = cpu_lduw_code(env, s->pc);
+        s->pc += 2;
+        break;
+    case MO_32:
+#ifdef TARGET_X86_64
+    case MO_64:
+#endif
+        ret = cpu_ldl_code(env, s->pc);
+        s->pc += 4;
+        break;
+    default:
+        tcg_abort();
+    }
+    return ret;
+}
+
+static inline int insn_const_size(TCGMemOp ot)
+{
+    if (ot <= MO_32) {
+        return 1 << ot;
+    } else {
+        return 4;
+    }
+}
+
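+/* Direct TB chaining is only valid when the destination lies in the
+   same guest page as this TB (either the current or the starting
+   page), since remapping a page must invalidate the link.  User-mode
+   emulation does not need this restriction, hence the unconditional
+   "true" below.  */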
+static inline bool use_goto_tb(DisasContext *s, target_ulong pc)
+{
+#ifndef CONFIG_USER_ONLY
+    return (pc & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK) ||
+           (pc & TARGET_PAGE_MASK) == (s->pc_start & TARGET_PAGE_MASK);
+#else
+    return true;
+#endif
+}
+
+static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong eip)
+{
+    target_ulong pc = s->cs_base + eip;
+
+    if (use_goto_tb(s, pc))  {
+        /* jump to same page: we can use a direct jump */
+        tcg_gen_goto_tb(tb_num);
+        gen_jmp_im(eip);
+        tcg_gen_exit_tb((uintptr_t)s->tb + tb_num);
+    } else {
+        /* jump to another page: currently not optimized */
+        gen_jmp_im(eip);
+        gen_eob(s);
+    }
+}
+
+static inline void gen_jcc(DisasContext *s, int b,
+                           target_ulong val, target_ulong next_eip)
+{
+    TCGLabel *l1, *l2;
+
+    if (s->jmp_opt) {
+        l1 = gen_new_label();
+        gen_jcc1(s, b, l1);
+
+        gen_goto_tb(s, 0, next_eip);
+
+        gen_set_label(l1);
+        gen_goto_tb(s, 1, val);
+        s->is_jmp = DISAS_TB_JUMP;
+    } else {
+        l1 = gen_new_label();
+        l2 = gen_new_label();
+        gen_jcc1(s, b, l1);
+
+        gen_jmp_im(next_eip);
+        tcg_gen_br(l2);
+
+        gen_set_label(l1);
+        gen_jmp_im(val);
+        gen_set_label(l2);
+        gen_eob(s);
+    }
+}
+
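+/* CMOVcc.  The source operand is loaded unconditionally (as on real
+   hardware, where a memory source is read, and may fault, even when
+   the condition is false); only the register write-back is made
+   conditional, via movcond.  */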
+static void gen_cmovcc1(CPUX86State *env, DisasContext *s, TCGMemOp ot, int b,
+                        int modrm, int reg)
+{
+    CCPrepare cc;
+
+    gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
+
+    cc = gen_prepare_cc(s, b, cpu_T1);
+    if (cc.mask != -1) {
+        TCGv t0 = tcg_temp_new();
+        tcg_gen_andi_tl(t0, cc.reg, cc.mask);
+        cc.reg = t0;
+    }
+    if (!cc.use_reg2) {
+        cc.reg2 = tcg_const_tl(cc.imm);
+    }
+
+    tcg_gen_movcond_tl(cc.cond, cpu_T0, cc.reg, cc.reg2,
+                       cpu_T0, cpu_regs[reg]);
+    gen_op_mov_reg_v(ot, reg, cpu_T0);
+
+    if (cc.mask != -1) {
+        tcg_temp_free(cc.reg);
+    }
+    if (!cc.use_reg2) {
+        tcg_temp_free(cc.reg2);
+    }
+}
+
+static inline void gen_op_movl_T0_seg(int seg_reg)
+{
+    tcg_gen_ld32u_tl(cpu_T0, cpu_env,
+                     offsetof(CPUX86State,segs[seg_reg].selector));
+}
+
+static inline void gen_op_movl_seg_T0_vm(int seg_reg)
+{
+    tcg_gen_ext16u_tl(cpu_T0, cpu_T0);
+    tcg_gen_st32_tl(cpu_T0, cpu_env,
+                    offsetof(CPUX86State,segs[seg_reg].selector));
+    tcg_gen_shli_tl(cpu_seg_base[seg_reg], cpu_T0, 4);
+}
+
+/* move T0 to seg_reg and compute if the CPU state may change. Never
+   call this function with seg_reg == R_CS */
+static void gen_movl_seg_T0(DisasContext *s, int seg_reg)
+{
+    if (s->pe && !s->vm86) {
+        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
+        gen_helper_load_seg(cpu_env, tcg_const_i32(seg_reg), cpu_tmp2_i32);
+        /* abort translation because the addseg value may change or
+           because ss32 may change. For R_SS, translation must always
+           stop as a special handling must be done to disable hardware
+           interrupts for the next instruction */
+        if (seg_reg == R_SS || (s->code32 && seg_reg < R_FS)) {
+            s->is_jmp = DISAS_TB_JUMP;
+        }
+    } else {
+        gen_op_movl_seg_T0_vm(seg_reg);
+        if (seg_reg == R_SS) {
+            s->is_jmp = DISAS_TB_JUMP;
+        }
+    }
+}
+
+static inline int svm_is_rep(int prefixes)
+{
+    return ((prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) ? 8 : 0);
+}
+
+static inline void
+gen_svm_check_intercept_param(DisasContext *s, target_ulong pc_start,
+                              uint32_t type, uint64_t param)
+{
+    /* no SVM activated; fast case */
+    if (likely(!(s->flags & HF_SVMI_MASK))) {
+        return;
+    }
+    gen_update_cc_op(s);
+    gen_jmp_im(pc_start - s->cs_base);
+    gen_helper_svm_check_intercept_param(cpu_env, tcg_const_i32(type),
+                                         tcg_const_i64(param));
+}
+
+static inline void
+gen_svm_check_intercept(DisasContext *s, target_ulong pc_start, uint64_t type)
+{
+    gen_svm_check_intercept_param(s, pc_start, type, 0);
+}
+
+static inline void gen_stack_update(DisasContext *s, int addend)
+{
+    gen_op_add_reg_im(mo_stacksize(s), R_ESP, addend);
+}
+
+/* Generate a push. It depends on ss32, addseg and dflag.  */
+static void gen_push_v(DisasContext *s, TCGv val)
+{
+    TCGMemOp d_ot = mo_pushpop(s, s->dflag);
+    TCGMemOp a_ot = mo_stacksize(s);
+    int size = 1 << d_ot;
+    TCGv new_esp = cpu_A0;
+
+    tcg_gen_subi_tl(cpu_A0, cpu_regs[R_ESP], size);
+
+    if (!CODE64(s)) {
+        if (s->addseg) {
+            new_esp = cpu_tmp4;
+            tcg_gen_mov_tl(new_esp, cpu_A0);
+        }
+        gen_lea_v_seg(s, a_ot, cpu_A0, R_SS, -1);
+    }
+
+    gen_op_st_v(s, d_ot, val, cpu_A0);
+    gen_op_mov_reg_v(a_ot, R_ESP, new_esp);
+}
+
+/* two step pop is necessary for precise exceptions */
+static TCGMemOp gen_pop_T0(DisasContext *s)
+{
+    TCGMemOp d_ot = mo_pushpop(s, s->dflag);
+
+    gen_lea_v_seg(s, mo_stacksize(s), cpu_regs[R_ESP], R_SS, -1);
+    gen_op_ld_v(s, d_ot, cpu_T0, cpu_A0);
+
+    return d_ot;
+}
+
+static inline void gen_pop_update(DisasContext *s, TCGMemOp ot)
+{
+    gen_stack_update(s, 1 << ot);
+}
+
+static inline void gen_stack_A0(DisasContext *s)
+{
+    gen_lea_v_seg(s, s->ss32 ? MO_32 : MO_16, cpu_regs[R_ESP], R_SS, -1);
+}
+
+static void gen_pusha(DisasContext *s)
+{
+    TCGMemOp s_ot = s->ss32 ? MO_32 : MO_16;
+    TCGMemOp d_ot = s->dflag;
+    int size = 1 << d_ot;
+    int i;
+
+    for (i = 0; i < 8; i++) {
+        tcg_gen_addi_tl(cpu_A0, cpu_regs[R_ESP], (i - 8) * size);
+        gen_lea_v_seg(s, s_ot, cpu_A0, R_SS, -1);
+        gen_op_st_v(s, d_ot, cpu_regs[7 - i], cpu_A0);
+    }
+
+    gen_stack_update(s, -8 * size);
+}
+
+static void gen_popa(DisasContext *s)
+{
+    TCGMemOp s_ot = s->ss32 ? MO_32 : MO_16;
+    TCGMemOp d_ot = s->dflag;
+    int size = 1 << d_ot;
+    int i;
+
+    for (i = 0; i < 8; i++) {
+        /* ESP is not reloaded */
+        if (7 - i == R_ESP) {
+            continue;
+        }
+        tcg_gen_addi_tl(cpu_A0, cpu_regs[R_ESP], i * size);
+        gen_lea_v_seg(s, s_ot, cpu_A0, R_SS, -1);
+        gen_op_ld_v(s, d_ot, cpu_T0, cpu_A0);
+        gen_op_mov_reg_v(d_ot, 7 - i, cpu_T0);
+    }
+
+    gen_stack_update(s, 8 * size);
+}
+
+static void gen_enter(DisasContext *s, int esp_addend, int level)
+{
+    TCGMemOp d_ot = mo_pushpop(s, s->dflag);
+    TCGMemOp a_ot = CODE64(s) ? MO_64 : s->ss32 ? MO_32 : MO_16;
+    int size = 1 << d_ot;
+
+    /* Push BP; compute FrameTemp into T1.  */
+    tcg_gen_subi_tl(cpu_T1, cpu_regs[R_ESP], size);
+    gen_lea_v_seg(s, a_ot, cpu_T1, R_SS, -1);
+    gen_op_st_v(s, d_ot, cpu_regs[R_EBP], cpu_A0);
+
+    level &= 31;
+    if (level != 0) {
+        int i;
+
+        /* Copy level-1 pointers from the previous frame.  */
+        for (i = 1; i < level; ++i) {
+            tcg_gen_subi_tl(cpu_A0, cpu_regs[R_EBP], size * i);
+            gen_lea_v_seg(s, a_ot, cpu_A0, R_SS, -1);
+            gen_op_ld_v(s, d_ot, cpu_tmp0, cpu_A0);
+
+            tcg_gen_subi_tl(cpu_A0, cpu_T1, size * i);
+            gen_lea_v_seg(s, a_ot, cpu_A0, R_SS, -1);
+            gen_op_st_v(s, d_ot, cpu_tmp0, cpu_A0);
+        }
+
+        /* Push the current FrameTemp as the last level.  */
+        tcg_gen_subi_tl(cpu_A0, cpu_T1, size * level);
+        gen_lea_v_seg(s, a_ot, cpu_A0, R_SS, -1);
+        gen_op_st_v(s, d_ot, cpu_T1, cpu_A0);
+    }
+
+    /* Copy the FrameTemp value to EBP.  */
+    gen_op_mov_reg_v(a_ot, R_EBP, cpu_T1);
+
+    /* Compute the final value of ESP.  */
+    tcg_gen_subi_tl(cpu_T1, cpu_T1, esp_addend + size * level);
+    gen_op_mov_reg_v(a_ot, R_ESP, cpu_T1);
+}
+
+static void gen_leave(DisasContext *s)
+{
+    TCGMemOp d_ot = mo_pushpop(s, s->dflag);
+    TCGMemOp a_ot = mo_stacksize(s);
+
+    gen_lea_v_seg(s, a_ot, cpu_regs[R_EBP], R_SS, -1);
+    gen_op_ld_v(s, d_ot, cpu_T0, cpu_A0);
+
+    tcg_gen_addi_tl(cpu_T1, cpu_regs[R_EBP], 1 << d_ot);
+
+    gen_op_mov_reg_v(d_ot, R_EBP, cpu_T0);
+    gen_op_mov_reg_v(a_ot, R_ESP, cpu_T1);
+}
+
+static void gen_exception(DisasContext *s, int trapno, target_ulong cur_eip)
+{
+    gen_update_cc_op(s);
+    gen_jmp_im(cur_eip);
+    gen_helper_raise_exception(cpu_env, tcg_const_i32(trapno));
+    s->is_jmp = DISAS_TB_JUMP;
+}
+
+/* Generate #UD for the current instruction.  The assumption here is that
+   the instruction is known, but it isn't allowed in the current cpu mode.  */
+static void gen_illegal_opcode(DisasContext *s)
+{
+    gen_exception(s, EXCP06_ILLOP, s->pc_start - s->cs_base);
+}
+
+/* Similarly, except that the assumption here is that we don't decode
+   the instruction at all -- either a missing opcode, an unimplemented
+   feature, or just a bogus instruction stream.  */
+static void gen_unknown_opcode(CPUX86State *env, DisasContext *s)
+{
+    gen_illegal_opcode(s);
+
+    if (qemu_loglevel_mask(LOG_UNIMP)) {
+        target_ulong pc = s->pc_start, end = s->pc;
+        qemu_log_lock();
+        qemu_log("ILLOPC: " TARGET_FMT_lx ":", pc);
+        for (; pc < end; ++pc) {
+            qemu_log(" %02x", cpu_ldub_code(env, pc));
+        }
+        qemu_log("\n");
+        qemu_log_unlock();
+    }
+}
+
+/* an interrupt is different from an exception because of the
+   privilege checks */
+static void gen_interrupt(DisasContext *s, int intno,
+                          target_ulong cur_eip, target_ulong next_eip)
+{
+    gen_update_cc_op(s);
+    gen_jmp_im(cur_eip);
+    gen_helper_raise_interrupt(cpu_env, tcg_const_i32(intno),
+                               tcg_const_i32(next_eip - cur_eip));
+    s->is_jmp = DISAS_TB_JUMP;
+}
+
+static void gen_debug(DisasContext *s, target_ulong cur_eip)
+{
+    gen_update_cc_op(s);
+    gen_jmp_im(cur_eip);
+    gen_helper_debug(cpu_env);
+    s->is_jmp = DISAS_TB_JUMP;
+}
+
+static void gen_set_hflag(DisasContext *s, uint32_t mask)
+{
+    if ((s->flags & mask) == 0) {
+        TCGv_i32 t = tcg_temp_new_i32();
+        tcg_gen_ld_i32(t, cpu_env, offsetof(CPUX86State, hflags));
+        tcg_gen_ori_i32(t, t, mask);
+        tcg_gen_st_i32(t, cpu_env, offsetof(CPUX86State, hflags));
+        tcg_temp_free_i32(t);
+        s->flags |= mask;
+    }
+}
+
+static void gen_reset_hflag(DisasContext *s, uint32_t mask)
+{
+    if (s->flags & mask) {
+        TCGv_i32 t = tcg_temp_new_i32();
+        tcg_gen_ld_i32(t, cpu_env, offsetof(CPUX86State, hflags));
+        tcg_gen_andi_i32(t, t, ~mask);
+        tcg_gen_st_i32(t, cpu_env, offsetof(CPUX86State, hflags));
+        tcg_temp_free_i32(t);
+        s->flags &= ~mask;
+    }
+}
+
+/* Clear BND registers during legacy branches.  */
+static void gen_bnd_jmp(DisasContext *s)
+{
+    /* Clear the registers only if BND prefix is missing, MPX is enabled,
+       and if the BNDREGs are known to be in use (non-zero) already.
+       The helper itself will check BNDPRESERVE at runtime.  */
+    if ((s->prefix & PREFIX_REPNZ) == 0
+        && (s->flags & HF_MPX_EN_MASK) != 0
+        && (s->flags & HF_MPX_IU_MASK) != 0) {
+        gen_helper_bnd_jmp(cpu_env);
+    }
+}
+
+/* Generate an end of block. Trace exception is also generated if needed.
+   If INHIBIT, set HF_INHIBIT_IRQ_MASK if it isn't already set.  */
+static void gen_eob_inhibit_irq(DisasContext *s, bool inhibit)
+{
+    gen_update_cc_op(s);
+
+    /* If several instructions disable interrupts, only the first does it.  */
+    if (inhibit && !(s->flags & HF_INHIBIT_IRQ_MASK)) {
+        gen_set_hflag(s, HF_INHIBIT_IRQ_MASK);
+    } else {
+        gen_reset_hflag(s, HF_INHIBIT_IRQ_MASK);
+    }
+
+    if (s->tb->flags & HF_RF_MASK) {
+        gen_helper_reset_rf(cpu_env);
+    }
+    if (s->singlestep_enabled) {
+        gen_helper_debug(cpu_env);
+    } else if (s->tf) {
+        gen_helper_single_step(cpu_env);
+    } else {
+        tcg_gen_exit_tb(0);
+    }
+    s->is_jmp = DISAS_TB_JUMP;
+}
+
+/* End of block, resetting the inhibit irq flag.  */
+static void gen_eob(DisasContext *s)
+{
+    gen_eob_inhibit_irq(s, false);
+}
+
+/* generate a jump to eip. No segment change must happen before as a
+   direct call to the next block may occur */
+static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num)
+{
+    gen_update_cc_op(s);
+    set_cc_op(s, CC_OP_DYNAMIC);
+    if (s->jmp_opt) {
+        gen_goto_tb(s, tb_num, eip);
+        s->is_jmp = DISAS_TB_JUMP;
+    } else {
+        gen_jmp_im(eip);
+        gen_eob(s);
+    }
+}
+
+static void gen_jmp(DisasContext *s, target_ulong eip)
+{
+    gen_jmp_tb(s, eip, 0);
+}
+
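+/* Move 64-bit (q) and 128-bit (o) values between guest memory at A0
+   and a field of CPUX86State; the 128-bit variants transfer the two
+   little-endian 64-bit halves, low half first.  */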
+static inline void gen_ldq_env_A0(DisasContext *s, int offset)
+{
+    tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
+    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset);
+}
+
+static inline void gen_stq_env_A0(DisasContext *s, int offset)
+{
+    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset);
+    tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
+}
+
+static inline void gen_ldo_env_A0(DisasContext *s, int offset)
+{
+    int mem_index = s->mem_index;
+    tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0, mem_index, MO_LEQ);
+    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(0)));
+    tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
+    tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_tmp0, mem_index, MO_LEQ);
+    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(1)));
+}
+
+static inline void gen_sto_env_A0(DisasContext *s, int offset)
+{
+    int mem_index = s->mem_index;
+    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(0)));
+    tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, mem_index, MO_LEQ);
+    tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
+    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(1)));
+    tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_tmp0, mem_index, MO_LEQ);
+}
+
+static inline void gen_op_movo(int d_offset, int s_offset)
+{
+    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset + offsetof(ZMMReg, ZMM_Q(0)));
+    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset + offsetof(ZMMReg, ZMM_Q(0)));
+    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset + offsetof(ZMMReg, ZMM_Q(1)));
+    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset + offsetof(ZMMReg, ZMM_Q(1)));
+}
+
+static inline void gen_op_movq(int d_offset, int s_offset)
+{
+    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset);
+    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
+}
+
+static inline void gen_op_movl(int d_offset, int s_offset)
+{
+    tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env, s_offset);
+    tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, d_offset);
+}
+
+static inline void gen_op_movq_env_0(int d_offset)
+{
+    tcg_gen_movi_i64(cpu_tmp1_i64, 0);
+    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
+}
+
+typedef void (*SSEFunc_i_ep)(TCGv_i32 val, TCGv_ptr env, TCGv_ptr reg);
+typedef void (*SSEFunc_l_ep)(TCGv_i64 val, TCGv_ptr env, TCGv_ptr reg);
+typedef void (*SSEFunc_0_epi)(TCGv_ptr env, TCGv_ptr reg, TCGv_i32 val);
+typedef void (*SSEFunc_0_epl)(TCGv_ptr env, TCGv_ptr reg, TCGv_i64 val);
+typedef void (*SSEFunc_0_epp)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b);
+typedef void (*SSEFunc_0_eppi)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
+                               TCGv_i32 val);
+typedef void (*SSEFunc_0_ppi)(TCGv_ptr reg_a, TCGv_ptr reg_b, TCGv_i32 val);
+typedef void (*SSEFunc_0_eppt)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
+                               TCGv val);
+
+#define SSE_SPECIAL ((void *)1)
+#define SSE_DUMMY ((void *)2)
+
+#define MMX_OP2(x) { gen_helper_ ## x ## _mmx, gen_helper_ ## x ## _xmm }
+#define SSE_FOP(x) { gen_helper_ ## x ## ps, gen_helper_ ## x ## pd, \
+                     gen_helper_ ## x ## ss, gen_helper_ ## x ## sd, }
+
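+/* Main 0x0f two-byte opcode table, indexed by opcode and then by the
+   mandatory prefix in effect: 0 = none (usually MMX), 1 = 0x66,
+   2 = 0xf3, 3 = 0xf2.  SSE_SPECIAL entries are decoded by hand in
+   gen_sse(); SSE_DUMMY entries are handled before table dispatch
+   (femms/emms).  */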
+static const SSEFunc_0_epp sse_op_table1[256][4] = {
+    /* 3DNow! extensions */
+    [0x0e] = { SSE_DUMMY }, /* femms */
+    [0x0f] = { SSE_DUMMY }, /* pf... */
+    /* pure SSE operations */
+    [0x10] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
+    [0x11] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
+    [0x12] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd, movsldup, movddup */
+    [0x13] = { SSE_SPECIAL, SSE_SPECIAL },  /* movlps, movlpd */
+    [0x14] = { gen_helper_punpckldq_xmm, gen_helper_punpcklqdq_xmm },
+    [0x15] = { gen_helper_punpckhdq_xmm, gen_helper_punpckhqdq_xmm },
+    [0x16] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },  /* movhps, movhpd, movshdup */
+    [0x17] = { SSE_SPECIAL, SSE_SPECIAL },  /* movhps, movhpd */
+
+    [0x28] = { SSE_SPECIAL, SSE_SPECIAL },  /* movaps, movapd */
+    [0x29] = { SSE_SPECIAL, SSE_SPECIAL },  /* movaps, movapd */
+    [0x2a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtpi2ps, cvtpi2pd, cvtsi2ss, cvtsi2sd */
+    [0x2b] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movntps, movntpd, movntss, movntsd */
+    [0x2c] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvttps2pi, cvttpd2pi, cvttsd2si, cvttss2si */
+    [0x2d] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtps2pi, cvtpd2pi, cvtsd2si, cvtss2si */
+    [0x2e] = { gen_helper_ucomiss, gen_helper_ucomisd },
+    [0x2f] = { gen_helper_comiss, gen_helper_comisd },
+    [0x50] = { SSE_SPECIAL, SSE_SPECIAL }, /* movmskps, movmskpd */
+    [0x51] = SSE_FOP(sqrt),
+    [0x52] = { gen_helper_rsqrtps, NULL, gen_helper_rsqrtss, NULL },
+    [0x53] = { gen_helper_rcpps, NULL, gen_helper_rcpss, NULL },
+    [0x54] = { gen_helper_pand_xmm, gen_helper_pand_xmm }, /* andps, andpd */
+    [0x55] = { gen_helper_pandn_xmm, gen_helper_pandn_xmm }, /* andnps, andnpd */
+    [0x56] = { gen_helper_por_xmm, gen_helper_por_xmm }, /* orps, orpd */
+    [0x57] = { gen_helper_pxor_xmm, gen_helper_pxor_xmm }, /* xorps, xorpd */
+    [0x58] = SSE_FOP(add),
+    [0x59] = SSE_FOP(mul),
+    [0x5a] = { gen_helper_cvtps2pd, gen_helper_cvtpd2ps,
+               gen_helper_cvtss2sd, gen_helper_cvtsd2ss },
+    [0x5b] = { gen_helper_cvtdq2ps, gen_helper_cvtps2dq, gen_helper_cvttps2dq },
+    [0x5c] = SSE_FOP(sub),
+    [0x5d] = SSE_FOP(min),
+    [0x5e] = SSE_FOP(div),
+    [0x5f] = SSE_FOP(max),
+
+    [0xc2] = SSE_FOP(cmpeq),
+    [0xc6] = { (SSEFunc_0_epp)gen_helper_shufps,
+               (SSEFunc_0_epp)gen_helper_shufpd }, /* XXX: casts */
+
+    /* SSSE3, SSE4, MOVBE, CRC32, BMI1, BMI2, ADX.  */
+    [0x38] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
+    [0x3a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
+
+    /* MMX ops and their SSE extensions */
+    [0x60] = MMX_OP2(punpcklbw),
+    [0x61] = MMX_OP2(punpcklwd),
+    [0x62] = MMX_OP2(punpckldq),
+    [0x63] = MMX_OP2(packsswb),
+    [0x64] = MMX_OP2(pcmpgtb),
+    [0x65] = MMX_OP2(pcmpgtw),
+    [0x66] = MMX_OP2(pcmpgtl),
+    [0x67] = MMX_OP2(packuswb),
+    [0x68] = MMX_OP2(punpckhbw),
+    [0x69] = MMX_OP2(punpckhwd),
+    [0x6a] = MMX_OP2(punpckhdq),
+    [0x6b] = MMX_OP2(packssdw),
+    [0x6c] = { NULL, gen_helper_punpcklqdq_xmm },
+    [0x6d] = { NULL, gen_helper_punpckhqdq_xmm },
+    [0x6e] = { SSE_SPECIAL, SSE_SPECIAL }, /* movd mm, ea */
+    [0x6f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
+    [0x70] = { (SSEFunc_0_epp)gen_helper_pshufw_mmx,
+               (SSEFunc_0_epp)gen_helper_pshufd_xmm,
+               (SSEFunc_0_epp)gen_helper_pshufhw_xmm,
+               (SSEFunc_0_epp)gen_helper_pshuflw_xmm }, /* XXX: casts */
+    [0x71] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftw */
+    [0x72] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftd */
+    [0x73] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftq */
+    [0x74] = MMX_OP2(pcmpeqb),
+    [0x75] = MMX_OP2(pcmpeqw),
+    [0x76] = MMX_OP2(pcmpeql),
+    [0x77] = { SSE_DUMMY }, /* emms */
+    [0x78] = { NULL, SSE_SPECIAL, NULL, SSE_SPECIAL }, /* extrq_i, insertq_i */
+    [0x79] = { NULL, gen_helper_extrq_r, NULL, gen_helper_insertq_r },
+    [0x7c] = { NULL, gen_helper_haddpd, NULL, gen_helper_haddps },
+    [0x7d] = { NULL, gen_helper_hsubpd, NULL, gen_helper_hsubps },
+    [0x7e] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movd, movd, movq */
+    [0x7f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
+    [0xc4] = { SSE_SPECIAL, SSE_SPECIAL }, /* pinsrw */
+    [0xc5] = { SSE_SPECIAL, SSE_SPECIAL }, /* pextrw */
+    [0xd0] = { NULL, gen_helper_addsubpd, NULL, gen_helper_addsubps },
+    [0xd1] = MMX_OP2(psrlw),
+    [0xd2] = MMX_OP2(psrld),
+    [0xd3] = MMX_OP2(psrlq),
+    [0xd4] = MMX_OP2(paddq),
+    [0xd5] = MMX_OP2(pmullw),
+    [0xd6] = { NULL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
+    [0xd7] = { SSE_SPECIAL, SSE_SPECIAL }, /* pmovmskb */
+    [0xd8] = MMX_OP2(psubusb),
+    [0xd9] = MMX_OP2(psubusw),
+    [0xda] = MMX_OP2(pminub),
+    [0xdb] = MMX_OP2(pand),
+    [0xdc] = MMX_OP2(paddusb),
+    [0xdd] = MMX_OP2(paddusw),
+    [0xde] = MMX_OP2(pmaxub),
+    [0xdf] = MMX_OP2(pandn),
+    [0xe0] = MMX_OP2(pavgb),
+    [0xe1] = MMX_OP2(psraw),
+    [0xe2] = MMX_OP2(psrad),
+    [0xe3] = MMX_OP2(pavgw),
+    [0xe4] = MMX_OP2(pmulhuw),
+    [0xe5] = MMX_OP2(pmulhw),
+    [0xe6] = { NULL, gen_helper_cvttpd2dq, gen_helper_cvtdq2pd, gen_helper_cvtpd2dq },
+    [0xe7] = { SSE_SPECIAL, SSE_SPECIAL },  /* movntq, movntdq */
+    [0xe8] = MMX_OP2(psubsb),
+    [0xe9] = MMX_OP2(psubsw),
+    [0xea] = MMX_OP2(pminsw),
+    [0xeb] = MMX_OP2(por),
+    [0xec] = MMX_OP2(paddsb),
+    [0xed] = MMX_OP2(paddsw),
+    [0xee] = MMX_OP2(pmaxsw),
+    [0xef] = MMX_OP2(pxor),
+    [0xf0] = { NULL, NULL, NULL, SSE_SPECIAL }, /* lddqu */
+    [0xf1] = MMX_OP2(psllw),
+    [0xf2] = MMX_OP2(pslld),
+    [0xf3] = MMX_OP2(psllq),
+    [0xf4] = MMX_OP2(pmuludq),
+    [0xf5] = MMX_OP2(pmaddwd),
+    [0xf6] = MMX_OP2(psadbw),
+    [0xf7] = { (SSEFunc_0_epp)gen_helper_maskmov_mmx,
+               (SSEFunc_0_epp)gen_helper_maskmov_xmm }, /* XXX: casts */
+    [0xf8] = MMX_OP2(psubb),
+    [0xf9] = MMX_OP2(psubw),
+    [0xfa] = MMX_OP2(psubl),
+    [0xfb] = MMX_OP2(psubq),
+    [0xfc] = MMX_OP2(paddb),
+    [0xfd] = MMX_OP2(paddw),
+    [0xfe] = MMX_OP2(paddl),
+};
+
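+/* Shift-by-immediate groups 0x0f 0x71/0x72/0x73, laid out as
+   [group * 8 + modrm reg field], where groups 0/1/2 are the
+   word/dword/qword shifts, then indexed by MMX vs XMM form.  */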
+static const SSEFunc_0_epp sse_op_table2[3 * 8][2] = {
+    [0 + 2] = MMX_OP2(psrlw),
+    [0 + 4] = MMX_OP2(psraw),
+    [0 + 6] = MMX_OP2(psllw),
+    [8 + 2] = MMX_OP2(psrld),
+    [8 + 4] = MMX_OP2(psrad),
+    [8 + 6] = MMX_OP2(pslld),
+    [16 + 2] = MMX_OP2(psrlq),
+    [16 + 3] = { NULL, gen_helper_psrldq_xmm },
+    [16 + 6] = MMX_OP2(psllq),
+    [16 + 7] = { NULL, gen_helper_pslldq_xmm },
+};
+
+static const SSEFunc_0_epi sse_op_table3ai[] = {
+    gen_helper_cvtsi2ss,
+    gen_helper_cvtsi2sd
+};
+
+#ifdef TARGET_X86_64
+static const SSEFunc_0_epl sse_op_table3aq[] = {
+    gen_helper_cvtsq2ss,
+    gen_helper_cvtsq2sd
+};
+#endif
+
+static const SSEFunc_i_ep sse_op_table3bi[] = {
+    gen_helper_cvttss2si,
+    gen_helper_cvtss2si,
+    gen_helper_cvttsd2si,
+    gen_helper_cvtsd2si
+};
+
+#ifdef TARGET_X86_64
+static const SSEFunc_l_ep sse_op_table3bq[] = {
+    gen_helper_cvttss2sq,
+    gen_helper_cvtss2sq,
+    gen_helper_cvttsd2sq,
+    gen_helper_cvtsd2sq
+};
+#endif
+
+static const SSEFunc_0_epp sse_op_table4[8][4] = {
+    SSE_FOP(cmpeq),
+    SSE_FOP(cmplt),
+    SSE_FOP(cmple),
+    SSE_FOP(cmpunord),
+    SSE_FOP(cmpneq),
+    SSE_FOP(cmpnlt),
+    SSE_FOP(cmpnle),
+    SSE_FOP(cmpord),
+};
+
+static const SSEFunc_0_epp sse_op_table5[256] = {
+    [0x0c] = gen_helper_pi2fw,
+    [0x0d] = gen_helper_pi2fd,
+    [0x1c] = gen_helper_pf2iw,
+    [0x1d] = gen_helper_pf2id,
+    [0x8a] = gen_helper_pfnacc,
+    [0x8e] = gen_helper_pfpnacc,
+    [0x90] = gen_helper_pfcmpge,
+    [0x94] = gen_helper_pfmin,
+    [0x96] = gen_helper_pfrcp,
+    [0x97] = gen_helper_pfrsqrt,
+    [0x9a] = gen_helper_pfsub,
+    [0x9e] = gen_helper_pfadd,
+    [0xa0] = gen_helper_pfcmpgt,
+    [0xa4] = gen_helper_pfmax,
+    [0xa6] = gen_helper_movq, /* pfrcpit1; no need to actually increase precision */
+    [0xa7] = gen_helper_movq, /* pfrsqit1 */
+    [0xaa] = gen_helper_pfsubr,
+    [0xae] = gen_helper_pfacc,
+    [0xb0] = gen_helper_pfcmpeq,
+    [0xb4] = gen_helper_pfmul,
+    [0xb6] = gen_helper_movq, /* pfrcpit2 */
+    [0xb7] = gen_helper_pmulhrw_mmx,
+    [0xbb] = gen_helper_pswapd,
+    [0xbf] = gen_helper_pavgb_mmx /* pavgusb */
+};
+
+struct SSEOpHelper_epp {
+    SSEFunc_0_epp op[2];
+    uint32_t ext_mask;
+};
+
+struct SSEOpHelper_eppi {
+    SSEFunc_0_eppi op[2];
+    uint32_t ext_mask;
+};
+
+#define SSSE3_OP(x) { MMX_OP2(x), CPUID_EXT_SSSE3 }
+#define SSE41_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE41 }
+#define SSE42_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE42 }
+#define SSE41_SPECIAL { { NULL, SSE_SPECIAL }, CPUID_EXT_SSE41 }
+#define PCLMULQDQ_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, \
+        CPUID_EXT_PCLMULQDQ }
+#define AESNI_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_AES }
+
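+/* 0x0f 0x38 three-byte opcodes (no immediate).  op[0] is the MMX
+   form, op[1] the XMM form; ext_mask is the CPUID feature bit that
+   must be present for the instruction to decode.  */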
+static const struct SSEOpHelper_epp sse_op_table6[256] = {
+    [0x00] = SSSE3_OP(pshufb),
+    [0x01] = SSSE3_OP(phaddw),
+    [0x02] = SSSE3_OP(phaddd),
+    [0x03] = SSSE3_OP(phaddsw),
+    [0x04] = SSSE3_OP(pmaddubsw),
+    [0x05] = SSSE3_OP(phsubw),
+    [0x06] = SSSE3_OP(phsubd),
+    [0x07] = SSSE3_OP(phsubsw),
+    [0x08] = SSSE3_OP(psignb),
+    [0x09] = SSSE3_OP(psignw),
+    [0x0a] = SSSE3_OP(psignd),
+    [0x0b] = SSSE3_OP(pmulhrsw),
+    [0x10] = SSE41_OP(pblendvb),
+    [0x14] = SSE41_OP(blendvps),
+    [0x15] = SSE41_OP(blendvpd),
+    [0x17] = SSE41_OP(ptest),
+    [0x1c] = SSSE3_OP(pabsb),
+    [0x1d] = SSSE3_OP(pabsw),
+    [0x1e] = SSSE3_OP(pabsd),
+    [0x20] = SSE41_OP(pmovsxbw),
+    [0x21] = SSE41_OP(pmovsxbd),
+    [0x22] = SSE41_OP(pmovsxbq),
+    [0x23] = SSE41_OP(pmovsxwd),
+    [0x24] = SSE41_OP(pmovsxwq),
+    [0x25] = SSE41_OP(pmovsxdq),
+    [0x28] = SSE41_OP(pmuldq),
+    [0x29] = SSE41_OP(pcmpeqq),
+    [0x2a] = SSE41_SPECIAL, /* movntdqa */
+    [0x2b] = SSE41_OP(packusdw),
+    [0x30] = SSE41_OP(pmovzxbw),
+    [0x31] = SSE41_OP(pmovzxbd),
+    [0x32] = SSE41_OP(pmovzxbq),
+    [0x33] = SSE41_OP(pmovzxwd),
+    [0x34] = SSE41_OP(pmovzxwq),
+    [0x35] = SSE41_OP(pmovzxdq),
+    [0x37] = SSE42_OP(pcmpgtq),
+    [0x38] = SSE41_OP(pminsb),
+    [0x39] = SSE41_OP(pminsd),
+    [0x3a] = SSE41_OP(pminuw),
+    [0x3b] = SSE41_OP(pminud),
+    [0x3c] = SSE41_OP(pmaxsb),
+    [0x3d] = SSE41_OP(pmaxsd),
+    [0x3e] = SSE41_OP(pmaxuw),
+    [0x3f] = SSE41_OP(pmaxud),
+    [0x40] = SSE41_OP(pmulld),
+    [0x41] = SSE41_OP(phminposuw),
+    [0xdb] = AESNI_OP(aesimc),
+    [0xdc] = AESNI_OP(aesenc),
+    [0xdd] = AESNI_OP(aesenclast),
+    [0xde] = AESNI_OP(aesdec),
+    [0xdf] = AESNI_OP(aesdeclast),
+};
+
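+/* 0x0f 0x3a three-byte opcodes, which take an extra 8-bit immediate;
+   same layout as sse_op_table6.  */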
+static const struct SSEOpHelper_eppi sse_op_table7[256] = {
+    [0x08] = SSE41_OP(roundps),
+    [0x09] = SSE41_OP(roundpd),
+    [0x0a] = SSE41_OP(roundss),
+    [0x0b] = SSE41_OP(roundsd),
+    [0x0c] = SSE41_OP(blendps),
+    [0x0d] = SSE41_OP(blendpd),
+    [0x0e] = SSE41_OP(pblendw),
+    [0x0f] = SSSE3_OP(palignr),
+    [0x14] = SSE41_SPECIAL, /* pextrb */
+    [0x15] = SSE41_SPECIAL, /* pextrw */
+    [0x16] = SSE41_SPECIAL, /* pextrd/pextrq */
+    [0x17] = SSE41_SPECIAL, /* extractps */
+    [0x20] = SSE41_SPECIAL, /* pinsrb */
+    [0x21] = SSE41_SPECIAL, /* insertps */
+    [0x22] = SSE41_SPECIAL, /* pinsrd/pinsrq */
+    [0x40] = SSE41_OP(dpps),
+    [0x41] = SSE41_OP(dppd),
+    [0x42] = SSE41_OP(mpsadbw),
+    [0x44] = PCLMULQDQ_OP(pclmulqdq),
+    [0x60] = SSE42_OP(pcmpestrm),
+    [0x61] = SSE42_OP(pcmpestri),
+    [0x62] = SSE42_OP(pcmpistrm),
+    [0x63] = SSE42_OP(pcmpistri),
+    [0xdf] = AESNI_OP(aeskeygenassist),
+};
+
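+/* Decode and translate one MMX/SSE instruction.  "b" is the opcode
+   byte following 0x0f; the mandatory prefix (none/66/F3/F2) selects
+   a column (b1) in the tables above.  */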
+static void gen_sse(CPUX86State *env, DisasContext *s, int b,
+                    target_ulong pc_start, int rex_r)
+{
+    int b1, op1_offset, op2_offset, is_xmm, val;
+    int modrm, mod, rm, reg;
+    SSEFunc_0_epp sse_fn_epp;
+    SSEFunc_0_eppi sse_fn_eppi;
+    SSEFunc_0_ppi sse_fn_ppi;
+    SSEFunc_0_eppt sse_fn_eppt;
+    TCGMemOp ot;
+
+    b &= 0xff;
+    if (s->prefix & PREFIX_DATA) {
+        b1 = 1;
+    } else if (s->prefix & PREFIX_REPZ) {
+        b1 = 2;
+    } else if (s->prefix & PREFIX_REPNZ) {
+        b1 = 3;
+    } else {
+        b1 = 0;
+    }
+    sse_fn_epp = sse_op_table1[b][b1];
+    if (!sse_fn_epp) {
+        goto unknown_op;
+    }
+    if ((b <= 0x5f && b >= 0x10) || b == 0xc6 || b == 0xc2) {
+        is_xmm = 1;
+    } else {
+        if (b1 == 0) {
+            /* MMX case */
+            is_xmm = 0;
+        } else {
+            is_xmm = 1;
+        }
+    }
+    /* simple MMX/SSE operation */
+    if (s->flags & HF_TS_MASK) {
+        gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
+        return;
+    }
+    if (s->flags & HF_EM_MASK) {
+    illegal_op:
+        gen_illegal_opcode(s);
+        return;
+    }
+    if (is_xmm
+        && !(s->flags & HF_OSFXSR_MASK)
+        && ((b != 0x38 && b != 0x3a) || (s->prefix & PREFIX_DATA))) {
+        goto unknown_op;
+    }
+    if (b == 0x0e) {
+        if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW)) {
+            /* If we were fully decoding this we might use illegal_op.  */
+            goto unknown_op;
+        }
+        /* femms */
+        gen_helper_emms(cpu_env);
+        return;
+    }
+    if (b == 0x77) {
+        /* emms */
+        gen_helper_emms(cpu_env);
+        return;
+    }
+    /* prepare MMX state (XXX: optimize by storing fptt and fptags in
+       the static cpu state) */
+    if (!is_xmm) {
+        gen_helper_enter_mmx(cpu_env);
+    }
+
+    modrm = cpu_ldub_code(env, s->pc++);
+    reg = ((modrm >> 3) & 7);
+    if (is_xmm) {
+        reg |= rex_r;
+    }
+    mod = (modrm >> 6) & 3;
+    if (sse_fn_epp == SSE_SPECIAL) {
+        b |= (b1 << 8);
+        switch (b) {
+        case 0x0e7: /* movntq */
+            if (mod == 3) {
+                goto illegal_op;
+            }
+            gen_lea_modrm(env, s, modrm);
+            gen_stq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx));
+            break;
+        case 0x1e7: /* movntdq */
+        case 0x02b: /* movntps */
+        case 0x12b: /* movntpd */
+            if (mod == 3) {
+                goto illegal_op;
+            }
+            gen_lea_modrm(env, s, modrm);
+            gen_sto_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
+            break;
+        case 0x3f0: /* lddqu */
+            if (mod == 3) {
+                goto illegal_op;
+            }
+            gen_lea_modrm(env, s, modrm);
+            gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
+            break;
+        case 0x22b: /* movntss */
+        case 0x32b: /* movntsd */
+            if (mod == 3) {
+                goto illegal_op;
+            }
+            gen_lea_modrm(env, s, modrm);
+            if (b1 & 1) {
+                gen_stq_env_A0(s, offsetof(CPUX86State,
+                                           xmm_regs[reg].ZMM_Q(0)));
+            } else {
+                tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State,
+                    xmm_regs[reg].ZMM_L(0)));
+                gen_op_st_v(s, MO_32, cpu_T0, cpu_A0);
+            }
+            break;
+        case 0x6e: /* movd mm, ea */
+#ifdef TARGET_X86_64
+            if (s->dflag == MO_64) {
+                gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 0);
+                tcg_gen_st_tl(cpu_T0, cpu_env, offsetof(CPUX86State,fpregs[reg].mmx));
+            } else
+#endif
+            {
+                gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 0);
+                tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
+                                 offsetof(CPUX86State,fpregs[reg].mmx));
+                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
+                gen_helper_movl_mm_T0_mmx(cpu_ptr0, cpu_tmp2_i32);
+            }
+            break;
+        case 0x16e: /* movd xmm, ea */
+#ifdef TARGET_X86_64
+            if (s->dflag == MO_64) {
+                gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 0);
+                tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
+                                 offsetof(CPUX86State,xmm_regs[reg]));
+                gen_helper_movq_mm_T0_xmm(cpu_ptr0, cpu_T0);
+            } else
+#endif
+            {
+                gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 0);
+                tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
+                                 offsetof(CPUX86State,xmm_regs[reg]));
+                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
+                gen_helper_movl_mm_T0_xmm(cpu_ptr0, cpu_tmp2_i32);
+            }
+            break;
+        case 0x6f: /* movq mm, ea */
+            if (mod != 3) {
+                gen_lea_modrm(env, s, modrm);
+                gen_ldq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx));
+            } else {
+                rm = (modrm & 7);
+                tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
+                               offsetof(CPUX86State,fpregs[rm].mmx));
+                tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
+                               offsetof(CPUX86State,fpregs[reg].mmx));
+            }
+            break;
+        case 0x010: /* movups */
+        case 0x110: /* movupd */
+        case 0x028: /* movaps */
+        case 0x128: /* movapd */
+        case 0x16f: /* movdqa xmm, ea */
+        case 0x26f: /* movdqu xmm, ea */
+            if (mod != 3) {
+                gen_lea_modrm(env, s, modrm);
+                gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
+            } else {
+                rm = (modrm & 7) | REX_B(s);
+                gen_op_movo(offsetof(CPUX86State,xmm_regs[reg]),
+                            offsetof(CPUX86State,xmm_regs[rm]));
+            }
+            break;
+        case 0x210: /* movss xmm, ea */
+            if (mod != 3) {
+                gen_lea_modrm(env, s, modrm);
+                gen_op_ld_v(s, MO_32, cpu_T0, cpu_A0);
+                tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)));
+                tcg_gen_movi_tl(cpu_T0, 0);
+                tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(1)));
+                tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(2)));
+                tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(3)));
+            } else {
+                rm = (modrm & 7) | REX_B(s);
+                gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)),
+                            offsetof(CPUX86State,xmm_regs[rm].ZMM_L(0)));
+            }
+            break;
+        case 0x310: /* movsd xmm, ea */
+            if (mod != 3) {
+                gen_lea_modrm(env, s, modrm);
+                gen_ldq_env_A0(s, offsetof(CPUX86State,
+                                           xmm_regs[reg].ZMM_Q(0)));
+                tcg_gen_movi_tl(cpu_T0, 0);
+                tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(2)));
+                tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(3)));
+            } else {
+                rm = (modrm & 7) | REX_B(s);
+                gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)),
+                            offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)));
+            }
+            break;
+        case 0x012: /* movlps */
+        case 0x112: /* movlpd */
+            if (mod != 3) {
+                gen_lea_modrm(env, s, modrm);
+                gen_ldq_env_A0(s, offsetof(CPUX86State,
+                                           xmm_regs[reg].ZMM_Q(0)));
+            } else {
+                /* movhlps */
+                rm = (modrm & 7) | REX_B(s);
+                gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)),
+                            offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(1)));
+            }
+            break;
+        case 0x212: /* movsldup */
+            if (mod != 3) {
+                gen_lea_modrm(env, s, modrm);
+                gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
+            } else {
+                rm = (modrm & 7) | REX_B(s);
+                gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)),
+                            offsetof(CPUX86State,xmm_regs[rm].ZMM_L(0)));
+                gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(2)),
+                            offsetof(CPUX86State,xmm_regs[rm].ZMM_L(2)));
+            }
+            gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(1)),
+                        offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)));
+            gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(3)),
+                        offsetof(CPUX86State,xmm_regs[reg].ZMM_L(2)));
+            break;
+        case 0x312: /* movddup */
+            if (mod != 3) {
+                gen_lea_modrm(env, s, modrm);
+                gen_ldq_env_A0(s, offsetof(CPUX86State,
+                                           xmm_regs[reg].ZMM_Q(0)));
+            } else {
+                rm = (modrm & 7) | REX_B(s);
+                gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)),
+                            offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)));
+            }
+            gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(1)),
+                        offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)));
+            break;
+        case 0x016: /* movhps */
+        case 0x116: /* movhpd */
+            if (mod != 3) {
+                gen_lea_modrm(env, s, modrm);
+                gen_ldq_env_A0(s, offsetof(CPUX86State,
+                                           xmm_regs[reg].ZMM_Q(1)));
+            } else {
+                /* movlhps */
+                rm = (modrm & 7) | REX_B(s);
+                gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(1)),
+                            offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)));
+            }
+            break;
+        case 0x216: /* movshdup */
+            if (mod != 3) {
+                gen_lea_modrm(env, s, modrm);
+                gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
+            } else {
+                rm = (modrm & 7) | REX_B(s);
+                gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(1)),
+                            offsetof(CPUX86State,xmm_regs[rm].ZMM_L(1)));
+                gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(3)),
+                            offsetof(CPUX86State,xmm_regs[rm].ZMM_L(3)));
+            }
+            gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)),
+                        offsetof(CPUX86State,xmm_regs[reg].ZMM_L(1)));
+            gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(2)),
+                        offsetof(CPUX86State,xmm_regs[reg].ZMM_L(3)));
+            break;
+        case 0x178:
+        case 0x378:
+            {
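+                /* SSE4a extrq/insertq, immediate forms: two immediate
+                   bytes follow, giving the field length and then the
+                   starting bit index.  */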
+                int bit_index, field_length;
+
+                if (b1 == 1 && reg != 0)
+                    goto illegal_op;
+                field_length = cpu_ldub_code(env, s->pc++) & 0x3F;
+                bit_index = cpu_ldub_code(env, s->pc++) & 0x3F;
+                tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
+                    offsetof(CPUX86State,xmm_regs[reg]));
+                if (b1 == 1)
+                    gen_helper_extrq_i(cpu_env, cpu_ptr0,
+                                       tcg_const_i32(bit_index),
+                                       tcg_const_i32(field_length));
+                else
+                    gen_helper_insertq_i(cpu_env, cpu_ptr0,
+                                         tcg_const_i32(bit_index),
+                                         tcg_const_i32(field_length));
+            }
+            break;
+        case 0x7e: /* movd ea, mm */
+#ifdef TARGET_X86_64
+            if (s->dflag == MO_64) {
+                tcg_gen_ld_i64(cpu_T0, cpu_env,
+                               offsetof(CPUX86State,fpregs[reg].mmx));
+                gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 1);
+            } else
+#endif
+            {
+                tcg_gen_ld32u_tl(cpu_T0, cpu_env,
+                                 offsetof(CPUX86State,fpregs[reg].mmx.MMX_L(0)));
+                gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 1);
+            }
+            break;
+        case 0x17e: /* movd ea, xmm */
+#ifdef TARGET_X86_64
+            if (s->dflag == MO_64) {
+                tcg_gen_ld_i64(cpu_T0, cpu_env,
+                               offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)));
+                gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 1);
+            } else
+#endif
+            {
+                tcg_gen_ld32u_tl(cpu_T0, cpu_env,
+                                 offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)));
+                gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 1);
+            }
+            break;
+        case 0x27e: /* movq xmm, ea */
+            if (mod != 3) {
+                gen_lea_modrm(env, s, modrm);
+                gen_ldq_env_A0(s, offsetof(CPUX86State,
+                                           xmm_regs[reg].ZMM_Q(0)));
+            } else {
+                rm = (modrm & 7) | REX_B(s);
+                gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)),
+                            offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)));
+            }
+            gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(1)));
+            break;
+        case 0x7f: /* movq ea, mm */
+            if (mod != 3) {
+                gen_lea_modrm(env, s, modrm);
+                gen_stq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx));
+            } else {
+                rm = (modrm & 7);
+                gen_op_movq(offsetof(CPUX86State,fpregs[rm].mmx),
+                            offsetof(CPUX86State,fpregs[reg].mmx));
+            }
+            break;
+        case 0x011: /* movups */
+        case 0x111: /* movupd */
+        case 0x029: /* movaps */
+        case 0x129: /* movapd */
+        case 0x17f: /* movdqa ea, xmm */
+        case 0x27f: /* movdqu ea, xmm */
+            if (mod != 3) {
+                gen_lea_modrm(env, s, modrm);
+                gen_sto_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
+            } else {
+                rm = (modrm & 7) | REX_B(s);
+                gen_op_movo(offsetof(CPUX86State,xmm_regs[rm]),
+                            offsetof(CPUX86State,xmm_regs[reg]));
+            }
+            break;
+        case 0x211: /* movss ea, xmm */
+            if (mod != 3) {
+                gen_lea_modrm(env, s, modrm);
+                tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)));
+                gen_op_st_v(s, MO_32, cpu_T0, cpu_A0);
+            } else {
+                rm = (modrm & 7) | REX_B(s);
+                gen_op_movl(offsetof(CPUX86State,xmm_regs[rm].ZMM_L(0)),
+                            offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)));
+            }
+            break;
+        case 0x311: /* movsd ea, xmm */
+            if (mod != 3) {
+                gen_lea_modrm(env, s, modrm);
+                gen_stq_env_A0(s, offsetof(CPUX86State,
+                                           xmm_regs[reg].ZMM_Q(0)));
+            } else {
+                rm = (modrm & 7) | REX_B(s);
+                gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)),
+                            offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)));
+            }
+            break;
+        case 0x013: /* movlps */
+        case 0x113: /* movlpd */
+            if (mod != 3) {
+                gen_lea_modrm(env, s, modrm);
+                gen_stq_env_A0(s, offsetof(CPUX86State,
+                                           xmm_regs[reg].ZMM_Q(0)));
+            } else {
+                goto illegal_op;
+            }
+            break;
+        case 0x017: /* movhps */
+        case 0x117: /* movhpd */
+            if (mod != 3) {
+                gen_lea_modrm(env, s, modrm);
+                gen_stq_env_A0(s, offsetof(CPUX86State,
+                                           xmm_regs[reg].ZMM_Q(1)));
+            } else {
+                goto illegal_op;
+            }
+            break;
+        case 0x71: /* shift mm, im */
+        case 0x72:
+        case 0x73:
+        case 0x171: /* shift xmm, im */
+        case 0x172:
+        case 0x173:
+            if (b1 >= 2) {
+                goto unknown_op;
+            }
+            val = cpu_ldub_code(env, s->pc++);
+            if (is_xmm) {
+                tcg_gen_movi_tl(cpu_T0, val);
+                tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_t0.ZMM_L(0)));
+                tcg_gen_movi_tl(cpu_T0, 0);
+                tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_t0.ZMM_L(1)));
+                op1_offset = offsetof(CPUX86State,xmm_t0);
+            } else {
+                tcg_gen_movi_tl(cpu_T0, val);
+                tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(0)));
+                tcg_gen_movi_tl(cpu_T0, 0);
+                tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(1)));
+                op1_offset = offsetof(CPUX86State,mmx_t0);
+            }
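+            /* Row: opcode (0x71..0x73) combined with the modrm group
+               field; column: mandatory prefix (b1).  */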
+            sse_fn_epp = sse_op_table2[((b - 1) & 3) * 8 +
+                                       ((modrm >> 3) & 7)][b1];
+            if (!sse_fn_epp) {
+                goto unknown_op;
+            }
+            if (is_xmm) {
+                rm = (modrm & 7) | REX_B(s);
+                op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
+            } else {
+                rm = (modrm & 7);
+                op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
+            }
+            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
+            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op1_offset);
+            sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
+            break;
+        case 0x050: /* movmskps */
+            rm = (modrm & 7) | REX_B(s);
+            tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
+                             offsetof(CPUX86State,xmm_regs[rm]));
+            gen_helper_movmskps(cpu_tmp2_i32, cpu_env, cpu_ptr0);
+            tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
+            break;
+        case 0x150: /* movmskpd */
+            rm = (modrm & 7) | REX_B(s);
+            tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
+                             offsetof(CPUX86State,xmm_regs[rm]));
+            gen_helper_movmskpd(cpu_tmp2_i32, cpu_env, cpu_ptr0);
+            tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
+            break;
+        case 0x02a: /* cvtpi2ps */
+        case 0x12a: /* cvtpi2pd */
+            gen_helper_enter_mmx(cpu_env);
+            if (mod != 3) {
+                gen_lea_modrm(env, s, modrm);
+                op2_offset = offsetof(CPUX86State,mmx_t0);
+                gen_ldq_env_A0(s, op2_offset);
+            } else {
+                rm = (modrm & 7);
+                op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
+            }
+            op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
+            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
+            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
+            switch(b >> 8) {
+            case 0x0:
+                gen_helper_cvtpi2ps(cpu_env, cpu_ptr0, cpu_ptr1);
+                break;
+            default:
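+            /* any unexpected value is treated as cvtpi2pd */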
+            case 0x1:
+                gen_helper_cvtpi2pd(cpu_env, cpu_ptr0, cpu_ptr1);
+                break;
+            }
+            break;
+        case 0x22a: /* cvtsi2ss */
+        case 0x32a: /* cvtsi2sd */
+            ot = mo_64_32(s->dflag);
+            gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
+            op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
+            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
+            if (ot == MO_32) {
+                SSEFunc_0_epi sse_fn_epi = sse_op_table3ai[(b >> 8) & 1];
+                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
+                sse_fn_epi(cpu_env, cpu_ptr0, cpu_tmp2_i32);
+            } else {
+#ifdef TARGET_X86_64
+                SSEFunc_0_epl sse_fn_epl = sse_op_table3aq[(b >> 8) & 1];
+                sse_fn_epl(cpu_env, cpu_ptr0, cpu_T0);
+#else
+                goto illegal_op;
+#endif
+            }
+            break;
+        case 0x02c: /* cvttps2pi */
+        case 0x12c: /* cvttpd2pi */
+        case 0x02d: /* cvtps2pi */
+        case 0x12d: /* cvtpd2pi */
+            gen_helper_enter_mmx(cpu_env);
+            if (mod != 3) {
+                gen_lea_modrm(env, s, modrm);
+                op2_offset = offsetof(CPUX86State,xmm_t0);
+                gen_ldo_env_A0(s, op2_offset);
+            } else {
+                rm = (modrm & 7) | REX_B(s);
+                op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
+            }
+            op1_offset = offsetof(CPUX86State,fpregs[reg & 7].mmx);
+            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
+            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
+            switch(b) {
+            case 0x02c:
+                gen_helper_cvttps2pi(cpu_env, cpu_ptr0, cpu_ptr1);
+                break;
+            case 0x12c:
+                gen_helper_cvttpd2pi(cpu_env, cpu_ptr0, cpu_ptr1);
+                break;
+            case 0x02d:
+                gen_helper_cvtps2pi(cpu_env, cpu_ptr0, cpu_ptr1);
+                break;
+            case 0x12d:
+                gen_helper_cvtpd2pi(cpu_env, cpu_ptr0, cpu_ptr1);
+                break;
+            }
+            break;
+        case 0x22c: /* cvttss2si */
+        case 0x32c: /* cvttsd2si */
+        case 0x22d: /* cvtss2si */
+        case 0x32d: /* cvtsd2si */
+            ot = mo_64_32(s->dflag);
+            if (mod != 3) {
+                gen_lea_modrm(env, s, modrm);
+                if ((b >> 8) & 1) {
+                    gen_ldq_env_A0(s, offsetof(CPUX86State, xmm_t0.ZMM_Q(0)));
+                } else {
+                    gen_op_ld_v(s, MO_32, cpu_T0, cpu_A0);
+                    tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_t0.ZMM_L(0)));
+                }
+                op2_offset = offsetof(CPUX86State,xmm_t0);
+            } else {
+                rm = (modrm & 7) | REX_B(s);
+                op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
+            }
+            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
+            if (ot == MO_32) {
+                SSEFunc_i_ep sse_fn_i_ep =
+                    sse_op_table3bi[((b >> 7) & 2) | (b & 1)];
+                sse_fn_i_ep(cpu_tmp2_i32, cpu_env, cpu_ptr0);
+                tcg_gen_extu_i32_tl(cpu_T0, cpu_tmp2_i32);
+            } else {
+#ifdef TARGET_X86_64
+                SSEFunc_l_ep sse_fn_l_ep =
+                    sse_op_table3bq[((b >> 7) & 2) | (b & 1)];
+                sse_fn_l_ep(cpu_T0, cpu_env, cpu_ptr0);
+#else
+                goto illegal_op;
+#endif
+            }
+            gen_op_mov_reg_v(ot, reg, cpu_T0);
+            break;
+        case 0xc4: /* pinsrw */
+        case 0x1c4:
+            s->rip_offset = 1;
+            gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
+            val = cpu_ldub_code(env, s->pc++);
+            if (b1) {
+                val &= 7;
+                tcg_gen_st16_tl(cpu_T0, cpu_env,
+                                offsetof(CPUX86State,xmm_regs[reg].ZMM_W(val)));
+            } else {
+                val &= 3;
+                tcg_gen_st16_tl(cpu_T0, cpu_env,
+                                offsetof(CPUX86State,fpregs[reg].mmx.MMX_W(val)));
+            }
+            break;
+        case 0xc5: /* pextrw */
+        case 0x1c5:
+            if (mod != 3)
+                goto illegal_op;
+            ot = mo_64_32(s->dflag);
+            val = cpu_ldub_code(env, s->pc++);
+            if (b1) {
+                val &= 7;
+                rm = (modrm & 7) | REX_B(s);
+                tcg_gen_ld16u_tl(cpu_T0, cpu_env,
+                                 offsetof(CPUX86State,xmm_regs[rm].ZMM_W(val)));
+            } else {
+                val &= 3;
+                rm = (modrm & 7);
+                tcg_gen_ld16u_tl(cpu_T0, cpu_env,
+                                offsetof(CPUX86State,fpregs[rm].mmx.MMX_W(val)));
+            }
+            reg = ((modrm >> 3) & 7) | rex_r;
+            gen_op_mov_reg_v(ot, reg, cpu_T0);
+            break;
+        case 0x1d6: /* movq ea, xmm */
+            if (mod != 3) {
+                gen_lea_modrm(env, s, modrm);
+                gen_stq_env_A0(s, offsetof(CPUX86State,
+                                           xmm_regs[reg].ZMM_Q(0)));
+            } else {
+                rm = (modrm & 7) | REX_B(s);
+                gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)),
+                            offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)));
+                gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(1)));
+            }
+            break;
+        case 0x2d6: /* movq2dq */
+            gen_helper_enter_mmx(cpu_env);
+            rm = (modrm & 7);
+            gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)),
+                        offsetof(CPUX86State,fpregs[rm].mmx));
+            gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(1)));
+            break;
+        case 0x3d6: /* movdq2q */
+            gen_helper_enter_mmx(cpu_env);
+            rm = (modrm & 7) | REX_B(s);
+            gen_op_movq(offsetof(CPUX86State,fpregs[reg & 7].mmx),
+                        offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)));
+            break;
+        case 0xd7: /* pmovmskb */
+        case 0x1d7:
+            if (mod != 3)
+                goto illegal_op;
+            if (b1) {
+                rm = (modrm & 7) | REX_B(s);
+                tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,xmm_regs[rm]));
+                gen_helper_pmovmskb_xmm(cpu_tmp2_i32, cpu_env, cpu_ptr0);
+            } else {
+                rm = (modrm & 7);
+                tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,fpregs[rm].mmx));
+                gen_helper_pmovmskb_mmx(cpu_tmp2_i32, cpu_env, cpu_ptr0);
+            }
+            reg = ((modrm >> 3) & 7) | rex_r;
+            tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
+            break;
+
+        case 0x138:
+        case 0x038:
+            b = modrm;
+            if ((b & 0xf0) == 0xf0) {
+                goto do_0f_38_fx;
+            }
+            modrm = cpu_ldub_code(env, s->pc++);
+            rm = modrm & 7;
+            reg = ((modrm >> 3) & 7) | rex_r;
+            mod = (modrm >> 6) & 3;
+            if (b1 >= 2) {
+                goto unknown_op;
+            }
+
+            sse_fn_epp = sse_op_table6[b].op[b1];
+            if (!sse_fn_epp) {
+                goto unknown_op;
+            }
+            if (!(s->cpuid_ext_features & sse_op_table6[b].ext_mask))
+                goto illegal_op;
+
+            if (b1) {
+                op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
+                if (mod == 3) {
+                    op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
+                } else {
+                    op2_offset = offsetof(CPUX86State,xmm_t0);
+                    gen_lea_modrm(env, s, modrm);
+                    switch (b) {
+                    case 0x20: case 0x30: /* pmovsxbw, pmovzxbw */
+                    case 0x23: case 0x33: /* pmovsxwd, pmovzxwd */
+                    case 0x25: case 0x35: /* pmovsxdq, pmovzxdq */
+                        gen_ldq_env_A0(s, op2_offset +
+                                        offsetof(ZMMReg, ZMM_Q(0)));
+                        break;
+                    case 0x21: case 0x31: /* pmovsxbd, pmovzxbd */
+                    case 0x24: case 0x34: /* pmovsxwq, pmovzxwq */
+                        tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
+                                            s->mem_index, MO_LEUL);
+                        tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, op2_offset +
+                                        offsetof(ZMMReg, ZMM_L(0)));
+                        break;
+                    case 0x22: case 0x32: /* pmovsxbq, pmovzxbq */
+                        tcg_gen_qemu_ld_tl(cpu_tmp0, cpu_A0,
+                                           s->mem_index, MO_LEUW);
+                        tcg_gen_st16_tl(cpu_tmp0, cpu_env, op2_offset +
+                                        offsetof(ZMMReg, ZMM_W(0)));
+                        break;
+                    case 0x2a:            /* movntdqa */
+                        gen_ldo_env_A0(s, op1_offset);
+                        return;
+                    default:
+                        gen_ldo_env_A0(s, op2_offset);
+                    }
+                }
+            } else {
+                op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
+                if (mod == 3) {
+                    op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
+                } else {
+                    op2_offset = offsetof(CPUX86State,mmx_t0);
+                    gen_lea_modrm(env, s, modrm);
+                    gen_ldq_env_A0(s, op2_offset);
+                }
+            }
+            if (sse_fn_epp == SSE_SPECIAL) {
+                goto unknown_op;
+            }
+
+            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
+            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
+            sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
+
+            if (b == 0x17) {
+                set_cc_op(s, CC_OP_EFLAGS);
+            }
+            break;
+
+        case 0x238:
+        case 0x338:
+        do_0f_38_fx:
+            /* Various integer extensions at 0f 38 f[0-f].  */
+            b = modrm | (b1 << 8);
+            modrm = cpu_ldub_code(env, s->pc++);
+            reg = ((modrm >> 3) & 7) | rex_r;
+
+            switch (b) {
+            case 0x3f0: /* crc32 Gd,Eb */
+            case 0x3f1: /* crc32 Gd,Ey */
+            do_crc32:
+                if (!(s->cpuid_ext_features & CPUID_EXT_SSE42)) {
+                    goto illegal_op;
+                }
+                if ((b & 0xff) == 0xf0) {
+                    ot = MO_8;
+                } else if (s->dflag != MO_64) {
+                    ot = (s->prefix & PREFIX_DATA ? MO_16 : MO_32);
+                } else {
+                    ot = MO_64;
+                }
+
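+                /* The helper folds cpu_T0 into the running crc from
+                   Gd; the last argument is the data width in bits.  */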
+                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[reg]);
+                gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
+                gen_helper_crc32(cpu_T0, cpu_tmp2_i32,
+                                 cpu_T0, tcg_const_i32(8 << ot));
+
+                ot = mo_64_32(s->dflag);
+                gen_op_mov_reg_v(ot, reg, cpu_T0);
+                break;
+
+            case 0x1f0: /* crc32 or movbe */
+            case 0x1f1:
+                /* For these insns, the f3 prefix is supposed to take
+                   priority over the 66 prefix, but that is not how b1
+                   was computed above.  */
+                if (s->prefix & PREFIX_REPNZ) {
+                    goto do_crc32;
+                }
+                /* FALLTHRU */
+            case 0x0f0: /* movbe Gy,My */
+            case 0x0f1: /* movbe My,Gy */
+                if (!(s->cpuid_ext_features & CPUID_EXT_MOVBE)) {
+                    goto illegal_op;
+                }
+                if (s->dflag != MO_64) {
+                    ot = (s->prefix & PREFIX_DATA ? MO_16 : MO_32);
+                } else {
+                    ot = MO_64;
+                }
+
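+                /* movbe always takes a memory operand; MO_BE makes the
+                   access byte-swapped.  */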
+                gen_lea_modrm(env, s, modrm);
+                if ((b & 1) == 0) {
+                    tcg_gen_qemu_ld_tl(cpu_T0, cpu_A0,
+                                       s->mem_index, ot | MO_BE);
+                    gen_op_mov_reg_v(ot, reg, cpu_T0);
+                } else {
+                    tcg_gen_qemu_st_tl(cpu_regs[reg], cpu_A0,
+                                       s->mem_index, ot | MO_BE);
+                }
+                break;
+
+            case 0x0f2: /* andn Gy, By, Ey */
+                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
+                    || !(s->prefix & PREFIX_VEX)
+                    || s->vex_l != 0) {
+                    goto illegal_op;
+                }
+                ot = mo_64_32(s->dflag);
+                gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
+                tcg_gen_andc_tl(cpu_T0, cpu_regs[s->vex_v], cpu_T0);
+                gen_op_mov_reg_v(ot, reg, cpu_T0);
+                gen_op_update1_cc();
+                set_cc_op(s, CC_OP_LOGICB + ot);
+                break;
+
+            case 0x0f7: /* bextr Gy, Ey, By */
+                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
+                    || !(s->prefix & PREFIX_VEX)
+                    || s->vex_l != 0) {
+                    goto illegal_op;
+                }
+                ot = mo_64_32(s->dflag);
+                {
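+                    /* The control word in the vvvv register holds
+                       START in bits [7:0] and LEN in bits [15:8].  */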
+                    TCGv bound, zero;
+
+                    gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
+                    /* Extract START, and shift the operand.
+                       Shifts larger than operand size get zeros.  */
+                    tcg_gen_ext8u_tl(cpu_A0, cpu_regs[s->vex_v]);
+                    tcg_gen_shr_tl(cpu_T0, cpu_T0, cpu_A0);
+
+                    bound = tcg_const_tl(ot == MO_64 ? 63 : 31);
+                    zero = tcg_const_tl(0);
+                    tcg_gen_movcond_tl(TCG_COND_LEU, cpu_T0, cpu_A0, bound,
+                                       cpu_T0, zero);
+                    tcg_temp_free(zero);
+
+                    /* Extract the LEN into a mask.  Lengths larger than
+                       operand size get all ones.  */
+                    tcg_gen_shri_tl(cpu_A0, cpu_regs[s->vex_v], 8);
+                    tcg_gen_ext8u_tl(cpu_A0, cpu_A0);
+                    tcg_gen_movcond_tl(TCG_COND_LEU, cpu_A0, cpu_A0, bound,
+                                       cpu_A0, bound);
+                    tcg_temp_free(bound);
+                    tcg_gen_movi_tl(cpu_T1, 1);
+                    tcg_gen_shl_tl(cpu_T1, cpu_T1, cpu_A0);
+                    tcg_gen_subi_tl(cpu_T1, cpu_T1, 1);
+                    tcg_gen_and_tl(cpu_T0, cpu_T0, cpu_T1);
+
+                    gen_op_mov_reg_v(ot, reg, cpu_T0);
+                    gen_op_update1_cc();
+                    set_cc_op(s, CC_OP_LOGICB + ot);
+                }
+                break;
+
+            case 0x0f5: /* bzhi Gy, Ey, By */
+                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
+                    || !(s->prefix & PREFIX_VEX)
+                    || s->vex_l != 0) {
+                    goto illegal_op;
+                }
+                ot = mo_64_32(s->dflag);
+                gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
+                tcg_gen_ext8u_tl(cpu_T1, cpu_regs[s->vex_v]);
+                {
+                    TCGv bound = tcg_const_tl(ot == MO_64 ? 63 : 31);
+                    /* Note that since we're using BMILG (in order to get O
+                       cleared) we need to store the inverse into C.  */
+                    tcg_gen_setcond_tl(TCG_COND_LT, cpu_cc_src,
+                                       cpu_T1, bound);
+                    tcg_gen_movcond_tl(TCG_COND_GT, cpu_T1, cpu_T1,
+                                       bound, bound, cpu_T1);
+                    tcg_temp_free(bound);
+                }
+                tcg_gen_movi_tl(cpu_A0, -1);
+                tcg_gen_shl_tl(cpu_A0, cpu_A0, cpu_T1);
+                tcg_gen_andc_tl(cpu_T0, cpu_T0, cpu_A0);
+                gen_op_mov_reg_v(ot, reg, cpu_T0);
+                gen_op_update1_cc();
+                set_cc_op(s, CC_OP_BMILGB + ot);
+                break;
+
+            case 0x3f6: /* mulx By, Gy, rdx, Ey */
+                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
+                    || !(s->prefix & PREFIX_VEX)
+                    || s->vex_l != 0) {
+                    goto illegal_op;
+                }
+                ot = mo_64_32(s->dflag);
+                gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
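+                /* Unsigned EDX/RDX * Ey: low half to the vvvv register,
+                   high half to Gy.  No flags are written, hence no
+                   set_cc_op here.  */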
+                switch (ot) {
+                default:
+                    tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
+                    tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EDX]);
+                    tcg_gen_mulu2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
+                                      cpu_tmp2_i32, cpu_tmp3_i32);
+                    tcg_gen_extu_i32_tl(cpu_regs[s->vex_v], cpu_tmp2_i32);
+                    tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp3_i32);
+                    break;
+#ifdef TARGET_X86_64
+                case MO_64:
+                    tcg_gen_mulu2_i64(cpu_T0, cpu_T1,
+                                      cpu_T0, cpu_regs[R_EDX]);
+                    tcg_gen_mov_i64(cpu_regs[s->vex_v], cpu_T0);
+                    tcg_gen_mov_i64(cpu_regs[reg], cpu_T1);
+                    break;
+#endif
+                }
+                break;
+
+            case 0x3f5: /* pdep Gy, By, Ey */
+                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
+                    || !(s->prefix & PREFIX_VEX)
+                    || s->vex_l != 0) {
+                    goto illegal_op;
+                }
+                ot = mo_64_32(s->dflag);
+                gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
+                /* Note that by zero-extending the mask operand, we
+                   automatically handle zero-extending the result.  */
+                if (ot == MO_64) {
+                    tcg_gen_mov_tl(cpu_T1, cpu_regs[s->vex_v]);
+                } else {
+                    tcg_gen_ext32u_tl(cpu_T1, cpu_regs[s->vex_v]);
+                }
+                gen_helper_pdep(cpu_regs[reg], cpu_T0, cpu_T1);
+                break;
+
+            case 0x2f5: /* pext Gy, By, Ey */
+                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
+                    || !(s->prefix & PREFIX_VEX)
+                    || s->vex_l != 0) {
+                    goto illegal_op;
+                }
+                ot = mo_64_32(s->dflag);
+                gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
+                /* Note that by zero-extending the mask operand, we
+                   automatically handle zero-extending the result.  */
+                if (ot == MO_64) {
+                    tcg_gen_mov_tl(cpu_T1, cpu_regs[s->vex_v]);
+                } else {
+                    tcg_gen_ext32u_tl(cpu_T1, cpu_regs[s->vex_v]);
+                }
+                gen_helper_pext(cpu_regs[reg], cpu_T0, cpu_T1);
+                break;
+
+            case 0x1f6: /* adcx Gy, Ey */
+            case 0x2f6: /* adox Gy, Ey */
+                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_ADX)) {
+                    goto illegal_op;
+                } else {
+                    TCGv carry_in, carry_out, zero;
+                    int end_op;
+
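+                    /* ADCX updates only CF and ADOX only OF; CC_OP_ADCOX
+                       tracks both so interleaved rounds can reuse the
+                       carry-out below.  */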
+                    ot = mo_64_32(s->dflag);
+                    gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
+
+                    /* Re-use the carry-out from a previous round.  */
+                    TCGV_UNUSED(carry_in);
+                    carry_out = (b == 0x1f6 ? cpu_cc_dst : cpu_cc_src2);
+                    switch (s->cc_op) {
+                    case CC_OP_ADCX:
+                        if (b == 0x1f6) {
+                            carry_in = cpu_cc_dst;
+                            end_op = CC_OP_ADCX;
+                        } else {
+                            end_op = CC_OP_ADCOX;
+                        }
+                        break;
+                    case CC_OP_ADOX:
+                        if (b == 0x1f6) {
+                            end_op = CC_OP_ADCOX;
+                        } else {
+                            carry_in = cpu_cc_src2;
+                            end_op = CC_OP_ADOX;
+                        }
+                        break;
+                    case CC_OP_ADCOX:
+                        end_op = CC_OP_ADCOX;
+                        carry_in = carry_out;
+                        break;
+                    default:
+                        end_op = (b == 0x1f6 ? CC_OP_ADCX : CC_OP_ADOX);
+                        break;
+                    }
+                    /* If we can't reuse carry-out, get it out of EFLAGS.  */
+                    if (TCGV_IS_UNUSED(carry_in)) {
+                        if (s->cc_op != CC_OP_ADCX && s->cc_op != CC_OP_ADOX) {
+                            gen_compute_eflags(s);
+                        }
+                        carry_in = cpu_tmp0;
+                        tcg_gen_shri_tl(carry_in, cpu_cc_src,
+                                        ctz32(b == 0x1f6 ? CC_C : CC_O));
+                        tcg_gen_andi_tl(carry_in, carry_in, 1);
+                    }
+
+                    switch (ot) {
+#ifdef TARGET_X86_64
+                    case MO_32:
+                        /* If we know TL is 64-bit, and we want a 32-bit
+                           result, just do everything in 64-bit arithmetic.  */
+                        tcg_gen_ext32u_i64(cpu_regs[reg], cpu_regs[reg]);
+                        tcg_gen_ext32u_i64(cpu_T0, cpu_T0);
+                        tcg_gen_add_i64(cpu_T0, cpu_T0, cpu_regs[reg]);
+                        tcg_gen_add_i64(cpu_T0, cpu_T0, carry_in);
+                        tcg_gen_ext32u_i64(cpu_regs[reg], cpu_T0);
+                        tcg_gen_shri_i64(carry_out, cpu_T0, 32);
+                        break;
+#endif
+                    default:
+                        /* Otherwise compute the carry-out in two steps.  */
+                        zero = tcg_const_tl(0);
+                        tcg_gen_add2_tl(cpu_T0, carry_out,
+                                        cpu_T0, zero,
+                                        carry_in, zero);
+                        tcg_gen_add2_tl(cpu_regs[reg], carry_out,
+                                        cpu_regs[reg], carry_out,
+                                        cpu_T0, zero);
+                        tcg_temp_free(zero);
+                        break;
+                    }
+                    set_cc_op(s, end_op);
+                }
+                break;
+
+            case 0x1f7: /* shlx Gy, Ey, By */
+            case 0x2f7: /* sarx Gy, Ey, By */
+            case 0x3f7: /* shrx Gy, Ey, By */
+                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
+                    || !(s->prefix & PREFIX_VEX)
+                    || s->vex_l != 0) {
+                    goto illegal_op;
+                }
+                ot = mo_64_32(s->dflag);
+                gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
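+                /* The count comes from the vvvv register, masked to the
+                   operand width.  Unlike the legacy shifts, no flags
+                   are written.  */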
+                if (ot == MO_64) {
+                    tcg_gen_andi_tl(cpu_T1, cpu_regs[s->vex_v], 63);
+                } else {
+                    tcg_gen_andi_tl(cpu_T1, cpu_regs[s->vex_v], 31);
+                }
+                if (b == 0x1f7) {
+                    tcg_gen_shl_tl(cpu_T0, cpu_T0, cpu_T1);
+                } else if (b == 0x2f7) {
+                    if (ot != MO_64) {
+                        tcg_gen_ext32s_tl(cpu_T0, cpu_T0);
+                    }
+                    tcg_gen_sar_tl(cpu_T0, cpu_T0, cpu_T1);
+                } else {
+                    if (ot != MO_64) {
+                        tcg_gen_ext32u_tl(cpu_T0, cpu_T0);
+                    }
+                    tcg_gen_shr_tl(cpu_T0, cpu_T0, cpu_T1);
+                }
+                gen_op_mov_reg_v(ot, reg, cpu_T0);
+                break;
+
+            case 0x0f3:
+            case 0x1f3:
+            case 0x2f3:
+            case 0x3f3: /* Group 17 */
+                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
+                    || !(s->prefix & PREFIX_VEX)
+                    || s->vex_l != 0) {
+                    goto illegal_op;
+                }
+                ot = mo_64_32(s->dflag);
+                gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
+
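+                /* The modrm reg field selects the group 17 operation.  */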
+                switch (reg & 7) {
+                case 1: /* blsr By,Ey */
+                    tcg_gen_neg_tl(cpu_T1, cpu_T0);
+                    tcg_gen_and_tl(cpu_T0, cpu_T0, cpu_T1);
+                    gen_op_mov_reg_v(ot, s->vex_v, cpu_T0);
+                    gen_op_update2_cc();
+                    set_cc_op(s, CC_OP_BMILGB + ot);
+                    break;
+
+                case 2: /* blsmsk By,Ey */
+                    tcg_gen_mov_tl(cpu_cc_src, cpu_T0);
+                    tcg_gen_subi_tl(cpu_T0, cpu_T0, 1);
+                    tcg_gen_xor_tl(cpu_T0, cpu_T0, cpu_cc_src);
+                    tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
+                    set_cc_op(s, CC_OP_BMILGB + ot);
+                    break;
+
+                case 3: /* blsi By, Ey */
+                    tcg_gen_mov_tl(cpu_cc_src, cpu_T0);
+                    tcg_gen_subi_tl(cpu_T0, cpu_T0, 1);
+                    tcg_gen_and_tl(cpu_T0, cpu_T0, cpu_cc_src);
+                    tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
+                    set_cc_op(s, CC_OP_BMILGB + ot);
+                    break;
+
+                default:
+                    goto unknown_op;
+                }
+                break;
+
+            default:
+                goto unknown_op;
+            }
+            break;
+
+        case 0x03a:
+        case 0x13a:
+            b = modrm;
+            modrm = cpu_ldub_code(env, s->pc++);
+            rm = modrm & 7;
+            reg = ((modrm >> 3) & 7) | rex_r;
+            mod = (modrm >> 6) & 3;
+            if (b1 >= 2) {
+                goto unknown_op;
+            }
+
+            sse_fn_eppi = sse_op_table7[b].op[b1];
+            if (!sse_fn_eppi) {
+                goto unknown_op;
+            }
+            if (!(s->cpuid_ext_features & sse_op_table7[b].ext_mask))
+                goto illegal_op;
+
+            if (sse_fn_eppi == SSE_SPECIAL) {
+                ot = mo_64_32(s->dflag);
+                rm = (modrm & 7) | REX_B(s);
+                if (mod != 3)
+                    gen_lea_modrm(env, s, modrm);
+                reg = ((modrm >> 3) & 7) | rex_r;
+                val = cpu_ldub_code(env, s->pc++);
+                switch (b) {
+                case 0x14: /* pextrb */
+                    tcg_gen_ld8u_tl(cpu_T0, cpu_env, offsetof(CPUX86State,
+                                            xmm_regs[reg].ZMM_B(val & 15)));
+                    if (mod == 3) {
+                        gen_op_mov_reg_v(ot, rm, cpu_T0);
+                    } else {
+                        tcg_gen_qemu_st_tl(cpu_T0, cpu_A0,
+                                           s->mem_index, MO_UB);
+                    }
+                    break;
+                case 0x15: /* pextrw */
+                    tcg_gen_ld16u_tl(cpu_T0, cpu_env, offsetof(CPUX86State,
+                                            xmm_regs[reg].ZMM_W(val & 7)));
+                    if (mod == 3) {
+                        gen_op_mov_reg_v(ot, rm, cpu_T0);
+                    } else {
+                        tcg_gen_qemu_st_tl(cpu_T0, cpu_A0,
+                                           s->mem_index, MO_LEUW);
+                    }
+                    break;
+                case 0x16:
+                    if (ot == MO_32) { /* pextrd */
+                        tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
+                                        offsetof(CPUX86State,
+                                                xmm_regs[reg].ZMM_L(val & 3)));
+                        if (mod == 3) {
+                            tcg_gen_extu_i32_tl(cpu_regs[rm], cpu_tmp2_i32);
+                        } else {
+                            tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
+                                                s->mem_index, MO_LEUL);
+                        }
+                    } else { /* pextrq */
+#ifdef TARGET_X86_64
+                        tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
+                                        offsetof(CPUX86State,
+                                                xmm_regs[reg].ZMM_Q(val & 1)));
+                        if (mod == 3) {
+                            tcg_gen_mov_i64(cpu_regs[rm], cpu_tmp1_i64);
+                        } else {
+                            tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0,
+                                                s->mem_index, MO_LEQ);
+                        }
+#else
+                        goto illegal_op;
+#endif
+                    }
+                    break;
+                case 0x17: /* extractps */
+                    tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State,
+                                            xmm_regs[reg].ZMM_L(val & 3)));
+                    if (mod == 3) {
+                        gen_op_mov_reg_v(ot, rm, cpu_T0);
+                    } else {
+                        tcg_gen_qemu_st_tl(cpu_T0, cpu_A0,
+                                           s->mem_index, MO_LEUL);
+                    }
+                    break;
+                case 0x20: /* pinsrb */
+                    if (mod == 3) {
+                        gen_op_mov_v_reg(MO_32, cpu_T0, rm);
+                    } else {
+                        tcg_gen_qemu_ld_tl(cpu_T0, cpu_A0,
+                                           s->mem_index, MO_UB);
+                    }
+                    tcg_gen_st8_tl(cpu_T0, cpu_env, offsetof(CPUX86State,
+                                            xmm_regs[reg].ZMM_B(val & 15)));
+                    break;
+                case 0x21: /* insertps */
+                    if (mod == 3) {
+                        tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
+                                        offsetof(CPUX86State,xmm_regs[rm]
+                                                .ZMM_L((val >> 6) & 3)));
+                    } else {
+                        tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
+                                            s->mem_index, MO_LEUL);
+                    }
+                    tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
+                                    offsetof(CPUX86State,xmm_regs[reg]
+                                            .ZMM_L((val >> 4) & 3)));
+                    if ((val >> 0) & 1)
+                        tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
+                                        cpu_env, offsetof(CPUX86State,
+                                                xmm_regs[reg].ZMM_L(0)));
+                    if ((val >> 1) & 1)
+                        tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
+                                        cpu_env, offsetof(CPUX86State,
+                                                xmm_regs[reg].ZMM_L(1)));
+                    if ((val >> 2) & 1)
+                        tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
+                                        cpu_env, offsetof(CPUX86State,
+                                                xmm_regs[reg].ZMM_L(2)));
+                    if ((val >> 3) & 1)
+                        tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
+                                        cpu_env, offsetof(CPUX86State,
+                                                xmm_regs[reg].ZMM_L(3)));
+                    break;
+                case 0x22:
+                    if (ot == MO_32) { /* pinsrd */
+                        if (mod == 3) {
+                            tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[rm]);
+                        } else {
+                            tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
+                                                s->mem_index, MO_LEUL);
+                        }
+                        tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
+                                        offsetof(CPUX86State,
+                                                xmm_regs[reg].ZMM_L(val & 3)));
+                    } else { /* pinsrq */
+#ifdef TARGET_X86_64
+                        if (mod == 3) {
+                            gen_op_mov_v_reg(ot, cpu_tmp1_i64, rm);
+                        } else {
+                            tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0,
+                                                s->mem_index, MO_LEQ);
+                        }
+                        tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
+                                        offsetof(CPUX86State,
+                                                xmm_regs[reg].ZMM_Q(val & 1)));
+#else
+                        goto illegal_op;
+#endif
+                    }
+                    break;
+                }
+                return;
+            }
+
+            if (b1) {
+                op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
+                if (mod == 3) {
+                    op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
+                } else {
+                    op2_offset = offsetof(CPUX86State,xmm_t0);
+                    gen_lea_modrm(env, s, modrm);
+                    gen_ldo_env_A0(s, op2_offset);
+                }
+            } else {
+                op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
+                if (mod == 3) {
+                    op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
+                } else {
+                    op2_offset = offsetof(CPUX86State,mmx_t0);
+                    gen_lea_modrm(env, s, modrm);
+                    gen_ldq_env_A0(s, op2_offset);
+                }
+            }
+            val = cpu_ldub_code(env, s->pc++);
+
+            if ((b & 0xfc) == 0x60) { /* pcmpXstrX */
+                set_cc_op(s, CC_OP_EFLAGS);
+
+                if (s->dflag == MO_64) {
+                    /* The helper must use entire 64-bit gp registers */
+                    val |= 1 << 8;
+                }
+            }
+
+            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
+            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
+            sse_fn_eppi(cpu_env, cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
+            break;
+
+        case 0x33a:
+            /* Various integer extensions at 0f 3a f[0-f].  */
+            b = modrm | (b1 << 8);
+            modrm = cpu_ldub_code(env, s->pc++);
+            reg = ((modrm >> 3) & 7) | rex_r;
+
+            switch (b) {
+            case 0x3f0: /* rorx Gy,Ey, Ib */
+                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
+                    || !(s->prefix & PREFIX_VEX)
+                    || s->vex_l != 0) {
+                    goto illegal_op;
+                }
+                ot = mo_64_32(s->dflag);
+                gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
+                b = cpu_ldub_code(env, s->pc++);
+                if (ot == MO_64) {
+                    tcg_gen_rotri_tl(cpu_T0, cpu_T0, b & 63);
+                } else {
+                    tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
+                    tcg_gen_rotri_i32(cpu_tmp2_i32, cpu_tmp2_i32, b & 31);
+                    tcg_gen_extu_i32_tl(cpu_T0, cpu_tmp2_i32);
+                }
+                gen_op_mov_reg_v(ot, reg, cpu_T0);
+                break;
+
+            default:
+                goto unknown_op;
+            }
+            break;
+
+        default:
+        unknown_op:
+            gen_unknown_opcode(env, s);
+            return;
+        }
+    } else {
+        /* generic MMX or SSE operation */
+        switch(b) {
+        case 0x70: /* pshufx insn */
+        case 0xc6: /* shufps/shufpd */
+        case 0xc2: /* compare insns */
+            s->rip_offset = 1;
+            break;
+        default:
+            break;
+        }
+        if (is_xmm) {
+            op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
+            if (mod != 3) {
+                int sz = 4;
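+                /* sz is log2 of the memory operand size: 2 = 32 bit,
+                   3 = 64 bit, 4 = full 128 bit (the default).  */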
+
+                gen_lea_modrm(env, s, modrm);
+                op2_offset = offsetof(CPUX86State,xmm_t0);
+
+                switch (b) {
+                case 0x50 ... 0x5a:
+                case 0x5c ... 0x5f:
+                case 0xc2:
+                    /* Most sse scalar operations.  */
+                    if (b1 == 2) {
+                        sz = 2;
+                    } else if (b1 == 3) {
+                        sz = 3;
+                    }
+                    break;
+
+                case 0x2e:  /* ucomis[sd] */
+                case 0x2f:  /* comis[sd] */
+                    if (b1 == 0) {
+                        sz = 2;
+                    } else {
+                        sz = 3;
+                    }
+                    break;
+                }
+
+                switch (sz) {
+                case 2:
+                    /* 32 bit access */
+                    gen_op_ld_v(s, MO_32, cpu_T0, cpu_A0);
+                    tcg_gen_st32_tl(cpu_T0, cpu_env,
+                                    offsetof(CPUX86State,xmm_t0.ZMM_L(0)));
+                    break;
+                case 3:
+                    /* 64 bit access */
+                    gen_ldq_env_A0(s, offsetof(CPUX86State, xmm_t0.ZMM_D(0)));
+                    break;
+                default:
+                    /* 128 bit access */
+                    gen_ldo_env_A0(s, op2_offset);
+                    break;
+                }
+            } else {
+                rm = (modrm & 7) | REX_B(s);
+                op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
+            }
+        } else {
+            op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
+            if (mod != 3) {
+                gen_lea_modrm(env, s, modrm);
+                op2_offset = offsetof(CPUX86State,mmx_t0);
+                gen_ldq_env_A0(s, op2_offset);
+            } else {
+                rm = (modrm & 7);
+                op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
+            }
+        }
+        switch(b) {
+        case 0x0f: /* 3DNow! data insns */
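+            /* 3DNow! encodes the operation in a one-byte immediate
+               suffix that follows the operands.  */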
+            val = cpu_ldub_code(env, s->pc++);
+            sse_fn_epp = sse_op_table5[val];
+            if (!sse_fn_epp) {
+                goto unknown_op;
+            }
+            if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW)) {
+                goto illegal_op;
+            }
+            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
+            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
+            sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
+            break;
+        case 0x70: /* pshufx insn */
+        case 0xc6: /* shufps/shufpd */
+            val = cpu_ldub_code(env, s->pc++);
+            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
+            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
+            /* XXX: introduce a new table? */
+            sse_fn_ppi = (SSEFunc_0_ppi)sse_fn_epp;
+            sse_fn_ppi(cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
+            break;
+        case 0xc2:
+            /* compare insns */
+            val = cpu_ldub_code(env, s->pc++);
+            if (val >= 8)
+                goto unknown_op;
+            sse_fn_epp = sse_op_table4[val][b1];
+
+            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
+            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
+            sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
+            break;
+        case 0xf7:
+            /* maskmov: the implicit destination is DS:rDI, so A0 must be built here */
+            if (mod != 3)
+                goto illegal_op;
+            tcg_gen_mov_tl(cpu_A0, cpu_regs[R_EDI]);
+            gen_extu(s->aflag, cpu_A0);
+            gen_add_A0_ds_seg(s);
+
+            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
+            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
+            /* XXX: introduce a new table? */
+            sse_fn_eppt = (SSEFunc_0_eppt)sse_fn_epp;
+            sse_fn_eppt(cpu_env, cpu_ptr0, cpu_ptr1, cpu_A0);
+            break;
+        default:
+            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
+            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
+            sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
+            break;
+        }
+        if (b == 0x2e || b == 0x2f) {
+            set_cc_op(s, CC_OP_EFLAGS);
+        }
+    }
+}
+
+/* Convert one instruction.  s->is_jmp is set if the translation must
+   be stopped.  Return the next pc value.  */
+static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
+                               target_ulong pc_start)
+{
+    int b, prefixes;
+    int shift;
+    TCGMemOp ot, aflag, dflag;
+    int modrm, reg, rm, mod, op, opreg, val;
+    target_ulong next_eip, tval;
+    int rex_w, rex_r;
+
+    s->pc_start = s->pc = pc_start;
+    prefixes = 0;
+    s->override = -1;
+    rex_w = -1;
+    rex_r = 0;
+#ifdef TARGET_X86_64
+    s->rex_x = 0;
+    s->rex_b = 0;
+    x86_64_hregs = 0;
+#endif
+    s->rip_offset = 0; /* immediate bytes after modrm, for rip-relative addressing */
+    s->vex_l = 0;
+    s->vex_v = 0;
+ next_byte:
+    b = cpu_ldub_code(env, s->pc);
+    s->pc++;
+    /* Collect prefixes.  */
+    switch (b) {
+    case 0xf3:
+        prefixes |= PREFIX_REPZ;
+        goto next_byte;
+    case 0xf2:
+        prefixes |= PREFIX_REPNZ;
+        goto next_byte;
+    case 0xf0:
+        prefixes |= PREFIX_LOCK;
+        goto next_byte;
+    case 0x2e:
+        s->override = R_CS;
+        goto next_byte;
+    case 0x36:
+        s->override = R_SS;
+        goto next_byte;
+    case 0x3e:
+        s->override = R_DS;
+        goto next_byte;
+    case 0x26:
+        s->override = R_ES;
+        goto next_byte;
+    case 0x64:
+        s->override = R_FS;
+        goto next_byte;
+    case 0x65:
+        s->override = R_GS;
+        goto next_byte;
+    case 0x66:
+        prefixes |= PREFIX_DATA;
+        goto next_byte;
+    case 0x67:
+        prefixes |= PREFIX_ADR;
+        goto next_byte;
+#ifdef TARGET_X86_64
+    case 0x40 ... 0x4f:
+        if (CODE64(s)) {
+            /* REX prefix */
+            rex_w = (b >> 3) & 1;
+            rex_r = (b & 0x4) << 1;
+            s->rex_x = (b & 0x2) << 2;
+            REX_B(s) = (b & 0x1) << 3;
+            x86_64_hregs = 1; /* select uniform byte register addressing */
+            goto next_byte;
+        }
+        break;
+#endif
+    case 0xc5: /* 2-byte VEX */
+    case 0xc4: /* 3-byte VEX */
+        /* VEX prefixes are valid only in 32-bit and 64-bit code, not
+           in 16-bit or vm86 mode; otherwise the instruction is LES or
+           LDS.  */
+        if (s->code32 && !s->vm86) {
+            static const int pp_prefix[4] = {
+                0, PREFIX_DATA, PREFIX_REPZ, PREFIX_REPNZ
+            };
+            int vex3, vex2 = cpu_ldub_code(env, s->pc);
+
+            if (!CODE64(s) && (vex2 & 0xc0) != 0xc0) {
+                /* 4.1.4.6: In 32-bit mode, bits [7:6] must be 11b,
+                   otherwise the instruction is LES or LDS.  */
+                break;
+            }
+            s->pc++;
+
+            /* 4.1.1-4.1.3: No preceding lock, 66, f2, f3, or rex prefixes. */
+            if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ
+                            | PREFIX_LOCK | PREFIX_DATA)) {
+                goto illegal_op;
+            }
+#ifdef TARGET_X86_64
+            if (x86_64_hregs) {
+                goto illegal_op;
+            }
+#endif
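+            /* R, X, B and vvvv are stored inverted in the VEX bytes.
+               2-byte VEX (c5) packs R, vvvv, L and pp into its single
+               payload byte; 3-byte VEX (c4) carries R, X, B and the
+               opcode-map select first, then W, vvvv, L and pp.  */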
+            rex_r = (~vex2 >> 4) & 8;
+            if (b == 0xc5) {
+                vex3 = vex2;
+                b = cpu_ldub_code(env, s->pc++);
+            } else {
+#ifdef TARGET_X86_64
+                s->rex_x = (~vex2 >> 3) & 8;
+                s->rex_b = (~vex2 >> 2) & 8;
+#endif
+                vex3 = cpu_ldub_code(env, s->pc++);
+                rex_w = (vex3 >> 7) & 1;
+                switch (vex2 & 0x1f) {
+                case 0x01: /* Implied 0f leading opcode bytes.  */
+                    b = cpu_ldub_code(env, s->pc++) | 0x100;
+                    break;
+                case 0x02: /* Implied 0f 38 leading opcode bytes.  */
+                    b = 0x138;
+                    break;
+                case 0x03: /* Implied 0f 3a leading opcode bytes.  */
+                    b = 0x13a;
+                    break;
+                default:   /* Reserved for future use.  */
+                    goto unknown_op;
+                }
+            }
+            s->vex_v = (~vex3 >> 3) & 0xf;
+            s->vex_l = (vex3 >> 2) & 1;
+            prefixes |= pp_prefix[vex3 & 3] | PREFIX_VEX;
+        }
+        break;
+    }
+
+    /* Post-process prefixes.  */
+    if (CODE64(s)) {
+        /* In 64-bit mode, the default data size is 32-bit.  Select 64-bit
+           data with rex_w, and 16-bit data with 0x66; rex_w takes precedence
+           over 0x66 if both are present.  */
+        dflag = (rex_w > 0 ? MO_64 : prefixes & PREFIX_DATA ? MO_16 : MO_32);
+        /* In 64-bit mode, 0x67 selects 32-bit addressing.  */
+        aflag = (prefixes & PREFIX_ADR ? MO_32 : MO_64);
+    } else {
+        /* In 16/32-bit mode, 0x66 selects the opposite data size.  */
+        if (s->code32 ^ ((prefixes & PREFIX_DATA) != 0)) {
+            dflag = MO_32;
+        } else {
+            dflag = MO_16;
+        }
+        /* In 16/32-bit mode, 0x67 selects the opposite addressing.  */
+        if (s->code32 ^ ((prefixes & PREFIX_ADR) != 0)) {
+            aflag = MO_32;
+        } else {
+            aflag = MO_16;
+        }
+    }
+
+    s->prefix = prefixes;
+    s->aflag = aflag;
+    s->dflag = dflag;
+
+    /* now check op code */
+ reswitch:
+    switch(b) {
+    case 0x0f:
+        /**************************/
+        /* extended op code */
+        b = cpu_ldub_code(env, s->pc++) | 0x100;
+        goto reswitch;
+
+        /**************************/
+        /* arith & logic */
+    case 0x00 ... 0x05:
+    case 0x08 ... 0x0d:
+    case 0x10 ... 0x15:
+    case 0x18 ... 0x1d:
+    case 0x20 ... 0x25:
+    case 0x28 ... 0x2d:
+    case 0x30 ... 0x35:
+    case 0x38 ... 0x3d:
+        {
+            int op, f, val;
+            op = (b >> 3) & 7;
+            f = (b >> 1) & 3;
+
+            ot = mo_b_d(b, dflag);
+
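+            /* ModRM layout: mod in bits [7:6], reg in bits [5:3], rm in
+               bits [2:0]; the REX (or VEX) R and B bits supply bit 3 of
+               reg and rm respectively.  */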
+            switch(f) {
+            case 0: /* OP Ev, Gv */
+                modrm = cpu_ldub_code(env, s->pc++);
+                reg = ((modrm >> 3) & 7) | rex_r;
+                mod = (modrm >> 6) & 3;
+                rm = (modrm & 7) | REX_B(s);
+                if (mod != 3) {
+                    gen_lea_modrm(env, s, modrm);
+                    opreg = OR_TMP0;
+                } else if (op == OP_XORL && rm == reg) {
+                xor_zero:
+                    /* xor reg, reg optimisation */
+                    set_cc_op(s, CC_OP_CLR);
+                    tcg_gen_movi_tl(cpu_T0, 0);
+                    gen_op_mov_reg_v(ot, reg, cpu_T0);
+                    break;
+                } else {
+                    opreg = rm;
+                }
+                gen_op_mov_v_reg(ot, cpu_T1, reg);
+                gen_op(s, op, ot, opreg);
+                break;
+            case 1: /* OP Gv, Ev */
+                modrm = cpu_ldub_code(env, s->pc++);
+                mod = (modrm >> 6) & 3;
+                reg = ((modrm >> 3) & 7) | rex_r;
+                rm = (modrm & 7) | REX_B(s);
+                if (mod != 3) {
+                    gen_lea_modrm(env, s, modrm);
+                    gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
+                } else if (op == OP_XORL && rm == reg) {
+                    goto xor_zero;
+                } else {
+                    gen_op_mov_v_reg(ot, cpu_T1, rm);
+                }
+                gen_op(s, op, ot, reg);
+                break;
+            case 2: /* OP A, Iv */
+                val = insn_get(env, s, ot);
+                tcg_gen_movi_tl(cpu_T1, val);
+                gen_op(s, op, ot, OR_EAX);
+                break;
+            }
+        }
+        break;
+
+    case 0x82:
+        if (CODE64(s))
+            goto illegal_op;
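+        /* fall through: 0x82 is an alias of 0x80 (Eb, Ib), valid only
+           outside 64-bit mode */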
+    case 0x80: /* GRP1 */
+    case 0x81:
+    case 0x83:
+        {
+            int val;
+
+            ot = mo_b_d(b, dflag);
+
+            modrm = cpu_ldub_code(env, s->pc++);
+            mod = (modrm >> 6) & 3;
+            rm = (modrm & 7) | REX_B(s);
+            op = (modrm >> 3) & 7;
+
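+            /* rip_offset tells gen_lea_modrm() how many immediate bytes
+               still follow, so that RIP-relative addressing resolves to
+               the end of the instruction.  */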
+            if (mod != 3) {
+                if (b == 0x83)
+                    s->rip_offset = 1;
+                else
+                    s->rip_offset = insn_const_size(ot);
+                gen_lea_modrm(env, s, modrm);
+                opreg = OR_TMP0;
+            } else {
+                opreg = rm;
+            }
+
+            switch(b) {
+            default:
+            case 0x80:
+            case 0x81:
+            case 0x82:
+                val = insn_get(env, s, ot);
+                break;
+            case 0x83:
+                val = (int8_t)insn_get(env, s, MO_8);
+                break;
+            }
+            tcg_gen_movi_tl(cpu_T1, val);
+            gen_op(s, op, ot, opreg);
+        }
+        break;
+
+        /**************************/
+        /* inc, dec, and other misc arith */
+    case 0x40 ... 0x47: /* inc Gv */
+        ot = dflag;
+        gen_inc(s, ot, OR_EAX + (b & 7), 1);
+        break;
+    case 0x48 ... 0x4f: /* dec Gv */
+        ot = dflag;
+        gen_inc(s, ot, OR_EAX + (b & 7), -1);
+        break;
+    case 0xf6: /* GRP3 */
+    case 0xf7:
+        ot = mo_b_d(b, dflag);
+
+        modrm = cpu_ldub_code(env, s->pc++);
+        mod = (modrm >> 6) & 3;
+        rm = (modrm & 7) | REX_B(s);
+        op = (modrm >> 3) & 7;
+        if (mod != 3) {
+            if (op == 0) {
+                s->rip_offset = insn_const_size(ot);
+            }
+            gen_lea_modrm(env, s, modrm);
+            /* For those below that handle locked memory, don't load here.  */
+            if (!(s->prefix & PREFIX_LOCK)
+                || op != 2) {
+                gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
+            }
+        } else {
+            gen_op_mov_v_reg(ot, cpu_T0, rm);
+        }
+
+        switch(op) {
+        case 0: /* test */
+            val = insn_get(env, s, ot);
+            tcg_gen_movi_tl(cpu_T1, val);
+            gen_op_testl_T0_T1_cc();
+            set_cc_op(s, CC_OP_LOGICB + ot);
+            break;
+        case 2: /* not */
+            if (s->prefix & PREFIX_LOCK) {
+                if (mod == 3) {
+                    goto illegal_op;
+                }
+                tcg_gen_movi_tl(cpu_T0, ~0);
+                tcg_gen_atomic_xor_fetch_tl(cpu_T0, cpu_A0, cpu_T0,
+                                            s->mem_index, ot | MO_LE);
+            } else {
+                tcg_gen_not_tl(cpu_T0, cpu_T0);
+                if (mod != 3) {
+                    gen_op_st_v(s, ot, cpu_T0, cpu_A0);
+                } else {
+                    gen_op_mov_reg_v(ot, rm, cpu_T0);
+                }
+            }
+            break;
+        case 3: /* neg */
+            if (s->prefix & PREFIX_LOCK) {
+                TCGLabel *label1;
+                TCGv a0, t0, t1, t2;
+
+                if (mod == 3) {
+                    goto illegal_op;
+                }
+                a0 = tcg_temp_local_new();
+                t0 = tcg_temp_local_new();
+                label1 = gen_new_label();
+
+                tcg_gen_mov_tl(a0, cpu_A0);
+                tcg_gen_mov_tl(t0, cpu_T0);
+
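+                /* There is no atomic negate operation, so emulate one
+                   with a compare-and-swap loop: retry until the value
+                   seen by the cmpxchg matches the value just negated.  */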
+                gen_set_label(label1);
+                t1 = tcg_temp_new();
+                t2 = tcg_temp_new();
+                tcg_gen_mov_tl(t2, t0);
+                tcg_gen_neg_tl(t1, t0);
+                tcg_gen_atomic_cmpxchg_tl(t0, a0, t0, t1,
+                                          s->mem_index, ot | MO_LE);
+                tcg_temp_free(t1);
+                tcg_gen_brcond_tl(TCG_COND_NE, t0, t2, label1);
+
+                tcg_temp_free(t2);
+                tcg_temp_free(a0);
+                tcg_gen_mov_tl(cpu_T0, t0);
+                tcg_temp_free(t0);
+            } else {
+                tcg_gen_neg_tl(cpu_T0, cpu_T0);
+                if (mod != 3) {
+                    gen_op_st_v(s, ot, cpu_T0, cpu_A0);
+                } else {
+                    gen_op_mov_reg_v(ot, rm, cpu_T0);
+                }
+            }
+            gen_op_update_neg_cc();
+            set_cc_op(s, CC_OP_SUBB + ot);
+            break;
+        case 4: /* mul */
+            switch(ot) {
+            case MO_8:
+                gen_op_mov_v_reg(MO_8, cpu_T1, R_EAX);
+                tcg_gen_ext8u_tl(cpu_T0, cpu_T0);
+                tcg_gen_ext8u_tl(cpu_T1, cpu_T1);
+                /* XXX: use 32 bit mul which could be faster */
+                tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1);
+                gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
+                tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
+                tcg_gen_andi_tl(cpu_cc_src, cpu_T0, 0xff00);
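+                /* For the CC_OP_MUL* modes, cc_src holds the high half of
+                   the product; CF and OF are computed as (cc_src != 0).  */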
+                set_cc_op(s, CC_OP_MULB);
+                break;
+            case MO_16:
+                gen_op_mov_v_reg(MO_16, cpu_T1, R_EAX);
+                tcg_gen_ext16u_tl(cpu_T0, cpu_T0);
+                tcg_gen_ext16u_tl(cpu_T1, cpu_T1);
+                /* XXX: use 32 bit mul which could be faster */
+                tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1);
+                gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
+                tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
+                tcg_gen_shri_tl(cpu_T0, cpu_T0, 16);
+                gen_op_mov_reg_v(MO_16, R_EDX, cpu_T0);
+                tcg_gen_mov_tl(cpu_cc_src, cpu_T0);
+                set_cc_op(s, CC_OP_MULW);
+                break;
+            default:
+            case MO_32:
+                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
+                tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EAX]);
+                tcg_gen_mulu2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
+                                  cpu_tmp2_i32, cpu_tmp3_i32);
+                tcg_gen_extu_i32_tl(cpu_regs[R_EAX], cpu_tmp2_i32);
+                tcg_gen_extu_i32_tl(cpu_regs[R_EDX], cpu_tmp3_i32);
+                tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
+                tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
+                set_cc_op(s, CC_OP_MULL);
+                break;
+#ifdef TARGET_X86_64
+            case MO_64:
+                tcg_gen_mulu2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
+                                  cpu_T0, cpu_regs[R_EAX]);
+                tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
+                tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
+                set_cc_op(s, CC_OP_MULQ);
+                break;
+#endif
+            }
+            break;
+        case 5: /* imul */
+            switch(ot) {
+            case MO_8:
+                gen_op_mov_v_reg(MO_8, cpu_T1, R_EAX);
+                tcg_gen_ext8s_tl(cpu_T0, cpu_T0);
+                tcg_gen_ext8s_tl(cpu_T1, cpu_T1);
+                /* XXX: use 32 bit mul which could be faster */
+                tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1);
+                gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
+                tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
+                tcg_gen_ext8s_tl(cpu_tmp0, cpu_T0);
+                tcg_gen_sub_tl(cpu_cc_src, cpu_T0, cpu_tmp0);
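+                /* For signed imul, cc_src is the result minus its
+                   sign-extended truncation: non-zero exactly when the
+                   product overflowed, which raises CF and OF.  */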
+                set_cc_op(s, CC_OP_MULB);
+                break;
+            case MO_16:
+                gen_op_mov_v_reg(MO_16, cpu_T1, R_EAX);
+                tcg_gen_ext16s_tl(cpu_T0, cpu_T0);
+                tcg_gen_ext16s_tl(cpu_T1, cpu_T1);
+                /* XXX: use 32 bit mul which could be faster */
+                tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1);
+                gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
+                tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
+                tcg_gen_ext16s_tl(cpu_tmp0, cpu_T0);
+                tcg_gen_sub_tl(cpu_cc_src, cpu_T0, cpu_tmp0);
+                tcg_gen_shri_tl(cpu_T0, cpu_T0, 16);
+                gen_op_mov_reg_v(MO_16, R_EDX, cpu_T0);
+                set_cc_op(s, CC_OP_MULW);
+                break;
+            default:
+            case MO_32:
+                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
+                tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EAX]);
+                tcg_gen_muls2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
+                                  cpu_tmp2_i32, cpu_tmp3_i32);
+                tcg_gen_extu_i32_tl(cpu_regs[R_EAX], cpu_tmp2_i32);
+                tcg_gen_extu_i32_tl(cpu_regs[R_EDX], cpu_tmp3_i32);
+                tcg_gen_sari_i32(cpu_tmp2_i32, cpu_tmp2_i32, 31);
+                tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
+                tcg_gen_sub_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
+                tcg_gen_extu_i32_tl(cpu_cc_src, cpu_tmp2_i32);
+                set_cc_op(s, CC_OP_MULL);
+                break;
+#ifdef TARGET_X86_64
+            case MO_64:
+                tcg_gen_muls2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
+                                  cpu_T0, cpu_regs[R_EAX]);
+                tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
+                tcg_gen_sari_tl(cpu_cc_src, cpu_regs[R_EAX], 63);
+                tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_regs[R_EDX]);
+                set_cc_op(s, CC_OP_MULQ);
+                break;
+#endif
+            }
+            break;
+        case 6: /* div */
+            switch(ot) {
+            case MO_8:
+                gen_helper_divb_AL(cpu_env, cpu_T0);
+                break;
+            case MO_16:
+                gen_helper_divw_AX(cpu_env, cpu_T0);
+                break;
+            default:
+            case MO_32:
+                gen_helper_divl_EAX(cpu_env, cpu_T0);
+                break;
+#ifdef TARGET_X86_64
+            case MO_64:
+                gen_helper_divq_EAX(cpu_env, cpu_T0);
+                break;
+#endif
+            }
+            break;
+        case 7: /* idiv */
+            switch(ot) {
+            case MO_8:
+                gen_helper_idivb_AL(cpu_env, cpu_T0);
+                break;
+            case MO_16:
+                gen_helper_idivw_AX(cpu_env, cpu_T0);
+                break;
+            default:
+            case MO_32:
+                gen_helper_idivl_EAX(cpu_env, cpu_T0);
+                break;
+#ifdef TARGET_X86_64
+            case MO_64:
+                gen_helper_idivq_EAX(cpu_env, cpu_T0);
+                break;
+#endif
+            }
+            break;
+        default:
+            goto unknown_op;
+        }
+        break;
+
+    case 0xfe: /* GRP4 */
+    case 0xff: /* GRP5 */
+        ot = mo_b_d(b, dflag);
+
+        modrm = cpu_ldub_code(env, s->pc++);
+        mod = (modrm >> 6) & 3;
+        rm = (modrm & 7) | REX_B(s);
+        op = (modrm >> 3) & 7;
+        if (op >= 2 && b == 0xfe) {
+            goto unknown_op;
+        }
+        if (CODE64(s)) {
+            if (op == 2 || op == 4) {
+                /* operand size for jumps is 64 bit */
+                ot = MO_64;
+            } else if (op == 3 || op == 5) {
+                ot = dflag != MO_16 ? MO_32 + (rex_w == 1) : MO_16;
+            } else if (op == 6) {
+                /* default push size is 64 bit */
+                ot = mo_pushpop(s, dflag);
+            }
+        }
+        if (mod != 3) {
+            gen_lea_modrm(env, s, modrm);
+            if (op >= 2 && op != 3 && op != 5)
+                gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
+        } else {
+            gen_op_mov_v_reg(ot, cpu_T0, rm);
+        }
+
+        switch(op) {
+        case 0: /* inc Ev */
+            if (mod != 3)
+                opreg = OR_TMP0;
+            else
+                opreg = rm;
+            gen_inc(s, ot, opreg, 1);
+            break;
+        case 1: /* dec Ev */
+            if (mod != 3)
+                opreg = OR_TMP0;
+            else
+                opreg = rm;
+            gen_inc(s, ot, opreg, -1);
+            break;
+        case 2: /* call Ev */
+            /* XXX: optimize if memory (no 'and' is necessary) */
+            if (dflag == MO_16) {
+                tcg_gen_ext16u_tl(cpu_T0, cpu_T0);
+            }
+            next_eip = s->pc - s->cs_base;
+            tcg_gen_movi_tl(cpu_T1, next_eip);
+            gen_push_v(s, cpu_T1);
+            gen_op_jmp_v(cpu_T0);
+            gen_bnd_jmp(s);
+            gen_eob(s);
+            break;
+        case 3: /* lcall Ev */
+            gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
+            gen_add_A0_im(s, 1 << ot);
+            gen_op_ld_v(s, MO_16, cpu_T0, cpu_A0);
+        do_lcall:
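+            /* dflag - 1 is the operand-size shift the call helpers expect
+               (0 for 16-bit, 1 for 32-bit, 2 for 64-bit operands).  */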
+            if (s->pe && !s->vm86) {
+                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
+                gen_helper_lcall_protected(cpu_env, cpu_tmp2_i32, cpu_T1,
+                                           tcg_const_i32(dflag - 1),
+                                           tcg_const_tl(s->pc - s->cs_base));
+            } else {
+                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
+                gen_helper_lcall_real(cpu_env, cpu_tmp2_i32, cpu_T1,
+                                      tcg_const_i32(dflag - 1),
+                                      tcg_const_i32(s->pc - s->cs_base));
+            }
+            gen_eob(s);
+            break;
+        case 4: /* jmp Ev */
+            if (dflag == MO_16) {
+                tcg_gen_ext16u_tl(cpu_T0, cpu_T0);
+            }
+            gen_op_jmp_v(cpu_T0);
+            gen_bnd_jmp(s);
+            gen_eob(s);
+            break;
+        case 5: /* ljmp Ev */
+            gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
+            gen_add_A0_im(s, 1 << ot);
+            gen_op_ld_v(s, MO_16, cpu_T0, cpu_A0);
+        do_ljmp:
+            if (s->pe && !s->vm86) {
+                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
+                gen_helper_ljmp_protected(cpu_env, cpu_tmp2_i32, cpu_T1,
+                                          tcg_const_tl(s->pc - s->cs_base));
+            } else {
+                gen_op_movl_seg_T0_vm(R_CS);
+                gen_op_jmp_v(cpu_T1);
+            }
+            gen_eob(s);
+            break;
+        case 6: /* push Ev */
+            gen_push_v(s, cpu_T0);
+            break;
+        default:
+            goto unknown_op;
+        }
+        break;
+
+    case 0x84: /* test Ev, Gv */
+    case 0x85:
+        ot = mo_b_d(b, dflag);
+
+        modrm = cpu_ldub_code(env, s->pc++);
+        reg = ((modrm >> 3) & 7) | rex_r;
+
+        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
+        gen_op_mov_v_reg(ot, cpu_T1, reg);
+        gen_op_testl_T0_T1_cc();
+        set_cc_op(s, CC_OP_LOGICB + ot);
+        break;
+
+    case 0xa8: /* test eAX, Iv */
+    case 0xa9:
+        ot = mo_b_d(b, dflag);
+        val = insn_get(env, s, ot);
+
+        gen_op_mov_v_reg(ot, cpu_T0, OR_EAX);
+        tcg_gen_movi_tl(cpu_T1, val);
+        gen_op_testl_T0_T1_cc();
+        set_cc_op(s, CC_OP_LOGICB + ot);
+        break;
+
+    case 0x98: /* CWDE/CBW */
+        switch (dflag) {
+#ifdef TARGET_X86_64
+        case MO_64:
+            gen_op_mov_v_reg(MO_32, cpu_T0, R_EAX);
+            tcg_gen_ext32s_tl(cpu_T0, cpu_T0);
+            gen_op_mov_reg_v(MO_64, R_EAX, cpu_T0);
+            break;
+#endif
+        case MO_32:
+            gen_op_mov_v_reg(MO_16, cpu_T0, R_EAX);
+            tcg_gen_ext16s_tl(cpu_T0, cpu_T0);
+            gen_op_mov_reg_v(MO_32, R_EAX, cpu_T0);
+            break;
+        case MO_16:
+            gen_op_mov_v_reg(MO_8, cpu_T0, R_EAX);
+            tcg_gen_ext8s_tl(cpu_T0, cpu_T0);
+            gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
+            break;
+        default:
+            tcg_abort();
+        }
+        break;
+    case 0x99: /* CDQ/CWD */
+        switch (dflag) {
+#ifdef TARGET_X86_64
+        case MO_64:
+            gen_op_mov_v_reg(MO_64, cpu_T0, R_EAX);
+            tcg_gen_sari_tl(cpu_T0, cpu_T0, 63);
+            gen_op_mov_reg_v(MO_64, R_EDX, cpu_T0);
+            break;
+#endif
+        case MO_32:
+            gen_op_mov_v_reg(MO_32, cpu_T0, R_EAX);
+            tcg_gen_ext32s_tl(cpu_T0, cpu_T0);
+            tcg_gen_sari_tl(cpu_T0, cpu_T0, 31);
+            gen_op_mov_reg_v(MO_32, R_EDX, cpu_T0);
+            break;
+        case MO_16:
+            gen_op_mov_v_reg(MO_16, cpu_T0, R_EAX);
+            tcg_gen_ext16s_tl(cpu_T0, cpu_T0);
+            tcg_gen_sari_tl(cpu_T0, cpu_T0, 15);
+            gen_op_mov_reg_v(MO_16, R_EDX, cpu_T0);
+            break;
+        default:
+            tcg_abort();
+        }
+        break;
+    case 0x1af: /* imul Gv, Ev */
+    case 0x69: /* imul Gv, Ev, I */
+    case 0x6b:
+        ot = dflag;
+        modrm = cpu_ldub_code(env, s->pc++);
+        reg = ((modrm >> 3) & 7) | rex_r;
+        if (b == 0x69)
+            s->rip_offset = insn_const_size(ot);
+        else if (b == 0x6b)
+            s->rip_offset = 1;
+        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
+        if (b == 0x69) {
+            val = insn_get(env, s, ot);
+            tcg_gen_movi_tl(cpu_T1, val);
+        } else if (b == 0x6b) {
+            val = (int8_t)insn_get(env, s, MO_8);
+            tcg_gen_movi_tl(cpu_T1, val);
+        } else {
+            gen_op_mov_v_reg(ot, cpu_T1, reg);
+        }
+        switch (ot) {
+#ifdef TARGET_X86_64
+        case MO_64:
+            tcg_gen_muls2_i64(cpu_regs[reg], cpu_T1, cpu_T0, cpu_T1);
+            tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
+            tcg_gen_sari_tl(cpu_cc_src, cpu_cc_dst, 63);
+            tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_T1);
+            break;
+#endif
+        case MO_32:
+            tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
+            tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T1);
+            tcg_gen_muls2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
+                              cpu_tmp2_i32, cpu_tmp3_i32);
+            tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
+            tcg_gen_sari_i32(cpu_tmp2_i32, cpu_tmp2_i32, 31);
+            tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
+            tcg_gen_sub_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
+            tcg_gen_extu_i32_tl(cpu_cc_src, cpu_tmp2_i32);
+            break;
+        default:
+            tcg_gen_ext16s_tl(cpu_T0, cpu_T0);
+            tcg_gen_ext16s_tl(cpu_T1, cpu_T1);
+            /* XXX: use 32 bit mul which could be faster */
+            tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1);
+            tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
+            tcg_gen_ext16s_tl(cpu_tmp0, cpu_T0);
+            tcg_gen_sub_tl(cpu_cc_src, cpu_T0, cpu_tmp0);
+            gen_op_mov_reg_v(ot, reg, cpu_T0);
+            break;
+        }
+        set_cc_op(s, CC_OP_MULB + ot);
+        break;
+    case 0x1c0:
+    case 0x1c1: /* xadd Ev, Gv */
+        ot = mo_b_d(b, dflag);
+        modrm = cpu_ldub_code(env, s->pc++);
+        reg = ((modrm >> 3) & 7) | rex_r;
+        mod = (modrm >> 6) & 3;
+        gen_op_mov_v_reg(ot, cpu_T0, reg);
+        if (mod == 3) {
+            rm = (modrm & 7) | REX_B(s);
+            gen_op_mov_v_reg(ot, cpu_T1, rm);
+            tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_T1);
+            gen_op_mov_reg_v(ot, reg, cpu_T1);
+            gen_op_mov_reg_v(ot, rm, cpu_T0);
+        } else {
+            gen_lea_modrm(env, s, modrm);
+            if (s->prefix & PREFIX_LOCK) {
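+                /* fetch_add returns the old memory value in T1; recompute
+                   the sum in T0 so that the flags see the same operands as
+                   the non-atomic path below.  */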
+                tcg_gen_atomic_fetch_add_tl(cpu_T1, cpu_A0, cpu_T0,
+                                            s->mem_index, ot | MO_LE);
+                tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_T1);
+            } else {
+                gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
+                tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_T1);
+                gen_op_st_v(s, ot, cpu_T0, cpu_A0);
+            }
+            gen_op_mov_reg_v(ot, reg, cpu_T1);
+        }
+        gen_op_update2_cc();
+        set_cc_op(s, CC_OP_ADDB + ot);
+        break;
+    case 0x1b0:
+    case 0x1b1: /* cmpxchg Ev, Gv */
+        {
+            TCGv oldv, newv, cmpv;
+
+            ot = mo_b_d(b, dflag);
+            modrm = cpu_ldub_code(env, s->pc++);
+            reg = ((modrm >> 3) & 7) | rex_r;
+            mod = (modrm >> 6) & 3;
+            oldv = tcg_temp_new();
+            newv = tcg_temp_new();
+            cmpv = tcg_temp_new();
+            gen_op_mov_v_reg(ot, newv, reg);
+            tcg_gen_mov_tl(cmpv, cpu_regs[R_EAX]);
+
+            if (s->prefix & PREFIX_LOCK) {
+                if (mod == 3) {
+                    goto illegal_op;
+                }
+                gen_lea_modrm(env, s, modrm);
+                tcg_gen_atomic_cmpxchg_tl(oldv, cpu_A0, cmpv, newv,
+                                          s->mem_index, ot | MO_LE);
+                gen_op_mov_reg_v(ot, R_EAX, oldv);
+            } else {
+                if (mod == 3) {
+                    rm = (modrm & 7) | REX_B(s);
+                    gen_op_mov_v_reg(ot, oldv, rm);
+                } else {
+                    gen_lea_modrm(env, s, modrm);
+                    gen_op_ld_v(s, ot, oldv, cpu_A0);
+                    rm = 0; /* avoid warning */
+                }
+                gen_extu(ot, oldv);
+                gen_extu(ot, cmpv);
+                /* store value = (old == cmp ? new : old);  */
+                tcg_gen_movcond_tl(TCG_COND_EQ, newv, oldv, cmpv, newv, oldv);
+                if (mod == 3) {
+                    gen_op_mov_reg_v(ot, R_EAX, oldv);
+                    gen_op_mov_reg_v(ot, rm, newv);
+                } else {
+                    /* Perform an unconditional store cycle like a physical
+                       CPU; it must happen before the accumulator changes so
+                       that the instruction stays idempotent if the store
+                       faults and is restarted */
+                    gen_op_st_v(s, ot, newv, cpu_A0);
+                    gen_op_mov_reg_v(ot, R_EAX, oldv);
+                }
+            }
+            tcg_gen_mov_tl(cpu_cc_src, oldv);
+            tcg_gen_mov_tl(cpu_cc_srcT, cmpv);
+            tcg_gen_sub_tl(cpu_cc_dst, cmpv, oldv);
+            set_cc_op(s, CC_OP_SUBB + ot);
+            tcg_temp_free(oldv);
+            tcg_temp_free(newv);
+            tcg_temp_free(cmpv);
+        }
+        break;
+    case 0x1c7: /* cmpxchg8b/cmpxchg16b */
+        modrm = cpu_ldub_code(env, s->pc++);
+        mod = (modrm >> 6) & 3;
+        if ((mod == 3) || ((modrm & 0x38) != 0x8))
+            goto illegal_op;
+#ifdef TARGET_X86_64
+        if (dflag == MO_64) {
+            if (!(s->cpuid_ext_features & CPUID_EXT_CX16))
+                goto illegal_op;
+            gen_lea_modrm(env, s, modrm);
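+            /* Only a parallel (MTTCG) run can race a locked access; in a
+               serial run the unlocked helper is sufficient.  */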
+            if ((s->prefix & PREFIX_LOCK) && parallel_cpus) {
+                gen_helper_cmpxchg16b(cpu_env, cpu_A0);
+            } else {
+                gen_helper_cmpxchg16b_unlocked(cpu_env, cpu_A0);
+            }
+        } else
+#endif
+        {
+            if (!(s->cpuid_features & CPUID_CX8))
+                goto illegal_op;
+            gen_lea_modrm(env, s, modrm);
+            if ((s->prefix & PREFIX_LOCK) && parallel_cpus) {
+                gen_helper_cmpxchg8b(cpu_env, cpu_A0);
+            } else {
+                gen_helper_cmpxchg8b_unlocked(cpu_env, cpu_A0);
+            }
+        }
+        set_cc_op(s, CC_OP_EFLAGS);
+        break;
+
+        /**************************/
+        /* push/pop */
+    case 0x50 ... 0x57: /* push */
+        gen_op_mov_v_reg(MO_32, cpu_T0, (b & 7) | REX_B(s));
+        gen_push_v(s, cpu_T0);
+        break;
+    case 0x58 ... 0x5f: /* pop */
+        ot = gen_pop_T0(s);
+        /* NOTE: order is important for pop %sp */
+        gen_pop_update(s, ot);
+        gen_op_mov_reg_v(ot, (b & 7) | REX_B(s), cpu_T0);
+        break;
+    case 0x60: /* pusha */
+        if (CODE64(s))
+            goto illegal_op;
+        gen_pusha(s);
+        break;
+    case 0x61: /* popa */
+        if (CODE64(s))
+            goto illegal_op;
+        gen_popa(s);
+        break;
+    case 0x68: /* push Iv */
+    case 0x6a:
+        ot = mo_pushpop(s, dflag);
+        if (b == 0x68)
+            val = insn_get(env, s, ot);
+        else
+            val = (int8_t)insn_get(env, s, MO_8);
+        tcg_gen_movi_tl(cpu_T0, val);
+        gen_push_v(s, cpu_T0);
+        break;
+    case 0x8f: /* pop Ev */
+        modrm = cpu_ldub_code(env, s->pc++);
+        mod = (modrm >> 6) & 3;
+        ot = gen_pop_T0(s);
+        if (mod == 3) {
+            /* NOTE: order is important for pop %sp */
+            gen_pop_update(s, ot);
+            rm = (modrm & 7) | REX_B(s);
+            gen_op_mov_reg_v(ot, rm, cpu_T0);
+        } else {
+            /* NOTE: order is important too for MMU exceptions */
+            s->popl_esp_hack = 1 << ot;
+            gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
+            s->popl_esp_hack = 0;
+            gen_pop_update(s, ot);
+        }
+        break;
+    case 0xc8: /* enter */
+        {
+            int level;
+            val = cpu_lduw_code(env, s->pc);
+            s->pc += 2;
+            level = cpu_ldub_code(env, s->pc++);
+            gen_enter(s, val, level);
+        }
+        break;
+    case 0xc9: /* leave */
+        gen_leave(s);
+        break;
+    case 0x06: /* push es */
+    case 0x0e: /* push cs */
+    case 0x16: /* push ss */
+    case 0x1e: /* push ds */
+        if (CODE64(s))
+            goto illegal_op;
+        gen_op_movl_T0_seg(b >> 3);
+        gen_push_v(s, cpu_T0);
+        break;
+    case 0x1a0: /* push fs */
+    case 0x1a8: /* push gs */
+        gen_op_movl_T0_seg((b >> 3) & 7);
+        gen_push_v(s, cpu_T0);
+        break;
+    case 0x07: /* pop es */
+    case 0x17: /* pop ss */
+    case 0x1f: /* pop ds */
+        if (CODE64(s))
+            goto illegal_op;
+        reg = b >> 3;
+        ot = gen_pop_T0(s);
+        gen_movl_seg_T0(s, reg);
+        gen_pop_update(s, ot);
+        /* Note that reg == R_SS in gen_movl_seg_T0 always sets is_jmp.  */
+        if (s->is_jmp) {
+            gen_jmp_im(s->pc - s->cs_base);
+            if (reg == R_SS) {
+                s->tf = 0;
+                gen_eob_inhibit_irq(s, true);
+            } else {
+                gen_eob(s);
+            }
+        }
+        break;
+    case 0x1a1: /* pop fs */
+    case 0x1a9: /* pop gs */
+        ot = gen_pop_T0(s);
+        gen_movl_seg_T0(s, (b >> 3) & 7);
+        gen_pop_update(s, ot);
+        if (s->is_jmp) {
+            gen_jmp_im(s->pc - s->cs_base);
+            gen_eob(s);
+        }
+        break;
+
+        /**************************/
+        /* mov */
+    case 0x88:
+    case 0x89: /* mov Gv, Ev */
+        ot = mo_b_d(b, dflag);
+        modrm = cpu_ldub_code(env, s->pc++);
+        reg = ((modrm >> 3) & 7) | rex_r;
+
+        /* generate a generic store */
+        gen_ldst_modrm(env, s, modrm, ot, reg, 1);
+        break;
+    case 0xc6:
+    case 0xc7: /* mov Ev, Iv */
+        ot = mo_b_d(b, dflag);
+        modrm = cpu_ldub_code(env, s->pc++);
+        mod = (modrm >> 6) & 3;
+        if (mod != 3) {
+            s->rip_offset = insn_const_size(ot);
+            gen_lea_modrm(env, s, modrm);
+        }
+        val = insn_get(env, s, ot);
+        tcg_gen_movi_tl(cpu_T0, val);
+        if (mod != 3) {
+            gen_op_st_v(s, ot, cpu_T0, cpu_A0);
+        } else {
+            gen_op_mov_reg_v(ot, (modrm & 7) | REX_B(s), cpu_T0);
+        }
+        break;
+    case 0x8a:
+    case 0x8b: /* mov Ev, Gv */
+        ot = mo_b_d(b, dflag);
+        modrm = cpu_ldub_code(env, s->pc++);
+        reg = ((modrm >> 3) & 7) | rex_r;
+
+        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
+        gen_op_mov_reg_v(ot, reg, cpu_T0);
+        break;
+    case 0x8e: /* mov seg, Gv */
+        modrm = cpu_ldub_code(env, s->pc++);
+        reg = (modrm >> 3) & 7;
+        if (reg >= 6 || reg == R_CS)
+            goto illegal_op;
+        gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
+        gen_movl_seg_T0(s, reg);
+        /* Note that reg == R_SS in gen_movl_seg_T0 always sets is_jmp.  */
+        if (s->is_jmp) {
+            gen_jmp_im(s->pc - s->cs_base);
+            if (reg == R_SS) {
+                s->tf = 0;
+                gen_eob_inhibit_irq(s, true);
+            } else {
+                gen_eob(s);
+            }
+        }
+        break;
+    case 0x8c: /* mov Gv, seg */
+        modrm = cpu_ldub_code(env, s->pc++);
+        reg = (modrm >> 3) & 7;
+        mod = (modrm >> 6) & 3;
+        if (reg >= 6)
+            goto illegal_op;
+        gen_op_movl_T0_seg(reg);
+        ot = mod == 3 ? dflag : MO_16;
+        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
+        break;
+
+    case 0x1b6: /* movzbS Gv, Eb */
+    case 0x1b7: /* movzwS Gv, Eb */
+    case 0x1be: /* movsbS Gv, Eb */
+    case 0x1bf: /* movswS Gv, Eb */
+        {
+            TCGMemOp d_ot;
+            TCGMemOp s_ot;
+
+            /* d_ot is the size of the destination */
+            d_ot = dflag;
+            /* ot is the size of the source */
+            ot = (b & 1) + MO_8;
+            /* s_ot is the sign+size of the source */
+            s_ot = b & 8 ? MO_SIGN | ot : ot;
+
+            modrm = cpu_ldub_code(env, s->pc++);
+            reg = ((modrm >> 3) & 7) | rex_r;
+            mod = (modrm >> 6) & 3;
+            rm = (modrm & 7) | REX_B(s);
+
+            if (mod == 3) {
+                gen_op_mov_v_reg(ot, cpu_T0, rm);
+                switch (s_ot) {
+                case MO_UB:
+                    tcg_gen_ext8u_tl(cpu_T0, cpu_T0);
+                    break;
+                case MO_SB:
+                    tcg_gen_ext8s_tl(cpu_T0, cpu_T0);
+                    break;
+                case MO_UW:
+                    tcg_gen_ext16u_tl(cpu_T0, cpu_T0);
+                    break;
+                default:
+                case MO_SW:
+                    tcg_gen_ext16s_tl(cpu_T0, cpu_T0);
+                    break;
+                }
+                gen_op_mov_reg_v(d_ot, reg, cpu_T0);
+            } else {
+                gen_lea_modrm(env, s, modrm);
+                gen_op_ld_v(s, s_ot, cpu_T0, cpu_A0);
+                gen_op_mov_reg_v(d_ot, reg, cpu_T0);
+            }
+        }
+        break;
+
+    case 0x8d: /* lea */
+        modrm = cpu_ldub_code(env, s->pc++);
+        mod = (modrm >> 6) & 3;
+        if (mod == 3)
+            goto illegal_op;
+        reg = ((modrm >> 3) & 7) | rex_r;
+        {
+            AddressParts a = gen_lea_modrm_0(env, s, modrm);
+            TCGv ea = gen_lea_modrm_1(a);
+            gen_lea_v_seg(s, s->aflag, ea, -1, -1);
+            gen_op_mov_reg_v(dflag, reg, cpu_A0);
+        }
+        break;
+
+    case 0xa0: /* mov EAX, Ov */
+    case 0xa1:
+    case 0xa2: /* mov Ov, EAX */
+    case 0xa3:
+        {
+            target_ulong offset_addr;
+
+            ot = mo_b_d(b, dflag);
+            switch (s->aflag) {
+#ifdef TARGET_X86_64
+            case MO_64:
+                offset_addr = cpu_ldq_code(env, s->pc);
+                s->pc += 8;
+                break;
+#endif
+            default:
+                offset_addr = insn_get(env, s, s->aflag);
+                break;
+            }
+            tcg_gen_movi_tl(cpu_A0, offset_addr);
+            gen_add_A0_ds_seg(s);
+            if ((b & 2) == 0) {
+                gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
+                gen_op_mov_reg_v(ot, R_EAX, cpu_T0);
+            } else {
+                gen_op_mov_v_reg(ot, cpu_T0, R_EAX);
+                gen_op_st_v(s, ot, cpu_T0, cpu_A0);
+            }
+        }
+        break;
+    case 0xd7: /* xlat */
+        tcg_gen_mov_tl(cpu_A0, cpu_regs[R_EBX]);
+        tcg_gen_ext8u_tl(cpu_T0, cpu_regs[R_EAX]);
+        tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T0);
+        gen_extu(s->aflag, cpu_A0);
+        gen_add_A0_ds_seg(s);
+        gen_op_ld_v(s, MO_8, cpu_T0, cpu_A0);
+        gen_op_mov_reg_v(MO_8, R_EAX, cpu_T0);
+        break;
+    case 0xb0 ... 0xb7: /* mov R, Ib */
+        val = insn_get(env, s, MO_8);
+        tcg_gen_movi_tl(cpu_T0, val);
+        gen_op_mov_reg_v(MO_8, (b & 7) | REX_B(s), cpu_T0);
+        break;
+    case 0xb8 ... 0xbf: /* mov R, Iv */
+#ifdef TARGET_X86_64
+        if (dflag == MO_64) {
+            uint64_t tmp;
+            /* 64 bit case */
+            tmp = cpu_ldq_code(env, s->pc);
+            s->pc += 8;
+            reg = (b & 7) | REX_B(s);
+            tcg_gen_movi_tl(cpu_T0, tmp);
+            gen_op_mov_reg_v(MO_64, reg, cpu_T0);
+        } else
+#endif
+        {
+            ot = dflag;
+            val = insn_get(env, s, ot);
+            reg = (b & 7) | REX_B(s);
+            tcg_gen_movi_tl(cpu_T0, val);
+            gen_op_mov_reg_v(ot, reg, cpu_T0);
+        }
+        break;
+
+    case 0x91 ... 0x97: /* xchg R, EAX */
+    do_xchg_reg_eax:
+        ot = dflag;
+        reg = (b & 7) | REX_B(s);
+        rm = R_EAX;
+        goto do_xchg_reg;
+    case 0x86:
+    case 0x87: /* xchg Ev, Gv */
+        ot = mo_b_d(b, dflag);
+        modrm = cpu_ldub_code(env, s->pc++);
+        reg = ((modrm >> 3) & 7) | rex_r;
+        mod = (modrm >> 6) & 3;
+        if (mod == 3) {
+            rm = (modrm & 7) | REX_B(s);
+        do_xchg_reg:
+            gen_op_mov_v_reg(ot, cpu_T0, reg);
+            gen_op_mov_v_reg(ot, cpu_T1, rm);
+            gen_op_mov_reg_v(ot, rm, cpu_T0);
+            gen_op_mov_reg_v(ot, reg, cpu_T1);
+        } else {
+            gen_lea_modrm(env, s, modrm);
+            gen_op_mov_v_reg(ot, cpu_T0, reg);
+            /* for xchg, lock is implicit */
+            tcg_gen_atomic_xchg_tl(cpu_T1, cpu_A0, cpu_T0,
+                                   s->mem_index, ot | MO_LE);
+            gen_op_mov_reg_v(ot, reg, cpu_T1);
+        }
+        break;
+    case 0xc4: /* les Gv */
+        /* In CODE64 this is VEX3; see above.  */
+        op = R_ES;
+        goto do_lxx;
+    case 0xc5: /* lds Gv */
+        /* In CODE64 this is VEX2; see above.  */
+        op = R_DS;
+        goto do_lxx;
+    case 0x1b2: /* lss Gv */
+        op = R_SS;
+        goto do_lxx;
+    case 0x1b4: /* lfs Gv */
+        op = R_FS;
+        goto do_lxx;
+    case 0x1b5: /* lgs Gv */
+        op = R_GS;
+    do_lxx:
+        ot = dflag != MO_16 ? MO_32 : MO_16;
+        modrm = cpu_ldub_code(env, s->pc++);
+        reg = ((modrm >> 3) & 7) | rex_r;
+        mod = (modrm >> 6) & 3;
+        if (mod == 3)
+            goto illegal_op;
+        gen_lea_modrm(env, s, modrm);
+        gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
+        gen_add_A0_im(s, 1 << ot);
+        /* load the segment first to handle exceptions properly */
+        gen_op_ld_v(s, MO_16, cpu_T0, cpu_A0);
+        gen_movl_seg_T0(s, op);
+        /* then put the data */
+        gen_op_mov_reg_v(ot, reg, cpu_T1);
+        if (s->is_jmp) {
+            gen_jmp_im(s->pc - s->cs_base);
+            gen_eob(s);
+        }
+        break;
+
+        /************************/
+        /* shifts */
+    case 0xc0:
+    case 0xc1:
+        /* shift Ev,Ib */
+        shift = 2;
+    grp2:
+        {
+            ot = mo_b_d(b, dflag);
+            modrm = cpu_ldub_code(env, s->pc++);
+            mod = (modrm >> 6) & 3;
+            op = (modrm >> 3) & 7;
+
+            if (mod != 3) {
+                if (shift == 2) {
+                    s->rip_offset = 1;
+                }
+                gen_lea_modrm(env, s, modrm);
+                opreg = OR_TMP0;
+            } else {
+                opreg = (modrm & 7) | REX_B(s);
+            }
+
+            /* variable (CL) counts go through gen_shift(); constant counts
+               take the simpler gen_shifti() path */
+            if (shift == 0) {
+                gen_shift(s, op, ot, opreg, OR_ECX);
+            } else {
+                if (shift == 2) {
+                    shift = cpu_ldub_code(env, s->pc++);
+                }
+                gen_shifti(s, op, ot, opreg, shift);
+            }
+        }
+        break;
+    case 0xd0:
+    case 0xd1:
+        /* shift Ev,1 */
+        shift = 1;
+        goto grp2;
+    case 0xd2:
+    case 0xd3:
+        /* shift Ev,cl */
+        shift = 0;
+        goto grp2;
+
+    case 0x1a4: /* shld imm */
+        op = 0;
+        shift = 1;
+        goto do_shiftd;
+    case 0x1a5: /* shld cl */
+        op = 0;
+        shift = 0;
+        goto do_shiftd;
+    case 0x1ac: /* shrd imm */
+        op = 1;
+        shift = 1;
+        goto do_shiftd;
+    case 0x1ad: /* shrd cl */
+        op = 1;
+        shift = 0;
+    do_shiftd:
+        ot = dflag;
+        modrm = cpu_ldub_code(env, s->pc++);
+        mod = (modrm >> 6) & 3;
+        rm = (modrm & 7) | REX_B(s);
+        reg = ((modrm >> 3) & 7) | rex_r;
+        if (mod != 3) {
+            gen_lea_modrm(env, s, modrm);
+            opreg = OR_TMP0;
+        } else {
+            opreg = rm;
+        }
+        gen_op_mov_v_reg(ot, cpu_T1, reg);
+
+        if (shift) {
+            TCGv imm = tcg_const_tl(cpu_ldub_code(env, s->pc++));
+            gen_shiftd_rm_T1(s, ot, opreg, op, imm);
+            tcg_temp_free(imm);
+        } else {
+            gen_shiftd_rm_T1(s, ot, opreg, op, cpu_regs[R_ECX]);
+        }
+        break;
+
+        /************************/
+        /* floats */
+    case 0xd8 ... 0xdf:
+        if (s->flags & (HF_EM_MASK | HF_TS_MASK)) {
+            /* if CR0.EM or CR0.TS are set, generate an FPU exception */
+            /* XXX: what to do if illegal op? */
+            gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
+            break;
+        }
+        modrm = cpu_ldub_code(env, s->pc++);
+        mod = (modrm >> 6) & 3;
+        rm = modrm & 7;
+        op = ((b & 7) << 3) | ((modrm >> 3) & 7);
+        if (mod != 3) {
+            /* memory op */
+            gen_lea_modrm(env, s, modrm);
+            switch(op) {
+            case 0x00 ... 0x07: /* fxxxs */
+            case 0x10 ... 0x17: /* fixxxl */
+            case 0x20 ... 0x27: /* fxxxl */
+            case 0x30 ... 0x37: /* fixxx */
+                {
+                    int op1;
+                    op1 = op & 7;
+
+                    switch(op >> 4) {
+                    case 0:
+                        tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
+                                            s->mem_index, MO_LEUL);
+                        gen_helper_flds_FT0(cpu_env, cpu_tmp2_i32);
+                        break;
+                    case 1:
+                        tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
+                                            s->mem_index, MO_LEUL);
+                        gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32);
+                        break;
+                    case 2:
+                        tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0,
+                                            s->mem_index, MO_LEQ);
+                        gen_helper_fldl_FT0(cpu_env, cpu_tmp1_i64);
+                        break;
+                    case 3:
+                    default:
+                        tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
+                                            s->mem_index, MO_LESW);
+                        gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32);
+                        break;
+                    }
+
+                    gen_helper_fp_arith_ST0_FT0(op1);
+                    if (op1 == 3) {
+                        /* fcomp needs pop */
+                        gen_helper_fpop(cpu_env);
+                    }
+                }
+                break;
+            case 0x08: /* flds */
+            case 0x0a: /* fsts */
+            case 0x0b: /* fstps */
+            case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
+            case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
+            case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
+                switch(op & 7) {
+                case 0:
+                    switch(op >> 4) {
+                    case 0:
+                        tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
+                                            s->mem_index, MO_LEUL);
+                        gen_helper_flds_ST0(cpu_env, cpu_tmp2_i32);
+                        break;
+                    case 1:
+                        tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
+                                            s->mem_index, MO_LEUL);
+                        gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32);
+                        break;
+                    case 2:
+                        tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0,
+                                            s->mem_index, MO_LEQ);
+                        gen_helper_fldl_ST0(cpu_env, cpu_tmp1_i64);
+                        break;
+                    case 3:
+                    default:
+                        tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
+                                            s->mem_index, MO_LESW);
+                        gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32);
+                        break;
+                    }
+                    break;
+                case 1:
+                    /* XXX: the corresponding CPUID bit must be tested! */
+                    switch(op >> 4) {
+                    case 1:
+                        gen_helper_fisttl_ST0(cpu_tmp2_i32, cpu_env);
+                        tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
+                                            s->mem_index, MO_LEUL);
+                        break;
+                    case 2:
+                        gen_helper_fisttll_ST0(cpu_tmp1_i64, cpu_env);
+                        tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0,
+                                            s->mem_index, MO_LEQ);
+                        break;
+                    case 3:
+                    default:
+                        gen_helper_fistt_ST0(cpu_tmp2_i32, cpu_env);
+                        tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
+                                            s->mem_index, MO_LEUW);
+                        break;
+                    }
+                    gen_helper_fpop(cpu_env);
+                    break;
+                default:
+                    switch(op >> 4) {
+                    case 0:
+                        gen_helper_fsts_ST0(cpu_tmp2_i32, cpu_env);
+                        tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
+                                            s->mem_index, MO_LEUL);
+                        break;
+                    case 1:
+                        gen_helper_fistl_ST0(cpu_tmp2_i32, cpu_env);
+                        tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
+                                            s->mem_index, MO_LEUL);
+                        break;
+                    case 2:
+                        gen_helper_fstl_ST0(cpu_tmp1_i64, cpu_env);
+                        tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0,
+                                            s->mem_index, MO_LEQ);
+                        break;
+                    case 3:
+                    default:
+                        gen_helper_fist_ST0(cpu_tmp2_i32, cpu_env);
+                        tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
+                                            s->mem_index, MO_LEUW);
+                        break;
+                    }
+                    if ((op & 7) == 3)
+                        gen_helper_fpop(cpu_env);
+                    break;
+                }
+                break;
+            case 0x0c: /* fldenv mem */
+                gen_helper_fldenv(cpu_env, cpu_A0, tcg_const_i32(dflag - 1));
+                break;
+            case 0x0d: /* fldcw mem */
+                tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
+                                    s->mem_index, MO_LEUW);
+                gen_helper_fldcw(cpu_env, cpu_tmp2_i32);
+                break;
+            case 0x0e: /* fnstenv mem */
+                gen_helper_fstenv(cpu_env, cpu_A0, tcg_const_i32(dflag - 1));
+                break;
+            case 0x0f: /* fnstcw mem */
+                gen_helper_fnstcw(cpu_tmp2_i32, cpu_env);
+                tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
+                                    s->mem_index, MO_LEUW);
+                break;
+            case 0x1d: /* fldt mem */
+                gen_helper_fldt_ST0(cpu_env, cpu_A0);
+                break;
+            case 0x1f: /* fstpt mem */
+                gen_helper_fstt_ST0(cpu_env, cpu_A0);
+                gen_helper_fpop(cpu_env);
+                break;
+            case 0x2c: /* frstor mem */
+                gen_helper_frstor(cpu_env, cpu_A0, tcg_const_i32(dflag - 1));
+                break;
+            case 0x2e: /* fnsave mem */
+                gen_helper_fsave(cpu_env, cpu_A0, tcg_const_i32(dflag - 1));
+                break;
+            case 0x2f: /* fnstsw mem */
+                gen_helper_fnstsw(cpu_tmp2_i32, cpu_env);
+                tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
+                                    s->mem_index, MO_LEUW);
+                break;
+            case 0x3c: /* fbld */
+                gen_helper_fbld_ST0(cpu_env, cpu_A0);
+                break;
+            case 0x3e: /* fbstp */
+                gen_helper_fbst_ST0(cpu_env, cpu_A0);
+                gen_helper_fpop(cpu_env);
+                break;
+            case 0x3d: /* fildll */
+                tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
+                gen_helper_fildll_ST0(cpu_env, cpu_tmp1_i64);
+                break;
+            case 0x3f: /* fistpll */
+                gen_helper_fistll_ST0(cpu_tmp1_i64, cpu_env);
+                tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
+                gen_helper_fpop(cpu_env);
+                break;
+            default:
+                goto unknown_op;
+            }
+        } else {
+            /* register float ops */
+            opreg = rm;
+
+            switch(op) {
+            case 0x08: /* fld sti */
+                gen_helper_fpush(cpu_env);
+                gen_helper_fmov_ST0_STN(cpu_env,
+                                        tcg_const_i32((opreg + 1) & 7));
+                break;
+            case 0x09: /* fxchg sti */
+            case 0x29: /* fxchg4 sti, undocumented op */
+            case 0x39: /* fxchg7 sti, undocumented op */
+                gen_helper_fxchg_ST0_STN(cpu_env, tcg_const_i32(opreg));
+                break;
+            case 0x0a: /* grp d9/2 */
+                switch(rm) {
+                case 0: /* fnop */
+                    /* check exceptions (FreeBSD FPU probe) */
+                    gen_helper_fwait(cpu_env);
+                    break;
+                default:
+                    goto unknown_op;
+                }
+                break;
+            case 0x0c: /* grp d9/4 */
+                switch(rm) {
+                case 0: /* fchs */
+                    gen_helper_fchs_ST0(cpu_env);
+                    break;
+                case 1: /* fabs */
+                    gen_helper_fabs_ST0(cpu_env);
+                    break;
+                case 4: /* ftst */
+                    gen_helper_fldz_FT0(cpu_env);
+                    gen_helper_fcom_ST0_FT0(cpu_env);
+                    break;
+                case 5: /* fxam */
+                    gen_helper_fxam_ST0(cpu_env);
+                    break;
+                default:
+                    goto unknown_op;
+                }
+                break;
+            case 0x0d: /* grp d9/5 */
+                {
+                    switch(rm) {
+                    case 0:
+                        gen_helper_fpush(cpu_env);
+                        gen_helper_fld1_ST0(cpu_env);
+                        break;
+                    case 1:
+                        gen_helper_fpush(cpu_env);
+                        gen_helper_fldl2t_ST0(cpu_env);
+                        break;
+                    case 2:
+                        gen_helper_fpush(cpu_env);
+                        gen_helper_fldl2e_ST0(cpu_env);
+                        break;
+                    case 3:
+                        gen_helper_fpush(cpu_env);
+                        gen_helper_fldpi_ST0(cpu_env);
+                        break;
+                    case 4:
+                        gen_helper_fpush(cpu_env);
+                        gen_helper_fldlg2_ST0(cpu_env);
+                        break;
+                    case 5:
+                        gen_helper_fpush(cpu_env);
+                        gen_helper_fldln2_ST0(cpu_env);
+                        break;
+                    case 6:
+                        gen_helper_fpush(cpu_env);
+                        gen_helper_fldz_ST0(cpu_env);
+                        break;
+                    default:
+                        goto unknown_op;
+                    }
+                }
+                break;
+            case 0x0e: /* grp d9/6 */
+                switch(rm) {
+                case 0: /* f2xm1 */
+                    gen_helper_f2xm1(cpu_env);
+                    break;
+                case 1: /* fyl2x */
+                    gen_helper_fyl2x(cpu_env);
+                    break;
+                case 2: /* fptan */
+                    gen_helper_fptan(cpu_env);
+                    break;
+                case 3: /* fpatan */
+                    gen_helper_fpatan(cpu_env);
+                    break;
+                case 4: /* fxtract */
+                    gen_helper_fxtract(cpu_env);
+                    break;
+                case 5: /* fprem1 */
+                    gen_helper_fprem1(cpu_env);
+                    break;
+                case 6: /* fdecstp */
+                    gen_helper_fdecstp(cpu_env);
+                    break;
+                default:
+                case 7: /* fincstp */
+                    gen_helper_fincstp(cpu_env);
+                    break;
+                }
+                break;
+            case 0x0f: /* grp d9/7 */
+                switch(rm) {
+                case 0: /* fprem */
+                    gen_helper_fprem(cpu_env);
+                    break;
+                case 1: /* fyl2xp1 */
+                    gen_helper_fyl2xp1(cpu_env);
+                    break;
+                case 2: /* fsqrt */
+                    gen_helper_fsqrt(cpu_env);
+                    break;
+                case 3: /* fsincos */
+                    gen_helper_fsincos(cpu_env);
+                    break;
+                case 5: /* fscale */
+                    gen_helper_fscale(cpu_env);
+                    break;
+                case 4: /* frndint */
+                    gen_helper_frndint(cpu_env);
+                    break;
+                case 6: /* fsin */
+                    gen_helper_fsin(cpu_env);
+                    break;
+                default:
+                case 7: /* fcos */
+                    gen_helper_fcos(cpu_env);
+                    break;
+                }
+                break;
+            case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
+            case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
+            case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
+                {
+                    int op1;
+
+                    op1 = op & 7;
+                    if (op >= 0x20) {
+                        gen_helper_fp_arith_STN_ST0(op1, opreg);
+                        if (op >= 0x30)
+                            gen_helper_fpop(cpu_env);
+                    } else {
+                        gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
+                        gen_helper_fp_arith_ST0_FT0(op1);
+                    }
+                }
+                break;
+            case 0x02: /* fcom */
+            case 0x22: /* fcom2, undocumented op */
+                gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
+                gen_helper_fcom_ST0_FT0(cpu_env);
+                break;
+            case 0x03: /* fcomp */
+            case 0x23: /* fcomp3, undocumented op */
+            case 0x32: /* fcomp5, undocumented op */
+                gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
+                gen_helper_fcom_ST0_FT0(cpu_env);
+                gen_helper_fpop(cpu_env);
+                break;
+            case 0x15: /* da/5 */
+                switch(rm) {
+                case 1: /* fucompp */
+                    gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1));
+                    gen_helper_fucom_ST0_FT0(cpu_env);
+                    gen_helper_fpop(cpu_env);
+                    gen_helper_fpop(cpu_env);
+                    break;
+                default:
+                    goto unknown_op;
+                }
+                break;
+            case 0x1c:
+                switch(rm) {
+                case 0: /* feni (287 only, just do nop here) */
+                    break;
+                case 1: /* fdisi (287 only, just do nop here) */
+                    break;
+                case 2: /* fclex */
+                    gen_helper_fclex(cpu_env);
+                    break;
+                case 3: /* fninit */
+                    gen_helper_fninit(cpu_env);
+                    break;
+                case 4: /* fsetpm (287 only, just do nop here) */
+                    break;
+                default:
+                    goto unknown_op;
+                }
+                break;
+            case 0x1d: /* fucomi */
+                if (!(s->cpuid_features & CPUID_CMOV)) {
+                    goto illegal_op;
+                }
+                gen_update_cc_op(s);
+                gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
+                gen_helper_fucomi_ST0_FT0(cpu_env);
+                set_cc_op(s, CC_OP_EFLAGS);
+                break;
+            case 0x1e: /* fcomi */
+                if (!(s->cpuid_features & CPUID_CMOV)) {
+                    goto illegal_op;
+                }
+                gen_update_cc_op(s);
+                gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
+                gen_helper_fcomi_ST0_FT0(cpu_env);
+                set_cc_op(s, CC_OP_EFLAGS);
+                break;
+            case 0x28: /* ffree sti */
+                gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg));
+                break;
+            case 0x2a: /* fst sti */
+                gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg));
+                break;
+            case 0x2b: /* fstp sti */
+            case 0x0b: /* fstp1 sti, undocumented op */
+            case 0x3a: /* fstp8 sti, undocumented op */
+            case 0x3b: /* fstp9 sti, undocumented op */
+                gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg));
+                gen_helper_fpop(cpu_env);
+                break;
+            case 0x2c: /* fucom st(i) */
+                gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
+                gen_helper_fucom_ST0_FT0(cpu_env);
+                break;
+            case 0x2d: /* fucomp st(i) */
+                gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
+                gen_helper_fucom_ST0_FT0(cpu_env);
+                gen_helper_fpop(cpu_env);
+                break;
+            case 0x33: /* de/3 */
+                switch(rm) {
+                case 1: /* fcompp */
+                    gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1));
+                    gen_helper_fcom_ST0_FT0(cpu_env);
+                    gen_helper_fpop(cpu_env);
+                    gen_helper_fpop(cpu_env);
+                    break;
+                default:
+                    goto unknown_op;
+                }
+                break;
+            case 0x38: /* ffreep sti, undocumented op */
+                gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg));
+                gen_helper_fpop(cpu_env);
+                break;
+            case 0x3c: /* df/4 */
+                switch(rm) {
+                case 0:
+                    gen_helper_fnstsw(cpu_tmp2_i32, cpu_env);
+                    tcg_gen_extu_i32_tl(cpu_T0, cpu_tmp2_i32);
+                    gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
+                    break;
+                default:
+                    goto unknown_op;
+                }
+                break;
+            case 0x3d: /* fucomip */
+                if (!(s->cpuid_features & CPUID_CMOV)) {
+                    goto illegal_op;
+                }
+                gen_update_cc_op(s);
+                gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
+                gen_helper_fucomi_ST0_FT0(cpu_env);
+                gen_helper_fpop(cpu_env);
+                set_cc_op(s, CC_OP_EFLAGS);
+                break;
+            case 0x3e: /* fcomip */
+                if (!(s->cpuid_features & CPUID_CMOV)) {
+                    goto illegal_op;
+                }
+                gen_update_cc_op(s);
+                gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
+                gen_helper_fcomi_ST0_FT0(cpu_env);
+                gen_helper_fpop(cpu_env);
+                set_cc_op(s, CC_OP_EFLAGS);
+                break;
+            case 0x10 ... 0x13: /* fcmovxx */
+            case 0x18 ... 0x1b:
+                {
+                    int op1;
+                    TCGLabel *l1;
+                    static const uint8_t fcmov_cc[8] = {
+                        (JCC_B << 1),
+                        (JCC_Z << 1),
+                        (JCC_BE << 1),
+                        (JCC_P << 1),
+                    };
+
+                    if (!(s->cpuid_features & CPUID_CMOV)) {
+                        goto illegal_op;
+                    }
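+                    /* The low two opcode bits index the base condition
+                       (B/Z/BE/P); bit 3 of the opcode selects the negated
+                       form.  op1 is the condition under which the move is
+                       *skipped*, so the branch below jumps past the fmov.  */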
+                    op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1);
+                    l1 = gen_new_label();
+                    gen_jcc1_noeob(s, op1, l1);
+                    gen_helper_fmov_ST0_STN(cpu_env, tcg_const_i32(opreg));
+                    gen_set_label(l1);
+                }
+                break;
+            default:
+                goto unknown_op;
+            }
+        }
+        break;
+        /************************/
+        /* string ops */
+
+    case 0xa4: /* movsS */
+    case 0xa5:
+        ot = mo_b_d(b, dflag);
+        if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
+            gen_repz_movs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
+        } else {
+            gen_movs(s, ot);
+        }
+        break;
+
+    case 0xaa: /* stosS */
+    case 0xab:
+        ot = mo_b_d(b, dflag);
+        if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
+            gen_repz_stos(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
+        } else {
+            gen_stos(s, ot);
+        }
+        break;
+    case 0xac: /* lodsS */
+    case 0xad:
+        ot = mo_b_d(b, dflag);
+        if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
+            gen_repz_lods(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
+        } else {
+            gen_lods(s, ot);
+        }
+        break;
+    case 0xae: /* scasS */
+    case 0xaf:
+        ot = mo_b_d(b, dflag);
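+        /* For scas the REP prefix matters: repnz repeats while ZF is
+           clear, repz while ZF is set; the final argument to
+           gen_repz_scas (1 for repnz, 0 for repz) selects the test.  */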
+        if (prefixes & PREFIX_REPNZ) {
+            gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
+        } else if (prefixes & PREFIX_REPZ) {
+            gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
+        } else {
+            gen_scas(s, ot);
+        }
+        break;
+
+    case 0xa6: /* cmpsS */
+    case 0xa7:
+        ot = mo_b_d(b, dflag);
+        if (prefixes & PREFIX_REPNZ) {
+            gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
+        } else if (prefixes & PREFIX_REPZ) {
+            gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
+        } else {
+            gen_cmps(s, ot);
+        }
+        break;
+    case 0x6c: /* insS */
+    case 0x6d:
+        ot = mo_b_d32(b, dflag);
+        tcg_gen_ext16u_tl(cpu_T0, cpu_regs[R_EDX]);
+        gen_check_io(s, ot, pc_start - s->cs_base,
+                     SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes) | 4);
+        if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
+            gen_repz_ins(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
+        } else {
+            gen_ins(s, ot);
+            if (s->tb->cflags & CF_USE_ICOUNT) {
+                gen_jmp(s, s->pc - s->cs_base);
+            }
+        }
+        break;
+    case 0x6e: /* outsS */
+    case 0x6f:
+        ot = mo_b_d32(b, dflag);
+        tcg_gen_ext16u_tl(cpu_T0, cpu_regs[R_EDX]);
+        gen_check_io(s, ot, pc_start - s->cs_base,
+                     svm_is_rep(prefixes) | 4);
+        if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
+            gen_repz_outs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
+        } else {
+            gen_outs(s, ot);
+            if (s->tb->cflags & CF_USE_ICOUNT) {
+                gen_jmp(s, s->pc - s->cs_base);
+            }
+        }
+        break;
+
+        /************************/
+        /* port I/O */
+
+    case 0xe4:
+    case 0xe5:
+        ot = mo_b_d32(b, dflag);
+        val = cpu_ldub_code(env, s->pc++);
+        tcg_gen_movi_tl(cpu_T0, val);
+        gen_check_io(s, ot, pc_start - s->cs_base,
+                     SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
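+        /* With icount, the I/O access must land on a deterministic
+           instruction boundary: bracket it with gen_io_start/gen_io_end
+           and end the TB afterwards so the instruction count stays exact.  */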
+        if (s->tb->cflags & CF_USE_ICOUNT) {
+            gen_io_start();
+        }
+        tcg_gen_movi_i32(cpu_tmp2_i32, val);
+        gen_helper_in_func(ot, cpu_T1, cpu_tmp2_i32);
+        gen_op_mov_reg_v(ot, R_EAX, cpu_T1);
+        gen_bpt_io(s, cpu_tmp2_i32, ot);
+        if (s->tb->cflags & CF_USE_ICOUNT) {
+            gen_io_end();
+            gen_jmp(s, s->pc - s->cs_base);
+        }
+        break;
+    case 0xe6:
+    case 0xe7:
+        ot = mo_b_d32(b, dflag);
+        val = cpu_ldub_code(env, s->pc++);
+        tcg_gen_movi_tl(cpu_T0, val);
+        gen_check_io(s, ot, pc_start - s->cs_base,
+                     svm_is_rep(prefixes));
+        gen_op_mov_v_reg(ot, cpu_T1, R_EAX);
+
+        if (s->tb->cflags & CF_USE_ICOUNT) {
+            gen_io_start();
+        }
+        tcg_gen_movi_i32(cpu_tmp2_i32, val);
+        tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T1);
+        gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
+        gen_bpt_io(s, cpu_tmp2_i32, ot);
+        if (s->tb->cflags & CF_USE_ICOUNT) {
+            gen_io_end();
+            gen_jmp(s, s->pc - s->cs_base);
+        }
+        break;
+    case 0xec:
+    case 0xed:
+        ot = mo_b_d32(b, dflag);
+        tcg_gen_ext16u_tl(cpu_T0, cpu_regs[R_EDX]);
+        gen_check_io(s, ot, pc_start - s->cs_base,
+                     SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
+        if (s->tb->cflags & CF_USE_ICOUNT) {
+            gen_io_start();
+        }
+        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
+        gen_helper_in_func(ot, cpu_T1, cpu_tmp2_i32);
+        gen_op_mov_reg_v(ot, R_EAX, cpu_T1);
+        gen_bpt_io(s, cpu_tmp2_i32, ot);
+        if (s->tb->cflags & CF_USE_ICOUNT) {
+            gen_io_end();
+            gen_jmp(s, s->pc - s->cs_base);
+        }
+        break;
+    case 0xee:
+    case 0xef:
+        ot = mo_b_d32(b, dflag);
+        tcg_gen_ext16u_tl(cpu_T0, cpu_regs[R_EDX]);
+        gen_check_io(s, ot, pc_start - s->cs_base,
+                     svm_is_rep(prefixes));
+        gen_op_mov_v_reg(ot, cpu_T1, R_EAX);
+
+        if (s->tb->cflags & CF_USE_ICOUNT) {
+            gen_io_start();
+        }
+        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
+        tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T1);
+        gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
+        gen_bpt_io(s, cpu_tmp2_i32, ot);
+        if (s->tb->cflags & CF_USE_ICOUNT) {
+            gen_io_end();
+            gen_jmp(s, s->pc - s->cs_base);
+        }
+        break;
+
+        /************************/
+        /* control */
+    case 0xc2: /* ret im */
+        val = cpu_ldsw_code(env, s->pc);
+        s->pc += 2;
+        ot = gen_pop_T0(s);
+        gen_stack_update(s, val + (1 << ot));
+        /* Note that gen_pop_T0 uses a zero-extending load.  */
+        gen_op_jmp_v(cpu_T0);
+        gen_bnd_jmp(s);
+        gen_eob(s);
+        break;
+    case 0xc3: /* ret */
+        ot = gen_pop_T0(s);
+        gen_pop_update(s, ot);
+        /* Note that gen_pop_T0 uses a zero-extending load.  */
+        gen_op_jmp_v(cpu_T0);
+        gen_bnd_jmp(s);
+        gen_eob(s);
+        break;
+    case 0xca: /* lret im */
+        val = cpu_ldsw_code(env, s->pc);
+        s->pc += 2;
+    do_lret:
+        if (s->pe && !s->vm86) {
+            gen_update_cc_op(s);
+            gen_jmp_im(pc_start - s->cs_base);
+            gen_helper_lret_protected(cpu_env, tcg_const_i32(dflag - 1),
+                                      tcg_const_i32(val));
+        } else {
+            gen_stack_A0(s);
+            /* pop offset */
+            gen_op_ld_v(s, dflag, cpu_T0, cpu_A0);
+            /* NOTE: keeping EIP updated is not a problem in case of
+               exception */
+            gen_op_jmp_v(cpu_T0);
+            /* pop selector */
+            gen_add_A0_im(s, 1 << dflag);
+            gen_op_ld_v(s, dflag, cpu_T0, cpu_A0);
+            gen_op_movl_seg_T0_vm(R_CS);
+            /* add stack offset */
+            gen_stack_update(s, val + (2 << dflag));
+        }
+        gen_eob(s);
+        break;
+    case 0xcb: /* lret */
+        val = 0;
+        goto do_lret;
+    case 0xcf: /* iret */
+        gen_svm_check_intercept(s, pc_start, SVM_EXIT_IRET);
+        if (!s->pe) {
+            /* real mode */
+            gen_helper_iret_real(cpu_env, tcg_const_i32(dflag - 1));
+            set_cc_op(s, CC_OP_EFLAGS);
+        } else if (s->vm86) {
+            if (s->iopl != 3) {
+                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+            } else {
+                gen_helper_iret_real(cpu_env, tcg_const_i32(dflag - 1));
+                set_cc_op(s, CC_OP_EFLAGS);
+            }
+        } else {
+            gen_helper_iret_protected(cpu_env, tcg_const_i32(dflag - 1),
+                                      tcg_const_i32(s->pc - s->cs_base));
+            set_cc_op(s, CC_OP_EFLAGS);
+        }
+        gen_eob(s);
+        break;
+    case 0xe8: /* call im */
+        {
+            if (dflag != MO_16) {
+                tval = (int32_t)insn_get(env, s, MO_32);
+            } else {
+                tval = (int16_t)insn_get(env, s, MO_16);
+            }
+            next_eip = s->pc - s->cs_base;
+            tval += next_eip;
+            if (dflag == MO_16) {
+                tval &= 0xffff;
+            } else if (!CODE64(s)) {
+                tval &= 0xffffffff;
+            }
+            tcg_gen_movi_tl(cpu_T0, next_eip);
+            gen_push_v(s, cpu_T0);
+            gen_bnd_jmp(s);
+            gen_jmp(s, tval);
+        }
+        break;
+    case 0x9a: /* lcall im */
+        {
+            unsigned int selector, offset;
+
+            if (CODE64(s))
+                goto illegal_op;
+            ot = dflag;
+            offset = insn_get(env, s, ot);
+            selector = insn_get(env, s, MO_16);
+
+            tcg_gen_movi_tl(cpu_T0, selector);
+            tcg_gen_movi_tl(cpu_T1, offset);
+        }
+        goto do_lcall;
+    case 0xe9: /* jmp im */
+        if (dflag != MO_16) {
+            tval = (int32_t)insn_get(env, s, MO_32);
+        } else {
+            tval = (int16_t)insn_get(env, s, MO_16);
+        }
+        tval += s->pc - s->cs_base;
+        if (dflag == MO_16) {
+            tval &= 0xffff;
+        } else if (!CODE64(s)) {
+            tval &= 0xffffffff;
+        }
+        gen_bnd_jmp(s);
+        gen_jmp(s, tval);
+        break;
+    case 0xea: /* ljmp im */
+        {
+            unsigned int selector, offset;
+
+            if (CODE64(s))
+                goto illegal_op;
+            ot = dflag;
+            offset = insn_get(env, s, ot);
+            selector = insn_get(env, s, MO_16);
+
+            tcg_gen_movi_tl(cpu_T0, selector);
+            tcg_gen_movi_tl(cpu_T1, offset);
+        }
+        goto do_ljmp;
+    case 0xeb: /* jmp Jb */
+        tval = (int8_t)insn_get(env, s, MO_8);
+        tval += s->pc - s->cs_base;
+        if (dflag == MO_16) {
+            tval &= 0xffff;
+        }
+        gen_jmp(s, tval);
+        break;
+    case 0x70 ... 0x7f: /* jcc Jb */
+        tval = (int8_t)insn_get(env, s, MO_8);
+        goto do_jcc;
+    case 0x180 ... 0x18f: /* jcc Jv */
+        if (dflag != MO_16) {
+            tval = (int32_t)insn_get(env, s, MO_32);
+        } else {
+            tval = (int16_t)insn_get(env, s, MO_16);
+        }
+    do_jcc:
+        next_eip = s->pc - s->cs_base;
+        tval += next_eip;
+        if (dflag == MO_16) {
+            tval &= 0xffff;
+        }
+        gen_bnd_jmp(s);
+        gen_jcc(s, b, tval, next_eip);
+        break;
+
+    case 0x190 ... 0x19f: /* setcc Gv */
+        modrm = cpu_ldub_code(env, s->pc++);
+        gen_setcc1(s, b, cpu_T0);
+        gen_ldst_modrm(env, s, modrm, MO_8, OR_TMP0, 1);
+        break;
+    case 0x140 ... 0x14f: /* cmov Gv, Ev */
+        if (!(s->cpuid_features & CPUID_CMOV)) {
+            goto illegal_op;
+        }
+        ot = dflag;
+        modrm = cpu_ldub_code(env, s->pc++);
+        reg = ((modrm >> 3) & 7) | rex_r;
+        gen_cmovcc1(env, s, ot, b, modrm, reg);
+        break;
+
+        /************************/
+        /* flags */
+    case 0x9c: /* pushf */
+        gen_svm_check_intercept(s, pc_start, SVM_EXIT_PUSHF);
+        if (s->vm86 && s->iopl != 3) {
+            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+        } else {
+            gen_update_cc_op(s);
+            gen_helper_read_eflags(cpu_T0, cpu_env);
+            gen_push_v(s, cpu_T0);
+        }
+        break;
+    case 0x9d: /* popf */
+        gen_svm_check_intercept(s, pc_start, SVM_EXIT_POPF);
+        if (s->vm86 && s->iopl != 3) {
+            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+        } else {
+            ot = gen_pop_T0(s);
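+            /* The writable flag set depends on privilege: TF/AC/ID/NT are
+               always writable here; IF additionally when CPL <= IOPL; and
+               IOPL only at CPL 0.  A 16-bit operand size limits the
+               update to the low word.  */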
+            if (s->cpl == 0) {
+                if (dflag != MO_16) {
+                    gen_helper_write_eflags(cpu_env, cpu_T0,
+                                            tcg_const_i32((TF_MASK | AC_MASK |
+                                                           ID_MASK | NT_MASK |
+                                                           IF_MASK |
+                                                           IOPL_MASK)));
+                } else {
+                    gen_helper_write_eflags(cpu_env, cpu_T0,
+                                            tcg_const_i32((TF_MASK | AC_MASK |
+                                                           ID_MASK | NT_MASK |
+                                                           IF_MASK | IOPL_MASK)
+                                                          & 0xffff));
+                }
+            } else {
+                if (s->cpl <= s->iopl) {
+                    if (dflag != MO_16) {
+                        gen_helper_write_eflags(cpu_env, cpu_T0,
+                                                tcg_const_i32((TF_MASK |
+                                                               AC_MASK |
+                                                               ID_MASK |
+                                                               NT_MASK |
+                                                               IF_MASK)));
+                    } else {
+                        gen_helper_write_eflags(cpu_env, cpu_T0,
+                                                tcg_const_i32((TF_MASK |
+                                                               AC_MASK |
+                                                               ID_MASK |
+                                                               NT_MASK |
+                                                               IF_MASK)
+                                                              & 0xffff));
+                    }
+                } else {
+                    if (dflag != MO_16) {
+                        gen_helper_write_eflags(cpu_env, cpu_T0,
+                                           tcg_const_i32((TF_MASK | AC_MASK |
+                                                          ID_MASK | NT_MASK)));
+                    } else {
+                        gen_helper_write_eflags(cpu_env, cpu_T0,
+                                           tcg_const_i32((TF_MASK | AC_MASK |
+                                                          ID_MASK | NT_MASK)
+                                                         & 0xffff));
+                    }
+                }
+            }
+            gen_pop_update(s, ot);
+            set_cc_op(s, CC_OP_EFLAGS);
+            /* abort translation because TF/AC flags may change */
+            gen_jmp_im(s->pc - s->cs_base);
+            gen_eob(s);
+        }
+        break;
+    case 0x9e: /* sahf */
+        if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
+            goto illegal_op;
+        gen_op_mov_v_reg(MO_8, cpu_T0, R_AH);
+        gen_compute_eflags(s);
+        tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, CC_O);
+        tcg_gen_andi_tl(cpu_T0, cpu_T0, CC_S | CC_Z | CC_A | CC_P | CC_C);
+        tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_T0);
+        break;
+    case 0x9f: /* lahf */
+        if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
+            goto illegal_op;
+        gen_compute_eflags(s);
+        /* Note: gen_compute_eflags() only gives the condition codes */
+        tcg_gen_ori_tl(cpu_T0, cpu_cc_src, 0x02);
+        gen_op_mov_reg_v(MO_8, R_AH, cpu_T0);
+        break;
+    case 0xf5: /* cmc */
+        gen_compute_eflags(s);
+        tcg_gen_xori_tl(cpu_cc_src, cpu_cc_src, CC_C);
+        break;
+    case 0xf8: /* clc */
+        gen_compute_eflags(s);
+        tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_C);
+        break;
+    case 0xf9: /* stc */
+        gen_compute_eflags(s);
+        tcg_gen_ori_tl(cpu_cc_src, cpu_cc_src, CC_C);
+        break;
+    case 0xfc: /* cld */
+        tcg_gen_movi_i32(cpu_tmp2_i32, 1);
+        tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
+        break;
+    case 0xfd: /* std */
+        tcg_gen_movi_i32(cpu_tmp2_i32, -1);
+        tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
+        break;
+
+        /************************/
+        /* bit operations */
+    case 0x1ba: /* bt/bts/btr/btc Gv, im */
+        ot = dflag;
+        modrm = cpu_ldub_code(env, s->pc++);
+        op = (modrm >> 3) & 7;
+        mod = (modrm >> 6) & 3;
+        rm = (modrm & 7) | REX_B(s);
+        if (mod != 3) {
+            s->rip_offset = 1;
+            gen_lea_modrm(env, s, modrm);
+            if (!(s->prefix & PREFIX_LOCK)) {
+                gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
+            }
+        } else {
+            gen_op_mov_v_reg(ot, cpu_T0, rm);
+        }
+        /* load shift */
+        val = cpu_ldub_code(env, s->pc++);
+        tcg_gen_movi_tl(cpu_T1, val);
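+        /* modrm /4../7 encode bt/bts/btr/btc; rebase op to 0..3 so the
+           common tail below can be shared with the Gv, Ev forms.  */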
+        if (op < 4)
+            goto unknown_op;
+        op -= 4;
+        goto bt_op;
+    case 0x1a3: /* bt Gv, Ev */
+        op = 0;
+        goto do_btx;
+    case 0x1ab: /* bts */
+        op = 1;
+        goto do_btx;
+    case 0x1b3: /* btr */
+        op = 2;
+        goto do_btx;
+    case 0x1bb: /* btc */
+        op = 3;
+    do_btx:
+        ot = dflag;
+        modrm = cpu_ldub_code(env, s->pc++);
+        reg = ((modrm >> 3) & 7) | rex_r;
+        mod = (modrm >> 6) & 3;
+        rm = (modrm & 7) | REX_B(s);
+        gen_op_mov_v_reg(MO_32, cpu_T1, reg);
+        if (mod != 3) {
+            AddressParts a = gen_lea_modrm_0(env, s, modrm);
+            /* For the memory forms the bit index is not reduced modulo
+               the operand width: its upper bits select successive words,
+               so fold them into the effective address as a signed
+               displacement.  */
+            gen_exts(ot, cpu_T1);
+            tcg_gen_sari_tl(cpu_tmp0, cpu_T1, 3 + ot);
+            tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, ot);
+            tcg_gen_add_tl(cpu_A0, gen_lea_modrm_1(a), cpu_tmp0);
+            gen_lea_v_seg(s, s->aflag, cpu_A0, a.def_seg, s->override);
+            if (!(s->prefix & PREFIX_LOCK)) {
+                gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
+            }
+        } else {
+            gen_op_mov_v_reg(ot, cpu_T0, rm);
+        }
+    bt_op:
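+        /* T1 holds the bit index: reduce it modulo the operand width,
+           then build the single-bit mask in tmp0.  */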
+        tcg_gen_andi_tl(cpu_T1, cpu_T1, (1 << (3 + ot)) - 1);
+        tcg_gen_movi_tl(cpu_tmp0, 1);
+        tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T1);
+        if (s->prefix & PREFIX_LOCK) {
+            switch (op) {
+            case 0: /* bt */
+                /* Needs no atomic ops; we suppressed the normal
+                   memory load for LOCK above, so do it now.  */
+                gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
+                break;
+            case 1: /* bts */
+                tcg_gen_atomic_fetch_or_tl(cpu_T0, cpu_A0, cpu_tmp0,
+                                           s->mem_index, ot | MO_LE);
+                break;
+            case 2: /* btr */
+                tcg_gen_not_tl(cpu_tmp0, cpu_tmp0);
+                tcg_gen_atomic_fetch_and_tl(cpu_T0, cpu_A0, cpu_tmp0,
+                                            s->mem_index, ot | MO_LE);
+                break;
+            default:
+            case 3: /* btc */
+                tcg_gen_atomic_fetch_xor_tl(cpu_T0, cpu_A0, cpu_tmp0,
+                                            s->mem_index, ot | MO_LE);
+                break;
+            }
+            tcg_gen_shr_tl(cpu_tmp4, cpu_T0, cpu_T1);
+        } else {
+            tcg_gen_shr_tl(cpu_tmp4, cpu_T0, cpu_T1);
+            switch (op) {
+            case 0: /* bt */
+                /* Data already loaded; nothing to do.  */
+                break;
+            case 1: /* bts */
+                tcg_gen_or_tl(cpu_T0, cpu_T0, cpu_tmp0);
+                break;
+            case 2: /* btr */
+                tcg_gen_andc_tl(cpu_T0, cpu_T0, cpu_tmp0);
+                break;
+            default:
+            case 3: /* btc */
+                tcg_gen_xor_tl(cpu_T0, cpu_T0, cpu_tmp0);
+                break;
+            }
+            if (op != 0) {
+                if (mod != 3) {
+                    gen_op_st_v(s, ot, cpu_T0, cpu_A0);
+                } else {
+                    gen_op_mov_reg_v(ot, rm, cpu_T0);
+                }
+            }
+        }
+
+        /* Delay all CC updates until after the store above.  Note that
+           C is the result of the test, Z is unchanged, and the others
+           are all undefined.  */
+        switch (s->cc_op) {
+        case CC_OP_MULB ... CC_OP_MULQ:
+        case CC_OP_ADDB ... CC_OP_ADDQ:
+        case CC_OP_ADCB ... CC_OP_ADCQ:
+        case CC_OP_SUBB ... CC_OP_SUBQ:
+        case CC_OP_SBBB ... CC_OP_SBBQ:
+        case CC_OP_LOGICB ... CC_OP_LOGICQ:
+        case CC_OP_INCB ... CC_OP_INCQ:
+        case CC_OP_DECB ... CC_OP_DECQ:
+        case CC_OP_SHLB ... CC_OP_SHLQ:
+        case CC_OP_SARB ... CC_OP_SARQ:
+        case CC_OP_BMILGB ... CC_OP_BMILGQ:
+            /* Z was going to be computed from the non-zero status of CC_DST.
+               We can get that same Z value (and the new C value) by leaving
+               CC_DST alone, setting CC_SRC, and using a CC_OP_SAR of the
+               same width.  */
+            tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
+            set_cc_op(s, ((s->cc_op - CC_OP_MULB) & 3) + CC_OP_SARB);
+            break;
+        default:
+            /* Otherwise, generate EFLAGS and replace the C bit.  */
+            gen_compute_eflags(s);
+            tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, cpu_tmp4,
+                               ctz32(CC_C), 1);
+            break;
+        }
+        break;
+    case 0x1bc: /* bsf / tzcnt */
+    case 0x1bd: /* bsr / lzcnt */
+        ot = dflag;
+        modrm = cpu_ldub_code(env, s->pc++);
+        reg = ((modrm >> 3) & 7) | rex_r;
+        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
+        gen_extu(ot, cpu_T0);
+
+        /* Note that lzcnt and tzcnt are in different extensions.  */
+        if ((prefixes & PREFIX_REPZ)
+            && (b & 1
+                ? s->cpuid_ext3_features & CPUID_EXT3_ABM
+                : s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)) {
+            int size = 8 << ot;
+            tcg_gen_mov_tl(cpu_cc_src, cpu_T0);
+            if (b & 1) {
+                /* For lzcnt, reduce the target_ulong result by the
+                   number of zeros that we expect to find at the top.  */
+                gen_helper_clz(cpu_T0, cpu_T0);
+                tcg_gen_subi_tl(cpu_T0, cpu_T0, TARGET_LONG_BITS - size);
+            } else {
+                /* For tzcnt, a zero input must return the operand size:
+                   force all bits outside the operand size to 1.  */
+                target_ulong mask = (target_ulong)-2 << (size - 1);
+                tcg_gen_ori_tl(cpu_T0, cpu_T0, mask);
+                gen_helper_ctz(cpu_T0, cpu_T0);
+            }
+            /* For lzcnt/tzcnt, C and Z bits are defined and are
+               related to the result.  */
+            gen_op_update1_cc();
+            set_cc_op(s, CC_OP_BMILGB + ot);
+        } else {
+            /* For bsr/bsf, only the Z bit is defined and it is related
+               to the input and not the result.  */
+            tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
+            set_cc_op(s, CC_OP_LOGICB + ot);
+            if (b & 1) {
+                /* For bsr, return the bit index of the first 1 bit,
+                   not the count of leading zeros.  */
+                gen_helper_clz(cpu_T0, cpu_T0);
+                tcg_gen_xori_tl(cpu_T0, cpu_T0, TARGET_LONG_BITS - 1);
+            } else {
+                gen_helper_ctz(cpu_T0, cpu_T0);
+            }
+            /* ??? The manual says that the output is undefined when the
+               input is zero, but real hardware leaves it unchanged, and
+               real programs appear to depend on that.  */
+            tcg_gen_movi_tl(cpu_tmp0, 0);
+            tcg_gen_movcond_tl(TCG_COND_EQ, cpu_T0, cpu_cc_dst, cpu_tmp0,
+                               cpu_regs[reg], cpu_T0);
+        }
+        gen_op_mov_reg_v(ot, reg, cpu_T0);
+        break;
+        /************************/
+        /* bcd */
+    case 0x27: /* daa */
+        if (CODE64(s))
+            goto illegal_op;
+        gen_update_cc_op(s);
+        gen_helper_daa(cpu_env);
+        set_cc_op(s, CC_OP_EFLAGS);
+        break;
+    case 0x2f: /* das */
+        if (CODE64(s))
+            goto illegal_op;
+        gen_update_cc_op(s);
+        gen_helper_das(cpu_env);
+        set_cc_op(s, CC_OP_EFLAGS);
+        break;
+    case 0x37: /* aaa */
+        if (CODE64(s))
+            goto illegal_op;
+        gen_update_cc_op(s);
+        gen_helper_aaa(cpu_env);
+        set_cc_op(s, CC_OP_EFLAGS);
+        break;
+    case 0x3f: /* aas */
+        if (CODE64(s))
+            goto illegal_op;
+        gen_update_cc_op(s);
+        gen_helper_aas(cpu_env);
+        set_cc_op(s, CC_OP_EFLAGS);
+        break;
+    case 0xd4: /* aam */
+        if (CODE64(s))
+            goto illegal_op;
+        val = cpu_ldub_code(env, s->pc++);
+        if (val == 0) {
+            gen_exception(s, EXCP00_DIVZ, pc_start - s->cs_base);
+        } else {
+            gen_helper_aam(cpu_env, tcg_const_i32(val));
+            set_cc_op(s, CC_OP_LOGICB);
+        }
+        break;
+    case 0xd5: /* aad */
+        if (CODE64(s))
+            goto illegal_op;
+        val = cpu_ldub_code(env, s->pc++);
+        gen_helper_aad(cpu_env, tcg_const_i32(val));
+        set_cc_op(s, CC_OP_LOGICB);
+        break;
+        /************************/
+        /* misc */
+    case 0x90: /* nop */
+        /* XXX: correct lock test for all insn */
+        if (prefixes & PREFIX_LOCK) {
+            goto illegal_op;
+        }
+        /* If REX_B is set, then this is xchg eax, r8d, not a nop.  */
+        if (REX_B(s)) {
+            goto do_xchg_reg_eax;
+        }
+        if (prefixes & PREFIX_REPZ) {
+            gen_update_cc_op(s);
+            gen_jmp_im(pc_start - s->cs_base);
+            gen_helper_pause(cpu_env, tcg_const_i32(s->pc - pc_start));
+            s->is_jmp = DISAS_TB_JUMP;
+        }
+        break;
+    case 0x9b: /* fwait */
+        if ((s->flags & (HF_MP_MASK | HF_TS_MASK)) ==
+            (HF_MP_MASK | HF_TS_MASK)) {
+            gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
+        } else {
+            gen_helper_fwait(cpu_env);
+        }
+        break;
+    case 0xcc: /* int3 */
+        gen_interrupt(s, EXCP03_INT3, pc_start - s->cs_base, s->pc - s->cs_base);
+        break;
+    case 0xcd: /* int N */
+        val = cpu_ldub_code(env, s->pc++);
+        if (s->vm86 && s->iopl != 3) {
+            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+        } else {
+            gen_interrupt(s, val, pc_start - s->cs_base, s->pc - s->cs_base);
+        }
+        break;
+    case 0xce: /* into */
+        if (CODE64(s))
+            goto illegal_op;
+        gen_update_cc_op(s);
+        gen_jmp_im(pc_start - s->cs_base);
+        gen_helper_into(cpu_env, tcg_const_i32(s->pc - pc_start));
+        break;
+#ifdef WANT_ICEBP
+    case 0xf1: /* icebp (undocumented, exits to external debugger) */
+        gen_svm_check_intercept(s, pc_start, SVM_EXIT_ICEBP);
+#if 1
+        gen_debug(s, pc_start - s->cs_base);
+#else
+        /* start debug */
+        tb_flush(CPU(x86_env_get_cpu(env)));
+        qemu_set_log(CPU_LOG_INT | CPU_LOG_TB_IN_ASM);
+#endif
+        break;
+#endif
+    case 0xfa: /* cli */
+        if (!s->vm86) {
+            if (s->cpl <= s->iopl) {
+                gen_helper_cli(cpu_env);
+            } else {
+                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+            }
+        } else {
+            if (s->iopl == 3) {
+                gen_helper_cli(cpu_env);
+            } else {
+                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+            }
+        }
+        break;
+    case 0xfb: /* sti */
+        if (s->vm86 ? s->iopl == 3 : s->cpl <= s->iopl) {
+            gen_helper_sti(cpu_env);
+            /* interrupts are recognized only after the insn following sti */
+            gen_jmp_im(s->pc - s->cs_base);
+            gen_eob_inhibit_irq(s, true);
+        } else {
+            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+        }
+        break;
+    case 0x62: /* bound */
+        if (CODE64(s))
+            goto illegal_op;
+        ot = dflag;
+        modrm = cpu_ldub_code(env, s->pc++);
+        reg = (modrm >> 3) & 7;
+        mod = (modrm >> 6) & 3;
+        if (mod == 3)
+            goto illegal_op;
+        gen_op_mov_v_reg(ot, cpu_T0, reg);
+        gen_lea_modrm(env, s, modrm);
+        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
+        if (ot == MO_16) {
+            gen_helper_boundw(cpu_env, cpu_A0, cpu_tmp2_i32);
+        } else {
+            gen_helper_boundl(cpu_env, cpu_A0, cpu_tmp2_i32);
+        }
+        break;
+    case 0x1c8 ... 0x1cf: /* bswap reg */
+        reg = (b & 7) | REX_B(s);
+#ifdef TARGET_X86_64
+        if (dflag == MO_64) {
+            gen_op_mov_v_reg(MO_64, cpu_T0, reg);
+            tcg_gen_bswap64_i64(cpu_T0, cpu_T0);
+            gen_op_mov_reg_v(MO_64, reg, cpu_T0);
+        } else
+#endif
+        {
+            gen_op_mov_v_reg(MO_32, cpu_T0, reg);
+            tcg_gen_ext32u_tl(cpu_T0, cpu_T0);
+            tcg_gen_bswap32_tl(cpu_T0, cpu_T0);
+            gen_op_mov_reg_v(MO_32, reg, cpu_T0);
+        }
+        break;
+    case 0xd6: /* salc */
+        if (CODE64(s))
+            goto illegal_op;
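+        /* Undocumented: set AL to 0xff if CF is set, else 0.  The carry
+           is computed as 0/1 and negated to 0/-1.  */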
+        gen_compute_eflags_c(s, cpu_T0);
+        tcg_gen_neg_tl(cpu_T0, cpu_T0);
+        gen_op_mov_reg_v(MO_8, R_EAX, cpu_T0);
+        break;
+    case 0xe0: /* loopnz */
+    case 0xe1: /* loopz */
+    case 0xe2: /* loop */
+    case 0xe3: /* jecxz */
+        {
+            TCGLabel *l1, *l2, *l3;
+
+            tval = (int8_t)insn_get(env, s, MO_8);
+            next_eip = s->pc - s->cs_base;
+            tval += next_eip;
+            if (dflag == MO_16) {
+                tval &= 0xffff;
+            }
+
+            l1 = gen_new_label();
+            l2 = gen_new_label();
+            l3 = gen_new_label();
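+            /* l1: branch taken, jump to tval; l3: loopz/loopnz stopped
+               because ECX reached zero, fall through to next_eip; l2:
+               common exit.  */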
+            b &= 3;
+            switch(b) {
+            case 0: /* loopnz */
+            case 1: /* loopz */
+                gen_op_add_reg_im(s->aflag, R_ECX, -1);
+                gen_op_jz_ecx(s->aflag, l3);
+                gen_jcc1(s, (JCC_Z << 1) | (b ^ 1), l1);
+                break;
+            case 2: /* loop */
+                gen_op_add_reg_im(s->aflag, R_ECX, -1);
+                gen_op_jnz_ecx(s->aflag, l1);
+                break;
+            default:
+            case 3: /* jecxz */
+                gen_op_jz_ecx(s->aflag, l1);
+                break;
+            }
+
+            gen_set_label(l3);
+            gen_jmp_im(next_eip);
+            tcg_gen_br(l2);
+
+            gen_set_label(l1);
+            gen_jmp_im(tval);
+            gen_set_label(l2);
+            gen_eob(s);
+        }
+        break;
+    case 0x130: /* wrmsr */
+    case 0x132: /* rdmsr */
+        if (s->cpl != 0) {
+            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+        } else {
+            gen_update_cc_op(s);
+            gen_jmp_im(pc_start - s->cs_base);
+            if (b & 2) {
+                gen_helper_rdmsr(cpu_env);
+            } else {
+                gen_helper_wrmsr(cpu_env);
+            }
+        }
+        break;
+    case 0x131: /* rdtsc */
+        gen_update_cc_op(s);
+        gen_jmp_im(pc_start - s->cs_base);
+        if (s->tb->cflags & CF_USE_ICOUNT) {
+            gen_io_start();
+        }
+        gen_helper_rdtsc(cpu_env);
+        if (s->tb->cflags & CF_USE_ICOUNT) {
+            gen_io_end();
+            gen_jmp(s, s->pc - s->cs_base);
+        }
+        break;
+    case 0x133: /* rdpmc */
+        gen_update_cc_op(s);
+        gen_jmp_im(pc_start - s->cs_base);
+        gen_helper_rdpmc(cpu_env);
+        break;
+    case 0x134: /* sysenter */
+        /* On Intel CPUs, SYSENTER remains valid in 64-bit mode */
+        if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
+            goto illegal_op;
+        if (!s->pe) {
+            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+        } else {
+            gen_helper_sysenter(cpu_env);
+            gen_eob(s);
+        }
+        break;
+    case 0x135: /* sysexit */
+        /* On Intel CPUs, SYSEXIT remains valid in 64-bit mode */
+        if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
+            goto illegal_op;
+        if (!s->pe) {
+            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+        } else {
+            gen_helper_sysexit(cpu_env, tcg_const_i32(dflag - 1));
+            gen_eob(s);
+        }
+        break;
+#ifdef TARGET_X86_64
+    case 0x105: /* syscall */
+        /* XXX: is it usable in real mode? */
+        gen_update_cc_op(s);
+        gen_jmp_im(pc_start - s->cs_base);
+        gen_helper_syscall(cpu_env, tcg_const_i32(s->pc - pc_start));
+        gen_eob(s);
+        break;
+    case 0x107: /* sysret */
+        if (!s->pe) {
+            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+        } else {
+            gen_helper_sysret(cpu_env, tcg_const_i32(dflag - 1));
+            /* condition codes are modified only in long mode */
+            if (s->lma) {
+                set_cc_op(s, CC_OP_EFLAGS);
+            }
+            gen_eob(s);
+        }
+        break;
+#endif
+    case 0x1a2: /* cpuid */
+        gen_update_cc_op(s);
+        gen_jmp_im(pc_start - s->cs_base);
+        gen_helper_cpuid(cpu_env);
+        break;
+    case 0xf4: /* hlt */
+        if (s->cpl != 0) {
+            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+        } else {
+            gen_update_cc_op(s);
+            gen_jmp_im(pc_start - s->cs_base);
+            gen_helper_hlt(cpu_env, tcg_const_i32(s->pc - pc_start));
+            s->is_jmp = DISAS_TB_JUMP;
+        }
+        break;
+    case 0x100:
+        modrm = cpu_ldub_code(env, s->pc++);
+        mod = (modrm >> 6) & 3;
+        op = (modrm >> 3) & 7;
+        switch(op) {
+        case 0: /* sldt */
+            if (!s->pe || s->vm86)
+                goto illegal_op;
+            gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_READ);
+            tcg_gen_ld32u_tl(cpu_T0, cpu_env,
+                             offsetof(CPUX86State, ldt.selector));
+            ot = mod == 3 ? dflag : MO_16;
+            gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
+            break;
+        case 2: /* lldt */
+            if (!s->pe || s->vm86)
+                goto illegal_op;
+            if (s->cpl != 0) {
+                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+            } else {
+                gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_WRITE);
+                gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
+                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
+                gen_helper_lldt(cpu_env, cpu_tmp2_i32);
+            }
+            break;
+        case 1: /* str */
+            if (!s->pe || s->vm86)
+                goto illegal_op;
+            gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_READ);
+            tcg_gen_ld32u_tl(cpu_T0, cpu_env,
+                             offsetof(CPUX86State, tr.selector));
+            ot = mod == 3 ? dflag : MO_16;
+            gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
+            break;
+        case 3: /* ltr */
+            if (!s->pe || s->vm86)
+                goto illegal_op;
+            if (s->cpl != 0) {
+                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+            } else {
+                gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_WRITE);
+                gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
+                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
+                gen_helper_ltr(cpu_env, cpu_tmp2_i32);
+            }
+            break;
+        case 4: /* verr */
+        case 5: /* verw */
+            if (!s->pe || s->vm86)
+                goto illegal_op;
+            gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
+            gen_update_cc_op(s);
+            if (op == 4) {
+                gen_helper_verr(cpu_env, cpu_T0);
+            } else {
+                gen_helper_verw(cpu_env, cpu_T0);
+            }
+            set_cc_op(s, CC_OP_EFLAGS);
+            break;
+        default:
+            goto unknown_op;
+        }
+        break;
+
+    case 0x101:
+        modrm = cpu_ldub_code(env, s->pc++);
+        switch (modrm) {
+        CASE_MODRM_MEM_OP(0): /* sgdt */
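+            /* Store the 16-bit limit, then the base; with a 16-bit
+               operand size only the low 24 bits of the base are kept.  */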
+            gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_READ);
+            gen_lea_modrm(env, s, modrm);
+            tcg_gen_ld32u_tl(cpu_T0,
+                             cpu_env, offsetof(CPUX86State, gdt.limit));
+            gen_op_st_v(s, MO_16, cpu_T0, cpu_A0);
+            gen_add_A0_im(s, 2);
+            tcg_gen_ld_tl(cpu_T0, cpu_env, offsetof(CPUX86State, gdt.base));
+            if (dflag == MO_16) {
+                tcg_gen_andi_tl(cpu_T0, cpu_T0, 0xffffff);
+            }
+            gen_op_st_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0);
+            break;
+
+        case 0xc8: /* monitor */
+            if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || s->cpl != 0) {
+                goto illegal_op;
+            }
+            gen_update_cc_op(s);
+            gen_jmp_im(pc_start - s->cs_base);
+            tcg_gen_mov_tl(cpu_A0, cpu_regs[R_EAX]);
+            gen_extu(s->aflag, cpu_A0);
+            gen_add_A0_ds_seg(s);
+            gen_helper_monitor(cpu_env, cpu_A0);
+            break;
+
+        case 0xc9: /* mwait */
+            if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || s->cpl != 0) {
+                goto illegal_op;
+            }
+            gen_update_cc_op(s);
+            gen_jmp_im(pc_start - s->cs_base);
+            gen_helper_mwait(cpu_env, tcg_const_i32(s->pc - pc_start));
+            gen_eob(s);
+            break;
+
+        case 0xca: /* clac */
+            if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
+                || s->cpl != 0) {
+                goto illegal_op;
+            }
+            gen_helper_clac(cpu_env);
+            gen_jmp_im(s->pc - s->cs_base);
+            gen_eob(s);
+            break;
+
+        case 0xcb: /* stac */
+            if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
+                || s->cpl != 0) {
+                goto illegal_op;
+            }
+            gen_helper_stac(cpu_env);
+            gen_jmp_im(s->pc - s->cs_base);
+            gen_eob(s);
+            break;
+
+        CASE_MODRM_MEM_OP(1): /* sidt */
+            gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_READ);
+            gen_lea_modrm(env, s, modrm);
+            tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State, idt.limit));
+            gen_op_st_v(s, MO_16, cpu_T0, cpu_A0);
+            gen_add_A0_im(s, 2);
+            tcg_gen_ld_tl(cpu_T0, cpu_env, offsetof(CPUX86State, idt.base));
+            if (dflag == MO_16) {
+                tcg_gen_andi_tl(cpu_T0, cpu_T0, 0xffffff);
+            }
+            gen_op_st_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0);
+            break;
+
+        case 0xd0: /* xgetbv */
+            if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
+                || (s->prefix & (PREFIX_LOCK | PREFIX_DATA
+                                 | PREFIX_REPZ | PREFIX_REPNZ))) {
+                goto illegal_op;
+            }
+            tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_ECX]);
+            gen_helper_xgetbv(cpu_tmp1_i64, cpu_env, cpu_tmp2_i32);
+            tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], cpu_tmp1_i64);
+            break;
+
+        case 0xd1: /* xsetbv */
+            if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
+                || (s->prefix & (PREFIX_LOCK | PREFIX_DATA
+                                 | PREFIX_REPZ | PREFIX_REPNZ))) {
+                goto illegal_op;
+            }
+            if (s->cpl != 0) {
+                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+                break;
+            }
+            tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX],
+                                  cpu_regs[R_EDX]);
+            tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_ECX]);
+            gen_helper_xsetbv(cpu_env, cpu_tmp2_i32, cpu_tmp1_i64);
+            /* End TB because translation flags may change.  */
+            gen_jmp_im(s->pc - s->cs_base);
+            gen_eob(s);
+            break;
+
+        case 0xd8: /* VMRUN */
+            if (!(s->flags & HF_SVME_MASK) || !s->pe) {
+                goto illegal_op;
+            }
+            if (s->cpl != 0) {
+                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+                break;
+            }
+            gen_update_cc_op(s);
+            gen_jmp_im(pc_start - s->cs_base);
+            gen_helper_vmrun(cpu_env, tcg_const_i32(s->aflag - 1),
+                             tcg_const_i32(s->pc - pc_start));
+            tcg_gen_exit_tb(0);
+            s->is_jmp = DISAS_TB_JUMP;
+            break;
+
+        case 0xd9: /* VMMCALL */
+            if (!(s->flags & HF_SVME_MASK)) {
+                goto illegal_op;
+            }
+            gen_update_cc_op(s);
+            gen_jmp_im(pc_start - s->cs_base);
+            gen_helper_vmmcall(cpu_env);
+            break;
+
+        case 0xda: /* VMLOAD */
+            if (!(s->flags & HF_SVME_MASK) || !s->pe) {
+                goto illegal_op;
+            }
+            if (s->cpl != 0) {
+                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+                break;
+            }
+            gen_update_cc_op(s);
+            gen_jmp_im(pc_start - s->cs_base);
+            gen_helper_vmload(cpu_env, tcg_const_i32(s->aflag - 1));
+            break;
+
+        case 0xdb: /* VMSAVE */
+            if (!(s->flags & HF_SVME_MASK) || !s->pe) {
+                goto illegal_op;
+            }
+            if (s->cpl != 0) {
+                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+                break;
+            }
+            gen_update_cc_op(s);
+            gen_jmp_im(pc_start - s->cs_base);
+            gen_helper_vmsave(cpu_env, tcg_const_i32(s->aflag - 1));
+            break;
+
+        case 0xdc: /* STGI */
+            if ((!(s->flags & HF_SVME_MASK)
+                   && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
+                || !s->pe) {
+                goto illegal_op;
+            }
+            if (s->cpl != 0) {
+                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+                break;
+            }
+            gen_update_cc_op(s);
+            gen_jmp_im(pc_start - s->cs_base);
+            gen_helper_stgi(cpu_env);
+            break;
+
+        case 0xdd: /* CLGI */
+            if (!(s->flags & HF_SVME_MASK) || !s->pe) {
+                goto illegal_op;
+            }
+            if (s->cpl != 0) {
+                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+                break;
+            }
+            gen_update_cc_op(s);
+            gen_jmp_im(pc_start - s->cs_base);
+            gen_helper_clgi(cpu_env);
+            break;
+
+        case 0xde: /* SKINIT */
+            if ((!(s->flags & HF_SVME_MASK)
+                 && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
+                || !s->pe) {
+                goto illegal_op;
+            }
+            gen_update_cc_op(s);
+            gen_jmp_im(pc_start - s->cs_base);
+            gen_helper_skinit(cpu_env);
+            break;
+
+        case 0xdf: /* INVLPGA */
+            if (!(s->flags & HF_SVME_MASK) || !s->pe) {
+                goto illegal_op;
+            }
+            if (s->cpl != 0) {
+                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+                break;
+            }
+            gen_update_cc_op(s);
+            gen_jmp_im(pc_start - s->cs_base);
+            gen_helper_invlpga(cpu_env, tcg_const_i32(s->aflag - 1));
+            break;
+
+        CASE_MODRM_MEM_OP(2): /* lgdt */
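+            /* Load the 16-bit limit, then the base; a 16-bit operand
+               size truncates the base to 24 bits.  */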
+            if (s->cpl != 0) {
+                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+                break;
+            }
+            gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_WRITE);
+            gen_lea_modrm(env, s, modrm);
+            gen_op_ld_v(s, MO_16, cpu_T1, cpu_A0);
+            gen_add_A0_im(s, 2);
+            gen_op_ld_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0);
+            if (dflag == MO_16) {
+                tcg_gen_andi_tl(cpu_T0, cpu_T0, 0xffffff);
+            }
+            tcg_gen_st_tl(cpu_T0, cpu_env, offsetof(CPUX86State, gdt.base));
+            tcg_gen_st32_tl(cpu_T1, cpu_env, offsetof(CPUX86State, gdt.limit));
+            break;
+
+        CASE_MODRM_MEM_OP(3): /* lidt */
+            if (s->cpl != 0) {
+                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+                break;
+            }
+            gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_WRITE);
+            gen_lea_modrm(env, s, modrm);
+            gen_op_ld_v(s, MO_16, cpu_T1, cpu_A0);
+            gen_add_A0_im(s, 2);
+            gen_op_ld_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0);
+            if (dflag == MO_16) {
+                tcg_gen_andi_tl(cpu_T0, cpu_T0, 0xffffff);
+            }
+            tcg_gen_st_tl(cpu_T0, cpu_env, offsetof(CPUX86State, idt.base));
+            tcg_gen_st32_tl(cpu_T1, cpu_env, offsetof(CPUX86State, idt.limit));
+            break;
+
+        CASE_MODRM_OP(4): /* smsw */
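+            /* Store the low word of CR0; in 64-bit mode a register
+               destination may take the full operand size.  */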
+            gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_CR0);
+            tcg_gen_ld_tl(cpu_T0, cpu_env, offsetof(CPUX86State, cr[0]));
+            if (CODE64(s)) {
+                mod = (modrm >> 6) & 3;
+                ot = (mod != 3 ? MO_16 : s->dflag);
+            } else {
+                ot = MO_16;
+            }
+            gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
+            break;
+        case 0xee: /* rdpkru */
+            if (prefixes & PREFIX_LOCK) {
+                goto illegal_op;
+            }
+            tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_ECX]);
+            gen_helper_rdpkru(cpu_tmp1_i64, cpu_env, cpu_tmp2_i32);
+            tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], cpu_tmp1_i64);
+            break;
+        case 0xef: /* wrpkru */
+            if (prefixes & PREFIX_LOCK) {
+                goto illegal_op;
+            }
+            tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX],
+                                  cpu_regs[R_EDX]);
+            tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_ECX]);
+            gen_helper_wrpkru(cpu_env, cpu_tmp2_i32, cpu_tmp1_i64);
+            break;
+        CASE_MODRM_OP(6): /* lmsw */
+            if (s->cpl != 0) {
+                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+                break;
+            }
+            gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
+            gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
+            gen_helper_lmsw(cpu_env, cpu_T0);
+            gen_jmp_im(s->pc - s->cs_base);
+            gen_eob(s);
+            break;
+
+        CASE_MODRM_MEM_OP(7): /* invlpg */
+            if (s->cpl != 0) {
+                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+                break;
+            }
+            gen_update_cc_op(s);
+            gen_jmp_im(pc_start - s->cs_base);
+            gen_lea_modrm(env, s, modrm);
+            gen_helper_invlpg(cpu_env, cpu_A0);
+            gen_jmp_im(s->pc - s->cs_base);
+            gen_eob(s);
+            break;
+
+        case 0xf8: /* swapgs */
+#ifdef TARGET_X86_64
+            if (CODE64(s)) {
+                if (s->cpl != 0) {
+                    gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+                } else {
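+                    /* Exchange the GS base with the kernel GS base
+                       (IA32_KERNEL_GS_BASE).  */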
+                    tcg_gen_mov_tl(cpu_T0, cpu_seg_base[R_GS]);
+                    tcg_gen_ld_tl(cpu_seg_base[R_GS], cpu_env,
+                                  offsetof(CPUX86State, kernelgsbase));
+                    tcg_gen_st_tl(cpu_T0, cpu_env,
+                                  offsetof(CPUX86State, kernelgsbase));
+                }
+                break;
+            }
+#endif
+            goto illegal_op;
+
+        case 0xf9: /* rdtscp */
+            if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP)) {
+                goto illegal_op;
+            }
+            gen_update_cc_op(s);
+            gen_jmp_im(pc_start - s->cs_base);
+            if (s->tb->cflags & CF_USE_ICOUNT) {
+                gen_io_start();
+            }
+            gen_helper_rdtscp(cpu_env);
+            if (s->tb->cflags & CF_USE_ICOUNT) {
+                gen_io_end();
+                gen_jmp(s, s->pc - s->cs_base);
+            }
+            break;
+
+        default:
+            goto unknown_op;
+        }
+        break;
+
+    case 0x108: /* invd */
+    case 0x109: /* wbinvd */
+        if (s->cpl != 0) {
+            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+        } else {
+            gen_svm_check_intercept(s, pc_start, (b & 2) ? SVM_EXIT_INVD : SVM_EXIT_WBINVD);
+            /* nothing to do */
+        }
+        break;
+    case 0x63: /* arpl or movslS (x86_64) */
+#ifdef TARGET_X86_64
+        if (CODE64(s)) {
+            int d_ot;
+            /* d_ot is the size of destination */
+            d_ot = dflag;
+
+            modrm = cpu_ldub_code(env, s->pc++);
+            reg = ((modrm >> 3) & 7) | rex_r;
+            mod = (modrm >> 6) & 3;
+            rm = (modrm & 7) | REX_B(s);
+
+            if (mod == 3) {
+                gen_op_mov_v_reg(MO_32, cpu_T0, rm);
+                /* sign extend */
+                if (d_ot == MO_64) {
+                    tcg_gen_ext32s_tl(cpu_T0, cpu_T0);
+                }
+                gen_op_mov_reg_v(d_ot, reg, cpu_T0);
+            } else {
+                gen_lea_modrm(env, s, modrm);
+                gen_op_ld_v(s, MO_32 | MO_SIGN, cpu_T0, cpu_A0);
+                gen_op_mov_reg_v(d_ot, reg, cpu_T0);
+            }
+        } else
+#endif
+        {
+            TCGLabel *label1;
+            TCGv t0, t1, t2, a0;
+
+            if (!s->pe || s->vm86)
+                goto illegal_op;
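+            /* arpl: if the destination selector's RPL is below the
+               source's, raise it to match and set ZF; otherwise clear
+               ZF.  t2 accumulates the new Z bit.  */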
+            t0 = tcg_temp_local_new();
+            t1 = tcg_temp_local_new();
+            t2 = tcg_temp_local_new();
+            ot = MO_16;
+            modrm = cpu_ldub_code(env, s->pc++);
+            reg = (modrm >> 3) & 7;
+            mod = (modrm >> 6) & 3;
+            rm = modrm & 7;
+            if (mod != 3) {
+                gen_lea_modrm(env, s, modrm);
+                gen_op_ld_v(s, ot, t0, cpu_A0);
+                a0 = tcg_temp_local_new();
+                tcg_gen_mov_tl(a0, cpu_A0);
+            } else {
+                gen_op_mov_v_reg(ot, t0, rm);
+                TCGV_UNUSED(a0);
+            }
+            gen_op_mov_v_reg(ot, t1, reg);
+            tcg_gen_andi_tl(cpu_tmp0, t0, 3);
+            tcg_gen_andi_tl(t1, t1, 3);
+            tcg_gen_movi_tl(t2, 0);
+            label1 = gen_new_label();
+            tcg_gen_brcond_tl(TCG_COND_GE, cpu_tmp0, t1, label1);
+            tcg_gen_andi_tl(t0, t0, ~3);
+            tcg_gen_or_tl(t0, t0, t1);
+            tcg_gen_movi_tl(t2, CC_Z);
+            gen_set_label(label1);
+            if (mod != 3) {
+                gen_op_st_v(s, ot, t0, a0);
+                tcg_temp_free(a0);
+            } else {
+                gen_op_mov_reg_v(ot, rm, t0);
+            }
+            gen_compute_eflags(s);
+            tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_Z);
+            tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t2);
+            tcg_temp_free(t0);
+            tcg_temp_free(t1);
+            tcg_temp_free(t2);
+        }
+        break;
+    case 0x102: /* lar */
+    case 0x103: /* lsl */
+        {
+            TCGLabel *label1;
+            TCGv t0;
+            if (!s->pe || s->vm86)
+                goto illegal_op;
+            ot = dflag != MO_16 ? MO_32 : MO_16;
+            modrm = cpu_ldub_code(env, s->pc++);
+            reg = ((modrm >> 3) & 7) | rex_r;
+            gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
+            t0 = tcg_temp_local_new();
+            gen_update_cc_op(s);
+            if (b == 0x102) {
+                gen_helper_lar(t0, cpu_env, cpu_T0);
+            } else {
+                gen_helper_lsl(t0, cpu_env, cpu_T0);
+            }
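+            /* The helper reports success via ZF; write the result back
+               only when ZF is set.  */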
+            tcg_gen_andi_tl(cpu_tmp0, cpu_cc_src, CC_Z);
+            label1 = gen_new_label();
+            tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
+            gen_op_mov_reg_v(ot, reg, t0);
+            gen_set_label(label1);
+            set_cc_op(s, CC_OP_EFLAGS);
+            tcg_temp_free(t0);
+        }
+        break;
+    case 0x118:
+        modrm = cpu_ldub_code(env, s->pc++);
+        mod = (modrm >> 6) & 3;
+        op = (modrm >> 3) & 7;
+        switch(op) {
+        case 0: /* prefetchnta */
+        case 1: /* prefetcht0 */
+        case 2: /* prefetcht1 */
+        case 3: /* prefetcht2 */
+            if (mod == 3)
+                goto illegal_op;
+            gen_nop_modrm(env, s, modrm);
+            /* nothing more to do */
+            break;
+        default: /* nop (multi byte) */
+            gen_nop_modrm(env, s, modrm);
+            break;
+        }
+        break;
+    case 0x11a:
+        modrm = cpu_ldub_code(env, s->pc++);
+        if (s->flags & HF_MPX_EN_MASK) {
+            mod = (modrm >> 6) & 3;
+            reg = ((modrm >> 3) & 7) | rex_r;
+            if (prefixes & PREFIX_REPZ) {
+                /* bndcl */
+                if (reg >= 4
+                    || (prefixes & PREFIX_LOCK)
+                    || s->aflag == MO_16) {
+                    goto illegal_op;
+                }
+                gen_bndck(env, s, modrm, TCG_COND_LTU, cpu_bndl[reg]);
+            } else if (prefixes & PREFIX_REPNZ) {
+                /* bndcu */
+                if (reg >= 4
+                    || (prefixes & PREFIX_LOCK)
+                    || s->aflag == MO_16) {
+                    goto illegal_op;
+                }
+                TCGv_i64 notu = tcg_temp_new_i64();
+                tcg_gen_not_i64(notu, cpu_bndu[reg]);
+                gen_bndck(env, s, modrm, TCG_COND_GTU, notu);
+                tcg_temp_free_i64(notu);
+            } else if (prefixes & PREFIX_DATA) {
+                /* bndmov -- from reg/mem */
+                if (reg >= 4 || s->aflag == MO_16) {
+                    goto illegal_op;
+                }
+                if (mod == 3) {
+                    int reg2 = (modrm & 7) | REX_B(s);
+                    if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
+                        goto illegal_op;
+                    }
+                    if (s->flags & HF_MPX_IU_MASK) {
+                        tcg_gen_mov_i64(cpu_bndl[reg], cpu_bndl[reg2]);
+                        tcg_gen_mov_i64(cpu_bndu[reg], cpu_bndu[reg2]);
+                    }
+                } else {
+                    gen_lea_modrm(env, s, modrm);
+                    if (CODE64(s)) {
+                        tcg_gen_qemu_ld_i64(cpu_bndl[reg], cpu_A0,
+                                            s->mem_index, MO_LEQ);
+                        tcg_gen_addi_tl(cpu_A0, cpu_A0, 8);
+                        tcg_gen_qemu_ld_i64(cpu_bndu[reg], cpu_A0,
+                                            s->mem_index, MO_LEQ);
+                    } else {
+                        tcg_gen_qemu_ld_i64(cpu_bndl[reg], cpu_A0,
+                                            s->mem_index, MO_LEUL);
+                        tcg_gen_addi_tl(cpu_A0, cpu_A0, 4);
+                        tcg_gen_qemu_ld_i64(cpu_bndu[reg], cpu_A0,
+                                            s->mem_index, MO_LEUL);
+                    }
+                    /* bnd registers are now in-use */
+                    gen_set_hflag(s, HF_MPX_IU_MASK);
+                }
+            } else if (mod != 3) {
+                /* bndldx */
+                AddressParts a = gen_lea_modrm_0(env, s, modrm);
+                if (reg >= 4
+                    || (prefixes & PREFIX_LOCK)
+                    || s->aflag == MO_16
+                    || a.base < -1) {
+                    goto illegal_op;
+                }
+                if (a.base >= 0) {
+                    tcg_gen_addi_tl(cpu_A0, cpu_regs[a.base], a.disp);
+                } else {
+                    tcg_gen_movi_tl(cpu_A0, 0);
+                }
+                gen_lea_v_seg(s, s->aflag, cpu_A0, a.def_seg, s->override);
+                if (a.index >= 0) {
+                    tcg_gen_mov_tl(cpu_T0, cpu_regs[a.index]);
+                } else {
+                    tcg_gen_movi_tl(cpu_T0, 0);
+                }
+                if (CODE64(s)) {
+                    gen_helper_bndldx64(cpu_bndl[reg], cpu_env, cpu_A0, cpu_T0);
+                    tcg_gen_ld_i64(cpu_bndu[reg], cpu_env,
+                                   offsetof(CPUX86State, mmx_t0.MMX_Q(0)));
+                } else {
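+                    /* The 32-bit helper returns both bounds packed
+                     * into one i64: the lower bound in the low half
+                     * and the upper bound in the high half.
+                     */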
+                    gen_helper_bndldx32(cpu_bndu[reg], cpu_env, cpu_A0, cpu_T0);
+                    tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndu[reg]);
+                    tcg_gen_shri_i64(cpu_bndu[reg], cpu_bndu[reg], 32);
+                }
+                gen_set_hflag(s, HF_MPX_IU_MASK);
+            }
+        }
+        gen_nop_modrm(env, s, modrm);
+        break;
+    case 0x11b:
+        modrm = cpu_ldub_code(env, s->pc++);
+        if (s->flags & HF_MPX_EN_MASK) {
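+            /* 0F 1B decodes by mandatory prefix: F3 with a memory
+             * operand = BNDMK, F2 = BNDCN, 66 = BNDMOV (store form),
+             * and no prefix with a memory operand = BNDSTX.
+             */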
+            mod = (modrm >> 6) & 3;
+            reg = ((modrm >> 3) & 7) | rex_r;
+            if (mod != 3 && (prefixes & PREFIX_REPZ)) {
+                /* bndmk */
+                if (reg >= 4
+                    || (prefixes & PREFIX_LOCK)
+                    || s->aflag == MO_16) {
+                    goto illegal_op;
+                }
+                AddressParts a = gen_lea_modrm_0(env, s, modrm);
+                if (a.base >= 0) {
+                    tcg_gen_extu_tl_i64(cpu_bndl[reg], cpu_regs[a.base]);
+                    if (!CODE64(s)) {
+                        tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndl[reg]);
+                    }
+                } else if (a.base == -1) {
+                    /* no base register: the lower bound is 0 */
+                    tcg_gen_movi_i64(cpu_bndl[reg], 0);
+                } else {
+                    /* RIP-relative addressing generates #UD */
+                    goto illegal_op;
+                }
+                tcg_gen_not_tl(cpu_A0, gen_lea_modrm_1(a));
+                if (!CODE64(s)) {
+                    tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
+                }
+                tcg_gen_extu_tl_i64(cpu_bndu[reg], cpu_A0);
+                /* bnd registers are now in-use */
+                gen_set_hflag(s, HF_MPX_IU_MASK);
+                break;
+            } else if (prefixes & PREFIX_REPNZ) {
+                /* bndcn */
+                if (reg >= 4
+                    || (prefixes & PREFIX_LOCK)
+                    || s->aflag == MO_16) {
+                    goto illegal_op;
+                }
+                gen_bndck(env, s, modrm, TCG_COND_GTU, cpu_bndu[reg]);
+            } else if (prefixes & PREFIX_DATA) {
+                /* bndmov -- to reg/mem */
+                if (reg >= 4 || s->aflag == MO_16) {
+                    goto illegal_op;
+                }
+                if (mod == 3) {
+                    int reg2 = (modrm & 7) | REX_B(s);
+                    if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
+                        goto illegal_op;
+                    }
+                    if (s->flags & HF_MPX_IU_MASK) {
+                        tcg_gen_mov_i64(cpu_bndl[reg2], cpu_bndl[reg]);
+                        tcg_gen_mov_i64(cpu_bndu[reg2], cpu_bndu[reg]);
+                    }
+                } else {
+                    gen_lea_modrm(env, s, modrm);
+                    if (CODE64(s)) {
+                        tcg_gen_qemu_st_i64(cpu_bndl[reg], cpu_A0,
+                                            s->mem_index, MO_LEQ);
+                        tcg_gen_addi_tl(cpu_A0, cpu_A0, 8);
+                        tcg_gen_qemu_st_i64(cpu_bndu[reg], cpu_A0,
+                                            s->mem_index, MO_LEQ);
+                    } else {
+                        tcg_gen_qemu_st_i64(cpu_bndl[reg], cpu_A0,
+                                            s->mem_index, MO_LEUL);
+                        tcg_gen_addi_tl(cpu_A0, cpu_A0, 4);
+                        tcg_gen_qemu_st_i64(cpu_bndu[reg], cpu_A0,
+                                            s->mem_index, MO_LEUL);
+                    }
+                }
+            } else if (mod != 3) {
+                /* bndstx */
+                AddressParts a = gen_lea_modrm_0(env, s, modrm);
+                if (reg >= 4
+                    || (prefixes & PREFIX_LOCK)
+                    || s->aflag == MO_16
+                    || a.base < -1) {
+                    goto illegal_op;
+                }
+                if (a.base >= 0) {
+                    tcg_gen_addi_tl(cpu_A0, cpu_regs[a.base], a.disp);
+                } else {
+                    tcg_gen_movi_tl(cpu_A0, 0);
+                }
+                gen_lea_v_seg(s, s->aflag, cpu_A0, a.def_seg, s->override);
+                if (a.index >= 0) {
+                    tcg_gen_mov_tl(cpu_T0, cpu_regs[a.index]);
+                } else {
+                    tcg_gen_movi_tl(cpu_T0, 0);
+                }
+                if (CODE64(s)) {
+                    gen_helper_bndstx64(cpu_env, cpu_A0, cpu_T0,
+                                        cpu_bndl[reg], cpu_bndu[reg]);
+                } else {
+                    gen_helper_bndstx32(cpu_env, cpu_A0, cpu_T0,
+                                        cpu_bndl[reg], cpu_bndu[reg]);
+                }
+            }
+        }
+        gen_nop_modrm(env, s, modrm);
+        break;
+    case 0x119: case 0x11c ... 0x11f: /* nop (multi byte) */
+        modrm = cpu_ldub_code(env, s->pc++);
+        gen_nop_modrm(env, s, modrm);
+        break;
+    case 0x120: /* mov reg, crN */
+    case 0x122: /* mov crN, reg */
+        if (s->cpl != 0) {
+            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+        } else {
+            modrm = cpu_ldub_code(env, s->pc++);
+            /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
+             * AMD documentation (24594.pdf) and testing of
+             * Intel 386 and 486 processors both show that the mod bits
+             * are assumed to be 1's, regardless of their actual values.
+             */
+            rm = (modrm & 7) | REX_B(s);
+            reg = ((modrm >> 3) & 7) | rex_r;
+            if (CODE64(s))
+                ot = MO_64;
+            else
+                ot = MO_32;
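+            /* With the CR8 legacy feature (AMD), a LOCK prefix on
+             * MOV CR0 selects CR8 as an alternate encoding.
+             */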
+            if ((prefixes & PREFIX_LOCK) && (reg == 0) &&
+                (s->cpuid_ext3_features & CPUID_EXT3_CR8LEG)) {
+                reg = 8;
+            }
+            switch(reg) {
+            case 0:
+            case 2:
+            case 3:
+            case 4:
+            case 8:
+                gen_update_cc_op(s);
+                gen_jmp_im(pc_start - s->cs_base);
+                if (b & 2) {
+                    gen_op_mov_v_reg(ot, cpu_T0, rm);
+                    gen_helper_write_crN(cpu_env, tcg_const_i32(reg),
+                                         cpu_T0);
+                    gen_jmp_im(s->pc - s->cs_base);
+                    gen_eob(s);
+                } else {
+                    gen_helper_read_crN(cpu_T0, cpu_env, tcg_const_i32(reg));
+                    gen_op_mov_reg_v(ot, rm, cpu_T0);
+                }
+                break;
+            default:
+                goto unknown_op;
+            }
+        }
+        break;
+    case 0x121: /* mov reg, drN */
+    case 0x123: /* mov drN, reg */
+        if (s->cpl != 0) {
+            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+        } else {
+            modrm = cpu_ldub_code(env, s->pc++);
+            /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
+             * AMD documentation (24594.pdf) and testing of
+             * Intel 386 and 486 processors both show that the mod bits
+             * are assumed to be 1's, regardless of their actual values.
+             */
+            rm = (modrm & 7) | REX_B(s);
+            reg = ((modrm >> 3) & 7) | rex_r;
+            if (CODE64(s))
+                ot = MO_64;
+            else
+                ot = MO_32;
+            if (reg >= 8) {
+                goto illegal_op;
+            }
+            if (b & 2) {
+                gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_DR0 + reg);
+                gen_op_mov_v_reg(ot, cpu_T0, rm);
+                tcg_gen_movi_i32(cpu_tmp2_i32, reg);
+                gen_helper_set_dr(cpu_env, cpu_tmp2_i32, cpu_T0);
+                gen_jmp_im(s->pc - s->cs_base);
+                gen_eob(s);
+            } else {
+                gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_DR0 + reg);
+                tcg_gen_movi_i32(cpu_tmp2_i32, reg);
+                gen_helper_get_dr(cpu_T0, cpu_env, cpu_tmp2_i32);
+                gen_op_mov_reg_v(ot, rm, cpu_T0);
+            }
+        }
+        break;
+    case 0x106: /* clts */
+        if (s->cpl != 0) {
+            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+        } else {
+            gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
+            gen_helper_clts(cpu_env);
+            /* abort block because static cpu state changed */
+            gen_jmp_im(s->pc - s->cs_base);
+            gen_eob(s);
+        }
+        break;
+    /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */
+    case 0x1c3: /* MOVNTI reg, mem */
+        if (!(s->cpuid_features & CPUID_SSE2))
+            goto illegal_op;
+        ot = mo_64_32(dflag);
+        modrm = cpu_ldub_code(env, s->pc++);
+        mod = (modrm >> 6) & 3;
+        if (mod == 3)
+            goto illegal_op;
+        reg = ((modrm >> 3) & 7) | rex_r;
+        /* generate a generic store */
+        gen_ldst_modrm(env, s, modrm, ot, reg, 1);
+        break;
+    case 0x1ae:
+        modrm = cpu_ldub_code(env, s->pc++);
+        switch (modrm) {
+        CASE_MODRM_MEM_OP(0): /* fxsave */
+            if (!(s->cpuid_features & CPUID_FXSR)
+                || (prefixes & PREFIX_LOCK)) {
+                goto illegal_op;
+            }
+            if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
+                gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
+                break;
+            }
+            gen_lea_modrm(env, s, modrm);
+            gen_helper_fxsave(cpu_env, cpu_A0);
+            break;
+
+        CASE_MODRM_MEM_OP(1): /* fxrstor */
+            if (!(s->cpuid_features & CPUID_FXSR)
+                || (prefixes & PREFIX_LOCK)) {
+                goto illegal_op;
+            }
+            if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
+                gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
+                break;
+            }
+            gen_lea_modrm(env, s, modrm);
+            gen_helper_fxrstor(cpu_env, cpu_A0);
+            break;
+
+        CASE_MODRM_MEM_OP(2): /* ldmxcsr */
+            if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) {
+                goto illegal_op;
+            }
+            if (s->flags & HF_TS_MASK) {
+                gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
+                break;
+            }
+            gen_lea_modrm(env, s, modrm);
+            tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0, s->mem_index, MO_LEUL);
+            gen_helper_ldmxcsr(cpu_env, cpu_tmp2_i32);
+            break;
+
+        CASE_MODRM_MEM_OP(3): /* stmxcsr */
+            if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) {
+                goto illegal_op;
+            }
+            if (s->flags & HF_TS_MASK) {
+                gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
+                break;
+            }
+            gen_lea_modrm(env, s, modrm);
+            tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State, mxcsr));
+            gen_op_st_v(s, MO_32, cpu_T0, cpu_A0);
+            break;
+
+        CASE_MODRM_MEM_OP(4): /* xsave */
+            if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
+                || (prefixes & (PREFIX_LOCK | PREFIX_DATA
+                                | PREFIX_REPZ | PREFIX_REPNZ))) {
+                goto illegal_op;
+            }
+            gen_lea_modrm(env, s, modrm);
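+            /* The requested-feature bitmap is in EDX:EAX; pack it
+             * into a single 64-bit value for the helper.
+             */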
+            tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX],
+                                  cpu_regs[R_EDX]);
+            gen_helper_xsave(cpu_env, cpu_A0, cpu_tmp1_i64);
+            break;
+
+        CASE_MODRM_MEM_OP(5): /* xrstor */
+            if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
+                || (prefixes & (PREFIX_LOCK | PREFIX_DATA
+                                | PREFIX_REPZ | PREFIX_REPNZ))) {
+                goto illegal_op;
+            }
+            gen_lea_modrm(env, s, modrm);
+            tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX],
+                                  cpu_regs[R_EDX]);
+            gen_helper_xrstor(cpu_env, cpu_A0, cpu_tmp1_i64);
+            /* XRSTOR is how MPX is enabled, which changes how
+               we translate.  Thus we need to end the TB.  */
+            gen_update_cc_op(s);
+            gen_jmp_im(s->pc - s->cs_base);
+            gen_eob(s);
+            break;
+
+        CASE_MODRM_MEM_OP(6): /* xsaveopt / clwb */
+            if (prefixes & PREFIX_LOCK) {
+                goto illegal_op;
+            }
+            if (prefixes & PREFIX_DATA) {
+                /* clwb */
+                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLWB)) {
+                    goto illegal_op;
+                }
+                gen_nop_modrm(env, s, modrm);
+            } else {
+                /* xsaveopt */
+                if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
+                    || (s->cpuid_xsave_features & CPUID_XSAVE_XSAVEOPT) == 0
+                    || (prefixes & (PREFIX_REPZ | PREFIX_REPNZ))) {
+                    goto illegal_op;
+                }
+                gen_lea_modrm(env, s, modrm);
+                tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX],
+                                      cpu_regs[R_EDX]);
+                gen_helper_xsaveopt(cpu_env, cpu_A0, cpu_tmp1_i64);
+            }
+            break;
+
+        CASE_MODRM_MEM_OP(7): /* clflush / clflushopt */
+            if (prefixes & PREFIX_LOCK) {
+                goto illegal_op;
+            }
+            if (prefixes & PREFIX_DATA) {
+                /* clflushopt */
+                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLFLUSHOPT)) {
+                    goto illegal_op;
+                }
+            } else {
+                /* clflush */
+                if ((s->prefix & (PREFIX_REPZ | PREFIX_REPNZ))
+                    || !(s->cpuid_features & CPUID_CLFLUSH)) {
+                    goto illegal_op;
+                }
+            }
+            gen_nop_modrm(env, s, modrm);
+            break;
+
+        case 0xc0 ... 0xc7: /* rdfsbase (f3 0f ae /0) */
+        case 0xc8 ... 0xcf: /* rdgsbase (f3 0f ae /1) */
+        case 0xd0 ... 0xd7: /* wrfsbase (f3 0f ae /2) */
+        case 0xd8 ... 0xdf: /* wrgsbase (f3 0f ae /3) */
+            if (CODE64(s)
+                && (prefixes & PREFIX_REPZ)
+                && !(prefixes & PREFIX_LOCK)
+                && (s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_FSGSBASE)) {
+                TCGv base, treg, src, dst;
+
+                /* Preserve hflags bits by testing CR4 at runtime.  */
+                tcg_gen_movi_i32(cpu_tmp2_i32, CR4_FSGSBASE_MASK);
+                gen_helper_cr4_testbit(cpu_env, cpu_tmp2_i32);
+
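+                /* Bit 3 of the modrm byte selects FS vs GS, and bit 4
+                   selects read vs write.  */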
+                base = cpu_seg_base[modrm & 8 ? R_GS : R_FS];
+                treg = cpu_regs[(modrm & 7) | REX_B(s)];
+
+                if (modrm & 0x10) {
+                    /* wr*base */
+                    dst = base, src = treg;
+                } else {
+                    /* rd*base */
+                    dst = treg, src = base;
+                }
+
+                if (s->dflag == MO_32) {
+                    tcg_gen_ext32u_tl(dst, src);
+                } else {
+                    tcg_gen_mov_tl(dst, src);
+                }
+                break;
+            }
+            goto unknown_op;
+
+        case 0xf8: /* sfence / pcommit */
+            if (prefixes & PREFIX_DATA) {
+                /* pcommit */
+                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_PCOMMIT)
+                    || (prefixes & PREFIX_LOCK)) {
+                    goto illegal_op;
+                }
+                break;
+            }
+            /* fallthru */
+        case 0xf9 ... 0xff: /* sfence */
+            if (!(s->cpuid_features & CPUID_SSE)
+                || (prefixes & PREFIX_LOCK)) {
+                goto illegal_op;
+            }
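+            /* SFENCE orders stores against stores only.  */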
+            tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
+            break;
+        case 0xe8 ... 0xef: /* lfence */
+            if (!(s->cpuid_features & CPUID_SSE)
+                || (prefixes & PREFIX_LOCK)) {
+                goto illegal_op;
+            }
+            tcg_gen_mb(TCG_MO_LD_LD | TCG_BAR_SC);
+            break;
+        case 0xf0 ... 0xf7: /* mfence */
+            if (!(s->cpuid_features & CPUID_SSE2)
+                || (prefixes & PREFIX_LOCK)) {
+                goto illegal_op;
+            }
+            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
+            break;
+
+        default:
+            goto unknown_op;
+        }
+        break;
+
+    case 0x10d: /* 3DNow! prefetch(w) */
+        modrm = cpu_ldub_code(env, s->pc++);
+        mod = (modrm >> 6) & 3;
+        if (mod == 3)
+            goto illegal_op;
+        gen_nop_modrm(env, s, modrm);
+        break;
+    case 0x1aa: /* rsm */
+        gen_svm_check_intercept(s, pc_start, SVM_EXIT_RSM);
+        if (!(s->flags & HF_SMM_MASK))
+            goto illegal_op;
+        gen_update_cc_op(s);
+        gen_jmp_im(s->pc - s->cs_base);
+        gen_helper_rsm(cpu_env);
+        gen_eob(s);
+        break;
+    case 0x1b8: /* SSE4.2 popcnt */
+        if ((prefixes & (PREFIX_REPZ | PREFIX_LOCK | PREFIX_REPNZ)) !=
+             PREFIX_REPZ)
+            goto illegal_op;
+        if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT))
+            goto illegal_op;
+
+        modrm = cpu_ldub_code(env, s->pc++);
+        reg = ((modrm >> 3) & 7) | rex_r;
+
+        if (s->prefix & PREFIX_DATA) {
+            ot = MO_16;
+        } else {
+            ot = mo_64_32(dflag);
+        }
+
+        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
+        gen_helper_popcnt(cpu_T0, cpu_env, cpu_T0, tcg_const_i32(ot));
+        gen_op_mov_reg_v(ot, reg, cpu_T0);
+
+        set_cc_op(s, CC_OP_EFLAGS);
+        break;
+    case 0x10e ... 0x10f:
+        /* 3DNow! instructions, ignore prefixes */
+        s->prefix &= ~(PREFIX_REPZ | PREFIX_REPNZ | PREFIX_DATA);
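+        /* fall through */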
+    case 0x110 ... 0x117:
+    case 0x128 ... 0x12f:
+    case 0x138 ... 0x13a:
+    case 0x150 ... 0x179:
+    case 0x17c ... 0x17f:
+    case 0x1c2:
+    case 0x1c4 ... 0x1c6:
+    case 0x1d0 ... 0x1fe:
+        gen_sse(env, s, b, pc_start, rex_r);
+        break;
+    default:
+        goto unknown_op;
+    }
+    return s->pc;
+ illegal_op:
+    gen_illegal_opcode(s);
+    return s->pc;
+ unknown_op:
+    gen_unknown_opcode(env, s);
+    return s->pc;
+}
+
+void tcg_x86_init(void)
+{
+    static const char reg_names[CPU_NB_REGS][4] = {
+#ifdef TARGET_X86_64
+        [R_EAX] = "rax",
+        [R_EBX] = "rbx",
+        [R_ECX] = "rcx",
+        [R_EDX] = "rdx",
+        [R_ESI] = "rsi",
+        [R_EDI] = "rdi",
+        [R_EBP] = "rbp",
+        [R_ESP] = "rsp",
+        [8]  = "r8",
+        [9]  = "r9",
+        [10] = "r10",
+        [11] = "r11",
+        [12] = "r12",
+        [13] = "r13",
+        [14] = "r14",
+        [15] = "r15",
+#else
+        [R_EAX] = "eax",
+        [R_EBX] = "ebx",
+        [R_ECX] = "ecx",
+        [R_EDX] = "edx",
+        [R_ESI] = "esi",
+        [R_EDI] = "edi",
+        [R_EBP] = "ebp",
+        [R_ESP] = "esp",
+#endif
+    };
+    static const char seg_base_names[6][8] = {
+        [R_CS] = "cs_base",
+        [R_DS] = "ds_base",
+        [R_ES] = "es_base",
+        [R_FS] = "fs_base",
+        [R_GS] = "gs_base",
+        [R_SS] = "ss_base",
+    };
+    static const char bnd_regl_names[4][8] = {
+        "bnd0_lb", "bnd1_lb", "bnd2_lb", "bnd3_lb"
+    };
+    static const char bnd_regu_names[4][8] = {
+        "bnd0_ub", "bnd1_ub", "bnd2_ub", "bnd3_ub"
+    };
+    int i;
+    static bool initialized;
+
+    if (initialized) {
+        return;
+    }
+    initialized = true;
+
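+    /* Register TCG globals for the CPU state fields used by the
+       translator.  */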
+    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
+    tcg_ctx.tcg_env = cpu_env;
+    cpu_cc_op = tcg_global_mem_new_i32(cpu_env,
+                                       offsetof(CPUX86State, cc_op), "cc_op");
+    cpu_cc_dst = tcg_global_mem_new(cpu_env, offsetof(CPUX86State, cc_dst),
+                                    "cc_dst");
+    cpu_cc_src = tcg_global_mem_new(cpu_env, offsetof(CPUX86State, cc_src),
+                                    "cc_src");
+    cpu_cc_src2 = tcg_global_mem_new(cpu_env, offsetof(CPUX86State, cc_src2),
+                                     "cc_src2");
+
+    for (i = 0; i < CPU_NB_REGS; ++i) {
+        cpu_regs[i] = tcg_global_mem_new(cpu_env,
+                                         offsetof(CPUX86State, regs[i]),
+                                         reg_names[i]);
+    }
+
+    for (i = 0; i < 6; ++i) {
+        cpu_seg_base[i]
+            = tcg_global_mem_new(cpu_env,
+                                 offsetof(CPUX86State, segs[i].base),
+                                 seg_base_names[i]);
+    }
+
+    for (i = 0; i < 4; ++i) {
+        cpu_bndl[i]
+            = tcg_global_mem_new_i64(cpu_env,
+                                     offsetof(CPUX86State, bnd_regs[i].lb),
+                                     bnd_regl_names[i]);
+        cpu_bndu[i]
+            = tcg_global_mem_new_i64(cpu_env,
+                                     offsetof(CPUX86State, bnd_regs[i].ub),
+                                     bnd_regu_names[i]);
+    }
+}
+
+/* generate intermediate code for basic block 'tb'.  */
+void gen_intermediate_code(CPUX86State *env, TranslationBlock *tb)
+{
+    X86CPU *cpu = x86_env_get_cpu(env);
+    CPUState *cs = CPU(cpu);
+    DisasContext dc1, *dc = &dc1;
+    target_ulong pc_ptr;
+    uint32_t flags;
+    target_ulong pc_start;
+    target_ulong cs_base;
+    int num_insns;
+    int max_insns;
+
+    /* generate intermediate code */
+    pc_start = tb->pc;
+    cs_base = tb->cs_base;
+    flags = tb->flags;
+
+    dc->pe = (flags >> HF_PE_SHIFT) & 1;
+    dc->code32 = (flags >> HF_CS32_SHIFT) & 1;
+    dc->ss32 = (flags >> HF_SS32_SHIFT) & 1;
+    dc->addseg = (flags >> HF_ADDSEG_SHIFT) & 1;
+    dc->f_st = 0;
+    dc->vm86 = (flags >> VM_SHIFT) & 1;
+    dc->cpl = (flags >> HF_CPL_SHIFT) & 3;
+    dc->iopl = (flags >> IOPL_SHIFT) & 3;
+    dc->tf = (flags >> TF_SHIFT) & 1;
+    dc->singlestep_enabled = cs->singlestep_enabled;
+    dc->cc_op = CC_OP_DYNAMIC;
+    dc->cc_op_dirty = false;
+    dc->cs_base = cs_base;
+    dc->tb = tb;
+    dc->popl_esp_hack = 0;
+    /* select memory access functions */
+    dc->mem_index = 0;
+#ifdef CONFIG_SOFTMMU
+    dc->mem_index = cpu_mmu_index(env, false);
+#endif
+    dc->cpuid_features = env->features[FEAT_1_EDX];
+    dc->cpuid_ext_features = env->features[FEAT_1_ECX];
+    dc->cpuid_ext2_features = env->features[FEAT_8000_0001_EDX];
+    dc->cpuid_ext3_features = env->features[FEAT_8000_0001_ECX];
+    dc->cpuid_7_0_ebx_features = env->features[FEAT_7_0_EBX];
+    dc->cpuid_xsave_features = env->features[FEAT_XSAVE];
+#ifdef TARGET_X86_64
+    dc->lma = (flags >> HF_LMA_SHIFT) & 1;
+    dc->code64 = (flags >> HF_CS64_SHIFT) & 1;
+#endif
+    dc->flags = flags;
+    dc->jmp_opt = !(dc->tf || cs->singlestep_enabled ||
+                    (flags & HF_INHIBIT_IRQ_MASK));
+    /* Do not optimize repz jumps at all in icount mode, because
+       rep movsS instructions are executed with different paths
+       in the !repz_opt and repz_opt modes.  The first is always
+       used, except in single step mode.  Disabling the jump
+       optimization makes the control paths equivalent in normal
+       and single step modes.
+       As a result there is no jump optimization for repz in
+       record/replay mode, and there is always an additional
+       step for ecx=0 when icount is enabled.
+     */
+    dc->repz_opt = !dc->jmp_opt && !(tb->cflags & CF_USE_ICOUNT);
+#if 0
+    /* check addseg logic */
+    if (!dc->addseg && (dc->vm86 || !dc->pe || !dc->code32))
+        printf("ERROR addseg\n");
+#endif
+
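+    /* Scratch temporaries shared by the per-insn generators; they are
+       allocated once per translation block.  */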
+    cpu_T0 = tcg_temp_new();
+    cpu_T1 = tcg_temp_new();
+    cpu_A0 = tcg_temp_new();
+
+    cpu_tmp0 = tcg_temp_new();
+    cpu_tmp1_i64 = tcg_temp_new_i64();
+    cpu_tmp2_i32 = tcg_temp_new_i32();
+    cpu_tmp3_i32 = tcg_temp_new_i32();
+    cpu_tmp4 = tcg_temp_new();
+    cpu_ptr0 = tcg_temp_new_ptr();
+    cpu_ptr1 = tcg_temp_new_ptr();
+    cpu_cc_srcT = tcg_temp_local_new();
+
+    dc->is_jmp = DISAS_NEXT;
+    pc_ptr = pc_start;
+    num_insns = 0;
+    max_insns = tb->cflags & CF_COUNT_MASK;
+    if (max_insns == 0) {
+        max_insns = CF_COUNT_MASK;
+    }
+    if (max_insns > TCG_MAX_INSNS) {
+        max_insns = TCG_MAX_INSNS;
+    }
+
+    gen_tb_start(tb);
+    for(;;) {
+        tcg_gen_insn_start(pc_ptr, dc->cc_op);
+        num_insns++;
+
+        /* If RF is set, suppress an internally generated breakpoint.  */
+        if (unlikely(cpu_breakpoint_test(cs, pc_ptr,
+                                         tb->flags & HF_RF_MASK
+                                         ? BP_GDB : BP_ANY))) {
+            gen_debug(dc, pc_ptr - dc->cs_base);
+            /* The address covered by the breakpoint must be included in
+               [tb->pc, tb->pc + tb->size) in order for it to be
+               properly cleared -- thus we increment the PC here so that
+               the logic setting tb->size below does the right thing.  */
+            pc_ptr += 1;
+            goto done_generating;
+        }
+        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
+            gen_io_start();
+        }
+
+        pc_ptr = disas_insn(env, dc, pc_ptr);
+        /* stop translation if indicated */
+        if (dc->is_jmp)
+            break;
+        /* In single step mode, we generate only one instruction and
+           then generate an exception.  */
+        /* If irqs were inhibited with HF_INHIBIT_IRQ_MASK, we clear
+           the flag and abort the translation to give the irqs a
+           chance to happen.  */
+        if (dc->tf || dc->singlestep_enabled ||
+            (flags & HF_INHIBIT_IRQ_MASK)) {
+            gen_jmp_im(pc_ptr - dc->cs_base);
+            gen_eob(dc);
+            break;
+        }
+        /* In icount mode, do not let an instruction cross a page
+           boundary, since the crossing itself can cause an exception.
+           Break the block when the next instruction might cross the
+           boundary (or starts exactly on it); if the current
+           instruction has already crossed it, that is fine, because
+           no exception stopped this code.
+         */
+        if ((tb->cflags & CF_USE_ICOUNT)
+            && ((pc_ptr & TARGET_PAGE_MASK)
+                != ((pc_ptr + TARGET_MAX_INSN_SIZE - 1) & TARGET_PAGE_MASK)
+                || (pc_ptr & ~TARGET_PAGE_MASK) == 0)) {
+            gen_jmp_im(pc_ptr - dc->cs_base);
+            gen_eob(dc);
+            break;
+        }
+        /* if too long translation, stop generation too */
+        if (tcg_op_buf_full() ||
+            (pc_ptr - pc_start) >= (TARGET_PAGE_SIZE - 32) ||
+            num_insns >= max_insns) {
+            gen_jmp_im(pc_ptr - dc->cs_base);
+            gen_eob(dc);
+            break;
+        }
+        if (singlestep) {
+            gen_jmp_im(pc_ptr - dc->cs_base);
+            gen_eob(dc);
+            break;
+        }
+    }
+    if (tb->cflags & CF_LAST_IO)
+        gen_io_end();
+done_generating:
+    gen_tb_end(tb, num_insns);
+
+#ifdef DEBUG_DISAS
+    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
+        && qemu_log_in_addr_range(pc_start)) {
+        int disas_flags;
+        qemu_log_lock();
+        qemu_log("----------------\n");
+        qemu_log("IN: %s\n", lookup_symbol(pc_start));
+#ifdef TARGET_X86_64
+        if (dc->code64)
+            disas_flags = 2;
+        else
+#endif
+            disas_flags = !dc->code32;
+        log_target_disas(cs, pc_start, pc_ptr - pc_start, disas_flags);
+        qemu_log("\n");
+        qemu_log_unlock();
+    }
+#endif
+
+    tb->size = pc_ptr - pc_start;
+    tb->icount = num_insns;
+}
+
+void restore_state_to_opc(CPUX86State *env, TranslationBlock *tb,
+                          target_ulong *data)
+{
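+    /* data[] holds the values recorded by tcg_gen_insn_start():
+       data[0] is the pc and data[1] the cc_op.  EIP is stored
+       relative to the CS base.  */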
+    int cc_op = data[1];
+    env->eip = data[0] - tb->cs_base;
+    if (cc_op != CC_OP_DYNAMIC) {
+        env->cc_op = cc_op;
+    }
+}