Diffstat (limited to 'include/exec')
-rw-r--r--  include/exec/address-spaces.h    |  41
-rw-r--r--  include/exec/cpu-all.h           | 533
-rw-r--r--  include/exec/cpu-common.h        | 124
-rw-r--r--  include/exec/cpu-defs.h          | 207
-rw-r--r--  include/exec/cputlb.h            |  46
-rw-r--r--  include/exec/def-helper.h        | 275
-rw-r--r--  include/exec/exec-all.h          | 412
-rw-r--r--  include/exec/gdbstub.h           |  53
-rw-r--r--  include/exec/gen-icount.h        |  53
-rw-r--r--  include/exec/hwaddr.h            |  24
-rw-r--r--  include/exec/ioport.h            |  78
-rw-r--r--  include/exec/iorange.h           |  31
-rw-r--r--  include/exec/memory-internal.h   | 141
-rw-r--r--  include/exec/memory.h            | 898
-rw-r--r--  include/exec/poison.h            |  64
-rw-r--r--  include/exec/softmmu-semi.h      |  77
-rw-r--r--  include/exec/softmmu_defs.h      |  37
-rw-r--r--  include/exec/softmmu_exec.h      | 163
-rw-r--r--  include/exec/softmmu_header.h    | 213
-rw-r--r--  include/exec/softmmu_template.h  | 354
-rw-r--r--  include/exec/spinlock.h          |  49
-rw-r--r--  include/exec/user/abitypes.h     |  36
-rw-r--r--  include/exec/user/thunk.h        | 189
23 files changed, 4098 insertions, 0 deletions
diff --git a/include/exec/address-spaces.h b/include/exec/address-spaces.h
new file mode 100644
index 0000000000..3d12cddeec
--- /dev/null
+++ b/include/exec/address-spaces.h
@@ -0,0 +1,41 @@
+/*
+ * Internal memory management interfaces
+ *
+ * Copyright 2011 Red Hat, Inc. and/or its affiliates
+ *
+ * Authors:
+ *  Avi Kivity <avi@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.  See
+ * the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef EXEC_MEMORY_H
+#define EXEC_MEMORY_H
+
+/*
+ * Internal interfaces between memory.c/exec.c/vl.c.  Do not #include unless
+ * you're one of them.
+ */
+
+#include "exec/memory.h"
+
+#ifndef CONFIG_USER_ONLY
+
+/* Get the root memory region.  This interface should only be used temporarily
+ * until a proper bus interface is available.
+ */
+MemoryRegion *get_system_memory(void);
+
+/* Get the root I/O port region.  This interface should only be used
+ * temporarily until a proper bus interface is available.
+ */
+MemoryRegion *get_system_io(void);
+
+extern AddressSpace address_space_memory;
+extern AddressSpace address_space_io;
+
+#endif
+
+#endif
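
For illustration (annotation, not part of the commit): board code of this era
typically creates a RAM region and attaches it to the region returned by
get_system_memory(). A minimal sketch, assuming the 1.x-era memory API
signatures; the board name and function are hypothetical:

    #include "exec/address-spaces.h"
    #include "exec/memory.h"

    static MemoryRegion board_ram;

    static void board_ram_init(ram_addr_t ram_size)
    {
        /* allocate guest RAM and map it at guest physical address 0 */
        memory_region_init_ram(&board_ram, "board.ram", ram_size);
        memory_region_add_subregion(get_system_memory(), 0, &board_ram);
    }
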
diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
new file mode 100644
index 0000000000..439e88deb4
--- /dev/null
+++ b/include/exec/cpu-all.h
@@ -0,0 +1,533 @@
+/*
+ * defines common to all virtual CPUs
+ *
+ *  Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef CPU_ALL_H
+#define CPU_ALL_H
+
+#include "qemu-common.h"
+#include "qemu/tls.h"
+#include "exec/cpu-common.h"
+#include "qemu/thread.h"
+
+/* some important defines:
+ *
+ * WORDS_ALIGNED : if defined, the host cpu can only make word aligned
+ * memory accesses.
+ *
+ * HOST_WORDS_BIGENDIAN : if defined, the host cpu is big endian and
+ * otherwise little endian.
+ *
+ * (TARGET_WORDS_ALIGNED : same for target cpu (not supported yet))
+ *
+ * TARGET_WORDS_BIGENDIAN : same for target cpu
+ */
+
+#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
+#define BSWAP_NEEDED
+#endif
+
+#ifdef BSWAP_NEEDED
+
+static inline uint16_t tswap16(uint16_t s)
+{
+    return bswap16(s);
+}
+
+static inline uint32_t tswap32(uint32_t s)
+{
+    return bswap32(s);
+}
+
+static inline uint64_t tswap64(uint64_t s)
+{
+    return bswap64(s);
+}
+
+static inline void tswap16s(uint16_t *s)
+{
+    *s = bswap16(*s);
+}
+
+static inline void tswap32s(uint32_t *s)
+{
+    *s = bswap32(*s);
+}
+
+static inline void tswap64s(uint64_t *s)
+{
+    *s = bswap64(*s);
+}
+
+#else
+
+static inline uint16_t tswap16(uint16_t s)
+{
+    return s;
+}
+
+static inline uint32_t tswap32(uint32_t s)
+{
+    return s;
+}
+
+static inline uint64_t tswap64(uint64_t s)
+{
+    return s;
+}
+
+static inline void tswap16s(uint16_t *s)
+{
+}
+
+static inline void tswap32s(uint32_t *s)
+{
+}
+
+static inline void tswap64s(uint64_t *s)
+{
+}
+
+#endif
+
+#if TARGET_LONG_SIZE == 4
+#define tswapl(s) tswap32(s)
+#define tswapls(s) tswap32s((uint32_t *)(s))
+#define bswaptls(s) bswap32s(s)
+#else
+#define tswapl(s) tswap64(s)
+#define tswapls(s) tswap64s((uint64_t *)(s))
+#define bswaptls(s) bswap64s(s)
+#endif
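
To make the tswap contract concrete (standalone illustration, not patch
content): with a little-endian host and a big-endian target, BSWAP_NEEDED is
defined and tswap32() byte-swaps; when host and target agree it is the
identity. The swap itself behaves like this:

    #include <assert.h>
    #include <stdint.h>

    /* what tswap32() reduces to when BSWAP_NEEDED is defined; qemu's
       bswap32() is equivalent to the GCC/clang builtin used here */
    static uint32_t demo_tswap32(uint32_t s)
    {
        return __builtin_bswap32(s);
    }

    int main(void)
    {
        assert(demo_tswap32(0x12345678u) == 0x78563412u);
        return 0;
    }
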
+
+/* CPU memory access without any memory or io remapping */
+
+/*
+ * the generic syntax for the memory accesses is:
+ *
+ * load: ld{type}{sign}{size}{endian}_{access_type}(ptr)
+ *
+ * store: st{type}{size}{endian}_{access_type}(ptr, val)
+ *
+ * type is:
+ * (empty): integer access
+ *   f    : float access
+ *
+ * sign is:
+ * (empty): for floats or 32 bit size
+ *   u    : unsigned
+ *   s    : signed
+ *
+ * size is:
+ *   b: 8 bits
+ *   w: 16 bits
+ *   l: 32 bits
+ *   q: 64 bits
+ *
+ * endian is:
+ * (empty): target cpu endianness or 8 bit access
+ *   r    : reversed target cpu endianness (not implemented yet)
+ *   be   : big endian (not implemented yet)
+ *   le   : little endian (not implemented yet)
+ *
+ * access_type is:
+ *   raw    : host memory access
+ *   user   : user mode access using soft MMU
+ *   kernel : kernel mode access using soft MMU
+ */
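
Two names decoded under this scheme, as a reading aid (annotation only):

    /* lduw_kernel(p):  integer load, unsigned, 16 bits ("w"), target
     *                  endianness, kernel-mode soft-MMU access.
     * stfq_raw(p, v):  float store ("f"), 64 bits ("q"), target
     *                  endianness, raw host memory access.
     */
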
+
+/* target-endianness CPU memory access functions */
+#if defined(TARGET_WORDS_BIGENDIAN)
+#define lduw_p(p) lduw_be_p(p)
+#define ldsw_p(p) ldsw_be_p(p)
+#define ldl_p(p) ldl_be_p(p)
+#define ldq_p(p) ldq_be_p(p)
+#define ldfl_p(p) ldfl_be_p(p)
+#define ldfq_p(p) ldfq_be_p(p)
+#define stw_p(p, v) stw_be_p(p, v)
+#define stl_p(p, v) stl_be_p(p, v)
+#define stq_p(p, v) stq_be_p(p, v)
+#define stfl_p(p, v) stfl_be_p(p, v)
+#define stfq_p(p, v) stfq_be_p(p, v)
+#else
+#define lduw_p(p) lduw_le_p(p)
+#define ldsw_p(p) ldsw_le_p(p)
+#define ldl_p(p) ldl_le_p(p)
+#define ldq_p(p) ldq_le_p(p)
+#define ldfl_p(p) ldfl_le_p(p)
+#define ldfq_p(p) ldfq_le_p(p)
+#define stw_p(p, v) stw_le_p(p, v)
+#define stl_p(p, v) stl_le_p(p, v)
+#define stq_p(p, v) stq_le_p(p, v)
+#define stfl_p(p, v) stfl_le_p(p, v)
+#define stfq_p(p, v) stfq_le_p(p, v)
+#endif
+
+/* MMU memory access macros */
+
+#if defined(CONFIG_USER_ONLY)
+#include <assert.h>
+#include "exec/user/abitypes.h"
+
+/* On some host systems the guest address space is reserved on the host.
+ * This allows the guest address space to be offset to a convenient location.
+ */
+#if defined(CONFIG_USE_GUEST_BASE)
+extern unsigned long guest_base;
+extern int have_guest_base;
+extern unsigned long reserved_va;
+#define GUEST_BASE guest_base
+#define RESERVED_VA reserved_va
+#else
+#define GUEST_BASE 0ul
+#define RESERVED_VA 0ul
+#endif
+
+/* All direct uses of g2h and h2g need to go away for usermode softmmu.  */
+#define g2h(x) ((void *)((unsigned long)(target_ulong)(x) + GUEST_BASE))
+
+#if HOST_LONG_BITS <= TARGET_VIRT_ADDR_SPACE_BITS
+#define h2g_valid(x) 1
+#else
+#define h2g_valid(x) ({ \
+    unsigned long __guest = (unsigned long)(x) - GUEST_BASE; \
+    (__guest < (1ul << TARGET_VIRT_ADDR_SPACE_BITS)) && \
+    (!RESERVED_VA || (__guest < RESERVED_VA)); \
+})
+#endif
+
+#define h2g(x) ({ \
+    unsigned long __ret = (unsigned long)(x) - GUEST_BASE; \
+    /* Check if given address fits target address space */ \
+    assert(h2g_valid(x)); \
+    (abi_ulong)__ret; \
+})
+
+#define saddr(x) g2h(x)
+#define laddr(x) g2h(x)
+
+#else /* !CONFIG_USER_ONLY */
+/* NOTE: we use double casts if pointers and target_ulong have
+   different sizes */
+#define saddr(x) (uint8_t *)(intptr_t)(x)
+#define laddr(x) (uint8_t *)(intptr_t)(x)
+#endif
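
The guest/host mapping above is plain pointer arithmetic: host address =
guest address + GUEST_BASE, and h2g() asserts that the inverse lands inside
the guest address space. A sketch of typical user-mode use (annotation; the
helper name is hypothetical):

    /* read a 32-bit value from guest virtual memory (user-mode only) */
    static uint32_t read_guest_u32(abi_ulong guest_addr)
    {
        return ldl_p(g2h(guest_addr));   /* g2h() adds GUEST_BASE */
    }
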
+
+#define ldub_raw(p) ldub_p(laddr((p)))
+#define ldsb_raw(p) ldsb_p(laddr((p)))
+#define lduw_raw(p) lduw_p(laddr((p)))
+#define ldsw_raw(p) ldsw_p(laddr((p)))
+#define ldl_raw(p) ldl_p(laddr((p)))
+#define ldq_raw(p) ldq_p(laddr((p)))
+#define ldfl_raw(p) ldfl_p(laddr((p)))
+#define ldfq_raw(p) ldfq_p(laddr((p)))
+#define stb_raw(p, v) stb_p(saddr((p)), v)
+#define stw_raw(p, v) stw_p(saddr((p)), v)
+#define stl_raw(p, v) stl_p(saddr((p)), v)
+#define stq_raw(p, v) stq_p(saddr((p)), v)
+#define stfl_raw(p, v) stfl_p(saddr((p)), v)
+#define stfq_raw(p, v) stfq_p(saddr((p)), v)
+
+
+#if defined(CONFIG_USER_ONLY)
+
+/* if user mode, no other memory access functions */
+#define ldub(p) ldub_raw(p)
+#define ldsb(p) ldsb_raw(p)
+#define lduw(p) lduw_raw(p)
+#define ldsw(p) ldsw_raw(p)
+#define ldl(p) ldl_raw(p)
+#define ldq(p) ldq_raw(p)
+#define ldfl(p) ldfl_raw(p)
+#define ldfq(p) ldfq_raw(p)
+#define stb(p, v) stb_raw(p, v)
+#define stw(p, v) stw_raw(p, v)
+#define stl(p, v) stl_raw(p, v)
+#define stq(p, v) stq_raw(p, v)
+#define stfl(p, v) stfl_raw(p, v)
+#define stfq(p, v) stfq_raw(p, v)
+
+#define cpu_ldub_code(env1, p) ldub_raw(p)
+#define cpu_ldsb_code(env1, p) ldsb_raw(p)
+#define cpu_lduw_code(env1, p) lduw_raw(p)
+#define cpu_ldsw_code(env1, p) ldsw_raw(p)
+#define cpu_ldl_code(env1, p) ldl_raw(p)
+#define cpu_ldq_code(env1, p) ldq_raw(p)
+
+#define cpu_ldub_data(env, addr) ldub_raw(addr)
+#define cpu_lduw_data(env, addr) lduw_raw(addr)
+#define cpu_ldsw_data(env, addr) ldsw_raw(addr)
+#define cpu_ldl_data(env, addr) ldl_raw(addr)
+#define cpu_ldq_data(env, addr) ldq_raw(addr)
+
+#define cpu_stb_data(env, addr, data) stb_raw(addr, data)
+#define cpu_stw_data(env, addr, data) stw_raw(addr, data)
+#define cpu_stl_data(env, addr, data) stl_raw(addr, data)
+#define cpu_stq_data(env, addr, data) stq_raw(addr, data)
+
+#define cpu_ldub_kernel(env, addr) ldub_raw(addr)
+#define cpu_lduw_kernel(env, addr) lduw_raw(addr)
+#define cpu_ldsw_kernel(env, addr) ldsw_raw(addr)
+#define cpu_ldl_kernel(env, addr) ldl_raw(addr)
+#define cpu_ldq_kernel(env, addr) ldq_raw(addr)
+
+#define cpu_stb_kernel(env, addr, data) stb_raw(addr, data)
+#define cpu_stw_kernel(env, addr, data) stw_raw(addr, data)
+#define cpu_stl_kernel(env, addr, data) stl_raw(addr, data)
+#define cpu_stq_kernel(env, addr, data) stq_raw(addr, data)
+
+#define ldub_kernel(p) ldub_raw(p)
+#define ldsb_kernel(p) ldsb_raw(p)
+#define lduw_kernel(p) lduw_raw(p)
+#define ldsw_kernel(p) ldsw_raw(p)
+#define ldl_kernel(p) ldl_raw(p)
+#define ldq_kernel(p) ldq_raw(p)
+#define ldfl_kernel(p) ldfl_raw(p)
+#define ldfq_kernel(p) ldfq_raw(p)
+#define stb_kernel(p, v) stb_raw(p, v)
+#define stw_kernel(p, v) stw_raw(p, v)
+#define stl_kernel(p, v) stl_raw(p, v)
+#define stq_kernel(p, v) stq_raw(p, v)
+#define stfl_kernel(p, v) stfl_raw(p, v)
+#define stfq_kernel(p, v) stfq_raw(p, v)
+
+#endif /* defined(CONFIG_USER_ONLY) */
+
+/* page related stuff */
+
+#define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS)
+#define TARGET_PAGE_MASK ~(TARGET_PAGE_SIZE - 1)
+#define TARGET_PAGE_ALIGN(addr) (((addr) + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK)
+
+/* ??? These should be the larger of uintptr_t and target_ulong.  */
+extern uintptr_t qemu_real_host_page_size;
+extern uintptr_t qemu_host_page_size;
+extern uintptr_t qemu_host_page_mask;
+
+#define HOST_PAGE_ALIGN(addr) (((addr) + qemu_host_page_size - 1) & qemu_host_page_mask)
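
A worked example of the alignment arithmetic (annotation), assuming
TARGET_PAGE_BITS == 12, i.e. 4 KiB pages:

    /* TARGET_PAGE_SIZE          == 0x1000
     * TARGET_PAGE_MASK          == ~0xfff
     * TARGET_PAGE_ALIGN(0x1234) == 0x2000   (round up to page boundary)
     * 0x1234 & TARGET_PAGE_MASK == 0x1000   (round down)
     * HOST_PAGE_ALIGN() is the same computation with the run-time host
     * page size instead of the compile-time target page size.
     */
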
+
+/* same as PROT_xxx */
+#define PAGE_READ      0x0001
+#define PAGE_WRITE     0x0002
+#define PAGE_EXEC      0x0004
+#define PAGE_BITS      (PAGE_READ | PAGE_WRITE | PAGE_EXEC)
+#define PAGE_VALID     0x0008
+/* original state of the write flag (used when tracking self-modifying
+   code) */
+#define PAGE_WRITE_ORG 0x0010
+#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
+/* FIXME: Code that sets/uses this is broken and needs to go away.  */
+#define PAGE_RESERVED  0x0020
+#endif
+
+#if defined(CONFIG_USER_ONLY)
+void page_dump(FILE *f);
+
+typedef int (*walk_memory_regions_fn)(void *, abi_ulong,
+                                      abi_ulong, unsigned long);
+int walk_memory_regions(void *, walk_memory_regions_fn);
+
+int page_get_flags(target_ulong address);
+void page_set_flags(target_ulong start, target_ulong end, int flags);
+int page_check_range(target_ulong start, target_ulong len, int flags);
+#endif
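
A sketch of how user-mode code can combine these flags with g2h() (annotation;
the helper name is hypothetical): check the whole range first, then write.

    #include <stdbool.h>
    #include <string.h>

    /* copy into guest memory only if every page in the range is writable */
    static bool copy_to_guest(abi_ulong guest_addr, const void *data,
                              size_t size)
    {
        if (page_check_range(guest_addr, size, PAGE_WRITE) < 0) {
            return false;   /* some page lacks PAGE_WRITE */
        }
        memcpy(g2h(guest_addr), data, size);
        return true;
    }
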
+
+CPUArchState *cpu_copy(CPUArchState *env);
+CPUArchState *qemu_get_cpu(int cpu);
+
+#define CPU_DUMP_CODE 0x00010000
+#define CPU_DUMP_FPU 0x00020000 /* dump FPU register state, not just integer */
+/* dump info about TCG QEMU's condition code optimization state */
+#define CPU_DUMP_CCOP 0x00040000
+
+void cpu_dump_state(CPUArchState *env, FILE *f, fprintf_function cpu_fprintf,
+                    int flags);
+void cpu_dump_statistics(CPUArchState *env, FILE *f, fprintf_function cpu_fprintf,
+                         int flags);
+
+void QEMU_NORETURN cpu_abort(CPUArchState *env, const char *fmt, ...)
+    GCC_FMT_ATTR(2, 3);
+extern CPUArchState *first_cpu;
+DECLARE_TLS(CPUArchState *,cpu_single_env);
+#define cpu_single_env tls_var(cpu_single_env)
+
+/* Flags for use in ENV->INTERRUPT_PENDING.
+
+   The numbers assigned here are non-sequential in order to preserve
+   binary compatibility with the vmstate dump.  Bit 0 (0x0001) was
+   previously used for CPU_INTERRUPT_EXIT, and is cleared when loading
+   the vmstate dump.  */
+
+/* External hardware interrupt pending.  This is typically used for
+   interrupts from devices.  */
+#define CPU_INTERRUPT_HARD        0x0002
+
+/* Exit the current TB.  This is typically used when some system-level device
+   makes some change to the memory mapping.  E.g. the A20 line change.  */
+#define CPU_INTERRUPT_EXITTB      0x0004
+
+/* Halt the CPU.  */
+#define CPU_INTERRUPT_HALT        0x0020
+
+/* Debug event pending.  */
+#define CPU_INTERRUPT_DEBUG       0x0080
+
+/* Several target-specific external hardware interrupts.  Each target/cpu.h
+   should define proper names based on these defines.  */
+#define CPU_INTERRUPT_TGT_EXT_0   0x0008
+#define CPU_INTERRUPT_TGT_EXT_1   0x0010
+#define CPU_INTERRUPT_TGT_EXT_2   0x0040
+#define CPU_INTERRUPT_TGT_EXT_3   0x0200
+#define CPU_INTERRUPT_TGT_EXT_4   0x1000
+
+/* Several target-specific internal interrupts.  These differ from the
+   preceding target-specific interrupts in that they are intended to
+   originate from within the cpu itself, typically in response to some
+   instruction being executed.  These, therefore, are not masked while
+   single-stepping within the debugger.  */
+#define CPU_INTERRUPT_TGT_INT_0   0x0100
+#define CPU_INTERRUPT_TGT_INT_1   0x0400
+#define CPU_INTERRUPT_TGT_INT_2   0x0800
+#define CPU_INTERRUPT_TGT_INT_3   0x2000
+
+/* First unused bit: 0x4000.  */
+
+/* The set of all bits that should be masked when single-stepping.  */
+#define CPU_INTERRUPT_SSTEP_MASK \
+    (CPU_INTERRUPT_HARD          \
+     | CPU_INTERRUPT_TGT_EXT_0   \
+     | CPU_INTERRUPT_TGT_EXT_1   \
+     | CPU_INTERRUPT_TGT_EXT_2   \
+     | CPU_INTERRUPT_TGT_EXT_3   \
+     | CPU_INTERRUPT_TGT_EXT_4)
+
+#ifndef CONFIG_USER_ONLY
+typedef void (*CPUInterruptHandler)(CPUArchState *, int);
+
+extern CPUInterruptHandler cpu_interrupt_handler;
+
+static inline void cpu_interrupt(CPUArchState *s, int mask)
+{
+    cpu_interrupt_handler(s, mask);
+}
+#else /* USER_ONLY */
+void cpu_interrupt(CPUArchState *env, int mask);
+#endif /* USER_ONLY */
+
+void cpu_reset_interrupt(CPUArchState *env, int mask);
+
+void cpu_exit(CPUArchState *s);
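
A sketch of the level-triggered pattern these two calls support (annotation;
the function name is hypothetical, and real PC hardware routes this through
qemu_irq and the APIC/i8259 models):

    static void cpu_irq_line_set(CPUArchState *env, int level)
    {
        if (level) {
            cpu_interrupt(env, CPU_INTERRUPT_HARD);        /* assert */
        } else {
            cpu_reset_interrupt(env, CPU_INTERRUPT_HARD);  /* deassert */
        }
    }
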
+
+/* Breakpoint/watchpoint flags */
+#define BP_MEM_READ           0x01
+#define BP_MEM_WRITE          0x02
+#define BP_MEM_ACCESS         (BP_MEM_READ | BP_MEM_WRITE)
+#define BP_STOP_BEFORE_ACCESS 0x04
+#define BP_WATCHPOINT_HIT     0x08
+#define BP_GDB                0x10
+#define BP_CPU                0x20
+
+int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
+                          CPUBreakpoint **breakpoint);
+int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags);
+void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint);
+void cpu_breakpoint_remove_all(CPUArchState *env, int mask);
+int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
+                          int flags, CPUWatchpoint **watchpoint);
+int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr,
+                          target_ulong len, int flags);
+void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint);
+void cpu_watchpoint_remove_all(CPUArchState *env, int mask);
+
+#define SSTEP_ENABLE  0x1  /* Enable simulated HW single stepping */
+#define SSTEP_NOIRQ   0x2  /* Do not use IRQ while single stepping */
+#define SSTEP_NOTIMER 0x4  /* Do not use timers while single stepping */
+
+void cpu_single_step(CPUArchState *env, int enabled);
+
+#if !defined(CONFIG_USER_ONLY)
+
+/* Return the physical page corresponding to a virtual one. Use it
+   only for debugging because no protection checks are done. Return -1
+   if no page found. */
+hwaddr cpu_get_phys_page_debug(CPUArchState *env, target_ulong addr);
+
+/* memory API */
+
+extern int phys_ram_fd;
+extern ram_addr_t ram_size;
+
+/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
+#define RAM_PREALLOC_MASK   (1 << 0)
+
+typedef struct RAMBlock {
+    struct MemoryRegion *mr;
+    uint8_t *host;
+    ram_addr_t offset;
+    ram_addr_t length;
+    uint32_t flags;
+    char idstr[256];
+    /* Reads can take either the iothread or the ramlist lock.
+     * Writes must take both locks.
+     */
+    QTAILQ_ENTRY(RAMBlock) next;
+#if defined(__linux__) && !defined(TARGET_S390X)
+    int fd;
+#endif
+} RAMBlock;
+
+typedef struct RAMList {
+    QemuMutex mutex;
+    /* Protected by the iothread lock.  */
+    uint8_t *phys_dirty;
+    RAMBlock *mru_block;
+    /* Protected by the ramlist lock.  */
+    QTAILQ_HEAD(, RAMBlock) blocks;
+    uint32_t version;
+} RAMList;
+extern RAMList ram_list;
+
+extern const char *mem_path;
+extern int mem_prealloc;
+
+/* Flags stored in the low bits of the TLB virtual address.  These are
+   defined so that fast path ram access is all zeros.  */
+/* Zero if TLB entry is valid.  */
+#define TLB_INVALID_MASK   (1 << 3)
+/* Set if TLB entry references a clean RAM page.  The iotlb entry will
+   contain the page physical address.  */
+#define TLB_NOTDIRTY    (1 << 4)
+/* Set if TLB entry is an IO callback.  */
+#define TLB_MMIO        (1 << 5)
+
+void dump_exec_info(FILE *f, fprintf_function cpu_fprintf);
+ram_addr_t last_ram_offset(void);
+void qemu_mutex_lock_ramlist(void);
+void qemu_mutex_unlock_ramlist(void);
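
A sketch of the locking discipline described in RAMBlock above: a pure reader
may take just the ramlist lock while walking the block list (annotation; the
function name is hypothetical):

    #include <stdio.h>

    static void dump_ram_blocks(void)
    {
        RAMBlock *block;

        qemu_mutex_lock_ramlist();
        QTAILQ_FOREACH(block, &ram_list.blocks, next) {
            printf("%s: " RAM_ADDR_FMT " bytes\n",
                   block->idstr, block->length);
        }
        qemu_mutex_unlock_ramlist();
    }
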
+#endif /* !CONFIG_USER_ONLY */
+
+int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
+                        uint8_t *buf, int len, int is_write);
+
+#endif /* CPU_ALL_H */
diff --git a/include/exec/cpu-common.h b/include/exec/cpu-common.h
new file mode 100644
index 0000000000..2e5f11f47f
--- /dev/null
+++ b/include/exec/cpu-common.h
@@ -0,0 +1,124 @@
+#ifndef CPU_COMMON_H
+#define CPU_COMMON_H 1
+
+/* CPU interfaces that are target independent.  */
+
+#include "exec/hwaddr.h"
+
+#ifndef NEED_CPU_H
+#include "exec/poison.h"
+#endif
+
+#include "qemu/bswap.h"
+#include "qemu/queue.h"
+
+/**
+ * CPUListState:
+ * @cpu_fprintf: Print function.
+ * @file: File to print to using @cpu_fprintf.
+ *
+ * State commonly used for iterating over CPU models.
+ */
+typedef struct CPUListState {
+    fprintf_function cpu_fprintf;
+    FILE *file;
+} CPUListState;
+
+#if !defined(CONFIG_USER_ONLY)
+
+enum device_endian {
+    DEVICE_NATIVE_ENDIAN,
+    DEVICE_BIG_ENDIAN,
+    DEVICE_LITTLE_ENDIAN,
+};
+
+/* address in the RAM (different from a physical address) */
+#if defined(CONFIG_XEN_BACKEND)
+typedef uint64_t ram_addr_t;
+#  define RAM_ADDR_MAX UINT64_MAX
+#  define RAM_ADDR_FMT "%" PRIx64
+#else
+typedef uintptr_t ram_addr_t;
+#  define RAM_ADDR_MAX UINTPTR_MAX
+#  define RAM_ADDR_FMT "%" PRIxPTR
+#endif
+
+/* memory API */
+
+typedef void CPUWriteMemoryFunc(void *opaque, hwaddr addr, uint32_t value);
+typedef uint32_t CPUReadMemoryFunc(void *opaque, hwaddr addr);
+
+void qemu_ram_remap(ram_addr_t addr, ram_addr_t length);
+/* This should only be used for ram local to a device.  */
+void *qemu_get_ram_ptr(ram_addr_t addr);
+void qemu_put_ram_ptr(void *addr);
+/* This should not be used by devices.  */
+int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr);
+ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr);
+void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev);
+
+void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
+                            int len, int is_write);
+static inline void cpu_physical_memory_read(hwaddr addr,
+                                            void *buf, int len)
+{
+    cpu_physical_memory_rw(addr, buf, len, 0);
+}
+static inline void cpu_physical_memory_write(hwaddr addr,
+                                             const void *buf, int len)
+{
+    cpu_physical_memory_rw(addr, (void *)buf, len, 1);
+}
+void *cpu_physical_memory_map(hwaddr addr,
+                              hwaddr *plen,
+                              int is_write);
+void cpu_physical_memory_unmap(void *buffer, hwaddr len,
+                               int is_write, hwaddr access_len);
+void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque));
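
A sketch of the map/use/unmap pattern (annotation; the helper name is
hypothetical): note that *plen can come back smaller than requested, and a
NULL return means the region could not be mapped directly.

    #include <string.h>

    /* zero a guest-physical buffer through a direct mapping */
    static void zero_guest_phys(hwaddr addr, hwaddr size)
    {
        hwaddr len = size;
        void *p = cpu_physical_memory_map(addr, &len, 1 /* is_write */);

        if (p) {
            memset(p, 0, len);                  /* len may be < size */
            cpu_physical_memory_unmap(p, len, 1 /* is_write */, len);
        }
    }
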
+
+bool cpu_physical_memory_is_io(hwaddr phys_addr);
+
+/* Coalesced MMIO regions are areas where write operations can be reordered.
+ * This usually implies that write operations are side-effect free.  This allows
+ * batching which can make a major impact on performance when using
+ * virtualization.
+ */
+void qemu_flush_coalesced_mmio_buffer(void);
+
+uint32_t ldub_phys(hwaddr addr);
+uint32_t lduw_le_phys(hwaddr addr);
+uint32_t lduw_be_phys(hwaddr addr);
+uint32_t ldl_le_phys(hwaddr addr);
+uint32_t ldl_be_phys(hwaddr addr);
+uint64_t ldq_le_phys(hwaddr addr);
+uint64_t ldq_be_phys(hwaddr addr);
+void stb_phys(hwaddr addr, uint32_t val);
+void stw_le_phys(hwaddr addr, uint32_t val);
+void stw_be_phys(hwaddr addr, uint32_t val);
+void stl_le_phys(hwaddr addr, uint32_t val);
+void stl_be_phys(hwaddr addr, uint32_t val);
+void stq_le_phys(hwaddr addr, uint64_t val);
+void stq_be_phys(hwaddr addr, uint64_t val);
+
+#ifdef NEED_CPU_H
+uint32_t lduw_phys(hwaddr addr);
+uint32_t ldl_phys(hwaddr addr);
+uint64_t ldq_phys(hwaddr addr);
+void stl_phys_notdirty(hwaddr addr, uint32_t val);
+void stq_phys_notdirty(hwaddr addr, uint64_t val);
+void stw_phys(hwaddr addr, uint32_t val);
+void stl_phys(hwaddr addr, uint32_t val);
+void stq_phys(hwaddr addr, uint64_t val);
+#endif
+
+void cpu_physical_memory_write_rom(hwaddr addr,
+                                   const uint8_t *buf, int len);
+
+extern struct MemoryRegion io_mem_ram;
+extern struct MemoryRegion io_mem_rom;
+extern struct MemoryRegion io_mem_unassigned;
+extern struct MemoryRegion io_mem_notdirty;
+
+#endif
+
+#endif /* !CPU_COMMON_H */
diff --git a/include/exec/cpu-defs.h b/include/exec/cpu-defs.h
new file mode 100644
index 0000000000..b22b4c6255
--- /dev/null
+++ b/include/exec/cpu-defs.h
@@ -0,0 +1,207 @@
+/*
+ * common defines for all CPUs
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef CPU_DEFS_H
+#define CPU_DEFS_H
+
+#ifndef NEED_CPU_H
+#error cpu.h included from common code
+#endif
+
+#include "config.h"
+#include <setjmp.h>
+#include <inttypes.h>
+#include <signal.h>
+#include "qemu/osdep.h"
+#include "qemu/queue.h"
+#include "exec/hwaddr.h"
+
+#ifndef TARGET_LONG_BITS
+#error TARGET_LONG_BITS must be defined before including this header
+#endif
+
+#define TARGET_LONG_SIZE (TARGET_LONG_BITS / 8)
+
+typedef int16_t target_short __attribute__ ((aligned(TARGET_SHORT_ALIGNMENT)));
+typedef uint16_t target_ushort __attribute__((aligned(TARGET_SHORT_ALIGNMENT)));
+typedef int32_t target_int __attribute__((aligned(TARGET_INT_ALIGNMENT)));
+typedef uint32_t target_uint __attribute__((aligned(TARGET_INT_ALIGNMENT)));
+typedef int64_t target_llong __attribute__((aligned(TARGET_LLONG_ALIGNMENT)));
+typedef uint64_t target_ullong __attribute__((aligned(TARGET_LLONG_ALIGNMENT)));
+/* target_ulong is the type of a virtual address */
+#if TARGET_LONG_SIZE == 4
+typedef int32_t target_long __attribute__((aligned(TARGET_LONG_ALIGNMENT)));
+typedef uint32_t target_ulong __attribute__((aligned(TARGET_LONG_ALIGNMENT)));
+#define TARGET_FMT_lx "%08x"
+#define TARGET_FMT_ld "%d"
+#define TARGET_FMT_lu "%u"
+#elif TARGET_LONG_SIZE == 8
+typedef int64_t target_long __attribute__((aligned(TARGET_LONG_ALIGNMENT)));
+typedef uint64_t target_ulong __attribute__((aligned(TARGET_LONG_ALIGNMENT)));
+#define TARGET_FMT_lx "%016" PRIx64
+#define TARGET_FMT_ld "%" PRId64
+#define TARGET_FMT_lu "%" PRIu64
+#else
+#error TARGET_LONG_SIZE undefined
+#endif
+
+#define EXCP_INTERRUPT  0x10000 /* async interruption */
+#define EXCP_HLT        0x10001 /* hlt instruction reached */
+#define EXCP_DEBUG      0x10002 /* cpu stopped after a breakpoint or singlestep */
+#define EXCP_HALTED     0x10003 /* cpu is halted (waiting for external event) */
+
+#define TB_JMP_CACHE_BITS 12
+#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)
+
+/* Only the bottom TB_JMP_PAGE_BITS of the jump cache hash bits vary for
+   addresses on the same page.  The top bits are the same.  This allows
+   TLB invalidation to quickly clear a subset of the hash table.  */
+#define TB_JMP_PAGE_BITS (TB_JMP_CACHE_BITS / 2)
+#define TB_JMP_PAGE_SIZE (1 << TB_JMP_PAGE_BITS)
+#define TB_JMP_ADDR_MASK (TB_JMP_PAGE_SIZE - 1)
+#define TB_JMP_PAGE_MASK (TB_JMP_CACHE_SIZE - TB_JMP_PAGE_SIZE)
+
+#if !defined(CONFIG_USER_ONLY)
+#define CPU_TLB_BITS 8
+#define CPU_TLB_SIZE (1 << CPU_TLB_BITS)
+
+#if HOST_LONG_BITS == 32 && TARGET_LONG_BITS == 32
+#define CPU_TLB_ENTRY_BITS 4
+#else
+#define CPU_TLB_ENTRY_BITS 5
+#endif
+
+typedef struct CPUTLBEntry {
+    /* bit TARGET_LONG_BITS to TARGET_PAGE_BITS : virtual address
+       bit TARGET_PAGE_BITS-1..4  : Nonzero for accesses that should not
+                                    go directly to ram.
+       bit 3                      : indicates that the entry is invalid
+       bit 2..0                   : zero
+    */
+    target_ulong addr_read;
+    target_ulong addr_write;
+    target_ulong addr_code;
+    /* Addend to virtual address to get host address.  IO accesses
+       use the corresponding iotlb value.  */
+    uintptr_t addend;
+    /* padding to get a power of two size */
+    uint8_t dummy[(1 << CPU_TLB_ENTRY_BITS) -
+                  (sizeof(target_ulong) * 3 +
+                   ((-sizeof(target_ulong) * 3) & (sizeof(uintptr_t) - 1)) +
+                   sizeof(uintptr_t))];
+} CPUTLBEntry;
+
+extern int CPUTLBEntry_wrong_size[sizeof(CPUTLBEntry) == (1 << CPU_TLB_ENTRY_BITS) ? 1 : -1];
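
A worked instance of the padding arithmetic above (annotation): on a 64-bit
host emulating a 32-bit target, CPU_TLB_ENTRY_BITS is 5, so:

    /*   3 * sizeof(target_ulong)             = 12 bytes
     *   filler to align addend ((-12) & 7)   =  4 bytes
     *   sizeof(uintptr_t) addend             =  8 bytes
     *   dummy[] = 32 - (12 + 4 + 8)          =  8 bytes
     * giving sizeof(CPUTLBEntry) == 32 == 1 << CPU_TLB_ENTRY_BITS,
     * which the extern array declaration above checks at compile time.
     */
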
+
+#define CPU_COMMON_TLB \
+    /* The meaning of the MMU modes is defined in the target code. */   \
+    CPUTLBEntry tlb_table[NB_MMU_MODES][CPU_TLB_SIZE];                  \
+    hwaddr iotlb[NB_MMU_MODES][CPU_TLB_SIZE];               \
+    target_ulong tlb_flush_addr;                                        \
+    target_ulong tlb_flush_mask;
+
+#else
+
+#define CPU_COMMON_TLB
+
+#endif
+
+
+#ifdef HOST_WORDS_BIGENDIAN
+typedef struct icount_decr_u16 {
+    uint16_t high;
+    uint16_t low;
+} icount_decr_u16;
+#else
+typedef struct icount_decr_u16 {
+    uint16_t low;
+    uint16_t high;
+} icount_decr_u16;
+#endif
+
+struct qemu_work_item;
+
+typedef struct CPUBreakpoint {
+    target_ulong pc;
+    int flags; /* BP_* */
+    QTAILQ_ENTRY(CPUBreakpoint) entry;
+} CPUBreakpoint;
+
+typedef struct CPUWatchpoint {
+    target_ulong vaddr;
+    target_ulong len_mask;
+    int flags; /* BP_* */
+    QTAILQ_ENTRY(CPUWatchpoint) entry;
+} CPUWatchpoint;
+
+#define CPU_TEMP_BUF_NLONGS 128
+#define CPU_COMMON                                                      \
+    struct TranslationBlock *current_tb; /* currently executing TB  */  \
+    /* soft mmu support */                                              \
+    /* in order to avoid passing too many arguments to the MMIO         \
+       helpers, we store some rarely used information in the CPU        \
+       context */                                                       \
+    uintptr_t mem_io_pc; /* host pc at which the memory was             \
+                            accessed */                                 \
+    target_ulong mem_io_vaddr; /* target virtual addr at which the      \
+                                     memory was accessed */             \
+    uint32_t halted; /* Nonzero if the CPU is in suspend state */       \
+    uint32_t interrupt_request;                                         \
+    volatile sig_atomic_t exit_request;                                 \
+    CPU_COMMON_TLB                                                      \
+    struct TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE];           \
+    /* buffer for temporaries in the code generator */                  \
+    long temp_buf[CPU_TEMP_BUF_NLONGS];                                 \
+                                                                        \
+    int64_t icount_extra; /* Instructions until next timer event.  */   \
+    /* Number of cycles left, with interrupt flag in high bit.          \
+       This allows a single read-compare-cbranch-write sequence to test \
+       for both decrementer underflow and exceptions.  */               \
+    union {                                                             \
+        uint32_t u32;                                                   \
+        icount_decr_u16 u16;                                            \
+    } icount_decr;                                                      \
+    uint32_t can_do_io; /* nonzero if memory mapped IO is safe.  */     \
+                                                                        \
+    /* from this point: preserved by CPU reset */                       \
+    /* ice debug support */                                             \
+    QTAILQ_HEAD(breakpoints_head, CPUBreakpoint) breakpoints;            \
+    int singlestep_enabled;                                             \
+                                                                        \
+    QTAILQ_HEAD(watchpoints_head, CPUWatchpoint) watchpoints;            \
+    CPUWatchpoint *watchpoint_hit;                                      \
+                                                                        \
+    struct GDBRegisterState *gdb_regs;                                  \
+                                                                        \
+    /* Core interrupt code */                                           \
+    jmp_buf jmp_env;                                                    \
+    int exception_index;                                                \
+                                                                        \
+    CPUArchState *next_cpu; /* next CPU sharing TB cache */                 \
+    int cpu_index; /* CPU index (informative) */                        \
+    uint32_t host_tid; /* host thread ID */                             \
+    int numa_node; /* NUMA node this CPU belongs to */                  \
+    int nr_cores;  /* number of cores within this CPU package */        \
+    int nr_threads;/* number of threads within this CPU */              \
+    int running; /* Nonzero if cpu is currently running (user mode). */ \
+    /* user data */                                                     \
+    void *opaque;                                                       \
+                                                                        \
+    const char *cpu_model_str;
+
+#endif
diff --git a/include/exec/cputlb.h b/include/exec/cputlb.h
new file mode 100644
index 0000000000..733c885a1f
--- /dev/null
+++ b/include/exec/cputlb.h
@@ -0,0 +1,46 @@
+/*
+ *  Common CPU TLB handling
+ *
+ *  Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef CPUTLB_H
+#define CPUTLB_H
+
+#if !defined(CONFIG_USER_ONLY)
+/* cputlb.c */
+void tlb_protect_code(ram_addr_t ram_addr);
+void tlb_unprotect_code_phys(CPUArchState *env, ram_addr_t ram_addr,
+                             target_ulong vaddr);
+void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, uintptr_t start,
+                           uintptr_t length);
+MemoryRegionSection *phys_page_find(struct AddressSpaceDispatch *d,
+                                    hwaddr index);
+void cpu_tlb_reset_dirty_all(ram_addr_t start1, ram_addr_t length);
+void tlb_set_dirty(CPUArchState *env, target_ulong vaddr);
+extern int tlb_flush_count;
+
+/* exec.c */
+void tb_flush_jmp_cache(CPUArchState *env, target_ulong addr);
+hwaddr memory_region_section_get_iotlb(CPUArchState *env,
+                                                   MemoryRegionSection *section,
+                                                   target_ulong vaddr,
+                                                   hwaddr paddr,
+                                                   int prot,
+                                                   target_ulong *address);
+bool memory_region_is_unassigned(MemoryRegion *mr);
+
+#endif
+#endif
diff --git a/include/exec/def-helper.h b/include/exec/def-helper.h
new file mode 100644
index 0000000000..022a9ceb6a
--- /dev/null
+++ b/include/exec/def-helper.h
@@ -0,0 +1,275 @@
+/* Helper file for declaring TCG helper functions.
+   Should be included at the start and end of target-foo/helper.h.
+
+   Targets should use DEF_HELPER_N and DEF_HELPER_FLAGS_N to declare helper
+   functions.  Names should be specified without the helper_ prefix, and
+   the return and argument types specified.  3 basic types are understood
+   (i32, i64 and ptr).  Additional aliases are provided for convenience and
+   to match the types used by the C helper implementation.
+
+   The target helper.h should be included in all files that use/define
+   helper functions.  This will ensure that function prototypes are
+   consistent.  In addition, helper.c should include it two extra times,
+   defining:
+    GEN_HELPER 1 to produce op generation functions (gen_helper_*)
+    GEN_HELPER 2 to do runtime registration of helper functions.
+ */
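
A minimal end-to-end sketch of the machinery this header drives (annotation;
the helper name is hypothetical):

    /* in target-foo/helper.h: declare a helper taking and returning i32 */
    DEF_HELPER_2(my_add, i32, i32, i32)

    /* in target-foo/op_helper.c: the C implementation */
    uint32_t HELPER(my_add)(uint32_t a, uint32_t b)
    {
        return a + b;
    }

    /* in the translator, after the GEN_HELPER 1 expansion:
     *   gen_helper_my_add(dst, src1, src2);   dst/src1/src2 are TCGv_i32
     */
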
+
+#ifndef DEF_HELPER_H
+#define DEF_HELPER_H 1
+
+#define HELPER(name) glue(helper_, name)
+
+#define GET_TCGV_i32 GET_TCGV_I32
+#define GET_TCGV_i64 GET_TCGV_I64
+#define GET_TCGV_ptr GET_TCGV_PTR
+
+/* Some types that make sense in C, but not for TCG.  */
+#define dh_alias_i32 i32
+#define dh_alias_s32 i32
+#define dh_alias_int i32
+#define dh_alias_i64 i64
+#define dh_alias_s64 i64
+#define dh_alias_f32 i32
+#define dh_alias_f64 i64
+#if TARGET_LONG_BITS == 32
+#define dh_alias_tl i32
+#else
+#define dh_alias_tl i64
+#endif
+#define dh_alias_ptr ptr
+#define dh_alias_void void
+#define dh_alias_noreturn noreturn
+#define dh_alias_env ptr
+#define dh_alias(t) glue(dh_alias_, t)
+
+#define dh_ctype_i32 uint32_t
+#define dh_ctype_s32 int32_t
+#define dh_ctype_int int
+#define dh_ctype_i64 uint64_t
+#define dh_ctype_s64 int64_t
+#define dh_ctype_f32 float32
+#define dh_ctype_f64 float64
+#define dh_ctype_tl target_ulong
+#define dh_ctype_ptr void *
+#define dh_ctype_void void
+#define dh_ctype_noreturn void QEMU_NORETURN
+#define dh_ctype_env CPUArchState *
+#define dh_ctype(t) dh_ctype_##t
+
+/* We can't use glue() here because it falls foul of C preprocessor
+   recursive expansion rules.  */
+#define dh_retvar_decl0_void void
+#define dh_retvar_decl0_noreturn void
+#define dh_retvar_decl0_i32 TCGv_i32 retval
+#define dh_retvar_decl0_i64 TCGv_i64 retval
+#define dh_retvar_decl0_ptr TCGv_ptr retval
+#define dh_retvar_decl0(t) glue(dh_retvar_decl0_, dh_alias(t))
+
+#define dh_retvar_decl_void
+#define dh_retvar_decl_noreturn
+#define dh_retvar_decl_i32 TCGv_i32 retval,
+#define dh_retvar_decl_i64 TCGv_i64 retval,
+#define dh_retvar_decl_ptr TCGv_ptr retval,
+#define dh_retvar_decl(t) glue(dh_retvar_decl_, dh_alias(t))
+
+#define dh_retvar_void TCG_CALL_DUMMY_ARG
+#define dh_retvar_noreturn TCG_CALL_DUMMY_ARG
+#define dh_retvar_i32 GET_TCGV_i32(retval)
+#define dh_retvar_i64 GET_TCGV_i64(retval)
+#define dh_retvar_ptr GET_TCGV_ptr(retval)
+#define dh_retvar(t) glue(dh_retvar_, dh_alias(t))
+
+#define dh_is_64bit_void 0
+#define dh_is_64bit_noreturn 0
+#define dh_is_64bit_i32 0
+#define dh_is_64bit_i64 1
+#define dh_is_64bit_ptr (TCG_TARGET_REG_BITS == 64)
+#define dh_is_64bit(t) glue(dh_is_64bit_, dh_alias(t))
+
+#define dh_is_signed_void 0
+#define dh_is_signed_noreturn 0
+#define dh_is_signed_i32 0
+#define dh_is_signed_s32 1
+#define dh_is_signed_i64 0
+#define dh_is_signed_s64 1
+#define dh_is_signed_f32 0
+#define dh_is_signed_f64 0
+#define dh_is_signed_tl  0
+#define dh_is_signed_int 1
+/* ??? This is highly specific to the host cpu.  There are even special
+   extension instructions that may be required, e.g. ia64's addp4.  But
+   for now we don't support any 64-bit targets with 32-bit pointers.  */
+#define dh_is_signed_ptr 0
+#define dh_is_signed_env dh_is_signed_ptr
+#define dh_is_signed(t) dh_is_signed_##t
+
+#define dh_sizemask(t, n) \
+  sizemask |= dh_is_64bit(t) << (n*2); \
+  sizemask |= dh_is_signed(t) << (n*2+1)
+
+#define dh_arg(t, n) \
+  args[n - 1] = glue(GET_TCGV_, dh_alias(t))(glue(arg, n)); \
+  dh_sizemask(t, n)
+
+#define dh_arg_decl(t, n) glue(TCGv_, dh_alias(t)) glue(arg, n)
+
+
+#define DEF_HELPER_0(name, ret) \
+    DEF_HELPER_FLAGS_0(name, 0, ret)
+#define DEF_HELPER_1(name, ret, t1) \
+    DEF_HELPER_FLAGS_1(name, 0, ret, t1)
+#define DEF_HELPER_2(name, ret, t1, t2) \
+    DEF_HELPER_FLAGS_2(name, 0, ret, t1, t2)
+#define DEF_HELPER_3(name, ret, t1, t2, t3) \
+    DEF_HELPER_FLAGS_3(name, 0, ret, t1, t2, t3)
+#define DEF_HELPER_4(name, ret, t1, t2, t3, t4) \
+    DEF_HELPER_FLAGS_4(name, 0, ret, t1, t2, t3, t4)
+#define DEF_HELPER_5(name, ret, t1, t2, t3, t4, t5) \
+    DEF_HELPER_FLAGS_5(name, 0, ret, t1, t2, t3, t4, t5)
+
+/* MAX_OPC_PARAM_IARGS must be set to n if last entry is DEF_HELPER_FLAGS_n. */
+
+#endif /* DEF_HELPER_H */
+
+#ifndef GEN_HELPER
+/* Function prototypes.  */
+
+#define DEF_HELPER_FLAGS_0(name, flags, ret) \
+dh_ctype(ret) HELPER(name) (void);
+
+#define DEF_HELPER_FLAGS_1(name, flags, ret, t1) \
+dh_ctype(ret) HELPER(name) (dh_ctype(t1));
+
+#define DEF_HELPER_FLAGS_2(name, flags, ret, t1, t2) \
+dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2));
+
+#define DEF_HELPER_FLAGS_3(name, flags, ret, t1, t2, t3) \
+dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3));
+
+#define DEF_HELPER_FLAGS_4(name, flags, ret, t1, t2, t3, t4) \
+dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3), \
+                                   dh_ctype(t4));
+
+#define DEF_HELPER_FLAGS_5(name, flags, ret, t1, t2, t3, t4, t5) \
+dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3), \
+                            dh_ctype(t4), dh_ctype(t5));
+
+#undef GEN_HELPER
+#define GEN_HELPER -1
+
+#elif GEN_HELPER == 1
+/* Gen functions.  */
+
+#define DEF_HELPER_FLAGS_0(name, flags, ret) \
+static inline void glue(gen_helper_, name)(dh_retvar_decl0(ret)) \
+{ \
+  int sizemask; \
+  sizemask = dh_is_64bit(ret); \
+  tcg_gen_helperN(HELPER(name), flags, sizemask, dh_retvar(ret), 0, NULL); \
+}
+
+#define DEF_HELPER_FLAGS_1(name, flags, ret, t1) \
+static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) dh_arg_decl(t1, 1)) \
+{ \
+  TCGArg args[1]; \
+  int sizemask = 0; \
+  dh_sizemask(ret, 0); \
+  dh_arg(t1, 1); \
+  tcg_gen_helperN(HELPER(name), flags, sizemask, dh_retvar(ret), 1, args); \
+}
+
+#define DEF_HELPER_FLAGS_2(name, flags, ret, t1, t2) \
+static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) dh_arg_decl(t1, 1), \
+    dh_arg_decl(t2, 2)) \
+{ \
+  TCGArg args[2]; \
+  int sizemask = 0; \
+  dh_sizemask(ret, 0); \
+  dh_arg(t1, 1); \
+  dh_arg(t2, 2); \
+  tcg_gen_helperN(HELPER(name), flags, sizemask, dh_retvar(ret), 2, args); \
+}
+
+#define DEF_HELPER_FLAGS_3(name, flags, ret, t1, t2, t3) \
+static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) dh_arg_decl(t1, 1), \
+    dh_arg_decl(t2, 2), dh_arg_decl(t3, 3)) \
+{ \
+  TCGArg args[3]; \
+  int sizemask = 0; \
+  dh_sizemask(ret, 0); \
+  dh_arg(t1, 1); \
+  dh_arg(t2, 2); \
+  dh_arg(t3, 3); \
+  tcg_gen_helperN(HELPER(name), flags, sizemask, dh_retvar(ret), 3, args); \
+}
+
+#define DEF_HELPER_FLAGS_4(name, flags, ret, t1, t2, t3, t4) \
+static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) dh_arg_decl(t1, 1), \
+    dh_arg_decl(t2, 2), dh_arg_decl(t3, 3), dh_arg_decl(t4, 4)) \
+{ \
+  TCGArg args[4]; \
+  int sizemask = 0; \
+  dh_sizemask(ret, 0); \
+  dh_arg(t1, 1); \
+  dh_arg(t2, 2); \
+  dh_arg(t3, 3); \
+  dh_arg(t4, 4); \
+  tcg_gen_helperN(HELPER(name), flags, sizemask, dh_retvar(ret), 4, args); \
+}
+
+#define DEF_HELPER_FLAGS_5(name, flags, ret, t1, t2, t3, t4, t5) \
+static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
+    dh_arg_decl(t1, 1),  dh_arg_decl(t2, 2), dh_arg_decl(t3, 3), \
+    dh_arg_decl(t4, 4), dh_arg_decl(t5, 5)) \
+{ \
+  TCGArg args[5]; \
+  int sizemask = 0; \
+  dh_sizemask(ret, 0); \
+  dh_arg(t1, 1); \
+  dh_arg(t2, 2); \
+  dh_arg(t3, 3); \
+  dh_arg(t4, 4); \
+  dh_arg(t5, 5); \
+  tcg_gen_helperN(HELPER(name), flags, sizemask, dh_retvar(ret), 5, args); \
+}
+
+#undef GEN_HELPER
+#define GEN_HELPER -1
+
+#elif GEN_HELPER == 2
+/* Register helpers.  */
+
+#define DEF_HELPER_FLAGS_0(name, flags, ret) \
+tcg_register_helper(HELPER(name), #name);
+
+#define DEF_HELPER_FLAGS_1(name, flags, ret, t1) \
+DEF_HELPER_FLAGS_0(name, flags, ret)
+
+#define DEF_HELPER_FLAGS_2(name, flags, ret, t1, t2) \
+DEF_HELPER_FLAGS_0(name, flags, ret)
+
+#define DEF_HELPER_FLAGS_3(name, flags, ret, t1, t2, t3) \
+DEF_HELPER_FLAGS_0(name, flags, ret)
+
+#define DEF_HELPER_FLAGS_4(name, flags, ret, t1, t2, t3, t4) \
+DEF_HELPER_FLAGS_0(name, flags, ret)
+
+#define DEF_HELPER_FLAGS_5(name, flags, ret, t1, t2, t3, t4, t5) \
+DEF_HELPER_FLAGS_0(name, flags, ret)
+
+#undef GEN_HELPER
+#define GEN_HELPER -1
+
+#elif GEN_HELPER == -1
+/* Undefine macros.  */
+
+#undef DEF_HELPER_FLAGS_0
+#undef DEF_HELPER_FLAGS_1
+#undef DEF_HELPER_FLAGS_2
+#undef DEF_HELPER_FLAGS_3
+#undef DEF_HELPER_FLAGS_4
+#undef DEF_HELPER_FLAGS_5
+#undef GEN_HELPER
+
+#endif
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
new file mode 100644
index 0000000000..46dca74fda
--- /dev/null
+++ b/include/exec/exec-all.h
@@ -0,0 +1,412 @@
+/*
+ * internal execution defines for qemu
+ *
+ *  Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _EXEC_ALL_H_
+#define _EXEC_ALL_H_
+
+#include "qemu-common.h"
+
+/* allow seeing translation results - the slowdown should be negligible, so we leave it enabled */
+#define DEBUG_DISAS
+
+/* Page tracking code uses ram addresses in system mode, and virtual
+   addresses in userspace mode.  Define tb_page_addr_t to be an appropriate
+   type.  */
+#if defined(CONFIG_USER_ONLY)
+typedef abi_ulong tb_page_addr_t;
+#else
+typedef ram_addr_t tb_page_addr_t;
+#endif
+
+/* is_jmp field values */
+#define DISAS_NEXT    0 /* next instruction can be analyzed */
+#define DISAS_JUMP    1 /* only pc was modified dynamically */
+#define DISAS_UPDATE  2 /* cpu state was modified dynamically */
+#define DISAS_TB_JUMP 3 /* only pc was modified statically */
+
+struct TranslationBlock;
+typedef struct TranslationBlock TranslationBlock;
+
+/* XXX: make safe guess about sizes */
+#define MAX_OP_PER_INSTR 208
+
+#if HOST_LONG_BITS == 32
+#define MAX_OPC_PARAM_PER_ARG 2
+#else
+#define MAX_OPC_PARAM_PER_ARG 1
+#endif
+#define MAX_OPC_PARAM_IARGS 5
+#define MAX_OPC_PARAM_OARGS 1
+#define MAX_OPC_PARAM_ARGS (MAX_OPC_PARAM_IARGS + MAX_OPC_PARAM_OARGS)
+
+/* A Call op needs up to 4 + 2N parameters on 32-bit archs,
+ * and up to 4 + N parameters on 64-bit archs
+ * (N = number of input arguments + output arguments).  */
+#define MAX_OPC_PARAM (4 + (MAX_OPC_PARAM_PER_ARG * MAX_OPC_PARAM_ARGS))
+#define OPC_BUF_SIZE 640
+#define OPC_MAX_SIZE (OPC_BUF_SIZE - MAX_OP_PER_INSTR)
+
+/* Maximum size a TCG op can expand to.  This is complicated because a
+   single op may require several host instructions and register reloads.
+   For now take a wild guess at 192 bytes, which should allow at least
+   a couple of fixup instructions per argument.  */
+#define TCG_MAX_OP_SIZE 192
+
+#define OPPARAM_BUF_SIZE (OPC_BUF_SIZE * MAX_OPC_PARAM)
+
+#include "qemu/log.h"
+
+void gen_intermediate_code(CPUArchState *env, struct TranslationBlock *tb);
+void gen_intermediate_code_pc(CPUArchState *env, struct TranslationBlock *tb);
+void restore_state_to_opc(CPUArchState *env, struct TranslationBlock *tb,
+                          int pc_pos);
+
+void cpu_gen_init(void);
+int cpu_gen_code(CPUArchState *env, struct TranslationBlock *tb,
+                 int *gen_code_size_ptr);
+bool cpu_restore_state(CPUArchState *env, uintptr_t searched_pc);
+
+void QEMU_NORETURN cpu_resume_from_signal(CPUArchState *env1, void *puc);
+void QEMU_NORETURN cpu_io_recompile(CPUArchState *env, uintptr_t retaddr);
+TranslationBlock *tb_gen_code(CPUArchState *env, 
+                              target_ulong pc, target_ulong cs_base, int flags,
+                              int cflags);
+void cpu_exec_init(CPUArchState *env);
+void QEMU_NORETURN cpu_loop_exit(CPUArchState *env1);
+int page_unprotect(target_ulong address, uintptr_t pc, void *puc);
+void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
+                                   int is_cpu_write_access);
+void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end,
+                              int is_cpu_write_access);
+#if !defined(CONFIG_USER_ONLY)
+/* cputlb.c */
+void tlb_flush_page(CPUArchState *env, target_ulong addr);
+void tlb_flush(CPUArchState *env, int flush_global);
+void tlb_set_page(CPUArchState *env, target_ulong vaddr,
+                  hwaddr paddr, int prot,
+                  int mmu_idx, target_ulong size);
+void tb_invalidate_phys_addr(hwaddr addr);
+#else
+static inline void tlb_flush_page(CPUArchState *env, target_ulong addr)
+{
+}
+
+static inline void tlb_flush(CPUArchState *env, int flush_global)
+{
+}
+#endif
+
+#define CODE_GEN_ALIGN           16 /* must be >= the size of an icache line */
+
+#define CODE_GEN_PHYS_HASH_BITS     15
+#define CODE_GEN_PHYS_HASH_SIZE     (1 << CODE_GEN_PHYS_HASH_BITS)
+
+/* estimated block size for TB allocation */
+/* XXX: use a per-target average code fragment size and modulate it
+   according to the host CPU */
+#if defined(CONFIG_SOFTMMU)
+#define CODE_GEN_AVG_BLOCK_SIZE 128
+#else
+#define CODE_GEN_AVG_BLOCK_SIZE 64
+#endif
+
+#if defined(__arm__) || defined(_ARCH_PPC) \
+    || defined(__x86_64__) || defined(__i386__) \
+    || defined(__sparc__) \
+    || defined(CONFIG_TCG_INTERPRETER)
+#define USE_DIRECT_JUMP
+#endif
+
+struct TranslationBlock {
+    target_ulong pc;   /* simulated PC corresponding to this block (EIP + CS base) */
+    target_ulong cs_base; /* CS base for this block */
+    uint64_t flags; /* flags defining in which context the code was generated */
+    uint16_t size;      /* size of target code for this block (1 <=
+                           size <= TARGET_PAGE_SIZE) */
+    uint16_t cflags;    /* compile flags */
+#define CF_COUNT_MASK  0x7fff
+#define CF_LAST_IO     0x8000 /* Last insn may be an IO access.  */
+
+    uint8_t *tc_ptr;    /* pointer to the translated code */
+    /* next matching tb for physical address. */
+    struct TranslationBlock *phys_hash_next;
+    /* first and second physical page containing code. The lower bit
+       of the pointer tells the index in page_next[] */
+    struct TranslationBlock *page_next[2];
+    tb_page_addr_t page_addr[2];
+
+    /* the following data are used to directly call another TB from
+       the code of this one. */
+    uint16_t tb_next_offset[2]; /* offset of original jump target */
+#ifdef USE_DIRECT_JUMP
+    uint16_t tb_jmp_offset[2]; /* offset of jump instruction */
+#else
+    uintptr_t tb_next[2]; /* address of jump generated code */
+#endif
+    /* list of TBs jumping to this one. This is a circular list using
+       the two least significant bits of the pointers to tell what is
+       the next pointer: 0 = jmp_next[0], 1 = jmp_next[1], 2 =
+       jmp_first */
+    struct TranslationBlock *jmp_next[2];
+    struct TranslationBlock *jmp_first;
+    uint32_t icount;
+};
+
+static inline unsigned int tb_jmp_cache_hash_page(target_ulong pc)
+{
+    target_ulong tmp;
+    tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
+    return (tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK;
+}
+
+static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
+{
+    target_ulong tmp;
+    tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
+    return (((tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK)
+	    | (tmp & TB_JMP_ADDR_MASK));
+}
+
+static inline unsigned int tb_phys_hash_func(tb_page_addr_t pc)
+{
+    return (pc >> 2) & (CODE_GEN_PHYS_HASH_SIZE - 1);
+}
+
+void tb_free(TranslationBlock *tb);
+void tb_flush(CPUArchState *env);
+void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
+
+extern TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
+
+#if defined(USE_DIRECT_JUMP)
+
+#if defined(CONFIG_TCG_INTERPRETER)
+static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
+{
+    /* patch the branch destination */
+    *(uint32_t *)jmp_addr = addr - (jmp_addr + 4);
+    /* no need to flush icache explicitly */
+}
+#elif defined(_ARCH_PPC)
+void ppc_tb_set_jmp_target(unsigned long jmp_addr, unsigned long addr);
+#define tb_set_jmp_target1 ppc_tb_set_jmp_target
+#elif defined(__i386__) || defined(__x86_64__)
+static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
+{
+    /* patch the branch destination */
+    *(uint32_t *)jmp_addr = addr - (jmp_addr + 4);
+    /* no need to flush icache explicitly */
+}
+#elif defined(__arm__)
+static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
+{
+#if !QEMU_GNUC_PREREQ(4, 1)
+    register unsigned long _beg __asm ("a1");
+    register unsigned long _end __asm ("a2");
+    register unsigned long _flg __asm ("a3");
+#endif
+
+    /* we could use a ldr pc, [pc, #-4] kind of branch and avoid the flush */
+    *(uint32_t *)jmp_addr =
+        (*(uint32_t *)jmp_addr & ~0xffffff)
+        | (((addr - (jmp_addr + 8)) >> 2) & 0xffffff);
+
+#if QEMU_GNUC_PREREQ(4, 1)
+    __builtin___clear_cache((char *) jmp_addr, (char *) jmp_addr + 4);
+#else
+    /* flush icache */
+    _beg = jmp_addr;
+    _end = jmp_addr + 4;
+    _flg = 0;
+    __asm __volatile__ ("swi 0x9f0002" : : "r" (_beg), "r" (_end), "r" (_flg));
+#endif
+}
+#elif defined(__sparc__)
+void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr);
+#else
+#error tb_set_jmp_target1 is missing
+#endif
+
+static inline void tb_set_jmp_target(TranslationBlock *tb,
+                                     int n, uintptr_t addr)
+{
+    uint16_t offset = tb->tb_jmp_offset[n];
+    tb_set_jmp_target1((uintptr_t)(tb->tc_ptr + offset), addr);
+}
+
+#else
+
+/* set the jump target */
+static inline void tb_set_jmp_target(TranslationBlock *tb,
+                                     int n, uintptr_t addr)
+{
+    tb->tb_next[n] = addr;
+}
+
+#endif
+
+static inline void tb_add_jump(TranslationBlock *tb, int n,
+                               TranslationBlock *tb_next)
+{
+    /* NOTE: this test is only needed for thread safety */
+    if (!tb->jmp_next[n]) {
+        /* patch the native jump address */
+        tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc_ptr);
+
+        /* add in TB jmp circular list */
+        tb->jmp_next[n] = tb_next->jmp_first;
+        tb_next->jmp_first = (TranslationBlock *)((uintptr_t)(tb) | (n));
+    }
+}
+
+#include "exec/spinlock.h"
+
+extern spinlock_t tb_lock;
+
+extern int tb_invalidated_flag;
+
+/* The return address may point to the start of the next instruction.
+   Subtracting one gets us the call instruction itself.  */
+#if defined(CONFIG_TCG_INTERPRETER)
+/* Softmmu, Alpha, MIPS, SH4 and SPARC user mode emulations call GETPC().
+   For all others, GETPC remains undefined (which makes TCI a little faster). */
+# if defined(CONFIG_SOFTMMU) || \
+    defined(TARGET_ALPHA) || defined(TARGET_MIPS) || \
+    defined(TARGET_SH4) || defined(TARGET_SPARC)
+extern uintptr_t tci_tb_ptr;
+#  define GETPC() tci_tb_ptr
+# endif
+#elif defined(__s390__) && !defined(__s390x__)
+# define GETPC() \
+    (((uintptr_t)__builtin_return_address(0) & 0x7fffffffUL) - 1)
+#elif defined(__arm__)
+/* Thumb return addresses have the low bit set, so we need to subtract two.
+   This is still safe in ARM mode because instructions are 4 bytes.  */
+# define GETPC() ((uintptr_t)__builtin_return_address(0) - 2)
+#else
+# define GETPC() ((uintptr_t)__builtin_return_address(0) - 1)
+#endif
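
A sketch of the intended GETPC() usage (annotation; the helper and exception
number are hypothetical): a slow-path helper captures its host return address
and uses it to resynchronize guest state before raising an exception.

    void HELPER(check_align)(CPUArchState *env, target_ulong addr)
    {
        if (addr & 3) {
            uintptr_t retaddr = GETPC();       /* host PC inside the TB */
            cpu_restore_state(env, retaddr);   /* recover guest PC/flags */
            env->exception_index = 1;          /* hypothetical exception */
            cpu_loop_exit(env);
        }
    }
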
+
+#if defined(CONFIG_QEMU_LDST_OPTIMIZATION) && defined(CONFIG_SOFTMMU)
+/* qemu_ld/st optimization split code generation to fast and slow path, thus,
+   it needs special handling for an MMU helper which is called from the slow
+   path, to get the fast path's pc without any additional argument.
+   It uses a tricky solution which embeds the fast path pc into the slow path.
+
+   Code flow in slow path:
+   (1) pre-process
+   (2) call MMU helper
+   (3) jump to (5)
+   (4) fast path information (implementation specific)
+   (5) post-process (e.g. stack adjust)
+   (6) jump to corresponding code of the next of fast path
+ */
+# if defined(__i386__) || defined(__x86_64__)
+/* To avoid broken disassembling, long jmp is used for embedding fast path pc,
+   so that the destination is the next code of fast path, though this jmp is
+   never executed.
+
+   call MMU helper
+   jmp POST_PROC (2byte)    <- GETRA()
+   jmp NEXT_CODE (5byte)
+   POST_PROCESS ...         <- GETRA() + 7
+ */
+#  define GETRA() ((uintptr_t)__builtin_return_address(0))
+#  define GETPC_LDST() ((uintptr_t)(GETRA() + 7 + \
+                                    *(int32_t *)((void *)GETRA() + 3) - 1))
+# elif defined (_ARCH_PPC) && !defined (_ARCH_PPC64)
+#  define GETRA() ((uintptr_t)__builtin_return_address(0))
+#  define GETPC_LDST() ((uintptr_t) ((*(int32_t *)(GETRA() - 4)) - 1))
+# else
+#  error "CONFIG_QEMU_LDST_OPTIMIZATION needs GETPC_LDST() implementation!"
+# endif
+bool is_tcg_gen_code(uintptr_t pc_ptr);
+# define GETPC_EXT() (is_tcg_gen_code(GETRA()) ? GETPC_LDST() : GETPC())
+#else
+# define GETPC_EXT() GETPC()
+#endif
+
+#if !defined(CONFIG_USER_ONLY)
+
+struct MemoryRegion *iotlb_to_region(hwaddr index);
+uint64_t io_mem_read(struct MemoryRegion *mr, hwaddr addr,
+                     unsigned size);
+void io_mem_write(struct MemoryRegion *mr, hwaddr addr,
+                  uint64_t value, unsigned size);
+
+void tlb_fill(CPUArchState *env1, target_ulong addr, int is_write, int mmu_idx,
+              uintptr_t retaddr);
+
+#include "exec/softmmu_defs.h"
+
+#define ACCESS_TYPE (NB_MMU_MODES + 1)
+#define MEMSUFFIX _code
+
+#define DATA_SIZE 1
+#include "exec/softmmu_header.h"
+
+#define DATA_SIZE 2
+#include "exec/softmmu_header.h"
+
+#define DATA_SIZE 4
+#include "exec/softmmu_header.h"
+
+#define DATA_SIZE 8
+#include "exec/softmmu_header.h"
+
+#undef ACCESS_TYPE
+#undef MEMSUFFIX
+
+#endif
+
+#if defined(CONFIG_USER_ONLY)
+static inline tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
+{
+    return addr;
+}
+#else
+/* cputlb.c */
+tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr);
+#endif
+
+typedef void (CPUDebugExcpHandler)(CPUArchState *env);
+
+void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler);
+
+/* vl.c */
+extern int singlestep;
+
+/* cpu-exec.c */
+extern volatile sig_atomic_t exit_request;
+
+/* Deterministic execution requires that IO only be performed on the last
+   instruction of a TB so that interrupts take effect immediately.  */
+static inline int can_do_io(CPUArchState *env)
+{
+    if (!use_icount) {
+        return 1;
+    }
+    /* If not executing code then assume we are ok.  */
+    if (!env->current_tb) {
+        return 1;
+    }
+    return env->can_do_io != 0;
+}
+
+#endif
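To see how GETPC_EXT() and tlb_fill() fit together, here is a minimal sketch of a 32-bit softmmu load helper in the spirit of softmmu_template.h; the miss handling is heavily simplified, so read it as an outline of the mechanism rather than the real template:

    /* Sketch only: simplified from the softmmu_template.h pattern. */
    uint32_t helper_ldl_mmu(CPUArchState *env, target_ulong addr, int mmu_idx)
    {
        int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        uintptr_t hostaddr;

        if (env->tlb_table[mmu_idx][index].addr_read !=
            (addr & (TARGET_PAGE_MASK | 3))) {
            /* TLB miss: fault the mapping in, passing the host return
             * address so a guest exception can be pinned to the right
             * translated instruction; is_write is 0 for a load. */
            tlb_fill(env, addr, 0, mmu_idx, GETPC_EXT());
        }
        /* TLB hit (or refilled entry): load straight from host memory. */
        hostaddr = addr + env->tlb_table[mmu_idx][index].addend;
        return ldl_raw(hostaddr);
    }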
diff --git a/include/exec/gdbstub.h b/include/exec/gdbstub.h
new file mode 100644
index 0000000000..668de66000
--- /dev/null
+++ b/include/exec/gdbstub.h
@@ -0,0 +1,53 @@
+#ifndef GDBSTUB_H
+#define GDBSTUB_H
+
+#define DEFAULT_GDBSTUB_PORT "1234"
+
+/* GDB breakpoint/watchpoint types */
+#define GDB_BREAKPOINT_SW        0
+#define GDB_BREAKPOINT_HW        1
+#define GDB_WATCHPOINT_WRITE     2
+#define GDB_WATCHPOINT_READ      3
+#define GDB_WATCHPOINT_ACCESS    4
+
+#ifdef NEED_CPU_H
+typedef void (*gdb_syscall_complete_cb)(CPUArchState *env,
+                                        target_ulong ret, target_ulong err);
+
+void gdb_do_syscall(gdb_syscall_complete_cb cb, const char *fmt, ...);
+int use_gdb_syscalls(void);
+void gdb_set_stop_cpu(CPUArchState *env);
+void gdb_exit(CPUArchState *, int);
+#ifdef CONFIG_USER_ONLY
+int gdb_queuesig (void);
+int gdb_handlesig (CPUArchState *, int);
+void gdb_signalled(CPUArchState *, int);
+void gdbserver_fork(CPUArchState *);
+#endif
+/* Get or set a register.  Returns the size of the register.  */
+typedef int (*gdb_reg_cb)(CPUArchState *env, uint8_t *buf, int reg);
+void gdb_register_coprocessor(CPUArchState *env,
+                              gdb_reg_cb get_reg, gdb_reg_cb set_reg,
+                              int num_regs, const char *xml, int g_pos);
+
+static inline int cpu_index(CPUArchState *env)
+{
+#if defined(CONFIG_USER_ONLY) && defined(CONFIG_USE_NPTL)
+    return env->host_tid;
+#else
+    return env->cpu_index + 1;
+#endif
+}
+
+#endif
+
+#ifdef CONFIG_USER_ONLY
+int gdbserver_start(int);
+#else
+int gdbserver_start(const char *port);
+#endif
+
+/* in gdbstub-xml.c, generated by scripts/feature_to_c.sh */
+extern const char *const xml_builtin[][2];
+
+#endif
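A hedged usage sketch for gdb_register_coprocessor(); the register file, its size, and the XML feature name are invented here, standing in for what a target's CPU init code would supply:

    /* Hypothetical: expose eight 64-bit FPU registers to the gdb stub. */
    static int fpu_gdb_get_reg(CPUArchState *env, uint8_t *buf, int reg)
    {
        memcpy(buf, &env->fpu_regs[reg], 8);   /* assumed CPU state field */
        return 8;                              /* bytes written */
    }

    static int fpu_gdb_set_reg(CPUArchState *env, uint8_t *buf, int reg)
    {
        memcpy(&env->fpu_regs[reg], buf, 8);
        return 8;                              /* bytes consumed */
    }

    static void fpu_gdb_init(CPUArchState *env)
    {
        gdb_register_coprocessor(env, fpu_gdb_get_reg, fpu_gdb_set_reg,
                                 8, "hypothetical-fpu.xml", 0);
    }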
diff --git a/include/exec/gen-icount.h b/include/exec/gen-icount.h
new file mode 100644
index 0000000000..8043b3ba26
--- /dev/null
+++ b/include/exec/gen-icount.h
@@ -0,0 +1,53 @@
+#ifndef GEN_ICOUNT_H
+#define GEN_ICOUNT_H 1
+
+#include "qemu/timer.h"
+
+/* Helpers for instruction counting code generation.  */
+
+static TCGArg *icount_arg;
+static int icount_label;
+
+static inline void gen_icount_start(void)
+{
+    TCGv_i32 count;
+
+    if (!use_icount)
+        return;
+
+    icount_label = gen_new_label();
+    count = tcg_temp_local_new_i32();
+    tcg_gen_ld_i32(count, cpu_env, offsetof(CPUArchState, icount_decr.u32));
+    /* This is a horrid hack to allow fixing up the value later.  */
+    icount_arg = tcg_ctx.gen_opparam_ptr + 1;
+    tcg_gen_subi_i32(count, count, 0xdeadbeef);
+
+    tcg_gen_brcondi_i32(TCG_COND_LT, count, 0, icount_label);
+    tcg_gen_st16_i32(count, cpu_env, offsetof(CPUArchState, icount_decr.u16.low));
+    tcg_temp_free_i32(count);
+}
+
+static void gen_icount_end(TranslationBlock *tb, int num_insns)
+{
+    if (use_icount) {
+        *icount_arg = num_insns;
+        gen_set_label(icount_label);
+        tcg_gen_exit_tb((tcg_target_long)tb + 2);
+    }
+}
+
+static inline void gen_io_start(void)
+{
+    TCGv_i32 tmp = tcg_const_i32(1);
+    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUArchState, can_do_io));
+    tcg_temp_free_i32(tmp);
+}
+
+static inline void gen_io_end(void)
+{
+    TCGv_i32 tmp = tcg_const_i32(0);
+    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUArchState, can_do_io));
+    tcg_temp_free_i32(tmp);
+}
+
+#endif
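The intended pairing in a target translator, sketched with a hypothetical helper: when icount is active, an instruction that may perform I/O is bracketed by gen_io_start()/gen_io_end() and should end its TB:

    /* Inside a hypothetical disas_insn() case for an I/O instruction: */
    if (use_icount) {
        gen_io_start();
    }
    gen_helper_do_io(cpu_env);      /* hypothetical helper performing I/O */
    if (use_icount) {
        gen_io_end();
        /* ...and terminate the TB here, so the I/O access is the last
         * instruction, as can_do_io() in cpu-all.h requires. */
    }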
diff --git a/include/exec/hwaddr.h b/include/exec/hwaddr.h
new file mode 100644
index 0000000000..251cf9216f
--- /dev/null
+++ b/include/exec/hwaddr.h
@@ -0,0 +1,24 @@
+/* Define hwaddr if it exists.  */
+
+#ifndef HWADDR_H
+#define HWADDR_H
+
+#ifndef CONFIG_USER_ONLY
+
+#define HWADDR_BITS 64
+/* hwaddr is the type of a physical address (its size can
+   be different from 'target_ulong').  */
+
+typedef uint64_t hwaddr;
+#define HWADDR_MAX UINT64_MAX
+#define TARGET_FMT_plx "%016" PRIx64
+#define HWADDR_PRId PRId64
+#define HWADDR_PRIi PRIi64
+#define HWADDR_PRIo PRIo64
+#define HWADDR_PRIu PRIu64
+#define HWADDR_PRIx PRIx64
+#define HWADDR_PRIX PRIX64
+
+#endif
+
+#endif
diff --git a/include/exec/ioport.h b/include/exec/ioport.h
new file mode 100644
index 0000000000..fc28350a3c
--- /dev/null
+++ b/include/exec/ioport.h
@@ -0,0 +1,78 @@
+/*
+ * defines ioport related functions
+ *
+ *  Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/**************************************************************************
+ * IO ports API
+ */
+
+#ifndef IOPORT_H
+#define IOPORT_H
+
+#include "qemu-common.h"
+#include "exec/iorange.h"
+
+typedef uint32_t pio_addr_t;
+#define FMT_pioaddr     PRIx32
+
+#define MAX_IOPORTS     (64 * 1024)
+#define IOPORTS_MASK    (MAX_IOPORTS - 1)
+
+/* These should really be in isa.h, but are here to make pc.h happy.  */
+typedef void (IOPortWriteFunc)(void *opaque, uint32_t address, uint32_t data);
+typedef uint32_t (IOPortReadFunc)(void *opaque, uint32_t address);
+typedef void (IOPortDestructor)(void *opaque);
+
+void ioport_register(IORange *iorange);
+int register_ioport_read(pio_addr_t start, int length, int size,
+                         IOPortReadFunc *func, void *opaque);
+int register_ioport_write(pio_addr_t start, int length, int size,
+                          IOPortWriteFunc *func, void *opaque);
+void isa_unassign_ioport(pio_addr_t start, int length);
+bool isa_is_ioport_assigned(pio_addr_t start);
+
+void cpu_outb(pio_addr_t addr, uint8_t val);
+void cpu_outw(pio_addr_t addr, uint16_t val);
+void cpu_outl(pio_addr_t addr, uint32_t val);
+uint8_t cpu_inb(pio_addr_t addr);
+uint16_t cpu_inw(pio_addr_t addr);
+uint32_t cpu_inl(pio_addr_t addr);
+
+struct MemoryRegion;
+struct MemoryRegionPortio;
+
+typedef struct PortioList {
+    const struct MemoryRegionPortio *ports;
+    struct MemoryRegion *address_space;
+    unsigned nr;
+    struct MemoryRegion **regions;
+    struct MemoryRegion **aliases;
+    void *opaque;
+    const char *name;
+} PortioList;
+
+void portio_list_init(PortioList *piolist,
+                      const struct MemoryRegionPortio *callbacks,
+                      void *opaque, const char *name);
+void portio_list_destroy(PortioList *piolist);
+void portio_list_add(PortioList *piolist,
+                     struct MemoryRegion *address_space,
+                     uint32_t addr);
+void portio_list_del(PortioList *piolist);
+
+#endif /* IOPORT_H */
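A PortioList usage sketch; the device state, handlers, and base port below are invented. The idea is to describe a block of ports once, then map it at a base address in the system I/O space:

    /* Hypothetical handlers with IOPortReadFunc/IOPortWriteFunc signatures. */
    static uint32_t mydev_ioport_read(void *opaque, uint32_t addr);
    static void mydev_ioport_write(void *opaque, uint32_t addr, uint32_t data);

    typedef struct MyDevState {
        PortioList piolist;
    } MyDevState;

    static const MemoryRegionPortio mydev_portio[] = {
        { 0, 4, 1, .read = mydev_ioport_read,      /* four byte-wide ports */
                   .write = mydev_ioport_write },
        PORTIO_END_OF_LIST(),
    };

    static void mydev_init_io(MyDevState *s)
    {
        portio_list_init(&s->piolist, mydev_portio, s, "mydev");
        portio_list_add(&s->piolist, get_system_io(), 0x3f0);
    }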
diff --git a/include/exec/iorange.h b/include/exec/iorange.h
new file mode 100644
index 0000000000..cd980a8312
--- /dev/null
+++ b/include/exec/iorange.h
@@ -0,0 +1,31 @@
+#ifndef IORANGE_H
+#define IORANGE_H
+
+#include <stdint.h>
+
+typedef struct IORange IORange;
+typedef struct IORangeOps IORangeOps;
+
+struct IORangeOps {
+    void (*read)(IORange *iorange, uint64_t offset, unsigned width,
+                 uint64_t *data);
+    void (*write)(IORange *iorange, uint64_t offset, unsigned width,
+                  uint64_t data);
+    void (*destructor)(IORange *iorange);
+};
+
+struct IORange {
+    const IORangeOps *ops;
+    uint64_t base;
+    uint64_t len;
+};
+
+static inline void iorange_init(IORange *iorange, const IORangeOps *ops,
+                                uint64_t base, uint64_t len)
+{
+    iorange->ops = ops;
+    iorange->base = base;
+    iorange->len = len;
+}
+
+#endif
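A minimal sketch of filling in IORangeOps, with a made-up device that reads as zero and ignores writes:

    static void scratch_read(IORange *iorange, uint64_t offset,
                             unsigned width, uint64_t *data)
    {
        *data = 0;                     /* all ports read as zero */
    }

    static void scratch_write(IORange *iorange, uint64_t offset,
                              unsigned width, uint64_t data)
    {
        /* writes are ignored */
    }

    static const IORangeOps scratch_ops = {
        .read = scratch_read,
        .write = scratch_write,
    };

    static IORange scratch_range;

    static void scratch_init(void)
    {
        iorange_init(&scratch_range, &scratch_ops, 0x510, 8);
        ioport_register(&scratch_range);   /* from exec/ioport.h */
    }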
diff --git a/include/exec/memory-internal.h b/include/exec/memory-internal.h
new file mode 100644
index 0000000000..1da240039d
--- /dev/null
+++ b/include/exec/memory-internal.h
@@ -0,0 +1,141 @@
+/*
+ * Declarations for obsolete exec.c functions
+ *
+ * Copyright 2011 Red Hat, Inc. and/or its affiliates
+ *
+ * Authors:
+ *  Avi Kivity <avi@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * later.  See the COPYING file in the top-level directory.
+ *
+ */
+
+/*
+ * This header is for use by exec.c and memory.c ONLY.  Do not include it.
+ * The functions declared here will be removed soon.
+ */
+
+#ifndef MEMORY_INTERNAL_H
+#define MEMORY_INTERNAL_H
+
+#ifndef CONFIG_USER_ONLY
+#include "hw/xen.h"
+
+typedef struct PhysPageEntry PhysPageEntry;
+
+struct PhysPageEntry {
+    uint16_t is_leaf : 1;
+     /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
+    uint16_t ptr : 15;
+};
+
+typedef struct AddressSpaceDispatch AddressSpaceDispatch;
+
+struct AddressSpaceDispatch {
+    /* This is a multi-level map on the physical address space.
+     * The bottom level has pointers to MemoryRegionSections.
+     */
+    PhysPageEntry phys_map;
+    MemoryListener listener;
+};
+
+void address_space_init_dispatch(AddressSpace *as);
+void address_space_destroy_dispatch(AddressSpace *as);
+
+ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
+                                   MemoryRegion *mr);
+ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr);
+void qemu_ram_free(ram_addr_t addr);
+void qemu_ram_free_from_ptr(ram_addr_t addr);
+
+struct MemoryRegion;
+struct MemoryRegionSection;
+
+void qemu_register_coalesced_mmio(hwaddr addr, ram_addr_t size);
+void qemu_unregister_coalesced_mmio(hwaddr addr, ram_addr_t size);
+
+#define VGA_DIRTY_FLAG       0x01
+#define CODE_DIRTY_FLAG      0x02
+#define MIGRATION_DIRTY_FLAG 0x08
+
+static inline int cpu_physical_memory_get_dirty_flags(ram_addr_t addr)
+{
+    return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS];
+}
+
+/* read dirty bit (return 0 or 1) */
+static inline int cpu_physical_memory_is_dirty(ram_addr_t addr)
+{
+    return cpu_physical_memory_get_dirty_flags(addr) == 0xff;
+}
+
+static inline int cpu_physical_memory_get_dirty(ram_addr_t start,
+                                                ram_addr_t length,
+                                                int dirty_flags)
+{
+    int ret = 0;
+    ram_addr_t addr, end;
+
+    end = TARGET_PAGE_ALIGN(start + length);
+    start &= TARGET_PAGE_MASK;
+    for (addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
+        ret |= cpu_physical_memory_get_dirty_flags(addr) & dirty_flags;
+    }
+    return ret;
+}
+
+static inline int cpu_physical_memory_set_dirty_flags(ram_addr_t addr,
+                                                      int dirty_flags)
+{
+    return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] |= dirty_flags;
+}
+
+static inline void cpu_physical_memory_set_dirty(ram_addr_t addr)
+{
+    cpu_physical_memory_set_dirty_flags(addr, 0xff);
+}
+
+static inline int cpu_physical_memory_clear_dirty_flags(ram_addr_t addr,
+                                                        int dirty_flags)
+{
+    int mask = ~dirty_flags;
+
+    return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] &= mask;
+}
+
+static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
+                                                       ram_addr_t length,
+                                                       int dirty_flags)
+{
+    ram_addr_t addr, end;
+
+    end = TARGET_PAGE_ALIGN(start + length);
+    start &= TARGET_PAGE_MASK;
+    for (addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
+        cpu_physical_memory_set_dirty_flags(addr, dirty_flags);
+    }
+    xen_modified_memory(start, length);
+}
+
+static inline void cpu_physical_memory_mask_dirty_range(ram_addr_t start,
+                                                        ram_addr_t length,
+                                                        int dirty_flags)
+{
+    ram_addr_t addr, end;
+
+    end = TARGET_PAGE_ALIGN(start + length);
+    start &= TARGET_PAGE_MASK;
+    for (addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
+        cpu_physical_memory_clear_dirty_flags(addr, dirty_flags);
+    }
+}
+
+void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
+                                     int dirty_flags);
+
+extern const IORangeOps memory_region_iorange_ops;
+
+#endif
+
+#endif
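Since the flags form a per-page bitmask, the typical call patterns compose with plain bit operations; a brief sketch of the shape such callers take (the functions are the ones declared above, the scenario is invented):

    static void mark_page_for_vga_and_migration(ram_addr_t addr)
    {
        cpu_physical_memory_set_dirty_flags(addr,
                                            VGA_DIRTY_FLAG | MIGRATION_DIRTY_FLAG);
    }

    static void migration_scan_page(ram_addr_t addr)
    {
        if (cpu_physical_memory_get_dirty(addr, TARGET_PAGE_SIZE,
                                          MIGRATION_DIRTY_FLAG)) {
            /* push the page out, then drop only the migration bit */
            cpu_physical_memory_clear_dirty_flags(addr, MIGRATION_DIRTY_FLAG);
        }
    }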
diff --git a/include/exec/memory.h b/include/exec/memory.h
new file mode 100644
index 0000000000..2322732dce
--- /dev/null
+++ b/include/exec/memory.h
@@ -0,0 +1,898 @@
+/*
+ * Physical memory management API
+ *
+ * Copyright 2011 Red Hat, Inc. and/or its affiliates
+ *
+ * Authors:
+ *  Avi Kivity <avi@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.  See
+ * the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef MEMORY_H
+#define MEMORY_H
+
+#ifndef CONFIG_USER_ONLY
+
+#include <stdint.h>
+#include <stdbool.h>
+#include "qemu-common.h"
+#include "exec/cpu-common.h"
+#include "exec/hwaddr.h"
+#include "qemu/queue.h"
+#include "exec/iorange.h"
+#include "exec/ioport.h"
+#include "qemu/int128.h"
+
+typedef struct MemoryRegionOps MemoryRegionOps;
+typedef struct MemoryRegion MemoryRegion;
+typedef struct MemoryRegionPortio MemoryRegionPortio;
+typedef struct MemoryRegionMmio MemoryRegionMmio;
+
+/* Must match the *_DIRTY_FLAG values in memory-internal.h.  To be replaced with dynamic
+ * registration.
+ */
+#define DIRTY_MEMORY_VGA       0
+#define DIRTY_MEMORY_CODE      1
+#define DIRTY_MEMORY_MIGRATION 3
+
+struct MemoryRegionMmio {
+    CPUReadMemoryFunc *read[3];
+    CPUWriteMemoryFunc *write[3];
+};
+
+/* Internal use; thunks between old-style IORange and MemoryRegions. */
+typedef struct MemoryRegionIORange MemoryRegionIORange;
+struct MemoryRegionIORange {
+    IORange iorange;
+    MemoryRegion *mr;
+    hwaddr offset;
+};
+
+/*
+ * Memory region callbacks
+ */
+struct MemoryRegionOps {
+    /* Read from the memory region. @addr is relative to @mr; @size is
+     * in bytes. */
+    uint64_t (*read)(void *opaque,
+                     hwaddr addr,
+                     unsigned size);
+    /* Write to the memory region. @addr is relative to @mr; @size is
+     * in bytes. */
+    void (*write)(void *opaque,
+                  hwaddr addr,
+                  uint64_t data,
+                  unsigned size);
+
+    enum device_endian endianness;
+    /* Guest-visible constraints: */
+    struct {
+        /* If nonzero, specify bounds on access sizes beyond which a machine
+         * check is thrown.
+         */
+        unsigned min_access_size;
+        unsigned max_access_size;
+        /* If true, unaligned accesses are supported.  Otherwise unaligned
+         * accesses throw machine checks.
+         */
+         bool unaligned;
+        /*
+         * If present, and returns #false, the transaction is not accepted
+         * by the device (and results in machine dependent behaviour such
+         * as a machine check exception).
+         */
+        bool (*accepts)(void *opaque, hwaddr addr,
+                        unsigned size, bool is_write);
+    } valid;
+    /* Internal implementation constraints: */
+    struct {
+        /* If nonzero, specifies the minimum size implemented.  Smaller sizes
+         * will be rounded upwards and a partial result will be returned.
+         */
+        unsigned min_access_size;
+        /* If nonzero, specifies the maximum size implemented.  Larger sizes
+         * will be done as a series of accesses with smaller sizes.
+         */
+        unsigned max_access_size;
+        /* If true, unaligned accesses are supported.  Otherwise all accesses
+         * are converted to (possibly multiple) naturally aligned accesses.
+         */
+         bool unaligned;
+    } impl;
+
+    /* If .read and .write are not present, old_portio may be used for
+     * backwards compatibility with old portio registration
+     */
+    const MemoryRegionPortio *old_portio;
+    /* If .read and .write are not present, old_mmio may be used for
+     * backwards compatibility with old mmio registration
+     */
+    const MemoryRegionMmio old_mmio;
+};
+
+typedef struct CoalescedMemoryRange CoalescedMemoryRange;
+typedef struct MemoryRegionIoeventfd MemoryRegionIoeventfd;
+
+struct MemoryRegion {
+    /* All fields are private - violators will be prosecuted */
+    const MemoryRegionOps *ops;
+    void *opaque;
+    MemoryRegion *parent;
+    Int128 size;
+    hwaddr addr;
+    void (*destructor)(MemoryRegion *mr);
+    ram_addr_t ram_addr;
+    bool subpage;
+    bool terminates;
+    bool readable;
+    bool ram;
+    bool readonly; /* For RAM regions */
+    bool enabled;
+    bool rom_device;
+    bool warning_printed; /* For reservations */
+    bool flush_coalesced_mmio;
+    MemoryRegion *alias;
+    hwaddr alias_offset;
+    unsigned priority;
+    bool may_overlap;
+    QTAILQ_HEAD(subregions, MemoryRegion) subregions;
+    QTAILQ_ENTRY(MemoryRegion) subregions_link;
+    QTAILQ_HEAD(coalesced_ranges, CoalescedMemoryRange) coalesced;
+    const char *name;
+    uint8_t dirty_log_mask;
+    unsigned ioeventfd_nb;
+    MemoryRegionIoeventfd *ioeventfds;
+};
+
+struct MemoryRegionPortio {
+    uint32_t offset;
+    uint32_t len;
+    unsigned size;
+    IOPortReadFunc *read;
+    IOPortWriteFunc *write;
+};
+
+#define PORTIO_END_OF_LIST() { }
+
+typedef struct AddressSpace AddressSpace;
+
+/**
+ * AddressSpace: describes a mapping of addresses to #MemoryRegion objects
+ */
+struct AddressSpace {
+    /* All fields are private. */
+    const char *name;
+    MemoryRegion *root;
+    struct FlatView *current_map;
+    int ioeventfd_nb;
+    struct MemoryRegionIoeventfd *ioeventfds;
+    struct AddressSpaceDispatch *dispatch;
+    QTAILQ_ENTRY(AddressSpace) address_spaces_link;
+};
+
+typedef struct MemoryRegionSection MemoryRegionSection;
+
+/**
+ * MemoryRegionSection: describes a fragment of a #MemoryRegion
+ *
+ * @mr: the region, or %NULL if empty
+ * @address_space: the address space the region is mapped in
+ * @offset_within_region: the beginning of the section, relative to @mr's start
+ * @size: the size of the section; will not exceed @mr's boundaries
+ * @offset_within_address_space: the address of the first byte of the section
+ *     relative to the region's address space
+ * @readonly: writes to this section are ignored
+ */
+struct MemoryRegionSection {
+    MemoryRegion *mr;
+    AddressSpace *address_space;
+    hwaddr offset_within_region;
+    uint64_t size;
+    hwaddr offset_within_address_space;
+    bool readonly;
+};
+
+typedef struct MemoryListener MemoryListener;
+
+/**
+ * MemoryListener: callbacks structure for updates to the physical memory map
+ *
+ * Allows a component to adjust to changes in the guest-visible memory map.
+ * Use with memory_listener_register() and memory_listener_unregister().
+ */
+struct MemoryListener {
+    void (*begin)(MemoryListener *listener);
+    void (*commit)(MemoryListener *listener);
+    void (*region_add)(MemoryListener *listener, MemoryRegionSection *section);
+    void (*region_del)(MemoryListener *listener, MemoryRegionSection *section);
+    void (*region_nop)(MemoryListener *listener, MemoryRegionSection *section);
+    void (*log_start)(MemoryListener *listener, MemoryRegionSection *section);
+    void (*log_stop)(MemoryListener *listener, MemoryRegionSection *section);
+    void (*log_sync)(MemoryListener *listener, MemoryRegionSection *section);
+    void (*log_global_start)(MemoryListener *listener);
+    void (*log_global_stop)(MemoryListener *listener);
+    void (*eventfd_add)(MemoryListener *listener, MemoryRegionSection *section,
+                        bool match_data, uint64_t data, EventNotifier *e);
+    void (*eventfd_del)(MemoryListener *listener, MemoryRegionSection *section,
+                        bool match_data, uint64_t data, EventNotifier *e);
+    void (*coalesced_mmio_add)(MemoryListener *listener, MemoryRegionSection *section,
+                               hwaddr addr, hwaddr len);
+    void (*coalesced_mmio_del)(MemoryListener *listener, MemoryRegionSection *section,
+                               hwaddr addr, hwaddr len);
+    /* Lower = earlier (during add), later (during del) */
+    unsigned priority;
+    AddressSpace *address_space_filter;
+    QTAILQ_ENTRY(MemoryListener) link;
+};
+
+/**
+ * memory_region_init: Initialize a memory region
+ *
+ * The region typically acts as a container for other memory regions.  Use
+ * memory_region_add_subregion() to add subregions.
+ *
+ * @mr: the #MemoryRegion to be initialized
+ * @name: used for debugging; not visible to the user or ABI
+ * @size: size of the region; any subregions beyond this size will be clipped
+ */
+void memory_region_init(MemoryRegion *mr,
+                        const char *name,
+                        uint64_t size);
+/**
+ * memory_region_init_io: Initialize an I/O memory region.
+ *
+ * Accesses into the region will cause the callbacks in @ops to be called.
+ * If @size is nonzero, subregions will be clipped to @size.
+ *
+ * @mr: the #MemoryRegion to be initialized.
+ * @ops: a structure containing read and write callbacks to be used when
+ *       I/O is performed on the region.
+ * @opaque: passed to the read and write callbacks of the @ops structure.
+ * @name: used for debugging; not visible to the user or ABI
+ * @size: size of the region.
+ */
+void memory_region_init_io(MemoryRegion *mr,
+                           const MemoryRegionOps *ops,
+                           void *opaque,
+                           const char *name,
+                           uint64_t size);
+
+/**
+ * memory_region_init_ram:  Initialize RAM memory region.  Accesses into the
+ *                          region will modify memory directly.
+ *
+ * @mr: the #MemoryRegion to be initialized.
+ * @name: the name of the region.
+ * @size: size of the region.
+ */
+void memory_region_init_ram(MemoryRegion *mr,
+                            const char *name,
+                            uint64_t size);
+
+/**
+ * memory_region_init_ram_ptr:  Initialize RAM memory region from a
+ *                              user-provided pointer.  Accesses into the
+ *                              region will modify memory directly.
+ *
+ * @mr: the #MemoryRegion to be initialized.
+ * @name: the name of the region.
+ * @size: size of the region.
+ * @ptr: memory to be mapped; must contain at least @size bytes.
+ */
+void memory_region_init_ram_ptr(MemoryRegion *mr,
+                                const char *name,
+                                uint64_t size,
+                                void *ptr);
+
+/**
+ * memory_region_init_alias: Initialize a memory region that aliases all or a
+ *                           part of another memory region.
+ *
+ * @mr: the #MemoryRegion to be initialized.
+ * @name: used for debugging; not visible to the user or ABI
+ * @orig: the region to be referenced; @mr will be equivalent to
+ *        @orig between @offset and @offset + @size - 1.
+ * @offset: start of the section in @orig to be referenced.
+ * @size: size of the region.
+ */
+void memory_region_init_alias(MemoryRegion *mr,
+                              const char *name,
+                              MemoryRegion *orig,
+                              hwaddr offset,
+                              uint64_t size);
+
+/**
+ * memory_region_init_rom_device:  Initialize a ROM memory region.  Writes are
+ *                                 handled via callbacks.
+ *
+ * @mr: the #MemoryRegion to be initialized.
+ * @ops: callbacks for write access handling.
+ * @name: the name of the region.
+ * @size: size of the region.
+ */
+void memory_region_init_rom_device(MemoryRegion *mr,
+                                   const MemoryRegionOps *ops,
+                                   void *opaque,
+                                   const char *name,
+                                   uint64_t size);
+
+/**
+ * memory_region_init_reservation: Initialize a memory region that reserves
+ *                                 I/O space.
+ *
+ * A reservation region primarily serves debugging purposes.  It claims I/O
+ * space that is not supposed to be handled by QEMU itself.  Any access via
+ * the memory API will cause an abort().
+ *
+ * @mr: the #MemoryRegion to be initialized
+ * @name: used for debugging; not visible to the user or ABI
+ * @size: size of the region.
+ */
+void memory_region_init_reservation(MemoryRegion *mr,
+                                    const char *name,
+                                    uint64_t size);
+/**
+ * memory_region_destroy: Destroy a memory region and reclaim all resources.
+ *
+ * @mr: the region to be destroyed.  May not currently be a subregion
+ *      (see memory_region_add_subregion()) or referenced in an alias
+ *      (see memory_region_init_alias()).
+ */
+void memory_region_destroy(MemoryRegion *mr);
+
+/**
+ * memory_region_size: get a memory region's size.
+ *
+ * @mr: the memory region being queried.
+ */
+uint64_t memory_region_size(MemoryRegion *mr);
+
+/**
+ * memory_region_is_ram: check whether a memory region is random access
+ *
+ * Returns %true if a memory region is random access.
+ *
+ * @mr: the memory region being queried
+ */
+bool memory_region_is_ram(MemoryRegion *mr);
+
+/**
+ * memory_region_is_romd: check whether a memory region is ROMD
+ *
+ * Returns %true if a memory region is ROMD and currently set to allow
+ * direct reads.
+ *
+ * @mr: the memory region being queried
+ */
+static inline bool memory_region_is_romd(MemoryRegion *mr)
+{
+    return mr->rom_device && mr->readable;
+}
+
+/**
+ * memory_region_name: get a memory region's name
+ *
+ * Returns the string that was used to initialize the memory region.
+ *
+ * @mr: the memory region being queried
+ */
+const char *memory_region_name(MemoryRegion *mr);
+
+/**
+ * memory_region_is_logging: return whether a memory region is logging writes
+ *
+ * Returns %true if the memory region is logging writes
+ *
+ * @mr: the memory region being queried
+ */
+bool memory_region_is_logging(MemoryRegion *mr);
+
+/**
+ * memory_region_is_rom: check whether a memory region is ROM
+ *
+ * Returns %true if a memory region is read-only memory.
+ *
+ * @mr: the memory region being queried
+ */
+bool memory_region_is_rom(MemoryRegion *mr);
+
+/**
+ * memory_region_get_ram_ptr: Get a pointer into a RAM memory region.
+ *
+ * Returns a host pointer to a RAM memory region (created with
+ * memory_region_init_ram() or memory_region_init_ram_ptr()).  Use with
+ * care.
+ *
+ * @mr: the memory region being queried.
+ */
+void *memory_region_get_ram_ptr(MemoryRegion *mr);
+
+/**
+ * memory_region_set_log: Turn dirty logging on or off for a region.
+ *
+ * Turns dirty logging on or off for a specified client (display, migration).
+ * Only meaningful for RAM regions.
+ *
+ * @mr: the memory region being updated.
+ * @log: whether dirty logging is to be enabled or disabled.
+ * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
+ *          %DIRTY_MEMORY_VGA.
+ */
+void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client);
+
+/**
+ * memory_region_get_dirty: Check whether a range of bytes is dirty
+ *                          for a specified client.
+ *
+ * Checks whether a range of bytes has been written to since the last
+ * call to memory_region_reset_dirty() with the same @client.  Dirty logging
+ * must be enabled.
+ *
+ * @mr: the memory region being queried.
+ * @addr: the address (relative to the start of the region) being queried.
+ * @size: the size of the range being queried.
+ * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
+ *          %DIRTY_MEMORY_VGA.
+ */
+bool memory_region_get_dirty(MemoryRegion *mr, hwaddr addr,
+                             hwaddr size, unsigned client);
+
+/**
+ * memory_region_set_dirty: Mark a range of bytes as dirty in a memory region.
+ *
+ * Marks a range of bytes as dirty, after it has been dirtied outside
+ * guest code.
+ *
+ * @mr: the memory region being dirtied.
+ * @addr: the address (relative to the start of the region) being dirtied.
+ * @size: size of the range being dirtied.
+ */
+void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
+                             hwaddr size);
+
+/**
+ * memory_region_test_and_clear_dirty: Check whether a range of bytes is dirty
+ *                                     for a specified client, and clear it.
+ *
+ * Checks whether a range of bytes has been written to since the last
+ * call to memory_region_reset_dirty() with the same @client.  Dirty logging
+ * must be enabled.
+ *
+ * @mr: the memory region being queried.
+ * @addr: the address (relative to the start of the region) being queried.
+ * @size: the size of the range being queried.
+ * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
+ *          %DIRTY_MEMORY_VGA.
+ */
+bool memory_region_test_and_clear_dirty(MemoryRegion *mr, hwaddr addr,
+                                        hwaddr size, unsigned client);
+/**
+ * memory_region_sync_dirty_bitmap: Synchronize a region's dirty bitmap with
+ *                                  any external TLBs (e.g. kvm)
+ *
+ * Flushes dirty information from accelerators such as kvm and vhost-net
+ * and makes it available to users of the memory API.
+ *
+ * @mr: the region being flushed.
+ */
+void memory_region_sync_dirty_bitmap(MemoryRegion *mr);
+
+/**
+ * memory_region_reset_dirty: Mark a range of pages as clean, for a specified
+ *                            client.
+ *
+ * Marks a range of pages as no longer dirty.
+ *
+ * @mr: the region being updated.
+ * @addr: the start of the subrange being cleaned.
+ * @size: the size of the subrange being cleaned.
+ * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
+ *          %DIRTY_MEMORY_VGA.
+ */
+void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
+                               hwaddr size, unsigned client);
+
+/**
+ * memory_region_set_readonly: Turn a memory region read-only (or read-write)
+ *
+ * Allows a memory region to be marked as read-only (turning it into a ROM).
+ * Only useful on RAM regions.
+ *
+ * @mr: the region being updated.
+ * @readonly: whether the region is to be ROM or RAM.
+ */
+void memory_region_set_readonly(MemoryRegion *mr, bool readonly);
+
+/**
+ * memory_region_rom_device_set_readable: enable/disable ROM readability
+ *
+ * Allows a ROM device (initialized with memory_region_init_rom_device())
+ * to be marked as readable (default) or not readable.  When it is readable,
+ * the device is mapped to guest memory.  When not readable, reads are
+ * forwarded to the #MemoryRegion.read function.
+ *
+ * @mr: the memory region to be updated
+ * @readable: whether reads are satisfied directly (%true) or via callbacks
+ *            (%false)
+ */
+void memory_region_rom_device_set_readable(MemoryRegion *mr, bool readable);
+
+/**
+ * memory_region_set_coalescing: Enable memory coalescing for the region.
+ *
+ * Enables writes to the region to be queued for later processing. MMIO ->write
+ * callbacks may be delayed until a non-coalesced MMIO is issued.
+ * Only useful for IO regions.  Roughly similar to write-combining hardware.
+ *
+ * @mr: the memory region to be write coalesced
+ */
+void memory_region_set_coalescing(MemoryRegion *mr);
+
+/**
+ * memory_region_add_coalescing: Enable memory coalescing for a sub-range of
+ *                               a region.
+ *
+ * Like memory_region_set_coalescing(), but works on a sub-range of a region.
+ * Multiple calls can be issued to coalesce disjoint ranges.
+ *
+ * @mr: the memory region to be updated.
+ * @offset: the start of the range within the region to be coalesced.
+ * @size: the size of the subrange to be coalesced.
+ */
+void memory_region_add_coalescing(MemoryRegion *mr,
+                                  hwaddr offset,
+                                  uint64_t size);
+
+/**
+ * memory_region_clear_coalescing: Disable MMIO coalescing for the region.
+ *
+ * Disables any coalescing caused by memory_region_set_coalescing() or
+ * memory_region_add_coalescing().  Roughly equivalent to uncacheable memory
+ * hardware.
+ *
+ * @mr: the memory region to be updated.
+ */
+void memory_region_clear_coalescing(MemoryRegion *mr);
+
+/**
+ * memory_region_set_flush_coalesced: Enforce memory coalescing flush before
+ *                                    accesses.
+ *
+ * Ensures that pending coalesced MMIO requests are flushed before the memory
+ * region is accessed. This property is automatically enabled for all regions
+ * passed to memory_region_set_coalescing() and memory_region_add_coalescing().
+ *
+ * @mr: the memory region to be updated.
+ */
+void memory_region_set_flush_coalesced(MemoryRegion *mr);
+
+/**
+ * memory_region_clear_flush_coalesced: Disable memory coalescing flush before
+ *                                      accesses.
+ *
+ * Clear the automatic coalesced MMIO flushing enabled via
+ * memory_region_set_flush_coalesced(). Note that this service has no effect on
+ * memory regions that have MMIO coalescing enabled for themselves. For them,
+ * automatic flushing will stop once coalescing is disabled.
+ *
+ * @mr: the memory region to be updated.
+ */
+void memory_region_clear_flush_coalesced(MemoryRegion *mr);
+
+/**
+ * memory_region_add_eventfd: Request an eventfd to be triggered when a word
+ *                            is written to a location.
+ *
+ * Marks a word in an IO region (initialized with memory_region_init_io())
+ * as a trigger for an eventfd event.  The I/O callback will not be called.
+ * The caller must be prepared to handle failure (that is, take the required
+ * action if the callback _is_ called).
+ *
+ * @mr: the memory region being updated.
+ * @addr: the address within @mr that is to be monitored
+ * @size: the size of the access to trigger the eventfd
+ * @match_data: whether to match against @data, instead of just @addr
+ * @data: the data to match against the guest write
+ * @e: the #EventNotifier to be triggered when @addr, @size, and @data all match.
+ */
+void memory_region_add_eventfd(MemoryRegion *mr,
+                               hwaddr addr,
+                               unsigned size,
+                               bool match_data,
+                               uint64_t data,
+                               EventNotifier *e);
+
+/**
+ * memory_region_del_eventfd: Cancel an eventfd.
+ *
+ * Cancels an eventfd trigger requested by a previous
+ * memory_region_add_eventfd() call.
+ *
+ * @mr: the memory region being updated.
+ * @addr: the address within @mr that is to be monitored
+ * @size: the size of the access to trigger the eventfd
+ * @match_data: whether to match against @data, instead of just @addr
+ * @data: the data to match against the guest write
+ * @e: the #EventNotifier to be triggered when @addr, @size, and @data all match.
+ */
+void memory_region_del_eventfd(MemoryRegion *mr,
+                               hwaddr addr,
+                               unsigned size,
+                               bool match_data,
+                               uint64_t data,
+                               EventNotifier *e);
+
+/**
+ * memory_region_add_subregion: Add a subregion to a container.
+ *
+ * Adds a subregion at @offset.  The subregion may not overlap with other
+ * subregions (except for those explicitly marked as overlapping).  A region
+ * may only be added once as a subregion (unless removed with
+ * memory_region_del_subregion()); use memory_region_init_alias() if you
+ * want a region to be a subregion in multiple locations.
+ *
+ * @mr: the region to contain the new subregion; must be a container
+ *      initialized with memory_region_init().
+ * @offset: the offset relative to @mr where @subregion is added.
+ * @subregion: the subregion to be added.
+ */
+void memory_region_add_subregion(MemoryRegion *mr,
+                                 hwaddr offset,
+                                 MemoryRegion *subregion);
+/**
+ * memory_region_add_subregion_overlap: Add a subregion to a container
+ *                                      with overlap.
+ *
+ * Adds a subregion at @offset.  The subregion may overlap with other
+ * subregions.  Conflicts are resolved by having a higher @priority hide a
+ * lower @priority. Subregions without priority are taken as @priority 0.
+ * A region may only be added once as a subregion (unless removed with
+ * memory_region_del_subregion()); use memory_region_init_alias() if you
+ * want a region to be a subregion in multiple locations.
+ *
+ * @mr: the region to contain the new subregion; must be a container
+ *      initialized with memory_region_init().
+ * @offset: the offset relative to @mr where @subregion is added.
+ * @subregion: the subregion to be added.
+ * @priority: used for resolving overlaps; highest priority wins.
+ */
+void memory_region_add_subregion_overlap(MemoryRegion *mr,
+                                         hwaddr offset,
+                                         MemoryRegion *subregion,
+                                         unsigned priority);
+
+/**
+ * memory_region_get_ram_addr: Get the ram address associated with a memory
+ *                             region
+ *
+ * DO NOT USE THIS FUNCTION.  This is a temporary workaround while the Xen
+ * code is being reworked.
+ */
+ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr);
+
+/**
+ * memory_region_del_subregion: Remove a subregion.
+ *
+ * Removes a subregion from its container.
+ *
+ * @mr: the container to be updated.
+ * @subregion: the region being removed; must be a current subregion of @mr.
+ */
+void memory_region_del_subregion(MemoryRegion *mr,
+                                 MemoryRegion *subregion);
+
+/*
+ * memory_region_set_enabled: dynamically enable or disable a region
+ *
+ * Enables or disables a memory region.  A disabled memory region
+ * ignores all accesses to itself and its subregions.  It does not
+ * obscure sibling subregions with lower priority - it simply behaves as
+ * if it was removed from the hierarchy.
+ *
+ * Regions default to being enabled.
+ *
+ * @mr: the region to be updated
+ * @enabled: whether to enable or disable the region
+ */
+void memory_region_set_enabled(MemoryRegion *mr, bool enabled);
+
+/*
+ * memory_region_set_address: dynamically update the address of a region
+ *
+ * Dynamically updates the address of a region, relative to its parent.
+ * May be used on regions that are currently part of a memory hierarchy.
+ *
+ * @mr: the region to be updated
+ * @addr: new address, relative to parent region
+ */
+void memory_region_set_address(MemoryRegion *mr, hwaddr addr);
+
+/*
+ * memory_region_set_alias_offset: dynamically update a memory alias's offset
+ *
+ * Dynamically updates the offset into the target region that an alias points
+ * to, as if the fourth argument to memory_region_init_alias() had changed.
+ *
+ * @mr: the #MemoryRegion to be updated; should be an alias.
+ * @offset: the new offset into the target memory region
+ */
+void memory_region_set_alias_offset(MemoryRegion *mr,
+                                    hwaddr offset);
+
+/**
+ * memory_region_find: locate a MemoryRegion in an address space
+ *
+ * Locates the first #MemoryRegion within an address space given by
+ * @address_space that overlaps the range given by @addr and @size.
+ *
+ * Returns a #MemoryRegionSection that describes a contiguous overlap.
+ * It will have the following characteristics:
+ *    .@offset_within_address_space >= @addr
+ *    .@offset_within_address_space + .@size <= @addr + @size
+ *    .@size = 0 iff no overlap was found
+ *    .@mr is non-%NULL iff an overlap was found
+ *
+ * @address_space: a top-level (i.e. parentless) region that contains
+ *       the region to be found
+ * @addr: start of the area within @address_space to be searched
+ * @size: size of the area to be searched
+ */
+MemoryRegionSection memory_region_find(MemoryRegion *address_space,
+                                       hwaddr addr, uint64_t size);
+
+/**
+ * memory_region_section_addr: get offset within MemoryRegionSection
+ *
+ * Returns offset within MemoryRegionSection
+ *
+ * @section: the memory region section being queried
+ * @addr: address in address space
+ */
+static inline hwaddr
+memory_region_section_addr(MemoryRegionSection *section,
+                           hwaddr addr)
+{
+    addr -= section->offset_within_address_space;
+    addr += section->offset_within_region;
+    return addr;
+}
+
+/**
+ * memory_global_sync_dirty_bitmap: synchronize the dirty log for all memory
+ *
+ * Synchronizes the dirty page log for an entire address space.
+ * @address_space: a top-level (i.e. parentless) region that contains the
+ *       memory being synchronized
+ */
+void memory_global_sync_dirty_bitmap(MemoryRegion *address_space);
+
+/**
+ * memory_region_transaction_begin: Start a transaction.
+ *
+ * During a transaction, changes will be accumulated and made visible
+ * only when the transaction ends (is committed).
+ */
+void memory_region_transaction_begin(void);
+
+/**
+ * memory_region_transaction_commit: Commit a transaction and make changes
+ *                                   visible to the guest.
+ */
+void memory_region_transaction_commit(void);
+
+/**
+ * memory_listener_register: register callbacks to be called when memory
+ *                           sections are mapped or unmapped into an address
+ *                           space
+ *
+ * @listener: an object containing the callbacks to be called
+ * @filter: if non-%NULL, only regions in this address space will be observed
+ */
+void memory_listener_register(MemoryListener *listener, AddressSpace *filter);
+
+/**
+ * memory_listener_unregister: undo the effect of memory_listener_register()
+ *
+ * @listener: an object containing the callbacks to be removed
+ */
+void memory_listener_unregister(MemoryListener *listener);
+
+/**
+ * memory_global_dirty_log_start: begin dirty logging for all regions
+ */
+void memory_global_dirty_log_start(void);
+
+/**
+ * memory_global_dirty_log_stop: end dirty logging for all regions
+ */
+void memory_global_dirty_log_stop(void);
+
+void mtree_info(fprintf_function mon_printf, void *f);
+
+/**
+ * address_space_init: initializes an address space
+ *
+ * @as: an uninitialized #AddressSpace
+ * @root: a #MemoryRegion that routes addresses for the address space
+ */
+void address_space_init(AddressSpace *as, MemoryRegion *root);
+
+
+/**
+ * address_space_destroy: destroy an address space
+ *
+ * Releases all resources associated with an address space.  After an address space
+ * is destroyed, its root memory region (given by address_space_init()) may be destroyed
+ * as well.
+ *
+ * @as: address space to be destroyed
+ */
+void address_space_destroy(AddressSpace *as);
+
+/**
+ * address_space_rw: read from or write to an address space.
+ *
+ * @as: #AddressSpace to be accessed
+ * @addr: address within that address space
+ * @buf: buffer with the data transferred
+ * @is_write: indicates the transfer direction
+ */
+void address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
+                      int len, bool is_write);
+
+/**
+ * address_space_write: write to address space.
+ *
+ * @as: #AddressSpace to be accessed
+ * @addr: address within that address space
+ * @buf: buffer with the data transferred
+ */
+void address_space_write(AddressSpace *as, hwaddr addr,
+                         const uint8_t *buf, int len);
+
+/**
+ * address_space_read: read from an address space.
+ *
+ * @as: #AddressSpace to be accessed
+ * @addr: address within that address space
+ * @buf: buffer with the data transferred
+ */
+void address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len);
+
+/* address_space_map: map a physical memory region into a host virtual address
+ *
+ * May map a subset of the requested range, given by and returned in @plen.
+ * May return %NULL if resources needed to perform the mapping are exhausted.
+ * Use only for reads OR writes - not for read-modify-write operations.
+ * Use cpu_register_map_client() to know when retrying the map operation is
+ * likely to succeed.
+ *
+ * @as: #AddressSpace to be accessed
+ * @addr: address within that address space
+ * @plen: pointer to length of buffer; updated on return
+ * @is_write: indicates the transfer direction
+ */
+void *address_space_map(AddressSpace *as, hwaddr addr,
+                        hwaddr *plen, bool is_write);
+
+/* address_space_unmap: Unmaps a memory region previously mapped by address_space_map()
+ *
+ * Will also mark the memory as dirty if @is_write == %true.  @access_len gives
+ * the amount of memory that was actually read or written by the caller.
+ *
+ * @as: #AddressSpace used
+ * @buffer: host pointer as returned by address_space_map()
+ * @len: buffer length as returned by address_space_map()
+ * @access_len: amount of data actually transferred
+ * @is_write: indicates the transfer direction
+ */
+void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
+                         int is_write, hwaddr access_len);
+
+
+#endif
+
+#endif
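To make the MemoryRegionOps / memory_region_init_io() / memory_region_add_subregion() flow concrete, a sketch of a trivial MMIO device follows; the state struct, name, base address, and register semantics are all invented:

    typedef struct DemoDevState {
        MemoryRegion iomem;
        uint32_t reg;
    } DemoDevState;

    static uint64_t demo_read(void *opaque, hwaddr addr, unsigned size)
    {
        DemoDevState *s = opaque;
        return s->reg;                 /* every offset reads back the register */
    }

    static void demo_write(void *opaque, hwaddr addr,
                           uint64_t data, unsigned size)
    {
        DemoDevState *s = opaque;
        s->reg = data;
    }

    static const MemoryRegionOps demo_ops = {
        .read = demo_read,
        .write = demo_write,
        .endianness = DEVICE_NATIVE_ENDIAN,
        .valid.min_access_size = 4,
        .valid.max_access_size = 4,
    };

    static void demo_init(DemoDevState *s)
    {
        memory_region_init_io(&s->iomem, &demo_ops, s, "demo-mmio", 0x1000);
        memory_region_add_subregion(get_system_memory(), 0x10000000, &s->iomem);
    }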
diff --git a/include/exec/poison.h b/include/exec/poison.h
new file mode 100644
index 0000000000..7d7b23b1fc
--- /dev/null
+++ b/include/exec/poison.h
@@ -0,0 +1,64 @@
+/* Poison identifiers that should not be used when building
+   target independent device code.  */
+
+#ifndef HW_POISON_H
+#define HW_POISON_H
+#ifdef __GNUC__
+
+#pragma GCC poison TARGET_I386
+#pragma GCC poison TARGET_X86_64
+#pragma GCC poison TARGET_ALPHA
+#pragma GCC poison TARGET_ARM
+#pragma GCC poison TARGET_CRIS
+#pragma GCC poison TARGET_LM32
+#pragma GCC poison TARGET_M68K
+#pragma GCC poison TARGET_MIPS
+#pragma GCC poison TARGET_MIPS64
+#pragma GCC poison TARGET_OPENRISC
+#pragma GCC poison TARGET_PPC
+#pragma GCC poison TARGET_PPCEMB
+#pragma GCC poison TARGET_PPC64
+#pragma GCC poison TARGET_ABI32
+#pragma GCC poison TARGET_SH4
+#pragma GCC poison TARGET_SPARC
+#pragma GCC poison TARGET_SPARC64
+
+#pragma GCC poison TARGET_WORDS_BIGENDIAN
+#pragma GCC poison BSWAP_NEEDED
+
+#pragma GCC poison TARGET_LONG_BITS
+#pragma GCC poison TARGET_FMT_lx
+#pragma GCC poison TARGET_FMT_ld
+
+#pragma GCC poison TARGET_PAGE_SIZE
+#pragma GCC poison TARGET_PAGE_MASK
+#pragma GCC poison TARGET_PAGE_BITS
+#pragma GCC poison TARGET_PAGE_ALIGN
+
+#pragma GCC poison CPUArchState
+#pragma GCC poison env
+
+#pragma GCC poison lduw_phys
+#pragma GCC poison ldl_phys
+#pragma GCC poison ldq_phys
+#pragma GCC poison stl_phys_notdirty
+#pragma GCC poison stq_phys_notdirty
+#pragma GCC poison stw_phys
+#pragma GCC poison stl_phys
+#pragma GCC poison stq_phys
+
+#pragma GCC poison CPU_INTERRUPT_HARD
+#pragma GCC poison CPU_INTERRUPT_EXITTB
+#pragma GCC poison CPU_INTERRUPT_HALT
+#pragma GCC poison CPU_INTERRUPT_DEBUG
+#pragma GCC poison CPU_INTERRUPT_TGT_EXT_0
+#pragma GCC poison CPU_INTERRUPT_TGT_EXT_1
+#pragma GCC poison CPU_INTERRUPT_TGT_EXT_2
+#pragma GCC poison CPU_INTERRUPT_TGT_EXT_3
+#pragma GCC poison CPU_INTERRUPT_TGT_EXT_4
+#pragma GCC poison CPU_INTERRUPT_TGT_INT_0
+#pragma GCC poison CPU_INTERRUPT_TGT_INT_1
+#pragma GCC poison CPU_INTERRUPT_TGT_INT_2
+
+#endif
+#endif
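For anyone unfamiliar with the pragma: a poisoned identifier turns any later use into a hard compile error, which is how target-specific symbols are kept out of target-independent objects. A toy illustration, not from the tree:

    #pragma GCC poison TARGET_PAGE_SIZE

    /* Any later use now fails to compile:
     *     size_t n = TARGET_PAGE_SIZE;
     * gcc: error: attempt to use poisoned "TARGET_PAGE_SIZE"
     */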
diff --git a/include/exec/softmmu-semi.h b/include/exec/softmmu-semi.h
new file mode 100644
index 0000000000..93798b9614
--- /dev/null
+++ b/include/exec/softmmu-semi.h
@@ -0,0 +1,77 @@
+/*
+ * Helper routines to provide target memory access for semihosting
+ * syscalls in system emulation mode.
+ *
+ * Copyright (c) 2007 CodeSourcery.
+ *
+ * This code is licensed under the GPL
+ */
+#ifndef SOFTMMU_SEMI_H
+#define SOFTMMU_SEMI_H 1
+
+static inline uint32_t softmmu_tget32(CPUArchState *env, uint32_t addr)
+{
+    uint32_t val;
+
+    cpu_memory_rw_debug(env, addr, (uint8_t *)&val, 4, 0);
+    return tswap32(val);
+}
+static inline uint32_t softmmu_tget8(CPUArchState *env, uint32_t addr)
+{
+    uint8_t val;
+
+    cpu_memory_rw_debug(env, addr, &val, 1, 0);
+    return val;
+}
+
+#define get_user_u32(arg, p) ({ arg = softmmu_tget32(env, p) ; 0; })
+#define get_user_u8(arg, p) ({ arg = softmmu_tget8(env, p) ; 0; })
+#define get_user_ual(arg, p) get_user_u32(arg, p)
+
+static inline void softmmu_tput32(CPUArchState *env, uint32_t addr, uint32_t val)
+{
+    val = tswap32(val);
+    cpu_memory_rw_debug(env, addr, (uint8_t *)&val, 4, 1);
+}
+#define put_user_u32(arg, p) ({ softmmu_tput32(env, p, arg) ; 0; })
+#define put_user_ual(arg, p) put_user_u32(arg, p)
+
+static void *softmmu_lock_user(CPUArchState *env, uint32_t addr, uint32_t len,
+                               int copy)
+{
+    uint8_t *p;
+    /* TODO: Make this something that isn't fixed size.  */
+    p = malloc(len);
+    if (p && copy)
+        cpu_memory_rw_debug(env, addr, p, len, 0);
+    return p;
+}
+#define lock_user(type, p, len, copy) softmmu_lock_user(env, p, len, copy)
+static char *softmmu_lock_user_string(CPUArchState *env, uint32_t addr)
+{
+    char *p;
+    char *s;
+    uint8_t c;
+    /* TODO: Make this something that isn't fixed size.  */
+    s = p = malloc(1024);
+    if (!s) {
+        return NULL;
+    }
+    do {
+        cpu_memory_rw_debug(env, addr, &c, 1, 0);
+        addr++;
+        *(p++) = c;
+    } while (c);
+    return s;
+}
+#define lock_user_string(p) softmmu_lock_user_string(env, p)
+static void softmmu_unlock_user(CPUArchState *env, void *p, target_ulong addr,
+                                target_ulong len)
+{
+    if (len)
+        cpu_memory_rw_debug(env, addr, p, len, 1);
+    free(p);
+}
+#define unlock_user(s, args, len) softmmu_unlock_user(env, s, args, len)
+
+#endif
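A sketch of the intended call pattern, loosely modelled on a semihosting "open" handler; the guest parameter-block layout is invented, and env must be in scope since the macros reference it implicitly:

    static void do_semi_open(CPUArchState *env, target_ulong args)
    {
        uint32_t fname_ptr, mode;
        char *fname;

        /* args points at a guest block: [0] = filename ptr, [1] = mode */
        get_user_u32(fname_ptr, args);
        get_user_u32(mode, args + 4);

        fname = lock_user_string(fname_ptr);   /* copy in a C string */
        if (fname) {
            /* ... perform the host open() on behalf of the guest,
             * with 'mode' selecting the open flags ... */
            unlock_user(fname, fname_ptr, 0);  /* len 0: nothing copied back */
        }
    }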
diff --git a/include/exec/softmmu_defs.h b/include/exec/softmmu_defs.h
new file mode 100644
index 0000000000..1f25e33ce4
--- /dev/null
+++ b/include/exec/softmmu_defs.h
@@ -0,0 +1,37 @@
+/*
+ *  Software MMU support
+ *
+ * Declare helpers used by TCG for qemu_ld/st ops.
+ *
+ * Used by softmmu_exec.h, TCG targets and exec-all.h.
+ *
+ */
+#ifndef SOFTMMU_DEFS_H
+#define SOFTMMU_DEFS_H
+
+uint8_t helper_ldb_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
+void helper_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
+                    int mmu_idx);
+uint16_t helper_ldw_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
+void helper_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
+                    int mmu_idx);
+uint32_t helper_ldl_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
+void helper_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
+                    int mmu_idx);
+uint64_t helper_ldq_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
+void helper_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
+                    int mmu_idx);
+
+uint8_t helper_ldb_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
+void helper_stb_cmmu(CPUArchState *env, target_ulong addr, uint8_t val,
+int mmu_idx);
+uint16_t helper_ldw_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
+void helper_stw_cmmu(CPUArchState *env, target_ulong addr, uint16_t val,
+                     int mmu_idx);
+uint32_t helper_ldl_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
+void helper_stl_cmmu(CPUArchState *env, target_ulong addr, uint32_t val,
+                     int mmu_idx);
+uint64_t helper_ldq_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
+void helper_stq_cmmu(CPUArchState *env, target_ulong addr, uint64_t val,
+                     int mmu_idx);
+#endif
diff --git a/include/exec/softmmu_exec.h b/include/exec/softmmu_exec.h
new file mode 100644
index 0000000000..3e4e886a30
--- /dev/null
+++ b/include/exec/softmmu_exec.h
@@ -0,0 +1,163 @@
+/*
+ *  Software MMU support
+ *
+ * Generate inline load/store functions for all MMU modes (typically
+ * at least _user and _kernel) as well as _data versions, for all data
+ * sizes.
+ *
+ * Used by target op helpers.
+ *
+ * MMU mode suffixes are defined in target cpu.h.
+ */
+
+/* XXX: find something cleaner.
+ * Furthermore, this is false for 64 bit targets
+ */
+#define ldul_user       ldl_user
+#define ldul_kernel     ldl_kernel
+#define ldul_hypv       ldl_hypv
+#define ldul_executive  ldl_executive
+#define ldul_supervisor ldl_supervisor
+
+#include "exec/softmmu_defs.h"
+
+#define ACCESS_TYPE 0
+#define MEMSUFFIX MMU_MODE0_SUFFIX
+#define DATA_SIZE 1
+#include "exec/softmmu_header.h"
+
+#define DATA_SIZE 2
+#include "exec/softmmu_header.h"
+
+#define DATA_SIZE 4
+#include "exec/softmmu_header.h"
+
+#define DATA_SIZE 8
+#include "exec/softmmu_header.h"
+#undef ACCESS_TYPE
+#undef MEMSUFFIX
+
+#define ACCESS_TYPE 1
+#define MEMSUFFIX MMU_MODE1_SUFFIX
+#define DATA_SIZE 1
+#include "exec/softmmu_header.h"
+
+#define DATA_SIZE 2
+#include "exec/softmmu_header.h"
+
+#define DATA_SIZE 4
+#include "exec/softmmu_header.h"
+
+#define DATA_SIZE 8
+#include "exec/softmmu_header.h"
+#undef ACCESS_TYPE
+#undef MEMSUFFIX
+
+#if (NB_MMU_MODES >= 3)
+
+#define ACCESS_TYPE 2
+#define MEMSUFFIX MMU_MODE2_SUFFIX
+#define DATA_SIZE 1
+#include "exec/softmmu_header.h"
+
+#define DATA_SIZE 2
+#include "exec/softmmu_header.h"
+
+#define DATA_SIZE 4
+#include "exec/softmmu_header.h"
+
+#define DATA_SIZE 8
+#include "exec/softmmu_header.h"
+#undef ACCESS_TYPE
+#undef MEMSUFFIX
+#endif /* (NB_MMU_MODES >= 3) */
+
+#if (NB_MMU_MODES >= 4)
+
+#define ACCESS_TYPE 3
+#define MEMSUFFIX MMU_MODE3_SUFFIX
+#define DATA_SIZE 1
+#include "exec/softmmu_header.h"
+
+#define DATA_SIZE 2
+#include "exec/softmmu_header.h"
+
+#define DATA_SIZE 4
+#include "exec/softmmu_header.h"
+
+#define DATA_SIZE 8
+#include "exec/softmmu_header.h"
+#undef ACCESS_TYPE
+#undef MEMSUFFIX
+#endif /* (NB_MMU_MODES >= 4) */
+
+#if (NB_MMU_MODES >= 5)
+
+#define ACCESS_TYPE 4
+#define MEMSUFFIX MMU_MODE4_SUFFIX
+#define DATA_SIZE 1
+#include "exec/softmmu_header.h"
+
+#define DATA_SIZE 2
+#include "exec/softmmu_header.h"
+
+#define DATA_SIZE 4
+#include "exec/softmmu_header.h"
+
+#define DATA_SIZE 8
+#include "exec/softmmu_header.h"
+#undef ACCESS_TYPE
+#undef MEMSUFFIX
+#endif /* (NB_MMU_MODES >= 5) */
+
+#if (NB_MMU_MODES >= 6)
+
+#define ACCESS_TYPE 5
+#define MEMSUFFIX MMU_MODE5_SUFFIX
+#define DATA_SIZE 1
+#include "exec/softmmu_header.h"
+
+#define DATA_SIZE 2
+#include "exec/softmmu_header.h"
+
+#define DATA_SIZE 4
+#include "exec/softmmu_header.h"
+
+#define DATA_SIZE 8
+#include "exec/softmmu_header.h"
+#undef ACCESS_TYPE
+#undef MEMSUFFIX
+#endif /* (NB_MMU_MODES >= 6) */
+
+#if (NB_MMU_MODES > 6)
+#error "NB_MMU_MODES > 6 is not supported for now"
+#endif /* (NB_MMU_MODES > 6) */
+
+/* these accesses are slower; they must be as rare as possible */
+#define ACCESS_TYPE (NB_MMU_MODES)
+#define MEMSUFFIX _data
+#define DATA_SIZE 1
+#include "exec/softmmu_header.h"
+
+#define DATA_SIZE 2
+#include "exec/softmmu_header.h"
+
+#define DATA_SIZE 4
+#include "exec/softmmu_header.h"
+
+#define DATA_SIZE 8
+#include "exec/softmmu_header.h"
+#undef ACCESS_TYPE
+#undef MEMSUFFIX
+
+#define ldub(p) ldub_data(p)
+#define ldsb(p) ldsb_data(p)
+#define lduw(p) lduw_data(p)
+#define ldsw(p) ldsw_data(p)
+#define ldl(p) ldl_data(p)
+#define ldq(p) ldq_data(p)
+
+#define stb(p, v) stb_data(p, v)
+#define stw(p, v) stw_data(p, v)
+#define stl(p, v) stl_data(p, v)
+#define stq(p, v) stq_data(p, v)
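+
+/*
+ * Usage sketch (hypothetical helper name): the _data accessors pick the MMU
+ * index via cpu_mmu_index(env), so a target op helper can simply write:
+ *
+ *   uint32_t HELPER(my_load)(CPUArchState *env, target_ulong addr)
+ *   {
+ *       return cpu_ldl_data(env, addr);
+ *   }
+ */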
diff --git a/include/exec/softmmu_header.h b/include/exec/softmmu_header.h
new file mode 100644
index 0000000000..d8d9c81b05
--- /dev/null
+++ b/include/exec/softmmu_header.h
@@ -0,0 +1,213 @@
+/*
+ *  Software MMU support
+ *
+ * Generate inline load/store functions for one MMU mode and data
+ * size.
+ *
+ * Generate a store function as well as signed and unsigned loads. For
+ * the 32- and 64-bit cases, also generate floating point functions of
+ * the same size.
+ *
+ * Not used directly but included from softmmu_exec.h and exec-all.h.
+ *
+ *  Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#if DATA_SIZE == 8
+#define SUFFIX q
+#define USUFFIX q
+#define DATA_TYPE uint64_t
+#elif DATA_SIZE == 4
+#define SUFFIX l
+#define USUFFIX l
+#define DATA_TYPE uint32_t
+#elif DATA_SIZE == 2
+#define SUFFIX w
+#define USUFFIX uw
+#define DATA_TYPE uint16_t
+#define DATA_STYPE int16_t
+#elif DATA_SIZE == 1
+#define SUFFIX b
+#define USUFFIX ub
+#define DATA_TYPE uint8_t
+#define DATA_STYPE int8_t
+#else
+#error unsupported data size
+#endif
+
+#if ACCESS_TYPE < (NB_MMU_MODES)
+
+#define CPU_MMU_INDEX ACCESS_TYPE
+#define MMUSUFFIX _mmu
+
+#elif ACCESS_TYPE == (NB_MMU_MODES)
+
+#define CPU_MMU_INDEX (cpu_mmu_index(env))
+#define MMUSUFFIX _mmu
+
+#elif ACCESS_TYPE == (NB_MMU_MODES + 1)
+
+#define CPU_MMU_INDEX (cpu_mmu_index(env))
+#define MMUSUFFIX _cmmu
+
+#else
+#error invalid ACCESS_TYPE
+#endif
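+
+/*
+ * In other words: ACCESS_TYPE 0..NB_MMU_MODES-1 hard-codes the MMU index of
+ * one mode, ACCESS_TYPE == NB_MMU_MODES is the _data case that uses the
+ * current cpu_mmu_index(env), and NB_MMU_MODES + 1 is the code-fetch case,
+ * which goes through the _cmmu helpers and the addr_code TLB field.
+ */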
+
+#if DATA_SIZE == 8
+#define RES_TYPE uint64_t
+#else
+#define RES_TYPE uint32_t
+#endif
+
+#if ACCESS_TYPE == (NB_MMU_MODES + 1)
+#define ADDR_READ addr_code
+#else
+#define ADDR_READ addr_read
+#endif
+
+/* generic load/store macros */
+
+static inline RES_TYPE
+glue(glue(cpu_ld, USUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr)
+{
+    int page_index;
+    RES_TYPE res;
+    target_ulong addr;
+    int mmu_idx;
+
+    addr = ptr;
+    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+    mmu_idx = CPU_MMU_INDEX;
+    if (unlikely(env->tlb_table[mmu_idx][page_index].ADDR_READ !=
+                 (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
+        res = glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(env, addr, mmu_idx);
+    } else {
+        uintptr_t hostaddr = addr + env->tlb_table[mmu_idx][page_index].addend;
+        res = glue(glue(ld, USUFFIX), _raw)(hostaddr);
+    }
+    return res;
+}
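+
+/*
+ * As a concrete instantiation (assuming DATA_SIZE == 4 and MEMSUFFIX ==
+ * _data), the glue() expansion above yields cpu_ldl_data(): a TLB hit loads
+ * through ldl_raw() at the host address, while a miss falls back to
+ * helper_ldl_mmu(env, addr, mmu_idx) from softmmu_defs.h.
+ */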
+
+#if DATA_SIZE <= 2
+static inline int
+glue(glue(cpu_lds, SUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr)
+{
+    int res, page_index;
+    target_ulong addr;
+    int mmu_idx;
+
+    addr = ptr;
+    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+    mmu_idx = CPU_MMU_INDEX;
+    if (unlikely(env->tlb_table[mmu_idx][page_index].ADDR_READ !=
+                 (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
+        res = (DATA_STYPE)glue(glue(helper_ld, SUFFIX),
+                               MMUSUFFIX)(env, addr, mmu_idx);
+    } else {
+        uintptr_t hostaddr = addr + env->tlb_table[mmu_idx][page_index].addend;
+        res = glue(glue(lds, SUFFIX), _raw)(hostaddr);
+    }
+    return res;
+}
+#endif
+
+#if ACCESS_TYPE != (NB_MMU_MODES + 1)
+
+/* generic store macro */
+
+static inline void
+glue(glue(cpu_st, SUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr,
+                                      RES_TYPE v)
+{
+    int page_index;
+    target_ulong addr;
+    int mmu_idx;
+
+    addr = ptr;
+    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+    mmu_idx = CPU_MMU_INDEX;
+    if (unlikely(env->tlb_table[mmu_idx][page_index].addr_write !=
+                 (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
+        glue(glue(helper_st, SUFFIX), MMUSUFFIX)(env, addr, v, mmu_idx);
+    } else {
+        uintptr_t hostaddr = addr + env->tlb_table[mmu_idx][page_index].addend;
+        glue(glue(st, SUFFIX), _raw)(hostaddr, v);
+    }
+}
+
+#if DATA_SIZE == 8
+static inline float64 glue(cpu_ldfq, MEMSUFFIX)(CPUArchState *env,
+                                                target_ulong ptr)
+{
+    union {
+        float64 d;
+        uint64_t i;
+    } u;
+    u.i = glue(cpu_ldq, MEMSUFFIX)(env, ptr);
+    return u.d;
+}
+
+static inline void glue(cpu_stfq, MEMSUFFIX)(CPUArchState *env,
+                                             target_ulong ptr, float64 v)
+{
+    union {
+        float64 d;
+        uint64_t i;
+    } u;
+    u.d = v;
+    glue(cpu_stq, MEMSUFFIX)(env, ptr, u.i);
+}
+#endif /* DATA_SIZE == 8 */
+
+#if DATA_SIZE == 4
+static inline float32 glue(cpu_ldfl, MEMSUFFIX)(CPUArchState *env,
+                                                target_ulong ptr)
+{
+    union {
+        float32 f;
+        uint32_t i;
+    } u;
+    u.i = glue(cpu_ldl, MEMSUFFIX)(env, ptr);
+    return u.f;
+}
+
+static inline void glue(cpu_stfl, MEMSUFFIX)(CPUArchState *env,
+                                             target_ulong ptr, float32 v)
+{
+    union {
+        float32 f;
+        uint32_t i;
+    } u;
+    u.f = v;
+    glue(cpu_stl, MEMSUFFIX)(env, ptr, u.i);
+}
+#endif /* DATA_SIZE == 4 */
+
+#endif /* ACCESS_TYPE != (NB_MMU_MODES + 1) */
+
+#undef RES_TYPE
+#undef DATA_TYPE
+#undef DATA_STYPE
+#undef SUFFIX
+#undef USUFFIX
+#undef DATA_SIZE
+#undef CPU_MMU_INDEX
+#undef MMUSUFFIX
+#undef ADDR_READ
diff --git a/include/exec/softmmu_template.h b/include/exec/softmmu_template.h
new file mode 100644
index 0000000000..b219191abd
--- /dev/null
+++ b/include/exec/softmmu_template.h
@@ -0,0 +1,354 @@
+/*
+ *  Software MMU support
+ *
+ * Generate helpers used by TCG for qemu_ld/st ops and code load
+ * functions.
+ *
+ * Included from target op helpers and exec.c.
+ *
+ *  Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/timer.h"
+#include "exec/memory.h"
+
+#define DATA_SIZE (1 << SHIFT)
+
+#if DATA_SIZE == 8
+#define SUFFIX q
+#define USUFFIX q
+#define DATA_TYPE uint64_t
+#elif DATA_SIZE == 4
+#define SUFFIX l
+#define USUFFIX l
+#define DATA_TYPE uint32_t
+#elif DATA_SIZE == 2
+#define SUFFIX w
+#define USUFFIX uw
+#define DATA_TYPE uint16_t
+#elif DATA_SIZE == 1
+#define SUFFIX b
+#define USUFFIX ub
+#define DATA_TYPE uint8_t
+#else
+#error unsupported data size
+#endif
+
+#ifdef SOFTMMU_CODE_ACCESS
+#define READ_ACCESS_TYPE 2
+#define ADDR_READ addr_code
+#else
+#define READ_ACCESS_TYPE 0
+#define ADDR_READ addr_read
+#endif
+
+static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env,
+                                                        target_ulong addr,
+                                                        int mmu_idx,
+                                                        uintptr_t retaddr);
+static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
+                                              hwaddr physaddr,
+                                              target_ulong addr,
+                                              uintptr_t retaddr)
+{
+    DATA_TYPE res;
+    MemoryRegion *mr = iotlb_to_region(physaddr);
+
+    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
+    env->mem_io_pc = retaddr;
+    if (mr != &io_mem_ram && mr != &io_mem_rom
+        && mr != &io_mem_unassigned
+        && mr != &io_mem_notdirty
+        && !can_do_io(env)) {
+        cpu_io_recompile(env, retaddr);
+    }
+
+    env->mem_io_vaddr = addr;
+#if SHIFT <= 2
+    res = io_mem_read(mr, physaddr, 1 << SHIFT);
+#else
+#ifdef TARGET_WORDS_BIGENDIAN
+    res = io_mem_read(mr, physaddr, 4) << 32;
+    res |= io_mem_read(mr, physaddr + 4, 4);
+#else
+    res = io_mem_read(mr, physaddr, 4);
+    res |= io_mem_read(mr, physaddr + 4, 4) << 32;
+#endif
+#endif /* SHIFT > 2 */
+    return res;
+}
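+
+/*
+ * Note that io_mem_read() is used in at most 4-byte units here: for
+ * SHIFT == 3 (a 64-bit access) the result is assembled from two 32-bit
+ * reads, the first word supplying bits 63..32 on a big-endian target and
+ * bits 31..0 on a little-endian one.
+ */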
+
+/* handle all cases except unaligned accesses which span two pages */
+DATA_TYPE
+glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
+                                         int mmu_idx)
+{
+    DATA_TYPE res;
+    int index;
+    target_ulong tlb_addr;
+    hwaddr ioaddr;
+    uintptr_t retaddr;
+
+    /* test if there is a match for unaligned or IO access */
+    /* XXX: more could be done in the memory macros in a non-portable way */
+    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+ redo:
+    tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
+    if ((addr & TARGET_PAGE_MASK) ==
+        (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
+        if (tlb_addr & ~TARGET_PAGE_MASK) {
+            /* IO access */
+            if ((addr & (DATA_SIZE - 1)) != 0) {
+                goto do_unaligned_access;
+            }
+            retaddr = GETPC_EXT();
+            ioaddr = env->iotlb[mmu_idx][index];
+            res = glue(io_read, SUFFIX)(env, ioaddr, addr, retaddr);
+        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1)
+                   >= TARGET_PAGE_SIZE) {
+            /* slow unaligned access (it spans two pages or IO) */
+        do_unaligned_access:
+            retaddr = GETPC_EXT();
+#ifdef ALIGNED_ONLY
+            do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
+#endif
+            res = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(env, addr,
+                                                         mmu_idx, retaddr);
+        } else {
+            /* unaligned/aligned access in the same page */
+            uintptr_t addend;
+#ifdef ALIGNED_ONLY
+            if ((addr & (DATA_SIZE - 1)) != 0) {
+                retaddr = GETPC_EXT();
+                do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
+            }
+#endif
+            addend = env->tlb_table[mmu_idx][index].addend;
+            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(intptr_t)
+                                                (addr + addend));
+        }
+    } else {
+        /* the page is not in the TLB: fill it */
+        retaddr = GETPC_EXT();
+#ifdef ALIGNED_ONLY
+        if ((addr & (DATA_SIZE - 1)) != 0) {
+            do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
+        }
+#endif
+        tlb_fill(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
+        goto redo;
+    }
+    return res;
+}
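+
+/*
+ * For example, instantiating this file with SHIFT == 2 and MMUSUFFIX ==
+ * _mmu produces helper_ldl_mmu(), which TCG-generated code and the inline
+ * accessors in softmmu_header.h call when the fast-path TLB comparison
+ * fails; the three branches above handle I/O, page-spanning and same-page
+ * accesses respectively.
+ */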
+
+/* handle all unaligned cases */
+static DATA_TYPE
+glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env,
+                                       target_ulong addr,
+                                       int mmu_idx,
+                                       uintptr_t retaddr)
+{
+    DATA_TYPE res, res1, res2;
+    int index, shift;
+    hwaddr ioaddr;
+    target_ulong tlb_addr, addr1, addr2;
+
+    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+ redo:
+    tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
+    if ((addr & TARGET_PAGE_MASK) ==
+        (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
+        if (tlb_addr & ~TARGET_PAGE_MASK) {
+            /* IO access */
+            if ((addr & (DATA_SIZE - 1)) != 0) {
+                goto do_unaligned_access;
+            }
+            ioaddr = env->iotlb[mmu_idx][index];
+            res = glue(io_read, SUFFIX)(env, ioaddr, addr, retaddr);
+        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1)
+                   >= TARGET_PAGE_SIZE) {
+        do_unaligned_access:
+            /* slow unaligned access (it spans two pages) */
+            addr1 = addr & ~(DATA_SIZE - 1);
+            addr2 = addr1 + DATA_SIZE;
+            res1 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(env, addr1,
+                                                          mmu_idx, retaddr);
+            res2 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(env, addr2,
+                                                          mmu_idx, retaddr);
+            shift = (addr & (DATA_SIZE - 1)) * 8;
+#ifdef TARGET_WORDS_BIGENDIAN
+            res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
+#else
+            res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
+#endif
+            res = (DATA_TYPE)res;
+        } else {
+            /* unaligned/aligned access in the same page */
+            uintptr_t addend = env->tlb_table[mmu_idx][index].addend;
+            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(intptr_t)
+                                                (addr + addend));
+        }
+    } else {
+        /* the page is not in the TLB: fill it */
+        tlb_fill(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
+        goto redo;
+    }
+    return res;
+}
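+
+/*
+ * Worked example (little endian, DATA_SIZE == 4, 4K pages): a load at
+ * addr == 0xffe rounds down to addr1 == 0xffc and addr2 == 0x1000, so
+ * shift == 16 and the result is (res1 >> 16) | (res2 << 16), placing the
+ * two bytes from each page in the correct halves of the 32-bit value.
+ */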
+
+#ifndef SOFTMMU_CODE_ACCESS
+
+static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
+                                                   target_ulong addr,
+                                                   DATA_TYPE val,
+                                                   int mmu_idx,
+                                                   uintptr_t retaddr);
+
+static inline void glue(io_write, SUFFIX)(CPUArchState *env,
+                                          hwaddr physaddr,
+                                          DATA_TYPE val,
+                                          target_ulong addr,
+                                          uintptr_t retaddr)
+{
+    MemoryRegion *mr = iotlb_to_region(physaddr);
+
+    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
+    if (mr != &io_mem_ram && mr != &io_mem_rom
+        && mr != &io_mem_unassigned
+        && mr != &io_mem_notdirty
+        && !can_do_io(env)) {
+        cpu_io_recompile(env, retaddr);
+    }
+
+    env->mem_io_vaddr = addr;
+    env->mem_io_pc = retaddr;
+#if SHIFT <= 2
+    io_mem_write(mr, physaddr, val, 1 << SHIFT);
+#else
+#ifdef TARGET_WORDS_BIGENDIAN
+    io_mem_write(mr, physaddr, (val >> 32), 4);
+    io_mem_write(mr, physaddr + 4, (uint32_t)val, 4);
+#else
+    io_mem_write(mr, physaddr, (uint32_t)val, 4);
+    io_mem_write(mr, physaddr + 4, val >> 32, 4);
+#endif
+#endif /* SHIFT > 2 */
+}
+
+void glue(glue(helper_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
+                                              target_ulong addr, DATA_TYPE val,
+                                              int mmu_idx)
+{
+    hwaddr ioaddr;
+    target_ulong tlb_addr;
+    uintptr_t retaddr;
+    int index;
+
+    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+ redo:
+    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
+    if ((addr & TARGET_PAGE_MASK) ==
+        (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
+        if (tlb_addr & ~TARGET_PAGE_MASK) {
+            /* IO access */
+            if ((addr & (DATA_SIZE - 1)) != 0) {
+                goto do_unaligned_access;
+            }
+            retaddr = GETPC_EXT();
+            ioaddr = env->iotlb[mmu_idx][index];
+            glue(io_write, SUFFIX)(env, ioaddr, val, addr, retaddr);
+        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1)
+                   >= TARGET_PAGE_SIZE) {
+        do_unaligned_access:
+            retaddr = GETPC_EXT();
+#ifdef ALIGNED_ONLY
+            do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
+#endif
+            glue(glue(slow_st, SUFFIX), MMUSUFFIX)(env, addr, val,
+                                                   mmu_idx, retaddr);
+        } else {
+            /* aligned/unaligned access in the same page */
+            uintptr_t addend;
+#ifdef ALIGNED_ONLY
+            if ((addr & (DATA_SIZE - 1)) != 0) {
+                retaddr = GETPC_EXT();
+                do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
+            }
+#endif
+            addend = env->tlb_table[mmu_idx][index].addend;
+            glue(glue(st, SUFFIX), _raw)((uint8_t *)(intptr_t)
+                                         (addr + addend), val);
+        }
+    } else {
+        /* the page is not in the TLB: fill it */
+        retaddr = GETPC_EXT();
+#ifdef ALIGNED_ONLY
+        if ((addr & (DATA_SIZE - 1)) != 0) {
+            do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
+        }
+#endif
+        tlb_fill(env, addr, 1, mmu_idx, retaddr);
+        goto redo;
+    }
+}
+
+/* handles all unaligned cases */
+static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
+                                                   target_ulong addr,
+                                                   DATA_TYPE val,
+                                                   int mmu_idx,
+                                                   uintptr_t retaddr)
+{
+    hwaddr ioaddr;
+    target_ulong tlb_addr;
+    int index, i;
+
+    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+ redo:
+    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
+    if ((addr & TARGET_PAGE_MASK) ==
+        (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
+        if (tlb_addr & ~TARGET_PAGE_MASK) {
+            /* IO access */
+            if ((addr & (DATA_SIZE - 1)) != 0) {
+                goto do_unaligned_access;
+            }
+            ioaddr = env->iotlb[mmu_idx][index];
+            glue(io_write, SUFFIX)(env, ioaddr, val, addr, retaddr);
+        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1)
+                   >= TARGET_PAGE_SIZE) {
+        do_unaligned_access:
+            /* XXX: not efficient, but simple */
+            /* Note: relies on the fact that tlb_fill() does not remove the
+             * previous page from the TLB cache.  */
+            for (i = DATA_SIZE - 1; i >= 0; i--) {
+#ifdef TARGET_WORDS_BIGENDIAN
+                glue(slow_stb, MMUSUFFIX)(env, addr + i,
+                                          val >> (((DATA_SIZE - 1) * 8) - (i * 8)),
+                                          mmu_idx, retaddr);
+#else
+                glue(slow_stb, MMUSUFFIX)(env, addr + i,
+                                          val >> (i * 8),
+                                          mmu_idx, retaddr);
+#endif
+            }
+        } else {
+            /* aligned/unaligned access in the same page */
+            uintptr_t addend = env->tlb_table[mmu_idx][index].addend;
+            glue(glue(st, SUFFIX), _raw)((uint8_t *)(intptr_t)
+                                         (addr + addend), val);
+        }
+    } else {
+        /* the page is not in the TLB: fill it */
+        tlb_fill(env, addr, 1, mmu_idx, retaddr);
+        goto redo;
+    }
+}
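+
+/*
+ * In the byte loop above, byte i is stored with the shift
+ * (DATA_SIZE - 1 - i) * 8 on a big-endian target, so the most significant
+ * byte lands at the lowest address; on little endian it is val >> (i * 8).
+ */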
+
+#endif /* !defined(SOFTMMU_CODE_ACCESS) */
+
+#undef READ_ACCESS_TYPE
+#undef SHIFT
+#undef DATA_TYPE
+#undef SUFFIX
+#undef USUFFIX
+#undef DATA_SIZE
+#undef ADDR_READ
diff --git a/include/exec/spinlock.h b/include/exec/spinlock.h
new file mode 100644
index 0000000000..a72edda1d2
--- /dev/null
+++ b/include/exec/spinlock.h
@@ -0,0 +1,49 @@
+/*
+ *  Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>
+ */
+
+/* configure guarantees that we have pthreads on any host except
+ * mingw32, which doesn't support any of the user-only targets.
+ * So we can simply assume we have pthread mutexes here.
+ */
+#if defined(CONFIG_USER_ONLY)
+
+#include <pthread.h>
+#define spin_lock pthread_mutex_lock
+#define spin_unlock pthread_mutex_unlock
+#define spinlock_t pthread_mutex_t
+#define SPIN_LOCK_UNLOCKED PTHREAD_MUTEX_INITIALIZER
+
+#else
+
+/* Empty implementations, on the theory that system mode emulation
+ * is single-threaded. This means that these functions should only
+ * be used from code run in the TCG cpu thread, and cannot protect
+ * data structures which might also be accessed from the IO thread
+ * or from signal handlers.
+ */
+typedef int spinlock_t;
+#define SPIN_LOCK_UNLOCKED 0
+
+static inline void spin_lock(spinlock_t *lock)
+{
+}
+
+static inline void spin_unlock(spinlock_t *lock)
+{
+}
+
+#endif
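+
+/*
+ * Usage sketch (hypothetical lock name); the API is identical in both
+ * configurations:
+ *
+ *   static spinlock_t my_lock = SPIN_LOCK_UNLOCKED;
+ *
+ *   spin_lock(&my_lock);
+ *   ... critical section ...
+ *   spin_unlock(&my_lock);
+ */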
diff --git a/include/exec/user/abitypes.h b/include/exec/user/abitypes.h
new file mode 100644
index 0000000000..fe7f6624f9
--- /dev/null
+++ b/include/exec/user/abitypes.h
@@ -0,0 +1,36 @@
+#ifndef EXEC_USER_ABITYPES_H
+#define EXEC_USER_ABITYPES_H
+#include "cpu.h"
+
+#ifdef TARGET_ABI32
+typedef uint32_t abi_ulong;
+typedef int32_t abi_long;
+#define TARGET_ABI_FMT_lx "%08x"
+#define TARGET_ABI_FMT_ld "%d"
+#define TARGET_ABI_FMT_lu "%u"
+#define TARGET_ABI_BITS 32
+
+static inline abi_ulong tswapal(abi_ulong v)
+{
+    return tswap32(v);
+}
+
+#else
+typedef target_ulong abi_ulong;
+typedef target_long abi_long;
+#define TARGET_ABI_FMT_lx TARGET_FMT_lx
+#define TARGET_ABI_FMT_ld TARGET_FMT_ld
+#define TARGET_ABI_FMT_lu TARGET_FMT_lu
+#define TARGET_ABI_BITS TARGET_LONG_BITS
+/* for consistency, define ABI32 too */
+#if TARGET_ABI_BITS == 32
+#define TARGET_ABI32 1
+#endif
+
+static inline abi_ulong tswapal(abi_ulong v)
+{
+    return tswapl(v);
+}
+
+#endif
+#endif
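+
+/*
+ * Usage sketch: abi_ulong always has the width of the guest ABI, and the
+ * TARGET_ABI_FMT_* macros match it, so a guest value can be printed
+ * portably (hypothetical snippet):
+ *
+ *   abi_ulong sp = get_guest_sp();
+ *   printf("sp=" TARGET_ABI_FMT_lx "\n", sp);
+ *
+ * tswapal() byte-swaps only when host and target endianness differ.
+ */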
diff --git a/include/exec/user/thunk.h b/include/exec/user/thunk.h
new file mode 100644
index 0000000000..87025c3b04
--- /dev/null
+++ b/include/exec/user/thunk.h
@@ -0,0 +1,189 @@
+/*
+ *  Generic thunking code to convert data between host and target CPU
+ *
+ *  Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef THUNK_H
+#define THUNK_H
+
+#include <inttypes.h>
+#include "cpu.h"
+
+/* type enum definitions */
+
+typedef enum argtype {
+    TYPE_NULL,
+    TYPE_CHAR,
+    TYPE_SHORT,
+    TYPE_INT,
+    TYPE_LONG,
+    TYPE_ULONG,
+    TYPE_PTRVOID, /* pointer to unknown data */
+    TYPE_LONGLONG,
+    TYPE_ULONGLONG,
+    TYPE_PTR,
+    TYPE_ARRAY,
+    TYPE_STRUCT,
+    TYPE_OLDDEVT,
+} argtype;
+
+#define MK_PTR(type) TYPE_PTR, type
+#define MK_ARRAY(type, size) TYPE_ARRAY, size, type
+#define MK_STRUCT(id) TYPE_STRUCT, id
+
+#define THUNK_TARGET 0
+#define THUNK_HOST   1
+
+typedef struct {
+    /* standard struct handling */
+    const argtype *field_types;
+    int nb_fields;
+    int *field_offsets[2];
+    /* special handling */
+    void (*convert[2])(void *dst, const void *src);
+    int size[2];
+    int align[2];
+    const char *name;
+} StructEntry;
+
+/* Translation table for bitmasks... */
+typedef struct bitmask_transtbl {
+	unsigned int	x86_mask;
+	unsigned int	x86_bits;
+	unsigned int	alpha_mask;
+	unsigned int	alpha_bits;
+} bitmask_transtbl;
+
+void thunk_register_struct(int id, const char *name, const argtype *types);
+void thunk_register_struct_direct(int id, const char *name,
+                                  const StructEntry *se1);
+const argtype *thunk_convert(void *dst, const void *src,
+                             const argtype *type_ptr, int to_host);
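+
+/*
+ * Example (hypothetical struct id and layout): a target struct
+ * { int fd; char name[16]; } would be described, registered and converted
+ * roughly as follows:
+ *
+ *   static const argtype my_struct_def[] = {
+ *       TYPE_INT, MK_ARRAY(TYPE_CHAR, 16), TYPE_NULL
+ *   };
+ *   thunk_register_struct(STRUCT_my, "my_struct", my_struct_def);
+ *
+ *   const argtype conv[] = { MK_STRUCT(STRUCT_my) };
+ *   thunk_convert(host_buf, target_buf, conv, THUNK_HOST);
+ */
+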
+#ifndef NO_THUNK_TYPE_SIZE
+
+extern StructEntry struct_entries[];
+
+int thunk_type_size_array(const argtype *type_ptr, int is_host);
+int thunk_type_align_array(const argtype *type_ptr, int is_host);
+
+static inline int thunk_type_size(const argtype *type_ptr, int is_host)
+{
+    int type, size;
+    const StructEntry *se;
+
+    type = *type_ptr;
+    switch (type) {
+    case TYPE_CHAR:
+        return 1;
+    case TYPE_SHORT:
+        return 2;
+    case TYPE_INT:
+        return 4;
+    case TYPE_LONGLONG:
+    case TYPE_ULONGLONG:
+        return 8;
+    case TYPE_LONG:
+    case TYPE_ULONG:
+    case TYPE_PTRVOID:
+    case TYPE_PTR:
+        if (is_host) {
+            return sizeof(void *);
+        } else {
+            return TARGET_ABI_BITS / 8;
+        }
+        break;
+    case TYPE_OLDDEVT:
+        if (is_host) {
+#if defined(HOST_X86_64)
+            return 8;
+#elif defined(HOST_ALPHA) || defined(HOST_IA64) || defined(HOST_MIPS) || \
+      defined(HOST_PARISC) || defined(HOST_SPARC64)
+            return 4;
+#elif defined(HOST_PPC)
+            return sizeof(void *);
+#else
+            return 2;
+#endif
+        } else {
+#if defined(TARGET_X86_64)
+            return 8;
+#elif defined(TARGET_ALPHA) || defined(TARGET_IA64) || defined(TARGET_MIPS) || \
+      defined(TARGET_PARISC) || defined(TARGET_SPARC64)
+            return 4;
+#elif defined(TARGET_PPC)
+            return TARGET_ABI_BITS / 8;
+#else
+            return 2;
+#endif
+        }
+        break;
+    case TYPE_ARRAY:
+        size = type_ptr[1];
+        return size * thunk_type_size_array(type_ptr + 2, is_host);
+    case TYPE_STRUCT:
+        se = struct_entries + type_ptr[1];
+        return se->size[is_host];
+    default:
+        return -1;
+    }
+}
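+
+/*
+ * For instance, thunk_type_size() on the description MK_ARRAY(TYPE_INT, 4)
+ * returns 4 * 4 == 16 on either side, while TYPE_PTR yields sizeof(void *)
+ * on the host but TARGET_ABI_BITS / 8 for the target, which is exactly
+ * where host and target layouts can diverge.
+ */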
+
+static inline int thunk_type_align(const argtype *type_ptr, int is_host)
+{
+    int type;
+    const StructEntry *se;
+
+    type = *type_ptr;
+    switch (type) {
+    case TYPE_CHAR:
+        return 1;
+    case TYPE_SHORT:
+        return 2;
+    case TYPE_INT:
+        return 4;
+    case TYPE_LONGLONG:
+    case TYPE_ULONGLONG:
+        return 8;
+    case TYPE_LONG:
+    case TYPE_ULONG:
+    case TYPE_PTRVOID:
+    case TYPE_PTR:
+        if (is_host) {
+            return sizeof(void *);
+        } else {
+            return TARGET_ABI_BITS / 8;
+        }
+        break;
+    case TYPE_OLDDEVT:
+        return thunk_type_size(type_ptr, is_host);
+    case TYPE_ARRAY:
+        return thunk_type_align_array(type_ptr + 2, is_host);
+    case TYPE_STRUCT:
+        se = struct_entries + type_ptr[1];
+        return se->align[is_host];
+    default:
+        return -1;
+    }
+}
+
+#endif /* NO_THUNK_TYPE_SIZE */
+
+unsigned int target_to_host_bitmask(unsigned int x86_mask,
+                                    const bitmask_transtbl * trans_tbl);
+unsigned int host_to_target_bitmask(unsigned int alpha_mask,
+                                    const bitmask_transtbl * trans_tbl);
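+
+/*
+ * Usage sketch (hypothetical entries): linux-user builds tables such as
+ *
+ *   static const bitmask_transtbl fcntl_flags_tbl[] = {
+ *       { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND },
+ *       ...
+ *       { 0, 0, 0, 0 }
+ *   };
+ *
+ * and target_to_host_bitmask(flags, fcntl_flags_tbl) rewrites each matching
+ * guest bit-field into its host equivalent (the x86_/alpha_ field names are
+ * historical shorthand for target/host).
+ */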
+
+#endif