Diffstat (limited to 'target')
-rw-r--r--  target/riscv/Makefile.objs     1
-rw-r--r--  target/riscv/cpu.c           432
-rw-r--r--  target/riscv/cpu.h           296
-rw-r--r--  target/riscv/cpu_bits.h      411
-rw-r--r--  target/riscv/cpu_user.h       13
-rw-r--r--  target/riscv/fpu_helper.c    373
-rw-r--r--  target/riscv/gdbstub.c        62
-rw-r--r--  target/riscv/helper.c        503
-rw-r--r--  target/riscv/helper.h         78
-rw-r--r--  target/riscv/instmap.h       364
-rw-r--r--  target/riscv/op_helper.c     669
-rw-r--r--  target/riscv/pmp.c           380
-rw-r--r--  target/riscv/pmp.h            64
-rw-r--r--  target/riscv/translate.c    1978
-rw-r--r--  target/s390x/cpu.h            66
-rw-r--r--  target/s390x/mem_helper.c     20
-rw-r--r--  target/s390x/mmu_helper.c     54
-rw-r--r--  target/sparc/translate.c       5
18 files changed, 5699 insertions(+), 70 deletions(-)
diff --git a/target/riscv/Makefile.objs b/target/riscv/Makefile.objs
new file mode 100644
index 0000000000..abd0a7cde3
--- /dev/null
+++ b/target/riscv/Makefile.objs
@@ -0,0 +1 @@
+obj-y += translate.o op_helper.o helper.o cpu.o fpu_helper.o gdbstub.o pmp.o
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
new file mode 100644
index 0000000000..4851890844
--- /dev/null
+++ b/target/riscv/cpu.c
@@ -0,0 +1,432 @@
+/*
+ * QEMU RISC-V CPU
+ *
+ * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
+ * Copyright (c) 2017-2018 SiFive, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "qapi/error.h"
+#include "migration/vmstate.h"
+
+/* RISC-V CPU definitions */
+
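+/* order in which extension letters are emitted by riscv_isa_string() */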
+static const char riscv_exts[26] = "IMAFDQECLBJTPVNSUHKORWXYZG";
+
+const char * const riscv_int_regnames[] = {
+  "zero", "ra  ", "sp  ", "gp  ", "tp  ", "t0  ", "t1  ", "t2  ",
+  "s0  ", "s1  ", "a0  ", "a1  ", "a2  ", "a3  ", "a4  ", "a5  ",
+  "a6  ", "a7  ", "s2  ", "s3  ", "s4  ", "s5  ", "s6  ", "s7  ",
+  "s8  ", "s9  ", "s10 ", "s11 ", "t3  ", "t4  ", "t5  ", "t6  "
+};
+
+const char * const riscv_fpr_regnames[] = {
+  "ft0 ", "ft1 ", "ft2 ", "ft3 ", "ft4 ", "ft5 ", "ft6 ",  "ft7 ",
+  "fs0 ", "fs1 ", "fa0 ", "fa1 ", "fa2 ", "fa3 ", "fa4 ",  "fa5 ",
+  "fa6 ", "fa7 ", "fs2 ", "fs3 ", "fs4 ", "fs5 ", "fs6 ",  "fs7 ",
+  "fs8 ", "fs9 ", "fs10", "fs11", "ft8 ", "ft9 ", "ft10",  "ft11"
+};
+
+const char * const riscv_excp_names[] = {
+    "misaligned_fetch",
+    "fault_fetch",
+    "illegal_instruction",
+    "breakpoint",
+    "misaligned_load",
+    "fault_load",
+    "misaligned_store",
+    "fault_store",
+    "user_ecall",
+    "supervisor_ecall",
+    "hypervisor_ecall",
+    "machine_ecall",
+    "exec_page_fault",
+    "load_page_fault",
+    "reserved",
+    "store_page_fault"
+};
+
+const char * const riscv_intr_names[] = {
+    "u_software",
+    "s_software",
+    "h_software",
+    "m_software",
+    "u_timer",
+    "s_timer",
+    "h_timer",
+    "m_timer",
+    "u_external",
+    "s_external",
+    "h_external",
+    "m_external",
+    "coprocessor",
+    "host"
+};
+
+typedef struct RISCVCPUInfo {
+    const int bit_widths;
+    const char *name;
+    void (*initfn)(Object *obj);
+} RISCVCPUInfo;
+
+static void set_misa(CPURISCVState *env, target_ulong misa)
+{
+    env->misa = misa;
+}
+
+static void set_versions(CPURISCVState *env, int user_ver, int priv_ver)
+{
+    env->user_ver = user_ver;
+    env->priv_ver = priv_ver;
+}
+
+static void set_feature(CPURISCVState *env, int feature)
+{
+    env->features |= (1ULL << feature);
+}
+
+static void set_resetvec(CPURISCVState *env, int resetvec)
+{
+#ifndef CONFIG_USER_ONLY
+    env->resetvec = resetvec;
+#endif
+}
+
+static void riscv_any_cpu_init(Object *obj)
+{
+    CPURISCVState *env = &RISCV_CPU(obj)->env;
+    set_misa(env, RVXLEN | RVI | RVM | RVA | RVF | RVD | RVC | RVU);
+    set_versions(env, USER_VERSION_2_02_0, PRIV_VERSION_1_10_0);
+    set_resetvec(env, DEFAULT_RSTVEC);
+}
+
+static void rv32gcsu_priv1_09_1_cpu_init(Object *obj)
+{
+    CPURISCVState *env = &RISCV_CPU(obj)->env;
+    set_misa(env, RV32 | RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
+    set_versions(env, USER_VERSION_2_02_0, PRIV_VERSION_1_09_1);
+    set_resetvec(env, DEFAULT_RSTVEC);
+    set_feature(env, RISCV_FEATURE_MMU);
+}
+
+static void rv32gcsu_priv1_10_0_cpu_init(Object *obj)
+{
+    CPURISCVState *env = &RISCV_CPU(obj)->env;
+    set_misa(env, RV32 | RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
+    set_versions(env, USER_VERSION_2_02_0, PRIV_VERSION_1_10_0);
+    set_resetvec(env, DEFAULT_RSTVEC);
+    set_feature(env, RISCV_FEATURE_MMU);
+}
+
+static void rv32imacu_nommu_cpu_init(Object *obj)
+{
+    CPURISCVState *env = &RISCV_CPU(obj)->env;
+    set_misa(env, RV32 | RVI | RVM | RVA | RVC | RVU);
+    set_versions(env, USER_VERSION_2_02_0, PRIV_VERSION_1_10_0);
+    set_resetvec(env, DEFAULT_RSTVEC);
+}
+
+static void rv64gcsu_priv1_09_1_cpu_init(Object *obj)
+{
+    CPURISCVState *env = &RISCV_CPU(obj)->env;
+    set_misa(env, RV64 | RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
+    set_versions(env, USER_VERSION_2_02_0, PRIV_VERSION_1_09_1);
+    set_resetvec(env, DEFAULT_RSTVEC);
+    set_feature(env, RISCV_FEATURE_MMU);
+}
+
+static void rv64gcsu_priv1_10_0_cpu_init(Object *obj)
+{
+    CPURISCVState *env = &RISCV_CPU(obj)->env;
+    set_misa(env, RV64 | RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
+    set_versions(env, USER_VERSION_2_02_0, PRIV_VERSION_1_10_0);
+    set_resetvec(env, DEFAULT_RSTVEC);
+    set_feature(env, RISCV_FEATURE_MMU);
+}
+
+static void rv64imacu_nommu_cpu_init(Object *obj)
+{
+    CPURISCVState *env = &RISCV_CPU(obj)->env;
+    set_misa(env, RV64 | RVI | RVM | RVA | RVC | RVU);
+    set_versions(env, USER_VERSION_2_02_0, PRIV_VERSION_1_10_0);
+    set_resetvec(env, DEFAULT_RSTVEC);
+}
+
+static const RISCVCPUInfo riscv_cpus[] = {
+    { 96, TYPE_RISCV_CPU_ANY,              riscv_any_cpu_init },
+    { 32, TYPE_RISCV_CPU_RV32GCSU_V1_09_1, rv32gcsu_priv1_09_1_cpu_init },
+    { 32, TYPE_RISCV_CPU_RV32GCSU_V1_10_0, rv32gcsu_priv1_10_0_cpu_init },
+    { 32, TYPE_RISCV_CPU_RV32IMACU_NOMMU,  rv32imacu_nommu_cpu_init },
+    { 32, TYPE_RISCV_CPU_SIFIVE_E31,       rv32imacu_nommu_cpu_init },
+    { 32, TYPE_RISCV_CPU_SIFIVE_U34,       rv32gcsu_priv1_10_0_cpu_init },
+    { 64, TYPE_RISCV_CPU_RV64GCSU_V1_09_1, rv64gcsu_priv1_09_1_cpu_init },
+    { 64, TYPE_RISCV_CPU_RV64GCSU_V1_10_0, rv64gcsu_priv1_10_0_cpu_init },
+    { 64, TYPE_RISCV_CPU_RV64IMACU_NOMMU,  rv64imacu_nommu_cpu_init },
+    { 64, TYPE_RISCV_CPU_SIFIVE_E51,       rv64imacu_nommu_cpu_init },
+    { 64, TYPE_RISCV_CPU_SIFIVE_U54,       rv64gcsu_priv1_10_0_cpu_init },
+    { 0, NULL, NULL }
+};
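
A note on the table above (not part of the patch): bit_widths is used as a
mask against TARGET_LONG_BITS when registering and listing CPUs (see
riscv_cpu_register_types() and riscv_cpu_list() below), so the "any" model's
value of 96 is 32 | 64 and matches both RV32 and RV64 builds. A minimal
standalone sketch of that test:

    #include <assert.h>

    int main(void)
    {
        const int any_bit_widths = 96;   /* 32 | 64 */
        assert(any_bit_widths & 32);     /* selected when TARGET_LONG_BITS == 32 */
        assert(any_bit_widths & 64);     /* selected when TARGET_LONG_BITS == 64 */
        return 0;
    }
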
+
+static ObjectClass *riscv_cpu_class_by_name(const char *cpu_model)
+{
+    ObjectClass *oc;
+    char *typename;
+    char **cpuname;
+
+    cpuname = g_strsplit(cpu_model, ",", 1);
+    typename = g_strdup_printf(RISCV_CPU_TYPE_NAME("%s"), cpuname[0]);
+    oc = object_class_by_name(typename);
+    g_strfreev(cpuname);
+    g_free(typename);
+    if (!oc || !object_class_dynamic_cast(oc, TYPE_RISCV_CPU) ||
+        object_class_is_abstract(oc)) {
+        return NULL;
+    }
+    return oc;
+}
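
riscv_cpu_class_by_name() maps a user-visible model name to a QOM type name
by appending RISCV_CPU_TYPE_SUFFIX ("-riscv-cpu", defined in cpu.h). A
minimal standalone sketch of the mapping, using one of the model names
registered above:

    #include <assert.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char typename[64];
        /* mirrors g_strdup_printf(RISCV_CPU_TYPE_NAME("%s"), "sifive-u54") */
        snprintf(typename, sizeof(typename), "%s-riscv-cpu", "sifive-u54");
        assert(strcmp(typename, "sifive-u54-riscv-cpu") == 0);
        return 0;
    }
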
+
+static void riscv_cpu_dump_state(CPUState *cs, FILE *f,
+    fprintf_function cpu_fprintf, int flags)
+{
+    RISCVCPU *cpu = RISCV_CPU(cs);
+    CPURISCVState *env = &cpu->env;
+    int i;
+
+    cpu_fprintf(f, " %s " TARGET_FMT_lx "\n", "pc      ", env->pc);
+#ifndef CONFIG_USER_ONLY
+    cpu_fprintf(f, " %s " TARGET_FMT_lx "\n", "mhartid ", env->mhartid);
+    cpu_fprintf(f, " %s " TARGET_FMT_lx "\n", "mstatus ", env->mstatus);
+    cpu_fprintf(f, " %s " TARGET_FMT_lx "\n", "mip     ",
+        (target_ulong)atomic_read(&env->mip));
+    cpu_fprintf(f, " %s " TARGET_FMT_lx "\n", "mie     ", env->mie);
+    cpu_fprintf(f, " %s " TARGET_FMT_lx "\n", "mideleg ", env->mideleg);
+    cpu_fprintf(f, " %s " TARGET_FMT_lx "\n", "medeleg ", env->medeleg);
+    cpu_fprintf(f, " %s " TARGET_FMT_lx "\n", "mtvec   ", env->mtvec);
+    cpu_fprintf(f, " %s " TARGET_FMT_lx "\n", "mepc    ", env->mepc);
+    cpu_fprintf(f, " %s " TARGET_FMT_lx "\n", "mcause  ", env->mcause);
+#endif
+
+    for (i = 0; i < 32; i++) {
+        cpu_fprintf(f, " %s " TARGET_FMT_lx,
+            riscv_int_regnames[i], env->gpr[i]);
+        if ((i & 3) == 3) {
+            cpu_fprintf(f, "\n");
+        }
+    }
+    for (i = 0; i < 32; i++) {
+        cpu_fprintf(f, " %s %016" PRIx64,
+            riscv_fpr_regnames[i], env->fpr[i]);
+        if ((i & 3) == 3) {
+            cpu_fprintf(f, "\n");
+        }
+    }
+}
+
+static void riscv_cpu_set_pc(CPUState *cs, vaddr value)
+{
+    RISCVCPU *cpu = RISCV_CPU(cs);
+    CPURISCVState *env = &cpu->env;
+    env->pc = value;
+}
+
+static void riscv_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
+{
+    RISCVCPU *cpu = RISCV_CPU(cs);
+    CPURISCVState *env = &cpu->env;
+    env->pc = tb->pc;
+}
+
+static bool riscv_cpu_has_work(CPUState *cs)
+{
+#ifndef CONFIG_USER_ONLY
+    RISCVCPU *cpu = RISCV_CPU(cs);
+    CPURISCVState *env = &cpu->env;
+    /*
+     * Definition of the WFI instruction requires it to ignore the privilege
+     * mode and delegation registers, but respect individual enables
+     */
+    return (atomic_read(&env->mip) & env->mie) != 0;
+#else
+    return true;
+#endif
+}
+
+void restore_state_to_opc(CPURISCVState *env, TranslationBlock *tb,
+                          target_ulong *data)
+{
+    env->pc = data[0];
+}
+
+static void riscv_cpu_reset(CPUState *cs)
+{
+    RISCVCPU *cpu = RISCV_CPU(cs);
+    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
+    CPURISCVState *env = &cpu->env;
+
+    mcc->parent_reset(cs);
+#ifndef CONFIG_USER_ONLY
+    env->priv = PRV_M;
+    env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV);
+    env->mcause = 0;
+    env->pc = env->resetvec;
+#endif
+    cs->exception_index = EXCP_NONE;
+    set_default_nan_mode(1, &env->fp_status);
+}
+
+static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info)
+{
+#if defined(TARGET_RISCV32)
+    info->print_insn = print_insn_riscv32;
+#elif defined(TARGET_RISCV64)
+    info->print_insn = print_insn_riscv64;
+#endif
+}
+
+static void riscv_cpu_realize(DeviceState *dev, Error **errp)
+{
+    CPUState *cs = CPU(dev);
+    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev);
+    Error *local_err = NULL;
+
+    cpu_exec_realizefn(cs, &local_err);
+    if (local_err != NULL) {
+        error_propagate(errp, local_err);
+        return;
+    }
+
+    qemu_init_vcpu(cs);
+    cpu_reset(cs);
+
+    mcc->parent_realize(dev, errp);
+}
+
+static void riscv_cpu_init(Object *obj)
+{
+    CPUState *cs = CPU(obj);
+    RISCVCPU *cpu = RISCV_CPU(obj);
+
+    cs->env_ptr = &cpu->env;
+}
+
+static const VMStateDescription vmstate_riscv_cpu = {
+    .name = "cpu",
+    .unmigratable = 1,
+};
+
+static void riscv_cpu_class_init(ObjectClass *c, void *data)
+{
+    RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);
+    CPUClass *cc = CPU_CLASS(c);
+    DeviceClass *dc = DEVICE_CLASS(c);
+
+    mcc->parent_realize = dc->realize;
+    dc->realize = riscv_cpu_realize;
+
+    mcc->parent_reset = cc->reset;
+    cc->reset = riscv_cpu_reset;
+
+    cc->class_by_name = riscv_cpu_class_by_name;
+    cc->has_work = riscv_cpu_has_work;
+    cc->do_interrupt = riscv_cpu_do_interrupt;
+    cc->cpu_exec_interrupt = riscv_cpu_exec_interrupt;
+    cc->dump_state = riscv_cpu_dump_state;
+    cc->set_pc = riscv_cpu_set_pc;
+    cc->synchronize_from_tb = riscv_cpu_synchronize_from_tb;
+    cc->gdb_read_register = riscv_cpu_gdb_read_register;
+    cc->gdb_write_register = riscv_cpu_gdb_write_register;
+    cc->gdb_num_core_regs = 65;
+    cc->gdb_stop_before_watchpoint = true;
+    cc->disas_set_info = riscv_cpu_disas_set_info;
+#ifdef CONFIG_USER_ONLY
+    cc->handle_mmu_fault = riscv_cpu_handle_mmu_fault;
+#else
+    cc->do_unaligned_access = riscv_cpu_do_unaligned_access;
+    cc->get_phys_page_debug = riscv_cpu_get_phys_page_debug;
+#endif
+#ifdef CONFIG_TCG
+    cc->tcg_initialize = riscv_translate_init;
+#endif
+    /* For now, mark unmigratable: */
+    cc->vmsd = &vmstate_riscv_cpu;
+}
+
+static void cpu_register(const RISCVCPUInfo *info)
+{
+    TypeInfo type_info = {
+        .name = info->name,
+        .parent = TYPE_RISCV_CPU,
+        .instance_size = sizeof(RISCVCPU),
+        .instance_init = info->initfn,
+    };
+
+    type_register(&type_info);
+}
+
+static const TypeInfo riscv_cpu_type_info = {
+    .name = TYPE_RISCV_CPU,
+    .parent = TYPE_CPU,
+    .instance_size = sizeof(RISCVCPU),
+    .instance_init = riscv_cpu_init,
+    .abstract = false,
+    .class_size = sizeof(RISCVCPUClass),
+    .class_init = riscv_cpu_class_init,
+};
+
+char *riscv_isa_string(RISCVCPU *cpu)
+{
+    int i;
+    size_t maxlen = 5 + ctz32(cpu->env.misa);
+    char *isa_string = g_new0(char, maxlen);
+    snprintf(isa_string, maxlen, "rv%d", TARGET_LONG_BITS);
+    for (i = 0; i < sizeof(riscv_exts); i++) {
+        if (cpu->env.misa & RV(riscv_exts[i])) {
+            isa_string[strlen(isa_string)] = riscv_exts[i] - 'A' + 'a';
+        }
+    }
+    return isa_string;
+}
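
As a standalone sketch (not part of the patch), here is what the loop above
produces for an RV64 core with the IMAFDCSU extensions enabled: riscv_exts
is walked in order and each enabled letter is appended in lower case after
the "rv64" prefix:

    #include <assert.h>
    #include <string.h>

    int main(void)
    {
        const char exts[] = "IMAFDQECLBJTPVNSUHKORWXYZG";
        unsigned long misa = 0;
        for (const char *p = "IMAFDCSU"; *p; p++) {
            misa |= 1UL << (*p - 'A');   /* same encoding as RV(x) in cpu.h */
        }
        char isa[32] = "rv64";
        for (size_t i = 0; i < strlen(exts); i++) {
            if (misa & (1UL << (exts[i] - 'A'))) {
                isa[strlen(isa)] = exts[i] - 'A' + 'a';
            }
        }
        assert(strcmp(isa, "rv64imafdcsu") == 0);
        return 0;
    }
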
+
+void riscv_cpu_list(FILE *f, fprintf_function cpu_fprintf)
+{
+    const RISCVCPUInfo *info = riscv_cpus;
+
+    while (info->name) {
+        if (info->bit_widths & TARGET_LONG_BITS) {
+            (*cpu_fprintf)(f, "%s\n", info->name);
+        }
+        info++;
+    }
+}
+
+static void riscv_cpu_register_types(void)
+{
+    const RISCVCPUInfo *info = riscv_cpus;
+
+    type_register_static(&riscv_cpu_type_info);
+
+    while (info->name) {
+        if (info->bit_widths & TARGET_LONG_BITS) {
+            cpu_register(info);
+        }
+        info++;
+    }
+}
+
+type_init(riscv_cpu_register_types)
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
new file mode 100644
index 0000000000..cff02a2857
--- /dev/null
+++ b/target/riscv/cpu.h
@@ -0,0 +1,296 @@
+/*
+ * QEMU RISC-V CPU
+ *
+ * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
+ * Copyright (c) 2017-2018 SiFive, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef RISCV_CPU_H
+#define RISCV_CPU_H
+
+/* QEMU addressing/paging config */
+#define TARGET_PAGE_BITS 12 /* 4 KiB Pages */
+#if defined(TARGET_RISCV64)
+#define TARGET_LONG_BITS 64
+#define TARGET_PHYS_ADDR_SPACE_BITS 50
+#define TARGET_VIRT_ADDR_SPACE_BITS 39
+#elif defined(TARGET_RISCV32)
+#define TARGET_LONG_BITS 32
+#define TARGET_PHYS_ADDR_SPACE_BITS 34
+#define TARGET_VIRT_ADDR_SPACE_BITS 32
+#endif
+
+#define TCG_GUEST_DEFAULT_MO 0
+
+#define ELF_MACHINE EM_RISCV
+#define CPUArchState struct CPURISCVState
+
+#include "qemu-common.h"
+#include "qom/cpu.h"
+#include "exec/cpu-defs.h"
+#include "fpu/softfloat.h"
+
+#define TYPE_RISCV_CPU "riscv-cpu"
+
+#define RISCV_CPU_TYPE_SUFFIX "-" TYPE_RISCV_CPU
+#define RISCV_CPU_TYPE_NAME(name) (name RISCV_CPU_TYPE_SUFFIX)
+
+#define TYPE_RISCV_CPU_ANY              RISCV_CPU_TYPE_NAME("any")
+#define TYPE_RISCV_CPU_RV32GCSU_V1_09_1 RISCV_CPU_TYPE_NAME("rv32gcsu-v1.9.1")
+#define TYPE_RISCV_CPU_RV32GCSU_V1_10_0 RISCV_CPU_TYPE_NAME("rv32gcsu-v1.10.0")
+#define TYPE_RISCV_CPU_RV32IMACU_NOMMU  RISCV_CPU_TYPE_NAME("rv32imacu-nommu")
+#define TYPE_RISCV_CPU_RV64GCSU_V1_09_1 RISCV_CPU_TYPE_NAME("rv64gcsu-v1.9.1")
+#define TYPE_RISCV_CPU_RV64GCSU_V1_10_0 RISCV_CPU_TYPE_NAME("rv64gcsu-v1.10.0")
+#define TYPE_RISCV_CPU_RV64IMACU_NOMMU  RISCV_CPU_TYPE_NAME("rv64imacu-nommu")
+#define TYPE_RISCV_CPU_SIFIVE_E31       RISCV_CPU_TYPE_NAME("sifive-e31")
+#define TYPE_RISCV_CPU_SIFIVE_E51       RISCV_CPU_TYPE_NAME("sifive-e51")
+#define TYPE_RISCV_CPU_SIFIVE_U34       RISCV_CPU_TYPE_NAME("sifive-u34")
+#define TYPE_RISCV_CPU_SIFIVE_U54       RISCV_CPU_TYPE_NAME("sifive-u54")
+
+#define RV32 ((target_ulong)1 << (TARGET_LONG_BITS - 2))
+#define RV64 ((target_ulong)2 << (TARGET_LONG_BITS - 2))
+
+#if defined(TARGET_RISCV32)
+#define RVXLEN RV32
+#elif defined(TARGET_RISCV64)
+#define RVXLEN RV64
+#endif
+
+#define RV(x) ((target_ulong)1 << (x - 'A'))
+
+#define RVI RV('I')
+#define RVM RV('M')
+#define RVA RV('A')
+#define RVF RV('F')
+#define RVD RV('D')
+#define RVC RV('C')
+#define RVS RV('S')
+#define RVU RV('U')
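
RV(x) places each extension letter at bit position x - 'A' of misa, so the
single-letter macros above are just named bits. A minimal standalone check
of a few positions:

    #include <assert.h>

    #define RV(x) (1UL << ((x) - 'A'))

    int main(void)
    {
        assert(RV('A') == 1UL << 0);
        assert(RV('I') == 1UL << 8);
        assert(RV('U') == 1UL << 20);
        /* a complete misa value ORs RV32/RV64 (the top two bits) with these */
        return 0;
    }
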
+
+/* The S extension denotes that Supervisor mode exists; however, it is
+   possible to have a core that supports S mode but does not have an MMU.
+   There is currently no bit in misa to indicate whether an MMU exists,
+   so a separate CPU features bitfield is required. */
+enum {
+    RISCV_FEATURE_MMU
+};
+
+#define USER_VERSION_2_02_0 0x00020200
+#define PRIV_VERSION_1_09_1 0x00010901
+#define PRIV_VERSION_1_10_0 0x00011000
+
+#define TRANSLATE_FAIL 1
+#define TRANSLATE_SUCCESS 0
+#define NB_MMU_MODES 4
+#define MMU_USER_IDX 3
+
+#define MAX_RISCV_PMPS (16)
+
+typedef struct CPURISCVState CPURISCVState;
+
+#include "pmp.h"
+
+struct CPURISCVState {
+    target_ulong gpr[32];
+    uint64_t fpr[32]; /* assume both F and D extensions */
+    target_ulong pc;
+    target_ulong load_res;
+    target_ulong load_val;
+
+    target_ulong frm;
+
+    target_ulong badaddr;
+
+    target_ulong user_ver;
+    target_ulong priv_ver;
+    target_ulong misa;
+
+    uint32_t features;
+
+#ifndef CONFIG_USER_ONLY
+    target_ulong priv;
+    target_ulong resetvec;
+
+    target_ulong mhartid;
+    target_ulong mstatus;
+    /*
+     * CAUTION! Unlike the rest of this struct, mip is accessed asynchronously
+     * by I/O threads and other vCPUs, so hold the iothread mutex before
+     * operating on it.  CPU_INTERRUPT_HARD should be in effect iff this is
+     * non-zero.  Use riscv_cpu_set_local_interrupt.
+     */
+    uint32_t mip;        /* allow atomic_read for >= 32-bit hosts */
+    target_ulong mie;
+    target_ulong mideleg;
+
+    target_ulong sptbr;  /* until: priv-1.9.1 */
+    target_ulong satp;   /* since: priv-1.10.0 */
+    target_ulong sbadaddr;
+    target_ulong mbadaddr;
+    target_ulong medeleg;
+
+    target_ulong stvec;
+    target_ulong sepc;
+    target_ulong scause;
+
+    target_ulong mtvec;
+    target_ulong mepc;
+    target_ulong mcause;
+    target_ulong mtval;  /* since: priv-1.10.0 */
+
+    uint32_t mucounteren;
+    uint32_t mscounteren;
+    target_ulong scounteren; /* since: priv-1.10.0 */
+    target_ulong mcounteren; /* since: priv-1.10.0 */
+
+    target_ulong sscratch;
+    target_ulong mscratch;
+
+    /* temporary htif regs */
+    uint64_t mfromhost;
+    uint64_t mtohost;
+    uint64_t timecmp;
+
+    /* physical memory protection */
+    pmp_table_t pmp_state;
+#endif
+
+    float_status fp_status;
+
+    /* QEMU */
+    CPU_COMMON
+
+    /* Fields from here on are preserved across CPU reset. */
+    QEMUTimer *timer; /* Internal timer */
+};
+
+#define RISCV_CPU_CLASS(klass) \
+    OBJECT_CLASS_CHECK(RISCVCPUClass, (klass), TYPE_RISCV_CPU)
+#define RISCV_CPU(obj) \
+    OBJECT_CHECK(RISCVCPU, (obj), TYPE_RISCV_CPU)
+#define RISCV_CPU_GET_CLASS(obj) \
+    OBJECT_GET_CLASS(RISCVCPUClass, (obj), TYPE_RISCV_CPU)
+
+/**
+ * RISCVCPUClass:
+ * @parent_realize: The parent class' realize handler.
+ * @parent_reset: The parent class' reset handler.
+ *
+ * A RISCV CPU model.
+ */
+typedef struct RISCVCPUClass {
+    /*< private >*/
+    CPUClass parent_class;
+    /*< public >*/
+    DeviceRealize parent_realize;
+    void (*parent_reset)(CPUState *cpu);
+} RISCVCPUClass;
+
+/**
+ * RISCVCPU:
+ * @env: #CPURISCVState
+ *
+ * A RISCV CPU.
+ */
+typedef struct RISCVCPU {
+    /*< private >*/
+    CPUState parent_obj;
+    /*< public >*/
+    CPURISCVState env;
+} RISCVCPU;
+
+static inline RISCVCPU *riscv_env_get_cpu(CPURISCVState *env)
+{
+    return container_of(env, RISCVCPU, env);
+}
+
+static inline int riscv_has_ext(CPURISCVState *env, target_ulong ext)
+{
+    return (env->misa & ext) != 0;
+}
+
+static inline bool riscv_feature(CPURISCVState *env, int feature)
+{
+    return env->features & (1ULL << feature);
+}
+
+#include "cpu_user.h"
+#include "cpu_bits.h"
+
+extern const char * const riscv_int_regnames[];
+extern const char * const riscv_fpr_regnames[];
+extern const char * const riscv_excp_names[];
+extern const char * const riscv_intr_names[];
+
+#define ENV_GET_CPU(e) CPU(riscv_env_get_cpu(e))
+#define ENV_OFFSET offsetof(RISCVCPU, env)
+
+void riscv_cpu_do_interrupt(CPUState *cpu);
+int riscv_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
+int riscv_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
+bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request);
+int riscv_cpu_mmu_index(CPURISCVState *env, bool ifetch);
+hwaddr riscv_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
+void  riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
+                                    MMUAccessType access_type, int mmu_idx,
+                                    uintptr_t retaddr);
+int riscv_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size,
+                              int rw, int mmu_idx);
+
+char *riscv_isa_string(RISCVCPU *cpu);
+void riscv_cpu_list(FILE *f, fprintf_function cpu_fprintf);
+
+#define cpu_init(cpu_model) cpu_generic_init(TYPE_RISCV_CPU, cpu_model)
+#define cpu_signal_handler cpu_riscv_signal_handler
+#define cpu_list riscv_cpu_list
+#define cpu_mmu_index riscv_cpu_mmu_index
+
+void riscv_set_mode(CPURISCVState *env, target_ulong newpriv);
+
+void riscv_translate_init(void);
+RISCVCPU *cpu_riscv_init(const char *cpu_model);
+int cpu_riscv_signal_handler(int host_signum, void *pinfo, void *puc);
+void QEMU_NORETURN do_raise_exception_err(CPURISCVState *env,
+                                          uint32_t exception, uintptr_t pc);
+
+target_ulong cpu_riscv_get_fflags(CPURISCVState *env);
+void cpu_riscv_set_fflags(CPURISCVState *env, target_ulong);
+
+#define TB_FLAGS_MMU_MASK  3
+#define TB_FLAGS_FP_ENABLE MSTATUS_FS
+
+static inline void cpu_get_tb_cpu_state(CPURISCVState *env, target_ulong *pc,
+                                        target_ulong *cs_base, uint32_t *flags)
+{
+    *pc = env->pc;
+    *cs_base = 0;
+#ifdef CONFIG_USER_ONLY
+    *flags = TB_FLAGS_FP_ENABLE;
+#else
+    *flags = cpu_mmu_index(env, 0) | (env->mstatus & MSTATUS_FS);
+#endif
+}
+
+void csr_write_helper(CPURISCVState *env, target_ulong val_to_write,
+        target_ulong csrno);
+target_ulong csr_read_helper(CPURISCVState *env, target_ulong csrno);
+
+#ifndef CONFIG_USER_ONLY
+void riscv_set_local_interrupt(RISCVCPU *cpu, target_ulong mask, int value);
+#endif
+
+#include "exec/cpu-all.h"
+
+#endif /* RISCV_CPU_H */
diff --git a/target/riscv/cpu_bits.h b/target/riscv/cpu_bits.h
new file mode 100644
index 0000000000..64aa097181
--- /dev/null
+++ b/target/riscv/cpu_bits.h
@@ -0,0 +1,411 @@
+/* RISC-V ISA constants */
+
+#define get_field(reg, mask) (((reg) & \
+                 (target_ulong)(mask)) / ((mask) & ~((mask) << 1)))
+#define set_field(reg, mask, val) (((reg) & ~(target_ulong)(mask)) | \
+                 (((target_ulong)(val) * ((mask) & ~((mask) << 1))) & \
+                 (target_ulong)(mask)))
+
+#define PGSHIFT 12
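
The get_field()/set_field() macros above exploit the fact that dividing or
multiplying by the lowest set bit of the mask, (mask) & ~((mask) << 1),
shifts the field down to or up from bit 0 without an explicit shift amount.
A minimal standalone sketch using the MSTATUS_MPP field defined later in
this header (bits 12:11, mask 0x1800):

    #include <assert.h>
    #include <stdint.h>

    typedef uint64_t target_ulong;   /* assumption: RV64 build */

    #define get_field(reg, mask) (((reg) & \
                     (target_ulong)(mask)) / ((mask) & ~((mask) << 1)))
    #define set_field(reg, mask, val) (((reg) & ~(target_ulong)(mask)) | \
                     (((target_ulong)(val) * ((mask) & ~((mask) << 1))) & \
                     (target_ulong)(mask)))

    int main(void)
    {
        target_ulong mstatus = 0;
        mstatus = set_field(mstatus, 0x1800, 3);   /* MSTATUS_MPP = PRV_M */
        assert(mstatus == 0x1800);
        assert(get_field(mstatus, 0x1800) == 3);
        return 0;
    }
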
+
+#define FSR_RD_SHIFT 5
+#define FSR_RD   (0x7 << FSR_RD_SHIFT)
+
+#define FPEXC_NX 0x01
+#define FPEXC_UF 0x02
+#define FPEXC_OF 0x04
+#define FPEXC_DZ 0x08
+#define FPEXC_NV 0x10
+
+#define FSR_AEXC_SHIFT 0
+#define FSR_NVA  (FPEXC_NV << FSR_AEXC_SHIFT)
+#define FSR_OFA  (FPEXC_OF << FSR_AEXC_SHIFT)
+#define FSR_UFA  (FPEXC_UF << FSR_AEXC_SHIFT)
+#define FSR_DZA  (FPEXC_DZ << FSR_AEXC_SHIFT)
+#define FSR_NXA  (FPEXC_NX << FSR_AEXC_SHIFT)
+#define FSR_AEXC (FSR_NVA | FSR_OFA | FSR_UFA | FSR_DZA | FSR_NXA)
+
+/* CSR numbers */
+#define CSR_FFLAGS 0x1
+#define CSR_FRM 0x2
+#define CSR_FCSR 0x3
+#define CSR_CYCLE 0xc00
+#define CSR_TIME 0xc01
+#define CSR_INSTRET 0xc02
+#define CSR_HPMCOUNTER3 0xc03
+#define CSR_HPMCOUNTER4 0xc04
+#define CSR_HPMCOUNTER5 0xc05
+#define CSR_HPMCOUNTER6 0xc06
+#define CSR_HPMCOUNTER7 0xc07
+#define CSR_HPMCOUNTER8 0xc08
+#define CSR_HPMCOUNTER9 0xc09
+#define CSR_HPMCOUNTER10 0xc0a
+#define CSR_HPMCOUNTER11 0xc0b
+#define CSR_HPMCOUNTER12 0xc0c
+#define CSR_HPMCOUNTER13 0xc0d
+#define CSR_HPMCOUNTER14 0xc0e
+#define CSR_HPMCOUNTER15 0xc0f
+#define CSR_HPMCOUNTER16 0xc10
+#define CSR_HPMCOUNTER17 0xc11
+#define CSR_HPMCOUNTER18 0xc12
+#define CSR_HPMCOUNTER19 0xc13
+#define CSR_HPMCOUNTER20 0xc14
+#define CSR_HPMCOUNTER21 0xc15
+#define CSR_HPMCOUNTER22 0xc16
+#define CSR_HPMCOUNTER23 0xc17
+#define CSR_HPMCOUNTER24 0xc18
+#define CSR_HPMCOUNTER25 0xc19
+#define CSR_HPMCOUNTER26 0xc1a
+#define CSR_HPMCOUNTER27 0xc1b
+#define CSR_HPMCOUNTER28 0xc1c
+#define CSR_HPMCOUNTER29 0xc1d
+#define CSR_HPMCOUNTER30 0xc1e
+#define CSR_HPMCOUNTER31 0xc1f
+#define CSR_SSTATUS 0x100
+#define CSR_SIE 0x104
+#define CSR_STVEC 0x105
+#define CSR_SCOUNTEREN 0x106
+#define CSR_SSCRATCH 0x140
+#define CSR_SEPC 0x141
+#define CSR_SCAUSE 0x142
+#define CSR_SBADADDR 0x143
+#define CSR_SIP 0x144
+#define CSR_SPTBR 0x180
+#define CSR_SATP 0x180
+#define CSR_MSTATUS 0x300
+#define CSR_MISA 0x301
+#define CSR_MEDELEG 0x302
+#define CSR_MIDELEG 0x303
+#define CSR_MIE 0x304
+#define CSR_MTVEC 0x305
+#define CSR_MCOUNTEREN 0x306
+#define CSR_MSCRATCH 0x340
+#define CSR_MEPC 0x341
+#define CSR_MCAUSE 0x342
+#define CSR_MBADADDR 0x343
+#define CSR_MIP 0x344
+#define CSR_PMPCFG0 0x3a0
+#define CSR_PMPCFG1 0x3a1
+#define CSR_PMPCFG2 0x3a2
+#define CSR_PMPCFG3 0x3a3
+#define CSR_PMPADDR0 0x3b0
+#define CSR_PMPADDR1 0x3b1
+#define CSR_PMPADDR2 0x3b2
+#define CSR_PMPADDR3 0x3b3
+#define CSR_PMPADDR4 0x3b4
+#define CSR_PMPADDR5 0x3b5
+#define CSR_PMPADDR6 0x3b6
+#define CSR_PMPADDR7 0x3b7
+#define CSR_PMPADDR8 0x3b8
+#define CSR_PMPADDR9 0x3b9
+#define CSR_PMPADDR10 0x3ba
+#define CSR_PMPADDR11 0x3bb
+#define CSR_PMPADDR12 0x3bc
+#define CSR_PMPADDR13 0x3bd
+#define CSR_PMPADDR14 0x3be
+#define CSR_PMPADDR15 0x3bf
+#define CSR_TSELECT 0x7a0
+#define CSR_TDATA1 0x7a1
+#define CSR_TDATA2 0x7a2
+#define CSR_TDATA3 0x7a3
+#define CSR_DCSR 0x7b0
+#define CSR_DPC 0x7b1
+#define CSR_DSCRATCH 0x7b2
+#define CSR_MCYCLE 0xb00
+#define CSR_MINSTRET 0xb02
+#define CSR_MHPMCOUNTER3 0xb03
+#define CSR_MHPMCOUNTER4 0xb04
+#define CSR_MHPMCOUNTER5 0xb05
+#define CSR_MHPMCOUNTER6 0xb06
+#define CSR_MHPMCOUNTER7 0xb07
+#define CSR_MHPMCOUNTER8 0xb08
+#define CSR_MHPMCOUNTER9 0xb09
+#define CSR_MHPMCOUNTER10 0xb0a
+#define CSR_MHPMCOUNTER11 0xb0b
+#define CSR_MHPMCOUNTER12 0xb0c
+#define CSR_MHPMCOUNTER13 0xb0d
+#define CSR_MHPMCOUNTER14 0xb0e
+#define CSR_MHPMCOUNTER15 0xb0f
+#define CSR_MHPMCOUNTER16 0xb10
+#define CSR_MHPMCOUNTER17 0xb11
+#define CSR_MHPMCOUNTER18 0xb12
+#define CSR_MHPMCOUNTER19 0xb13
+#define CSR_MHPMCOUNTER20 0xb14
+#define CSR_MHPMCOUNTER21 0xb15
+#define CSR_MHPMCOUNTER22 0xb16
+#define CSR_MHPMCOUNTER23 0xb17
+#define CSR_MHPMCOUNTER24 0xb18
+#define CSR_MHPMCOUNTER25 0xb19
+#define CSR_MHPMCOUNTER26 0xb1a
+#define CSR_MHPMCOUNTER27 0xb1b
+#define CSR_MHPMCOUNTER28 0xb1c
+#define CSR_MHPMCOUNTER29 0xb1d
+#define CSR_MHPMCOUNTER30 0xb1e
+#define CSR_MHPMCOUNTER31 0xb1f
+#define CSR_MUCOUNTEREN 0x320
+#define CSR_MSCOUNTEREN 0x321
+#define CSR_MHPMEVENT3 0x323
+#define CSR_MHPMEVENT4 0x324
+#define CSR_MHPMEVENT5 0x325
+#define CSR_MHPMEVENT6 0x326
+#define CSR_MHPMEVENT7 0x327
+#define CSR_MHPMEVENT8 0x328
+#define CSR_MHPMEVENT9 0x329
+#define CSR_MHPMEVENT10 0x32a
+#define CSR_MHPMEVENT11 0x32b
+#define CSR_MHPMEVENT12 0x32c
+#define CSR_MHPMEVENT13 0x32d
+#define CSR_MHPMEVENT14 0x32e
+#define CSR_MHPMEVENT15 0x32f
+#define CSR_MHPMEVENT16 0x330
+#define CSR_MHPMEVENT17 0x331
+#define CSR_MHPMEVENT18 0x332
+#define CSR_MHPMEVENT19 0x333
+#define CSR_MHPMEVENT20 0x334
+#define CSR_MHPMEVENT21 0x335
+#define CSR_MHPMEVENT22 0x336
+#define CSR_MHPMEVENT23 0x337
+#define CSR_MHPMEVENT24 0x338
+#define CSR_MHPMEVENT25 0x339
+#define CSR_MHPMEVENT26 0x33a
+#define CSR_MHPMEVENT27 0x33b
+#define CSR_MHPMEVENT28 0x33c
+#define CSR_MHPMEVENT29 0x33d
+#define CSR_MHPMEVENT30 0x33e
+#define CSR_MHPMEVENT31 0x33f
+#define CSR_MVENDORID 0xf11
+#define CSR_MARCHID 0xf12
+#define CSR_MIMPID 0xf13
+#define CSR_MHARTID 0xf14
+#define CSR_CYCLEH 0xc80
+#define CSR_TIMEH 0xc81
+#define CSR_INSTRETH 0xc82
+#define CSR_HPMCOUNTER3H 0xc83
+#define CSR_HPMCOUNTER4H 0xc84
+#define CSR_HPMCOUNTER5H 0xc85
+#define CSR_HPMCOUNTER6H 0xc86
+#define CSR_HPMCOUNTER7H 0xc87
+#define CSR_HPMCOUNTER8H 0xc88
+#define CSR_HPMCOUNTER9H 0xc89
+#define CSR_HPMCOUNTER10H 0xc8a
+#define CSR_HPMCOUNTER11H 0xc8b
+#define CSR_HPMCOUNTER12H 0xc8c
+#define CSR_HPMCOUNTER13H 0xc8d
+#define CSR_HPMCOUNTER14H 0xc8e
+#define CSR_HPMCOUNTER15H 0xc8f
+#define CSR_HPMCOUNTER16H 0xc90
+#define CSR_HPMCOUNTER17H 0xc91
+#define CSR_HPMCOUNTER18H 0xc92
+#define CSR_HPMCOUNTER19H 0xc93
+#define CSR_HPMCOUNTER20H 0xc94
+#define CSR_HPMCOUNTER21H 0xc95
+#define CSR_HPMCOUNTER22H 0xc96
+#define CSR_HPMCOUNTER23H 0xc97
+#define CSR_HPMCOUNTER24H 0xc98
+#define CSR_HPMCOUNTER25H 0xc99
+#define CSR_HPMCOUNTER26H 0xc9a
+#define CSR_HPMCOUNTER27H 0xc9b
+#define CSR_HPMCOUNTER28H 0xc9c
+#define CSR_HPMCOUNTER29H 0xc9d
+#define CSR_HPMCOUNTER30H 0xc9e
+#define CSR_HPMCOUNTER31H 0xc9f
+#define CSR_MCYCLEH 0xb80
+#define CSR_MINSTRETH 0xb82
+#define CSR_MHPMCOUNTER3H 0xb83
+#define CSR_MHPMCOUNTER4H 0xb84
+#define CSR_MHPMCOUNTER5H 0xb85
+#define CSR_MHPMCOUNTER6H 0xb86
+#define CSR_MHPMCOUNTER7H 0xb87
+#define CSR_MHPMCOUNTER8H 0xb88
+#define CSR_MHPMCOUNTER9H 0xb89
+#define CSR_MHPMCOUNTER10H 0xb8a
+#define CSR_MHPMCOUNTER11H 0xb8b
+#define CSR_MHPMCOUNTER12H 0xb8c
+#define CSR_MHPMCOUNTER13H 0xb8d
+#define CSR_MHPMCOUNTER14H 0xb8e
+#define CSR_MHPMCOUNTER15H 0xb8f
+#define CSR_MHPMCOUNTER16H 0xb90
+#define CSR_MHPMCOUNTER17H 0xb91
+#define CSR_MHPMCOUNTER18H 0xb92
+#define CSR_MHPMCOUNTER19H 0xb93
+#define CSR_MHPMCOUNTER20H 0xb94
+#define CSR_MHPMCOUNTER21H 0xb95
+#define CSR_MHPMCOUNTER22H 0xb96
+#define CSR_MHPMCOUNTER23H 0xb97
+#define CSR_MHPMCOUNTER24H 0xb98
+#define CSR_MHPMCOUNTER25H 0xb99
+#define CSR_MHPMCOUNTER26H 0xb9a
+#define CSR_MHPMCOUNTER27H 0xb9b
+#define CSR_MHPMCOUNTER28H 0xb9c
+#define CSR_MHPMCOUNTER29H 0xb9d
+#define CSR_MHPMCOUNTER30H 0xb9e
+#define CSR_MHPMCOUNTER31H 0xb9f
+
+/* mstatus bits */
+#define MSTATUS_UIE         0x00000001
+#define MSTATUS_SIE         0x00000002
+#define MSTATUS_HIE         0x00000004
+#define MSTATUS_MIE         0x00000008
+#define MSTATUS_UPIE        0x00000010
+#define MSTATUS_SPIE        0x00000020
+#define MSTATUS_HPIE        0x00000040
+#define MSTATUS_MPIE        0x00000080
+#define MSTATUS_SPP         0x00000100
+#define MSTATUS_HPP         0x00000600
+#define MSTATUS_MPP         0x00001800
+#define MSTATUS_FS          0x00006000
+#define MSTATUS_XS          0x00018000
+#define MSTATUS_MPRV        0x00020000
+#define MSTATUS_PUM         0x00040000 /* until: priv-1.9.1 */
+#define MSTATUS_SUM         0x00040000 /* since: priv-1.10 */
+#define MSTATUS_MXR         0x00080000
+#define MSTATUS_VM          0x1F000000 /* until: priv-1.9.1 */
+#define MSTATUS_TVM         0x00100000 /* since: priv-1.10 */
+#define MSTATUS_TW          0x20000000 /* since: priv-1.10 */
+#define MSTATUS_TSR         0x40000000 /* since: priv-1.10 */
+
+#define MSTATUS64_UXL       0x0000000300000000ULL
+#define MSTATUS64_SXL       0x0000000C00000000ULL
+
+#define MSTATUS32_SD        0x80000000
+#define MSTATUS64_SD        0x8000000000000000ULL
+
+#if defined(TARGET_RISCV32)
+#define MSTATUS_SD MSTATUS32_SD
+#elif defined(TARGET_RISCV64)
+#define MSTATUS_SD MSTATUS64_SD
+#endif
+
+/* sstatus bits */
+#define SSTATUS_UIE         0x00000001
+#define SSTATUS_SIE         0x00000002
+#define SSTATUS_UPIE        0x00000010
+#define SSTATUS_SPIE        0x00000020
+#define SSTATUS_SPP         0x00000100
+#define SSTATUS_FS          0x00006000
+#define SSTATUS_XS          0x00018000
+#define SSTATUS_PUM         0x00040000 /* until: priv-1.9.1 */
+#define SSTATUS_SUM         0x00040000 /* since: priv-1.10 */
+#define SSTATUS_MXR         0x00080000
+
+#define SSTATUS32_SD        0x80000000
+#define SSTATUS64_SD        0x8000000000000000ULL
+
+#if defined(TARGET_RISCV32)
+#define SSTATUS_SD SSTATUS32_SD
+#elif defined(TARGET_RISCV64)
+#define SSTATUS_SD SSTATUS64_SD
+#endif
+
+/* irqs */
+#define MIP_SSIP            (1 << IRQ_S_SOFT)
+#define MIP_HSIP            (1 << IRQ_H_SOFT)
+#define MIP_MSIP            (1 << IRQ_M_SOFT)
+#define MIP_STIP            (1 << IRQ_S_TIMER)
+#define MIP_HTIP            (1 << IRQ_H_TIMER)
+#define MIP_MTIP            (1 << IRQ_M_TIMER)
+#define MIP_SEIP            (1 << IRQ_S_EXT)
+#define MIP_HEIP            (1 << IRQ_H_EXT)
+#define MIP_MEIP            (1 << IRQ_M_EXT)
+
+#define SIP_SSIP            MIP_SSIP
+#define SIP_STIP            MIP_STIP
+#define SIP_SEIP            MIP_SEIP
+
+#define PRV_U 0
+#define PRV_S 1
+#define PRV_H 2
+#define PRV_M 3
+
+/* privileged ISA 1.9.1 VM modes (mstatus.vm) */
+#define VM_1_09_MBARE 0
+#define VM_1_09_MBB   1
+#define VM_1_09_MBBID 2
+#define VM_1_09_SV32  8
+#define VM_1_09_SV39  9
+#define VM_1_09_SV48  10
+
+/* privileged ISA 1.10.0 VM modes (satp.mode) */
+#define VM_1_10_MBARE 0
+#define VM_1_10_SV32  1
+#define VM_1_10_SV39  8
+#define VM_1_10_SV48  9
+#define VM_1_10_SV57  10
+#define VM_1_10_SV64  11
+
+/* privileged ISA interrupt causes */
+#define IRQ_U_SOFT      0  /* since: priv-1.10 */
+#define IRQ_S_SOFT      1
+#define IRQ_H_SOFT      2  /* until: priv-1.9.1 */
+#define IRQ_M_SOFT      3  /* until: priv-1.9.1 */
+#define IRQ_U_TIMER     4  /* since: priv-1.10 */
+#define IRQ_S_TIMER     5
+#define IRQ_H_TIMER     6  /* until: priv-1.9.1 */
+#define IRQ_M_TIMER     7  /* until: priv-1.9.1 */
+#define IRQ_U_EXT       8  /* since: priv-1.10 */
+#define IRQ_S_EXT       9
+#define IRQ_H_EXT       10 /* until: priv-1.9.1 */
+#define IRQ_M_EXT       11 /* until: priv-1.9.1 */
+#define IRQ_X_COP       12 /* non-standard */
+
+/* Default addresses */
+#define DEFAULT_RSTVEC     0x00001000
+
+/* RV32 satp field masks */
+#define SATP32_MODE 0x80000000
+#define SATP32_ASID 0x7fc00000
+#define SATP32_PPN  0x003fffff
+
+/* RV64 satp field masks */
+#define SATP64_MODE 0xF000000000000000ULL
+#define SATP64_ASID 0x0FFFF00000000000ULL
+#define SATP64_PPN  0x00000FFFFFFFFFFFULL
+
+#if defined(TARGET_RISCV32)
+#define SATP_MODE SATP32_MODE
+#define SATP_ASID SATP32_ASID
+#define SATP_PPN  SATP32_PPN
+#endif
+#if defined(TARGET_RISCV64)
+#define SATP_MODE SATP64_MODE
+#define SATP_ASID SATP64_ASID
+#define SATP_PPN  SATP64_PPN
+#endif
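
Combined with the get_field() macro defined at the top of this header, these
masks pull satp apart without explicit shifts. A minimal standalone sketch
for RV64, where Sv39 is satp.mode == 8 (matching VM_1_10_SV39 above):

    #include <assert.h>
    #include <stdint.h>

    typedef uint64_t target_ulong;

    #define get_field(reg, mask) (((reg) & \
                     (target_ulong)(mask)) / ((mask) & ~((mask) << 1)))
    #define SATP64_MODE 0xF000000000000000ULL
    #define SATP64_PPN  0x00000FFFFFFFFFFFULL

    int main(void)
    {
        target_ulong satp = ((target_ulong)8 << 60) | 0x12345; /* Sv39 + PPN */
        assert(get_field(satp, SATP64_MODE) == 8);
        assert(get_field(satp, SATP64_PPN) == 0x12345);
        return 0;
    }
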
+
+/* RISCV Exception Codes */
+#define EXCP_NONE                       -1 /* not a real RISCV exception code */
+#define RISCV_EXCP_INST_ADDR_MIS           0x0
+#define RISCV_EXCP_INST_ACCESS_FAULT       0x1
+#define RISCV_EXCP_ILLEGAL_INST            0x2
+#define RISCV_EXCP_BREAKPOINT              0x3
+#define RISCV_EXCP_LOAD_ADDR_MIS           0x4
+#define RISCV_EXCP_LOAD_ACCESS_FAULT       0x5
+#define RISCV_EXCP_STORE_AMO_ADDR_MIS      0x6
+#define RISCV_EXCP_STORE_AMO_ACCESS_FAULT  0x7
+#define RISCV_EXCP_U_ECALL                 0x8 /* for convenience, report all
+                                                  ECALLs with this cause; the
+                                                  trap handler fixes it up
+                                                  from the privilege level */
+#define RISCV_EXCP_S_ECALL                 0x9
+#define RISCV_EXCP_H_ECALL                 0xa
+#define RISCV_EXCP_M_ECALL                 0xb
+#define RISCV_EXCP_INST_PAGE_FAULT         0xc /* since: priv-1.10.0 */
+#define RISCV_EXCP_LOAD_PAGE_FAULT         0xd /* since: priv-1.10.0 */
+#define RISCV_EXCP_STORE_PAGE_FAULT        0xf /* since: priv-1.10.0 */
+
+#define RISCV_EXCP_INT_FLAG                0x80000000
+#define RISCV_EXCP_INT_MASK                0x7fffffff
+
+/* page table entry (PTE) fields */
+#define PTE_V     0x001 /* Valid */
+#define PTE_R     0x002 /* Read */
+#define PTE_W     0x004 /* Write */
+#define PTE_X     0x008 /* Execute */
+#define PTE_U     0x010 /* User */
+#define PTE_G     0x020 /* Global */
+#define PTE_A     0x040 /* Accessed */
+#define PTE_D     0x080 /* Dirty */
+#define PTE_SOFT  0x300 /* Reserved for Software */
+
+#define PTE_PPN_SHIFT 10
+
+#define PTE_TABLE(PTE) (((PTE) & (PTE_V | PTE_R | PTE_W | PTE_X)) == PTE_V)
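
PTE_TABLE() identifies a pointer to the next page-table level: such a PTE is
valid but has no R/W/X permissions. A minimal standalone check:

    #include <assert.h>

    #define PTE_V 0x001
    #define PTE_R 0x002
    #define PTE_W 0x004
    #define PTE_X 0x008
    #define PTE_TABLE(PTE) (((PTE) & (PTE_V | PTE_R | PTE_W | PTE_X)) == PTE_V)

    int main(void)
    {
        assert(PTE_TABLE(PTE_V));           /* valid, no R/W/X: next level */
        assert(!PTE_TABLE(PTE_V | PTE_R));  /* leaf: readable page */
        assert(!PTE_TABLE(0));              /* invalid entry */
        return 0;
    }
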
diff --git a/target/riscv/cpu_user.h b/target/riscv/cpu_user.h
new file mode 100644
index 0000000000..c2199610ab
--- /dev/null
+++ b/target/riscv/cpu_user.h
@@ -0,0 +1,13 @@
+#define xRA 1   /* return address (aka link register) */
+#define xSP 2   /* stack pointer */
+#define xGP 3   /* global pointer */
+#define xTP 4   /* thread pointer */
+
+#define xA0 10  /* gpr[10-17] are syscall arguments */
+#define xA1 11
+#define xA2 12
+#define xA3 13
+#define xA4 14
+#define xA5 15
+#define xA6 16
+#define xA7 17  /* syscall number goes here */
diff --git a/target/riscv/fpu_helper.c b/target/riscv/fpu_helper.c
new file mode 100644
index 0000000000..abbadead5c
--- /dev/null
+++ b/target/riscv/fpu_helper.c
@@ -0,0 +1,373 @@
+/*
+ * RISC-V FPU Emulation Helpers for QEMU.
+ *
+ * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include <stdlib.h>
+#include "cpu.h"
+#include "qemu/host-utils.h"
+#include "exec/exec-all.h"
+#include "exec/helper-proto.h"
+
+target_ulong cpu_riscv_get_fflags(CPURISCVState *env)
+{
+    int soft = get_float_exception_flags(&env->fp_status);
+    target_ulong hard = 0;
+
+    hard |= (soft & float_flag_inexact) ? FPEXC_NX : 0;
+    hard |= (soft & float_flag_underflow) ? FPEXC_UF : 0;
+    hard |= (soft & float_flag_overflow) ? FPEXC_OF : 0;
+    hard |= (soft & float_flag_divbyzero) ? FPEXC_DZ : 0;
+    hard |= (soft & float_flag_invalid) ? FPEXC_NV : 0;
+
+    return hard;
+}
+
+void cpu_riscv_set_fflags(CPURISCVState *env, target_ulong hard)
+{
+    int soft = 0;
+
+    soft |= (hard & FPEXC_NX) ? float_flag_inexact : 0;
+    soft |= (hard & FPEXC_UF) ? float_flag_underflow : 0;
+    soft |= (hard & FPEXC_OF) ? float_flag_overflow : 0;
+    soft |= (hard & FPEXC_DZ) ? float_flag_divbyzero : 0;
+    soft |= (hard & FPEXC_NV) ? float_flag_invalid : 0;
+
+    set_float_exception_flags(soft, &env->fp_status);
+}
+
+void helper_set_rounding_mode(CPURISCVState *env, uint32_t rm)
+{
+    int softrm;
+
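+    /* rm == 7 is the DYN encoding: use the dynamic mode from the frm CSR */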
+    if (rm == 7) {
+        rm = env->frm;
+    }
+    switch (rm) {
+    case 0:
+        softrm = float_round_nearest_even;
+        break;
+    case 1:
+        softrm = float_round_to_zero;
+        break;
+    case 2:
+        softrm = float_round_down;
+        break;
+    case 3:
+        softrm = float_round_up;
+        break;
+    case 4:
+        softrm = float_round_ties_away;
+        break;
+    default:
+        do_raise_exception_err(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+    }
+
+    set_float_rounding_mode(softrm, &env->fp_status);
+}
+
+uint64_t helper_fmadd_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2,
+                        uint64_t frs3)
+{
+    return float32_muladd(frs1, frs2, frs3, 0, &env->fp_status);
+}
+
+uint64_t helper_fmadd_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2,
+                        uint64_t frs3)
+{
+    return float64_muladd(frs1, frs2, frs3, 0, &env->fp_status);
+}
+
+uint64_t helper_fmsub_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2,
+                        uint64_t frs3)
+{
+    return float32_muladd(frs1, frs2, frs3, float_muladd_negate_c,
+                          &env->fp_status);
+}
+
+uint64_t helper_fmsub_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2,
+                        uint64_t frs3)
+{
+    return float64_muladd(frs1, frs2, frs3, float_muladd_negate_c,
+                          &env->fp_status);
+}
+
+uint64_t helper_fnmsub_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2,
+                         uint64_t frs3)
+{
+    return float32_muladd(frs1, frs2, frs3, float_muladd_negate_product,
+                          &env->fp_status);
+}
+
+uint64_t helper_fnmsub_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2,
+                         uint64_t frs3)
+{
+    return float64_muladd(frs1, frs2, frs3, float_muladd_negate_product,
+                          &env->fp_status);
+}
+
+uint64_t helper_fnmadd_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2,
+                         uint64_t frs3)
+{
+    return float32_muladd(frs1, frs2, frs3, float_muladd_negate_c |
+                          float_muladd_negate_product, &env->fp_status);
+}
+
+uint64_t helper_fnmadd_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2,
+                         uint64_t frs3)
+{
+    return float64_muladd(frs1, frs2, frs3, float_muladd_negate_c |
+                          float_muladd_negate_product, &env->fp_status);
+}
+
+uint64_t helper_fadd_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
+{
+    return float32_add(frs1, frs2, &env->fp_status);
+}
+
+uint64_t helper_fsub_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
+{
+    return float32_sub(frs1, frs2, &env->fp_status);
+}
+
+uint64_t helper_fmul_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
+{
+    return float32_mul(frs1, frs2, &env->fp_status);
+}
+
+uint64_t helper_fdiv_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
+{
+    return float32_div(frs1, frs2, &env->fp_status);
+}
+
+uint64_t helper_fmin_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
+{
+    return float32_minnum(frs1, frs2, &env->fp_status);
+}
+
+uint64_t helper_fmax_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
+{
+    return float32_maxnum(frs1, frs2, &env->fp_status);
+}
+
+uint64_t helper_fsqrt_s(CPURISCVState *env, uint64_t frs1)
+{
+    return float32_sqrt(frs1, &env->fp_status);
+}
+
+target_ulong helper_fle_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
+{
+    return float32_le(frs1, frs2, &env->fp_status);
+}
+
+target_ulong helper_flt_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
+{
+    return float32_lt(frs1, frs2, &env->fp_status);
+}
+
+target_ulong helper_feq_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
+{
+    return float32_eq_quiet(frs1, frs2, &env->fp_status);
+}
+
+target_ulong helper_fcvt_w_s(CPURISCVState *env, uint64_t frs1)
+{
+    return float32_to_int32(frs1, &env->fp_status);
+}
+
+target_ulong helper_fcvt_wu_s(CPURISCVState *env, uint64_t frs1)
+{
+    return (int32_t)float32_to_uint32(frs1, &env->fp_status);
+}
+
+#if defined(TARGET_RISCV64)
+uint64_t helper_fcvt_l_s(CPURISCVState *env, uint64_t frs1)
+{
+    return float32_to_int64(frs1, &env->fp_status);
+}
+
+uint64_t helper_fcvt_lu_s(CPURISCVState *env, uint64_t frs1)
+{
+    return float32_to_uint64(frs1, &env->fp_status);
+}
+#endif
+
+uint64_t helper_fcvt_s_w(CPURISCVState *env, target_ulong rs1)
+{
+    return int32_to_float32((int32_t)rs1, &env->fp_status);
+}
+
+uint64_t helper_fcvt_s_wu(CPURISCVState *env, target_ulong rs1)
+{
+    return uint32_to_float32((uint32_t)rs1, &env->fp_status);
+}
+
+#if defined(TARGET_RISCV64)
+uint64_t helper_fcvt_s_l(CPURISCVState *env, uint64_t rs1)
+{
+    return int64_to_float32(rs1, &env->fp_status);
+}
+
+uint64_t helper_fcvt_s_lu(CPURISCVState *env, uint64_t rs1)
+{
+    return uint64_to_float32(rs1, &env->fp_status);
+}
+#endif
+
+target_ulong helper_fclass_s(uint64_t frs1)
+{
+    float32 f = frs1;
+    bool sign = float32_is_neg(f);
+
+    if (float32_is_infinity(f)) {
+        return sign ? 1 << 0 : 1 << 7;
+    } else if (float32_is_zero(f)) {
+        return sign ? 1 << 3 : 1 << 4;
+    } else if (float32_is_zero_or_denormal(f)) {
+        return sign ? 1 << 2 : 1 << 5;
+    } else if (float32_is_any_nan(f)) {
+        float_status s = { }; /* for snan_bit_is_one */
+        return float32_is_quiet_nan(f, &s) ? 1 << 9 : 1 << 8;
+    } else {
+        return sign ? 1 << 1 : 1 << 6;
+    }
+}
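
For reference (not part of the patch), the fclass result is a one-hot value:
bit 0 = -inf, 1 = -normal, 2 = -subnormal, 3 = -0, 4 = +0, 5 = +subnormal,
6 = +normal, 7 = +inf, 8 = signaling NaN, 9 = quiet NaN. A minimal standalone
sketch that mirrors the classification for raw IEEE-754 binary32 encodings:

    #include <assert.h>
    #include <stdint.h>

    static int fclass32(uint32_t f)
    {
        int sign = f >> 31;
        uint32_t exp = (f >> 23) & 0xff;
        uint32_t frac = f & 0x7fffff;

        if (exp == 0xff && frac == 0) {         /* infinity */
            return sign ? 1 << 0 : 1 << 7;
        } else if (exp == 0xff) {               /* NaN; MSB of frac = quiet */
            return (frac >> 22) ? 1 << 9 : 1 << 8;
        } else if (exp == 0 && frac == 0) {     /* zero */
            return sign ? 1 << 3 : 1 << 4;
        } else if (exp == 0) {                  /* subnormal */
            return sign ? 1 << 2 : 1 << 5;
        } else {                                /* normal */
            return sign ? 1 << 1 : 1 << 6;
        }
    }

    int main(void)
    {
        assert(fclass32(0x3f800000) == 1 << 6); /* +1.0f is +normal */
        assert(fclass32(0xff800000) == 1 << 0); /* -infinity */
        assert(fclass32(0x7fc00000) == 1 << 9); /* quiet NaN */
        return 0;
    }
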
+
+uint64_t helper_fadd_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
+{
+    return float64_add(frs1, frs2, &env->fp_status);
+}
+
+uint64_t helper_fsub_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
+{
+    return float64_sub(frs1, frs2, &env->fp_status);
+}
+
+uint64_t helper_fmul_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
+{
+    return float64_mul(frs1, frs2, &env->fp_status);
+}
+
+uint64_t helper_fdiv_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
+{
+    return float64_div(frs1, frs2, &env->fp_status);
+}
+
+uint64_t helper_fmin_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
+{
+    return float64_minnum(frs1, frs2, &env->fp_status);
+}
+
+uint64_t helper_fmax_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
+{
+    return float64_maxnum(frs1, frs2, &env->fp_status);
+}
+
+uint64_t helper_fcvt_s_d(CPURISCVState *env, uint64_t rs1)
+{
+    rs1 = float64_to_float32(rs1, &env->fp_status);
+    return float32_maybe_silence_nan(rs1, &env->fp_status);
+}
+
+uint64_t helper_fcvt_d_s(CPURISCVState *env, uint64_t rs1)
+{
+    rs1 = float32_to_float64(rs1, &env->fp_status);
+    return float64_maybe_silence_nan(rs1, &env->fp_status);
+}
+
+uint64_t helper_fsqrt_d(CPURISCVState *env, uint64_t frs1)
+{
+    return float64_sqrt(frs1, &env->fp_status);
+}
+
+target_ulong helper_fle_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
+{
+    return float64_le(frs1, frs2, &env->fp_status);
+}
+
+target_ulong helper_flt_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
+{
+    return float64_lt(frs1, frs2, &env->fp_status);
+}
+
+target_ulong helper_feq_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
+{
+    return float64_eq_quiet(frs1, frs2, &env->fp_status);
+}
+
+target_ulong helper_fcvt_w_d(CPURISCVState *env, uint64_t frs1)
+{
+    return float64_to_int32(frs1, &env->fp_status);
+}
+
+target_ulong helper_fcvt_wu_d(CPURISCVState *env, uint64_t frs1)
+{
+    return (int32_t)float64_to_uint32(frs1, &env->fp_status);
+}
+
+#if defined(TARGET_RISCV64)
+uint64_t helper_fcvt_l_d(CPURISCVState *env, uint64_t frs1)
+{
+    return float64_to_int64(frs1, &env->fp_status);
+}
+
+uint64_t helper_fcvt_lu_d(CPURISCVState *env, uint64_t frs1)
+{
+    return float64_to_uint64(frs1, &env->fp_status);
+}
+#endif
+
+uint64_t helper_fcvt_d_w(CPURISCVState *env, target_ulong rs1)
+{
+    return int32_to_float64((int32_t)rs1, &env->fp_status);
+}
+
+uint64_t helper_fcvt_d_wu(CPURISCVState *env, target_ulong rs1)
+{
+    return uint32_to_float64((uint32_t)rs1, &env->fp_status);
+}
+
+#if defined(TARGET_RISCV64)
+uint64_t helper_fcvt_d_l(CPURISCVState *env, uint64_t rs1)
+{
+    return int64_to_float64(rs1, &env->fp_status);
+}
+
+uint64_t helper_fcvt_d_lu(CPURISCVState *env, uint64_t rs1)
+{
+    return uint64_to_float64(rs1, &env->fp_status);
+}
+#endif
+
+target_ulong helper_fclass_d(uint64_t frs1)
+{
+    float64 f = frs1;
+    bool sign = float64_is_neg(f);
+
+    if (float64_is_infinity(f)) {
+        return sign ? 1 << 0 : 1 << 7;
+    } else if (float64_is_zero(f)) {
+        return sign ? 1 << 3 : 1 << 4;
+    } else if (float64_is_zero_or_denormal(f)) {
+        return sign ? 1 << 2 : 1 << 5;
+    } else if (float64_is_any_nan(f)) {
+        float_status s = { }; /* for snan_bit_is_one */
+        return float64_is_quiet_nan(f, &s) ? 1 << 9 : 1 << 8;
+    } else {
+        return sign ? 1 << 1 : 1 << 6;
+    }
+}
diff --git a/target/riscv/gdbstub.c b/target/riscv/gdbstub.c
new file mode 100644
index 0000000000..4f919b6c34
--- /dev/null
+++ b/target/riscv/gdbstub.c
@@ -0,0 +1,62 @@
+/*
+ * RISC-V GDB Server Stub
+ *
+ * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "exec/gdbstub.h"
+#include "cpu.h"
+
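+/*
+ * GDB register numbering: 0-31 are the GPRs, 32 is the pc, 33-64 are the
+ * FPRs, and numbers from 65 up map to CSRs (CSR number = n - 65).  This
+ * matches gdb_num_core_regs = 65 in cpu.c.
+ */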
+int riscv_cpu_gdb_read_register(CPUState *cs, uint8_t *mem_buf, int n)
+{
+    RISCVCPU *cpu = RISCV_CPU(cs);
+    CPURISCVState *env = &cpu->env;
+
+    if (n < 32) {
+        return gdb_get_regl(mem_buf, env->gpr[n]);
+    } else if (n == 32) {
+        return gdb_get_regl(mem_buf, env->pc);
+    } else if (n < 65) {
+        return gdb_get_reg64(mem_buf, env->fpr[n - 33]);
+    } else if (n < 4096 + 65) {
+        return gdb_get_regl(mem_buf, csr_read_helper(env, n - 65));
+    }
+    return 0;
+}
+
+int riscv_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
+{
+    RISCVCPU *cpu = RISCV_CPU(cs);
+    CPURISCVState *env = &cpu->env;
+
+    if (n == 0) {
+        /* discard writes to x0 */
+        return sizeof(target_ulong);
+    } else if (n < 32) {
+        env->gpr[n] = ldtul_p(mem_buf);
+        return sizeof(target_ulong);
+    } else if (n == 32) {
+        env->pc = ldtul_p(mem_buf);
+        return sizeof(target_ulong);
+    } else if (n < 65) {
+        env->fpr[n - 33] = ldq_p(mem_buf); /* always 64-bit */
+        return sizeof(uint64_t);
+    } else if (n < 4096 + 65) {
+        csr_write_helper(env, ldtul_p(mem_buf), n - 65);
+    }
+    return 0;
+}
diff --git a/target/riscv/helper.c b/target/riscv/helper.c
new file mode 100644
index 0000000000..02cbcea2b7
--- /dev/null
+++ b/target/riscv/helper.c
@@ -0,0 +1,503 @@
+/*
+ * RISC-V emulation helpers for qemu.
+ *
+ * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
+ * Copyright (c) 2017-2018 SiFive, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "tcg-op.h"
+
+#define RISCV_DEBUG_INTERRUPT 0
+
+int riscv_cpu_mmu_index(CPURISCVState *env, bool ifetch)
+{
+#ifdef CONFIG_USER_ONLY
+    return 0;
+#else
+    return env->priv;
+#endif
+}
+
+#ifndef CONFIG_USER_ONLY
+/*
+ * Return RISC-V IRQ number if an interrupt should be taken, else -1.
+ * Used in cpu-exec.c
+ *
+ * Adapted from Spike's processor_t::take_interrupt()
+ */
+static int riscv_cpu_hw_interrupts_pending(CPURISCVState *env)
+{
+    target_ulong pending_interrupts = atomic_read(&env->mip) & env->mie;
+
+    target_ulong mie = get_field(env->mstatus, MSTATUS_MIE);
+    target_ulong m_enabled = env->priv < PRV_M || (env->priv == PRV_M && mie);
+    target_ulong enabled_interrupts = pending_interrupts &
+                                      ~env->mideleg & -m_enabled;
+
+    target_ulong sie = get_field(env->mstatus, MSTATUS_SIE);
+    target_ulong s_enabled = env->priv < PRV_S || (env->priv == PRV_S && sie);
+    enabled_interrupts |= pending_interrupts & env->mideleg &
+                          -s_enabled;
+
+    if (enabled_interrupts) {
+        return ctz64(enabled_interrupts); /* since non-zero */
+    } else {
+        return EXCP_NONE; /* indicates no pending interrupt */
+    }
+}
+#endif
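
The -m_enabled and -s_enabled arithmetic above relies on negating a 0/1 flag
to produce an all-zeroes or all-ones mask. A minimal standalone sketch of
the trick:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t pending = 0x20;                  /* e.g. MIP_STIP */
        uint64_t enabled = 1;
        assert((pending & -enabled) == pending);  /* -1 is all ones */
        enabled = 0;
        assert((pending & -enabled) == 0);        /* -0 is all zeroes */
        return 0;
    }
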
+
+bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
+{
+#if !defined(CONFIG_USER_ONLY)
+    if (interrupt_request & CPU_INTERRUPT_HARD) {
+        RISCVCPU *cpu = RISCV_CPU(cs);
+        CPURISCVState *env = &cpu->env;
+        int interruptno = riscv_cpu_hw_interrupts_pending(env);
+        if (interruptno >= 0) {
+            cs->exception_index = RISCV_EXCP_INT_FLAG | interruptno;
+            riscv_cpu_do_interrupt(cs);
+            return true;
+        }
+    }
+#endif
+    return false;
+}
+
+#if !defined(CONFIG_USER_ONLY)
+
+/* get_physical_address - get the physical address for this virtual address
+ *
+ * Do a page table walk to obtain the physical address corresponding to a
+ * virtual address. Returns 0 if the translation was successful
+ *
+ * Adapted from Spike's mmu_t::translate and mmu_t::walk
+ *
+ */
+static int get_physical_address(CPURISCVState *env, hwaddr *physical,
+                                int *prot, target_ulong addr,
+                                int access_type, int mmu_idx)
+{
+    /* NOTE: the env->pc value visible here will not be
+     * correct, but the value visible to the exception handler
+     * (riscv_cpu_do_interrupt) is correct */
+
+    int mode = mmu_idx;
+
+    if (mode == PRV_M && access_type != MMU_INST_FETCH) {
+        if (get_field(env->mstatus, MSTATUS_MPRV)) {
+            mode = get_field(env->mstatus, MSTATUS_MPP);
+        }
+    }
+
+    if (mode == PRV_M || !riscv_feature(env, RISCV_FEATURE_MMU)) {
+        *physical = addr;
+        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+        return TRANSLATE_SUCCESS;
+    }
+
+    *prot = 0;
+
+    target_ulong base;
+    int levels, ptidxbits, ptesize, vm, sum;
+    int mxr = get_field(env->mstatus, MSTATUS_MXR);
+
+    if (env->priv_ver >= PRIV_VERSION_1_10_0) {
+        base = get_field(env->satp, SATP_PPN) << PGSHIFT;
+        sum = get_field(env->mstatus, MSTATUS_SUM);
+        vm = get_field(env->satp, SATP_MODE);
+        switch (vm) {
+        case VM_1_10_SV32:
+            levels = 2; ptidxbits = 10; ptesize = 4; break;
+        case VM_1_10_SV39:
+            levels = 3; ptidxbits = 9; ptesize = 8; break;
+        case VM_1_10_SV48:
+            levels = 4; ptidxbits = 9; ptesize = 8; break;
+        case VM_1_10_SV57:
+            levels = 5; ptidxbits = 9; ptesize = 8; break;
+        case VM_1_10_MBARE:
+            *physical = addr;
+            *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+            return TRANSLATE_SUCCESS;
+        default:
+            g_assert_not_reached();
+        }
+    } else {
+        base = env->sptbr << PGSHIFT;
+        sum = !get_field(env->mstatus, MSTATUS_PUM);
+        vm = get_field(env->mstatus, MSTATUS_VM);
+        switch (vm) {
+        case VM_1_09_SV32:
+            levels = 2; ptidxbits = 10; ptesize = 4; break;
+        case VM_1_09_SV39:
+            levels = 3; ptidxbits = 9; ptesize = 8; break;
+        case VM_1_09_SV48:
+            levels = 4; ptidxbits = 9; ptesize = 8; break;
+        case VM_1_09_MBARE:
+            *physical = addr;
+            *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+            return TRANSLATE_SUCCESS;
+        default:
+            g_assert_not_reached();
+        }
+    }
+
+    CPUState *cs = CPU(riscv_env_get_cpu(env));
+    int va_bits = PGSHIFT + levels * ptidxbits;
+    target_ulong mask = (1L << (TARGET_LONG_BITS - (va_bits - 1))) - 1;
+    target_ulong masked_msbs = (addr >> (va_bits - 1)) & mask;
+    if (masked_msbs != 0 && masked_msbs != mask) {
+        return TRANSLATE_FAIL;
+    }
+
+    int ptshift = (levels - 1) * ptidxbits;
+    int i;
+
+#if !TCG_OVERSIZED_GUEST
+restart:
+#endif
+    for (i = 0; i < levels; i++, ptshift -= ptidxbits) {
+        target_ulong idx = (addr >> (PGSHIFT + ptshift)) &
+                           ((1 << ptidxbits) - 1);
+
+        /* check that physical address of PTE is legal */
+        target_ulong pte_addr = base + idx * ptesize;
+#if defined(TARGET_RISCV32)
+        target_ulong pte = ldl_phys(cs->as, pte_addr);
+#elif defined(TARGET_RISCV64)
+        target_ulong pte = ldq_phys(cs->as, pte_addr);
+#endif
+        target_ulong ppn = pte >> PTE_PPN_SHIFT;
+
+        if (PTE_TABLE(pte)) { /* next level of page table */
+            base = ppn << PGSHIFT;
+        } else if ((pte & PTE_U) ? (mode == PRV_S) && !sum : !(mode == PRV_S)) {
+            break;
+        } else if (!(pte & PTE_V) || (!(pte & PTE_R) && (pte & PTE_W))) {
+            break;
+        } else if (access_type == MMU_INST_FETCH ? !(pte & PTE_X) :
+                  access_type == MMU_DATA_LOAD ?  !(pte & PTE_R) &&
+                  !(mxr && (pte & PTE_X)) : !((pte & PTE_R) && (pte & PTE_W))) {
+            break;
+        } else {
+            /* if necessary, set accessed and dirty bits. */
+            target_ulong updated_pte = pte | PTE_A |
+                (access_type == MMU_DATA_STORE ? PTE_D : 0);
+
+            /* Page table updates need to be atomic with MTTCG enabled */
+            if (updated_pte != pte) {
+                /* If the accessed or dirty bits need updating, and the PTE
+                 * is in RAM, then we do so atomically with a compare and
+                 * swap. If the PTE is in IO space, then it can't be updated.
+                 * If the PTE changed under us, we must re-walk the page
+                 * table, as the old PTE is no longer valid. */
+                MemoryRegion *mr;
+                hwaddr l = sizeof(target_ulong), addr1;
+                mr = address_space_translate(cs->as, pte_addr,
+                    &addr1, &l, false);
+                if (memory_access_is_direct(mr, true)) {
+                    target_ulong *pte_pa =
+                        qemu_map_ram_ptr(mr->ram_block, addr1);
+#if TCG_OVERSIZED_GUEST
+                    /* MTTCG is not enabled on oversized TCG guests so
+                     * page table updates do not need to be atomic */
+                    *pte_pa = pte = updated_pte;
+#else
+                    target_ulong old_pte =
+                        atomic_cmpxchg(pte_pa, pte, updated_pte);
+                    if (old_pte != pte) {
+                        goto restart;
+                    } else {
+                        pte = updated_pte;
+                    }
+#endif
+                } else {
+                    /* misconfigured PTE in ROM (AD bits are not pre-set) or
+                     * PTE is in IO space and can't be updated atomically */
+                    return TRANSLATE_FAIL;
+                }
+            }
+
+            /* for superpage mappings, make a fake leaf PTE for the TLB's
+               benefit. */
+            target_ulong vpn = addr >> PGSHIFT;
+            *physical = (ppn | (vpn & ((1L << ptshift) - 1))) << PGSHIFT;
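+            /* e.g. an Sv39 gigapage matched at the first level still has
+             * ptshift = 18, so the low 18 VPN bits pass through unchanged
+             * and every 4 KiB page inside the superpage maps correctly */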
+
+            if ((pte & PTE_R)) {
+                *prot |= PAGE_READ;
+            }
+            if ((pte & PTE_X)) {
+                *prot |= PAGE_EXEC;
+            }
+            /* only add write permission on stores or if the page
+             * is already dirty, so that we don't miss further
+             * page table walks to update the dirty bit */
+            if ((pte & PTE_W) &&
+                    (access_type == MMU_DATA_STORE || (pte & PTE_D))) {
+                *prot |= PAGE_WRITE;
+            }
+            return TRANSLATE_SUCCESS;
+        }
+    }
+    return TRANSLATE_FAIL;
+}
+
+static void raise_mmu_exception(CPURISCVState *env, target_ulong address,
+                                MMUAccessType access_type)
+{
+    CPUState *cs = CPU(riscv_env_get_cpu(env));
+    int page_fault_exceptions =
+        (env->priv_ver >= PRIV_VERSION_1_10_0) &&
+        get_field(env->satp, SATP_MODE) != VM_1_10_MBARE;
+    switch (access_type) {
+    case MMU_INST_FETCH:
+        cs->exception_index = page_fault_exceptions ?
+            RISCV_EXCP_INST_PAGE_FAULT : RISCV_EXCP_INST_ACCESS_FAULT;
+        break;
+    case MMU_DATA_LOAD:
+        cs->exception_index = page_fault_exceptions ?
+            RISCV_EXCP_LOAD_PAGE_FAULT : RISCV_EXCP_LOAD_ACCESS_FAULT;
+        break;
+    case MMU_DATA_STORE:
+        cs->exception_index = page_fault_exceptions ?
+            RISCV_EXCP_STORE_PAGE_FAULT : RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
+        break;
+    default:
+        g_assert_not_reached();
+    }
+    env->badaddr = address;
+}
+
+hwaddr riscv_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
+{
+    RISCVCPU *cpu = RISCV_CPU(cs);
+    hwaddr phys_addr;
+    int prot;
+    int mmu_idx = cpu_mmu_index(&cpu->env, false);
+
+    if (get_physical_address(&cpu->env, &phys_addr, &prot, addr, 0, mmu_idx)) {
+        return -1;
+    }
+    return phys_addr;
+}
+
+void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
+                                   MMUAccessType access_type, int mmu_idx,
+                                   uintptr_t retaddr)
+{
+    RISCVCPU *cpu = RISCV_CPU(cs);
+    CPURISCVState *env = &cpu->env;
+    switch (access_type) {
+    case MMU_INST_FETCH:
+        cs->exception_index = RISCV_EXCP_INST_ADDR_MIS;
+        break;
+    case MMU_DATA_LOAD:
+        cs->exception_index = RISCV_EXCP_LOAD_ADDR_MIS;
+        break;
+    case MMU_DATA_STORE:
+        cs->exception_index = RISCV_EXCP_STORE_AMO_ADDR_MIS;
+        break;
+    default:
+        g_assert_not_reached();
+    }
+    env->badaddr = addr;
+    do_raise_exception_err(env, cs->exception_index, retaddr);
+}
+
+/* Called by QEMU's softmmu to refill the TLB after a miss */
+void tlb_fill(CPUState *cs, target_ulong addr, int size,
+        MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
+{
+    int ret;
+    ret = riscv_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx);
+    if (ret == TRANSLATE_FAIL) {
+        RISCVCPU *cpu = RISCV_CPU(cs);
+        CPURISCVState *env = &cpu->env;
+        do_raise_exception_err(env, cs->exception_index, retaddr);
+    }
+}
+
+#endif
+
+int riscv_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size,
+        int rw, int mmu_idx)
+{
+    RISCVCPU *cpu = RISCV_CPU(cs);
+    CPURISCVState *env = &cpu->env;
+#if !defined(CONFIG_USER_ONLY)
+    hwaddr pa = 0;
+    int prot;
+#endif
+    int ret = TRANSLATE_FAIL;
+
+    qemu_log_mask(CPU_LOG_MMU,
+            "%s pc " TARGET_FMT_lx " ad %" VADDR_PRIx " rw %d mmu_idx %d\n",
+            __func__, env->pc, address, rw, mmu_idx);
+
+#if !defined(CONFIG_USER_ONLY)
+    ret = get_physical_address(env, &pa, &prot, address, rw, mmu_idx);
+    qemu_log_mask(CPU_LOG_MMU,
+            "%s address=%" VADDR_PRIx " ret %d physical " TARGET_FMT_plx
+             " prot %d\n", __func__, address, ret, pa, prot);
+    if (!pmp_hart_has_privs(env, pa, TARGET_PAGE_SIZE, 1 << rw)) {
+        ret = TRANSLATE_FAIL;
+    }
+    if (ret == TRANSLATE_SUCCESS) {
+        tlb_set_page(cs, address & TARGET_PAGE_MASK, pa & TARGET_PAGE_MASK,
+                     prot, mmu_idx, TARGET_PAGE_SIZE);
+    } else if (ret == TRANSLATE_FAIL) {
+        raise_mmu_exception(env, address, rw);
+    }
+#else
+    switch (rw) {
+    case MMU_INST_FETCH:
+        cs->exception_index = RISCV_EXCP_INST_PAGE_FAULT;
+        break;
+    case MMU_DATA_LOAD:
+        cs->exception_index = RISCV_EXCP_LOAD_PAGE_FAULT;
+        break;
+    case MMU_DATA_STORE:
+        cs->exception_index = RISCV_EXCP_STORE_PAGE_FAULT;
+        break;
+    }
+#endif
+    return ret;
+}
+
+/*
+ * Handle Traps
+ *
+ * Adapted from Spike's processor_t::take_trap.
+ */
+void riscv_cpu_do_interrupt(CPUState *cs)
+{
+#if !defined(CONFIG_USER_ONLY)
+
+    RISCVCPU *cpu = RISCV_CPU(cs);
+    CPURISCVState *env = &cpu->env;
+
+    if (RISCV_DEBUG_INTERRUPT) {
+        int log_cause = cs->exception_index & RISCV_EXCP_INT_MASK;
+        if (cs->exception_index & RISCV_EXCP_INT_FLAG) {
+            qemu_log_mask(LOG_TRACE, "core   0: trap %s, epc 0x" TARGET_FMT_lx,
+                riscv_intr_names[log_cause], env->pc);
+        } else {
+            qemu_log_mask(LOG_TRACE, "core   0: intr %s, epc 0x" TARGET_FMT_lx,
+                riscv_excp_names[log_cause], env->pc);
+        }
+    }
+
+    target_ulong fixed_cause = 0;
+    if (cs->exception_index & (RISCV_EXCP_INT_FLAG)) {
+        /* hacky for now: the MSB (bit XLEN-1) of the cause indicates an
+         * interrupt, but cs->exception_index is only 32 bits wide */
+        fixed_cause = cs->exception_index & RISCV_EXCP_INT_MASK;
+        fixed_cause |= ((target_ulong)1) << (TARGET_LONG_BITS - 1);
+    } else {
+        /* fixup User ECALL -> correct priv ECALL */
+        if (cs->exception_index == RISCV_EXCP_U_ECALL) {
+            switch (env->priv) {
+            case PRV_U:
+                fixed_cause = RISCV_EXCP_U_ECALL;
+                break;
+            case PRV_S:
+                fixed_cause = RISCV_EXCP_S_ECALL;
+                break;
+            case PRV_H:
+                fixed_cause = RISCV_EXCP_H_ECALL;
+                break;
+            case PRV_M:
+                fixed_cause = RISCV_EXCP_M_ECALL;
+                break;
+            }
+        } else {
+            fixed_cause = cs->exception_index;
+        }
+    }
+
+    target_ulong backup_epc = env->pc;
+
+    target_ulong bit = fixed_cause;
+    target_ulong deleg = env->medeleg;
+
+    int hasbadaddr =
+        (fixed_cause == RISCV_EXCP_INST_ADDR_MIS) ||
+        (fixed_cause == RISCV_EXCP_INST_ACCESS_FAULT) ||
+        (fixed_cause == RISCV_EXCP_LOAD_ADDR_MIS) ||
+        (fixed_cause == RISCV_EXCP_STORE_AMO_ADDR_MIS) ||
+        (fixed_cause == RISCV_EXCP_LOAD_ACCESS_FAULT) ||
+        (fixed_cause == RISCV_EXCP_STORE_AMO_ACCESS_FAULT) ||
+        (fixed_cause == RISCV_EXCP_INST_PAGE_FAULT) ||
+        (fixed_cause == RISCV_EXCP_LOAD_PAGE_FAULT) ||
+        (fixed_cause == RISCV_EXCP_STORE_PAGE_FAULT);
+
+    if (bit & ((target_ulong)1 << (TARGET_LONG_BITS - 1))) {
+        deleg = env->mideleg;
+        bit &= ~((target_ulong)1 << (TARGET_LONG_BITS - 1));
+    }
+
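+    /* e.g. a supervisor timer interrupt (cause 5 with the interrupt bit set)
+     * is checked against mideleg bit 5, while a load page fault (cause 13)
+     * is checked against medeleg bit 13 */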
+    if (env->priv <= PRV_S && bit < 64 && ((deleg >> bit) & 1)) {
+        /* handle the trap in S-mode */
+        /* No need to check STVEC for misaligned - lower 2 bits cannot be set */
+        env->pc = env->stvec;
+        env->scause = fixed_cause;
+        env->sepc = backup_epc;
+
+        if (hasbadaddr) {
+            if (RISCV_DEBUG_INTERRUPT) {
+                qemu_log_mask(LOG_TRACE, "core " TARGET_FMT_ld
+                    ": badaddr 0x" TARGET_FMT_lx, env->mhartid, env->badaddr);
+            }
+            env->sbadaddr = env->badaddr;
+        }
+
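+        /* trap entry: save the interrupt-enable bit into SPIE and the
+         * previous privilege into SPP, then disable S-mode interrupts */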
+        target_ulong s = env->mstatus;
+        s = set_field(s, MSTATUS_SPIE, env->priv_ver >= PRIV_VERSION_1_10_0 ?
+            get_field(s, MSTATUS_SIE) : get_field(s, MSTATUS_UIE << env->priv));
+        s = set_field(s, MSTATUS_SPP, env->priv);
+        s = set_field(s, MSTATUS_SIE, 0);
+        csr_write_helper(env, s, CSR_MSTATUS);
+        riscv_set_mode(env, PRV_S);
+    } else {
+        /* No need to check MTVEC for misaligned - lower 2 bits cannot be set */
+        env->pc = env->mtvec;
+        env->mepc = backup_epc;
+        env->mcause = fixed_cause;
+
+        if (hasbadaddr) {
+            if (RISCV_DEBUG_INTERRUPT) {
+                qemu_log_mask(LOG_TRACE, "core " TARGET_FMT_ld
+                    ": badaddr 0x" TARGET_FMT_lx, env->mhartid, env->badaddr);
+            }
+            env->mbadaddr = env->badaddr;
+        }
+
+        target_ulong s = env->mstatus;
+        s = set_field(s, MSTATUS_MPIE, env->priv_ver >= PRIV_VERSION_1_10_0 ?
+            get_field(s, MSTATUS_MIE) : get_field(s, MSTATUS_UIE << env->priv));
+        s = set_field(s, MSTATUS_MPP, env->priv);
+        s = set_field(s, MSTATUS_MIE, 0);
+        csr_write_helper(env, s, CSR_MSTATUS);
+        riscv_set_mode(env, PRV_M);
+    }
+    /* TODO yield load reservation  */
+#endif
+    cs->exception_index = EXCP_NONE; /* mark handled to qemu */
+}
diff --git a/target/riscv/helper.h b/target/riscv/helper.h
new file mode 100644
index 0000000000..debb22a480
--- /dev/null
+++ b/target/riscv/helper.h
@@ -0,0 +1,78 @@
+/* Exceptions */
+DEF_HELPER_2(raise_exception, noreturn, env, i32)
+
+/* Floating Point - rounding mode */
+DEF_HELPER_FLAGS_2(set_rounding_mode, TCG_CALL_NO_WG, void, env, i32)
+
+/* Floating Point - fused */
+DEF_HELPER_FLAGS_4(fmadd_s, TCG_CALL_NO_RWG, i64, env, i64, i64, i64)
+DEF_HELPER_FLAGS_4(fmadd_d, TCG_CALL_NO_RWG, i64, env, i64, i64, i64)
+DEF_HELPER_FLAGS_4(fmsub_s, TCG_CALL_NO_RWG, i64, env, i64, i64, i64)
+DEF_HELPER_FLAGS_4(fmsub_d, TCG_CALL_NO_RWG, i64, env, i64, i64, i64)
+DEF_HELPER_FLAGS_4(fnmsub_s, TCG_CALL_NO_RWG, i64, env, i64, i64, i64)
+DEF_HELPER_FLAGS_4(fnmsub_d, TCG_CALL_NO_RWG, i64, env, i64, i64, i64)
+DEF_HELPER_FLAGS_4(fnmadd_s, TCG_CALL_NO_RWG, i64, env, i64, i64, i64)
+DEF_HELPER_FLAGS_4(fnmadd_d, TCG_CALL_NO_RWG, i64, env, i64, i64, i64)
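+/* Each DEF_HELPER_FLAGS_* line above expands (via exec/helper-proto.h) into a
+ * C prototype; the first one, for instance, becomes roughly:
+ *   uint64_t helper_fmadd_s(CPURISCVState *env, uint64_t, uint64_t, uint64_t);
+ * TCG_CALL_NO_RWG declares that the helper neither reads nor writes TCG
+ * globals, letting the optimiser keep guest state live across the call */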
+
+/* Floating Point - Single Precision */
+DEF_HELPER_FLAGS_3(fadd_s, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(fsub_s, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(fmul_s, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(fdiv_s, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(fmin_s, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(fmax_s, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_2(fsqrt_s, TCG_CALL_NO_RWG, i64, env, i64)
+DEF_HELPER_FLAGS_3(fle_s, TCG_CALL_NO_RWG, tl, env, i64, i64)
+DEF_HELPER_FLAGS_3(flt_s, TCG_CALL_NO_RWG, tl, env, i64, i64)
+DEF_HELPER_FLAGS_3(feq_s, TCG_CALL_NO_RWG, tl, env, i64, i64)
+DEF_HELPER_FLAGS_2(fcvt_w_s, TCG_CALL_NO_RWG, tl, env, i64)
+DEF_HELPER_FLAGS_2(fcvt_wu_s, TCG_CALL_NO_RWG, tl, env, i64)
+#if defined(TARGET_RISCV64)
+DEF_HELPER_FLAGS_2(fcvt_l_s, TCG_CALL_NO_RWG, tl, env, i64)
+DEF_HELPER_FLAGS_2(fcvt_lu_s, TCG_CALL_NO_RWG, tl, env, i64)
+#endif
+DEF_HELPER_FLAGS_2(fcvt_s_w, TCG_CALL_NO_RWG, i64, env, tl)
+DEF_HELPER_FLAGS_2(fcvt_s_wu, TCG_CALL_NO_RWG, i64, env, tl)
+#if defined(TARGET_RISCV64)
+DEF_HELPER_FLAGS_2(fcvt_s_l, TCG_CALL_NO_RWG, i64, env, tl)
+DEF_HELPER_FLAGS_2(fcvt_s_lu, TCG_CALL_NO_RWG, i64, env, tl)
+#endif
+DEF_HELPER_FLAGS_1(fclass_s, TCG_CALL_NO_RWG_SE, tl, i64)
+
+/* Floating Point - Double Precision */
+DEF_HELPER_FLAGS_3(fadd_d, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(fsub_d, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(fmul_d, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(fdiv_d, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(fmin_d, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(fmax_d, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_2(fcvt_s_d, TCG_CALL_NO_RWG, i64, env, i64)
+DEF_HELPER_FLAGS_2(fcvt_d_s, TCG_CALL_NO_RWG, i64, env, i64)
+DEF_HELPER_FLAGS_2(fsqrt_d, TCG_CALL_NO_RWG, i64, env, i64)
+DEF_HELPER_FLAGS_3(fle_d, TCG_CALL_NO_RWG, tl, env, i64, i64)
+DEF_HELPER_FLAGS_3(flt_d, TCG_CALL_NO_RWG, tl, env, i64, i64)
+DEF_HELPER_FLAGS_3(feq_d, TCG_CALL_NO_RWG, tl, env, i64, i64)
+DEF_HELPER_FLAGS_2(fcvt_w_d, TCG_CALL_NO_RWG, tl, env, i64)
+DEF_HELPER_FLAGS_2(fcvt_wu_d, TCG_CALL_NO_RWG, tl, env, i64)
+#if defined(TARGET_RISCV64)
+DEF_HELPER_FLAGS_2(fcvt_l_d, TCG_CALL_NO_RWG, tl, env, i64)
+DEF_HELPER_FLAGS_2(fcvt_lu_d, TCG_CALL_NO_RWG, tl, env, i64)
+#endif
+DEF_HELPER_FLAGS_2(fcvt_d_w, TCG_CALL_NO_RWG, i64, env, tl)
+DEF_HELPER_FLAGS_2(fcvt_d_wu, TCG_CALL_NO_RWG, i64, env, tl)
+#if defined(TARGET_RISCV64)
+DEF_HELPER_FLAGS_2(fcvt_d_l, TCG_CALL_NO_RWG, i64, env, tl)
+DEF_HELPER_FLAGS_2(fcvt_d_lu, TCG_CALL_NO_RWG, i64, env, tl)
+#endif
+DEF_HELPER_FLAGS_1(fclass_d, TCG_CALL_NO_RWG_SE, tl, i64)
+
+/* Special functions */
+DEF_HELPER_3(csrrw, tl, env, tl, tl)
+DEF_HELPER_4(csrrs, tl, env, tl, tl, tl)
+DEF_HELPER_4(csrrc, tl, env, tl, tl, tl)
+#ifndef CONFIG_USER_ONLY
+DEF_HELPER_2(sret, tl, env, tl)
+DEF_HELPER_2(mret, tl, env, tl)
+DEF_HELPER_1(wfi, void, env)
+DEF_HELPER_1(tlb_flush, void, env)
+#endif
diff --git a/target/riscv/instmap.h b/target/riscv/instmap.h
new file mode 100644
index 0000000000..58baa1ba1f
--- /dev/null
+++ b/target/riscv/instmap.h
@@ -0,0 +1,364 @@
+/*
+ * RISC-V emulation for qemu: Instruction decode helpers
+ *
+ * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define MASK_OP_MAJOR(op)  (op & 0x7F)
+enum {
+    /* rv32i, rv64i, rv32m */
+    OPC_RISC_LUI    = (0x37),
+    OPC_RISC_AUIPC  = (0x17),
+    OPC_RISC_JAL    = (0x6F),
+    OPC_RISC_JALR   = (0x67),
+    OPC_RISC_BRANCH = (0x63),
+    OPC_RISC_LOAD   = (0x03),
+    OPC_RISC_STORE  = (0x23),
+    OPC_RISC_ARITH_IMM  = (0x13),
+    OPC_RISC_ARITH      = (0x33),
+    OPC_RISC_FENCE      = (0x0F),
+    OPC_RISC_SYSTEM     = (0x73),
+
+    /* rv64i, rv64m */
+    OPC_RISC_ARITH_IMM_W = (0x1B),
+    OPC_RISC_ARITH_W = (0x3B),
+
+    /* rv32a, rv64a */
+    OPC_RISC_ATOMIC = (0x2F),
+
+    /* floating point */
+    OPC_RISC_FP_LOAD = (0x7),
+    OPC_RISC_FP_STORE = (0x27),
+
+    OPC_RISC_FMADD = (0x43),
+    OPC_RISC_FMSUB = (0x47),
+    OPC_RISC_FNMSUB = (0x4B),
+    OPC_RISC_FNMADD = (0x4F),
+
+    OPC_RISC_FP_ARITH = (0x53),
+};
+
+#define MASK_OP_ARITH(op)   (MASK_OP_MAJOR(op) | (op & ((0x7 << 12) | \
+                            (0x7F << 25))))
+enum {
+    OPC_RISC_ADD   = OPC_RISC_ARITH | (0x0 << 12) | (0x00 << 25),
+    OPC_RISC_SUB   = OPC_RISC_ARITH | (0x0 << 12) | (0x20 << 25),
+    OPC_RISC_SLL   = OPC_RISC_ARITH | (0x1 << 12) | (0x00 << 25),
+    OPC_RISC_SLT   = OPC_RISC_ARITH | (0x2 << 12) | (0x00 << 25),
+    OPC_RISC_SLTU  = OPC_RISC_ARITH | (0x3 << 12) | (0x00 << 25),
+    OPC_RISC_XOR   = OPC_RISC_ARITH | (0x4 << 12) | (0x00 << 25),
+    OPC_RISC_SRL   = OPC_RISC_ARITH | (0x5 << 12) | (0x00 << 25),
+    OPC_RISC_SRA   = OPC_RISC_ARITH | (0x5 << 12) | (0x20 << 25),
+    OPC_RISC_OR    = OPC_RISC_ARITH | (0x6 << 12) | (0x00 << 25),
+    OPC_RISC_AND   = OPC_RISC_ARITH | (0x7 << 12) | (0x00 << 25),
+
+    /* RV64M */
+    OPC_RISC_MUL    = OPC_RISC_ARITH | (0x0 << 12) | (0x01 << 25),
+    OPC_RISC_MULH   = OPC_RISC_ARITH | (0x1 << 12) | (0x01 << 25),
+    OPC_RISC_MULHSU = OPC_RISC_ARITH | (0x2 << 12) | (0x01 << 25),
+    OPC_RISC_MULHU  = OPC_RISC_ARITH | (0x3 << 12) | (0x01 << 25),
+
+    OPC_RISC_DIV    = OPC_RISC_ARITH | (0x4 << 12) | (0x01 << 25),
+    OPC_RISC_DIVU   = OPC_RISC_ARITH | (0x5 << 12) | (0x01 << 25),
+    OPC_RISC_REM    = OPC_RISC_ARITH | (0x6 << 12) | (0x01 << 25),
+    OPC_RISC_REMU   = OPC_RISC_ARITH | (0x7 << 12) | (0x01 << 25),
+};
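+/* Worked example: "add x1, x2, x3" encodes as 0x003100b3; MASK_OP_ARITH keeps
+ * only the opcode, funct3 and funct7 fields, giving 0x33 == OPC_RISC_ADD */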
+
+
+#define MASK_OP_ARITH_IMM(op)   (MASK_OP_MAJOR(op) | (op & (0x7 << 12)))
+enum {
+    OPC_RISC_ADDI   = OPC_RISC_ARITH_IMM | (0x0 << 12),
+    OPC_RISC_SLTI   = OPC_RISC_ARITH_IMM | (0x2 << 12),
+    OPC_RISC_SLTIU  = OPC_RISC_ARITH_IMM | (0x3 << 12),
+    OPC_RISC_XORI   = OPC_RISC_ARITH_IMM | (0x4 << 12),
+    OPC_RISC_ORI    = OPC_RISC_ARITH_IMM | (0x6 << 12),
+    OPC_RISC_ANDI   = OPC_RISC_ARITH_IMM | (0x7 << 12),
+    OPC_RISC_SLLI   = OPC_RISC_ARITH_IMM | (0x1 << 12), /* additional part of
+                                                           IMM */
+    OPC_RISC_SHIFT_RIGHT_I = OPC_RISC_ARITH_IMM | (0x5 << 12) /* SRAI, SRLI */
+};
+
+#define MASK_OP_BRANCH(op)     (MASK_OP_MAJOR(op) | (op & (0x7 << 12)))
+enum {
+    OPC_RISC_BEQ  = OPC_RISC_BRANCH  | (0x0  << 12),
+    OPC_RISC_BNE  = OPC_RISC_BRANCH  | (0x1  << 12),
+    OPC_RISC_BLT  = OPC_RISC_BRANCH  | (0x4  << 12),
+    OPC_RISC_BGE  = OPC_RISC_BRANCH  | (0x5  << 12),
+    OPC_RISC_BLTU = OPC_RISC_BRANCH  | (0x6  << 12),
+    OPC_RISC_BGEU = OPC_RISC_BRANCH  | (0x7  << 12)
+};
+
+enum {
+    OPC_RISC_ADDIW   = OPC_RISC_ARITH_IMM_W | (0x0 << 12),
+    OPC_RISC_SLLIW   = OPC_RISC_ARITH_IMM_W | (0x1 << 12), /* additional part
+                                                              of IMM */
+    OPC_RISC_SHIFT_RIGHT_IW = OPC_RISC_ARITH_IMM_W | (0x5 << 12) /* SRAIW,
+                                                                    SRLIW */
+};
+
+enum {
+    OPC_RISC_ADDW   = OPC_RISC_ARITH_W | (0x0 << 12) | (0x00 << 25),
+    OPC_RISC_SUBW   = OPC_RISC_ARITH_W | (0x0 << 12) | (0x20 << 25),
+    OPC_RISC_SLLW   = OPC_RISC_ARITH_W | (0x1 << 12) | (0x00 << 25),
+    OPC_RISC_SRLW   = OPC_RISC_ARITH_W | (0x5 << 12) | (0x00 << 25),
+    OPC_RISC_SRAW   = OPC_RISC_ARITH_W | (0x5 << 12) | (0x20 << 25),
+
+    /* RV64M */
+    OPC_RISC_MULW   = OPC_RISC_ARITH_W | (0x0 << 12) | (0x01 << 25),
+    OPC_RISC_DIVW   = OPC_RISC_ARITH_W | (0x4 << 12) | (0x01 << 25),
+    OPC_RISC_DIVUW  = OPC_RISC_ARITH_W | (0x5 << 12) | (0x01 << 25),
+    OPC_RISC_REMW   = OPC_RISC_ARITH_W | (0x6 << 12) | (0x01 << 25),
+    OPC_RISC_REMUW  = OPC_RISC_ARITH_W | (0x7 << 12) | (0x01 << 25),
+};
+
+#define MASK_OP_LOAD(op)   (MASK_OP_MAJOR(op) | (op & (0x7 << 12)))
+enum {
+    OPC_RISC_LB   = OPC_RISC_LOAD | (0x0 << 12),
+    OPC_RISC_LH   = OPC_RISC_LOAD | (0x1 << 12),
+    OPC_RISC_LW   = OPC_RISC_LOAD | (0x2 << 12),
+    OPC_RISC_LD   = OPC_RISC_LOAD | (0x3 << 12),
+    OPC_RISC_LBU  = OPC_RISC_LOAD | (0x4 << 12),
+    OPC_RISC_LHU  = OPC_RISC_LOAD | (0x5 << 12),
+    OPC_RISC_LWU  = OPC_RISC_LOAD | (0x6 << 12),
+};
+
+#define MASK_OP_STORE(op)   (MASK_OP_MAJOR(op) | (op & (0x7 << 12)))
+enum {
+    OPC_RISC_SB   = OPC_RISC_STORE | (0x0 << 12),
+    OPC_RISC_SH   = OPC_RISC_STORE | (0x1 << 12),
+    OPC_RISC_SW   = OPC_RISC_STORE | (0x2 << 12),
+    OPC_RISC_SD   = OPC_RISC_STORE | (0x3 << 12),
+};
+
+#define MASK_OP_JALR(op)   (MASK_OP_MAJOR(op) | (op & (0x7 << 12)))
+/* no enum since OPC_RISC_JALR is the actual value */
+
+#define MASK_OP_ATOMIC(op) \
+    (MASK_OP_MAJOR(op) | (op & ((0x7 << 12) | (0x7F << 25))))
+#define MASK_OP_ATOMIC_NO_AQ_RL_SZ(op) \
+    (MASK_OP_MAJOR(op) | (op & (0x1F << 27)))
+
+enum {
+    OPC_RISC_LR          = OPC_RISC_ATOMIC | (0x02 << 27),
+    OPC_RISC_SC          = OPC_RISC_ATOMIC | (0x03 << 27),
+    OPC_RISC_AMOSWAP     = OPC_RISC_ATOMIC | (0x01 << 27),
+    OPC_RISC_AMOADD      = OPC_RISC_ATOMIC | (0x00 << 27),
+    OPC_RISC_AMOXOR      = OPC_RISC_ATOMIC | (0x04 << 27),
+    OPC_RISC_AMOAND      = OPC_RISC_ATOMIC | (0x0C << 27),
+    OPC_RISC_AMOOR       = OPC_RISC_ATOMIC | (0x08 << 27),
+    OPC_RISC_AMOMIN      = OPC_RISC_ATOMIC | (0x10 << 27),
+    OPC_RISC_AMOMAX      = OPC_RISC_ATOMIC | (0x14 << 27),
+    OPC_RISC_AMOMINU     = OPC_RISC_ATOMIC | (0x18 << 27),
+    OPC_RISC_AMOMAXU     = OPC_RISC_ATOMIC | (0x1C << 27),
+};
+
+#define MASK_OP_SYSTEM(op)   (MASK_OP_MAJOR(op) | (op & (0x7 << 12)))
+enum {
+    OPC_RISC_ECALL       = OPC_RISC_SYSTEM | (0x0 << 12),
+    OPC_RISC_EBREAK      = OPC_RISC_SYSTEM | (0x0 << 12),
+    OPC_RISC_ERET        = OPC_RISC_SYSTEM | (0x0 << 12),
+    OPC_RISC_MRTS        = OPC_RISC_SYSTEM | (0x0 << 12),
+    OPC_RISC_MRTH        = OPC_RISC_SYSTEM | (0x0 << 12),
+    OPC_RISC_HRTS        = OPC_RISC_SYSTEM | (0x0 << 12),
+    OPC_RISC_WFI         = OPC_RISC_SYSTEM | (0x0 << 12),
+    OPC_RISC_SFENCEVM    = OPC_RISC_SYSTEM | (0x0 << 12),
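+    /* the entries above share funct3 == 0 and so mask to the same value;
+     * they are distinguished later by the funct12 field (inst[31:20]),
+     * which MASK_OP_SYSTEM deliberately leaves out */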
+
+    OPC_RISC_CSRRW       = OPC_RISC_SYSTEM | (0x1 << 12),
+    OPC_RISC_CSRRS       = OPC_RISC_SYSTEM | (0x2 << 12),
+    OPC_RISC_CSRRC       = OPC_RISC_SYSTEM | (0x3 << 12),
+    OPC_RISC_CSRRWI      = OPC_RISC_SYSTEM | (0x5 << 12),
+    OPC_RISC_CSRRSI      = OPC_RISC_SYSTEM | (0x6 << 12),
+    OPC_RISC_CSRRCI      = OPC_RISC_SYSTEM | (0x7 << 12),
+};
+
+#define MASK_OP_FP_LOAD(op)   (MASK_OP_MAJOR(op) | (op & (0x7 << 12)))
+enum {
+    OPC_RISC_FLW   = OPC_RISC_FP_LOAD | (0x2 << 12),
+    OPC_RISC_FLD   = OPC_RISC_FP_LOAD | (0x3 << 12),
+};
+
+#define MASK_OP_FP_STORE(op)   (MASK_OP_MAJOR(op) | (op & (0x7 << 12)))
+enum {
+    OPC_RISC_FSW   = OPC_RISC_FP_STORE | (0x2 << 12),
+    OPC_RISC_FSD   = OPC_RISC_FP_STORE | (0x3 << 12),
+};
+
+#define MASK_OP_FP_FMADD(op)   (MASK_OP_MAJOR(op) | (op & (0x3 << 25)))
+enum {
+    OPC_RISC_FMADD_S = OPC_RISC_FMADD | (0x0 << 25),
+    OPC_RISC_FMADD_D = OPC_RISC_FMADD | (0x1 << 25),
+};
+
+#define MASK_OP_FP_FMSUB(op)   (MASK_OP_MAJOR(op) | (op & (0x3 << 25)))
+enum {
+    OPC_RISC_FMSUB_S = OPC_RISC_FMSUB | (0x0 << 25),
+    OPC_RISC_FMSUB_D = OPC_RISC_FMSUB | (0x1 << 25),
+};
+
+#define MASK_OP_FP_FNMADD(op)   (MASK_OP_MAJOR(op) | (op & (0x3 << 25)))
+enum {
+    OPC_RISC_FNMADD_S = OPC_RISC_FNMADD | (0x0 << 25),
+    OPC_RISC_FNMADD_D = OPC_RISC_FNMADD | (0x1 << 25),
+};
+
+#define MASK_OP_FP_FNMSUB(op)   (MASK_OP_MAJOR(op) | (op & (0x3 << 25)))
+enum {
+    OPC_RISC_FNMSUB_S = OPC_RISC_FNMSUB | (0x0 << 25),
+    OPC_RISC_FNMSUB_D = OPC_RISC_FNMSUB | (0x1 << 25),
+};
+
+#define MASK_OP_FP_ARITH(op)   (MASK_OP_MAJOR(op) | (op & (0x7F << 25)))
+enum {
+    /* float */
+    OPC_RISC_FADD_S    = OPC_RISC_FP_ARITH | (0x0 << 25),
+    OPC_RISC_FSUB_S    = OPC_RISC_FP_ARITH | (0x4 << 25),
+    OPC_RISC_FMUL_S    = OPC_RISC_FP_ARITH | (0x8 << 25),
+    OPC_RISC_FDIV_S    = OPC_RISC_FP_ARITH | (0xC << 25),
+
+    OPC_RISC_FSGNJ_S   = OPC_RISC_FP_ARITH | (0x10 << 25),
+    OPC_RISC_FSGNJN_S  = OPC_RISC_FP_ARITH | (0x10 << 25),
+    OPC_RISC_FSGNJX_S  = OPC_RISC_FP_ARITH | (0x10 << 25),
+
+    OPC_RISC_FMIN_S    = OPC_RISC_FP_ARITH | (0x14 << 25),
+    OPC_RISC_FMAX_S    = OPC_RISC_FP_ARITH | (0x14 << 25),
+
+    OPC_RISC_FSQRT_S   = OPC_RISC_FP_ARITH | (0x2C << 25),
+
+    OPC_RISC_FEQ_S     = OPC_RISC_FP_ARITH | (0x50 << 25),
+    OPC_RISC_FLT_S     = OPC_RISC_FP_ARITH | (0x50 << 25),
+    OPC_RISC_FLE_S     = OPC_RISC_FP_ARITH | (0x50 << 25),
+
+    OPC_RISC_FCVT_W_S  = OPC_RISC_FP_ARITH | (0x60 << 25),
+    OPC_RISC_FCVT_WU_S = OPC_RISC_FP_ARITH | (0x60 << 25),
+    OPC_RISC_FCVT_L_S  = OPC_RISC_FP_ARITH | (0x60 << 25),
+    OPC_RISC_FCVT_LU_S = OPC_RISC_FP_ARITH | (0x60 << 25),
+
+    OPC_RISC_FCVT_S_W  = OPC_RISC_FP_ARITH | (0x68 << 25),
+    OPC_RISC_FCVT_S_WU = OPC_RISC_FP_ARITH | (0x68 << 25),
+    OPC_RISC_FCVT_S_L  = OPC_RISC_FP_ARITH | (0x68 << 25),
+    OPC_RISC_FCVT_S_LU = OPC_RISC_FP_ARITH | (0x68 << 25),
+
+    OPC_RISC_FMV_X_S   = OPC_RISC_FP_ARITH | (0x70 << 25),
+    OPC_RISC_FCLASS_S  = OPC_RISC_FP_ARITH | (0x70 << 25),
+
+    OPC_RISC_FMV_S_X   = OPC_RISC_FP_ARITH | (0x78 << 25),
+
+    /* double */
+    OPC_RISC_FADD_D    = OPC_RISC_FP_ARITH | (0x1 << 25),
+    OPC_RISC_FSUB_D    = OPC_RISC_FP_ARITH | (0x5 << 25),
+    OPC_RISC_FMUL_D    = OPC_RISC_FP_ARITH | (0x9 << 25),
+    OPC_RISC_FDIV_D    = OPC_RISC_FP_ARITH | (0xD << 25),
+
+    OPC_RISC_FSGNJ_D   = OPC_RISC_FP_ARITH | (0x11 << 25),
+    OPC_RISC_FSGNJN_D  = OPC_RISC_FP_ARITH | (0x11 << 25),
+    OPC_RISC_FSGNJX_D  = OPC_RISC_FP_ARITH | (0x11 << 25),
+
+    OPC_RISC_FMIN_D    = OPC_RISC_FP_ARITH | (0x15 << 25),
+    OPC_RISC_FMAX_D    = OPC_RISC_FP_ARITH | (0x15 << 25),
+
+    OPC_RISC_FCVT_S_D = OPC_RISC_FP_ARITH | (0x20 << 25),
+
+    OPC_RISC_FCVT_D_S = OPC_RISC_FP_ARITH | (0x21 << 25),
+
+    OPC_RISC_FSQRT_D   = OPC_RISC_FP_ARITH | (0x2D << 25),
+
+    OPC_RISC_FEQ_D     = OPC_RISC_FP_ARITH | (0x51 << 25),
+    OPC_RISC_FLT_D     = OPC_RISC_FP_ARITH | (0x51 << 25),
+    OPC_RISC_FLE_D     = OPC_RISC_FP_ARITH | (0x51 << 25),
+
+    OPC_RISC_FCVT_W_D  = OPC_RISC_FP_ARITH | (0x61 << 25),
+    OPC_RISC_FCVT_WU_D = OPC_RISC_FP_ARITH | (0x61 << 25),
+    OPC_RISC_FCVT_L_D  = OPC_RISC_FP_ARITH | (0x61 << 25),
+    OPC_RISC_FCVT_LU_D = OPC_RISC_FP_ARITH | (0x61 << 25),
+
+    OPC_RISC_FCVT_D_W  = OPC_RISC_FP_ARITH | (0x69 << 25),
+    OPC_RISC_FCVT_D_WU = OPC_RISC_FP_ARITH | (0x69 << 25),
+    OPC_RISC_FCVT_D_L  = OPC_RISC_FP_ARITH | (0x69 << 25),
+    OPC_RISC_FCVT_D_LU = OPC_RISC_FP_ARITH | (0x69 << 25),
+
+    OPC_RISC_FMV_X_D   = OPC_RISC_FP_ARITH | (0x71 << 25),
+    OPC_RISC_FCLASS_D  = OPC_RISC_FP_ARITH | (0x71 << 25),
+
+    OPC_RISC_FMV_D_X   = OPC_RISC_FP_ARITH | (0x79 << 25),
+};
+
+#define GET_B_IMM(inst) ((extract32(inst, 8, 4) << 1) \
+                         | (extract32(inst, 25, 6) << 5) \
+                         | (extract32(inst, 7, 1) << 11) \
+                         | (sextract64(inst, 31, 1) << 12))
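+/* Worked example: "beq x0, x0, -4" encodes as 0xfe000ee3; the fields
+ * inst[11:8] = 0xe, inst[30:25] = 0x3f, inst[7] = 1 and the sign bit
+ * inst[31] = 1 reassemble above into the branch offset -4 */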
+
+#define GET_STORE_IMM(inst) ((extract32(inst, 7, 5)) \
+                             | (sextract64(inst, 25, 7) << 5))
+
+#define GET_JAL_IMM(inst) ((extract32(inst, 21, 10) << 1) \
+                           | (extract32(inst, 20, 1) << 11) \
+                           | (extract32(inst, 12, 8) << 12) \
+                           | (sextract64(inst, 31, 1) << 20))
+
+#define GET_RM(inst)   extract32(inst, 12, 3)
+#define GET_RS3(inst)  extract32(inst, 27, 5)
+#define GET_RS1(inst)  extract32(inst, 15, 5)
+#define GET_RS2(inst)  extract32(inst, 20, 5)
+#define GET_RD(inst)   extract32(inst, 7, 5)
+#define GET_IMM(inst)  sextract64(inst, 20, 12)
+
+/* RVC decoding macros */
+#define GET_C_IMM(inst)             (extract32(inst, 2, 5) \
+                                    | (sextract64(inst, 12, 1) << 5))
+#define GET_C_ZIMM(inst)            (extract32(inst, 2, 5) \
+                                    | (extract32(inst, 12, 1) << 5))
+#define GET_C_ADDI4SPN_IMM(inst)    ((extract32(inst, 6, 1) << 2) \
+                                    | (extract32(inst, 5, 1) << 3) \
+                                    | (extract32(inst, 11, 2) << 4) \
+                                    | (extract32(inst, 7, 4) << 6))
+#define GET_C_ADDI16SP_IMM(inst)    ((extract32(inst, 6, 1) << 4) \
+                                    | (extract32(inst, 2, 1) << 5) \
+                                    | (extract32(inst, 5, 1) << 6) \
+                                    | (extract32(inst, 3, 2) << 7) \
+                                    | (sextract64(inst, 12, 1) << 9))
+#define GET_C_LWSP_IMM(inst)        ((extract32(inst, 4, 3) << 2) \
+                                    | (extract32(inst, 12, 1) << 5) \
+                                    | (extract32(inst, 2, 2) << 6))
+#define GET_C_LDSP_IMM(inst)        ((extract32(inst, 5, 2) << 3) \
+                                    | (extract32(inst, 12, 1) << 5) \
+                                    | (extract32(inst, 2, 3) << 6))
+#define GET_C_SWSP_IMM(inst)        ((extract32(inst, 9, 4) << 2) \
+                                    | (extract32(inst, 7, 2) << 6))
+#define GET_C_SDSP_IMM(inst)        ((extract32(inst, 10, 3) << 3) \
+                                    | (extract32(inst, 7, 3) << 6))
+#define GET_C_LW_IMM(inst)          ((extract32(inst, 6, 1) << 2) \
+                                    | (extract32(inst, 10, 3) << 3) \
+                                    | (extract32(inst, 5, 1) << 6))
+#define GET_C_LD_IMM(inst)          ((extract32(inst, 10, 3) << 3) \
+                                    | (extract32(inst, 5, 2) << 6))
+#define GET_C_J_IMM(inst)           ((extract32(inst, 3, 3) << 1) \
+                                    | (extract32(inst, 11, 1) << 4) \
+                                    | (extract32(inst, 2, 1) << 5) \
+                                    | (extract32(inst, 7, 1) << 6) \
+                                    | (extract32(inst, 6, 1) << 7) \
+                                    | (extract32(inst, 9, 2) << 8) \
+                                    | (extract32(inst, 8, 1) << 10) \
+                                    | (sextract64(inst, 12, 1) << 11))
+#define GET_C_B_IMM(inst)           ((extract32(inst, 3, 2) << 1) \
+                                    | (extract32(inst, 10, 2) << 3) \
+                                    | (extract32(inst, 2, 1) << 5) \
+                                    | (extract32(inst, 5, 2) << 6) \
+                                    | (sextract64(inst, 12, 1) << 8))
+#define GET_C_SIMM3(inst)           extract32(inst, 10, 3)
+#define GET_C_RD(inst)              GET_RD(inst)
+#define GET_C_RS1(inst)             GET_RD(inst)
+#define GET_C_RS2(inst)             extract32(inst, 2, 5)
+#define GET_C_RS1S(inst)            (8 + extract32(inst, 7, 3))
+#define GET_C_RS2S(inst)            (8 + extract32(inst, 2, 3))
diff --git a/target/riscv/op_helper.c b/target/riscv/op_helper.c
new file mode 100644
index 0000000000..e34715df4e
--- /dev/null
+++ b/target/riscv/op_helper.c
@@ -0,0 +1,669 @@
+/*
+ * RISC-V Emulation Helpers for QEMU.
+ *
+ * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
+ * Copyright (c) 2017-2018 SiFive, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+#include "cpu.h"
+#include "qemu/main-loop.h"
+#include "exec/exec-all.h"
+#include "exec/helper-proto.h"
+
+#ifndef CONFIG_USER_ONLY
+
+#if defined(TARGET_RISCV32)
+static const char valid_vm_1_09[16] = {
+    [VM_1_09_MBARE] = 1,
+    [VM_1_09_SV32] = 1,
+};
+static const char valid_vm_1_10[16] = {
+    [VM_1_10_MBARE] = 1,
+    [VM_1_10_SV32] = 1
+};
+#elif defined(TARGET_RISCV64)
+static const char valid_vm_1_09[16] = {
+    [VM_1_09_MBARE] = 1,
+    [VM_1_09_SV39] = 1,
+    [VM_1_09_SV48] = 1,
+};
+static const char valid_vm_1_10[16] = {
+    [VM_1_10_MBARE] = 1,
+    [VM_1_10_SV39] = 1,
+    [VM_1_10_SV48] = 1,
+    [VM_1_10_SV57] = 1
+};
+#endif
+
+static int validate_vm(CPURISCVState *env, target_ulong vm)
+{
+    return (env->priv_ver >= PRIV_VERSION_1_10_0) ?
+        valid_vm_1_10[vm & 0xf] : valid_vm_1_09[vm & 0xf];
+}
+
+#endif
+
+/* Exceptions processing helpers */
+void QEMU_NORETURN do_raise_exception_err(CPURISCVState *env,
+                                          uint32_t exception, uintptr_t pc)
+{
+    CPUState *cs = CPU(riscv_env_get_cpu(env));
+    qemu_log_mask(CPU_LOG_INT, "%s: %d\n", __func__, exception);
+    cs->exception_index = exception;
+    cpu_loop_exit_restore(cs, pc);
+}
+
+void helper_raise_exception(CPURISCVState *env, uint32_t exception)
+{
+    do_raise_exception_err(env, exception, 0);
+}
+
+static void validate_mstatus_fs(CPURISCVState *env, uintptr_t ra)
+{
+#ifndef CONFIG_USER_ONLY
+    if (!(env->mstatus & MSTATUS_FS)) {
+        do_raise_exception_err(env, RISCV_EXCP_ILLEGAL_INST, ra);
+    }
+#endif
+}
+
+/*
+ * Handle writes to CSRs and any resulting special behavior
+ *
+ * Adapted from Spike's processor_t::set_csr
+ */
+void csr_write_helper(CPURISCVState *env, target_ulong val_to_write,
+        target_ulong csrno)
+{
+#ifndef CONFIG_USER_ONLY
+    uint64_t delegable_ints = MIP_SSIP | MIP_STIP | MIP_SEIP | (1 << IRQ_X_COP);
+    uint64_t all_ints = delegable_ints | MIP_MSIP | MIP_MTIP;
+#endif
+
+    switch (csrno) {
+    case CSR_FFLAGS:
+        validate_mstatus_fs(env, GETPC());
+        cpu_riscv_set_fflags(env, val_to_write & (FSR_AEXC >> FSR_AEXC_SHIFT));
+        break;
+    case CSR_FRM:
+        validate_mstatus_fs(env, GETPC());
+        env->frm = val_to_write & (FSR_RD >> FSR_RD_SHIFT);
+        break;
+    case CSR_FCSR:
+        validate_mstatus_fs(env, GETPC());
+        env->frm = (val_to_write & FSR_RD) >> FSR_RD_SHIFT;
+        cpu_riscv_set_fflags(env, (val_to_write & FSR_AEXC) >> FSR_AEXC_SHIFT);
+        break;
+#ifndef CONFIG_USER_ONLY
+    case CSR_MSTATUS: {
+        target_ulong mstatus = env->mstatus;
+        target_ulong mask = 0;
+        target_ulong mpp = get_field(val_to_write, MSTATUS_MPP);
+
+        /* flush tlb on mstatus fields that affect VM */
+        if (env->priv_ver <= PRIV_VERSION_1_09_1) {
+            if ((val_to_write ^ mstatus) & (MSTATUS_MXR | MSTATUS_MPP |
+                    MSTATUS_MPRV | MSTATUS_SUM | MSTATUS_VM)) {
+                helper_tlb_flush(env);
+            }
+            mask = MSTATUS_SIE | MSTATUS_SPIE | MSTATUS_MIE | MSTATUS_MPIE |
+                MSTATUS_SPP | MSTATUS_FS | MSTATUS_MPRV | MSTATUS_SUM |
+                MSTATUS_MPP | MSTATUS_MXR |
+                (validate_vm(env, get_field(val_to_write, MSTATUS_VM)) ?
+                    MSTATUS_VM : 0);
+        }
+        if (env->priv_ver >= PRIV_VERSION_1_10_0) {
+            if ((val_to_write ^ mstatus) & (MSTATUS_MXR | MSTATUS_MPP |
+                    MSTATUS_MPRV | MSTATUS_SUM)) {
+                helper_tlb_flush(env);
+            }
+            mask = MSTATUS_SIE | MSTATUS_SPIE | MSTATUS_MIE | MSTATUS_MPIE |
+                MSTATUS_SPP | MSTATUS_FS | MSTATUS_MPRV | MSTATUS_SUM |
+                MSTATUS_MPP | MSTATUS_MXR;
+        }
+
+        /* silently discard mstatus.mpp writes for unsupported modes */
+        if (mpp == PRV_H ||
+            (!riscv_has_ext(env, RVS) && mpp == PRV_S) ||
+            (!riscv_has_ext(env, RVU) && mpp == PRV_U)) {
+            mask &= ~MSTATUS_MPP;
+        }
+
+        mstatus = (mstatus & ~mask) | (val_to_write & mask);
+        int dirty = (mstatus & MSTATUS_FS) == MSTATUS_FS;
+        dirty |= (mstatus & MSTATUS_XS) == MSTATUS_XS;
+        mstatus = set_field(mstatus, MSTATUS_SD, dirty);
+        env->mstatus = mstatus;
+        break;
+    }
+    case CSR_MIP: {
+        /*
+         * Since the writable bits in MIP are not set asynchronously by the
+         * CLINT, no additional locking is needed for read-modify-write
+         * CSR operations
+         */
+        qemu_mutex_lock_iothread();
+        RISCVCPU *cpu = riscv_env_get_cpu(env);
+        riscv_set_local_interrupt(cpu, MIP_SSIP,
+                                  (val_to_write & MIP_SSIP) != 0);
+        riscv_set_local_interrupt(cpu, MIP_STIP,
+                                  (val_to_write & MIP_STIP) != 0);
+        /*
+         * csrs/csrc on mip.SEIP are not decomposable into separate read and
+         * write steps, so a different implementation is needed
+         */
+        qemu_mutex_unlock_iothread();
+        break;
+    }
+    case CSR_MIE: {
+        env->mie = (env->mie & ~all_ints) |
+            (val_to_write & all_ints);
+        break;
+    }
+    case CSR_MIDELEG:
+        env->mideleg = (env->mideleg & ~delegable_ints)
+                                | (val_to_write & delegable_ints);
+        break;
+    case CSR_MEDELEG: {
+        target_ulong mask = 0;
+        mask |= 1ULL << (RISCV_EXCP_INST_ADDR_MIS);
+        mask |= 1ULL << (RISCV_EXCP_INST_ACCESS_FAULT);
+        mask |= 1ULL << (RISCV_EXCP_ILLEGAL_INST);
+        mask |= 1ULL << (RISCV_EXCP_BREAKPOINT);
+        mask |= 1ULL << (RISCV_EXCP_LOAD_ADDR_MIS);
+        mask |= 1ULL << (RISCV_EXCP_LOAD_ACCESS_FAULT);
+        mask |= 1ULL << (RISCV_EXCP_STORE_AMO_ADDR_MIS);
+        mask |= 1ULL << (RISCV_EXCP_STORE_AMO_ACCESS_FAULT);
+        mask |= 1ULL << (RISCV_EXCP_U_ECALL);
+        mask |= 1ULL << (RISCV_EXCP_S_ECALL);
+        mask |= 1ULL << (RISCV_EXCP_H_ECALL);
+        mask |= 1ULL << (RISCV_EXCP_M_ECALL);
+        mask |= 1ULL << (RISCV_EXCP_INST_PAGE_FAULT);
+        mask |= 1ULL << (RISCV_EXCP_LOAD_PAGE_FAULT);
+        mask |= 1ULL << (RISCV_EXCP_STORE_PAGE_FAULT);
+        env->medeleg = (env->medeleg & ~mask)
+                                | (val_to_write & mask);
+        break;
+    }
+    case CSR_MINSTRET:
+        qemu_log_mask(LOG_UNIMP, "CSR_MINSTRET: write not implemented\n");
+        goto do_illegal;
+    case CSR_MCYCLE:
+        qemu_log_mask(LOG_UNIMP, "CSR_MCYCLE: write not implemented\n");
+        goto do_illegal;
+    case CSR_MINSTRETH:
+        qemu_log_mask(LOG_UNIMP, "CSR_MINSTRETH: write not implemented\n");
+        goto do_illegal;
+    case CSR_MCYCLEH:
+        qemu_log_mask(LOG_UNIMP, "CSR_MCYCLEH: write not implemented\n");
+        goto do_illegal;
+    case CSR_MUCOUNTEREN:
+        env->mucounteren = val_to_write;
+        break;
+    case CSR_MSCOUNTEREN:
+        env->mscounteren = val_to_write;
+        break;
+    case CSR_SSTATUS: {
+        target_ulong ms = env->mstatus;
+        target_ulong mask = SSTATUS_SIE | SSTATUS_SPIE | SSTATUS_UIE
+            | SSTATUS_UPIE | SSTATUS_SPP | SSTATUS_FS | SSTATUS_XS
+            | SSTATUS_SUM | SSTATUS_MXR | SSTATUS_SD;
+        ms = (ms & ~mask) | (val_to_write & mask);
+        csr_write_helper(env, ms, CSR_MSTATUS);
+        break;
+    }
+    case CSR_SIP: {
+        qemu_mutex_lock_iothread();
+        target_ulong next_mip = (env->mip & ~env->mideleg)
+                                | (val_to_write & env->mideleg);
+        qemu_mutex_unlock_iothread();
+        csr_write_helper(env, next_mip, CSR_MIP);
+        break;
+    }
+    case CSR_SIE: {
+        target_ulong next_mie = (env->mie & ~env->mideleg)
+                                | (val_to_write & env->mideleg);
+        csr_write_helper(env, next_mie, CSR_MIE);
+        break;
+    }
+    case CSR_SATP: /* CSR_SPTBR */ {
+        if (!riscv_feature(env, RISCV_FEATURE_MMU)) {
+            goto do_illegal;
+        }
+        if (env->priv_ver <= PRIV_VERSION_1_09_1 && (val_to_write ^ env->sptbr))
+        {
+            helper_tlb_flush(env);
+            env->sptbr = val_to_write & (((target_ulong)
+                1 << (TARGET_PHYS_ADDR_SPACE_BITS - PGSHIFT)) - 1);
+        }
+        if (env->priv_ver >= PRIV_VERSION_1_10_0 &&
+            validate_vm(env, get_field(val_to_write, SATP_MODE)) &&
+            ((val_to_write ^ env->satp) & (SATP_MODE | SATP_ASID | SATP_PPN)))
+        {
+            helper_tlb_flush(env);
+            env->satp = val_to_write;
+        }
+        break;
+    }
+    case CSR_SEPC:
+        env->sepc = val_to_write;
+        break;
+    case CSR_STVEC:
+        if (val_to_write & 1) {
+            qemu_log_mask(LOG_UNIMP, "CSR_STVEC: vectored traps not supported");
+            goto do_illegal;
+        }
+        env->stvec = val_to_write >> 2 << 2;
+        break;
+    case CSR_SCOUNTEREN:
+        env->scounteren = val_to_write;
+        break;
+    case CSR_SSCRATCH:
+        env->sscratch = val_to_write;
+        break;
+    case CSR_SCAUSE:
+        env->scause = val_to_write;
+        break;
+    case CSR_SBADADDR:
+        env->sbadaddr = val_to_write;
+        break;
+    case CSR_MEPC:
+        env->mepc = val_to_write;
+        break;
+    case CSR_MTVEC:
+        if (val_to_write & 1) {
+            qemu_log_mask(LOG_UNIMP, "CSR_MTVEC: vectored traps not supported");
+            goto do_illegal;
+        }
+        env->mtvec = val_to_write >> 2 << 2;
+        break;
+    case CSR_MCOUNTEREN:
+        env->mcounteren = val_to_write;
+        break;
+    case CSR_MSCRATCH:
+        env->mscratch = val_to_write;
+        break;
+    case CSR_MCAUSE:
+        env->mcause = val_to_write;
+        break;
+    case CSR_MBADADDR:
+        env->mbadaddr = val_to_write;
+        break;
+    case CSR_MISA: {
+        qemu_log_mask(LOG_UNIMP, "CSR_MISA: misa writes not supported");
+        goto do_illegal;
+    }
+    case CSR_PMPCFG0:
+    case CSR_PMPCFG1:
+    case CSR_PMPCFG2:
+    case CSR_PMPCFG3:
+       pmpcfg_csr_write(env, csrno - CSR_PMPCFG0, val_to_write);
+       break;
+    case CSR_PMPADDR0:
+    case CSR_PMPADDR1:
+    case CSR_PMPADDR2:
+    case CSR_PMPADDR3:
+    case CSR_PMPADDR4:
+    case CSR_PMPADDR5:
+    case CSR_PMPADDR6:
+    case CSR_PMPADDR7:
+    case CSR_PMPADDR8:
+    case CSR_PMPADDR9:
+    case CSR_PMPADDR10:
+    case CSR_PMPADDR11:
+    case CSR_PMPADDR12:
+    case CSR_PMPADDR13:
+    case CSR_PMPADDR14:
+    case CSR_PMPADDR15:
+       pmpaddr_csr_write(env, csrno - CSR_PMPADDR0, val_to_write);
+       break;
+    do_illegal:
+#endif
+    default:
+        do_raise_exception_err(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+    }
+}
+
+/*
+ * Handle reads to CSRs and any resulting special behavior
+ *
+ * Adapted from Spike's processor_t::get_csr
+ */
+target_ulong csr_read_helper(CPURISCVState *env, target_ulong csrno)
+{
+#ifndef CONFIG_USER_ONLY
+    target_ulong ctr_en = env->priv == PRV_U ? env->mucounteren :
+                   env->priv == PRV_S ? env->mscounteren : -1U;
+#else
+    target_ulong ctr_en = -1;
+#endif
+    target_ulong ctr_ok = (ctr_en >> (csrno & 31)) & 1;
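+    /* e.g. a U-mode read of instret (0xc02) tests bit 2 of mucounteren,
+     * while M-mode gets ctr_en == -1 and can therefore access every counter */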
+
+    if (csrno >= CSR_HPMCOUNTER3 && csrno <= CSR_HPMCOUNTER31) {
+        if (ctr_ok) {
+            return 0;
+        }
+    }
+#if defined(TARGET_RISCV32)
+    if (csrno >= CSR_HPMCOUNTER3H && csrno <= CSR_HPMCOUNTER31H) {
+        if (ctr_ok) {
+            return 0;
+        }
+    }
+#endif
+    if (csrno >= CSR_MHPMCOUNTER3 && csrno <= CSR_MHPMCOUNTER31) {
+        return 0;
+    }
+#if defined(TARGET_RISCV32)
+    if (csrno >= CSR_MHPMCOUNTER3H && csrno <= CSR_MHPMCOUNTER31H) {
+        return 0;
+    }
+#endif
+    if (csrno >= CSR_MHPMEVENT3 && csrno <= CSR_MHPMEVENT31) {
+        return 0;
+    }
+
+    switch (csrno) {
+    case CSR_FFLAGS:
+        validate_mstatus_fs(env, GETPC());
+        return cpu_riscv_get_fflags(env);
+    case CSR_FRM:
+        validate_mstatus_fs(env, GETPC());
+        return env->frm;
+    case CSR_FCSR:
+        validate_mstatus_fs(env, GETPC());
+        return (cpu_riscv_get_fflags(env) << FSR_AEXC_SHIFT)
+                | (env->frm << FSR_RD_SHIFT);
+    /* rdtime/rdtimeh is trapped and emulated by bbl in system mode */
+#ifdef CONFIG_USER_ONLY
+    case CSR_TIME:
+        return cpu_get_host_ticks();
+#if defined(TARGET_RISCV32)
+    case CSR_TIMEH:
+        return cpu_get_host_ticks() >> 32;
+#endif
+#endif
+    case CSR_INSTRET:
+    case CSR_CYCLE:
+        if (ctr_ok) {
+            return cpu_get_host_ticks();
+        }
+        break;
+#if defined(TARGET_RISCV32)
+    case CSR_INSTRETH:
+    case CSR_CYCLEH:
+        if (ctr_ok) {
+            return cpu_get_host_ticks() >> 32;
+        }
+        break;
+#endif
+#ifndef CONFIG_USER_ONLY
+    case CSR_MINSTRET:
+    case CSR_MCYCLE:
+        return cpu_get_host_ticks();
+    case CSR_MINSTRETH:
+    case CSR_MCYCLEH:
+#if defined(TARGET_RISCV32)
+        return cpu_get_host_ticks() >> 32;
+#endif
+        break;
+    case CSR_MUCOUNTEREN:
+        return env->mucounteren;
+    case CSR_MSCOUNTEREN:
+        return env->mscounteren;
+    case CSR_SSTATUS: {
+        target_ulong mask = SSTATUS_SIE | SSTATUS_SPIE | SSTATUS_UIE
+            | SSTATUS_UPIE | SSTATUS_SPP | SSTATUS_FS | SSTATUS_XS
+            | SSTATUS_SUM | SSTATUS_SD;
+        if (env->priv_ver >= PRIV_VERSION_1_10_0) {
+            mask |= SSTATUS_MXR;
+        }
+        return env->mstatus & mask;
+    }
+    case CSR_SIP: {
+        qemu_mutex_lock_iothread();
+        target_ulong tmp = env->mip & env->mideleg;
+        qemu_mutex_unlock_iothread();
+        return tmp;
+    }
+    case CSR_SIE:
+        return env->mie & env->mideleg;
+    case CSR_SEPC:
+        return env->sepc;
+    case CSR_SBADADDR:
+        return env->sbadaddr;
+    case CSR_STVEC:
+        return env->stvec;
+    case CSR_SCOUNTEREN:
+        return env->scounteren;
+    case CSR_SCAUSE:
+        return env->scause;
+    case CSR_SPTBR:
+        if (env->priv_ver >= PRIV_VERSION_1_10_0) {
+            return env->satp;
+        } else {
+            return env->sptbr;
+        }
+    case CSR_SSCRATCH:
+        return env->sscratch;
+    case CSR_MSTATUS:
+        return env->mstatus;
+    case CSR_MIP: {
+        qemu_mutex_lock_iothread();
+        target_ulong tmp = env->mip;
+        qemu_mutex_unlock_iothread();
+        return tmp;
+    }
+    case CSR_MIE:
+        return env->mie;
+    case CSR_MEPC:
+        return env->mepc;
+    case CSR_MSCRATCH:
+        return env->mscratch;
+    case CSR_MCAUSE:
+        return env->mcause;
+    case CSR_MBADADDR:
+        return env->mbadaddr;
+    case CSR_MISA:
+        return env->misa;
+    case CSR_MARCHID:
+        return 0; /* as spike does */
+    case CSR_MIMPID:
+        return 0; /* as spike does */
+    case CSR_MVENDORID:
+        return 0; /* as spike does */
+    case CSR_MHARTID:
+        return env->mhartid;
+    case CSR_MTVEC:
+        return env->mtvec;
+    case CSR_MCOUNTEREN:
+        return env->mcounteren;
+    case CSR_MEDELEG:
+        return env->medeleg;
+    case CSR_MIDELEG:
+        return env->mideleg;
+    case CSR_PMPCFG0:
+    case CSR_PMPCFG1:
+    case CSR_PMPCFG2:
+    case CSR_PMPCFG3:
+       return pmpcfg_csr_read(env, csrno - CSR_PMPCFG0);
+    case CSR_PMPADDR0:
+    case CSR_PMPADDR1:
+    case CSR_PMPADDR2:
+    case CSR_PMPADDR3:
+    case CSR_PMPADDR4:
+    case CSR_PMPADDR5:
+    case CSR_PMPADDR6:
+    case CSR_PMPADDR7:
+    case CSR_PMPADDR8:
+    case CSR_PMPADDR9:
+    case CSR_PMPADDR10:
+    case CSR_PMPADDR11:
+    case CSR_PMPADDR12:
+    case CSR_PMPADDR13:
+    case CSR_PMPADDR14:
+    case CSR_PMPADDR15:
+       return pmpaddr_csr_read(env, csrno - CSR_PMPADDR0);
+#endif
+    }
+    /* reached for unknown or unimplemented CSRs, e.g. a direct MTIME read */
+    do_raise_exception_err(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+}
+
+/*
+ * Check that CSR access is allowed.
+ *
+ * Adapted from Spike's decode.h:validate_csr
+ */
+static void validate_csr(CPURISCVState *env, uint64_t which,
+                         uint64_t write, uintptr_t ra)
+{
+#ifndef CONFIG_USER_ONLY
+    unsigned csr_priv = get_field((which), 0x300);
+    unsigned csr_read_only = get_field((which), 0xC00) == 3;
+    if (((write) && csr_read_only) || (env->priv < csr_priv)) {
+        do_raise_exception_err(env, RISCV_EXCP_ILLEGAL_INST, ra);
+    }
+#endif
+}
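+/* e.g. CSR 0xc00 (cycle) has privilege field 0 (user-accessible) but its top
+ * two bits are 0b11, marking it read-only, so any write to it traps above */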
+
+target_ulong helper_csrrw(CPURISCVState *env, target_ulong src,
+        target_ulong csr)
+{
+    validate_csr(env, csr, 1, GETPC());
+    uint64_t csr_backup = csr_read_helper(env, csr);
+    csr_write_helper(env, src, csr);
+    return csr_backup;
+}
+
+target_ulong helper_csrrs(CPURISCVState *env, target_ulong src,
+        target_ulong csr, target_ulong rs1_pass)
+{
+    validate_csr(env, csr, rs1_pass != 0, GETPC());
+    uint64_t csr_backup = csr_read_helper(env, csr);
+    if (rs1_pass != 0) {
+        csr_write_helper(env, src | csr_backup, csr);
+    }
+    return csr_backup;
+}
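+/* e.g. the pseudo-instruction "csrr a0, sstatus" is csrrs with rs1 == x0, so
+ * rs1_pass arrives as 0 and the CSR is read without triggering any write
+ * side effects, as the ISA requires for this form */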
+
+target_ulong helper_csrrc(CPURISCVState *env, target_ulong src,
+        target_ulong csr, target_ulong rs1_pass)
+{
+    validate_csr(env, csr, rs1_pass != 0, GETPC());
+    uint64_t csr_backup = csr_read_helper(env, csr);
+    if (rs1_pass != 0) {
+        csr_write_helper(env, (~src) & csr_backup, csr);
+    }
+    return csr_backup;
+}
+
+#ifndef CONFIG_USER_ONLY
+
+/* iothread_mutex must be held */
+void riscv_set_local_interrupt(RISCVCPU *cpu, target_ulong mask, int value)
+{
+    target_ulong old_mip = cpu->env.mip;
+    cpu->env.mip = (old_mip & ~mask) | (value ? mask : 0);
+
+    if (cpu->env.mip && !old_mip) {
+        cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
+    } else if (!cpu->env.mip && old_mip) {
+        cpu_reset_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
+    }
+}
+
+void riscv_set_mode(CPURISCVState *env, target_ulong newpriv)
+{
+    if (newpriv > PRV_M) {
+        g_assert_not_reached();
+    }
+    if (newpriv == PRV_H) {
+        newpriv = PRV_U;
+    }
+    /* tlb_flush is unnecessary as mode is contained in mmu_idx */
+    env->priv = newpriv;
+}
+
+target_ulong helper_sret(CPURISCVState *env, target_ulong cpu_pc_deb)
+{
+    if (!(env->priv >= PRV_S)) {
+        do_raise_exception_err(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+    }
+
+    target_ulong retpc = env->sepc;
+    if (!riscv_has_ext(env, RVC) && (retpc & 0x3)) {
+        do_raise_exception_err(env, RISCV_EXCP_INST_ADDR_MIS, GETPC());
+    }
+
+    target_ulong mstatus = env->mstatus;
+    target_ulong prev_priv = get_field(mstatus, MSTATUS_SPP);
+    mstatus = set_field(mstatus,
+        env->priv_ver >= PRIV_VERSION_1_10_0 ?
+        MSTATUS_SIE : MSTATUS_UIE << prev_priv,
+        get_field(mstatus, MSTATUS_SPIE));
+    mstatus = set_field(mstatus, MSTATUS_SPIE, 0);
+    mstatus = set_field(mstatus, MSTATUS_SPP, PRV_U);
+    riscv_set_mode(env, prev_priv);
+    csr_write_helper(env, mstatus, CSR_MSTATUS);
+
+    return retpc;
+}
+
+target_ulong helper_mret(CPURISCVState *env, target_ulong cpu_pc_deb)
+{
+    if (!(env->priv >= PRV_M)) {
+        do_raise_exception_err(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+    }
+
+    target_ulong retpc = env->mepc;
+    if (!riscv_has_ext(env, RVC) && (retpc & 0x3)) {
+        do_raise_exception_err(env, RISCV_EXCP_INST_ADDR_MIS, GETPC());
+    }
+
+    target_ulong mstatus = env->mstatus;
+    target_ulong prev_priv = get_field(mstatus, MSTATUS_MPP);
+    mstatus = set_field(mstatus,
+        env->priv_ver >= PRIV_VERSION_1_10_0 ?
+        MSTATUS_MIE : MSTATUS_UIE << prev_priv,
+        get_field(mstatus, MSTATUS_MPIE));
+    mstatus = set_field(mstatus, MSTATUS_MPIE, 0);
+    mstatus = set_field(mstatus, MSTATUS_MPP, PRV_U);
+    riscv_set_mode(env, prev_priv);
+    csr_write_helper(env, mstatus, CSR_MSTATUS);
+
+    return retpc;
+}
+
+
+void helper_wfi(CPURISCVState *env)
+{
+    CPUState *cs = CPU(riscv_env_get_cpu(env));
+
+    cs->halted = 1;
+    cs->exception_index = EXCP_HLT;
+    cpu_loop_exit(cs);
+}
+
+void helper_tlb_flush(CPURISCVState *env)
+{
+    RISCVCPU *cpu = riscv_env_get_cpu(env);
+    CPUState *cs = CPU(cpu);
+    tlb_flush(cs);
+}
+
+#endif /* !CONFIG_USER_ONLY */
diff --git a/target/riscv/pmp.c b/target/riscv/pmp.c
new file mode 100644
index 0000000000..f432f3b759
--- /dev/null
+++ b/target/riscv/pmp.c
@@ -0,0 +1,380 @@
+/*
+ * QEMU RISC-V PMP (Physical Memory Protection)
+ *
+ * Author: Daire McNamara, daire.mcnamara@emdalo.com
+ *         Ivan Griffin, ivan.griffin@emdalo.com
+ *
+ * This provides a RISC-V Physical Memory Protection implementation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * PMP (Physical Memory Protection) is as-of-yet unused and needs testing.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+#include "qapi/error.h"
+#include "cpu.h"
+#include "qemu-common.h"
+
+#ifndef CONFIG_USER_ONLY
+
+#define RISCV_DEBUG_PMP 0
+#define PMP_DEBUG(fmt, ...)                                                    \
+    do {                                                                       \
+        if (RISCV_DEBUG_PMP) {                                                 \
+            qemu_log_mask(LOG_TRACE, "%s: " fmt "\n", __func__, ##__VA_ARGS__);\
+        }                                                                      \
+    } while (0)
+
+static void pmp_write_cfg(CPURISCVState *env, uint32_t addr_index,
+    uint8_t val);
+static uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t addr_index);
+static void pmp_update_rule(CPURISCVState *env, uint32_t pmp_index);
+
+/*
+ * Accessor to extract the address-matching type (the 'A' field) from a cfg reg
+ */
+static inline uint8_t pmp_get_a_field(uint8_t cfg)
+{
+    uint8_t a = cfg >> 3;
+    return a & 0x3;
+}
+
+/*
+ * Check whether a PMP is locked or not.
+ */
+static inline int pmp_is_locked(CPURISCVState *env, uint32_t pmp_index)
+{
+    if (env->pmp_state.pmp[pmp_index].cfg_reg & PMP_LOCK) {
+        return 1;
+    }
+
+    /* Top PMP has no 'next' to check */
+    if ((pmp_index + 1u) >= MAX_RISCV_PMPS) {
+        return 0;
+    }
+
+    /* In TOR mode, the lock bit of the next entry (if there is one) also
+     * locks this entry's address register
+     */
+    const uint8_t a_field =
+        pmp_get_a_field(env->pmp_state.pmp[pmp_index + 1].cfg_reg);
+    if ((env->pmp_state.pmp[pmp_index + 1u].cfg_reg & PMP_LOCK) &&
+         (PMP_AMATCH_TOR == a_field)) {
+        return 1;
+    }
+
+    return 0;
+}
+
+/*
+ * Count the number of active rules.
+ */
+static inline uint32_t pmp_get_num_rules(CPURISCVState *env)
+{
+    return env->pmp_state.num_rules;
+}
+
+/*
+ * Accessor to get the cfg reg for a specific PMP/HART
+ */
+static inline uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t pmp_index)
+{
+    if (pmp_index < MAX_RISCV_PMPS) {
+        return env->pmp_state.pmp[pmp_index].cfg_reg;
+    }
+
+    return 0;
+}
+
+
+/*
+ * Accessor to set the cfg reg for a specific PMP/HART
+ * Checks bounds and honours the relevant lock bit.
+ */
+static void pmp_write_cfg(CPURISCVState *env, uint32_t pmp_index, uint8_t val)
+{
+    if (pmp_index < MAX_RISCV_PMPS) {
+        if (!pmp_is_locked(env, pmp_index)) {
+            env->pmp_state.pmp[pmp_index].cfg_reg = val;
+            pmp_update_rule(env, pmp_index);
+        } else {
+            PMP_DEBUG("ignoring write - locked");
+        }
+    } else {
+        PMP_DEBUG("ignoring write - out of bounds");
+    }
+}
+
+static void pmp_decode_napot(target_ulong a, target_ulong *sa, target_ulong *ea)
+{
+    /*
+       aaaa...aaa0   8-byte NAPOT range
+       aaaa...aa01   16-byte NAPOT range
+       aaaa...a011   32-byte NAPOT range
+       ...
+       aa01...1111   2^XLEN-byte NAPOT range
+       a011...1111   2^(XLEN+1)-byte NAPOT range
+       0111...1111   2^(XLEN+2)-byte NAPOT range
+       1111...1111   Reserved
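+
+       For example, a = 0x20000fff has twelve trailing ones, so t1 = 12,
+       base = (a & ~0xfff) << 2 = 0x80000000 and range = 2^15 - 1, i.e.
+       the rule covers the 32 KiB region [0x80000000, 0x80007fff].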
+    */
+    if (a == -1) {
+        *sa = 0u;
+        *ea = -1;
+        return;
+    } else {
+        target_ulong t1 = ctz64(~a);
+        target_ulong base = (a & ~(((target_ulong)1 << t1) - 1)) << 2;
+        target_ulong range = ((target_ulong)1 << (t1 + 3)) - 1;
+        *sa = base;
+        *ea = base + range;
+    }
+}
+
+
+/* Convert cfg/addr reg values here into simple 'sa' --> start address and
+ * 'ea' --> end address values.
+ * This function is called relatively infrequently, whereas the check that
+ * an address is within a pmp rule is called often, so optimise that check.
+ */
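+/*
+ * For example, a TOR rule with pmpaddr[i-1] = 0x20000000 and
+ * pmpaddr[i] = 0x20001000 decodes to sa = 0x80000000, ea = 0x80003fff.
+ */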
+static void pmp_update_rule(CPURISCVState *env, uint32_t pmp_index)
+{
+    int i;
+
+    env->pmp_state.num_rules = 0;
+
+    uint8_t this_cfg = env->pmp_state.pmp[pmp_index].cfg_reg;
+    target_ulong this_addr = env->pmp_state.pmp[pmp_index].addr_reg;
+    target_ulong prev_addr = 0u;
+    target_ulong sa = 0u;
+    target_ulong ea = 0u;
+
+    if (pmp_index >= 1u) {
+        prev_addr = env->pmp_state.pmp[pmp_index - 1].addr_reg;
+    }
+
+    switch (pmp_get_a_field(this_cfg)) {
+    case PMP_AMATCH_OFF:
+        sa = 0u;
+        ea = -1;
+        break;
+
+    case PMP_AMATCH_TOR:
+        sa = prev_addr << 2; /* shift up from [xx:0] to [xx+2:2] */
+        ea = (this_addr << 2) - 1u;
+        break;
+
+    case PMP_AMATCH_NA4:
+        sa = this_addr << 2; /* shift up from [xx:0] to [xx+2:2] */
+        ea = (sa + 4u) - 1u;
+        break;
+
+    case PMP_AMATCH_NAPOT:
+        pmp_decode_napot(this_addr, &sa, &ea);
+        break;
+
+    default:
+        sa = 0u;
+        ea = 0u;
+        break;
+    }
+
+    env->pmp_state.addr[pmp_index].sa = sa;
+    env->pmp_state.addr[pmp_index].ea = ea;
+
+    for (i = 0; i < MAX_RISCV_PMPS; i++) {
+        const uint8_t a_field =
+            pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg);
+        if (PMP_AMATCH_OFF != a_field) {
+            env->pmp_state.num_rules++;
+        }
+    }
+}
+
+static int pmp_is_in_range(CPURISCVState *env, int pmp_index, target_ulong addr)
+{
+    int result = 0;
+
+    if ((addr >= env->pmp_state.addr[pmp_index].sa)
+        && (addr <= env->pmp_state.addr[pmp_index].ea)) {
+        result = 1;
+    } else {
+        result = 0;
+    }
+
+    return result;
+}
+
+
+/*
+ * Public Interface
+ */
+
+/*
+ * Check if the address has required RWX privs to complete desired operation
+ */
+bool pmp_hart_has_privs(CPURISCVState *env, target_ulong addr,
+    target_ulong size, pmp_priv_t privs)
+{
+    int i = 0;
+    int ret = -1;
+    target_ulong s = 0;
+    target_ulong e = 0;
+    pmp_priv_t allowed_privs = 0;
+
+    /* Short cut if no rules */
+    if (0 == pmp_get_num_rules(env)) {
+        return true;
+    }
+
+    /* 1.10 draft priv spec states there is an implicit order
+         from low to high */
+    for (i = 0; i < MAX_RISCV_PMPS; i++) {
+        s = pmp_is_in_range(env, i, addr);
+        e = pmp_is_in_range(env, i, addr + size - 1);
+
+        /* partially inside */
+        if ((s + e) == 1) {
+            PMP_DEBUG("pmp violation - access is partially inside");
+            ret = 0;
+            break;
+        }
+
+        /* fully inside */
+        const uint8_t a_field =
+            pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg);
+        if ((s + e) == 2) {
+            if (PMP_AMATCH_OFF == a_field) {
+                return true;
+            }
+
+            allowed_privs = PMP_READ | PMP_WRITE | PMP_EXEC;
+            if ((env->priv != PRV_M) || pmp_is_locked(env, i)) {
+                allowed_privs &= env->pmp_state.pmp[i].cfg_reg;
+            }
+
+            if ((privs & allowed_privs) == privs) {
+                ret = 1;
+                break;
+            } else {
+                ret = 0;
+                break;
+            }
+        }
+    }
+
+    /* No rule matched */
+    if (ret == -1) {
+        if (env->priv == PRV_M) {
+            ret = 1; /* Privileged spec v1.10 states if no PMP entry matches an
+                      * M-Mode access, the access succeeds */
+        } else {
+            ret = 0; /* Other modes are not allowed to succeed if they don't
+                      * match a rule, but there are rules.  We've checked for
+                      * no rule earlier in this function. */
+        }
+    }
+
+    return ret == 1;
+}
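+
+/*
+ * Illustrative caller sketch (hypothetical; as noted above, PMP is not yet
+ * wired up): the softmmu translation path could gate a physical access with
+ *
+ *     if (!pmp_hart_has_privs(env, pa, size, PMP_READ)) {
+ *         return TRANSLATE_FAIL;
+ *     }
+ *
+ * where TRANSLATE_FAIL is the translate result code used in helper.c.
+ */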
+
+
+/*
+ * Handle a write to a pmpcfg CSR
+ */
+void pmpcfg_csr_write(CPURISCVState *env, uint32_t reg_index,
+    target_ulong val)
+{
+    int i;
+    uint8_t cfg_val;
+
+    PMP_DEBUG("hart " TARGET_FMT_ld ": reg%d, val: 0x" TARGET_FMT_lx,
+        env->mhartid, reg_index, val);
+
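+    /* On RV64 only the even-numbered pmpcfg CSRs exist; each packs eight
+     * 8-bit entry configurations, one per PMP entry. */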
+    if ((reg_index & 1) && (sizeof(target_ulong) == 8)) {
+        PMP_DEBUG("ignoring write - incorrect address");
+        return;
+    }
+
+    for (i = 0; i < sizeof(target_ulong); i++) {
+        cfg_val = (val >> 8 * i) & 0xff;
+        pmp_write_cfg(env, (reg_index * sizeof(target_ulong)) + i,
+            cfg_val);
+    }
+}
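+
+/*
+ * For example, on RV64 a write of 0x1f1f to pmpcfg0 programs entries 0 and 1
+ * with cfg 0x1f (NAPOT match, R|W|X) and switches entries 2-7 off.
+ */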
+
+
+/*
+ * Handle a read from a pmpcfg CSR
+ */
+target_ulong pmpcfg_csr_read(CPURISCVState *env, uint32_t reg_index)
+{
+    int i;
+    target_ulong cfg_val = 0;
+    uint8_t val = 0;
+
+    for (i = 0; i < sizeof(target_ulong); i++) {
+        val = pmp_read_cfg(env, (reg_index * sizeof(target_ulong)) + i);
+        cfg_val |= ((target_ulong)val << (i * 8));
+    }
+
+    PMP_DEBUG("hart " TARGET_FMT_ld ": reg%d, val: 0x" TARGET_FMT_lx,
+        env->mhartid, reg_index, cfg_val);
+
+    return cfg_val;
+}
+
+
+/*
+ * Handle a write to a pmpaddr CSR
+ */
+void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
+    target_ulong val)
+{
+    PMP_DEBUG("hart " TARGET_FMT_ld ": addr%d, val: 0x" TARGET_FMT_lx,
+        env->mhartid, addr_index, val);
+
+    if (addr_index < MAX_RISCV_PMPS) {
+        if (!pmp_is_locked(env, addr_index)) {
+            env->pmp_state.pmp[addr_index].addr_reg = val;
+            pmp_update_rule(env, addr_index);
+        } else {
+            PMP_DEBUG("ignoring write - locked");
+        }
+    } else {
+        PMP_DEBUG("ignoring write - out of bounds");
+    }
+}
+
+
+/*
+ * Handle a read from a pmpaddr CSR
+ */
+target_ulong pmpaddr_csr_read(CPURISCVState *env, uint32_t addr_index)
+{
+    PMP_DEBUG("hart " TARGET_FMT_ld ": addr%d, val: 0x" TARGET_FMT_lx,
+        env->mhartid, addr_index,
+        env->pmp_state.pmp[addr_index].addr_reg);
+    if (addr_index < MAX_RISCV_PMPS) {
+        return env->pmp_state.pmp[addr_index].addr_reg;
+    } else {
+        PMP_DEBUG("ignoring read - out of bounds");
+        return 0;
+    }
+}
+
+#endif
diff --git a/target/riscv/pmp.h b/target/riscv/pmp.h
new file mode 100644
index 0000000000..e3953c885f
--- /dev/null
+++ b/target/riscv/pmp.h
@@ -0,0 +1,64 @@
+/*
+ * QEMU RISC-V PMP (Physical Memory Protection)
+ *
+ * Author: Daire McNamara, daire.mcnamara@emdalo.com
+ *         Ivan Griffin, ivan.griffin@emdalo.com
+ *
+ * This provides a RISC-V Physical Memory Protection interface
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _RISCV_PMP_H_
+#define _RISCV_PMP_H_
+
+typedef enum {
+    PMP_READ  = 1 << 0,
+    PMP_WRITE = 1 << 1,
+    PMP_EXEC  = 1 << 2,
+    PMP_LOCK  = 1 << 7
+} pmp_priv_t;
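+
+/* These privilege flags occupy bits 0-2 and bit 7 of each 8-bit pmpXcfg
+ * field; bits 4:3 hold the pmp_am_t address-matching mode below. */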
+
+typedef enum {
+    PMP_AMATCH_OFF,  /* Null (off)                            */
+    PMP_AMATCH_TOR,  /* Top of Range                          */
+    PMP_AMATCH_NA4,  /* Naturally aligned four-byte region    */
+    PMP_AMATCH_NAPOT /* Naturally aligned power-of-two region */
+} pmp_am_t;
+
+typedef struct {
+    target_ulong addr_reg;
+    uint8_t  cfg_reg;
+} pmp_entry_t;
+
+typedef struct {
+    target_ulong sa;
+    target_ulong ea;
+} pmp_addr_t;
+
+typedef struct {
+    pmp_entry_t pmp[MAX_RISCV_PMPS];
+    pmp_addr_t  addr[MAX_RISCV_PMPS];
+    uint32_t num_rules;
+} pmp_table_t;
+
+void pmpcfg_csr_write(CPURISCVState *env, uint32_t reg_index,
+    target_ulong val);
+target_ulong pmpcfg_csr_read(CPURISCVState *env, uint32_t reg_index);
+void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
+    target_ulong val);
+target_ulong pmpaddr_csr_read(CPURISCVState *env, uint32_t addr_index);
+bool pmp_hart_has_privs(CPURISCVState *env, target_ulong addr,
+    target_ulong size, pmp_priv_t priv);
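+
+/*
+ * Usage sketch (illustrative): granting R/W/X over the 32 KiB NAPOT region
+ * at 0x80000000 through entry 0 amounts to
+ *
+ *     pmpaddr_csr_write(env, 0, 0x20000fff);
+ *     pmpcfg_csr_write(env, 0, PMP_READ | PMP_WRITE | PMP_EXEC |
+ *                              (PMP_AMATCH_NAPOT << 3));
+ */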
+
+#endif
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
new file mode 100644
index 0000000000..808eab7f50
--- /dev/null
+++ b/target/riscv/translate.c
@@ -0,0 +1,1978 @@
+/*
+ * RISC-V emulation for qemu: main translation routines.
+ *
+ * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+#include "cpu.h"
+#include "tcg-op.h"
+#include "disas/disas.h"
+#include "exec/cpu_ldst.h"
+#include "exec/exec-all.h"
+#include "exec/helper-proto.h"
+#include "exec/helper-gen.h"
+
+#include "exec/log.h"
+
+#include "instmap.h"
+
+/* global register indices */
+static TCGv cpu_gpr[32], cpu_pc;
+static TCGv_i64 cpu_fpr[32]; /* assume F and D extensions */
+static TCGv load_res;
+static TCGv load_val;
+
+#include "exec/gen-icount.h"
+
+typedef struct DisasContext {
+    struct TranslationBlock *tb;
+    target_ulong pc;
+    target_ulong next_pc;
+    uint32_t opcode;
+    uint32_t flags;
+    uint32_t mem_idx;
+    int singlestep_enabled;
+    int bstate;
+    /* Remember the rounding mode encoded in the previous fp instruction,
+       which we have already installed into env->fp_status.  Or -1 for
+       no previous fp instruction.  Note that we exit the TB when writing
+       to any system register, which includes CSR_FRM, so we do not have
+       to reset this known value.  */
+    int frm;
+} DisasContext;
+
+enum {
+    BS_NONE     = 0, /* When seen outside the translation while-loop,
+                     indicates the tb needs to exit due to end of page. */
+    BS_STOP     = 1, /* Need to exit tb for syscall, sret, etc. */
+    BS_BRANCH   = 2, /* Need to exit tb for branch, jal, etc. */
+};
+
+/* convert riscv funct3 to qemu memop for load/store */
+static const int tcg_memop_lookup[8] = {
+    [0 ... 7] = -1,
+    [0] = MO_SB,
+    [1] = MO_TESW,
+    [2] = MO_TESL,
+    [4] = MO_UB,
+    [5] = MO_TEUW,
+#ifdef TARGET_RISCV64
+    [3] = MO_TEQ,
+    [6] = MO_TEUL,
+#endif
+};
+
+#ifdef TARGET_RISCV64
+#define CASE_OP_32_64(X) case X: case glue(X, W)
+#else
+#define CASE_OP_32_64(X) case X
+#endif
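+
+/* e.g. CASE_OP_32_64(OPC_RISC_ADD) expands to
+ * "case OPC_RISC_ADD: case OPC_RISC_ADDW" on a TARGET_RISCV64 build */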
+
+static void generate_exception(DisasContext *ctx, int excp)
+{
+    tcg_gen_movi_tl(cpu_pc, ctx->pc);
+    TCGv_i32 helper_tmp = tcg_const_i32(excp);
+    gen_helper_raise_exception(cpu_env, helper_tmp);
+    tcg_temp_free_i32(helper_tmp);
+    ctx->bstate = BS_BRANCH;
+}
+
+static void generate_exception_mbadaddr(DisasContext *ctx, int excp)
+{
+    tcg_gen_movi_tl(cpu_pc, ctx->pc);
+    tcg_gen_st_tl(cpu_pc, cpu_env, offsetof(CPURISCVState, badaddr));
+    TCGv_i32 helper_tmp = tcg_const_i32(excp);
+    gen_helper_raise_exception(cpu_env, helper_tmp);
+    tcg_temp_free_i32(helper_tmp);
+    ctx->bstate = BS_BRANCH;
+}
+
+static void gen_exception_debug(void)
+{
+    TCGv_i32 helper_tmp = tcg_const_i32(EXCP_DEBUG);
+    gen_helper_raise_exception(cpu_env, helper_tmp);
+    tcg_temp_free_i32(helper_tmp);
+}
+
+static void gen_exception_illegal(DisasContext *ctx)
+{
+    generate_exception(ctx, RISCV_EXCP_ILLEGAL_INST);
+}
+
+static void gen_exception_inst_addr_mis(DisasContext *ctx)
+{
+    generate_exception_mbadaddr(ctx, RISCV_EXCP_INST_ADDR_MIS);
+}
+
+static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest)
+{
+    if (unlikely(ctx->singlestep_enabled)) {
+        return false;
+    }
+
+#ifndef CONFIG_USER_ONLY
+    return (ctx->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
+#else
+    return true;
+#endif
+}
+
+static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
+{
+    if (use_goto_tb(ctx, dest)) {
+        /* chaining is only allowed when the jump is to the same page */
+        tcg_gen_goto_tb(n);
+        tcg_gen_movi_tl(cpu_pc, dest);
+        tcg_gen_exit_tb((uintptr_t)ctx->tb + n);
+    } else {
+        tcg_gen_movi_tl(cpu_pc, dest);
+        if (ctx->singlestep_enabled) {
+            gen_exception_debug();
+        } else {
+            tcg_gen_exit_tb(0);
+        }
+    }
+}
+
+/* Wrapper for getting reg values - need to check if reg is zero since
+ * cpu_gpr[0] is not actually allocated
+ */
+static inline void gen_get_gpr(TCGv t, int reg_num)
+{
+    if (reg_num == 0) {
+        tcg_gen_movi_tl(t, 0);
+    } else {
+        tcg_gen_mov_tl(t, cpu_gpr[reg_num]);
+    }
+}
+
+/* Wrapper for setting reg values - need to check if reg is zero since
+ * cpu_gpr[0] is not actually allocated. This is more for safety purposes,
+ * since we usually avoid calling the OP_TYPE_gen function if we see a write to
+ * $zero
+ */
+static inline void gen_set_gpr(int reg_num_dst, TCGv t)
+{
+    if (reg_num_dst != 0) {
+        tcg_gen_mov_tl(cpu_gpr[reg_num_dst], t);
+    }
+}
+
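+/*
+ * MULHSU identity used below: for signed a and unsigned b,
+ *     mulhsu(a, b) = mulhu(a, b) - (a < 0 ? b : 0)
+ * since reading a negative a as unsigned adds 2^XLEN, which contributes
+ * exactly b to the high half of the product.
+ */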
+static void gen_mulhsu(TCGv ret, TCGv arg1, TCGv arg2)
+{
+    TCGv rl = tcg_temp_new();
+    TCGv rh = tcg_temp_new();
+
+    tcg_gen_mulu2_tl(rl, rh, arg1, arg2);
+    /* fix up the high half when arg1 is negative */
+    tcg_gen_sari_tl(rl, arg1, TARGET_LONG_BITS - 1);
+    tcg_gen_and_tl(rl, rl, arg2);
+    tcg_gen_sub_tl(ret, rh, rl);
+
+    tcg_temp_free(rl);
+    tcg_temp_free(rh);
+}
+
+static void gen_fsgnj(DisasContext *ctx, uint32_t rd, uint32_t rs1,
+    uint32_t rs2, int rm, uint64_t min)
+{
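+    /* 'min' is the sign-bit mask for the operand width: callers pass
+       INT32_MIN for single precision and INT64_MIN for double. */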
+    switch (rm) {
+    case 0: /* fsgnj */
+        if (rs1 == rs2) { /* FMOV */
+            tcg_gen_mov_i64(cpu_fpr[rd], cpu_fpr[rs1]);
+        } else {
+            tcg_gen_deposit_i64(cpu_fpr[rd], cpu_fpr[rs2], cpu_fpr[rs1],
+                                0, min == INT32_MIN ? 31 : 63);
+        }
+        break;
+    case 1: /* fsgnjn */
+        if (rs1 == rs2) { /* FNEG */
+            tcg_gen_xori_i64(cpu_fpr[rd], cpu_fpr[rs1], min);
+        } else {
+            TCGv_i64 t0 = tcg_temp_new_i64();
+            tcg_gen_not_i64(t0, cpu_fpr[rs2]);
+            tcg_gen_deposit_i64(cpu_fpr[rd], t0, cpu_fpr[rs1],
+                                0, min == INT32_MIN ? 31 : 63);
+            tcg_temp_free_i64(t0);
+        }
+        break;
+    case 2: /* fsgnjx */
+        if (rs1 == rs2) { /* FABS */
+            tcg_gen_andi_i64(cpu_fpr[rd], cpu_fpr[rs1], ~min);
+        } else {
+            TCGv_i64 t0 = tcg_temp_new_i64();
+            tcg_gen_andi_i64(t0, cpu_fpr[rs2], min);
+            tcg_gen_xor_i64(cpu_fpr[rd], cpu_fpr[rs1], t0);
+            tcg_temp_free_i64(t0);
+        }
+        break;
+    default:
+        gen_exception_illegal(ctx);
+    }
+}
+
+static void gen_arith(DisasContext *ctx, uint32_t opc, int rd, int rs1,
+        int rs2)
+{
+    TCGv source1, source2, cond1, cond2, zeroreg, resultopt1;
+    source1 = tcg_temp_new();
+    source2 = tcg_temp_new();
+    gen_get_gpr(source1, rs1);
+    gen_get_gpr(source2, rs2);
+
+    switch (opc) {
+    CASE_OP_32_64(OPC_RISC_ADD):
+        tcg_gen_add_tl(source1, source1, source2);
+        break;
+    CASE_OP_32_64(OPC_RISC_SUB):
+        tcg_gen_sub_tl(source1, source1, source2);
+        break;
+#if defined(TARGET_RISCV64)
+    case OPC_RISC_SLLW:
+        tcg_gen_andi_tl(source2, source2, 0x1F);
+        tcg_gen_shl_tl(source1, source1, source2);
+        break;
+#endif
+    case OPC_RISC_SLL:
+        tcg_gen_andi_tl(source2, source2, TARGET_LONG_BITS - 1);
+        tcg_gen_shl_tl(source1, source1, source2);
+        break;
+    case OPC_RISC_SLT:
+        tcg_gen_setcond_tl(TCG_COND_LT, source1, source1, source2);
+        break;
+    case OPC_RISC_SLTU:
+        tcg_gen_setcond_tl(TCG_COND_LTU, source1, source1, source2);
+        break;
+    case OPC_RISC_XOR:
+        tcg_gen_xor_tl(source1, source1, source2);
+        break;
+#if defined(TARGET_RISCV64)
+    case OPC_RISC_SRLW:
+        /* clear upper 32 */
+        tcg_gen_ext32u_tl(source1, source1);
+        tcg_gen_andi_tl(source2, source2, 0x1F);
+        tcg_gen_shr_tl(source1, source1, source2);
+        break;
+#endif
+    case OPC_RISC_SRL:
+        tcg_gen_andi_tl(source2, source2, TARGET_LONG_BITS - 1);
+        tcg_gen_shr_tl(source1, source1, source2);
+        break;
+#if defined(TARGET_RISCV64)
+    case OPC_RISC_SRAW:
+        /* first, sign-extend source1 from bit 31 so the shift acts on
+        32 bits and the result fills the upper half with the sign */
+        tcg_gen_ext32s_tl(source1, source1);
+        tcg_gen_andi_tl(source2, source2, 0x1F);
+        tcg_gen_sar_tl(source1, source1, source2);
+        break;
+#endif
+    case OPC_RISC_SRA:
+        tcg_gen_andi_tl(source2, source2, TARGET_LONG_BITS - 1);
+        tcg_gen_sar_tl(source1, source1, source2);
+        break;
+    case OPC_RISC_OR:
+        tcg_gen_or_tl(source1, source1, source2);
+        break;
+    case OPC_RISC_AND:
+        tcg_gen_and_tl(source1, source1, source2);
+        break;
+    CASE_OP_32_64(OPC_RISC_MUL):
+        tcg_gen_mul_tl(source1, source1, source2);
+        break;
+    case OPC_RISC_MULH:
+        tcg_gen_muls2_tl(source2, source1, source1, source2);
+        break;
+    case OPC_RISC_MULHSU:
+        gen_mulhsu(source1, source1, source2);
+        break;
+    case OPC_RISC_MULHU:
+        tcg_gen_mulu2_tl(source2, source1, source1, source2);
+        break;
+#if defined(TARGET_RISCV64)
+    case OPC_RISC_DIVW:
+        tcg_gen_ext32s_tl(source1, source1);
+        tcg_gen_ext32s_tl(source2, source2);
+        /* fall through to DIV */
+#endif
+    case OPC_RISC_DIV:
+        /* Handle by altering args to tcg_gen_div to produce req'd results:
+         * For overflow: want source1 in source1 and 1 in source2
+         * For div by zero: want -1 in source1 and 1 in source2 -> -1 result */
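+        /* e.g. on RV64, INT64_MIN / -1 becomes INT64_MIN / 1 = INT64_MIN,
+         * and x / 0 becomes -1 / 1 = -1, the ISA-mandated results. */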
+        cond1 = tcg_temp_new();
+        cond2 = tcg_temp_new();
+        zeroreg = tcg_const_tl(0);
+        resultopt1 = tcg_temp_new();
+
+        tcg_gen_movi_tl(resultopt1, (target_ulong)-1);
+        tcg_gen_setcondi_tl(TCG_COND_EQ, cond2, source2, (target_ulong)(~0L));
+        tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source1,
+                            ((target_ulong)1) << (TARGET_LONG_BITS - 1));
+        tcg_gen_and_tl(cond1, cond1, cond2); /* cond1 = overflow */
+        tcg_gen_setcondi_tl(TCG_COND_EQ, cond2, source2, 0); /* cond2 = div 0 */
+        /* if div by zero, set source1 to -1, otherwise don't change */
+        tcg_gen_movcond_tl(TCG_COND_EQ, source1, cond2, zeroreg, source1,
+                resultopt1);
+        /* if overflow or div by zero, set source2 to 1, else don't change */
+        tcg_gen_or_tl(cond1, cond1, cond2);
+        tcg_gen_movi_tl(resultopt1, (target_ulong)1);
+        tcg_gen_movcond_tl(TCG_COND_EQ, source2, cond1, zeroreg, source2,
+                resultopt1);
+        tcg_gen_div_tl(source1, source1, source2);
+
+        tcg_temp_free(cond1);
+        tcg_temp_free(cond2);
+        tcg_temp_free(zeroreg);
+        tcg_temp_free(resultopt1);
+        break;
+#if defined(TARGET_RISCV64)
+    case OPC_RISC_DIVUW:
+        tcg_gen_ext32u_tl(source1, source1);
+        tcg_gen_ext32u_tl(source2, source2);
+        /* fall through to DIVU */
+#endif
+    case OPC_RISC_DIVU:
+        cond1 = tcg_temp_new();
+        zeroreg = tcg_const_tl(0);
+        resultopt1 = tcg_temp_new();
+
+        tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source2, 0);
+        tcg_gen_movi_tl(resultopt1, (target_ulong)-1);
+        tcg_gen_movcond_tl(TCG_COND_EQ, source1, cond1, zeroreg, source1,
+                resultopt1);
+        tcg_gen_movi_tl(resultopt1, (target_ulong)1);
+        tcg_gen_movcond_tl(TCG_COND_EQ, source2, cond1, zeroreg, source2,
+                resultopt1);
+        tcg_gen_divu_tl(source1, source1, source2);
+
+        tcg_temp_free(cond1);
+        tcg_temp_free(zeroreg);
+        tcg_temp_free(resultopt1);
+        break;
+#if defined(TARGET_RISCV64)
+    case OPC_RISC_REMW:
+        tcg_gen_ext32s_tl(source1, source1);
+        tcg_gen_ext32s_tl(source2, source2);
+        /* fall through to REM */
+#endif
+    case OPC_RISC_REM:
+        cond1 = tcg_temp_new();
+        cond2 = tcg_temp_new();
+        zeroreg = tcg_const_tl(0);
+        resultopt1 = tcg_temp_new();
+
+        tcg_gen_movi_tl(resultopt1, 1L);
+        tcg_gen_setcondi_tl(TCG_COND_EQ, cond2, source2, (target_ulong)-1);
+        tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source1,
+                            (target_ulong)1 << (TARGET_LONG_BITS - 1));
+        tcg_gen_and_tl(cond2, cond1, cond2); /* cond2 = overflow */
+        tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source2, 0); /* cond1 = div 0 */
+        /* if overflow or div by zero, set source2 to 1, else don't change */
+        tcg_gen_or_tl(cond2, cond1, cond2);
+        tcg_gen_movcond_tl(TCG_COND_EQ, source2, cond2, zeroreg, source2,
+                resultopt1);
+        tcg_gen_rem_tl(resultopt1, source1, source2);
+        /* if div by zero, just return the original dividend */
+        tcg_gen_movcond_tl(TCG_COND_EQ, source1, cond1, zeroreg, resultopt1,
+                source1);
+
+        tcg_temp_free(cond1);
+        tcg_temp_free(cond2);
+        tcg_temp_free(zeroreg);
+        tcg_temp_free(resultopt1);
+        break;
+#if defined(TARGET_RISCV64)
+    case OPC_RISC_REMUW:
+        tcg_gen_ext32u_tl(source1, source1);
+        tcg_gen_ext32u_tl(source2, source2);
+        /* fall through to REMU */
+#endif
+    case OPC_RISC_REMU:
+        cond1 = tcg_temp_new();
+        zeroreg = tcg_const_tl(0);
+        resultopt1 = tcg_temp_new();
+
+        tcg_gen_movi_tl(resultopt1, (target_ulong)1);
+        tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source2, 0);
+        tcg_gen_movcond_tl(TCG_COND_EQ, source2, cond1, zeroreg, source2,
+                resultopt1);
+        tcg_gen_remu_tl(resultopt1, source1, source2);
+        /* if div by zero, just return the original dividend */
+        tcg_gen_movcond_tl(TCG_COND_EQ, source1, cond1, zeroreg, resultopt1,
+                source1);
+
+        tcg_temp_free(cond1);
+        tcg_temp_free(zeroreg);
+        tcg_temp_free(resultopt1);
+        break;
+    default:
+        gen_exception_illegal(ctx);
+        return;
+    }
+
+    if (opc & 0x8) { /* sign extend for W instructions */
+        tcg_gen_ext32s_tl(source1, source1);
+    }
+
+    gen_set_gpr(rd, source1);
+    tcg_temp_free(source1);
+    tcg_temp_free(source2);
+}
+
+static void gen_arith_imm(DisasContext *ctx, uint32_t opc, int rd,
+        int rs1, target_long imm)
+{
+    TCGv source1 = tcg_temp_new();
+    int shift_len = TARGET_LONG_BITS;
+    int shift_a;
+
+    gen_get_gpr(source1, rs1);
+
+    switch (opc) {
+    case OPC_RISC_ADDI:
+#if defined(TARGET_RISCV64)
+    case OPC_RISC_ADDIW:
+#endif
+        tcg_gen_addi_tl(source1, source1, imm);
+        break;
+    case OPC_RISC_SLTI:
+        tcg_gen_setcondi_tl(TCG_COND_LT, source1, source1, imm);
+        break;
+    case OPC_RISC_SLTIU:
+        tcg_gen_setcondi_tl(TCG_COND_LTU, source1, source1, imm);
+        break;
+    case OPC_RISC_XORI:
+        tcg_gen_xori_tl(source1, source1, imm);
+        break;
+    case OPC_RISC_ORI:
+        tcg_gen_ori_tl(source1, source1, imm);
+        break;
+    case OPC_RISC_ANDI:
+        tcg_gen_andi_tl(source1, source1, imm);
+        break;
+#if defined(TARGET_RISCV64)
+    case OPC_RISC_SLLIW:
+        shift_len = 32;
+        /* FALLTHRU */
+#endif
+    case OPC_RISC_SLLI:
+        if (imm >= shift_len) {
+            goto do_illegal;
+        }
+        tcg_gen_shli_tl(source1, source1, imm);
+        break;
+#if defined(TARGET_RISCV64)
+    case OPC_RISC_SHIFT_RIGHT_IW:
+        shift_len = 32;
+        /* FALLTHRU */
+#endif
+    case OPC_RISC_SHIFT_RIGHT_I:
+        /* differentiate on IMM */
+        shift_a = imm & 0x400;
+        imm &= 0x3ff;
+        if (imm >= shift_len) {
+            goto do_illegal;
+        }
+        if (imm != 0) {
+            if (shift_a) {
+                /* SRAI[W] */
+                tcg_gen_sextract_tl(source1, source1, imm, shift_len - imm);
+            } else {
+                /* SRLI[W] */
+                tcg_gen_extract_tl(source1, source1, imm, shift_len - imm);
+            }
+            /* No further sign-extension needed for W instructions.  */
+            opc &= ~0x8;
+        }
+        break;
+    default:
+    do_illegal:
+        gen_exception_illegal(ctx);
+        return;
+    }
+
+    if (opc & 0x8) { /* sign-extend for W instructions */
+        tcg_gen_ext32s_tl(source1, source1);
+    }
+
+    gen_set_gpr(rd, source1);
+    tcg_temp_free(source1);
+}
+
+static void gen_jal(CPURISCVState *env, DisasContext *ctx, int rd,
+                    target_ulong imm)
+{
+    target_ulong next_pc;
+
+    /* check misaligned: */
+    next_pc = ctx->pc + imm;
+    if (!riscv_has_ext(env, RVC)) {
+        if ((next_pc & 0x3) != 0) {
+            gen_exception_inst_addr_mis(ctx);
+            return;
+        }
+    }
+    if (rd != 0) {
+        tcg_gen_movi_tl(cpu_gpr[rd], ctx->next_pc);
+    }
+
+    gen_goto_tb(ctx, 0, next_pc); /* must use this for safety */
+    ctx->bstate = BS_BRANCH;
+}
+
+static void gen_jalr(CPURISCVState *env, DisasContext *ctx, uint32_t opc,
+                     int rd, int rs1, target_long imm)
+{
+    /* no chaining with JALR */
+    TCGLabel *misaligned = NULL;
+    TCGv t0 = tcg_temp_new();
+
+    switch (opc) {
+    case OPC_RISC_JALR:
+        gen_get_gpr(cpu_pc, rs1);
+        tcg_gen_addi_tl(cpu_pc, cpu_pc, imm);
+        tcg_gen_andi_tl(cpu_pc, cpu_pc, (target_ulong)-2);
+
+        if (!riscv_has_ext(env, RVC)) {
+            misaligned = gen_new_label();
+            tcg_gen_andi_tl(t0, cpu_pc, 0x2);
+            tcg_gen_brcondi_tl(TCG_COND_NE, t0, 0x0, misaligned);
+        }
+
+        if (rd != 0) {
+            tcg_gen_movi_tl(cpu_gpr[rd], ctx->next_pc);
+        }
+        tcg_gen_exit_tb(0);
+
+        if (misaligned) {
+            gen_set_label(misaligned);
+            gen_exception_inst_addr_mis(ctx);
+        }
+        ctx->bstate = BS_BRANCH;
+        break;
+
+    default:
+        gen_exception_illegal(ctx);
+        break;
+    }
+    tcg_temp_free(t0);
+}
+
+static void gen_branch(CPURISCVState *env, DisasContext *ctx, uint32_t opc,
+                       int rs1, int rs2, target_long bimm)
+{
+    TCGLabel *l = gen_new_label();
+    TCGv source1, source2;
+    source1 = tcg_temp_new();
+    source2 = tcg_temp_new();
+    gen_get_gpr(source1, rs1);
+    gen_get_gpr(source2, rs2);
+
+    switch (opc) {
+    case OPC_RISC_BEQ:
+        tcg_gen_brcond_tl(TCG_COND_EQ, source1, source2, l);
+        break;
+    case OPC_RISC_BNE:
+        tcg_gen_brcond_tl(TCG_COND_NE, source1, source2, l);
+        break;
+    case OPC_RISC_BLT:
+        tcg_gen_brcond_tl(TCG_COND_LT, source1, source2, l);
+        break;
+    case OPC_RISC_BGE:
+        tcg_gen_brcond_tl(TCG_COND_GE, source1, source2, l);
+        break;
+    case OPC_RISC_BLTU:
+        tcg_gen_brcond_tl(TCG_COND_LTU, source1, source2, l);
+        break;
+    case OPC_RISC_BGEU:
+        tcg_gen_brcond_tl(TCG_COND_GEU, source1, source2, l);
+        break;
+    default:
+        gen_exception_illegal(ctx);
+        return;
+    }
+    tcg_temp_free(source1);
+    tcg_temp_free(source2);
+
+    gen_goto_tb(ctx, 1, ctx->next_pc);
+    gen_set_label(l); /* branch taken */
+    if (!riscv_has_ext(env, RVC) && ((ctx->pc + bimm) & 0x3)) {
+        /* misaligned */
+        gen_exception_inst_addr_mis(ctx);
+    } else {
+        gen_goto_tb(ctx, 0, ctx->pc + bimm);
+    }
+    ctx->bstate = BS_BRANCH;
+}
+
+static void gen_load(DisasContext *ctx, uint32_t opc, int rd, int rs1,
+        target_long imm)
+{
+    TCGv t0 = tcg_temp_new();
+    TCGv t1 = tcg_temp_new();
+    gen_get_gpr(t0, rs1);
+    tcg_gen_addi_tl(t0, t0, imm);
+    int memop = tcg_memop_lookup[(opc >> 12) & 0x7];
+
+    if (memop < 0) {
+        gen_exception_illegal(ctx);
+        return;
+    }
+
+    tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, memop);
+    gen_set_gpr(rd, t1);
+    tcg_temp_free(t0);
+    tcg_temp_free(t1);
+}
+
+static void gen_store(DisasContext *ctx, uint32_t opc, int rs1, int rs2,
+        target_long imm)
+{
+    TCGv t0 = tcg_temp_new();
+    TCGv dat = tcg_temp_new();
+    gen_get_gpr(t0, rs1);
+    tcg_gen_addi_tl(t0, t0, imm);
+    gen_get_gpr(dat, rs2);
+    int memop = tcg_memop_lookup[(opc >> 12) & 0x7];
+
+    if (memop < 0) {
+        gen_exception_illegal(ctx);
+        return;
+    }
+
+    tcg_gen_qemu_st_tl(dat, t0, ctx->mem_idx, memop);
+    tcg_temp_free(t0);
+    tcg_temp_free(dat);
+}
+
+static void gen_fp_load(DisasContext *ctx, uint32_t opc, int rd,
+        int rs1, target_long imm)
+{
+    TCGv t0;
+
+    if (!(ctx->flags & TB_FLAGS_FP_ENABLE)) {
+        gen_exception_illegal(ctx);
+        return;
+    }
+
+    t0 = tcg_temp_new();
+    gen_get_gpr(t0, rs1);
+    tcg_gen_addi_tl(t0, t0, imm);
+
+    switch (opc) {
+    case OPC_RISC_FLW:
+        tcg_gen_qemu_ld_i64(cpu_fpr[rd], t0, ctx->mem_idx, MO_TEUL);
+        /* RISC-V requires NaN-boxing of narrower width floating point values */
+        tcg_gen_ori_i64(cpu_fpr[rd], cpu_fpr[rd], 0xffffffff00000000ULL);
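+        /* e.g. a loaded 1.0f is held as 0xffffffff3f800000 */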
+        break;
+    case OPC_RISC_FLD:
+        tcg_gen_qemu_ld_i64(cpu_fpr[rd], t0, ctx->mem_idx, MO_TEQ);
+        break;
+    default:
+        gen_exception_illegal(ctx);
+        break;
+    }
+    tcg_temp_free(t0);
+}
+
+static void gen_fp_store(DisasContext *ctx, uint32_t opc, int rs1,
+        int rs2, target_long imm)
+{
+    TCGv t0;
+
+    if (!(ctx->flags & TB_FLAGS_FP_ENABLE)) {
+        gen_exception_illegal(ctx);
+        return;
+    }
+
+    t0 = tcg_temp_new();
+    gen_get_gpr(t0, rs1);
+    tcg_gen_addi_tl(t0, t0, imm);
+
+    switch (opc) {
+    case OPC_RISC_FSW:
+        tcg_gen_qemu_st_i64(cpu_fpr[rs2], t0, ctx->mem_idx, MO_TEUL);
+        break;
+    case OPC_RISC_FSD:
+        tcg_gen_qemu_st_i64(cpu_fpr[rs2], t0, ctx->mem_idx, MO_TEQ);
+        break;
+    default:
+        gen_exception_illegal(ctx);
+        break;
+    }
+
+    tcg_temp_free(t0);
+}
+
+static void gen_atomic(DisasContext *ctx, uint32_t opc,
+                      int rd, int rs1, int rs2)
+{
+    TCGv src1, src2, dat;
+    TCGLabel *l1, *l2;
+    TCGMemOp mop;
+    TCGCond cond;
+    bool aq, rl;
+
+    /* Extract the size of the atomic operation.  */
+    switch (extract32(opc, 12, 3)) {
+    case 2: /* 32-bit */
+        mop = MO_ALIGN | MO_TESL;
+        break;
+#if defined(TARGET_RISCV64)
+    case 3: /* 64-bit */
+        mop = MO_ALIGN | MO_TEQ;
+        break;
+#endif
+    default:
+        gen_exception_illegal(ctx);
+        return;
+    }
+    rl = extract32(opc, 25, 1);
+    aq = extract32(opc, 26, 1);
+
+    src1 = tcg_temp_new();
+    src2 = tcg_temp_new();
+
+    switch (MASK_OP_ATOMIC_NO_AQ_RL_SZ(opc)) {
+    case OPC_RISC_LR:
+        /* Put addr in load_res, data in load_val.  */
+        gen_get_gpr(src1, rs1);
+        if (rl) {
+            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
+        }
+        tcg_gen_qemu_ld_tl(load_val, src1, ctx->mem_idx, mop);
+        if (aq) {
+            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
+        }
+        tcg_gen_mov_tl(load_res, src1);
+        gen_set_gpr(rd, load_val);
+        break;
+
+    case OPC_RISC_SC:
+        l1 = gen_new_label();
+        l2 = gen_new_label();
+        dat = tcg_temp_new();
+
+        gen_get_gpr(src1, rs1);
+        tcg_gen_brcond_tl(TCG_COND_NE, load_res, src1, l1);
+
+        gen_get_gpr(src2, rs2);
+        /* Note that the TCG atomic primitives are sequentially consistent,
+           so we can ignore AQ/RL along this path.  */
+        tcg_gen_atomic_cmpxchg_tl(src1, load_res, load_val, src2,
+                                  ctx->mem_idx, mop);
+        tcg_gen_setcond_tl(TCG_COND_NE, dat, src1, load_val);
+        gen_set_gpr(rd, dat);
+        tcg_gen_br(l2);
+
+        gen_set_label(l1);
+        /* Address comparison failure.  However, we still need to
+           provide the memory barrier implied by AQ/RL.  */
+        tcg_gen_mb(TCG_MO_ALL + aq * TCG_BAR_LDAQ + rl * TCG_BAR_STRL);
+        tcg_gen_movi_tl(dat, 1);
+        gen_set_gpr(rd, dat);
+
+        gen_set_label(l2);
+        tcg_temp_free(dat);
+        break;
+
+    case OPC_RISC_AMOSWAP:
+        /* Note that the TCG atomic primitives are sequentially consistent,
+           so we can ignore AQ/RL along this path.  */
+        gen_get_gpr(src1, rs1);
+        gen_get_gpr(src2, rs2);
+        tcg_gen_atomic_xchg_tl(src2, src1, src2, ctx->mem_idx, mop);
+        gen_set_gpr(rd, src2);
+        break;
+    case OPC_RISC_AMOADD:
+        gen_get_gpr(src1, rs1);
+        gen_get_gpr(src2, rs2);
+        tcg_gen_atomic_fetch_add_tl(src2, src1, src2, ctx->mem_idx, mop);
+        gen_set_gpr(rd, src2);
+        break;
+    case OPC_RISC_AMOXOR:
+        gen_get_gpr(src1, rs1);
+        gen_get_gpr(src2, rs2);
+        tcg_gen_atomic_fetch_xor_tl(src2, src1, src2, ctx->mem_idx, mop);
+        gen_set_gpr(rd, src2);
+        break;
+    case OPC_RISC_AMOAND:
+        gen_get_gpr(src1, rs1);
+        gen_get_gpr(src2, rs2);
+        tcg_gen_atomic_fetch_and_tl(src2, src1, src2, ctx->mem_idx, mop);
+        gen_set_gpr(rd, src2);
+        break;
+    case OPC_RISC_AMOOR:
+        gen_get_gpr(src1, rs1);
+        gen_get_gpr(src2, rs2);
+        tcg_gen_atomic_fetch_or_tl(src2, src1, src2, ctx->mem_idx, mop);
+        gen_set_gpr(rd, src2);
+        break;
+
+    case OPC_RISC_AMOMIN:
+        cond = TCG_COND_LT;
+        goto do_minmax;
+    case OPC_RISC_AMOMAX:
+        cond = TCG_COND_GT;
+        goto do_minmax;
+    case OPC_RISC_AMOMINU:
+        cond = TCG_COND_LTU;
+        goto do_minmax;
+    case OPC_RISC_AMOMAXU:
+        cond = TCG_COND_GTU;
+        goto do_minmax;
+    do_minmax:
+        /* Handle the RL barrier.  The AQ barrier is handled along the
+           parallel path by the SC atomic cmpxchg.  On the serial path,
+           of course, barriers do not matter.  */
+        if (rl) {
+            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
+        }
+        if (tb_cflags(ctx->tb) & CF_PARALLEL) {
+            l1 = gen_new_label();
+            gen_set_label(l1);
+        } else {
+            l1 = NULL;
+        }
+
+        gen_get_gpr(src1, rs1);
+        gen_get_gpr(src2, rs2);
+        if ((mop & MO_SSIZE) == MO_SL) {
+            /* Sign-extend the register comparison input.  */
+            tcg_gen_ext32s_tl(src2, src2);
+        }
+        dat = tcg_temp_local_new();
+        tcg_gen_qemu_ld_tl(dat, src1, ctx->mem_idx, mop);
+        tcg_gen_movcond_tl(cond, src2, dat, src2, dat, src2);
+
+        if (tb_cflags(ctx->tb) & CF_PARALLEL) {
+            /* Parallel context.  Make this operation atomic by verifying
+               that the memory didn't change while we computed the result.  */
+            tcg_gen_atomic_cmpxchg_tl(src2, src1, dat, src2, ctx->mem_idx, mop);
+
+            /* If the cmpxchg failed, retry. */
+            /* ??? There is an assumption here that this will eventually
+               succeed, such that we don't live-lock.  This is not unlike
+               a similar loop that the compiler would generate for e.g.
+               __atomic_fetch_and_xor, so don't worry about it.  */
+            tcg_gen_brcond_tl(TCG_COND_NE, dat, src2, l1);
+        } else {
+            /* Serial context.  Directly store the result.  */
+            tcg_gen_qemu_st_tl(src2, src1, ctx->mem_idx, mop);
+        }
+        gen_set_gpr(rd, dat);
+        tcg_temp_free(dat);
+        break;
+
+    default:
+        gen_exception_illegal(ctx);
+        break;
+    }
+
+    tcg_temp_free(src1);
+    tcg_temp_free(src2);
+}
+
+static void gen_set_rm(DisasContext *ctx, int rm)
+{
+    TCGv_i32 t0;
+
+    if (ctx->frm == rm) {
+        return;
+    }
+    ctx->frm = rm;
+    t0 = tcg_const_i32(rm);
+    gen_helper_set_rounding_mode(cpu_env, t0);
+    tcg_temp_free_i32(t0);
+}
+
+static void gen_fp_fmadd(DisasContext *ctx, uint32_t opc, int rd,
+                         int rs1, int rs2, int rs3, int rm)
+{
+    switch (opc) {
+    case OPC_RISC_FMADD_S:
+        gen_set_rm(ctx, rm);
+        gen_helper_fmadd_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1],
+                           cpu_fpr[rs2], cpu_fpr[rs3]);
+        break;
+    case OPC_RISC_FMADD_D:
+        gen_set_rm(ctx, rm);
+        gen_helper_fmadd_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1],
+                           cpu_fpr[rs2], cpu_fpr[rs3]);
+        break;
+    default:
+        gen_exception_illegal(ctx);
+        break;
+    }
+}
+
+static void gen_fp_fmsub(DisasContext *ctx, uint32_t opc, int rd,
+                         int rs1, int rs2, int rs3, int rm)
+{
+    switch (opc) {
+    case OPC_RISC_FMSUB_S:
+        gen_set_rm(ctx, rm);
+        gen_helper_fmsub_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1],
+                           cpu_fpr[rs2], cpu_fpr[rs3]);
+        break;
+    case OPC_RISC_FMSUB_D:
+        gen_set_rm(ctx, rm);
+        gen_helper_fmsub_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1],
+                           cpu_fpr[rs2], cpu_fpr[rs3]);
+        break;
+    default:
+        gen_exception_illegal(ctx);
+        break;
+    }
+}
+
+static void gen_fp_fnmsub(DisasContext *ctx, uint32_t opc, int rd,
+                          int rs1, int rs2, int rs3, int rm)
+{
+    switch (opc) {
+    case OPC_RISC_FNMSUB_S:
+        gen_set_rm(ctx, rm);
+        gen_helper_fnmsub_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1],
+                            cpu_fpr[rs2], cpu_fpr[rs3]);
+        break;
+    case OPC_RISC_FNMSUB_D:
+        gen_set_rm(ctx, rm);
+        gen_helper_fnmsub_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1],
+                            cpu_fpr[rs2], cpu_fpr[rs3]);
+        break;
+    default:
+        gen_exception_illegal(ctx);
+        break;
+    }
+}
+
+static void gen_fp_fnmadd(DisasContext *ctx, uint32_t opc, int rd,
+                          int rs1, int rs2, int rs3, int rm)
+{
+    switch (opc) {
+    case OPC_RISC_FNMADD_S:
+        gen_set_rm(ctx, rm);
+        gen_helper_fnmadd_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1],
+                            cpu_fpr[rs2], cpu_fpr[rs3]);
+        break;
+    case OPC_RISC_FNMADD_D:
+        gen_set_rm(ctx, rm);
+        gen_helper_fnmadd_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1],
+                            cpu_fpr[rs2], cpu_fpr[rs3]);
+        break;
+    default:
+        gen_exception_illegal(ctx);
+        break;
+    }
+}
+
+static void gen_fp_arith(DisasContext *ctx, uint32_t opc, int rd,
+                         int rs1, int rs2, int rm)
+{
+    TCGv t0 = NULL;
+
+    if (!(ctx->flags & TB_FLAGS_FP_ENABLE)) {
+        goto do_illegal;
+    }
+
+    switch (opc) {
+    case OPC_RISC_FADD_S:
+        gen_set_rm(ctx, rm);
+        gen_helper_fadd_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
+        break;
+    case OPC_RISC_FSUB_S:
+        gen_set_rm(ctx, rm);
+        gen_helper_fsub_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
+        break;
+    case OPC_RISC_FMUL_S:
+        gen_set_rm(ctx, rm);
+        gen_helper_fmul_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
+        break;
+    case OPC_RISC_FDIV_S:
+        gen_set_rm(ctx, rm);
+        gen_helper_fdiv_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
+        break;
+    case OPC_RISC_FSQRT_S:
+        gen_set_rm(ctx, rm);
+        gen_helper_fsqrt_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1]);
+        break;
+    case OPC_RISC_FSGNJ_S:
+        gen_fsgnj(ctx, rd, rs1, rs2, rm, INT32_MIN);
+        break;
+
+    case OPC_RISC_FMIN_S:
+        /* also handles: OPC_RISC_FMAX_S */
+        switch (rm) {
+        case 0x0:
+            gen_helper_fmin_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
+            break;
+        case 0x1:
+            gen_helper_fmax_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
+            break;
+        default:
+            goto do_illegal;
+        }
+        break;
+
+    case OPC_RISC_FEQ_S:
+        /* also handles: OPC_RISC_FLT_S, OPC_RISC_FLE_S */
+        t0 = tcg_temp_new();
+        switch (rm) {
+        case 0x0:
+            gen_helper_fle_s(t0, cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
+            break;
+        case 0x1:
+            gen_helper_flt_s(t0, cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
+            break;
+        case 0x2:
+            gen_helper_feq_s(t0, cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
+            break;
+        default:
+            goto do_illegal;
+        }
+        gen_set_gpr(rd, t0);
+        tcg_temp_free(t0);
+        break;
+
+    case OPC_RISC_FCVT_W_S:
+        /* also OPC_RISC_FCVT_WU_S, OPC_RISC_FCVT_L_S, OPC_RISC_FCVT_LU_S */
+        t0 = tcg_temp_new();
+        switch (rs2) {
+        case 0: /* FCVT_W_S */
+            gen_set_rm(ctx, rm);
+            gen_helper_fcvt_w_s(t0, cpu_env, cpu_fpr[rs1]);
+            break;
+        case 1: /* FCVT_WU_S */
+            gen_set_rm(ctx, rm);
+            gen_helper_fcvt_wu_s(t0, cpu_env, cpu_fpr[rs1]);
+            break;
+#if defined(TARGET_RISCV64)
+        case 2: /* FCVT_L_S */
+            gen_set_rm(ctx, rm);
+            gen_helper_fcvt_l_s(t0, cpu_env, cpu_fpr[rs1]);
+            break;
+        case 3: /* FCVT_LU_S */
+            gen_set_rm(ctx, rm);
+            gen_helper_fcvt_lu_s(t0, cpu_env, cpu_fpr[rs1]);
+            break;
+#endif
+        default:
+            goto do_illegal;
+        }
+        gen_set_gpr(rd, t0);
+        tcg_temp_free(t0);
+        break;
+
+    case OPC_RISC_FCVT_S_W:
+        /* also OPC_RISC_FCVT_S_WU, OPC_RISC_FCVT_S_L, OPC_RISC_FCVT_S_LU */
+        t0 = tcg_temp_new();
+        gen_get_gpr(t0, rs1);
+        switch (rs2) {
+        case 0: /* FCVT_S_W */
+            gen_set_rm(ctx, rm);
+            gen_helper_fcvt_s_w(cpu_fpr[rd], cpu_env, t0);
+            break;
+        case 1: /* FCVT_S_WU */
+            gen_set_rm(ctx, rm);
+            gen_helper_fcvt_s_wu(cpu_fpr[rd], cpu_env, t0);
+            break;
+#if defined(TARGET_RISCV64)
+        case 2: /* FCVT_S_L */
+            gen_set_rm(ctx, rm);
+            gen_helper_fcvt_s_l(cpu_fpr[rd], cpu_env, t0);
+            break;
+        case 3: /* FCVT_S_LU */
+            gen_set_rm(ctx, rm);
+            gen_helper_fcvt_s_lu(cpu_fpr[rd], cpu_env, t0);
+            break;
+#endif
+        default:
+            goto do_illegal;
+        }
+        tcg_temp_free(t0);
+        break;
+
+    case OPC_RISC_FMV_X_S:
+        /* also OPC_RISC_FCLASS_S */
+        t0 = tcg_temp_new();
+        switch (rm) {
+        case 0: /* FMV */
+#if defined(TARGET_RISCV64)
+            tcg_gen_ext32s_tl(t0, cpu_fpr[rs1]);
+#else
+            tcg_gen_extrl_i64_i32(t0, cpu_fpr[rs1]);
+#endif
+            break;
+        case 1:
+            gen_helper_fclass_s(t0, cpu_fpr[rs1]);
+            break;
+        default:
+            goto do_illegal;
+        }
+        gen_set_gpr(rd, t0);
+        tcg_temp_free(t0);
+        break;
+
+    case OPC_RISC_FMV_S_X:
+        t0 = tcg_temp_new();
+        gen_get_gpr(t0, rs1);
+#if defined(TARGET_RISCV64)
+        tcg_gen_mov_i64(cpu_fpr[rd], t0);
+#else
+        tcg_gen_extu_i32_i64(cpu_fpr[rd], t0);
+#endif
+        tcg_temp_free(t0);
+        break;
+
+    /* double */
+    case OPC_RISC_FADD_D:
+        gen_set_rm(ctx, rm);
+        gen_helper_fadd_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
+        break;
+    case OPC_RISC_FSUB_D:
+        gen_set_rm(ctx, rm);
+        gen_helper_fsub_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
+        break;
+    case OPC_RISC_FMUL_D:
+        gen_set_rm(ctx, rm);
+        gen_helper_fmul_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
+        break;
+    case OPC_RISC_FDIV_D:
+        gen_set_rm(ctx, rm);
+        gen_helper_fdiv_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
+        break;
+    case OPC_RISC_FSQRT_D:
+        gen_set_rm(ctx, rm);
+        gen_helper_fsqrt_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1]);
+        break;
+    case OPC_RISC_FSGNJ_D:
+        gen_fsgnj(ctx, rd, rs1, rs2, rm, INT64_MIN);
+        break;
+
+    case OPC_RISC_FMIN_D:
+        /* also OPC_RISC_FMAX_D */
+        switch (rm) {
+        case 0:
+            gen_helper_fmin_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
+            break;
+        case 1:
+            gen_helper_fmax_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
+            break;
+        default:
+            goto do_illegal;
+        }
+        break;
+
+    case OPC_RISC_FCVT_S_D:
+        switch (rs2) {
+        case 1:
+            gen_set_rm(ctx, rm);
+            gen_helper_fcvt_s_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1]);
+            break;
+        default:
+            goto do_illegal;
+        }
+        break;
+
+    case OPC_RISC_FCVT_D_S:
+        switch (rs2) {
+        case 0:
+            gen_set_rm(ctx, rm);
+            gen_helper_fcvt_d_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1]);
+            break;
+        default:
+            goto do_illegal;
+        }
+        break;
+
+    case OPC_RISC_FEQ_D:
+        /* also OPC_RISC_FLT_D, OPC_RISC_FLE_D */
+        t0 = tcg_temp_new();
+        switch (rm) {
+        case 0:
+            gen_helper_fle_d(t0, cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
+            break;
+        case 1:
+            gen_helper_flt_d(t0, cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
+            break;
+        case 2:
+            gen_helper_feq_d(t0, cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
+            break;
+        default:
+            goto do_illegal;
+        }
+        gen_set_gpr(rd, t0);
+        tcg_temp_free(t0);
+        break;
+
+    case OPC_RISC_FCVT_W_D:
+        /* also OPC_RISC_FCVT_WU_D, OPC_RISC_FCVT_L_D, OPC_RISC_FCVT_LU_D */
+        t0 = tcg_temp_new();
+        switch (rs2) {
+        case 0:
+            gen_set_rm(ctx, rm);
+            gen_helper_fcvt_w_d(t0, cpu_env, cpu_fpr[rs1]);
+            break;
+        case 1:
+            gen_set_rm(ctx, rm);
+            gen_helper_fcvt_wu_d(t0, cpu_env, cpu_fpr[rs1]);
+            break;
+#if defined(TARGET_RISCV64)
+        case 2:
+            gen_set_rm(ctx, rm);
+            gen_helper_fcvt_l_d(t0, cpu_env, cpu_fpr[rs1]);
+            break;
+        case 3:
+            gen_set_rm(ctx, rm);
+            gen_helper_fcvt_lu_d(t0, cpu_env, cpu_fpr[rs1]);
+            break;
+#endif
+        default:
+            goto do_illegal;
+        }
+        gen_set_gpr(rd, t0);
+        tcg_temp_free(t0);
+        break;
+
+    case OPC_RISC_FCVT_D_W:
+        /* also OPC_RISC_FCVT_D_WU, OPC_RISC_FCVT_D_L, OPC_RISC_FCVT_D_LU */
+        t0 = tcg_temp_new();
+        gen_get_gpr(t0, rs1);
+        switch (rs2) {
+        case 0:
+            gen_set_rm(ctx, rm);
+            gen_helper_fcvt_d_w(cpu_fpr[rd], cpu_env, t0);
+            break;
+        case 1:
+            gen_set_rm(ctx, rm);
+            gen_helper_fcvt_d_wu(cpu_fpr[rd], cpu_env, t0);
+            break;
+#if defined(TARGET_RISCV64)
+        case 2:
+            gen_set_rm(ctx, rm);
+            gen_helper_fcvt_d_l(cpu_fpr[rd], cpu_env, t0);
+            break;
+        case 3:
+            gen_set_rm(ctx, rm);
+            gen_helper_fcvt_d_lu(cpu_fpr[rd], cpu_env, t0);
+            break;
+#endif
+        default:
+            goto do_illegal;
+        }
+        tcg_temp_free(t0);
+        break;
+
+#if defined(TARGET_RISCV64)
+    case OPC_RISC_FMV_X_D:
+        /* also OPC_RISC_FCLASS_D */
+        switch (rm) {
+        case 0: /* FMV */
+            gen_set_gpr(rd, cpu_fpr[rs1]);
+            break;
+        case 1:
+            t0 = tcg_temp_new();
+            gen_helper_fclass_d(t0, cpu_fpr[rs1]);
+            gen_set_gpr(rd, t0);
+            tcg_temp_free(t0);
+            break;
+        default:
+            goto do_illegal;
+        }
+        break;
+
+    case OPC_RISC_FMV_D_X:
+        t0 = tcg_temp_new();
+        gen_get_gpr(t0, rs1);
+        tcg_gen_mov_tl(cpu_fpr[rd], t0);
+        tcg_temp_free(t0);
+        break;
+#endif
+
+    default:
+    do_illegal:
+        if (t0) {
+            tcg_temp_free(t0);
+        }
+        gen_exception_illegal(ctx);
+        break;
+    }
+}
+
+static void gen_system(CPURISCVState *env, DisasContext *ctx, uint32_t opc,
+                      int rd, int rs1, int csr)
+{
+    TCGv source1, csr_store, dest, rs1_pass, imm_rs1;
+    source1 = tcg_temp_new();
+    csr_store = tcg_temp_new();
+    dest = tcg_temp_new();
+    rs1_pass = tcg_temp_new();
+    imm_rs1 = tcg_temp_new();
+    gen_get_gpr(source1, rs1);
+    tcg_gen_movi_tl(cpu_pc, ctx->pc);
+    tcg_gen_movi_tl(rs1_pass, rs1);
+    tcg_gen_movi_tl(csr_store, csr); /* copy into temp reg to feed to helper */
+
+#ifndef CONFIG_USER_ONLY
+    /* Extract funct7 value and check whether it matches SFENCE.VMA
+       (funct7 == 0b0001001, i.e. csr >> 5 == 9) */
+    if ((opc == OPC_RISC_ECALL) && ((csr >> 5) == 9)) {
+        /* sfence.vma */
+        /* TODO: handle ASID specific fences */
+        gen_helper_tlb_flush(cpu_env);
+        return;
+    }
+#endif
+
+    switch (opc) {
+    case OPC_RISC_ECALL:
+        switch (csr) {
+        case 0x0: /* ECALL */
+            /* always generates U-level ECALL, fixed in do_interrupt handler */
+            generate_exception(ctx, RISCV_EXCP_U_ECALL);
+            tcg_gen_exit_tb(0); /* no chaining */
+            ctx->bstate = BS_BRANCH;
+            break;
+        case 0x1: /* EBREAK */
+            generate_exception(ctx, RISCV_EXCP_BREAKPOINT);
+            tcg_gen_exit_tb(0); /* no chaining */
+            ctx->bstate = BS_BRANCH;
+            break;
+#ifndef CONFIG_USER_ONLY
+        case 0x002: /* URET */
+            gen_exception_illegal(ctx);
+            break;
+        case 0x102: /* SRET */
+            if (riscv_has_ext(env, RVS)) {
+                gen_helper_sret(cpu_pc, cpu_env, cpu_pc);
+                tcg_gen_exit_tb(0); /* no chaining */
+                ctx->bstate = BS_BRANCH;
+            } else {
+                gen_exception_illegal(ctx);
+            }
+            break;
+        case 0x202: /* HRET */
+            gen_exception_illegal(ctx);
+            break;
+        case 0x302: /* MRET */
+            gen_helper_mret(cpu_pc, cpu_env, cpu_pc);
+            tcg_gen_exit_tb(0); /* no chaining */
+            ctx->bstate = BS_BRANCH;
+            break;
+        case 0x7b2: /* DRET */
+            gen_exception_illegal(ctx);
+            break;
+        case 0x105: /* WFI */
+            tcg_gen_movi_tl(cpu_pc, ctx->next_pc);
+            gen_helper_wfi(cpu_env);
+            break;
+        case 0x104: /* SFENCE.VM */
+            gen_helper_tlb_flush(cpu_env);
+            break;
+#endif
+        default:
+            gen_exception_illegal(ctx);
+            break;
+        }
+        break;
+    default:
+        tcg_gen_movi_tl(imm_rs1, rs1);
+        switch (opc) {
+        case OPC_RISC_CSRRW:
+            gen_helper_csrrw(dest, cpu_env, source1, csr_store);
+            break;
+        case OPC_RISC_CSRRS:
+            gen_helper_csrrs(dest, cpu_env, source1, csr_store, rs1_pass);
+            break;
+        case OPC_RISC_CSRRC:
+            gen_helper_csrrc(dest, cpu_env, source1, csr_store, rs1_pass);
+            break;
+        case OPC_RISC_CSRRWI:
+            gen_helper_csrrw(dest, cpu_env, imm_rs1, csr_store);
+            break;
+        case OPC_RISC_CSRRSI:
+            gen_helper_csrrs(dest, cpu_env, imm_rs1, csr_store, rs1_pass);
+            break;
+        case OPC_RISC_CSRRCI:
+            gen_helper_csrrc(dest, cpu_env, imm_rs1, csr_store, rs1_pass);
+            break;
+        default:
+            gen_exception_illegal(ctx);
+            return;
+        }
+        gen_set_gpr(rd, dest);
+        /* end tb since we may be changing priv modes, to get mmu_index right */
+        tcg_gen_movi_tl(cpu_pc, ctx->next_pc);
+        tcg_gen_exit_tb(0); /* no chaining */
+        ctx->bstate = BS_BRANCH;
+        break;
+    }
+    tcg_temp_free(source1);
+    tcg_temp_free(csr_store);
+    tcg_temp_free(dest);
+    tcg_temp_free(rs1_pass);
+    tcg_temp_free(imm_rs1);
+}
+
+static void decode_RV32_64C0(DisasContext *ctx)
+{
+    uint8_t funct3 = extract32(ctx->opcode, 13, 3);
+    uint8_t rd_rs2 = GET_C_RS2S(ctx->opcode);
+    uint8_t rs1s = GET_C_RS1S(ctx->opcode);
+
+    switch (funct3) {
+    case 0:
+        /* illegal */
+        if (ctx->opcode == 0) {
+            gen_exception_illegal(ctx);
+        } else {
+            /* C.ADDI4SPN -> addi rd', x2, zimm[9:2]*/
+            gen_arith_imm(ctx, OPC_RISC_ADDI, rd_rs2, 2,
+                          GET_C_ADDI4SPN_IMM(ctx->opcode));
+        }
+        break;
+    case 1:
+        /* C.FLD -> fld rd', offset[7:3](rs1')*/
+        gen_fp_load(ctx, OPC_RISC_FLD, rd_rs2, rs1s,
+                    GET_C_LD_IMM(ctx->opcode));
+        /* C.LQ(RV128) */
+        break;
+    case 2:
+        /* C.LW -> lw rd', offset[6:2](rs1') */
+        gen_load(ctx, OPC_RISC_LW, rd_rs2, rs1s,
+                 GET_C_LW_IMM(ctx->opcode));
+        break;
+    case 3:
+#if defined(TARGET_RISCV64)
+        /* C.LD(RV64/128) -> ld rd', offset[7:3](rs1')*/
+        gen_load(ctx, OPC_RISC_LD, rd_rs2, rs1s,
+                 GET_C_LD_IMM(ctx->opcode));
+#else
+        /* C.FLW (RV32) -> flw rd', offset[6:2](rs1')*/
+        gen_fp_load(ctx, OPC_RISC_FLW, rd_rs2, rs1s,
+                    GET_C_LW_IMM(ctx->opcode));
+#endif
+        break;
+    case 4:
+        /* reserved */
+        gen_exception_illegal(ctx);
+        break;
+    case 5:
+        /* C.FSD(RV32/64) -> fsd rs2', offset[7:3](rs1') */
+        gen_fp_store(ctx, OPC_RISC_FSD, rs1s, rd_rs2,
+                     GET_C_LD_IMM(ctx->opcode));
+        /* C.SQ (RV128) */
+        break;
+    case 6:
+        /* C.SW -> sw rs2', offset[6:2](rs1')*/
+        gen_store(ctx, OPC_RISC_SW, rs1s, rd_rs2,
+                  GET_C_LW_IMM(ctx->opcode));
+        break;
+    case 7:
+#if defined(TARGET_RISCV64)
+        /* C.SD (RV64/128) -> sd rs2', offset[7:3](rs1')*/
+        gen_store(ctx, OPC_RISC_SD, rs1s, rd_rs2,
+                  GET_C_LD_IMM(ctx->opcode));
+#else
+        /* C.FSW (RV32) -> fsw rs2', offset[6:2](rs1')*/
+        gen_fp_store(ctx, OPC_RISC_FSW, rs1s, rd_rs2,
+                     GET_C_LW_IMM(ctx->opcode));
+#endif
+        break;
+    }
+}
+
+static void decode_RV32_64C1(CPURISCVState *env, DisasContext *ctx)
+{
+    uint8_t funct3 = extract32(ctx->opcode, 13, 3);
+    uint8_t rd_rs1 = GET_C_RS1(ctx->opcode);
+    uint8_t rs1s, rs2s;
+    uint8_t funct2;
+
+    switch (funct3) {
+    case 0:
+        /* C.ADDI -> addi rd, rd, nzimm[5:0] */
+        gen_arith_imm(ctx, OPC_RISC_ADDI, rd_rs1, rd_rs1,
+                      GET_C_IMM(ctx->opcode));
+        break;
+    case 1:
+#if defined(TARGET_RISCV64)
+        /* C.ADDIW (RV64/128) -> addiw rd, rd, imm[5:0]*/
+        gen_arith_imm(ctx, OPC_RISC_ADDIW, rd_rs1, rd_rs1,
+                      GET_C_IMM(ctx->opcode));
+#else
+        /* C.JAL(RV32) -> jal x1, offset[11:1] */
+        gen_jal(env, ctx, 1, GET_C_J_IMM(ctx->opcode));
+#endif
+        break;
+    case 2:
+        /* C.LI -> addi rd, x0, imm[5:0]*/
+        gen_arith_imm(ctx, OPC_RISC_ADDI, rd_rs1, 0, GET_C_IMM(ctx->opcode));
+        break;
+    case 3:
+        if (rd_rs1 == 2) {
+            /* C.ADDI16SP -> addi x2, x2, nzimm[9:4]*/
+            gen_arith_imm(ctx, OPC_RISC_ADDI, 2, 2,
+                          GET_C_ADDI16SP_IMM(ctx->opcode));
+        } else if (rd_rs1 != 0) {
+            /* C.LUI (rd not in {0, 2}) -> lui rd, nzimm[17:12] */
+            tcg_gen_movi_tl(cpu_gpr[rd_rs1],
+                            GET_C_IMM(ctx->opcode) << 12);
+        }
+        break;
+    case 4:
+        funct2 = extract32(ctx->opcode, 10, 2);
+        rs1s = GET_C_RS1S(ctx->opcode);
+        switch (funct2) {
+        case 0: /* C.SRLI (RV32/64) -> srli rd', rd', shamt[5:0] */
+            gen_arith_imm(ctx, OPC_RISC_SHIFT_RIGHT_I, rs1s, rs1s,
+                               GET_C_ZIMM(ctx->opcode));
+            /* C.SRLI64(RV128) */
+            break;
+        case 1:
+            /* C.SRAI -> srai rd', rd', shamt[5:0]*/
+            gen_arith_imm(ctx, OPC_RISC_SHIFT_RIGHT_I, rs1s, rs1s,
+                            GET_C_ZIMM(ctx->opcode) | 0x400);
+            /* C.SRAI64(RV128) */
+            break;
+        case 2:
+            /* C.ANDI -> andi rd', rd', imm[5:0]*/
+            gen_arith_imm(ctx, OPC_RISC_ANDI, rs1s, rs1s,
+                          GET_C_IMM(ctx->opcode));
+            break;
+        case 3:
+            funct2 = extract32(ctx->opcode, 5, 2);
+            rs2s = GET_C_RS2S(ctx->opcode);
+            switch (funct2) {
+            case 0:
+                /* C.SUB -> sub rd', rd', rs2' */
+                if (extract32(ctx->opcode, 12, 1) == 0) {
+                    gen_arith(ctx, OPC_RISC_SUB, rs1s, rs1s, rs2s);
+                }
+#if defined(TARGET_RISCV64)
+                else {
+                    /* C.SUBW (RV64/128) */
+                    gen_arith(ctx, OPC_RISC_SUBW, rs1s, rs1s, rs2s);
+                }
+#endif
+                break;
+            case 1:
+                /* C.XOR -> xor rs1', rs1', rs2' */
+                if (extract32(ctx->opcode, 12, 1) == 0) {
+                    gen_arith(ctx, OPC_RISC_XOR, rs1s, rs1s, rs2s);
+                }
+#if defined(TARGET_RISCV64)
+                else {
+                    /* C.ADDW (RV64/128) */
+                    gen_arith(ctx, OPC_RISC_ADDW, rs1s, rs1s, rs2s);
+                }
+#endif
+                break;
+            case 2:
+                /* C.OR -> or rs1', rs1', rs2' */
+                gen_arith(ctx, OPC_RISC_OR, rs1s, rs1s, rs2s);
+                break;
+            case 3:
+                /* C.AND -> and rs1', rs1', rs2' */
+                gen_arith(ctx, OPC_RISC_AND, rs1s, rs1s, rs2s);
+                break;
+            }
+            break;
+        }
+        break;
+    case 5:
+        /* C.J -> jal x0, offset[11:1]*/
+        gen_jal(env, ctx, 0, GET_C_J_IMM(ctx->opcode));
+        break;
+    case 6:
+        /* C.BEQZ -> beq rs1', x0, offset[8:1]*/
+        rs1s = GET_C_RS1S(ctx->opcode);
+        gen_branch(env, ctx, OPC_RISC_BEQ, rs1s, 0, GET_C_B_IMM(ctx->opcode));
+        break;
+    case 7:
+        /* C.BNEZ -> bne rs1', x0, offset[8:1]*/
+        rs1s = GET_C_RS1S(ctx->opcode);
+        gen_branch(env, ctx, OPC_RISC_BNE, rs1s, 0, GET_C_B_IMM(ctx->opcode));
+        break;
+    }
+}
+
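+/*
+ * Decode RVC quadrant 2 (instruction bits [1:0] == 10): shifts and
+ * stack-pointer-relative loads/stores, plus the C.JR/C.MV/C.EBREAK/
+ * C.JALR/C.ADD group. These forms use full 5-bit register fields.
+ */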
+static void decode_RV32_64C2(CPURISCVState *env, DisasContext *ctx)
+{
+    uint8_t rd, rs2;
+    uint8_t funct3 = extract32(ctx->opcode, 13, 3);
+
+    rd = GET_RD(ctx->opcode);
+
+    switch (funct3) {
+    case 0:
+        /* C.SLLI -> slli rd, rd, shamt[5:0]; C.SLLI64 (RV128) */
+        gen_arith_imm(ctx, OPC_RISC_SLLI, rd, rd, GET_C_ZIMM(ctx->opcode));
+        break;
+    case 1: /* C.FLDSP(RV32/64DC) -> fld rd, offset[8:3](x2) */
+        gen_fp_load(ctx, OPC_RISC_FLD, rd, 2, GET_C_LDSP_IMM(ctx->opcode));
+        break;
+    case 2: /* C.LWSP -> lw rd, offset[7:2](x2) */
+        gen_load(ctx, OPC_RISC_LW, rd, 2, GET_C_LWSP_IMM(ctx->opcode));
+        break;
+    case 3:
+#if defined(TARGET_RISCV64)
+        /* C.LDSP (RV64/128) -> ld rd, offset[8:3](x2) */
+        gen_load(ctx, OPC_RISC_LD, rd, 2, GET_C_LDSP_IMM(ctx->opcode));
+#else
+        /* C.FLWSP(RV32FC) -> flw rd, offset[7:2](x2) */
+        gen_fp_load(ctx, OPC_RISC_FLW, rd, 2, GET_C_LWSP_IMM(ctx->opcode));
+#endif
+        break;
+    case 4:
+        rs2 = GET_C_RS2(ctx->opcode);
+
+        if (extract32(ctx->opcode, 12, 1) == 0) {
+            if (rs2 == 0) {
+                /* C.JR -> jalr x0, rs1, 0*/
+                gen_jalr(env, ctx, OPC_RISC_JALR, 0, rd, 0);
+            } else {
+                /* C.MV -> add rd, x0, rs2 */
+                gen_arith(ctx, OPC_RISC_ADD, rd, 0, rs2);
+            }
+        } else {
+            if (rd == 0) {
+                /* C.EBREAK -> ebreak (funct12 = 1 selects EBREAK within
+                 * the SYSTEM major opcode) */
+                gen_system(env, ctx, OPC_RISC_ECALL, 0, 0, 0x1);
+            } else {
+                if (rs2 == 0) {
+                    /* C.JALR -> jalr x1, rs1, 0*/
+                    gen_jalr(env, ctx, OPC_RISC_JALR, 1, rd, 0);
+                } else {
+                    /* C.ADD -> add rd, rd, rs2 */
+                    gen_arith(ctx, OPC_RISC_ADD, rd, rd, rs2);
+                }
+            }
+        }
+        break;
+    case 5:
+        /* C.FSDSP -> fsd rs2, offset[8:3](x2)*/
+        gen_fp_store(ctx, OPC_RISC_FSD, 2, GET_C_RS2(ctx->opcode),
+                     GET_C_SDSP_IMM(ctx->opcode));
+        /* C.SQSP */
+        break;
+    case 6: /* C.SWSP -> sw rs2, offset[7:2](x2)*/
+        gen_store(ctx, OPC_RISC_SW, 2, GET_C_RS2(ctx->opcode),
+                  GET_C_SWSP_IMM(ctx->opcode));
+        break;
+    case 7:
+#if defined(TARGET_RISCV64)
+        /* C.SDSP (RV64/128) -> sd rs2, offset[8:3](x2) */
+        gen_store(ctx, OPC_RISC_SD, 2, GET_C_RS2(ctx->opcode),
+                  GET_C_SDSP_IMM(ctx->opcode));
+#else
+        /* C.FSWSP(RV32) -> fsw rs2, offset[7:2](x2) */
+        gen_fp_store(ctx, OPC_RISC_FSW, 2, GET_C_RS2(ctx->opcode),
+                     GET_C_SWSP_IMM(ctx->opcode));
+#endif
+        break;
+    }
+}
+
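+/* Dispatch a compressed instruction on its two low (quadrant) bits;
+ * quadrant 3 (0b11) denotes a 32-bit instruction and never reaches here. */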
+static void decode_RV32_64C(CPURISCVState *env, DisasContext *ctx)
+{
+    uint8_t op = extract32(ctx->opcode, 0, 2);
+
+    switch (op) {
+    case 0:
+        decode_RV32_64C0(ctx);
+        break;
+    case 1:
+        decode_RV32_64C1(env, ctx);
+        break;
+    case 2:
+        decode_RV32_64C2(env, ctx);
+        break;
+    }
+}
+
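+/* Decode a full 32-bit instruction, dispatching on the major opcode field. */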
+static void decode_RV32_64G(CPURISCVState *env, DisasContext *ctx)
+{
+    int rs1;
+    int rs2;
+    int rd;
+    uint32_t op;
+    target_long imm;
+
+    /* We do not check for misaligned addresses here: the address should never
+     * be misaligned at this point. Instructions that set the PC must do the
+     * check themselves, since epc must hold the address of the instruction
+     * that caused the misaligned instruction fetch. */
+
+    op = MASK_OP_MAJOR(ctx->opcode);
+    rs1 = GET_RS1(ctx->opcode);
+    rs2 = GET_RS2(ctx->opcode);
+    rd = GET_RD(ctx->opcode);
+    imm = GET_IMM(ctx->opcode);
+
+    switch (op) {
+    case OPC_RISC_LUI:
+        if (rd == 0) {
+            break; /* NOP */
+        }
+        tcg_gen_movi_tl(cpu_gpr[rd], sextract64(ctx->opcode, 12, 20) << 12);
+        break;
+    case OPC_RISC_AUIPC:
+        if (rd == 0) {
+            break; /* NOP */
+        }
+        tcg_gen_movi_tl(cpu_gpr[rd], (sextract64(ctx->opcode, 12, 20) << 12) +
+               ctx->pc);
+        break;
+    case OPC_RISC_JAL:
+        imm = GET_JAL_IMM(ctx->opcode);
+        gen_jal(env, ctx, rd, imm);
+        break;
+    case OPC_RISC_JALR:
+        gen_jalr(env, ctx, MASK_OP_JALR(ctx->opcode), rd, rs1, imm);
+        break;
+    case OPC_RISC_BRANCH:
+        gen_branch(env, ctx, MASK_OP_BRANCH(ctx->opcode), rs1, rs2,
+                   GET_B_IMM(ctx->opcode));
+        break;
+    case OPC_RISC_LOAD:
+        gen_load(ctx, MASK_OP_LOAD(ctx->opcode), rd, rs1, imm);
+        break;
+    case OPC_RISC_STORE:
+        gen_store(ctx, MASK_OP_STORE(ctx->opcode), rs1, rs2,
+                  GET_STORE_IMM(ctx->opcode));
+        break;
+    case OPC_RISC_ARITH_IMM:
+#if defined(TARGET_RISCV64)
+    case OPC_RISC_ARITH_IMM_W:
+#endif
+        if (rd == 0) {
+            break; /* NOP */
+        }
+        gen_arith_imm(ctx, MASK_OP_ARITH_IMM(ctx->opcode), rd, rs1, imm);
+        break;
+    case OPC_RISC_ARITH:
+#if defined(TARGET_RISCV64)
+    case OPC_RISC_ARITH_W:
+#endif
+        if (rd == 0) {
+            break; /* NOP */
+        }
+        gen_arith(ctx, MASK_OP_ARITH(ctx->opcode), rd, rs1, rs2);
+        break;
+    case OPC_RISC_FP_LOAD:
+        gen_fp_load(ctx, MASK_OP_FP_LOAD(ctx->opcode), rd, rs1, imm);
+        break;
+    case OPC_RISC_FP_STORE:
+        gen_fp_store(ctx, MASK_OP_FP_STORE(ctx->opcode), rs1, rs2,
+                     GET_STORE_IMM(ctx->opcode));
+        break;
+    case OPC_RISC_ATOMIC:
+        gen_atomic(ctx, MASK_OP_ATOMIC(ctx->opcode), rd, rs1, rs2);
+        break;
+    case OPC_RISC_FMADD:
+        gen_fp_fmadd(ctx, MASK_OP_FP_FMADD(ctx->opcode), rd, rs1, rs2,
+                     GET_RS3(ctx->opcode), GET_RM(ctx->opcode));
+        break;
+    case OPC_RISC_FMSUB:
+        gen_fp_fmsub(ctx, MASK_OP_FP_FMSUB(ctx->opcode), rd, rs1, rs2,
+                     GET_RS3(ctx->opcode), GET_RM(ctx->opcode));
+        break;
+    case OPC_RISC_FNMSUB:
+        gen_fp_fnmsub(ctx, MASK_OP_FP_FNMSUB(ctx->opcode), rd, rs1, rs2,
+                      GET_RS3(ctx->opcode), GET_RM(ctx->opcode));
+        break;
+    case OPC_RISC_FNMADD:
+        gen_fp_fnmadd(ctx, MASK_OP_FP_FNMADD(ctx->opcode), rd, rs1, rs2,
+                      GET_RS3(ctx->opcode), GET_RM(ctx->opcode));
+        break;
+    case OPC_RISC_FP_ARITH:
+        gen_fp_arith(ctx, MASK_OP_FP_ARITH(ctx->opcode), rd, rs1, rs2,
+                     GET_RM(ctx->opcode));
+        break;
+    case OPC_RISC_FENCE:
+#ifndef CONFIG_USER_ONLY
+        if (ctx->opcode & 0x1000) {
+            /* FENCE.I is a no-op in QEMU; however, we need to end the
+             * translation block so that the updated instruction stream
+             * is refetched and retranslated. */
+            tcg_gen_movi_tl(cpu_pc, ctx->next_pc);
+            tcg_gen_exit_tb(0);
+            ctx->bstate = BS_BRANCH;
+        } else {
+            /* FENCE is a full memory barrier. */
+            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
+        }
+#endif
+        break;
+    case OPC_RISC_SYSTEM:
+        gen_system(env, ctx, MASK_OP_SYSTEM(ctx->opcode), rd, rs1,
+                   (ctx->opcode & 0xFFF00000) >> 20);
+        break;
+    default:
+        gen_exception_illegal(ctx);
+        break;
+    }
+}
+
+static void decode_opc(CPURISCVState *env, DisasContext *ctx)
+{
+    /* Check for a compressed (16-bit) instruction: only full 32-bit
+     * instructions have 0b11 in their two low opcode bits. */
+    if (extract32(ctx->opcode, 0, 2) != 3) {
+        if (!riscv_has_ext(env, RVC)) {
+            gen_exception_illegal(ctx);
+        } else {
+            ctx->next_pc = ctx->pc + 2;
+            decode_RV32_64C(env, ctx);
+        }
+    } else {
+        ctx->next_pc = ctx->pc + 4;
+        decode_RV32_64G(env, ctx);
+    }
+}
+
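+/*
+ * Translate one block: decode from tb->pc until an instruction ends the
+ * block (branch, CSR write, FENCE.I), the next page boundary is reached,
+ * or the instruction/op budget is exhausted.
+ */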
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
+{
+    CPURISCVState *env = cs->env_ptr;
+    DisasContext ctx;
+    target_ulong pc_start;
+    target_ulong next_page_start;
+    int num_insns;
+    int max_insns;
+    pc_start = tb->pc;
+    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
+    ctx.pc = pc_start;
+
+    /* once we have GDB, the rest of the translate.c implementation should be
+       ready for singlestep */
+    ctx.singlestep_enabled = cs->singlestep_enabled;
+
+    ctx.tb = tb;
+    ctx.bstate = BS_NONE;
+    ctx.flags = tb->flags;
+    ctx.mem_idx = tb->flags & TB_FLAGS_MMU_MASK;
+    ctx.frm = -1;  /* unknown rounding mode */
+
+    num_insns = 0;
+    max_insns = tb->cflags & CF_COUNT_MASK;
+    if (max_insns == 0) {
+        max_insns = CF_COUNT_MASK;
+    }
+    if (max_insns > TCG_MAX_INSNS) {
+        max_insns = TCG_MAX_INSNS;
+    }
+    gen_tb_start(tb);
+
+    while (ctx.bstate == BS_NONE) {
+        tcg_gen_insn_start(ctx.pc);
+        num_insns++;
+
+        if (unlikely(cpu_breakpoint_test(cs, ctx.pc, BP_ANY))) {
+            tcg_gen_movi_tl(cpu_pc, ctx.pc);
+            ctx.bstate = BS_BRANCH;
+            gen_exception_debug();
+            /* The address covered by the breakpoint must be included in
+               [tb->pc, tb->pc + tb->size) in order for it to be
+               properly cleared -- thus we increment the PC here so that
+               the logic setting tb->size below does the right thing.  */
+            ctx.pc += 4;
+            goto done_generating;
+        }
+
+        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
+            gen_io_start();
+        }
+
+        ctx.opcode = cpu_ldl_code(env, ctx.pc);
+        decode_opc(env, &ctx);
+        ctx.pc = ctx.next_pc;
+
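+        /* Stop translating at a single-step request, the end of the
+         * guest page, a full TCG op buffer, or the insn-count limit. */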
+        if (cs->singlestep_enabled) {
+            break;
+        }
+        if (ctx.pc >= next_page_start) {
+            break;
+        }
+        if (tcg_op_buf_full()) {
+            break;
+        }
+        if (num_insns >= max_insns) {
+            break;
+        }
+        if (singlestep) {
+            break;
+        }
+
+    }
+    if (tb->cflags & CF_LAST_IO) {
+        gen_io_end();
+    }
+    switch (ctx.bstate) {
+    case BS_STOP:
+        gen_goto_tb(&ctx, 0, ctx.pc);
+        break;
+    case BS_NONE: /* handle end of page - DO NOT CHAIN. See gen_goto_tb. */
+        tcg_gen_movi_tl(cpu_pc, ctx.pc);
+        if (cs->singlestep_enabled) {
+            gen_exception_debug();
+        } else {
+            tcg_gen_exit_tb(0);
+        }
+        break;
+    case BS_BRANCH: /* ops using BS_BRANCH generate own exit seq */
+    default:
+        break;
+    }
+done_generating:
+    gen_tb_end(tb, num_insns);
+    tb->size = ctx.pc - pc_start;
+    tb->icount = num_insns;
+
+#ifdef DEBUG_DISAS
+    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
+        && qemu_log_in_addr_range(pc_start)) {
+        qemu_log("IN: %s\n", lookup_symbol(pc_start));
+        log_target_disas(cs, pc_start, ctx.pc - pc_start);
+        qemu_log("\n");
+    }
+#endif
+}
+
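+/*
+ * Allocate the TCG globals that mirror CPURISCVState: the integer and
+ * floating-point register files, the pc, and the load_res/load_val
+ * fields used by the LR/SC helpers.
+ */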
+void riscv_translate_init(void)
+{
+    int i;
+
+    /*
+     * cpu_gpr[0] is a placeholder for the zero register. Do not use it.
+     * Use the gen_set_gpr and gen_get_gpr helper functions when accessing
+     * registers, unless you specifically block reads/writes to reg 0.
+     */
+    cpu_gpr[0] = NULL;
+
+    for (i = 1; i < 32; i++) {
+        cpu_gpr[i] = tcg_global_mem_new(cpu_env,
+            offsetof(CPURISCVState, gpr[i]), riscv_int_regnames[i]);
+    }
+
+    for (i = 0; i < 32; i++) {
+        cpu_fpr[i] = tcg_global_mem_new_i64(cpu_env,
+            offsetof(CPURISCVState, fpr[i]), riscv_fpr_regnames[i]);
+    }
+
+    cpu_pc = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, pc), "pc");
+    load_res = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, load_res),
+                             "load_res");
+    load_val = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, load_val),
+                             "load_val");
+}
diff --git a/target/s390x/cpu.h b/target/s390x/cpu.h
index c5ef930876..5f357a4e2d 100644
--- a/target/s390x/cpu.h
+++ b/target/s390x/cpu.h
@@ -538,39 +538,39 @@ typedef union SysIB {
 QEMU_BUILD_BUG_ON(sizeof(SysIB) != 4096);
 
 /* MMU defines */
-#define _ASCE_ORIGIN            ~0xfffULL /* segment table origin             */
-#define _ASCE_SUBSPACE          0x200     /* subspace group control           */
-#define _ASCE_PRIVATE_SPACE     0x100     /* private space control            */
-#define _ASCE_ALT_EVENT         0x80      /* storage alteration event control */
-#define _ASCE_SPACE_SWITCH      0x40      /* space switch event               */
-#define _ASCE_REAL_SPACE        0x20      /* real space control               */
-#define _ASCE_TYPE_MASK         0x0c      /* asce table type mask             */
-#define _ASCE_TYPE_REGION1      0x0c      /* region first table type          */
-#define _ASCE_TYPE_REGION2      0x08      /* region second table type         */
-#define _ASCE_TYPE_REGION3      0x04      /* region third table type          */
-#define _ASCE_TYPE_SEGMENT      0x00      /* segment table type               */
-#define _ASCE_TABLE_LENGTH      0x03      /* region table length              */
-
-#define _REGION_ENTRY_ORIGIN    ~0xfffULL /* region/segment table origin      */
-#define _REGION_ENTRY_RO        0x200     /* region/segment protection bit    */
-#define _REGION_ENTRY_TF        0xc0      /* region/segment table offset      */
-#define _REGION_ENTRY_INV       0x20      /* invalid region table entry       */
-#define _REGION_ENTRY_TYPE_MASK 0x0c      /* region/segment table type mask   */
-#define _REGION_ENTRY_TYPE_R1   0x0c      /* region first table type          */
-#define _REGION_ENTRY_TYPE_R2   0x08      /* region second table type         */
-#define _REGION_ENTRY_TYPE_R3   0x04      /* region third table type          */
-#define _REGION_ENTRY_LENGTH    0x03      /* region third length              */
-
-#define _SEGMENT_ENTRY_ORIGIN   ~0x7ffULL /* segment table origin             */
-#define _SEGMENT_ENTRY_FC       0x400     /* format control                   */
-#define _SEGMENT_ENTRY_RO       0x200     /* page protection bit              */
-#define _SEGMENT_ENTRY_INV      0x20      /* invalid segment table entry      */
-
-#define VADDR_PX                0xff000   /* page index bits                  */
-
-#define _PAGE_RO        0x200            /* HW read-only bit  */
-#define _PAGE_INVALID   0x400            /* HW invalid bit    */
-#define _PAGE_RES0      0x800            /* bit must be zero  */
+#define ASCE_ORIGIN           (~0xfffULL) /* segment table origin             */
+#define ASCE_SUBSPACE         0x200       /* subspace group control           */
+#define ASCE_PRIVATE_SPACE    0x100       /* private space control            */
+#define ASCE_ALT_EVENT        0x80        /* storage alteration event control */
+#define ASCE_SPACE_SWITCH     0x40        /* space switch event               */
+#define ASCE_REAL_SPACE       0x20        /* real space control               */
+#define ASCE_TYPE_MASK        0x0c        /* asce table type mask             */
+#define ASCE_TYPE_REGION1     0x0c        /* region first table type          */
+#define ASCE_TYPE_REGION2     0x08        /* region second table type         */
+#define ASCE_TYPE_REGION3     0x04        /* region third table type          */
+#define ASCE_TYPE_SEGMENT     0x00        /* segment table type               */
+#define ASCE_TABLE_LENGTH     0x03        /* region table length              */
+
+#define REGION_ENTRY_ORIGIN   (~0xfffULL) /* region/segment table origin    */
+#define REGION_ENTRY_RO       0x200       /* region/segment protection bit  */
+#define REGION_ENTRY_TF       0xc0        /* region/segment table offset    */
+#define REGION_ENTRY_INV      0x20        /* invalid region table entry     */
+#define REGION_ENTRY_TYPE_MASK 0x0c       /* region/segment table type mask */
+#define REGION_ENTRY_TYPE_R1  0x0c        /* region first table type        */
+#define REGION_ENTRY_TYPE_R2  0x08        /* region second table type       */
+#define REGION_ENTRY_TYPE_R3  0x04        /* region third table type        */
+#define REGION_ENTRY_LENGTH   0x03        /* region third length            */
+
+#define SEGMENT_ENTRY_ORIGIN  (~0x7ffULL) /* segment table origin        */
+#define SEGMENT_ENTRY_FC      0x400       /* format control              */
+#define SEGMENT_ENTRY_RO      0x200       /* page protection bit         */
+#define SEGMENT_ENTRY_INV     0x20        /* invalid segment table entry */
+
+#define VADDR_PX              0xff000     /* page index bits   */
+
+#define PAGE_RO               0x200       /* HW read-only bit  */
+#define PAGE_INVALID          0x400       /* HW invalid bit    */
+#define PAGE_RES0             0x800       /* bit must be zero  */
 
 #define SK_C                    (0x1 << 1)
 #define SK_R                    (0x1 << 2)
diff --git a/target/s390x/mem_helper.c b/target/s390x/mem_helper.c
index d5291b246e..a0e28bd124 100644
--- a/target/s390x/mem_helper.c
+++ b/target/s390x/mem_helper.c
@@ -1924,20 +1924,20 @@ void HELPER(idte)(CPUS390XState *env, uint64_t r1, uint64_t r2, uint32_t m4)
 
     if (!(r2 & 0x800)) {
         /* invalidation-and-clearing operation */
-        table = r1 & _ASCE_ORIGIN;
+        table = r1 & ASCE_ORIGIN;
         entries = (r2 & 0x7ff) + 1;
 
-        switch (r1 & _ASCE_TYPE_MASK) {
-        case _ASCE_TYPE_REGION1:
+        switch (r1 & ASCE_TYPE_MASK) {
+        case ASCE_TYPE_REGION1:
             index = (r2 >> 53) & 0x7ff;
             break;
-        case _ASCE_TYPE_REGION2:
+        case ASCE_TYPE_REGION2:
             index = (r2 >> 42) & 0x7ff;
             break;
-        case _ASCE_TYPE_REGION3:
+        case ASCE_TYPE_REGION3:
             index = (r2 >> 31) & 0x7ff;
             break;
-        case _ASCE_TYPE_SEGMENT:
+        case ASCE_TYPE_SEGMENT:
             index = (r2 >> 20) & 0x7ff;
             break;
         }
@@ -1945,9 +1945,9 @@ void HELPER(idte)(CPUS390XState *env, uint64_t r1, uint64_t r2, uint32_t m4)
             /* addresses are not wrapped in 24/31bit mode but table index is */
             raddr = table + ((index + i) & 0x7ff) * sizeof(entry);
             entry = cpu_ldq_real_ra(env, raddr, ra);
-            if (!(entry & _REGION_ENTRY_INV)) {
+            if (!(entry & REGION_ENTRY_INV)) {
                 /* we are allowed to not store if already invalid */
-                entry |= _REGION_ENTRY_INV;
+                entry |= REGION_ENTRY_INV;
                 cpu_stq_real_ra(env, raddr, entry, ra);
             }
         }
@@ -1971,12 +1971,12 @@ void HELPER(ipte)(CPUS390XState *env, uint64_t pto, uint64_t vaddr,
     uint64_t pte_addr, pte;
 
     /* Compute the page table entry address */
-    pte_addr = (pto & _SEGMENT_ENTRY_ORIGIN);
+    pte_addr = (pto & SEGMENT_ENTRY_ORIGIN);
     pte_addr += (vaddr & VADDR_PX) >> 9;
 
     /* Mark the page table entry as invalid */
     pte = cpu_ldq_real_ra(env, pte_addr, ra);
-    pte |= _PAGE_INVALID;
+    pte |= PAGE_INVALID;
     cpu_stq_real_ra(env, pte_addr, pte, ra);
 
     /* XXX we exploit the fact that Linux passes the exact virtual
diff --git a/target/s390x/mmu_helper.c b/target/s390x/mmu_helper.c
index 23fb2e7501..1deeb6e6e4 100644
--- a/target/s390x/mmu_helper.c
+++ b/target/s390x/mmu_helper.c
@@ -128,11 +128,11 @@ static bool lowprot_enabled(const CPUS390XState *env, uint64_t asc)
     /* Check the private-space control bit */
     switch (asc) {
     case PSW_ASC_PRIMARY:
-        return !(env->cregs[1] & _ASCE_PRIVATE_SPACE);
+        return !(env->cregs[1] & ASCE_PRIVATE_SPACE);
     case PSW_ASC_SECONDARY:
-        return !(env->cregs[7] & _ASCE_PRIVATE_SPACE);
+        return !(env->cregs[7] & ASCE_PRIVATE_SPACE);
     case PSW_ASC_HOME:
-        return !(env->cregs[13] & _ASCE_PRIVATE_SPACE);
+        return !(env->cregs[13] & ASCE_PRIVATE_SPACE);
     default:
         /* We don't support access register mode */
         error_report("unsupported addressing mode");
@@ -159,20 +159,20 @@ static int mmu_translate_pte(CPUS390XState *env, target_ulong vaddr,
                              uint64_t asc, uint64_t pt_entry,
                              target_ulong *raddr, int *flags, int rw, bool exc)
 {
-    if (pt_entry & _PAGE_INVALID) {
+    if (pt_entry & PAGE_INVALID) {
         DPRINTF("%s: PTE=0x%" PRIx64 " invalid\n", __func__, pt_entry);
         trigger_page_fault(env, vaddr, PGM_PAGE_TRANS, asc, rw, exc);
         return -1;
     }
-    if (pt_entry & _PAGE_RES0) {
+    if (pt_entry & PAGE_RES0) {
         trigger_page_fault(env, vaddr, PGM_TRANS_SPEC, asc, rw, exc);
         return -1;
     }
-    if (pt_entry & _PAGE_RO) {
+    if (pt_entry & PAGE_RO) {
         *flags &= ~PAGE_WRITE;
     }
 
-    *raddr = pt_entry & _ASCE_ORIGIN;
+    *raddr = pt_entry & ASCE_ORIGIN;
 
     PTE_DPRINTF("%s: PTE=0x%" PRIx64 "\n", __func__, pt_entry);
 
@@ -188,11 +188,11 @@ static int mmu_translate_segment(CPUS390XState *env, target_ulong vaddr,
     CPUState *cs = CPU(s390_env_get_cpu(env));
     uint64_t origin, offs, pt_entry;
 
-    if (st_entry & _SEGMENT_ENTRY_RO) {
+    if (st_entry & SEGMENT_ENTRY_RO) {
         *flags &= ~PAGE_WRITE;
     }
 
-    if ((st_entry & _SEGMENT_ENTRY_FC) && (env->cregs[0] & CR0_EDAT)) {
+    if ((st_entry & SEGMENT_ENTRY_FC) && (env->cregs[0] & CR0_EDAT)) {
         /* Decode EDAT1 segment frame absolute address (1MB page) */
         *raddr = (st_entry & 0xfffffffffff00000ULL) | (vaddr & 0xfffff);
         PTE_DPRINTF("%s: SEG=0x%" PRIx64 "\n", __func__, st_entry);
@@ -200,7 +200,7 @@ static int mmu_translate_segment(CPUS390XState *env, target_ulong vaddr,
     }
 
     /* Look up 4KB page entry */
-    origin = st_entry & _SEGMENT_ENTRY_ORIGIN;
+    origin = st_entry & SEGMENT_ENTRY_ORIGIN;
     offs  = (vaddr & VADDR_PX) >> 9;
     pt_entry = ldq_phys(cs->as, origin + offs);
     PTE_DPRINTF("%s: 0x%" PRIx64 " + 0x%" PRIx64 " => 0x%016" PRIx64 "\n",
@@ -223,39 +223,39 @@ static int mmu_translate_region(CPUS390XState *env, target_ulong vaddr,
 
     PTE_DPRINTF("%s: 0x%" PRIx64 "\n", __func__, entry);
 
-    origin = entry & _REGION_ENTRY_ORIGIN;
+    origin = entry & REGION_ENTRY_ORIGIN;
     offs = (vaddr >> (17 + 11 * level / 4)) & 0x3ff8;
 
     new_entry = ldq_phys(cs->as, origin + offs);
     PTE_DPRINTF("%s: 0x%" PRIx64 " + 0x%" PRIx64 " => 0x%016" PRIx64 "\n",
                 __func__, origin, offs, new_entry);
 
-    if ((new_entry & _REGION_ENTRY_INV) != 0) {
+    if ((new_entry & REGION_ENTRY_INV) != 0) {
         DPRINTF("%s: invalid region\n", __func__);
         trigger_page_fault(env, vaddr, pchks[level / 4], asc, rw, exc);
         return -1;
     }
 
-    if ((new_entry & _REGION_ENTRY_TYPE_MASK) != level) {
+    if ((new_entry & REGION_ENTRY_TYPE_MASK) != level) {
         trigger_page_fault(env, vaddr, PGM_TRANS_SPEC, asc, rw, exc);
         return -1;
     }
 
-    if (level == _ASCE_TYPE_SEGMENT) {
+    if (level == ASCE_TYPE_SEGMENT) {
         return mmu_translate_segment(env, vaddr, asc, new_entry, raddr, flags,
                                      rw, exc);
     }
 
     /* Check region table offset and length */
     offs = (vaddr >> (28 + 11 * (level - 4) / 4)) & 3;
-    if (offs < ((new_entry & _REGION_ENTRY_TF) >> 6)
-        || offs > (new_entry & _REGION_ENTRY_LENGTH)) {
+    if (offs < ((new_entry & REGION_ENTRY_TF) >> 6)
+        || offs > (new_entry & REGION_ENTRY_LENGTH)) {
         DPRINTF("%s: invalid offset or len (%lx)\n", __func__, new_entry);
         trigger_page_fault(env, vaddr, pchks[level / 4 - 1], asc, rw, exc);
         return -1;
     }
 
-    if ((env->cregs[0] & CR0_EDAT) && (new_entry & _REGION_ENTRY_RO)) {
+    if ((env->cregs[0] & CR0_EDAT) && (new_entry & REGION_ENTRY_RO)) {
         *flags &= ~PAGE_WRITE;
     }
 
@@ -271,52 +271,52 @@ static int mmu_translate_asce(CPUS390XState *env, target_ulong vaddr,
     int level;
     int r;
 
-    if (asce & _ASCE_REAL_SPACE) {
+    if (asce & ASCE_REAL_SPACE) {
         /* direct mapping */
         *raddr = vaddr;
         return 0;
     }
 
-    level = asce & _ASCE_TYPE_MASK;
+    level = asce & ASCE_TYPE_MASK;
     switch (level) {
-    case _ASCE_TYPE_REGION1:
-        if ((vaddr >> 62) > (asce & _ASCE_TABLE_LENGTH)) {
+    case ASCE_TYPE_REGION1:
+        if ((vaddr >> 62) > (asce & ASCE_TABLE_LENGTH)) {
             trigger_page_fault(env, vaddr, PGM_REG_FIRST_TRANS, asc, rw, exc);
             return -1;
         }
         break;
-    case _ASCE_TYPE_REGION2:
+    case ASCE_TYPE_REGION2:
         if (vaddr & 0xffe0000000000000ULL) {
             DPRINTF("%s: vaddr doesn't fit 0x%16" PRIx64
                     " 0xffe0000000000000ULL\n", __func__, vaddr);
             trigger_page_fault(env, vaddr, PGM_ASCE_TYPE, asc, rw, exc);
             return -1;
         }
-        if ((vaddr >> 51 & 3) > (asce & _ASCE_TABLE_LENGTH)) {
+        if ((vaddr >> 51 & 3) > (asce & ASCE_TABLE_LENGTH)) {
             trigger_page_fault(env, vaddr, PGM_REG_SEC_TRANS, asc, rw, exc);
             return -1;
         }
         break;
-    case _ASCE_TYPE_REGION3:
+    case ASCE_TYPE_REGION3:
         if (vaddr & 0xfffffc0000000000ULL) {
             DPRINTF("%s: vaddr doesn't fit 0x%16" PRIx64
                     " 0xfffffc0000000000ULL\n", __func__, vaddr);
             trigger_page_fault(env, vaddr, PGM_ASCE_TYPE, asc, rw, exc);
             return -1;
         }
-        if ((vaddr >> 40 & 3) > (asce & _ASCE_TABLE_LENGTH)) {
+        if ((vaddr >> 40 & 3) > (asce & ASCE_TABLE_LENGTH)) {
             trigger_page_fault(env, vaddr, PGM_REG_THIRD_TRANS, asc, rw, exc);
             return -1;
         }
         break;
-    case _ASCE_TYPE_SEGMENT:
+    case ASCE_TYPE_SEGMENT:
         if (vaddr & 0xffffffff80000000ULL) {
             DPRINTF("%s: vaddr doesn't fit 0x%16" PRIx64
                     " 0xffffffff80000000ULL\n", __func__, vaddr);
             trigger_page_fault(env, vaddr, PGM_ASCE_TYPE, asc, rw, exc);
             return -1;
         }
-        if ((vaddr >> 29 & 3) > (asce & _ASCE_TABLE_LENGTH)) {
+        if ((vaddr >> 29 & 3) > (asce & ASCE_TABLE_LENGTH)) {
             trigger_page_fault(env, vaddr, PGM_SEGMENT_TRANS, asc, rw, exc);
             return -1;
         }
diff --git a/target/sparc/translate.c b/target/sparc/translate.c
index 71e0853e43..5aa367a182 100644
--- a/target/sparc/translate.c
+++ b/target/sparc/translate.c
@@ -2093,6 +2093,11 @@ static DisasASI get_asi(DisasContext *dc, int insn, TCGMemOp memop)
             type = GET_ASI_BFILL;
             break;
         }
+
+        /* MMU_PHYS_IDX is used when the MMU is disabled to bypass the
+         * permission checks in get_physical_address().
+         */
+        mem_idx = (dc->mem_idx == MMU_PHYS_IDX) ? MMU_PHYS_IDX : mem_idx;
     } else {
         gen_exception(dc, TT_PRIV_INSN);
         type = GET_ASI_EXCP;