 target/sh4/cpu.h       | 12 ++++++++++--
 target/sh4/helper.c    | 28 ++++++++++++++++++++++------
 target/sh4/translate.c | 25 ++++++++++++++-----------
 3 files changed, 46 insertions(+), 19 deletions(-)
diff --git a/target/sh4/cpu.h b/target/sh4/cpu.h
index 6c07c6b24b..ffb91687b8 100644
--- a/target/sh4/cpu.h
+++ b/target/sh4/cpu.h
@@ -91,8 +91,10 @@
 #define FPSCR_RM_NEAREST       (0 << 0)
 #define FPSCR_RM_ZERO          (1 << 0)
 
+#define DELAY_SLOT_MASK        0x7
 #define DELAY_SLOT             (1 << 0)
 #define DELAY_SLOT_CONDITIONAL (1 << 1)
+#define DELAY_SLOT_RTE         (1 << 2)
 
 typedef struct tlb_t {
     uint32_t vpn;		/* virtual page number */
@@ -263,7 +265,13 @@ void cpu_load_tlb(CPUSH4State * env);
 #define MMU_USER_IDX 1
 static inline int cpu_mmu_index (CPUSH4State *env, bool ifetch)
 {
-    return (env->sr & (1u << SR_MD)) == 0 ? 1 : 0;
+    /* The instruction in a RTE delay slot is fetched in privileged
+       mode, but executed in user mode.  */
+    if (ifetch && (env->flags & DELAY_SLOT_RTE)) {
+        return 0;
+    } else {
+        return (env->sr & (1u << SR_MD)) == 0 ? 1 : 0;
+    }
 }
 
 #include "exec/cpu-all.h"
@@ -380,7 +388,7 @@ static inline void cpu_get_tb_cpu_state(CPUSH4State *env, target_ulong *pc,
 {
     *pc = env->pc;
     *cs_base = 0;
-    *flags = (env->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) /* Bits 0-1 */
+    *flags = (env->flags & DELAY_SLOT_MASK)                    /* Bits  0- 2 */
             | (env->fpscr & (FPSCR_FR | FPSCR_SZ | FPSCR_PR))  /* Bits 19-21 */
             | (env->sr & ((1u << SR_MD) | (1u << SR_RB)))      /* Bits 29-30 */
             | (env->sr & (1u << SR_FD))                        /* Bit 15 */
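
The cpu.h hunks above rely on two invariants that are easy to verify at build time: DELAY_SLOT_MASK must cover exactly the three delay-slot flags, and those bits (0-2) must not collide with the other fields cpu_get_tb_cpu_state() packs into the same word (FPSCR.FR/SZ/PR in bits 19-21, SR.FD in bit 15, SR.MD/RB in bits 29-30, per the comments above). The following is a minimal standalone sketch of those checks in plain C11, with the constants restated from the hunks above; it is not part of the patch, and QEMU itself would typically express this with its QEMU_BUILD_BUG_ON macro instead.

    #include <assert.h>

    #define DELAY_SLOT             (1 << 0)
    #define DELAY_SLOT_CONDITIONAL (1 << 1)
    #define DELAY_SLOT_RTE         (1 << 2)
    #define DELAY_SLOT_MASK        0x7

    /* The mask must name every delay-slot flag and nothing else. */
    static_assert(DELAY_SLOT_MASK ==
                  (DELAY_SLOT | DELAY_SLOT_CONDITIONAL | DELAY_SLOT_RTE),
                  "DELAY_SLOT_MASK out of sync with the flag definitions");

    /* Bits 0-2 must stay clear of the other tb->flags fields:
     * FPSCR.FR/SZ/PR (bits 19-21), SR.FD (bit 15), SR.MD/RB (bits 29-30). */
    static_assert((DELAY_SLOT_MASK &
                   ((0x7u << 19) | (1u << 15) | (0x3u << 29))) == 0,
                  "delay-slot bits overlap other tb->flags bits");

    int main(void) { return 0; }
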
diff --git a/target/sh4/helper.c b/target/sh4/helper.c
index 8f8ce81401..28d93c2543 100644
--- a/target/sh4/helper.c
+++ b/target/sh4/helper.c
@@ -21,6 +21,7 @@
 #include "cpu.h"
 #include "exec/exec-all.h"
 #include "exec/log.h"
+#include "sysemu/sysemu.h"
 
 #if !defined(CONFIG_USER_ONLY)
 #include "hw/sh4/sh_intc.h"
@@ -92,7 +93,14 @@ void superh_cpu_do_interrupt(CPUState *cs)
 
     if (env->sr & (1u << SR_BL)) {
         if (do_exp && cs->exception_index != 0x1e0) {
-            cs->exception_index = 0x000; /* masked exception -> reset */
+            /* In theory a masked exception generates a reset exception,
+               which in turn jumps to the reset vector. However this only
+               works when using a bootloader. When using a kernel and an
+               initrd, they need to be reloaded and the program counter
+               should be loaded with the kernel entry point.
+               qemu_system_reset_request takes care of that.  */
+            qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
+            return;
         }
         if (do_irq && !env->in_sleep) {
             return; /* masked */
@@ -164,11 +172,11 @@ void superh_cpu_do_interrupt(CPUState *cs)
     env->sgr = env->gregs[15];
     env->sr |= (1u << SR_BL) | (1u << SR_MD) | (1u << SR_RB);
 
-    if (env->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {
+    if (env->flags & DELAY_SLOT_MASK) {
         /* Branch instruction should be executed again before delay slot. */
 	env->spc -= 2;
 	/* Clear flags for exception/interrupt routine. */
-        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
+        env->flags &= ~DELAY_SLOT_MASK;
     }
 
     if (do_exp) {
@@ -420,7 +428,7 @@ static int get_physical_address(CPUSH4State * env, target_ulong * physical,
         if (!(env->sr & (1u << SR_MD))
 	    && (address < 0xe0000000 || address >= 0xe4000000)) {
 	    /* Unauthorized access in user mode (only store queues are available) */
-	    fprintf(stderr, "Unauthorized access\n");
+            qemu_log_mask(LOG_GUEST_ERROR, "Unauthorized access\n");
 	    if (rw == 0)
 		return MMU_DADDR_ERROR_READ;
 	    else if (rw == 1)
@@ -863,8 +871,16 @@ int cpu_sh4_is_cached(CPUSH4State * env, target_ulong addr)
 bool superh_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
 {
     if (interrupt_request & CPU_INTERRUPT_HARD) {
-        superh_cpu_do_interrupt(cs);
-        return true;
+        SuperHCPU *cpu = SUPERH_CPU(cs);
+        CPUSH4State *env = &cpu->env;
+
+        /* Delay slots are indivisible, ignore interrupts */
+        if (env->flags & DELAY_SLOT_MASK) {
+            return false;
+        } else {
+            superh_cpu_do_interrupt(cs);
+            return true;
+        }
     }
     return false;
 }
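
Two details in the helper.c hunks are worth spelling out. First, superh_cpu_exec_interrupt() now defers hardware interrupts whenever any delay-slot flag is set, so a branch and its slot stay atomic. Second, when superh_cpu_do_interrupt() does run with a delay-slot flag set, SPC is rewound by one instruction so that the eventual RTE re-executes the branch whose pending target was discarded when env->flags was cleared. A tiny self-contained illustration of that SPC arithmetic, using made-up guest addresses (SH-4 instructions are 2 bytes):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        /* Hypothetical guest addresses: a branch and its delay slot. */
        uint32_t branch_pc = 0x8c001000;      /* bra/jmp/rte instruction       */
        uint32_t slot_pc   = branch_pc + 2;   /* its delay slot (2-byte insns) */

        /* A trap taken in the slot saves the slot's PC ... */
        uint32_t spc = slot_pc;

        /* ... and superh_cpu_do_interrupt() rewinds it by one instruction,
         * so the handler's RTE re-executes the branch, not just the slot. */
        spc -= 2;
        assert(spc == branch_pc);
        return 0;
    }
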
diff --git a/target/sh4/translate.c b/target/sh4/translate.c
index 0bc2f9ff19..8bc132b27b 100644
--- a/target/sh4/translate.c
+++ b/target/sh4/translate.c
@@ -185,6 +185,9 @@ void superh_cpu_dump_state(CPUState *cs, FILE *f,
     } else if (env->flags & DELAY_SLOT_CONDITIONAL) {
 	cpu_fprintf(f, "in conditional delay slot (delayed_pc=0x%08x)\n",
 		    env->delayed_pc);
+    } else if (env->flags & DELAY_SLOT_RTE) {
+        cpu_fprintf(f, "in rte delay slot (delayed_pc=0x%08x)\n",
+                    env->delayed_pc);
     }
 }
 
@@ -217,8 +220,7 @@ static inline void gen_save_cpu_state(DisasContext *ctx, bool save_pc)
     if (ctx->delayed_pc != (uint32_t) -1) {
         tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);
     }
-    if ((ctx->tbflags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL))
-        != ctx->envflags) {
+    if ((ctx->tbflags & DELAY_SLOT_MASK) != ctx->envflags) {
         tcg_gen_movi_i32(cpu_flags, ctx->envflags);
     }
 }
@@ -329,7 +331,7 @@ static inline void gen_store_fpr64 (TCGv_i64 t, int reg)
 #define DREG(x) FREG(x) /* Assumes lsb of (x) is always 0 */
 
 #define CHECK_NOT_DELAY_SLOT \
-    if (ctx->envflags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {     \
+    if (ctx->envflags & DELAY_SLOT_MASK) {                           \
         gen_save_cpu_state(ctx, true);                               \
         gen_helper_raise_slot_illegal_instruction(cpu_env);          \
         ctx->bstate = BS_EXCP;                                       \
@@ -339,7 +341,7 @@ static inline void gen_store_fpr64 (TCGv_i64 t, int reg)
 #define CHECK_PRIVILEGED                                             \
     if (IS_USER(ctx)) {                                              \
         gen_save_cpu_state(ctx, true);                               \
-        if (ctx->envflags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) { \
+        if (ctx->envflags & DELAY_SLOT_MASK) {                       \
             gen_helper_raise_slot_illegal_instruction(cpu_env);      \
         } else {                                                     \
             gen_helper_raise_illegal_instruction(cpu_env);           \
@@ -351,7 +353,7 @@ static inline void gen_store_fpr64 (TCGv_i64 t, int reg)
 #define CHECK_FPU_ENABLED                                            \
     if (ctx->tbflags & (1u << SR_FD)) {                              \
         gen_save_cpu_state(ctx, true);                               \
-        if (ctx->envflags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) { \
+        if (ctx->envflags & DELAY_SLOT_MASK) {                       \
             gen_helper_raise_slot_fpu_disable(cpu_env);              \
         } else {                                                     \
             gen_helper_raise_fpu_disable(cpu_env);                   \
@@ -428,8 +430,9 @@ static void _decode_opc(DisasContext * ctx)
 	CHECK_NOT_DELAY_SLOT
         gen_write_sr(cpu_ssr);
 	tcg_gen_mov_i32(cpu_delayed_pc, cpu_spc);
-        ctx->envflags |= DELAY_SLOT;
+        ctx->envflags |= DELAY_SLOT_RTE;
 	ctx->delayed_pc = (uint32_t) - 1;
+        ctx->bstate = BS_STOP;
 	return;
     case 0x0058:		/* sets */
         tcg_gen_ori_i32(cpu_sr, cpu_sr, (1u << SR_S));
@@ -1784,7 +1787,7 @@ static void _decode_opc(DisasContext * ctx)
     fflush(stderr);
 #endif
     gen_save_cpu_state(ctx, true);
-    if (ctx->envflags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {
+    if (ctx->envflags & DELAY_SLOT_MASK) {
         gen_helper_raise_slot_illegal_instruction(cpu_env);
     } else {
         gen_helper_raise_illegal_instruction(cpu_env);
@@ -1798,14 +1801,14 @@ static void decode_opc(DisasContext * ctx)
 
     _decode_opc(ctx);
 
-    if (old_flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {
+    if (old_flags & DELAY_SLOT_MASK) {
         /* go out of the delay slot */
-        ctx->envflags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
+        ctx->envflags &= ~DELAY_SLOT_MASK;
         tcg_gen_movi_i32(cpu_flags, ctx->envflags);
         ctx->bstate = BS_BRANCH;
         if (old_flags & DELAY_SLOT_CONDITIONAL) {
 	    gen_delayed_conditional_jump(ctx);
-        } else if (old_flags & DELAY_SLOT) {
+        } else {
             gen_jump(ctx);
 	}
 
@@ -1824,7 +1827,7 @@ void gen_intermediate_code(CPUSH4State * env, struct TranslationBlock *tb)
     pc_start = tb->pc;
     ctx.pc = pc_start;
     ctx.tbflags = (uint32_t)tb->flags;
-    ctx.envflags = tb->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
+    ctx.envflags = tb->flags & DELAY_SLOT_MASK;
     ctx.bstate = BS_NONE;
     ctx.memidx = (ctx.tbflags & (1u << SR_MD)) == 0 ? 1 : 0;
     /* We don't know if the delayed pc came from a dynamic or static branch,
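
Taken together, the three files implement one rule, in the patch's own words: the instruction in an RTE delay slot is fetched in privileged mode but executed in user mode. The translate.c hunk marks the slot with DELAY_SLOT_RTE and ends the TB (BS_STOP), cpu_get_tb_cpu_state() forwards the flag to the next TB lookup, and cpu_mmu_index() picks the privileged index only for the instruction fetch. The snippet below is a hypothetical standalone restatement of that decision, not code that exists in QEMU; it only mirrors the cpu.h hunk above, with the bit positions restated.

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    enum { SR_MD_BIT = 30, DS_RTE = 1 << 2 };   /* restated from cpu.h */

    /* 0 = kernel MMU index, 1 = user MMU index. */
    static int mmu_index(uint32_t sr, uint32_t flags, bool ifetch)
    {
        if (ifetch && (flags & DS_RTE)) {
            return 0;                     /* fetch of the RTE delay slot */
        }
        return (sr & (1u << SR_MD_BIT)) ? 0 : 1;
    }

    int main(void)
    {
        uint32_t user_sr = 0;   /* SR.MD already cleared by RTE */

        /* The slot's instruction fetch is still privileged ... */
        assert(mmu_index(user_sr, DS_RTE, true) == 0);
        /* ... but a load/store performed by that instruction is not. */
        assert(mmu_index(user_sr, DS_RTE, false) == 1);
        return 0;
    }
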