Diffstat (limited to 'target/sparc/translate.c')
-rw-r--r--   target/sparc/translate.c   6911
1 file changed, 3339 insertions(+), 3572 deletions(-)
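
The first hunk below replaces target-specific #ifdef blocks with stub macros, so that a single translator body can reference helpers that exist only in the other configuration. A minimal sketch of the pattern follows, assuming only that qemu_build_not_reached() (from QEMU's osdep header) expands to a compile-time error in optimized builds; do_rdpsr here is an invented caller for illustration, not code from this patch:

    #include "qemu/osdep.h"          /* qemu_build_not_reached() */

    #ifdef TARGET_SPARC64
    /* sparc32-only helper: any call that survives constant folding
     * becomes a build error instead of a silently wrong binary. */
    # define gen_helper_rdpsr(D, E)  qemu_build_not_reached()
    #endif

    /* Hypothetical caller: avail_32(dc) is the constant 'false' in a
     * sparc64 build, so the compiler folds the branch away and the
     * stub macro is never instantiated on a reachable path. */
    static bool do_rdpsr(DisasContext *dc, TCGv dst)
    {
        if (!avail_32(dc)) {
            return false;
        }
        gen_helper_rdpsr(dst, tcg_env);
        return true;
    }
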
diff --git a/target/sparc/translate.c b/target/sparc/translate.c index f92ff80ac8..986a88c4e1 100644 --- a/target/sparc/translate.c +++ b/target/sparc/translate.c @@ -25,9 +25,8 @@ #include "exec/helper-proto.h" #include "exec/exec-all.h" #include "tcg/tcg-op.h" - +#include "tcg/tcg-op-gvec.h" #include "exec/helper-gen.h" - #include "exec/translator.h" #include "exec/log.h" #include "asi.h" @@ -36,6 +35,65 @@ #include "exec/helper-info.c.inc" #undef HELPER_H +#ifdef TARGET_SPARC64 +# define gen_helper_rdpsr(D, E) qemu_build_not_reached() +# define gen_helper_rett(E) qemu_build_not_reached() +# define gen_helper_power_down(E) qemu_build_not_reached() +# define gen_helper_wrpsr(E, S) qemu_build_not_reached() +#else +# define gen_helper_clear_softint(E, S) qemu_build_not_reached() +# define gen_helper_done(E) qemu_build_not_reached() +# define gen_helper_fabsd(D, S) qemu_build_not_reached() +# define gen_helper_flushw(E) qemu_build_not_reached() +# define gen_helper_fnegd(D, S) qemu_build_not_reached() +# define gen_helper_rdccr(D, E) qemu_build_not_reached() +# define gen_helper_rdcwp(D, E) qemu_build_not_reached() +# define gen_helper_restored(E) qemu_build_not_reached() +# define gen_helper_retry(E) qemu_build_not_reached() +# define gen_helper_saved(E) qemu_build_not_reached() +# define gen_helper_sdivx(D, E, A, B) qemu_build_not_reached() +# define gen_helper_set_softint(E, S) qemu_build_not_reached() +# define gen_helper_tick_get_count(D, E, T, C) qemu_build_not_reached() +# define gen_helper_tick_set_count(P, S) qemu_build_not_reached() +# define gen_helper_tick_set_limit(P, S) qemu_build_not_reached() +# define gen_helper_udivx(D, E, A, B) qemu_build_not_reached() +# define gen_helper_wrccr(E, S) qemu_build_not_reached() +# define gen_helper_wrcwp(E, S) qemu_build_not_reached() +# define gen_helper_wrgl(E, S) qemu_build_not_reached() +# define gen_helper_write_softint(E, S) qemu_build_not_reached() +# define gen_helper_wrpil(E, S) qemu_build_not_reached() +# define gen_helper_wrpstate(E, S) qemu_build_not_reached() +# define gen_helper_fabsq ({ qemu_build_not_reached(); NULL; }) +# define gen_helper_fcmpeq16 ({ qemu_build_not_reached(); NULL; }) +# define gen_helper_fcmpeq32 ({ qemu_build_not_reached(); NULL; }) +# define gen_helper_fcmpgt16 ({ qemu_build_not_reached(); NULL; }) +# define gen_helper_fcmpgt32 ({ qemu_build_not_reached(); NULL; }) +# define gen_helper_fcmple16 ({ qemu_build_not_reached(); NULL; }) +# define gen_helper_fcmple32 ({ qemu_build_not_reached(); NULL; }) +# define gen_helper_fcmpne16 ({ qemu_build_not_reached(); NULL; }) +# define gen_helper_fcmpne32 ({ qemu_build_not_reached(); NULL; }) +# define gen_helper_fdtox ({ qemu_build_not_reached(); NULL; }) +# define gen_helper_fexpand ({ qemu_build_not_reached(); NULL; }) +# define gen_helper_fmul8sux16 ({ qemu_build_not_reached(); NULL; }) +# define gen_helper_fmul8ulx16 ({ qemu_build_not_reached(); NULL; }) +# define gen_helper_fmul8x16al ({ qemu_build_not_reached(); NULL; }) +# define gen_helper_fmul8x16au ({ qemu_build_not_reached(); NULL; }) +# define gen_helper_fmul8x16 ({ qemu_build_not_reached(); NULL; }) +# define gen_helper_fmuld8sux16 ({ qemu_build_not_reached(); NULL; }) +# define gen_helper_fmuld8ulx16 ({ qemu_build_not_reached(); NULL; }) +# define gen_helper_fnegq ({ qemu_build_not_reached(); NULL; }) +# define gen_helper_fpmerge ({ qemu_build_not_reached(); NULL; }) +# define gen_helper_fqtox ({ qemu_build_not_reached(); NULL; }) +# define gen_helper_fstox ({ qemu_build_not_reached(); NULL; }) +# 
define gen_helper_fxtod ({ qemu_build_not_reached(); NULL; }) +# define gen_helper_fxtoq ({ qemu_build_not_reached(); NULL; }) +# define gen_helper_fxtos ({ qemu_build_not_reached(); NULL; }) +# define gen_helper_pdist ({ qemu_build_not_reached(); NULL; }) +# define FSR_LDXFSR_MASK 0 +# define FSR_LDXFSR_OLDMASK 0 +# define MAXTL_MASK 0 +#endif + /* Dynamic PC, must exit to main loop. */ #define DYNAMIC_PC 1 /* Dynamic PC, one of two values according to jump_pc[T2]. */ @@ -53,21 +111,36 @@ static TCGv_i32 cpu_psr; static TCGv cpu_fsr, cpu_pc, cpu_npc; static TCGv cpu_regs[32]; static TCGv cpu_y; -#ifndef CONFIG_USER_ONLY static TCGv cpu_tbr; -#endif static TCGv cpu_cond; #ifdef TARGET_SPARC64 static TCGv_i32 cpu_xcc, cpu_fprs; static TCGv cpu_gsr; -static TCGv cpu_tick_cmpr, cpu_stick_cmpr, cpu_hstick_cmpr; -static TCGv cpu_hintp, cpu_htba, cpu_hver, cpu_ssr, cpu_ver; #else -static TCGv cpu_wim; +# define cpu_fprs ({ qemu_build_not_reached(); (TCGv)NULL; }) +# define cpu_gsr ({ qemu_build_not_reached(); (TCGv)NULL; }) #endif /* Floating point registers */ static TCGv_i64 cpu_fpr[TARGET_DPREGS]; +#define env_field_offsetof(X) offsetof(CPUSPARCState, X) +#ifdef TARGET_SPARC64 +# define env32_field_offsetof(X) ({ qemu_build_not_reached(); 0; }) +# define env64_field_offsetof(X) env_field_offsetof(X) +#else +# define env32_field_offsetof(X) env_field_offsetof(X) +# define env64_field_offsetof(X) ({ qemu_build_not_reached(); 0; }) +#endif + +typedef struct DisasDelayException { + struct DisasDelayException *next; + TCGLabel *lab; + TCGv_i32 excp; + /* Saved state at parent insn. */ + target_ulong pc; + target_ulong npc; +} DisasDelayException; + typedef struct DisasContext { DisasContextBase base; target_ulong pc; /* current Program Counter: integer or DYNAMIC_PC */ @@ -89,6 +162,7 @@ typedef struct DisasContext { int fprs_dirty; int asi; #endif + DisasDelayException *delay_excp_list; } DisasContext; typedef struct { @@ -119,12 +193,6 @@ typedef struct { #define UA2005_HTRAP_MASK 0xff #define V8_TRAP_MASK 0x7f -static int sign_extend(int x, int len) -{ - len = 32 - len; - return (x << len) >> len; -} - #define IS_IMM (insn & (1<<13)) static void gen_update_fprs_dirty(DisasContext *dc, int rd) @@ -209,69 +277,40 @@ static void gen_op_store_QT0_fpr(unsigned int dst) offsetof(CPU_QuadU, ll.lower)); } -static void gen_store_fpr_Q(DisasContext *dc, unsigned int dst, - TCGv_i64 v1, TCGv_i64 v2) -{ - dst = QFPREG(dst); - - tcg_gen_mov_i64(cpu_fpr[dst / 2], v1); - tcg_gen_mov_i64(cpu_fpr[dst / 2 + 1], v2); - gen_update_fprs_dirty(dc, dst); -} - -#ifdef TARGET_SPARC64 -static TCGv_i64 gen_load_fpr_Q0(DisasContext *dc, unsigned int src) -{ - src = QFPREG(src); - return cpu_fpr[src / 2]; -} - -static TCGv_i64 gen_load_fpr_Q1(DisasContext *dc, unsigned int src) -{ - src = QFPREG(src); - return cpu_fpr[src / 2 + 1]; -} - -static void gen_move_Q(DisasContext *dc, unsigned int rd, unsigned int rs) -{ - rd = QFPREG(rd); - rs = QFPREG(rs); - - tcg_gen_mov_i64(cpu_fpr[rd / 2], cpu_fpr[rs / 2]); - tcg_gen_mov_i64(cpu_fpr[rd / 2 + 1], cpu_fpr[rs / 2 + 1]); - gen_update_fprs_dirty(dc, rd); -} -#endif - /* moves */ #ifdef CONFIG_USER_ONLY #define supervisor(dc) 0 -#ifdef TARGET_SPARC64 #define hypervisor(dc) 0 -#endif #else #ifdef TARGET_SPARC64 #define hypervisor(dc) (dc->hypervisor) #define supervisor(dc) (dc->supervisor | dc->hypervisor) #else #define supervisor(dc) (dc->supervisor) +#define hypervisor(dc) 0 #endif #endif -#ifdef TARGET_SPARC64 -#ifndef TARGET_ABI32 -#define AM_CHECK(dc) 
((dc)->address_mask_32bit) +#if !defined(TARGET_SPARC64) +# define AM_CHECK(dc) false +#elif defined(TARGET_ABI32) +# define AM_CHECK(dc) true +#elif defined(CONFIG_USER_ONLY) +# define AM_CHECK(dc) false #else -#define AM_CHECK(dc) (1) -#endif +# define AM_CHECK(dc) ((dc)->address_mask_32bit) #endif static void gen_address_mask(DisasContext *dc, TCGv addr) { -#ifdef TARGET_SPARC64 - if (AM_CHECK(dc)) + if (AM_CHECK(dc)) { tcg_gen_andi_tl(addr, addr, 0xffffffffULL); -#endif + } +} + +static target_ulong address_mask_i(DisasContext *dc, target_ulong addr) +{ + return AM_CHECK(dc) ? (uint32_t)addr : addr; } static TCGv gen_load_gpr(DisasContext *dc, int reg) @@ -402,71 +441,89 @@ static TCGv_i32 gen_sub32_carry32(void) return carry_32; } -static void gen_op_addx_int(DisasContext *dc, TCGv dst, TCGv src1, - TCGv src2, int update_cc) +static void gen_op_addc_int(TCGv dst, TCGv src1, TCGv src2, + TCGv_i32 carry_32, bool update_cc) { - TCGv_i32 carry_32; - TCGv carry; + tcg_gen_add_tl(dst, src1, src2); - switch (dc->cc_op) { - case CC_OP_DIV: - case CC_OP_LOGIC: - /* Carry is known to be zero. Fall back to plain ADD. */ - if (update_cc) { - gen_op_add_cc(dst, src1, src2); - } else { - tcg_gen_add_tl(dst, src1, src2); - } - return; +#ifdef TARGET_SPARC64 + TCGv carry = tcg_temp_new(); + tcg_gen_extu_i32_tl(carry, carry_32); + tcg_gen_add_tl(dst, dst, carry); +#else + tcg_gen_add_i32(dst, dst, carry_32); +#endif - case CC_OP_ADD: - case CC_OP_TADD: - case CC_OP_TADDTV: - if (TARGET_LONG_BITS == 32) { - /* We can re-use the host's hardware carry generation by using - an ADD2 opcode. We discard the low part of the output. - Ideally we'd combine this operation with the add that - generated the carry in the first place. */ - carry = tcg_temp_new(); - tcg_gen_add2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2); - goto add_done; - } - carry_32 = gen_add32_carry32(); - break; + if (update_cc) { + tcg_debug_assert(dst == cpu_cc_dst); + tcg_gen_mov_tl(cpu_cc_src, src1); + tcg_gen_mov_tl(cpu_cc_src2, src2); + } +} - case CC_OP_SUB: - case CC_OP_TSUB: - case CC_OP_TSUBTV: - carry_32 = gen_sub32_carry32(); - break; +static void gen_op_addc_int_add(TCGv dst, TCGv src1, TCGv src2, bool update_cc) +{ + TCGv discard; - default: - /* We need external help to produce the carry. */ - carry_32 = tcg_temp_new_i32(); - gen_helper_compute_C_icc(carry_32, tcg_env); - break; + if (TARGET_LONG_BITS == 64) { + gen_op_addc_int(dst, src1, src2, gen_add32_carry32(), update_cc); + return; } -#if TARGET_LONG_BITS == 64 - carry = tcg_temp_new(); - tcg_gen_extu_i32_i64(carry, carry_32); -#else - carry = carry_32; -#endif - - tcg_gen_add_tl(dst, src1, src2); - tcg_gen_add_tl(dst, dst, carry); + /* + * We can re-use the host's hardware carry generation by using + * an ADD2 opcode. We discard the low part of the output. + * Ideally we'd combine this operation with the add that + * generated the carry in the first place. 
+ */ + discard = tcg_temp_new(); + tcg_gen_add2_tl(discard, dst, cpu_cc_src, src1, cpu_cc_src2, src2); - add_done: if (update_cc) { + tcg_debug_assert(dst == cpu_cc_dst); tcg_gen_mov_tl(cpu_cc_src, src1); tcg_gen_mov_tl(cpu_cc_src2, src2); - tcg_gen_mov_tl(cpu_cc_dst, dst); - tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADDX); - dc->cc_op = CC_OP_ADDX; } } +static void gen_op_addc_add(TCGv dst, TCGv src1, TCGv src2) +{ + gen_op_addc_int_add(dst, src1, src2, false); +} + +static void gen_op_addccc_add(TCGv dst, TCGv src1, TCGv src2) +{ + gen_op_addc_int_add(dst, src1, src2, true); +} + +static void gen_op_addc_sub(TCGv dst, TCGv src1, TCGv src2) +{ + gen_op_addc_int(dst, src1, src2, gen_sub32_carry32(), false); +} + +static void gen_op_addccc_sub(TCGv dst, TCGv src1, TCGv src2) +{ + gen_op_addc_int(dst, src1, src2, gen_sub32_carry32(), true); +} + +static void gen_op_addc_int_generic(TCGv dst, TCGv src1, TCGv src2, + bool update_cc) +{ + TCGv_i32 carry_32 = tcg_temp_new_i32(); + gen_helper_compute_C_icc(carry_32, tcg_env); + gen_op_addc_int(dst, src1, src2, carry_32, update_cc); +} + +static void gen_op_addc_generic(TCGv dst, TCGv src1, TCGv src2) +{ + gen_op_addc_int_generic(dst, src1, src2, false); +} + +static void gen_op_addccc_generic(TCGv dst, TCGv src1, TCGv src2) +{ + gen_op_addc_int_generic(dst, src1, src2, true); +} + static void gen_op_sub_cc(TCGv dst, TCGv src1, TCGv src2) { tcg_gen_mov_tl(cpu_cc_src, src1); @@ -475,51 +532,11 @@ static void gen_op_sub_cc(TCGv dst, TCGv src1, TCGv src2) tcg_gen_mov_tl(dst, cpu_cc_dst); } -static void gen_op_subx_int(DisasContext *dc, TCGv dst, TCGv src1, - TCGv src2, int update_cc) +static void gen_op_subc_int(TCGv dst, TCGv src1, TCGv src2, + TCGv_i32 carry_32, bool update_cc) { - TCGv_i32 carry_32; TCGv carry; - switch (dc->cc_op) { - case CC_OP_DIV: - case CC_OP_LOGIC: - /* Carry is known to be zero. Fall back to plain SUB. */ - if (update_cc) { - gen_op_sub_cc(dst, src1, src2); - } else { - tcg_gen_sub_tl(dst, src1, src2); - } - return; - - case CC_OP_ADD: - case CC_OP_TADD: - case CC_OP_TADDTV: - carry_32 = gen_add32_carry32(); - break; - - case CC_OP_SUB: - case CC_OP_TSUB: - case CC_OP_TSUBTV: - if (TARGET_LONG_BITS == 32) { - /* We can re-use the host's hardware carry generation by using - a SUB2 opcode. We discard the low part of the output. - Ideally we'd combine this operation with the add that - generated the carry in the first place. */ - carry = tcg_temp_new(); - tcg_gen_sub2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2); - goto sub_done; - } - carry_32 = gen_sub32_carry32(); - break; - - default: - /* We need external help to produce the carry. 
*/ - carry_32 = tcg_temp_new_i32(); - gen_helper_compute_C_icc(carry_32, tcg_env); - break; - } - #if TARGET_LONG_BITS == 64 carry = tcg_temp_new(); tcg_gen_extu_i32_i64(carry, carry_32); @@ -530,16 +547,75 @@ static void gen_op_subx_int(DisasContext *dc, TCGv dst, TCGv src1, tcg_gen_sub_tl(dst, src1, src2); tcg_gen_sub_tl(dst, dst, carry); - sub_done: if (update_cc) { + tcg_debug_assert(dst == cpu_cc_dst); tcg_gen_mov_tl(cpu_cc_src, src1); tcg_gen_mov_tl(cpu_cc_src2, src2); - tcg_gen_mov_tl(cpu_cc_dst, dst); - tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUBX); - dc->cc_op = CC_OP_SUBX; } } +static void gen_op_subc_add(TCGv dst, TCGv src1, TCGv src2) +{ + gen_op_subc_int(dst, src1, src2, gen_add32_carry32(), false); +} + +static void gen_op_subccc_add(TCGv dst, TCGv src1, TCGv src2) +{ + gen_op_subc_int(dst, src1, src2, gen_add32_carry32(), true); +} + +static void gen_op_subc_int_sub(TCGv dst, TCGv src1, TCGv src2, bool update_cc) +{ + TCGv discard; + + if (TARGET_LONG_BITS == 64) { + gen_op_subc_int(dst, src1, src2, gen_sub32_carry32(), update_cc); + return; + } + + /* + * We can re-use the host's hardware carry generation by using + * a SUB2 opcode. We discard the low part of the output. + */ + discard = tcg_temp_new(); + tcg_gen_sub2_tl(discard, dst, cpu_cc_src, src1, cpu_cc_src2, src2); + + if (update_cc) { + tcg_debug_assert(dst == cpu_cc_dst); + tcg_gen_mov_tl(cpu_cc_src, src1); + tcg_gen_mov_tl(cpu_cc_src2, src2); + } +} + +static void gen_op_subc_sub(TCGv dst, TCGv src1, TCGv src2) +{ + gen_op_subc_int_sub(dst, src1, src2, false); +} + +static void gen_op_subccc_sub(TCGv dst, TCGv src1, TCGv src2) +{ + gen_op_subc_int_sub(dst, src1, src2, true); +} + +static void gen_op_subc_int_generic(TCGv dst, TCGv src1, TCGv src2, + bool update_cc) +{ + TCGv_i32 carry_32 = tcg_temp_new_i32(); + + gen_helper_compute_C_icc(carry_32, tcg_env); + gen_op_subc_int(dst, src1, src2, carry_32, update_cc); +} + +static void gen_op_subc_generic(TCGv dst, TCGv src1, TCGv src2) +{ + gen_op_subc_int_generic(dst, src1, src2, false); +} + +static void gen_op_subccc_generic(TCGv dst, TCGv src1, TCGv src2) +{ + gen_op_subc_int_generic(dst, src1, src2, true); +} + static void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2) { TCGv r_temp, zero, t0; @@ -616,6 +692,133 @@ static void gen_op_smul(TCGv dst, TCGv src1, TCGv src2) gen_op_multiply(dst, src1, src2, 1); } +static void gen_op_udivx(TCGv dst, TCGv src1, TCGv src2) +{ + gen_helper_udivx(dst, tcg_env, src1, src2); +} + +static void gen_op_sdivx(TCGv dst, TCGv src1, TCGv src2) +{ + gen_helper_sdivx(dst, tcg_env, src1, src2); +} + +static void gen_op_udiv(TCGv dst, TCGv src1, TCGv src2) +{ + gen_helper_udiv(dst, tcg_env, src1, src2); +} + +static void gen_op_sdiv(TCGv dst, TCGv src1, TCGv src2) +{ + gen_helper_sdiv(dst, tcg_env, src1, src2); +} + +static void gen_op_udivcc(TCGv dst, TCGv src1, TCGv src2) +{ + gen_helper_udiv_cc(dst, tcg_env, src1, src2); +} + +static void gen_op_sdivcc(TCGv dst, TCGv src1, TCGv src2) +{ + gen_helper_sdiv_cc(dst, tcg_env, src1, src2); +} + +static void gen_op_taddcctv(TCGv dst, TCGv src1, TCGv src2) +{ + gen_helper_taddcctv(dst, tcg_env, src1, src2); +} + +static void gen_op_tsubcctv(TCGv dst, TCGv src1, TCGv src2) +{ + gen_helper_tsubcctv(dst, tcg_env, src1, src2); +} + +static void gen_op_popc(TCGv dst, TCGv src1, TCGv src2) +{ + tcg_gen_ctpop_tl(dst, src2); +} + +#ifndef TARGET_SPARC64 +static void gen_helper_array8(TCGv dst, TCGv src1, TCGv src2) +{ + g_assert_not_reached(); +} +#endif + +static void gen_op_array16(TCGv dst, TCGv src1, 
TCGv src2) +{ + gen_helper_array8(dst, src1, src2); + tcg_gen_shli_tl(dst, dst, 1); +} + +static void gen_op_array32(TCGv dst, TCGv src1, TCGv src2) +{ + gen_helper_array8(dst, src1, src2); + tcg_gen_shli_tl(dst, dst, 2); +} + +static void gen_op_fpack16(TCGv_i32 dst, TCGv_i64 src) +{ +#ifdef TARGET_SPARC64 + gen_helper_fpack16(dst, cpu_gsr, src); +#else + g_assert_not_reached(); +#endif +} + +static void gen_op_fpackfix(TCGv_i32 dst, TCGv_i64 src) +{ +#ifdef TARGET_SPARC64 + gen_helper_fpackfix(dst, cpu_gsr, src); +#else + g_assert_not_reached(); +#endif +} + +static void gen_op_fpack32(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2) +{ +#ifdef TARGET_SPARC64 + gen_helper_fpack32(dst, cpu_gsr, src1, src2); +#else + g_assert_not_reached(); +#endif +} + +static void gen_op_faligndata(TCGv_i64 dst, TCGv_i64 s1, TCGv_i64 s2) +{ +#ifdef TARGET_SPARC64 + TCGv t1, t2, shift; + + t1 = tcg_temp_new(); + t2 = tcg_temp_new(); + shift = tcg_temp_new(); + + tcg_gen_andi_tl(shift, cpu_gsr, 7); + tcg_gen_shli_tl(shift, shift, 3); + tcg_gen_shl_tl(t1, s1, shift); + + /* + * A shift of 64 does not produce 0 in TCG. Divide this into a + * shift of (up to 63) followed by a constant shift of 1. + */ + tcg_gen_xori_tl(shift, shift, 63); + tcg_gen_shr_tl(t2, s2, shift); + tcg_gen_shri_tl(t2, t2, 1); + + tcg_gen_or_tl(dst, t1, t2); +#else + g_assert_not_reached(); +#endif +} + +static void gen_op_bshuffle(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2) +{ +#ifdef TARGET_SPARC64 + gen_helper_bshuffle(dst, cpu_gsr, src1, src2); +#else + g_assert_not_reached(); +#endif +} + // 1 static void gen_op_eval_ba(TCGv dst) { @@ -884,47 +1087,6 @@ static void gen_branch2(DisasContext *dc, target_ulong pc1, gen_goto_tb(dc, 1, pc2, pc2 + 4); } -static void gen_branch_a(DisasContext *dc, target_ulong pc1) -{ - TCGLabel *l1 = gen_new_label(); - target_ulong npc = dc->npc; - - tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_cond, 0, l1); - - gen_goto_tb(dc, 0, npc, pc1); - - gen_set_label(l1); - gen_goto_tb(dc, 1, npc + 4, npc + 8); - - dc->base.is_jmp = DISAS_NORETURN; -} - -static void gen_branch_n(DisasContext *dc, target_ulong pc1) -{ - target_ulong npc = dc->npc; - - if (npc & 3) { - switch (npc) { - case DYNAMIC_PC: - case DYNAMIC_PC_LOOKUP: - tcg_gen_mov_tl(cpu_pc, cpu_npc); - tcg_gen_addi_tl(cpu_npc, cpu_npc, 4); - tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, - cpu_cond, tcg_constant_tl(0), - tcg_constant_tl(pc1), cpu_npc); - dc->pc = npc; - break; - default: - g_assert_not_reached(); - } - } else { - dc->pc = npc; - dc->jump_pc[0] = pc1; - dc->jump_pc[1] = npc + 4; - dc->npc = JUMP_PC; - } -} - static void gen_generic_branch(DisasContext *dc) { TCGv npc0 = tcg_constant_tl(dc->jump_pc[0]); @@ -984,9 +1146,38 @@ static void gen_exception(DisasContext *dc, int which) dc->base.is_jmp = DISAS_NORETURN; } -static void gen_check_align(TCGv addr, int mask) +static TCGLabel *delay_exceptionv(DisasContext *dc, TCGv_i32 excp) { - gen_helper_check_align(tcg_env, addr, tcg_constant_i32(mask)); + DisasDelayException *e = g_new0(DisasDelayException, 1); + + e->next = dc->delay_excp_list; + dc->delay_excp_list = e; + + e->lab = gen_new_label(); + e->excp = excp; + e->pc = dc->pc; + /* Caller must have used flush_cond before branch. 
*/ + assert(e->npc != JUMP_PC); + e->npc = dc->npc; + + return e->lab; +} + +static TCGLabel *delay_exception(DisasContext *dc, int excp) +{ + return delay_exceptionv(dc, tcg_constant_i32(excp)); +} + +static void gen_check_align(DisasContext *dc, TCGv addr, int mask) +{ + TCGv t = tcg_temp_new(); + TCGLabel *lab; + + tcg_gen_andi_tl(t, addr, mask); + + flush_cond(dc); + lab = delay_exception(dc, TT_UNALIGNED); + tcg_gen_brcondi_tl(TCG_COND_NE, t, 0, lab); } static void gen_mov_pc_npc(DisasContext *dc) @@ -1264,41 +1455,13 @@ static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond) } } -static void gen_cond(TCGv r_dst, unsigned int cc, unsigned int cond, - DisasContext *dc) -{ - DisasCompare cmp; - gen_compare(&cmp, cc, cond, dc); - - /* The interface is to return a boolean in r_dst. */ - if (cmp.is_bool) { - tcg_gen_mov_tl(r_dst, cmp.c1); - } else { - tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2); - } -} - -static void gen_fcond(TCGv r_dst, unsigned int cc, unsigned int cond) -{ - DisasCompare cmp; - gen_fcompare(&cmp, cc, cond); - - /* The interface is to return a boolean in r_dst. */ - if (cmp.is_bool) { - tcg_gen_mov_tl(r_dst, cmp.c1); - } else { - tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2); - } -} - -#ifdef TARGET_SPARC64 // Inverted logic -static const int gen_tcg_cond_reg[8] = { - -1, +static const TCGCond gen_tcg_cond_reg[8] = { + TCG_COND_NEVER, /* reserved */ TCG_COND_NE, TCG_COND_GT, TCG_COND_GE, - -1, + TCG_COND_NEVER, /* reserved */ TCG_COND_EQ, TCG_COND_LE, TCG_COND_LT, @@ -1312,115 +1475,48 @@ static void gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src) cmp->c2 = tcg_constant_tl(0); } -static void gen_cond_reg(TCGv r_dst, int cond, TCGv r_src) +static void gen_op_clear_ieee_excp_and_FTT(void) { - DisasCompare cmp; - gen_compare_reg(&cmp, cond, r_src); - - /* The interface is to return a boolean in r_dst. 
*/ - tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2); + tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_CEXC_NMASK); } -#endif -static void do_branch(DisasContext *dc, int32_t offset, uint32_t insn, int cc) +static void gen_op_fmovs(TCGv_i32 dst, TCGv_i32 src) { - unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29)); - target_ulong target = dc->pc + offset; + gen_op_clear_ieee_excp_and_FTT(); + tcg_gen_mov_i32(dst, src); +} -#ifdef TARGET_SPARC64 - if (unlikely(AM_CHECK(dc))) { - target &= 0xffffffffULL; - } -#endif - if (cond == 0x0) { - /* unconditional not taken */ - if (a) { - dc->pc = dc->npc + 4; - dc->npc = dc->pc + 4; - } else { - dc->pc = dc->npc; - dc->npc = dc->pc + 4; - } - } else if (cond == 0x8) { - /* unconditional taken */ - if (a) { - dc->pc = target; - dc->npc = dc->pc + 4; - } else { - dc->pc = dc->npc; - dc->npc = target; - tcg_gen_mov_tl(cpu_pc, cpu_npc); - } - } else { - flush_cond(dc); - gen_cond(cpu_cond, cc, cond, dc); - if (a) { - gen_branch_a(dc, target); - } else { - gen_branch_n(dc, target); - } - } +static void gen_op_fnegs(TCGv_i32 dst, TCGv_i32 src) +{ + gen_op_clear_ieee_excp_and_FTT(); + gen_helper_fnegs(dst, src); } -static void do_fbranch(DisasContext *dc, int32_t offset, uint32_t insn, int cc) +static void gen_op_fabss(TCGv_i32 dst, TCGv_i32 src) { - unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29)); - target_ulong target = dc->pc + offset; + gen_op_clear_ieee_excp_and_FTT(); + gen_helper_fabss(dst, src); +} -#ifdef TARGET_SPARC64 - if (unlikely(AM_CHECK(dc))) { - target &= 0xffffffffULL; - } -#endif - if (cond == 0x0) { - /* unconditional not taken */ - if (a) { - dc->pc = dc->npc + 4; - dc->npc = dc->pc + 4; - } else { - dc->pc = dc->npc; - dc->npc = dc->pc + 4; - } - } else if (cond == 0x8) { - /* unconditional taken */ - if (a) { - dc->pc = target; - dc->npc = dc->pc + 4; - } else { - dc->pc = dc->npc; - dc->npc = target; - tcg_gen_mov_tl(cpu_pc, cpu_npc); - } - } else { - flush_cond(dc); - gen_fcond(cpu_cond, cc, cond); - if (a) { - gen_branch_a(dc, target); - } else { - gen_branch_n(dc, target); - } - } +static void gen_op_fmovd(TCGv_i64 dst, TCGv_i64 src) +{ + gen_op_clear_ieee_excp_and_FTT(); + tcg_gen_mov_i64(dst, src); } -#ifdef TARGET_SPARC64 -static void do_branch_reg(DisasContext *dc, int32_t offset, uint32_t insn, - TCGv r_reg) +static void gen_op_fnegd(TCGv_i64 dst, TCGv_i64 src) { - unsigned int cond = GET_FIELD_SP(insn, 25, 27), a = (insn & (1 << 29)); - target_ulong target = dc->pc + offset; + gen_op_clear_ieee_excp_and_FTT(); + gen_helper_fnegd(dst, src); +} - if (unlikely(AM_CHECK(dc))) { - target &= 0xffffffffULL; - } - flush_cond(dc); - gen_cond_reg(cpu_cond, cond, r_reg); - if (a) { - gen_branch_a(dc, target); - } else { - gen_branch_n(dc, target); - } +static void gen_op_fabsd(TCGv_i64 dst, TCGv_i64 src) +{ + gen_op_clear_ieee_excp_and_FTT(); + gen_helper_fabsd(dst, src); } +#ifdef TARGET_SPARC64 static void gen_op_fcmps(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2) { switch (fccno) { @@ -1580,343 +1676,7 @@ static int gen_trap_ifnofpu(DisasContext *dc) return 0; } -static void gen_op_clear_ieee_excp_and_FTT(void) -{ - tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_CEXC_NMASK); -} - -static void gen_fop_FF(DisasContext *dc, int rd, int rs, - void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32)) -{ - TCGv_i32 dst, src; - - src = gen_load_fpr_F(dc, rs); - dst = gen_dest_fpr_F(dc); - - gen(dst, tcg_env, src); - gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env); - - gen_store_fpr_F(dc, rd, dst); -} - -static void 
gen_ne_fop_FF(DisasContext *dc, int rd, int rs, - void (*gen)(TCGv_i32, TCGv_i32)) -{ - TCGv_i32 dst, src; - - src = gen_load_fpr_F(dc, rs); - dst = gen_dest_fpr_F(dc); - - gen(dst, src); - - gen_store_fpr_F(dc, rd, dst); -} - -static void gen_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2, - void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32)) -{ - TCGv_i32 dst, src1, src2; - - src1 = gen_load_fpr_F(dc, rs1); - src2 = gen_load_fpr_F(dc, rs2); - dst = gen_dest_fpr_F(dc); - - gen(dst, tcg_env, src1, src2); - gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env); - - gen_store_fpr_F(dc, rd, dst); -} - -#ifdef TARGET_SPARC64 -static void gen_ne_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2, - void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32)) -{ - TCGv_i32 dst, src1, src2; - - src1 = gen_load_fpr_F(dc, rs1); - src2 = gen_load_fpr_F(dc, rs2); - dst = gen_dest_fpr_F(dc); - - gen(dst, src1, src2); - - gen_store_fpr_F(dc, rd, dst); -} -#endif - -static void gen_fop_DD(DisasContext *dc, int rd, int rs, - void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64)) -{ - TCGv_i64 dst, src; - - src = gen_load_fpr_D(dc, rs); - dst = gen_dest_fpr_D(dc, rd); - - gen(dst, tcg_env, src); - gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env); - - gen_store_fpr_D(dc, rd, dst); -} - -#ifdef TARGET_SPARC64 -static void gen_ne_fop_DD(DisasContext *dc, int rd, int rs, - void (*gen)(TCGv_i64, TCGv_i64)) -{ - TCGv_i64 dst, src; - - src = gen_load_fpr_D(dc, rs); - dst = gen_dest_fpr_D(dc, rd); - - gen(dst, src); - - gen_store_fpr_D(dc, rd, dst); -} -#endif - -static void gen_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2, - void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64)) -{ - TCGv_i64 dst, src1, src2; - - src1 = gen_load_fpr_D(dc, rs1); - src2 = gen_load_fpr_D(dc, rs2); - dst = gen_dest_fpr_D(dc, rd); - - gen(dst, tcg_env, src1, src2); - gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env); - - gen_store_fpr_D(dc, rd, dst); -} - -#ifdef TARGET_SPARC64 -static void gen_ne_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2, - void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64)) -{ - TCGv_i64 dst, src1, src2; - - src1 = gen_load_fpr_D(dc, rs1); - src2 = gen_load_fpr_D(dc, rs2); - dst = gen_dest_fpr_D(dc, rd); - - gen(dst, src1, src2); - - gen_store_fpr_D(dc, rd, dst); -} - -static void gen_gsr_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2, - void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64)) -{ - TCGv_i64 dst, src1, src2; - - src1 = gen_load_fpr_D(dc, rs1); - src2 = gen_load_fpr_D(dc, rs2); - dst = gen_dest_fpr_D(dc, rd); - - gen(dst, cpu_gsr, src1, src2); - - gen_store_fpr_D(dc, rd, dst); -} - -static void gen_ne_fop_DDDD(DisasContext *dc, int rd, int rs1, int rs2, - void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64)) -{ - TCGv_i64 dst, src0, src1, src2; - - src1 = gen_load_fpr_D(dc, rs1); - src2 = gen_load_fpr_D(dc, rs2); - src0 = gen_load_fpr_D(dc, rd); - dst = gen_dest_fpr_D(dc, rd); - - gen(dst, src0, src1, src2); - - gen_store_fpr_D(dc, rd, dst); -} -#endif - -static void gen_fop_QQ(DisasContext *dc, int rd, int rs, - void (*gen)(TCGv_ptr)) -{ - gen_op_load_fpr_QT1(QFPREG(rs)); - - gen(tcg_env); - gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env); - - gen_op_store_QT0_fpr(QFPREG(rd)); - gen_update_fprs_dirty(dc, QFPREG(rd)); -} - -#ifdef TARGET_SPARC64 -static void gen_ne_fop_QQ(DisasContext *dc, int rd, int rs, - void (*gen)(TCGv_ptr)) -{ - gen_op_load_fpr_QT1(QFPREG(rs)); - - gen(tcg_env); - - gen_op_store_QT0_fpr(QFPREG(rd)); - gen_update_fprs_dirty(dc, QFPREG(rd)); -} -#endif - -static void gen_fop_QQQ(DisasContext *dc, 
int rd, int rs1, int rs2, - void (*gen)(TCGv_ptr)) -{ - gen_op_load_fpr_QT0(QFPREG(rs1)); - gen_op_load_fpr_QT1(QFPREG(rs2)); - - gen(tcg_env); - gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env); - - gen_op_store_QT0_fpr(QFPREG(rd)); - gen_update_fprs_dirty(dc, QFPREG(rd)); -} - -static void gen_fop_DFF(DisasContext *dc, int rd, int rs1, int rs2, - void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32, TCGv_i32)) -{ - TCGv_i64 dst; - TCGv_i32 src1, src2; - - src1 = gen_load_fpr_F(dc, rs1); - src2 = gen_load_fpr_F(dc, rs2); - dst = gen_dest_fpr_D(dc, rd); - - gen(dst, tcg_env, src1, src2); - gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env); - - gen_store_fpr_D(dc, rd, dst); -} - -static void gen_fop_QDD(DisasContext *dc, int rd, int rs1, int rs2, - void (*gen)(TCGv_ptr, TCGv_i64, TCGv_i64)) -{ - TCGv_i64 src1, src2; - - src1 = gen_load_fpr_D(dc, rs1); - src2 = gen_load_fpr_D(dc, rs2); - - gen(tcg_env, src1, src2); - gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env); - - gen_op_store_QT0_fpr(QFPREG(rd)); - gen_update_fprs_dirty(dc, QFPREG(rd)); -} - -#ifdef TARGET_SPARC64 -static void gen_fop_DF(DisasContext *dc, int rd, int rs, - void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32)) -{ - TCGv_i64 dst; - TCGv_i32 src; - - src = gen_load_fpr_F(dc, rs); - dst = gen_dest_fpr_D(dc, rd); - - gen(dst, tcg_env, src); - gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env); - - gen_store_fpr_D(dc, rd, dst); -} -#endif - -static void gen_ne_fop_DF(DisasContext *dc, int rd, int rs, - void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32)) -{ - TCGv_i64 dst; - TCGv_i32 src; - - src = gen_load_fpr_F(dc, rs); - dst = gen_dest_fpr_D(dc, rd); - - gen(dst, tcg_env, src); - - gen_store_fpr_D(dc, rd, dst); -} - -static void gen_fop_FD(DisasContext *dc, int rd, int rs, - void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i64)) -{ - TCGv_i32 dst; - TCGv_i64 src; - - src = gen_load_fpr_D(dc, rs); - dst = gen_dest_fpr_F(dc); - - gen(dst, tcg_env, src); - gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env); - - gen_store_fpr_F(dc, rd, dst); -} - -static void gen_fop_FQ(DisasContext *dc, int rd, int rs, - void (*gen)(TCGv_i32, TCGv_ptr)) -{ - TCGv_i32 dst; - - gen_op_load_fpr_QT1(QFPREG(rs)); - dst = gen_dest_fpr_F(dc); - - gen(dst, tcg_env); - gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env); - - gen_store_fpr_F(dc, rd, dst); -} - -static void gen_fop_DQ(DisasContext *dc, int rd, int rs, - void (*gen)(TCGv_i64, TCGv_ptr)) -{ - TCGv_i64 dst; - - gen_op_load_fpr_QT1(QFPREG(rs)); - dst = gen_dest_fpr_D(dc, rd); - - gen(dst, tcg_env); - gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env); - - gen_store_fpr_D(dc, rd, dst); -} - -static void gen_ne_fop_QF(DisasContext *dc, int rd, int rs, - void (*gen)(TCGv_ptr, TCGv_i32)) -{ - TCGv_i32 src; - - src = gen_load_fpr_F(dc, rs); - - gen(tcg_env, src); - - gen_op_store_QT0_fpr(QFPREG(rd)); - gen_update_fprs_dirty(dc, QFPREG(rd)); -} - -static void gen_ne_fop_QD(DisasContext *dc, int rd, int rs, - void (*gen)(TCGv_ptr, TCGv_i64)) -{ - TCGv_i64 src; - - src = gen_load_fpr_D(dc, rs); - - gen(tcg_env, src); - - gen_op_store_QT0_fpr(QFPREG(rd)); - gen_update_fprs_dirty(dc, QFPREG(rd)); -} - -static void gen_swap(DisasContext *dc, TCGv dst, TCGv src, - TCGv addr, int mmu_idx, MemOp memop) -{ - gen_address_mask(dc, addr); - tcg_gen_atomic_xchg_tl(dst, addr, src, mmu_idx, memop | MO_ALIGN); -} - -static void gen_ldstub(DisasContext *dc, TCGv dst, TCGv addr, int mmu_idx) -{ - TCGv m1 = tcg_constant_tl(0xff); - gen_address_mask(dc, addr); - tcg_gen_atomic_xchg_tl(dst, addr, m1, mmu_idx, MO_UB); -} - /* asi moves */ -#if 
!defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64) typedef enum { GET_ASI_HELPER, GET_ASI_EXCP, @@ -1935,15 +1695,25 @@ typedef struct { MemOp memop; } DisasASI; -static DisasASI get_asi(DisasContext *dc, int insn, MemOp memop) +/* + * Build DisasASI. + * For asi == -1, treat as non-asi. + * For ask == -2, treat as immediate offset (v8 error, v9 %asi). + */ +static DisasASI resolve_asi(DisasContext *dc, int asi, MemOp memop) { - int asi = GET_FIELD(insn, 19, 26); ASIType type = GET_ASI_HELPER; int mem_idx = dc->mem_idx; + if (asi == -1) { + /* Artificial "non-asi" case. */ + type = GET_ASI_DIRECT; + goto done; + } + #ifndef TARGET_SPARC64 /* Before v9, all asis are immediate and privileged. */ - if (IS_IMM) { + if (asi < 0) { gen_exception(dc, TT_ILL_INSN); type = GET_ASI_EXCP; } else if (supervisor(dc) @@ -1986,7 +1756,7 @@ static DisasASI get_asi(DisasContext *dc, int insn, MemOp memop) type = GET_ASI_EXCP; } #else - if (IS_IMM) { + if (asi < 0) { asi = dc->asi; } /* With v9, all asis below 0x80 are privileged. */ @@ -2145,28 +1915,39 @@ static DisasASI get_asi(DisasContext *dc, int insn, MemOp memop) } #endif + done: return (DisasASI){ type, asi, mem_idx, memop }; } -static void gen_ld_asi(DisasContext *dc, TCGv dst, TCGv addr, - int insn, MemOp memop) +#if defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64) +static void gen_helper_ld_asi(TCGv_i64 r, TCGv_env e, TCGv a, + TCGv_i32 asi, TCGv_i32 mop) +{ + g_assert_not_reached(); +} + +static void gen_helper_st_asi(TCGv_env e, TCGv a, TCGv_i64 r, + TCGv_i32 asi, TCGv_i32 mop) { - DisasASI da = get_asi(dc, insn, memop); + g_assert_not_reached(); +} +#endif - switch (da.type) { +static void gen_ld_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr) +{ + switch (da->type) { case GET_ASI_EXCP: break; case GET_ASI_DTWINX: /* Reserved for ldda. */ gen_exception(dc, TT_ILL_INSN); break; case GET_ASI_DIRECT: - gen_address_mask(dc, addr); - tcg_gen_qemu_ld_tl(dst, addr, da.mem_idx, da.memop | MO_ALIGN); + tcg_gen_qemu_ld_tl(dst, addr, da->mem_idx, da->memop | MO_ALIGN); break; default: { - TCGv_i32 r_asi = tcg_constant_i32(da.asi); - TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN); + TCGv_i32 r_asi = tcg_constant_i32(da->asi); + TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN); save_state(dc); #ifdef TARGET_SPARC64 @@ -2183,34 +1964,30 @@ static void gen_ld_asi(DisasContext *dc, TCGv dst, TCGv addr, } } -static void gen_st_asi(DisasContext *dc, TCGv src, TCGv addr, - int insn, MemOp memop) +static void gen_st_asi(DisasContext *dc, DisasASI *da, TCGv src, TCGv addr) { - DisasASI da = get_asi(dc, insn, memop); - - switch (da.type) { + switch (da->type) { case GET_ASI_EXCP: break; + case GET_ASI_DTWINX: /* Reserved for stda. 
*/ -#ifndef TARGET_SPARC64 - gen_exception(dc, TT_ILL_INSN); - break; -#else - if (!(dc->def->features & CPU_FEATURE_HYPV)) { + if (TARGET_LONG_BITS == 32) { + gen_exception(dc, TT_ILL_INSN); + break; + } else if (!(dc->def->features & CPU_FEATURE_HYPV)) { /* Pre OpenSPARC CPUs don't have these */ gen_exception(dc, TT_ILL_INSN); - return; + break; } - /* in OpenSPARC T1+ CPUs TWINX ASIs in store instructions - * are ST_BLKINIT_ ASIs */ -#endif + /* In OpenSPARC T1+ CPUs TWINX ASIs in store are ST_BLKINIT_ ASIs */ /* fall through */ + case GET_ASI_DIRECT: - gen_address_mask(dc, addr); - tcg_gen_qemu_st_tl(src, addr, da.mem_idx, da.memop | MO_ALIGN); + tcg_gen_qemu_st_tl(src, addr, da->mem_idx, da->memop | MO_ALIGN); break; -#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY) + case GET_ASI_BCOPY: + assert(TARGET_LONG_BITS == 32); /* Copy 32 bytes from the address in SRC to ADDR. */ /* ??? The original qemu code suggests 4-byte alignment, dropping the low bits, but the only place I can see this used is in the @@ -2228,18 +2005,18 @@ static void gen_st_asi(DisasContext *dc, TCGv src, TCGv addr, for (i = 0; i < 32; i += 4) { /* Since the loads and stores are paired, allow the copy to happen in the host endianness. */ - tcg_gen_qemu_ld_i32(tmp, saddr, da.mem_idx, MO_UL); - tcg_gen_qemu_st_i32(tmp, daddr, da.mem_idx, MO_UL); + tcg_gen_qemu_ld_i32(tmp, saddr, da->mem_idx, MO_UL); + tcg_gen_qemu_st_i32(tmp, daddr, da->mem_idx, MO_UL); tcg_gen_add_tl(saddr, saddr, four); tcg_gen_add_tl(daddr, daddr, four); } } break; -#endif + default: { - TCGv_i32 r_asi = tcg_constant_i32(da.asi); - TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN); + TCGv_i32 r_asi = tcg_constant_i32(da->asi); + TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN); save_state(dc); #ifdef TARGET_SPARC64 @@ -2259,16 +2036,15 @@ static void gen_st_asi(DisasContext *dc, TCGv src, TCGv addr, } } -static void gen_swap_asi(DisasContext *dc, TCGv dst, TCGv src, - TCGv addr, int insn) +static void gen_swap_asi(DisasContext *dc, DisasASI *da, + TCGv dst, TCGv src, TCGv addr) { - DisasASI da = get_asi(dc, insn, MO_TEUL); - - switch (da.type) { + switch (da->type) { case GET_ASI_EXCP: break; case GET_ASI_DIRECT: - gen_swap(dc, dst, src, addr, da.mem_idx, da.memop); + tcg_gen_atomic_xchg_tl(dst, addr, src, + da->mem_idx, da->memop | MO_ALIGN); break; default: /* ??? Should be DAE_invalid_asi. */ @@ -2277,20 +2053,15 @@ static void gen_swap_asi(DisasContext *dc, TCGv dst, TCGv src, } } -static void gen_cas_asi(DisasContext *dc, TCGv addr, TCGv cmpv, - int insn, int rd) +static void gen_cas_asi(DisasContext *dc, DisasASI *da, + TCGv oldv, TCGv newv, TCGv cmpv, TCGv addr) { - DisasASI da = get_asi(dc, insn, MO_TEUL); - TCGv oldv; - - switch (da.type) { + switch (da->type) { case GET_ASI_EXCP: return; case GET_ASI_DIRECT: - oldv = tcg_temp_new(); - tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, gen_load_gpr(dc, rd), - da.mem_idx, da.memop | MO_ALIGN); - gen_store_gpr(dc, rd, oldv); + tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, newv, + da->mem_idx, da->memop | MO_ALIGN); break; default: /* ??? Should be DAE_invalid_asi. 
*/ @@ -2299,15 +2070,14 @@ static void gen_cas_asi(DisasContext *dc, TCGv addr, TCGv cmpv, } } -static void gen_ldstub_asi(DisasContext *dc, TCGv dst, TCGv addr, int insn) +static void gen_ldstub_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr) { - DisasASI da = get_asi(dc, insn, MO_UB); - - switch (da.type) { + switch (da->type) { case GET_ASI_EXCP: break; case GET_ASI_DIRECT: - gen_ldstub(dc, dst, addr, da.mem_idx); + tcg_gen_atomic_xchg_tl(dst, addr, tcg_constant_tl(0xff), + da->mem_idx, MO_UB); break; default: /* ??? In theory, this should be raise DAE_invalid_asi. @@ -2315,7 +2085,7 @@ static void gen_ldstub_asi(DisasContext *dc, TCGv dst, TCGv addr, int insn) if (tb_cflags(dc->base.tb) & CF_PARALLEL) { gen_helper_exit_atomic(tcg_env); } else { - TCGv_i32 r_asi = tcg_constant_i32(da.asi); + TCGv_i32 r_asi = tcg_constant_i32(da->asi); TCGv_i32 r_mop = tcg_constant_i32(MO_UB); TCGv_i64 s64, t64; @@ -2334,38 +2104,44 @@ static void gen_ldstub_asi(DisasContext *dc, TCGv dst, TCGv addr, int insn) break; } } -#endif -#ifdef TARGET_SPARC64 -static void gen_ldf_asi(DisasContext *dc, TCGv addr, - int insn, int size, int rd) +static void gen_ldf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size, + TCGv addr, int rd) { - DisasASI da = get_asi(dc, insn, (size == 4 ? MO_TEUL : MO_TEUQ)); + MemOp memop = da->memop; + MemOp size = memop & MO_SIZE; TCGv_i32 d32; TCGv_i64 d64; + TCGv addr_tmp; + + /* TODO: Use 128-bit load/store below. */ + if (size == MO_128) { + memop = (memop & ~MO_SIZE) | MO_64; + } - switch (da.type) { + switch (da->type) { case GET_ASI_EXCP: break; case GET_ASI_DIRECT: - gen_address_mask(dc, addr); + memop |= MO_ALIGN_4; switch (size) { - case 4: + case MO_32: d32 = gen_dest_fpr_F(dc); - tcg_gen_qemu_ld_i32(d32, addr, da.mem_idx, da.memop | MO_ALIGN); + tcg_gen_qemu_ld_i32(d32, addr, da->mem_idx, memop); gen_store_fpr_F(dc, rd, d32); break; - case 8: - tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da.mem_idx, - da.memop | MO_ALIGN_4); + + case MO_64: + tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da->mem_idx, memop); break; - case 16: + + case MO_128: d64 = tcg_temp_new_i64(); - tcg_gen_qemu_ld_i64(d64, addr, da.mem_idx, da.memop | MO_ALIGN_4); - tcg_gen_addi_tl(addr, addr, 8); - tcg_gen_qemu_ld_i64(cpu_fpr[rd/2+1], addr, da.mem_idx, - da.memop | MO_ALIGN_4); + tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop); + addr_tmp = tcg_temp_new(); + tcg_gen_addi_tl(addr_tmp, addr, 8); + tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + 1], addr_tmp, da->mem_idx, memop); tcg_gen_mov_i64(cpu_fpr[rd / 2], d64); break; default: @@ -2375,24 +2151,17 @@ static void gen_ldf_asi(DisasContext *dc, TCGv addr, case GET_ASI_BLOCK: /* Valid for lddfa on aligned registers only. */ - if (size == 8 && (rd & 7) == 0) { - MemOp memop; - TCGv eight; - int i; - - gen_address_mask(dc, addr); - + if (orig_size == MO_64 && (rd & 7) == 0) { /* The first operation checks required alignment. */ - memop = da.memop | MO_ALIGN_64; - eight = tcg_constant_tl(8); - for (i = 0; ; ++i) { - tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + i], addr, - da.mem_idx, memop); + addr_tmp = tcg_temp_new(); + for (int i = 0; ; ++i) { + tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + i], addr, da->mem_idx, + memop | (i == 0 ? MO_ALIGN_64 : 0)); if (i == 7) { break; } - tcg_gen_add_tl(addr, addr, eight); - memop = da.memop; + tcg_gen_addi_tl(addr_tmp, addr, 8); + addr = addr_tmp; } } else { gen_exception(dc, TT_ILL_INSN); @@ -2401,10 +2170,9 @@ static void gen_ldf_asi(DisasContext *dc, TCGv addr, case GET_ASI_SHORT: /* Valid for lddfa only. 
*/ - if (size == 8) { - gen_address_mask(dc, addr); - tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da.mem_idx, - da.memop | MO_ALIGN); + if (orig_size == MO_64) { + tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da->mem_idx, + memop | MO_ALIGN); } else { gen_exception(dc, TT_ILL_INSN); } @@ -2412,8 +2180,8 @@ static void gen_ldf_asi(DisasContext *dc, TCGv addr, default: { - TCGv_i32 r_asi = tcg_constant_i32(da.asi); - TCGv_i32 r_mop = tcg_constant_i32(da.memop | MO_ALIGN); + TCGv_i32 r_asi = tcg_constant_i32(da->asi); + TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN); save_state(dc); /* According to the table in the UA2011 manual, the only @@ -2421,21 +2189,24 @@ static void gen_ldf_asi(DisasContext *dc, TCGv addr, the NO_FAULT asis. We still need a helper for these, but we can just use the integer asi helper for them. */ switch (size) { - case 4: + case MO_32: d64 = tcg_temp_new_i64(); gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop); d32 = gen_dest_fpr_F(dc); tcg_gen_extrl_i64_i32(d32, d64); gen_store_fpr_F(dc, rd, d32); break; - case 8: - gen_helper_ld_asi(cpu_fpr[rd / 2], tcg_env, addr, r_asi, r_mop); + case MO_64: + gen_helper_ld_asi(cpu_fpr[rd / 2], tcg_env, addr, + r_asi, r_mop); break; - case 16: + case MO_128: d64 = tcg_temp_new_i64(); gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop); - tcg_gen_addi_tl(addr, addr, 8); - gen_helper_ld_asi(cpu_fpr[rd/2+1], tcg_env, addr, r_asi, r_mop); + addr_tmp = tcg_temp_new(); + tcg_gen_addi_tl(addr_tmp, addr, 8); + gen_helper_ld_asi(cpu_fpr[rd / 2 + 1], tcg_env, addr_tmp, + r_asi, r_mop); tcg_gen_mov_i64(cpu_fpr[rd / 2], d64); break; default: @@ -2446,37 +2217,45 @@ static void gen_ldf_asi(DisasContext *dc, TCGv addr, } } -static void gen_stf_asi(DisasContext *dc, TCGv addr, - int insn, int size, int rd) +static void gen_stf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size, + TCGv addr, int rd) { - DisasASI da = get_asi(dc, insn, (size == 4 ? MO_TEUL : MO_TEUQ)); + MemOp memop = da->memop; + MemOp size = memop & MO_SIZE; TCGv_i32 d32; + TCGv addr_tmp; + + /* TODO: Use 128-bit load/store below. */ + if (size == MO_128) { + memop = (memop & ~MO_SIZE) | MO_64; + } - switch (da.type) { + switch (da->type) { case GET_ASI_EXCP: break; case GET_ASI_DIRECT: - gen_address_mask(dc, addr); + memop |= MO_ALIGN_4; switch (size) { - case 4: + case MO_32: d32 = gen_load_fpr_F(dc, rd); - tcg_gen_qemu_st_i32(d32, addr, da.mem_idx, da.memop | MO_ALIGN); + tcg_gen_qemu_st_i32(d32, addr, da->mem_idx, memop | MO_ALIGN); break; - case 8: - tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx, - da.memop | MO_ALIGN_4); + case MO_64: + tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da->mem_idx, + memop | MO_ALIGN_4); break; - case 16: + case MO_128: /* Only 4-byte alignment required. However, it is legal for the cpu to signal the alignment fault, and the OS trap handler is required to fix it up. Requiring 16-byte alignment here avoids having to probe the second page before performing the first write. 
*/ - tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx, - da.memop | MO_ALIGN_16); - tcg_gen_addi_tl(addr, addr, 8); - tcg_gen_qemu_st_i64(cpu_fpr[rd/2+1], addr, da.mem_idx, da.memop); + tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da->mem_idx, + memop | MO_ALIGN_16); + addr_tmp = tcg_temp_new(); + tcg_gen_addi_tl(addr_tmp, addr, 8); + tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + 1], addr_tmp, da->mem_idx, memop); break; default: g_assert_not_reached(); @@ -2485,24 +2264,17 @@ static void gen_stf_asi(DisasContext *dc, TCGv addr, case GET_ASI_BLOCK: /* Valid for stdfa on aligned registers only. */ - if (size == 8 && (rd & 7) == 0) { - MemOp memop; - TCGv eight; - int i; - - gen_address_mask(dc, addr); - + if (orig_size == MO_64 && (rd & 7) == 0) { /* The first operation checks required alignment. */ - memop = da.memop | MO_ALIGN_64; - eight = tcg_constant_tl(8); - for (i = 0; ; ++i) { - tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + i], addr, - da.mem_idx, memop); + addr_tmp = tcg_temp_new(); + for (int i = 0; ; ++i) { + tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + i], addr, da->mem_idx, + memop | (i == 0 ? MO_ALIGN_64 : 0)); if (i == 7) { break; } - tcg_gen_add_tl(addr, addr, eight); - memop = da.memop; + tcg_gen_addi_tl(addr_tmp, addr, 8); + addr = addr_tmp; } } else { gen_exception(dc, TT_ILL_INSN); @@ -2511,10 +2283,9 @@ static void gen_stf_asi(DisasContext *dc, TCGv addr, case GET_ASI_SHORT: /* Valid for stdfa only. */ - if (size == 8) { - gen_address_mask(dc, addr); - tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx, - da.memop | MO_ALIGN); + if (orig_size == MO_64) { + tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da->mem_idx, + memop | MO_ALIGN); } else { gen_exception(dc, TT_ILL_INSN); } @@ -2529,37 +2300,51 @@ static void gen_stf_asi(DisasContext *dc, TCGv addr, } } -static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd) +static void gen_ldda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd) { - DisasASI da = get_asi(dc, insn, MO_TEUQ); - TCGv_i64 hi = gen_dest_gpr(dc, rd); - TCGv_i64 lo = gen_dest_gpr(dc, rd + 1); + TCGv hi = gen_dest_gpr(dc, rd); + TCGv lo = gen_dest_gpr(dc, rd + 1); - switch (da.type) { + switch (da->type) { case GET_ASI_EXCP: return; case GET_ASI_DTWINX: - gen_address_mask(dc, addr); - tcg_gen_qemu_ld_i64(hi, addr, da.mem_idx, da.memop | MO_ALIGN_16); - tcg_gen_addi_tl(addr, addr, 8); - tcg_gen_qemu_ld_i64(lo, addr, da.mem_idx, da.memop); +#ifdef TARGET_SPARC64 + { + MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16; + TCGv_i128 t = tcg_temp_new_i128(); + + tcg_gen_qemu_ld_i128(t, addr, da->mem_idx, mop); + /* + * Note that LE twinx acts as if each 64-bit register result is + * byte swapped. We perform one 128-bit LE load, so must swap + * the order of the writebacks. + */ + if ((mop & MO_BSWAP) == MO_TE) { + tcg_gen_extr_i128_i64(lo, hi, t); + } else { + tcg_gen_extr_i128_i64(hi, lo, t); + } + } break; +#else + g_assert_not_reached(); +#endif case GET_ASI_DIRECT: { TCGv_i64 tmp = tcg_temp_new_i64(); - gen_address_mask(dc, addr); - tcg_gen_qemu_ld_i64(tmp, addr, da.mem_idx, da.memop | MO_ALIGN); + tcg_gen_qemu_ld_i64(tmp, addr, da->mem_idx, da->memop | MO_ALIGN); /* Note that LE ldda acts as if each 32-bit register result is byte swapped. Having just performed one 64-bit bswap, we need now to swap the writebacks. 
*/ - if ((da.memop & MO_BSWAP) == MO_TE) { - tcg_gen_extr32_i64(lo, hi, tmp); + if ((da->memop & MO_BSWAP) == MO_TE) { + tcg_gen_extr_i64_tl(lo, hi, tmp); } else { - tcg_gen_extr32_i64(hi, lo, tmp); + tcg_gen_extr_i64_tl(hi, lo, tmp); } } break; @@ -2570,18 +2355,18 @@ static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd) real hardware allows others. This can be seen with e.g. FreeBSD 10.3 wrt ASI_IC_TAG. */ { - TCGv_i32 r_asi = tcg_constant_i32(da.asi); - TCGv_i32 r_mop = tcg_constant_i32(da.memop); + TCGv_i32 r_asi = tcg_constant_i32(da->asi); + TCGv_i32 r_mop = tcg_constant_i32(da->memop); TCGv_i64 tmp = tcg_temp_new_i64(); save_state(dc); gen_helper_ld_asi(tmp, tcg_env, addr, r_asi, r_mop); /* See above. */ - if ((da.memop & MO_BSWAP) == MO_TE) { - tcg_gen_extr32_i64(lo, hi, tmp); + if ((da->memop & MO_BSWAP) == MO_TE) { + tcg_gen_extr_i64_tl(lo, hi, tmp); } else { - tcg_gen_extr32_i64(hi, lo, tmp); + tcg_gen_extr_i64_tl(hi, lo, tmp); } } break; @@ -2591,157 +2376,90 @@ static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd) gen_store_gpr(dc, rd + 1, lo); } -static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr, - int insn, int rd) +static void gen_stda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd) { - DisasASI da = get_asi(dc, insn, MO_TEUQ); + TCGv hi = gen_load_gpr(dc, rd); TCGv lo = gen_load_gpr(dc, rd + 1); - switch (da.type) { + switch (da->type) { case GET_ASI_EXCP: break; case GET_ASI_DTWINX: - gen_address_mask(dc, addr); - tcg_gen_qemu_st_i64(hi, addr, da.mem_idx, da.memop | MO_ALIGN_16); - tcg_gen_addi_tl(addr, addr, 8); - tcg_gen_qemu_st_i64(lo, addr, da.mem_idx, da.memop); - break; - - case GET_ASI_DIRECT: +#ifdef TARGET_SPARC64 { - TCGv_i64 t64 = tcg_temp_new_i64(); - - /* Note that LE stda acts as if each 32-bit register result is - byte swapped. We will perform one 64-bit LE store, so now - we must swap the order of the construction. */ - if ((da.memop & MO_BSWAP) == MO_TE) { - tcg_gen_concat32_i64(t64, lo, hi); + MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16; + TCGv_i128 t = tcg_temp_new_i128(); + + /* + * Note that LE twinx acts as if each 64-bit register result is + * byte swapped. We perform one 128-bit LE store, so must swap + * the order of the construction. + */ + if ((mop & MO_BSWAP) == MO_TE) { + tcg_gen_concat_i64_i128(t, lo, hi); } else { - tcg_gen_concat32_i64(t64, hi, lo); + tcg_gen_concat_i64_i128(t, hi, lo); } - gen_address_mask(dc, addr); - tcg_gen_qemu_st_i64(t64, addr, da.mem_idx, da.memop | MO_ALIGN); + tcg_gen_qemu_st_i128(t, addr, da->mem_idx, mop); } break; +#else + g_assert_not_reached(); +#endif - default: - /* ??? In theory we've handled all of the ASIs that are valid - for stda, and this should raise DAE_invalid_asi. */ + case GET_ASI_DIRECT: { - TCGv_i32 r_asi = tcg_constant_i32(da.asi); - TCGv_i32 r_mop = tcg_constant_i32(da.memop); TCGv_i64 t64 = tcg_temp_new_i64(); - /* See above. */ - if ((da.memop & MO_BSWAP) == MO_TE) { - tcg_gen_concat32_i64(t64, lo, hi); + /* Note that LE stda acts as if each 32-bit register result is + byte swapped. We will perform one 64-bit LE store, so now + we must swap the order of the construction. 
*/ + if ((da->memop & MO_BSWAP) == MO_TE) { + tcg_gen_concat_tl_i64(t64, lo, hi); } else { - tcg_gen_concat32_i64(t64, hi, lo); + tcg_gen_concat_tl_i64(t64, hi, lo); } - - save_state(dc); - gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop); + tcg_gen_qemu_st_i64(t64, addr, da->mem_idx, da->memop | MO_ALIGN); } break; - } -} -static void gen_casx_asi(DisasContext *dc, TCGv addr, TCGv cmpv, - int insn, int rd) -{ - DisasASI da = get_asi(dc, insn, MO_TEUQ); - TCGv oldv; - - switch (da.type) { - case GET_ASI_EXCP: - return; - case GET_ASI_DIRECT: - oldv = tcg_temp_new(); - tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, gen_load_gpr(dc, rd), - da.mem_idx, da.memop | MO_ALIGN); - gen_store_gpr(dc, rd, oldv); - break; - default: - /* ??? Should be DAE_invalid_asi. */ - gen_exception(dc, TT_DATA_ACCESS); - break; - } -} - -#elif !defined(CONFIG_USER_ONLY) -static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd) -{ - /* ??? Work around an apparent bug in Ubuntu gcc 4.8.2-10ubuntu2+12, - whereby "rd + 1" elicits "error: array subscript is above array". - Since we have already asserted that rd is even, the semantics - are unchanged. */ - TCGv lo = gen_dest_gpr(dc, rd | 1); - TCGv hi = gen_dest_gpr(dc, rd); - TCGv_i64 t64 = tcg_temp_new_i64(); - DisasASI da = get_asi(dc, insn, MO_TEUQ); - - switch (da.type) { - case GET_ASI_EXCP: - return; - case GET_ASI_DIRECT: - gen_address_mask(dc, addr); - tcg_gen_qemu_ld_i64(t64, addr, da.mem_idx, da.memop | MO_ALIGN); - break; - default: - { - TCGv_i32 r_asi = tcg_constant_i32(da.asi); - TCGv_i32 r_mop = tcg_constant_i32(MO_UQ); - - save_state(dc); - gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop); - } - break; - } - - tcg_gen_extr_i64_i32(lo, hi, t64); - gen_store_gpr(dc, rd | 1, lo); - gen_store_gpr(dc, rd, hi); -} - -static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr, - int insn, int rd) -{ - DisasASI da = get_asi(dc, insn, MO_TEUQ); - TCGv lo = gen_load_gpr(dc, rd + 1); - TCGv_i64 t64 = tcg_temp_new_i64(); - - tcg_gen_concat_tl_i64(t64, lo, hi); - - switch (da.type) { - case GET_ASI_EXCP: - break; - case GET_ASI_DIRECT: - gen_address_mask(dc, addr); - tcg_gen_qemu_st_i64(t64, addr, da.mem_idx, da.memop | MO_ALIGN); - break; case GET_ASI_BFILL: + assert(TARGET_LONG_BITS == 32); /* Store 32 bytes of T64 to ADDR. */ /* ??? The original qemu code suggests 8-byte alignment, dropping the low bits, but the only place I can see this used is in the Linux kernel with 32 byte alignment, which would make more sense as a cacheline-style operation. */ { + TCGv_i64 t64 = tcg_temp_new_i64(); TCGv d_addr = tcg_temp_new(); TCGv eight = tcg_constant_tl(8); int i; + tcg_gen_concat_tl_i64(t64, lo, hi); tcg_gen_andi_tl(d_addr, addr, -8); for (i = 0; i < 32; i += 8) { - tcg_gen_qemu_st_i64(t64, d_addr, da.mem_idx, da.memop); + tcg_gen_qemu_st_i64(t64, d_addr, da->mem_idx, da->memop); tcg_gen_add_tl(d_addr, d_addr, eight); } } break; + default: + /* ??? In theory we've handled all of the ASIs that are valid + for stda, and this should raise DAE_invalid_asi. */ { - TCGv_i32 r_asi = tcg_constant_i32(da.asi); - TCGv_i32 r_mop = tcg_constant_i32(MO_UQ); + TCGv_i32 r_asi = tcg_constant_i32(da->asi); + TCGv_i32 r_mop = tcg_constant_i32(da->memop); + TCGv_i64 t64 = tcg_temp_new_i64(); + + /* See above. 
*/ + if ((da->memop & MO_BSWAP) == MO_TE) { + tcg_gen_concat_tl_i64(t64, lo, hi); + } else { + tcg_gen_concat_tl_i64(t64, hi, lo); + } save_state(dc); gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop); @@ -2749,30 +2467,10 @@ static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr, break; } } -#endif -static TCGv get_src1(DisasContext *dc, unsigned int insn) -{ - unsigned int rs1 = GET_FIELD(insn, 13, 17); - return gen_load_gpr(dc, rs1); -} - -static TCGv get_src2(DisasContext *dc, unsigned int insn) -{ - if (IS_IMM) { /* immediate */ - target_long simm = GET_FIELDs(insn, 19, 31); - TCGv t = tcg_temp_new(); - tcg_gen_movi_tl(t, simm); - return t; - } else { /* register */ - unsigned int rs2 = GET_FIELD(insn, 27, 31); - return gen_load_gpr(dc, rs2); - } -} - -#ifdef TARGET_SPARC64 static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs) { +#ifdef TARGET_SPARC64 TCGv_i32 c32, zero, dst, s1, s2; /* We have two choices here: extend the 32 bit data and use movcond_i64, @@ -2795,19 +2493,27 @@ static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs) tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2); gen_store_fpr_F(dc, rd, dst); +#else + qemu_build_not_reached(); +#endif } static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs) { +#ifdef TARGET_SPARC64 TCGv_i64 dst = gen_dest_fpr_D(dc, rd); tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, cmp->c2, gen_load_fpr_D(dc, rs), gen_load_fpr_D(dc, rd)); gen_store_fpr_D(dc, rd, dst); +#else + qemu_build_not_reached(); +#endif } static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs) { +#ifdef TARGET_SPARC64 int qd = QFPREG(rd); int qs = QFPREG(rs); @@ -2817,10 +2523,13 @@ static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs) cpu_fpr[qs / 2 + 1], cpu_fpr[qd / 2 + 1]); gen_update_fprs_dirty(dc, qd); +#else + qemu_build_not_reached(); +#endif } -#ifndef CONFIG_USER_ONLY -static void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr, TCGv_env tcg_env) +#ifdef TARGET_SPARC64 +static void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr) { TCGv_i32 r_tl = tcg_temp_new_i32(); @@ -2843,13 +2552,1359 @@ static void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr, TCGv_env tcg_env) } #endif -static void gen_edge(DisasContext *dc, TCGv dst, TCGv s1, TCGv s2, +static int extract_dfpreg(DisasContext *dc, int x) +{ + return DFPREG(x); +} + +static int extract_qfpreg(DisasContext *dc, int x) +{ + return QFPREG(x); +} + +/* Include the auto-generated decoder. */ +#include "decode-insns.c.inc" + +#define TRANS(NAME, AVAIL, FUNC, ...) 
\ + static bool trans_##NAME(DisasContext *dc, arg_##NAME *a) \ + { return avail_##AVAIL(dc) && FUNC(dc, __VA_ARGS__); } + +#define avail_ALL(C) true +#ifdef TARGET_SPARC64 +# define avail_32(C) false +# define avail_ASR17(C) false +# define avail_CASA(C) true +# define avail_DIV(C) true +# define avail_MUL(C) true +# define avail_POWERDOWN(C) false +# define avail_64(C) true +# define avail_GL(C) ((C)->def->features & CPU_FEATURE_GL) +# define avail_HYPV(C) ((C)->def->features & CPU_FEATURE_HYPV) +# define avail_VIS1(C) ((C)->def->features & CPU_FEATURE_VIS1) +# define avail_VIS2(C) ((C)->def->features & CPU_FEATURE_VIS2) +#else +# define avail_32(C) true +# define avail_ASR17(C) ((C)->def->features & CPU_FEATURE_ASR17) +# define avail_CASA(C) ((C)->def->features & CPU_FEATURE_CASA) +# define avail_DIV(C) ((C)->def->features & CPU_FEATURE_DIV) +# define avail_MUL(C) ((C)->def->features & CPU_FEATURE_MUL) +# define avail_POWERDOWN(C) ((C)->def->features & CPU_FEATURE_POWERDOWN) +# define avail_64(C) false +# define avail_GL(C) false +# define avail_HYPV(C) false +# define avail_VIS1(C) false +# define avail_VIS2(C) false +#endif + +/* Default case for non jump instructions. */ +static bool advance_pc(DisasContext *dc) +{ + if (dc->npc & 3) { + switch (dc->npc) { + case DYNAMIC_PC: + case DYNAMIC_PC_LOOKUP: + dc->pc = dc->npc; + gen_op_next_insn(); + break; + case JUMP_PC: + /* we can do a static jump */ + gen_branch2(dc, dc->jump_pc[0], dc->jump_pc[1], cpu_cond); + dc->base.is_jmp = DISAS_NORETURN; + break; + default: + g_assert_not_reached(); + } + } else { + dc->pc = dc->npc; + dc->npc = dc->npc + 4; + } + return true; +} + +/* + * Major opcodes 00 and 01 -- branches, call, and sethi + */ + +static bool advance_jump_uncond_never(DisasContext *dc, bool annul) +{ + if (annul) { + dc->pc = dc->npc + 4; + dc->npc = dc->pc + 4; + } else { + dc->pc = dc->npc; + dc->npc = dc->pc + 4; + } + return true; +} + +static bool advance_jump_uncond_always(DisasContext *dc, bool annul, + target_ulong dest) +{ + if (annul) { + dc->pc = dest; + dc->npc = dest + 4; + } else { + dc->pc = dc->npc; + dc->npc = dest; + tcg_gen_mov_tl(cpu_pc, cpu_npc); + } + return true; +} + +static bool advance_jump_cond(DisasContext *dc, DisasCompare *cmp, + bool annul, target_ulong dest) +{ + target_ulong npc = dc->npc; + + if (annul) { + TCGLabel *l1 = gen_new_label(); + + tcg_gen_brcond_tl(tcg_invert_cond(cmp->cond), cmp->c1, cmp->c2, l1); + gen_goto_tb(dc, 0, npc, dest); + gen_set_label(l1); + gen_goto_tb(dc, 1, npc + 4, npc + 8); + + dc->base.is_jmp = DISAS_NORETURN; + } else { + if (npc & 3) { + switch (npc) { + case DYNAMIC_PC: + case DYNAMIC_PC_LOOKUP: + tcg_gen_mov_tl(cpu_pc, cpu_npc); + tcg_gen_addi_tl(cpu_npc, cpu_npc, 4); + tcg_gen_movcond_tl(cmp->cond, cpu_npc, + cmp->c1, cmp->c2, + tcg_constant_tl(dest), cpu_npc); + dc->pc = npc; + break; + default: + g_assert_not_reached(); + } + } else { + dc->pc = npc; + dc->jump_pc[0] = dest; + dc->jump_pc[1] = npc + 4; + dc->npc = JUMP_PC; + if (cmp->is_bool) { + tcg_gen_mov_tl(cpu_cond, cmp->c1); + } else { + tcg_gen_setcond_tl(cmp->cond, cpu_cond, cmp->c1, cmp->c2); + } + } + } + return true; +} + +static bool raise_priv(DisasContext *dc) +{ + gen_exception(dc, TT_PRIV_INSN); + return true; +} + +static bool raise_unimpfpop(DisasContext *dc) +{ + gen_op_fpexception_im(dc, FSR_FTT_UNIMPFPOP); + return true; +} + +static bool gen_trap_float128(DisasContext *dc) +{ + if (dc->def->features & CPU_FEATURE_FLOAT128) { + return false; + } + return raise_unimpfpop(dc); +} + 
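/*
 * Aside: each TRANS() use below expands, via the macro above, into the
 * decodetree callback for that pattern.  As a sketch of the expansion
 * (illustrative, this is what the preprocessor emits, not new code),
 * TRANS(Bicc, ALL, do_bpcc, a) becomes:
 *
 *     static bool trans_Bicc(DisasContext *dc, arg_Bicc *a)
 *     {
 *         return avail_ALL(dc) && do_bpcc(dc, a);
 *     }
 *
 * Returning false rejects the pattern, so an insn whose avail_* predicate
 * fails is handled like any other undecoded insn (illegal instruction).
 */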
+static bool do_bpcc(DisasContext *dc, arg_bcc *a) +{ + target_long target = address_mask_i(dc, dc->pc + a->i * 4); + DisasCompare cmp; + + switch (a->cond) { + case 0x0: + return advance_jump_uncond_never(dc, a->a); + case 0x8: + return advance_jump_uncond_always(dc, a->a, target); + default: + flush_cond(dc); + + gen_compare(&cmp, a->cc, a->cond, dc); + return advance_jump_cond(dc, &cmp, a->a, target); + } +} + +TRANS(Bicc, ALL, do_bpcc, a) +TRANS(BPcc, 64, do_bpcc, a) + +static bool do_fbpfcc(DisasContext *dc, arg_bcc *a) +{ + target_long target = address_mask_i(dc, dc->pc + a->i * 4); + DisasCompare cmp; + + if (gen_trap_ifnofpu(dc)) { + return true; + } + switch (a->cond) { + case 0x0: + return advance_jump_uncond_never(dc, a->a); + case 0x8: + return advance_jump_uncond_always(dc, a->a, target); + default: + flush_cond(dc); + + gen_fcompare(&cmp, a->cc, a->cond); + return advance_jump_cond(dc, &cmp, a->a, target); + } +} + +TRANS(FBPfcc, 64, do_fbpfcc, a) +TRANS(FBfcc, ALL, do_fbpfcc, a) + +static bool trans_BPr(DisasContext *dc, arg_BPr *a) +{ + target_long target = address_mask_i(dc, dc->pc + a->i * 4); + DisasCompare cmp; + + if (!avail_64(dc)) { + return false; + } + if (gen_tcg_cond_reg[a->cond] == TCG_COND_NEVER) { + return false; + } + + flush_cond(dc); + gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1)); + return advance_jump_cond(dc, &cmp, a->a, target); +} + +static bool trans_CALL(DisasContext *dc, arg_CALL *a) +{ + target_long target = address_mask_i(dc, dc->pc + a->i * 4); + + gen_store_gpr(dc, 15, tcg_constant_tl(dc->pc)); + gen_mov_pc_npc(dc); + dc->npc = target; + return true; +} + +static bool trans_NCP(DisasContext *dc, arg_NCP *a) +{ + /* + * For sparc32, always generate the no-coprocessor exception. + * For sparc64, always generate illegal instruction. + */ +#ifdef TARGET_SPARC64 + return false; +#else + gen_exception(dc, TT_NCP_INSN); + return true; +#endif +} + +static bool trans_SETHI(DisasContext *dc, arg_SETHI *a) +{ + /* Special-case %g0 because that's the canonical nop. */ + if (a->rd) { + gen_store_gpr(dc, a->rd, tcg_constant_tl((uint32_t)a->i << 10)); + } + return advance_pc(dc); +} + +/* + * Major Opcode 10 -- integer, floating-point, vis, and system insns. + */ + +static bool do_tcc(DisasContext *dc, int cond, int cc, + int rs1, bool imm, int rs2_or_imm) +{ + int mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc) + ? UA2005_HTRAP_MASK : V8_TRAP_MASK); + DisasCompare cmp; + TCGLabel *lab; + TCGv_i32 trap; + + /* Trap never. */ + if (cond == 0) { + return advance_pc(dc); + } + + /* + * Immediate traps are the most common case. Since this value is + * live across the branch, it really pays to evaluate the constant. + */ + if (rs1 == 0 && (imm || rs2_or_imm == 0)) { + trap = tcg_constant_i32((rs2_or_imm & mask) + TT_TRAP); + } else { + trap = tcg_temp_new_i32(); + tcg_gen_trunc_tl_i32(trap, gen_load_gpr(dc, rs1)); + if (imm) { + tcg_gen_addi_i32(trap, trap, rs2_or_imm); + } else { + TCGv_i32 t2 = tcg_temp_new_i32(); + tcg_gen_trunc_tl_i32(t2, gen_load_gpr(dc, rs2_or_imm)); + tcg_gen_add_i32(trap, trap, t2); + } + tcg_gen_andi_i32(trap, trap, mask); + tcg_gen_addi_i32(trap, trap, TT_TRAP); + } + + /* Trap always. */ + if (cond == 8) { + save_state(dc); + gen_helper_raise_exception(tcg_env, trap); + dc->base.is_jmp = DISAS_NORETURN; + return true; + } + + /* Conditional trap. 
*/ + flush_cond(dc); + lab = delay_exceptionv(dc, trap); + gen_compare(&cmp, cc, cond, dc); + tcg_gen_brcond_tl(cmp.cond, cmp.c1, cmp.c2, lab); + + return advance_pc(dc); +} + +static bool trans_Tcc_r(DisasContext *dc, arg_Tcc_r *a) +{ + if (avail_32(dc) && a->cc) { + return false; + } + return do_tcc(dc, a->cond, a->cc, a->rs1, false, a->rs2); +} + +static bool trans_Tcc_i_v7(DisasContext *dc, arg_Tcc_i_v7 *a) +{ + if (avail_64(dc)) { + return false; + } + return do_tcc(dc, a->cond, 0, a->rs1, true, a->i); +} + +static bool trans_Tcc_i_v9(DisasContext *dc, arg_Tcc_i_v9 *a) +{ + if (avail_32(dc)) { + return false; + } + return do_tcc(dc, a->cond, a->cc, a->rs1, true, a->i); +} + +static bool trans_STBAR(DisasContext *dc, arg_STBAR *a) +{ + tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC); + return advance_pc(dc); +} + +static bool trans_MEMBAR(DisasContext *dc, arg_MEMBAR *a) +{ + if (avail_32(dc)) { + return false; + } + if (a->mmask) { + /* Note TCG_MO_* was modeled on sparc64, so mmask matches. */ + tcg_gen_mb(a->mmask | TCG_BAR_SC); + } + if (a->cmask) { + /* For #Sync, etc, end the TB to recognize interrupts. */ + dc->base.is_jmp = DISAS_EXIT; + } + return advance_pc(dc); +} + +static bool do_rd_special(DisasContext *dc, bool priv, int rd, + TCGv (*func)(DisasContext *, TCGv)) +{ + if (!priv) { + return raise_priv(dc); + } + gen_store_gpr(dc, rd, func(dc, gen_dest_gpr(dc, rd))); + return advance_pc(dc); +} + +static TCGv do_rdy(DisasContext *dc, TCGv dst) +{ + return cpu_y; +} + +static bool trans_RDY(DisasContext *dc, arg_RDY *a) +{ + /* + * TODO: Need a feature bit for sparcv8. In the meantime, treat all + * 32-bit cpus like sparcv7, which ignores the rs1 field. + * This matches after all other ASR, so Leon3 Asr17 is handled first. + */ + if (avail_64(dc) && a->rs1 != 0) { + return false; + } + return do_rd_special(dc, true, a->rd, do_rdy); +} + +static TCGv do_rd_leon3_config(DisasContext *dc, TCGv dst) +{ + uint32_t val; + + /* + * TODO: There are many more fields to be filled, + * some of which are writable. + */ + val = dc->def->nwindows - 1; /* [4:0] NWIN */ + val |= 1 << 8; /* [8] V8 */ + + return tcg_constant_tl(val); +} + +TRANS(RDASR17, ASR17, do_rd_special, true, a->rd, do_rd_leon3_config) + +static TCGv do_rdccr(DisasContext *dc, TCGv dst) +{ + update_psr(dc); + gen_helper_rdccr(dst, tcg_env); + return dst; +} + +TRANS(RDCCR, 64, do_rd_special, true, a->rd, do_rdccr) + +static TCGv do_rdasi(DisasContext *dc, TCGv dst) +{ +#ifdef TARGET_SPARC64 + return tcg_constant_tl(dc->asi); +#else + qemu_build_not_reached(); +#endif +} + +TRANS(RDASI, 64, do_rd_special, true, a->rd, do_rdasi) + +static TCGv do_rdtick(DisasContext *dc, TCGv dst) +{ + TCGv_ptr r_tickptr = tcg_temp_new_ptr(); + + tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick)); + if (translator_io_start(&dc->base)) { + dc->base.is_jmp = DISAS_EXIT; + } + gen_helper_tick_get_count(dst, tcg_env, r_tickptr, + tcg_constant_i32(dc->mem_idx)); + return dst; +} + +/* TODO: non-priv access only allowed when enabled. 
*/ +TRANS(RDTICK, 64, do_rd_special, true, a->rd, do_rdtick) + +static TCGv do_rdpc(DisasContext *dc, TCGv dst) +{ + return tcg_constant_tl(address_mask_i(dc, dc->pc)); +} + +TRANS(RDPC, 64, do_rd_special, true, a->rd, do_rdpc) + +static TCGv do_rdfprs(DisasContext *dc, TCGv dst) +{ + tcg_gen_ext_i32_tl(dst, cpu_fprs); + return dst; +} + +TRANS(RDFPRS, 64, do_rd_special, true, a->rd, do_rdfprs) + +static TCGv do_rdgsr(DisasContext *dc, TCGv dst) +{ + gen_trap_ifnofpu(dc); + return cpu_gsr; +} + +TRANS(RDGSR, 64, do_rd_special, true, a->rd, do_rdgsr) + +static TCGv do_rdsoftint(DisasContext *dc, TCGv dst) +{ + tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(softint)); + return dst; +} + +TRANS(RDSOFTINT, 64, do_rd_special, supervisor(dc), a->rd, do_rdsoftint) + +static TCGv do_rdtick_cmpr(DisasContext *dc, TCGv dst) +{ + tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(tick_cmpr)); + return dst; +} + +/* TODO: non-priv access only allowed when enabled. */ +TRANS(RDTICK_CMPR, 64, do_rd_special, true, a->rd, do_rdtick_cmpr) + +static TCGv do_rdstick(DisasContext *dc, TCGv dst) +{ + TCGv_ptr r_tickptr = tcg_temp_new_ptr(); + + tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick)); + if (translator_io_start(&dc->base)) { + dc->base.is_jmp = DISAS_EXIT; + } + gen_helper_tick_get_count(dst, tcg_env, r_tickptr, + tcg_constant_i32(dc->mem_idx)); + return dst; +} + +/* TODO: non-priv access only allowed when enabled. */ +TRANS(RDSTICK, 64, do_rd_special, true, a->rd, do_rdstick) + +static TCGv do_rdstick_cmpr(DisasContext *dc, TCGv dst) +{ + tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(stick_cmpr)); + return dst; +} + +/* TODO: supervisor access only allowed when enabled by hypervisor. */ +TRANS(RDSTICK_CMPR, 64, do_rd_special, supervisor(dc), a->rd, do_rdstick_cmpr) + +/* + * UltraSPARC-T1 Strand status. + * HYPV check maybe not enough, UA2005 & UA2007 describe + * this ASR as impl. 
dep + */ +static TCGv do_rdstrand_status(DisasContext *dc, TCGv dst) +{ + return tcg_constant_tl(1); +} + +TRANS(RDSTRAND_STATUS, HYPV, do_rd_special, true, a->rd, do_rdstrand_status) + +static TCGv do_rdpsr(DisasContext *dc, TCGv dst) +{ + update_psr(dc); + gen_helper_rdpsr(dst, tcg_env); + return dst; +} + +TRANS(RDPSR, 32, do_rd_special, supervisor(dc), a->rd, do_rdpsr) + +static TCGv do_rdhpstate(DisasContext *dc, TCGv dst) +{ + tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hpstate)); + return dst; +} + +TRANS(RDHPR_hpstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhpstate) + +static TCGv do_rdhtstate(DisasContext *dc, TCGv dst) +{ + TCGv_i32 tl = tcg_temp_new_i32(); + TCGv_ptr tp = tcg_temp_new_ptr(); + + tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl)); + tcg_gen_andi_i32(tl, tl, MAXTL_MASK); + tcg_gen_shli_i32(tl, tl, 3); + tcg_gen_ext_i32_ptr(tp, tl); + tcg_gen_add_ptr(tp, tp, tcg_env); + + tcg_gen_ld_tl(dst, tp, env64_field_offsetof(htstate)); + return dst; +} + +TRANS(RDHPR_htstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtstate) + +static TCGv do_rdhintp(DisasContext *dc, TCGv dst) +{ + tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hintp)); + return dst; +} + +TRANS(RDHPR_hintp, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhintp) + +static TCGv do_rdhtba(DisasContext *dc, TCGv dst) +{ + tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(htba)); + return dst; +} + +TRANS(RDHPR_htba, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtba) + +static TCGv do_rdhver(DisasContext *dc, TCGv dst) +{ + tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hver)); + return dst; +} + +TRANS(RDHPR_hver, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhver) + +static TCGv do_rdhstick_cmpr(DisasContext *dc, TCGv dst) +{ + tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hstick_cmpr)); + return dst; +} + +TRANS(RDHPR_hstick_cmpr, HYPV, do_rd_special, hypervisor(dc), a->rd, + do_rdhstick_cmpr) + +static TCGv do_rdwim(DisasContext *dc, TCGv dst) +{ + tcg_gen_ld_tl(dst, tcg_env, env32_field_offsetof(wim)); + return dst; +} + +TRANS(RDWIM, 32, do_rd_special, supervisor(dc), a->rd, do_rdwim) + +static TCGv do_rdtpc(DisasContext *dc, TCGv dst) +{ +#ifdef TARGET_SPARC64 + TCGv_ptr r_tsptr = tcg_temp_new_ptr(); + + gen_load_trap_state_at_tl(r_tsptr); + tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tpc)); + return dst; +#else + qemu_build_not_reached(); +#endif +} + +TRANS(RDPR_tpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtpc) + +static TCGv do_rdtnpc(DisasContext *dc, TCGv dst) +{ +#ifdef TARGET_SPARC64 + TCGv_ptr r_tsptr = tcg_temp_new_ptr(); + + gen_load_trap_state_at_tl(r_tsptr); + tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tnpc)); + return dst; +#else + qemu_build_not_reached(); +#endif +} + +TRANS(RDPR_tnpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtnpc) + +static TCGv do_rdtstate(DisasContext *dc, TCGv dst) +{ +#ifdef TARGET_SPARC64 + TCGv_ptr r_tsptr = tcg_temp_new_ptr(); + + gen_load_trap_state_at_tl(r_tsptr); + tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tstate)); + return dst; +#else + qemu_build_not_reached(); +#endif +} + +TRANS(RDPR_tstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdtstate) + +static TCGv do_rdtt(DisasContext *dc, TCGv dst) +{ +#ifdef TARGET_SPARC64 + TCGv_ptr r_tsptr = tcg_temp_new_ptr(); + + gen_load_trap_state_at_tl(r_tsptr); + tcg_gen_ld32s_tl(dst, r_tsptr, offsetof(trap_state, tt)); + return dst; +#else + qemu_build_not_reached(); +#endif +} + +TRANS(RDPR_tt, 64, do_rd_special, supervisor(dc), a->rd, do_rdtt) 
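/*
 * Aside: a do_rd_special() reader may either fill in the dst temporary it
 * is handed or return some other TCGv outright -- do_rdy returns the cpu_y
 * global, and do_rdpc returns a constant.  A minimal sketch of a reader
 * following this convention (RDFOO and the "foo" env field are purely
 * illustrative, not part of this patch):
 *
 *     static TCGv do_rdfoo(DisasContext *dc, TCGv dst)
 *     {
 *         tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(foo));
 *         return dst;
 *     }
 *
 *     TRANS(RDFOO, 64, do_rd_special, supervisor(dc), a->rd, do_rdfoo)
 */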
+TRANS(RDPR_tick, 64, do_rd_special, supervisor(dc), a->rd, do_rdtick) + +static TCGv do_rdtba(DisasContext *dc, TCGv dst) +{ + return cpu_tbr; +} + +TRANS(RDTBR, 32, do_rd_special, supervisor(dc), a->rd, do_rdtba) +TRANS(RDPR_tba, 64, do_rd_special, supervisor(dc), a->rd, do_rdtba) + +static TCGv do_rdpstate(DisasContext *dc, TCGv dst) +{ + tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(pstate)); + return dst; +} + +TRANS(RDPR_pstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdpstate) + +static TCGv do_rdtl(DisasContext *dc, TCGv dst) +{ + tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(tl)); + return dst; +} + +TRANS(RDPR_tl, 64, do_rd_special, supervisor(dc), a->rd, do_rdtl) + +static TCGv do_rdpil(DisasContext *dc, TCGv dst) +{ + tcg_gen_ld32s_tl(dst, tcg_env, env_field_offsetof(psrpil)); + return dst; +} + +TRANS(RDPR_pil, 64, do_rd_special, supervisor(dc), a->rd, do_rdpil) + +static TCGv do_rdcwp(DisasContext *dc, TCGv dst) +{ + gen_helper_rdcwp(dst, tcg_env); + return dst; +} + +TRANS(RDPR_cwp, 64, do_rd_special, supervisor(dc), a->rd, do_rdcwp) + +static TCGv do_rdcansave(DisasContext *dc, TCGv dst) +{ + tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cansave)); + return dst; +} + +TRANS(RDPR_cansave, 64, do_rd_special, supervisor(dc), a->rd, do_rdcansave) + +static TCGv do_rdcanrestore(DisasContext *dc, TCGv dst) +{ + tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(canrestore)); + return dst; +} + +TRANS(RDPR_canrestore, 64, do_rd_special, supervisor(dc), a->rd, + do_rdcanrestore) + +static TCGv do_rdcleanwin(DisasContext *dc, TCGv dst) +{ + tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cleanwin)); + return dst; +} + +TRANS(RDPR_cleanwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdcleanwin) + +static TCGv do_rdotherwin(DisasContext *dc, TCGv dst) +{ + tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(otherwin)); + return dst; +} + +TRANS(RDPR_otherwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdotherwin) + +static TCGv do_rdwstate(DisasContext *dc, TCGv dst) +{ + tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(wstate)); + return dst; +} + +TRANS(RDPR_wstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdwstate) + +static TCGv do_rdgl(DisasContext *dc, TCGv dst) +{ + tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(gl)); + return dst; +} + +TRANS(RDPR_gl, GL, do_rd_special, supervisor(dc), a->rd, do_rdgl) + +/* UA2005 strand status */ +static TCGv do_rdssr(DisasContext *dc, TCGv dst) +{ + tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(ssr)); + return dst; +} + +TRANS(RDPR_strand_status, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdssr) + +static TCGv do_rdver(DisasContext *dc, TCGv dst) +{ + tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(version)); + return dst; +} + +TRANS(RDPR_ver, 64, do_rd_special, supervisor(dc), a->rd, do_rdver) + +static bool trans_FLUSHW(DisasContext *dc, arg_FLUSHW *a) +{ + if (avail_64(dc)) { + gen_helper_flushw(tcg_env); + return advance_pc(dc); + } + return false; +} + +static bool do_wr_special(DisasContext *dc, arg_r_r_ri *a, bool priv, + void (*func)(DisasContext *, TCGv)) +{ + TCGv src; + + /* For simplicity, we under-decoded the rs2 form. 
*/ + if (!a->imm && (a->rs2_or_imm & ~0x1f)) { + return false; + } + if (!priv) { + return raise_priv(dc); + } + + if (a->rs1 == 0 && (a->imm || a->rs2_or_imm == 0)) { + src = tcg_constant_tl(a->rs2_or_imm); + } else { + TCGv src1 = gen_load_gpr(dc, a->rs1); + if (a->rs2_or_imm == 0) { + src = src1; + } else { + src = tcg_temp_new(); + if (a->imm) { + tcg_gen_xori_tl(src, src1, a->rs2_or_imm); + } else { + tcg_gen_xor_tl(src, src1, gen_load_gpr(dc, a->rs2_or_imm)); + } + } + } + func(dc, src); + return advance_pc(dc); +} + +static void do_wry(DisasContext *dc, TCGv src) +{ + tcg_gen_ext32u_tl(cpu_y, src); +} + +TRANS(WRY, ALL, do_wr_special, a, true, do_wry) + +static void do_wrccr(DisasContext *dc, TCGv src) +{ + gen_helper_wrccr(tcg_env, src); +} + +TRANS(WRCCR, 64, do_wr_special, a, true, do_wrccr) + +static void do_wrasi(DisasContext *dc, TCGv src) +{ + TCGv tmp = tcg_temp_new(); + + tcg_gen_ext8u_tl(tmp, src); + tcg_gen_st32_tl(tmp, tcg_env, env64_field_offsetof(asi)); + /* End TB to notice changed ASI. */ + dc->base.is_jmp = DISAS_EXIT; +} + +TRANS(WRASI, 64, do_wr_special, a, true, do_wrasi) + +static void do_wrfprs(DisasContext *dc, TCGv src) +{ +#ifdef TARGET_SPARC64 + tcg_gen_trunc_tl_i32(cpu_fprs, src); + dc->fprs_dirty = 0; + dc->base.is_jmp = DISAS_EXIT; +#else + qemu_build_not_reached(); +#endif +} + +TRANS(WRFPRS, 64, do_wr_special, a, true, do_wrfprs) + +static void do_wrgsr(DisasContext *dc, TCGv src) +{ + gen_trap_ifnofpu(dc); + tcg_gen_mov_tl(cpu_gsr, src); +} + +TRANS(WRGSR, 64, do_wr_special, a, true, do_wrgsr) + +static void do_wrsoftint_set(DisasContext *dc, TCGv src) +{ + gen_helper_set_softint(tcg_env, src); +} + +TRANS(WRSOFTINT_SET, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_set) + +static void do_wrsoftint_clr(DisasContext *dc, TCGv src) +{ + gen_helper_clear_softint(tcg_env, src); +} + +TRANS(WRSOFTINT_CLR, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_clr) + +static void do_wrsoftint(DisasContext *dc, TCGv src) +{ + gen_helper_write_softint(tcg_env, src); +} + +TRANS(WRSOFTINT, 64, do_wr_special, a, supervisor(dc), do_wrsoftint) + +static void do_wrtick_cmpr(DisasContext *dc, TCGv src) +{ + TCGv_ptr r_tickptr = tcg_temp_new_ptr(); + + tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(tick_cmpr)); + tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick)); + translator_io_start(&dc->base); + gen_helper_tick_set_limit(r_tickptr, src); + /* End TB to handle timer interrupt */ + dc->base.is_jmp = DISAS_EXIT; +} + +TRANS(WRTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrtick_cmpr) + +static void do_wrstick(DisasContext *dc, TCGv src) +{ +#ifdef TARGET_SPARC64 + TCGv_ptr r_tickptr = tcg_temp_new_ptr(); + + tcg_gen_ld_ptr(r_tickptr, tcg_env, offsetof(CPUSPARCState, stick)); + translator_io_start(&dc->base); + gen_helper_tick_set_count(r_tickptr, src); + /* End TB to handle timer interrupt */ + dc->base.is_jmp = DISAS_EXIT; +#else + qemu_build_not_reached(); +#endif +} + +TRANS(WRSTICK, 64, do_wr_special, a, supervisor(dc), do_wrstick) + +static void do_wrstick_cmpr(DisasContext *dc, TCGv src) +{ + TCGv_ptr r_tickptr = tcg_temp_new_ptr(); + + tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(stick_cmpr)); + tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick)); + translator_io_start(&dc->base); + gen_helper_tick_set_limit(r_tickptr, src); + /* End TB to handle timer interrupt */ + dc->base.is_jmp = DISAS_EXIT; +} + +TRANS(WRSTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrstick_cmpr) + +static void do_wrpowerdown(DisasContext 
*dc, TCGv src) +{ + save_state(dc); + gen_helper_power_down(tcg_env); +} + +TRANS(WRPOWERDOWN, POWERDOWN, do_wr_special, a, supervisor(dc), do_wrpowerdown) + +static void do_wrpsr(DisasContext *dc, TCGv src) +{ + gen_helper_wrpsr(tcg_env, src); + tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS); + dc->cc_op = CC_OP_FLAGS; + dc->base.is_jmp = DISAS_EXIT; +} + +TRANS(WRPSR, 32, do_wr_special, a, supervisor(dc), do_wrpsr) + +static void do_wrwim(DisasContext *dc, TCGv src) +{ + target_ulong mask = MAKE_64BIT_MASK(0, dc->def->nwindows); + TCGv tmp = tcg_temp_new(); + + tcg_gen_andi_tl(tmp, src, mask); + tcg_gen_st_tl(tmp, tcg_env, env32_field_offsetof(wim)); +} + +TRANS(WRWIM, 32, do_wr_special, a, supervisor(dc), do_wrwim) + +static void do_wrtpc(DisasContext *dc, TCGv src) +{ +#ifdef TARGET_SPARC64 + TCGv_ptr r_tsptr = tcg_temp_new_ptr(); + + gen_load_trap_state_at_tl(r_tsptr); + tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tpc)); +#else + qemu_build_not_reached(); +#endif +} + +TRANS(WRPR_tpc, 64, do_wr_special, a, supervisor(dc), do_wrtpc) + +static void do_wrtnpc(DisasContext *dc, TCGv src) +{ +#ifdef TARGET_SPARC64 + TCGv_ptr r_tsptr = tcg_temp_new_ptr(); + + gen_load_trap_state_at_tl(r_tsptr); + tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tnpc)); +#else + qemu_build_not_reached(); +#endif +} + +TRANS(WRPR_tnpc, 64, do_wr_special, a, supervisor(dc), do_wrtnpc) + +static void do_wrtstate(DisasContext *dc, TCGv src) +{ +#ifdef TARGET_SPARC64 + TCGv_ptr r_tsptr = tcg_temp_new_ptr(); + + gen_load_trap_state_at_tl(r_tsptr); + tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tstate)); +#else + qemu_build_not_reached(); +#endif +} + +TRANS(WRPR_tstate, 64, do_wr_special, a, supervisor(dc), do_wrtstate) + +static void do_wrtt(DisasContext *dc, TCGv src) +{ +#ifdef TARGET_SPARC64 + TCGv_ptr r_tsptr = tcg_temp_new_ptr(); + + gen_load_trap_state_at_tl(r_tsptr); + tcg_gen_st32_tl(src, r_tsptr, offsetof(trap_state, tt)); +#else + qemu_build_not_reached(); +#endif +} + +TRANS(WRPR_tt, 64, do_wr_special, a, supervisor(dc), do_wrtt) + +static void do_wrtick(DisasContext *dc, TCGv src) +{ + TCGv_ptr r_tickptr = tcg_temp_new_ptr(); + + tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick)); + translator_io_start(&dc->base); + gen_helper_tick_set_count(r_tickptr, src); + /* End TB to handle timer interrupt */ + dc->base.is_jmp = DISAS_EXIT; +} + +TRANS(WRPR_tick, 64, do_wr_special, a, supervisor(dc), do_wrtick) + +static void do_wrtba(DisasContext *dc, TCGv src) +{ + tcg_gen_mov_tl(cpu_tbr, src); +} + +TRANS(WRPR_tba, 64, do_wr_special, a, supervisor(dc), do_wrtba) + +static void do_wrpstate(DisasContext *dc, TCGv src) +{ + save_state(dc); + if (translator_io_start(&dc->base)) { + dc->base.is_jmp = DISAS_EXIT; + } + gen_helper_wrpstate(tcg_env, src); + dc->npc = DYNAMIC_PC; +} + +TRANS(WRPR_pstate, 64, do_wr_special, a, supervisor(dc), do_wrpstate) + +static void do_wrtl(DisasContext *dc, TCGv src) +{ + save_state(dc); + tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(tl)); + dc->npc = DYNAMIC_PC; +} + +TRANS(WRPR_tl, 64, do_wr_special, a, supervisor(dc), do_wrtl) + +static void do_wrpil(DisasContext *dc, TCGv src) +{ + if (translator_io_start(&dc->base)) { + dc->base.is_jmp = DISAS_EXIT; + } + gen_helper_wrpil(tcg_env, src); +} + +TRANS(WRPR_pil, 64, do_wr_special, a, supervisor(dc), do_wrpil) + +static void do_wrcwp(DisasContext *dc, TCGv src) +{ + gen_helper_wrcwp(tcg_env, src); +} + +TRANS(WRPR_cwp, 64, do_wr_special, a, supervisor(dc), do_wrcwp) + +static void do_wrcansave(DisasContext 
*dc, TCGv src) +{ + tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cansave)); +} + +TRANS(WRPR_cansave, 64, do_wr_special, a, supervisor(dc), do_wrcansave) + +static void do_wrcanrestore(DisasContext *dc, TCGv src) +{ + tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(canrestore)); +} + +TRANS(WRPR_canrestore, 64, do_wr_special, a, supervisor(dc), do_wrcanrestore) + +static void do_wrcleanwin(DisasContext *dc, TCGv src) +{ + tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cleanwin)); +} + +TRANS(WRPR_cleanwin, 64, do_wr_special, a, supervisor(dc), do_wrcleanwin) + +static void do_wrotherwin(DisasContext *dc, TCGv src) +{ + tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(otherwin)); +} + +TRANS(WRPR_otherwin, 64, do_wr_special, a, supervisor(dc), do_wrotherwin) + +static void do_wrwstate(DisasContext *dc, TCGv src) +{ + tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(wstate)); +} + +TRANS(WRPR_wstate, 64, do_wr_special, a, supervisor(dc), do_wrwstate) + +static void do_wrgl(DisasContext *dc, TCGv src) +{ + gen_helper_wrgl(tcg_env, src); +} + +TRANS(WRPR_gl, GL, do_wr_special, a, supervisor(dc), do_wrgl) + +/* UA2005 strand status */ +static void do_wrssr(DisasContext *dc, TCGv src) +{ + tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(ssr)); +} + +TRANS(WRPR_strand_status, HYPV, do_wr_special, a, hypervisor(dc), do_wrssr) + +TRANS(WRTBR, 32, do_wr_special, a, supervisor(dc), do_wrtba) + +static void do_wrhpstate(DisasContext *dc, TCGv src) +{ + tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hpstate)); + dc->base.is_jmp = DISAS_EXIT; +} + +TRANS(WRHPR_hpstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhpstate) + +static void do_wrhtstate(DisasContext *dc, TCGv src) +{ + TCGv_i32 tl = tcg_temp_new_i32(); + TCGv_ptr tp = tcg_temp_new_ptr(); + + tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl)); + tcg_gen_andi_i32(tl, tl, MAXTL_MASK); + tcg_gen_shli_i32(tl, tl, 3); + tcg_gen_ext_i32_ptr(tp, tl); + tcg_gen_add_ptr(tp, tp, tcg_env); + + tcg_gen_st_tl(src, tp, env64_field_offsetof(htstate)); +} + +TRANS(WRHPR_htstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtstate) + +static void do_wrhintp(DisasContext *dc, TCGv src) +{ + tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hintp)); +} + +TRANS(WRHPR_hintp, HYPV, do_wr_special, a, hypervisor(dc), do_wrhintp) + +static void do_wrhtba(DisasContext *dc, TCGv src) +{ + tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(htba)); +} + +TRANS(WRHPR_htba, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtba) + +static void do_wrhstick_cmpr(DisasContext *dc, TCGv src) +{ + TCGv_ptr r_tickptr = tcg_temp_new_ptr(); + + tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hstick_cmpr)); + tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(hstick)); + translator_io_start(&dc->base); + gen_helper_tick_set_limit(r_tickptr, src); + /* End TB to handle timer interrupt */ + dc->base.is_jmp = DISAS_EXIT; +} + +TRANS(WRHPR_hstick_cmpr, HYPV, do_wr_special, a, hypervisor(dc), + do_wrhstick_cmpr) + +static bool do_saved_restored(DisasContext *dc, bool saved) +{ + if (!supervisor(dc)) { + return raise_priv(dc); + } + if (saved) { + gen_helper_saved(tcg_env); + } else { + gen_helper_restored(tcg_env); + } + return advance_pc(dc); +} + +TRANS(SAVED, 64, do_saved_restored, true) +TRANS(RESTORED, 64, do_saved_restored, false) + +static bool trans_NOP(DisasContext *dc, arg_NOP *a) +{ + return advance_pc(dc); +} + +/* + * TODO: Need a feature bit for sparcv8. + * In the meantime, treat all 32-bit cpus like sparcv7. 
+ */ +TRANS(NOP_v7, 32, trans_NOP, a) +TRANS(NOP_v9, 64, trans_NOP, a) + +static bool do_arith_int(DisasContext *dc, arg_r_r_ri_cc *a, int cc_op, + void (*func)(TCGv, TCGv, TCGv), + void (*funci)(TCGv, TCGv, target_long)) +{ + TCGv dst, src1; + + /* For simplicity, we under-decoded the rs2 form. */ + if (!a->imm && a->rs2_or_imm & ~0x1f) { + return false; + } + + if (a->cc) { + dst = cpu_cc_dst; + } else { + dst = gen_dest_gpr(dc, a->rd); + } + src1 = gen_load_gpr(dc, a->rs1); + + if (a->imm || a->rs2_or_imm == 0) { + if (funci) { + funci(dst, src1, a->rs2_or_imm); + } else { + func(dst, src1, tcg_constant_tl(a->rs2_or_imm)); + } + } else { + func(dst, src1, cpu_regs[a->rs2_or_imm]); + } + gen_store_gpr(dc, a->rd, dst); + + if (a->cc) { + tcg_gen_movi_i32(cpu_cc_op, cc_op); + dc->cc_op = cc_op; + } + return advance_pc(dc); +} + +static bool do_arith(DisasContext *dc, arg_r_r_ri_cc *a, int cc_op, + void (*func)(TCGv, TCGv, TCGv), + void (*funci)(TCGv, TCGv, target_long), + void (*func_cc)(TCGv, TCGv, TCGv)) +{ + if (a->cc) { + assert(cc_op >= 0); + return do_arith_int(dc, a, cc_op, func_cc, NULL); + } + return do_arith_int(dc, a, cc_op, func, funci); +} + +static bool do_logic(DisasContext *dc, arg_r_r_ri_cc *a, + void (*func)(TCGv, TCGv, TCGv), + void (*funci)(TCGv, TCGv, target_long)) +{ + return do_arith_int(dc, a, CC_OP_LOGIC, func, funci); +} + +TRANS(ADD, ALL, do_arith, a, CC_OP_ADD, + tcg_gen_add_tl, tcg_gen_addi_tl, gen_op_add_cc) +TRANS(SUB, ALL, do_arith, a, CC_OP_SUB, + tcg_gen_sub_tl, tcg_gen_subi_tl, gen_op_sub_cc) + +TRANS(TADDcc, ALL, do_arith, a, CC_OP_TADD, NULL, NULL, gen_op_add_cc) +TRANS(TSUBcc, ALL, do_arith, a, CC_OP_TSUB, NULL, NULL, gen_op_sub_cc) +TRANS(TADDccTV, ALL, do_arith, a, CC_OP_TADDTV, NULL, NULL, gen_op_taddcctv) +TRANS(TSUBccTV, ALL, do_arith, a, CC_OP_TSUBTV, NULL, NULL, gen_op_tsubcctv) + +TRANS(AND, ALL, do_logic, a, tcg_gen_and_tl, tcg_gen_andi_tl) +TRANS(XOR, ALL, do_logic, a, tcg_gen_xor_tl, tcg_gen_xori_tl) +TRANS(ANDN, ALL, do_logic, a, tcg_gen_andc_tl, NULL) +TRANS(ORN, ALL, do_logic, a, tcg_gen_orc_tl, NULL) +TRANS(XORN, ALL, do_logic, a, tcg_gen_eqv_tl, NULL) + +TRANS(MULX, 64, do_arith, a, -1, tcg_gen_mul_tl, tcg_gen_muli_tl, NULL) +TRANS(UMUL, MUL, do_logic, a, gen_op_umul, NULL) +TRANS(SMUL, MUL, do_logic, a, gen_op_smul, NULL) + +TRANS(UDIVX, 64, do_arith, a, -1, gen_op_udivx, NULL, NULL) +TRANS(SDIVX, 64, do_arith, a, -1, gen_op_sdivx, NULL, NULL) +TRANS(UDIV, DIV, do_arith, a, CC_OP_DIV, gen_op_udiv, NULL, gen_op_udivcc) +TRANS(SDIV, DIV, do_arith, a, CC_OP_DIV, gen_op_sdiv, NULL, gen_op_sdivcc) + +/* TODO: Should have feature bit -- comes in with UltraSparc T2. */ +TRANS(POPC, 64, do_arith, a, -1, gen_op_popc, NULL, NULL) + +static bool trans_OR(DisasContext *dc, arg_r_r_ri_cc *a) +{ + /* OR with %g0 is the canonical alias for MOV. */ + if (!a->cc && a->rs1 == 0) { + if (a->imm || a->rs2_or_imm == 0) { + gen_store_gpr(dc, a->rd, tcg_constant_tl(a->rs2_or_imm)); + } else if (a->rs2_or_imm & ~0x1f) { + /* For simplicity, we under-decoded the rs2 form. */ + return false; + } else { + gen_store_gpr(dc, a->rd, cpu_regs[a->rs2_or_imm]); + } + return advance_pc(dc); + } + return do_logic(dc, a, tcg_gen_or_tl, tcg_gen_ori_tl); +} + +static bool trans_ADDC(DisasContext *dc, arg_r_r_ri_cc *a) +{ + switch (dc->cc_op) { + case CC_OP_DIV: + case CC_OP_LOGIC: + /* Carry is known to be zero. Fall back to plain ADD. 
*/ + return do_arith(dc, a, CC_OP_ADD, + tcg_gen_add_tl, tcg_gen_addi_tl, gen_op_add_cc); + case CC_OP_ADD: + case CC_OP_TADD: + case CC_OP_TADDTV: + return do_arith(dc, a, CC_OP_ADDX, + gen_op_addc_add, NULL, gen_op_addccc_add); + case CC_OP_SUB: + case CC_OP_TSUB: + case CC_OP_TSUBTV: + return do_arith(dc, a, CC_OP_ADDX, + gen_op_addc_sub, NULL, gen_op_addccc_sub); + default: + return do_arith(dc, a, CC_OP_ADDX, + gen_op_addc_generic, NULL, gen_op_addccc_generic); + } +} + +static bool trans_SUBC(DisasContext *dc, arg_r_r_ri_cc *a) +{ + switch (dc->cc_op) { + case CC_OP_DIV: + case CC_OP_LOGIC: + /* Carry is known to be zero. Fall back to plain SUB. */ + return do_arith(dc, a, CC_OP_SUB, + tcg_gen_sub_tl, tcg_gen_subi_tl, gen_op_sub_cc); + case CC_OP_ADD: + case CC_OP_TADD: + case CC_OP_TADDTV: + return do_arith(dc, a, CC_OP_SUBX, + gen_op_subc_add, NULL, gen_op_subccc_add); + case CC_OP_SUB: + case CC_OP_TSUB: + case CC_OP_TSUBTV: + return do_arith(dc, a, CC_OP_SUBX, + gen_op_subc_sub, NULL, gen_op_subccc_sub); + default: + return do_arith(dc, a, CC_OP_SUBX, + gen_op_subc_generic, NULL, gen_op_subccc_generic); + } +} + +static bool trans_MULScc(DisasContext *dc, arg_r_r_ri_cc *a) +{ + update_psr(dc); + return do_arith(dc, a, CC_OP_ADD, NULL, NULL, gen_op_mulscc); +} + +static bool gen_edge(DisasContext *dc, arg_r_r_r *a, int width, bool cc, bool left) { - TCGv lo1, lo2; + TCGv dst, s1, s2, lo1, lo2; uint64_t amask, tabl, tabr; int shift, imask, omask; + dst = gen_dest_gpr(dc, a->rd); + s1 = gen_load_gpr(dc, a->rs1); + s2 = gen_load_gpr(dc, a->rs2); + if (cc) { tcg_gen_mov_tl(cpu_cc_src, s1); tcg_gen_mov_tl(cpu_cc_src2, s2); @@ -2858,13 +3913,15 @@ static void gen_edge(DisasContext *dc, TCGv dst, TCGv s1, TCGv s2, dc->cc_op = CC_OP_SUB; } - /* Theory of operation: there are two tables, left and right (not to - be confused with the left and right versions of the opcode). These - are indexed by the low 3 bits of the inputs. To make things "easy", - these tables are loaded into two constants, TABL and TABR below. - The operation index = (input & imask) << shift calculates the index - into the constant, while val = (table >> index) & omask calculates - the value we're looking for. */ + /* + * Theory of operation: there are two tables, left and right (not to + * be confused with the left and right versions of the opcode). These + * are indexed by the low 3 bits of the inputs. To make things "easy", + * these tables are loaded into two constants, TABL and TABR below. + * The operation index = (input & imask) << shift calculates the index + * into the constant, while val = (table >> index) & omask calculates + * the value we're looking for. + */ switch (width) { case 8: imask = 0x7; @@ -2918,2653 +3975,1345 @@ static void gen_edge(DisasContext *dc, TCGv dst, TCGv s1, TCGv s2, tcg_gen_andi_tl(lo1, lo1, omask); tcg_gen_andi_tl(lo2, lo2, omask); - amask = -8; - if (AM_CHECK(dc)) { - amask &= 0xffffffffULL; - } + amask = address_mask_i(dc, -8); tcg_gen_andi_tl(s1, s1, amask); tcg_gen_andi_tl(s2, s2, amask); /* Compute dst = (s1 == s2 ? lo1 : lo1 & lo2). 
*/ tcg_gen_and_tl(lo2, lo2, lo1); tcg_gen_movcond_tl(TCG_COND_EQ, dst, s1, s2, lo1, lo2); + + gen_store_gpr(dc, a->rd, dst); + return advance_pc(dc); +} + +TRANS(EDGE8cc, VIS1, gen_edge, a, 8, 1, 0) +TRANS(EDGE8Lcc, VIS1, gen_edge, a, 8, 1, 1) +TRANS(EDGE16cc, VIS1, gen_edge, a, 16, 1, 0) +TRANS(EDGE16Lcc, VIS1, gen_edge, a, 16, 1, 1) +TRANS(EDGE32cc, VIS1, gen_edge, a, 32, 1, 0) +TRANS(EDGE32Lcc, VIS1, gen_edge, a, 32, 1, 1) + +TRANS(EDGE8N, VIS2, gen_edge, a, 8, 0, 0) +TRANS(EDGE8LN, VIS2, gen_edge, a, 8, 0, 1) +TRANS(EDGE16N, VIS2, gen_edge, a, 16, 0, 0) +TRANS(EDGE16LN, VIS2, gen_edge, a, 16, 0, 1) +TRANS(EDGE32N, VIS2, gen_edge, a, 32, 0, 0) +TRANS(EDGE32LN, VIS2, gen_edge, a, 32, 0, 1) + +static bool do_rrr(DisasContext *dc, arg_r_r_r *a, + void (*func)(TCGv, TCGv, TCGv)) +{ + TCGv dst = gen_dest_gpr(dc, a->rd); + TCGv src1 = gen_load_gpr(dc, a->rs1); + TCGv src2 = gen_load_gpr(dc, a->rs2); + + func(dst, src1, src2); + gen_store_gpr(dc, a->rd, dst); + return advance_pc(dc); } -static void gen_alignaddr(TCGv dst, TCGv s1, TCGv s2, bool left) +TRANS(ARRAY8, VIS1, do_rrr, a, gen_helper_array8) +TRANS(ARRAY16, VIS1, do_rrr, a, gen_op_array16) +TRANS(ARRAY32, VIS1, do_rrr, a, gen_op_array32) + +static void gen_op_alignaddr(TCGv dst, TCGv s1, TCGv s2) { +#ifdef TARGET_SPARC64 TCGv tmp = tcg_temp_new(); tcg_gen_add_tl(tmp, s1, s2); tcg_gen_andi_tl(dst, tmp, -8); - if (left) { - tcg_gen_neg_tl(tmp, tmp); - } tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3); +#else + g_assert_not_reached(); +#endif } -static void gen_faligndata(TCGv dst, TCGv gsr, TCGv s1, TCGv s2) +static void gen_op_alignaddrl(TCGv dst, TCGv s1, TCGv s2) { - TCGv t1, t2, shift; - - t1 = tcg_temp_new(); - t2 = tcg_temp_new(); - shift = tcg_temp_new(); +#ifdef TARGET_SPARC64 + TCGv tmp = tcg_temp_new(); - tcg_gen_andi_tl(shift, gsr, 7); - tcg_gen_shli_tl(shift, shift, 3); - tcg_gen_shl_tl(t1, s1, shift); + tcg_gen_add_tl(tmp, s1, s2); + tcg_gen_andi_tl(dst, tmp, -8); + tcg_gen_neg_tl(tmp, tmp); + tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3); +#else + g_assert_not_reached(); +#endif +} - /* A shift of 64 does not produce 0 in TCG. Divide this into a - shift of (up to 63) followed by a constant shift of 1. */ - tcg_gen_xori_tl(shift, shift, 63); - tcg_gen_shr_tl(t2, s2, shift); - tcg_gen_shri_tl(t2, t2, 1); +TRANS(ALIGNADDR, VIS1, do_rrr, a, gen_op_alignaddr) +TRANS(ALIGNADDRL, VIS1, do_rrr, a, gen_op_alignaddrl) - tcg_gen_or_tl(dst, t1, t2); -} +static void gen_op_bmask(TCGv dst, TCGv s1, TCGv s2) +{ +#ifdef TARGET_SPARC64 + tcg_gen_add_tl(dst, s1, s2); + tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, dst, 32, 32); +#else + g_assert_not_reached(); #endif +} -#define CHECK_IU_FEATURE(dc, FEATURE) \ - if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \ - goto illegal_insn; -#define CHECK_FPU_FEATURE(dc, FEATURE) \ - if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \ - goto nfpu_insn; +TRANS(BMASK, VIS2, do_rrr, a, gen_op_bmask) -/* before an instruction, dc->pc must be static */ -static void disas_sparc_insn(DisasContext * dc, unsigned int insn) +static bool do_shift_r(DisasContext *dc, arg_shiftr *a, bool l, bool u) { - unsigned int opc, rs1, rs2, rd; - TCGv cpu_src1, cpu_src2; - TCGv_i32 cpu_src1_32, cpu_src2_32, cpu_dst_32; - TCGv_i64 cpu_src1_64, cpu_src2_64, cpu_dst_64; - target_long simm; + TCGv dst, src1, src2; - opc = GET_FIELD(insn, 0, 1); - rd = GET_FIELD(insn, 2, 6); + /* Reject 64-bit shifts for sparc32. 
*/ + if (avail_32(dc) && a->x) { + return false; + } - switch (opc) { - case 0: /* branches/sethi */ - { - unsigned int xop = GET_FIELD(insn, 7, 9); - int32_t target; - switch (xop) { -#ifdef TARGET_SPARC64 - case 0x1: /* V9 BPcc */ - { - int cc; - - target = GET_FIELD_SP(insn, 0, 18); - target = sign_extend(target, 19); - target <<= 2; - cc = GET_FIELD_SP(insn, 20, 21); - if (cc == 0) - do_branch(dc, target, insn, 0); - else if (cc == 2) - do_branch(dc, target, insn, 1); - else - goto illegal_insn; - goto jmp_insn; - } - case 0x3: /* V9 BPr */ - { - target = GET_FIELD_SP(insn, 0, 13) | - (GET_FIELD_SP(insn, 20, 21) << 14); - target = sign_extend(target, 16); - target <<= 2; - cpu_src1 = get_src1(dc, insn); - do_branch_reg(dc, target, insn, cpu_src1); - goto jmp_insn; - } - case 0x5: /* V9 FBPcc */ - { - int cc = GET_FIELD_SP(insn, 20, 21); - if (gen_trap_ifnofpu(dc)) { - goto jmp_insn; - } - target = GET_FIELD_SP(insn, 0, 18); - target = sign_extend(target, 19); - target <<= 2; - do_fbranch(dc, target, insn, cc); - goto jmp_insn; - } -#else - case 0x7: /* CBN+x */ - { - goto ncp_insn; - } -#endif - case 0x2: /* BN+x */ - { - target = GET_FIELD(insn, 10, 31); - target = sign_extend(target, 22); - target <<= 2; - do_branch(dc, target, insn, 0); - goto jmp_insn; - } - case 0x6: /* FBN+x */ - { - if (gen_trap_ifnofpu(dc)) { - goto jmp_insn; - } - target = GET_FIELD(insn, 10, 31); - target = sign_extend(target, 22); - target <<= 2; - do_fbranch(dc, target, insn, 0); - goto jmp_insn; - } - case 0x4: /* SETHI */ - /* Special-case %g0 because that's the canonical nop. */ - if (rd) { - uint32_t value = GET_FIELD(insn, 10, 31); - TCGv t = gen_dest_gpr(dc, rd); - tcg_gen_movi_tl(t, value << 10); - gen_store_gpr(dc, rd, t); - } - break; - case 0x0: /* UNIMPL */ - default: - goto illegal_insn; - } - break; + src2 = tcg_temp_new(); + tcg_gen_andi_tl(src2, gen_load_gpr(dc, a->rs2), a->x ? 63 : 31); + src1 = gen_load_gpr(dc, a->rs1); + dst = gen_dest_gpr(dc, a->rd); + + if (l) { + tcg_gen_shl_tl(dst, src1, src2); + if (!a->x) { + tcg_gen_ext32u_tl(dst, dst); } - break; - case 1: /*CALL*/ - { - target_long target = GET_FIELDs(insn, 2, 31) << 2; - TCGv o7 = gen_dest_gpr(dc, 15); + } else if (u) { + if (!a->x) { + tcg_gen_ext32u_tl(dst, src1); + src1 = dst; + } + tcg_gen_shr_tl(dst, src1, src2); + } else { + if (!a->x) { + tcg_gen_ext32s_tl(dst, src1); + src1 = dst; + } + tcg_gen_sar_tl(dst, src1, src2); + } + gen_store_gpr(dc, a->rd, dst); + return advance_pc(dc); +} - tcg_gen_movi_tl(o7, dc->pc); - gen_store_gpr(dc, 15, o7); - target += dc->pc; - gen_mov_pc_npc(dc); -#ifdef TARGET_SPARC64 - if (unlikely(AM_CHECK(dc))) { - target &= 0xffffffffULL; - } -#endif - dc->npc = target; +TRANS(SLL_r, ALL, do_shift_r, a, true, true) +TRANS(SRL_r, ALL, do_shift_r, a, false, true) +TRANS(SRA_r, ALL, do_shift_r, a, false, false) + +static bool do_shift_i(DisasContext *dc, arg_shifti *a, bool l, bool u) +{ + TCGv dst, src1; + + /* Reject 64-bit shifts for sparc32. 
*/ + if (avail_32(dc) && (a->x || a->i >= 32)) { + return false; + } + + src1 = gen_load_gpr(dc, a->rs1); + dst = gen_dest_gpr(dc, a->rd); + + if (avail_32(dc) || a->x) { + if (l) { + tcg_gen_shli_tl(dst, src1, a->i); + } else if (u) { + tcg_gen_shri_tl(dst, src1, a->i); + } else { + tcg_gen_sari_tl(dst, src1, a->i); } - goto jmp_insn; - case 2: /* FPU & Logical Operations */ - { - unsigned int xop = GET_FIELD(insn, 7, 12); - TCGv cpu_dst = tcg_temp_new(); - TCGv cpu_tmp0; - - if (xop == 0x3a) { /* generate trap */ - int cond = GET_FIELD(insn, 3, 6); - TCGv_i32 trap; - TCGLabel *l1 = NULL; - int mask; - - if (cond == 0) { - /* Trap never. */ - break; - } + } else { + if (l) { + tcg_gen_deposit_z_tl(dst, src1, a->i, 32 - a->i); + } else if (u) { + tcg_gen_extract_tl(dst, src1, a->i, 32 - a->i); + } else { + tcg_gen_sextract_tl(dst, src1, a->i, 32 - a->i); + } + } + gen_store_gpr(dc, a->rd, dst); + return advance_pc(dc); +} - save_state(dc); +TRANS(SLL_i, ALL, do_shift_i, a, true, true) +TRANS(SRL_i, ALL, do_shift_i, a, false, true) +TRANS(SRA_i, ALL, do_shift_i, a, false, false) - if (cond != 8) { - /* Conditional trap. */ - DisasCompare cmp; -#ifdef TARGET_SPARC64 - /* V9 icc/xcc */ - int cc = GET_FIELD_SP(insn, 11, 12); - if (cc == 0) { - gen_compare(&cmp, 0, cond, dc); - } else if (cc == 2) { - gen_compare(&cmp, 1, cond, dc); - } else { - goto illegal_insn; - } -#else - gen_compare(&cmp, 0, cond, dc); -#endif - l1 = gen_new_label(); - tcg_gen_brcond_tl(tcg_invert_cond(cmp.cond), - cmp.c1, cmp.c2, l1); - } +static TCGv gen_rs2_or_imm(DisasContext *dc, bool imm, int rs2_or_imm) +{ + /* For simplicity, we under-decoded the rs2 form. */ + if (!imm && rs2_or_imm & ~0x1f) { + return NULL; + } + if (imm || rs2_or_imm == 0) { + return tcg_constant_tl(rs2_or_imm); + } else { + return cpu_regs[rs2_or_imm]; + } +} - mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc) - ? UA2005_HTRAP_MASK : V8_TRAP_MASK); - - /* Don't use the normal temporaries, as they may well have - gone out of scope with the branch above. While we're - doing that we might as well pre-truncate to 32-bit. */ - trap = tcg_temp_new_i32(); - - rs1 = GET_FIELD_SP(insn, 14, 18); - if (IS_IMM) { - rs2 = GET_FIELD_SP(insn, 0, 7); - if (rs1 == 0) { - tcg_gen_movi_i32(trap, (rs2 & mask) + TT_TRAP); - /* Signal that the trap value is fully constant. */ - mask = 0; - } else { - TCGv t1 = gen_load_gpr(dc, rs1); - tcg_gen_trunc_tl_i32(trap, t1); - tcg_gen_addi_i32(trap, trap, rs2); - } - } else { - TCGv t1, t2; - rs2 = GET_FIELD_SP(insn, 0, 4); - t1 = gen_load_gpr(dc, rs1); - t2 = gen_load_gpr(dc, rs2); - tcg_gen_add_tl(t1, t1, t2); - tcg_gen_trunc_tl_i32(trap, t1); - } - if (mask != 0) { - tcg_gen_andi_i32(trap, trap, mask); - tcg_gen_addi_i32(trap, trap, TT_TRAP); - } +static bool do_mov_cond(DisasContext *dc, DisasCompare *cmp, int rd, TCGv src2) +{ + TCGv dst = gen_load_gpr(dc, rd); - gen_helper_raise_exception(tcg_env, trap); + tcg_gen_movcond_tl(cmp->cond, dst, cmp->c1, cmp->c2, src2, dst); + gen_store_gpr(dc, rd, dst); + return advance_pc(dc); +} - if (cond == 8) { - /* An unconditional trap ends the TB. */ - dc->base.is_jmp = DISAS_NORETURN; - goto jmp_insn; - } else { - /* A conditional trap falls through to the next insn. */ - gen_set_label(l1); - break; - } - } else if (xop == 0x28) { - rs1 = GET_FIELD(insn, 13, 17); - switch(rs1) { - case 0: /* rdy */ -#ifndef TARGET_SPARC64 - case 0x01 ... 
0x0e: /* undefined in the SPARCv8 - manual, rdy on the microSPARC - II */ - case 0x0f: /* stbar in the SPARCv8 manual, - rdy on the microSPARC II */ - case 0x10 ... 0x1f: /* implementation-dependent in the - SPARCv8 manual, rdy on the - microSPARC II */ - /* Read Asr17 */ - if (rs1 == 0x11 && dc->def->features & CPU_FEATURE_ASR17) { - TCGv t = gen_dest_gpr(dc, rd); - /* Read Asr17 for a Leon3 monoprocessor */ - tcg_gen_movi_tl(t, (1 << 8) | (dc->def->nwindows - 1)); - gen_store_gpr(dc, rd, t); - break; - } -#endif - gen_store_gpr(dc, rd, cpu_y); - break; -#ifdef TARGET_SPARC64 - case 0x2: /* V9 rdccr */ - update_psr(dc); - gen_helper_rdccr(cpu_dst, tcg_env); - gen_store_gpr(dc, rd, cpu_dst); - break; - case 0x3: /* V9 rdasi */ - tcg_gen_movi_tl(cpu_dst, dc->asi); - gen_store_gpr(dc, rd, cpu_dst); - break; - case 0x4: /* V9 rdtick */ - { - TCGv_ptr r_tickptr; - TCGv_i32 r_const; - - r_tickptr = tcg_temp_new_ptr(); - r_const = tcg_constant_i32(dc->mem_idx); - tcg_gen_ld_ptr(r_tickptr, tcg_env, - offsetof(CPUSPARCState, tick)); - if (translator_io_start(&dc->base)) { - dc->base.is_jmp = DISAS_EXIT; - } - gen_helper_tick_get_count(cpu_dst, tcg_env, r_tickptr, - r_const); - gen_store_gpr(dc, rd, cpu_dst); - } - break; - case 0x5: /* V9 rdpc */ - { - TCGv t = gen_dest_gpr(dc, rd); - if (unlikely(AM_CHECK(dc))) { - tcg_gen_movi_tl(t, dc->pc & 0xffffffffULL); - } else { - tcg_gen_movi_tl(t, dc->pc); - } - gen_store_gpr(dc, rd, t); - } - break; - case 0x6: /* V9 rdfprs */ - tcg_gen_ext_i32_tl(cpu_dst, cpu_fprs); - gen_store_gpr(dc, rd, cpu_dst); - break; - case 0xf: /* V9 membar */ - break; /* no effect */ - case 0x13: /* Graphics Status */ - if (gen_trap_ifnofpu(dc)) { - goto jmp_insn; - } - gen_store_gpr(dc, rd, cpu_gsr); - break; - case 0x16: /* Softint */ - tcg_gen_ld32s_tl(cpu_dst, tcg_env, - offsetof(CPUSPARCState, softint)); - gen_store_gpr(dc, rd, cpu_dst); - break; - case 0x17: /* Tick compare */ - gen_store_gpr(dc, rd, cpu_tick_cmpr); - break; - case 0x18: /* System tick */ - { - TCGv_ptr r_tickptr; - TCGv_i32 r_const; - - r_tickptr = tcg_temp_new_ptr(); - r_const = tcg_constant_i32(dc->mem_idx); - tcg_gen_ld_ptr(r_tickptr, tcg_env, - offsetof(CPUSPARCState, stick)); - if (translator_io_start(&dc->base)) { - dc->base.is_jmp = DISAS_EXIT; - } - gen_helper_tick_get_count(cpu_dst, tcg_env, r_tickptr, - r_const); - gen_store_gpr(dc, rd, cpu_dst); - } - break; - case 0x19: /* System tick compare */ - gen_store_gpr(dc, rd, cpu_stick_cmpr); - break; - case 0x1a: /* UltraSPARC-T1 Strand status */ - /* XXX HYPV check maybe not enough, UA2005 & UA2007 describe - * this ASR as impl. 
dep - */ - CHECK_IU_FEATURE(dc, HYPV); - { - TCGv t = gen_dest_gpr(dc, rd); - tcg_gen_movi_tl(t, 1UL); - gen_store_gpr(dc, rd, t); - } - break; - case 0x10: /* Performance Control */ - case 0x11: /* Performance Instrumentation Counter */ - case 0x12: /* Dispatch Control */ - case 0x14: /* Softint set, WO */ - case 0x15: /* Softint clear, WO */ -#endif - default: - goto illegal_insn; - } -#if !defined(CONFIG_USER_ONLY) - } else if (xop == 0x29) { /* rdpsr / UA2005 rdhpr */ -#ifndef TARGET_SPARC64 - if (!supervisor(dc)) { - goto priv_insn; - } - update_psr(dc); - gen_helper_rdpsr(cpu_dst, tcg_env); -#else - CHECK_IU_FEATURE(dc, HYPV); - if (!hypervisor(dc)) - goto priv_insn; - rs1 = GET_FIELD(insn, 13, 17); - switch (rs1) { - case 0: // hpstate - tcg_gen_ld_i64(cpu_dst, tcg_env, - offsetof(CPUSPARCState, hpstate)); - break; - case 1: // htstate - // gen_op_rdhtstate(); - break; - case 3: // hintp - tcg_gen_mov_tl(cpu_dst, cpu_hintp); - break; - case 5: // htba - tcg_gen_mov_tl(cpu_dst, cpu_htba); - break; - case 6: // hver - tcg_gen_mov_tl(cpu_dst, cpu_hver); - break; - case 31: // hstick_cmpr - tcg_gen_mov_tl(cpu_dst, cpu_hstick_cmpr); - break; - default: - goto illegal_insn; - } -#endif - gen_store_gpr(dc, rd, cpu_dst); - break; - } else if (xop == 0x2a) { /* rdwim / V9 rdpr */ - if (!supervisor(dc)) { - goto priv_insn; - } - cpu_tmp0 = tcg_temp_new(); -#ifdef TARGET_SPARC64 - rs1 = GET_FIELD(insn, 13, 17); - switch (rs1) { - case 0: // tpc - { - TCGv_ptr r_tsptr; - - r_tsptr = tcg_temp_new_ptr(); - gen_load_trap_state_at_tl(r_tsptr, tcg_env); - tcg_gen_ld_tl(cpu_tmp0, r_tsptr, - offsetof(trap_state, tpc)); - } - break; - case 1: // tnpc - { - TCGv_ptr r_tsptr; - - r_tsptr = tcg_temp_new_ptr(); - gen_load_trap_state_at_tl(r_tsptr, tcg_env); - tcg_gen_ld_tl(cpu_tmp0, r_tsptr, - offsetof(trap_state, tnpc)); - } - break; - case 2: // tstate - { - TCGv_ptr r_tsptr; - - r_tsptr = tcg_temp_new_ptr(); - gen_load_trap_state_at_tl(r_tsptr, tcg_env); - tcg_gen_ld_tl(cpu_tmp0, r_tsptr, - offsetof(trap_state, tstate)); - } - break; - case 3: // tt - { - TCGv_ptr r_tsptr = tcg_temp_new_ptr(); - - gen_load_trap_state_at_tl(r_tsptr, tcg_env); - tcg_gen_ld32s_tl(cpu_tmp0, r_tsptr, - offsetof(trap_state, tt)); - } - break; - case 4: // tick - { - TCGv_ptr r_tickptr; - TCGv_i32 r_const; - - r_tickptr = tcg_temp_new_ptr(); - r_const = tcg_constant_i32(dc->mem_idx); - tcg_gen_ld_ptr(r_tickptr, tcg_env, - offsetof(CPUSPARCState, tick)); - if (translator_io_start(&dc->base)) { - dc->base.is_jmp = DISAS_EXIT; - } - gen_helper_tick_get_count(cpu_tmp0, tcg_env, - r_tickptr, r_const); - } - break; - case 5: // tba - tcg_gen_mov_tl(cpu_tmp0, cpu_tbr); - break; - case 6: // pstate - tcg_gen_ld32s_tl(cpu_tmp0, tcg_env, - offsetof(CPUSPARCState, pstate)); - break; - case 7: // tl - tcg_gen_ld32s_tl(cpu_tmp0, tcg_env, - offsetof(CPUSPARCState, tl)); - break; - case 8: // pil - tcg_gen_ld32s_tl(cpu_tmp0, tcg_env, - offsetof(CPUSPARCState, psrpil)); - break; - case 9: // cwp - gen_helper_rdcwp(cpu_tmp0, tcg_env); - break; - case 10: // cansave - tcg_gen_ld32s_tl(cpu_tmp0, tcg_env, - offsetof(CPUSPARCState, cansave)); - break; - case 11: // canrestore - tcg_gen_ld32s_tl(cpu_tmp0, tcg_env, - offsetof(CPUSPARCState, canrestore)); - break; - case 12: // cleanwin - tcg_gen_ld32s_tl(cpu_tmp0, tcg_env, - offsetof(CPUSPARCState, cleanwin)); - break; - case 13: // otherwin - tcg_gen_ld32s_tl(cpu_tmp0, tcg_env, - offsetof(CPUSPARCState, otherwin)); - break; - case 14: // wstate - tcg_gen_ld32s_tl(cpu_tmp0, tcg_env, - 
offsetof(CPUSPARCState, wstate)); - break; - case 16: // UA2005 gl - CHECK_IU_FEATURE(dc, GL); - tcg_gen_ld32s_tl(cpu_tmp0, tcg_env, - offsetof(CPUSPARCState, gl)); - break; - case 26: // UA2005 strand status - CHECK_IU_FEATURE(dc, HYPV); - if (!hypervisor(dc)) - goto priv_insn; - tcg_gen_mov_tl(cpu_tmp0, cpu_ssr); - break; - case 31: // ver - tcg_gen_mov_tl(cpu_tmp0, cpu_ver); - break; - case 15: // fq - default: - goto illegal_insn; - } -#else - tcg_gen_ext_i32_tl(cpu_tmp0, cpu_wim); -#endif - gen_store_gpr(dc, rd, cpu_tmp0); - break; -#endif -#if defined(TARGET_SPARC64) || !defined(CONFIG_USER_ONLY) - } else if (xop == 0x2b) { /* rdtbr / V9 flushw */ -#ifdef TARGET_SPARC64 - gen_helper_flushw(tcg_env); -#else - if (!supervisor(dc)) - goto priv_insn; - gen_store_gpr(dc, rd, cpu_tbr); -#endif - break; -#endif - } else if (xop == 0x34) { /* FPU Operations */ - if (gen_trap_ifnofpu(dc)) { - goto jmp_insn; - } - gen_op_clear_ieee_excp_and_FTT(); - rs1 = GET_FIELD(insn, 13, 17); - rs2 = GET_FIELD(insn, 27, 31); - xop = GET_FIELD(insn, 18, 26); - - switch (xop) { - case 0x1: /* fmovs */ - cpu_src1_32 = gen_load_fpr_F(dc, rs2); - gen_store_fpr_F(dc, rd, cpu_src1_32); - break; - case 0x5: /* fnegs */ - gen_ne_fop_FF(dc, rd, rs2, gen_helper_fnegs); - break; - case 0x9: /* fabss */ - gen_ne_fop_FF(dc, rd, rs2, gen_helper_fabss); - break; - case 0x29: /* fsqrts */ - CHECK_FPU_FEATURE(dc, FSQRT); - gen_fop_FF(dc, rd, rs2, gen_helper_fsqrts); - break; - case 0x2a: /* fsqrtd */ - CHECK_FPU_FEATURE(dc, FSQRT); - gen_fop_DD(dc, rd, rs2, gen_helper_fsqrtd); - break; - case 0x2b: /* fsqrtq */ - CHECK_FPU_FEATURE(dc, FLOAT128); - gen_fop_QQ(dc, rd, rs2, gen_helper_fsqrtq); - break; - case 0x41: /* fadds */ - gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fadds); - break; - case 0x42: /* faddd */ - gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_faddd); - break; - case 0x43: /* faddq */ - CHECK_FPU_FEATURE(dc, FLOAT128); - gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_faddq); - break; - case 0x45: /* fsubs */ - gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fsubs); - break; - case 0x46: /* fsubd */ - gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fsubd); - break; - case 0x47: /* fsubq */ - CHECK_FPU_FEATURE(dc, FLOAT128); - gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fsubq); - break; - case 0x49: /* fmuls */ - CHECK_FPU_FEATURE(dc, FMUL); - gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fmuls); - break; - case 0x4a: /* fmuld */ - CHECK_FPU_FEATURE(dc, FMUL); - gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld); - break; - case 0x4b: /* fmulq */ - CHECK_FPU_FEATURE(dc, FLOAT128); - CHECK_FPU_FEATURE(dc, FMUL); - gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fmulq); - break; - case 0x4d: /* fdivs */ - gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fdivs); - break; - case 0x4e: /* fdivd */ - gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fdivd); - break; - case 0x4f: /* fdivq */ - CHECK_FPU_FEATURE(dc, FLOAT128); - gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fdivq); - break; - case 0x69: /* fsmuld */ - CHECK_FPU_FEATURE(dc, FSMULD); - gen_fop_DFF(dc, rd, rs1, rs2, gen_helper_fsmuld); - break; - case 0x6e: /* fdmulq */ - CHECK_FPU_FEATURE(dc, FLOAT128); - gen_fop_QDD(dc, rd, rs1, rs2, gen_helper_fdmulq); - break; - case 0xc4: /* fitos */ - gen_fop_FF(dc, rd, rs2, gen_helper_fitos); - break; - case 0xc6: /* fdtos */ - gen_fop_FD(dc, rd, rs2, gen_helper_fdtos); - break; - case 0xc7: /* fqtos */ - CHECK_FPU_FEATURE(dc, FLOAT128); - gen_fop_FQ(dc, rd, rs2, gen_helper_fqtos); - break; - case 0xc8: /* fitod */ - gen_ne_fop_DF(dc, rd, rs2, gen_helper_fitod); - break; - case 
0xc9: /* fstod */ - gen_ne_fop_DF(dc, rd, rs2, gen_helper_fstod); - break; - case 0xcb: /* fqtod */ - CHECK_FPU_FEATURE(dc, FLOAT128); - gen_fop_DQ(dc, rd, rs2, gen_helper_fqtod); - break; - case 0xcc: /* fitoq */ - CHECK_FPU_FEATURE(dc, FLOAT128); - gen_ne_fop_QF(dc, rd, rs2, gen_helper_fitoq); - break; - case 0xcd: /* fstoq */ - CHECK_FPU_FEATURE(dc, FLOAT128); - gen_ne_fop_QF(dc, rd, rs2, gen_helper_fstoq); - break; - case 0xce: /* fdtoq */ - CHECK_FPU_FEATURE(dc, FLOAT128); - gen_ne_fop_QD(dc, rd, rs2, gen_helper_fdtoq); - break; - case 0xd1: /* fstoi */ - gen_fop_FF(dc, rd, rs2, gen_helper_fstoi); - break; - case 0xd2: /* fdtoi */ - gen_fop_FD(dc, rd, rs2, gen_helper_fdtoi); - break; - case 0xd3: /* fqtoi */ - CHECK_FPU_FEATURE(dc, FLOAT128); - gen_fop_FQ(dc, rd, rs2, gen_helper_fqtoi); - break; -#ifdef TARGET_SPARC64 - case 0x2: /* V9 fmovd */ - cpu_src1_64 = gen_load_fpr_D(dc, rs2); - gen_store_fpr_D(dc, rd, cpu_src1_64); - break; - case 0x3: /* V9 fmovq */ - CHECK_FPU_FEATURE(dc, FLOAT128); - gen_move_Q(dc, rd, rs2); - break; - case 0x6: /* V9 fnegd */ - gen_ne_fop_DD(dc, rd, rs2, gen_helper_fnegd); - break; - case 0x7: /* V9 fnegq */ - CHECK_FPU_FEATURE(dc, FLOAT128); - gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fnegq); - break; - case 0xa: /* V9 fabsd */ - gen_ne_fop_DD(dc, rd, rs2, gen_helper_fabsd); - break; - case 0xb: /* V9 fabsq */ - CHECK_FPU_FEATURE(dc, FLOAT128); - gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fabsq); - break; - case 0x81: /* V9 fstox */ - gen_fop_DF(dc, rd, rs2, gen_helper_fstox); - break; - case 0x82: /* V9 fdtox */ - gen_fop_DD(dc, rd, rs2, gen_helper_fdtox); - break; - case 0x83: /* V9 fqtox */ - CHECK_FPU_FEATURE(dc, FLOAT128); - gen_fop_DQ(dc, rd, rs2, gen_helper_fqtox); - break; - case 0x84: /* V9 fxtos */ - gen_fop_FD(dc, rd, rs2, gen_helper_fxtos); - break; - case 0x88: /* V9 fxtod */ - gen_fop_DD(dc, rd, rs2, gen_helper_fxtod); - break; - case 0x8c: /* V9 fxtoq */ - CHECK_FPU_FEATURE(dc, FLOAT128); - gen_ne_fop_QD(dc, rd, rs2, gen_helper_fxtoq); - break; -#endif - default: - goto illegal_insn; - } - } else if (xop == 0x35) { /* FPU Operations */ -#ifdef TARGET_SPARC64 - int cond; -#endif - if (gen_trap_ifnofpu(dc)) { - goto jmp_insn; - } - gen_op_clear_ieee_excp_and_FTT(); - rs1 = GET_FIELD(insn, 13, 17); - rs2 = GET_FIELD(insn, 27, 31); - xop = GET_FIELD(insn, 18, 26); +static bool trans_MOVcc(DisasContext *dc, arg_MOVcc *a) +{ + TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm); + DisasCompare cmp; -#ifdef TARGET_SPARC64 -#define FMOVR(sz) \ - do { \ - DisasCompare cmp; \ - cond = GET_FIELD_SP(insn, 10, 12); \ - cpu_src1 = get_src1(dc, insn); \ - gen_compare_reg(&cmp, cond, cpu_src1); \ - gen_fmov##sz(dc, &cmp, rd, rs2); \ - } while (0) - - if ((xop & 0x11f) == 0x005) { /* V9 fmovsr */ - FMOVR(s); - break; - } else if ((xop & 0x11f) == 0x006) { // V9 fmovdr - FMOVR(d); - break; - } else if ((xop & 0x11f) == 0x007) { // V9 fmovqr - CHECK_FPU_FEATURE(dc, FLOAT128); - FMOVR(q); - break; - } -#undef FMOVR -#endif - switch (xop) { -#ifdef TARGET_SPARC64 -#define FMOVCC(fcc, sz) \ - do { \ - DisasCompare cmp; \ - cond = GET_FIELD_SP(insn, 14, 17); \ - gen_fcompare(&cmp, fcc, cond); \ - gen_fmov##sz(dc, &cmp, rd, rs2); \ - } while (0) - - case 0x001: /* V9 fmovscc %fcc0 */ - FMOVCC(0, s); - break; - case 0x002: /* V9 fmovdcc %fcc0 */ - FMOVCC(0, d); - break; - case 0x003: /* V9 fmovqcc %fcc0 */ - CHECK_FPU_FEATURE(dc, FLOAT128); - FMOVCC(0, q); - break; - case 0x041: /* V9 fmovscc %fcc1 */ - FMOVCC(1, s); - break; - case 0x042: /* V9 fmovdcc %fcc1 */ - 
FMOVCC(1, d); - break; - case 0x043: /* V9 fmovqcc %fcc1 */ - CHECK_FPU_FEATURE(dc, FLOAT128); - FMOVCC(1, q); - break; - case 0x081: /* V9 fmovscc %fcc2 */ - FMOVCC(2, s); - break; - case 0x082: /* V9 fmovdcc %fcc2 */ - FMOVCC(2, d); - break; - case 0x083: /* V9 fmovqcc %fcc2 */ - CHECK_FPU_FEATURE(dc, FLOAT128); - FMOVCC(2, q); - break; - case 0x0c1: /* V9 fmovscc %fcc3 */ - FMOVCC(3, s); - break; - case 0x0c2: /* V9 fmovdcc %fcc3 */ - FMOVCC(3, d); - break; - case 0x0c3: /* V9 fmovqcc %fcc3 */ - CHECK_FPU_FEATURE(dc, FLOAT128); - FMOVCC(3, q); - break; -#undef FMOVCC -#define FMOVCC(xcc, sz) \ - do { \ - DisasCompare cmp; \ - cond = GET_FIELD_SP(insn, 14, 17); \ - gen_compare(&cmp, xcc, cond, dc); \ - gen_fmov##sz(dc, &cmp, rd, rs2); \ - } while (0) - - case 0x101: /* V9 fmovscc %icc */ - FMOVCC(0, s); - break; - case 0x102: /* V9 fmovdcc %icc */ - FMOVCC(0, d); - break; - case 0x103: /* V9 fmovqcc %icc */ - CHECK_FPU_FEATURE(dc, FLOAT128); - FMOVCC(0, q); - break; - case 0x181: /* V9 fmovscc %xcc */ - FMOVCC(1, s); - break; - case 0x182: /* V9 fmovdcc %xcc */ - FMOVCC(1, d); - break; - case 0x183: /* V9 fmovqcc %xcc */ - CHECK_FPU_FEATURE(dc, FLOAT128); - FMOVCC(1, q); - break; -#undef FMOVCC -#endif - case 0x51: /* fcmps, V9 %fcc */ - cpu_src1_32 = gen_load_fpr_F(dc, rs1); - cpu_src2_32 = gen_load_fpr_F(dc, rs2); - gen_op_fcmps(rd & 3, cpu_src1_32, cpu_src2_32); - break; - case 0x52: /* fcmpd, V9 %fcc */ - cpu_src1_64 = gen_load_fpr_D(dc, rs1); - cpu_src2_64 = gen_load_fpr_D(dc, rs2); - gen_op_fcmpd(rd & 3, cpu_src1_64, cpu_src2_64); - break; - case 0x53: /* fcmpq, V9 %fcc */ - CHECK_FPU_FEATURE(dc, FLOAT128); - gen_op_load_fpr_QT0(QFPREG(rs1)); - gen_op_load_fpr_QT1(QFPREG(rs2)); - gen_op_fcmpq(rd & 3); - break; - case 0x55: /* fcmpes, V9 %fcc */ - cpu_src1_32 = gen_load_fpr_F(dc, rs1); - cpu_src2_32 = gen_load_fpr_F(dc, rs2); - gen_op_fcmpes(rd & 3, cpu_src1_32, cpu_src2_32); - break; - case 0x56: /* fcmped, V9 %fcc */ - cpu_src1_64 = gen_load_fpr_D(dc, rs1); - cpu_src2_64 = gen_load_fpr_D(dc, rs2); - gen_op_fcmped(rd & 3, cpu_src1_64, cpu_src2_64); - break; - case 0x57: /* fcmpeq, V9 %fcc */ - CHECK_FPU_FEATURE(dc, FLOAT128); - gen_op_load_fpr_QT0(QFPREG(rs1)); - gen_op_load_fpr_QT1(QFPREG(rs2)); - gen_op_fcmpeq(rd & 3); - break; - default: - goto illegal_insn; - } - } else if (xop == 0x2) { - TCGv dst = gen_dest_gpr(dc, rd); - rs1 = GET_FIELD(insn, 13, 17); - if (rs1 == 0) { - /* clr/mov shortcut : or %g0, x, y -> mov x, y */ - if (IS_IMM) { /* immediate */ - simm = GET_FIELDs(insn, 19, 31); - tcg_gen_movi_tl(dst, simm); - gen_store_gpr(dc, rd, dst); - } else { /* register */ - rs2 = GET_FIELD(insn, 27, 31); - if (rs2 == 0) { - tcg_gen_movi_tl(dst, 0); - gen_store_gpr(dc, rd, dst); - } else { - cpu_src2 = gen_load_gpr(dc, rs2); - gen_store_gpr(dc, rd, cpu_src2); - } - } - } else { - cpu_src1 = get_src1(dc, insn); - if (IS_IMM) { /* immediate */ - simm = GET_FIELDs(insn, 19, 31); - tcg_gen_ori_tl(dst, cpu_src1, simm); - gen_store_gpr(dc, rd, dst); - } else { /* register */ - rs2 = GET_FIELD(insn, 27, 31); - if (rs2 == 0) { - /* mov shortcut: or x, %g0, y -> mov x, y */ - gen_store_gpr(dc, rd, cpu_src1); - } else { - cpu_src2 = gen_load_gpr(dc, rs2); - tcg_gen_or_tl(dst, cpu_src1, cpu_src2); - gen_store_gpr(dc, rd, dst); - } - } - } -#ifdef TARGET_SPARC64 - } else if (xop == 0x25) { /* sll, V9 sllx */ - cpu_src1 = get_src1(dc, insn); - if (IS_IMM) { /* immediate */ - simm = GET_FIELDs(insn, 20, 31); - if (insn & (1 << 12)) { - tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x3f); - } 
else { - tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x1f); - } - } else { /* register */ - rs2 = GET_FIELD(insn, 27, 31); - cpu_src2 = gen_load_gpr(dc, rs2); - cpu_tmp0 = tcg_temp_new(); - if (insn & (1 << 12)) { - tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f); - } else { - tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f); - } - tcg_gen_shl_i64(cpu_dst, cpu_src1, cpu_tmp0); - } - gen_store_gpr(dc, rd, cpu_dst); - } else if (xop == 0x26) { /* srl, V9 srlx */ - cpu_src1 = get_src1(dc, insn); - if (IS_IMM) { /* immediate */ - simm = GET_FIELDs(insn, 20, 31); - if (insn & (1 << 12)) { - tcg_gen_shri_i64(cpu_dst, cpu_src1, simm & 0x3f); - } else { - tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL); - tcg_gen_shri_i64(cpu_dst, cpu_dst, simm & 0x1f); - } - } else { /* register */ - rs2 = GET_FIELD(insn, 27, 31); - cpu_src2 = gen_load_gpr(dc, rs2); - cpu_tmp0 = tcg_temp_new(); - if (insn & (1 << 12)) { - tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f); - tcg_gen_shr_i64(cpu_dst, cpu_src1, cpu_tmp0); - } else { - tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f); - tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL); - tcg_gen_shr_i64(cpu_dst, cpu_dst, cpu_tmp0); - } - } - gen_store_gpr(dc, rd, cpu_dst); - } else if (xop == 0x27) { /* sra, V9 srax */ - cpu_src1 = get_src1(dc, insn); - if (IS_IMM) { /* immediate */ - simm = GET_FIELDs(insn, 20, 31); - if (insn & (1 << 12)) { - tcg_gen_sari_i64(cpu_dst, cpu_src1, simm & 0x3f); - } else { - tcg_gen_ext32s_i64(cpu_dst, cpu_src1); - tcg_gen_sari_i64(cpu_dst, cpu_dst, simm & 0x1f); - } - } else { /* register */ - rs2 = GET_FIELD(insn, 27, 31); - cpu_src2 = gen_load_gpr(dc, rs2); - cpu_tmp0 = tcg_temp_new(); - if (insn & (1 << 12)) { - tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f); - tcg_gen_sar_i64(cpu_dst, cpu_src1, cpu_tmp0); - } else { - tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f); - tcg_gen_ext32s_i64(cpu_dst, cpu_src1); - tcg_gen_sar_i64(cpu_dst, cpu_dst, cpu_tmp0); - } - } - gen_store_gpr(dc, rd, cpu_dst); -#endif - } else if (xop < 0x36) { - if (xop < 0x20) { - cpu_src1 = get_src1(dc, insn); - cpu_src2 = get_src2(dc, insn); - switch (xop & ~0x10) { - case 0x0: /* add */ - if (xop & 0x10) { - gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2); - tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD); - dc->cc_op = CC_OP_ADD; - } else { - tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2); - } - break; - case 0x1: /* and */ - tcg_gen_and_tl(cpu_dst, cpu_src1, cpu_src2); - if (xop & 0x10) { - tcg_gen_mov_tl(cpu_cc_dst, cpu_dst); - tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC); - dc->cc_op = CC_OP_LOGIC; - } - break; - case 0x2: /* or */ - tcg_gen_or_tl(cpu_dst, cpu_src1, cpu_src2); - if (xop & 0x10) { - tcg_gen_mov_tl(cpu_cc_dst, cpu_dst); - tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC); - dc->cc_op = CC_OP_LOGIC; - } - break; - case 0x3: /* xor */ - tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2); - if (xop & 0x10) { - tcg_gen_mov_tl(cpu_cc_dst, cpu_dst); - tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC); - dc->cc_op = CC_OP_LOGIC; - } - break; - case 0x4: /* sub */ - if (xop & 0x10) { - gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2); - tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB); - dc->cc_op = CC_OP_SUB; - } else { - tcg_gen_sub_tl(cpu_dst, cpu_src1, cpu_src2); - } - break; - case 0x5: /* andn */ - tcg_gen_andc_tl(cpu_dst, cpu_src1, cpu_src2); - if (xop & 0x10) { - tcg_gen_mov_tl(cpu_cc_dst, cpu_dst); - tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC); - dc->cc_op = CC_OP_LOGIC; - } - break; - case 0x6: /* orn */ - tcg_gen_orc_tl(cpu_dst, cpu_src1, cpu_src2); - if (xop & 0x10) { - tcg_gen_mov_tl(cpu_cc_dst, cpu_dst); - 
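/*
 * All of the logical ops above share one pattern: the translator does
 * not compute %icc eagerly.  It copies the result into cpu_cc_dst and
 * tags the state CC_OP_LOGIC; flags are derived only if a later
 * instruction reads them.  For a logic op that derivation is trivial,
 * as this standalone model shows (bit positions are local to the
 * model, not the real PSR layout):
 */
#include <stdint.h>

#define MODEL_N (1u << 3)
#define MODEL_Z (1u << 2)

static uint32_t icc_from_logic_result(uint32_t cc_dst)
{
    uint32_t icc = 0;               /* C and V are always clear */

    if (cc_dst == 0) {
        icc |= MODEL_Z;
    }
    if ((int32_t)cc_dst < 0) {      /* N mirrors bit 31 of the result */
        icc |= MODEL_N;
    }
    return icc;
}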
tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC); - dc->cc_op = CC_OP_LOGIC; - } - break; - case 0x7: /* xorn */ - tcg_gen_eqv_tl(cpu_dst, cpu_src1, cpu_src2); - if (xop & 0x10) { - tcg_gen_mov_tl(cpu_cc_dst, cpu_dst); - tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC); - dc->cc_op = CC_OP_LOGIC; - } - break; - case 0x8: /* addx, V9 addc */ - gen_op_addx_int(dc, cpu_dst, cpu_src1, cpu_src2, - (xop & 0x10)); - break; -#ifdef TARGET_SPARC64 - case 0x9: /* V9 mulx */ - tcg_gen_mul_i64(cpu_dst, cpu_src1, cpu_src2); - break; -#endif - case 0xa: /* umul */ - CHECK_IU_FEATURE(dc, MUL); - gen_op_umul(cpu_dst, cpu_src1, cpu_src2); - if (xop & 0x10) { - tcg_gen_mov_tl(cpu_cc_dst, cpu_dst); - tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC); - dc->cc_op = CC_OP_LOGIC; - } - break; - case 0xb: /* smul */ - CHECK_IU_FEATURE(dc, MUL); - gen_op_smul(cpu_dst, cpu_src1, cpu_src2); - if (xop & 0x10) { - tcg_gen_mov_tl(cpu_cc_dst, cpu_dst); - tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC); - dc->cc_op = CC_OP_LOGIC; - } - break; - case 0xc: /* subx, V9 subc */ - gen_op_subx_int(dc, cpu_dst, cpu_src1, cpu_src2, - (xop & 0x10)); - break; -#ifdef TARGET_SPARC64 - case 0xd: /* V9 udivx */ - gen_helper_udivx(cpu_dst, tcg_env, cpu_src1, cpu_src2); - break; -#endif - case 0xe: /* udiv */ - CHECK_IU_FEATURE(dc, DIV); - if (xop & 0x10) { - gen_helper_udiv_cc(cpu_dst, tcg_env, cpu_src1, - cpu_src2); - dc->cc_op = CC_OP_DIV; - } else { - gen_helper_udiv(cpu_dst, tcg_env, cpu_src1, - cpu_src2); - } - break; - case 0xf: /* sdiv */ - CHECK_IU_FEATURE(dc, DIV); - if (xop & 0x10) { - gen_helper_sdiv_cc(cpu_dst, tcg_env, cpu_src1, - cpu_src2); - dc->cc_op = CC_OP_DIV; - } else { - gen_helper_sdiv(cpu_dst, tcg_env, cpu_src1, - cpu_src2); - } - break; - default: - goto illegal_insn; - } - gen_store_gpr(dc, rd, cpu_dst); - } else { - cpu_src1 = get_src1(dc, insn); - cpu_src2 = get_src2(dc, insn); - switch (xop) { - case 0x20: /* taddcc */ - gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2); - gen_store_gpr(dc, rd, cpu_dst); - tcg_gen_movi_i32(cpu_cc_op, CC_OP_TADD); - dc->cc_op = CC_OP_TADD; - break; - case 0x21: /* tsubcc */ - gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2); - gen_store_gpr(dc, rd, cpu_dst); - tcg_gen_movi_i32(cpu_cc_op, CC_OP_TSUB); - dc->cc_op = CC_OP_TSUB; - break; - case 0x22: /* taddcctv */ - gen_helper_taddcctv(cpu_dst, tcg_env, - cpu_src1, cpu_src2); - gen_store_gpr(dc, rd, cpu_dst); - dc->cc_op = CC_OP_TADDTV; - break; - case 0x23: /* tsubcctv */ - gen_helper_tsubcctv(cpu_dst, tcg_env, - cpu_src1, cpu_src2); - gen_store_gpr(dc, rd, cpu_dst); - dc->cc_op = CC_OP_TSUBTV; - break; - case 0x24: /* mulscc */ - update_psr(dc); - gen_op_mulscc(cpu_dst, cpu_src1, cpu_src2); - gen_store_gpr(dc, rd, cpu_dst); - tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD); - dc->cc_op = CC_OP_ADD; - break; -#ifndef TARGET_SPARC64 - case 0x25: /* sll */ - if (IS_IMM) { /* immediate */ - simm = GET_FIELDs(insn, 20, 31); - tcg_gen_shli_tl(cpu_dst, cpu_src1, simm & 0x1f); - } else { /* register */ - cpu_tmp0 = tcg_temp_new(); - tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f); - tcg_gen_shl_tl(cpu_dst, cpu_src1, cpu_tmp0); - } - gen_store_gpr(dc, rd, cpu_dst); - break; - case 0x26: /* srl */ - if (IS_IMM) { /* immediate */ - simm = GET_FIELDs(insn, 20, 31); - tcg_gen_shri_tl(cpu_dst, cpu_src1, simm & 0x1f); - } else { /* register */ - cpu_tmp0 = tcg_temp_new(); - tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f); - tcg_gen_shr_tl(cpu_dst, cpu_src1, cpu_tmp0); - } - gen_store_gpr(dc, rd, cpu_dst); - break; - case 0x27: /* sra */ - if (IS_IMM) { /* immediate */ - simm = GET_FIELDs(insn, 20, 31); 
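/*
 * The shift cases here, and their V9 forms earlier in this hunk,
 * differ only in how much of the count survives: bit 12 of the
 * instruction selects the 64-bit sllx/srlx/srax form with a 6-bit
 * count, while the 32-bit forms keep 5 bits and first zero- or
 * sign-extend the low word of the source.  A standalone model
 * (names are ours):
 */
#include <stdint.h>

static uint64_t model_srl(uint64_t src, unsigned count, int x_form)
{
    if (x_form) {
        return src >> (count & 0x3f);                   /* srlx */
    }
    return (uint64_t)(uint32_t)src >> (count & 0x1f);   /* srl  */
}

static int64_t model_sra(uint64_t src, unsigned count, int x_form)
{
    if (x_form) {
        return (int64_t)src >> (count & 0x3f);          /* srax */
    }
    return (int64_t)(int32_t)src >> (count & 0x1f);     /* sra  */
}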
- tcg_gen_sari_tl(cpu_dst, cpu_src1, simm & 0x1f); - } else { /* register */ - cpu_tmp0 = tcg_temp_new(); - tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f); - tcg_gen_sar_tl(cpu_dst, cpu_src1, cpu_tmp0); - } - gen_store_gpr(dc, rd, cpu_dst); - break; -#endif - case 0x30: - { - cpu_tmp0 = tcg_temp_new(); - switch(rd) { - case 0: /* wry */ - tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2); - tcg_gen_andi_tl(cpu_y, cpu_tmp0, 0xffffffff); - break; -#ifndef TARGET_SPARC64 - case 0x01 ... 0x0f: /* undefined in the - SPARCv8 manual, nop - on the microSPARC - II */ - case 0x10 ... 0x1f: /* implementation-dependent - in the SPARCv8 - manual, nop on the - microSPARC II */ - if ((rd == 0x13) && (dc->def->features & - CPU_FEATURE_POWERDOWN)) { - /* LEON3 power-down */ - save_state(dc); - gen_helper_power_down(tcg_env); - } - break; -#else - case 0x2: /* V9 wrccr */ - tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2); - gen_helper_wrccr(tcg_env, cpu_tmp0); - tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS); - dc->cc_op = CC_OP_FLAGS; - break; - case 0x3: /* V9 wrasi */ - tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2); - tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0xff); - tcg_gen_st32_tl(cpu_tmp0, tcg_env, - offsetof(CPUSPARCState, asi)); - /* - * End TB to notice changed ASI. - * TODO: Could notice src1 = %g0 and IS_IMM, - * update DisasContext and not exit the TB. - */ - save_state(dc); - gen_op_next_insn(); - tcg_gen_lookup_and_goto_ptr(); - dc->base.is_jmp = DISAS_NORETURN; - break; - case 0x6: /* V9 wrfprs */ - tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2); - tcg_gen_trunc_tl_i32(cpu_fprs, cpu_tmp0); - dc->fprs_dirty = 0; - save_state(dc); - gen_op_next_insn(); - tcg_gen_exit_tb(NULL, 0); - dc->base.is_jmp = DISAS_NORETURN; - break; - case 0xf: /* V9 sir, nop if user */ -#if !defined(CONFIG_USER_ONLY) - if (supervisor(dc)) { - ; // XXX - } -#endif - break; - case 0x13: /* Graphics Status */ - if (gen_trap_ifnofpu(dc)) { - goto jmp_insn; - } - tcg_gen_xor_tl(cpu_gsr, cpu_src1, cpu_src2); - break; - case 0x14: /* Softint set */ - if (!supervisor(dc)) - goto illegal_insn; - tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2); - gen_helper_set_softint(tcg_env, cpu_tmp0); - break; - case 0x15: /* Softint clear */ - if (!supervisor(dc)) - goto illegal_insn; - tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2); - gen_helper_clear_softint(tcg_env, cpu_tmp0); - break; - case 0x16: /* Softint write */ - if (!supervisor(dc)) - goto illegal_insn; - tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2); - gen_helper_write_softint(tcg_env, cpu_tmp0); - break; - case 0x17: /* Tick compare */ -#if !defined(CONFIG_USER_ONLY) - if (!supervisor(dc)) - goto illegal_insn; -#endif - { - TCGv_ptr r_tickptr; - - tcg_gen_xor_tl(cpu_tick_cmpr, cpu_src1, - cpu_src2); - r_tickptr = tcg_temp_new_ptr(); - tcg_gen_ld_ptr(r_tickptr, tcg_env, - offsetof(CPUSPARCState, tick)); - translator_io_start(&dc->base); - gen_helper_tick_set_limit(r_tickptr, - cpu_tick_cmpr); - /* End TB to handle timer interrupt */ - dc->base.is_jmp = DISAS_EXIT; - } - break; - case 0x18: /* System tick */ -#if !defined(CONFIG_USER_ONLY) - if (!supervisor(dc)) - goto illegal_insn; -#endif - { - TCGv_ptr r_tickptr; - - tcg_gen_xor_tl(cpu_tmp0, cpu_src1, - cpu_src2); - r_tickptr = tcg_temp_new_ptr(); - tcg_gen_ld_ptr(r_tickptr, tcg_env, - offsetof(CPUSPARCState, stick)); - translator_io_start(&dc->base); - gen_helper_tick_set_count(r_tickptr, - cpu_tmp0); - /* End TB to handle timer interrupt */ - dc->base.is_jmp = DISAS_EXIT; - } - break; - case 0x19: /* System tick compare */ -#if !defined(CONFIG_USER_ONLY) - 
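/*
 * Each tick/stick compare write in this hunk ends the same way: open
 * an I/O window with translator_io_start(), call the tick helper,
 * then set DISAS_EXIT so the translation block stops and a timer
 * interrupt that just became pending is taken before the next guest
 * instruction.  A toy model of why the early exit matters (all names
 * here are ours, not QEMU's):
 */
#include <stdbool.h>
#include <stdint.h>

static uint64_t tick_limit;
static bool exit_tb_requested;

static void write_tick_cmpr(uint64_t v)
{
    tick_limit = v;
    exit_tb_requested = true;   /* analogue of dc->base.is_jmp = DISAS_EXIT */
}

static int run_block(const uint64_t *stores, int n)
{
    for (int i = 0; i < n; i++) {
        write_tick_cmpr(stores[i]);
        if (exit_tb_requested) {
            return i;   /* back to the outer loop, which checks interrupts */
        }
    }
    return n;
}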
if (!supervisor(dc)) - goto illegal_insn; -#endif - { - TCGv_ptr r_tickptr; - - tcg_gen_xor_tl(cpu_stick_cmpr, cpu_src1, - cpu_src2); - r_tickptr = tcg_temp_new_ptr(); - tcg_gen_ld_ptr(r_tickptr, tcg_env, - offsetof(CPUSPARCState, stick)); - translator_io_start(&dc->base); - gen_helper_tick_set_limit(r_tickptr, - cpu_stick_cmpr); - /* End TB to handle timer interrupt */ - dc->base.is_jmp = DISAS_EXIT; - } - break; - - case 0x10: /* Performance Control */ - case 0x11: /* Performance Instrumentation - Counter */ - case 0x12: /* Dispatch Control */ -#endif - default: - goto illegal_insn; - } - } - break; -#if !defined(CONFIG_USER_ONLY) - case 0x31: /* wrpsr, V9 saved, restored */ - { - if (!supervisor(dc)) - goto priv_insn; -#ifdef TARGET_SPARC64 - switch (rd) { - case 0: - gen_helper_saved(tcg_env); - break; - case 1: - gen_helper_restored(tcg_env); - break; - case 2: /* UA2005 allclean */ - case 3: /* UA2005 otherw */ - case 4: /* UA2005 normalw */ - case 5: /* UA2005 invalw */ - // XXX - default: - goto illegal_insn; - } -#else - cpu_tmp0 = tcg_temp_new(); - tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2); - gen_helper_wrpsr(tcg_env, cpu_tmp0); - tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS); - dc->cc_op = CC_OP_FLAGS; - save_state(dc); - gen_op_next_insn(); - tcg_gen_exit_tb(NULL, 0); - dc->base.is_jmp = DISAS_NORETURN; -#endif - } - break; - case 0x32: /* wrwim, V9 wrpr */ - { - if (!supervisor(dc)) - goto priv_insn; - cpu_tmp0 = tcg_temp_new(); - tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2); -#ifdef TARGET_SPARC64 - switch (rd) { - case 0: // tpc - { - TCGv_ptr r_tsptr; - - r_tsptr = tcg_temp_new_ptr(); - gen_load_trap_state_at_tl(r_tsptr, tcg_env); - tcg_gen_st_tl(cpu_tmp0, r_tsptr, - offsetof(trap_state, tpc)); - } - break; - case 1: // tnpc - { - TCGv_ptr r_tsptr; - - r_tsptr = tcg_temp_new_ptr(); - gen_load_trap_state_at_tl(r_tsptr, tcg_env); - tcg_gen_st_tl(cpu_tmp0, r_tsptr, - offsetof(trap_state, tnpc)); - } - break; - case 2: // tstate - { - TCGv_ptr r_tsptr; - - r_tsptr = tcg_temp_new_ptr(); - gen_load_trap_state_at_tl(r_tsptr, tcg_env); - tcg_gen_st_tl(cpu_tmp0, r_tsptr, - offsetof(trap_state, - tstate)); - } - break; - case 3: // tt - { - TCGv_ptr r_tsptr; - - r_tsptr = tcg_temp_new_ptr(); - gen_load_trap_state_at_tl(r_tsptr, tcg_env); - tcg_gen_st32_tl(cpu_tmp0, r_tsptr, - offsetof(trap_state, tt)); - } - break; - case 4: // tick - { - TCGv_ptr r_tickptr; - - r_tickptr = tcg_temp_new_ptr(); - tcg_gen_ld_ptr(r_tickptr, tcg_env, - offsetof(CPUSPARCState, tick)); - translator_io_start(&dc->base); - gen_helper_tick_set_count(r_tickptr, - cpu_tmp0); - /* End TB to handle timer interrupt */ - dc->base.is_jmp = DISAS_EXIT; - } - break; - case 5: // tba - tcg_gen_mov_tl(cpu_tbr, cpu_tmp0); - break; - case 6: // pstate - save_state(dc); - if (translator_io_start(&dc->base)) { - dc->base.is_jmp = DISAS_EXIT; - } - gen_helper_wrpstate(tcg_env, cpu_tmp0); - dc->npc = DYNAMIC_PC; - break; - case 7: // tl - save_state(dc); - tcg_gen_st32_tl(cpu_tmp0, tcg_env, - offsetof(CPUSPARCState, tl)); - dc->npc = DYNAMIC_PC; - break; - case 8: // pil - if (translator_io_start(&dc->base)) { - dc->base.is_jmp = DISAS_EXIT; - } - gen_helper_wrpil(tcg_env, cpu_tmp0); - break; - case 9: // cwp - gen_helper_wrcwp(tcg_env, cpu_tmp0); - break; - case 10: // cansave - tcg_gen_st32_tl(cpu_tmp0, tcg_env, - offsetof(CPUSPARCState, - cansave)); - break; - case 11: // canrestore - tcg_gen_st32_tl(cpu_tmp0, tcg_env, - offsetof(CPUSPARCState, - canrestore)); - break; - case 12: // cleanwin - tcg_gen_st32_tl(cpu_tmp0, 
tcg_env, - offsetof(CPUSPARCState, - cleanwin)); - break; - case 13: // otherwin - tcg_gen_st32_tl(cpu_tmp0, tcg_env, - offsetof(CPUSPARCState, - otherwin)); - break; - case 14: // wstate - tcg_gen_st32_tl(cpu_tmp0, tcg_env, - offsetof(CPUSPARCState, - wstate)); - break; - case 16: // UA2005 gl - CHECK_IU_FEATURE(dc, GL); - gen_helper_wrgl(tcg_env, cpu_tmp0); - break; - case 26: // UA2005 strand status - CHECK_IU_FEATURE(dc, HYPV); - if (!hypervisor(dc)) - goto priv_insn; - tcg_gen_mov_tl(cpu_ssr, cpu_tmp0); - break; - default: - goto illegal_insn; - } -#else - tcg_gen_trunc_tl_i32(cpu_wim, cpu_tmp0); - if (dc->def->nwindows != 32) { - tcg_gen_andi_tl(cpu_wim, cpu_wim, - (1 << dc->def->nwindows) - 1); - } -#endif - } - break; - case 0x33: /* wrtbr, UA2005 wrhpr */ - { -#ifndef TARGET_SPARC64 - if (!supervisor(dc)) - goto priv_insn; - tcg_gen_xor_tl(cpu_tbr, cpu_src1, cpu_src2); -#else - CHECK_IU_FEATURE(dc, HYPV); - if (!hypervisor(dc)) - goto priv_insn; - cpu_tmp0 = tcg_temp_new(); - tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2); - switch (rd) { - case 0: // hpstate - tcg_gen_st_i64(cpu_tmp0, tcg_env, - offsetof(CPUSPARCState, - hpstate)); - save_state(dc); - gen_op_next_insn(); - tcg_gen_exit_tb(NULL, 0); - dc->base.is_jmp = DISAS_NORETURN; - break; - case 1: // htstate - // XXX gen_op_wrhtstate(); - break; - case 3: // hintp - tcg_gen_mov_tl(cpu_hintp, cpu_tmp0); - break; - case 5: // htba - tcg_gen_mov_tl(cpu_htba, cpu_tmp0); - break; - case 31: // hstick_cmpr - { - TCGv_ptr r_tickptr; - - tcg_gen_mov_tl(cpu_hstick_cmpr, cpu_tmp0); - r_tickptr = tcg_temp_new_ptr(); - tcg_gen_ld_ptr(r_tickptr, tcg_env, - offsetof(CPUSPARCState, hstick)); - translator_io_start(&dc->base); - gen_helper_tick_set_limit(r_tickptr, - cpu_hstick_cmpr); - /* End TB to handle timer interrupt */ - dc->base.is_jmp = DISAS_EXIT; - } - break; - case 6: // hver readonly - default: - goto illegal_insn; - } -#endif - } - break; -#endif -#ifdef TARGET_SPARC64 - case 0x2c: /* V9 movcc */ - { - int cc = GET_FIELD_SP(insn, 11, 12); - int cond = GET_FIELD_SP(insn, 14, 17); - DisasCompare cmp; - TCGv dst; - - if (insn & (1 << 18)) { - if (cc == 0) { - gen_compare(&cmp, 0, cond, dc); - } else if (cc == 2) { - gen_compare(&cmp, 1, cond, dc); - } else { - goto illegal_insn; - } - } else { - gen_fcompare(&cmp, cc, cond); - } - - /* The get_src2 above loaded the normal 13-bit - immediate field, not the 11-bit field we have - in movcc. But it did handle the reg case. */ - if (IS_IMM) { - simm = GET_FIELD_SPs(insn, 0, 10); - tcg_gen_movi_tl(cpu_src2, simm); - } - - dst = gen_load_gpr(dc, rd); - tcg_gen_movcond_tl(cmp.cond, dst, - cmp.c1, cmp.c2, - cpu_src2, dst); - gen_store_gpr(dc, rd, dst); - break; - } - case 0x2d: /* V9 sdivx */ - gen_helper_sdivx(cpu_dst, tcg_env, cpu_src1, cpu_src2); - gen_store_gpr(dc, rd, cpu_dst); - break; - case 0x2e: /* V9 popc */ - tcg_gen_ctpop_tl(cpu_dst, cpu_src2); - gen_store_gpr(dc, rd, cpu_dst); - break; - case 0x2f: /* V9 movr */ - { - int cond = GET_FIELD_SP(insn, 10, 12); - DisasCompare cmp; - TCGv dst; - - gen_compare_reg(&cmp, cond, cpu_src1); - - /* The get_src2 above loaded the normal 13-bit - immediate field, not the 10-bit field we have - in movr. But it did handle the reg case. 
*/ - if (IS_IMM) { - simm = GET_FIELD_SPs(insn, 0, 9); - tcg_gen_movi_tl(cpu_src2, simm); - } - - dst = gen_load_gpr(dc, rd); - tcg_gen_movcond_tl(cmp.cond, dst, - cmp.c1, cmp.c2, - cpu_src2, dst); - gen_store_gpr(dc, rd, dst); - break; - } -#endif - default: - goto illegal_insn; - } - } - } else if (xop == 0x36) { /* UltraSparc shutdown, VIS, V8 CPop1 */ -#ifdef TARGET_SPARC64 - int opf = GET_FIELD_SP(insn, 5, 13); - rs1 = GET_FIELD(insn, 13, 17); - rs2 = GET_FIELD(insn, 27, 31); - if (gen_trap_ifnofpu(dc)) { - goto jmp_insn; - } + if (src2 == NULL) { + return false; + } + gen_compare(&cmp, a->cc, a->cond, dc); + return do_mov_cond(dc, &cmp, a->rd, src2); +} - switch (opf) { - case 0x000: /* VIS I edge8cc */ - CHECK_FPU_FEATURE(dc, VIS1); - cpu_src1 = gen_load_gpr(dc, rs1); - cpu_src2 = gen_load_gpr(dc, rs2); - gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 0); - gen_store_gpr(dc, rd, cpu_dst); - break; - case 0x001: /* VIS II edge8n */ - CHECK_FPU_FEATURE(dc, VIS2); - cpu_src1 = gen_load_gpr(dc, rs1); - cpu_src2 = gen_load_gpr(dc, rs2); - gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 0); - gen_store_gpr(dc, rd, cpu_dst); - break; - case 0x002: /* VIS I edge8lcc */ - CHECK_FPU_FEATURE(dc, VIS1); - cpu_src1 = gen_load_gpr(dc, rs1); - cpu_src2 = gen_load_gpr(dc, rs2); - gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 1); - gen_store_gpr(dc, rd, cpu_dst); - break; - case 0x003: /* VIS II edge8ln */ - CHECK_FPU_FEATURE(dc, VIS2); - cpu_src1 = gen_load_gpr(dc, rs1); - cpu_src2 = gen_load_gpr(dc, rs2); - gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 1); - gen_store_gpr(dc, rd, cpu_dst); - break; - case 0x004: /* VIS I edge16cc */ - CHECK_FPU_FEATURE(dc, VIS1); - cpu_src1 = gen_load_gpr(dc, rs1); - cpu_src2 = gen_load_gpr(dc, rs2); - gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 0); - gen_store_gpr(dc, rd, cpu_dst); - break; - case 0x005: /* VIS II edge16n */ - CHECK_FPU_FEATURE(dc, VIS2); - cpu_src1 = gen_load_gpr(dc, rs1); - cpu_src2 = gen_load_gpr(dc, rs2); - gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 0); - gen_store_gpr(dc, rd, cpu_dst); - break; - case 0x006: /* VIS I edge16lcc */ - CHECK_FPU_FEATURE(dc, VIS1); - cpu_src1 = gen_load_gpr(dc, rs1); - cpu_src2 = gen_load_gpr(dc, rs2); - gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 1); - gen_store_gpr(dc, rd, cpu_dst); - break; - case 0x007: /* VIS II edge16ln */ - CHECK_FPU_FEATURE(dc, VIS2); - cpu_src1 = gen_load_gpr(dc, rs1); - cpu_src2 = gen_load_gpr(dc, rs2); - gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 1); - gen_store_gpr(dc, rd, cpu_dst); - break; - case 0x008: /* VIS I edge32cc */ - CHECK_FPU_FEATURE(dc, VIS1); - cpu_src1 = gen_load_gpr(dc, rs1); - cpu_src2 = gen_load_gpr(dc, rs2); - gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 0); - gen_store_gpr(dc, rd, cpu_dst); - break; - case 0x009: /* VIS II edge32n */ - CHECK_FPU_FEATURE(dc, VIS2); - cpu_src1 = gen_load_gpr(dc, rs1); - cpu_src2 = gen_load_gpr(dc, rs2); - gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 0); - gen_store_gpr(dc, rd, cpu_dst); - break; - case 0x00a: /* VIS I edge32lcc */ - CHECK_FPU_FEATURE(dc, VIS1); - cpu_src1 = gen_load_gpr(dc, rs1); - cpu_src2 = gen_load_gpr(dc, rs2); - gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 1); - gen_store_gpr(dc, rd, cpu_dst); - break; - case 0x00b: /* VIS II edge32ln */ - CHECK_FPU_FEATURE(dc, VIS2); - cpu_src1 = gen_load_gpr(dc, rs1); - cpu_src2 = gen_load_gpr(dc, rs2); - gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 1); - gen_store_gpr(dc, rd, cpu_dst); - break; - case 0x010: /* VIS I array8 */ - 
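/*
 * Note how both the old 0x2c decoder and the new trans_MOVcc above
 * lower MOVcc: the destination's previous value is loaded first and
 * becomes the movcond "else" operand, so a false condition rewrites
 * rd with itself and no branch is ever generated.  A one-line
 * standalone model of tcg_gen_movcond_tl(cond, dst, c1, c2, src2, dst):
 */
#include <stdint.h>

static uint64_t movcc_model(int cond_true, uint64_t src2, uint64_t old_rd)
{
    return cond_true ? src2 : old_rd;   /* dst unchanged when false */
}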
CHECK_FPU_FEATURE(dc, VIS1); - cpu_src1 = gen_load_gpr(dc, rs1); - cpu_src2 = gen_load_gpr(dc, rs2); - gen_helper_array8(cpu_dst, cpu_src1, cpu_src2); - gen_store_gpr(dc, rd, cpu_dst); - break; - case 0x012: /* VIS I array16 */ - CHECK_FPU_FEATURE(dc, VIS1); - cpu_src1 = gen_load_gpr(dc, rs1); - cpu_src2 = gen_load_gpr(dc, rs2); - gen_helper_array8(cpu_dst, cpu_src1, cpu_src2); - tcg_gen_shli_i64(cpu_dst, cpu_dst, 1); - gen_store_gpr(dc, rd, cpu_dst); - break; - case 0x014: /* VIS I array32 */ - CHECK_FPU_FEATURE(dc, VIS1); - cpu_src1 = gen_load_gpr(dc, rs1); - cpu_src2 = gen_load_gpr(dc, rs2); - gen_helper_array8(cpu_dst, cpu_src1, cpu_src2); - tcg_gen_shli_i64(cpu_dst, cpu_dst, 2); - gen_store_gpr(dc, rd, cpu_dst); - break; - case 0x018: /* VIS I alignaddr */ - CHECK_FPU_FEATURE(dc, VIS1); - cpu_src1 = gen_load_gpr(dc, rs1); - cpu_src2 = gen_load_gpr(dc, rs2); - gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 0); - gen_store_gpr(dc, rd, cpu_dst); - break; - case 0x01a: /* VIS I alignaddrl */ - CHECK_FPU_FEATURE(dc, VIS1); - cpu_src1 = gen_load_gpr(dc, rs1); - cpu_src2 = gen_load_gpr(dc, rs2); - gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 1); - gen_store_gpr(dc, rd, cpu_dst); - break; - case 0x019: /* VIS II bmask */ - CHECK_FPU_FEATURE(dc, VIS2); - cpu_src1 = gen_load_gpr(dc, rs1); - cpu_src2 = gen_load_gpr(dc, rs2); - tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2); - tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, cpu_dst, 32, 32); - gen_store_gpr(dc, rd, cpu_dst); - break; - case 0x020: /* VIS I fcmple16 */ - CHECK_FPU_FEATURE(dc, VIS1); - cpu_src1_64 = gen_load_fpr_D(dc, rs1); - cpu_src2_64 = gen_load_fpr_D(dc, rs2); - gen_helper_fcmple16(cpu_dst, cpu_src1_64, cpu_src2_64); - gen_store_gpr(dc, rd, cpu_dst); - break; - case 0x022: /* VIS I fcmpne16 */ - CHECK_FPU_FEATURE(dc, VIS1); - cpu_src1_64 = gen_load_fpr_D(dc, rs1); - cpu_src2_64 = gen_load_fpr_D(dc, rs2); - gen_helper_fcmpne16(cpu_dst, cpu_src1_64, cpu_src2_64); - gen_store_gpr(dc, rd, cpu_dst); - break; - case 0x024: /* VIS I fcmple32 */ - CHECK_FPU_FEATURE(dc, VIS1); - cpu_src1_64 = gen_load_fpr_D(dc, rs1); - cpu_src2_64 = gen_load_fpr_D(dc, rs2); - gen_helper_fcmple32(cpu_dst, cpu_src1_64, cpu_src2_64); - gen_store_gpr(dc, rd, cpu_dst); - break; - case 0x026: /* VIS I fcmpne32 */ - CHECK_FPU_FEATURE(dc, VIS1); - cpu_src1_64 = gen_load_fpr_D(dc, rs1); - cpu_src2_64 = gen_load_fpr_D(dc, rs2); - gen_helper_fcmpne32(cpu_dst, cpu_src1_64, cpu_src2_64); - gen_store_gpr(dc, rd, cpu_dst); - break; - case 0x028: /* VIS I fcmpgt16 */ - CHECK_FPU_FEATURE(dc, VIS1); - cpu_src1_64 = gen_load_fpr_D(dc, rs1); - cpu_src2_64 = gen_load_fpr_D(dc, rs2); - gen_helper_fcmpgt16(cpu_dst, cpu_src1_64, cpu_src2_64); - gen_store_gpr(dc, rd, cpu_dst); - break; - case 0x02a: /* VIS I fcmpeq16 */ - CHECK_FPU_FEATURE(dc, VIS1); - cpu_src1_64 = gen_load_fpr_D(dc, rs1); - cpu_src2_64 = gen_load_fpr_D(dc, rs2); - gen_helper_fcmpeq16(cpu_dst, cpu_src1_64, cpu_src2_64); - gen_store_gpr(dc, rd, cpu_dst); - break; - case 0x02c: /* VIS I fcmpgt32 */ - CHECK_FPU_FEATURE(dc, VIS1); - cpu_src1_64 = gen_load_fpr_D(dc, rs1); - cpu_src2_64 = gen_load_fpr_D(dc, rs2); - gen_helper_fcmpgt32(cpu_dst, cpu_src1_64, cpu_src2_64); - gen_store_gpr(dc, rd, cpu_dst); - break; - case 0x02e: /* VIS I fcmpeq32 */ - CHECK_FPU_FEATURE(dc, VIS1); - cpu_src1_64 = gen_load_fpr_D(dc, rs1); - cpu_src2_64 = gen_load_fpr_D(dc, rs2); - gen_helper_fcmpeq32(cpu_dst, cpu_src1_64, cpu_src2_64); - gen_store_gpr(dc, rd, cpu_dst); - break; - case 0x031: /* VIS I fmul8x16 */ - CHECK_FPU_FEATURE(dc, VIS1); - 
gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16); - break; - case 0x033: /* VIS I fmul8x16au */ - CHECK_FPU_FEATURE(dc, VIS1); - gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16au); - break; - case 0x035: /* VIS I fmul8x16al */ - CHECK_FPU_FEATURE(dc, VIS1); - gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16al); - break; - case 0x036: /* VIS I fmul8sux16 */ - CHECK_FPU_FEATURE(dc, VIS1); - gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8sux16); - break; - case 0x037: /* VIS I fmul8ulx16 */ - CHECK_FPU_FEATURE(dc, VIS1); - gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8ulx16); - break; - case 0x038: /* VIS I fmuld8sux16 */ - CHECK_FPU_FEATURE(dc, VIS1); - gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8sux16); - break; - case 0x039: /* VIS I fmuld8ulx16 */ - CHECK_FPU_FEATURE(dc, VIS1); - gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8ulx16); - break; - case 0x03a: /* VIS I fpack32 */ - CHECK_FPU_FEATURE(dc, VIS1); - gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpack32); - break; - case 0x03b: /* VIS I fpack16 */ - CHECK_FPU_FEATURE(dc, VIS1); - cpu_src1_64 = gen_load_fpr_D(dc, rs2); - cpu_dst_32 = gen_dest_fpr_F(dc); - gen_helper_fpack16(cpu_dst_32, cpu_gsr, cpu_src1_64); - gen_store_fpr_F(dc, rd, cpu_dst_32); - break; - case 0x03d: /* VIS I fpackfix */ - CHECK_FPU_FEATURE(dc, VIS1); - cpu_src1_64 = gen_load_fpr_D(dc, rs2); - cpu_dst_32 = gen_dest_fpr_F(dc); - gen_helper_fpackfix(cpu_dst_32, cpu_gsr, cpu_src1_64); - gen_store_fpr_F(dc, rd, cpu_dst_32); - break; - case 0x03e: /* VIS I pdist */ - CHECK_FPU_FEATURE(dc, VIS1); - gen_ne_fop_DDDD(dc, rd, rs1, rs2, gen_helper_pdist); - break; - case 0x048: /* VIS I faligndata */ - CHECK_FPU_FEATURE(dc, VIS1); - gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_faligndata); - break; - case 0x04b: /* VIS I fpmerge */ - CHECK_FPU_FEATURE(dc, VIS1); - gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpmerge); - break; - case 0x04c: /* VIS II bshuffle */ - CHECK_FPU_FEATURE(dc, VIS2); - gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_bshuffle); - break; - case 0x04d: /* VIS I fexpand */ - CHECK_FPU_FEATURE(dc, VIS1); - gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fexpand); - break; - case 0x050: /* VIS I fpadd16 */ - CHECK_FPU_FEATURE(dc, VIS1); - gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd16); - break; - case 0x051: /* VIS I fpadd16s */ - CHECK_FPU_FEATURE(dc, VIS1); - gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpadd16s); - break; - case 0x052: /* VIS I fpadd32 */ - CHECK_FPU_FEATURE(dc, VIS1); - gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd32); - break; - case 0x053: /* VIS I fpadd32s */ - CHECK_FPU_FEATURE(dc, VIS1); - gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_add_i32); - break; - case 0x054: /* VIS I fpsub16 */ - CHECK_FPU_FEATURE(dc, VIS1); - gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub16); - break; - case 0x055: /* VIS I fpsub16s */ - CHECK_FPU_FEATURE(dc, VIS1); - gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpsub16s); - break; - case 0x056: /* VIS I fpsub32 */ - CHECK_FPU_FEATURE(dc, VIS1); - gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub32); - break; - case 0x057: /* VIS I fpsub32s */ - CHECK_FPU_FEATURE(dc, VIS1); - gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_sub_i32); - break; - case 0x060: /* VIS I fzero */ - CHECK_FPU_FEATURE(dc, VIS1); - cpu_dst_64 = gen_dest_fpr_D(dc, rd); - tcg_gen_movi_i64(cpu_dst_64, 0); - gen_store_fpr_D(dc, rd, cpu_dst_64); - break; - case 0x061: /* VIS I fzeros */ - CHECK_FPU_FEATURE(dc, VIS1); - cpu_dst_32 = gen_dest_fpr_F(dc); - tcg_gen_movi_i32(cpu_dst_32, 0); - gen_store_fpr_F(dc, rd, 
cpu_dst_32); - break; - case 0x062: /* VIS I fnor */ - CHECK_FPU_FEATURE(dc, VIS1); - gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nor_i64); - break; - case 0x063: /* VIS I fnors */ - CHECK_FPU_FEATURE(dc, VIS1); - gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nor_i32); - break; - case 0x064: /* VIS I fandnot2 */ - CHECK_FPU_FEATURE(dc, VIS1); - gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_andc_i64); - break; - case 0x065: /* VIS I fandnot2s */ - CHECK_FPU_FEATURE(dc, VIS1); - gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_andc_i32); - break; - case 0x066: /* VIS I fnot2 */ - CHECK_FPU_FEATURE(dc, VIS1); - gen_ne_fop_DD(dc, rd, rs2, tcg_gen_not_i64); - break; - case 0x067: /* VIS I fnot2s */ - CHECK_FPU_FEATURE(dc, VIS1); - gen_ne_fop_FF(dc, rd, rs2, tcg_gen_not_i32); - break; - case 0x068: /* VIS I fandnot1 */ - CHECK_FPU_FEATURE(dc, VIS1); - gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_andc_i64); - break; - case 0x069: /* VIS I fandnot1s */ - CHECK_FPU_FEATURE(dc, VIS1); - gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_andc_i32); - break; - case 0x06a: /* VIS I fnot1 */ - CHECK_FPU_FEATURE(dc, VIS1); - gen_ne_fop_DD(dc, rd, rs1, tcg_gen_not_i64); - break; - case 0x06b: /* VIS I fnot1s */ - CHECK_FPU_FEATURE(dc, VIS1); - gen_ne_fop_FF(dc, rd, rs1, tcg_gen_not_i32); - break; - case 0x06c: /* VIS I fxor */ - CHECK_FPU_FEATURE(dc, VIS1); - gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_xor_i64); - break; - case 0x06d: /* VIS I fxors */ - CHECK_FPU_FEATURE(dc, VIS1); - gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_xor_i32); - break; - case 0x06e: /* VIS I fnand */ - CHECK_FPU_FEATURE(dc, VIS1); - gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nand_i64); - break; - case 0x06f: /* VIS I fnands */ - CHECK_FPU_FEATURE(dc, VIS1); - gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nand_i32); - break; - case 0x070: /* VIS I fand */ - CHECK_FPU_FEATURE(dc, VIS1); - gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_and_i64); - break; - case 0x071: /* VIS I fands */ - CHECK_FPU_FEATURE(dc, VIS1); - gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_and_i32); - break; - case 0x072: /* VIS I fxnor */ - CHECK_FPU_FEATURE(dc, VIS1); - gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_eqv_i64); - break; - case 0x073: /* VIS I fxnors */ - CHECK_FPU_FEATURE(dc, VIS1); - gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_eqv_i32); - break; - case 0x074: /* VIS I fsrc1 */ - CHECK_FPU_FEATURE(dc, VIS1); - cpu_src1_64 = gen_load_fpr_D(dc, rs1); - gen_store_fpr_D(dc, rd, cpu_src1_64); - break; - case 0x075: /* VIS I fsrc1s */ - CHECK_FPU_FEATURE(dc, VIS1); - cpu_src1_32 = gen_load_fpr_F(dc, rs1); - gen_store_fpr_F(dc, rd, cpu_src1_32); - break; - case 0x076: /* VIS I fornot2 */ - CHECK_FPU_FEATURE(dc, VIS1); - gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_orc_i64); - break; - case 0x077: /* VIS I fornot2s */ - CHECK_FPU_FEATURE(dc, VIS1); - gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_orc_i32); - break; - case 0x078: /* VIS I fsrc2 */ - CHECK_FPU_FEATURE(dc, VIS1); - cpu_src1_64 = gen_load_fpr_D(dc, rs2); - gen_store_fpr_D(dc, rd, cpu_src1_64); - break; - case 0x079: /* VIS I fsrc2s */ - CHECK_FPU_FEATURE(dc, VIS1); - cpu_src1_32 = gen_load_fpr_F(dc, rs2); - gen_store_fpr_F(dc, rd, cpu_src1_32); - break; - case 0x07a: /* VIS I fornot1 */ - CHECK_FPU_FEATURE(dc, VIS1); - gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_orc_i64); - break; - case 0x07b: /* VIS I fornot1s */ - CHECK_FPU_FEATURE(dc, VIS1); - gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_orc_i32); - break; - case 0x07c: /* VIS I for */ - CHECK_FPU_FEATURE(dc, VIS1); - gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_or_i64); - break; - case 0x07d: /* VIS I fors */ - 
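/*
 * The VIS logical block above maps directly onto TCG's two-operand
 * logicals; the only subtlety is operand order.  fandnot2 computes
 * "rs1 AND NOT rs2", and fandnot1 reuses the same andc with the
 * sources swapped, likewise fornot1/fornot2 with orc.  A standalone
 * model of the four variants:
 */
#include <stdint.h>

static uint64_t fandnot1(uint64_t s1, uint64_t s2) { return ~s1 & s2; }
static uint64_t fandnot2(uint64_t s1, uint64_t s2) { return s1 & ~s2; }
static uint64_t fornot1(uint64_t s1, uint64_t s2)  { return ~s1 | s2; }
static uint64_t fornot2(uint64_t s1, uint64_t s2)  { return s1 | ~s2; }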
CHECK_FPU_FEATURE(dc, VIS1); - gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_or_i32); - break; - case 0x07e: /* VIS I fone */ - CHECK_FPU_FEATURE(dc, VIS1); - cpu_dst_64 = gen_dest_fpr_D(dc, rd); - tcg_gen_movi_i64(cpu_dst_64, -1); - gen_store_fpr_D(dc, rd, cpu_dst_64); - break; - case 0x07f: /* VIS I fones */ - CHECK_FPU_FEATURE(dc, VIS1); - cpu_dst_32 = gen_dest_fpr_F(dc); - tcg_gen_movi_i32(cpu_dst_32, -1); - gen_store_fpr_F(dc, rd, cpu_dst_32); - break; - case 0x080: /* VIS I shutdown */ - case 0x081: /* VIS II siam */ - // XXX - goto illegal_insn; - default: - goto illegal_insn; - } -#else - goto ncp_insn; -#endif - } else if (xop == 0x37) { /* V8 CPop2, V9 impdep2 */ -#ifdef TARGET_SPARC64 - goto illegal_insn; -#else - goto ncp_insn; -#endif -#ifdef TARGET_SPARC64 - } else if (xop == 0x39) { /* V9 return */ - save_state(dc); - cpu_src1 = get_src1(dc, insn); - cpu_tmp0 = tcg_temp_new(); - if (IS_IMM) { /* immediate */ - simm = GET_FIELDs(insn, 19, 31); - tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm); - } else { /* register */ - rs2 = GET_FIELD(insn, 27, 31); - if (rs2) { - cpu_src2 = gen_load_gpr(dc, rs2); - tcg_gen_add_tl(cpu_tmp0, cpu_src1, cpu_src2); - } else { - tcg_gen_mov_tl(cpu_tmp0, cpu_src1); - } - } - gen_helper_restore(tcg_env); - gen_mov_pc_npc(dc); - gen_check_align(cpu_tmp0, 3); - tcg_gen_mov_tl(cpu_npc, cpu_tmp0); - dc->npc = DYNAMIC_PC_LOOKUP; - goto jmp_insn; -#endif - } else { - cpu_src1 = get_src1(dc, insn); - cpu_tmp0 = tcg_temp_new(); - if (IS_IMM) { /* immediate */ - simm = GET_FIELDs(insn, 19, 31); - tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm); - } else { /* register */ - rs2 = GET_FIELD(insn, 27, 31); - if (rs2) { - cpu_src2 = gen_load_gpr(dc, rs2); - tcg_gen_add_tl(cpu_tmp0, cpu_src1, cpu_src2); - } else { - tcg_gen_mov_tl(cpu_tmp0, cpu_src1); - } - } - switch (xop) { - case 0x38: /* jmpl */ - { - TCGv t = gen_dest_gpr(dc, rd); - tcg_gen_movi_tl(t, dc->pc); - gen_store_gpr(dc, rd, t); - - gen_mov_pc_npc(dc); - gen_check_align(cpu_tmp0, 3); - gen_address_mask(dc, cpu_tmp0); - tcg_gen_mov_tl(cpu_npc, cpu_tmp0); - dc->npc = DYNAMIC_PC_LOOKUP; - } - goto jmp_insn; -#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64) - case 0x39: /* rett, V9 return */ - { - if (!supervisor(dc)) - goto priv_insn; - gen_mov_pc_npc(dc); - gen_check_align(cpu_tmp0, 3); - tcg_gen_mov_tl(cpu_npc, cpu_tmp0); - dc->npc = DYNAMIC_PC; - gen_helper_rett(tcg_env); - } - goto jmp_insn; -#endif - case 0x3b: /* flush */ - if (!((dc)->def->features & CPU_FEATURE_FLUSH)) - goto unimp_flush; - /* nop */ - break; - case 0x3c: /* save */ - gen_helper_save(tcg_env); - gen_store_gpr(dc, rd, cpu_tmp0); - break; - case 0x3d: /* restore */ - gen_helper_restore(tcg_env); - gen_store_gpr(dc, rd, cpu_tmp0); - break; -#if !defined(CONFIG_USER_ONLY) && defined(TARGET_SPARC64) - case 0x3e: /* V9 done/retry */ - { - switch (rd) { - case 0: - if (!supervisor(dc)) - goto priv_insn; - dc->npc = DYNAMIC_PC; - dc->pc = DYNAMIC_PC; - translator_io_start(&dc->base); - gen_helper_done(tcg_env); - goto jmp_insn; - case 1: - if (!supervisor(dc)) - goto priv_insn; - dc->npc = DYNAMIC_PC; - dc->pc = DYNAMIC_PC; - translator_io_start(&dc->base); - gen_helper_retry(tcg_env); - goto jmp_insn; - default: - goto illegal_insn; - } - } - break; -#endif - default: - goto illegal_insn; - } - } - break; - } - break; - case 3: /* load/store instructions */ - { - unsigned int xop = GET_FIELD(insn, 7, 12); - /* ??? gen_address_mask prevents us from using a source - register directly. Always generate a temporary. 
*/ - TCGv cpu_addr = tcg_temp_new(); - - tcg_gen_mov_tl(cpu_addr, get_src1(dc, insn)); - if (xop == 0x3c || xop == 0x3e) { - /* V9 casa/casxa : no offset */ - } else if (IS_IMM) { /* immediate */ - simm = GET_FIELDs(insn, 19, 31); - if (simm != 0) { - tcg_gen_addi_tl(cpu_addr, cpu_addr, simm); - } - } else { /* register */ - rs2 = GET_FIELD(insn, 27, 31); - if (rs2 != 0) { - tcg_gen_add_tl(cpu_addr, cpu_addr, gen_load_gpr(dc, rs2)); - } - } - if (xop < 4 || (xop > 7 && xop < 0x14 && xop != 0x0e) || - (xop > 0x17 && xop <= 0x1d ) || - (xop > 0x2c && xop <= 0x33) || xop == 0x1f || xop == 0x3d) { - TCGv cpu_val = gen_dest_gpr(dc, rd); - - switch (xop) { - case 0x0: /* ld, V9 lduw, load unsigned word */ - gen_address_mask(dc, cpu_addr); - tcg_gen_qemu_ld_tl(cpu_val, cpu_addr, - dc->mem_idx, MO_TEUL | MO_ALIGN); - break; - case 0x1: /* ldub, load unsigned byte */ - gen_address_mask(dc, cpu_addr); - tcg_gen_qemu_ld_tl(cpu_val, cpu_addr, - dc->mem_idx, MO_UB); - break; - case 0x2: /* lduh, load unsigned halfword */ - gen_address_mask(dc, cpu_addr); - tcg_gen_qemu_ld_tl(cpu_val, cpu_addr, - dc->mem_idx, MO_TEUW | MO_ALIGN); - break; - case 0x3: /* ldd, load double word */ - if (rd & 1) - goto illegal_insn; - else { - TCGv_i64 t64; - - gen_address_mask(dc, cpu_addr); - t64 = tcg_temp_new_i64(); - tcg_gen_qemu_ld_i64(t64, cpu_addr, - dc->mem_idx, MO_TEUQ | MO_ALIGN); - tcg_gen_trunc_i64_tl(cpu_val, t64); - tcg_gen_ext32u_tl(cpu_val, cpu_val); - gen_store_gpr(dc, rd + 1, cpu_val); - tcg_gen_shri_i64(t64, t64, 32); - tcg_gen_trunc_i64_tl(cpu_val, t64); - tcg_gen_ext32u_tl(cpu_val, cpu_val); - } - break; - case 0x9: /* ldsb, load signed byte */ - gen_address_mask(dc, cpu_addr); - tcg_gen_qemu_ld_tl(cpu_val, cpu_addr, dc->mem_idx, MO_SB); - break; - case 0xa: /* ldsh, load signed halfword */ - gen_address_mask(dc, cpu_addr); - tcg_gen_qemu_ld_tl(cpu_val, cpu_addr, - dc->mem_idx, MO_TESW | MO_ALIGN); - break; - case 0xd: /* ldstub */ - gen_ldstub(dc, cpu_val, cpu_addr, dc->mem_idx); - break; - case 0x0f: - /* swap, swap register with memory. Also atomically */ - CHECK_IU_FEATURE(dc, SWAP); - cpu_src1 = gen_load_gpr(dc, rd); - gen_swap(dc, cpu_val, cpu_src1, cpu_addr, - dc->mem_idx, MO_TEUL); - break; -#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64) - case 0x10: /* lda, V9 lduwa, load word alternate */ - gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEUL); - break; - case 0x11: /* lduba, load unsigned byte alternate */ - gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_UB); - break; - case 0x12: /* lduha, load unsigned halfword alternate */ - gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEUW); - break; - case 0x13: /* ldda, load double word alternate */ - if (rd & 1) { - goto illegal_insn; - } - gen_ldda_asi(dc, cpu_addr, insn, rd); - goto skip_move; - case 0x19: /* ldsba, load signed byte alternate */ - gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_SB); - break; - case 0x1a: /* ldsha, load signed halfword alternate */ - gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TESW); - break; - case 0x1d: /* ldstuba -- XXX: should be atomically */ - gen_ldstub_asi(dc, cpu_val, cpu_addr, insn); - break; - case 0x1f: /* swapa, swap reg with alt. memory. 
Also - atomically */ - CHECK_IU_FEATURE(dc, SWAP); - cpu_src1 = gen_load_gpr(dc, rd); - gen_swap_asi(dc, cpu_val, cpu_src1, cpu_addr, insn); - break; +static bool trans_MOVfcc(DisasContext *dc, arg_MOVfcc *a) +{ + TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm); + DisasCompare cmp; -#ifndef TARGET_SPARC64 - case 0x30: /* ldc */ - case 0x31: /* ldcsr */ - case 0x33: /* lddc */ - goto ncp_insn; -#endif -#endif -#ifdef TARGET_SPARC64 - case 0x08: /* V9 ldsw */ - gen_address_mask(dc, cpu_addr); - tcg_gen_qemu_ld_tl(cpu_val, cpu_addr, - dc->mem_idx, MO_TESL | MO_ALIGN); - break; - case 0x0b: /* V9 ldx */ - gen_address_mask(dc, cpu_addr); - tcg_gen_qemu_ld_tl(cpu_val, cpu_addr, - dc->mem_idx, MO_TEUQ | MO_ALIGN); - break; - case 0x18: /* V9 ldswa */ - gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TESL); - break; - case 0x1b: /* V9 ldxa */ - gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEUQ); - break; - case 0x2d: /* V9 prefetch, no effect */ - goto skip_move; - case 0x30: /* V9 ldfa */ - if (gen_trap_ifnofpu(dc)) { - goto jmp_insn; - } - gen_ldf_asi(dc, cpu_addr, insn, 4, rd); - gen_update_fprs_dirty(dc, rd); - goto skip_move; - case 0x33: /* V9 lddfa */ - if (gen_trap_ifnofpu(dc)) { - goto jmp_insn; - } - gen_ldf_asi(dc, cpu_addr, insn, 8, DFPREG(rd)); - gen_update_fprs_dirty(dc, DFPREG(rd)); - goto skip_move; - case 0x3d: /* V9 prefetcha, no effect */ - goto skip_move; - case 0x32: /* V9 ldqfa */ - CHECK_FPU_FEATURE(dc, FLOAT128); - if (gen_trap_ifnofpu(dc)) { - goto jmp_insn; - } - gen_ldf_asi(dc, cpu_addr, insn, 16, QFPREG(rd)); - gen_update_fprs_dirty(dc, QFPREG(rd)); - goto skip_move; -#endif - default: - goto illegal_insn; - } - gen_store_gpr(dc, rd, cpu_val); -#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64) - skip_move: ; -#endif - } else if (xop >= 0x20 && xop < 0x24) { - if (gen_trap_ifnofpu(dc)) { - goto jmp_insn; - } - switch (xop) { - case 0x20: /* ldf, load fpreg */ - gen_address_mask(dc, cpu_addr); - cpu_dst_32 = gen_dest_fpr_F(dc); - tcg_gen_qemu_ld_i32(cpu_dst_32, cpu_addr, - dc->mem_idx, MO_TEUL | MO_ALIGN); - gen_store_fpr_F(dc, rd, cpu_dst_32); - break; - case 0x21: /* ldfsr, V9 ldxfsr */ -#ifdef TARGET_SPARC64 - gen_address_mask(dc, cpu_addr); - if (rd == 1) { - TCGv_i64 t64 = tcg_temp_new_i64(); - tcg_gen_qemu_ld_i64(t64, cpu_addr, - dc->mem_idx, MO_TEUQ | MO_ALIGN); - gen_helper_ldxfsr(cpu_fsr, tcg_env, cpu_fsr, t64); - break; - } -#endif - cpu_dst_32 = tcg_temp_new_i32(); - tcg_gen_qemu_ld_i32(cpu_dst_32, cpu_addr, - dc->mem_idx, MO_TEUL | MO_ALIGN); - gen_helper_ldfsr(cpu_fsr, tcg_env, cpu_fsr, cpu_dst_32); - break; - case 0x22: /* ldqf, load quad fpreg */ - CHECK_FPU_FEATURE(dc, FLOAT128); - gen_address_mask(dc, cpu_addr); - cpu_src1_64 = tcg_temp_new_i64(); - tcg_gen_qemu_ld_i64(cpu_src1_64, cpu_addr, dc->mem_idx, - MO_TEUQ | MO_ALIGN_4); - tcg_gen_addi_tl(cpu_addr, cpu_addr, 8); - cpu_src2_64 = tcg_temp_new_i64(); - tcg_gen_qemu_ld_i64(cpu_src2_64, cpu_addr, dc->mem_idx, - MO_TEUQ | MO_ALIGN_4); - gen_store_fpr_Q(dc, rd, cpu_src1_64, cpu_src2_64); - break; - case 0x23: /* lddf, load double fpreg */ - gen_address_mask(dc, cpu_addr); - cpu_dst_64 = gen_dest_fpr_D(dc, rd); - tcg_gen_qemu_ld_i64(cpu_dst_64, cpu_addr, dc->mem_idx, - MO_TEUQ | MO_ALIGN_4); - gen_store_fpr_D(dc, rd, cpu_dst_64); - break; - default: - goto illegal_insn; - } - } else if (xop < 8 || (xop >= 0x14 && xop < 0x18) || - xop == 0xe || xop == 0x1e) { - TCGv cpu_val = gen_load_gpr(dc, rd); - - switch (xop) { - case 0x4: /* st, store word */ - gen_address_mask(dc, cpu_addr); - 
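/*
 * Every access in this hunk passes a MemOp assembled from independent
 * pieces: size, endianness and alignment, e.g. MO_TEUL | MO_ALIGN is
 * a 32-bit target-endian access that must trap when misaligned.  A
 * standalone model of what that combination asks of the memory
 * subsystem (big-endian, as on SPARC; names are ours):
 */
#include <stdint.h>
#include <string.h>

static int store_teul_aligned(uint8_t *ram, uint64_t addr, uint32_t val)
{
    if (addr & 3) {
        return -1;          /* MO_ALIGN: misaligned -> alignment trap */
    }
    uint8_t be[4] = { val >> 24, val >> 16, val >> 8, val };
    memcpy(ram + addr, be, sizeof(be));
    return 0;
}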
tcg_gen_qemu_st_tl(cpu_val, cpu_addr, - dc->mem_idx, MO_TEUL | MO_ALIGN); - break; - case 0x5: /* stb, store byte */ - gen_address_mask(dc, cpu_addr); - tcg_gen_qemu_st_tl(cpu_val, cpu_addr, dc->mem_idx, MO_UB); - break; - case 0x6: /* sth, store halfword */ - gen_address_mask(dc, cpu_addr); - tcg_gen_qemu_st_tl(cpu_val, cpu_addr, - dc->mem_idx, MO_TEUW | MO_ALIGN); - break; - case 0x7: /* std, store double word */ - if (rd & 1) - goto illegal_insn; - else { - TCGv_i64 t64; - TCGv lo; - - gen_address_mask(dc, cpu_addr); - lo = gen_load_gpr(dc, rd + 1); - t64 = tcg_temp_new_i64(); - tcg_gen_concat_tl_i64(t64, lo, cpu_val); - tcg_gen_qemu_st_i64(t64, cpu_addr, - dc->mem_idx, MO_TEUQ | MO_ALIGN); - } - break; -#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64) - case 0x14: /* sta, V9 stwa, store word alternate */ - gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEUL); - break; - case 0x15: /* stba, store byte alternate */ - gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_UB); - break; - case 0x16: /* stha, store halfword alternate */ - gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEUW); - break; - case 0x17: /* stda, store double word alternate */ - if (rd & 1) { - goto illegal_insn; - } - gen_stda_asi(dc, cpu_val, cpu_addr, insn, rd); - break; -#endif -#ifdef TARGET_SPARC64 - case 0x0e: /* V9 stx */ - gen_address_mask(dc, cpu_addr); - tcg_gen_qemu_st_tl(cpu_val, cpu_addr, - dc->mem_idx, MO_TEUQ | MO_ALIGN); - break; - case 0x1e: /* V9 stxa */ - gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEUQ); - break; -#endif - default: - goto illegal_insn; - } - } else if (xop > 0x23 && xop < 0x28) { - if (gen_trap_ifnofpu(dc)) { - goto jmp_insn; - } - switch (xop) { - case 0x24: /* stf, store fpreg */ - gen_address_mask(dc, cpu_addr); - cpu_src1_32 = gen_load_fpr_F(dc, rd); - tcg_gen_qemu_st_i32(cpu_src1_32, cpu_addr, - dc->mem_idx, MO_TEUL | MO_ALIGN); - break; - case 0x25: /* stfsr, V9 stxfsr */ - { -#ifdef TARGET_SPARC64 - gen_address_mask(dc, cpu_addr); - if (rd == 1) { - tcg_gen_qemu_st_tl(cpu_fsr, cpu_addr, - dc->mem_idx, MO_TEUQ | MO_ALIGN); - break; - } -#endif - tcg_gen_qemu_st_tl(cpu_fsr, cpu_addr, - dc->mem_idx, MO_TEUL | MO_ALIGN); - } - break; - case 0x26: -#ifdef TARGET_SPARC64 - /* V9 stqf, store quad fpreg */ - CHECK_FPU_FEATURE(dc, FLOAT128); - gen_address_mask(dc, cpu_addr); - /* ??? While stqf only requires 4-byte alignment, it is - legal for the cpu to signal the unaligned exception. - The OS trap handler is then required to fix it up. - For qemu, this avoids having to probe the second page - before performing the first write. 
*/ - cpu_src1_64 = gen_load_fpr_Q0(dc, rd); - tcg_gen_qemu_st_i64(cpu_src1_64, cpu_addr, - dc->mem_idx, MO_TEUQ | MO_ALIGN_16); - tcg_gen_addi_tl(cpu_addr, cpu_addr, 8); - cpu_src2_64 = gen_load_fpr_Q1(dc, rd); - tcg_gen_qemu_st_i64(cpu_src1_64, cpu_addr, - dc->mem_idx, MO_TEUQ); - break; -#else /* !TARGET_SPARC64 */ - /* stdfq, store floating point queue */ -#if defined(CONFIG_USER_ONLY) - goto illegal_insn; -#else - if (!supervisor(dc)) - goto priv_insn; - if (gen_trap_ifnofpu(dc)) { - goto jmp_insn; - } - goto nfq_insn; -#endif -#endif - case 0x27: /* stdf, store double fpreg */ - gen_address_mask(dc, cpu_addr); - cpu_src1_64 = gen_load_fpr_D(dc, rd); - tcg_gen_qemu_st_i64(cpu_src1_64, cpu_addr, dc->mem_idx, - MO_TEUQ | MO_ALIGN_4); - break; - default: - goto illegal_insn; - } - } else if (xop > 0x33 && xop < 0x3f) { - switch (xop) { -#ifdef TARGET_SPARC64 - case 0x34: /* V9 stfa */ - if (gen_trap_ifnofpu(dc)) { - goto jmp_insn; - } - gen_stf_asi(dc, cpu_addr, insn, 4, rd); - break; - case 0x36: /* V9 stqfa */ - { - CHECK_FPU_FEATURE(dc, FLOAT128); - if (gen_trap_ifnofpu(dc)) { - goto jmp_insn; - } - gen_stf_asi(dc, cpu_addr, insn, 16, QFPREG(rd)); - } - break; - case 0x37: /* V9 stdfa */ - if (gen_trap_ifnofpu(dc)) { - goto jmp_insn; - } - gen_stf_asi(dc, cpu_addr, insn, 8, DFPREG(rd)); - break; - case 0x3e: /* V9 casxa */ - rs2 = GET_FIELD(insn, 27, 31); - cpu_src2 = gen_load_gpr(dc, rs2); - gen_casx_asi(dc, cpu_addr, cpu_src2, insn, rd); - break; -#else - case 0x34: /* stc */ - case 0x35: /* stcsr */ - case 0x36: /* stdcq */ - case 0x37: /* stdc */ - goto ncp_insn; -#endif -#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64) - case 0x3c: /* V9 or LEON3 casa */ -#ifndef TARGET_SPARC64 - CHECK_IU_FEATURE(dc, CASA); -#endif - rs2 = GET_FIELD(insn, 27, 31); - cpu_src2 = gen_load_gpr(dc, rs2); - gen_cas_asi(dc, cpu_addr, cpu_src2, insn, rd); - break; -#endif - default: - goto illegal_insn; - } - } else { - goto illegal_insn; - } + if (src2 == NULL) { + return false; + } + gen_fcompare(&cmp, a->cc, a->cond); + return do_mov_cond(dc, &cmp, a->rd, src2); +} + +static bool trans_MOVR(DisasContext *dc, arg_MOVR *a) +{ + TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm); + DisasCompare cmp; + + if (src2 == NULL) { + return false; + } + gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1)); + return do_mov_cond(dc, &cmp, a->rd, src2); +} + +static bool do_add_special(DisasContext *dc, arg_r_r_ri *a, + bool (*func)(DisasContext *dc, int rd, TCGv src)) +{ + TCGv src1, sum; + + /* For simplicity, we under-decoded the rs2 form. */ + if (!a->imm && a->rs2_or_imm & ~0x1f) { + return false; + } + + /* + * Always load the sum into a new temporary. + * This is required to capture the value across a window change, + * e.g. SAVE and RESTORE, and may be optimized away otherwise. + */ + sum = tcg_temp_new(); + src1 = gen_load_gpr(dc, a->rs1); + if (a->imm || a->rs2_or_imm == 0) { + tcg_gen_addi_tl(sum, src1, a->rs2_or_imm); + } else { + tcg_gen_add_tl(sum, src1, cpu_regs[a->rs2_or_imm]); + } + return func(dc, a->rd, sum); +} + +static bool do_jmpl(DisasContext *dc, int rd, TCGv src) +{ + /* + * Preserve pc across advance, so that we can delay + * the writeback to rd until after src is consumed. 
+ */ + target_ulong cur_pc = dc->pc; + + gen_check_align(dc, src, 3); + + gen_mov_pc_npc(dc); + tcg_gen_mov_tl(cpu_npc, src); + gen_address_mask(dc, cpu_npc); + gen_store_gpr(dc, rd, tcg_constant_tl(cur_pc)); + + dc->npc = DYNAMIC_PC_LOOKUP; + return true; +} + +TRANS(JMPL, ALL, do_add_special, a, do_jmpl) + +static bool do_rett(DisasContext *dc, int rd, TCGv src) +{ + if (!supervisor(dc)) { + return raise_priv(dc); + } + + gen_check_align(dc, src, 3); + + gen_mov_pc_npc(dc); + tcg_gen_mov_tl(cpu_npc, src); + gen_helper_rett(tcg_env); + + dc->npc = DYNAMIC_PC; + return true; +} + +TRANS(RETT, 32, do_add_special, a, do_rett) + +static bool do_return(DisasContext *dc, int rd, TCGv src) +{ + gen_check_align(dc, src, 3); + + gen_mov_pc_npc(dc); + tcg_gen_mov_tl(cpu_npc, src); + gen_address_mask(dc, cpu_npc); + + gen_helper_restore(tcg_env); + dc->npc = DYNAMIC_PC_LOOKUP; + return true; +} + +TRANS(RETURN, 64, do_add_special, a, do_return) + +static bool do_save(DisasContext *dc, int rd, TCGv src) +{ + gen_helper_save(tcg_env); + gen_store_gpr(dc, rd, src); + return advance_pc(dc); +} + +TRANS(SAVE, ALL, do_add_special, a, do_save) + +static bool do_restore(DisasContext *dc, int rd, TCGv src) +{ + gen_helper_restore(tcg_env); + gen_store_gpr(dc, rd, src); + return advance_pc(dc); +} + +TRANS(RESTORE, ALL, do_add_special, a, do_restore) + +static bool do_done_retry(DisasContext *dc, bool done) +{ + if (!supervisor(dc)) { + return raise_priv(dc); + } + dc->npc = DYNAMIC_PC; + dc->pc = DYNAMIC_PC; + translator_io_start(&dc->base); + if (done) { + gen_helper_done(tcg_env); + } else { + gen_helper_retry(tcg_env); + } + return true; +} + +TRANS(DONE, 64, do_done_retry, true) +TRANS(RETRY, 64, do_done_retry, false) + +/* + * Major opcode 11 -- load and store instructions + */ + +static TCGv gen_ldst_addr(DisasContext *dc, int rs1, bool imm, int rs2_or_imm) +{ + TCGv addr, tmp = NULL; + + /* For simplicity, we under-decoded the rs2 form. 
*/ + if (!imm && rs2_or_imm & ~0x1f) { + return NULL; + } + + addr = gen_load_gpr(dc, rs1); + if (rs2_or_imm) { + tmp = tcg_temp_new(); + if (imm) { + tcg_gen_addi_tl(tmp, addr, rs2_or_imm); + } else { + tcg_gen_add_tl(tmp, addr, cpu_regs[rs2_or_imm]); } - break; + addr = tmp; } - /* default case for non jump instructions */ - if (dc->npc & 3) { - switch (dc->npc) { - case DYNAMIC_PC: - case DYNAMIC_PC_LOOKUP: - dc->pc = dc->npc; - gen_op_next_insn(); - break; - case JUMP_PC: - /* we can do a static jump */ - gen_branch2(dc, dc->jump_pc[0], dc->jump_pc[1], cpu_cond); - dc->base.is_jmp = DISAS_NORETURN; - break; - default: - g_assert_not_reached(); + if (AM_CHECK(dc)) { + if (!tmp) { + tmp = tcg_temp_new(); } - } else { - dc->pc = dc->npc; - dc->npc = dc->npc + 4; + tcg_gen_ext32u_tl(tmp, addr); + addr = tmp; + } + return addr; +} + +static bool do_ld_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop) +{ + TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm); + DisasASI da; + + if (addr == NULL) { + return false; + } + da = resolve_asi(dc, a->asi, mop); + + reg = gen_dest_gpr(dc, a->rd); + gen_ld_asi(dc, &da, reg, addr); + gen_store_gpr(dc, a->rd, reg); + return advance_pc(dc); +} + +TRANS(LDUW, ALL, do_ld_gpr, a, MO_TEUL) +TRANS(LDUB, ALL, do_ld_gpr, a, MO_UB) +TRANS(LDUH, ALL, do_ld_gpr, a, MO_TEUW) +TRANS(LDSB, ALL, do_ld_gpr, a, MO_SB) +TRANS(LDSH, ALL, do_ld_gpr, a, MO_TESW) +TRANS(LDSW, 64, do_ld_gpr, a, MO_TESL) +TRANS(LDX, 64, do_ld_gpr, a, MO_TEUQ) + +static bool do_st_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop) +{ + TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm); + DisasASI da; + + if (addr == NULL) { + return false; + } + da = resolve_asi(dc, a->asi, mop); + + reg = gen_load_gpr(dc, a->rd); + gen_st_asi(dc, &da, reg, addr); + return advance_pc(dc); +} + +TRANS(STW, ALL, do_st_gpr, a, MO_TEUL) +TRANS(STB, ALL, do_st_gpr, a, MO_UB) +TRANS(STH, ALL, do_st_gpr, a, MO_TEUW) +TRANS(STX, 64, do_st_gpr, a, MO_TEUQ) + +static bool trans_LDD(DisasContext *dc, arg_r_r_ri_asi *a) +{ + TCGv addr; + DisasASI da; + + if (a->rd & 1) { + return false; + } + addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm); + if (addr == NULL) { + return false; + } + da = resolve_asi(dc, a->asi, MO_TEUQ); + gen_ldda_asi(dc, &da, addr, a->rd); + return advance_pc(dc); +} + +static bool trans_STD(DisasContext *dc, arg_r_r_ri_asi *a) +{ + TCGv addr; + DisasASI da; + + if (a->rd & 1) { + return false; + } + addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm); + if (addr == NULL) { + return false; + } + da = resolve_asi(dc, a->asi, MO_TEUQ); + gen_stda_asi(dc, &da, addr, a->rd); + return advance_pc(dc); +} + +static bool trans_LDSTUB(DisasContext *dc, arg_r_r_ri_asi *a) +{ + TCGv addr, reg; + DisasASI da; + + addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm); + if (addr == NULL) { + return false; + } + da = resolve_asi(dc, a->asi, MO_UB); + + reg = gen_dest_gpr(dc, a->rd); + gen_ldstub_asi(dc, &da, reg, addr); + gen_store_gpr(dc, a->rd, reg); + return advance_pc(dc); +} + +static bool trans_SWAP(DisasContext *dc, arg_r_r_ri_asi *a) +{ + TCGv addr, dst, src; + DisasASI da; + + addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm); + if (addr == NULL) { + return false; + } + da = resolve_asi(dc, a->asi, MO_TEUL); + + dst = gen_dest_gpr(dc, a->rd); + src = gen_load_gpr(dc, a->rd); + gen_swap_asi(dc, &da, dst, src, addr); + gen_store_gpr(dc, a->rd, dst); + return advance_pc(dc); +} + +static bool do_casa(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop) +{ + TCGv 
addr, o, n, c; + DisasASI da; + + addr = gen_ldst_addr(dc, a->rs1, true, 0); + if (addr == NULL) { + return false; + } + da = resolve_asi(dc, a->asi, mop); + + o = gen_dest_gpr(dc, a->rd); + n = gen_load_gpr(dc, a->rd); + c = gen_load_gpr(dc, a->rs2_or_imm); + gen_cas_asi(dc, &da, o, n, c, addr); + gen_store_gpr(dc, a->rd, o); + return advance_pc(dc); +} + +TRANS(CASA, CASA, do_casa, a, MO_TEUL) +TRANS(CASXA, 64, do_casa, a, MO_TEUQ) + +static bool do_ld_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz) +{ + TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm); + DisasASI da; + + if (addr == NULL) { + return false; + } + if (gen_trap_ifnofpu(dc)) { + return true; + } + if (sz == MO_128 && gen_trap_float128(dc)) { + return true; + } + da = resolve_asi(dc, a->asi, MO_TE | sz); + gen_ldf_asi(dc, &da, sz, addr, a->rd); + gen_update_fprs_dirty(dc, a->rd); + return advance_pc(dc); +} + +TRANS(LDF, ALL, do_ld_fpr, a, MO_32) +TRANS(LDDF, ALL, do_ld_fpr, a, MO_64) +TRANS(LDQF, ALL, do_ld_fpr, a, MO_128) + +TRANS(LDFA, 64, do_ld_fpr, a, MO_32) +TRANS(LDDFA, 64, do_ld_fpr, a, MO_64) +TRANS(LDQFA, 64, do_ld_fpr, a, MO_128) + +static bool do_st_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz) +{ + TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm); + DisasASI da; + + if (addr == NULL) { + return false; + } + if (gen_trap_ifnofpu(dc)) { + return true; + } + if (sz == MO_128 && gen_trap_float128(dc)) { + return true; + } + da = resolve_asi(dc, a->asi, MO_TE | sz); + gen_stf_asi(dc, &da, sz, addr, a->rd); + return advance_pc(dc); +} + +TRANS(STF, ALL, do_st_fpr, a, MO_32) +TRANS(STDF, ALL, do_st_fpr, a, MO_64) +TRANS(STQF, ALL, do_st_fpr, a, MO_128) + +TRANS(STFA, 64, do_st_fpr, a, MO_32) +TRANS(STDFA, 64, do_st_fpr, a, MO_64) +TRANS(STQFA, 64, do_st_fpr, a, MO_128) + +static bool trans_STDFQ(DisasContext *dc, arg_STDFQ *a) +{ + if (!avail_32(dc)) { + return false; + } + if (!supervisor(dc)) { + return raise_priv(dc); + } + if (gen_trap_ifnofpu(dc)) { + return true; } - jmp_insn: - return; - illegal_insn: - gen_exception(dc, TT_ILL_INSN); - return; - unimp_flush: - gen_exception(dc, TT_UNIMP_FLUSH); - return; -#if !defined(CONFIG_USER_ONLY) - priv_insn: - gen_exception(dc, TT_PRIV_INSN); - return; -#endif - nfpu_insn: - gen_op_fpexception_im(dc, FSR_FTT_UNIMPFPOP); - return; -#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64) - nfq_insn: gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR); - return; -#endif -#ifndef TARGET_SPARC64 - ncp_insn: - gen_exception(dc, TT_NCP_INSN); - return; -#endif + return true; +} + +static bool do_ldfsr(DisasContext *dc, arg_r_r_ri *a, MemOp mop, + target_ulong new_mask, target_ulong old_mask) +{ + TCGv tmp, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm); + if (addr == NULL) { + return false; + } + if (gen_trap_ifnofpu(dc)) { + return true; + } + tmp = tcg_temp_new(); + tcg_gen_qemu_ld_tl(tmp, addr, dc->mem_idx, mop | MO_ALIGN); + tcg_gen_andi_tl(tmp, tmp, new_mask); + tcg_gen_andi_tl(cpu_fsr, cpu_fsr, old_mask); + tcg_gen_or_tl(cpu_fsr, cpu_fsr, tmp); + gen_helper_set_fsr(tcg_env, cpu_fsr); + return advance_pc(dc); +} + +TRANS(LDFSR, ALL, do_ldfsr, a, MO_TEUL, FSR_LDFSR_MASK, FSR_LDFSR_OLDMASK) +TRANS(LDXFSR, 64, do_ldfsr, a, MO_TEUQ, FSR_LDXFSR_MASK, FSR_LDXFSR_OLDMASK) + +static bool do_stfsr(DisasContext *dc, arg_r_r_ri *a, MemOp mop) +{ + TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm); + if (addr == NULL) { + return false; + } + if (gen_trap_ifnofpu(dc)) { + return true; + } + tcg_gen_qemu_st_tl(cpu_fsr, addr, dc->mem_idx, 
+static bool do_stfsr(DisasContext *dc, arg_r_r_ri *a, MemOp mop)
+{
+    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
+
+    if (addr == NULL) {
+        return false;
+    }
+    if (gen_trap_ifnofpu(dc)) {
+        return true;
+    }
+    tcg_gen_qemu_st_tl(cpu_fsr, addr, dc->mem_idx, mop | MO_ALIGN);
+    return advance_pc(dc);
 }
 
+TRANS(STFSR, ALL, do_stfsr, a, MO_TEUL)
+TRANS(STXFSR, 64, do_stfsr, a, MO_TEUQ)
+
+static bool do_fc(DisasContext *dc, int rd, bool c)
+{
+    uint64_t mask;
+
+    if (gen_trap_ifnofpu(dc)) {
+        return true;
+    }
+
+    if (rd & 1) {
+        mask = MAKE_64BIT_MASK(0, 32);
+    } else {
+        mask = MAKE_64BIT_MASK(32, 32);
+    }
+    if (c) {
+        tcg_gen_ori_i64(cpu_fpr[rd / 2], cpu_fpr[rd / 2], mask);
+    } else {
+        tcg_gen_andi_i64(cpu_fpr[rd / 2], cpu_fpr[rd / 2], ~mask);
+    }
+    gen_update_fprs_dirty(dc, rd);
+    return advance_pc(dc);
+}
+
+TRANS(FZEROs, VIS1, do_fc, a->rd, 0)
+TRANS(FONEs, VIS1, do_fc, a->rd, 1)
+
+static bool do_dc(DisasContext *dc, int rd, int64_t c)
+{
+    if (gen_trap_ifnofpu(dc)) {
+        return true;
+    }
+
+    tcg_gen_movi_i64(cpu_fpr[rd / 2], c);
+    gen_update_fprs_dirty(dc, rd);
+    return advance_pc(dc);
+}
+
+TRANS(FZEROd, VIS1, do_dc, a->rd, 0)
+TRANS(FONEd, VIS1, do_dc, a->rd, -1)
+
+static bool do_ff(DisasContext *dc, arg_r_r *a,
+                  void (*func)(TCGv_i32, TCGv_i32))
+{
+    TCGv_i32 tmp;
+
+    if (gen_trap_ifnofpu(dc)) {
+        return true;
+    }
+
+    tmp = gen_load_fpr_F(dc, a->rs);
+    func(tmp, tmp);
+    gen_store_fpr_F(dc, a->rd, tmp);
+    return advance_pc(dc);
+}
+
+TRANS(FMOVs, ALL, do_ff, a, gen_op_fmovs)
+TRANS(FNEGs, ALL, do_ff, a, gen_op_fnegs)
+TRANS(FABSs, ALL, do_ff, a, gen_op_fabss)
+TRANS(FSRCs, VIS1, do_ff, a, tcg_gen_mov_i32)
+TRANS(FNOTs, VIS1, do_ff, a, tcg_gen_not_i32)
+
+static bool do_fd(DisasContext *dc, arg_r_r *a,
+                  void (*func)(TCGv_i32, TCGv_i64))
+{
+    TCGv_i32 dst;
+    TCGv_i64 src;
+
+    if (gen_trap_ifnofpu(dc)) {
+        return true;
+    }
+
+    dst = gen_dest_fpr_F(dc);
+    src = gen_load_fpr_D(dc, a->rs);
+    func(dst, src);
+    gen_store_fpr_F(dc, a->rd, dst);
+    return advance_pc(dc);
+}
+
+TRANS(FPACK16, VIS1, do_fd, a, gen_op_fpack16)
+TRANS(FPACKFIX, VIS1, do_fd, a, gen_op_fpackfix)
+
+static bool do_env_ff(DisasContext *dc, arg_r_r *a,
+                      void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
+{
+    TCGv_i32 tmp;
+
+    if (gen_trap_ifnofpu(dc)) {
+        return true;
+    }
+
+    gen_op_clear_ieee_excp_and_FTT();
+    tmp = gen_load_fpr_F(dc, a->rs);
+    func(tmp, tcg_env, tmp);
+    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
+    gen_store_fpr_F(dc, a->rd, tmp);
+    return advance_pc(dc);
+}
+
+TRANS(FSQRTs, ALL, do_env_ff, a, gen_helper_fsqrts)
+TRANS(FiTOs, ALL, do_env_ff, a, gen_helper_fitos)
+TRANS(FsTOi, ALL, do_env_ff, a, gen_helper_fstoi)
+
+static bool do_env_fd(DisasContext *dc, arg_r_r *a,
+                      void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
+{
+    TCGv_i32 dst;
+    TCGv_i64 src;
+
+    if (gen_trap_ifnofpu(dc)) {
+        return true;
+    }
+
+    gen_op_clear_ieee_excp_and_FTT();
+    dst = gen_dest_fpr_F(dc);
+    src = gen_load_fpr_D(dc, a->rs);
+    func(dst, tcg_env, src);
+    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
+    gen_store_fpr_F(dc, a->rd, dst);
+    return advance_pc(dc);
+}
+
+TRANS(FdTOs, ALL, do_env_fd, a, gen_helper_fdtos)
+TRANS(FdTOi, ALL, do_env_fd, a, gen_helper_fdtoi)
+TRANS(FxTOs, 64, do_env_fd, a, gen_helper_fxtos)
+
+static bool do_dd(DisasContext *dc, arg_r_r *a,
+                  void (*func)(TCGv_i64, TCGv_i64))
+{
+    TCGv_i64 dst, src;
+
+    if (gen_trap_ifnofpu(dc)) {
+        return true;
+    }
+
+    dst = gen_dest_fpr_D(dc, a->rd);
+    src = gen_load_fpr_D(dc, a->rs);
+    func(dst, src);
+    gen_store_fpr_D(dc, a->rd, dst);
+    return advance_pc(dc);
+}
+
+TRANS(FMOVd, 64, do_dd, a, gen_op_fmovd)
+TRANS(FNEGd, 64, do_dd, a, gen_op_fnegd)
+TRANS(FABSd, 64, do_dd, a, gen_op_fabsd)
+TRANS(FSRCd, VIS1, do_dd, a, tcg_gen_mov_i64)
+TRANS(FNOTd, VIS1, do_dd, a, tcg_gen_not_i64)
+
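Note: do_ff() funnels the non-trapping single-precision unaries through plain i32 callbacks. The gen_op_fnegs/gen_op_fabss helpers it is paired with are presumably simple sign-bit manipulations along these lines (a sketch; the real definitions live earlier in the file):

    static void gen_op_fnegs(TCGv_i32 dst, TCGv_i32 src)
    {
        tcg_gen_xori_i32(dst, src, 1u << 31);    /* flip the sign bit */
    }

    static void gen_op_fabss(TCGv_i32 dst, TCGv_i32 src)
    {
        tcg_gen_andi_i32(dst, src, ~(1u << 31)); /* clear the sign bit */
    }

Neither can raise IEEE exceptions, which is why do_ff() skips the clear/check_ieee_exceptions bracketing that the do_env_* variants perform.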
+static bool do_env_dd(DisasContext *dc, arg_r_r *a,
+                      void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
+{
+    TCGv_i64 dst, src;
+
+    if (gen_trap_ifnofpu(dc)) {
+        return true;
+    }
+
+    gen_op_clear_ieee_excp_and_FTT();
+    dst = gen_dest_fpr_D(dc, a->rd);
+    src = gen_load_fpr_D(dc, a->rs);
+    func(dst, tcg_env, src);
+    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
+    gen_store_fpr_D(dc, a->rd, dst);
+    return advance_pc(dc);
+}
+
+TRANS(FSQRTd, ALL, do_env_dd, a, gen_helper_fsqrtd)
+TRANS(FxTOd, 64, do_env_dd, a, gen_helper_fxtod)
+TRANS(FdTOx, 64, do_env_dd, a, gen_helper_fdtox)
+
+static bool do_env_df(DisasContext *dc, arg_r_r *a,
+                      void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
+{
+    TCGv_i64 dst;
+    TCGv_i32 src;
+
+    if (gen_trap_ifnofpu(dc)) {
+        return true;
+    }
+
+    gen_op_clear_ieee_excp_and_FTT();
+    dst = gen_dest_fpr_D(dc, a->rd);
+    src = gen_load_fpr_F(dc, a->rs);
+    func(dst, tcg_env, src);
+    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
+    gen_store_fpr_D(dc, a->rd, dst);
+    return advance_pc(dc);
+}
+
+TRANS(FiTOd, ALL, do_env_df, a, gen_helper_fitod)
+TRANS(FsTOd, ALL, do_env_df, a, gen_helper_fstod)
+TRANS(FsTOx, 64, do_env_df, a, gen_helper_fstox)
+
+static bool trans_FMOVq(DisasContext *dc, arg_FMOVq *a)
+{
+    int rd, rs;
+
+    if (!avail_64(dc)) {
+        return false;
+    }
+    if (gen_trap_ifnofpu(dc)) {
+        return true;
+    }
+    if (gen_trap_float128(dc)) {
+        return true;
+    }
+
+    gen_op_clear_ieee_excp_and_FTT();
+    rd = QFPREG(a->rd);
+    rs = QFPREG(a->rs);
+    tcg_gen_mov_i64(cpu_fpr[rd / 2], cpu_fpr[rs / 2]);
+    tcg_gen_mov_i64(cpu_fpr[rd / 2 + 1], cpu_fpr[rs / 2 + 1]);
+    gen_update_fprs_dirty(dc, rd);
+    return advance_pc(dc);
+}
+
+static bool do_qq(DisasContext *dc, arg_r_r *a,
+                  void (*func)(TCGv_env))
+{
+    if (gen_trap_ifnofpu(dc)) {
+        return true;
+    }
+    if (gen_trap_float128(dc)) {
+        return true;
+    }
+
+    gen_op_clear_ieee_excp_and_FTT();
+    gen_op_load_fpr_QT1(QFPREG(a->rs));
+    func(tcg_env);
+    gen_op_store_QT0_fpr(QFPREG(a->rd));
+    gen_update_fprs_dirty(dc, QFPREG(a->rd));
+    return advance_pc(dc);
+}
+
+TRANS(FNEGq, 64, do_qq, a, gen_helper_fnegq)
+TRANS(FABSq, 64, do_qq, a, gen_helper_fabsq)
+
+static bool do_env_qq(DisasContext *dc, arg_r_r *a,
+                      void (*func)(TCGv_env))
+{
+    if (gen_trap_ifnofpu(dc)) {
+        return true;
+    }
+    if (gen_trap_float128(dc)) {
+        return true;
+    }
+
+    gen_op_clear_ieee_excp_and_FTT();
+    gen_op_load_fpr_QT1(QFPREG(a->rs));
+    func(tcg_env);
+    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
+    gen_op_store_QT0_fpr(QFPREG(a->rd));
+    gen_update_fprs_dirty(dc, QFPREG(a->rd));
+    return advance_pc(dc);
+}
+
+TRANS(FSQRTq, ALL, do_env_qq, a, gen_helper_fsqrtq)
+
+static bool do_env_fq(DisasContext *dc, arg_r_r *a,
+                      void (*func)(TCGv_i32, TCGv_env))
+{
+    TCGv_i32 dst;
+
+    if (gen_trap_ifnofpu(dc)) {
+        return true;
+    }
+    if (gen_trap_float128(dc)) {
+        return true;
+    }
+
+    gen_op_clear_ieee_excp_and_FTT();
+    gen_op_load_fpr_QT1(QFPREG(a->rs));
+    dst = gen_dest_fpr_F(dc);
+    func(dst, tcg_env);
+    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
+    gen_store_fpr_F(dc, a->rd, dst);
+    return advance_pc(dc);
+}
+
+TRANS(FqTOs, ALL, do_env_fq, a, gen_helper_fqtos)
+TRANS(FqTOi, ALL, do_env_fq, a, gen_helper_fqtoi)
+
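Note: the quad-precision paths above never pass 128-bit values through TCG directly; operands are staged through qt0/qt1 slots in CPUSPARCState and the helpers operate on those. A sketch of what the load half of that plumbing is assumed to look like (field and type names are assumptions based on the calls visible in this diff):

    /* Copy the two 64-bit halves of quad register 'src' into env->qt1,
       where gen_helper_fsqrtq() and friends expect their input. */
    static void gen_op_load_fpr_QT1(unsigned int src)
    {
        tcg_gen_st_i64(cpu_fpr[src / 2], tcg_env,
                       offsetof(CPUSPARCState, qt1) +
                       offsetof(CPU_QuadU, ll.upper));
        tcg_gen_st_i64(cpu_fpr[src / 2 + 1], tcg_env,
                       offsetof(CPUSPARCState, qt1) +
                       offsetof(CPU_QuadU, ll.lower));
    }

gen_op_store_QT0_fpr() would be the mirror image, reading the helper's qt0 result back into the cpu_fpr pair.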
+static bool do_env_dq(DisasContext *dc, arg_r_r *a,
+                      void (*func)(TCGv_i64, TCGv_env))
+{
+    TCGv_i64 dst;
+
+    if (gen_trap_ifnofpu(dc)) {
+        return true;
+    }
+    if (gen_trap_float128(dc)) {
+        return true;
+    }
+
+    gen_op_clear_ieee_excp_and_FTT();
+    gen_op_load_fpr_QT1(QFPREG(a->rs));
+    dst = gen_dest_fpr_D(dc, a->rd);
+    func(dst, tcg_env);
+    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
+    gen_store_fpr_D(dc, a->rd, dst);
+    return advance_pc(dc);
+}
+
+TRANS(FqTOd, ALL, do_env_dq, a, gen_helper_fqtod)
+TRANS(FqTOx, 64, do_env_dq, a, gen_helper_fqtox)
+
+static bool do_env_qf(DisasContext *dc, arg_r_r *a,
+                      void (*func)(TCGv_env, TCGv_i32))
+{
+    TCGv_i32 src;
+
+    if (gen_trap_ifnofpu(dc)) {
+        return true;
+    }
+    if (gen_trap_float128(dc)) {
+        return true;
+    }
+
+    gen_op_clear_ieee_excp_and_FTT();
+    src = gen_load_fpr_F(dc, a->rs);
+    func(tcg_env, src);
+    gen_op_store_QT0_fpr(QFPREG(a->rd));
+    gen_update_fprs_dirty(dc, QFPREG(a->rd));
+    return advance_pc(dc);
+}
+
+TRANS(FiTOq, ALL, do_env_qf, a, gen_helper_fitoq)
+TRANS(FsTOq, ALL, do_env_qf, a, gen_helper_fstoq)
+
+static bool do_env_qd(DisasContext *dc, arg_r_r *a,
+                      void (*func)(TCGv_env, TCGv_i64))
+{
+    TCGv_i64 src;
+
+    if (gen_trap_ifnofpu(dc)) {
+        return true;
+    }
+    if (gen_trap_float128(dc)) {
+        return true;
+    }
+
+    gen_op_clear_ieee_excp_and_FTT();
+    src = gen_load_fpr_D(dc, a->rs);
+    func(tcg_env, src);
+    gen_op_store_QT0_fpr(QFPREG(a->rd));
+    gen_update_fprs_dirty(dc, QFPREG(a->rd));
+    return advance_pc(dc);
+}
+
+TRANS(FdTOq, ALL, do_env_qd, a, gen_helper_fdtoq)
+TRANS(FxTOq, 64, do_env_qd, a, gen_helper_fxtoq)
+
+static bool do_fff(DisasContext *dc, arg_r_r_r *a,
+                   void (*func)(TCGv_i32, TCGv_i32, TCGv_i32))
+{
+    TCGv_i32 src1, src2;
+
+    if (gen_trap_ifnofpu(dc)) {
+        return true;
+    }
+
+    src1 = gen_load_fpr_F(dc, a->rs1);
+    src2 = gen_load_fpr_F(dc, a->rs2);
+    func(src1, src1, src2);
+    gen_store_fpr_F(dc, a->rd, src1);
+    return advance_pc(dc);
+}
+
+TRANS(FPADD16s, VIS1, do_fff, a, tcg_gen_vec_add16_i32)
+TRANS(FPADD32s, VIS1, do_fff, a, tcg_gen_add_i32)
+TRANS(FPSUB16s, VIS1, do_fff, a, tcg_gen_vec_sub16_i32)
+TRANS(FPSUB32s, VIS1, do_fff, a, tcg_gen_sub_i32)
+TRANS(FNORs, VIS1, do_fff, a, tcg_gen_nor_i32)
+TRANS(FANDNOTs, VIS1, do_fff, a, tcg_gen_andc_i32)
+TRANS(FXORs, VIS1, do_fff, a, tcg_gen_xor_i32)
+TRANS(FNANDs, VIS1, do_fff, a, tcg_gen_nand_i32)
+TRANS(FANDs, VIS1, do_fff, a, tcg_gen_and_i32)
+TRANS(FXNORs, VIS1, do_fff, a, tcg_gen_eqv_i32)
+TRANS(FORNOTs, VIS1, do_fff, a, tcg_gen_orc_i32)
+TRANS(FORs, VIS1, do_fff, a, tcg_gen_or_i32)
+
+static bool do_env_fff(DisasContext *dc, arg_r_r_r *a,
+                       void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
+{
+    TCGv_i32 src1, src2;
+
+    if (gen_trap_ifnofpu(dc)) {
+        return true;
+    }
+
+    gen_op_clear_ieee_excp_and_FTT();
+    src1 = gen_load_fpr_F(dc, a->rs1);
+    src2 = gen_load_fpr_F(dc, a->rs2);
+    func(src1, tcg_env, src1, src2);
+    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
+    gen_store_fpr_F(dc, a->rd, src1);
+    return advance_pc(dc);
+}
+
+TRANS(FADDs, ALL, do_env_fff, a, gen_helper_fadds)
+TRANS(FSUBs, ALL, do_env_fff, a, gen_helper_fsubs)
+TRANS(FMULs, ALL, do_env_fff, a, gen_helper_fmuls)
+TRANS(FDIVs, ALL, do_env_fff, a, gen_helper_fdivs)
+
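Note: FPADD16s above maps to tcg_gen_vec_add16_i32, i.e. lane-wise 16-bit adds with no carry across lanes (the 64-bit counterpart FPADD16 appears just below). As a plain-C model of the semantics, not tied to any QEMU API:

    /* Add 16-bit lanes independently; carries must not cross lanes. */
    static inline uint64_t fpadd16_model(uint64_t a, uint64_t b)
    {
        uint64_t r = 0;
        for (int lane = 0; lane < 4; lane++) {
            uint16_t x = a >> (lane * 16);
            uint16_t y = b >> (lane * 16);
            r |= (uint64_t)(uint16_t)(x + y) << (lane * 16);
        }
        return r;
    }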
+static bool do_ddd(DisasContext *dc, arg_r_r_r *a,
+                   void (*func)(TCGv_i64, TCGv_i64, TCGv_i64))
+{
+    TCGv_i64 dst, src1, src2;
+
+    if (gen_trap_ifnofpu(dc)) {
+        return true;
+    }
+
+    dst = gen_dest_fpr_D(dc, a->rd);
+    src1 = gen_load_fpr_D(dc, a->rs1);
+    src2 = gen_load_fpr_D(dc, a->rs2);
+    func(dst, src1, src2);
+    gen_store_fpr_D(dc, a->rd, dst);
+    return advance_pc(dc);
+}
+
+TRANS(FMUL8x16, VIS1, do_ddd, a, gen_helper_fmul8x16)
+TRANS(FMUL8x16AU, VIS1, do_ddd, a, gen_helper_fmul8x16au)
+TRANS(FMUL8x16AL, VIS1, do_ddd, a, gen_helper_fmul8x16al)
+TRANS(FMUL8SUx16, VIS1, do_ddd, a, gen_helper_fmul8sux16)
+TRANS(FMUL8ULx16, VIS1, do_ddd, a, gen_helper_fmul8ulx16)
+TRANS(FMULD8SUx16, VIS1, do_ddd, a, gen_helper_fmuld8sux16)
+TRANS(FMULD8ULx16, VIS1, do_ddd, a, gen_helper_fmuld8ulx16)
+TRANS(FPMERGE, VIS1, do_ddd, a, gen_helper_fpmerge)
+TRANS(FEXPAND, VIS1, do_ddd, a, gen_helper_fexpand)
+
+TRANS(FPADD16, VIS1, do_ddd, a, tcg_gen_vec_add16_i64)
+TRANS(FPADD32, VIS1, do_ddd, a, tcg_gen_vec_add32_i64)
+TRANS(FPSUB16, VIS1, do_ddd, a, tcg_gen_vec_sub16_i64)
+TRANS(FPSUB32, VIS1, do_ddd, a, tcg_gen_vec_sub32_i64)
+TRANS(FNORd, VIS1, do_ddd, a, tcg_gen_nor_i64)
+TRANS(FANDNOTd, VIS1, do_ddd, a, tcg_gen_andc_i64)
+TRANS(FXORd, VIS1, do_ddd, a, tcg_gen_xor_i64)
+TRANS(FNANDd, VIS1, do_ddd, a, tcg_gen_nand_i64)
+TRANS(FANDd, VIS1, do_ddd, a, tcg_gen_and_i64)
+TRANS(FXNORd, VIS1, do_ddd, a, tcg_gen_eqv_i64)
+TRANS(FORNOTd, VIS1, do_ddd, a, tcg_gen_orc_i64)
+TRANS(FORd, VIS1, do_ddd, a, tcg_gen_or_i64)
+
+TRANS(FPACK32, VIS1, do_ddd, a, gen_op_fpack32)
+TRANS(FALIGNDATAg, VIS1, do_ddd, a, gen_op_faligndata)
+TRANS(BSHUFFLE, VIS2, do_ddd, a, gen_op_bshuffle)
+
+static bool do_rdd(DisasContext *dc, arg_r_r_r *a,
+                   void (*func)(TCGv, TCGv_i64, TCGv_i64))
+{
+    TCGv_i64 src1, src2;
+    TCGv dst;
+
+    if (gen_trap_ifnofpu(dc)) {
+        return true;
+    }
+
+    dst = gen_dest_gpr(dc, a->rd);
+    src1 = gen_load_fpr_D(dc, a->rs1);
+    src2 = gen_load_fpr_D(dc, a->rs2);
+    func(dst, src1, src2);
+    gen_store_gpr(dc, a->rd, dst);
+    return advance_pc(dc);
+}
+
+TRANS(FPCMPLE16, VIS1, do_rdd, a, gen_helper_fcmple16)
+TRANS(FPCMPNE16, VIS1, do_rdd, a, gen_helper_fcmpne16)
+TRANS(FPCMPGT16, VIS1, do_rdd, a, gen_helper_fcmpgt16)
+TRANS(FPCMPEQ16, VIS1, do_rdd, a, gen_helper_fcmpeq16)
+
+TRANS(FPCMPLE32, VIS1, do_rdd, a, gen_helper_fcmple32)
+TRANS(FPCMPNE32, VIS1, do_rdd, a, gen_helper_fcmpne32)
+TRANS(FPCMPGT32, VIS1, do_rdd, a, gen_helper_fcmpgt32)
+TRANS(FPCMPEQ32, VIS1, do_rdd, a, gen_helper_fcmpeq32)
+
+static bool do_env_ddd(DisasContext *dc, arg_r_r_r *a,
+                       void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
+{
+    TCGv_i64 dst, src1, src2;
+
+    if (gen_trap_ifnofpu(dc)) {
+        return true;
+    }
+
+    gen_op_clear_ieee_excp_and_FTT();
+    dst = gen_dest_fpr_D(dc, a->rd);
+    src1 = gen_load_fpr_D(dc, a->rs1);
+    src2 = gen_load_fpr_D(dc, a->rs2);
+    func(dst, tcg_env, src1, src2);
+    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
+    gen_store_fpr_D(dc, a->rd, dst);
+    return advance_pc(dc);
+}
+
+TRANS(FADDd, ALL, do_env_ddd, a, gen_helper_faddd)
+TRANS(FSUBd, ALL, do_env_ddd, a, gen_helper_fsubd)
+TRANS(FMULd, ALL, do_env_ddd, a, gen_helper_fmuld)
+TRANS(FDIVd, ALL, do_env_ddd, a, gen_helper_fdivd)
+
+static bool trans_FsMULd(DisasContext *dc, arg_r_r_r *a)
+{
+    TCGv_i64 dst;
+    TCGv_i32 src1, src2;
+
+    if (gen_trap_ifnofpu(dc)) {
+        return true;
+    }
+    if (!(dc->def->features & CPU_FEATURE_FSMULD)) {
+        return raise_unimpfpop(dc);
+    }
+
+    gen_op_clear_ieee_excp_and_FTT();
+    dst = gen_dest_fpr_D(dc, a->rd);
+    src1 = gen_load_fpr_F(dc, a->rs1);
+    src2 = gen_load_fpr_F(dc, a->rs2);
+    gen_helper_fsmuld(dst, tcg_env, src1, src2);
+    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
+    gen_store_fpr_D(dc, a->rd, dst);
+    return advance_pc(dc);
+}
+
+static bool do_dddd(DisasContext *dc, arg_r_r_r *a,
+                    void (*func)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
+{
+    TCGv_i64 dst, src0, src1, src2;
+
+    if (gen_trap_ifnofpu(dc)) {
+        return true;
+    }
+
+    dst = gen_dest_fpr_D(dc, a->rd);
+    src0 = gen_load_fpr_D(dc, a->rd);
+    src1 = gen_load_fpr_D(dc, a->rs1);
+    src2 = gen_load_fpr_D(dc, a->rs2);
+    func(dst, src0, src1, src2);
+    gen_store_fpr_D(dc, a->rd, dst);
+    return advance_pc(dc);
+}
+
+TRANS(PDIST, VIS1, do_dddd, a, gen_helper_pdist)
+
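Note: the do_rdd() shape serves the VIS pixel compares, which reduce two 64-bit FPR operands to a small bitmask in an integer register. A plain-C model of one of them (the lane-to-bit ordering here is an assumption for illustration, not taken from the helpers):

    /* FPCMPGT16-style model: one result bit per signed 16-bit lane. */
    static inline uint64_t fcmpgt16_model(uint64_t a, uint64_t b)
    {
        uint64_t r = 0;
        for (int lane = 0; lane < 4; lane++) {
            int16_t x = a >> (lane * 16);
            int16_t y = b >> (lane * 16);
            if (x > y) {
                r |= 1ull << lane;
            }
        }
        return r;
    }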
+static bool do_env_qqq(DisasContext *dc, arg_r_r_r *a,
+                       void (*func)(TCGv_env))
+{
+    if (gen_trap_ifnofpu(dc)) {
+        return true;
+    }
+    if (gen_trap_float128(dc)) {
+        return true;
+    }
+
+    gen_op_clear_ieee_excp_and_FTT();
+    gen_op_load_fpr_QT0(QFPREG(a->rs1));
+    gen_op_load_fpr_QT1(QFPREG(a->rs2));
+    func(tcg_env);
+    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
+    gen_op_store_QT0_fpr(QFPREG(a->rd));
+    gen_update_fprs_dirty(dc, QFPREG(a->rd));
+    return advance_pc(dc);
+}
+
+TRANS(FADDq, ALL, do_env_qqq, a, gen_helper_faddq)
+TRANS(FSUBq, ALL, do_env_qqq, a, gen_helper_fsubq)
+TRANS(FMULq, ALL, do_env_qqq, a, gen_helper_fmulq)
+TRANS(FDIVq, ALL, do_env_qqq, a, gen_helper_fdivq)
+
+static bool trans_FdMULq(DisasContext *dc, arg_r_r_r *a)
+{
+    TCGv_i64 src1, src2;
+
+    if (gen_trap_ifnofpu(dc)) {
+        return true;
+    }
+    if (gen_trap_float128(dc)) {
+        return true;
+    }
+
+    gen_op_clear_ieee_excp_and_FTT();
+    src1 = gen_load_fpr_D(dc, a->rs1);
+    src2 = gen_load_fpr_D(dc, a->rs2);
+    gen_helper_fdmulq(tcg_env, src1, src2);
+    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
+    gen_op_store_QT0_fpr(QFPREG(a->rd));
+    gen_update_fprs_dirty(dc, QFPREG(a->rd));
+    return advance_pc(dc);
+}
+
+static bool do_fmovr(DisasContext *dc, arg_FMOVRs *a, bool is_128,
+                     void (*func)(DisasContext *, DisasCompare *, int, int))
+{
+    DisasCompare cmp;
+
+    if (gen_trap_ifnofpu(dc)) {
+        return true;
+    }
+    if (is_128 && gen_trap_float128(dc)) {
+        return true;
+    }
+
+    gen_op_clear_ieee_excp_and_FTT();
+    gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1));
+    func(dc, &cmp, a->rd, a->rs2);
+    return advance_pc(dc);
+}
+
+TRANS(FMOVRs, 64, do_fmovr, a, false, gen_fmovs)
+TRANS(FMOVRd, 64, do_fmovr, a, false, gen_fmovd)
+TRANS(FMOVRq, 64, do_fmovr, a, true, gen_fmovq)
+
+static bool do_fmovcc(DisasContext *dc, arg_FMOVscc *a, bool is_128,
+                      void (*func)(DisasContext *, DisasCompare *, int, int))
+{
+    DisasCompare cmp;
+
+    if (gen_trap_ifnofpu(dc)) {
+        return true;
+    }
+    if (is_128 && gen_trap_float128(dc)) {
+        return true;
+    }
+
+    gen_op_clear_ieee_excp_and_FTT();
+    gen_compare(&cmp, a->cc, a->cond, dc);
+    func(dc, &cmp, a->rd, a->rs2);
+    return advance_pc(dc);
+}
+
+TRANS(FMOVscc, 64, do_fmovcc, a, false, gen_fmovs)
+TRANS(FMOVdcc, 64, do_fmovcc, a, false, gen_fmovd)
+TRANS(FMOVqcc, 64, do_fmovcc, a, true, gen_fmovq)
+
+static bool do_fmovfcc(DisasContext *dc, arg_FMOVsfcc *a, bool is_128,
+                       void (*func)(DisasContext *, DisasCompare *, int, int))
+{
+    DisasCompare cmp;
+
+    if (gen_trap_ifnofpu(dc)) {
+        return true;
+    }
+    if (is_128 && gen_trap_float128(dc)) {
+        return true;
+    }
+
+    gen_op_clear_ieee_excp_and_FTT();
+    gen_fcompare(&cmp, a->cc, a->cond);
+    func(dc, &cmp, a->rd, a->rs2);
+    return advance_pc(dc);
+}
+
+TRANS(FMOVsfcc, 64, do_fmovfcc, a, false, gen_fmovs)
+TRANS(FMOVdfcc, 64, do_fmovfcc, a, false, gen_fmovd)
+TRANS(FMOVqfcc, 64, do_fmovfcc, a, true, gen_fmovq)
+
+static bool do_fcmps(DisasContext *dc, arg_FCMPs *a, bool e)
+{
+    TCGv_i32 src1, src2;
+
+    if (avail_32(dc) && a->cc != 0) {
+        return false;
+    }
+    if (gen_trap_ifnofpu(dc)) {
+        return true;
+    }
+
+    gen_op_clear_ieee_excp_and_FTT();
+    src1 = gen_load_fpr_F(dc, a->rs1);
+    src2 = gen_load_fpr_F(dc, a->rs2);
+    if (e) {
+        gen_op_fcmpes(a->cc, src1, src2);
+    } else {
+        gen_op_fcmps(a->cc, src1, src2);
+    }
+    return advance_pc(dc);
+}
+
+TRANS(FCMPs, ALL, do_fcmps, a, false)
+TRANS(FCMPEs, ALL, do_fcmps, a, true)
+
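Note: all three FMOV families above reduce to building a DisasCompare and handing it to a per-width store callback. One plausible shape for the 64-bit callback, sketched from the cmp->cond/c1/c2 usage implied by this patch (not the verbatim helper):

    static void gen_fmovd_sketch(DisasContext *dc, DisasCompare *cmp,
                                 int rd, int rs)
    {
        TCGv_i64 dst = gen_dest_fpr_D(dc, rd);

        /* dst = (c1 <cond> c2) ? %f[rs] : %f[rd]; FMOVcc is 64-bit
           only, so TCGv and TCGv_i64 coincide for the comparison. */
        tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, cmp->c2,
                            gen_load_fpr_D(dc, rs),
                            gen_load_fpr_D(dc, rd));
        gen_store_fpr_D(dc, rd, dst);
    }

The three front ends differ only in where the comparison comes from: a GPR test (FMOVR), the integer condition codes (FMOVcc), or an %fcc field (FMOVfcc).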
+static bool do_fcmpd(DisasContext *dc, arg_FCMPd *a, bool e)
+{
+    TCGv_i64 src1, src2;
+
+    if (avail_32(dc) && a->cc != 0) {
+        return false;
+    }
+    if (gen_trap_ifnofpu(dc)) {
+        return true;
+    }
+
+    gen_op_clear_ieee_excp_and_FTT();
+    src1 = gen_load_fpr_D(dc, a->rs1);
+    src2 = gen_load_fpr_D(dc, a->rs2);
+    if (e) {
+        gen_op_fcmped(a->cc, src1, src2);
+    } else {
+        gen_op_fcmpd(a->cc, src1, src2);
+    }
+    return advance_pc(dc);
+}
+
+TRANS(FCMPd, ALL, do_fcmpd, a, false)
+TRANS(FCMPEd, ALL, do_fcmpd, a, true)
+
+static bool do_fcmpq(DisasContext *dc, arg_FCMPq *a, bool e)
+{
+    if (avail_32(dc) && a->cc != 0) {
+        return false;
+    }
+    if (gen_trap_ifnofpu(dc)) {
+        return true;
+    }
+    if (gen_trap_float128(dc)) {
+        return true;
+    }
+
+    gen_op_clear_ieee_excp_and_FTT();
+    gen_op_load_fpr_QT0(QFPREG(a->rs1));
+    gen_op_load_fpr_QT1(QFPREG(a->rs2));
+    if (e) {
+        gen_op_fcmpeq(a->cc);
+    } else {
+        gen_op_fcmpq(a->cc);
+    }
+    return advance_pc(dc);
+}
+
+TRANS(FCMPq, ALL, do_fcmpq, a, false)
+TRANS(FCMPEq, ALL, do_fcmpq, a, true)
+
 static void sparc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
 {
     DisasContext *dc = container_of(dcbase, DisasContext, base);
@@ -5630,7 +5379,10 @@ static void sparc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
     insn = translator_ldl(env, &dc->base, dc->pc);
     dc->base.pc_next += 4;
-    disas_sparc_insn(dc, insn);
+
+    if (!decode(dc, insn)) {
+        gen_exception(dc, TT_ILL_INSN);
+    }
 
     if (dc->base.is_jmp == DISAS_NORETURN) {
         return;
     }
@@ -5643,6 +5395,7 @@ static void sparc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
 {
     DisasContext *dc = container_of(dcbase, DisasContext, base);
+    DisasDelayException *e, *e_next;
     bool may_lookup;
 
     switch (dc->base.is_jmp) {
@@ -5654,10 +5407,10 @@ static void sparc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
         break;
     }
 
+    may_lookup = true;
    if (dc->pc & 3) {
        switch (dc->pc) {
        case DYNAMIC_PC_LOOKUP:
-            may_lookup = true;
            break;
        case DYNAMIC_PC:
            may_lookup = false;
@@ -5667,10 +5420,24 @@ static void sparc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
         }
     } else {
         tcg_gen_movi_tl(cpu_pc, dc->pc);
-        may_lookup = true;
     }
 
-    save_npc(dc);
+    if (dc->npc & 3) {
+        switch (dc->npc) {
+        case JUMP_PC:
+            gen_generic_branch(dc);
+            break;
+        case DYNAMIC_PC:
+            may_lookup = false;
+            break;
+        case DYNAMIC_PC_LOOKUP:
+            break;
+        default:
+            g_assert_not_reached();
+        }
+    } else {
+        tcg_gen_movi_tl(cpu_npc, dc->npc);
+    }
     if (may_lookup) {
         tcg_gen_lookup_and_goto_ptr();
     } else {
@@ -5690,6 +5457,19 @@ static void sparc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
     default:
         g_assert_not_reached();
     }
+
+    for (e = dc->delay_excp_list; e ; e = e_next) {
+        gen_set_label(e->lab);
+
+        tcg_gen_movi_tl(cpu_pc, e->pc);
+        if (e->npc % 4 == 0) {
+            tcg_gen_movi_tl(cpu_npc, e->npc);
+        }
+        gen_helper_raise_exception(tcg_env, e->excp);
+
+        e_next = e->next;
+        g_free(e);
+    }
 }
 
 static void sparc_tr_disas_log(const DisasContextBase *dcbase,
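Note: the cleanup loop added to sparc_tr_tb_stop() drains dc->delay_excp_list, emitting one landing pad per exception that was deferred during translation. From the fields used in that loop, the record presumably looks like this (a sketch; only the members visible above are shown):

    typedef struct DisasDelayException {
        struct DisasDelayException *next; /* singly linked, head in dc */
        TCGLabel *lab;                    /* branch target for the raise */
        TCGv_i32 excp;                    /* trap number to deliver */
        /* pc/npc captured at the faulting insn; per the loop above, npc
           is only stored back when e->npc % 4 == 0, i.e. when it is a
           real address rather than one of the odd sentinel values. */
        target_ulong pc;
        target_ulong npc;
    } DisasDelayException;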
@@ -5735,8 +5515,6 @@ void sparc_tcg_init(void)
 #ifdef TARGET_SPARC64
         { &cpu_xcc, offsetof(CPUSPARCState, xcc), "xcc" },
         { &cpu_fprs, offsetof(CPUSPARCState, fprs), "fprs" },
-#else
-        { &cpu_wim, offsetof(CPUSPARCState, wim), "wim" },
 #endif
         { &cpu_cc_op, offsetof(CPUSPARCState, cc_op), "cc_op" },
         { &cpu_psr, offsetof(CPUSPARCState, psr), "psr" },
@@ -5745,15 +5523,6 @@ void sparc_tcg_init(void)
     static const struct { TCGv *ptr; int off; const char *name; } rtl[] = {
 #ifdef TARGET_SPARC64
         { &cpu_gsr, offsetof(CPUSPARCState, gsr), "gsr" },
-        { &cpu_tick_cmpr, offsetof(CPUSPARCState, tick_cmpr), "tick_cmpr" },
-        { &cpu_stick_cmpr, offsetof(CPUSPARCState, stick_cmpr), "stick_cmpr" },
-        { &cpu_hstick_cmpr, offsetof(CPUSPARCState, hstick_cmpr),
-          "hstick_cmpr" },
-        { &cpu_hintp, offsetof(CPUSPARCState, hintp), "hintp" },
-        { &cpu_htba, offsetof(CPUSPARCState, htba), "htba" },
-        { &cpu_hver, offsetof(CPUSPARCState, hver), "hver" },
-        { &cpu_ssr, offsetof(CPUSPARCState, ssr), "ssr" },
-        { &cpu_ver, offsetof(CPUSPARCState, version), "ver" },
 #endif
         { &cpu_cond, offsetof(CPUSPARCState, cond), "cond" },
         { &cpu_cc_src, offsetof(CPUSPARCState, cc_src), "cc_src" },
@@ -5763,9 +5532,7 @@ void sparc_tcg_init(void)
         { &cpu_pc, offsetof(CPUSPARCState, pc), "pc" },
         { &cpu_npc, offsetof(CPUSPARCState, npc), "npc" },
         { &cpu_y, offsetof(CPUSPARCState, y), "y" },
-#ifndef CONFIG_USER_ONLY
         { &cpu_tbr, offsetof(CPUSPARCState, tbr), "tbr" },
-#endif
     };
 
     unsigned int i;