Diffstat (limited to 'target-ppc')
-rw-r--r--  target-ppc/helper.h      |   1
-rw-r--r--  target-ppc/int_helper.c  |  19
-rw-r--r--  target-ppc/kvm.c         | 371
-rw-r--r--  target-ppc/kvm_ppc.h     |  12
-rw-r--r--  target-ppc/translate.c   | 123
5 files changed, 436 insertions(+), 90 deletions(-)
diff --git a/target-ppc/helper.h b/target-ppc/helper.h
index 509eae52ff..0cfdc8ab8f 100644
--- a/target-ppc/helper.h
+++ b/target-ppc/helper.h
@@ -28,7 +28,6 @@ DEF_HELPER_2(icbi, void, env, tl)
 DEF_HELPER_5(lscbx, tl, env, tl, i32, i32, i32)
 
 #if defined(TARGET_PPC64)
-DEF_HELPER_3(mulldo, i64, env, i64, i64)
 DEF_HELPER_4(divdeu, i64, env, i64, i64, i32)
 DEF_HELPER_4(divde, i64, env, i64, i64, i32)
 #endif
diff --git a/target-ppc/int_helper.c b/target-ppc/int_helper.c
index f6e8846707..713d777076 100644
--- a/target-ppc/int_helper.c
+++ b/target-ppc/int_helper.c
@@ -24,23 +24,6 @@
 #include "helper_regs.h"
 /*****************************************************************************/
 /* Fixed point operations helpers */
-#if defined(TARGET_PPC64)
-
-uint64_t helper_mulldo(CPUPPCState *env, uint64_t arg1, uint64_t arg2)
-{
-    int64_t th;
-    uint64_t tl;
-
-    muls64(&tl, (uint64_t *)&th, arg1, arg2);
-    /* If th != 0 && th != -1, then we had an overflow */
-    if (likely((uint64_t)(th + 1) <= 1)) {
-        env->ov = 0;
-    } else {
-        env->so = env->ov = 1;
-    }
-    return (int64_t)tl;
-}
-#endif
 
 target_ulong helper_divweu(CPUPPCState *env, target_ulong ra, target_ulong rb,
                            uint32_t oe)
@@ -238,7 +221,7 @@ target_ulong helper_srad(CPUPPCState *env, target_ulong value,
     if (likely((uint64_t)shift != 0)) {
         shift &= 0x3f;
         ret = (int64_t)value >> shift;
-        if (likely(ret >= 0 || (value & ((1 << shift) - 1)) == 0)) {
+        if (likely(ret >= 0 || (value & ((1ULL << shift) - 1)) == 0)) {
             env->ca = 0;
         } else {
             env->ca = 1;
diff --git a/target-ppc/kvm.c b/target-ppc/kvm.c
index 42718f77ae..9c23c6ba0d 100644
--- a/target-ppc/kvm.c
+++ b/target-ppc/kvm.c
@@ -38,6 +38,7 @@
 #include "hw/ppc/ppc.h"
 #include "sysemu/watchdog.h"
 #include "trace.h"
+#include "exec/gdbstub.h"
 
 //#define DEBUG_KVM
 
@@ -72,6 +73,8 @@ static int cap_papr;
 static int cap_htab_fd;
 static int cap_fixup_hcalls;
 
+static uint32_t debug_inst_opcode;
+
 /* XXX We have a race condition where we actually have a level triggered
  *     interrupt, but the infrastructure can't expose that yet, so the guest
  *     takes but ignores it, goes to sleep and never gets notified that there's
@@ -410,6 +413,38 @@ unsigned long kvm_arch_vcpu_id(CPUState *cpu)
     return ppc_get_vcpu_dt_id(POWERPC_CPU(cpu));
 }
 
+/* e500 supports 2 h/w breakpoint and 2 watchpoint.
+ * book3s supports only 1 watchpoint, so array size
+ * of 4 is sufficient for now.
+ */
+#define MAX_HW_BKPTS 4
+
+static struct HWBreakpoint {
+    target_ulong addr;
+    int type;
+} hw_debug_points[MAX_HW_BKPTS];
+
+static CPUWatchpoint hw_watchpoint;
+
+/* Default there is no breakpoint and watchpoint supported */
+static int max_hw_breakpoint;
+static int max_hw_watchpoint;
+static int nb_hw_breakpoint;
+static int nb_hw_watchpoint;
+
+static void kvmppc_hw_debug_points_init(CPUPPCState *cenv)
+{
+    if (cenv->excp_model == POWERPC_EXCP_BOOKE) {
+        max_hw_breakpoint = 2;
+        max_hw_watchpoint = 2;
+    }
+
+    if ((max_hw_breakpoint + max_hw_watchpoint) > MAX_HW_BKPTS) {
+        fprintf(stderr, "Error initializing h/w breakpoints\n");
+        return;
+    }
+}
+
 int kvm_arch_init_vcpu(CPUState *cs)
 {
     PowerPCCPU *cpu = POWERPC_CPU(cs);
@@ -436,6 +471,9 @@ int kvm_arch_init_vcpu(CPUState *cs)
         break;
     }
 
+    kvm_get_one_reg(cs, KVM_REG_PPC_DEBUG_INST, &debug_inst_opcode);
+    kvmppc_hw_debug_points_init(cenv);
+
     return ret;
 }
 
@@ -899,6 +937,11 @@
     return ret;
 }
 
+static void kvm_sync_excp(CPUPPCState *env, int vector, int ivor)
+{
+    env->excp_vectors[vector] = env->spr[ivor] + env->spr[SPR_BOOKE_IVPR];
+}
+
 int kvm_arch_get_registers(CPUState *cs)
 {
     PowerPCCPU *cpu = POWERPC_CPU(cs);
@@ -981,35 +1024,57 @@
         if (sregs.u.e.features & KVM_SREGS_E_IVOR) {
             env->spr[SPR_BOOKE_IVOR0] = sregs.u.e.ivor_low[0];
+            kvm_sync_excp(env, POWERPC_EXCP_CRITICAL, SPR_BOOKE_IVOR0);
             env->spr[SPR_BOOKE_IVOR1] = sregs.u.e.ivor_low[1];
+            kvm_sync_excp(env, POWERPC_EXCP_MCHECK, SPR_BOOKE_IVOR1);
             env->spr[SPR_BOOKE_IVOR2] = sregs.u.e.ivor_low[2];
+            kvm_sync_excp(env, POWERPC_EXCP_DSI, SPR_BOOKE_IVOR2);
             env->spr[SPR_BOOKE_IVOR3] = sregs.u.e.ivor_low[3];
+            kvm_sync_excp(env, POWERPC_EXCP_ISI, SPR_BOOKE_IVOR3);
             env->spr[SPR_BOOKE_IVOR4] = sregs.u.e.ivor_low[4];
+            kvm_sync_excp(env, POWERPC_EXCP_EXTERNAL, SPR_BOOKE_IVOR4);
             env->spr[SPR_BOOKE_IVOR5] = sregs.u.e.ivor_low[5];
+            kvm_sync_excp(env, POWERPC_EXCP_ALIGN, SPR_BOOKE_IVOR5);
             env->spr[SPR_BOOKE_IVOR6] = sregs.u.e.ivor_low[6];
+            kvm_sync_excp(env, POWERPC_EXCP_PROGRAM, SPR_BOOKE_IVOR6);
             env->spr[SPR_BOOKE_IVOR7] = sregs.u.e.ivor_low[7];
+            kvm_sync_excp(env, POWERPC_EXCP_FPU, SPR_BOOKE_IVOR7);
             env->spr[SPR_BOOKE_IVOR8] = sregs.u.e.ivor_low[8];
+            kvm_sync_excp(env, POWERPC_EXCP_SYSCALL, SPR_BOOKE_IVOR8);
             env->spr[SPR_BOOKE_IVOR9] = sregs.u.e.ivor_low[9];
+            kvm_sync_excp(env, POWERPC_EXCP_APU, SPR_BOOKE_IVOR9);
             env->spr[SPR_BOOKE_IVOR10] = sregs.u.e.ivor_low[10];
+            kvm_sync_excp(env, POWERPC_EXCP_DECR, SPR_BOOKE_IVOR10);
             env->spr[SPR_BOOKE_IVOR11] = sregs.u.e.ivor_low[11];
+            kvm_sync_excp(env, POWERPC_EXCP_FIT, SPR_BOOKE_IVOR11);
             env->spr[SPR_BOOKE_IVOR12] = sregs.u.e.ivor_low[12];
+            kvm_sync_excp(env, POWERPC_EXCP_WDT, SPR_BOOKE_IVOR12);
             env->spr[SPR_BOOKE_IVOR13] = sregs.u.e.ivor_low[13];
+            kvm_sync_excp(env, POWERPC_EXCP_DTLB, SPR_BOOKE_IVOR13);
             env->spr[SPR_BOOKE_IVOR14] = sregs.u.e.ivor_low[14];
+            kvm_sync_excp(env, POWERPC_EXCP_ITLB, SPR_BOOKE_IVOR14);
             env->spr[SPR_BOOKE_IVOR15] = sregs.u.e.ivor_low[15];
+            kvm_sync_excp(env, POWERPC_EXCP_DEBUG, SPR_BOOKE_IVOR15);
 
             if (sregs.u.e.features & KVM_SREGS_E_SPE) {
                 env->spr[SPR_BOOKE_IVOR32] = sregs.u.e.ivor_high[0];
+                kvm_sync_excp(env, POWERPC_EXCP_SPEU, SPR_BOOKE_IVOR32);
                 env->spr[SPR_BOOKE_IVOR33] = sregs.u.e.ivor_high[1];
+                kvm_sync_excp(env, POWERPC_EXCP_EFPDI, SPR_BOOKE_IVOR33);
                 env->spr[SPR_BOOKE_IVOR34] = sregs.u.e.ivor_high[2];
+                kvm_sync_excp(env, POWERPC_EXCP_EFPRI, SPR_BOOKE_IVOR34);
             }
 
             if (sregs.u.e.features & KVM_SREGS_E_PM) {
                 env->spr[SPR_BOOKE_IVOR35] = sregs.u.e.ivor_high[3];
+                kvm_sync_excp(env, POWERPC_EXCP_EPERFM, SPR_BOOKE_IVOR35);
             }
 
             if (sregs.u.e.features & KVM_SREGS_E_PC) {
                 env->spr[SPR_BOOKE_IVOR36] = sregs.u.e.ivor_high[4];
+                kvm_sync_excp(env, POWERPC_EXCP_DOORI, SPR_BOOKE_IVOR36);
                 env->spr[SPR_BOOKE_IVOR37] = sregs.u.e.ivor_high[5];
+                kvm_sync_excp(env, POWERPC_EXCP_DOORCI, SPR_BOOKE_IVOR37);
             }
         }
@@ -1244,6 +1309,259 @@ static int kvmppc_handle_dcr_write(CPUPPCState *env, uint32_t dcrn, uint32_t dat
     return 0;
 }
 
+int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
+{
+    /* Mixed endian case is not handled */
+    uint32_t sc = debug_inst_opcode;
+
+    if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
+                            sizeof(sc), 0) ||
+        cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&sc, sizeof(sc), 1)) {
+        return -EINVAL;
+    }
+
+    return 0;
+}
+
+int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
+{
+    uint32_t sc;
+
+    if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&sc, sizeof(sc), 0) ||
+        sc != debug_inst_opcode ||
+        cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
+                            sizeof(sc), 1)) {
+        return -EINVAL;
+    }
+
+    return 0;
+}
+
+static int find_hw_breakpoint(target_ulong addr, int type)
+{
+    int n;
+
+    assert((nb_hw_breakpoint + nb_hw_watchpoint)
+           <= ARRAY_SIZE(hw_debug_points));
+
+    for (n = 0; n < nb_hw_breakpoint + nb_hw_watchpoint; n++) {
+        if (hw_debug_points[n].addr == addr &&
+            hw_debug_points[n].type == type) {
+            return n;
+        }
+    }
+
+    return -1;
+}
+
+static int find_hw_watchpoint(target_ulong addr, int *flag)
+{
+    int n;
+
+    n = find_hw_breakpoint(addr, GDB_WATCHPOINT_ACCESS);
+    if (n >= 0) {
+        *flag = BP_MEM_ACCESS;
+        return n;
+    }
+
+    n = find_hw_breakpoint(addr, GDB_WATCHPOINT_WRITE);
+    if (n >= 0) {
+        *flag = BP_MEM_WRITE;
+        return n;
+    }
+
+    n = find_hw_breakpoint(addr, GDB_WATCHPOINT_READ);
+    if (n >= 0) {
+        *flag = BP_MEM_READ;
+        return n;
+    }
+
+    return -1;
+}
+
+int kvm_arch_insert_hw_breakpoint(target_ulong addr,
+                                  target_ulong len, int type)
+{
+    if ((nb_hw_breakpoint + nb_hw_watchpoint) >= ARRAY_SIZE(hw_debug_points)) {
+        return -ENOBUFS;
+    }
+
+    hw_debug_points[nb_hw_breakpoint + nb_hw_watchpoint].addr = addr;
+    hw_debug_points[nb_hw_breakpoint + nb_hw_watchpoint].type = type;
+
+    switch (type) {
+    case GDB_BREAKPOINT_HW:
+        if (nb_hw_breakpoint >= max_hw_breakpoint) {
+            return -ENOBUFS;
+        }
+
+        if (find_hw_breakpoint(addr, type) >= 0) {
+            return -EEXIST;
+        }
+
+        nb_hw_breakpoint++;
+        break;
+
+    case GDB_WATCHPOINT_WRITE:
+    case GDB_WATCHPOINT_READ:
+    case GDB_WATCHPOINT_ACCESS:
+        if (nb_hw_watchpoint >= max_hw_watchpoint) {
+            return -ENOBUFS;
+        }
+
+        if (find_hw_breakpoint(addr, type) >= 0) {
+            return -EEXIST;
+        }
+
+        nb_hw_watchpoint++;
+        break;
+
+    default:
+        return -ENOSYS;
+    }
+
+    return 0;
+}
+
+int kvm_arch_remove_hw_breakpoint(target_ulong addr,
+                                  target_ulong len, int type)
+{
+    int n;
+
+    n = find_hw_breakpoint(addr, type);
+    if (n < 0) {
+        return -ENOENT;
+    }
+
+    switch (type) {
+    case GDB_BREAKPOINT_HW:
+        nb_hw_breakpoint--;
+        break;
+
+    case GDB_WATCHPOINT_WRITE:
+    case GDB_WATCHPOINT_READ:
+    case GDB_WATCHPOINT_ACCESS:
+        nb_hw_watchpoint--;
+        break;
+
+    default:
+        return -ENOSYS;
+    }
+    hw_debug_points[n] = hw_debug_points[nb_hw_breakpoint + nb_hw_watchpoint];
+
+    return 0;
+}
+
+void kvm_arch_remove_all_hw_breakpoints(void)
+{
+    nb_hw_breakpoint = nb_hw_watchpoint = 0;
+}
+
+void kvm_arch_update_guest_debug(CPUState *cs, struct kvm_guest_debug *dbg)
+{
+    int n;
+
+    /* Software Breakpoint updates */
+    if (kvm_sw_breakpoints_active(cs)) {
+        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
+    }
+
+    assert((nb_hw_breakpoint + nb_hw_watchpoint)
+           <= ARRAY_SIZE(hw_debug_points));
+    assert((nb_hw_breakpoint + nb_hw_watchpoint) <= ARRAY_SIZE(dbg->arch.bp));
+
+    if (nb_hw_breakpoint + nb_hw_watchpoint > 0) {
+        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
+        memset(dbg->arch.bp, 0, sizeof(dbg->arch.bp));
+        for (n = 0; n < nb_hw_breakpoint + nb_hw_watchpoint; n++) {
+            switch (hw_debug_points[n].type) {
+            case GDB_BREAKPOINT_HW:
+                dbg->arch.bp[n].type = KVMPPC_DEBUG_BREAKPOINT;
+                break;
+            case GDB_WATCHPOINT_WRITE:
+                dbg->arch.bp[n].type = KVMPPC_DEBUG_WATCH_WRITE;
+                break;
+            case GDB_WATCHPOINT_READ:
+                dbg->arch.bp[n].type = KVMPPC_DEBUG_WATCH_READ;
+                break;
+            case GDB_WATCHPOINT_ACCESS:
+                dbg->arch.bp[n].type = KVMPPC_DEBUG_WATCH_WRITE |
+                                       KVMPPC_DEBUG_WATCH_READ;
+                break;
+            default:
+                cpu_abort(cs, "Unsupported breakpoint type\n");
+            }
+            dbg->arch.bp[n].addr = hw_debug_points[n].addr;
+        }
+    }
+}
+
+static int kvm_handle_debug(PowerPCCPU *cpu, struct kvm_run *run)
+{
+    CPUState *cs = CPU(cpu);
+    CPUPPCState *env = &cpu->env;
+    struct kvm_debug_exit_arch *arch_info = &run->debug.arch;
+    int handle = 0;
+    int n;
+    int flag = 0;
+
+    if (cs->singlestep_enabled) {
+        handle = 1;
+    } else if (arch_info->status) {
+        if (nb_hw_breakpoint + nb_hw_watchpoint > 0) {
+            if (arch_info->status & KVMPPC_DEBUG_BREAKPOINT) {
+                n = find_hw_breakpoint(arch_info->address, GDB_BREAKPOINT_HW);
+                if (n >= 0) {
+                    handle = 1;
+                }
+            } else if (arch_info->status & (KVMPPC_DEBUG_WATCH_READ |
+                                            KVMPPC_DEBUG_WATCH_WRITE)) {
+                n = find_hw_watchpoint(arch_info->address, &flag);
+                if (n >= 0) {
+                    handle = 1;
+                    cs->watchpoint_hit = &hw_watchpoint;
+                    hw_watchpoint.vaddr = hw_debug_points[n].addr;
+                    hw_watchpoint.flags = flag;
+                }
+            }
+        }
+    } else if (kvm_find_sw_breakpoint(cs, arch_info->address)) {
+        handle = 1;
+    } else {
+        /* QEMU is not able to handle debug exception, so inject
+         * program exception to guest;
+         * Yes program exception NOT debug exception !!
+         * When QEMU is using debug resources then debug exception must
+         * be always set. To achieve this we set MSR_DE and also set
+         * MSRP_DEP so guest cannot change MSR_DE.
+         * When emulating debug resource for guest we want guest
+         * to control MSR_DE (enable/disable debug interrupt on need).
+         * Supporting both configurations are NOT possible.
+         * So the result is that we cannot share debug resources
+         * between QEMU and Guest on BOOKE architecture.
+         * In the current design QEMU gets the priority over guest,
+         * this means that if QEMU is using debug resources then guest
+         * cannot use them;
+         * For software breakpoint QEMU uses a privileged instruction;
+         * So there cannot be any reason that we are here for guest
+         * set debug exception, only possibility is guest executed a
+         * privileged / illegal instruction and that's why we are
+         * injecting a program interrupt.
+         */
+
+        cpu_synchronize_state(cs);
+        /* env->nip is PC, so increment this by 4 to use
+         * ppc_cpu_do_interrupt(), which set srr0 = env->nip - 4.
+         */
+        env->nip += 4;
+        cs->exception_index = POWERPC_EXCP_PROGRAM;
+        env->error_code = POWERPC_EXCP_INVAL;
+        ppc_cpu_do_interrupt(cs);
+    }
+
+    return handle;
+}
+
 int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
 {
     PowerPCCPU *cpu = POWERPC_CPU(cs);
@@ -1284,6 +1602,16 @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
         ret = 0;
         break;
 
+    case KVM_EXIT_DEBUG:
+        DPRINTF("handle debug exception\n");
+        if (kvm_handle_debug(cpu, run)) {
+            ret = EXCP_DEBUG;
+            break;
+        }
+        /* re-enter, this exception was guest-internal */
+        ret = 0;
+        break;
+
     default:
         fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
         ret = -1;
@@ -1369,7 +1697,7 @@ static int read_cpuinfo(const char *field, char *value, int len)
     }
 
     do {
-        if(!fgets(line, sizeof(line), f)) {
+        if (!fgets(line, sizeof(line), f)) {
             break;
         }
         if (!strncmp(line, field, field_len)) {
@@ -1404,6 +1732,17 @@ uint32_t kvmppc_get_tbfreq(void)
     return retval;
 }
 
+bool kvmppc_get_host_serial(char **value)
+{
+    return g_file_get_contents("/proc/device-tree/system-id", value, NULL,
+                               NULL);
+}
+
+bool kvmppc_get_host_model(char **value)
+{
+    return g_file_get_contents("/proc/device-tree/model", value, NULL, NULL);
+}
+
 /* Try to find a device tree node for a CPU with clock-frequency property */
 static int kvmppc_find_cpu_dt(char *buf, int buf_len)
 {
@@ -1496,7 +1835,7 @@ static int kvmppc_get_pvinfo(CPUPPCState *env, struct kvm_ppc_pvinfo *pvinfo)
     PowerPCCPU *cpu = ppc_env_get_cpu(env);
     CPUState *cs = CPU(cpu);
 
-    if (kvm_check_extension(cs->kvm_state, KVM_CAP_PPC_GET_PVINFO) &&
+    if (kvm_vm_check_extension(cs->kvm_state, KVM_CAP_PPC_GET_PVINFO) &&
         !kvm_vm_ioctl(cs->kvm_state, KVM_PPC_GET_PVINFO, pvinfo)) {
         return 0;
     }
@@ -1965,34 +2304,6 @@ void kvm_arch_init_irq_routing(KVMState *s)
 {
 }
 
-int kvm_arch_insert_sw_breakpoint(CPUState *cpu, struct kvm_sw_breakpoint *bp)
-{
-    return -EINVAL;
-}
-
-int kvm_arch_remove_sw_breakpoint(CPUState *cpu, struct kvm_sw_breakpoint *bp)
-{
-    return -EINVAL;
-}
-
-int kvm_arch_insert_hw_breakpoint(target_ulong addr, target_ulong len, int type)
-{
-    return -EINVAL;
-}
-
-int kvm_arch_remove_hw_breakpoint(target_ulong addr, target_ulong len, int type)
-{
-    return -EINVAL;
-}
-
-void kvm_arch_remove_all_hw_breakpoints(void)
-{
-}
-
-void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg)
-{
-}
-
 struct kvm_get_htab_buf {
     struct kvm_get_htab_header header;
     /*
diff --git a/target-ppc/kvm_ppc.h b/target-ppc/kvm_ppc.h
index d9516e73ef..2e0224c6af 100644
--- a/target-ppc/kvm_ppc.h
+++ b/target-ppc/kvm_ppc.h
@@ -19,6 +19,8 @@ uint32_t kvmppc_get_tbfreq(void);
 uint64_t kvmppc_get_clockfreq(void);
 uint32_t kvmppc_get_vmx(void);
 uint32_t kvmppc_get_dfp(void);
+bool kvmppc_get_host_model(char **buf);
+bool kvmppc_get_host_serial(char **buf);
 int kvmppc_get_hasidle(CPUPPCState *env);
 int kvmppc_get_hypercall(CPUPPCState *env, uint8_t *buf, int buf_len);
 int kvmppc_set_interrupt(PowerPCCPU *cpu, int irq, int level);
@@ -60,6 +62,16 @@ static inline uint32_t kvmppc_get_tbfreq(void)
     return 0;
 }
 
+static inline bool kvmppc_get_host_model(char **buf)
+{
+    return false;
+}
+
+static inline bool kvmppc_get_host_serial(char **buf)
+{
+    return false;
+}
+
 static inline uint64_t kvmppc_get_clockfreq(void)
 {
     return 0;
diff --git a/target-ppc/translate.c b/target-ppc/translate.c
index c07bb01a7a..d03daeaa48 100644
--- a/target-ppc/translate.c
+++ b/target-ppc/translate.c
@@ -1128,9 +1128,19 @@ static void gen_mulhwu(DisasContext *ctx)
 /* mullw mullw. */
 static void gen_mullw(DisasContext *ctx)
 {
-    tcg_gen_mul_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
-                   cpu_gpr[rB(ctx->opcode)]);
-    tcg_gen_ext32s_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)]);
+#if defined(TARGET_PPC64)
+    TCGv_i64 t0, t1;
+    t0 = tcg_temp_new_i64();
+    t1 = tcg_temp_new_i64();
+    tcg_gen_ext32s_tl(t0, cpu_gpr[rA(ctx->opcode)]);
+    tcg_gen_ext32s_tl(t1, cpu_gpr[rB(ctx->opcode)]);
+    tcg_gen_mul_i64(cpu_gpr[rD(ctx->opcode)], t0, t1);
+    tcg_temp_free(t0);
+    tcg_temp_free(t1);
+#else
+    tcg_gen_mul_i32(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
+                    cpu_gpr[rB(ctx->opcode)]);
+#endif
     if (unlikely(Rc(ctx->opcode) != 0))
         gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
 }
@@ -1144,7 +1154,11 @@ static void gen_mullwo(DisasContext *ctx)
     tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]);
     tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]);
     tcg_gen_muls2_i32(t0, t1, t0, t1);
-    tcg_gen_ext_i32_tl(cpu_gpr[rD(ctx->opcode)], t0);
+#if defined(TARGET_PPC64)
+    tcg_gen_concat_i32_i64(cpu_gpr[rD(ctx->opcode)], t0, t1);
+#else
+    tcg_gen_mov_i32(cpu_gpr[rD(ctx->opcode)], t0);
+#endif
 
     tcg_gen_sari_i32(t0, t0, 31);
     tcg_gen_setcond_i32(TCG_COND_NE, t0, t0, t1);
@@ -1201,8 +1215,20 @@ static void gen_mulld(DisasContext *ctx)
 /* mulldo mulldo. */
 static void gen_mulldo(DisasContext *ctx)
 {
-    gen_helper_mulldo(cpu_gpr[rD(ctx->opcode)], cpu_env,
-                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
+    TCGv_i64 t0 = tcg_temp_new_i64();
+    TCGv_i64 t1 = tcg_temp_new_i64();
+
+    tcg_gen_muls2_i64(t0, t1, cpu_gpr[rA(ctx->opcode)],
+                      cpu_gpr[rB(ctx->opcode)]);
+    tcg_gen_mov_i64(cpu_gpr[rD(ctx->opcode)], t0);
+
+    tcg_gen_sari_i64(t0, t0, 63);
+    tcg_gen_setcond_i64(TCG_COND_NE, cpu_ov, t0, t1);
+    tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);
+
+    tcg_temp_free_i64(t0);
+    tcg_temp_free_i64(t1);
+
     if (unlikely(Rc(ctx->opcode) != 0)) {
         gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
     }
@@ -1616,18 +1642,17 @@ static void gen_rlwimi(DisasContext *ctx)
     mb = MB(ctx->opcode);
     me = ME(ctx->opcode);
     sh = SH(ctx->opcode);
-    if (likely(sh == 0 && mb == 0 && me == 31)) {
-        tcg_gen_ext32u_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
+    if (likely(sh == (31-me) && mb <= me)) {
+        tcg_gen_deposit_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
+                           cpu_gpr[rS(ctx->opcode)], sh, me - mb + 1);
     } else {
         target_ulong mask;
         TCGv t1;
         TCGv t0 = tcg_temp_new();
 #if defined(TARGET_PPC64)
-        TCGv_i32 t2 = tcg_temp_new_i32();
-        tcg_gen_trunc_i64_i32(t2, cpu_gpr[rS(ctx->opcode)]);
-        tcg_gen_rotli_i32(t2, t2, sh);
-        tcg_gen_extu_i32_i64(t0, t2);
-        tcg_temp_free_i32(t2);
+        tcg_gen_deposit_i64(t0, cpu_gpr[rS(ctx->opcode)],
+                            cpu_gpr[rS(ctx->opcode)], 32, 32);
+        tcg_gen_rotli_i64(t0, t0, sh);
 #else
         tcg_gen_rotli_i32(t0, cpu_gpr[rS(ctx->opcode)], sh);
 #endif
@@ -1672,14 +1697,18 @@ static void gen_rlwinm(DisasContext *ctx)
         tcg_gen_shri_tl(t0, t0, mb);
         tcg_gen_ext32u_tl(cpu_gpr[rA(ctx->opcode)], t0);
         tcg_temp_free(t0);
+    } else if (likely(mb == 0 && me == 31)) {
+        TCGv_i32 t0 = tcg_temp_new_i32();
+        tcg_gen_trunc_tl_i32(t0, cpu_gpr[rS(ctx->opcode)]);
+        tcg_gen_rotli_i32(t0, t0, sh);
+        tcg_gen_extu_i32_tl(cpu_gpr[rA(ctx->opcode)], t0);
+        tcg_temp_free_i32(t0);
     } else {
         TCGv t0 = tcg_temp_new();
 #if defined(TARGET_PPC64)
-        TCGv_i32 t1 = tcg_temp_new_i32();
-        tcg_gen_trunc_i64_i32(t1, cpu_gpr[rS(ctx->opcode)]);
-        tcg_gen_rotli_i32(t1, t1, sh);
-        tcg_gen_extu_i32_i64(t0, t1);
-        tcg_temp_free_i32(t1);
+        tcg_gen_deposit_i64(t0, cpu_gpr[rS(ctx->opcode)],
+                            cpu_gpr[rS(ctx->opcode)], 32, 32);
+        tcg_gen_rotli_i64(t0, t0, sh);
 #else
         tcg_gen_rotli_i32(t0, cpu_gpr[rS(ctx->opcode)], sh);
 #endif
@@ -1698,37 +1727,49 @@
 static void gen_rlwnm(DisasContext *ctx)
 {
     uint32_t mb, me;
-    TCGv t0;
+    mb = MB(ctx->opcode);
+    me = ME(ctx->opcode);
+
+    if (likely(mb == 0 && me == 31)) {
+        TCGv_i32 t0, t1;
+        t0 = tcg_temp_new_i32();
+        t1 = tcg_temp_new_i32();
+        tcg_gen_trunc_tl_i32(t0, cpu_gpr[rB(ctx->opcode)]);
+        tcg_gen_trunc_tl_i32(t1, cpu_gpr[rS(ctx->opcode)]);
+        tcg_gen_andi_i32(t0, t0, 0x1f);
+        tcg_gen_rotl_i32(t1, t1, t0);
+        tcg_gen_extu_i32_tl(cpu_gpr[rA(ctx->opcode)], t1);
+        tcg_temp_free_i32(t0);
+        tcg_temp_free_i32(t1);
+    } else {
+        TCGv t0;
 #if defined(TARGET_PPC64)
-    TCGv_i32 t1, t2;
+        TCGv t1;
 #endif
-    mb = MB(ctx->opcode);
-    me = ME(ctx->opcode);
-    t0 = tcg_temp_new();
-    tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1f);
+        t0 = tcg_temp_new();
+        tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1f);
 #if defined(TARGET_PPC64)
-    t1 = tcg_temp_new_i32();
-    t2 = tcg_temp_new_i32();
-    tcg_gen_trunc_i64_i32(t1, cpu_gpr[rS(ctx->opcode)]);
-    tcg_gen_trunc_i64_i32(t2, t0);
-    tcg_gen_rotl_i32(t1, t1, t2);
-    tcg_gen_extu_i32_i64(t0, t1);
-    tcg_temp_free_i32(t1);
-    tcg_temp_free_i32(t2);
+        t1 = tcg_temp_new_i64();
+        tcg_gen_deposit_i64(t1, cpu_gpr[rS(ctx->opcode)],
+                            cpu_gpr[rS(ctx->opcode)], 32, 32);
+        tcg_gen_rotl_i64(t0, t1, t0);
+        tcg_temp_free_i64(t1);
 #else
-    tcg_gen_rotl_i32(t0, cpu_gpr[rS(ctx->opcode)], t0);
+        tcg_gen_rotl_i32(t0, cpu_gpr[rS(ctx->opcode)], t0);
 #endif
-    if (unlikely(mb != 0 || me != 31)) {
+        if (unlikely(mb != 0 || me != 31)) {
 #if defined(TARGET_PPC64)
-        mb += 32;
-        me += 32;
+            mb += 32;
+            me += 32;
 #endif
-        tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], t0, MASK(mb, me));
-    } else {
-        tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], t0);
+            tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], t0, MASK(mb, me));
+        } else {
+            tcg_gen_andi_tl(t0, t0, MASK(32, 63));
+            tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], t0);
+        }
+        tcg_temp_free(t0);
     }
-    tcg_temp_free(t0);
     if (unlikely(Rc(ctx->opcode) != 0))
         gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
 }
@@ -1924,7 +1965,7 @@ static void gen_srawi(DisasContext *ctx)
     TCGv dst = cpu_gpr[rA(ctx->opcode)];
     TCGv src = cpu_gpr[rS(ctx->opcode)];
     if (sh == 0) {
-        tcg_gen_mov_tl(dst, src);
+        tcg_gen_ext32s_tl(dst, src);
         tcg_gen_movi_tl(cpu_ca, 0);
     } else {
         TCGv t0;
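A note on the helper_srad hunk in int_helper.c: the mask of bits shifted out must be built from a 64-bit constant, because the shift count can be as large as 63 and "1 << shift" is evaluated in 32-bit int arithmetic. A minimal standalone sketch of the corrected carry rule follows; the names are illustrative, not QEMU code.

#include <stdint.h>

/* Sketch only: mirrors the fixed carry (CA) computation in helper_srad.
 * With a plain int constant, 1 << shift is undefined for shift >= 32, so
 * the mask of shifted-out bits was wrong for large shift counts; 1ULL
 * keeps the whole computation in 64-bit arithmetic. */
static int srad_carry(uint64_t value, uint64_t shift)
{
    int64_t ret;

    if (shift == 0) {
        return 0;                  /* nothing shifted out */
    }
    shift &= 0x3f;
    ret = (int64_t)value >> shift;
    /* CA is set when the result is negative and 1-bits were shifted out */
    return ret < 0 && (value & ((1ULL << shift) - 1)) != 0;
}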
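On the gen_mullw change in translate.c: for 64-bit targets the low words of rA and rB are now sign-extended and multiplied in 64-bit arithmetic, so rD holds the full 64-bit product instead of only a sign-extended low word. A rough C model of the new behaviour (illustrative only):

#include <stdint.h>

/* Model of what the new TARGET_PPC64 gen_mullw path computes: sign-extend
 * the low words of rA and rB, multiply as 64-bit values, and write the
 * full 64-bit product to rD. */
static uint64_t mullw_result(uint64_t ra, uint64_t rb)
{
    int64_t a = (int32_t)ra;    /* ext32s of rA */
    int64_t b = (int32_t)rb;    /* ext32s of rB */
    return (uint64_t)(a * b);   /* full 64-bit product lands in rD */
}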
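The removed helper_mulldo is replaced by inline TCG in gen_mulldo: tcg_gen_muls2_i64 yields the 128-bit signed product and OV is set when the high half differs from the sign extension of the low half. A sketch of that overflow rule, assuming the GCC/Clang __int128 extension (illustrative only):

#include <stdbool.h>
#include <stdint.h>

/* Overflow rule encoded by the new gen_mulldo: a signed 64x64 multiply
 * overflows its 64-bit result exactly when the high 64 bits of the 128-bit
 * product differ from the sign extension of the low 64 bits, i.e. from
 * lo >> 63 (arithmetic shift: all zeros or all ones). */
static bool mulldo_overflows(int64_t a, int64_t b)
{
    __int128 prod = (__int128)a * b;
    int64_t lo = (int64_t)prod;
    int64_t hi = (int64_t)(prod >> 64);

    return hi != (lo >> 63);
}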
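Several translate.c hunks (gen_rlwimi, gen_rlwinm, gen_rlwnm) share one trick for 32-bit rotates on a 64-bit target: tcg_gen_deposit_i64 first replicates the low word of rS into the high word, after which a plain 64-bit rotate leaves the rotated 32-bit pattern in the low word. A small C model of that identity (illustrative only):

#include <stdint.h>

/* Model of the deposit+rotate trick: duplicating the low 32 bits into the
 * high 32 bits makes a 64-bit rotate behave like a 32-bit rotate in the
 * low word, for any rotate count 0..31. */
static uint32_t rotl32_via_rotl64(uint32_t x, unsigned sh)
{
    uint64_t doubled = ((uint64_t)x << 32) | x;   /* deposit(x, x, 32, 32) */
    uint64_t rotated;

    sh &= 31;
    /* 64-bit rotate left; guard sh == 0 to avoid a shift by 64 */
    rotated = (doubled << sh) | (sh ? (doubled >> (64 - sh)) : 0);
    return (uint32_t)rotated;      /* low word now holds rotl32(x, sh) */
}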