49 files changed, 4853 insertions(+), 4668 deletions(-)
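
Before the per-file hunks: this series promotes several previously optional KVM features (ioeventfd, any-length ioeventfd, internal error data, irqfd) to hard requirements and deletes the corresponding kvm_*_allowed probe flags. As a minimal standalone sketch of the underlying probe, assuming only a Linux host with /dev/kvm and linux/kvm.h (this is the raw ioctl pattern behind QEMU's kvm_check_extension(), not QEMU code itself):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Probe the capabilities the series now treats as mandatory.
 * KVM_CHECK_EXTENSION returns > 0 when a capability is present. */
int main(void)
{
    int required[] = {
        KVM_CAP_IOEVENTFD,              /* new KVM_CAP_INFO() entry */
        KVM_CAP_IOEVENTFD_ANY_LENGTH,   /* new KVM_CAP_INFO() entry */
        KVM_CAP_INTERNAL_ERROR_DATA,    /* new KVM_CAP_INFO() entry */
        KVM_CAP_IRQFD,                  /* checked in kvm_irqchip_create() */
    };
    int kvm = open("/dev/kvm", O_RDWR | O_CLOEXEC);

    if (kvm < 0) {
        perror("open /dev/kvm");
        return 1;
    }
    for (unsigned i = 0; i < sizeof(required) / sizeof(required[0]); i++) {
        if (ioctl(kvm, KVM_CHECK_EXTENSION, required[i]) <= 0) {
            fprintf(stderr, "missing KVM capability %d\n", required[i]);
            return 1;
        }
    }
    printf("all required capabilities present\n");
    return 0;
}
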
diff --git a/MAINTAINERS b/MAINTAINERS index d36aa44661..cd8d6b140f 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1131,7 +1131,7 @@ F: docs/system/arm/emcraft-sf2.rst ASPEED BMCs M: Cédric Le Goater <clg@kaod.org> M: Peter Maydell <peter.maydell@linaro.org> -R: Andrew Jeffery <andrew@aj.id.au> +R: Andrew Jeffery <andrew@codeconstruct.com.au> R: Joel Stanley <joel@jms.id.au> L: qemu-arm@nongnu.org S: Maintained diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c index 3f7eafe08c..e39a810a4e 100644 --- a/accel/kvm/kvm-all.c +++ b/accel/kvm/kvm-all.c @@ -90,8 +90,6 @@ bool kvm_kernel_irqchip; bool kvm_split_irqchip; bool kvm_async_interrupts_allowed; bool kvm_halt_in_kernel_allowed; -bool kvm_eventfds_allowed; -bool kvm_irqfds_allowed; bool kvm_resamplefds_allowed; bool kvm_msi_via_irqfd_allowed; bool kvm_gsi_routing_allowed; @@ -99,8 +97,6 @@ bool kvm_gsi_direct_mapping; bool kvm_allowed; bool kvm_readonly_mem_allowed; bool kvm_vm_attributes_allowed; -bool kvm_direct_msi_allowed; -bool kvm_ioeventfd_any_length_allowed; bool kvm_msi_use_devid; bool kvm_has_guest_debug; static int kvm_sstep_flags; @@ -111,6 +107,9 @@ static const KVMCapabilityInfo kvm_required_capabilites[] = { KVM_CAP_INFO(USER_MEMORY), KVM_CAP_INFO(DESTROY_MEMORY_REGION_WORKS), KVM_CAP_INFO(JOIN_MEMORY_REGIONS_WORKS), + KVM_CAP_INFO(INTERNAL_ERROR_DATA), + KVM_CAP_INFO(IOEVENTFD), + KVM_CAP_INFO(IOEVENTFD_ANY_LENGTH), KVM_CAP_LAST_INFO }; @@ -1106,13 +1105,6 @@ static void kvm_coalesce_pio_del(MemoryListener *listener, } } -static MemoryListener kvm_coalesced_pio_listener = { - .name = "kvm-coalesced-pio", - .coalesced_io_add = kvm_coalesce_pio_add, - .coalesced_io_del = kvm_coalesce_pio_del, - .priority = MEMORY_LISTENER_PRIORITY_MIN, -}; - int kvm_check_extension(KVMState *s, unsigned int extension) { int ret; @@ -1254,43 +1246,6 @@ static int kvm_set_ioeventfd_pio(int fd, uint16_t addr, uint16_t val, } -static int kvm_check_many_ioeventfds(void) -{ - /* Userspace can use ioeventfd for io notification. This requires a host - * that supports eventfd(2) and an I/O thread; since eventfd does not - * support SIGIO it cannot interrupt the vcpu. - * - * Older kernels have a 6 device limit on the KVM io bus. Find out so we - * can avoid creating too many ioeventfds. 
- */ -#if defined(CONFIG_EVENTFD) - int ioeventfds[7]; - int i, ret = 0; - for (i = 0; i < ARRAY_SIZE(ioeventfds); i++) { - ioeventfds[i] = eventfd(0, EFD_CLOEXEC); - if (ioeventfds[i] < 0) { - break; - } - ret = kvm_set_ioeventfd_pio(ioeventfds[i], 0, i, true, 2, true); - if (ret < 0) { - close(ioeventfds[i]); - break; - } - } - - /* Decide whether many devices are supported or not */ - ret = i == ARRAY_SIZE(ioeventfds); - - while (i-- > 0) { - kvm_set_ioeventfd_pio(ioeventfds[i], 0, i, false, 2, true); - close(ioeventfds[i]); - } - return ret; -#else - return 0; -#endif -} - static const KVMCapabilityInfo * kvm_check_extension_list(KVMState *s, const KVMCapabilityInfo *list) { @@ -1806,6 +1761,8 @@ void kvm_memory_listener_register(KVMState *s, KVMMemoryListener *kml, static MemoryListener kvm_io_listener = { .name = "kvm-io", + .coalesced_io_add = kvm_coalesce_pio_add, + .coalesced_io_del = kvm_coalesce_pio_del, .eventfd_add = kvm_io_ioeventfd_add, .eventfd_del = kvm_io_ioeventfd_del, .priority = MEMORY_LISTENER_PRIORITY_DEV_BACKEND, @@ -1847,7 +1804,7 @@ static void clear_gsi(KVMState *s, unsigned int gsi) void kvm_init_irq_routing(KVMState *s) { - int gsi_count, i; + int gsi_count; gsi_count = kvm_check_extension(s, KVM_CAP_IRQ_ROUTING) - 1; if (gsi_count > 0) { @@ -1859,12 +1816,6 @@ void kvm_init_irq_routing(KVMState *s) s->irq_routes = g_malloc0(sizeof(*s->irq_routes)); s->nr_allocated_irq_routes = 0; - if (!kvm_direct_msi_allowed) { - for (i = 0; i < KVM_MSI_HASHTAB_SIZE; i++) { - QTAILQ_INIT(&s->msi_hashtab[i]); - } - } - kvm_arch_init_irq_routing(s); } @@ -1984,41 +1935,10 @@ void kvm_irqchip_change_notify(void) notifier_list_notify(&kvm_irqchip_change_notifiers, NULL); } -static unsigned int kvm_hash_msi(uint32_t data) -{ - /* This is optimized for IA32 MSI layout. However, no other arch shall - * repeat the mistake of not providing a direct MSI injection API. */ - return data & 0xff; -} - -static void kvm_flush_dynamic_msi_routes(KVMState *s) -{ - KVMMSIRoute *route, *next; - unsigned int hash; - - for (hash = 0; hash < KVM_MSI_HASHTAB_SIZE; hash++) { - QTAILQ_FOREACH_SAFE(route, &s->msi_hashtab[hash], entry, next) { - kvm_irqchip_release_virq(s, route->kroute.gsi); - QTAILQ_REMOVE(&s->msi_hashtab[hash], route, entry); - g_free(route); - } - } -} - static int kvm_irqchip_get_virq(KVMState *s) { int next_virq; - /* - * PIC and IOAPIC share the first 16 GSI numbers, thus the available - * GSI numbers are more than the number of IRQ route. Allocating a GSI - * number can succeed even though a new route entry cannot be added. - * When this happens, flush dynamic MSI entries to free IRQ route entries. 
- */ - if (!kvm_direct_msi_allowed && s->irq_routes->nr == s->gsi_count) { - kvm_flush_dynamic_msi_routes(s); - } - /* Return the lowest unused GSI in the bitmap */ next_virq = find_first_zero_bit(s->used_gsi_bitmap, s->gsi_count); if (next_virq >= s->gsi_count) { @@ -2028,63 +1948,17 @@ static int kvm_irqchip_get_virq(KVMState *s) } } -static KVMMSIRoute *kvm_lookup_msi_route(KVMState *s, MSIMessage msg) -{ - unsigned int hash = kvm_hash_msi(msg.data); - KVMMSIRoute *route; - - QTAILQ_FOREACH(route, &s->msi_hashtab[hash], entry) { - if (route->kroute.u.msi.address_lo == (uint32_t)msg.address && - route->kroute.u.msi.address_hi == (msg.address >> 32) && - route->kroute.u.msi.data == le32_to_cpu(msg.data)) { - return route; - } - } - return NULL; -} - int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg) { struct kvm_msi msi; - KVMMSIRoute *route; - - if (kvm_direct_msi_allowed) { - msi.address_lo = (uint32_t)msg.address; - msi.address_hi = msg.address >> 32; - msi.data = le32_to_cpu(msg.data); - msi.flags = 0; - memset(msi.pad, 0, sizeof(msi.pad)); - return kvm_vm_ioctl(s, KVM_SIGNAL_MSI, &msi); - } - - route = kvm_lookup_msi_route(s, msg); - if (!route) { - int virq; - - virq = kvm_irqchip_get_virq(s); - if (virq < 0) { - return virq; - } + msi.address_lo = (uint32_t)msg.address; + msi.address_hi = msg.address >> 32; + msi.data = le32_to_cpu(msg.data); + msi.flags = 0; + memset(msi.pad, 0, sizeof(msi.pad)); - route = g_new0(KVMMSIRoute, 1); - route->kroute.gsi = virq; - route->kroute.type = KVM_IRQ_ROUTING_MSI; - route->kroute.flags = 0; - route->kroute.u.msi.address_lo = (uint32_t)msg.address; - route->kroute.u.msi.address_hi = msg.address >> 32; - route->kroute.u.msi.data = le32_to_cpu(msg.data); - - kvm_add_routing_entry(s, &route->kroute); - kvm_irqchip_commit_routes(s); - - QTAILQ_INSERT_TAIL(&s->msi_hashtab[kvm_hash_msi(msg.data)], route, - entry); - } - - assert(route->kroute.type == KVM_IRQ_ROUTING_MSI); - - return kvm_set_irq(s, route->kroute.gsi, 1); + return kvm_vm_ioctl(s, KVM_SIGNAL_MSI, &msi); } int kvm_irqchip_add_msi_route(KVMRouteChange *c, int vector, PCIDevice *dev) @@ -2211,10 +2085,6 @@ static int kvm_irqchip_assign_irqfd(KVMState *s, EventNotifier *event, } } - if (!kvm_irqfds_enabled()) { - return -ENOSYS; - } - return kvm_vm_ioctl(s, KVM_IRQFD, &irqfd); } @@ -2375,6 +2245,11 @@ static void kvm_irqchip_create(KVMState *s) return; } + if (kvm_check_extension(s, KVM_CAP_IRQFD) <= 0) { + fprintf(stderr, "kvm: irqfd not implemented\n"); + exit(1); + } + /* First probe and see if there's a arch-specific hook to create the * in-kernel irqchip for us */ ret = kvm_arch_irqchip_create(s); @@ -2649,22 +2524,8 @@ static int kvm_init(MachineState *ms) #ifdef KVM_CAP_VCPU_EVENTS s->vcpu_events = kvm_check_extension(s, KVM_CAP_VCPU_EVENTS); #endif - - s->robust_singlestep = - kvm_check_extension(s, KVM_CAP_X86_ROBUST_SINGLESTEP); - -#ifdef KVM_CAP_DEBUGREGS - s->debugregs = kvm_check_extension(s, KVM_CAP_DEBUGREGS); -#endif - s->max_nested_state_len = kvm_check_extension(s, KVM_CAP_NESTED_STATE); -#ifdef KVM_CAP_IRQ_ROUTING - kvm_direct_msi_allowed = (kvm_check_extension(s, KVM_CAP_SIGNAL_MSI) > 0); -#endif - - s->intx_set_mask = kvm_check_extension(s, KVM_CAP_PCI_2_3); - s->irq_set_ioctl = KVM_IRQ_LINE; if (kvm_check_extension(s, KVM_CAP_IRQ_INJECT_STATUS)) { s->irq_set_ioctl = KVM_IRQ_LINE_STATUS; @@ -2673,21 +2534,12 @@ static int kvm_init(MachineState *ms) kvm_readonly_mem_allowed = (kvm_check_extension(s, KVM_CAP_READONLY_MEM) > 0); - kvm_eventfds_allowed = - 
(kvm_check_extension(s, KVM_CAP_IOEVENTFD) > 0); - - kvm_irqfds_allowed = - (kvm_check_extension(s, KVM_CAP_IRQFD) > 0); - kvm_resamplefds_allowed = (kvm_check_extension(s, KVM_CAP_IRQFD_RESAMPLE) > 0); kvm_vm_attributes_allowed = (kvm_check_extension(s, KVM_CAP_VM_ATTRIBUTES) > 0); - kvm_ioeventfd_any_length_allowed = - (kvm_check_extension(s, KVM_CAP_IOEVENTFD_ANY_LENGTH) > 0); - #ifdef KVM_CAP_SET_GUEST_DEBUG kvm_has_guest_debug = (kvm_check_extension(s, KVM_CAP_SET_GUEST_DEBUG) > 0); @@ -2724,24 +2576,16 @@ static int kvm_init(MachineState *ms) kvm_irqchip_create(s); } - if (kvm_eventfds_allowed) { - s->memory_listener.listener.eventfd_add = kvm_mem_ioeventfd_add; - s->memory_listener.listener.eventfd_del = kvm_mem_ioeventfd_del; - } + s->memory_listener.listener.eventfd_add = kvm_mem_ioeventfd_add; + s->memory_listener.listener.eventfd_del = kvm_mem_ioeventfd_del; s->memory_listener.listener.coalesced_io_add = kvm_coalesce_mmio_region; s->memory_listener.listener.coalesced_io_del = kvm_uncoalesce_mmio_region; kvm_memory_listener_register(s, &s->memory_listener, &address_space_memory, 0, "kvm-memory"); - if (kvm_eventfds_allowed) { - memory_listener_register(&kvm_io_listener, - &address_space_io); - } - memory_listener_register(&kvm_coalesced_pio_listener, + memory_listener_register(&kvm_io_listener, &address_space_io); - s->many_ioeventfds = kvm_check_many_ioeventfds(); - s->sync_mmu = !!kvm_vm_check_extension(kvm_state, KVM_CAP_SYNC_MMU); if (!s->sync_mmu) { ret = ram_block_discard_disable(true); @@ -2794,16 +2638,14 @@ static void kvm_handle_io(uint16_t port, MemTxAttrs attrs, void *data, int direc static int kvm_handle_internal_error(CPUState *cpu, struct kvm_run *run) { + int i; + fprintf(stderr, "KVM internal error. Suberror: %d\n", run->internal.suberror); - if (kvm_check_extension(kvm_state, KVM_CAP_INTERNAL_ERROR_DATA)) { - int i; - - for (i = 0; i < run->internal.ndata; ++i) { - fprintf(stderr, "extra data[%d]: 0x%016"PRIx64"\n", - i, (uint64_t)run->internal.data[i]); - } + for (i = 0; i < run->internal.ndata; ++i) { + fprintf(stderr, "extra data[%d]: 0x%016"PRIx64"\n", + i, (uint64_t)run->internal.data[i]); } if (run->internal.suberror == KVM_INTERNAL_ERROR_EMULATION) { fprintf(stderr, "emulation failure\n"); @@ -3297,29 +3139,11 @@ int kvm_has_vcpu_events(void) return kvm_state->vcpu_events; } -int kvm_has_robust_singlestep(void) -{ - return kvm_state->robust_singlestep; -} - -int kvm_has_debugregs(void) -{ - return kvm_state->debugregs; -} - int kvm_max_nested_state_length(void) { return kvm_state->max_nested_state_len; } -int kvm_has_many_ioeventfds(void) -{ - if (!kvm_enabled()) { - return 0; - } - return kvm_state->many_ioeventfds; -} - int kvm_has_gsi_routing(void) { #ifdef KVM_CAP_IRQ_ROUTING @@ -3329,11 +3153,6 @@ int kvm_has_gsi_routing(void) #endif } -int kvm_has_intx_set_mask(void) -{ - return kvm_state->intx_set_mask; -} - bool kvm_arm_supports_user_irq(void) { return kvm_check_extension(kvm_state, KVM_CAP_ARM_USER_IRQ); diff --git a/accel/stubs/kvm-stub.c b/accel/stubs/kvm-stub.c index 51f522e52e..1b37d9a302 100644 --- a/accel/stubs/kvm-stub.c +++ b/accel/stubs/kvm-stub.c @@ -17,17 +17,13 @@ KVMState *kvm_state; bool kvm_kernel_irqchip; bool kvm_async_interrupts_allowed; -bool kvm_eventfds_allowed; -bool kvm_irqfds_allowed; bool kvm_resamplefds_allowed; bool kvm_msi_via_irqfd_allowed; bool kvm_gsi_routing_allowed; bool kvm_gsi_direct_mapping; bool kvm_allowed; bool kvm_readonly_mem_allowed; -bool kvm_ioeventfd_any_length_allowed; bool kvm_msi_use_devid; -bool 
kvm_direct_msi_allowed; void kvm_flush_coalesced_mmio_buffer(void) { @@ -42,11 +38,6 @@ bool kvm_has_sync_mmu(void) return false; } -int kvm_has_many_ioeventfds(void) -{ - return 0; -} - int kvm_on_sigbus_vcpu(CPUState *cpu, int code, void *addr) { return 1; @@ -92,11 +83,6 @@ void kvm_irqchip_change_notify(void) { } -int kvm_irqchip_add_adapter_route(KVMState *s, AdapterInfo *adapter) -{ - return -ENOSYS; -} - int kvm_irqchip_add_irqfd_notifier_gsi(KVMState *s, EventNotifier *n, EventNotifier *rn, int virq) { diff --git a/configs/targets/sparc-softmmu.mak b/configs/targets/sparc-softmmu.mak index 454eb35499..a5d9200382 100644 --- a/configs/targets/sparc-softmmu.mak +++ b/configs/targets/sparc-softmmu.mak @@ -1,2 +1,3 @@ TARGET_ARCH=sparc TARGET_BIG_ENDIAN=y +TARGET_SUPPORTS_MTTCG=y diff --git a/configs/targets/sparc64-softmmu.mak b/configs/targets/sparc64-softmmu.mak index d3f8a3b710..36ca64ec41 100644 --- a/configs/targets/sparc64-softmmu.mak +++ b/configs/targets/sparc64-softmmu.mak @@ -1,3 +1,4 @@ TARGET_ARCH=sparc64 TARGET_BASE_ARCH=sparc TARGET_BIG_ENDIAN=y +TARGET_SUPPORTS_MTTCG=y diff --git a/hw/arm/aspeed.c b/hw/arm/aspeed.c index f8ba67531a..cc59176563 100644 --- a/hw/arm/aspeed.c +++ b/hw/arm/aspeed.c @@ -40,7 +40,7 @@ struct AspeedMachineState { MachineState parent_obj; /* Public */ - AspeedSoCState soc; + AspeedSoCState *soc; MemoryRegion boot_rom; bool mmio_exec; uint32_t uart_chosen; @@ -288,7 +288,7 @@ static void write_boot_rom(BlockBackend *blk, hwaddr addr, size_t rom_size, static void aspeed_install_boot_rom(AspeedMachineState *bmc, BlockBackend *blk, uint64_t rom_size) { - AspeedSoCState *soc = &bmc->soc; + AspeedSoCState *soc = bmc->soc; memory_region_init_rom(&bmc->boot_rom, NULL, "aspeed.boot_rom", rom_size, &error_abort); @@ -337,7 +337,7 @@ static void sdhci_attach_drive(SDHCIState *sdhci, DriveInfo *dinfo) static void connect_serial_hds_to_uarts(AspeedMachineState *bmc) { AspeedMachineClass *amc = ASPEED_MACHINE_GET_CLASS(bmc); - AspeedSoCState *s = &bmc->soc; + AspeedSoCState *s = bmc->soc; AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s); int uart_chosen = bmc->uart_chosen ? bmc->uart_chosen : amc->uart_default; @@ -358,32 +358,33 @@ static void aspeed_machine_init(MachineState *machine) int i; NICInfo *nd = &nd_table[0]; - object_initialize_child(OBJECT(machine), "soc", &bmc->soc, amc->soc_name); - - sc = ASPEED_SOC_GET_CLASS(&bmc->soc); + bmc->soc = ASPEED_SOC(object_new(amc->soc_name)); + object_property_add_child(OBJECT(machine), "soc", OBJECT(bmc->soc)); + object_unref(OBJECT(bmc->soc)); + sc = ASPEED_SOC_GET_CLASS(bmc->soc); /* * This will error out if the RAM size is not supported by the * memory controller of the SoC. 
*/ - object_property_set_uint(OBJECT(&bmc->soc), "ram-size", machine->ram_size, + object_property_set_uint(OBJECT(bmc->soc), "ram-size", machine->ram_size, &error_fatal); for (i = 0; i < sc->macs_num; i++) { if ((amc->macs_mask & (1 << i)) && nd->used) { qemu_check_nic_model(nd, TYPE_FTGMAC100); - qdev_set_nic_properties(DEVICE(&bmc->soc.ftgmac100[i]), nd); + qdev_set_nic_properties(DEVICE(&bmc->soc->ftgmac100[i]), nd); nd++; } } - object_property_set_int(OBJECT(&bmc->soc), "hw-strap1", amc->hw_strap1, + object_property_set_int(OBJECT(bmc->soc), "hw-strap1", amc->hw_strap1, &error_abort); - object_property_set_int(OBJECT(&bmc->soc), "hw-strap2", amc->hw_strap2, + object_property_set_int(OBJECT(bmc->soc), "hw-strap2", amc->hw_strap2, &error_abort); - object_property_set_link(OBJECT(&bmc->soc), "memory", + object_property_set_link(OBJECT(bmc->soc), "memory", OBJECT(get_system_memory()), &error_abort); - object_property_set_link(OBJECT(&bmc->soc), "dram", + object_property_set_link(OBJECT(bmc->soc), "dram", OBJECT(machine->ram), &error_abort); if (machine->kernel_filename) { /* @@ -391,17 +392,17 @@ static void aspeed_machine_init(MachineState *machine) * that runs to unlock the SCU. In this case set the default to * be unlocked as the kernel expects */ - object_property_set_int(OBJECT(&bmc->soc), "hw-prot-key", + object_property_set_int(OBJECT(bmc->soc), "hw-prot-key", ASPEED_SCU_PROT_KEY, &error_abort); } connect_serial_hds_to_uarts(bmc); - qdev_realize(DEVICE(&bmc->soc), NULL, &error_abort); + qdev_realize(DEVICE(bmc->soc), NULL, &error_abort); if (defaults_enabled()) { - aspeed_board_init_flashes(&bmc->soc.fmc, + aspeed_board_init_flashes(&bmc->soc->fmc, bmc->fmc_model ? bmc->fmc_model : amc->fmc_model, amc->num_cs, 0); - aspeed_board_init_flashes(&bmc->soc.spi[0], + aspeed_board_init_flashes(&bmc->soc->spi[0], bmc->spi_model ? bmc->spi_model : amc->spi_model, 1, amc->num_cs); } @@ -426,22 +427,22 @@ static void aspeed_machine_init(MachineState *machine) amc->i2c_init(bmc); } - for (i = 0; i < bmc->soc.sdhci.num_slots; i++) { - sdhci_attach_drive(&bmc->soc.sdhci.slots[i], + for (i = 0; i < bmc->soc->sdhci.num_slots; i++) { + sdhci_attach_drive(&bmc->soc->sdhci.slots[i], drive_get(IF_SD, 0, i)); } - if (bmc->soc.emmc.num_slots) { - sdhci_attach_drive(&bmc->soc.emmc.slots[0], - drive_get(IF_SD, 0, bmc->soc.sdhci.num_slots)); + if (bmc->soc->emmc.num_slots) { + sdhci_attach_drive(&bmc->soc->emmc.slots[0], + drive_get(IF_SD, 0, bmc->soc->sdhci.num_slots)); } if (!bmc->mmio_exec) { - DeviceState *dev = ssi_get_cs(bmc->soc.fmc.spi, 0); + DeviceState *dev = ssi_get_cs(bmc->soc->fmc.spi, 0); BlockBackend *fmc0 = dev ? 
m25p80_get_blk(dev) : NULL; if (fmc0) { - uint64_t rom_size = memory_region_size(&bmc->soc.spi_boot); + uint64_t rom_size = memory_region_size(&bmc->soc->spi_boot); aspeed_install_boot_rom(bmc, fmc0, rom_size); } } @@ -451,7 +452,7 @@ static void aspeed_machine_init(MachineState *machine) static void palmetto_bmc_i2c_init(AspeedMachineState *bmc) { - AspeedSoCState *soc = &bmc->soc; + AspeedSoCState *soc = bmc->soc; DeviceState *dev; uint8_t *eeprom_buf = g_malloc0(32 * 1024); @@ -473,7 +474,7 @@ static void palmetto_bmc_i2c_init(AspeedMachineState *bmc) static void quanta_q71l_bmc_i2c_init(AspeedMachineState *bmc) { - AspeedSoCState *soc = &bmc->soc; + AspeedSoCState *soc = bmc->soc; /* * The quanta-q71l platform expects tmp75s which are compatible with @@ -505,7 +506,7 @@ static void quanta_q71l_bmc_i2c_init(AspeedMachineState *bmc) static void ast2500_evb_i2c_init(AspeedMachineState *bmc) { - AspeedSoCState *soc = &bmc->soc; + AspeedSoCState *soc = bmc->soc; uint8_t *eeprom_buf = g_malloc0(8 * 1024); smbus_eeprom_init_one(aspeed_i2c_get_bus(&soc->i2c, 3), 0x50, @@ -518,7 +519,7 @@ static void ast2500_evb_i2c_init(AspeedMachineState *bmc) static void ast2600_evb_i2c_init(AspeedMachineState *bmc) { - AspeedSoCState *soc = &bmc->soc; + AspeedSoCState *soc = bmc->soc; uint8_t *eeprom_buf = g_malloc0(8 * 1024); smbus_eeprom_init_one(aspeed_i2c_get_bus(&soc->i2c, 7), 0x50, @@ -531,7 +532,7 @@ static void ast2600_evb_i2c_init(AspeedMachineState *bmc) static void yosemitev2_bmc_i2c_init(AspeedMachineState *bmc) { - AspeedSoCState *soc = &bmc->soc; + AspeedSoCState *soc = bmc->soc; at24c_eeprom_init(aspeed_i2c_get_bus(&soc->i2c, 4), 0x51, 128 * KiB); at24c_eeprom_init_rom(aspeed_i2c_get_bus(&soc->i2c, 8), 0x51, 128 * KiB, @@ -545,7 +546,7 @@ static void yosemitev2_bmc_i2c_init(AspeedMachineState *bmc) static void romulus_bmc_i2c_init(AspeedMachineState *bmc) { - AspeedSoCState *soc = &bmc->soc; + AspeedSoCState *soc = bmc->soc; /* The romulus board expects Epson RX8900 I2C RTC but a ds1338 is * good enough */ @@ -554,7 +555,7 @@ static void romulus_bmc_i2c_init(AspeedMachineState *bmc) static void tiogapass_bmc_i2c_init(AspeedMachineState *bmc) { - AspeedSoCState *soc = &bmc->soc; + AspeedSoCState *soc = bmc->soc; at24c_eeprom_init(aspeed_i2c_get_bus(&soc->i2c, 4), 0x54, 128 * KiB); at24c_eeprom_init_rom(aspeed_i2c_get_bus(&soc->i2c, 6), 0x54, 128 * KiB, @@ -573,7 +574,7 @@ static void create_pca9552(AspeedSoCState *soc, int bus_id, int addr) static void sonorapass_bmc_i2c_init(AspeedMachineState *bmc) { - AspeedSoCState *soc = &bmc->soc; + AspeedSoCState *soc = bmc->soc; /* bus 2 : */ i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 2), "tmp105", 0x48); @@ -627,7 +628,7 @@ static void witherspoon_bmc_i2c_init(AspeedMachineState *bmc) {14, LED_COLOR_GREEN, "front-power-3", GPIO_POLARITY_ACTIVE_LOW}, {15, LED_COLOR_GREEN, "front-id-5", GPIO_POLARITY_ACTIVE_LOW}, }; - AspeedSoCState *soc = &bmc->soc; + AspeedSoCState *soc = bmc->soc; uint8_t *eeprom_buf = g_malloc0(8 * 1024); DeviceState *dev; LEDState *led; @@ -672,7 +673,7 @@ static void witherspoon_bmc_i2c_init(AspeedMachineState *bmc) static void g220a_bmc_i2c_init(AspeedMachineState *bmc) { - AspeedSoCState *soc = &bmc->soc; + AspeedSoCState *soc = bmc->soc; DeviceState *dev; dev = DEVICE(i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 3), @@ -708,7 +709,7 @@ static void g220a_bmc_i2c_init(AspeedMachineState *bmc) static void fp5280g2_bmc_i2c_init(AspeedMachineState *bmc) { - AspeedSoCState *soc = &bmc->soc; + AspeedSoCState *soc = 
bmc->soc; I2CSlave *i2c_mux; /* The at24c256 */ @@ -735,7 +736,7 @@ static void fp5280g2_bmc_i2c_init(AspeedMachineState *bmc) static void rainier_bmc_i2c_init(AspeedMachineState *bmc) { - AspeedSoCState *soc = &bmc->soc; + AspeedSoCState *soc = bmc->soc; I2CSlave *i2c_mux; at24c_eeprom_init(aspeed_i2c_get_bus(&soc->i2c, 0), 0x51, 32 * KiB); @@ -852,7 +853,7 @@ static void get_pca9548_channels(I2CBus *bus, uint8_t mux_addr, static void fuji_bmc_i2c_init(AspeedMachineState *bmc) { - AspeedSoCState *soc = &bmc->soc; + AspeedSoCState *soc = bmc->soc; I2CBus *i2c[144] = {}; for (int i = 0; i < 16; i++) { @@ -930,7 +931,7 @@ static void fuji_bmc_i2c_init(AspeedMachineState *bmc) static void bletchley_bmc_i2c_init(AspeedMachineState *bmc) { - AspeedSoCState *soc = &bmc->soc; + AspeedSoCState *soc = bmc->soc; I2CBus *i2c[13] = {}; for (int i = 0; i < 13; i++) { if ((i == 8) || (i == 11)) { @@ -976,7 +977,7 @@ static void bletchley_bmc_i2c_init(AspeedMachineState *bmc) static void fby35_i2c_init(AspeedMachineState *bmc) { - AspeedSoCState *soc = &bmc->soc; + AspeedSoCState *soc = bmc->soc; I2CBus *i2c[16]; for (int i = 0; i < 16; i++) { @@ -1008,14 +1009,14 @@ static void fby35_i2c_init(AspeedMachineState *bmc) static void qcom_dc_scm_bmc_i2c_init(AspeedMachineState *bmc) { - AspeedSoCState *soc = &bmc->soc; + AspeedSoCState *soc = bmc->soc; i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 15), "tmp105", 0x4d); } static void qcom_dc_scm_firework_i2c_init(AspeedMachineState *bmc) { - AspeedSoCState *soc = &bmc->soc; + AspeedSoCState *soc = bmc->soc; I2CSlave *therm_mux, *cpuvr_mux; /* Create the generic DC-SCM hardware */ @@ -1477,7 +1478,7 @@ static void aspeed_machine_bletchley_class_init(ObjectClass *oc, void *data) static void fby35_reset(MachineState *state, ShutdownCause reason) { AspeedMachineState *bmc = ASPEED_MACHINE(state); - AspeedGPIOState *gpio = &bmc->soc.gpio; + AspeedGPIOState *gpio = &bmc->soc->gpio; qemu_devices_reset(reason); @@ -1528,24 +1529,26 @@ static void aspeed_minibmc_machine_init(MachineState *machine) sysclk = clock_new(OBJECT(machine), "SYSCLK"); clock_set_hz(sysclk, SYSCLK_FRQ); - object_initialize_child(OBJECT(machine), "soc", &bmc->soc, amc->soc_name); - qdev_connect_clock_in(DEVICE(&bmc->soc), "sysclk", sysclk); + bmc->soc = ASPEED_SOC(object_new(amc->soc_name)); + object_property_add_child(OBJECT(machine), "soc", OBJECT(bmc->soc)); + object_unref(OBJECT(bmc->soc)); + qdev_connect_clock_in(DEVICE(bmc->soc), "sysclk", sysclk); - object_property_set_link(OBJECT(&bmc->soc), "memory", + object_property_set_link(OBJECT(bmc->soc), "memory", OBJECT(get_system_memory()), &error_abort); connect_serial_hds_to_uarts(bmc); - qdev_realize(DEVICE(&bmc->soc), NULL, &error_abort); + qdev_realize(DEVICE(bmc->soc), NULL, &error_abort); - aspeed_board_init_flashes(&bmc->soc.fmc, + aspeed_board_init_flashes(&bmc->soc->fmc, bmc->fmc_model ? bmc->fmc_model : amc->fmc_model, amc->num_cs, 0); - aspeed_board_init_flashes(&bmc->soc.spi[0], + aspeed_board_init_flashes(&bmc->soc->spi[0], bmc->spi_model ? bmc->spi_model : amc->spi_model, amc->num_cs, amc->num_cs); - aspeed_board_init_flashes(&bmc->soc.spi[1], + aspeed_board_init_flashes(&bmc->soc->spi[1], bmc->spi_model ? 
bmc->spi_model : amc->spi_model, amc->num_cs, (amc->num_cs * 2)); @@ -1561,7 +1564,7 @@ static void aspeed_minibmc_machine_init(MachineState *machine) static void ast1030_evb_i2c_init(AspeedMachineState *bmc) { - AspeedSoCState *soc = &bmc->soc; + AspeedSoCState *soc = bmc->soc; /* U10 24C08 connects to SDA/SCL Group 1 by default */ uint8_t *eeprom_buf = g_malloc0(32 * 1024); diff --git a/hw/arm/aspeed_ast10x0.c b/hw/arm/aspeed_ast10x0.c index 649b3b13c1..8becb146a8 100644 --- a/hw/arm/aspeed_ast10x0.c +++ b/hw/arm/aspeed_ast10x0.c @@ -101,13 +101,15 @@ static const int aspeed_soc_ast1030_irqmap[] = { static qemu_irq aspeed_soc_ast1030_get_irq(AspeedSoCState *s, int dev) { + Aspeed10x0SoCState *a = ASPEED10X0_SOC(s); AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s); - return qdev_get_gpio_in(DEVICE(&s->armv7m), sc->irqmap[dev]); + return qdev_get_gpio_in(DEVICE(&a->armv7m), sc->irqmap[dev]); } static void aspeed_soc_ast1030_init(Object *obj) { + Aspeed10x0SoCState *a = ASPEED10X0_SOC(obj); AspeedSoCState *s = ASPEED_SOC(obj); AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s); char socname[8]; @@ -118,7 +120,7 @@ static void aspeed_soc_ast1030_init(Object *obj) g_assert_not_reached(); } - object_initialize_child(obj, "armv7m", &s->armv7m, TYPE_ARMV7M); + object_initialize_child(obj, "armv7m", &a->armv7m, TYPE_ARMV7M); s->sysclk = qdev_init_clock_in(DEVICE(s), "sysclk", NULL, NULL, 0); @@ -185,6 +187,7 @@ static void aspeed_soc_ast1030_init(Object *obj) static void aspeed_soc_ast1030_realize(DeviceState *dev_soc, Error **errp) { + Aspeed10x0SoCState *a = ASPEED10X0_SOC(dev_soc); AspeedSoCState *s = ASPEED_SOC(dev_soc); AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s); DeviceState *armv7m; @@ -206,17 +209,17 @@ static void aspeed_soc_ast1030_realize(DeviceState *dev_soc, Error **errp) 0x40000); /* AST1030 CPU Core */ - armv7m = DEVICE(&s->armv7m); + armv7m = DEVICE(&a->armv7m); qdev_prop_set_uint32(armv7m, "num-irq", 256); qdev_prop_set_string(armv7m, "cpu-type", sc->cpu_type); qdev_connect_clock_in(armv7m, "cpuclk", s->sysclk); - object_property_set_link(OBJECT(&s->armv7m), "memory", + object_property_set_link(OBJECT(&a->armv7m), "memory", OBJECT(s->memory), &error_abort); - sysbus_realize(SYS_BUS_DEVICE(&s->armv7m), &error_abort); + sysbus_realize(SYS_BUS_DEVICE(&a->armv7m), &error_abort); /* Internal SRAM */ sram_name = g_strdup_printf("aspeed.sram.%d", - CPU(s->armv7m.cpu)->cpu_index); + CPU(a->armv7m.cpu)->cpu_index); memory_region_init_ram(&s->sram, OBJECT(s), sram_name, sc->sram_size, &err); if (err != NULL) { error_propagate(errp, err); @@ -249,7 +252,7 @@ static void aspeed_soc_ast1030_realize(DeviceState *dev_soc, Error **errp) } aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->i2c), 0, sc->memmap[ASPEED_DEV_I2C]); for (i = 0; i < ASPEED_I2C_GET_CLASS(&s->i2c)->num_busses; i++) { - qemu_irq irq = qdev_get_gpio_in(DEVICE(&s->armv7m), + qemu_irq irq = qdev_get_gpio_in(DEVICE(&a->armv7m), sc->irqmap[ASPEED_DEV_I2C] + i); /* The AST1030 I2C controller has one IRQ per bus. */ sysbus_connect_irq(SYS_BUS_DEVICE(&s->i2c.busses[i]), 0, irq); @@ -261,7 +264,7 @@ static void aspeed_soc_ast1030_realize(DeviceState *dev_soc, Error **errp) } aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->i3c), 0, sc->memmap[ASPEED_DEV_I3C]); for (i = 0; i < ASPEED_I3C_NR_DEVICES; i++) { - qemu_irq irq = qdev_get_gpio_in(DEVICE(&s->armv7m), + qemu_irq irq = qdev_get_gpio_in(DEVICE(&a->armv7m), sc->irqmap[ASPEED_DEV_I3C] + i); /* The AST1030 I3C controller has one IRQ per bus. 
*/ sysbus_connect_irq(SYS_BUS_DEVICE(&s->i3c.devices[i]), 0, irq); @@ -290,19 +293,19 @@ static void aspeed_soc_ast1030_realize(DeviceState *dev_soc, Error **errp) * On the AST1030 LPC subdevice IRQs are connected straight to the GIC. */ sysbus_connect_irq(SYS_BUS_DEVICE(&s->lpc), 1 + aspeed_lpc_kcs_1, - qdev_get_gpio_in(DEVICE(&s->armv7m), + qdev_get_gpio_in(DEVICE(&a->armv7m), sc->irqmap[ASPEED_DEV_KCS] + aspeed_lpc_kcs_1)); sysbus_connect_irq(SYS_BUS_DEVICE(&s->lpc), 1 + aspeed_lpc_kcs_2, - qdev_get_gpio_in(DEVICE(&s->armv7m), + qdev_get_gpio_in(DEVICE(&a->armv7m), sc->irqmap[ASPEED_DEV_KCS] + aspeed_lpc_kcs_2)); sysbus_connect_irq(SYS_BUS_DEVICE(&s->lpc), 1 + aspeed_lpc_kcs_3, - qdev_get_gpio_in(DEVICE(&s->armv7m), + qdev_get_gpio_in(DEVICE(&a->armv7m), sc->irqmap[ASPEED_DEV_KCS] + aspeed_lpc_kcs_3)); sysbus_connect_irq(SYS_BUS_DEVICE(&s->lpc), 1 + aspeed_lpc_kcs_4, - qdev_get_gpio_in(DEVICE(&s->armv7m), + qdev_get_gpio_in(DEVICE(&a->armv7m), sc->irqmap[ASPEED_DEV_KCS] + aspeed_lpc_kcs_4)); /* UART */ @@ -435,18 +438,18 @@ static void aspeed_soc_ast1030_class_init(ObjectClass *klass, void *data) sc->get_irq = aspeed_soc_ast1030_get_irq; } -static const TypeInfo aspeed_soc_ast1030_type_info = { - .name = "ast1030-a1", - .parent = TYPE_ASPEED_SOC, - .instance_size = sizeof(AspeedSoCState), - .instance_init = aspeed_soc_ast1030_init, - .class_init = aspeed_soc_ast1030_class_init, - .class_size = sizeof(AspeedSoCClass), +static const TypeInfo aspeed_soc_ast10x0_types[] = { + { + .name = TYPE_ASPEED10X0_SOC, + .parent = TYPE_ASPEED_SOC, + .instance_size = sizeof(Aspeed10x0SoCState), + .abstract = true, + }, { + .name = "ast1030-a1", + .parent = TYPE_ASPEED10X0_SOC, + .instance_init = aspeed_soc_ast1030_init, + .class_init = aspeed_soc_ast1030_class_init, + }, }; -static void aspeed_soc_register_types(void) -{ - type_register_static(&aspeed_soc_ast1030_type_info); -} - -type_init(aspeed_soc_register_types) +DEFINE_TYPES(aspeed_soc_ast10x0_types) diff --git a/hw/arm/aspeed_soc.c b/hw/arm/aspeed_ast2400.c index bf22258de9..a4334c81b8 100644 --- a/hw/arm/aspeed_soc.c +++ b/hw/arm/aspeed_ast2400.c @@ -135,13 +135,15 @@ static const int aspeed_soc_ast2400_irqmap[] = { static qemu_irq aspeed_soc_ast2400_get_irq(AspeedSoCState *s, int dev) { + Aspeed2400SoCState *a = ASPEED2400_SOC(s); AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s); - return qdev_get_gpio_in(DEVICE(&s->vic), sc->irqmap[dev]); + return qdev_get_gpio_in(DEVICE(&a->vic), sc->irqmap[dev]); } -static void aspeed_soc_init(Object *obj) +static void aspeed_ast2400_soc_init(Object *obj) { + Aspeed2400SoCState *a = ASPEED2400_SOC(obj); AspeedSoCState *s = ASPEED_SOC(obj); AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s); int i; @@ -153,7 +155,7 @@ static void aspeed_soc_init(Object *obj) } for (i = 0; i < sc->num_cpus; i++) { - object_initialize_child(obj, "cpu[*]", &s->cpu[i], sc->cpu_type); + object_initialize_child(obj, "cpu[*]", &a->cpu[i], sc->cpu_type); } snprintf(typename, sizeof(typename), "aspeed.scu-%s", socname); @@ -167,7 +169,7 @@ static void aspeed_soc_init(Object *obj) object_property_add_alias(obj, "hw-prot-key", OBJECT(&s->scu), "hw-prot-key"); - object_initialize_child(obj, "vic", &s->vic, TYPE_ASPEED_VIC); + object_initialize_child(obj, "vic", &a->vic, TYPE_ASPEED_VIC); object_initialize_child(obj, "rtc", &s->rtc, TYPE_ASPEED_RTC); @@ -239,9 +241,10 @@ static void aspeed_soc_init(Object *obj) object_initialize_child(obj, "video", &s->video, TYPE_UNIMPLEMENTED_DEVICE); } -static void aspeed_soc_realize(DeviceState *dev, Error 
**errp) +static void aspeed_ast2400_soc_realize(DeviceState *dev, Error **errp) { int i; + Aspeed2400SoCState *a = ASPEED2400_SOC(dev); AspeedSoCState *s = ASPEED_SOC(dev); AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s); Error *err = NULL; @@ -264,15 +267,15 @@ static void aspeed_soc_realize(DeviceState *dev, Error **errp) /* CPU */ for (i = 0; i < sc->num_cpus; i++) { - object_property_set_link(OBJECT(&s->cpu[i]), "memory", + object_property_set_link(OBJECT(&a->cpu[i]), "memory", OBJECT(s->memory), &error_abort); - if (!qdev_realize(DEVICE(&s->cpu[i]), NULL, errp)) { + if (!qdev_realize(DEVICE(&a->cpu[i]), NULL, errp)) { return; } } /* SRAM */ - sram_name = g_strdup_printf("aspeed.sram.%d", CPU(&s->cpu[0])->cpu_index); + sram_name = g_strdup_printf("aspeed.sram.%d", CPU(&a->cpu[0])->cpu_index); memory_region_init_ram(&s->sram, OBJECT(s), sram_name, sc->sram_size, &err); if (err) { error_propagate(errp, err); @@ -288,14 +291,14 @@ static void aspeed_soc_realize(DeviceState *dev, Error **errp) aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->scu), 0, sc->memmap[ASPEED_DEV_SCU]); /* VIC */ - if (!sysbus_realize(SYS_BUS_DEVICE(&s->vic), errp)) { + if (!sysbus_realize(SYS_BUS_DEVICE(&a->vic), errp)) { return; } - aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->vic), 0, sc->memmap[ASPEED_DEV_VIC]); - sysbus_connect_irq(SYS_BUS_DEVICE(&s->vic), 0, - qdev_get_gpio_in(DEVICE(&s->cpu), ARM_CPU_IRQ)); - sysbus_connect_irq(SYS_BUS_DEVICE(&s->vic), 1, - qdev_get_gpio_in(DEVICE(&s->cpu), ARM_CPU_FIQ)); + aspeed_mmio_map(s, SYS_BUS_DEVICE(&a->vic), 0, sc->memmap[ASPEED_DEV_VIC]); + sysbus_connect_irq(SYS_BUS_DEVICE(&a->vic), 0, + qdev_get_gpio_in(DEVICE(&a->cpu), ARM_CPU_IRQ)); + sysbus_connect_irq(SYS_BUS_DEVICE(&a->vic), 1, + qdev_get_gpio_in(DEVICE(&a->cpu), ARM_CPU_FIQ)); /* RTC */ if (!sysbus_realize(SYS_BUS_DEVICE(&s->rtc), errp)) { @@ -497,36 +500,15 @@ static void aspeed_soc_realize(DeviceState *dev, Error **errp) sysbus_connect_irq(SYS_BUS_DEVICE(&s->hace), 0, aspeed_soc_get_irq(s, ASPEED_DEV_HACE)); } -static Property aspeed_soc_properties[] = { - DEFINE_PROP_LINK("memory", AspeedSoCState, memory, TYPE_MEMORY_REGION, - MemoryRegion *), - DEFINE_PROP_LINK("dram", AspeedSoCState, dram_mr, TYPE_MEMORY_REGION, - MemoryRegion *), - DEFINE_PROP_END_OF_LIST(), -}; -static void aspeed_soc_class_init(ObjectClass *oc, void *data) +static void aspeed_soc_ast2400_class_init(ObjectClass *oc, void *data) { + AspeedSoCClass *sc = ASPEED_SOC_CLASS(oc); DeviceClass *dc = DEVICE_CLASS(oc); - dc->realize = aspeed_soc_realize; + dc->realize = aspeed_ast2400_soc_realize; /* Reason: Uses serial_hds and nd_table in realize() directly */ dc->user_creatable = false; - device_class_set_props(dc, aspeed_soc_properties); -} - -static const TypeInfo aspeed_soc_type_info = { - .name = TYPE_ASPEED_SOC, - .parent = TYPE_DEVICE, - .instance_size = sizeof(AspeedSoCState), - .class_size = sizeof(AspeedSoCClass), - .class_init = aspeed_soc_class_init, - .abstract = true, -}; - -static void aspeed_soc_ast2400_class_init(ObjectClass *oc, void *data) -{ - AspeedSoCClass *sc = ASPEED_SOC_CLASS(oc); sc->name = "ast2400-a1"; sc->cpu_type = ARM_CPU_TYPE_NAME("arm926"); @@ -543,17 +525,14 @@ static void aspeed_soc_ast2400_class_init(ObjectClass *oc, void *data) sc->get_irq = aspeed_soc_ast2400_get_irq; } -static const TypeInfo aspeed_soc_ast2400_type_info = { - .name = "ast2400-a1", - .parent = TYPE_ASPEED_SOC, - .instance_init = aspeed_soc_init, - .instance_size = sizeof(AspeedSoCState), - .class_init = aspeed_soc_ast2400_class_init, -}; - static void 
aspeed_soc_ast2500_class_init(ObjectClass *oc, void *data) { AspeedSoCClass *sc = ASPEED_SOC_CLASS(oc); + DeviceClass *dc = DEVICE_CLASS(oc); + + dc->realize = aspeed_ast2400_soc_realize; + /* Reason: Uses serial_hds and nd_table in realize() directly */ + dc->user_creatable = false; sc->name = "ast2500-a1"; sc->cpu_type = ARM_CPU_TYPE_NAME("arm1176"); @@ -570,114 +549,22 @@ static void aspeed_soc_ast2500_class_init(ObjectClass *oc, void *data) sc->get_irq = aspeed_soc_ast2400_get_irq; } -static const TypeInfo aspeed_soc_ast2500_type_info = { - .name = "ast2500-a1", - .parent = TYPE_ASPEED_SOC, - .instance_init = aspeed_soc_init, - .instance_size = sizeof(AspeedSoCState), - .class_init = aspeed_soc_ast2500_class_init, -}; -static void aspeed_soc_register_types(void) -{ - type_register_static(&aspeed_soc_type_info); - type_register_static(&aspeed_soc_ast2400_type_info); - type_register_static(&aspeed_soc_ast2500_type_info); +static const TypeInfo aspeed_soc_ast2400_types[] = { + { + .name = TYPE_ASPEED2400_SOC, + .parent = TYPE_ASPEED_SOC, + .instance_init = aspeed_ast2400_soc_init, + .instance_size = sizeof(Aspeed2400SoCState), + .abstract = true, + }, { + .name = "ast2400-a1", + .parent = TYPE_ASPEED2400_SOC, + .class_init = aspeed_soc_ast2400_class_init, + }, { + .name = "ast2500-a1", + .parent = TYPE_ASPEED2400_SOC, + .class_init = aspeed_soc_ast2500_class_init, + }, }; -type_init(aspeed_soc_register_types); - -qemu_irq aspeed_soc_get_irq(AspeedSoCState *s, int dev) -{ - return ASPEED_SOC_GET_CLASS(s)->get_irq(s, dev); -} - -bool aspeed_soc_uart_realize(AspeedSoCState *s, Error **errp) -{ - AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s); - SerialMM *smm; - - for (int i = 0, uart = ASPEED_DEV_UART1; i < sc->uarts_num; i++, uart++) { - smm = &s->uart[i]; - - /* Chardev property is set by the machine. */ - qdev_prop_set_uint8(DEVICE(smm), "regshift", 2); - qdev_prop_set_uint32(DEVICE(smm), "baudbase", 38400); - qdev_set_legacy_instance_id(DEVICE(smm), sc->memmap[uart], 2); - qdev_prop_set_uint8(DEVICE(smm), "endianness", DEVICE_LITTLE_ENDIAN); - if (!sysbus_realize(SYS_BUS_DEVICE(smm), errp)) { - return false; - } - - sysbus_connect_irq(SYS_BUS_DEVICE(smm), 0, aspeed_soc_get_irq(s, uart)); - aspeed_mmio_map(s, SYS_BUS_DEVICE(smm), 0, sc->memmap[uart]); - } - - return true; -} - -void aspeed_soc_uart_set_chr(AspeedSoCState *s, int dev, Chardev *chr) -{ - AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s); - int i = dev - ASPEED_DEV_UART1; - - g_assert(0 <= i && i < ARRAY_SIZE(s->uart) && i < sc->uarts_num); - qdev_prop_set_chr(DEVICE(&s->uart[i]), "chardev", chr); -} - -/* - * SDMC should be realized first to get correct RAM size and max size - * values - */ -bool aspeed_soc_dram_init(AspeedSoCState *s, Error **errp) -{ - AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s); - ram_addr_t ram_size, max_ram_size; - - ram_size = object_property_get_uint(OBJECT(&s->sdmc), "ram-size", - &error_abort); - max_ram_size = object_property_get_uint(OBJECT(&s->sdmc), "max-ram-size", - &error_abort); - - memory_region_init(&s->dram_container, OBJECT(s), "ram-container", - max_ram_size); - memory_region_add_subregion(&s->dram_container, 0, s->dram_mr); - - /* - * Add a memory region beyond the RAM region to let firmwares scan - * the address space with load/store and guess how much RAM the - * SoC has. 
- */ - if (ram_size < max_ram_size) { - DeviceState *dev = qdev_new(TYPE_UNIMPLEMENTED_DEVICE); - - qdev_prop_set_string(dev, "name", "ram-empty"); - qdev_prop_set_uint64(dev, "size", max_ram_size - ram_size); - if (!sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), errp)) { - return false; - } - - memory_region_add_subregion_overlap(&s->dram_container, ram_size, - sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 0), -1000); - } - - memory_region_add_subregion(s->memory, - sc->memmap[ASPEED_DEV_SDRAM], &s->dram_container); - return true; -} - -void aspeed_mmio_map(AspeedSoCState *s, SysBusDevice *dev, int n, hwaddr addr) -{ - memory_region_add_subregion(s->memory, addr, - sysbus_mmio_get_region(dev, n)); -} - -void aspeed_mmio_map_unimplemented(AspeedSoCState *s, SysBusDevice *dev, - const char *name, hwaddr addr, uint64_t size) -{ - qdev_prop_set_string(DEVICE(dev), "name", name); - qdev_prop_set_uint64(DEVICE(dev), "size", size); - sysbus_realize(dev, &error_abort); - - memory_region_add_subregion_overlap(s->memory, addr, - sysbus_mmio_get_region(dev, 0), -1000); -} +DEFINE_TYPES(aspeed_soc_ast2400_types) diff --git a/hw/arm/aspeed_ast2600.c b/hw/arm/aspeed_ast2600.c index e122e1c32d..b965fbab5e 100644 --- a/hw/arm/aspeed_ast2600.c +++ b/hw/arm/aspeed_ast2600.c @@ -137,13 +137,15 @@ static const int aspeed_soc_ast2600_irqmap[] = { static qemu_irq aspeed_soc_ast2600_get_irq(AspeedSoCState *s, int dev) { + Aspeed2600SoCState *a = ASPEED2600_SOC(s); AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s); - return qdev_get_gpio_in(DEVICE(&s->a7mpcore), sc->irqmap[dev]); + return qdev_get_gpio_in(DEVICE(&a->a7mpcore), sc->irqmap[dev]); } static void aspeed_soc_ast2600_init(Object *obj) { + Aspeed2600SoCState *a = ASPEED2600_SOC(obj); AspeedSoCState *s = ASPEED_SOC(obj); AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s); int i; @@ -155,7 +157,7 @@ static void aspeed_soc_ast2600_init(Object *obj) } for (i = 0; i < sc->num_cpus; i++) { - object_initialize_child(obj, "cpu[*]", &s->cpu[i], sc->cpu_type); + object_initialize_child(obj, "cpu[*]", &a->cpu[i], sc->cpu_type); } snprintf(typename, sizeof(typename), "aspeed.scu-%s", socname); @@ -169,7 +171,7 @@ static void aspeed_soc_ast2600_init(Object *obj) object_property_add_alias(obj, "hw-prot-key", OBJECT(&s->scu), "hw-prot-key"); - object_initialize_child(obj, "a7mpcore", &s->a7mpcore, + object_initialize_child(obj, "a7mpcore", &a->a7mpcore, TYPE_A15MPCORE_PRIV); object_initialize_child(obj, "rtc", &s->rtc, TYPE_ASPEED_RTC); @@ -277,6 +279,7 @@ static uint64_t aspeed_calc_affinity(int cpu) static void aspeed_soc_ast2600_realize(DeviceState *dev, Error **errp) { int i; + Aspeed2600SoCState *a = ASPEED2600_SOC(dev); AspeedSoCState *s = ASPEED_SOC(dev); AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s); Error *err = NULL; @@ -306,39 +309,39 @@ static void aspeed_soc_ast2600_realize(DeviceState *dev, Error **errp) /* CPU */ for (i = 0; i < sc->num_cpus; i++) { if (sc->num_cpus > 1) { - object_property_set_int(OBJECT(&s->cpu[i]), "reset-cbar", + object_property_set_int(OBJECT(&a->cpu[i]), "reset-cbar", ASPEED_A7MPCORE_ADDR, &error_abort); } - object_property_set_int(OBJECT(&s->cpu[i]), "mp-affinity", + object_property_set_int(OBJECT(&a->cpu[i]), "mp-affinity", aspeed_calc_affinity(i), &error_abort); - object_property_set_int(OBJECT(&s->cpu[i]), "cntfrq", 1125000000, + object_property_set_int(OBJECT(&a->cpu[i]), "cntfrq", 1125000000, &error_abort); - object_property_set_bool(OBJECT(&s->cpu[i]), "neon", false, + object_property_set_bool(OBJECT(&a->cpu[i]), "neon", false, 
&error_abort); - object_property_set_bool(OBJECT(&s->cpu[i]), "vfp-d32", false, + object_property_set_bool(OBJECT(&a->cpu[i]), "vfp-d32", false, &error_abort); - object_property_set_link(OBJECT(&s->cpu[i]), "memory", + object_property_set_link(OBJECT(&a->cpu[i]), "memory", OBJECT(s->memory), &error_abort); - if (!qdev_realize(DEVICE(&s->cpu[i]), NULL, errp)) { + if (!qdev_realize(DEVICE(&a->cpu[i]), NULL, errp)) { return; } } /* A7MPCORE */ - object_property_set_int(OBJECT(&s->a7mpcore), "num-cpu", sc->num_cpus, + object_property_set_int(OBJECT(&a->a7mpcore), "num-cpu", sc->num_cpus, &error_abort); - object_property_set_int(OBJECT(&s->a7mpcore), "num-irq", + object_property_set_int(OBJECT(&a->a7mpcore), "num-irq", ROUND_UP(AST2600_MAX_IRQ + GIC_INTERNAL, 32), &error_abort); - sysbus_realize(SYS_BUS_DEVICE(&s->a7mpcore), &error_abort); - aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->a7mpcore), 0, ASPEED_A7MPCORE_ADDR); + sysbus_realize(SYS_BUS_DEVICE(&a->a7mpcore), &error_abort); + aspeed_mmio_map(s, SYS_BUS_DEVICE(&a->a7mpcore), 0, ASPEED_A7MPCORE_ADDR); for (i = 0; i < sc->num_cpus; i++) { - SysBusDevice *sbd = SYS_BUS_DEVICE(&s->a7mpcore); - DeviceState *d = DEVICE(&s->cpu[i]); + SysBusDevice *sbd = SYS_BUS_DEVICE(&a->a7mpcore); + DeviceState *d = DEVICE(&a->cpu[i]); irq = qdev_get_gpio_in(d, ARM_CPU_IRQ); sysbus_connect_irq(sbd, i, irq); @@ -351,7 +354,7 @@ static void aspeed_soc_ast2600_realize(DeviceState *dev, Error **errp) } /* SRAM */ - sram_name = g_strdup_printf("aspeed.sram.%d", CPU(&s->cpu[0])->cpu_index); + sram_name = g_strdup_printf("aspeed.sram.%d", CPU(&a->cpu[0])->cpu_index); memory_region_init_ram(&s->sram, OBJECT(s), sram_name, sc->sram_size, &err); if (err) { error_propagate(errp, err); @@ -413,7 +416,7 @@ static void aspeed_soc_ast2600_realize(DeviceState *dev, Error **errp) } aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->i2c), 0, sc->memmap[ASPEED_DEV_I2C]); for (i = 0; i < ASPEED_I2C_GET_CLASS(&s->i2c)->num_busses; i++) { - irq = qdev_get_gpio_in(DEVICE(&s->a7mpcore), + irq = qdev_get_gpio_in(DEVICE(&a->a7mpcore), sc->irqmap[ASPEED_DEV_I2C] + i); /* The AST2600 I2C controller has one IRQ per bus. */ sysbus_connect_irq(SYS_BUS_DEVICE(&s->i2c.busses[i]), 0, irq); @@ -579,19 +582,19 @@ static void aspeed_soc_ast2600_realize(DeviceState *dev, Error **errp) * offset 0. */ sysbus_connect_irq(SYS_BUS_DEVICE(&s->lpc), 1 + aspeed_lpc_kcs_1, - qdev_get_gpio_in(DEVICE(&s->a7mpcore), + qdev_get_gpio_in(DEVICE(&a->a7mpcore), sc->irqmap[ASPEED_DEV_KCS] + aspeed_lpc_kcs_1)); sysbus_connect_irq(SYS_BUS_DEVICE(&s->lpc), 1 + aspeed_lpc_kcs_2, - qdev_get_gpio_in(DEVICE(&s->a7mpcore), + qdev_get_gpio_in(DEVICE(&a->a7mpcore), sc->irqmap[ASPEED_DEV_KCS] + aspeed_lpc_kcs_2)); sysbus_connect_irq(SYS_BUS_DEVICE(&s->lpc), 1 + aspeed_lpc_kcs_3, - qdev_get_gpio_in(DEVICE(&s->a7mpcore), + qdev_get_gpio_in(DEVICE(&a->a7mpcore), sc->irqmap[ASPEED_DEV_KCS] + aspeed_lpc_kcs_3)); sysbus_connect_irq(SYS_BUS_DEVICE(&s->lpc), 1 + aspeed_lpc_kcs_4, - qdev_get_gpio_in(DEVICE(&s->a7mpcore), + qdev_get_gpio_in(DEVICE(&a->a7mpcore), sc->irqmap[ASPEED_DEV_KCS] + aspeed_lpc_kcs_4)); /* HACE */ @@ -611,7 +614,7 @@ static void aspeed_soc_ast2600_realize(DeviceState *dev, Error **errp) } aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->i3c), 0, sc->memmap[ASPEED_DEV_I3C]); for (i = 0; i < ASPEED_I3C_NR_DEVICES; i++) { - irq = qdev_get_gpio_in(DEVICE(&s->a7mpcore), + irq = qdev_get_gpio_in(DEVICE(&a->a7mpcore), sc->irqmap[ASPEED_DEV_I3C] + i); /* The AST2600 I3C controller has one IRQ per bus. 
*/ sysbus_connect_irq(SYS_BUS_DEVICE(&s->i3c.devices[i]), 0, irq); @@ -646,18 +649,18 @@ static void aspeed_soc_ast2600_class_init(ObjectClass *oc, void *data) sc->get_irq = aspeed_soc_ast2600_get_irq; } -static const TypeInfo aspeed_soc_ast2600_type_info = { - .name = "ast2600-a3", - .parent = TYPE_ASPEED_SOC, - .instance_size = sizeof(AspeedSoCState), - .instance_init = aspeed_soc_ast2600_init, - .class_init = aspeed_soc_ast2600_class_init, - .class_size = sizeof(AspeedSoCClass), +static const TypeInfo aspeed_soc_ast2600_types[] = { + { + .name = TYPE_ASPEED2600_SOC, + .parent = TYPE_ASPEED_SOC, + .instance_size = sizeof(Aspeed2600SoCState), + .abstract = true, + }, { + .name = "ast2600-a3", + .parent = TYPE_ASPEED2600_SOC, + .instance_init = aspeed_soc_ast2600_init, + .class_init = aspeed_soc_ast2600_class_init, + }, }; -static void aspeed_soc_register_types(void) -{ - type_register_static(&aspeed_soc_ast2600_type_info); -}; - -type_init(aspeed_soc_register_types) +DEFINE_TYPES(aspeed_soc_ast2600_types) diff --git a/hw/arm/aspeed_soc_common.c b/hw/arm/aspeed_soc_common.c new file mode 100644 index 0000000000..828f61093b --- /dev/null +++ b/hw/arm/aspeed_soc_common.c @@ -0,0 +1,154 @@ +/* + * ASPEED SoC family + * + * Andrew Jeffery <andrew@aj.id.au> + * Jeremy Kerr <jk@ozlabs.org> + * + * Copyright 2016 IBM Corp. + * + * This code is licensed under the GPL version 2 or later. See + * the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/qdev-properties.h" +#include "hw/misc/unimp.h" +#include "hw/arm/aspeed_soc.h" +#include "hw/char/serial.h" + + +qemu_irq aspeed_soc_get_irq(AspeedSoCState *s, int dev) +{ + return ASPEED_SOC_GET_CLASS(s)->get_irq(s, dev); +} + +bool aspeed_soc_uart_realize(AspeedSoCState *s, Error **errp) +{ + AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s); + SerialMM *smm; + + for (int i = 0, uart = ASPEED_DEV_UART1; i < sc->uarts_num; i++, uart++) { + smm = &s->uart[i]; + + /* Chardev property is set by the machine. */ + qdev_prop_set_uint8(DEVICE(smm), "regshift", 2); + qdev_prop_set_uint32(DEVICE(smm), "baudbase", 38400); + qdev_set_legacy_instance_id(DEVICE(smm), sc->memmap[uart], 2); + qdev_prop_set_uint8(DEVICE(smm), "endianness", DEVICE_LITTLE_ENDIAN); + if (!sysbus_realize(SYS_BUS_DEVICE(smm), errp)) { + return false; + } + + sysbus_connect_irq(SYS_BUS_DEVICE(smm), 0, aspeed_soc_get_irq(s, uart)); + aspeed_mmio_map(s, SYS_BUS_DEVICE(smm), 0, sc->memmap[uart]); + } + + return true; +} + +void aspeed_soc_uart_set_chr(AspeedSoCState *s, int dev, Chardev *chr) +{ + AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s); + int i = dev - ASPEED_DEV_UART1; + + g_assert(0 <= i && i < ARRAY_SIZE(s->uart) && i < sc->uarts_num); + qdev_prop_set_chr(DEVICE(&s->uart[i]), "chardev", chr); +} + +/* + * SDMC should be realized first to get correct RAM size and max size + * values + */ +bool aspeed_soc_dram_init(AspeedSoCState *s, Error **errp) +{ + AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s); + ram_addr_t ram_size, max_ram_size; + + ram_size = object_property_get_uint(OBJECT(&s->sdmc), "ram-size", + &error_abort); + max_ram_size = object_property_get_uint(OBJECT(&s->sdmc), "max-ram-size", + &error_abort); + + memory_region_init(&s->dram_container, OBJECT(s), "ram-container", + max_ram_size); + memory_region_add_subregion(&s->dram_container, 0, s->dram_mr); + + /* + * Add a memory region beyond the RAM region to let firmwares scan + * the address space with load/store and guess how much RAM the + * SoC has. 
+ */ + if (ram_size < max_ram_size) { + DeviceState *dev = qdev_new(TYPE_UNIMPLEMENTED_DEVICE); + + qdev_prop_set_string(dev, "name", "ram-empty"); + qdev_prop_set_uint64(dev, "size", max_ram_size - ram_size); + if (!sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), errp)) { + return false; + } + + memory_region_add_subregion_overlap(&s->dram_container, ram_size, + sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 0), -1000); + } + + memory_region_add_subregion(s->memory, + sc->memmap[ASPEED_DEV_SDRAM], &s->dram_container); + return true; +} + +void aspeed_mmio_map(AspeedSoCState *s, SysBusDevice *dev, int n, hwaddr addr) +{ + memory_region_add_subregion(s->memory, addr, + sysbus_mmio_get_region(dev, n)); +} + +void aspeed_mmio_map_unimplemented(AspeedSoCState *s, SysBusDevice *dev, + const char *name, hwaddr addr, uint64_t size) +{ + qdev_prop_set_string(DEVICE(dev), "name", name); + qdev_prop_set_uint64(DEVICE(dev), "size", size); + sysbus_realize(dev, &error_abort); + + memory_region_add_subregion_overlap(s->memory, addr, + sysbus_mmio_get_region(dev, 0), -1000); +} + +static void aspeed_soc_realize(DeviceState *dev, Error **errp) +{ + AspeedSoCState *s = ASPEED_SOC(dev); + + if (!s->memory) { + error_setg(errp, "'memory' link is not set"); + return; + } +} + +static Property aspeed_soc_properties[] = { + DEFINE_PROP_LINK("dram", AspeedSoCState, dram_mr, TYPE_MEMORY_REGION, + MemoryRegion *), + DEFINE_PROP_LINK("memory", AspeedSoCState, memory, TYPE_MEMORY_REGION, + MemoryRegion *), + DEFINE_PROP_END_OF_LIST(), +}; + +static void aspeed_soc_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + dc->realize = aspeed_soc_realize; + device_class_set_props(dc, aspeed_soc_properties); +} + +static const TypeInfo aspeed_soc_types[] = { + { + .name = TYPE_ASPEED_SOC, + .parent = TYPE_DEVICE, + .instance_size = sizeof(AspeedSoCState), + .class_size = sizeof(AspeedSoCClass), + .class_init = aspeed_soc_class_init, + .abstract = true, + }, +}; + +DEFINE_TYPES(aspeed_soc_types) diff --git a/hw/arm/fby35.c b/hw/arm/fby35.c index f2ff6c1abf..c9964bd283 100644 --- a/hw/arm/fby35.c +++ b/hw/arm/fby35.c @@ -27,8 +27,8 @@ struct Fby35State { MemoryRegion bic_memory; Clock *bic_sysclk; - AspeedSoCState bmc; - AspeedSoCState bic; + Aspeed2600SoCState bmc; + Aspeed10x0SoCState bic; bool mmio_exec; }; @@ -70,7 +70,10 @@ static void fby35_bmc_write_boot_rom(DriveInfo *dinfo, MemoryRegion *mr, static void fby35_bmc_init(Fby35State *s) { + AspeedSoCState *soc; + object_initialize_child(OBJECT(s), "bmc", &s->bmc, "ast2600-a3"); + soc = ASPEED_SOC(&s->bmc); memory_region_init(&s->bmc_memory, OBJECT(&s->bmc), "bmc-memory", UINT64_MAX); @@ -87,22 +90,21 @@ static void fby35_bmc_init(Fby35State *s) &error_abort); object_property_set_int(OBJECT(&s->bmc), "hw-strap2", 0x00000003, &error_abort); - aspeed_soc_uart_set_chr(&s->bmc, ASPEED_DEV_UART5, serial_hd(0)); + aspeed_soc_uart_set_chr(soc, ASPEED_DEV_UART5, serial_hd(0)); qdev_realize(DEVICE(&s->bmc), NULL, &error_abort); - aspeed_board_init_flashes(&s->bmc.fmc, "n25q00", 2, 0); + aspeed_board_init_flashes(&soc->fmc, "n25q00", 2, 0); /* Install first FMC flash content as a boot rom. 
*/ if (!s->mmio_exec) { DriveInfo *mtd0 = drive_get(IF_MTD, 0, 0); if (mtd0) { - AspeedSoCState *bmc = &s->bmc; - uint64_t rom_size = memory_region_size(&bmc->spi_boot); + uint64_t rom_size = memory_region_size(&soc->spi_boot); memory_region_init_rom(&s->bmc_boot_rom, NULL, "aspeed.boot_rom", rom_size, &error_abort); - memory_region_add_subregion_overlap(&bmc->spi_boot_container, 0, + memory_region_add_subregion_overlap(&soc->spi_boot_container, 0, &s->bmc_boot_rom, 1); fby35_bmc_write_boot_rom(mtd0, &s->bmc_boot_rom, @@ -114,10 +116,13 @@ static void fby35_bmc_init(Fby35State *s) static void fby35_bic_init(Fby35State *s) { + AspeedSoCState *soc; + s->bic_sysclk = clock_new(OBJECT(s), "SYSCLK"); clock_set_hz(s->bic_sysclk, 200000000ULL); object_initialize_child(OBJECT(s), "bic", &s->bic, "ast1030-a1"); + soc = ASPEED_SOC(&s->bic); memory_region_init(&s->bic_memory, OBJECT(&s->bic), "bic-memory", UINT64_MAX); @@ -125,12 +130,12 @@ static void fby35_bic_init(Fby35State *s) qdev_connect_clock_in(DEVICE(&s->bic), "sysclk", s->bic_sysclk); object_property_set_link(OBJECT(&s->bic), "memory", OBJECT(&s->bic_memory), &error_abort); - aspeed_soc_uart_set_chr(&s->bic, ASPEED_DEV_UART5, serial_hd(1)); + aspeed_soc_uart_set_chr(soc, ASPEED_DEV_UART5, serial_hd(1)); qdev_realize(DEVICE(&s->bic), NULL, &error_abort); - aspeed_board_init_flashes(&s->bic.fmc, "sst25vf032b", 2, 2); - aspeed_board_init_flashes(&s->bic.spi[0], "sst25vf032b", 2, 4); - aspeed_board_init_flashes(&s->bic.spi[1], "sst25vf032b", 2, 6); + aspeed_board_init_flashes(&soc->fmc, "sst25vf032b", 2, 2); + aspeed_board_init_flashes(&soc->spi[0], "sst25vf032b", 2, 4); + aspeed_board_init_flashes(&soc->spi[1], "sst25vf032b", 2, 6); } static void fby35_init(MachineState *machine) diff --git a/hw/arm/meson.build b/hw/arm/meson.build index a6feaf1af9..68245d3ad1 100644 --- a/hw/arm/meson.build +++ b/hw/arm/meson.build @@ -48,8 +48,9 @@ arm_ss.add(when: 'CONFIG_FSL_IMX25', if_true: files('fsl-imx25.c', 'imx25_pdk.c' arm_ss.add(when: 'CONFIG_FSL_IMX31', if_true: files('fsl-imx31.c', 'kzm.c')) arm_ss.add(when: 'CONFIG_FSL_IMX6', if_true: files('fsl-imx6.c')) arm_ss.add(when: 'CONFIG_ASPEED_SOC', if_true: files( - 'aspeed_soc.c', 'aspeed.c', + 'aspeed_soc_common.c', + 'aspeed_ast2400.c', 'aspeed_ast2600.c', 'aspeed_ast10x0.c', 'aspeed_eeprom.c', diff --git a/hw/i386/kvm/clock.c b/hw/i386/kvm/clock.c index f25977d3f6..e756b0aa43 100644 --- a/hw/i386/kvm/clock.c +++ b/hw/i386/kvm/clock.c @@ -333,10 +333,6 @@ void kvmclock_create(bool create_always) X86CPU *cpu = X86_CPU(first_cpu); assert(kvm_enabled()); - if (!kvm_has_adjust_clock()) { - return; - } - if (create_always || cpu->env.features[FEAT_KVM] & ((1ULL << KVM_FEATURE_CLOCKSOURCE) | (1ULL << KVM_FEATURE_CLOCKSOURCE2))) { diff --git a/hw/i386/kvm/i8254.c b/hw/i386/kvm/i8254.c index a649b2b7ca..e49b9c4b56 100644 --- a/hw/i386/kvm/i8254.c +++ b/hw/i386/kvm/i8254.c @@ -97,24 +97,12 @@ static void kvm_pit_get(PITCommonState *pit) return; } - if (kvm_has_pit_state2()) { - ret = kvm_vm_ioctl(kvm_state, KVM_GET_PIT2, &kpit); - if (ret < 0) { - fprintf(stderr, "KVM_GET_PIT2 failed: %s\n", strerror(-ret)); - abort(); - } - pit->channels[0].irq_disabled = kpit.flags & KVM_PIT_FLAGS_HPET_LEGACY; - } else { - /* - * kvm_pit_state2 is superset of kvm_pit_state struct, - * so we can use it for KVM_GET_PIT as well. 
- */ - ret = kvm_vm_ioctl(kvm_state, KVM_GET_PIT, &kpit); - if (ret < 0) { - fprintf(stderr, "KVM_GET_PIT failed: %s\n", strerror(-ret)); - abort(); - } + ret = kvm_vm_ioctl(kvm_state, KVM_GET_PIT2, &kpit); + if (ret < 0) { + fprintf(stderr, "KVM_GET_PIT2 failed: %s\n", strerror(-ret)); + abort(); } + pit->channels[0].irq_disabled = kpit.flags & KVM_PIT_FLAGS_HPET_LEGACY; for (i = 0; i < 3; i++) { kchan = &kpit.channels[i]; sc = &pit->channels[i]; @@ -170,12 +158,9 @@ static void kvm_pit_put(PITCommonState *pit) kchan->count_load_time = sc->count_load_time - s->kernel_clock_offset; } - ret = kvm_vm_ioctl(kvm_state, - kvm_has_pit_state2() ? KVM_SET_PIT2 : KVM_SET_PIT, - &kpit); + ret = kvm_vm_ioctl(kvm_state, KVM_SET_PIT2, &kpit); if (ret < 0) { - fprintf(stderr, "%s failed: %s\n", - kvm_has_pit_state2() ? "KVM_SET_PIT2" : "KVM_SET_PIT", + fprintf(stderr, "KVM_SET_PIT2 failed: %s\n", strerror(-ret)); abort(); } @@ -261,11 +246,12 @@ static void kvm_pit_realizefn(DeviceState *dev, Error **errp) }; int ret; - if (kvm_check_extension(kvm_state, KVM_CAP_PIT2)) { - ret = kvm_vm_ioctl(kvm_state, KVM_CREATE_PIT2, &config); - } else { - ret = kvm_vm_ioctl(kvm_state, KVM_CREATE_PIT); + if (!kvm_check_extension(kvm_state, KVM_CAP_PIT_STATE2) || + !kvm_check_extension(kvm_state, KVM_CAP_PIT2)) { + error_setg(errp, "In-kernel PIT not available"); } + + ret = kvm_vm_ioctl(kvm_state, KVM_CREATE_PIT2, &config); if (ret < 0) { error_setg(errp, "Create kernel PIC irqchip failed: %s", strerror(-ret)); diff --git a/hw/i386/pc.c b/hw/i386/pc.c index 11fed78d17..6031234a73 100644 --- a/hw/i386/pc.c +++ b/hw/i386/pc.c @@ -1214,12 +1214,8 @@ void pc_basic_device_init(struct PCMachineState *pcms, /* * Check if an HPET shall be created. - * - * Without KVM_CAP_PIT_STATE2, we cannot switch off the in-kernel PIT - * when the HPET wants to take over. Thus we have to disable the latter. */ - if (pcms->hpet_enabled && (!kvm_irqchip_in_kernel() || - kvm_has_pit_state2())) { + if (pcms->hpet_enabled) { qemu_irq rtc_irq; hpet = qdev_try_new(TYPE_HPET); diff --git a/hw/intc/arm_gicv3_its_common.c b/hw/intc/arm_gicv3_its_common.c index abaf77057e..fddd6d490c 100644 --- a/hw/intc/arm_gicv3_its_common.c +++ b/hw/intc/arm_gicv3_its_common.c @@ -163,8 +163,7 @@ type_init(gicv3_its_common_register_types) const char *its_class_name(void) { if (kvm_irqchip_in_kernel()) { - /* KVM implementation requires this capability */ - return kvm_direct_msi_enabled() ? 
"arm-its-kvm" : NULL; + return "arm-its-kvm"; } else { /* Software emulation based model */ return "arm-gicv3-its"; diff --git a/hw/intc/arm_gicv3_its_kvm.c b/hw/intc/arm_gicv3_its_kvm.c index 61c1cc7bdb..f7df602cff 100644 --- a/hw/intc/arm_gicv3_its_kvm.c +++ b/hw/intc/arm_gicv3_its_kvm.c @@ -123,7 +123,7 @@ static void kvm_arm_its_realize(DeviceState *dev, Error **errp) kvm_msi_use_devid = true; kvm_gsi_direct_mapping = false; - kvm_msi_via_irqfd_allowed = kvm_irqfds_enabled(); + kvm_msi_via_irqfd_allowed = true; } /** diff --git a/hw/misc/pci-testdev.c b/hw/misc/pci-testdev.c index 49303134e4..acedd0f82b 100644 --- a/hw/misc/pci-testdev.c +++ b/hw/misc/pci-testdev.c @@ -245,7 +245,6 @@ static void pci_testdev_realize(PCIDevice *pci_dev, Error **errp) uint8_t *pci_conf; char *name; int r, i; - bool fastmmio = kvm_ioeventfd_any_length_enabled(); pci_conf = pci_dev->config; @@ -279,7 +278,7 @@ static void pci_testdev_realize(PCIDevice *pci_dev, Error **errp) g_free(name); test->hdr->offset = cpu_to_le32(IOTEST_SIZE(i) + i * IOTEST_ACCESS_WIDTH); test->match_data = strcmp(IOTEST_TEST(i), "wildcard-eventfd"); - if (fastmmio && IOTEST_IS_MEM(i) && !test->match_data) { + if (IOTEST_IS_MEM(i) && !test->match_data) { test->size = 0; } else { test->size = IOTEST_ACCESS_WIDTH; diff --git a/hw/s390x/virtio-ccw.c b/hw/s390x/virtio-ccw.c index 17c548b84f..80453718a3 100644 --- a/hw/s390x/virtio-ccw.c +++ b/hw/s390x/virtio-ccw.c @@ -768,10 +768,6 @@ static void virtio_ccw_device_realize(VirtioCcwDevice *dev, Error **errp) sch->cssid, sch->ssid, sch->schid, sch->devno, ccw_dev->devno.valid ? "user-configured" : "auto-configured"); - if (kvm_enabled() && !kvm_eventfds_enabled()) { - dev->flags &= ~VIRTIO_CCW_FLAG_USE_IOEVENTFD; - } - /* fd-based ioevents can't be synchronized in record/replay */ if (replay_mode != REPLAY_MODE_NONE) { dev->flags &= ~VIRTIO_CCW_FLAG_USE_IOEVENTFD; diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c index b8a7b5542d..7b42ae8aae 100644 --- a/hw/virtio/vhost-user.c +++ b/hw/virtio/vhost-user.c @@ -264,11 +264,6 @@ struct scrub_regions { int fd_idx; }; -static bool ioeventfd_enabled(void) -{ - return !kvm_enabled() || kvm_eventfds_enabled(); -} - static int vhost_user_read_header(struct vhost_dev *dev, VhostUserMsg *msg) { struct vhost_user *u = dev->opaque; @@ -1318,7 +1313,7 @@ static int vhost_set_vring_file(struct vhost_dev *dev, .hdr.size = sizeof(msg.payload.u64), }; - if (ioeventfd_enabled() && file->fd > 0) { + if (file->fd > 0) { fds[fd_num++] = file->fd; } else { msg.payload.u64 |= VHOST_USER_VRING_NOFD_MASK; diff --git a/hw/virtio/virtio-mmio.c b/hw/virtio/virtio-mmio.c index c2c6d85475..22f15e1e02 100644 --- a/hw/virtio/virtio-mmio.c +++ b/hw/virtio/virtio-mmio.c @@ -761,10 +761,6 @@ static void virtio_mmio_realizefn(DeviceState *d, Error **errp) qbus_init(&proxy->bus, sizeof(proxy->bus), TYPE_VIRTIO_MMIO_BUS, d, NULL); sysbus_init_irq(sbd, &proxy->irq); - if (!kvm_eventfds_enabled()) { - proxy->flags &= ~VIRTIO_IOMMIO_FLAG_USE_IOEVENTFD; - } - /* fd-based ioevents can't be synchronized in record/replay */ if (replay_mode != REPLAY_MODE_NONE) { proxy->flags &= ~VIRTIO_IOMMIO_FLAG_USE_IOEVENTFD; diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c index af1f4bc187..205dbf24fb 100644 --- a/hw/virtio/virtio-pci.c +++ b/hw/virtio/virtio-pci.c @@ -332,7 +332,6 @@ static int virtio_pci_ioeventfd_assign(DeviceState *d, EventNotifier *notifier, VirtQueue *vq = virtio_get_queue(vdev, n); bool legacy = virtio_pci_legacy(proxy); bool modern = 
virtio_pci_modern(proxy); - bool fast_mmio = kvm_ioeventfd_any_length_enabled(); bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY; MemoryRegion *modern_mr = &proxy->notify.mr; MemoryRegion *modern_notify_mr = &proxy->notify_pio.mr; @@ -343,13 +342,8 @@ static int virtio_pci_ioeventfd_assign(DeviceState *d, EventNotifier *notifier, if (assign) { if (modern) { - if (fast_mmio) { - memory_region_add_eventfd(modern_mr, modern_addr, 0, - false, n, notifier); - } else { - memory_region_add_eventfd(modern_mr, modern_addr, 2, - false, n, notifier); - } + memory_region_add_eventfd(modern_mr, modern_addr, 0, + false, n, notifier); if (modern_pio) { memory_region_add_eventfd(modern_notify_mr, 0, 2, true, n, notifier); @@ -361,13 +355,8 @@ static int virtio_pci_ioeventfd_assign(DeviceState *d, EventNotifier *notifier, } } else { if (modern) { - if (fast_mmio) { - memory_region_del_eventfd(modern_mr, modern_addr, 0, - false, n, notifier); - } else { - memory_region_del_eventfd(modern_mr, modern_addr, 2, - false, n, notifier); - } + memory_region_del_eventfd(modern_mr, modern_addr, 0, + false, n, notifier); if (modern_pio) { memory_region_del_eventfd(modern_notify_mr, 0, 2, true, n, notifier); @@ -2114,10 +2103,6 @@ static void virtio_pci_realize(PCIDevice *pci_dev, Error **errp) bool pcie_port = pci_bus_is_express(pci_get_bus(pci_dev)) && !pci_bus_is_root(pci_get_bus(pci_dev)); - if (kvm_enabled() && !kvm_has_many_ioeventfds()) { - proxy->flags &= ~VIRTIO_PCI_FLAG_USE_IOEVENTFD; - } - /* fd-based ioevents can't be synchronized in record/replay */ if (replay_mode != REPLAY_MODE_NONE) { proxy->flags &= ~VIRTIO_PCI_FLAG_USE_IOEVENTFD; diff --git a/include/hw/arm/aspeed_soc.h b/include/hw/arm/aspeed_soc.h index 8adff70072..cb832bc1ee 100644 --- a/include/hw/arm/aspeed_soc.h +++ b/include/hw/arm/aspeed_soc.h @@ -47,20 +47,14 @@ #define ASPEED_JTAG_NUM 2 struct AspeedSoCState { - /*< private >*/ DeviceState parent; - /*< public >*/ - ARMCPU cpu[ASPEED_CPUS_NUM]; - A15MPPrivState a7mpcore; - ARMv7MState armv7m; MemoryRegion *memory; MemoryRegion *dram_mr; MemoryRegion dram_container; MemoryRegion sram; MemoryRegion spi_boot_container; MemoryRegion spi_boot; - AspeedVICState vic; AspeedRtcState rtc; AspeedTimerCtrlState timerctrl; AspeedI2CState i2c; @@ -101,6 +95,35 @@ struct AspeedSoCState { #define TYPE_ASPEED_SOC "aspeed-soc" OBJECT_DECLARE_TYPE(AspeedSoCState, AspeedSoCClass, ASPEED_SOC) +struct Aspeed2400SoCState { + AspeedSoCState parent; + + ARMCPU cpu[ASPEED_CPUS_NUM]; + AspeedVICState vic; +}; + +#define TYPE_ASPEED2400_SOC "aspeed2400-soc" +OBJECT_DECLARE_SIMPLE_TYPE(Aspeed2400SoCState, ASPEED2400_SOC) + +struct Aspeed2600SoCState { + AspeedSoCState parent; + + A15MPPrivState a7mpcore; + ARMCPU cpu[ASPEED_CPUS_NUM]; /* XXX belong to a7mpcore */ +}; + +#define TYPE_ASPEED2600_SOC "aspeed2600-soc" +OBJECT_DECLARE_SIMPLE_TYPE(Aspeed2600SoCState, ASPEED2600_SOC) + +struct Aspeed10x0SoCState { + AspeedSoCState parent; + + ARMv7MState armv7m; +}; + +#define TYPE_ASPEED10X0_SOC "aspeed10x0-soc" +OBJECT_DECLARE_SIMPLE_TYPE(Aspeed10x0SoCState, ASPEED10X0_SOC) + struct AspeedSoCClass { DeviceClass parent_class; diff --git a/include/sysemu/kvm.h b/include/sysemu/kvm.h index 97a8a4f201..80b69d88f6 100644 --- a/include/sysemu/kvm.h +++ b/include/sysemu/kvm.h @@ -36,15 +36,11 @@ extern bool kvm_kernel_irqchip; extern bool kvm_split_irqchip; extern bool kvm_async_interrupts_allowed; extern bool kvm_halt_in_kernel_allowed; -extern bool kvm_eventfds_allowed; -extern bool kvm_irqfds_allowed; extern 
bool kvm_resamplefds_allowed; extern bool kvm_msi_via_irqfd_allowed; extern bool kvm_gsi_routing_allowed; extern bool kvm_gsi_direct_mapping; extern bool kvm_readonly_mem_allowed; -extern bool kvm_direct_msi_allowed; -extern bool kvm_ioeventfd_any_length_allowed; extern bool kvm_msi_use_devid; #define kvm_enabled() (kvm_allowed) @@ -89,22 +85,15 @@ extern bool kvm_msi_use_devid; #define kvm_halt_in_kernel() (kvm_halt_in_kernel_allowed) /** - * kvm_eventfds_enabled: - * - * Returns: true if we can use eventfds to receive notifications - * from a KVM CPU (ie the kernel supports eventds and we are running - * with a configuration where it is meaningful to use them). - */ -#define kvm_eventfds_enabled() (kvm_eventfds_allowed) - -/** * kvm_irqfds_enabled: * * Returns: true if we can use irqfds to inject interrupts into * a KVM CPU (ie the kernel supports irqfds and we are running * with a configuration where it is meaningful to use them). + * + * Always available if running with in-kernel irqchip. */ -#define kvm_irqfds_enabled() (kvm_irqfds_allowed) +#define kvm_irqfds_enabled() kvm_irqchip_in_kernel() /** * kvm_resamplefds_enabled: @@ -148,19 +137,6 @@ extern bool kvm_msi_use_devid; #define kvm_readonly_mem_enabled() (kvm_readonly_mem_allowed) /** - * kvm_direct_msi_enabled: - * - * Returns: true if KVM allows direct MSI injection. - */ -#define kvm_direct_msi_enabled() (kvm_direct_msi_allowed) - -/** - * kvm_ioeventfd_any_length_enabled: - * Returns: true if KVM allows any length io eventfd. - */ -#define kvm_ioeventfd_any_length_enabled() (kvm_ioeventfd_any_length_allowed) - -/** * kvm_msi_devid_required: * Returns: true if KVM requires a device id to be provided while * defining an MSI routing entry. @@ -174,15 +150,12 @@ extern bool kvm_msi_use_devid; #define kvm_irqchip_is_split() (false) #define kvm_async_interrupts_enabled() (false) #define kvm_halt_in_kernel() (false) -#define kvm_eventfds_enabled() (false) #define kvm_irqfds_enabled() (false) #define kvm_resamplefds_enabled() (false) #define kvm_msi_via_irqfd_enabled() (false) #define kvm_gsi_routing_allowed() (false) #define kvm_gsi_direct_mapping() (false) #define kvm_readonly_mem_enabled() (false) -#define kvm_direct_msi_enabled() (false) -#define kvm_ioeventfd_any_length_enabled() (false) #define kvm_msi_devid_required() (false) #endif /* CONFIG_KVM_IS_POSSIBLE */ @@ -219,12 +192,8 @@ unsigned int kvm_get_max_memslots(void); unsigned int kvm_get_free_memslots(void); bool kvm_has_sync_mmu(void); int kvm_has_vcpu_events(void); -int kvm_has_robust_singlestep(void); -int kvm_has_debugregs(void); int kvm_max_nested_state_length(void); -int kvm_has_many_ioeventfds(void); int kvm_has_gsi_routing(void); -int kvm_has_intx_set_mask(void); /** * kvm_arm_supports_user_irq diff --git a/include/sysemu/kvm_int.h b/include/sysemu/kvm_int.h index 075939a3c4..fd846394be 100644 --- a/include/sysemu/kvm_int.h +++ b/include/sysemu/kvm_int.h @@ -78,14 +78,10 @@ struct KVMState struct kvm_coalesced_mmio_ring *coalesced_mmio_ring; bool coalesced_flush_in_progress; int vcpu_events; - int robust_singlestep; - int debugregs; #ifdef KVM_CAP_SET_GUEST_DEBUG QTAILQ_HEAD(, kvm_sw_breakpoint) kvm_sw_breakpoints; #endif int max_nested_state_len; - int many_ioeventfds; - int intx_set_mask; int kvm_shadow_mem; bool kernel_irqchip_allowed; bool kernel_irqchip_required; @@ -103,7 +99,6 @@ struct KVMState int nr_allocated_irq_routes; unsigned long *used_gsi_bitmap; unsigned int gsi_count; - QTAILQ_HEAD(, KVMMSIRoute) msi_hashtab[KVM_MSI_HASHTAB_SIZE]; #endif 
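/* The listener below is the one registered for address_space_memory; listeners for further address spaces (e.g. the x86 SMM address space) are created separately via kvm_memory_listener_register(). */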
KVMMemoryListener memory_listener; QLIST_HEAD(, KVMParkedVcpu) kvm_parked_vcpus; diff --git a/linux-user/sparc/target_syscall.h b/linux-user/sparc/target_syscall.h index be77e44eb8..e421165357 100644 --- a/linux-user/sparc/target_syscall.h +++ b/linux-user/sparc/target_syscall.h @@ -50,11 +50,7 @@ static inline abi_ulong target_shmlba(CPUSPARCState *env) #ifdef TARGET_SPARC64 return MAX(TARGET_PAGE_SIZE, 16 * 1024); #else - if (!(env->def.features & CPU_FEATURE_FLUSH)) { - return 64 * 1024; - } else { - return 256 * 1024; - } + return 256 * 1024; #endif } diff --git a/system/memory.c b/system/memory.c index a800fbc9e5..4928f2525d 100644 --- a/system/memory.c +++ b/system/memory.c @@ -1535,7 +1535,12 @@ MemTxResult memory_region_dispatch_write(MemoryRegion *mr, adjust_endianness(mr, &data, op); - if ((!kvm_eventfds_enabled()) && + /* + * FIXME: it's not clear why under KVM the write would be processed + * directly, instead of going through eventfd. This probably should + * test "tcg_enabled() || qtest_enabled()", or should just go away. + */ + if (!kvm_enabled() && memory_region_dispatch_write_eventfds(mr, addr, data, size, attrs)) { return MEMTX_OK; } @@ -2550,8 +2555,6 @@ void memory_region_clear_flush_coalesced(MemoryRegion *mr) } } -static bool userspace_eventfd_warning; - void memory_region_add_eventfd(MemoryRegion *mr, hwaddr addr, unsigned size, @@ -2568,13 +2571,6 @@ void memory_region_add_eventfd(MemoryRegion *mr, }; unsigned i; - if (kvm_enabled() && (!(kvm_eventfds_enabled() || - userspace_eventfd_warning))) { - userspace_eventfd_warning = true; - error_report("Using eventfd without MMIO binding in KVM. " - "Suboptimal performance expected"); - } - if (size) { adjust_endianness(mr, &mrfd.data, size_memop(size) | MO_TE); } diff --git a/target/i386/cpu.c b/target/i386/cpu.c index bdca901dfa..fc8484cb5e 100644 --- a/target/i386/cpu.c +++ b/target/i386/cpu.c @@ -714,7 +714,7 @@ void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1, CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \ CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \ CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_RDSEED | \ - CPUID_7_0_EBX_KERNEL_FEATURES) + CPUID_7_0_EBX_SHA_NI | CPUID_7_0_EBX_KERNEL_FEATURES) /* missing: CPUID_7_0_EBX_HLE CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM */ @@ -7377,7 +7377,7 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp) return; } - if (env->features[FEAT_1_EDX] & CPUID_PSE36) { + if (env->features[FEAT_1_EDX] & (CPUID_PSE36 | CPUID_PAE)) { cpu->phys_bits = 36; } else { cpu->phys_bits = 32; diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c index e7c054cc16..770e81d56e 100644 --- a/target/i386/kvm/kvm.c +++ b/target/i386/kvm/kvm.c @@ -91,6 +91,15 @@ const KVMCapabilityInfo kvm_arch_required_capabilities[] = { KVM_CAP_INFO(SET_TSS_ADDR), KVM_CAP_INFO(EXT_CPUID), KVM_CAP_INFO(MP_STATE), + KVM_CAP_INFO(SIGNAL_MSI), + KVM_CAP_INFO(IRQ_ROUTING), + KVM_CAP_INFO(DEBUGREGS), + KVM_CAP_INFO(XSAVE), + KVM_CAP_INFO(VCPU_EVENTS), + KVM_CAP_INFO(X86_ROBUST_SINGLESTEP), + KVM_CAP_INFO(MCE), + KVM_CAP_INFO(ADJUST_CLOCK), + KVM_CAP_INFO(SET_IDENTITY_MAP_ADDR), KVM_CAP_LAST_INFO }; @@ -134,10 +143,8 @@ static uint32_t has_architectural_pmu_version; static uint32_t num_architectural_pmu_gp_counters; static uint32_t num_architectural_pmu_fixed_counters; -static int has_xsave; static int has_xsave2; static int has_xcrs; -static int has_pit_state2; static int has_sregs2; static int has_exception_payload; static int has_triple_fault_event; @@ -154,11 
+161,6 @@ static KVMMSRHandlers msr_handlers[KVM_MSR_FILTER_MAX_RANGES]; static RateLimit bus_lock_ratelimit_ctrl; static int kvm_get_one_msr(X86CPU *cpu, int index, uint64_t *value); -bool kvm_has_pit_state2(void) -{ - return !!has_pit_state2; -} - bool kvm_has_smm(void) { return kvm_vm_check_extension(kvm_state, KVM_CAP_X86_SMM); @@ -171,11 +173,6 @@ bool kvm_has_adjust_clock_stable(void) return (ret & KVM_CLOCK_TSC_STABLE); } -bool kvm_has_adjust_clock(void) -{ - return kvm_check_extension(kvm_state, KVM_CAP_ADJUST_CLOCK); -} - bool kvm_has_exception_payload(void) { return has_exception_payload; @@ -577,14 +574,8 @@ uint64_t kvm_arch_get_supported_msr_feature(KVMState *s, uint32_t index) static int kvm_get_mce_cap_supported(KVMState *s, uint64_t *mce_cap, int *max_banks) { - int r; - - r = kvm_check_extension(s, KVM_CAP_MCE); - if (r > 0) { - *max_banks = r; - return kvm_ioctl(s, KVM_X86_GET_MCE_CAP_SUPPORTED, mce_cap); - } - return -ENOSYS; + *max_banks = kvm_check_extension(s, KVM_CAP_MCE); + return kvm_ioctl(s, KVM_X86_GET_MCE_CAP_SUPPORTED, mce_cap); } static void kvm_mce_inject(X86CPU *cpu, hwaddr paddr, int code) @@ -687,15 +678,6 @@ void kvm_arch_on_sigbus_vcpu(CPUState *c, int code, void *addr) emit_hypervisor_memory_failure(MEMORY_FAILURE_ACTION_IGNORE, false); } -static void kvm_reset_exception(CPUX86State *env) -{ - env->exception_nr = -1; - env->exception_pending = 0; - env->exception_injected = 0; - env->exception_has_payload = false; - env->exception_payload = 0; -} - static void kvm_queue_exception(CPUX86State *env, int32_t exception_nr, uint8_t exception_has_payload, @@ -728,38 +710,6 @@ static void kvm_queue_exception(CPUX86State *env, } } -static int kvm_inject_mce_oldstyle(X86CPU *cpu) -{ - CPUX86State *env = &cpu->env; - - if (!kvm_has_vcpu_events() && env->exception_nr == EXCP12_MCHK) { - unsigned int bank, bank_num = env->mcg_cap & 0xff; - struct kvm_x86_mce mce; - - kvm_reset_exception(env); - - /* - * There must be at least one bank in use if an MCE is pending. - * Find it and use its values for the event injection. 
- */ - for (bank = 0; bank < bank_num; bank++) { - if (env->mce_banks[bank * 4 + 1] & MCI_STATUS_VAL) { - break; - } - } - assert(bank < bank_num); - - mce.bank = bank; - mce.status = env->mce_banks[bank * 4 + 1]; - mce.mcg_status = env->mcg_status; - mce.addr = env->mce_banks[bank * 4 + 2]; - mce.misc = env->mce_banks[bank * 4 + 3]; - - return kvm_vcpu_ioctl(CPU(cpu), KVM_X86_SET_MCE, &mce); - } - return 0; -} - static void cpu_update_state(void *opaque, bool running, RunState state) { CPUX86State *env = opaque; @@ -1711,10 +1661,8 @@ static void kvm_init_xsave(CPUX86State *env) { if (has_xsave2) { env->xsave_buf_len = QEMU_ALIGN_UP(has_xsave2, 4096); - } else if (has_xsave) { - env->xsave_buf_len = sizeof(struct kvm_xsave); } else { - return; + env->xsave_buf_len = sizeof(struct kvm_xsave); } env->xsave_buf = qemu_memalign(4096, env->xsave_buf_len); @@ -2154,8 +2102,7 @@ int kvm_arch_init_vcpu(CPUState *cs) if (((env->cpuid_version >> 8)&0xF) >= 6 && (env->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) == - (CPUID_MCE | CPUID_MCA) - && kvm_check_extension(cs->kvm_state, KVM_CAP_MCE) > 0) { + (CPUID_MCE | CPUID_MCA)) { uint64_t mcg_cap, unsupported_caps; int banks; int ret; @@ -2589,14 +2536,7 @@ int kvm_arch_init(MachineState *ms, KVMState *s) return ret; } - if (!kvm_check_extension(s, KVM_CAP_IRQ_ROUTING)) { - error_report("kvm: KVM_CAP_IRQ_ROUTING not supported by KVM"); - return -ENOTSUP; - } - - has_xsave = kvm_check_extension(s, KVM_CAP_XSAVE); has_xcrs = kvm_check_extension(s, KVM_CAP_XCRS); - has_pit_state2 = kvm_check_extension(s, KVM_CAP_PIT_STATE2); has_sregs2 = kvm_check_extension(s, KVM_CAP_SREGS2) > 0; hv_vpindex_settable = kvm_check_extension(s, KVM_CAP_HYPERV_VP_INDEX); @@ -2654,20 +2594,13 @@ int kvm_arch_init(MachineState *ms, KVMState *s) * In order to use vm86 mode, an EPT identity map and a TSS are needed. * Since these must be part of guest physical memory, we need to allocate * them, both by setting their start addresses in the kernel and by - * creating a corresponding e820 entry. We need 4 pages before the BIOS. - * - * Older KVM versions may not support setting the identity map base. In - * that case we need to stick with the default, i.e. a 256K maximum BIOS - * size. + * creating a corresponding e820 entry. We need 4 pages before the BIOS, + * so this value allows up to 16M BIOSes. */ - if (kvm_check_extension(s, KVM_CAP_SET_IDENTITY_MAP_ADDR)) { - /* Allows up to 16M BIOSes. */ - identity_base = 0xfeffc000; - - ret = kvm_vm_ioctl(s, KVM_SET_IDENTITY_MAP_ADDR, &identity_base); - if (ret < 0) { - return ret; - } + identity_base = 0xfeffc000; + ret = kvm_vm_ioctl(s, KVM_SET_IDENTITY_MAP_ADDR, &identity_base); + if (ret < 0) { + return ret; } /* Set TSS base one page after EPT identity map. 
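* For the arithmetic: the identity map occupies one page at 0xfeffc000 and the TSS the three pages after it, so together they end at 0xfeffc000 + 0x4000 = 0xff000000, exactly 16M below the 4G boundary, leaving the whole [0xff000000, 4G) window to the BIOS.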
*/ @@ -2879,40 +2812,11 @@ static int kvm_getput_regs(X86CPU *cpu, int set) return ret; } -static int kvm_put_fpu(X86CPU *cpu) -{ - CPUX86State *env = &cpu->env; - struct kvm_fpu fpu; - int i; - - memset(&fpu, 0, sizeof fpu); - fpu.fsw = env->fpus & ~(7 << 11); - fpu.fsw |= (env->fpstt & 7) << 11; - fpu.fcw = env->fpuc; - fpu.last_opcode = env->fpop; - fpu.last_ip = env->fpip; - fpu.last_dp = env->fpdp; - for (i = 0; i < 8; ++i) { - fpu.ftwx |= (!env->fptags[i]) << i; - } - memcpy(fpu.fpr, env->fpregs, sizeof env->fpregs); - for (i = 0; i < CPU_NB_REGS; i++) { - stq_p(&fpu.xmm[i][0], env->xmm_regs[i].ZMM_Q(0)); - stq_p(&fpu.xmm[i][8], env->xmm_regs[i].ZMM_Q(1)); - } - fpu.mxcsr = env->mxcsr; - - return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_FPU, &fpu); -} - static int kvm_put_xsave(X86CPU *cpu) { CPUX86State *env = &cpu->env; void *xsave = env->xsave_buf; - if (!has_xsave) { - return kvm_put_fpu(cpu); - } x86_cpu_xsave_all_areas(cpu, xsave, env->xsave_buf_len); return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_XSAVE, xsave); @@ -3657,46 +3561,12 @@ static int kvm_put_msrs(X86CPU *cpu, int level) } -static int kvm_get_fpu(X86CPU *cpu) -{ - CPUX86State *env = &cpu->env; - struct kvm_fpu fpu; - int i, ret; - - ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_FPU, &fpu); - if (ret < 0) { - return ret; - } - - env->fpstt = (fpu.fsw >> 11) & 7; - env->fpus = fpu.fsw; - env->fpuc = fpu.fcw; - env->fpop = fpu.last_opcode; - env->fpip = fpu.last_ip; - env->fpdp = fpu.last_dp; - for (i = 0; i < 8; ++i) { - env->fptags[i] = !((fpu.ftwx >> i) & 1); - } - memcpy(env->fpregs, fpu.fpr, sizeof env->fpregs); - for (i = 0; i < CPU_NB_REGS; i++) { - env->xmm_regs[i].ZMM_Q(0) = ldq_p(&fpu.xmm[i][0]); - env->xmm_regs[i].ZMM_Q(1) = ldq_p(&fpu.xmm[i][8]); - } - env->mxcsr = fpu.mxcsr; - - return 0; -} - static int kvm_get_xsave(X86CPU *cpu) { CPUX86State *env = &cpu->env; void *xsave = env->xsave_buf; int type, ret; - if (!has_xsave) { - return kvm_get_fpu(cpu); - } - type = has_xsave2 ? KVM_GET_XSAVE2 : KVM_GET_XSAVE; ret = kvm_vcpu_ioctl(CPU(cpu), type, xsave); if (ret < 0) { @@ -4427,10 +4297,6 @@ static int kvm_put_vcpu_events(X86CPU *cpu, int level) CPUX86State *env = &cpu->env; struct kvm_vcpu_events events = {}; - if (!kvm_has_vcpu_events()) { - return 0; - } - events.flags = 0; if (has_exception_payload) { @@ -4498,10 +4364,6 @@ static int kvm_get_vcpu_events(X86CPU *cpu) struct kvm_vcpu_events events; int ret; - if (!kvm_has_vcpu_events()) { - return 0; - } - memset(&events, 0, sizeof(events)); ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_VCPU_EVENTS, &events); if (ret < 0) { @@ -4567,47 +4429,12 @@ static int kvm_get_vcpu_events(X86CPU *cpu) return 0; } -static int kvm_guest_debug_workarounds(X86CPU *cpu) -{ - CPUState *cs = CPU(cpu); - CPUX86State *env = &cpu->env; - int ret = 0; - unsigned long reinject_trap = 0; - - if (!kvm_has_vcpu_events()) { - if (env->exception_nr == EXCP01_DB) { - reinject_trap = KVM_GUESTDBG_INJECT_DB; - } else if (env->exception_injected == EXCP03_INT3) { - reinject_trap = KVM_GUESTDBG_INJECT_BP; - } - kvm_reset_exception(env); - } - - /* - * Kernels before KVM_CAP_X86_ROBUST_SINGLESTEP overwrote flags.TF - * injected via SET_GUEST_DEBUG while updating GP regs. Work around this - * by updating the debug state once again if single-stepping is on. - * Another reason to call kvm_update_guest_debug here is a pending debug - * trap raise by the guest. On kernels without SET_VCPU_EVENTS we have to - * reinject them via SET_GUEST_DEBUG. 
- */ - if (reinject_trap || - (!kvm_has_robust_singlestep() && cs->singlestep_enabled)) { - ret = kvm_update_guest_debug(cs, reinject_trap); - } - return ret; -} - static int kvm_put_debugregs(X86CPU *cpu) { CPUX86State *env = &cpu->env; struct kvm_debugregs dbgregs; int i; - if (!kvm_has_debugregs()) { - return 0; - } - memset(&dbgregs, 0, sizeof(dbgregs)); for (i = 0; i < 4; i++) { dbgregs.db[i] = env->dr[i]; @@ -4625,10 +4452,6 @@ static int kvm_get_debugregs(X86CPU *cpu) struct kvm_debugregs dbgregs; int i, ret; - if (!kvm_has_debugregs()) { - return 0; - } - ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_DEBUGREGS, &dbgregs); if (ret < 0) { return ret; @@ -4778,11 +4601,6 @@ int kvm_arch_put_registers(CPUState *cpu, int level) if (ret < 0) { return ret; } - /* must be before kvm_put_msrs */ - ret = kvm_inject_mce_oldstyle(x86_cpu); - if (ret < 0) { - return ret; - } ret = kvm_put_msrs(x86_cpu, level); if (ret < 0) { return ret; @@ -4806,11 +4624,6 @@ int kvm_arch_put_registers(CPUState *cpu, int level) if (ret < 0) { return ret; } - /* must be last */ - ret = kvm_guest_debug_workarounds(x86_cpu); - if (ret < 0) { - return ret; - } return 0; } diff --git a/target/i386/kvm/kvm_i386.h b/target/i386/kvm/kvm_i386.h index 55d4e68c34..30fedcffea 100644 --- a/target/i386/kvm/kvm_i386.h +++ b/target/i386/kvm/kvm_i386.h @@ -33,7 +33,6 @@ bool kvm_has_smm(void); bool kvm_enable_x2apic(void); bool kvm_hv_vpindex_settable(void); -bool kvm_has_pit_state2(void); bool kvm_enable_sgx_provisioning(KVMState *s); bool kvm_hyperv_expand_features(X86CPU *cpu, Error **errp); @@ -50,7 +49,6 @@ void kvm_request_xsave_components(X86CPU *cpu, uint64_t mask); #ifdef CONFIG_KVM -bool kvm_has_adjust_clock(void); bool kvm_has_adjust_clock_stable(void); bool kvm_has_exception_payload(void); void kvm_synchronize_all_tsc(void); diff --git a/target/i386/ops_sse.h b/target/i386/ops_sse.h index 33908c0691..6a465a35fd 100644 --- a/target/i386/ops_sse.h +++ b/target/i386/ops_sse.h @@ -2527,6 +2527,134 @@ SSE_HELPER_FMAP(helper_fma4ps, ZMM_S, 2 << SHIFT, float32_muladd) SSE_HELPER_FMAP(helper_fma4pd, ZMM_D, 1 << SHIFT, float64_muladd) #endif +#if SHIFT == 1 +#define SSE_HELPER_SHA1RNDS4(name, F, K) \ + void name(Reg *d, Reg *a, Reg *b) \ + { \ + uint32_t A, B, C, D, E, t, i; \ + \ + A = a->L(3); \ + B = a->L(2); \ + C = a->L(1); \ + D = a->L(0); \ + E = 0; \ + \ + for (i = 0; i <= 3; i++) { \ + t = F(B, C, D) + rol32(A, 5) + b->L(3 - i) + E + K; \ + E = D; \ + D = C; \ + C = rol32(B, 30); \ + B = A; \ + A = t; \ + } \ + \ + d->L(3) = A; \ + d->L(2) = B; \ + d->L(1) = C; \ + d->L(0) = D; \ + } + +#define SHA1_F0(b, c, d) (((b) & (c)) ^ (~(b) & (d))) +#define SHA1_F1(b, c, d) ((b) ^ (c) ^ (d)) +#define SHA1_F2(b, c, d) (((b) & (c)) ^ ((b) & (d)) ^ ((c) & (d))) + +SSE_HELPER_SHA1RNDS4(helper_sha1rnds4_f0, SHA1_F0, 0x5A827999) +SSE_HELPER_SHA1RNDS4(helper_sha1rnds4_f1, SHA1_F1, 0x6ED9EBA1) +SSE_HELPER_SHA1RNDS4(helper_sha1rnds4_f2, SHA1_F2, 0x8F1BBCDC) +SSE_HELPER_SHA1RNDS4(helper_sha1rnds4_f3, SHA1_F1, 0xCA62C1D6) + +void helper_sha1nexte(Reg *d, Reg *a, Reg *b) +{ + d->L(3) = b->L(3) + rol32(a->L(3), 30); + d->L(2) = b->L(2); + d->L(1) = b->L(1); + d->L(0) = b->L(0); +} + +void helper_sha1msg1(Reg *d, Reg *a, Reg *b) +{ + /* These could be overwritten by the first two assignments, save them. 
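+ * (d and b can alias: with "sha1msg1 %xmm1, %xmm1" d, a and b all point at the same register, and the stores to d->L(3) and d->L(2) would clobber b->L(3) and b->L(2) before the final two reads. The reads of a->L(1) and a->L(0) stay safe because only the high words have been written by then.)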
*/ + uint32_t b3 = b->L(3); + uint32_t b2 = b->L(2); + + d->L(3) = a->L(3) ^ a->L(1); + d->L(2) = a->L(2) ^ a->L(0); + d->L(1) = a->L(1) ^ b3; + d->L(0) = a->L(0) ^ b2; +} + +void helper_sha1msg2(Reg *d, Reg *a, Reg *b) +{ + d->L(3) = rol32(a->L(3) ^ b->L(2), 1); + d->L(2) = rol32(a->L(2) ^ b->L(1), 1); + d->L(1) = rol32(a->L(1) ^ b->L(0), 1); + d->L(0) = rol32(a->L(0) ^ d->L(3), 1); +} + +#define SHA256_CH(e, f, g) (((e) & (f)) ^ (~(e) & (g))) +#define SHA256_MAJ(a, b, c) (((a) & (b)) ^ ((a) & (c)) ^ ((b) & (c))) + +#define SHA256_RNDS0(w) (ror32((w), 2) ^ ror32((w), 13) ^ ror32((w), 22)) +#define SHA256_RNDS1(w) (ror32((w), 6) ^ ror32((w), 11) ^ ror32((w), 25)) +#define SHA256_MSGS0(w) (ror32((w), 7) ^ ror32((w), 18) ^ ((w) >> 3)) +#define SHA256_MSGS1(w) (ror32((w), 17) ^ ror32((w), 19) ^ ((w) >> 10)) + +void helper_sha256rnds2(Reg *d, Reg *a, Reg *b, uint32_t wk0, uint32_t wk1) +{ + uint32_t t, AA, EE; + + uint32_t A = b->L(3); + uint32_t B = b->L(2); + uint32_t C = a->L(3); + uint32_t D = a->L(2); + uint32_t E = b->L(1); + uint32_t F = b->L(0); + uint32_t G = a->L(1); + uint32_t H = a->L(0); + + /* Even round */ + t = SHA256_CH(E, F, G) + SHA256_RNDS1(E) + wk0 + H; + AA = t + SHA256_MAJ(A, B, C) + SHA256_RNDS0(A); + EE = t + D; + + /* These will be B and F at the end of the odd round */ + d->L(2) = AA; + d->L(0) = EE; + + D = C, C = B, B = A, A = AA; + H = G, G = F, F = E, E = EE; + + /* Odd round */ + t = SHA256_CH(E, F, G) + SHA256_RNDS1(E) + wk1 + H; + AA = t + SHA256_MAJ(A, B, C) + SHA256_RNDS0(A); + EE = t + D; + + d->L(3) = AA; + d->L(1) = EE; +} + +void helper_sha256msg1(Reg *d, Reg *a, Reg *b) +{ + /* b->L(0) could be overwritten by the first assignment, save it. */ + uint32_t b0 = b->L(0); + + d->L(0) = a->L(0) + SHA256_MSGS0(a->L(1)); + d->L(1) = a->L(1) + SHA256_MSGS0(a->L(2)); + d->L(2) = a->L(2) + SHA256_MSGS0(a->L(3)); + d->L(3) = a->L(3) + SHA256_MSGS0(b0); +} + +void helper_sha256msg2(Reg *d, Reg *a, Reg *b) +{ + /* Earlier assignments cannot overwrite any of the two operands. */ + d->L(0) = a->L(0) + SHA256_MSGS1(b->L(2)); + d->L(1) = a->L(1) + SHA256_MSGS1(b->L(3)); + /* Yes, this reuses the previously computed values. */ + d->L(2) = a->L(2) + SHA256_MSGS1(d->L(0)); + d->L(3) = a->L(3) + SHA256_MSGS1(d->L(1)); +} +#endif + #undef SSE_HELPER_S #undef LANE_WIDTH diff --git a/target/i386/tcg/decode-new.c.inc b/target/i386/tcg/decode-new.c.inc index 7d76f15275..2bdbb1bba0 100644 --- a/target/i386/tcg/decode-new.c.inc +++ b/target/i386/tcg/decode-new.c.inc @@ -23,7 +23,11 @@ * The decoder is mostly based on tables copied from the Intel SDM. As * a result, most operand load and writeback is done entirely in common * table-driven code using the same operand type (X86_TYPE_*) and - * size (X86_SIZE_*) codes used in the manual. + * size (X86_SIZE_*) codes used in the manual. There are a few differences + * though. + * + * Vector operands + * --------------- * * The main difference is that the V, U and W types are extended to * cover MMX as well; if an instruction is like @@ -43,6 +47,50 @@ * There are a couple cases in which instructions (e.g. MOVD) write the * whole XMM or MM register but are established incorrectly in the manual * as "d" or "q". These have to be fixed for the decoder to work correctly. + * + * VEX exception classes + * --------------------- + * + * Speaking about imprecisions in the manual, the decoder treats all + * exception-class 4 instructions as having an optional VEX prefix, and + * all exception-class 6 instructions as having a mandatory VEX prefix. 
+ * This is true except for a dozen instructions; these are in exception + * class 4 but do not ignore the VEX.W bit (which does not even exist + * without a VEX prefix). These instructions are mostly listed in Intel's + * table 2-16, but with a few exceptions. + * + * The AMD manual has more precise subclasses for exceptions, and unlike Intel + * they list the VEX.W requirements in the exception classes as well (except + * when they don't). AMD describes class 6 as "AVX Mixed Memory Argument" + * without defining what a mixed memory argument is, but still use 4 as the + * primary exception class... except when they don't. + * + * The summary is: + * Intel AMD VEX.W note + * ------------------------------------------------------------------- + * vpblendd 4 4J 0 + * vpblendvb 4 4E-X 0 (*) + * vpbroadcastq 6 6D 0 (+) + * vpermd/vpermps 4 4H 0 (§) + * vpermq/vpermpd 4 4H-1 1 (§) + * vpermilpd/vpermilps 4 6E 0 (^) + * vpmaskmovd 6 4K significant (^) + * vpsllv 4 4K significant + * vpsrav 4 4J 0 + * vpsrlv 4 4K significant + * vtestps/vtestpd 4 4G 0 + * + * (*) AMD lists VPBLENDVB as related to SSE4.1 PBLENDVB, which may + * explain why it is considered exception class 4. However, + * Intel says that VEX-only instructions should be in class 6... + * + * (+) Not found in Intel's table 2-16 + * + * (§) 4H and 4H-1 do not mention VEX.W requirements, which are + * however present in the description of the instruction + * + * (^) these are the two cases in which Intel and AMD disagree on the + * primary exception class */ #define X86_OP_NONE { 0 }, @@ -90,8 +138,6 @@ X86_OP_ENTRY3(op, None, None, None, None, None, None, ## __VA_ARGS__) #define cpuid(feat) .cpuid = X86_FEAT_##feat, -#define i64 .special = X86_SPECIAL_i64, -#define o64 .special = X86_SPECIAL_o64, #define xchg .special = X86_SPECIAL_Locked, #define mmx .special = X86_SPECIAL_MMX, #define zext0 .special = X86_SPECIAL_ZExtOp0, @@ -114,6 +160,9 @@ #define vex12 .vex_class = 12, #define vex13 .vex_class = 13, +#define chk(a) .check = X86_CHECK_##a, +#define svm(a) .intercept = SVM_EXIT_##a, + #define avx2_256 .vex_special = X86_VEX_AVX2_256, #define P_00 1 @@ -161,8 +210,8 @@ static void decode_group15(DisasContext *s, CPUX86State *env, X86OpEntry *entry, }; static const X86OpEntry group15_mem[8] = { - [2] = X86_OP_ENTRYr(LDMXCSR, E,d, vex5), - [3] = X86_OP_ENTRYw(STMXCSR, E,d, vex5), + [2] = X86_OP_ENTRYr(LDMXCSR, E,d, vex5 chk(VEX128)), + [3] = X86_OP_ENTRYw(STMXCSR, E,d, vex5 chk(VEX128)), }; uint8_t modrm = get_modrm(s, env); @@ -337,11 +386,11 @@ static const X86OpEntry opcodes_0F38_00toEF[240] = { [0x07] = X86_OP_ENTRY3(PHSUBSW, V,x, H,x, W,x, vex4 cpuid(SSSE3) mmx avx2_256 p_00_66), [0x10] = X86_OP_ENTRY2(PBLENDVB, V,x, W,x, vex4 cpuid(SSE41) avx2_256 p_66), - [0x13] = X86_OP_ENTRY2(VCVTPH2PS, V,x, W,xh, vex11 cpuid(F16C) p_66), + [0x13] = X86_OP_ENTRY2(VCVTPH2PS, V,x, W,xh, vex11 chk(W0) cpuid(F16C) p_66), [0x14] = X86_OP_ENTRY2(BLENDVPS, V,x, W,x, vex4 cpuid(SSE41) p_66), [0x15] = X86_OP_ENTRY2(BLENDVPD, V,x, W,x, vex4 cpuid(SSE41) p_66), /* Listed incorrectly as type 4 */ - [0x16] = X86_OP_ENTRY3(VPERMD, V,qq, H,qq, W,qq, vex6 cpuid(AVX2) p_66), + [0x16] = X86_OP_ENTRY3(VPERMD, V,qq, H,qq, W,qq, vex6 chk(W0) cpuid(AVX2) p_66), /* vpermps */ [0x17] = X86_OP_ENTRY3(VPTEST, None,None, V,x, W,x, vex4 cpuid(SSE41) p_66), /* @@ -362,14 +411,14 @@ static const X86OpEntry opcodes_0F38_00toEF[240] = { [0x33] = X86_OP_ENTRY3(VPMOVZXWD, V,x, None,None, W,q, vex5 cpuid(SSE41) avx_movx avx2_256 p_66), [0x34] = X86_OP_ENTRY3(VPMOVZXWQ, V,x, 
None,None, W,d, vex5 cpuid(SSE41) avx_movx avx2_256 p_66), [0x35] = X86_OP_ENTRY3(VPMOVZXDQ, V,x, None,None, W,q, vex5 cpuid(SSE41) avx_movx avx2_256 p_66), - [0x36] = X86_OP_ENTRY3(VPERMD, V,qq, H,qq, W,qq, vex6 cpuid(AVX2) p_66), + [0x36] = X86_OP_ENTRY3(VPERMD, V,qq, H,qq, W,qq, vex6 chk(W0) cpuid(AVX2) p_66), [0x37] = X86_OP_ENTRY3(PCMPGTQ, V,x, H,x, W,x, vex4 cpuid(SSE42) avx2_256 p_66), [0x40] = X86_OP_ENTRY3(PMULLD, V,x, H,x, W,x, vex4 cpuid(SSE41) avx2_256 p_66), [0x41] = X86_OP_ENTRY3(VPHMINPOSUW, V,dq, None,None, W,dq, vex4 cpuid(SSE41) p_66), /* Listed incorrectly as type 4 */ [0x45] = X86_OP_ENTRY3(VPSRLV, V,x, H,x, W,x, vex6 cpuid(AVX2) p_66), - [0x46] = X86_OP_ENTRY3(VPSRAV, V,x, H,x, W,x, vex6 cpuid(AVX2) p_66), + [0x46] = X86_OP_ENTRY3(VPSRAV, V,x, H,x, W,x, vex6 chk(W0) cpuid(AVX2) p_66), [0x47] = X86_OP_ENTRY3(VPSLLV, V,x, H,x, W,x, vex6 cpuid(AVX2) p_66), [0x90] = X86_OP_ENTRY3(VPGATHERD, V,x, H,x, M,d, vex12 cpuid(AVX2) p_66), /* vpgatherdd/q */ @@ -391,14 +440,15 @@ static const X86OpEntry opcodes_0F38_00toEF[240] = { [0x09] = X86_OP_ENTRY3(PSIGNW, V,x, H,x, W,x, vex4 cpuid(SSSE3) mmx avx2_256 p_00_66), [0x0a] = X86_OP_ENTRY3(PSIGND, V,x, H,x, W,x, vex4 cpuid(SSSE3) mmx avx2_256 p_00_66), [0x0b] = X86_OP_ENTRY3(PMULHRSW, V,x, H,x, W,x, vex4 cpuid(SSSE3) mmx avx2_256 p_00_66), - [0x0c] = X86_OP_ENTRY3(VPERMILPS, V,x, H,x, W,x, vex4 cpuid(AVX) p_00_66), - [0x0d] = X86_OP_ENTRY3(VPERMILPD, V,x, H,x, W,x, vex4 cpuid(AVX) p_66), - [0x0e] = X86_OP_ENTRY3(VTESTPS, None,None, V,x, W,x, vex4 cpuid(AVX) p_66), - [0x0f] = X86_OP_ENTRY3(VTESTPD, None,None, V,x, W,x, vex4 cpuid(AVX) p_66), - - [0x18] = X86_OP_ENTRY3(VPBROADCASTD, V,x, None,None, W,d, vex6 cpuid(AVX) p_66), /* vbroadcastss */ - [0x19] = X86_OP_ENTRY3(VPBROADCASTQ, V,qq, None,None, W,q, vex6 cpuid(AVX) p_66), /* vbroadcastsd */ - [0x1a] = X86_OP_ENTRY3(VBROADCASTx128, V,qq, None,None, WM,dq,vex6 cpuid(AVX) p_66), + /* Listed incorrectly as type 4 */ + [0x0c] = X86_OP_ENTRY3(VPERMILPS, V,x, H,x, W,x, vex6 chk(W0) cpuid(AVX) p_00_66), + [0x0d] = X86_OP_ENTRY3(VPERMILPD, V,x, H,x, W,x, vex6 chk(W0) cpuid(AVX) p_66), + [0x0e] = X86_OP_ENTRY3(VTESTPS, None,None, V,x, W,x, vex6 chk(W0) cpuid(AVX) p_66), + [0x0f] = X86_OP_ENTRY3(VTESTPD, None,None, V,x, W,x, vex6 chk(W0) cpuid(AVX) p_66), + + [0x18] = X86_OP_ENTRY3(VPBROADCASTD, V,x, None,None, W,d, vex6 chk(W0) cpuid(AVX) p_66), /* vbroadcastss */ + [0x19] = X86_OP_ENTRY3(VPBROADCASTQ, V,qq, None,None, W,q, vex6 chk(W0) cpuid(AVX) p_66), /* vbroadcastsd */ + [0x1a] = X86_OP_ENTRY3(VBROADCASTx128, V,qq, None,None, WM,dq,vex6 chk(W0) cpuid(AVX) p_66), [0x1c] = X86_OP_ENTRY3(PABSB, V,x, None,None, W,x, vex4 cpuid(SSSE3) mmx avx2_256 p_00_66), [0x1d] = X86_OP_ENTRY3(PABSW, V,x, None,None, W,x, vex4 cpuid(SSSE3) mmx avx2_256 p_00_66), [0x1e] = X86_OP_ENTRY3(PABSD, V,x, None,None, W,x, vex4 cpuid(SSSE3) mmx avx2_256 p_00_66), @@ -407,11 +457,11 @@ static const X86OpEntry opcodes_0F38_00toEF[240] = { [0x29] = X86_OP_ENTRY3(PCMPEQQ, V,x, H,x, W,x, vex4 cpuid(SSE41) avx2_256 p_66), [0x2a] = X86_OP_ENTRY3(MOVDQ, V,x, None,None, WM,x, vex1 cpuid(SSE41) avx2_256 p_66), /* movntdqa */ [0x2b] = X86_OP_ENTRY3(VPACKUSDW, V,x, H,x, W,x, vex4 cpuid(SSE41) avx2_256 p_66), - [0x2c] = X86_OP_ENTRY3(VMASKMOVPS, V,x, H,x, WM,x, vex6 cpuid(AVX) p_66), - [0x2d] = X86_OP_ENTRY3(VMASKMOVPD, V,x, H,x, WM,x, vex6 cpuid(AVX) p_66), + [0x2c] = X86_OP_ENTRY3(VMASKMOVPS, V,x, H,x, WM,x, vex6 chk(W0) cpuid(AVX) p_66), + [0x2d] = X86_OP_ENTRY3(VMASKMOVPD, V,x, H,x, WM,x, vex6 chk(W0) cpuid(AVX) p_66), /* 
Incorrectly listed as Mx,Hx,Vx in the manual */ - [0x2e] = X86_OP_ENTRY3(VMASKMOVPS_st, M,x, V,x, H,x, vex6 cpuid(AVX) p_66), - [0x2f] = X86_OP_ENTRY3(VMASKMOVPD_st, M,x, V,x, H,x, vex6 cpuid(AVX) p_66), + [0x2e] = X86_OP_ENTRY3(VMASKMOVPS_st, M,x, V,x, H,x, vex6 chk(W0) cpuid(AVX) p_66), + [0x2f] = X86_OP_ENTRY3(VMASKMOVPD_st, M,x, V,x, H,x, vex6 chk(W0) cpuid(AVX) p_66), [0x38] = X86_OP_ENTRY3(PMINSB, V,x, H,x, W,x, vex4 cpuid(SSE41) avx2_256 p_66), [0x39] = X86_OP_ENTRY3(PMINSD, V,x, H,x, W,x, vex4 cpuid(SSE41) avx2_256 p_66), @@ -422,12 +472,13 @@ static const X86OpEntry opcodes_0F38_00toEF[240] = { [0x3e] = X86_OP_ENTRY3(PMAXUW, V,x, H,x, W,x, vex4 cpuid(SSE41) avx2_256 p_66), [0x3f] = X86_OP_ENTRY3(PMAXUD, V,x, H,x, W,x, vex4 cpuid(SSE41) avx2_256 p_66), - [0x58] = X86_OP_ENTRY3(VPBROADCASTD, V,x, None,None, W,d, vex6 cpuid(AVX2) p_66), - [0x59] = X86_OP_ENTRY3(VPBROADCASTQ, V,x, None,None, W,q, vex6 cpuid(AVX2) p_66), - [0x5a] = X86_OP_ENTRY3(VBROADCASTx128, V,qq, None,None, WM,dq,vex6 cpuid(AVX2) p_66), + /* VPBROADCASTQ not listed as W0 in table 2-16 */ + [0x58] = X86_OP_ENTRY3(VPBROADCASTD, V,x, None,None, W,d, vex6 chk(W0) cpuid(AVX2) p_66), + [0x59] = X86_OP_ENTRY3(VPBROADCASTQ, V,x, None,None, W,q, vex6 chk(W0) cpuid(AVX2) p_66), + [0x5a] = X86_OP_ENTRY3(VBROADCASTx128, V,qq, None,None, WM,dq,vex6 chk(W0) cpuid(AVX2) p_66), - [0x78] = X86_OP_ENTRY3(VPBROADCASTB, V,x, None,None, W,b, vex6 cpuid(AVX2) p_66), - [0x79] = X86_OP_ENTRY3(VPBROADCASTW, V,x, None,None, W,w, vex6 cpuid(AVX2) p_66), + [0x78] = X86_OP_ENTRY3(VPBROADCASTB, V,x, None,None, W,b, vex6 chk(W0) cpuid(AVX2) p_66), + [0x79] = X86_OP_ENTRY3(VPBROADCASTW, V,x, None,None, W,w, vex6 chk(W0) cpuid(AVX2) p_66), [0x8c] = X86_OP_ENTRY3(VPMASKMOV, V,x, H,x, WM,x, vex6 cpuid(AVX2) p_66), [0x8e] = X86_OP_ENTRY3(VPMASKMOV_st, M,x, V,x, H,x, vex6 cpuid(AVX2) p_66), @@ -460,6 +511,13 @@ static const X86OpEntry opcodes_0F38_00toEF[240] = { [0xbe] = X86_OP_ENTRY3(VFNMSUB231Px, V,x, H,x, W,x, vex6 cpuid(FMA) p_66), [0xbf] = X86_OP_ENTRY3(VFNMSUB231Sx, V,x, H,x, W,x, vex6 cpuid(FMA) p_66), + [0xc8] = X86_OP_ENTRY2(SHA1NEXTE, V,dq, W,dq, cpuid(SHA_NI)), + [0xc9] = X86_OP_ENTRY2(SHA1MSG1, V,dq, W,dq, cpuid(SHA_NI)), + [0xca] = X86_OP_ENTRY2(SHA1MSG2, V,dq, W,dq, cpuid(SHA_NI)), + [0xcb] = X86_OP_ENTRY2(SHA256RNDS2, V,dq, W,dq, cpuid(SHA_NI)), + [0xcc] = X86_OP_ENTRY2(SHA256MSG1, V,dq, W,dq, cpuid(SHA_NI)), + [0xcd] = X86_OP_ENTRY2(SHA256MSG2, V,dq, W,dq, cpuid(SHA_NI)), + [0xdb] = X86_OP_ENTRY3(VAESIMC, V,dq, None,None, W,dq, vex4 cpuid(AES) p_66), [0xdc] = X86_OP_ENTRY3(VAESENC, V,x, H,x, W,x, vex4 cpuid(AES) p_66), [0xdd] = X86_OP_ENTRY3(VAESENCLAST, V,x, H,x, W,x, vex4 cpuid(AES) p_66), @@ -554,18 +612,18 @@ static const X86OpEntry opcodes_0F3A[256] = { * Also the "qq" instructions are sometimes omitted by Table 2-17, but are VEX256 * only. 
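* (For instance, VPERMQ/VPERMPD and VPERM2x128 below have no 128-bit forms at all; encoding them with VEX.L=0 is #UD.)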
*/ - [0x00] = X86_OP_ENTRY3(VPERMQ, V,qq, W,qq, I,b, vex6 cpuid(AVX2) p_66), - [0x01] = X86_OP_ENTRY3(VPERMQ, V,qq, W,qq, I,b, vex6 cpuid(AVX2) p_66), /* VPERMPD */ - [0x02] = X86_OP_ENTRY4(VBLENDPS, V,x, H,x, W,x, vex6 cpuid(AVX2) p_66), /* VPBLENDD */ - [0x04] = X86_OP_ENTRY3(VPERMILPS_i, V,x, W,x, I,b, vex6 cpuid(AVX) p_66), - [0x05] = X86_OP_ENTRY3(VPERMILPD_i, V,x, W,x, I,b, vex6 cpuid(AVX) p_66), - [0x06] = X86_OP_ENTRY4(VPERM2x128, V,qq, H,qq, W,qq, vex6 cpuid(AVX) p_66), + [0x00] = X86_OP_ENTRY3(VPERMQ, V,qq, W,qq, I,b, vex6 chk(W1) cpuid(AVX2) p_66), + [0x01] = X86_OP_ENTRY3(VPERMQ, V,qq, W,qq, I,b, vex6 chk(W1) cpuid(AVX2) p_66), /* VPERMPD */ + [0x02] = X86_OP_ENTRY4(VBLENDPS, V,x, H,x, W,x, vex6 chk(W0) cpuid(AVX2) p_66), /* VPBLENDD */ + [0x04] = X86_OP_ENTRY3(VPERMILPS_i, V,x, W,x, I,b, vex6 chk(W0) cpuid(AVX) p_66), + [0x05] = X86_OP_ENTRY3(VPERMILPD_i, V,x, W,x, I,b, vex6 chk(W0) cpuid(AVX) p_66), + [0x06] = X86_OP_ENTRY4(VPERM2x128, V,qq, H,qq, W,qq, vex6 chk(W0) cpuid(AVX) p_66), [0x14] = X86_OP_ENTRY3(PEXTRB, E,b, V,dq, I,b, vex5 cpuid(SSE41) zext0 p_66), [0x15] = X86_OP_ENTRY3(PEXTRW, E,w, V,dq, I,b, vex5 cpuid(SSE41) zext0 p_66), [0x16] = X86_OP_ENTRY3(PEXTR, E,y, V,dq, I,b, vex5 cpuid(SSE41) p_66), [0x17] = X86_OP_ENTRY3(VEXTRACTPS, E,d, V,dq, I,b, vex5 cpuid(SSE41) p_66), - [0x1d] = X86_OP_ENTRY3(VCVTPS2PH, W,xh, V,x, I,b, vex11 cpuid(F16C) p_66), + [0x1d] = X86_OP_ENTRY3(VCVTPS2PH, W,xh, V,x, I,b, vex11 chk(W0) cpuid(F16C) p_66), [0x20] = X86_OP_ENTRY4(PINSRB, V,dq, H,dq, E,b, vex5 cpuid(SSE41) zext2 p_66), [0x21] = X86_OP_GROUP0(VINSERTPS), @@ -575,7 +633,7 @@ static const X86OpEntry opcodes_0F3A[256] = { [0x41] = X86_OP_ENTRY4(VDDPD, V,dq, H,dq, W,dq, vex2 cpuid(SSE41) p_66), [0x42] = X86_OP_ENTRY4(VMPSADBW, V,x, H,x, W,x, vex2 cpuid(SSE41) avx2_256 p_66), [0x44] = X86_OP_ENTRY4(PCLMULQDQ, V,dq, H,dq, W,dq, vex4 cpuid(PCLMULQDQ) p_66), - [0x46] = X86_OP_ENTRY4(VPERM2x128, V,qq, H,qq, W,qq, vex6 cpuid(AVX2) p_66), + [0x46] = X86_OP_ENTRY4(VPERM2x128, V,qq, H,qq, W,qq, vex6 chk(W0) cpuid(AVX2) p_66), [0x60] = X86_OP_ENTRY4(PCMPESTRM, None,None, V,dq, W,dq, vex4_unal cpuid(SSE42) p_66), [0x61] = X86_OP_ENTRY4(PCMPESTRI, None,None, V,dq, W,dq, vex4_unal cpuid(SSE42) p_66), @@ -598,16 +656,18 @@ static const X86OpEntry opcodes_0F3A[256] = { [0x0e] = X86_OP_ENTRY4(VPBLENDW, V,x, H,x, W,x, vex4 cpuid(SSE41) avx2_256 p_66), [0x0f] = X86_OP_ENTRY4(PALIGNR, V,x, H,x, W,x, vex4 cpuid(SSSE3) mmx avx2_256 p_00_66), - [0x18] = X86_OP_ENTRY4(VINSERTx128, V,qq, H,qq, W,qq, vex6 cpuid(AVX) p_66), - [0x19] = X86_OP_ENTRY3(VEXTRACTx128, W,dq, V,qq, I,b, vex6 cpuid(AVX) p_66), + [0x18] = X86_OP_ENTRY4(VINSERTx128, V,qq, H,qq, W,qq, vex6 chk(W0) cpuid(AVX) p_66), + [0x19] = X86_OP_ENTRY3(VEXTRACTx128, W,dq, V,qq, I,b, vex6 chk(W0) cpuid(AVX) p_66), - [0x38] = X86_OP_ENTRY4(VINSERTx128, V,qq, H,qq, W,qq, vex6 cpuid(AVX2) p_66), - [0x39] = X86_OP_ENTRY3(VEXTRACTx128, W,dq, V,qq, I,b, vex6 cpuid(AVX2) p_66), + [0x38] = X86_OP_ENTRY4(VINSERTx128, V,qq, H,qq, W,qq, vex6 chk(W0) cpuid(AVX2) p_66), + [0x39] = X86_OP_ENTRY3(VEXTRACTx128, W,dq, V,qq, I,b, vex6 chk(W0) cpuid(AVX2) p_66), /* Listed incorrectly as type 4 */ - [0x4a] = X86_OP_ENTRY4(VBLENDVPS, V,x, H,x, W,x, vex6 cpuid(AVX) p_66), - [0x4b] = X86_OP_ENTRY4(VBLENDVPD, V,x, H,x, W,x, vex6 cpuid(AVX) p_66), - [0x4c] = X86_OP_ENTRY4(VPBLENDVB, V,x, H,x, W,x, vex6 cpuid(AVX) p_66 avx2_256), + [0x4a] = X86_OP_ENTRY4(VBLENDVPS, V,x, H,x, W,x, vex6 chk(W0) cpuid(AVX) p_66), + [0x4b] = X86_OP_ENTRY4(VBLENDVPD, V,x, H,x, W,x, vex6 chk(W0) 
cpuid(AVX) p_66), + [0x4c] = X86_OP_ENTRY4(VPBLENDVB, V,x, H,x, W,x, vex6 chk(W0) cpuid(AVX) p_66 avx2_256), + + [0xcc] = X86_OP_ENTRY3(SHA1RNDS4, V,dq, W,dq, I,b, cpuid(SHA_NI)), [0xdf] = X86_OP_ENTRY3(VAESKEYGEN, V,dq, W,dq, I,b, vex4 cpuid(AES) p_66), @@ -1456,6 +1516,8 @@ static bool has_cpuid_feature(DisasContext *s, X86CPUIDFeature cpuid) return (s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2); case X86_FEAT_AVX2: return (s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_AVX2); + case X86_FEAT_SHA_NI: + return (s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SHA_NI); } g_assert_not_reached(); } @@ -1493,8 +1555,6 @@ static bool validate_vex(DisasContext *s, X86DecodedInsn *decode) } } - /* TODO: instructions that require VEX.W=0 (Table 2-16) */ - switch (e->vex_class) { case 0: if (s->prefix & PREFIX_VEX) { @@ -1579,6 +1639,24 @@ static bool validate_vex(DisasContext *s, X86DecodedInsn *decode) if (s->flags & HF_EM_MASK) { goto illegal; } + + if (e->check) { + if (e->check & X86_CHECK_VEX128) { + if (s->vex_l) { + goto illegal; + } + } + if (e->check & X86_CHECK_W0) { + if (s->vex_w) { + goto illegal; + } + } + if (e->check & X86_CHECK_W1) { + if (!s->vex_w) { + goto illegal; + } + } + } return true; nm_exception: @@ -1764,6 +1842,25 @@ static void disas_insn_new(DisasContext *s, CPUState *cpu, int b) goto illegal_op; } + /* Checks that result in #UD come first. */ + if (decode.e.check) { + if (decode.e.check & X86_CHECK_i64) { + if (CODE64(s)) { + goto illegal_op; + } + } + if (decode.e.check & X86_CHECK_o64) { + if (!CODE64(s)) { + goto illegal_op; + } + } + if (decode.e.check & X86_CHECK_prot) { + if (!PE(s) || VM86(s)) { + goto illegal_op; + } + } + } + switch (decode.e.special) { case X86_SPECIAL_None: break; @@ -1774,23 +1871,6 @@ static void disas_insn_new(DisasContext *s, CPUState *cpu, int b) } break; - case X86_SPECIAL_ProtMode: - if (!PE(s) || VM86(s)) { - goto illegal_op; - } - break; - - case X86_SPECIAL_i64: - if (CODE64(s)) { - goto illegal_op; - } - break; - case X86_SPECIAL_o64: - if (!CODE64(s)) { - goto illegal_op; - } - break; - case X86_SPECIAL_ZExtOp0: assert(decode.op[0].unit == X86_OP_INT); if (!decode.op[0].has_ea) { @@ -1820,6 +1900,37 @@ static void disas_insn_new(DisasContext *s, CPUState *cpu, int b) if (!validate_vex(s, &decode)) { return; } + + /* + * Checks that result in #GP or VMEXIT come second. Intercepts are + * generally checked after non-memory exceptions (i.e. before all + * exceptions if there is no memory operand). Exceptions are + * vm86 checks (INTn, IRET, PUSHF/POPF), RSM and XSETBV (!). + * + * RSM and XSETBV will be handled in the gen_* functions + * instead of using chk(). 
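+ * For example, PUSHF in vm86 mode with IOPL < 3 takes the SVM intercept, + * if one is armed, before raising #GP; hence the iopl checks sit after the + * gen_helper_svm_check_intercept call below.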
+ */ + if (decode.e.check & X86_CHECK_cpl0) { + if (CPL(s) != 0) { + goto gp_fault; + } + } + if (decode.e.intercept && unlikely(GUEST(s))) { + gen_helper_svm_check_intercept(tcg_env, + tcg_constant_i32(decode.e.intercept)); + } + if (decode.e.check) { + if ((decode.e.check & X86_CHECK_vm86_iopl) && VM86(s)) { + if (IOPL(s) < 3) { + goto gp_fault; + } + } else if (decode.e.check & X86_CHECK_cpl_iopl) { + if (IOPL(s) < CPL(s)) { + goto gp_fault; + } + } + } + if (decode.e.special == X86_SPECIAL_MMX && !(s->prefix & (PREFIX_REPZ | PREFIX_REPNZ | PREFIX_DATA))) { gen_helper_enter_mmx(tcg_env); @@ -1846,6 +1957,9 @@ static void disas_insn_new(DisasContext *s, CPUState *cpu, int b) gen_writeback(s, &decode, 0, s->T0); } return; + gp_fault: + gen_exception_gpf(s); + return; illegal_op: gen_illegal_opcode(s); return; diff --git a/target/i386/tcg/decode-new.h b/target/i386/tcg/decode-new.h index a542ec1681..e6c904a319 100644 --- a/target/i386/tcg/decode-new.h +++ b/target/i386/tcg/decode-new.h @@ -108,6 +108,7 @@ typedef enum X86CPUIDFeature { X86_FEAT_FMA, X86_FEAT_MOVBE, X86_FEAT_PCLMULQDQ, + X86_FEAT_SHA_NI, X86_FEAT_SSE, X86_FEAT_SSE2, X86_FEAT_SSE3, @@ -130,15 +131,36 @@ typedef enum X86OpUnit { X86_OP_MMX, /* address in either s->ptrX or s->A0 depending on has_ea */ } X86OpUnit; +typedef enum X86InsnCheck { + /* Illegal or exclusive to 64-bit mode */ + X86_CHECK_i64 = 1, + X86_CHECK_o64 = 2, + + /* Fault outside protected mode */ + X86_CHECK_prot = 4, + + /* Privileged instruction checks */ + X86_CHECK_cpl0 = 8, + X86_CHECK_vm86_iopl = 16, + X86_CHECK_cpl_iopl = 32, + X86_CHECK_iopl = X86_CHECK_cpl_iopl | X86_CHECK_vm86_iopl, + + /* Fault if VEX.L=1 */ + X86_CHECK_VEX128 = 64, + + /* Fault if VEX.W=1 */ + X86_CHECK_W0 = 128, + + /* Fault if VEX.W=0 */ + X86_CHECK_W1 = 256, +} X86InsnCheck; + typedef enum X86InsnSpecial { X86_SPECIAL_None, /* Always locked if it has a memory operand (XCHG) */ X86_SPECIAL_Locked, - /* Fault outside protected mode */ - X86_SPECIAL_ProtMode, - /* * Register operand 0/2 is zero extended to 32 bits. Rd/Mb or Rd/Mw * in the manual. @@ -157,10 +179,6 @@ typedef enum X86InsnSpecial { * become P/P/Q/N, and size "x" becomes "q". 
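* (For example PSIGNW, declared as V,x H,x W,x with p_00_66: with no 66 prefix it decodes as the MMX form, so the operands are effectively P,q P,q Q,q.)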
*/ X86_SPECIAL_MMX, - - /* Illegal or exclusive to 64-bit mode */ - X86_SPECIAL_i64, - X86_SPECIAL_o64, } X86InsnSpecial; /* @@ -223,7 +241,9 @@ struct X86OpEntry { X86CPUIDFeature cpuid:8; unsigned vex_class:8; X86VEXSpecial vex_special:8; - uint16_t valid_prefix:16; + unsigned valid_prefix:16; + unsigned check:16; + unsigned intercept:8; bool is_decode:1; }; diff --git a/target/i386/tcg/emit.c.inc b/target/i386/tcg/emit.c.inc index 88793ba988..82da5488d4 100644 --- a/target/i386/tcg/emit.c.inc +++ b/target/i386/tcg/emit.c.inc @@ -1236,10 +1236,6 @@ static void gen_INSERTQ_r(DisasContext *s, CPUX86State *env, X86DecodedInsn *dec static void gen_LDMXCSR(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) { - if (s->vex_l) { - gen_illegal_opcode(s); - return; - } tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T1); gen_helper_ldmxcsr(tcg_env, s->tmp2_i32); } @@ -1800,6 +1796,60 @@ static void gen_SARX(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) tcg_gen_sar_tl(s->T0, s->T0, s->T1); } +static void gen_SHA1NEXTE(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + gen_helper_sha1nexte(OP_PTR0, OP_PTR1, OP_PTR2); +} + +static void gen_SHA1MSG1(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + gen_helper_sha1msg1(OP_PTR0, OP_PTR1, OP_PTR2); +} + +static void gen_SHA1MSG2(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + gen_helper_sha1msg2(OP_PTR0, OP_PTR1, OP_PTR2); +} + +static void gen_SHA1RNDS4(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + switch(decode->immediate & 3) { + case 0: + gen_helper_sha1rnds4_f0(OP_PTR0, OP_PTR0, OP_PTR1); + break; + case 1: + gen_helper_sha1rnds4_f1(OP_PTR0, OP_PTR0, OP_PTR1); + break; + case 2: + gen_helper_sha1rnds4_f2(OP_PTR0, OP_PTR0, OP_PTR1); + break; + case 3: + gen_helper_sha1rnds4_f3(OP_PTR0, OP_PTR0, OP_PTR1); + break; + } +} + +static void gen_SHA256MSG1(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + gen_helper_sha256msg1(OP_PTR0, OP_PTR1, OP_PTR2); +} + +static void gen_SHA256MSG2(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + gen_helper_sha256msg2(OP_PTR0, OP_PTR1, OP_PTR2); +} + +static void gen_SHA256RNDS2(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + TCGv_i32 wk0 = tcg_temp_new_i32(); + TCGv_i32 wk1 = tcg_temp_new_i32(); + + tcg_gen_ld_i32(wk0, tcg_env, ZMM_OFFSET(0) + offsetof(ZMMReg, ZMM_L(0))); + tcg_gen_ld_i32(wk1, tcg_env, ZMM_OFFSET(0) + offsetof(ZMMReg, ZMM_L(1))); + + gen_helper_sha256rnds2(OP_PTR0, OP_PTR1, OP_PTR2, wk0, wk1); +} + static void gen_SHLX(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) { MemOp ot = decode->op[0].ot; @@ -1832,10 +1882,6 @@ static void gen_VAESKEYGEN(DisasContext *s, CPUX86State *env, X86DecodedInsn *de static void gen_STMXCSR(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) { - if (s->vex_l) { - gen_illegal_opcode(s); - return; - } gen_helper_update_mxcsr(tcg_env); tcg_gen_ld32u_tl(s->T0, tcg_env, offsetof(CPUX86State, mxcsr)); } diff --git a/target/i386/tcg/ops_sse_header.h.inc b/target/i386/tcg/ops_sse_header.h.inc index 8a7b2f4e2f..d92c6faf6d 100644 --- a/target/i386/tcg/ops_sse_header.h.inc +++ b/target/i386/tcg/ops_sse_header.h.inc @@ -399,6 +399,20 @@ DEF_HELPER_3(vpermq_ymm, void, Reg, Reg, i32) #endif #endif +/* SHA helpers */ +#if SHIFT == 1 +DEF_HELPER_3(sha1rnds4_f0, void, Reg, Reg, Reg) +DEF_HELPER_3(sha1rnds4_f1, void, Reg, Reg, Reg) +DEF_HELPER_3(sha1rnds4_f2, void, Reg, Reg, Reg) +DEF_HELPER_3(sha1rnds4_f3, void, Reg, Reg, Reg) +DEF_HELPER_3(sha1nexte, 
void, Reg, Reg, Reg) +DEF_HELPER_3(sha1msg1, void, Reg, Reg, Reg) +DEF_HELPER_3(sha1msg2, void, Reg, Reg, Reg) +DEF_HELPER_5(sha256rnds2, void, Reg, Reg, Reg, i32, i32) +DEF_HELPER_3(sha256msg1, void, Reg, Reg, Reg) +DEF_HELPER_3(sha256msg2, void, Reg, Reg, Reg) +#endif + #undef SHIFT #undef Reg #undef SUFFIX diff --git a/target/riscv/kvm/kvm-cpu.c b/target/riscv/kvm/kvm-cpu.c index 090d617627..26e68c7ab4 100644 --- a/target/riscv/kvm/kvm-cpu.c +++ b/target/riscv/kvm/kvm-cpu.c @@ -1420,7 +1420,7 @@ void kvm_riscv_aia_create(MachineState *machine, uint64_t group_shift, exit(1); } - kvm_msi_via_irqfd_allowed = kvm_irqfds_enabled(); + kvm_msi_via_irqfd_allowed = true; } static void kvm_cpu_instance_init(CPUState *cs) diff --git a/target/sparc/cpu-feature.h.inc b/target/sparc/cpu-feature.h.inc new file mode 100644 index 0000000000..d800f18c4e --- /dev/null +++ b/target/sparc/cpu-feature.h.inc @@ -0,0 +1,14 @@ +FEATURE(FLOAT128) +FEATURE(MUL) +FEATURE(DIV) +FEATURE(VIS1) +FEATURE(VIS2) +FEATURE(FSMULD) +FEATURE(HYPV) +FEATURE(CMT) +FEATURE(GL) +FEATURE(TA0_SHUTDOWN) /* Shutdown on "ta 0x0" */ +FEATURE(ASR17) +FEATURE(CACHE_CTRL) +FEATURE(POWERDOWN) +FEATURE(CASA) diff --git a/target/sparc/cpu.c b/target/sparc/cpu.c index 8ba96ae225..bb1a155510 100644 --- a/target/sparc/cpu.c +++ b/target/sparc/cpu.c @@ -403,9 +403,7 @@ static const sparc_def_t sparc_defs[] = { .mmu_sfsr_mask = 0x00016fff, .mmu_trcr_mask = 0x0000003f, .nwindows = 7, - .features = CPU_FEATURE_FLOAT | CPU_FEATURE_SWAP | CPU_FEATURE_MUL | - CPU_FEATURE_DIV | CPU_FEATURE_FLUSH | CPU_FEATURE_FSQRT | - CPU_FEATURE_FMUL, + .features = CPU_FEATURE_MUL | CPU_FEATURE_DIV, }, { .name = "TI MicroSparc II", @@ -545,21 +543,20 @@ static const sparc_def_t sparc_defs[] = { #endif }; +/* This must match sparc_cpu_properties[]. */ static const char * const feature_name[] = { - "float", - "float128", - "swap", - "mul", - "div", - "flush", - "fsqrt", - "fmul", - "vis1", - "vis2", - "fsmuld", - "hypv", - "cmt", - "gl", + [CPU_FEATURE_BIT_FLOAT128] = "float128", +#ifdef TARGET_SPARC64 + [CPU_FEATURE_BIT_CMT] = "cmt", + [CPU_FEATURE_BIT_GL] = "gl", + [CPU_FEATURE_BIT_HYPV] = "hypv", + [CPU_FEATURE_BIT_VIS1] = "vis1", + [CPU_FEATURE_BIT_VIS2] = "vis2", +#else + [CPU_FEATURE_BIT_MUL] = "mul", + [CPU_FEATURE_BIT_DIV] = "div", + [CPU_FEATURE_BIT_FSMULD] = "fsmuld", +#endif }; static void print_features(uint32_t features, const char *prefix) @@ -757,9 +754,8 @@ static void sparc_cpu_realizefn(DeviceState *dev, Error **errp) CPUSPARCState *env = &cpu->env; #if defined(CONFIG_USER_ONLY) - if ((env->def.features & CPU_FEATURE_FLOAT)) { - env->def.features |= CPU_FEATURE_FLOAT128; - } + /* We are emulating the kernel, which will trap and emulate float128. */ + env->def.features |= CPU_FEATURE_FLOAT128; #endif env->version = env->def.iu_version; @@ -835,21 +831,29 @@ static PropertyInfo qdev_prop_nwindows = { .set = sparc_set_nwindows, }; +/* This must match feature_name[]. 
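+ * (Both tables are indexed by the CPU_FEATURE_BIT_* values, so each property + * bit defined here needs a matching feature_name[] entry for print_features() + * to be able to report it.)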
*/ static Property sparc_cpu_properties[] = { - DEFINE_PROP_BIT("float", SPARCCPU, env.def.features, 0, false), - DEFINE_PROP_BIT("float128", SPARCCPU, env.def.features, 1, false), - DEFINE_PROP_BIT("swap", SPARCCPU, env.def.features, 2, false), - DEFINE_PROP_BIT("mul", SPARCCPU, env.def.features, 3, false), - DEFINE_PROP_BIT("div", SPARCCPU, env.def.features, 4, false), - DEFINE_PROP_BIT("flush", SPARCCPU, env.def.features, 5, false), - DEFINE_PROP_BIT("fsqrt", SPARCCPU, env.def.features, 6, false), - DEFINE_PROP_BIT("fmul", SPARCCPU, env.def.features, 7, false), - DEFINE_PROP_BIT("vis1", SPARCCPU, env.def.features, 8, false), - DEFINE_PROP_BIT("vis2", SPARCCPU, env.def.features, 9, false), - DEFINE_PROP_BIT("fsmuld", SPARCCPU, env.def.features, 10, false), - DEFINE_PROP_BIT("hypv", SPARCCPU, env.def.features, 11, false), - DEFINE_PROP_BIT("cmt", SPARCCPU, env.def.features, 12, false), - DEFINE_PROP_BIT("gl", SPARCCPU, env.def.features, 13, false), + DEFINE_PROP_BIT("float128", SPARCCPU, env.def.features, + CPU_FEATURE_BIT_FLOAT128, false), +#ifdef TARGET_SPARC64 + DEFINE_PROP_BIT("cmt", SPARCCPU, env.def.features, + CPU_FEATURE_BIT_CMT, false), + DEFINE_PROP_BIT("gl", SPARCCPU, env.def.features, + CPU_FEATURE_BIT_GL, false), + DEFINE_PROP_BIT("hypv", SPARCCPU, env.def.features, + CPU_FEATURE_BIT_HYPV, false), + DEFINE_PROP_BIT("vis1", SPARCCPU, env.def.features, + CPU_FEATURE_BIT_VIS1, false), + DEFINE_PROP_BIT("vis2", SPARCCPU, env.def.features, + CPU_FEATURE_BIT_VIS2, false), +#else + DEFINE_PROP_BIT("mul", SPARCCPU, env.def.features, + CPU_FEATURE_BIT_MUL, false), + DEFINE_PROP_BIT("div", SPARCCPU, env.def.features, + CPU_FEATURE_BIT_DIV, false), + DEFINE_PROP_BIT("fsmuld", SPARCCPU, env.def.features, + CPU_FEATURE_BIT_FSMULD, false), +#endif DEFINE_PROP_UNSIGNED("iu-version", SPARCCPU, env.def.iu_version, 0, qdev_prop_uint64, target_ulong), DEFINE_PROP_UINT32("fpu-version", SPARCCPU, env.def.fpu_version, 0), diff --git a/target/sparc/cpu.h b/target/sparc/cpu.h index b3a98f1d74..758a4e8aaa 100644 --- a/target/sparc/cpu.h +++ b/target/sparc/cpu.h @@ -6,6 +6,29 @@ #include "exec/cpu-defs.h" #include "qemu/cpu-float.h" +/* + * From Oracle SPARC Architecture 2015: + * + * Compatibility notes: The PSO memory model described in SPARC V8 and + * SPARC V9 compatibility architecture specifications was never implemented + * in a SPARC V9 implementation and is not included in the Oracle SPARC + * Architecture specification. + * + * The RMO memory model described in the SPARC V9 specification was + * implemented in some non-Sun SPARC V9 implementations, but is not + * directly supported in Oracle SPARC Architecture 2015 implementations. + * + * Therefore always use TSO in QEMU. + * + * D.5 Specification of Partial Store Order (PSO) + * ... [loads] are followed by an implied MEMBAR #LoadLoad | #LoadStore. + * + * D.6 Specification of Total Store Order (TSO) + * ... PSO with the additional requirement that all [stores] are followed + * by an implied MEMBAR #StoreStore. 
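+ * In TCG terms, TSO leaves only store-load reordering (the store buffer) visible: + * CPU0: X = 1; r0 = Y; CPU1: Y = 1; r1 = X; + * can end with r0 == r1 == 0, but nothing else is relaxed, which is why + * TCG_MO_ST_LD is the one barrier bit omitted from the definition below.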
+ */ +#define TCG_GUEST_DEFAULT_MO (TCG_MO_LD_LD | TCG_MO_LD_ST | TCG_MO_ST_ST) + #if !defined(TARGET_SPARC64) #define TARGET_DPREGS 16 #else @@ -268,38 +291,27 @@ struct sparc_def_t { uint32_t maxtl; }; -#define CPU_FEATURE_FLOAT (1 << 0) -#define CPU_FEATURE_FLOAT128 (1 << 1) -#define CPU_FEATURE_SWAP (1 << 2) -#define CPU_FEATURE_MUL (1 << 3) -#define CPU_FEATURE_DIV (1 << 4) -#define CPU_FEATURE_FLUSH (1 << 5) -#define CPU_FEATURE_FSQRT (1 << 6) -#define CPU_FEATURE_FMUL (1 << 7) -#define CPU_FEATURE_VIS1 (1 << 8) -#define CPU_FEATURE_VIS2 (1 << 9) -#define CPU_FEATURE_FSMULD (1 << 10) -#define CPU_FEATURE_HYPV (1 << 11) -#define CPU_FEATURE_CMT (1 << 12) -#define CPU_FEATURE_GL (1 << 13) -#define CPU_FEATURE_TA0_SHUTDOWN (1 << 14) /* Shutdown on "ta 0x0" */ -#define CPU_FEATURE_ASR17 (1 << 15) -#define CPU_FEATURE_CACHE_CTRL (1 << 16) -#define CPU_FEATURE_POWERDOWN (1 << 17) -#define CPU_FEATURE_CASA (1 << 18) +#define FEATURE(X) CPU_FEATURE_BIT_##X, +enum { +#include "cpu-feature.h.inc" +}; + +#undef FEATURE +#define FEATURE(X) CPU_FEATURE_##X = 1u << CPU_FEATURE_BIT_##X, + +enum { +#include "cpu-feature.h.inc" +}; + +#undef FEATURE #ifndef TARGET_SPARC64 -#define CPU_DEFAULT_FEATURES (CPU_FEATURE_FLOAT | CPU_FEATURE_SWAP | \ - CPU_FEATURE_MUL | CPU_FEATURE_DIV | \ - CPU_FEATURE_FLUSH | CPU_FEATURE_FSQRT | \ - CPU_FEATURE_FMUL | CPU_FEATURE_FSMULD) +#define CPU_DEFAULT_FEATURES (CPU_FEATURE_MUL | CPU_FEATURE_DIV | \ + CPU_FEATURE_FSMULD) #else -#define CPU_DEFAULT_FEATURES (CPU_FEATURE_FLOAT | CPU_FEATURE_SWAP | \ - CPU_FEATURE_MUL | CPU_FEATURE_DIV | \ - CPU_FEATURE_FLUSH | CPU_FEATURE_FSQRT | \ - CPU_FEATURE_FMUL | CPU_FEATURE_VIS1 | \ - CPU_FEATURE_VIS2 | CPU_FEATURE_FSMULD | \ - CPU_FEATURE_CASA) +#define CPU_DEFAULT_FEATURES (CPU_FEATURE_MUL | CPU_FEATURE_DIV | \ + CPU_FEATURE_FSMULD | CPU_FEATURE_CASA | \ + CPU_FEATURE_VIS1 | CPU_FEATURE_VIS2) enum { mmu_us_12, // Ultrasparc < III (64 entry TLB) mmu_us_3, // Ultrasparc III (512 entry TLB) @@ -782,14 +794,12 @@ static inline void cpu_get_tb_cpu_state(CPUSPARCState *env, vaddr *pc, if (env->pstate & PS_AM) { flags |= TB_FLAG_AM_ENABLED; } - if ((env->def.features & CPU_FEATURE_FLOAT) - && (env->pstate & PS_PEF) - && (env->fprs & FPRS_FEF)) { + if ((env->pstate & PS_PEF) && (env->fprs & FPRS_FEF)) { flags |= TB_FLAG_FPU_ENABLED; } flags |= env->asi << TB_FLAG_ASI_SHIFT; #else - if ((env->def.features & CPU_FEATURE_FLOAT) && env->psref) { + if (env->psref) { flags |= TB_FLAG_FPU_ENABLED; } #endif diff --git a/target/sparc/fop_helper.c b/target/sparc/fop_helper.c index f54fa9b959..0f8aa3abcd 100644 --- a/target/sparc/fop_helper.c +++ b/target/sparc/fop_helper.c @@ -382,20 +382,7 @@ static void set_fsr(CPUSPARCState *env, target_ulong fsr) set_float_rounding_mode(rnd_mode, &env->fp_status); } -target_ulong helper_ldfsr(CPUSPARCState *env, target_ulong old_fsr, - uint32_t new_fsr) +void helper_set_fsr(CPUSPARCState *env, target_ulong fsr) { - old_fsr = (new_fsr & FSR_LDFSR_MASK) | (old_fsr & FSR_LDFSR_OLDMASK); - set_fsr(env, old_fsr); - return old_fsr; + set_fsr(env, fsr); } - -#ifdef TARGET_SPARC64 -target_ulong helper_ldxfsr(CPUSPARCState *env, target_ulong old_fsr, - uint64_t new_fsr) -{ - old_fsr = (new_fsr & FSR_LDXFSR_MASK) | (old_fsr & FSR_LDXFSR_OLDMASK); - set_fsr(env, old_fsr); - return old_fsr; -} -#endif diff --git a/target/sparc/helper.c b/target/sparc/helper.c index c4358bba84..2bcdc81d54 100644 --- a/target/sparc/helper.c +++ b/target/sparc/helper.c @@ -102,9 +102,7 @@ static target_ulong do_udiv(CPUSPARCState *env, 
target_ulong a, } if (cc) { - env->cc_dst = x0; env->cc_src2 = overflow; - env->cc_op = CC_OP_DIV; } return x0; } @@ -143,9 +141,7 @@ static target_ulong do_sdiv(CPUSPARCState *env, target_ulong a, } if (cc) { - env->cc_dst = x0; env->cc_src2 = overflow; - env->cc_op = CC_OP_DIV; } return x0; } @@ -202,10 +198,8 @@ target_ulong helper_taddcctv(CPUSPARCState *env, target_ulong src1, } /* Only modify the CC after any exceptions have been generated. */ - env->cc_op = CC_OP_TADDTV; env->cc_src = src1; env->cc_src2 = src2; - env->cc_dst = dst; return dst; tag_overflow: @@ -230,10 +224,8 @@ target_ulong helper_tsubcctv(CPUSPARCState *env, target_ulong src1, } /* Only modify the CC after any exceptions have been generated. */ - env->cc_op = CC_OP_TSUBTV; env->cc_src = src1; env->cc_src2 = src2; - env->cc_dst = dst; return dst; tag_overflow: diff --git a/target/sparc/helper.h b/target/sparc/helper.h index b8f1e78c75..dd1721a340 100644 --- a/target/sparc/helper.h +++ b/target/sparc/helper.h @@ -24,7 +24,6 @@ DEF_HELPER_FLAGS_2(tick_set_count, TCG_CALL_NO_RWG, void, ptr, i64) DEF_HELPER_FLAGS_3(tick_get_count, TCG_CALL_NO_WG, i64, env, ptr, int) DEF_HELPER_FLAGS_2(tick_set_limit, TCG_CALL_NO_RWG, void, ptr, i64) #endif -DEF_HELPER_FLAGS_3(check_align, TCG_CALL_NO_WG, void, env, tl, i32) DEF_HELPER_1(debug, void, env) DEF_HELPER_1(save, void, env) DEF_HELPER_1(restore, void, env) @@ -43,7 +42,7 @@ DEF_HELPER_FLAGS_4(ld_asi, TCG_CALL_NO_WG, i64, env, tl, int, i32) DEF_HELPER_FLAGS_5(st_asi, TCG_CALL_NO_WG, void, env, tl, i64, int, i32) #endif DEF_HELPER_FLAGS_1(check_ieee_exceptions, TCG_CALL_NO_WG, tl, env) -DEF_HELPER_FLAGS_3(ldfsr, TCG_CALL_NO_RWG, tl, env, tl, i32) +DEF_HELPER_FLAGS_2(set_fsr, TCG_CALL_NO_RWG, void, env, tl) DEF_HELPER_FLAGS_1(fabss, TCG_CALL_NO_RWG_SE, f32, f32) DEF_HELPER_FLAGS_2(fsqrts, TCG_CALL_NO_RWG, f32, env, f32) DEF_HELPER_FLAGS_2(fsqrtd, TCG_CALL_NO_RWG, f64, env, f64) @@ -55,7 +54,6 @@ DEF_HELPER_FLAGS_1(fsqrtq, TCG_CALL_NO_RWG, void, env) DEF_HELPER_FLAGS_1(fcmpq, TCG_CALL_NO_WG, tl, env) DEF_HELPER_FLAGS_1(fcmpeq, TCG_CALL_NO_WG, tl, env) #ifdef TARGET_SPARC64 -DEF_HELPER_FLAGS_3(ldxfsr, TCG_CALL_NO_RWG, tl, env, tl, i64) DEF_HELPER_FLAGS_1(fabsd, TCG_CALL_NO_RWG_SE, f64, f64) DEF_HELPER_FLAGS_3(fcmps_fcc1, TCG_CALL_NO_WG, tl, env, f32, f32) DEF_HELPER_FLAGS_3(fcmps_fcc2, TCG_CALL_NO_WG, tl, env, f32, f32) @@ -139,18 +137,6 @@ DEF_HELPER_FLAGS_2(fpack16, TCG_CALL_NO_RWG_SE, i32, i64, i64) DEF_HELPER_FLAGS_3(fpack32, TCG_CALL_NO_RWG_SE, i64, i64, i64, i64) DEF_HELPER_FLAGS_2(fpackfix, TCG_CALL_NO_RWG_SE, i32, i64, i64) DEF_HELPER_FLAGS_3(bshuffle, TCG_CALL_NO_RWG_SE, i64, i64, i64, i64) -#define VIS_HELPER(name) \ - DEF_HELPER_FLAGS_2(f ## name ## 16, TCG_CALL_NO_RWG_SE, \ - i64, i64, i64) \ - DEF_HELPER_FLAGS_2(f ## name ## 16s, TCG_CALL_NO_RWG_SE, \ - i32, i32, i32) \ - DEF_HELPER_FLAGS_2(f ## name ## 32, TCG_CALL_NO_RWG_SE, \ - i64, i64, i64) \ - DEF_HELPER_FLAGS_2(f ## name ## 32s, TCG_CALL_NO_RWG_SE, \ - i32, i32, i32) - -VIS_HELPER(padd) -VIS_HELPER(psub) #define VIS_CMPHELPER(name) \ DEF_HELPER_FLAGS_2(f##name##16, TCG_CALL_NO_RWG_SE, \ i64, i64, i64) \ diff --git a/target/sparc/insns.decode b/target/sparc/insns.decode new file mode 100644 index 0000000000..0552f1447d --- /dev/null +++ b/target/sparc/insns.decode @@ -0,0 +1,547 @@ +# SPDX-License-Identifier: LGPL-2.0+ +# +# Sparc instruction decode definitions. +# Copyright (c) 2023 Richard Henderson <rth@twiddle.net> + +## +## Major Opcodes 00 and 01 -- branches, call, and sethi. 
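insns.decode is consumed by scripts/decodetree.py (wired into the build in the meson.build hunk below): each pattern, such as the CALL line just below, becomes an argument struct holding the extracted, sign-extended fields, plus a call into a hand-written trans_*() function in translate.c. A rough self-contained sketch of that interface, with stand-in types; the body is illustrative only, not the patch's implementation:

    #include <inttypes.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Minimal stand-ins for the QEMU types involved. */
    typedef uint64_t target_ulong;
    typedef struct DisasContext { target_ulong pc; } DisasContext;

    /* What decodetree generates for "CALL 01 i:s30". */
    typedef struct arg_CALL { int i; } arg_CALL;

    /* Hand-written in translate.c; returns false for an illegal insn. */
    static bool trans_CALL(DisasContext *dc, arg_CALL *a)
    {
        /* CALL targets pc + (disp30 << 2). */
        target_ulong target = dc->pc + (target_ulong)((int64_t)a->i * 4);
        printf("call target: 0x%" PRIx64 "\n", target);
        return true;
    }

    int main(void)
    {
        DisasContext dc = { .pc = 0x1000 };
        arg_CALL a = { .i = -4 };   /* disp30 = -4, so target is pc - 16 */
        return !trans_CALL(&dc, &a);
    }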
+## + +&bcc i a cond cc +BPcc 00 a:1 cond:4 001 cc:1 0 - i:s19 &bcc +Bicc 00 a:1 cond:4 010 i:s22 &bcc cc=0 +FBPfcc 00 a:1 cond:4 101 cc:2 - i:s19 &bcc +FBfcc 00 a:1 cond:4 110 i:s22 &bcc cc=0 + +%d16 20:s2 0:14 +BPr 00 a:1 0 cond:3 011 .. - rs1:5 .............. i=%d16 + +NCP 00 - ---- 111 ---------------------- # CBcc + +SETHI 00 rd:5 100 i:22 + +CALL 01 i:s30 + +## +## Major Opcode 10 -- integer, floating-point, vis, and system insns. +## + +&r_r_ri rd rs1 rs2_or_imm imm:bool +@n_r_ri .. ..... ...... rs1:5 imm:1 rs2_or_imm:s13 &r_r_ri rd=0 +@r_r_ri .. rd:5 ...... rs1:5 imm:1 rs2_or_imm:s13 &r_r_ri + +&r_r_ri_cc rd rs1 rs2_or_imm imm:bool cc:bool +@r_r_ri_cc .. rd:5 . cc:1 .... rs1:5 imm:1 rs2_or_imm:s13 &r_r_ri_cc +@r_r_ri_cc0 .. rd:5 ...... rs1:5 imm:1 rs2_or_imm:s13 &r_r_ri_cc cc=0 +@r_r_ri_cc1 .. rd:5 ...... rs1:5 imm:1 rs2_or_imm:s13 &r_r_ri_cc cc=1 + +&r_r_r rd rs1 rs2 +@r_r_r .. rd:5 ...... rs1:5 . ........ rs2:5 &r_r_r +@r_r_r_swap .. rd:5 ...... rs2:5 . ........ rs1:5 &r_r_r + +&r_r rd rs +@r_r1 .. rd:5 ...... rs:5 . ........ ..... &r_r +@r_r2 .. rd:5 ...... ..... . ........ rs:5 &r_r + +{ + [ + STBAR 10 00000 101000 01111 0 0000000000000 + MEMBAR 10 00000 101000 01111 1 000000 cmask:3 mmask:4 + + RDCCR 10 rd:5 101000 00010 0 0000000000000 + RDASI 10 rd:5 101000 00011 0 0000000000000 + RDTICK 10 rd:5 101000 00100 0 0000000000000 + RDPC 10 rd:5 101000 00101 0 0000000000000 + RDFPRS 10 rd:5 101000 00110 0 0000000000000 + RDASR17 10 rd:5 101000 10001 0 0000000000000 + RDGSR 10 rd:5 101000 10011 0 0000000000000 + RDSOFTINT 10 rd:5 101000 10110 0 0000000000000 + RDTICK_CMPR 10 rd:5 101000 10111 0 0000000000000 + RDSTICK 10 rd:5 101000 11000 0 0000000000000 + RDSTICK_CMPR 10 rd:5 101000 11001 0 0000000000000 + RDSTRAND_STATUS 10 rd:5 101000 11010 0 0000000000000 + ] + # Before v8, all rs1 accepted; otherwise rs1==0. + RDY 10 rd:5 101000 rs1:5 0 0000000000000 +} + +{ + [ + WRY 10 00000 110000 ..... . ............. @n_r_ri + WRCCR 10 00010 110000 ..... . ............. @n_r_ri + WRASI 10 00011 110000 ..... . ............. @n_r_ri + WRFPRS 10 00110 110000 ..... . ............. @n_r_ri + { + WRGSR 10 10011 110000 ..... . ............. @n_r_ri + WRPOWERDOWN 10 10011 110000 ..... . ............. @n_r_ri + } + WRSOFTINT_SET 10 10100 110000 ..... . ............. @n_r_ri + WRSOFTINT_CLR 10 10101 110000 ..... . ............. @n_r_ri + WRSOFTINT 10 10110 110000 ..... . ............. @n_r_ri + WRTICK_CMPR 10 10111 110000 ..... . ............. @n_r_ri + WRSTICK 10 11000 110000 ..... . ............. @n_r_ri + WRSTICK_CMPR 10 11001 110000 ..... . ............. @n_r_ri + ] + # Before v8, rs1==0 was WRY, and the rest executed as nop. + [ + NOP_v7 10 ----- 110000 ----- 0 00000000 ----- + NOP_v7 10 ----- 110000 ----- 1 -------- ----- + ] +} + +{ + RDPSR 10 rd:5 101001 00000 0 0000000000000 + RDHPR_hpstate 10 rd:5 101001 00000 0 0000000000000 +} +RDHPR_htstate 10 rd:5 101001 00001 0 0000000000000 +RDHPR_hintp 10 rd:5 101001 00011 0 0000000000000 +RDHPR_htba 10 rd:5 101001 00101 0 0000000000000 +RDHPR_hver 10 rd:5 101001 00110 0 0000000000000 +RDHPR_hstick_cmpr 10 rd:5 101001 11111 0 0000000000000 + +{ + WRPSR 10 00000 110001 ..... . ............. 
@n_r_ri + SAVED 10 00000 110001 00000 0 0000000000000 +} +RESTORED 10 00001 110001 00000 0 0000000000000 +# UA2005 ALLCLEAN +# UA2005 OTHERW +# UA2005 NORMALW +# UA2005 INVALW + +{ + RDWIM 10 rd:5 101010 00000 0 0000000000000 + RDPR_tpc 10 rd:5 101010 00000 0 0000000000000 +} +RDPR_tnpc 10 rd:5 101010 00001 0 0000000000000 +RDPR_tstate 10 rd:5 101010 00010 0 0000000000000 +RDPR_tt 10 rd:5 101010 00011 0 0000000000000 +RDPR_tick 10 rd:5 101010 00100 0 0000000000000 +RDPR_tba 10 rd:5 101010 00101 0 0000000000000 +RDPR_pstate 10 rd:5 101010 00110 0 0000000000000 +RDPR_tl 10 rd:5 101010 00111 0 0000000000000 +RDPR_pil 10 rd:5 101010 01000 0 0000000000000 +RDPR_cwp 10 rd:5 101010 01001 0 0000000000000 +RDPR_cansave 10 rd:5 101010 01010 0 0000000000000 +RDPR_canrestore 10 rd:5 101010 01011 0 0000000000000 +RDPR_cleanwin 10 rd:5 101010 01100 0 0000000000000 +RDPR_otherwin 10 rd:5 101010 01101 0 0000000000000 +RDPR_wstate 10 rd:5 101010 01110 0 0000000000000 +RDPR_gl 10 rd:5 101010 10000 0 0000000000000 +RDPR_strand_status 10 rd:5 101010 11010 0 0000000000000 +RDPR_ver 10 rd:5 101010 11111 0 0000000000000 + +{ + WRWIM 10 00000 110010 ..... . ............. @n_r_ri + WRPR_tpc 10 00000 110010 ..... . ............. @n_r_ri +} +WRPR_tnpc 10 00001 110010 ..... . ............. @n_r_ri +WRPR_tstate 10 00010 110010 ..... . ............. @n_r_ri +WRPR_tt 10 00011 110010 ..... . ............. @n_r_ri +WRPR_tick 10 00100 110010 ..... . ............. @n_r_ri +WRPR_tba 10 00101 110010 ..... . ............. @n_r_ri +WRPR_pstate 10 00110 110010 ..... . ............. @n_r_ri +WRPR_tl 10 00111 110010 ..... . ............. @n_r_ri +WRPR_pil 10 01000 110010 ..... . ............. @n_r_ri +WRPR_cwp 10 01001 110010 ..... . ............. @n_r_ri +WRPR_cansave 10 01010 110010 ..... . ............. @n_r_ri +WRPR_canrestore 10 01011 110010 ..... . ............. @n_r_ri +WRPR_cleanwin 10 01100 110010 ..... . ............. @n_r_ri +WRPR_otherwin 10 01101 110010 ..... . ............. @n_r_ri +WRPR_wstate 10 01110 110010 ..... . ............. @n_r_ri +WRPR_gl 10 10000 110010 ..... . ............. @n_r_ri +WRPR_strand_status 10 11010 110010 ..... . ............. @n_r_ri + +{ + FLUSHW 10 00000 101011 00000 0 0000000000000 + RDTBR 10 rd:5 101011 00000 0 0000000000000 +} + +{ + WRTBR 10 00000 110011 ..... . ............. @n_r_ri + WRHPR_hpstate 10 00000 110011 ..... . ............. @n_r_ri +} +WRHPR_htstate 10 00001 110011 ..... . ............. @n_r_ri +WRHPR_hintp 10 00011 110011 ..... . ............. @n_r_ri +WRHPR_htba 10 00101 110011 ..... . ............. @n_r_ri +WRHPR_hstick_cmpr 10 11111 110011 ..... . ............. @n_r_ri + +ADD 10 ..... 0.0000 ..... . ............. @r_r_ri_cc +AND 10 ..... 0.0001 ..... . ............. @r_r_ri_cc +OR 10 ..... 0.0010 ..... . ............. @r_r_ri_cc +XOR 10 ..... 0.0011 ..... . ............. @r_r_ri_cc +SUB 10 ..... 0.0100 ..... . ............. @r_r_ri_cc +ANDN 10 ..... 0.0101 ..... . ............. @r_r_ri_cc +ORN 10 ..... 0.0110 ..... . ............. @r_r_ri_cc +XORN 10 ..... 0.0111 ..... . ............. @r_r_ri_cc +ADDC 10 ..... 0.1000 ..... . ............. @r_r_ri_cc +SUBC 10 ..... 0.1100 ..... . ............. @r_r_ri_cc + +MULX 10 ..... 001001 ..... . ............. @r_r_ri_cc0 +UMUL 10 ..... 0.1010 ..... . ............. @r_r_ri_cc +SMUL 10 ..... 0.1011 ..... . ............. @r_r_ri_cc +MULScc 10 ..... 100100 ..... . ............. @r_r_ri_cc1 + +UDIVX 10 ..... 001101 ..... . ............. @r_r_ri_cc0 +SDIVX 10 ..... 101101 ..... . ............. @r_r_ri_cc0 +UDIV 10 ..... 0.1110 ..... . 
............. @r_r_ri_cc +SDIV 10 ..... 0.1111 ..... . ............. @r_r_ri_cc + +TADDcc 10 ..... 100000 ..... . ............. @r_r_ri_cc1 +TSUBcc 10 ..... 100001 ..... . ............. @r_r_ri_cc1 +TADDccTV 10 ..... 100010 ..... . ............. @r_r_ri_cc1 +TSUBccTV 10 ..... 100011 ..... . ............. @r_r_ri_cc1 + +POPC 10 rd:5 101110 00000 imm:1 rs2_or_imm:s13 \ + &r_r_ri_cc rs1=0 cc=0 + +&shiftr rd rs1 rs2 x:bool +@shiftr .. rd:5 ...... rs1:5 . x:1 ....... rs2:5 &shiftr + +SLL_r 10 ..... 100101 ..... 0 . 0000000 ..... @shiftr +SRL_r 10 ..... 100110 ..... 0 . 0000000 ..... @shiftr +SRA_r 10 ..... 100111 ..... 0 . 0000000 ..... @shiftr + +&shifti rd rs1 i x:bool +@shifti .. rd:5 ...... rs1:5 . x:1 ...... i:6 &shifti + +SLL_i 10 ..... 100101 ..... 1 . 000000 ...... @shifti +SRL_i 10 ..... 100110 ..... 1 . 000000 ...... @shifti +SRA_i 10 ..... 100111 ..... 1 . 000000 ...... @shifti + +Tcc_r 10 0 cond:4 111010 rs1:5 0 cc:1 0000000 rs2:5 +{ + # For v7, the entire simm13 field is present, but masked to 7 bits. + # For v8, [12:7] are reserved. However, a compatibility note for + # the Tcc insn in the v9 manual suggests that the v8 reserved field + # was ignored and did not produce traps. + Tcc_i_v7 10 0 cond:4 111010 rs1:5 1 ------ i:7 + + # For v9, bits [12:11] are cc1 and cc0 (and cc0 must be 0). + # Bits [10:8] are reserved and the OSA2011 manual says they must be 0. + Tcc_i_v9 10 0 cond:4 111010 rs1:5 1 cc:1 0 000 i:8 +} + +MOVcc 10 rd:5 101100 1 cond:4 imm:1 cc:1 0 rs2_or_imm:s11 +MOVfcc 10 rd:5 101100 0 cond:4 imm:1 cc:2 rs2_or_imm:s11 +MOVR 10 rd:5 101111 rs1:5 imm:1 cond:3 rs2_or_imm:s10 + +JMPL 10 ..... 111000 ..... . ............. @r_r_ri +{ + RETT 10 00000 111001 ..... . ............. @n_r_ri + RETURN 10 00000 111001 ..... . ............. @n_r_ri +} +NOP 10 00000 111011 ----- 0 00000000----- # FLUSH reg+reg +NOP 10 00000 111011 ----- 1 ------------- # FLUSH reg+imm +SAVE 10 ..... 111100 ..... . ............. @r_r_ri +RESTORE 10 ..... 111101 ..... . ............. @r_r_ri + +DONE 10 00000 111110 00000 0 0000000000000 +RETRY 10 00001 111110 00000 0 0000000000000 + +FMOVs 10 ..... 110100 00000 0 0000 0001 ..... @r_r2 +FMOVd 10 ..... 110100 00000 0 0000 0010 ..... @r_r2 +FMOVq 10 ..... 110100 00000 0 0000 0011 ..... @r_r2 +FNEGs 10 ..... 110100 00000 0 0000 0101 ..... @r_r2 +FNEGd 10 ..... 110100 00000 0 0000 0110 ..... @r_r2 +FNEGq 10 ..... 110100 00000 0 0000 0111 ..... @r_r2 +FABSs 10 ..... 110100 00000 0 0000 1001 ..... @r_r2 +FABSd 10 ..... 110100 00000 0 0000 1010 ..... @r_r2 +FABSq 10 ..... 110100 00000 0 0000 1011 ..... @r_r2 +FSQRTs 10 ..... 110100 00000 0 0010 1001 ..... @r_r2 +FSQRTd 10 ..... 110100 00000 0 0010 1010 ..... @r_r2 +FSQRTq 10 ..... 110100 00000 0 0010 1011 ..... @r_r2 +FADDs 10 ..... 110100 ..... 0 0100 0001 ..... @r_r_r +FADDd 10 ..... 110100 ..... 0 0100 0010 ..... @r_r_r +FADDq 10 ..... 110100 ..... 0 0100 0011 ..... @r_r_r +FSUBs 10 ..... 110100 ..... 0 0100 0101 ..... @r_r_r +FSUBd 10 ..... 110100 ..... 0 0100 0110 ..... @r_r_r +FSUBq 10 ..... 110100 ..... 0 0100 0111 ..... @r_r_r +FMULs 10 ..... 110100 ..... 0 0100 1001 ..... @r_r_r +FMULd 10 ..... 110100 ..... 0 0100 1010 ..... @r_r_r +FMULq 10 ..... 110100 ..... 0 0100 1011 ..... @r_r_r +FDIVs 10 ..... 110100 ..... 0 0100 1101 ..... @r_r_r +FDIVd 10 ..... 110100 ..... 0 0100 1110 ..... @r_r_r +FDIVq 10 ..... 110100 ..... 0 0100 1111 ..... @r_r_r +FsMULd 10 ..... 110100 ..... 0 0110 1001 ..... @r_r_r +FdMULq 10 ..... 110100 ..... 0 0110 1110 ..... @r_r_r +FsTOx 10 ..... 110100 00000 0 1000 0001 ..... 
@r_r2 +FdTOx 10 ..... 110100 00000 0 1000 0010 ..... @r_r2 +FqTOx 10 ..... 110100 00000 0 1000 0011 ..... @r_r2 +FxTOs 10 ..... 110100 00000 0 1000 0100 ..... @r_r2 +FxTOd 10 ..... 110100 00000 0 1000 1000 ..... @r_r2 +FxTOq 10 ..... 110100 00000 0 1000 1100 ..... @r_r2 +FiTOs 10 ..... 110100 00000 0 1100 0100 ..... @r_r2 +FdTOs 10 ..... 110100 00000 0 1100 0110 ..... @r_r2 +FqTOs 10 ..... 110100 00000 0 1100 0111 ..... @r_r2 +FiTOd 10 ..... 110100 00000 0 1100 1000 ..... @r_r2 +FsTOd 10 ..... 110100 00000 0 1100 1001 ..... @r_r2 +FqTOd 10 ..... 110100 00000 0 1100 1011 ..... @r_r2 +FiTOq 10 ..... 110100 00000 0 1100 1100 ..... @r_r2 +FsTOq 10 ..... 110100 00000 0 1100 1101 ..... @r_r2 +FdTOq 10 ..... 110100 00000 0 1100 1110 ..... @r_r2 +FsTOi 10 ..... 110100 00000 0 1101 0001 ..... @r_r2 +FdTOi 10 ..... 110100 00000 0 1101 0010 ..... @r_r2 +FqTOi 10 ..... 110100 00000 0 1101 0011 ..... @r_r2 + +FMOVscc 10 rd:5 110101 0 cond:4 1 cc:1 0 000001 rs2:5 +FMOVdcc 10 rd:5 110101 0 cond:4 1 cc:1 0 000010 rs2:5 +FMOVqcc 10 rd:5 110101 0 cond:4 1 cc:1 0 000011 rs2:5 + +FMOVsfcc 10 rd:5 110101 0 cond:4 0 cc:2 000001 rs2:5 +FMOVdfcc 10 rd:5 110101 0 cond:4 0 cc:2 000010 rs2:5 +FMOVqfcc 10 rd:5 110101 0 cond:4 0 cc:2 000011 rs2:5 + +FMOVRs 10 rd:5 110101 rs1:5 0 cond:3 00101 rs2:5 +FMOVRd 10 rd:5 110101 rs1:5 0 cond:3 00110 rs2:5 +FMOVRq 10 rd:5 110101 rs1:5 0 cond:3 00111 rs2:5 + +FCMPs 10 000 cc:2 110101 rs1:5 0 0101 0001 rs2:5 +FCMPd 10 000 cc:2 110101 rs1:5 0 0101 0010 rs2:5 +FCMPq 10 000 cc:2 110101 rs1:5 0 0101 0011 rs2:5 +FCMPEs 10 000 cc:2 110101 rs1:5 0 0101 0101 rs2:5 +FCMPEd 10 000 cc:2 110101 rs1:5 0 0101 0110 rs2:5 +FCMPEq 10 000 cc:2 110101 rs1:5 0 0101 0111 rs2:5 + +{ + [ + EDGE8cc 10 ..... 110110 ..... 0 0000 0000 ..... @r_r_r + EDGE8N 10 ..... 110110 ..... 0 0000 0001 ..... @r_r_r + EDGE8Lcc 10 ..... 110110 ..... 0 0000 0010 ..... @r_r_r + EDGE8LN 10 ..... 110110 ..... 0 0000 0011 ..... @r_r_r + EDGE16cc 10 ..... 110110 ..... 0 0000 0100 ..... @r_r_r + EDGE16N 10 ..... 110110 ..... 0 0000 0101 ..... @r_r_r + EDGE16Lcc 10 ..... 110110 ..... 0 0000 0110 ..... @r_r_r + EDGE16LN 10 ..... 110110 ..... 0 0000 0111 ..... @r_r_r + EDGE32cc 10 ..... 110110 ..... 0 0000 1000 ..... @r_r_r + EDGE32N 10 ..... 110110 ..... 0 0000 1001 ..... @r_r_r + EDGE32Lcc 10 ..... 110110 ..... 0 0000 1010 ..... @r_r_r + EDGE32LN 10 ..... 110110 ..... 0 0000 1011 ..... @r_r_r + + ARRAY8 10 ..... 110110 ..... 0 0001 0000 ..... @r_r_r + ARRAY16 10 ..... 110110 ..... 0 0001 0010 ..... @r_r_r + ARRAY32 10 ..... 110110 ..... 0 0001 0100 ..... @r_r_r + + ALIGNADDR 10 ..... 110110 ..... 0 0001 1000 ..... @r_r_r + ALIGNADDRL 10 ..... 110110 ..... 0 0001 1010 ..... @r_r_r + + BMASK 10 ..... 110110 ..... 0 0001 1001 ..... @r_r_r + + FPCMPLE16 10 ..... 110110 ..... 0 0010 0000 ..... @r_r_r + FPCMPNE16 10 ..... 110110 ..... 0 0010 0010 ..... @r_r_r + FPCMPGT16 10 ..... 110110 ..... 0 0010 1000 ..... @r_r_r + FPCMPEQ16 10 ..... 110110 ..... 0 0010 1010 ..... @r_r_r + FPCMPLE32 10 ..... 110110 ..... 0 0010 0100 ..... @r_r_r + FPCMPNE32 10 ..... 110110 ..... 0 0010 0110 ..... @r_r_r + FPCMPGT32 10 ..... 110110 ..... 0 0010 1100 ..... @r_r_r + FPCMPEQ32 10 ..... 110110 ..... 0 0010 1110 ..... @r_r_r + + FMUL8x16 10 ..... 110110 ..... 0 0011 0001 ..... @r_r_r + FMUL8x16AU 10 ..... 110110 ..... 0 0011 0011 ..... @r_r_r + FMUL8x16AL 10 ..... 110110 ..... 0 0011 0101 ..... @r_r_r + FMUL8SUx16 10 ..... 110110 ..... 0 0011 0110 ..... @r_r_r + FMUL8ULx16 10 ..... 110110 ..... 0 0011 0111 ..... @r_r_r + FMULD8SUx16 10 ..... 
110110 ..... 0 0011 1000 ..... @r_r_r + FMULD8ULx16 10 ..... 110110 ..... 0 0011 1001 ..... @r_r_r + FPACK32 10 ..... 110110 ..... 0 0011 1010 ..... @r_r_r + FPACK16 10 ..... 110110 00000 0 0011 1011 ..... @r_r2 + FPACKFIX 10 ..... 110110 00000 0 0011 1101 ..... @r_r2 + PDIST 10 ..... 110110 ..... 0 0011 1110 ..... @r_r_r + + FALIGNDATAg 10 ..... 110110 ..... 0 0100 1000 ..... @r_r_r + FPMERGE 10 ..... 110110 ..... 0 0100 1011 ..... @r_r_r + BSHUFFLE 10 ..... 110110 ..... 0 0100 1100 ..... @r_r_r + FEXPAND 10 ..... 110110 ..... 0 0100 1101 ..... @r_r_r + + FSRCd 10 ..... 110110 ..... 0 0111 0100 00000 @r_r1 # FSRC1d + FSRCs 10 ..... 110110 ..... 0 0111 0101 00000 @r_r1 # FSRC1s + FSRCd 10 ..... 110110 00000 0 0111 1000 ..... @r_r2 # FSRC2d + FSRCs 10 ..... 110110 00000 0 0111 1001 ..... @r_r2 # FSRC2s + FNOTd 10 ..... 110110 ..... 0 0110 1010 00000 @r_r1 # FNOT1d + FNOTs 10 ..... 110110 ..... 0 0110 1011 00000 @r_r1 # FNOT1s + FNOTd 10 ..... 110110 00000 0 0110 0110 ..... @r_r2 # FNOT2d + FNOTs 10 ..... 110110 00000 0 0110 0111 ..... @r_r2 # FNOT2s + + FPADD16 10 ..... 110110 ..... 0 0101 0000 ..... @r_r_r + FPADD16s 10 ..... 110110 ..... 0 0101 0001 ..... @r_r_r + FPADD32 10 ..... 110110 ..... 0 0101 0010 ..... @r_r_r + FPADD32s 10 ..... 110110 ..... 0 0101 0011 ..... @r_r_r + FPSUB16 10 ..... 110110 ..... 0 0101 0100 ..... @r_r_r + FPSUB16s 10 ..... 110110 ..... 0 0101 0101 ..... @r_r_r + FPSUB32 10 ..... 110110 ..... 0 0101 0110 ..... @r_r_r + FPSUB32s 10 ..... 110110 ..... 0 0101 0111 ..... @r_r_r + + FNORd 10 ..... 110110 ..... 0 0110 0010 ..... @r_r_r + FNORs 10 ..... 110110 ..... 0 0110 0011 ..... @r_r_r + FANDNOTd 10 ..... 110110 ..... 0 0110 0100 ..... @r_r_r # FANDNOT2d + FANDNOTs 10 ..... 110110 ..... 0 0110 0101 ..... @r_r_r # FANDNOT2s + FANDNOTd 10 ..... 110110 ..... 0 0110 1000 ..... @r_r_r_swap # ... 1d + FANDNOTs 10 ..... 110110 ..... 0 0110 1001 ..... @r_r_r_swap # ... 1s + FXORd 10 ..... 110110 ..... 0 0110 1100 ..... @r_r_r + FXORs 10 ..... 110110 ..... 0 0110 1101 ..... @r_r_r + FNANDd 10 ..... 110110 ..... 0 0110 1110 ..... @r_r_r + FNANDs 10 ..... 110110 ..... 0 0110 1111 ..... @r_r_r + FANDd 10 ..... 110110 ..... 0 0111 0000 ..... @r_r_r + FANDs 10 ..... 110110 ..... 0 0111 0001 ..... @r_r_r + FXNORd 10 ..... 110110 ..... 0 0111 0010 ..... @r_r_r + FXNORs 10 ..... 110110 ..... 0 0111 0011 ..... @r_r_r + FORNOTd 10 ..... 110110 ..... 0 0111 0110 ..... @r_r_r # FORNOT2d + FORNOTs 10 ..... 110110 ..... 0 0111 0111 ..... @r_r_r # FORNOT2s + FORNOTd 10 ..... 110110 ..... 0 0111 1010 ..... @r_r_r_swap # ... 1d + FORNOTs 10 ..... 110110 ..... 0 0111 1011 ..... @r_r_r_swap # ... 1s + FORd 10 ..... 110110 ..... 0 0111 1100 ..... @r_r_r + FORs 10 ..... 110110 ..... 0 0111 1101 ..... @r_r_r + + FZEROd 10 rd:5 110110 00000 0 0110 0000 00000 + FZEROs 10 rd:5 110110 00000 0 0110 0001 00000 + FONEd 10 rd:5 110110 00000 0 0111 1110 00000 + FONEs 10 rd:5 110110 00000 0 0111 1111 00000 + ] + NCP 10 ----- 110110 ----- --------- ----- # v8 CPop1 +} + +NCP 10 ----- 110111 ----- --------- ----- # v8 CPop2 + +## +## Major Opcode 11 -- load and store instructions +## + +%dfp_rd 25:5 !function=extract_dfpreg +%qfp_rd 25:5 !function=extract_qfpreg + +&r_r_ri_asi rd rs1 rs2_or_imm asi imm:bool +@r_r_ri_na .. rd:5 ...... rs1:5 imm:1 rs2_or_imm:s13 &r_r_ri_asi asi=-1 +@d_r_ri_na .. ..... ...... rs1:5 imm:1 rs2_or_imm:s13 \ + &r_r_ri_asi rd=%dfp_rd asi=-1 +@q_r_ri_na .. ..... ...... rs1:5 imm:1 rs2_or_imm:s13 \ + &r_r_ri_asi rd=%qfp_rd asi=-1 + +@r_r_r_asi .. rd:5 ...... 
rs1:5 0 asi:8 rs2_or_imm:5 &r_r_ri_asi imm=0 +@r_r_i_asi .. rd:5 ...... rs1:5 1 rs2_or_imm:s13 \ + &r_r_ri_asi imm=1 asi=-2 +@d_r_r_asi .. ..... ...... rs1:5 0 asi:8 rs2_or_imm:5 \ + &r_r_ri_asi rd=%dfp_rd imm=0 +@d_r_i_asi .. ..... ...... rs1:5 1 rs2_or_imm:s13 \ + &r_r_ri_asi rd=%dfp_rd imm=1 asi=-2 +@q_r_r_asi .. ..... ...... rs1:5 0 asi:8 rs2_or_imm:5 \ + &r_r_ri_asi rd=%qfp_rd imm=0 +@q_r_i_asi .. ..... ...... rs1:5 1 rs2_or_imm:s13 \ + &r_r_ri_asi rd=%qfp_rd imm=1 asi=-2 +@casa_imm .. rd:5 ...... rs1:5 1 00000000 rs2_or_imm:5 \ + &r_r_ri_asi imm=1 asi=-2 + +LDUW 11 ..... 000000 ..... . ............. @r_r_ri_na +LDUB 11 ..... 000001 ..... . ............. @r_r_ri_na +LDUH 11 ..... 000010 ..... . ............. @r_r_ri_na +LDD 11 ..... 000011 ..... . ............. @r_r_ri_na +LDSW 11 ..... 001000 ..... . ............. @r_r_ri_na +LDSB 11 ..... 001001 ..... . ............. @r_r_ri_na +LDSH 11 ..... 001010 ..... . ............. @r_r_ri_na +LDX 11 ..... 001011 ..... . ............. @r_r_ri_na + +STW 11 ..... 000100 ..... . ............. @r_r_ri_na +STB 11 ..... 000101 ..... . ............. @r_r_ri_na +STH 11 ..... 000110 ..... . ............. @r_r_ri_na +STD 11 ..... 000111 ..... . ............. @r_r_ri_na +STX 11 ..... 001110 ..... . ............. @r_r_ri_na + +LDUW 11 ..... 010000 ..... . ............. @r_r_r_asi # LDUWA +LDUW 11 ..... 010000 ..... . ............. @r_r_i_asi # LDUWA +LDUB 11 ..... 010001 ..... . ............. @r_r_r_asi # LDUBA +LDUB 11 ..... 010001 ..... . ............. @r_r_i_asi # LDUBA +LDUH 11 ..... 010010 ..... . ............. @r_r_r_asi # LDUHA +LDUH 11 ..... 010010 ..... . ............. @r_r_i_asi # LDUHA +LDD 11 ..... 010011 ..... . ............. @r_r_r_asi # LDDA +LDD 11 ..... 010011 ..... . ............. @r_r_i_asi # LDDA +LDX 11 ..... 011011 ..... . ............. @r_r_r_asi # LDXA +LDX 11 ..... 011011 ..... . ............. @r_r_i_asi # LDXA +LDSB 11 ..... 011001 ..... . ............. @r_r_r_asi # LDSBA +LDSB 11 ..... 011001 ..... . ............. @r_r_i_asi # LDSBA +LDSH 11 ..... 011010 ..... . ............. @r_r_r_asi # LDSHA +LDSH 11 ..... 011010 ..... . ............. @r_r_i_asi # LDSHA +LDSW 11 ..... 011000 ..... . ............. @r_r_r_asi # LDSWA +LDSW 11 ..... 011000 ..... . ............. @r_r_i_asi # LDSWA + +STW 11 ..... 010100 ..... . ............. @r_r_r_asi # STWA +STW 11 ..... 010100 ..... . ............. @r_r_i_asi # STWA +STB 11 ..... 010101 ..... . ............. @r_r_r_asi # STBA +STB 11 ..... 010101 ..... . ............. @r_r_i_asi # STBA +STH 11 ..... 010110 ..... . ............. @r_r_r_asi # STHA +STH 11 ..... 010110 ..... . ............. @r_r_i_asi # STHA +STD 11 ..... 010111 ..... . ............. @r_r_r_asi # STDA +STD 11 ..... 010111 ..... . ............. @r_r_i_asi # STDA +STX 11 ..... 011110 ..... . ............. @r_r_r_asi # STXA +STX 11 ..... 011110 ..... . ............. @r_r_i_asi # STXA + +LDF 11 ..... 100000 ..... . ............. @r_r_ri_na +LDFSR 11 00000 100001 ..... . ............. @n_r_ri +LDXFSR 11 00001 100001 ..... . ............. @n_r_ri +LDQF 11 ..... 100010 ..... . ............. @q_r_ri_na +LDDF 11 ..... 100011 ..... . ............. @d_r_ri_na + +STF 11 ..... 100100 ..... . ............. @r_r_ri_na +STFSR 11 00000 100101 ..... . ............. @n_r_ri +STXFSR 11 00001 100101 ..... . ............. @n_r_ri +{ + STQF 11 ..... 100110 ..... . ............. @q_r_ri_na + STDFQ 11 ----- 100110 ----- - ------------- +} +STDF 11 ..... 100111 ..... . ............. @d_r_ri_na + +LDSTUB 11 ..... 001101 ..... . ............. 
@r_r_ri_na +LDSTUB 11 ..... 011101 ..... . ............. @r_r_r_asi # LDSTUBA +LDSTUB 11 ..... 011101 ..... . ............. @r_r_i_asi # LDSTUBA + +SWAP 11 ..... 001111 ..... . ............. @r_r_ri_na +SWAP 11 ..... 011111 ..... . ............. @r_r_r_asi # SWAPA +SWAP 11 ..... 011111 ..... . ............. @r_r_i_asi # SWAPA + +CASA 11 ..... 111100 ..... . ............. @r_r_r_asi +CASA 11 ..... 111100 ..... . ............. @casa_imm +CASXA 11 ..... 111110 ..... . ............. @r_r_r_asi +CASXA 11 ..... 111110 ..... . ............. @casa_imm + +NOP_v9 11 ----- 101101 ----- 0 00000000 ----- # PREFETCH +NOP_v9 11 ----- 101101 ----- 1 ------------- # PREFETCH +NOP_v9 11 ----- 111101 ----- - ------------- # PREFETCHA + +{ + [ + LDFA 11 ..... 110000 ..... . ............. @r_r_r_asi + LDFA 11 ..... 110000 ..... . ............. @r_r_i_asi + ] + NCP 11 ----- 110000 ----- --------- ----- # v8 LDC +} +NCP 11 ----- 110001 ----- --------- ----- # v8 LDCSR +LDQFA 11 ..... 110010 ..... . ............. @q_r_r_asi +LDQFA 11 ..... 110010 ..... . ............. @q_r_i_asi +{ + [ + LDDFA 11 ..... 110011 ..... . ............. @d_r_r_asi + LDDFA 11 ..... 110011 ..... . ............. @d_r_i_asi + ] + NCP 11 ----- 110011 ----- --------- ----- # v8 LDDC +} + +{ + [ + STFA 11 ..... 110100 ..... . ............. @r_r_r_asi + STFA 11 ..... 110100 ..... . ............. @r_r_i_asi + ] + NCP 11 ----- 110100 ----- --------- ----- # v8 STC +} +NCP 11 ----- 110101 ----- --------- ----- # v8 STCSR +{ + [ + STQFA 11 ..... 110110 ..... . ............. @q_r_r_asi + STQFA 11 ..... 110110 ..... . ............. @q_r_i_asi + ] + NCP 11 ----- 110110 ----- --------- ----- # v8 STDCQ +} +{ + [ + STDFA 11 ..... 110111 ..... . ............. @d_r_r_asi + STDFA 11 ..... 110111 ..... . ............. 
@d_r_i_asi + ] + NCP 11 ----- 110111 ----- --------- ----- # v8 STDC +} diff --git a/target/sparc/ldst_helper.c b/target/sparc/ldst_helper.c index 78b03308ae..09066d5487 100644 --- a/target/sparc/ldst_helper.c +++ b/target/sparc/ldst_helper.c @@ -360,6 +360,7 @@ static inline void do_check_asi(CPUSPARCState *env, int asi, uintptr_t ra) #endif /* !CONFIG_USER_ONLY */ #endif +#if defined(TARGET_SPARC64) || !defined(CONFIG_USER_ONLY) static void do_check_align(CPUSPARCState *env, target_ulong addr, uint32_t align, uintptr_t ra) { @@ -367,11 +368,7 @@ static void do_check_align(CPUSPARCState *env, target_ulong addr, cpu_raise_exception_ra(env, TT_UNALIGNED, ra); } } - -void helper_check_align(CPUSPARCState *env, target_ulong addr, uint32_t align) -{ - do_check_align(env, addr, align, GETPC()); -} +#endif #if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY) && \ defined(DEBUG_MXCC) @@ -1653,7 +1650,7 @@ void helper_st_asi(CPUSPARCState *env, target_ulong addr, target_ulong val, int idx = ((asi & 2) >> 1) | ((asi & 8) >> 2); env->dmmu.sun4v_tsb_pointers[idx] = val; } else { - helper_raise_exception(env, TT_ILL_INSN); + goto illegal_insn; } break; case 0x33: @@ -1665,7 +1662,7 @@ void helper_st_asi(CPUSPARCState *env, target_ulong addr, target_ulong val, */ env->dmmu.sun4v_ctx_config[(asi & 8) >> 3] = val; } else { - helper_raise_exception(env, TT_ILL_INSN); + goto illegal_insn; } break; case 0x35: @@ -1682,7 +1679,7 @@ void helper_st_asi(CPUSPARCState *env, target_ulong addr, target_ulong val, int idx = ((asi & 2) >> 1) | ((asi & 8) >> 2); env->immu.sun4v_tsb_pointers[idx] = val; } else { - helper_raise_exception(env, TT_ILL_INSN); + goto illegal_insn; } break; case 0x37: @@ -1694,7 +1691,7 @@ void helper_st_asi(CPUSPARCState *env, target_ulong addr, target_ulong val, */ env->immu.sun4v_ctx_config[(asi & 8) >> 3] = val; } else { - helper_raise_exception(env, TT_ILL_INSN); + goto illegal_insn; } break; case ASI_UPA_CONFIG: /* UPA config */ @@ -1923,6 +1920,8 @@ void helper_st_asi(CPUSPARCState *env, target_ulong addr, target_ulong val, default: sparc_raise_mmu_fault(cs, addr, true, false, 1, size, GETPC()); return; + illegal_insn: + cpu_raise_exception_ra(env, TT_ILL_INSN, GETPC()); } } #endif /* CONFIG_USER_ONLY */ diff --git a/target/sparc/meson.build b/target/sparc/meson.build index 48025cce76..c316773db6 100644 --- a/target/sparc/meson.build +++ b/target/sparc/meson.build @@ -1,4 +1,7 @@ +gen = decodetree.process('insns.decode') + sparc_ss = ss.source_set() +sparc_ss.add(gen) sparc_ss.add(files( 'cc_helper.c', 'cpu.c', diff --git a/target/sparc/translate.c b/target/sparc/translate.c index f92ff80ac8..986a88c4e1 100644 --- a/target/sparc/translate.c +++ b/target/sparc/translate.c @@ -25,9 +25,8 @@ #include "exec/helper-proto.h" #include "exec/exec-all.h" #include "tcg/tcg-op.h" - +#include "tcg/tcg-op-gvec.h" #include "exec/helper-gen.h" - #include "exec/translator.h" #include "exec/log.h" #include "asi.h" @@ -36,6 +35,65 @@ #include "exec/helper-info.c.inc" #undef HELPER_H +#ifdef TARGET_SPARC64 +# define gen_helper_rdpsr(D, E) qemu_build_not_reached() +# define gen_helper_rett(E) qemu_build_not_reached() +# define gen_helper_power_down(E) qemu_build_not_reached() +# define gen_helper_wrpsr(E, S) qemu_build_not_reached() +#else +# define gen_helper_clear_softint(E, S) qemu_build_not_reached() +# define gen_helper_done(E) qemu_build_not_reached() +# define gen_helper_fabsd(D, S) qemu_build_not_reached() +# define gen_helper_flushw(E) qemu_build_not_reached() +# define gen_helper_fnegd(D, 
S) qemu_build_not_reached() +# define gen_helper_rdccr(D, E) qemu_build_not_reached() +# define gen_helper_rdcwp(D, E) qemu_build_not_reached() +# define gen_helper_restored(E) qemu_build_not_reached() +# define gen_helper_retry(E) qemu_build_not_reached() +# define gen_helper_saved(E) qemu_build_not_reached() +# define gen_helper_sdivx(D, E, A, B) qemu_build_not_reached() +# define gen_helper_set_softint(E, S) qemu_build_not_reached() +# define gen_helper_tick_get_count(D, E, T, C) qemu_build_not_reached() +# define gen_helper_tick_set_count(P, S) qemu_build_not_reached() +# define gen_helper_tick_set_limit(P, S) qemu_build_not_reached() +# define gen_helper_udivx(D, E, A, B) qemu_build_not_reached() +# define gen_helper_wrccr(E, S) qemu_build_not_reached() +# define gen_helper_wrcwp(E, S) qemu_build_not_reached() +# define gen_helper_wrgl(E, S) qemu_build_not_reached() +# define gen_helper_write_softint(E, S) qemu_build_not_reached() +# define gen_helper_wrpil(E, S) qemu_build_not_reached() +# define gen_helper_wrpstate(E, S) qemu_build_not_reached() +# define gen_helper_fabsq ({ qemu_build_not_reached(); NULL; }) +# define gen_helper_fcmpeq16 ({ qemu_build_not_reached(); NULL; }) +# define gen_helper_fcmpeq32 ({ qemu_build_not_reached(); NULL; }) +# define gen_helper_fcmpgt16 ({ qemu_build_not_reached(); NULL; }) +# define gen_helper_fcmpgt32 ({ qemu_build_not_reached(); NULL; }) +# define gen_helper_fcmple16 ({ qemu_build_not_reached(); NULL; }) +# define gen_helper_fcmple32 ({ qemu_build_not_reached(); NULL; }) +# define gen_helper_fcmpne16 ({ qemu_build_not_reached(); NULL; }) +# define gen_helper_fcmpne32 ({ qemu_build_not_reached(); NULL; }) +# define gen_helper_fdtox ({ qemu_build_not_reached(); NULL; }) +# define gen_helper_fexpand ({ qemu_build_not_reached(); NULL; }) +# define gen_helper_fmul8sux16 ({ qemu_build_not_reached(); NULL; }) +# define gen_helper_fmul8ulx16 ({ qemu_build_not_reached(); NULL; }) +# define gen_helper_fmul8x16al ({ qemu_build_not_reached(); NULL; }) +# define gen_helper_fmul8x16au ({ qemu_build_not_reached(); NULL; }) +# define gen_helper_fmul8x16 ({ qemu_build_not_reached(); NULL; }) +# define gen_helper_fmuld8sux16 ({ qemu_build_not_reached(); NULL; }) +# define gen_helper_fmuld8ulx16 ({ qemu_build_not_reached(); NULL; }) +# define gen_helper_fnegq ({ qemu_build_not_reached(); NULL; }) +# define gen_helper_fpmerge ({ qemu_build_not_reached(); NULL; }) +# define gen_helper_fqtox ({ qemu_build_not_reached(); NULL; }) +# define gen_helper_fstox ({ qemu_build_not_reached(); NULL; }) +# define gen_helper_fxtod ({ qemu_build_not_reached(); NULL; }) +# define gen_helper_fxtoq ({ qemu_build_not_reached(); NULL; }) +# define gen_helper_fxtos ({ qemu_build_not_reached(); NULL; }) +# define gen_helper_pdist ({ qemu_build_not_reached(); NULL; }) +# define FSR_LDXFSR_MASK 0 +# define FSR_LDXFSR_OLDMASK 0 +# define MAXTL_MASK 0 +#endif + /* Dynamic PC, must exit to main loop. */ #define DYNAMIC_PC 1 /* Dynamic PC, one of two values according to jump_pc[T2]. 
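The block of gen_helper_* stubs above exists so that code shared between the 32-bit and 64-bit translators compiles in both configurations: qemu_build_not_reached() is a call the optimizer must prove dead, turning any accidental use of a sparc64-only helper in a sparc32 build into a build failure rather than a guest crash. The same effect can be sketched outside QEMU with a deliberately undefined function (an approximation; QEMU's real macro relies on a compile-time error attribute):

    #include <stdio.h>

    /* Never defined anywhere: if a call to it survives optimization,
     * the link fails, proving the "unreachable" path really was dead. */
    extern void build_time_not_reached(void);

    enum { HAVE_SPARC64 = 0 };

    static void sparc64_only_op(void)
    {
        build_time_not_reached();   /* must be optimized away here */
    }

    int main(void)
    {
        if (HAVE_SPARC64) {
            sparc64_only_op();      /* constant-false: dead at -O2 */
        } else {
            puts("32-bit configuration");
        }
        return 0;
    }

Built with optimization, the dead branch and the stub call are eliminated and the program links; without optimization the undefined reference surfaces, which mirrors how the macros above catch misuse at build time.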
*/ @@ -53,21 +111,36 @@ static TCGv_i32 cpu_psr; static TCGv cpu_fsr, cpu_pc, cpu_npc; static TCGv cpu_regs[32]; static TCGv cpu_y; -#ifndef CONFIG_USER_ONLY static TCGv cpu_tbr; -#endif static TCGv cpu_cond; #ifdef TARGET_SPARC64 static TCGv_i32 cpu_xcc, cpu_fprs; static TCGv cpu_gsr; -static TCGv cpu_tick_cmpr, cpu_stick_cmpr, cpu_hstick_cmpr; -static TCGv cpu_hintp, cpu_htba, cpu_hver, cpu_ssr, cpu_ver; #else -static TCGv cpu_wim; +# define cpu_fprs ({ qemu_build_not_reached(); (TCGv)NULL; }) +# define cpu_gsr ({ qemu_build_not_reached(); (TCGv)NULL; }) #endif /* Floating point registers */ static TCGv_i64 cpu_fpr[TARGET_DPREGS]; +#define env_field_offsetof(X) offsetof(CPUSPARCState, X) +#ifdef TARGET_SPARC64 +# define env32_field_offsetof(X) ({ qemu_build_not_reached(); 0; }) +# define env64_field_offsetof(X) env_field_offsetof(X) +#else +# define env32_field_offsetof(X) env_field_offsetof(X) +# define env64_field_offsetof(X) ({ qemu_build_not_reached(); 0; }) +#endif + +typedef struct DisasDelayException { + struct DisasDelayException *next; + TCGLabel *lab; + TCGv_i32 excp; + /* Saved state at parent insn. */ + target_ulong pc; + target_ulong npc; +} DisasDelayException; + typedef struct DisasContext { DisasContextBase base; target_ulong pc; /* current Program Counter: integer or DYNAMIC_PC */ @@ -89,6 +162,7 @@ typedef struct DisasContext { int fprs_dirty; int asi; #endif + DisasDelayException *delay_excp_list; } DisasContext; typedef struct { @@ -119,12 +193,6 @@ typedef struct { #define UA2005_HTRAP_MASK 0xff #define V8_TRAP_MASK 0x7f -static int sign_extend(int x, int len) -{ - len = 32 - len; - return (x << len) >> len; -} - #define IS_IMM (insn & (1<<13)) static void gen_update_fprs_dirty(DisasContext *dc, int rd) @@ -209,69 +277,40 @@ static void gen_op_store_QT0_fpr(unsigned int dst) offsetof(CPU_QuadU, ll.lower)); } -static void gen_store_fpr_Q(DisasContext *dc, unsigned int dst, - TCGv_i64 v1, TCGv_i64 v2) -{ - dst = QFPREG(dst); - - tcg_gen_mov_i64(cpu_fpr[dst / 2], v1); - tcg_gen_mov_i64(cpu_fpr[dst / 2 + 1], v2); - gen_update_fprs_dirty(dc, dst); -} - -#ifdef TARGET_SPARC64 -static TCGv_i64 gen_load_fpr_Q0(DisasContext *dc, unsigned int src) -{ - src = QFPREG(src); - return cpu_fpr[src / 2]; -} - -static TCGv_i64 gen_load_fpr_Q1(DisasContext *dc, unsigned int src) -{ - src = QFPREG(src); - return cpu_fpr[src / 2 + 1]; -} - -static void gen_move_Q(DisasContext *dc, unsigned int rd, unsigned int rs) -{ - rd = QFPREG(rd); - rs = QFPREG(rs); - - tcg_gen_mov_i64(cpu_fpr[rd / 2], cpu_fpr[rs / 2]); - tcg_gen_mov_i64(cpu_fpr[rd / 2 + 1], cpu_fpr[rs / 2 + 1]); - gen_update_fprs_dirty(dc, rd); -} -#endif - /* moves */ #ifdef CONFIG_USER_ONLY #define supervisor(dc) 0 -#ifdef TARGET_SPARC64 #define hypervisor(dc) 0 -#endif #else #ifdef TARGET_SPARC64 #define hypervisor(dc) (dc->hypervisor) #define supervisor(dc) (dc->supervisor | dc->hypervisor) #else #define supervisor(dc) (dc->supervisor) +#define hypervisor(dc) 0 #endif #endif -#ifdef TARGET_SPARC64 -#ifndef TARGET_ABI32 -#define AM_CHECK(dc) ((dc)->address_mask_32bit) +#if !defined(TARGET_SPARC64) +# define AM_CHECK(dc) false +#elif defined(TARGET_ABI32) +# define AM_CHECK(dc) true +#elif defined(CONFIG_USER_ONLY) +# define AM_CHECK(dc) false #else -#define AM_CHECK(dc) (1) -#endif +# define AM_CHECK(dc) ((dc)->address_mask_32bit) #endif static void gen_address_mask(DisasContext *dc, TCGv addr) { -#ifdef TARGET_SPARC64 - if (AM_CHECK(dc)) + if (AM_CHECK(dc)) { tcg_gen_andi_tl(addr, addr, 0xffffffffULL); -#endif + } +} + +static 
target_ulong address_mask_i(DisasContext *dc, target_ulong addr) +{ + return AM_CHECK(dc) ? (uint32_t)addr : addr; } static TCGv gen_load_gpr(DisasContext *dc, int reg) @@ -402,71 +441,89 @@ static TCGv_i32 gen_sub32_carry32(void) return carry_32; } -static void gen_op_addx_int(DisasContext *dc, TCGv dst, TCGv src1, - TCGv src2, int update_cc) +static void gen_op_addc_int(TCGv dst, TCGv src1, TCGv src2, + TCGv_i32 carry_32, bool update_cc) { - TCGv_i32 carry_32; - TCGv carry; + tcg_gen_add_tl(dst, src1, src2); - switch (dc->cc_op) { - case CC_OP_DIV: - case CC_OP_LOGIC: - /* Carry is known to be zero. Fall back to plain ADD. */ - if (update_cc) { - gen_op_add_cc(dst, src1, src2); - } else { - tcg_gen_add_tl(dst, src1, src2); - } - return; +#ifdef TARGET_SPARC64 + TCGv carry = tcg_temp_new(); + tcg_gen_extu_i32_tl(carry, carry_32); + tcg_gen_add_tl(dst, dst, carry); +#else + tcg_gen_add_i32(dst, dst, carry_32); +#endif - case CC_OP_ADD: - case CC_OP_TADD: - case CC_OP_TADDTV: - if (TARGET_LONG_BITS == 32) { - /* We can re-use the host's hardware carry generation by using - an ADD2 opcode. We discard the low part of the output. - Ideally we'd combine this operation with the add that - generated the carry in the first place. */ - carry = tcg_temp_new(); - tcg_gen_add2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2); - goto add_done; - } - carry_32 = gen_add32_carry32(); - break; + if (update_cc) { + tcg_debug_assert(dst == cpu_cc_dst); + tcg_gen_mov_tl(cpu_cc_src, src1); + tcg_gen_mov_tl(cpu_cc_src2, src2); + } +} - case CC_OP_SUB: - case CC_OP_TSUB: - case CC_OP_TSUBTV: - carry_32 = gen_sub32_carry32(); - break; +static void gen_op_addc_int_add(TCGv dst, TCGv src1, TCGv src2, bool update_cc) +{ + TCGv discard; - default: - /* We need external help to produce the carry. */ - carry_32 = tcg_temp_new_i32(); - gen_helper_compute_C_icc(carry_32, tcg_env); - break; + if (TARGET_LONG_BITS == 64) { + gen_op_addc_int(dst, src1, src2, gen_add32_carry32(), update_cc); + return; } -#if TARGET_LONG_BITS == 64 - carry = tcg_temp_new(); - tcg_gen_extu_i32_i64(carry, carry_32); -#else - carry = carry_32; -#endif - - tcg_gen_add_tl(dst, src1, src2); - tcg_gen_add_tl(dst, dst, carry); + /* + * We can re-use the host's hardware carry generation by using + * an ADD2 opcode. We discard the low part of the output. + * Ideally we'd combine this operation with the add that + * generated the carry in the first place. 
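The comment above describes the key trick in gen_op_addc_int_add(): on a 32-bit target, tcg_gen_add2_tl() re-performs the flag-setting addition (cpu_cc_src + cpu_cc_src2) in the low half of a double-width add, and its carry-out then feeds src1 + src2 in the high half, so dst = src1 + src2 + C falls out of a single host-level operation. The underlying identity, in plain C rather than TCG (a minimal sketch):

    #include <stdint.h>
    #include <stdio.h>

    /* The carry-out of a 32-bit addition is bit 32 of the same
     * addition done at double width. */
    static uint32_t carry_of_add32(uint32_t a, uint32_t b)
    {
        uint64_t wide = (uint64_t)a + b;
        return (uint32_t)(wide >> 32);
    }

    int main(void)
    {
        printf("%u\n", carry_of_add32(0xffffffffu, 1u)); /* 1 */
        printf("%u\n", carry_of_add32(1u, 2u));          /* 0 */
        return 0;
    }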
+ */ + discard = tcg_temp_new(); + tcg_gen_add2_tl(discard, dst, cpu_cc_src, src1, cpu_cc_src2, src2); - add_done: if (update_cc) { + tcg_debug_assert(dst == cpu_cc_dst); tcg_gen_mov_tl(cpu_cc_src, src1); tcg_gen_mov_tl(cpu_cc_src2, src2); - tcg_gen_mov_tl(cpu_cc_dst, dst); - tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADDX); - dc->cc_op = CC_OP_ADDX; } } +static void gen_op_addc_add(TCGv dst, TCGv src1, TCGv src2) +{ + gen_op_addc_int_add(dst, src1, src2, false); +} + +static void gen_op_addccc_add(TCGv dst, TCGv src1, TCGv src2) +{ + gen_op_addc_int_add(dst, src1, src2, true); +} + +static void gen_op_addc_sub(TCGv dst, TCGv src1, TCGv src2) +{ + gen_op_addc_int(dst, src1, src2, gen_sub32_carry32(), false); +} + +static void gen_op_addccc_sub(TCGv dst, TCGv src1, TCGv src2) +{ + gen_op_addc_int(dst, src1, src2, gen_sub32_carry32(), true); +} + +static void gen_op_addc_int_generic(TCGv dst, TCGv src1, TCGv src2, + bool update_cc) +{ + TCGv_i32 carry_32 = tcg_temp_new_i32(); + gen_helper_compute_C_icc(carry_32, tcg_env); + gen_op_addc_int(dst, src1, src2, carry_32, update_cc); +} + +static void gen_op_addc_generic(TCGv dst, TCGv src1, TCGv src2) +{ + gen_op_addc_int_generic(dst, src1, src2, false); +} + +static void gen_op_addccc_generic(TCGv dst, TCGv src1, TCGv src2) +{ + gen_op_addc_int_generic(dst, src1, src2, true); +} + static void gen_op_sub_cc(TCGv dst, TCGv src1, TCGv src2) { tcg_gen_mov_tl(cpu_cc_src, src1); @@ -475,51 +532,11 @@ static void gen_op_sub_cc(TCGv dst, TCGv src1, TCGv src2) tcg_gen_mov_tl(dst, cpu_cc_dst); } -static void gen_op_subx_int(DisasContext *dc, TCGv dst, TCGv src1, - TCGv src2, int update_cc) +static void gen_op_subc_int(TCGv dst, TCGv src1, TCGv src2, + TCGv_i32 carry_32, bool update_cc) { - TCGv_i32 carry_32; TCGv carry; - switch (dc->cc_op) { - case CC_OP_DIV: - case CC_OP_LOGIC: - /* Carry is known to be zero. Fall back to plain SUB. */ - if (update_cc) { - gen_op_sub_cc(dst, src1, src2); - } else { - tcg_gen_sub_tl(dst, src1, src2); - } - return; - - case CC_OP_ADD: - case CC_OP_TADD: - case CC_OP_TADDTV: - carry_32 = gen_add32_carry32(); - break; - - case CC_OP_SUB: - case CC_OP_TSUB: - case CC_OP_TSUBTV: - if (TARGET_LONG_BITS == 32) { - /* We can re-use the host's hardware carry generation by using - a SUB2 opcode. We discard the low part of the output. - Ideally we'd combine this operation with the add that - generated the carry in the first place. */ - carry = tcg_temp_new(); - tcg_gen_sub2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2); - goto sub_done; - } - carry_32 = gen_sub32_carry32(); - break; - - default: - /* We need external help to produce the carry. 
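The subtract flavor that follows relies on the complementary fact: after SUBcc, the SPARC carry bit is the borrow, i.e. C = (a < b), and gen_sub32_carry32() recovers it the same way, by redoing the subtraction at double width. In plain C (a minimal sketch):

    #include <stdint.h>
    #include <stdio.h>

    /* After a 32-bit subtract, the borrow is bit 32 of the same
     * subtraction done at double width: set exactly when a < b. */
    static uint32_t borrow_of_sub32(uint32_t a, uint32_t b)
    {
        uint64_t wide = (uint64_t)a - b;
        return (uint32_t)(wide >> 32) & 1u;
    }

    int main(void)
    {
        printf("%u\n", borrow_of_sub32(1u, 2u)); /* 1: borrow taken */
        printf("%u\n", borrow_of_sub32(2u, 1u)); /* 0 */
        return 0;
    }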
*/ - carry_32 = tcg_temp_new_i32(); - gen_helper_compute_C_icc(carry_32, tcg_env); - break; - } - #if TARGET_LONG_BITS == 64 carry = tcg_temp_new(); tcg_gen_extu_i32_i64(carry, carry_32); @@ -530,16 +547,75 @@ static void gen_op_subx_int(DisasContext *dc, TCGv dst, TCGv src1, tcg_gen_sub_tl(dst, src1, src2); tcg_gen_sub_tl(dst, dst, carry); - sub_done: if (update_cc) { + tcg_debug_assert(dst == cpu_cc_dst); tcg_gen_mov_tl(cpu_cc_src, src1); tcg_gen_mov_tl(cpu_cc_src2, src2); - tcg_gen_mov_tl(cpu_cc_dst, dst); - tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUBX); - dc->cc_op = CC_OP_SUBX; } } +static void gen_op_subc_add(TCGv dst, TCGv src1, TCGv src2) +{ + gen_op_subc_int(dst, src1, src2, gen_add32_carry32(), false); +} + +static void gen_op_subccc_add(TCGv dst, TCGv src1, TCGv src2) +{ + gen_op_subc_int(dst, src1, src2, gen_add32_carry32(), true); +} + +static void gen_op_subc_int_sub(TCGv dst, TCGv src1, TCGv src2, bool update_cc) +{ + TCGv discard; + + if (TARGET_LONG_BITS == 64) { + gen_op_subc_int(dst, src1, src2, gen_sub32_carry32(), update_cc); + return; + } + + /* + * We can re-use the host's hardware carry generation by using + * a SUB2 opcode. We discard the low part of the output. + */ + discard = tcg_temp_new(); + tcg_gen_sub2_tl(discard, dst, cpu_cc_src, src1, cpu_cc_src2, src2); + + if (update_cc) { + tcg_debug_assert(dst == cpu_cc_dst); + tcg_gen_mov_tl(cpu_cc_src, src1); + tcg_gen_mov_tl(cpu_cc_src2, src2); + } +} + +static void gen_op_subc_sub(TCGv dst, TCGv src1, TCGv src2) +{ + gen_op_subc_int_sub(dst, src1, src2, false); +} + +static void gen_op_subccc_sub(TCGv dst, TCGv src1, TCGv src2) +{ + gen_op_subc_int_sub(dst, src1, src2, true); +} + +static void gen_op_subc_int_generic(TCGv dst, TCGv src1, TCGv src2, + bool update_cc) +{ + TCGv_i32 carry_32 = tcg_temp_new_i32(); + + gen_helper_compute_C_icc(carry_32, tcg_env); + gen_op_subc_int(dst, src1, src2, carry_32, update_cc); +} + +static void gen_op_subc_generic(TCGv dst, TCGv src1, TCGv src2) +{ + gen_op_subc_int_generic(dst, src1, src2, false); +} + +static void gen_op_subccc_generic(TCGv dst, TCGv src1, TCGv src2) +{ + gen_op_subc_int_generic(dst, src1, src2, true); +} + static void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2) { TCGv r_temp, zero, t0; @@ -616,6 +692,133 @@ static void gen_op_smul(TCGv dst, TCGv src1, TCGv src2) gen_op_multiply(dst, src1, src2, 1); } +static void gen_op_udivx(TCGv dst, TCGv src1, TCGv src2) +{ + gen_helper_udivx(dst, tcg_env, src1, src2); +} + +static void gen_op_sdivx(TCGv dst, TCGv src1, TCGv src2) +{ + gen_helper_sdivx(dst, tcg_env, src1, src2); +} + +static void gen_op_udiv(TCGv dst, TCGv src1, TCGv src2) +{ + gen_helper_udiv(dst, tcg_env, src1, src2); +} + +static void gen_op_sdiv(TCGv dst, TCGv src1, TCGv src2) +{ + gen_helper_sdiv(dst, tcg_env, src1, src2); +} + +static void gen_op_udivcc(TCGv dst, TCGv src1, TCGv src2) +{ + gen_helper_udiv_cc(dst, tcg_env, src1, src2); +} + +static void gen_op_sdivcc(TCGv dst, TCGv src1, TCGv src2) +{ + gen_helper_sdiv_cc(dst, tcg_env, src1, src2); +} + +static void gen_op_taddcctv(TCGv dst, TCGv src1, TCGv src2) +{ + gen_helper_taddcctv(dst, tcg_env, src1, src2); +} + +static void gen_op_tsubcctv(TCGv dst, TCGv src1, TCGv src2) +{ + gen_helper_tsubcctv(dst, tcg_env, src1, src2); +} + +static void gen_op_popc(TCGv dst, TCGv src1, TCGv src2) +{ + tcg_gen_ctpop_tl(dst, src2); +} + +#ifndef TARGET_SPARC64 +static void gen_helper_array8(TCGv dst, TCGv src1, TCGv src2) +{ + g_assert_not_reached(); +} +#endif + +static void gen_op_array16(TCGv dst, TCGv src1, 
TCGv src2) +{ + gen_helper_array8(dst, src1, src2); + tcg_gen_shli_tl(dst, dst, 1); +} + +static void gen_op_array32(TCGv dst, TCGv src1, TCGv src2) +{ + gen_helper_array8(dst, src1, src2); + tcg_gen_shli_tl(dst, dst, 2); +} + +static void gen_op_fpack16(TCGv_i32 dst, TCGv_i64 src) +{ +#ifdef TARGET_SPARC64 + gen_helper_fpack16(dst, cpu_gsr, src); +#else + g_assert_not_reached(); +#endif +} + +static void gen_op_fpackfix(TCGv_i32 dst, TCGv_i64 src) +{ +#ifdef TARGET_SPARC64 + gen_helper_fpackfix(dst, cpu_gsr, src); +#else + g_assert_not_reached(); +#endif +} + +static void gen_op_fpack32(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2) +{ +#ifdef TARGET_SPARC64 + gen_helper_fpack32(dst, cpu_gsr, src1, src2); +#else + g_assert_not_reached(); +#endif +} + +static void gen_op_faligndata(TCGv_i64 dst, TCGv_i64 s1, TCGv_i64 s2) +{ +#ifdef TARGET_SPARC64 + TCGv t1, t2, shift; + + t1 = tcg_temp_new(); + t2 = tcg_temp_new(); + shift = tcg_temp_new(); + + tcg_gen_andi_tl(shift, cpu_gsr, 7); + tcg_gen_shli_tl(shift, shift, 3); + tcg_gen_shl_tl(t1, s1, shift); + + /* + * A shift of 64 does not produce 0 in TCG. Divide this into a + * shift of (up to 63) followed by a constant shift of 1. + */ + tcg_gen_xori_tl(shift, shift, 63); + tcg_gen_shr_tl(t2, s2, shift); + tcg_gen_shri_tl(t2, t2, 1); + + tcg_gen_or_tl(dst, t1, t2); +#else + g_assert_not_reached(); +#endif +} + +static void gen_op_bshuffle(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2) +{ +#ifdef TARGET_SPARC64 + gen_helper_bshuffle(dst, cpu_gsr, src1, src2); +#else + g_assert_not_reached(); +#endif +} + // 1 static void gen_op_eval_ba(TCGv dst) { @@ -884,47 +1087,6 @@ static void gen_branch2(DisasContext *dc, target_ulong pc1, gen_goto_tb(dc, 1, pc2, pc2 + 4); } -static void gen_branch_a(DisasContext *dc, target_ulong pc1) -{ - TCGLabel *l1 = gen_new_label(); - target_ulong npc = dc->npc; - - tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_cond, 0, l1); - - gen_goto_tb(dc, 0, npc, pc1); - - gen_set_label(l1); - gen_goto_tb(dc, 1, npc + 4, npc + 8); - - dc->base.is_jmp = DISAS_NORETURN; -} - -static void gen_branch_n(DisasContext *dc, target_ulong pc1) -{ - target_ulong npc = dc->npc; - - if (npc & 3) { - switch (npc) { - case DYNAMIC_PC: - case DYNAMIC_PC_LOOKUP: - tcg_gen_mov_tl(cpu_pc, cpu_npc); - tcg_gen_addi_tl(cpu_npc, cpu_npc, 4); - tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, - cpu_cond, tcg_constant_tl(0), - tcg_constant_tl(pc1), cpu_npc); - dc->pc = npc; - break; - default: - g_assert_not_reached(); - } - } else { - dc->pc = npc; - dc->jump_pc[0] = pc1; - dc->jump_pc[1] = npc + 4; - dc->npc = JUMP_PC; - } -} - static void gen_generic_branch(DisasContext *dc) { TCGv npc0 = tcg_constant_tl(dc->jump_pc[0]); @@ -984,9 +1146,38 @@ static void gen_exception(DisasContext *dc, int which) dc->base.is_jmp = DISAS_NORETURN; } -static void gen_check_align(TCGv addr, int mask) +static TCGLabel *delay_exceptionv(DisasContext *dc, TCGv_i32 excp) { - gen_helper_check_align(tcg_env, addr, tcg_constant_i32(mask)); + DisasDelayException *e = g_new0(DisasDelayException, 1); + + e->next = dc->delay_excp_list; + dc->delay_excp_list = e; + + e->lab = gen_new_label(); + e->excp = excp; + e->pc = dc->pc; + /* Caller must have used flush_cond before branch. 
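delay_exceptionv(), being added here together with the DisasDelayException struct declared earlier in translate.c, lets a conditional trap be emitted as one forward branch: the fast path falls through, and the recorded pc/npc/trap number are used to generate the slow path once, out of line, when the translation block is finalized. A sketch of that drain step, assuming the shape implied by the struct fields (not the patch's literal tb_stop code):

    static void drain_delay_exceptions(DisasContext *dc)
    {
        for (DisasDelayException *e = dc->delay_excp_list; e; e = e->next) {
            gen_set_label(e->lab);              /* bind the branch target */
            tcg_gen_movi_tl(cpu_pc, e->pc);     /* restore the insn's pc */
            tcg_gen_movi_tl(cpu_npc, e->npc);   /* and its npc */
            gen_helper_raise_exception(tcg_env, e->excp);
        }
    }

gen_check_align() then reduces to an AND plus one brcond to the recorded label, which is why the caller must have flushed the condition state first: the saved npc must be a concrete value, hence the assert on dc->npc in delay_exceptionv().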
*/ + assert(dc->npc != JUMP_PC); + e->npc = dc->npc; + + return e->lab; +} + +static TCGLabel *delay_exception(DisasContext *dc, int excp) +{ + return delay_exceptionv(dc, tcg_constant_i32(excp)); +} + +static void gen_check_align(DisasContext *dc, TCGv addr, int mask) +{ + TCGv t = tcg_temp_new(); + TCGLabel *lab; + + tcg_gen_andi_tl(t, addr, mask); + + flush_cond(dc); + lab = delay_exception(dc, TT_UNALIGNED); + tcg_gen_brcondi_tl(TCG_COND_NE, t, 0, lab); } static void gen_mov_pc_npc(DisasContext *dc) @@ -1264,41 +1455,13 @@ static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond) } } -static void gen_cond(TCGv r_dst, unsigned int cc, unsigned int cond, - DisasContext *dc) -{ - DisasCompare cmp; - gen_compare(&cmp, cc, cond, dc); - - /* The interface is to return a boolean in r_dst. */ - if (cmp.is_bool) { - tcg_gen_mov_tl(r_dst, cmp.c1); - } else { - tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2); - } -} - -static void gen_fcond(TCGv r_dst, unsigned int cc, unsigned int cond) -{ - DisasCompare cmp; - gen_fcompare(&cmp, cc, cond); - - /* The interface is to return a boolean in r_dst. */ - if (cmp.is_bool) { - tcg_gen_mov_tl(r_dst, cmp.c1); - } else { - tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2); - } -} - -#ifdef TARGET_SPARC64 // Inverted logic -static const int gen_tcg_cond_reg[8] = { - -1, +static const TCGCond gen_tcg_cond_reg[8] = { + TCG_COND_NEVER, /* reserved */ TCG_COND_NE, TCG_COND_GT, TCG_COND_GE, - -1, + TCG_COND_NEVER, /* reserved */ TCG_COND_EQ, TCG_COND_LE, TCG_COND_LT, @@ -1312,115 +1475,48 @@ static void gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src) cmp->c2 = tcg_constant_tl(0); } -static void gen_cond_reg(TCGv r_dst, int cond, TCGv r_src) +static void gen_op_clear_ieee_excp_and_FTT(void) { - DisasCompare cmp; - gen_compare_reg(&cmp, cond, r_src); - - /* The interface is to return a boolean in r_dst. 
*/ - tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2); + tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_CEXC_NMASK); } -#endif -static void do_branch(DisasContext *dc, int32_t offset, uint32_t insn, int cc) +static void gen_op_fmovs(TCGv_i32 dst, TCGv_i32 src) { - unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29)); - target_ulong target = dc->pc + offset; + gen_op_clear_ieee_excp_and_FTT(); + tcg_gen_mov_i32(dst, src); +} -#ifdef TARGET_SPARC64 - if (unlikely(AM_CHECK(dc))) { - target &= 0xffffffffULL; - } -#endif - if (cond == 0x0) { - /* unconditional not taken */ - if (a) { - dc->pc = dc->npc + 4; - dc->npc = dc->pc + 4; - } else { - dc->pc = dc->npc; - dc->npc = dc->pc + 4; - } - } else if (cond == 0x8) { - /* unconditional taken */ - if (a) { - dc->pc = target; - dc->npc = dc->pc + 4; - } else { - dc->pc = dc->npc; - dc->npc = target; - tcg_gen_mov_tl(cpu_pc, cpu_npc); - } - } else { - flush_cond(dc); - gen_cond(cpu_cond, cc, cond, dc); - if (a) { - gen_branch_a(dc, target); - } else { - gen_branch_n(dc, target); - } - } +static void gen_op_fnegs(TCGv_i32 dst, TCGv_i32 src) +{ + gen_op_clear_ieee_excp_and_FTT(); + gen_helper_fnegs(dst, src); } -static void do_fbranch(DisasContext *dc, int32_t offset, uint32_t insn, int cc) +static void gen_op_fabss(TCGv_i32 dst, TCGv_i32 src) { - unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29)); - target_ulong target = dc->pc + offset; + gen_op_clear_ieee_excp_and_FTT(); + gen_helper_fabss(dst, src); +} -#ifdef TARGET_SPARC64 - if (unlikely(AM_CHECK(dc))) { - target &= 0xffffffffULL; - } -#endif - if (cond == 0x0) { - /* unconditional not taken */ - if (a) { - dc->pc = dc->npc + 4; - dc->npc = dc->pc + 4; - } else { - dc->pc = dc->npc; - dc->npc = dc->pc + 4; - } - } else if (cond == 0x8) { - /* unconditional taken */ - if (a) { - dc->pc = target; - dc->npc = dc->pc + 4; - } else { - dc->pc = dc->npc; - dc->npc = target; - tcg_gen_mov_tl(cpu_pc, cpu_npc); - } - } else { - flush_cond(dc); - gen_fcond(cpu_cond, cc, cond); - if (a) { - gen_branch_a(dc, target); - } else { - gen_branch_n(dc, target); - } - } +static void gen_op_fmovd(TCGv_i64 dst, TCGv_i64 src) +{ + gen_op_clear_ieee_excp_and_FTT(); + tcg_gen_mov_i64(dst, src); } -#ifdef TARGET_SPARC64 -static void do_branch_reg(DisasContext *dc, int32_t offset, uint32_t insn, - TCGv r_reg) +static void gen_op_fnegd(TCGv_i64 dst, TCGv_i64 src) { - unsigned int cond = GET_FIELD_SP(insn, 25, 27), a = (insn & (1 << 29)); - target_ulong target = dc->pc + offset; + gen_op_clear_ieee_excp_and_FTT(); + gen_helper_fnegd(dst, src); +} - if (unlikely(AM_CHECK(dc))) { - target &= 0xffffffffULL; - } - flush_cond(dc); - gen_cond_reg(cpu_cond, cond, r_reg); - if (a) { - gen_branch_a(dc, target); - } else { - gen_branch_n(dc, target); - } +static void gen_op_fabsd(TCGv_i64 dst, TCGv_i64 src) +{ + gen_op_clear_ieee_excp_and_FTT(); + gen_helper_fabsd(dst, src); } +#ifdef TARGET_SPARC64 static void gen_op_fcmps(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2) { switch (fccno) { @@ -1580,343 +1676,7 @@ static int gen_trap_ifnofpu(DisasContext *dc) return 0; } -static void gen_op_clear_ieee_excp_and_FTT(void) -{ - tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_CEXC_NMASK); -} - -static void gen_fop_FF(DisasContext *dc, int rd, int rs, - void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32)) -{ - TCGv_i32 dst, src; - - src = gen_load_fpr_F(dc, rs); - dst = gen_dest_fpr_F(dc); - - gen(dst, tcg_env, src); - gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env); - - gen_store_fpr_F(dc, rd, dst); -} - -static void 
gen_ne_fop_FF(DisasContext *dc, int rd, int rs, - void (*gen)(TCGv_i32, TCGv_i32)) -{ - TCGv_i32 dst, src; - - src = gen_load_fpr_F(dc, rs); - dst = gen_dest_fpr_F(dc); - - gen(dst, src); - - gen_store_fpr_F(dc, rd, dst); -} - -static void gen_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2, - void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32)) -{ - TCGv_i32 dst, src1, src2; - - src1 = gen_load_fpr_F(dc, rs1); - src2 = gen_load_fpr_F(dc, rs2); - dst = gen_dest_fpr_F(dc); - - gen(dst, tcg_env, src1, src2); - gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env); - - gen_store_fpr_F(dc, rd, dst); -} - -#ifdef TARGET_SPARC64 -static void gen_ne_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2, - void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32)) -{ - TCGv_i32 dst, src1, src2; - - src1 = gen_load_fpr_F(dc, rs1); - src2 = gen_load_fpr_F(dc, rs2); - dst = gen_dest_fpr_F(dc); - - gen(dst, src1, src2); - - gen_store_fpr_F(dc, rd, dst); -} -#endif - -static void gen_fop_DD(DisasContext *dc, int rd, int rs, - void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64)) -{ - TCGv_i64 dst, src; - - src = gen_load_fpr_D(dc, rs); - dst = gen_dest_fpr_D(dc, rd); - - gen(dst, tcg_env, src); - gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env); - - gen_store_fpr_D(dc, rd, dst); -} - -#ifdef TARGET_SPARC64 -static void gen_ne_fop_DD(DisasContext *dc, int rd, int rs, - void (*gen)(TCGv_i64, TCGv_i64)) -{ - TCGv_i64 dst, src; - - src = gen_load_fpr_D(dc, rs); - dst = gen_dest_fpr_D(dc, rd); - - gen(dst, src); - - gen_store_fpr_D(dc, rd, dst); -} -#endif - -static void gen_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2, - void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64)) -{ - TCGv_i64 dst, src1, src2; - - src1 = gen_load_fpr_D(dc, rs1); - src2 = gen_load_fpr_D(dc, rs2); - dst = gen_dest_fpr_D(dc, rd); - - gen(dst, tcg_env, src1, src2); - gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env); - - gen_store_fpr_D(dc, rd, dst); -} - -#ifdef TARGET_SPARC64 -static void gen_ne_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2, - void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64)) -{ - TCGv_i64 dst, src1, src2; - - src1 = gen_load_fpr_D(dc, rs1); - src2 = gen_load_fpr_D(dc, rs2); - dst = gen_dest_fpr_D(dc, rd); - - gen(dst, src1, src2); - - gen_store_fpr_D(dc, rd, dst); -} - -static void gen_gsr_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2, - void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64)) -{ - TCGv_i64 dst, src1, src2; - - src1 = gen_load_fpr_D(dc, rs1); - src2 = gen_load_fpr_D(dc, rs2); - dst = gen_dest_fpr_D(dc, rd); - - gen(dst, cpu_gsr, src1, src2); - - gen_store_fpr_D(dc, rd, dst); -} - -static void gen_ne_fop_DDDD(DisasContext *dc, int rd, int rs1, int rs2, - void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64)) -{ - TCGv_i64 dst, src0, src1, src2; - - src1 = gen_load_fpr_D(dc, rs1); - src2 = gen_load_fpr_D(dc, rs2); - src0 = gen_load_fpr_D(dc, rd); - dst = gen_dest_fpr_D(dc, rd); - - gen(dst, src0, src1, src2); - - gen_store_fpr_D(dc, rd, dst); -} -#endif - -static void gen_fop_QQ(DisasContext *dc, int rd, int rs, - void (*gen)(TCGv_ptr)) -{ - gen_op_load_fpr_QT1(QFPREG(rs)); - - gen(tcg_env); - gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env); - - gen_op_store_QT0_fpr(QFPREG(rd)); - gen_update_fprs_dirty(dc, QFPREG(rd)); -} - -#ifdef TARGET_SPARC64 -static void gen_ne_fop_QQ(DisasContext *dc, int rd, int rs, - void (*gen)(TCGv_ptr)) -{ - gen_op_load_fpr_QT1(QFPREG(rs)); - - gen(tcg_env); - - gen_op_store_QT0_fpr(QFPREG(rd)); - gen_update_fprs_dirty(dc, QFPREG(rd)); -} -#endif - -static void gen_fop_QQQ(DisasContext *dc, 
int rd, int rs1, int rs2, - void (*gen)(TCGv_ptr)) -{ - gen_op_load_fpr_QT0(QFPREG(rs1)); - gen_op_load_fpr_QT1(QFPREG(rs2)); - - gen(tcg_env); - gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env); - - gen_op_store_QT0_fpr(QFPREG(rd)); - gen_update_fprs_dirty(dc, QFPREG(rd)); -} - -static void gen_fop_DFF(DisasContext *dc, int rd, int rs1, int rs2, - void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32, TCGv_i32)) -{ - TCGv_i64 dst; - TCGv_i32 src1, src2; - - src1 = gen_load_fpr_F(dc, rs1); - src2 = gen_load_fpr_F(dc, rs2); - dst = gen_dest_fpr_D(dc, rd); - - gen(dst, tcg_env, src1, src2); - gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env); - - gen_store_fpr_D(dc, rd, dst); -} - -static void gen_fop_QDD(DisasContext *dc, int rd, int rs1, int rs2, - void (*gen)(TCGv_ptr, TCGv_i64, TCGv_i64)) -{ - TCGv_i64 src1, src2; - - src1 = gen_load_fpr_D(dc, rs1); - src2 = gen_load_fpr_D(dc, rs2); - - gen(tcg_env, src1, src2); - gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env); - - gen_op_store_QT0_fpr(QFPREG(rd)); - gen_update_fprs_dirty(dc, QFPREG(rd)); -} - -#ifdef TARGET_SPARC64 -static void gen_fop_DF(DisasContext *dc, int rd, int rs, - void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32)) -{ - TCGv_i64 dst; - TCGv_i32 src; - - src = gen_load_fpr_F(dc, rs); - dst = gen_dest_fpr_D(dc, rd); - - gen(dst, tcg_env, src); - gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env); - - gen_store_fpr_D(dc, rd, dst); -} -#endif - -static void gen_ne_fop_DF(DisasContext *dc, int rd, int rs, - void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32)) -{ - TCGv_i64 dst; - TCGv_i32 src; - - src = gen_load_fpr_F(dc, rs); - dst = gen_dest_fpr_D(dc, rd); - - gen(dst, tcg_env, src); - - gen_store_fpr_D(dc, rd, dst); -} - -static void gen_fop_FD(DisasContext *dc, int rd, int rs, - void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i64)) -{ - TCGv_i32 dst; - TCGv_i64 src; - - src = gen_load_fpr_D(dc, rs); - dst = gen_dest_fpr_F(dc); - - gen(dst, tcg_env, src); - gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env); - - gen_store_fpr_F(dc, rd, dst); -} - -static void gen_fop_FQ(DisasContext *dc, int rd, int rs, - void (*gen)(TCGv_i32, TCGv_ptr)) -{ - TCGv_i32 dst; - - gen_op_load_fpr_QT1(QFPREG(rs)); - dst = gen_dest_fpr_F(dc); - - gen(dst, tcg_env); - gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env); - - gen_store_fpr_F(dc, rd, dst); -} - -static void gen_fop_DQ(DisasContext *dc, int rd, int rs, - void (*gen)(TCGv_i64, TCGv_ptr)) -{ - TCGv_i64 dst; - - gen_op_load_fpr_QT1(QFPREG(rs)); - dst = gen_dest_fpr_D(dc, rd); - - gen(dst, tcg_env); - gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env); - - gen_store_fpr_D(dc, rd, dst); -} - -static void gen_ne_fop_QF(DisasContext *dc, int rd, int rs, - void (*gen)(TCGv_ptr, TCGv_i32)) -{ - TCGv_i32 src; - - src = gen_load_fpr_F(dc, rs); - - gen(tcg_env, src); - - gen_op_store_QT0_fpr(QFPREG(rd)); - gen_update_fprs_dirty(dc, QFPREG(rd)); -} - -static void gen_ne_fop_QD(DisasContext *dc, int rd, int rs, - void (*gen)(TCGv_ptr, TCGv_i64)) -{ - TCGv_i64 src; - - src = gen_load_fpr_D(dc, rs); - - gen(tcg_env, src); - - gen_op_store_QT0_fpr(QFPREG(rd)); - gen_update_fprs_dirty(dc, QFPREG(rd)); -} - -static void gen_swap(DisasContext *dc, TCGv dst, TCGv src, - TCGv addr, int mmu_idx, MemOp memop) -{ - gen_address_mask(dc, addr); - tcg_gen_atomic_xchg_tl(dst, addr, src, mmu_idx, memop | MO_ALIGN); -} - -static void gen_ldstub(DisasContext *dc, TCGv dst, TCGv addr, int mmu_idx) -{ - TCGv m1 = tcg_constant_tl(0xff); - gen_address_mask(dc, addr); - tcg_gen_atomic_xchg_tl(dst, addr, m1, mmu_idx, MO_UB); -} - /* asi moves */ -#if 
!defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64) typedef enum { GET_ASI_HELPER, GET_ASI_EXCP, @@ -1935,15 +1695,25 @@ typedef struct { MemOp memop; } DisasASI; -static DisasASI get_asi(DisasContext *dc, int insn, MemOp memop) +/* + * Build DisasASI. + * For asi == -1, treat as non-asi. + * For asi == -2, treat as immediate offset (v8 error, v9 %asi). + */ +static DisasASI resolve_asi(DisasContext *dc, int asi, MemOp memop) { - int asi = GET_FIELD(insn, 19, 26); ASIType type = GET_ASI_HELPER; int mem_idx = dc->mem_idx; + if (asi == -1) { + /* Artificial "non-asi" case. */ + type = GET_ASI_DIRECT; + goto done; + } + #ifndef TARGET_SPARC64 /* Before v9, all asis are immediate and privileged. */ - if (IS_IMM) { + if (asi < 0) { gen_exception(dc, TT_ILL_INSN); type = GET_ASI_EXCP; } else if (supervisor(dc) @@ -1986,7 +1756,7 @@ static DisasASI get_asi(DisasContext *dc, int insn, MemOp memop) type = GET_ASI_EXCP; } #else - if (IS_IMM) { + if (asi < 0) { asi = dc->asi; } /* With v9, all asis below 0x80 are privileged. */ @@ -2145,28 +1915,39 @@ static DisasASI get_asi(DisasContext *dc, int insn, MemOp memop) } #endif + done: return (DisasASI){ type, asi, mem_idx, memop }; } -static void gen_ld_asi(DisasContext *dc, TCGv dst, TCGv addr, - int insn, MemOp memop) +#if defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64) +static void gen_helper_ld_asi(TCGv_i64 r, TCGv_env e, TCGv a, + TCGv_i32 asi, TCGv_i32 mop) +{ + g_assert_not_reached(); +} + +static void gen_helper_st_asi(TCGv_env e, TCGv a, TCGv_i64 r, + TCGv_i32 asi, TCGv_i32 mop) { - DisasASI da = get_asi(dc, insn, memop); + g_assert_not_reached(); +} +#endif - switch (da.type) { +static void gen_ld_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr) +{ + switch (da->type) { case GET_ASI_EXCP: break; case GET_ASI_DTWINX: /* Reserved for ldda. */ gen_exception(dc, TT_ILL_INSN); break; case GET_ASI_DIRECT: - gen_address_mask(dc, addr); - tcg_gen_qemu_ld_tl(dst, addr, da.mem_idx, da.memop | MO_ALIGN); + tcg_gen_qemu_ld_tl(dst, addr, da->mem_idx, da->memop | MO_ALIGN); break; default: { - TCGv_i32 r_asi = tcg_constant_i32(da.asi); - TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN); + TCGv_i32 r_asi = tcg_constant_i32(da->asi); + TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN); save_state(dc); #ifdef TARGET_SPARC64 @@ -2183,34 +1964,30 @@ static void gen_ld_asi(DisasContext *dc, TCGv dst, TCGv addr, } } -static void gen_st_asi(DisasContext *dc, TCGv src, TCGv addr, - int insn, MemOp memop) +static void gen_st_asi(DisasContext *dc, DisasASI *da, TCGv src, TCGv addr) { - DisasASI da = get_asi(dc, insn, memop); - - switch (da.type) { + switch (da->type) { case GET_ASI_EXCP: break; + case GET_ASI_DTWINX: /* Reserved for stda. 
*/ -#ifndef TARGET_SPARC64 - gen_exception(dc, TT_ILL_INSN); - break; -#else - if (!(dc->def->features & CPU_FEATURE_HYPV)) { + if (TARGET_LONG_BITS == 32) { + gen_exception(dc, TT_ILL_INSN); + break; + } else if (!(dc->def->features & CPU_FEATURE_HYPV)) { /* Pre OpenSPARC CPUs don't have these */ gen_exception(dc, TT_ILL_INSN); - return; + break; } - /* in OpenSPARC T1+ CPUs TWINX ASIs in store instructions - * are ST_BLKINIT_ ASIs */ -#endif + /* In OpenSPARC T1+ CPUs TWINX ASIs in store are ST_BLKINIT_ ASIs */ /* fall through */ + case GET_ASI_DIRECT: - gen_address_mask(dc, addr); - tcg_gen_qemu_st_tl(src, addr, da.mem_idx, da.memop | MO_ALIGN); + tcg_gen_qemu_st_tl(src, addr, da->mem_idx, da->memop | MO_ALIGN); break; -#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY) + case GET_ASI_BCOPY: + assert(TARGET_LONG_BITS == 32); /* Copy 32 bytes from the address in SRC to ADDR. */ /* ??? The original qemu code suggests 4-byte alignment, dropping the low bits, but the only place I can see this used is in the @@ -2228,18 +2005,18 @@ static void gen_st_asi(DisasContext *dc, TCGv src, TCGv addr, for (i = 0; i < 32; i += 4) { /* Since the loads and stores are paired, allow the copy to happen in the host endianness. */ - tcg_gen_qemu_ld_i32(tmp, saddr, da.mem_idx, MO_UL); - tcg_gen_qemu_st_i32(tmp, daddr, da.mem_idx, MO_UL); + tcg_gen_qemu_ld_i32(tmp, saddr, da->mem_idx, MO_UL); + tcg_gen_qemu_st_i32(tmp, daddr, da->mem_idx, MO_UL); tcg_gen_add_tl(saddr, saddr, four); tcg_gen_add_tl(daddr, daddr, four); } } break; -#endif + default: { - TCGv_i32 r_asi = tcg_constant_i32(da.asi); - TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN); + TCGv_i32 r_asi = tcg_constant_i32(da->asi); + TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN); save_state(dc); #ifdef TARGET_SPARC64 @@ -2259,16 +2036,15 @@ static void gen_st_asi(DisasContext *dc, TCGv src, TCGv addr, } } -static void gen_swap_asi(DisasContext *dc, TCGv dst, TCGv src, - TCGv addr, int insn) +static void gen_swap_asi(DisasContext *dc, DisasASI *da, + TCGv dst, TCGv src, TCGv addr) { - DisasASI da = get_asi(dc, insn, MO_TEUL); - - switch (da.type) { + switch (da->type) { case GET_ASI_EXCP: break; case GET_ASI_DIRECT: - gen_swap(dc, dst, src, addr, da.mem_idx, da.memop); + tcg_gen_atomic_xchg_tl(dst, addr, src, + da->mem_idx, da->memop | MO_ALIGN); break; default: /* ??? Should be DAE_invalid_asi. */ @@ -2277,20 +2053,15 @@ static void gen_swap_asi(DisasContext *dc, TCGv dst, TCGv src, } } -static void gen_cas_asi(DisasContext *dc, TCGv addr, TCGv cmpv, - int insn, int rd) +static void gen_cas_asi(DisasContext *dc, DisasASI *da, + TCGv oldv, TCGv newv, TCGv cmpv, TCGv addr) { - DisasASI da = get_asi(dc, insn, MO_TEUL); - TCGv oldv; - - switch (da.type) { + switch (da->type) { case GET_ASI_EXCP: return; case GET_ASI_DIRECT: - oldv = tcg_temp_new(); - tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, gen_load_gpr(dc, rd), - da.mem_idx, da.memop | MO_ALIGN); - gen_store_gpr(dc, rd, oldv); + tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, newv, + da->mem_idx, da->memop | MO_ALIGN); break; default: /* ??? Should be DAE_invalid_asi. 
*/ @@ -2299,15 +2070,14 @@ static void gen_cas_asi(DisasContext *dc, TCGv addr, TCGv cmpv, } } -static void gen_ldstub_asi(DisasContext *dc, TCGv dst, TCGv addr, int insn) +static void gen_ldstub_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr) { - DisasASI da = get_asi(dc, insn, MO_UB); - - switch (da.type) { + switch (da->type) { case GET_ASI_EXCP: break; case GET_ASI_DIRECT: - gen_ldstub(dc, dst, addr, da.mem_idx); + tcg_gen_atomic_xchg_tl(dst, addr, tcg_constant_tl(0xff), + da->mem_idx, MO_UB); break; default: /* ??? In theory, this should be raise DAE_invalid_asi. @@ -2315,7 +2085,7 @@ static void gen_ldstub_asi(DisasContext *dc, TCGv dst, TCGv addr, int insn) if (tb_cflags(dc->base.tb) & CF_PARALLEL) { gen_helper_exit_atomic(tcg_env); } else { - TCGv_i32 r_asi = tcg_constant_i32(da.asi); + TCGv_i32 r_asi = tcg_constant_i32(da->asi); TCGv_i32 r_mop = tcg_constant_i32(MO_UB); TCGv_i64 s64, t64; @@ -2334,38 +2104,44 @@ static void gen_ldstub_asi(DisasContext *dc, TCGv dst, TCGv addr, int insn) break; } } -#endif -#ifdef TARGET_SPARC64 -static void gen_ldf_asi(DisasContext *dc, TCGv addr, - int insn, int size, int rd) +static void gen_ldf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size, + TCGv addr, int rd) { - DisasASI da = get_asi(dc, insn, (size == 4 ? MO_TEUL : MO_TEUQ)); + MemOp memop = da->memop; + MemOp size = memop & MO_SIZE; TCGv_i32 d32; TCGv_i64 d64; + TCGv addr_tmp; + + /* TODO: Use 128-bit load/store below. */ + if (size == MO_128) { + memop = (memop & ~MO_SIZE) | MO_64; + } - switch (da.type) { + switch (da->type) { case GET_ASI_EXCP: break; case GET_ASI_DIRECT: - gen_address_mask(dc, addr); + memop |= MO_ALIGN_4; switch (size) { - case 4: + case MO_32: d32 = gen_dest_fpr_F(dc); - tcg_gen_qemu_ld_i32(d32, addr, da.mem_idx, da.memop | MO_ALIGN); + tcg_gen_qemu_ld_i32(d32, addr, da->mem_idx, memop); gen_store_fpr_F(dc, rd, d32); break; - case 8: - tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da.mem_idx, - da.memop | MO_ALIGN_4); + + case MO_64: + tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da->mem_idx, memop); break; - case 16: + + case MO_128: d64 = tcg_temp_new_i64(); - tcg_gen_qemu_ld_i64(d64, addr, da.mem_idx, da.memop | MO_ALIGN_4); - tcg_gen_addi_tl(addr, addr, 8); - tcg_gen_qemu_ld_i64(cpu_fpr[rd/2+1], addr, da.mem_idx, - da.memop | MO_ALIGN_4); + tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop); + addr_tmp = tcg_temp_new(); + tcg_gen_addi_tl(addr_tmp, addr, 8); + tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + 1], addr_tmp, da->mem_idx, memop); tcg_gen_mov_i64(cpu_fpr[rd / 2], d64); break; default: @@ -2375,24 +2151,17 @@ static void gen_ldf_asi(DisasContext *dc, TCGv addr, case GET_ASI_BLOCK: /* Valid for lddfa on aligned registers only. */ - if (size == 8 && (rd & 7) == 0) { - MemOp memop; - TCGv eight; - int i; - - gen_address_mask(dc, addr); - + if (orig_size == MO_64 && (rd & 7) == 0) { /* The first operation checks required alignment. */ - memop = da.memop | MO_ALIGN_64; - eight = tcg_constant_tl(8); - for (i = 0; ; ++i) { - tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + i], addr, - da.mem_idx, memop); + addr_tmp = tcg_temp_new(); + for (int i = 0; ; ++i) { + tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + i], addr, da->mem_idx, + memop | (i == 0 ? MO_ALIGN_64 : 0)); if (i == 7) { break; } - tcg_gen_add_tl(addr, addr, eight); - memop = da.memop; + tcg_gen_addi_tl(addr_tmp, addr, 8); + addr = addr_tmp; } } else { gen_exception(dc, TT_ILL_INSN); @@ -2401,10 +2170,9 @@ static void gen_ldf_asi(DisasContext *dc, TCGv addr, case GET_ASI_SHORT: /* Valid for lddfa only. 
*/ - if (size == 8) { - gen_address_mask(dc, addr); - tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da.mem_idx, - da.memop | MO_ALIGN); + if (orig_size == MO_64) { + tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da->mem_idx, + memop | MO_ALIGN); } else { gen_exception(dc, TT_ILL_INSN); } @@ -2412,8 +2180,8 @@ static void gen_ldf_asi(DisasContext *dc, TCGv addr, default: { - TCGv_i32 r_asi = tcg_constant_i32(da.asi); - TCGv_i32 r_mop = tcg_constant_i32(da.memop | MO_ALIGN); + TCGv_i32 r_asi = tcg_constant_i32(da->asi); + TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN); save_state(dc); /* According to the table in the UA2011 manual, the only @@ -2421,21 +2189,24 @@ static void gen_ldf_asi(DisasContext *dc, TCGv addr, the NO_FAULT asis. We still need a helper for these, but we can just use the integer asi helper for them. */ switch (size) { - case 4: + case MO_32: d64 = tcg_temp_new_i64(); gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop); d32 = gen_dest_fpr_F(dc); tcg_gen_extrl_i64_i32(d32, d64); gen_store_fpr_F(dc, rd, d32); break; - case 8: - gen_helper_ld_asi(cpu_fpr[rd / 2], tcg_env, addr, r_asi, r_mop); + case MO_64: + gen_helper_ld_asi(cpu_fpr[rd / 2], tcg_env, addr, + r_asi, r_mop); break; - case 16: + case MO_128: d64 = tcg_temp_new_i64(); gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop); - tcg_gen_addi_tl(addr, addr, 8); - gen_helper_ld_asi(cpu_fpr[rd/2+1], tcg_env, addr, r_asi, r_mop); + addr_tmp = tcg_temp_new(); + tcg_gen_addi_tl(addr_tmp, addr, 8); + gen_helper_ld_asi(cpu_fpr[rd / 2 + 1], tcg_env, addr_tmp, + r_asi, r_mop); tcg_gen_mov_i64(cpu_fpr[rd / 2], d64); break; default: @@ -2446,37 +2217,45 @@ static void gen_ldf_asi(DisasContext *dc, TCGv addr, } } -static void gen_stf_asi(DisasContext *dc, TCGv addr, - int insn, int size, int rd) +static void gen_stf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size, + TCGv addr, int rd) { - DisasASI da = get_asi(dc, insn, (size == 4 ? MO_TEUL : MO_TEUQ)); + MemOp memop = da->memop; + MemOp size = memop & MO_SIZE; TCGv_i32 d32; + TCGv addr_tmp; + + /* TODO: Use 128-bit load/store below. */ + if (size == MO_128) { + memop = (memop & ~MO_SIZE) | MO_64; + } - switch (da.type) { + switch (da->type) { case GET_ASI_EXCP: break; case GET_ASI_DIRECT: - gen_address_mask(dc, addr); + memop |= MO_ALIGN_4; switch (size) { - case 4: + case MO_32: d32 = gen_load_fpr_F(dc, rd); - tcg_gen_qemu_st_i32(d32, addr, da.mem_idx, da.memop | MO_ALIGN); + tcg_gen_qemu_st_i32(d32, addr, da->mem_idx, memop | MO_ALIGN); break; - case 8: - tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx, - da.memop | MO_ALIGN_4); + case MO_64: + tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da->mem_idx, + memop | MO_ALIGN_4); break; - case 16: + case MO_128: /* Only 4-byte alignment required. However, it is legal for the cpu to signal the alignment fault, and the OS trap handler is required to fix it up. Requiring 16-byte alignment here avoids having to probe the second page before performing the first write. 
*/ - tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx, - da.memop | MO_ALIGN_16); - tcg_gen_addi_tl(addr, addr, 8); - tcg_gen_qemu_st_i64(cpu_fpr[rd/2+1], addr, da.mem_idx, da.memop); + tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da->mem_idx, + memop | MO_ALIGN_16); + addr_tmp = tcg_temp_new(); + tcg_gen_addi_tl(addr_tmp, addr, 8); + tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + 1], addr_tmp, da->mem_idx, memop); break; default: g_assert_not_reached(); @@ -2485,24 +2264,17 @@ static void gen_stf_asi(DisasContext *dc, TCGv addr, case GET_ASI_BLOCK: /* Valid for stdfa on aligned registers only. */ - if (size == 8 && (rd & 7) == 0) { - MemOp memop; - TCGv eight; - int i; - - gen_address_mask(dc, addr); - + if (orig_size == MO_64 && (rd & 7) == 0) { /* The first operation checks required alignment. */ - memop = da.memop | MO_ALIGN_64; - eight = tcg_constant_tl(8); - for (i = 0; ; ++i) { - tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + i], addr, - da.mem_idx, memop); + addr_tmp = tcg_temp_new(); + for (int i = 0; ; ++i) { + tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + i], addr, da->mem_idx, + memop | (i == 0 ? MO_ALIGN_64 : 0)); if (i == 7) { break; } - tcg_gen_add_tl(addr, addr, eight); - memop = da.memop; + tcg_gen_addi_tl(addr_tmp, addr, 8); + addr = addr_tmp; } } else { gen_exception(dc, TT_ILL_INSN); @@ -2511,10 +2283,9 @@ static void gen_stf_asi(DisasContext *dc, TCGv addr, case GET_ASI_SHORT: /* Valid for stdfa only. */ - if (size == 8) { - gen_address_mask(dc, addr); - tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx, - da.memop | MO_ALIGN); + if (orig_size == MO_64) { + tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da->mem_idx, + memop | MO_ALIGN); } else { gen_exception(dc, TT_ILL_INSN); } @@ -2529,37 +2300,51 @@ static void gen_stf_asi(DisasContext *dc, TCGv addr, } } -static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd) +static void gen_ldda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd) { - DisasASI da = get_asi(dc, insn, MO_TEUQ); - TCGv_i64 hi = gen_dest_gpr(dc, rd); - TCGv_i64 lo = gen_dest_gpr(dc, rd + 1); + TCGv hi = gen_dest_gpr(dc, rd); + TCGv lo = gen_dest_gpr(dc, rd + 1); - switch (da.type) { + switch (da->type) { case GET_ASI_EXCP: return; case GET_ASI_DTWINX: - gen_address_mask(dc, addr); - tcg_gen_qemu_ld_i64(hi, addr, da.mem_idx, da.memop | MO_ALIGN_16); - tcg_gen_addi_tl(addr, addr, 8); - tcg_gen_qemu_ld_i64(lo, addr, da.mem_idx, da.memop); +#ifdef TARGET_SPARC64 + { + MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16; + TCGv_i128 t = tcg_temp_new_i128(); + + tcg_gen_qemu_ld_i128(t, addr, da->mem_idx, mop); + /* + * Note that LE twinx acts as if each 64-bit register result is + * byte swapped. We perform one 128-bit LE load, so must swap + * the order of the writebacks. + */ + if ((mop & MO_BSWAP) == MO_TE) { + tcg_gen_extr_i128_i64(lo, hi, t); + } else { + tcg_gen_extr_i128_i64(hi, lo, t); + } + } break; +#else + g_assert_not_reached(); +#endif case GET_ASI_DIRECT: { TCGv_i64 tmp = tcg_temp_new_i64(); - gen_address_mask(dc, addr); - tcg_gen_qemu_ld_i64(tmp, addr, da.mem_idx, da.memop | MO_ALIGN); + tcg_gen_qemu_ld_i64(tmp, addr, da->mem_idx, da->memop | MO_ALIGN); /* Note that LE ldda acts as if each 32-bit register result is byte swapped. Having just performed one 64-bit bswap, we need now to swap the writebacks. 
*/ - if ((da.memop & MO_BSWAP) == MO_TE) { - tcg_gen_extr32_i64(lo, hi, tmp); + if ((da->memop & MO_BSWAP) == MO_TE) { + tcg_gen_extr_i64_tl(lo, hi, tmp); } else { - tcg_gen_extr32_i64(hi, lo, tmp); + tcg_gen_extr_i64_tl(hi, lo, tmp); } } break; @@ -2570,18 +2355,18 @@ static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd) real hardware allows others. This can be seen with e.g. FreeBSD 10.3 wrt ASI_IC_TAG. */ { - TCGv_i32 r_asi = tcg_constant_i32(da.asi); - TCGv_i32 r_mop = tcg_constant_i32(da.memop); + TCGv_i32 r_asi = tcg_constant_i32(da->asi); + TCGv_i32 r_mop = tcg_constant_i32(da->memop); TCGv_i64 tmp = tcg_temp_new_i64(); save_state(dc); gen_helper_ld_asi(tmp, tcg_env, addr, r_asi, r_mop); /* See above. */ - if ((da.memop & MO_BSWAP) == MO_TE) { - tcg_gen_extr32_i64(lo, hi, tmp); + if ((da->memop & MO_BSWAP) == MO_TE) { + tcg_gen_extr_i64_tl(lo, hi, tmp); } else { - tcg_gen_extr32_i64(hi, lo, tmp); + tcg_gen_extr_i64_tl(hi, lo, tmp); } } break; @@ -2591,157 +2376,90 @@ static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd) gen_store_gpr(dc, rd + 1, lo); } -static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr, - int insn, int rd) +static void gen_stda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd) { - DisasASI da = get_asi(dc, insn, MO_TEUQ); + TCGv hi = gen_load_gpr(dc, rd); TCGv lo = gen_load_gpr(dc, rd + 1); - switch (da.type) { + switch (da->type) { case GET_ASI_EXCP: break; case GET_ASI_DTWINX: - gen_address_mask(dc, addr); - tcg_gen_qemu_st_i64(hi, addr, da.mem_idx, da.memop | MO_ALIGN_16); - tcg_gen_addi_tl(addr, addr, 8); - tcg_gen_qemu_st_i64(lo, addr, da.mem_idx, da.memop); - break; - - case GET_ASI_DIRECT: +#ifdef TARGET_SPARC64 { - TCGv_i64 t64 = tcg_temp_new_i64(); - - /* Note that LE stda acts as if each 32-bit register result is - byte swapped. We will perform one 64-bit LE store, so now - we must swap the order of the construction. */ - if ((da.memop & MO_BSWAP) == MO_TE) { - tcg_gen_concat32_i64(t64, lo, hi); + MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16; + TCGv_i128 t = tcg_temp_new_i128(); + + /* + * Note that LE twinx acts as if each 64-bit register result is + * byte swapped. We perform one 128-bit LE store, so must swap + * the order of the construction. + */ + if ((mop & MO_BSWAP) == MO_TE) { + tcg_gen_concat_i64_i128(t, lo, hi); } else { - tcg_gen_concat32_i64(t64, hi, lo); + tcg_gen_concat_i64_i128(t, hi, lo); } - gen_address_mask(dc, addr); - tcg_gen_qemu_st_i64(t64, addr, da.mem_idx, da.memop | MO_ALIGN); + tcg_gen_qemu_st_i128(t, addr, da->mem_idx, mop); } break; +#else + g_assert_not_reached(); +#endif - default: - /* ??? In theory we've handled all of the ASIs that are valid - for stda, and this should raise DAE_invalid_asi. */ + case GET_ASI_DIRECT: { - TCGv_i32 r_asi = tcg_constant_i32(da.asi); - TCGv_i32 r_mop = tcg_constant_i32(da.memop); TCGv_i64 t64 = tcg_temp_new_i64(); - /* See above. */ - if ((da.memop & MO_BSWAP) == MO_TE) { - tcg_gen_concat32_i64(t64, lo, hi); + /* Note that LE stda acts as if each 32-bit register result is + byte swapped. We will perform one 64-bit LE store, so now + we must swap the order of the construction. 
*/ + if ((da->memop & MO_BSWAP) == MO_TE) { + tcg_gen_concat_tl_i64(t64, lo, hi); } else { - tcg_gen_concat32_i64(t64, hi, lo); + tcg_gen_concat_tl_i64(t64, hi, lo); } - - save_state(dc); - gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop); + tcg_gen_qemu_st_i64(t64, addr, da->mem_idx, da->memop | MO_ALIGN); } break; - } -} -static void gen_casx_asi(DisasContext *dc, TCGv addr, TCGv cmpv, - int insn, int rd) -{ - DisasASI da = get_asi(dc, insn, MO_TEUQ); - TCGv oldv; - - switch (da.type) { - case GET_ASI_EXCP: - return; - case GET_ASI_DIRECT: - oldv = tcg_temp_new(); - tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, gen_load_gpr(dc, rd), - da.mem_idx, da.memop | MO_ALIGN); - gen_store_gpr(dc, rd, oldv); - break; - default: - /* ??? Should be DAE_invalid_asi. */ - gen_exception(dc, TT_DATA_ACCESS); - break; - } -} - -#elif !defined(CONFIG_USER_ONLY) -static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd) -{ - /* ??? Work around an apparent bug in Ubuntu gcc 4.8.2-10ubuntu2+12, - whereby "rd + 1" elicits "error: array subscript is above array". - Since we have already asserted that rd is even, the semantics - are unchanged. */ - TCGv lo = gen_dest_gpr(dc, rd | 1); - TCGv hi = gen_dest_gpr(dc, rd); - TCGv_i64 t64 = tcg_temp_new_i64(); - DisasASI da = get_asi(dc, insn, MO_TEUQ); - - switch (da.type) { - case GET_ASI_EXCP: - return; - case GET_ASI_DIRECT: - gen_address_mask(dc, addr); - tcg_gen_qemu_ld_i64(t64, addr, da.mem_idx, da.memop | MO_ALIGN); - break; - default: - { - TCGv_i32 r_asi = tcg_constant_i32(da.asi); - TCGv_i32 r_mop = tcg_constant_i32(MO_UQ); - - save_state(dc); - gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop); - } - break; - } - - tcg_gen_extr_i64_i32(lo, hi, t64); - gen_store_gpr(dc, rd | 1, lo); - gen_store_gpr(dc, rd, hi); -} - -static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr, - int insn, int rd) -{ - DisasASI da = get_asi(dc, insn, MO_TEUQ); - TCGv lo = gen_load_gpr(dc, rd + 1); - TCGv_i64 t64 = tcg_temp_new_i64(); - - tcg_gen_concat_tl_i64(t64, lo, hi); - - switch (da.type) { - case GET_ASI_EXCP: - break; - case GET_ASI_DIRECT: - gen_address_mask(dc, addr); - tcg_gen_qemu_st_i64(t64, addr, da.mem_idx, da.memop | MO_ALIGN); - break; case GET_ASI_BFILL: + assert(TARGET_LONG_BITS == 32); /* Store 32 bytes of T64 to ADDR. */ /* ??? The original qemu code suggests 8-byte alignment, dropping the low bits, but the only place I can see this used is in the Linux kernel with 32 byte alignment, which would make more sense as a cacheline-style operation. */ { + TCGv_i64 t64 = tcg_temp_new_i64(); TCGv d_addr = tcg_temp_new(); TCGv eight = tcg_constant_tl(8); int i; + tcg_gen_concat_tl_i64(t64, lo, hi); tcg_gen_andi_tl(d_addr, addr, -8); for (i = 0; i < 32; i += 8) { - tcg_gen_qemu_st_i64(t64, d_addr, da.mem_idx, da.memop); + tcg_gen_qemu_st_i64(t64, d_addr, da->mem_idx, da->memop); tcg_gen_add_tl(d_addr, d_addr, eight); } } break; + default: + /* ??? In theory we've handled all of the ASIs that are valid + for stda, and this should raise DAE_invalid_asi. */ { - TCGv_i32 r_asi = tcg_constant_i32(da.asi); - TCGv_i32 r_mop = tcg_constant_i32(MO_UQ); + TCGv_i32 r_asi = tcg_constant_i32(da->asi); + TCGv_i32 r_mop = tcg_constant_i32(da->memop); + TCGv_i64 t64 = tcg_temp_new_i64(); + + /* See above. 
*/ + if ((da->memop & MO_BSWAP) == MO_TE) { + tcg_gen_concat_tl_i64(t64, lo, hi); + } else { + tcg_gen_concat_tl_i64(t64, hi, lo); + } save_state(dc); gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop); @@ -2749,30 +2467,10 @@ static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr, break; } } -#endif -static TCGv get_src1(DisasContext *dc, unsigned int insn) -{ - unsigned int rs1 = GET_FIELD(insn, 13, 17); - return gen_load_gpr(dc, rs1); -} - -static TCGv get_src2(DisasContext *dc, unsigned int insn) -{ - if (IS_IMM) { /* immediate */ - target_long simm = GET_FIELDs(insn, 19, 31); - TCGv t = tcg_temp_new(); - tcg_gen_movi_tl(t, simm); - return t; - } else { /* register */ - unsigned int rs2 = GET_FIELD(insn, 27, 31); - return gen_load_gpr(dc, rs2); - } -} - -#ifdef TARGET_SPARC64 static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs) { +#ifdef TARGET_SPARC64 TCGv_i32 c32, zero, dst, s1, s2; /* We have two choices here: extend the 32 bit data and use movcond_i64, @@ -2795,19 +2493,27 @@ static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs) tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2); gen_store_fpr_F(dc, rd, dst); +#else + qemu_build_not_reached(); +#endif } static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs) { +#ifdef TARGET_SPARC64 TCGv_i64 dst = gen_dest_fpr_D(dc, rd); tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, cmp->c2, gen_load_fpr_D(dc, rs), gen_load_fpr_D(dc, rd)); gen_store_fpr_D(dc, rd, dst); +#else + qemu_build_not_reached(); +#endif } static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs) { +#ifdef TARGET_SPARC64 int qd = QFPREG(rd); int qs = QFPREG(rs); @@ -2817,10 +2523,13 @@ static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs) cpu_fpr[qs / 2 + 1], cpu_fpr[qd / 2 + 1]); gen_update_fprs_dirty(dc, qd); +#else + qemu_build_not_reached(); +#endif } -#ifndef CONFIG_USER_ONLY -static void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr, TCGv_env tcg_env) +#ifdef TARGET_SPARC64 +static void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr) { TCGv_i32 r_tl = tcg_temp_new_i32(); @@ -2843,13 +2552,1359 @@ static void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr, TCGv_env tcg_env) } #endif -static void gen_edge(DisasContext *dc, TCGv dst, TCGv s1, TCGv s2, +static int extract_dfpreg(DisasContext *dc, int x) +{ + return DFPREG(x); +} + +static int extract_qfpreg(DisasContext *dc, int x) +{ + return QFPREG(x); +} + +/* Include the auto-generated decoder. */ +#include "decode-insns.c.inc" + +#define TRANS(NAME, AVAIL, FUNC, ...) 
\ + static bool trans_##NAME(DisasContext *dc, arg_##NAME *a) \ + { return avail_##AVAIL(dc) && FUNC(dc, __VA_ARGS__); } + +#define avail_ALL(C) true +#ifdef TARGET_SPARC64 +# define avail_32(C) false +# define avail_ASR17(C) false +# define avail_CASA(C) true +# define avail_DIV(C) true +# define avail_MUL(C) true +# define avail_POWERDOWN(C) false +# define avail_64(C) true +# define avail_GL(C) ((C)->def->features & CPU_FEATURE_GL) +# define avail_HYPV(C) ((C)->def->features & CPU_FEATURE_HYPV) +# define avail_VIS1(C) ((C)->def->features & CPU_FEATURE_VIS1) +# define avail_VIS2(C) ((C)->def->features & CPU_FEATURE_VIS2) +#else +# define avail_32(C) true +# define avail_ASR17(C) ((C)->def->features & CPU_FEATURE_ASR17) +# define avail_CASA(C) ((C)->def->features & CPU_FEATURE_CASA) +# define avail_DIV(C) ((C)->def->features & CPU_FEATURE_DIV) +# define avail_MUL(C) ((C)->def->features & CPU_FEATURE_MUL) +# define avail_POWERDOWN(C) ((C)->def->features & CPU_FEATURE_POWERDOWN) +# define avail_64(C) false +# define avail_GL(C) false +# define avail_HYPV(C) false +# define avail_VIS1(C) false +# define avail_VIS2(C) false +#endif + +/* Default case for non jump instructions. */ +static bool advance_pc(DisasContext *dc) +{ + if (dc->npc & 3) { + switch (dc->npc) { + case DYNAMIC_PC: + case DYNAMIC_PC_LOOKUP: + dc->pc = dc->npc; + gen_op_next_insn(); + break; + case JUMP_PC: + /* we can do a static jump */ + gen_branch2(dc, dc->jump_pc[0], dc->jump_pc[1], cpu_cond); + dc->base.is_jmp = DISAS_NORETURN; + break; + default: + g_assert_not_reached(); + } + } else { + dc->pc = dc->npc; + dc->npc = dc->npc + 4; + } + return true; +} + +/* + * Major opcodes 00 and 01 -- branches, call, and sethi + */ + +static bool advance_jump_uncond_never(DisasContext *dc, bool annul) +{ + if (annul) { + dc->pc = dc->npc + 4; + dc->npc = dc->pc + 4; + } else { + dc->pc = dc->npc; + dc->npc = dc->pc + 4; + } + return true; +} + +static bool advance_jump_uncond_always(DisasContext *dc, bool annul, + target_ulong dest) +{ + if (annul) { + dc->pc = dest; + dc->npc = dest + 4; + } else { + dc->pc = dc->npc; + dc->npc = dest; + tcg_gen_mov_tl(cpu_pc, cpu_npc); + } + return true; +} + +static bool advance_jump_cond(DisasContext *dc, DisasCompare *cmp, + bool annul, target_ulong dest) +{ + target_ulong npc = dc->npc; + + if (annul) { + TCGLabel *l1 = gen_new_label(); + + tcg_gen_brcond_tl(tcg_invert_cond(cmp->cond), cmp->c1, cmp->c2, l1); + gen_goto_tb(dc, 0, npc, dest); + gen_set_label(l1); + gen_goto_tb(dc, 1, npc + 4, npc + 8); + + dc->base.is_jmp = DISAS_NORETURN; + } else { + if (npc & 3) { + switch (npc) { + case DYNAMIC_PC: + case DYNAMIC_PC_LOOKUP: + tcg_gen_mov_tl(cpu_pc, cpu_npc); + tcg_gen_addi_tl(cpu_npc, cpu_npc, 4); + tcg_gen_movcond_tl(cmp->cond, cpu_npc, + cmp->c1, cmp->c2, + tcg_constant_tl(dest), cpu_npc); + dc->pc = npc; + break; + default: + g_assert_not_reached(); + } + } else { + dc->pc = npc; + dc->jump_pc[0] = dest; + dc->jump_pc[1] = npc + 4; + dc->npc = JUMP_PC; + if (cmp->is_bool) { + tcg_gen_mov_tl(cpu_cond, cmp->c1); + } else { + tcg_gen_setcond_tl(cmp->cond, cpu_cond, cmp->c1, cmp->c2); + } + } + } + return true; +} + +static bool raise_priv(DisasContext *dc) +{ + gen_exception(dc, TT_PRIV_INSN); + return true; +} + +static bool raise_unimpfpop(DisasContext *dc) +{ + gen_op_fpexception_im(dc, FSR_FTT_UNIMPFPOP); + return true; +} + +static bool gen_trap_float128(DisasContext *dc) +{ + if (dc->def->features & CPU_FEATURE_FLOAT128) { + return false; + } + return raise_unimpfpop(dc); +} + 
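[Editor's note] The advance_jump_uncond_never/always helpers above encode SPARC's delayed-branch bookkeeping: the translator carries a (pc, npc) pair, and the Bicc annul bit decides whether the delay-slot instruction at the old npc executes. Below is a minimal standalone sketch of those state transitions; the names (pcpair, branch_never, branch_always) are invented for the sketch, it has no QEMU dependencies, and it is not the translator's actual API.

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct pcpair { uint64_t pc, npc; };

/* Untaken branch: annul skips the delay slot, otherwise fall through it. */
static void branch_never(struct pcpair *s, bool annul)
{
    if (annul) {
        s->pc = s->npc + 4;   /* step over the delay slot entirely */
        s->npc = s->pc + 4;
    } else {
        s->pc = s->npc;       /* delay slot executes, then fall through */
        s->npc = s->pc + 4;
    }
}

/* Taken branch: the delay slot at the old npc runs first unless annulled. */
static void branch_always(struct pcpair *s, bool annul, uint64_t dest)
{
    if (annul) {
        s->pc = dest;         /* annulled: jump immediately */
        s->npc = dest + 4;
    } else {
        s->pc = s->npc;       /* delay slot, then the target */
        s->npc = dest;
    }
}

int main(void)
{
    struct pcpair s = { 0x1000, 0x1004 };

    branch_always(&s, false, 0x2000);
    printf("pc=0x%" PRIx64 " npc=0x%" PRIx64 "\n", s.pc, s.npc);
    /* pc=0x1004 (delay slot), npc=0x2000 (branch target) */

    branch_never(&s, true);
    printf("pc=0x%" PRIx64 " npc=0x%" PRIx64 "\n", s.pc, s.npc);
    /* pc=0x2004, npc=0x2008: the annulled slot was stepped over */
    return 0;
}

The conditional case, advance_jump_cond, layers the comparison on top of the same pair: either a brcond between two goto_tb exits when annulled, or recording the two candidate npc values as JUMP_PC state when not.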
+static bool do_bpcc(DisasContext *dc, arg_bcc *a) +{ + target_long target = address_mask_i(dc, dc->pc + a->i * 4); + DisasCompare cmp; + + switch (a->cond) { + case 0x0: + return advance_jump_uncond_never(dc, a->a); + case 0x8: + return advance_jump_uncond_always(dc, a->a, target); + default: + flush_cond(dc); + + gen_compare(&cmp, a->cc, a->cond, dc); + return advance_jump_cond(dc, &cmp, a->a, target); + } +} + +TRANS(Bicc, ALL, do_bpcc, a) +TRANS(BPcc, 64, do_bpcc, a) + +static bool do_fbpfcc(DisasContext *dc, arg_bcc *a) +{ + target_long target = address_mask_i(dc, dc->pc + a->i * 4); + DisasCompare cmp; + + if (gen_trap_ifnofpu(dc)) { + return true; + } + switch (a->cond) { + case 0x0: + return advance_jump_uncond_never(dc, a->a); + case 0x8: + return advance_jump_uncond_always(dc, a->a, target); + default: + flush_cond(dc); + + gen_fcompare(&cmp, a->cc, a->cond); + return advance_jump_cond(dc, &cmp, a->a, target); + } +} + +TRANS(FBPfcc, 64, do_fbpfcc, a) +TRANS(FBfcc, ALL, do_fbpfcc, a) + +static bool trans_BPr(DisasContext *dc, arg_BPr *a) +{ + target_long target = address_mask_i(dc, dc->pc + a->i * 4); + DisasCompare cmp; + + if (!avail_64(dc)) { + return false; + } + if (gen_tcg_cond_reg[a->cond] == TCG_COND_NEVER) { + return false; + } + + flush_cond(dc); + gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1)); + return advance_jump_cond(dc, &cmp, a->a, target); +} + +static bool trans_CALL(DisasContext *dc, arg_CALL *a) +{ + target_long target = address_mask_i(dc, dc->pc + a->i * 4); + + gen_store_gpr(dc, 15, tcg_constant_tl(dc->pc)); + gen_mov_pc_npc(dc); + dc->npc = target; + return true; +} + +static bool trans_NCP(DisasContext *dc, arg_NCP *a) +{ + /* + * For sparc32, always generate the no-coprocessor exception. + * For sparc64, always generate illegal instruction. + */ +#ifdef TARGET_SPARC64 + return false; +#else + gen_exception(dc, TT_NCP_INSN); + return true; +#endif +} + +static bool trans_SETHI(DisasContext *dc, arg_SETHI *a) +{ + /* Special-case %g0 because that's the canonical nop. */ + if (a->rd) { + gen_store_gpr(dc, a->rd, tcg_constant_tl((uint32_t)a->i << 10)); + } + return advance_pc(dc); +} + +/* + * Major Opcode 10 -- integer, floating-point, vis, and system insns. + */ + +static bool do_tcc(DisasContext *dc, int cond, int cc, + int rs1, bool imm, int rs2_or_imm) +{ + int mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc) + ? UA2005_HTRAP_MASK : V8_TRAP_MASK); + DisasCompare cmp; + TCGLabel *lab; + TCGv_i32 trap; + + /* Trap never. */ + if (cond == 0) { + return advance_pc(dc); + } + + /* + * Immediate traps are the most common case. Since this value is + * live across the branch, it really pays to evaluate the constant. + */ + if (rs1 == 0 && (imm || rs2_or_imm == 0)) { + trap = tcg_constant_i32((rs2_or_imm & mask) + TT_TRAP); + } else { + trap = tcg_temp_new_i32(); + tcg_gen_trunc_tl_i32(trap, gen_load_gpr(dc, rs1)); + if (imm) { + tcg_gen_addi_i32(trap, trap, rs2_or_imm); + } else { + TCGv_i32 t2 = tcg_temp_new_i32(); + tcg_gen_trunc_tl_i32(t2, gen_load_gpr(dc, rs2_or_imm)); + tcg_gen_add_i32(trap, trap, t2); + } + tcg_gen_andi_i32(trap, trap, mask); + tcg_gen_addi_i32(trap, trap, TT_TRAP); + } + + /* Trap always. */ + if (cond == 8) { + save_state(dc); + gen_helper_raise_exception(tcg_env, trap); + dc->base.is_jmp = DISAS_NORETURN; + return true; + } + + /* Conditional trap. 
*/ + flush_cond(dc); + lab = delay_exceptionv(dc, trap); + gen_compare(&cmp, cc, cond, dc); + tcg_gen_brcond_tl(cmp.cond, cmp.c1, cmp.c2, lab); + + return advance_pc(dc); +} + +static bool trans_Tcc_r(DisasContext *dc, arg_Tcc_r *a) +{ + if (avail_32(dc) && a->cc) { + return false; + } + return do_tcc(dc, a->cond, a->cc, a->rs1, false, a->rs2); +} + +static bool trans_Tcc_i_v7(DisasContext *dc, arg_Tcc_i_v7 *a) +{ + if (avail_64(dc)) { + return false; + } + return do_tcc(dc, a->cond, 0, a->rs1, true, a->i); +} + +static bool trans_Tcc_i_v9(DisasContext *dc, arg_Tcc_i_v9 *a) +{ + if (avail_32(dc)) { + return false; + } + return do_tcc(dc, a->cond, a->cc, a->rs1, true, a->i); +} + +static bool trans_STBAR(DisasContext *dc, arg_STBAR *a) +{ + tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC); + return advance_pc(dc); +} + +static bool trans_MEMBAR(DisasContext *dc, arg_MEMBAR *a) +{ + if (avail_32(dc)) { + return false; + } + if (a->mmask) { + /* Note TCG_MO_* was modeled on sparc64, so mmask matches. */ + tcg_gen_mb(a->mmask | TCG_BAR_SC); + } + if (a->cmask) { + /* For #Sync, etc, end the TB to recognize interrupts. */ + dc->base.is_jmp = DISAS_EXIT; + } + return advance_pc(dc); +} + +static bool do_rd_special(DisasContext *dc, bool priv, int rd, + TCGv (*func)(DisasContext *, TCGv)) +{ + if (!priv) { + return raise_priv(dc); + } + gen_store_gpr(dc, rd, func(dc, gen_dest_gpr(dc, rd))); + return advance_pc(dc); +} + +static TCGv do_rdy(DisasContext *dc, TCGv dst) +{ + return cpu_y; +} + +static bool trans_RDY(DisasContext *dc, arg_RDY *a) +{ + /* + * TODO: Need a feature bit for sparcv8. In the meantime, treat all + * 32-bit cpus like sparcv7, which ignores the rs1 field. + * This matches after all other ASR, so Leon3 Asr17 is handled first. + */ + if (avail_64(dc) && a->rs1 != 0) { + return false; + } + return do_rd_special(dc, true, a->rd, do_rdy); +} + +static TCGv do_rd_leon3_config(DisasContext *dc, TCGv dst) +{ + uint32_t val; + + /* + * TODO: There are many more fields to be filled, + * some of which are writable. + */ + val = dc->def->nwindows - 1; /* [4:0] NWIN */ + val |= 1 << 8; /* [8] V8 */ + + return tcg_constant_tl(val); +} + +TRANS(RDASR17, ASR17, do_rd_special, true, a->rd, do_rd_leon3_config) + +static TCGv do_rdccr(DisasContext *dc, TCGv dst) +{ + update_psr(dc); + gen_helper_rdccr(dst, tcg_env); + return dst; +} + +TRANS(RDCCR, 64, do_rd_special, true, a->rd, do_rdccr) + +static TCGv do_rdasi(DisasContext *dc, TCGv dst) +{ +#ifdef TARGET_SPARC64 + return tcg_constant_tl(dc->asi); +#else + qemu_build_not_reached(); +#endif +} + +TRANS(RDASI, 64, do_rd_special, true, a->rd, do_rdasi) + +static TCGv do_rdtick(DisasContext *dc, TCGv dst) +{ + TCGv_ptr r_tickptr = tcg_temp_new_ptr(); + + tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick)); + if (translator_io_start(&dc->base)) { + dc->base.is_jmp = DISAS_EXIT; + } + gen_helper_tick_get_count(dst, tcg_env, r_tickptr, + tcg_constant_i32(dc->mem_idx)); + return dst; +} + +/* TODO: non-priv access only allowed when enabled. 
*/ +TRANS(RDTICK, 64, do_rd_special, true, a->rd, do_rdtick) + +static TCGv do_rdpc(DisasContext *dc, TCGv dst) +{ + return tcg_constant_tl(address_mask_i(dc, dc->pc)); +} + +TRANS(RDPC, 64, do_rd_special, true, a->rd, do_rdpc) + +static TCGv do_rdfprs(DisasContext *dc, TCGv dst) +{ + tcg_gen_ext_i32_tl(dst, cpu_fprs); + return dst; +} + +TRANS(RDFPRS, 64, do_rd_special, true, a->rd, do_rdfprs) + +static TCGv do_rdgsr(DisasContext *dc, TCGv dst) +{ + gen_trap_ifnofpu(dc); + return cpu_gsr; +} + +TRANS(RDGSR, 64, do_rd_special, true, a->rd, do_rdgsr) + +static TCGv do_rdsoftint(DisasContext *dc, TCGv dst) +{ + tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(softint)); + return dst; +} + +TRANS(RDSOFTINT, 64, do_rd_special, supervisor(dc), a->rd, do_rdsoftint) + +static TCGv do_rdtick_cmpr(DisasContext *dc, TCGv dst) +{ + tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(tick_cmpr)); + return dst; +} + +/* TODO: non-priv access only allowed when enabled. */ +TRANS(RDTICK_CMPR, 64, do_rd_special, true, a->rd, do_rdtick_cmpr) + +static TCGv do_rdstick(DisasContext *dc, TCGv dst) +{ + TCGv_ptr r_tickptr = tcg_temp_new_ptr(); + + tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick)); + if (translator_io_start(&dc->base)) { + dc->base.is_jmp = DISAS_EXIT; + } + gen_helper_tick_get_count(dst, tcg_env, r_tickptr, + tcg_constant_i32(dc->mem_idx)); + return dst; +} + +/* TODO: non-priv access only allowed when enabled. */ +TRANS(RDSTICK, 64, do_rd_special, true, a->rd, do_rdstick) + +static TCGv do_rdstick_cmpr(DisasContext *dc, TCGv dst) +{ + tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(stick_cmpr)); + return dst; +} + +/* TODO: supervisor access only allowed when enabled by hypervisor. */ +TRANS(RDSTICK_CMPR, 64, do_rd_special, supervisor(dc), a->rd, do_rdstick_cmpr) + +/* + * UltraSPARC-T1 Strand status. + * HYPV check maybe not enough, UA2005 & UA2007 describe + * this ASR as impl. 
dep + */ +static TCGv do_rdstrand_status(DisasContext *dc, TCGv dst) +{ + return tcg_constant_tl(1); +} + +TRANS(RDSTRAND_STATUS, HYPV, do_rd_special, true, a->rd, do_rdstrand_status) + +static TCGv do_rdpsr(DisasContext *dc, TCGv dst) +{ + update_psr(dc); + gen_helper_rdpsr(dst, tcg_env); + return dst; +} + +TRANS(RDPSR, 32, do_rd_special, supervisor(dc), a->rd, do_rdpsr) + +static TCGv do_rdhpstate(DisasContext *dc, TCGv dst) +{ + tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hpstate)); + return dst; +} + +TRANS(RDHPR_hpstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhpstate) + +static TCGv do_rdhtstate(DisasContext *dc, TCGv dst) +{ + TCGv_i32 tl = tcg_temp_new_i32(); + TCGv_ptr tp = tcg_temp_new_ptr(); + + tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl)); + tcg_gen_andi_i32(tl, tl, MAXTL_MASK); + tcg_gen_shli_i32(tl, tl, 3); + tcg_gen_ext_i32_ptr(tp, tl); + tcg_gen_add_ptr(tp, tp, tcg_env); + + tcg_gen_ld_tl(dst, tp, env64_field_offsetof(htstate)); + return dst; +} + +TRANS(RDHPR_htstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtstate) + +static TCGv do_rdhintp(DisasContext *dc, TCGv dst) +{ + tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hintp)); + return dst; +} + +TRANS(RDHPR_hintp, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhintp) + +static TCGv do_rdhtba(DisasContext *dc, TCGv dst) +{ + tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(htba)); + return dst; +} + +TRANS(RDHPR_htba, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtba) + +static TCGv do_rdhver(DisasContext *dc, TCGv dst) +{ + tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hver)); + return dst; +} + +TRANS(RDHPR_hver, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhver) + +static TCGv do_rdhstick_cmpr(DisasContext *dc, TCGv dst) +{ + tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hstick_cmpr)); + return dst; +} + +TRANS(RDHPR_hstick_cmpr, HYPV, do_rd_special, hypervisor(dc), a->rd, + do_rdhstick_cmpr) + +static TCGv do_rdwim(DisasContext *dc, TCGv dst) +{ + tcg_gen_ld_tl(dst, tcg_env, env32_field_offsetof(wim)); + return dst; +} + +TRANS(RDWIM, 32, do_rd_special, supervisor(dc), a->rd, do_rdwim) + +static TCGv do_rdtpc(DisasContext *dc, TCGv dst) +{ +#ifdef TARGET_SPARC64 + TCGv_ptr r_tsptr = tcg_temp_new_ptr(); + + gen_load_trap_state_at_tl(r_tsptr); + tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tpc)); + return dst; +#else + qemu_build_not_reached(); +#endif +} + +TRANS(RDPR_tpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtpc) + +static TCGv do_rdtnpc(DisasContext *dc, TCGv dst) +{ +#ifdef TARGET_SPARC64 + TCGv_ptr r_tsptr = tcg_temp_new_ptr(); + + gen_load_trap_state_at_tl(r_tsptr); + tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tnpc)); + return dst; +#else + qemu_build_not_reached(); +#endif +} + +TRANS(RDPR_tnpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtnpc) + +static TCGv do_rdtstate(DisasContext *dc, TCGv dst) +{ +#ifdef TARGET_SPARC64 + TCGv_ptr r_tsptr = tcg_temp_new_ptr(); + + gen_load_trap_state_at_tl(r_tsptr); + tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tstate)); + return dst; +#else + qemu_build_not_reached(); +#endif +} + +TRANS(RDPR_tstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdtstate) + +static TCGv do_rdtt(DisasContext *dc, TCGv dst) +{ +#ifdef TARGET_SPARC64 + TCGv_ptr r_tsptr = tcg_temp_new_ptr(); + + gen_load_trap_state_at_tl(r_tsptr); + tcg_gen_ld32s_tl(dst, r_tsptr, offsetof(trap_state, tt)); + return dst; +#else + qemu_build_not_reached(); +#endif +} + +TRANS(RDPR_tt, 64, do_rd_special, supervisor(dc), a->rd, do_rdtt) 
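[Editor's note] Nearly all of the RDASR/RDPR cases above funnel through do_rd_special: a privilege predicate evaluated at translate time, then a per-register callback that produces the value stored in rd. Here is a rough standalone model of that dispatch shape; the names are invented for the sketch, and the real callbacks return TCGv temporaries into generated code, not integers.

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t (*read_fn)(void);

static uint64_t gpr[32];           /* stand-in for gen_store_gpr() */

/* Gate on privilege, read via the callback, report success.
 * Mirrors the raise_priv / func / advance_pc sequence in the diff. */
static bool rd_special(bool priv, int rd, read_fn fn)
{
    if (!priv) {
        return false;              /* raise_priv() in the real code */
    }
    gpr[rd] = fn();                /* the per-register callback */
    return true;                   /* advance_pc() in the real code */
}

static uint64_t read_psr_stub(void) { return 0x04000080; } /* arbitrary */

int main(void)
{
    if (!rd_special(false, 4, read_psr_stub)) {
        puts("user mode: privileged-instruction trap");
    }
    if (rd_special(true, 4, read_psr_stub)) {
        printf("%%r4 = 0x%" PRIx64 "\n", gpr[4]);
    }
    return 0;
}

The real table is richer, since each TRANS line also bakes in an availability test such as avail_64 or avail_HYPV, but the control flow is the same three steps: gate, read, advance.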
+TRANS(RDPR_tick, 64, do_rd_special, supervisor(dc), a->rd, do_rdtick) + +static TCGv do_rdtba(DisasContext *dc, TCGv dst) +{ + return cpu_tbr; +} + +TRANS(RDTBR, 32, do_rd_special, supervisor(dc), a->rd, do_rdtba) +TRANS(RDPR_tba, 64, do_rd_special, supervisor(dc), a->rd, do_rdtba) + +static TCGv do_rdpstate(DisasContext *dc, TCGv dst) +{ + tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(pstate)); + return dst; +} + +TRANS(RDPR_pstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdpstate) + +static TCGv do_rdtl(DisasContext *dc, TCGv dst) +{ + tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(tl)); + return dst; +} + +TRANS(RDPR_tl, 64, do_rd_special, supervisor(dc), a->rd, do_rdtl) + +static TCGv do_rdpil(DisasContext *dc, TCGv dst) +{ + tcg_gen_ld32s_tl(dst, tcg_env, env_field_offsetof(psrpil)); + return dst; +} + +TRANS(RDPR_pil, 64, do_rd_special, supervisor(dc), a->rd, do_rdpil) + +static TCGv do_rdcwp(DisasContext *dc, TCGv dst) +{ + gen_helper_rdcwp(dst, tcg_env); + return dst; +} + +TRANS(RDPR_cwp, 64, do_rd_special, supervisor(dc), a->rd, do_rdcwp) + +static TCGv do_rdcansave(DisasContext *dc, TCGv dst) +{ + tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cansave)); + return dst; +} + +TRANS(RDPR_cansave, 64, do_rd_special, supervisor(dc), a->rd, do_rdcansave) + +static TCGv do_rdcanrestore(DisasContext *dc, TCGv dst) +{ + tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(canrestore)); + return dst; +} + +TRANS(RDPR_canrestore, 64, do_rd_special, supervisor(dc), a->rd, + do_rdcanrestore) + +static TCGv do_rdcleanwin(DisasContext *dc, TCGv dst) +{ + tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cleanwin)); + return dst; +} + +TRANS(RDPR_cleanwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdcleanwin) + +static TCGv do_rdotherwin(DisasContext *dc, TCGv dst) +{ + tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(otherwin)); + return dst; +} + +TRANS(RDPR_otherwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdotherwin) + +static TCGv do_rdwstate(DisasContext *dc, TCGv dst) +{ + tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(wstate)); + return dst; +} + +TRANS(RDPR_wstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdwstate) + +static TCGv do_rdgl(DisasContext *dc, TCGv dst) +{ + tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(gl)); + return dst; +} + +TRANS(RDPR_gl, GL, do_rd_special, supervisor(dc), a->rd, do_rdgl) + +/* UA2005 strand status */ +static TCGv do_rdssr(DisasContext *dc, TCGv dst) +{ + tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(ssr)); + return dst; +} + +TRANS(RDPR_strand_status, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdssr) + +static TCGv do_rdver(DisasContext *dc, TCGv dst) +{ + tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(version)); + return dst; +} + +TRANS(RDPR_ver, 64, do_rd_special, supervisor(dc), a->rd, do_rdver) + +static bool trans_FLUSHW(DisasContext *dc, arg_FLUSHW *a) +{ + if (avail_64(dc)) { + gen_helper_flushw(tcg_env); + return advance_pc(dc); + } + return false; +} + +static bool do_wr_special(DisasContext *dc, arg_r_r_ri *a, bool priv, + void (*func)(DisasContext *, TCGv)) +{ + TCGv src; + + /* For simplicity, we under-decoded the rs2 form. 
*/ + if (!a->imm && (a->rs2_or_imm & ~0x1f)) { + return false; + } + if (!priv) { + return raise_priv(dc); + } + + if (a->rs1 == 0 && (a->imm || a->rs2_or_imm == 0)) { + src = tcg_constant_tl(a->rs2_or_imm); + } else { + TCGv src1 = gen_load_gpr(dc, a->rs1); + if (a->rs2_or_imm == 0) { + src = src1; + } else { + src = tcg_temp_new(); + if (a->imm) { + tcg_gen_xori_tl(src, src1, a->rs2_or_imm); + } else { + tcg_gen_xor_tl(src, src1, gen_load_gpr(dc, a->rs2_or_imm)); + } + } + } + func(dc, src); + return advance_pc(dc); +} + +static void do_wry(DisasContext *dc, TCGv src) +{ + tcg_gen_ext32u_tl(cpu_y, src); +} + +TRANS(WRY, ALL, do_wr_special, a, true, do_wry) + +static void do_wrccr(DisasContext *dc, TCGv src) +{ + gen_helper_wrccr(tcg_env, src); +} + +TRANS(WRCCR, 64, do_wr_special, a, true, do_wrccr) + +static void do_wrasi(DisasContext *dc, TCGv src) +{ + TCGv tmp = tcg_temp_new(); + + tcg_gen_ext8u_tl(tmp, src); + tcg_gen_st32_tl(tmp, tcg_env, env64_field_offsetof(asi)); + /* End TB to notice changed ASI. */ + dc->base.is_jmp = DISAS_EXIT; +} + +TRANS(WRASI, 64, do_wr_special, a, true, do_wrasi) + +static void do_wrfprs(DisasContext *dc, TCGv src) +{ +#ifdef TARGET_SPARC64 + tcg_gen_trunc_tl_i32(cpu_fprs, src); + dc->fprs_dirty = 0; + dc->base.is_jmp = DISAS_EXIT; +#else + qemu_build_not_reached(); +#endif +} + +TRANS(WRFPRS, 64, do_wr_special, a, true, do_wrfprs) + +static void do_wrgsr(DisasContext *dc, TCGv src) +{ + gen_trap_ifnofpu(dc); + tcg_gen_mov_tl(cpu_gsr, src); +} + +TRANS(WRGSR, 64, do_wr_special, a, true, do_wrgsr) + +static void do_wrsoftint_set(DisasContext *dc, TCGv src) +{ + gen_helper_set_softint(tcg_env, src); +} + +TRANS(WRSOFTINT_SET, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_set) + +static void do_wrsoftint_clr(DisasContext *dc, TCGv src) +{ + gen_helper_clear_softint(tcg_env, src); +} + +TRANS(WRSOFTINT_CLR, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_clr) + +static void do_wrsoftint(DisasContext *dc, TCGv src) +{ + gen_helper_write_softint(tcg_env, src); +} + +TRANS(WRSOFTINT, 64, do_wr_special, a, supervisor(dc), do_wrsoftint) + +static void do_wrtick_cmpr(DisasContext *dc, TCGv src) +{ + TCGv_ptr r_tickptr = tcg_temp_new_ptr(); + + tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(tick_cmpr)); + tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick)); + translator_io_start(&dc->base); + gen_helper_tick_set_limit(r_tickptr, src); + /* End TB to handle timer interrupt */ + dc->base.is_jmp = DISAS_EXIT; +} + +TRANS(WRTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrtick_cmpr) + +static void do_wrstick(DisasContext *dc, TCGv src) +{ +#ifdef TARGET_SPARC64 + TCGv_ptr r_tickptr = tcg_temp_new_ptr(); + + tcg_gen_ld_ptr(r_tickptr, tcg_env, offsetof(CPUSPARCState, stick)); + translator_io_start(&dc->base); + gen_helper_tick_set_count(r_tickptr, src); + /* End TB to handle timer interrupt */ + dc->base.is_jmp = DISAS_EXIT; +#else + qemu_build_not_reached(); +#endif +} + +TRANS(WRSTICK, 64, do_wr_special, a, supervisor(dc), do_wrstick) + +static void do_wrstick_cmpr(DisasContext *dc, TCGv src) +{ + TCGv_ptr r_tickptr = tcg_temp_new_ptr(); + + tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(stick_cmpr)); + tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick)); + translator_io_start(&dc->base); + gen_helper_tick_set_limit(r_tickptr, src); + /* End TB to handle timer interrupt */ + dc->base.is_jmp = DISAS_EXIT; +} + +TRANS(WRSTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrstick_cmpr) + +static void do_wrpowerdown(DisasContext 
*dc, TCGv src) +{ + save_state(dc); + gen_helper_power_down(tcg_env); +} + +TRANS(WRPOWERDOWN, POWERDOWN, do_wr_special, a, supervisor(dc), do_wrpowerdown) + +static void do_wrpsr(DisasContext *dc, TCGv src) +{ + gen_helper_wrpsr(tcg_env, src); + tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS); + dc->cc_op = CC_OP_FLAGS; + dc->base.is_jmp = DISAS_EXIT; +} + +TRANS(WRPSR, 32, do_wr_special, a, supervisor(dc), do_wrpsr) + +static void do_wrwim(DisasContext *dc, TCGv src) +{ + target_ulong mask = MAKE_64BIT_MASK(0, dc->def->nwindows); + TCGv tmp = tcg_temp_new(); + + tcg_gen_andi_tl(tmp, src, mask); + tcg_gen_st_tl(tmp, tcg_env, env32_field_offsetof(wim)); +} + +TRANS(WRWIM, 32, do_wr_special, a, supervisor(dc), do_wrwim) + +static void do_wrtpc(DisasContext *dc, TCGv src) +{ +#ifdef TARGET_SPARC64 + TCGv_ptr r_tsptr = tcg_temp_new_ptr(); + + gen_load_trap_state_at_tl(r_tsptr); + tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tpc)); +#else + qemu_build_not_reached(); +#endif +} + +TRANS(WRPR_tpc, 64, do_wr_special, a, supervisor(dc), do_wrtpc) + +static void do_wrtnpc(DisasContext *dc, TCGv src) +{ +#ifdef TARGET_SPARC64 + TCGv_ptr r_tsptr = tcg_temp_new_ptr(); + + gen_load_trap_state_at_tl(r_tsptr); + tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tnpc)); +#else + qemu_build_not_reached(); +#endif +} + +TRANS(WRPR_tnpc, 64, do_wr_special, a, supervisor(dc), do_wrtnpc) + +static void do_wrtstate(DisasContext *dc, TCGv src) +{ +#ifdef TARGET_SPARC64 + TCGv_ptr r_tsptr = tcg_temp_new_ptr(); + + gen_load_trap_state_at_tl(r_tsptr); + tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tstate)); +#else + qemu_build_not_reached(); +#endif +} + +TRANS(WRPR_tstate, 64, do_wr_special, a, supervisor(dc), do_wrtstate) + +static void do_wrtt(DisasContext *dc, TCGv src) +{ +#ifdef TARGET_SPARC64 + TCGv_ptr r_tsptr = tcg_temp_new_ptr(); + + gen_load_trap_state_at_tl(r_tsptr); + tcg_gen_st32_tl(src, r_tsptr, offsetof(trap_state, tt)); +#else + qemu_build_not_reached(); +#endif +} + +TRANS(WRPR_tt, 64, do_wr_special, a, supervisor(dc), do_wrtt) + +static void do_wrtick(DisasContext *dc, TCGv src) +{ + TCGv_ptr r_tickptr = tcg_temp_new_ptr(); + + tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick)); + translator_io_start(&dc->base); + gen_helper_tick_set_count(r_tickptr, src); + /* End TB to handle timer interrupt */ + dc->base.is_jmp = DISAS_EXIT; +} + +TRANS(WRPR_tick, 64, do_wr_special, a, supervisor(dc), do_wrtick) + +static void do_wrtba(DisasContext *dc, TCGv src) +{ + tcg_gen_mov_tl(cpu_tbr, src); +} + +TRANS(WRPR_tba, 64, do_wr_special, a, supervisor(dc), do_wrtba) + +static void do_wrpstate(DisasContext *dc, TCGv src) +{ + save_state(dc); + if (translator_io_start(&dc->base)) { + dc->base.is_jmp = DISAS_EXIT; + } + gen_helper_wrpstate(tcg_env, src); + dc->npc = DYNAMIC_PC; +} + +TRANS(WRPR_pstate, 64, do_wr_special, a, supervisor(dc), do_wrpstate) + +static void do_wrtl(DisasContext *dc, TCGv src) +{ + save_state(dc); + tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(tl)); + dc->npc = DYNAMIC_PC; +} + +TRANS(WRPR_tl, 64, do_wr_special, a, supervisor(dc), do_wrtl) + +static void do_wrpil(DisasContext *dc, TCGv src) +{ + if (translator_io_start(&dc->base)) { + dc->base.is_jmp = DISAS_EXIT; + } + gen_helper_wrpil(tcg_env, src); +} + +TRANS(WRPR_pil, 64, do_wr_special, a, supervisor(dc), do_wrpil) + +static void do_wrcwp(DisasContext *dc, TCGv src) +{ + gen_helper_wrcwp(tcg_env, src); +} + +TRANS(WRPR_cwp, 64, do_wr_special, a, supervisor(dc), do_wrcwp) + +static void do_wrcansave(DisasContext 
*dc, TCGv src) +{ + tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cansave)); +} + +TRANS(WRPR_cansave, 64, do_wr_special, a, supervisor(dc), do_wrcansave) + +static void do_wrcanrestore(DisasContext *dc, TCGv src) +{ + tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(canrestore)); +} + +TRANS(WRPR_canrestore, 64, do_wr_special, a, supervisor(dc), do_wrcanrestore) + +static void do_wrcleanwin(DisasContext *dc, TCGv src) +{ + tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cleanwin)); +} + +TRANS(WRPR_cleanwin, 64, do_wr_special, a, supervisor(dc), do_wrcleanwin) + +static void do_wrotherwin(DisasContext *dc, TCGv src) +{ + tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(otherwin)); +} + +TRANS(WRPR_otherwin, 64, do_wr_special, a, supervisor(dc), do_wrotherwin) + +static void do_wrwstate(DisasContext *dc, TCGv src) +{ + tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(wstate)); +} + +TRANS(WRPR_wstate, 64, do_wr_special, a, supervisor(dc), do_wrwstate) + +static void do_wrgl(DisasContext *dc, TCGv src) +{ + gen_helper_wrgl(tcg_env, src); +} + +TRANS(WRPR_gl, GL, do_wr_special, a, supervisor(dc), do_wrgl) + +/* UA2005 strand status */ +static void do_wrssr(DisasContext *dc, TCGv src) +{ + tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(ssr)); +} + +TRANS(WRPR_strand_status, HYPV, do_wr_special, a, hypervisor(dc), do_wrssr) + +TRANS(WRTBR, 32, do_wr_special, a, supervisor(dc), do_wrtba) + +static void do_wrhpstate(DisasContext *dc, TCGv src) +{ + tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hpstate)); + dc->base.is_jmp = DISAS_EXIT; +} + +TRANS(WRHPR_hpstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhpstate) + +static void do_wrhtstate(DisasContext *dc, TCGv src) +{ + TCGv_i32 tl = tcg_temp_new_i32(); + TCGv_ptr tp = tcg_temp_new_ptr(); + + tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl)); + tcg_gen_andi_i32(tl, tl, MAXTL_MASK); + tcg_gen_shli_i32(tl, tl, 3); + tcg_gen_ext_i32_ptr(tp, tl); + tcg_gen_add_ptr(tp, tp, tcg_env); + + tcg_gen_st_tl(src, tp, env64_field_offsetof(htstate)); +} + +TRANS(WRHPR_htstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtstate) + +static void do_wrhintp(DisasContext *dc, TCGv src) +{ + tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hintp)); +} + +TRANS(WRHPR_hintp, HYPV, do_wr_special, a, hypervisor(dc), do_wrhintp) + +static void do_wrhtba(DisasContext *dc, TCGv src) +{ + tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(htba)); +} + +TRANS(WRHPR_htba, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtba) + +static void do_wrhstick_cmpr(DisasContext *dc, TCGv src) +{ + TCGv_ptr r_tickptr = tcg_temp_new_ptr(); + + tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hstick_cmpr)); + tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(hstick)); + translator_io_start(&dc->base); + gen_helper_tick_set_limit(r_tickptr, src); + /* End TB to handle timer interrupt */ + dc->base.is_jmp = DISAS_EXIT; +} + +TRANS(WRHPR_hstick_cmpr, HYPV, do_wr_special, a, hypervisor(dc), + do_wrhstick_cmpr) + +static bool do_saved_restored(DisasContext *dc, bool saved) +{ + if (!supervisor(dc)) { + return raise_priv(dc); + } + if (saved) { + gen_helper_saved(tcg_env); + } else { + gen_helper_restored(tcg_env); + } + return advance_pc(dc); +} + +TRANS(SAVED, 64, do_saved_restored, true) +TRANS(RESTORED, 64, do_saved_restored, false) + +static bool trans_NOP(DisasContext *dc, arg_NOP *a) +{ + return advance_pc(dc); +} + +/* + * TODO: Need a feature bit for sparcv8. + * In the meantime, treat all 32-bit cpus like sparcv7. 
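/*
 * Illustrative sketch (standalone C, not part of this patch): the address
 * arithmetic in do_wrhtstate above.  The current trap level, masked with
 * MAXTL_MASK, is scaled by 8 and added to tcg_env, which is just
 * env->htstate[tl] spelled out as pointer math.  The MAXTL value here is
 * an assumed placeholder.
 */
#include <stdint.h>

#define MAXTL      8            /* assumption for the sketch */
#define MAXTL_MASK (MAXTL - 1)

struct env64 {
    uint64_t htstate[MAXTL];
    uint32_t tl;
};

static void wrhtstate(struct env64 *env, uint64_t src)
{
    env->htstate[env->tl & MAXTL_MASK] = src;
}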
+ */ +TRANS(NOP_v7, 32, trans_NOP, a) +TRANS(NOP_v9, 64, trans_NOP, a) + +static bool do_arith_int(DisasContext *dc, arg_r_r_ri_cc *a, int cc_op, + void (*func)(TCGv, TCGv, TCGv), + void (*funci)(TCGv, TCGv, target_long)) +{ + TCGv dst, src1; + + /* For simplicity, we under-decoded the rs2 form. */ + if (!a->imm && a->rs2_or_imm & ~0x1f) { + return false; + } + + if (a->cc) { + dst = cpu_cc_dst; + } else { + dst = gen_dest_gpr(dc, a->rd); + } + src1 = gen_load_gpr(dc, a->rs1); + + if (a->imm || a->rs2_or_imm == 0) { + if (funci) { + funci(dst, src1, a->rs2_or_imm); + } else { + func(dst, src1, tcg_constant_tl(a->rs2_or_imm)); + } + } else { + func(dst, src1, cpu_regs[a->rs2_or_imm]); + } + gen_store_gpr(dc, a->rd, dst); + + if (a->cc) { + tcg_gen_movi_i32(cpu_cc_op, cc_op); + dc->cc_op = cc_op; + } + return advance_pc(dc); +} + +static bool do_arith(DisasContext *dc, arg_r_r_ri_cc *a, int cc_op, + void (*func)(TCGv, TCGv, TCGv), + void (*funci)(TCGv, TCGv, target_long), + void (*func_cc)(TCGv, TCGv, TCGv)) +{ + if (a->cc) { + assert(cc_op >= 0); + return do_arith_int(dc, a, cc_op, func_cc, NULL); + } + return do_arith_int(dc, a, cc_op, func, funci); +} + +static bool do_logic(DisasContext *dc, arg_r_r_ri_cc *a, + void (*func)(TCGv, TCGv, TCGv), + void (*funci)(TCGv, TCGv, target_long)) +{ + return do_arith_int(dc, a, CC_OP_LOGIC, func, funci); +} + +TRANS(ADD, ALL, do_arith, a, CC_OP_ADD, + tcg_gen_add_tl, tcg_gen_addi_tl, gen_op_add_cc) +TRANS(SUB, ALL, do_arith, a, CC_OP_SUB, + tcg_gen_sub_tl, tcg_gen_subi_tl, gen_op_sub_cc) + +TRANS(TADDcc, ALL, do_arith, a, CC_OP_TADD, NULL, NULL, gen_op_add_cc) +TRANS(TSUBcc, ALL, do_arith, a, CC_OP_TSUB, NULL, NULL, gen_op_sub_cc) +TRANS(TADDccTV, ALL, do_arith, a, CC_OP_TADDTV, NULL, NULL, gen_op_taddcctv) +TRANS(TSUBccTV, ALL, do_arith, a, CC_OP_TSUBTV, NULL, NULL, gen_op_tsubcctv) + +TRANS(AND, ALL, do_logic, a, tcg_gen_and_tl, tcg_gen_andi_tl) +TRANS(XOR, ALL, do_logic, a, tcg_gen_xor_tl, tcg_gen_xori_tl) +TRANS(ANDN, ALL, do_logic, a, tcg_gen_andc_tl, NULL) +TRANS(ORN, ALL, do_logic, a, tcg_gen_orc_tl, NULL) +TRANS(XORN, ALL, do_logic, a, tcg_gen_eqv_tl, NULL) + +TRANS(MULX, 64, do_arith, a, -1, tcg_gen_mul_tl, tcg_gen_muli_tl, NULL) +TRANS(UMUL, MUL, do_logic, a, gen_op_umul, NULL) +TRANS(SMUL, MUL, do_logic, a, gen_op_smul, NULL) + +TRANS(UDIVX, 64, do_arith, a, -1, gen_op_udivx, NULL, NULL) +TRANS(SDIVX, 64, do_arith, a, -1, gen_op_sdivx, NULL, NULL) +TRANS(UDIV, DIV, do_arith, a, CC_OP_DIV, gen_op_udiv, NULL, gen_op_udivcc) +TRANS(SDIV, DIV, do_arith, a, CC_OP_DIV, gen_op_sdiv, NULL, gen_op_sdivcc) + +/* TODO: Should have feature bit -- comes in with UltraSparc T2. */ +TRANS(POPC, 64, do_arith, a, -1, gen_op_popc, NULL, NULL) + +static bool trans_OR(DisasContext *dc, arg_r_r_ri_cc *a) +{ + /* OR with %g0 is the canonical alias for MOV. */ + if (!a->cc && a->rs1 == 0) { + if (a->imm || a->rs2_or_imm == 0) { + gen_store_gpr(dc, a->rd, tcg_constant_tl(a->rs2_or_imm)); + } else if (a->rs2_or_imm & ~0x1f) { + /* For simplicity, we under-decoded the rs2 form. */ + return false; + } else { + gen_store_gpr(dc, a->rd, cpu_regs[a->rs2_or_imm]); + } + return advance_pc(dc); + } + return do_logic(dc, a, tcg_gen_or_tl, tcg_gen_ori_tl); +} + +static bool trans_ADDC(DisasContext *dc, arg_r_r_ri_cc *a) +{ + switch (dc->cc_op) { + case CC_OP_DIV: + case CC_OP_LOGIC: + /* Carry is known to be zero. Fall back to plain ADD. 
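/*
 * Illustrative sketch (standalone C, not part of this patch): the
 * "under-decoded rs2 form" checks that recur in do_arith_int/trans_OR
 * above.  The decodetree pattern keeps the whole 13-bit field; with i == 0
 * only the low 5 bits name a register, so any higher bit set makes the
 * encoding illegal and the trans_* function returns false.  regs[] is an
 * assumption for illustration.
 */
#include <stdbool.h>
#include <stdint.h>

static bool decode_rs2_or_imm(bool imm, int64_t rs2_or_imm,
                              const uint64_t regs[32], uint64_t *val)
{
    if (!imm && (rs2_or_imm & ~0x1f)) {
        return false;                       /* reserved bits set: reject */
    }
    *val = imm ? (uint64_t)rs2_or_imm       /* sign-extended simm13 */
               : regs[rs2_or_imm];          /* register operand */
    return true;
}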
*/ + return do_arith(dc, a, CC_OP_ADD, + tcg_gen_add_tl, tcg_gen_addi_tl, gen_op_add_cc); + case CC_OP_ADD: + case CC_OP_TADD: + case CC_OP_TADDTV: + return do_arith(dc, a, CC_OP_ADDX, + gen_op_addc_add, NULL, gen_op_addccc_add); + case CC_OP_SUB: + case CC_OP_TSUB: + case CC_OP_TSUBTV: + return do_arith(dc, a, CC_OP_ADDX, + gen_op_addc_sub, NULL, gen_op_addccc_sub); + default: + return do_arith(dc, a, CC_OP_ADDX, + gen_op_addc_generic, NULL, gen_op_addccc_generic); + } +} + +static bool trans_SUBC(DisasContext *dc, arg_r_r_ri_cc *a) +{ + switch (dc->cc_op) { + case CC_OP_DIV: + case CC_OP_LOGIC: + /* Carry is known to be zero. Fall back to plain SUB. */ + return do_arith(dc, a, CC_OP_SUB, + tcg_gen_sub_tl, tcg_gen_subi_tl, gen_op_sub_cc); + case CC_OP_ADD: + case CC_OP_TADD: + case CC_OP_TADDTV: + return do_arith(dc, a, CC_OP_SUBX, + gen_op_subc_add, NULL, gen_op_subccc_add); + case CC_OP_SUB: + case CC_OP_TSUB: + case CC_OP_TSUBTV: + return do_arith(dc, a, CC_OP_SUBX, + gen_op_subc_sub, NULL, gen_op_subccc_sub); + default: + return do_arith(dc, a, CC_OP_SUBX, + gen_op_subc_generic, NULL, gen_op_subccc_generic); + } +} + +static bool trans_MULScc(DisasContext *dc, arg_r_r_ri_cc *a) +{ + update_psr(dc); + return do_arith(dc, a, CC_OP_ADD, NULL, NULL, gen_op_mulscc); +} + +static bool gen_edge(DisasContext *dc, arg_r_r_r *a, int width, bool cc, bool left) { - TCGv lo1, lo2; + TCGv dst, s1, s2, lo1, lo2; uint64_t amask, tabl, tabr; int shift, imask, omask; + dst = gen_dest_gpr(dc, a->rd); + s1 = gen_load_gpr(dc, a->rs1); + s2 = gen_load_gpr(dc, a->rs2); + if (cc) { tcg_gen_mov_tl(cpu_cc_src, s1); tcg_gen_mov_tl(cpu_cc_src2, s2); @@ -2858,13 +3913,15 @@ static void gen_edge(DisasContext *dc, TCGv dst, TCGv s1, TCGv s2, dc->cc_op = CC_OP_SUB; } - /* Theory of operation: there are two tables, left and right (not to - be confused with the left and right versions of the opcode). These - are indexed by the low 3 bits of the inputs. To make things "easy", - these tables are loaded into two constants, TABL and TABR below. - The operation index = (input & imask) << shift calculates the index - into the constant, while val = (table >> index) & omask calculates - the value we're looking for. */ + /* + * Theory of operation: there are two tables, left and right (not to + * be confused with the left and right versions of the opcode). These + * are indexed by the low 3 bits of the inputs. To make things "easy", + * these tables are loaded into two constants, TABL and TABR below. + * The operation index = (input & imask) << shift calculates the index + * into the constant, while val = (table >> index) & omask calculates + * the value we're looking for. + */ switch (width) { case 8: imask = 0x7; @@ -2918,2653 +3975,1345 @@ static void gen_edge(DisasContext *dc, TCGv dst, TCGv s1, TCGv s2, tcg_gen_andi_tl(lo1, lo1, omask); tcg_gen_andi_tl(lo2, lo2, omask); - amask = -8; - if (AM_CHECK(dc)) { - amask &= 0xffffffffULL; - } + amask = address_mask_i(dc, -8); tcg_gen_andi_tl(s1, s1, amask); tcg_gen_andi_tl(s2, s2, amask); /* Compute dst = (s1 == s2 ? lo1 : lo1 & lo2). 
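/*
 * Illustrative sketch (standalone C, not part of this patch): the
 * packed-table lookup described in the "theory of operation" comment
 * above, instantiated for EDGE8.  Eight 8-bit edge masks are packed into
 * one 64-bit constant; the lookup is then index = (addr & imask) << shift,
 * value = (table >> index) & omask.  The left-mask convention used here is
 * an assumption; QEMU bakes in the equivalent TABL/TABR constants.
 */
#include <stdint.h>

static uint64_t pack_edge8_table(void)
{
    uint64_t tab = 0;
    for (unsigned i = 0; i < 8; i++) {
        uint8_t mask = 0xff >> i;           /* keep byte lanes i..7 */
        tab |= (uint64_t)mask << (i * 8);   /* entry i lives in byte i */
    }
    return tab;
}

static uint8_t edge8_left_mask(uint64_t addr)
{
    unsigned idx = (addr & 0x7) << 3;       /* imask = 0x7, shift = 3 */
    return (pack_edge8_table() >> idx) & 0xff;  /* omask = 0xff */
}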
*/ tcg_gen_and_tl(lo2, lo2, lo1); tcg_gen_movcond_tl(TCG_COND_EQ, dst, s1, s2, lo1, lo2); + + gen_store_gpr(dc, a->rd, dst); + return advance_pc(dc); +} + +TRANS(EDGE8cc, VIS1, gen_edge, a, 8, 1, 0) +TRANS(EDGE8Lcc, VIS1, gen_edge, a, 8, 1, 1) +TRANS(EDGE16cc, VIS1, gen_edge, a, 16, 1, 0) +TRANS(EDGE16Lcc, VIS1, gen_edge, a, 16, 1, 1) +TRANS(EDGE32cc, VIS1, gen_edge, a, 32, 1, 0) +TRANS(EDGE32Lcc, VIS1, gen_edge, a, 32, 1, 1) + +TRANS(EDGE8N, VIS2, gen_edge, a, 8, 0, 0) +TRANS(EDGE8LN, VIS2, gen_edge, a, 8, 0, 1) +TRANS(EDGE16N, VIS2, gen_edge, a, 16, 0, 0) +TRANS(EDGE16LN, VIS2, gen_edge, a, 16, 0, 1) +TRANS(EDGE32N, VIS2, gen_edge, a, 32, 0, 0) +TRANS(EDGE32LN, VIS2, gen_edge, a, 32, 0, 1) + +static bool do_rrr(DisasContext *dc, arg_r_r_r *a, + void (*func)(TCGv, TCGv, TCGv)) +{ + TCGv dst = gen_dest_gpr(dc, a->rd); + TCGv src1 = gen_load_gpr(dc, a->rs1); + TCGv src2 = gen_load_gpr(dc, a->rs2); + + func(dst, src1, src2); + gen_store_gpr(dc, a->rd, dst); + return advance_pc(dc); } -static void gen_alignaddr(TCGv dst, TCGv s1, TCGv s2, bool left) +TRANS(ARRAY8, VIS1, do_rrr, a, gen_helper_array8) +TRANS(ARRAY16, VIS1, do_rrr, a, gen_op_array16) +TRANS(ARRAY32, VIS1, do_rrr, a, gen_op_array32) + +static void gen_op_alignaddr(TCGv dst, TCGv s1, TCGv s2) { +#ifdef TARGET_SPARC64 TCGv tmp = tcg_temp_new(); tcg_gen_add_tl(tmp, s1, s2); tcg_gen_andi_tl(dst, tmp, -8); - if (left) { - tcg_gen_neg_tl(tmp, tmp); - } tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3); +#else + g_assert_not_reached(); +#endif } -static void gen_faligndata(TCGv dst, TCGv gsr, TCGv s1, TCGv s2) +static void gen_op_alignaddrl(TCGv dst, TCGv s1, TCGv s2) { - TCGv t1, t2, shift; - - t1 = tcg_temp_new(); - t2 = tcg_temp_new(); - shift = tcg_temp_new(); +#ifdef TARGET_SPARC64 + TCGv tmp = tcg_temp_new(); - tcg_gen_andi_tl(shift, gsr, 7); - tcg_gen_shli_tl(shift, shift, 3); - tcg_gen_shl_tl(t1, s1, shift); + tcg_gen_add_tl(tmp, s1, s2); + tcg_gen_andi_tl(dst, tmp, -8); + tcg_gen_neg_tl(tmp, tmp); + tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3); +#else + g_assert_not_reached(); +#endif +} - /* A shift of 64 does not produce 0 in TCG. Divide this into a - shift of (up to 63) followed by a constant shift of 1. */ - tcg_gen_xori_tl(shift, shift, 63); - tcg_gen_shr_tl(t2, s2, shift); - tcg_gen_shri_tl(t2, t2, 1); +TRANS(ALIGNADDR, VIS1, do_rrr, a, gen_op_alignaddr) +TRANS(ALIGNADDRL, VIS1, do_rrr, a, gen_op_alignaddrl) - tcg_gen_or_tl(dst, t1, t2); -} +static void gen_op_bmask(TCGv dst, TCGv s1, TCGv s2) +{ +#ifdef TARGET_SPARC64 + tcg_gen_add_tl(dst, s1, s2); + tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, dst, 32, 32); +#else + g_assert_not_reached(); #endif +} -#define CHECK_IU_FEATURE(dc, FEATURE) \ - if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \ - goto illegal_insn; -#define CHECK_FPU_FEATURE(dc, FEATURE) \ - if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \ - goto nfpu_insn; +TRANS(BMASK, VIS2, do_rrr, a, gen_op_bmask) -/* before an instruction, dc->pc must be static */ -static void disas_sparc_insn(DisasContext * dc, unsigned int insn) +static bool do_shift_r(DisasContext *dc, arg_shiftr *a, bool l, bool u) { - unsigned int opc, rs1, rs2, rd; - TCGv cpu_src1, cpu_src2; - TCGv_i32 cpu_src1_32, cpu_src2_32, cpu_dst_32; - TCGv_i64 cpu_src1_64, cpu_src2_64, cpu_dst_64; - target_long simm; + TCGv dst, src1, src2; - opc = GET_FIELD(insn, 0, 1); - rd = GET_FIELD(insn, 2, 6); + /* Reject 64-bit shifts for sparc32. 
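/*
 * Illustrative sketch (standalone C, not part of this patch): the
 * shift-splitting trick from the gen_faligndata removed above.  FALIGNDATA
 * extracts 8 bytes from the concatenation s1:s2 at byte offset GSR.align;
 * the naive "s2 >> (64 - 8*align)" shifts by 64 when align == 0, which
 * neither C nor TCG defines, so it is split into a shift by (x ^ 63) <= 63
 * plus a constant shift by 1.
 */
#include <stdint.h>

static uint64_t faligndata(uint64_t s1, uint64_t s2, unsigned align)
{
    unsigned shift = (align & 7) * 8;           /* 0, 8, ..., 56 */
    uint64_t hi = s1 << shift;
    uint64_t lo = (s2 >> (shift ^ 63)) >> 1;    /* == s2 >> (64 - shift) */
    return hi | lo;
}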
*/ + if (avail_32(dc) && a->x) { + return false; + } - switch (opc) { - case 0: /* branches/sethi */ - { - unsigned int xop = GET_FIELD(insn, 7, 9); - int32_t target; - switch (xop) { -#ifdef TARGET_SPARC64 - case 0x1: /* V9 BPcc */ - { - int cc; - - target = GET_FIELD_SP(insn, 0, 18); - target = sign_extend(target, 19); - target <<= 2; - cc = GET_FIELD_SP(insn, 20, 21); - if (cc == 0) - do_branch(dc, target, insn, 0); - else if (cc == 2) - do_branch(dc, target, insn, 1); - else - goto illegal_insn; - goto jmp_insn; - } - case 0x3: /* V9 BPr */ - { - target = GET_FIELD_SP(insn, 0, 13) | - (GET_FIELD_SP(insn, 20, 21) << 14); - target = sign_extend(target, 16); - target <<= 2; - cpu_src1 = get_src1(dc, insn); - do_branch_reg(dc, target, insn, cpu_src1); - goto jmp_insn; - } - case 0x5: /* V9 FBPcc */ - { - int cc = GET_FIELD_SP(insn, 20, 21); - if (gen_trap_ifnofpu(dc)) { - goto jmp_insn; - } - target = GET_FIELD_SP(insn, 0, 18); - target = sign_extend(target, 19); - target <<= 2; - do_fbranch(dc, target, insn, cc); - goto jmp_insn; - } -#else - case 0x7: /* CBN+x */ - { - goto ncp_insn; - } -#endif - case 0x2: /* BN+x */ - { - target = GET_FIELD(insn, 10, 31); - target = sign_extend(target, 22); - target <<= 2; - do_branch(dc, target, insn, 0); - goto jmp_insn; - } - case 0x6: /* FBN+x */ - { - if (gen_trap_ifnofpu(dc)) { - goto jmp_insn; - } - target = GET_FIELD(insn, 10, 31); - target = sign_extend(target, 22); - target <<= 2; - do_fbranch(dc, target, insn, 0); - goto jmp_insn; - } - case 0x4: /* SETHI */ - /* Special-case %g0 because that's the canonical nop. */ - if (rd) { - uint32_t value = GET_FIELD(insn, 10, 31); - TCGv t = gen_dest_gpr(dc, rd); - tcg_gen_movi_tl(t, value << 10); - gen_store_gpr(dc, rd, t); - } - break; - case 0x0: /* UNIMPL */ - default: - goto illegal_insn; - } - break; + src2 = tcg_temp_new(); + tcg_gen_andi_tl(src2, gen_load_gpr(dc, a->rs2), a->x ? 63 : 31); + src1 = gen_load_gpr(dc, a->rs1); + dst = gen_dest_gpr(dc, a->rd); + + if (l) { + tcg_gen_shl_tl(dst, src1, src2); + if (!a->x) { + tcg_gen_ext32u_tl(dst, dst); } - break; - case 1: /*CALL*/ - { - target_long target = GET_FIELDs(insn, 2, 31) << 2; - TCGv o7 = gen_dest_gpr(dc, 15); + } else if (u) { + if (!a->x) { + tcg_gen_ext32u_tl(dst, src1); + src1 = dst; + } + tcg_gen_shr_tl(dst, src1, src2); + } else { + if (!a->x) { + tcg_gen_ext32s_tl(dst, src1); + src1 = dst; + } + tcg_gen_sar_tl(dst, src1, src2); + } + gen_store_gpr(dc, a->rd, dst); + return advance_pc(dc); +} - tcg_gen_movi_tl(o7, dc->pc); - gen_store_gpr(dc, 15, o7); - target += dc->pc; - gen_mov_pc_npc(dc); -#ifdef TARGET_SPARC64 - if (unlikely(AM_CHECK(dc))) { - target &= 0xffffffffULL; - } -#endif - dc->npc = target; +TRANS(SLL_r, ALL, do_shift_r, a, true, true) +TRANS(SRL_r, ALL, do_shift_r, a, false, true) +TRANS(SRA_r, ALL, do_shift_r, a, false, false) + +static bool do_shift_i(DisasContext *dc, arg_shifti *a, bool l, bool u) +{ + TCGv dst, src1; + + /* Reject 64-bit shifts for sparc32. 
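/*
 * Illustrative sketch (standalone C, not part of this patch): the
 * semantics do_shift_r emits.  The count is masked to 5 bits (32-bit
 * forms) or 6 bits (the 64-bit "x" forms), and the 32-bit forms zero- or
 * sign-extend the source first so the value held in a 64-bit register
 * model matches a true 32-bit shift.
 */
#include <stdint.h>

static uint64_t sra_32(uint64_t src, uint64_t cnt)   /* SRA */
{
    return (uint64_t)((int64_t)(int32_t)src >> (cnt & 31));
}

static uint64_t srl_32(uint64_t src, uint64_t cnt)   /* SRL */
{
    return (uint64_t)((uint32_t)src >> (cnt & 31));
}

static uint64_t srlx_64(uint64_t src, uint64_t cnt)  /* SRLX */
{
    return src >> (cnt & 63);
}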
*/ + if (avail_32(dc) && (a->x || a->i >= 32)) { + return false; + } + + src1 = gen_load_gpr(dc, a->rs1); + dst = gen_dest_gpr(dc, a->rd); + + if (avail_32(dc) || a->x) { + if (l) { + tcg_gen_shli_tl(dst, src1, a->i); + } else if (u) { + tcg_gen_shri_tl(dst, src1, a->i); + } else { + tcg_gen_sari_tl(dst, src1, a->i); } - goto jmp_insn; - case 2: /* FPU & Logical Operations */ - { - unsigned int xop = GET_FIELD(insn, 7, 12); - TCGv cpu_dst = tcg_temp_new(); - TCGv cpu_tmp0; - - if (xop == 0x3a) { /* generate trap */ - int cond = GET_FIELD(insn, 3, 6); - TCGv_i32 trap; - TCGLabel *l1 = NULL; - int mask; - - if (cond == 0) { - /* Trap never. */ - break; - } + } else { + if (l) { + tcg_gen_deposit_z_tl(dst, src1, a->i, 32 - a->i); + } else if (u) { + tcg_gen_extract_tl(dst, src1, a->i, 32 - a->i); + } else { + tcg_gen_sextract_tl(dst, src1, a->i, 32 - a->i); + } + } + gen_store_gpr(dc, a->rd, dst); + return advance_pc(dc); +} - save_state(dc); +TRANS(SLL_i, ALL, do_shift_i, a, true, true) +TRANS(SRL_i, ALL, do_shift_i, a, false, true) +TRANS(SRA_i, ALL, do_shift_i, a, false, false) - if (cond != 8) { - /* Conditional trap. */ - DisasCompare cmp; -#ifdef TARGET_SPARC64 - /* V9 icc/xcc */ - int cc = GET_FIELD_SP(insn, 11, 12); - if (cc == 0) { - gen_compare(&cmp, 0, cond, dc); - } else if (cc == 2) { - gen_compare(&cmp, 1, cond, dc); - } else { - goto illegal_insn; - } -#else - gen_compare(&cmp, 0, cond, dc); -#endif - l1 = gen_new_label(); - tcg_gen_brcond_tl(tcg_invert_cond(cmp.cond), - cmp.c1, cmp.c2, l1); - } +static TCGv gen_rs2_or_imm(DisasContext *dc, bool imm, int rs2_or_imm) +{ + /* For simplicity, we under-decoded the rs2 form. */ + if (!imm && rs2_or_imm & ~0x1f) { + return NULL; + } + if (imm || rs2_or_imm == 0) { + return tcg_constant_tl(rs2_or_imm); + } else { + return cpu_regs[rs2_or_imm]; + } +} - mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc) - ? UA2005_HTRAP_MASK : V8_TRAP_MASK); - - /* Don't use the normal temporaries, as they may well have - gone out of scope with the branch above. While we're - doing that we might as well pre-truncate to 32-bit. */ - trap = tcg_temp_new_i32(); - - rs1 = GET_FIELD_SP(insn, 14, 18); - if (IS_IMM) { - rs2 = GET_FIELD_SP(insn, 0, 7); - if (rs1 == 0) { - tcg_gen_movi_i32(trap, (rs2 & mask) + TT_TRAP); - /* Signal that the trap value is fully constant. */ - mask = 0; - } else { - TCGv t1 = gen_load_gpr(dc, rs1); - tcg_gen_trunc_tl_i32(trap, t1); - tcg_gen_addi_i32(trap, trap, rs2); - } - } else { - TCGv t1, t2; - rs2 = GET_FIELD_SP(insn, 0, 4); - t1 = gen_load_gpr(dc, rs1); - t2 = gen_load_gpr(dc, rs2); - tcg_gen_add_tl(t1, t1, t2); - tcg_gen_trunc_tl_i32(trap, t1); - } - if (mask != 0) { - tcg_gen_andi_i32(trap, trap, mask); - tcg_gen_addi_i32(trap, trap, TT_TRAP); - } +static bool do_mov_cond(DisasContext *dc, DisasCompare *cmp, int rd, TCGv src2) +{ + TCGv dst = gen_load_gpr(dc, rd); - gen_helper_raise_exception(tcg_env, trap); + tcg_gen_movcond_tl(cmp->cond, dst, cmp->c1, cmp->c2, src2, dst); + gen_store_gpr(dc, rd, dst); + return advance_pc(dc); +} - if (cond == 8) { - /* An unconditional trap ends the TB. */ - dc->base.is_jmp = DISAS_NORETURN; - goto jmp_insn; - } else { - /* A conditional trap falls through to the next insn. */ - gen_set_label(l1); - break; - } - } else if (xop == 0x28) { - rs1 = GET_FIELD(insn, 13, 17); - switch(rs1) { - case 0: /* rdy */ -#ifndef TARGET_SPARC64 - case 0x01 ... 
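/*
 * Illustrative sketch (standalone C, not part of this patch): why
 * do_shift_i above can implement the 32-bit immediate shifts as bitfield
 * ops.  For 0 < i < 32, a 32-bit shift of the low word is exactly an
 * extract/deposit on the 64-bit register: extract(src, i, 32-i) is SRL,
 * sextract is SRA, and deposit_z(src, i, 32-i) is SLL zero-extended.
 */
#include <assert.h>
#include <stdint.h>

static uint64_t extract64(uint64_t v, unsigned pos, unsigned len)
{
    return (v >> pos) & (~0ULL >> (64 - len));
}

static uint64_t deposit_z64(uint64_t v, unsigned pos, unsigned len)
{
    return (v & (~0ULL >> (64 - len))) << pos;
}

int main(void)
{
    uint64_t src = 0x123456789abcdef0ULL;

    for (unsigned i = 1; i < 32; i++) {
        assert(extract64(src, i, 32 - i) == (uint64_t)((uint32_t)src >> i));
        assert(deposit_z64(src, i, 32 - i) == (uint64_t)(uint32_t)(src << i));
    }
    return 0;
}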
0x0e: /* undefined in the SPARCv8 - manual, rdy on the microSPARC - II */ - case 0x0f: /* stbar in the SPARCv8 manual, - rdy on the microSPARC II */ - case 0x10 ... 0x1f: /* implementation-dependent in the - SPARCv8 manual, rdy on the - microSPARC II */ - /* Read Asr17 */ - if (rs1 == 0x11 && dc->def->features & CPU_FEATURE_ASR17) { - TCGv t = gen_dest_gpr(dc, rd); - /* Read Asr17 for a Leon3 monoprocessor */ - tcg_gen_movi_tl(t, (1 << 8) | (dc->def->nwindows - 1)); - gen_store_gpr(dc, rd, t); - break; - } -#endif - gen_store_gpr(dc, rd, cpu_y); - break; -#ifdef TARGET_SPARC64 - case 0x2: /* V9 rdccr */ - update_psr(dc); - gen_helper_rdccr(cpu_dst, tcg_env); - gen_store_gpr(dc, rd, cpu_dst); - break; - case 0x3: /* V9 rdasi */ - tcg_gen_movi_tl(cpu_dst, dc->asi); - gen_store_gpr(dc, rd, cpu_dst); - break; - case 0x4: /* V9 rdtick */ - { - TCGv_ptr r_tickptr; - TCGv_i32 r_const; - - r_tickptr = tcg_temp_new_ptr(); - r_const = tcg_constant_i32(dc->mem_idx); - tcg_gen_ld_ptr(r_tickptr, tcg_env, - offsetof(CPUSPARCState, tick)); - if (translator_io_start(&dc->base)) { - dc->base.is_jmp = DISAS_EXIT; - } - gen_helper_tick_get_count(cpu_dst, tcg_env, r_tickptr, - r_const); - gen_store_gpr(dc, rd, cpu_dst); - } - break; - case 0x5: /* V9 rdpc */ - { - TCGv t = gen_dest_gpr(dc, rd); - if (unlikely(AM_CHECK(dc))) { - tcg_gen_movi_tl(t, dc->pc & 0xffffffffULL); - } else { - tcg_gen_movi_tl(t, dc->pc); - } - gen_store_gpr(dc, rd, t); - } - break; - case 0x6: /* V9 rdfprs */ - tcg_gen_ext_i32_tl(cpu_dst, cpu_fprs); - gen_store_gpr(dc, rd, cpu_dst); - break; - case 0xf: /* V9 membar */ - break; /* no effect */ - case 0x13: /* Graphics Status */ - if (gen_trap_ifnofpu(dc)) { - goto jmp_insn; - } - gen_store_gpr(dc, rd, cpu_gsr); - break; - case 0x16: /* Softint */ - tcg_gen_ld32s_tl(cpu_dst, tcg_env, - offsetof(CPUSPARCState, softint)); - gen_store_gpr(dc, rd, cpu_dst); - break; - case 0x17: /* Tick compare */ - gen_store_gpr(dc, rd, cpu_tick_cmpr); - break; - case 0x18: /* System tick */ - { - TCGv_ptr r_tickptr; - TCGv_i32 r_const; - - r_tickptr = tcg_temp_new_ptr(); - r_const = tcg_constant_i32(dc->mem_idx); - tcg_gen_ld_ptr(r_tickptr, tcg_env, - offsetof(CPUSPARCState, stick)); - if (translator_io_start(&dc->base)) { - dc->base.is_jmp = DISAS_EXIT; - } - gen_helper_tick_get_count(cpu_dst, tcg_env, r_tickptr, - r_const); - gen_store_gpr(dc, rd, cpu_dst); - } - break; - case 0x19: /* System tick compare */ - gen_store_gpr(dc, rd, cpu_stick_cmpr); - break; - case 0x1a: /* UltraSPARC-T1 Strand status */ - /* XXX HYPV check maybe not enough, UA2005 & UA2007 describe - * this ASR as impl. 
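/*
 * Illustrative sketch (standalone C, not part of this patch): the
 * AM_CHECK masking in the old rdpc case above (and in address_mask_i
 * earlier in this patch).  With PSTATE.AM set, a 64-bit CPU exposes only
 * the low 32 bits of an address.
 */
#include <stdbool.h>
#include <stdint.h>

static uint64_t read_pc(uint64_t pc, bool pstate_am)
{
    return pstate_am ? (pc & 0xffffffffULL) : pc;
}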
dep - */ - CHECK_IU_FEATURE(dc, HYPV); - { - TCGv t = gen_dest_gpr(dc, rd); - tcg_gen_movi_tl(t, 1UL); - gen_store_gpr(dc, rd, t); - } - break; - case 0x10: /* Performance Control */ - case 0x11: /* Performance Instrumentation Counter */ - case 0x12: /* Dispatch Control */ - case 0x14: /* Softint set, WO */ - case 0x15: /* Softint clear, WO */ -#endif - default: - goto illegal_insn; - } -#if !defined(CONFIG_USER_ONLY) - } else if (xop == 0x29) { /* rdpsr / UA2005 rdhpr */ -#ifndef TARGET_SPARC64 - if (!supervisor(dc)) { - goto priv_insn; - } - update_psr(dc); - gen_helper_rdpsr(cpu_dst, tcg_env); -#else - CHECK_IU_FEATURE(dc, HYPV); - if (!hypervisor(dc)) - goto priv_insn; - rs1 = GET_FIELD(insn, 13, 17); - switch (rs1) { - case 0: // hpstate - tcg_gen_ld_i64(cpu_dst, tcg_env, - offsetof(CPUSPARCState, hpstate)); - break; - case 1: // htstate - // gen_op_rdhtstate(); - break; - case 3: // hintp - tcg_gen_mov_tl(cpu_dst, cpu_hintp); - break; - case 5: // htba - tcg_gen_mov_tl(cpu_dst, cpu_htba); - break; - case 6: // hver - tcg_gen_mov_tl(cpu_dst, cpu_hver); - break; - case 31: // hstick_cmpr - tcg_gen_mov_tl(cpu_dst, cpu_hstick_cmpr); - break; - default: - goto illegal_insn; - } -#endif - gen_store_gpr(dc, rd, cpu_dst); - break; - } else if (xop == 0x2a) { /* rdwim / V9 rdpr */ - if (!supervisor(dc)) { - goto priv_insn; - } - cpu_tmp0 = tcg_temp_new(); -#ifdef TARGET_SPARC64 - rs1 = GET_FIELD(insn, 13, 17); - switch (rs1) { - case 0: // tpc - { - TCGv_ptr r_tsptr; - - r_tsptr = tcg_temp_new_ptr(); - gen_load_trap_state_at_tl(r_tsptr, tcg_env); - tcg_gen_ld_tl(cpu_tmp0, r_tsptr, - offsetof(trap_state, tpc)); - } - break; - case 1: // tnpc - { - TCGv_ptr r_tsptr; - - r_tsptr = tcg_temp_new_ptr(); - gen_load_trap_state_at_tl(r_tsptr, tcg_env); - tcg_gen_ld_tl(cpu_tmp0, r_tsptr, - offsetof(trap_state, tnpc)); - } - break; - case 2: // tstate - { - TCGv_ptr r_tsptr; - - r_tsptr = tcg_temp_new_ptr(); - gen_load_trap_state_at_tl(r_tsptr, tcg_env); - tcg_gen_ld_tl(cpu_tmp0, r_tsptr, - offsetof(trap_state, tstate)); - } - break; - case 3: // tt - { - TCGv_ptr r_tsptr = tcg_temp_new_ptr(); - - gen_load_trap_state_at_tl(r_tsptr, tcg_env); - tcg_gen_ld32s_tl(cpu_tmp0, r_tsptr, - offsetof(trap_state, tt)); - } - break; - case 4: // tick - { - TCGv_ptr r_tickptr; - TCGv_i32 r_const; - - r_tickptr = tcg_temp_new_ptr(); - r_const = tcg_constant_i32(dc->mem_idx); - tcg_gen_ld_ptr(r_tickptr, tcg_env, - offsetof(CPUSPARCState, tick)); - if (translator_io_start(&dc->base)) { - dc->base.is_jmp = DISAS_EXIT; - } - gen_helper_tick_get_count(cpu_tmp0, tcg_env, - r_tickptr, r_const); - } - break; - case 5: // tba - tcg_gen_mov_tl(cpu_tmp0, cpu_tbr); - break; - case 6: // pstate - tcg_gen_ld32s_tl(cpu_tmp0, tcg_env, - offsetof(CPUSPARCState, pstate)); - break; - case 7: // tl - tcg_gen_ld32s_tl(cpu_tmp0, tcg_env, - offsetof(CPUSPARCState, tl)); - break; - case 8: // pil - tcg_gen_ld32s_tl(cpu_tmp0, tcg_env, - offsetof(CPUSPARCState, psrpil)); - break; - case 9: // cwp - gen_helper_rdcwp(cpu_tmp0, tcg_env); - break; - case 10: // cansave - tcg_gen_ld32s_tl(cpu_tmp0, tcg_env, - offsetof(CPUSPARCState, cansave)); - break; - case 11: // canrestore - tcg_gen_ld32s_tl(cpu_tmp0, tcg_env, - offsetof(CPUSPARCState, canrestore)); - break; - case 12: // cleanwin - tcg_gen_ld32s_tl(cpu_tmp0, tcg_env, - offsetof(CPUSPARCState, cleanwin)); - break; - case 13: // otherwin - tcg_gen_ld32s_tl(cpu_tmp0, tcg_env, - offsetof(CPUSPARCState, otherwin)); - break; - case 14: // wstate - tcg_gen_ld32s_tl(cpu_tmp0, tcg_env, - 
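/*
 * Illustrative sketch (standalone C, not part of this patch): the
 * two-step gating in the old rdhpr path above, which the decodetree
 * conversion expresses as the "priv" argument to do_rd_special and
 * do_wr_special.  A missing HYPV feature is an illegal instruction;
 * having the feature but not hypervisor mode is a privileged-instruction
 * trap.  The enum and result codes are assumptions for illustration.
 */
#include <stdint.h>

enum level { USER, SUPERVISOR, HYPERVISOR };
enum access { OK, ILLEGAL_INSN, PRIV_INSN };

static enum access check_hpr(uint32_t features, uint32_t feature_hypv,
                             enum level l)
{
    if (!(features & feature_hypv)) {
        return ILLEGAL_INSN;
    }
    return (l >= HYPERVISOR) ? OK : PRIV_INSN;
}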
offsetof(CPUSPARCState, wstate)); - break; - case 16: // UA2005 gl - CHECK_IU_FEATURE(dc, GL); - tcg_gen_ld32s_tl(cpu_tmp0, tcg_env, - offsetof(CPUSPARCState, gl)); - break; - case 26: // UA2005 strand status - CHECK_IU_FEATURE(dc, HYPV); - if (!hypervisor(dc)) - goto priv_insn; - tcg_gen_mov_tl(cpu_tmp0, cpu_ssr); - break; - case 31: // ver - tcg_gen_mov_tl(cpu_tmp0, cpu_ver); - break; - case 15: // fq - default: - goto illegal_insn; - } -#else - tcg_gen_ext_i32_tl(cpu_tmp0, cpu_wim); -#endif - gen_store_gpr(dc, rd, cpu_tmp0); - break; -#endif -#if defined(TARGET_SPARC64) || !defined(CONFIG_USER_ONLY) - } else if (xop == 0x2b) { /* rdtbr / V9 flushw */ -#ifdef TARGET_SPARC64 - gen_helper_flushw(tcg_env); -#else - if (!supervisor(dc)) - goto priv_insn; - gen_store_gpr(dc, rd, cpu_tbr); -#endif - break; -#endif - } else if (xop == 0x34) { /* FPU Operations */ - if (gen_trap_ifnofpu(dc)) { - goto jmp_insn; - } - gen_op_clear_ieee_excp_and_FTT(); - rs1 = GET_FIELD(insn, 13, 17); - rs2 = GET_FIELD(insn, 27, 31); - xop = GET_FIELD(insn, 18, 26); - - switch (xop) { - case 0x1: /* fmovs */ - cpu_src1_32 = gen_load_fpr_F(dc, rs2); - gen_store_fpr_F(dc, rd, cpu_src1_32); - break; - case 0x5: /* fnegs */ - gen_ne_fop_FF(dc, rd, rs2, gen_helper_fnegs); - break; - case 0x9: /* fabss */ - gen_ne_fop_FF(dc, rd, rs2, gen_helper_fabss); - break; - case 0x29: /* fsqrts */ - CHECK_FPU_FEATURE(dc, FSQRT); - gen_fop_FF(dc, rd, rs2, gen_helper_fsqrts); - break; - case 0x2a: /* fsqrtd */ - CHECK_FPU_FEATURE(dc, FSQRT); - gen_fop_DD(dc, rd, rs2, gen_helper_fsqrtd); - break; - case 0x2b: /* fsqrtq */ - CHECK_FPU_FEATURE(dc, FLOAT128); - gen_fop_QQ(dc, rd, rs2, gen_helper_fsqrtq); - break; - case 0x41: /* fadds */ - gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fadds); - break; - case 0x42: /* faddd */ - gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_faddd); - break; - case 0x43: /* faddq */ - CHECK_FPU_FEATURE(dc, FLOAT128); - gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_faddq); - break; - case 0x45: /* fsubs */ - gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fsubs); - break; - case 0x46: /* fsubd */ - gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fsubd); - break; - case 0x47: /* fsubq */ - CHECK_FPU_FEATURE(dc, FLOAT128); - gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fsubq); - break; - case 0x49: /* fmuls */ - CHECK_FPU_FEATURE(dc, FMUL); - gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fmuls); - break; - case 0x4a: /* fmuld */ - CHECK_FPU_FEATURE(dc, FMUL); - gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld); - break; - case 0x4b: /* fmulq */ - CHECK_FPU_FEATURE(dc, FLOAT128); - CHECK_FPU_FEATURE(dc, FMUL); - gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fmulq); - break; - case 0x4d: /* fdivs */ - gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fdivs); - break; - case 0x4e: /* fdivd */ - gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fdivd); - break; - case 0x4f: /* fdivq */ - CHECK_FPU_FEATURE(dc, FLOAT128); - gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fdivq); - break; - case 0x69: /* fsmuld */ - CHECK_FPU_FEATURE(dc, FSMULD); - gen_fop_DFF(dc, rd, rs1, rs2, gen_helper_fsmuld); - break; - case 0x6e: /* fdmulq */ - CHECK_FPU_FEATURE(dc, FLOAT128); - gen_fop_QDD(dc, rd, rs1, rs2, gen_helper_fdmulq); - break; - case 0xc4: /* fitos */ - gen_fop_FF(dc, rd, rs2, gen_helper_fitos); - break; - case 0xc6: /* fdtos */ - gen_fop_FD(dc, rd, rs2, gen_helper_fdtos); - break; - case 0xc7: /* fqtos */ - CHECK_FPU_FEATURE(dc, FLOAT128); - gen_fop_FQ(dc, rd, rs2, gen_helper_fqtos); - break; - case 0xc8: /* fitod */ - gen_ne_fop_DF(dc, rd, rs2, gen_helper_fitod); - break; - case 
0xc9: /* fstod */ - gen_ne_fop_DF(dc, rd, rs2, gen_helper_fstod); - break; - case 0xcb: /* fqtod */ - CHECK_FPU_FEATURE(dc, FLOAT128); - gen_fop_DQ(dc, rd, rs2, gen_helper_fqtod); - break; - case 0xcc: /* fitoq */ - CHECK_FPU_FEATURE(dc, FLOAT128); - gen_ne_fop_QF(dc, rd, rs2, gen_helper_fitoq); - break; - case 0xcd: /* fstoq */ - CHECK_FPU_FEATURE(dc, FLOAT128); - gen_ne_fop_QF(dc, rd, rs2, gen_helper_fstoq); - break; - case 0xce: /* fdtoq */ - CHECK_FPU_FEATURE(dc, FLOAT128); - gen_ne_fop_QD(dc, rd, rs2, gen_helper_fdtoq); - break; - case 0xd1: /* fstoi */ - gen_fop_FF(dc, rd, rs2, gen_helper_fstoi); - break; - case 0xd2: /* fdtoi */ - gen_fop_FD(dc, rd, rs2, gen_helper_fdtoi); - break; - case 0xd3: /* fqtoi */ - CHECK_FPU_FEATURE(dc, FLOAT128); - gen_fop_FQ(dc, rd, rs2, gen_helper_fqtoi); - break; -#ifdef TARGET_SPARC64 - case 0x2: /* V9 fmovd */ - cpu_src1_64 = gen_load_fpr_D(dc, rs2); - gen_store_fpr_D(dc, rd, cpu_src1_64); - break; - case 0x3: /* V9 fmovq */ - CHECK_FPU_FEATURE(dc, FLOAT128); - gen_move_Q(dc, rd, rs2); - break; - case 0x6: /* V9 fnegd */ - gen_ne_fop_DD(dc, rd, rs2, gen_helper_fnegd); - break; - case 0x7: /* V9 fnegq */ - CHECK_FPU_FEATURE(dc, FLOAT128); - gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fnegq); - break; - case 0xa: /* V9 fabsd */ - gen_ne_fop_DD(dc, rd, rs2, gen_helper_fabsd); - break; - case 0xb: /* V9 fabsq */ - CHECK_FPU_FEATURE(dc, FLOAT128); - gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fabsq); - break; - case 0x81: /* V9 fstox */ - gen_fop_DF(dc, rd, rs2, gen_helper_fstox); - break; - case 0x82: /* V9 fdtox */ - gen_fop_DD(dc, rd, rs2, gen_helper_fdtox); - break; - case 0x83: /* V9 fqtox */ - CHECK_FPU_FEATURE(dc, FLOAT128); - gen_fop_DQ(dc, rd, rs2, gen_helper_fqtox); - break; - case 0x84: /* V9 fxtos */ - gen_fop_FD(dc, rd, rs2, gen_helper_fxtos); - break; - case 0x88: /* V9 fxtod */ - gen_fop_DD(dc, rd, rs2, gen_helper_fxtod); - break; - case 0x8c: /* V9 fxtoq */ - CHECK_FPU_FEATURE(dc, FLOAT128); - gen_ne_fop_QD(dc, rd, rs2, gen_helper_fxtoq); - break; -#endif - default: - goto illegal_insn; - } - } else if (xop == 0x35) { /* FPU Operations */ -#ifdef TARGET_SPARC64 - int cond; -#endif - if (gen_trap_ifnofpu(dc)) { - goto jmp_insn; - } - gen_op_clear_ieee_excp_and_FTT(); - rs1 = GET_FIELD(insn, 13, 17); - rs2 = GET_FIELD(insn, 27, 31); - xop = GET_FIELD(insn, 18, 26); +static bool trans_MOVcc(DisasContext *dc, arg_MOVcc *a) +{ + TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm); + DisasCompare cmp; -#ifdef TARGET_SPARC64 -#define FMOVR(sz) \ - do { \ - DisasCompare cmp; \ - cond = GET_FIELD_SP(insn, 10, 12); \ - cpu_src1 = get_src1(dc, insn); \ - gen_compare_reg(&cmp, cond, cpu_src1); \ - gen_fmov##sz(dc, &cmp, rd, rs2); \ - } while (0) - - if ((xop & 0x11f) == 0x005) { /* V9 fmovsr */ - FMOVR(s); - break; - } else if ((xop & 0x11f) == 0x006) { // V9 fmovdr - FMOVR(d); - break; - } else if ((xop & 0x11f) == 0x007) { // V9 fmovqr - CHECK_FPU_FEATURE(dc, FLOAT128); - FMOVR(q); - break; - } -#undef FMOVR -#endif - switch (xop) { -#ifdef TARGET_SPARC64 -#define FMOVCC(fcc, sz) \ - do { \ - DisasCompare cmp; \ - cond = GET_FIELD_SP(insn, 14, 17); \ - gen_fcompare(&cmp, fcc, cond); \ - gen_fmov##sz(dc, &cmp, rd, rs2); \ - } while (0) - - case 0x001: /* V9 fmovscc %fcc0 */ - FMOVCC(0, s); - break; - case 0x002: /* V9 fmovdcc %fcc0 */ - FMOVCC(0, d); - break; - case 0x003: /* V9 fmovqcc %fcc0 */ - CHECK_FPU_FEATURE(dc, FLOAT128); - FMOVCC(0, q); - break; - case 0x041: /* V9 fmovscc %fcc1 */ - FMOVCC(1, s); - break; - case 0x042: /* V9 fmovdcc %fcc1 */ - 
FMOVCC(1, d); - break; - case 0x043: /* V9 fmovqcc %fcc1 */ - CHECK_FPU_FEATURE(dc, FLOAT128); - FMOVCC(1, q); - break; - case 0x081: /* V9 fmovscc %fcc2 */ - FMOVCC(2, s); - break; - case 0x082: /* V9 fmovdcc %fcc2 */ - FMOVCC(2, d); - break; - case 0x083: /* V9 fmovqcc %fcc2 */ - CHECK_FPU_FEATURE(dc, FLOAT128); - FMOVCC(2, q); - break; - case 0x0c1: /* V9 fmovscc %fcc3 */ - FMOVCC(3, s); - break; - case 0x0c2: /* V9 fmovdcc %fcc3 */ - FMOVCC(3, d); - break; - case 0x0c3: /* V9 fmovqcc %fcc3 */ - CHECK_FPU_FEATURE(dc, FLOAT128); - FMOVCC(3, q); - break; -#undef FMOVCC -#define FMOVCC(xcc, sz) \ - do { \ - DisasCompare cmp; \ - cond = GET_FIELD_SP(insn, 14, 17); \ - gen_compare(&cmp, xcc, cond, dc); \ - gen_fmov##sz(dc, &cmp, rd, rs2); \ - } while (0) - - case 0x101: /* V9 fmovscc %icc */ - FMOVCC(0, s); - break; - case 0x102: /* V9 fmovdcc %icc */ - FMOVCC(0, d); - break; - case 0x103: /* V9 fmovqcc %icc */ - CHECK_FPU_FEATURE(dc, FLOAT128); - FMOVCC(0, q); - break; - case 0x181: /* V9 fmovscc %xcc */ - FMOVCC(1, s); - break; - case 0x182: /* V9 fmovdcc %xcc */ - FMOVCC(1, d); - break; - case 0x183: /* V9 fmovqcc %xcc */ - CHECK_FPU_FEATURE(dc, FLOAT128); - FMOVCC(1, q); - break; -#undef FMOVCC -#endif - case 0x51: /* fcmps, V9 %fcc */ - cpu_src1_32 = gen_load_fpr_F(dc, rs1); - cpu_src2_32 = gen_load_fpr_F(dc, rs2); - gen_op_fcmps(rd & 3, cpu_src1_32, cpu_src2_32); - break; - case 0x52: /* fcmpd, V9 %fcc */ - cpu_src1_64 = gen_load_fpr_D(dc, rs1); - cpu_src2_64 = gen_load_fpr_D(dc, rs2); - gen_op_fcmpd(rd & 3, cpu_src1_64, cpu_src2_64); - break; - case 0x53: /* fcmpq, V9 %fcc */ - CHECK_FPU_FEATURE(dc, FLOAT128); - gen_op_load_fpr_QT0(QFPREG(rs1)); - gen_op_load_fpr_QT1(QFPREG(rs2)); - gen_op_fcmpq(rd & 3); - break; - case 0x55: /* fcmpes, V9 %fcc */ - cpu_src1_32 = gen_load_fpr_F(dc, rs1); - cpu_src2_32 = gen_load_fpr_F(dc, rs2); - gen_op_fcmpes(rd & 3, cpu_src1_32, cpu_src2_32); - break; - case 0x56: /* fcmped, V9 %fcc */ - cpu_src1_64 = gen_load_fpr_D(dc, rs1); - cpu_src2_64 = gen_load_fpr_D(dc, rs2); - gen_op_fcmped(rd & 3, cpu_src1_64, cpu_src2_64); - break; - case 0x57: /* fcmpeq, V9 %fcc */ - CHECK_FPU_FEATURE(dc, FLOAT128); - gen_op_load_fpr_QT0(QFPREG(rs1)); - gen_op_load_fpr_QT1(QFPREG(rs2)); - gen_op_fcmpeq(rd & 3); - break; - default: - goto illegal_insn; - } - } else if (xop == 0x2) { - TCGv dst = gen_dest_gpr(dc, rd); - rs1 = GET_FIELD(insn, 13, 17); - if (rs1 == 0) { - /* clr/mov shortcut : or %g0, x, y -> mov x, y */ - if (IS_IMM) { /* immediate */ - simm = GET_FIELDs(insn, 19, 31); - tcg_gen_movi_tl(dst, simm); - gen_store_gpr(dc, rd, dst); - } else { /* register */ - rs2 = GET_FIELD(insn, 27, 31); - if (rs2 == 0) { - tcg_gen_movi_tl(dst, 0); - gen_store_gpr(dc, rd, dst); - } else { - cpu_src2 = gen_load_gpr(dc, rs2); - gen_store_gpr(dc, rd, cpu_src2); - } - } - } else { - cpu_src1 = get_src1(dc, insn); - if (IS_IMM) { /* immediate */ - simm = GET_FIELDs(insn, 19, 31); - tcg_gen_ori_tl(dst, cpu_src1, simm); - gen_store_gpr(dc, rd, dst); - } else { /* register */ - rs2 = GET_FIELD(insn, 27, 31); - if (rs2 == 0) { - /* mov shortcut: or x, %g0, y -> mov x, y */ - gen_store_gpr(dc, rd, cpu_src1); - } else { - cpu_src2 = gen_load_gpr(dc, rs2); - tcg_gen_or_tl(dst, cpu_src1, cpu_src2); - gen_store_gpr(dc, rd, dst); - } - } - } -#ifdef TARGET_SPARC64 - } else if (xop == 0x25) { /* sll, V9 sllx */ - cpu_src1 = get_src1(dc, insn); - if (IS_IMM) { /* immediate */ - simm = GET_FIELDs(insn, 20, 31); - if (insn & (1 << 12)) { - tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x3f); - } 
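/*
 * Illustrative sketch (standalone C, not part of this patch): the %fcc
 * encoding behind the fcmp cases above ("rd & 3" picks one of the four V9
 * fields).  The 2-bit result is architectural: 0 equal, 1 less, 2 greater,
 * 3 unordered; the fcmpe* variants additionally signal an IEEE invalid
 * exception on the unordered case.
 */
#include <math.h>

static unsigned fcmp_fcc(double a, double b)
{
    if (isunordered(a, b)) {
        return 3;                       /* at least one operand is NaN */
    }
    return (a < b) ? 1 : (a > b) ? 2 : 0;
}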
else { - tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x1f); - } - } else { /* register */ - rs2 = GET_FIELD(insn, 27, 31); - cpu_src2 = gen_load_gpr(dc, rs2); - cpu_tmp0 = tcg_temp_new(); - if (insn & (1 << 12)) { - tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f); - } else { - tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f); - } - tcg_gen_shl_i64(cpu_dst, cpu_src1, cpu_tmp0); - } - gen_store_gpr(dc, rd, cpu_dst); - } else if (xop == 0x26) { /* srl, V9 srlx */ - cpu_src1 = get_src1(dc, insn); - if (IS_IMM) { /* immediate */ - simm = GET_FIELDs(insn, 20, 31); - if (insn & (1 << 12)) { - tcg_gen_shri_i64(cpu_dst, cpu_src1, simm & 0x3f); - } else { - tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL); - tcg_gen_shri_i64(cpu_dst, cpu_dst, simm & 0x1f); - } - } else { /* register */ - rs2 = GET_FIELD(insn, 27, 31); - cpu_src2 = gen_load_gpr(dc, rs2); - cpu_tmp0 = tcg_temp_new(); - if (insn & (1 << 12)) { - tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f); - tcg_gen_shr_i64(cpu_dst, cpu_src1, cpu_tmp0); - } else { - tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f); - tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL); - tcg_gen_shr_i64(cpu_dst, cpu_dst, cpu_tmp0); - } - } - gen_store_gpr(dc, rd, cpu_dst); - } else if (xop == 0x27) { /* sra, V9 srax */ - cpu_src1 = get_src1(dc, insn); - if (IS_IMM) { /* immediate */ - simm = GET_FIELDs(insn, 20, 31); - if (insn & (1 << 12)) { - tcg_gen_sari_i64(cpu_dst, cpu_src1, simm & 0x3f); - } else { - tcg_gen_ext32s_i64(cpu_dst, cpu_src1); - tcg_gen_sari_i64(cpu_dst, cpu_dst, simm & 0x1f); - } - } else { /* register */ - rs2 = GET_FIELD(insn, 27, 31); - cpu_src2 = gen_load_gpr(dc, rs2); - cpu_tmp0 = tcg_temp_new(); - if (insn & (1 << 12)) { - tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f); - tcg_gen_sar_i64(cpu_dst, cpu_src1, cpu_tmp0); - } else { - tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f); - tcg_gen_ext32s_i64(cpu_dst, cpu_src1); - tcg_gen_sar_i64(cpu_dst, cpu_dst, cpu_tmp0); - } - } - gen_store_gpr(dc, rd, cpu_dst); -#endif - } else if (xop < 0x36) { - if (xop < 0x20) { - cpu_src1 = get_src1(dc, insn); - cpu_src2 = get_src2(dc, insn); - switch (xop & ~0x10) { - case 0x0: /* add */ - if (xop & 0x10) { - gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2); - tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD); - dc->cc_op = CC_OP_ADD; - } else { - tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2); - } - break; - case 0x1: /* and */ - tcg_gen_and_tl(cpu_dst, cpu_src1, cpu_src2); - if (xop & 0x10) { - tcg_gen_mov_tl(cpu_cc_dst, cpu_dst); - tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC); - dc->cc_op = CC_OP_LOGIC; - } - break; - case 0x2: /* or */ - tcg_gen_or_tl(cpu_dst, cpu_src1, cpu_src2); - if (xop & 0x10) { - tcg_gen_mov_tl(cpu_cc_dst, cpu_dst); - tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC); - dc->cc_op = CC_OP_LOGIC; - } - break; - case 0x3: /* xor */ - tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2); - if (xop & 0x10) { - tcg_gen_mov_tl(cpu_cc_dst, cpu_dst); - tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC); - dc->cc_op = CC_OP_LOGIC; - } - break; - case 0x4: /* sub */ - if (xop & 0x10) { - gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2); - tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB); - dc->cc_op = CC_OP_SUB; - } else { - tcg_gen_sub_tl(cpu_dst, cpu_src1, cpu_src2); - } - break; - case 0x5: /* andn */ - tcg_gen_andc_tl(cpu_dst, cpu_src1, cpu_src2); - if (xop & 0x10) { - tcg_gen_mov_tl(cpu_cc_dst, cpu_dst); - tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC); - dc->cc_op = CC_OP_LOGIC; - } - break; - case 0x6: /* orn */ - tcg_gen_orc_tl(cpu_dst, cpu_src1, cpu_src2); - if (xop & 0x10) { - tcg_gen_mov_tl(cpu_cc_dst, cpu_dst); - 
tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC); - dc->cc_op = CC_OP_LOGIC; - } - break; - case 0x7: /* xorn */ - tcg_gen_eqv_tl(cpu_dst, cpu_src1, cpu_src2); - if (xop & 0x10) { - tcg_gen_mov_tl(cpu_cc_dst, cpu_dst); - tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC); - dc->cc_op = CC_OP_LOGIC; - } - break; - case 0x8: /* addx, V9 addc */ - gen_op_addx_int(dc, cpu_dst, cpu_src1, cpu_src2, - (xop & 0x10)); - break; -#ifdef TARGET_SPARC64 - case 0x9: /* V9 mulx */ - tcg_gen_mul_i64(cpu_dst, cpu_src1, cpu_src2); - break; -#endif - case 0xa: /* umul */ - CHECK_IU_FEATURE(dc, MUL); - gen_op_umul(cpu_dst, cpu_src1, cpu_src2); - if (xop & 0x10) { - tcg_gen_mov_tl(cpu_cc_dst, cpu_dst); - tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC); - dc->cc_op = CC_OP_LOGIC; - } - break; - case 0xb: /* smul */ - CHECK_IU_FEATURE(dc, MUL); - gen_op_smul(cpu_dst, cpu_src1, cpu_src2); - if (xop & 0x10) { - tcg_gen_mov_tl(cpu_cc_dst, cpu_dst); - tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC); - dc->cc_op = CC_OP_LOGIC; - } - break; - case 0xc: /* subx, V9 subc */ - gen_op_subx_int(dc, cpu_dst, cpu_src1, cpu_src2, - (xop & 0x10)); - break; -#ifdef TARGET_SPARC64 - case 0xd: /* V9 udivx */ - gen_helper_udivx(cpu_dst, tcg_env, cpu_src1, cpu_src2); - break; -#endif - case 0xe: /* udiv */ - CHECK_IU_FEATURE(dc, DIV); - if (xop & 0x10) { - gen_helper_udiv_cc(cpu_dst, tcg_env, cpu_src1, - cpu_src2); - dc->cc_op = CC_OP_DIV; - } else { - gen_helper_udiv(cpu_dst, tcg_env, cpu_src1, - cpu_src2); - } - break; - case 0xf: /* sdiv */ - CHECK_IU_FEATURE(dc, DIV); - if (xop & 0x10) { - gen_helper_sdiv_cc(cpu_dst, tcg_env, cpu_src1, - cpu_src2); - dc->cc_op = CC_OP_DIV; - } else { - gen_helper_sdiv(cpu_dst, tcg_env, cpu_src1, - cpu_src2); - } - break; - default: - goto illegal_insn; - } - gen_store_gpr(dc, rd, cpu_dst); - } else { - cpu_src1 = get_src1(dc, insn); - cpu_src2 = get_src2(dc, insn); - switch (xop) { - case 0x20: /* taddcc */ - gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2); - gen_store_gpr(dc, rd, cpu_dst); - tcg_gen_movi_i32(cpu_cc_op, CC_OP_TADD); - dc->cc_op = CC_OP_TADD; - break; - case 0x21: /* tsubcc */ - gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2); - gen_store_gpr(dc, rd, cpu_dst); - tcg_gen_movi_i32(cpu_cc_op, CC_OP_TSUB); - dc->cc_op = CC_OP_TSUB; - break; - case 0x22: /* taddcctv */ - gen_helper_taddcctv(cpu_dst, tcg_env, - cpu_src1, cpu_src2); - gen_store_gpr(dc, rd, cpu_dst); - dc->cc_op = CC_OP_TADDTV; - break; - case 0x23: /* tsubcctv */ - gen_helper_tsubcctv(cpu_dst, tcg_env, - cpu_src1, cpu_src2); - gen_store_gpr(dc, rd, cpu_dst); - dc->cc_op = CC_OP_TSUBTV; - break; - case 0x24: /* mulscc */ - update_psr(dc); - gen_op_mulscc(cpu_dst, cpu_src1, cpu_src2); - gen_store_gpr(dc, rd, cpu_dst); - tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD); - dc->cc_op = CC_OP_ADD; - break; -#ifndef TARGET_SPARC64 - case 0x25: /* sll */ - if (IS_IMM) { /* immediate */ - simm = GET_FIELDs(insn, 20, 31); - tcg_gen_shli_tl(cpu_dst, cpu_src1, simm & 0x1f); - } else { /* register */ - cpu_tmp0 = tcg_temp_new(); - tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f); - tcg_gen_shl_tl(cpu_dst, cpu_src1, cpu_tmp0); - } - gen_store_gpr(dc, rd, cpu_dst); - break; - case 0x26: /* srl */ - if (IS_IMM) { /* immediate */ - simm = GET_FIELDs(insn, 20, 31); - tcg_gen_shri_tl(cpu_dst, cpu_src1, simm & 0x1f); - } else { /* register */ - cpu_tmp0 = tcg_temp_new(); - tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f); - tcg_gen_shr_tl(cpu_dst, cpu_src1, cpu_tmp0); - } - gen_store_gpr(dc, rd, cpu_dst); - break; - case 0x27: /* sra */ - if (IS_IMM) { /* immediate */ - simm = GET_FIELDs(insn, 20, 31); 
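/*
 * Illustrative sketch (standalone C, not part of this patch): the UMUL
 * semantics behind gen_op_umul above: a 32x32->64 multiply whose high word
 * is written to %y.  On 32-bit CPUs rd receives the low word (V9 keeps the
 * whole 64-bit product in rd); the cc form then sets flags from the
 * result.
 */
#include <stdint.h>

static uint32_t umul(uint32_t a, uint32_t b, uint32_t *y)
{
    uint64_t prod = (uint64_t)a * b;

    *y = (uint32_t)(prod >> 32);   /* high 32 bits to %y */
    return (uint32_t)prod;         /* low 32 bits to rd */
}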
- tcg_gen_sari_tl(cpu_dst, cpu_src1, simm & 0x1f); - } else { /* register */ - cpu_tmp0 = tcg_temp_new(); - tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f); - tcg_gen_sar_tl(cpu_dst, cpu_src1, cpu_tmp0); - } - gen_store_gpr(dc, rd, cpu_dst); - break; -#endif - case 0x30: - { - cpu_tmp0 = tcg_temp_new(); - switch(rd) { - case 0: /* wry */ - tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2); - tcg_gen_andi_tl(cpu_y, cpu_tmp0, 0xffffffff); - break; -#ifndef TARGET_SPARC64 - case 0x01 ... 0x0f: /* undefined in the - SPARCv8 manual, nop - on the microSPARC - II */ - case 0x10 ... 0x1f: /* implementation-dependent - in the SPARCv8 - manual, nop on the - microSPARC II */ - if ((rd == 0x13) && (dc->def->features & - CPU_FEATURE_POWERDOWN)) { - /* LEON3 power-down */ - save_state(dc); - gen_helper_power_down(tcg_env); - } - break; -#else - case 0x2: /* V9 wrccr */ - tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2); - gen_helper_wrccr(tcg_env, cpu_tmp0); - tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS); - dc->cc_op = CC_OP_FLAGS; - break; - case 0x3: /* V9 wrasi */ - tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2); - tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0xff); - tcg_gen_st32_tl(cpu_tmp0, tcg_env, - offsetof(CPUSPARCState, asi)); - /* - * End TB to notice changed ASI. - * TODO: Could notice src1 = %g0 and IS_IMM, - * update DisasContext and not exit the TB. - */ - save_state(dc); - gen_op_next_insn(); - tcg_gen_lookup_and_goto_ptr(); - dc->base.is_jmp = DISAS_NORETURN; - break; - case 0x6: /* V9 wrfprs */ - tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2); - tcg_gen_trunc_tl_i32(cpu_fprs, cpu_tmp0); - dc->fprs_dirty = 0; - save_state(dc); - gen_op_next_insn(); - tcg_gen_exit_tb(NULL, 0); - dc->base.is_jmp = DISAS_NORETURN; - break; - case 0xf: /* V9 sir, nop if user */ -#if !defined(CONFIG_USER_ONLY) - if (supervisor(dc)) { - ; // XXX - } -#endif - break; - case 0x13: /* Graphics Status */ - if (gen_trap_ifnofpu(dc)) { - goto jmp_insn; - } - tcg_gen_xor_tl(cpu_gsr, cpu_src1, cpu_src2); - break; - case 0x14: /* Softint set */ - if (!supervisor(dc)) - goto illegal_insn; - tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2); - gen_helper_set_softint(tcg_env, cpu_tmp0); - break; - case 0x15: /* Softint clear */ - if (!supervisor(dc)) - goto illegal_insn; - tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2); - gen_helper_clear_softint(tcg_env, cpu_tmp0); - break; - case 0x16: /* Softint write */ - if (!supervisor(dc)) - goto illegal_insn; - tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2); - gen_helper_write_softint(tcg_env, cpu_tmp0); - break; - case 0x17: /* Tick compare */ -#if !defined(CONFIG_USER_ONLY) - if (!supervisor(dc)) - goto illegal_insn; -#endif - { - TCGv_ptr r_tickptr; - - tcg_gen_xor_tl(cpu_tick_cmpr, cpu_src1, - cpu_src2); - r_tickptr = tcg_temp_new_ptr(); - tcg_gen_ld_ptr(r_tickptr, tcg_env, - offsetof(CPUSPARCState, tick)); - translator_io_start(&dc->base); - gen_helper_tick_set_limit(r_tickptr, - cpu_tick_cmpr); - /* End TB to handle timer interrupt */ - dc->base.is_jmp = DISAS_EXIT; - } - break; - case 0x18: /* System tick */ -#if !defined(CONFIG_USER_ONLY) - if (!supervisor(dc)) - goto illegal_insn; -#endif - { - TCGv_ptr r_tickptr; - - tcg_gen_xor_tl(cpu_tmp0, cpu_src1, - cpu_src2); - r_tickptr = tcg_temp_new_ptr(); - tcg_gen_ld_ptr(r_tickptr, tcg_env, - offsetof(CPUSPARCState, stick)); - translator_io_start(&dc->base); - gen_helper_tick_set_count(r_tickptr, - cpu_tmp0); - /* End TB to handle timer interrupt */ - dc->base.is_jmp = DISAS_EXIT; - } - break; - case 0x19: /* System tick compare */ -#if !defined(CONFIG_USER_ONLY) - 
if (!supervisor(dc)) - goto illegal_insn; -#endif - { - TCGv_ptr r_tickptr; - - tcg_gen_xor_tl(cpu_stick_cmpr, cpu_src1, - cpu_src2); - r_tickptr = tcg_temp_new_ptr(); - tcg_gen_ld_ptr(r_tickptr, tcg_env, - offsetof(CPUSPARCState, stick)); - translator_io_start(&dc->base); - gen_helper_tick_set_limit(r_tickptr, - cpu_stick_cmpr); - /* End TB to handle timer interrupt */ - dc->base.is_jmp = DISAS_EXIT; - } - break; - - case 0x10: /* Performance Control */ - case 0x11: /* Performance Instrumentation - Counter */ - case 0x12: /* Dispatch Control */ -#endif - default: - goto illegal_insn; - } - } - break; -#if !defined(CONFIG_USER_ONLY) - case 0x31: /* wrpsr, V9 saved, restored */ - { - if (!supervisor(dc)) - goto priv_insn; -#ifdef TARGET_SPARC64 - switch (rd) { - case 0: - gen_helper_saved(tcg_env); - break; - case 1: - gen_helper_restored(tcg_env); - break; - case 2: /* UA2005 allclean */ - case 3: /* UA2005 otherw */ - case 4: /* UA2005 normalw */ - case 5: /* UA2005 invalw */ - // XXX - default: - goto illegal_insn; - } -#else - cpu_tmp0 = tcg_temp_new(); - tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2); - gen_helper_wrpsr(tcg_env, cpu_tmp0); - tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS); - dc->cc_op = CC_OP_FLAGS; - save_state(dc); - gen_op_next_insn(); - tcg_gen_exit_tb(NULL, 0); - dc->base.is_jmp = DISAS_NORETURN; -#endif - } - break; - case 0x32: /* wrwim, V9 wrpr */ - { - if (!supervisor(dc)) - goto priv_insn; - cpu_tmp0 = tcg_temp_new(); - tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2); -#ifdef TARGET_SPARC64 - switch (rd) { - case 0: // tpc - { - TCGv_ptr r_tsptr; - - r_tsptr = tcg_temp_new_ptr(); - gen_load_trap_state_at_tl(r_tsptr, tcg_env); - tcg_gen_st_tl(cpu_tmp0, r_tsptr, - offsetof(trap_state, tpc)); - } - break; - case 1: // tnpc - { - TCGv_ptr r_tsptr; - - r_tsptr = tcg_temp_new_ptr(); - gen_load_trap_state_at_tl(r_tsptr, tcg_env); - tcg_gen_st_tl(cpu_tmp0, r_tsptr, - offsetof(trap_state, tnpc)); - } - break; - case 2: // tstate - { - TCGv_ptr r_tsptr; - - r_tsptr = tcg_temp_new_ptr(); - gen_load_trap_state_at_tl(r_tsptr, tcg_env); - tcg_gen_st_tl(cpu_tmp0, r_tsptr, - offsetof(trap_state, - tstate)); - } - break; - case 3: // tt - { - TCGv_ptr r_tsptr; - - r_tsptr = tcg_temp_new_ptr(); - gen_load_trap_state_at_tl(r_tsptr, tcg_env); - tcg_gen_st32_tl(cpu_tmp0, r_tsptr, - offsetof(trap_state, tt)); - } - break; - case 4: // tick - { - TCGv_ptr r_tickptr; - - r_tickptr = tcg_temp_new_ptr(); - tcg_gen_ld_ptr(r_tickptr, tcg_env, - offsetof(CPUSPARCState, tick)); - translator_io_start(&dc->base); - gen_helper_tick_set_count(r_tickptr, - cpu_tmp0); - /* End TB to handle timer interrupt */ - dc->base.is_jmp = DISAS_EXIT; - } - break; - case 5: // tba - tcg_gen_mov_tl(cpu_tbr, cpu_tmp0); - break; - case 6: // pstate - save_state(dc); - if (translator_io_start(&dc->base)) { - dc->base.is_jmp = DISAS_EXIT; - } - gen_helper_wrpstate(tcg_env, cpu_tmp0); - dc->npc = DYNAMIC_PC; - break; - case 7: // tl - save_state(dc); - tcg_gen_st32_tl(cpu_tmp0, tcg_env, - offsetof(CPUSPARCState, tl)); - dc->npc = DYNAMIC_PC; - break; - case 8: // pil - if (translator_io_start(&dc->base)) { - dc->base.is_jmp = DISAS_EXIT; - } - gen_helper_wrpil(tcg_env, cpu_tmp0); - break; - case 9: // cwp - gen_helper_wrcwp(tcg_env, cpu_tmp0); - break; - case 10: // cansave - tcg_gen_st32_tl(cpu_tmp0, tcg_env, - offsetof(CPUSPARCState, - cansave)); - break; - case 11: // canrestore - tcg_gen_st32_tl(cpu_tmp0, tcg_env, - offsetof(CPUSPARCState, - canrestore)); - break; - case 12: // cleanwin - tcg_gen_st32_tl(cpu_tmp0, 
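/*
 * Illustrative sketch (standalone C, not part of this patch): the V8 PSR
 * fields that the wrpsr/rdpsr helpers above shuttle around.  Field
 * positions per the SPARC V8 spec: icc in bits 23:20 (N,Z,V,C), PIL in
 * 11:8, S/PS/ET in bits 7/6/5 and CWP in 4:0 -- which is why writing PSR
 * must refresh both the flags state (CC_OP_FLAGS) and the register
 * window, and ends the TB.
 */
#include <stdint.h>

#define PSR_CWP(p)  ((p) & 0x1f)
#define PSR_ET(p)   (((p) >> 5) & 1)
#define PSR_PS(p)   (((p) >> 6) & 1)
#define PSR_S(p)    (((p) >> 7) & 1)
#define PSR_PIL(p)  (((p) >> 8) & 0xf)
#define PSR_ICC(p)  (((p) >> 20) & 0xf)   /* N Z V C */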
tcg_env, - offsetof(CPUSPARCState, - cleanwin)); - break; - case 13: // otherwin - tcg_gen_st32_tl(cpu_tmp0, tcg_env, - offsetof(CPUSPARCState, - otherwin)); - break; - case 14: // wstate - tcg_gen_st32_tl(cpu_tmp0, tcg_env, - offsetof(CPUSPARCState, - wstate)); - break; - case 16: // UA2005 gl - CHECK_IU_FEATURE(dc, GL); - gen_helper_wrgl(tcg_env, cpu_tmp0); - break; - case 26: // UA2005 strand status - CHECK_IU_FEATURE(dc, HYPV); - if (!hypervisor(dc)) - goto priv_insn; - tcg_gen_mov_tl(cpu_ssr, cpu_tmp0); - break; - default: - goto illegal_insn; - } -#else - tcg_gen_trunc_tl_i32(cpu_wim, cpu_tmp0); - if (dc->def->nwindows != 32) { - tcg_gen_andi_tl(cpu_wim, cpu_wim, - (1 << dc->def->nwindows) - 1); - } -#endif - } - break; - case 0x33: /* wrtbr, UA2005 wrhpr */ - { -#ifndef TARGET_SPARC64 - if (!supervisor(dc)) - goto priv_insn; - tcg_gen_xor_tl(cpu_tbr, cpu_src1, cpu_src2); -#else - CHECK_IU_FEATURE(dc, HYPV); - if (!hypervisor(dc)) - goto priv_insn; - cpu_tmp0 = tcg_temp_new(); - tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2); - switch (rd) { - case 0: // hpstate - tcg_gen_st_i64(cpu_tmp0, tcg_env, - offsetof(CPUSPARCState, - hpstate)); - save_state(dc); - gen_op_next_insn(); - tcg_gen_exit_tb(NULL, 0); - dc->base.is_jmp = DISAS_NORETURN; - break; - case 1: // htstate - // XXX gen_op_wrhtstate(); - break; - case 3: // hintp - tcg_gen_mov_tl(cpu_hintp, cpu_tmp0); - break; - case 5: // htba - tcg_gen_mov_tl(cpu_htba, cpu_tmp0); - break; - case 31: // hstick_cmpr - { - TCGv_ptr r_tickptr; - - tcg_gen_mov_tl(cpu_hstick_cmpr, cpu_tmp0); - r_tickptr = tcg_temp_new_ptr(); - tcg_gen_ld_ptr(r_tickptr, tcg_env, - offsetof(CPUSPARCState, hstick)); - translator_io_start(&dc->base); - gen_helper_tick_set_limit(r_tickptr, - cpu_hstick_cmpr); - /* End TB to handle timer interrupt */ - dc->base.is_jmp = DISAS_EXIT; - } - break; - case 6: // hver readonly - default: - goto illegal_insn; - } -#endif - } - break; -#endif -#ifdef TARGET_SPARC64 - case 0x2c: /* V9 movcc */ - { - int cc = GET_FIELD_SP(insn, 11, 12); - int cond = GET_FIELD_SP(insn, 14, 17); - DisasCompare cmp; - TCGv dst; - - if (insn & (1 << 18)) { - if (cc == 0) { - gen_compare(&cmp, 0, cond, dc); - } else if (cc == 2) { - gen_compare(&cmp, 1, cond, dc); - } else { - goto illegal_insn; - } - } else { - gen_fcompare(&cmp, cc, cond); - } - - /* The get_src2 above loaded the normal 13-bit - immediate field, not the 11-bit field we have - in movcc. But it did handle the reg case. */ - if (IS_IMM) { - simm = GET_FIELD_SPs(insn, 0, 10); - tcg_gen_movi_tl(cpu_src2, simm); - } - - dst = gen_load_gpr(dc, rd); - tcg_gen_movcond_tl(cmp.cond, dst, - cmp.c1, cmp.c2, - cpu_src2, dst); - gen_store_gpr(dc, rd, dst); - break; - } - case 0x2d: /* V9 sdivx */ - gen_helper_sdivx(cpu_dst, tcg_env, cpu_src1, cpu_src2); - gen_store_gpr(dc, rd, cpu_dst); - break; - case 0x2e: /* V9 popc */ - tcg_gen_ctpop_tl(cpu_dst, cpu_src2); - gen_store_gpr(dc, rd, cpu_dst); - break; - case 0x2f: /* V9 movr */ - { - int cond = GET_FIELD_SP(insn, 10, 12); - DisasCompare cmp; - TCGv dst; - - gen_compare_reg(&cmp, cond, cpu_src1); - - /* The get_src2 above loaded the normal 13-bit - immediate field, not the 10-bit field we have - in movr. But it did handle the reg case. 
*/ - if (IS_IMM) { - simm = GET_FIELD_SPs(insn, 0, 9); - tcg_gen_movi_tl(cpu_src2, simm); - } - - dst = gen_load_gpr(dc, rd); - tcg_gen_movcond_tl(cmp.cond, dst, - cmp.c1, cmp.c2, - cpu_src2, dst); - gen_store_gpr(dc, rd, dst); - break; - } -#endif - default: - goto illegal_insn; - } - } - } else if (xop == 0x36) { /* UltraSparc shutdown, VIS, V8 CPop1 */ -#ifdef TARGET_SPARC64 - int opf = GET_FIELD_SP(insn, 5, 13); - rs1 = GET_FIELD(insn, 13, 17); - rs2 = GET_FIELD(insn, 27, 31); - if (gen_trap_ifnofpu(dc)) { - goto jmp_insn; - } + if (src2 == NULL) { + return false; + } + gen_compare(&cmp, a->cc, a->cond, dc); + return do_mov_cond(dc, &cmp, a->rd, src2); +} - switch (opf) { - case 0x000: /* VIS I edge8cc */ - CHECK_FPU_FEATURE(dc, VIS1); - cpu_src1 = gen_load_gpr(dc, rs1); - cpu_src2 = gen_load_gpr(dc, rs2); - gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 0); - gen_store_gpr(dc, rd, cpu_dst); - break; - case 0x001: /* VIS II edge8n */ - CHECK_FPU_FEATURE(dc, VIS2); - cpu_src1 = gen_load_gpr(dc, rs1); - cpu_src2 = gen_load_gpr(dc, rs2); - gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 0); - gen_store_gpr(dc, rd, cpu_dst); - break; - case 0x002: /* VIS I edge8lcc */ - CHECK_FPU_FEATURE(dc, VIS1); - cpu_src1 = gen_load_gpr(dc, rs1); - cpu_src2 = gen_load_gpr(dc, rs2); - gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 1); - gen_store_gpr(dc, rd, cpu_dst); - break; - case 0x003: /* VIS II edge8ln */ - CHECK_FPU_FEATURE(dc, VIS2); - cpu_src1 = gen_load_gpr(dc, rs1); - cpu_src2 = gen_load_gpr(dc, rs2); - gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 1); - gen_store_gpr(dc, rd, cpu_dst); - break; - case 0x004: /* VIS I edge16cc */ - CHECK_FPU_FEATURE(dc, VIS1); - cpu_src1 = gen_load_gpr(dc, rs1); - cpu_src2 = gen_load_gpr(dc, rs2); - gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 0); - gen_store_gpr(dc, rd, cpu_dst); - break; - case 0x005: /* VIS II edge16n */ - CHECK_FPU_FEATURE(dc, VIS2); - cpu_src1 = gen_load_gpr(dc, rs1); - cpu_src2 = gen_load_gpr(dc, rs2); - gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 0); - gen_store_gpr(dc, rd, cpu_dst); - break; - case 0x006: /* VIS I edge16lcc */ - CHECK_FPU_FEATURE(dc, VIS1); - cpu_src1 = gen_load_gpr(dc, rs1); - cpu_src2 = gen_load_gpr(dc, rs2); - gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 1); - gen_store_gpr(dc, rd, cpu_dst); - break; - case 0x007: /* VIS II edge16ln */ - CHECK_FPU_FEATURE(dc, VIS2); - cpu_src1 = gen_load_gpr(dc, rs1); - cpu_src2 = gen_load_gpr(dc, rs2); - gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 1); - gen_store_gpr(dc, rd, cpu_dst); - break; - case 0x008: /* VIS I edge32cc */ - CHECK_FPU_FEATURE(dc, VIS1); - cpu_src1 = gen_load_gpr(dc, rs1); - cpu_src2 = gen_load_gpr(dc, rs2); - gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 0); - gen_store_gpr(dc, rd, cpu_dst); - break; - case 0x009: /* VIS II edge32n */ - CHECK_FPU_FEATURE(dc, VIS2); - cpu_src1 = gen_load_gpr(dc, rs1); - cpu_src2 = gen_load_gpr(dc, rs2); - gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 0); - gen_store_gpr(dc, rd, cpu_dst); - break; - case 0x00a: /* VIS I edge32lcc */ - CHECK_FPU_FEATURE(dc, VIS1); - cpu_src1 = gen_load_gpr(dc, rs1); - cpu_src2 = gen_load_gpr(dc, rs2); - gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 1); - gen_store_gpr(dc, rd, cpu_dst); - break; - case 0x00b: /* VIS II edge32ln */ - CHECK_FPU_FEATURE(dc, VIS2); - cpu_src1 = gen_load_gpr(dc, rs1); - cpu_src2 = gen_load_gpr(dc, rs2); - gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 1); - gen_store_gpr(dc, rd, cpu_dst); - break; - case 0x010: /* VIS I array8 */ - 
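/*
 * Illustrative sketch (standalone C, not part of this patch): the
 * register conditions behind the old movr case above (gen_compare_reg):
 * MOVR tests rs1 itself rather than the condition codes.  rcond encodings
 * per SPARC V9; 0 and 4 are reserved.
 */
#include <stdbool.h>
#include <stdint.h>

static bool movr_cond(unsigned rcond, int64_t r)
{
    switch (rcond) {
    case 1: return r == 0;    /* MOVRZ   */
    case 2: return r <= 0;    /* MOVRLEZ */
    case 3: return r < 0;     /* MOVRLZ  */
    case 5: return r != 0;    /* MOVRNZ  */
    case 6: return r > 0;     /* MOVRGZ  */
    case 7: return r >= 0;    /* MOVRGEZ */
    default: return false;    /* reserved encodings: illegal_insn */
    }
}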
CHECK_FPU_FEATURE(dc, VIS1); - cpu_src1 = gen_load_gpr(dc, rs1); - cpu_src2 = gen_load_gpr(dc, rs2); - gen_helper_array8(cpu_dst, cpu_src1, cpu_src2); - gen_store_gpr(dc, rd, cpu_dst); - break; - case 0x012: /* VIS I array16 */ - CHECK_FPU_FEATURE(dc, VIS1); - cpu_src1 = gen_load_gpr(dc, rs1); - cpu_src2 = gen_load_gpr(dc, rs2); - gen_helper_array8(cpu_dst, cpu_src1, cpu_src2); - tcg_gen_shli_i64(cpu_dst, cpu_dst, 1); - gen_store_gpr(dc, rd, cpu_dst); - break; - case 0x014: /* VIS I array32 */ - CHECK_FPU_FEATURE(dc, VIS1); - cpu_src1 = gen_load_gpr(dc, rs1); - cpu_src2 = gen_load_gpr(dc, rs2); - gen_helper_array8(cpu_dst, cpu_src1, cpu_src2); - tcg_gen_shli_i64(cpu_dst, cpu_dst, 2); - gen_store_gpr(dc, rd, cpu_dst); - break; - case 0x018: /* VIS I alignaddr */ - CHECK_FPU_FEATURE(dc, VIS1); - cpu_src1 = gen_load_gpr(dc, rs1); - cpu_src2 = gen_load_gpr(dc, rs2); - gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 0); - gen_store_gpr(dc, rd, cpu_dst); - break; - case 0x01a: /* VIS I alignaddrl */ - CHECK_FPU_FEATURE(dc, VIS1); - cpu_src1 = gen_load_gpr(dc, rs1); - cpu_src2 = gen_load_gpr(dc, rs2); - gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 1); - gen_store_gpr(dc, rd, cpu_dst); - break; - case 0x019: /* VIS II bmask */ - CHECK_FPU_FEATURE(dc, VIS2); - cpu_src1 = gen_load_gpr(dc, rs1); - cpu_src2 = gen_load_gpr(dc, rs2); - tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2); - tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, cpu_dst, 32, 32); - gen_store_gpr(dc, rd, cpu_dst); - break; - case 0x020: /* VIS I fcmple16 */ - CHECK_FPU_FEATURE(dc, VIS1); - cpu_src1_64 = gen_load_fpr_D(dc, rs1); - cpu_src2_64 = gen_load_fpr_D(dc, rs2); - gen_helper_fcmple16(cpu_dst, cpu_src1_64, cpu_src2_64); - gen_store_gpr(dc, rd, cpu_dst); - break; - case 0x022: /* VIS I fcmpne16 */ - CHECK_FPU_FEATURE(dc, VIS1); - cpu_src1_64 = gen_load_fpr_D(dc, rs1); - cpu_src2_64 = gen_load_fpr_D(dc, rs2); - gen_helper_fcmpne16(cpu_dst, cpu_src1_64, cpu_src2_64); - gen_store_gpr(dc, rd, cpu_dst); - break; - case 0x024: /* VIS I fcmple32 */ - CHECK_FPU_FEATURE(dc, VIS1); - cpu_src1_64 = gen_load_fpr_D(dc, rs1); - cpu_src2_64 = gen_load_fpr_D(dc, rs2); - gen_helper_fcmple32(cpu_dst, cpu_src1_64, cpu_src2_64); - gen_store_gpr(dc, rd, cpu_dst); - break; - case 0x026: /* VIS I fcmpne32 */ - CHECK_FPU_FEATURE(dc, VIS1); - cpu_src1_64 = gen_load_fpr_D(dc, rs1); - cpu_src2_64 = gen_load_fpr_D(dc, rs2); - gen_helper_fcmpne32(cpu_dst, cpu_src1_64, cpu_src2_64); - gen_store_gpr(dc, rd, cpu_dst); - break; - case 0x028: /* VIS I fcmpgt16 */ - CHECK_FPU_FEATURE(dc, VIS1); - cpu_src1_64 = gen_load_fpr_D(dc, rs1); - cpu_src2_64 = gen_load_fpr_D(dc, rs2); - gen_helper_fcmpgt16(cpu_dst, cpu_src1_64, cpu_src2_64); - gen_store_gpr(dc, rd, cpu_dst); - break; - case 0x02a: /* VIS I fcmpeq16 */ - CHECK_FPU_FEATURE(dc, VIS1); - cpu_src1_64 = gen_load_fpr_D(dc, rs1); - cpu_src2_64 = gen_load_fpr_D(dc, rs2); - gen_helper_fcmpeq16(cpu_dst, cpu_src1_64, cpu_src2_64); - gen_store_gpr(dc, rd, cpu_dst); - break; - case 0x02c: /* VIS I fcmpgt32 */ - CHECK_FPU_FEATURE(dc, VIS1); - cpu_src1_64 = gen_load_fpr_D(dc, rs1); - cpu_src2_64 = gen_load_fpr_D(dc, rs2); - gen_helper_fcmpgt32(cpu_dst, cpu_src1_64, cpu_src2_64); - gen_store_gpr(dc, rd, cpu_dst); - break; - case 0x02e: /* VIS I fcmpeq32 */ - CHECK_FPU_FEATURE(dc, VIS1); - cpu_src1_64 = gen_load_fpr_D(dc, rs1); - cpu_src2_64 = gen_load_fpr_D(dc, rs2); - gen_helper_fcmpeq32(cpu_dst, cpu_src1_64, cpu_src2_64); - gen_store_gpr(dc, rd, cpu_dst); - break; - case 0x031: /* VIS I fmul8x16 */ - CHECK_FPU_FEATURE(dc, VIS1); - 
gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16); - break; - case 0x033: /* VIS I fmul8x16au */ - CHECK_FPU_FEATURE(dc, VIS1); - gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16au); - break; - case 0x035: /* VIS I fmul8x16al */ - CHECK_FPU_FEATURE(dc, VIS1); - gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16al); - break; - case 0x036: /* VIS I fmul8sux16 */ - CHECK_FPU_FEATURE(dc, VIS1); - gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8sux16); - break; - case 0x037: /* VIS I fmul8ulx16 */ - CHECK_FPU_FEATURE(dc, VIS1); - gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8ulx16); - break; - case 0x038: /* VIS I fmuld8sux16 */ - CHECK_FPU_FEATURE(dc, VIS1); - gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8sux16); - break; - case 0x039: /* VIS I fmuld8ulx16 */ - CHECK_FPU_FEATURE(dc, VIS1); - gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8ulx16); - break; - case 0x03a: /* VIS I fpack32 */ - CHECK_FPU_FEATURE(dc, VIS1); - gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpack32); - break; - case 0x03b: /* VIS I fpack16 */ - CHECK_FPU_FEATURE(dc, VIS1); - cpu_src1_64 = gen_load_fpr_D(dc, rs2); - cpu_dst_32 = gen_dest_fpr_F(dc); - gen_helper_fpack16(cpu_dst_32, cpu_gsr, cpu_src1_64); - gen_store_fpr_F(dc, rd, cpu_dst_32); - break; - case 0x03d: /* VIS I fpackfix */ - CHECK_FPU_FEATURE(dc, VIS1); - cpu_src1_64 = gen_load_fpr_D(dc, rs2); - cpu_dst_32 = gen_dest_fpr_F(dc); - gen_helper_fpackfix(cpu_dst_32, cpu_gsr, cpu_src1_64); - gen_store_fpr_F(dc, rd, cpu_dst_32); - break; - case 0x03e: /* VIS I pdist */ - CHECK_FPU_FEATURE(dc, VIS1); - gen_ne_fop_DDDD(dc, rd, rs1, rs2, gen_helper_pdist); - break; - case 0x048: /* VIS I faligndata */ - CHECK_FPU_FEATURE(dc, VIS1); - gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_faligndata); - break; - case 0x04b: /* VIS I fpmerge */ - CHECK_FPU_FEATURE(dc, VIS1); - gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpmerge); - break; - case 0x04c: /* VIS II bshuffle */ - CHECK_FPU_FEATURE(dc, VIS2); - gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_bshuffle); - break; - case 0x04d: /* VIS I fexpand */ - CHECK_FPU_FEATURE(dc, VIS1); - gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fexpand); - break; - case 0x050: /* VIS I fpadd16 */ - CHECK_FPU_FEATURE(dc, VIS1); - gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd16); - break; - case 0x051: /* VIS I fpadd16s */ - CHECK_FPU_FEATURE(dc, VIS1); - gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpadd16s); - break; - case 0x052: /* VIS I fpadd32 */ - CHECK_FPU_FEATURE(dc, VIS1); - gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd32); - break; - case 0x053: /* VIS I fpadd32s */ - CHECK_FPU_FEATURE(dc, VIS1); - gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_add_i32); - break; - case 0x054: /* VIS I fpsub16 */ - CHECK_FPU_FEATURE(dc, VIS1); - gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub16); - break; - case 0x055: /* VIS I fpsub16s */ - CHECK_FPU_FEATURE(dc, VIS1); - gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpsub16s); - break; - case 0x056: /* VIS I fpsub32 */ - CHECK_FPU_FEATURE(dc, VIS1); - gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub32); - break; - case 0x057: /* VIS I fpsub32s */ - CHECK_FPU_FEATURE(dc, VIS1); - gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_sub_i32); - break; - case 0x060: /* VIS I fzero */ - CHECK_FPU_FEATURE(dc, VIS1); - cpu_dst_64 = gen_dest_fpr_D(dc, rd); - tcg_gen_movi_i64(cpu_dst_64, 0); - gen_store_fpr_D(dc, rd, cpu_dst_64); - break; - case 0x061: /* VIS I fzeros */ - CHECK_FPU_FEATURE(dc, VIS1); - cpu_dst_32 = gen_dest_fpr_F(dc); - tcg_gen_movi_i32(cpu_dst_32, 0); - gen_store_fpr_F(dc, rd, 
cpu_dst_32); - break; - case 0x062: /* VIS I fnor */ - CHECK_FPU_FEATURE(dc, VIS1); - gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nor_i64); - break; - case 0x063: /* VIS I fnors */ - CHECK_FPU_FEATURE(dc, VIS1); - gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nor_i32); - break; - case 0x064: /* VIS I fandnot2 */ - CHECK_FPU_FEATURE(dc, VIS1); - gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_andc_i64); - break; - case 0x065: /* VIS I fandnot2s */ - CHECK_FPU_FEATURE(dc, VIS1); - gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_andc_i32); - break; - case 0x066: /* VIS I fnot2 */ - CHECK_FPU_FEATURE(dc, VIS1); - gen_ne_fop_DD(dc, rd, rs2, tcg_gen_not_i64); - break; - case 0x067: /* VIS I fnot2s */ - CHECK_FPU_FEATURE(dc, VIS1); - gen_ne_fop_FF(dc, rd, rs2, tcg_gen_not_i32); - break; - case 0x068: /* VIS I fandnot1 */ - CHECK_FPU_FEATURE(dc, VIS1); - gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_andc_i64); - break; - case 0x069: /* VIS I fandnot1s */ - CHECK_FPU_FEATURE(dc, VIS1); - gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_andc_i32); - break; - case 0x06a: /* VIS I fnot1 */ - CHECK_FPU_FEATURE(dc, VIS1); - gen_ne_fop_DD(dc, rd, rs1, tcg_gen_not_i64); - break; - case 0x06b: /* VIS I fnot1s */ - CHECK_FPU_FEATURE(dc, VIS1); - gen_ne_fop_FF(dc, rd, rs1, tcg_gen_not_i32); - break; - case 0x06c: /* VIS I fxor */ - CHECK_FPU_FEATURE(dc, VIS1); - gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_xor_i64); - break; - case 0x06d: /* VIS I fxors */ - CHECK_FPU_FEATURE(dc, VIS1); - gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_xor_i32); - break; - case 0x06e: /* VIS I fnand */ - CHECK_FPU_FEATURE(dc, VIS1); - gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nand_i64); - break; - case 0x06f: /* VIS I fnands */ - CHECK_FPU_FEATURE(dc, VIS1); - gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nand_i32); - break; - case 0x070: /* VIS I fand */ - CHECK_FPU_FEATURE(dc, VIS1); - gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_and_i64); - break; - case 0x071: /* VIS I fands */ - CHECK_FPU_FEATURE(dc, VIS1); - gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_and_i32); - break; - case 0x072: /* VIS I fxnor */ - CHECK_FPU_FEATURE(dc, VIS1); - gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_eqv_i64); - break; - case 0x073: /* VIS I fxnors */ - CHECK_FPU_FEATURE(dc, VIS1); - gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_eqv_i32); - break; - case 0x074: /* VIS I fsrc1 */ - CHECK_FPU_FEATURE(dc, VIS1); - cpu_src1_64 = gen_load_fpr_D(dc, rs1); - gen_store_fpr_D(dc, rd, cpu_src1_64); - break; - case 0x075: /* VIS I fsrc1s */ - CHECK_FPU_FEATURE(dc, VIS1); - cpu_src1_32 = gen_load_fpr_F(dc, rs1); - gen_store_fpr_F(dc, rd, cpu_src1_32); - break; - case 0x076: /* VIS I fornot2 */ - CHECK_FPU_FEATURE(dc, VIS1); - gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_orc_i64); - break; - case 0x077: /* VIS I fornot2s */ - CHECK_FPU_FEATURE(dc, VIS1); - gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_orc_i32); - break; - case 0x078: /* VIS I fsrc2 */ - CHECK_FPU_FEATURE(dc, VIS1); - cpu_src1_64 = gen_load_fpr_D(dc, rs2); - gen_store_fpr_D(dc, rd, cpu_src1_64); - break; - case 0x079: /* VIS I fsrc2s */ - CHECK_FPU_FEATURE(dc, VIS1); - cpu_src1_32 = gen_load_fpr_F(dc, rs2); - gen_store_fpr_F(dc, rd, cpu_src1_32); - break; - case 0x07a: /* VIS I fornot1 */ - CHECK_FPU_FEATURE(dc, VIS1); - gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_orc_i64); - break; - case 0x07b: /* VIS I fornot1s */ - CHECK_FPU_FEATURE(dc, VIS1); - gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_orc_i32); - break; - case 0x07c: /* VIS I for */ - CHECK_FPU_FEATURE(dc, VIS1); - gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_or_i64); - break; - case 0x07d: /* VIS I fors */ - 
CHECK_FPU_FEATURE(dc, VIS1); - gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_or_i32); - break; - case 0x07e: /* VIS I fone */ - CHECK_FPU_FEATURE(dc, VIS1); - cpu_dst_64 = gen_dest_fpr_D(dc, rd); - tcg_gen_movi_i64(cpu_dst_64, -1); - gen_store_fpr_D(dc, rd, cpu_dst_64); - break; - case 0x07f: /* VIS I fones */ - CHECK_FPU_FEATURE(dc, VIS1); - cpu_dst_32 = gen_dest_fpr_F(dc); - tcg_gen_movi_i32(cpu_dst_32, -1); - gen_store_fpr_F(dc, rd, cpu_dst_32); - break; - case 0x080: /* VIS I shutdown */ - case 0x081: /* VIS II siam */ - // XXX - goto illegal_insn; - default: - goto illegal_insn; - } -#else - goto ncp_insn; -#endif - } else if (xop == 0x37) { /* V8 CPop2, V9 impdep2 */ -#ifdef TARGET_SPARC64 - goto illegal_insn; -#else - goto ncp_insn; -#endif -#ifdef TARGET_SPARC64 - } else if (xop == 0x39) { /* V9 return */ - save_state(dc); - cpu_src1 = get_src1(dc, insn); - cpu_tmp0 = tcg_temp_new(); - if (IS_IMM) { /* immediate */ - simm = GET_FIELDs(insn, 19, 31); - tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm); - } else { /* register */ - rs2 = GET_FIELD(insn, 27, 31); - if (rs2) { - cpu_src2 = gen_load_gpr(dc, rs2); - tcg_gen_add_tl(cpu_tmp0, cpu_src1, cpu_src2); - } else { - tcg_gen_mov_tl(cpu_tmp0, cpu_src1); - } - } - gen_helper_restore(tcg_env); - gen_mov_pc_npc(dc); - gen_check_align(cpu_tmp0, 3); - tcg_gen_mov_tl(cpu_npc, cpu_tmp0); - dc->npc = DYNAMIC_PC_LOOKUP; - goto jmp_insn; -#endif - } else { - cpu_src1 = get_src1(dc, insn); - cpu_tmp0 = tcg_temp_new(); - if (IS_IMM) { /* immediate */ - simm = GET_FIELDs(insn, 19, 31); - tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm); - } else { /* register */ - rs2 = GET_FIELD(insn, 27, 31); - if (rs2) { - cpu_src2 = gen_load_gpr(dc, rs2); - tcg_gen_add_tl(cpu_tmp0, cpu_src1, cpu_src2); - } else { - tcg_gen_mov_tl(cpu_tmp0, cpu_src1); - } - } - switch (xop) { - case 0x38: /* jmpl */ - { - TCGv t = gen_dest_gpr(dc, rd); - tcg_gen_movi_tl(t, dc->pc); - gen_store_gpr(dc, rd, t); - - gen_mov_pc_npc(dc); - gen_check_align(cpu_tmp0, 3); - gen_address_mask(dc, cpu_tmp0); - tcg_gen_mov_tl(cpu_npc, cpu_tmp0); - dc->npc = DYNAMIC_PC_LOOKUP; - } - goto jmp_insn; -#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64) - case 0x39: /* rett, V9 return */ - { - if (!supervisor(dc)) - goto priv_insn; - gen_mov_pc_npc(dc); - gen_check_align(cpu_tmp0, 3); - tcg_gen_mov_tl(cpu_npc, cpu_tmp0); - dc->npc = DYNAMIC_PC; - gen_helper_rett(tcg_env); - } - goto jmp_insn; -#endif - case 0x3b: /* flush */ - if (!((dc)->def->features & CPU_FEATURE_FLUSH)) - goto unimp_flush; - /* nop */ - break; - case 0x3c: /* save */ - gen_helper_save(tcg_env); - gen_store_gpr(dc, rd, cpu_tmp0); - break; - case 0x3d: /* restore */ - gen_helper_restore(tcg_env); - gen_store_gpr(dc, rd, cpu_tmp0); - break; -#if !defined(CONFIG_USER_ONLY) && defined(TARGET_SPARC64) - case 0x3e: /* V9 done/retry */ - { - switch (rd) { - case 0: - if (!supervisor(dc)) - goto priv_insn; - dc->npc = DYNAMIC_PC; - dc->pc = DYNAMIC_PC; - translator_io_start(&dc->base); - gen_helper_done(tcg_env); - goto jmp_insn; - case 1: - if (!supervisor(dc)) - goto priv_insn; - dc->npc = DYNAMIC_PC; - dc->pc = DYNAMIC_PC; - translator_io_start(&dc->base); - gen_helper_retry(tcg_env); - goto jmp_insn; - default: - goto illegal_insn; - } - } - break; -#endif - default: - goto illegal_insn; - } - } - break; - } - break; - case 3: /* load/store instructions */ - { - unsigned int xop = GET_FIELD(insn, 7, 12); - /* ??? gen_address_mask prevents us from using a source - register directly. Always generate a temporary. 
*/ - TCGv cpu_addr = tcg_temp_new(); - - tcg_gen_mov_tl(cpu_addr, get_src1(dc, insn)); - if (xop == 0x3c || xop == 0x3e) { - /* V9 casa/casxa : no offset */ - } else if (IS_IMM) { /* immediate */ - simm = GET_FIELDs(insn, 19, 31); - if (simm != 0) { - tcg_gen_addi_tl(cpu_addr, cpu_addr, simm); - } - } else { /* register */ - rs2 = GET_FIELD(insn, 27, 31); - if (rs2 != 0) { - tcg_gen_add_tl(cpu_addr, cpu_addr, gen_load_gpr(dc, rs2)); - } - } - if (xop < 4 || (xop > 7 && xop < 0x14 && xop != 0x0e) || - (xop > 0x17 && xop <= 0x1d ) || - (xop > 0x2c && xop <= 0x33) || xop == 0x1f || xop == 0x3d) { - TCGv cpu_val = gen_dest_gpr(dc, rd); - - switch (xop) { - case 0x0: /* ld, V9 lduw, load unsigned word */ - gen_address_mask(dc, cpu_addr); - tcg_gen_qemu_ld_tl(cpu_val, cpu_addr, - dc->mem_idx, MO_TEUL | MO_ALIGN); - break; - case 0x1: /* ldub, load unsigned byte */ - gen_address_mask(dc, cpu_addr); - tcg_gen_qemu_ld_tl(cpu_val, cpu_addr, - dc->mem_idx, MO_UB); - break; - case 0x2: /* lduh, load unsigned halfword */ - gen_address_mask(dc, cpu_addr); - tcg_gen_qemu_ld_tl(cpu_val, cpu_addr, - dc->mem_idx, MO_TEUW | MO_ALIGN); - break; - case 0x3: /* ldd, load double word */ - if (rd & 1) - goto illegal_insn; - else { - TCGv_i64 t64; - - gen_address_mask(dc, cpu_addr); - t64 = tcg_temp_new_i64(); - tcg_gen_qemu_ld_i64(t64, cpu_addr, - dc->mem_idx, MO_TEUQ | MO_ALIGN); - tcg_gen_trunc_i64_tl(cpu_val, t64); - tcg_gen_ext32u_tl(cpu_val, cpu_val); - gen_store_gpr(dc, rd + 1, cpu_val); - tcg_gen_shri_i64(t64, t64, 32); - tcg_gen_trunc_i64_tl(cpu_val, t64); - tcg_gen_ext32u_tl(cpu_val, cpu_val); - } - break; - case 0x9: /* ldsb, load signed byte */ - gen_address_mask(dc, cpu_addr); - tcg_gen_qemu_ld_tl(cpu_val, cpu_addr, dc->mem_idx, MO_SB); - break; - case 0xa: /* ldsh, load signed halfword */ - gen_address_mask(dc, cpu_addr); - tcg_gen_qemu_ld_tl(cpu_val, cpu_addr, - dc->mem_idx, MO_TESW | MO_ALIGN); - break; - case 0xd: /* ldstub */ - gen_ldstub(dc, cpu_val, cpu_addr, dc->mem_idx); - break; - case 0x0f: - /* swap, swap register with memory. Also atomically */ - CHECK_IU_FEATURE(dc, SWAP); - cpu_src1 = gen_load_gpr(dc, rd); - gen_swap(dc, cpu_val, cpu_src1, cpu_addr, - dc->mem_idx, MO_TEUL); - break; -#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64) - case 0x10: /* lda, V9 lduwa, load word alternate */ - gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEUL); - break; - case 0x11: /* lduba, load unsigned byte alternate */ - gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_UB); - break; - case 0x12: /* lduha, load unsigned halfword alternate */ - gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEUW); - break; - case 0x13: /* ldda, load double word alternate */ - if (rd & 1) { - goto illegal_insn; - } - gen_ldda_asi(dc, cpu_addr, insn, rd); - goto skip_move; - case 0x19: /* ldsba, load signed byte alternate */ - gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_SB); - break; - case 0x1a: /* ldsha, load signed halfword alternate */ - gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TESW); - break; - case 0x1d: /* ldstuba -- XXX: should be atomically */ - gen_ldstub_asi(dc, cpu_val, cpu_addr, insn); - break; - case 0x1f: /* swapa, swap reg with alt. memory. 
Also - atomically */ - CHECK_IU_FEATURE(dc, SWAP); - cpu_src1 = gen_load_gpr(dc, rd); - gen_swap_asi(dc, cpu_val, cpu_src1, cpu_addr, insn); - break; +static bool trans_MOVfcc(DisasContext *dc, arg_MOVfcc *a) +{ + TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm); + DisasCompare cmp; -#ifndef TARGET_SPARC64 - case 0x30: /* ldc */ - case 0x31: /* ldcsr */ - case 0x33: /* lddc */ - goto ncp_insn; -#endif -#endif -#ifdef TARGET_SPARC64 - case 0x08: /* V9 ldsw */ - gen_address_mask(dc, cpu_addr); - tcg_gen_qemu_ld_tl(cpu_val, cpu_addr, - dc->mem_idx, MO_TESL | MO_ALIGN); - break; - case 0x0b: /* V9 ldx */ - gen_address_mask(dc, cpu_addr); - tcg_gen_qemu_ld_tl(cpu_val, cpu_addr, - dc->mem_idx, MO_TEUQ | MO_ALIGN); - break; - case 0x18: /* V9 ldswa */ - gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TESL); - break; - case 0x1b: /* V9 ldxa */ - gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEUQ); - break; - case 0x2d: /* V9 prefetch, no effect */ - goto skip_move; - case 0x30: /* V9 ldfa */ - if (gen_trap_ifnofpu(dc)) { - goto jmp_insn; - } - gen_ldf_asi(dc, cpu_addr, insn, 4, rd); - gen_update_fprs_dirty(dc, rd); - goto skip_move; - case 0x33: /* V9 lddfa */ - if (gen_trap_ifnofpu(dc)) { - goto jmp_insn; - } - gen_ldf_asi(dc, cpu_addr, insn, 8, DFPREG(rd)); - gen_update_fprs_dirty(dc, DFPREG(rd)); - goto skip_move; - case 0x3d: /* V9 prefetcha, no effect */ - goto skip_move; - case 0x32: /* V9 ldqfa */ - CHECK_FPU_FEATURE(dc, FLOAT128); - if (gen_trap_ifnofpu(dc)) { - goto jmp_insn; - } - gen_ldf_asi(dc, cpu_addr, insn, 16, QFPREG(rd)); - gen_update_fprs_dirty(dc, QFPREG(rd)); - goto skip_move; -#endif - default: - goto illegal_insn; - } - gen_store_gpr(dc, rd, cpu_val); -#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64) - skip_move: ; -#endif - } else if (xop >= 0x20 && xop < 0x24) { - if (gen_trap_ifnofpu(dc)) { - goto jmp_insn; - } - switch (xop) { - case 0x20: /* ldf, load fpreg */ - gen_address_mask(dc, cpu_addr); - cpu_dst_32 = gen_dest_fpr_F(dc); - tcg_gen_qemu_ld_i32(cpu_dst_32, cpu_addr, - dc->mem_idx, MO_TEUL | MO_ALIGN); - gen_store_fpr_F(dc, rd, cpu_dst_32); - break; - case 0x21: /* ldfsr, V9 ldxfsr */ -#ifdef TARGET_SPARC64 - gen_address_mask(dc, cpu_addr); - if (rd == 1) { - TCGv_i64 t64 = tcg_temp_new_i64(); - tcg_gen_qemu_ld_i64(t64, cpu_addr, - dc->mem_idx, MO_TEUQ | MO_ALIGN); - gen_helper_ldxfsr(cpu_fsr, tcg_env, cpu_fsr, t64); - break; - } -#endif - cpu_dst_32 = tcg_temp_new_i32(); - tcg_gen_qemu_ld_i32(cpu_dst_32, cpu_addr, - dc->mem_idx, MO_TEUL | MO_ALIGN); - gen_helper_ldfsr(cpu_fsr, tcg_env, cpu_fsr, cpu_dst_32); - break; - case 0x22: /* ldqf, load quad fpreg */ - CHECK_FPU_FEATURE(dc, FLOAT128); - gen_address_mask(dc, cpu_addr); - cpu_src1_64 = tcg_temp_new_i64(); - tcg_gen_qemu_ld_i64(cpu_src1_64, cpu_addr, dc->mem_idx, - MO_TEUQ | MO_ALIGN_4); - tcg_gen_addi_tl(cpu_addr, cpu_addr, 8); - cpu_src2_64 = tcg_temp_new_i64(); - tcg_gen_qemu_ld_i64(cpu_src2_64, cpu_addr, dc->mem_idx, - MO_TEUQ | MO_ALIGN_4); - gen_store_fpr_Q(dc, rd, cpu_src1_64, cpu_src2_64); - break; - case 0x23: /* lddf, load double fpreg */ - gen_address_mask(dc, cpu_addr); - cpu_dst_64 = gen_dest_fpr_D(dc, rd); - tcg_gen_qemu_ld_i64(cpu_dst_64, cpu_addr, dc->mem_idx, - MO_TEUQ | MO_ALIGN_4); - gen_store_fpr_D(dc, rd, cpu_dst_64); - break; - default: - goto illegal_insn; - } - } else if (xop < 8 || (xop >= 0x14 && xop < 0x18) || - xop == 0xe || xop == 0x1e) { - TCGv cpu_val = gen_load_gpr(dc, rd); - - switch (xop) { - case 0x4: /* st, store word */ - gen_address_mask(dc, cpu_addr); - 
tcg_gen_qemu_st_tl(cpu_val, cpu_addr, - dc->mem_idx, MO_TEUL | MO_ALIGN); - break; - case 0x5: /* stb, store byte */ - gen_address_mask(dc, cpu_addr); - tcg_gen_qemu_st_tl(cpu_val, cpu_addr, dc->mem_idx, MO_UB); - break; - case 0x6: /* sth, store halfword */ - gen_address_mask(dc, cpu_addr); - tcg_gen_qemu_st_tl(cpu_val, cpu_addr, - dc->mem_idx, MO_TEUW | MO_ALIGN); - break; - case 0x7: /* std, store double word */ - if (rd & 1) - goto illegal_insn; - else { - TCGv_i64 t64; - TCGv lo; - - gen_address_mask(dc, cpu_addr); - lo = gen_load_gpr(dc, rd + 1); - t64 = tcg_temp_new_i64(); - tcg_gen_concat_tl_i64(t64, lo, cpu_val); - tcg_gen_qemu_st_i64(t64, cpu_addr, - dc->mem_idx, MO_TEUQ | MO_ALIGN); - } - break; -#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64) - case 0x14: /* sta, V9 stwa, store word alternate */ - gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEUL); - break; - case 0x15: /* stba, store byte alternate */ - gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_UB); - break; - case 0x16: /* stha, store halfword alternate */ - gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEUW); - break; - case 0x17: /* stda, store double word alternate */ - if (rd & 1) { - goto illegal_insn; - } - gen_stda_asi(dc, cpu_val, cpu_addr, insn, rd); - break; -#endif -#ifdef TARGET_SPARC64 - case 0x0e: /* V9 stx */ - gen_address_mask(dc, cpu_addr); - tcg_gen_qemu_st_tl(cpu_val, cpu_addr, - dc->mem_idx, MO_TEUQ | MO_ALIGN); - break; - case 0x1e: /* V9 stxa */ - gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEUQ); - break; -#endif - default: - goto illegal_insn; - } - } else if (xop > 0x23 && xop < 0x28) { - if (gen_trap_ifnofpu(dc)) { - goto jmp_insn; - } - switch (xop) { - case 0x24: /* stf, store fpreg */ - gen_address_mask(dc, cpu_addr); - cpu_src1_32 = gen_load_fpr_F(dc, rd); - tcg_gen_qemu_st_i32(cpu_src1_32, cpu_addr, - dc->mem_idx, MO_TEUL | MO_ALIGN); - break; - case 0x25: /* stfsr, V9 stxfsr */ - { -#ifdef TARGET_SPARC64 - gen_address_mask(dc, cpu_addr); - if (rd == 1) { - tcg_gen_qemu_st_tl(cpu_fsr, cpu_addr, - dc->mem_idx, MO_TEUQ | MO_ALIGN); - break; - } -#endif - tcg_gen_qemu_st_tl(cpu_fsr, cpu_addr, - dc->mem_idx, MO_TEUL | MO_ALIGN); - } - break; - case 0x26: -#ifdef TARGET_SPARC64 - /* V9 stqf, store quad fpreg */ - CHECK_FPU_FEATURE(dc, FLOAT128); - gen_address_mask(dc, cpu_addr); - /* ??? While stqf only requires 4-byte alignment, it is - legal for the cpu to signal the unaligned exception. - The OS trap handler is then required to fix it up. - For qemu, this avoids having to probe the second page - before performing the first write. 
*/ - cpu_src1_64 = gen_load_fpr_Q0(dc, rd); - tcg_gen_qemu_st_i64(cpu_src1_64, cpu_addr, - dc->mem_idx, MO_TEUQ | MO_ALIGN_16); - tcg_gen_addi_tl(cpu_addr, cpu_addr, 8); - cpu_src2_64 = gen_load_fpr_Q1(dc, rd); - tcg_gen_qemu_st_i64(cpu_src1_64, cpu_addr, - dc->mem_idx, MO_TEUQ); - break; -#else /* !TARGET_SPARC64 */ - /* stdfq, store floating point queue */ -#if defined(CONFIG_USER_ONLY) - goto illegal_insn; -#else - if (!supervisor(dc)) - goto priv_insn; - if (gen_trap_ifnofpu(dc)) { - goto jmp_insn; - } - goto nfq_insn; -#endif -#endif - case 0x27: /* stdf, store double fpreg */ - gen_address_mask(dc, cpu_addr); - cpu_src1_64 = gen_load_fpr_D(dc, rd); - tcg_gen_qemu_st_i64(cpu_src1_64, cpu_addr, dc->mem_idx, - MO_TEUQ | MO_ALIGN_4); - break; - default: - goto illegal_insn; - } - } else if (xop > 0x33 && xop < 0x3f) { - switch (xop) { -#ifdef TARGET_SPARC64 - case 0x34: /* V9 stfa */ - if (gen_trap_ifnofpu(dc)) { - goto jmp_insn; - } - gen_stf_asi(dc, cpu_addr, insn, 4, rd); - break; - case 0x36: /* V9 stqfa */ - { - CHECK_FPU_FEATURE(dc, FLOAT128); - if (gen_trap_ifnofpu(dc)) { - goto jmp_insn; - } - gen_stf_asi(dc, cpu_addr, insn, 16, QFPREG(rd)); - } - break; - case 0x37: /* V9 stdfa */ - if (gen_trap_ifnofpu(dc)) { - goto jmp_insn; - } - gen_stf_asi(dc, cpu_addr, insn, 8, DFPREG(rd)); - break; - case 0x3e: /* V9 casxa */ - rs2 = GET_FIELD(insn, 27, 31); - cpu_src2 = gen_load_gpr(dc, rs2); - gen_casx_asi(dc, cpu_addr, cpu_src2, insn, rd); - break; -#else - case 0x34: /* stc */ - case 0x35: /* stcsr */ - case 0x36: /* stdcq */ - case 0x37: /* stdc */ - goto ncp_insn; -#endif -#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64) - case 0x3c: /* V9 or LEON3 casa */ -#ifndef TARGET_SPARC64 - CHECK_IU_FEATURE(dc, CASA); -#endif - rs2 = GET_FIELD(insn, 27, 31); - cpu_src2 = gen_load_gpr(dc, rs2); - gen_cas_asi(dc, cpu_addr, cpu_src2, insn, rd); - break; -#endif - default: - goto illegal_insn; - } - } else { - goto illegal_insn; - } + if (src2 == NULL) { + return false; + } + gen_fcompare(&cmp, a->cc, a->cond); + return do_mov_cond(dc, &cmp, a->rd, src2); +} + +static bool trans_MOVR(DisasContext *dc, arg_MOVR *a) +{ + TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm); + DisasCompare cmp; + + if (src2 == NULL) { + return false; + } + gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1)); + return do_mov_cond(dc, &cmp, a->rd, src2); +} + +static bool do_add_special(DisasContext *dc, arg_r_r_ri *a, + bool (*func)(DisasContext *dc, int rd, TCGv src)) +{ + TCGv src1, sum; + + /* For simplicity, we under-decoded the rs2 form. */ + if (!a->imm && a->rs2_or_imm & ~0x1f) { + return false; + } + + /* + * Always load the sum into a new temporary. + * This is required to capture the value across a window change, + * e.g. SAVE and RESTORE, and may be optimized away otherwise. + */ + sum = tcg_temp_new(); + src1 = gen_load_gpr(dc, a->rs1); + if (a->imm || a->rs2_or_imm == 0) { + tcg_gen_addi_tl(sum, src1, a->rs2_or_imm); + } else { + tcg_gen_add_tl(sum, src1, cpu_regs[a->rs2_or_imm]); + } + return func(dc, a->rd, sum); +} + +static bool do_jmpl(DisasContext *dc, int rd, TCGv src) +{ + /* + * Preserve pc across advance, so that we can delay + * the writeback to rd until after src is consumed. 
+ */ + target_ulong cur_pc = dc->pc; + + gen_check_align(dc, src, 3); + + gen_mov_pc_npc(dc); + tcg_gen_mov_tl(cpu_npc, src); + gen_address_mask(dc, cpu_npc); + gen_store_gpr(dc, rd, tcg_constant_tl(cur_pc)); + + dc->npc = DYNAMIC_PC_LOOKUP; + return true; +} + +TRANS(JMPL, ALL, do_add_special, a, do_jmpl) + +static bool do_rett(DisasContext *dc, int rd, TCGv src) +{ + if (!supervisor(dc)) { + return raise_priv(dc); + } + + gen_check_align(dc, src, 3); + + gen_mov_pc_npc(dc); + tcg_gen_mov_tl(cpu_npc, src); + gen_helper_rett(tcg_env); + + dc->npc = DYNAMIC_PC; + return true; +} + +TRANS(RETT, 32, do_add_special, a, do_rett) + +static bool do_return(DisasContext *dc, int rd, TCGv src) +{ + gen_check_align(dc, src, 3); + + gen_mov_pc_npc(dc); + tcg_gen_mov_tl(cpu_npc, src); + gen_address_mask(dc, cpu_npc); + + gen_helper_restore(tcg_env); + dc->npc = DYNAMIC_PC_LOOKUP; + return true; +} + +TRANS(RETURN, 64, do_add_special, a, do_return) + +static bool do_save(DisasContext *dc, int rd, TCGv src) +{ + gen_helper_save(tcg_env); + gen_store_gpr(dc, rd, src); + return advance_pc(dc); +} + +TRANS(SAVE, ALL, do_add_special, a, do_save) + +static bool do_restore(DisasContext *dc, int rd, TCGv src) +{ + gen_helper_restore(tcg_env); + gen_store_gpr(dc, rd, src); + return advance_pc(dc); +} + +TRANS(RESTORE, ALL, do_add_special, a, do_restore) + +static bool do_done_retry(DisasContext *dc, bool done) +{ + if (!supervisor(dc)) { + return raise_priv(dc); + } + dc->npc = DYNAMIC_PC; + dc->pc = DYNAMIC_PC; + translator_io_start(&dc->base); + if (done) { + gen_helper_done(tcg_env); + } else { + gen_helper_retry(tcg_env); + } + return true; +} + +TRANS(DONE, 64, do_done_retry, true) +TRANS(RETRY, 64, do_done_retry, false) + +/* + * Major opcode 11 -- load and store instructions + */ + +static TCGv gen_ldst_addr(DisasContext *dc, int rs1, bool imm, int rs2_or_imm) +{ + TCGv addr, tmp = NULL; + + /* For simplicity, we under-decoded the rs2 form. 
*/ + if (!imm && rs2_or_imm & ~0x1f) { + return NULL; + } + + addr = gen_load_gpr(dc, rs1); + if (rs2_or_imm) { + tmp = tcg_temp_new(); + if (imm) { + tcg_gen_addi_tl(tmp, addr, rs2_or_imm); + } else { + tcg_gen_add_tl(tmp, addr, cpu_regs[rs2_or_imm]); } - break; + addr = tmp; } - /* default case for non jump instructions */ - if (dc->npc & 3) { - switch (dc->npc) { - case DYNAMIC_PC: - case DYNAMIC_PC_LOOKUP: - dc->pc = dc->npc; - gen_op_next_insn(); - break; - case JUMP_PC: - /* we can do a static jump */ - gen_branch2(dc, dc->jump_pc[0], dc->jump_pc[1], cpu_cond); - dc->base.is_jmp = DISAS_NORETURN; - break; - default: - g_assert_not_reached(); + if (AM_CHECK(dc)) { + if (!tmp) { + tmp = tcg_temp_new(); } - } else { - dc->pc = dc->npc; - dc->npc = dc->npc + 4; + tcg_gen_ext32u_tl(tmp, addr); + addr = tmp; + } + return addr; +} + +static bool do_ld_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop) +{ + TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm); + DisasASI da; + + if (addr == NULL) { + return false; + } + da = resolve_asi(dc, a->asi, mop); + + reg = gen_dest_gpr(dc, a->rd); + gen_ld_asi(dc, &da, reg, addr); + gen_store_gpr(dc, a->rd, reg); + return advance_pc(dc); +} + +TRANS(LDUW, ALL, do_ld_gpr, a, MO_TEUL) +TRANS(LDUB, ALL, do_ld_gpr, a, MO_UB) +TRANS(LDUH, ALL, do_ld_gpr, a, MO_TEUW) +TRANS(LDSB, ALL, do_ld_gpr, a, MO_SB) +TRANS(LDSH, ALL, do_ld_gpr, a, MO_TESW) +TRANS(LDSW, 64, do_ld_gpr, a, MO_TESL) +TRANS(LDX, 64, do_ld_gpr, a, MO_TEUQ) + +static bool do_st_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop) +{ + TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm); + DisasASI da; + + if (addr == NULL) { + return false; + } + da = resolve_asi(dc, a->asi, mop); + + reg = gen_load_gpr(dc, a->rd); + gen_st_asi(dc, &da, reg, addr); + return advance_pc(dc); +} + +TRANS(STW, ALL, do_st_gpr, a, MO_TEUL) +TRANS(STB, ALL, do_st_gpr, a, MO_UB) +TRANS(STH, ALL, do_st_gpr, a, MO_TEUW) +TRANS(STX, 64, do_st_gpr, a, MO_TEUQ) + +static bool trans_LDD(DisasContext *dc, arg_r_r_ri_asi *a) +{ + TCGv addr; + DisasASI da; + + if (a->rd & 1) { + return false; + } + addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm); + if (addr == NULL) { + return false; + } + da = resolve_asi(dc, a->asi, MO_TEUQ); + gen_ldda_asi(dc, &da, addr, a->rd); + return advance_pc(dc); +} + +static bool trans_STD(DisasContext *dc, arg_r_r_ri_asi *a) +{ + TCGv addr; + DisasASI da; + + if (a->rd & 1) { + return false; + } + addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm); + if (addr == NULL) { + return false; + } + da = resolve_asi(dc, a->asi, MO_TEUQ); + gen_stda_asi(dc, &da, addr, a->rd); + return advance_pc(dc); +} + +static bool trans_LDSTUB(DisasContext *dc, arg_r_r_ri_asi *a) +{ + TCGv addr, reg; + DisasASI da; + + addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm); + if (addr == NULL) { + return false; + } + da = resolve_asi(dc, a->asi, MO_UB); + + reg = gen_dest_gpr(dc, a->rd); + gen_ldstub_asi(dc, &da, reg, addr); + gen_store_gpr(dc, a->rd, reg); + return advance_pc(dc); +} + +static bool trans_SWAP(DisasContext *dc, arg_r_r_ri_asi *a) +{ + TCGv addr, dst, src; + DisasASI da; + + addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm); + if (addr == NULL) { + return false; + } + da = resolve_asi(dc, a->asi, MO_TEUL); + + dst = gen_dest_gpr(dc, a->rd); + src = gen_load_gpr(dc, a->rd); + gen_swap_asi(dc, &da, dst, src, addr); + gen_store_gpr(dc, a->rd, dst); + return advance_pc(dc); +} + +static bool do_casa(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop) +{ + TCGv 
addr, o, n, c; + DisasASI da; + + addr = gen_ldst_addr(dc, a->rs1, true, 0); + if (addr == NULL) { + return false; + } + da = resolve_asi(dc, a->asi, mop); + + o = gen_dest_gpr(dc, a->rd); + n = gen_load_gpr(dc, a->rd); + c = gen_load_gpr(dc, a->rs2_or_imm); + gen_cas_asi(dc, &da, o, n, c, addr); + gen_store_gpr(dc, a->rd, o); + return advance_pc(dc); +} + +TRANS(CASA, CASA, do_casa, a, MO_TEUL) +TRANS(CASXA, 64, do_casa, a, MO_TEUQ) + +static bool do_ld_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz) +{ + TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm); + DisasASI da; + + if (addr == NULL) { + return false; + } + if (gen_trap_ifnofpu(dc)) { + return true; + } + if (sz == MO_128 && gen_trap_float128(dc)) { + return true; + } + da = resolve_asi(dc, a->asi, MO_TE | sz); + gen_ldf_asi(dc, &da, sz, addr, a->rd); + gen_update_fprs_dirty(dc, a->rd); + return advance_pc(dc); +} + +TRANS(LDF, ALL, do_ld_fpr, a, MO_32) +TRANS(LDDF, ALL, do_ld_fpr, a, MO_64) +TRANS(LDQF, ALL, do_ld_fpr, a, MO_128) + +TRANS(LDFA, 64, do_ld_fpr, a, MO_32) +TRANS(LDDFA, 64, do_ld_fpr, a, MO_64) +TRANS(LDQFA, 64, do_ld_fpr, a, MO_128) + +static bool do_st_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz) +{ + TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm); + DisasASI da; + + if (addr == NULL) { + return false; + } + if (gen_trap_ifnofpu(dc)) { + return true; + } + if (sz == MO_128 && gen_trap_float128(dc)) { + return true; + } + da = resolve_asi(dc, a->asi, MO_TE | sz); + gen_stf_asi(dc, &da, sz, addr, a->rd); + return advance_pc(dc); +} + +TRANS(STF, ALL, do_st_fpr, a, MO_32) +TRANS(STDF, ALL, do_st_fpr, a, MO_64) +TRANS(STQF, ALL, do_st_fpr, a, MO_128) + +TRANS(STFA, 64, do_st_fpr, a, MO_32) +TRANS(STDFA, 64, do_st_fpr, a, MO_64) +TRANS(STQFA, 64, do_st_fpr, a, MO_128) + +static bool trans_STDFQ(DisasContext *dc, arg_STDFQ *a) +{ + if (!avail_32(dc)) { + return false; + } + if (!supervisor(dc)) { + return raise_priv(dc); + } + if (gen_trap_ifnofpu(dc)) { + return true; } - jmp_insn: - return; - illegal_insn: - gen_exception(dc, TT_ILL_INSN); - return; - unimp_flush: - gen_exception(dc, TT_UNIMP_FLUSH); - return; -#if !defined(CONFIG_USER_ONLY) - priv_insn: - gen_exception(dc, TT_PRIV_INSN); - return; -#endif - nfpu_insn: - gen_op_fpexception_im(dc, FSR_FTT_UNIMPFPOP); - return; -#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64) - nfq_insn: gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR); - return; -#endif -#ifndef TARGET_SPARC64 - ncp_insn: - gen_exception(dc, TT_NCP_INSN); - return; -#endif + return true; +} + +static bool do_ldfsr(DisasContext *dc, arg_r_r_ri *a, MemOp mop, + target_ulong new_mask, target_ulong old_mask) +{ + TCGv tmp, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm); + if (addr == NULL) { + return false; + } + if (gen_trap_ifnofpu(dc)) { + return true; + } + tmp = tcg_temp_new(); + tcg_gen_qemu_ld_tl(tmp, addr, dc->mem_idx, mop | MO_ALIGN); + tcg_gen_andi_tl(tmp, tmp, new_mask); + tcg_gen_andi_tl(cpu_fsr, cpu_fsr, old_mask); + tcg_gen_or_tl(cpu_fsr, cpu_fsr, tmp); + gen_helper_set_fsr(tcg_env, cpu_fsr); + return advance_pc(dc); +} + +TRANS(LDFSR, ALL, do_ldfsr, a, MO_TEUL, FSR_LDFSR_MASK, FSR_LDFSR_OLDMASK) +TRANS(LDXFSR, 64, do_ldfsr, a, MO_TEUQ, FSR_LDXFSR_MASK, FSR_LDXFSR_OLDMASK) + +static bool do_stfsr(DisasContext *dc, arg_r_r_ri *a, MemOp mop) +{ + TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm); + if (addr == NULL) { + return false; + } + if (gen_trap_ifnofpu(dc)) { + return true; + } + tcg_gen_qemu_st_tl(cpu_fsr, addr, dc->mem_idx, 
mop | MO_ALIGN); + return advance_pc(dc); } +TRANS(STFSR, ALL, do_stfsr, a, MO_TEUL) +TRANS(STXFSR, 64, do_stfsr, a, MO_TEUQ) + +static bool do_fc(DisasContext *dc, int rd, bool c) +{ + uint64_t mask; + + if (gen_trap_ifnofpu(dc)) { + return true; + } + + if (rd & 1) { + mask = MAKE_64BIT_MASK(0, 32); + } else { + mask = MAKE_64BIT_MASK(32, 32); + } + if (c) { + tcg_gen_ori_i64(cpu_fpr[rd / 2], cpu_fpr[rd / 2], mask); + } else { + tcg_gen_andi_i64(cpu_fpr[rd / 2], cpu_fpr[rd / 2], ~mask); + } + gen_update_fprs_dirty(dc, rd); + return advance_pc(dc); +} + +TRANS(FZEROs, VIS1, do_fc, a->rd, 0) +TRANS(FONEs, VIS1, do_fc, a->rd, 1) + +static bool do_dc(DisasContext *dc, int rd, int64_t c) +{ + if (gen_trap_ifnofpu(dc)) { + return true; + } + + tcg_gen_movi_i64(cpu_fpr[rd / 2], c); + gen_update_fprs_dirty(dc, rd); + return advance_pc(dc); +} + +TRANS(FZEROd, VIS1, do_dc, a->rd, 0) +TRANS(FONEd, VIS1, do_dc, a->rd, -1) + +static bool do_ff(DisasContext *dc, arg_r_r *a, + void (*func)(TCGv_i32, TCGv_i32)) +{ + TCGv_i32 tmp; + + if (gen_trap_ifnofpu(dc)) { + return true; + } + + tmp = gen_load_fpr_F(dc, a->rs); + func(tmp, tmp); + gen_store_fpr_F(dc, a->rd, tmp); + return advance_pc(dc); +} + +TRANS(FMOVs, ALL, do_ff, a, gen_op_fmovs) +TRANS(FNEGs, ALL, do_ff, a, gen_op_fnegs) +TRANS(FABSs, ALL, do_ff, a, gen_op_fabss) +TRANS(FSRCs, VIS1, do_ff, a, tcg_gen_mov_i32) +TRANS(FNOTs, VIS1, do_ff, a, tcg_gen_not_i32) + +static bool do_fd(DisasContext *dc, arg_r_r *a, + void (*func)(TCGv_i32, TCGv_i64)) +{ + TCGv_i32 dst; + TCGv_i64 src; + + if (gen_trap_ifnofpu(dc)) { + return true; + } + + dst = gen_dest_fpr_F(dc); + src = gen_load_fpr_D(dc, a->rs); + func(dst, src); + gen_store_fpr_F(dc, a->rd, dst); + return advance_pc(dc); +} + +TRANS(FPACK16, VIS1, do_fd, a, gen_op_fpack16) +TRANS(FPACKFIX, VIS1, do_fd, a, gen_op_fpackfix) + +static bool do_env_ff(DisasContext *dc, arg_r_r *a, + void (*func)(TCGv_i32, TCGv_env, TCGv_i32)) +{ + TCGv_i32 tmp; + + if (gen_trap_ifnofpu(dc)) { + return true; + } + + gen_op_clear_ieee_excp_and_FTT(); + tmp = gen_load_fpr_F(dc, a->rs); + func(tmp, tcg_env, tmp); + gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env); + gen_store_fpr_F(dc, a->rd, tmp); + return advance_pc(dc); +} + +TRANS(FSQRTs, ALL, do_env_ff, a, gen_helper_fsqrts) +TRANS(FiTOs, ALL, do_env_ff, a, gen_helper_fitos) +TRANS(FsTOi, ALL, do_env_ff, a, gen_helper_fstoi) + +static bool do_env_fd(DisasContext *dc, arg_r_r *a, + void (*func)(TCGv_i32, TCGv_env, TCGv_i64)) +{ + TCGv_i32 dst; + TCGv_i64 src; + + if (gen_trap_ifnofpu(dc)) { + return true; + } + + gen_op_clear_ieee_excp_and_FTT(); + dst = gen_dest_fpr_F(dc); + src = gen_load_fpr_D(dc, a->rs); + func(dst, tcg_env, src); + gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env); + gen_store_fpr_F(dc, a->rd, dst); + return advance_pc(dc); +} + +TRANS(FdTOs, ALL, do_env_fd, a, gen_helper_fdtos) +TRANS(FdTOi, ALL, do_env_fd, a, gen_helper_fdtoi) +TRANS(FxTOs, 64, do_env_fd, a, gen_helper_fxtos) + +static bool do_dd(DisasContext *dc, arg_r_r *a, + void (*func)(TCGv_i64, TCGv_i64)) +{ + TCGv_i64 dst, src; + + if (gen_trap_ifnofpu(dc)) { + return true; + } + + dst = gen_dest_fpr_D(dc, a->rd); + src = gen_load_fpr_D(dc, a->rs); + func(dst, src); + gen_store_fpr_D(dc, a->rd, dst); + return advance_pc(dc); +} + +TRANS(FMOVd, 64, do_dd, a, gen_op_fmovd) +TRANS(FNEGd, 64, do_dd, a, gen_op_fnegd) +TRANS(FABSd, 64, do_dd, a, gen_op_fabsd) +TRANS(FSRCd, VIS1, do_dd, a, tcg_gen_mov_i64) +TRANS(FNOTd, VIS1, do_dd, a, tcg_gen_not_i64) + +static bool do_env_dd(DisasContext 
*dc, arg_r_r *a, + void (*func)(TCGv_i64, TCGv_env, TCGv_i64)) +{ + TCGv_i64 dst, src; + + if (gen_trap_ifnofpu(dc)) { + return true; + } + + gen_op_clear_ieee_excp_and_FTT(); + dst = gen_dest_fpr_D(dc, a->rd); + src = gen_load_fpr_D(dc, a->rs); + func(dst, tcg_env, src); + gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env); + gen_store_fpr_D(dc, a->rd, dst); + return advance_pc(dc); +} + +TRANS(FSQRTd, ALL, do_env_dd, a, gen_helper_fsqrtd) +TRANS(FxTOd, 64, do_env_dd, a, gen_helper_fxtod) +TRANS(FdTOx, 64, do_env_dd, a, gen_helper_fdtox) + +static bool do_env_df(DisasContext *dc, arg_r_r *a, + void (*func)(TCGv_i64, TCGv_env, TCGv_i32)) +{ + TCGv_i64 dst; + TCGv_i32 src; + + if (gen_trap_ifnofpu(dc)) { + return true; + } + + gen_op_clear_ieee_excp_and_FTT(); + dst = gen_dest_fpr_D(dc, a->rd); + src = gen_load_fpr_F(dc, a->rs); + func(dst, tcg_env, src); + gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env); + gen_store_fpr_D(dc, a->rd, dst); + return advance_pc(dc); +} + +TRANS(FiTOd, ALL, do_env_df, a, gen_helper_fitod) +TRANS(FsTOd, ALL, do_env_df, a, gen_helper_fstod) +TRANS(FsTOx, 64, do_env_df, a, gen_helper_fstox) + +static bool trans_FMOVq(DisasContext *dc, arg_FMOVq *a) +{ + int rd, rs; + + if (!avail_64(dc)) { + return false; + } + if (gen_trap_ifnofpu(dc)) { + return true; + } + if (gen_trap_float128(dc)) { + return true; + } + + gen_op_clear_ieee_excp_and_FTT(); + rd = QFPREG(a->rd); + rs = QFPREG(a->rs); + tcg_gen_mov_i64(cpu_fpr[rd / 2], cpu_fpr[rs / 2]); + tcg_gen_mov_i64(cpu_fpr[rd / 2 + 1], cpu_fpr[rs / 2 + 1]); + gen_update_fprs_dirty(dc, rd); + return advance_pc(dc); +} + +static bool do_qq(DisasContext *dc, arg_r_r *a, + void (*func)(TCGv_env)) +{ + if (gen_trap_ifnofpu(dc)) { + return true; + } + if (gen_trap_float128(dc)) { + return true; + } + + gen_op_clear_ieee_excp_and_FTT(); + gen_op_load_fpr_QT1(QFPREG(a->rs)); + func(tcg_env); + gen_op_store_QT0_fpr(QFPREG(a->rd)); + gen_update_fprs_dirty(dc, QFPREG(a->rd)); + return advance_pc(dc); +} + +TRANS(FNEGq, 64, do_qq, a, gen_helper_fnegq) +TRANS(FABSq, 64, do_qq, a, gen_helper_fabsq) + +static bool do_env_qq(DisasContext *dc, arg_r_r *a, + void (*func)(TCGv_env)) +{ + if (gen_trap_ifnofpu(dc)) { + return true; + } + if (gen_trap_float128(dc)) { + return true; + } + + gen_op_clear_ieee_excp_and_FTT(); + gen_op_load_fpr_QT1(QFPREG(a->rs)); + func(tcg_env); + gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env); + gen_op_store_QT0_fpr(QFPREG(a->rd)); + gen_update_fprs_dirty(dc, QFPREG(a->rd)); + return advance_pc(dc); +} + +TRANS(FSQRTq, ALL, do_env_qq, a, gen_helper_fsqrtq) + +static bool do_env_fq(DisasContext *dc, arg_r_r *a, + void (*func)(TCGv_i32, TCGv_env)) +{ + TCGv_i32 dst; + + if (gen_trap_ifnofpu(dc)) { + return true; + } + if (gen_trap_float128(dc)) { + return true; + } + + gen_op_clear_ieee_excp_and_FTT(); + gen_op_load_fpr_QT1(QFPREG(a->rs)); + dst = gen_dest_fpr_F(dc); + func(dst, tcg_env); + gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env); + gen_store_fpr_F(dc, a->rd, dst); + return advance_pc(dc); +} + +TRANS(FqTOs, ALL, do_env_fq, a, gen_helper_fqtos) +TRANS(FqTOi, ALL, do_env_fq, a, gen_helper_fqtoi) + +static bool do_env_dq(DisasContext *dc, arg_r_r *a, + void (*func)(TCGv_i64, TCGv_env)) +{ + TCGv_i64 dst; + + if (gen_trap_ifnofpu(dc)) { + return true; + } + if (gen_trap_float128(dc)) { + return true; + } + + gen_op_clear_ieee_excp_and_FTT(); + gen_op_load_fpr_QT1(QFPREG(a->rs)); + dst = gen_dest_fpr_D(dc, a->rd); + func(dst, tcg_env); + gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env); + 
gen_store_fpr_D(dc, a->rd, dst); + return advance_pc(dc); +} + +TRANS(FqTOd, ALL, do_env_dq, a, gen_helper_fqtod) +TRANS(FqTOx, 64, do_env_dq, a, gen_helper_fqtox) + +static bool do_env_qf(DisasContext *dc, arg_r_r *a, + void (*func)(TCGv_env, TCGv_i32)) +{ + TCGv_i32 src; + + if (gen_trap_ifnofpu(dc)) { + return true; + } + if (gen_trap_float128(dc)) { + return true; + } + + gen_op_clear_ieee_excp_and_FTT(); + src = gen_load_fpr_F(dc, a->rs); + func(tcg_env, src); + gen_op_store_QT0_fpr(QFPREG(a->rd)); + gen_update_fprs_dirty(dc, QFPREG(a->rd)); + return advance_pc(dc); +} + +TRANS(FiTOq, ALL, do_env_qf, a, gen_helper_fitoq) +TRANS(FsTOq, ALL, do_env_qf, a, gen_helper_fstoq) + +static bool do_env_qd(DisasContext *dc, arg_r_r *a, + void (*func)(TCGv_env, TCGv_i64)) +{ + TCGv_i64 src; + + if (gen_trap_ifnofpu(dc)) { + return true; + } + if (gen_trap_float128(dc)) { + return true; + } + + gen_op_clear_ieee_excp_and_FTT(); + src = gen_load_fpr_D(dc, a->rs); + func(tcg_env, src); + gen_op_store_QT0_fpr(QFPREG(a->rd)); + gen_update_fprs_dirty(dc, QFPREG(a->rd)); + return advance_pc(dc); +} + +TRANS(FdTOq, ALL, do_env_qd, a, gen_helper_fdtoq) +TRANS(FxTOq, 64, do_env_qd, a, gen_helper_fxtoq) + +static bool do_fff(DisasContext *dc, arg_r_r_r *a, + void (*func)(TCGv_i32, TCGv_i32, TCGv_i32)) +{ + TCGv_i32 src1, src2; + + if (gen_trap_ifnofpu(dc)) { + return true; + } + + src1 = gen_load_fpr_F(dc, a->rs1); + src2 = gen_load_fpr_F(dc, a->rs2); + func(src1, src1, src2); + gen_store_fpr_F(dc, a->rd, src1); + return advance_pc(dc); +} + +TRANS(FPADD16s, VIS1, do_fff, a, tcg_gen_vec_add16_i32) +TRANS(FPADD32s, VIS1, do_fff, a, tcg_gen_add_i32) +TRANS(FPSUB16s, VIS1, do_fff, a, tcg_gen_vec_sub16_i32) +TRANS(FPSUB32s, VIS1, do_fff, a, tcg_gen_sub_i32) +TRANS(FNORs, VIS1, do_fff, a, tcg_gen_nor_i32) +TRANS(FANDNOTs, VIS1, do_fff, a, tcg_gen_andc_i32) +TRANS(FXORs, VIS1, do_fff, a, tcg_gen_xor_i32) +TRANS(FNANDs, VIS1, do_fff, a, tcg_gen_nand_i32) +TRANS(FANDs, VIS1, do_fff, a, tcg_gen_and_i32) +TRANS(FXNORs, VIS1, do_fff, a, tcg_gen_eqv_i32) +TRANS(FORNOTs, VIS1, do_fff, a, tcg_gen_orc_i32) +TRANS(FORs, VIS1, do_fff, a, tcg_gen_or_i32) + +static bool do_env_fff(DisasContext *dc, arg_r_r_r *a, + void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32)) +{ + TCGv_i32 src1, src2; + + if (gen_trap_ifnofpu(dc)) { + return true; + } + + gen_op_clear_ieee_excp_and_FTT(); + src1 = gen_load_fpr_F(dc, a->rs1); + src2 = gen_load_fpr_F(dc, a->rs2); + func(src1, tcg_env, src1, src2); + gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env); + gen_store_fpr_F(dc, a->rd, src1); + return advance_pc(dc); +} + +TRANS(FADDs, ALL, do_env_fff, a, gen_helper_fadds) +TRANS(FSUBs, ALL, do_env_fff, a, gen_helper_fsubs) +TRANS(FMULs, ALL, do_env_fff, a, gen_helper_fmuls) +TRANS(FDIVs, ALL, do_env_fff, a, gen_helper_fdivs) + +static bool do_ddd(DisasContext *dc, arg_r_r_r *a, + void (*func)(TCGv_i64, TCGv_i64, TCGv_i64)) +{ + TCGv_i64 dst, src1, src2; + + if (gen_trap_ifnofpu(dc)) { + return true; + } + + dst = gen_dest_fpr_D(dc, a->rd); + src1 = gen_load_fpr_D(dc, a->rs1); + src2 = gen_load_fpr_D(dc, a->rs2); + func(dst, src1, src2); + gen_store_fpr_D(dc, a->rd, dst); + return advance_pc(dc); +} + +TRANS(FMUL8x16, VIS1, do_ddd, a, gen_helper_fmul8x16) +TRANS(FMUL8x16AU, VIS1, do_ddd, a, gen_helper_fmul8x16au) +TRANS(FMUL8x16AL, VIS1, do_ddd, a, gen_helper_fmul8x16al) +TRANS(FMUL8SUx16, VIS1, do_ddd, a, gen_helper_fmul8sux16) +TRANS(FMUL8ULx16, VIS1, do_ddd, a, gen_helper_fmul8ulx16) +TRANS(FMULD8SUx16, VIS1, do_ddd, a, 
gen_helper_fmuld8sux16) +TRANS(FMULD8ULx16, VIS1, do_ddd, a, gen_helper_fmuld8ulx16) +TRANS(FPMERGE, VIS1, do_ddd, a, gen_helper_fpmerge) +TRANS(FEXPAND, VIS1, do_ddd, a, gen_helper_fexpand) + +TRANS(FPADD16, VIS1, do_ddd, a, tcg_gen_vec_add16_i64) +TRANS(FPADD32, VIS1, do_ddd, a, tcg_gen_vec_add32_i64) +TRANS(FPSUB16, VIS1, do_ddd, a, tcg_gen_vec_sub16_i64) +TRANS(FPSUB32, VIS1, do_ddd, a, tcg_gen_vec_sub32_i64) +TRANS(FNORd, VIS1, do_ddd, a, tcg_gen_nor_i64) +TRANS(FANDNOTd, VIS1, do_ddd, a, tcg_gen_andc_i64) +TRANS(FXORd, VIS1, do_ddd, a, tcg_gen_xor_i64) +TRANS(FNANDd, VIS1, do_ddd, a, tcg_gen_nand_i64) +TRANS(FANDd, VIS1, do_ddd, a, tcg_gen_and_i64) +TRANS(FXNORd, VIS1, do_ddd, a, tcg_gen_eqv_i64) +TRANS(FORNOTd, VIS1, do_ddd, a, tcg_gen_orc_i64) +TRANS(FORd, VIS1, do_ddd, a, tcg_gen_or_i64) + +TRANS(FPACK32, VIS1, do_ddd, a, gen_op_fpack32) +TRANS(FALIGNDATAg, VIS1, do_ddd, a, gen_op_faligndata) +TRANS(BSHUFFLE, VIS2, do_ddd, a, gen_op_bshuffle) + +static bool do_rdd(DisasContext *dc, arg_r_r_r *a, + void (*func)(TCGv, TCGv_i64, TCGv_i64)) +{ + TCGv_i64 src1, src2; + TCGv dst; + + if (gen_trap_ifnofpu(dc)) { + return true; + } + + dst = gen_dest_gpr(dc, a->rd); + src1 = gen_load_fpr_D(dc, a->rs1); + src2 = gen_load_fpr_D(dc, a->rs2); + func(dst, src1, src2); + gen_store_gpr(dc, a->rd, dst); + return advance_pc(dc); +} + +TRANS(FPCMPLE16, VIS1, do_rdd, a, gen_helper_fcmple16) +TRANS(FPCMPNE16, VIS1, do_rdd, a, gen_helper_fcmpne16) +TRANS(FPCMPGT16, VIS1, do_rdd, a, gen_helper_fcmpgt16) +TRANS(FPCMPEQ16, VIS1, do_rdd, a, gen_helper_fcmpeq16) + +TRANS(FPCMPLE32, VIS1, do_rdd, a, gen_helper_fcmple32) +TRANS(FPCMPNE32, VIS1, do_rdd, a, gen_helper_fcmpne32) +TRANS(FPCMPGT32, VIS1, do_rdd, a, gen_helper_fcmpgt32) +TRANS(FPCMPEQ32, VIS1, do_rdd, a, gen_helper_fcmpeq32) + +static bool do_env_ddd(DisasContext *dc, arg_r_r_r *a, + void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64)) +{ + TCGv_i64 dst, src1, src2; + + if (gen_trap_ifnofpu(dc)) { + return true; + } + + gen_op_clear_ieee_excp_and_FTT(); + dst = gen_dest_fpr_D(dc, a->rd); + src1 = gen_load_fpr_D(dc, a->rs1); + src2 = gen_load_fpr_D(dc, a->rs2); + func(dst, tcg_env, src1, src2); + gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env); + gen_store_fpr_D(dc, a->rd, dst); + return advance_pc(dc); +} + +TRANS(FADDd, ALL, do_env_ddd, a, gen_helper_faddd) +TRANS(FSUBd, ALL, do_env_ddd, a, gen_helper_fsubd) +TRANS(FMULd, ALL, do_env_ddd, a, gen_helper_fmuld) +TRANS(FDIVd, ALL, do_env_ddd, a, gen_helper_fdivd) + +static bool trans_FsMULd(DisasContext *dc, arg_r_r_r *a) +{ + TCGv_i64 dst; + TCGv_i32 src1, src2; + + if (gen_trap_ifnofpu(dc)) { + return true; + } + if (!(dc->def->features & CPU_FEATURE_FSMULD)) { + return raise_unimpfpop(dc); + } + + gen_op_clear_ieee_excp_and_FTT(); + dst = gen_dest_fpr_D(dc, a->rd); + src1 = gen_load_fpr_F(dc, a->rs1); + src2 = gen_load_fpr_F(dc, a->rs2); + gen_helper_fsmuld(dst, tcg_env, src1, src2); + gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env); + gen_store_fpr_D(dc, a->rd, dst); + return advance_pc(dc); +} + +static bool do_dddd(DisasContext *dc, arg_r_r_r *a, + void (*func)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64)) +{ + TCGv_i64 dst, src0, src1, src2; + + if (gen_trap_ifnofpu(dc)) { + return true; + } + + dst = gen_dest_fpr_D(dc, a->rd); + src0 = gen_load_fpr_D(dc, a->rd); + src1 = gen_load_fpr_D(dc, a->rs1); + src2 = gen_load_fpr_D(dc, a->rs2); + func(dst, src0, src1, src2); + gen_store_fpr_D(dc, a->rd, dst); + return advance_pc(dc); +} + +TRANS(PDIST, VIS1, do_dddd, a, gen_helper_pdist) + +static 
bool do_env_qqq(DisasContext *dc, arg_r_r_r *a, + void (*func)(TCGv_env)) +{ + if (gen_trap_ifnofpu(dc)) { + return true; + } + if (gen_trap_float128(dc)) { + return true; + } + + gen_op_clear_ieee_excp_and_FTT(); + gen_op_load_fpr_QT0(QFPREG(a->rs1)); + gen_op_load_fpr_QT1(QFPREG(a->rs2)); + func(tcg_env); + gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env); + gen_op_store_QT0_fpr(QFPREG(a->rd)); + gen_update_fprs_dirty(dc, QFPREG(a->rd)); + return advance_pc(dc); +} + +TRANS(FADDq, ALL, do_env_qqq, a, gen_helper_faddq) +TRANS(FSUBq, ALL, do_env_qqq, a, gen_helper_fsubq) +TRANS(FMULq, ALL, do_env_qqq, a, gen_helper_fmulq) +TRANS(FDIVq, ALL, do_env_qqq, a, gen_helper_fdivq) + +static bool trans_FdMULq(DisasContext *dc, arg_r_r_r *a) +{ + TCGv_i64 src1, src2; + + if (gen_trap_ifnofpu(dc)) { + return true; + } + if (gen_trap_float128(dc)) { + return true; + } + + gen_op_clear_ieee_excp_and_FTT(); + src1 = gen_load_fpr_D(dc, a->rs1); + src2 = gen_load_fpr_D(dc, a->rs2); + gen_helper_fdmulq(tcg_env, src1, src2); + gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env); + gen_op_store_QT0_fpr(QFPREG(a->rd)); + gen_update_fprs_dirty(dc, QFPREG(a->rd)); + return advance_pc(dc); +} + +static bool do_fmovr(DisasContext *dc, arg_FMOVRs *a, bool is_128, + void (*func)(DisasContext *, DisasCompare *, int, int)) +{ + DisasCompare cmp; + + if (gen_trap_ifnofpu(dc)) { + return true; + } + if (is_128 && gen_trap_float128(dc)) { + return true; + } + + gen_op_clear_ieee_excp_and_FTT(); + gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1)); + func(dc, &cmp, a->rd, a->rs2); + return advance_pc(dc); +} + +TRANS(FMOVRs, 64, do_fmovr, a, false, gen_fmovs) +TRANS(FMOVRd, 64, do_fmovr, a, false, gen_fmovd) +TRANS(FMOVRq, 64, do_fmovr, a, true, gen_fmovq) + +static bool do_fmovcc(DisasContext *dc, arg_FMOVscc *a, bool is_128, + void (*func)(DisasContext *, DisasCompare *, int, int)) +{ + DisasCompare cmp; + + if (gen_trap_ifnofpu(dc)) { + return true; + } + if (is_128 && gen_trap_float128(dc)) { + return true; + } + + gen_op_clear_ieee_excp_and_FTT(); + gen_compare(&cmp, a->cc, a->cond, dc); + func(dc, &cmp, a->rd, a->rs2); + return advance_pc(dc); +} + +TRANS(FMOVscc, 64, do_fmovcc, a, false, gen_fmovs) +TRANS(FMOVdcc, 64, do_fmovcc, a, false, gen_fmovd) +TRANS(FMOVqcc, 64, do_fmovcc, a, true, gen_fmovq) + +static bool do_fmovfcc(DisasContext *dc, arg_FMOVsfcc *a, bool is_128, + void (*func)(DisasContext *, DisasCompare *, int, int)) +{ + DisasCompare cmp; + + if (gen_trap_ifnofpu(dc)) { + return true; + } + if (is_128 && gen_trap_float128(dc)) { + return true; + } + + gen_op_clear_ieee_excp_and_FTT(); + gen_fcompare(&cmp, a->cc, a->cond); + func(dc, &cmp, a->rd, a->rs2); + return advance_pc(dc); +} + +TRANS(FMOVsfcc, 64, do_fmovfcc, a, false, gen_fmovs) +TRANS(FMOVdfcc, 64, do_fmovfcc, a, false, gen_fmovd) +TRANS(FMOVqfcc, 64, do_fmovfcc, a, true, gen_fmovq) + +static bool do_fcmps(DisasContext *dc, arg_FCMPs *a, bool e) +{ + TCGv_i32 src1, src2; + + if (avail_32(dc) && a->cc != 0) { + return false; + } + if (gen_trap_ifnofpu(dc)) { + return true; + } + + gen_op_clear_ieee_excp_and_FTT(); + src1 = gen_load_fpr_F(dc, a->rs1); + src2 = gen_load_fpr_F(dc, a->rs2); + if (e) { + gen_op_fcmpes(a->cc, src1, src2); + } else { + gen_op_fcmps(a->cc, src1, src2); + } + return advance_pc(dc); +} + +TRANS(FCMPs, ALL, do_fcmps, a, false) +TRANS(FCMPEs, ALL, do_fcmps, a, true) + +static bool do_fcmpd(DisasContext *dc, arg_FCMPd *a, bool e) +{ + TCGv_i64 src1, src2; + + if (avail_32(dc) && a->cc != 0) { + return false; + } + 
if (gen_trap_ifnofpu(dc)) { + return true; + } + + gen_op_clear_ieee_excp_and_FTT(); + src1 = gen_load_fpr_D(dc, a->rs1); + src2 = gen_load_fpr_D(dc, a->rs2); + if (e) { + gen_op_fcmped(a->cc, src1, src2); + } else { + gen_op_fcmpd(a->cc, src1, src2); + } + return advance_pc(dc); +} + +TRANS(FCMPd, ALL, do_fcmpd, a, false) +TRANS(FCMPEd, ALL, do_fcmpd, a, true) + +static bool do_fcmpq(DisasContext *dc, arg_FCMPq *a, bool e) +{ + if (avail_32(dc) && a->cc != 0) { + return false; + } + if (gen_trap_ifnofpu(dc)) { + return true; + } + if (gen_trap_float128(dc)) { + return true; + } + + gen_op_clear_ieee_excp_and_FTT(); + gen_op_load_fpr_QT0(QFPREG(a->rs1)); + gen_op_load_fpr_QT1(QFPREG(a->rs2)); + if (e) { + gen_op_fcmpeq(a->cc); + } else { + gen_op_fcmpq(a->cc); + } + return advance_pc(dc); +} + +TRANS(FCMPq, ALL, do_fcmpq, a, false) +TRANS(FCMPEq, ALL, do_fcmpq, a, true) + static void sparc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) { DisasContext *dc = container_of(dcbase, DisasContext, base); @@ -5630,7 +5379,10 @@ static void sparc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs) insn = translator_ldl(env, &dc->base, dc->pc); dc->base.pc_next += 4; - disas_sparc_insn(dc, insn); + + if (!decode(dc, insn)) { + gen_exception(dc, TT_ILL_INSN); + } if (dc->base.is_jmp == DISAS_NORETURN) { return; @@ -5643,6 +5395,7 @@ static void sparc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs) static void sparc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs) { DisasContext *dc = container_of(dcbase, DisasContext, base); + DisasDelayException *e, *e_next; bool may_lookup; switch (dc->base.is_jmp) { @@ -5654,10 +5407,10 @@ static void sparc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs) break; } + may_lookup = true; if (dc->pc & 3) { switch (dc->pc) { case DYNAMIC_PC_LOOKUP: - may_lookup = true; break; case DYNAMIC_PC: may_lookup = false; @@ -5667,10 +5420,24 @@ static void sparc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs) } } else { tcg_gen_movi_tl(cpu_pc, dc->pc); - may_lookup = true; } - save_npc(dc); + if (dc->npc & 3) { + switch (dc->npc) { + case JUMP_PC: + gen_generic_branch(dc); + break; + case DYNAMIC_PC: + may_lookup = false; + break; + case DYNAMIC_PC_LOOKUP: + break; + default: + g_assert_not_reached(); + } + } else { + tcg_gen_movi_tl(cpu_npc, dc->npc); + } if (may_lookup) { tcg_gen_lookup_and_goto_ptr(); } else { @@ -5690,6 +5457,19 @@ static void sparc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs) default: g_assert_not_reached(); } + + for (e = dc->delay_excp_list; e ; e = e_next) { + gen_set_label(e->lab); + + tcg_gen_movi_tl(cpu_pc, e->pc); + if (e->npc % 4 == 0) { + tcg_gen_movi_tl(cpu_npc, e->npc); + } + gen_helper_raise_exception(tcg_env, e->excp); + + e_next = e->next; + g_free(e); + } } static void sparc_tr_disas_log(const DisasContextBase *dcbase, @@ -5735,8 +5515,6 @@ void sparc_tcg_init(void) #ifdef TARGET_SPARC64 { &cpu_xcc, offsetof(CPUSPARCState, xcc), "xcc" }, { &cpu_fprs, offsetof(CPUSPARCState, fprs), "fprs" }, -#else - { &cpu_wim, offsetof(CPUSPARCState, wim), "wim" }, #endif { &cpu_cc_op, offsetof(CPUSPARCState, cc_op), "cc_op" }, { &cpu_psr, offsetof(CPUSPARCState, psr), "psr" }, @@ -5745,15 +5523,6 @@ void sparc_tcg_init(void) static const struct { TCGv *ptr; int off; const char *name; } rtl[] = { #ifdef TARGET_SPARC64 { &cpu_gsr, offsetof(CPUSPARCState, gsr), "gsr" }, - { &cpu_tick_cmpr, offsetof(CPUSPARCState, tick_cmpr), "tick_cmpr" }, - { &cpu_stick_cmpr, offsetof(CPUSPARCState, stick_cmpr), "stick_cmpr" }, - { 
diff --git a/target/sparc/vis_helper.c b/target/sparc/vis_helper.c
index 3afdc6975c..7763b16c24 100644
--- a/target/sparc/vis_helper.c
+++ b/target/sparc/vis_helper.c
@@ -275,65 +275,6 @@ uint64_t helper_fexpand(uint64_t src1, uint64_t src2)
     return d.ll;
 }
 
-#define VIS_HELPER(name, F)                                 \
-    uint64_t name##16(uint64_t src1, uint64_t src2)         \
-    {                                                       \
-        VIS64 s, d;                                         \
-                                                            \
-        s.ll = src1;                                        \
-        d.ll = src2;                                        \
-                                                            \
-        d.VIS_W64(0) = F(d.VIS_W64(0), s.VIS_W64(0));       \
-        d.VIS_W64(1) = F(d.VIS_W64(1), s.VIS_W64(1));       \
-        d.VIS_W64(2) = F(d.VIS_W64(2), s.VIS_W64(2));       \
-        d.VIS_W64(3) = F(d.VIS_W64(3), s.VIS_W64(3));       \
-                                                            \
-        return d.ll;                                        \
-    }                                                       \
-                                                            \
-    uint32_t name##16s(uint32_t src1, uint32_t src2)        \
-    {                                                       \
-        VIS32 s, d;                                         \
-                                                            \
-        s.l = src1;                                         \
-        d.l = src2;                                         \
-                                                            \
-        d.VIS_W32(0) = F(d.VIS_W32(0), s.VIS_W32(0));       \
-        d.VIS_W32(1) = F(d.VIS_W32(1), s.VIS_W32(1));       \
-                                                            \
-        return d.l;                                         \
-    }                                                       \
-                                                            \
-    uint64_t name##32(uint64_t src1, uint64_t src2)         \
-    {                                                       \
-        VIS64 s, d;                                         \
-                                                            \
-        s.ll = src1;                                        \
-        d.ll = src2;                                        \
-                                                            \
-        d.VIS_L64(0) = F(d.VIS_L64(0), s.VIS_L64(0));       \
-        d.VIS_L64(1) = F(d.VIS_L64(1), s.VIS_L64(1));       \
-                                                            \
-        return d.ll;                                        \
-    }                                                       \
-                                                            \
-    uint32_t name##32s(uint32_t src1, uint32_t src2)        \
-    {                                                       \
-        VIS32 s, d;                                         \
-                                                            \
-        s.l = src1;                                         \
-        d.l = src2;                                         \
-                                                            \
-        d.l = F(d.l, s.l);                                  \
-                                                            \
-        return d.l;                                         \
-    }
-
-#define FADD(a, b) ((a) + (b))
-#define FSUB(a, b) ((a) - (b))
-VIS_HELPER(helper_fpadd, FADD)
-VIS_HELPER(helper_fpsub, FSUB)
-
 #define VIS_CMPHELPER(name, F)                              \
     uint64_t name##16(uint64_t src1, uint64_t src2)         \
     {                                                       \
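[Editor's note] The deleted VIS_HELPER macro stamped out the lane-wise add/sub helpers (fpadd16, fpadd16s, fpadd32, fpadd32s, and the fpsub variants); after this series those operations are presumably emitted inline as generic vector ops rather than via helper calls. For reference, here is a self-contained sketch of the semantics the 64-bit fpadd16 expansion implemented: four independent 16-bit adds packed in one uint64_t, with no carry propagation between lanes. This is reference code written for this note, not code taken from QEMU.

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t fpadd16_ref(uint64_t a, uint64_t b)
    {
        uint64_t r = 0;
        for (int lane = 0; lane < 4; lane++) {
            uint16_t x = a >> (lane * 16);
            uint16_t y = b >> (lane * 16);
            /* Each lane wraps modulo 2^16 independently of its neighbours. */
            r |= (uint64_t)(uint16_t)(x + y) << (lane * 16);
        }
        return r;
    }

    int main(void)
    {
        /* 0xffff + 1 wraps within its lane; adjacent lanes are untouched:
         * prints 0002000000110002 */
        printf("%016llx\n",
               (unsigned long long)fpadd16_ref(0x0001ffff00100001ull,
                                               0x0001000100010001ull));
        return 0;
    }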
diff --git a/tests/tcg/i386/test-avx.c b/tests/tcg/i386/test-avx.c
index c39c0e5bce..230e6d84b8 100644
--- a/tests/tcg/i386/test-avx.c
+++ b/tests/tcg/i386/test-avx.c
@@ -236,12 +236,15 @@ v4di val_i64[] = {
 v4di deadbeef = {0xa5a5a5a5deadbeefull, 0xa5a5a5a5deadbeefull,
                  0xa5a5a5a5deadbeefull, 0xa5a5a5a5deadbeefull};
-v4di indexq = {0x000000000000001full, 0x000000000000008full,
-               0xffffffffffffffffull, 0xffffffffffffff5full};
-v4di indexd = {0x00000002000000efull, 0xfffffff500000010ull,
-               0x0000000afffffff0ull, 0x000000000000000eull};
+/* &gather_mem[0x10] is 512 bytes from the base; indices must be >=-64, <64
+ * to account for scaling by 8 */
+v4di indexq = {0x000000000000001full, 0x000000000000003dull,
+               0xffffffffffffffffull, 0xffffffffffffffdfull};
+v4di indexd = {0x00000002ffffffcdull, 0xfffffff500000010ull,
+               0x0000003afffffff0ull, 0x000000000000000eull};
 
 v4di gather_mem[0x20];
+_Static_assert(sizeof(gather_mem) == 1024);
 
 void init_f16reg(v4di *r)
 {
@@ -316,6 +319,8 @@ int main(int argc, char *argv[])
     int i;
 
     init_all(&initI);
+    init_intreg(&initI.ymm[0]);
+    init_intreg(&initI.ymm[9]);
     init_intreg(&initI.ymm[10]);
     init_intreg(&initI.ymm[11]);
     init_intreg(&initI.ymm[12]);
@@ -324,6 +329,8 @@ int main(int argc, char *argv[])
     dump_regs(&initI);
 
     init_all(&initF16);
+    init_f16reg(&initF16.ymm[0]);
+    init_f16reg(&initF16.ymm[9]);
     init_f16reg(&initF16.ymm[10]);
     init_f16reg(&initF16.ymm[11]);
     init_f16reg(&initF16.ymm[12]);
@@ -333,6 +340,8 @@ int main(int argc, char *argv[])
     dump_regs(&initF16);
 
     init_all(&initF32);
+    init_f32reg(&initF32.ymm[0]);
+    init_f32reg(&initF32.ymm[9]);
     init_f32reg(&initF32.ymm[10]);
     init_f32reg(&initF32.ymm[11]);
     init_f32reg(&initF32.ymm[12]);
@@ -342,6 +351,8 @@ int main(int argc, char *argv[])
     dump_regs(&initF32);
 
     init_all(&initF64);
+    init_f64reg(&initF64.ymm[0]);
+    init_f64reg(&initF64.ymm[9]);
     init_f64reg(&initF64.ymm[10]);
     init_f64reg(&initF64.ymm[11]);
     init_f64reg(&initF64.ymm[12]);
diff --git a/tests/tcg/i386/test-avx.py b/tests/tcg/i386/test-avx.py
index 641a2ef69e..6063fb2d11 100755
--- a/tests/tcg/i386/test-avx.py
+++ b/tests/tcg/i386/test-avx.py
@@ -9,7 +9,7 @@ from fnmatch import fnmatch
 archs = [
     "SSE", "SSE2", "SSE3", "SSSE3", "SSE4_1", "SSE4_2",
     "AES", "AVX", "AVX2", "AES+AVX", "VAES+AVX",
-    "F16C", "FMA",
+    "F16C", "FMA", "SHA",
 ]
 
 ignore = set(["FISTTP",
@@ -43,6 +43,7 @@ imask = {
     'vPS[LR][AL][WDQ]': 0x3f,
     'vPS[RL]LDQ': 0x1f,
     'vROUND[PS][SD]': 0x7,
+    'SHA1RNDS4': 0x03,
     'vSHUFPD': 0x0f,
     'vSHUFPS': 0xff,
     'vAESKEYGENASSIST': 0xff,
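[Editor's note] The new comment in test-avx.c pins down why the gather indices changed: gather_mem holds 0x20 v4di entries of 32 bytes each (1024 bytes total, which the added _Static_assert now checks), the gather base sits at &gather_mem[0x10] (byte offset 512), and a qword gather scales each index by 8, so an index i touches bytes [512 + 8*i, 512 + 8*i + 8) and stays inside the array exactly when -64 <= i < 64. A quick standalone check of that bound, written in C for this note (function name and structure are ours, not the test's):

    #include <assert.h>
    #include <stdint.h>

    static int index_in_bounds(int64_t i)
    {
        int64_t off = 512 + 8 * i;          /* first byte the gather touches */
        return off >= 0 && off + 8 <= 1024; /* 8-byte element must fit       */
    }

    int main(void)
    {
        /* The stated bound is tight on both sides. */
        assert(index_in_bounds(-64) && index_in_bounds(63));
        assert(!index_in_bounds(-65) && !index_in_bounds(64));
        /* The new indexq values 0x1f, 0x3d, -1 and -0x21 all qualify,
         * unlike the old out-of-range 0x8f and -0xa1. */
        assert(index_in_bounds(0x1f) && index_in_bounds(0x3d));
        assert(index_in_bounds(-1) && index_in_bounds(-0x21));
        return 0;
    }

The test-avx.py hunk is related plumbing: it adds SHA to the tested extension sets and caps SHA1RNDS4's immediate at 0x03, since only the two low bits select the round constant.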