Diffstat (limited to 'hw')
114 files changed, 876 insertions, 1099 deletions
diff --git a/hw/9pfs/cofile.c b/hw/9pfs/cofile.c index 9c5344039e..71174c3e4a 100644 --- a/hw/9pfs/cofile.c +++ b/hw/9pfs/cofile.c @@ -252,7 +252,7 @@ int coroutine_fn v9fs_co_pwritev(V9fsPDU *pdu, V9fsFidState *fidp, if (v9fs_request_cancelled(pdu)) { return -EINTR; } - fsdev_co_throttle_request(s->ctx.fst, true, iov, iovcnt); + fsdev_co_throttle_request(s->ctx.fst, THROTTLE_WRITE, iov, iovcnt); v9fs_co_run_in_worker( { err = s->ops->pwritev(&s->ctx, &fidp->fs, iov, iovcnt, offset); @@ -272,7 +272,7 @@ int coroutine_fn v9fs_co_preadv(V9fsPDU *pdu, V9fsFidState *fidp, if (v9fs_request_cancelled(pdu)) { return -EINTR; } - fsdev_co_throttle_request(s->ctx.fst, false, iov, iovcnt); + fsdev_co_throttle_request(s->ctx.fst, THROTTLE_READ, iov, iovcnt); v9fs_co_run_in_worker( { err = s->ops->preadv(&s->ctx, &fidp->fs, iov, iovcnt, offset); diff --git a/hw/acpi/aml-build.c b/hw/acpi/aml-build.c index ea331a20d1..af66bde0f5 100644 --- a/hw/acpi/aml-build.c +++ b/hw/acpi/aml-build.c @@ -312,7 +312,7 @@ build_prepend_package_length(GArray *package, unsigned length, bool incl_self) /* * PkgLength is the length of the inclusive length of the data * and PkgLength's length itself when used for terms with - * explitit length. + * explicit length. */ length += length_bytes; } @@ -680,7 +680,7 @@ Aml *aml_store(Aml *val, Aml *target) * "Op Operand Operand Target" * pattern. * - * Returns: The newly allocated and composed according to patter Aml object. + * Returns: The newly allocated and composed according to pattern Aml object. */ static Aml * build_opcode_2arg_dst(uint8_t op, Aml *arg1, Aml *arg2, Aml *dst) @@ -2159,7 +2159,7 @@ void build_fadt(GArray *tbl, BIOSLinker *linker, const AcpiFadtData *f, /* FADT Minor Version */ build_append_int_noprefix(tbl, f->minor_ver, 1); } else { - build_append_int_noprefix(tbl, 0, 3); /* Reserved upto ACPI 5.0 */ + build_append_int_noprefix(tbl, 0, 3); /* Reserved up to ACPI 5.0 */ } build_append_int_noprefix(tbl, 0, 8); /* X_FIRMWARE_CTRL */ diff --git a/hw/acpi/hmat.c b/hw/acpi/hmat.c index 3a6d51282a..2d5e199ba9 100644 --- a/hw/acpi/hmat.c +++ b/hw/acpi/hmat.c @@ -82,7 +82,7 @@ static void build_hmat_lb(GArray *table_data, HMAT_LB_Info *hmat_lb, uint32_t base; /* Length in bytes for entire structure */ uint32_t lb_length - = 32 /* Table length upto and including Entry Base Unit */ + = 32 /* Table length up to and including Entry Base Unit */ + 4 * num_initiator /* Initiator Proximity Domain List */ + 4 * num_target /* Target Proximity Domain List */ + 2 * num_initiator * num_target; /* Latency or Bandwidth Entries */ diff --git a/hw/acpi/nvdimm.c b/hw/acpi/nvdimm.c index a3b25a92f3..9ba90806f2 100644 --- a/hw/acpi/nvdimm.c +++ b/hw/acpi/nvdimm.c @@ -670,7 +670,8 @@ static void nvdimm_dsm_label_size(NVDIMMDevice *nvdimm, hwaddr dsm_mem_addr) } static uint32_t nvdimm_rw_label_data_check(NVDIMMDevice *nvdimm, - uint32_t offset, uint32_t length) + uint32_t offset, uint32_t length, + bool is_write) { uint32_t ret = NVDIMM_DSM_RET_STATUS_INVALID; @@ -690,6 +691,10 @@ static uint32_t nvdimm_rw_label_data_check(NVDIMMDevice *nvdimm, return ret; } + if (is_write && nvdimm->readonly) { + return NVDIMM_DSM_RET_STATUS_UNSUPPORT; + } + return NVDIMM_DSM_RET_STATUS_SUCCESS; } @@ -713,7 +718,7 @@ static void nvdimm_dsm_get_label_data(NVDIMMDevice *nvdimm, NvdimmDsmIn *in, get_label_data->length); status = nvdimm_rw_label_data_check(nvdimm, get_label_data->offset, - get_label_data->length); + get_label_data->length, false); if (status != NVDIMM_DSM_RET_STATUS_SUCCESS) { 
nvdimm_dsm_no_payload(status, dsm_mem_addr); return; @@ -752,7 +757,7 @@ static void nvdimm_dsm_set_label_data(NVDIMMDevice *nvdimm, NvdimmDsmIn *in, set_label_data->length); status = nvdimm_rw_label_data_check(nvdimm, set_label_data->offset, - set_label_data->length); + set_label_data->length, true); if (status != NVDIMM_DSM_RET_STATUS_SUCCESS) { nvdimm_dsm_no_payload(status, dsm_mem_addr); return; @@ -1097,7 +1102,7 @@ static void nvdimm_build_common_dsm(Aml *dev, * be treated as an integer. Moreover, the integer size depends on * DSDT tables revision number. If revision number is < 2, integer * size is 32 bits, otherwise it is 64 bits. - * Because of this CreateField() canot be used if RLEN < Integer Size. + * Because of this CreateField() cannot be used if RLEN < Integer Size. * * Also please note that APCI ASL operator SizeOf() doesn't support * Integer and there isn't any other way to figure out the Integer diff --git a/hw/arm/boot.c b/hw/arm/boot.c index 720f22531a..24fa169060 100644 --- a/hw/arm/boot.c +++ b/hw/arm/boot.c @@ -761,6 +761,10 @@ static void do_cpu_reset(void *opaque) if (cpu_isar_feature(aa64_hcx, cpu)) { env->cp15.scr_el3 |= SCR_HXEN; } + if (cpu_isar_feature(aa64_fgt, cpu)) { + env->cp15.scr_el3 |= SCR_FGTEN; + } + /* AArch64 kernels never boot in secure mode */ assert(!info->secure_boot); /* This hook is only supported for AArch32 currently: diff --git a/hw/arm/sbsa-ref.c b/hw/arm/sbsa-ref.c index bc89eb4806..3c7dfcd6dc 100644 --- a/hw/arm/sbsa-ref.c +++ b/hw/arm/sbsa-ref.c @@ -61,6 +61,7 @@ #define ARCH_TIMER_S_EL1_IRQ 13 #define ARCH_TIMER_NS_EL1_IRQ 14 #define ARCH_TIMER_NS_EL2_IRQ 10 +#define ARCH_TIMER_NS_EL2_VIRT_IRQ 12 enum { SBSA_FLASH, @@ -489,6 +490,7 @@ static void create_gic(SBSAMachineState *sms, MemoryRegion *mem) [GTIMER_VIRT] = ARCH_TIMER_VIRT_IRQ, [GTIMER_HYP] = ARCH_TIMER_NS_EL2_IRQ, [GTIMER_SEC] = ARCH_TIMER_S_EL1_IRQ, + [GTIMER_HYPVIRT] = ARCH_TIMER_NS_EL2_VIRT_IRQ, }; for (irq = 0; irq < ARRAY_SIZE(timer_irq); irq++) { diff --git a/hw/block/hd-geometry.c b/hw/block/hd-geometry.c index dae13ab14d..2b0af4430f 100644 --- a/hw/block/hd-geometry.c +++ b/hw/block/hd-geometry.c @@ -50,7 +50,7 @@ struct partition { uint32_t nr_sects; /* nr of sectors in partition */ } QEMU_PACKED; -/* try to guess the disk logical geometry from the MSDOS partition table. +/* try to guess the disk logical geometry from the MS-DOS partition table. Return 0 if OK, -1 if could not guess */ static int guess_disk_lchs(BlockBackend *blk, int *pcylinders, int *pheads, int *psectors) @@ -66,7 +66,7 @@ static int guess_disk_lchs(BlockBackend *blk, if (blk_pread(blk, 0, BDRV_SECTOR_SIZE, buf, 0) < 0) { return -1; } - /* test msdos magic */ + /* test MS-DOS magic */ if (buf[510] != 0x55 || buf[511] != 0xaa) { return -1; } diff --git a/hw/block/pflash_cfi01.c b/hw/block/pflash_cfi01.c index 3c066e3405..62056b1d74 100644 --- a/hw/block/pflash_cfi01.c +++ b/hw/block/pflash_cfi01.c @@ -891,7 +891,7 @@ static Property pflash_cfi01_properties[] = { /* num-blocks is the number of blocks actually visible to the guest, * ie the total size of the device divided by the sector length. * If we're emulating flash devices wired in parallel the actual - * number of blocks per indvidual device will differ. + * number of blocks per individual device will differ. 
*/ DEFINE_PROP_UINT32("num-blocks", PFlashCFI01, nb_blocs, 0), DEFINE_PROP_UINT64("sector-length", PFlashCFI01, sector_len, 0), diff --git a/hw/char/cadence_uart.c b/hw/char/cadence_uart.c index eff0304a18..a2ac062b1e 100644 --- a/hw/char/cadence_uart.c +++ b/hw/char/cadence_uart.c @@ -575,7 +575,7 @@ static int cadence_uart_pre_load(void *opaque) { CadenceUARTState *s = opaque; - /* the frequency will be overriden if the refclk field is present */ + /* the frequency will be overridden if the refclk field is present */ clock_set_hz(s->refclk, UART_DEFAULT_REF_CLK); return 0; } diff --git a/hw/char/imx_serial.c b/hw/char/imx_serial.c index 1b75a89588..377d1d9773 100644 --- a/hw/char/imx_serial.c +++ b/hw/char/imx_serial.c @@ -112,7 +112,7 @@ static void imx_serial_reset_at_boot(DeviceState *dev) imx_serial_reset(s); /* - * enable the uart on boot, so messages from the linux decompresser + * enable the uart on boot, so messages from the linux decompressor * are visible. On real hardware this is done by the boot rom * before anything else is loaded. */ diff --git a/hw/char/serial.c b/hw/char/serial.c index f3094f860f..a32eb25f58 100644 --- a/hw/char/serial.c +++ b/hw/char/serial.c @@ -54,7 +54,7 @@ #define UART_IIR_RLSI 0x06 /* Receiver line status interrupt */ #define UART_IIR_CTI 0x0C /* Character Timeout Indication */ -#define UART_IIR_FENF 0x80 /* Fifo enabled, but not functionning */ +#define UART_IIR_FENF 0x80 /* Fifo enabled, but not functioning */ #define UART_IIR_FE 0xC0 /* Fifo enabled */ /* diff --git a/hw/core/generic-loader.c b/hw/core/generic-loader.c index 4f4d77908d..d4b5c501d8 100644 --- a/hw/core/generic-loader.c +++ b/hw/core/generic-loader.c @@ -24,7 +24,7 @@ * callback that does the memory operations. * This device allows the user to monkey patch memory. To be able to do - * this it needs a backend to manage the datas, the same as other + * this it needs a backend to manage the data, the same as other * memory-related devices. In this case as the backend is so trivial we * have merged it with the frontend instead of creating and maintaining a * separate backend. 
@@ -166,7 +166,7 @@ static void generic_loader_realize(DeviceState *dev, Error **errp) } } - /* Convert the data endiannes */ + /* Convert the data endianness */ if (s->data_be) { s->data = cpu_to_be64(s->data); } else { diff --git a/hw/core/machine.c b/hw/core/machine.c index da699cf4e1..cb38b8cf4c 100644 --- a/hw/core/machine.c +++ b/hw/core/machine.c @@ -38,6 +38,7 @@ #include "exec/confidential-guest-support.h" #include "hw/virtio/virtio.h" #include "hw/virtio/virtio-pci.h" +#include "hw/virtio/virtio-net.h" GlobalProperty hw_compat_8_1[] = {}; const size_t hw_compat_8_1_len = G_N_ELEMENTS(hw_compat_8_1); @@ -45,6 +46,9 @@ const size_t hw_compat_8_1_len = G_N_ELEMENTS(hw_compat_8_1); GlobalProperty hw_compat_8_0[] = { { "migration", "multifd-flush-after-each-section", "on"}, { TYPE_PCI_DEVICE, "x-pcie-ari-nextfn-1", "on" }, + { TYPE_VIRTIO_NET, "host_uso", "off"}, + { TYPE_VIRTIO_NET, "guest_uso4", "off"}, + { TYPE_VIRTIO_NET, "guest_uso6", "off"}, }; const size_t hw_compat_8_0_len = G_N_ELEMENTS(hw_compat_8_0); @@ -1355,6 +1359,7 @@ out: void machine_run_board_init(MachineState *machine, const char *mem_path, Error **errp) { + ERRP_GUARD(); MachineClass *machine_class = MACHINE_GET_CLASS(machine); ObjectClass *oc = object_class_by_name(machine->cpu_type); CPUClass *cc; @@ -1383,9 +1388,13 @@ void machine_run_board_init(MachineState *machine, const char *mem_path, Error * numa_uses_legacy_mem()) { if (object_property_find(object_get_objects_root(), machine_class->default_ram_id)) { - error_setg(errp, "object name '%s' is reserved for the default" - " RAM backend, it can't be used for any other purposes." - " Change the object's 'id' to something else", + error_setg(errp, "object's id '%s' is reserved for the default" + " RAM backend, it can't be used for any other purposes", + machine_class->default_ram_id); + error_append_hint(errp, + "Change the object's 'id' to something else or disable" + " automatic creation of the default RAM backend by setting" + " 'memory-backend=%s' with '-machine'.\n", machine_class->default_ram_id); return; } @@ -1417,7 +1426,7 @@ void machine_run_board_init(MachineState *machine, const char *mem_path, Error * for (i = 0; machine_class->valid_cpu_types[i]; i++) { if (object_class_dynamic_cast(oc, machine_class->valid_cpu_types[i])) { - /* The user specificed CPU is in the valid field, we are + /* The user specified CPU is in the valid field, we are * good to go. */ break; diff --git a/hw/core/qdev-properties-system.c b/hw/core/qdev-properties-system.c index 6d5d43eda2..41b7e682c7 100644 --- a/hw/core/qdev-properties-system.c +++ b/hw/core/qdev-properties-system.c @@ -107,7 +107,7 @@ static void set_drive_helper(Object *obj, Visitor *v, const char *name, } if (*ptr) { - /* BlockBackend alread exists. So, we want to change attached node */ + /* BlockBackend already exists. 
So, we want to change attached node */ blk = *ptr; ctx = blk_get_aio_context(blk); bs = bdrv_lookup_bs(NULL, str, errp); diff --git a/hw/cpu/a15mpcore.c b/hw/cpu/a15mpcore.c index 774ca9987a..bfd8aa5644 100644 --- a/hw/cpu/a15mpcore.c +++ b/hw/cpu/a15mpcore.c @@ -161,7 +161,7 @@ static void a15mp_priv_class_init(ObjectClass *klass, void *data) dc->realize = a15mp_priv_realize; device_class_set_props(dc, a15mp_priv_properties); - /* We currently have no savable state */ + /* We currently have no saveable state */ } static const TypeInfo a15mp_priv_info = { diff --git a/hw/cxl/cxl-events.c b/hw/cxl/cxl-events.c index d161d57456..3ddd6369ad 100644 --- a/hw/cxl/cxl-events.c +++ b/hw/cxl/cxl-events.c @@ -197,7 +197,7 @@ CXLRetCode cxl_event_clear_records(CXLDeviceState *cxlds, CXLClearEventPayload * QEMU_LOCK_GUARD(&log->lock); /* - * Must itterate the queue twice. + * Must iterate the queue twice. * "The device shall verify the event record handles specified in the input * payload are in temporal order. If the device detects an older event * record that will not be cleared when Clear Event Records is executed, diff --git a/hw/cxl/cxl-host.c b/hw/cxl/cxl-host.c index 034c7805b3..f0920da956 100644 --- a/hw/cxl/cxl-host.c +++ b/hw/cxl/cxl-host.c @@ -39,12 +39,6 @@ static void cxl_fixed_memory_window_config(CXLState *cxl_state, return; } - fw->targets = g_malloc0_n(fw->num_targets, sizeof(*fw->targets)); - for (i = 0, target = object->targets; target; i++, target = target->next) { - /* This link cannot be resolved yet, so stash the name for now */ - fw->targets[i] = g_strdup(target->value); - } - if (object->size % (256 * MiB)) { error_setg(errp, "Size of a CXL fixed memory window must be a multiple of 256MiB"); @@ -64,6 +58,12 @@ static void cxl_fixed_memory_window_config(CXLState *cxl_state, fw->enc_int_gran = 0; } + fw->targets = g_malloc0_n(fw->num_targets, sizeof(*fw->targets)); + for (i = 0, target = object->targets; target; i++, target = target->next) { + /* This link cannot be resolved yet, so stash the name for now */ + fw->targets[i] = g_strdup(target->value); + } + cxl_state->fixed_windows = g_list_append(cxl_state->fixed_windows, g_steal_pointer(&fw)); diff --git a/hw/cxl/cxl-mailbox-utils.c b/hw/cxl/cxl-mailbox-utils.c index 02f9b5a870..434ccc5f6e 100644 --- a/hw/cxl/cxl-mailbox-utils.c +++ b/hw/cxl/cxl-mailbox-utils.c @@ -39,7 +39,7 @@ * fill the output data into cmd->payload (overwriting what was there), * setting the length, and returning a valid return code. * - * XXX: The handler need not worry about endianess. The payload is read out of + * XXX: The handler need not worry about endianness. The payload is read out of * a register interface that already deals with it. 
*/ @@ -501,7 +501,7 @@ static CXLRetCode cmd_media_get_poison_list(struct cxl_cmd *cmd, uint16_t out_pl_len; query_start = ldq_le_p(&in->pa); - /* 64 byte alignemnt required */ + /* 64 byte alignment required */ if (query_start & 0x3f) { return CXL_MBOX_INVALID_INPUT; } diff --git a/hw/dma/omap_dma.c b/hw/dma/omap_dma.c index c6e35ba4b8..77797a67b5 100644 --- a/hw/dma/omap_dma.c +++ b/hw/dma/omap_dma.c @@ -247,7 +247,7 @@ static void omap_dma_deactivate_channel(struct omap_dma_s *s, return; } - /* Don't deactive the channel if it is synchronized and the DMA request is + /* Don't deactivate the channel if it is synchronized and the DMA request is active */ if (ch->sync && ch->enable && (s->dma->drqbmp & (1ULL << ch->sync))) return; @@ -422,7 +422,7 @@ static void omap_dma_transfer_generic(struct soc_dma_ch_s *dma) if (ch->fs && ch->bs) { a->pck_element ++; - /* Check if a full packet has beed transferred. */ + /* Check if a full packet has been transferred. */ if (a->pck_element == a->pck_elements) { a->pck_element = 0; diff --git a/hw/hppa/machine.c b/hw/hppa/machine.c index 866e11d208..cf28cb9586 100644 --- a/hw/hppa/machine.c +++ b/hw/hppa/machine.c @@ -133,14 +133,10 @@ static FWCfgState *create_fw_cfg(MachineState *ms) fw_cfg_add_file(fw_cfg, "/etc/firmware-min-version", g_memdup(&val, sizeof(val)), sizeof(val)); - val = cpu_to_le64(HPPA_TLB_ENTRIES); + val = cpu_to_le64(HPPA_TLB_ENTRIES - HPPA_BTLB_ENTRIES); fw_cfg_add_file(fw_cfg, "/etc/cpu/tlb_entries", g_memdup(&val, sizeof(val)), sizeof(val)); - val = cpu_to_le64(HPPA_BTLB_ENTRIES); - fw_cfg_add_file(fw_cfg, "/etc/cpu/btlb_entries", - g_memdup(&val, sizeof(val)), sizeof(val)); - val = cpu_to_le64(HPA_POWER_BUTTON); fw_cfg_add_file(fw_cfg, "/etc/power-button-addr", g_memdup(&val, sizeof(val)), sizeof(val)); @@ -433,6 +429,10 @@ static void hppa_machine_reset(MachineState *ms, ShutdownCause reason) cs->exception_index = -1; cs->halted = 0; + + /* clear any existing TLB and BTLB entries */ + memset(cpu[i]->env.tlb, 0, sizeof(cpu[i]->env.tlb)); + cpu[i]->env.tlb_last = HPPA_BTLB_ENTRIES; } /* already initialized by machine_hppa_init()? */ diff --git a/hw/i386/acpi-build.c b/hw/i386/acpi-build.c index bb12b0ad43..4d2d40bab5 100644 --- a/hw/i386/acpi-build.c +++ b/hw/i386/acpi-build.c @@ -779,7 +779,7 @@ static Aml *initialize_route(Aml *route, const char *link_name, * * Returns an array of 128 routes, one for each device, * based on device location. - * The main goal is to equaly distribute the interrupts + * The main goal is to equally distribute the interrupts * over the 4 existing ACPI links (works only for i440fx). * The hash function is (slot + pin) & 3 -> "LNK[D|A|B|C]". 
* @@ -2079,7 +2079,7 @@ build_srat(GArray *table_data, BIOSLinker *linker, MachineState *machine) } /* - * Insert DMAR scope for PCI bridges and endpoint devcie + * Insert DMAR scope for PCI bridges and endpoint devices */ static void insert_scope(PCIBus *bus, PCIDevice *dev, void *opaque) diff --git a/hw/i386/amd_iommu.c b/hw/i386/amd_iommu.c index 9c77304438..c98a3c6e11 100644 --- a/hw/i386/amd_iommu.c +++ b/hw/i386/amd_iommu.c @@ -259,7 +259,7 @@ static void amdvi_log_command_error(AMDVIState *s, hwaddr addr) pci_word_test_and_set_mask(s->pci.dev.config + PCI_STATUS, PCI_STATUS_SIG_TARGET_ABORT); } -/* log an illegal comand event +/* log an illegal command event * @addr : address of illegal command */ static void amdvi_log_illegalcom_error(AMDVIState *s, uint16_t info, @@ -767,7 +767,7 @@ static void amdvi_mmio_write(void *opaque, hwaddr addr, uint64_t val, break; case AMDVI_MMIO_COMMAND_BASE: amdvi_mmio_reg_write(s, size, val, addr); - /* FIXME - make sure System Software has finished writing incase + /* FIXME - make sure System Software has finished writing in case * it writes in chucks less than 8 bytes in a robust way.As for * now, this hacks works for the linux driver */ diff --git a/hw/i386/intel_iommu.c b/hw/i386/intel_iommu.c index c9961ef752..c0ce896668 100644 --- a/hw/i386/intel_iommu.c +++ b/hw/i386/intel_iommu.c @@ -52,7 +52,7 @@ /* * PCI bus number (or SID) is not reliable since the device is usaully - * initalized before guest can configure the PCI bridge + * initialized before guest can configure the PCI bridge * (SECONDARY_BUS_NUMBER). */ struct vtd_as_key { @@ -1694,7 +1694,7 @@ static bool vtd_switch_address_space(VTDAddressSpace *as) * """ * * We enable per as memory region (iommu_ir_fault) for catching - * the tranlsation for interrupt range through PASID + PT. + * the translation for interrupt range through PASID + PT. */ if (pt && as->pasid != PCI_NO_PASID) { memory_region_set_enabled(&as->iommu_ir_fault, true); diff --git a/hw/i386/kvm/xen_xenstore.c b/hw/i386/kvm/xen_xenstore.c index 133d89e953..660d0b72f9 100644 --- a/hw/i386/kvm/xen_xenstore.c +++ b/hw/i386/kvm/xen_xenstore.c @@ -1156,7 +1156,7 @@ static unsigned int copy_to_ring(XenXenstoreState *s, uint8_t *ptr, /* * This matches the barrier in copy_to_ring() (or the guest's - * equivalent) betweem writing the data to the ring and updating + * equivalent) between writing the data to the ring and updating * rsp_prod. It protects against the pathological case (which * again I think never happened except on Alpha) where our * subsequent writes to the ring could *cross* the read of diff --git a/hw/i386/kvm/xenstore_impl.c b/hw/i386/kvm/xenstore_impl.c index d9732b567e..1d134a6866 100644 --- a/hw/i386/kvm/xenstore_impl.c +++ b/hw/i386/kvm/xenstore_impl.c @@ -1436,7 +1436,7 @@ static void save_node(gpointer key, gpointer value, gpointer opaque) /* * If we already wrote this node, refer to the previous copy. * There's no rename/move in XenStore, so all we need to find - * it is the tx_id of the transation in which it exists. Which + * it is the tx_id of the transaction in which it exists. Which * may be the root tx. 
*/ if (n->serialized_tx != XBT_NULL) { diff --git a/hw/i386/pc.c b/hw/i386/pc.c index 54838c0c41..3db0743f31 100644 --- a/hw/i386/pc.c +++ b/hw/i386/pc.c @@ -436,7 +436,7 @@ static uint64_t ioport80_read(void *opaque, hwaddr addr, unsigned size) return 0xffffffffffffffffULL; } -/* MSDOS compatibility mode FPU exception support */ +/* MS-DOS compatibility mode FPU exception support */ static void ioportF0_write(void *opaque, hwaddr addr, uint64_t data, unsigned size) { @@ -1746,16 +1746,16 @@ static void pc_machine_set_max_fw_size(Object *obj, Visitor *v, } /* - * We don't have a theoretically justifiable exact lower bound on the base - * address of any flash mapping. In practice, the IO-APIC MMIO range is - * [0xFEE00000..0xFEE01000] -- see IO_APIC_DEFAULT_ADDRESS --, leaving free - * only 18MB-4KB below 4G. For now, restrict the cumulative mapping to 8MB in - * size. - */ + * We don't have a theoretically justifiable exact lower bound on the base + * address of any flash mapping. In practice, the IO-APIC MMIO range is + * [0xFEE00000..0xFEE01000] -- see IO_APIC_DEFAULT_ADDRESS --, leaving free + * only 18MiB-4KiB below 4GiB. For now, restrict the cumulative mapping to + * 16MiB in size. + */ if (value > 16 * MiB) { error_setg(errp, "User specified max allowed firmware size %" PRIu64 " is " - "greater than 16MiB. If combined firwmare size exceeds " + "greater than 16MiB. If combined firmware size exceeds " "16MiB the system may not boot, or experience intermittent" "stability issues.", value); diff --git a/hw/input/hid.c b/hw/input/hid.c index e7ecebdf8f..a9c7dd1ce1 100644 --- a/hw/input/hid.c +++ b/hw/input/hid.c @@ -209,7 +209,7 @@ static void hid_pointer_sync(DeviceState *dev) prev->dz += curr->dz; curr->dz = 0; } else { - /* prepate next (clear rel, copy abs + btns) */ + /* prepare next (clear rel, copy abs + btns) */ if (hs->kind == HID_MOUSE) { next->xdx = 0; next->ydy = 0; diff --git a/hw/input/tsc2005.c b/hw/input/tsc2005.c index 555b677173..db2b80e35f 100644 --- a/hw/input/tsc2005.c +++ b/hw/input/tsc2005.c @@ -157,14 +157,14 @@ static uint16_t tsc2005_read(TSC2005State *s, int reg) s->reset = true; return ret; - case 0x8: /* AUX high treshold */ + case 0x8: /* AUX high threshold */ return s->aux_thr[1]; - case 0x9: /* AUX low treshold */ + case 0x9: /* AUX low threshold */ return s->aux_thr[0]; - case 0xa: /* TEMP high treshold */ + case 0xa: /* TEMP high threshold */ return s->temp_thr[1]; - case 0xb: /* TEMP low treshold */ + case 0xb: /* TEMP low threshold */ return s->temp_thr[0]; case 0xc: /* CFR0 */ @@ -186,17 +186,17 @@ static uint16_t tsc2005_read(TSC2005State *s, int reg) static void tsc2005_write(TSC2005State *s, int reg, uint16_t data) { switch (reg) { - case 0x8: /* AUX high treshold */ + case 0x8: /* AUX high threshold */ s->aux_thr[1] = data; break; - case 0x9: /* AUX low treshold */ + case 0x9: /* AUX low threshold */ s->aux_thr[0] = data; break; - case 0xa: /* TEMP high treshold */ + case 0xa: /* TEMP high threshold */ s->temp_thr[1] = data; break; - case 0xb: /* TEMP low treshold */ + case 0xb: /* TEMP low threshold */ s->temp_thr[0] = data; break; diff --git a/hw/intc/loongarch_extioi.c b/hw/intc/loongarch_extioi.c index af75460643..24fb3af8cc 100644 --- a/hw/intc/loongarch_extioi.c +++ b/hw/intc/loongarch_extioi.c @@ -191,7 +191,7 @@ static MemTxResult extioi_writew(void *opaque, hwaddr addr, cpu = attrs.requester_id; old_data = s->coreisr[cpu][index]; s->coreisr[cpu][index] = old_data & ~val; - /* write 1 to clear interrrupt */ + /* write 1 to clear interrupt 
*/ old_data &= val; irq = ctz32(old_data); while (irq != 32) { diff --git a/hw/intc/loongson_liointc.c b/hw/intc/loongson_liointc.c index cc11b544cb..c10fb97a06 100644 --- a/hw/intc/loongson_liointc.c +++ b/hw/intc/loongson_liointc.c @@ -1,5 +1,5 @@ /* - * QEMU Loongson Local I/O interrupt controler. + * QEMU Loongson Local I/O interrupt controller. * * Copyright (c) 2020 Huacai Chen <chenhc@lemote.com> * Copyright (c) 2020 Jiaxun Yang <jiaxun.yang@flygoat.com> diff --git a/hw/intc/omap_intc.c b/hw/intc/omap_intc.c index 647bf324a8..435c47600f 100644 --- a/hw/intc/omap_intc.c +++ b/hw/intc/omap_intc.c @@ -68,7 +68,7 @@ static void omap_inth_sir_update(OMAPIntcState *s, int is_fiq) p_intr = 255; /* Find the interrupt line with the highest dynamic priority. - * Note: 0 denotes the hightest priority. + * Note: 0 denotes the highest priority. * If all interrupts have the same priority, the default order is IRQ_N, * IRQ_N-1,...,IRQ_0. */ for (j = 0; j < s->nbanks; ++j) { diff --git a/hw/intc/pnv_xive.c b/hw/intc/pnv_xive.c index 9b10e90519..da10deceb8 100644 --- a/hw/intc/pnv_xive.c +++ b/hw/intc/pnv_xive.c @@ -210,7 +210,7 @@ static uint64_t pnv_xive_vst_addr_remote(PnvXive *xive, uint32_t type, return 0; } - remote_addr |= idx << xive->pc_shift; + remote_addr |= ((uint64_t)idx) << xive->pc_shift; vst_addr = address_space_ldq_be(&address_space_memory, remote_addr, MEMTXATTRS_UNSPECIFIED, &result); @@ -988,7 +988,7 @@ static void pnv_xive_ic_reg_write(void *opaque, hwaddr offset, */ case VC_SBC_CONFIG: /* Store EOI configuration */ /* - * Configure store EOI if required by firwmare (skiboot has removed + * Configure store EOI if required by firmware (skiboot has removed * support recently though) */ if (val & (VC_SBC_CONF_CPLX_CIST | VC_SBC_CONF_CIST_BOTH)) { diff --git a/hw/intc/spapr_xive.c b/hw/intc/spapr_xive.c index 8bcab2846c..7f701d414b 100644 --- a/hw/intc/spapr_xive.c +++ b/hw/intc/spapr_xive.c @@ -27,7 +27,7 @@ #include "trace.h" /* - * XIVE Virtualization Controller BAR and Thread Managment BAR that we + * XIVE Virtualization Controller BAR and Thread Management BAR that we * use for the ESB pages and the TIMA pages */ #define SPAPR_XIVE_VC_BASE 0x0006010000000000ull diff --git a/hw/intc/spapr_xive_kvm.c b/hw/intc/spapr_xive_kvm.c index 61fe7bd2d3..5789062379 100644 --- a/hw/intc/spapr_xive_kvm.c +++ b/hw/intc/spapr_xive_kvm.c @@ -485,7 +485,7 @@ static int kvmppc_xive_get_queues(SpaprXive *xive, Error **errp) * * Whenever the VM is stopped, the VM change handler sets the source * PQs to PENDING to stop the flow of events and to possibly catch a - * triggered interrupt occuring while the VM is stopped. The previous + * triggered interrupt occurring while the VM is stopped. The previous * state is saved in anticipation of a migration. The XIVE controller * is then synced through KVM to flush any in-flight event * notification and stabilize the EQs. @@ -551,7 +551,7 @@ static void kvmppc_xive_change_state_handler(void *opaque, bool running, /* * PQ is set to PENDING to possibly catch a triggered - * interrupt occuring while the VM is stopped (hotplug event + * interrupt occurring while the VM is stopped (hotplug event * for instance) . */ if (pq != XIVE_ESB_OFF) { @@ -633,7 +633,7 @@ int kvmppc_xive_post_load(SpaprXive *xive, int version_id) /* The KVM XIVE device should be in use */ assert(xive->fd != -1); - /* Restore the ENDT first. The targetting depends on it. */ + /* Restore the ENDT first. The targeting depends on it. 
*/ for (i = 0; i < xive->nr_ends; i++) { if (!xive_end_is_valid(&xive->endt[i])) { continue; diff --git a/hw/intc/xive.c b/hw/intc/xive.c index df3ee0496f..a3585593d8 100644 --- a/hw/intc/xive.c +++ b/hw/intc/xive.c @@ -1608,7 +1608,7 @@ int xive_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx, * * It receives notification requests sent by the IVRE to find one * matching NVT (or more) dispatched on the processor threads. In case - * of a single NVT notification, the process is abreviated and the + * of a single NVT notification, the process is abbreviated and the * thread is signaled if a match is found. In case of a logical server * notification (bits ignored at the end of the NVT identifier), the * IVPE and IVRE select a winning thread using different filters. This diff --git a/hw/intc/xive2.c b/hw/intc/xive2.c index c37ef25d44..98c0d8ba44 100644 --- a/hw/intc/xive2.c +++ b/hw/intc/xive2.c @@ -542,7 +542,7 @@ static void xive2_router_realize(DeviceState *dev, Error **errp) /* * Notification using the END ESe/ESn bit (Event State Buffer for - * escalation and notification). Profide futher coalescing in the + * escalation and notification). Profide further coalescing in the * Router. */ static bool xive2_router_end_es_notify(Xive2Router *xrtr, uint8_t end_blk, @@ -621,7 +621,7 @@ static void xive2_router_end_notify(Xive2Router *xrtr, uint8_t end_blk, /* * Check the END ESn (Event State Buffer for notification) for - * even futher coalescing in the Router + * even further coalescing in the Router */ if (!xive2_end_is_notify(&end)) { /* ESn[Q]=1 : end of notification */ @@ -702,7 +702,7 @@ do_escalation: /* * Check the END ESe (Event State Buffer for escalation) for even - * futher coalescing in the Router + * further coalescing in the Router */ if (!xive2_end_is_uncond_escalation(&end)) { /* ESe[Q]=1 : end of escalation notification */ diff --git a/hw/ipmi/ipmi_bmc_extern.c b/hw/ipmi/ipmi_bmc_extern.c index acf2bab35f..e232d35ba2 100644 --- a/hw/ipmi/ipmi_bmc_extern.c +++ b/hw/ipmi/ipmi_bmc_extern.c @@ -301,7 +301,7 @@ static void handle_msg(IPMIBmcExtern *ibe) ipmi_debug("msg checksum failure\n"); return; } else { - ibe->inpos--; /* Remove checkum */ + ibe->inpos--; /* Remove checksum */ } timer_del(ibe->extern_timer); diff --git a/hw/mem/cxl_type3.c b/hw/mem/cxl_type3.c index 4e314748d3..4cdcb3f7e7 100644 --- a/hw/mem/cxl_type3.c +++ b/hw/mem/cxl_type3.c @@ -1,3 +1,14 @@ +/* + * CXL Type 3 (memory expander) device + * + * Copyright(C) 2020 Intel Corporation. + * + * This work is licensed under the terms of the GNU GPL, version 2. See the + * COPYING file in the top-level directory. + * + * SPDX-License-Identifier: GPL-v2-only + */ + #include "qemu/osdep.h" #include "qemu/units.h" #include "qemu/error-report.h" @@ -538,7 +549,7 @@ static void ct3d_reg_write(void *opaque, hwaddr offset, uint64_t value, FIRST_ERROR_POINTER, cxl_err->type); } else { /* - * If no more errors, then follow recomendation of PCI spec + * If no more errors, then follow recommendation of PCI spec * r6.0 6.2.4.2 to set the first error pointer to a status * bit that will never be used. 
*/ @@ -697,7 +708,7 @@ static void ct3_realize(PCIDevice *pci_dev, Error **errp) PCI_BASE_ADDRESS_MEM_TYPE_64, &ct3d->cxl_dstate.device_registers); - /* MSI(-X) Initailization */ + /* MSI(-X) Initialization */ rc = msix_init_exclusive_bar(pci_dev, msix_num, 4, NULL); if (rc) { goto err_address_space_free; @@ -706,7 +717,7 @@ static void ct3_realize(PCIDevice *pci_dev, Error **errp) msix_vector_use(pci_dev, i); } - /* DOE Initailization */ + /* DOE Initialization */ pcie_doe_init(pci_dev, &ct3d->doe_cdat, 0x190, doe_cdat_prot, true, 0); cxl_cstate->cdat.build_cdat_table = ct3_build_cdat_table; diff --git a/hw/mem/cxl_type3_stubs.c b/hw/mem/cxl_type3_stubs.c index f3e4a9fa72..8ba5d3d1f7 100644 --- a/hw/mem/cxl_type3_stubs.c +++ b/hw/mem/cxl_type3_stubs.c @@ -1,3 +1,13 @@ +/* + * CXL Type 3 (memory expander) device QMP stubs + * + * Copyright(C) 2020 Intel Corporation. + * + * This work is licensed under the terms of the GNU GPL, version 2. See the + * COPYING file in the top-level directory. + * + * SPDX-License-Identifier: GPL-v2-only + */ #include "qemu/osdep.h" #include "qapi/error.h" diff --git a/hw/mem/nvdimm.c b/hw/mem/nvdimm.c index 31080c22c9..1631a7d13f 100644 --- a/hw/mem/nvdimm.c +++ b/hw/mem/nvdimm.c @@ -154,6 +154,9 @@ static void nvdimm_prepare_memory_region(NVDIMMDevice *nvdimm, Error **errp) object_get_canonical_path_component(OBJECT(hostmem))); return; } + if (memory_region_is_rom(mr)) { + nvdimm->readonly = true; + } nvdimm->nvdimm_mr = g_new(MemoryRegion, 1); memory_region_init_alias(nvdimm->nvdimm_mr, OBJECT(dimm), @@ -207,15 +210,16 @@ static void nvdimm_unrealize(PCDIMMDevice *dimm) * label read/write functions. */ static void nvdimm_validate_rw_label_data(NVDIMMDevice *nvdimm, uint64_t size, - uint64_t offset) + uint64_t offset, bool is_write) { assert((nvdimm->label_size >= size + offset) && (offset + size > offset)); + assert(!is_write || !nvdimm->readonly); } static void nvdimm_read_label_data(NVDIMMDevice *nvdimm, void *buf, uint64_t size, uint64_t offset) { - nvdimm_validate_rw_label_data(nvdimm, size, offset); + nvdimm_validate_rw_label_data(nvdimm, size, offset, false); memcpy(buf, nvdimm->label_data + offset, size); } @@ -229,7 +233,7 @@ static void nvdimm_write_label_data(NVDIMMDevice *nvdimm, const void *buf, "pmem", NULL); uint64_t backend_offset; - nvdimm_validate_rw_label_data(nvdimm, size, offset); + nvdimm_validate_rw_label_data(nvdimm, size, offset, true); if (!is_pmem) { memcpy(nvdimm->label_data + offset, buf, size); diff --git a/hw/misc/imx7_ccm.c b/hw/misc/imx7_ccm.c index f135ec7b7e..7539f7fb45 100644 --- a/hw/misc/imx7_ccm.c +++ b/hw/misc/imx7_ccm.c @@ -227,7 +227,7 @@ static uint32_t imx7_ccm_get_clock_frequency(IMXCCMState *dev, IMXClk clock) * have fixed frequencies and we can provide requested frequency * easily. However for CCM provided clocks (like IPG) each GPT * timer can have its own clock root. - * This means we need additionnal information when calling this + * This means we need additional information when calling this * function to know the requester's identity. */ uint32_t freq = 0; diff --git a/hw/misc/mac_via.c b/hw/misc/mac_via.c index 0787a0268d..f84cc68849 100644 --- a/hw/misc/mac_via.c +++ b/hw/misc/mac_via.c @@ -246,7 +246,7 @@ #define vT2CL 0x1000 /* [VIA only] Timer two counter low. */ #define vT2CH 0x1200 /* [VIA only] Timer two counter high. */ #define vSR 0x1400 /* [VIA only] Shift register. */ -#define vACR 0x1600 /* [VIA only] Auxilary control register. */ +#define vACR 0x1600 /* [VIA only] Auxiliary control register. 
*/ #define vPCR 0x1800 /* [VIA only] Peripheral control register. */ /* * CHRP sez never ever to *write* this. diff --git a/hw/misc/stm32f2xx_syscfg.c b/hw/misc/stm32f2xx_syscfg.c index 04c22c2850..19c1e86424 100644 --- a/hw/misc/stm32f2xx_syscfg.c +++ b/hw/misc/stm32f2xx_syscfg.c @@ -94,12 +94,12 @@ static void stm32f2xx_syscfg_write(void *opaque, hwaddr addr, switch (addr) { case SYSCFG_MEMRMP: qemu_log_mask(LOG_UNIMP, - "%s: Changeing the memory mapping isn't supported " \ + "%s: Changing the memory mapping isn't supported " \ "in QEMU\n", __func__); return; case SYSCFG_PMC: qemu_log_mask(LOG_UNIMP, - "%s: Changeing the memory mapping isn't supported " \ + "%s: Changing the memory mapping isn't supported " \ "in QEMU\n", __func__); return; case SYSCFG_EXTICR1: diff --git a/hw/misc/trace-events b/hw/misc/trace-events index e8b2be14c0..bc87cd3670 100644 --- a/hw/misc/trace-events +++ b/hw/misc/trace-events @@ -155,7 +155,7 @@ stm32f4xx_syscfg_read(uint64_t addr) "reg read: addr: 0x%" PRIx64 " " stm32f4xx_syscfg_write(uint64_t addr, uint64_t data) "reg write: addr: 0x%" PRIx64 " val: 0x%" PRIx64 "" # stm32f4xx_exti.c -stm32f4xx_exti_set_irq(int irq, int leve) "Set EXTI: %d to %d" +stm32f4xx_exti_set_irq(int irq, int level) "Set EXTI: %d to %d" stm32f4xx_exti_read(uint64_t addr) "reg read: addr: 0x%" PRIx64 " " stm32f4xx_exti_write(uint64_t addr, uint64_t data) "reg write: addr: 0x%" PRIx64 " val: 0x%" PRIx64 "" diff --git a/hw/misc/zynq_slcr.c b/hw/misc/zynq_slcr.c index 8b70285961..41f38a98e9 100644 --- a/hw/misc/zynq_slcr.c +++ b/hw/misc/zynq_slcr.c @@ -285,7 +285,7 @@ static void zynq_slcr_compute_clocks_internal(ZynqSLCRState *s, uint64_t ps_clk) } /** - * Compute and set the ouputs clocks periods. + * Compute and set the outputs clocks periods. * But do not propagate them further. 
Connected clocks * will not receive any updates (See zynq_slcr_compute_clocks()) */ diff --git a/hw/net/cadence_gem.c b/hw/net/cadence_gem.c index 42ea2411a2..f445d8bb5e 100644 --- a/hw/net/cadence_gem.c +++ b/hw/net/cadence_gem.c @@ -81,8 +81,8 @@ #define GEM_IPGSTRETCH (0x000000BC / 4) /* IPG Stretch reg */ #define GEM_SVLAN (0x000000C0 / 4) /* Stacked VLAN reg */ #define GEM_MODID (0x000000FC / 4) /* Module ID reg */ -#define GEM_OCTTXLO (0x00000100 / 4) /* Octects transmitted Low reg */ -#define GEM_OCTTXHI (0x00000104 / 4) /* Octects transmitted High reg */ +#define GEM_OCTTXLO (0x00000100 / 4) /* Octets transmitted Low reg */ +#define GEM_OCTTXHI (0x00000104 / 4) /* Octets transmitted High reg */ #define GEM_TXCNT (0x00000108 / 4) /* Error-free Frames transmitted */ #define GEM_TXBCNT (0x0000010C / 4) /* Error-free Broadcast Frames */ #define GEM_TXMCNT (0x00000110 / 4) /* Error-free Multicast Frame */ @@ -101,8 +101,8 @@ #define GEM_LATECOLLCNT (0x00000144 / 4) /* Late Collision Frames */ #define GEM_DEFERTXCNT (0x00000148 / 4) /* Deferred Transmission Frames */ #define GEM_CSENSECNT (0x0000014C / 4) /* Carrier Sense Error Counter */ -#define GEM_OCTRXLO (0x00000150 / 4) /* Octects Received register Low */ -#define GEM_OCTRXHI (0x00000154 / 4) /* Octects Received register High */ +#define GEM_OCTRXLO (0x00000150 / 4) /* Octets Received register Low */ +#define GEM_OCTRXHI (0x00000154 / 4) /* Octets Received register High */ #define GEM_RXCNT (0x00000158 / 4) /* Error-free Frames Received */ #define GEM_RXBROADCNT (0x0000015C / 4) /* Error-free Broadcast Frames RX */ #define GEM_RXMULTICNT (0x00000160 / 4) /* Error-free Multicast Frames RX */ @@ -954,7 +954,7 @@ static ssize_t gem_receive(NetClientState *nc, const uint8_t *buf, size_t size) /* Is this destination MAC address "for us" ? */ maf = gem_mac_address_filter(s, buf); if (maf == GEM_RX_REJECT) { - return size; /* no, drop siliently b/c it's not an error */ + return size; /* no, drop silently b/c it's not an error */ } /* Discard packets with receive length error enabled ? 
*/ diff --git a/hw/net/dp8393x.c b/hw/net/dp8393x.c index a596f7fbc6..c6f5fb7dce 100644 --- a/hw/net/dp8393x.c +++ b/hw/net/dp8393x.c @@ -551,7 +551,7 @@ static uint64_t dp8393x_read(void *opaque, hwaddr addr, unsigned int size) val = s->cam[s->regs[SONIC_CEP] & 0xf][SONIC_CAP0 - reg]; } break; - /* All other registers have no special contraints */ + /* All other registers have no special constraints */ default: val = s->regs[reg]; } diff --git a/hw/net/e1000_regs.h b/hw/net/e1000_regs.h index 8a4ce82034..39f4882510 100644 --- a/hw/net/e1000_regs.h +++ b/hw/net/e1000_regs.h @@ -130,7 +130,7 @@ #define E1000_GCR2 0x05B64 /* 3GIO Control Register 2 */ #define E1000_FFLT_DBG 0x05F04 /* Debug Register */ -#define E1000_HICR 0x08F00 /* Host Inteface Control */ +#define E1000_HICR 0x08F00 /* Host Interface Control */ #define E1000_RXMTRL 0x0B634 /* Time sync Rx EtherType and Msg Type - RW */ #define E1000_RXUDP 0x0B638 /* Time Sync Rx UDP Port - RW */ diff --git a/hw/net/e1000e_core.c b/hw/net/e1000e_core.c index f8aeafa16b..e324c02dd5 100644 --- a/hw/net/e1000e_core.c +++ b/hw/net/e1000e_core.c @@ -810,24 +810,24 @@ e1000e_txdesc_writeback(E1000ECore *core, dma_addr_t base, return e1000e_tx_wb_interrupt_cause(core, queue_idx); } -typedef struct E1000E_RingInfo_st { +typedef struct E1000ERingInfo { int dbah; int dbal; int dlen; int dh; int dt; int idx; -} E1000E_RingInfo; +} E1000ERingInfo; static inline bool -e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r) +e1000e_ring_empty(E1000ECore *core, const E1000ERingInfo *r) { return core->mac[r->dh] == core->mac[r->dt] || core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN; } static inline uint64_t -e1000e_ring_base(E1000ECore *core, const E1000E_RingInfo *r) +e1000e_ring_base(E1000ECore *core, const E1000ERingInfo *r) { uint64_t bah = core->mac[r->dbah]; uint64_t bal = core->mac[r->dbal]; @@ -836,13 +836,13 @@ e1000e_ring_base(E1000ECore *core, const E1000E_RingInfo *r) } static inline uint64_t -e1000e_ring_head_descr(E1000ECore *core, const E1000E_RingInfo *r) +e1000e_ring_head_descr(E1000ECore *core, const E1000ERingInfo *r) { return e1000e_ring_base(core, r) + E1000_RING_DESC_LEN * core->mac[r->dh]; } static inline void -e1000e_ring_advance(E1000ECore *core, const E1000E_RingInfo *r, uint32_t count) +e1000e_ring_advance(E1000ECore *core, const E1000ERingInfo *r, uint32_t count) { core->mac[r->dh] += count; @@ -852,7 +852,7 @@ e1000e_ring_advance(E1000ECore *core, const E1000E_RingInfo *r, uint32_t count) } static inline uint32_t -e1000e_ring_free_descr_num(E1000ECore *core, const E1000E_RingInfo *r) +e1000e_ring_free_descr_num(E1000ECore *core, const E1000ERingInfo *r) { trace_e1000e_ring_free_space(r->idx, core->mac[r->dlen], core->mac[r->dh], core->mac[r->dt]); @@ -871,19 +871,19 @@ e1000e_ring_free_descr_num(E1000ECore *core, const E1000E_RingInfo *r) } static inline bool -e1000e_ring_enabled(E1000ECore *core, const E1000E_RingInfo *r) +e1000e_ring_enabled(E1000ECore *core, const E1000ERingInfo *r) { return core->mac[r->dlen] > 0; } static inline uint32_t -e1000e_ring_len(E1000ECore *core, const E1000E_RingInfo *r) +e1000e_ring_len(E1000ECore *core, const E1000ERingInfo *r) { return core->mac[r->dlen]; } typedef struct E1000E_TxRing_st { - const E1000E_RingInfo *i; + const E1000ERingInfo *i; struct e1000e_tx *tx; } E1000E_TxRing; @@ -896,7 +896,7 @@ e1000e_mq_queue_idx(int base_reg_idx, int reg_idx) static inline void e1000e_tx_ring_init(E1000ECore *core, E1000E_TxRing *txr, int idx) { - static const E1000E_RingInfo 
i[E1000E_NUM_QUEUES] = { + static const E1000ERingInfo i[E1000E_NUM_QUEUES] = { { TDBAH, TDBAL, TDLEN, TDH, TDT, 0 }, { TDBAH1, TDBAL1, TDLEN1, TDH1, TDT1, 1 } }; @@ -908,13 +908,13 @@ e1000e_tx_ring_init(E1000ECore *core, E1000E_TxRing *txr, int idx) } typedef struct E1000E_RxRing_st { - const E1000E_RingInfo *i; + const E1000ERingInfo *i; } E1000E_RxRing; static inline void e1000e_rx_ring_init(E1000ECore *core, E1000E_RxRing *rxr, int idx) { - static const E1000E_RingInfo i[E1000E_NUM_QUEUES] = { + static const E1000ERingInfo i[E1000E_NUM_QUEUES] = { { RDBAH0, RDBAL0, RDLEN0, RDH0, RDT0, 0 }, { RDBAH1, RDBAL1, RDLEN1, RDH1, RDT1, 1 } }; @@ -930,7 +930,7 @@ e1000e_start_xmit(E1000ECore *core, const E1000E_TxRing *txr) dma_addr_t base; struct e1000_tx_desc desc; bool ide = false; - const E1000E_RingInfo *txi = txr->i; + const E1000ERingInfo *txi = txr->i; uint32_t cause = E1000_ICS_TXQE; if (!(core->mac[TCTL] & E1000_TCTL_EN)) { @@ -960,7 +960,7 @@ e1000e_start_xmit(E1000ECore *core, const E1000E_TxRing *txr) } static bool -e1000e_has_rxbufs(E1000ECore *core, const E1000E_RingInfo *r, +e1000e_has_rxbufs(E1000ECore *core, const E1000ERingInfo *r, size_t total_size) { uint32_t bufs = e1000e_ring_free_descr_num(core, r); @@ -1397,17 +1397,17 @@ e1000e_pci_dma_write_rx_desc(E1000ECore *core, dma_addr_t addr, } } -typedef struct e1000e_ba_state_st { +typedef struct E1000EBAState { uint16_t written[MAX_PS_BUFFERS]; uint8_t cur_idx; -} e1000e_ba_state; +} E1000EBAState; static inline void -e1000e_write_hdr_to_rx_buffers(E1000ECore *core, - hwaddr ba[MAX_PS_BUFFERS], - e1000e_ba_state *bastate, - const char *data, - dma_addr_t data_len) +e1000e_write_hdr_frag_to_rx_buffers(E1000ECore *core, + hwaddr ba[MAX_PS_BUFFERS], + E1000EBAState *bastate, + const char *data, + dma_addr_t data_len) { assert(data_len <= core->rxbuf_sizes[0] - bastate->written[0]); @@ -1418,11 +1418,11 @@ e1000e_write_hdr_to_rx_buffers(E1000ECore *core, } static void -e1000e_write_to_rx_buffers(E1000ECore *core, - hwaddr ba[MAX_PS_BUFFERS], - e1000e_ba_state *bastate, - const char *data, - dma_addr_t data_len) +e1000e_write_payload_frag_to_rx_buffers(E1000ECore *core, + hwaddr ba[MAX_PS_BUFFERS], + E1000EBAState *bastate, + const char *data, + dma_addr_t data_len) { while (data_len > 0) { uint32_t cur_buf_len = core->rxbuf_sizes[bastate->cur_idx]; @@ -1460,7 +1460,7 @@ e1000e_update_rx_stats(E1000ECore *core, size_t pkt_size, size_t pkt_fcs_size) } static inline bool -e1000e_rx_descr_threshold_hit(E1000ECore *core, const E1000E_RingInfo *rxi) +e1000e_rx_descr_threshold_hit(E1000ECore *core, const E1000ERingInfo *rxi) { return e1000e_ring_free_descr_num(core, rxi) == e1000e_ring_len(core, rxi) >> core->rxbuf_min_shift; @@ -1521,7 +1521,7 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt, struct iovec *iov = net_rx_pkt_get_iovec(pkt); size_t size = net_rx_pkt_get_total_len(pkt); size_t total_size = size + e1000x_fcs_len(core->mac); - const E1000E_RingInfo *rxi; + const E1000ERingInfo *rxi; size_t ps_hdr_len = 0; bool do_ps = e1000e_do_ps(core, pkt, &ps_hdr_len); bool is_first = true; @@ -1530,7 +1530,7 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt, do { hwaddr ba[MAX_PS_BUFFERS]; - e1000e_ba_state bastate = { { 0 } }; + E1000EBAState bastate = { { 0 } }; bool is_last = false; desc_size = total_size - desc_offset; @@ -1568,8 +1568,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt, iov_copy = MIN(ps_hdr_len - ps_hdr_copied, iov->iov_len - iov_ofs); - 
e1000e_write_hdr_to_rx_buffers(core, ba, &bastate, - iov->iov_base, iov_copy); + e1000e_write_hdr_frag_to_rx_buffers(core, ba, + &bastate, + iov->iov_base, + iov_copy); copy_size -= iov_copy; ps_hdr_copied += iov_copy; @@ -1585,8 +1587,8 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt, } else { /* Leave buffer 0 of each descriptor except first */ /* empty as per spec 7.1.5.1 */ - e1000e_write_hdr_to_rx_buffers(core, ba, &bastate, - NULL, 0); + e1000e_write_hdr_frag_to_rx_buffers(core, ba, &bastate, + NULL, 0); } } @@ -1594,8 +1596,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt, while (copy_size) { iov_copy = MIN(copy_size, iov->iov_len - iov_ofs); - e1000e_write_to_rx_buffers(core, ba, &bastate, - iov->iov_base + iov_ofs, iov_copy); + e1000e_write_payload_frag_to_rx_buffers(core, ba, &bastate, + iov->iov_base + + iov_ofs, + iov_copy); copy_size -= iov_copy; iov_ofs += iov_copy; @@ -1607,7 +1611,7 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt, if (desc_offset + desc_size >= total_size) { /* Simulate FCS checksum presence in the last descriptor */ - e1000e_write_to_rx_buffers(core, ba, &bastate, + e1000e_write_payload_frag_to_rx_buffers(core, ba, &bastate, (const char *) &fcs_pad, e1000x_fcs_len(core->mac)); } } @@ -2852,7 +2856,7 @@ e1000e_update_rx_offloads(E1000ECore *core) if (core->has_vnet) { qemu_set_offload(qemu_get_queue(core->owner_nic)->peer, - cso_state, 0, 0, 0, 0); + cso_state, 0, 0, 0, 0, 0, 0); } } diff --git a/hw/net/e1000x_regs.h b/hw/net/e1000x_regs.h index 13760c66d3..cd896fc0ca 100644 --- a/hw/net/e1000x_regs.h +++ b/hw/net/e1000x_regs.h @@ -839,7 +839,7 @@ union e1000_rx_desc_packet_split { #define E1000_RXD_STAT_EOP 0x02 /* End of Packet */ #define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */ #define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ -#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum caculated */ +#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ #define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ #define E1000_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ #define E1000_RXD_STAT_PIF 0x80 /* passed in-exact filter */ diff --git a/hw/net/fsl_etsec/rings.c b/hw/net/fsl_etsec/rings.c index 788463f1b6..42216de6c9 100644 --- a/hw/net/fsl_etsec/rings.c +++ b/hw/net/fsl_etsec/rings.c @@ -365,13 +365,19 @@ void etsec_walk_tx_ring(eTSEC *etsec, int ring_nbr) } while (TRUE); /* Save the Buffer Descriptor Pointers to last bd that was not - * succesfully closed */ + * successfully closed */ etsec->regs[TBPTR0 + ring_nbr].value = bd_addr; /* Set transmit halt THLTx */ etsec->regs[TSTAT].value |= 1 << (31 - ring_nbr); } +/* + * rx_init_frame() ensures we never do more padding than this + * (checksum plus minimum data packet size) + */ +#define MAX_RX_PADDING 64 + static void fill_rx_bd(eTSEC *etsec, eTSEC_rxtx_bd *bd, const uint8_t **buf, @@ -380,9 +386,11 @@ static void fill_rx_bd(eTSEC *etsec, uint16_t to_write; hwaddr bufptr = bd->bufptr + ((hwaddr)(etsec->regs[TBDBPH].value & 0xF) << 32); - uint8_t padd[etsec->rx_padding]; + uint8_t padd[MAX_RX_PADDING]; uint8_t rem; + assert(etsec->rx_padding <= MAX_RX_PADDING); + RING_DEBUG("eTSEC fill Rx buffer @ 0x%016" HWADDR_PRIx " size:%zu(padding + crc:%u) + fcb:%u\n", bufptr, *size, etsec->rx_padding, etsec->rx_fcb_size); @@ -426,7 +434,7 @@ static void fill_rx_bd(eTSEC *etsec, rem = MIN(etsec->regs[MRBLR].value - bd->length, etsec->rx_padding); if (rem > 0) { - memset(padd, 0x0, sizeof(padd)); + memset(padd, 0x0, rem); etsec->rx_padding 
-= rem; *size -= rem; bd->length += rem; diff --git a/hw/net/igb_core.c b/hw/net/igb_core.c index 8b6b75c522..f6a5e2327b 100644 --- a/hw/net/igb_core.c +++ b/hw/net/igb_core.c @@ -267,6 +267,29 @@ igb_rx_use_legacy_descriptor(IGBCore *core) return false; } +typedef struct E1000ERingInfo { + int dbah; + int dbal; + int dlen; + int dh; + int dt; + int idx; +} E1000ERingInfo; + +static uint32_t +igb_rx_queue_desctyp_get(IGBCore *core, const E1000ERingInfo *r) +{ + return core->mac[E1000_SRRCTL(r->idx) >> 2] & E1000_SRRCTL_DESCTYPE_MASK; +} + +static bool +igb_rx_use_ps_descriptor(IGBCore *core, const E1000ERingInfo *r) +{ + uint32_t desctyp = igb_rx_queue_desctyp_get(core, r); + return desctyp == E1000_SRRCTL_DESCTYPE_HDR_SPLIT || + desctyp == E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS; +} + static inline bool igb_rss_enabled(IGBCore *core) { @@ -694,24 +717,15 @@ static uint32_t igb_rx_wb_eic(IGBCore *core, int queue_idx) return (ent & E1000_IVAR_VALID) ? BIT(ent & 0x1f) : 0; } -typedef struct E1000E_RingInfo_st { - int dbah; - int dbal; - int dlen; - int dh; - int dt; - int idx; -} E1000E_RingInfo; - static inline bool -igb_ring_empty(IGBCore *core, const E1000E_RingInfo *r) +igb_ring_empty(IGBCore *core, const E1000ERingInfo *r) { return core->mac[r->dh] == core->mac[r->dt] || core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN; } static inline uint64_t -igb_ring_base(IGBCore *core, const E1000E_RingInfo *r) +igb_ring_base(IGBCore *core, const E1000ERingInfo *r) { uint64_t bah = core->mac[r->dbah]; uint64_t bal = core->mac[r->dbal]; @@ -720,13 +734,13 @@ igb_ring_base(IGBCore *core, const E1000E_RingInfo *r) } static inline uint64_t -igb_ring_head_descr(IGBCore *core, const E1000E_RingInfo *r) +igb_ring_head_descr(IGBCore *core, const E1000ERingInfo *r) { return igb_ring_base(core, r) + E1000_RING_DESC_LEN * core->mac[r->dh]; } static inline void -igb_ring_advance(IGBCore *core, const E1000E_RingInfo *r, uint32_t count) +igb_ring_advance(IGBCore *core, const E1000ERingInfo *r, uint32_t count) { core->mac[r->dh] += count; @@ -736,7 +750,7 @@ igb_ring_advance(IGBCore *core, const E1000E_RingInfo *r, uint32_t count) } static inline uint32_t -igb_ring_free_descr_num(IGBCore *core, const E1000E_RingInfo *r) +igb_ring_free_descr_num(IGBCore *core, const E1000ERingInfo *r) { trace_e1000e_ring_free_space(r->idx, core->mac[r->dlen], core->mac[r->dh], core->mac[r->dt]); @@ -755,13 +769,13 @@ igb_ring_free_descr_num(IGBCore *core, const E1000E_RingInfo *r) } static inline bool -igb_ring_enabled(IGBCore *core, const E1000E_RingInfo *r) +igb_ring_enabled(IGBCore *core, const E1000ERingInfo *r) { return core->mac[r->dlen] > 0; } typedef struct IGB_TxRing_st { - const E1000E_RingInfo *i; + const E1000ERingInfo *i; struct igb_tx *tx; } IGB_TxRing; @@ -774,7 +788,7 @@ igb_mq_queue_idx(int base_reg_idx, int reg_idx) static inline void igb_tx_ring_init(IGBCore *core, IGB_TxRing *txr, int idx) { - static const E1000E_RingInfo i[IGB_NUM_QUEUES] = { + static const E1000ERingInfo i[IGB_NUM_QUEUES] = { { TDBAH0, TDBAL0, TDLEN0, TDH0, TDT0, 0 }, { TDBAH1, TDBAL1, TDLEN1, TDH1, TDT1, 1 }, { TDBAH2, TDBAL2, TDLEN2, TDH2, TDT2, 2 }, @@ -800,13 +814,13 @@ igb_tx_ring_init(IGBCore *core, IGB_TxRing *txr, int idx) } typedef struct E1000E_RxRing_st { - const E1000E_RingInfo *i; + const E1000ERingInfo *i; } E1000E_RxRing; static inline void igb_rx_ring_init(IGBCore *core, E1000E_RxRing *rxr, int idx) { - static const E1000E_RingInfo i[IGB_NUM_QUEUES] = { + static const E1000ERingInfo i[IGB_NUM_QUEUES] = { { RDBAH0, 
RDBAL0, RDLEN0, RDH0, RDT0, 0 }, { RDBAH1, RDBAL1, RDLEN1, RDH1, RDT1, 1 }, { RDBAH2, RDBAL2, RDLEN2, RDH2, RDT2, 2 }, @@ -833,7 +847,7 @@ igb_rx_ring_init(IGBCore *core, E1000E_RxRing *rxr, int idx) static uint32_t igb_txdesc_writeback(IGBCore *core, dma_addr_t base, union e1000_adv_tx_desc *tx_desc, - const E1000E_RingInfo *txi) + const E1000ERingInfo *txi) { PCIDevice *d; uint32_t cmd_type_len = le32_to_cpu(tx_desc->read.cmd_type_len); @@ -866,7 +880,7 @@ igb_txdesc_writeback(IGBCore *core, dma_addr_t base, } static inline bool -igb_tx_enabled(IGBCore *core, const E1000E_RingInfo *txi) +igb_tx_enabled(IGBCore *core, const E1000ERingInfo *txi) { bool vmdq = core->mac[MRQC] & 1; uint16_t qn = txi->idx; @@ -883,7 +897,7 @@ igb_start_xmit(IGBCore *core, const IGB_TxRing *txr) PCIDevice *d; dma_addr_t base; union e1000_adv_tx_desc desc; - const E1000E_RingInfo *txi = txr->i; + const E1000ERingInfo *txi = txr->i; uint32_t eic = 0; if (!igb_tx_enabled(core, txi)) { @@ -918,7 +932,7 @@ igb_start_xmit(IGBCore *core, const IGB_TxRing *txr) } static uint32_t -igb_rxbufsize(IGBCore *core, const E1000E_RingInfo *r) +igb_rxbufsize(IGBCore *core, const E1000ERingInfo *r) { uint32_t srrctl = core->mac[E1000_SRRCTL(r->idx) >> 2]; uint32_t bsizepkt = srrctl & E1000_SRRCTL_BSIZEPKT_MASK; @@ -930,7 +944,7 @@ igb_rxbufsize(IGBCore *core, const E1000E_RingInfo *r) } static bool -igb_has_rxbufs(IGBCore *core, const E1000E_RingInfo *r, size_t total_size) +igb_has_rxbufs(IGBCore *core, const E1000ERingInfo *r, size_t total_size) { uint32_t bufs = igb_ring_free_descr_num(core, r); uint32_t bufsize = igb_rxbufsize(core, r); @@ -941,6 +955,14 @@ igb_has_rxbufs(IGBCore *core, const E1000E_RingInfo *r, size_t total_size) bufsize; } +static uint32_t +igb_rxhdrbufsize(IGBCore *core, const E1000ERingInfo *r) +{ + uint32_t srrctl = core->mac[E1000_SRRCTL(r->idx) >> 2]; + return (srrctl & E1000_SRRCTL_BSIZEHDRSIZE_MASK) >> + E1000_SRRCTL_BSIZEHDRSIZE_SHIFT; +} + void igb_start_recv(IGBCore *core) { @@ -1225,21 +1247,77 @@ igb_read_lgcy_rx_descr(IGBCore *core, struct e1000_rx_desc *desc, } static inline void -igb_read_adv_rx_descr(IGBCore *core, union e1000_adv_rx_desc *desc, - hwaddr *buff_addr) +igb_read_adv_rx_single_buf_descr(IGBCore *core, union e1000_adv_rx_desc *desc, + hwaddr *buff_addr) { *buff_addr = le64_to_cpu(desc->read.pkt_addr); } static inline void -igb_read_rx_descr(IGBCore *core, union e1000_rx_desc_union *desc, - hwaddr *buff_addr) +igb_read_adv_rx_split_buf_descr(IGBCore *core, union e1000_adv_rx_desc *desc, + hwaddr *buff_addr) { + buff_addr[0] = le64_to_cpu(desc->read.hdr_addr); + buff_addr[1] = le64_to_cpu(desc->read.pkt_addr); +} + +typedef struct IGBBAState { + uint16_t written[IGB_MAX_PS_BUFFERS]; + uint8_t cur_idx; +} IGBBAState; + +typedef struct IGBSplitDescriptorData { + bool sph; + bool hbo; + size_t hdr_len; +} IGBSplitDescriptorData; + +typedef struct IGBPacketRxDMAState { + size_t size; + size_t total_size; + size_t ps_hdr_len; + size_t desc_size; + size_t desc_offset; + uint32_t rx_desc_packet_buf_size; + uint32_t rx_desc_header_buf_size; + struct iovec *iov; + size_t iov_ofs; + bool do_ps; + bool is_first; + IGBBAState bastate; + hwaddr ba[IGB_MAX_PS_BUFFERS]; + IGBSplitDescriptorData ps_desc_data; +} IGBPacketRxDMAState; + +static inline void +igb_read_rx_descr(IGBCore *core, + union e1000_rx_desc_union *desc, + IGBPacketRxDMAState *pdma_st, + const E1000ERingInfo *r) +{ + uint32_t desc_type; + if (igb_rx_use_legacy_descriptor(core)) { - igb_read_lgcy_rx_descr(core, &desc->legacy, 
buff_addr); - } else { - igb_read_adv_rx_descr(core, &desc->adv, buff_addr); + igb_read_lgcy_rx_descr(core, &desc->legacy, &pdma_st->ba[1]); + pdma_st->ba[0] = 0; + return; } + + /* advanced header split descriptor */ + if (igb_rx_use_ps_descriptor(core, r)) { + igb_read_adv_rx_split_buf_descr(core, &desc->adv, &pdma_st->ba[0]); + return; + } + + /* descriptor replication modes not supported */ + desc_type = igb_rx_queue_desctyp_get(core, r); + if (desc_type != E1000_SRRCTL_DESCTYPE_ADV_ONEBUF) { + trace_igb_wrn_rx_desc_modes_not_supp(desc_type); + } + + /* advanced single buffer descriptor */ + igb_read_adv_rx_single_buf_descr(core, &desc->adv, &pdma_st->ba[1]); + pdma_st->ba[0] = 0; } static void @@ -1281,15 +1359,11 @@ igb_verify_csum_in_sw(IGBCore *core, } static void -igb_build_rx_metadata(IGBCore *core, - struct NetRxPkt *pkt, - bool is_eop, - const E1000E_RSSInfo *rss_info, uint16_t etqf, bool ts, - uint16_t *pkt_info, uint16_t *hdr_info, - uint32_t *rss, - uint32_t *status_flags, - uint16_t *ip_id, - uint16_t *vlan_tag) +igb_build_rx_metadata_common(IGBCore *core, + struct NetRxPkt *pkt, + bool is_eop, + uint32_t *status_flags, + uint16_t *vlan_tag) { struct virtio_net_hdr *vhdr; bool hasip4, hasip6, csum_valid; @@ -1298,7 +1372,6 @@ igb_build_rx_metadata(IGBCore *core, *status_flags = E1000_RXD_STAT_DD; /* No additional metadata needed for non-EOP descriptors */ - /* TODO: EOP apply only to status so don't skip whole function. */ if (!is_eop) { goto func_exit; } @@ -1315,64 +1388,6 @@ igb_build_rx_metadata(IGBCore *core, trace_e1000e_rx_metadata_vlan(*vlan_tag); } - /* Packet parsing results */ - if ((core->mac[RXCSUM] & E1000_RXCSUM_PCSD) != 0) { - if (rss_info->enabled) { - *rss = cpu_to_le32(rss_info->hash); - trace_igb_rx_metadata_rss(*rss); - } - } else if (hasip4) { - *status_flags |= E1000_RXD_STAT_IPIDV; - *ip_id = cpu_to_le16(net_rx_pkt_get_ip_id(pkt)); - trace_e1000e_rx_metadata_ip_id(*ip_id); - } - - if (l4hdr_proto == ETH_L4_HDR_PROTO_TCP && net_rx_pkt_is_tcp_ack(pkt)) { - *status_flags |= E1000_RXD_STAT_ACK; - trace_e1000e_rx_metadata_ack(); - } - - if (pkt_info) { - *pkt_info = rss_info->enabled ? 
rss_info->type : 0; - - if (etqf < 8) { - *pkt_info |= (BIT(11) | etqf) << 4; - } else { - if (hasip4) { - *pkt_info |= E1000_ADVRXD_PKT_IP4; - } - - if (hasip6) { - *pkt_info |= E1000_ADVRXD_PKT_IP6; - } - - switch (l4hdr_proto) { - case ETH_L4_HDR_PROTO_TCP: - *pkt_info |= E1000_ADVRXD_PKT_TCP; - break; - - case ETH_L4_HDR_PROTO_UDP: - *pkt_info |= E1000_ADVRXD_PKT_UDP; - break; - - case ETH_L4_HDR_PROTO_SCTP: - *pkt_info |= E1000_ADVRXD_PKT_SCTP; - break; - - default: - break; - } - } - } - - if (hdr_info) { - *hdr_info = 0; - } - - if (ts) { - *status_flags |= BIT(16); - } - /* RX CSO information */ if (hasip6 && (core->mac[RFCTL] & E1000_RFCTL_IPV6_XSUM_DIS)) { trace_e1000e_rx_metadata_ipv6_sum_disabled(); @@ -1428,56 +1443,168 @@ func_exit: static inline void igb_write_lgcy_rx_descr(IGBCore *core, struct e1000_rx_desc *desc, struct NetRxPkt *pkt, - const E1000E_RSSInfo *rss_info, uint16_t etqf, bool ts, + const E1000E_RSSInfo *rss_info, uint16_t length) { - uint32_t status_flags, rss; - uint16_t ip_id; + uint32_t status_flags; assert(!rss_info->enabled); + + memset(desc, 0, sizeof(*desc)); desc->length = cpu_to_le16(length); - desc->csum = 0; + igb_build_rx_metadata_common(core, pkt, pkt != NULL, + &status_flags, + &desc->special); - igb_build_rx_metadata(core, pkt, pkt != NULL, - rss_info, etqf, ts, - NULL, NULL, &rss, - &status_flags, &ip_id, - &desc->special); desc->errors = (uint8_t) (le32_to_cpu(status_flags) >> 24); desc->status = (uint8_t) le32_to_cpu(status_flags); } +static bool +igb_rx_ps_descriptor_split_always(IGBCore *core, const E1000ERingInfo *r) +{ + uint32_t desctyp = igb_rx_queue_desctyp_get(core, r); + return desctyp == E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS; +} + +static uint16_t +igb_rx_desc_get_packet_type(IGBCore *core, struct NetRxPkt *pkt, uint16_t etqf) +{ + uint16_t pkt_type; + bool hasip4, hasip6; + EthL4HdrProto l4hdr_proto; + + if (etqf < 8) { + pkt_type = BIT(11) | etqf; + return pkt_type; + } + + net_rx_pkt_get_protocols(pkt, &hasip4, &hasip6, &l4hdr_proto); + + if (hasip6 && !(core->mac[RFCTL] & E1000_RFCTL_IPV6_DIS)) { + eth_ip6_hdr_info *ip6hdr_info = net_rx_pkt_get_ip6_info(pkt); + pkt_type = ip6hdr_info->has_ext_hdrs ? 
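+                   /*
+                    * pkt_type is built from bit 0 here and shifted left by 4
+                    * in igb_write_adv_rx_descr() when packed next to the
+                    * 4-bit RSS type, which is why the E1000_ADVRXD_PKT_*
+                    * values are now defined un-shifted. IPv6 frames carrying
+                    * extension headers report IP6E rather than plain IP6:
+                    */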
E1000_ADVRXD_PKT_IP6E : + E1000_ADVRXD_PKT_IP6; + } else if (hasip4) { + pkt_type = E1000_ADVRXD_PKT_IP4; + } else { + pkt_type = 0; + } + + switch (l4hdr_proto) { + case ETH_L4_HDR_PROTO_TCP: + pkt_type |= E1000_ADVRXD_PKT_TCP; + break; + case ETH_L4_HDR_PROTO_UDP: + pkt_type |= E1000_ADVRXD_PKT_UDP; + break; + case ETH_L4_HDR_PROTO_SCTP: + pkt_type |= E1000_ADVRXD_PKT_SCTP; + break; + default: + break; + } + + return pkt_type; +} + static inline void igb_write_adv_rx_descr(IGBCore *core, union e1000_adv_rx_desc *desc, struct NetRxPkt *pkt, const E1000E_RSSInfo *rss_info, uint16_t etqf, bool ts, uint16_t length) { + bool hasip4, hasip6; + EthL4HdrProto l4hdr_proto; + uint16_t rss_type = 0, pkt_type; + bool eop = (pkt != NULL); + uint32_t adv_desc_status_error = 0; memset(&desc->wb, 0, sizeof(desc->wb)); desc->wb.upper.length = cpu_to_le16(length); + igb_build_rx_metadata_common(core, pkt, eop, + &desc->wb.upper.status_error, + &desc->wb.upper.vlan); + + if (!eop) { + return; + } + + net_rx_pkt_get_protocols(pkt, &hasip4, &hasip6, &l4hdr_proto); + + if ((core->mac[RXCSUM] & E1000_RXCSUM_PCSD) != 0) { + if (rss_info->enabled) { + desc->wb.lower.hi_dword.rss = cpu_to_le32(rss_info->hash); + rss_type = rss_info->type; + trace_igb_rx_metadata_rss(desc->wb.lower.hi_dword.rss, rss_type); + } + } else if (hasip4) { + adv_desc_status_error |= E1000_RXD_STAT_IPIDV; + desc->wb.lower.hi_dword.csum_ip.ip_id = + cpu_to_le16(net_rx_pkt_get_ip_id(pkt)); + trace_e1000e_rx_metadata_ip_id( + desc->wb.lower.hi_dword.csum_ip.ip_id); + } + + if (ts) { + adv_desc_status_error |= BIT(16); + } + + pkt_type = igb_rx_desc_get_packet_type(core, pkt, etqf); + trace_e1000e_rx_metadata_pkt_type(pkt_type); + desc->wb.lower.lo_dword.pkt_info = cpu_to_le16(rss_type | (pkt_type << 4)); + desc->wb.upper.status_error |= cpu_to_le32(adv_desc_status_error); +} + +static inline void +igb_write_adv_ps_rx_descr(IGBCore *core, + union e1000_adv_rx_desc *desc, + struct NetRxPkt *pkt, + const E1000E_RSSInfo *rss_info, + const E1000ERingInfo *r, + uint16_t etqf, + bool ts, + IGBPacketRxDMAState *pdma_st) +{ + size_t pkt_len; + uint16_t hdr_info = 0; - igb_build_rx_metadata(core, pkt, pkt != NULL, - rss_info, etqf, ts, - &desc->wb.lower.lo_dword.pkt_info, - &desc->wb.lower.lo_dword.hdr_info, - &desc->wb.lower.hi_dword.rss, - &desc->wb.upper.status_error, - &desc->wb.lower.hi_dword.csum_ip.ip_id, - &desc->wb.upper.vlan); + if (pdma_st->do_ps) { + pkt_len = pdma_st->bastate.written[1]; + } else { + pkt_len = pdma_st->bastate.written[0] + pdma_st->bastate.written[1]; + } + + igb_write_adv_rx_descr(core, desc, pkt, rss_info, etqf, ts, pkt_len); + + hdr_info = (pdma_st->ps_desc_data.hdr_len << E1000_ADVRXD_HDR_LEN_OFFSET) & + E1000_ADVRXD_ADV_HDR_LEN_MASK; + hdr_info |= pdma_st->ps_desc_data.sph ? E1000_ADVRXD_HDR_SPH : 0; + desc->wb.lower.lo_dword.hdr_info = cpu_to_le16(hdr_info); + + desc->wb.upper.status_error |= cpu_to_le32( + pdma_st->ps_desc_data.hbo ? 
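+        /*
+         * HBO ("header buffer overflow") is reported when igb_do_ps() found
+         * a parsed header larger than the header buffer; the split is then
+         * either skipped entirely or, on split-always queues, capped at the
+         * header buffer size:
+         */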
E1000_ADVRXD_ST_ERR_HBO_OFFSET : 0); } static inline void -igb_write_rx_descr(IGBCore *core, union e1000_rx_desc_union *desc, - struct NetRxPkt *pkt, const E1000E_RSSInfo *rss_info, - uint16_t etqf, bool ts, uint16_t length) +igb_write_rx_descr(IGBCore *core, + union e1000_rx_desc_union *desc, + struct NetRxPkt *pkt, + const E1000E_RSSInfo *rss_info, + uint16_t etqf, + bool ts, + IGBPacketRxDMAState *pdma_st, + const E1000ERingInfo *r) { if (igb_rx_use_legacy_descriptor(core)) { igb_write_lgcy_rx_descr(core, &desc->legacy, pkt, rss_info, - etqf, ts, length); + pdma_st->bastate.written[1]); + } else if (igb_rx_use_ps_descriptor(core, r)) { + igb_write_adv_ps_rx_descr(core, &desc->adv, pkt, rss_info, r, etqf, ts, + pdma_st); } else { igb_write_adv_rx_descr(core, &desc->adv, pkt, rss_info, - etqf, ts, length); + etqf, ts, pdma_st->bastate.written[1]); } } @@ -1514,20 +1641,7 @@ igb_pci_dma_write_rx_desc(IGBCore *core, PCIDevice *dev, dma_addr_t addr, } static void -igb_write_to_rx_buffers(IGBCore *core, - PCIDevice *d, - hwaddr ba, - uint16_t *written, - const char *data, - dma_addr_t data_len) -{ - trace_igb_rx_desc_buff_write(ba, *written, data, data_len); - pci_dma_write(d, ba + *written, data, data_len); - *written += data_len; -} - -static void -igb_update_rx_stats(IGBCore *core, const E1000E_RingInfo *rxi, +igb_update_rx_stats(IGBCore *core, const E1000ERingInfo *rxi, size_t pkt_size, size_t pkt_fcs_size) { eth_pkt_types_e pkt_type = net_rx_pkt_get_packet_type(core->rx_pkt); @@ -1545,12 +1659,256 @@ igb_update_rx_stats(IGBCore *core, const E1000E_RingInfo *rxi, } static inline bool -igb_rx_descr_threshold_hit(IGBCore *core, const E1000E_RingInfo *rxi) +igb_rx_descr_threshold_hit(IGBCore *core, const E1000ERingInfo *rxi) { return igb_ring_free_descr_num(core, rxi) == ((core->mac[E1000_SRRCTL(rxi->idx) >> 2] >> 20) & 31) * 16; } +static bool +igb_do_ps(IGBCore *core, + const E1000ERingInfo *r, + struct NetRxPkt *pkt, + IGBPacketRxDMAState *pdma_st) +{ + bool hasip4, hasip6; + EthL4HdrProto l4hdr_proto; + bool fragment; + bool split_always; + size_t bheader_size; + size_t total_pkt_len; + + if (!igb_rx_use_ps_descriptor(core, r)) { + return false; + } + + total_pkt_len = net_rx_pkt_get_total_len(pkt); + bheader_size = igb_rxhdrbufsize(core, r); + split_always = igb_rx_ps_descriptor_split_always(core, r); + if (split_always && total_pkt_len <= bheader_size) { + pdma_st->ps_hdr_len = total_pkt_len; + pdma_st->ps_desc_data.hdr_len = total_pkt_len; + return true; + } + + net_rx_pkt_get_protocols(pkt, &hasip4, &hasip6, &l4hdr_proto); + + if (hasip4) { + fragment = net_rx_pkt_get_ip4_info(pkt)->fragment; + } else if (hasip6) { + fragment = net_rx_pkt_get_ip6_info(pkt)->fragment; + } else { + pdma_st->ps_desc_data.hdr_len = bheader_size; + goto header_not_handled; + } + + if (fragment && (core->mac[RFCTL] & E1000_RFCTL_IPFRSP_DIS)) { + pdma_st->ps_desc_data.hdr_len = bheader_size; + goto header_not_handled; + } + + /* no header splitting for SCTP */ + if (!fragment && (l4hdr_proto == ETH_L4_HDR_PROTO_UDP || + l4hdr_proto == ETH_L4_HDR_PROTO_TCP)) { + pdma_st->ps_hdr_len = net_rx_pkt_get_l5_hdr_offset(pkt); + } else { + pdma_st->ps_hdr_len = net_rx_pkt_get_l4_hdr_offset(pkt); + } + + pdma_st->ps_desc_data.sph = true; + pdma_st->ps_desc_data.hdr_len = pdma_st->ps_hdr_len; + + if (pdma_st->ps_hdr_len > bheader_size) { + pdma_st->ps_desc_data.hbo = true; + goto header_not_handled; + } + + return true; + +header_not_handled: + if (split_always) { + pdma_st->ps_hdr_len = bheader_size; + return true; + 
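+        /*
+         * Worked example of the SRRCTL decode driving this path, using the
+         * masks from igb_regs.h: SRRCTL = 0x0A000400 selects DESCTYPE 0xA
+         * (header split always) and a BSIZEHDRSIZE field of 4, which
+         * igb_rxhdrbufsize() turns into (0x400 >> 2) = 256 bytes, i.e. the
+         * field counts 64-byte units. A split-always queue thus still
+         * consumes the first bheader_size bytes of an unparsable frame as
+         * header.
+         */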
} + + return false; +} + +static void +igb_truncate_to_descriptor_size(IGBPacketRxDMAState *pdma_st, size_t *size) +{ + if (pdma_st->do_ps && pdma_st->is_first) { + if (*size > pdma_st->rx_desc_packet_buf_size + pdma_st->ps_hdr_len) { + *size = pdma_st->rx_desc_packet_buf_size + pdma_st->ps_hdr_len; + } + } else { + if (*size > pdma_st->rx_desc_packet_buf_size) { + *size = pdma_st->rx_desc_packet_buf_size; + } + } +} + +static inline void +igb_write_hdr_frag_to_rx_buffers(IGBCore *core, + PCIDevice *d, + IGBPacketRxDMAState *pdma_st, + const char *data, + dma_addr_t data_len) +{ + assert(data_len <= pdma_st->rx_desc_header_buf_size - + pdma_st->bastate.written[0]); + pci_dma_write(d, + pdma_st->ba[0] + pdma_st->bastate.written[0], + data, data_len); + pdma_st->bastate.written[0] += data_len; + pdma_st->bastate.cur_idx = 1; +} + +static void +igb_write_header_to_rx_buffers(IGBCore *core, + struct NetRxPkt *pkt, + PCIDevice *d, + IGBPacketRxDMAState *pdma_st, + size_t *copy_size) +{ + size_t iov_copy; + size_t ps_hdr_copied = 0; + + if (!pdma_st->is_first) { + /* Leave buffer 0 of each descriptor except first */ + /* empty */ + pdma_st->bastate.cur_idx = 1; + return; + } + + do { + iov_copy = MIN(pdma_st->ps_hdr_len - ps_hdr_copied, + pdma_st->iov->iov_len - pdma_st->iov_ofs); + + igb_write_hdr_frag_to_rx_buffers(core, d, pdma_st, + pdma_st->iov->iov_base, + iov_copy); + + *copy_size -= iov_copy; + ps_hdr_copied += iov_copy; + + pdma_st->iov_ofs += iov_copy; + if (pdma_st->iov_ofs == pdma_st->iov->iov_len) { + pdma_st->iov++; + pdma_st->iov_ofs = 0; + } + } while (ps_hdr_copied < pdma_st->ps_hdr_len); + + pdma_st->is_first = false; +} + +static void +igb_write_payload_frag_to_rx_buffers(IGBCore *core, + PCIDevice *d, + IGBPacketRxDMAState *pdma_st, + const char *data, + dma_addr_t data_len) +{ + while (data_len > 0) { + assert(pdma_st->bastate.cur_idx < IGB_MAX_PS_BUFFERS); + + uint32_t cur_buf_bytes_left = + pdma_st->rx_desc_packet_buf_size - + pdma_st->bastate.written[pdma_st->bastate.cur_idx]; + uint32_t bytes_to_write = MIN(data_len, cur_buf_bytes_left); + + trace_igb_rx_desc_buff_write( + pdma_st->bastate.cur_idx, + pdma_st->ba[pdma_st->bastate.cur_idx], + pdma_st->bastate.written[pdma_st->bastate.cur_idx], + data, + bytes_to_write); + + pci_dma_write(d, + pdma_st->ba[pdma_st->bastate.cur_idx] + + pdma_st->bastate.written[pdma_st->bastate.cur_idx], + data, bytes_to_write); + + pdma_st->bastate.written[pdma_st->bastate.cur_idx] += bytes_to_write; + data += bytes_to_write; + data_len -= bytes_to_write; + + if (pdma_st->bastate.written[pdma_st->bastate.cur_idx] == + pdma_st->rx_desc_packet_buf_size) { + pdma_st->bastate.cur_idx++; + } + } +} + +static void +igb_write_payload_to_rx_buffers(IGBCore *core, + struct NetRxPkt *pkt, + PCIDevice *d, + IGBPacketRxDMAState *pdma_st, + size_t *copy_size) +{ + static const uint32_t fcs_pad; + size_t iov_copy; + + /* Copy packet payload */ + while (*copy_size) { + iov_copy = MIN(*copy_size, pdma_st->iov->iov_len - pdma_st->iov_ofs); + igb_write_payload_frag_to_rx_buffers(core, d, + pdma_st, + pdma_st->iov->iov_base + + pdma_st->iov_ofs, + iov_copy); + + *copy_size -= iov_copy; + pdma_st->iov_ofs += iov_copy; + if (pdma_st->iov_ofs == pdma_st->iov->iov_len) { + pdma_st->iov++; + pdma_st->iov_ofs = 0; + } + } + + if (pdma_st->desc_offset + pdma_st->desc_size >= pdma_st->total_size) { + /* Simulate FCS checksum presence in the last descriptor */ + igb_write_payload_frag_to_rx_buffers(core, d, + pdma_st, + (const char *) &fcs_pad, + 
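+                                             /* the FCS bytes are zeroes;
+                                              * only the length returned by
+                                              * e1000x_fcs_len() is visible
+                                              * to the guest */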
e1000x_fcs_len(core->mac)); + } +} + +static void +igb_write_to_rx_buffers(IGBCore *core, + struct NetRxPkt *pkt, + PCIDevice *d, + IGBPacketRxDMAState *pdma_st) +{ + size_t copy_size; + + if (!(pdma_st->ba)[1] || (pdma_st->do_ps && !(pdma_st->ba[0]))) { + /* as per intel docs; skip descriptors with null buf addr */ + trace_e1000e_rx_null_descriptor(); + return; + } + + if (pdma_st->desc_offset >= pdma_st->size) { + return; + } + + pdma_st->desc_size = pdma_st->total_size - pdma_st->desc_offset; + igb_truncate_to_descriptor_size(pdma_st, &pdma_st->desc_size); + copy_size = pdma_st->size - pdma_st->desc_offset; + igb_truncate_to_descriptor_size(pdma_st, &copy_size); + + /* For PS mode copy the packet header first */ + if (pdma_st->do_ps) { + igb_write_header_to_rx_buffers(core, pkt, d, pdma_st, &copy_size); + } else { + pdma_st->bastate.cur_idx = 1; + } + + igb_write_payload_to_rx_buffers(core, pkt, d, pdma_st, &copy_size); +} + static void igb_write_packet_to_guest(IGBCore *core, struct NetRxPkt *pkt, const E1000E_RxRing *rxr, @@ -1560,95 +1918,61 @@ igb_write_packet_to_guest(IGBCore *core, struct NetRxPkt *pkt, PCIDevice *d; dma_addr_t base; union e1000_rx_desc_union desc; - size_t desc_size; - size_t desc_offset = 0; - size_t iov_ofs = 0; - - struct iovec *iov = net_rx_pkt_get_iovec(pkt); - size_t size = net_rx_pkt_get_total_len(pkt); - size_t total_size = size + e1000x_fcs_len(core->mac); - const E1000E_RingInfo *rxi = rxr->i; - size_t bufsize = igb_rxbufsize(core, rxi); - + const E1000ERingInfo *rxi; + size_t rx_desc_len; + + IGBPacketRxDMAState pdma_st = {0}; + pdma_st.is_first = true; + pdma_st.size = net_rx_pkt_get_total_len(pkt); + pdma_st.total_size = pdma_st.size + e1000x_fcs_len(core->mac); + + rxi = rxr->i; + rx_desc_len = core->rx_desc_len; + pdma_st.rx_desc_packet_buf_size = igb_rxbufsize(core, rxi); + pdma_st.rx_desc_header_buf_size = igb_rxhdrbufsize(core, rxi); + pdma_st.iov = net_rx_pkt_get_iovec(pkt); d = pcie_sriov_get_vf_at_index(core->owner, rxi->idx % 8); if (!d) { d = core->owner; } + pdma_st.do_ps = igb_do_ps(core, rxi, pkt, &pdma_st); + do { - hwaddr ba; - uint16_t written = 0; + memset(&pdma_st.bastate, 0, sizeof(IGBBAState)); bool is_last = false; - desc_size = total_size - desc_offset; - - if (desc_size > bufsize) { - desc_size = bufsize; - } - if (igb_ring_empty(core, rxi)) { return; } base = igb_ring_head_descr(core, rxi); + pci_dma_read(d, base, &desc, rx_desc_len); + trace_e1000e_rx_descr(rxi->idx, base, rx_desc_len); - pci_dma_read(d, base, &desc, core->rx_desc_len); - - trace_e1000e_rx_descr(rxi->idx, base, core->rx_desc_len); - - igb_read_rx_descr(core, &desc, &ba); - - if (ba) { - if (desc_offset < size) { - static const uint32_t fcs_pad; - size_t iov_copy; - size_t copy_size = size - desc_offset; - if (copy_size > bufsize) { - copy_size = bufsize; - } - - /* Copy packet payload */ - while (copy_size) { - iov_copy = MIN(copy_size, iov->iov_len - iov_ofs); + igb_read_rx_descr(core, &desc, &pdma_st, rxi); - igb_write_to_rx_buffers(core, d, ba, &written, - iov->iov_base + iov_ofs, iov_copy); - - copy_size -= iov_copy; - iov_ofs += iov_copy; - if (iov_ofs == iov->iov_len) { - iov++; - iov_ofs = 0; - } - } - - if (desc_offset + desc_size >= total_size) { - /* Simulate FCS checksum presence in the last descriptor */ - igb_write_to_rx_buffers(core, d, ba, &written, - (const char *) &fcs_pad, e1000x_fcs_len(core->mac)); - } - } - } else { /* as per intel docs; skip descriptors with null buf addr */ - trace_e1000e_rx_null_descriptor(); - } - desc_offset += desc_size; - if
(desc_offset >= total_size) { + igb_write_to_rx_buffers(core, pkt, d, &pdma_st); + pdma_st.desc_offset += pdma_st.desc_size; + if (pdma_st.desc_offset >= pdma_st.total_size) { is_last = true; } - igb_write_rx_descr(core, &desc, is_last ? core->rx_pkt : NULL, - rss_info, etqf, ts, written); - igb_pci_dma_write_rx_desc(core, d, base, &desc, core->rx_desc_len); - - igb_ring_advance(core, rxi, core->rx_desc_len / E1000_MIN_RX_DESC_LEN); - - } while (desc_offset < total_size); + igb_write_rx_descr(core, &desc, + is_last ? pkt : NULL, + rss_info, + etqf, ts, + &pdma_st, + rxi); + igb_pci_dma_write_rx_desc(core, d, base, &desc, rx_desc_len); + igb_ring_advance(core, rxi, rx_desc_len / E1000_MIN_RX_DESC_LEN); + } while (pdma_st.desc_offset < pdma_st.total_size); - igb_update_rx_stats(core, rxi, size, total_size); + igb_update_rx_stats(core, rxi, pdma_st.size, pdma_st.total_size); } static bool -igb_rx_strip_vlan(IGBCore *core, const E1000E_RingInfo *rxi) +igb_rx_strip_vlan(IGBCore *core, const E1000ERingInfo *rxi) { if (core->mac[MRQC] & 1) { uint16_t pool = rxi->idx % IGB_NUM_VM_POOLS; @@ -2753,7 +3077,7 @@ igb_update_rx_offloads(IGBCore *core) if (core->has_vnet) { qemu_set_offload(qemu_get_queue(core->owner_nic)->peer, - cso_state, 0, 0, 0, 0); + cso_state, 0, 0, 0, 0, 0, 0); } } diff --git a/hw/net/igb_regs.h b/hw/net/igb_regs.h index 82ff195dfc..e5a47eab64 100644 --- a/hw/net/igb_regs.h +++ b/hw/net/igb_regs.h @@ -364,7 +364,7 @@ union e1000_adv_rx_desc { /* Indicates that VF is still clear to send requests */ #define E1000_VT_MSGTYPE_CTS 0x20000000 #define E1000_VT_MSGINFO_SHIFT 16 -/* bits 23:16 are used for exra info for certain messages */ +/* bits 23:16 are used for extra info for certain messages */ #define E1000_VT_MSGINFO_MASK (0xFF << E1000_VT_MSGINFO_SHIFT) #define E1000_VF_RESET 0x01 /* VF requests reset */ @@ -452,6 +452,7 @@ union e1000_adv_rx_desc { #define E1000_SRRCTL_BSIZEHDRSIZE_MASK 0x00000F00 #define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */ #define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 +#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000 #define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 #define E1000_SRRCTL_DESCTYPE_MASK 0x0E000000 #define E1000_SRRCTL_DROP_EN 0x80000000 @@ -490,7 +491,7 @@ union e1000_adv_rx_desc { #define E1000_VF_MBX_INIT_DELAY 500 /* usec delay between retries */ #define E1000_VT_MSGINFO_SHIFT 16 -/* bits 23:16 are used for exra info for certain messages */ +/* bits 23:16 are used for extra info for certain messages */ #define E1000_VT_MSGINFO_MASK (0xFF << E1000_VT_MSGINFO_SHIFT) #define E1000_VF_RESET 0x01 /* VF requests reset */ @@ -692,11 +693,20 @@ union e1000_adv_rx_desc { #define E1000_STATUS_NUM_VFS_SHIFT 14 -#define E1000_ADVRXD_PKT_IP4 BIT(4) -#define E1000_ADVRXD_PKT_IP6 BIT(6) -#define E1000_ADVRXD_PKT_TCP BIT(8) -#define E1000_ADVRXD_PKT_UDP BIT(9) -#define E1000_ADVRXD_PKT_SCTP BIT(10) +#define E1000_ADVRXD_PKT_IP4 BIT(0) +#define E1000_ADVRXD_PKT_IP6 BIT(2) +#define E1000_ADVRXD_PKT_IP6E BIT(3) +#define E1000_ADVRXD_PKT_TCP BIT(4) +#define E1000_ADVRXD_PKT_UDP BIT(5) +#define E1000_ADVRXD_PKT_SCTP BIT(6) + +#define IGB_MAX_PS_BUFFERS 2 + +#define E1000_ADVRXD_HDR_LEN_OFFSET (21 - 16) +#define E1000_ADVRXD_ADV_HDR_LEN_MASK ((BIT(10) - 1) << \ + E1000_ADVRXD_HDR_LEN_OFFSET) +#define E1000_ADVRXD_HDR_SPH BIT(15) +#define E1000_ADVRXD_ST_ERR_HBO_OFFSET BIT(3 + 20) static inline uint8_t igb_ivar_entry_rx(uint8_t i) { diff --git a/hw/net/mcf_fec.c b/hw/net/mcf_fec.c index 8aa27bd322..ec3ddf520a 100644 --- 
a/hw/net/mcf_fec.c +++ b/hw/net/mcf_fec.c @@ -571,7 +571,7 @@ static ssize_t mcf_fec_receive(NetClientState *nc, const uint8_t *buf, size_t si size += 4; crc = cpu_to_be32(crc32(~0, buf, size)); crc_ptr = (uint8_t *)&crc; - /* Huge frames are truncted. */ + /* Huge frames are truncated. */ if (size > FEC_MAX_FRAME_SIZE) { size = FEC_MAX_FRAME_SIZE; flags |= FEC_BD_TR | FEC_BD_LG; diff --git a/hw/net/rocker/rocker_fp.c b/hw/net/rocker/rocker_fp.c index cbeed65bd5..9afd0c5e3f 100644 --- a/hw/net/rocker/rocker_fp.c +++ b/hw/net/rocker/rocker_fp.c @@ -134,7 +134,7 @@ static ssize_t fp_port_receive_iov(NetClientState *nc, const struct iovec *iov, FpPort *port = qemu_get_nic_opaque(nc); /* If the port is disabled, we want to drop this pkt - * now rather than queing it for later. We don't want + * now rather than queueing it for later. We don't want * any stale pkts getting into the device when the port * transitions to enabled. */ diff --git a/hw/net/rocker/rocker_of_dpa.c b/hw/net/rocker/rocker_of_dpa.c index dfe4754469..5e16056be6 100644 --- a/hw/net/rocker/rocker_of_dpa.c +++ b/hw/net/rocker/rocker_of_dpa.c @@ -1043,7 +1043,7 @@ static void of_dpa_flow_ig_tbl(OfDpaFlowContext *fc, uint32_t tbl_id) static ssize_t of_dpa_ig(World *world, uint32_t pport, const struct iovec *iov, int iovcnt) { - struct iovec iov_copy[iovcnt + 2]; + g_autofree struct iovec *iov_copy = g_new(struct iovec, iovcnt + 2); OfDpaFlowContext fc = { .of_dpa = world_private(world), .in_pport = pport, diff --git a/hw/net/rtl8139.c b/hw/net/rtl8139.c index b4df75b2c9..4525fda383 100644 --- a/hw/net/rtl8139.c +++ b/hw/net/rtl8139.c @@ -100,7 +100,7 @@ enum RTL8139_registers { MAC0 = 0, /* Ethernet hardware address. */ MAR0 = 8, /* Multicast filter. */ TxStatus0 = 0x10,/* Transmit status (Four 32bit registers). C mode only */ - /* Dump Tally Conter control register(64bit). C+ mode only */ + /* Dump Tally Counter control register(64bit). C+ mode only */ TxAddr0 = 0x20, /* Tx descriptors (also four 32bit). */ RxBuf = 0x30, ChipCmd = 0x37, diff --git a/hw/net/smc91c111.c b/hw/net/smc91c111.c index ad778cd8fc..ddbceda967 100644 --- a/hw/net/smc91c111.c +++ b/hw/net/smc91c111.c @@ -361,7 +361,7 @@ static void smc91c111_writeb(void *opaque, hwaddr offset, case 4: case 5: case 6: case 7: case 8: case 9: /* IA */ /* Not implemented. 
*/ return; - case 10: /* Genral Purpose */ + case 10: /* General Purpose */ SET_LOW(gpr, value); return; case 11: diff --git a/hw/net/sungem.c b/hw/net/sungem.c index 510b370e5f..c2e2c90668 100644 --- a/hw/net/sungem.c +++ b/hw/net/sungem.c @@ -1228,7 +1228,7 @@ static void sungem_mmio_mif_write(void *opaque, hwaddr addr, uint64_t val, case MIF_SMACHINE: return; /* No actual write */ case MIF_CFG: - /* Maintain the RO MDI bits to advertize an MDIO PHY on MDI0 */ + /* Maintain the RO MDI bits to advertise an MDIO PHY on MDI0 */ val &= ~MIF_CFG_MDI1; val |= MIF_CFG_MDI0; break; diff --git a/hw/net/sunhme.c b/hw/net/sunhme.c index 391d26fb82..64d4ea5850 100644 --- a/hw/net/sunhme.c +++ b/hw/net/sunhme.c @@ -901,7 +901,7 @@ static void sunhme_reset(DeviceState *ds) /* Configure internal transceiver */ s->mifregs[HME_MIFI_CFG >> 2] |= HME_MIF_CFG_MDI0; - /* Advetise auto, 100Mbps FD */ + /* Advertise auto, 100Mbps FD */ s->miiregs[MII_ANAR] = MII_ANAR_TXFD; s->miiregs[MII_BMSR] = MII_BMSR_AUTONEG | MII_BMSR_100TX_FD | MII_BMSR_AN_COMP; diff --git a/hw/net/trace-events b/hw/net/trace-events index 6b5ba669a2..3abfd65e5b 100644 --- a/hw/net/trace-events +++ b/hw/net/trace-events @@ -278,9 +278,9 @@ igb_core_mdic_write_unhandled(uint32_t addr) "MDIC WRITE: PHY[%u] UNHANDLED" igb_link_set_ext_params(bool asd_check, bool speed_select_bypass, bool pfrstd) "Set extended link params: ASD check: %d, Speed select bypass: %d, PF reset done: %d" igb_rx_desc_buff_size(uint32_t b) "buffer size: %u" -igb_rx_desc_buff_write(uint64_t addr, uint16_t offset, const void* source, uint32_t len) "addr: 0x%"PRIx64", offset: %u, from: %p, length: %u" +igb_rx_desc_buff_write(uint8_t idx, uint64_t addr, uint16_t offset, const void* source, uint32_t len) "buffer %u, addr: 0x%"PRIx64", offset: %u, from: %p, length: %u" -igb_rx_metadata_rss(uint32_t rss) "RSS data: 0x%X" +igb_rx_metadata_rss(uint32_t rss, uint16_t rss_pkt_type) "RSS data: rss: 0x%X, rss_pkt_type: 0x%X" igb_irq_icr_clear_gpie_nsicr(void) "Clearing ICR on read due to GPIE.NSICR enabled" igb_irq_set_iam(uint32_t icr) "Update IAM: 0x%x" @@ -295,6 +295,8 @@ igb_irq_eitr_set(uint32_t eitr_num, uint32_t val) "EITR[%u] = 0x%x" igb_set_pfmailbox(uint32_t vf_num, uint32_t val) "PFMailbox[%d]: 0x%x" igb_set_vfmailbox(uint32_t vf_num, uint32_t val) "VFMailbox[%d]: 0x%x" +igb_wrn_rx_desc_modes_not_supp(int desc_type) "Not supported descriptor type: %d" + # igbvf.c igbvf_wrn_io_addr_unknown(uint64_t addr) "IO unknown register 0x%"PRIx64 diff --git a/hw/net/vhost_net.c b/hw/net/vhost_net.c index 6b958d6363..57427a3997 100644 --- a/hw/net/vhost_net.c +++ b/hw/net/vhost_net.c @@ -78,6 +78,9 @@ static const int user_feature_bits[] = { VIRTIO_F_RING_RESET, VIRTIO_NET_F_RSS, VIRTIO_NET_F_HASH_REPORT, + VIRTIO_NET_F_GUEST_USO4, + VIRTIO_NET_F_GUEST_USO6, + VIRTIO_NET_F_HOST_USO, /* This bit implies RARP isn't sent by QEMU out of band */ VIRTIO_NET_F_GUEST_ANNOUNCE, diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c index 7102ec4817..5a0201c423 100644 --- a/hw/net/virtio-net.c +++ b/hw/net/virtio-net.c @@ -659,6 +659,15 @@ static int peer_has_ufo(VirtIONet *n) return n->has_ufo; } +static int peer_has_uso(VirtIONet *n) +{ + if (!peer_has_vnet_hdr(n)) { + return 0; + } + + return qemu_has_uso(qemu_get_queue(n->nic)->peer); +} + static void virtio_net_set_mrg_rx_bufs(VirtIONet *n, int mergeable_rx_bufs, int version_1, int hash_report) { @@ -796,6 +805,10 @@ static uint64_t virtio_net_get_features(VirtIODevice *vdev, uint64_t features, virtio_clear_feature(&features, 
VIRTIO_NET_F_GUEST_TSO6); virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_ECN); + virtio_clear_feature(&features, VIRTIO_NET_F_HOST_USO); + virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_USO4); + virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_USO6); + virtio_clear_feature(&features, VIRTIO_NET_F_HASH_REPORT); } @@ -804,6 +817,12 @@ static uint64_t virtio_net_get_features(VirtIODevice *vdev, uint64_t features, virtio_clear_feature(&features, VIRTIO_NET_F_HOST_UFO); } + if (!peer_has_uso(n)) { + virtio_clear_feature(&features, VIRTIO_NET_F_HOST_USO); + virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_USO4); + virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_USO6); + } + if (!get_vhost_net(nc->peer)) { return features; } @@ -859,17 +878,21 @@ static void virtio_net_apply_guest_offloads(VirtIONet *n) !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO4)), !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO6)), !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_ECN)), - !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_UFO))); + !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_UFO)), + !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_USO4)), + !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_USO6))); } -static uint64_t virtio_net_guest_offloads_by_features(uint32_t features) +static uint64_t virtio_net_guest_offloads_by_features(uint64_t features) { static const uint64_t guest_offloads_mask = (1ULL << VIRTIO_NET_F_GUEST_CSUM) | (1ULL << VIRTIO_NET_F_GUEST_TSO4) | (1ULL << VIRTIO_NET_F_GUEST_TSO6) | (1ULL << VIRTIO_NET_F_GUEST_ECN) | - (1ULL << VIRTIO_NET_F_GUEST_UFO); + (1ULL << VIRTIO_NET_F_GUEST_UFO) | + (1ULL << VIRTIO_NET_F_GUEST_USO4) | + (1ULL << VIRTIO_NET_F_GUEST_USO6); return guest_offloads_mask & features; } @@ -1307,7 +1330,7 @@ static void virtio_net_detach_epbf_rss(VirtIONet *n) static bool virtio_net_load_ebpf(VirtIONet *n) { if (!virtio_net_attach_ebpf_to_backend(n->nic, -1)) { - /* backend does't support steering ebpf */ + /* backend doesn't support steering ebpf */ return false; } @@ -2046,7 +2069,7 @@ static void virtio_net_rsc_extract_unit6(VirtioNetRscChain *chain, + sizeof(struct ip6_header)); unit->tcp_hdrlen = (htons(unit->tcp->th_offset_flags) & 0xF000) >> 10; - /* There is a difference between payload lenght in ipv4 and v6, + /* There is a difference between payload length in ipv4 and v6, ip header is excluded in ipv6 */ unit->payload = htons(*unit->ip_plen) - unit->tcp_hdrlen; } @@ -3795,7 +3818,7 @@ static void virtio_net_instance_init(Object *obj) /* * The default config_size is sizeof(struct virtio_net_config). - * Can be overriden with virtio_net_set_config_size. + * Can be overridden with virtio_net_set_config_size. 
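+ * (virtio_net_device_realize() does that based on the host features, e.g.
+ * growing the config when RSS or hash reporting is offered).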
*/ n->config_size = sizeof(struct virtio_net_config); device_add_bootindex_property(obj, &n->nic_conf.bootindex, @@ -3922,6 +3945,12 @@ static Property virtio_net_properties[] = { DEFINE_PROP_INT32("speed", VirtIONet, net_conf.speed, SPEED_UNKNOWN), DEFINE_PROP_STRING("duplex", VirtIONet, net_conf.duplex_str), DEFINE_PROP_BOOL("failover", VirtIONet, failover, false), + DEFINE_PROP_BIT64("guest_uso4", VirtIONet, host_features, + VIRTIO_NET_F_GUEST_USO4, true), + DEFINE_PROP_BIT64("guest_uso6", VirtIONet, host_features, + VIRTIO_NET_F_GUEST_USO6, true), + DEFINE_PROP_BIT64("host_uso", VirtIONet, host_features, + VIRTIO_NET_F_HOST_USO, true), DEFINE_PROP_END_OF_LIST(), }; diff --git a/hw/net/vmxnet3.c b/hw/net/vmxnet3.c index 3fb108751a..1b48d7743e 100644 --- a/hw/net/vmxnet3.c +++ b/hw/net/vmxnet3.c @@ -1341,6 +1341,8 @@ static void vmxnet3_update_features(VMXNET3State *s) s->lro_supported, s->lro_supported, 0, + 0, + 0, 0); } } @@ -1887,7 +1889,7 @@ vmxnet3_io_bar1_read(void *opaque, hwaddr addr, unsigned size) break; default: - VMW_CBPRN("Unknow read BAR1[%" PRIx64 "], %d bytes", addr, size); + VMW_CBPRN("Unknown read BAR1[%" PRIx64 "], %d bytes", addr, size); break; } diff --git a/hw/net/vmxnet3.h b/hw/net/vmxnet3.h index bf4f6de74a..f9283f9e7b 100644 --- a/hw/net/vmxnet3.h +++ b/hw/net/vmxnet3.h @@ -733,7 +733,7 @@ struct Vmxnet3_TxQueueDesc { struct Vmxnet3_RxQueueDesc { struct Vmxnet3_RxQueueCtrl ctrl; struct Vmxnet3_RxQueueConf conf; - /* Driver read after a GET commad */ + /* Driver read after a GET command */ struct Vmxnet3_QueueStatus status; struct UPT1_RxStats stats; u8 __pad[88]; /* 128 aligned */ diff --git a/hw/nvme/ctrl.c b/hw/nvme/ctrl.c index 90687b168a..f026245d1e 100644 --- a/hw/nvme/ctrl.c +++ b/hw/nvme/ctrl.c @@ -17,7 +17,7 @@ * Notes on coding style * --------------------- * While QEMU coding style prefers lowercase hexadecimals in constants, the - * NVMe subsystem use thes format from the NVMe specifications in the comments + * NVMe subsystem use this format from the NVMe specifications in the comments * (i.e. 'h' suffix instead of '0x' prefix). * * Usage @@ -730,7 +730,7 @@ static inline void nvme_sg_unmap(NvmeSg *sg) } /* - * When metadata is transfered as extended LBAs, the DPTR mapped into `sg` + * When metadata is transferred as extended LBAs, the DPTR mapped into `sg` * holds both data and metadata. This function splits the data and metadata * into two separate QSG/IOVs.
*/ @@ -2130,11 +2130,6 @@ static inline bool nvme_is_write(NvmeRequest *req) rw->opcode == NVME_CMD_WRITE_ZEROES; } -static AioContext *nvme_get_aio_context(BlockAIOCB *acb) -{ - return qemu_get_aio_context(); -} - static void nvme_misc_cb(void *opaque, int ret) { NvmeRequest *req = opaque; @@ -3302,7 +3297,6 @@ static void nvme_flush_cancel(BlockAIOCB *acb) static const AIOCBInfo nvme_flush_aiocb_info = { .aiocb_size = sizeof(NvmeFlushAIOCB), .cancel_async = nvme_flush_cancel, - .get_aio_context = nvme_get_aio_context, }; static void nvme_do_flush(NvmeFlushAIOCB *iocb); @@ -6478,7 +6472,6 @@ static void nvme_format_cancel(BlockAIOCB *aiocb) static const AIOCBInfo nvme_format_aiocb_info = { .aiocb_size = sizeof(NvmeFormatAIOCB), .cancel_async = nvme_format_cancel, - .get_aio_context = nvme_get_aio_context, }; static void nvme_format_set(NvmeNamespace *ns, uint8_t lbaf, uint8_t mset, @@ -7594,7 +7587,7 @@ static void nvme_process_db(NvmeCtrl *n, hwaddr addr, int val) /* * NVM Express v1.3d, Section 4.1 state: "If host software writes * an invalid value to the Submission Queue Tail Doorbell or - * Completion Queue Head Doorbell regiter and an Asynchronous Event + * Completion Queue Head Doorbell register and an Asynchronous Event * Request command is outstanding, then an asynchronous event is * posted to the Admin Completion Queue with a status code of * Invalid Doorbell Write Value." diff --git a/hw/nvram/eeprom_at24c.c b/hw/nvram/eeprom_at24c.c index 613c4929e3..3272068663 100644 --- a/hw/nvram/eeprom_at24c.c +++ b/hw/nvram/eeprom_at24c.c @@ -51,7 +51,7 @@ struct EEPROMState { bool writable; /* cells changed since last START? */ bool changed; - /* during WRITE, # of address bytes transfered */ + /* during WRITE, # of address bytes transferred */ uint8_t haveaddr; uint8_t *mem; diff --git a/hw/nvram/fw_cfg.c b/hw/nvram/fw_cfg.c index 29a5bef1d5..4e4524673a 100644 --- a/hw/nvram/fw_cfg.c +++ b/hw/nvram/fw_cfg.c @@ -877,7 +877,7 @@ static struct { /* * Any sub-page size update to these table MRs will be lost during migration, * as we use aligned size in ram_load_precopy() -> qemu_ram_resize() path. - * In order to avoid the inconsistency in sizes save them seperately and + * In order to avoid the inconsistency in sizes save them separately and * migrate over in vmstate post_load(). 
*/ static void fw_cfg_acpi_mr_save(FWCfgState *s, const char *filename, size_t len) diff --git a/hw/pci-bridge/cxl_downstream.c b/hw/pci-bridge/cxl_downstream.c index 54f507318f..5a2b749c8e 100644 --- a/hw/pci-bridge/cxl_downstream.c +++ b/hw/pci-bridge/cxl_downstream.c @@ -42,7 +42,7 @@ static void latch_registers(CXLDownstreamPort *dsp) CXL2_DOWNSTREAM_PORT); } -/* TODO: Look at sharing this code acorss all CXL port types */ +/* TODO: Look at sharing this code across all CXL port types */ static void cxl_dsp_dvsec_write_config(PCIDevice *dev, uint32_t addr, uint32_t val, int len) { diff --git a/hw/pci-bridge/cxl_upstream.c b/hw/pci-bridge/cxl_upstream.c index 9159f48a8c..2b9cf0cc97 100644 --- a/hw/pci-bridge/cxl_upstream.c +++ b/hw/pci-bridge/cxl_upstream.c @@ -262,7 +262,7 @@ static int build_cdat_table(CDATSubHeader ***cdat_table, void *priv) .length = sslbis_size, }, .data_type = HMATLB_DATA_TYPE_ACCESS_BANDWIDTH, - .entry_base_unit = 1000, + .entry_base_unit = 1024, }, }; diff --git a/hw/pci-bridge/pci_expander_bridge.c b/hw/pci-bridge/pci_expander_bridge.c index 613857b601..535889f7c2 100644 --- a/hw/pci-bridge/pci_expander_bridge.c +++ b/hw/pci-bridge/pci_expander_bridge.c @@ -263,7 +263,7 @@ static int pxb_map_irq_fn(PCIDevice *pci_dev, int pin) /* * First carry out normal swizzle to handle - * multple root ports on a pxb instance. + * multiple root ports on a pxb instance. */ pin = pci_swizzle_map_irq_fn(pci_dev, pin); diff --git a/hw/pci-host/bonito.c b/hw/pci-host/bonito.c index 4701481b9b..ee6cb85e97 100644 --- a/hw/pci-host/bonito.c +++ b/hw/pci-host/bonito.c @@ -62,7 +62,7 @@ #define DPRINTF(fmt, ...) #endif -/* from linux soure code. include/asm-mips/mips-boards/bonito64.h*/ +/* from linux source code. include/asm-mips/mips-boards/bonito64.h*/ #define BONITO_BOOT_BASE 0x1fc00000 #define BONITO_BOOT_SIZE 0x00100000 #define BONITO_BOOT_TOP (BONITO_BOOT_BASE + BONITO_BOOT_SIZE - 1) diff --git a/hw/pci-host/designware.c b/hw/pci-host/designware.c index 388d252ee2..6f5442f108 100644 --- a/hw/pci-host/designware.c +++ b/hw/pci-host/designware.c @@ -488,7 +488,7 @@ static void designware_pcie_root_realize(PCIDevice *dev, Error **errp) /* * If no inbound iATU windows are configured, HW defaults to - * letting inbound TLPs to pass in. We emulate that by exlicitly + * letting inbound TLPs to pass in. We emulate that by explicitly * configuring first inbound window to cover all of target's * address space. * @@ -503,7 +503,7 @@ static void designware_pcie_root_realize(PCIDevice *dev, Error **errp) &designware_pci_host_msi_ops, root, "pcie-msi", 0x4); /* - * We initially place MSI interrupt I/O region a adress 0 and + * We initially place MSI interrupt I/O region at address 0 and * disable it. 
It'll be later moved to correct offset and enabled * in designware_pcie_root_update_msi_mapping() as a part of * initialization done by guest OS diff --git a/hw/pci-host/dino.c b/hw/pci-host/dino.c index e8eaebca54..82503229fa 100644 --- a/hw/pci-host/dino.c +++ b/hw/pci-host/dino.c @@ -1,5 +1,5 @@ /* - * HP-PARISC Dino PCI chipset emulation, as in B160L and similiar machines + * HP-PARISC Dino PCI chipset emulation, as in B160L and similar machines * * (C) 2017-2019 by Helge Deller <deller@gmx.de> * diff --git a/hw/pci-host/gpex-acpi.c b/hw/pci-host/gpex-acpi.c index 7c7316bc96..1092dc3b70 100644 --- a/hw/pci-host/gpex-acpi.c +++ b/hw/pci-host/gpex-acpi.c @@ -177,7 +177,7 @@ void acpi_dsdt_add_gpex(Aml *scope, struct GPEXConfig *cfg) acpi_dsdt_add_pci_route_table(dev, cfg->irq); /* - * Resources defined for PXBs are composed by the folling parts: + * Resources defined for PXBs are composed of the following parts: * 1. The resources the pci-brige/pcie-root-port need. * 2. The resources the devices behind pxb need. */ diff --git a/hw/pci-host/gt64120.c b/hw/pci-host/gt64120.c index 82c15edb46..143bf053d7 100644 --- a/hw/pci-host/gt64120.c +++ b/hw/pci-host/gt64120.c @@ -331,9 +331,9 @@ static void gt64120_update_pci_cfgdata_mapping(GT64120State *s) /* * The setting of the MByteSwap bit and MWordSwap bit in the PCI Internal * Command Register determines how data transactions from the CPU to/from - * PCI are handled along with the setting of the Endianess bit in the CPU + * PCI are handled along with the setting of the Endianness bit in the CPU * Configuration Register. See: - * - Table 16: 32-bit PCI Transaction Endianess + * - Table 16: 32-bit PCI Transaction Endianness * - Table 158: PCI_0 Command, Offset: 0xc00 */ diff --git a/hw/pci-host/pnv_phb.c b/hw/pci-host/pnv_phb.c index 82332d7a05..157c00782c 100644 --- a/hw/pci-host/pnv_phb.c +++ b/hw/pci-host/pnv_phb.c @@ -25,7 +25,7 @@ * state associated with the child has an id, use it as QOM id. * Otherwise use object_typename[index] as QOM id. * - * This helper does both operations at the same time because seting + * This helper does both operations at the same time because setting * a new QOM child will erase the bus parent of the device. This happens * because object_unparent() will call object_property_del_child(), * which in turn calls the property release callback prop->release if diff --git a/hw/pci-host/pnv_phb3.c b/hw/pci-host/pnv_phb3.c index 7a21497cf8..c5e58f4086 100644 --- a/hw/pci-host/pnv_phb3.c +++ b/hw/pci-host/pnv_phb3.c @@ -757,7 +757,7 @@ static void pnv_phb3_translate_tve(PnvPhb3DMASpace *ds, hwaddr addr, * We only support non-translate in top window. 
* * TODO: Venice/Murano support it on bottom window above 4G and - * Naples suports it on everything + * Naples supports it on everything */ if (!(tve & PPC_BIT(51))) { phb3_error(phb, "xlate for invalid non-translate TVE"); diff --git a/hw/pci-host/pnv_phb3_msi.c b/hw/pci-host/pnv_phb3_msi.c index 41e63b066f..dc8d8637f2 100644 --- a/hw/pci-host/pnv_phb3_msi.c +++ b/hw/pci-host/pnv_phb3_msi.c @@ -281,7 +281,7 @@ static void phb3_msi_instance_init(Object *obj) object_property_allow_set_link, OBJ_PROP_LINK_STRONG); - /* Will be overriden later */ + /* Will be overridden later */ ics->offset = 0; } diff --git a/hw/pci-host/pnv_phb4.c b/hw/pci-host/pnv_phb4.c index 6232cbeee1..29cb11a5d9 100644 --- a/hw/pci-host/pnv_phb4.c +++ b/hw/pci-host/pnv_phb4.c @@ -207,7 +207,7 @@ static void pnv_phb4_check_mbt(PnvPHB4 *phb, uint32_t index) start = base | (phb->regs[PHB_M64_UPPER_BITS >> 3]); } - /* TODO: Figure out how to implemet/decode AOMASK */ + /* TODO: Figure out how to implement/decode AOMASK */ /* Check if it matches an enabled MMIO region in the PEC stack */ if (memory_region_is_mapped(&phb->mmbar0) && @@ -391,7 +391,7 @@ static void pnv_phb4_ioda_write(PnvPHB4 *phb, uint64_t val) case IODA3_TBL_MBT: *tptr = val; - /* Copy accross the valid bit to the other half */ + /* Copy across the valid bit to the other half */ phb->ioda_MBT[idx ^ 1] &= 0x7fffffffffffffffull; phb->ioda_MBT[idx ^ 1] |= 0x8000000000000000ull & val; @@ -1408,7 +1408,7 @@ static void pnv_phb4_msi_write(void *opaque, hwaddr addr, return; } - /* TODO: check PE/MSI assignement */ + /* TODO: check PE/MSI assignment */ qemu_irq_pulse(phb->qirqs[src]); } diff --git a/hw/pci/pcie_aer.c b/hw/pci/pcie_aer.c index 374d593ead..b68c7ecb49 100644 --- a/hw/pci/pcie_aer.c +++ b/hw/pci/pcie_aer.c @@ -324,7 +324,7 @@ static void pcie_aer_msg_root_port(PCIDevice *dev, const PCIEAERMsg *msg) * it isn't implemented in qemu right now. * So just discard the error for now. * OS which cares of aer would receive errors via - * native aer mechanims, so this wouldn't matter. + * native aer mechanisms, so this wouldn't matter. */ } diff --git a/hw/pci/shpc.c b/hw/pci/shpc.c index e7bc7192f1..df7f370111 100644 --- a/hw/pci/shpc.c +++ b/hw/pci/shpc.c @@ -615,7 +615,7 @@ int shpc_init(PCIDevice *d, PCIBus *sec_bus, MemoryRegion *bar, } if (nslots > SHPC_MAX_SLOTS || SHPC_IDX_TO_PCI(nslots) > PCI_SLOT_MAX) { - /* TODO: report an error mesage that makes sense. */ + /* TODO: report an error message that makes sense. */ return -EINVAL; } shpc->nslots = nslots; diff --git a/hw/ppc/meson.build b/hw/ppc/meson.build index a313d4b964..7c2c52434a 100644 --- a/hw/ppc/meson.build +++ b/hw/ppc/meson.build @@ -36,7 +36,6 @@ ppc_ss.add(when: ['CONFIG_PSERIES', 'CONFIG_TCG'], if_true: files( ppc_ss.add(when: 'CONFIG_SPAPR_RNG', if_true: files('spapr_rng.c')) ppc_ss.add(when: ['CONFIG_PSERIES', 'CONFIG_LINUX'], if_true: files( 'spapr_pci_vfio.c', - 'spapr_pci_nvlink2.c' )) # IBM PowerNV diff --git a/hw/ppc/ppc.c b/hw/ppc/ppc.c index aeb116d919..be167710a3 100644 --- a/hw/ppc/ppc.c +++ b/hw/ppc/ppc.c @@ -738,7 +738,7 @@ static target_ulong _cpu_ppc_load_decr(CPUPPCState *env, int64_t now) decr = __cpu_ppc_load_decr(env, now, tb_env->decr_next); /* - * If large decrementer is enabled then the decrementer is signed extened + * If large decrementer is enabled then the decrementer is signed extended * to 64 bits, otherwise it is a 32 bit value. 
+ * If large decrementer is enabled then the decrementer is signed extended
*/ if (env->spr[SPR_LPCR] & LPCR_LD) { diff --git a/hw/ppc/prep_systemio.c b/hw/ppc/prep_systemio.c index 5a56f155f5..c96cefb13d 100644 --- a/hw/ppc/prep_systemio.c +++ b/hw/ppc/prep_systemio.c @@ -39,7 +39,7 @@ #define TYPE_PREP_SYSTEMIO "prep-systemio" OBJECT_DECLARE_SIMPLE_TYPE(PrepSystemIoState, PREP_SYSTEMIO) -/* Bit as defined in PowerPC Reference Plaform v1.1, sect. 6.1.5, p. 132 */ +/* Bit as defined in PowerPC Reference Platform v1.1, sect. 6.1.5, p. 132 */ #define PREP_BIT(n) (1 << (7 - (n))) struct PrepSystemIoState { diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c index f7cc6a890f..1f1aa2a6d4 100644 --- a/hw/ppc/spapr.c +++ b/hw/ppc/spapr.c @@ -2573,7 +2573,7 @@ static void spapr_set_vsmt_mode(SpaprMachineState *spapr, Error **errp) return; } - /* Detemine the VSMT mode to use: */ + /* Determine the VSMT mode to use: */ if (vsmt_user) { if (spapr->vsmt < smp_threads) { error_setg(errp, "Cannot support VSMT mode %d" @@ -2875,8 +2875,6 @@ static void spapr_machine_init(MachineState *machine) /* init CPUs */ spapr_init_cpus(spapr); - spapr->gpu_numa_id = spapr_numa_initial_nvgpu_numa_id(machine); - /* Init numa_assoc_array */ spapr_numa_associativity_init(spapr, machine); @@ -3109,7 +3107,7 @@ static int spapr_kvm_type(MachineState *machine, const char *vm_type) { /* * The use of g_ascii_strcasecmp() for 'hv' and 'pr' is to - * accomodate the 'HV' and 'PV' formats that exists in the + * accommodate the 'HV' and 'PV' formats that exists in the * wild. The 'auto' mode is being introduced already as * lower-case, thus we don't need to bother checking for * "AUTO". @@ -4134,7 +4132,6 @@ static bool spapr_phb_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev, &sphb->buid, &sphb->io_win_addr, &sphb->mem_win_addr, &sphb->mem64_win_addr, windows_supported, sphb->dma_liobn, - &sphb->nv2_gpa_win_addr, &sphb->nv2_atsd_win_addr, errp); } @@ -4343,7 +4340,7 @@ spapr_cpu_index_to_props(MachineState *machine, unsigned cpu_index) CPUArchId *core_slot; MachineClass *mc = MACHINE_GET_CLASS(machine); - /* make sure possible_cpu are intialized */ + /* make sure possible_cpu are initialized */ mc->possible_cpu_arch_ids(machine); /* get CPU core slot containing thread that matches cpu_index */ core_slot = spapr_find_cpu_slot(machine, cpu_index, NULL); @@ -4397,8 +4394,7 @@ static const CPUArchIdList *spapr_possible_cpu_arch_ids(MachineState *machine) static bool spapr_phb_placement(SpaprMachineState *spapr, uint32_t index, uint64_t *buid, hwaddr *pio, hwaddr *mmio32, hwaddr *mmio64, - unsigned n_dma, uint32_t *liobns, - hwaddr *nv2gpa, hwaddr *nv2atsd, Error **errp) + unsigned n_dma, uint32_t *liobns, Error **errp) { /* * New-style PHB window placement. 
@@ -4443,9 +4439,6 @@ static bool spapr_phb_placement(SpaprMachineState *spapr, uint32_t index, *pio = SPAPR_PCI_BASE + index * SPAPR_PCI_IO_WIN_SIZE; *mmio32 = SPAPR_PCI_BASE + (index + 1) * SPAPR_PCI_MEM32_WIN_SIZE; *mmio64 = SPAPR_PCI_BASE + (index + 1) * SPAPR_PCI_MEM64_WIN_SIZE; - - *nv2gpa = SPAPR_PCI_NV2RAM64_WIN_BASE + index * SPAPR_PCI_NV2RAM64_WIN_SIZE; - *nv2atsd = SPAPR_PCI_NV2ATSD_WIN_BASE + index * SPAPR_PCI_NV2ATSD_WIN_SIZE; return true; } @@ -4963,16 +4956,12 @@ DEFINE_SPAPR_MACHINE(4_1, "4.1", false); static bool phb_placement_4_0(SpaprMachineState *spapr, uint32_t index, uint64_t *buid, hwaddr *pio, hwaddr *mmio32, hwaddr *mmio64, - unsigned n_dma, uint32_t *liobns, - hwaddr *nv2gpa, hwaddr *nv2atsd, Error **errp) + unsigned n_dma, uint32_t *liobns, Error **errp) { if (!spapr_phb_placement(spapr, index, buid, pio, mmio32, mmio64, n_dma, - liobns, nv2gpa, nv2atsd, errp)) { + liobns, errp)) { return false; } - - *nv2gpa = 0; - *nv2atsd = 0; return true; } static void spapr_machine_4_0_class_options(MachineClass *mc) @@ -5045,7 +5034,7 @@ static void spapr_machine_2_12_class_options(MachineClass *mc) /* We depend on kvm_enabled() to choose a default value for the * hpt-max-page-size capability. Of course we can't do it here - * because this is too early and the HW accelerator isn't initialzed + * because this is too early and the HW accelerator isn't initialized * yet. Postpone this to machine init (see default_caps_with_cpu()). */ smc->default_caps.caps[SPAPR_CAP_HPT_MAXPAGESIZE] = 0; @@ -5137,8 +5126,7 @@ DEFINE_SPAPR_MACHINE(2_8, "2.8", false); static bool phb_placement_2_7(SpaprMachineState *spapr, uint32_t index, uint64_t *buid, hwaddr *pio, hwaddr *mmio32, hwaddr *mmio64, - unsigned n_dma, uint32_t *liobns, - hwaddr *nv2gpa, hwaddr *nv2atsd, Error **errp) + unsigned n_dma, uint32_t *liobns, Error **errp) { /* Legacy PHB placement for pseries-2.7 and earlier machine types */ const uint64_t base_buid = 0x800000020000000ULL; @@ -5183,8 +5171,6 @@ static bool phb_placement_2_7(SpaprMachineState *spapr, uint32_t index, * window into contiguous 32-bit and 64-bit windows */ - *nv2gpa = 0; - *nv2atsd = 0; return true; } diff --git a/hw/ppc/spapr_hcall.c b/hw/ppc/spapr_hcall.c index b7dc388f2f..522a2396c7 100644 --- a/hw/ppc/spapr_hcall.c +++ b/hw/ppc/spapr_hcall.c @@ -1615,7 +1615,7 @@ static void hypercall_register_types(void) spapr_register_hypercall(H_GET_CPU_CHARACTERISTICS, h_get_cpu_characteristics); - /* "debugger" hcalls (also used by SLOF). Note: We do -not- differenciate + /* "debugger" hcalls (also used by SLOF). Note: We do -not- differentiate * here between the "CI" and the "CACHE" variants, they will use whatever * mapping attributes qemu is using. When using KVM, the kernel will * enforce the attributes more strongly diff --git a/hw/ppc/spapr_numa.c b/hw/ppc/spapr_numa.c index a64098c375..ea6762d3d2 100644 --- a/hw/ppc/spapr_numa.c +++ b/hw/ppc/spapr_numa.c @@ -109,20 +109,6 @@ static bool spapr_numa_is_symmetrical(MachineState *ms) } /* - * NVLink2-connected GPU RAM needs to be placed on a separate NUMA node. - * We assign a new numa ID per GPU in spapr_pci_collect_nvgpu() which is - * called from vPHB reset handler so we initialize the counter here. - * If no NUMA is configured from the QEMU side, we start from 1 as GPU RAM - * must be equally distant from any other node. - * The final value of spapr->gpu_numa_id is going to be written to - * max-associativity-domains in spapr_build_fdt(). 
- */ -unsigned int spapr_numa_initial_nvgpu_numa_id(MachineState *machine) -{ - return MAX(1, machine->numa_state->num_nodes); -} - -/* * This function will translate the user distances into * what the kernel understand as possible values: 10 * (local distance), 20, 40, 80 and 160, and return the equivalent @@ -277,7 +263,7 @@ static void spapr_numa_FORM1_affinity_init(SpaprMachineState *spapr, { SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr); int nb_numa_nodes = machine->numa_state->num_nodes; - int i, j, max_nodes_with_gpus; + int i, j; /* * For all associativity arrays: first position is the size, @@ -293,17 +279,7 @@ static void spapr_numa_FORM1_affinity_init(SpaprMachineState *spapr, spapr->FORM1_assoc_array[i][FORM1_DIST_REF_POINTS] = cpu_to_be32(i); } - /* - * Initialize NVLink GPU associativity arrays. We know that - * the first GPU will take the first available NUMA id, and - * we'll have a maximum of NVGPU_MAX_NUM GPUs in the machine. - * At this point we're not sure if there are GPUs or not, but - * let's initialize the associativity arrays and allow NVLink - * GPUs to be handled like regular NUMA nodes later on. - */ - max_nodes_with_gpus = nb_numa_nodes + NVGPU_MAX_NUM; - - for (i = nb_numa_nodes; i < max_nodes_with_gpus; i++) { + for (i = nb_numa_nodes; i < nb_numa_nodes; i++) { spapr->FORM1_assoc_array[i][0] = cpu_to_be32(FORM1_DIST_REF_POINTS); for (j = 1; j < FORM1_DIST_REF_POINTS; j++) { @@ -345,10 +321,6 @@ static void spapr_numa_FORM2_affinity_init(SpaprMachineState *spapr) * CPUs will write an additional 'vcpu_id' on top of the arrays * being initialized here. 'numa_id' is represented by the * index 'i' of the loop. - * - * Given that this initialization is also valid for GPU associativity - * arrays, handle everything in one single step by populating the - * arrays up to NUMA_NODES_MAX_NUM. */ for (i = 0; i < NUMA_NODES_MAX_NUM; i++) { spapr->FORM2_assoc_array[i][0] = cpu_to_be32(1); @@ -461,8 +433,6 @@ static void spapr_numa_FORM1_write_rtas_dt(SpaprMachineState *spapr, { MachineState *ms = MACHINE(spapr); SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr); - uint32_t number_nvgpus_nodes = spapr->gpu_numa_id - - spapr_numa_initial_nvgpu_numa_id(ms); uint32_t refpoints[] = { cpu_to_be32(0x4), cpu_to_be32(0x3), @@ -470,7 +440,7 @@ static void spapr_numa_FORM1_write_rtas_dt(SpaprMachineState *spapr, cpu_to_be32(0x1), }; uint32_t nr_refpoints = ARRAY_SIZE(refpoints); - uint32_t maxdomain = ms->numa_state->num_nodes + number_nvgpus_nodes; + uint32_t maxdomain = ms->numa_state->num_nodes; uint32_t maxdomains[] = { cpu_to_be32(4), cpu_to_be32(maxdomain), @@ -486,13 +456,12 @@ static void spapr_numa_FORM1_write_rtas_dt(SpaprMachineState *spapr, cpu_to_be32(0x4), cpu_to_be32(0x2), }; - uint32_t legacy_maxdomain = spapr->gpu_numa_id > 1 ? 1 : 0; uint32_t legacy_maxdomains[] = { cpu_to_be32(4), - cpu_to_be32(legacy_maxdomain), - cpu_to_be32(legacy_maxdomain), - cpu_to_be32(legacy_maxdomain), - cpu_to_be32(spapr->gpu_numa_id), + cpu_to_be32(0), + cpu_to_be32(0), + cpu_to_be32(0), + cpu_to_be32(maxdomain ? 
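+        /* keep at least one domain even when no NUMA nodes are configured */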
maxdomain : 1), }; G_STATIC_ASSERT(sizeof(legacy_refpoints) <= sizeof(refpoints)); @@ -581,8 +550,6 @@ static void spapr_numa_FORM2_write_rtas_dt(SpaprMachineState *spapr, void *fdt, int rtas) { MachineState *ms = MACHINE(spapr); - uint32_t number_nvgpus_nodes = spapr->gpu_numa_id - - spapr_numa_initial_nvgpu_numa_id(ms); /* * In FORM2, ibm,associativity-reference-points will point to @@ -596,7 +563,7 @@ static void spapr_numa_FORM2_write_rtas_dt(SpaprMachineState *spapr, */ uint32_t refpoints[] = { cpu_to_be32(1) }; - uint32_t maxdomain = ms->numa_state->num_nodes + number_nvgpus_nodes; + uint32_t maxdomain = ms->numa_state->num_nodes; uint32_t maxdomains[] = { cpu_to_be32(1), cpu_to_be32(maxdomain) }; _FDT(fdt_setprop(fdt, rtas, "ibm,associativity-reference-points", diff --git a/hw/ppc/spapr_nvdimm.c b/hw/ppc/spapr_nvdimm.c index a8688243a6..b2f009c816 100644 --- a/hw/ppc/spapr_nvdimm.c +++ b/hw/ppc/spapr_nvdimm.c @@ -320,7 +320,8 @@ static target_ulong h_scm_write_metadata(PowerPCCPU *cpu, nvdimm = NVDIMM(drc->dev); if ((offset + len < offset) || - (nvdimm->label_size < len + offset)) { + (nvdimm->label_size < len + offset) || + nvdimm->readonly) { return H_P2; } @@ -377,7 +378,7 @@ static target_ulong h_scm_bind_mem(PowerPCCPU *cpu, SpaprMachineState *spapr, /* * Currently continue token should be zero qemu has already bound - * everything and this hcall doesnt return H_BUSY. + * everything and this hcall doesn't return H_BUSY. */ if (continue_token > 0) { return H_P5; @@ -588,7 +589,7 @@ void spapr_nvdimm_finish_flushes(void) * Called on reset path, the main loop thread which calls * the pending BHs has gotten out running in the reset path, * finally reaching here. Other code path being guest - * h_client_architecture_support, thats early boot up. + * h_client_architecture_support, that's early boot up. 
      */
     nvdimms = nvdimm_get_device_list();
     for (list = nvdimms; list; list = list->next) {
diff --git a/hw/ppc/spapr_pci.c b/hw/ppc/spapr_pci.c
index 75aacda65a..ce14959317 100644
--- a/hw/ppc/spapr_pci.c
+++ b/hw/ppc/spapr_pci.c
@@ -1443,8 +1443,6 @@ static int spapr_dt_pci_device(SpaprPhbState *sphb, PCIDevice *dev,
         _FDT(fdt_setprop_cell(fdt, offset, "ibm,pci-config-space-type", 0x1));
     }
 
-    spapr_phb_nvgpu_populate_pcidev_dt(dev, fdt, offset, sphb);
-
     if (!IS_PCI_BRIDGE(dev)) {
         /* Properties only for non-bridges */
         uint32_t min_grant = pci_default_read_config(dev, PCI_MIN_GNT, 1);
@@ -1757,8 +1755,6 @@ static void spapr_phb_unrealize(DeviceState *dev)
     int i;
     const unsigned windows_supported = spapr_phb_windows_supported(sphb);
 
-    spapr_phb_nvgpu_free(sphb);
-
     if (sphb->msi) {
         g_hash_table_unref(sphb->msi);
         sphb->msi = NULL;
@@ -2069,14 +2065,8 @@ void spapr_phb_dma_reset(SpaprPhbState *sphb)
 static void spapr_phb_reset(DeviceState *qdev)
 {
     SpaprPhbState *sphb = SPAPR_PCI_HOST_BRIDGE(qdev);
-    Error *err = NULL;
 
     spapr_phb_dma_reset(sphb);
-    spapr_phb_nvgpu_free(sphb);
-    spapr_phb_nvgpu_setup(sphb, &err);
-    if (err) {
-        error_report_err(err);
-    }
 
     /* Reset the IOMMU state */
     object_child_foreach(OBJECT(qdev), spapr_phb_children_reset, NULL);
@@ -2112,8 +2102,6 @@ static Property spapr_phb_properties[] = {
                      pre_2_8_migration, false),
     DEFINE_PROP_BOOL("pcie-extended-configuration-space", SpaprPhbState,
                      pcie_ecs, true),
-    DEFINE_PROP_UINT64("gpa", SpaprPhbState, nv2_gpa_win_addr, 0),
-    DEFINE_PROP_UINT64("atsd", SpaprPhbState, nv2_atsd_win_addr, 0),
     DEFINE_PROP_BOOL("pre-5.1-associativity", SpaprPhbState,
                      pre_5_1_assoc, false),
     DEFINE_PROP_END_OF_LIST(),
@@ -2362,7 +2350,6 @@ int spapr_dt_phb(SpaprMachineState *spapr, SpaprPhbState *phb,
     };
     SpaprTceTable *tcet;
     SpaprDrc *drc;
-    Error *err = NULL;
 
     /* Start populating the FDT */
     _FDT(bus_off = fdt_add_subnode(fdt, 0, phb->dtbusname));
@@ -2443,12 +2430,6 @@ int spapr_dt_phb(SpaprMachineState *spapr, SpaprPhbState *phb,
         return ret;
     }
 
-    spapr_phb_nvgpu_populate_dt(phb, fdt, bus_off, &err);
-    if (err) {
-        error_report_err(err);
-    }
-    spapr_phb_nvgpu_ram_populate_dt(phb, fdt);
-
     return 0;
 }
 
diff --git a/hw/ppc/spapr_pci_nvlink2.c b/hw/ppc/spapr_pci_nvlink2.c
deleted file mode 100644
index 2a8a11be1d..0000000000
--- a/hw/ppc/spapr_pci_nvlink2.c
+++ /dev/null
@@ -1,442 +0,0 @@
-/*
- * QEMU sPAPR PCI for NVLink2 pass through
- *
- * Copyright (c) 2019 Alexey Kardashevskiy, IBM Corporation.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- */
-#include "qemu/osdep.h"
-#include "qapi/error.h"
-#include "hw/pci/pci.h"
-#include "hw/pci-host/spapr.h"
-#include "hw/ppc/spapr_numa.h"
-#include "qemu/error-report.h"
-#include "hw/ppc/fdt.h"
-#include "hw/pci/pci_bridge.h"
-
-#define PHANDLE_PCIDEV(phb, pdev)    (0x12000000 | \
-                                     (((phb)->index) << 16) | ((pdev)->devfn))
-#define PHANDLE_GPURAM(phb, n)       (0x110000FF | ((n) << 8) | \
-                                     (((phb)->index) << 16))
-#define PHANDLE_NVLINK(phb, gn, nn)  (0x00130000 | (((phb)->index) << 8) | \
-                                     ((gn) << 4) | (nn))
-
-typedef struct SpaprPhbPciNvGpuSlot {
-    uint64_t tgt;
-    uint64_t gpa;
-    unsigned numa_id;
-    PCIDevice *gpdev;
-    int linknum;
-    struct {
-        uint64_t atsd_gpa;
-        PCIDevice *npdev;
-        uint32_t link_speed;
-    } links[NVGPU_MAX_LINKS];
-} SpaprPhbPciNvGpuSlot;
-
-struct SpaprPhbPciNvGpuConfig {
-    uint64_t nv2_ram_current;
-    uint64_t nv2_atsd_current;
-    int num; /* number of non empty (i.e. tgt!=0) entries in slots[] */
-    SpaprPhbPciNvGpuSlot slots[NVGPU_MAX_NUM];
-    Error *err;
-};
-
-static SpaprPhbPciNvGpuSlot *
-spapr_nvgpu_get_slot(SpaprPhbPciNvGpuConfig *nvgpus, uint64_t tgt)
-{
-    int i;
-
-    /* Search for partially collected "slot" */
-    for (i = 0; i < nvgpus->num; ++i) {
-        if (nvgpus->slots[i].tgt == tgt) {
-            return &nvgpus->slots[i];
-        }
-    }
-
-    if (nvgpus->num == ARRAY_SIZE(nvgpus->slots)) {
-        return NULL;
-    }
-
-    i = nvgpus->num;
-    nvgpus->slots[i].tgt = tgt;
-    ++nvgpus->num;
-
-    return &nvgpus->slots[i];
-}
-
-static void spapr_pci_collect_nvgpu(SpaprPhbPciNvGpuConfig *nvgpus,
-                                    PCIDevice *pdev, uint64_t tgt,
-                                    MemoryRegion *mr, Error **errp)
-{
-    MachineState *machine = MACHINE(qdev_get_machine());
-    SpaprMachineState *spapr = SPAPR_MACHINE(machine);
-    SpaprPhbPciNvGpuSlot *nvslot = spapr_nvgpu_get_slot(nvgpus, tgt);
-
-    if (!nvslot) {
-        error_setg(errp, "Found too many GPUs per vPHB");
-        return;
-    }
-    g_assert(!nvslot->gpdev);
-    nvslot->gpdev = pdev;
-
-    nvslot->gpa = nvgpus->nv2_ram_current;
-    nvgpus->nv2_ram_current += memory_region_size(mr);
-    nvslot->numa_id = spapr->gpu_numa_id;
-    ++spapr->gpu_numa_id;
-}
-
-static void spapr_pci_collect_nvnpu(SpaprPhbPciNvGpuConfig *nvgpus,
-                                    PCIDevice *pdev, uint64_t tgt,
-                                    MemoryRegion *mr, Error **errp)
-{
-    SpaprPhbPciNvGpuSlot *nvslot = spapr_nvgpu_get_slot(nvgpus, tgt);
-    int j;
-
-    if (!nvslot) {
-        error_setg(errp, "Found too many NVLink bridges per vPHB");
-        return;
-    }
-
-    j = nvslot->linknum;
-    if (j == ARRAY_SIZE(nvslot->links)) {
-        error_setg(errp, "Found too many NVLink bridges per GPU");
-        return;
-    }
-    ++nvslot->linknum;
-
-    g_assert(!nvslot->links[j].npdev);
-    nvslot->links[j].npdev = pdev;
-    nvslot->links[j].atsd_gpa = nvgpus->nv2_atsd_current;
-    nvgpus->nv2_atsd_current += memory_region_size(mr);
-    nvslot->links[j].link_speed =
-        object_property_get_uint(OBJECT(pdev), "nvlink2-link-speed", NULL);
-}
-
-static void spapr_phb_pci_collect_nvgpu(PCIBus *bus, PCIDevice *pdev,
-                                        void *opaque)
-{
-    PCIBus *sec_bus;
-    Object *po = OBJECT(pdev);
-    uint64_t tgt = object_property_get_uint(po, "nvlink2-tgt", NULL);
-
-    if (tgt) {
-        Error *local_err = NULL;
-        SpaprPhbPciNvGpuConfig *nvgpus = opaque;
-        Object *mr_gpu = object_property_get_link(po, "nvlink2-mr[0]", NULL);
-        Object *mr_npu = object_property_get_link(po, "nvlink2-atsd-mr[0]",
-                                                  NULL);
-
-        g_assert(mr_gpu || mr_npu);
-        if (mr_gpu) {
-            spapr_pci_collect_nvgpu(nvgpus, pdev, tgt, MEMORY_REGION(mr_gpu),
-                                    &local_err);
-        } else {
-            spapr_pci_collect_nvnpu(nvgpus, pdev, tgt, MEMORY_REGION(mr_npu),
-                                    &local_err);
-        }
-        error_propagate(&nvgpus->err, local_err);
-    }
-    if ((pci_default_read_config(pdev, PCI_HEADER_TYPE, 1) !=
-         PCI_HEADER_TYPE_BRIDGE)) {
-        return;
-    }
-
-    sec_bus = pci_bridge_get_sec_bus(PCI_BRIDGE(pdev));
-    if (!sec_bus) {
-        return;
-    }
-
-    pci_for_each_device_under_bus(sec_bus, spapr_phb_pci_collect_nvgpu,
-                                  opaque);
-}
-
-void spapr_phb_nvgpu_setup(SpaprPhbState *sphb, Error **errp)
-{
-    int i, j, valid_gpu_num;
-    PCIBus *bus;
-
-    /* Search for GPUs and NPUs */
-    if (!sphb->nv2_gpa_win_addr || !sphb->nv2_atsd_win_addr) {
-        return;
-    }
-
-    sphb->nvgpus = g_new0(SpaprPhbPciNvGpuConfig, 1);
-    sphb->nvgpus->nv2_ram_current = sphb->nv2_gpa_win_addr;
-    sphb->nvgpus->nv2_atsd_current = sphb->nv2_atsd_win_addr;
-
-    bus = PCI_HOST_BRIDGE(sphb)->bus;
-    pci_for_each_device_under_bus(bus, spapr_phb_pci_collect_nvgpu,
-                                  sphb->nvgpus);
-
-    if (sphb->nvgpus->err) {
-        error_propagate(errp, sphb->nvgpus->err);
-        sphb->nvgpus->err = NULL;
-        goto cleanup_exit;
-    }
-
-    /* Add found GPU RAM and ATSD MRs if found */
-    for (i = 0, valid_gpu_num = 0; i < sphb->nvgpus->num; ++i) {
-        Object *nvmrobj;
-        SpaprPhbPciNvGpuSlot *nvslot = &sphb->nvgpus->slots[i];
-
-        if (!nvslot->gpdev) {
-            continue;
-        }
-        nvmrobj = object_property_get_link(OBJECT(nvslot->gpdev),
-                                           "nvlink2-mr[0]", NULL);
-        /* ATSD is pointless without GPU RAM MR so skip those */
-        if (!nvmrobj) {
-            continue;
-        }
-
-        ++valid_gpu_num;
-        memory_region_add_subregion(get_system_memory(), nvslot->gpa,
-                                    MEMORY_REGION(nvmrobj));
-
-        for (j = 0; j < nvslot->linknum; ++j) {
-            Object *atsdmrobj;
-
-            atsdmrobj = object_property_get_link(OBJECT(nvslot->links[j].npdev),
-                                                 "nvlink2-atsd-mr[0]", NULL);
-            if (!atsdmrobj) {
-                continue;
-            }
-            memory_region_add_subregion(get_system_memory(),
-                                        nvslot->links[j].atsd_gpa,
-                                        MEMORY_REGION(atsdmrobj));
-        }
-    }
-
-    if (valid_gpu_num) {
-        return;
-    }
-    /* We did not find any interesting GPU */
-cleanup_exit:
-    g_free(sphb->nvgpus);
-    sphb->nvgpus = NULL;
-}
-
-void spapr_phb_nvgpu_free(SpaprPhbState *sphb)
-{
-    int i, j;
-
-    if (!sphb->nvgpus) {
-        return;
-    }
-
-    for (i = 0; i < sphb->nvgpus->num; ++i) {
-        SpaprPhbPciNvGpuSlot *nvslot = &sphb->nvgpus->slots[i];
-        Object *nv_mrobj = object_property_get_link(OBJECT(nvslot->gpdev),
-                                                    "nvlink2-mr[0]", NULL);
-
-        if (nv_mrobj) {
-            memory_region_del_subregion(get_system_memory(),
-                                        MEMORY_REGION(nv_mrobj));
-        }
-        for (j = 0; j < nvslot->linknum; ++j) {
-            PCIDevice *npdev = nvslot->links[j].npdev;
-            Object *atsd_mrobj;
-            atsd_mrobj = object_property_get_link(OBJECT(npdev),
-                                                  "nvlink2-atsd-mr[0]", NULL);
-            if (atsd_mrobj) {
-                memory_region_del_subregion(get_system_memory(),
-                                            MEMORY_REGION(atsd_mrobj));
-            }
-        }
-    }
-    g_free(sphb->nvgpus);
-    sphb->nvgpus = NULL;
-}
-
-void spapr_phb_nvgpu_populate_dt(SpaprPhbState *sphb, void *fdt, int bus_off,
-                                 Error **errp)
-{
-    int i, j, atsdnum = 0;
-    uint64_t atsd[8]; /* The existing limitation of known guests */
-
-    if (!sphb->nvgpus) {
-        return;
-    }
-
-    for (i = 0; (i < sphb->nvgpus->num) && (atsdnum < ARRAY_SIZE(atsd)); ++i) {
-        SpaprPhbPciNvGpuSlot *nvslot = &sphb->nvgpus->slots[i];
-
-        if (!nvslot->gpdev) {
-            continue;
-        }
-        for (j = 0; j < nvslot->linknum; ++j) {
-            if (!nvslot->links[j].atsd_gpa) {
-                continue;
-            }
-
-            if (atsdnum == ARRAY_SIZE(atsd)) {
-                error_report("Only %"PRIuPTR" ATSD registers supported",
-                             ARRAY_SIZE(atsd));
-                break;
-            }
-            atsd[atsdnum] = cpu_to_be64(nvslot->links[j].atsd_gpa);
-            ++atsdnum;
-        }
-    }
-
-    if (!atsdnum) {
-        error_setg(errp, "No ATSD registers found");
-        return;
-    }
-
-    if (!spapr_phb_eeh_available(sphb)) {
-        /*
-         * ibm,mmio-atsd contains ATSD registers; these belong to an NPU PHB
-         * which we do not emulate as a separate device. Instead we put
-         * ibm,mmio-atsd to the vPHB with GPU and make sure that we do not
-         * put GPUs from different IOMMU groups to the same vPHB to ensure
-         * that the guest will use ATSDs from the corresponding NPU.
-         */
-        error_setg(errp, "ATSD requires separate vPHB per GPU IOMMU group");
-        return;
-    }
-
-    _FDT((fdt_setprop(fdt, bus_off, "ibm,mmio-atsd", atsd,
-                      atsdnum * sizeof(atsd[0]))));
-}
-
-void spapr_phb_nvgpu_ram_populate_dt(SpaprPhbState *sphb, void *fdt)
-{
-    int i, j, linkidx, npuoff;
-    g_autofree char *npuname = NULL;
-
-    if (!sphb->nvgpus) {
-        return;
-    }
-
-    npuname = g_strdup_printf("npuphb%d", sphb->index);
-    npuoff = fdt_add_subnode(fdt, 0, npuname);
-    _FDT(npuoff);
-    _FDT(fdt_setprop_cell(fdt, npuoff, "#address-cells", 1));
-    _FDT(fdt_setprop_cell(fdt, npuoff, "#size-cells", 0));
-    /* Advertise NPU as POWER9 so the guest can enable NPU2 contexts */
-    _FDT((fdt_setprop_string(fdt, npuoff, "compatible", "ibm,power9-npu")));
-
-    for (i = 0, linkidx = 0; i < sphb->nvgpus->num; ++i) {
-        for (j = 0; j < sphb->nvgpus->slots[i].linknum; ++j) {
-            g_autofree char *linkname = g_strdup_printf("link@%d", linkidx);
-            int off = fdt_add_subnode(fdt, npuoff, linkname);
-
-            _FDT(off);
-            /* _FDT((fdt_setprop_cell(fdt, off, "reg", linkidx))); */
-            _FDT((fdt_setprop_string(fdt, off, "compatible",
-                                     "ibm,npu-link")));
-            _FDT((fdt_setprop_cell(fdt, off, "phandle",
-                                   PHANDLE_NVLINK(sphb, i, j))));
-            _FDT((fdt_setprop_cell(fdt, off, "ibm,npu-link-index", linkidx)));
-            ++linkidx;
-        }
-    }
-
-    /* Add memory nodes for GPU RAM and mark them unusable */
-    for (i = 0; i < sphb->nvgpus->num; ++i) {
-        SpaprPhbPciNvGpuSlot *nvslot = &sphb->nvgpus->slots[i];
-        Object *nv_mrobj = object_property_get_link(OBJECT(nvslot->gpdev),
-                                                    "nvlink2-mr[0]",
-                                                    &error_abort);
-        uint64_t size = object_property_get_uint(nv_mrobj, "size", NULL);
-        uint64_t mem_reg[2] = { cpu_to_be64(nvslot->gpa), cpu_to_be64(size) };
-        g_autofree char *mem_name = g_strdup_printf("memory@%"PRIx64,
-                                                    nvslot->gpa);
-        int off = fdt_add_subnode(fdt, 0, mem_name);
-
-        _FDT(off);
-        _FDT((fdt_setprop_string(fdt, off, "device_type", "memory")));
-        _FDT((fdt_setprop(fdt, off, "reg", mem_reg, sizeof(mem_reg))));
-
-        spapr_numa_write_associativity_dt(SPAPR_MACHINE(qdev_get_machine()),
-                                          fdt, off, nvslot->numa_id);
-
-        _FDT((fdt_setprop_string(fdt, off, "compatible",
-                                 "ibm,coherent-device-memory")));
-
-        mem_reg[1] = cpu_to_be64(0);
-        _FDT((fdt_setprop(fdt, off, "linux,usable-memory", mem_reg,
-                          sizeof(mem_reg))));
-        _FDT((fdt_setprop_cell(fdt, off, "phandle",
-                               PHANDLE_GPURAM(sphb, i))));
-    }
-
-}
-
-void spapr_phb_nvgpu_populate_pcidev_dt(PCIDevice *dev, void *fdt, int offset,
-                                        SpaprPhbState *sphb)
-{
-    int i, j;
-
-    if (!sphb->nvgpus) {
-        return;
-    }
-
-    for (i = 0; i < sphb->nvgpus->num; ++i) {
-        SpaprPhbPciNvGpuSlot *nvslot = &sphb->nvgpus->slots[i];
-
-        /* Skip "slot" without attached GPU */
-        if (!nvslot->gpdev) {
-            continue;
-        }
-        if (dev == nvslot->gpdev) {
-            g_autofree uint32_t *npus = g_new(uint32_t, nvslot->linknum);
-
-            for (j = 0; j < nvslot->linknum; ++j) {
-                PCIDevice *npdev = nvslot->links[j].npdev;
-
-                npus[j] = cpu_to_be32(PHANDLE_PCIDEV(sphb, npdev));
-            }
-            _FDT(fdt_setprop(fdt, offset, "ibm,npu", npus,
-                             j * sizeof(npus[0])));
-            _FDT((fdt_setprop_cell(fdt, offset, "phandle",
-                                   PHANDLE_PCIDEV(sphb, dev))));
-            continue;
-        }
-
-        for (j = 0; j < nvslot->linknum; ++j) {
-            if (dev != nvslot->links[j].npdev) {
-                continue;
-            }
-
-            _FDT((fdt_setprop_cell(fdt, offset, "phandle",
-                                   PHANDLE_PCIDEV(sphb, dev))));
-            _FDT(fdt_setprop_cell(fdt, offset, "ibm,gpu",
-                                  PHANDLE_PCIDEV(sphb, nvslot->gpdev)));
-            _FDT((fdt_setprop_cell(fdt, offset, "ibm,nvlink",
-                                   PHANDLE_NVLINK(sphb, i, j))));
-            /*
-             * If we ever want to emulate GPU RAM at the same location as on
-             * the host - here is the encoding GPA->TGT:
-             *
-             * gta  = ((sphb->nv2_gpa >> 42) & 0x1) << 42;
-             * gta |= ((sphb->nv2_gpa >> 45) & 0x3) << 43;
-             * gta |= ((sphb->nv2_gpa >> 49) & 0x3) << 45;
-             * gta |= sphb->nv2_gpa & ((1UL << 43) - 1);
-             */
-            _FDT(fdt_setprop_cell(fdt, offset, "memory-region",
-                                  PHANDLE_GPURAM(sphb, i)));
-            _FDT(fdt_setprop_u64(fdt, offset, "ibm,device-tgt-addr",
-                                 nvslot->tgt));
-            _FDT(fdt_setprop_cell(fdt, offset, "ibm,nvlink-speed",
-                                  nvslot->links[j].link_speed));
-        }
-    }
-}
diff --git a/hw/ppc/spapr_pci_vfio.c b/hw/ppc/spapr_pci_vfio.c
index d8aeee0b7e..9016720547 100644
--- a/hw/ppc/spapr_pci_vfio.c
+++ b/hw/ppc/spapr_pci_vfio.c
@@ -78,7 +78,7 @@ int spapr_phb_vfio_eeh_set_option(SpaprPhbState *sphb,
      * call. Now we just need to check the validity of the PCI
      * pass-through devices (vfio-pci) under this sphb bus.
      * We have already validated that all the devices under this sphb
-     * are from same iommu group (within same PE) before comming here.
+     * are from same iommu group (within same PE) before coming here.
      *
      * Prior to linux commit 98ba956f6a389 ("powerpc/pseries/eeh:
      * Rework device EEH PE determination") kernel would call
diff --git a/hw/rtc/exynos4210_rtc.c b/hw/rtc/exynos4210_rtc.c
index 2b8a38a296..cc7101c530 100644
--- a/hw/rtc/exynos4210_rtc.c
+++ b/hw/rtc/exynos4210_rtc.c
@@ -202,7 +202,7 @@ static void exynos4210_rtc_update_freq(Exynos4210RTCState *s,
     uint32_t freq;
 
     freq = s->freq;
-    /* set frequncy for time generator */
+    /* set frequency for time generator */
     s->freq = RTC_BASE_FREQ / (1 << TICCKSEL(reg_value));
 
     if (freq != s->freq) {
diff --git a/hw/rx/rx62n.c b/hw/rx/rx62n.c
index 3e887a0fc7..d00fcb0ef0 100644
--- a/hw/rx/rx62n.c
+++ b/hw/rx/rx62n.c
@@ -114,7 +114,7 @@ static const uint8_t ipr_table[NR_IRQS] = {
 };
 
 /*
- * Level triggerd IRQ list
+ * Level triggered IRQ list
  * Not listed IRQ is Edge trigger.
  * See "11.3.1 Interrupt Vector Table" in hardware manual.
  */
diff --git a/hw/scsi/lsi53c895a.c b/hw/scsi/lsi53c895a.c
index f7d45b0b20..634ed49c2e 100644
--- a/hw/scsi/lsi53c895a.c
+++ b/hw/scsi/lsi53c895a.c
@@ -1321,7 +1321,7 @@ again:
         }
         trace_lsi_execute_script_io_selected(id,
                                              insn & (1 << 3) ? " ATN" : "");
-        /* ??? Linux drivers compain when this is set. Maybe
+        /* ??? Linux drivers complain when this is set. Maybe
           it only applies in low-level mode (unimplemented).
           lsi_script_scsi_interrupt(s, LSI_SIST0_CMP, 0); */
         s->select_tag = id << 8;
diff --git a/hw/scsi/mfi.h b/hw/scsi/mfi.h
index 0b4ee53dfc..cf7a2d775b 100644
--- a/hw/scsi/mfi.h
+++ b/hw/scsi/mfi.h
@@ -65,7 +65,7 @@
 #define MFI_IQPH 0xc4 /* Inbound queue port (high bytes) */
 #define MFI_DIAG 0xf8 /* Host diag */
 #define MFI_SEQ 0xfc /* Sequencer offset */
-#define MFI_1078_EIM 0x80000004 /* 1078 enable intrrupt mask */
+#define MFI_1078_EIM 0x80000004 /* 1078 enable interrupt mask */
 #define MFI_RMI 0x2 /* reply message interrupt */
 #define MFI_1078_RM 0x80000000 /* reply 1078 message interrupt */
 #define MFI_ODC 0x4 /* outbound doorbell change interrupt */
diff --git a/hw/sh4/sh7750_regs.h b/hw/sh4/sh7750_regs.h
index 94043431e6..edb5d18f00 100644
--- a/hw/sh4/sh7750_regs.h
+++ b/hw/sh4/sh7750_regs.h
@@ -113,7 +113,7 @@
 #define SH7750_TTB SH7750_P4_REG32(SH7750_TTB_REGOFS)
 #define SH7750_TTB_A7 SH7750_A7_REG32(SH7750_TTB_REGOFS)
 
-/* TLB exeption address register - TEA */
+/* TLB exception address register - TEA */
 #define SH7750_TEA_REGOFS 0x00000c /* offset */
 #define SH7750_TEA SH7750_P4_REG32(SH7750_TEA_REGOFS)
 #define SH7750_TEA_A7 SH7750_A7_REG32(SH7750_TEA_REGOFS)
@@ -183,19 +183,19 @@
 #define SH7750_TRA_IMM 0x000003fd /* Immediate data operand */
 #define SH7750_TRA_IMM_S 2
 
-/* Exeption event register - EXPEVT */
+/* Exception event register - EXPEVT */
 #define SH7750_EXPEVT_REGOFS 0x000024
 #define SH7750_EXPEVT SH7750_P4_REG32(SH7750_EXPEVT_REGOFS)
 #define SH7750_EXPEVT_A7 SH7750_A7_REG32(SH7750_EXPEVT_REGOFS)
-#define SH7750_EXPEVT_EX 0x00000fff /* Exeption code */
+#define SH7750_EXPEVT_EX 0x00000fff /* Exception code */
 #define SH7750_EXPEVT_EX_S 0
 
 /* Interrupt event register */
 #define SH7750_INTEVT_REGOFS 0x000028
 #define SH7750_INTEVT SH7750_P4_REG32(SH7750_INTEVT_REGOFS)
 #define SH7750_INTEVT_A7 SH7750_A7_REG32(SH7750_INTEVT_REGOFS)
-#define SH7750_INTEVT_EX 0x00000fff /* Exeption code */
+#define SH7750_INTEVT_EX 0x00000fff /* Exception code */
 #define SH7750_INTEVT_EX_S 0
 
 /*
@@ -1274,15 +1274,15 @@
 /*
  * User Break Controller registers
  */
-#define SH7750_BARA 0x200000 /* Break address regiser A */
-#define SH7750_BAMRA 0x200004 /* Break address mask regiser A */
-#define SH7750_BBRA 0x200008 /* Break bus cycle regiser A */
-#define SH7750_BARB 0x20000c /* Break address regiser B */
-#define SH7750_BAMRB 0x200010 /* Break address mask regiser B */
-#define SH7750_BBRB 0x200014 /* Break bus cycle regiser B */
-#define SH7750_BASRB 0x000018 /* Break ASID regiser B */
-#define SH7750_BDRB 0x200018 /* Break data regiser B */
-#define SH7750_BDMRB 0x20001c /* Break data mask regiser B */
+#define SH7750_BARA 0x200000 /* Break address register A */
+#define SH7750_BAMRA 0x200004 /* Break address mask register A */
+#define SH7750_BBRA 0x200008 /* Break bus cycle register A */
+#define SH7750_BARB 0x20000c /* Break address register B */
+#define SH7750_BAMRB 0x200010 /* Break address mask register B */
+#define SH7750_BBRB 0x200014 /* Break bus cycle register B */
+#define SH7750_BASRB 0x000018 /* Break ASID register B */
+#define SH7750_BDRB 0x200018 /* Break data register B */
+#define SH7750_BDMRB 0x20001c /* Break data mask register B */
 #define SH7750_BRCR 0x200020 /* Break control register */
 #define SH7750_BRCR_UDBE 0x0001 /* User break debug enable bit */
diff --git a/hw/smbios/smbios.c b/hw/smbios/smbios.c
index 10cd22f610..b753705856 100644
--- a/hw/smbios/smbios.c
+++ b/hw/smbios/smbios.c
@@ -1110,7 +1110,7 @@ void smbios_get_tables(MachineState *ms,
         dimm_cnt = QEMU_ALIGN_UP(current_machine->ram_size, MAX_DIMM_SZ) /
                    MAX_DIMM_SZ;
         /*
-         * The offset determines if we need to keep additional space betweeen
+         * The offset determines if we need to keep additional space between
          * table 17 and table 19 header handle numbers so that they do
          * not overlap. For example, for a VM with larger than 8 TB guest
          * memory and DIMM like chunks of 16 GiB, the default space between
diff --git a/hw/ssi/xilinx_spips.c b/hw/ssi/xilinx_spips.c
index 97009d3a5d..a3955c6c50 100644
--- a/hw/ssi/xilinx_spips.c
+++ b/hw/ssi/xilinx_spips.c
@@ -163,7 +163,7 @@
     FIELD(GQSPI_CNFG, ENDIAN, 26, 1)
     /* Poll timeout not implemented */
     FIELD(GQSPI_CNFG, EN_POLL_TIMEOUT, 20, 1)
-    /* QEMU doesnt care about any of these last three */
+    /* QEMU doesn't care about any of these last three */
     FIELD(GQSPI_CNFG, BR, 3, 3)
     FIELD(GQSPI_CNFG, CPH, 2, 1)
     FIELD(GQSPI_CNFG, CPL, 1, 1)
@@ -469,7 +469,7 @@ static void xlnx_zynqmp_qspips_flush_fifo_g(XlnxZynqMPQSPIPS *s)
         imm = ARRAY_FIELD_EX32(s->regs, GQSPI_GF_SNAPSHOT, IMMEDIATE_DATA);
         if (!ARRAY_FIELD_EX32(s->regs, GQSPI_GF_SNAPSHOT, DATA_XFER)) {
-            /* immedate transfer */
+            /* immediate transfer */
             if (ARRAY_FIELD_EX32(s->regs, GQSPI_GF_SNAPSHOT, TRANSMIT) ||
                 ARRAY_FIELD_EX32(s->regs, GQSPI_GF_SNAPSHOT, RECIEVE)) {
                 s->regs[R_GQSPI_DATA_STS] = 1;
@@ -768,7 +768,7 @@ static void xilinx_spips_check_zero_pump(XilinxSPIPS *s)
      */
     while (s->regs[R_TRANSFER_SIZE] &&
            s->rx_fifo.num + s->tx_fifo.num < RXFF_A_Q - 3) {
-        /* endianess just doesn't matter when zero pumping */
+        /* endianness just doesn't matter when zero pumping */
         tx_data_bytes(&s->tx_fifo, 0, 4, false);
         s->regs[R_TRANSFER_SIZE] &= ~0x03ull;
         s->regs[R_TRANSFER_SIZE] -= 4;
diff --git a/hw/ssi/xlnx-versal-ospi.c b/hw/ssi/xlnx-versal-ospi.c
index c762e0b367..1a61679c2f 100644
--- a/hw/ssi/xlnx-versal-ospi.c
+++ b/hw/ssi/xlnx-versal-ospi.c
@@ -837,7 +837,7 @@ static void ospi_do_ind_read(XlnxVersalOspi *s)
 
     /* Continue to read flash until we run out of space in sram */
     while (!ospi_ind_op_completed(op) && !fifo8_is_full(&s->rx_sram)) {
-        /* Read reqested number of bytes, max bytes limited to size of sram */
+        /* Read requested number of bytes, max bytes limited to size of sram */
         next_b = ind_op_next_byte(op);
         end_b = next_b + fifo8_num_free(&s->rx_sram);
         end_b = MIN(end_b, ind_op_end_byte(op));
diff --git a/hw/timer/etraxfs_timer.c b/hw/timer/etraxfs_timer.c
index 2d6d92ef93..f035b74560 100644
--- a/hw/timer/etraxfs_timer.c
+++ b/hw/timer/etraxfs_timer.c
@@ -236,7 +236,7 @@ static void watchdog_hit(void *opaque)
 {
     ETRAXTimerState *t = opaque;
     if (t->wd_hits == 0) {
-        /* real hw gives a single tick before reseting but we are
+        /* real hw gives a single tick before resetting but we are
            a bit friendlier to compensate for our slower execution. */
         ptimer_set_count(t->ptimer_wd, 10);
         ptimer_run(t->ptimer_wd, 1);
diff --git a/hw/timer/renesas_tmr.c b/hw/timer/renesas_tmr.c
index c15f654738..43b31213bc 100644
--- a/hw/timer/renesas_tmr.c
+++ b/hw/timer/renesas_tmr.c
@@ -115,7 +115,7 @@ static int elapsed_time(RTMRState *tmr, int ch, int64_t delta)
         et = tmr->div_round[ch] / divrate;
         tmr->div_round[ch] %= divrate;
     } else {
-        /* disble clock. so no update */
+        /* disable clock. so no update */
         et = 0;
     }
     return et;
diff --git a/hw/tpm/tpm_tis.h b/hw/tpm/tpm_tis.h
index 6f29a508dd..6f14896b97 100644
--- a/hw/tpm/tpm_tis.h
+++ b/hw/tpm/tpm_tis.h
@@ -19,7 +19,7 @@
  * specification.
  *
  * TPM TIS for TPM 2 implementation following TCG PC Client Platform
- * TPM Profile (PTP) Specification, Familiy 2.0, Revision 00.43
+ * TPM Profile (PTP) Specification, Family 2.0, Revision 00.43
  */
 #ifndef TPM_TPM_TIS_H
 #define TPM_TPM_TIS_H
diff --git a/hw/tpm/tpm_tis_common.c b/hw/tpm/tpm_tis_common.c
index c07c179dbc..279ce436b5 100644
--- a/hw/tpm/tpm_tis_common.c
+++ b/hw/tpm/tpm_tis_common.c
@@ -20,7 +20,7 @@
  * specification.
  *
  * TPM TIS for TPM 2 implementation following TCG PC Client Platform
- * TPM Profile (PTP) Specification, Familiy 2.0, Revision 00.43
+ * TPM Profile (PTP) Specification, Family 2.0, Revision 00.43
 */
 #include "qemu/osdep.h"
 #include "hw/irq.h"
diff --git a/hw/tpm/tpm_tis_i2c.c b/hw/tpm/tpm_tis_i2c.c
index b695fd3a46..4ecea7fa3e 100644
--- a/hw/tpm/tpm_tis_i2c.c
+++ b/hw/tpm/tpm_tis_i2c.c
@@ -13,7 +13,7 @@
  * Family 2.0, Level 00, Revision 1.00
  *
  * TPM TIS for TPM 2 implementation following TCG PC Client Platform
- * TPM Profile (PTP) Specification, Familiy 2.0, Revision 00.43
+ * TPM Profile (PTP) Specification, Family 2.0, Revision 00.43
  *
 */
@@ -507,7 +507,7 @@ static void tpm_tis_i2c_realizefn(DeviceState *dev, Error **errp)
     }
 
     /*
-     * Get the backend pointer. It is not initialized propery during
+     * Get the backend pointer. It is not initialized properly during
      * device_class_set_props
      */
     s->be_driver = qemu_find_tpm_be("tpm0");
diff --git a/hw/tpm/tpm_tis_isa.c b/hw/tpm/tpm_tis_isa.c
index 91e3792248..0367401586 100644
--- a/hw/tpm/tpm_tis_isa.c
+++ b/hw/tpm/tpm_tis_isa.c
@@ -19,7 +19,7 @@
  * specification.
  *
  * TPM TIS for TPM 2 implementation following TCG PC Client Platform
- * TPM Profile (PTP) Specification, Familiy 2.0, Revision 00.43
+ * TPM Profile (PTP) Specification, Family 2.0, Revision 00.43
 */
 
 #include "qemu/osdep.h"
diff --git a/hw/tpm/tpm_tis_sysbus.c b/hw/tpm/tpm_tis_sysbus.c
index 6724b3d4f6..2fc550f119 100644
--- a/hw/tpm/tpm_tis_sysbus.c
+++ b/hw/tpm/tpm_tis_sysbus.c
@@ -19,7 +19,7 @@
  * specification.
  *
  * TPM TIS for TPM 2 implementation following TCG PC Client Platform
- * TPM Profile (PTP) Specification, Familiy 2.0, Revision 00.43
+ * TPM Profile (PTP) Specification, Family 2.0, Revision 00.43
 */
 
 #include "qemu/osdep.h"
diff --git a/hw/vfio/pci-quirks.c b/hw/vfio/pci-quirks.c
index f4ff836805..84b1a7b948 100644
--- a/hw/vfio/pci-quirks.c
+++ b/hw/vfio/pci-quirks.c
@@ -1610,121 +1610,6 @@ static int vfio_add_nv_gpudirect_cap(VFIOPCIDevice *vdev, Error **errp)
     return 0;
 }
 
-int vfio_pci_nvidia_v100_ram_init(VFIOPCIDevice *vdev, Error **errp)
-{
-    int ret;
-    void *p;
-    struct vfio_region_info *nv2reg = NULL;
-    struct vfio_info_cap_header *hdr;
-    struct vfio_region_info_cap_nvlink2_ssatgt *cap;
-    VFIOQuirk *quirk;
-
-    ret = vfio_get_dev_region_info(&vdev->vbasedev,
-                                   VFIO_REGION_TYPE_PCI_VENDOR_TYPE |
-                                   PCI_VENDOR_ID_NVIDIA,
-                                   VFIO_REGION_SUBTYPE_NVIDIA_NVLINK2_RAM,
-                                   &nv2reg);
-    if (ret) {
-        return ret;
-    }
-
-    hdr = vfio_get_region_info_cap(nv2reg, VFIO_REGION_INFO_CAP_NVLINK2_SSATGT);
-    if (!hdr) {
-        ret = -ENODEV;
-        goto free_exit;
-    }
-    cap = (void *) hdr;
-
-    p = mmap(NULL, nv2reg->size, PROT_READ | PROT_WRITE,
-             MAP_SHARED, vdev->vbasedev.fd, nv2reg->offset);
-    if (p == MAP_FAILED) {
-        ret = -errno;
-        goto free_exit;
-    }
-
-    quirk = vfio_quirk_alloc(1);
-    memory_region_init_ram_ptr(&quirk->mem[0], OBJECT(vdev), "nvlink2-mr",
-                               nv2reg->size, p);
-    QLIST_INSERT_HEAD(&vdev->bars[0].quirks, quirk, next);
-
-    object_property_add_uint64_ptr(OBJECT(vdev), "nvlink2-tgt",
-                                   (uint64_t *) &cap->tgt,
-                                   OBJ_PROP_FLAG_READ);
-    trace_vfio_pci_nvidia_gpu_setup_quirk(vdev->vbasedev.name, cap->tgt,
-                                          nv2reg->size);
-free_exit:
-    g_free(nv2reg);
-
-    return ret;
-}
-
-int vfio_pci_nvlink2_init(VFIOPCIDevice *vdev, Error **errp)
-{
-    int ret;
-    void *p;
-    struct vfio_region_info *atsdreg = NULL;
-    struct vfio_info_cap_header *hdr;
-    struct vfio_region_info_cap_nvlink2_ssatgt *captgt;
-    struct vfio_region_info_cap_nvlink2_lnkspd *capspeed;
-    VFIOQuirk *quirk;
-
-    ret = vfio_get_dev_region_info(&vdev->vbasedev,
-                                   VFIO_REGION_TYPE_PCI_VENDOR_TYPE |
-                                   PCI_VENDOR_ID_IBM,
-                                   VFIO_REGION_SUBTYPE_IBM_NVLINK2_ATSD,
-                                   &atsdreg);
-    if (ret) {
-        return ret;
-    }
-
-    hdr = vfio_get_region_info_cap(atsdreg,
-                                   VFIO_REGION_INFO_CAP_NVLINK2_SSATGT);
-    if (!hdr) {
-        ret = -ENODEV;
-        goto free_exit;
-    }
-    captgt = (void *) hdr;
-
-    hdr = vfio_get_region_info_cap(atsdreg,
-                                   VFIO_REGION_INFO_CAP_NVLINK2_LNKSPD);
-    if (!hdr) {
-        ret = -ENODEV;
-        goto free_exit;
-    }
-    capspeed = (void *) hdr;
-
-    /* Some NVLink bridges may not have assigned ATSD */
-    if (atsdreg->size) {
-        p = mmap(NULL, atsdreg->size, PROT_READ | PROT_WRITE,
-                 MAP_SHARED, vdev->vbasedev.fd, atsdreg->offset);
-        if (p == MAP_FAILED) {
-            ret = -errno;
-            goto free_exit;
-        }
-
-        quirk = vfio_quirk_alloc(1);
-        memory_region_init_ram_device_ptr(&quirk->mem[0], OBJECT(vdev),
-                                          "nvlink2-atsd-mr", atsdreg->size, p);
-        QLIST_INSERT_HEAD(&vdev->bars[0].quirks, quirk, next);
-    }
-
-    object_property_add_uint64_ptr(OBJECT(vdev), "nvlink2-tgt",
-                                   (uint64_t *) &captgt->tgt,
-                                   OBJ_PROP_FLAG_READ);
-    trace_vfio_pci_nvlink2_setup_quirk_ssatgt(vdev->vbasedev.name, captgt->tgt,
-                                              atsdreg->size);
-
-    object_property_add_uint32_ptr(OBJECT(vdev), "nvlink2-link-speed",
-                                   &capspeed->link_speed,
-                                   OBJ_PROP_FLAG_READ);
-    trace_vfio_pci_nvlink2_setup_quirk_lnkspd(vdev->vbasedev.name,
-                                              capspeed->link_speed);
-free_exit:
-    g_free(atsdreg);
-
-    return ret;
-}
-
 /*
  * The VMD endpoint provides a real PCIe domain to the guest and the guest
  * kernel performs enumeration of the VMD sub-device domain. Guest transactions
diff --git a/hw/vfio/pci.c b/hw/vfio/pci.c
index a205c6b113..3b2ca3c24c 100644
--- a/hw/vfio/pci.c
+++ b/hw/vfio/pci.c
@@ -3271,20 +3271,6 @@ static void vfio_realize(PCIDevice *pdev, Error **errp)
         }
     }
 
-    if (vfio_pci_is(vdev, PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID)) {
-        ret = vfio_pci_nvidia_v100_ram_init(vdev, errp);
-        if (ret && ret != -ENODEV) {
-            error_report("Failed to setup NVIDIA V100 GPU RAM");
-        }
-    }
-
-    if (vfio_pci_is(vdev, PCI_VENDOR_ID_IBM, PCI_ANY_ID)) {
-        ret = vfio_pci_nvlink2_init(vdev, errp);
-        if (ret && ret != -ENODEV) {
-            error_report("Failed to setup NVlink2 bridge");
-        }
-    }
-
     if (!pdev->failover_pair_id) {
         if (!vfio_migration_realize(vbasedev, errp)) {
             goto out_deregister;
diff --git a/hw/vfio/pci.h b/hw/vfio/pci.h
index a2771b9ff3..2d836093a8 100644
--- a/hw/vfio/pci.h
+++ b/hw/vfio/pci.h
@@ -221,8 +221,6 @@ int vfio_populate_vga(VFIOPCIDevice *vdev, Error **errp);
 int vfio_pci_igd_opregion_init(VFIOPCIDevice *vdev,
                                struct vfio_region_info *info,
                                Error **errp);
-int vfio_pci_nvidia_v100_ram_init(VFIOPCIDevice *vdev, Error **errp);
-int vfio_pci_nvlink2_init(VFIOPCIDevice *vdev, Error **errp);
 
 void vfio_display_reset(VFIOPCIDevice *vdev);
 int vfio_display_probe(VFIOPCIDevice *vdev, Error **errp);
diff --git a/hw/vfio/trace-events b/hw/vfio/trace-events
index 81ec7c7a95..e64ca4a019 100644
--- a/hw/vfio/trace-events
+++ b/hw/vfio/trace-events
@@ -82,10 +82,6 @@ vfio_ioeventfd_handler(const char *name, uint64_t addr, unsigned size, uint64_t
 vfio_ioeventfd_init(const char *name, uint64_t addr, unsigned size, uint64_t data, bool vfio) "%s+0x%"PRIx64"[%d]:0x%"PRIx64" vfio:%d"
 vfio_pci_igd_opregion_enabled(const char *name) "%s"
 
-vfio_pci_nvidia_gpu_setup_quirk(const char *name, uint64_t tgt, uint64_t size) "%s tgt=0x%"PRIx64" size=0x%"PRIx64
-vfio_pci_nvlink2_setup_quirk_ssatgt(const char *name, uint64_t tgt, uint64_t size) "%s tgt=0x%"PRIx64" size=0x%"PRIx64
-vfio_pci_nvlink2_setup_quirk_lnkspd(const char *name, uint32_t link_speed) "%s link_speed=0x%x"
-
 # igd.c
 vfio_pci_igd_bar4_write(const char *name, uint32_t index, uint32_t data, uint32_t base) "%s [0x%03x] 0x%08x -> 0x%08x"
 vfio_pci_igd_bdsm_enabled(const char *name, int size) "%s %dMB"
diff --git a/hw/virtio/virtio-crypto.c b/hw/virtio/virtio-crypto.c
index 13aec771e1..0e2cc8d5a8 100644
--- a/hw/virtio/virtio-crypto.c
+++ b/hw/virtio/virtio-crypto.c
@@ -655,7 +655,7 @@ virtio_crypto_sym_op_helper(VirtIODevice *vdev,
     op_info->len_to_hash = len_to_hash;
     op_info->cipher_start_src_offset = cipher_start_src_offset;
     op_info->len_to_cipher = len_to_cipher;
-    /* Handle the initilization vector */
+    /* Handle the initialization vector */
     if (op_info->iv_len > 0) {
         DPRINTF("iv_len=%" PRIu32 "\n", op_info->iv_len);
         op_info->iv = op_info->data + curr_size;
@@ -1278,7 +1278,7 @@ static void virtio_crypto_instance_init(Object *obj)
 
     /*
      * The default config_size is sizeof(struct virtio_crypto_config).
-     * Can be overriden with virtio_crypto_set_config_size.
+     * Can be overridden with virtio_crypto_set_config_size.
      */
     vcrypto->config_size = sizeof(struct virtio_crypto_config);
 }
diff --git a/hw/virtio/virtio-mem.c b/hw/virtio/virtio-mem.c
index b6e781741e..da5b09cefc 100644
--- a/hw/virtio/virtio-mem.c
+++ b/hw/virtio/virtio-mem.c
@@ -1119,7 +1119,7 @@ static int virtio_mem_mig_sanity_checks_post_load(void *opaque, int version_id)
         return -EINVAL;
     }
     /*
-     * Note: Preparation for resizeable memory regions. The maximum size
+     * Note: Preparation for resizable memory regions. The maximum size
      * of the memory region must not change during migration.
      */
     if (tmp->region_size != new_region_size) {
diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index 969c25f4cf..4577f3f5b3 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -2096,7 +2096,7 @@ void virtio_queue_enable(VirtIODevice *vdev, uint32_t queue_index)
      * being converted to LOG_GUEST_ERROR.
      *
     if (!virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
-        error_report("queue_enable is only suppported in devices of virtio "
+        error_report("queue_enable is only supported in devices of virtio "
                      "1.0 or later.");
     }
     */
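
For reference, the GPA->TGT translation that the deleted spapr_pci_nvlink2.c preserved only as a comment (in spapr_phb_nvgpu_populate_pcidev_dt() above) can be written out as a self-contained helper. This is an illustrative sketch only: the name nv2_gpa_to_tgt is hypothetical and the function does not exist in the QEMU tree; the four shift/mask steps are transcribed from the removed comment.

```c
#include <stdint.h>

/*
 * Sketch of the GPA->TGT bit shuffle documented in the comment of the
 * removed spapr_pci_nvlink2.c; hypothetical helper, not part of QEMU.
 */
static uint64_t nv2_gpa_to_tgt(uint64_t gpa)
{
    uint64_t tgt;

    tgt  = ((gpa >> 42) & 0x1) << 42;   /* bit 42 stays in place */
    tgt |= ((gpa >> 45) & 0x3) << 43;   /* bits 46:45 move down to 44:43 */
    tgt |= ((gpa >> 49) & 0x3) << 45;   /* bits 50:49 move down to 46:45 */
    tgt |= gpa & ((1ULL << 43) - 1);    /* low 43 bits pass through */

    return tgt;
}
```

Since this commit removes the NVLink2 path entirely, the sketch is purely documentary; the original comment used 1UL rather than 1ULL, which was equivalent on the 64-bit hosts the code was built for.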