| field | value | date |
|---|---|---|
| author | Stefan Hajnoczi <stefanha@redhat.com> | 2023-09-19 13:22:02 -0400 |
| committer | Stefan Hajnoczi <stefanha@redhat.com> | 2023-09-19 13:22:02 -0400 |
| commit | 6a0eddb34a642be919260e2964e5b4780f80ffdf (patch) | |
| tree | c79f51cb9dfc11f15c7e7331689a62592b969585 /hw/ppc/spapr_numa.c | |
| parent | dd0c84983dd5c3fefaa29f15ed1b4c5c7be9775d (diff) | |
| parent | 44fa20c92811a9b88b41b4882a7e948c2fe6bd08 (diff) | |
Merge tag 'pull-ppc-20230918' of https://gitlab.com/danielhb/qemu into staging
ppc patch queue for 2023-09-18:

In this short queue we're making two important changes:

- Nicholas Piggin is now the qemu-ppc maintainer. Cédric Le Goater and
  Daniel Barboza will act as backup during Nick's transition to this
  new role.

- Support for NVIDIA V100 GPU with NVLink2 is dropped from qemu-ppc.
  Linux removed the same support back in 5.13, we're following suit
  now.

A xive Coverity fix is also included.

# -----BEGIN PGP SIGNATURE-----
#
# iIwEABYKADQWIQQX6/+ZI9AYAK8oOBk82cqW3gMxZAUCZQhPnBYcZGFuaWVsaGI0
# MTNAZ21haWwuY29tAAoJEDzZypbeAzFk5QUBAJJNnCtv/SPP6bQVNGMgtfI9sz2z
# MEttDa7SINyLCiVxAP0Y9z8ZHEj6vhztTX0AAv2QubCKWIVbJZbPV5RWrHCEBQ==
# =y3nh
# -----END PGP SIGNATURE-----
# gpg: Signature made Mon 18 Sep 2023 09:24:44 EDT
# gpg:                using EDDSA key 17EBFF9923D01800AF2838193CD9CA96DE033164
# gpg:                issuer "danielhb413@gmail.com"
# gpg: Good signature from "Daniel Henrique Barboza <danielhb413@gmail.com>" [unknown]
# gpg: WARNING: The key's User ID is not certified with a trusted signature!
# gpg:          There is no indication that the signature belongs to the owner.
# Primary key fingerprint: 17EB FF99 23D0 1800 AF28 3819 3CD9 CA96 DE03 3164

* tag 'pull-ppc-20230918' of https://gitlab.com/danielhb/qemu:
  spapr: Remove support for NVIDIA V100 GPU with NVLink2
  ppc/xive: Fix uint32_t overflow
  MAINTAINERS: Nick Piggin PPC maintainer, other PPC changes

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
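For readers skimming the diff below, the guest-visible effect of the NVLink2 removal is a change in how the maximum associativity domain count is computed. The following is a minimal standalone C sketch, not QEMU code: it reproduces the before/after arithmetic implied by the removed spapr_numa_initial_nvgpu_numa_id() helper and the changed maxdomain assignments, and the sample values (two NUMA nodes, a gpu_numa_id of 3 after one GPU was collected) are purely hypothetical.

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))

/* Old accounting: the first NVGPU NUMA id was MAX(1, num_nodes) and every
 * NVLink2 GPU added one extra domain on top of the user-configured nodes. */
static uint32_t maxdomain_before(uint32_t num_nodes, uint32_t gpu_numa_id)
{
    uint32_t initial_nvgpu_numa_id = MAX(1, num_nodes);
    uint32_t number_nvgpus_nodes = gpu_numa_id - initial_nvgpu_numa_id;

    return num_nodes + number_nvgpus_nodes;
}

/* New accounting: only the user-configured NUMA nodes count. */
static uint32_t maxdomain_after(uint32_t num_nodes)
{
    return num_nodes;
}

int main(void)
{
    /* Hypothetical machine: 2 NUMA nodes, one NVLink2 GPU already collected,
     * so gpu_numa_id has advanced from 2 to 3. */
    printf("before: %" PRIu32 "\n", maxdomain_before(2, 3)); /* prints 3 */
    printf("after:  %" PRIu32 "\n", maxdomain_after(2));     /* prints 2 */
    return 0;
}
```

With no NVLink2 GPUs present, gpu_numa_id never advances past its initial value, so both computations agree; that is why the extra accounting can be dropped without changing what existing non-GPU guests see.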
Diffstat (limited to 'hw/ppc/spapr_numa.c')
| mode | file | lines |
|---|---|---|
| -rw-r--r-- | hw/ppc/spapr_numa.c | 49 |

1 file changed, 8 insertions, 41 deletions
diff --git a/hw/ppc/spapr_numa.c b/hw/ppc/spapr_numa.c
index a64098c375..ea6762d3d2 100644
--- a/hw/ppc/spapr_numa.c
+++ b/hw/ppc/spapr_numa.c
@@ -109,20 +109,6 @@ static bool spapr_numa_is_symmetrical(MachineState *ms)
 }
 
 /*
- * NVLink2-connected GPU RAM needs to be placed on a separate NUMA node.
- * We assign a new numa ID per GPU in spapr_pci_collect_nvgpu() which is
- * called from vPHB reset handler so we initialize the counter here.
- * If no NUMA is configured from the QEMU side, we start from 1 as GPU RAM
- * must be equally distant from any other node.
- * The final value of spapr->gpu_numa_id is going to be written to
- * max-associativity-domains in spapr_build_fdt().
- */
-unsigned int spapr_numa_initial_nvgpu_numa_id(MachineState *machine)
-{
-    return MAX(1, machine->numa_state->num_nodes);
-}
-
-/*
  * This function will translate the user distances into
  * what the kernel understand as possible values: 10
  * (local distance), 20, 40, 80 and 160, and return the equivalent
@@ -277,7 +263,7 @@ static void spapr_numa_FORM1_affinity_init(SpaprMachineState *spapr,
 {
     SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
     int nb_numa_nodes = machine->numa_state->num_nodes;
-    int i, j, max_nodes_with_gpus;
+    int i, j;
 
     /*
      * For all associativity arrays: first position is the size,
@@ -293,17 +279,7 @@ static void spapr_numa_FORM1_affinity_init(SpaprMachineState *spapr,
         spapr->FORM1_assoc_array[i][FORM1_DIST_REF_POINTS] = cpu_to_be32(i);
     }
 
-    /*
-     * Initialize NVLink GPU associativity arrays. We know that
-     * the first GPU will take the first available NUMA id, and
-     * we'll have a maximum of NVGPU_MAX_NUM GPUs in the machine.
-     * At this point we're not sure if there are GPUs or not, but
-     * let's initialize the associativity arrays and allow NVLink
-     * GPUs to be handled like regular NUMA nodes later on.
-     */
-    max_nodes_with_gpus = nb_numa_nodes + NVGPU_MAX_NUM;
-
-    for (i = nb_numa_nodes; i < max_nodes_with_gpus; i++) {
+    for (i = nb_numa_nodes; i < nb_numa_nodes; i++) {
         spapr->FORM1_assoc_array[i][0] = cpu_to_be32(FORM1_DIST_REF_POINTS);
 
         for (j = 1; j < FORM1_DIST_REF_POINTS; j++) {
@@ -345,10 +321,6 @@ static void spapr_numa_FORM2_affinity_init(SpaprMachineState *spapr)
      * CPUs will write an additional 'vcpu_id' on top of the arrays
      * being initialized here. 'numa_id' is represented by the
      * index 'i' of the loop.
-     *
-     * Given that this initialization is also valid for GPU associativity
-     * arrays, handle everything in one single step by populating the
-     * arrays up to NUMA_NODES_MAX_NUM.
      */
     for (i = 0; i < NUMA_NODES_MAX_NUM; i++) {
         spapr->FORM2_assoc_array[i][0] = cpu_to_be32(1);
@@ -461,8 +433,6 @@ static void spapr_numa_FORM1_write_rtas_dt(SpaprMachineState *spapr,
 {
     MachineState *ms = MACHINE(spapr);
     SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
-    uint32_t number_nvgpus_nodes = spapr->gpu_numa_id -
-                                   spapr_numa_initial_nvgpu_numa_id(ms);
     uint32_t refpoints[] = {
         cpu_to_be32(0x4),
         cpu_to_be32(0x3),
@@ -470,7 +440,7 @@ static void spapr_numa_FORM1_write_rtas_dt(SpaprMachineState *spapr,
         cpu_to_be32(0x1),
     };
     uint32_t nr_refpoints = ARRAY_SIZE(refpoints);
-    uint32_t maxdomain = ms->numa_state->num_nodes + number_nvgpus_nodes;
+    uint32_t maxdomain = ms->numa_state->num_nodes;
     uint32_t maxdomains[] = {
         cpu_to_be32(4),
         cpu_to_be32(maxdomain),
@@ -486,13 +456,12 @@ static void spapr_numa_FORM1_write_rtas_dt(SpaprMachineState *spapr,
         cpu_to_be32(0x4),
         cpu_to_be32(0x2),
     };
-    uint32_t legacy_maxdomain = spapr->gpu_numa_id > 1 ? 1 : 0;
     uint32_t legacy_maxdomains[] = {
         cpu_to_be32(4),
-        cpu_to_be32(legacy_maxdomain),
-        cpu_to_be32(legacy_maxdomain),
-        cpu_to_be32(legacy_maxdomain),
-        cpu_to_be32(spapr->gpu_numa_id),
+        cpu_to_be32(0),
+        cpu_to_be32(0),
+        cpu_to_be32(0),
+        cpu_to_be32(maxdomain ? maxdomain : 1),
     };
 
     G_STATIC_ASSERT(sizeof(legacy_refpoints) <= sizeof(refpoints));
@@ -581,8 +550,6 @@ static void spapr_numa_FORM2_write_rtas_dt(SpaprMachineState *spapr,
                                            void *fdt, int rtas)
 {
     MachineState *ms = MACHINE(spapr);
-    uint32_t number_nvgpus_nodes = spapr->gpu_numa_id -
-                                   spapr_numa_initial_nvgpu_numa_id(ms);
 
     /*
      * In FORM2, ibm,associativity-reference-points will point to
@@ -596,7 +563,7 @@ static void spapr_numa_FORM2_write_rtas_dt(SpaprMachineState *spapr,
      */
     uint32_t refpoints[] = { cpu_to_be32(1) };
 
-    uint32_t maxdomain = ms->numa_state->num_nodes + number_nvgpus_nodes;
+    uint32_t maxdomain = ms->numa_state->num_nodes;
     uint32_t maxdomains[] = { cpu_to_be32(1), cpu_to_be32(maxdomain) };
 
     _FDT(fdt_setprop(fdt, rtas, "ibm,associativity-reference-points",
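To make the end state concrete, here is a minimal standalone sketch (again, not QEMU code) of the ibm,max-associativity-domains cells produced by the patched legacy_maxdomains initializer in spapr_numa_FORM1_write_rtas_dt(). htonl() is used as a stand-in for QEMU's cpu_to_be32(), since both convert a host value to big-endian, and the two-node machine is a hypothetical example.

```c
#include <arpa/inet.h>   /* htonl(): stand-in for QEMU's cpu_to_be32() */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Hypothetical machine: 2 user-configured NUMA nodes, no NVLink2 GPUs. */
    uint32_t num_nodes = 2;
    uint32_t maxdomain = num_nodes;   /* the new, simplified computation */

    /* Mirrors the patched initializer: cells 1-3, which used to carry the
     * gpu_numa_id-derived legacy_maxdomain value, are now fixed at 0, and
     * the last cell falls back to 1 when no NUMA nodes are configured. */
    uint32_t legacy_maxdomains[] = {
        htonl(4),
        htonl(0),
        htonl(0),
        htonl(0),
        htonl(maxdomain ? maxdomain : 1),
    };

    for (size_t i = 0; i < sizeof(legacy_maxdomains) / sizeof(legacy_maxdomains[0]); i++) {
        printf("cell %zu: 0x%08" PRIx32 "\n", i, legacy_maxdomains[i]);
    }
    return 0;
}
```

These are only the legacy_maxdomains cells; the regular maxdomains array in the same function is built from the same simplified maxdomain value, as the hunk at @@ -470,7 above shows.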