Diffstat (limited to 'hw/intc')
 -rw-r--r--  hw/intc/pnv_xive2.c       |  44
 -rw-r--r--  hw/intc/spapr_xive_kvm.c  |   4
 -rw-r--r--  hw/intc/xics.c            |  16
 -rw-r--r--  hw/intc/xive.c            | 203
 -rw-r--r--  hw/intc/xive2.c           | 317
5 files changed, 474 insertions, 110 deletions
diff --git a/hw/intc/pnv_xive2.c b/hw/intc/pnv_xive2.c
index 78609105a8..834d32287b 100644
--- a/hw/intc/pnv_xive2.c
+++ b/hw/intc/pnv_xive2.c
@@ -490,6 +490,23 @@ static int pnv_xive2_write_nvp(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
                                word_number);
 }
 
+static int pnv_xive2_get_nvgc(Xive2Router *xrtr, bool crowd,
+                              uint8_t blk, uint32_t idx,
+                              Xive2Nvgc *nvgc)
+{
+    return pnv_xive2_vst_read(PNV_XIVE2(xrtr), crowd ? VST_NVC : VST_NVG,
+                              blk, idx, nvgc);
+}
+
+static int pnv_xive2_write_nvgc(Xive2Router *xrtr, bool crowd,
+                                uint8_t blk, uint32_t idx,
+                                Xive2Nvgc *nvgc)
+{
+    return pnv_xive2_vst_write(PNV_XIVE2(xrtr), crowd ? VST_NVC : VST_NVG,
+                               blk, idx, nvgc,
+                               XIVE_VST_WORD_ALL);
+}
+
 static int pnv_xive2_nxc_to_table_type(uint8_t nxc_type, uint32_t *table_type)
 {
     switch (nxc_type) {
@@ -2407,6 +2424,8 @@ static void pnv_xive2_class_init(ObjectClass *klass, void *data)
     xrc->write_end = pnv_xive2_write_end;
     xrc->get_nvp = pnv_xive2_get_nvp;
     xrc->write_nvp = pnv_xive2_write_nvp;
+    xrc->get_nvgc = pnv_xive2_get_nvgc;
+    xrc->write_nvgc = pnv_xive2_write_nvgc;
     xrc->get_config = pnv_xive2_get_config;
     xrc->get_block_id = pnv_xive2_get_block_id;
 
@@ -2497,8 +2516,9 @@ void pnv_xive2_pic_print_info(PnvXive2 *xive, GString *buf)
     Xive2Eas eas;
     Xive2End end;
     Xive2Nvp nvp;
+    Xive2Nvgc nvgc;
     int i;
-    uint64_t xive_nvp_per_subpage;
+    uint64_t entries_per_subpage;
 
     g_string_append_printf(buf, "XIVE[%x] Source %08x .. %08x\n",
                            blk, srcno0, srcno0 + nr_esbs - 1);
@@ -2530,10 +2550,28 @@ void pnv_xive2_pic_print_info(PnvXive2 *xive, GString *buf)
 
     g_string_append_printf(buf, "XIVE[%x] #%d NVPT %08x .. %08x\n",
                            chip_id, blk, 0, XIVE2_NVP_COUNT - 1);
-    xive_nvp_per_subpage = pnv_xive2_vst_per_subpage(xive, VST_NVP);
-    for (i = 0; i < XIVE2_NVP_COUNT; i += xive_nvp_per_subpage) {
+    entries_per_subpage = pnv_xive2_vst_per_subpage(xive, VST_NVP);
+    for (i = 0; i < XIVE2_NVP_COUNT; i += entries_per_subpage) {
         while (!xive2_router_get_nvp(xrtr, blk, i, &nvp)) {
             xive2_nvp_pic_print_info(&nvp, i++, buf);
         }
     }
+
+    g_string_append_printf(buf, "XIVE[%x] #%d NVGT %08x .. %08x\n",
+                           chip_id, blk, 0, XIVE2_NVP_COUNT - 1);
+    entries_per_subpage = pnv_xive2_vst_per_subpage(xive, VST_NVG);
+    for (i = 0; i < XIVE2_NVP_COUNT; i += entries_per_subpage) {
+        while (!xive2_router_get_nvgc(xrtr, false, blk, i, &nvgc)) {
+            xive2_nvgc_pic_print_info(&nvgc, i++, buf);
+        }
+    }
+
+    g_string_append_printf(buf, "XIVE[%x] #%d NVCT %08x .. %08x\n",
+                           chip_id, blk, 0, XIVE2_NVP_COUNT - 1);
+    entries_per_subpage = pnv_xive2_vst_per_subpage(xive, VST_NVC);
+    for (i = 0; i < XIVE2_NVP_COUNT; i += entries_per_subpage) {
+        while (!xive2_router_get_nvgc(xrtr, true, blk, i, &nvgc)) {
+            xive2_nvgc_pic_print_info(&nvgc, i++, buf);
+        }
+    }
 }
diff --git a/hw/intc/spapr_xive_kvm.c b/hw/intc/spapr_xive_kvm.c
index 5789062379..7a86197fc9 100644
--- a/hw/intc/spapr_xive_kvm.c
+++ b/hw/intc/spapr_xive_kvm.c
@@ -720,7 +720,7 @@ int kvmppc_xive_connect(SpaprInterruptController *intc, uint32_t nr_servers,
 {
     SpaprXive *xive = SPAPR_XIVE(intc);
     XiveSource *xsrc = &xive->source;
-    size_t esb_len = xive_source_esb_len(xsrc);
+    uint64_t esb_len = xive_source_esb_len(xsrc);
     size_t tima_len = 4ull << TM_SHIFT;
     CPUState *cs;
     int fd;
@@ -824,7 +824,7 @@ void kvmppc_xive_disconnect(SpaprInterruptController *intc)
 {
     SpaprXive *xive = SPAPR_XIVE(intc);
     XiveSource *xsrc;
-    size_t esb_len;
+    uint64_t esb_len;
 
     assert(xive->fd != -1);
diff --git a/hw/intc/xics.c b/hw/intc/xics.c
index 6f4d5271ea..e893363dc9 100644
--- a/hw/intc/xics.c
+++ b/hw/intc/xics.c
@@ -335,22 +335,6 @@ static void icp_realize(DeviceState *dev, Error **errp)
             return;
         }
     }
-    /*
-     * The way that pre_2_10_icp is handling is really, really hacky.
-     * We used to have here this call:
-     *
-     * vmstate_register(NULL, icp->cs->cpu_index, &vmstate_icp_server, icp);
-     *
-     * But we were doing:
-     *    pre_2_10_vmstate_register_dummy_icp()
-     *    this vmstate_register()
-     *    pre_2_10_vmstate_unregister_dummy_icp()
-     *
-     * So for a short amount of time we had to vmstate entries with
-     * the same name. This fixes it.
-     */
-    vmstate_replace_hack_for_ppc(NULL, icp->cs->cpu_index,
-                                 &vmstate_icp_server, icp);
 }
 
 static void icp_unrealize(DeviceState *dev)
diff --git a/hw/intc/xive.c b/hw/intc/xive.c
index 5a02dd8e02..245e4d181a 100644
--- a/hw/intc/xive.c
+++ b/hw/intc/xive.c
@@ -74,33 +74,48 @@ static uint64_t xive_tctx_accept(XiveTCTX *tctx, uint8_t ring)
 
     if (regs[TM_NSR] & mask) {
         uint8_t cppr = regs[TM_PIPR];
+        uint8_t alt_ring;
+        uint8_t *alt_regs;
+
+        /* POOL interrupt uses IPB in QW2, POOL ring */
+        if ((ring == TM_QW3_HV_PHYS) && (nsr & (TM_QW3_NSR_HE_POOL << 6))) {
+            alt_ring = TM_QW2_HV_POOL;
+        } else {
+            alt_ring = ring;
+        }
+        alt_regs = &tctx->regs[alt_ring];
 
         regs[TM_CPPR] = cppr;
 
         /* Reset the pending buffer bit */
-        regs[TM_IPB] &= ~xive_priority_to_ipb(cppr);
-        regs[TM_PIPR] = ipb_to_pipr(regs[TM_IPB]);
+        alt_regs[TM_IPB] &= ~xive_priority_to_ipb(cppr);
 
         /* Drop Exception bit */
         regs[TM_NSR] &= ~mask;
 
-        trace_xive_tctx_accept(tctx->cs->cpu_index, ring,
-                               regs[TM_IPB], regs[TM_PIPR],
+        trace_xive_tctx_accept(tctx->cs->cpu_index, alt_ring,
+                               alt_regs[TM_IPB], regs[TM_PIPR],
                                regs[TM_CPPR], regs[TM_NSR]);
     }
 
-    return (nsr << 8) | regs[TM_CPPR];
+    return ((uint64_t)nsr << 8) | regs[TM_CPPR];
 }
 
 static void xive_tctx_notify(XiveTCTX *tctx, uint8_t ring)
 {
+    /* HV_POOL ring uses HV_PHYS NSR, CPPR and PIPR registers */
+    uint8_t alt_ring = (ring == TM_QW2_HV_POOL) ? TM_QW3_HV_PHYS : ring;
+    uint8_t *alt_regs = &tctx->regs[alt_ring];
     uint8_t *regs = &tctx->regs[ring];
 
-    if (regs[TM_PIPR] < regs[TM_CPPR]) {
+    if (alt_regs[TM_PIPR] < alt_regs[TM_CPPR]) {
         switch (ring) {
         case TM_QW1_OS:
             regs[TM_NSR] |= TM_QW1_NSR_EO;
             break;
+        case TM_QW2_HV_POOL:
+            alt_regs[TM_NSR] = (TM_QW3_NSR_HE_POOL << 6);
+            break;
         case TM_QW3_HV_PHYS:
             regs[TM_NSR] |= (TM_QW3_NSR_HE_PHYS << 6);
             break;
@@ -108,26 +123,27 @@ static void xive_tctx_notify(XiveTCTX *tctx, uint8_t ring)
             g_assert_not_reached();
         }
         trace_xive_tctx_notify(tctx->cs->cpu_index, ring,
-                               regs[TM_IPB], regs[TM_PIPR],
-                               regs[TM_CPPR], regs[TM_NSR]);
+                               regs[TM_IPB], alt_regs[TM_PIPR],
+                               alt_regs[TM_CPPR], alt_regs[TM_NSR]);
         qemu_irq_raise(xive_tctx_output(tctx, ring));
     }
 }
 
-void xive_tctx_reset_os_signal(XiveTCTX *tctx)
+void xive_tctx_reset_signal(XiveTCTX *tctx, uint8_t ring)
 {
     /*
-     * Lower the External interrupt. Used when pulling an OS
-     * context. It is necessary to avoid catching it in the hypervisor
-     * context. It should be raised again when re-pushing the OS
-     * context.
+     * Lower the External interrupt. Used when pulling a context. It is
+     * necessary to avoid catching it in the higher privilege context. It
+     * should be raised again when re-pushing the lower privilege context.
      */
-    qemu_irq_lower(xive_tctx_output(tctx, TM_QW1_OS));
+    qemu_irq_lower(xive_tctx_output(tctx, ring));
 }
 
 static void xive_tctx_set_cppr(XiveTCTX *tctx, uint8_t ring, uint8_t cppr)
 {
     uint8_t *regs = &tctx->regs[ring];
+    uint8_t pipr_min;
+    uint8_t ring_min;
 
     trace_xive_tctx_set_cppr(tctx->cs->cpu_index, ring,
                              regs[TM_IPB], regs[TM_PIPR],
@@ -139,8 +155,37 @@ static void xive_tctx_set_cppr(XiveTCTX *tctx, uint8_t ring, uint8_t cppr)
 
     tctx->regs[ring + TM_CPPR] = cppr;
 
+    /*
+     * Recompute the PIPR based on local pending interrupts. The PHYS
+     * ring must take the minimum of both the PHYS and POOL PIPR values.
+     */
+    pipr_min = ipb_to_pipr(regs[TM_IPB]);
+    ring_min = ring;
+
+    /* PHYS updates also depend on POOL values */
+    if (ring == TM_QW3_HV_PHYS) {
+        uint8_t *pool_regs = &tctx->regs[TM_QW2_HV_POOL];
+
+        /* POOL values only matter if POOL ctx is valid */
+        if (pool_regs[TM_WORD2] & 0x80) {
+
+            uint8_t pool_pipr = ipb_to_pipr(pool_regs[TM_IPB]);
+
+            /*
+             * Determine highest priority interrupt and
+             * remember which ring has it.
+             */
+            if (pool_pipr < pipr_min) {
+                pipr_min = pool_pipr;
+                ring_min = TM_QW2_HV_POOL;
+            }
+        }
+    }
+
+    regs[TM_PIPR] = pipr_min;
+
     /* CPPR has changed, check if we need to raise a pending exception */
-    xive_tctx_notify(tctx, ring);
+    xive_tctx_notify(tctx, ring_min);
 }
 
 void xive_tctx_ipb_update(XiveTCTX *tctx, uint8_t ring, uint8_t ipb)
@@ -179,6 +224,17 @@ static uint64_t xive_tm_pull_pool_ctx(XivePresenter *xptr, XiveTCTX *tctx,
     return qw2w2;
 }
 
+static uint64_t xive_tm_pull_phys_ctx(XivePresenter *xptr, XiveTCTX *tctx,
+                                      hwaddr offset, unsigned size)
+{
+    uint8_t qw3b8_prev = tctx->regs[TM_QW3_HV_PHYS + TM_WORD2];
+    uint8_t qw3b8;
+
+    qw3b8 = qw3b8_prev & ~TM_QW3B8_VT;
+    tctx->regs[TM_QW3_HV_PHYS + TM_WORD2] = qw3b8;
+    return qw3b8;
+}
+
 static void xive_tm_vt_push(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset,
                             uint64_t value, unsigned size)
 {
@@ -207,14 +263,14 @@ static uint64_t xive_tm_vt_poll(XivePresenter *xptr, XiveTCTX *tctx,
 static const uint8_t xive_tm_hw_view[] = {
     3, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-0 User */
     3, 3, 3, 3, 3, 3, 0, 2, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-1 OS */
-    0, 0, 3, 3, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-2 POOL */
+    0, 0, 3, 3, 0, 3, 3, 0, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-2 POOL */
     3, 3, 3, 3, 0, 3, 0, 2, 3, 0, 0, 3, 3, 3, 3, 0, /* QW-3 PHYS */
 };
 
 static const uint8_t xive_tm_hv_view[] = {
     3, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-0 User */
     3, 3, 3, 3, 3, 3, 0, 2, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-1 OS */
-    0, 0, 3, 3, 0, 0, 0, 0, 0, 3, 3, 3, 0, 0, 0, 0, /* QW-2 POOL */
+    0, 0, 3, 3, 0, 3, 3, 0, 0, 3, 3, 3, 0, 0, 0, 0, /* QW-2 POOL */
     3, 3, 3, 3, 0, 3, 0, 2, 3, 0, 0, 3, 0, 0, 0, 0, /* QW-3 PHYS */
 };
 
@@ -341,6 +397,19 @@ static void xive_tm_set_os_cppr(XivePresenter *xptr, XiveTCTX *tctx,
     xive_tctx_set_cppr(tctx, TM_QW1_OS, value & 0xff);
 }
 
+static void xive_tctx_set_lgs(XiveTCTX *tctx, uint8_t ring, uint8_t lgs)
+{
+    uint8_t *regs = &tctx->regs[ring];
+
+    regs[TM_LGS] = lgs;
+}
+
+static void xive_tm_set_os_lgs(XivePresenter *xptr, XiveTCTX *tctx,
+                               hwaddr offset, uint64_t value, unsigned size)
+{
+    xive_tctx_set_lgs(tctx, TM_QW1_OS, value & 0xff);
+}
+
 /*
  * Adjust the IPB to allow a CPU to process event queues of other
  * priorities during one physical interrupt cycle.
@@ -400,7 +469,7 @@ static uint64_t xive_tm_pull_os_ctx(XivePresenter *xptr, XiveTCTX *tctx,
     qw1w2_new = xive_set_field32(TM_QW1W2_VO, qw1w2, 0);
     xive_tctx_set_os_cam(tctx, qw1w2_new);
 
-    xive_tctx_reset_os_signal(tctx);
+    xive_tctx_reset_signal(tctx, TM_QW1_OS);
     return qw1w2;
 }
 
@@ -488,20 +557,34 @@ static const XiveTmOp xive_tm_operations[] = {
     /*
      * MMIOs below 2K : raw values and special operations without side
      * effects
     */
-    { XIVE_TM_OS_PAGE, TM_QW1_OS + TM_CPPR, 1, xive_tm_set_os_cppr, NULL },
-    { XIVE_TM_HV_PAGE, TM_QW1_OS + TM_WORD2, 4, xive_tm_push_os_ctx, NULL },
-    { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_CPPR, 1, xive_tm_set_hv_cppr, NULL },
-    { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, xive_tm_vt_push, NULL },
-    { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, NULL, xive_tm_vt_poll },
+    { XIVE_TM_OS_PAGE, TM_QW1_OS + TM_CPPR, 1, xive_tm_set_os_cppr,
+      NULL },
+    { XIVE_TM_HV_PAGE, TM_QW1_OS + TM_WORD2, 4, xive_tm_push_os_ctx,
+      NULL },
+    { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_CPPR, 1, xive_tm_set_hv_cppr,
+      NULL },
+    { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, xive_tm_vt_push,
+      NULL },
+    { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, NULL,
+      xive_tm_vt_poll },
 
     /* MMIOs above 2K : special operations with side effects */
-    { XIVE_TM_OS_PAGE, TM_SPC_ACK_OS_REG, 2, NULL, xive_tm_ack_os_reg },
-    { XIVE_TM_OS_PAGE, TM_SPC_SET_OS_PENDING, 1, xive_tm_set_os_pending, NULL },
-    { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 4, NULL, xive_tm_pull_os_ctx },
-    { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 8, NULL, xive_tm_pull_os_ctx },
-    { XIVE_TM_HV_PAGE, TM_SPC_ACK_HV_REG, 2, NULL, xive_tm_ack_hv_reg },
-    { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 4, NULL, xive_tm_pull_pool_ctx },
-    { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 8, NULL, xive_tm_pull_pool_ctx },
+    { XIVE_TM_OS_PAGE, TM_SPC_ACK_OS_REG, 2, NULL,
+      xive_tm_ack_os_reg },
+    { XIVE_TM_OS_PAGE, TM_SPC_SET_OS_PENDING, 1, xive_tm_set_os_pending,
+      NULL },
+    { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 4, NULL,
+      xive_tm_pull_os_ctx },
+    { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 8, NULL,
+      xive_tm_pull_os_ctx },
+    { XIVE_TM_HV_PAGE, TM_SPC_ACK_HV_REG, 2, NULL,
+      xive_tm_ack_hv_reg },
+    { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 4, NULL,
+      xive_tm_pull_pool_ctx },
+    { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 8, NULL,
+      xive_tm_pull_pool_ctx },
+    { XIVE_TM_HV_PAGE, TM_SPC_PULL_PHYS_CTX, 1, NULL,
+      xive_tm_pull_phys_ctx },
 };
 
@@ -509,20 +592,50 @@
 static const XiveTmOp xive2_tm_operations[] = {
     /*
      * MMIOs below 2K : raw values and special operations without side
     * effects
     */
-    { XIVE_TM_OS_PAGE, TM_QW1_OS + TM_CPPR, 1, xive_tm_set_os_cppr, NULL },
-    { XIVE_TM_HV_PAGE, TM_QW1_OS + TM_WORD2, 4, xive2_tm_push_os_ctx, NULL },
-    { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_CPPR, 1, xive_tm_set_hv_cppr, NULL },
-    { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, xive_tm_vt_push, NULL },
-    { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, NULL, xive_tm_vt_poll },
+    { XIVE_TM_OS_PAGE, TM_QW1_OS + TM_CPPR, 1, xive_tm_set_os_cppr,
+      NULL },
+    { XIVE_TM_HV_PAGE, TM_QW1_OS + TM_WORD2, 4, xive2_tm_push_os_ctx,
+      NULL },
+    { XIVE_TM_HV_PAGE, TM_QW1_OS + TM_WORD2, 8, xive2_tm_push_os_ctx,
+      NULL },
+    { XIVE_TM_OS_PAGE, TM_QW1_OS + TM_LGS, 1, xive_tm_set_os_lgs,
+      NULL },
+    { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_CPPR, 1, xive_tm_set_hv_cppr,
+      NULL },
+    { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, xive_tm_vt_push,
+      NULL },
+    { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, NULL,
+      xive_tm_vt_poll },
+    { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_T, 1, xive2_tm_set_hv_target,
+      NULL },
 
     /* MMIOs above 2K : special operations with side effects */
-    { XIVE_TM_OS_PAGE, TM_SPC_ACK_OS_REG, 2, NULL, xive_tm_ack_os_reg },
-    { XIVE_TM_OS_PAGE, TM_SPC_SET_OS_PENDING, 1, xive_tm_set_os_pending, NULL },
-    { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 4, NULL, xive2_tm_pull_os_ctx },
-    { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 8, NULL, xive2_tm_pull_os_ctx },
-    { XIVE_TM_HV_PAGE, TM_SPC_ACK_HV_REG, 2, NULL, xive_tm_ack_hv_reg },
-    { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 4, NULL, xive_tm_pull_pool_ctx },
-    { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 8, NULL, xive_tm_pull_pool_ctx },
+    { XIVE_TM_OS_PAGE, TM_SPC_ACK_OS_REG, 2, NULL,
+      xive_tm_ack_os_reg },
+    { XIVE_TM_OS_PAGE, TM_SPC_SET_OS_PENDING, 1, xive_tm_set_os_pending,
+      NULL },
+    { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX_G2, 4, NULL,
+      xive2_tm_pull_os_ctx },
+    { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 4, NULL,
+      xive2_tm_pull_os_ctx },
+    { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 8, NULL,
+      xive2_tm_pull_os_ctx },
+    { XIVE_TM_HV_PAGE, TM_SPC_ACK_HV_REG, 2, NULL,
+      xive_tm_ack_hv_reg },
+    { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX_G2, 4, NULL,
+      xive_tm_pull_pool_ctx },
+    { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 4, NULL,
+      xive_tm_pull_pool_ctx },
+    { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 8, NULL,
+      xive_tm_pull_pool_ctx },
+    { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX_OL, 1, xive2_tm_pull_os_ctx_ol,
+      NULL },
+    { XIVE_TM_HV_PAGE, TM_SPC_PULL_PHYS_CTX_G2, 4, NULL,
+      xive_tm_pull_phys_ctx },
+    { XIVE_TM_HV_PAGE, TM_SPC_PULL_PHYS_CTX, 1, NULL,
+      xive_tm_pull_phys_ctx },
+    { XIVE_TM_HV_PAGE, TM_SPC_PULL_PHYS_CTX_OL, 1, xive2_tm_pull_phys_ctx_ol,
+      NULL },
 };
 
 static const XiveTmOp *xive_tm_find_op(XivePresenter *xptr, hwaddr offset,
@@ -718,6 +831,10 @@ void xive_tctx_reset(XiveTCTX *tctx)
     tctx->regs[TM_QW1_OS + TM_LSMFB] = 0xFF;
     tctx->regs[TM_QW1_OS + TM_ACK_CNT] = 0xFF;
     tctx->regs[TM_QW1_OS + TM_AGE] = 0xFF;
+    if (!(xive_presenter_get_config(tctx->xptr) &
+          XIVE_PRESENTER_GEN1_TIMA_OS)) {
+        tctx->regs[TM_QW1_OS + TM_OGEN] = 2;
+    }
 
     /*
      * Initialize PIPR to 0xFF to avoid phantom interrupts when the
@@ -1242,7 +1359,7 @@ static void xive_source_reset(void *dev)
 static void xive_source_realize(DeviceState *dev, Error **errp)
 {
     XiveSource *xsrc = XIVE_SOURCE(dev);
-    size_t esb_len = xive_source_esb_len(xsrc);
+    uint64_t esb_len = xive_source_esb_len(xsrc);
 
     assert(xsrc->xive);
diff --git a/hw/intc/xive2.c b/hw/intc/xive2.c
index 1f150685bf..d1df35e9b3 100644
--- a/hw/intc/xive2.c
+++ b/hw/intc/xive2.c
@@ -26,6 +26,43 @@ uint32_t xive2_router_get_config(Xive2Router *xrtr)
     return xrc->get_config(xrtr);
 }
 
+static int xive2_router_get_block_id(Xive2Router *xrtr)
+{
+    Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);
+
+    return xrc->get_block_id(xrtr);
+}
+
+static uint64_t xive2_nvp_reporting_addr(Xive2Nvp *nvp)
+{
+    uint64_t cache_addr;
+
+    cache_addr = xive_get_field32(NVP2_W6_REPORTING_LINE, nvp->w6) << 24 |
+        xive_get_field32(NVP2_W7_REPORTING_LINE, nvp->w7);
+    cache_addr <<= 8; /* aligned on a cache line pair */
+    return cache_addr;
+}
+
+static uint32_t xive2_nvgc_get_backlog(Xive2Nvgc *nvgc, uint8_t priority)
+{
+    uint32_t val = 0;
+    uint8_t *ptr, i;
+
+    if (priority > 7) {
+        return 0;
+    }
+
+    /*
+     * The per-priority backlog counters are 24-bit and the structure
+     * is stored in big endian
+     */
+    ptr = (uint8_t *)&nvgc->w2 + priority * 3;
+    for (i = 0; i < 3; i++, ptr++) {
+        val = (val << 8) + *ptr;
+    }
+    return val;
+}
+
 void xive2_eas_pic_print_info(Xive2Eas *eas, uint32_t lisn, GString *buf)
 {
     if (!xive2_eas_is_valid(eas)) {
@@ -144,14 +181,20 @@ void xive2_nvp_pic_print_info(Xive2Nvp *nvp, uint32_t nvp_idx, GString *buf)
 {
     uint8_t eq_blk = xive_get_field32(NVP2_W5_VP_END_BLOCK, nvp->w5);
     uint32_t eq_idx = xive_get_field32(NVP2_W5_VP_END_INDEX, nvp->w5);
+    uint64_t cache_line = xive2_nvp_reporting_addr(nvp);
 
     if (!xive2_nvp_is_valid(nvp)) {
         return;
     }
 
-    g_string_append_printf(buf, "  %08x end:%02x/%04x IPB:%02x",
+    g_string_append_printf(buf, "  %08x end:%02x/%04x IPB:%02x PGoFirst:%02x",
                            nvp_idx, eq_blk, eq_idx,
-                           xive_get_field32(NVP2_W2_IPB, nvp->w2));
+                           xive_get_field32(NVP2_W2_IPB, nvp->w2),
+                           xive_get_field32(NVP2_W0_PGOFIRST, nvp->w0));
+    if (cache_line) {
+        g_string_append_printf(buf, " reporting CL:%016"PRIx64, cache_line);
+    }
+
     /*
      * When the NVP is HW controlled, more fields are updated
      */
@@ -166,6 +209,23 @@ void xive2_nvp_pic_print_info(Xive2Nvp *nvp, uint32_t nvp_idx, GString *buf)
     g_string_append_c(buf, '\n');
 }
 
+void xive2_nvgc_pic_print_info(Xive2Nvgc *nvgc, uint32_t nvgc_idx, GString *buf)
+{
+    uint8_t i;
+
+    if (!xive2_nvgc_is_valid(nvgc)) {
+        return;
+    }
+
+    g_string_append_printf(buf, "  %08x PGoNext:%02x bklog: ", nvgc_idx,
+                           xive_get_field32(NVGC2_W0_PGONEXT, nvgc->w0));
+    for (i = 0; i <= XIVE_PRIORITY_MAX; i++) {
+        g_string_append_printf(buf, "[%d]=0x%x ",
+                               i, xive2_nvgc_get_backlog(nvgc, i));
+    }
+    g_string_append_printf(buf, "\n");
+}
+
 static void xive2_end_enqueue(Xive2End *end, uint32_t data)
 {
     uint64_t qaddr_base = xive2_end_qaddr(end);
@@ -210,13 +270,14 @@ static void xive2_end_enqueue(Xive2End *end, uint32_t data)
  * the NVP by changing the H bit while the context is enabled
  */
 
-static void xive2_tctx_save_os_ctx(Xive2Router *xrtr, XiveTCTX *tctx,
-                                   uint8_t nvp_blk, uint32_t nvp_idx)
+static void xive2_tctx_save_ctx(Xive2Router *xrtr, XiveTCTX *tctx,
+                                uint8_t nvp_blk, uint32_t nvp_idx,
+                                uint8_t ring)
 {
     CPUPPCState *env = &POWERPC_CPU(tctx->cs)->env;
     uint32_t pir = env->spr_cb[SPR_PIR].default_value;
     Xive2Nvp nvp;
-    uint8_t *regs = &tctx->regs[TM_QW1_OS];
+    uint8_t *regs = &tctx->regs[ring];
 
     if (xive2_router_get_nvp(xrtr, nvp_blk, nvp_idx, &nvp)) {
         qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVP %x/%x\n",
@@ -261,44 +322,190 @@ static void xive2_tctx_save_os_ctx(Xive2Router *xrtr, XiveTCTX *tctx,
     xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, &nvp, 1);
 }
 
-static void xive2_os_cam_decode(uint32_t cam, uint8_t *nvp_blk,
-                                uint32_t *nvp_idx, bool *vo, bool *ho)
+static void xive2_cam_decode(uint32_t cam, uint8_t *nvp_blk,
+                             uint32_t *nvp_idx, bool *valid, bool *hw)
 {
     *nvp_blk = xive2_nvp_blk(cam);
     *nvp_idx = xive2_nvp_idx(cam);
-    *vo = !!(cam & TM2_QW1W2_VO);
-    *ho = !!(cam & TM2_QW1W2_HO);
+    *valid = !!(cam & TM2_W2_VALID);
+    *hw = !!(cam & TM2_W2_HW);
 }
 
-uint64_t xive2_tm_pull_os_ctx(XivePresenter *xptr, XiveTCTX *tctx,
-                              hwaddr offset, unsigned size)
+/*
+ * Encode the HW CAM line with 7bit or 8bit thread id. The thread id
+ * width and block id width is configurable at the IC level.
+ *
+ * chipid << 24 | 0000 0000 0000 0000 1 threadid (7Bit)
+ * chipid << 24 | 0000 0000 0000 0001 threadid (8Bit)
+ */
+static uint32_t xive2_tctx_hw_cam_line(XivePresenter *xptr, XiveTCTX *tctx)
 {
     Xive2Router *xrtr = XIVE2_ROUTER(xptr);
-    uint32_t qw1w2 = xive_tctx_word2(&tctx->regs[TM_QW1_OS]);
-    uint32_t qw1w2_new;
-    uint32_t cam = be32_to_cpu(qw1w2);
+    CPUPPCState *env = &POWERPC_CPU(tctx->cs)->env;
+    uint32_t pir = env->spr_cb[SPR_PIR].default_value;
+    uint8_t blk = xive2_router_get_block_id(xrtr);
+    uint8_t tid_shift =
+        xive2_router_get_config(xrtr) & XIVE2_THREADID_8BITS ? 8 : 7;
+    uint8_t tid_mask = (1 << tid_shift) - 1;
+
+    return xive2_nvp_cam_line(blk, 1 << tid_shift | (pir & tid_mask));
+}
+
+static uint64_t xive2_tm_pull_ctx(XivePresenter *xptr, XiveTCTX *tctx,
+                                  hwaddr offset, unsigned size, uint8_t ring)
+{
+    Xive2Router *xrtr = XIVE2_ROUTER(xptr);
+    uint32_t target_ringw2 = xive_tctx_word2(&tctx->regs[ring]);
+    uint32_t cam = be32_to_cpu(target_ringw2);
     uint8_t nvp_blk;
     uint32_t nvp_idx;
-    bool vo;
+    uint8_t cur_ring;
+    bool valid;
     bool do_save;
 
-    xive2_os_cam_decode(cam, &nvp_blk, &nvp_idx, &vo, &do_save);
-    if (!vo) {
+    xive2_cam_decode(cam, &nvp_blk, &nvp_idx, &valid, &do_save);
+
+    if (!valid) {
         qemu_log_mask(LOG_GUEST_ERROR, "XIVE: pulling invalid NVP %x/%x !?\n",
                       nvp_blk, nvp_idx);
     }
 
-    /* Invalidate CAM line */
-    qw1w2_new = xive_set_field32(TM2_QW1W2_VO, qw1w2, 0);
-    memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &qw1w2_new, 4);
+    /* Invalidate CAM line of requested ring and all lower rings */
+    for (cur_ring = TM_QW0_USER; cur_ring <= ring;
+         cur_ring += XIVE_TM_RING_SIZE) {
+        uint32_t ringw2 = xive_tctx_word2(&tctx->regs[cur_ring]);
+        uint32_t ringw2_new = xive_set_field32(TM2_QW1W2_VO, ringw2, 0);
+        memcpy(&tctx->regs[cur_ring + TM_WORD2], &ringw2_new, 4);
+    }
 
     if (xive2_router_get_config(xrtr) & XIVE2_VP_SAVE_RESTORE && do_save) {
-        xive2_tctx_save_os_ctx(xrtr, tctx, nvp_blk, nvp_idx);
+        xive2_tctx_save_ctx(xrtr, tctx, nvp_blk, nvp_idx, ring);
+    }
+
+    /*
+     * Lower external interrupt line of requested ring and below except for
+     * USER, which doesn't exist.
+     */
+    for (cur_ring = TM_QW1_OS; cur_ring <= ring;
+         cur_ring += XIVE_TM_RING_SIZE) {
+        xive_tctx_reset_signal(tctx, cur_ring);
+    }
+    return target_ringw2;
+}
+
+uint64_t xive2_tm_pull_os_ctx(XivePresenter *xptr, XiveTCTX *tctx,
+                              hwaddr offset, unsigned size)
+{
+    return xive2_tm_pull_ctx(xptr, tctx, offset, size, TM_QW1_OS);
+}
+
+#define REPORT_LINE_GEN1_SIZE 16
+
+static void xive2_tm_report_line_gen1(XiveTCTX *tctx, uint8_t *data,
+                                      uint8_t size)
+{
+    uint8_t *regs = tctx->regs;
+
+    g_assert(size == REPORT_LINE_GEN1_SIZE);
+    memset(data, 0, size);
+    /*
+     * See xive architecture for description of what is saved. It is
+     * hand-picked information to fit in 16 bytes.
+     */
+    data[0x0] = regs[TM_QW3_HV_PHYS + TM_NSR];
+    data[0x1] = regs[TM_QW3_HV_PHYS + TM_CPPR];
+    data[0x2] = regs[TM_QW3_HV_PHYS + TM_IPB];
+    data[0x3] = regs[TM_QW2_HV_POOL + TM_IPB];
+    data[0x4] = regs[TM_QW1_OS + TM_ACK_CNT];
+    data[0x5] = regs[TM_QW3_HV_PHYS + TM_LGS];
+    data[0x6] = 0xFF;
+    data[0x7] = regs[TM_QW3_HV_PHYS + TM_WORD2] & 0x80;
+    data[0x7] |= (regs[TM_QW2_HV_POOL + TM_WORD2] & 0x80) >> 1;
+    data[0x7] |= (regs[TM_QW1_OS + TM_WORD2] & 0x80) >> 2;
+    data[0x7] |= (regs[TM_QW3_HV_PHYS + TM_WORD2] & 0x3);
+    data[0x8] = regs[TM_QW1_OS + TM_NSR];
+    data[0x9] = regs[TM_QW1_OS + TM_CPPR];
+    data[0xA] = regs[TM_QW1_OS + TM_IPB];
+    data[0xB] = regs[TM_QW1_OS + TM_LGS];
+    if (regs[TM_QW0_USER + TM_WORD2] & 0x80) {
+        /*
+         * Logical server extension, except VU bit replaced by EB bit
+         * from NSR
+         */
+        data[0xC] = regs[TM_QW0_USER + TM_WORD2];
+        data[0xC] &= ~0x80;
+        data[0xC] |= regs[TM_QW0_USER + TM_NSR] & 0x80;
+        data[0xD] = regs[TM_QW0_USER + TM_WORD2 + 1];
+        data[0xE] = regs[TM_QW0_USER + TM_WORD2 + 2];
+        data[0xF] = regs[TM_QW0_USER + TM_WORD2 + 3];
     }
+}
+
+static void xive2_tm_pull_ctx_ol(XivePresenter *xptr, XiveTCTX *tctx,
+                                 hwaddr offset, uint64_t value,
+                                 unsigned size, uint8_t ring)
+{
+    Xive2Router *xrtr = XIVE2_ROUTER(xptr);
+    uint32_t hw_cam, nvp_idx, xive2_cfg, reserved;
+    uint8_t nvp_blk;
+    Xive2Nvp nvp;
+    uint64_t phys_addr;
+    MemTxResult result;
 
-    xive_tctx_reset_os_signal(tctx);
-    return qw1w2;
+    hw_cam = xive2_tctx_hw_cam_line(xptr, tctx);
+    nvp_blk = xive2_nvp_blk(hw_cam);
+    nvp_idx = xive2_nvp_idx(hw_cam);
+
+    if (xive2_router_get_nvp(xrtr, nvp_blk, nvp_idx, &nvp)) {
+        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVP %x/%x\n",
+                      nvp_blk, nvp_idx);
+        return;
+    }
+
+    if (!xive2_nvp_is_valid(&nvp)) {
+        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid NVP %x/%x\n",
+                      nvp_blk, nvp_idx);
+        return;
+    }
+
+    xive2_cfg = xive2_router_get_config(xrtr);
+
+    phys_addr = xive2_nvp_reporting_addr(&nvp) + 0x80; /* odd line */
+    if (xive2_cfg & XIVE2_GEN1_TIMA_OS) {
+        uint8_t pull_ctxt[REPORT_LINE_GEN1_SIZE];
+
+        xive2_tm_report_line_gen1(tctx, pull_ctxt, REPORT_LINE_GEN1_SIZE);
+        result = dma_memory_write(&address_space_memory, phys_addr,
+                                  pull_ctxt, REPORT_LINE_GEN1_SIZE,
+                                  MEMTXATTRS_UNSPECIFIED);
+        assert(result == MEMTX_OK);
+    } else {
+        result = dma_memory_write(&address_space_memory, phys_addr,
+                                  &tctx->regs, sizeof(tctx->regs),
+                                  MEMTXATTRS_UNSPECIFIED);
+        assert(result == MEMTX_OK);
+        reserved = 0xFFFFFFFF;
+        result = dma_memory_write(&address_space_memory, phys_addr + 12,
+                                  &reserved, sizeof(reserved),
+                                  MEMTXATTRS_UNSPECIFIED);
+        assert(result == MEMTX_OK);
+    }
+
+    /* the rest is similar to pull context to registers */
+    xive2_tm_pull_ctx(xptr, tctx, offset, size, ring);
+}
+
+void xive2_tm_pull_os_ctx_ol(XivePresenter *xptr, XiveTCTX *tctx,
+                             hwaddr offset, uint64_t value, unsigned size)
+{
+    xive2_tm_pull_ctx_ol(xptr, tctx, offset, value, size, TM_QW1_OS);
+}
+
+
+void xive2_tm_pull_phys_ctx_ol(XivePresenter *xptr, XiveTCTX *tctx,
+                               hwaddr offset, uint64_t value, unsigned size)
+{
+    xive2_tm_pull_ctx_ol(xptr, tctx, offset, value, size, TM_QW3_HV_PHYS);
 }
 
 static uint8_t xive2_tctx_restore_os_ctx(Xive2Router *xrtr, XiveTCTX *tctx,
@@ -390,17 +597,31 @@ static void xive2_tctx_need_resend(Xive2Router *xrtr, XiveTCTX *tctx,
 void xive2_tm_push_os_ctx(XivePresenter *xptr, XiveTCTX *tctx,
                           hwaddr offset, uint64_t value, unsigned size)
 {
-    uint32_t cam = value;
-    uint32_t qw1w2 = cpu_to_be32(cam);
+    uint32_t cam;
+    uint32_t qw1w2;
+    uint64_t qw1dw1;
     uint8_t nvp_blk;
     uint32_t nvp_idx;
     bool vo;
     bool do_restore;
 
-    xive2_os_cam_decode(cam, &nvp_blk, &nvp_idx, &vo, &do_restore);
-    /* First update the thead context */
-    memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &qw1w2, 4);
+    switch (size) {
+    case 4:
+        cam = value;
+        qw1w2 = cpu_to_be32(cam);
+        memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &qw1w2, 4);
+        break;
+    case 8:
+        cam = value >> 32;
+        qw1dw1 = cpu_to_be64(value);
+        memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &qw1dw1, 8);
+        break;
+    default:
+        g_assert_not_reached();
+    }
+
+    xive2_cam_decode(cam, &nvp_blk, &nvp_idx, &vo, &do_restore);
 
     /* Check the interrupt pending bits */
     if (vo) {
@@ -409,6 +630,19 @@ void xive2_tm_push_os_ctx(XivePresenter *xptr, XiveTCTX *tctx,
     }
 }
 
+static void xive2_tctx_set_target(XiveTCTX *tctx, uint8_t ring, uint8_t target)
+{
+    uint8_t *regs = &tctx->regs[ring];
+
+    regs[TM_T] = target;
+}
+
+void xive2_tm_set_hv_target(XivePresenter *xptr, XiveTCTX *tctx,
+                            hwaddr offset, uint64_t value, unsigned size)
+{
+    xive2_tctx_set_target(tctx, TM_QW3_HV_PHYS, value & 0xff);
+}
+
 /*
  * XIVE Router (aka. Virtualization Controller or IVRE)
  */
@@ -471,31 +705,22 @@ int xive2_router_write_nvp(Xive2Router *xrtr, uint8_t nvp_blk, uint32_t nvp_idx,
     return xrc->write_nvp(xrtr, nvp_blk, nvp_idx, nvp, word_number);
 }
 
-static int xive2_router_get_block_id(Xive2Router *xrtr)
+int xive2_router_get_nvgc(Xive2Router *xrtr, bool crowd,
+                          uint8_t nvgc_blk, uint32_t nvgc_idx,
+                          Xive2Nvgc *nvgc)
 {
     Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);
 
-    return xrc->get_block_id(xrtr);
+    return xrc->get_nvgc(xrtr, crowd, nvgc_blk, nvgc_idx, nvgc);
 }
 
-/*
- * Encode the HW CAM line with 7bit or 8bit thread id. The thread id
- * width and block id width is configurable at the IC level.
- *
- * chipid << 24 | 0000 0000 0000 0000 1 threadid (7Bit)
- * chipid << 24 | 0000 0000 0000 0001 threadid (8Bit)
- */
-static uint32_t xive2_tctx_hw_cam_line(XivePresenter *xptr, XiveTCTX *tctx)
+int xive2_router_write_nvgc(Xive2Router *xrtr, bool crowd,
+                            uint8_t nvgc_blk, uint32_t nvgc_idx,
+                            Xive2Nvgc *nvgc)
 {
-    Xive2Router *xrtr = XIVE2_ROUTER(xptr);
-    CPUPPCState *env = &POWERPC_CPU(tctx->cs)->env;
-    uint32_t pir = env->spr_cb[SPR_PIR].default_value;
-    uint8_t blk = xive2_router_get_block_id(xrtr);
-    uint8_t tid_shift =
-        xive2_router_get_config(xrtr) & XIVE2_THREADID_8BITS ? 8 : 7;
-    uint8_t tid_mask = (1 << tid_shift) - 1;
+    Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);
 
-    return xive2_nvp_cam_line(blk, 1 << tid_shift | (pir & tid_mask));
+    return xrc->write_nvgc(xrtr, crowd, nvgc_blk, nvgc_idx, nvgc);
 }
 
 /*
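
The new xive2_nvgc_get_backlog() helper above reads the NVGC per-priority backlog counters, which are packed as eight 24-bit big-endian fields starting at word 2 of the structure. A standalone sketch of the same decode, assuming only a plain byte buffer in place of the real Xive2Nvgc layout (names here are illustrative, not QEMU API):

#include <stdint.h>
#include <stddef.h>

/*
 * Sketch: return the 24-bit big-endian backlog counter for 'priority'
 * from a buffer laid out like NVGC words 2..7 (8 counters * 3 bytes).
 * 'backlog' stands in for (uint8_t *)&nvgc->w2 in the patch.
 */
static uint32_t nvgc_backlog_sketch(const uint8_t *backlog, uint8_t priority)
{
    uint32_t val = 0;

    if (priority > 7) {
        return 0;
    }
    for (size_t i = 0; i < 3; i++) {
        val = (val << 8) | backlog[priority * 3 + i];
    }
    return val;
}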
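xive2_tctx_hw_cam_line(), which the patch moves up and reuses for the new pull-to-memory path, builds the HW CAM value from the block id and the PIR, with a 7- or 8-bit thread id selected by the IC configuration. A minimal illustration of the bit layout described in its comment; the 24-bit chip-id shift follows that comment, and the names below are hypothetical stand-ins rather than the QEMU macros:

#include <stdint.h>

/*
 * chipid << 24 | 0000 0000 0000 0000 1 threadid   (7-bit thread id)
 * chipid << 24 | 0000 0000 0000 0001 threadid     (8-bit thread id)
 * 'eightbit_tid' stands in for the XIVE2_THREADID_8BITS config test.
 */
static uint32_t hw_cam_line_sketch(uint8_t chip_id, uint32_t pir,
                                   int eightbit_tid)
{
    uint8_t tid_shift = eightbit_tid ? 8 : 7;
    uint32_t tid_mask = (1u << tid_shift) - 1;

    return ((uint32_t)chip_id << 24) | (1u << tid_shift) | (pir & tid_mask);
}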
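The xive_tctx_set_cppr() change makes the PHYS ring consider both its own IPB and the POOL ring's IPB (when the POOL context is valid) and signal on whichever ring holds the most favoured, i.e. numerically lowest, pending priority. A self-contained sketch of that rule, with ipb_to_pipr_sketch() standing in for QEMU's ipb_to_pipr():

#include <stdint.h>

/* xive_priority_to_ipb(p) sets bit (0x80 >> p); this inverts that mapping */
static uint8_t ipb_to_pipr_sketch(uint8_t ipb)
{
    uint8_t prio;

    if (!ipb) {
        return 0xFF;                    /* nothing pending */
    }
    for (prio = 0; !(ipb & (0x80 >> prio)); prio++) {
        continue;
    }
    return prio;
}

/* PIPR seen by the PHYS ring: minimum of PHYS and (valid) POOL pending */
static uint8_t phys_pipr_sketch(uint8_t phys_ipb, uint8_t pool_ipb,
                                int pool_ctx_valid)
{
    uint8_t pipr = ipb_to_pipr_sketch(phys_ipb);

    if (pool_ctx_valid) {
        uint8_t pool_pipr = ipb_to_pipr_sketch(pool_ipb);

        if (pool_pipr < pipr) {
            pipr = pool_pipr;           /* POOL ring carries the best priority */
        }
    }
    return pipr;
}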
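xive2_tm_push_os_ctx() now accepts both 4-byte and 8-byte TIMA stores: a 4-byte store carries only the CAM word, while an 8-byte store carries the CAM in its upper 32 bits and lays the whole doubleword down big-endian over TM_WORD2. A sketch of that dispatch, with store_be32()/store_be64() as local stand-ins for cpu_to_be32()/cpu_to_be64() followed by memcpy():

#include <stdint.h>

static void store_be32(uint8_t *p, uint32_t v)
{
    for (int i = 0; i < 4; i++) {
        p[i] = v >> (24 - 8 * i);       /* most significant byte first */
    }
}

static void store_be64(uint8_t *p, uint64_t v)
{
    for (int i = 0; i < 8; i++) {
        p[i] = v >> (56 - 8 * i);
    }
}

/* Returns the CAM word that xive2_cam_decode() would then parse */
static uint32_t push_os_w2_sketch(uint8_t *w2, uint64_t value, unsigned size)
{
    uint32_t cam;

    if (size == 4) {
        cam = (uint32_t)value;
        store_be32(w2, cam);
    } else {                            /* size == 8 */
        cam = value >> 32;
        store_be64(w2, value);
    }
    return cam;
}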