Diffstat (limited to 'hw/arm/smmuv3.c')
| -rw-r--r-- | hw/arm/smmuv3.c | 142 |
1 file changed, 72 insertions(+), 70 deletions(-)
diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c
index 57a79df55b..0122700e72 100644
--- a/hw/arm/smmuv3.c
+++ b/hw/arm/smmuv3.c
@@ -254,6 +254,9 @@ static void smmuv3_init_regs(SMMUv3State *s)
     s->idr[1] = FIELD_DP32(s->idr[1], IDR1, EVENTQS, SMMU_EVENTQS);
     s->idr[1] = FIELD_DP32(s->idr[1], IDR1, CMDQS, SMMU_CMDQS);
 
+    s->idr[3] = FIELD_DP32(s->idr[3], IDR3, RIL, 1);
+    s->idr[3] = FIELD_DP32(s->idr[3], IDR3, HAD, 1);
+
     /* 4K and 64K granule support */
     s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN4K, 1);
     s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN64K, 1);
@@ -270,6 +273,7 @@ static void smmuv3_init_regs(SMMUv3State *s)
 
     s->features = 0;
     s->sid_split = 0;
+    s->aidr = 0x1;
 }
 
 static int smmu_get_ste(SMMUv3State *s, dma_addr_t addr, STE *buf,
@@ -506,7 +510,8 @@ static int decode_cd(SMMUTransCfg *cfg, CD *cd, SMMUEventInfo *event)
         if (tt->ttb & ~(MAKE_64BIT_MASK(0, cfg->oas))) {
             goto bad_cd;
         }
-        trace_smmuv3_decode_cd_tt(i, tt->tsz, tt->ttb, tt->granule_sz);
+        tt->had = CD_HAD(cd, i);
+        trace_smmuv3_decode_cd_tt(i, tt->tsz, tt->ttb, tt->granule_sz, tt->had);
     }
 
     event->record_trans_faults = CD_R(cd);
@@ -626,7 +631,7 @@ static IOMMUTLBEntry smmuv3_translate(IOMMUMemoryRegion *mr, hwaddr addr,
     SMMUTranslationStatus status;
     SMMUState *bs = ARM_SMMU(s);
     uint64_t page_mask, aligned_addr;
-    IOMMUTLBEntry *cached_entry = NULL;
+    SMMUTLBEntry *cached_entry = NULL;
     SMMUTransTableInfo *tt;
     SMMUTransCfg *cfg = NULL;
     IOMMUTLBEntry entry = {
@@ -636,7 +641,6 @@ static IOMMUTLBEntry smmuv3_translate(IOMMUMemoryRegion *mr, hwaddr addr,
         .addr_mask = ~(hwaddr)0,
         .perm = IOMMU_NONE,
     };
-    SMMUIOTLBKey key, *new_key;
 
     qemu_mutex_lock(&s->mutex);
 
@@ -675,17 +679,9 @@ static IOMMUTLBEntry smmuv3_translate(IOMMUMemoryRegion *mr, hwaddr addr,
     page_mask = (1ULL << (tt->granule_sz)) - 1;
     aligned_addr = addr & ~page_mask;
 
-    key.asid = cfg->asid;
-    key.iova = aligned_addr;
-
-    cached_entry = g_hash_table_lookup(bs->iotlb, &key);
+    cached_entry = smmu_iotlb_lookup(bs, cfg, tt, aligned_addr);
     if (cached_entry) {
-        cfg->iotlb_hits++;
-        trace_smmu_iotlb_cache_hit(cfg->asid, aligned_addr,
-                                   cfg->iotlb_hits, cfg->iotlb_misses,
-                                   100 * cfg->iotlb_hits /
-                                   (cfg->iotlb_hits + cfg->iotlb_misses));
-        if ((flag & IOMMU_WO) && !(cached_entry->perm & IOMMU_WO)) {
+        if ((flag & IOMMU_WO) && !(cached_entry->entry.perm & IOMMU_WO)) {
             status = SMMU_TRANS_ERROR;
             if (event.record_trans_faults) {
                 event.type = SMMU_EVT_F_PERMISSION;
@@ -698,17 +694,7 @@ static IOMMUTLBEntry smmuv3_translate(IOMMUMemoryRegion *mr, hwaddr addr,
         goto epilogue;
     }
 
-    cfg->iotlb_misses++;
-    trace_smmu_iotlb_cache_miss(cfg->asid, addr & ~page_mask,
-                                cfg->iotlb_hits, cfg->iotlb_misses,
-                                100 * cfg->iotlb_hits /
-                                (cfg->iotlb_hits + cfg->iotlb_misses));
-
-    if (g_hash_table_size(bs->iotlb) >= SMMU_IOTLB_MAX_SIZE) {
-        smmu_iotlb_inv_all(bs);
-    }
-
-    cached_entry = g_new0(IOMMUTLBEntry, 1);
+    cached_entry = g_new0(SMMUTLBEntry, 1);
 
     if (smmu_ptw(cfg, aligned_addr, flag, cached_entry, &ptw_info)) {
         g_free(cached_entry);
@@ -753,10 +739,7 @@ static IOMMUTLBEntry smmuv3_translate(IOMMUMemoryRegion *mr, hwaddr addr,
         }
         status = SMMU_TRANS_ERROR;
     } else {
-        new_key = g_new0(SMMUIOTLBKey, 1);
-        new_key->asid = cfg->asid;
-        new_key->iova = aligned_addr;
-        g_hash_table_insert(bs->iotlb, new_key, cached_entry);
+        smmu_iotlb_insert(bs, cfg, cached_entry);
         status = SMMU_TRANS_SUCCESS;
     }
 
@@ -765,9 +748,9 @@ epilogue:
     switch (status) {
     case SMMU_TRANS_SUCCESS:
         entry.perm = flag;
-        entry.translated_addr = cached_entry->translated_addr +
-                                    (addr & page_mask);
-        entry.addr_mask = cached_entry->addr_mask;
+        entry.translated_addr = cached_entry->entry.translated_addr +
+                                (addr & cached_entry->entry.addr_mask);
+        entry.addr_mask = cached_entry->entry.addr_mask;
         trace_smmuv3_translate_success(mr->parent_obj.name, sid, addr,
                                        entry.translated_addr, entry.perm);
         break;
@@ -807,42 +790,49 @@ epilogue:
  * @n: notifier to be called
  * @asid: address space ID or negative value if we don't care
  * @iova: iova
+ * @tg: translation granule (if communicated through range invalidation)
+ * @num_pages: number of @granule sized pages (if tg != 0), otherwise 1
  */
 static void smmuv3_notify_iova(IOMMUMemoryRegion *mr,
                                IOMMUNotifier *n,
-                               int asid,
-                               dma_addr_t iova)
+                               int asid, dma_addr_t iova,
+                               uint8_t tg, uint64_t num_pages)
 {
     SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
-    SMMUEventInfo event = {.inval_ste_allowed = true};
-    SMMUTransTableInfo *tt;
-    SMMUTransCfg *cfg;
     IOMMUTLBEntry entry;
+    uint8_t granule = tg;
 
-    cfg = smmuv3_get_config(sdev, &event);
-    if (!cfg) {
-        return;
-    }
+    if (!tg) {
+        SMMUEventInfo event = {.inval_ste_allowed = true};
+        SMMUTransCfg *cfg = smmuv3_get_config(sdev, &event);
+        SMMUTransTableInfo *tt;
 
-    if (asid >= 0 && cfg->asid != asid) {
-        return;
-    }
+        if (!cfg) {
+            return;
+        }
 
-    tt = select_tt(cfg, iova);
-    if (!tt) {
-        return;
+        if (asid >= 0 && cfg->asid != asid) {
+            return;
+        }
+
+        tt = select_tt(cfg, iova);
+        if (!tt) {
+            return;
+        }
+        granule = tt->granule_sz;
     }
 
     entry.target_as = &address_space_memory;
     entry.iova = iova;
-    entry.addr_mask = (1 << tt->granule_sz) - 1;
+    entry.addr_mask = num_pages * (1 << granule) - 1;
    entry.perm = IOMMU_NONE;
 
     memory_region_notify_one(n, &entry);
 }
 
-/* invalidate an asid/iova tuple in all mr's */
-static void smmuv3_inv_notifiers_iova(SMMUState *s, int asid, dma_addr_t iova)
+/* invalidate an asid/iova range tuple in all mr's */
+static void smmuv3_inv_notifiers_iova(SMMUState *s, int asid, dma_addr_t iova,
+                                      uint8_t tg, uint64_t num_pages)
 {
     SMMUDevice *sdev;
 
@@ -850,14 +840,41 @@ static void smmuv3_inv_notifiers_iova(SMMUState *s, int asid, dma_addr_t iova)
         IOMMUMemoryRegion *mr = &sdev->iommu;
         IOMMUNotifier *n;
 
-        trace_smmuv3_inv_notifiers_iova(mr->parent_obj.name, asid, iova);
+        trace_smmuv3_inv_notifiers_iova(mr->parent_obj.name, asid, iova,
+                                        tg, num_pages);
 
         IOMMU_NOTIFIER_FOREACH(n, mr) {
-            smmuv3_notify_iova(mr, n, asid, iova);
+            smmuv3_notify_iova(mr, n, asid, iova, tg, num_pages);
         }
     }
 }
 
+static void smmuv3_s1_range_inval(SMMUState *s, Cmd *cmd)
+{
+    uint8_t scale = 0, num = 0, ttl = 0;
+    dma_addr_t addr = CMD_ADDR(cmd);
+    uint8_t type = CMD_TYPE(cmd);
+    uint16_t vmid = CMD_VMID(cmd);
+    bool leaf = CMD_LEAF(cmd);
+    uint8_t tg = CMD_TG(cmd);
+    hwaddr num_pages = 1;
+    int asid = -1;
+
+    if (tg) {
+        scale = CMD_SCALE(cmd);
+        num = CMD_NUM(cmd);
+        ttl = CMD_TTL(cmd);
+        num_pages = (num + 1) * (1 << (scale));
+    }
+
+    if (type == SMMU_CMD_TLBI_NH_VA) {
+        asid = CMD_ASID(cmd);
+    }
+    trace_smmuv3_s1_range_inval(vmid, asid, addr, tg, num_pages, ttl, leaf);
+    smmuv3_inv_notifiers_iova(s, asid, addr, tg, num_pages);
+    smmu_iotlb_inv_iova(s, asid, addr, tg, num_pages, ttl);
+}
+
 static int smmuv3_cmdq_consume(SMMUv3State *s)
 {
     SMMUState *bs = ARM_SMMU(s);
@@ -988,27 +1005,9 @@ static int smmuv3_cmdq_consume(SMMUv3State *s)
             smmu_iotlb_inv_all(bs);
             break;
         case SMMU_CMD_TLBI_NH_VAA:
-        {
-            dma_addr_t addr = CMD_ADDR(&cmd);
-            uint16_t vmid = CMD_VMID(&cmd);
-
-            trace_smmuv3_cmdq_tlbi_nh_vaa(vmid, addr);
-            smmuv3_inv_notifiers_iova(bs, -1, addr);
-            smmu_iotlb_inv_all(bs);
-            break;
-        }
         case SMMU_CMD_TLBI_NH_VA:
-        {
-            uint16_t asid = CMD_ASID(&cmd);
-            uint16_t vmid = CMD_VMID(&cmd);
-            dma_addr_t addr = CMD_ADDR(&cmd);
-            bool leaf = CMD_LEAF(&cmd);
-
-            trace_smmuv3_cmdq_tlbi_nh_va(vmid, asid, addr, leaf);
-            smmuv3_inv_notifiers_iova(bs, asid, addr);
-            smmu_iotlb_inv_iova(bs, asid, addr);
+            smmuv3_s1_range_inval(bs, &cmd);
             break;
-        }
         case SMMU_CMD_TLBI_EL3_ALL:
        case SMMU_CMD_TLBI_EL3_VA:
         case SMMU_CMD_TLBI_EL2_ALL:
@@ -1257,6 +1256,9 @@ static MemTxResult smmu_readl(SMMUv3State *s, hwaddr offset,
     case A_IIDR:
         *data = s->iidr;
         return MEMTX_OK;
+    case A_AIDR:
+        *data = s->aidr;
+        return MEMTX_OK;
     case A_CR0:
         *data = s->cr[0];
         return MEMTX_OK;
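
For reference, a minimal standalone sketch (not part of the patch; the field values below are assumed examples) of the range arithmetic that smmuv3_s1_range_inval() and smmuv3_notify_iova() apply: a non-zero TG marks the command as a range invalidation covering (NUM + 1) * 2^SCALE granule-sized pages, and the notifier reports that span through addr_mask.

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
    /* Assumed example field values for a CMD_TLBI_NH_VA with TG != 0 */
    uint8_t scale = 1;             /* CMD_SCALE */
    uint8_t num = 3;               /* CMD_NUM */
    uint8_t granule = 12;          /* log2 of the granule size, 4K here */
    uint64_t addr = 0x80000000ULL; /* CMD_ADDR, start of the range */

    /* Same arithmetic as the patch */
    uint64_t num_pages = (num + 1) * (1ULL << scale);        /* 8 pages */
    uint64_t addr_mask = num_pages * (1ULL << granule) - 1;  /* 32K - 1 */

    printf("invalidate [0x%" PRIx64 " .. 0x%" PRIx64 "]\n",
           addr, addr + addr_mask);
    return 0;
}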