Could we merge patches 1-3 from this series? They just clean up weird code, and merging them will cover some of the work needed to move the Intel IOMMU driver to the dma-iommu API in the future.
On Sat, 21 Dec 2019 at 07:04, Tom Murphy <murphyt7@tcd.ie> wrote:
Remove all IOVA handling code from the non-dma_ops path in the Intel IOMMU driver.

There's no need for the non-dma_ops path to keep track of IOVAs: the whole point of that path is that it lets the caller handle IOVAs itself, so the IOVA handling code removed in this patch serves no purpose.
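(As an illustration, not part of the patch: on the non-dma_ops path, a caller such as VFIO gets an IOMMU_DOMAIN_UNMANAGED domain and picks its own IOVAs through the generic IOMMU API, so the driver's iovad tree never needs to track them. A minimal sketch of that usage, with 'pdev' and 'paddr' as placeholders and error handling omitted:

	/* Sketch only: the caller owns the IOVA space of an
	 * unmanaged domain and does all of the bookkeeping. */
	struct iommu_domain *dom = iommu_domain_alloc(&pci_bus_type);

	iommu_attach_device(dom, &pdev->dev);

	/* Map a caller-chosen IOVA (0x100000) to physical address
	 * 'paddr'; the driver never allocates or reserves it. */
	iommu_map(dom, 0x100000, paddr, SZ_4K, IOMMU_READ | IOMMU_WRITE);

	/* ... DMA happens ... */

	/* The caller also tears everything down itself. */
	iommu_unmap(dom, 0x100000, SZ_4K);
	iommu_detach_device(dom, &pdev->dev);
	iommu_domain_free(dom);

Any reserve_iova()/find_iova() bookkeeping the driver does on such a domain duplicates work the caller already owns.)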
Signed-off-by: Tom Murphy <murphyt7@tcd.ie>
 drivers/iommu/intel-iommu.c | 89 ++++++++++++++-----------------------
 1 file changed, 33 insertions(+), 56 deletions(-)
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 64b1a9793daa..8d72ea0fb843 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -1908,7 +1908,8 @@ static void domain_exit(struct dmar_domain *domain)
 	domain_remove_dev_info(domain);
 
 	/* destroy iovas */
-	put_iova_domain(&domain->iovad);
+	if (domain->domain.type == IOMMU_DOMAIN_DMA)
+		put_iova_domain(&domain->iovad);
 
 	if (domain->pgd) {
 		struct page *freelist;
@@ -2671,19 +2672,9 @@ static struct dmar_domain *set_domain_for_dev(struct device *dev,
 }
 
 static int iommu_domain_identity_map(struct dmar_domain *domain,
-				     unsigned long long start,
-				     unsigned long long end)
+				     unsigned long first_vpfn,
+				     unsigned long last_vpfn)
 {
-	unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
-	unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
-
-	if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
-			  dma_to_mm_pfn(last_vpfn))) {
-		pr_err("Reserving iova failed\n");
-		return -ENOMEM;
-	}
-
-	pr_debug("Mapping reserved region %llx-%llx\n", start, end);
 	/*
 	 * RMRR range might have overlap with physical memory range,
 	 * clear it first
@@ -2760,7 +2751,8 @@ static int __init si_domain_init(int hw)
 
 		for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
 			ret = iommu_domain_identity_map(si_domain,
-					PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
+					mm_to_dma_pfn(start_pfn),
+					mm_to_dma_pfn(end_pfn));
 			if (ret)
 				return ret;
 		}
@@ -4593,58 +4585,37 @@ static int intel_iommu_memory_notifier(struct notifier_block *nb,
 				       unsigned long val, void *v)
 {
 	struct memory_notify *mhp = v;
-	unsigned long long start, end;
-	unsigned long start_vpfn, last_vpfn;
+	unsigned long start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
+	unsigned long last_vpfn = mm_to_dma_pfn(mhp->start_pfn +
+			mhp->nr_pages - 1);
 
 	switch (val) {
 	case MEM_GOING_ONLINE:
-		start = mhp->start_pfn << PAGE_SHIFT;
-		end = ((mhp->start_pfn + mhp->nr_pages) << PAGE_SHIFT) - 1;
-		if (iommu_domain_identity_map(si_domain, start, end)) {
-			pr_warn("Failed to build identity map for [%llx-%llx]\n",
-				start, end);
+		if (iommu_domain_identity_map(si_domain, start_vpfn,
+					last_vpfn)) {
+			pr_warn("Failed to build identity map for [%lx-%lx]\n",
+				start_vpfn, last_vpfn);
 			return NOTIFY_BAD;
 		}
 		break;
 
 	case MEM_OFFLINE:
 	case MEM_CANCEL_ONLINE:
-		start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
-		last_vpfn = mm_to_dma_pfn(mhp->start_pfn + mhp->nr_pages - 1);
-		while (start_vpfn <= last_vpfn) {
-			struct iova *iova;
+		{
 			struct dmar_drhd_unit *drhd;
 			struct intel_iommu *iommu;
 			struct page *freelist;
 
-			iova = find_iova(&si_domain->iovad, start_vpfn);
-			if (iova == NULL) {
-				pr_debug("Failed get IOVA for PFN %lx\n",
-					 start_vpfn);
-				break;
-			}
-
-			iova = split_and_remove_iova(&si_domain->iovad, iova,
-						     start_vpfn, last_vpfn);
-			if (iova == NULL) {
-				pr_warn("Failed to split IOVA PFN [%lx-%lx]\n",
-					start_vpfn, last_vpfn);
-				return NOTIFY_BAD;
-			}
-
-			freelist = domain_unmap(si_domain, iova->pfn_lo,
-					       iova->pfn_hi);
+			freelist = domain_unmap(si_domain, start_vpfn,
+					last_vpfn);
 
 			rcu_read_lock();
 			for_each_active_iommu(iommu, drhd)
 				iommu_flush_iotlb_psi(iommu, si_domain,
-					iova->pfn_lo, iova_size(iova),
+					start_vpfn, mhp->nr_pages,
 					!freelist, 0);
 			rcu_read_unlock();
 			dma_free_pagelist(freelist);
-
-			start_vpfn = iova->pfn_hi + 1;
-			free_iova_mem(iova);
 		}
 		break;
 	}
@@ -4672,8 +4643,9 @@ static void free_all_cpu_cached_iovas(unsigned int cpu)
 		for (did = 0; did < cap_ndoms(iommu->cap); did++) {
 			domain = get_iommu_domain(iommu, (u16)did);
 
-			if (!domain)
+			if (!domain || domain->domain.type != IOMMU_DOMAIN_DMA)
 				continue;
+
 			free_cpu_cached_iovas(cpu, &domain->iovad);
 		}
 	}
@@ -5095,9 +5067,6 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width)
 {
 	int adjust_width;
 
-	init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
-	domain_reserve_special_ranges(domain);
-
 	/* calculate AGAW */
 	domain->gaw = guest_width;
 	adjust_width = guestwidth_to_adjustwidth(guest_width);
@@ -5116,6 +5085,18 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width)
 	return 0;
 }
 
+static void intel_init_iova_domain(struct dmar_domain *dmar_domain)
+{
+	init_iova_domain(&dmar_domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
+	copy_reserved_iova(&reserved_iova_list, &dmar_domain->iovad);
+
+	if (init_iova_flush_queue(&dmar_domain->iovad, iommu_flush_iova,
+				  iova_entry_free)) {
+		pr_warn("iova flush queue initialization failed\n");
+		intel_iommu_strict = 1;
+	}
+}
+
 static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
 {
 	struct dmar_domain *dmar_domain;
@@ -5136,12 +5117,8 @@ static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
 			return NULL;
 		}
 
-		if (type == IOMMU_DOMAIN_DMA &&
-		    init_iova_flush_queue(&dmar_domain->iovad,
-					  iommu_flush_iova, iova_entry_free)) {
-			pr_warn("iova flush queue initialization failed\n");
-			intel_iommu_strict = 1;
-		}
+		if (type == IOMMU_DOMAIN_DMA)
+			intel_init_iova_domain(dmar_domain);
 
 		domain_update_iommu_cap(dmar_domain);
--
2.20.1