iommu/vt-d: Use cache_tag_flush_range_np() in iotlb_sync_map
The iotlb_sync_map callback is called by the iommu core after non-present
to present mappings are created. The iommu driver uses this callback to
invalidate caches if IOMMU is working in caching mode and second-only
translation is used for the domain. Use cache_tag_flush_range_np() in this
callback.

Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Link: https://lore.kernel.org/r/20240416080656.60968-7-baolu.lu@linux.intel.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
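For context, the iommu core invokes a driver's iotlb_sync_map callback after it has installed new (non-present to present) mappings, giving the driver one place to make those mappings visible to hardware caches. Below is a minimal, self-contained C sketch of that flow; the names used here (domain, domain_ops, core_map, flush_range_np, driver_iotlb_sync_map) are illustrative stand-ins, not the kernel's actual definitions.

#include <stdio.h>
#include <stddef.h>

/* Simplified stand-in for the iommu core's per-domain ops table. */
struct domain_ops {
	/* Called by the core after non-present -> present mappings are created. */
	int (*iotlb_sync_map)(void *domain, unsigned long iova, size_t size);
};

struct domain {
	const struct domain_ops *ops;
};

/* Stand-in for a range flush of newly created (non-present) mappings. */
static void flush_range_np(struct domain *d, unsigned long start, unsigned long end)
{
	(void)d;
	printf("flush non-present->present range [0x%lx, 0x%lx]\n", start, end);
}

/* Driver-side callback, mirroring the shape of the VT-d sync_map path. */
static int driver_iotlb_sync_map(void *domain, unsigned long iova, size_t size)
{
	flush_range_np(domain, iova, iova + size - 1);
	return 0;
}

/* Core-side mapping path: install the mapping, then notify the driver. */
static int core_map(struct domain *d, unsigned long iova, size_t size)
{
	/* ... page-table entries would be written here ... */
	if (d->ops->iotlb_sync_map)
		return d->ops->iotlb_sync_map(d, iova, size);
	return 0;
}

int main(void)
{
	static const struct domain_ops ops = { .iotlb_sync_map = driver_iotlb_sync_map };
	struct domain d = { .ops = &ops };

	return core_map(&d, 0x1000, 0x2000);
}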
parent a600ccd0a3
commit 129dab6e12

1 changed file with 1 addition and 21 deletions
drivers/iommu/intel/iommu.c

@@ -1501,20 +1501,6 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
 		iommu_flush_dev_iotlb(domain, addr, mask);
 }
 
-/* Notification for newly created mappings */
-static void __mapping_notify_one(struct intel_iommu *iommu, struct dmar_domain *domain,
-				 unsigned long pfn, unsigned int pages)
-{
-	/*
-	 * It's a non-present to present mapping. Only flush if caching mode
-	 * and second level.
-	 */
-	if (cap_caching_mode(iommu->cap) && !domain->use_first_level)
-		iommu_flush_iotlb_psi(iommu, domain, pfn, pages, 0, 1);
-	else
-		iommu_flush_write_buffer(iommu);
-}
-
 /*
  * Flush the relevant caches in nested translation if the domain
  * also serves as a parent
@@ -4544,14 +4530,8 @@ static bool risky_device(struct pci_dev *pdev)
 static int intel_iommu_iotlb_sync_map(struct iommu_domain *domain,
 				      unsigned long iova, size_t size)
 {
-	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
-	unsigned long pages = aligned_nrpages(iova, size);
-	unsigned long pfn = iova >> VTD_PAGE_SHIFT;
-	struct iommu_domain_info *info;
-	unsigned long i;
+	cache_tag_flush_range_np(to_dmar_domain(domain), iova, iova + size - 1);
 
-	xa_for_each(&dmar_domain->iommu_array, i, info)
-		__mapping_notify_one(info->iommu, dmar_domain, pfn, pages);
 	return 0;
 }
 