__domain_mapping() always removes the pages in the range from
'iov_pfn' to 'end_pfn', but 'end_pfn' is always the last pfn of
the whole range that the caller wants to map. This introduces a
lot of duplicated removal work and makes the map operation take
too long. For example:
Map iova=0x100000,nr_pages=0x7d61800
iov_pfn: 0x100000, end_pfn: 0x7e617ff
iov_pfn: 0x140000, end_pfn: 0x7e617ff
iov_pfn: 0x180000, end_pfn: 0x7e617ff
iov_pfn: 0x1c0000, end_pfn: 0x7e617ff
iov_pfn: 0x200000, end_pfn: 0x7e617ff
...
It takes about 50ms in total.
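
The reason is that 'end_pfn' is computed from the whole remaining
range on every iteration. Assuming largepage_lvl == 2 in the trace
above, level_mask(2) is ~0x1ff, so the first iteration computes:

  end_pfn = ((0x100000 + 0x7d61800) & level_mask(2)) - 1
          = (0x7e61800 & ~0x1ff) - 1
          = 0x7e617ff

and every later iteration produces almost the same 'end_pfn', so
switch_to_super_page() keeps clearing pages that were already
cleared.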
We can reduce the cost by recalculating 'end_pfn' and limiting it
to the boundary of the end of the pte page.
Map iova=0x100000,nr_pages=0x7d61800
iov_pfn: 0x100000, end_pfn: 0x13ffff
iov_pfn: 0x140000, end_pfn: 0x17ffff
iov_pfn: 0x180000, end_pfn: 0x1bffff
iov_pfn: 0x1c0000, end_pfn: 0x1fffff
iov_pfn: 0x200000, end_pfn: 0x23ffff
...
It only needs 9ms now.
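
Each call is now bounded by the ptes left in the current pte page,
since pages_to_remove = min(nr_pages, nr_pte_to_next_page(pte) *
lvl_pages). With VTD_STRIDE_SHIFT == 9, a freshly allocated level-2
pte page holds 512 entries and each entry covers 512 pfns, so the
first iteration computes:

  pages_to_remove = min(0x7d61800, 512 * 512) = 0x40000
  end_pfn         = 0x100000 + 0x40000 - 1    = 0x13ffff

which matches the 0x40000 stride in the trace above.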
Signed-off-by: Longpeng(Mike) <longpeng2@xxxxxxxxxx>
---
drivers/iommu/intel/iommu.c | 12 +++++++-----
include/linux/intel-iommu.h | 6 ++++++
2 files changed, 13 insertions(+), 5 deletions(-)
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index d75f59a..87cbf34 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -2354,12 +2354,18 @@ static void switch_to_super_page(struct dmar_domain *domain,
 				return -ENOMEM;
 			first_pte = pte;
 
+			lvl_pages = lvl_to_nr_pages(largepage_lvl);
+			BUG_ON(nr_pages < lvl_pages);
+
 			/* It is large page*/
 			if (largepage_lvl > 1) {
 				unsigned long end_pfn;
+				unsigned long pages_to_remove;
 
 				pteval |= DMA_PTE_LARGE_PAGE;
-				end_pfn = ((iov_pfn + nr_pages) & level_mask(largepage_lvl)) - 1;
+				pages_to_remove = min_t(unsigned long, nr_pages,
+							nr_pte_to_next_page(pte) * lvl_pages);
+				end_pfn = iov_pfn + pages_to_remove - 1;
 				switch_to_super_page(domain, iov_pfn, end_pfn, largepage_lvl);
 			} else {
 				pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
@@ -2381,10 +2387,6 @@ static void switch_to_super_page(struct dmar_domain *domain,
 			WARN_ON(1);
 		}
 
-		lvl_pages = lvl_to_nr_pages(largepage_lvl);
-
-		BUG_ON(nr_pages < lvl_pages);
-
 		nr_pages -= lvl_pages;
 		iov_pfn += lvl_pages;
 		phys_pfn += lvl_pages;
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index a590b00..4bff70c 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -713,6 +713,12 @@ static inline bool first_pte_in_page(struct dma_pte *pte)
 	return !((unsigned long)pte & ~VTD_PAGE_MASK);
 }
 
+static inline int nr_pte_to_next_page(struct dma_pte *pte)
+{
+	return first_pte_in_page(pte) ? BIT_ULL(VTD_STRIDE_SHIFT) :
+		(struct dma_pte *)VTD_PAGE_ALIGN((unsigned long)pte) - pte;
+}
+
 extern struct dmar_drhd_unit * dmar_find_matched_drhd_unit(struct pci_dev *dev);
 extern int dmar_find_matched_atsr_unit(struct pci_dev *dev);
 