[PATCH 11/16] iommu/dma: Support PCI P2PDMA pages in dma-iommu map_sg

From: Logan Gunthorpe
Date: Thu Apr 08 2021 - 13:02:09 EST


When a PCI P2PDMA page that must be mapped with a bus address is
encountered, set the segment's IOVA length to zero so that it is not
mapped into the IOVA. Then, in __finalise_sg(), apply the appropriate
bus address to the segment. No IOVA is allocated if the scatterlist
consists only of P2PDMA pages.
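
From a consumer's point of view, the mapped list then interleaves
segments inside the single IOVA allocation with bus address segments.
A rough driver-side sketch (program_desc() is a made-up stand-in for
whatever programs the device, and plain dma_map_sg() is used here for
familiarity; it is not necessarily the entry point this series intends
P2PDMA users to call):

  struct scatterlist *s;
  int i, count;

  count = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);

  for_each_sg(sgl, s, count, i) {
          /*
           * sg_dma_address() is either an address inside the
           * single IOVA allocation or, for segments filled in by
           * __finalise_sg(), a PCI bus address.
           */
          program_desc(hw, sg_dma_address(s), sg_dma_len(s));
  }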

Similar to dma-direct, bus address segments are marked with
sg_mark_pci_p2pdma(). On unmap, these marked segments are skipped
when determining the start and end addresses of the IOVA range to
free.
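
Condensed, the unmap loop in the hunk below uses DMA_MAPPING_ERROR as
a sentinel meaning "no IOVA-backed segment seen yet", so leading or
trailing bus address segments cannot skew the range handed to
__iommu_dma_unmap():

  for_each_sg(sg, tmp, nents, i) {
          if (sg_is_pci_p2pdma(tmp)) {
                  /* never entered the IOVA, nothing to unmap */
                  sg_unmark_pci_p2pdma(tmp);
                  continue;
          }
          if (sg_dma_len(tmp) == 0)
                  break;
          if (start == DMA_MAPPING_ERROR)
                  start = sg_dma_address(tmp);
          end = sg_dma_address(tmp) + sg_dma_len(tmp);
  }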

With this change, the flags field in the dma_map_ops structure is
set to DMA_F_PCI_P2PDMA_SUPPORTED to advertise support for P2PDMA
pages.
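
A driver is expected to confirm this capability before attempting
P2PDMA transfers. A hypothetical driver-side gate (the helper name is
illustrative only; drivers do not poke at dma_map_ops themselves, and
a mapping attempt that cannot be satisfied fails with -EREMOTEIO, as
the new map_sg path does for unsupported map types):

  /* Illustrative only: the helper name is assumed, not added here. */
  if (!dma_pci_p2pdma_supported(dev))
          return -EOPNOTSUPP;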

Signed-off-by: Logan Gunthorpe <logang@xxxxxxxxxxxx>
---
drivers/iommu/dma-iommu.c | 66 ++++++++++++++++++++++++++++++++++-----
1 file changed, 58 insertions(+), 8 deletions(-)

diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index af765c813cc8..ef49635f9819 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -20,6 +20,7 @@
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pci.h>
+#include <linux/pci-p2pdma.h>
#include <linux/swiotlb.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>
@@ -864,6 +865,16 @@ static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
sg_dma_address(s) = DMA_MAPPING_ERROR;
sg_dma_len(s) = 0;

+ if (is_pci_p2pdma_page(sg_page(s)) && !s_iova_len) {
+ if (i > 0)
+ cur = sg_next(cur);
+
+ pci_p2pdma_map_bus_segment(s, cur);
+ count++;
+ cur_len = 0;
+ continue;
+ }
+
/*
* Now fill in the real DMA data. If...
* - there is a valid output segment to append to
@@ -961,10 +972,12 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
struct iova_domain *iovad = &cookie->iovad;
struct scatterlist *s, *prev = NULL;
int prot = dma_info_to_prot(dir, dev_is_dma_coherent(dev), attrs);
+ struct dev_pagemap *pgmap = NULL;
+ enum pci_p2pdma_map_type map_type;
dma_addr_t iova;
size_t iova_len = 0;
unsigned long mask = dma_get_seg_boundary(dev);
- int i;
+ int i, ret = 0;

if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
iommu_deferred_attach(dev, domain))
@@ -993,6 +1006,31 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
s_length = iova_align(iovad, s_length + s_iova_off);
s->length = s_length;

+ if (is_pci_p2pdma_page(sg_page(s))) {
+ if (sg_page(s)->pgmap != pgmap) {
+ pgmap = sg_page(s)->pgmap;
+ map_type = pci_p2pdma_map_type(pgmap, dev,
+ attrs);
+ }
+
+ switch (map_type) {
+ case PCI_P2PDMA_MAP_BUS_ADDR:
+ /*
+ * A zero length will be ignored by
+ * iommu_map_sg() and then can be detected
+ * in __finalise_sg() to actually map the
+ * bus address.
+ */
+ s->length = 0;
+ continue;
+ case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
+ break;
+ default:
+ ret = -EREMOTEIO;
+ goto out_restore_sg;
+ }
+ }
+
/*
* Due to the alignment of our single IOVA allocation, we can
* depend on these assumptions about the segment boundary mask:
@@ -1015,6 +1053,9 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
prev = s;
}

+ if (!iova_len)
+ return __finalise_sg(dev, sg, nents, 0);
+
iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
if (!iova)
goto out_restore_sg;
@@ -1032,13 +1073,13 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
iommu_dma_free_iova(cookie, iova, iova_len, NULL);
out_restore_sg:
__invalidate_sg(sg, nents);
- return 0;
+ return ret;
}

static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir, unsigned long attrs)
{
- dma_addr_t start, end;
+ dma_addr_t end, start = DMA_MAPPING_ERROR;
struct scatterlist *tmp;
int i;

@@ -1054,14 +1095,22 @@ static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
* The scatterlist segments are mapped into a single
* contiguous IOVA allocation, so this is incredibly easy.
*/
- start = sg_dma_address(sg);
- for_each_sg(sg_next(sg), tmp, nents - 1, i) {
+ for_each_sg(sg, tmp, nents, i) {
+ if (sg_is_pci_p2pdma(tmp)) {
+ sg_unmark_pci_p2pdma(tmp);
+ continue;
+ }
if (sg_dma_len(tmp) == 0)
break;
- sg = tmp;
+
+ if (start == DMA_MAPPING_ERROR)
+ start = sg_dma_address(tmp);
+
+ end = sg_dma_address(tmp) + sg_dma_len(tmp);
}
- end = sg_dma_address(sg) + sg_dma_len(sg);
- __iommu_dma_unmap(dev, start, end - start);
+
+ if (start != DMA_MAPPING_ERROR)
+ __iommu_dma_unmap(dev, start, end - start);
}

static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
@@ -1254,6 +1303,7 @@ static unsigned long iommu_dma_get_merge_boundary(struct device *dev)
}

static const struct dma_map_ops iommu_dma_ops = {
+ .flags = DMA_F_PCI_P2PDMA_SUPPORTED,
.alloc = iommu_dma_alloc,
.free = iommu_dma_free,
.alloc_pages = dma_common_alloc_pages,
--
2.20.1