[PATCH v8 6/7] swiotlb: support aligned swiotlb buffers
From: David Stevens
Date: Tue Sep 28 2021 - 22:33:41 EST

From: David Stevens <stevensd@xxxxxxxxxxxx>

Add an argument to swiotlb_tbl_map_single that specifies the desired
alignment of the allocated buffer. This is used by dma-iommu to ensure
the buffer is aligned to the iova granule size when using swiotlb with
untrusted sub-granule mappings. This addresses an issue where adjacent
slots could be exposed to the untrusted device if IO_TLB_SIZE < iova
granule < PAGE_SIZE.

Signed-off-by: David Stevens <stevensd@xxxxxxxxxxxx>
Reviewed-by: Christoph Hellwig <hch@xxxxxx>
---
 drivers/iommu/dma-iommu.c |  4 ++--
 drivers/xen/swiotlb-xen.c |  2 +-
 include/linux/swiotlb.h   |  3 ++-
 kernel/dma/swiotlb.c      | 13 ++++++++-----
 4 files changed, 13 insertions(+), 9 deletions(-)
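Not part of the patch, but as a reading aid: the standalone sketch below mirrors the mask-to-stride conversion this patch adds to swiotlb_find_slots(), assuming 2 KiB swiotlb slots (IO_TLB_SHIFT == 11) and a 4 KiB IOVA granule, i.e. the IO_TLB_SIZE < iova granule < PAGE_SIZE case (e.g. a 64 KiB PAGE_SIZE kernel). With iova_mask(iovad) passed as the alignment mask, the slot search steps in whole granules, so a bounce buffer for an untrusted sub-granule mapping always starts on a granule boundary.

/* Userspace sketch only, not kernel code. */
#include <stdio.h>

#define IO_TLB_SHIFT	11			/* 2 KiB swiotlb slots */

int main(void)
{
	/* Assumed example: 4 KiB IOVA granule, as iova_mask(iovad) would return. */
	unsigned int alloc_align_mask = 0xfff;

	/* Mask-to-stride conversion; the real code takes max() with the existing stride. */
	unsigned int stride = (alloc_align_mask >> IO_TLB_SHIFT) + 1;

	printf("search stride: %u slots (%u bytes)\n",
	       stride, stride << IO_TLB_SHIFT);	/* prints: 2 slots (4096 bytes) */
	return 0;
}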
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 85a005b268f6..289c49ead01a 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -818,8 +818,8 @@ static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
 		size_t padding_size;
 
 		aligned_size = iova_align(iovad, size);
-		phys = swiotlb_tbl_map_single(dev, phys, size,
-					      aligned_size, dir, attrs);
+		phys = swiotlb_tbl_map_single(dev, phys, size, aligned_size,
+					      iova_mask(iovad), dir, attrs);
 
 		if (phys == DMA_MAPPING_ERROR)
 			return DMA_MAPPING_ERROR;
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index e56a5faac395..cbdff8979980 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -380,7 +380,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 	 */
 	trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);
 
-	map = swiotlb_tbl_map_single(dev, phys, size, size, dir, attrs);
+	map = swiotlb_tbl_map_single(dev, phys, size, size, 0, dir, attrs);
 	if (map == (phys_addr_t)DMA_MAPPING_ERROR)
 		return DMA_MAPPING_ERROR;
 
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index b0cb2a9973f4..569272871375 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -45,7 +45,8 @@ extern void __init swiotlb_update_mem_attributes(void);
 
 phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t phys,
 		size_t mapping_size, size_t alloc_size,
-		enum dma_data_direction dir, unsigned long attrs);
+		unsigned int alloc_aligned_mask, enum dma_data_direction dir,
+		unsigned long attrs);
 
 extern void swiotlb_tbl_unmap_single(struct device *hwdev,
 				     phys_addr_t tlb_addr,
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 87c40517e822..019672b3da1d 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -459,7 +459,7 @@ static unsigned int wrap_index(struct io_tlb_mem *mem, unsigned int index)
  * allocate a buffer from that IO TLB pool.
  */
 static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr,
-		size_t alloc_size)
+		size_t alloc_size, unsigned int alloc_align_mask)
 {
 	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
 	unsigned long boundary_mask = dma_get_seg_boundary(dev);
@@ -483,6 +483,7 @@ static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr,
 	stride = (iotlb_align_mask >> IO_TLB_SHIFT) + 1;
 	if (alloc_size >= PAGE_SIZE)
 		stride = max(stride, stride << (PAGE_SHIFT - IO_TLB_SHIFT));
+	stride = max(stride, (alloc_align_mask >> IO_TLB_SHIFT) + 1);
 
 	spin_lock_irqsave(&mem->lock, flags);
 	if (unlikely(nslots > mem->nslabs - mem->used))
@@ -541,7 +542,8 @@ static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr,
 
 phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
 		size_t mapping_size, size_t alloc_size,
-		enum dma_data_direction dir, unsigned long attrs)
+		unsigned int alloc_align_mask, enum dma_data_direction dir,
+		unsigned long attrs)
 {
 	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
 	unsigned int offset = swiotlb_align_offset(dev, orig_addr);
@@ -561,7 +563,8 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
 		return (phys_addr_t)DMA_MAPPING_ERROR;
 	}
 
-	index = swiotlb_find_slots(dev, orig_addr, alloc_size + offset);
+	index = swiotlb_find_slots(dev, orig_addr,
+				   alloc_size + offset, alloc_align_mask);
 	if (index == -1) {
 		if (!(attrs & DMA_ATTR_NO_WARN))
 			dev_warn_ratelimited(dev,
@@ -675,7 +678,7 @@ dma_addr_t swiotlb_map(struct device *dev, phys_addr_t paddr, size_t size,
 	trace_swiotlb_bounced(dev, phys_to_dma(dev, paddr), size,
 			      swiotlb_force);
 
-	swiotlb_addr = swiotlb_tbl_map_single(dev, paddr, size, size, dir,
+	swiotlb_addr = swiotlb_tbl_map_single(dev, paddr, size, size, 0, dir,
 			attrs);
 	if (swiotlb_addr == (phys_addr_t)DMA_MAPPING_ERROR)
 		return DMA_MAPPING_ERROR;
@@ -759,7 +762,7 @@ struct page *swiotlb_alloc(struct device *dev, size_t size)
 	if (!mem)
 		return NULL;
 
-	index = swiotlb_find_slots(dev, 0, size);
+	index = swiotlb_find_slots(dev, 0, size, 0);
 	if (index == -1)
 		return NULL;
 
--
2.33.0.685.g46640cef36-goog