[PATCH 5.4 40/50] dma-direct: exclude dma_direct_map_resource from the min_low_pfn check
From: Greg Kroah-Hartman
Date: Fri May 08 2020 - 08:53:50 EST

From: Christoph Hellwig <hch@xxxxxx>

commit 68a33b1794665ba8a1d1ef1d3bfcc7c587d380a6 upstream.

The valid memory address check in dma_capable only makes sense when mapping
normal memory, not when using dma_map_resource to map a device resource.
Add a new boolean argument to dma_capable to exclude that check for the
dma_map_resource case.

Fixes: b12d66278dd6 ("dma-direct: check for overflows on 32 bit DMA addresses")
Reported-by: Marek Szyprowski <m.szyprowski@xxxxxxxxxxx>
Signed-off-by: Christoph Hellwig <hch@xxxxxx>
Acked-by: Marek Szyprowski <m.szyprowski@xxxxxxxxxxx>
Tested-by: Marek Szyprowski <m.szyprowski@xxxxxxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
---
 arch/x86/kernel/amd_gart_64.c |    4 ++--
 drivers/xen/swiotlb-xen.c     |    4 ++--
 include/linux/dma-direct.h    |    5 +++--
 kernel/dma/direct.c           |    4 ++--
 kernel/dma/swiotlb.c          |    2 +-
 5 files changed, 10 insertions(+), 9 deletions(-)
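
[Not part of the patch: for reference, a condensed sketch of dma_capable()
 as it reads once this change is applied. The closing dma_mask comparison
 sits outside the quoted hunk below and is reproduced here from the
 surrounding v5.4 include/linux/dma-direct.h, not from this diff.]

static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size,
		bool is_ram)
{
	dma_addr_t end = addr + size - 1;

	if (!dev->dma_mask)
		return false;

	/*
	 * The min_low_pfn lower-bound check only makes sense for normal
	 * RAM; dma_direct_map_resource() passes is_ram = false to skip it.
	 */
	if (is_ram && !IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) &&
	    min(addr, end) < phys_to_dma(dev, PFN_PHYS(min_low_pfn)))
		return false;

	/* Unchanged by this patch: the usual upper-bound mask check. */
	return end <= min_not_zero(*dev->dma_mask, dev->bus_dma_mask);
}

[All callers that map RAM pass true and keep the existing behaviour;
 only dma_direct_map_resource() passes false.]
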
--- a/arch/x86/kernel/amd_gart_64.c
+++ b/arch/x86/kernel/amd_gart_64.c
@@ -185,13 +185,13 @@ static void iommu_full(struct device *de
static inline int
need_iommu(struct device *dev, unsigned long addr, size_t size)
{
- return force_iommu || !dma_capable(dev, addr, size);
+ return force_iommu || !dma_capable(dev, addr, size, true);
}
static inline int
nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
{
- return !dma_capable(dev, addr, size);
+ return !dma_capable(dev, addr, size, true);
}
/* Map a single continuous physical area into the IOMMU.
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -375,7 +375,7 @@ static dma_addr_t xen_swiotlb_map_page(s
* we can safely return the device addr and not worry about bounce
* buffering it.
*/
- if (dma_capable(dev, dev_addr, size) &&
+ if (dma_capable(dev, dev_addr, size, true) &&
!range_straddles_page_boundary(phys, size) &&
!xen_arch_need_swiotlb(dev, phys, dev_addr) &&
swiotlb_force != SWIOTLB_FORCE)
@@ -397,7 +397,7 @@ static dma_addr_t xen_swiotlb_map_page(s
/*
* Ensure that the address returned is DMA'ble
*/
- if (unlikely(!dma_capable(dev, dev_addr, size))) {
+ if (unlikely(!dma_capable(dev, dev_addr, size, true))) {
swiotlb_tbl_unmap_single(dev, map, size, size, dir,
attrs | DMA_ATTR_SKIP_CPU_SYNC);
return DMA_MAPPING_ERROR;
--- a/include/linux/dma-direct.h
+++ b/include/linux/dma-direct.h
@@ -25,14 +25,15 @@ static inline phys_addr_t __dma_to_phys(
return paddr + ((phys_addr_t)dev->dma_pfn_offset << PAGE_SHIFT);
}
-static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
+static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size,
+ bool is_ram)
{
dma_addr_t end = addr + size - 1;
if (!dev->dma_mask)
return false;
- if (!IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) &&
+ if (is_ram && !IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) &&
min(addr, end) < phys_to_dma(dev, PFN_PHYS(min_low_pfn)))
return false;
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -327,7 +327,7 @@ static inline bool dma_direct_possible(s
size_t size)
{
return swiotlb_force != SWIOTLB_FORCE &&
- dma_capable(dev, dma_addr, size);
+ dma_capable(dev, dma_addr, size, true);
}
dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
@@ -376,7 +376,7 @@ dma_addr_t dma_direct_map_resource(struc
{
dma_addr_t dma_addr = paddr;
- if (unlikely(!dma_capable(dev, dma_addr, size))) {
+ if (unlikely(!dma_capable(dev, dma_addr, size, false))) {
report_addr(dev, dma_addr, size);
return DMA_MAPPING_ERROR;
}
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -678,7 +678,7 @@ bool swiotlb_map(struct device *dev, phy
/* Ensure that the address returned is DMA'ble */
*dma_addr = __phys_to_dma(dev, *phys);
- if (unlikely(!dma_capable(dev, *dma_addr, size))) {
+ if (unlikely(!dma_capable(dev, *dma_addr, size, true))) {
swiotlb_tbl_unmap_single(dev, *phys, size, size, dir,
attrs | DMA_ATTR_SKIP_CPU_SYNC);
return false;