DMA_ATTR_NO_KERNEL_MAPPING is meant to avoid creating a kernel mapping
for the allocated buffer, but in the current implementation the
buffer's PTEs in the kernel page table remain valid. Mark those PTEs
invalid so that the allocated buffer really has no kernel mapping.
In some cases we don't want the allocated buffer to be readable by the
CPU, including through speculative execution, so we use
DMA_ATTR_NO_KERNEL_MAPPING to ensure there is no kernel mapping at all.
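
For illustration, a caller would request such a buffer roughly as
follows (a minimal sketch; "dev" and the 64K size are placeholders,
and with this attribute the returned cookie is an opaque struct page
pointer that must not be dereferenced):

	#include <linux/dma-mapping.h>
	#include <linux/sizes.h>

	dma_addr_t dma_handle;
	void *cookie;

	/*
	 * No kernel mapping is created (and with this patch the
	 * linear-map PTEs are made invalid), so the buffer is only
	 * reachable by the device via dma_handle.
	 */
	cookie = dma_alloc_attrs(dev, SZ_64K, &dma_handle, GFP_KERNEL,
				 DMA_ATTR_NO_KERNEL_MAPPING);
	if (!cookie)
		return -ENOMEM;

	/* ... hand dma_handle to the device ... */

	dma_free_attrs(dev, SZ_64K, cookie, dma_handle,
		       DMA_ATTR_NO_KERNEL_MAPPING);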
Signed-off-by: Walter Wu <walter-zh.wu@xxxxxxxxxxxx>
Cc: Christoph Hellwig <hch@xxxxxx>
Cc: Marek Szyprowski <m.szyprowski@xxxxxxxxxxx>
Cc: Robin Murphy <robin.murphy@xxxxxxx>
Cc: Matthias Brugger <matthias.bgg@xxxxxxxxx>
Cc: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---
kernel/dma/direct.c | 8 ++++++++
1 file changed, 8 insertions(+)
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 4c6c5e0635e3..aa10b4c5d762 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -13,6 +13,7 @@
#include <linux/vmalloc.h>
#include <linux/set_memory.h>
#include <linux/slab.h>
+#include <asm/cacheflush.h>
#include "direct.h"
/*
@@ -169,6 +170,9 @@ void *dma_direct_alloc(struct device *dev, size_t size,
if (!PageHighMem(page))
arch_dma_prep_coherent(page, size);
*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
+ /* remove kernel mapping for pages */
+ set_memory_valid((unsigned long)phys_to_virt(dma_to_phys(dev, *dma_handle)),
+ size >> PAGE_SHIFT, 0);
/* return the page pointer as the opaque cookie */
return page;
}
@@ -278,6 +282,10 @@ void dma_direct_free(struct device *dev, size_t size,
if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
!force_dma_unencrypted(dev) && !is_swiotlb_for_alloc(dev)) {
+ size = PAGE_ALIGN(size);
+ /* create kernel mapping for pages */
+ set_memory_valid((unsigned long)phys_to_virt(dma_to_phys(dev, dma_addr)),
+ size >> PAGE_SHIFT, 1);
/* cpu_addr is a struct page cookie, not a kernel address */
dma_free_contiguous(dev, cpu_addr, size);
return;
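
For reference, set_memory_valid() toggles the valid bit on the
linear-map PTEs; a rough sketch of the arm64 version, paraphrased from
arch/arm64/mm/pageattr.c (exact helpers may differ between kernel
versions):

	int set_memory_valid(unsigned long addr, int numpages, int enable)
	{
		if (enable)
			/* set PTE_VALID: pages become mapped again */
			return __change_memory_common(addr, PAGE_SIZE * numpages,
						      __pgprot(PTE_VALID),
						      __pgprot(0));
		else
			/* clear PTE_VALID: any CPU access through the
			 * linear map, including speculative fetches,
			 * now faults */
			return __change_memory_common(addr, PAGE_SIZE * numpages,
						      __pgprot(0),
						      __pgprot(PTE_VALID));
	}

This is also why the free path restores the mapping with
set_memory_valid(..., 1) before dma_free_contiguous(): the pages go
back to the page allocator, and later users expect a valid linear
mapping.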