[PATCH v2] dma-direct: improve DMA_ATTR_NO_KERNEL_MAPPING
From: Walter Wu
Date: Wed Nov 03 2021 - 22:32:32 EST
When buffers are allocated from DMA coherent memory with
DMA_ATTR_NO_KERNEL_MAPPING, their kernel mapping still exists.
Callers passing DMA_ATTR_NO_KERNEL_MAPPING already agree not to
rely on a kernel mapping, so actually removing the kernel mapping
brings some improvements.
The improvements are:
a) Security improvement. In some cases, we do not want the allocated
buffer to be readable via CPU speculative execution. To prevent
that, the kernel mapping needs to be removed; this patch extends
DMA_ATTR_NO_KERNEL_MAPPING to remove the pages from the kernel
mapping so that the CPU cannot read them.
b) Debugging improvement. If the allocated buffer is mapped into user
space and is only meant to be accessed there, nobody should touch it
from kernel space; with this patch, any kernel-space access faults,
making it easy to catch who is accessing the buffer from the kernel.
This only works if the memory is mapped at page granularity in the
linear region, so it is currently supported only on ARM64.
Signed-off-by: Walter Wu <walter-zh.wu@xxxxxxxxxxxx>
Suggested-by: Christoph Hellwig <hch@xxxxxx>
Suggested-by: Ard Biesheuvel <ardb@xxxxxxxxxx>
Cc: Christoph Hellwig <hch@xxxxxx>
Cc: Marek Szyprowski <m.szyprowski@xxxxxxxxxxx>
Cc: Robin Murphy <robin.murphy@xxxxxxx>
Cc: Matthias Brugger <matthias.bgg@xxxxxxxxx>
Cc: Ard Biesheuvel <ardb@xxxxxxxxxx>
Cc: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---
v2:
1. modify commit message and fix the removing mapping for arm64
2. fix build error for x86
---
include/linux/set_memory.h | 5 +++++
kernel/dma/direct.c | 13 +++++++++++++
2 files changed, 18 insertions(+)
diff --git a/include/linux/set_memory.h b/include/linux/set_memory.h
index f36be5166c19..6c7d1683339c 100644
--- a/include/linux/set_memory.h
+++ b/include/linux/set_memory.h
@@ -7,11 +7,16 @@
#ifdef CONFIG_ARCH_HAS_SET_MEMORY
#include <asm/set_memory.h>
+
+#ifndef CONFIG_RODATA_FULL_DEFAULT_ENABLED
+static inline int set_memory_valid(unsigned long addr, int numpages, int enable) { return 0; }
+#endif
#else
static inline int set_memory_ro(unsigned long addr, int numpages) { return 0; }
static inline int set_memory_rw(unsigned long addr, int numpages) { return 0; }
static inline int set_memory_x(unsigned long addr, int numpages) { return 0; }
static inline int set_memory_nx(unsigned long addr, int numpages) { return 0; }
+static inline int set_memory_valid(unsigned long addr, int numpages, int enable) { return 0; }
#endif
#ifndef CONFIG_ARCH_HAS_SET_DIRECT_MAP
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 4c6c5e0635e3..d5d03b51b708 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -155,6 +155,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
struct page *page;
void *ret;
int err;
+ unsigned long kaddr;
size = PAGE_ALIGN(size);
if (attrs & DMA_ATTR_NO_WARN)
@@ -169,6 +170,11 @@ void *dma_direct_alloc(struct device *dev, size_t size,
if (!PageHighMem(page))
arch_dma_prep_coherent(page, size);
*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
+ if (IS_ENABLED(CONFIG_RODATA_FULL_DEFAULT_ENABLED)) {
+ kaddr = (unsigned long)phys_to_virt(dma_to_phys(dev, *dma_handle));
+ /* page remove kernel mapping for arm64 */
+ set_memory_valid(kaddr, size >> PAGE_SHIFT, 0);
+ }
/* return the page pointer as the opaque cookie */
return page;
}
@@ -275,9 +281,16 @@ void dma_direct_free(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs)
{
unsigned int page_order = get_order(size);
+ unsigned long kaddr;
if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
!force_dma_unencrypted(dev) && !is_swiotlb_for_alloc(dev)) {
+ if (IS_ENABLED(CONFIG_RODATA_FULL_DEFAULT_ENABLED)) {
+ size = PAGE_ALIGN(size);
+ kaddr = (unsigned long)phys_to_virt(dma_to_phys(dev, dma_addr));
+ /* page create kernel mapping for arm64 */
+ set_memory_valid(kaddr, size >> PAGE_SHIFT, 1);
+ }
/* cpu_addr is a struct page cookie, not a kernel address */
dma_free_contiguous(dev, cpu_addr, size);
return;
--
2.18.0