[RFC PATCH v7 06/16] xpfo: add primitives for mapping underlying memory
From: Khalid Aziz
Date: Thu Jan 10 2019 - 16:11:07 EST
From: Tycho Andersen <tycho@xxxxxxxxxx>
In some cases (e.g. DMA and data cache flushes on arm64) we may have
unmapped the underlying pages needed for an operation via XPFO. Add some
primitives useful for ensuring the underlying memory is mapped/unmapped in
the face of XPFO.
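For illustration, a caller could bracket an operation on a buffer whose
pages may currently be unmapped roughly like this (a sketch only; the
function name and the flush placeholder are made up for this example):

#include <linux/xpfo.h>

/*
 * Hypothetical caller: temporarily restore kernel mappings for a buffer
 * whose pages XPFO may have unmapped, do the work, then drop them again.
 */
static void frob_buffer_with_xpfo(const void *addr, size_t size)
{
	/* One mapping slot for every page the buffer can span. */
	void *mapping[XPFO_NUM_PAGES(addr, size)];

	/* kmap_atomic()s any of the pages that XPFO has left unmapped. */
	xpfo_temp_map(addr, size, mapping, sizeof(mapping));

	/* ... operate on the buffer, e.g. flush its data cache ... */

	/* Undo only the mappings xpfo_temp_map() created above. */
	xpfo_temp_unmap(addr, size, mapping, sizeof(mapping));
}

Since xpfo_temp_map() uses kmap_atomic(), the caller must not sleep
between the map and unmap calls.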
Signed-off-by: Tycho Andersen <tycho@xxxxxxxxxx>
Signed-off-by: Khalid Aziz <khalid.aziz@xxxxxxxxxx>
---
include/linux/xpfo.h | 22 ++++++++++++++++++++++
mm/xpfo.c            | 30 ++++++++++++++++++++++++++++++
2 files changed, 52 insertions(+)
diff --git a/include/linux/xpfo.h b/include/linux/xpfo.h
index e38b823f44e3..2682a00ebbcb 100644
--- a/include/linux/xpfo.h
+++ b/include/linux/xpfo.h
@@ -37,6 +37,15 @@ void xpfo_free_pages(struct page *page, int order);
bool xpfo_page_is_unmapped(struct page *page);
+#define XPFO_NUM_PAGES(addr, size) \
+ (PFN_UP((unsigned long) (addr) + (size)) - \
+ PFN_DOWN((unsigned long) (addr)))
+
+void xpfo_temp_map(const void *addr, size_t size, void **mapping,
+ size_t mapping_len);
+void xpfo_temp_unmap(const void *addr, size_t size, void **mapping,
+ size_t mapping_len);
+
#else /* !CONFIG_XPFO */
static inline void xpfo_kmap(void *kaddr, struct page *page) { }
@@ -46,6 +55,19 @@ static inline void xpfo_free_pages(struct page *page, int order) { }
static inline bool xpfo_page_is_unmapped(struct page *page) { return false; }
+#define XPFO_NUM_PAGES(addr, size) 0
+
+static inline void xpfo_temp_map(const void *addr, size_t size, void **mapping,
+ size_t mapping_len)
+{
+}
+
+static inline void xpfo_temp_unmap(const void *addr, size_t size,
+ void **mapping, size_t mapping_len)
+{
+}
+
+
#endif /* CONFIG_XPFO */
#endif /* _LINUX_XPFO_H */
diff --git a/mm/xpfo.c b/mm/xpfo.c
index cdbcbac582d5..f79075bf7d65 100644
--- a/mm/xpfo.c
+++ b/mm/xpfo.c
@@ -13,6 +13,7 @@
* the Free Software Foundation.
*/
+#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/page_ext.h>
@@ -235,3 +236,32 @@ bool xpfo_page_is_unmapped(struct page *page)
return test_bit(XPFO_PAGE_UNMAPPED, &xpfo->flags);
}
EXPORT_SYMBOL(xpfo_page_is_unmapped);
+
+void xpfo_temp_map(const void *addr, size_t size, void **mapping,
+ size_t mapping_len)
+{
+ struct page *page = virt_to_page(addr);
+ int i, num_pages = mapping_len / sizeof(mapping[0]);
+
+ memset(mapping, 0, mapping_len);
+
+ for (i = 0; i < num_pages; i++) {
+ if (page_to_virt(page + i) >= addr + size)
+ break;
+
+ if (xpfo_page_is_unmapped(page + i))
+ mapping[i] = kmap_atomic(page + i);
+ }
+}
+EXPORT_SYMBOL(xpfo_temp_map);
+
+void xpfo_temp_unmap(const void *addr, size_t size, void **mapping,
+ size_t mapping_len)
+{
+ int i, num_pages = mapping_len / sizeof(mapping[0]);
+
+ for (i = 0; i < num_pages; i++)
+ if (mapping[i])
+ kunmap_atomic(mapping[i]);
+}
+EXPORT_SYMBOL(xpfo_temp_unmap);
--
2.17.1