[PATCH v3 03/14] linux/mm.h: move page_kasan_tag() to mm/page_kasan_tag.h
From: Max Kellermann
Date: Tue Mar 05 2024 - 04:00:12 EST
Prepare to reduce dependencies on linux/mm.h.
page_kasan_tag() is used by the page_to_virt() macro on arm64 (but
asm/memory.h does not include linux/mm.h). Thus, any code that calls
page_to_virt() also needs to include linux/mm.h.
This would prevent us from moving page_address() to a separate header,
because it calls lowmem_page_address(), which in turn calls
page_to_virt(). To prepare for that, move page_kasan_tag() out of
linux/mm.h into a separate, lean header.
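For illustration, the dependency chain looks roughly like this (a
simplified sketch of the mainline definitions, not a literal copy; the
real arm64 macro carries more detail):

/* include/linux/mm.h (simplified) */
static __always_inline void *lowmem_page_address(const struct page *page)
{
	return page_to_virt(page);
}
#define page_address(page) lowmem_page_address(page)

/* arch/arm64/include/asm/memory.h (simplified): the linear address is
 * combined with the page's KASAN tag, so the macro needs
 * page_kasan_tag(), yet this header cannot include linux/mm.h. */
#define page_to_virt(page) ({						\
	__typeof__(page) __page = page;					\
	u64 __addr = PAGE_OFFSET +					\
		(((u64)__page - VMEMMAP_START) / sizeof(struct page))	\
		* PAGE_SIZE;						\
	(void *)__tag_set((const void *)__addr, page_kasan_tag(__page));\
})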
A side effect of this patch is that the <linux/kasan.h> include line
is moved inside the "#ifdef CONFIG_KASAN..." block, i.e. it is not
included at all if KASAN is disabled.
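With page_kasan_tag() in its own header, code that needs only the tag
accessors can include that header directly instead of pulling in all of
linux/mm.h. A hypothetical consumer (for illustration only, not part of
this patch) could look like this:

#include <linux/mm/page_kasan_tag.h>

/* Hypothetical helper: true if the page carries the native kernel tag
 * (0xff), which is also what the stub returns when KASAN tagging is
 * disabled. */
static inline bool page_has_kernel_tag(const struct page *page)
{
	return page_kasan_tag(page) == 0xff;
}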
Signed-off-by: Max Kellermann <max.kellermann@xxxxxxxxx>
---
MAINTAINERS | 1 +
include/linux/mm.h | 56 +-------------------------
include/linux/mm/page_kasan_tag.h | 66 +++++++++++++++++++++++++++++++
3 files changed, 68 insertions(+), 55 deletions(-)
create mode 100644 include/linux/mm/page_kasan_tag.h
diff --git a/MAINTAINERS b/MAINTAINERS
index 9e5bb60c55fe..dbfe08329154 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -14210,6 +14210,7 @@ F: include/linux/mempolicy.h
F: include/linux/mempool.h
F: include/linux/memremap.h
F: include/linux/mm.h
+F: include/linux/mm/*.h
F: include/linux/mm_*.h
F: include/linux/mmzone.h
F: include/linux/mmu_notifier.h
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 0436b919f1c7..e140ee45f49c 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2,6 +2,7 @@
#ifndef _LINUX_MM_H
#define _LINUX_MM_H
+#include <linux/mm/page_kasan_tag.h>
#include <linux/errno.h>
#include <linux/mmdebug.h>
#include <linux/gfp.h>
@@ -27,7 +28,6 @@
#include <linux/sizes.h>
#include <linux/sched.h>
#include <linux/pgtable.h>
-#include <linux/kasan.h>
#include <linux/memremap.h>
#include <linux/slab.h>
@@ -1818,60 +1818,6 @@ static inline void vma_set_access_pid_bit(struct vm_area_struct *vma)
}
#endif /* CONFIG_NUMA_BALANCING */
-#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
-
-/*
- * KASAN per-page tags are stored xor'ed with 0xff. This allows to avoid
- * setting tags for all pages to native kernel tag value 0xff, as the default
- * value 0x00 maps to 0xff.
- */
-
-static inline u8 page_kasan_tag(const struct page *page)
-{
- u8 tag = KASAN_TAG_KERNEL;
-
- if (kasan_enabled()) {
- tag = (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK;
- tag ^= 0xff;
- }
-
- return tag;
-}
-
-static inline void page_kasan_tag_set(struct page *page, u8 tag)
-{
- unsigned long old_flags, flags;
-
- if (!kasan_enabled())
- return;
-
- tag ^= 0xff;
- old_flags = READ_ONCE(page->flags);
- do {
- flags = old_flags;
- flags &= ~(KASAN_TAG_MASK << KASAN_TAG_PGSHIFT);
- flags |= (tag & KASAN_TAG_MASK) << KASAN_TAG_PGSHIFT;
- } while (unlikely(!try_cmpxchg(&page->flags, &old_flags, flags)));
-}
-
-static inline void page_kasan_tag_reset(struct page *page)
-{
- if (kasan_enabled())
- page_kasan_tag_set(page, KASAN_TAG_KERNEL);
-}
-
-#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */
-
-static inline u8 page_kasan_tag(const struct page *page)
-{
- return 0xff;
-}
-
-static inline void page_kasan_tag_set(struct page *page, u8 tag) { }
-static inline void page_kasan_tag_reset(struct page *page) { }
-
-#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */
-
static inline struct zone *page_zone(const struct page *page)
{
return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
diff --git a/include/linux/mm/page_kasan_tag.h b/include/linux/mm/page_kasan_tag.h
new file mode 100644
index 000000000000..1210c62170a3
--- /dev/null
+++ b/include/linux/mm/page_kasan_tag.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_MM_PAGE_KASAN_TAG_H
+#define _LINUX_MM_PAGE_KASAN_TAG_H
+
+#include <linux/types.h>
+
+struct page;
+
+#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
+
+#include <linux/kasan.h>
+#include <linux/mm_types.h> // for struct page
+
+/*
+ * KASAN per-page tags are stored xor'ed with 0xff. This allows to avoid
+ * setting tags for all pages to native kernel tag value 0xff, as the default
+ * value 0x00 maps to 0xff.
+ */
+
+static inline u8 page_kasan_tag(const struct page *page)
+{
+ u8 tag = KASAN_TAG_KERNEL;
+
+ if (kasan_enabled()) {
+ tag = (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK;
+ tag ^= 0xff;
+ }
+
+ return tag;
+}
+
+static inline void page_kasan_tag_set(struct page *page, u8 tag)
+{
+ unsigned long old_flags, flags;
+
+ if (!kasan_enabled())
+ return;
+
+ tag ^= 0xff;
+ old_flags = READ_ONCE(page->flags);
+ do {
+ flags = old_flags;
+ flags &= ~(KASAN_TAG_MASK << KASAN_TAG_PGSHIFT);
+ flags |= (tag & KASAN_TAG_MASK) << KASAN_TAG_PGSHIFT;
+ } while (unlikely(!try_cmpxchg(&page->flags, &old_flags, flags)));
+}
+
+static inline void page_kasan_tag_reset(struct page *page)
+{
+ if (kasan_enabled())
+ page_kasan_tag_set(page, KASAN_TAG_KERNEL);
+}
+
+#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */
+
+static inline u8 page_kasan_tag(const struct page *page)
+{
+ return 0xff;
+}
+
+static inline void page_kasan_tag_set(struct page *page, u8 tag) { }
+static inline void page_kasan_tag_reset(struct page *page) { }
+
+#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */
+
+#endif /* _LINUX_MM_PAGE_KASAN_TAG_H */
--
2.39.2