[PATCH v2 02/28] mm: Add functions to zero portions of a folio
From: Matthew Wilcox (Oracle)
Date: Sun Nov 07 2021 - 23:14:47 EST
These functions are wrappers around zero_user_segments(). To support
them, zero_user_segments() is now available for compound pages even when
CONFIG_TRANSPARENT_HUGEPAGE is disabled.
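As an illustration only (this helper is not part of the patch), a caller
that wants to zero everything from some offset to the end of a folio,
whether the folio is a single page or a compound page, could now write
something like the following; example_zero_folio_tail() is a hypothetical
name and folio_size() is assumed from the core folio API:

	static void example_zero_folio_tail(struct folio *folio, size_t from)
	{
		/* Zero from 'from' to the end of the (possibly compound) folio. */
		folio_zero_segment(folio, from, folio_size(folio));
	}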
Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
---
 include/linux/highmem.h | 44 ++++++++++++++++++++++++++++++++++++++---
 mm/highmem.c            |  2 --
 2 files changed, 41 insertions(+), 5 deletions(-)
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 25aff0f2ed0b..c343c69bb5b4 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -231,10 +231,10 @@ static inline void tag_clear_highpage(struct page *page)
* If we pass in a base or tail page, we can zero up to PAGE_SIZE.
* If we pass in a head page, we can zero up to the size of the compound page.
*/
-#if defined(CONFIG_HIGHMEM) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
+#ifdef CONFIG_HIGHMEM
void zero_user_segments(struct page *page, unsigned start1, unsigned end1,
unsigned start2, unsigned end2);
-#else /* !HIGHMEM || !TRANSPARENT_HUGEPAGE */
+#else
static inline void zero_user_segments(struct page *page,
unsigned start1, unsigned end1,
unsigned start2, unsigned end2)
@@ -254,7 +254,7 @@ static inline void zero_user_segments(struct page *page,
for (i = 0; i < compound_nr(page); i++)
flush_dcache_page(page + i);
}
-#endif /* !HIGHMEM || !TRANSPARENT_HUGEPAGE */
+#endif
static inline void zero_user_segment(struct page *page,
unsigned start, unsigned end)
@@ -364,4 +364,42 @@ static inline void memzero_page(struct page *page, size_t offset, size_t len)
kunmap_local(addr);
}
+/**
+ * folio_zero_segments() - Zero two byte ranges in a folio.
+ * @folio: The folio to write to.
+ * @start1: The first byte to zero in the first range.
+ * @end1: One more than the last byte in the first range.
+ * @start2: The first byte to zero in the second range.
+ * @end2: One more than the last byte in the second range.
+ */
+static inline void folio_zero_segments(struct folio *folio,
+ size_t start1, size_t end1, size_t start2, size_t end2)
+{
+ zero_user_segments(&folio->page, start1, end1, start2, end2);
+}
+
+/**
+ * folio_zero_segment() - Zero a byte range in a folio.
+ * @folio: The folio to write to.
+ * @start: The first byte to zero.
+ * @end: One more than the last byte to zero.
+ */
+static inline void folio_zero_segment(struct folio *folio,
+ size_t start, size_t end)
+{
+ zero_user_segments(&folio->page, start, end, 0, 0);
+}
+
+/**
+ * folio_zero_range() - Zero a byte range in a folio.
+ * @folio: The folio to write to.
+ * @start: The first byte to zero.
+ * @length: The number of bytes to zero.
+ */
+static inline void folio_zero_range(struct folio *folio,
+ size_t start, size_t length)
+{
+ zero_user_segments(&folio->page, start, start + length, 0, 0);
+}
+
#endif /* _LINUX_HIGHMEM_H */
diff --git a/mm/highmem.c b/mm/highmem.c
index 88f65f155845..819d41140e5b 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -359,7 +359,6 @@ void kunmap_high(struct page *page)
}
EXPORT_SYMBOL(kunmap_high);
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void zero_user_segments(struct page *page, unsigned start1, unsigned end1,
unsigned start2, unsigned end2)
{
@@ -416,7 +415,6 @@ void zero_user_segments(struct page *page, unsigned start1, unsigned end1,
BUG_ON((start1 | start2 | end1 | end2) != 0);
}
EXPORT_SYMBOL(zero_user_segments);
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif /* CONFIG_HIGHMEM */
#ifdef CONFIG_KMAP_LOCAL
--
2.33.0