[PATCH v3 1/4] x86/clear_page: extend clear_page*() for multi-page clearing
From: Ankur Arora
Date: Sun Apr 13 2025 - 23:47:00 EST
clear_page*() variants now take a page-aligned length parameter and
clear the whole region.
Rename to clear_pages*().
Signed-off-by: Ankur Arora <ankur.a.arora@xxxxxxxxxx>
---
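For illustration, a minimal sketch of how a multi-page caller could use
the extended interface. clear_pages() below is hypothetical and not part
of this patch; the plumbing mirrors the clear_page() change underneath:

static inline void clear_pages(void *addr, unsigned int npages)
{
	/* Assumes npages * PAGE_SIZE fits in unsigned int. */
	unsigned int length = npages * PAGE_SIZE;

	kmsan_unpoison_memory(addr, length);

	alternative_call_2(clear_pages_orig,
			   clear_pages_rep, X86_FEATURE_REP_GOOD,
			   clear_pages_erms, X86_FEATURE_ERMS,
			   "=D" (addr),
			   ASM_INPUT("D" (addr), "S" (length)),
			   "cc", "memory", "rax", "rcx");
}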
arch/x86/include/asm/page_64.h | 20 +++++++------
arch/x86/lib/clear_page_64.S | 52 +++++++++++++++++++++++++---------
2 files changed, 49 insertions(+), 23 deletions(-)
diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
index d3aab6f4e59a..45db74db9021 100644
--- a/arch/x86/include/asm/page_64.h
+++ b/arch/x86/include/asm/page_64.h
@@ -40,22 +40,24 @@ extern unsigned long __phys_addr_symbol(unsigned long);
#define __phys_reloc_hide(x) (x)
-void clear_page_orig(void *page);
-void clear_page_rep(void *page);
-void clear_page_erms(void *page);
+void clear_pages_orig(void *page, unsigned int length);
+void clear_pages_rep(void *page, unsigned int length);
+void clear_pages_erms(void *page, unsigned int length);
static inline void clear_page(void *page)
{
+ unsigned int length = PAGE_SIZE;
/*
- * Clean up KMSAN metadata for the page being cleared. The assembly call
+ * Clean up KMSAN metadata for the pages being cleared. The assembly call
* below clobbers @page, so we perform unpoisoning before it.
*/
- kmsan_unpoison_memory(page, PAGE_SIZE);
- alternative_call_2(clear_page_orig,
- clear_page_rep, X86_FEATURE_REP_GOOD,
- clear_page_erms, X86_FEATURE_ERMS,
+ kmsan_unpoison_memory(page, length);
+
+ alternative_call_2(clear_pages_orig,
+ clear_pages_rep, X86_FEATURE_REP_GOOD,
+ clear_pages_erms, X86_FEATURE_ERMS,
"=D" (page),
- "D" (page),
+ ASM_INPUT("D" (page), "S" (length)),
"cc", "memory", "rax", "rcx");
}
diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
index a508e4a8c66a..bce516263b69 100644
--- a/arch/x86/lib/clear_page_64.S
+++ b/arch/x86/lib/clear_page_64.S
@@ -13,20 +13,35 @@
*/
/*
- * Zero a page.
- * %rdi - page
+ * Zero a page-aligned kernel region.
+ *
+ * Input:
+ * %rdi - destination
+ * %esi - length
+ *
+ * Clobbers: %rax, %rcx
*/
-SYM_TYPED_FUNC_START(clear_page_rep)
- movl $4096/8,%ecx
+SYM_TYPED_FUNC_START(clear_pages_rep)
+ movl %esi, %ecx
xorl %eax,%eax
+ shrl $3,%ecx
rep stosq
RET
-SYM_FUNC_END(clear_page_rep)
-EXPORT_SYMBOL_GPL(clear_page_rep)
+SYM_FUNC_END(clear_pages_rep)
+EXPORT_SYMBOL_GPL(clear_pages_rep)
-SYM_TYPED_FUNC_START(clear_page_orig)
+/*
+ * Original page-zeroing loop.
+ * Input:
+ * %rdi - destination
+ * %esi - length
+ *
+ * Clobbers: %rax, %rcx, %rflags
+ */
+SYM_TYPED_FUNC_START(clear_pages_orig)
+ movl %esi, %ecx
xorl %eax,%eax
- movl $4096/64,%ecx
+ shrl $6,%ecx
.p2align 4
.Lloop:
decl %ecx
@@ -43,16 +58,25 @@ SYM_TYPED_FUNC_START(clear_page_orig)
jnz .Lloop
nop
RET
-SYM_FUNC_END(clear_page_orig)
-EXPORT_SYMBOL_GPL(clear_page_orig)
+SYM_FUNC_END(clear_pages_orig)
+EXPORT_SYMBOL_GPL(clear_pages_orig)
-SYM_TYPED_FUNC_START(clear_page_erms)
- movl $4096,%ecx
+/*
+ * Zero a page-aligned kernel region.
+ *
+ * Input:
+ * %rdi - destination
+ * %esi - length
+ *
+ * Clobbers: %rax, %rcx
+ */
+SYM_TYPED_FUNC_START(clear_pages_erms)
+ movl %esi, %ecx
xorl %eax,%eax
rep stosb
RET
-SYM_FUNC_END(clear_page_erms)
-EXPORT_SYMBOL_GPL(clear_page_erms)
+SYM_FUNC_END(clear_pages_erms)
+EXPORT_SYMBOL_GPL(clear_pages_erms)
/*
* Default clear user-space.
--
2.31.1
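For reference, a rough C model of the three zeroing strategies and their
length-to-count conversions (illustrative only; the *_model names are
hypothetical, and the assembly above is authoritative):

#include <stdint.h>
#include <string.h>
#include <assert.h>

#define MODEL_PAGE_SIZE 4096u

/* rep stosq path: stores 8 bytes per iteration, %ecx = length / 8. */
static void clear_pages_rep_model(void *page, unsigned int length)
{
	uint64_t *p = page;

	for (unsigned int i = 0; i < length >> 3; i++)	/* shrl $3, %ecx */
		p[i] = 0;
}

/* Unrolled path: each iteration clears 64 bytes, %ecx = length / 64. */
static void clear_pages_orig_model(void *page, unsigned int length)
{
	uint64_t *p = page;

	for (unsigned int i = 0; i < length >> 6; i++)	/* shrl $6, %ecx */
		for (unsigned int j = 0; j < 8; j++)
			p[i * 8 + j] = 0;
}

/* ERMS path: rep stosb consumes the raw byte count in %ecx. */
static void clear_pages_erms_model(void *page, unsigned int length)
{
	unsigned char *p = page;

	for (unsigned int i = 0; i < length; i++)
		p[i] = 0;
}

int main(void)
{
	static uint64_t buf[2 * MODEL_PAGE_SIZE / 8];
	static uint64_t ref[2 * MODEL_PAGE_SIZE / 8];	/* all zeroes */

	memset(buf, 0xa5, sizeof(buf));
	clear_pages_rep_model(buf, 2 * MODEL_PAGE_SIZE);
	assert(!memcmp(buf, ref, sizeof(buf)));

	memset(buf, 0xa5, sizeof(buf));
	clear_pages_orig_model(buf, 2 * MODEL_PAGE_SIZE);
	assert(!memcmp(buf, ref, sizeof(buf)));

	memset(buf, 0xa5, sizeof(buf));
	clear_pages_erms_model(buf, 2 * MODEL_PAGE_SIZE);
	assert(!memcmp(buf, ref, sizeof(buf)));

	return 0;
}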