Re: [PATCH v7 00/16] mm: folio_zero_user: clear contiguous pages

From: Ankur Arora

Date: Tue Oct 07 2025 - 02:18:33 EST



Raghavendra K T <raghavendra.kt@xxxxxxx> writes:

> On 9/17/2025 8:54 PM, Ankur Arora wrote:
>> This series adds clearing of contiguous page ranges for hugepages,
>> improving on the current page-at-a-time approach in two ways:
>>
>> - amortizes the per-page setup cost over a larger extent
>>
>> - when using string instructions, exposes the real region size
>> to the processor.
>>
>> A processor could use knowledge of the extent to optimize the
>> clearing. AMD Zen uarchs, as an example, elide allocation of
>> cachelines for regions larger than L3-size.
> [...]
>
> Hello,
>
> Feel free to add
>
> Tested-by: Raghavendra K T <raghavendra.kt@xxxxxxx>

Great. Thanks Raghu.

> for whole series.
>
> [ I do understand that there may be minor tweaks to clear page patches
> to convert nth_page once David's changes are in]

Yeah and a few other changes based on Andrew and David's comments.

> SUT: AMD Zen5
>
> I also did a quick hack to unconditionally use CLZERO/MOVNT on top of
> Ankur's series to test how much additional benefit architectural
> enhancements can bring. [ In line with the second part of Ankur's old series
> before the preempt lazy changes ]. Please note that it is only for testing;
> ideally, for lower sizes we would want rep stosb only, and the threshold at
> which we need to do a non-temporal copy should perhaps be a function of the
> L3 and/or L2 size.
>
> Results:
> base : 6.17-rc6 + perf bench patches
> clearpage : 6.17-rc6 + whole series from Ankur
> clzero : 6.17-rc6 + Ankur's series + clzero (below patch)
> movnt : 6.17-rc6 + Ankur's series + movnt (below patch)
>
> Command run: ./perf bench mem mmap -p 2MB -f demand -s 64GB -l 10
>
> Higher = better
>
> preempt = lazy (GB/sec) preempt = voluntary (GB/sec)
>
> base 20.655559 19.712500
> clearpage 35.060572 34.533414
> clzero 66.948422 66.067265
> movnt 51.593506 51.403765

The CLZERO number with page-size=2MB is pretty impressive. But as you
said, non-temporal instructions need more thinking related to
thresholds etc.

> CLZERO/MOVNT experimental patch. Hope I have not missed anything here :)

Looks good to me :).

> -- >8 --
> diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
> index 52c8910ba2ef..26cef2b187b9 100644
> --- a/arch/x86/Kconfig
> +++ b/arch/x86/Kconfig
> @@ -3170,6 +3170,8 @@ config HAVE_ATOMIC_IOMAP
> def_bool y
> depends on X86_32
>
> +source "arch/x86/Kconfig.cpy"
> +
> source "arch/x86/kvm/Kconfig"
>
> source "arch/x86/Kconfig.cpufeatures"
> diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
> index 2361066d175e..aa2e62bbfa62 100644
> --- a/arch/x86/include/asm/page_64.h
> +++ b/arch/x86/include/asm/page_64.h
> @@ -84,11 +84,23 @@ static inline void clear_pages(void *addr, unsigned int npages)
> */
> kmsan_unpoison_memory(addr, len);
> asm volatile(ALTERNATIVE_2("call memzero_page_aligned_unrolled",
> - "shrq $3, %%rcx; rep stosq", X86_FEATURE_REP_GOOD,
> - "rep stosb", X86_FEATURE_ERMS)
> - : "+c" (len), "+D" (addr), ASM_CALL_CONSTRAINT
> - : "a" (0)
> - : "cc", "memory");
> + "shrq $3, %%rcx; rep stosq", X86_FEATURE_REP_GOOD,
> +#if defined(CONFIG_CLEARPAGE_CLZERO)
> + "call clear_pages_clzero", X86_FEATURE_CLZERO)
> + : "+c" (len), "+D" (addr), ASM_CALL_CONSTRAINT
> + : "a" (0)
> + : "cc", "memory");
> +#elif defined(CONFIG_CLEARPAGE_MOVNT)
> + "call clear_pages_movnt", X86_FEATURE_XMM2)
> + : "+c" (len), "+D" (addr), ASM_CALL_CONSTRAINT
> + : "a" (0)
> + : "cc", "memory");
> +#else
> + "rep stosb", X86_FEATURE_ERMS)
> + : "+c" (len), "+D" (addr), ASM_CALL_CONSTRAINT
> + : "a" (0)
> + : "cc", "memory");
> +#endif
> }
> #define clear_pages clear_pages
>
> diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
> index 27debe0c018c..0848287446dd 100644
> --- a/arch/x86/lib/clear_page_64.S
> +++ b/arch/x86/lib/clear_page_64.S
> @@ -4,6 +4,7 @@
> #include <linux/cfi_types.h>
> #include <linux/objtool.h>
> #include <asm/asm.h>
> +#include <asm/page_types.h>
>
> /*
> * Zero page aligned region.
> @@ -119,3 +120,40 @@ SYM_FUNC_START(rep_stos_alternative)
> _ASM_EXTABLE_UA(17b, .Lclear_user_tail)
> SYM_FUNC_END(rep_stos_alternative)
> EXPORT_SYMBOL(rep_stos_alternative)
> +
> +SYM_FUNC_START(clear_pages_movnt)
> + .p2align 4
> +.Lstart:
> + movnti %rax, 0x00(%rdi)
> + movnti %rax, 0x08(%rdi)
> + movnti %rax, 0x10(%rdi)
> + movnti %rax, 0x18(%rdi)
> + movnti %rax, 0x20(%rdi)
> + movnti %rax, 0x28(%rdi)
> + movnti %rax, 0x30(%rdi)
> + movnti %rax, 0x38(%rdi)
> + addq $0x40, %rdi
> + subl $0x40, %ecx
> + ja .Lstart
> + RET
> +SYM_FUNC_END(clear_pages_movnt)
> +EXPORT_SYMBOL_GPL(clear_pages_movnt)
> +
> +/*
> + * Zero a page using clzero (On AMD, with CPU_FEATURE_CLZERO.)
> + *
> + * Caller needs to issue a sfence at the end.
> + */
> +
> +SYM_FUNC_START(clear_pages_clzero)
> + movq %rdi,%rax
> + .p2align 4
> +.Liter:
> + clzero
> + addq $0x40, %rax
> + subl $0x40, %ecx
> + ja .Liter
> + sfence
> + RET
> +SYM_FUNC_END(clear_pages_clzero)
> +EXPORT_SYMBOL_GPL(clear_pages_clzero)


--
ankur