Re: [PATCH 2/4] selftests/mm: rename guard-pages to guard-regions

From: Vlastimil Babka
Date: Tue Feb 18 2025 - 09:16:12 EST


On 2/13/25 19:17, Lorenzo Stoakes wrote:
> The feature formerly referred to as guard pages is more correctly referred
> to as 'guard regions', as in fact no pages are ever allocated in the
> process of installing the regions.
>
> To avoid confusion, rename the tests accordingly.
>
> Signed-off-by: Lorenzo Stoakes <lorenzo.stoakes@xxxxxxxxxx>

Acked-by: Vlastimil Babka <vbabka@xxxxxxx>
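
Not part of the patch, but for readers who haven't followed the series: a minimal
sketch of what a "guard region" is and why no pages are involved. This assumes the
MADV_GUARD_INSTALL/MADV_GUARD_REMOVE advice names and the fallback constant values
(102/103) from the uapi asm-generic/mman-common.h header this feature added; treat
it as an illustration, not as something exercised by this particular rename patch.

	#include <stdio.h>
	#include <sys/mman.h>
	#include <unistd.h>

	/* Fallbacks in case the libc headers don't yet carry these (values
	 * assumed from uapi asm-generic/mman-common.h). */
	#ifndef MADV_GUARD_INSTALL
	#define MADV_GUARD_INSTALL 102
	#endif
	#ifndef MADV_GUARD_REMOVE
	#define MADV_GUARD_REMOVE 103
	#endif

	int main(void)
	{
		long page_size = sysconf(_SC_PAGESIZE);
		char *ptr = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE,
				 MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);

		if (ptr == MAP_FAILED)
			return 1;

		/* Install a guard marker over the first page. This only sets a
		 * marker in the page tables - no page is faulted in or
		 * allocated, which is why "guard region" is the better name. */
		if (madvise(ptr, page_size, MADV_GUARD_INSTALL))
			perror("MADV_GUARD_INSTALL");

		/* Touching ptr[0] here would deliver SIGSEGV. */

		/* Remove the marker; the range behaves like ordinary
		 * anonymous memory again. */
		if (madvise(ptr, page_size, MADV_GUARD_REMOVE))
			perror("MADV_GUARD_REMOVE");

		ptr[0] = 'x';	/* fine after removal */

		munmap(ptr, 10 * page_size);
		return 0;
	}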

> ---
> tools/testing/selftests/mm/.gitignore | 2 +-
> tools/testing/selftests/mm/Makefile | 2 +-
> .../mm/{guard-pages.c => guard-regions.c} | 42 +++++++++----------
> 3 files changed, 23 insertions(+), 23 deletions(-)
> rename tools/testing/selftests/mm/{guard-pages.c => guard-regions.c} (98%)
>
> diff --git a/tools/testing/selftests/mm/.gitignore b/tools/testing/selftests/mm/.gitignore
> index 121000c28c10..c5241b193db8 100644
> --- a/tools/testing/selftests/mm/.gitignore
> +++ b/tools/testing/selftests/mm/.gitignore
> @@ -57,4 +57,4 @@ droppable
> hugetlb_dio
> pkey_sighandler_tests_32
> pkey_sighandler_tests_64
> -guard-pages
> +guard-regions
> diff --git a/tools/testing/selftests/mm/Makefile b/tools/testing/selftests/mm/Makefile
> index 63ce39d024bb..8270895039d1 100644
> --- a/tools/testing/selftests/mm/Makefile
> +++ b/tools/testing/selftests/mm/Makefile
> @@ -97,7 +97,7 @@ TEST_GEN_FILES += hugetlb_fault_after_madv
> TEST_GEN_FILES += hugetlb_madv_vs_map
> TEST_GEN_FILES += hugetlb_dio
> TEST_GEN_FILES += droppable
> -TEST_GEN_FILES += guard-pages
> +TEST_GEN_FILES += guard-regions
>
> ifneq ($(ARCH),arm64)
> TEST_GEN_FILES += soft-dirty
> diff --git a/tools/testing/selftests/mm/guard-pages.c b/tools/testing/selftests/mm/guard-regions.c
> similarity index 98%
> rename from tools/testing/selftests/mm/guard-pages.c
> rename to tools/testing/selftests/mm/guard-regions.c
> index ece37212a8a2..7a41cf9ffbdf 100644
> --- a/tools/testing/selftests/mm/guard-pages.c
> +++ b/tools/testing/selftests/mm/guard-regions.c
> @@ -107,12 +107,12 @@ static bool try_read_write_buf(char *ptr)
> return try_read_buf(ptr) && try_write_buf(ptr);
> }
>
> -FIXTURE(guard_pages)
> +FIXTURE(guard_regions)
> {
> unsigned long page_size;
> };
>
> -FIXTURE_SETUP(guard_pages)
> +FIXTURE_SETUP(guard_regions)
> {
> struct sigaction act = {
> .sa_handler = &handle_fatal,
> @@ -126,7 +126,7 @@ FIXTURE_SETUP(guard_pages)
> self->page_size = (unsigned long)sysconf(_SC_PAGESIZE);
> };
>
> -FIXTURE_TEARDOWN(guard_pages)
> +FIXTURE_TEARDOWN(guard_regions)
> {
> struct sigaction act = {
> .sa_handler = SIG_DFL,
> @@ -137,7 +137,7 @@ FIXTURE_TEARDOWN(guard_pages)
> sigaction(SIGSEGV, &act, NULL);
> }
>
> -TEST_F(guard_pages, basic)
> +TEST_F(guard_regions, basic)
> {
> const unsigned long NUM_PAGES = 10;
> const unsigned long page_size = self->page_size;
> @@ -231,7 +231,7 @@ TEST_F(guard_pages, basic)
> }
>
> /* Assert that operations applied across multiple VMAs work as expected. */
> -TEST_F(guard_pages, multi_vma)
> +TEST_F(guard_regions, multi_vma)
> {
> const unsigned long page_size = self->page_size;
> char *ptr_region, *ptr, *ptr1, *ptr2, *ptr3;
> @@ -367,7 +367,7 @@ TEST_F(guard_pages, multi_vma)
> * Assert that batched operations performed using process_madvise() work as
> * expected.
> */
> -TEST_F(guard_pages, process_madvise)
> +TEST_F(guard_regions, process_madvise)
> {
> const unsigned long page_size = self->page_size;
> pid_t pid = getpid();
> @@ -467,7 +467,7 @@ TEST_F(guard_pages, process_madvise)
> }
>
> /* Assert that unmapping ranges does not leave guard markers behind. */
> -TEST_F(guard_pages, munmap)
> +TEST_F(guard_regions, munmap)
> {
> const unsigned long page_size = self->page_size;
> char *ptr, *ptr_new1, *ptr_new2;
> @@ -505,7 +505,7 @@ TEST_F(guard_pages, munmap)
> }
>
> /* Assert that mprotect() operations have no bearing on guard markers. */
> -TEST_F(guard_pages, mprotect)
> +TEST_F(guard_regions, mprotect)
> {
> const unsigned long page_size = self->page_size;
> char *ptr;
> @@ -553,7 +553,7 @@ TEST_F(guard_pages, mprotect)
> }
>
> /* Split and merge VMAs and make sure guard pages still behave. */
> -TEST_F(guard_pages, split_merge)
> +TEST_F(guard_regions, split_merge)
> {
> const unsigned long page_size = self->page_size;
> char *ptr, *ptr_new;
> @@ -684,7 +684,7 @@ TEST_F(guard_pages, split_merge)
> }
>
> /* Assert that MADV_DONTNEED does not remove guard markers. */
> -TEST_F(guard_pages, dontneed)
> +TEST_F(guard_regions, dontneed)
> {
> const unsigned long page_size = self->page_size;
> char *ptr;
> @@ -737,7 +737,7 @@ TEST_F(guard_pages, dontneed)
> }
>
> /* Assert that mlock()'ed pages work correctly with guard markers. */
> -TEST_F(guard_pages, mlock)
> +TEST_F(guard_regions, mlock)
> {
> const unsigned long page_size = self->page_size;
> char *ptr;
> @@ -810,7 +810,7 @@ TEST_F(guard_pages, mlock)
> *
> * - Moving a mapping alone should retain markers as they are.
> */
> -TEST_F(guard_pages, mremap_move)
> +TEST_F(guard_regions, mremap_move)
> {
> const unsigned long page_size = self->page_size;
> char *ptr, *ptr_new;
> @@ -857,7 +857,7 @@ TEST_F(guard_pages, mremap_move)
> * will have to remove guard pages manually to fix up (they'd have to do the
> * same if it were a PROT_NONE mapping).
> */
> -TEST_F(guard_pages, mremap_expand)
> +TEST_F(guard_regions, mremap_expand)
> {
> const unsigned long page_size = self->page_size;
> char *ptr, *ptr_new;
> @@ -920,7 +920,7 @@ TEST_F(guard_pages, mremap_expand)
> * if the user were using a PROT_NONE mapping they'd have to manually fix this
> * up also so this is OK.
> */
> -TEST_F(guard_pages, mremap_shrink)
> +TEST_F(guard_regions, mremap_shrink)
> {
> const unsigned long page_size = self->page_size;
> char *ptr;
> @@ -984,7 +984,7 @@ TEST_F(guard_pages, mremap_shrink)
> * Assert that forking a process with VMAs that do not have VM_WIPEONFORK set
> * retain guard pages.
> */
> -TEST_F(guard_pages, fork)
> +TEST_F(guard_regions, fork)
> {
> const unsigned long page_size = self->page_size;
> char *ptr;
> @@ -1039,7 +1039,7 @@ TEST_F(guard_pages, fork)
> * Assert expected behaviour after we fork populated ranges of anonymous memory
> * and then guard and unguard the range.
> */
> -TEST_F(guard_pages, fork_cow)
> +TEST_F(guard_regions, fork_cow)
> {
> const unsigned long page_size = self->page_size;
> char *ptr;
> @@ -1110,7 +1110,7 @@ TEST_F(guard_pages, fork_cow)
> * Assert that forking a process with VMAs that do have VM_WIPEONFORK set
> * behave as expected.
> */
> -TEST_F(guard_pages, fork_wipeonfork)
> +TEST_F(guard_regions, fork_wipeonfork)
> {
> const unsigned long page_size = self->page_size;
> char *ptr;
> @@ -1160,7 +1160,7 @@ TEST_F(guard_pages, fork_wipeonfork)
> }
>
> /* Ensure that MADV_FREE retains guard entries as expected. */
> -TEST_F(guard_pages, lazyfree)
> +TEST_F(guard_regions, lazyfree)
> {
> const unsigned long page_size = self->page_size;
> char *ptr;
> @@ -1196,7 +1196,7 @@ TEST_F(guard_pages, lazyfree)
> }
>
> /* Ensure that MADV_POPULATE_READ, MADV_POPULATE_WRITE behave as expected. */
> -TEST_F(guard_pages, populate)
> +TEST_F(guard_regions, populate)
> {
> const unsigned long page_size = self->page_size;
> char *ptr;
> @@ -1222,7 +1222,7 @@ TEST_F(guard_pages, populate)
> }
>
> /* Ensure that MADV_COLD, MADV_PAGEOUT do not remove guard markers. */
> -TEST_F(guard_pages, cold_pageout)
> +TEST_F(guard_regions, cold_pageout)
> {
> const unsigned long page_size = self->page_size;
> char *ptr;
> @@ -1268,7 +1268,7 @@ TEST_F(guard_pages, cold_pageout)
> }
>
> /* Ensure that guard pages do not break userfaultd. */
> -TEST_F(guard_pages, uffd)
> +TEST_F(guard_regions, uffd)
> {
> const unsigned long page_size = self->page_size;
> int uffd;