Re: [PATCH v2] tools/testing/selftests: add merge test for partial msealed range
From: Andrew Morton
Date: Tue Mar 31 2026 - 20:59:01 EST
On Tue, 31 Mar 2026 11:08:03 +0100 "Lorenzo Stoakes (Oracle)" <ljs@xxxxxxxxxx> wrote:
> Then again, and contradicting myself elsewhere on fix-patches blah blah blah,
> since this is just test code, I checked the below and it works the same and
> triggers the regression.
>
> So maybe we can just apply that?
Here's how the two folded together will look:
From: "Lorenzo Stoakes (Oracle)" <ljs@xxxxxxxxxx>
Subject: tools/testing/selftests: add merge test for partial msealed range
Date: Tue, 31 Mar 2026 08:36:27 +0100
Commit 2697dd8ae721 ("mm/mseal: update VMA end correctly on merge") fixed
an issue in the loop which iterates through VMAs applying mseal. The bug
was triggered by mseal()'ing a range of VMAs in which the second VMA was
already mseal()'d and the first became mergeable with it once mseal()'d.
Add a regression test to assert that this behaviour is correct. We place
it in the merge selftests as this is strictly an issue with merging (via a
vma_modify() invocation).
It also asserts that mseal()'d ranges are correctly merged as you'd
expect.
The test is implemented such that it is skipped if mseal() is not
available on the system.
[ljs@xxxxxxxxxx: simplifications per Pedro]
Link: https://lkml.kernel.org/r/1c9c922d-5cb5-4cff-9273-b737cdb57ca1@lucifer.local
Link: https://lkml.kernel.org/r/20260331073627.50010-1-ljs@xxxxxxxxxx
Signed-off-by: Lorenzo Stoakes (Oracle) <ljs@xxxxxxxxxx>
Cc: David Hildenbrand <david@xxxxxxxxxx>
Cc: Jann Horn <jannh@xxxxxxxxxx>
Cc: Liam Howlett <liam.howlett@xxxxxxxxxx>
Cc: Lorenzo Stoakes <ljs@xxxxxxxxxx>
Cc: Michal Hocko <mhocko@xxxxxxxx>
Cc: Mike Rapoport <rppt@xxxxxxxxxx>
Cc: Pedro Falcato <pfalcato@xxxxxxx>
Cc: Shuah Khan <shuah@xxxxxxxxxx>
Cc: Suren Baghdasaryan <surenb@xxxxxxxxxx>
Cc: Vlastimil Babka <vbabka@xxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---
tools/testing/selftests/mm/merge.c | 89 +++++++++++++++++++++++++++
1 file changed, 89 insertions(+)
--- a/tools/testing/selftests/mm/merge.c~tools-testing-selftests-add-merge-test-for-partial-msealed-range
+++ a/tools/testing/selftests/mm/merge.c
@@ -2,6 +2,7 @@
#define _GNU_SOURCE
#include "kselftest_harness.h"
+#include <asm-generic/unistd.h>
#include <linux/prctl.h>
#include <fcntl.h>
#include <stdio.h>
@@ -48,6 +49,19 @@ static pid_t do_fork(struct procmap_fd *
return 0;
}
+#ifdef __NR_mseal
+static int sys_mseal(void *ptr, size_t len, unsigned long flags)
+{
+ return syscall(__NR_mseal, (unsigned long)ptr, len, flags);
+}
+#else
+static int sys_mseal(void *ptr, size_t len, unsigned long flags)
+{
+ errno = ENOSYS;
+ return -1;
+}
+#endif
+
FIXTURE_SETUP(merge)
{
self->page_size = psize();
@@ -1217,6 +1231,81 @@ TEST_F(merge, mremap_correct_placed_faul
ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 15 * page_size);
}
+TEST_F(merge, merge_vmas_with_mseal)
+{
+ unsigned int page_size = self->page_size;
+ struct procmap_fd *procmap = &self->procmap;
+ char *ptr, *ptr2, *ptr3;
+ /* We need our own as cannot munmap() once sealed. */
+ char *carveout;
+
+ /* Invalid mseal() call to see if implemented. */
+ ASSERT_EQ(sys_mseal(NULL, 0, ~0UL), -1);
+ if (errno == ENOSYS)
+ SKIP(return, "mseal not supported, skipping.");
+
+ /* Map carveout. */
+ carveout = mmap(NULL, 5 * page_size, PROT_NONE,
+ MAP_PRIVATE | MAP_ANON, -1, 0);
+ ASSERT_NE(carveout, MAP_FAILED);
+
+ /*
+ * Map 3 separate VMAs:
+ *
+ * |-----------|-----------|-----------|
+ * | RW | RWE | RO |
+ * |-----------|-----------|-----------|
+ * ptr ptr2 ptr3
+ */
+ ptr = mmap(&carveout[page_size], page_size, PROT_READ | PROT_WRITE,
+ MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
+ ASSERT_NE(ptr, MAP_FAILED);
+ ptr2 = mmap(&carveout[2 * page_size], page_size,
+ PROT_READ | PROT_WRITE | PROT_EXEC,
+ MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
+ ASSERT_NE(ptr2, MAP_FAILED);
+ ptr3 = mmap(&carveout[3 * page_size], page_size, PROT_READ,
+ MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
+ ASSERT_NE(ptr3, MAP_FAILED);
+
+ /*
+ * mseal the second VMA:
+ *
+ * |-----------|-----------|-----------|
+ * | RW | RWES | RO |
+ * |-----------|-----------|-----------|
+ * ptr ptr2 ptr3
+ */
+ ASSERT_EQ(sys_mseal(ptr2, page_size, 0), 0);
+
+ /* Make first VMA mergeable upon mseal. */
+ ASSERT_EQ(mprotect(ptr, page_size,
+ PROT_READ | PROT_WRITE | PROT_EXEC), 0);
+ /*
+ * At this point we have:
+ *
+ * |-----------|-----------|-----------|
+ * | RWE | RWES | RO |
+ * |-----------|-----------|-----------|
+ * ptr ptr2 ptr3
+ *
+ * Now mseal all of the VMAs.
+ */
+ ASSERT_EQ(sys_mseal(ptr, 3 * page_size, 0), 0);
+
+ /*
+ * We should end up with:
+ *
+ * |-----------------------|-----------|
+ * | RWES | ROS |
+ * |-----------------------|-----------|
+ * ptr ptr3
+ */
+ ASSERT_TRUE(find_vma_procmap(procmap, ptr));
+ ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr);
+ ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 2 * page_size);
+}
+
TEST_F(merge_with_fork, mremap_faulted_to_unfaulted_prev)
{
struct procmap_fd *procmap = &self->procmap;
_