Re: linux-next: manual merge of the arm64 tree with the mm tree

From: Suren Baghdasaryan
Date: Tue Nov 19 2024 - 20:43:44 EST


On Tue, Nov 19, 2024 at 5:09 PM Suren Baghdasaryan <surenb@xxxxxxxxxx> wrote:
>
> On Tue, Nov 19, 2024 at 5:01 PM Stephen Rothwell <sfr@xxxxxxxxxxxxxxxx> wrote:
> >
> > Hi all,
> >
> > On Mon, 28 Oct 2024 11:10:58 +1100 Stephen Rothwell <sfr@xxxxxxxxxxxxxxxx> wrote:
> > >
> > > Today's linux-next merge of the arm64 tree got a conflict in:
> > >
> > > include/linux/mm.h
> > >
> > > between commit:
> > >
> > > e87ec503cf2e ("mm/codetag: uninline and move pgalloc_tag_copy and pgalloc_tag_split")
> > >
> > > from the mm-unstable branch of the mm tree and commit:
> > >
> > > 91e102e79740 ("prctl: arch-agnostic prctl for shadow stack")
> > >
> > > from the arm64 tree.
> > >
> > > I fixed it up (see below) and can carry the fix as necessary. This
> > > is now fixed as far as linux-next is concerned, but any non-trivial
> > > conflicts should be mentioned to your upstream maintainer when your tree
> > > is submitted for merging. You may also want to consider cooperating
> > > with the maintainer of the conflicting tree to minimise any particularly
> > > complex conflicts.
> > >
> > > --
> > > Cheers,
> > > Stephen Rothwell
> > >
> > > diff --cc include/linux/mm.h
> > > index 086ba524d3ba,8852c39c7695..000000000000
> > > --- a/include/linux/mm.h
> > > +++ b/include/linux/mm.h
> > > @@@ -4166,4 -4174,65 +4178,8 @@@ static inline int do_mseal(unsigned lon
> > > }
> > > #endif
> > >
> > > -#ifdef CONFIG_MEM_ALLOC_PROFILING
> > > -static inline void pgalloc_tag_split(struct folio *folio, int old_order, int new_order)
> > > -{
> > > - int i;
> > > - struct alloc_tag *tag;
> > > - unsigned int nr_pages = 1 << new_order;
> > > -
> > > - if (!mem_alloc_profiling_enabled())
> > > - return;
> > > -
> > > - tag = pgalloc_tag_get(&folio->page);
> > > - if (!tag)
> > > - return;
> > > -
> > > - for (i = nr_pages; i < (1 << old_order); i += nr_pages) {
> > > - union codetag_ref *ref = get_page_tag_ref(folio_page(folio, i));
> > > -
> > > - if (ref) {
> > > - /* Set new reference to point to the original tag */
> > > - alloc_tag_ref_set(ref, tag);
> > > - put_page_tag_ref(ref);
> > > - }
> > > - }
> > > -}
> > > -
> > > -static inline void pgalloc_tag_copy(struct folio *new, struct folio *old)
> > > -{
> > > - struct alloc_tag *tag;
> > > - union codetag_ref *ref;
> > > -
> > > - tag = pgalloc_tag_get(&old->page);
> > > - if (!tag)
> > > - return;
> > > -
> > > - ref = get_page_tag_ref(&new->page);
> > > - if (!ref)
> > > - return;
> > > -
> > > - /* Clear the old ref to the original allocation tag. */
> > > - clear_page_tag_ref(&old->page);
> > > - /* Decrement the counters of the tag on get_new_folio. */
> > > - alloc_tag_sub(ref, folio_nr_pages(new));
> > > -
> > > - __alloc_tag_ref_set(ref, tag);
> > > -
> > > - put_page_tag_ref(ref);
> > > -}
> > > -#else /* !CONFIG_MEM_ALLOC_PROFILING */
> > > -static inline void pgalloc_tag_split(struct folio *folio, int old_order, int new_order)
> > > -{
> > > -}
> > > -
> > > -static inline void pgalloc_tag_copy(struct folio *new, struct folio *old)
> > > -{
> > > -}
> > > -#endif /* CONFIG_MEM_ALLOC_PROFILING */
> > > -
> > > + int arch_get_shadow_stack_status(struct task_struct *t, unsigned long __user *status);
> > > + int arch_set_shadow_stack_status(struct task_struct *t, unsigned long status);
> > > + int arch_lock_shadow_stack_status(struct task_struct *t, unsigned long status);
> > > +
> > > #endif /* _LINUX_MM_H */
> >
> > This is now a conflict between the mm-stable tree and Linus' tree.
>
> Let me try to manually apply it to Linus' ToT and send a replacement patch.

The attached patch should apply to Linus' tree, but please make sure the
following two patches from mm-stable are merged before this one, because
this patch depends on them (a quick sanity check is sketched below):

ed265529d39a mm/codetag: fix arg in pgalloc_tag_copy alloc_tag_sub
42895a861244 alloc_tag: introduce pgtag_ref_handle to abstract page tag references
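
For example, assuming the attachment is saved as 0001-mm-codetag-uninline.patch
(that filename is only illustrative), the prerequisite check and the apply step
would look roughly like:

  # confirm both mm-stable commits are already in the tree being patched
  git merge-base --is-ancestor ed265529d39a HEAD &&
  git merge-base --is-ancestor 42895a861244 HEAD &&
  echo "prerequisites present"

  # then apply the attached patch
  git am 0001-mm-codetag-uninline.patch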

>
> >
> > --
> > Cheers,
> > Stephen Rothwell
From a9df77cdc42c06b068f782b4733855413d2ff926 Mon Sep 17 00:00:00 2001
From: Suren Baghdasaryan <surenb@xxxxxxxxxx>
Date: Thu, 24 Oct 2024 09:23:18 -0700
Subject: [PATCH 1/1] mm/codetag: uninline and move pgalloc_tag_copy and
pgalloc_tag_split

pgalloc_tag_copy() and pgalloc_tag_split() are sizable and outside of any
performance-critical paths, so it should be fine to uninline them. Also
move their declarations into pgalloc_tag.h which seems like a more
appropriate place for them. No functional changes other than uninlining.

Link: https://lkml.kernel.org/r/20241024162318.1640781-1-surenb@xxxxxxxxxx
Signed-off-by: Suren Baghdasaryan <surenb@xxxxxxxxxx>
Suggested-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
Acked-by: Yu Zhao <yuzhao@xxxxxxxxxx>
Cc: Kent Overstreet <kent.overstreet@xxxxxxxxx>
Cc: Pasha Tatashin <pasha.tatashin@xxxxxxxxxx>
Cc: Sourav Panda <souravpanda@xxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---
 include/linux/mm.h          | 58 -------------------------------------
 include/linux/pgalloc_tag.h |  5 ++++
 lib/alloc_tag.c             | 48 ++++++++++++++++++++++++++++++
 3 files changed, 53 insertions(+), 58 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index c36dc44ab91f..8bad7918f5d9 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -4175,64 +4175,6 @@ static inline int do_mseal(unsigned long start, size_t len_in, unsigned long fla
}
#endif

-#ifdef CONFIG_MEM_ALLOC_PROFILING
-static inline void pgalloc_tag_split(struct folio *folio, int old_order, int new_order)
-{
- int i;
- struct alloc_tag *tag;
- unsigned int nr_pages = 1 << new_order;
-
- if (!mem_alloc_profiling_enabled())
- return;
-
- tag = pgalloc_tag_get(&folio->page);
- if (!tag)
- return;
-
- for (i = nr_pages; i < (1 << old_order); i += nr_pages) {
- union pgtag_ref_handle handle;
- union codetag_ref ref;
-
- if (get_page_tag_ref(folio_page(folio, i), &ref, &handle)) {
- /* Set new reference to point to the original tag */
- alloc_tag_ref_set(&ref, tag);
- update_page_tag_ref(handle, &ref);
- put_page_tag_ref(handle);
- }
- }
-}
-
-static inline void pgalloc_tag_copy(struct folio *new, struct folio *old)
-{
- union pgtag_ref_handle handle;
- union codetag_ref ref;
- struct alloc_tag *tag;
-
- tag = pgalloc_tag_get(&old->page);
- if (!tag)
- return;
-
- if (!get_page_tag_ref(&new->page, &ref, &handle))
- return;
-
- /* Clear the old ref to the original allocation tag. */
- clear_page_tag_ref(&old->page);
- /* Decrement the counters of the tag on get_new_folio. */
- alloc_tag_sub(&ref, folio_size(new));
- __alloc_tag_ref_set(&ref, tag);
- update_page_tag_ref(handle, &ref);
- put_page_tag_ref(handle);
-}
-#else /* !CONFIG_MEM_ALLOC_PROFILING */
-static inline void pgalloc_tag_split(struct folio *folio, int old_order, int new_order)
-{
-}
-
-static inline void pgalloc_tag_copy(struct folio *new, struct folio *old)
-{
-}
-#endif /* CONFIG_MEM_ALLOC_PROFILING */
-
int arch_get_shadow_stack_status(struct task_struct *t, unsigned long __user *status);
int arch_set_shadow_stack_status(struct task_struct *t, unsigned long status);
int arch_lock_shadow_stack_status(struct task_struct *t, unsigned long status);
diff --git a/include/linux/pgalloc_tag.h b/include/linux/pgalloc_tag.h
index b13cd3313a88..a942b5a03ebf 100644
--- a/include/linux/pgalloc_tag.h
+++ b/include/linux/pgalloc_tag.h
@@ -122,6 +122,9 @@ static inline void pgalloc_tag_sub_pages(struct alloc_tag *tag, unsigned int nr)
this_cpu_sub(tag->counters->bytes, PAGE_SIZE * nr);
}

+void pgalloc_tag_split(struct folio *folio, int old_order, int new_order);
+void pgalloc_tag_copy(struct folio *new, struct folio *old);
+
#else /* CONFIG_MEM_ALLOC_PROFILING */

static inline void clear_page_tag_ref(struct page *page) {}
@@ -130,6 +133,8 @@ static inline void pgalloc_tag_add(struct page *page, struct task_struct *task,
static inline void pgalloc_tag_sub(struct page *page, unsigned int nr) {}
static inline struct alloc_tag *pgalloc_tag_get(struct page *page) { return NULL; }
static inline void pgalloc_tag_sub_pages(struct alloc_tag *tag, unsigned int nr) {}
+static inline void pgalloc_tag_split(struct folio *folio, int old_order, int new_order) {}
+static inline void pgalloc_tag_copy(struct folio *new, struct folio *old) {}

#endif /* CONFIG_MEM_ALLOC_PROFILING */

diff --git a/lib/alloc_tag.c b/lib/alloc_tag.c
index 81e5f9a70f22..f2790272a603 100644
--- a/lib/alloc_tag.c
+++ b/lib/alloc_tag.c
@@ -144,6 +144,54 @@ size_t alloc_tag_top_users(struct codetag_bytes *tags, size_t count, bool can_sl
return nr;
}

+void pgalloc_tag_split(struct folio *folio, int old_order, int new_order)
+{
+ int i;
+ struct alloc_tag *tag;
+ unsigned int nr_pages = 1 << new_order;
+
+ if (!mem_alloc_profiling_enabled())
+ return;
+
+ tag = pgalloc_tag_get(&folio->page);
+ if (!tag)
+ return;
+
+ for (i = nr_pages; i < (1 << old_order); i += nr_pages) {
+ union pgtag_ref_handle handle;
+ union codetag_ref ref;
+
+ if (get_page_tag_ref(folio_page(folio, i), &ref, &handle)) {
+ /* Set new reference to point to the original tag */
+ alloc_tag_ref_set(&ref, tag);
+ update_page_tag_ref(handle, &ref);
+ put_page_tag_ref(handle);
+ }
+ }
+}
+
+void pgalloc_tag_copy(struct folio *new, struct folio *old)
+{
+ union pgtag_ref_handle handle;
+ union codetag_ref ref;
+ struct alloc_tag *tag;
+
+ tag = pgalloc_tag_get(&old->page);
+ if (!tag)
+ return;
+
+ if (!get_page_tag_ref(&new->page, &ref, &handle))
+ return;
+
+ /* Clear the old ref to the original allocation tag. */
+ clear_page_tag_ref(&old->page);
+ /* Decrement the counters of the tag on get_new_folio. */
+ alloc_tag_sub(&ref, folio_size(new));
+ __alloc_tag_ref_set(&ref, tag);
+ update_page_tag_ref(handle, &ref);
+ put_page_tag_ref(handle);
+}
+
static void __init procfs_init(void)
{
proc_create_seq("allocinfo", 0400, NULL, &allocinfo_seq_op);
--
2.47.0.338.g60cca15819-goog