[PATCH -next 4/7] mm: convert xchg_page_access_time to xchg_folio_access_time()
From: Kefeng Wang
Date: Tue Oct 10 2023 - 02:47:26 EST
Make xchg_page_access_time() take a folio, and rename it to
xchg_folio_access_time(), since all of its callers already have a folio.

Signed-off-by: Kefeng Wang <wangkefeng.wang@xxxxxxxxxx>
---
 include/linux/mm.h  | 7 ++++---
 kernel/sched/fair.c | 2 +-
 mm/huge_memory.c    | 4 ++--
 mm/mprotect.c       | 2 +-
 4 files changed, 8 insertions(+), 7 deletions(-)
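
For review convenience (not intended for the final commit message), here is a
condensed view of what the helper and one of its callers look like once this
patch is applied; it is only a restatement of the hunks below, with the
surrounding kernel context trimmed:

static inline int xchg_folio_access_time(struct folio *folio, int time)
{
	int last_time;

	/* The access time is stored in the cpupid field, bucketed to fit. */
	last_time = page_cpupid_xchg_last(&folio->page,
					  time >> PAGE_ACCESS_TIME_BUCKETS);
	return last_time << PAGE_ACCESS_TIME_BUCKETS;
}

/* Callers that already have a folio now pass it directly: */
static int numa_hint_fault_latency(struct folio *folio)
{
	int last_time, time;

	time = jiffies_to_msecs(jiffies);
	last_time = xchg_folio_access_time(folio, time);
	return (time - last_time) & PAGE_ACCESS_TIME_MASK;
}
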
diff --git a/include/linux/mm.h b/include/linux/mm.h
index a10b8774cc6f..13ca63efacf7 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1711,11 +1711,12 @@ static inline void page_cpupid_reset_last(struct page *page)
 }
 #endif /* LAST_CPUPID_NOT_IN_PAGE_FLAGS */
 
-static inline int xchg_page_access_time(struct page *page, int time)
+static inline int xchg_folio_access_time(struct folio *folio, int time)
 {
 	int last_time;
 
-	last_time = page_cpupid_xchg_last(page, time >> PAGE_ACCESS_TIME_BUCKETS);
+	last_time = page_cpupid_xchg_last(&folio->page,
+					  time >> PAGE_ACCESS_TIME_BUCKETS);
 	return last_time << PAGE_ACCESS_TIME_BUCKETS;
 }
 
@@ -1734,7 +1735,7 @@ static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
 	return page_to_nid(page); /* XXX */
 }
 
-static inline int xchg_page_access_time(struct page *page, int time)
+static inline int xchg_folio_access_time(struct folio *folio, int time)
 {
 	return 0;
 }
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 682067c545d1..50b9f63099fb 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1722,7 +1722,7 @@ static int numa_hint_fault_latency(struct folio *folio)
 	int last_time, time;
 
 	time = jiffies_to_msecs(jiffies);
-	last_time = xchg_page_access_time(&folio->page, time);
+	last_time = xchg_folio_access_time(folio, time);
 	return (time - last_time) & PAGE_ACCESS_TIME_MASK;
 }
 
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 344c8db904e1..e85238ac1d5c 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1912,8 +1912,8 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 
 		if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING &&
 		    !toptier)
-			xchg_page_access_time(&folio->page,
-					      jiffies_to_msecs(jiffies));
+			xchg_folio_access_time(folio,
+					       jiffies_to_msecs(jiffies));
 	}
 	/*
 	 * In case prot_numa, we are under mmap_read_lock(mm). It's critical
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 459daa987131..1c556651888a 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -159,7 +159,7 @@ static long change_pte_range(struct mmu_gather *tlb,
 					continue;
 				if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING &&
 				    !toptier)
-					xchg_page_access_time(&folio->page,
+					xchg_folio_access_time(folio,
 							      jiffies_to_msecs(jiffies));
 			}
 
--
2.27.0