[PATCH 1/2] x86/numa: Introduce numa_fill_memblks()

From: alison . schofield
Date: Thu May 18 2023 - 20:05:09 EST


From: Alison Schofield <alison.schofield@xxxxxxxxx>

numa_fill_memblks() fills in the gaps in numa_meminfo memblks
over an HPA address range.

The initial use case is the ACPI driver that needs to extend
SRAT defined proximity domains to an entire CXL CFMWS Window[1].

The ACPI driver expects to use numa_fill_memblks() while parsing
the CFMWS. Extending the memblks created during SRAT parsing, to
cover the entire CFMWS Window, is desirable because everything in
a CFMWS Window is expected to be of a similar performance class.

Requires CONFIG_NUMA_KEEP_MEMINFO.

[1] A CXL CFMWS Window represents a contiguous CXL memory resource,
aka an HPA range. The CFMWS (CXL Fixed Memory Window Structure) is
part of the ACPI CEDT (CXL Early Discovery Table).

Signed-off-by: Alison Schofield <alison.schofield@xxxxxxxxx>
---
arch/x86/include/asm/sparsemem.h | 2 +
arch/x86/mm/numa.c | 82 ++++++++++++++++++++++++++++++++
include/linux/numa.h | 7 +++
3 files changed, 91 insertions(+)

diff --git a/arch/x86/include/asm/sparsemem.h b/arch/x86/include/asm/sparsemem.h
index 64df897c0ee3..1be13b2dfe8b 100644
--- a/arch/x86/include/asm/sparsemem.h
+++ b/arch/x86/include/asm/sparsemem.h
@@ -37,6 +37,8 @@ extern int phys_to_target_node(phys_addr_t start);
#define phys_to_target_node phys_to_target_node
extern int memory_add_physaddr_to_nid(u64 start);
#define memory_add_physaddr_to_nid memory_add_physaddr_to_nid
+extern int numa_fill_memblks(u64 start, u64 end);
+#define numa_fill_memblks numa_fill_memblks
#endif
#endif /* __ASSEMBLY__ */

diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index 2aadb2019b4f..6c8f9cff71da 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -11,6 +11,7 @@
#include <linux/nodemask.h>
#include <linux/sched.h>
#include <linux/topology.h>
+#include <linux/sort.h>

#include <asm/e820/api.h>
#include <asm/proto.h>
@@ -961,4 +962,85 @@ int memory_add_physaddr_to_nid(u64 start)
return nid;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
+
+/*
+ * sort() comparator for the numa_memblk pointer list: order by start
+ * address, then by end address so that of two memblks with equal start
+ * the lesser end sorts first — the gap-fill loop in numa_fill_memblks()
+ * relies on that ordering when it skips overlapping entries.
+ */
+static int __init cmp_memblk(const void *a, const void *b)
+{
+ const struct numa_memblk *ma = *(const struct numa_memblk **)a;
+ const struct numa_memblk *mb = *(const struct numa_memblk **)b;
+
+ if (ma->start != mb->start)
+ return (ma->start < mb->start) ? -1 : 1;
+
+ if (ma->end != mb->end)
+ return (ma->end < mb->end) ? -1 : 1;
+
+ return 0;
+}
+
+static struct numa_memblk *numa_memblk_list[NR_NODE_MEMBLKS] __initdata;
+
+/**
+ * numa_fill_memblks - Fill gaps in numa_meminfo memblks
+ * @start: address to begin fill
+ * @end: address to end fill
+ *
+ * Find and extend numa_meminfo memblks to cover the @start/@end
+ * HPA address range, following these rules:
+ * 1. The first memblk must start at @start
+ * 2. The last memblk must end at @end
+ * 3. Fill the gaps between memblks by extending numa_memblk.end
+ * Result: All addresses in start/end range are included in
+ * numa_meminfo.
+ *
+ * RETURNS:
+ * 0 : Success. numa_meminfo fully describes start/end
+ * NUMA_NO_MEMBLK : No memblk exists in start/end range
+ */
+int __init numa_fill_memblks(u64 start, u64 end)
+{
+ struct numa_meminfo *mi = &numa_meminfo;
+ struct numa_memblk **blk = &numa_memblk_list[0];
+ int count = 0;
+
+ /* Collect the memblks that lie entirely within the start/end range */
+ for (int i = 0; i < mi->nr_blks; i++) {
+ struct numa_memblk *bi = &mi->blk[i];
+
+ if (start <= bi->start && end >= bi->end) {
+ blk[count] = &mi->blk[i];
+ count++;
+ }
+ }
+ if (!count)
+ return NUMA_NO_MEMBLK;
+
+ if (count == 1) {
+ blk[0]->start = start;
+ blk[0]->end = end;
+ return 0;
+ }
+
+ sort(&blk[0], count, sizeof(blk[0]), cmp_memblk, NULL);
+ blk[0]->start = start;
+ blk[count - 1]->end = end;
+
+ for (int i = 0, j = 1; j < count; i++, j++) {
+ /* Overlaps OK. sort() put the lesser end first */
+ if (blk[i]->start == blk[j]->start)
+ continue;
+
+ /* No gap */
+ if (blk[i]->end == blk[j]->start)
+ continue;
+
+ /* Fill the gap */
+ if (blk[i]->end < blk[j]->start) {
+ blk[i]->end = blk[j]->start;
+ continue;
+ }
+ }
+ return 0;
+}
+
#endif
diff --git a/include/linux/numa.h b/include/linux/numa.h
index 59df211d051f..0f512c0aba54 100644
--- a/include/linux/numa.h
+++ b/include/linux/numa.h
@@ -12,6 +12,7 @@
#define MAX_NUMNODES (1 << NODES_SHIFT)

#define NUMA_NO_NODE (-1)
+#define NUMA_NO_MEMBLK (-1)

/* optionally keep NUMA memory info available post init */
#ifdef CONFIG_NUMA_KEEP_MEMINFO
@@ -43,6 +44,12 @@ static inline int phys_to_target_node(u64 start)
return 0;
}
#endif
+#ifndef numa_fill_memblks
+/*
+ * Fallback for configs where the arch does not provide numa_fill_memblks()
+ * (x86 defines it in asm/sparsemem.h): report that no memblk exists in the
+ * requested range.
+ *
+ * NOTE(review): __init on a static inline in a shared header looks
+ * questionable — if the compiler emits an out-of-line copy, it lands in
+ * .init.text and a post-init caller would trigger a section mismatch.
+ * Consider dropping __init here; please confirm.
+ */
+static inline int __init numa_fill_memblks(u64 start, u64 end)
+{
+ return NUMA_NO_MEMBLK;
+}
+#endif
#else /* !CONFIG_NUMA */
static inline int numa_map_to_online_node(int node)
{
--
2.37.3