[PATCH RFC 08/14] arm64/mm: Split asids_init into two parts
From: Julien Grall
Date: Thu Mar 21 2019 - 12:37:44 EST
Move the common initialization of the ASID allocator out into a
separate function.
Signed-off-by: Julien Grall <julien.grall@xxxxxxx>
---
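As a quick illustration of the intent of the split (not part of this
patch), a second allocator instance could now be initialised with the
new helper along these lines; the vmid_info/vmids_init names, the
8-bit width and the per-CPU variables below are hypothetical:

	static DEFINE_PER_CPU(atomic64_t, active_vmids);
	static DEFINE_PER_CPU(u64, reserved_vmids);
	static struct asid_info vmid_info;

	static int vmids_init(void)
	{
		/* Hypothetical: 8-bit IDs, one ID allocated per context */
		if (asid_allocator_init(&vmid_info, 8, 1))
			return -ENOMEM;

		/* The common helper leaves the per-CPU tracking to the caller */
		vmid_info.active = &active_vmids;
		vmid_info.reserved = &reserved_vmids;

		return 0;
	}

As with asids_init() below, the active/reserved per-CPU pointers are
wired up by the caller rather than by the common helper.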
arch/arm64/mm/context.c | 43 +++++++++++++++++++++++++++++++------------
1 file changed, 31 insertions(+), 12 deletions(-)
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index fb13bc249951..b071a1b3469e 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -271,31 +271,50 @@ asmlinkage void post_ttbr_update_workaround(void)
CONFIG_CAVIUM_ERRATUM_27456));
}
-static int asids_init(void)
+/*
+ * Initialize the ASID allocator
+ *
+ * @info: Pointer to the asid allocator structure
+ * @bits: Number of ASIDs available
+ * @asid_per_ctxt: Number of ASIDs to allocate per-context. ASIDs are
+ * allocated contiguously for a given context. This value should be a power of
+ * 2.
+ */
+static int asid_allocator_init(struct asid_info *info,
+ u32 bits, unsigned int asid_per_ctxt)
{
- struct asid_info *info = &asid_info;
-
- info->bits = get_cpu_asid_bits();
- info->ctxt_shift = ilog2(ASID_PER_CONTEXT);
+ info->bits = bits;
+ info->ctxt_shift = ilog2(asid_per_ctxt);
/*
* Expect allocation after rollover to fail if we don't have at least
- * one more ASID than CPUs. ASID #0 is reserved for init_mm.
+ * one more ASID than CPUs. ASID #0 is always reserved.
*/
WARN_ON(NUM_CTXT_ASIDS(info) - 1 <= num_possible_cpus());
atomic64_set(&info->generation, ASID_FIRST_VERSION(info));
info->map = kcalloc(BITS_TO_LONGS(NUM_CTXT_ASIDS(info)),
sizeof(*info->map), GFP_KERNEL);
if (!info->map)
- panic("Failed to allocate bitmap for %lu ASIDs\n",
- NUM_CTXT_ASIDS(info));
-
- info->active = &active_asids;
- info->reserved = &reserved_asids;
+ return -ENOMEM;
raw_spin_lock_init(&info->lock);
+ return 0;
+}
+
+static int asids_init(void)
+{
+ u32 bits = get_cpu_asid_bits();
+
+ if (asid_allocator_init(&asid_info, bits, ASID_PER_CONTEXT))
+ panic("Unable to initialize ASID allocator for %lu ASIDs\n",
+ 1UL << bits);
+
+ asid_info.active = &active_asids;
+ asid_info.reserved = &reserved_asids;
+
pr_info("ASID allocator initialised with %lu entries\n",
- NUM_CTXT_ASIDS(info));
+ NUM_CTXT_ASIDS(&asid_info));
+
return 0;
}
early_initcall(asids_init);
--
2.11.0