[PATCH 2/2] arm64/mm: avoid max_pinned_asids underflow
From: Reda CHERKAOUI
Date: Thu Feb 19 2026 - 07:31:33 EST
num_available_asids and num_possible_cpus() are unsigned, so on a
configuration where num_available_asids <= num_possible_cpus() + 2 the
expression num_available_asids - num_possible_cpus() - 2 wraps around
and max_pinned_asids becomes a huge bogus limit instead of a small one.

Fold the old sanity WARN_ON into the max_pinned_asids computation: warn
and clamp max_pinned_asids to 0 when there are not enough ASIDs, and
only perform the subtraction when it cannot underflow.

Signed-off-by: Reda CHERKAOUI <redacherkaoui67@xxxxxxxxx>
---
arch/arm64/mm/context.c | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index 74c1ece7db78..fdcee1127954 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -386,11 +386,7 @@ static int asids_update_limit(void)
if (pinned_asid_map)
set_kpti_asid_bits(pinned_asid_map);
}
- /*
- * Expect allocation after rollover to fail if we don't have at least
- * one more ASID than CPUs. ASID #0 is reserved for init_mm.
- */
- WARN_ON(num_available_asids - 1 <= num_possible_cpus());
+
pr_info("ASID allocator initialised with %lu entries\n",
num_available_asids);
@@ -399,7 +395,11 @@ static int asids_update_limit(void)
* even if all CPUs have a reserved ASID and the maximum number of ASIDs
* are pinned, there still is at least one empty slot in the ASID map.
*/
- max_pinned_asids = num_available_asids - num_possible_cpus() - 2;
+ if (WARN_ON(num_available_asids <= num_possible_cpus() + 2))
+ max_pinned_asids = 0;
+ else
+ max_pinned_asids = num_available_asids - num_possible_cpus() - 2;
+
return 0;
}
arch_initcall(asids_update_limit);
--
2.43.0