[tip: x86/alternatives] x86/alternatives: Rename 'bp_refs' to 'text_poke_array_refs'
From: tip-bot2 for Ingo Molnar
Date: Fri Apr 11 2025 - 06:13:26 EST
The following commit has been merged into the x86/alternatives branch of tip:
Commit-ID: 28fb79092d9f7db3397e886d637d3006551693b3
Gitweb: https://git.kernel.org/tip/28fb79092d9f7db3397e886d637d3006551693b3
Author: Ingo Molnar <mingo@xxxxxxxxxx>
AuthorDate: Fri, 11 Apr 2025 07:40:16 +02:00
Committer: Ingo Molnar <mingo@xxxxxxxxxx>
CommitterDate: Fri, 11 Apr 2025 11:01:33 +02:00
x86/alternatives: Rename 'bp_refs' to 'text_poke_array_refs'
Make it clear that these reference counts lock access
to text_poke_array.
Signed-off-by: Ingo Molnar <mingo@xxxxxxxxxx>
Cc: Juergen Gross <jgross@xxxxxxxx>
Cc: "H . Peter Anvin" <hpa@xxxxxxxxx>
Cc: Linus Torvalds <torvalds@xxxxxxxxxxxxxxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Link: https://lore.kernel.org/r/20250411054105.2341982-5-mingo@xxxxxxxxxx
---
arch/x86/kernel/alternative.c | 14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 8edf7d3..9bd71c0 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -2476,14 +2476,14 @@ struct text_poke_int3_vec {
int nr_entries;
};
-static DEFINE_PER_CPU(atomic_t, bp_refs);
+static DEFINE_PER_CPU(atomic_t, text_poke_array_refs);
static struct text_poke_int3_vec bp_desc;
static __always_inline
struct text_poke_int3_vec *try_get_desc(void)
{
- atomic_t *refs = this_cpu_ptr(&bp_refs);
+ atomic_t *refs = this_cpu_ptr(&text_poke_array_refs);
if (!raw_atomic_inc_not_zero(refs))
return NULL;
@@ -2493,7 +2493,7 @@ struct text_poke_int3_vec *try_get_desc(void)
static __always_inline void put_desc(void)
{
- atomic_t *refs = this_cpu_ptr(&bp_refs);
+ atomic_t *refs = this_cpu_ptr(&text_poke_array_refs);
smp_mb__before_atomic();
raw_atomic_dec(refs);
@@ -2529,9 +2529,9 @@ noinstr int poke_int3_handler(struct pt_regs *regs)
* Having observed our INT3 instruction, we now must observe
* bp_desc with non-zero refcount:
*
- * bp_refs = 1 INT3
+ * text_poke_array_refs = 1 INT3
* WMB RMB
- * write INT3 if (bp_refs != 0)
+ * write INT3 if (text_poke_array_refs != 0)
*/
smp_rmb();
@@ -2638,7 +2638,7 @@ static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries
* ensure reading a non-zero refcount provides up to date bp_desc data.
*/
for_each_possible_cpu(i)
- atomic_set_release(per_cpu_ptr(&bp_refs, i), 1);
+ atomic_set_release(per_cpu_ptr(&text_poke_array_refs, i), 1);
/*
* Function tracing can enable thousands of places that need to be
@@ -2760,7 +2760,7 @@ static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries
* unused.
*/
for_each_possible_cpu(i) {
- atomic_t *refs = per_cpu_ptr(&bp_refs, i);
+ atomic_t *refs = per_cpu_ptr(&text_poke_array_refs, i);
if (unlikely(!atomic_dec_and_test(refs)))
atomic_cond_read_acquire(refs, !VAL);
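
For context, the pattern that text_poke_array_refs implements is a per-CPU reference count that gates access to the shared descriptor: the writer publishes the descriptor and then sets each CPU's count to 1 with release semantics, the INT3 handler only touches the descriptor after an inc-not-zero succeeds, and the writer retires the descriptor by dropping its own reference on every CPU and waiting for stragglers. Below is a minimal user-space sketch of that protocol, not the kernel code: C11 atomics stand in for the kernel's raw_atomic_*()/atomic_*() helpers and DEFINE_PER_CPU, and every name in it (NR_CPUS_DEMO, desc_valid, publish_desc() and so on) is invented purely for illustration.

/*
 * Illustrative sketch only: mirrors the text_poke_array_refs idea with
 * C11 atomics; none of these identifiers exist in the kernel sources.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS_DEMO 4

static atomic_int refs[NR_CPUS_DEMO];	/* stands in for the per-CPU text_poke_array_refs */
static int desc_valid;			/* stands in for the shared descriptor data */

/* Writer: publish the descriptor, then open the per-CPU refcounts. */
static void publish_desc(void)
{
	desc_valid = 1;
	for (int cpu = 0; cpu < NR_CPUS_DEMO; cpu++)
		/* release: a reader that sees refs != 0 also sees desc_valid */
		atomic_store_explicit(&refs[cpu], 1, memory_order_release);
}

/* Reader (the INT3 handler role): use the descriptor only while a
 * non-zero refcount is held; mirrors raw_atomic_inc_not_zero(). */
static bool try_get_desc_demo(int cpu)
{
	int old = atomic_load_explicit(&refs[cpu], memory_order_relaxed);

	do {
		if (old == 0)
			return false;	/* descriptor already retired */
	} while (!atomic_compare_exchange_weak_explicit(&refs[cpu], &old, old + 1,
							memory_order_acquire,
							memory_order_relaxed));
	return true;
}

static void put_desc_demo(int cpu)
{
	/* release pairs with the writer's acquire-wait in retire_desc() */
	atomic_fetch_sub_explicit(&refs[cpu], 1, memory_order_release);
}

/* Writer: drop the initial reference on each CPU and wait for readers. */
static void retire_desc(void)
{
	for (int cpu = 0; cpu < NR_CPUS_DEMO; cpu++) {
		if (atomic_fetch_sub_explicit(&refs[cpu], 1, memory_order_acq_rel) != 1)
			while (atomic_load_explicit(&refs[cpu], memory_order_acquire) != 0)
				;	/* spin until the last reader drops out */
	}
	desc_valid = 0;
}

int main(void)
{
	publish_desc();
	if (try_get_desc_demo(0)) {
		printf("desc_valid=%d\n", desc_valid);
		put_desc_demo(0);
	}
	retire_desc();
	return 0;
}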