[PATCH 3/6] locking/atomic/x86: Use READ_ONCE before atomic{,64}_try_cmpxchg loops

From: Uros Bizjak
Date: Tue Apr 09 2024 - 06:06:25 EST


The value preload before the cmpxchg loop does not need to be atomic,
but it should use READ_ONCE to prevent the compiler from merging,
refetching or reordering the read.
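
As an illustration of the resulting pattern, here is a stand-alone
user-space sketch (not part of the patch): the volatile cast stands in
for __READ_ONCE() and the GCC/Clang __atomic builtin stands in for
try_cmpxchg(), which updates the expected value on failure, so the
loop body can stay empty.

	#include <stdio.h>

	static int counter;

	/* Mirrors arch_atomic_fetch_and(): returns the old value. */
	static int fetch_and(int i)
	{
		/*
		 * One ordered load of the old value; the volatile
		 * access keeps the compiler from merging, refetching
		 * or reordering it, like __READ_ONCE().
		 */
		int val = *(volatile int *)&counter;

		/*
		 * Empty body: on failure the builtin writes the
		 * current value of 'counter' back into 'val', just
		 * as try_cmpxchg() does with its expected value.
		 */
		do { } while (!__atomic_compare_exchange_n(&counter, &val,
				val & i, false, __ATOMIC_SEQ_CST,
				__ATOMIC_RELAXED));

		return val;
	}

	int main(void)
	{
		counter = 0xff;
		printf("old=%#x new=%#x\n", fetch_and(0x0f), counter);
		return 0;
	}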

This patch also unifies the arch_atomic{,64}_{,fetch}_{and,or,xor}()
macros between the x86_32 and x86_64 targets.

No functional changes intended.

Signed-off-by: Uros Bizjak <ubizjak@xxxxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: Ingo Molnar <mingo@xxxxxxxxxx>
Cc: Borislav Petkov <bp@xxxxxxxxx>
Cc: Dave Hansen <dave.hansen@xxxxxxxxxxxxxxx>
Cc: "H. Peter Anvin" <hpa@xxxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
---
arch/x86/include/asm/atomic.h | 8 ++++----
arch/x86/include/asm/atomic64_64.h | 20 ++++++++++----------
2 files changed, 14 insertions(+), 14 deletions(-)

diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index 55a55ec04350..b166da21ee98 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -20,7 +20,7 @@ static __always_inline int arch_atomic_read(const atomic_t *v)
* Note for KASAN: we deliberately don't use READ_ONCE_NOCHECK() here,
* it's non-inlined function that increases binary size and stack usage.
*/
- return __READ_ONCE((v)->counter);
+ return __READ_ONCE(v->counter);
}

static __always_inline void arch_atomic_set(atomic_t *v, int i)
@@ -132,7 +132,7 @@ static __always_inline void arch_atomic_and(int i, atomic_t *v)

static __always_inline int arch_atomic_fetch_and(int i, atomic_t *v)
{
- int val = arch_atomic_read(v);
+ int val = __READ_ONCE(v->counter);

do { } while (!arch_atomic_try_cmpxchg(v, &val, val & i));

@@ -150,7 +150,7 @@ static __always_inline void arch_atomic_or(int i, atomic_t *v)

static __always_inline int arch_atomic_fetch_or(int i, atomic_t *v)
{
- int val = arch_atomic_read(v);
+ int val = __READ_ONCE(v->counter);

do { } while (!arch_atomic_try_cmpxchg(v, &val, val | i));

@@ -168,7 +168,7 @@ static __always_inline void arch_atomic_xor(int i, atomic_t *v)

static __always_inline int arch_atomic_fetch_xor(int i, atomic_t *v)
{
- int val = arch_atomic_read(v);
+ int val = __READ_ONCE(v->counter);

do { } while (!arch_atomic_try_cmpxchg(v, &val, val ^ i));

diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
index 3165c0feedf7..e7b12a48fecb 100644
--- a/arch/x86/include/asm/atomic64_64.h
+++ b/arch/x86/include/asm/atomic64_64.h
@@ -12,7 +12,7 @@

static __always_inline s64 arch_atomic64_read(const atomic64_t *v)
{
- return __READ_ONCE((v)->counter);
+ return __READ_ONCE(v->counter);
}

static __always_inline void arch_atomic64_set(atomic64_t *v, s64 i)
@@ -126,10 +126,10 @@ static __always_inline void arch_atomic64_and(s64 i, atomic64_t *v)

static __always_inline s64 arch_atomic64_fetch_and(s64 i, atomic64_t *v)
{
- s64 val = arch_atomic64_read(v);
+ s64 val = __READ_ONCE(v->counter);
+
+ do { } while (!arch_atomic64_try_cmpxchg(v, &val, val & i));

- do {
- } while (!arch_atomic64_try_cmpxchg(v, &val, val & i));
return val;
}
#define arch_atomic64_fetch_and arch_atomic64_fetch_and
@@ -144,10 +144,10 @@ static __always_inline void arch_atomic64_or(s64 i, atomic64_t *v)

static __always_inline s64 arch_atomic64_fetch_or(s64 i, atomic64_t *v)
{
- s64 val = arch_atomic64_read(v);
+ s64 val = __READ_ONCE(v->counter);
+
+ do { } while (!arch_atomic64_try_cmpxchg(v, &val, val | i));

- do {
- } while (!arch_atomic64_try_cmpxchg(v, &val, val | i));
return val;
}
#define arch_atomic64_fetch_or arch_atomic64_fetch_or
@@ -162,10 +162,10 @@ static __always_inline void arch_atomic64_xor(s64 i, atomic64_t *v)

static __always_inline s64 arch_atomic64_fetch_xor(s64 i, atomic64_t *v)
{
- s64 val = arch_atomic64_read(v);
+ s64 val = __READ_ONCE(v->counter);
+
+ do { } while (!arch_atomic64_try_cmpxchg(v, &val, val ^ i));

- do {
- } while (!arch_atomic64_try_cmpxchg(v, &val, val ^ i));
return val;
}
#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor
--
2.44.0