[PATCH 5/5] x86,atomic: Use atomic_try_cmpxchg
From: Peter Zijlstra
Date: Fri Mar 17 2017 - 17:31:43 EST
Better code generation:

      text       data        bss        dec        hex filename
  10665111    4530096     843776   16038983     f4bc47 defconfig-build/vmlinux.3
  10655703    4530096     843776   16029575     f49787 defconfig-build/vmlinux.4
Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
---
arch/x86/include/asm/atomic.h | 27 ++++++++----------------
arch/x86/include/asm/atomic64_64.h | 40 ++++++++++++-------------------------
2 files changed, 22 insertions(+), 45 deletions(-)
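
A note for readers (illustration, not part of the patch): atomic_try_cmpxchg()
returns true when the exchange succeeds and, on failure, writes the value it
actually found back into the caller's expected-value variable. That contract is
what lets the loops below drop the explicit compare-and-reload. A minimal
user-space model of the pattern, using the GCC __atomic builtins
(try_cmpxchg_int() and fetch_add_demo() are made-up names):

	#include <stdbool.h>

	/*
	 * Same contract as the kernel's try_cmpxchg(): true on success;
	 * on failure, *old is refreshed with the value currently in *v.
	 */
	static inline bool try_cmpxchg_int(int *v, int *old, int new)
	{
		return __atomic_compare_exchange_n(v, old, new, false,
						   __ATOMIC_SEQ_CST,
						   __ATOMIC_SEQ_CST);
	}

	/* The shape of the converted fetch-ops below. */
	static int fetch_add_demo(int *v, int i)
	{
		int val = __atomic_load_n(v, __ATOMIC_RELAXED);

		do {
			/* empty: a failed try_cmpxchg reloads 'val' */
		} while (!try_cmpxchg_int(v, &val, val + i));

		return val;	/* value observed before the add */
	}

On x86 the kernel's try_cmpxchg() additionally uses the ZF output of CMPXCHG
directly instead of a separate CMP, which is where most of the text shrinkage
above comes from.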
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -207,16 +207,12 @@ static inline void atomic_##op(int i, at
 }
 
 #define ATOMIC_FETCH_OP(op, c_op)					\
-static inline int atomic_fetch_##op(int i, atomic_t *v)		\
+static inline int atomic_fetch_##op(int i, atomic_t *v)	\
 {									\
-	int old, val = atomic_read(v);					\
-	for (;;) {							\
-		old = atomic_cmpxchg(v, val, val c_op i);		\
-		if (old == val)						\
-			break;						\
-		val = old;						\
-	}								\
-	return old;							\
+	int val = atomic_read(v);					\
+	do {								\
+	} while (!atomic_try_cmpxchg(v, &val, val c_op i));		\
+	return val;							\
 }
 
 #define ATOMIC_OPS(op, c_op)						\
@@ -242,16 +238,11 @@ ATOMIC_OPS(xor, ^)
  */
 static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
-	int c, old;
-	c = atomic_read(v);
-	for (;;) {
-		if (unlikely(c == (u)))
+	int c = atomic_read(v);
+	do {
+		if (unlikely(c == u))
 			break;
-		old = atomic_cmpxchg((v), c, c + (a));
-		if (likely(old == c))
-			break;
-		c = old;
-	}
+	} while (!atomic_try_cmpxchg(v, &c, c + a));
 	return c;
 }
 
--- a/arch/x86/include/asm/atomic64_64.h
+++ b/arch/x86/include/asm/atomic64_64.h
@@ -198,17 +198,12 @@ static inline long atomic64_xchg(atomic6
  */
 static inline bool atomic64_add_unless(atomic64_t *v, long a, long u)
 {
-	long c, old;
-	c = atomic64_read(v);
-	for (;;) {
-		if (unlikely(c == (u)))
-			break;
-		old = atomic64_cmpxchg((v), c, c + (a));
-		if (likely(old == c))
-			break;
-		c = old;
-	}
-	return c != (u);
+	long c = atomic64_read(v);
+	do {
+		if (unlikely(c == u))
+			return false;
+	} while (!atomic64_try_cmpxchg(v, &c, c + a));
+	return true;
 }
 
 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
@@ -222,17 +217,12 @@ static inline bool atomic64_add_unless(a
  */
 static inline long atomic64_dec_if_positive(atomic64_t *v)
 {
-	long c, old, dec;
-	c = atomic64_read(v);
-	for (;;) {
+	long dec, c = atomic64_read(v);
+	do {
 		dec = c - 1;
 		if (unlikely(dec < 0))
 			break;
-		old = atomic64_cmpxchg((v), c, dec);
-		if (likely(old == c))
-			break;
-		c = old;
-	}
+	} while (!atomic64_try_cmpxchg(v, &c, dec));
 	return dec;
 }
 
@@ -248,14 +238,10 @@ static inline void atomic64_##op(long i,
 #define ATOMIC64_FETCH_OP(op, c_op)					\
 static inline long atomic64_fetch_##op(long i, atomic64_t *v)		\
 {									\
-	long old, val = atomic64_read(v);				\
-	for (;;) {							\
-		old = atomic64_cmpxchg(v, val, val c_op i);		\
-		if (old == val)						\
-			break;						\
-		val = old;						\
-	}								\
-	return old;							\
+	long val = atomic64_read(v);					\
+	do {								\
+	} while (!atomic64_try_cmpxchg(v, &val, val c_op i));		\
+	return val;							\
 }
 
 #define ATOMIC64_OPS(op, c_op)						\
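
(Illustration, not part of the patch: after this change, ATOMIC64_FETCH_OP(or, |)
expands, modulo whitespace, to

	static inline long atomic64_fetch_or(long i, atomic64_t *v)
	{
		long val = atomic64_read(v);
		do {
		} while (!atomic64_try_cmpxchg(v, &val, val | i));
		return val;
	}

The empty loop body is deliberate: a failed atomic64_try_cmpxchg() updates
'val' in place, so the next iteration recomputes 'val | i' from the freshly
observed value.)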