[RFC][PATCH 02/31] locking,alpha: Implement atomic{,64}_fetch_{add,sub,and,andnot,or,xor}()
From: Peter Zijlstra
Date: Fri Apr 22 2016 - 05:52:48 EST
Implement FETCH-OP atomic primitives; these are very similar to the
existing OP-RETURN primitives we already have, except they return the
value of the atomic variable _before_ modification.
This is especially useful for irreversible operations -- such as
bitops -- because the state prior to modification cannot be
reconstructed afterwards.
Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
---
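[Editor's note, not part of the patch: the before/after distinction in the
changelog can be illustrated with the portable GCC __atomic builtins, which
follow the same convention as the kernel's atomic_fetch_or()/atomic_or_return()
pair. This is only a stand-in sketch, not kernel code.]

#include <stdio.h>

int main(void)
{
	int v = 0x01;

	/* fetch_or returns the value *before* the OR -- enough to tell
	 * whether bit 1 was already set, which an "or return" style
	 * primitive cannot do once the bit has been folded in. */
	int old = __atomic_fetch_or(&v, 0x02, __ATOMIC_SEQ_CST);

	/* or_fetch (the OP-RETURN flavour) returns the value *after*. */
	int new = __atomic_or_fetch(&v, 0x04, __ATOMIC_SEQ_CST);

	printf("old=%#x new=%#x v=%#x\n", old, new, v); /* old=0x1 new=0x7 v=0x7 */
	return 0;
}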
arch/alpha/include/asm/atomic.h | 67 ++++++++++++++++++++++++++++++++++------
1 file changed, 58 insertions(+), 9 deletions(-)
--- a/arch/alpha/include/asm/atomic.h
+++ b/arch/alpha/include/asm/atomic.h
@@ -65,6 +65,26 @@ static inline int atomic_##op##_return(i
return result; \
}
+#define ATOMIC_FETCH_OP(op, asm_op) \
+static inline int atomic_fetch_##op(int i, atomic_t *v) \
+{ \
+ long temp, result; \
+ smp_mb(); \
+ __asm__ __volatile__( \
+ "1: ldl_l %0,%1\n" \
+ " mov %0,%2\n" \
+ " " #asm_op " %0,%3,%0\n" \
+ " stl_c %0,%1\n" \
+ " beq %0,2f\n" \
+ ".subsection 2\n" \
+ "2: br 1b\n" \
+ ".previous" \
+ :"=&r" (temp), "=m" (v->counter), "=&r" (result) \
+ :"Ir" (i), "m" (v->counter) : "memory"); \
+ smp_mb(); \
+ return result; \
+}
+
#define ATOMIC64_OP(op, asm_op) \
static __inline__ void atomic64_##op(long i, atomic64_t * v) \
{ \
@@ -101,11 +121,33 @@ static __inline__ long atomic64_##op##_r
return result; \
}
+#define ATOMIC64_FETCH_OP(op, asm_op) \
+static __inline__ long atomic64_fetch_##op(long i, atomic64_t * v) \
+{ \
+ long temp, result; \
+ smp_mb(); \
+ __asm__ __volatile__( \
+ "1: ldq_l %0,%1\n" \
+ " mov %0,%2\n" \
+ " " #asm_op " %0,%3,%0\n" \
+ " stq_c %0,%1\n" \
+ " beq %0,2f\n" \
+ ".subsection 2\n" \
+ "2: br 1b\n" \
+ ".previous" \
+ :"=&r" (temp), "=m" (v->counter), "=&r" (result) \
+ :"Ir" (i), "m" (v->counter) : "memory"); \
+ smp_mb(); \
+ return result; \
+}
+
#define ATOMIC_OPS(op) \
ATOMIC_OP(op, op##l) \
ATOMIC_OP_RETURN(op, op##l) \
+ ATOMIC_FETCH_OP(op, op##l) \
ATOMIC64_OP(op, op##q) \
- ATOMIC64_OP_RETURN(op, op##q)
+ ATOMIC64_OP_RETURN(op, op##q) \
+ ATOMIC64_FETCH_OP(op, op##q)
ATOMIC_OPS(add)
ATOMIC_OPS(sub)
@@ -113,18 +155,25 @@ ATOMIC_OPS(sub)
#define atomic_andnot atomic_andnot
#define atomic64_andnot atomic64_andnot
-ATOMIC_OP(and, and)
-ATOMIC_OP(andnot, bic)
-ATOMIC_OP(or, bis)
-ATOMIC_OP(xor, xor)
-ATOMIC64_OP(and, and)
-ATOMIC64_OP(andnot, bic)
-ATOMIC64_OP(or, bis)
-ATOMIC64_OP(xor, xor)
+#define atomic_fetch_or atomic_fetch_or
+
+#undef ATOMIC_OPS
+#define ATOMIC_OPS(op, asm) \
+ ATOMIC_OP(op, asm) \
+ ATOMIC_FETCH_OP(op, asm) \
+ ATOMIC64_OP(op, asm) \
+ ATOMIC64_FETCH_OP(op, asm)
+
+ATOMIC_OPS(and, and)
+ATOMIC_OPS(andnot, bic)
+ATOMIC_OPS(or, bis)
+ATOMIC_OPS(xor, xor)
#undef ATOMIC_OPS
+#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
+#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
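[Editor's note, not part of the patch: the ldl_l/stl_c pair above is Alpha's
load-locked/store-conditional sequence -- stl_c only succeeds if nothing else
touched the location since the ldl_l, otherwise beq branches to the
out-of-line retry. On hosts without LL/SC, roughly the same retry structure
can be sketched with a C11 compare-and-swap loop; the function name below is
made up for the example.]

#include <stdatomic.h>

/* Illustrative only: retry until the CAS installs old | i, then hand back
 * the value observed *before* the OR, as atomic_fetch_or() does. */
static int example_fetch_or(_Atomic int *v, int i)
{
	int old = atomic_load_explicit(v, memory_order_relaxed);

	/* compare_exchange_weak refreshes 'old' on failure, so the empty
	 * loop body is the retry -- analogous to "beq %0,2f; 2: br 1b". */
	while (!atomic_compare_exchange_weak_explicit(v, &old, old | i,
						      memory_order_seq_cst,
						      memory_order_relaxed))
		;

	return old;
}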