Re: [tip:locking/core] locking/pvqspinlock: Replace xchg() by the more descriptive set_mb()

From: Peter Zijlstra
Date: Mon May 11 2015 - 10:54:31 EST


On Mon, May 11, 2015 at 05:43:55AM -0700, tip-bot for Waiman Long wrote:
> - (void)xchg(&pn->state, vcpu_halted);
> + set_mb(pn->state, vcpu_halted);

Hmm, so I looked at the set_mb() definitions and I figure we want to do
something like the below, right?
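
(Background for why the plain store is not enough: WRITE_ONCE() is,
roughly speaking, a volatile store, so the compiler can no longer tear,
elide or duplicate the assignment that precedes the barrier. A minimal
sketch of the idea, not the actual include/linux/compiler.h definition:

	/* plain store: the compiler may tear, reorder or re-issue it */
	var = value;

	/* roughly what WRITE_ONCE(var, value) does: a volatile access,
	 * so the store happens exactly once, at its stated size */
	*(volatile typeof(var) *)&(var) = (value);

which is presumably what we want here, since the xchg() this replaced
was atomic and could not be torn.)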

---
arch/arm/include/asm/barrier.h | 2 +-
arch/arm64/include/asm/barrier.h | 2 +-
arch/ia64/include/asm/barrier.h | 2 +-
arch/metag/include/asm/barrier.h | 2 +-
arch/mips/include/asm/barrier.h | 2 +-
arch/powerpc/include/asm/barrier.h | 2 +-
arch/s390/include/asm/barrier.h | 2 +-
arch/sparc/include/asm/barrier_64.h | 2 +-
arch/x86/include/asm/barrier.h | 2 +-
include/asm-generic/barrier.h | 2 +-
10 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
index d2f81e6b8c1c..993150aea681 100644
--- a/arch/arm/include/asm/barrier.h
+++ b/arch/arm/include/asm/barrier.h
@@ -81,7 +81,7 @@ do { \
#define read_barrier_depends() do { } while(0)
#define smp_read_barrier_depends() do { } while(0)

-#define set_mb(var, value) do { var = value; smp_mb(); } while (0)
+#define set_mb(var, value) do { WRITE_ONCE(var, value); smp_mb(); } while (0)

#define smp_mb__before_atomic() smp_mb()
#define smp_mb__after_atomic() smp_mb()
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index 71f19c4dc0de..ff7de78d01b8 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -114,7 +114,7 @@ do { \
#define read_barrier_depends() do { } while(0)
#define smp_read_barrier_depends() do { } while(0)

-#define set_mb(var, value) do { var = value; smp_mb(); } while (0)
+#define set_mb(var, value) do { WRITE_ONCE(var, value); smp_mb(); } while (0)
#define nop() asm volatile("nop");

#define smp_mb__before_atomic() smp_mb()
diff --git a/arch/ia64/include/asm/barrier.h b/arch/ia64/include/asm/barrier.h
index f6769eb2bbf9..03117e7b2ab8 100644
--- a/arch/ia64/include/asm/barrier.h
+++ b/arch/ia64/include/asm/barrier.h
@@ -82,7 +82,7 @@ do { \
* acquire vs release semantics but we can't discuss this stuff with
* Linus just yet. Grrr...
*/
-#define set_mb(var, value) do { (var) = (value); mb(); } while (0)
+#define set_mb(var, value) do { WRITE_ONCE(var, value); mb(); } while (0)

/*
* The group barrier in front of the rsm & ssm are necessary to ensure
diff --git a/arch/metag/include/asm/barrier.h b/arch/metag/include/asm/barrier.h
index d703d8e26a65..97eb018a2933 100644
--- a/arch/metag/include/asm/barrier.h
+++ b/arch/metag/include/asm/barrier.h
@@ -84,7 +84,7 @@ static inline void fence(void)
#define read_barrier_depends() do { } while (0)
#define smp_read_barrier_depends() do { } while (0)

-#define set_mb(var, value) do { var = value; smp_mb(); } while (0)
+#define set_mb(var, value) do { WRITE_ONCE(var, value); smp_mb(); } while (0)

#define smp_store_release(p, v) \
do { \
diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
index 2b8bbbcb9be0..cff1bbdaa74a 100644
--- a/arch/mips/include/asm/barrier.h
+++ b/arch/mips/include/asm/barrier.h
@@ -113,7 +113,7 @@
#endif

#define set_mb(var, value) \
- do { var = value; smp_mb(); } while (0)
+ do { WRITE_ONCE(var, value); smp_mb(); } while (0)

#define smp_llsc_mb() __asm__ __volatile__(__WEAK_LLSC_MB : : :"memory")

diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
index a3bf5be111ff..2a072e48780d 100644
--- a/arch/powerpc/include/asm/barrier.h
+++ b/arch/powerpc/include/asm/barrier.h
@@ -34,7 +34,7 @@
#define rmb() __asm__ __volatile__ ("sync" : : : "memory")
#define wmb() __asm__ __volatile__ ("sync" : : : "memory")

-#define set_mb(var, value) do { var = value; mb(); } while (0)
+#define set_mb(var, value) do { WRITE_ONCE(var, value); mb(); } while (0)

#ifdef __SUBARCH_HAS_LWSYNC
# define SMPWMB LWSYNC
diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
index 8d724718ec21..b66cd53d35fc 100644
--- a/arch/s390/include/asm/barrier.h
+++ b/arch/s390/include/asm/barrier.h
@@ -36,7 +36,7 @@
#define smp_mb__before_atomic() smp_mb()
#define smp_mb__after_atomic() smp_mb()

-#define set_mb(var, value) do { var = value; mb(); } while (0)
+#define set_mb(var, value) do { WRITE_ONCE(var, value); mb(); } while (0)

#define smp_store_release(p, v) \
do { \
diff --git a/arch/sparc/include/asm/barrier_64.h b/arch/sparc/include/asm/barrier_64.h
index 76648941fea7..125fec7512f4 100644
--- a/arch/sparc/include/asm/barrier_64.h
+++ b/arch/sparc/include/asm/barrier_64.h
@@ -41,7 +41,7 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
#define dma_wmb() wmb()

#define set_mb(__var, __value) \
- do { __var = __value; membar_safe("#StoreLoad"); } while(0)
+ do { WRITE_ONCE(__var, __value); membar_safe("#StoreLoad"); } while(0)

#ifdef CONFIG_SMP
#define smp_mb() mb()
diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
index 959e45b81fe2..9de5cde133a1 100644
--- a/arch/x86/include/asm/barrier.h
+++ b/arch/x86/include/asm/barrier.h
@@ -40,7 +40,7 @@
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
-#define set_mb(var, value) do { var = value; barrier(); } while (0)
+#define set_mb(var, value) do { WRITE_ONCE(var, value); barrier(); } while (0)
#endif /* SMP */

#define read_barrier_depends() do { } while (0)
diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
index f5c40b0fadc2..3938716b44d7 100644
--- a/include/asm-generic/barrier.h
+++ b/include/asm-generic/barrier.h
@@ -67,7 +67,7 @@
#endif

#ifndef set_mb
-#define set_mb(var, value) do { (var) = (value); mb(); } while (0)
+#define set_mb(var, value) do { WRITE_ONCE(var, value); mb(); } while (0)
#endif

#ifndef smp_mb__before_atomic
--