On Tue, Jan 14, 2014 at 11:44:03PM -0500, Waiman Long wrote:
+#ifndef arch_mutex_cpu_relax
+# define arch_mutex_cpu_relax() cpu_relax()
+#endif

Include <linux/mutex.h>.

+#ifndef smp_load_acquire
+# ifdef CONFIG_X86
+# define smp_load_acquire(p) \
+ ({ \
+ typeof(*p) ___p1 = ACCESS_ONCE(*p); \
+ barrier(); \
+ ___p1; \
+ })
+# else
+# define smp_load_acquire(p) \
+ ({ \
+ typeof(*p) ___p1 = ACCESS_ONCE(*p); \
+ smp_mb(); \
+ ___p1; \
+ })
+# endif
+#endif
+
+#ifndef smp_store_release
+# ifdef CONFIG_X86
+# define smp_store_release(p, v) \
+ do { \
+ barrier(); \
+ ACCESS_ONCE(*p) = v; \
+ } while (0)
+# else
+# define smp_store_release(p, v) \
+ do { \
+ smp_mb(); \
+ ACCESS_ONCE(*p) = v; \
+ } while (0)
+# endif
+#endif

Remove these.

+/*
+ * If an xadd (exchange-add) macro isn't available, simulate one with
+ * the atomic_add_return() function.
+ */
+#ifdef xadd
+# define qrw_xadd(rw, inc) xadd(&(rw).rwc, inc)
+#else
+# define qrw_xadd(rw, inc) (u32)(atomic_add_return(inc, &(rw).rwa) - inc)
+#endif

Is GCC really so stupid that you cannot always use the
atomic_add_return()? The x86 atomic_add_return() is i + xadd(), so you'll
end up with:

  i + xadd() - i

Surely it can just remove the two i terms?
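
FWIW, written out as a stand-alone user-space sketch, the expansion looks
like this; fetch_add(), add_return() and qrw_xadd_model() are illustrative
stand-ins I'm assuming here, not the kernel's xadd()/atomic_add_return():

#include <stdatomic.h>

/* stands in for the x86 lock xadd instruction: returns the old value */
static inline unsigned int fetch_add(atomic_uint *v, unsigned int i)
{
	return atomic_fetch_add(v, i);
}

/* stands in for atomic_add_return(): i + xadd() */
static inline unsigned int add_return(unsigned int i, atomic_uint *v)
{
	return i + fetch_add(v, i);
}

unsigned int qrw_xadd_model(atomic_uint *rwc, unsigned int inc)
{
	/* (inc + xadd()) - inc: the compiler should cancel the two inc terms */
	return add_return(inc, rwc) - inc;
}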

+/**
+ * wait_in_queue - Add to queue and wait until it is at the head
+ * @lock: Pointer to queue rwlock structure
+ * @node: Node pointer to be added to the queue
+ */
+static inline void wait_in_queue(struct qrwlock *lock, struct qrwnode *node)
+{
+ struct qrwnode *prev;
+
+ node->next = NULL;
+ node->wait = true;
+ prev = xchg(&lock->waitq, node);
+ if (prev) {
+ prev->next = node;
+ /*
+ * Wait until the waiting flag is off
+ */
+ while (smp_load_acquire(&node->wait))
+ arch_mutex_cpu_relax();
+ }
+}

Please rebase on top of the MCS lock patches such that this is gone.
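
For reference, the spin on smp_load_acquire(&node->wait) above only works
if the hand-off side clears the flag with a matching release store. A
rough sketch of that side, building on the quoted definitions; the name
signal_next() and its exact body are assumed here, not quoted from the
patch, and this is exactly the pairing the generic MCS lock code already
provides:

static inline void signal_next(struct qrwlock *lock, struct qrwnode *node)
{
	struct qrwnode *next;

	/* If we are the only queued node, just clear the queue tail. */
	if ((ACCESS_ONCE(lock->waitq) == node) &&
	    (cmpxchg(&lock->waitq, node, NULL) == node))
		return;

	/* Otherwise wait for the successor to link itself in ... */
	while (!(next = ACCESS_ONCE(node->next)))
		arch_mutex_cpu_relax();

	/* ... and release it; pairs with smp_load_acquire() in wait_in_queue(). */
	smp_store_release(&next->wait, false);
}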