On Fri, Sep 11, 2015 at 02:37:37PM -0400, Waiman Long wrote:
+#define queued_spin_trylock(l)	pv_queued_spin_trylock_unfair(l)

These aren't actually ever used...
+/*
+ * Unfair trylock: attempt to take the lock by writing the lock byte
+ * directly with cmpxchg, without queueing (hence "unfair").
+ * Returns true if the lock was acquired, false otherwise.
+ */
+static inline bool pv_queued_spin_trylock_unfair(struct qspinlock *lock)
+{
+ struct __qspinlock *l = (void *)lock;
+
+ /* Cheap read first: skip the atomic op if the lock is visibly held. */
+ if (READ_ONCE(l->locked))
+ return 0;
+ /*
+ * Wait a bit here to ensure that an actively spinning vCPU has a fair
+ * chance of getting the lock.
+ */
+ cpu_relax();
+
+ /* Acquire by flipping the lock byte 0 -> _Q_LOCKED_VAL atomically. */
+ return cmpxchg(&l->locked, 0, _Q_LOCKED_VAL) == 0;
+}
+/*
+ * Statistics wrapper around pv_queued_spin_trylock_unfair(): counts
+ * each successful unfair trylock in the pvstat_utrylock counter.
+ * Returns the trylock result unchanged.
+ */
+static inline int pvstat_trylock_unfair(struct qspinlock *lock)
+{
+ int ret = pv_queued_spin_trylock_unfair(lock);
+
+ if (ret)
+ pvstat_inc(pvstat_utrylock);
+ return ret;
+}
+/* Redirect queued_spin_trylock() through the stats-gathering wrapper. */
+#undef queued_spin_trylock
+#define queued_spin_trylock(l) pvstat_trylock_unfair(l)