On Mon, Feb 17, 2014 at 03:41:22PM -0500, Waiman Long wrote:
+void queue_spin_lock_slowpath(struct qspinlock *lock, int qsval)

why oh why?

+{
+        unsigned int cpu_nr, qn_idx;
+        struct qnode *node, *next;
+        u32 prev_qcode, my_qcode;
+
+#ifdef queue_spin_trylock_quick
+        /*
+         * Try the quick spinning code path
+         */
+        if (queue_spin_trylock_quick(lock, qsval))
+                return;
+#endif

WTF is this #ifdef for?
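
If this is meant as the usual arch-override hook, I assume the intended
wiring is the define-to-override pattern, i.e. an arch header along these
lines (the file name and the quick-path body are my guesses, not anything
in this patch):

/* arch/x86/include/asm/qspinlock.h -- hypothetical */
static inline int queue_spin_trylock_quick(struct qspinlock *lock, int qsval)
{
        /* arch specific fast path for lightly contended locks; body assumed */
        return 0;       /* 0 means: fall through to the generic slowpath */
}
#define queue_spin_trylock_quick queue_spin_trylock_quick

If so, a comment saying which archs are expected to provide this would help.
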
+        /*
+         * Get the queue node
+         */
+        cpu_nr = smp_processor_id();
+        node = get_qnode(&qn_idx);
+
+        if (unlikely(!node)) {
+                /*
+                 * This shouldn't happen, print a warning message
+                 * and busy spin on the lock.
+                 */
+                printk_sched(
+                        "qspinlock: queue node table exhausted at cpu %d!\n",
+                        cpu_nr);
+                while (!queue_spin_trylock_unfair(lock))
+                        arch_mutex_cpu_relax();
+                return;
+        }
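
(Side note: queue_spin_trylock_unfair(), as used here and further down,
presumably just grabs the lock bit while ignoring any waiters encoded above
it; roughly this shape, my guess rather than the patch's actual code:

static inline int queue_spin_trylock_unfair(struct qspinlock *lock)
{
        u32 qcode = atomic_read(&lock->qlcode);

        /* test only the lock bit; ignore waiters queued above it */
        if (qcode & _QSPINLOCK_LOCKED)
                return 0;
        return atomic_cmpxchg(&lock->qlcode, qcode,
                              qcode | _QSPINLOCK_LOCKED) == qcode;
}

i.e. it can jump the queue, hence "unfair".)
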
+
+        /*
+         * Set up the new cpu code to be exchanged
+         */
+        my_qcode = _SET_QCODE(cpu_nr, qn_idx);
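
For anyone reading along, my reading of the qcode encoding, inferred from
the qsval >> _QCODE_OFFSET test and the later &= ~_QSPINLOCK_LOCKED; the
exact field widths are a guess:

/* sketch of the encoding as I read it; widths/offsets assumed */
#define _QSPINLOCK_LOCKED       1U
#define _QCODE_OFFSET           8
#define _SET_QCODE(cpu, idx)    (((((cpu) + 1) << 2) | (idx)) << _QCODE_OFFSET \
                                 | _QSPINLOCK_LOCKED)

The cpu + 1 makes a nonzero qcode mean "somebody is queued", and the low
bit doubles as the lock bit, which would be why both prev_qcode and
my_qcode get it masked off after the exchange below.
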
+
+        /*
+         * Initialize the queue node
+         */
+        node->wait = true;
+        node->next = NULL;
+
+        /*
+         * The lock may be available at this point, try again if no task was
+         * waiting in the queue.
+         */
+        if (!(qsval >> _QCODE_OFFSET) && queue_spin_trylock(lock)) {
+                put_qnode();
+                return;
+        }
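
(For context, queue_spin_trylock() here is presumably the obvious cmpxchg
on the whole word; something like the following, not verbatim from the
patch:

static inline int queue_spin_trylock(struct qspinlock *lock)
{
        /* only succeeds when neither the lock bit nor any qcode is set */
        return atomic_cmpxchg(&lock->qlcode, 0, _QSPINLOCK_LOCKED) == 0;
}

so it is the fair variant: it refuses the lock if anyone is queued.)
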
+
+#ifdef queue_code_xchg
+        prev_qcode = queue_code_xchg(lock, my_qcode);
+#else
+        /*
+         * Exchange current copy of the queue node code
+         */
+        prev_qcode = atomic_xchg(&lock->qlcode, my_qcode);
+        /*
+         * It is possible that we may accidentally steal the lock. If this is
+         * the case, we need to either release it if not the head of the queue
+         * or get the lock and be done with it.
+         */
+        if (unlikely(!(prev_qcode & _QSPINLOCK_LOCKED))) {
+                if (prev_qcode == 0) {
+                        /*
+                         * Got the lock since we are at the head of the queue.
+                         * Now try to atomically clear the queue code.
+                         */
+                        if (atomic_cmpxchg(&lock->qlcode, my_qcode,
+                                           _QSPINLOCK_LOCKED) == my_qcode)
+                                goto release_node;
+                        /*
+                         * The cmpxchg fails only if one or more tasks
+                         * are added to the queue. In this case, we need to
+                         * notify the next one to be the head of the queue.
+                         */
+                        goto notify_next;
+                }
+                /*
+                 * We accidentally stole the lock; release it and
+                 * let the queue head get it.
+                 */
+                queue_spin_unlock(lock);
+        } else
+                prev_qcode &= ~_QSPINLOCK_LOCKED;       /* Clear the lock bit */
+        my_qcode &= ~_QSPINLOCK_LOCKED;
+#endif /* queue_code_xchg */

Why is this an option at all?

+        if (prev_qcode) {
+                /*
+                 * Not at the queue head, get the address of the previous node
+                 * and set up the "next" field of that node.
+                 */
+                struct qnode *prev = xlate_qcode(prev_qcode);
+
+                ACCESS_ONCE(prev->next) = node;
+                /*
+                 * Wait until the waiting flag is off
+                 */
+                while (smp_load_acquire(&node->wait))
+                        arch_mutex_cpu_relax();
+        }
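
The smp_load_acquire() wants a pairing store-release on whoever clears
->wait; I assume the notify_next path (not quoted here) ends up doing the
MCS-style handoff, something like:

        /* wait for the successor to finish linking itself in */
        while (!(next = ACCESS_ONCE(node->next)))
                arch_mutex_cpu_relax();
        /* hand off; pairs with smp_load_acquire(&node->wait) above */
        smp_store_release(&next->wait, false);
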
+
+        /*
+         * At the head of the wait queue now
+         */
+        while (true) {
+                u32 qcode;
+                int retval;
+
+                retval = queue_get_lock_qcode(lock, &qcode, my_qcode);
+                if (retval > 0)
+                        ;       /* Lock not available yet */
+                else if (retval < 0)
+                        /* Lock taken, can release the node & return */
+                        goto release_node;
+                else if (qcode != my_qcode) {
+                        /*
+                         * Just get the lock with other spinners waiting
+                         * in the queue.
+                         */
+                        if (queue_spin_trylock_unfair(lock))
+                                goto notify_next;