+#ifdef CONFIG_RT_MUTEX_SPIN_ON_OWNER
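+/*
+ * Spin while @owner still holds @lock and is running on a CPU.  Returns
+ * true if the caller should retry taking the lock, false if it should
+ * stop spinning and block.
+ */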
+static bool rt_mutex_spin_on_owner(struct rt_mutex *lock,
+ struct task_struct *owner)
+{
+ bool ret = true;
+
+ /*
+ * The last owner could have just released the lock;
+ * immediately try taking it again.
+ */
+ if (!owner)
+ goto done;
+
+ rcu_read_lock();
+ while (rt_mutex_owner(lock) == owner) {
+ /*
+ * Ensure we emit the owner->on_cpu dereference _after_
+ * checking lock->owner still matches owner. If that fails,
+ * owner might point to freed memory. If it still matches,
+ * the rcu_read_lock() ensures the memory stays valid.
+ */
+ barrier();
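+ /*
+ * Stop spinning if the owner has been scheduled out or if this
+ * task itself needs to reschedule.
+ */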
+ if (!owner->on_cpu || need_resched()) {
+ ret = false;
+ break;
+ }
+
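+ /* Relax the CPU between polls of the lock owner. */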
+ cpu_relax_lowlatency();
+ }
+ rcu_read_unlock();
+done:
+ return ret;
+}
+