[tip:locking/core] locking/paravirt: Use new static key for controlling call of virt_spin_lock()

From: tip-bot for Juergen Gross
Date: Tue Oct 10 2017 - 07:07:06 EST


Commit-ID: 9043442b43b1fddf202591b84702863286700c1a
Gitweb: https://git.kernel.org/tip/9043442b43b1fddf202591b84702863286700c1a
Author: Juergen Gross <jgross@xxxxxxxx>
AuthorDate: Wed, 6 Sep 2017 19:36:24 +0200
Committer: Ingo Molnar <mingo@xxxxxxxxxx>
CommitDate: Tue, 10 Oct 2017 11:50:12 +0200

locking/paravirt: Use new static key for controlling call of virt_spin_lock()

There are cases where a guest tries to switch spinlocks to bare metal
behavior (e.g. by setting the "xen_nopvspin" boot parameter). Today this
has the downside of falling back to the unfair test-and-set scheme for
qspinlocks, because virt_spin_lock() still detects the virtualized
environment.
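
For reference, the fallback in question is (roughly) the test-and-set
slow path already present in virt_spin_lock(); a simplified sketch:

	/*
	 * Sketch of the existing unfair fallback: spin until the lock word
	 * reads as free, then race for it with a cmpxchg. There is no
	 * queueing, so waiters can starve each other.
	 */
	do {
		while (atomic_read(&lock->val) != 0)
			cpu_relax();
	} while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0);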

Add a static key controlling whether virt_spin_lock() should be
called or not. When running on bare metal, set the new key to false.
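
A guest that wants bare metal behavior can then disable the key from its
own initialization code instead of relying on the hypervisor feature bit.
A sketch (not part of this patch) of how e.g. the Xen spinlock init could
honor "xen_nopvspin":

	/* Sketch only: xen_pvspin is cleared by the "xen_nopvspin" parameter. */
	if (!xen_pvspin) {
		static_branch_disable(&virt_spin_lock_key);
		return;
	}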

Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
Acked-by: Waiman Long <longman@xxxxxxxxxx>
Cc: Linus Torvalds <torvalds@xxxxxxxxxxxxxxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: akataria@xxxxxxxxxx
Cc: boris.ostrovsky@xxxxxxxxxx
Cc: chrisw@xxxxxxxxxxxx
Cc: hpa@xxxxxxxxx
Cc: jeremy@xxxxxxxx
Cc: rusty@xxxxxxxxxxxxxxx
Cc: virtualization@xxxxxxxxxxxxxxxxxxxxxxxxxx
Cc: xen-devel@xxxxxxxxxxxxxxxxxxxx
Link: http://lkml.kernel.org/r/20170906173625.18158-2-jgross@xxxxxxxx
Signed-off-by: Ingo Molnar <mingo@xxxxxxxxxx>
---
arch/x86/include/asm/qspinlock.h | 11 ++++++++++-
arch/x86/kernel/paravirt.c       | 14 ++++++++++++--
arch/x86/kernel/smpboot.c        |  2 ++
3 files changed, 24 insertions(+), 3 deletions(-)

diff --git a/arch/x86/include/asm/qspinlock.h b/arch/x86/include/asm/qspinlock.h
index 48a706f..308dfd0 100644
--- a/arch/x86/include/asm/qspinlock.h
+++ b/arch/x86/include/asm/qspinlock.h
@@ -1,6 +1,7 @@
#ifndef _ASM_X86_QSPINLOCK_H
#define _ASM_X86_QSPINLOCK_H

+#include <linux/jump_label.h>
#include <asm/cpufeature.h>
#include <asm-generic/qspinlock_types.h>
#include <asm/paravirt.h>
@@ -46,10 +47,14 @@ static inline void queued_spin_unlock(struct qspinlock *lock)
#endif

#ifdef CONFIG_PARAVIRT
+DECLARE_STATIC_KEY_TRUE(virt_spin_lock_key);
+
+void native_pv_lock_init(void) __init;
+
#define virt_spin_lock virt_spin_lock
static inline bool virt_spin_lock(struct qspinlock *lock)
{
-	if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
+	if (!static_branch_likely(&virt_spin_lock_key))
		return false;

/*
@@ -65,6 +70,10 @@ static inline bool virt_spin_lock(struct qspinlock *lock)

return true;
}
+#else
+static inline void native_pv_lock_init(void)
+{
+}
#endif /* CONFIG_PARAVIRT */

#include <asm-generic/qspinlock.h>
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 19a3e8f..041096b 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -115,8 +115,18 @@ unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
return 5;
}

-/* Neat trick to map patch type back to the call within the
- * corresponding structure. */
+DEFINE_STATIC_KEY_TRUE(virt_spin_lock_key);
+
+void __init native_pv_lock_init(void)
+{
+	if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
+		static_branch_disable(&virt_spin_lock_key);
+}
+
+/*
+ * Neat trick to map patch type back to the call within the
+ * corresponding structure.
+ */
static void *get_call_destination(u8 type)
{
struct paravirt_patch_template tmpl = {
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index ad59edd..361f916 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -77,6 +77,7 @@
#include <asm/i8259.h>
#include <asm/realmode.h>
#include <asm/misc.h>
+#include <asm/qspinlock.h>

/* Number of siblings per CPU package */
int smp_num_siblings = 1;
@@ -1384,6 +1385,7 @@ void __init native_smp_prepare_boot_cpu(void)
/* already set me in cpu_online_mask in boot_cpu_init() */
cpumask_set_cpu(me, cpu_callout_mask);
cpu_set_state_online(me);
+ native_pv_lock_init();
}

void __init native_smp_cpus_done(unsigned int max_cpus)