Re: [PATCH] locking/osq: Drop the overload of osq lock

From: xinhui
Date: Mon Jun 27 2016 - 03:37:15 EST




On 2016/06/27 14:45, Boqun Feng wrote:
On Sun, Jun 26, 2016 at 02:59:26PM +0800, Boqun Feng wrote:
[snip]

This should be:

extern struct vcpu_preempt_ops vcpu_preempt_ops;

And I tested this one along with a modified version of Xinhui's patch.

The test showed that even in a non-over-committed system, the overhead
of mutex_lock() could go down from 2.58% (w/o patches) to 0.87% (w/
patches).


Clearly, I made several more mistakes in the patch, and the vcpu
preemption detection could never be activated, so please ignore this
benchmark result.

Here is a new version of the vcpu preemption patch. Thoughts?

Regards,
Boqun

---------------->8
virt: Introduce vcpu preemption detection functions

Lock holder preemption is a serious problem for locking in virtualized
environments. Different archs and hypervisors employ different ways to
mitigate it in different locking mechanisms. Recently, Pan Xinhui found
a significant performance drop in osq_lock(), which could be solved by
providing a mechanism to detect the preemption of a vcpu.

Therefore this patch introduces several vcpu preemption detection
primitives, which allow locking primitives to avoid the cost of
unnecessary spinning. These primitives act as an abstraction layer
between locking functions and arch- or hypervisor-specific code.

There are two sets of primitives:

1. vcpu_preempt_count() and vcpu_has_preempted(): these must be used
as a pair within the same preempt-disable critical section, and
together they detect whether a vcpu preemption happened between the
two calls (see the sketch below).

2. vcpu_is_preempted(): checks whether another cpu's vcpu is
preempted.
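
For example, a spin loop could combine both sets roughly as below. This
is only a sketch with made-up names (node, holder_cpu); the actual
osq_lock() conversion is a separate patch:

	long vpc = vcpu_preempt_count();

	while (!READ_ONCE(node->locked)) {
		/* Stop spinning if our own vcpu was preempted meanwhile... */
		if (vcpu_has_preempted(vpc))
			return false;

		/* ... or if the lock holder's vcpu is not currently running. */
		if (vcpu_is_preempted(holder_cpu))
			return false;

		cpu_relax();
	}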

This patch also implements those primitives on pseries and wires them up.


Signed-off-by: Boqun Feng <boqun.feng@xxxxxxxxx>
---
arch/powerpc/platforms/pseries/Kconfig | 1 +
arch/powerpc/platforms/pseries/setup.c | 28 ++++++++++++
include/linux/vcpu_preempt.h | 82 ++++++++++++++++++++++++++++++++++
kernel/Kconfig.preempt | 5 ++-
4 files changed, 115 insertions(+), 1 deletion(-)
create mode 100644 include/linux/vcpu_preempt.h

diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index bec90fb30425..7d24c3e48878 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -21,6 +21,7 @@ config PPC_PSERIES
select HOTPLUG_CPU if SMP
select ARCH_RANDOM
select PPC_DOORBELL
+ select HAS_VCPU_PREEMPTION_DETECTION
default y

config PPC_SPLPAR
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 9883bc7ea007..5d4aed54e039 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -42,6 +42,7 @@
#include <linux/of.h>
#include <linux/of_pci.h>
#include <linux/kexec.h>
+#include <linux/vcpu_preempt.h>

#include <asm/mmu.h>
#include <asm/processor.h>
@@ -501,6 +502,31 @@ static void __init find_and_init_phbs(void)
of_pci_check_probe_only();
}

+struct vcpu_preempt_ops vcpu_preempt_ops = DEFAULT_VCPU_PREEMPT_OPS;
+EXPORT_SYMBOL(vcpu_preempt_ops);
+
+static long pseries_vcpu_preempt_count(void)
+{
+ return be32_to_cpu(get_lppaca()->yield_count);
+}
+
+static bool pseries_vcpu_is_preempted(int cpu)
+{
+ return !!(be32_to_cpu(lppaca_of(cpu).yield_count) & 1);
+}
+
+static bool pseries_vcpu_has_preempted(long vpc)
+{
+ return pseries_vcpu_preempt_count() != vpc;
+}
+
+static void __init pseries_setup_vcpu_preempt_ops(void)
+{
+ vcpu_preempt_ops.preempt_count = pseries_vcpu_preempt_count;
+ vcpu_preempt_ops.is_preempted = pseries_vcpu_is_preempted;
+ vcpu_preempt_ops.has_preempted = pseries_vcpu_has_preempted;
+}
+
static void __init pSeries_setup_arch(void)
{
set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT);
@@ -549,6 +575,8 @@ static void __init pSeries_setup_arch(void)
"%ld\n", rc);
}
}
+
+ pseries_setup_vcpu_preempt_ops();
}

static int __init pSeries_init_panel(void)
diff --git a/include/linux/vcpu_preempt.h b/include/linux/vcpu_preempt.h
new file mode 100644
index 000000000000..e76e054a7917
--- /dev/null
+++ b/include/linux/vcpu_preempt.h
@@ -0,0 +1,82 @@
+/*
+ * Primitives for checking the vcpu preemption from the guest.
+ */
+
+static inline long __vcpu_preempt_count(void)
+{
+ return 0;
+}
+
+static inline bool __vcpu_has_preempted(long vpc)
+{
+ return false;
+}
+
+static inline bool __vcpu_is_preempted(int cpu)
+{
+ return false;
+}
+
+struct vcpu_preempt_ops {
+ /*
+ * Get the current vcpu's "preempt count", which is going to be used
+ * for checking whether the current vcpu has ever been preempted.
+ */
+ long (*preempt_count)(void);
+
+ /*
+ * Return whether a vcpu is preempted
+ */
+ bool (*is_preempted)(int cpu);
+
+ /*
+ * Given a "vcpu preempt count", return whether a vcpu preemption ever
+ * happened after .preempt_count() was called.
+ */
+ bool (*has_preempted)(long vpc);
+};
+
+extern struct vcpu_preempt_ops vcpu_preempt_ops;
+
+/* Default boilerplate */
+#define DEFAULT_VCPU_PREEMPT_OPS \
+ { \
+ .preempt_count = __vcpu_preempt_count, \
+ .is_preempted = __vcpu_is_preempted, \
+ .has_preempted = __vcpu_has_preempted \
+ }
+
+#ifdef CONFIG_HAS_VCPU_PREEMPTION_DETECTION
+/*
+ * vcpu_preempt_count: Get the current cpu's "vcpu preempt count" (vpc).
+ *
+ * The vpc is used for checking whether the current vcpu has ever been
+ * preempted via vcpu_has_preempted().
+ *
+ * This function and vcpu_has_preempted() should be called in the same
+ * preemption disabled critical section.
+ */
+#define vcpu_preempt_count() vcpu_preempt_ops.preempt_count()
+
+/*
+ * vcpu_is_preempted: Check whether @cpu's vcpu is preempted.
+ */
+#define vcpu_is_preempted(cpu) vcpu_preempt_ops.is_preempted(cpu)
+
+/*
+ * vcpu_has_preempted: Check whether the current cpu's vcpu has ever been
+ * preempted.
+ *
+ * The checked duration is between the call to vcpu_preempt_count() that
+ * returned @vpc and the call to this function.
+ *
+ * This function and the corresponding vcpu_preempt_count() should be called
+ * in the same preemption disabled critical section.
+ */
+#define vcpu_has_preempted(vpc) vcpu_preempt_ops.has_preempted(vpc)
+
+#else /* CONFIG_HAS_VCPU_PREEMPTION_DETECTION */
+#define vcpu_preempt_count() __vcpu_preempt_count()
+#define vcpu_is_preempted(cpu) __vcpu_is_preempted(cpu)
+#define vcpu_has_preempted(vpc) __vcpu_has_preempted(vpc)
+#endif /* CONFIG_HAS_VCPU_PREEMPTION_DETECTION */

And if possible, we can hide the details of the vcpu preemption check. IOW, callers had better not know about vcpu_preempt_count().

Currently, users have to write code like this:

	long vpc = vcpu_preempt_count();

	while (...) {
		if (vcpu_has_preempted(vpc))
			break;
	}

The vpc being a long is an implementation detail that leaks to the caller.



So we can introduce:

	/* per-cpu slot backing the helpers (assumed; not in the patch above) */
	DEFINE_PER_CPU(long, vpc);

	void vcpu_preempt_critical_start(void)
	{
		/* record the vcpu preempt count (the yield count on pseries) */
		__this_cpu_write(vpc, vcpu_preempt_count());
	}

	void vcpu_preempt_critical_end(void)
	{
		/* nothing to do for now */
	}

	bool vcpu_has_preempted(void)
	{
		return vcpu_preempt_ops.has_preempted(__this_cpu_read(vpc));
	}


Then we can write code like this:

	vcpu_preempt_critical_start();

	while (...) {
		if (vcpu_has_preempted())
			break;
	}

	vcpu_preempt_critical_end();

Does that look a little better?
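
One caveat, following the "same preempt disable critical section" rule
from the changelog: since vpc is per-cpu data, the whole sequence still
has to run with preemption disabled, otherwise the task may migrate
between __this_cpu_write() and __this_cpu_read(). Roughly (done is a
made-up condition):

	preempt_disable();

	vcpu_preempt_critical_start();

	while (!done) {
		if (vcpu_has_preempted())
			break;
		cpu_relax();
	}

	vcpu_preempt_critical_end();

	preempt_enable();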

thanks
xinhui

diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt
index 3f9c97419f02..cb88bc3a2fc6 100644
--- a/kernel/Kconfig.preempt
+++ b/kernel/Kconfig.preempt
@@ -55,4 +55,7 @@ config PREEMPT
endchoice

config PREEMPT_COUNT
- bool
\ No newline at end of file
+ bool
+
+config HAS_VCPU_PREEMPTION_DETECTION
+ bool