[tip:x86/asm] x86/asm/tsc: Add rdtsc_ordered() and use it in trivial call sites

From: tip-bot for Andy Lutomirski
Date: Mon Jul 06 2015 - 11:45:18 EST


Commit-ID: 03b9730b769fc4d87e40f6104f4c5b2e43889f19
Gitweb: http://git.kernel.org/tip/03b9730b769fc4d87e40f6104f4c5b2e43889f19
Author: Andy Lutomirski <luto@xxxxxxxxxx>
AuthorDate: Thu, 25 Jun 2015 18:44:08 +0200
Committer: Ingo Molnar <mingo@xxxxxxxxxx>
CommitDate: Mon, 6 Jul 2015 15:23:29 +0200

x86/asm/tsc: Add rdtsc_ordered() and use it in trivial call sites

The rdtsc_barrier(); rdtsc() sequence is an unnecessary mouthful
and requires more thought than it should. Add an rdtsc_ordered()
helper and replace the trivial call sites with it.

This should not change generated code. The duplication of the
fence asm is temporary.
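
For readers who want to poke at the ordering behavior outside the
kernel, here is a minimal userspace sketch of the same pattern. It is
an illustration only: rdtsc_ordered_demo() is an invented name, and it
hard-codes LFENCE (the Intel flavor) because userspace has no
equivalent of the boot-time alternative_2() patching used in the patch
below.

#include <stdint.h>
#include <stdio.h>
#include <x86intrin.h>		/* __rdtsc(), _mm_lfence() */

/* Hypothetical userspace analogue of rdtsc_ordered(). */
static inline uint64_t rdtsc_ordered_demo(void)
{
	_mm_lfence();	/* keep RDTSC from executing ahead of prior loads */
	return __rdtsc();
}

int main(void)
{
	uint64_t t0 = rdtsc_ordered_demo();
	uint64_t t1 = rdtsc_ordered_demo();

	printf("delta: %llu cycles\n", (unsigned long long)(t1 - t0));
	return 0;
}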

Signed-off-by: Andy Lutomirski <luto@xxxxxxxxxx>
Signed-off-by: Borislav Petkov <bp@xxxxxxx>
Cc: Andy Lutomirski <luto@xxxxxxxxxxxxxx>
Cc: Borislav Petkov <bp@xxxxxxxxx>
Cc: Brian Gerst <brgerst@xxxxxxxxx>
Cc: Denys Vlasenko <dvlasenk@xxxxxxxxxx>
Cc: H. Peter Anvin <hpa@xxxxxxxxx>
Cc: Huang Rui <ray.huang@xxxxxxx>
Cc: John Stultz <john.stultz@xxxxxxxxxx>
Cc: Len Brown <lenb@xxxxxxxxxx>
Cc: Linus Torvalds <torvalds@xxxxxxxxxxxxxxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: Ralf Baechle <ralf@xxxxxxxxxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: kvm ML <kvm@xxxxxxxxxxxxxxx>
Link: http://lkml.kernel.org/r/dddbf98a2af53312e9aa73a5a2b1622fe5d6f52b.1434501121.git.luto@xxxxxxxxxx
Signed-off-by: Ingo Molnar <mingo@xxxxxxxxxx>
---
arch/x86/entry/vdso/vclock_gettime.c | 16 ++--------------
arch/x86/include/asm/msr.h | 26 ++++++++++++++++++++++++++
arch/x86/kernel/trace_clock.c | 7 +------
arch/x86/kvm/x86.c | 16 ++--------------
arch/x86/lib/delay.c | 9 +++------
5 files changed, 34 insertions(+), 40 deletions(-)

diff --git a/arch/x86/entry/vdso/vclock_gettime.c b/arch/x86/entry/vdso/vclock_gettime.c
index 0340d93..ca94fa6 100644
--- a/arch/x86/entry/vdso/vclock_gettime.c
+++ b/arch/x86/entry/vdso/vclock_gettime.c
@@ -175,20 +175,8 @@ static notrace cycle_t vread_pvclock(int *mode)

notrace static cycle_t vread_tsc(void)
{
- cycle_t ret;
- u64 last;
-
- /*
- * Empirically, a fence (of type that depends on the CPU)
- * before rdtsc is enough to ensure that rdtsc is ordered
- * with respect to loads. The various CPU manuals are unclear
- * as to whether rdtsc can be reordered with later loads,
- * but no one has ever seen it happen.
- */
- rdtsc_barrier();
- ret = (cycle_t)rdtsc();
-
- last = gtod->cycle_last;
+ cycle_t ret = (cycle_t)rdtsc_ordered();
+ u64 last = gtod->cycle_last;

if (likely(ret >= last))
return ret;
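
The ret >= last comparison kept above is the monotonicity clamp: if
this CPU's counter is slightly behind the last globally published
timestamp, the caller gets cycle_last back instead of a value that
would make time appear to run backwards. A sketch of the shape of
that logic, reusing the demo helper from the commit message above
(names are illustrative, not the kernel's):

static uint64_t read_tsc_clamped(const uint64_t *cycle_last)
{
	uint64_t ret = rdtsc_ordered_demo();
	uint64_t last = *cycle_last;	/* kernel: gtod->cycle_last */

	/* Never report a timestamp older than the published one. */
	return ret >= last ? ret : last;
}
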
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index ff0c120..02bdd6c 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -127,6 +127,32 @@ static __always_inline unsigned long long rdtsc(void)
return EAX_EDX_VAL(val, low, high);
}

+/**
+ * rdtsc_ordered() - read the current TSC in program order
+ *
+ * rdtsc_ordered() returns the result of RDTSC as a 64-bit integer.
+ * It is ordered like a load to a global in-memory counter. It should
+ * be impossible to observe non-monotonic rdtsc_ordered() behavior
+ * across multiple CPUs as long as the TSC is synced.
+ */
+static __always_inline unsigned long long rdtsc_ordered(void)
+{
+ /*
+ * The RDTSC instruction is not ordered relative to memory
+ * access. The Intel SDM and the AMD APM are both vague on this
+ * point, but empirically an RDTSC instruction can be
+ * speculatively executed before prior loads. An RDTSC
+ * immediately after an appropriate barrier appears to be
+ * ordered as a normal load, that is, it provides the same
+ * ordering guarantees as reading from a global memory location
+ * that some other imaginary CPU is updating continuously with a
+ * time stamp.
+ */
+ alternative_2("", "mfence", X86_FEATURE_MFENCE_RDTSC,
+ "lfence", X86_FEATURE_LFENCE_RDTSC);
+ return rdtsc();
+}
+
static inline unsigned long long native_read_pmc(int counter)
{
DECLARE_ARGS(val, low, high);
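
The alternative_2() line above is boot-time code patching: the kernel
rewrites that instruction slot to MFENCE on CPUs with
X86_FEATURE_MFENCE_RDTSC, to LFENCE on CPUs with
X86_FEATURE_LFENCE_RDTSC, and leaves it a no-op otherwise. A rough
userspace analogue picks the fence once at startup via CPUID instead
of patching instructions (sketch only; tsc_fence and friends are
invented names):

#include <cpuid.h>
#include <stdint.h>
#include <string.h>
#include <x86intrin.h>		/* _mm_lfence(), _mm_mfence(), __rdtsc() */

static void fence_lfence(void) { _mm_lfence(); }
static void fence_mfence(void) { _mm_mfence(); }

/* Selected once at startup; the kernel patches code instead. */
static void (*tsc_fence)(void) = fence_lfence;

static void tsc_fence_init(void)
{
	unsigned int eax, ebx, ecx, edx;
	char vendor[13] = { 0 };

	if (!__get_cpuid(0, &eax, &ebx, &ecx, &edx))
		return;
	memcpy(vendor, &ebx, 4);	/* vendor string order: EBX, EDX, ECX */
	memcpy(vendor + 4, &edx, 4);
	memcpy(vendor + 8, &ecx, 4);

	/* AMD CPUs of this era order RDTSC only behind MFENCE. */
	if (!strcmp(vendor, "AuthenticAMD"))
		tsc_fence = fence_mfence;
}

static inline uint64_t rdtsc_ordered_dispatch(void)
{
	tsc_fence();
	return __rdtsc();
}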
diff --git a/arch/x86/kernel/trace_clock.c b/arch/x86/kernel/trace_clock.c
index 67efb8c..80bb24d 100644
--- a/arch/x86/kernel/trace_clock.c
+++ b/arch/x86/kernel/trace_clock.c
@@ -12,10 +12,5 @@
*/
u64 notrace trace_clock_x86_tsc(void)
{
- u64 ret;
-
- rdtsc_barrier();
- ret = rdtsc();
-
- return ret;
+ return rdtsc_ordered();
}
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index dfa9713..8d73ec8 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1444,20 +1444,8 @@ EXPORT_SYMBOL_GPL(kvm_write_tsc);

static cycle_t read_tsc(void)
{
- cycle_t ret;
- u64 last;
-
- /*
- * Empirically, a fence (of type that depends on the CPU)
- * before rdtsc is enough to ensure that rdtsc is ordered
- * with respect to loads. The various CPU manuals are unclear
- * as to whether rdtsc can be reordered with later loads,
- * but no one has ever seen it happen.
- */
- rdtsc_barrier();
- ret = (cycle_t)rdtsc();
-
- last = pvclock_gtod_data.clock.cycle_last;
+ cycle_t ret = (cycle_t)rdtsc_ordered();
+ u64 last = pvclock_gtod_data.clock.cycle_last;

if (likely(ret >= last))
return ret;
diff --git a/arch/x86/lib/delay.c b/arch/x86/lib/delay.c
index f24bc59..4453d52 100644
--- a/arch/x86/lib/delay.c
+++ b/arch/x86/lib/delay.c
@@ -54,11 +54,9 @@ static void delay_tsc(unsigned long __loops)

preempt_disable();
cpu = smp_processor_id();
- rdtsc_barrier();
- bclock = rdtsc();
+ bclock = rdtsc_ordered();
for (;;) {
- rdtsc_barrier();
- now = rdtsc();
+ now = rdtsc_ordered();
if ((now - bclock) >= loops)
break;

@@ -79,8 +77,7 @@ static void delay_tsc(unsigned long __loops)
if (unlikely(cpu != smp_processor_id())) {
loops -= (now - bclock);
cpu = smp_processor_id();
- rdtsc_barrier();
- bclock = rdtsc();
+ bclock = rdtsc_ordered();
}
}
preempt_enable();
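
With the helper in place, the delay loop reduces to a textbook
busy-wait. A minimal userspace sketch of its core, again reusing the
demo helper above (the real delay_tsc() additionally re-bases bclock
after a CPU migration, as the hunk above shows, and briefly re-enables
preemption on each iteration):

static void delay_cycles_demo(uint64_t loops)
{
	uint64_t bclock = rdtsc_ordered_demo();

	while (rdtsc_ordered_demo() - bclock < loops)
		;	/* busy-wait until 'loops' cycles have elapsed */
}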
--