[PATCH 2/3] x86 perf: Protect LBR MSR accesses against potential #GP

From: Kan Liang
Date: Thu Jul 31 2014 - 13:32:03 EST


From: Kan Liang <kan.liang@xxxxxxxxx>

Intel PT will take over the LBR hardware. If RTIT_CTL.TraceEn=1, any attempt to
read or write the LBR or LER MSRs, including LBR_TOS, will result in a #GP.
Since Intel PT can be enabled/disabled at runtime by hardware/BIOS, the LBR MSR
accesses should be protected at runtime as well.

The {rd,wr}msrl_goto() macros can protect LBR accesses against the potential
#GP. Furthermore, they do not impact the performance of the "fast"
(non-faulting) path.
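
For illustration, a minimal sketch of what the write-side macro could look
like, assuming the asm-goto/exception-table approach proposed in patch 1/3
(the real definition lives in that patch; the names and constraints below are
illustrative only):

/*
 * Sketch: write an MSR, branching to the 'fail' label if WRMSR raises #GP.
 * The exception table entry redirects the faulting instruction to 'fail',
 * so the non-faulting path carries no extra conditional check.
 */
#define wrmsrl_goto(msr, val, fail)					\
do {									\
	asm_volatile_goto("1: wrmsr\n"					\
			  "2:\n"					\
			  _ASM_EXTABLE(1b, %l[fail])			\
			  : /* no outputs (asm goto) */			\
			  : "c" (msr), "a" ((u32)(val)),		\
			    "d" ((u32)((u64)(val) >> 32))		\
			  : "memory"					\
			  : fail);					\
} while (0)

A read-side rdmsrl_goto() needs additional handling, since asm goto statements
cannot have output operands; see patch 1/3 for the actual definitions.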

Signed-off-by: Kan Liang <kan.liang@xxxxxxxxx>
---
arch/x86/kernel/cpu/perf_event_intel_lbr.c | 35 ++++++++++++++++++++++++------
1 file changed, 28 insertions(+), 7 deletions(-)

diff --git a/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
index 9dd2459..ec82e0e 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_lbr.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
@@ -157,7 +157,11 @@ static void intel_pmu_lbr_reset_32(void)
int i;

for (i = 0; i < x86_pmu.lbr_nr; i++)
- wrmsrl(x86_pmu.lbr_from + i, 0);
+ wrmsrl_goto(x86_pmu.lbr_from + i, 0ULL, wrmsr_fail);
+ return;
+
+wrmsr_fail:
+ ; /* TODO: error path, but do nothing currently. */
}

static void intel_pmu_lbr_reset_64(void)
@@ -165,9 +169,13 @@ static void intel_pmu_lbr_reset_64(void)
int i;

for (i = 0; i < x86_pmu.lbr_nr; i++) {
- wrmsrl(x86_pmu.lbr_from + i, 0);
- wrmsrl(x86_pmu.lbr_to + i, 0);
+ wrmsrl_goto(x86_pmu.lbr_from + i, 0ULL, wrmsr_fail);
+ wrmsrl_goto(x86_pmu.lbr_to + i, 0ULL, wrmsr_fail);
}
+ return;
+
+wrmsr_fail:
+ ; /* TODO: error path, but do nothing currently. */
}

void intel_pmu_lbr_reset(void)
@@ -241,9 +249,13 @@ static inline u64 intel_pmu_lbr_tos(void)
{
u64 tos;

- rdmsrl(x86_pmu.lbr_tos, tos);
+ rdmsrl_goto(x86_pmu.lbr_tos, tos, rdmsr_fail);

return tos;
+
+rdmsr_fail:
+ /* TODO: error path, but do nothing currently. */
+ return 0;
}

static void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc)
@@ -262,7 +274,8 @@ static void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc)
u64 lbr;
} msr_lastbranch;

- rdmsrl(x86_pmu.lbr_from + lbr_idx, msr_lastbranch.lbr);
+ rdmsrl_goto(x86_pmu.lbr_from + lbr_idx,
+ msr_lastbranch.lbr, rdmsr_fail);

cpuc->lbr_entries[i].from = msr_lastbranch.from;
cpuc->lbr_entries[i].to = msr_lastbranch.to;
@@ -271,6 +284,10 @@ static void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc)
cpuc->lbr_entries[i].reserved = 0;
}
cpuc->lbr_stack.nr = i;
+ return;
+
+rdmsr_fail:
+ ; /* TODO: error path, but do nothing currently. */
}

/*
@@ -292,8 +309,8 @@ static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
int skip = 0;
int lbr_flags = lbr_desc[lbr_format];

- rdmsrl(x86_pmu.lbr_from + lbr_idx, from);
- rdmsrl(x86_pmu.lbr_to + lbr_idx, to);
+ rdmsrl_goto(x86_pmu.lbr_from + lbr_idx, from, rdmsr_fail);
+ rdmsrl_goto(x86_pmu.lbr_to + lbr_idx, to, rdmsr_fail);

if (lbr_flags & LBR_EIP_FLAGS) {
mis = !!(from & LBR_FROM_FLAG_MISPRED);
@@ -328,6 +345,10 @@ static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
out++;
}
cpuc->lbr_stack.nr = out;
+ return;
+
+rdmsr_fail:
+ ; /* TODO: error path, but do nothing currently. */
}

void intel_pmu_lbr_read(void)
--
1.8.3.1
