[PATCH 06/32] x86/intel,cqm: add CONFIG_INTEL_RDT configuration flag and refactor PQR

From: David Carrillo-Cisneros
Date: Fri Apr 29 2016 - 00:46:31 EST


Make Intel's PQR common code its own build target, remove its build
dependency on CQM, and add CONFIG_INTEL_RDT as the configuration flag
that builds the PQR code and all drivers relying on it (currently CQM;
future: MBM, CAT, CDP).
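As a minimal sketch of how a consumer of the now-shared PQR state might
update MSR_IA32_PQR_ASSOC after this refactor, using the per-CPU cache
to skip redundant writes (the helper name and exact checks below are
illustrative assumptions, not code added by this patch):

  #include <linux/percpu.h>
  #include <asm/msr.h>
  #include <asm/pqr_common.h>

  static inline void __pqr_update_assoc(u32 rmid, u32 closid)
  {
          /* pqr_state is strictly per CPU; callers run with IRQs off. */
          struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);

          /* Avoid a pointless MSR write if nothing changed. */
          if (state->rmid == rmid && state->closid == closid)
                  return;

          state->rmid = rmid;
          state->closid = closid;

          /* closid goes in the upper 32 bits, rmid in the low bits. */
          wrmsr(MSR_IA32_PQR_ASSOC, rmid, closid);
  }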

Reviewed-by: Stephane Eranian <eranian@xxxxxxxxxx>
Signed-off-by: David Carrillo-Cisneros <davidcc@xxxxxxxxxx>
---
arch/x86/Kconfig | 6 ++++++
arch/x86/events/intel/Makefile | 3 ++-
arch/x86/events/intel/cqm.c | 27 +--------------------------
arch/x86/include/asm/pqr_common.h | 31 +++++++++++++++++++++++++++++++
arch/x86/kernel/cpu/Makefile | 4 ++++
arch/x86/kernel/cpu/pqr_common.c | 9 +++++++++
include/linux/perf_event.h | 2 ++
7 files changed, 55 insertions(+), 27 deletions(-)
create mode 100644 arch/x86/include/asm/pqr_common.h
create mode 100644 arch/x86/kernel/cpu/pqr_common.c

diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index a494fa3..7b81e6a 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -160,6 +160,12 @@ config X86
select ARCH_USES_HIGH_VMA_FLAGS if X86_INTEL_MEMORY_PROTECTION_KEYS
select ARCH_HAS_PKEYS if X86_INTEL_MEMORY_PROTECTION_KEYS

+config INTEL_RDT
+ def_bool y
+ depends on PERF_EVENTS && CPU_SUP_INTEL
+ ---help---
+ Enable Resource Director Technology for Intel Xeon Microprocessors.
+
config INSTRUCTION_DECODER
def_bool y
depends on KPROBES || PERF_EVENTS || UPROBES
diff --git a/arch/x86/events/intel/Makefile b/arch/x86/events/intel/Makefile
index 3660b2c..7e610bf 100644
--- a/arch/x86/events/intel/Makefile
+++ b/arch/x86/events/intel/Makefile
@@ -1,4 +1,4 @@
-obj-$(CONFIG_CPU_SUP_INTEL) += core.o bts.o cqm.o
+obj-$(CONFIG_CPU_SUP_INTEL) += core.o bts.o
obj-$(CONFIG_CPU_SUP_INTEL) += ds.o knc.o
obj-$(CONFIG_CPU_SUP_INTEL) += lbr.o p4.o p6.o pt.o
obj-$(CONFIG_PERF_EVENTS_INTEL_RAPL) += intel-rapl.o
@@ -7,3 +7,4 @@ obj-$(CONFIG_PERF_EVENTS_INTEL_UNCORE) += intel-uncore.o
intel-uncore-objs := uncore.o uncore_nhmex.o uncore_snb.o uncore_snbep.o
obj-$(CONFIG_PERF_EVENTS_INTEL_CSTATE) += intel-cstate.o
intel-cstate-objs := cstate.o
+obj-$(CONFIG_INTEL_RDT) += cqm.o
diff --git a/arch/x86/events/intel/cqm.c b/arch/x86/events/intel/cqm.c
index afd60dd..8457dd0 100644
--- a/arch/x86/events/intel/cqm.c
+++ b/arch/x86/events/intel/cqm.c
@@ -7,40 +7,15 @@
#include <linux/perf_event.h>
#include <linux/slab.h>
#include <asm/cpu_device_id.h>
+#include <asm/pqr_common.h>
#include "../perf_event.h"

-#define MSR_IA32_PQR_ASSOC 0x0c8f
#define MSR_IA32_QM_CTR 0x0c8e
#define MSR_IA32_QM_EVTSEL 0x0c8d

static u32 cqm_max_rmid = -1;
static unsigned int cqm_l3_scale; /* supposedly cacheline size */

-/**
- * struct intel_pqr_state - State cache for the PQR MSR
- * @rmid: The cached Resource Monitoring ID
- * @closid: The cached Class Of Service ID
- *
- * The upper 32 bits of MSR_IA32_PQR_ASSOC contain closid and the
- * lower 10 bits rmid. The update to MSR_IA32_PQR_ASSOC always
- * contains both parts, so we need to cache them.
- *
- * The cache also helps to avoid pointless updates if the value does
- * not change.
- */
-struct intel_pqr_state {
- u32 rmid;
- u32 closid;
-};
-
-/*
- * The cached intel_pqr_state is strictly per CPU and can never be
- * updated from a remote CPU. Both functions which modify the state
- * (intel_cqm_event_start and intel_cqm_event_stop) are called with
- * interrupts disabled, which is sufficient for the protection.
- */
-static DEFINE_PER_CPU(struct intel_pqr_state, pqr_state);
-
/*
* Updates caller cpu's cache.
*/
diff --git a/arch/x86/include/asm/pqr_common.h b/arch/x86/include/asm/pqr_common.h
new file mode 100644
index 0000000..0c2001b
--- /dev/null
+++ b/arch/x86/include/asm/pqr_common.h
@@ -0,0 +1,31 @@
+#ifndef _X86_PQR_COMMON_H_
+#define _X86_PQR_COMMON_H_
+
+#if defined(CONFIG_INTEL_RDT)
+
+#include <linux/types.h>
+#include <asm/percpu.h>
+
+#define MSR_IA32_PQR_ASSOC 0x0c8f
+
+/**
+ * struct intel_pqr_state - State cache for the PQR MSR
+ * @rmid: The cached Resource Monitoring ID
+ * @closid: The cached Class Of Service ID
+ *
+ * The upper 32 bits of MSR_IA32_PQR_ASSOC contain closid and the
+ * lower 10 bits rmid. The update to MSR_IA32_PQR_ASSOC always
+ * contains both parts, so we need to cache them.
+ *
+ * The cache also helps to avoid pointless updates if the value does
+ * not change.
+ */
+struct intel_pqr_state {
+ u32 rmid;
+ u32 closid;
+};
+
+DECLARE_PER_CPU(struct intel_pqr_state, pqr_state);
+
+#endif
+#endif
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index 4a8697f..87e6279 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -34,6 +34,10 @@ obj-$(CONFIG_CPU_SUP_CENTAUR) += centaur.o
obj-$(CONFIG_CPU_SUP_TRANSMETA_32) += transmeta.o
obj-$(CONFIG_CPU_SUP_UMC_32) += umc.o

+ifdef CONFIG_CPU_SUP_INTEL
+obj-$(CONFIG_INTEL_RDT) += pqr_common.o
+endif
+
obj-$(CONFIG_X86_MCE) += mcheck/
obj-$(CONFIG_MTRR) += mtrr/
obj-$(CONFIG_MICROCODE) += microcode/
diff --git a/arch/x86/kernel/cpu/pqr_common.c b/arch/x86/kernel/cpu/pqr_common.c
new file mode 100644
index 0000000..9eff5d9
--- /dev/null
+++ b/arch/x86/kernel/cpu/pqr_common.c
@@ -0,0 +1,9 @@
+#include <asm/pqr_common.h>
+
+/*
+ * The cached intel_pqr_state is strictly per CPU and can never be
+ * updated from a remote CPU. Both functions which modify the state
+ * (intel_cqm_event_start and intel_cqm_event_stop) are called with
+ * interrupts disabled, which is sufficient for the protection.
+ */
+DEFINE_PER_CPU(struct intel_pqr_state, pqr_state);
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 8bb1532..3a847bf 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -118,6 +118,7 @@ struct hw_perf_event {
/* for tp_event->class */
struct list_head tp_list;
};
+#ifdef CONFIG_INTEL_RDT
struct { /* intel_cqm */
int cqm_state;
u32 cqm_rmid;
@@ -125,6 +126,7 @@ struct hw_perf_event {
struct list_head cqm_groups_entry;
struct list_head cqm_group_entry;
};
+#endif
struct { /* itrace */
int itrace_started;
};
--
2.8.0.rc3.226.g39d4020