[PATCH v2 08/33] x86/intel_rdt: Add Class of service management
From: Fenghua Yu
Date: Thu Sep 08 2016 - 03:03:40 EST
From: Vikas Shivappa <vikas.shivappa@xxxxxxxxxxxxxxx>
Add data structures and APIs to support Class of Service (closid)
management. A new clos_cbm table keeps a 1:1 mapping between a closid
and its capacity bit mask (cbm), together with a usage count for each
closid. Each task is associated with one closid at a time, and this
patch adds a new field, closid, to task_struct to keep track of it.
Signed-off-by: Vikas Shivappa <vikas.shivappa@xxxxxxxxxxxxxxx>
Signed-off-by: Fenghua Yu <fenghua.yu@xxxxxxxxx>
Reviewed-by: Tony Luck <tony.luck@xxxxxxxxx>
---
arch/x86/include/asm/intel_rdt.h | 12 ++++++
arch/x86/kernel/cpu/intel_rdt.c | 81 +++++++++++++++++++++++++++++++++++++++-
2 files changed, 91 insertions(+), 2 deletions(-)
create mode 100644 arch/x86/include/asm/intel_rdt.h
diff --git a/arch/x86/include/asm/intel_rdt.h b/arch/x86/include/asm/intel_rdt.h
new file mode 100644
index 0000000..68bab26
--- /dev/null
+++ b/arch/x86/include/asm/intel_rdt.h
@@ -0,0 +1,12 @@
+#ifndef _RDT_H_
+#define _RDT_H_
+
+#ifdef CONFIG_INTEL_RDT
+
+/*
+ * struct clos_cbm_table - per-CLOSid bookkeeping.
+ * One entry per Class of Service ID; see cctable in intel_rdt.c.
+ */
+struct clos_cbm_table {
+ unsigned long cbm; /* capacity bit mask (CBM) for this CLOSid */
+ unsigned int clos_refcnt; /* number of users holding this CLOSid */
+};
+
+#endif
+#endif
diff --git a/arch/x86/kernel/cpu/intel_rdt.c b/arch/x86/kernel/cpu/intel_rdt.c
index fcd0642..b25940a 100644
--- a/arch/x86/kernel/cpu/intel_rdt.c
+++ b/arch/x86/kernel/cpu/intel_rdt.c
@@ -21,17 +21,94 @@
*/
#include <linux/slab.h>
#include <linux/err.h>
+#include <asm/intel_rdt.h>
+
+/*
+ * cctable maintains a 1:1 mapping between a CLOSid and its cache
+ * capacity bit mask (cbm), plus a reference count of users
+ * (struct clos_cbm_table).
+ */
+static struct clos_cbm_table *cctable;
+/*
+ * Bitmap of allocated CLOSids: a set bit means the CLOSid is in use
+ * (set in closid_alloc(), cleared in closid_free()).
+ */
+unsigned long *closmap;
+/* Serializes CLOSid allocation and refcounting; see lockdep asserts. */
+static DEFINE_MUTEX(rdtgroup_mutex);
+
+/*
+ * closid_get() - take a reference on @closid.
+ *
+ * Caller must hold rdtgroup_mutex (checked via lockdep).
+ */
+static inline void closid_get(u32 closid)
+{
+ struct clos_cbm_table *cct = &cctable[closid];
+
+ lockdep_assert_held(&rdtgroup_mutex);
+
+ cct->clos_refcnt++;
+}
+
+/*
+ * closid_alloc() - allocate an unused CLOSid and take a reference on it.
+ * @closid: out parameter; set to the allocated id on success.
+ *
+ * Returns 0 on success, -ENOSPC when all CLOSids are already in use.
+ * Caller must hold rdtgroup_mutex (checked via lockdep).
+ */
+static int closid_alloc(u32 *closid)
+{
+ u32 maxid;
+ u32 id;
+
+ lockdep_assert_held(&rdtgroup_mutex);
+
+ maxid = boot_cpu_data.x86_cache_max_closid;
+ /* find_first_zero_bit() returns maxid when no free bit exists. */
+ id = find_first_zero_bit(closmap, maxid);
+ if (id == maxid)
+ return -ENOSPC;
+
+ set_bit(id, closmap);
+ closid_get(id);
+ *closid = id;
+
+ return 0;
+}
+
+/*
+ * closid_free() - mark @closid as unused and clear its cached bit mask.
+ *
+ * NOTE(review): no lockdep assert here, unlike closid_get()/closid_put();
+ * in this patch it is only reached via closid_put(), which asserts that
+ * rdtgroup_mutex is held.
+ */
+static inline void closid_free(u32 closid)
+{
+ clear_bit(closid, closmap);
+ cctable[closid].cbm = 0;
+}
+
+/*
+ * closid_put() - drop a reference on @closid; free the id when the
+ * refcount reaches zero.
+ *
+ * Caller must hold rdtgroup_mutex (checked via lockdep). WARNs and
+ * returns early on an unbalanced put (refcount already zero).
+ */
+static void closid_put(u32 closid)
+{
+ struct clos_cbm_table *cct = &cctable[closid];
+
+ lockdep_assert_held(&rdtgroup_mutex);
+ if (WARN_ON(!cct->clos_refcnt))
+ return;
+
+ if (!--cct->clos_refcnt)
+ closid_free(closid);
+}
static int __init intel_rdt_late_init(void)
{
struct cpuinfo_x86 *c = &boot_cpu_data;
+ u32 maxid;
+ int err = 0, size;
if (!cpu_has(c, X86_FEATURE_CAT_L3))
return -ENODEV;
- pr_info("Intel cache allocation detected\n");
+ maxid = c->x86_cache_max_closid;
- return 0;
+ /* One clos_cbm_table entry per supported CLOSid. */
+ size = maxid * sizeof(struct clos_cbm_table);
+ cctable = kzalloc(size, GFP_KERNEL);
+ if (!cctable) {
+ err = -ENOMEM;
+ goto out_err;
+ }
+
+ /* Bitmap with one bit per CLOSid, tracking which ids are in use. */
+ size = BITS_TO_LONGS(maxid) * sizeof(long);
+ closmap = kzalloc(size, GFP_KERNEL);
+ if (!closmap) {
+ /*
+ * NOTE(review): cctable is freed but not reset to NULL on
+ * this path — TODO confirm nothing later tests cctable.
+ */
+ kfree(cctable);
+ err = -ENOMEM;
+ goto out_err;
+ }
+
+ pr_info("Intel cache allocation enabled\n");
+ /* Success also falls through to out_err with err == 0. */
+out_err:
+
+ return err;
}
late_initcall(intel_rdt_late_init);
--
2.5.0