[PATCH 5/6] x86/intel_rdt: Class of service management for code data prioritization
From: Vikas Shivappa
Date: Sun Aug 23 2015 - 18:47:22 EST
Add support to manage the CLOSid (class of service ID) for code data
prioritization (CDP). This includes allocating and freeing CLOSids as
well as the closid_get() and closid_put() reference-counting helpers.
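For illustration only (not part of this patch), the helpers are meant to
be used roughly as below. The example_switch_closid() wrapper is a
hypothetical caller; the real call sites live elsewhere in the series:

/*
 * Hypothetical caller, for illustration only: move a group from a
 * shared CLOSid to a freshly allocated one. closid_alloc() takes the
 * initial reference; closid_put() drops the reference on the old
 * CLOSid and frees it once its refcount reaches zero.
 */
static int example_switch_closid(struct intel_rdt *ir)
{
	u32 old_closid = ir->closid;
	int err;

	lockdep_assert_held(&rdt_group_mutex);

	err = closid_alloc(ir);		/* sets ir->closid on success */
	if (err)
		return err;		/* -ENOSPC: no free CLOSid left */

	closid_put(old_closid);		/* release reference on the old one */
	return 0;
}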
During mount, if the mode is switched between CDP and cache allocation
only, all CLOSids are freed. When a new cgroup is created, it inherits
its parent's CLOSid in CDP just as it does in cache allocation.
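A minimal sketch of that inheritance, assuming a css_alloc-style
callback (the example_css_create() name is hypothetical):

/*
 * Hypothetical css_alloc-style path, for illustration only: the new
 * group shares its parent's CLOSid and only bumps the refcount; no new
 * CLOSid is allocated until the group is given its own bitmask.
 */
static void example_css_create(struct intel_rdt *ir, struct intel_rdt *parent)
{
	mutex_lock(&rdt_group_mutex);
	ir->closid = parent->closid;
	closid_get(ir->closid);
	mutex_unlock(&rdt_group_mutex);
}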
---
arch/x86/kernel/cpu/intel_rdt.c | 127 +++++++++++++++++++++++++---------------
1 file changed, 81 insertions(+), 46 deletions(-)
diff --git a/arch/x86/kernel/cpu/intel_rdt.c b/arch/x86/kernel/cpu/intel_rdt.c
index 155ac51..285db1e 100644
--- a/arch/x86/kernel/cpu/intel_rdt.c
+++ b/arch/x86/kernel/cpu/intel_rdt.c
@@ -166,6 +166,85 @@ static void closcbm_map_dump(void)
}
}
+static void closid_map_init(void)
+{
+ u32 maxid = boot_cpu_data.x86_cache_max_closid;
+
+ bitmap_zero(rdtss_info.closmap, maxid);
+}
+
+static inline void closid_get(u32 closid)
+{
+ lockdep_assert_held(&rdt_group_mutex);
+
+ if (!rdtss_info.cdp_enable)
+ cat_cm_map[closid].clos_refcnt++;
+ else
+ cdp_cm_map[closid].clos_refcnt++;
+}
+
+static int closid_alloc(struct intel_rdt *ir)
+{
+ u32 maxid;
+ u32 id;
+
+ lockdep_assert_held(&rdt_group_mutex);
+
+ maxid = boot_cpu_data.x86_cache_max_closid;
+ id = find_next_zero_bit(rdtss_info.closmap, maxid, 0);
+ if (id == maxid)
+ return -ENOSPC;
+
+ set_bit(id, rdtss_info.closmap);
+ closid_get(id);
+ ir->closid = id;
+
+ return 0;
+}
+
+static inline void closid_free(u32 closid)
+{
+ clear_bit(closid, rdtss_info.closmap);
+ if (!rdtss_info.cdp_enable) {
+ cat_cm_map[closid].cache_mask = 0;
+ } else {
+ cdp_cm_map[closid].dcache_mask = 0;
+ cdp_cm_map[closid].icache_mask = 0;
+ }
+}
+
+static inline void closid_cat_put(u32 closid)
+{
+ struct cat_clos_mask_map *ccm = &cat_cm_map[closid];
+
+ lockdep_assert_held(&rdt_group_mutex);
+ if (WARN_ON(!ccm->clos_refcnt))
+ return;
+
+ if (!--ccm->clos_refcnt)
+ closid_free(closid);
+}
+
+static inline void closid_cdp_put(u32 closid)
+{
+ struct cdp_clos_mask_map *ccm = &cdp_cm_map[closid];
+
+ lockdep_assert_held(&rdt_group_mutex);
+ if (WARN_ON(!ccm->clos_refcnt))
+ return;
+
+ if (!--ccm->clos_refcnt)
+ closid_free(closid);
+}
+
+static inline void closid_put(u32 closid)
+{
+ if (!rdtss_info.cdp_enable)
+ closid_cat_put(closid);
+ else
+ closid_cdp_put(closid);
+}
+
static void cdp_cm_map_reset(int maxid, unsigned long max_cbm_mask)
{
size_t sizeb;
@@ -266,6 +345,8 @@ static void rdt_css_mount(void* info)
else
cdp_disable();
+ closid_map_init();
+
rdtss_info.cdp_enable = enable_cdp;
mutex_unlock(&rdt_group_mutex);
}
@@ -288,52 +369,6 @@ static inline void rdt_cdp_init(int cdp_maxid, unsigned long max_cbm_mask)
rdtss_info.cdp_supported = true;
}
-static inline void closid_get(u32 closid)
-{
- struct cat_clos_mask_map *ccm = &cat_cm_map[closid];
-
- lockdep_assert_held(&rdt_group_mutex);
-
- ccm->clos_refcnt++;
-}
-
-static int closid_alloc(struct intel_rdt *ir)
-{
- u32 maxid;
- u32 id;
-
- lockdep_assert_held(&rdt_group_mutex);
-
- maxid = boot_cpu_data.x86_cache_max_closid;
- id = find_next_zero_bit(rdtss_info.closmap, maxid, 0);
- if (id == maxid)
- return -ENOSPC;
-
- set_bit(id, rdtss_info.closmap);
- closid_get(id);
- ir->closid = id;
-
- return 0;
-}
-
-static inline void closid_free(u32 closid)
-{
- clear_bit(closid, rdtss_info.closmap);
- cat_cm_map[closid].cache_mask = 0;
-}
-
-static inline void closid_put(u32 closid)
-{
- struct cat_clos_mask_map *ccm = &cat_cm_map[closid];
-
- lockdep_assert_held(&rdt_group_mutex);
- if (WARN_ON(!ccm->clos_refcnt))
- return;
-
- if (!--ccm->clos_refcnt)
- closid_free(closid);
-}
-
void __intel_rdt_sched_in(void)
{
struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);
--
1.9.1