[PATCH 23/24] x86/resctrl: Remove rdt_cdp_peer_get()
From: James Morse
Date: Fri Oct 30 2020 - 12:13:26 EST
Now that the configuration can be read from either resource, as they share
the ctrlval array, rdt_cdp_peer_get() is not needed to map the resource
and search for the corresponding domain.
Replace it with a helper to return the 'other' CODE/DATA type, and use
the existing get-config helper.
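For reference only (not part of the patch): a minimal stand-alone sketch of
the CODE/DATA flip the new helper performs. The enum is reproduced here and
its layout is assumed purely for the example:

    #include <stdio.h>

    /* Mirrors enum resctrl_conf_type; ordering assumed for this sketch. */
    enum resctrl_conf_type {
            CDP_BOTH,
            CDP_CODE,
            CDP_DATA,
    };

    /* Same mapping as the resctrl_peer_type() helper added below. */
    static enum resctrl_conf_type resctrl_peer_type(enum resctrl_conf_type my_type)
    {
            switch (my_type) {
            case CDP_CODE:
                    return CDP_DATA;
            case CDP_DATA:
                    return CDP_CODE;
            default:
            case CDP_BOTH:
                    return CDP_BOTH;
            }
    }

    int main(void)
    {
            /* A CODE schema's peer is the DATA schema sharing the ctrlval array. */
            printf("peer of CDP_CODE: %s\n",
                   resctrl_peer_type(CDP_CODE) == CDP_DATA ? "CDP_DATA" : "?");
            return 0;
    }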
Signed-off-by: James Morse <james.morse@xxxxxxx>
---
arch/x86/kernel/cpu/resctrl/rdtgroup.c | 99 ++++----------------------
1 file changed, 14 insertions(+), 85 deletions(-)
diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
index 162e415d5d09..0d561679f7e8 100644
--- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
@@ -1094,82 +1094,17 @@ static int rdtgroup_mode_show(struct kernfs_open_file *of,
return 0;
}
-/**
- * rdt_cdp_peer_get - Retrieve CDP peer if it exists
- * @r: RDT resource to which RDT domain @d belongs
- * @d: Cache instance for which a CDP peer is requested
- * @r_cdp: RDT resource that shares hardware with @r (RDT resource peer)
- * Used to return the result.
- * @d_cdp: RDT domain that shares hardware with @d (RDT domain peer)
- * Used to return the result.
- * @peer_type: The CDP configuration type of the peer resource.
- *
- * RDT resources are managed independently and by extension the RDT domains
- * (RDT resource instances) are managed independently also. The Code and
- * Data Prioritization (CDP) RDT resources, while managed independently,
- * could refer to the same underlying hardware. For example,
- * RDT_RESOURCE_L2CODE and RDT_RESOURCE_L2DATA both refer to the L2 cache.
- *
- * When provided with an RDT resource @r and an instance of that RDT
- * resource @d rdt_cdp_peer_get() will return if there is a peer RDT
- * resource and the exact instance that shares the same hardware.
- *
- * Return: 0 if a CDP peer was found, <0 on error or if no CDP peer exists.
- * If a CDP peer was found, @r_cdp will point to the peer RDT resource
- * and @d_cdp will point to the peer RDT domain.
- */
-static int rdt_cdp_peer_get(struct rdt_resource *r, struct rdt_domain *d,
- struct rdt_resource **r_cdp,
- struct rdt_domain **d_cdp,
- enum resctrl_conf_type *peer_type)
+static enum resctrl_conf_type resctrl_peer_type(enum resctrl_conf_type my_type)
{
- struct rdt_resource *_r_cdp = NULL;
- struct rdt_domain *_d_cdp = NULL;
- int ret = 0;
-
- switch (r->rid) {
- case RDT_RESOURCE_L3DATA:
- _r_cdp = &rdt_resources_all[RDT_RESOURCE_L3CODE].resctrl;
- *peer_type = CDP_CODE;
- break;
- case RDT_RESOURCE_L3CODE:
- _r_cdp = &rdt_resources_all[RDT_RESOURCE_L3DATA].resctrl;
- *peer_type = CDP_DATA;
- break;
- case RDT_RESOURCE_L2DATA:
- _r_cdp = &rdt_resources_all[RDT_RESOURCE_L2CODE].resctrl;
- *peer_type = CDP_CODE;
- break;
- case RDT_RESOURCE_L2CODE:
- _r_cdp = &rdt_resources_all[RDT_RESOURCE_L2DATA].resctrl;
- *peer_type = CDP_DATA;
- break;
+ switch (my_type) {
+ case CDP_CODE:
+ return CDP_DATA;
+ case CDP_DATA:
+ return CDP_CODE;
default:
- ret = -ENOENT;
- goto out;
- }
-
- /*
- * When a new CPU comes online and CDP is enabled then the new
- * RDT domains (if any) associated with both CDP RDT resources
- * are added in the same CPU online routine while the
- * rdtgroup_mutex is held. It should thus not happen for one
- * RDT domain to exist and be associated with its RDT CDP
- * resource but there is no RDT domain associated with the
- * peer RDT CDP resource. Hence the WARN.
- */
- _d_cdp = rdt_find_domain(_r_cdp, d->id, NULL);
- if (WARN_ON(IS_ERR_OR_NULL(_d_cdp))) {
- _r_cdp = NULL;
- _d_cdp = NULL;
- ret = -EINVAL;
+ case CDP_BOTH:
+ return CDP_BOTH;
}
-
-out:
- *r_cdp = _r_cdp;
- *d_cdp = _d_cdp;
-
- return ret;
}
/**
@@ -1250,19 +1185,16 @@ static bool __rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d
bool rdtgroup_cbm_overlaps(struct resctrl_schema *s, struct rdt_domain *d,
unsigned long cbm, int closid, bool exclusive)
{
- enum resctrl_conf_type peer_type;
+ enum resctrl_conf_type peer_type = resctrl_peer_type(s->conf_type);
struct rdt_resource *r = s->res;
- struct rdt_resource *r_cdp;
- struct rdt_domain *d_cdp;
if (__rdtgroup_cbm_overlaps(r, d, cbm, closid, s->conf_type,
exclusive))
return true;
- if (rdt_cdp_peer_get(r, d, &r_cdp, &d_cdp, &peer_type) < 0)
+ if (!resctrl_arch_get_cdp_enabled(r->rid))
return false;
-
- return __rdtgroup_cbm_overlaps(r_cdp, d_cdp, cbm, closid, peer_type, exclusive);
+ return __rdtgroup_cbm_overlaps(r, d, cbm, closid, peer_type, exclusive);
}
/**
@@ -2766,11 +2698,9 @@ static u32 cbm_ensure_valid(u32 _val, struct rdt_resource *r)
static int __init_one_rdt_domain(struct rdt_domain *d, struct resctrl_schema *s,
u32 closid)
{
+ enum resctrl_conf_type peer_type = resctrl_peer_type(s->conf_type);
enum resctrl_conf_type t = s->conf_type;
- struct rdt_resource *r_cdp = NULL;
struct resctrl_staged_config *cfg;
- enum resctrl_conf_type peer_type;
- struct rdt_domain *d_cdp = NULL;
struct rdt_resource *r = s->res;
u32 used_b = 0, unused_b = 0;
u32 peer_ctl, ctrl_val;
@@ -2778,7 +2708,6 @@ static int __init_one_rdt_domain(struct rdt_domain *d, struct resctrl_schema *s,
enum rdtgrp_mode mode;
int i;
- rdt_cdp_peer_get(r, d, &r_cdp, &d_cdp, &peer_type);
cfg = &d->staged_config[t];
cfg->have_new_ctrl = false;
cfg->new_ctrl = r->cache.shareable_bits;
@@ -2798,8 +2727,8 @@ static int __init_one_rdt_domain(struct rdt_domain *d, struct resctrl_schema *s,
* usage to ensure there is no overlap
* with an exclusive group.
*/
- if (d_cdp)
- resctrl_arch_get_config(r_cdp, d_cdp, i, peer_type, &peer_ctl);
+ if (resctrl_arch_get_cdp_enabled(r->rid))
+ resctrl_arch_get_config(r, d, i, peer_type, &peer_ctl);
else
peer_ctl = 0;
resctrl_arch_get_config(r, d, i, s->conf_type, &ctrl_val);
--
2.28.0