Re: [PATCH 30/32] x86/intel_rdt_rdtgroup.c: Process schemas input from rscctrl interface

From: David Carrillo-Cisneros
Date: Wed Jul 13 2016 - 20:41:47 EST


> +static int get_res_type(char **res, enum resource_type *res_type)
> +{
> + char *tok;
> +
> + tok = strsep(res, ":");
> + if (tok == NULL)
> + return -EINVAL;
> +
> + if (!strcmp(tok, "L3")) {

Maybe use strstrip() to allow more readable input, e.g. "L3 : <schema>"?
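Something like this (untested, just to illustrate the strstrip() idea;
the rest of the function stays as is):

static int get_res_type(char **res, enum resource_type *res_type)
{
	char *tok;

	tok = strsep(res, ":");
	if (tok == NULL)
		return -EINVAL;

	/* tolerate spaces around the resource name, e.g. "L3 : <schema>" */
	tok = strstrip(tok);

	if (!strcmp(tok, "L3")) {
		*res_type = RESOURCE_L3;
		return 0;
	}

	return -EINVAL;
}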

> + *res_type = RESOURCE_L3;
> + return 0;
> + }
> +
> + return -EINVAL;
> +}
> +
> +static int divide_resources(char *buf, char *resources[RESOURCE_NUM])
> +{
> + char *tok;
> + unsigned int resource_num = 0;
> + int ret = 0;
> + char *res;
> + char *res_block;
> + size_t size;
> + enum resource_type res_type;
> +
> + size = strlen(buf) + 1;
> + res = kzalloc(size, GFP_KERNEL);
> + if (!res) {
> + ret = -ENOSPC;

-ENOMEM? -ENOSPC means "no space left on device"; a failed kzalloc()
is conventionally reported as -ENOMEM.

> +
> + res_block = res;
> + ret = get_res_type(&res_block, &res_type);
> + if (ret) {
> + pr_info("Unknown resource type!");
> + goto out;
> + }

Does this work if the input has no ":"? strsep() sets res_block to NULL
in that case (e.g. for an input of just "L3"), so don't you need to
check res_block before parsing the rest?
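A minimal, untested sketch of the extra check I have in mind (strsep()
leaves res_block == NULL when the delimiter is missing):

	res_block = res;
	ret = get_res_type(&res_block, &res_type);
	if (ret || !res_block) {
		pr_info("Unknown or malformed resource type!\n");
		ret = -EINVAL;
		goto out;
	}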

> +static int get_cache_schema(char *buf, struct cache_resource *l, int level,
> + struct rdtgroup *rdtgrp)
> +{
> + char *tok, *tok_cache_id;
> + int ret;
> + int domain_num;
> + int input_domain_num;
> + int len;
> + unsigned int input_cache_id;
> + unsigned int cid;
> + unsigned int leaf;
> +
> + if (!cat_enabled(level) && strcmp(buf, ";")) {
> + pr_info("Disabled resource should have empty schema\n");
> + return -EINVAL;
> + }
> +
> + len = strlen(buf);
> + /*
> + * Translate cache id based cbm from one line string with format
> + * "<cache prefix>:<cache id0>=xxxx;<cache id1>=xxxx;..." for
> + * disabled cdp.
> + * Or
> + * "<cache prefix>:<cache id0>=xxxxx,xxxxx;<cache id1>=xxxxx,xxxxx;..."
> + * for enabled cdp.
> + */
> + input_domain_num = 0;
> + while ((tok = strsep(&buf, ";")) != NULL) {
> + tok_cache_id = strsep(&tok, "=");
> + if (tok_cache_id == NULL)
> + goto cache_id_err;

What if there is no "="? strsep() then returns the whole token and
leaves tok == NULL, so the NULL check on tok_cache_id alone won't catch
it. It would also be nice to allow spaces around the "=".
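Roughly something like this (untested, reusing the variables already in
the loop):

	while ((tok = strsep(&buf, ";")) != NULL) {
		tok_cache_id = strsep(&tok, "=");
		/* strsep() leaves tok == NULL when there is no "=" */
		if (tok_cache_id == NULL || tok == NULL)
			goto cache_id_err;

		/* allow "<cache id> = <cbm>" */
		tok_cache_id = strstrip(tok_cache_id);
		tok = strstrip(tok);

		ret = kstrtouint(tok_cache_id, 16, &input_cache_id);
		if (ret)
			goto cache_id_err;
		...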

> +
> + ret = kstrtouint(tok_cache_id, 16, &input_cache_id);
> + if (ret)
> + goto cache_id_err;
> +
> + leaf = level_to_leaf(level);

Why is this in the loop? level doesn't change between iterations, so
level_to_leaf() could be hoisted out of it, as sketched below.
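i.e. (untested):

	leaf = level_to_leaf(level);

	input_domain_num = 0;
	while ((tok = strsep(&buf, ";")) != NULL) {
		...
		cid = cache_domains[leaf].shared_cache_id[input_domain_num];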

> + cid = cache_domains[leaf].shared_cache_id[input_domain_num];
> + if (input_cache_id != cid)
> + goto cache_id_err;

So schemata must be present for all cache ids and sorted in increasing
order of cache id? What's the point of having the cache id in the input
then?

> +
> +/*
> + * Check if the reference counts are all ones in rdtgrp's domain.
> + */
> +static bool one_refcnt(struct rdtgroup *rdtgrp, int domain)
> +{
> + int refcnt;
> + int closid;
> +
> + closid = rdtgrp->resource.closid[domain];
> + if (cat_l3_enabled) {

If cat_l3_enabled == false, are the reference counts always considered
to be one?

> + * Go through all shared domains. Check if there is an existing closid
> + * in all rdtgroups that matches l3 cbms in the shared
> + * domain. If find one, reuse the closid. Otherwise, allocate a new one.
> + */
> +static int get_rdtgroup_resources(struct resources *resources_set,
> + struct rdtgroup *rdtgrp)
> +{
> + struct cache_resource *l3;
> + bool l3_cbm_found;
> + struct list_head *l;
> + struct rdtgroup *r;
> + u64 cbm;
> + int rdt_closid[MAX_CACHE_DOMAINS];
> + int rdt_closid_type[MAX_CACHE_DOMAINS];
> + int domain;
> + int closid;
> + int ret;
> +
> + l3 = resources_set->l3;

l3 is NULL if cat_l3_enabled == false, but it seems like it may still
be used later anyway.

> + memcpy(rdt_closid, rdtgrp->resource.closid,
> + shared_domain_num * sizeof(int));
> + for (domain = 0; domain < shared_domain_num; domain++) {
> + if (rdtgrp->resource.valid) {
> + /*
> + * If current rdtgrp is the only user of cbms in
> + * this domain, will replace the cbms with the input
> + * cbms and reuse its own closid.
> + */
> + if (one_refcnt(rdtgrp, domain)) {
> + closid = rdtgrp->resource.closid[domain];
> + rdt_closid[domain] = closid;
> + rdt_closid_type[domain] = REUSED_OWN_CLOSID;
> + continue;
> + }
> +
> + l3_cbm_found = true;
> +
> + if (cat_l3_enabled)
> + l3_cbm_found = cbm_found(l3, rdtgrp, domain,
> + CACHE_LEVEL3);
> +
> + /*
> + * If the cbms in this shared domain are already
> + * existing in current rdtgrp, record the closid
> + * and its type.
> + */
> + if (l3_cbm_found) {
> + closid = rdtgrp->resource.closid[domain];
> + rdt_closid[domain] = closid;
> + rdt_closid_type[domain] = CURRENT_CLOSID;

A new l3 resource will be created even when cat_l3_enabled is false.


> +static void init_cache_resource(struct cache_resource *l)
> +{
> + l->cbm = NULL;
> + l->cbm2 = NULL;

Is cbm2 the data bitmask for when CDP is enabled? If so, a more
descriptive name may help.

> + l->closid = NULL;
> + l->refcnt = NULL;
> +}
> +
> +static void free_cache_resource(struct cache_resource *l)
> +{
> + kfree(l->cbm);
> + kfree(l->cbm2);
> + kfree(l->closid);
> + kfree(l->refcnt);

This function is used to clean up after alloc_cache_resource() in the
error path of get_resources(), where it's not necessarily true that all
of l's members were allocated. kfree(NULL) is a no-op, but that only
helps if the not-yet-allocated pointers are guaranteed to be NULL at
that point.
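One way to make that robust (untested sketch; get_l3_resource() and the
exact allocations are just my guess at the shape of the caller) is to
run init_cache_resource() first, so every pointer starts out NULL and
the kfree() calls above become no-ops for members that were never
allocated:

	/* hypothetical helper, only to illustrate the init-before-alloc pattern */
	static int get_l3_resource(struct cache_resource *l)
	{
		int ret;

		init_cache_resource(l);		/* every pointer starts as NULL */

		l->cbm = kcalloc(shared_domain_num, sizeof(*l->cbm), GFP_KERNEL);
		if (!l->cbm) {
			ret = -ENOMEM;
			goto err;
		}

		/* ... remaining allocations ... */

		return 0;
	err:
		/* safe even mid-allocation: kfree(NULL) is a no-op */
		free_cache_resource(l);
		return ret;
	}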