There's CAT in your subject but a mix of cat_* and rdt_* names in the
code; make up your minds already on what you want to call this stuff.
On Fri, May 01, 2015 at 06:36:37PM -0700, Vikas Shivappa wrote:
+static void rdt_free_closid(unsigned int clos)
+{
+
Superfluous whitespace; drop the blank line after the opening brace.
+ lockdep_assert_held(&rdt_group_mutex);
+
+ clear_bit(clos, rdtss_info.closmap);
+}
+static inline bool cbm_is_contiguous(unsigned long var)
+{
+ unsigned long first_bit, zero_bit;
+ unsigned long maxcbm = MAX_CBM_LENGTH;
Flip these two lines; longest declaration first.
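I.e. something like:

	unsigned long maxcbm = MAX_CBM_LENGTH;
	unsigned long first_bit, zero_bit;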
+
+ if (!var)
+ return false;
+
+ first_bit = find_next_bit(&var, maxcbm, 0);
What was wrong with find_first_bit()?
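I.e. simply:

	first_bit = find_first_bit(&var, maxcbm);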
+ zero_bit = find_next_zero_bit(&var, maxcbm, first_bit);
+
+ if (find_next_bit(&var, maxcbm, zero_bit) < maxcbm)
+ return false;
+
+ return true;
+}
+
+static int cat_cbm_read(struct seq_file *m, void *v)
+{
+ struct intel_rdt *ir = css_rdt(seq_css(m));
+
+ seq_printf(m, "%08lx\n", ccmap[ir->clos].cache_mask);
Inconsistent spacing; you mostly have a blank line before the return
statement, but not here.
+ return 0;
+}
+
+static int validate_cbm(struct intel_rdt *ir, unsigned long cbmvalue)
+{
+ struct intel_rdt *par, *c;
+ struct cgroup_subsys_state *css;
+ unsigned long *cbm_tmp;
No reason not to order these on line length, is there?
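Longest first, something like:

	struct cgroup_subsys_state *css;
	struct intel_rdt *par, *c;
	unsigned long *cbm_tmp;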
+
+ if (!cbm_is_contiguous(cbmvalue)) {
+ pr_err("bitmask should have >= 1 bits and be contiguous\n");
+ return -EINVAL;
+ }
+
+ par = parent_rdt(ir);
+ cbm_tmp = &ccmap[par->clos].cache_mask;
+ if (!bitmap_subset(&cbmvalue, cbm_tmp, MAX_CBM_LENGTH))
+ return -EINVAL;
+
+ rcu_read_lock();
+ rdt_for_each_child(css, ir) {
+ c = css_rdt(css);
+ cbm_tmp = &ccmap[c->clos].cache_mask;
+ if (!bitmap_subset(cbm_tmp, &cbmvalue, MAX_CBM_LENGTH)) {
+ rcu_read_unlock();
+ pr_err("Children's mask not a subset\n");
+ return -EINVAL;
+ }
+ }
+
+ rcu_read_unlock();
Daft whitespace again.
+ return 0;
+}
+
+static bool cbm_search(unsigned long cbm, int *closid)
+{
+ int maxid = boot_cpu_data.x86_cat_closs;
+ unsigned int i;
+
+ for (i = 0; i < maxid; i++) {
+ if (bitmap_equal(&cbm, &ccmap[i].cache_mask, MAX_CBM_LENGTH)) {
+ *closid = i;
+ return true;
+ }
+ }
and again
+ return false;
+}
+
+static void cbmmap_dump(void)
+{
+ int i;
+
+ pr_debug("CBMMAP\n");
+ for (i = 0; i < boot_cpu_data.x86_cat_closs; i++)
+ pr_debug("cache_mask: 0x%x,clos_refcnt: %u\n",
+ (unsigned int)ccmap[i].cache_mask, ccmap[i].clos_refcnt);
This is missing {}
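Something like:

	for (i = 0; i < boot_cpu_data.x86_cat_closs; i++) {
		pr_debug("cache_mask: 0x%x,clos_refcnt: %u\n",
			 (unsigned int)ccmap[i].cache_mask, ccmap[i].clos_refcnt);
	}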
+}
+
+static void __cpu_cbm_update(void *info)
+{
+ unsigned int closid = *((unsigned int *)info);
+
+ wrmsrl(CBM_FROM_INDEX(closid), ccmap[closid].cache_mask);
+}
+static int cat_cbm_write(struct cgroup_subsys_state *css,
+ struct cftype *cft, u64 cbmvalue)
+{
+ struct intel_rdt *ir = css_rdt(css);
+ ssize_t err = 0;
+ unsigned long cache_mask, max_mask;
+ unsigned long *cbm_tmp;
+ unsigned int closid;
+ u32 max_cbm = boot_cpu_data.x86_cat_cbmlength;
That's just a right mess, isn't it?
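Sorting these on line length would already help:

	u32 max_cbm = boot_cpu_data.x86_cat_cbmlength;
	struct intel_rdt *ir = css_rdt(css);
	unsigned long cache_mask, max_mask;
	unsigned long *cbm_tmp;
	unsigned int closid;
	ssize_t err = 0;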
+
+ if (ir == &rdt_root_group)
+ return -EPERM;
+ bitmap_set(&max_mask, 0, max_cbm);
+
+ /*
+ * Need global mutex as cbm write may allocate a closid.
+ */
+ mutex_lock(&rdt_group_mutex);
+ bitmap_and(&cache_mask, (unsigned long *)&cbmvalue, &max_mask, max_cbm);
+ cbm_tmp = &ccmap[ir->clos].cache_mask;
+
+ if (bitmap_equal(&cache_mask, cbm_tmp, MAX_CBM_LENGTH))
+ goto out;
+
+ err = validate_cbm(ir, cache_mask);
+ if (err)
+ goto out;
+
+ /*
+ * At this point we are sure to change the cache_mask.Hence release the
+ * reference to the current CLOSid and try to get a reference for
+ * a different CLOSid.
+ */
+ __clos_put(ir->clos);
+
+ if (cbm_search(cache_mask, &closid)) {
+ ir->clos = closid;
+ __clos_get(closid);
+ } else {
+ err = rdt_alloc_closid(ir);
+ if (err)
+ goto out;
+
+ ccmap[ir->clos].cache_mask = cache_mask;
+ cbm_update_all(ir->clos);
+ }
+
+ cbmmap_dump();
+out:
+
Daft whitespace again. Also an inconsistent return paradigm: here you
use an out label, whereas in validate_cbm() you did rcu_read_unlock()
and returned from the middle.
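E.g. validate_cbm() could just as well funnel its exits through a label
(completely untested, assumes an extra local 'err'):

	rcu_read_lock();
	rdt_for_each_child(css, ir) {
		c = css_rdt(css);
		cbm_tmp = &ccmap[c->clos].cache_mask;
		if (!bitmap_subset(cbm_tmp, &cbmvalue, MAX_CBM_LENGTH)) {
			pr_err("Children's mask not a subset\n");
			err = -EINVAL;
			goto out_unlock;
		}
	}
	err = 0;

out_unlock:
	rcu_read_unlock();
	return err;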
+ mutex_unlock(&rdt_group_mutex);
+ return err;
+}
+
+static inline bool rdt_update_cpumask(int cpu)
+{
+ int phys_id = topology_physical_package_id(cpu);
+ struct cpumask *mask = &rdt_cpumask;
+ int i;
+
+ for_each_cpu(i, mask) {
+ if (phys_id == topology_physical_package_id(i))
+ return false;
+ }
+
+ cpumask_set_cpu(cpu, mask);
More daft whitespace
+ return true;
+}
+
+/*
+ * rdt_cpu_start() - If a new package has come up, update all
+ * the Cache bitmasks on the package.
+ */
+static inline void rdt_cpu_start(int cpu)
+{
+ mutex_lock(&rdt_group_mutex);
+ if (rdt_update_cpumask(cpu))
+ cbm_update_msrs(cpu);
+ mutex_unlock(&rdt_group_mutex);
+}
+
+static void rdt_cpu_exit(unsigned int cpu)
+{
+ int phys_id = topology_physical_package_id(cpu);
+ int i;
+
+ mutex_lock(&rdt_group_mutex);
+ if (!cpumask_test_and_clear_cpu(cpu, &rdt_cpumask)) {
+ mutex_unlock(&rdt_group_mutex);
+ return;
And here we return from the middle again..
+ }
+
+ for_each_online_cpu(i) {
+ if (i == cpu)
+ continue;
+
+ if (phys_id == topology_physical_package_id(i)) {
+ cpumask_set_cpu(i, &rdt_cpumask);
+ break;
+ }
+ }
+ mutex_unlock(&rdt_group_mutex);
+}
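Picking one style and sticking with it would be nice; e.g. for
rdt_cpu_exit(), completely untested:

	mutex_lock(&rdt_group_mutex);
	if (!cpumask_test_and_clear_cpu(cpu, &rdt_cpumask))
		goto out;

	for_each_online_cpu(i) {
		if (i == cpu)
			continue;

		if (phys_id == topology_physical_package_id(i)) {
			cpumask_set_cpu(i, &rdt_cpumask);
			break;
		}
	}
out:
	mutex_unlock(&rdt_group_mutex);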
/me tired and gives up..