On Mon, 5 Jun 2017, Anju T Sudhakar wrote:
+static void cleanup_all_core_imc_memory(struct imc_pmu *pmu_ptr)
+{
+	struct imc_mem_info *ptr = pmu_ptr->mem_info;
+
+	if (!ptr)
+		return;

That's pointless.

+	for (; ptr; ptr++) {

	for (ptr = pmu_ptr->mem_info; ptr; ptr++) {

will do the right thing, and kfree can be called with a NULL pointer.

+		if (ptr->vbase[0] != 0)
+			free_pages(ptr->vbase[0], 0);
+	}
+	kfree(pmu_ptr->mem_info);
+}
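Something along these lines would cover both points. This is only a sketch
and assumes mem_info is a zero-terminated array; the hunk does not show how
the array is actually sized:

static void cleanup_all_core_imc_memory(struct imc_pmu *pmu_ptr)
{
	struct imc_mem_info *ptr;

	/*
	 * Sketch: assumes a zero-terminated mem_info array. A NULL
	 * mem_info simply makes the loop body run zero times.
	 */
	for (ptr = pmu_ptr->mem_info; ptr && ptr->vbase[0]; ptr++)
		free_pages(ptr->vbase[0], 0);

	/* kfree() copes with a NULL pointer, no extra check needed. */
	kfree(pmu_ptr->mem_info);
}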
+
+/* Enabling of Core Engine needs a scom operation */
+static void core_imc_control_enable(int *cpu_opal_rc)
+{
+	int rc;
+
+	rc = opal_imc_counters_start(OPAL_IMC_COUNTERS_CORE);
+	if (rc)
+		cpu_opal_rc[smp_processor_id()] = 1;
+}
+
+/*
+ * Disabling of IMC Core Engine needs a scom operation
+ */
+static void core_imc_control_disable(int *cpu_opal_rc)
+{
+	int rc;
+
+	rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE);
+	if (rc)
+		cpu_opal_rc[smp_processor_id()] = 1;
+}
+
+int core_imc_control(int operation)
+{
+	int cpu, *cpus_opal_rc;
+
+	/* Memory for OPAL call return value. */
+	cpus_opal_rc = kzalloc((sizeof(int) * nr_cpu_ids), GFP_KERNEL);
+	if (!cpus_opal_rc)
+		return -ENOMEM;
+
+	/*
+	 * Enable or disable the core IMC PMU on each core using the
+	 * core_imc_cpumask.
+	 */
+	switch (operation) {
+
+	case IMC_COUNTER_DISABLE:
+		on_each_cpu_mask(&core_imc_cpumask,
+				 (smp_call_func_t)core_imc_control_disable,
+				 cpus_opal_rc, 1);
+		break;
+	case IMC_COUNTER_ENABLE:
+		on_each_cpu_mask(&core_imc_cpumask,
+				 (smp_call_func_t)core_imc_control_enable,
+				 cpus_opal_rc, 1);
+		break;
+	default:
+		goto fail;
+	}
+	/* Check return value array for any OPAL call failure */
+	for_each_cpu(cpu, &core_imc_cpumask) {
+		if (cpus_opal_rc[cpu])
+			goto fail;
+	}
+	return 0;
+fail:
+	if (cpus_opal_rc)
+		kfree(cpus_opal_rc);
+	return -EINVAL;
+}
Interesting. You copied the other function and then implemented it
differently.
But, what's worse is that this is just duplicated code for no value. Both
the nest and core control functions call
opal_imc_counters_xxxx(WHICH_COUNTER);
So the obvious thing to do is:
static struct cpumask imc_result_mask;
static DEFINE_MUTEX(imc_control_mutex);

static void opal_imc_XXX(void *which)
{
	if (opal_imc_counters_XXX((unsigned long) which))
		cpumask_set_cpu(smp_processor_id(), &imc_result_mask);
}

static int imc_control(unsigned long which, bool start)
{
	smp_call_func_t fn;
	int res;

	mutex_lock(&imc_control_mutex);
	memset(&imc_result_mask, 0, sizeof(imc_result_mask));

	switch (which) {
	case OPAL_IMC_COUNTERS_CORE:
	case OPAL_IMC_COUNTERS_NEST:
		break;
	default:
		res = -EINVAL;
		goto out;
	}

	fn = start ? opal_imc_start : opal_imc_stop;
	on_each_cpu_mask(&core_imc_cpumask, fn, (void *) which, 1);

	res = cpumask_empty(&imc_result_mask) ? 0 : -ENODEV;
out:
	mutex_unlock(&imc_control_mutex);
	return res;
}
That would allow sharing of too much code, right?
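As a usage sketch (the wrapper names below are illustrative, not taken from
the patch), the core and nest entry points would then boil down to:

int core_imc_control(bool start)
{
	return imc_control(OPAL_IMC_COUNTERS_CORE, start);
}

int nest_imc_control(bool start)
{
	return imc_control(OPAL_IMC_COUNTERS_NEST, start);
}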
+/*
+ * core_imc_mem_init : Initializes memory for the current core.
+ *
+ * Uses alloc_pages_exact_nid() and uses the returned address as an argument to
+ * an opal call to configure the pdbar. The address sent as an argument is
+ * converted to physical address before the opal call is made. This is the
+ * base address at which the core imc counters are populated.
+ */
+static int __meminit core_imc_mem_init(int cpu, int size)
+{
+	int phys_id, rc = 0, core_id = (cpu / threads_per_core);
+	struct imc_mem_info *mem_info = NULL;

What's that NULL initialization for?

+
+	phys_id = topology_physical_package_id(cpu);
+	/*
+	 * alloc_pages_exact_nid() will allocate memory for core in the
+	 * local node only.
+	 */
+	mem_info = &core_imc_pmu->mem_info[core_id];
+	mem_info->id = core_id;
+	mem_info->vbase[0] = (u64) alloc_pages_exact_nid(phys_id,
+				(size_t)size, GFP_KERNEL | __GFP_ZERO);

That allocation is guaranteed not to fail?
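For comparison, a sketch of the same hunk with both of those addressed; the
-ENOMEM return is an assumption, since the rest of the function is not
quoted here:

static int __meminit core_imc_mem_init(int cpu, int size)
{
	int phys_id, rc = 0, core_id = (cpu / threads_per_core);
	struct imc_mem_info *mem_info;	/* no NULL initialization needed */

	phys_id = topology_physical_package_id(cpu);
	mem_info = &core_imc_pmu->mem_info[core_id];
	mem_info->id = core_id;

	/* alloc_pages_exact_nid() allocates on the local node only. */
	mem_info->vbase[0] = (u64) alloc_pages_exact_nid(phys_id,
				(size_t)size, GFP_KERNEL | __GFP_ZERO);
	if (!mem_info->vbase[0])
		return -ENOMEM;		/* assumed error handling */

	/* ... remainder of the function as in the patch ... */
	return rc;
}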