Re: [PATCH 08/10] x86, perf: Support sysfs files depending on SMT status

From: Stephane Eranian
Date: Wed Dec 16 2015 - 07:49:01 EST


Andi,

On Tue, Dec 15, 2015 at 4:54 PM, Andi Kleen <andi@xxxxxxxxxxxxxx> wrote:
>
> From: Andi Kleen <ak@xxxxxxxxxxxxxxx>
>
> Add a way to show different sysfs event attributes depending on
> whether HyperThreading is on or off. This is difficult to determine
> early at boot, so we just do it dynamically when the sysfs
> attribute is read.
>
The only attribute I can think of that depends on HyperThreading is
the AnyThread bit, but I do not see it used anywhere in this patch.
Is it somewhere else in the series?
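
To illustrate what I mean: I would expect a later patch to define the
HT variant of an event with the AnyThread bit set, along these lines
(a hypothetical sketch only; the event name and encoding are just for
illustration, using the EVENT_ATTR_STR_HT macro from this patch):

        /*
         * Hypothetical consumer of EVENT_ATTR_STR_HT: with SMT on, the
         * counter must observe both thread siblings, so the HT string
         * sets any=1; with SMT off the plain encoding is enough.
         */
        EVENT_ATTR_STR_HT(topdown-total-slots, td_total_slots,
                "event=0x3c,umask=0x0",         /* SMT off */
                "event=0x3c,umask=0x0,any=1");  /* SMT on */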

>
> Signed-off-by: Andi Kleen <ak@xxxxxxxxxxxxxxx>
> ---
>  arch/x86/kernel/cpu/perf_event.c | 34 ++++++++++++++++++++++++++++++++++
>  arch/x86/kernel/cpu/perf_event.h | 10 ++++++++++
>  include/linux/perf_event.h       |  7 +++++++
>  3 files changed, 51 insertions(+)
>
> diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
> index 9dfbba5..976686f 100644
> --- a/arch/x86/kernel/cpu/perf_event.c
> +++ b/arch/x86/kernel/cpu/perf_event.c
> @@ -1590,6 +1590,40 @@ ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr,
>          return x86_pmu.events_sysfs_show(page, config);
>  }
>
> +ssize_t events_ht_sysfs_show(struct device *dev, struct device_attribute *attr,
> +                             char *page)
> +{
> +        struct perf_pmu_events_ht_attr *pmu_attr =
> +                container_of(attr, struct perf_pmu_events_ht_attr, attr);
> +        bool ht_on = false;
> +        int cpu;
> +
> +        /*
> +         * Report conditional events depending on Hyper-Threading.
> +         *
> +         * Check all online CPUs if any have a thread sibling,
> +         * as perf may measure any of them.
> +         *
> +         * This is overly conservative as usually the HT special
> +         * handling is not needed if the other CPU thread is idle.
> +         *
> +         * Note this does not (cannot) handle the case when thread
> +         * siblings are invisible, for example with virtualization
> +         * if they are owned by some other guest. The user tool
> +         * has to re-read when a thread sibling gets onlined later.
> +         */
> +        for_each_online_cpu(cpu) {
> +                ht_on = cpumask_weight(topology_sibling_cpumask(cpu)) > 1;
> +                if (ht_on)
> +                        break;
> +        }
> +
> +        return sprintf(page, "%s",
> +                       ht_on ?
> +                       pmu_attr->event_str_ht :
> +                       pmu_attr->event_str_noht);
> +}
> +
>  EVENT_ATTR(cpu-cycles,              CPU_CYCLES       );
>  EVENT_ATTR(instructions,            INSTRUCTIONS     );
>  EVENT_ATTR(cache-references,        CACHE_REFERENCES );
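
Ok, so the string is resolved each time the file is read, and a tool
has to re-read it after CPU hotplug, as the comment above says. On the
user side that would look something like this (a minimal sketch; the
sysfs path follows the usual event_source layout, and the event name
passed in is up to the caller):

        /*
         * Re-read the event string for a named cpu PMU event. The
         * kernel picks the HT or no-HT variant only at read time, so
         * this must be called again after a thread sibling is onlined.
         */
        #include <stdio.h>
        #include <string.h>

        static int read_event_str(const char *name, char *buf, int len)
        {
                char path[256];
                FILE *f;

                snprintf(path, sizeof(path),
                         "/sys/bus/event_source/devices/cpu/events/%s", name);
                f = fopen(path, "r");
                if (!f)
                        return -1;
                if (!fgets(buf, len, f)) {
                        fclose(f);
                        return -1;
                }
                fclose(f);
                buf[strcspn(buf, "\n")] = 0;    /* strip newline, if any */
                return 0;
        }
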
> diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
> index 799e6bd..ce87cd6 100644
> --- a/arch/x86/kernel/cpu/perf_event.h
> +++ b/arch/x86/kernel/cpu/perf_event.h
> @@ -660,6 +660,14 @@ static struct perf_pmu_events_attr event_attr_##v = { \
>          .event_str      = str,                                          \
>  };
>
> +#define EVENT_ATTR_STR_HT(_name, v, noht, ht)                           \
> +static struct perf_pmu_events_ht_attr event_attr_##v = {                \
> +        .attr           = __ATTR(_name, 0444, events_ht_sysfs_show, NULL),\
> +        .id             = 0,                                             \
> +        .event_str_noht = noht,                                          \
> +        .event_str_ht   = ht,                                            \
> +}
> +
> extern struct x86_pmu x86_pmu __read_mostly;
>
> static inline bool x86_pmu_has_lbr_callstack(void)
> @@ -919,6 +927,8 @@ int knc_pmu_init(void);
>
>  ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr,
>                            char *page);
> +ssize_t events_ht_sysfs_show(struct device *dev, struct device_attribute *attr,
> +                             char *page);
>
> static inline int is_ht_workaround_enabled(void)
> {
> diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
> index f9828a4..ea2d830 100644
> --- a/include/linux/perf_event.h
> +++ b/include/linux/perf_event.h
> @@ -1166,6 +1166,13 @@ struct perf_pmu_events_attr {
>          const char *event_str;
>  };
>
> +struct perf_pmu_events_ht_attr {
> +        struct device_attribute attr;
> +        u64 id;
> +        const char *event_str_ht;
> +        const char *event_str_noht;
> +};
> +
>  ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr,
>                                char *page);
>
> --
> 2.4.3
>