Re: [PATCH RFC 1/1] perf stat: do not fatal if the leader is errored

From: Liang, Kan
Date: Thu Sep 22 2022 - 14:42:44 EST




On 2022-09-22 2:00 p.m., Dongli Zhang wrote:
> Hi Kan,
>
> I have tested that the patch works by hiding 'slots' sysfs entries.
>
> # ll /sys/bus/event_source/devices/cpu/events/
> total 0
> -r--r--r--. 1 root root 4096 Sep 22 17:57 branch-instructions
> -r--r--r--. 1 root root 4096 Sep 22 17:57 branch-misses
> -r--r--r--. 1 root root 4096 Sep 22 17:57 bus-cycles
> -r--r--r--. 1 root root 4096 Sep 22 17:57 cache-misses
> -r--r--r--. 1 root root 4096 Sep 22 17:57 cache-references
> -r--r--r--. 1 root root 4096 Sep 22 17:57 cpu-cycles
> -r--r--r--. 1 root root 4096 Sep 22 17:57 instructions
> -r--r--r--. 1 root root 4096 Sep 22 17:57 ref-cycles
>
> # perf stat
> ^C
> Performance counter stats for 'system wide':
>
> 19,256.20 msec cpu-clock # 16.025 CPUs
> utilized
> 179 context-switches # 9.296 /sec
>
> 17 cpu-migrations # 0.883 /sec
>
> 3 page-faults # 0.156 /sec
>
> 7,502,294 cycles # 0.000 GHz
>
> 2,512,587 instructions # 0.33 insn per
> cycle
> 552,989 branches # 28.717 K/sec
>
> 15,999 branch-misses # 2.89% of all
> branches
>
> 1.201628129 seconds time elapsed
>
>
> Would you send this patch to fix it on the kernel side?

Yes, I will send it out shortly.


Thanks,
Kan
>
> Thank you very much!
>
> Dongli Zhang
>
> On 9/22/22 6:34 AM, Liang, Kan wrote:
>>
>>
>> On 2022-09-22 4:07 a.m., Like Xu wrote:
>>> On 22/9/2022 3:10 pm, Dongli Zhang wrote:
>>>> There are three options to fix the issue.
>>>>
>>>> 1. Do not expose /sys/bus/event_source/devices/cpu/events/slots to
>>>> userspace so that pmu_have_event(pmu->name, "slots") returns false.
>>>
>>> IMO, the guest PMU driver should be fixed
>>> since it misrepresents emulated hardware capabilities in terms of slots.
>>
>> Yes, we need to fix the kernel to hide the slots event if it's not
>> available.
>>
>> The patch as below should fix it. (Not tested yet)
>>
>> diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
>> index b20e646c8205..27ee43faba32 100644
>> --- a/arch/x86/events/intel/core.c
>> +++ b/arch/x86/events/intel/core.c
>> @@ -5565,6 +5565,19 @@ static struct attribute *intel_pmu_attrs[] = {
>> NULL,
>> };
>>
>> +static umode_t
>> +td_is_visible(struct kobject *kobj, struct attribute *attr, int i)
>> +{
>> + /*
>> + * Hide the perf metrics topdown events
>> + * if the slots is not in CPUID.
>> + */
>> + if (x86_pmu.num_topdown_events)
>> + return (x86_pmu.intel_ctrl & INTEL_PMC_MSK_FIXED_SLOTS) ? attr->mode : 0;
>> +
>> + return attr->mode;
>> +}
>> +
>> static umode_t
>> tsx_is_visible(struct kobject *kobj, struct attribute *attr, int i)
>> {
>> @@ -5600,6 +5613,7 @@ default_is_visible(struct kobject *kobj, struct
>> attribute *attr, int i)
>>
>> static struct attribute_group group_events_td = {
>> .name = "events",
>> + .is_visible = td_is_visible,
>> };
>>
>> static struct attribute_group group_events_mem = {
>> @@ -5758,6 +5772,23 @@ static inline int
>> hybrid_find_supported_cpu(struct x86_hybrid_pmu *pmu)
>> return (cpu >= nr_cpu_ids) ? -1 : cpu;
>> }
>>
>> +static umode_t hybrid_td_is_visible(struct kobject *kobj,
>> + struct attribute *attr, int i)
>> +{
>> + struct device *dev = kobj_to_dev(kobj);
>> + struct x86_hybrid_pmu *pmu =
>> + container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu);
>> +
>> + if (!is_attr_for_this_pmu(kobj, attr))
>> + return 0;
>> +
>> + /* Only check the big core which supports perf metrics */
>> + if (pmu->cpu_type == hybrid_big)
>> + return (pmu->intel_ctrl & INTEL_PMC_MSK_FIXED_SLOTS) ? attr->mode : 0;
>> +
>> + return attr->mode;
>> +}
>> +
>> static umode_t hybrid_tsx_is_visible(struct kobject *kobj,
>> struct attribute *attr, int i)
>> {
>> @@ -5784,7 +5815,7 @@ static umode_t hybrid_format_is_visible(struct
>> kobject *kobj,
>>
>> static struct attribute_group hybrid_group_events_td = {
>> .name = "events",
>> - .is_visible = hybrid_events_is_visible,
>> + .is_visible = hybrid_td_is_visible,
>> };
>>
>> static struct attribute_group hybrid_group_events_mem = {
>>
>>
>> Thanks,
>> Kan
>>