Re: [Patch v9 10/12] perf/x86/intel: Update dyn_constraint based on PEBS event precise level

From: Mi, Dapeng
Date: Tue Nov 11 2025 - 00:41:23 EST



On 11/10/2025 5:15 PM, Mi, Dapeng wrote:
> On 11/10/2025 5:03 PM, Peter Zijlstra wrote:
>> On Mon, Nov 10, 2025 at 08:23:55AM +0800, Mi, Dapeng wrote:
>>
>>>> @@ -5536,6 +5540,14 @@ static void intel_pmu_check_dyn_constr(s
>>>> continue;
>>>> mask = hybrid(pmu, acr_cause_mask64) & GENMASK_ULL(INTEL_PMC_MAX_GENERIC - 1, 0);
>>>> break;
>>>> + case DYN_CONSTR_PEBS:
>>>> + if (x86_pmu.arch_pebs)
>>>> + mask = hybrid(pmu, arch_pebs_cap).counters;
>>>> + break;
>>>> + case DYN_CONSTR_PDIST:
>>>> + if (x86_pmu.arch_pebs)
>>>> + mask = hybrid(pmu, arch_pebs_cap).pdists;
>>>> + break;
>>>> default:
>>>> pr_warn("Unsupported dynamic constraint type %d\n", i);
>>>> }
>>> Yes, exactly. Thanks.
>> Excellent. Could you please double check and try the bits I have in
>> queue/perf/core ? I don't think I've got v6 hardware at hand.
> Sure. I would post test results tomorrow.

Hi Peter,

I tested the queue/perf/core code with a slight code refine on SPR/CWF/PTL.
In summary, all things look good. The constraints validation passes on all
these 3 platforms, no overlapped constraints are reported. Besides, perf
counting/sampling (both legacy PEBS and arch-PEBS) works well, no issue is
found.

I did a slight change for the intel_pmu_check_dyn_constr() helper. It
should be good enough to only validate the GP counters for the PEBS counter
and PDIST constraint check. Besides, the code style is refined
opportunistically. Thanks.

diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index aad89c9d9514..81e6c8bcabde 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -5506,7 +5506,7 @@ static void __intel_pmu_check_dyn_constr(struct event_constraint *constr,
                        }

                        if (check_fail) {
-                               pr_info("The two events 0x%llx and 0x%llx may not be "
+                               pr_warn("The two events 0x%llx and 0x%llx may not be "
                                        "fully scheduled under some circumstances as "
                                        "%s.\n",
                                        c1->code, c2->code, dyn_constr_type_name[type]);
@@ -5519,6 +5519,7 @@ static void intel_pmu_check_dyn_constr(struct pmu *pmu,
                                       struct event_constraint *constr,
                                       u64 cntr_mask)
 {
+       u64 gp_mask = GENMASK_ULL(INTEL_PMC_MAX_GENERIC - 1, 0);
        enum dyn_constr_type i;
        u64 mask;

@@ -5533,20 +5534,25 @@ static void intel_pmu_check_dyn_constr(struct pmu *pmu,
                                mask = x86_pmu.lbr_counters;
                        break;
                case DYN_CONSTR_ACR_CNTR:
-                       mask = hybrid(pmu, acr_cntr_mask64) & GENMASK_ULL(INTEL_PMC_MAX_GENERIC - 1, 0);
+                       mask = hybrid(pmu, acr_cntr_mask64) & gp_mask;
                        break;
                case DYN_CONSTR_ACR_CAUSE:
-                       if (hybrid(pmu, acr_cntr_mask64) == hybrid(pmu, acr_cause_mask64))
+                       if (hybrid(pmu, acr_cntr_mask64) ==
+                                       hybrid(pmu, acr_cause_mask64))
                                continue;
-                       mask = hybrid(pmu, acr_cause_mask64) & GENMASK_ULL(INTEL_PMC_MAX_GENERIC - 1, 0);
+                       mask = hybrid(pmu, acr_cause_mask64) & gp_mask;
                        break;
                case DYN_CONSTR_PEBS:
-                       if (x86_pmu.arch_pebs)
-                               mask = hybrid(pmu, arch_pebs_cap).counters;
+                       if (x86_pmu.arch_pebs) {
+                               mask = hybrid(pmu, arch_pebs_cap).counters &
+                                      gp_mask;
+                       }
                        break;
                case DYN_CONSTR_PDIST:
-                       if (x86_pmu.arch_pebs)
-                               mask = hybrid(pmu, arch_pebs_cap).pdists;
+                       if (x86_pmu.arch_pebs) {
+                               mask = hybrid(pmu, arch_pebs_cap).pdists &
+                                      gp_mask;
+                       }
                        break;
                default:
                        pr_warn("Unsupported dynamic constraint type %d\n", i);


>
>
>