Re: [PATCH 6/8] perf/amd/ibs: Add pmu specific minimum period

From: Namhyung Kim
Date: Mon Oct 07 2024 - 15:30:27 EST


On Mon, Oct 07, 2024 at 03:48:08AM +0000, Ravi Bangoria wrote:
> 0x10 is the minimum sample period for IBS Fetch and 0x90 for IBS Op.
> The current IBS PMU driver uses 0x10 for both PMUs, which is incorrect.
> Fix it by adding PMU-specific minimum period values in struct perf_ibs.
>
> Also, bail out of opening a 'sample period mode' event if the user-requested
> sample period is less than the PMU-supported minimum value. For a 'freq mode'
> event, start calibrating the sample period from the PMU-specific minimum period.
>
> Signed-off-by: Ravi Bangoria <ravi.bangoria@xxxxxxx>
> ---
> arch/x86/events/amd/ibs.c | 24 ++++++++++++++++--------
> 1 file changed, 16 insertions(+), 8 deletions(-)
>
> diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c
> index 368ed839b612..e7522ba45a7e 100644
> --- a/arch/x86/events/amd/ibs.c
> +++ b/arch/x86/events/amd/ibs.c
> @@ -83,6 +83,7 @@ struct perf_ibs {
>  	u64				cnt_mask;
>  	u64				enable_mask;
>  	u64				valid_mask;
> +	u16				min_period;
>  	u64				max_period;
>  	unsigned long			offset_mask[1];
>  	int				offset_max;
> @@ -295,10 +296,14 @@ static int perf_ibs_init(struct perf_event *event)
>  			/* raw max_cnt may not be set */
>  			return -EINVAL;
> 
> -		/* Silently mask off lower nibble. IBS hw mandates it. */
> -		hwc->sample_period &= ~0x0FULL;
> -		if (!hwc->sample_period)
> -			hwc->sample_period = 0x10;
> +		if (event->attr.freq) {
> +			hwc->sample_period = perf_ibs->min_period;
> +		} else {
> +			/* Silently mask off lower nibble. IBS hw mandates it. */
> +			hwc->sample_period &= ~0x0FULL;
> +			if (hwc->sample_period < perf_ibs->min_period)
> +				return -EINVAL;

Maybe it needs to check perf_ibs->max_period as well.
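
Something along these lines (untested, just to illustrate; it reuses the
min_period check you add above in the non-freq branch):

		/* Silently mask off lower nibble. IBS hw mandates it. */
		hwc->sample_period &= ~0x0FULL;
		if (hwc->sample_period < perf_ibs->min_period ||
		    hwc->sample_period > perf_ibs->max_period)
			return -EINVAL;

That way an out-of-range period fails at event open instead of being
silently adjusted later when the period is programmed.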

Thanks,
Namhyung

> +		}
>  	} else {
>  		u64 period = 0;
>
> @@ -316,10 +321,10 @@ static int perf_ibs_init(struct perf_event *event)
>  		config &= ~perf_ibs->cnt_mask;
>  		event->attr.sample_period = period;
>  		hwc->sample_period = period;
> -	}
> 
> -	if (!hwc->sample_period)
> -		return -EINVAL;
> +		if (hwc->sample_period < perf_ibs->min_period)
> +			return -EINVAL;
> +	}
>
>  	/*
>  	 * If we modify hwc->sample_period, we also need to update
> @@ -340,7 +345,8 @@ static int perf_ibs_set_period(struct perf_ibs *perf_ibs,
>  	int overflow;
> 
>  	/* ignore lower 4 bits in min count: */
> -	overflow = perf_event_set_period(hwc, 1<<4, perf_ibs->max_period, period);
> +	overflow = perf_event_set_period(hwc, perf_ibs->min_period,
> +					 perf_ibs->max_period, period);
>  	local64_set(&hwc->prev_count, 0);
>
>  	return overflow;
> @@ -677,6 +683,7 @@ static struct perf_ibs perf_ibs_fetch = {
>  	.cnt_mask		= IBS_FETCH_MAX_CNT,
>  	.enable_mask		= IBS_FETCH_ENABLE,
>  	.valid_mask		= IBS_FETCH_VAL,
> +	.min_period		= 0x10,
>  	.max_period		= IBS_FETCH_MAX_CNT << 4,
>  	.offset_mask		= { MSR_AMD64_IBSFETCH_REG_MASK },
>  	.offset_max		= MSR_AMD64_IBSFETCH_REG_COUNT,
> @@ -702,6 +709,7 @@ static struct perf_ibs perf_ibs_op = {
>  				  IBS_OP_CUR_CNT_RAND,
>  	.enable_mask		= IBS_OP_ENABLE,
>  	.valid_mask		= IBS_OP_VAL,
> +	.min_period		= 0x90,
>  	.max_period		= IBS_OP_MAX_CNT << 4,
>  	.offset_mask		= { MSR_AMD64_IBSOP_REG_MASK },
>  	.offset_max		= MSR_AMD64_IBSOP_REG_COUNT,
> --
> 2.46.2
>