Re: [PATCH v3 6/7] arm64: perf: Disable PMU while processing counter overflows
From: Mark Rutland
Date: Tue Jun 19 2018 - 06:43:32 EST
On Tue, Jun 19, 2018 at 11:15:41AM +0100, Suzuki K Poulose wrote:
> The arm64 PMU updates the event counters and reprograms the
> counters in the overflow IRQ handler without disabling the
> PMU. This could potentially cause skews for group counters,
> where the overflowed counters may lose some event
> counts while they are reprogrammed. To prevent this, disable
> the PMU while we process the counter overflows and enable it
> right back when we are done.
>
> This patch also moves the PMU stop/start routines to avoid a
> forward declaration.
>
> Suggested-by: Mark Rutland <mark.rutland@xxxxxxx>
> Cc: Will Deacon <will.deacon@xxxxxxx>
> Signed-off-by: Suzuki K Poulose <suzuki.poulose@xxxxxxx>
Acked-by: Mark Rutland <mark.rutland@xxxxxxx>
This makes me realise that we could remove the pmu_lock, but that's not
a new problem, and we can address that separately.
Thanks,
Mark.
> ---
> arch/arm64/kernel/perf_event.c | 50 +++++++++++++++++++++++-------------------
> 1 file changed, 28 insertions(+), 22 deletions(-)
>
> diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
> index 9ce3729..eebc635 100644
> --- a/arch/arm64/kernel/perf_event.c
> +++ b/arch/arm64/kernel/perf_event.c
> @@ -678,6 +678,28 @@ static void armv8pmu_disable_event(struct perf_event *event)
> raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
> }
>
> +static void armv8pmu_start(struct arm_pmu *cpu_pmu)
> +{
> + unsigned long flags;
> + struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
> +
> + raw_spin_lock_irqsave(&events->pmu_lock, flags);
> + /* Enable all counters */
> + armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMU_PMCR_E);
> + raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
> +}
> +
> +static void armv8pmu_stop(struct arm_pmu *cpu_pmu)
> +{
> + unsigned long flags;
> + struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
> +
> + raw_spin_lock_irqsave(&events->pmu_lock, flags);
> + /* Disable all counters */
> + armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMU_PMCR_E);
> + raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
> +}
> +
> static irqreturn_t armv8pmu_handle_irq(struct arm_pmu *cpu_pmu)
> {
> u32 pmovsr;
> @@ -702,6 +724,11 @@ static irqreturn_t armv8pmu_handle_irq(struct arm_pmu *cpu_pmu)
> */
> regs = get_irq_regs();
>
> + /*
> + * Stop the PMU while processing the counter overflows
> + * to prevent skews in group events.
> + */
> + armv8pmu_stop(cpu_pmu);
> for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
> struct perf_event *event = cpuc->events[idx];
> struct hw_perf_event *hwc;
> @@ -726,6 +753,7 @@ static irqreturn_t armv8pmu_handle_irq(struct arm_pmu *cpu_pmu)
> if (perf_event_overflow(event, &data, regs))
> cpu_pmu->disable(event);
> }
> + armv8pmu_start(cpu_pmu);
>
> /*
> * Handle the pending perf events.
> @@ -739,28 +767,6 @@ static irqreturn_t armv8pmu_handle_irq(struct arm_pmu *cpu_pmu)
> return IRQ_HANDLED;
> }
>
> -static void armv8pmu_start(struct arm_pmu *cpu_pmu)
> -{
> - unsigned long flags;
> - struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
> -
> - raw_spin_lock_irqsave(&events->pmu_lock, flags);
> - /* Enable all counters */
> - armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMU_PMCR_E);
> - raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
> -}
> -
> -static void armv8pmu_stop(struct arm_pmu *cpu_pmu)
> -{
> - unsigned long flags;
> - struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
> -
> - raw_spin_lock_irqsave(&events->pmu_lock, flags);
> - /* Disable all counters */
> - armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMU_PMCR_E);
> - raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
> -}
> -
> static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,
> struct perf_event *event)
> {
> --
> 2.7.4
>