+static inline void smmu_pmu_counter_set_value(struct smmu_pmu*smmu_pmu,
+ u32 idx, u64 value)8));
+{
+ if (smmu_pmu->counter_mask & BIT(32))
+ writeq(value, smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx,
+ else4));
+ writel(value, smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx,
The arm64 IO macros use (__force u32), so it is probably OK to pass a
64-bit value to writel(). But you could use something like lower_32_bits()
for clarity.

Yes, the macro uses (__force u32). I will change it to make the truncation
more explicit, though.
+static void smmu_pmu_event_start(struct perf_event *event, int flags)
+{
+ struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+ u32 filter_span, filter_sid;
+ u32 evtyper;
+
+ hwc->state = 0;
+
+ smmu_pmu_set_period(smmu_pmu, hwc);
+
+ smmu_pmu_get_event_filter(event, &filter_span, &filter_sid);
+
+ evtyper = get_event(event) |
+ filter_span << SMMU_PMCG_SID_SPAN_SHIFT;
+
+ smmu_pmu_set_evtyper(smmu_pmu, idx, evtyper);
+ smmu_pmu_set_smr(smmu_pmu, idx, filter_sid);
+ smmu_pmu_interrupt_enable(smmu_pmu, idx);
+ smmu_pmu_counter_enable(smmu_pmu, idx);
+}
+
+static void smmu_pmu_event_stop(struct perf_event *event, int flags)
+{
+ struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+
+ if (hwc->state & PERF_HES_STOPPED)
+ return;
+
+ smmu_pmu_counter_disable(smmu_pmu, idx);
Is it intentional not to call smmu_pmu_interrupt_disable() here?

Yes, it is. An earlier version of the patch had the interrupt toggling, and
Robin pointed out that it is not really needed, since the counters are
stopped anyway, and explicitly disabling the interrupt would not solve the
spurious-interrupt case either.

Ah, apologies for not seeing that in the earlier reviews.