[PATCH 1/4] perf/x86/intel: pass auto-reload information to event update

From: Kan Liang
Date: Mon Dec 18 2017 - 14:48:11 EST


From: Kan Liang <kan.liang@xxxxxxxxxxxxxxx>

There is an auto-reload mechanism enabled for PEBS events in fixed
period mode. When calculating event->count, the reload value also
needs to be taken into account.

Pass the auto-reload value and the number of reloads to the event
update code. They will be used by a later patch in this series.

No functional change.
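
To illustrate the intent, here is a minimal sketch of how the new
arguments could later be folded into the delta computation inside
x86_perf_event_update(). This is illustrative only and not part of
this patch; it assumes the function's existing new_raw_count,
prev_raw_count and shift locals, and the exact accounting is left to
a later patch:

	s64 delta;

	/* Sign-extend the visible register delta, as before. */
	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	/*
	 * Sketch: each auto-reload re-arms the counter to (-reload_val),
	 * so every reload contributes one full period to the count.
	 */
	delta += (s64)reload_val * reload_times;

	local64_add(delta, &event->count);

All existing callers pass (0, 0), which leaves the computed delta
identical to today's behaviour.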

Signed-off-by: Kan Liang <kan.liang@xxxxxxxxxxxxxxx>
---
arch/x86/events/core.c       |  9 +++++----
arch/x86/events/intel/core.c |  9 +++++----
arch/x86/events/intel/ds.c   |  2 +-
arch/x86/events/intel/knc.c  |  2 +-
arch/x86/events/intel/p4.c   |  2 +-
arch/x86/events/perf_event.h |  6 ++++--
6 files changed, 17 insertions(+), 13 deletions(-)

diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 140d332..35552ea 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -64,7 +64,8 @@ u64 __read_mostly hw_cache_extra_regs
* Can only be executed on the CPU where the event is active.
* Returns the delta events processed.
*/
-u64 x86_perf_event_update(struct perf_event *event)
+u64 x86_perf_event_update(struct perf_event *event,
+                          u64 reload_val, int reload_times)
{
struct hw_perf_event *hwc = &event->hw;
int shift = 64 - x86_pmu.cntval_bits;
@@ -1357,7 +1358,7 @@ void x86_pmu_stop(struct perf_event *event, int flags)
* Drain the remaining delta count out of a event
* that we are disabling:
*/
- x86_perf_event_update(event);
+ x86_perf_event_update(event, 0, 0);
hwc->state |= PERF_HES_UPTODATE;
}
}
@@ -1456,7 +1457,7 @@ int x86_pmu_handle_irq(struct pt_regs *regs)

event = cpuc->events[idx];

- val = x86_perf_event_update(event);
+ val = x86_perf_event_update(event, 0, 0);
if (val & (1ULL << (x86_pmu.cntval_bits - 1)))
continue;

@@ -1884,7 +1885,7 @@ early_initcall(init_hw_perf_events);

static inline void x86_pmu_read(struct perf_event *event)
{
- x86_perf_event_update(event);
+ x86_perf_event_update(event, 0, 0);
}

/*
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 09c26a4..1f7edaf 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -1965,7 +1965,7 @@ static void intel_pmu_nhm_workaround(void)
for (i = 0; i < 4; i++) {
event = cpuc->events[i];
if (event)
- x86_perf_event_update(event);
+ x86_perf_event_update(event, 0, 0);
}

for (i = 0; i < 4; i++) {
@@ -2135,9 +2135,10 @@ static void intel_pmu_add_event(struct perf_event *event)
* Save and restart an expired event. Called by NMI contexts,
* so it has to be careful about preempting normal event ops:
*/
-int intel_pmu_save_and_restart(struct perf_event *event)
+int intel_pmu_save_and_restart(struct perf_event *event,
+                               u64 reload_val, int reload_times)
{
- x86_perf_event_update(event);
+ x86_perf_event_update(event, reload_val, reload_times);
/*
* For a checkpointed counter always reset back to 0. This
* avoids a situation where the counter overflows, aborts the
@@ -2299,7 +2300,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
if (!test_bit(bit, cpuc->active_mask))
continue;

- if (!intel_pmu_save_and_restart(event))
+ if (!intel_pmu_save_and_restart(event, 0, 0))
continue;

perf_sample_data_init(&data, 0, event->hw.last_period);
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index 3674a4b..0b693b7 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -1260,7 +1260,7 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
struct pt_regs regs;
void *at = get_next_pebs_record_by_bit(base, top, bit);

- if (!intel_pmu_save_and_restart(event) &&
+ if (!intel_pmu_save_and_restart(event, 0, 0) &&
!(event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD))
return;

diff --git a/arch/x86/events/intel/knc.c b/arch/x86/events/intel/knc.c
index 618001c..b42a49d 100644
--- a/arch/x86/events/intel/knc.c
+++ b/arch/x86/events/intel/knc.c
@@ -247,7 +247,7 @@ static int knc_pmu_handle_irq(struct pt_regs *regs)
if (!test_bit(bit, cpuc->active_mask))
continue;

- if (!intel_pmu_save_and_restart(event))
+ if (!intel_pmu_save_and_restart(event, 0, 0))
continue;

perf_sample_data_init(&data, 0, event->hw.last_period);
diff --git a/arch/x86/events/intel/p4.c b/arch/x86/events/intel/p4.c
index d32c0ee..672bf5e 100644
--- a/arch/x86/events/intel/p4.c
+++ b/arch/x86/events/intel/p4.c
@@ -1024,7 +1024,7 @@ static int p4_pmu_handle_irq(struct pt_regs *regs)
/* it might be unflagged overflow */
overflow = p4_pmu_clear_cccr_ovf(hwc);

- val = x86_perf_event_update(event);
+ val = x86_perf_event_update(event, 0, 0);
if (!overflow && (val & (1ULL << (x86_pmu.cntval_bits - 1))))
continue;

diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index f7aaadf..fabb8b3 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -738,7 +738,8 @@ extern u64 __read_mostly hw_cache_extra_regs
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX];

-u64 x86_perf_event_update(struct perf_event *event);
+u64 x86_perf_event_update(struct perf_event *event,
+                          u64 reload_val, int reload_times);

static inline unsigned int x86_pmu_config_addr(int index)
{
@@ -871,7 +872,8 @@ static inline bool intel_pmu_has_bts(struct perf_event *event)
return false;
}

-int intel_pmu_save_and_restart(struct perf_event *event);
+int intel_pmu_save_and_restart(struct perf_event *event,
+                               u64 reload_val, int reload_times);

struct event_constraint *
x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
--
2.7.4