[PATCH V9 09/10] arm64/perf: Implement branch records save on task sched out

From: Anshuman Khandual
Date: Wed Mar 15 2023 - 01:25:57 EST


This modifies armv8pmu_sched_task() to implement a branch records save
mechanism via armv8pmu_branch_save() when a task is scheduled out of a
CPU. BRBE is paused, so that no new records are captured at any
exception level, while the live branch records are read out. These are
then concatenated ahead of the records already stored in the task
context, preserving contiguity, and the final length of the combined
buffer is capped at the implemented BRBE depth.
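
For reference, the concatenation relies on stitch_stored_live_entries()
introduced earlier in this series. Below is a minimal sketch of what
that helper is expected to do; the struct layout and names are
illustrative stand-ins assumed from the earlier patches, not part of
this patch:

    #include <string.h>

    /* Assumed shape of one captured branch record (illustrative) */
    struct brbe_regset {
            unsigned long brbsrc;
            unsigned long brbtgt;
            unsigned long brbinf;
    };

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    /*
     * Prepend nr_live just-captured records to nr_stored saved ones,
     * dropping the oldest entries so the total never exceeds nr_max
     * (the implemented BRBE depth).
     */
    static int stitch_sketch(struct brbe_regset *stored,
                             struct brbe_regset *live,
                             int nr_stored, int nr_live, int nr_max)
    {
            int nr_move = MIN(nr_stored, nr_max - nr_live);

            /* Slide stored (older) records back to make room up front */
            memmove(&stored[nr_live], &stored[0],
                    nr_move * sizeof(*stored));
            /* Live (newer) records go first, keeping newest-first order */
            memcpy(&stored[0], live, nr_live * sizeof(*live));

            return MIN(nr_live + nr_stored, nr_max);
    }

With this shape, armv8pmu_branch_save() below just captures the live
records while BRBE is paused and lets the helper cap the result at
brbe_attr->brbe_nr.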

Cc: Catalin Marinas <catalin.marinas@xxxxxxx>
Cc: Will Deacon <will@xxxxxxxxxx>
Cc: Mark Rutland <mark.rutland@xxxxxxx>
Cc: linux-arm-kernel@xxxxxxxxxxxxxxxxxxx
Cc: linux-kernel@xxxxxxxxxxxxxxx
Signed-off-by: Anshuman Khandual <anshuman.khandual@xxxxxxx>
---
 arch/arm64/include/asm/perf_event.h |  2 ++
 arch/arm64/kernel/brbe.c            | 30 +++++++++++++++++++++++++++++
 arch/arm64/kernel/perf_event.c      | 14 ++++++++++++--
 3 files changed, 44 insertions(+), 2 deletions(-)

diff --git a/arch/arm64/include/asm/perf_event.h b/arch/arm64/include/asm/perf_event.h
index 8077b1fabe29..9ad0c6aabc07 100644
--- a/arch/arm64/include/asm/perf_event.h
+++ b/arch/arm64/include/asm/perf_event.h
@@ -289,6 +289,7 @@ void armv8pmu_branch_probe(struct arm_pmu *arm_pmu);
 void armv8pmu_branch_reset(void);
 int armv8pmu_private_alloc(struct arm_pmu *arm_pmu);
 void armv8pmu_private_free(struct arm_pmu *arm_pmu);
+void armv8pmu_branch_save(struct arm_pmu *arm_pmu, void *ctx);
 #else
 static inline void armv8pmu_branch_read(struct pmu_hw_events *cpuc, struct perf_event *event)
 {
@@ -315,6 +316,7 @@ static inline void armv8pmu_branch_probe(struct arm_pmu *arm_pmu) { }
 static inline void armv8pmu_branch_reset(void) { }
 static inline int armv8pmu_private_alloc(struct arm_pmu *arm_pmu) { return 0; }
 static inline void armv8pmu_private_free(struct arm_pmu *arm_pmu) { }
+static inline void armv8pmu_branch_save(struct arm_pmu *arm_pmu, void *ctx) { }
 #endif
 #endif
 #endif
diff --git a/arch/arm64/kernel/brbe.c b/arch/arm64/kernel/brbe.c
index 34bc58ef8062..3dcb4407b92a 100644
--- a/arch/arm64/kernel/brbe.c
+++ b/arch/arm64/kernel/brbe.c
@@ -207,6 +207,36 @@ static int stitch_stored_live_entries(struct brbe_regset *stored,
 	return nr_last;
 }
 
+static int brbe_branch_save(struct brbe_hw_attr *brbe_attr, struct brbe_regset *live)
+{
+	u64 brbfcr = read_sysreg_s(SYS_BRBFCR_EL1);
+	int nr_live;
+
+	write_sysreg_s(brbfcr | BRBFCR_EL1_PAUSED, SYS_BRBFCR_EL1);
+	isb();
+
+	nr_live = capture_brbe_regset(brbe_attr, live);
+
+	write_sysreg_s(brbfcr & ~BRBFCR_EL1_PAUSED, SYS_BRBFCR_EL1);
+	isb();
+
+	return nr_live;
+}
+
+void armv8pmu_branch_save(struct arm_pmu *arm_pmu, void *ctx)
+{
+	struct brbe_hw_attr *brbe_attr = (struct brbe_hw_attr *)arm_pmu->private;
+	struct arm64_perf_task_context *task_ctx = ctx;
+	struct brbe_regset live[BRBE_MAX_ENTRIES];
+	int nr_live, nr_store;
+
+	nr_live = brbe_branch_save(brbe_attr, live);
+	nr_store = task_ctx->nr_brbe_records;
+	nr_store = stitch_stored_live_entries(task_ctx->store, live, nr_store,
+					      nr_live, brbe_attr->brbe_nr);
+	task_ctx->nr_brbe_records = nr_store;
+}
+
 /*
  * Generic perf branch filters supported on BRBE
  *
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index c100731c52a0..2fbed575e747 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -972,9 +972,19 @@ static int armv8pmu_user_event_idx(struct perf_event *event)
 static void armv8pmu_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in)
 {
 	struct arm_pmu *armpmu = to_arm_pmu(pmu_ctx->pmu);
+	void *task_ctx = pmu_ctx ? pmu_ctx->task_ctx_data : NULL;
 
-	if (sched_in && arm_pmu_branch_stack_supported(armpmu))
-		armv8pmu_branch_reset();
+	if (arm_pmu_branch_stack_supported(armpmu)) {
+		/* Save branch records in task_ctx on sched out */
+		if (task_ctx && !sched_in) {
+			armv8pmu_branch_save(armpmu, task_ctx);
+			return;
+		}
+
+		/* Reset branch records on sched in */
+		if (sched_in)
+			armv8pmu_branch_reset();
+	}
 }
 
 /*
--
2.25.1