[PATCH v9] arch/powerpc/kvm: Add support for reading VPA counters for pseries guests

From: Gautam Menghani
Date: Mon May 20 2024 - 13:58:29 EST


The PAPR hypervisor has introduced three new counters in the VPA area of
LPAR CPUs for KVM L2 guest observability (see [1] for terminology): two
for the time spent on context switches from host to guest and vice versa,
and one for the total time spent inside the KVM guest. Add a tracepoint
that enables reading these counters for use by ftrace/perf. Note that this
tracepoint is only available with the nestedv2 API (i.e., KVM on PowerVM).

[1] Terminology:
a. L1 refers to the VM (LPAR) booted on top of the PAPR hypervisor.
b. L2 refers to the KVM guest booted on top of L1.
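
As an illustration (not part of this patch), the counters can be read
through the usual tracefs interface once the tracepoint is enabled; the
exact event path depends on where tracefs is mounted and on the trace
system the event is registered under (assumed to be kvm_hv below):

  # cd /sys/kernel/tracing
  # echo 1 > events/kvm_hv/kvmppc_vcpu_stats/enable
  # cat trace_pipe
  ... kvmppc_vcpu_stats: VCPU 0: l1_to_l2_cs_time=... ns l2_to_l1_cs_time=... ns l2_runtime=... ns

Enabling the tracepoint is also what turns the counters on: the
tracepoint's register function sets l2_counters_enable in the VPA of
every present CPU, and the unregister function clears it again when the
tracepoint is no longer in use.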

Reviewed-by: Nicholas Piggin <npiggin@xxxxxxxxx>
Acked-by: Naveen N Rao <naveen@xxxxxxxxxx>
Signed-off-by: Vaibhav Jain <vaibhav@xxxxxxxxxxxxx>
Signed-off-by: Gautam Menghani <gautam@xxxxxxxxxxxxx>
---
v8 -> v9:
1. Fix linker errors when compiling as a module.

v7 -> v8:
1. Use per_cpu vars instead of paca members.
2. Fix build error for powernv config.

v6 -> v7:
1. Use TRACE_EVENT_FN_COND to handle zero counters case.
2. Use for_each_present_cpu() to handle CPU hotplug.

v5 -> v6:
1. Use TRACE_EVENT_FN to enable/disable counters only once.
2. Remove the aggregation counters from vcpu->arch.
3. Use PACA to maintain old counter values instead of zeroing on every
entry.
4. Simplify variable names.

v4 -> v5:
1. Define helper functions for getting/setting the accumulation counter
in L2's VPA.

v3 -> v4:
1. After vcpu_run, check the VPA flag instead of checking whether the
tracepoint is enabled when deciding to disable the context switch time
accumulation.

v2 -> v3:
1. Move the counter disabling and zeroing code to a different function.
2. Move the get_lppaca() inside the tracepoint_enabled() branch.
3. Add the aggregation logic to maintain total context switch time.

v1 -> v2:
1. Fix the build error due to invalid struct member reference.

arch/powerpc/include/asm/kvm_book3s_64.h |  5 ++
arch/powerpc/include/asm/lppaca.h        | 11 +++-
arch/powerpc/kvm/book3s_hv.c             | 75 ++++++++++++++++++++++++
arch/powerpc/kvm/trace_hv.h              | 29 +++++++++
4 files changed, 117 insertions(+), 3 deletions(-)

diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index d8729ec81ca0..2ef9a5f4e5d1 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -684,6 +684,11 @@ int kvmhv_nestedv2_set_ptbl_entry(unsigned long lpid, u64 dw0, u64 dw1);
int kvmhv_nestedv2_parse_output(struct kvm_vcpu *vcpu);
int kvmhv_nestedv2_set_vpa(struct kvm_vcpu *vcpu, unsigned long vpa);

+int kmvhv_counters_tracepoint_regfunc(void);
+void kmvhv_counters_tracepoint_unregfunc(void);
+int kvmhv_get_l2_counters_status(void);
+void kvmhv_set_l2_counters_status(int cpu, bool status);
+
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */

#endif /* __ASM_KVM_BOOK3S_64_H__ */
diff --git a/arch/powerpc/include/asm/lppaca.h b/arch/powerpc/include/asm/lppaca.h
index 61ec2447dabf..f40a646bee3c 100644
--- a/arch/powerpc/include/asm/lppaca.h
+++ b/arch/powerpc/include/asm/lppaca.h
@@ -62,7 +62,8 @@ struct lppaca {
u8 donate_dedicated_cpu; /* Donate dedicated CPU cycles */
u8 fpregs_in_use;
u8 pmcregs_in_use;
- u8 reserved8[28];
+ u8 l2_counters_enable; /* Enable usage of counters for KVM guest */
+ u8 reserved8[27];
__be64 wait_state_cycles; /* Wait cycles for this proc */
u8 reserved9[28];
__be16 slb_count; /* # of SLBs to maintain */
@@ -92,9 +93,13 @@ struct lppaca {
/* cacheline 4-5 */

__be32 page_ins; /* CMO Hint - # page ins by OS */
- u8 reserved12[148];
+ u8 reserved12[28];
+ volatile __be64 l1_to_l2_cs_tb;
+ volatile __be64 l2_to_l1_cs_tb;
+ volatile __be64 l2_runtime_tb;
+ u8 reserved13[96];
volatile __be64 dtl_idx; /* Dispatch Trace Log head index */
- u8 reserved13[96];
+ u8 reserved14[96];
} ____cacheline_aligned;

#define lppaca_of(cpu) (*paca_ptrs[cpu]->lppaca_ptr)
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 35cb014a0c51..a6e4d46c1cd0 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -4108,6 +4108,77 @@ static void vcpu_vpa_increment_dispatch(struct kvm_vcpu *vcpu)
}
}

+/* Helper functions for reading L2's stats from L1's VPA */
+#ifdef CONFIG_PPC_PSERIES
+static DEFINE_PER_CPU(u64, l1_to_l2_cs);
+static DEFINE_PER_CPU(u64, l2_to_l1_cs);
+static DEFINE_PER_CPU(u64, l2_runtime_agg);
+
+int kvmhv_get_l2_counters_status(void)
+{
+ return firmware_has_feature(FW_FEATURE_LPAR) &&
+ get_lppaca()->l2_counters_enable;
+}
+
+void kvmhv_set_l2_counters_status(int cpu, bool status)
+{
+ if (!firmware_has_feature(FW_FEATURE_LPAR))
+ return;
+ if (status)
+ lppaca_of(cpu).l2_counters_enable = 1;
+ else
+ lppaca_of(cpu).l2_counters_enable = 0;
+}
+
+int kmvhv_counters_tracepoint_regfunc(void)
+{
+ int cpu;
+
+ for_each_present_cpu(cpu) {
+ kvmhv_set_l2_counters_status(cpu, true);
+ }
+ return 0;
+}
+
+void kmvhv_counters_tracepoint_unregfunc(void)
+{
+ int cpu;
+
+ for_each_present_cpu(cpu) {
+ kvmhv_set_l2_counters_status(cpu, false);
+ }
+}
+
+static void do_trace_nested_cs_time(struct kvm_vcpu *vcpu)
+{
+ struct lppaca *lp = get_lppaca();
+ u64 l1_to_l2_ns, l2_to_l1_ns, l2_runtime_ns;
+ u64 *l1_to_l2_cs_ptr = this_cpu_ptr(&l1_to_l2_cs);
+ u64 *l2_to_l1_cs_ptr = this_cpu_ptr(&l2_to_l1_cs);
+ u64 *l2_runtime_agg_ptr = this_cpu_ptr(&l2_runtime_agg);
+
+ l1_to_l2_ns = tb_to_ns(be64_to_cpu(lp->l1_to_l2_cs_tb));
+ l2_to_l1_ns = tb_to_ns(be64_to_cpu(lp->l2_to_l1_cs_tb));
+ l2_runtime_ns = tb_to_ns(be64_to_cpu(lp->l2_runtime_tb));
+ trace_kvmppc_vcpu_stats(vcpu, l1_to_l2_ns - *l1_to_l2_cs_ptr,
+ l2_to_l1_ns - *l2_to_l1_cs_ptr,
+ l2_runtime_ns - *l2_runtime_agg_ptr);
+ *l1_to_l2_cs_ptr = l1_to_l2_ns;
+ *l2_to_l1_cs_ptr = l2_to_l1_ns;
+ *l2_runtime_agg_ptr = l2_runtime_ns;
+}
+
+#else
+int kvmhv_get_l2_counters_status(void)
+{
+ return 0;
+}
+
+static void do_trace_nested_cs_time(struct kvm_vcpu *vcpu)
+{
+}
+#endif
+
static int kvmhv_vcpu_entry_nestedv2(struct kvm_vcpu *vcpu, u64 time_limit,
unsigned long lpcr, u64 *tb)
{
@@ -4156,6 +4227,10 @@ static int kvmhv_vcpu_entry_nestedv2(struct kvm_vcpu *vcpu, u64 time_limit,

timer_rearm_host_dec(*tb);

+ /* Record context switch and guest_run_time data */
+ if (kvmhv_get_l2_counters_status())
+ do_trace_nested_cs_time(vcpu);
+
return trap;
}

diff --git a/arch/powerpc/kvm/trace_hv.h b/arch/powerpc/kvm/trace_hv.h
index 8d57c8428531..77ebc724e6cd 100644
--- a/arch/powerpc/kvm/trace_hv.h
+++ b/arch/powerpc/kvm/trace_hv.h
@@ -512,6 +512,35 @@ TRACE_EVENT(kvmppc_run_vcpu_exit,
__entry->vcpu_id, __entry->exit, __entry->ret)
);

+#ifdef CONFIG_PPC_PSERIES
+
+TRACE_EVENT_FN_COND(kvmppc_vcpu_stats,
+ TP_PROTO(struct kvm_vcpu *vcpu, u64 l1_to_l2_cs, u64 l2_to_l1_cs, u64 l2_runtime),
+
+ TP_ARGS(vcpu, l1_to_l2_cs, l2_to_l1_cs, l2_runtime),
+
+ TP_CONDITION(l1_to_l2_cs || l2_to_l1_cs || l2_runtime),
+
+ TP_STRUCT__entry(
+ __field(int, vcpu_id)
+ __field(u64, l1_to_l2_cs)
+ __field(u64, l2_to_l1_cs)
+ __field(u64, l2_runtime)
+ ),
+
+ TP_fast_assign(
+ __entry->vcpu_id = vcpu->vcpu_id;
+ __entry->l1_to_l2_cs = l1_to_l2_cs;
+ __entry->l2_to_l1_cs = l2_to_l1_cs;
+ __entry->l2_runtime = l2_runtime;
+ ),
+
+ TP_printk("VCPU %d: l1_to_l2_cs_time=%llu ns l2_to_l1_cs_time=%llu ns l2_runtime=%llu ns",
+ __entry->vcpu_id, __entry->l1_to_l2_cs,
+ __entry->l2_to_l1_cs, __entry->l2_runtime),
+ kmvhv_counters_tracepoint_regfunc, kmvhv_counters_tracepoint_unregfunc
+);
+#endif
#endif /* _TRACE_KVM_HV_H */

/* This part must be outside protection */
--
2.39.3