[PATCH v2 4/9] KVM: arm64: Support stolen time reporting via shared structure
From: Steven Price
Date: Mon Aug 19 2019 - 10:04:59 EST
Implement the service call for configuring a shared structure between a
VCPU and the hypervisor, in which the hypervisor can write the time
stolen from that VCPU's execution by other tasks on the host.
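For illustration, the guest enables the feature and discovers the IPA
of its per-VCPU record with the PV_TIME_ST call. A minimal guest-side
sketch (not part of this patch; stolen_time_ipa is illustrative) using
the existing SMCCC 1.1 helpers:

    struct arm_smccc_res res;
    phys_addr_t stolen_time_ipa;

    /* Returns the IPA of this VCPU's stolen time structure and
     * resets the stolen time counter. */
    arm_smccc_1_1_hvc(ARM_SMCCC_HV_PV_TIME_ST, &res);
    if ((long)res.a0 != SMCCC_RET_NOT_SUPPORTED)
            stolen_time_ipa = res.a0;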
The hypervisor allocates memory which is placed at an IPA chosen by user
space. The hypervisor then uses WRITE_ONCE() to update the shared
structure, ensuring single-copy atomicity of the 64-bit unsigned value
that reports stolen time in nanoseconds.
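The record layout (from asm/pvclock-abi.h, introduced earlier in this
series) is little-endian throughout, and a guest read relies on the
same single-copy atomicity. Sketch of the fields this patch touches
(see the ABI patch for the full definition, including any padding):

    struct pvclock_vcpu_stolen_time {
            __le32 revision;
            __le32 attributes;
            __le64 stolen_time;     /* stolen time in nanoseconds */
    };

    /* st is a guest mapping of the structure at the returned IPA */
    u64 stolen_ns = le64_to_cpu(READ_ONCE(st->stolen_time));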
Whenever stolen time is enabled by the guest, the stolen time counter is
reset.
The stolen time itself is retrieved from the sched_info structure
maintained by the Linux scheduler code. The KVM Kconfig option now
selects SCHEDSTATS to ensure this value is meaningful.
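Concretely, the core of kvm_update_stolen_time() below boils down to:

    /* sched_info.run_delay is the total time (in nanoseconds) this
     * task has spent waiting on a runqueue; the delta since the last
     * snapshot is the time stolen from the guest. */
    steal = vcpu->arch.steal.steal;
    steal += current->sched_info.run_delay - vcpu->arch.steal.last_steal;
    vcpu->arch.steal.last_steal = current->sched_info.run_delay;
    vcpu->arch.steal.steal = steal;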
Signed-off-by: Steven Price <steven.price@xxxxxxx>
---
arch/arm/include/asm/kvm_host.h | 15 +++++++
arch/arm64/include/asm/kvm_host.h | 16 ++++++-
arch/arm64/kvm/Kconfig | 1 +
include/linux/kvm_types.h | 2 +
virt/kvm/arm/arm.c | 19 +++++++++
virt/kvm/arm/hypercalls.c | 3 ++
virt/kvm/arm/pvtime.c | 71 +++++++++++++++++++++++++++++++
7 files changed, 126 insertions(+), 1 deletion(-)
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 369b5d2d54bf..14d61a84c270 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -39,6 +39,7 @@
KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1)
#define KVM_REQ_VCPU_RESET KVM_ARCH_REQ(2)
+#define KVM_REQ_RECORD_STEAL KVM_ARCH_REQ(3)
DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
@@ -77,6 +78,12 @@ struct kvm_arch {
/* Mandated version of PSCI */
u32 psci_version;
+
+ struct kvm_arch_pvtime {
+ struct gfn_to_hva_cache st_ghc;
+ gpa_t st_base;
+ u64 st_size;
+ } pvtime;
};
#define KVM_NR_MEM_OBJS 40
@@ -328,6 +335,14 @@ static inline int kvm_hypercall_pv_features(struct kvm_vcpu *vcpu)
{
return SMCCC_RET_NOT_SUPPORTED;
}
+static inline int kvm_hypercall_stolen_time(struct kvm_vcpu *vcpu)
+{
+ return SMCCC_RET_NOT_SUPPORTED;
+}
+static inline int kvm_update_stolen_time(struct kvm_vcpu *vcpu, bool init)
+{
+ return -ENOTSUPP;
+}
void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 583b3639062a..627ecbdd0c59 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -44,6 +44,7 @@
KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1)
#define KVM_REQ_VCPU_RESET KVM_ARCH_REQ(2)
+#define KVM_REQ_RECORD_STEAL KVM_ARCH_REQ(3)
DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
@@ -83,6 +84,12 @@ struct kvm_arch {
/* Mandated version of PSCI */
u32 psci_version;
+
+ struct kvm_arch_pvtime {
+ struct gfn_to_hva_cache st_ghc;
+ gpa_t st_base;
+ u64 st_size;
+ } pvtime;
};
#define KVM_NR_MEM_OBJS 40
@@ -338,8 +345,13 @@ struct kvm_vcpu_arch {
/* True when deferrable sysregs are loaded on the physical CPU,
* see kvm_vcpu_load_sysregs and kvm_vcpu_put_sysregs. */
bool sysregs_loaded_on_cpu;
-};
+ /* Guest PV state */
+ struct {
+ u64 steal;
+ u64 last_steal;
+ } steal;
+};
/* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */
#define vcpu_sve_pffr(vcpu) ((void *)((char *)((vcpu)->arch.sve_state) + \
sve_ffr_offset((vcpu)->arch.sve_max_vl)))
@@ -479,6 +491,8 @@ int kvm_perf_init(void);
int kvm_perf_teardown(void);
int kvm_hypercall_pv_features(struct kvm_vcpu *vcpu);
+int kvm_hypercall_stolen_time(struct kvm_vcpu *vcpu);
+int kvm_update_stolen_time(struct kvm_vcpu *vcpu, bool init);
void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 syndrome);
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index a67121d419a2..d8b88e40d223 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -39,6 +39,7 @@ config KVM
select IRQ_BYPASS_MANAGER
select HAVE_KVM_IRQ_BYPASS
select HAVE_KVM_VCPU_RUN_PID_CHANGE
+ select SCHEDSTATS
---help---
Support hosting virtualized guest machines.
We don't support KVM with 16K page tables yet, due to the multiple
diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
index bde5374ae021..1c88e69db3d9 100644
--- a/include/linux/kvm_types.h
+++ b/include/linux/kvm_types.h
@@ -35,6 +35,8 @@ typedef unsigned long gva_t;
typedef u64 gpa_t;
typedef u64 gfn_t;
+#define GPA_INVALID (~(gpa_t)0)
+
typedef unsigned long hva_t;
typedef u64 hpa_t;
typedef u64 hfn_t;
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index 35a069815baf..53cc80e98d8b 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -40,6 +40,10 @@
#include <asm/kvm_coproc.h>
#include <asm/sections.h>
+#include <kvm/arm_hypercalls.h>
+#include <kvm/arm_pmu.h>
+#include <kvm/arm_psci.h>
+
#ifdef REQUIRES_VIRT
__asm__(".arch_extension virt");
#endif
@@ -135,6 +139,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
kvm->arch.max_vcpus = vgic_present ?
kvm_vgic_get_max_vcpus() : KVM_MAX_VCPUS;
+ kvm->arch.pvtime.st_base = GPA_INVALID;
return ret;
out_free_stage2_pgd:
kvm_free_stage2_pgd(kvm);
@@ -379,6 +384,8 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
kvm_vcpu_load_sysregs(vcpu);
kvm_arch_vcpu_load_fp(vcpu);
kvm_vcpu_pmu_restore_guest(vcpu);
+ if (vcpu->kvm->arch.pvtime.st_base != GPA_INVALID)
+ kvm_make_request(KVM_REQ_RECORD_STEAL, vcpu);
if (single_task_running())
vcpu_clear_wfe_traps(vcpu);
@@ -625,6 +632,15 @@ static void vcpu_req_sleep(struct kvm_vcpu *vcpu)
smp_rmb();
}
+static void vcpu_req_record_steal(struct kvm_vcpu *vcpu)
+{
+ int idx;
+
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
+ kvm_update_stolen_time(vcpu, false);
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
+}
+
static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
{
return vcpu->arch.target >= 0;
@@ -644,6 +660,9 @@ static void check_vcpu_requests(struct kvm_vcpu *vcpu)
* that a VCPU sees new virtual interrupts.
*/
kvm_check_request(KVM_REQ_IRQ_PENDING, vcpu);
+
+ if (kvm_check_request(KVM_REQ_RECORD_STEAL, vcpu))
+ vcpu_req_record_steal(vcpu);
}
}
diff --git a/virt/kvm/arm/hypercalls.c b/virt/kvm/arm/hypercalls.c
index 63ae629c466a..ac678eabf15f 100644
--- a/virt/kvm/arm/hypercalls.c
+++ b/virt/kvm/arm/hypercalls.c
@@ -56,6 +56,9 @@ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
case ARM_SMCCC_HV_PV_FEATURES:
val = kvm_hypercall_pv_features(vcpu);
break;
+ case ARM_SMCCC_HV_PV_TIME_ST:
+ val = kvm_hypercall_stolen_time(vcpu);
+ break;
default:
return kvm_psci_call(vcpu);
}
diff --git a/virt/kvm/arm/pvtime.c b/virt/kvm/arm/pvtime.c
index 6201d71cb1f8..f169184e4076 100644
--- a/virt/kvm/arm/pvtime.c
+++ b/virt/kvm/arm/pvtime.c
@@ -3,8 +3,55 @@
#include <linux/arm-smccc.h>
+#include <asm/pvclock-abi.h>
+
#include <kvm/arm_hypercalls.h>
+int kvm_update_stolen_time(struct kvm_vcpu *vcpu, bool init)
+{
+ struct kvm *kvm = vcpu->kvm;
+ struct kvm_arch_pvtime *pvtime = &kvm->arch.pvtime;
+ u64 steal;
+ u64 steal_le;
+ u64 offset;
+ int idx;
+ const int stride = sizeof(struct pvclock_vcpu_stolen_time);
+
+ if (pvtime->st_base == GPA_INVALID)
+ return -ENOTSUPP;
+
+ /* Let's do the local bookkeeping */
+ steal = vcpu->arch.steal.steal;
+ steal += current->sched_info.run_delay - vcpu->arch.steal.last_steal;
+ vcpu->arch.steal.last_steal = current->sched_info.run_delay;
+ vcpu->arch.steal.steal = steal;
+
+ offset = stride * kvm_vcpu_get_idx(vcpu);
+
+ if (unlikely(offset + stride > pvtime->st_size))
+ return -EINVAL;
+
+ steal_le = cpu_to_le64(steal);
+ pagefault_disable();
+ idx = srcu_read_lock(&kvm->srcu);
+ if (init) {
+ struct pvclock_vcpu_stolen_time init_values = {
+ .revision = 0,
+ .attributes = 0
+ };
+ kvm_write_guest_offset_cached(kvm,
+ &pvtime->st_ghc,
+ &init_values, offset, sizeof(init_values));
+ }
+ offset += offsetof(struct pvclock_vcpu_stolen_time, stolen_time);
+ kvm_write_guest_offset_cached(kvm, &pvtime->st_ghc,
+ &steal_le, offset, sizeof(steal_le));
+ srcu_read_unlock(&kvm->srcu, idx);
+ pagefault_enable();
+
+ return 0;
+}
+
int kvm_hypercall_pv_features(struct kvm_vcpu *vcpu)
{
u32 feature = smccc_get_arg1(vcpu);
@@ -12,6 +59,7 @@ int kvm_hypercall_pv_features(struct kvm_vcpu *vcpu)
switch (feature) {
case ARM_SMCCC_HV_PV_FEATURES:
+ case ARM_SMCCC_HV_PV_TIME_ST:
val = SMCCC_RET_SUCCESS;
break;
}
@@ -19,3 +67,26 @@ int kvm_hypercall_pv_features(struct kvm_vcpu *vcpu)
return val;
}
+int kvm_hypercall_stolen_time(struct kvm_vcpu *vcpu)
+{
+ u64 ret;
+ int err;
+
+ /*
+ * Start counting stolen time from the time the guest requests
+ * the feature enabled.
+ */
+ vcpu->arch.steal.steal = 0;
+ vcpu->arch.steal.last_steal = current->sched_info.run_delay;
+
+ err = kvm_update_stolen_time(vcpu, true);
+
+ if (err)
+ ret = SMCCC_RET_NOT_SUPPORTED;
+ else
+ ret = vcpu->kvm->arch.pvtime.st_base +
+ (sizeof(struct pvclock_vcpu_stolen_time) *
+ kvm_vcpu_get_idx(vcpu));
+
+ return ret;
+}
--
2.20.1