[PATCH RFC 14/39] KVM: x86/xen: handle PV IPI vcpu yield

From: Joao Martins
Date: Wed Feb 20 2019 - 15:17:59 EST


Cooperative Linux guests may yield the vcpu after an IPI-many if any of
the IPI'd vcpus were preempted (i.e. their runstate is 'runnable').
Support SCHEDOP_yield to handle that yield.

Signed-off-by: Joao Martins <joao.m.martins@xxxxxxxxxx>
---
arch/x86/kvm/xen.c | 31 +++++++++++++++++++++++++++++++
1 file changed, 31 insertions(+)
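
For context, the guest-side behaviour this caters to looks roughly like
the sketch below. It is a simplified paraphrase of the Linux Xen
call-function IPI path (cf. arch/x86/xen/smp.c); exact names and details
vary across kernel versions and it is only meant to show when the guest
issues SCHEDOP_yield:

	/*
	 * After raising the call-function IPI, yield this vcpu if any
	 * target vcpu had time stolen (i.e. it is runnable but currently
	 * preempted), giving it a chance to run the IPI handler.
	 */
	static void xen_smp_send_call_function_ipi(const struct cpumask *mask)
	{
		int cpu;

		__xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);

		/* Make sure other vcpus get a chance to run if they need to. */
		for_each_cpu(cpu, mask) {
			if (xen_vcpu_stolen(cpu)) {
				HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
				break;
			}
		}
	}

On the KVM side the yield is mapped to kvm_vcpu_on_spin(), which lets the
scheduler boost another runnable (preempted) vcpu of the same VM instead
of burning the time slice.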

diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c
index ec40cb1de6b6..753a6d2c11cd 100644
--- a/arch/x86/kvm/xen.c
+++ b/arch/x86/kvm/xen.c
@@ -17,6 +17,7 @@
 #include <xen/interface/xen.h>
 #include <xen/interface/vcpu.h>
 #include <xen/interface/event_channel.h>
+#include <xen/interface/sched.h>
 
 #include "trace.h"

@@ -668,6 +669,31 @@ static int kvm_xen_hcall_set_timer_op(struct kvm_vcpu *vcpu, uint64_t timeout)
 	return 0;
 }
 
+static int kvm_xen_hcall_sched_op(struct kvm_vcpu *vcpu, int cmd, u64 param)
+{
+	int ret = -ENOSYS;
+	gpa_t gpa;
+	int idx;
+
+	idx = srcu_read_lock(&vcpu->kvm->srcu);
+	gpa = kvm_mmu_gva_to_gpa_system(vcpu, param, NULL);
+	srcu_read_unlock(&vcpu->kvm->srcu, idx);
+
+	if (!gpa)
+		return -EFAULT;
+
+	switch (cmd) {
+	case SCHEDOP_yield:
+		kvm_vcpu_on_spin(vcpu, true);
+		ret = 0;
+		break;
+	default:
+		break;
+	}
+
+	return ret;
+}
+
 int kvm_xen_hypercall(struct kvm_vcpu *vcpu)
 {
 	bool longmode;
@@ -714,6 +740,11 @@ int kvm_xen_hypercall(struct kvm_vcpu *vcpu)
 		if (!r)
 			goto hcall_success;
 		break;
+	case __HYPERVISOR_sched_op:
+		r = kvm_xen_hcall_sched_op(vcpu, params[0], params[1]);
+		if (!r)
+			goto hcall_success;
+		break;
 	/* fallthrough */
 	default:
 		break;
--
2.11.0