[PATCH v8 14/15] KVM: xen: split up kvm_xen_set_evtchn_fast()
From: Paul Durrant
Date: Tue Nov 21 2023 - 13:30:47 EST
From: Paul Durrant <pdurrant@xxxxxxxxxx>
The implementation of kvm_xen_set_evtchn_fast() is a rather lengthy piece
of code that performs two operations: updating of the shared_info
evtchn_pending mask, and updating of the vcpu_info evtchn_pending_sel
mask. Introduce a separate function to perform each of those operations and
re-work kvm_xen_set_evtchn_fast() to use them.
No functional change intended.
Signed-off-by: Paul Durrant <pdurrant@xxxxxxxxxx>
---
Cc: Sean Christopherson <seanjc@xxxxxxxxxx>
Cc: Paolo Bonzini <pbonzini@xxxxxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: Ingo Molnar <mingo@xxxxxxxxxx>
Cc: Borislav Petkov <bp@xxxxxxxxx>
Cc: Dave Hansen <dave.hansen@xxxxxxxxxxxxxxx>
Cc: "H. Peter Anvin" <hpa@xxxxxxxxx>
Cc: David Woodhouse <dwmw2@xxxxxxxxxxxxx>
Cc: x86@xxxxxxxxxx
v8:
- New in this version.
---
arch/x86/kvm/xen.c | 170 +++++++++++++++++++++++++--------------------
1 file changed, 96 insertions(+), 74 deletions(-)
diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c
index 42a9f1ea25b3..eff405eead1c 100644
--- a/arch/x86/kvm/xen.c
+++ b/arch/x86/kvm/xen.c
@@ -1623,60 +1623,28 @@ static void kvm_xen_check_poller(struct kvm_vcpu *vcpu, int port)
}
}
-/*
- * The return value from this function is propagated to kvm_set_irq() API,
- * so it returns:
- * < 0 Interrupt was ignored (masked or not delivered for other reasons)
- * = 0 Interrupt was coalesced (previous irq is still pending)
- * > 0 Number of CPUs interrupt was delivered to
- *
- * It is also called directly from kvm_arch_set_irq_inatomic(), where the
- * only check on its return value is a comparison with -EWOULDBLOCK'.
- */
-int kvm_xen_set_evtchn_fast(struct kvm_xen_evtchn *xe, struct kvm *kvm)
+static int set_shinfo_evtchn_pending(struct kvm_vcpu *vcpu, u32 port, u32 *port_word_bit)
{
+ struct kvm *kvm = vcpu->kvm;
struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache;
- struct kvm_vcpu *vcpu;
unsigned long *pending_bits, *mask_bits;
unsigned long flags;
- int port_word_bit;
- bool kick_vcpu = false;
- int vcpu_idx, idx, rc;
-
- vcpu_idx = READ_ONCE(xe->vcpu_idx);
- if (vcpu_idx >= 0)
- vcpu = kvm_get_vcpu(kvm, vcpu_idx);
- else {
- vcpu = kvm_get_vcpu_by_id(kvm, xe->vcpu_id);
- if (!vcpu)
- return -EINVAL;
- WRITE_ONCE(xe->vcpu_idx, vcpu->vcpu_idx);
- }
-
- if (!vcpu->arch.xen.vcpu_info_cache.active)
- return -EINVAL;
-
- if (xe->port >= max_evtchn_port(kvm))
- return -EINVAL;
-
- rc = -EWOULDBLOCK;
-
- idx = srcu_read_lock(&kvm->srcu);
+ int rc = -EWOULDBLOCK;
read_lock_irqsave(&gpc->lock, flags);
if (!kvm_gpc_check(gpc, PAGE_SIZE))
- goto out_rcu;
+ goto out;
if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) {
struct shared_info *shinfo = gpc->khva;
pending_bits = (unsigned long *)&shinfo->evtchn_pending;
mask_bits = (unsigned long *)&shinfo->evtchn_mask;
- port_word_bit = xe->port / 64;
+ *port_word_bit = port / 64;
} else {
struct compat_shared_info *shinfo = gpc->khva;
pending_bits = (unsigned long *)&shinfo->evtchn_pending;
mask_bits = (unsigned long *)&shinfo->evtchn_mask;
- port_word_bit = xe->port / 32;
+ *port_word_bit = port / 32;
}
/*
@@ -1686,52 +1654,106 @@ int kvm_xen_set_evtchn_fast(struct kvm_xen_evtchn *xe, struct kvm *kvm)
* already set, then we kick the vCPU in question to write to the
* *real* evtchn_pending_sel in its own guest vcpu_info struct.
*/
- if (test_and_set_bit(xe->port, pending_bits)) {
+ if (test_and_set_bit(port, pending_bits)) {
rc = 0; /* It was already raised */
- } else if (test_bit(xe->port, mask_bits)) {
- rc = -ENOTCONN; /* Masked */
- kvm_xen_check_poller(vcpu, xe->port);
+ } else if (test_bit(port, mask_bits)) {
+ rc = -ENOTCONN; /* It is masked */
+ kvm_xen_check_poller(vcpu, port);
} else {
- rc = 1; /* Delivered to the bitmap in shared_info. */
- /* Now switch to the vCPU's vcpu_info to set the index and pending_sel */
- read_unlock_irqrestore(&gpc->lock, flags);
- gpc = &vcpu->arch.xen.vcpu_info_cache;
+ rc = 1; /* It is newly raised */
+ }
- read_lock_irqsave(&gpc->lock, flags);
- if (!kvm_gpc_check(gpc, sizeof(struct vcpu_info))) {
- /*
- * Could not access the vcpu_info. Set the bit in-kernel
- * and prod the vCPU to deliver it for itself.
- */
- if (!test_and_set_bit(port_word_bit, &vcpu->arch.xen.evtchn_pending_sel))
- kick_vcpu = true;
- goto out_rcu;
- }
+ out:
+ read_unlock_irqrestore(&gpc->lock, flags);
+ return rc;
+}
- if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) {
- struct vcpu_info *vcpu_info = gpc->khva;
- if (!test_and_set_bit(port_word_bit, &vcpu_info->evtchn_pending_sel)) {
- WRITE_ONCE(vcpu_info->evtchn_upcall_pending, 1);
- kick_vcpu = true;
- }
- } else {
- struct compat_vcpu_info *vcpu_info = gpc->khva;
- if (!test_and_set_bit(port_word_bit,
- (unsigned long *)&vcpu_info->evtchn_pending_sel)) {
- WRITE_ONCE(vcpu_info->evtchn_upcall_pending, 1);
- kick_vcpu = true;
- }
+static bool set_vcpu_info_evtchn_pending(struct kvm_vcpu *vcpu, u32 port_word_bit)
+{
+ struct kvm *kvm = vcpu->kvm;
+ struct gfn_to_pfn_cache *gpc = &vcpu->arch.xen.vcpu_info_cache;
+ unsigned long flags;
+ bool kick_vcpu = false;
+
+ read_lock_irqsave(&gpc->lock, flags);
+ if (!kvm_gpc_check(gpc, sizeof(struct vcpu_info))) {
+ /*
+ * Could not access the vcpu_info. Set the bit in-kernel
+ * and prod the vCPU to deliver it for itself.
+ */
+ if (!test_and_set_bit(port_word_bit, &vcpu->arch.xen.evtchn_pending_sel))
+ kick_vcpu = true;
+ goto out;
+ }
+
+ if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) {
+ struct vcpu_info *vcpu_info = gpc->khva;
+
+ if (!test_and_set_bit(port_word_bit, &vcpu_info->evtchn_pending_sel)) {
+ WRITE_ONCE(vcpu_info->evtchn_upcall_pending, 1);
+ kick_vcpu = true;
}
+ } else {
+ struct compat_vcpu_info *vcpu_info = gpc->khva;
- /* For the per-vCPU lapic vector, deliver it as MSI. */
- if (kick_vcpu && vcpu->arch.xen.upcall_vector) {
- kvm_xen_inject_vcpu_vector(vcpu);
- kick_vcpu = false;
+ if (!test_and_set_bit(port_word_bit,
+ (unsigned long *)&vcpu_info->evtchn_pending_sel)) {
+ WRITE_ONCE(vcpu_info->evtchn_upcall_pending, 1);
+ kick_vcpu = true;
}
}
- out_rcu:
+ /* For the per-vCPU lapic vector, deliver it as MSI. */
+ if (kick_vcpu && vcpu->arch.xen.upcall_vector) {
+ kvm_xen_inject_vcpu_vector(vcpu);
+ kick_vcpu = false;
+ }
+
+ out:
read_unlock_irqrestore(&gpc->lock, flags);
+ return kick_vcpu;
+}
+
+/*
+ * The return value from this function is propagated to kvm_set_irq() API,
+ * so it returns:
+ * < 0 Interrupt was ignored (masked or not delivered for other reasons)
+ * = 0 Interrupt was coalesced (previous irq is still pending)
+ * > 0 Number of CPUs interrupt was delivered to
+ *
+ * It is also called directly from kvm_arch_set_irq_inatomic(), where the
+ * only check on its return value is a comparison with -EWOULDBLOCK
+ * (which may be returned by set_shinfo_evtchn_pending()).
+ */
+int kvm_xen_set_evtchn_fast(struct kvm_xen_evtchn *xe, struct kvm *kvm)
+{
+ struct kvm_vcpu *vcpu;
+ u32 port_word_bit;
+ bool kick_vcpu = false;
+ int vcpu_idx, idx, rc;
+
+ vcpu_idx = READ_ONCE(xe->vcpu_idx);
+ if (vcpu_idx >= 0)
+ vcpu = kvm_get_vcpu(kvm, vcpu_idx);
+ else {
+ vcpu = kvm_get_vcpu_by_id(kvm, xe->vcpu_id);
+ if (!vcpu)
+ return -EINVAL;
+ WRITE_ONCE(xe->vcpu_idx, vcpu->vcpu_idx);
+ }
+
+ if (!vcpu->arch.xen.vcpu_info_cache.active)
+ return -EINVAL;
+
+ if (xe->port >= max_evtchn_port(kvm))
+ return -EINVAL;
+
+ idx = srcu_read_lock(&kvm->srcu);
+
+ rc = set_shinfo_evtchn_pending(vcpu, xe->port, &port_word_bit);
+ if (rc == 1) /* Delivered to the bitmap in shared_info. */
+ kick_vcpu = set_vcpu_info_evtchn_pending(vcpu, port_word_bit);
+
srcu_read_unlock(&kvm->srcu, idx);
if (kick_vcpu) {
--
2.39.2