[PATCH v2 1/5] KVM: x86: Introduce KVM_REQ_RING_SOFT_FULL
From: Gavin Shan
Date: Fri Sep 16 2022 - 00:52:24 EST
Add KVM_REQ_RING_SOFT_FULL, which is raised when the dirty ring of a vCPU
becomes soft full in kvm_dirty_ring_push(). The vCPU is forced to exit to
userspace when the request is pending and its dirty ring is still soft full
on guest entry. In that case the request is re-raised before exiting, so the
vCPU keeps exiting until userspace has harvested and reset the ring.

Suggested-by: Marc Zyngier <maz@xxxxxxxxxx>
Signed-off-by: Gavin Shan <gshan@xxxxxxxxxx>
---
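For reviewers: the soft-full predicate this patch keys on already exists and
is not modified here. Roughly (paraphrased from virt/kvm/dirty_ring.c, details
may differ slightly):

static u32 kvm_dirty_ring_used(struct kvm_dirty_ring *ring)
{
	/* Entries pushed but not yet reset by userspace */
	return READ_ONCE(ring->dirty_index) - READ_ONCE(ring->reset_index);
}

bool kvm_dirty_ring_soft_full(struct kvm_dirty_ring *ring)
{
	/* "Soft full": used entries have reached the soft limit */
	return kvm_dirty_ring_used(ring) >= ring->soft_limit;
}

The soft limit sits below the ring size, so there is still headroom for the
few entries that may be pushed before the vCPU actually exits to userspace.
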
 arch/x86/kvm/x86.c       | 5 +++--
 include/linux/kvm_host.h | 1 +
 virt/kvm/dirty_ring.c    | 4 ++++
 3 files changed, 8 insertions(+), 2 deletions(-)
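Also for context, a minimal sketch of the userspace side of the contract, not
part of this patch; the harvest helper below is hypothetical and only marks
where a VMM would walk its mmap()ed per-vCPU rings:

#include <err.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static void harvest_all_dirty_rings(void)
{
	/*
	 * Hypothetical: walk each vCPU's mmap()ed kvm_dirty_gfn array,
	 * record the (slot, offset) pairs and flag the entries as harvested.
	 */
}

static void run_vcpu(int vm_fd, int vcpu_fd, struct kvm_run *run)
{
	for (;;) {
		if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
			err(1, "KVM_RUN");

		if (run->exit_reason == KVM_EXIT_DIRTY_RING_FULL) {
			harvest_all_dirty_rings();
			/* Let KVM recycle the harvested entries */
			if (ioctl(vm_fd, KVM_RESET_DIRTY_RINGS, 0) < 0)
				err(1, "KVM_RESET_DIRTY_RINGS");
			continue;
		}

		/* Other exit reasons elided for brevity */
		break;
	}
}
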
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 43a6a7efc6ec..7f368f59f033 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -10265,8 +10265,9 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	bool req_immediate_exit = false;
 
 	/* Forbid vmenter if vcpu dirty ring is soft-full */
-	if (unlikely(vcpu->kvm->dirty_ring_size &&
-		     kvm_dirty_ring_soft_full(&vcpu->dirty_ring))) {
+	if (kvm_check_request(KVM_REQ_RING_SOFT_FULL, vcpu) &&
+	    kvm_dirty_ring_soft_full(&vcpu->dirty_ring)) {
+		kvm_make_request(KVM_REQ_RING_SOFT_FULL, vcpu);
 		vcpu->run->exit_reason = KVM_EXIT_DIRTY_RING_FULL;
 		trace_kvm_dirty_ring_exit(vcpu);
 		r = 0;
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index f4519d3689e1..53fa3134fee0 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -157,6 +157,7 @@ static inline bool is_error_page(struct page *page)
 #define KVM_REQ_VM_DEAD             (1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
 #define KVM_REQ_UNBLOCK             2
 #define KVM_REQ_UNHALT              3
+#define KVM_REQ_RING_SOFT_FULL      4
 #define KVM_REQUEST_ARCH_BASE       8
 
 /*
diff --git a/virt/kvm/dirty_ring.c b/virt/kvm/dirty_ring.c
index f4c2a6eb1666..f0e49937bc9e 100644
--- a/virt/kvm/dirty_ring.c
+++ b/virt/kvm/dirty_ring.c
@@ -149,6 +149,7 @@ int kvm_dirty_ring_reset(struct kvm *kvm, struct kvm_dirty_ring *ring)
 
 void kvm_dirty_ring_push(struct kvm_dirty_ring *ring, u32 slot, u64 offset)
 {
+	struct kvm_vcpu *vcpu = container_of(ring, struct kvm_vcpu, dirty_ring);
 	struct kvm_dirty_gfn *entry;
 
 	/* It should never get full */
@@ -166,6 +167,9 @@ void kvm_dirty_ring_push(struct kvm_dirty_ring *ring, u32 slot, u64 offset)
 	kvm_dirty_gfn_set_dirtied(entry);
 	ring->dirty_index++;
 	trace_kvm_dirty_ring_push(ring, slot, offset);
+
+	if (kvm_dirty_ring_soft_full(ring))
+		kvm_make_request(KVM_REQ_RING_SOFT_FULL, vcpu);
 }
 
 struct page *kvm_dirty_ring_get_page(struct kvm_dirty_ring *ring, u32 offset)
--
2.23.0