[PATCH v3 04/15] KVM: x86: Use generic async PF slot management

From: Gavin Shan
Date: Fri May 07 2021 - 02:41:26 EST


This switches to the generic slot management mechanism for asynchronous
page faults by enabling CONFIG_KVM_ASYNC_PF_SLOT, since the x86 private
implementation duplicates the generic one.

The changes introduced by this patch are purely mechanical and should
not cause any functional change.

Signed-off-by: Gavin Shan <gshan@xxxxxxxxxx>
---
arch/x86/include/asm/kvm_host.h |  2 -
arch/x86/kvm/Kconfig            |  1 +
arch/x86/kvm/mmu/mmu.c          |  2 +-
arch/x86/kvm/x86.c              | 86 +++------------------------------
4 files changed, 8 insertions(+), 83 deletions(-)
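
A quick reference for reviewers: the kvm_async_pf_*_slot() helpers used
below are the generic slot-management routines selected by
CONFIG_KVM_ASYNC_PF_SLOT, introduced earlier in this series. The
prototypes here are only a sketch inferred from the call sites in this
patch and from the x86 hash code being removed; the authoritative
definitions live in the generic patch, so treat them as illustrative:

  /* Sketch only, inferred from call sites; not copied from the generic code. */
  void kvm_async_pf_reset_slot(struct kvm_vcpu *vcpu);             /* forget all recorded gfns */
  void kvm_async_pf_add_slot(struct kvm_vcpu *vcpu, gfn_t gfn);    /* record a gfn with a pending APF */
  void kvm_async_pf_remove_slot(struct kvm_vcpu *vcpu, gfn_t gfn); /* drop the gfn once the page is ready */
  bool kvm_async_pf_find_slot(struct kvm_vcpu *vcpu, gfn_t gfn);   /* is an APF already pending for this gfn? */

As with the removed x86 code, the usage pattern is: add the gfn when the
"page not present" event is injected (kvm_arch_async_page_not_present()),
look it up in try_async_pf() to detect a fault on a gfn that is already
being brought in, remove it when the page is ready
(kvm_arch_async_page_present()), and reset the whole set when async PF is
disabled, when CR0.PG toggles, and on vCPU create/reset.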

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index cbbcee0a84f9..9b344a71f083 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -766,7 +766,6 @@ struct kvm_vcpu_arch {

	struct {
		bool halted;
-		gfn_t gfns[ASYNC_PF_PER_VCPU];
		struct gfn_to_hva_cache data;
		u64 msr_en_val; /* MSR_KVM_ASYNC_PF_EN */
		u64 msr_int_val; /* MSR_KVM_ASYNC_PF_INT */
@@ -1797,7 +1796,6 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work);
void kvm_arch_async_page_present_queued(struct kvm_vcpu *vcpu);
bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu);
-extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);

int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu);
int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index f6b93a35ce14..8b8ae8a0467d 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -34,6 +34,7 @@ config KVM
	select HAVE_KVM_IRQ_ROUTING
	select HAVE_KVM_EVENTFD
	select KVM_ASYNC_PF
+	select KVM_ASYNC_PF_SLOT
	select USER_RETURN_NOTIFIER
	select KVM_MMIO
	select TASKSTATS
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 4b3ee244ebe0..c355d980fd9a 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3701,7 +3701,7 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,

	if (!prefault && kvm_can_do_async_pf(vcpu)) {
		trace_kvm_try_async_get_page(cr2_or_gpa, gfn);
-		if (kvm_find_async_pf_gfn(vcpu, gfn)) {
+		if (kvm_async_pf_find_slot(vcpu, gfn)) {
			trace_kvm_async_pf_doublefault(cr2_or_gpa, gfn);
			kvm_make_request(KVM_REQ_APF_HALT, vcpu);
			return true;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 300661dc6ca4..55d53082a8df 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -305,13 +305,6 @@ static struct kmem_cache *kvm_alloc_emulator_cache(void)

static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt);

-static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu)
-{
-	int i;
-	for (i = 0; i < ASYNC_PF_PER_VCPU; i++)
-		vcpu->arch.apf.gfns[i] = ~0;
-}
-
static void kvm_on_user_return(struct user_return_notifier *urn)
{
	unsigned slot;
@@ -829,7 +822,7 @@ void kvm_post_set_cr0(struct kvm_vcpu *vcpu, unsigned long old_cr0, unsigned lon

	if ((cr0 ^ old_cr0) & X86_CR0_PG) {
		kvm_clear_async_pf_completion_queue(vcpu);
-		kvm_async_pf_hash_reset(vcpu);
+		kvm_async_pf_reset_slot(vcpu);
	}

	if ((cr0 ^ old_cr0) & update_bits)
@@ -2959,7 +2952,7 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)

	if (!kvm_pv_async_pf_enabled(vcpu)) {
		kvm_clear_async_pf_completion_queue(vcpu);
-		kvm_async_pf_hash_reset(vcpu);
+		kvm_async_pf_reset_slot(vcpu);
		return 0;
	}

@@ -10264,7 +10257,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)

	vcpu->arch.pat = MSR_IA32_CR_PAT_DEFAULT;

-	kvm_async_pf_hash_reset(vcpu);
+	kvm_async_pf_reset_slot(vcpu);
	kvm_pmu_init(vcpu);

	vcpu->arch.pending_external_vector = -1;
@@ -10381,7 +10374,7 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
	kvmclock_reset(vcpu);

	kvm_clear_async_pf_completion_queue(vcpu);
-	kvm_async_pf_hash_reset(vcpu);
+	kvm_async_pf_reset_slot(vcpu);
	vcpu->arch.apf.halted = false;

	if (vcpu->arch.guest_fpu && kvm_mpx_supported()) {
@@ -11221,73 +11214,6 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
	kvm_mmu_do_page_fault(vcpu, work->cr2_or_gpa, 0, true);
}

-static inline u32 kvm_async_pf_hash_fn(gfn_t gfn)
-{
-	BUILD_BUG_ON(!is_power_of_2(ASYNC_PF_PER_VCPU));
-
-	return hash_32(gfn & 0xffffffff, order_base_2(ASYNC_PF_PER_VCPU));
-}
-
-static inline u32 kvm_async_pf_next_probe(u32 key)
-{
-	return (key + 1) & (ASYNC_PF_PER_VCPU - 1);
-}
-
-static void kvm_add_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
-{
-	u32 key = kvm_async_pf_hash_fn(gfn);
-
-	while (vcpu->arch.apf.gfns[key] != ~0)
-		key = kvm_async_pf_next_probe(key);
-
-	vcpu->arch.apf.gfns[key] = gfn;
-}
-
-static u32 kvm_async_pf_gfn_slot(struct kvm_vcpu *vcpu, gfn_t gfn)
-{
-	int i;
-	u32 key = kvm_async_pf_hash_fn(gfn);
-
-	for (i = 0; i < ASYNC_PF_PER_VCPU &&
-		     (vcpu->arch.apf.gfns[key] != gfn &&
-		      vcpu->arch.apf.gfns[key] != ~0); i++)
-		key = kvm_async_pf_next_probe(key);
-
-	return key;
-}
-
-bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
-{
-	return vcpu->arch.apf.gfns[kvm_async_pf_gfn_slot(vcpu, gfn)] == gfn;
-}
-
-static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
-{
-	u32 i, j, k;
-
-	i = j = kvm_async_pf_gfn_slot(vcpu, gfn);
-
-	if (WARN_ON_ONCE(vcpu->arch.apf.gfns[i] != gfn))
-		return;
-
-	while (true) {
-		vcpu->arch.apf.gfns[i] = ~0;
-		do {
-			j = kvm_async_pf_next_probe(j);
-			if (vcpu->arch.apf.gfns[j] == ~0)
-				return;
-			k = kvm_async_pf_hash_fn(vcpu->arch.apf.gfns[j]);
-			/*
-			 * k lies cyclically in ]i,j]
-			 * |    i.k.j |
-			 * |....j i.k.| or  |.k..j i...|
-			 */
-		} while ((i <= j) ? (i < k && k <= j) : (i < k || k <= j));
-		vcpu->arch.apf.gfns[i] = vcpu->arch.apf.gfns[j];
-		i = j;
-	}
-}
-
static inline int apf_put_user_notpresent(struct kvm_vcpu *vcpu)
{
	u32 reason = KVM_PV_REASON_PAGE_NOT_PRESENT;
@@ -11351,7 +11277,7 @@ bool kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
	struct x86_exception fault;

	trace_kvm_async_pf_not_present(work->arch.token, work->cr2_or_gpa);
-	kvm_add_async_pf_gfn(vcpu, work->arch.gfn);
+	kvm_async_pf_add_slot(vcpu, work->arch.gfn);

	if (kvm_can_deliver_async_pf(vcpu) &&
	    !apf_put_user_notpresent(vcpu)) {
@@ -11388,7 +11314,7 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
	if (work->wakeup_all)
		work->arch.token = ~0; /* broadcast wakeup */
	else
-		kvm_del_async_pf_gfn(vcpu, work->arch.gfn);
+		kvm_async_pf_remove_slot(vcpu, work->arch.gfn);
	trace_kvm_async_pf_ready(work->arch.token, work->cr2_or_gpa);

	if ((work->wakeup_all || work->notpresent_injected) &&
--
2.23.0