Re: [PATCH v2 1/3] KVM: x86: move kvm_vcpu_gfn_to_memslot() out of try_async_pf()

From: Sean Christopherson
Date: Thu Aug 13 2020 - 21:40:19 EST


On Fri, Aug 07, 2020 at 04:12:30PM +0200, Vitaly Kuznetsov wrote:
> No functional change intended. Slot flags will need to be analyzed
> prior to try_async_pf() when KVM_MEM_PCI_HOLE is implemented.
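
IIUC, the plan is for a follow-up patch to check the slot flags before
faulting in the pfn, i.e. something along these lines in
direct_page_fault() (my guess at the shape of it; the hole handler name
is made up):

	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
	if (slot && (slot->flags & KVM_MEM_PCI_HOLE))
		return kvm_handle_pci_hole(vcpu, gpa, error_code); /* hypothetical */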

Why? Wouldn't it be just as easy, and arguably more appropriate, to add
KVM_PFN_ERR_PCI_HOLE and update handle_abnormal_pfn() accordingly?
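
E.g. (completely untested, and kvm_handle_pci_hole_page() is a made-up
placeholder for whatever the PCI hole handling ends up being):

	/* in include/linux/kvm_host.h, next to the existing KVM_PFN_ERR_* */
	#define KVM_PFN_ERR_PCI_HOLE	(KVM_PFN_ERR_MASK + 3)

with __gfn_to_pfn_memslot() returning it for a slot that has
KVM_MEM_PCI_HOLE set, and then in handle_abnormal_pfn(), ahead of the
is_error_pfn() check so the new pfn isn't reported as a bad page:

	if (unlikely(pfn == KVM_PFN_ERR_PCI_HOLE)) {
		*ret_val = kvm_handle_pci_hole_page(vcpu, gva, gfn);
		return true;
	}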

> Signed-off-by: Vitaly Kuznetsov <vkuznets@xxxxxxxxxx>
> ---
>  arch/x86/kvm/mmu/mmu.c         | 14 ++++++++------
>  arch/x86/kvm/mmu/paging_tmpl.h |  7 +++++--
>  2 files changed, 13 insertions(+), 8 deletions(-)
>
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index 862bf418214e..fef6956393f7 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -4042,11 +4042,10 @@ static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
>  				  kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
>  }
>  
> -static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
> -			 gpa_t cr2_or_gpa, kvm_pfn_t *pfn, bool write,
> -			 bool *writable)
> +static bool try_async_pf(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot,
> +			 bool prefault, gfn_t gfn, gpa_t cr2_or_gpa,
> +			 kvm_pfn_t *pfn, bool write, bool *writable)
>  {
> -	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
>  	bool async;
>  
>  	/* Don't expose private memslots to L2. */
> @@ -4082,7 +4081,7 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
>  	bool exec = error_code & PFERR_FETCH_MASK;
>  	bool lpage_disallowed = exec && is_nx_huge_page_enabled();
>  	bool map_writable;
> -
> +	struct kvm_memory_slot *slot;
>  	gfn_t gfn = gpa >> PAGE_SHIFT;
>  	unsigned long mmu_seq;
>  	kvm_pfn_t pfn;
> @@ -4104,7 +4103,10 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
>  	mmu_seq = vcpu->kvm->mmu_notifier_seq;
>  	smp_rmb();
>  
> -	if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, write, &map_writable))
> +	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
> +
> +	if (try_async_pf(vcpu, slot, prefault, gfn, gpa, &pfn, write,
> +			 &map_writable))
>  		return RET_PF_RETRY;
>  
>  	if (handle_abnormal_pfn(vcpu, is_tdp ? 0 : gpa, gfn, pfn, ACC_ALL, &r))
> diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
> index 0172a949f6a7..5c6a895f67c3 100644
> --- a/arch/x86/kvm/mmu/paging_tmpl.h
> +++ b/arch/x86/kvm/mmu/paging_tmpl.h
> @@ -779,6 +779,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gpa_t addr, u32 error_code,
>  	int write_fault = error_code & PFERR_WRITE_MASK;
>  	int user_fault = error_code & PFERR_USER_MASK;
>  	struct guest_walker walker;
> +	struct kvm_memory_slot *slot;
>  	int r;
>  	kvm_pfn_t pfn;
>  	unsigned long mmu_seq;
> @@ -833,8 +834,10 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gpa_t addr, u32 error_code,
>  	mmu_seq = vcpu->kvm->mmu_notifier_seq;
>  	smp_rmb();
>  
> -	if (try_async_pf(vcpu, prefault, walker.gfn, addr, &pfn, write_fault,
> -			 &map_writable))
> +	slot = kvm_vcpu_gfn_to_memslot(vcpu, walker.gfn);
> +
> +	if (try_async_pf(vcpu, slot, prefault, walker.gfn, addr, &pfn,
> +			 write_fault, &map_writable))
>  		return RET_PF_RETRY;
>  
>  	if (handle_abnormal_pfn(vcpu, addr, walker.gfn, pfn, walker.pte_access, &r))
> --
> 2.25.4
>