Re: [PATCH 2/3] KVM: x86: cleanup kvm_inject_emulated_page_fault
From: Vitaly Kuznetsov
Date: Thu Mar 26 2020 - 09:41:12 EST
Paolo Bonzini <pbonzini@xxxxxxxxxx> writes:
> To reconstruct the kvm_mmu to be used for page fault injection, we
> can simply use fault->nested_page_fault. This matches how
> fault->nested_page_fault is assigned in the first place by
> FNAME(walk_addr_generic).
>
> Signed-off-by: Paolo Bonzini <pbonzini@xxxxxxxxxx>
> ---
>  arch/x86/kvm/mmu/mmu.c         | 6 ------
>  arch/x86/kvm/mmu/paging_tmpl.h | 2 +-
>  arch/x86/kvm/x86.c             | 7 +++----
>  3 files changed, 4 insertions(+), 11 deletions(-)
>
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index e26c9a583e75..6250e31ac617 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -4353,12 +4353,6 @@ static unsigned long get_cr3(struct kvm_vcpu *vcpu)
>  	return kvm_read_cr3(vcpu);
>  }
>  
> -static void inject_page_fault(struct kvm_vcpu *vcpu,
> -			      struct x86_exception *fault)
> -{
> -	vcpu->arch.mmu->inject_page_fault(vcpu, fault);
> -}
> -
This is already gone with Sean's "KVM: x86: Consolidate logic for
injecting page faults to L1".
It would probably make sense to have a combined series (or a branch on
kvm.git) to simplify testing efforts.
>  static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
>  			   unsigned int access, int *nr_present)
>  {
> diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
> index 1ddbfff64ccc..ae646acf6703 100644
> --- a/arch/x86/kvm/mmu/paging_tmpl.h
> +++ b/arch/x86/kvm/mmu/paging_tmpl.h
> @@ -812,7 +812,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gpa_t addr, u32 error_code,
>  	if (!r) {
>  		pgprintk("%s: guest page fault\n", __func__);
>  		if (!prefault)
> -			inject_page_fault(vcpu, &walker.fault);
> +			kvm_inject_emulated_page_fault(vcpu, &walker.fault);
>  
>  		return RET_PF_RETRY;
>  	}
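FWIW, by the time we get here walker.fault.nested_page_fault has already
been filled in by FNAME(walk_addr_generic), so
kvm_inject_emulated_page_fault() can derive the right mmu from the fault
itself. For reference, the assignment in the error path of
FNAME(walk_addr_generic) looks like this (quoting from memory, so please
double-check against the tree):

	/* nested iff the walk used something other than the L1 view */
	walker->fault.nested_page_fault = mmu != vcpu->arch.walk_mmu;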
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 64ed6e6e2b56..522905523bf0 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -614,12 +614,11 @@ EXPORT_SYMBOL_GPL(kvm_inject_page_fault);
>  bool kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu,
>  				    struct x86_exception *fault)
>  {
> +	struct kvm_mmu *fault_mmu;
>  	WARN_ON_ONCE(fault->vector != PF_VECTOR);
>  
> -	if (mmu_is_nested(vcpu) && !fault->nested_page_fault)
> -		vcpu->arch.nested_mmu.inject_page_fault(vcpu, fault);
> -	else
> -		vcpu->arch.mmu->inject_page_fault(vcpu, fault);
> +	fault_mmu = fault->nested_page_fault ? vcpu->arch.mmu : vcpu->arch.walk_mmu;
> +	fault_mmu->inject_page_fault(vcpu, fault);
>  
>  	return fault->nested_page_fault;
>  }
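The new form should be strictly equivalent: mmu_is_nested() is just
walk_mmu == &nested_mmu, and when the vCPU is not nested walk_mmu and mmu
point to the same struct, so picking walk_mmu for !nested_page_fault
changes nothing there. Assuming that invariant, the mapping is:

	/*
	 * nested_page_fault  mmu_is_nested()  old pick     new pick
	 * -----------------  ---------------  -----------  ------------------------
	 * true               any              mmu          mmu
	 * false              true             nested_mmu   walk_mmu (== nested_mmu)
	 * false              false            mmu          walk_mmu (== mmu)
	 */

i.e. the same targets as the old if/else, minus re-deriving the nesting
state from the vCPU.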
--
Vitaly