Re: [PATCH v2 01/11] KVM: x86: Add helper functions for illegal GPA checking and page fault injection
From: Mohammed Gamal
Date: Mon Jun 22 2020 - 08:21:58 EST
On Mon, 2020-06-22 at 12:44 +0800, Yuan Yao wrote:
> On Fri, Jun 19, 2020 at 05:39:15PM +0200, Mohammed Gamal wrote:
> > This patch adds two helper functions that will be used to support
> > virtualizing MAXPHYADDR in both kvm-intel.ko and kvm.ko.
> >
> > kvm_fixup_and_inject_pf_error() injects a page fault for a
> > user-specified GVA, while kvm_mmu_is_illegal_gpa() checks whether a
> > GPA exceeds vCPU address limits.
> >
> > Signed-off-by: Mohammed Gamal <mgamal@xxxxxxxxxx>
> > Signed-off-by: Paolo Bonzini <pbonzini@xxxxxxxxxx>
> > ---
> >  arch/x86/kvm/mmu.h |  6 ++++++
> >  arch/x86/kvm/x86.c | 21 +++++++++++++++++++++
> >  arch/x86/kvm/x86.h |  1 +
> >  3 files changed, 28 insertions(+)
> >
> > diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
> > index 0ad06bfe2c2c..555237dfb91c 100644
> > --- a/arch/x86/kvm/mmu.h
> > +++ b/arch/x86/kvm/mmu.h
> > @@ -4,6 +4,7 @@
> >
> >  #include <linux/kvm_host.h>
> >  #include "kvm_cache_regs.h"
> > +#include "cpuid.h"
> >
> >  #define PT64_PT_BITS 9
> >  #define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
> > @@ -158,6 +159,11 @@ static inline bool is_write_protection(struct kvm_vcpu *vcpu)
> >          return kvm_read_cr0_bits(vcpu, X86_CR0_WP);
> >  }
> >
> > +static inline bool kvm_mmu_is_illegal_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
> > +{
> > +        return (gpa >= BIT_ULL(cpuid_maxphyaddr(vcpu)));
> > +}
> > +
> >  /*
> >   * Check if a given access (described through the I/D, W/R and U/S bits of a
> >   * page fault error code pfec) causes a permission fault with the given PTE
> > diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> > index 00c88c2f34e4..ac8642e890b1 100644
> > --- a/arch/x86/kvm/x86.c
> > +++ b/arch/x86/kvm/x86.c
> > @@ -10693,6 +10693,27 @@ u64 kvm_spec_ctrl_valid_bits(struct kvm_vcpu *vcpu)
> >  }
> >  EXPORT_SYMBOL_GPL(kvm_spec_ctrl_valid_bits);
> >
> > +void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_code)
> > +{
> > +        struct x86_exception fault;
> > +
> > +        if (!(error_code & PFERR_PRESENT_MASK) ||
> > +            vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, error_code, &fault) != UNMAPPED_GVA) {
> > +                /*
> > +                 * If vcpu->arch.walk_mmu->gva_to_gpa succeeded, the page
> > +                 * tables probably do not match the TLB. Just proceed
> > +                 * with the error code that the processor gave.
> > +                 */
> > +                fault.vector = PF_VECTOR;
> > +                fault.error_code_valid = true;
> > +                fault.error_code = error_code;
> > +                fault.nested_page_fault = false;
> > +                fault.address = gva;
> > +        }
> > +        vcpu->arch.walk_mmu->inject_page_fault(vcpu, &fault);
>
> Should this "vcpu->arch.walk_mmu->inject_page_fault(vcpu, &fault)"
> be inside the last brace?
> Otherwise an uninitialized fault variable will be passed to
> walk_mmu->inject_page_fault.
Good catch. You're right, will fix it in v3.
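
Roughly, I'm thinking of something along these lines for v3 (untested
sketch only, to make sure @fault is initialized on every path before it
reaches the injector; the final version might instead rely on
gva_to_gpa() filling the fault itself on the UNMAPPED_GVA path):

void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_code)
{
        /* Zero-init so no stale stack data can ever be injected. */
        struct x86_exception fault = {};

        if (!(error_code & PFERR_PRESENT_MASK) ||
            vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, error_code, &fault) != UNMAPPED_GVA) {
                /*
                 * If gva_to_gpa() succeeded, the page tables probably do
                 * not match the TLB. Just proceed with the error code
                 * that the processor gave.
                 */
                fault.vector = PF_VECTOR;
                fault.error_code_valid = true;
                fault.error_code = error_code;
                fault.nested_page_fault = false;
                fault.address = gva;
        }
        vcpu->arch.walk_mmu->inject_page_fault(vcpu, &fault);
}

If it turns out the walker already fills @fault whenever it returns
UNMAPPED_GVA, the zero-init is just belt-and-suspenders and I can drop
it.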
>
> > +}
> > +EXPORT_SYMBOL_GPL(kvm_fixup_and_inject_pf_error);
> > +
> >  EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
> >  EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_fast_mmio);
> >  EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);
> > diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
> > index 6eb62e97e59f..239ae0f3e40b 100644
> > --- a/arch/x86/kvm/x86.h
> > +++ b/arch/x86/kvm/x86.h
> > @@ -272,6 +272,7 @@ int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
> >  bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
> >                                            int page_num);
> >  bool kvm_vector_hashing_enabled(void);
> > +void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_code);
> >  int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
> >                              int emulation_type, void *insn, int insn_len);
> >  fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu);
> > --
> > 2.26.2
> >