Re: [PATCH RFC v8 01/56] KVM: x86: Add 'fault_is_private' x86 op

From: Zhi Wang
Date: Wed Mar 01 2023 - 05:25:46 EST


On Mon, 20 Feb 2023 12:37:52 -0600
Michael Roth <michael.roth@xxxxxxx> wrote:

Basically, I don't think kvm_mmu_fault_is_private() is promising after going
through both the SNP and TDX patches:

1) The fault path is performance-critical, yet kvm_mmu_fault_is_private()
always does a gfn_to_memslot() regardless of whether SNP/TDX is enabled. It
will mostly hit slots->last_used_slot, but the worst case is a full RB-tree
search.

Adding extra overhead to the generic fault path needs to be reconsidered
carefully. At the very least, first check whether the guest is a CC (SNP/TDX)
guest.
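
For instance, a guard at the top would keep the cost off the common path. A
minimal sketch, assuming a hypothetical kvm_is_cc_guest() predicate (not
defined in this series; it could be derived from the VM type or a per-VM
flag):

static bool kvm_mmu_fault_is_private(struct kvm *kvm, gpa_t gpa, u64 err)
{
	struct kvm_memory_slot *slot;
	bool private_fault = false;

	/*
	 * Hypothetical early exit: only CC (SNP/TDX) guests can take
	 * private faults, so everyone else skips the memslot lookup.
	 */
	if (!kvm_is_cc_guest(kvm))
		return false;

	slot = gfn_to_memslot(kvm, gpa_to_gfn(gpa));
	if (!slot || !kvm_slot_can_be_private(slot))
		return false;

	/* Let the vendor callback resolve the fault type if it can. */
	if (static_call(kvm_x86_fault_is_private)(kvm, gpa, err, &private_fault))
		return private_fault;

	/* Otherwise fall back to the userspace-managed attributes. */
	return kvm_mem_is_private(kvm, gpa_to_gfn(gpa));
}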

2) Just after the gfn_to_memslot() in kvm_mmu_fault_is_private(), there is
another memslot lookup via kvm_vcpu_gfn_to_memslot():

static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
					u64 err, bool prefetch)
{
	struct kvm_page_fault fault = {
		.addr = cr2_or_gpa,
		.error_code = lower_32_bits(err),
		.exec = err & PFERR_FETCH_MASK,
		.write = err & PFERR_WRITE_MASK,
		.present = err & PFERR_PRESENT_MASK,
		.rsvd = err & PFERR_RSVD_MASK,
		.user = err & PFERR_USER_MASK,
		.prefetch = prefetch,
		.is_tdp = likely(vcpu->arch.mmu->page_fault == kvm_tdp_page_fault),
		.nx_huge_page_workaround_enabled =
			is_nx_huge_page_enabled(vcpu->kvm),

		.max_level = KVM_MAX_HUGEPAGE_LEVEL,
		.req_level = PG_LEVEL_4K,
		.goal_level = PG_LEVEL_4K,
		.is_private = kvm_mmu_fault_is_private(vcpu->kvm, cr2_or_gpa, err),
	};
	int r;

	if (vcpu->arch.mmu->root_role.direct) {
		fault.gfn = fault.addr >> PAGE_SHIFT;
		/* here */
		fault.slot = kvm_vcpu_gfn_to_memslot(vcpu, fault.gfn);
	}

I was wondering whether checking for a private slot with
kvm_slot_can_be_private() is necessary in kvm_mmu_fault_is_private() at all.

The TDP MMU expects fault.is_private to indicate whether the CPU thinks the
fault is private (for SNP this is carried in the PF error code; for TDX it is
the shared bit in the fault GPA). The TDP MMU will then check whether the
slot is a private slot, and leave it to userspace to handle the case where
the two views disagree.
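
To illustrate, the vendor callbacks only need to decode the architectural
bit. A sketch, under the assumption that PFERR_GUEST_ENC_MASK (from the SNP
series) and kvm_gfn_shared_mask() (from the TDX series) land as proposed
there:

/* SVM/SNP: the "encrypted" bit is carried in the #NPF error code. */
static bool svm_fault_is_private(struct kvm *kvm, gpa_t gpa, u64 error_code,
				 bool *private_fault)
{
	*private_fault = !!(error_code & PFERR_GUEST_ENC_MASK);
	return true;
}

/* VMX/TDX: a fault is private when the shared bit is clear in the GPA. */
static bool tdx_fault_is_private(struct kvm *kvm, gpa_t gpa, u64 error_code,
				 bool *private_fault)
{
	*private_fault = !(gpa & gfn_to_gpa(kvm_gfn_shared_mask(kvm)));
	return true;
}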

My points:

1) Resolving the page fault error code in kvm_x86_ops.fault_is_private and
setting fault.is_private is enough. The rest can be handled by the TDP MMU
(see the sketch below).

2) Put kvm_x86_ops.fault_is_private in a separate patch so that the TDX
series can include it. (The 64-bit error code part can stay in another
patch.)
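
With that, kvm_mmu_do_page_fault() could consume the static call through a
thin wrapper. Again just a sketch; kvm_fault_is_private() is a made-up
helper name:

static inline bool kvm_fault_is_private(struct kvm *kvm, gpa_t gpa, u64 err)
{
	bool private_fault = false;

	/*
	 * The optional op returns true when the vendor code resolved the
	 * fault type; the OPTIONAL_RET0 default leaves private_fault
	 * false for guests without a CC-aware backend.
	 */
	static_call(kvm_x86_fault_is_private)(kvm, gpa, err, &private_fault);
	return private_fault;
}

and then in kvm_mmu_do_page_fault():

	.is_private = kvm_fault_is_private(vcpu->kvm, cr2_or_gpa, err),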

> This callback is used by the KVM MMU to check whether a #NPF was for a
> private GPA or not.
>
> In some cases the full 64-bit error code for the #NPF will be needed to
> make this determination, so also update kvm_mmu_do_page_fault() to
> accept the full 64-bit value so it can be plumbed through to the
> callback.
>
> Signed-off-by: Michael Roth <michael.roth@xxxxxxx>
> ---
> arch/x86/include/asm/kvm-x86-ops.h | 1 +
> arch/x86/include/asm/kvm_host.h | 1 +
> arch/x86/kvm/mmu/mmu.c | 3 +--
> arch/x86/kvm/mmu/mmu_internal.h | 37 +++++++++++++++++++++++++++---
> 4 files changed, 37 insertions(+), 5 deletions(-)
>
> diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
> index 8dc345cc6318..72183da010b8 100644
> --- a/arch/x86/include/asm/kvm-x86-ops.h
> +++ b/arch/x86/include/asm/kvm-x86-ops.h
> @@ -131,6 +131,7 @@ KVM_X86_OP(msr_filter_changed)
> KVM_X86_OP(complete_emulated_msr)
> KVM_X86_OP(vcpu_deliver_sipi_vector)
> KVM_X86_OP_OPTIONAL_RET0(vcpu_get_apicv_inhibit_reasons);
> +KVM_X86_OP_OPTIONAL_RET0(fault_is_private);
>
> #undef KVM_X86_OP
> #undef KVM_X86_OP_OPTIONAL
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index e552374f2357..f856d689dda0 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -1643,6 +1643,7 @@ struct kvm_x86_ops {
>
> void (*load_mmu_pgd)(struct kvm_vcpu *vcpu, hpa_t root_hpa,
> int root_level);
> + bool (*fault_is_private)(struct kvm *kvm, gpa_t gpa, u64 error_code, bool *private_fault);
>
> bool (*has_wbinvd_exit)(void);
>
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index eda615f3951c..fb3f34b7391c 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -5724,8 +5724,7 @@ int noinline kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 err
> }
>
> if (r == RET_PF_INVALID) {
> - r = kvm_mmu_do_page_fault(vcpu, cr2_or_gpa,
> - lower_32_bits(error_code), false);
> + r = kvm_mmu_do_page_fault(vcpu, cr2_or_gpa, error_code, false);
> if (KVM_BUG_ON(r == RET_PF_INVALID, vcpu->kvm))
> return -EIO;
> }
> diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
> index e642d431df4b..557a001210df 100644
> --- a/arch/x86/kvm/mmu/mmu_internal.h
> +++ b/arch/x86/kvm/mmu/mmu_internal.h
> @@ -231,6 +231,37 @@ struct kvm_page_fault {
>
> int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);
>
> +static bool kvm_mmu_fault_is_private(struct kvm *kvm, gpa_t gpa, u64 err)
> +{
> + struct kvm_memory_slot *slot;
> + bool private_fault = false;
> + gfn_t gfn = gpa_to_gfn(gpa);
> +
> + slot = gfn_to_memslot(kvm, gfn);
> + if (!slot) {
> + pr_debug("%s: no slot, GFN: 0x%llx\n", __func__, gfn);
> + goto out;
> + }
> +
> + if (!kvm_slot_can_be_private(slot)) {
> + pr_debug("%s: slot is not private, GFN: 0x%llx\n", __func__, gfn);
> + goto out;
> + }
> +
> + if (static_call(kvm_x86_fault_is_private)(kvm, gpa, err, &private_fault))
> + goto out;
> +
> + /*
> + * Handling below is for UPM self-tests and guests that treat userspace
> + * as the authority on whether a fault should be private or not.
> + */
> + private_fault = kvm_mem_is_private(kvm, gpa >> PAGE_SHIFT);
> +
> +out:
> + pr_debug("%s: GFN: 0x%llx, private: %d\n", __func__, gfn, private_fault);
> + return private_fault;
> +}
> +
> /*
> * Return values of handle_mmio_page_fault(), mmu.page_fault(), fast_page_fault(),
> * and of course kvm_mmu_do_page_fault().
> @@ -262,11 +293,11 @@ enum {
> };
>
> static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
> - u32 err, bool prefetch)
> + u64 err, bool prefetch)
> {
> struct kvm_page_fault fault = {
> .addr = cr2_or_gpa,
> - .error_code = err,
> + .error_code = lower_32_bits(err),
> .exec = err & PFERR_FETCH_MASK,
> .write = err & PFERR_WRITE_MASK,
> .present = err & PFERR_PRESENT_MASK,
> @@ -280,7 +311,7 @@ static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
> .max_level = KVM_MAX_HUGEPAGE_LEVEL,
> .req_level = PG_LEVEL_4K,
> .goal_level = PG_LEVEL_4K,
> - .is_private = kvm_mem_is_private(vcpu->kvm, cr2_or_gpa >> PAGE_SHIFT),
> + .is_private = kvm_mmu_fault_is_private(vcpu->kvm, cr2_or_gpa, err),
> };
> int r;
>