Re: [PATCH v6 21/38] KVM: nVMX: hyper-v: Enable L2 TLB flush

From: Maxim Levitsky
Date: Tue Jun 07 2022 - 06:02:40 EST


On Mon, 2022-06-06 at 10:36 +0200, Vitaly Kuznetsov wrote:
> Enable the L2 TLB flush feature on nVMX when:
> - Enlightened VMCS is in use.
> - The feature flag is enabled in eVMCS.
> - The feature flag is enabled in partition assist page.
>
> When requested, perform a synthetic vmexit to L1 after processing the
> TLB flush call (HV_VMX_SYNTHETIC_EXIT_REASON_TRAP_AFTER_FLUSH).
>
> Note: nested_evmcs_l2_tlb_flush_enabled() uses the cached VP assist page
> copy which gets updated from nested_vmx_handle_enlightened_vmptrld(). This
> is also guaranteed to happen post-migration with an eVMCS-backed L2 running.
>
> Signed-off-by: Vitaly Kuznetsov <vkuznets@xxxxxxxxxx>
> ---
>  arch/x86/kvm/vmx/evmcs.c  | 17 +++++++++++++++++
>  arch/x86/kvm/vmx/evmcs.h  | 10 ++++++++++
>  arch/x86/kvm/vmx/nested.c | 22 ++++++++++++++++++++++
>  3 files changed, 49 insertions(+)
>
> diff --git a/arch/x86/kvm/vmx/evmcs.c b/arch/x86/kvm/vmx/evmcs.c
> index 7cd7b16942c6..870de69172be 100644
> --- a/arch/x86/kvm/vmx/evmcs.c
> +++ b/arch/x86/kvm/vmx/evmcs.c
> @@ -6,6 +6,7 @@
>  #include "../hyperv.h"
>  #include "../cpuid.h"
>  #include "evmcs.h"
> +#include "nested.h"
>  #include "vmcs.h"
>  #include "vmx.h"
>  #include "trace.h"
> @@ -433,6 +434,22 @@ int nested_enable_evmcs(struct kvm_vcpu *vcpu,
>         return 0;
>  }
>  
> +bool nested_evmcs_l2_tlb_flush_enabled(struct kvm_vcpu *vcpu)
> +{
> +       struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
> +       struct vcpu_vmx *vmx = to_vmx(vcpu);
> +       struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;
> +
> +       if (!hv_vcpu || !evmcs)
> +               return false;
> +
> +       if (!evmcs->hv_enlightenments_control.nested_flush_hypercall)
> +               return false;
> +
> +       return hv_vcpu->vp_assist_page.nested_control.features.directhypercall;
> +}
> +
>  void vmx_hv_inject_synthetic_vmexit_post_tlb_flush(struct kvm_vcpu *vcpu)
>  {
> +       nested_vmx_vmexit(vcpu, HV_VMX_SYNTHETIC_EXIT_REASON_TRAP_AFTER_FLUSH, 0, 0);
>  }
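
The VMX side ends up pleasantly small. Just to spell out my understanding
for anyone reading along: this function should be reached through the
nested hook introduced earlier in the series, i.e. roughly (paraphrased
from memory, not a verbatim quote of that patch):

	/* arch/x86/kvm/vmx/nested.c -- rough sketch of the wiring */
	struct kvm_x86_nested_ops vmx_nested_ops = {
		/* ... existing callbacks ... */
		.hv_inject_synthetic_vmexit_post_tlb_flush =
			vmx_hv_inject_synthetic_vmexit_post_tlb_flush,
	};

so the common Hyper-V emulation code in L0 stays vendor-neutral and simply
invokes the hook after serving an L2 TLB flush hypercall when L1 asked to
be trapped afterwards.
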
> diff --git a/arch/x86/kvm/vmx/evmcs.h b/arch/x86/kvm/vmx/evmcs.h
> index 22d238b36238..0267b6191e6c 100644
> --- a/arch/x86/kvm/vmx/evmcs.h
> +++ b/arch/x86/kvm/vmx/evmcs.h
> @@ -66,6 +66,15 @@ DECLARE_STATIC_KEY_FALSE(enable_evmcs);
>  #define EVMCS1_UNSUPPORTED_VMENTRY_CTRL (VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL)
>  #define EVMCS1_UNSUPPORTED_VMFUNC (VMX_VMFUNC_EPTP_SWITCHING)
>  
> +/*
> + * Note, Hyper-V isn't actually stealing bit 28 from Intel, just abusing it by
> + * pairing it with architecturally impossible exit reasons.  Bit 28 is set only
> + * on SMI exits to an SMI transfer monitor (STM) and if and only if an MTF VM-Exit
> + * is pending.  I.e. it will never be set by hardware for non-SMI exits (there
> + * are only three), nor will it ever be set unless the VMM is an STM.
> + */
> +#define HV_VMX_SYNTHETIC_EXIT_REASON_TRAP_AFTER_FLUSH 0x10000031
> +
>  struct evmcs_field {
>         u16 offset;
>         u16 clean_field;
> @@ -245,6 +254,7 @@ int nested_enable_evmcs(struct kvm_vcpu *vcpu,
>                         uint16_t *vmcs_version);
>  void nested_evmcs_filter_control_msr(u32 msr_index, u64 *pdata);
>  int nested_evmcs_check_controls(struct vmcs12 *vmcs12);
> +bool nested_evmcs_l2_tlb_flush_enabled(struct kvm_vcpu *vcpu);
>  void vmx_hv_inject_synthetic_vmexit_post_tlb_flush(struct kvm_vcpu *vcpu);
>  
>  #endif /* __KVM_X86_VMX_EVMCS_H */
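
Nice trick with the synthetic exit reason, by the way. For anyone decoding
the magic number: 0x10000031 is bit 28 ORed with basic exit reason 49
(EXIT_REASON_EPT_MISCONFIG), a combination real hardware can never generate
since bit 28 is only ever set on SMI exits to an STM. Something like the
below (illustration only, not a suggestion for the patch) spells that out:

	/* illustration only; needs <linux/bits.h>, <linux/build_bug.h>, <asm/vmx.h> */
	static_assert(HV_VMX_SYNTHETIC_EXIT_REASON_TRAP_AFTER_FLUSH ==
		      (BIT(28) | EXIT_REASON_EPT_MISCONFIG));
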
> diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
> index 87bff81f7f3e..69d06f77d7b4 100644
> --- a/arch/x86/kvm/vmx/nested.c
> +++ b/arch/x86/kvm/vmx/nested.c
> @@ -1170,6 +1170,17 @@ static void nested_vmx_transition_tlb_flush(struct kvm_vcpu *vcpu,
>  {
>         struct vcpu_vmx *vmx = to_vmx(vcpu);
>  
> +       /*
> +        * KVM_REQ_HV_TLB_FLUSH flushes entries from either L1's VP_ID or
> +        * L2's VP_ID upon request from the guest. Make sure to check for
> +        * pending entries even when the request got misplaced (e.g. a
> +        * transition from L2->L1 happened while an L2 TLB flush request
> +        * was being processed, or vice versa). kvm_hv_vcpu_flush_tlb()
> +        * does not flush anything if the corresponding buffer is empty.
> +        */
> +       if (to_hv_vcpu(vcpu))
> +               kvm_make_request(KVM_REQ_HV_TLB_FLUSH, vcpu);
> +
>         /*
>          * If vmcs12 doesn't use VPID, L1 expects linear and combined mappings
>          * for *all* contexts to be flushed on VM-Enter/VM-Exit, i.e. it's a
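
The unconditional KVM_REQ_HV_TLB_FLUSH here looks right to me: as far as I
understand the flush machinery from the earlier patches, the consumer picks
the per-VP_ID buffer based on the *current* mode, roughly like this (names
from memory, they may be slightly off):

	/* rough sketch of the consumer side, not a quote of the series */
	int idx = is_guest_mode(vcpu) ? HV_L2_TLB_FLUSH_FIFO :
					HV_L1_TLB_FLUSH_FIFO;
	struct kvm_vcpu_hv_tlb_flush_fifo *fifo =
			&to_hv_vcpu(vcpu)->tlb_flush_fifo[idx];

	/* an empty fifo means nothing gets flushed */

so re-raising the request on every nested transition costs at most a check
of an empty buffer.
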
> @@ -3278,6 +3289,12 @@ static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
>  
>  static bool vmx_get_nested_state_pages(struct kvm_vcpu *vcpu)
>  {
> +       /*
> +        * Note: when eVMCS is in use, nested_get_evmcs_page() also updates
> +        * the 'vp_assist_page' copy in 'struct kvm_vcpu_hv'; this is
> +        * mandatory for nested_evmcs_l2_tlb_flush_enabled() to work
> +        * correctly post-migration.
> +        */
>         if (!nested_get_evmcs_page(vcpu)) {
>                 pr_debug_ratelimited("%s: enlightened vmptrld failed\n",
>                                      __func__);
> @@ -6007,6 +6024,11 @@ static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu,
>                  * Handle L2's bus locks in L0 directly.
>                  */
>                 return true;
> +       case EXIT_REASON_VMCALL:
> +               /* Hyper-V L2 TLB flush hypercall is handled by L0 */
> +               return guest_hv_cpuid_has_l2_tlb_flush(vcpu) &&
> +                       nested_evmcs_l2_tlb_flush_enabled(vcpu) &&
> +                       kvm_hv_is_tlb_flush_hcall(vcpu);
>         default:
>                 break;
>         }
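
For the VMCALL case: the three-way check reads well. To spell out my
understanding of the last condition, kvm_hv_is_tlb_flush_hcall() (added
earlier in the series) should boil down to peeking at the hypercall code
the guest put in its registers, along the lines of (sketch from memory,
not a verbatim quote):

	u16 code = is_64_bit_hypercall(vcpu) ? kvm_rcx_read(vcpu) :
					       kvm_rax_read(vcpu);

	return code == HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE ||
	       code == HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX ||
	       code == HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST ||
	       code == HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX;

which is cheap enough to evaluate on every L2 VMCALL exit.
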


Reviewed-by: Maxim Levitsky <mlevitsk@xxxxxxxxxx>

Best regards,
Maxim Levitsky