Re: [PATCH 4/6] kvm: nVMX: support EPT accessed/dirty bits
From: Bandan Das
Date: Tue Apr 11 2017 - 19:35:32 EST
Paolo Bonzini <pbonzini@xxxxxxxxxx> writes:
...
> accessed_dirty = have_ad ? PT_GUEST_ACCESSED_MASK : 0;
> +
> + /*
> + * FIXME: on Intel processors, loads of the PDPTE registers for PAE paging
> + * by the MOV to CR instruction are treated as reads and do not cause the
> + * processor to set the dirty flag in tany EPT paging-structure entry.
> + */
Minor typo: "in any EPT paging-structure entry".
> + nested_access = (have_ad ? PFERR_WRITE_MASK : 0) | PFERR_USER_MASK;
> +
> pt_access = pte_access = ACC_ALL;
> ++walker->level;
>
> @@ -338,7 +337,7 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
> walker->pte_gpa[walker->level - 1] = pte_gpa;
>
> real_gfn = mmu->translate_gpa(vcpu, gfn_to_gpa(table_gfn),
> - PFERR_USER_MASK|PFERR_WRITE_MASK,
> + nested_access,
> &walker->fault);
I can't seem to understand the significance of this change (or, for that matter,
what the behavior was before this change).
mmu->translate_gpa() just returns gfn_to_gpa(table_gfn), right?
Bandan
> /*
> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> index 1c372600a962..6aaecc78dd71 100644
> --- a/arch/x86/kvm/vmx.c
> +++ b/arch/x86/kvm/vmx.c
> @@ -2767,6 +2767,8 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
> vmx->nested.nested_vmx_ept_caps |= VMX_EPT_EXTENT_GLOBAL_BIT |
> VMX_EPT_EXTENT_CONTEXT_BIT | VMX_EPT_2MB_PAGE_BIT |
> VMX_EPT_1GB_PAGE_BIT;
> + if (enable_ept_ad_bits)
> + vmx->nested.nested_vmx_ept_caps |= VMX_EPT_AD_BIT;
> } else
> vmx->nested.nested_vmx_ept_caps = 0;
>
> @@ -6211,6 +6213,18 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu)
>
> exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
>
> + if (is_guest_mode(vcpu)
> + && !(exit_qualification & EPT_VIOLATION_GVA_TRANSLATED)) {
> + /*
> + * Fix up exit_qualification according to whether guest
> + * page table accesses are reads or writes.
> + */
> + u64 eptp = nested_ept_get_cr3(vcpu);
> + exit_qualification &= ~EPT_VIOLATION_ACC_WRITE;
> + if (eptp & VMX_EPT_AD_ENABLE_BIT)
> + exit_qualification |= EPT_VIOLATION_ACC_WRITE;
> + }
> +
> /*
> * EPT violation happened while executing iret from NMI,
> * "blocked by NMI" bit has to be set before next VM entry.
> @@ -9416,17 +9430,26 @@ static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu)
> return get_vmcs12(vcpu)->ept_pointer;
> }
>
> -static void nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
> +static int nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
> {
> + u64 eptp;
> +
> WARN_ON(mmu_is_nested(vcpu));
> + eptp = nested_ept_get_cr3(vcpu);
> + if ((eptp & VMX_EPT_AD_ENABLE_BIT) && !enable_ept_ad_bits)
> + return 1;
> +
> + kvm_mmu_unload(vcpu);
> kvm_init_shadow_ept_mmu(vcpu,
> to_vmx(vcpu)->nested.nested_vmx_ept_caps &
> - VMX_EPT_EXECUTE_ONLY_BIT);
> + VMX_EPT_EXECUTE_ONLY_BIT,
> + eptp & VMX_EPT_AD_ENABLE_BIT);
> vcpu->arch.mmu.set_cr3 = vmx_set_cr3;
> vcpu->arch.mmu.get_cr3 = nested_ept_get_cr3;
> vcpu->arch.mmu.inject_page_fault = nested_ept_inject_page_fault;
>
> vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
> + return 0;
> }
>
> static void nested_ept_uninit_mmu_context(struct kvm_vcpu *vcpu)
> @@ -10188,8 +10211,10 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
> }
>
> if (nested_cpu_has_ept(vmcs12)) {
> - kvm_mmu_unload(vcpu);
> - nested_ept_init_mmu_context(vcpu);
> + if (nested_ept_init_mmu_context(vcpu)) {
> + *entry_failure_code = ENTRY_FAIL_DEFAULT;
> + return 1;
> + }
> } else if (nested_cpu_has2(vmcs12,
> SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
> vmx_flush_tlb_ept_only(vcpu);