Re: [RFC PATCH V3 3/4] KVM: X86: Alloc role.pae_root shadow page

From: Sean Christopherson
Date: Tue Apr 12 2022 - 18:30:37 EST


On Wed, Mar 30, 2022, Lai Jiangshan wrote:
> From: Lai Jiangshan <jiangshan.ljs@xxxxxxxxxxxx>
>
> Currently pae_root is a special root page. This patch adds a facility to
> allow using kvm_mmu_get_page() to allocate pae_root shadow pages.

I don't think this will work for shadow paging. CR3 only has to be 32-byte aligned
for PAE paging. Unless I'm missing something subtle in the code, KVM will incorrectly
reuse a pae_root if the guest puts multiple PAE CR3s on a single page because KVM's
gfn calculation will drop bits 11:5.
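
Strawman, purely to illustrate the collision (the CR3 values are made up,
gpa_to_gfn() is the existing helper):

	gpa_t cr3_a = 0x1000;	/* guest PDPTEs at page offset 0x00 */
	gpa_t cr3_b = 0x1020;	/* guest PDPTEs at page offset 0x20, same page */

	/* gpa_to_gfn() is just "gpa >> PAGE_SHIFT", i.e. bits 11:5 vanish. */
	gfn_t gfn_a = gpa_to_gfn(cr3_a);	/* == 0x1 */
	gfn_t gfn_b = gpa_to_gfn(cr3_b);	/* == 0x1, same as gfn_a */

A kvm_mmu_get_page() lookup keyed on that gfn will happily hand back the sp
created for cr3_a when the guest loads cr3_b.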

Handling this as a one-off is probably easier. For TDP, only 32-bit KVM with NPT
benefits from reusing roots, and IMO shaving a few pages in that case is not worth
the complexity.

> @@ -332,7 +337,8 @@ union kvm_mmu_page_role {
>  		unsigned ad_disabled:1;
>  		unsigned guest_mode:1;
>  		unsigned glevel:4;
> -		unsigned :2;
> +		unsigned pae_root:1;

If we do end up adding a role bit, it can simply be "root", which may or may not
be useful for other things. is_pae_root() is then a combo of root+level. This
will clean up the code a bit as role.root is (mostly?) hardcoded based on the
function, e.g. root allocators set it, child allocators clear it.
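
Sketch only, assuming the role.root bit lands:

	static bool is_pae_root(union kvm_mmu_page_role role)
	{
		return role.root && role.level == PT32E_ROOT_LEVEL;
	}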

> +		unsigned :1;
>
>  		/*
>  		 * This is left at the top of the word so that
> @@ -699,6 +705,7 @@ struct kvm_vcpu_arch {
>  	struct kvm_mmu_memory_cache mmu_shadow_page_cache;
>  	struct kvm_mmu_memory_cache mmu_gfn_array_cache;
>  	struct kvm_mmu_memory_cache mmu_page_header_cache;
> +	void *mmu_pae_root_cache;
>
>  	/*
>  	 * QEMU userspace and the guest each have their own FPU state.
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index d53037df8177..81ccaa7c1165 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -694,6 +694,35 @@ static void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu)
>  	}
>  }
>
> +static int mmu_topup_pae_root_cache(struct kvm_vcpu *vcpu)
> +{
> +	struct page *page;
> +
> +	if (vcpu->arch.mmu->shadow_root_level != PT32E_ROOT_LEVEL)
> +		return 0;
> +	if (vcpu->arch.mmu_pae_root_cache)
> +		return 0;
> +
> +	page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO | __GFP_DMA32);
> +	if (!page)
> +		return -ENOMEM;
> +	vcpu->arch.mmu_pae_root_cache = page_address(page);
> +
> +	/*
> +	 * CR3 is only 32 bits when PAE paging is used, thus it's impossible to
> +	 * get the CPU to treat the PDPTEs as encrypted.  Decrypt the page so
> +	 * that KVM's writes and the CPU's reads get along.  Note, this is
> +	 * only necessary when using shadow paging, as 64-bit NPT can get at
> +	 * the C-bit even when shadowing 32-bit NPT, and SME isn't supported
> +	 * by 32-bit kernels (when KVM itself uses 32-bit NPT).
> +	 */
> +	if (!tdp_enabled)
> +		set_memory_decrypted((unsigned long)vcpu->arch.mmu_pae_root_cache, 1);
> +	else
> +		WARN_ON_ONCE(shadow_me_mask);
> +	return 0;
> +}
> +
>  static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu, bool maybe_indirect)
>  {
>  	int r;
> @@ -705,6 +734,9 @@ static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu, bool maybe_indirect)
>  		return r;
>  	r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_shadow_page_cache,
>  				       PT64_ROOT_MAX_LEVEL);
> +	if (r)
> +		return r;
> +	r = mmu_topup_pae_root_cache(vcpu);

This doesn't need to be called from the common mmu_topup_memory_caches(), e.g. it
will unnecessarily require allocating another DMA32 page when handling a page fault.
I'd rather call this directly from kvm_mmu_load(), which also makes it more obvious
that the cache really is only used for roots.
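
E.g. something like this in kvm_mmu_load() (sketch, the surrounding code is from
memory so the exact placement may differ):

	int kvm_mmu_load(struct kvm_vcpu *vcpu)
	{
		int r;

		r = mmu_topup_memory_caches(vcpu, !vcpu->arch.mmu->direct_map);
		if (r)
			goto out;
		/* Top up the DMA32 root cache only when a root is (re)loaded. */
		r = mmu_topup_pae_root_cache(vcpu);
		if (r)
			goto out;
		...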

>  	if (r)
>  		return r;
>  	if (maybe_indirect) {
> @@ -717,12 +749,23 @@ static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu, bool maybe_indirect)
>  				       PT64_ROOT_MAX_LEVEL);
>  	}
>

...

>  static void link_shadow_page(struct kvm_vcpu *vcpu, u64 *sptep,
>  			     struct kvm_mmu_page *sp)
>  {
> +	struct kvm_mmu_page *parent_sp = sptep_to_sp(sptep);
>  	u64 spte;
>
>  	BUILD_BUG_ON(VMX_EPT_WRITABLE_MASK != PT_WRITABLE_MASK);
>
> -	spte = make_nonleaf_spte(sp->spt, sp_ad_disabled(sp));
> +	if (!parent_sp->role.pae_root)

Hmm, without role.root, this could be:

	if (sp->role.level == (PT32E_ROOT_LEVEL - 1) &&
	    ((__pa(sptep) & PT64_BASE_ADDR_MASK) == vcpu->arch.mmu->root.hpa))
		spte = make_pae_pdpte(sp->spt);
	else
		spte = make_nonleaf_spte(sp->spt, sp_ad_disabled(sp));

Which is gross, but it works. We could also do FNAME(link_shadow_page) to send
PAE roots down a dedicated path (also gross). Point being, I don't think we
strictly need a "root" flag unless the PAE roots are put in mmu_page_hash.
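
For reference, the dedicated path would be roughly (strawman, reusing the
existing mmu_page_add_parent_pte()):

	static void FNAME(link_shadow_page)(struct kvm_vcpu *vcpu, u64 *sptep,
					    struct kvm_mmu_page *sp)
	{
		mmu_spte_set(sptep, make_pae_pdpte(sp->spt));
		mmu_page_add_parent_pte(vcpu, sp, sptep);
	}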

> +		spte = make_nonleaf_spte(sp->spt, sp_ad_disabled(sp));
> +	else
> +		spte = make_pae_pdpte(sp->spt);
>
>  	mmu_spte_set(sptep, spte);
>
> @@ -4782,6 +4847,8 @@ kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu,
>  	role.base.level = kvm_mmu_get_tdp_level(vcpu);
>  	role.base.direct = true;
>  	role.base.has_4_byte_gpte = false;
> +	if (role.base.level == PT32E_ROOT_LEVEL)
> +		role.base.pae_root = 1;
>
>  	return role;
>  }
> @@ -4848,6 +4915,9 @@ kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu,
>  	else
>  		role.base.level = PT64_ROOT_4LEVEL;
>
> +	if (role.base.level == PT32E_ROOT_LEVEL)
> +		role.base.pae_root = 1;
> +
>  	return role;
>  }
>
> @@ -4893,6 +4963,8 @@ kvm_calc_shadow_npt_root_page_role(struct kvm_vcpu *vcpu,
>
>  	role.base.direct = false;
>  	role.base.level = kvm_mmu_get_tdp_level(vcpu);
> +	if (role.base.level == PT32E_ROOT_LEVEL)
> +		role.base.pae_root = 1;
>
>  	return role;
>  }
> diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
> index 67489a060eba..1015f33e0758 100644
> --- a/arch/x86/kvm/mmu/paging_tmpl.h
> +++ b/arch/x86/kvm/mmu/paging_tmpl.h
> @@ -1043,6 +1043,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
>  		.access = 0x7,
>  		.quadrant = 0x3,
>  		.glevel = 0xf,
> +		.pae_root = 0x1,
>  	};
>
>  	/*
> --
> 2.19.1.6.gb485710b
>