Re: [PATCH v10 7/9] KVM: Update lpage info when private/shared memory are mixed
From: Isaku Yamahata
Date: Wed Dec 07 2022 - 01:42:32 EST
On Tue, Dec 06, 2022 at 08:02:24PM +0800,
Chao Peng <chao.p.peng@xxxxxxxxxxxxxxx> wrote:
> On Mon, Dec 05, 2022 at 02:49:59PM -0800, Isaku Yamahata wrote:
> > On Fri, Dec 02, 2022 at 02:13:45PM +0800,
> > Chao Peng <chao.p.peng@xxxxxxxxxxxxxxx> wrote:
> >
> > > A large page with mixed private/shared subpages can't be mapped as a large
> > > page since its private and shared subpages come from different memory
> > > backends and may also be treated differently by the architecture. When
> > > private and shared memory are mixed within a large page, the current
> > > lpage_info is not sufficient to decide whether the page can be mapped as
> > > a large page, and additional private/shared mixed information is needed.
> > >
> > > Tracking this 'mixed' information with the current count-like
> > > 'disallow_lpage' is a bit of a challenge, so reserve a bit in
> > > 'disallow_lpage' to indicate that a large page has mixed private/shared
> > > subpages, and update this 'mixed' bit whenever the memory attribute is
> > > changed between private and shared.
> > >
> > > Signed-off-by: Chao Peng <chao.p.peng@xxxxxxxxxxxxxxx>
> > > ---
> > > arch/x86/include/asm/kvm_host.h | 8 ++
> > > arch/x86/kvm/mmu/mmu.c | 134 +++++++++++++++++++++++++++++++-
> > > arch/x86/kvm/x86.c | 2 +
> > > include/linux/kvm_host.h | 19 +++++
> > > virt/kvm/kvm_main.c | 9 ++-
> > > 5 files changed, 169 insertions(+), 3 deletions(-)
> > >
> > > diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> > > index 283cbb83d6ae..7772ab37ac89 100644
> > > --- a/arch/x86/include/asm/kvm_host.h
> > > +++ b/arch/x86/include/asm/kvm_host.h
> > > @@ -38,6 +38,7 @@
> > > #include <asm/hyperv-tlfs.h>
> > >
> > > #define __KVM_HAVE_ARCH_VCPU_DEBUGFS
> > > +#define __KVM_HAVE_ARCH_SET_MEMORY_ATTRIBUTES
> > >
> > > #define KVM_MAX_VCPUS 1024
> > >
> > > @@ -1011,6 +1012,13 @@ struct kvm_vcpu_arch {
> > > #endif
> > > };
> > >
> > > +/*
> > > + * Use a bit in disallow_lpage to indicate that private/shared pages are
> > > + * mixed at this level. The remaining bits are used as a reference count.
> > > + */
> > > +#define KVM_LPAGE_PRIVATE_SHARED_MIXED (1U << 31)
> > > +#define KVM_LPAGE_COUNT_MAX ((1U << 31) - 1)
> > > +
> > > struct kvm_lpage_info {
> > > int disallow_lpage;
> > > };
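
Just to confirm my reading of the encoding (illustrative only, not part of
the patch): bit 31 of disallow_lpage carries the mixed flag and the low 31
bits remain the usual reference count, i.e. roughly

	int count  = linfo->disallow_lpage & KVM_LPAGE_COUNT_MAX;
	bool mixed = linfo->disallow_lpage & KVM_LPAGE_PRIVATE_SHARED_MIXED;

so the existing count users keep working as long as the count never reaches
bit 31, which the WARN_ON below guards against.
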
> > > diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> > > index e2c70b5afa3e..2190fd8c95c0 100644
> > > --- a/arch/x86/kvm/mmu/mmu.c
> > > +++ b/arch/x86/kvm/mmu/mmu.c
> > > @@ -763,11 +763,16 @@ static void update_gfn_disallow_lpage_count(const struct kvm_memory_slot *slot,
> > > {
> > > struct kvm_lpage_info *linfo;
> > > int i;
> > > + int disallow_count;
> > >
> > > for (i = PG_LEVEL_2M; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
> > > linfo = lpage_info_slot(gfn, slot, i);
> > > +
> > > + disallow_count = linfo->disallow_lpage & KVM_LPAGE_COUNT_MAX;
> > > + WARN_ON(disallow_count + count < 0 ||
> > > + disallow_count > KVM_LPAGE_COUNT_MAX - count);
> > > +
> > > linfo->disallow_lpage += count;
> > > - WARN_ON(linfo->disallow_lpage < 0);
> > > }
> > > }
> > >
> > > @@ -6986,3 +6991,130 @@ void kvm_mmu_pre_destroy_vm(struct kvm *kvm)
> > > if (kvm->arch.nx_huge_page_recovery_thread)
> > > kthread_stop(kvm->arch.nx_huge_page_recovery_thread);
> > > }
> > > +
> > > +static bool linfo_is_mixed(struct kvm_lpage_info *linfo)
> > > +{
> > > + return linfo->disallow_lpage & KVM_LPAGE_PRIVATE_SHARED_MIXED;
> > > +}
> > > +
> > > +static void linfo_set_mixed(gfn_t gfn, struct kvm_memory_slot *slot,
> > > + int level, bool mixed)
> > > +{
> > > + struct kvm_lpage_info *linfo = lpage_info_slot(gfn, slot, level);
> > > +
> > > + if (mixed)
> > > + linfo->disallow_lpage |= KVM_LPAGE_PRIVATE_SHARED_MIXED;
> > > + else
> > > + linfo->disallow_lpage &= ~KVM_LPAGE_PRIVATE_SHARED_MIXED;
> > > +}
> > > +
> > > +static bool is_expected_attr_entry(void *entry, unsigned long expected_attrs)
> > > +{
> > > + bool expect_private = expected_attrs & KVM_MEMORY_ATTRIBUTE_PRIVATE;
> > > +
> > > + if (xa_to_value(entry) & KVM_MEMORY_ATTRIBUTE_PRIVATE) {
> > > + if (!expect_private)
> > > + return false;
> > > + } else if (expect_private)
> > > + return false;
> > > +
> > > + return true;
> > > +}
> > > +
> > > +static bool mem_attrs_mixed_2m(struct kvm *kvm, unsigned long attrs,
> > > + gfn_t start, gfn_t end)
> > > +{
> > > + XA_STATE(xas, &kvm->mem_attr_array, start);
> > > + gfn_t gfn = start;
> > > + void *entry;
> > > + bool mixed = false;
> > > +
> > > + rcu_read_lock();
> > > + entry = xas_load(&xas);
> > > + while (gfn < end) {
> > > + if (xas_retry(&xas, entry))
> > > + continue;
> > > +
> > > + KVM_BUG_ON(gfn != xas.xa_index, kvm);
> > > +
> > > + if (!is_expected_attr_entry(entry, attrs)) {
> > > + mixed = true;
> > > + break;
> > > + }
> > > +
> > > + entry = xas_next(&xas);
> > > + gfn++;
> > > + }
> > > +
> > > + rcu_read_unlock();
> > > + return mixed;
> > > +}
> > > +
> > > +static bool mem_attrs_mixed(struct kvm *kvm, struct kvm_memory_slot *slot,
> > > + int level, unsigned long attrs,
> > > + gfn_t start, gfn_t end)
> > > +{
> > > + unsigned long gfn;
> > > +
> > > + if (level == PG_LEVEL_2M)
> > > + return mem_attrs_mixed_2m(kvm, attrs, start, end);
> > > +
> > > + for (gfn = start; gfn < end; gfn += KVM_PAGES_PER_HPAGE(level - 1))
> > > + if (linfo_is_mixed(lpage_info_slot(gfn, slot, level - 1)) ||
> > > + !is_expected_attr_entry(xa_load(&kvm->mem_attr_array, gfn),
> > > + attrs))
> > > + return true;
> > > + return false;
> > > +}
> > > +
> > > +static void kvm_update_lpage_private_shared_mixed(struct kvm *kvm,
> > > + struct kvm_memory_slot *slot,
> > > + unsigned long attrs,
> > > + gfn_t start, gfn_t end)
> > > +{
> > > + unsigned long pages, mask;
> > > + gfn_t gfn, gfn_end, first, last;
> > > + int level;
> > > + bool mixed;
> > > +
> > > + /*
> > > + * The sequence matters here: we set the higher level based on the
> > > + * lower level's scanning result.
> > > + */
> > > + for (level = PG_LEVEL_2M; level <= KVM_MAX_HUGEPAGE_LEVEL; level++) {
> > > + pages = KVM_PAGES_PER_HPAGE(level);
> > > + mask = ~(pages - 1);
> > > + first = start & mask;
> > > + last = (end - 1) & mask;
> > > +
> > > + /*
> > > + * We only need to scan the head and tail pages; for the middle pages
> > > + * we know they will not be mixed.
> > > + */
> > > + gfn = max(first, slot->base_gfn);
> > > + gfn_end = min(first + pages, slot->base_gfn + slot->npages);
> > > + mixed = mem_attrs_mixed(kvm, slot, level, attrs, gfn, gfn_end);
> > > + linfo_set_mixed(gfn, slot, level, mixed);
> > > +
> > > + if (first == last)
> > > + return;
> >
> >
> > continue.
>
> Ya!
>
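To spell out what I meant, just (untested sketch on top of this patch):

	if (first == last)
		continue;

so that the loop goes on to update the higher levels instead of returning
early.
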
> >
> > > +
> > > + for (gfn = first + pages; gfn < last; gfn += pages)
> > > + linfo_set_mixed(gfn, slot, level, false);
> > > +
> > > + gfn = last;
> > > + gfn_end = min(last + pages, slot->base_gfn + slot->npages);
> >
> > if (gfn == gfn_end) continue.
>
> Do you see a case where gfn can be equal to gfn_end? Though it does not
> hurt to add a check.
If last == base_gfn + npages, gfn == gfn_end can occur.
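i.e. something like the below (untested; assuming the tail page is scanned
the same way as the head page in the rest of this function, which is not
quoted here):

	gfn = last;
	gfn_end = min(last + pages, slot->base_gfn + slot->npages);
	if (gfn == gfn_end)
		continue;
	mixed = mem_attrs_mixed(kvm, slot, level, attrs, gfn, gfn_end);
	linfo_set_mixed(gfn, slot, level, mixed);
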
> > > diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> > > index 9a07380f8d3c..5aefcff614d2 100644
> > > --- a/arch/x86/kvm/x86.c
> > > +++ b/arch/x86/kvm/x86.c
> > > @@ -12362,6 +12362,8 @@ static int kvm_alloc_memslot_metadata(struct kvm *kvm,
> > > if ((slot->base_gfn + npages) & (KVM_PAGES_PER_HPAGE(level) - 1))
> > > linfo[lpages - 1].disallow_lpage = 1;
> > > ugfn = slot->userspace_addr >> PAGE_SHIFT;
> > > + if (kvm_slot_can_be_private(slot))
> > > + ugfn |= slot->restricted_offset >> PAGE_SHIFT;
> >
> > Is there any alignment restriction? If not, it should be +=.
> > In practice, alignment will hold though.
>
> All we need here is to check whether both userspace_addr and
> restricted_offset are aligned to HPAGE_SIZE or not. '+=' can actually
> yield a wrong value in cases where userspace_addr + restricted_offset is
> aligned to HPAGE_SIZE but individually they are not aligned to HPAGE_SIZE.
Ah, got it. The below comment explains it.
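
For completeness, a made-up example of why '|=' (rather than '+=') catches
this, assuming 4K base pages so KVM_PAGES_PER_HPAGE(PG_LEVEL_2M) - 1 == 0x1ff:

	/* both misaligned by 1M inside a 2M page; numbers are made up */
	userspace_addr >> PAGE_SHIFT          -> low 9 bits 0x100
	slot->restricted_offset >> PAGE_SHIFT -> low 9 bits 0x100

With '+=' the low bits sum to 0x200 and vanish under the 0x1ff mask, so the
alignment check below would wrongly keep large pages enabled; with '|=' the
low bits stay 0x100 and the check correctly disables them.
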
> Thanks,
> Chao
> >
> > Thanks,
> >
> > > /*
> > > * If the gfn and userspace address are not aligned wrt each
> > > * other, disable large page support for this slot.
--
Isaku Yamahata <isaku.yamahata@xxxxxxxxx>