[PATCH v14 043/113] KVM: Add flags to struct kvm_gfn_range

From: isaku.yamahata
Date: Mon May 29 2023 - 00:25:55 EST


From: Isaku Yamahata <isaku.yamahata@xxxxxxxxx>

kvm_unmap_gfn_range() needs to know why it is being called so that TDX
can adjust its behavior: the callback may come from the mmu notifier,
the set memory attributes ioctl, or the guest memfd (restrictedmem)
invalidation. For the mmu notifier, the operation targets a shared
memory slot, so zap the shared PTEs. For set memory attributes, the
operation is a private<->shared conversion, so zap the PTEs of the
original (pre-conversion) mapping. For guest memfd, a hole is being
punched in the range, so zap the corresponding PTEs.
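
For illustration only (not part of this patch): a minimal sketch of how a
TDX-aware kvm_unmap_gfn_range() might dispatch on the new flags. The
zap_*() helpers below are hypothetical placeholders, not existing KVM
functions.

/*
 * Illustrative sketch only: branch on range->flags to pick the zap
 * behavior.  zap_private_ptes(), zap_for_conversion() and
 * zap_shared_ptes() are placeholders for the TDX-specific handling.
 */
bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
{
	if (range->flags & KVM_GFN_RANGE_FLAGS_GMEM)
		/* Hole punched in guest memfd: zap the private mapping. */
		return zap_private_ptes(kvm, range);

	if (range->flags & KVM_GFN_RANGE_FLAGS_SET_MEM_ATTR)
		/*
		 * Private<->shared conversion: range->attrs carries the new
		 * attributes; zap the pre-conversion mapping.
		 */
		return zap_for_conversion(kvm, range);

	/* Plain mmu notifier invalidation on shared memory: zap shared PTEs. */
	return zap_shared_ptes(kvm, range);
}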

Signed-off-by: Isaku Yamahata <isaku.yamahata@xxxxxxxxx>
---
include/linux/kvm_host.h | 10 +++++++++-
virt/kvm/guest_mem.c | 1 +
virt/kvm/kvm_main.c | 4 +++-
3 files changed, 13 insertions(+), 2 deletions(-)

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 44d8209d9869..d97eeba38774 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -256,12 +256,20 @@ int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
#endif

#ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER
+
+#define KVM_GFN_RANGE_FLAGS_GMEM BIT(0)
+#define KVM_GFN_RANGE_FLAGS_SET_MEM_ATTR BIT(1)
+
struct kvm_gfn_range {
struct kvm_memory_slot *slot;
gfn_t start;
gfn_t end;
- pte_t pte;
+ union {
+ pte_t pte;
+ u64 attrs;
+ };
bool may_block;
+ unsigned int flags;
};
bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
diff --git a/virt/kvm/guest_mem.c b/virt/kvm/guest_mem.c
index 41f89d9c8118..647c0503f79f 100644
--- a/virt/kvm/guest_mem.c
+++ b/virt/kvm/guest_mem.c
@@ -116,6 +116,7 @@ static void kvm_gmem_invalidate_begin(struct kvm *kvm, struct kvm_gmem *gmem,
.slot = slot,
.pte = __pte(0),
.may_block = true,
+ .flags = KVM_GFN_RANGE_FLAGS_GMEM,
};

if (WARN_ON_ONCE(start < slot->gmem.index ||
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 0d8edc686e9d..49a64f040df5 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -629,6 +629,7 @@ static __always_inline int __kvm_handle_hva_range(struct kvm *kvm,
gfn_range.start = hva_to_gfn_memslot(hva_start, slot);
gfn_range.end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, slot);
gfn_range.slot = slot;
+ gfn_range.flags = 0;

if (!locked) {
locked = true;
@@ -2421,8 +2422,9 @@ static void kvm_mem_attrs_changed(struct kvm *kvm, unsigned long attrs,
bool flush = false;
int i;

- gfn_range.pte = __pte(0);
+ gfn_range.attrs = attrs;
gfn_range.may_block = true;
+ gfn_range.flags = KVM_GFN_RANGE_FLAGS_SET_MEM_ATTR;

for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
slots = __kvm_memslots(kvm, i);
--
2.25.1