[PATCH 2/3] kvm: mmu: Separate generating and setting mmio ptes

From: Ben Gardon
Date: Mon Feb 03 2020 - 18:09:31 EST


Separate the function that generates MMIO page table entries from the
function that inserts them into the paging structure. This refactoring
will facilitate changes to the MMU synchronization model that replace
the monolithic MMU lock with atomic compare/exchange operations, which
are not guaranteed to succeed.
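
As a rough sketch of the direction this enables (not part of this
patch; the helper below and its name are hypothetical), a lock-free
path could generate a fresh SPTE and let the caller retry when the
atomic exchange loses a race:

	/*
	 * Hypothetical illustration only: because make_mmio_spte() has
	 * no side effects on the paging structure, a failed installation
	 * can simply be retried without re-deriving state under a lock.
	 */
	static bool set_mmio_spte_atomic(struct kvm_vcpu *vcpu, u64 *sptep,
					 u64 gfn, unsigned int access)
	{
		u64 old_spte = READ_ONCE(*sptep);
		u64 new_spte = make_mmio_spte(vcpu, gfn, access);

		/* Fails if another thread changed *sptep; caller retries. */
		return cmpxchg64(sptep, old_spte, new_spte) == old_spte;
	}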

No functional change is expected from this patch itself.

Tested by running kvm-unit-tests on an Intel Haswell machine. This
commit introduced no new failures.

This commit can be viewed in Gerrit at:
https://linux-review.googlesource.com/c/virt/kvm/kvm/+/2359

Signed-off-by: Ben Gardon <bgardon@xxxxxxxxxx>
Reviewed-by: Oliver Upton <oupton@xxxxxxxxxx>
Reviewed-by: Peter Shier <pshier@xxxxxxxxxx>
---
 arch/x86/kvm/mmu/mmu.c | 15 +++++++++++++--
 1 file changed, 13 insertions(+), 2 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index a9c593dec49bf..b81010d0edae1 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -451,9 +451,9 @@ static u64 get_mmio_spte_generation(u64 spte)
 	return gen;
 }
 
-static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
-			   unsigned int access)
+static u64 make_mmio_spte(struct kvm_vcpu *vcpu, u64 gfn, unsigned int access)
 {
+
 	u64 gen = kvm_vcpu_memslots(vcpu)->generation & MMIO_SPTE_GEN_MASK;
 	u64 mask = generation_mmio_spte_mask(gen);
 	u64 gpa = gfn << PAGE_SHIFT;
@@ -464,6 +464,17 @@ static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
 	mask |= (gpa & shadow_nonpresent_or_rsvd_mask)
 		<< shadow_nonpresent_or_rsvd_mask_len;
 
+	return mask;
+}
+
+static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
+			   unsigned int access)
+{
+	u64 mask = make_mmio_spte(vcpu, gfn, access);
+	unsigned int gen = get_mmio_spte_generation(mask);
+
+	access = mask & ACC_ALL;
+
 	trace_mark_mmio_spte(sptep, gfn, access, gen);
 	mmu_spte_set(sptep, mask);
 }
--
2.25.0.341.g760bfbb309-goog