Retry #PF for softmmu only when the current vcpu's cr3 matches the cr3 at the time the #PF occurred
Changelog:
Just compare the cr3 values, since it's harmless to instantiate an spte for an
unused translation, per Marcelo's comment.
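
For context, the consumer of the saved cr3 is the async-#PF completion path,
which is not part of this excerpt. A minimal sketch of the check it is expected
to perform before retrying the fault, assuming the usual kvm_async_pf work-item
naming (illustrative only, not the actual hunk):

	/*
	 * Illustrative sketch: skip the retry if the guest switched page
	 * tables after the #PF was taken, since the saved gva would then
	 * go through a different translation.  Instantiating an spte for
	 * it would be pointless, though harmless.
	 */
	if (!vcpu->arch.mmu.direct_map &&
	    work->arch.cr3 != vcpu->arch.mmu.get_cr3(vcpu))
		return;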
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index c6bb449..3f0d9a0 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2607,9 +2607,11 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
 static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn)
 {
 	struct kvm_arch_async_pf arch;
+
 	arch.token = (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id;
 	arch.gfn = gfn;
 	arch.direct_map = vcpu->arch.mmu.direct_map;
+	arch.cr3 = vcpu->arch.mmu.get_cr3(vcpu);
 	return kvm_setup_async_pf(vcpu, gva, gfn, &arch);
 }
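
The new arch.cr3 assignment implies a matching member in struct
kvm_arch_async_pf; that header hunk is not shown here, but the resulting
definition would presumably look like this, with unsigned long matching
get_cr3()'s return type (a sketch, assuming the existing fields are unchanged):

	struct kvm_arch_async_pf {
		u32 token;
		gfn_t gfn;
		unsigned long cr3;	/* mmu->get_cr3() at #PF time */
		bool direct_map;
	};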
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 23275d0..437e11a 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -116,7 +116,7 @@ static unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, pt_element_t gpte)
  */
 static int FNAME(walk_addr_generic)(struct guest_walker *walker,
 				    struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
-				    gva_t addr, u32 access)
+				    gva_t addr, u32 access, bool prefault)
 {
 	pt_element_t pte;
 	gfn_t table_gfn;
@@ -194,6 +194,13 @@ walk:
 #endif
 	if (!eperm && !rsvd_fault && !(pte & PT_ACCESSED_MASK)) {
+		/*
+		 * Don't set gpte accessed bit if it's on
+		 * speculative path.
+		 */
+		if (prefault)
+			goto error;
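
Since walk_addr_generic() grows a parameter, its thin wrappers need the same
plumbing; a sketch of the expected FNAME(walk_addr) change, assuming it keeps
forwarding to walk_addr_generic() with the vcpu's own mmu:

	/*
	 * Sketch (not part of this excerpt): forward the new prefault
	 * parameter from the wrapper into the generic walker.
	 */
	static int FNAME(walk_addr)(struct guest_walker *walker,
				    struct kvm_vcpu *vcpu, gva_t addr,
				    u32 access, bool prefault)
	{
		return FNAME(walk_addr_generic)(walker, vcpu, &vcpu->arch.mmu,
						addr, access, prefault);
	}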