Return early from the kvm_mmu_pte_write() path if no shadow page is
write-protected; this lets us avoid walking all shadow pages and taking
mmu_lock for nothing.
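
The hunks below only add the bookkeeping for the new counter; the
consumer side in kvm_mmu_pte_write() is not part of this excerpt. A
minimal sketch of what the early return could look like, assuming
indirect_shadow_pages is declared as an atomic_t in struct kvm_arch:

	/*
	 * Sketch (not in the hunks below): if there is no indirect shadow
	 * page, no gfn is write-protected by shadow paging, so a guest PTE
	 * write cannot touch a shadowed page table and we can return
	 * before taking mmu_lock.
	 */
	if (!atomic_read(&vcpu->kvm->arch.indirect_shadow_pages))
		return;

Reading the counter with atomic_read() pairs with the atomic_inc()/
atomic_dec() in the hunks below, so the check itself needs no lock.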
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 2841805..971e2d2 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -498,6 +498,7 @@ static void account_shadowed(struct kvm *kvm, gfn_t gfn)
 		linfo = lpage_info_slot(gfn, slot, i);
 		linfo->write_count += 1;
 	}
+	atomic_inc(&kvm->arch.indirect_shadow_pages);
 }
 
 static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn)
@@ -513,6 +514,7 @@ static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn)
 		linfo->write_count -= 1;
 		WARN_ON(linfo->write_count < 0);
 	}
+	atomic_dec(&kvm->arch.indirect_shadow_pages);
 }
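
The declaration of the counter is not part of this excerpt either.
Since the hunks use atomic_inc()/atomic_dec(), the field would have to
be an atomic_t; an assumed sketch of the addition to struct kvm_arch in
arch/x86/include/asm/kvm_host.h:

	/*
	 * Assumed field (not shown in this diff): count of shadow pages
	 * that map guest page tables; nonzero means at least one gfn is
	 * write-protected.
	 */
	atomic_t indirect_shadow_pages;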