Simply return from the kvm_mmu_pte_write path if no shadow page is
write-protected; then we avoid walking all shadow pages and holding
mmu-lock.
Signed-off-by: Xiao Guangrong <xiaoguangrong@xxxxxxxxxxxxxx>
---
arch/x86/kvm/mmu.c | 8 ++++++++
1 files changed, 8 insertions(+), 0 deletions(-)
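
Note: the early return is keyed on whether any spte can be write-protected
at all. With TDP enabled and no nested paging, guest page tables are never
shadowed, so the walk under mmu-lock is pure overhead. A minimal sketch of
the resulting fast path (helper name as introduced by this patch,
surrounding code elided):

	/* True iff shadow paging is in use: TDP off, or L1 runs a nested guest. */
	static bool need_track_pte_changed(struct kvm_vcpu *vcpu)
	{
		return !tdp_enabled || mmu_is_nested(vcpu);
	}

	void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
			       const u8 *new, int bytes, bool guest_initiated)
	{
		/* No shadow page is write-protected: skip the walk and mmu-lock. */
		if (!need_track_pte_changed(vcpu))
			return;
		/* ... existing write-tracking path ... */
	}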
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 2841805..ccac6c4 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3219,6 +3219,11 @@ static void kvm_mmu_access_page(struct kvm_vcpu *vcpu, gfn_t gfn)
 		set_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
 }
 
+static bool need_track_pte_changed(struct kvm_vcpu *vcpu)
+{
+	return !tdp_enabled || mmu_is_nested(vcpu);
+}
+
 void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 		       const u8 *new, int bytes,
 		       bool guest_initiated)
@@ -3233,6 +3238,9 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	int level, npte, invlpg_counter, r, flooded = 0;
 	bool remote_flush, local_flush, zap_page;
 
+	if (!need_track_pte_changed(vcpu))
+		return;
+
 	zap_page = remote_flush = local_flush = false;
 
 	offset = offset_in_page(gpa);