static bool __mmu_gfn_lpage_is_disallowed(gfn_t gfn, int level,

I think your modification is good, but I am a little bit confused here. In account_shadowed, if
@@ -2140,12 +2150,18 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
hlist_add_head(&sp->hash_link,
&vcpu->kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)]);
if (!direct) {
- if (rmap_write_protect(vcpu, gfn))
+ /*
+ * we should do write protection before syncing pages
+ * otherwise the content of the synced shadow page may
+ * be inconsistent with guest page table.
+ */
+ account_shadowed(vcpu->kvm, sp);
+
+ if (level == PT_PAGE_TABLE_LEVEL &&
+ rmap_write_protect(vcpu, gfn))
kvm_flush_remote_tlbs(vcpu->kvm);
sp->role.level > PT_PAGE_TABLE_LEVEL, the sp->gfn is write protected, and this is reasonable. So why
is the gfn of PT_PAGE_TABLE_LEVEL being write protected here?
It looks like you need to merge this part with patch 1, as you are modifying

/*
 * remove the guest page from the tracking pool which stops the interception
 * of the corresponding access on that page. It is the opposite operation of
@@ -134,20 +160,12 @@ void kvm_page_track_remove_page(struct kvm *kvm, gfn_t gfn,
struct kvm_memory_slot *slot;
int i;
- WARN_ON(!check_mode(mode));
-
for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
slots = __kvm_memslots(kvm, i);
slot = __gfn_to_memslot(slots, gfn);
spin_lock(&kvm->mmu_lock);
- update_gfn_track(slot, gfn, mode, -1);
-
- /*
- * allow large page mapping for the tracked page
- * after the tracker is gone.
- */
- kvm_mmu_gfn_allow_lpage(slot, gfn);
+ kvm_slot_page_track_remove_page_nolock(kvm, slot, gfn, mode);
kvm_page_track_{add,remove}_page here, which are introduced in your patch 1.