-static void mmu_sync_children(struct kvm_vcpu *vcpu,
- struct kvm_mmu_page *parent)
+static int mmu_sync_children(struct kvm_vcpu *vcpu,
+ struct kvm_mmu_page *parent, bool can_yield)
{
int i;
struct kvm_mmu_page *sp;
@@ -2050,7 +2050,15 @@ static void mmu_sync_children(struct kvm_vcpu *vcpu,
flush |= kvm_sync_page(vcpu, sp, &invalid_list);
mmu_pages_clear_parents(&parents);
}
- if (need_resched() || rwlock_needbreak(&vcpu->kvm->mmu_lock)) {
+ /*
+ * Don't yield if there are SOME_ARBITRARY_THRESHOLD or fewer unsync
+ * children remaining; just finish up and get out.
+ */
+ if (parent->unsync_children > SOME_ARBITRARY_THRESHOLD &&
+ (need_resched() || rwlock_needbreak(&vcpu->kvm->mmu_lock))) {
+ if (!can_yield)
+ return -EINTR;
+