On Tue, Aug 24, 2010 at 12:30:25PM +0300, Avi Kivity wrote:
> If we get the wakeup here [at the native_safe_halt() call in the last
> hunk], we'll halt and never wake up again.

We will not. IRQs are disabled here. native_safe_halt() enables them.

On 07/19/2010 06:31 PM, Gleb Natapov wrote:

If an async page fault is received by the idle task, or while
preempt_count is not zero, the guest cannot reschedule, so do sti; hlt
and wait for the page to be ready. The vcpu can still process
interrupts while it waits for the page to be ready.
Acked-by: Rik van Riel <riel@xxxxxxxxxx>
Signed-off-by: Gleb Natapov <gleb@xxxxxxxxxx>
---
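A note on why the halt below cannot miss its wakeup: native_safe_halt()
is the "sti; hlt" pair, and sti lets interrupts in only after the
following instruction, so the CPU is already halting when the wakeup IRQ
is delivered. A minimal sketch of the idiom; kvm_pv_apf_ready() is a
made-up predicate for illustration, not part of this patch:

	local_irq_disable();
	while (!kvm_pv_apf_ready()) {
		/*
		 * "sti; hlt": interrupts are enabled and the CPU halts
		 * atomically, so an IRQ arriving after the check above
		 * still terminates the hlt instead of being lost.
		 */
		native_safe_halt();	/* returns with IRQs enabled */
		local_irq_disable();	/* close the window before re-check */
	}
	local_irq_enable();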
arch/x86/kernel/kvm.c | 36 ++++++++++++++++++++++++++++++++----
1 files changed, 32 insertions(+), 4 deletions(-)
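Also for context: the atomic_inc(&n.mm->mm_count) added below pins the
mm_struct itself (unlike mm_users, it does not keep the address space
populated), so the structure stays valid while a halted vcpu waits; the
reference is presumably dropped with mmdrop() once the page is ready. A
sketch of the pattern, with hypothetical helper names:

	/* Hypothetical helpers showing the mm_count pin/unpin pattern. */
	static void apf_pin_mm(struct mm_struct *mm)
	{
		atomic_inc(&mm->mm_count);	/* mm_struct cannot be freed now */
	}

	static void apf_unpin_mm(struct mm_struct *mm)
	{
		mmdrop(mm);	/* pairs with the atomic_inc(); may free mm */
	}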
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index a6db92e..914b0fc 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -37,6 +37,7 @@
 #include <asm/cpu.h>
 #include <asm/traps.h>
 #include <asm/desc.h>
+#include <asm/tlbflush.h>

 #define MMU_QUEUE_SIZE 1024
@@ -68,6 +69,8 @@ struct kvm_task_sleep_node {
 	wait_queue_head_t wq;
 	u32 token;
 	int cpu;
+	bool halted;
+	struct mm_struct *mm;
 };

 static struct kvm_task_sleep_head {
@@ -96,6 +99,11 @@ static void apf_task_wait(struct task_struct *tsk, u32 token)
 	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
 	struct kvm_task_sleep_node n, *e;
 	DEFINE_WAIT(wait);
+	int cpu, idle;
+
+	cpu = get_cpu();
+	idle = idle_cpu(cpu);
+	put_cpu();

 	spin_lock(&b->lock);
 	e = _find_apf_task(b, token);
@@ -109,17 +117,31 @@ static void apf_task_wait(struct task_struct *tsk, u32 token)
 	n.token = token;
 	n.cpu = smp_processor_id();
+	n.mm = current->active_mm;
+	n.halted = idle || preempt_count() > 1;
+	atomic_inc(&n.mm->mm_count);
 	init_waitqueue_head(&n.wq);
 	hlist_add_head(&n.link, &b->list);
 	spin_unlock(&b->lock);

 	for (;;) {
-		prepare_to_wait(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
+		if (!n.halted)
+			prepare_to_wait(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
 		if (hlist_unhashed(&n.link))
 			break;
-		schedule();
+
+		if (!n.halted) {
+			schedule();
+		} else {
+			/*
+			 * We cannot reschedule. So halt.
+			 */
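The last hunk is truncated here. Given the reply at the top ("IRQs are
disabled here. native_safe_halt() enables them"), the else branch
presumably finishes with the safe-halt pair sketched earlier:

+			native_safe_halt();
+			local_irq_disable();
+		}
+	}

The wake side then has to treat halted waiters specially, since they
were never put on the wait queue. A plausible shape for that helper,
hedged because it is not shown in this excerpt:

static void apf_task_wake_one(struct kvm_task_sleep_node *n)
{
	hlist_del_init(&n->link);	/* lets apf_task_wait() break out */
	if (n->halted)
		/* not on a wait queue; an IPI kicks the vcpu out of hlt */
		smp_send_reschedule(n->cpu);
	else if (waitqueue_active(&n->wq))
		wake_up(&n->wq);
}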