[PATCH RFC 15/15] KVM: selftests: Test dirty ring waitqueue
From: Peter Xu
Date: Fri Nov 29 2019 - 16:35:47 EST
This is a bit tricky, but should still be reasonable.
First, we introduce a completely new dirty log test type, because we
need to force the vcpu into a blocked state by dead-looping on
vcpu_run even when it wants to exit to userspace.
The tricky part here is that we need to read procfs to make sure the
vcpu thread has entered TASK_UNINTERRUPTIBLE.
After that, we reset the ring, and the reset should kick the vcpu out
of that state again.
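For reference, here is a minimal sketch of the kind of procfs poll the
test relies on.  It is illustrative only: thread_state_char() is a
hypothetical helper that reads /proc/<tid>/stat, whereas the patch
below parses /proc/<tid>/status instead.

#include <stdio.h>
#include <string.h>
#include <sys/types.h>

/* Return the scheduler state character ('R', 'S', 'D', ...) of a thread. */
static char thread_state_char(pid_t tid)
{
	char path[64], buf[256], *p;
	size_t len;
	FILE *f;

	snprintf(path, sizeof(path), "/proc/%d/stat", (int)tid);
	f = fopen(path, "r");
	if (!f)
		return '?';
	len = fread(buf, 1, sizeof(buf) - 1, f);
	fclose(f);
	buf[len] = '\0';

	/* The state letter is the field right after the ")" that closes comm. */
	p = strrchr(buf, ')');
	return (p && p[1] == ' ') ? p[2] : '?';
}

The test does the equivalent: spin until the vcpu thread's state reads
'D' (TASK_UNINTERRUPTIBLE) before resetting the ring, then spin until
it has left 'D' again afterwards.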
Signed-off-by: Peter Xu <peterx@xxxxxxxxxx>
---
tools/testing/selftests/kvm/dirty_log_test.c | 101 +++++++++++++++++++
1 file changed, 101 insertions(+)
diff --git a/tools/testing/selftests/kvm/dirty_log_test.c b/tools/testing/selftests/kvm/dirty_log_test.c
index c9db136a1f12..41bc015131e1 100644
--- a/tools/testing/selftests/kvm/dirty_log_test.c
+++ b/tools/testing/selftests/kvm/dirty_log_test.c
@@ -16,6 +16,7 @@
#include <sys/types.h>
#include <signal.h>
#include <errno.h>
+#include <sys/syscall.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <asm/barrier.h>
@@ -151,12 +152,16 @@ enum log_mode_t {
/* Use dirty ring for logging */
LOG_MODE_DIRTY_RING = 2,
+ /* Dirty ring test but tailored for the waitqueue */
+ LOG_MODE_DIRTY_RING_WP = 3,
+
LOG_MODE_NUM,
};
/* Mode of logging. Default is LOG_MODE_DIRTY_LOG */
static enum log_mode_t host_log_mode;
pthread_t vcpu_thread;
+pid_t vcpu_thread_tid;
static uint32_t test_dirty_ring_count = TEST_DIRTY_RING_COUNT;
/* Only way to pass this to the signal handler */
@@ -221,6 +226,18 @@ static void dirty_ring_create_vm_done(struct kvm_vm *vm)
sizeof(struct kvm_dirty_gfn));
}
+static void dirty_ring_wq_create_vm_done(struct kvm_vm *vm)
+{
+ /*
+ * Force a relatively small ring size, so that it is easier to fill
+ * up.  It should still be bigger than the PML buffer size, hence 1024.
+ */
+ test_dirty_ring_count = 1024;
+ DEBUG("Forcing ring size: %u\n", test_dirty_ring_count);
+ vm_enable_dirty_ring(vm, test_dirty_ring_count *
+ sizeof(struct kvm_dirty_gfn));
+}
+
static uint32_t dirty_ring_collect_one(struct kvm_dirty_gfn *dirty_gfns,
struct kvm_dirty_ring_indexes *indexes,
int slot, void *bitmap,
@@ -295,6 +312,81 @@ static void dirty_ring_collect_dirty_pages(struct kvm_vm *vm, int slot,
DEBUG("Iteration %ld collected %u pages\n", iteration, count);
}
+/*
+ * Return the state character of the given thread as reported by
+ * /proc/<tid>/status: 'D' for uninterruptible sleep, 'R' for
+ * running, 'S' for interruptible sleep, etc.
+ */
+static char read_tid_status_char(unsigned int tid)
+{
+ int fd, ret, line = 0;
+ char buf[128], *c;
+
+ snprintf(buf, sizeof(buf) - 1, "/proc/%u/status", tid);
+ fd = open(buf, O_RDONLY);
+ TEST_ASSERT(fd >= 0, "open status file failed: %s", buf);
+ ret = read(fd, buf, sizeof(buf) - 1);
+ TEST_ASSERT(ret > 0, "read status file failed: %d, %d", ret, errno);
+ buf[ret] = '\0';
+ close(fd);
+
+ /* Skip the first two lines ("Name:" and "Umask:") to reach "State:" */
+ for (c = buf; c < buf + sizeof(buf) && line < 2; c++) {
+ if (*c == '\n') {
+ line++;
+ continue;
+ }
+ }
+
+ /* Skip "Status: " */
+ while (*c != ':') c++;
+ c++;
+ while (*c == ' ') c++;
+ c++;
+
+ return *c;
+}
+
+static void dirty_ring_wq_collect_dirty_pages(struct kvm_vm *vm, int slot,
+ void *bitmap, uint32_t num_pages)
+{
+ uint32_t count = test_dirty_ring_count;
+ struct kvm_run *state = vcpu_state(vm, VCPU_ID);
+ struct kvm_dirty_ring_indexes *indexes = &state->vcpu_ring_indexes;
+ uint32_t avail;
+
+ while (count--) {
+ /*
+ * Let the vcpu run long enough to make sure the ring-full
+ * condition is triggered
+ */
+ sem_post(&dirty_ring_vcpu_cont);
+ }
+
+ /* Make sure the vcpu thread is stuck */
+ TEST_ASSERT(vcpu_thread_tid, "TID not initialized");
+ /*
+ * Wait for the "State:" field in /proc/<pid>/status to change to
+ * "D", which stands for "disk sleep" (TASK_UNINTERRUPTIBLE)
+ */
+ while (read_tid_status_char(vcpu_thread_tid) != 'D') {
+ usleep(1000);
+ }
+ DEBUG("Now VCPU thread dirty ring full\n");
+
+ avail = READ_ONCE(indexes->avail_index);
+ /* Pretend we have consumed all the published entries */
+ WRITE_ONCE(indexes->fetch_index, avail);
+
+ kvm_vm_reset_dirty_ring(vm);
+
+ /* Wait for the vcpu thread to be woken up again */
+ while (read_tid_status_char(vcpu_thread_tid) == 'D') {
+ usleep(1000);
+ }
+ DEBUG("VCPU Thread is successfully waked up\n");
+
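+ /* The waitqueue test is complete; skip the usual dirty page verification */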
+ exit(0);
+}
+
static void dirty_ring_after_vcpu_run(struct kvm_vm *vm, int ret, int err)
{
struct kvm_run *run = vcpu_state(vm, VCPU_ID);
@@ -353,6 +445,12 @@ struct log_mode {
.before_vcpu_join = dirty_ring_before_vcpu_join,
.after_vcpu_run = dirty_ring_after_vcpu_run,
},
+ {
+ .name = "dirty-ring-wait-queue",
+ .create_vm_done = dirty_ring_wq_create_vm_done,
+ .collect_dirty_pages = dirty_ring_wq_collect_dirty_pages,
+ .after_vcpu_run = dirty_ring_after_vcpu_run,
+ },
};
/*
@@ -422,6 +520,9 @@ static void *vcpu_worker(void *data)
uint64_t *guest_array;
struct sigaction sigact;
+ vcpu_thread_tid = syscall(SYS_gettid);
+ printf("VCPU Thread ID: %u\n", vcpu_thread_tid);
+
current_vm = vm;
memset(&sigact, 0, sizeof(sigact));
sigact.sa_handler = vcpu_sig_handler;
--
2.21.0