[PATCH v3 6/6] selftests: KVM: use page stats to check if dirty logging works properly

From: Mingwei Zhang
Date: Fri Apr 01 2022 - 02:37:29 EST


When dirty logging is enabled, KVM remaps all accessed pages in the
NPT/EPT at 4K. This property can be used to check whether the page
stats metrics in the KVM MMU work properly. The same logic also works
the other way around: the page stats can be used to verify that dirty
logging really splits all huge pages. Moreover, when dirty logging is
disabled, KVM zaps the corresponding SPTEs, so we can check whether the
huge pages come back once the guest touches the pages again.
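
For reference, the check itself is just reading the per-VM page stats
and asserting on them. Below is a minimal sketch of the shape of the
statements added in run_test(); the exact assertions and failure
messages are in the diff further down:

  #ifdef __x86_64__
          /*
           * While dirty logging is enabled, all accessed guest memory
           * should be mapped at 4K, so the 2M and 1G counters should
           * read zero.
           */
          TEST_ASSERT(vm_get_single_stat(vm, "pages_4k") != 0,
                      "expected 4K mappings with dirty logging enabled");
          TEST_ASSERT(vm_get_single_stat(vm, "pages_2m") == 0,
                      "expected no 2M mappings with dirty logging enabled");
          TEST_ASSERT(vm_get_single_stat(vm, "pages_1g") == 0,
                      "expected no 1G mappings with dirty logging enabled");
  #endif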

So add page stats checks to the dirty logging performance selftest.
In particular, add checks at the following four locations (the shared
check pattern is illustrated by the helper sketched after this list):
- just after the VM is created;
- after populating memory into the VM, but before enabling dirty
  logging;
- after dirty logging has run to completion, but before disabling it;
- after the final iteration that follows disabling dirty logging.
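
Because the same pattern repeats at each of these points, it could also
be wrapped in a small helper. The function below is purely illustrative
and not part of this patch (check_page_stats() is a hypothetical name;
it assumes the selftest's usual kvm_util.h/test_util.h environment with
vm_get_single_stat() returning the counter as a uint64_t):

  #ifdef __x86_64__
  /* Illustrative only: shared page-stat checks for the four locations. */
  static void check_page_stats(struct kvm_vm *vm, bool logging_enabled,
                               bool huge_backing)
  {
          uint64_t pages_2m = vm_get_single_stat(vm, "pages_2m");
          uint64_t pages_1g = vm_get_single_stat(vm, "pages_1g");

          if (logging_enabled) {
                  /* All huge mappings should have been split down to 4K. */
                  TEST_ASSERT(pages_2m == 0 && pages_1g == 0,
                              "huge mappings present with dirty logging on");
          } else if (huge_backing) {
                  /* Huge mappings should return once memory is re-touched. */
                  TEST_ASSERT(pages_2m != 0 || pages_1g != 0,
                              "no huge mappings after disabling dirty logging");
          }
  }
  #endif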

Tested using commands:
- ./dirty_log_perf_test -s anonymous_hugetlb_1gb
- ./dirty_log_perf_test -s anonymous_hugetlb_2mb
- ./dirty_log_perf_test -s anonymous_thp

Cc: Sean Christopherson <seanjc@xxxxxxxxxx>
Cc: David Matlack <dmatlack@xxxxxxxxxx>
Cc: Jing Zhang <jingzhangos@xxxxxxxxxx>
Cc: Peter Xu <peterx@xxxxxxxxxx>

Suggested-by: Ben Gardon <bgardon@xxxxxxxxxx>
Reviewed-by: Ben Gardon <bgardon@xxxxxxxxxx>
Signed-off-by: Mingwei Zhang <mizhang@xxxxxxxxxx>
---
.../selftests/kvm/dirty_log_perf_test.c | 53 +++++++++++++++++++
1 file changed, 53 insertions(+)

diff --git a/tools/testing/selftests/kvm/dirty_log_perf_test.c b/tools/testing/selftests/kvm/dirty_log_perf_test.c
index c9d9e513ca04..dd48aabfff5c 100644
--- a/tools/testing/selftests/kvm/dirty_log_perf_test.c
+++ b/tools/testing/selftests/kvm/dirty_log_perf_test.c
@@ -25,6 +25,10 @@
#define GICR_BASE_GPA 0x80A0000ULL
#endif

+#ifdef __x86_64__
+#include "processor.h"
+#endif
+
/* How many host loops to run by default (one KVM_GET_DIRTY_LOG for each loop)*/
#define TEST_HOST_LOOP_N 2UL

@@ -191,6 +195,14 @@ static void run_test(enum vm_guest_mode mode, void *arg)
p->slots, p->backing_src,
p->partition_vcpu_memory_access);

+#ifdef __x86_64__
+ TEST_ASSERT(vm_get_single_stat(vm, "pages_4k") == 0,
+ "4K page is non zero");
+ TEST_ASSERT(vm_get_single_stat(vm, "pages_2m") == 0,
+ "2M page is non zero");
+ TEST_ASSERT(vm_get_single_stat(vm, "pages_1g") == 0,
+ "1G page is non zero");
+#endif
perf_test_set_wr_fract(vm, p->wr_fract);

guest_num_pages = (nr_vcpus * guest_percpu_mem_size) >> vm_get_page_shift(vm);
@@ -232,6 +244,17 @@ static void run_test(enum vm_guest_mode mode, void *arg)
pr_info("Populate memory time: %ld.%.9lds\n",
ts_diff.tv_sec, ts_diff.tv_nsec);

+#ifdef __x86_64__
+ TEST_ASSERT(vm_get_single_stat(vm, "pages_4k") != 0,
+ "4K page is zero");
+ if (p->backing_src == VM_MEM_SRC_ANONYMOUS_THP ||
+ p->backing_src == VM_MEM_SRC_ANONYMOUS_HUGETLB_2MB)
+ TEST_ASSERT(vm_get_single_stat(vm, "pages_2m") != 0,
+ "2M page is zero");
+ if (p->backing_src == VM_MEM_SRC_ANONYMOUS_HUGETLB_1GB)
+ TEST_ASSERT(vm_get_single_stat(vm, "pages_1g") != 0,
+ "1G page is zero");
+#endif
/* Enable dirty logging */
clock_gettime(CLOCK_MONOTONIC, &start);
enable_dirty_logging(vm, p->slots);
@@ -277,6 +300,14 @@ static void run_test(enum vm_guest_mode mode, void *arg)
iteration, ts_diff.tv_sec, ts_diff.tv_nsec);
}
}
+#ifdef __x86_64__
+ TEST_ASSERT(vm_get_single_stat(vm, "pages_4k") != 0,
+ "4K page is zero after dirty logging");
+ TEST_ASSERT(vm_get_single_stat(vm, "pages_2m") == 0,
+ "2M page is non-zero after dirty logging");
+ TEST_ASSERT(vm_get_single_stat(vm, "pages_1g") == 0,
+ "1G page is non-zero after dirty logging");
+#endif

/* Disable dirty logging */
clock_gettime(CLOCK_MONOTONIC, &start);
@@ -285,6 +316,28 @@ static void run_test(enum vm_guest_mode mode, void *arg)
pr_info("Disabling dirty logging time: %ld.%.9lds\n",
ts_diff.tv_sec, ts_diff.tv_nsec);

+ /*
+ * Increment iteration to run the vcpus again to ensure all pages come
+ * back.
+ */
+ iteration++;
+ pr_info("Starting the final iteration to get all pages back.\n");
+ for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) {
+ while (READ_ONCE(vcpu_last_completed_iteration[vcpu_id])
+ != iteration)
+ ;
+ }
+
+#ifdef __x86_64__
+ if (p->backing_src == VM_MEM_SRC_ANONYMOUS_THP ||
+ p->backing_src == VM_MEM_SRC_ANONYMOUS_HUGETLB_2MB)
+ TEST_ASSERT(vm_get_single_stat(vm, "pages_2m") != 0,
+ "2M page is zero");
+ if (p->backing_src == VM_MEM_SRC_ANONYMOUS_HUGETLB_1GB)
+ TEST_ASSERT(vm_get_single_stat(vm, "pages_1g") != 0,
+ "1G page is zero");
+#endif
+
/* Tell the vcpu thread to quit */
host_quit = true;
perf_test_join_vcpu_threads(nr_vcpus);
--
2.35.1.1094.g7c7d902a7c-goog