Re: [PATCH 00/21] Fixes and lock cleanup+hardening

From: Jethro Beekman

Date: Wed Mar 11 2026 - 10:29:45 EST


On 2026-03-11 00:48, Sean Christopherson wrote:
> Fix several fatal SEV bugs, then clean up the SEV+ APIs to either document
> that they are safe to query outside of kvm->lock, or to use lockdep-protected
> version. The sev_mem_enc_register_region() goof is at least the second bug
> we've had related to checking for an SEV guest outside of kvm->lock, and in
> general it's nearly impossible to just "eyeball" the safety of KVM's usage.
>
> I included Carlos' guard() cleanups here to avoid annoying conflicts (well,
> to solve them now instead of when applying).

I wrote a bunch of tests (see below) to check that the kernel properly handles bad userspace flows. I haven't had the chance to test them with your patch set yet.

test_vcpu_hotplug() triggers dump_vmcb()

test_snp_launch_finish_after_finish() triggers the Oops I wrote about earlier.

All other tests seem to be handled (somewhat) gracefully already.


diff --git a/tools/testing/selftests/kvm/x86/sev_smoke_test.c b/tools/testing/selftests/kvm/x86/sev_smoke_test.c
index 77256c89bb8d..93339420b281 100644
--- a/tools/testing/selftests/kvm/x86/sev_smoke_test.c
+++ b/tools/testing/selftests/kvm/x86/sev_smoke_test.c
@@ -160,6 +160,155 @@ static void test_sev(void *guest_code, uint32_t type, uint64_t policy)
kvm_vm_free(vm);
}

+static void test_vcpu_hotplug(void)
+{
+ struct kvm_vcpu *vcpu;
+ struct kvm_vcpu *vcpu2;
+ struct kvm_mp_state mps;
+ struct kvm_vm *vm;
+
+ vm = vm_sev_create_with_one_vcpu(KVM_X86_SNP_VM, guest_snp_code, &vcpu);
+
+ vm_sev_launch(vm, snp_default_policy(), NULL);
+
+ vcpu2 = vm_vcpu_add(vm, 1, guest_snp_code);
+ mps.mp_state = KVM_MP_STATE_RUNNABLE;
+ vcpu_ioctl(vcpu2, KVM_SET_MP_STATE, &mps);
+ vcpu_run(vcpu2);
+ printf("test_vcpu_hotplug/vcpu_run returns %s\n", exit_reason_str(vcpu2->run->exit_reason));
+
+ kvm_vm_free(vm);
+}
+
+static int try_snp_vm_launch_start(struct kvm_vm *vm, uint64_t policy)
+{
+ struct kvm_sev_snp_launch_start launch_start = {
+ .policy = policy,
+ };
+
+ if (__vm_sev_ioctl(vm, KVM_SEV_SNP_LAUNCH_START, &launch_start) != 0)
+ return errno;
+
+ return 0;
+}
+
+static int try_snp_vm_launch_update(struct kvm_vm *vm)
+{
+ struct userspace_mem_region *region;
+ int ctr;
+
+ hash_for_each(vm->regions.slot_hash, ctr, region, slot_node)
+ {
+ const struct sparsebit *protected_phy_pages = region->protected_phy_pages;
+ const vm_paddr_t gpa_base = region->region.guest_phys_addr;
+ const sparsebit_idx_t lowest_page_in_region = gpa_base >> vm->page_shift;
+ sparsebit_idx_t i, j;
+
+ if (!sparsebit_any_set(protected_phy_pages))
+ continue;
+
+ sparsebit_for_each_set_range(protected_phy_pages, i, j) {
+ const uint64_t size = (j - i + 1) * vm->page_size;
+ const uint64_t offset = (i - lowest_page_in_region) * vm->page_size;
+
+ vm_mem_set_private(vm, gpa_base + offset, size);
+
+ struct kvm_sev_snp_launch_update update_data = {
+ .uaddr = (uint64_t)addr_gpa2hva(vm, gpa_base + offset),
+ .gfn_start = (gpa_base + offset) >> PAGE_SHIFT,
+ .len = size,
+ .type = KVM_SEV_SNP_PAGE_TYPE_NORMAL,
+ };
+
+ if (__vm_sev_ioctl(vm, KVM_SEV_SNP_LAUNCH_UPDATE, &update_data) != 0)
+ return errno;
+ }
+ }
+
+ return 0;
+}
+
+static int try_snp_vm_launch_finish(struct kvm_vm *vm)
+{
+ struct kvm_sev_snp_launch_finish launch_finish = { 0 };
+
+ if (__vm_sev_ioctl(vm, KVM_SEV_SNP_LAUNCH_FINISH, &launch_finish) != 0)
+ return errno;
+
+ return 0;
+}
+
+static void test_snp_launch_update_before_start(void) {
+ struct kvm_vm *vm;
+ struct kvm_vcpu *vcpu;
+
+ vm = vm_sev_create_with_one_vcpu(KVM_X86_SNP_VM, guest_snp_code, &vcpu);
+
+ TEST_ASSERT_EQ(try_snp_vm_launch_update(vm), EINVAL);
+
+ kvm_vm_free(vm);
+}
+
+static void test_snp_launch_finish_before_start(void) {
+ struct kvm_vm *vm;
+ struct kvm_vcpu *vcpu;
+
+ vm = vm_sev_create_with_one_vcpu(KVM_X86_SNP_VM, guest_snp_code, &vcpu);
+
+ TEST_ASSERT_EQ(try_snp_vm_launch_finish(vm), EINVAL);
+
+ kvm_vm_free(vm);
+}
+
+static void test_snp_launch_start_after_start(void) {
+ struct kvm_vm *vm;
+ struct kvm_vcpu *vcpu;
+
+ vm = vm_sev_create_with_one_vcpu(KVM_X86_SNP_VM, guest_snp_code, &vcpu);
+
+ snp_vm_launch_start(vm, snp_default_policy());
+ TEST_ASSERT_EQ(try_snp_vm_launch_start(vm, snp_default_policy()), EINVAL);
+
+ kvm_vm_free(vm);
+}
+
+static void test_snp_launch_start_after_finish(void) {
+ struct kvm_vm *vm;
+ struct kvm_vcpu *vcpu;
+
+ vm = vm_sev_create_with_one_vcpu(KVM_X86_SNP_VM, guest_snp_code, &vcpu);
+
+ vm_sev_launch(vm, snp_default_policy(), NULL);
+ TEST_ASSERT_EQ(try_snp_vm_launch_start(vm, snp_default_policy()), EINVAL);
+
+ kvm_vm_free(vm);
+}
+
+static void test_snp_launch_update_after_finish(void) {
+ struct kvm_vm *vm;
+ struct kvm_vcpu *vcpu;
+
+ vm = vm_sev_create_with_one_vcpu(KVM_X86_SNP_VM, guest_snp_code, &vcpu);
+
+ TEST_ASSERT_EQ(try_snp_vm_launch_start(vm, snp_default_policy()), 0);
+ TEST_ASSERT_EQ(try_snp_vm_launch_finish(vm), 0);
+ TEST_ASSERT_EQ(try_snp_vm_launch_update(vm), EIO);
+
+ kvm_vm_free(vm);
+}
+
+static void test_snp_launch_finish_after_finish(void) {
+ struct kvm_vm *vm;
+ struct kvm_vcpu *vcpu;
+
+ vm = vm_sev_create_with_one_vcpu(KVM_X86_SNP_VM, guest_snp_code, &vcpu);
+
+ vm_sev_launch(vm, snp_default_policy(), NULL);
+ TEST_ASSERT_EQ(try_snp_vm_launch_finish(vm), EINVAL);
+
+ kvm_vm_free(vm);
+}
+
static void guest_shutdown_code(void)
{
struct desc_ptr idt;
@@ -217,13 +366,29 @@ int main(int argc, char *argv[])
{
TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SEV));

- test_sev_smoke(guest_sev_code, KVM_X86_SEV_VM, 0);
+ if (!kvm_cpu_has(X86_FEATURE_SEV_SNP)) {
+ test_sev_smoke(guest_sev_code, KVM_X86_SEV_VM, 0);

- if (kvm_cpu_has(X86_FEATURE_SEV_ES))
- test_sev_smoke(guest_sev_es_code, KVM_X86_SEV_ES_VM, SEV_POLICY_ES);
-
- if (kvm_cpu_has(X86_FEATURE_SEV_SNP))
+ if (kvm_cpu_has(X86_FEATURE_SEV_ES))
+ test_sev_smoke(guest_sev_es_code, KVM_X86_SEV_ES_VM, SEV_POLICY_ES);
+ } else {
test_sev_smoke(guest_snp_code, KVM_X86_SNP_VM, snp_default_policy());
+ system("logger starting test test_vcpu_hotplug");
+ test_vcpu_hotplug();
+ system("logger starting test test_snp_launch_update_before_start");
+ test_snp_launch_update_before_start();
+ system("logger starting test test_snp_launch_finish_before_start");
+ test_snp_launch_finish_before_start();
+ system("logger starting test test_snp_launch_start_after_start");
+ test_snp_launch_start_after_start();
+ system("logger starting test test_snp_launch_start_after_finish");
+ test_snp_launch_start_after_finish();
+ system("logger starting test test_snp_launch_update_after_finish");
+ test_snp_launch_update_after_finish();
+ system("logger starting test test_snp_launch_finish_after_finish");
+ test_snp_launch_finish_after_finish();
+ system("logger all tests done");
+ }

return 0;
}

Attachment: smime.p7s
Description: S/MIME Cryptographic Signature