[PATCH v2 4/6] mshv: limit SynIC management to MSHV-owned resources

From: Jork Loeser

Date: Fri Apr 03 2026 - 15:06:57 EST


The SynIC is shared between VMBus and MSHV. VMBus owns the message
page (SIMP), event flags page (SIEFP), global enable (SCONTROL),
and SINT2. MSHV adds SINT0, SINT5, and the event ring page (SIRBP).

Currently mshv_synic_init() redundantly enables SIMP, SIEFP, and
SCONTROL, which VMBus has already configured, and
mshv_synic_cleanup() disables all of them. This is wrong because
MSHV can be torn down while VMBus is still active; in particular, a
kexec reboot notifier tears down MSHV first. Disabling SCONTROL,
SIMP, and SIEFP out from under VMBus causes VMBus's later cleanup
to write SynIC MSRs while the SynIC is disabled, which the
hypervisor does not tolerate.

Restrict MSHV to managing only the resources it owns:
- SINT0, SINT5: unmask on init, mask on cleanup
- SIRBP: enable/disable as before
- SIMP, SIEFP, SCONTROL: leave to VMBus when it is active (L1VH
and nested root partition); on a non-nested root partition VMBus
doesn't run, so MSHV must enable/disable them

Signed-off-by: Jork Loeser <jloeser@xxxxxxxxxxxxxxxxxxx>
---
drivers/hv/mshv_synic.c | 142 ++++++++++++++++++++++++++--------------
1 file changed, 94 insertions(+), 48 deletions(-)

diff --git a/drivers/hv/mshv_synic.c b/drivers/hv/mshv_synic.c
index f8b0337cdc82..7d273766bdb5 100644
--- a/drivers/hv/mshv_synic.c
+++ b/drivers/hv/mshv_synic.c
@@ -454,46 +454,72 @@ int mshv_synic_init(unsigned int cpu)
#ifdef HYPERVISOR_CALLBACK_VECTOR
union hv_synic_sint sint;
#endif
- union hv_synic_scontrol sctrl;
struct hv_synic_pages *spages = this_cpu_ptr(mshv_root.synic_pages);
struct hv_message_page **msg_page = &spages->hyp_synic_message_page;
struct hv_synic_event_flags_page **event_flags_page =
&spages->synic_event_flags_page;
struct hv_synic_event_ring_page **event_ring_page =
&spages->synic_event_ring_page;
+ /* VMBus runs on L1VH and nested root; it owns SIMP/SIEFP/SCONTROL */
+ bool vmbus_active = !hv_root_partition() || hv_nested;

- /* Setup the Synic's message page */
+ /*
+ * Map the SYNIC message page. When VMBus is not active the
+ * hypervisor pre-provisions the SIMP GPA but may not set
+ * simp_enabled — enable it here.
+ */
simp.as_uint64 = hv_get_non_nested_msr(HV_MSR_SIMP);
- simp.simp_enabled = true;
+ if (!vmbus_active) {
+ simp.simp_enabled = true;
+ hv_set_non_nested_msr(HV_MSR_SIMP, simp.as_uint64);
+ }
*msg_page = memremap(simp.base_simp_gpa << HV_HYP_PAGE_SHIFT,
HV_HYP_PAGE_SIZE,
MEMREMAP_WB);

if (!(*msg_page))
- return -EFAULT;
+ goto cleanup_simp;

- hv_set_non_nested_msr(HV_MSR_SIMP, simp.as_uint64);
-
- /* Setup the Synic's event flags page */
+ /*
+ * Map the event flags page. Same as SIMP: enable when
+ * VMBus is not active, already enabled by VMBus otherwise.
+ */
siefp.as_uint64 = hv_get_non_nested_msr(HV_MSR_SIEFP);
- siefp.siefp_enabled = true;
+ if (!vmbus_active) {
+ siefp.siefp_enabled = true;
+ hv_set_non_nested_msr(HV_MSR_SIEFP, siefp.as_uint64);
+ }
*event_flags_page = memremap(siefp.base_siefp_gpa << PAGE_SHIFT,
PAGE_SIZE, MEMREMAP_WB);

if (!(*event_flags_page))
- goto cleanup;
-
- hv_set_non_nested_msr(HV_MSR_SIEFP, siefp.as_uint64);
+ goto cleanup_siefp;

/* Setup the Synic's event ring page */
sirbp.as_uint64 = hv_get_non_nested_msr(HV_MSR_SIRBP);
- sirbp.sirbp_enabled = true;
- *event_ring_page = memremap(sirbp.base_sirbp_gpa << PAGE_SHIFT,
- PAGE_SIZE, MEMREMAP_WB);

- if (!(*event_ring_page))
- goto cleanup;
+ if (hv_root_partition()) {
+ *event_ring_page = memremap(sirbp.base_sirbp_gpa << PAGE_SHIFT,
+ PAGE_SIZE, MEMREMAP_WB);

+ if (!(*event_ring_page))
+ goto cleanup_siefp;
+ } else {
+ /*
+ * On L1VH the hypervisor does not provide a SIRBP page.
+ * Allocate one and program its GPA into the MSR.
+ */
+ *event_ring_page = (struct hv_synic_event_ring_page *)
+ get_zeroed_page(GFP_KERNEL);
+
+ if (!(*event_ring_page))
+ goto cleanup_siefp;
+
+ sirbp.base_sirbp_gpa = virt_to_phys(*event_ring_page)
+ >> PAGE_SHIFT;
+ }
+
+ sirbp.sirbp_enabled = true;
hv_set_non_nested_msr(HV_MSR_SIRBP, sirbp.as_uint64);

#ifdef HYPERVISOR_CALLBACK_VECTOR
@@ -515,28 +541,30 @@ int mshv_synic_init(unsigned int cpu)
sint.as_uint64);
#endif

- /* Enable global synic bit */
- sctrl.as_uint64 = hv_get_non_nested_msr(HV_MSR_SCONTROL);
- sctrl.enable = 1;
- hv_set_non_nested_msr(HV_MSR_SCONTROL, sctrl.as_uint64);
+ /* When VMBus is active it already enabled SCONTROL. */
+ if (!vmbus_active) {
+ union hv_synic_scontrol sctrl;
+
+ sctrl.as_uint64 = hv_get_non_nested_msr(HV_MSR_SCONTROL);
+ sctrl.enable = 1;
+ hv_set_non_nested_msr(HV_MSR_SCONTROL, sctrl.as_uint64);
+ }

return 0;

-cleanup:
- if (*event_ring_page) {
- sirbp.sirbp_enabled = false;
- hv_set_non_nested_msr(HV_MSR_SIRBP, sirbp.as_uint64);
- memunmap(*event_ring_page);
- }
- if (*event_flags_page) {
+cleanup_siefp:
+ if (*event_flags_page)
+ memunmap(*event_flags_page);
+ if (!vmbus_active) {
siefp.siefp_enabled = false;
hv_set_non_nested_msr(HV_MSR_SIEFP, siefp.as_uint64);
- memunmap(*event_flags_page);
}
- if (*msg_page) {
+cleanup_simp:
+ if (*msg_page)
+ memunmap(*msg_page);
+ if (!vmbus_active) {
simp.simp_enabled = false;
hv_set_non_nested_msr(HV_MSR_SIMP, simp.as_uint64);
- memunmap(*msg_page);
}

return -EFAULT;
@@ -545,16 +573,15 @@ int mshv_synic_init(unsigned int cpu)
int mshv_synic_cleanup(unsigned int cpu)
{
union hv_synic_sint sint;
- union hv_synic_simp simp;
- union hv_synic_siefp siefp;
union hv_synic_sirbp sirbp;
- union hv_synic_scontrol sctrl;
struct hv_synic_pages *spages = this_cpu_ptr(mshv_root.synic_pages);
struct hv_message_page **msg_page = &spages->hyp_synic_message_page;
struct hv_synic_event_flags_page **event_flags_page =
&spages->synic_event_flags_page;
struct hv_synic_event_ring_page **event_ring_page =
&spages->synic_event_ring_page;
+ /* VMBus runs on L1VH and nested root; it owns SIMP/SIEFP/SCONTROL */
+ bool vmbus_active = !hv_root_partition() || hv_nested;

/* Disable the interrupt */
sint.as_uint64 = hv_get_non_nested_msr(HV_MSR_SINT0 + HV_SYNIC_INTERCEPTION_SINT_INDEX);
@@ -568,28 +595,47 @@ int mshv_synic_cleanup(unsigned int cpu)
hv_set_non_nested_msr(HV_MSR_SINT0 + HV_SYNIC_DOORBELL_SINT_INDEX,
sint.as_uint64);

- /* Disable Synic's event ring page */
+ /* Disable SYNIC event ring page owned by MSHV */
sirbp.as_uint64 = hv_get_non_nested_msr(HV_MSR_SIRBP);
sirbp.sirbp_enabled = false;
- hv_set_non_nested_msr(HV_MSR_SIRBP, sirbp.as_uint64);
- memunmap(*event_ring_page);

- /* Disable Synic's event flags page */
- siefp.as_uint64 = hv_get_non_nested_msr(HV_MSR_SIEFP);
- siefp.siefp_enabled = false;
- hv_set_non_nested_msr(HV_MSR_SIEFP, siefp.as_uint64);
+ if (hv_root_partition()) {
+ hv_set_non_nested_msr(HV_MSR_SIRBP, sirbp.as_uint64);
+ memunmap(*event_ring_page);
+ } else {
+ sirbp.base_sirbp_gpa = 0;
+ hv_set_non_nested_msr(HV_MSR_SIRBP, sirbp.as_uint64);
+ free_page((unsigned long)*event_ring_page);
+ }
+
+ /*
+ * Release our mappings of the message and event flags pages.
+ * When VMBus is not active, we enabled SIMP/SIEFP — disable
+ * them. Otherwise VMBus owns the MSRs — leave them.
+ */
memunmap(*event_flags_page);
+ if (!vmbus_active) {
+ union hv_synic_simp simp;
+ union hv_synic_siefp siefp;

- /* Disable Synic's message page */
- simp.as_uint64 = hv_get_non_nested_msr(HV_MSR_SIMP);
- simp.simp_enabled = false;
- hv_set_non_nested_msr(HV_MSR_SIMP, simp.as_uint64);
+ siefp.as_uint64 = hv_get_non_nested_msr(HV_MSR_SIEFP);
+ siefp.siefp_enabled = false;
+ hv_set_non_nested_msr(HV_MSR_SIEFP, siefp.as_uint64);
+
+ simp.as_uint64 = hv_get_non_nested_msr(HV_MSR_SIMP);
+ simp.simp_enabled = false;
+ hv_set_non_nested_msr(HV_MSR_SIMP, simp.as_uint64);
+ }
memunmap(*msg_page);

- /* Disable global synic bit */
- sctrl.as_uint64 = hv_get_non_nested_msr(HV_MSR_SCONTROL);
- sctrl.enable = 0;
- hv_set_non_nested_msr(HV_MSR_SCONTROL, sctrl.as_uint64);
+ /* When VMBus is active it owns SCONTROL — leave it. */
+ if (!vmbus_active) {
+ union hv_synic_scontrol sctrl;
+
+ sctrl.as_uint64 = hv_get_non_nested_msr(HV_MSR_SCONTROL);
+ sctrl.enable = 0;
+ hv_set_non_nested_msr(HV_MSR_SCONTROL, sctrl.as_uint64);
+ }

return 0;
}
--
2.43.0