[PATCH v1 3/3] KVM: s390: vsie: Fix unshadowing logic
From: Claudio Imbrenda
Date: Wed May 06 2026 - 10:32:00 EST
In some cases (e.g. under extreme memory pressure on the host),
attempting to shadow memory will result in the same memory being
unshadowed, causing a loop.
Add a PGSTE bit to distinguish between shadowed memory and shadowed DAT
tables, and fix the unshadowing logic in _gmap_ptep_xchg() to prevent
unnecessary unshadowing and perform more accurate checks.
Also fix the unshadowing logic in _gmap_crstep_xchg_atomic() which did
not unshadow properly when the large page would become unprotected.
Signed-off-by: Claudio Imbrenda <imbrenda@xxxxxxxxxxxxx>
Fixes: a2c17f9270cc ("KVM: s390: New gmap code")
---
arch/s390/kvm/dat.h | 3 ++-
arch/s390/kvm/gmap.c | 1 +
arch/s390/kvm/gmap.h | 12 ++++++++++--
3 files changed, 13 insertions(+), 3 deletions(-)
diff --git a/arch/s390/kvm/dat.h b/arch/s390/kvm/dat.h
index 8f8278c44879..873e13ac5a27 100644
--- a/arch/s390/kvm/dat.h
+++ b/arch/s390/kvm/dat.h
@@ -145,7 +145,8 @@ union pgste {
unsigned long cmma_d : 1; /* Dirty flag for CMMA bits */
unsigned long prefix_notif : 1; /* Guest prefix invalidation notification */
unsigned long vsie_notif : 1; /* Referenced in a shadow table */
- unsigned long : 5;
+ unsigned long vsie_gmem : 1; /* Contains nested guest memory */
+ unsigned long : 4;
unsigned long : 8;
};
struct {
diff --git a/arch/s390/kvm/gmap.c b/arch/s390/kvm/gmap.c
index c1140da0689d..08fb806c9c36 100644
--- a/arch/s390/kvm/gmap.c
+++ b/arch/s390/kvm/gmap.c
@@ -1053,6 +1053,7 @@ int gmap_protect_rmap(struct kvm_s390_mmu_cache *mc, struct gmap *sg, gfn_t p_gf
pte.h.p = 1;
pgste = _gmap_ptep_xchg(sg->parent, ptep, pte, pgste, p_gfn, false);
pgste.vsie_notif = 1;
+ pgste.vsie_gmem |= level == TABLE_TYPE_PAGE_TABLE;
pgste_set_unlock(ptep, pgste);
return 0;
diff --git a/arch/s390/kvm/gmap.h b/arch/s390/kvm/gmap.h
index 96ee1395a592..f4caa3f42c0e 100644
--- a/arch/s390/kvm/gmap.h
+++ b/arch/s390/kvm/gmap.h
@@ -167,6 +167,13 @@ static inline bool gmap_unmap_prefix(struct gmap *gmap, gfn_t gfn, gfn_t end)
return _gmap_unmap_prefix(gmap, gfn, end, false);
}
+static inline bool needs_unshadow(union pte oldpte, union pte newpte, union pgste pgste)
+{
+ if (pgste.vsie_gmem)
+ return (oldpte.h.p != newpte.h.p) || newpte.h.i;
+ return (oldpte.h.p && !newpte.h.p) || !newpte.s.pr;
+}
+
static inline union pgste _gmap_ptep_xchg(struct gmap *gmap, union pte *ptep, union pte newpte,
union pgste pgste, gfn_t gfn, bool needs_lock)
{
@@ -180,8 +187,9 @@ static inline union pgste _gmap_ptep_xchg(struct gmap *gmap, union pte *ptep, un
pgste.prefix_notif = 0;
gmap_unmap_prefix(gmap, gfn, gfn + 1);
}
- if (pgste.vsie_notif && (ptep->h.p != newpte.h.p || newpte.h.i)) {
+ if (pgste.vsie_notif && needs_unshadow(*ptep, newpte, pgste)) {
pgste.vsie_notif = 0;
+ pgste.vsie_gmem = 0;
if (needs_lock)
gmap_handle_vsie_unshadow_event(gmap, gfn);
else
@@ -217,7 +225,7 @@ static inline bool __must_check _gmap_crstep_xchg_atomic(struct gmap *gmap, unio
gmap_unmap_prefix(gmap, gfn, gfn + align);
}
if (crste_leaf(oldcrste) && oldcrste.s.fc1.vsie_notif &&
- (newcrste.h.p || newcrste.h.i || !newcrste.s.fc1.vsie_notif)) {
+ ((newcrste.h.p != oldcrste.h.p) || newcrste.h.i || !newcrste.s.fc1.vsie_notif)) {
newcrste.s.fc1.vsie_notif = 0;
if (needs_lock)
gmap_handle_vsie_unshadow_event(gmap, gfn);
--
2.54.0