[PATCH AUTOSEL 5.15 081/188] s390/nmi: add missing __pa/__va address conversion of extended save area
From: Sasha Levin
Date: Mon Jan 17 2022 - 21:46:48 EST
From: Heiko Carstens <hca@xxxxxxxxxxxxx>
[ Upstream commit 402ff5a3387dc8ec6987a80d3ce26b0c25773622 ]
Add missing __pa/__va address conversion of the machine check extended
save area designation, which is an absolute address.
Note: this currently doesn't fix a real bug, since virtual addresses
are identical to physical ones.
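For illustration only (not part of the patch): the idea behind __pa/__va
is that a value handed to the hardware, such as the mcesad designation,
must be a physical address, while the kernel must convert it back to a
virtual address before dereferencing it. The small user-space sketch
below models that with a made-up KERNEL_OFFSET and simplified macros;
the real s390 definitions live in arch/s390/include/asm/page.h and the
boot_mcesa stand-in is hypothetical.

/*
 * Illustrative sketch, not kernel code: simulate __pa/__va with a
 * fixed, assumed offset so the example compiles and runs standalone.
 */
#include <stdio.h>
#include <stdint.h>

/* Hypothetical virtual-to-physical offset (it happens to be 0 on s390 today). */
#define KERNEL_OFFSET 0x1000UL

#define __pa(vaddr) ((uintptr_t)(vaddr) - KERNEL_OFFSET)
#define __va(paddr) ((void *)((uintptr_t)(paddr) + KERNEL_OFFSET))

static char boot_mcesa[1024]; /* stand-in for the extended save area */

int main(void)
{
	/* The hardware expects an absolute (physical) address in mcesad ... */
	uintptr_t mcesad = __pa(boot_mcesa);

	/* ... but the kernel must convert it back before touching the memory. */
	void *mcesa = __va(mcesad);

	printf("virtual %p -> physical 0x%lx -> virtual %p\n",
	       (void *)boot_mcesa, (unsigned long)mcesad, mcesa);
	return 0;
}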
Reported-by: Vineeth Vijayan <vneethv@xxxxxxxxxxxxx>
Tested-by: Vineeth Vijayan <vneethv@xxxxxxxxxxxxx>
Signed-off-by: Heiko Carstens <hca@xxxxxxxxxxxxx>
Signed-off-by: Sasha Levin <sashal@xxxxxxxxxx>
---
arch/s390/kernel/machine_kexec.c | 2 +-
arch/s390/kernel/nmi.c | 10 +++++-----
arch/s390/kernel/smp.c | 2 +-
3 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index 0505e55a62979..a16467b3825ec 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -86,7 +86,7 @@ static noinline void __machine_kdump(void *image)
continue;
}
/* Store status of the boot CPU */
- mcesa = (struct mcesa *)(S390_lowcore.mcesad & MCESA_ORIGIN_MASK);
+ mcesa = __va(S390_lowcore.mcesad & MCESA_ORIGIN_MASK);
if (MACHINE_HAS_VX)
save_vx_regs((__vector128 *) mcesa->vector_save_area);
if (MACHINE_HAS_GS) {
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
index 20f8e1868853f..3f18c1412eba3 100644
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -68,7 +68,7 @@ void __init nmi_alloc_boot_cpu(struct lowcore *lc)
{
if (!nmi_needs_mcesa())
return;
- lc->mcesad = (unsigned long) &boot_mcesa;
+ lc->mcesad = __pa(&boot_mcesa);
if (MACHINE_HAS_GS)
lc->mcesad |= ilog2(MCESA_MAX_SIZE);
}
@@ -94,7 +94,7 @@ static int __init nmi_init(void)
__ctl_store(cr0, 0, 0);
__ctl_clear_bit(0, 28); /* disable lowcore protection */
/* Replace boot_mcesa on the boot CPU */
- S390_lowcore.mcesad = origin | mcesa_origin_lc;
+ S390_lowcore.mcesad = __pa(origin) | mcesa_origin_lc;
__ctl_load(cr0, 0, 0);
return 0;
}
@@ -111,7 +111,7 @@ int nmi_alloc_per_cpu(struct lowcore *lc)
return -ENOMEM;
/* The pointer is stored with mcesa_bits ORed in */
kmemleak_not_leak((void *) origin);
- lc->mcesad = origin | mcesa_origin_lc;
+ lc->mcesad = __pa(origin) | mcesa_origin_lc;
return 0;
}
@@ -119,7 +119,7 @@ void nmi_free_per_cpu(struct lowcore *lc)
{
if (!nmi_needs_mcesa())
return;
- kmem_cache_free(mcesa_cache, (void *)(lc->mcesad & MCESA_ORIGIN_MASK));
+ kmem_cache_free(mcesa_cache, __va(lc->mcesad & MCESA_ORIGIN_MASK));
}
static notrace void s390_handle_damage(void)
@@ -246,7 +246,7 @@ static int notrace s390_validate_registers(union mci mci, int umode)
: "Q" (S390_lowcore.fpt_creg_save_area));
}
- mcesa = (struct mcesa *)(S390_lowcore.mcesad & MCESA_ORIGIN_MASK);
+ mcesa = __va(S390_lowcore.mcesad & MCESA_ORIGIN_MASK);
if (!MACHINE_HAS_VX) {
/* Validate floating point registers */
asm volatile(
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 1a04e5bdf6555..5c3d3d8f6b5d8 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -622,7 +622,7 @@ int smp_store_status(int cpu)
return -EIO;
if (!MACHINE_HAS_VX && !MACHINE_HAS_GS)
return 0;
- pa = __pa(lc->mcesad & MCESA_ORIGIN_MASK);
+ pa = lc->mcesad & MCESA_ORIGIN_MASK;
if (MACHINE_HAS_GS)
pa |= lc->mcesad & MCESA_LC_MASK;
if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_ADDITIONAL_STATUS,
--
2.34.1