From: Suzuki K Poulose <suzuki.poulose@xxxxxxx>
On Arm CCA, with RMM-v1.0, all MMIO regions are shared. However, in
the future, an Arm CCA-v1.0 compliant guest may be run in a lesser
privileged partition in the Realm world (with the Arm CCA-v1.1 Planes
feature). In that case, some of the MMIO regions may be emulated
by a higher privileged component in the Realm world, i.e., protected.
Thus the guest must already decide today whether a given MMIO region
is shared or protected and create its stage-1 mapping accordingly. On
Arm CCA, this detection is based on the "IPA State" of the region
(RIPAS == RIPAS_DEV). Provide a helper to run this check on a given
MMIO range.
Also, provide an arm64 helper which other solutions may hook into.
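For illustration only (not part of this patch), a caller on the
ioremap path could use the new helper roughly as sketched below to
pick the stage-1 attributes. The mmio_prot() name is made up, and the
sketch assumes the pgprot_encrypted()/pgprot_decrypted() helpers
introduced earlier in this series:

  static pgprot_t mmio_prot(phys_addr_t phys, size_t size)
  {
          pgprot_t prot = __pgprot(PROT_DEVICE_nGnRE);

          /*
           * Protected MMIO is emulated by a more privileged component
           * inside the Realm world; keep such a mapping "encrypted".
           */
          if (arm64_is_mmio_private(phys, size))
                  return pgprot_encrypted(prot);

          /* Anything else is shared with the host; map it "decrypted". */
          return pgprot_decrypted(prot);
  }

The helper itself does not change any mappings; callers decide what to
do with the result.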
Reviewed-by: Catalin Marinas <catalin.marinas@xxxxxxx>
Signed-off-by: Suzuki K Poulose <suzuki.poulose@xxxxxxx>
Signed-off-by: Steven Price <steven.price@xxxxxxx>
---
New patch for v5
---
arch/arm64/include/asm/io.h | 8 ++++++++
arch/arm64/include/asm/rsi.h | 2 ++
arch/arm64/include/asm/rsi_cmds.h | 21 +++++++++++++++++++++
arch/arm64/kernel/rsi.c | 26 ++++++++++++++++++++++++++
4 files changed, 57 insertions(+)
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index 1ada23a6ec19..cce445ff8e3f 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -17,6 +17,7 @@
#include <asm/early_ioremap.h>
#include <asm/alternative.h>
#include <asm/cpufeature.h>
+#include <asm/rsi.h>
/*
* Generic IO read/write. These perform native-endian accesses.
@@ -318,4 +319,11 @@ extern bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size,
unsigned long flags);
#define arch_memremap_can_ram_remap arch_memremap_can_ram_remap
+static inline bool arm64_is_mmio_private(phys_addr_t phys_addr, size_t size)
+{
+ if (unlikely(is_realm_world()))
+ return arm64_is_protected_mmio(phys_addr, size);
+ return false;
+}
+
#endif /* __ASM_IO_H */
diff --git a/arch/arm64/include/asm/rsi.h b/arch/arm64/include/asm/rsi.h
index acba065eb00e..42ff93c7b0ba 100644
--- a/arch/arm64/include/asm/rsi.h
+++ b/arch/arm64/include/asm/rsi.h
@@ -14,6 +14,8 @@ DECLARE_STATIC_KEY_FALSE(rsi_present);
void __init arm64_rsi_init(void);
+bool arm64_is_protected_mmio(phys_addr_t base, size_t size);
+
static inline bool is_realm_world(void)
{
return static_branch_unlikely(&rsi_present);
diff --git a/arch/arm64/include/asm/rsi_cmds.h b/arch/arm64/include/asm/rsi_cmds.h
index b661331c9204..fdb47f690307 100644
--- a/arch/arm64/include/asm/rsi_cmds.h
+++ b/arch/arm64/include/asm/rsi_cmds.h
@@ -45,6 +45,27 @@ static inline unsigned long rsi_get_realm_config(struct realm_config *cfg)
return res.a0;
}
+static inline unsigned long rsi_ipa_state_get(phys_addr_t start,
+ phys_addr_t end,
+ enum ripas *state,
+ phys_addr_t *top)
+{
+ struct arm_smccc_res res;
+
+ arm_smccc_smc(SMC_RSI_IPA_STATE_GET,
+ start, end, 0, 0, 0, 0, 0,
+ &res);
+
+ if (res.a0 == RSI_SUCCESS) {
+ if (top)
+ *top = res.a1;
+ if (state)
+ *state = res.a2;
+ }
+
+ return res.a0;
+}
+
static inline unsigned long rsi_set_addr_range_state(phys_addr_t start,
phys_addr_t end,
enum ripas state,
diff --git a/arch/arm64/kernel/rsi.c b/arch/arm64/kernel/rsi.c
index a6495a64d9bb..d7bba4cee627 100644
--- a/arch/arm64/kernel/rsi.c
+++ b/arch/arm64/kernel/rsi.c
@@ -66,6 +66,32 @@ static void __init arm64_rsi_setup_memory(void)
}
}
+bool arm64_is_protected_mmio(phys_addr_t base, size_t size)
+{
+ enum ripas ripas;
+ phys_addr_t end, top;
+
+ /* Overflow ? */
+ if (WARN_ON(base + size <= base))
+ return false;
+
+ end = ALIGN(base + size, RSI_GRANULE_SIZE);
+ base = ALIGN_DOWN(base, RSI_GRANULE_SIZE);
+
+ while (base < end) {
+ if (WARN_ON(rsi_ipa_state_get(base, end, &ripas, &top)))
+ break;
+ if (WARN_ON(top <= base))
+ break;
+ if (ripas != RSI_RIPAS_DEV)
+ break;
+ base = top;
+ }
+
+ return base >= end;
+}
+EXPORT_SYMBOL(arm64_is_protected_mmio);
+
void __init arm64_rsi_init(void)
{
if (arm_smccc_1_1_get_conduit() != SMCCC_CONDUIT_SMC)