[PATCH v2 20/24] kvm: arm64: Intercept host's CPU_SUSPEND PSCI SMCs
From: David Brazdil
Date: Mon Nov 16 2020 - 15:45:24 EST

Add a handler for the host's CPU_SUSPEND PSCI SMCs. Depending on the
requested power state, the SMC either enters a shallow sleep state
indistinguishable from a WFI, or a deeper sleep state that behaves like
a CPU_OFF+CPU_ON cycle.
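
For reference, with PSCI v0.2+ the host issues the SMC64 call below
(with v0.1, the function ID comes from firmware via DT instead); the
handler reads the three arguments straight out of the saved host
context:

	x0 = PSCI_0_2_FN64_CPU_SUSPEND   /* 0xc4000001 */
	x1 = power_state                 /* platform-specific encoding */
	x2 = entry_point_address         /* resume pc for deep sleep states */
	x3 = context_id                  /* provided in x0/r0 on wake-up */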

The handler saves the entry point pc and context_id r0 that the host
expects to resume with, and makes the same call to EL3 but with the hyp
CPU entry point instead. EL3 then either returns back to the handler
and from there back to the host (shallow sleep), or the core wakes up
in the hyp entry point and initializes EL2 state before dropping back
to EL1 (deep sleep).
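
On the deep sleep path, the C part of the entry point
(__kvm_hyp_psci_cpu_entry, visible in the hunk context below) hands the
saved values back to the host. A minimal sketch of that hand-off,
assuming the reset state struct stores them in pc/r0 fields:

	/* Resume the host as if the CPU_SUSPEND SMC had just returned. */
	host_ctxt->regs.regs[0] = cpu_state->r0;   /* context_id */
	write_sysreg_el2(cpu_state->pc, SYS_ELR);  /* host resume pc */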

There is a simple atomic lock around the reset state struct to protect
against races with CPU_ON. A well-behaved host should never run CPU_ON
against an already online core, and the kernel indeed does not allow
that, so if the core finds its reset state struct locked, it will
return ALREADY_ON, an error code the PSCI spec does not define for
CPU_SUSPEND. This protects the hypervisor state and avoids the need for
more complicated locking and/or tracking the power state of individual
cores.
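
For reference, the lock is a per-CPU atomic flag taken with acquire
semantics and dropped with release semantics. A minimal sketch of the
helpers (the real ones are introduced by the CPU_ON patch earlier in
this series; the pc/r0 field names are assumptions):

	static bool try_acquire_reset_state(struct kvm_host_psci_state *cpu_state,
					    unsigned long pc, unsigned long r0)
	{
		/* Fails if CPU_ON has locked this core's reset state. */
		if (atomic_cmpxchg_acquire(&cpu_state->pending_on, 0, 1) != 0)
			return false;

		/* Stash the host's resume context for the wake-up path. */
		cpu_state->pc = pc;
		cpu_state->r0 = r0;

		return true;
	}

	static void release_reset_state(struct kvm_host_psci_state *cpu_state)
	{
		atomic_set_release(&cpu_state->pending_on, 0);
	}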
Signed-off-by: David Brazdil <dbrazdil@xxxxxxxxxx>
---
 arch/arm64/kvm/hyp/nvhe/psci-relay.c | 39 ++++++++++++++++++++++++++++++++++++++-
 1 file changed, 38 insertions(+), 1 deletion(-)

diff --git a/arch/arm64/kvm/hyp/nvhe/psci-relay.c b/arch/arm64/kvm/hyp/nvhe/psci-relay.c
index 2daf52b59846..313ef42f0eab 100644
--- a/arch/arm64/kvm/hyp/nvhe/psci-relay.c
+++ b/arch/arm64/kvm/hyp/nvhe/psci-relay.c
@@ -121,6 +121,39 @@ static void release_reset_state(struct kvm_host_psci_state *cpu_state)
 	atomic_set_release(&cpu_state->pending_on, 0);
 }
 
+static int psci_cpu_suspend(u64 func_id, struct kvm_cpu_context *host_ctxt)
+{
+	u64 power_state = host_ctxt->regs.regs[1];
+	unsigned long pc = host_ctxt->regs.regs[2];
+	unsigned long r0 = host_ctxt->regs.regs[3];
+	struct kvm_host_psci_state *cpu_state;
+	struct kvm_nvhe_init_params *cpu_params;
+	int ret;
+
+	cpu_state = this_cpu_ptr(&kvm_host_psci_state);
+	cpu_params = this_cpu_ptr(&kvm_init_params);
+
+	/*
+	 * Lock the reset state struct. This fails if the host has concurrently
+	 * called CPU_ON with this CPU as target. The kernel keeps track of
+	 * online CPUs, so that should never happen. If it does anyway, return
+	 * a non-spec error. This avoids the need for spinlocks.
+	 */
+	if (!try_acquire_reset_state(cpu_state, pc, r0))
+		return PSCI_RET_ALREADY_ON;
+
+	/*
+	 * Will either return if shallow sleep state, or wake up into the entry
+	 * point if it is a deep sleep state.
+	 */
+	ret = psci_call(func_id, power_state,
+			__hyp_pa(hyp_symbol_addr(__kvm_hyp_cpu_entry)),
+			__hyp_pa(cpu_params));
+
+	release_reset_state(cpu_state);
+	return ret;
+}
+
 static int psci_cpu_on(u64 func_id, struct kvm_cpu_context *host_ctxt)
 {
 	u64 mpidr = host_ctxt->regs.regs[1];
@@ -178,7 +211,9 @@ asmlinkage void __noreturn __kvm_hyp_psci_cpu_entry(void)
 
 static unsigned long psci_0_1_handler(u64 func_id, struct kvm_cpu_context *host_ctxt)
 {
-	if (func_id == kvm_host_psci_function_id[PSCI_FN_CPU_OFF])
+	if (func_id == kvm_host_psci_function_id[PSCI_FN_CPU_SUSPEND])
+		return psci_cpu_suspend(func_id, host_ctxt);
+	else if (func_id == kvm_host_psci_function_id[PSCI_FN_CPU_OFF])
 		return psci_forward(host_ctxt);
 	else if (func_id == kvm_host_psci_function_id[PSCI_FN_CPU_ON])
 		return psci_cpu_on(func_id, host_ctxt);
@@ -202,6 +237,8 @@ static unsigned long psci_0_2_handler(u64 func_id, struct kvm_cpu_context *host_
 	case PSCI_0_2_FN_SYSTEM_RESET:
 		psci_forward_noreturn(host_ctxt);
 		unreachable();
+	case PSCI_0_2_FN64_CPU_SUSPEND:
+		return psci_cpu_suspend(func_id, host_ctxt);
 	case PSCI_0_2_FN64_CPU_ON:
 		return psci_cpu_on(func_id, host_ctxt);
 	default:
--
2.29.2.299.gdc1121823c-goog