[PATCH v1 21/24] kvm: arm64: Add kvm-arm.protected early kernel parameter

From: David Brazdil
Date: Mon Nov 09 2020 - 06:36:10 EST


Add an early parameter that allows users to opt into protected KVM mode
when using the nVHE hypervisor. In this mode, guest state will be kept
private from the host. This will primarily involve enabling stage-2
address translation for the host, restricting DMA to host memory, and
filtering host SMCs.
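
For example, the mode is requested on the kernel command line at boot;
the value is parsed with strtobool(), so e.g. the following opts in:

  kvm-arm.protected=1

If the kernel is running with VHE, the request cannot be honoured and
kvm_arch_init() fails with -ENODEV rather than silently ignoring the
parameter.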

Signed-off-by: David Brazdil <dbrazdil@xxxxxxxxxx>
---
 arch/arm64/include/asm/virt.h |  9 +++++++++
 arch/arm64/kvm/arm.c          | 23 ++++++++++++++++++++++-
 2 files changed, 31 insertions(+), 1 deletion(-)

diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h
index 6069be50baf9..2c3124512c00 100644
--- a/arch/arm64/include/asm/virt.h
+++ b/arch/arm64/include/asm/virt.h
@@ -65,6 +65,8 @@ extern u32 __boot_cpu_mode[2];
 void __hyp_set_vectors(phys_addr_t phys_vector_base);
 void __hyp_reset_vectors(void);

+DECLARE_STATIC_KEY_FALSE(kvm_protected_mode);
+
 /* Reports the availability of HYP mode */
 static inline bool is_hyp_mode_available(void)
 {
@@ -97,6 +99,13 @@ static __always_inline bool has_vhe(void)
 	return cpus_have_final_cap(ARM64_HAS_VIRT_HOST_EXTN);
 }

+static __always_inline bool is_kvm_protected_mode(void)
+{
+	return IS_ENABLED(CONFIG_KVM) &&
+	       (is_nvhe_hyp_code() || !is_kernel_in_hyp_mode()) &&
+	       static_branch_likely(&kvm_protected_mode);
+}
+
 #endif /* __ASSEMBLY__ */

 #endif /* ! __ASM__VIRT_H */
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index a931253ebb61..452a01afaf33 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -47,6 +47,8 @@
__asm__(".arch_extension virt");
#endif

+DEFINE_STATIC_KEY_FALSE(kvm_protected_mode);
+
DECLARE_KVM_HYP_PER_CPU(unsigned long, kvm_hyp_vector);

static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
@@ -1796,6 +1798,11 @@ int kvm_arch_init(void *opaque)
 		return -ENODEV;
 	}

+	if (in_hyp_mode && static_branch_unlikely(&kvm_protected_mode)) {
+		kvm_pr_unimpl("VHE protected mode unsupported, not initializing\n");
+		return -ENODEV;
+	}
+
 	if (cpus_have_final_cap(ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE) ||
 	    cpus_have_final_cap(ARM64_WORKAROUND_1508412))
 		kvm_info("Guests without required CPU erratum workarounds can deadlock system!\n" \
@@ -1827,7 +1834,9 @@ int kvm_arch_init(void *opaque)
 	if (err)
 		goto out_hyp;

-	if (in_hyp_mode)
+	if (is_kvm_protected_mode())
+		kvm_info("Protected nVHE mode initialized successfully\n");
+	else if (in_hyp_mode)
 		kvm_info("VHE mode initialized successfully\n");
 	else
 		kvm_info("Hyp mode initialized successfully\n");
@@ -1848,6 +1857,18 @@ void kvm_arch_exit(void)
 	kvm_perf_teardown();
 }

+static int __init early_kvm_protected_cfg(char *buf)
+{
+	bool val;
+	int err;
+
+	err = strtobool(buf, &val);
+	if (!err && val)
+		static_branch_enable(&kvm_protected_mode);
+	return err;
+}
+early_param("kvm-arm.protected", early_kvm_protected_cfg);
+
 static int arm_init(void)
 {
 	int rc = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
--
2.29.2.222.g5d2a92d10f8-goog