[PATCH v3 4/5] arm64: Add support for SMCCC TRNG entropy source

From: Andre Przywara
Date: Fri Nov 13 2020 - 13:24:57 EST


The ARM architected TRNG firmware interface, described in ARM spec
DEN0098, defines an SMCCC-based interface to a true random number
generator provided by firmware.
Its presence can be discovered via the SMCCC >=v1.1 interface, and it
delivers up to 192 bits of entropy per call.
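
As a hedged illustration (not part of this patch; the function name is
made up for the example), this is roughly how a caller would pull the
full 192 bits out of one RND64 call. Per DEN0098 the entropy comes back
in a1 (bits 191:128), a2 (127:64) and a3 (63:0), with a negative value
in a0 signalling an error, which is why the code below reads res.a3 for
a 64-bit request:

    #include <linux/arm-smccc.h>

    static bool example_trng_read_192(unsigned long ent[3])
    {
            struct arm_smccc_res res;

            /* Ask the firmware TRNG for the maximum of 192 bits. */
            arm_smccc_1_1_invoke(ARM_SMCCC_TRNG_RND64, 192, &res);
            if ((int)res.a0 < 0)
                    return false;

            ent[0] = res.a3;        /* Entropy[63:0]    */
            ent[1] = res.a2;        /* Entropy[127:64]  */
            ent[2] = res.a1;        /* Entropy[191:128] */
            return true;
    }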

Hook this SMC call into arm64's arch_get_random_*() implementation,
so it can step in when the CPU does not implement the ARMv8.5 RNG
system registers.

For the detection, we piggyback on the PSCI/SMCCC discovery (which gives
us the conduit to use, HVC or SMC), then try to call the
ARM_SMCCC_TRNG_VERSION function, which returns -1 if this interface is
not implemented.
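
For reference, a minimal sketch of the wiring this header expects from
the rest of the series (the flag name comes from this patch, the setup
function below is assumed for illustration): the flag is set once,
during SMCCC discovery, after the conduit is known, so the hot paths
only ever test a plain bool:

    /* Assumed wiring, provided outside this header. */
    bool smccc_trng_available;

    static void __init example_smccc_setup(void)
    {
            /* ... SMCCC version and conduit have been probed ... */
            smccc_trng_available = smccc_probe_trng();
    }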

Signed-off-by: Andre Przywara <andre.przywara@xxxxxxx>
---
arch/arm64/include/asm/archrandom.h | 69 ++++++++++++++++++++++++-----
1 file changed, 58 insertions(+), 11 deletions(-)

diff --git a/arch/arm64/include/asm/archrandom.h b/arch/arm64/include/asm/archrandom.h
index abe07c21da8e..fe34bfd30caa 100644
--- a/arch/arm64/include/asm/archrandom.h
+++ b/arch/arm64/include/asm/archrandom.h
@@ -4,13 +4,24 @@

#ifdef CONFIG_ARCH_RANDOM

+#include <linux/arm-smccc.h>
#include <linux/bug.h>
#include <linux/kernel.h>
#include <asm/cpufeature.h>

+#define ARM_SMCCC_TRNG_MIN_VERSION 0x10000UL
+
+extern bool smccc_trng_available;
+
static inline bool __init smccc_probe_trng(void)
{
- return false;
+ struct arm_smccc_res res;
+
+ arm_smccc_1_1_invoke(ARM_SMCCC_TRNG_VERSION, &res);
+ if ((s32)res.a0 < 0)
+ return false;
+
+ return res.a0 >= ARM_SMCCC_TRNG_MIN_VERSION;
}

static inline bool __arm64_rndr(unsigned long *v)
@@ -43,26 +54,52 @@ static inline bool __must_check arch_get_random_int(unsigned int *v)

static inline bool __must_check arch_get_random_seed_long(unsigned long *v)
{
+ struct arm_smccc_res res;
+
/*
* Only support the generic interface after we have detected
* the system wide capability, avoiding complexity with the
* cpufeature code and with potential scheduling between CPUs
* with and without the feature.
*/
- if (!cpus_have_const_cap(ARM64_HAS_RNG))
- return false;
+ if (cpus_have_const_cap(ARM64_HAS_RNG))
+ return __arm64_rndr(v);

- return __arm64_rndr(v);
-}
+ if (smccc_trng_available) {
+ arm_smccc_1_1_invoke(ARM_SMCCC_TRNG_RND64, 64, &res);
+ if ((int)res.a0 < 0)
+ return false;

+ *v = res.a3;
+ return true;
+ }
+
+ return false;
+}

static inline bool __must_check arch_get_random_seed_int(unsigned int *v)
{
+ struct arm_smccc_res res;
unsigned long val;
- bool ok = arch_get_random_seed_long(&val);

- *v = val;
- return ok;
+ if (cpus_have_const_cap(ARM64_HAS_RNG)) {
+ if (arch_get_random_seed_long(&val)) {
+ *v = val;
+ return true;
+ }
+ return false;
+ }
+
+ if (smccc_trng_available) {
+ arm_smccc_1_1_invoke(ARM_SMCCC_TRNG_RND64, 32, &res);
+ if ((int)res.a0 < 0)
+ return false;
+
+ *v = res.a3 & GENMASK(31, 0);
+ return true;
+ }
+
+ return false;
}

static inline bool __init __early_cpu_has_rndr(void)
@@ -77,10 +114,20 @@ arch_get_random_seed_long_early(unsigned long *v)
{
WARN_ON(system_state != SYSTEM_BOOTING);

- if (!__early_cpu_has_rndr())
- return false;
+ if (__early_cpu_has_rndr())
+ return __arm64_rndr(v);
+
+ if (smccc_trng_available) {
+ struct arm_smccc_res res;

- return __arm64_rndr(v);
+ arm_smccc_1_1_invoke(ARM_SMCCC_TRNG_RND64, 64, &res);
+ if ((int)res.a0 >= 0) {
+ *v = res.a3;
+ return true;
+ }
+ }
+
+ return false;
}
#define arch_get_random_seed_long_early arch_get_random_seed_long_early

--
2.17.1