[PATCH v3 04/15] x86/asm: Add a wrapper function for the LOADIWKEY instruction

From: Chang S. Bae
Date: Wed Nov 24 2021 - 15:14:35 EST


Key Locker introduces a CPU-internal wrapping key to encode a user key into a
key handle. The key handle is then referenced instead of the plaintext key.

The new LOADIWKEY instruction loads an internal wrapping key into
software-inaccessible CPU state. It operates only in kernel mode.

Define struct iwkey to pass the key value.

The kernel will use this function to load a new key at boot time when the
feature is enabled.

Signed-off-by: Chang S. Bae <chang.seok.bae@xxxxxxxxx>
Reviewed-by: Dan Williams <dan.j.williams@xxxxxxxxx>
Cc: x86@xxxxxxxxxx
Cc: linux-kernel@xxxxxxxxxxxxxxx
---
Changes from RFC v2:
* Separate out the code as a new patch.
* Improve the usability with the new struct as an argument. (Dan Williams)

Note, Dan wondered if:
WARN_ON(!irq_fpu_usable());
would be appropriate in the load_xmm_iwkey() function.
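
For reference, a minimal sketch of how that check might look if adopted;
the placement is hypothetical and not part of this patch, and WARN_ON()
and irq_fpu_usable() are the existing kernel helpers:

static inline void load_xmm_iwkey(struct iwkey *key)
{
	/* Hypothetical: warn if the caller has not made FPU use safe here. */
	WARN_ON(!irq_fpu_usable());

	/* ... rest of the body as in the patch below ... */
}
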
---
arch/x86/include/asm/keylocker.h | 25 +++++++++++++++++++++
arch/x86/include/asm/special_insns.h | 33 ++++++++++++++++++++++++++++
2 files changed, 58 insertions(+)
create mode 100644 arch/x86/include/asm/keylocker.h

diff --git a/arch/x86/include/asm/keylocker.h b/arch/x86/include/asm/keylocker.h
new file mode 100644
index 000000000000..df84c83228a1
--- /dev/null
+++ b/arch/x86/include/asm/keylocker.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _ASM_KEYLOCKER_H
+#define _ASM_KEYLOCKER_H
+
+#ifndef __ASSEMBLY__
+
+#include <asm/fpu/types.h>
+
+/**
+ * struct iwkey - A temporary internal wrapping key storage.
+ * @integrity_key: A 128-bit key to check that key handles have not
+ * been tampered with.
+ * @encryption_key: A 256-bit encryption key used in
+ * wrapping/unwrapping a clear text key.
+ *
+ * This storage should be flushed immediately after being loaded.
+ */
+struct iwkey {
+ struct reg_128_bit integrity_key;
+ struct reg_128_bit encryption_key[2];
+};
+
+#endif /*__ASSEMBLY__ */
+#endif /* _ASM_KEYLOCKER_H */
diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
index 68c257a3de0d..e6469b05facf 100644
--- a/arch/x86/include/asm/special_insns.h
+++ b/arch/x86/include/asm/special_insns.h
@@ -9,6 +9,7 @@
#include <asm/processor-flags.h>
#include <linux/irqflags.h>
#include <linux/jump_label.h>
+#include <asm/keylocker.h>

/*
* The compiler should not reorder volatile asm statements with respect to each
@@ -294,6 +295,38 @@ static inline int enqcmds(void __iomem *dst, const void *src)
return 0;
}

+
+/**
+ * load_xmm_iwkey - Load a CPU-internal wrapping key
+ * @key: A struct iwkey pointer.
+ *
+ * Load @key into the XMM registers, then execute LOADIWKEY. Afterwards,
+ * flush the XMM registers. The caller is responsible for kernel_fpu_begin().
+ */
+static inline void load_xmm_iwkey(struct iwkey *key)
+{
+ struct reg_128_bit zeros = { 0 };
+
+ asm volatile ("movdqu %0, %%xmm0; movdqu %1, %%xmm1; movdqu %2, %%xmm2;"
+ :: "m"(key->integrity_key), "m"(key->encryption_key[0]),
+ "m"(key->encryption_key[1]));
+
+ /*
+ * LOADIWKEY %xmm1,%xmm2
+ *
+ * EAX and XMM0 are implicit operands. Load a key value
+ * from XMM0-2 into software-invisible CPU state. With zero
+ * in EAX, the CPU does not use hardware randomization and
+ * backup of the key is allowed.
+ *
+ * This instruction is supported by binutils >= 2.36.
+ */
+ asm volatile (".byte 0xf3,0x0f,0x38,0xdc,0xd1" :: "a"(0));
+
+ asm volatile ("movdqu %0, %%xmm0; movdqu %0, %%xmm1; movdqu %0, %%xmm2;"
+ :: "m"(zeros));
+}
+
#endif /* __KERNEL__ */

#endif /* _ASM_X86_SPECIAL_INSNS_H */
--
2.17.1