[PATCH v6 04/11] x86/tdx: Add protected guest support for TDX guest
From: Kuppuswamy Sathyanarayanan
Date: Fri Sep 03 2021 - 13:28:35 EST
The TDX architecture provides a way for VM guests to be highly secure and
isolated from an untrusted VMM. To meet this requirement, any data
coming from the VMM cannot be completely trusted. The TDX guest addresses
this by hardening the I/O drivers against attacks from the VMM.
So, when adding such hardening fixes to generic drivers, use the
prot_guest_has() API to guard the TDX-specific changes.
Also add TDX guest support to the prot_guest_has() API so that the
TDX-specific fixes can be guarded at runtime.
Signed-off-by: Kuppuswamy Sathyanarayanan <sathyanarayanan.kuppuswamy@xxxxxxxxxxxxxxx>
---
Changes since v5:
* Replaced tdx_prot_guest_has() with intel_prot_guest_has() to
keep the Intel call non-TDX-specific.
* Added TDX guest support to intel_prot_guest_has().
Changes since v4:
* Rebased on top of Tom Lendacky's protected guest changes.
* Moved memory encryption related protected guest flags in
tdx_prot_guest_has() to the patch that actually uses them.
arch/x86/Kconfig | 1 +
arch/x86/include/asm/protected_guest.h | 9 +++++++++
arch/x86/kernel/cpu/intel.c | 14 ++++++++++++++
include/linux/protected_guest.h | 3 +++
4 files changed, 27 insertions(+)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index ab0e7c346c44..10f2cb51a39d 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -871,6 +871,7 @@ config INTEL_TDX_GUEST
depends on SECURITY
select X86_X2APIC
select SECURITY_LOCKDOWN_LSM
+ select ARCH_HAS_PROTECTED_GUEST
help
Provide support for running in a trusted domain on Intel processors
equipped with Trusted Domain eXtensions. TDX is a new Intel
diff --git a/arch/x86/include/asm/protected_guest.h b/arch/x86/include/asm/protected_guest.h
index b4a267dddf93..722d11b2c5e8 100644
--- a/arch/x86/include/asm/protected_guest.h
+++ b/arch/x86/include/asm/protected_guest.h
@@ -11,13 +11,22 @@
#define _X86_PROTECTED_GUEST_H
#include <linux/mem_encrypt.h>
+#include <linux/processor.h>
#ifndef __ASSEMBLY__
+#if defined(CONFIG_CPU_SUP_INTEL) && defined(CONFIG_ARCH_HAS_PROTECTED_GUEST)
+bool intel_prot_guest_has(unsigned int flag);
+#else
+static inline bool intel_prot_guest_has(unsigned int flag) { return false; }
+#endif
+
static inline bool prot_guest_has(unsigned int attr)
{
if (sme_me_mask)
return amd_prot_guest_has(attr);
+ else if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+ return intel_prot_guest_has(attr);
return false;
}
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 8321c43554a1..134ee3984fdd 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -11,6 +11,7 @@
#include <linux/init.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
+#include <linux/protected_guest.h>
#include <asm/cpufeature.h>
#include <asm/msr.h>
@@ -60,6 +61,19 @@ static u64 msr_test_ctrl_cache __ro_after_init;
*/
static bool cpu_model_supports_sld __ro_after_init;
+#ifdef CONFIG_ARCH_HAS_PROTECTED_GUEST
+bool intel_prot_guest_has(unsigned int flag)
+{
+ switch (flag) {
+ case PATTR_GUEST_TDX:
+ return cpu_feature_enabled(X86_FEATURE_TDX_GUEST);
+ }
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(intel_prot_guest_has);
+#endif
+
/*
* Processors which have self-snooping capability can handle conflicting
* memory type across CPUs by snooping its own cache. However, there exists
diff --git a/include/linux/protected_guest.h b/include/linux/protected_guest.h
index 5ddef1b6a2ea..b6bb86bdf713 100644
--- a/include/linux/protected_guest.h
+++ b/include/linux/protected_guest.h
@@ -25,6 +25,9 @@
#define PATTR_SEV 0x801
#define PATTR_SEV_ES 0x802
+/* 0x900 - 0x9ff reserved for Intel */
+#define PATTR_GUEST_TDX 0x900
+
#ifdef CONFIG_ARCH_HAS_PROTECTED_GUEST
#include <asm/protected_guest.h>
--
2.25.1