Re: [PATCH v13 00/11] Parallel CPU bringup for x86_64

From: Tom Lendacky
Date: Tue Mar 07 2023 - 11:50:28 EST


On 3/7/23 08:42, David Woodhouse wrote:
On Thu, 2023-03-02 at 11:12 +0000, Usama Arif wrote:
The main code change over v12 is to fix the build error when
CONFIG_FORCE_NR_CPUS is present.

The commit message for removing the initial stack has also been improved, typos
have been fixed, and extra comments have been added to make the code clearer.

Might something like this make it work in parallel with SEV-SNP? If so,
I can clean it up and adjust the C code to actually invoke it...

This should be ok for both SEV-ES and SEV-SNP.
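
As a very rough sketch of the C-side wiring mentioned above (the call site and
selection logic here are assumptions for illustration, not something from this
series), the new flag could simply be chosen whenever the guest runs with
encrypted register state, which covers both SEV-ES and SEV-SNP:

        /*
         * Hypothetical selection of the AP startup method when filling in
         * smpboot_control; the fallback ordering is an assumption.
         */
        if (cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT))
                smpboot_control = STARTUP_APICID_SEV_SNP;
        else if (boot_cpu_has(X86_FEATURE_X2APIC))
                smpboot_control = STARTUP_APICID_CPUID_0B;
        else
                smpboot_control = STARTUP_APICID_CPUID_01;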


diff --git a/arch/x86/include/asm/sev-common.h b/arch/x86/include/asm/sev-common.h
index b8357d6ecd47..f25df4bd318e 100644
--- a/arch/x86/include/asm/sev-common.h
+++ b/arch/x86/include/asm/sev-common.h
@@ -70,6 +70,7 @@
/* GHCBData[63:12] */ \
(((u64)(v) & GENMASK_ULL(63, 12)) >> 12)
+#ifndef __ASSEMBLY__
/*
* SNP Page State Change Operation
*
@@ -160,6 +161,8 @@ struct snp_psc_desc {
#define GHCB_RESP_CODE(v) ((v) & GHCB_MSR_INFO_MASK)
+#endif /* __ASSEMBLY__ */
+
/*
* Error codes related to GHCB input that can be communicated back to the guest
* by setting the lower 32-bits of the GHCB SW_EXITINFO1 field to 2.
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index defe76ee9e64..0c26f80f488c 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -204,6 +204,7 @@ extern unsigned int smpboot_control;
/* Control bits for startup_64 */
#define STARTUP_APICID_CPUID_0B 0x80000000
#define STARTUP_APICID_CPUID_01 0x40000000
+#define STARTUP_APICID_SEV_SNP 0x20000000
#define STARTUP_PARALLEL_MASK (STARTUP_APICID_CPUID_01 | STARTUP_APICID_CPUID_0B)
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index c35f7c173832..b2571034562c 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -26,7 +26,7 @@
#include <asm/nospec-branch.h>
#include <asm/fixmap.h>
#include <asm/smp.h>
-
+#include <asm/sev-common.h>
/*
* We are not able to switch in one step to the final KERNEL ADDRESS SPACE
* because we need identity-mapped pages.
@@ -242,6 +242,7 @@ SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
*
* Bit 31 STARTUP_APICID_CPUID_0B flag (use CPUID 0x0b)
* Bit 30 STARTUP_APICID_CPUID_01 flag (use CPUID 0x01)
+ * Bit 29 STARTUP_APICID_SEV_SNP flag (CPUID 0x0b via GHCB MSR)
* Bit 0-24 CPU# if STARTUP_APICID_CPUID_xx flags are not set
*/
movl smpboot_control(%rip), %ecx
@@ -249,6 +250,8 @@ SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
jnz .Luse_cpuid_0b
testl $STARTUP_APICID_CPUID_01, %ecx
jnz .Luse_cpuid_01
+ testl $STARTUP_APICID_SEV_SNP, %ecx
+ jnz .Luse_sev_cpuid_0b
andl $0x0FFFFFFF, %ecx
jmp .Lsetup_cpu
@@ -259,6 +262,18 @@ SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
shr $24, %edx
jmp .Lsetup_AP
+.Luse_sev_cpuid_0b:
+ movl $MSR_AMD64_SEV_ES_GHCB, %ecx
+ # GHCB_CPUID_REQ(0x0b, GHCB_CPUID_REQ_EDX)
+ movq $0xc00000040000000b, %rax
+ xorl %edx, %edx
+ vmgexit

According to the GHCB spec, the GHCB MSR protocol is triggered when bits 11:0 of the GHCB MSR value are non-zero. For the CPUID function, bits 63:32 hold the CPUID function, bits 31:30 hold the requested register, and bits 11:0 must be 0x4. So this should be:

        /* Set the GHCB MSR to request CPUID 0xB_EDX */
        movl    $MSR_AMD64_SEV_ES_GHCB, %ecx
        movl    $0xc0000004, %eax
        movl    $0xb, %edx
        wrmsr

        /* Perform GHCB MSR protocol */
        vmgexit

        /*
         * Get the result. After the RDMSR:
         *   EAX should be 0xc0000005
         *   EDX should have the CPUID register value and since EDX
         *   is the target register, no need to move the result.
         */
+ rdmsr
+ andl $GHCB_MSR_INFO_MASK, %eax
+ cmpl $GHCB_MSR_CPUID_RESP, %eax
+ jne 1f
+ jmp .Lsetup_AP
+
.Luse_cpuid_0b:
mov $0x0B, %eax
xorl %ecx, %ecx
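
Putting that correction together with the tail of the quoted hunk, the whole
block would then read roughly as follows (just a sketch, untested; the 1f error
path and the .Lsetup_AP label are the ones already used by head_64.S in this
series):

.Luse_sev_cpuid_0b:
        /*
         * Request CPUID 0xB, EDX register, via the GHCB MSR protocol:
         * GHCBData[63:32] = 0xb (CPUID function), bits 31:30 = 3 (EDX),
         * bits 11:0 = 0x4 (CPUID request), i.e. EDX:EAX = 0xb : 0xc0000004.
         */
        movl    $MSR_AMD64_SEV_ES_GHCB, %ecx
        movl    $0xc0000004, %eax
        movl    $0xb, %edx
        wrmsr

        /* Perform GHCB MSR protocol */
        vmgexit

        /* Check for a CPUID response; EDX already holds the APIC ID */
        rdmsr
        andl    $GHCB_MSR_INFO_MASK, %eax
        cmpl    $GHCB_MSR_CPUID_RESP, %eax
        jne     1f
        jmp     .Lsetup_AP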


Thanks,
Tom