[PATCH 5/6] x86/compressed: adhere to calling convention in get_sev_encryption_bit()
From: Ard Biesheuvel
Date: Mon Aug 15 2022 - 09:43:22 EST
Make get_sev_encryption_bit() follow the ordinary x86 calling
convention, and only call it if CONFIG_AMD_MEM_ENCRYPT is actually
enabled. This clarifies the calling code and makes it more
maintainable.
Signed-off-by: Ard Biesheuvel <ardb@xxxxxxxxxx>
---
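Note: a minimal illustration (not part of this patch) of the register
rules the change relies on. In the 32-bit x86 calling convention,
%eax, %ecx and %edx are caller-saved, so a callee may clobber them
freely, while %ebx, %esi, %edi and %ebp are callee-saved and must be
preserved. CPUID clobbers %eax through %edx, so a hypothetical
conforming leaf function (example_max_cpuid_leaf is a made-up name,
assuming the SYM_FUNC_*/RET macros from <linux/linkage.h>) only needs
to save and restore %ebx:

#include <linux/linkage.h>

	.code32
SYM_FUNC_START(example_max_cpuid_leaf)
	push	%ebx			/* %ebx is callee-saved */
	movl	$0x80000000, %eax	/* ask CPUID for the highest extended leaf */
	cpuid				/* clobbers %eax, %ebx, %ecx and %edx */
	pop	%ebx			/* restore the callee-saved register */
	RET				/* highest extended leaf is returned in %eax */
SYM_FUNC_END(example_max_cpuid_leaf)

This is why get_sev_encryption_bit() only needs to preserve %ebx: the
pushes and pops of %ecx and %edx removed below were never required by
the calling convention.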
arch/x86/boot/compressed/head_64.S | 6 ++++--
arch/x86/boot/compressed/mem_encrypt.S | 10 ----------
2 files changed, 4 insertions(+), 12 deletions(-)
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index 382ed3d8b26a..4539e7c6d4c3 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -180,12 +180,12 @@ SYM_FUNC_START(startup_32)
*/
/*
* If SEV is active then set the encryption mask in the page tables.
- * This will insure that when the kernel is copied and decompressed
+ * This will ensure that when the kernel is copied and decompressed
* it will be done so encrypted.
*/
+#ifdef CONFIG_AMD_MEM_ENCRYPT
call get_sev_encryption_bit
xorl %edx, %edx
-#ifdef CONFIG_AMD_MEM_ENCRYPT
testl %eax, %eax
jz 1f
subl $32, %eax /* Encryption bit is always above bit 31 */
@@ -199,6 +199,8 @@ SYM_FUNC_START(startup_32)
*/
movl $1, rva(sev_status)(%ebp)
1:
+#else
+ xorl %edx, %edx
#endif

/* Initialize Page tables to 0 */
diff --git a/arch/x86/boot/compressed/mem_encrypt.S b/arch/x86/boot/compressed/mem_encrypt.S
index 3cd3db0da49d..b4a116283bd9 100644
--- a/arch/x86/boot/compressed/mem_encrypt.S
+++ b/arch/x86/boot/compressed/mem_encrypt.S
@@ -21,12 +21,7 @@
.text
.code32
SYM_FUNC_START(get_sev_encryption_bit)
- xor %eax, %eax
-
-#ifdef CONFIG_AMD_MEM_ENCRYPT
push %ebx
- push %ecx
- push %edx

movl $0x80000000, %eax /* CPUID to check the highest leaf */
cpuid
@@ -57,12 +52,7 @@ SYM_FUNC_START(get_sev_encryption_bit)
xor %eax, %eax

.Lsev_exit:
- pop %edx
- pop %ecx
pop %ebx
-
-#endif /* CONFIG_AMD_MEM_ENCRYPT */
-
RET
SYM_FUNC_END(get_sev_encryption_bit)

--
2.35.1