author		Ard Biesheuvel <ardb@kernel.org>	2022-11-22 17:10:13 +0100
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2024-03-06 14:45:13 +0000
commit		e840ae3dc277f7f4ae38f600e7f5da7f169b8d7c
tree		d46256d05d7fda60b6e0971d01b91f0058403936
parent		801873f1750aa1cc42e290d8a818e340fd7d0987
x86/boot/compressed: Move startup32_check_sev_cbit() into .text
commit b5d854cd4b6a314edd6c15dabc4233b84a0f8e5e upstream.

Move startup32_check_sev_cbit() into the .text section and turn it into
an ordinary function using the ordinary 32-bit calling convention,
instead of saving/restoring the registers that are known to be live at
the only call site. This improves maintainability, and makes it
possible to move this function out of head_64.S and into a separate
compilation unit that is specific to memory encryption.

Note that this requires the call site to be moved before the mixed mode
check, as %eax will be live otherwise.

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
Link: https://lore.kernel.org/r/20221122161017.2426828-14-ardb@kernel.org
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
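The key idiom in the patch is visible in the hunks below: the rva()-based
references, which assume %ebp still holds the runtime address of startup_32,
are replaced by the classic position-independent 32-bit sequence of calling a
local label to push the return address, popping it into a register, and
addressing data relative to that label. A minimal sketch of the idiom
(my_data is an illustrative stand-in, not a symbol from the patch):

	call	0f			/* push the runtime address of label 0 */
0:	popl	%ebp			/* %ebp = address we are executing at */
	movl	(my_data - 0b)(%ebp), %eax	/* reach my_data relative to that base */

This is also why only %ebx and %ebp are saved and restored now: under the
ordinary 32-bit calling convention they are callee-saved, while %eax, %ecx
and %edx may be clobbered freely.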
-rw-r--r--	arch/x86/boot/compressed/head_64.S	| 35
1 file changed, 19 insertions(+), 16 deletions(-)
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index b1d00f862af91..c7655a9dfd3f8 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -251,6 +251,11 @@ SYM_FUNC_START(startup_32)
 	movl	$__BOOT_TSS, %eax
 	ltr	%ax
 
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+	/* Check if the C-bit position is correct when SEV is active */
+	call	startup32_check_sev_cbit
+#endif
+
 	/*
 	 * Setup for the jump to 64bit mode
 	 *
@@ -268,8 +273,6 @@ SYM_FUNC_START(startup_32)
 	leal	rva(startup_64_mixed_mode)(%ebp), %eax
 1:
 #endif
-	/* Check if the C-bit position is correct when SEV is active */
-	call	startup32_check_sev_cbit
 
 	pushl	$__KERNEL_CS
 	pushl	%eax
@@ -740,16 +743,17 @@ SYM_DATA_END_LABEL(boot_idt, SYM_L_GLOBAL, boot_idt_end)
  * succeed. An incorrect C-bit position will map all memory unencrypted, so that
  * the compare will use the encrypted random data and fail.
  */
-	__HEAD
-SYM_FUNC_START(startup32_check_sev_cbit)
 #ifdef CONFIG_AMD_MEM_ENCRYPT
-	pushl	%eax
+	.text
+SYM_FUNC_START(startup32_check_sev_cbit)
 	pushl	%ebx
-	pushl	%ecx
-	pushl	%edx
+	pushl	%ebp
+
+	call	0f
+0:	popl	%ebp
 
 	/* Check for non-zero sev_status */
-	movl	rva(sev_status)(%ebp), %eax
+	movl	(sev_status - 0b)(%ebp), %eax
 	testl	%eax, %eax
 	jz	4f
 
@@ -764,17 +768,18 @@ SYM_FUNC_START(startup32_check_sev_cbit)
 	jnc	2b
 
 	/* Store to memory and keep it in the registers */
-	movl	%eax, rva(sev_check_data)(%ebp)
-	movl	%ebx, rva(sev_check_data+4)(%ebp)
+	leal	(sev_check_data - 0b)(%ebp), %ebp
+	movl	%eax, 0(%ebp)
+	movl	%ebx, 4(%ebp)
 
 	/* Enable paging to see if encryption is active */
 	movl	%cr0, %edx			/* Backup %cr0 in %edx */
 	movl	$(X86_CR0_PG | X86_CR0_PE), %ecx /* Enable Paging and Protected mode */
 	movl	%ecx, %cr0
 
-	cmpl	%eax, rva(sev_check_data)(%ebp)
+	cmpl	%eax, 0(%ebp)
 	jne	3f
-	cmpl	%ebx, rva(sev_check_data+4)(%ebp)
+	cmpl	%ebx, 4(%ebp)
 	jne	3f
 
 	movl	%edx, %cr0	/* Restore previous %cr0 */
@@ -786,13 +791,11 @@ SYM_FUNC_START(startup32_check_sev_cbit)
 	jmp	3b
 
 4:
-	popl	%edx
-	popl	%ecx
+	popl	%ebp
 	popl	%ebx
-	popl	%eax
-#endif
 	RET
 SYM_FUNC_END(startup32_check_sev_cbit)
+#endif
 
 /*
  * Stack and heap for uncompression
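For context, the check performed by startup32_check_sev_cbit() relies on the
fact that an SEV guest's memory accesses are treated as encrypted while
paging is disabled: 64 bits of random data are stored with paging off, paging
is then enabled, and the data is read back through the page tables. With the
C-bit at the correct position the mapping decrypts to the stored values and
the compare succeeds; a wrong position maps memory unencrypted, so the
compare sees ciphertext and fails. A condensed sketch of that flow, with
check_data and the cbit_mismatch label as illustrative stand-ins for the
patch's actual symbols:

	/* paging off: the store is treated as encrypted */
	movl	%eax, check_data
	movl	%cr0, %edx			/* back up %cr0 */
	movl	$(X86_CR0_PG | X86_CR0_PE), %ecx
	movl	%ecx, %cr0			/* enable paging */
	cmpl	%eax, check_data		/* read back via the page tables */
	jne	cbit_mismatch			/* ciphertext: wrong C-bit position */
	movl	%edx, %cr0			/* restore %cr0 */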