Re: x86/amd late microcode thread loading slows down boot
From: Borislav Petkov
Date: Mon Nov 18 2024 - 14:16:34 EST
On Thu, Nov 14, 2024 at 12:03:41PM +0000, Andrew Cooper wrote:
> > +static inline void invlpg(unsigned long addr)
> > +{
> > + asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
>
> "invlpg %0" :: "m" (*(char *)addr) : "memory"
>
> The compiler can usually do a better job than forcing it into a plain
> register.
I think the compiler is pretty smart and DTRT regardless.
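For the record, the two variants side by side; a minimal sketch assuming a
standalone file fed to gcc -S to inspect codegen only (invlpg is a privileged
insn, so it compiles in userspace but cannot execute there; the invlpg_reg/
invlpg_mem names are mine, not from the patch):

	/* Variant in the patch: the address is forced into a register. */
	static inline void invlpg_reg(unsigned long addr)
	{
		asm volatile("invlpg (%0)" :: "r" (addr) : "memory");
	}

	/* Suggested variant: the "m" constraint lets the compiler pick
	 * the addressing mode for the operand itself. */
	static inline void invlpg_mem(unsigned long addr)
	{
		asm volatile("invlpg %0" :: "m" (*(char *)addr) : "memory");
	}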
The diff below is only in the comments - the insns are the same:
--- /tmp/before 2024-11-18 20:11:08.942464511 +0100
+++ /tmp/after 2024-11-18 20:10:37.722620293 +0100
@@ -3,27 +3,27 @@
movl %ebp, %esi # psize, psize
# arch/x86/kernel/cpu/microcode/amd.c:495: unsigned long p_addr_end = p_addr + psize - 1;
leaq -1(%rbx,%rsi), %rax #, p_addr_end
-# ./arch/x86/include/asm/tlb.h:39: asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
+# ./arch/x86/include/asm/tlb.h:39: asm volatile("invlpg %0" ::"m" (*(char *)addr) : "memory");
#APP
# 39 "./arch/x86/include/asm/tlb.h" 1
- invlpg (%rbx) # mc
+ invlpg (%rbx) # MEM[(char *)_1]
# 0 "" 2
# arch/x86/kernel/cpu/microcode/amd.c:503: if (p_addr >> PAGE_SHIFT != p_addr_end >> PAGE_SHIFT)
#NO_APP
- movq %rbx, %rcx # mc, tmp110
+ movq %rbx, %rcx # mc, tmp111
# arch/x86/kernel/cpu/microcode/amd.c:503: if (p_addr >> PAGE_SHIFT != p_addr_end >> PAGE_SHIFT)
- movq %rax, %rdx # p_addr_end, tmp111
+ movq %rax, %rdx # p_addr_end, tmp112
# arch/x86/kernel/cpu/microcode/amd.c:503: if (p_addr >> PAGE_SHIFT != p_addr_end >> PAGE_SHIFT)
- shrq $12, %rcx #, tmp110
+ shrq $12, %rcx #, tmp111
# arch/x86/kernel/cpu/microcode/amd.c:503: if (p_addr >> PAGE_SHIFT != p_addr_end >> PAGE_SHIFT)
- shrq $12, %rdx #, tmp111
+ shrq $12, %rdx #, tmp112
# arch/x86/kernel/cpu/microcode/amd.c:503: if (p_addr >> PAGE_SHIFT != p_addr_end >> PAGE_SHIFT)
- cmpq %rdx, %rcx # tmp111, tmp110
+ cmpq %rdx, %rcx # tmp112, tmp111
je .L5 #,
-# ./arch/x86/include/asm/tlb.h:39: asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
+# ./arch/x86/include/asm/tlb.h:39: asm volatile("invlpg %0" ::"m" (*(char *)addr) : "memory");
#APP
# 39 "./arch/x86/include/asm/tlb.h" 1
- invlpg (%rax) # p_addr_end
+ invlpg (%rax) # *addr.16_25
# 0 "" 2
# ./arch/x86/include/asm/tlb.h:40: }
#NO_APP
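(A diff like the above can be regenerated by emitting annotated assembly
before and after the change; a hedged sketch, assuming a configured kernel
tree and GCC's -fverbose-asm, which produces the operand comments seen
above:

	make arch/x86/kernel/cpu/microcode/amd.s KCFLAGS=-fverbose-asm
	# save the .s as /tmp/before, apply the change, rebuild,
	# save as /tmp/after
	diff -u /tmp/before /tmp/after
)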
--
Regards/Gruss,
Boris.
https://people.kernel.org/tglx/notes-about-netiquette