Re: [PATCH V2 for 6.6 & 6.12] LoongArch: Rework KASAN initialization for PTW-enabled systems

From: Greg Kroah-Hartman

Date: Tue Feb 17 2026 - 06:26:42 EST


On Tue, Feb 17, 2026 at 07:13:30PM +0800, Huacai Chen wrote:
> Hi, Greg,
>
> On Tue, Feb 17, 2026 at 7:06 PM Greg Kroah-Hartman
> <gregkh@xxxxxxxxxxxxxxxxxxx> wrote:
> >
> > On Mon, Feb 16, 2026 at 10:25:50PM +0800, Huacai Chen wrote:
> > > From: Tiezhu Yang <yangtiezhu@xxxxxxxxxxx>
> > >
> > > commit 5ec5ac4ca27e4daa234540ac32f9fc5219377d53 upstream.
> > >
> > > kasan_init_generic() indicates that kasan is fully initialized, so it
> > > should be put at end of kasan_init().
> > >
> > > Otherwise bringing up the primary CPU failed when CONFIG_KASAN is set
> > > on PTW-enabled systems, here are the call chains:
> > >
> > > kernel_entry()
> > > start_kernel()
> > > setup_arch()
> > > kasan_init()
> > > kasan_init_generic()
> > >
> > > The reason is PTW-enabled systems have speculative accesses which means
> > > memory accesses to the shadow memory after kasan_init() may be executed
> > > by hardware beforehand. However, accessing shadow memory is safe only after
> > > kasan fully initialized because kasan_init() uses a temporary PGD table
> > > until we have populated all levels of shadow page tables and written the
> > > PGD register. Moving kasan_init_generic() later can defer the occasion
> > > of kasan_enabled(), so as to avoid speculative accesses on shadow pages.
> > >
> > > After moving kasan_init_generic() to the end, kasan_init() can no longer
> > > call kasan_mem_to_shadow() for shadow address conversion because it will
> > > always return kasan_early_shadow_page. On the other hand, we should keep
> > > the current logic of kasan_mem_to_shadow() for both the early and final
> > > stage because there may be instrumentation before kasan_init().
> > >
> > > To solve this, we factor out a new mem_to_shadow() function from current
> > > kasan_mem_to_shadow() for the shadow address conversion in kasan_init().
> > >
> > > [ Huacai: To backport from upstream to 6.6 & 6.12, kasan_enabled() is
> > > replaced with kasan_arch_is_ready() and kasan_init_generic()
> > > is replaced with "kasan_early_stage = false". ]
> > >
> > > Cc: stable@xxxxxxxxxxxxxxx
> > > Signed-off-by: Tiezhu Yang <yangtiezhu@xxxxxxxxxxx>
> > > Signed-off-by: Huacai Chen <chenhuacai@xxxxxxxxxxx>
> > > ---
> > > arch/loongarch/mm/kasan_init.c | 77 ++++++++++++++++++----------------
> > > 1 file changed, 40 insertions(+), 37 deletions(-)
> > >
> > > diff --git a/arch/loongarch/mm/kasan_init.c b/arch/loongarch/mm/kasan_init.c
> > > index d2681272d8f0..9337380a70eb 100644
> > > --- a/arch/loongarch/mm/kasan_init.c
> > > +++ b/arch/loongarch/mm/kasan_init.c
> > > @@ -42,39 +42,43 @@ static pgd_t kasan_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);
> > >
> > > bool kasan_early_stage = true;
> > >
> > > -void *kasan_mem_to_shadow(const void *addr)
> > > +static void *mem_to_shadow(const void *addr)
> > > {
> > > - if (!kasan_arch_is_ready()) {
> > > + unsigned long offset = 0;
> > > + unsigned long maddr = (unsigned long)addr;
> > > + unsigned long xrange = (maddr >> XRANGE_SHIFT) & 0xffff;
> > > +
> > > + if (maddr >= FIXADDR_START)
> > > return (void *)(kasan_early_shadow_page);
> > > - } else {
> > > - unsigned long maddr = (unsigned long)addr;
> > > - unsigned long xrange = (maddr >> XRANGE_SHIFT) & 0xffff;
> > > - unsigned long offset = 0;
> > > -
> > > - if (maddr >= FIXADDR_START)
> > > - return (void *)(kasan_early_shadow_page);
> > > -
> > > - maddr &= XRANGE_SHADOW_MASK;
> > > - switch (xrange) {
> > > - case XKPRANGE_CC_SEG:
> > > - offset = XKPRANGE_CC_SHADOW_OFFSET;
> > > - break;
> > > - case XKPRANGE_UC_SEG:
> > > - offset = XKPRANGE_UC_SHADOW_OFFSET;
> > > - break;
> > > - case XKPRANGE_WC_SEG:
> > > - offset = XKPRANGE_WC_SHADOW_OFFSET;
> > > - break;
> > > - case XKVRANGE_VC_SEG:
> > > - offset = XKVRANGE_VC_SHADOW_OFFSET;
> > > - break;
> > > - default:
> > > - WARN_ON(1);
> > > - return NULL;
> > > - }
> > >
> > > - return (void *)((maddr >> KASAN_SHADOW_SCALE_SHIFT) + offset);
> > > + maddr &= XRANGE_SHADOW_MASK;
> > > + switch (xrange) {
> > > + case XKPRANGE_CC_SEG:
> > > + offset = XKPRANGE_CC_SHADOW_OFFSET;
> > > + break;
> > > + case XKPRANGE_UC_SEG:
> > > + offset = XKPRANGE_UC_SHADOW_OFFSET;
> > > + break;
> > > + case XKPRANGE_WC_SEG:
> > > + offset = XKPRANGE_WC_SHADOW_OFFSET;
> > > + break;
> > > + case XKVRANGE_VC_SEG:
> > > + offset = XKVRANGE_VC_SHADOW_OFFSET;
> > > + break;
> > > + default:
> > > + WARN_ON(1);
> > > + return NULL;
> > > }
> > > +
> > > + return (void *)((maddr >> KASAN_SHADOW_SCALE_SHIFT) + offset);
> > > +}
> > > +
> > > +void *kasan_mem_to_shadow(const void *addr)
> > > +{
> > > + if (kasan_arch_is_ready())
> > > + return mem_to_shadow(addr);
> > > + else
> > > + return (void *)(kasan_early_shadow_page);
> > > }
> > >
> > > const void *kasan_shadow_to_mem(const void *shadow_addr)
> > > @@ -295,10 +299,8 @@ void __init kasan_init(void)
> > > /* Maps everything to a single page of zeroes */
> > > kasan_pgd_populate(KASAN_SHADOW_START, KASAN_SHADOW_END, NUMA_NO_NODE, true);
> > >
> > > - kasan_populate_early_shadow(kasan_mem_to_shadow((void *)VMALLOC_START),
> > > - kasan_mem_to_shadow((void *)KFENCE_AREA_END));
> > > -
> > > - kasan_early_stage = false;
> > > + kasan_populate_early_shadow(mem_to_shadow((void *)VMALLOC_START),
> > > + mem_to_shadow((void *)KFENCE_AREA_END));
> > >
> > > /* Populate the linear mapping */
> > > for_each_mem_range(i, &pa_start, &pa_end) {
> > > @@ -308,13 +310,13 @@ void __init kasan_init(void)
> > > if (start >= end)
> > > break;
> > >
> > > - kasan_map_populate((unsigned long)kasan_mem_to_shadow(start),
> > > - (unsigned long)kasan_mem_to_shadow(end), NUMA_NO_NODE);
> > > + kasan_map_populate((unsigned long)mem_to_shadow(start),
> > > + (unsigned long)mem_to_shadow(end), NUMA_NO_NODE);
> > > }
> > >
> > > /* Populate modules mapping */
> > > - kasan_map_populate((unsigned long)kasan_mem_to_shadow((void *)MODULES_VADDR),
> > > - (unsigned long)kasan_mem_to_shadow((void *)MODULES_END), NUMA_NO_NODE);
> > > + kasan_map_populate((unsigned long)mem_to_shadow((void *)MODULES_VADDR),
> > > + (unsigned long)mem_to_shadow((void *)MODULES_END), NUMA_NO_NODE);
> > > /*
> > > * KAsan may reuse the contents of kasan_early_shadow_pte directly, so we
> > > * should make sure that it maps the zero page read-only.
> > > @@ -329,5 +331,6 @@ void __init kasan_init(void)
> > >
> > > /* At this point kasan is fully initialized. Enable error messages */
> > > init_task.kasan_depth = 0;
> > > + kasan_early_stage = false;
> > > pr_info("KernelAddressSanitizer initialized.\n");
> > > }
> > > --
> > > 2.52.0
> > >
> > >
> >
> > Does not apply to 6.6.y, I get the following error:
> >
> > checking file arch/loongarch/mm/kasan_init.c
> > Hunk #1 FAILED at 42.
> > Hunk #2 succeeded at 290 (offset -5 lines).
> > Hunk #3 succeeded at 301 (offset -5 lines).
> > Hunk #4 succeeded at 322 (offset -5 lines).
> > 1 out of 4 hunks FAILED
> I'm sorry, for 6.6.y it needs 139d42ca51018c1d43ab5f35829179f060d1ab31
> ("LoongArch: Add WriteCombine shadow mapping in KASAN") as a
> dependency.

That worked, thanks!