[PATCH 5.3 030/148] powerpc/kasan: Fix parallel loading of modules.

From: Greg Kroah-Hartman
Date: Thu Oct 10 2019 - 04:38:54 EST


From: Christophe Leroy <christophe.leroy@xxxxxx>

commit 45ff3c55958542c3b76075d59741297b8cb31cbb upstream.

Parallel loading of modules may lead to shadow page table entries
being set up incorrectly.

First, let's align modules so that two modules never share the same
shadow page.
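
For context, the alignment used below is MODULE_ALIGN from <linux/moduleloader.h>
(the header the diff adds an include for). With KASAN enabled it is defined
roughly as follows (sketch from memory of that header):

	/* one shadow byte covers 1 << KASAN_SHADOW_SCALE_SHIFT bytes of memory */
	#ifdef CONFIG_KASAN
	#define MODULE_ALIGN (PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
	#else
	#define MODULE_ALIGN PAGE_SIZE
	#endif

One shadow page therefore maps PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT bytes of
module space, so aligning every module base to that boundary guarantees that
the shadow of one module never lands in the same page as the shadow of another.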

Second, ensure that two modules cannot allocate two page tables for
the same PMD entry at the same time. This is done by using
init_mm.page_table_lock in the same way as __pte_alloc_kernel() does.
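
For reference, the pattern being mirrored is the one in __pte_alloc_kernel()
(mm/memory.c), which at the time looked roughly like the sketch below
(comments paraphrased):

	int __pte_alloc_kernel(pmd_t *pmd)
	{
		pte_t *new = pte_alloc_one_kernel(&init_mm);
		if (!new)
			return -ENOMEM;

		/* make the new PTE page visible before the PMD points at it */
		smp_wmb();

		spin_lock(&init_mm.page_table_lock);
		if (likely(pmd_none(*pmd))) {	/* has another CPU populated it ? */
			pmd_populate_kernel(&init_mm, pmd, new);
			new = NULL;
		}
		spin_unlock(&init_mm.page_table_lock);
		if (new)
			pte_free_kernel(&init_mm, new);
		return 0;
	}

The loser of the race frees its freshly allocated page table instead of
overwriting the winner's PMD entry. The kasan code below does the same, except
that it only frees when slab_is_available(), because the early allocations
come from memblock rather than the slab-backed pte allocator.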

Fixes: 2edb16efc899 ("powerpc/32: Add KASAN support")
Cc: stable@xxxxxxxxxxxxxxx # v5.2+
Signed-off-by: Christophe Leroy <christophe.leroy@xxxxxx>
Signed-off-by: Michael Ellerman <mpe@xxxxxxxxxxxxxx>
Link: https://lore.kernel.org/r/c97284f912128cbc3f2fe09d68e90e65fb3e6026.1565361876.git.christophe.leroy@xxxxxx
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>

---
arch/powerpc/mm/kasan/kasan_init_32.c | 21 +++++++++++++++++++--
1 file changed, 19 insertions(+), 2 deletions(-)

--- a/arch/powerpc/mm/kasan/kasan_init_32.c
+++ b/arch/powerpc/mm/kasan/kasan_init_32.c
@@ -5,6 +5,7 @@
 #include <linux/kasan.h>
 #include <linux/printk.h>
 #include <linux/memblock.h>
+#include <linux/moduleloader.h>
 #include <linux/sched/task.h>
 #include <linux/vmalloc.h>
 #include <asm/pgalloc.h>
@@ -46,7 +47,19 @@ static int __ref kasan_init_shadow_page_
 			kasan_populate_pte(new, PAGE_READONLY);
 		else
 			kasan_populate_pte(new, PAGE_KERNEL_RO);
-		pmd_populate_kernel(&init_mm, pmd, new);
+
+		smp_wmb(); /* See comment in __pte_alloc */
+
+		spin_lock(&init_mm.page_table_lock);
+		/* Has another populated it ? */
+		if (likely((void *)pmd_page_vaddr(*pmd) == kasan_early_shadow_pte)) {
+			pmd_populate_kernel(&init_mm, pmd, new);
+			new = NULL;
+		}
+		spin_unlock(&init_mm.page_table_lock);
+
+		if (new && slab_is_available())
+			pte_free_kernel(&init_mm, new);
 	}
 	return 0;
 }
@@ -137,7 +150,11 @@ void __init kasan_init(void)
 #ifdef CONFIG_MODULES
 void *module_alloc(unsigned long size)
 {
-	void *base = vmalloc_exec(size);
+	void *base;
+
+	base = __vmalloc_node_range(size, MODULE_ALIGN, VMALLOC_START, VMALLOC_END,
+				    GFP_KERNEL, PAGE_KERNEL_EXEC, VM_FLUSH_RESET_PERMS,
+				    NUMA_NO_NODE, __builtin_return_address(0));
 
 	if (!base)
 		return NULL;