[PATCH v2] powerpc: implement CONFIG_DEBUG_VIRTUAL

From: Christophe Leroy
Date: Mon Dec 10 2018 - 02:59:30 EST


This patch implements CONFIG_DEBUG_VIRTUAL to warn about incorrect
uses of virt_to_phys() and page_to_phys().
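
A typical offender is code that hands a vmalloc'd or ioremap'd address
to virt_to_phys(), which is only valid for linear-mapping (lowmem)
addresses. A minimal, purely illustrative example (not taken from any
real driver):

	void *buf = vmalloc(PAGE_SIZE);		/* not in the linear mapping */
	phys_addr_t pa = virt_to_phys(buf);	/* now warns: !virt_addr_valid(buf) */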

It also warns about DMA on the stack when CONFIG_HAVE_ARCH_VMAP_STACK is
selected, to help locate such uses before activating CONFIG_VMAP_STACK.
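
For example, a hypothetical driver mapping an on-stack buffer for DMA
would now be flagged by the stack check, instead of silently getting a
wrong address once the stack is vmalloc'ed:

	/* Hypothetical driver code, for illustration only */
	static int foo_send_cmd(struct device *dev)
	{
		u8 cmd[8];		/* lives on the kernel stack */
		dma_addr_t handle;

		/* The mapping path goes through page_to_phys()/virt_to_phys(),
		 * so the new stack check fires here.
		 */
		handle = dma_map_single(dev, cmd, sizeof(cmd), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, handle))
			return -EIO;

		dma_unmap_single(dev, handle, sizeof(cmd), DMA_TO_DEVICE);
		return 0;
	}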

Below is the result of test_debug_virtual:

[ 1.438746] WARNING: CPU: 0 PID: 1 at ./arch/powerpc/include/asm/io.h:808 test_debug_virtual_init+0x3c/0xd4
[ 1.448156] CPU: 0 PID: 1 Comm: swapper Not tainted 4.20.0-rc5-00560-g6bfb52e23a00-dirty #532
[ 1.457259] NIP: c066c550 LR: c0650ccc CTR: c066c514
[ 1.462257] REGS: c900bdb0 TRAP: 0700 Not tainted (4.20.0-rc5-00560-g6bfb52e23a00-dirty)
[ 1.471184] MSR: 00029032 <EE,ME,IR,DR,RI> CR: 48000422 XER: 20000000
[ 1.477811]
[ 1.477811] GPR00: c0650ccc c900be60 c60d0000 00000000 006000c0 c9000000 00009032 c7fa0020
[ 1.477811] GPR08: 00002400 00000001 09000000 00000000 c07b5d04 00000000 c00037d8 00000000
[ 1.477811] GPR16: 00000000 00000000 00000000 00000000 c0760000 c0740000 00000092 c0685bb0
[ 1.477811] GPR24: c065042c c068a734 c0685b8c 00000006 00000000 c0760000 c075c3c0 ffffffff
[ 1.512711] NIP [c066c550] test_debug_virtual_init+0x3c/0xd4
[ 1.518315] LR [c0650ccc] do_one_initcall+0x8c/0x1cc
[ 1.523163] Call Trace:
[ 1.525595] [c900be60] [c0567340] 0xc0567340 (unreliable)
[ 1.530954] [c900be90] [c0650ccc] do_one_initcall+0x8c/0x1cc
[ 1.536551] [c900bef0] [c0651000] kernel_init_freeable+0x1f4/0x2cc
[ 1.542658] [c900bf30] [c00037ec] kernel_init+0x14/0x110
[ 1.547913] [c900bf40] [c000e1d0] ret_from_kernel_thread+0x14/0x1c
[ 1.553971] Instruction dump:
[ 1.556909] 3ca50100 bfa10024 54a5000e 3fa0c076 7c0802a6 3d454000 813dc204 554893be
[ 1.564566] 7d294010 7d294910 90010034 39290001 <0f090000> 7c3e0b78 955e0008 3fe0c062
[ 1.572425] ---[ end trace 6f6984225b280ad6 ]---
[ 1.577467] PA: 0x09000000 for VA: 0xc9000000
[ 1.581799] PA: 0x061e8f50 for VA: 0xc61e8f50
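
(For reference, the two translations above come from lib/test_debug_virtual.c,
which roughly does the following: one deliberately bogus translation of a
vmalloc-space address, then a valid one on a kmalloc'ed object.)

	void *va = (void *)VMALLOC_START;	/* not a linear-mapping address */
	phys_addr_t pa = virt_to_phys(va);	/* triggers the warning above */
	pr_info("PA: %pa for VA: 0x%lx\n", &pa, (unsigned long)va);

	va = kzalloc(64, GFP_KERNEL);		/* valid lowmem address */
	pa = virt_to_phys(va);			/* translates cleanly, no warning */
	pr_info("PA: %pa for VA: 0x%lx\n", &pa, (unsigned long)va);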

Signed-off-by: Christophe Leroy <christophe.leroy@xxxxxx>
---
v2: Use asm/pgtable.h to avoid a build failure on ppc64e.
Add a verification that the object is not on the stack, to catch problems
before activating VMAP_STACK.
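
To reproduce the test above, a config fragment along the following lines
can be used (CONFIG_TEST_DEBUG_VIRTUAL builds lib/test_debug_virtual.c;
exact dependencies may vary):

	CONFIG_DEBUG_KERNEL=y
	CONFIG_DEBUG_VIRTUAL=y
	CONFIG_TEST_DEBUG_VIRTUAL=y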

arch/powerpc/Kconfig | 1 +
arch/powerpc/include/asm/io.h | 19 ++++++++++++++++++-
arch/powerpc/mm/pgtable_32.c | 2 +-
3 files changed, 20 insertions(+), 2 deletions(-)

diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index e312e92e3381..94b46624068d 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -128,6 +128,7 @@ config PPC
 	#
 	# Please keep this list sorted alphabetically.
 	#
+	select ARCH_HAS_DEBUG_VIRTUAL
 	select ARCH_HAS_DEVMEM_IS_ALLOWED
 	select ARCH_HAS_DMA_SET_COHERENT_MASK
 	select ARCH_HAS_ELF_RANDOMIZE
diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h
index e746becd9d6f..51e96a4413d2 100644
--- a/arch/powerpc/include/asm/io.h
+++ b/arch/powerpc/include/asm/io.h
@@ -29,12 +29,14 @@ extern struct pci_dev *isa_bridge_pcidev;

#include <linux/device.h>
#include <linux/compiler.h>
+#include <linux/sched/task_stack.h>
#include <asm/page.h>
#include <asm/byteorder.h>
#include <asm/synch.h>
#include <asm/delay.h>
#include <asm/mmu.h>
#include <asm/ppc_asm.h>
+#include <asm/pgtable.h>

#ifdef CONFIG_PPC64
#include <asm/paca.h>
@@ -804,6 +806,11 @@ extern void __iounmap_at(void *ea, unsigned long size);
  */
 static inline unsigned long virt_to_phys(volatile void * address)
 {
+	if (IS_ENABLED(CONFIG_DEBUG_VIRTUAL) &&
+	    !WARN_ON(IS_ENABLED(CONFIG_HAVE_ARCH_VMAP_STACK) && current->pid &&
+		     object_is_on_stack((const void*)address)))
+		WARN_ON(!virt_addr_valid(address));
+
 	return __pa((unsigned long)address);
 }

@@ -827,7 +834,17 @@ static inline void * phys_to_virt(unsigned long address)
 /*
  * Change "struct page" to physical address.
  */
-#define page_to_phys(page)	((phys_addr_t)page_to_pfn(page) << PAGE_SHIFT)
+static inline phys_addr_t page_to_phys(struct page *page)
+{
+	unsigned long pfn = page_to_pfn(page);
+
+	if (IS_ENABLED(CONFIG_DEBUG_VIRTUAL) &&
+	    !WARN_ON(IS_ENABLED(CONFIG_HAVE_ARCH_VMAP_STACK) && current->pid &&
+		     object_is_on_stack(__va(PFN_PHYS(pfn)))))
+		WARN_ON(!pfn_valid(pfn));
+
+	return PFN_PHYS(pfn);
+}

 /*
  * 32 bits still uses virt_to_bus() for it's implementation of DMA
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 4fc77a99c9bf..68d204a45cd0 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -143,7 +143,7 @@ __ioremap_caller(phys_addr_t addr, unsigned long size, pgprot_t prot, void *call
 	 * Don't allow anybody to remap normal RAM that we're using.
 	 * mem_init() sets high_memory so only do the check after that.
 	 */
-	if (slab_is_available() && (p < virt_to_phys(high_memory)) &&
+	if (slab_is_available() && virt_addr_valid(p) &&
 	    page_is_ram(__phys_to_pfn(p))) {
 		printk("__ioremap(): phys addr 0x%llx is RAM lr %ps\n",
 		       (unsigned long long)p, __builtin_return_address(0));
--
2.13.3