arch/powerpc/mm/book3s64/hash_tlb.c:45:42: sparse: sparse: incorrect type in initializer (different address spaces)

From: kernel test robot

Date: Mon Mar 30 2026 - 14:58:49 EST


tree: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git master
head: 7aaa8047eafd0bd628065b15757d9b48c5f9c07d
commit: ee628d9cc8d5b96fdceeb270cf662efc4f85f2b6 mm: add basic tests for lazy_mmu
date: 10 weeks ago
config: powerpc64-randconfig-r113-20260330 (https://download.01.org/0day-ci/archive/20260331/202603310251.oTtJlApu-lkp@xxxxxxxxx/config)
compiler: clang version 23.0.0git (https://github.com/llvm/llvm-project 2cd67b8b69f78e3f95918204320c3075a74ba16c)
sparse: v0.6.5-rc1
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20260331/202603310251.oTtJlApu-lkp@xxxxxxxxx/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@xxxxxxxxx>
| Closes: https://lore.kernel.org/oe-kbuild-all/202603310251.oTtJlApu-lkp@xxxxxxxxx/

sparse warnings: (new ones prefixed by >>)
>> arch/powerpc/mm/book3s64/hash_tlb.c:45:42: sparse: sparse: incorrect type in initializer (different address spaces) @@ expected void const [noderef] __percpu *__vpp_verify @@ got struct ppc64_tlb_batch * @@
arch/powerpc/mm/book3s64/hash_tlb.c:45:42: sparse: expected void const [noderef] __percpu *__vpp_verify
arch/powerpc/mm/book3s64/hash_tlb.c:45:42: sparse: got struct ppc64_tlb_batch *
arch/powerpc/mm/book3s64/hash_tlb.c:162:45: sparse: sparse: incorrect type in initializer (different address spaces) @@ expected void const [noderef] __percpu *__vpp_verify @@ got struct ppc64_tlb_batch * @@
arch/powerpc/mm/book3s64/hash_tlb.c:162:45: sparse: expected void const [noderef] __percpu *__vpp_verify
arch/powerpc/mm/book3s64/hash_tlb.c:162:45: sparse: got struct ppc64_tlb_batch *

vim +45 arch/powerpc/mm/book3s64/hash_tlb.c

^1da177e4c3f415 arch/ppc64/mm/tlb.c Linus Torvalds 2005-04-16 34
^1da177e4c3f415 arch/ppc64/mm/tlb.c Linus Torvalds 2005-04-16 35 /*
a741e6796957716 arch/powerpc/mm/tlb_64.c Benjamin Herrenschmidt 2007-04-10 36 * A linux PTE was changed and the corresponding hash table entry
a741e6796957716 arch/powerpc/mm/tlb_64.c Benjamin Herrenschmidt 2007-04-10 37 * needs to be flushed. This function will either perform the flush
a741e6796957716 arch/powerpc/mm/tlb_64.c Benjamin Herrenschmidt 2007-04-10 38 * immediately or will batch it up if the current CPU has an active
a741e6796957716 arch/powerpc/mm/tlb_64.c Benjamin Herrenschmidt 2007-04-10 39 * batch on it.
^1da177e4c3f415 arch/ppc64/mm/tlb.c Linus Torvalds 2005-04-16 40 */
a741e6796957716 arch/powerpc/mm/tlb_64.c Benjamin Herrenschmidt 2007-04-10 41 void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
3c726f8dee6f55e arch/powerpc/mm/tlb_64.c Benjamin Herrenschmidt 2005-11-07 42 pte_t *ptep, unsigned long pte, int huge)
^1da177e4c3f415 arch/ppc64/mm/tlb.c Linus Torvalds 2005-04-16 43 {
5524a27d39b6877 arch/powerpc/mm/tlb_hash64.c Aneesh Kumar K.V 2012-09-10 44 unsigned long vpn;
f342552b917a18a arch/powerpc/mm/tlb_hash64.c Peter Zijlstra 2011-02-24 @45 struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);
5524a27d39b6877 arch/powerpc/mm/tlb_hash64.c Aneesh Kumar K.V 2012-09-10 46 unsigned long vsid;
bf72aeba2ffef59 arch/powerpc/mm/tlb_64.c Paul Mackerras 2006-06-15 47 unsigned int psize;
1189be6508d4518 arch/powerpc/mm/tlb_64.c Paul Mackerras 2007-10-11 48 int ssize;
a741e6796957716 arch/powerpc/mm/tlb_64.c Benjamin Herrenschmidt 2007-04-10 49 real_pte_t rpte;
ff31e105464d8c8 arch/powerpc/mm/tlb_hash64.c Aneesh Kumar K.V 2018-02-11 50 int i, offset;
^1da177e4c3f415 arch/ppc64/mm/tlb.c Linus Torvalds 2005-04-16 51
^1da177e4c3f415 arch/ppc64/mm/tlb.c Linus Torvalds 2005-04-16 52 i = batch->index;
^1da177e4c3f415 arch/ppc64/mm/tlb.c Linus Torvalds 2005-04-16 53
47d99948eee48a8 arch/powerpc/mm/book3s64/hash_tlb.c Christophe Leroy 2019-03-29 54 /*
47d99948eee48a8 arch/powerpc/mm/book3s64/hash_tlb.c Christophe Leroy 2019-03-29 55 * Get page size (maybe move back to caller).
16c2d4762325232 arch/powerpc/mm/tlb_64.c Benjamin Herrenschmidt 2007-05-08 56 *
16c2d4762325232 arch/powerpc/mm/tlb_64.c Benjamin Herrenschmidt 2007-05-08 57 * NOTE: when using special 64K mappings in 4K environment like
16c2d4762325232 arch/powerpc/mm/tlb_64.c Benjamin Herrenschmidt 2007-05-08 58 * for SPEs, we obtain the page size from the slice, which thus
16c2d4762325232 arch/powerpc/mm/tlb_64.c Benjamin Herrenschmidt 2007-05-08 59 * must still exist (and thus the VMA not reused) at the time
16c2d4762325232 arch/powerpc/mm/tlb_64.c Benjamin Herrenschmidt 2007-05-08 60 * of this call
16c2d4762325232 arch/powerpc/mm/tlb_64.c Benjamin Herrenschmidt 2007-05-08 61 */
3c726f8dee6f55e arch/powerpc/mm/tlb_64.c Benjamin Herrenschmidt 2005-11-07 62 if (huge) {
3c726f8dee6f55e arch/powerpc/mm/tlb_64.c Benjamin Herrenschmidt 2005-11-07 63 #ifdef CONFIG_HUGETLB_PAGE
d258e64ef595792 arch/powerpc/mm/tlb_hash64.c Joe Perches 2009-06-28 64 psize = get_slice_psize(mm, addr);
77058e1adcc4391 arch/powerpc/mm/tlb_hash64.c David Gibson 2010-02-08 65 /* Mask the address for the correct page size */
77058e1adcc4391 arch/powerpc/mm/tlb_hash64.c David Gibson 2010-02-08 66 addr &= ~((1UL << mmu_psize_defs[psize].shift) - 1);
ff31e105464d8c8 arch/powerpc/mm/tlb_hash64.c Aneesh Kumar K.V 2018-02-11 67 if (unlikely(psize == MMU_PAGE_16G))
ff31e105464d8c8 arch/powerpc/mm/tlb_hash64.c Aneesh Kumar K.V 2018-02-11 68 offset = PTRS_PER_PUD;
ff31e105464d8c8 arch/powerpc/mm/tlb_hash64.c Aneesh Kumar K.V 2018-02-11 69 else
ff31e105464d8c8 arch/powerpc/mm/tlb_hash64.c Aneesh Kumar K.V 2018-02-11 70 offset = PTRS_PER_PMD;
3c726f8dee6f55e arch/powerpc/mm/tlb_64.c Benjamin Herrenschmidt 2005-11-07 71 #else
3c726f8dee6f55e arch/powerpc/mm/tlb_64.c Benjamin Herrenschmidt 2005-11-07 72 BUG();
16c2d4762325232 arch/powerpc/mm/tlb_64.c Benjamin Herrenschmidt 2007-05-08 73 psize = pte_pagesize_index(mm, addr, pte); /* shutup gcc */
3c726f8dee6f55e arch/powerpc/mm/tlb_64.c Benjamin Herrenschmidt 2005-11-07 74 #endif
77058e1adcc4391 arch/powerpc/mm/tlb_hash64.c David Gibson 2010-02-08 75 } else {
16c2d4762325232 arch/powerpc/mm/tlb_64.c Benjamin Herrenschmidt 2007-05-08 76 psize = pte_pagesize_index(mm, addr, pte);
47d99948eee48a8 arch/powerpc/mm/book3s64/hash_tlb.c Christophe Leroy 2019-03-29 77 /*
47d99948eee48a8 arch/powerpc/mm/book3s64/hash_tlb.c Christophe Leroy 2019-03-29 78 * Mask the address for the standard page size. If we
77058e1adcc4391 arch/powerpc/mm/tlb_hash64.c David Gibson 2010-02-08 79 * have a 64k page kernel, but the hardware does not
77058e1adcc4391 arch/powerpc/mm/tlb_hash64.c David Gibson 2010-02-08 80 * support 64k pages, this might be different from the
47d99948eee48a8 arch/powerpc/mm/book3s64/hash_tlb.c Christophe Leroy 2019-03-29 81 * hardware page size encoded in the slice table.
47d99948eee48a8 arch/powerpc/mm/book3s64/hash_tlb.c Christophe Leroy 2019-03-29 82 */
77058e1adcc4391 arch/powerpc/mm/tlb_hash64.c David Gibson 2010-02-08 83 addr &= PAGE_MASK;
ff31e105464d8c8 arch/powerpc/mm/tlb_hash64.c Aneesh Kumar K.V 2018-02-11 84 offset = PTRS_PER_PTE;
77058e1adcc4391 arch/powerpc/mm/tlb_hash64.c David Gibson 2010-02-08 85 }
3c726f8dee6f55e arch/powerpc/mm/tlb_64.c Benjamin Herrenschmidt 2005-11-07 86
f71dc176aa06359 arch/powerpc/mm/tlb_hash64.c David Gibson 2009-10-26 87
a741e6796957716 arch/powerpc/mm/tlb_64.c Benjamin Herrenschmidt 2007-04-10 88 /* Build full vaddr */
a741e6796957716 arch/powerpc/mm/tlb_64.c Benjamin Herrenschmidt 2007-04-10 89 if (!is_kernel_addr(addr)) {
1189be6508d4518 arch/powerpc/mm/tlb_64.c Paul Mackerras 2007-10-11 90 ssize = user_segment_size(addr);
f384796c40dc55b arch/powerpc/mm/tlb_hash64.c Aneesh Kumar K.V 2018-03-26 91 vsid = get_user_vsid(&mm->context, addr, ssize);
1189be6508d4518 arch/powerpc/mm/tlb_64.c Paul Mackerras 2007-10-11 92 } else {
1189be6508d4518 arch/powerpc/mm/tlb_64.c Paul Mackerras 2007-10-11 93 vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
1189be6508d4518 arch/powerpc/mm/tlb_64.c Paul Mackerras 2007-10-11 94 ssize = mmu_kernel_ssize;
1189be6508d4518 arch/powerpc/mm/tlb_64.c Paul Mackerras 2007-10-11 95 }
c60ac5693c47df3 arch/powerpc/mm/tlb_hash64.c Aneesh Kumar K.V 2013-03-13 96 WARN_ON(vsid == 0);
5524a27d39b6877 arch/powerpc/mm/tlb_hash64.c Aneesh Kumar K.V 2012-09-10 97 vpn = hpt_vpn(addr, vsid, ssize);
ff31e105464d8c8 arch/powerpc/mm/tlb_hash64.c Aneesh Kumar K.V 2018-02-11 98 rpte = __real_pte(__pte(pte), ptep, offset);
a741e6796957716 arch/powerpc/mm/tlb_64.c Benjamin Herrenschmidt 2007-04-10 99
a741e6796957716 arch/powerpc/mm/tlb_64.c Benjamin Herrenschmidt 2007-04-10 100 /*
a741e6796957716 arch/powerpc/mm/tlb_64.c Benjamin Herrenschmidt 2007-04-10 101 * Check if we have an active batch on this CPU. If not, just
c5cee6421cd6514 arch/powerpc/mm/tlb_hash64.c Balbir Singh 2017-05-25 102 * flush now and return.
a741e6796957716 arch/powerpc/mm/tlb_64.c Benjamin Herrenschmidt 2007-04-10 103 */
313a05a15a1b29c arch/powerpc/mm/book3s64/hash_tlb.c Kevin Brodsky 2025-12-15 104 if (!is_lazy_mmu_mode_active()) {
c5cee6421cd6514 arch/powerpc/mm/tlb_hash64.c Balbir Singh 2017-05-25 105 flush_hash_page(vpn, rpte, psize, ssize, mm_is_thread_local(mm));
f342552b917a18a arch/powerpc/mm/tlb_hash64.c Peter Zijlstra 2011-02-24 106 put_cpu_var(ppc64_tlb_batch);
a741e6796957716 arch/powerpc/mm/tlb_64.c Benjamin Herrenschmidt 2007-04-10 107 return;
a741e6796957716 arch/powerpc/mm/tlb_64.c Benjamin Herrenschmidt 2007-04-10 108 }
a741e6796957716 arch/powerpc/mm/tlb_64.c Benjamin Herrenschmidt 2007-04-10 109
^1da177e4c3f415 arch/ppc64/mm/tlb.c Linus Torvalds 2005-04-16 110 /*
^1da177e4c3f415 arch/ppc64/mm/tlb.c Linus Torvalds 2005-04-16 111 * This can happen when we are in the middle of a TLB batch and
^1da177e4c3f415 arch/ppc64/mm/tlb.c Linus Torvalds 2005-04-16 112 * we encounter memory pressure (eg copy_page_range when it tries
^1da177e4c3f415 arch/ppc64/mm/tlb.c Linus Torvalds 2005-04-16 113 * to allocate a new pte). If we have to reclaim memory and end
^1da177e4c3f415 arch/ppc64/mm/tlb.c Linus Torvalds 2005-04-16 114 * up scanning and resetting referenced bits then our batch context
^1da177e4c3f415 arch/ppc64/mm/tlb.c Linus Torvalds 2005-04-16 115 * will change mid stream.
3c726f8dee6f55e arch/powerpc/mm/tlb_64.c Benjamin Herrenschmidt 2005-11-07 116 *
3c726f8dee6f55e arch/powerpc/mm/tlb_64.c Benjamin Herrenschmidt 2005-11-07 117 * We also need to ensure only one page size is present in a given
3c726f8dee6f55e arch/powerpc/mm/tlb_64.c Benjamin Herrenschmidt 2005-11-07 118 * batch
^1da177e4c3f415 arch/ppc64/mm/tlb.c Linus Torvalds 2005-04-16 119 */
1189be6508d4518 arch/powerpc/mm/tlb_64.c Paul Mackerras 2007-10-11 120 if (i != 0 && (mm != batch->mm || batch->psize != psize ||
1189be6508d4518 arch/powerpc/mm/tlb_64.c Paul Mackerras 2007-10-11 121 batch->ssize != ssize)) {
a741e6796957716 arch/powerpc/mm/tlb_64.c Benjamin Herrenschmidt 2007-04-10 122 __flush_tlb_pending(batch);
^1da177e4c3f415 arch/ppc64/mm/tlb.c Linus Torvalds 2005-04-16 123 i = 0;
^1da177e4c3f415 arch/ppc64/mm/tlb.c Linus Torvalds 2005-04-16 124 }
^1da177e4c3f415 arch/ppc64/mm/tlb.c Linus Torvalds 2005-04-16 125 if (i == 0) {
^1da177e4c3f415 arch/ppc64/mm/tlb.c Linus Torvalds 2005-04-16 126 batch->mm = mm;
3c726f8dee6f55e arch/powerpc/mm/tlb_64.c Benjamin Herrenschmidt 2005-11-07 127 batch->psize = psize;
1189be6508d4518 arch/powerpc/mm/tlb_64.c Paul Mackerras 2007-10-11 128 batch->ssize = ssize;
^1da177e4c3f415 arch/ppc64/mm/tlb.c Linus Torvalds 2005-04-16 129 }
a741e6796957716 arch/powerpc/mm/tlb_64.c Benjamin Herrenschmidt 2007-04-10 130 batch->pte[i] = rpte;
5524a27d39b6877 arch/powerpc/mm/tlb_hash64.c Aneesh Kumar K.V 2012-09-10 131 batch->vpn[i] = vpn;
^1da177e4c3f415 arch/ppc64/mm/tlb.c Linus Torvalds 2005-04-16 132 batch->index = ++i;
^1da177e4c3f415 arch/ppc64/mm/tlb.c Linus Torvalds 2005-04-16 133 if (i >= PPC64_TLB_BATCH_NR)
a741e6796957716 arch/powerpc/mm/tlb_64.c Benjamin Herrenschmidt 2007-04-10 134 __flush_tlb_pending(batch);
f342552b917a18a arch/powerpc/mm/tlb_hash64.c Peter Zijlstra 2011-02-24 135 put_cpu_var(ppc64_tlb_batch);
^1da177e4c3f415 arch/ppc64/mm/tlb.c Linus Torvalds 2005-04-16 136 }
^1da177e4c3f415 arch/ppc64/mm/tlb.c Linus Torvalds 2005-04-16 137

:::::: The code at line 45 was first introduced by commit
:::::: f342552b917a18a7a1fa2c10625df85fac828c36 powerpc/mm: Make hpte_need_flush() safe for preemption

:::::: TO: Peter Zijlstra <a.p.zijlstra@xxxxxxxxx>
:::::: CC: Benjamin Herrenschmidt <benh@xxxxxxxxxxxxxxxxxxx>

--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki