[nsaenz-rpi:wip 5/5] mm/page_alloc.c:3183:28: sparse: sparse: incorrect type in assignment (different address spaces)
From: kernel test robot
Date: Sun Nov 14 2021 - 12:43:19 EST
tree: https://git.kernel.org/pub/scm/linux/kernel/git/nsaenz/linux-rpi.git wip
head: a6f29192b1e2bf93a6ab7c25c854307431124b8b
commit: a6f29192b1e2bf93a6ab7c25c854307431124b8b [5/5] mm/page_alloc: Add remote draining support to per-cpu lists
config: arm64-randconfig-s031-20211008 (attached as .config)
compiler: aarch64-linux-gcc (GCC) 11.2.0
reproduce:
wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
chmod +x ~/bin/make.cross
# apt-get install sparse
# sparse version: v0.6.4-dirty
# https://git.kernel.org/pub/scm/linux/kernel/git/nsaenz/linux-rpi.git/commit/?id=a6f29192b1e2bf93a6ab7c25c854307431124b8b
git remote add nsaenz-rpi https://git.kernel.org/pub/scm/linux/kernel/git/nsaenz/linux-rpi.git
git fetch --no-tags nsaenz-rpi wip
git checkout a6f29192b1e2bf93a6ab7c25c854307431124b8b
# save the attached .config to linux build tree
COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-11.2.0 make.cross C=1 CF='-fdiagnostic-prefix -D__CHECK_ENDIAN__' ARCH=arm64
If you fix the issue, kindly add the following tag as appropriate:
Reported-by: kernel test robot <lkp@xxxxxxxxx>
sparse warnings: (new ones prefixed by >>)
mm/page_alloc.c:1467:30: sparse: sparse: incorrect type in assignment (different modifiers) @@ expected struct list_head *list @@ got struct list_head [noderef] * @@
mm/page_alloc.c:1467:30: sparse: expected struct list_head *list
mm/page_alloc.c:1467:30: sparse: got struct list_head [noderef] *
>> mm/page_alloc.c:3183:28: sparse: sparse: incorrect type in assignment (different address spaces) @@ expected struct pcplists [noderef] *lp @@ got struct pcplists [noderef] __rcu * @@
mm/page_alloc.c:3183:28: sparse: expected struct pcplists [noderef] *lp
mm/page_alloc.c:3183:28: sparse: got struct pcplists [noderef] __rcu *
mm/page_alloc.c:3188:36: sparse: sparse: incorrect type in assignment (different address spaces) @@ expected struct pcplists [noderef] *lp @@ got struct pcplists [noderef] __rcu * @@
mm/page_alloc.c:3188:36: sparse: expected struct pcplists [noderef] *lp
mm/page_alloc.c:3188:36: sparse: got struct pcplists [noderef] __rcu *
mm/page_alloc.c:3369:40: sparse: sparse: incorrect type in argument 2 (different modifiers) @@ expected struct list_head *head @@ got struct list_head [noderef] * @@
mm/page_alloc.c:3369:40: sparse: expected struct list_head *head
mm/page_alloc.c:3369:40: sparse: got struct list_head [noderef] *
mm/page_alloc.c:5878:28: sparse: sparse: incorrect type in assignment (different address spaces) @@ expected struct pcplists [noderef] *lp @@ got struct pcplists [noderef] __rcu * @@
mm/page_alloc.c:5878:28: sparse: expected struct pcplists [noderef] *lp
mm/page_alloc.c:5878:28: sparse: got struct pcplists [noderef] __rcu *
mm/page_alloc.c:5976:28: sparse: sparse: incorrect type in assignment (different address spaces) @@ expected struct pcplists [noderef] *lp @@ got struct pcplists [noderef] __rcu * @@
mm/page_alloc.c:5976:28: sparse: expected struct pcplists [noderef] *lp
mm/page_alloc.c:5976:28: sparse: got struct pcplists [noderef] __rcu *
>> mm/page_alloc.c:6883:17: sparse: sparse: incorrect type in assignment (different address spaces) @@ expected struct pcplists [noderef] __rcu *lp @@ got struct pcplists [noderef] * @@
mm/page_alloc.c:6883:17: sparse: expected struct pcplists [noderef] __rcu *lp
mm/page_alloc.c:6883:17: sparse: got struct pcplists [noderef] *
>> mm/page_alloc.c:6887:47: sparse: sparse: incorrect type in argument 1 (different address spaces) @@ expected struct list_head *list @@ got struct list_head [noderef] __rcu * @@
mm/page_alloc.c:6887:47: sparse: expected struct list_head *list
mm/page_alloc.c:6887:47: sparse: got struct list_head [noderef] __rcu *
mm/page_alloc.c:6888:50: sparse: sparse: incorrect type in argument 1 (different modifiers) @@ expected struct list_head *list @@ got struct list_head [noderef] * @@
mm/page_alloc.c:6888:50: sparse: expected struct list_head *list
mm/page_alloc.c:6888:50: sparse: got struct list_head [noderef] *
mm/page_alloc.c:1452:17: sparse: sparse: dereference of noderef expression
mm/page_alloc.c:1507:9: sparse: sparse: dereference of noderef expression
mm/page_alloc.c:3095:13: sparse: sparse: dereference of noderef expression
mm/page_alloc.c:3096:42: sparse: sparse: dereference of noderef expression
mm/page_alloc.c:3184:29: sparse: sparse: dereference of noderef expression
mm/page_alloc.c:3189:37: sparse: sparse: dereference of noderef expression
mm/page_alloc.c:3223:36: sparse: sparse: dereference of noderef expression
mm/page_alloc.c:3370:9: sparse: sparse: dereference of noderef expression
mm/page_alloc.c:3372:13: sparse: sparse: dereference of noderef expression
mm/page_alloc.c:3604:14: sparse: sparse: incorrect type in assignment (different modifiers) @@ expected struct list_head *list @@ got struct list_head [noderef] * @@
mm/page_alloc.c:3604:14: sparse: expected struct list_head *list
mm/page_alloc.c:3604:14: sparse: got struct list_head [noderef] *
mm/page_alloc.c:3624:25: sparse: sparse: dereference of noderef expression
mm/page_alloc.c:3631:17: sparse: sparse: dereference of noderef expression
mm/page_alloc.c: note: in included file (through include/linux/mm.h):
include/linux/gfp.h:353:27: sparse: sparse: restricted gfp_t degrades to integer
include/linux/gfp.h:353:27: sparse: sparse: restricted gfp_t degrades to integer
mm/page_alloc.c:3604:14: sparse: sparse: incorrect type in assignment (different modifiers) @@ expected struct list_head *list @@ got struct list_head [noderef] * @@
mm/page_alloc.c:3604:14: sparse: expected struct list_head *list
mm/page_alloc.c:3604:14: sparse: got struct list_head [noderef] *
mm/page_alloc.c:3624:25: sparse: sparse: dereference of noderef expression
mm/page_alloc.c:3631:17: sparse: sparse: dereference of noderef expression
include/linux/gfp.h:353:27: sparse: sparse: restricted gfp_t degrades to integer
include/linux/gfp.h:353:27: sparse: sparse: restricted gfp_t degrades to integer
include/linux/gfp.h:353:27: sparse: sparse: restricted gfp_t degrades to integer
include/linux/gfp.h:353:27: sparse: sparse: restricted gfp_t degrades to integer
mm/page_alloc.c:5879:37: sparse: sparse: dereference of noderef expression
mm/page_alloc.c:5977:37: sparse: sparse: dereference of noderef expression
mm/page_alloc.c:5981:17: sparse: sparse: dereference of noderef expression
mm/page_alloc.c:5981:17: sparse: sparse: dereference of noderef expression
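
Sparse tracks __rcu as a separate address space, so the "different address spaces" warnings above show up whenever a pointer annotated __rcu is loaded into (or stored from) a plain pointer without going through the RCU accessor macros. A minimal, self-contained sketch of the pattern sparse expects follows; the struct and variable names here are made up for illustration and are not from the patch:

#include <linux/mutex.h>
#include <linux/rcupdate.h>

struct foo {
	int count;
};

static DEFINE_MUTEX(update_lock);
static struct foo __rcu *gp;	/* __rcu puts gp in its own sparse address space */

/*
 * Reader: rcu_dereference() strips the __rcu annotation.  Assigning the
 * bare __rcu pointer to a plain pointer (e.g. via READ_ONCE()) is what
 * triggers "incorrect type in assignment (different address spaces)".
 */
static int foo_read_count(void)
{
	struct foo *p;
	int count = 0;

	rcu_read_lock();
	p = rcu_dereference(gp);
	if (p)
		count = p->count;
	rcu_read_unlock();

	return count;
}

/*
 * Updater: rcu_replace_pointer() keeps both the load of the old value and
 * the store of the new one in the __rcu address space.
 */
static struct foo *foo_swap(struct foo *newp)
{
	struct foo *old;

	mutex_lock(&update_lock);
	old = rcu_replace_pointer(gp, newp, lockdep_is_held(&update_lock));
	mutex_unlock(&update_lock);

	return old;
}

The "different modifiers" warnings on struct list_head appear to share the same root cause: once the containing struct pointer carries a noderef annotation, its embedded list heads inherit it, and passing them to the plain list helpers no longer type-checks.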
vim +3183 mm/page_alloc.c
3132
3133 /*
3134 * The implementation of drain_all_pages(), exposing an extra parameter to
3135 * drain on all cpus.
3136 *
3137 * drain_all_pages() is optimized to only execute on cpus where pcplists are
3138 * not empty. The check for non-emptiness can however race with a free to
3139 * pcplist that has not yet increased the lp->count from 0 to 1. Callers
3140 * that need the guarantee that every CPU has drained can disable the
3141 * optimizing racy check.
3142 */
3143 static void __drain_all_pages(struct zone *zone, bool force_all_cpus)
3144 {
3145 struct per_cpu_pages *pcp;
3146 struct zone *z;
3147 int cpu;
3148
3149 /*
3150 * Allocate in the BSS so we won't require allocation in
3151 * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y
3152 */
3153 static cpumask_t cpus_with_pcps;
3154
3155 /*
3156 * Do not drain if one is already in progress unless it's specific to
3157 * a zone. Such callers are primarily CMA and memory hotplug and need
3158 * the drain to be complete when the call returns.
3159 */
3160 if (unlikely(!mutex_trylock(&pcpu_drain_mutex))) {
3161 if (!zone)
3162 return;
3163 mutex_lock(&pcpu_drain_mutex);
3164 }
3165
3166 /*
3167 * We don't care about racing with CPU hotplug event
3168 * as offline notification will cause the notified
3169 * cpu to drain that CPU pcps and on_each_cpu_mask
3170 * disables preemption as part of its processing
3171 */
3172 for_each_online_cpu(cpu) {
3173 bool has_pcps = false;
3174 struct pcplists *lp;
3175
3176 if (force_all_cpus) {
3177 /*
3178 * The lp->count check is racy, some callers need a
3179 * guarantee that no cpu is missed.
3180 */
3181 has_pcps = true;
3182 } else if (zone) {
> 3183 lp = READ_ONCE(per_cpu_ptr(zone->per_cpu_pageset, cpu)->lp);
3184 if (lp->count)
3185 has_pcps = true;
3186 } else {
3187 for_each_populated_zone(z) {
3188 lp = READ_ONCE(per_cpu_ptr(z->per_cpu_pageset, cpu)->lp);
3189 if (lp->count) {
3190 has_pcps = true;
3191 break;
3192 }
3193 }
3194 }
3195
3196 if (has_pcps)
3197 cpumask_set_cpu(cpu, &cpus_with_pcps);
3198 else
3199 cpumask_clear_cpu(cpu, &cpus_with_pcps);
3200 }
3201
3202 if (!force_all_cpus && cpumask_empty(&cpus_with_pcps))
3203 goto exit;
3204
3205 for_each_cpu(cpu, &cpus_with_pcps) {
3206 for_each_populated_zone(z) {
3207 if (zone && zone != z)
3208 continue;
3209
3210 pcp = per_cpu_ptr(z->per_cpu_pageset, cpu);
3211 pcp->drain = rcu_replace_pointer(pcp->lp, pcp->drain,
3212 mutex_is_locked(&pcpu_drain_mutex));
3213 }
3214 }
3215
3216 synchronize_rcu_expedited();
3217
3218 for_each_cpu(cpu, &cpus_with_pcps) {
3219 for_each_populated_zone(z) {
3220 int count;
3221
3222 pcp = per_cpu_ptr(z->per_cpu_pageset, cpu);
3223 count = pcp->drain->count;
3224 if (!count)
3225 continue;
3226
3227 free_pcppages_bulk(z, count, pcp, pcp->drain);
3228 }
3229 }
3230
3231 exit:
3232 mutex_unlock(&pcpu_drain_mutex);
3233 }
3234
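
Judging by the warning text, the loads at 3183/3188/5878/5976 copy the __rcu-annotated pcp->lp into a local that is outside the __rcu address space, while the store at 6883 goes the other way (a plain pointer written into the __rcu field). One possible direction is sketched below purely for illustration, not necessarily the fix the author has in mind; the backing-field name is hypothetical:

	/* Racy reader, mirroring lines 3183-3185 above: rcu_dereference_raw()
	 * strips __rcu without requiring an RCU read-side critical section,
	 * which matches the explicitly racy lp->count check. */
	lp = rcu_dereference_raw(per_cpu_ptr(zone->per_cpu_pageset, cpu)->lp);
	if (lp->count)
		has_pcps = true;

	/* Publication site, e.g. around line 6883: RCU_INIT_POINTER() (or
	 * rcu_assign_pointer()) stores into the __rcu field with the matching
	 * address space.  "pcp->pcplists_backing" is a made-up field name. */
	RCU_INIT_POINTER(pcp->lp, &pcp->pcplists_backing[0]);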
---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@xxxxxxxxxxxx
Attachment: .config.gz (application/gzip)