kernel/bpf/memalloc.c:139:48: sparse: sparse: incorrect type in initializer (different address spaces)

From: kernel test robot
Date: Sat Sep 09 2023 - 20:00:27 EST


tree: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git master
head: 6b8bb5b8d9b35fb43f0dbf9fd91b7d35e6232e08
commit: 4ab67149f3c6e97c5c506a726f0ebdec38241679 bpf: Add percpu allocation support to bpf_mem_alloc.
date: 1 year ago
config: i386-randconfig-063-20230910 (https://download.01.org/0day-ci/archive/20230910/202309100756.kJWzQTtH-lkp@xxxxxxxxx/config)
compiler: gcc-12 (Debian 12.2.0-14) 12.2.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20230910/202309100756.kJWzQTtH-lkp@xxxxxxxxx/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add the following tags
| Reported-by: kernel test robot <lkp@xxxxxxxxx>
| Closes: https://lore.kernel.org/oe-kbuild-all/202309100756.kJWzQTtH-lkp@xxxxxxxxx/

sparse warnings: (new ones prefixed by >>)
>> kernel/bpf/memalloc.c:139:48: sparse: sparse: incorrect type in initializer (different address spaces) @@ expected void *pptr @@ got void [noderef] __percpu * @@
kernel/bpf/memalloc.c:139:48: sparse: expected void *pptr
kernel/bpf/memalloc.c:139:48: sparse: got void [noderef] __percpu *
>> kernel/bpf/memalloc.c:142:37: sparse: sparse: incorrect type in argument 1 (different address spaces) @@ expected void [noderef] __percpu *__pdata @@ got void *pptr @@
kernel/bpf/memalloc.c:142:37: sparse: expected void [noderef] __percpu *__pdata
kernel/bpf/memalloc.c:142:37: sparse: got void *pptr
>> kernel/bpf/memalloc.c:211:43: sparse: sparse: incorrect type in argument 1 (different address spaces) @@ expected void [noderef] __percpu *__pdata @@ got void * @@
kernel/bpf/memalloc.c:211:43: sparse: expected void [noderef] __percpu *__pdata
kernel/bpf/memalloc.c:211:43: sparse: got void *
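
These warnings come from sparse's address-space checking: __alloc_percpu_gfp()
returns a pointer in the __percpu address space (void [noderef] __percpu *),
while line 139 stores it in a plain void *, and lines 142/211 pass plain
pointers where free_percpu() expects an annotated one. A minimal illustration
of the rule (hypothetical snippet, not from the tree; assumes <linux/percpu.h>):

        int __percpu *pp = alloc_percpu(int);  /* properly annotated percpu pointer */
        void *plain;

        plain = pp;                     /* sparse: incorrect type in assignment
                                         * (different address spaces)
                                         */
        plain = (__force void *)pp;     /* clean: __force drops the annotation
                                         * explicitly
                                         */
        free_percpu(pp);                /* clean: free_percpu() takes void __percpu * */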

vim +139 kernel/bpf/memalloc.c

   127
   128  static void *__alloc(struct bpf_mem_cache *c, int node)
   129  {
   130          /* Allocate, but don't deplete atomic reserves that typical
   131           * GFP_ATOMIC would do. irq_work runs on this cpu and kmalloc
   132           * will allocate from the current numa node which is what we
   133           * want here.
   134           */
   135          gfp_t flags = GFP_NOWAIT | __GFP_NOWARN | __GFP_ACCOUNT;
   136
   137          if (c->percpu) {
   138                  void **obj = kmem_cache_alloc_node(c->kmem_cache, flags, node);
 > 139                  void *pptr = __alloc_percpu_gfp(c->unit_size, 8, flags);
   140
   141                  if (!obj || !pptr) {
 > 142                          free_percpu(pptr);
   143                          kfree(obj);
   144                          return NULL;
   145                  }
   146                  obj[1] = pptr;
   147                  return obj;
   148          }
   149
   150          if (c->kmem_cache)
   151                  return kmem_cache_alloc_node(c->kmem_cache, flags, node);
   152
   153          return kmalloc_node(c->unit_size, flags, node);
   154  }
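
One way to silence the two warnings above (a sketch only; the fix that
eventually landed upstream may differ) is to keep the __percpu annotation
on pptr and make the one unavoidable address-space conversion explicit
with a __force cast at the point where the pointer is stashed in the
plain obj[1] slot:

        if (c->percpu) {
                void **obj = kmem_cache_alloc_node(c->kmem_cache, flags, node);
                /* keep the annotation so free_percpu() below type-checks */
                void __percpu *pptr = __alloc_percpu_gfp(c->unit_size, 8, flags);

                if (!obj || !pptr) {
                        free_percpu(pptr);
                        kfree(obj);
                        return NULL;
                }
                /* obj[1] is a plain void * slot; drop the address space
                 * explicitly rather than implicitly
                 */
                obj[1] = (__force void *)pptr;
                return obj;
        }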
   155
   156  static struct mem_cgroup *get_memcg(const struct bpf_mem_cache *c)
   157  {
   158  #ifdef CONFIG_MEMCG_KMEM
   159          if (c->objcg)
   160                  return get_mem_cgroup_from_objcg(c->objcg);
   161  #endif
   162
   163  #ifdef CONFIG_MEMCG
   164          return root_mem_cgroup;
   165  #else
   166          return NULL;
   167  #endif
   168  }
   169
   170  /* Mostly runs from irq_work except __init phase. */
   171  static void alloc_bulk(struct bpf_mem_cache *c, int cnt, int node)
   172  {
   173          struct mem_cgroup *memcg = NULL, *old_memcg;
   174          unsigned long flags;
   175          void *obj;
   176          int i;
   177
   178          memcg = get_memcg(c);
   179          old_memcg = set_active_memcg(memcg);
   180          for (i = 0; i < cnt; i++) {
   181                  obj = __alloc(c, node);
   182                  if (!obj)
   183                          break;
   184                  if (IS_ENABLED(CONFIG_PREEMPT_RT))
   185                          /* In RT irq_work runs in per-cpu kthread, so disable
   186                           * interrupts to avoid preemption and interrupts and
   187                           * reduce the chance of bpf prog executing on this cpu
   188                           * when active counter is busy.
   189                           */
   190                          local_irq_save(flags);
   191                  /* alloc_bulk runs from irq_work which will not preempt a bpf
   192                   * program that does unit_alloc/unit_free since IRQs are
   193                   * disabled there. There is no race to increment 'active'
   194                   * counter. It protects free_llist from corruption in case NMI
   195                   * bpf prog preempted this loop.
   196                   */
   197                  WARN_ON_ONCE(local_inc_return(&c->active) != 1);
   198                  __llist_add(obj, &c->free_llist);
   199                  c->free_cnt++;
   200                  local_dec(&c->active);
   201                  if (IS_ENABLED(CONFIG_PREEMPT_RT))
   202                          local_irq_restore(flags);
   203          }
   204          set_active_memcg(old_memcg);
   205          mem_cgroup_put(memcg);
   206  }
   207
   208  static void free_one(struct bpf_mem_cache *c, void *obj)
   209  {
   210          if (c->percpu) {
 > 211                  free_percpu(((void **)obj)[1]);
   212                  kmem_cache_free(c->kmem_cache, obj);
   213                  return;
   214          }
   215
   216          if (c->kmem_cache)
   217                  kmem_cache_free(c->kmem_cache, obj);
   218          else
   219                  kfree(obj);
   220  }
   221
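
The warning at line 211 is the mirror image: the pointer read back out of
obj[1] lost its annotation when it was stored, so it would need a __force
cast back into the percpu address space before free_percpu() (same caveat:
a sketch, not necessarily the committed fix):

        if (c->percpu) {
                /* restore the __percpu annotation dropped at store time */
                free_percpu((__force void __percpu *)((void **)obj)[1]);
                kmem_cache_free(c->kmem_cache, obj);
                return;
        }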

--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki