Re: [PATCH] mm/vmalloc: Remove WARN_ON_ONCE related to adjust_va_to_fit_type

From: Uladzislau Rezki
Date: Wed Sep 27 2023 - 07:50:09 EST


> > Yes, but a GFP_NOWAIT allocation failure can easily occur on a low-memory device.
> >
> Agree. You really are in a low-memory condition there. We end up here only if
> pre-loading has also not succeeded, i.e. GFP_KERNEL also fails.
>
> But i agree with you that we should "improve the warning", because we
> drain and repeat.
>
> > How about changing fix as below?:
> >
> > <snip>
> > --- a/mm/vmalloc.c
> > +++ b/mm/vmalloc.c
> > @@ -1468,6 +1468,7 @@ adjust_va_to_fit_type(struct rb_root *root, struct list_head *head,
> >  		 */
> >  		va->va_start = nva_start_addr + size;
> >  	} else {
> > +		WARN_ON_ONCE(1);
> >  		return -1;
> >  	}
> >
> > @@ -1522,7 +1523,7 @@ __alloc_vmap_area(struct rb_root *root, struct list_head *head,
> >
> >  	/* Update the free vmap_area. */
> >  	ret = adjust_va_to_fit_type(root, head, va, nva_start_addr, size);
> > -	if (WARN_ON_ONCE(ret))
> > +	if (ret)
> >  		return vend;
> >
> >  #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
> > @@ -4143,7 +4144,7 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
> >  		ret = adjust_va_to_fit_type(&free_vmap_area_root,
> >  					    &free_vmap_area_list,
> >  					    va, start, size);
> > -		if (WARN_ON_ONCE(unlikely(ret)))
> > +		if (unlikely(ret))
> >  			/* It is a BUG(), but trigger recovery instead. */
> >  			goto recovery;
> >
> > <snip>
> > It will WARN_ON_ONCE() only if classify_va_fit_type() returns NOTHING_FIT.
> >
> This is good, but i think it should be improved further. From the warning
> we need to be able to tell when there is no memory and when there is no
> vmap space, so:
>
> - if NOTHING_FIT, we should WARN() for sure;
> - in the second place, pcpu_get_vm_areas(), we do not use NE_FIT, only at
> the very beginning after boot, but we can potentially trigger -ENOMEM and
> we should warn in that case. Otherwise you just hide it;
> - and the last one is when, even after repeating, we still do not manage
> to allocate.
>
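
For reference, this is roughly how the NE_FIT pre-loading path being
discussed works today (a simplified sketch of the current mm/vmalloc.c
logic, with the long comment shortened; it is not part of the proposed
patch):

<snip>
/*
 * Sketch: alloc_vmap_area() preloads one extra vmap_area object per CPU
 * with the caller's gfp_mask (typically GFP_KERNEL) while it may still
 * sleep. adjust_va_to_fit_type() consumes that object for an NE_FIT
 * split; only if the preload is empty does it fall back to the atomic
 * GFP_NOWAIT allocation that can fail on a low-memory device.
 */
static void
preload_this_cpu_lock(spinlock_t *lock, gfp_t gfp_mask, int node)
{
	struct vmap_area *va = NULL;

	if (!this_cpu_read(ne_fit_preload_node))
		va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);

	spin_lock(lock);

	if (va && __this_cpu_cmpxchg(ne_fit_preload_node, NULL, va))
		kmem_cache_free(vmap_area_cachep, va);
}
<snip>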

We should understand the reason for the failure. I think the error handling
should be improved. Something like:

<snip>
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index ef8599d394fd..03a36921a3fc 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1454,7 +1454,7 @@ adjust_va_to_fit_type(struct rb_root *root, struct list_head *head,
 			 */
 			lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT);
 			if (!lva)
-				return -1;
+				return -ENOMEM;
 		}
 
 		/*
@@ -1468,7 +1468,7 @@ adjust_va_to_fit_type(struct rb_root *root, struct list_head *head,
 		 */
 		va->va_start = nva_start_addr + size;
 	} else {
-		return -1;
+		return -EINVAL;
 	}
 
 	if (type != FL_FIT_TYPE) {
@@ -1488,7 +1488,8 @@ adjust_va_to_fit_type(struct rb_root *root, struct list_head *head,
 static __always_inline unsigned long
 __alloc_vmap_area(struct rb_root *root, struct list_head *head,
 	unsigned long size, unsigned long align,
-	unsigned long vstart, unsigned long vend)
+	unsigned long vstart, unsigned long vend,
+	int *error)
 {
 	bool adjust_search_size = true;
 	unsigned long nva_start_addr;
@@ -1508,8 +1509,10 @@ __alloc_vmap_area(struct rb_root *root, struct list_head *head,
 		adjust_search_size = false;
 
 	va = find_vmap_lowest_match(root, size, align, vstart, adjust_search_size);
-	if (unlikely(!va))
+	if (unlikely(!va)) {
+		*error = -ENOENT;
 		return vend;
+	}
 
 	if (va->va_start > vstart)
 		nva_start_addr = ALIGN(va->va_start, align);
@@ -1517,13 +1520,17 @@ __alloc_vmap_area(struct rb_root *root, struct list_head *head,
 		nva_start_addr = ALIGN(vstart, align);
 
 	/* Check the "vend" restriction. */
-	if (nva_start_addr + size > vend)
+	if (nva_start_addr + size > vend) {
+		*error = -ERANGE;
 		return vend;
+	}
 
 	/* Update the free vmap_area. */
 	ret = adjust_va_to_fit_type(root, head, va, nva_start_addr, size);
-	if (WARN_ON_ONCE(ret))
+	if (ret) {
+		*error = ret;
 		return vend;
+	}
 
 #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
 	find_vmap_lowest_match_check(root, head, size, align);
@@ -1589,7 +1596,7 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
 	unsigned long freed;
 	unsigned long addr;
 	int purged = 0;
-	int ret;
+	int ret, error;
 
 	if (unlikely(!size || offset_in_page(size) || !is_power_of_2(align)))
 		return ERR_PTR(-EINVAL);
@@ -1613,7 +1620,7 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
 retry:
 	preload_this_cpu_lock(&free_vmap_area_lock, gfp_mask, node);
 	addr = __alloc_vmap_area(&free_vmap_area_root, &free_vmap_area_list,
-		size, align, vstart, vend);
+		size, align, vstart, vend, &error);
 	spin_unlock(&free_vmap_area_lock);
 
 	trace_alloc_vmap_area(addr, size, align, vstart, vend, addr == vend);
@@ -1662,8 +1669,9 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
 	}
 
 	if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit())
-		pr_warn("vmap allocation for size %lu failed: use vmalloc=<size> to increase size\n",
-			size);
+		pr_warn("vmap allocation for size %lu failed: "
+			"use vmalloc=<size> to increase size, errno: (%d)\n",
+			size, error);
 
 	kmem_cache_free(vmap_area_cachep, va);
 	return ERR_PTR(-EBUSY);
@@ -4143,9 +4151,10 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
 		ret = adjust_va_to_fit_type(&free_vmap_area_root,
 					    &free_vmap_area_list,
 					    va, start, size);
-		if (WARN_ON_ONCE(unlikely(ret)))
-			/* It is a BUG(), but trigger recovery instead. */
+		if (unlikely(ret)) {
+			WARN_ONCE(1, "%s error: errno (%d)\n", __func__, ret);
 			goto recovery;
+		}
 
 		/* Allocated area. */
 		va = vas[area];
<snip>

Any thoughts?

--
Uladzislau Rezki