[PATCH 2/2] XArray: Fix xas_create_range() when lower multi-order entry present

From: Wei Yang
Date: Mon Sep 12 2022 - 08:53:12 EST


If there is already a lower-order entry present, xas_create_range()
runs into two problems:

* When new_order is roundup(order, XA_CHUNK_SHIFT), it walks up beyond
  the root and accesses root->parent
* When there are holes in the lower-order range, no proper entries are
  created for them

This patch fixes the issue by adjusting xa_index to the proper next
index when a multi-order entry is found, and then continuing the
lookup from there.
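
For illustration, a minimal sketch of the sequence that hits the
problem (it mirrors the test added below; xa_store_order() and
xa_mk_index() are the helpers from lib/test_xarray.c, and "xa" is
assumed to point to an initialized, empty XArray):

	unsigned int order = 2 * XA_CHUNK_SHIFT - 2;
	unsigned int next_order = roundup(order, XA_CHUNK_SHIFT);
	XA_STATE_ORDER(xas, xa, 0, next_order);

	/* A lower multi-order entry is already present in the range. */
	xa_store_order(xa, 1UL << order, order, xa_mk_index(1UL << order),
		       GFP_KERNEL);

	/*
	 * Create slots for the whole next_order range.  Without this
	 * patch the walk may step above the root (next_order here is
	 * roundup(order, XA_CHUNK_SHIFT)), and the hole
	 * [0, (1 << order) - 1] is not filled with order-0 slots.
	 */
	do {
		xas_lock(&xas);
		xas_create_range(&xas);
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));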

Signed-off-by: Wei Yang <richard.weiyang@xxxxxxxxx>
---
lib/test_xarray.c | 54 +++++++++++++++++++++++++++++++++++++++++++++++
lib/xarray.c | 21 +++++++++++++++---
2 files changed, 72 insertions(+), 3 deletions(-)

diff --git a/lib/test_xarray.c b/lib/test_xarray.c
index e77d4856442c..2cf2cd8471a8 100644
--- a/lib/test_xarray.c
+++ b/lib/test_xarray.c
@@ -1482,6 +1482,59 @@ static noinline void check_create_range_5(struct xarray *xa,
 	xa_destroy(xa);
 }
 
+static noinline void check_create_range_6(struct xarray *xa)
+{
+	unsigned long index = 0;
+	unsigned int order, next_order;
+
+	order = 2 * XA_CHUNK_SHIFT - 2;
+
+	for (next_order = order + 1; next_order <= roundup(order, XA_CHUNK_SHIFT) + 1;
+	     next_order++) {
+		XA_STATE(load_xas, xa, 0);
+		XA_STATE_ORDER(xas, xa, 0, next_order);
+
+		for (index = 0; index < (1 << next_order); index += 1 << order) {
+			if (index == 0)
+				continue;
+			xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL);
+		}
+
+		// [0, (1 << order) - 1] is empty now
+		rcu_read_lock();
+		xas_load(&load_xas);
+		XA_BUG_ON(xa, load_xas.xa_node == NULL);
+		XA_BUG_ON(xa, load_xas.xa_node->shift == 0);
+		rcu_read_unlock();
+
+		xas_set(&load_xas, (1 << order) - 1);
+		rcu_read_lock();
+		xas_load(&load_xas);
+		XA_BUG_ON(xa, load_xas.xa_node == NULL);
+		XA_BUG_ON(xa, load_xas.xa_node->shift == 0);
+		rcu_read_unlock();
+
+		do {
+			xas_lock(&xas);
+			xas_create_range(&xas);
+			xas_unlock(&xas);
+		} while (xas_nomem(&xas, GFP_KERNEL));
+
+		// [0, (1 << order) - 1] is created now
+		xas_set(&load_xas, 0);
+		XA_BUG_ON(xa, xas_load(&load_xas) != NULL);
+		XA_BUG_ON(xa, load_xas.xa_node == NULL);
+		XA_BUG_ON(xa, load_xas.xa_node->shift != 0);
+
+		xas_set(&load_xas, (1 << order) - 1);
+		XA_BUG_ON(xa, xas_load(&load_xas) != NULL);
+		XA_BUG_ON(xa, load_xas.xa_node == NULL);
+		XA_BUG_ON(xa, load_xas.xa_node->shift != 0);
+
+		xa_destroy(xa);
+	}
+}
+
 static noinline void check_create_range(struct xarray *xa)
 {
 	unsigned int order;
@@ -1515,6 +1568,7 @@ static noinline void check_create_range(struct xarray *xa)
 	}
 
 	check_create_range_3();
+	check_create_range_6(xa);
 }
 
 static noinline void __check_store_range(struct xarray *xa, unsigned long first,
diff --git a/lib/xarray.c b/lib/xarray.c
index 326b73bb9811..3f9a630ef788 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -708,6 +708,7 @@ void xas_create_range(struct xa_state *xas)
 	unsigned long index = xas->xa_index;
 	unsigned char shift = xas->xa_shift;
 	unsigned char sibs = xas->xa_sibs;
+	struct xa_node *node;
 
 	xas->xa_index |= ((sibs + 1UL) << shift) - 1;
 	if (xas_is_node(xas) && xas->xa_node->shift == xas->xa_shift)
@@ -723,14 +724,28 @@ void xas_create_range(struct xa_state *xas)
 			goto success;
 		xas->xa_index -= XA_CHUNK_SIZE;
 
+		node = xas->xa_node;
+		if (node->shift) {
+			unsigned long next_index = xas->xa_index >> node->shift;
+
+			next_index &= ~XA_CHUNK_MASK;
+			next_index += xas->xa_offset;
+			next_index <<= node->shift;
+
+			if (next_index <= (index & ~XA_CHUNK_MASK))
+				goto success;
+
+			xas->xa_index = next_index - 1;
+		}
+
 		for (;;) {
-			struct xa_node *node = xas->xa_node;
-			if (node->shift >= shift)
-				break;
 			xas->xa_node = xa_parent_locked(xas->xa, node);
+			if (!xas->xa_node)
+				break;
 			xas->xa_offset = node->offset - 1;
 			if (node->offset != 0)
 				break;
+			node = xas->xa_node;
 		}
 	}

--
2.33.1