[RFC PATCH 1/3] lib/list_batch: A simple list insertion/deletion batching facility

From: Waiman Long
Date: Tue Jan 26 2016 - 11:04:26 EST


Inserting into or deleting from a linked list under a lock is a very
common activity in the Linux kernel. If this is the only work done under
the lock, the locking overhead can be large compared with the time spent
on the insertion or deletion itself, especially on a large system with
many CPUs.

This patch introduces a simple list insertion/deletion batching facility
in which multiple list insertion and deletion operations are combined
and processed as a single batch under one lock/unlock critical section.
This can reduce the locking overhead and improve overall system
performance.

The fast path of this batching facility is similar in performance to the
"lock; listop; unlock;" sequence of the existing code. If the lock is not
immediately available, the caller enters the slowpath, where the batching
happens.
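
As a rough usage sketch (not part of this patch), a caller that currently
does its list manipulation directly under a spinlock could be converted
along these lines. The foo_item/foo_head structures and helpers below are
made up for illustration; only list_batch_init(), do_list_batch() and the
lb_cmd_* commands come from this patch:

        #include <linux/list_batch.h>

        struct foo_item {
                struct list_head node;
                /* ... payload ... */
        };

        struct foo_head {
                spinlock_t lock;
                struct list_head list;
                struct list_batch batch;        /* batches ops on ->list */
        };

        static void foo_head_init(struct foo_head *fh)
        {
                spin_lock_init(&fh->lock);
                INIT_LIST_HEAD(&fh->list);
                list_batch_init(&fh->batch, &fh->list);
        }

        /*
         * Was:  spin_lock(&fh->lock);
         *       list_add(&item->node, &fh->list);
         *       spin_unlock(&fh->lock);
         */
        static void foo_add(struct foo_head *fh, struct foo_item *item)
        {
                do_list_batch(lb_cmd_add, &fh->lock, &fh->batch, &item->node);
        }

        static void foo_del(struct foo_head *fh, struct foo_item *item)
        {
                do_list_batch(lb_cmd_del_init, &fh->lock, &fh->batch,
                              &item->node);
        }

The lb_cmd_del_init command reinitializes the entry after removal, like
list_del_init() does.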

A new config option, LIST_BATCHING, is added so that we can control on
which architectures this facility is enabled. Architectures opt in by
selecting ARCH_USE_LIST_BATCHING.
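
For illustration only (this patch does not itself select the new option
anywhere), an architecture would opt in with a Kconfig change along these
lines, which in turn enables LIST_BATCHING on SMP builds:

        config X86
                ...
                select ARCH_USE_LIST_BATCHING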

Signed-off-by: Waiman Long <Waiman.Long@xxxxxxx>
---
 include/linux/list_batch.h |  120 ++++++++++++++++++++++++++++++++++++++++++++
 lib/Kconfig                |    7 +++
 lib/Makefile               |    1 +
 lib/list_batch.c           |  117 +++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 245 insertions(+), 0 deletions(-)
create mode 100644 include/linux/list_batch.h
create mode 100644 lib/list_batch.c

diff --git a/include/linux/list_batch.h b/include/linux/list_batch.h
new file mode 100644
index 0000000..b8583e7
--- /dev/null
+++ b/include/linux/list_batch.h
@@ -0,0 +1,120 @@
+/*
+ * List insertion/deletion batching facility
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * (C) Copyright 2016 Hewlett-Packard Enterprise Development LP
+ *
+ * Authors: Waiman Long <waiman.long@xxxxxxx>
+ */
+#ifndef __LINUX_LIST_BATCH_H
+#define __LINUX_LIST_BATCH_H
+
+#include <linux/spinlock.h>
+#include <linux/list.h>
+
+/*
+ * include/linux/list_batch.h
+ *
+ * Inserting or deleting an entry from a linked list under a spinlock is a
+ * very common operation in the Linux kernel. If many CPUs are trying to
+ * grab the lock and manipulate the linked list, it can lead to significant
+ * lock contention and slow operation.
+ *
+ * This list operation batching facility is used to batch multiple list
+ * operations under one lock/unlock critical section, thus reducing the
+ * locking overhead and improving overall performance.
+ */
+enum list_batch_cmd {
+        lb_cmd_add,
+        lb_cmd_del,
+        lb_cmd_del_init
+};
+
+enum list_batch_state {
+        lb_state_waiting,       /* Node is waiting */
+        lb_state_batch,         /* Queue head to perform batch processing */
+        lb_state_done           /* Job is done */
+};
+
+struct list_batch_qnode {
+        struct list_batch_qnode *next;
+        struct list_head *entry;
+        enum list_batch_cmd cmd;
+        enum list_batch_state state;
+};
+
+struct list_batch {
+        struct list_head *list;
+        struct list_batch_qnode *tail;
+};
+
+#define LIST_BATCH_INIT(_list)          \
+        {                               \
+                .list = _list,          \
+                .tail = NULL            \
+        }
+
+static inline void list_batch_init(struct list_batch *batch,
+                                   struct list_head *list)
+{
+        batch->list = list;
+        batch->tail = NULL;
+}
+
+static __always_inline void _list_batch_cmd(enum list_batch_cmd cmd,
+                                            struct list_head *head,
+                                            struct list_head *entry)
+{
+        if (cmd == lb_cmd_add)
+                list_add(entry, head);
+        else if (cmd == lb_cmd_del)
+                list_del(entry);
+        else    /* cmd == lb_cmd_del_init */
+                list_del_init(entry);
+}
+
+#ifdef CONFIG_LIST_BATCHING
+
+extern void do_list_batch_slowpath(enum list_batch_cmd cmd, spinlock_t *lock,
+                                   struct list_batch *batch,
+                                   struct list_head *entry);
+
+static inline void do_list_batch(enum list_batch_cmd cmd, spinlock_t *lock,
+                                 struct list_batch *batch,
+                                 struct list_head *entry)
+{
+        /*
+         * Fast path
+         */
+        if (spin_trylock(lock)) {
+                _list_batch_cmd(cmd, batch->list, entry);
+                spin_unlock(lock);
+                return;
+        }
+        do_list_batch_slowpath(cmd, lock, batch, entry);
+}
+
+
+#else /* CONFIG_LIST_BATCHING */
+
+static inline void do_list_batch(enum list_batch_cmd cmd, spinlock_t *lock,
+                                 struct list_batch *batch,
+                                 struct list_head *entry)
+{
+        spin_lock(lock);
+        _list_batch_cmd(cmd, batch->list, entry);
+        spin_unlock(lock);
+}
+
+#endif /* CONFIG_LIST_BATCHING */
+
+#endif /* __LINUX_LIST_BATCH_H */
diff --git a/lib/Kconfig b/lib/Kconfig
index 133ebc0..d75ce19 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -514,6 +514,13 @@ config OID_REGISTRY
 config UCS2_STRING
         tristate
 
+config LIST_BATCHING
+        def_bool y if ARCH_USE_LIST_BATCHING
+        depends on SMP
+
+config ARCH_USE_LIST_BATCHING
+        bool
+
 source "lib/fonts/Kconfig"
 
 config SG_SPLIT
diff --git a/lib/Makefile b/lib/Makefile
index a7c26a4..2791262 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -210,6 +210,7 @@ quiet_cmd_build_OID_registry = GEN $@
 clean-files += oid_registry_data.c
 
 obj-$(CONFIG_UCS2_STRING) += ucs2_string.o
+obj-$(CONFIG_LIST_BATCHING) += list_batch.o
 obj-$(CONFIG_UBSAN) += ubsan.o
 
 UBSAN_SANITIZE_ubsan.o := n
diff --git a/lib/list_batch.c b/lib/list_batch.c
new file mode 100644
index 0000000..ac51d49
--- /dev/null
+++ b/lib/list_batch.c
@@ -0,0 +1,117 @@
+/*
+ * List insertion/deletion batching facility
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * (C) Copyright 2016 Hewlett-Packard Enterprise Development LP
+ *
+ * Authors: Waiman Long <waiman.long@xxxxxxx>
+ */
+#include <linux/list_batch.h>
+
+/*
+ * List processing batch size = 128
+ *
+ * The batch size shouldn't be too large. Otherwise, it will be too unfair
+ * to the task doing the batch processing. It shouldn't be too small either,
+ * as the performance benefit will be reduced.
+ */
+#define LB_BATCH_SIZE (1 << 7)
+
+/*
+ * Inserting or deleting an entry from a linked list under a spinlock is a
+ * very common operation in the Linux kernel. If many CPUs are trying to
+ * grab the lock and manipulate the linked list, it can lead to significant
+ * lock contention and slow operation.
+ *
+ * This list operation batching facility is used to batch multiple list
+ * operations under one lock/unlock critical section, thus reducing the
+ * locking overhead and improving overall performance.
+ */
+void do_list_batch_slowpath(enum list_batch_cmd cmd, spinlock_t *lock,
+                            struct list_batch *batch, struct list_head *entry)
+{
+        struct list_batch_qnode node, *prev, *next, *nptr;
+        int loop;
+
+        /*
+         * Put ourselves into the list_batch queue
+         */
+        node.next = NULL;
+        node.entry = entry;
+        node.cmd = cmd;
+        node.state = lb_state_waiting;
+
+        prev = xchg(&batch->tail, &node);
+
+        if (prev) {
+                WRITE_ONCE(prev->next, &node);
+                while (READ_ONCE(node.state) == lb_state_waiting)
+                        cpu_relax();
+                if (node.state == lb_state_done)
+                        return;
+                WARN_ON(node.state != lb_state_batch);
+        }
+
+        /*
+         * We are now the queue head; we should acquire the lock and
+         * process a batch of qnodes.
+         */
+        loop = LB_BATCH_SIZE;
+        next = &node;
+        spin_lock(lock);
+
+do_list_again:
+        do {
+                nptr = next;
+                _list_batch_cmd(nptr->cmd, batch->list, nptr->entry);
+                next = READ_ONCE(nptr->next);
+                /*
+                 * As soon as the state is marked lb_state_done, we can
+                 * no longer assume that the content of *nptr is valid.
+                 * So we have to hold off marking it done until we no
+                 * longer need its content.
+                 *
+                 * The release barrier here is to make sure that we
+                 * won't access its content after marking it done.
+                 */
+                if (next)
+                        smp_store_release(&nptr->state, lb_state_done);
+        } while (--loop && next);
+        if (!next) {
+                /*
+                 * The queue tail should be equal to nptr, so clear it
+                 * to mark the queue as empty.
+                 */
+                if (cmpxchg(&batch->tail, nptr, NULL) != nptr) {
+                        /*
+                         * Queue not empty, wait until the next pointer
+                         * is initialized.
+                         */
+                        while (!(next = READ_ONCE(nptr->next)))
+                                cpu_relax();
+                }
+                /* The above cmpxchg acts as a memory barrier */
+                WRITE_ONCE(nptr->state, lb_state_done);
+        }
+        if (next) {
+                if (loop)
+                        goto do_list_again;     /* More qnodes to process */
+                /*
+                 * Mark the next qnode as the head to process the next
+                 * batch of qnodes. The new queue head cannot proceed
+                 * until we release the lock.
+                 */
+                WRITE_ONCE(next->state, lb_state_batch);
+        }
+        spin_unlock(lock);
+}
+EXPORT_SYMBOL_GPL(do_list_batch_slowpath);
--
1.7.1