[PATCH v43 04/15] LRNG - allocate one DRNG instance per NUMA node

From: Stephan Müller
Date: Sun Nov 21 2021 - 11:50:36 EST


In order to improve NUMA-locality when serving getrandom(2) requests,
allocate one DRNG instance per node.

The DRNG instance that is present right from the start of the kernel is
reused as the DRNG of the first per-NUMA-node slot. For each remaining
online NUMA node, a new DRNG instance is allocated.

At boot time, the DRNG instances are seeded sequentially: the first DRNG
instance (referred to in the code as the initial DRNG) is fully seeded
with 256 bits of entropy before the next DRNG instance is seeded.
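
The per-node seeding itself is performed by the existing seed worker and
is not part of this patch. As an illustration only, it can be pictured as
walking the instance array and seeding the first instance that is not yet
fully seeded (fully_seeded and lrng_drng_seed() refer to existing LRNG
state and are assumptions here):

	struct lrng_drng **lrng_drng = lrng_drng_instances();
	u32 node;

	if (!lrng_drng)
		return;

	for_each_online_node(node) {
		struct lrng_drng *drng = lrng_drng[node];

		/* seed the first instance that is not yet fully seeded */
		if (drng && !drng->fully_seeded) {
			lrng_drng_seed(drng);
			return;
		}
	}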

When random numbers are requested, the NUMA-node-local DRNG is checked to
determine whether it has already been fully seeded. If it has not, the
initial DRNG serves the request instead.
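
As an illustration only (the request path is outside this patch, and the
fully_seeded flag is assumed from the existing LRNG state), the selection
roughly amounts to:

	struct lrng_drng **lrng_drng = lrng_drng_instances();
	struct lrng_drng *drng = lrng_drng_init_instance();
	int node = numa_node_id();

	/* use the node-local DRNG only once it is fully seeded */
	if (lrng_drng && lrng_drng[node] && lrng_drng[node]->fully_seeded)
		drng = lrng_drng[node];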

CC: Torsten Duwe <duwe@xxxxxx>
CC: "Eric W. Biederman" <ebiederm@xxxxxxxxxxxx>
CC: "Alexander E. Patrakov" <patrakov@xxxxxxxxx>
CC: "Ahmed S. Darwish" <darwish.07@xxxxxxxxx>
CC: "Theodore Y. Ts'o" <tytso@xxxxxxx>
CC: Willy Tarreau <w@xxxxxx>
CC: Matthew Garrett <mjg59@xxxxxxxxxxxxx>
CC: Vito Caputo <vcaputo@xxxxxxxxxxx>
CC: Andreas Dilger <adilger.kernel@xxxxxxxxx>
CC: Jan Kara <jack@xxxxxxx>
CC: Ray Strode <rstrode@xxxxxxxxxx>
CC: William Jon McCann <mccann@xxxxxxx>
CC: zhangjs <zachary@xxxxxxxxxxxxxxxx>
CC: Andy Lutomirski <luto@xxxxxxxxxx>
CC: Florian Weimer <fweimer@xxxxxxxxxx>
CC: Lennart Poettering <mzxreary@xxxxxxxxxxx>
CC: Nicolai Stange <nstange@xxxxxxx>
CC: Eric Biggers <ebiggers@xxxxxxxxxx>
Reviewed-by: Alexander Lobakin <alobakin@xxxxx>
Tested-by: Alexander Lobakin <alobakin@xxxxx>
Reviewed-by: Marcelo Henrique Cerri <marcelo.cerri@xxxxxxxxxxxxx>
Reviewed-by: Roman Drahtmueller <draht@xxxxxxxxxxxxxx>
Tested-by: Marcelo Henrique Cerri <marcelo.cerri@xxxxxxxxxxxxx>
Tested-by: Neil Horman <nhorman@xxxxxxxxxx>
Tested-by: Jirka Hladky <jhladky@xxxxxxxxxx>
Reviewed-by: Jirka Hladky <jhladky@xxxxxxxxxx>
Signed-off-by: Stephan Mueller <smueller@xxxxxxxxxx>
---
drivers/char/lrng/Makefile | 1 +
drivers/char/lrng/lrng_numa.c | 122 ++++++++++++++++++++++++++++++++++
2 files changed, 123 insertions(+)
create mode 100644 drivers/char/lrng/lrng_numa.c

diff --git a/drivers/char/lrng/Makefile b/drivers/char/lrng/Makefile
index d7df72a702e4..1e722e0967e0 100644
--- a/drivers/char/lrng/Makefile
+++ b/drivers/char/lrng/Makefile
@@ -9,3 +9,4 @@ obj-y += lrng_es_mgr.o lrng_aux.o \

obj-$(CONFIG_LRNG_IRQ) += lrng_es_irq.o
obj-$(CONFIG_SYSCTL) += lrng_proc.o
+obj-$(CONFIG_NUMA) += lrng_numa.o
diff --git a/drivers/char/lrng/lrng_numa.c b/drivers/char/lrng/lrng_numa.c
new file mode 100644
index 000000000000..fbfb40a5fb8d
--- /dev/null
+++ b/drivers/char/lrng/lrng_numa.c
@@ -0,0 +1,122 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+/*
+ * LRNG NUMA support
+ *
+ * Copyright (C) 2016 - 2021, Stephan Mueller <smueller@xxxxxxxxxx>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/lrng.h>
+#include <linux/slab.h>
+
+#include "lrng_internal.h"
+
+static struct lrng_drng **lrng_drng __read_mostly = NULL;
+
+struct lrng_drng **lrng_drng_instances(void)
+{
+	return smp_load_acquire(&lrng_drng);
+}
+
+/* Allocate the data structures for the per-NUMA node DRNGs */
+static void _lrng_drngs_numa_alloc(struct work_struct *work)
+{
+	struct lrng_drng **drngs;
+	struct lrng_drng *lrng_drng_init = lrng_drng_init_instance();
+	u32 node;
+	bool init_drng_used = false;
+
+	mutex_lock(&lrng_crypto_cb_update);
+
+	/* per-NUMA-node DRNGs are already present */
+	if (lrng_drng)
+		goto unlock;
+
+	drngs = kcalloc(nr_node_ids, sizeof(void *), GFP_KERNEL|__GFP_NOFAIL);
+	for_each_online_node(node) {
+		struct lrng_drng *drng;
+
+		if (!init_drng_used) {
+			drngs[node] = lrng_drng_init;
+			init_drng_used = true;
+			continue;
+		}
+
+		drng = kmalloc_node(sizeof(struct lrng_drng),
+				    GFP_KERNEL|__GFP_NOFAIL, node);
+		memset(drng, 0, sizeof(*drng));
+
+		drng->crypto_cb = lrng_drng_init->crypto_cb;
+		drng->drng = drng->crypto_cb->lrng_drng_alloc(
+					LRNG_DRNG_SECURITY_STRENGTH_BYTES);
+		if (IS_ERR(drng->drng)) {
+			kfree(drng);
+			goto err;
+		}
+
+		drng->hash = drng->crypto_cb->lrng_hash_alloc();
+		if (IS_ERR(drng->hash)) {
+			drng->crypto_cb->lrng_drng_dealloc(drng->drng);
+			kfree(drng);
+			goto err;
+		}
+
+		mutex_init(&drng->lock);
+		spin_lock_init(&drng->spin_lock);
+		rwlock_init(&drng->hash_lock);
+
+		/*
+		 * Switch the hash used by the per-CPU pool.
+		 * We do not need to lock the new hash as it is not usable yet
+		 * due to **drngs not yet being initialized.
+		 */
+		if (lrng_pcpu_switch_hash(node, drng->crypto_cb, drng->hash,
+					  &lrng_cc20_crypto_cb))
+			goto err;
+
+		/*
+		 * No reseeding of NUMA DRNGs from previous DRNGs as this
+		 * would complicate the code. Let it simply reseed.
+		 */
+		lrng_drng_reset(drng);
+		drngs[node] = drng;
+
+		lrng_pool_inc_numa_node();
+		pr_info("DRNG and entropy pool read hash for NUMA node %d allocated\n",
+			node);
+	}
+
+	/* counterpart to smp_load_acquire in lrng_drng_instances */
+	if (!cmpxchg_release(&lrng_drng, NULL, drngs)) {
+		lrng_pool_all_numa_nodes_seeded(false);
+		goto unlock;
+	}
+
+err:
+	for_each_online_node(node) {
+		struct lrng_drng *drng = drngs[node];
+
+		if (drng == lrng_drng_init)
+			continue;
+
+		if (drng) {
+			lrng_pcpu_switch_hash(node, &lrng_cc20_crypto_cb, NULL,
+					      drng->crypto_cb);
+			drng->crypto_cb->lrng_hash_dealloc(drng->hash);
+			drng->crypto_cb->lrng_drng_dealloc(drng->drng);
+			kfree(drng);
+		}
+	}
+	kfree(drngs);
+
+unlock:
+	mutex_unlock(&lrng_crypto_cb_update);
+}
+
+static DECLARE_WORK(lrng_drngs_numa_alloc_work, _lrng_drngs_numa_alloc);
+
+void lrng_drngs_numa_alloc(void)
+{
+	schedule_work(&lrng_drngs_numa_alloc_work);
+}
--
2.31.1