[RFC PATCH 29/41] random: move definition of struct queued_entropy and related API upwards

From: Nicolai Stange
Date: Mon Sep 21 2020 - 04:01:44 EST


The next patch will add a member of type struct queued_entropy to struct
fast_pool, and thus the former's definition needs to be visible at the
point of the latter's.

Move the definition of struct queued_entropy upwards in the file so that
it comes before the definition of struct fast_pool. Move the associated
function definitions as well in order to keep everything together.

Note that said function definitions had originally been inserted at the
old location in order to minimize the diff of the patch introducing
them, by placing them near the now removed credit_entropy_delta() they
superseded.

Signed-off-by: Nicolai Stange <nstange@xxxxxxx>
---
drivers/char/random.c | 243 +++++++++++++++++++++---------------------
1 file changed, 124 insertions(+), 119 deletions(-)
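
An aside for reviewers, not part of the applied patch: the functions
below move verbatim. To experiment with fast_mix() outside the kernel,
a minimal userspace harness can look as follows; the open-coded rol32()
and the made-up sample value are assumptions of this sketch, not taken
from the driver:

#include <stdio.h>
#include <stdint.h>

static uint32_t rol32(uint32_t word, unsigned int shift)
{
	/* shift is always 6, 27, 16 or 14 here, never 0 or >= 32 */
	return (word << shift) | (word >> (32 - shift));
}

struct fast_pool {
	uint32_t pool[4];
	unsigned char count;
};

static void fast_mix(struct fast_pool *f)
{
	uint32_t a = f->pool[0], b = f->pool[1];
	uint32_t c = f->pool[2], d = f->pool[3];
	int i;

	/* two passes over the unrolled double round from the kernel code */
	for (i = 0; i < 2; i++) {
		a += b; c += d;
		b = rol32(b, 6); d = rol32(d, 27);
		d ^= a; b ^= c;

		a += b; c += d;
		b = rol32(b, 16); d = rol32(d, 14);
		d ^= a; b ^= c;
	}

	f->pool[0] = a; f->pool[1] = b;
	f->pool[2] = c; f->pool[3] = d;
	f->count++;
}

int main(void)
{
	/* mix one made-up interrupt sample into an all-zero pool */
	struct fast_pool f = { .pool = { 0x12345678, 0, 0, 0 } };

	fast_mix(&f);
	printf("%08x %08x %08x %08x (count=%u)\n",
	       (unsigned)f.pool[0], (unsigned)f.pool[1],
	       (unsigned)f.pool[2], (unsigned)f.pool[3],
	       (unsigned)f.count);
	return 0;
}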

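Likewise for pool_entropy_delta(): it approximates the asymptotic
crediting rule entropy <- entropy + (pool_size - entropy) *
(1 - exp(-add_entropy/pool_size)) by the lower bound
3/4*add_entropy/pool_size per step. A self-contained sketch of the same
integer arithmetic, assuming the input pool's default parameters
(4096 bits, ENTROPY_SHIFT == 3) and dropping the WARN_ON() diagnostics:

#include <stdio.h>

#define POOL_BITSHIFT	12			/* log2(4096 bit input pool) */
#define ENTROPY_SHIFT	3
#define POOL_FRACBITS	(4096 << ENTROPY_SHIFT)	/* pool size in 2^-3 bit units */

static unsigned int pool_entropy_delta(int base_entropy_count, int nfrac,
				       int fast)
{
	const int pool_size = POOL_FRACBITS;
	int entropy_count = base_entropy_count;

	if (!nfrac || pool_size <= base_entropy_count)
		return 0;

	do {
		/* >> s divides by 4 * pool_size, hence the "+ 2" */
		const int s = POOL_BITSHIFT + ENTROPY_SHIFT + 2;
		unsigned int anfrac = nfrac < pool_size / 2 ?
				      nfrac : pool_size / 2;
		unsigned int add =
			((pool_size - entropy_count) * anfrac * 3) >> s;

		entropy_count += add;
		nfrac -= anfrac;
	} while (!fast && entropy_count < pool_size - 2 && nfrac);

	if (entropy_count > pool_size)
		entropy_count = pool_size;
	return entropy_count - base_entropy_count;
}

int main(void)
{
	/* credit 64 bits into an empty and into a half-full pool */
	printf("empty:     +%u fracbits\n",
	       pool_entropy_delta(0, 64 << ENTROPY_SHIFT, 0));
	printf("half-full: +%u fracbits\n",
	       pool_entropy_delta(POOL_FRACBITS / 2, 64 << ENTROPY_SHIFT, 0));
	return 0;
}

With these parameters, crediting 64 bits yields +384 fracbits (48 bits)
into an empty pool and +192 fracbits (24 bits) into a half-full one.
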
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 680ccc82a436..55e784a5a2ec 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -519,6 +519,10 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
 static ssize_t _extract_entropy(struct entropy_store *r, void *buf,
 				size_t nbytes, int fips);
 
+static unsigned int pool_entropy_delta(struct entropy_store *r,
+				       int base_entropy_count,
+				       int nfrac, bool fast);
+
 static int min_crng_reseed_pool_entropy(void);
 
 static void crng_reseed(struct crng_state *crng, struct entropy_store *r);
@@ -610,125 +614,6 @@ static void mix_pool_bytes(struct entropy_store *r, const void *in,
 	spin_unlock_irqrestore(&r->lock, flags);
 }

-struct fast_pool {
-	__u32 pool[4];
-	unsigned long last;
-	unsigned short reg_idx;
-	unsigned char count;
-	int event_entropy_shift;
-};
-
-/*
- * This is a fast mixing routine used by the interrupt randomness
- * collector. It's hardcoded for an 128 bit pool and assumes that any
- * locks that might be needed are taken by the caller.
- */
-static void fast_mix(struct fast_pool *f)
-{
-	__u32 a = f->pool[0], b = f->pool[1];
-	__u32 c = f->pool[2], d = f->pool[3];
-
-	a += b; c += d;
-	b = rol32(b, 6); d = rol32(d, 27);
-	d ^= a; b ^= c;
-
-	a += b; c += d;
-	b = rol32(b, 16); d = rol32(d, 14);
-	d ^= a; b ^= c;
-
-	a += b; c += d;
-	b = rol32(b, 6); d = rol32(d, 27);
-	d ^= a; b ^= c;
-
-	a += b; c += d;
-	b = rol32(b, 16); d = rol32(d, 14);
-	d ^= a; b ^= c;
-
-	f->pool[0] = a; f->pool[1] = b;
-	f->pool[2] = c; f->pool[3] = d;
-	f->count++;
-}
-
-static void process_random_ready_list(void)
-{
-	unsigned long flags;
-	struct random_ready_callback *rdy, *tmp;
-
-	spin_lock_irqsave(&random_ready_list_lock, flags);
-	list_for_each_entry_safe(rdy, tmp, &random_ready_list, list) {
-		struct module *owner = rdy->owner;
-
-		list_del_init(&rdy->list);
-		rdy->func(rdy);
-		module_put(owner);
-	}
-	spin_unlock_irqrestore(&random_ready_list_lock, flags);
-}
-
-/*
- * Based on the pool's current entropy fill level, specified as
- * base_entropy_count, and the number of new entropy bits in units of
- * 2^-ENTROPY_SHIFT to add, return the amount of new entropy to
- * credit. If the 'fast' parameter is set to true, the calculation
- * will be guaranteed to terminate quickly, but this comes at the
- * expense of capping nbits to one half of the pool size.
- */
-static unsigned int pool_entropy_delta(struct entropy_store *r,
-				       int base_entropy_count,
-				       int nfrac, bool fast)
-{
-	const int pool_size = r->poolinfo->poolfracbits;
-	int entropy_count = base_entropy_count;
-
-	if (!nfrac)
-		return 0;
-
-	if (pool_size <= base_entropy_count)
-		return 0;
-
-	/*
-	 * Credit: we have to account for the possibility of
-	 * overwriting already present entropy. Even in the
-	 * ideal case of pure Shannon entropy, new contributions
-	 * approach the full value asymptotically:
-	 *
-	 * entropy <- entropy + (pool_size - entropy) *
-	 *	(1 - exp(-add_entropy/pool_size))
-	 *
-	 * For add_entropy <= pool_size/2 then
-	 * (1 - exp(-add_entropy/pool_size)) >=
-	 *	(add_entropy/pool_size)*0.7869...
-	 * so we can approximate the exponential with
-	 * 3/4*add_entropy/pool_size and still be on the
-	 * safe side by adding at most pool_size/2 at a time.
-	 *
-	 * The use of pool_size-2 in the while statement is to
-	 * prevent rounding artifacts from making the loop
-	 * arbitrarily long; this limits the loop to log2(pool_size)*2
-	 * turns no matter how large nbits is.
-	 */
-	do {
-		/* The +2 corresponds to the /4 in the denominator */
-		const int s = r->poolinfo->poolbitshift + ENTROPY_SHIFT + 2;
-		unsigned int anfrac = min(nfrac, pool_size/2);
-		unsigned int add =
-			((pool_size - entropy_count)*anfrac*3) >> s;
-
-		entropy_count += add;
-		nfrac -= anfrac;
-	} while (unlikely(!fast && entropy_count < pool_size-2 && nfrac));
-
-	if (WARN_ON(entropy_count < 0)) {
-		pr_warn("negative entropy/overflow: pool %s count %d\n",
-			r->name, entropy_count);
-		entropy_count = base_entropy_count;
-	} else if (entropy_count > pool_size) {
-		entropy_count = pool_size;
-	}
-
-	return entropy_count - base_entropy_count;
-}
-
 struct queued_entropy {
 	unsigned int pool_watermark_seq;
 	unsigned int queued_entropy_fracbits;
@@ -994,6 +879,126 @@ static void discard_queued_entropy(struct entropy_store *r,
 	spin_unlock_irqrestore(&r->lock, flags);
 }

+struct fast_pool {
+	__u32 pool[4];
+	unsigned long last;
+	unsigned short reg_idx;
+	unsigned char count;
+	int event_entropy_shift;
+};
+
+/*
+ * This is a fast mixing routine used by the interrupt randomness
+ * collector. It's hardcoded for an 128 bit pool and assumes that any
+ * locks that might be needed are taken by the caller.
+ */
+static void fast_mix(struct fast_pool *f)
+{
+	__u32 a = f->pool[0], b = f->pool[1];
+	__u32 c = f->pool[2], d = f->pool[3];
+
+	a += b; c += d;
+	b = rol32(b, 6); d = rol32(d, 27);
+	d ^= a; b ^= c;
+
+	a += b; c += d;
+	b = rol32(b, 16); d = rol32(d, 14);
+	d ^= a; b ^= c;
+
+	a += b; c += d;
+	b = rol32(b, 6); d = rol32(d, 27);
+	d ^= a; b ^= c;
+
+	a += b; c += d;
+	b = rol32(b, 16); d = rol32(d, 14);
+	d ^= a; b ^= c;
+
+	f->pool[0] = a; f->pool[1] = b;
+	f->pool[2] = c; f->pool[3] = d;
+	f->count++;
+}
+
+static void process_random_ready_list(void)
+{
+	unsigned long flags;
+	struct random_ready_callback *rdy, *tmp;
+
+	spin_lock_irqsave(&random_ready_list_lock, flags);
+	list_for_each_entry_safe(rdy, tmp, &random_ready_list, list) {
+		struct module *owner = rdy->owner;
+
+		list_del_init(&rdy->list);
+		rdy->func(rdy);
+		module_put(owner);
+	}
+	spin_unlock_irqrestore(&random_ready_list_lock, flags);
+}
+
+/*
+ * Based on the pool's current entropy fill level, specified as
+ * base_entropy_count, and the number of new entropy bits in units of
+ * 2^-ENTROPY_SHIFT to add, return the amount of new entropy to
+ * credit. If the 'fast' parameter is set to true, the calculation
+ * will be guaranteed to terminate quickly, but this comes at the
+ * expense of capping nbits to one half of the pool size.
+ */
+static unsigned int pool_entropy_delta(struct entropy_store *r,
+				       int base_entropy_count,
+				       int nfrac, bool fast)
+{
+	const int pool_size = r->poolinfo->poolfracbits;
+	int entropy_count = base_entropy_count;
+
+	if (!nfrac)
+		return 0;
+
+	if (pool_size <= base_entropy_count)
+		return 0;
+
+	/*
+	 * Credit: we have to account for the possibility of
+	 * overwriting already present entropy. Even in the
+	 * ideal case of pure Shannon entropy, new contributions
+	 * approach the full value asymptotically:
+	 *
+	 * entropy <- entropy + (pool_size - entropy) *
+	 *	(1 - exp(-add_entropy/pool_size))
+	 *
+	 * For add_entropy <= pool_size/2 then
+	 * (1 - exp(-add_entropy/pool_size)) >=
+	 *	(add_entropy/pool_size)*0.7869...
+	 * so we can approximate the exponential with
+	 * 3/4*add_entropy/pool_size and still be on the
+	 * safe side by adding at most pool_size/2 at a time.
+	 *
+	 * The use of pool_size-2 in the while statement is to
+	 * prevent rounding artifacts from making the loop
+	 * arbitrarily long; this limits the loop to log2(pool_size)*2
+	 * turns no matter how large nbits is.
+	 */
+	do {
+		/* The +2 corresponds to the /4 in the denominator */
+		const int s = r->poolinfo->poolbitshift + ENTROPY_SHIFT + 2;
+		unsigned int anfrac = min(nfrac, pool_size/2);
+		unsigned int add =
+			((pool_size - entropy_count)*anfrac*3) >> s;
+
+		entropy_count += add;
+		nfrac -= anfrac;
+	} while (unlikely(!fast && entropy_count < pool_size-2 && nfrac));
+
+	if (WARN_ON(entropy_count < 0)) {
+		pr_warn("negative entropy/overflow: pool %s count %d\n",
+			r->name, entropy_count);
+		entropy_count = base_entropy_count;
+	} else if (entropy_count > pool_size) {
+		entropy_count = pool_size;
+	}
+
+	return entropy_count - base_entropy_count;
+}
+
+
 /*********************************************************************
  *
  * CRNG using CHACHA20
--
2.26.2