Re: [PATCH v3 4/5] kfence: limit currently covered allocations when pool nearly full

From: Marco Elver
Date: Fri Sep 24 2021 - 09:26:15 EST


On Thu, Sep 23, 2021 at 04:28PM -0700, Andrew Morton wrote:
> On Thu, 23 Sep 2021 15:44:10 +0200 Marco Elver <elver@xxxxxxxxxx> wrote:
[...]
> > I'm worried about next_pseudo_random32() changing its implementation
> > to no longer be deterministic, or changing in other ways that break our
> > usecase. In this case we want pseudorandomness, but we're not
> > implementing a PRNG.
> >
> > Open-coding the constants (given they are from "Numerical Recipes") is
> > more reliable and doesn't introduce unwanted reliance on
> > next_pseudo_random32()'s behaviour.
>
> Perhaps we could summarize this in an additional comment?

Hmm, on second thought, while trying to write the comment I realized it's
unnecessary altogether. I've switched to just using hash_32(), which is
probably better suited.

> Also, this:
>
> +static u32 get_alloc_stack_hash(unsigned long *stack_entries, size_t num_entries)
> +{
> + /* Some randomness across reboots / different machines. */
> + u32 seed = (u32)((unsigned long)__kfence_pool >> (BITS_PER_LONG - 32));
>
> seems a bit weak. Would it be better to seed this at boot time with
> a randomish number?

Sure, makes sense.
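
(For reference, the fixup below seeds it once in kfence_init() via
random_get_entropy():

	stack_hash_seed = (u32)random_get_entropy();

random_get_entropy() typically reads a cycle counter, so it works this early
in boot without depending on the fully initialized RNG -- not
cryptographically strong, but presumably plenty for a hash seed.)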

Both fixes are included in the fixup below. (Let me know if resending as
v4 is better, but I see the patches have already appeared in -mm.)

Thank you!

-- Marco

------ >8 ------

From: Marco Elver <elver@xxxxxxxxxx>
Date: Fri, 24 Sep 2021 14:17:38 +0200
Subject: [PATCH] fixup! kfence: limit currently covered allocations when pool
nearly full

* Simplify and just use hash_32().
* Use more random stack_hash_seed.

Signed-off-by: Marco Elver <elver@xxxxxxxxxx>
---
mm/kfence/core.c | 18 ++++++++++++------
1 file changed, 12 insertions(+), 6 deletions(-)

diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index 58a0f6f1acc5..545999d04af4 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -10,6 +10,7 @@
#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/debugfs.h>
+#include <linux/hash.h>
#include <linux/irq_work.h>
#include <linux/jhash.h>
#include <linux/kcsan-checks.h>
@@ -122,14 +123,21 @@ atomic_t kfence_allocation_gate = ATOMIC_INIT(1);
* P(alloc_traces) = (1 - e^(-HNUM * (alloc_traces / SIZE))) ^ HNUM
*/
#define ALLOC_COVERED_HNUM 2
-#define ALLOC_COVERED_SIZE (1 << (const_ilog2(CONFIG_KFENCE_NUM_OBJECTS) + 2))
-#define ALLOC_COVERED_HNEXT(h) (1664525 * (h) + 1013904223)
+#define ALLOC_COVERED_ORDER (const_ilog2(CONFIG_KFENCE_NUM_OBJECTS) + 2)
+#define ALLOC_COVERED_SIZE (1 << ALLOC_COVERED_ORDER)
+#define ALLOC_COVERED_HNEXT(h) hash_32(h, ALLOC_COVERED_ORDER)
#define ALLOC_COVERED_MASK (ALLOC_COVERED_SIZE - 1)
static atomic_t alloc_covered[ALLOC_COVERED_SIZE];

/* Stack depth used to determine uniqueness of an allocation. */
#define UNIQUE_ALLOC_STACK_DEPTH 8UL

+/*
+ * Randomness for stack hashes, making the same collisions across reboots and
+ * different machines less likely.
+ */
+static u32 stack_hash_seed __ro_after_init;
+
/* Statistics counters for debugfs. */
enum kfence_counter_id {
KFENCE_COUNTER_ALLOCATED,
@@ -166,12 +174,9 @@ static inline bool should_skip_covered(void)

static u32 get_alloc_stack_hash(unsigned long *stack_entries, size_t num_entries)
{
- /* Some randomness across reboots / different machines. */
- u32 seed = (u32)((unsigned long)__kfence_pool >> (BITS_PER_LONG - 32));
-
num_entries = min(num_entries, UNIQUE_ALLOC_STACK_DEPTH);
num_entries = filter_irq_stacks(stack_entries, num_entries);
- return jhash(stack_entries, num_entries * sizeof(stack_entries[0]), seed);
+ return jhash(stack_entries, num_entries * sizeof(stack_entries[0]), stack_hash_seed);
}

/*
@@ -759,6 +764,7 @@ void __init kfence_init(void)
if (!kfence_sample_interval)
return;

+ stack_hash_seed = (u32)random_get_entropy();
if (!kfence_init_pool()) {
pr_err("%s failed\n", __func__);
return;
--
2.33.0.685.g46640cef36-goog