[PATCH] fs/kernel_read_file: add a clutch

From: Luis Chamberlain
Date: Fri Mar 24 2023 - 13:35:41 EST


Signed-off-by: Luis Chamberlain <mcgrof@xxxxxxxxxx>
---
fs/kernel_read_file.c | 52 +++++++++++++++++++++++++++++++++++++++++--
1 file changed, 50 insertions(+), 2 deletions(-)

diff --git a/fs/kernel_read_file.c b/fs/kernel_read_file.c
index 5d826274570c..2d55abe73b21 100644
--- a/fs/kernel_read_file.c
+++ b/fs/kernel_read_file.c
@@ -1,10 +1,52 @@
// SPDX-License-Identifier: GPL-2.0-only
+
+#define pr_fmt(fmt) "kread: " fmt
+
#include <linux/fs.h>
#include <linux/fs_struct.h>
#include <linux/kernel_read_file.h>
#include <linux/security.h>
#include <linux/vmalloc.h>

+/*
+ * This clutch ensures we only allow a certain number of concurrent threads
+ * at a time to allocate space, and they must all finish within the timeout
+ * specified. Anything more and we know we're thrashing.
+ */
+#define MAX_KREAD_CONCURRENT 20
+static atomic_t kread_concurrent_max = ATOMIC_INIT(MAX_KREAD_CONCURRENT);
+static DECLARE_WAIT_QUEUE_HEAD(kread_wq);
+
+/*
+ * How many seconds to wait when *all* MAX_KREAD_CONCURRENT threads have
+ * been running at the same time without any of them returning.
+ */
+#define MAX_KREAD_ALL_BUSY_TIMEOUT 1
+
+/*
+ * Throttle concurrent readers: take one slot from kread_concurrent_max,
+ * or wait (killable) up to MAX_KREAD_ALL_BUSY_TIMEOUT seconds for one.
+ *
+ * Returns 0 with a slot held (caller must atomic_inc() + wake_up() when
+ * done), -ETIME if all slots stayed busy past the timeout, or
+ * -ERESTARTSYS if a fatal signal arrived while waiting.
+ */
+static int kernel_read_check_concurrent(void)
+{
+	int ret;
+
+	if (atomic_dec_if_positive(&kread_concurrent_max) < 0) {
+		/* printk format strings must end in "\n"; %d matches the int types */
+		pr_warn_ratelimited("kread_concurrent_max (%d) close to 0 (max_loads: %d), throttling...\n",
+				    atomic_read(&kread_concurrent_max),
+				    MAX_KREAD_CONCURRENT);
+		ret = wait_event_killable_timeout(kread_wq,
+				atomic_dec_if_positive(&kread_concurrent_max) >= 0,
+				MAX_KREAD_ALL_BUSY_TIMEOUT * HZ);
+		if (!ret) {
+			pr_warn_ratelimited("reading cannot be processed, kernel busy with %d threads reading files now for more than %d seconds\n",
+					    MAX_KREAD_CONCURRENT, MAX_KREAD_ALL_BUSY_TIMEOUT);
+			return -ETIME;
+		} else if (ret == -ERESTARTSYS) {
+			pr_warn_ratelimited("sigkill sent for kernel read, giving up\n");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
/**
* kernel_read_file() - read file contents into a kernel buffer
*
@@ -68,10 +110,14 @@ ssize_t kernel_read_file(struct file *file, loff_t offset, void **buf,
goto out;
}

+ ret = kernel_read_check_concurrent();
+ if (ret)
+ goto out;
+
whole_file = (offset == 0 && i_size <= buf_size);
ret = security_kernel_read_file(file, id, whole_file);
if (ret)
- goto out;
+ goto out_allow_new_read;

if (file_size)
*file_size = i_size;
@@ -117,7 +163,9 @@ ssize_t kernel_read_file(struct file *file, loff_t offset, void **buf,
*buf = NULL;
}
}
-
+out_allow_new_read:
+ atomic_inc(&kread_concurrent_max);
+ wake_up(&kread_wq);
out:
allow_write_access(file);
return ret == 0 ? copied : ret;
--
2.39.2