[PATCH v6 3/7] kernfs: Introduce hashed spinlocks to replace global kernfs_open_node_lock.

From: Imran Khan
Date: Mon Feb 14 2022 - 07:03:59 EST


In the current kernfs design a single spinlock, kernfs_open_node_lock, protects
kernfs_node->attr.open, i.e. the kernfs_open_node instances corresponding to
sysfs attributes. So tasks can contend on this spinlock even when they are
opening or closing different sysfs files. The contention is more apparent on
large systems with a few hundred CPUs, where at any point in time most of the
CPUs are running tasks that are opening, accessing or closing sysfs files.

Using hashed spinlocks in place of the single global spinlock can
significantly reduce contention around the global spinlock and hence provide
better scalability. Moreover, as these hashed spinlocks are not part of
kernfs_node objects, we will not see any significant change in the memory
utilization of kernfs-based file systems like sysfs, cgroupfs, etc.

This patch introduces hashed spinlocks that can be used in place of the
above-mentioned global spinlock, along with the interfaces needed to use them.
The next patch makes use of these interfaces and replaces the global spinlock
with the hashed ones.
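
For reference, a caller of these interfaces is expected to take the hashed
lock via kernfs_open_node_spinlock() and release the returned lock with
spin_unlock_irq(), roughly along the lines of the sketch below (the function
shown is only illustrative; the actual conversions happen in the next patch):

static void example_update_open_node(struct kernfs_node *kn)
{
	spinlock_t *lock;

	/* hashes kn to pick a bucket, takes spin_lock_irq() and returns the lock */
	lock = kernfs_open_node_spinlock(kn);
	/* ... manipulate kn->attr.open under the hashed lock ... */
	spin_unlock_irq(lock);
}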

Signed-off-by: Imran Khan <imran.f.khan@xxxxxxxxxx>
---
 fs/kernfs/kernfs-internal.h | 20 ++++++++++++++++++++
 fs/kernfs/mount.c           |  4 +++-
 include/linux/kernfs.h      | 11 +++++++++--
 3 files changed, 32 insertions(+), 3 deletions(-)

diff --git a/fs/kernfs/kernfs-internal.h b/fs/kernfs/kernfs-internal.h
index 03e983953eda4..593395f325a18 100644
--- a/fs/kernfs/kernfs-internal.h
+++ b/fs/kernfs/kernfs-internal.h
@@ -170,4 +170,24 @@ static inline struct mutex *kernfs_open_file_mutex_lock(struct kernfs_node *kn)
 	return lock;
 }
 
+static inline spinlock_t *
+kernfs_open_node_spinlock_ptr(struct kernfs_node *kn)
+{
+	int idx = hash_ptr(kn, NR_KERNFS_LOCK_BITS);
+
+	return &kernfs_locks->open_node_locks[idx].lock;
+}
+
+static inline spinlock_t *
+kernfs_open_node_spinlock(struct kernfs_node *kn)
+{
+	spinlock_t *lock;
+
+	lock = kernfs_open_node_spinlock_ptr(kn);
+
+	spin_lock_irq(lock);
+
+	return lock;
+}
+
 #endif /* __KERNFS_INTERNAL_H */
diff --git a/fs/kernfs/mount.c b/fs/kernfs/mount.c
index fa3fa22c95b21..809b738739b18 100644
--- a/fs/kernfs/mount.c
+++ b/fs/kernfs/mount.c
@@ -395,8 +395,10 @@ void __init kernfs_lock_init(void)
 	kernfs_locks = kmalloc(sizeof(struct kernfs_global_locks), GFP_KERNEL);
 	WARN_ON(!kernfs_locks);
 
-	for (count = 0; count < NR_KERNFS_LOCKS; count++)
+	for (count = 0; count < NR_KERNFS_LOCKS; count++) {
 		mutex_init(&kernfs_locks->open_file_mutex[count].lock);
+		spin_lock_init(&kernfs_locks->open_node_locks[count].lock);
+	}
 }
 
 void __init kernfs_init(void)
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h
index 3f72d38d48e31..7ee0595b315a2 100644
--- a/include/linux/kernfs.h
+++ b/include/linux/kernfs.h
@@ -19,6 +19,7 @@
 #include <linux/wait.h>
 #include <linux/rwsem.h>
 #include <linux/cache.h>
+#include <linux/spinlock.h>
 
 struct file;
 struct dentry;
@@ -75,20 +76,26 @@ struct kernfs_iattrs;
  * kernfs_open_file.
  * kernfs_open_files are chained at kernfs_open_node->files, which is
  * protected by kernfs_open_file_mutex.lock.
+ *
+ * kernfs_node->attr.open points to kernfs_open_node. attr.open is
+ * protected by kernfs_open_node_lock.lock.
  */
-
 struct kernfs_open_file_mutex {
 	struct mutex lock;
 } ____cacheline_aligned_in_smp;
 
+struct kernfs_open_node_lock {
+	spinlock_t lock;
+} ____cacheline_aligned_in_smp;
+
 /*
  * To reduce possible contention in sysfs access, arising due to single
  * locks, use an array of locks and use kernfs_node object address as
  * hash keys to get the index of these locks.
  */
-
 struct kernfs_global_locks {
 	struct kernfs_open_file_mutex open_file_mutex[NR_KERNFS_LOCKS];
+	struct kernfs_open_node_lock open_node_locks[NR_KERNFS_LOCKS];
 };
 
 enum kernfs_node_type {
--
2.30.2