[PATCH] fs: fall back to file_ref_put() for non-last reference

From: Mateusz Guzik
Date: Fri Apr 18 2025 - 08:50:26 EST


This reduces the slowdown in the face of multiple callers issuing close()
on what turns out not to be the last reference: instead of looping on
cmpxchg, try it once for the expected last-reference case and otherwise
fall back to file_ref_put(), which uses a plain atomic decrement and
never retries.

Signed-off-by: Mateusz Guzik <mjguzik@xxxxxxxxx>
Reported-by: kernel test robot <oliver.sang@xxxxxxxxx>
Closes: https://lore.kernel.org/oe-lkp/202504171513.6d6f8a16-lkp@xxxxxxxxx
---
 fs/file.c                |  2 +-
 include/linux/file_ref.h | 19 ++++++-------------
 2 files changed, 7 insertions(+), 14 deletions(-)
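
For illustration only, not part of the patch: a minimal userspace model of
the new fast path, using C11 atomics in place of the kernel's atomic_long_*
helpers. ONEREF, DEAD and put_close() are ad hoc stand-ins; the counting is
biased by one as in include/linux/file_ref.h (0 means one reference held,
negative values are the dead zone), and the dead-zone bookkeeping that
__file_ref_put() performs is elided.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define ONEREF	0L	/* biased count: 0 means one reference held */
#define DEAD	(-1L)	/* stand-in for the FILE_REF_DEAD zone */

static atomic_long refcnt;

static bool put_close(void)
{
	long old = atomic_load(&refcnt);

	/* Expected case: we hold the last reference, retire it in one shot. */
	if (old == ONEREF &&
	    atomic_compare_exchange_strong(&refcnt, &old, DEAD))
		return true;

	/*
	 * Raced, or not the last reference: one plain decrement, no retry
	 * loop, mirroring file_ref_put()'s atomic_long_dec_return().
	 */
	return atomic_fetch_sub(&refcnt, 1) - 1 < ONEREF;
}

int main(void)
{
	atomic_init(&refcnt, 1);	/* biased: two references held */
	printf("%d\n", put_close());	/* 0: a reference remains */
	printf("%d\n", put_close());	/* 1: last reference dropped */
	return 0;
}

The point of the rewrite shows up in the fallback: the old loop could spin
on cmpxchg under contention even for callers that did not hold the last
reference, while this shape pays for at most one cmpxchg and otherwise
degrades to the same unconditional decrement file_ref_put() uses.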

diff --git a/fs/file.c b/fs/file.c
index dc3f7e120e3e..3a3146664cf3 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -26,7 +26,7 @@
 
 #include "internal.h"
 
-bool __file_ref_put_badval(file_ref_t *ref, unsigned long cnt)
+static noinline bool __file_ref_put_badval(file_ref_t *ref, unsigned long cnt)
 {
 	/*
 	 * If the reference count was already in the dead zone, then this
diff --git a/include/linux/file_ref.h b/include/linux/file_ref.h
index 7db62fbc0500..0f25ba769f48 100644
--- a/include/linux/file_ref.h
+++ b/include/linux/file_ref.h
@@ -61,7 +61,6 @@ static inline void file_ref_init(file_ref_t *ref, unsigned long cnt)
 	atomic_long_set(&ref->refcnt, cnt - 1);
 }
 
-bool __file_ref_put_badval(file_ref_t *ref, unsigned long cnt);
 bool __file_ref_put(file_ref_t *ref, unsigned long cnt);
 
 /**
@@ -178,20 +177,14 @@ static __always_inline __must_check bool file_ref_put(file_ref_t *ref)
  */
 static __always_inline __must_check bool file_ref_put_close(file_ref_t *ref)
 {
-	long old, new;
+	long old;
 
 	old = atomic_long_read(&ref->refcnt);
-	do {
-		if (unlikely(old < 0))
-			return __file_ref_put_badval(ref, old);
-
-		if (old == FILE_REF_ONEREF)
-			new = FILE_REF_DEAD;
-		else
-			new = old - 1;
-	} while (!atomic_long_try_cmpxchg(&ref->refcnt, &old, new));
-
-	return new == FILE_REF_DEAD;
+	if (likely(old == FILE_REF_ONEREF)) {
+		if (likely(atomic_long_try_cmpxchg(&ref->refcnt, &old, FILE_REF_DEAD)))
+			return true;
+	}
+	return file_ref_put(ref);
 }
 
 /**
--
2.48.1