[PATCH 1/4] fs/dcache: Limit numbers of negative dentries
From: Waiman Long
Date: Mon Jul 17 2017 - 09:41:55 EST
The number of positive dentries is limited by the number of files
in the filesystems. The number of negative dentries, however,
has no limit other than the total amount of memory available in
the system. So a rogue application that generates a lot of negative
dentries can potentially exhaust most of the memory available in the
system, impacting the performance of other running applications.
To prevent this from happening, the dcache code is now updated to limit
the number of negative dentries in the LRU lists that can be kept
as a percentage of total available system memory. The default is 5%
and can be changed by specifying the "neg_dentry_pc=" kernel command
line option.
Signed-off-by: Waiman Long <longman@xxxxxxxxxx>
---
Documentation/admin-guide/kernel-parameters.txt | 7 ++
fs/dcache.c | 154 +++++++++++++++++++++++-
include/linux/dcache.h | 1 +
3 files changed, 161 insertions(+), 1 deletion(-)
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index f701430..fc3c937 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -2372,6 +2372,13 @@
n2= [NET] SDL Inc. RISCom/N2 synchronous serial card
+ neg_dentry_pc= [KNL]
+ Range: 1-50
+ Default: 5
+ This parameter specifies the maximum number of negative
+ dentries allowed in the system as a percentage of
+ total system memory.
+
netdev= [NET] Network devices parameters
Format: <irq>,<io>,<mem_start>,<mem_end>,<name>
Note that mem_start is often overloaded to mean
diff --git a/fs/dcache.c b/fs/dcache.c
index f901413..6a0a844 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -130,8 +130,19 @@ struct dentry_stat_t dentry_stat = {
.age_limit = 45,
};
+/*
+ * Macros and variables to manage and count negative dentries.
+ */
+#define NEG_DENTRY_BATCH (1 << 8)
+static long neg_dentry_percpu_limit __read_mostly;
+static struct {
+ raw_spinlock_t nfree_lock;
+ long nfree; /* Negative dentry free pool */
+} ndblk ____cacheline_aligned_in_smp;
+
static DEFINE_PER_CPU(long, nr_dentry);
static DEFINE_PER_CPU(long, nr_dentry_unused);
+static DEFINE_PER_CPU(long, nr_dentry_neg);
#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
@@ -227,6 +238,86 @@ static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char
#endif
+/*
+ * There is a system-wide limit to the number of negative dentries allowed
+ * in the super blocks' LRU lists. The default limit is 5% of the total
+ * system memory. This limit can be changed by using the kernel command line
+ * option "neg_dentry_pc=" to specify the percentage of the total memory
+ * that can be used for negative dentries. That percentage must be in the
+ * 1-50% range.
+ *
+ * To avoid performance problems with a global counter on an SMP system,
+ * the tracking is done mostly on a per-cpu basis. The total limit is
+ * distributed in an 80/20 ratio to per-cpu counters and a global free pool.
+ *
+ * If a per-cpu counter runs out of negative dentries, it can borrow extra
+ * ones from the global free pool. If it has more than its percpu limit,
+ * the extra ones will be returned back to the global pool.
+ */
+
+/*
+ * Decrement negative dentry count if applicable.
+ */
+static void __neg_dentry_dec(struct dentry *dentry)
+{
+ if (unlikely(this_cpu_dec_return(nr_dentry_neg) < 0)) {
+ long *pcnt = get_cpu_ptr(&nr_dentry_neg);
+
+ if ((*pcnt < 0) && raw_spin_trylock(&ndblk.nfree_lock)) {
+ ACCESS_ONCE(ndblk.nfree) += NEG_DENTRY_BATCH;
+ *pcnt += NEG_DENTRY_BATCH;
+ raw_spin_unlock(&ndblk.nfree_lock);
+ }
+ put_cpu_ptr(&nr_dentry_neg);
+ }
+}
+
+static inline void neg_dentry_dec(struct dentry *dentry)
+{
+ if (unlikely(d_is_negative(dentry)))
+ __neg_dentry_dec(dentry);
+}
+
+/*
+ * Increment negative dentry count if applicable.
+ */
+static void __neg_dentry_inc(struct dentry *dentry)
+{
+ long cnt, *pcnt;
+
+ if (this_cpu_inc_return(nr_dentry_neg) <= neg_dentry_percpu_limit)
+ return;
+
+ pcnt = get_cpu_ptr(&nr_dentry_neg);
+ cnt = (READ_ONCE(ndblk.nfree) &&
+ (*pcnt > neg_dentry_percpu_limit)) ? NEG_DENTRY_BATCH : 0;
+
+ if (cnt && raw_spin_trylock(&ndblk.nfree_lock)) {
+ long val = READ_ONCE(ndblk.nfree);
+
+ if (val < cnt)
+ cnt = val;
+ ACCESS_ONCE(ndblk.nfree) -= cnt;
+ *pcnt -= cnt;
+ raw_spin_unlock(&ndblk.nfree_lock);
+ } else {
+ cnt = 0;
+ }
+ put_cpu_ptr(&nr_dentry_neg);
+ /*
+ * If there are too many negative dentries, set DCACHE_KILL_NEGATIVE
+ * flag to indicate that the dentry should be killed.
+ */
+ if (!cnt)
+ dentry->d_flags |= DCACHE_KILL_NEGATIVE;
+}
+
+static inline void neg_dentry_inc(struct dentry *dentry)
+{
+ if (unlikely(d_is_negative(dentry)))
+ __neg_dentry_inc(dentry);
+}
+
static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *ct, unsigned tcount)
{
/*
@@ -396,6 +487,7 @@ static void d_lru_add(struct dentry *dentry)
dentry->d_flags |= DCACHE_LRU_LIST;
this_cpu_inc(nr_dentry_unused);
WARN_ON_ONCE(!list_lru_add(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
+ neg_dentry_inc(dentry);
}
static void d_lru_del(struct dentry *dentry)
@@ -404,6 +496,7 @@ static void d_lru_del(struct dentry *dentry)
dentry->d_flags &= ~DCACHE_LRU_LIST;
this_cpu_dec(nr_dentry_unused);
WARN_ON_ONCE(!list_lru_del(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
+ neg_dentry_dec(dentry);
}
static void d_shrink_del(struct dentry *dentry)
@@ -434,6 +527,7 @@ static void d_lru_isolate(struct list_lru_one *lru, struct dentry *dentry)
dentry->d_flags &= ~DCACHE_LRU_LIST;
this_cpu_dec(nr_dentry_unused);
list_lru_isolate(lru, &dentry->d_lru);
+ neg_dentry_dec(dentry);
}
static void d_lru_shrink_move(struct list_lru_one *lru, struct dentry *dentry,
@@ -442,6 +536,7 @@ static void d_lru_shrink_move(struct list_lru_one *lru, struct dentry *dentry,
D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
dentry->d_flags |= DCACHE_SHRINK_LIST;
list_lru_isolate_move(lru, &dentry->d_lru, list);
+ neg_dentry_dec(dentry);
}
/*
@@ -603,7 +698,13 @@ static struct dentry *dentry_kill(struct dentry *dentry)
if (!IS_ROOT(dentry)) {
parent = dentry->d_parent;
- if (unlikely(!spin_trylock(&parent->d_lock))) {
+ /*
+ * Force the killing of this negative dentry when
+ * DCACHE_KILL_NEGATIVE flag is set.
+ */
+ if (unlikely(dentry->d_flags & DCACHE_KILL_NEGATIVE)) {
+ spin_lock(&parent->d_lock);
+ } else if (unlikely(!spin_trylock(&parent->d_lock))) {
if (inode)
spin_unlock(&inode->i_lock);
goto failed;
@@ -815,6 +916,9 @@ void dput(struct dentry *dentry)
dentry_lru_add(dentry);
+ if (unlikely(dentry->d_flags & DCACHE_KILL_NEGATIVE))
+ goto kill_it;
+
dentry->d_lockref.count--;
spin_unlock(&dentry->d_lock);
return;
@@ -1820,6 +1924,11 @@ static void __d_instantiate(struct dentry *dentry, struct inode *inode)
WARN_ON(d_in_lookup(dentry));
spin_lock(&dentry->d_lock);
+ /*
+ * Decrement negative dentry count if it was in the LRU list.
+ */
+ if (dentry->d_flags & DCACHE_LRU_LIST)
+ neg_dentry_dec(dentry);
hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
raw_write_seqcount_begin(&dentry->d_seq);
__d_set_inode_and_type(dentry, inode, add_flags);
@@ -3566,6 +3675,47 @@ void d_tmpfile(struct dentry *dentry, struct inode *inode)
}
EXPORT_SYMBOL(d_tmpfile);
+static long neg_dentry_pc __initdata = 5;
+static bool neg_dentry_warn __initdata;
+static int __init set_neg_dentry_pc(char *str)
+{
+ ssize_t ret;
+ long new_pc = neg_dentry_pc;
+
+ if (!str)
+ return 0;
+ ret = kstrtol(str, 0, &new_pc);
+ if (ret || (new_pc < 1) || (new_pc > 50))
+ ret = 1;
+ else
+ neg_dentry_pc = new_pc;
+ if (ret)
+ neg_dentry_warn = true;
+ return ret ? 0 : 1;
+}
+__setup("neg_dentry_pc=", set_neg_dentry_pc);
+
+static void __init neg_dentry_init(void)
+{
+ /* Rough estimate of # of dentries allocated per page */
+ unsigned int nr_dentry_page = PAGE_SIZE/sizeof(struct dentry) - 1;
+ unsigned long cnt;
+
+ raw_spin_lock_init(&ndblk.nfree_lock);
+
+ /* 20% in global pool & 80% in percpu free */
+ ndblk.nfree = totalram_pages * nr_dentry_page * neg_dentry_pc / 500;
+ cnt = ndblk.nfree * 4 / num_possible_cpus();
+ if (unlikely(cnt < 2 * NEG_DENTRY_BATCH))
+ cnt = 2 * NEG_DENTRY_BATCH;
+ neg_dentry_percpu_limit = cnt;
+
+ if (neg_dentry_warn)
+ pr_warn("Warning: neg_dentry_pc must be within 1-50 range.\n");
+ pr_info("Negative dentry: percpu limit = %ld, free pool = %ld\n",
+ neg_dentry_percpu_limit, ndblk.nfree);
+}
+
static __initdata unsigned long dhash_entries;
static int __init set_dhash_entries(char *str)
{
@@ -3606,6 +3756,8 @@ static void __init dcache_init(void)
dentry_cache = KMEM_CACHE(dentry,
SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD|SLAB_ACCOUNT);
+ neg_dentry_init();
+
/* Hash may have been set up in dcache_init_early */
if (!hashdist)
return;
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 3f3ff4c..498233b 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -218,6 +218,7 @@ struct dentry_operations {
#define DCACHE_PAR_LOOKUP 0x10000000 /* being looked up (with parent locked shared) */
#define DCACHE_DENTRY_CURSOR 0x20000000
+#define DCACHE_KILL_NEGATIVE 0x40000000 /* Kill negative dentry */
extern seqlock_t rename_lock;
--
1.8.3.1