[PATCH] kvfree_rcu: Release page cache under memory pressure

From: qiang.zhang
Date: Thu Jan 28 2021 - 07:48:48 EST


From: Zqiang <qiang.zhang@xxxxxxxxxxxxx>

Free the page cache held by each per-CPU krcp when the system is under
memory pressure: account the cached pages in the shrinker's count callback
and release them from the scan callback.

Signed-off-by: Zqiang <qiang.zhang@xxxxxxxxxxxxx>
---
kernel/rcu/tree.c | 26 ++++++++++++++++++++++++++
1 file changed, 26 insertions(+)

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index c1ae1e52f638..4e1c14b12bdd 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -3571,17 +3571,41 @@ void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
 }
 EXPORT_SYMBOL_GPL(kvfree_call_rcu);
 
+static inline int free_krc_page_cache(struct kfree_rcu_cpu *krcp)
+{
+	unsigned long flags;
+	struct kvfree_rcu_bulk_data *bnode;
+	int i, num = 0;
+
+	for (i = 0; i < rcu_min_cached_objs; i++) {
+		raw_spin_lock_irqsave(&krcp->lock, flags);
+		bnode = get_cached_bnode(krcp);
+		raw_spin_unlock_irqrestore(&krcp->lock, flags);
+		if (!bnode)
+			break;
+		free_page((unsigned long)bnode);
+		num++;
+	}
+
+	return num;
+}
+
 static unsigned long
 kfree_rcu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
 {
 	int cpu;
 	unsigned long count = 0;
+	unsigned long flags;
 
 	/* Snapshot count of all CPUs */
 	for_each_possible_cpu(cpu) {
 		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
 
 		count += READ_ONCE(krcp->count);
+
+		raw_spin_lock_irqsave(&krcp->lock, flags);
+		count += krcp->nr_bkv_objs;
+		raw_spin_unlock_irqrestore(&krcp->lock, flags);
 	}
 
 	return count;
@@ -3604,6 +3628,8 @@ kfree_rcu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 		else
 			raw_spin_unlock_irqrestore(&krcp->lock, flags);
 
+		count += free_krc_page_cache(krcp);
+
 		sc->nr_to_scan -= count;
 		freed += count;
 
--
2.17.1