[patch] mm: memcontrol: lockless page counters fix

From: Johannes Weiner
Date: Mon Sep 22 2014 - 13:54:24 EST


- renamed limited to failcnt again [vladimir]
- based page counter range on LONG_MAX [vladimir]
- page_counter_read() [vladimir]
- page_counter_sub() [vladimir]
- reworked the nofail charging [vladimir]
- page_counter_reset_watermark() [vladimir]
- fixed hugepage limit page alignment [vladimir]
- fixed page_counter_sub() return value [vladimir]
- fixed kmem's idea of unlimited [vladimir]
- fixed tcp memcontrol's idea of unlimited [vladimir]
- fixed tcp memcontrol's usage reporting [vladimir]
- serialized page_counter_limit() callsites [vladimir]
---
include/linux/memcontrol.h | 24 ++++++---
include/net/sock.h | 8 +--
mm/hugetlb_cgroup.c | 22 ++++----
mm/memcontrol.c | 123 +++++++++++++++++++++++++--------------------
net/ipv4/tcp_memcontrol.c | 18 ++++---
5 files changed, 115 insertions(+), 80 deletions(-)
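
For reference, a minimal usage sketch of the reworked API as declared in
memcontrol.h below. This is illustrative only and not part of the patch;
the my_* names and the separate my_limit_mutex are made up, loosely
mirroring how the memcg/hugetlb/tcp callsites use the counters:

        /* illustrative sketch, kernel context assumed */
        #include <linux/memcontrol.h>
        #include <linux/mutex.h>

        static struct page_counter my_parent, my_counter;  /* hypothetical */
        static DEFINE_MUTEX(my_limit_mutex);   /* limit writers serialize here */

        static void my_counters_init(void)
        {
                page_counter_init(&my_parent, NULL);
                page_counter_init(&my_counter, &my_parent);
        }

        static int my_charge(unsigned long nr_pages)
        {
                struct page_counter *fail;

                /*
                 * Checks the limit at every level, unwinds its own partial
                 * charge on failure and reports the level that hit its
                 * limit through @fail (which may no longer be NULL).
                 */
                if (page_counter_try_charge(&my_counter, nr_pages, &fail))
                        return -ENOMEM;
                return 0;
        }

        static void my_charge_nofail(unsigned long nr_pages)
        {
                /* non-failing variant, e.g. for the root group */
                page_counter_charge(&my_counter, nr_pages);
        }

        static void my_uncharge(unsigned long nr_pages)
        {
                /* uncharges the whole hierarchy, like the charge did */
                page_counter_uncharge(&my_counter, nr_pages);
        }

        static int my_set_limit(unsigned long nr_pages)
        {
                int ret;

                /* page_counter_limit() callers serialize among themselves */
                mutex_lock(&my_limit_mutex);
                ret = page_counter_limit(&my_counter, nr_pages);
                mutex_unlock(&my_limit_mutex);
                return ret;
        }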

diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index bf8fb1a05597..a8b939376a5d 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -62,13 +62,13 @@ struct page_counter {

/* legacy */
unsigned long watermark;
- unsigned long limited;
+ unsigned long failcnt;
};

#if BITS_PER_LONG == 32
-#define PAGE_COUNTER_MAX ULONG_MAX
+#define PAGE_COUNTER_MAX LONG_MAX
#else
-#define PAGE_COUNTER_MAX (ULONG_MAX / PAGE_SIZE)
+#define PAGE_COUNTER_MAX (LONG_MAX / PAGE_SIZE)
#endif

static inline void page_counter_init(struct page_counter *counter,
@@ -79,13 +79,25 @@ static inline void page_counter_init(struct page_counter *counter,
counter->parent = parent;
}

-int page_counter_cancel(struct page_counter *counter, unsigned long nr_pages);
-int page_counter_charge(struct page_counter *counter, unsigned long nr_pages,
- struct page_counter **fail);
+static inline unsigned long page_counter_read(struct page_counter *counter)
+{
+ return atomic_long_read(&counter->count);
+}
+
+int page_counter_sub(struct page_counter *counter, unsigned long nr_pages);
+void page_counter_charge(struct page_counter *counter, unsigned long nr_pages);
+int page_counter_try_charge(struct page_counter *counter,
+ unsigned long nr_pages,
+ struct page_counter **fail);
int page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages);
int page_counter_limit(struct page_counter *counter, unsigned long limit);
int page_counter_memparse(const char *buf, unsigned long *nr_pages);

+static inline void page_counter_reset_watermark(struct page_counter *counter)
+{
+ counter->watermark = page_counter_read(counter);
+}
+
int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
gfp_t gfp_mask, struct mem_cgroup **memcgp);
void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
diff --git a/include/net/sock.h b/include/net/sock.h
index f41749982668..9aa435de3ef1 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1217,9 +1217,9 @@ static inline void memcg_memory_allocated_add(struct cg_proto *prot,
unsigned long amt,
int *parent_status)
{
- page_counter_charge(&prot->memory_allocated, amt, NULL);
+ page_counter_charge(&prot->memory_allocated, amt);

- if (atomic_long_read(&prot->memory_allocated.count) >
+ if (page_counter_read(&prot->memory_allocated) >
prot->memory_allocated.limit)
*parent_status = OVER_LIMIT;
}
@@ -1236,7 +1236,7 @@ sk_memory_allocated(const struct sock *sk)
struct proto *prot = sk->sk_prot;

if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
- return atomic_long_read(&sk->sk_cgrp->memory_allocated.count);
+ return page_counter_read(&sk->sk_cgrp->memory_allocated);

return atomic_long_read(prot->memory_allocated);
}
@@ -1250,7 +1250,7 @@ sk_memory_allocated_add(struct sock *sk, int amt, int *parent_status)
memcg_memory_allocated_add(sk->sk_cgrp, amt, parent_status);
/* update the root cgroup regardless */
atomic_long_add_return(amt, prot->memory_allocated);
- return atomic_long_read(&sk->sk_cgrp->memory_allocated.count);
+ return page_counter_read(&sk->sk_cgrp->memory_allocated);
}

return atomic_long_add_return(amt, prot->memory_allocated);
diff --git a/mm/hugetlb_cgroup.c b/mm/hugetlb_cgroup.c
index e619b6b62f1f..abd1e8dc7b46 100644
--- a/mm/hugetlb_cgroup.c
+++ b/mm/hugetlb_cgroup.c
@@ -61,7 +61,7 @@ static inline bool hugetlb_cgroup_have_usage(struct hugetlb_cgroup *h_cg)
int idx;

for (idx = 0; idx < hugetlb_max_hstate; idx++) {
- if (atomic_long_read(&h_cg->hugepage[idx].count))
+ if (page_counter_read(&h_cg->hugepage[idx]))
return true;
}
return false;
@@ -127,11 +127,11 @@ static void hugetlb_cgroup_move_parent(int idx, struct hugetlb_cgroup *h_cg,
if (!parent) {
parent = root_h_cgroup;
/* root has no limit */
- page_counter_charge(&parent->hugepage[idx], nr_pages, NULL);
+ page_counter_charge(&parent->hugepage[idx], nr_pages);
}
counter = &h_cg->hugepage[idx];
/* Take the pages off the local counter */
- page_counter_cancel(counter, nr_pages);
+ page_counter_sub(counter, nr_pages);

set_hugetlb_cgroup(page, parent);
out:
@@ -186,7 +186,7 @@ again:
}
rcu_read_unlock();

- ret = page_counter_charge(&h_cg->hugepage[idx], nr_pages, &counter);
+ ret = page_counter_try_charge(&h_cg->hugepage[idx], nr_pages, &counter);
css_put(&h_cg->css);
done:
*ptr = h_cg;
@@ -254,18 +254,20 @@ static u64 hugetlb_cgroup_read_u64(struct cgroup_subsys_state *css,

switch (MEMFILE_ATTR(cft->private)) {
case RES_USAGE:
- return (u64)atomic_long_read(&counter->count) * PAGE_SIZE;
+ return (u64)page_counter_read(counter) * PAGE_SIZE;
case RES_LIMIT:
return (u64)counter->limit * PAGE_SIZE;
case RES_MAX_USAGE:
return (u64)counter->watermark * PAGE_SIZE;
case RES_FAILCNT:
- return counter->limited;
+ return counter->failcnt;
default:
BUG();
}
}

+static DEFINE_MUTEX(hugetlb_limit_mutex);
+
static ssize_t hugetlb_cgroup_write(struct kernfs_open_file *of,
char *buf, size_t nbytes, loff_t off)
{
@@ -285,8 +287,10 @@ static ssize_t hugetlb_cgroup_write(struct kernfs_open_file *of,

switch (MEMFILE_ATTR(of_cft(of)->private)) {
case RES_LIMIT:
- nr_pages = ALIGN(nr_pages, huge_page_shift(&hstates[idx]));
+ nr_pages = ALIGN(nr_pages, 1UL<<huge_page_order(&hstates[idx]));
+ mutex_lock(&hugetlb_limit_mutex);
ret = page_counter_limit(&h_cg->hugepage[idx], nr_pages);
+ mutex_unlock(&hugetlb_limit_mutex);
break;
default:
ret = -EINVAL;
@@ -306,10 +310,10 @@ static ssize_t hugetlb_cgroup_reset(struct kernfs_open_file *of,

switch (MEMFILE_ATTR(of_cft(of)->private)) {
case RES_MAX_USAGE:
- counter->watermark = atomic_long_read(&counter->count);
+ page_counter_reset_watermark(counter);
break;
case RES_FAILCNT:
- counter->limited = 0;
+ counter->failcnt = 0;
break;
default:
ret = -EINVAL;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index dfd3b15a57e8..9dec20b3c928 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -65,7 +65,7 @@

#include <trace/events/vmscan.h>

-int page_counter_cancel(struct page_counter *counter, unsigned long nr_pages)
+int page_counter_sub(struct page_counter *counter, unsigned long nr_pages)
{
long new;

@@ -74,28 +74,41 @@ int page_counter_cancel(struct page_counter *counter, unsigned long nr_pages)
if (WARN_ON(unlikely(new < 0)))
atomic_long_set(&counter->count, 0);

- return new > 1;
+ return new > 0;
}

-int page_counter_charge(struct page_counter *counter, unsigned long nr_pages,
- struct page_counter **fail)
+void page_counter_charge(struct page_counter *counter, unsigned long nr_pages)
+{
+ struct page_counter *c;
+
+ for (c = counter; c; c = c->parent) {
+ long new;
+
+ new = atomic_long_add_return(nr_pages, &c->count);
+
+ if (new > c->watermark)
+ c->watermark = new;
+ }
+}
+
+int page_counter_try_charge(struct page_counter *counter,
+ unsigned long nr_pages,
+ struct page_counter **fail)
{
struct page_counter *c;

for (c = counter; c; c = c->parent) {
for (;;) {
- unsigned long count;
- unsigned long new;
+ long count;
+ long new;

count = atomic_long_read(&c->count);

new = count + nr_pages;
if (new > c->limit) {
- c->limited++;
- if (fail) {
- *fail = c;
- goto failed;
- }
+ c->failcnt++;
+ *fail = c;
+ goto failed;
}

if (atomic_long_cmpxchg(&c->count, count, new) != count)
@@ -111,7 +124,7 @@ int page_counter_charge(struct page_counter *counter, unsigned long nr_pages,

failed:
for (c = counter; c != *fail; c = c->parent)
- page_counter_cancel(c, nr_pages);
+ page_counter_sub(c, nr_pages);

return -ENOMEM;
}
@@ -124,7 +137,7 @@ int page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages)
for (c = counter; c; c = c->parent) {
int remainder;

- remainder = page_counter_cancel(c, nr_pages);
+ remainder = page_counter_sub(c, nr_pages);
if (c == counter && !remainder)
ret = 0;
}
@@ -135,8 +148,8 @@ int page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages)
int page_counter_limit(struct page_counter *counter, unsigned long limit)
{
for (;;) {
- unsigned long count;
unsigned long old;
+ long count;

count = atomic_long_read(&counter->count);

@@ -751,7 +764,7 @@ static void disarm_kmem_keys(struct mem_cgroup *memcg)
* This check can't live in kmem destruction function,
* since the charges will outlive the cgroup
*/
- WARN_ON(atomic_long_read(&memcg->kmem.count));
+ WARN_ON(page_counter_read(&memcg->kmem));
}
#else
static void disarm_kmem_keys(struct mem_cgroup *memcg)
@@ -858,7 +871,7 @@ static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_zone *mz,

static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
{
- unsigned long nr_pages = atomic_long_read(&memcg->memory.count);
+ unsigned long nr_pages = page_counter_read(&memcg->memory);
unsigned long soft_limit = ACCESS_ONCE(memcg->soft_limit);
unsigned long excess = 0;

@@ -1609,13 +1622,13 @@ static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
unsigned long count;
unsigned long limit;

- count = atomic_long_read(&memcg->memory.count);
+ count = page_counter_read(&memcg->memory);
limit = ACCESS_ONCE(memcg->memory.limit);
if (count < limit)
margin = limit - count;

if (do_swap_account) {
- count = atomic_long_read(&memcg->memsw.count);
+ count = page_counter_read(&memcg->memsw);
limit = ACCESS_ONCE(memcg->memsw.limit);
if (count < limit)
margin = min(margin, limit - count);
@@ -1763,14 +1776,14 @@ void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
rcu_read_unlock();

pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
- K((u64)atomic_long_read(&memcg->memory.count)),
- K((u64)memcg->memory.limit), memcg->memory.limited);
+ K((u64)page_counter_read(&memcg->memory)),
+ K((u64)memcg->memory.limit), memcg->memory.failcnt);
pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
- K((u64)atomic_long_read(&memcg->memsw.count)),
- K((u64)memcg->memsw.limit), memcg->memsw.limited);
+ K((u64)page_counter_read(&memcg->memsw)),
+ K((u64)memcg->memsw.limit), memcg->memsw.failcnt);
pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
- K((u64)atomic_long_read(&memcg->kmem.count)),
- K((u64)memcg->kmem.limit), memcg->kmem.limited);
+ K((u64)page_counter_read(&memcg->kmem)),
+ K((u64)memcg->kmem.limit), memcg->kmem.failcnt);

for_each_mem_cgroup_tree(iter, memcg) {
pr_info("Memory cgroup stats for ");
@@ -2604,10 +2617,10 @@ retry:
if (consume_stock(memcg, nr_pages))
goto done;

- if (!page_counter_charge(&memcg->memory, batch, &counter)) {
+ if (!page_counter_try_charge(&memcg->memory, batch, &counter)) {
if (!do_swap_account)
goto done_restock;
- if (!page_counter_charge(&memcg->memsw, batch, &counter))
+ if (!page_counter_try_charge(&memcg->memsw, batch, &counter))
goto done_restock;
page_counter_uncharge(&memcg->memory, batch);
mem_over_limit = mem_cgroup_from_counter(counter, memsw);
@@ -2877,7 +2890,7 @@ static int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp,
struct page_counter *counter;
int ret = 0;

- ret = page_counter_charge(&memcg->kmem, nr_pages, &counter);
+ ret = page_counter_try_charge(&memcg->kmem, nr_pages, &counter);
if (ret < 0)
return ret;

@@ -2898,9 +2911,9 @@ static int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp,
* when the allocation triggers should have been already
* directed to the root cgroup in memcontrol.h
*/
- page_counter_charge(&memcg->memory, nr_pages, NULL);
+ page_counter_charge(&memcg->memory, nr_pages);
if (do_swap_account)
- page_counter_charge(&memcg->memsw, nr_pages, NULL);
+ page_counter_charge(&memcg->memsw, nr_pages);
ret = 0;
} else if (ret)
page_counter_uncharge(&memcg->kmem, nr_pages);
@@ -3558,9 +3571,9 @@ static int mem_cgroup_move_parent(struct page *page,
pc, child, parent);
if (!ret) {
/* Take charge off the local counters */
- page_counter_cancel(&child->memory, nr_pages);
+ page_counter_sub(&child->memory, nr_pages);
if (do_swap_account)
- page_counter_cancel(&child->memsw, nr_pages);
+ page_counter_sub(&child->memsw, nr_pages);
}

if (nr_pages > 1)
@@ -3665,7 +3678,7 @@ void mem_cgroup_print_bad_page(struct page *page)
}
#endif

-static DEFINE_MUTEX(set_limit_mutex);
+static DEFINE_MUTEX(memcg_limit_mutex);

static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
unsigned long limit)
@@ -3684,7 +3697,7 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
retry_count = MEM_CGROUP_RECLAIM_RETRIES *
mem_cgroup_count_children(memcg);

- oldusage = atomic_long_read(&memcg->memory.count);
+ oldusage = page_counter_read(&memcg->memory);

do {
if (signal_pending(current)) {
@@ -3692,23 +3705,23 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
break;
}

- mutex_lock(&set_limit_mutex);
+ mutex_lock(&memcg_limit_mutex);
if (limit > memcg->memsw.limit) {
- mutex_unlock(&set_limit_mutex);
+ mutex_unlock(&memcg_limit_mutex);
ret = -EINVAL;
break;
}
if (limit > memcg->memory.limit)
enlarge = true;
ret = page_counter_limit(&memcg->memory, limit);
- mutex_unlock(&set_limit_mutex);
+ mutex_unlock(&memcg_limit_mutex);

if (!ret)
break;

try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, true);

- curusage = atomic_long_read(&memcg->memory.count);
+ curusage = page_counter_read(&memcg->memory);
/* Usage is reduced ? */
if (curusage >= oldusage)
retry_count--;
@@ -3735,7 +3748,7 @@ static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
retry_count = MEM_CGROUP_RECLAIM_RETRIES *
mem_cgroup_count_children(memcg);

- oldusage = atomic_long_read(&memcg->memsw.count);
+ oldusage = page_counter_read(&memcg->memsw);

do {
if (signal_pending(current)) {
@@ -3743,23 +3756,23 @@ static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
break;
}

- mutex_lock(&set_limit_mutex);
+ mutex_lock(&memcg_limit_mutex);
if (limit < memcg->memory.limit) {
- mutex_unlock(&set_limit_mutex);
+ mutex_unlock(&memcg_limit_mutex);
ret = -EINVAL;
break;
}
if (limit > memcg->memsw.limit)
enlarge = true;
ret = page_counter_limit(&memcg->memsw, limit);
- mutex_unlock(&set_limit_mutex);
+ mutex_unlock(&memcg_limit_mutex);

if (!ret)
break;

try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, false);

- curusage = atomic_long_read(&memcg->memsw.count);
+ curusage = page_counter_read(&memcg->memsw);
/* Usage is reduced ? */
if (curusage >= oldusage)
retry_count--;
@@ -3960,8 +3973,8 @@ static void mem_cgroup_reparent_charges(struct mem_cgroup *memcg)
* right after the check. RES_USAGE should be safe as we always
* charge before adding to the LRU.
*/
- } while (atomic_long_read(&memcg->memory.count) -
- atomic_long_read(&memcg->kmem.count) > 0);
+ } while (page_counter_read(&memcg->memory) -
+ page_counter_read(&memcg->kmem) > 0);
}

/*
@@ -4001,7 +4014,7 @@ static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
/* we call try-to-free pages for make this cgroup empty */
lru_add_drain_all();
/* try to free all pages in this cgroup */
- while (nr_retries && atomic_long_read(&memcg->memory.count)) {
+ while (nr_retries && page_counter_read(&memcg->memory)) {
int progress;

if (signal_pending(current))
@@ -4098,9 +4111,9 @@ static inline u64 mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
val += tree_stat(memcg, MEM_CGROUP_STAT_SWAP);
} else {
if (!swap)
- val = atomic_long_read(&memcg->memory.count);
+ val = page_counter_read(&memcg->memory);
else
- val = atomic_long_read(&memcg->memsw.count);
+ val = page_counter_read(&memcg->memsw);
}
return val << PAGE_SHIFT;
}
@@ -4139,13 +4152,13 @@ static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
return mem_cgroup_usage(memcg, false);
if (counter == &memcg->memsw)
return mem_cgroup_usage(memcg, true);
- return (u64)atomic_long_read(&counter->count) * PAGE_SIZE;
+ return (u64)page_counter_read(counter) * PAGE_SIZE;
case RES_LIMIT:
return (u64)counter->limit * PAGE_SIZE;
case RES_MAX_USAGE:
return (u64)counter->watermark * PAGE_SIZE;
case RES_FAILCNT:
- return counter->limited;
+ return counter->failcnt;
case RES_SOFT_LIMIT:
return (u64)memcg->soft_limit * PAGE_SIZE;
default:
@@ -4234,10 +4247,12 @@ static int memcg_update_kmem_limit(struct mem_cgroup *memcg,
{
int ret;

+ mutex_lock(&memcg_limit_mutex);
if (!memcg_kmem_is_active(memcg))
ret = memcg_activate_kmem(memcg, limit);
else
ret = page_counter_limit(&memcg->kmem, limit);
+ mutex_unlock(&memcg_limit_mutex);
return ret;
}

@@ -4255,7 +4270,7 @@ static int memcg_propagate_kmem(struct mem_cgroup *memcg)
* after this point, because it has at least one child already.
*/
if (memcg_kmem_is_active(parent))
- ret = __memcg_activate_kmem(memcg, ULONG_MAX);
+ ret = __memcg_activate_kmem(memcg, PAGE_COUNTER_MAX);
mutex_unlock(&activate_kmem_mutex);
return ret;
}
@@ -4331,10 +4346,10 @@ static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf,

switch (MEMFILE_ATTR(of_cft(of)->private)) {
case RES_MAX_USAGE:
- counter->watermark = atomic_long_read(&counter->count);
+ page_counter_reset_watermark(counter);
break;
case RES_FAILCNT:
- counter->limited = 0;
+ counter->failcnt = 0;
break;
default:
BUG();
@@ -4934,7 +4949,7 @@ static void kmem_cgroup_css_offline(struct mem_cgroup *memcg)

memcg_kmem_mark_dead(memcg);

- if (atomic_long_read(&memcg->kmem.count))
+ if (page_counter_read(&memcg->kmem))
return;

if (memcg_kmem_test_and_clear_dead(memcg))
@@ -5603,7 +5618,7 @@ static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
* call_rcu()
* offline_css()
* reparent_charges()
- * page_counter_charge()
+ * page_counter_try_charge()
* css_put()
* css_free()
* pc->mem_cgroup = dead memcg
diff --git a/net/ipv4/tcp_memcontrol.c b/net/ipv4/tcp_memcontrol.c
index 9a448bdb19e9..272327134a1b 100644
--- a/net/ipv4/tcp_memcontrol.c
+++ b/net/ipv4/tcp_memcontrol.c
@@ -68,7 +68,7 @@ static int tcp_update_limit(struct mem_cgroup *memcg, unsigned long nr_pages)
cg_proto->sysctl_mem[i] = min_t(long, nr_pages,
sysctl_tcp_mem[i]);

- if (nr_pages == ULONG_MAX / PAGE_SIZE)
+ if (nr_pages == PAGE_COUNTER_MAX)
clear_bit(MEMCG_SOCK_ACTIVE, &cg_proto->flags);
else {
/*
@@ -106,6 +106,8 @@ enum {
RES_FAILCNT,
};

+static DEFINE_MUTEX(tcp_limit_mutex);
+
static ssize_t tcp_cgroup_write(struct kernfs_open_file *of,
char *buf, size_t nbytes, loff_t off)
{
@@ -121,7 +123,9 @@ static ssize_t tcp_cgroup_write(struct kernfs_open_file *of,
ret = page_counter_memparse(buf, &nr_pages);
if (ret)
break;
+ mutex_lock(&tcp_limit_mutex);
ret = tcp_update_limit(memcg, nr_pages);
+ mutex_unlock(&tcp_limit_mutex);
break;
default:
ret = -EINVAL;
@@ -145,14 +149,15 @@ static u64 tcp_cgroup_read(struct cgroup_subsys_state *css, struct cftype *cft)
break;
case RES_USAGE:
if (!cg_proto)
- return atomic_long_read(&tcp_memory_allocated);
- val = atomic_long_read(&cg_proto->memory_allocated.count);
+ val = atomic_long_read(&tcp_memory_allocated);
+ else
+ val = page_counter_read(&cg_proto->memory_allocated);
val *= PAGE_SIZE;
break;
case RES_FAILCNT:
if (!cg_proto)
return 0;
- val = cg_proto->memory_allocated.limited;
+ val = cg_proto->memory_allocated.failcnt;
break;
case RES_MAX_USAGE:
if (!cg_proto)
@@ -179,11 +184,10 @@ static ssize_t tcp_cgroup_reset(struct kernfs_open_file *of,

switch (of_cft(of)->private) {
case RES_MAX_USAGE:
- cg_proto->memory_allocated.watermark =
- atomic_long_read(&cg_proto->memory_allocated.count);
+ page_counter_reset_watermark(&cg_proto->memory_allocated);
break;
case RES_FAILCNT:
- cg_proto->memory_allocated.limited = 0;
+ cg_proto->memory_allocated.failcnt = 0;
break;
}

--
2.1.0

