Re: [PATCH 2/2] mm/cgroup: delay soft limit data allocation

From: Laurent Dufour
Date: Thu Feb 23 2017 - 04:00:23 EST


On 22/02/2017 19:24, Michal Hocko wrote:
> On Wed 22-02-17 18:50:19, Laurent Dufour wrote:
>> On 22/02/2017 18:11, Michal Hocko wrote:
>>> On Wed 22-02-17 16:58:11, Laurent Dufour wrote:
>>> [...]
>>>> static struct mem_cgroup_tree_per_node *
>>>> soft_limit_tree_node(int nid)
>>>> {
>>>> @@ -465,6 +497,8 @@ static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
>>>> struct mem_cgroup_tree_per_node *mctz;
>>>>
>>>> mctz = soft_limit_tree_from_page(page);
>>>> + if (!mctz)
>>>> + return;
>>>> /*
>>>> * Necessary to update all ancestors when hierarchy is used.
>>>> * because their event counter is not touched.
>>>> @@ -502,7 +536,8 @@ static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
>>>> for_each_node(nid) {
>>>> mz = mem_cgroup_nodeinfo(memcg, nid);
>>>> mctz = soft_limit_tree_node(nid);
>>>> - mem_cgroup_remove_exceeded(mz, mctz);
>>>> + if (mctz)
>>>> + mem_cgroup_remove_exceeded(mz, mctz);
>>>> }
>>>> }
>>>>
>>>
>>> this belongs to the previous patch, right?
>>
>> It may. I made the first patch fixing the panic I saw but if you prefer
>> this to be part of the first one, fair enough.
>
> Without these you would just blow up later AFAICS so the fix is not
> complete. Also this patch is not complete because the initialization
> code should clean up if the allocation fails half way. I have tried to
> do that and it blows the code size a bit. I am not convinced this is
> worth the savings after all...

I do agree, we will have more code than the data we don't want to allocate.

But your proposal sounds like the cleanest way to handle that, despite
the larger size of the code.
I'll send a new series in that way.

>
> Here is what I ended up:
> ---
> diff --git a/mm/memcontrol.c b/mm/memcontrol.c
> index 44fb1e80701a..54d73c20124e 100644
> --- a/mm/memcontrol.c
> +++ b/mm/memcontrol.c
> @@ -141,7 +141,7 @@ struct mem_cgroup_tree {
> struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
> };
>
> -static struct mem_cgroup_tree soft_limit_tree __read_mostly;
> +static struct mem_cgroup_tree *soft_limit_tree __read_mostly;
>
> /* for OOM */
> struct mem_cgroup_eventfd_list {
> @@ -381,7 +381,9 @@ mem_cgroup_page_nodeinfo(struct mem_cgroup *memcg, struct page *page)
> static struct mem_cgroup_tree_per_node *
> soft_limit_tree_node(int nid)
> {
> - return soft_limit_tree.rb_tree_per_node[nid];
> + if (!soft_limit_tree)
> + return NULL;
> + return soft_limit_tree->rb_tree_per_node[nid];
> }
>
> static struct mem_cgroup_tree_per_node *
> @@ -389,7 +391,9 @@ soft_limit_tree_from_page(struct page *page)
> {
> int nid = page_to_nid(page);
>
> - return soft_limit_tree.rb_tree_per_node[nid];
> + if (!soft_limit_tree)
> + return NULL;
> + return soft_limit_tree->rb_tree_per_node[nid];
> }
>
> static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_node *mz,
> @@ -2969,6 +2973,46 @@ static int memcg_update_tcp_limit(struct mem_cgroup *memcg, unsigned long limit)
> return ret;
> }
>
> +static bool soft_limit_initialize(void)
> +{
> + static DEFINE_MUTEX(soft_limit_mutex);
> + struct mem_cgroup_tree *tree;
> + bool ret = true;
> + int node;
> +
> + mutex_lock(&soft_limit_mutex);
> + if (soft_limit_tree)
> + goto out_unlock;
> +
> + tree = kzalloc(sizeof(*soft_limit_tree), GFP_KERNEL);
> + if (!tree) {
> + ret = false;
> + goto out_unlock;
> + }
> + for_each_node(node) {
> + struct mem_cgroup_tree_per_node *rtpn;
> +
> + rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL,
> + node_online(node) ? node : NUMA_NO_NODE);
> + if (!rtpn)
> + goto out_free;
> +
> + rtpn->rb_root = RB_ROOT;
> + spin_lock_init(&rtpn->lock);
> + tree->rb_tree_per_node[node] = rtpn;
> + }
> + WRITE_ONCE(soft_limit_tree, tree);
> +out_unlock:
> + mutex_unlock(&soft_limit_mutex);
> + return ret;
> +out_free:
> + for_each_node(node)
> + kfree(tree->rb_tree_per_node[node]);
> + kfree(tree);
> + ret = false;
> + goto out_unlock;
> +}
> +
> /*
> * The user of this function is...
> * RES_LIMIT.
> @@ -3007,6 +3051,11 @@ static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
> }
> break;
> case RES_SOFT_LIMIT:
> + if (!soft_limit_initialize()) {
> + ret = -ENOMEM;
> + break;
> + }
> +
> memcg->soft_limit = nr_pages;
> ret = 0;
> break;
> @@ -5800,17 +5849,6 @@ static int __init mem_cgroup_init(void)
> INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
> drain_local_stock);
>
> - for_each_node(node) {
> - struct mem_cgroup_tree_per_node *rtpn;
> -
> - rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL,
> - node_online(node) ? node : NUMA_NO_NODE);
> -
> - rtpn->rb_root = RB_ROOT;
> - spin_lock_init(&rtpn->lock);
> - soft_limit_tree.rb_tree_per_node[node] = rtpn;
> - }
> -
> return 0;
> }
> subsys_initcall(mem_cgroup_init);
>