Re: [PATCH v3 1/2] of: of_node_get()/of_node_put() nodes held in phandle cache
From: Frank Rowand
Date: Tue Dec 18 2018 - 14:44:08 EST
On 12/18/18 11:40 AM, frowand.list@xxxxxxxxx wrote:
> From: Frank Rowand <frank.rowand@xxxxxxxx>
>
> The phandle cache contains struct device_node pointers. The refcount
> of those nodes was not incremented while they were held in the cache,
> allowing a use-after-free error after kfree() of the node. Add the
> proper refcount increment when a node is placed in the cache and the
> matching decrement when it is removed.
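
The failure mode in miniature, as a userspace sketch with hypothetical,
simplified types (not the kernel code): the cache keeps a bare pointer,
so once the last real reference is dropped the slot dangles.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for struct device_node and of_node_put(). */
struct node {
	int refcount;
	unsigned int phandle;
};

static struct node *cache[16];	/* bare pointers, as before this patch */

static void node_put(struct node *np)
{
	if (np && --np->refcount == 0)
		free(np);	/* kfree() in the kernel */
}

int main(void)
{
	struct node *np = malloc(sizeof(*np));

	np->refcount = 1;
	np->phandle = 5;

	cache[np->phandle & 15] = np;	/* the bug: no reference taken */
	node_put(np);			/* last reference gone, node freed */

	/* use after free: the cache still points at freed memory */
	printf("stale phandle: %u\n", cache[5]->phandle);
	return 0;
}

The patch closes this window by taking a reference when a node enters
the cache and dropping it when the cache is flushed.
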
>
> Fixes: 0b3ce78e90fc ("of: cache phandle nodes to reduce cost of of_find_node_by_phandle()")
>
> Signed-off-by: Frank Rowand <frank.rowand@xxxxxxxx>
> ---
>
> do not "cc: stable" unless the following commits are also in stable:
>
> commit e54192b48da7 ("of: fix phandle cache creation for DTs with no phandles")
> commit b9952b5218ad ("of: overlay: update phandle cache on overlay apply and remove")
> commit 0b3ce78e90fc ("of: cache phandle nodes to reduce cost of of_find_node_by_phandle()")
I should have carried this changelog forward:

changes since v1:
  - make __of_free_phandle_cache() static
-Frank
>
>
> drivers/of/base.c | 70 ++++++++++++++++++++++++++++++++++++-------------------
> 1 file changed, 46 insertions(+), 24 deletions(-)
>
> diff --git a/drivers/of/base.c b/drivers/of/base.c
> index 09692c9b32a7..6c33d63361b8 100644
> --- a/drivers/of/base.c
> +++ b/drivers/of/base.c
> @@ -116,9 +116,6 @@ int __weak of_node_to_nid(struct device_node *np)
> }
> #endif
>
> -static struct device_node **phandle_cache;
> -static u32 phandle_cache_mask;
> -
> /*
> * Assumptions behind phandle_cache implementation:
> * - phandle property values are in a contiguous range of 1..n
> @@ -127,6 +124,44 @@ int __weak of_node_to_nid(struct device_node *np)
> * - the phandle lookup overhead reduction provided by the cache
> * will likely be less
> */
> +
> +static struct device_node **phandle_cache;
> +static u32 phandle_cache_mask;
> +
> +/*
> + * Caller must hold devtree_lock.
> + */
> +static void __of_free_phandle_cache(void)
> +{
> + u32 cache_entries = phandle_cache_mask + 1;
> + u32 k;
> +
> + if (!phandle_cache)
> + return;
> +
> + for (k = 0; k < cache_entries; k++)
> + of_node_put(phandle_cache[k]);
> +
> + kfree(phandle_cache);
> + phandle_cache = NULL;
> +}
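
Note for reviewers: of_node_put() accepts NULL and does nothing with
it, so the loop above is safe over slots that were never populated.
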
> +
> +int of_free_phandle_cache(void)
> +{
> + unsigned long flags;
> +
> + raw_spin_lock_irqsave(&devtree_lock, flags);
> +
> + __of_free_phandle_cache();
> +
> + raw_spin_unlock_irqrestore(&devtree_lock, flags);
> +
> + return 0;
> +}
> +#if !defined(CONFIG_MODULES)
> +late_initcall_sync(of_free_phandle_cache);
> +#endif
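
As before this patch, when CONFIG_MODULES is not set the cache is
simply discarded once late initcalls have run; any later caller of
of_find_node_by_phandle() falls back to the full tree walk.
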
> +
> void of_populate_phandle_cache(void)
> {
> unsigned long flags;
> @@ -136,8 +171,7 @@ void of_populate_phandle_cache(void)
>
> raw_spin_lock_irqsave(&devtree_lock, flags);
>
> - kfree(phandle_cache);
> - phandle_cache = NULL;
> + __of_free_phandle_cache();
>
> for_each_of_allnodes(np)
> if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL)
> @@ -155,30 +189,15 @@ void of_populate_phandle_cache(void)
> goto out;
>
> for_each_of_allnodes(np)
> - if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL)
> + if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL) {
> + of_node_get(np);
> phandle_cache[np->phandle & phandle_cache_mask] = np;
> + }
>
> out:
> raw_spin_unlock_irqrestore(&devtree_lock, flags);
> }
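
An aside on the indexing this relies on: my reading of the allocation
code elided from this hunk is that the table holds
roundup_pow_of_two(n) entries with phandle_cache_mask = entries - 1,
so with contiguous phandles 1..n every node gets its own slot. A
standalone sketch:

#include <stdio.h>

int main(void)
{
	unsigned int n = 6;		/* phandles in the live tree */
	unsigned int entries = 8;	/* roundup_pow_of_two(6) */
	unsigned int mask = entries - 1;	/* phandle_cache_mask */
	unsigned int phandle;

	/* with n <= entries, no two phandles share a slot */
	for (phandle = 1; phandle <= n; phandle++)
		printf("phandle %u -> slot %u\n", phandle, phandle & mask);

	return 0;
}
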
>
> -int of_free_phandle_cache(void)
> -{
> - unsigned long flags;
> -
> - raw_spin_lock_irqsave(&devtree_lock, flags);
> -
> - kfree(phandle_cache);
> - phandle_cache = NULL;
> -
> - raw_spin_unlock_irqrestore(&devtree_lock, flags);
> -
> - return 0;
> -}
> -#if !defined(CONFIG_MODULES)
> -late_initcall_sync(of_free_phandle_cache);
> -#endif
> -
> void __init of_core_init(void)
> {
> struct device_node *np;
> @@ -1195,8 +1214,11 @@ struct device_node *of_find_node_by_phandle(phandle handle)
> if (!np) {
> for_each_of_allnodes(np)
> if (np->phandle == handle) {
> - if (phandle_cache)
> + if (phandle_cache) {
> + /* will put when removed from cache */
> + of_node_get(np);
> phandle_cache[masked_handle] = np;
> + }
> break;
> }
> }
>
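
For completeness, the caller-side contract is unchanged here:
of_find_node_by_phandle() still returns the node with its refcount
elevated (the of_node_get(np) sits just below this hunk), so callers
balance it with of_node_put(). A hypothetical example caller, not part
of the patch:

#include <linux/of.h>
#include <linux/printk.h>

static void example_show_node(phandle handle)
{
	struct device_node *np = of_find_node_by_phandle(handle);

	if (!np)
		return;

	pr_info("phandle %u -> %pOF\n", handle, np);
	of_node_put(np);	/* drop the reference the lookup took */
}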