Re: [PATCH v2 07/22] drm/msm: Do rpm get sooner in the submit path
From: Viresh Kumar
Date: Tue Nov 03 2020 - 00:47:22 EST
On 27-10-20, 17:05, Viresh Kumar wrote:
> It isn't that straightforward, unfortunately: we need to make sure the
> table doesn't get allocated for the same device twice, so
> find+allocate needs to happen within a locked region.
>
> I have taken a not-so-straightforward approach to fixing this issue;
> let's see if it fixes it or not.
>
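> To make the race concrete: it is the classic find-or-create TOCTOU. If
> the lookup and the insertion are locked separately, two callers can
> both miss in the lookup and both allocate a table for the same device.
> A minimal userspace sketch of the broken pattern (illustrative names,
> not the real OPP symbols; error handling trimmed):
>
> #include <pthread.h>
> #include <stdlib.h>
>
> struct table { const void *dev; struct table *next; };
>
> static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
> static struct table *tables;
>
> static struct table *find_table(const void *dev)
> {
> 	struct table *t;
>
> 	for (t = tables; t; t = t->next)
> 		if (t->dev == dev)
> 			return t;
> 	return NULL;
> }
>
> struct table *get_table_racy(const void *dev)
> {
> 	struct table *t;
>
> 	pthread_mutex_lock(&list_lock);
> 	t = find_table(dev);
> 	pthread_mutex_unlock(&list_lock);
> 	if (t)
> 		return t;
>
> 	/* Window: a second caller can run this same path now and
> 	 * also miss in find_table() for the same dev. */
> 	t = calloc(1, sizeof(*t));
> 	t->dev = dev;
>
> 	pthread_mutex_lock(&list_lock);
> 	t->next = tables;	/* may insert a duplicate entry */
> 	tables = t;
> 	pthread_mutex_unlock(&list_lock);
> 	return t;
> }
>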
> -------------------------8<-------------------------
>
> diff --git a/drivers/opp/core.c b/drivers/opp/core.c
> index 4ac4e7ce6b8b..6f4a73a6391f 100644
> --- a/drivers/opp/core.c
> +++ b/drivers/opp/core.c
> @@ -29,6 +29,8 @@
> LIST_HEAD(opp_tables);
> /* Lock to allow exclusive modification to the device and opp lists */
> DEFINE_MUTEX(opp_table_lock);
> +/* Flag indicating that the opp_tables list is being updated at the moment */
> +static bool opp_tables_busy;
>
> static struct opp_device *_find_opp_dev(const struct device *dev,
> struct opp_table *opp_table)
> @@ -1036,8 +1038,8 @@ static void _remove_opp_dev(struct opp_device *opp_dev,
> kfree(opp_dev);
> }
>
> -static struct opp_device *_add_opp_dev_unlocked(const struct device *dev,
> - struct opp_table *opp_table)
> +struct opp_device *_add_opp_dev(const struct device *dev,
> + struct opp_table *opp_table)
> {
> struct opp_device *opp_dev;
>
> @@ -1048,7 +1050,9 @@ static struct opp_device *_add_opp_dev_unlocked(const struct device *dev,
> /* Initialize opp-dev */
> opp_dev->dev = dev;
>
> + mutex_lock(&opp_table->lock);
> list_add(&opp_dev->node, &opp_table->dev_list);
> + mutex_unlock(&opp_table->lock);
>
> /* Create debugfs entries for the opp_table */
> opp_debug_register(opp_dev, opp_table);
> @@ -1056,18 +1060,6 @@ static struct opp_device *_add_opp_dev_unlocked(const struct device *dev,
> return opp_dev;
> }
>
> -struct opp_device *_add_opp_dev(const struct device *dev,
> - struct opp_table *opp_table)
> -{
> - struct opp_device *opp_dev;
> -
> - mutex_lock(&opp_table->lock);
> - opp_dev = _add_opp_dev_unlocked(dev, opp_table);
> - mutex_unlock(&opp_table->lock);
> -
> - return opp_dev;
> -}
> -
> static struct opp_table *_allocate_opp_table(struct device *dev, int index)
> {
> struct opp_table *opp_table;
> @@ -1121,8 +1113,6 @@ static struct opp_table *_allocate_opp_table(struct device *dev, int index)
> INIT_LIST_HEAD(&opp_table->opp_list);
> kref_init(&opp_table->kref);
>
> - /* Secure the device table modification */
> - list_add(&opp_table->node, &opp_tables);
> return opp_table;
>
> err:
> @@ -1135,27 +1125,64 @@ void _get_opp_table_kref(struct opp_table *opp_table)
> kref_get(&opp_table->kref);
> }
>
> +/*
> + * We need to make sure that the OPP table for a device doesn't get added twice
> + * if this routine gets called in parallel with the same device pointer.
> + *
> + * The simplest way to enforce that is to perform everything (find an existing
> + * table and, if not found, create a new one) under the opp_table_lock, so only
> + * one creator gets access to it. But that expands the critical section under
> + * the lock and may end up causing circular dependencies with the debugfs,
> + * interconnect or clock frameworks, as they may be direct or indirect users
> + * of the OPP core.
> + *
> + * For that reason we go for a slightly tricky implementation here, which uses
> + * the opp_tables_busy flag to indicate that another creator is in the middle
> + * of adding an OPP table and others should wait for it to finish.
> + */
> static struct opp_table *_opp_get_opp_table(struct device *dev, int index)
> {
> struct opp_table *opp_table;
>
> - /* Hold our table modification lock here */
> +again:
> mutex_lock(&opp_table_lock);
>
> opp_table = _find_opp_table_unlocked(dev);
> if (!IS_ERR(opp_table))
> goto unlock;
>
> + /*
> + * The opp_tables list or an OPP table's dev_list is getting updated by
> + * another user; wait for it to finish.
> + */
> + if (unlikely(opp_tables_busy)) {
> + mutex_unlock(&opp_table_lock);
> + cpu_relax();
> + goto again;
> + }
> +
> + opp_tables_busy = true;
> opp_table = _managed_opp(dev, index);
> +
> + /* Drop the lock to reduce the size of the critical section */
> + mutex_unlock(&opp_table_lock);
> +
> if (opp_table) {
> - if (!_add_opp_dev_unlocked(dev, opp_table)) {
> + if (!_add_opp_dev(dev, opp_table)) {
> dev_pm_opp_put_opp_table(opp_table);
> opp_table = ERR_PTR(-ENOMEM);
> }
> - goto unlock;
> +
> + mutex_lock(&opp_table_lock);
> + } else {
> + opp_table = _allocate_opp_table(dev, index);
> +
> + mutex_lock(&opp_table_lock);
> + if (!IS_ERR(opp_table))
> + list_add(&opp_table->node, &opp_tables);
> }
>
> - opp_table = _allocate_opp_table(dev, index);
> + opp_tables_busy = false;
>
> unlock:
> mutex_unlock(&opp_table_lock);
> @@ -1181,6 +1208,10 @@ static void _opp_table_kref_release(struct kref *kref)
> struct opp_device *opp_dev, *temp;
> int i;
>
> + /* Drop the lock as soon as we can */
> + list_del(&opp_table->node);
> + mutex_unlock(&opp_table_lock);
> +
> _of_clear_opp_table(opp_table);
>
> /* Release clk */
> @@ -1208,10 +1239,7 @@ static void _opp_table_kref_release(struct kref *kref)
>
> mutex_destroy(&opp_table->genpd_virt_dev_lock);
> mutex_destroy(&opp_table->lock);
> - list_del(&opp_table->node);
> kfree(opp_table);
> -
> - mutex_unlock(&opp_table_lock);
> }
>
> void dev_pm_opp_put_opp_table(struct opp_table *opp_table)
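>
> For reference, a compressed userspace analogue of the scheme above
> (busy flag, drop the lock around the slow allocation, retry), again
> with illustrative names and error handling trimmed:
>
> #include <pthread.h>
> #include <sched.h>
> #include <stdbool.h>
> #include <stdlib.h>
>
> struct table { const void *dev; struct table *next; };
>
> static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
> static struct table *tables;
> static bool tables_busy;	/* mirrors opp_tables_busy */
>
> static struct table *find_table(const void *dev)
> {
> 	struct table *t;
>
> 	for (t = tables; t; t = t->next)
> 		if (t->dev == dev)
> 			return t;
> 	return NULL;
> }
>
> struct table *get_table(const void *dev)
> {
> 	struct table *t;
>
> again:
> 	pthread_mutex_lock(&list_lock);
>
> 	t = find_table(dev);
> 	if (t)
> 		goto unlock;
>
> 	/* Another creator is mid-flight: back off and retry. */
> 	if (tables_busy) {
> 		pthread_mutex_unlock(&list_lock);
> 		sched_yield();
> 		goto again;
> 	}
>
> 	tables_busy = true;
>
> 	/* Drop the lock for the slow path; the busy flag keeps other
> 	 * creators out until the new table is inserted. */
> 	pthread_mutex_unlock(&list_lock);
>
> 	t = calloc(1, sizeof(*t));	/* slow, may now recurse safely */
> 	t->dev = dev;
>
> 	pthread_mutex_lock(&list_lock);
> 	t->next = tables;
> 	tables = t;
> 	tables_busy = false;
> unlock:
> 	pthread_mutex_unlock(&list_lock);
> 	return t;
> }
>
> The retry loop busy-waits instead of sleeping on a waitqueue; the
> kernel patch does the same with cpu_relax(), for which sched_yield()
> is the userspace stand-in here.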
Rob, ping.
--
viresh