[PATCH v2 1/2] clk: Add generic sync_state callback for disabling unused clocks

From: Abel Vesa
Date: Sun Dec 25 2022 - 11:47:22 EST


Some unused clocks need to remain untouched by clk_disable_unused() and
can most likely be disabled later, at sync_state time. So provide a
generic sync_state callback for the clock providers that register such
clocks. The callback reuses the clk_disable_unused() mechanism, but
takes the provider's device so that only the unused clocks belonging to
that provider get disabled. Also, during the late_initcall pass of
clk_disable_unused(), skip the clocks whose registering driver has the
generic clk_sync_state_disable_unused callback set as its sync_state,
since those clocks will be handled at sync_state time instead.

Signed-off-by: Abel Vesa <abel.vesa@xxxxxxxxxx>
---

Changes since v1:
* Dropped the 0 returned by __clk_disable_unused when clk_ignore_unused
is set.
* Dropped __initdata for clk_ignore_unused
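
For reference, a provider opts in by pointing its driver's sync_state
callback at the new helper. A minimal, hypothetical hookup (the "foo"
driver name, probe function and match table below are made up purely
for illustration) could look roughly like this:

#include <linux/clk-provider.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>

/* "foo" is a made-up provider used only to illustrate the hookup */
static int foo_clk_probe(struct platform_device *pdev)
{
	/* register this provider's clocks here, e.g. with devm_clk_hw_register() */
	return 0;
}

static const struct of_device_id foo_clk_of_match[] = {
	{ .compatible = "vendor,foo-clk" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, foo_clk_of_match);

static struct platform_driver foo_clk_driver = {
	.probe = foo_clk_probe,
	.driver = {
		.name = "foo-clk",
		.of_match_table = foo_clk_of_match,
		/*
		 * Opt in: the clk core skips this provider's clocks in the
		 * late_initcall pass and disables the ones still unused here,
		 * once all consumers of this device have probed.
		 */
		.sync_state = clk_sync_state_disable_unused,
	},
};
module_platform_driver(foo_clk_driver);

With such a driver in place, clk_disable_unused() leaves the provider's
clocks alone at late_initcall, and clk_sync_state_disable_unused()
disables whatever is still unused when sync_state fires for that device.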

 drivers/clk/clk.c            | 59 +++++++++++++++++++++++++++++-------
 include/linux/clk-provider.h |  1 +
 2 files changed, 49 insertions(+), 11 deletions(-)

diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index e62552a75f08..5185b857fc65 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -1302,14 +1302,27 @@ static void clk_core_disable_unprepare(struct clk_core *core)
 	clk_core_unprepare_lock(core);
 }
 
-static void __init clk_unprepare_unused_subtree(struct clk_core *core)
+static void clk_unprepare_unused_subtree(struct clk_core *core,
+					 struct device *dev)
 {
+	bool from_sync_state = !!dev;
 	struct clk_core *child;
 
 	lockdep_assert_held(&prepare_lock);
 
 	hlist_for_each_entry(child, &core->children, child_node)
-		clk_unprepare_unused_subtree(child);
+		clk_unprepare_unused_subtree(child, dev);
+
+	if (from_sync_state && core->dev != dev)
+		return;
+
+	/*
+	 * clock will be unprepared on sync_state,
+	 * so leave as is for now
+	 */
+	if (!from_sync_state && dev_has_sync_state(core->dev) &&
+	    core->dev->driver->sync_state == clk_sync_state_disable_unused)
+		return;
 
 	if (core->prepare_count)
 		return;
@@ -1332,15 +1345,28 @@ static void __init clk_unprepare_unused_subtree(struct clk_core *core)
 	clk_pm_runtime_put(core);
 }
 
-static void __init clk_disable_unused_subtree(struct clk_core *core)
+static void clk_disable_unused_subtree(struct clk_core *core,
+				       struct device *dev)
 {
+	bool from_sync_state = !!dev;
 	struct clk_core *child;
 	unsigned long flags;
 
 	lockdep_assert_held(&prepare_lock);
 
 	hlist_for_each_entry(child, &core->children, child_node)
-		clk_disable_unused_subtree(child);
+		clk_disable_unused_subtree(child, dev);
+
+	if (from_sync_state && core->dev != dev)
+		return;
+
+	/*
+	 * clock will be disabled on sync_state,
+	 * so leave as is for now
+	 */
+	if (!from_sync_state && dev_has_sync_state(core->dev) &&
+	    core->dev->driver->sync_state == clk_sync_state_disable_unused)
+		return;
 
 	if (core->flags & CLK_OPS_PARENT_ENABLE)
 		clk_core_prepare_enable(core->parent);
@@ -1378,7 +1404,7 @@ static void __init clk_disable_unused_subtree(struct clk_core *core)
 		clk_core_disable_unprepare(core->parent);
 }
 
-static bool clk_ignore_unused __initdata;
+static bool clk_ignore_unused;
 static int __init clk_ignore_unused_setup(char *__unused)
 {
 	clk_ignore_unused = true;
@@ -1386,35 +1412,46 @@ static int __init clk_ignore_unused_setup(char *__unused)
 }
 __setup("clk_ignore_unused", clk_ignore_unused_setup);
 
-static int __init clk_disable_unused(void)
+static void __clk_disable_unused(struct device *dev)
 {
 	struct clk_core *core;
 
 	if (clk_ignore_unused) {
 		pr_warn("clk: Not disabling unused clocks\n");
-		return 0;
+		return;
 	}
 
 	clk_prepare_lock();
 
 	hlist_for_each_entry(core, &clk_root_list, child_node)
-		clk_disable_unused_subtree(core);
+		clk_disable_unused_subtree(core, dev);
 
 	hlist_for_each_entry(core, &clk_orphan_list, child_node)
-		clk_disable_unused_subtree(core);
+		clk_disable_unused_subtree(core, dev);
 
 	hlist_for_each_entry(core, &clk_root_list, child_node)
-		clk_unprepare_unused_subtree(core);
+		clk_unprepare_unused_subtree(core, dev);
 
 	hlist_for_each_entry(core, &clk_orphan_list, child_node)
-		clk_unprepare_unused_subtree(core);
+		clk_unprepare_unused_subtree(core, dev);
 
 	clk_prepare_unlock();
+}
+
+static int __init clk_disable_unused(void)
+{
+	__clk_disable_unused(NULL);
 
 	return 0;
 }
 late_initcall_sync(clk_disable_unused);
 
+void clk_sync_state_disable_unused(struct device *dev)
+{
+	__clk_disable_unused(dev);
+}
+EXPORT_SYMBOL_GPL(clk_sync_state_disable_unused);
+
 static int clk_core_determine_round_nolock(struct clk_core *core,
 					    struct clk_rate_request *req)
 {
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index 842e72a5348f..cf1adfeaf257 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -720,6 +720,7 @@ struct clk *clk_register_divider_table(struct device *dev, const char *name,
 		void __iomem *reg, u8 shift, u8 width,
 		u8 clk_divider_flags, const struct clk_div_table *table,
 		spinlock_t *lock);
+void clk_sync_state_disable_unused(struct device *dev);
 /**
  * clk_register_divider - register a divider clock with the clock framework
  * @dev: device registering this clock
--
2.34.1