+static int __init nxp_stm_clocksource_init(struct device *dev, const char *name,
+ void __iomem *base, struct clk *clk)
+{
+ struct stm_timer *stm_timer;
+ int ret;
+
+ stm_timer = devm_kzalloc(dev, sizeof(*stm_timer), GFP_KERNEL);
+ if (!stm_timer)
+ return -ENOMEM;
+
+ stm_timer->base = base;
+ stm_timer->rate = clk_get_rate(clk);
+
+ stm_timer->scs.cs.name = name;
+ stm_timer->scs.cs.rating = 460;
+ stm_timer->scs.cs.read = nxp_stm_clocksource_read;
+ stm_timer->scs.cs.enable = nxp_stm_clocksource_enable;
+ stm_timer->scs.cs.disable = nxp_stm_clocksource_disable;
+ stm_timer->scs.cs.suspend = nxp_stm_clocksource_suspend;
+ stm_timer->scs.cs.resume = nxp_stm_clocksource_resume;
+ stm_timer->scs.cs.mask = CLOCKSOURCE_MASK(32);
+ stm_timer->scs.cs.flags = CLOCK_SOURCE_IS_CONTINUOUS;
+
+ ret = clocksource_register_hz(&stm_timer->scs.cs, stm_timer->rate);
+ if (ret)
+ return ret;
Should clocksource_unregister() be called from a remove callback (or a devm action) for cleanup?
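One way to do that, assuming the stm_timer allocated above (the action helper name below is made up for illustration), would be a devm action so the unregistration is tied to the device lifetime:

static void nxp_stm_clocksource_unregister_action(void *data)
{
	struct stm_timer *stm_timer = data;

	/* Undo the clocksource_register_hz() done at probe time */
	clocksource_unregister(&stm_timer->scs.cs);
}

and, once the registration succeeded:

	ret = devm_add_action_or_reset(dev, nxp_stm_clocksource_unregister_action, stm_timer);
	if (ret)
		return ret;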
+
+ stm_sched_clock = stm_timer;
+
+ sched_clock_register(nxp_stm_read_sched_clock, 32, stm_timer->rate);
+
+ dev_set_drvdata(dev, stm_timer);
Is this drvdata actually used anywhere?
+
+ dev_dbg(dev, "Registered clocksource %s\n", name);
+
+ return 0;
+}
+static int nxp_stm_clockevent_set_next_event(unsigned long delta, struct clock_event_device *ced)
+{
+ struct stm_timer *stm_timer = ced_to_stm(ced);
+ u32 val;
+
+ nxp_stm_clockevent_disable(stm_timer);
While examining the code base, I came across drivers/clocksource/timer-imx-gpt.c, specifically the
mx1_2_set_next_event() function, which includes protection against missed events. A similar approach
would allow the STM module to stay enabled while only the channel's register state is altered; see the
sketch below. The risk of missed events can also be mitigated by adjusting min_delta_ns based on the
tick frequency.
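For reference, the protection in mx1_2_set_next_event() boils down to reporting -ETIME when the counter has already passed the compare value, e.g. (untested sketch reusing the helpers from this patch; the delta bookkeeping and channel enable handling are omitted):

	val = nxp_stm_clockevent_read_counter(stm_timer) + delta;

	writel(val, stm_timer->base + STM_CHANNEL(0) + STM_CMP);

	/* Let the core retry if the counter already passed the compare value */
	return (int)(val - nxp_stm_clockevent_read_counter(stm_timer)) < 0 ? -ETIME : 0;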
+ stm_timer->sce.delta = delta;
+
+ val = nxp_stm_clockevent_read_counter(stm_timer) + delta;
+
+ writel(val, stm_timer->base + STM_CHANNEL(0) + STM_CMP);
+
+ nxp_stm_clockevent_enable(stm_timer);
+
+ return 0;
+}
+
+static int nxp_stm_clockevent_set_periodic(struct clock_event_device *ced)
+{
+ struct stm_timer *stm_timer = ced_to_stm(ced);
+
+ return nxp_stm_clockevent_set_next_event(stm_timer->rate, ced);
+}
+
+static int __init nxp_stm_clockevent_broadcast_init(struct device *dev, const char *name, void __iomem *base,
+ int irq, struct clk *clk)
+{
+ struct stm_timer *stm_timer;
+ int ret;
+
+ stm_timer = devm_kzalloc(dev, sizeof(*stm_timer), GFP_KERNEL);
+ if (!stm_timer)
+ return -ENOMEM;
+
+ stm_timer->base = base;
+ stm_timer->rate = clk_get_rate(clk);
+
+ stm_timer->sce.ced.name = name;
+ stm_timer->sce.ced.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
+ stm_timer->sce.ced.set_state_shutdown = nxp_stm_clockevent_shutdown;
+ stm_timer->sce.ced.set_state_periodic = nxp_stm_clockevent_set_periodic;
+ stm_timer->sce.ced.set_next_event = nxp_stm_clockevent_set_next_event;
+ stm_timer->sce.ced.cpumask = cpu_possible_mask;
+ stm_timer->sce.ced.rating = 460;
+ stm_timer->sce.ced.irq = irq;
+
+ nxp_stm_clockevent_irq_clr(stm_timer);
+
+ ret = request_irq(irq, nxp_stm_clockevent_interrupt,
+ IRQF_TIMER | IRQF_NOBALANCING, name, &stm_timer->sce.ced);
+ if (ret) {
+ dev_err(dev, "Unable to allocate interrupt line: %d\n", ret);
+ return ret;
+ }
+
+ clockevents_config_and_register(&stm_timer->sce.ced, stm_timer->rate, 1, 0xffffffff);
+
+ dev_dbg(dev, "Registered broadcast clockevent %s irq=%d\n", name, irq);
+
+ return 0;
+}
+
+static int __init nxp_stm_clockevent_per_cpu_init(struct device *dev, const char *name, void __iomem *base,
+ int irq, struct clk *clk, int cpu)
+{
This function duplicates a significant portion of the previous one. To avoid the code duplication, it
would be beneficial to extract the common part into a dedicated helper; a possible shape is sketched
below.
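A rough sketch of what the shared part could look like (the helper name and parameter list are only a suggestion):

static struct stm_timer *nxp_stm_clockevent_alloc(struct device *dev, const char *name,
						  void __iomem *base, struct clk *clk,
						  const struct cpumask *cpumask, int irq)
{
	struct stm_timer *stm_timer;

	stm_timer = devm_kzalloc(dev, sizeof(*stm_timer), GFP_KERNEL);
	if (!stm_timer)
		return NULL;

	stm_timer->base = base;
	stm_timer->rate = clk_get_rate(clk);

	stm_timer->sce.ced.name = name;
	stm_timer->sce.ced.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
	stm_timer->sce.ced.set_state_shutdown = nxp_stm_clockevent_shutdown;
	stm_timer->sce.ced.set_state_periodic = nxp_stm_clockevent_set_periodic;
	stm_timer->sce.ced.set_next_event = nxp_stm_clockevent_set_next_event;
	stm_timer->sce.ced.cpumask = cpumask;
	stm_timer->sce.ced.rating = 460;
	stm_timer->sce.ced.irq = irq;

	return stm_timer;
}

The broadcast variant would then pass cpu_possible_mask and this one cpumask_of(cpu).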
+ struct stm_timer *stm_timer;
+ int ret;
+
+ stm_timer = devm_kzalloc(dev, sizeof(*stm_timer), GFP_KERNEL);
+ if (!stm_timer)
+ return -ENOMEM;
+
+ stm_timer->base = base;
+ stm_timer->rate = clk_get_rate(clk);
+
+ stm_timer->sce.ced.name = name;
+ stm_timer->sce.ced.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
+ stm_timer->sce.ced.set_state_shutdown = nxp_stm_clockevent_shutdown;
+ stm_timer->sce.ced.set_state_periodic = nxp_stm_clockevent_set_periodic;
+ stm_timer->sce.ced.set_next_event = nxp_stm_clockevent_set_next_event;
+ stm_timer->sce.ced.cpumask = cpumask_of(cpu);
+ stm_timer->sce.ced.rating = 460;
+ stm_timer->sce.ced.irq = irq;
+
+ nxp_stm_clockevent_irq_clr(stm_timer);
+
+ ret = request_irq(irq, nxp_stm_clockevent_interrupt,
+ IRQF_TIMER | IRQF_NOBALANCING, name, &stm_timer->sce.ced);
devm_request_irq() instead?
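i.e. something like:

	ret = devm_request_irq(dev, irq, nxp_stm_clockevent_interrupt,
			       IRQF_TIMER | IRQF_NOBALANCING, name, &stm_timer->sce.ced);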
+ if (ret) {
+ dev_err(dev, "Unable to allocate interrupt line: %d\n", ret);
+ return ret;
+ }
+
+ per_cpu(stm_timers, cpu) = stm_timer;
+
+ dev_dbg(dev, "Initialized per cpu clockevent name=%s, irq=%d, cpu=%d\n", name, irq, cpu);
+
+ return 0;
+}
+
+static int nxp_stm_clockevent_starting_cpu(unsigned int cpu)
+{
+ struct stm_timer *stm_timer = per_cpu(stm_timers, cpu);
+ int ret;
+
+ if (WARN_ON(!stm_timer))
+ return -EFAULT;
+
+ ret = irq_force_affinity(stm_timer->sce.ced.irq, cpumask_of(cpu));
+ if (ret)
+ return ret;
+
+ clockevents_config_and_register(&stm_timer->sce.ced, stm_timer->rate, 1, 0xffffffff);
+
+ return 0;
+}
+
+static int __init nxp_stm_timer_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct stm_instances *stm_instances;
+ const char *name = of_node_full_name(np);
+ void __iomem *base;
+ int irq, ret;
+ struct clk *clk;
+
+ stm_instances = (typeof(stm_instances))of_device_get_match_data(dev);
+ if (!stm_instances) {
+ dev_err(dev, "No STM instances associated with a cpu");
+ return -EINVAL;
+ }
+
+ base = devm_of_iomap(dev, np, 0, NULL);
+ if (IS_ERR(base)) {
+ dev_err(dev, "Failed to iomap %pOFn\n", np);
+ return PTR_ERR(base);
+ }
+
+ irq = irq_of_parse_and_map(np, 0);
+ if (irq <= 0) {
+ dev_err(dev, "Failed to parse and map IRQ: %d\n", irq);
+ return -EINVAL;
+ }
From the commit description:
The first probed STM is used as a clocksource, the second will be the
broadcast timer and the rest are used as a clockevent with the
affinity set to a CPU.
Why is the interrupt mandatory when the node is probed as a clocksource?
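If the interrupt is only needed for the clockevent cases, the mapping could be moved into the branches that actually use it, e.g. (sketch only):

	} else if (!stm_instances->clockevent_broadcast &&
		   (stm_instances->features & STM_CLKEVT_BROADCAST)) {

		irq = irq_of_parse_and_map(np, 0);
		if (irq <= 0) {
			dev_err(dev, "Failed to parse and map IRQ: %d\n", irq);
			return -EINVAL;
		}

		ret = nxp_stm_clockevent_broadcast_init(dev, name, base, irq, clk);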
+
+ clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(clk)) {
+ dev_err(dev, "Clock not found\n");
Missing irq_dispose_mapping() in this error path?
+ return PTR_ERR(clk);
+ }
+
+ ret = clk_prepare_enable(clk);
+ if (ret) {
+ dev_err(dev, "Failed to enable STM timer clock: %d\n", ret);
+ return ret;
+ }
devm_clk_get_enabled() instead of devm_clk_get() + clk_prepare_enable()?
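i.e. something like:

	clk = devm_clk_get_enabled(dev, NULL);
	if (IS_ERR(clk))
		return dev_err_probe(dev, PTR_ERR(clk), "Clock not found\n");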
+
+ if (!stm_instances->clocksource && (stm_instances->features & STM_CLKSRC)) {
+
+ /*
+ * First probed STM will be a clocksource
+ */
+ ret = nxp_stm_clocksource_init(dev, name, base, clk);
+ if (ret)
+ return ret;
+ stm_instances->clocksource++;
+
+ } else if (!stm_instances->clockevent_broadcast &&
+ (stm_instances->features & STM_CLKEVT_BROADCAST)) {
+
+ /*
+ * Second probed STM will be a broadcast clockevent
+ */
+ ret = nxp_stm_clockevent_broadcast_init(dev, name, base, irq, clk);
+ if (ret)
+ return ret;
+ stm_instances->clockevent_broadcast++;
+
+ } else if (stm_instances->clockevent_per_cpu < num_possible_cpus() &&
+ (stm_instances->features & STM_CLKEVT_PER_CPU)) {
+
+ /*
+ * Next probed STM will be a per CPU clockevent, until
+ * we probe as much as we have CPUs available on the
+ * system, we do a partial initialization
+ */
+ ret = nxp_stm_clockevent_per_cpu_init(dev, name, base, irq, clk,
+ stm_instances->clockevent_per_cpu);
+ if (ret)
+ return ret;
+
+ stm_instances->clockevent_per_cpu++;
+
+ /*
+ * The number of probed STM for per CPU clockevent is
+ * equal to the number of available CPUs on the
+ * system. We install the cpu hotplug to finish the
+ * initialization by registering the clockevents
+ */
+ if (stm_instances->clockevent_per_cpu == num_possible_cpus()) {
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "STM timer:starting",
+ nxp_stm_clockevent_starting_cpu, NULL);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static struct stm_instances s32g_stm_instances = { .features = STM_CLKSRC | STM_CLKEVT_PER_CPU };
+
+static const struct of_device_id nxp_stm_of_match[] = {
+ { .compatible = "nxp,s32g2-stm", .data = &s32g_stm_instances },
+ { }
+};
+MODULE_DEVICE_TABLE(of, nxp_stm_of_match);
+
+static struct platform_driver nxp_stm_probe = {
+ .probe = nxp_stm_timer_probe,
+ .driver = {
+ .name = "nxp-stm",
+ .of_match_table = of_match_ptr(nxp_stm_of_match),
+ },
+};
+module_platform_driver(nxp_stm_probe);
+
+MODULE_DESCRIPTION("NXP System Timer Module driver");
+MODULE_LICENSE("GPL");