Re: [PATCH v8 01/11] cpuidle/poll_state: poll via smp_cond_load_relaxed()
From: Christoph Lameter (Ampere)
Date: Tue Oct 15 2024 - 19:10:58 EST
Here is a patch that keeps the cpuidle stuff generic but allows an
override by arm64.
From: Christoph Lameter (Ampere) <cl@xxxxxxxxx>
Subject: Revise cpu poll idle to make full use of wfet() and wfe()
ARM64 has instructions that can wait for an event (wfe) or for an
event with a timeout (wfet).
Clean up the code in drivers/cpuidle/ so that it waits until the end
of the polling period, and allow an architecture to override the wait
implementation.
Provide an optimized wait function for arm64.
Signed-off-by: Christoph Lameter <cl@xxxxxxxxx>
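
For readers skimming the mechanism: the generic code below provides a
__weak definition of the wait function, and an architecture's ordinary
(strong) definition takes precedence when the linker resolves the
symbol. A minimal standalone sketch of that pattern, with hypothetical
names that are not part of the patch:

/* weak_demo.c - toy weak-symbol override; cc weak_demo.c [strong.c] */
#include <stdio.h>
#include <stdint.h>

/* Generic fallback: any strong definition elsewhere replaces it. */
__attribute__((weak)) void wait_with_timeout(uint64_t end)
{
	printf("generic spin-wait until %llu\n", (unsigned long long)end);
}

int main(void)
{
	wait_with_timeout(1000);
	return 0;
}

If a second file defines a strong wait_with_timeout(), the linker picks
it over the weak fallback. Note that the shared prototype must not
itself be marked __weak: if the overriding translation unit sees a weak
prototype, its definition is emitted weak as well and the linker's
choice becomes link-order dependent. The declaration added to
include/linux/cpuidle.h below is therefore a plain extern.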
Index: linux/arch/arm64/lib/delay.c
===================================================================
--- linux.orig/arch/arm64/lib/delay.c
+++ linux/arch/arm64/lib/delay.c
@@ -12,6 +12,8 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/timex.h>
+#include <linux/sched/clock.h>
+#include <linux/cpuidle.h>
#include <clocksource/arm_arch_timer.h>
@@ -67,3 +69,27 @@ void __ndelay(unsigned long nsecs)
	__const_udelay(nsecs * 0x5UL); /* 2**32 / 1000000000 (rounded up) */
}
EXPORT_SYMBOL(__ndelay);
+
+void cpuidle_wait_for_resched_with_timeout(u64 end)
+{
+	u64 start;
+
+	while (!need_resched() && (start = local_clock_noinstr()) < end) {
+		if (alternative_has_cap_unlikely(ARM64_HAS_WFXT)) {
+			/*
+			 * WFET takes an absolute deadline of the arch
+			 * timer, so convert the remaining nanoseconds
+			 * to cycles past the current counter value.
+			 */
+			wfet(get_cycles() + xloops_to_cycles((end - start) * 0x5UL));
+		} else if (arch_timer_evtstrm_available() &&
+			   start + ARCH_TIMER_EVT_STREAM_PERIOD_US * 1000 < end) {
+			/* The event stream wakes us up periodically */
+			wfe();
+		} else {
+			/* Too close to the deadline to sleep; spin */
+			cpu_relax();
+		}
+	}
+}
+
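
The * 0x5UL factor above mirrors __ndelay(): 0x5 is 2**32 / 10**9
rounded up, and on arm64 loops_per_jiffy * HZ corresponds to the
architected timer frequency, so xloops_to_cycles() converts the
remaining nanoseconds into timer cycles. A standalone sanity check of
that scaling follows; the timer frequency and HZ values are assumptions
for illustration only:

/* cycles_check.c - model of the ns -> timer-cycle scaling */
#include <stdio.h>
#include <stdint.h>

#define HZ 250ULL				/* assumed tick rate */
#define TIMER_FREQ 25000000ULL			/* assumed 25 MHz arch timer */
#define LOOPS_PER_JIFFY (TIMER_FREQ / HZ)	/* timer-based delay loops */

static uint64_t xloops_to_cycles(uint64_t xloops)
{
	return (xloops * LOOPS_PER_JIFFY * HZ) >> 32;
}

int main(void)
{
	uint64_t ns = 1000000;	/* 1 ms */

	/*
	 * Rounding 2**32 / 10**9 (~4.295) up to 5 errs on the long
	 * side: this prints 29103 cycles versus the exact 25000 for
	 * 1 ms at 25 MHz, so waits never end early.
	 */
	printf("%llu ns -> %llu cycles\n",
	       (unsigned long long)ns,
	       (unsigned long long)xloops_to_cycles(ns * 0x5ULL));
	return 0;
}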
Index: linux/drivers/cpuidle/poll_state.c
===================================================================
--- linux.orig/drivers/cpuidle/poll_state.c
+++ linux/drivers/cpuidle/poll_state.c
@@ -8,35 +8,29 @@
#include <linux/sched/clock.h>
#include <linux/sched/idle.h>
-#define POLL_IDLE_RELAX_COUNT 200
+/* Generic fallback: spin until need_resched() or the deadline passes */
+__weak void cpuidle_wait_for_resched_with_timeout(u64 end)
+{
+	while (!need_resched() && local_clock_noinstr() < end)
+		cpu_relax();
+}
static int __cpuidle poll_idle(struct cpuidle_device *dev,
			       struct cpuidle_driver *drv, int index)
{
-	u64 time_start;
-
-	time_start = local_clock_noinstr();
+	u64 time_start = local_clock_noinstr();
+	u64 time_end = time_start + cpuidle_poll_time(drv, dev);
	dev->poll_time_limit = false;
	raw_local_irq_enable();
	if (!current_set_polling_and_test()) {
-		unsigned int loop_count = 0;
-		u64 limit;
-		limit = cpuidle_poll_time(drv, dev);
+		cpuidle_wait_for_resched_with_timeout(time_end);
+
+		if (!need_resched())
+			dev->poll_time_limit = true;
-		while (!need_resched()) {
-			cpu_relax();
-			if (loop_count++ < POLL_IDLE_RELAX_COUNT)
-				continue;
-
-			loop_count = 0;
-			if (local_clock_noinstr() - time_start > limit) {
-				dev->poll_time_limit = true;
-				break;
-			}
-		}
	}
	raw_local_irq_disable();
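
The simplified exit logic above relies on the helper's postcondition:
it returns only once need_resched() is true or the deadline has passed,
so finding need_resched() false on return implies the timeout fired,
without reading the clock again. A toy model of that inference, with
hypothetical stand-ins for the kernel primitives:

/* poll_exit_model.c - why !need_resched() after the wait means timeout */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool resched_pending;	/* stands in for need_resched() */
static uint64_t clock_ns;	/* stands in for local_clock_noinstr() */

static void wait_for_resched_with_timeout(uint64_t end)
{
	/* Waits only while BOTH exit conditions are false, so at
	 * least one of them holds when the function returns. */
	while (!resched_pending && clock_ns < end)
		clock_ns += 100;	/* model time advancing */
}

int main(void)
{
	bool poll_time_limit;

	wait_for_resched_with_timeout(5000);
	poll_time_limit = !resched_pending;	/* the only other way out */
	printf("poll_time_limit=%d\n", poll_time_limit);
	return 0;
}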
Index: linux/include/linux/cpuidle.h
===================================================================
--- linux.orig/include/linux/cpuidle.h
+++ linux/include/linux/cpuidle.h
@@ -202,6 +202,9 @@ extern int cpuidle_play_dead(void);
extern struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev);
static inline struct cpuidle_device *cpuidle_get_device(void)
{return __this_cpu_read(cpuidle_devices); }
+
+extern void cpuidle_wait_for_resched_with_timeout(u64 end);
+
#else
static inline void disable_cpuidle(void) { }
static inline bool cpuidle_not_available(struct cpuidle_driver *drv,