Re: [RFC][PATCH 2/4] printk: offload printing from wake_up_klogd_work_func()
From: Sergey Senozhatsky
Date: Sat Mar 18 2017 - 05:58:07 EST
Hello Petr,
On (03/17/17 13:19), Petr Mladek wrote:
[..]
> A solution might be to rename the variable to something like
> printk_pending_output, always set it in vprintk_emit() and
> clear it in console_unlock() when there are no pending messages.
believe it or not, I thought I was setting printk_kthread_need_flush_console
to `true' unconditionally; probably I did so in one of the previous iterations
of the patch set. weird. I agree that doing this makes sense, thanks for
bringing it up.
....
I don't want printk_kthread_need_flush_console to exist at all. instead,
I think, I want to move printk_pending out of per-cpu memory and use a
global printk_pending: set the PRINTK_PENDING_OUTPUT bit in vprintk_emit(),
clear it in console_unlock(), and make both the printk_kthread scheduling
condition and the console_unlock() retry path depend on whether
`printk_pending' is zero.
something like the diff below (the code is ugly and lacks a ton of
barriers, etc.)
---
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 601a9ef6db89..a0b231f49052 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -439,8 +439,6 @@ static char *log_buf = __log_buf;
static u32 log_buf_len = __LOG_BUF_LEN;
static struct task_struct *printk_kthread __read_mostly;
-/* When `true' printing thread has messages to print. */
-static bool printk_kthread_need_flush_console;
/*
* We can't call into the scheduler (wake_up() printk kthread), for example,
* during suspend/kexec. This temporarily switches printk to old behaviour.
@@ -451,6 +449,13 @@ static int printk_kthread_disable __read_mostly;
* it doesn't go back to 0.
*/
static bool printk_emergency __read_mostly;
+/*
+ * Delayed printk version, for scheduler-internal messages:
+ */
+#define PRINTK_PENDING_WAKEUP 0x01
+#define PRINTK_PENDING_OUTPUT 0x02
+
+static int printk_pending = 0;
static inline bool printk_kthread_enabled(void)
{
@@ -1806,6 +1811,7 @@ asmlinkage int vprintk_emit(int facility, int level,
printed_len += log_output(facility, level, lflags, dict, dictlen, text, text_len);
+ printk_pending |= PRINTK_PENDING_OUTPUT;
logbuf_unlock_irqrestore(flags);
/* If called from the scheduler, we can not call up(). */
@@ -1819,8 +1825,6 @@ asmlinkage int vprintk_emit(int facility, int level,
* schedulable context.
*/
if (printk_kthread_enabled()) {
- printk_kthread_need_flush_console = true;
-
printk_safe_enter_irqsave(flags);
wake_up_process(printk_kthread);
printk_safe_exit_irqrestore(flags);
@@ -2220,10 +2224,11 @@ void console_unlock(void)
static char text[LOG_LINE_MAX + PREFIX_MAX];
static u64 seen_seq;
unsigned long flags;
- bool wake_klogd = false;
- bool do_cond_resched, retry;
+ bool wake_klogd;
+ bool do_cond_resched, retry = false;
if (console_suspended) {
+ printk_pending &= ~PRINTK_PENDING_OUTPUT;
up_console_sem();
return;
}
@@ -2242,6 +2247,8 @@ void console_unlock(void)
console_may_schedule = 0;
again:
+ wake_klogd = printk_pending & PRINTK_PENDING_WAKEUP;
+ printk_pending = 0;
/*
* We released the console_sem lock, so we need to recheck if
* cpu is online and (if not) is there at least one CON_ANYTIME
@@ -2330,15 +2337,16 @@ void console_unlock(void)
* flush, no worries.
*/
raw_spin_lock(&logbuf_lock);
- retry = console_seq != log_next_seq;
+ if (printk_pending != 0 || console_seq != log_next_seq)
+ retry = true;
raw_spin_unlock(&logbuf_lock);
printk_safe_exit_irqrestore(flags);
- if (retry && console_trylock())
- goto again;
-
if (wake_klogd)
wake_up_klogd();
+
+ if (retry && console_trylock())
+ goto again;
}
EXPORT_SYMBOL(console_unlock);
@@ -2722,19 +2730,9 @@ static int __init printk_late_init(void)
late_initcall(printk_late_init);
#if defined CONFIG_PRINTK
-/*
- * Delayed printk version, for scheduler-internal messages:
- */
-#define PRINTK_PENDING_WAKEUP 0x01
-#define PRINTK_PENDING_OUTPUT 0x02
-
-static DEFINE_PER_CPU(int, printk_pending);
-
static void wake_up_klogd_work_func(struct irq_work *irq_work)
{
- int pending = __this_cpu_xchg(printk_pending, 0);
-
- if (pending & PRINTK_PENDING_OUTPUT) {
+ if (printk_pending & PRINTK_PENDING_OUTPUT) {
if (printk_kthread_enabled()) {
wake_up_process(printk_kthread);
} else {
@@ -2747,8 +2745,10 @@ static void wake_up_klogd_work_func(struct irq_work *irq_work)
}
}
- if (pending & PRINTK_PENDING_WAKEUP)
+ if (printk_pending & PRINTK_PENDING_WAKEUP) {
+ printk_pending &= ~PRINTK_PENDING_WAKEUP;
wake_up_interruptible(&log_wait);
+ }
}
static DEFINE_PER_CPU(struct irq_work, wake_up_klogd_work) = {
@@ -2760,18 +2760,10 @@ static int printk_kthread_func(void *data)
{
while (1) {
set_current_state(TASK_INTERRUPTIBLE);
- if (!printk_kthread_need_flush_console)
+ if (!(printk_pending & PRINTK_PENDING_OUTPUT))
schedule();
__set_current_state(TASK_RUNNING);
- /*
- * Avoid an infinite loop when console_unlock() cannot
- * access consoles, e.g. because console_suspended is
- * true. schedule(), someone else will print the messages
- * from resume_console().
- */
- printk_kthread_need_flush_console = false;
-
console_lock();
console_unlock();
}
@@ -2802,7 +2794,7 @@ void wake_up_klogd(void)
{
preempt_disable();
if (waitqueue_active(&log_wait)) {
- this_cpu_or(printk_pending, PRINTK_PENDING_WAKEUP);
+ printk_pending |= PRINTK_PENDING_WAKEUP;
irq_work_queue(this_cpu_ptr(&wake_up_klogd_work));
}
preempt_enable();
@@ -2818,7 +2810,6 @@ int printk_deferred(const char *fmt, ...)
r = vprintk_emit(0, LOGLEVEL_SCHED, NULL, 0, fmt, args);
va_end(args);
- __this_cpu_or(printk_pending, PRINTK_PENDING_OUTPUT);
irq_work_queue(this_cpu_ptr(&wake_up_klogd_work));
preempt_enable();
---
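on the barriers: if printk_pending stays a plain global, one option might be
to switch to the atomic bitops, so that at least the read-modify-write on the
flags is atomic (ordering against the logbuf data would still need explicit
barriers or the _lock/_unlock bitop variants). just a rough sketch, with
made-up *_BIT names, because set_bit()/test_and_clear_bit() take bit numbers
rather than masks:
---
#include <linux/bitops.h>

/* bit numbers, not masks; the names here are illustrative only */
#define PRINTK_PENDING_WAKEUP_BIT	0
#define PRINTK_PENDING_OUTPUT_BIT	1

static unsigned long printk_pending;

/* producer side, e.g. right after log_output() in vprintk_emit() */
static inline void printk_mark_pending_output(void)
{
	/* atomic RMW, but not a full memory barrier by itself */
	set_bit(PRINTK_PENDING_OUTPUT_BIT, &printk_pending);
}

/* consumer side, e.g. at the `again:' label in console_unlock() */
static inline bool printk_take_pending_wakeup(void)
{
	return test_and_clear_bit(PRINTK_PENDING_WAKEUP_BIT, &printk_pending);
}
---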
[..]
> If I remember correctly, you were not much happy with this
> solution because it did spread the logic. I think that you did not
> believe that it was worth fixing the second problem.
hm, I think Jan Kara was the first one who said that we are
overcomplicating the whole thing... or maybe it was me; I don't
deny that either.
-ss