[PATCH printk v1 01/10] printk: relocate printk cpulock functions

From: John Ogness
Date: Tue Aug 03 2021 - 09:13:22 EST


Move the printk cpulock functions "as is" further up so that they
can be used by other printk.c functions in an upcoming commit.

Signed-off-by: John Ogness <john.ogness@xxxxxxxxxxxxx>
---
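A brief usage note for reviewers (commentary only, not part of the
commit): as of this series' baseline, the functions moved here are
normally reached through the printk_cpu_lock_irqsave()/
printk_cpu_unlock_irqrestore() wrapper macros in include/linux/printk.h.
A minimal sketch of a typical caller, using those macros and a
hypothetical emit_report() helper, could look like this:

#include <linux/printk.h>
#include <linux/smp.h>

/* Illustrative only: keep a multi-line report contiguous across CPUs. */
static void emit_report(void)
{
	unsigned long flags;

	/* Spin until this CPU owns the printk cpulock (reentrant on the owner). */
	printk_cpu_lock_irqsave(flags);

	printk("---- begin report (CPU %d) ----\n", smp_processor_id());
	dump_stack_print_info(KERN_DEFAULT);
	printk("---- end report ----\n");

	printk_cpu_unlock_irqrestore(flags);
}

Moving the definitions above the console code lets later patches in this
series call them from other printk.c functions without adding forward
declarations.
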
kernel/printk/printk.c | 232 ++++++++++++++++++++---------------------
1 file changed, 116 insertions(+), 116 deletions(-)
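
Also commentary only: the nesting behaviour documented in the moved
kernel-doc, sketched with hypothetical outer()/inner() helpers. The
inner lock/unlock pair on the owning CPU only adjusts
printk_cpulock_nested; ownership is released by the outermost unlock:

#include <linux/printk.h>

static void inner(void)
{
	unsigned long flags;

	printk_cpu_lock_irqsave(flags);		/* already owner: nest count++ */
	printk("inner line\n");
	printk_cpu_unlock_irqrestore(flags);	/* nest count--, still owner   */
}

static void outer(void)
{
	unsigned long flags;

	printk_cpu_lock_irqsave(flags);		/* take ownership              */
	printk("outer line\n");
	inner();
	printk_cpu_unlock_irqrestore(flags);	/* release: owner back to -1   */
}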

diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 825277e1e742..3d0c933937b4 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -211,6 +211,122 @@ int devkmsg_sysctl_set_loglvl(struct ctl_table *table, int write,
 	return 0;
 }
 
+#ifdef CONFIG_SMP
+static atomic_t printk_cpulock_owner = ATOMIC_INIT(-1);
+static atomic_t printk_cpulock_nested = ATOMIC_INIT(0);
+
+/**
+ * __printk_wait_on_cpu_lock() - Busy wait until the printk cpu-reentrant
+ *                               spinning lock is not owned by any CPU.
+ *
+ * Context: Any context.
+ */
+void __printk_wait_on_cpu_lock(void)
+{
+	do {
+		cpu_relax();
+	} while (atomic_read(&printk_cpulock_owner) != -1);
+}
+EXPORT_SYMBOL(__printk_wait_on_cpu_lock);
+
+/**
+ * __printk_cpu_trylock() - Try to acquire the printk cpu-reentrant
+ *                          spinning lock.
+ *
+ * If no processor has the lock, the calling processor takes the lock and
+ * becomes the owner. If the calling processor is already the owner of the
+ * lock, this function succeeds immediately.
+ *
+ * Context: Any context. Expects interrupts to be disabled.
+ * Return: 1 on success, otherwise 0.
+ */
+int __printk_cpu_trylock(void)
+{
+	int cpu;
+	int old;
+
+	cpu = smp_processor_id();
+
+	/*
+	 * Guarantee loads and stores from this CPU when it is the lock owner
+	 * are _not_ visible to the previous lock owner. This pairs with
+	 * __printk_cpu_unlock:B.
+	 *
+	 * Memory barrier involvement:
+	 *
+	 * If __printk_cpu_trylock:A reads from __printk_cpu_unlock:B, then
+	 * __printk_cpu_unlock:A can never read from __printk_cpu_trylock:B.
+	 *
+	 * Relies on:
+	 *
+	 * RELEASE from __printk_cpu_unlock:A to __printk_cpu_unlock:B
+	 * of the previous CPU
+	 *    matching
+	 * ACQUIRE from __printk_cpu_trylock:A to __printk_cpu_trylock:B
+	 * of this CPU
+	 */
+	old = atomic_cmpxchg_acquire(&printk_cpulock_owner, -1,
+				     cpu); /* LMM(__printk_cpu_trylock:A) */
+	if (old == -1) {
+		/*
+		 * This CPU is now the owner and begins loading/storing
+		 * data: LMM(__printk_cpu_trylock:B)
+		 */
+		return 1;
+
+	} else if (old == cpu) {
+		/* This CPU is already the owner. */
+		atomic_inc(&printk_cpulock_nested);
+		return 1;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(__printk_cpu_trylock);
+
+/**
+ * __printk_cpu_unlock() - Release the printk cpu-reentrant spinning lock.
+ *
+ * The calling processor must be the owner of the lock.
+ *
+ * Context: Any context. Expects interrupts to be disabled.
+ */
+void __printk_cpu_unlock(void)
+{
+	if (atomic_read(&printk_cpulock_nested)) {
+		atomic_dec(&printk_cpulock_nested);
+		return;
+	}
+
+	/*
+	 * This CPU is finished loading/storing data:
+	 * LMM(__printk_cpu_unlock:A)
+	 */
+
+	/*
+	 * Guarantee loads and stores from this CPU when it was the
+	 * lock owner are visible to the next lock owner. This pairs
+	 * with __printk_cpu_trylock:A.
+	 *
+	 * Memory barrier involvement:
+	 *
+	 * If __printk_cpu_trylock:A reads from __printk_cpu_unlock:B,
+	 * then __printk_cpu_trylock:B reads from __printk_cpu_unlock:A.
+	 *
+	 * Relies on:
+	 *
+	 * RELEASE from __printk_cpu_unlock:A to __printk_cpu_unlock:B
+	 * of this CPU
+	 *    matching
+	 * ACQUIRE from __printk_cpu_trylock:A to __printk_cpu_trylock:B
+	 * of the next CPU
+	 */
+	atomic_set_release(&printk_cpulock_owner,
+			   -1); /* LMM(__printk_cpu_unlock:B) */
+}
+EXPORT_SYMBOL(__printk_cpu_unlock);
+#endif /* CONFIG_SMP */
+
 /* Number of registered extended console drivers. */
 static int nr_ext_console_drivers;
 
@@ -3578,119 +3694,3 @@ void kmsg_dump_rewind(struct kmsg_dump_iter *iter)
 EXPORT_SYMBOL_GPL(kmsg_dump_rewind);
 
 #endif
-
-#ifdef CONFIG_SMP
-static atomic_t printk_cpulock_owner = ATOMIC_INIT(-1);
-static atomic_t printk_cpulock_nested = ATOMIC_INIT(0);
-
-/**
- * __printk_wait_on_cpu_lock() - Busy wait until the printk cpu-reentrant
- *                               spinning lock is not owned by any CPU.
- *
- * Context: Any context.
- */
-void __printk_wait_on_cpu_lock(void)
-{
-	do {
-		cpu_relax();
-	} while (atomic_read(&printk_cpulock_owner) != -1);
-}
-EXPORT_SYMBOL(__printk_wait_on_cpu_lock);
-
-/**
- * __printk_cpu_trylock() - Try to acquire the printk cpu-reentrant
- *                          spinning lock.
- *
- * If no processor has the lock, the calling processor takes the lock and
- * becomes the owner. If the calling processor is already the owner of the
- * lock, this function succeeds immediately.
- *
- * Context: Any context. Expects interrupts to be disabled.
- * Return: 1 on success, otherwise 0.
- */
-int __printk_cpu_trylock(void)
-{
-	int cpu;
-	int old;
-
-	cpu = smp_processor_id();
-
-	/*
-	 * Guarantee loads and stores from this CPU when it is the lock owner
-	 * are _not_ visible to the previous lock owner. This pairs with
-	 * __printk_cpu_unlock:B.
-	 *
-	 * Memory barrier involvement:
-	 *
-	 * If __printk_cpu_trylock:A reads from __printk_cpu_unlock:B, then
-	 * __printk_cpu_unlock:A can never read from __printk_cpu_trylock:B.
-	 *
-	 * Relies on:
-	 *
-	 * RELEASE from __printk_cpu_unlock:A to __printk_cpu_unlock:B
-	 * of the previous CPU
-	 *    matching
-	 * ACQUIRE from __printk_cpu_trylock:A to __printk_cpu_trylock:B
-	 * of this CPU
-	 */
-	old = atomic_cmpxchg_acquire(&printk_cpulock_owner, -1,
-				     cpu); /* LMM(__printk_cpu_trylock:A) */
-	if (old == -1) {
-		/*
-		 * This CPU is now the owner and begins loading/storing
-		 * data: LMM(__printk_cpu_trylock:B)
-		 */
-		return 1;
-
-	} else if (old == cpu) {
-		/* This CPU is already the owner. */
-		atomic_inc(&printk_cpulock_nested);
-		return 1;
-	}
-
-	return 0;
-}
-EXPORT_SYMBOL(__printk_cpu_trylock);
-
-/**
- * __printk_cpu_unlock() - Release the printk cpu-reentrant spinning lock.
- *
- * The calling processor must be the owner of the lock.
- *
- * Context: Any context. Expects interrupts to be disabled.
- */
-void __printk_cpu_unlock(void)
-{
-	if (atomic_read(&printk_cpulock_nested)) {
-		atomic_dec(&printk_cpulock_nested);
-		return;
-	}
-
-	/*
-	 * This CPU is finished loading/storing data:
-	 * LMM(__printk_cpu_unlock:A)
-	 */
-
-	/*
-	 * Guarantee loads and stores from this CPU when it was the
-	 * lock owner are visible to the next lock owner. This pairs
-	 * with __printk_cpu_trylock:A.
-	 *
-	 * Memory barrier involvement:
-	 *
-	 * If __printk_cpu_trylock:A reads from __printk_cpu_unlock:B,
-	 * then __printk_cpu_trylock:B reads from __printk_cpu_unlock:A.
-	 *
-	 * Relies on:
-	 *
-	 * RELEASE from __printk_cpu_unlock:A to __printk_cpu_unlock:B
-	 * of this CPU
-	 *    matching
-	 * ACQUIRE from __printk_cpu_trylock:A to __printk_cpu_trylock:B
-	 * of the next CPU
-	 */
-	atomic_set_release(&printk_cpulock_owner,
-			   -1); /* LMM(__printk_cpu_unlock:B) */
-}
-EXPORT_SYMBOL(__printk_cpu_unlock);
-#endif /* CONFIG_SMP */
--
2.20.1