[PATCH 4/6] workqueue: Remember whether a work item was on a BH workqueue

From: Tejun Heo
Date: Tue Feb 27 2024 - 12:33:16 EST


Add an off-queue flag, WORK_OFFQ_BH, that indicates whether the last
workqueue the work item was on was a BH one. This will be used in the
cancel_sync path to test whether a work item is BH so that atomic
cancel_sync'ing for BH work items can be implemented.
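
For example, once the bit is stashed, a later patch in the series can
key off it in the flush-from-cancel path along these lines (sketch
only, not the final code):

	unsigned long data = *work_data_bits(work);

	if (data & WORK_OFFQ_BH) {
		/* BH work item: safe to busy-wait for its completion */
	} else {
		/* regular work item: sleepable wait as before */
	}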

Signed-off-by: Tejun Heo <tj@xxxxxxxxxx>
---
 include/linux/workqueue.h |  4 +++-
 kernel/workqueue.c        | 10 ++++++++--
 2 files changed, 11 insertions(+), 3 deletions(-)
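
(Not part of the patch, for reviewers: the end state the series aims
for looks roughly like the below. system_bh_wq already exists; the
atomic cancel_work_sync() guarantee only arrives with the remaining
patches, and the function/variable names here are made up for
illustration.)

	#include <linux/workqueue.h>

	/* a BH work item, e.g. a converted tasklet */
	static void my_bh_fn(struct work_struct *work)
	{
		/* runs in softirq (BH) context */
	}
	static DECLARE_WORK(my_bh_work, my_bh_fn);

	static void my_trigger(void)
	{
		queue_work(system_bh_wq, &my_bh_work);
	}

	/* may be called from atomic context once the series is complete */
	static void my_shutdown(void)
	{
		/*
		 * The cancel path finds WORK_OFFQ_BH stashed in
		 * my_bh_work.data and busy-waits for a running instance
		 * instead of sleeping.
		 */
		cancel_work_sync(&my_bh_work);
	}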

diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 86483743ad28..7710cd52f7f0 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -52,9 +52,10 @@ enum work_bits {
 	 *
 	 * MSB
 	 * [ pool ID ] [ disable depth ] [ OFFQ flags ] [ STRUCT flags ]
-	 *                16 bits           0 bits        4 or 5 bits
+	 *                16 bits           1 bit         4 or 5 bits
 	 */
 	WORK_OFFQ_FLAG_SHIFT	= WORK_STRUCT_FLAG_BITS,
+	WORK_OFFQ_BH_BIT	= WORK_OFFQ_FLAG_SHIFT,
 	WORK_OFFQ_FLAG_END,
 	WORK_OFFQ_FLAG_BITS	= WORK_OFFQ_FLAG_END - WORK_OFFQ_FLAG_SHIFT,

@@ -98,6 +99,7 @@ enum wq_misc_consts {
 };

 /* Convenience constants - of type 'unsigned long', not 'enum'! */
+#define WORK_OFFQ_BH		(1ul << WORK_OFFQ_BH_BIT)
 #define WORK_OFFQ_FLAG_MASK	(((1ul << WORK_OFFQ_FLAG_BITS) - 1) << WORK_OFFQ_FLAG_SHIFT)
 #define WORK_OFFQ_DISABLE_MASK	(((1ul << WORK_OFFQ_DISABLE_BITS) - 1) << WORK_OFFQ_DISABLE_SHIFT)
 #define WORK_OFFQ_POOL_NONE	((1ul << WORK_OFFQ_POOL_BITS) - 1)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 07e77130227c..5c71fbd9d854 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -763,6 +763,11 @@ static int work_next_color(int color)
 	return (color + 1) % WORK_NR_COLORS;
 }

+static unsigned long pool_offq_flags(struct worker_pool *pool)
+{
+	return (pool->flags & POOL_BH) ? WORK_OFFQ_BH : 0;
+}
+
 /*
  * While queued, %WORK_STRUCT_PWQ is set and non flag bits of a work's data
  * contain the pointer to the queued pwq. Once execution starts, the flag
@@ -2119,7 +2124,8 @@ static int try_to_grab_pending(struct work_struct *work, u32 cflags,
 		 * this destroys work->data needed by the next step, stash it.
 		 */
 		work_data = *work_data_bits(work);
-		set_work_pool_and_keep_pending(work, pool->id, 0);
+		set_work_pool_and_keep_pending(work, pool->id,
+					       pool_offq_flags(pool));

 		/* must be the last step, see the function comment */
 		pwq_dec_nr_in_flight(pwq, work_data);
@@ -3171,7 +3177,7 @@ __acquires(&pool->lock)
 	 * PENDING and queued state changes happen together while IRQ is
 	 * disabled.
 	 */
-	set_work_pool_and_clear_pending(work, pool->id, 0);
+	set_work_pool_and_clear_pending(work, pool->id, pool_offq_flags(pool));

 	pwq->stats[PWQ_STAT_STARTED]++;
 	raw_spin_unlock_irq(&pool->lock);
--
2.43.2