[PATCH 3/4] sched/core: have io_schedule_prepare() return a long

From: Jens Axboe
Date: Tue Apr 16 2024 - 08:16:39 EST


In preparation for needing more state than 32 bits can hold on 64-bit
archs, switch the io_schedule_prepare() token to a long instead.

Signed-off-by: Jens Axboe <axboe@xxxxxxxxx>
---
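Not part of the commit, just a minimal usage sketch of the prepare/finish
pair after this change; the wait_for_io() helper is hypothetical and only
stands in for whatever the caller actually blocks on:

	long token;

	token = io_schedule_prepare();	/* set ->in_iowait, flush block plug */
	wait_for_io();			/* hypothetical blocking operation */
	io_schedule_finish(token);	/* restore previous ->in_iowait state */

Callers that previously stored the token in an int (as mutex_lock_io() did)
now need a long so no state is truncated on 64-bit archs.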
block/blk-cgroup.c | 2 +-
include/linux/sched.h | 4 ++--
kernel/locking/mutex.c | 4 ++--
kernel/locking/rtmutex_api.c | 4 ++--
kernel/sched/core.c | 6 +++---
5 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index bdbb557feb5a..77faceddd5dd 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -1849,7 +1849,7 @@ static void blkcg_maybe_throttle_blkg(struct blkcg_gq *blkg, bool use_memdelay)
u64 now = blk_time_get_ns();
u64 exp;
u64 delay_nsec = 0;
- int tok;
+ long tok;

while (blkg->parent) {
int use_delay = atomic_read(&blkg->use_delay);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 3c2abbc587b4..dcfc2830ed8e 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -317,8 +317,8 @@ asmlinkage void preempt_schedule_irq(void);
extern void schedule_rtlock(void);
#endif

-extern int __must_check io_schedule_prepare(void);
-extern void io_schedule_finish(int token);
+extern long __must_check io_schedule_prepare(void);
+extern void io_schedule_finish(long token);
extern long io_schedule_timeout(long timeout);
extern void io_schedule(void);

diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index cbae8c0b89ab..4a86ea6c7f19 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -830,7 +830,7 @@ EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
void __sched
mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
{
- int token;
+ long token;

might_sleep();

@@ -1026,7 +1026,7 @@ EXPORT_SYMBOL(mutex_lock_killable);
*/
void __sched mutex_lock_io(struct mutex *lock)
{
- int token;
+ long token;

token = io_schedule_prepare();
mutex_lock(lock);
diff --git a/kernel/locking/rtmutex_api.c b/kernel/locking/rtmutex_api.c
index a6974d044593..ddf7f7f3f0b5 100644
--- a/kernel/locking/rtmutex_api.c
+++ b/kernel/locking/rtmutex_api.c
@@ -547,7 +547,7 @@ EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

void __sched mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
{
- int token;
+ long token;

might_sleep();

@@ -579,7 +579,7 @@ EXPORT_SYMBOL(mutex_lock_killable);

void __sched mutex_lock_io(struct mutex *lock)
{
- int token = io_schedule_prepare();
+ long token = io_schedule_prepare();

__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
io_schedule_finish(token);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 6a6c985220b1..63f6d44f460c 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -9032,16 +9032,16 @@ int __sched yield_to(struct task_struct *p, bool preempt)
}
EXPORT_SYMBOL_GPL(yield_to);

-int io_schedule_prepare(void)
+long io_schedule_prepare(void)
{
- int old_iowait = current->in_iowait;
+ long old_iowait = current->in_iowait;

current->in_iowait = 1;
blk_flush_plug(current->plug, true);
return old_iowait;
}

-void io_schedule_finish(int token)
+void io_schedule_finish(long token)
{
current->in_iowait = token;
}
--
2.43.0