Re: [RFC PATCH] sched: Introduce per-mm/cpu concurrency id state

From: Aaron Lu
Date: Fri Mar 31 2023 - 04:41:28 EST


On Thu, Mar 30, 2023 at 07:09:11PM -0400, Mathieu Desnoyers wrote:

> void sched_mm_cid_exit_signals(struct task_struct *t)
> {
> struct mm_struct *mm = t->mm;
> - unsigned long flags;
> + struct rq *rq = this_rq();

I got many messages like the one below due to the above line:

[ 19.294089] BUG: using smp_processor_id() in preemptible [00000000] code: kworker/u449:0/1621
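
If I understand it correctly, the warning comes from this_rq() itself:
it is a per-cpu access done while the task can still migrate. Roughly
(from kernel/sched/sched.h, so take the exact expansion with a grain of
salt):

#define cpu_rq(cpu)	(&per_cpu(runqueues, (cpu)))
#define this_rq()	this_cpu_ptr(&runqueues)

With CONFIG_DEBUG_PREEMPT the this_cpu_ptr() path goes through
debug_smp_processor_id(), which prints the above BUG whenever it runs
with both preemption and interrupts enabled. rq_lock_irqsave() only
disables interrupts after this_rq() has already been evaluated, so it
doesn't help here.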

> + struct rq_flags rf;
>
> if (!mm)
> return;
> - local_irq_save(flags);
> + rq_lock_irqsave(rq, &rf);
> mm_cid_put(mm, t->mm_cid);
> t->mm_cid = -1;
> t->mm_cid_active = 0;
> - local_irq_restore(flags);
> + rq_unlock_irqrestore(rq, &rf);
> }
>
> void sched_mm_cid_before_execve(struct task_struct *t)
> {
> struct mm_struct *mm = t->mm;
> - unsigned long flags;
> + struct rq *rq = this_rq();

Also here.

> + struct rq_flags rf;
>
> if (!mm)
> return;
> - local_irq_save(flags);
> + rq_lock_irqsave(rq, &rf);
> mm_cid_put(mm, t->mm_cid);
> t->mm_cid = -1;
> t->mm_cid_active = 0;
> - local_irq_restore(flags);
> + rq_unlock_irqrestore(rq, &rf);
> }
>
> void sched_mm_cid_after_execve(struct task_struct *t)
> {
> struct mm_struct *mm = t->mm;
> - unsigned long flags;
> + struct rq *rq = this_rq();

And here.

> + struct rq_flags rf;
>
> if (!mm)
> return;
> - local_irq_save(flags);
> + rq_lock_irqsave(rq, &rf);
> t->mm_cid = mm_cid_get(mm);
> t->mm_cid_active = 1;
> - local_irq_restore(flags);
> + rq_unlock_irqrestore(rq, &rf);
> rseq_set_notify_resume(t);
> }

I used the below diff to get rid of these messages, without fully
understanding the purpose of these functions:

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index f07b87d155bd..7194c29f3c91 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -11444,45 +11444,57 @@ void sched_mm_cid_migrate_to(struct rq *dst_rq, struct task_struct *t, int src_c
void sched_mm_cid_exit_signals(struct task_struct *t)
{
struct mm_struct *mm = t->mm;
- struct rq *rq = this_rq();
struct rq_flags rf;
+ struct rq *rq;

if (!mm)
return;
+
+ preempt_disable();
+ rq = this_rq();
rq_lock_irqsave(rq, &rf);
mm_cid_put(mm, t->mm_cid);
t->mm_cid = -1;
t->mm_cid_active = 0;
rq_unlock_irqrestore(rq, &rf);
+ preempt_enable();
}

void sched_mm_cid_before_execve(struct task_struct *t)
{
struct mm_struct *mm = t->mm;
- struct rq *rq = this_rq();
struct rq_flags rf;
+ struct rq *rq;

if (!mm)
return;
+
+ preempt_disable();
+ rq = this_rq();
rq_lock_irqsave(rq, &rf);
mm_cid_put(mm, t->mm_cid);
t->mm_cid = -1;
t->mm_cid_active = 0;
rq_unlock_irqrestore(rq, &rf);
+ preempt_enable();
}

void sched_mm_cid_after_execve(struct task_struct *t)
{
struct mm_struct *mm = t->mm;
- struct rq *rq = this_rq();
struct rq_flags rf;
+ struct rq *rq;

if (!mm)
return;
+
+ preempt_disable();
+ rq = this_rq();
rq_lock_irqsave(rq, &rf);
t->mm_cid = mm_cid_get(mm);
t->mm_cid_active = 1;
rq_unlock_irqrestore(rq, &rf);
+ preempt_enable();
rseq_set_notify_resume(t);
}

--
2.34.1
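
Another option, to avoid open coding the preempt_disable()/preempt_enable()
pairs in each of these functions, might be a small helper in the style of
the existing this_rq_lock_irq(), e.g. the (untested, hypothetical) sketch
below; lock_this_rq_irqsave() is just a name I made up:

/*
 * Hypothetical helper: disable interrupts first, then pick up the local
 * rq. With interrupts off the DEBUG_PREEMPT check in smp_processor_id()
 * no longer fires, and the task cannot migrate before the rq lock is
 * taken.
 */
static inline struct rq *lock_this_rq_irqsave(struct rq_flags *rf)
{
	struct rq *rq;

	local_irq_save(rf->flags);
	rq = this_rq();
	rq_lock(rq, rf);

	return rq;
}

Callers could then pair it with the existing rq_unlock_irqrestore(). But
I'll leave that to you; the diff above just takes the most obvious route.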