[PATCH] posix-cpu-timers: Merge running and checking_timer state in one field
From: Frederic Weisbecker
Date: Mon Oct 19 2015 - 20:19:25 EST
This way we may consume less space in the signal struct (depending on
bool size and padding) and we no longer need to worry about ordering
between the running and checking_timer fields.
Cc: Jason Low <jason.low2@xxxxxx>
Cc: Oleg Nesterov <oleg@xxxxxxxxxx>
Cc: George Spelvin <linux@xxxxxxxxxxx>
Cc: Paul E. McKenney <paulmck@xxxxxxxxxxxxxxxxxx>
Cc: Davidlohr Bueso <dave@xxxxxxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: Ingo Molnar <mingo@xxxxxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Signed-off-by: Frederic Weisbecker <fweisbec@xxxxxxxxx>
---
include/linux/init_task.h | 3 +--
include/linux/sched.h | 19 +++++++++++--------
kernel/fork.c | 2 +-
kernel/sched/stats.h | 2 +-
kernel/time/posix-cpu-timers.c | 31 +++++++++++++++++--------------
5 files changed, 31 insertions(+), 26 deletions(-)
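
For reviewers, here is a minimal standalone sketch of the encoding used
below (illustration only, not kernel code; the struct and function names
are made up). It shows how the two booleans collapse into one word that
can be read with a single plain load, and why the fastpath only fires
when the state is exactly RUNNING:

#include <stdio.h>

/* Same bit layout as the patch: one word replaces two bools. */
#define CPUTIMER_STATE_RUNNING	1u
#define CPUTIMER_STATE_CHECKING	2u

/* Stand-in for struct thread_group_cputimer::state. */
struct cputimer_sketch {
	unsigned int state;
};

/* Mirrors the fastpath test: timers armed and nobody already checking. */
static int fastpath_should_check(const struct cputimer_sketch *ct)
{
	return ct->state == CPUTIMER_STATE_RUNNING;
}

int main(void)
{
	struct cputimer_sketch ct = { .state = 0 };

	/* Arm the group timer: a single one-word store. */
	ct.state = CPUTIMER_STATE_RUNNING;
	printf("armed, fastpath fires: %d\n", fastpath_should_check(&ct));

	/* A thread starts walking the expired-timer lists. */
	ct.state |= CPUTIMER_STATE_CHECKING;
	printf("checking, fastpath fires: %d\n", fastpath_should_check(&ct));

	/* stop_process_timers(): clear both bits in one store. */
	ct.state = 0;
	printf("stopped, fastpath fires: %d\n", fastpath_should_check(&ct));

	return 0;
}

Because both bits live in the same word, a reader observes RUNNING and
CHECKING together from one load, which is what lets the two READ_ONCE()
calls in fastpath_timer_check() collapse into a single comparison.
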
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 810a34f..c469c73 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -59,8 +59,7 @@ extern struct fs_struct init_fs;
.rlim = INIT_RLIMITS, \
.cputimer = { \
.cputime_atomic = INIT_CPUTIME_ATOMIC, \
- .running = false, \
- .checking_timer = false, \
+ .state = 0, \
}, \
INIT_PREV_CPUTIME(sig) \
.cred_guard_mutex = \
diff --git a/include/linux/sched.h b/include/linux/sched.h
index f87559d..2042172 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -614,21 +614,24 @@ struct task_cputime_atomic {
*/
#define INIT_PREEMPT_COUNT (PREEMPT_DISABLED + PREEMPT_ACTIVE)
+/* struct thread_group_cputimer::state bits */
+#define CPUTIMER_STATE_RUNNING 1
+#define CPUTIMER_STATE_CHECKING 2
+
/**
* struct thread_group_cputimer - thread group interval timer counts
* @cputime_atomic: atomic thread group interval timers.
- * @running: true when there are timers running and
- * @cputime_atomic receives updates.
- * @checking_timer: true when a thread in the group is in the
- * process of checking for thread group timers.
- *
+ * @state: flags describing the current state of the cputimer.
+ * CPUTIMER_STATE_RUNNING bit means the timers are elapsing.
+ * CPUTIMER_STATE_CHECKING bit means that the cputimer has
+ * expired and a thread in the group is checking the
+ * callback list.
* This structure contains the version of task_cputime, above, that is
* used for thread group CPU timer calculations.
*/
struct thread_group_cputimer {
- struct task_cputime_atomic cputime_atomic;
- bool running;
- bool checking_timer;
+ struct task_cputime_atomic cputime_atomic;
+ unsigned int state;
};
#include <linux/rwsem.h>
diff --git a/kernel/fork.c b/kernel/fork.c
index 6ac8942..c037a2c 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1101,7 +1101,7 @@ static void posix_cpu_timers_init_group(struct signal_struct *sig)
cpu_limit = READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
if (cpu_limit != RLIM_INFINITY) {
sig->cputime_expires.prof_exp = secs_to_cputime(cpu_limit);
- sig->cputimer.running = true;
+ sig->cputimer.state = CPUTIMER_STATE_RUNNING;
}
/* The timer lists. */
diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h
index b0fbc76..099fde1 100644
--- a/kernel/sched/stats.h
+++ b/kernel/sched/stats.h
@@ -175,7 +175,7 @@ static inline bool cputimer_running(struct task_struct *tsk)
struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
/* Check if cputimer isn't running. This is accessed without locking. */
- if (!READ_ONCE(cputimer->running))
+ if (!READ_ONCE(cputimer->state))
return false;
/*
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index f5e86d2..ffa95d3 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -233,7 +233,7 @@ void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
struct task_cputime sum;
/* Check if cputimer isn't running. This is accessed without locking. */
- if (!READ_ONCE(cputimer->running)) {
+ if (!READ_ONCE(cputimer->state)) {
/*
* The POSIX timer interface allows for absolute time expiry
* values through the TIMER_ABSTIME flag, therefore we have
@@ -243,13 +243,13 @@ void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
update_gt_cputime(&cputimer->cputime_atomic, &sum);
/*
- * We're setting cputimer->running without a lock. Ensure
+ * We're setting cputimer->state without a lock. Ensure
* this only gets written to in one operation. We set
- * running after update_gt_cputime() as a small optimization,
+ * the RUNNING state after update_gt_cputime() as a small optimization,
* but barriers are not required because update_gt_cputime()
* can handle concurrent updates.
*/
- WRITE_ONCE(cputimer->running, true);
+ WRITE_ONCE(cputimer->state, CPUTIMER_STATE_RUNNING);
}
sample_cputime_atomic(times, &cputimer->cputime_atomic);
}
@@ -606,7 +606,7 @@ bool posix_cpu_timers_can_stop_tick(struct task_struct *tsk)
return false;
/* Check if cputimer is running. This is accessed without locking. */
- if (READ_ONCE(tsk->signal->cputimer.running))
+ if (READ_ONCE(tsk->signal->cputimer.state))
return false;
return true;
@@ -918,7 +918,7 @@ static inline void stop_process_timers(struct signal_struct *sig)
struct thread_group_cputimer *cputimer = &sig->cputimer;
- /* Turn off cputimer->running. This is done without locking. */
+ /* Turn off the cputimer state. This is done without locking. */
- WRITE_ONCE(cputimer->running, false);
+ WRITE_ONCE(cputimer->state, 0);
}
static u32 onecputick;
@@ -972,14 +972,17 @@ static void check_process_timers(struct task_struct *tsk,
* If cputimer is not running, then there are no active
* process wide timers (POSIX 1.b, itimers, RLIMIT_CPU).
*/
- if (!READ_ONCE(tsk->signal->cputimer.running))
+ if (!READ_ONCE(sig->cputimer.state))
return;
+ WARN_ON_ONCE(sig->cputimer.state & CPUTIMER_STATE_CHECKING);
/*
- * Signify that a thread is checking for process timers.
- * Write access to this field is protected by the sighand lock.
+ * Signify that this thread is checking for process timers, so that
+ * other threads in the group don't contend on the sighand lock by
+ * checking them concurrently. Write access to this field is
+ * protected by the sighand lock.
*/
- sig->cputimer.checking_timer = true;
+ sig->cputimer.state |= CPUTIMER_STATE_CHECKING;
/*
* Collect the current process totals.
@@ -1036,7 +1039,8 @@ static void check_process_timers(struct task_struct *tsk,
if (task_cputime_zero(&sig->cputime_expires))
stop_process_timers(sig);
- sig->cputimer.checking_timer = false;
+ /* Turn off CHECKING if stop_process_timers() hasn't done it yet */
+ sig->cputimer.state &= ~CPUTIMER_STATE_CHECKING;
}
/*
@@ -1153,19 +1157,18 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
/*
* Check if thread group timers expired when the cputimer is
* running and no other thread in the group is already checking
- * for thread group cputimers. These fields are read without the
+ * for thread group cputimers. The state is read without the
* sighand lock. However, this is fine because this is meant to
* be a fastpath heuristic to determine whether we should try to
* acquire the sighand lock to check/handle timers.
*
- * In the worst case scenario, if 'running' or 'checking_timer' gets
+ * In the worst case scenario, if 'RUNNING' or 'CHECKING' gets
* set but the current thread doesn't see the change yet, we'll wait
* until the next thread in the group gets a scheduler interrupt to
* handle the timer. This isn't an issue in practice because these
* types of delays with signals actually getting sent are expected.
*/
- if (READ_ONCE(sig->cputimer.running) &&
- !READ_ONCE(sig->cputimer.checking_timer)) {
+ if (READ_ONCE(sig->cputimer.state) == CPUTIMER_STATE_RUNNING) {
struct task_cputime group_sample;
sample_cputime_atomic(&group_sample, &sig->cputimer.cputime_atomic);
--
2.5.3