[PATCH 3/8] RT: Break out the search function

From: Gregory Haskins
Date: Mon Nov 05 2007 - 19:50:45 EST


Isolate the search logic into its own function, find_lowest_rq(), so that it
can later be used in places other than find_lock_lowest_rq().

Signed-off-by: Gregory Haskins <ghaskins@xxxxxxxxxx>
---

kernel/sched_rt.c | 62 ++++++++++++++++++++++++++++++++---------------------
1 files changed, 37 insertions(+), 25 deletions(-)
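For reference, the shape of the split is easy to see in isolation. The sketch
below is plain user-space C, not kernel code: the fake_rq type, the sample
priorities, and the find_lowest()/main() names are all invented for
illustration. It only mirrors the pattern this patch produces, namely a
side-effect-free search that returns a target CPU or -1, and a caller that
retries up to RT_MAX_TRIES times and remains responsible for locking and
revalidating the candidate.

/*
 * Stand-alone illustration only -- not kernel code.  The fake_rq type,
 * the sample priorities and the function names are invented.  Lower
 * highest_prio values mean higher RT priority; MAX_RT_PRIO and above
 * means "no RT task queued here".
 */
#include <stdio.h>

#define NR_CPUS		4
#define MAX_RT_PRIO	100
#define RT_MAX_TRIES	3

struct fake_rq {
	int cpu;
	int highest_prio;
};

static struct fake_rq runqueues[NR_CPUS] = {
	{ 0, 10 }, { 1, 50 }, { 2, MAX_RT_PRIO }, { 3, 30 },
};

/* Analogous to find_lowest_rq(): a pure search, returns a CPU id or -1. */
static int find_lowest(int this_cpu, int task_prio)
{
	struct fake_rq *lowest = NULL;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		struct fake_rq *rq = &runqueues[cpu];

		if (cpu == this_cpu)
			continue;

		/* A CPU with no RT task at all is always good enough. */
		if (rq->highest_prio >= MAX_RT_PRIO)
			return cpu;

		/* Otherwise remember the rq whose top task is least important. */
		if (rq->highest_prio > task_prio &&
		    (!lowest || rq->highest_prio > lowest->highest_prio))
			lowest = rq;
	}

	return lowest ? lowest->cpu : -1;
}

int main(void)
{
	int tries, cpu = -1;

	/* Analogous to the retry loop that stays in find_lock_lowest_rq(). */
	for (tries = 0; tries < RT_MAX_TRIES; tries++) {
		cpu = find_lowest(0, 40);
		if (cpu == -1)
			break;
		/*
		 * The kernel code double-locks the two rqs here and retries
		 * if the candidate's priority changed in the meantime.
		 */
		break;
	}

	printf("chosen cpu: %d\n", cpu);
	return 0;
}
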

diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index f1fc1b4..fbe7b8a 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -312,43 +312,55 @@ static struct task_struct *pick_next_highest_task_rt(struct rq *rq,
 	return next;
 }
 
-/* Will lock the rq it finds */
-static struct rq *find_lock_lowest_rq(struct task_struct *task,
-				      struct rq *rq)
+static int find_lowest_rq(struct task_struct *task)
 {
-	struct rq *lowest_rq = NULL;
-	cpumask_t cpu_mask;
 	int cpu;
-	int tries;
+	cpumask_t cpu_mask;
+	struct rq *lowest_rq = NULL;
 
 	cpus_and(cpu_mask, cpu_online_map, task->cpus_allowed);
 
-	for (tries = 0; tries < RT_MAX_TRIES; tries++) {
-		/*
-		 * Scan each rq for the lowest prio.
-		 */
-		for_each_cpu_mask(cpu, cpu_mask) {
-			struct rq *curr_rq = &per_cpu(runqueues, cpu);
+	/*
+	 * Scan each rq for the lowest prio.
+	 */
+	for_each_cpu_mask(cpu, cpu_mask) {
+		struct rq *rq = cpu_rq(cpu);
 
-			if (cpu == rq->cpu)
-				continue;
+		if (cpu == rq->cpu)
+			continue;
 
-			/* We look for lowest RT prio or non-rt CPU */
-			if (curr_rq->rt.highest_prio >= MAX_RT_PRIO) {
-				lowest_rq = curr_rq;
-				break;
-			}
+		/* We look for lowest RT prio or non-rt CPU */
+		if (rq->rt.highest_prio >= MAX_RT_PRIO) {
+			lowest_rq = rq;
+			break;
+		}
 
-			/* no locking for now */
-			if (curr_rq->rt.highest_prio > task->prio &&
-			    (!lowest_rq || curr_rq->rt.highest_prio > lowest_rq->rt.highest_prio)) {
-				lowest_rq = curr_rq;
-			}
+		/* no locking for now */
+		if (rq->rt.highest_prio > task->prio &&
+		    (!lowest_rq || rq->rt.highest_prio > lowest_rq->rt.highest_prio)) {
+			lowest_rq = rq;
 		}
+	}
+
+	return lowest_rq ? lowest_rq->cpu : -1;
+}
+
+/* Will lock the rq it finds */
+static struct rq *find_lock_lowest_rq(struct task_struct *task,
+				      struct rq *rq)
+{
+	struct rq *lowest_rq = NULL;
+	int cpu;
+	int tries;
+
+	for (tries = 0; tries < RT_MAX_TRIES; tries++) {
+		cpu = find_lowest_rq(task);
 
-		if (!lowest_rq)
+		if (cpu == -1)
 			break;
 
+		lowest_rq = cpu_rq(cpu);
+
 		/* if the prio of this runqueue changed, try again */
 		if (double_lock_balance(rq, lowest_rq)) {
 			/*
