From 45db8d4ffc7f7524d795dd92219ff1042188a959 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sun, 21 Feb 2010 19:23:36 +0100
Subject: [PATCH] sched: Extend activate_task to allow queueing to the head of a list

commit babe95bad86cba3843cb53d1cee8ac39c491a64a in tip.

The ability of enqueueing a task to the head of a SCHED_FIFO priority
list is required to fix some violations of POSIX scheduling policy.

Extend activate_task with a "head" argument and fix up all callers.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
---
 kernel/sched.c      |   17 +++++++++--------
 kernel/sched_fair.c |    2 +-
 kernel/sched_rt.c   |    4 ++--
 3 files changed, 12 insertions(+), 11 deletions(-)

diff --git a/kernel/sched.c b/kernel/sched.c
index 56ad49c..9a25a5f 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1978,12 +1978,13 @@ static void dequeue_task(struct rq *rq, struct task_struct *p, int sleep)
 /*
  * activate_task - move a task to the runqueue.
  */
-static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
+static void
+activate_task(struct rq *rq, struct task_struct *p, int wakeup, bool head)
 {
 	if (task_contributes_to_load(p))
 		rq->nr_uninterruptible--;
 
-	enqueue_task(rq, p, wakeup, false);
+	enqueue_task(rq, p, wakeup, head);
 	inc_nr_running(rq);
 }
 
@@ -2533,7 +2534,7 @@ out_activate:
 		schedstat_inc(p, se.nr_wakeups_local);
 	else
 		schedstat_inc(p, se.nr_wakeups_remote);
-	activate_task(rq, p, 1);
+	activate_task(rq, p, 1, false);
 	success = 1;
 
 	/*
@@ -2800,7 +2801,7 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 	BUG_ON(p->state != TASK_WAKING);
 	p->state = TASK_RUNNING;
 	update_rq_clock(rq);
-	activate_task(rq, p, 0);
+	activate_task(rq, p, 0, false);
 	trace_sched_wakeup_new(rq, p, 1);
 	check_preempt_curr(rq, p, WF_FORK);
 #ifdef CONFIG_SMP
@@ -4877,7 +4878,7 @@ recheck:
 	if (running)
 		p->sched_class->set_curr_task(rq);
 	if (on_rq) {
-		activate_task(rq, p, 0);
+		activate_task(rq, p, 0, false);
 
 		check_class_changed(rq, p, prev_class, oldprio, running);
 	}
@@ -5794,7 +5795,7 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
 	if (p->se.on_rq) {
 		deactivate_task(rq_src, p, 0);
 		set_task_cpu(p, dest_cpu);
-		activate_task(rq_dest, p, 0);
+		activate_task(rq_dest, p, 0, false);
 		check_preempt_curr(rq_dest, p, 0);
 	}
 done:
@@ -5962,7 +5963,7 @@ void sched_idle_next(void)
 	__setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1);
 
 	update_rq_clock(rq);
-	activate_task(rq, p, 0);
+	activate_task(rq, p, 0, false);
 
 	raw_spin_unlock_irqrestore(&rq->lock, flags);
 }
@@ -8268,7 +8269,7 @@ static void normalize_task(struct rq *rq, struct task_struct *p)
 	deactivate_task(rq, p, 0);
 	__setscheduler(rq, p, SCHED_NORMAL, 0);
 	if (on_rq) {
-		activate_task(rq, p, 0);
+		activate_task(rq, p, 0, false);
 		resched_task(rq->curr);
 	}
 }
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index b873769..add5302 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1828,7 +1828,7 @@ static void pull_task(struct rq *src_rq, struct task_struct *p,
 {
 	deactivate_task(src_rq, p, 0);
 	set_task_cpu(p, this_cpu);
-	activate_task(this_rq, p, 0);
+	activate_task(this_rq, p, 0, false);
 	check_preempt_curr(this_rq, p, 0);
 }
 
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index b2f6d2b..fdf667b 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -1429,7 +1429,7 @@ static int push_rt_task(struct rq *rq)
 
 	deactivate_task(rq, next_task, 0);
 	set_task_cpu(next_task, lowest_rq->cpu);
-	activate_task(lowest_rq, next_task, 0);
+	activate_task(lowest_rq, next_task, 0, false);
 
 	resched_task(lowest_rq->curr);
 
@@ -1512,7 +1512,7 @@ static int pull_rt_task(struct rq *this_rq)
 
 			deactivate_task(src_rq, p, 0);
 			set_task_cpu(p, this_cpu);
-			activate_task(this_rq, p, 0);
+			activate_task(this_rq, p, 0, false);
 			/*
 			 * We continue with the search, just in
 			 * case there's an even higher prio task
-- 
1.7.0.4
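
Background on why a "head" flag is wanted at all: POSIX requires head rather
than tail insertion in certain SCHED_FIFO situations -- for example, a running
SCHED_FIFO task that is preempted by a higher-priority task must become the
head, not the tail, of the list for its priority, so it runs again before its
peers. This patch only threads the flag through (every caller here passes
false); follow-up patches can then pass true where POSIX demands it. The
stand-alone C sketch below is purely illustrative user-space code, not part of
the patch or the kernel; names such as "struct node" and "enqueue" are
hypothetical. It shows the head-versus-tail distinction the flag selects on a
per-priority FIFO list:

#include <stdbool.h>
#include <stdio.h>

struct node {
	const char *name;
	struct node *next;
};

/* Insert n at the head or the tail of a singly linked FIFO queue. */
static void enqueue(struct node **queue, struct node *n, bool head)
{
	struct node *tail;

	if (head || !*queue) {
		n->next = *queue;	/* head insertion: n runs next */
		*queue = n;
		return;
	}
	for (tail = *queue; tail->next; tail = tail->next)
		;			/* walk to the current tail */
	tail->next = n;			/* tail insertion: n runs last */
	n->next = NULL;
}

int main(void)
{
	struct node a = { "A", NULL }, b = { "B", NULL }, c = { "C", NULL };
	struct node *queue = NULL;
	struct node *n;

	enqueue(&queue, &a, false);	/* normal wakeup: tail */
	enqueue(&queue, &b, false);	/* normal wakeup: tail */
	enqueue(&queue, &c, true);	/* head == true: jumps the queue */

	for (n = queue; n; n = n->next)
		printf("%s ", n->name);	/* prints: C A B */
	printf("\n");
	return 0;
}

With tail-only insertion C would print last; passing head == true makes C the
next task dispatched at that priority, which is the behavior the POSIX fixes
need.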