From: Nick Piggin

These conditions should now be impossible.  If one of them does trigger, it
indicates a bug that we need to find and fix, so fail hard with BUG_ON()
instead of warning and backing out.

Signed-off-by: Nick Piggin
Signed-off-by: Andrew Morton
---

 25-akpm/kernel/sched.c |   14 ++++----------
 1 files changed, 4 insertions(+), 10 deletions(-)

diff -puN kernel/sched.c~sched2-add-debugging kernel/sched.c
--- 25/kernel/sched.c~sched2-add-debugging	2005-03-17 23:47:59.000000000 -0800
+++ 25-akpm/kernel/sched.c	2005-03-17 23:47:59.000000000 -0800
@@ -1943,15 +1943,7 @@ static int load_balance(int this_cpu, ru
 		goto out_balanced;
 	}
 
-	/*
-	 * This should be "impossible", but since load
-	 * balancing is inherently racy and statistical,
-	 * it could happen in theory.
-	 */
-	if (unlikely(busiest == this_rq)) {
-		WARN_ON(1);
-		goto out_balanced;
-	}
+	BUG_ON(busiest == this_rq);
 
 	schedstat_add(sd, lb_imbalance[idle], imbalance);
 
@@ -2053,11 +2045,13 @@ static int load_balance_newidle(int this
 	}
 
 	busiest = find_busiest_queue(group);
-	if (!busiest || busiest == this_rq) {
+	if (!busiest) {
 		schedstat_inc(sd, lb_nobusyq[NEWLY_IDLE]);
 		goto out_balanced;
 	}
 
+	BUG_ON(busiest == this_rq);
+
 	/* Attempt to move tasks */
 	double_lock_balance(this_rq, busiest);
 
_
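
For reference, the behavioural difference between the two checks: the old
WARN_ON(1) path logged a stack trace and backed out through out_balanced,
while BUG_ON() oopses the kernel on the spot, on the theory that an
"impossible" condition firing means a bug elsewhere that must be found and
fixed rather than papered over.  Below is a minimal userspace sketch of the
same policy change, using assert() as a stand-in for BUG_ON() and a
hypothetical find_busiest_queue_stub() in place of the real scheduler
helpers:

	#include <assert.h>
	#include <stdio.h>

	struct rq { int cpu; };

	/*
	 * Hypothetical stand-in for find_busiest_queue(); by construction
	 * it can no longer return the caller's own runqueue.
	 */
	static struct rq *find_busiest_queue_stub(struct rq *this_rq)
	{
		static struct rq other = { .cpu = 1 };
		(void)this_rq;
		return &other;
	}

	int main(void)
	{
		struct rq this_rq = { .cpu = 0 };
		struct rq *busiest = find_busiest_queue_stub(&this_rq);

		/*
		 * Old policy: warn and back out.  New policy (as in this
		 * patch): treat the condition as a fatal bug.  assert()
		 * aborts the process here, as BUG_ON() oopses the kernel.
		 */
		assert(busiest != &this_rq);

		printf("balancing cpu %d against cpu %d\n",
		       this_rq.cpu, busiest->cpu);
		return 0;
	}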