diff -ru sched-2.5.59-04/kernel/sched.c sched-2.5.59-05/kernel/sched.c
--- sched-2.5.59-04/kernel/sched.c	Tue Feb  4 00:40:20 2003
+++ sched-2.5.59-05/kernel/sched.c	Tue Feb  4 02:24:21 2003
@@ -67,6 +67,8 @@
 #define INTERACTIVE_DELTA	2
 #define MAX_SLEEP_AVG		(10*HZ)
 #define STARVATION_LIMIT	(30*HZ)
+#define SYNC_WAKEUPS		1
+#define SMART_WAKE_CHILD	1
 
 /*
  * If a task is 'interactive' then we reinsert it in the active
@@ -322,6 +324,13 @@
  * Also update all the scheduling statistics stuff. (sleep average
  * calculation, priority modifiers, etc.)
  */
+static inline void __activate_task(task_t *p, runqueue_t *rq)
+{
+	enqueue_task(p, rq->active);
+	nr_running_inc(rq);
+}
+
+
 static inline void activate_task(task_t *p, runqueue_t *rq)
 {
 	unsigned long sleep_time = jiffies - p->last_run;
@@ -340,8 +349,7 @@
 		p->sleep_avg = MAX_SLEEP_AVG;
 		p->prio = effective_prio(p);
 	}
-	enqueue_task(p, array);
-	nr_running_inc(rq);
+	__activate_task(p, rq);
 }
 
 /*
@@ -455,6 +463,7 @@
 	long old_state;
 	runqueue_t *rq;
 
+	sync &= SYNC_WAKEUPS;
repeat_lock_task:
 	rq = task_rq_lock(p, &flags);
 	old_state = p->state;
@@ -473,10 +482,13 @@
 		}
 		if (old_state == TASK_UNINTERRUPTIBLE)
 			rq->nr_uninterruptible--;
-		activate_task(p, rq);
-
-		if (p->prio < rq->curr->prio)
-			resched_task(rq->curr);
+		if (sync)
+			__activate_task(p, rq);
+		else {
+			activate_task(p, rq);
+			if (p->prio < rq->curr->prio)
+				resched_task(rq->curr);
+		}
 		success = 1;
 	}
 	p->state = TASK_RUNNING;
@@ -512,8 +524,19 @@
 		p->prio = effective_prio(p);
 	}
 	set_task_cpu(p, smp_processor_id());
-	activate_task(p, rq);
+	if (SMART_WAKE_CHILD) {
+		if (unlikely(!current->array))
+			__activate_task(p, rq);
+		else {
+			p->prio = current->prio;
+			list_add_tail(&p->run_list, &current->run_list);
+			p->array = current->array;
+			p->array->nr_active++;
+			nr_running_inc(rq);
+		}
+	} else
+		activate_task(p, rq);
 
 	rq_unlock(rq);
 }
 
@@ -790,7 +813,23 @@
 
 #endif /* CONFIG_NUMA */
 
-#if CONFIG_SMP
+/*
+ * One of the idle_cpu_tick() and busy_cpu_tick() functions will
+ * get called every timer tick, on every CPU. Our balancing action
+ * frequency and balancing aggressiveness depend on whether the CPU
+ * is idle or not.
+ *
+ * busy-rebalance every 250 msecs. idle-rebalance every 1 msec. (or on
+ * systems with HZ=100, every 10 msecs.)
+ */
+#define BUSY_REBALANCE_TICK (HZ/4 ?: 1)
+#define IDLE_REBALANCE_TICK (HZ/1000 ?: 1)
+
+#if !CONFIG_SMP
+
+static inline void load_balance(runqueue_t *rq, int this_cpu, int idle) { }
+
+#else
 
 /*
  * double_lock_balance - lock the busiest runqueue
@@ -972,6 +1011,9 @@
  *	1) running (obviously), or
  *	2) cannot be migrated to this CPU due to cpus_allowed, or
  *	3) are cache-hot on their current CPU.
+ *
+ * (except if we are in idle mode, which is a more aggressive
+ * form of rebalancing.)
  */
 
 #define CAN_MIGRATE_TASK(p,rq,this_cpu)	\
@@ -1665,7 +1707,7 @@
 	else
 		p->prio = p->static_prio;
 	if (array)
-		activate_task(p, task_rq(p));
+		__activate_task(p, task_rq(p));
 
out_unlock:
 	task_rq_unlock(rq, &flags);
@@ -2297,7 +2339,7 @@
 	set_task_cpu(p, cpu_dest);
 	if (p->array) {
 		deactivate_task(p, rq_src);
-		activate_task(p, rq_dest);
+		__activate_task(p, rq_dest);
 		if (p->prio < rq_dest->curr->prio)
 			resched_task(rq_dest->curr);
 	}
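
A note on the `?:` in BUSY_REBALANCE_TICK and IDLE_REBALANCE_TICK: it is the GNU C conditional with an omitted middle operand, where `a ?: b` evaluates to `a` unless `a` is zero, in which case it yields `b`. That keeps either interval from truncating to zero ticks under integer division. The standalone sketch below is illustrative only and not part of the patch; the helper names and the `main()` harness are invented for the demo, and HZ is passed in as a parameter rather than taken from the kernel headers.

/*
 * Demo of the "a ?: b" GNU extension used by the rebalance-tick
 * macros above (requires gcc or clang). Each helper mirrors the
 * corresponding macro, with hz as an explicit argument.
 */
#include <stdio.h>

static int busy_rebalance_tick(int hz)
{
	return hz / 4 ?: 1;	/* 0 would mean "never"; clamp to 1 tick */
}

static int idle_rebalance_tick(int hz)
{
	return hz / 1000 ?: 1;	/* hz < 1000 truncates to 0; clamp to 1 */
}

int main(void)
{
	int hz_values[] = { 1000, 512, 100 };

	for (int i = 0; i < 3; i++) {
		int hz = hz_values[i];

		printf("HZ=%4d: busy-rebalance every %3d ticks, "
		       "idle-rebalance every %d tick(s)\n",
		       hz, busy_rebalance_tick(hz), idle_rebalance_tick(hz));
	}
	return 0;
}

At HZ=1000 this reproduces the numbers in the patch comment (busy rebalance every 250 ticks, i.e. 250 msecs; idle rebalance every tick, i.e. 1 msec), and at HZ=100 the idle interval clamps to a single 10 msec tick. The clamp also keeps the constants safe to use as divisors of jiffies, assuming that is how the tick-time rebalancing code consumes them.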