--- kernel/sched.c.org	Fri Apr 25 06:24:34 2003
+++ kernel/sched.c	Thu May  1 07:38:00 2003
@@ -1265,7 +1323,7 @@
 	runqueue_t *rq;
 	prio_array_t *array;
 	struct list_head *queue;
-	int idx;
+	int idx = 0;
 
 	/*
 	 * Test if we are atomic.  Since do_exit() needs to call into
@@ -1330,7 +1388,19 @@
 		rq->expired_timestamp = 0;
 	}
 
-	idx = sched_find_first_bit(array->bitmap);
+	if (!idx || idx >= MAX_PRIO)
+		idx = sched_find_first_bit(array->bitmap);
+	else {
+		idx = find_next_bit(array->bitmap, MAX_PRIO, idx + 1);
+		if (idx >= MAX_PRIO) {
+			idx = 0;
+			spin_unlock_irq(&rq->lock);
+			reacquire_kernel_lock(current);
+			preempt_enable_no_resched();
+			goto need_resched;
+		}
+	}
+
 	queue = array->queue + idx;
 	next = list_entry(queue->next, task_t, run_list);
@@ -1984,19 +2054,12 @@
 	prio_array_t *array = current->array;
 
 	/*
-	 * We implement yielding by moving the task into the expired
-	 * queue.
-	 *
-	 * (special rule: RT tasks will just roundrobin in the active
-	 * array.)
+	 * We implement yielding by moving the task to the back of
+	 * the queue.
 	 */
-	if (likely(!rt_task(current))) {
-		dequeue_task(current, array);
-		enqueue_task(current, rq->expired);
-	} else {
-		list_del(&current->run_list);
-		list_add_tail(&current->run_list, array->queue + current->prio);
-	}
+	list_del(&current->run_list);
+	list_add_tail(&current->run_list, array->queue + current->prio);
+	set_tsk_need_resched(current);
 	/*
 	 * Since we are going to call schedule() anyway, there's
 	 * no need to preempt: