As has been pointed out before, sched_yield currently doesn't work for
SCHED_RR processes. There was a patch to fix this about two months ago
by Artur Skawina but, as far as I can tell, it was ignored (probably
because it included the controversial SCHED_IDLE support). So I'm
having another go.
Patch against 2.3.19, lightly tested. The idea is to handle SCHED_YIELD
entirely inside schedule(): a yielding task (any policy, not just
SCHED_OTHER) gets moved to the back of the runqueue and the flag cleared
there, so the prev_goodness() special case goes away and sys_sched_yield()
only has to set the flag. Please comment.
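A minimal user-space check along these lines (just a sketch, not part of
the patch; needs root, and only makes sense on a UP box where the two
tasks actually compete for one CPU):

/* rryield.c - two equal-priority SCHED_RR tasks ping-ponging via
 * sched_yield(); with the current code the yield is a no-op for RR tasks.
 */
#include <stdio.h>
#include <sched.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/wait.h>

int main(void)
{
	struct sched_param sp;
	pid_t pid;
	int i;

	sp.sched_priority = 1;
	if (sched_setscheduler(0, SCHED_RR, &sp) < 0) {
		perror("sched_setscheduler");
		return 1;
	}
	pid = fork();		/* child inherits SCHED_RR and priority */
	if (pid < 0) {
		perror("fork");
		return 1;
	}
	for (i = 0; i < 10; i++) {
		printf("%s %d\n", pid ? "parent" : "child", i);
		fflush(stdout);
		sched_yield();	/* should hand the CPU to the other task */
	}
	if (pid)
		waitpid(pid, NULL, 0);
	return 0;
}

Without the patch the yielding RR task just keeps running until its
timeslice expires, so the output comes in bursts; with the patch the two
tasks should alternate line by line.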
Regards,
Borislav
--- linux-2.3.19/kernel/sched.c.bak Sat Oct 9 20:39:29 1999
+++ linux-2.3.19/kernel/sched.c Sat Oct 9 22:57:26 1999
@@ -15,6 +15,8 @@
* Copyright (C) 1998 Andrea Arcangeli
* 1998-12-28 Implemented better SMP scheduling by Ingo Molnar
* 1999-03-10 Improved NTP compatibility by Ulrich Windl
+ * 1999-10-09 Simplified SCHED_YIELD handling and made it work for
+ * RR processes by Borislav Deianov
*/
/*
@@ -197,24 +199,6 @@
}
/*
- * subtle. We want to discard a yielded process only if it's being
- * considered for a reschedule. Wakeup-time 'queries' of the scheduling
- * state do not count. Another optimization we do: sched_yield()-ed
- * processes are runnable (and thus will be considered for scheduling)
- * right when they are calling schedule(). So the only place we need
- * to care about SCHED_YIELD is when we calculate the previous process'
- * goodness ...
- */
-static inline int prev_goodness(struct task_struct * p, int this_cpu, struct mm_struct *this_mm)
-{
- if (p->policy & SCHED_YIELD) {
- p->policy &= ~SCHED_YIELD;
- return 0;
- }
- return goodness(p, this_cpu, this_mm);
-}
-
-/*
* the 'goodness value' of replacing a process on a given CPU.
* positive value means 'replace', zero or negative means 'dont'.
*/
@@ -612,6 +596,10 @@
}
prev->need_resched = 0;
+ if (prev->policy & SCHED_YIELD)
+ goto move_yielded_last;
+move_yielded_back:
+
/*
* this is the scheduler proper:
*/
@@ -622,8 +610,6 @@
*/
next = idle_task(this_cpu);
c = -1000;
- if (prev->state == TASK_RUNNING)
- goto still_running;
still_running_back:
tmp = runqueue_head.next;
@@ -738,10 +724,11 @@
}
goto repeat_schedule;
-still_running:
- c = prev_goodness(prev, this_cpu, prev->active_mm);
- next = prev;
- goto still_running_back;
+move_yielded_last:
+ if (prev->state == TASK_RUNNING)
+ move_last_runqueue(prev);
+ prev->policy &= ~SCHED_YIELD;
+ goto move_yielded_back;
handle_bh:
do_bottom_half();
@@ -1622,12 +1609,8 @@
asmlinkage long sys_sched_yield(void)
{
- spin_lock_irq(&runqueue_lock);
- if (current->policy == SCHED_OTHER)
- current->policy |= SCHED_YIELD;
+ current->policy |= SCHED_YIELD;
current->need_resched = 1;
- move_last_runqueue(current);
- spin_unlock_irq(&runqueue_lock);
return 0;
}
-