On Fri, 2009-09-18 at 21:54 +0200, Ingo Molnar wrote:
Really hate this change though; doesn't seem right to not pick the same

diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 652e8bd..4fad08f 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -353,11 +353,25 @@ static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
static struct sched_entity *__pick_next_entity(struct cfs_rq *cfs_rq)
{
struct rb_node *left = cfs_rq->rb_leftmost;
+ struct sched_entity *se, *curr;
if (!left)
return NULL;
- return rb_entry(left, struct sched_entity, run_node);
+ se = rb_entry(left, struct sched_entity, run_node);
+ curr = &current->se;
+
+ /*
+ * Don't select the entity who just tried to schedule away
+ * if there's another entity available.
+ */
+ if (unlikely(se == curr && cfs_rq->nr_running > 1)) {
+ struct rb_node *next_node = rb_next(&curr->run_node);
+ if (next_node)
+ se = rb_entry(next_node, struct sched_entity, run_node);
+ }
+
+ return se;
}
task again if it's runnable. Bad for cache footprint.
The scenario is quite common for stuff like:
CPU0 CPU1
set_task_state(TASK_INTERRUPTIBLE)
if (cond)
goto out;
<--- ttwu()
schedule();