Re: broken behavior in cfs when moving threads between cgroups

From: Dima Zavin
Date: Wed Sep 29 2010 - 02:45:23 EST


I solved it by adding a prep_move_task callback and calling it just
before set_task_rq. I'm sending the patch series in a separate thread;
let me know what you think.
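
Concretely, the shape is roughly this (a sketch only: apart from the
prep_move_task name itself, the hook signature and the fair-class body
below are illustrative, the real series is in the other thread):

	/* include/linux/sched.h: new optional sched_class hook */
	#ifdef CONFIG_FAIR_GROUP_SCHED
		void (*prep_move_task) (struct task_struct *p, int on_rq);
	#endif

	/* kernel/sched.c: in sched_move_task(), just before set_task_rq(),
	 * while task_cfs_rq(tsk) still points at the old queue */
	#ifdef CONFIG_FAIR_GROUP_SCHED
		if (tsk->sched_class->prep_move_task)
			tsk->sched_class->prep_move_task(tsk, on_rq);
	#endif
		set_task_rq(tsk, task_cpu(tsk));

	/* kernel/sched_fair.c: make a sleeper's vruntime relative to the
	 * old queue; moved_group_fair() adds the new min_vruntime back */
	static void prep_move_task_fair(struct task_struct *p, int on_rq)
	{
		struct cfs_rq *cfs_rq = task_cfs_rq(p);

		if (!on_rq) {
			update_curr(cfs_rq);
			p->se.vruntime -= cfs_rq->min_vruntime;
		}
	}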

--Dima

2010/9/28 Mike Galbraith <efault@xxxxxx>:
> On Wed, 2010-09-29 at 04:53 +0200, Mike Galbraith wrote:
>
>> ..but, as you noted, moving out then _back_ at forced 0 lag would result
>> in bogus vruntime deltas, so lag must be preserved.  The sleeper's
>> vruntime has to be set to relative before its cfs_rq is changed, then
>> back to absolute in moved_group_fair() I suppose.
>
> I bent it up like so.
>
> diff --git a/include/linux/sched.h b/include/linux/sched.h
> index 53eb33c..d2b06a9 100644
> --- a/include/linux/sched.h
> +++ b/include/linux/sched.h
> @@ -1073,7 +1073,7 @@ struct sched_class {
>                                         struct task_struct *task);
>
>  #ifdef CONFIG_FAIR_GROUP_SCHED
> -       void (*moved_group) (struct task_struct *p, int on_rq);
> +       void (*moved_group) (struct task_struct *p, int on_rq, int leaving);
>  #endif
>  };
>
> diff --git a/kernel/sched.c b/kernel/sched.c
> index 1ab8394..7001d5a 100644
> --- a/kernel/sched.c
> +++ b/kernel/sched.c
> @@ -8358,11 +8358,16 @@ void sched_move_task(struct task_struct *tsk)
>        if (unlikely(running))
>                tsk->sched_class->put_prev_task(rq, tsk);
>
> +#ifdef CONFIG_FAIR_GROUP_SCHED
> +       if (tsk->sched_class->moved_group)
> +               tsk->sched_class->moved_group(tsk, on_rq, 1);
> +#endif
> +
>        set_task_rq(tsk, task_cpu(tsk));
>
>  #ifdef CONFIG_FAIR_GROUP_SCHED
>        if (tsk->sched_class->moved_group)
> -               tsk->sched_class->moved_group(tsk, on_rq);
> +               tsk->sched_class->moved_group(tsk, on_rq, 0);
>  #endif
>
>        if (unlikely(running))
> diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
> index 9b5b4f8..81885ae 100644
> --- a/kernel/sched_fair.c
> +++ b/kernel/sched_fair.c
> @@ -3824,13 +3824,23 @@ static void set_curr_task_fair(struct rq *rq)
>  }
>
>  #ifdef CONFIG_FAIR_GROUP_SCHED
> -static void moved_group_fair(struct task_struct *p, int on_rq)
> +static void moved_group_fair(struct task_struct *p, int on_rq, int leaving)
>  {
>        struct cfs_rq *cfs_rq = task_cfs_rq(p);
>
>        update_curr(cfs_rq);
> -       if (!on_rq)
> -               place_entity(cfs_rq, &p->se, 1);
> +
> +       /*
> +        * Runnable tasks are normalized by dequeue_entity() before the move.
> +        * Sleepers keep an absolute vruntime until task_waking_fair()
> +        * normalizes them at wakeup (so lag can grow while sleeping), so a
> +        * sleeper's vruntime must be made relative before it leaves its old
> +        * cfs_rq, and absolute again once it arrives on the new one.
> +        */
> +       if (!on_rq && leaving)
> +               p->se.vruntime -= cfs_rq->min_vruntime;
> +       else if (!on_rq)
> +               p->se.vruntime += cfs_rq->min_vruntime;
>  }
>  #endif
>
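
To make the lag preservation concrete (numbers invented purely for
illustration): say a task goes to sleep at vruntime 970ms on a cfs_rq
whose min_vruntime is 1000ms, i.e. it carries 30ms of lag. The leaving
pass above turns that into -30ms, and the arriving pass on a queue with
min_vruntime 5000ms turns it into 4970ms, so the task wakes with its
30ms of lag intact. Re-placing at forced 0 lag would instead land it at
the new queue's min_vruntime, and a task that had run far ahead of
min_vruntime could erase that penalty simply by bouncing out of a group
and straight back, which is the bogus delta Mike describes.
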
--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at http://vger.kernel.org/majordomo-info.html
Please read the FAQ at http://www.tux.org/lkml/