Re: [RFC v5 1/9] sched/deadline: track the active utilization

From: Mathieu Poirier
Date: Sun Mar 26 2017 - 13:07:15 EST


Hello Luca,

On 23 March 2017 at 21:52, luca abeni <luca.abeni@xxxxxxxxxxxxxxx> wrote:
> From: Luca Abeni <luca.abeni@xxxxxxxx>
>
> Active utilization is defined as the total utilization of active
> (TASK_RUNNING) tasks queued on a runqueue. Hence, it is increased
> when a task wakes up and is decreased when a task blocks.
>
> When a task is migrated from CPUi to CPUj, immediately subtract the
> task's utilization from CPUi and add it to CPUj. This mechanism is
> implemented by modifying the pull and push functions.
> Note: this is not fully correct from the theoretical point of view
> (the utilization should be removed from CPUi only at the 0-lag
> time); a more theoretically sound solution will follow.
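
For other readers following along: if I remember the CBS/GRUB math
correctly, the "0-lag time" mentioned above is the instant at which the
task's remaining runtime, consumed at the reserved bandwidth
dl_runtime/dl_period, would reach zero:

	t_0lag = deadline - runtime / (dl_runtime / dl_period)
	       = deadline - (runtime * dl_period) / dl_runtime

As a sketch (mine, not taken from this series; field names as in
sched_dl_entity):

	static u64 zero_lag_time(struct sched_dl_entity *dl_se)
	{
		return dl_se->deadline -
			div64_long(dl_se->runtime * dl_se->dl_period,
				   dl_se->dl_runtime);
	}

Deferring sub_running_bw() until that instant is, as I understand it,
what the "more theoretically sound solution" refers to.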
>
> Signed-off-by: Juri Lelli <juri.lelli@xxxxxxx>
> Signed-off-by: Luca Abeni <luca.abeni@xxxxxxxx>
> Tested-by: Daniel Bristot de Oliveira <bristot@xxxxxxxxxx>
> ---
> kernel/sched/deadline.c | 61 ++++++++++++++++++++++++++++++++++++++++++++++---
> kernel/sched/sched.h | 6 +++++
> 2 files changed, 64 insertions(+), 3 deletions(-)
>
> diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
> index a2ce590..cef9adb 100644
> --- a/kernel/sched/deadline.c
> +++ b/kernel/sched/deadline.c
> @@ -43,6 +43,28 @@ static inline int on_dl_rq(struct sched_dl_entity *dl_se)
> 	return !RB_EMPTY_NODE(&dl_se->rb_node);
> }
>
> +static inline
> +void add_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
> +{
> +	u64 old = dl_rq->running_bw;
> +
> +	lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
> +	dl_rq->running_bw += dl_bw;
> +	SCHED_WARN_ON(dl_rq->running_bw < old); /* overflow */
> +}
> +
> +static inline
> +void sub_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
> +{
> +	u64 old = dl_rq->running_bw;
> +
> +	lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
> +	dl_rq->running_bw -= dl_bw;
> +	SCHED_WARN_ON(dl_rq->running_bw > old); /* underflow */
> +	if (dl_rq->running_bw > old)
> +		dl_rq->running_bw = 0;
> +}
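
Mostly to check my understanding: dl_bw (and therefore running_bw) is
in the fixed-point unit produced by to_ratio(), i.e. runtime << 20 /
period, right? So e.g. a 10ms runtime / 100ms period task would
contribute something like:

	/* Sketch: the unit I believe to_ratio() gives for dl_bw. */
	u64 bw = div64_u64((10ULL * NSEC_PER_MSEC) << 20,
			   100ULL * NSEC_PER_MSEC); /* ~104857, 0.1 in the <<20 unit */

If so, maybe worth spelling the unit out in the comment on running_bw
in sched.h below.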
> +
> static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq)
> {
> 	struct sched_dl_entity *dl_se = &p->dl;
> @@ -946,8 +968,12 @@ enqueue_dl_entity(struct sched_dl_entity *dl_se,
> 	 * parameters of the task might need updating. Otherwise,
> 	 * we want a replenishment of its runtime.
> 	 */
> -	if (flags & ENQUEUE_WAKEUP)
> +	if (flags & ENQUEUE_WAKEUP) {
> +		struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
> +
> +		add_running_bw(dl_se->dl_bw, dl_rq);
> 		update_dl_entity(dl_se, pi_se);
> +	}

What do we do for tasks that go from normal to dl? Unless I'm
mistaken, their utilisation won't be accounted for when they are
first enqueued.
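
The path I'm looking at is __sched_setscheduler(); from memory (so the
flag names may be off for this tree), the sequence is roughly:

	queued = task_on_rq_queued(p);
	if (queued)
		dequeue_task(rq, p, DEQUEUE_SAVE);
	__setscheduler(rq, p, attr, pi);	/* class becomes dl here */
	if (queued)
		enqueue_task(rq, p, ENQUEUE_RESTORE);

If the ENQUEUE_RESTORE check in enqueue_task_dl() below is meant to
cover this case, a comment saying so would help.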

> 	else if (flags & ENQUEUE_REPLENISH)
> 		replenish_dl_entity(dl_se, pi_se);
>
> @@ -998,14 +1024,25 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
> 	if (!p->dl.dl_throttled && dl_is_constrained(&p->dl))
> 		dl_check_constrained_dl(&p->dl);
>
> +	if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & ENQUEUE_RESTORE)
> +		add_running_bw(p->dl.dl_bw, &rq->dl);
> +
> 	/*
> -	 * If p is throttled, we do nothing. In fact, if it exhausted
> +	 * If p is throttled, we do not enqueue it. In fact, if it exhausted
> 	 * its budget it needs a replenishment and, since it now is on
> 	 * its rq, the bandwidth timer callback (which clearly has not
> 	 * run yet) will take care of this.
> +	 * However, the active utilization does not depend on whether the
> +	 * task is on the runqueue (it depends on the task's state - in
> +	 * GRUB parlance, "inactive" vs "active contending"). In other
> +	 * words, even if a task is throttled its utilization must still
> +	 * be counted in the active utilization; hence, we need to call
> +	 * add_running_bw().
> 	 */
> -	if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH))
> +	if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH)) {
> +		add_running_bw(p->dl.dl_bw, &rq->dl);
> 		return;
> +	}
>
> 	enqueue_dl_entity(&p->dl, pi_se, flags);
>
> @@ -1023,6 +1060,20 @@ static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
> {
> 	update_curr_dl(rq);
> 	__dequeue_task_dl(rq, p, flags);
> +
> +	if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & DEQUEUE_SAVE)
> +		sub_running_bw(p->dl.dl_bw, &rq->dl);
> +
> +	/*
> +	 * This check allows us to decrease the active utilization in two
> +	 * cases: when the task blocks and when it is terminating
> +	 * (p->state == TASK_DEAD). We can handle both in the same way,
> +	 * because from GRUB's point of view the same thing is happening
> +	 * (the task moves from "active contending" to "active
> +	 * non contending" or "inactive").
> +	 */
> +	if (flags & DEQUEUE_SLEEP)
> +		sub_running_bw(p->dl.dl_bw, &rq->dl);
> }
>
> /*
> @@ -1551,7 +1602,9 @@ static int push_dl_task(struct rq *rq)
> 	}
>
> 	deactivate_task(rq, next_task, 0);
> +	sub_running_bw(next_task->dl.dl_bw, &rq->dl);
> 	set_task_cpu(next_task, later_rq->cpu);
> +	add_running_bw(next_task->dl.dl_bw, &later_rq->dl);
> 	activate_task(later_rq, next_task, 0);
> 	ret = 1;
>
> @@ -1639,7 +1692,9 @@ static void pull_dl_task(struct rq *this_rq)
> 			resched = true;
>
> 			deactivate_task(src_rq, p, 0);
> +			sub_running_bw(p->dl.dl_bw, &src_rq->dl);
> 			set_task_cpu(p, this_cpu);
> +			add_running_bw(p->dl.dl_bw, &this_rq->dl);
> 			activate_task(this_rq, p, 0);
> 			dmin = p->dl.deadline;
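
Same pattern as in push_dl_task() above. Not a blocker, but since the
sub/set_task_cpu/add triple has to stay together in both places, a
small helper might keep them from drifting apart, e.g. (name and
cpu_of() use just a sketch):

	static void migrate_running_bw(struct task_struct *p,
				       struct rq *src_rq, struct rq *dst_rq)
	{
		sub_running_bw(p->dl.dl_bw, &src_rq->dl);
		set_task_cpu(p, cpu_of(dst_rq));
		add_running_bw(p->dl.dl_bw, &dst_rq->dl);
	}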
>
> diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
> index 57caf36..caaa7d3 100644
> --- a/kernel/sched/sched.h
> +++ b/kernel/sched/sched.h
> @@ -558,6 +558,12 @@ struct dl_rq {
> #else
> 	struct dl_bw dl_bw;
> #endif
> +	/*
> +	 * "Active utilization" for this runqueue: increased when a
> +	 * task wakes up (becomes TASK_RUNNING) and decreased when a
> +	 * task blocks.
> +	 */
> +	u64 running_bw;

Would it be a good idea to initialise and clear this field in
rq_online/offline_dl()?
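
I'm thinking of something along these lines (sketch only, the
SCHED_WARN_ON is just to catch leftovers):

	static void rq_offline_dl(struct rq *rq)
	{
		/* ... existing body ... */

		/* Hypothetical: any contribution left here is stale. */
		SCHED_WARN_ON(rq->dl.running_bw);
		rq->dl.running_bw = 0;
	}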

> };
>
> #ifdef CONFIG_SMP
> --
> 2.7.4
>