Re: Patch to speed recalculate loop in schedule

Dimitris Michailidis (dimitris@darkside.engr.sgi.com)
Thu, 13 May 1999 15:57:01 -0700


Ralph Loader wrote:

> The following patch (against 2.2.8 / 2.3.0) replaces the loop over all
> tasks in schedule with one over the run queue. It makes up for this by
> maintaining task_struct.counter when a process is placed on, or taken off,
> the run queue.

Ralph's patch doesn't compute p->counter correctly. The patch below
fixes this and also implements the calculation in a way that adds no
work to del_from_runqueue. This should benefit highly interactive
threads that leave and rejoin the run queue frequently.
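
In case the mechanism isn't obvious from the diff alone: schedule() no
longer touches tasks that are off the run queue; it only bumps a global
generation count and recalculates the runnable ones. A task that was
asleep catches up in add_to_runqueue() by replaying the recalculations
it missed, or by jumping straight to the limit once enough have gone by.
Here is a throwaway user-space sketch of that idea (struct task,
recalc_runnable, catch_up, PRIORITY and generation are names made up
for illustration, not the kernel's):

/* Stand-alone model of the lazy counter recalculation; not kernel code. */
#include <stdio.h>

#define PRIORITY 20             /* stands in for p->priority (DEF_PRIORITY) */

static int generation;          /* stands in for the global recalc_generation */

struct task {
        int counter;
        int generation;         /* last recalculation this task has applied */
};

/* What the recalculate path in schedule() does for each runnable task. */
static void recalc_runnable(struct task *t)
{
        t->counter = (t->counter >> 1) + PRIORITY;
        t->generation = generation;
}

/* What add_to_runqueue() does for a task that slept through some
 * recalculations: replay them, or saturate once they no longer matter. */
static void catch_up(struct task *t)
{
        int shift = generation - t->generation;

        if (!shift)
                return;
        if (shift > 5)
                t->counter = 2 * PRIORITY - 1;  /* limit of the recurrence */
        else
                while (shift--)
                        t->counter = (t->counter >> 1) + PRIORITY;
        t->generation = generation;
}

int main(void)
{
        struct task runnable = { PRIORITY, 0 };
        struct task sleeper = { PRIORITY, 0 };
        int i;

        for (i = 0; i < 8; i++) {       /* eight recalculations go by */
                generation++;
                recalc_runnable(&runnable);
        }
        catch_up(&sleeper);             /* the sleeper rejoins the run queue */
        printf("runnable %d, sleeper %d\n", runnable.counter, sleeper.counter);
        return 0;
}

Both tasks end up at 39, i.e. 2 * PRIORITY - 1, so the fast path and the
replayed halvings agree.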

----
Dimitris Michailidis dimitris@engr.sgi.com

--- linux/include/linux/sched.h.old Thu May 13 14:19:28 1999
+++ linux/include/linux/sched.h Thu May 13 14:21:06 1999
@@ -236,6 +236,7 @@
         int processor;
         int last_processor;
         int lock_depth; /* Lock depth. We can context switch in and out of holding a syscall kernel lock... */
+        int recalc_generation;
         struct task_struct *next_task, *prev_task;
         struct task_struct *next_run, *prev_run;
 
@@ -348,7 +349,7 @@
 #define INIT_TASK \
 /* state etc */ { 0,0,0,KERNEL_DS,&default_exec_domain,0, \
 /* counter */ DEF_PRIORITY,DEF_PRIORITY,0, \
-/* SMP */ 0,0,0,-1, \
+/* SMP */ 0,0,0,-1,0, \
 /* schedlink */ &init_task,&init_task, &init_task, &init_task, \
 /* binfmt */ NULL, \
 /* ec,brk... */ 0,0,0,0,0,0, \
--- linux/kernel/sched.c.old Thu May 13 13:37:18 1999
+++ linux/kernel/sched.c Thu May 13 14:19:13 1999
@@ -90,6 +90,12 @@
 unsigned long volatile jiffies=0;
 
 /*
+ * Count how many times we've recalculated task_struct.counter.
+ */
+
+static int recalc_generation = 0;
+
+/*
  * Init task must be ok at boot for the ix86 as we will check its signals
  * via the SMP irq return path.
  */
@@ -367,12 +373,29 @@
 static inline void add_to_runqueue(struct task_struct * p)
 {
         struct task_struct *next = init_task.next_run;
+        int shift;
 
         p->prev_run = &init_task;
         init_task.next_run = p;
         p->next_run = next;
         next->prev_run = p;
         nr_running++;
+
+        /* Apply any counter recalculations that occurred while we were not
+         * on the run queue. If too many such recalculations have taken
+         * place we fast path the calculation by setting p->counter to its
+         * max for this process. Here, 'too many' is 6, the base 2 log of
+         * the max priority.
+         */
+        shift = recalc_generation - p->recalc_generation;
+        if (shift) {
+                if (shift > 5)
+                        p->counter = 2 * p->priority - 1;
+                else
+                        while (shift--)
+                                p->counter = (p->counter >> 1) + p->priority;
+                p->recalc_generation = recalc_generation;
+        }
 }
 
 static inline void del_from_runqueue(struct task_struct * p)
@@ -738,11 +761,12 @@
          */
 
         p = init_task.next_run;
+        if (prev->state == TASK_RUNNING)
+                goto still_running;
+
         /* Default process to select.. */
         next = idle_task(this_cpu);
         c = -1000;
-        if (prev->state == TASK_RUNNING)
-                goto still_running;
 still_running_back:
 
         /*
@@ -829,16 +853,12 @@
         return;
 
 recalculate:
-        {
-                struct task_struct *p;
-                spin_unlock_irq(&runqueue_lock);
-                read_lock(&tasklist_lock);
-                for_each_task(p)
-                        p->counter = (p->counter >> 1) + p->priority;
-                read_unlock(&tasklist_lock);
-                spin_lock_irq(&runqueue_lock);
-                goto repeat_schedule;
+        recalc_generation++;
+        for (p = init_task.next_run; p != &init_task; p = p->next_run) {
+                p->counter = (p->counter >> 1) + p->priority;
+                p->recalc_generation = recalc_generation;
         }
+        goto repeat_schedule;
 
 still_running:
         c = prev_goodness(prev, prev, this_cpu);
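
P.S. In case the "6" in the add_to_runqueue comment looks magic: with
the default priority of 20, counter = counter/2 + priority walks
20, 30, 35, 37, 38, 39 from a starting value of 0 and then sticks,
since 39/2 + 20 = 39. The step is monotonic in its starting value and
39 = 2*priority - 1 is its fixed point, so after six missed
recalculations any counter between 0 and 39 has reached the limit,
which is why shift > 5 can just write the limit instead of replaying
the halvings.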
