Patch to speed recalculate loop in schedule.

Ralph Loader (suckfish@ihug.co.nz)
Wed, 12 May 1999 22:26:10 +1200 (NZST)


Hi,

The following patch (against 2.2.8 / 2.3.0) replaces the loop over all
tasks in the recalculation path of schedule() with a loop over just the
run queue. It makes up for the narrower loop by maintaining
task_struct.counter lazily: a global count records how many
recalculations have happened, a task remembers that count when it
leaves the run queue, and its counter is brought up to date in a single
step when it is put back on.

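For concreteness, here is what the bookkeeping amounts to. This is a
stand-alone user-space sketch of the scheme, not kernel code; the
toy_task type and helper names are made up:

/* Stand-alone sketch of the lazy recalculation scheme.  A global
 * generation count is bumped once per recalculation pass; a task that
 * was off the run queue for `shift' generations is caught up in a
 * single step when it is re-added. */
#include <stdio.h>

struct toy_task {
	int counter;		/* remaining timeslice */
	int priority;		/* static priority, e.g. DEF_PRIORITY */
	int recalc_generation;	/* generation this task last saw */
};

static int recalc_generation;	/* bumped once per recalculation pass */

/* What the run-queue loop in schedule() does to each runnable task. */
static void recalc_one(struct toy_task *p)
{
	p->counter = (p->counter >> 1) + p->priority;
}

/* The catch-up done in add_to_runqueue(): for shift == 0 this is the
 * identity, and for a large shift it approaches `priority', so a task
 * that slept for many generations wakes with roughly a full
 * timeslice. */
static void catch_up(struct toy_task *p)
{
	unsigned int shift = recalc_generation - p->recalc_generation;

	if (shift > 31)
		shift = 31;
	p->counter = (p->counter >> shift)
		+ p->priority - (p->priority >> shift);
	p->recalc_generation = recalc_generation;
}

int main(void)
{
	struct toy_task runnable = { 10, 20, 0 };
	struct toy_task sleeper  = { 10, 20, 0 };
	int i;

	/* Five recalculation passes: the runnable task is updated each
	 * time, the sleeping one not at all. */
	for (i = 0; i < 5; i++) {
		recalc_generation++;
		recalc_one(&runnable);
	}
	catch_up(&sleeper);	/* one step instead of five */

	/* prints: runnable 39, sleeper 20 */
	printf("runnable %d, sleeper %d\n", runnable.counter, sleeper.counter);
	return 0;
}
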
I have no idea whether the shorter loop is worth the extra bookkeeping
on every run-queue insertion and removal...

Ralph.

diff -ur orig/linux/include/linux/sched.h linux/include/linux/sched.h
--- orig/linux/include/linux/sched.h Wed May 12 21:59:53 1999
+++ linux/include/linux/sched.h Wed May 12 21:50:52 1999
@@ -236,6 +236,7 @@
 	int processor;
 	int last_processor;
 	int lock_depth;		/* Lock depth. We can context switch in and out of holding a syscall kernel lock... */
+	int recalc_generation;
 	struct task_struct *next_task, *prev_task;
 	struct task_struct *next_run, *prev_run;

@@ -348,7 +349,7 @@
 #define INIT_TASK \
 /* state etc */	{ 0,0,0,KERNEL_DS,&default_exec_domain,0, \
 /* counter */	DEF_PRIORITY,DEF_PRIORITY,0, \
-/* SMP */	0,0,0,-1, \
+/* SMP */	0,0,0,-1,0, \
 /* schedlink */	&init_task,&init_task, &init_task, &init_task, \
 /* binfmt */	NULL, \
 /* ec,brk... */	0,0,0,0,0,0, \
diff -ur orig/linux/kernel/sched.c linux/kernel/sched.c
--- orig/linux/kernel/sched.c Wed May 12 21:59:53 1999
+++ linux/kernel/sched.c Wed May 12 21:55:38 1999
@@ -90,6 +90,12 @@
 unsigned long volatile jiffies=0;
 
 /*
+ * Count how many times we've recalculated task_struct.counter.
+ */
+
+static int recalc_generation = 0;
+
+/*
  * Init task must be ok at boot for the ix86 as we will check its signals
  * via the SMP irq return path.
  */
@@ -367,12 +373,21 @@
 static inline void add_to_runqueue(struct task_struct * p)
 {
 	struct task_struct *next = init_task.next_run;
+	unsigned int shift;
 
 	p->prev_run = &init_task;
 	init_task.next_run = p;
 	p->next_run = next;
 	next->prev_run = p;
 	nr_running++;
+
+	/* Now bring p->counter up to date. */
+	shift = recalc_generation - p->recalc_generation;
+	if (shift > 31)
+		shift = 31;
+
+	p->counter = (p->counter >> shift)
+		+ p->priority - (p->priority >> shift);
 }
 
 static inline void del_from_runqueue(struct task_struct * p)
@@ -385,6 +400,12 @@
 	prev->next_run = next;
 	p->next_run = NULL;
 	p->prev_run = NULL;
+
+	/* A task on the runqueue has p->counter up to date with the current
+	   global recalc_generation.  When we remove it from the run-queue,
+	   remember that. */
+
+	p->recalc_generation = recalc_generation;
 }
 
 static inline void move_last_runqueue(struct task_struct * p)
@@ -831,12 +852,9 @@
 recalculate:
 	{
 		struct task_struct *p;
-		spin_unlock_irq(&runqueue_lock);
-		read_lock(&tasklist_lock);
-		for_each_task(p)
+		recalc_generation++;
+		for (p = init_task.next_run; p != &init_task; p = p->next_run)
 			p->counter = (p->counter >> 1) + p->priority;
-		read_unlock(&tasklist_lock);
-		spin_lock_irq(&runqueue_lock);
 		goto repeat_schedule;
 	}

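As an aside on the catch-up arithmetic: the shift is kept as an
unsigned difference and clamped to 31, so the generation counter is
free to wrap and the code never shifts an int by its full width
(undefined in C). A quick stand-alone check, with made-up values and
the operands cast to unsigned so the wrap-around is well-defined:

/* Made-up values: two recalculations elapse while the global
 * generation counter wraps from INT_MAX to INT_MIN; the unsigned
 * difference still comes out as 2. */
#include <stdio.h>
#include <limits.h>

int main(void)
{
	int global = INT_MIN;		/* counter just wrapped */
	int task = INT_MAX - 1;		/* recorded before the wrap */
	unsigned int shift = (unsigned int)global - (unsigned int)task;

	if (shift > 31)
		shift = 31;
	printf("shift = %u\n", shift);	/* prints: shift = 2 */
	return 0;
}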
