[patch] sched: fix scheduling latencies for !PREEMPT kernels

From: Ingo Molnar
Date: Tue Sep 14 2004 - 06:49:08 EST

this patch adds a handful of cond_resched() points to a number of
key, scheduling-latency-related non-inlined functions.

this reduces scheduling latency for !PREEMPT kernels. These scheduling
points are complementary to the PREEMPT_VOLUNTARY scheduling points
(the might_sleep() places): they sit in code paths that never call
might_sleep(), so an explicit cond_resched() had to be added by hand.
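
for reference: with PREEMPT_VOLUNTARY, might_sleep() itself doubles as
a scheduling point - roughly via this mapping (a sketch of the
include/linux/kernel.h definitions, with the debug checks left out):

#ifdef CONFIG_PREEMPT_VOLUNTARY
extern int cond_resched(void);
# define might_resched() cond_resched()
#else
# define might_resched() do { } while (0)
#endif

/* every might_sleep() annotation thus becomes a voluntary
 * preemption point on PREEMPT_VOLUNTARY kernels: */
#define might_sleep() do { might_resched(); } while (0)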

it has been tested as part of the -VP (voluntary preemption) patchset.

Ingo

Signed-off-by: Ingo Molnar <mingo@xxxxxxx>
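
the pattern in every hunk below is the same: an explicit cond_resched()
goes into the body of a potentially long-running loop that never sleeps
on its own (dropping and retaking any spinlock around it first, as in
the printk and writeback hunks). an illustrative sketch - struct
work_item and process_one() are made-up names, only the cond_resched()
placement matters:

#include <linux/list.h>
#include <linux/sched.h>

struct work_item {
        struct list_head list;
};

static void process_one(struct work_item *it); /* hypothetical, possibly slow */

static void scan_items(struct list_head *items)
{
        struct work_item *it;

        list_for_each_entry(it, items, list) {
                process_one(it);
                cond_resched(); /* explicit scheduling point for !PREEMPT */
        }
}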

--- linux/fs/exec.c.orig
+++ linux/fs/exec.c
@@ -184,6 +184,7 @@ static int count(char __user * __user *
 			argv++;
 			if(++i > max)
 				return -E2BIG;
+			cond_resched();
 		}
 	}
 	return i;
--- linux/fs/fs-writeback.c.orig
+++ linux/fs/fs-writeback.c
@@ -366,6 +366,7 @@ sync_sb_inodes(struct super_block *sb, s
 			list_move(&inode->i_list, &sb->s_dirty);
 		}
 		spin_unlock(&inode_lock);
+		cond_resched();
 		iput(inode);
 		spin_lock(&inode_lock);
 		if (wbc->nr_to_write <= 0)
--- linux/fs/select.c.orig
+++ linux/fs/select.c
@@ -239,6 +239,7 @@ int do_select(int n, fd_set_bits *fds, l
 						retval++;
 					}
 				}
+				cond_resched();
 			}
 			if (res_in)
 				*rinp = res_in;
--- linux/kernel/printk.c.orig
+++ linux/kernel/printk.c
@@ -280,6 +280,7 @@ int do_syslog(int type, char __user * bu
 			error = __put_user(c,buf);
 			buf++;
 			i++;
+			cond_resched();
 			spin_lock_irq(&logbuf_lock);
 		}
 		spin_unlock_irq(&logbuf_lock);
@@ -321,6 +322,7 @@ int do_syslog(int type, char __user * bu
 			c = LOG_BUF(j);
 			spin_unlock_irq(&logbuf_lock);
 			error = __put_user(c,&buf[count-1-i]);
+			cond_resched();
 			spin_lock_irq(&logbuf_lock);
 		}
 		spin_unlock_irq(&logbuf_lock);
@@ -336,6 +338,7 @@ int do_syslog(int type, char __user * bu
 					error = -EFAULT;
 					break;
 				}
+				cond_resched();
 			}
 		}
 		break;
--- linux/mm/memory.c.orig
+++ linux/mm/memory.c
@@ -1516,6 +1516,7 @@ do_no_page(struct mm_struct *mm, struct
 	}
 	smp_rmb(); /* Prevent CPU from reordering lock-free ->nopage() */
 retry:
+	cond_resched();
 	new_page = vma->vm_ops->nopage(vma, address & PAGE_MASK, &ret);
 
 	/* no page was available -- either SIGBUS or OOM */
--- linux/mm/slab.c.orig
+++ linux/mm/slab.c
@@ -2793,7 +2793,7 @@ static void cache_reap(void *unused)
 next_unlock:
 		spin_unlock_irq(&searchp->spinlock);
 next:
-		;
+		cond_resched();
 	}
 	check_irq_on();
 	up(&cache_chain_sem);
--- linux/mm/vmscan.c.orig
+++ linux/mm/vmscan.c
@@ -361,6 +361,8 @@ static int shrink_list(struct list_head
 		int may_enter_fs;
 		int referenced;
 
+		cond_resched();
+
 		page = lru_to_page(page_list);
 		list_del(&page->lru);

@@ -710,6 +712,7 @@ refill_inactive_zone(struct zone *zone,
 		reclaim_mapped = 1;
 
 	while (!list_empty(&l_hold)) {
+		cond_resched();
 		page = lru_to_page(&l_hold);
 		list_del(&page->lru);
 		if (page_mapped(page)) {