Slackware 3.0 and 2.0

Christoph Lameter (clameter@miriam.fuller.edu)
21 Jun 1996 21:10:31 -0700


Here is a patch to avoid upgrading Slackware's libc (and, consequently, a bunch of other stuff ...) while still
having lpd and sendmail work. This patch undoes the locking changes in the kernels after 1.3.9x.

I have a mission-critical system that needs a stable but feature-rich kernel. This patch allowed me a
fast upgrade without changing too much.

diff -urN /miriam/usr/src/linux/fs/locks.c linux/fs/locks.c
--- /miriam/usr/src/linux/fs/locks.c Sat Jun 8 06:50:57 1996
+++ linux/fs/locks.c Fri Jun 21 05:45:22 1996
@@ -51,46 +51,20 @@
*
* Removed some race conditions in flock_lock_file(), marked other possible
* races. Just grep for FIXME to see them.
- * Dmitry Gorodchanin (begemot@bgm.rosprint.net), February 09, 1996.
+ * Dmitry Gorodchanin (begemot@bgm.rosprint.net), Feb 09, 1996.
*
* Addressed Dmitry's concerns. Deadlock checking no longer recursive.
* Lock allocation changed to GFP_ATOMIC as we can't afford to sleep
* once we've checked for blocking and deadlocking.
- * Andy Walker (andy@lysaker.kvaerner.no), April 03, 1996.
+ * Andy Walker (andy@lysaker.kvaerner.no), Apr 03, 1996.
*
- * Initial implementation of mandatory locks. SunOS turned out to be
- * a rotten model, so I implemented the "obvious" semantics.
- * See 'linux/Documentation/mandatory.txt' for details.
- * Andy Walker (andy@lysaker.kvaerner.no), April 06, 1996.
- *
- * Don't allow mandatory locks on mmap()'ed files. Added simple functions to
- * check if a file has mandatory locks, used by mmap(), open() and creat() to
- * see if system call should be rejected. Ref. HP-UX/SunOS/Solaris Reference
- * Manual, Section 2.
- * Andy Walker (andy@lysaker.kvaerner.no), April 09, 1996.
- *
- * Tidied up block list handling. Added '/proc/locks' interface.
- * Andy Walker (andy@lysaker.kvaerner.no), April 24, 1996.
- *
- * Fixed deadlock condition for pathological code that mixes calls to
- * flock() and fcntl().
- * Andy Walker (andy@lysaker.kvaerner.no), April 29, 1996.
- *
- * Allow only one type of locking scheme (F_POSIX or F_FLOCK) to be in use
- * for a given file at a time. Changed the CONFIG_MANDATORY_OPTION scheme to
- * guarantee sensible behaviour in the case where file system modules might
- * be compiled with different options than the kernel itself.
- * Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
- *
- * Added a couple of missing wake_up() calls.
- * Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
- *
- * TODO: Do not honour mandatory locks on remote file systems. This matches
- * the SVR4 semantics and neatly sidesteps a pile of awkward issues that
- * would otherwise have to be addressed.
+ * NOTE:
+ * Starting to look at mandatory locks - using SunOS as a model.
+ * Probably a configuration option because mandatory locking can cause
+ * all sorts of chaos with runaway processes.
*/

-#include <linux/config.h>
+#include <asm/segment.h>

#include <linux/malloc.h>
#include <linux/sched.h>
@@ -99,7 +73,6 @@
#include <linux/stat.h>
#include <linux/fcntl.h>

-#include <asm/segment.h>

#define OFFSET_MAX ((off_t)0x7fffffff) /* FIXME: move elsewhere? */

@@ -119,21 +92,18 @@
static int posix_locks_deadlock(struct task_struct *my_task,
struct task_struct *blocked_task);
static int locks_overlap(struct file_lock *fl1, struct file_lock *fl2);
-static void posix_remove_locks(struct file_lock **before, struct task_struct *task);
-static void flock_remove_locks(struct file_lock **before, struct file *filp);

static struct file_lock *locks_alloc_lock(struct file_lock *fl);
static void locks_insert_lock(struct file_lock **pos, struct file_lock *fl);
static void locks_delete_lock(struct file_lock **fl, unsigned int wait);
-static char *lock_get_status(struct file_lock *fl, char *p, int id, char *pfx);

static struct file_lock *file_lock_table = NULL;

/* Free lock not inserted in any queue */
-static inline void locks_free_lock(struct file_lock *fl)
+static inline void locks_free_lock(struct file_lock **fl)
{
- kfree(fl);
- return;
+ kfree(*fl);
+ *fl = NULL; /* Just in case */
}

/* Add lock fl to the blocked list pointed to by block.
@@ -150,33 +120,34 @@
* so they are inlined now. -- Dmitry Gorodchanin 02/09/96.
*/

-static inline void locks_insert_block(struct file_lock *bfl,
+static inline void locks_insert_block(struct file_lock **block,
struct file_lock *fl)
{
- while (bfl->fl_block != NULL) {
- bfl = bfl->fl_block;
+ struct file_lock *bfl;
+
+ while ((bfl = *block) != NULL) {
+ block = &bfl->fl_block;
}

- bfl->fl_block = fl;
+ *block = fl;
fl->fl_block = NULL;

return;
}

-static inline void locks_delete_block(struct file_lock *bfl,
+static inline void locks_delete_block(struct file_lock **block,
struct file_lock *fl)
{
- struct file_lock *tfl;
+ struct file_lock *bfl;

- while ((tfl = bfl->fl_block) != NULL) {
- if (tfl == fl) {
- bfl->fl_block = fl->fl_block;
+ while ((bfl = *block) != NULL) {
+ if (bfl == fl) {
+ *block = fl->fl_block;
fl->fl_block = NULL;
return;
}
- bfl = tfl;
+ block = &bfl->fl_block;
}
- return;
}

/* flock() system call entry point. Apply a FLOCK style lock to
@@ -220,22 +191,19 @@
(flock.l_type == F_SHLCK))
return (-EINVAL);

- if (!filp->f_inode || !posix_make_lock(filp, &file_lock, &flock))
+ if (!posix_make_lock(filp, &file_lock, &flock))
return (-EINVAL);

- if ((fl = filp->f_inode->i_flock) && (fl->fl_flags & F_POSIX)) {
- while (fl != NULL) {
- if (posix_locks_conflict(&file_lock, fl)) {
- flock.l_pid = fl->fl_owner->pid;
- flock.l_start = fl->fl_start;
- flock.l_len = fl->fl_end == OFFSET_MAX ? 0 :
- fl->fl_end - fl->fl_start + 1;
- flock.l_whence = 0;
- flock.l_type = fl->fl_type;
- memcpy_tofs(l, &flock, sizeof(flock));
- return (0);
- }
- fl = fl->fl_next;
+ for (fl = filp->f_inode->i_flock; fl != NULL; fl = fl->fl_next) {
+ if (posix_locks_conflict(&file_lock, fl)) {
+ flock.l_pid = fl->fl_owner->pid;
+ flock.l_start = fl->fl_start;
+ flock.l_len = fl->fl_end == OFFSET_MAX ? 0 :
+ fl->fl_end - fl->fl_start + 1;
+ flock.l_whence = 0;
+ flock.l_type = fl->fl_type;
+ memcpy_tofs(l, &flock, sizeof(flock));
+ return (0);
}
}

@@ -255,7 +223,6 @@
struct file *filp;
struct file_lock file_lock;
struct flock flock;
- struct inode *inode;

/*
* Get arguments and validate them ...
@@ -268,23 +235,6 @@
if (error)
return (error);

- if (!(inode = filp->f_inode))
- return (-EINVAL);
-
-#ifdef CONFIG_LOCK_MANDATORY
- /* Don't allow mandatory locks on files that may be memory mapped
- * and shared.
- */
- if ((inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID && inode->i_mmap) {
- struct vm_area_struct *vma = inode->i_mmap;
- do {
- if (vma->vm_flags & VM_MAYSHARE)
- return (-EAGAIN);
- vma = vma->vm_next_share;
- } while (vma != inode->i_mmap);
- }
-#endif
-
memcpy_fromfs(&flock, l, sizeof(flock));
if (!posix_make_lock(filp, &file_lock, &flock))
return (-EINVAL);
@@ -292,28 +242,16 @@
switch (flock.l_type) {
case F_RDLCK :
if (!(filp->f_mode & 1))
- return (-EBADF);
+ return -EBADF;
break;
case F_WRLCK :
if (!(filp->f_mode & 2))
- return (-EBADF);
+ return -EBADF;
break;
case F_SHLCK :
case F_EXLCK :
-#if 1
-/* warn a bit for now, but don't overdo it */
-{
- static int count = 0;
- if (!count) {
- count=1;
- printk(KERN_WARNING
- "fcntl_setlk() called by process %d (%s) with broken flock() emulation\n",
- current->pid, current->comm);
- }
-}
-#endif
if (!(filp->f_mode & 3))
- return (-EBADF);
+ return -EBADF;
break;
case F_UNLCK :
break;
@@ -327,41 +265,17 @@
void locks_remove_locks(struct task_struct *task, struct file *filp)
{
struct file_lock *fl;
+ struct file_lock **before;

/* For POSIX locks we free all locks on this file for the given task.
* For FLOCK we only free locks on this *open* file if it is the last
* close on that file.
*/
- if ((fl = filp->f_inode->i_flock) != NULL) {
- if (fl->fl_flags & F_POSIX)
- posix_remove_locks(&filp->f_inode->i_flock, task);
- else
- flock_remove_locks(&filp->f_inode->i_flock, filp);
- }
-
- return;
-}
-
-static void posix_remove_locks(struct file_lock **before, struct task_struct *task)
-{
- struct file_lock *fl;
-
- while ((fl = *before) != NULL) {
- if (fl->fl_owner == task)
- locks_delete_lock(before, 0);
- else
- before = &fl->fl_next;
- }
-
- return;
-}
-
-static void flock_remove_locks(struct file_lock **before, struct file *filp)
-{
- struct file_lock *fl;
-
+ before = &filp->f_inode->i_flock;
while ((fl = *before) != NULL) {
- if ((fl->fl_file == filp) && (filp->f_count == 1))
+ if (((fl->fl_flags == F_POSIX) && (fl->fl_owner == task)) ||
+ ((fl->fl_flags == F_FLOCK) && (fl->fl_file == filp) &&
+ (filp->f_count == 1)))
locks_delete_lock(before, 0);
else
before = &fl->fl_next;
@@ -370,103 +284,6 @@
return;
}

-int locks_verify_locked(struct inode *inode)
-{
-#ifdef CONFIG_LOCK_MANDATORY
- /* Candidates for mandatory locking have the setgid bit set
- * but no group execute bit - an otherwise meaningless combination.
- */
- if ((inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID)
- return (locks_mandatory_locked(inode));
-#endif
- return (0);
-}
-
-int locks_verify_area(int read_write, struct inode *inode, struct file *filp,
- unsigned int offset, unsigned int count)
-{
-#ifdef CONFIG_LOCK_MANDATORY
- /* Candidates for mandatory locking have the setgid bit set
- * but no group execute bit - an otherwise meaningless combination.
- */
- if ((inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID)
- return (locks_mandatory_area(read_write, inode, filp, offset,
- count));
-#endif
- return (0);
-}
-
-int locks_mandatory_locked(struct inode *inode)
-{
-#ifdef CONFIG_LOCK_MANDATORY
- struct file_lock *fl;
-
- /* Search the lock list for this inode for any POSIX locks.
- */
- if ((fl = inode->i_flock) && (fl->fl_flags & F_FLOCK))
- return (0);
-
- while (fl != NULL) {
- if (fl->fl_owner != current)
- return (-EAGAIN);
- fl = fl->fl_next;
- }
-#endif
- return (0);
-}
-
-int locks_mandatory_area(int read_write, struct inode *inode,
- struct file *filp, unsigned int offset,
- unsigned int count)
-{
-#ifdef CONFIG_LOCK_MANDATORY
- struct file_lock *fl;
-
-repeat:
- /* Check that there are locks, and that they're not F_FLOCK locks.
- */
- if ((fl = inode->i_flock) && (fl->fl_flags & F_FLOCK))
- return (0);
-
- /*
- * Search the lock list for this inode for locks that conflict with
- * the proposed read/write.
- */
- while (fl != NULL) {
- if (fl->fl_owner == current ||
- fl->fl_end < offset || fl->fl_start >= offset + count)
- goto next_lock;
-
- /*
- * Block for writes against a "read" lock,
- * and both reads and writes against a "write" lock.
- */
- if ((read_write == FLOCK_VERIFY_WRITE) ||
- (fl->fl_type == F_WRLCK)) {
- if (filp && (filp->f_flags & O_NONBLOCK))
- return (-EAGAIN);
- if (current->signal & ~current->blocked)
- return (-ERESTARTSYS);
- if (posix_locks_deadlock(current, fl->fl_owner))
- return (-EDEADLOCK);
- interruptible_sleep_on(&fl->fl_wait);
- if (current->signal & ~current->blocked)
- return (-ERESTARTSYS);
- /*
- * If we've been sleeping someone might have
- * changed the permissions behind our back.
- */
- if ((inode->i_mode & (S_ISGID | S_IXGRP)) != S_ISGID)
- break;
- goto repeat;
- }
- next_lock:
- fl = fl->fl_next;
- }
-#endif
- return (0);
-}
-
/* Verify a "struct flock" and copy it to a "struct file_lock" as a POSIX
* style lock.
*/
@@ -475,7 +292,8 @@
{
off_t start;

- fl->fl_flags = F_POSIX;
+ if (!filp->f_inode) /* just in case */
+ return (0);

switch (l->l_type) {
case F_RDLCK :
@@ -485,11 +303,9 @@
break;
case F_SHLCK :
fl->fl_type = F_RDLCK;
- fl->fl_flags |= F_BROKEN;
break;
case F_EXLCK :
fl->fl_type = F_WRLCK;
- fl->fl_flags |= F_BROKEN;
break;
default :
return (0);
@@ -515,6 +331,7 @@
if ((l->l_len == 0) || ((fl->fl_end = start + l->l_len - 1) < 0))
fl->fl_end = OFFSET_MAX;

+ fl->fl_flags = F_POSIX;
fl->fl_file = filp;
fl->fl_owner = current;
fl->fl_wait = NULL; /* just for cleanliness */
@@ -549,7 +366,7 @@
fl->fl_start = 0;
fl->fl_end = OFFSET_MAX;
fl->fl_file = filp;
- fl->fl_owner = NULL;
+ fl->fl_owner = current;
fl->fl_wait = NULL; /* just for cleanliness */

return (1);
@@ -563,7 +380,8 @@
/* POSIX locks owned by the same process do not conflict with
* each other.
*/
- if (caller_fl->fl_owner == sys_fl->fl_owner)
+ if ((sys_fl->fl_flags == F_POSIX) &&
+ (caller_fl->fl_owner == sys_fl->fl_owner))
return (0);

return (locks_conflict(caller_fl, sys_fl));
@@ -577,7 +395,8 @@
/* FLOCK locks referring to the same filp do not conflict with
* each other.
*/
- if (caller_fl->fl_file == sys_fl->fl_file)
+ if ((sys_fl->fl_flags == F_FLOCK) &&
+ (caller_fl->fl_file == sys_fl->fl_file))
return (0);

return (locks_conflict(caller_fl, sys_fl));
@@ -631,8 +450,6 @@
struct file_lock *fl;

next_task:
- if (my_task == blocked_task)
- return (1);
for (fl = file_lock_table; fl != NULL; fl = fl->fl_nextlink) {
if (fl->fl_owner == NULL || fl->fl_wait == NULL)
continue;
@@ -640,7 +457,7 @@
do {
if (dlock_wait->task == blocked_task) {
if (fl->fl_owner == my_task) {
- return (1);
+ return(-EDEADLOCK);
}
blocked_task = fl->fl_owner;
goto next_task;
@@ -651,8 +468,9 @@
return (0);
}

-/* Try to create a FLOCK lock on filp. We always insert new locks at
- * the head of the list.
+/* Try to create a FLOCK lock on filp. We rely on FLOCK locks being sorted
+ * first in an inode's lock list, and always insert new locks at the head
+ * of the list.
*/
static int flock_lock_file(struct file *filp, struct file_lock *caller,
unsigned int wait)
@@ -662,12 +480,11 @@
struct file_lock **before;
int change = 0;

+ /* This a compact little algorithm based on us always placing FLOCK
+ * locks at the front of the list.
+ */
before = &filp->f_inode->i_flock;
-
- if ((fl = *before) && (fl->fl_flags & F_POSIX))
- return (-EBUSY);
-
- while ((fl = *before) != NULL) {
+ while ((fl = *before) && (fl->fl_flags == F_FLOCK)) {
if (caller->fl_file == fl->fl_file) {
if (caller->fl_type == fl->fl_type)
return (0);
@@ -685,44 +502,41 @@
return (0);
if ((new_fl = locks_alloc_lock(caller)) == NULL)
return (-ENOLCK);
-repeat:
- if ((fl = filp->f_inode->i_flock) && (fl->fl_flags & F_POSIX)) {
- locks_free_lock(new_fl);
- return (-EBUSY);
- }
-
- while (fl != NULL) {
- if (flock_locks_conflict(new_fl, fl)) {
- if (!wait) {
- locks_free_lock(new_fl);
- return (-EAGAIN);
- }
+ repeat:
+ for (fl = filp->f_inode->i_flock; fl != NULL; fl = fl->fl_next) {
+ if (!flock_locks_conflict(new_fl, fl))
+ continue;
+
+ if (wait) {
if (current->signal & ~current->blocked) {
/* Note: new_fl is not in any queue at this
- * point, so we must use locks_free_lock()
+ * point. So we must use locks_free_lock()
* instead of locks_delete_lock()
* Dmitry Gorodchanin 09/02/96.
*/
- locks_free_lock(new_fl);
+ locks_free_lock(&new_fl);
return (-ERESTARTSYS);
}
- locks_insert_block(fl, new_fl);
+ locks_insert_block(&fl->fl_block, new_fl);
interruptible_sleep_on(&new_fl->fl_wait);
wake_up(&new_fl->fl_wait);
if (current->signal & ~current->blocked) {
- /* If we are here, than we were awakened
- * by a signal, so new_fl is still in the
- * block queue of fl. We need to remove
- * new_fl and then free it.
+ /* If we are here, than we were awaken
+ * by signal, so new_fl is still in
+ * block queue of fl. We need remove
+ * new_fl and then free it.
* Dmitry Gorodchanin 09/02/96.
*/
- locks_delete_block(fl, new_fl);
- locks_free_lock(new_fl);
+
+ locks_delete_block(&fl->fl_block, new_fl);
+ locks_free_lock(&new_fl);
return (-ERESTARTSYS);
}
goto repeat;
}
- fl = fl->fl_next;
+
+ locks_free_lock(&new_fl);
+ return (-EAGAIN);
}
locks_insert_lock(&filp->f_inode->i_flock, new_fl);
return (0);
@@ -750,25 +564,23 @@
struct file_lock **before;
int added = 0;

-repeat:
- if ((fl = filp->f_inode->i_flock) && (fl->fl_flags & F_FLOCK))
- return (-EBUSY);
-
if (caller->fl_type != F_UNLCK) {
- while (fl != NULL) {
- if (posix_locks_conflict(caller, fl)) {
- if (!wait)
- return (-EAGAIN);
+repeat:
+ for (fl = filp->f_inode->i_flock; fl != NULL; fl = fl->fl_next) {
+ if (!posix_locks_conflict(caller, fl))
+ continue;
+ if (wait) {
if (current->signal & ~current->blocked)
return (-ERESTARTSYS);
- if (posix_locks_deadlock(caller->fl_owner, fl->fl_owner))
- return (-EDEADLOCK);
+ if (fl->fl_flags == F_POSIX)
+ if (posix_locks_deadlock(caller->fl_owner, fl->fl_owner))
+ return (-EDEADLOCK);
interruptible_sleep_on(&fl->fl_wait);
if (current->signal & ~current->blocked)
return (-ERESTARTSYS);
goto repeat;
}
- fl = fl->fl_next;
+ return (-EAGAIN);
}
}
/*
@@ -779,7 +591,8 @@

/* First skip FLOCK locks and locks owned by other processes.
*/
- while ((fl = *before) && (caller->fl_owner != fl->fl_owner)) {
+ while ((fl = *before) && ((fl->fl_flags == F_FLOCK) ||
+ (caller->fl_owner != fl->fl_owner))) {
before = &fl->fl_next;
}

@@ -861,7 +674,7 @@
/* Go on to next lock.
*/
next_lock:
- before = &fl->fl_next;
+ before = &(*before)->fl_next;
}

if (!added) {
@@ -870,6 +683,7 @@
if ((new_fl = locks_alloc_lock(caller)) == NULL)
return (-ENOLCK);
locks_insert_lock(before, new_fl);
+
}
if (right) {
if (left == right) {
@@ -885,12 +699,9 @@
locks_insert_lock(before, left);
}
right->fl_start = caller->fl_end + 1;
- wake_up(&right->fl_wait);
}
- if (left) {
+ if (left)
left->fl_end = caller->fl_start - 1;
- wake_up(&left->fl_wait);
- }
return (0);
}

@@ -952,28 +763,26 @@
static void locks_delete_lock(struct file_lock **fl_p, unsigned int wait)
{
struct file_lock *fl;
- struct file_lock *pfl;
- struct file_lock *nfl;
+ struct file_lock *bfl;

fl = *fl_p;
- *fl_p = fl->fl_next;
- pfl = fl->fl_prevlink;
- nfl = fl->fl_nextlink;
-
- if (nfl != NULL)
- nfl->fl_prevlink = pfl;
-
- if (pfl != NULL)
- pfl->fl_nextlink = nfl;
- else
- file_lock_table = nfl;
-
- while ((nfl = fl->fl_block) != NULL) {
- fl->fl_block = nfl->fl_block;
- nfl->fl_block = NULL;
- wake_up(&nfl->fl_wait);
+ *fl_p = (*fl_p)->fl_next;
+
+ if (fl->fl_nextlink != NULL)
+ fl->fl_nextlink->fl_prevlink = fl->fl_prevlink;
+
+ if (fl->fl_prevlink != NULL)
+ fl->fl_prevlink->fl_nextlink = fl->fl_nextlink;
+ else {
+ file_lock_table = fl->fl_nextlink;
+ }
+
+ while ((bfl = fl->fl_block) != NULL) {
+ fl->fl_block = bfl->fl_block;
+ bfl->fl_block = NULL;
+ wake_up(&bfl->fl_wait);
if (wait)
- sleep_on(&nfl->fl_wait);
+ sleep_on(&bfl->fl_wait);
}

wake_up(&fl->fl_wait);
@@ -982,7 +791,6 @@
return;
}

-
static char *lock_get_status(struct file_lock *fl, char *p, int id, char *pfx)
{
struct wait_queue *wt;
@@ -1036,4 +844,3 @@
}
return (p - buf);
}
-