[PATCH] lockd: replace semaphore, sleep_on

From: Thomas Gleixner
Date: Thu Oct 21 2004 - 02:44:27 EST



Use wait_event_interruptible_timeout() and completions instead of the
obsolete, race-prone sleep_on() functions and the semaphore that was
abused as a start-up notification.

Signed-off-by: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Acked-by: Ingo Molnar <mingo@xxxxxxx>
---
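
For reference, the sleep_on() family is inherently racy: a wake-up that
arrives between checking a condition and actually going to sleep is lost.
wait_event_interruptible_timeout() re-tests the condition inside the wait
loop, which is what makes the conversion in svc.c safe. A minimal sketch
of the start-up handshake pattern against the 2.6-era API (the demo_*
names are illustrative, not part of this patch):

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_start);
static pid_t demo_pid;

static int demo_thread(void *unused)
{
	demo_pid = current->pid;	/* make the wait condition true ... */
	wake_up(&demo_start);		/* ... then wake any waiter */
	/* thread body would run here */
	return 0;
}

static void demo_up(void)
{
	/* the thread would be started here, e.g. via kernel_thread() */

	/*
	 * Sleep until demo_pid is set or one second passes.  The
	 * condition is re-checked after every wake-up, so a wake_up()
	 * issued before we are on the queue is not lost.
	 */
	if (wait_event_interruptible_timeout(demo_start,
					     demo_pid != 0, HZ) <= 0)
		printk(KERN_WARNING "demo_up: thread failed to start\n");
}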

 2.6.9-bk-041020-thomas/fs/lockd/clntlock.c |   12 +++++++-----
 2.6.9-bk-041020-thomas/fs/lockd/svc.c      |   23 +++++++++++++----------
 2 files changed, 20 insertions(+), 15 deletions(-)

diff -puN fs/lockd/svc.c~lockd fs/lockd/svc.c
--- 2.6.9-bk-041020/fs/lockd/svc.c~lockd	2004-10-20 15:56:52.000000000 +0200
+++ 2.6.9-bk-041020-thomas/fs/lockd/svc.c	2004-10-20 15:56:52.000000000 +0200
@@ -46,7 +46,7 @@ static pid_t nlmsvc_pid;
 int nlmsvc_grace_period;
 unsigned long nlmsvc_timeout;
 
-static DECLARE_MUTEX_LOCKED(lockd_start);
+static DECLARE_WAIT_QUEUE_HEAD(lockd_start);
 static DECLARE_WAIT_QUEUE_HEAD(lockd_exit);
 
 /*
@@ -109,7 +109,7 @@ lockd(struct svc_rqst *rqstp)
 	 * Let our maker know we're running.
 	 */
 	nlmsvc_pid = current->pid;
-	up(&lockd_start);
+	wake_up(&lockd_start);
 
 	daemonize("lockd");
 
@@ -258,8 +258,15 @@ lockd_up(void)
 			"lockd_up: create thread failed, error=%d\n", error);
 		goto destroy_and_out;
 	}
-	down(&lockd_start);
-
+	/*
+	 * Wait for the lockd process to start, but since we're holding
+	 * the lockd semaphore, we can't wait around forever ...
+	 */
+	if (wait_event_interruptible_timeout(lockd_start,
+					     nlmsvc_pid != 0, HZ) <= 0) {
+		printk(KERN_WARNING
+			"lockd_up: lockd failed to start\n");
+	}
 	/*
 	 * Note: svc_serv structures have an initial use count of 1,
 	 * so we exit through here on both success and failure.
@@ -298,16 +305,12 @@ lockd_down(void)
 	 * Wait for the lockd process to exit, but since we're holding
 	 * the lockd semaphore, we can't wait around forever ...
 	 */
-	clear_thread_flag(TIF_SIGPENDING);
-	interruptible_sleep_on_timeout(&lockd_exit, HZ);
-	if (nlmsvc_pid) {
+	if (wait_event_interruptible_timeout(lockd_exit,
+					     nlmsvc_pid == 0, HZ) <= 0) {
 		printk(KERN_WARNING
 			"lockd_down: lockd failed to exit, clearing pid\n");
 		nlmsvc_pid = 0;
 	}
-	spin_lock_irq(&current->sighand->siglock);
-	recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
 out:
 	up(&nlmsvc_sema);
 }
diff -puN fs/lockd/clntlock.c~lockd fs/lockd/clntlock.c
--- 2.6.9-bk-041020/fs/lockd/clntlock.c~lockd	2004-10-20 15:56:52.000000000 +0200
+++ 2.6.9-bk-041020-thomas/fs/lockd/clntlock.c	2004-10-20 15:56:52.000000000 +0200
@@ -6,6 +6,7 @@
  * Copyright (C) 1996, Olaf Kirch <okir@xxxxxxxxxxxx>
  */
 
+#include <linux/completion.h>
 #include <linux/module.h>
 #include <linux/types.h>
 #include <linux/time.h>
@@ -32,7 +33,7 @@ static int reclaimer(void *ptr);
  */
 struct nlm_wait {
 	struct nlm_wait *	b_next;		/* linked list */
-	wait_queue_head_t	b_wait;		/* where to wait on */
+	struct completion	b_wait;		/* where to wait on */
 	struct nlm_host *	b_host;
 	struct file_lock *	b_lock;		/* local file lock */
 	unsigned short		b_reclaim;	/* got to reclaim lock */
@@ -53,7 +54,7 @@ nlmclnt_block(struct nlm_host *host, str
 
 	block.b_host = host;
 	block.b_lock = fl;
-	init_waitqueue_head(&block.b_wait);
+	init_completion(&block.b_wait);
 	block.b_status = NLM_LCK_BLOCKED;
 	block.b_next = nlm_blocked;
 	nlm_blocked = &block;
@@ -69,7 +70,8 @@ nlmclnt_block(struct nlm_host *host, str
 	 * a 1 minute timeout would do. See the comment before
 	 * nlmclnt_lock for an explanation.
 	 */
-	sleep_on_timeout(&block.b_wait, 30*HZ);
+
+	wait_for_completion_timeout(&block.b_wait, 30*HZ);
 
 	for (head = &nlm_blocked; *head; head = &(*head)->b_next) {
 		if (*head == &block) {
@@ -118,7 +120,7 @@ nlmclnt_grant(struct nlm_lock *lock)
 	 * wake up the caller.
 	 */
 	block->b_status = NLM_LCK_GRANTED;
-	wake_up(&block->b_wait);
+	complete(&block->b_wait);
 
 	return nlm_granted;
 }
@@ -233,7 +235,7 @@ restart:
 	for (block = nlm_blocked; block; block = block->b_next) {
 		if (block->b_host == host) {
 			block->b_status = NLM_LCK_DENIED_GRACE_PERIOD;
-			wake_up(&block->b_wait);
+			complete(&block->b_wait);
 		}
 	}
 
_
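
On the clntlock.c side a completion is used instead of a bare wait queue:
a completion carries its own done count, so a complete() posted before the
waiter reaches wait_for_completion_timeout() is still seen, closing the
lost-wakeup window that sleep_on_timeout() left open. A minimal sketch of
that pattern (again with illustrative demo_* names and stand-in status
values, not part of the patch):

#include <linux/completion.h>
#include <linux/param.h>	/* for HZ */

struct demo_block {
	struct completion	b_wait;
	int			b_status;
};

static void demo_block_and_wait(struct demo_block *block)
{
	init_completion(&block->b_wait);
	block->b_status = -1;	/* stand-in for NLM_LCK_BLOCKED */

	/*
	 * Returns when complete() has been called or after 30 seconds.
	 * Unlike sleep_on_timeout(), a completion posted before we get
	 * here is not lost.
	 */
	wait_for_completion_timeout(&block->b_wait, 30 * HZ);
}

static void demo_grant(struct demo_block *block)
{
	block->b_status = 0;	/* stand-in for NLM_LCK_GRANTED */
	complete(&block->b_wait);
}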

