Re: [PATCH] locking/percpu-rwsem: Remove WQ_FLAG_EXCLUSIVE flags

From: peterz
Date: Fri Aug 21 2020 - 08:16:02 EST


On Fri, Aug 21, 2020 at 12:13:44PM +0100, Will Deacon wrote:
> On Wed, Jul 01, 2020 at 01:57:20PM +0800, qiang.zhang@xxxxxxxxxxxxx wrote:
> > From: Zqiang <qiang.zhang@xxxxxxxxxxxxx>
> >
> > Remove the explicit WQ_FLAG_EXCLUSIVE manipulation of "wq_entry.flags"
> > by using __add_wait_queue_entry_tail_exclusive() instead.
> >
> > Signed-off-by: Zqiang <qiang.zhang@xxxxxxxxxxxxx>
> > ---
> > kernel/locking/percpu-rwsem.c | 4 ++--
> > 1 file changed, 2 insertions(+), 2 deletions(-)
> >
> > diff --git a/kernel/locking/percpu-rwsem.c b/kernel/locking/percpu-rwsem.c
> > index 8bbafe3e5203..48e1c55c2e59 100644
> > --- a/kernel/locking/percpu-rwsem.c
> > +++ b/kernel/locking/percpu-rwsem.c
> > @@ -148,8 +148,8 @@ static void percpu_rwsem_wait(struct percpu_rw_semaphore *sem, bool reader)
> >  	 */
> >  	wait = !__percpu_rwsem_trylock(sem, reader);
> >  	if (wait) {
> > -		wq_entry.flags |= WQ_FLAG_EXCLUSIVE | reader * WQ_FLAG_CUSTOM;
> > -		__add_wait_queue_entry_tail(&sem->waiters, &wq_entry);
> > +		wq_entry.flags |= reader * WQ_FLAG_CUSTOM;
> > +		__add_wait_queue_entry_tail_exclusive(&sem->waiters, &wq_entry);
> >  	}
> >  	spin_unlock_irq(&sem->waiters.lock);
>
> Seems straightforward enough:

Yeah, but I wonder why. Qiang, what made you write this patch?

afaict, there is only a single __add_wait_queue_entry_tail_exclusive()
user in the entire tree (two after this patch). I'm thinking it would be
much better to kill off that one user and remove the entire function.
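
(For reference, the helper is a one-line wrapper that only sets
WQ_FLAG_EXCLUSIVE before queueing; its definition is in the wait.h hunk at
the bottom. That makes the open-coded form percpu-rwsem already has today,

	wq_entry.flags |= WQ_FLAG_EXCLUSIVE | reader * WQ_FLAG_CUSTOM;
	__add_wait_queue_entry_tail(&sem->waiters, &wq_entry);

exactly equivalent, so there's no point growing a second user of the
wrapper.)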

Something like the completely untested thing below; please double check.
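
For context on the conversion: ___wait_event() takes (wq_head, condition,
state, exclusive, ret, cmd...), and the stock helpers are built on it the
same way; __wait_event_interruptible_timeout() for instance is roughly:

	/*
	 * include/linux/wait.h, quoted roughly from memory; shown only to
	 * illustrate the argument order used in the orangefs hunk below.
	 */
	#define __wait_event_interruptible_timeout(wq_head, condition, timeout) \
		___wait_event(wq_head, ___wait_cond_timeout(condition),	 \
			      TASK_INTERRUPTIBLE, 0, timeout,			 \
			      __ret = schedule_timeout(__ret))

The orangefs hunk open-codes that pattern with exclusive = 1 and a cmd block
that drops m->q.lock around schedule_timeout(), so the wait stays exclusive
without the caller ever touching WQ_FLAG_EXCLUSIVE directly.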

---
diff --git a/fs/orangefs/orangefs-bufmap.c b/fs/orangefs/orangefs-bufmap.c
index 538e839590ef..b24e62e30822 100644
--- a/fs/orangefs/orangefs-bufmap.c
+++ b/fs/orangefs/orangefs-bufmap.c
@@ -80,17 +80,10 @@ static void put(struct slot_map *m, int slot)
 
 static int wait_for_free(struct slot_map *m)
 {
-	long left = slot_timeout_secs * HZ;
-	DEFINE_WAIT(wait);
+	long ret, left = slot_timeout_secs * HZ;
 
 	do {
-		long n = left, t;
-		if (likely(list_empty(&wait.entry)))
-			__add_wait_queue_entry_tail_exclusive(&m->q, &wait);
-		set_current_state(TASK_INTERRUPTIBLE);
-
-		if (m->c > 0)
-			break;
+		long n = left;
 
 		if (m->c < 0) {
 			/* we are waiting for map to be installed */
@@ -98,27 +91,31 @@ static int wait_for_free(struct slot_map *m)
 			if (n > ORANGEFS_BUFMAP_WAIT_TIMEOUT_SECS * HZ)
 				n = ORANGEFS_BUFMAP_WAIT_TIMEOUT_SECS * HZ;
 		}
-		spin_unlock(&m->q.lock);
-		t = schedule_timeout(n);
-		spin_lock(&m->q.lock);
-		if (unlikely(!t) && n != left && m->c < 0)
-			left = t;
-		else
-			left = t + (left - n);
-		if (signal_pending(current))
-			left = -EINTR;
-	} while (left > 0);
 
-	if (!list_empty(&wait.entry))
-		list_del(&wait.entry);
-	else if (left <= 0 && waitqueue_active(&m->q))
-		__wake_up_locked_key(&m->q, TASK_INTERRUPTIBLE, NULL);
-	__set_current_state(TASK_RUNNING);
+		ret = ___wait_event(m->q, m->c > 0, TASK_INTERRUPTIBLE, 1, n,
+
+			spin_unlock(&m->q.lock);
+			__ret = schedule_timeout(__ret);
+			spin_lock(&m->q.lock);
+
+		);
 
-	if (likely(left > 0))
+		if (ret) /* @cond := true || -ERESTARTSYS */
+			break;
+
+		left -= n;
+	} while (left > 0);
+
+	if (!ret)
 		return 0;
 
-	return left < 0 ? -EINTR : -ETIMEDOUT;
+	if (ret < 0) {
+		if (waitqueue_active(&m->q))
+			__wake_up_locked_key(&m->q, TASK_INTERRUPTIBLE, NULL);
+		return -EINTR;
+	}
+
+	return -ETIMEDOUT;
 }
 
 static int get(struct slot_map *m)
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 898c890fc153..841ef9ef15d9 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -185,13 +185,6 @@ static inline void __add_wait_queue_entry_tail(struct wait_queue_head *wq_head,
 	list_add_tail(&wq_entry->entry, &wq_head->head);
 }
 
-static inline void
-__add_wait_queue_entry_tail_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
-{
-	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
-	__add_wait_queue_entry_tail(wq_head, wq_entry);
-}
-
 static inline void
 __remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
 {
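
For reviewing convenience, here is how the converted wait_for_free() reads
with the orangefs hunk written out as straight code. Same caveat as above:
this is a reading aid transcribed from the untested diff, not something that
has been built or run; the ___wait_event() arguments are (wq_head, condition,
state, exclusive, ret, cmd).

static int wait_for_free(struct slot_map *m)
{
	long ret, left = slot_timeout_secs * HZ;

	do {
		long n = left;

		if (m->c < 0) {
			/* map not installed yet, cap the wait */
			if (n > ORANGEFS_BUFMAP_WAIT_TIMEOUT_SECS * HZ)
				n = ORANGEFS_BUFMAP_WAIT_TIMEOUT_SECS * HZ;
		}

		/*
		 * Exclusive, interruptible wait for m->c > 0; the cmd block
		 * drops m->q.lock around the sleep, and __ret starts out as
		 * the n jiffy budget.
		 */
		ret = ___wait_event(m->q, m->c > 0, TASK_INTERRUPTIBLE, 1, n,

			spin_unlock(&m->q.lock);
			__ret = schedule_timeout(__ret);
			spin_lock(&m->q.lock);

		);

		if (ret) /* @cond := true || -ERESTARTSYS */
			break;

		left -= n;
	} while (left > 0);

	if (!ret)
		return 0;

	if (ret < 0) {
		/* signal: pass a wakeup we may have consumed to the next waiter */
		if (waitqueue_active(&m->q))
			__wake_up_locked_key(&m->q, TASK_INTERRUPTIBLE, NULL);
		return -EINTR;
	}

	return -ETIMEDOUT;
}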