Re: [PATCH 5/5] fs/locks: create a tree of dependent requests.

From: J. Bruce Fields
Date: Thu Aug 09 2018 - 10:13:44 EST


On Thu, Aug 09, 2018 at 12:04:41PM +1000, NeilBrown wrote:
> When we find an existing lock which conflicts with a request,
> and the request wants to wait, we currently add the request
> to a list. When the lock is removed, the whole list is woken.
> This can cause the thundering-herd problem.
> To reduce the problem, we make use of the (new) fact that
> a pending request can itself have a list of blocked requests.
> When we find a conflict, we look through the existing blocked requests.
> If any one of them blocks the new request, the new request is attached
> below that request.
> This way, when the lock is released, only a set of non-conflicting
> locks will be woken. The rest of the herd can stay asleep.

Though that's no longer quite true--some of the locks you wake may
conflict with each other. Is that right? Which is fine (the
possibility of thundering herds in weird overlapping-range cases
probably isn't a big deal); I just want to make sure I understand....
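
To make sure we're talking about the same shape, here's a toy
userspace mock-up of the tree as I understand it (simplified
fixed-size arrays instead of the kernel's list_head machinery, so
none of this is the actual fs/locks.c code):

	#include <stdio.h>

	/* toy stand-in for struct file_lock */
	struct req {
		const char *name;
		struct req *fl_blocker;     /* what this request waits on */
		struct req *fl_blocked[4];  /* requests waiting on this one */
		int nblocked;
	};

	static void block_on(struct req *blocker, struct req *waiter)
	{
		waiter->fl_blocker = blocker;
		blocker->fl_blocked[blocker->nblocked++] = waiter;
	}

	/* releasing a lock wakes only its direct children; deeper
	 * descendants stay asleep, still blocked on the woken requests
	 */
	static void release(struct req *lock)
	{
		for (int i = 0; i < lock->nblocked; i++)
			printf("wake %s\n", lock->fl_blocked[i]->name);
	}

	int main(void)
	{
		struct req l = { .name = "held lock" };
		struct req a = { .name = "waiter A" };
		struct req b = { .name = "waiter B, conflicts with A" };

		block_on(&l, &a);
		block_on(&a, &b);	/* B parks under A, not beside it */
		release(&l);		/* wakes only A; B stays asleep */
		return 0;
	}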

I think you could simplify the code a lot by maintaining the tree so
that it always satisfies the invariant that waiters are strictly
"weaker" than their descendants, so that finding a conflict with a
waiter is enough to know that its descendants also conflict.

So, when you put a waiter to sleep, you don't add it below a child
unless it's "stronger" than the child.

You give up the property that siblings don't conflict, but again that
just means thundering herds in weird cases, which is OK.
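
Untested sketch of what I mean, where stronger_than(a, b) is a
hypothetical helper returning true iff any lock that conflicts with b
must also conflict with a:

	static void insert_block_simple(struct file_lock *blocker,
					struct file_lock *waiter)
	{
		struct file_lock *fl;

	descend:
		list_for_each_entry(fl, &blocker->fl_blocked, fl_block)
			/* only descend while the invariant holds */
			if (stronger_than(waiter, fl)) {
				blocker = fl;
				goto descend;
			}
		waiter->fl_blocker = blocker;
		list_add_tail(&waiter->fl_block, &blocker->fl_blocked);
	}

Then a conflict found at any node rules out its whole subtree, and no
wake_non_conflicts()-style fixup pass is needed when re-parenting a
waiter.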

--b.

>
> Reported-and-tested-by: Martin Wilck <mwilck@xxxxxxx>
> Signed-off-by: NeilBrown <neilb@xxxxxxxx>
> ---
> fs/locks.c | 69 +++++++++++++++++++++++++++++++++++++++++++++++++++++++-----
> 1 file changed, 63 insertions(+), 6 deletions(-)
>
> diff --git a/fs/locks.c b/fs/locks.c
> index fc64016d01ee..17843feb6f5b 100644
> --- a/fs/locks.c
> +++ b/fs/locks.c
> @@ -738,6 +738,39 @@ static void locks_delete_block(struct file_lock *waiter)
>  	spin_unlock(&blocked_lock_lock);
>  }
>  
> +static void wake_non_conflicts(struct file_lock *waiter, struct file_lock *blocker,
> +			       enum conflict conflict(struct file_lock *,
> +						      struct file_lock *))
> +{
> +	struct file_lock *parent = waiter;
> +	struct file_lock *fl;
> +	struct file_lock *t;
> +
> +	fl = list_entry(&parent->fl_blocked, struct file_lock, fl_block);
> +restart:
> +	list_for_each_entry_safe_continue(fl, t, &parent->fl_blocked, fl_block) {
> +		switch (conflict(fl, blocker)) {
> +		default:
> +		case FL_NO_CONFLICT:
> +			__locks_wake_one(fl);
> +			break;
> +		case FL_CONFLICT:
> +			/* Need to check children */
> +			parent = fl;
> +			fl = list_entry(&parent->fl_blocked, struct file_lock, fl_block);
> +			goto restart;
> +		case FL_TRANSITIVE_CONFLICT:
> +			/* all children must also conflict, no need to check */
> +			continue;
> +		}
> +	}
> +	if (parent != waiter) {
> +		fl = parent;
> +		parent = fl->fl_blocker;
> +		goto restart;
> +	}
> +}
> +
>  /* Insert waiter into blocker's block list.
>   * We use a circular list so that processes can be easily woken up in
>   * the order they blocked. The documentation doesn't require this but
> @@ -747,11 +780,32 @@ static void locks_delete_block(struct file_lock *waiter)
>   * fl_blocked list itself is protected by the blocked_lock_lock, but by ensuring
>   * that the flc_lock is also held on insertions we can avoid taking the
>   * blocked_lock_lock in some cases when we see that the fl_blocked list is empty.
> + *
> + * Rather than just adding to the list, we check for conflicts with any existing
> + * waiter, and add to that waiter instead.
> + * Thus wakeups don't happen until needed.
>   */
>  static void __locks_insert_block(struct file_lock *blocker,
> -				 struct file_lock *waiter)
> +				 struct file_lock *waiter,
> +				 enum conflict conflict(struct file_lock *,
> +							struct file_lock *))
>  {
> +	struct file_lock *fl;
>  	BUG_ON(!list_empty(&waiter->fl_block));
> +
> +	/* Any request in waiter->fl_blocked is known to conflict with
> +	 * waiter, but it might not conflict with blocker.
> +	 * If it doesn't, it needs to be woken now so it can find
> +	 * somewhere else to wait, or possibly it can be granted.
> +	 */
> +	if (conflict(waiter, blocker) != FL_TRANSITIVE_CONFLICT)
> +		wake_non_conflicts(waiter, blocker, conflict);
> +new_blocker:
> +	list_for_each_entry(fl, &blocker->fl_blocked, fl_block)
> +		if (conflict(fl, waiter)) {
> +			blocker = fl;
> +			goto new_blocker;
> +		}
>  	waiter->fl_blocker = blocker;
>  	list_add_tail(&waiter->fl_block, &blocker->fl_blocked);
>  	if (IS_POSIX(blocker) && !IS_OFDLCK(blocker))
> @@ -760,10 +814,12 @@ static void __locks_insert_block(struct file_lock *blocker,
>
>  /* Must be called with flc_lock held. */
>  static void locks_insert_block(struct file_lock *blocker,
> -			       struct file_lock *waiter)
> +			       struct file_lock *waiter,
> +			       enum conflict conflict(struct file_lock *,
> +						      struct file_lock *))
>  {
>  	spin_lock(&blocked_lock_lock);
> -	__locks_insert_block(blocker, waiter);
> +	__locks_insert_block(blocker, waiter, conflict);
>  	spin_unlock(&blocked_lock_lock);
>  }
>
> @@ -1033,7 +1089,7 @@ static int flock_lock_inode(struct inode *inode, struct file_lock *request)
>  		if (!(request->fl_flags & FL_SLEEP))
>  			goto out;
>  		error = FILE_LOCK_DEFERRED;
> -		locks_insert_block(fl, request);
> +		locks_insert_block(fl, request, flock_locks_conflict);
>  		goto out;
>  	}
>  	if (request->fl_flags & FL_ACCESS)
> @@ -1107,7 +1163,8 @@ static int posix_lock_inode(struct inode *inode, struct file_lock *request,
>  			spin_lock(&blocked_lock_lock);
>  			if (likely(!posix_locks_deadlock(request, fl))) {
>  				error = FILE_LOCK_DEFERRED;
> -				__locks_insert_block(fl, request);
> +				__locks_insert_block(fl, request,
> +						     posix_locks_conflict);
>  			}
>  			spin_unlock(&blocked_lock_lock);
>  			goto out;
> @@ -1581,7 +1638,7 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
>  		break_time -= jiffies;
>  	if (break_time == 0)
>  		break_time++;
> -	locks_insert_block(fl, new_fl);
> +	locks_insert_block(fl, new_fl, leases_conflict);
>  	trace_break_lease_block(inode, new_fl);
>  	spin_unlock(&ctx->flc_lock);
>  	percpu_up_read_preempt_enable(&file_rwsem);
>
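
(For reference, the enum conflict used above comes from earlier in
this series; reconstructing it from how it's used here -- check that
earlier patch for the exact definition -- it's roughly:

	enum conflict {
		FL_NO_CONFLICT = 0,	/* no conflict, waiter can be woken */
		FL_CONFLICT,		/* conflicts, but something blocked on
					 * the first lock need not conflict
					 * with the second */
		FL_TRANSITIVE_CONFLICT,	/* conflicts, and anything conflicting
					 * with the first lock must also
					 * conflict with the second */
	};

FL_NO_CONFLICT has to be zero for the bare `if (conflict(fl, waiter))'
test in __locks_insert_block() to read as "any conflict".)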