[PATCH 01/20] staging/lustre/ptlrpc: avoid list scan in ptlrpcd_check

From: green
Date: Sun Feb 01 2015 - 21:57:40 EST


From: Liang Zhen <liang.zhen@xxxxxxxxx>

ptlrpcd_check() always scans all requests on the ptlrpc_request_set
and tries to finish completed requests, which is inefficient. Even
worse, l_wait_event() always checks the condition twice before
sleeping and once more after waking up, which means it calls
ptlrpcd_check() three times in each loop.

This patch moves completed requests to the head of the list in
ptlrpc_check_set(); with this change ptlrpcd_check() no longer needs
to scan all requests.
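
For illustration, below is a minimal userspace sketch of the list
technique the patch relies on. The list helpers are stripped-down
re-implementations matching the semantics of the kernel's
<linux/list.h> list_add_tail()/list_move_tail()/list_splice(); the
struct and enum names (item, PHASE_COMPLETE, ...) are made up for
the example and are not Lustre's. Completed entries are parked on a
temporary list during the scan and then spliced to the head of the
set, so the consumer can stop at the first incomplete entry instead
of walking everything:

#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void INIT_LIST_HEAD(struct list_head *h)
{
	h->next = h;
	h->prev = h;
}

static void list_add_tail(struct list_head *e, struct list_head *h)
{
	e->prev = h->prev;
	e->next = h;
	h->prev->next = e;
	h->prev = e;
}

static void list_move_tail(struct list_head *e, struct list_head *h)
{
	e->prev->next = e->next;	/* unlink from current position */
	e->next->prev = e->prev;
	list_add_tail(e, h);		/* re-insert at the tail of @h */
}

/* insert everything on @src at the head of @dst, as list_splice() does */
static void list_splice(struct list_head *src, struct list_head *dst)
{
	if (src->next == src)		/* empty source list */
		return;
	src->next->prev = dst;
	src->prev->next = dst->next;
	dst->next->prev = src->prev;
	dst->next = src->next;
}

enum phase { PHASE_RPC, PHASE_COMPLETE };

struct item {
	enum phase	 phase;
	struct list_head chain;
};

int main(void)
{
	struct item items[] = {
		{ PHASE_COMPLETE }, { PHASE_RPC },
		{ PHASE_COMPLETE }, { PHASE_RPC },
	};
	struct list_head set, comp, *pos, *next;
	unsigned int i, pruned = 0;

	INIT_LIST_HEAD(&set);
	INIT_LIST_HEAD(&comp);
	for (i = 0; i < sizeof(items) / sizeof(items[0]); i++)
		list_add_tail(&items[i].chain, &set);

	/* checker side (cf. ptlrpc_check_set): park completed entries on
	 * a temporary list, then splice them to the head of the set */
	for (pos = set.next, next = pos->next; pos != &set;
	     pos = next, next = pos->next)
		if (container_of(pos, struct item, chain)->phase ==
		    PHASE_COMPLETE)
			list_move_tail(pos, &comp);
	list_splice(&comp, &set);

	/* consumer side (cf. ptlrpcd_check): prune from the head and
	 * stop at the first incomplete entry */
	while (set.next != &set) {
		struct item *it = container_of(set.next, struct item, chain);

		if (it->phase != PHASE_COMPLETE)
			break;
		/* unlink; the real code does list_del_init() plus cleanup */
		set.next = it->chain.next;
		it->chain.next->prev = &set;
		pruned++;
	}
	printf("pruned %u completed items\n", pruned);
	return 0;
}

With completed requests sorted to the front like this, the prune loop
in ptlrpcd_check() only ever touches completed entries, which matters
because, as noted above, the check can run three times per
l_wait_event() iteration.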

Signed-off-by: Liang Zhen <liang.zhen@xxxxxxxxx>
Reviewed-on: http://review.whamcloud.com/11513
Intel-bug-id: https://jira.hpdd.intel.com/browse/LU-5548
Reviewed-by: Bobi Jam <bobijam@xxxxxxxxx>
Reviewed-by: Andreas Dilger <andreas.dilger@xxxxxxxxx>
Reviewed-by: Johann Lombardi <johann.lombardi@xxxxxxxxx>
Signed-off-by: Oleg Drokin <oleg.drokin@xxxxxxxxx>
---
 drivers/staging/lustre/lustre/ptlrpc/client.c  | 12 +++++++++++-
 drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c | 23 +++++++++--------------
 2 files changed, 20 insertions(+), 15 deletions(-)

diff --git a/drivers/staging/lustre/lustre/ptlrpc/client.c b/drivers/staging/lustre/lustre/ptlrpc/client.c
index dc9e406..8c1ec83 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/client.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/client.c
@@ -1497,11 +1497,13 @@ static inline int ptlrpc_set_producer(struct ptlrpc_request_set *set)
 int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
 {
 	struct list_head *tmp, *next;
+	struct list_head comp_reqs;
 	int force_timer_recalc = 0;
 
 	if (atomic_read(&set->set_remaining) == 0)
 		return 1;
 
+	INIT_LIST_HEAD(&comp_reqs);
 	list_for_each_safe(tmp, next, &set->set_requests) {
 		struct ptlrpc_request *req =
 			list_entry(tmp, struct ptlrpc_request,
@@ -1576,8 +1578,10 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
 			ptlrpc_rqphase_move(req, req->rq_next_phase);
 		}
 
-		if (req->rq_phase == RQ_PHASE_COMPLETE)
+		if (req->rq_phase == RQ_PHASE_COMPLETE) {
+			list_move_tail(&req->rq_set_chain, &comp_reqs);
 			continue;
+		}
 
 		if (req->rq_phase == RQ_PHASE_INTERPRET)
 			goto interpret;
@@ -1860,9 +1864,15 @@ interpret:
 			if (req->rq_status != 0)
 				set->set_rc = req->rq_status;
 			ptlrpc_req_finished(req);
+		} else {
+			list_move_tail(&req->rq_set_chain, &comp_reqs);
 		}
 	}
 
+	/* move completed requests to the head of the list so it's easier
+	 * for the caller to find them */
+	list_splice(&comp_reqs, &set->set_requests);
+
 	/* If we hit an error, we want to recover promptly. */
 	return atomic_read(&set->set_remaining) == 0 || force_timer_recalc;
 }
diff --git a/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c b/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
index cbcc541..4621b71 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
@@ -306,21 +306,16 @@ static int ptlrpcd_check(struct lu_env *env, struct ptlrpcd_ctl *pc)
 	if (atomic_read(&set->set_remaining))
 		rc |= ptlrpc_check_set(env, set);
 
-	if (!list_empty(&set->set_requests)) {
-		/*
-		 * XXX: our set never completes, so we prune the completed
-		 * reqs after each iteration. boy could this be smarter.
-		 */
-		list_for_each_safe(pos, tmp, &set->set_requests) {
-			req = list_entry(pos, struct ptlrpc_request,
-					 rq_set_chain);
-			if (req->rq_phase != RQ_PHASE_COMPLETE)
-				continue;
+	/* NB: ptlrpc_check_set has already moved completed requests to
+	 * the head of set::set_requests */
+	list_for_each_safe(pos, tmp, &set->set_requests) {
+		req = list_entry(pos, struct ptlrpc_request, rq_set_chain);
+		if (req->rq_phase != RQ_PHASE_COMPLETE)
+			break;
 
-			list_del_init(&req->rq_set_chain);
-			req->rq_set = NULL;
-			ptlrpc_req_finished(req);
-		}
+		list_del_init(&req->rq_set_chain);
+		req->rq_set = NULL;
+		ptlrpc_req_finished(req);
 	}
 
 	if (rc == 0) {
--
2.1.0
