[PATCH 06/17] block: extend queue bypassing to cover blkcg policies

From: Tejun Heo
Date: Sat Jan 21 2012 - 22:28:25 EST


Extend queue bypassing such that a dying queue is always bypassing and
blk-throttle is drained on bypass. With blkcg policies updated to
test blk_queue_bypass() instead of blk_queue_dead(), this ensures that
no bio or request is held by or going through blkcg policies on a
bypassing queue.

This will be used to implement blkg cleanup on elevator switches and
policy changes.

Signed-off-by: Tejun Heo <tj@xxxxxxxxxx>
Cc: Vivek Goyal <vgoyal@xxxxxxxxxx>
---
block/blk-core.c | 12 ++++++++----
block/blk-throttle.c | 4 ++--
2 files changed, 10 insertions(+), 6 deletions(-)

diff --git a/block/blk-core.c b/block/blk-core.c
index c2e39de..c6c61c0 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -366,8 +366,7 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
spin_lock_irq(q->queue_lock);

elv_drain_elevator(q);
- if (drain_all)
- blk_throtl_drain(q);
+ blk_throtl_drain(q);

/*
* This function might be called on a queue which failed
@@ -408,8 +407,8 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
*
* In bypass mode, only the dispatch FIFO queue of @q is used. This
* function makes @q enter bypass mode and drains all requests which were
- * issued before. On return, it's guaranteed that no request has ELVPRIV
- * set.
+ * throttled or issued before. On return, it's guaranteed that no request
+ * is being throttled or has ELVPRIV set.
*/
void blk_queue_bypass_start(struct request_queue *q)
{
@@ -454,6 +453,11 @@ void blk_cleanup_queue(struct request_queue *q)
queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);

spin_lock_irq(lock);
+
+ /* dead queue is permanently in bypass mode till released */
+ q->bypass_depth++;
+ queue_flag_set(QUEUE_FLAG_BYPASS, q);
+
queue_flag_set(QUEUE_FLAG_NOMERGES, q);
queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
queue_flag_set(QUEUE_FLAG_DEAD, q);
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 5eed6a7..702c0e6 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -310,7 +310,7 @@ static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
struct request_queue *q = td->queue;

/* no throttling for dead queue */
- if (unlikely(blk_queue_dead(q)))
+ if (unlikely(blk_queue_bypass(q)))
return NULL;

rcu_read_lock();
@@ -335,7 +335,7 @@ static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
spin_lock_irq(q->queue_lock);

/* Make sure @q is still alive */
- if (unlikely(blk_queue_dead(q))) {
+ if (unlikely(blk_queue_bypass(q))) {
kfree(tg);
return NULL;
}
--
1.7.7.3

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at http://vger.kernel.org/majordomo-info.html
Please read the FAQ at http://www.tux.org/lkml/