[RESEND PATCH v2 3/3] block: make blk_freeze_queue_start private and export a blk-mq alias
From: Jinlong Chen
Date: Sun Oct 30 2022 - 01:28:09 EST
blk_freeze_queue_start is used both internally, for draining any request
queue, and externally, for blk-mq specific queue freezing. Move the
implementation to block/blk-core.c, keep the non-blk-mq name private to the
block layer (declared in block/blk.h), and export a blk_mq_freeze_queue_start
alias for blk-mq users, as sketched below.
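For illustration, a minimal driver-side sketch of the resulting split
(example_freeze_for_update is a hypothetical helper, not part of this
patch; the blk_mq_* calls are the exported interface):

  static void example_freeze_for_update(struct request_queue *q)
  {
  	/* Drop the initial q_usage_counter reference; new submitters block. */
  	blk_mq_freeze_queue_start(q);

  	/* Wait until every request already in the queue has completed. */
  	blk_mq_freeze_queue_wait(q);

  	/* ... change queue data structures while nothing is in flight ... */

  	blk_mq_unfreeze_queue(q);
  }

Block-layer-internal callers such as blk_queue_start_drain keep using the
private blk_freeze_queue_start.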
Signed-off-by: Jinlong Chen <nickyc975@xxxxxxxxxx>
---
 block/blk-core.c              | 13 +++++++++++++
 block/blk-mq.c                | 27 ++++++++++++++-------------
 block/blk-pm.c                |  2 +-
 block/blk.h                   |  1 +
 drivers/nvme/host/core.c      |  2 +-
 drivers/nvme/host/multipath.c |  2 +-
 include/linux/blk-mq.h        |  2 +-
 7 files changed, 32 insertions(+), 17 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 5d50dd16e2a5..d3dd439a8ed4 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -269,6 +269,19 @@ void blk_put_queue(struct request_queue *q)
 }
 EXPORT_SYMBOL(blk_put_queue);
 
+void blk_freeze_queue_start(struct request_queue *q)
+{
+	mutex_lock(&q->mq_freeze_lock);
+	if (++q->mq_freeze_depth == 1) {
+		percpu_ref_kill(&q->q_usage_counter);
+		mutex_unlock(&q->mq_freeze_lock);
+		if (queue_is_mq(q))
+			blk_mq_run_hw_queues(q, false);
+	} else {
+		mutex_unlock(&q->mq_freeze_lock);
+	}
+}
+
 void blk_queue_start_drain(struct request_queue *q)
 {
 	/*
diff --git a/block/blk-mq.c b/block/blk-mq.c
index e0654a2e80b9..d638bd0fb4d8 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -161,19 +161,20 @@ void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part,
 	inflight[1] = mi.inflight[1];
 }
 
-void blk_freeze_queue_start(struct request_queue *q)
+void blk_mq_freeze_queue_start(struct request_queue *q)
 {
-	mutex_lock(&q->mq_freeze_lock);
-	if (++q->mq_freeze_depth == 1) {
-		percpu_ref_kill(&q->q_usage_counter);
-		mutex_unlock(&q->mq_freeze_lock);
-		if (queue_is_mq(q))
-			blk_mq_run_hw_queues(q, false);
-	} else {
-		mutex_unlock(&q->mq_freeze_lock);
-	}
+	/*
+	 * Warn on non-blk-mq usage.
+	 */
+	WARN_ON_ONCE(!queue_is_mq(q));
+
+	/*
+	 * This is just an alias of blk_freeze_queue_start, provided to keep
+	 * the blk_mq_* namespace consistent.
+	 */
+	blk_freeze_queue_start(q);
 }
-EXPORT_SYMBOL_GPL(blk_freeze_queue_start);
+EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_start);
 
 void blk_mq_freeze_queue_wait(struct request_queue *q)
 {
@@ -196,7 +197,7 @@ EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait_timeout);
  */
 void blk_mq_freeze_queue(struct request_queue *q)
 {
-	blk_freeze_queue_start(q);
+	blk_mq_freeze_queue_start(q);
 	blk_mq_freeze_queue_wait(q);
 }
 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);
@@ -1570,7 +1571,7 @@ static void blk_mq_timeout_work(struct work_struct *work)
 	 * percpu_ref_tryget directly, because we need to be able to
 	 * obtain a reference even in the short window between the queue
 	 * starting to freeze, by dropping the first reference in
-	 * blk_freeze_queue_start, and the moment the last request is
+	 * blk_mq_freeze_queue_start, and the moment the last request is
 	 * consumed, marked by the instant q_usage_counter reaches
 	 * zero.
 	 */
diff --git a/block/blk-pm.c b/block/blk-pm.c
index 2dad62cc1572..ae2b950ed45d 100644
--- a/block/blk-pm.c
+++ b/block/blk-pm.c
@@ -80,7 +80,7 @@ int blk_pre_runtime_suspend(struct request_queue *q)
 	blk_set_pm_only(q);
 	ret = -EBUSY;
 	/* Switch q_usage_counter from per-cpu to atomic mode. */
-	blk_freeze_queue_start(q);
+	blk_mq_freeze_queue_start(q);
 	/*
 	 * Wait until atomic mode has been reached. Since that
 	 * involves calling call_rcu(), it is guaranteed that later
diff --git a/block/blk.h b/block/blk.h
index e9addea2838a..ee576bb74382 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -37,6 +37,7 @@ struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
 					      gfp_t flags);
 void blk_free_flush_queue(struct blk_flush_queue *q);
 
+void blk_freeze_queue_start(struct request_queue *q);
 void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic);
 void blk_queue_start_drain(struct request_queue *q);
 int __bio_queue_enter(struct request_queue *q, struct bio *bio);
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 0090dc0b3ae6..e2d5c54c651a 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -5199,7 +5199,7 @@ void nvme_start_freeze(struct nvme_ctrl *ctrl)
 
 	down_read(&ctrl->namespaces_rwsem);
 	list_for_each_entry(ns, &ctrl->namespaces, list)
-		blk_freeze_queue_start(ns->queue);
+		blk_mq_freeze_queue_start(ns->queue);
 	up_read(&ctrl->namespaces_rwsem);
 }
 EXPORT_SYMBOL_GPL(nvme_start_freeze);
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 0ea7e441e080..3bb358bd0cde 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -77,7 +77,7 @@ void nvme_mpath_start_freeze(struct nvme_subsystem *subsys)
 	lockdep_assert_held(&subsys->lock);
 	list_for_each_entry(h, &subsys->nsheads, entry)
 		if (h->disk)
-			blk_freeze_queue_start(h->disk->queue);
+			blk_mq_freeze_queue_start(h->disk->queue);
 }
 
 void nvme_failover_req(struct request *req)
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 569053ed959d..8600d4b4aa80 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -887,7 +887,7 @@ void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
 void blk_mq_tagset_wait_completed_request(struct blk_mq_tag_set *tagset);
 void blk_mq_freeze_queue(struct request_queue *q);
 void blk_mq_unfreeze_queue(struct request_queue *q);
-void blk_freeze_queue_start(struct request_queue *q);
+void blk_mq_freeze_queue_start(struct request_queue *q);
 void blk_mq_freeze_queue_wait(struct request_queue *q);
 int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
 				     unsigned long timeout);
--
2.31.1