[PATCH 3/4] block: convert to using atomic-ref

From: Jens Axboe
Date: Mon Dec 06 2021 - 13:11:19 EST

Convert the request reference counting in the block layer to the
generic atomic-ref helpers added earlier in this series, and drop the
now-unused open-coded req_ref_*() helpers from block/blk.h.

Signed-off-by: Jens Axboe <axboe@xxxxxxxxx>
---
 block/blk-flush.c      |  4 ++--
 block/blk-mq-tag.c     |  2 +-
 block/blk-mq.c         | 12 ++++++------
 block/blk.h            | 31 -------------------------------
 include/linux/blk-mq.h |  1 +
 5 files changed, 10 insertions(+), 40 deletions(-)
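
For reference, the call sites in this patch assume an interface along
these lines. This is only a sketch of <linux/atomic-ref.h> (which an
earlier patch in this series presumably adds), reconstructed from the
req_ref_*() helpers removed from block/blk.h below, with the signatures
generalized to operate on a bare atomic_t:

	/* sketch: overflow-checked refcounting on a bare atomic_t */
	#define atomic_ref_zero_or_close_to_overflow(ref)	\
		((unsigned int) atomic_read(ref) + 127u <= 127u)

	static inline bool atomic_ref_inc_not_zero(atomic_t *ref)
	{
		/* take a reference unless the count has already hit zero */
		return atomic_inc_not_zero(ref);
	}

	static inline bool atomic_ref_put_and_test(atomic_t *ref)
	{
		/* warn on underflow/near-overflow, as the old helpers did */
		WARN_ON_ONCE(atomic_ref_zero_or_close_to_overflow(ref));
		return atomic_dec_and_test(ref);
	}

Initialization and raw reads keep using plain atomic_set()/atomic_read(),
as the conversion below shows.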

diff --git a/block/blk-flush.c b/block/blk-flush.c
index e4df894189ce..e957902af17c 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -229,7 +229,7 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
 	/* release the tag's ownership to the req cloned from */
 	spin_lock_irqsave(&fq->mq_flush_lock, flags);
 
-	if (!req_ref_put_and_test(flush_rq)) {
+	if (!atomic_ref_put_and_test(&flush_rq->ref)) {
 		fq->rq_status = error;
 		spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
 		return;
@@ -349,7 +349,7 @@ static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
 	 * and READ flush_rq->end_io
 	 */
 	smp_wmb();
-	req_ref_set(flush_rq, 1);
+	atomic_set(&flush_rq->ref, 1);
 
 	blk_flush_queue_rq(flush_rq, false);
 }
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 380e2dd31bfc..d9f961320652 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -228,7 +228,7 @@ static struct request *blk_mq_find_and_get_req(struct blk_mq_tags *tags,
 
 	spin_lock_irqsave(&tags->lock, flags);
 	rq = tags->rqs[bitnr];
-	if (!rq || rq->tag != bitnr || !req_ref_inc_not_zero(rq))
+	if (!rq || rq->tag != bitnr || !atomic_ref_inc_not_zero(&rq->ref))
 		rq = NULL;
 	spin_unlock_irqrestore(&tags->lock, flags);
 	return rq;
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 0bf3523dd1f5..2be5557a77c9 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -386,7 +386,7 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
 	INIT_LIST_HEAD(&rq->queuelist);
 	/* tag was already set */
 	WRITE_ONCE(rq->deadline, 0);
-	req_ref_set(rq, 1);
+	atomic_set(&rq->ref, 1);
 
 	if (rq->rq_flags & RQF_ELV) {
 		struct elevator_queue *e = data->q->elevator;
@@ -634,7 +634,7 @@ void blk_mq_free_request(struct request *rq)
 	rq_qos_done(q, rq);
 
 	WRITE_ONCE(rq->state, MQ_RQ_IDLE);
-	if (req_ref_put_and_test(rq))
+	if (atomic_ref_put_and_test(&rq->ref))
 		__blk_mq_free_request(rq);
 }
 EXPORT_SYMBOL_GPL(blk_mq_free_request);
@@ -930,7 +930,7 @@ void blk_mq_end_request_batch(struct io_comp_batch *iob)
 		rq_qos_done(rq->q, rq);
 
 		WRITE_ONCE(rq->state, MQ_RQ_IDLE);
-		if (!req_ref_put_and_test(rq))
+		if (!atomic_ref_put_and_test(&rq->ref))
 			continue;
 
 		blk_crypto_free_request(rq);
@@ -1373,7 +1373,7 @@ void blk_mq_put_rq_ref(struct request *rq)
 {
 	if (is_flush_rq(rq))
 		rq->end_io(rq, 0);
-	else if (req_ref_put_and_test(rq))
+	else if (atomic_ref_put_and_test(&rq->ref))
 		__blk_mq_free_request(rq);
 }
 
@@ -3005,7 +3005,7 @@ static void blk_mq_clear_rq_mapping(struct blk_mq_tags *drv_tags,
 			unsigned long rq_addr = (unsigned long)rq;
 
 			if (rq_addr >= start && rq_addr < end) {
-				WARN_ON_ONCE(req_ref_read(rq) != 0);
+				WARN_ON_ONCE(atomic_read(&rq->ref) != 0);
 				cmpxchg(&drv_tags->rqs[i], rq, NULL);
 			}
 		}
@@ -3339,7 +3339,7 @@ static void blk_mq_clear_flush_rq_mapping(struct blk_mq_tags *tags,
 	if (!tags)
 		return;
 
-	WARN_ON_ONCE(req_ref_read(flush_rq) != 0);
+	WARN_ON_ONCE(atomic_read(&flush_rq->ref) != 0);
 
 	for (i = 0; i < queue_depth; i++)
 		cmpxchg(&tags->rqs[i], flush_rq, NULL);
diff --git a/block/blk.h b/block/blk.h
index 7ccb7c7d86b3..0114e18b9903 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -469,35 +469,4 @@ static inline bool should_fail_request(struct block_device *part,
 }
 #endif /* CONFIG_FAIL_MAKE_REQUEST */
 
-/*
- * Optimized request reference counting. Ideally we'd make timeouts be more
- * clever, as that's the only reason we need references at all... But until
- * this happens, this is faster than using refcount_t. Also see:
- *
- * abc54d634334 ("io_uring: switch to atomic_t for io_kiocb reference count")
- */
-#define req_ref_zero_or_close_to_overflow(req)	\
-	((unsigned int) atomic_read(&(req->ref)) + 127u <= 127u)
-
-static inline bool req_ref_inc_not_zero(struct request *req)
-{
-	return atomic_inc_not_zero(&req->ref);
-}
-
-static inline bool req_ref_put_and_test(struct request *req)
-{
-	WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
-	return atomic_dec_and_test(&req->ref);
-}
-
-static inline void req_ref_set(struct request *req, int value)
-{
-	atomic_set(&req->ref, value);
-}
-
-static inline int req_ref_read(struct request *req)
-{
-	return atomic_read(&req->ref);
-}
-
 #endif /* BLK_INTERNAL_H */
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index ecdc049b52fa..02abf08f5765 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -7,6 +7,7 @@
 #include <linux/lockdep.h>
 #include <linux/scatterlist.h>
 #include <linux/prefetch.h>
+#include <linux/atomic-ref.h>
 
 struct blk_mq_tags;
 struct blk_flush_queue;
--
2.34.1

