Re: [PATCH 5/5] io_uring: implement multishot mode for accept

From: Hao Xu
Date: Sat May 07 2022 - 05:13:09 EST


On 2022/5/6 10:42 PM, Jens Axboe wrote:
> On 5/6/22 1:01 AM, Hao Xu wrote:
>> diff --git a/fs/io_uring.c b/fs/io_uring.c
>> index 0a83ecc457d1..9febe7774dc3 100644
>> --- a/fs/io_uring.c
>> +++ b/fs/io_uring.c
>> @@ -1254,6 +1254,7 @@ static int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags);
>>  static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer);
>>  static void io_eventfd_signal(struct io_ring_ctx *ctx);
>>  static void io_req_tw_post_queue(struct io_kiocb *req, s32 res, u32 cflags);
>> +static void io_poll_remove_entries(struct io_kiocb *req);
>>  static struct kmem_cache *req_cachep;
>> @@ -5690,24 +5691,29 @@ static int io_recv(struct io_kiocb *req, unsigned int issue_flags)
>>  static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
>>  {
>>  	struct io_accept *accept = &req->accept;
>> +	bool multishot;
>>
>>  	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
>>  		return -EINVAL;
>> -	if (sqe->ioprio || sqe->len || sqe->buf_index)
>> +	if (sqe->len || sqe->buf_index)
>>  		return -EINVAL;
>>
>>  	accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
>>  	accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
>>  	accept->flags = READ_ONCE(sqe->accept_flags);
>>  	accept->nofile = rlimit(RLIMIT_NOFILE);
>> +	multishot = !!(READ_ONCE(sqe->ioprio) & IORING_ACCEPT_MULTISHOT);

> I tend to like:
>
> 	multishot = (READ_ONCE(sqe->ioprio) & IORING_ACCEPT_MULTISHOT) != 0;
>
> as I think it's more readable. But I think we really want it ala:
>
> 	u16 poll_flags;
>
> 	poll_flags = READ_ONCE(sqe->ioprio);
> 	if (poll_flags & ~IORING_ACCEPT_MULTISHOT)
> 		return -EINVAL;
>
> 	...
>
> to ensure that we can add more flags later, hence only accepting this
> single flag right now.
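
Folding that in, the prep helper would look roughly like this (untested
sketch; the quoted hunk above ends before the patch's multishot setup,
so the REQ_F_APOLL_MULTISHOT line below is my guess at that part):

	static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
	{
		struct io_accept *accept = &req->accept;
		u16 poll_flags;

		if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
			return -EINVAL;
		if (sqe->len || sqe->buf_index)
			return -EINVAL;

		/* reject unknown bits so more flags can be added later */
		poll_flags = READ_ONCE(sqe->ioprio);
		if (poll_flags & ~IORING_ACCEPT_MULTISHOT)
			return -EINVAL;
		if (poll_flags & IORING_ACCEPT_MULTISHOT)
			req->flags |= REQ_F_APOLL_MULTISHOT;

		accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
		accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
		accept->flags = READ_ONCE(sqe->accept_flags);
		accept->nofile = rlimit(RLIMIT_NOFILE);
		return 0;
	}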

> Do we need REQ_F_APOLL_MULTI_POLLED, or can we just store whether this
> is a multishot request in struct io_accept?
I think we can do it that way, but it may be a bit inconvenient if we
add other multishot opcodes. With REQ_F_APOLL_MULTI_POLLED we can just
check req->flags in the poll arming path, which keeps it op-agnostic.
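
To make the tradeoff concrete (made-up helper names, purely for
illustration; neither function is in this series):

	/* generic: any opcode just sets REQ_F_APOLL_MULTISHOT in its prep */
	static bool io_req_multishot(struct io_kiocb *req)
	{
		return req->flags & REQ_F_APOLL_MULTISHOT;
	}

	/* op-specific storage would need per-opcode knowledge instead */
	static bool io_req_multishot_in_op(struct io_kiocb *req)
	{
		switch (req->opcode) {
		case IORING_OP_ACCEPT:
			return req->accept.multishot;	/* hypothetical field */
		/* every new multishot opcode would need a case here */
		default:
			return false;
		}
	}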

>> @@ -5760,7 +5774,35 @@ static int io_accept(struct io_kiocb *req, unsigned int issue_flags)
>>  		ret = io_install_fixed_file(req, file, issue_flags,
>>  					    accept->file_slot - 1);
>>  	}
>> -	__io_req_complete(req, issue_flags, ret, 0);
>> +
>> +	if (req->flags & REQ_F_APOLL_MULTISHOT) {
>> +		if (ret >= 0) {
>> +			bool filled;
>> +
>> +			spin_lock(&ctx->completion_lock);
>> +			filled = io_fill_cqe_aux(ctx, req->cqe.user_data, ret,
>> +						 IORING_CQE_F_MORE);
>> +			io_commit_cqring(ctx);
>> +			spin_unlock(&ctx->completion_lock);
>> +			if (unlikely(!filled)) {
>> +				io_poll_clean(req);
>> +				return -ECANCELED;
>> +			}
>> +			io_cqring_ev_posted(ctx);
>> +			goto retry;
>> +		} else {
>> +			/*
>> +			 * The apoll multishot req should handle poll
>> +			 * cancellation by itself, since the upper layer
>> +			 * that called io_queue_sqe() cannot see errors
>> +			 * that happen here.
>> +			 */
>> +			io_poll_clean(req);
>> +			return ret;
>> +		}
>> +	} else {
>> +		__io_req_complete(req, issue_flags, ret, 0);
>> +	}
>>  	return 0;
>>  }

> I'd probably just make that:
>
> 	if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
> 		__io_req_complete(req, issue_flags, ret, 0);
> 		return 0;
> 	}
> 	if (ret >= 0) {
> 		bool filled;
>
> 		spin_lock(&ctx->completion_lock);
> 		filled = io_fill_cqe_aux(ctx, req->cqe.user_data, ret,
> 					 IORING_CQE_F_MORE);
> 		io_commit_cqring(ctx);
> 		spin_unlock(&ctx->completion_lock);
> 		if (filled) {
> 			io_cqring_ev_posted(ctx);
> 			goto retry;
> 		}
> 		/* fall through to error case */
> 		ret = -ECANCELED;
> 	}
>
> 	/*
> 	 * The apoll multishot req should handle poll cancellation by
> 	 * itself, since the upper layer that called io_queue_sqe()
> 	 * cannot see errors that happen here.
> 	 */
> 	io_poll_clean(req);
> 	return ret;
>
> which I think is a lot easier to read and keeps the indentation at a
> manageable level and reduces duplicate code.
Great, thanks, it's better.
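
For completeness, this is roughly how I'd expect userspace to drive the
multishot accept once this lands (sketch against liburing; there is no
prep helper for the new flag yet, so it is set on the SQE by hand):

	#include <liburing.h>
	#include <errno.h>
	#include <stdbool.h>
	#include <stdio.h>

	/* one multishot accept SQE produces a CQE per accepted connection */
	static int accept_loop(struct io_uring *ring, int listen_fd)
	{
		struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
		struct io_uring_cqe *cqe;

		if (!sqe)
			return -EBUSY;
		io_uring_prep_accept(sqe, listen_fd, NULL, NULL, 0);
		sqe->ioprio = IORING_ACCEPT_MULTISHOT;	/* flag from this series */
		io_uring_submit(ring);

		for (;;) {
			int ret = io_uring_wait_cqe(ring, &cqe);
			int res;
			bool more;

			if (ret < 0)
				return ret;
			res = cqe->res;			/* new connection fd, or -errno */
			more = cqe->flags & IORING_CQE_F_MORE;
			io_uring_cqe_seen(ring, cqe);
			if (res >= 0)
				printf("accepted fd %d\n", res);
			/* no IORING_CQE_F_MORE: the multishot request has ended */
			if (!more)
				return res < 0 ? res : 0;
		}
	}

As I understand the series, the terminal CQE (error or cancellation) is
posted without IORING_CQE_F_MORE, which is what ends the loop above.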