[PATCH net-next v3 22/25] io_uring: flush notifiers after sendzc

From: Pavel Begunkov
Date: Tue Jul 05 2022 - 11:04:48 EST


Allow flushing notifiers as part of a sendzc request by setting the
IORING_SENDZC_FLUSH flag. When the sendzc request succeeds it will
flush the used [active] notifier.
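
For illustration only (not part of this patch), a minimal userspace sketch of
how the flag might be used. It assumes liburing, that a notification slot has
already been registered, that the zero-copy send opcode added earlier in this
series is named IORING_OP_SENDZC_NOTIF, and that the zc flags travel in
sqe->ioprio; these uAPI details are assumptions and may differ from this
revision.

#include <errno.h>
#include <liburing.h>

/*
 * Hypothetical example: issue a zero-copy send and ask the kernel to
 * flush the active notifier once the request succeeds. The opcode name
 * and the sqe field carrying the zc flags are assumptions about this
 * series' uAPI, not a confirmed ABI. Notification slot selection
 * (registered beforehand) is omitted for brevity.
 */
static int sendzc_and_flush(struct io_uring *ring, int sockfd,
			    const void *buf, unsigned int len)
{
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int ret;

	sqe = io_uring_get_sqe(ring);
	if (!sqe)
		return -EAGAIN;

	/* assumed opcode name for the zc send added earlier in the series */
	io_uring_prep_rw(IORING_OP_SENDZC_NOTIF, sqe, sockfd, buf, len, 0);
	/* assumption: zc flags are carried in sqe->ioprio in this revision */
	sqe->ioprio = IORING_SENDZC_FLUSH;

	ret = io_uring_submit(ring);
	if (ret < 0)
		return ret;
	ret = io_uring_wait_cqe(ring, &cqe);
	if (ret < 0)
		return ret;
	ret = cqe->res;		/* bytes sent, or -errno */
	io_uring_cqe_seen(ring, cqe);
	return ret;
}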

Signed-off-by: Pavel Begunkov <asml.silence@xxxxxxxxx>
---
 include/uapi/linux/io_uring.h |  1 +
 io_uring/io_uring.c           | 11 +----------
 io_uring/io_uring.h           | 10 ++++++++++
 io_uring/net.c                |  4 +++-
 io_uring/notif.c              |  2 +-
 io_uring/notif.h              | 11 +++++++++++
 6 files changed, 27 insertions(+), 12 deletions(-)

diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index 2509e6184bc7..2fd4e39a14d3 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -277,6 +277,7 @@ enum io_uring_op {
*/
enum {
IORING_SENDZC_FIXED_BUF = (1U << 0),
+ IORING_SENDZC_FLUSH = (1U << 1),
};

/*
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 3b885d65e569..8f4152f01989 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -615,7 +615,7 @@ void __io_put_task(struct task_struct *task, int nr)
put_task_struct_many(task, nr);
}

-static void io_task_refs_refill(struct io_uring_task *tctx)
+void io_task_refs_refill(struct io_uring_task *tctx)
{
unsigned int refill = -tctx->cached_refs + IO_TCTX_REFS_CACHE_NR;

@@ -624,15 +624,6 @@ static void io_task_refs_refill(struct io_uring_task *tctx)
tctx->cached_refs += refill;
}

-static inline void io_get_task_refs(int nr)
-{
- struct io_uring_task *tctx = current->io_uring;
-
- tctx->cached_refs -= nr;
- if (unlikely(tctx->cached_refs < 0))
- io_task_refs_refill(tctx);
-}
-
static __cold void io_uring_drop_tctx_refs(struct task_struct *task)
{
struct io_uring_task *tctx = task->io_uring;
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index e978654d1b14..cf154e9c8e28 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -60,6 +60,7 @@ void io_wq_submit_work(struct io_wq_work *work);
void io_free_req(struct io_kiocb *req);
void io_queue_next(struct io_kiocb *req);
void __io_put_task(struct task_struct *task, int nr);
+void io_task_refs_refill(struct io_uring_task *tctx);

bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
bool cancel_all);
@@ -254,4 +255,13 @@ static inline void io_put_task(struct task_struct *task, int nr)
__io_put_task(task, nr);
}

+static inline void io_get_task_refs(int nr)
+{
+ struct io_uring_task *tctx = current->io_uring;
+
+ tctx->cached_refs -= nr;
+ if (unlikely(tctx->cached_refs < 0))
+ io_task_refs_refill(tctx);
+}
+
#endif
diff --git a/io_uring/net.c b/io_uring/net.c
index 3dfe07749b04..3cd75d69fe70 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -784,7 +784,7 @@ int io_connect(struct io_kiocb *req, unsigned int issue_flags)
return IOU_OK;
}

-#define IO_SENDZC_VALID_FLAGS IORING_SENDZC_FIXED_BUF
+#define IO_SENDZC_VALID_FLAGS (IORING_SENDZC_FIXED_BUF|IORING_SENDZC_FLUSH)

int io_sendzc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
@@ -895,6 +895,8 @@ int io_sendzc(struct io_kiocb *req, unsigned int issue_flags)
return ret == -ERESTARTSYS ? -EINTR : ret;
}

+ if (zc->zc_flags & IORING_SENDZC_FLUSH)
+ io_notif_slot_flush_submit(notif_slot, 0);
io_req_set_res(req, ret, 0);
return IOU_OK;
}
diff --git a/io_uring/notif.c b/io_uring/notif.c
index a53acdda9ec0..847535d34c65 100644
--- a/io_uring/notif.c
+++ b/io_uring/notif.c
@@ -133,7 +133,7 @@ struct io_notif *io_alloc_notif(struct io_ring_ctx *ctx,
return notif;
}

-static void io_notif_slot_flush(struct io_notif_slot *slot)
+void io_notif_slot_flush(struct io_notif_slot *slot)
__must_hold(&ctx->uring_lock)
{
struct io_notif *notif = slot->notif;
diff --git a/io_uring/notif.h b/io_uring/notif.h
index 00efe164bdc4..6cd73d7b965b 100644
--- a/io_uring/notif.h
+++ b/io_uring/notif.h
@@ -54,6 +54,7 @@ int io_notif_register(struct io_ring_ctx *ctx,
int io_notif_unregister(struct io_ring_ctx *ctx);
void io_notif_cache_purge(struct io_ring_ctx *ctx);

+void io_notif_slot_flush(struct io_notif_slot *slot);
struct io_notif *io_alloc_notif(struct io_ring_ctx *ctx,
struct io_notif_slot *slot);

@@ -74,3 +75,13 @@ static inline struct io_notif_slot *io_get_notif_slot(struct io_ring_ctx *ctx,
idx = array_index_nospec(idx, ctx->nr_notif_slots);
return &ctx->notif_slots[idx];
}
+
+static inline void io_notif_slot_flush_submit(struct io_notif_slot *slot,
+ unsigned int issue_flags)
+{
+ if (!(issue_flags & IO_URING_F_UNLOCKED)) {
+ slot->notif->task = current;
+ io_get_task_refs(1);
+ }
+ io_notif_slot_flush(slot);
+}
--
2.36.1