Re: [PATCH 1/2] io_uring: Rename struct io_op_def

From: Pavel Begunkov
Date: Wed Jan 18 2023 - 08:56:50 EST


On 1/12/23 14:44, Breno Leitao wrote:
The current io_op_def struct is becoming huge and the name is a bit
generic.

The goal of this patch is to rename this struct to `io_issue_def`. This
struct will contain the hot functions associated with the issue code
path.

Reviewed-by: Pavel Begunkov <asml.silence@xxxxxxxxx>


For now, this patch only renames the structure; an upcoming patch will
split it in two, moving the non-issue fields into a secondary struct.
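
For reference, a rough sketch of what such a split could look like, with
the issue-path members kept in io_issue_def and the rest moved out. The
io_cold_def name and the exact field placement below are assumptions for
illustration, not taken from this series:

/*
 * Sketch only: the io_cold_def name and the field placement are
 * assumptions, not the actual follow-up patch.
 */
struct io_kiocb;
struct io_uring_sqe;

struct io_issue_def {			/* hot: consulted on every issue */
	unsigned		needs_file : 1;
	unsigned		plug : 1;
	unsigned		audit_skip : 1;
	/* ... remaining per-opcode flags ... */
	int (*issue)(struct io_kiocb *, unsigned int);
	int (*prep)(struct io_kiocb *, const struct io_uring_sqe *);
};

struct io_cold_def {			/* cold: prep/cleanup/error paths */
	unsigned short		async_size;
	const char		*name;
	int (*prep_async)(struct io_kiocb *);
	void (*cleanup)(struct io_kiocb *);
	void (*fail)(struct io_kiocb *);
};

Keeping only the per-opcode flags and the prep/issue callbacks in the hot
struct should shrink the entry that the issue path touches for every
request, which matches the stated motivation for the rename.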

Signed-off-by: Breno Leitao <leitao@xxxxxxxxxx>
---
 io_uring/io_uring.c | 26 +++++++++++++-------------
 io_uring/opdef.c    | 16 ++++++++--------
 io_uring/opdef.h    |  4 ++--
 io_uring/poll.c     |  2 +-
 io_uring/rw.c       |  2 +-
 5 files changed, 25 insertions(+), 25 deletions(-)

diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 2ac1cd8d23ea..ac7868ec9be2 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -407,7 +407,7 @@ static inline void io_arm_ltimeout(struct io_kiocb *req)
static void io_prep_async_work(struct io_kiocb *req)
{
- const struct io_op_def *def = &io_op_defs[req->opcode];
+ const struct io_issue_def *def = &io_issue_defs[req->opcode];
struct io_ring_ctx *ctx = req->ctx;
if (!(req->flags & REQ_F_CREDS)) {
@@ -980,7 +980,7 @@ void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags)
void io_req_defer_failed(struct io_kiocb *req, s32 res)
__must_hold(&ctx->uring_lock)
{
- const struct io_op_def *def = &io_op_defs[req->opcode];
+ const struct io_issue_def *def = &io_issue_defs[req->opcode];
lockdep_assert_held(&req->ctx->uring_lock);
@@ -1708,8 +1708,8 @@ unsigned int io_file_get_flags(struct file *file)
bool io_alloc_async_data(struct io_kiocb *req)
{
- WARN_ON_ONCE(!io_op_defs[req->opcode].async_size);
- req->async_data = kmalloc(io_op_defs[req->opcode].async_size, GFP_KERNEL);
+ WARN_ON_ONCE(!io_issue_defs[req->opcode].async_size);
+ req->async_data = kmalloc(io_issue_defs[req->opcode].async_size, GFP_KERNEL);
if (req->async_data) {
req->flags |= REQ_F_ASYNC_DATA;
return false;
@@ -1719,7 +1719,7 @@ bool io_alloc_async_data(struct io_kiocb *req)
int io_req_prep_async(struct io_kiocb *req)
{
- const struct io_op_def *def = &io_op_defs[req->opcode];
+ const struct io_issue_def *def = &io_issue_defs[req->opcode];
/* assign early for deferred execution for non-fixed file */
if (def->needs_file && !(req->flags & REQ_F_FIXED_FILE))
@@ -1728,7 +1728,7 @@ int io_req_prep_async(struct io_kiocb *req)
return 0;
if (WARN_ON_ONCE(req_has_async_data(req)))
return -EFAULT;
- if (!io_op_defs[req->opcode].manual_alloc) {
+ if (!io_issue_defs[req->opcode].manual_alloc) {
if (io_alloc_async_data(req))
return -EAGAIN;
}
@@ -1801,7 +1801,7 @@ static void io_clean_op(struct io_kiocb *req)
}
if (req->flags & REQ_F_NEED_CLEANUP) {
- const struct io_op_def *def = &io_op_defs[req->opcode];
+ const struct io_issue_def *def = &io_issue_defs[req->opcode];
if (def->cleanup)
def->cleanup(req);
@@ -1827,7 +1827,7 @@ static void io_clean_op(struct io_kiocb *req)
static bool io_assign_file(struct io_kiocb *req, unsigned int issue_flags)
{
- if (req->file || !io_op_defs[req->opcode].needs_file)
+ if (req->file || !io_issue_defs[req->opcode].needs_file)
return true;
if (req->flags & REQ_F_FIXED_FILE)
@@ -1840,7 +1840,7 @@ static bool io_assign_file(struct io_kiocb *req, unsigned int issue_flags)
static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
{
- const struct io_op_def *def = &io_op_defs[req->opcode];
+ const struct io_issue_def *def = &io_issue_defs[req->opcode];
const struct cred *creds = NULL;
int ret;
@@ -1894,7 +1894,7 @@ struct io_wq_work *io_wq_free_work(struct io_wq_work *work)
void io_wq_submit_work(struct io_wq_work *work)
{
struct io_kiocb *req = container_of(work, struct io_kiocb, work);
- const struct io_op_def *def = &io_op_defs[req->opcode];
+ const struct io_issue_def *def = &io_issue_defs[req->opcode];
unsigned int issue_flags = IO_URING_F_UNLOCKED | IO_URING_F_IOWQ;
bool needs_poll = false;
int ret = 0, err = -ECANCELED;
@@ -2106,7 +2106,7 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
const struct io_uring_sqe *sqe)
__must_hold(&ctx->uring_lock)
{
- const struct io_op_def *def;
+ const struct io_issue_def *def;
unsigned int sqe_flags;
int personality;
u8 opcode;
@@ -2124,7 +2124,7 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
req->opcode = 0;
return -EINVAL;
}
- def = &io_op_defs[opcode];
+ def = &io_issue_defs[opcode];
if (unlikely(sqe_flags & ~SQE_COMMON_FLAGS)) {
/* enforce forwards compatibility on users */
if (sqe_flags & ~SQE_VALID_FLAGS)
@@ -3762,7 +3762,7 @@ static __cold int io_probe(struct io_ring_ctx *ctx, void __user *arg,
for (i = 0; i < nr_args; i++) {
p->ops[i].op = i;
- if (!io_op_defs[i].not_supported)
+ if (!io_issue_defs[i].not_supported)
p->ops[i].flags = IO_URING_OP_SUPPORTED;
}
p->ops_len = i;
diff --git a/io_uring/opdef.c b/io_uring/opdef.c
index 3aa0d65c50e3..3c95e70a625e 100644
--- a/io_uring/opdef.c
+++ b/io_uring/opdef.c
@@ -46,7 +46,7 @@ static __maybe_unused int io_eopnotsupp_prep(struct io_kiocb *kiocb,
return -EOPNOTSUPP;
}
-const struct io_op_def io_op_defs[] = {
+const struct io_issue_def io_issue_defs[] = {
[IORING_OP_NOP] = {
.audit_skip = 1,
.iopoll = 1,
@@ -536,7 +536,7 @@ const struct io_op_def io_op_defs[] = {
const char *io_uring_get_opcode(u8 opcode)
{
if (opcode < IORING_OP_LAST)
- return io_op_defs[opcode].name;
+ return io_issue_defs[opcode].name;
return "INVALID";
}
@@ -544,12 +544,12 @@ void __init io_uring_optable_init(void)
{
int i;
- BUILD_BUG_ON(ARRAY_SIZE(io_op_defs) != IORING_OP_LAST);
+ BUILD_BUG_ON(ARRAY_SIZE(io_issue_defs) != IORING_OP_LAST);
- for (i = 0; i < ARRAY_SIZE(io_op_defs); i++) {
- BUG_ON(!io_op_defs[i].prep);
- if (io_op_defs[i].prep != io_eopnotsupp_prep)
- BUG_ON(!io_op_defs[i].issue);
- WARN_ON_ONCE(!io_op_defs[i].name);
+ for (i = 0; i < ARRAY_SIZE(io_issue_defs); i++) {
+ BUG_ON(!io_issue_defs[i].prep);
+ if (io_issue_defs[i].prep != io_eopnotsupp_prep)
+ BUG_ON(!io_issue_defs[i].issue);
+ WARN_ON_ONCE(!io_issue_defs[i].name);
}
}
diff --git a/io_uring/opdef.h b/io_uring/opdef.h
index df7e13d9bfba..d718e2ab1ff7 100644
--- a/io_uring/opdef.h
+++ b/io_uring/opdef.h
@@ -2,7 +2,7 @@
#ifndef IOU_OP_DEF_H
#define IOU_OP_DEF_H
-struct io_op_def {
+struct io_issue_def {
/* needs req->file assigned */
unsigned needs_file : 1;
/* should block plug */
@@ -41,7 +41,7 @@ struct io_op_def {
void (*fail)(struct io_kiocb *);
};
-extern const struct io_op_def io_op_defs[];
+extern const struct io_issue_def io_issue_defs[];
void io_uring_optable_init(void);
#endif
diff --git a/io_uring/poll.c b/io_uring/poll.c
index ee7da6150ec4..7a6d5d0da966 100644
--- a/io_uring/poll.c
+++ b/io_uring/poll.c
@@ -658,7 +658,7 @@ static struct async_poll *io_req_alloc_apoll(struct io_kiocb *req,
int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
{
- const struct io_op_def *def = &io_op_defs[req->opcode];
+ const struct io_issue_def *def = &io_issue_defs[req->opcode];
struct async_poll *apoll;
struct io_poll_table ipt;
__poll_t mask = POLLPRI | POLLERR | EPOLLET;
diff --git a/io_uring/rw.c b/io_uring/rw.c
index 8227af2e1c0f..54b44b9b736c 100644
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -516,7 +516,7 @@ static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec,
static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
struct io_rw_state *s, bool force)
{
- if (!force && !io_op_defs[req->opcode].prep_async)
+ if (!force && !io_issue_defs[req->opcode].prep_async)
return 0;
if (!req_has_async_data(req)) {
struct io_async_rw *iorw;

--
Pavel Begunkov