[PATCH v3] eventfd: convert to f_op->read_iter()

From: Jens Axboe
Date: Fri May 01 2020 - 13:54:02 EST


eventfd is using ->read() as its file_operations read handler, but
this prevents passing in information about whether a given IO operation
is blocking or not. We can only use the file flags for that. To support
async (-EAGAIN/poll based) retries for io_uring, we need ->read_iter()
support. Convert eventfd to using ->read_iter().

Signed-off-by: Jens Axboe <axboe@xxxxxxxxx>

---

Actually send out the right patch...

Since v2:

- Clean up eventfd_read() as per Al's suggestions

Since v1:

- Add FMODE_NOWAIT to the eventfd file
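
As a quick sanity check, something along these lines can exercise the
new nowait path from userspace: preadv2() with RWF_NOWAIT requires
FMODE_NOWAIT on the file and maps to IOCB_NOWAIT on the kiocb, so
before this patch the call fails with -EOPNOTSUPP, and with it an
empty eventfd returns -EAGAIN instead of blocking. Illustrative only,
not part of the patch, and it assumes a glibc recent enough to expose
preadv2() and RWF_NOWAIT:

/* eventfd-nowait.c: illustrative test, not part of this patch */
#define _GNU_SOURCE
#include <sys/eventfd.h>
#include <sys/uio.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>

int main(void)
{
	uint64_t cnt = 0;
	struct iovec iov = { .iov_base = &cnt, .iov_len = sizeof(cnt) };
	int fd = eventfd(0, 0);
	ssize_t ret;

	if (fd < 0) {
		perror("eventfd");
		return 1;
	}

	/* counter is zero: a nowait read must not block */
	ret = preadv2(fd, &iov, 1, -1, RWF_NOWAIT);
	printf("empty read: %zd (%s)\n", ret,
	       ret < 0 ? strerror(errno) : "ok");

	/* post an event, then the same read completes */
	cnt = 1;
	if (write(fd, &cnt, sizeof(cnt)) != sizeof(cnt)) {
		perror("write");
		return 1;
	}
	ret = preadv2(fd, &iov, 1, -1, RWF_NOWAIT);
	printf("armed read: %zd, count %llu\n", ret,
	       (unsigned long long)cnt);

	close(fd);
	return 0;
}

On a patched kernel the first read should report -1/EAGAIN and the
second should return 8 with a count of 1.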

diff --git a/fs/eventfd.c b/fs/eventfd.c
index 78e41c7c3d05..c9fa1e9cf5e3 100644
--- a/fs/eventfd.c
+++ b/fs/eventfd.c
@@ -216,32 +216,32 @@ int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *w
 }
 EXPORT_SYMBOL_GPL(eventfd_ctx_remove_wait_queue);
 
-static ssize_t eventfd_read(struct file *file, char __user *buf, size_t count,
-			    loff_t *ppos)
+static ssize_t eventfd_read(struct kiocb *iocb, struct iov_iter *to)
 {
+	struct file *file = iocb->ki_filp;
 	struct eventfd_ctx *ctx = file->private_data;
-	ssize_t res;
 	__u64 ucnt = 0;
 	DECLARE_WAITQUEUE(wait, current);
 
-	if (count < sizeof(ucnt))
+	if (iov_iter_count(to) < sizeof(ucnt))
 		return -EINVAL;
-
 	spin_lock_irq(&ctx->wqh.lock);
-	res = -EAGAIN;
-	if (ctx->count > 0)
-		res = sizeof(ucnt);
-	else if (!(file->f_flags & O_NONBLOCK)) {
+	if (!ctx->count) {
+		if ((file->f_flags & O_NONBLOCK) ||
+		    (iocb->ki_flags & IOCB_NOWAIT)) {
+			spin_unlock_irq(&ctx->wqh.lock);
+			return -EAGAIN;
+		}
 		__add_wait_queue(&ctx->wqh, &wait);
 		for (;;) {
 			set_current_state(TASK_INTERRUPTIBLE);
-			if (ctx->count > 0) {
-				res = sizeof(ucnt);
+			if (ctx->count)
 				break;
-			}
 			if (signal_pending(current)) {
-				res = -ERESTARTSYS;
-				break;
+				__remove_wait_queue(&ctx->wqh, &wait);
+				__set_current_state(TASK_RUNNING);
+				spin_unlock_irq(&ctx->wqh.lock);
+				return -ERESTARTSYS;
 			}
 			spin_unlock_irq(&ctx->wqh.lock);
 			schedule();
@@ -250,17 +250,14 @@ static ssize_t eventfd_read(struct file *file, char __user *buf, size_t count,
 		__remove_wait_queue(&ctx->wqh, &wait);
 		__set_current_state(TASK_RUNNING);
 	}
-	if (likely(res > 0)) {
-		eventfd_ctx_do_read(ctx, &ucnt);
-		if (waitqueue_active(&ctx->wqh))
-			wake_up_locked_poll(&ctx->wqh, EPOLLOUT);
-	}
+	eventfd_ctx_do_read(ctx, &ucnt);
+	if (waitqueue_active(&ctx->wqh))
+		wake_up_locked_poll(&ctx->wqh, EPOLLOUT);
 	spin_unlock_irq(&ctx->wqh.lock);
-
-	if (res > 0 && put_user(ucnt, (__u64 __user *)buf))
+	if (unlikely(copy_to_iter(&ucnt, sizeof(ucnt), to) != sizeof(ucnt)))
 		return -EFAULT;
 
-	return res;
+	return sizeof(ucnt);
 }
 
 static ssize_t eventfd_write(struct file *file, const char __user *buf, size_t count,
@@ -329,7 +326,7 @@ static const struct file_operations eventfd_fops = {
 #endif
 	.release	= eventfd_release,
 	.poll		= eventfd_poll,
-	.read		= eventfd_read,
+	.read_iter	= eventfd_read,
 	.write		= eventfd_write,
 	.llseek		= noop_llseek,
 };
@@ -427,8 +424,17 @@ static int do_eventfd(unsigned int count, int flags)
 
 	fd = anon_inode_getfd("[eventfd]", &eventfd_fops, ctx,
 			      O_RDWR | (flags & EFD_SHARED_FCNTL_FLAGS));
-	if (fd < 0)
+	if (fd < 0) {
 		eventfd_free_ctx(ctx);
+	} else {
+		struct file *file;
+
+		file = fget(fd);
+		if (file) {
+			file->f_mode |= FMODE_NOWAIT;
+			fput(file);
+		}
+	}
 
 	return fd;
 }
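
For reference, the io_uring consumer this enables looks roughly like
the below (a sketch only, not part of the patch, assuming liburing
with io_uring_prep_read() and a kernel with IORING_OP_READ): the
queued read of an empty eventfd no longer blocks the submission path;
io_uring sees -EAGAIN internally and retries via poll once the
eventfd is signalled.

/* eventfd-uring.c: illustrative sketch, assumes liburing */
#include <liburing.h>
#include <sys/eventfd.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	uint64_t val = 0, post = 1;
	int efd = eventfd(0, 0);

	if (efd < 0 || io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	/* queue a read of the (currently empty) eventfd */
	sqe = io_uring_get_sqe(&ring);
	if (!sqe)
		return 1;
	io_uring_prep_read(sqe, efd, &val, sizeof(val), 0);
	io_uring_submit(&ring);

	/* signal the eventfd so the queued read can complete */
	if (write(efd, &post, sizeof(post)) != sizeof(post))
		return 1;

	if (io_uring_wait_cqe(&ring, &cqe) < 0)
		return 1;
	printf("res %d, count %llu\n", cqe->res, (unsigned long long)val);
	io_uring_cqe_seen(&ring, cqe);
	io_uring_queue_exit(&ring);
	close(efd);
	return 0;
}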

--
Jens Axboe