summaryrefslogtreecommitdiff
path: root/fs/io_uring.c
diff options
context:
space:
mode:
authorJens Axboe <axboe@kernel.dk>2019-12-13 21:18:10 -0700
committerJens Axboe <axboe@kernel.dk>2020-01-20 17:03:54 -0700
commiteddc7ef52a6b37b7ba3d1c8a8fbb63d5d9914f8a (patch)
tree94284b2f09d3477955ffca87c8f6cf55c5b227f5 /fs/io_uring.c
parent3934e36f6099e6277db33f433fe135c6644e8ac2 (diff)
io_uring: add support for IORING_OP_STATX
This provides support for async statx(2) through io_uring.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'fs/io_uring.c')
-rw-r--r--fs/io_uring.c86
1 file changed, 85 insertions(+), 1 deletion(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 4325068324b7..115c7d413372 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -379,9 +379,13 @@ struct io_sr_msg {
struct io_open {
struct file *file;
int dfd;
- umode_t mode;
+ union {
+ umode_t mode;
+ unsigned mask;
+ };
const char __user *fname;
struct filename *filename;
+ struct statx __user *buffer;
int flags;
};
@@ -2266,6 +2270,74 @@ err:
return 0;
}
+static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+{
+ unsigned lookup_flags;
+ int ret;
+
+ if (sqe->ioprio || sqe->buf_index)
+ return -EINVAL;
+
+ req->open.dfd = READ_ONCE(sqe->fd);
+ req->open.mask = READ_ONCE(sqe->len);
+ req->open.fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
+ req->open.buffer = u64_to_user_ptr(READ_ONCE(sqe->addr2));
+ req->open.flags = READ_ONCE(sqe->statx_flags);
+
+ if (vfs_stat_set_lookup_flags(&lookup_flags, req->open.flags))
+ return -EINVAL;
+
+ req->open.filename = getname_flags(req->open.fname, lookup_flags, NULL);
+ if (IS_ERR(req->open.filename)) {
+ ret = PTR_ERR(req->open.filename);
+ req->open.filename = NULL;
+ return ret;
+ }
+
+ return 0;
+}
+
+static int io_statx(struct io_kiocb *req, struct io_kiocb **nxt,
+ bool force_nonblock)
+{
+ struct io_open *ctx = &req->open;
+ unsigned lookup_flags;
+ struct path path;
+ struct kstat stat;
+ int ret;
+
+ if (force_nonblock)
+ return -EAGAIN;
+
+ if (vfs_stat_set_lookup_flags(&lookup_flags, ctx->flags))
+ return -EINVAL;
+
+retry:
+ /* filename_lookup() drops it, keep a reference */
+ ctx->filename->refcnt++;
+
+ ret = filename_lookup(ctx->dfd, ctx->filename, lookup_flags, &path,
+ NULL);
+ if (ret)
+ goto err;
+
+ ret = vfs_getattr(&path, &stat, ctx->mask, ctx->flags);
+ path_put(&path);
+ if (retry_estale(ret, lookup_flags)) {
+ lookup_flags |= LOOKUP_REVAL;
+ goto retry;
+ }
+ if (!ret)
+ ret = cp_statx(&stat, ctx->buffer);
+err:
+ putname(ctx->filename);
+ if (ret < 0)
+ req_set_fail_links(req);
+ io_cqring_add_event(req, ret);
+ io_put_req_find_next(req, nxt);
+ return 0;
+}
+
static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
/*
@@ -3427,6 +3499,9 @@ static int io_req_defer_prep(struct io_kiocb *req,
case IORING_OP_FILES_UPDATE:
ret = io_files_update_prep(req, sqe);
break;
+ case IORING_OP_STATX:
+ ret = io_statx_prep(req, sqe);
+ break;
default:
printk_once(KERN_WARNING "io_uring: unhandled opcode %d\n",
req->opcode);
@@ -3613,6 +3688,14 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
}
ret = io_files_update(req, force_nonblock);
break;
+ case IORING_OP_STATX:
+ if (sqe) {
+ ret = io_statx_prep(req, sqe);
+ if (ret)
+ break;
+ }
+ ret = io_statx(req, nxt, force_nonblock);
+ break;
default:
ret = -EINVAL;
break;
@@ -3699,6 +3782,7 @@ static int io_req_needs_file(struct io_kiocb *req, int fd)
case IORING_OP_LINK_TIMEOUT:
return 0;
case IORING_OP_OPENAT:
+ case IORING_OP_STATX:
return fd != -1;
default:
if (io_req_op_valid(req->opcode))