struct io_uring_cqe cqes[] ____cacheline_aligned_in_smp;
};
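+/*
+ * Flags passed from io_issue_sqe() down to the per-opcode handlers.
+ * IO_URING_F_NONBLOCK tells a handler it must not block: if the
+ * operation cannot make progress without blocking, it returns -EAGAIN
+ * so the request is punted to async context instead.
+ */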
+enum io_uring_cmd_flags {
+ IO_URING_F_NONBLOCK = 1,
+};
+
struct io_mapped_ubuf {
u64 ubuf;
size_t len;
return -EINVAL;
}
-static int io_read(struct io_kiocb *req, bool force_nonblock,
+static int io_read(struct io_kiocb *req, unsigned int issue_flags,
struct io_comp_state *cs)
{
struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
struct iov_iter __iter, *iter = &__iter;
struct io_async_rw *rw = req->async_data;
ssize_t io_size, ret, ret2;
+ bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
if (rw) {
iter = &rw->iter;
return io_rw_prep_async(req, WRITE);
}
-static int io_write(struct io_kiocb *req, bool force_nonblock,
+static int io_write(struct io_kiocb *req, unsigned int issue_flags,
struct io_comp_state *cs)
{
struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
struct iov_iter __iter, *iter = &__iter;
struct io_async_rw *rw = req->async_data;
ssize_t ret, ret2, io_size;
+ bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
if (rw) {
iter = &rw->iter;
return 0;
}
-static int io_renameat(struct io_kiocb *req, bool force_nonblock)
+static int io_renameat(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_rename *ren = &req->rename;
int ret;
- if (force_nonblock)
+ if (issue_flags & IO_URING_F_NONBLOCK)
return -EAGAIN;
ret = do_renameat2(ren->old_dfd, ren->oldpath, ren->new_dfd,
return 0;
}
-static int io_unlinkat(struct io_kiocb *req, bool force_nonblock)
+static int io_unlinkat(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_unlink *un = &req->unlink;
int ret;
- if (force_nonblock)
+ if (issue_flags & IO_URING_F_NONBLOCK)
return -EAGAIN;
if (un->flags & AT_REMOVEDIR)
#endif
}
-static int io_shutdown(struct io_kiocb *req, bool force_nonblock)
+static int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
{
#if defined(CONFIG_NET)
struct socket *sock;
int ret;
- if (force_nonblock)
+ if (issue_flags & IO_URING_F_NONBLOCK)
return -EAGAIN;
sock = sock_from_file(req->file);
return __io_splice_prep(req, sqe);
}
-static int io_tee(struct io_kiocb *req, bool force_nonblock)
+static int io_tee(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_splice *sp = &req->splice;
struct file *in = sp->file_in;
unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
long ret = 0;
- if (force_nonblock)
+ if (issue_flags & IO_URING_F_NONBLOCK)
return -EAGAIN;
if (sp->len)
ret = do_tee(in, out, sp->len, flags);
return __io_splice_prep(req, sqe);
}
-static int io_splice(struct io_kiocb *req, bool force_nonblock)
+static int io_splice(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_splice *sp = &req->splice;
struct file *in = sp->file_in;
loff_t *poff_in, *poff_out;
long ret = 0;
- if (force_nonblock)
+ if (issue_flags & IO_URING_F_NONBLOCK)
return -EAGAIN;
poff_in = (sp->off_in == -1) ? NULL : &sp->off_in;
return 0;
}
-static int io_fsync(struct io_kiocb *req, bool force_nonblock)
+static int io_fsync(struct io_kiocb *req, unsigned int issue_flags)
{
loff_t end = req->sync.off + req->sync.len;
int ret;
/* fsync always requires a blocking context */
- if (force_nonblock)
+ if (issue_flags & IO_URING_F_NONBLOCK)
return -EAGAIN;
ret = vfs_fsync_range(req->file, req->sync.off,
return 0;
}
-static int io_fallocate(struct io_kiocb *req, bool force_nonblock)
+static int io_fallocate(struct io_kiocb *req, unsigned int issue_flags)
{
int ret;
/* fallocate always requires a blocking context */
- if (force_nonblock)
+ if (issue_flags & IO_URING_F_NONBLOCK)
return -EAGAIN;
ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off,
req->sync.len);
return __io_openat_prep(req, sqe);
}
-static int io_openat2(struct io_kiocb *req, bool force_nonblock)
+static int io_openat2(struct io_kiocb *req, unsigned int issue_flags)
{
struct open_flags op;
struct file *file;
goto err;
nonblock_set = op.open_flag & O_NONBLOCK;
resolve_nonblock = req->open.how.resolve & RESOLVE_CACHED;
- if (force_nonblock) {
+ if (issue_flags & IO_URING_F_NONBLOCK) {
/*
* Don't bother trying for O_TRUNC, O_CREAT, or O_TMPFILE open,
* it'll always -EAGAIN
file = do_filp_open(req->open.dfd, req->open.filename, &op);
/* only retry if RESOLVE_CACHED wasn't already set by application */
- if ((!resolve_nonblock && force_nonblock) && file == ERR_PTR(-EAGAIN)) {
+ if ((!resolve_nonblock && (issue_flags & IO_URING_F_NONBLOCK)) &&
+ file == ERR_PTR(-EAGAIN)) {
/*
* We could hang on to this 'fd', but seems like marginal
* gain for something that is now known to be a slower path.
put_unused_fd(ret);
ret = PTR_ERR(file);
} else {
- if (force_nonblock && !nonblock_set)
+ if ((issue_flags & IO_URING_F_NONBLOCK) && !nonblock_set)
file->f_flags &= ~O_NONBLOCK;
fsnotify_open(file);
fd_install(ret, file);
return 0;
}
-static int io_openat(struct io_kiocb *req, bool force_nonblock)
+static int io_openat(struct io_kiocb *req, unsigned int issue_flags)
{
- return io_openat2(req, force_nonblock);
+ return io_openat2(req, issue_flags);
}
static int io_remove_buffers_prep(struct io_kiocb *req,
return i;
}
-static int io_remove_buffers(struct io_kiocb *req, bool force_nonblock,
+static int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags,
struct io_comp_state *cs)
{
struct io_provide_buf *p = &req->pbuf;
struct io_ring_ctx *ctx = req->ctx;
struct io_buffer *head;
int ret = 0;
+ bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
io_ring_submit_lock(ctx, !force_nonblock);
return i ? i : -ENOMEM;
}
-static int io_provide_buffers(struct io_kiocb *req, bool force_nonblock,
+static int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags,
struct io_comp_state *cs)
{
struct io_provide_buf *p = &req->pbuf;
struct io_ring_ctx *ctx = req->ctx;
struct io_buffer *head, *list;
int ret = 0;
+ bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
io_ring_submit_lock(ctx, !force_nonblock);
#endif
}
-static int io_epoll_ctl(struct io_kiocb *req, bool force_nonblock,
+static int io_epoll_ctl(struct io_kiocb *req, unsigned int issue_flags,
struct io_comp_state *cs)
{
#if defined(CONFIG_EPOLL)
struct io_epoll *ie = &req->epoll;
int ret;
+ bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
ret = do_epoll_ctl(ie->epfd, ie->op, ie->fd, &ie->event, force_nonblock);
if (force_nonblock && ret == -EAGAIN)
#endif
}
-static int io_madvise(struct io_kiocb *req, bool force_nonblock)
+static int io_madvise(struct io_kiocb *req, unsigned int issue_flags)
{
#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
struct io_madvise *ma = &req->madvise;
int ret;
- if (force_nonblock)
+ if (issue_flags & IO_URING_F_NONBLOCK)
return -EAGAIN;
ret = do_madvise(current->mm, ma->addr, ma->len, ma->advice);
return 0;
}
-static int io_fadvise(struct io_kiocb *req, bool force_nonblock)
+static int io_fadvise(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_fadvise *fa = &req->fadvise;
int ret;
- if (force_nonblock) {
+ if (issue_flags & IO_URING_F_NONBLOCK) {
switch (fa->advice) {
case POSIX_FADV_NORMAL:
case POSIX_FADV_RANDOM:
return 0;
}
-static int io_statx(struct io_kiocb *req, bool force_nonblock)
+static int io_statx(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_statx *ctx = &req->statx;
int ret;
- if (force_nonblock) {
+ if (issue_flags & IO_URING_F_NONBLOCK) {
/* only need file table for an actual valid fd */
if (ctx->dfd == -1 || ctx->dfd == AT_FDCWD)
req->flags |= REQ_F_NO_FILE_TABLE;
return 0;
}
-static int io_close(struct io_kiocb *req, bool force_nonblock,
+static int io_close(struct io_kiocb *req, unsigned int issue_flags,
struct io_comp_state *cs)
{
struct files_struct *files = current->files;
}
/* if the file has a flush method, be safe and punt to async */
- if (file->f_op->flush && force_nonblock) {
+ if (file->f_op->flush && (issue_flags & IO_URING_F_NONBLOCK)) {
spin_unlock(&files->file_lock);
return -EAGAIN;
}
return 0;
}
-static int io_sync_file_range(struct io_kiocb *req, bool force_nonblock)
+static int io_sync_file_range(struct io_kiocb *req, unsigned int issue_flags)
{
int ret;
/* sync_file_range always requires a blocking context */
- if (force_nonblock)
+ if (issue_flags & IO_URING_F_NONBLOCK)
return -EAGAIN;
ret = sync_file_range(req->file, req->sync.off, req->sync.len,
return ret;
}
-static int io_sendmsg(struct io_kiocb *req, bool force_nonblock,
+static int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags,
struct io_comp_state *cs)
{
struct io_async_msghdr iomsg, *kmsg;
flags = req->sr_msg.msg_flags;
if (flags & MSG_DONTWAIT)
req->flags |= REQ_F_NOWAIT;
- else if (force_nonblock)
+ else if (issue_flags & IO_URING_F_NONBLOCK)
flags |= MSG_DONTWAIT;
ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
- if (force_nonblock && ret == -EAGAIN)
+ if ((issue_flags & IO_URING_F_NONBLOCK) && ret == -EAGAIN)
return io_setup_async_msg(req, kmsg);
if (ret == -ERESTARTSYS)
ret = -EINTR;
return 0;
}
-static int io_send(struct io_kiocb *req, bool force_nonblock,
+static int io_send(struct io_kiocb *req, unsigned int issue_flags,
struct io_comp_state *cs)
{
struct io_sr_msg *sr = &req->sr_msg;
flags = req->sr_msg.msg_flags;
if (flags & MSG_DONTWAIT)
req->flags |= REQ_F_NOWAIT;
- else if (force_nonblock)
+ else if (issue_flags & IO_URING_F_NONBLOCK)
flags |= MSG_DONTWAIT;
msg.msg_flags = flags;
ret = sock_sendmsg(sock, &msg);
- if (force_nonblock && ret == -EAGAIN)
+ if ((issue_flags & IO_URING_F_NONBLOCK) && ret == -EAGAIN)
return -EAGAIN;
if (ret == -ERESTARTSYS)
ret = -EINTR;
return ret;
}
-static int io_recvmsg(struct io_kiocb *req, bool force_nonblock,
+static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags,
struct io_comp_state *cs)
{
struct io_async_msghdr iomsg, *kmsg;
struct io_buffer *kbuf;
unsigned flags;
int ret, cflags = 0;
+ bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
sock = sock_from_file(req->file);
if (unlikely(!sock))
return 0;
}
-static int io_recv(struct io_kiocb *req, bool force_nonblock,
+static int io_recv(struct io_kiocb *req, unsigned int issue_flags,
struct io_comp_state *cs)
{
struct io_buffer *kbuf;
struct iovec iov;
unsigned flags;
int ret, cflags = 0;
+ bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
sock = sock_from_file(req->file);
if (unlikely(!sock))
return 0;
}
-static int io_accept(struct io_kiocb *req, bool force_nonblock,
+static int io_accept(struct io_kiocb *req, unsigned int issue_flags,
struct io_comp_state *cs)
{
struct io_accept *accept = &req->accept;
+ bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
int ret;
&io->address);
}
-static int io_connect(struct io_kiocb *req, bool force_nonblock,
+static int io_connect(struct io_kiocb *req, unsigned int issue_flags,
struct io_comp_state *cs)
{
struct io_async_connect __io, *io;
unsigned file_flags;
int ret;
+ bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
if (req->async_data) {
io = req->async_data;
return -EOPNOTSUPP;
}
-static int io_sendmsg(struct io_kiocb *req, bool force_nonblock,
+static int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags,
struct io_comp_state *cs)
{
return -EOPNOTSUPP;
}
-static int io_send(struct io_kiocb *req, bool force_nonblock,
+static int io_send(struct io_kiocb *req, unsigned int issue_flags,
struct io_comp_state *cs)
{
return -EOPNOTSUPP;
return -EOPNOTSUPP;
}
-static int io_recvmsg(struct io_kiocb *req, bool force_nonblock,
+static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags,
struct io_comp_state *cs)
{
return -EOPNOTSUPP;
}
-static int io_recv(struct io_kiocb *req, bool force_nonblock,
+static int io_recv(struct io_kiocb *req, unsigned int issue_flags,
struct io_comp_state *cs)
{
return -EOPNOTSUPP;
return -EOPNOTSUPP;
}
-static int io_accept(struct io_kiocb *req, bool force_nonblock,
+static int io_accept(struct io_kiocb *req, unsigned int issue_flags,
struct io_comp_state *cs)
{
return -EOPNOTSUPP;
return -EOPNOTSUPP;
}
-static int io_connect(struct io_kiocb *req, bool force_nonblock,
+static int io_connect(struct io_kiocb *req, unsigned int issue_flags,
struct io_comp_state *cs)
{
return -EOPNOTSUPP;
return 0;
}
-static int io_files_update(struct io_kiocb *req, bool force_nonblock,
+static int io_files_update(struct io_kiocb *req, unsigned int issue_flags,
struct io_comp_state *cs)
{
struct io_ring_ctx *ctx = req->ctx;
struct io_uring_rsrc_update up;
int ret;
- if (force_nonblock)
+ if (issue_flags & IO_URING_F_NONBLOCK)
return -EAGAIN;
up.offset = req->rsrc_update.offset;
}
}
-static int io_issue_sqe(struct io_kiocb *req, bool force_nonblock,
+static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags,
struct io_comp_state *cs)
{
struct io_ring_ctx *ctx = req->ctx;
case IORING_OP_READV:
case IORING_OP_READ_FIXED:
case IORING_OP_READ:
- ret = io_read(req, force_nonblock, cs);
+ ret = io_read(req, issue_flags, cs);
break;
case IORING_OP_WRITEV:
case IORING_OP_WRITE_FIXED:
case IORING_OP_WRITE:
- ret = io_write(req, force_nonblock, cs);
+ ret = io_write(req, issue_flags, cs);
break;
case IORING_OP_FSYNC:
- ret = io_fsync(req, force_nonblock);
+ ret = io_fsync(req, issue_flags);
break;
case IORING_OP_POLL_ADD:
ret = io_poll_add(req);
ret = io_poll_remove(req);
break;
case IORING_OP_SYNC_FILE_RANGE:
- ret = io_sync_file_range(req, force_nonblock);
+ ret = io_sync_file_range(req, issue_flags);
break;
case IORING_OP_SENDMSG:
- ret = io_sendmsg(req, force_nonblock, cs);
+ ret = io_sendmsg(req, issue_flags, cs);
break;
case IORING_OP_SEND:
- ret = io_send(req, force_nonblock, cs);
+ ret = io_send(req, issue_flags, cs);
break;
case IORING_OP_RECVMSG:
- ret = io_recvmsg(req, force_nonblock, cs);
+ ret = io_recvmsg(req, issue_flags, cs);
break;
case IORING_OP_RECV:
- ret = io_recv(req, force_nonblock, cs);
+ ret = io_recv(req, issue_flags, cs);
break;
case IORING_OP_TIMEOUT:
ret = io_timeout(req);
ret = io_timeout_remove(req);
break;
case IORING_OP_ACCEPT:
- ret = io_accept(req, force_nonblock, cs);
+ ret = io_accept(req, issue_flags, cs);
break;
case IORING_OP_CONNECT:
- ret = io_connect(req, force_nonblock, cs);
+ ret = io_connect(req, issue_flags, cs);
break;
case IORING_OP_ASYNC_CANCEL:
ret = io_async_cancel(req);
break;
case IORING_OP_FALLOCATE:
- ret = io_fallocate(req, force_nonblock);
+ ret = io_fallocate(req, issue_flags);
break;
case IORING_OP_OPENAT:
- ret = io_openat(req, force_nonblock);
+ ret = io_openat(req, issue_flags);
break;
case IORING_OP_CLOSE:
- ret = io_close(req, force_nonblock, cs);
+ ret = io_close(req, issue_flags, cs);
break;
case IORING_OP_FILES_UPDATE:
- ret = io_files_update(req, force_nonblock, cs);
+ ret = io_files_update(req, issue_flags, cs);
break;
case IORING_OP_STATX:
- ret = io_statx(req, force_nonblock);
+ ret = io_statx(req, issue_flags);
break;
case IORING_OP_FADVISE:
- ret = io_fadvise(req, force_nonblock);
+ ret = io_fadvise(req, issue_flags);
break;
case IORING_OP_MADVISE:
- ret = io_madvise(req, force_nonblock);
+ ret = io_madvise(req, issue_flags);
break;
case IORING_OP_OPENAT2:
- ret = io_openat2(req, force_nonblock);
+ ret = io_openat2(req, issue_flags);
break;
case IORING_OP_EPOLL_CTL:
- ret = io_epoll_ctl(req, force_nonblock, cs);
+ ret = io_epoll_ctl(req, issue_flags, cs);
break;
case IORING_OP_SPLICE:
- ret = io_splice(req, force_nonblock);
+ ret = io_splice(req, issue_flags);
break;
case IORING_OP_PROVIDE_BUFFERS:
- ret = io_provide_buffers(req, force_nonblock, cs);
+ ret = io_provide_buffers(req, issue_flags, cs);
break;
case IORING_OP_REMOVE_BUFFERS:
- ret = io_remove_buffers(req, force_nonblock, cs);
+ ret = io_remove_buffers(req, issue_flags, cs);
break;
case IORING_OP_TEE:
- ret = io_tee(req, force_nonblock);
+ ret = io_tee(req, issue_flags);
break;
case IORING_OP_SHUTDOWN:
- ret = io_shutdown(req, force_nonblock);
+ ret = io_shutdown(req, issue_flags);
break;
case IORING_OP_RENAMEAT:
- ret = io_renameat(req, force_nonblock);
+ ret = io_renameat(req, issue_flags);
break;
case IORING_OP_UNLINKAT:
- ret = io_unlinkat(req, force_nonblock);
+ ret = io_unlinkat(req, issue_flags);
break;
default:
ret = -EINVAL;
if (!ret) {
do {
- ret = io_issue_sqe(req, false, NULL);
+ ret = io_issue_sqe(req, 0, NULL);
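+ /* issue_flags == 0: sync submission from the async worker, blocking is allowed */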
/*
* We can get EAGAIN for polled IO even though we're
* forcing a sync submission from here, since we can't
old_creds = override_creds(req->work.identity->creds);
}
- ret = io_issue_sqe(req, true, cs);
+ ret = io_issue_sqe(req, IO_URING_F_NONBLOCK, cs);
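+ /* inline submission path: try non-blocking first, punt on -EAGAIN */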
/*
* We async punt it if the file wasn't marked NOWAIT, or if the file