REQ_F_BL_NO_RECYCLE_BIT,
REQ_F_BUFFERS_COMMIT_BIT,
REQ_F_BUF_NODE_BIT,
+ REQ_F_HAS_METADATA_BIT,
/* not a real bit, just to check we're not overflowing the space */
__REQ_F_LAST_BIT,
REQ_F_BUFFERS_COMMIT = IO_REQ_FLAG(REQ_F_BUFFERS_COMMIT_BIT),
/* buf node is valid */
REQ_F_BUF_NODE = IO_REQ_FLAG(REQ_F_BUF_NODE_BIT),
+ /* request has read/write metadata assigned */
+ REQ_F_HAS_METADATA = IO_REQ_FLAG(REQ_F_HAS_METADATA_BIT),
};
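For reference, each *_BIT enumerator above pairs with a mask generated by IO_REQ_FLAG(), so a flag test is a single AND against req->flags. A sketch of the macro as defined in io_uring_types.h (exact form may vary by kernel version):

typedef u64 __bitwise io_req_flags_t;
#define IO_REQ_FLAG(bitno)	((__force io_req_flags_t) BIT_ULL((bitno)))

/* e.g. at issue time: */
if (req->flags & REQ_F_HAS_METADATA)
	/* metadata was attached during prep */;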
typedef void (*io_req_tw_func_t)(struct io_kiocb *req, struct io_tw_state *ts);
pi_attr.len, &io->meta.iter);
if (unlikely(ret < 0))
return ret;
- rw->kiocb.ki_flags |= IOCB_HAS_METADATA;
+ req->flags |= REQ_F_HAS_METADATA;
io_meta_save_state(io);
return ret;
}
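For context, io_meta_save_state() snapshots the metadata iterator so a short or interrupted transfer can be rewound before reissue; a sketch of its shape, assuming the uio_meta/io_meta_state layout from the PI series (field names may differ by version):

static inline void io_meta_save_state(struct io_async_rw *io)
{
	io->meta_state.seed = io->meta.seed;
	iov_iter_save_state(&io->meta.iter, &io->meta_state.iter_meta);
}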
struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
struct kiocb *kiocb = &rw->kiocb;
- /* never retry for NOWAIT, we just complete with -EAGAIN */
- if (req->flags & REQ_F_NOWAIT)
+ /*
+ * Never retry for NOWAIT or a request with metadata, we just complete
+ * with -EAGAIN.
+ */
+ if (req->flags & (REQ_F_NOWAIT | REQ_F_HAS_METADATA))
return false;
/* Only for buffered IO */
if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_HIPRI))
return false;
- /* never retry for meta io */
- if (kiocb->ki_flags & IOCB_HAS_METADATA)
- return false;
-
/*
 * just use poll if we can, and don't attempt if the fs doesn't
 * support callback based unlocks
 */
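Why metadata IO is excluded from buffered retry: the wait_page_queue that async buffered retry arms shares a union with the metadata state in struct io_async_rw, so taking the retry path would clobber it. The relevant fields, roughly (a sketch, not the full struct):

struct io_async_rw {
	/* ... */
	union {
		struct wait_page_queue		wpq;	/* async buffered retry */
		struct {
			struct uio_meta		meta;	/* PI metadata */
			struct io_meta_state	meta_state;
		};
	};
};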
if (!(req->flags & REQ_F_FIXED_FILE))
req->flags |= io_file_get_flags(file);
- kiocb->ki_flags |= file->f_iocb_flags;
+ kiocb->ki_flags = file->f_iocb_flags;
ret = kiocb_set_rw_flags(kiocb, rw->flags, rw_type);
if (unlikely(ret))
return ret;
kiocb->ki_complete = io_complete_rw;
}
- if (kiocb->ki_flags & IOCB_HAS_METADATA) {
+ if (req->flags & REQ_F_HAS_METADATA) {
struct io_async_rw *io = req->async_data;
/*
 * We have a union of meta fields with wpq used for buffered-io
 * in io_async_rw, so fail it here.
 */
if (!(req->file->f_flags & O_DIRECT))
return -EOPNOTSUPP;
+ kiocb->ki_flags |= IOCB_HAS_METADATA;
kiocb->private = &io->meta;
}
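End to end, userspace attaches the PI buffer through the read/write attributes, which io_prep_rw_pi() above imports. A hypothetical submission sketch against the uapi from that series (IORING_RW_ATTR_FLAG_PI, struct io_uring_attr_pi and the IO_INTEGRITY_CHK_* flags are taken from it; treat the names as assumptions if your headers differ):

struct io_uring_attr_pi pi = {
	.addr	= (__u64)(uintptr_t)meta_buf,	/* meta_buf/meta_len: caller-provided */
	.len	= meta_len,
	.flags	= IO_INTEGRITY_CHK_GUARD,	/* verify guard tags */
};

sqe->attr_ptr		= (__u64)(uintptr_t)&pi;
sqe->attr_type_mask	= IORING_RW_ATTR_FLAG_PI;

With this patch, prep translates that attribute into REQ_F_HAS_METADATA on the request, and io_rw_init_file() re-derives IOCB_HAS_METADATA from it on each (re)issue, so the kiocb flag can never go stale.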