4 * IO engine that transfers data by doing splices to/from pipes and
18 #ifdef FIO_HAVE_SPLICE
/*
 * Per-thread engine state. Only a fragment is visible here; the struct
 * presumably also holds the pipe fds and the vmsplice_to_user flag used
 * below — TODO confirm against the full file.
 */
20 struct spliceio_data {
	/* non-zero while vmsplice-to-user via a private anon mmap is believed
	 * to work; cleared at runtime when vmsplice() reports EFAULT */
23 int vmsplice_to_user_map;
27 * vmsplice did not originally support splicing to user space; this is
28 * the old variant of getting that job done. Doesn't make a lot of sense,
29 * but it uses splices to move data from the source into a pipe.
/*
 * Legacy read path: splice from the file into the engine's pipe, then
 * read() the data back out of the pipe into the io_u buffer, at most
 * SPLICE_DEF_SIZE bytes per iteration. Used when vmsplice-to-user is
 * unavailable. Returns the full xfer_buflen on success.
 *
 * NOTE(review): interior lines are elided in this view; comments cover
 * only the code shown.
 */
31 static int fio_splice_read_old(struct thread_data *td, struct io_u *io_u)
33 struct spliceio_data *sd = td->io_ops->data;
34 struct fio_file *f = io_u->file;
35 int ret, ret2, buflen;
39 offset = io_u->offset;
40 buflen = io_u->xfer_buflen;
	/* clamp each splice to the default chunk size */
43 int this_len = buflen;
45 if (this_len > SPLICE_DEF_SIZE)
46 this_len = SPLICE_DEF_SIZE;
	/* move file data into the pipe's write end; SPLICE_F_MORE hints that
	 * more data follows */
48 ret = splice(f->fd, &offset, sd->pipe[1], NULL, this_len, SPLICE_F_MORE);
	/* ENODATA/EAGAIN are treated as retryable, not fatal */
50 if (errno == ENODATA || errno == EAGAIN)
	/* drain the pipe's read end into the user buffer */
59 ret2 = read(sd->pipe[0], p, ret);
68 return io_u->xfer_buflen;
72 * We can now vmsplice into userspace, so do the transfer by splicing into
73 * a pipe and vmsplicing that into userspace.
/*
 * Modern read path: splice file data into the pipe, then vmsplice() the
 * pipe contents directly into user space. When vmsplice_to_user_map is
 * set, the io_u buffer is first remapped as a private anonymous mapping
 * so the kernel can hand pages over; if vmsplice() then faults (EFAULT),
 * the map variant is disabled for the rest of the run and the plain
 * copy variant is used instead.
 *
 * NOTE(review): interior lines are elided in this view; comments cover
 * only the code shown.
 */
75 static int fio_splice_read(struct thread_data *td, struct io_u *io_u)
77 struct spliceio_data *sd = td->io_ops->data;
78 struct fio_file *f = io_u->file;
80 int ret , buflen, mmap_len;
85 offset = io_u->offset;
86 mmap_len = buflen = io_u->xfer_buflen;
88 if (sd->vmsplice_to_user_map) {
	/* replace the io_u buffer with a fresh private anon mapping at the
	 * same address, read-only, for page-stealing vmsplice */
89 map = mmap(io_u->xfer_buf, buflen, PROT_READ, MAP_PRIVATE|OS_MAP_ANON, 0, 0);
90 if (map == MAP_FAILED) {
91 td_verror(td, errno, "mmap io_u");
	/* splice loop: at most SPLICE_DEF_SIZE per call */
102 int this_len = buflen;
105 if (this_len > SPLICE_DEF_SIZE) {
106 this_len = SPLICE_DEF_SIZE;
107 flags = SPLICE_F_MORE;
110 ret = splice(f->fd, &offset, sd->pipe[1], NULL, this_len,flags);
	/* ENODATA/EAGAIN are retryable */
112 if (errno == ENODATA || errno == EAGAIN)
115 td_verror(td, errno, "splice-from-fd");
	/* vmsplice the pipe contents into the user iovec */
123 while (iov.iov_len) {
124 ret = vmsplice(sd->pipe[0], &iov, 1, SPLICE_F_MOVE);
	/* EFAULT with the map variant active means this kernel cannot
	 * splice into the mapping: disable the map path permanently and
	 * drop the mapping */
126 if (errno == EFAULT &&
127 sd->vmsplice_to_user_map) {
128 sd->vmsplice_to_user_map = 0;
129 munmap(map, mmap_len);
135 if (errno == EBADF) {
139 td_verror(td, errno, "vmsplice");
	/* a zero-byte vmsplice is reported as ENODATA */
142 td_verror(td, ENODATA, "vmsplice");
	/* tear down the temporary mapping if it is still in use.
	 * NOTE(review): the error string below says "munnap" — typo for
	 * "munmap"; fixing it would change a runtime string, left as-is */
155 if (sd->vmsplice_to_user_map && munmap(map, mmap_len) < 0) {
156 td_verror(td, errno, "munnap io_u");
162 return io_u->xfer_buflen;
166 * For splice writing, we can vmsplice our data buffer directly into a
167 * pipe and then splice that to a file.
/*
 * Write path: vmsplice() the io_u buffer into the pipe, then splice()
 * the pipe contents to the file at io_u->offset. poll() waits for the
 * pipe's write end to become writable before each vmsplice. Returns
 * the full xfer_buflen on success.
 *
 * NOTE(review): interior lines are elided in this view; comments cover
 * only the code shown.
 */
169 static int fio_splice_write(struct thread_data *td, struct io_u *io_u)
171 struct spliceio_data *sd = td->io_ops->data;
173 .iov_base = io_u->xfer_buf,
174 .iov_len = io_u->xfer_buflen,
176 struct pollfd pfd = { .fd = sd->pipe[1], .events = POLLOUT, };
177 struct fio_file *f = io_u->file;
178 off_t off = io_u->offset;
	/* loop until the whole iovec has been consumed */
181 while (iov.iov_len) {
	/* block until the pipe can accept more data */
182 if (poll(&pfd, 1, -1) < 0)
	/* gift user pages to the pipe without blocking */
185 ret = vmsplice(sd->pipe[1], &iov, 1, SPLICE_F_NONBLOCK);
	/* push what we just vmspliced out to the file */
193 ret2 = splice(sd->pipe[0], NULL, f->fd, &off, ret, 0);
201 return io_u->xfer_buflen;
/*
 * Engine queue hook: dispatch the io_u by direction. Reads prefer the
 * vmsplice-to-user path and fall back to the read()-based path when the
 * kernel rejects it (clearing vmsplice_to_user so the fallback is taken
 * directly next time). Writes use the vmsplice+splice path; TRIM and
 * sync are delegated to the generic helpers. Always completes inline
 * (FIO_Q_COMPLETED) — this is a synchronous engine.
 *
 * NOTE(review): interior lines are elided in this view; comments cover
 * only the code shown.
 */
204 static int fio_spliceio_queue(struct thread_data *td, struct io_u *io_u)
206 struct spliceio_data *sd = td->io_ops->data;
207 int uninitialized_var(ret);
209 fio_ro_check(td, io_u);
211 if (io_u->ddir == DDIR_READ) {
212 if (sd->vmsplice_to_user) {
213 ret = fio_splice_read(td, io_u);
215 * This kernel doesn't support vmsplice to user
216 * space. Reset the vmsplice_to_user flag, so that
217 * we retry below and don't hit this path again.
220 sd->vmsplice_to_user = 0;
	/* not an else: deliberately re-checked so a failed vmsplice read
	 * above is retried with the old path in the same call */
222 if (!sd->vmsplice_to_user)
223 ret = fio_splice_read_old(td, io_u);
224 } else if (io_u->ddir == DDIR_WRITE)
225 ret = fio_splice_write(td, io_u);
226 else if (io_u->ddir == DDIR_TRIM)
227 ret = do_io_u_trim(td, io_u);
229 ret = do_io_u_sync(td, io_u);
	/* short transfer: record the residual; negative ret becomes the
	 * io_u error */
231 if (ret != (int) io_u->xfer_buflen) {
233 io_u->resid = io_u->xfer_buflen - ret;
235 return FIO_Q_COMPLETED;
241 td_verror(td, io_u->error, "xfer");
	/* EINVAL from splice usually means the target fs doesn't support it */
242 if (io_u->error == EINVAL)
243 log_err("fio: looks like splice doesn't work on this"
247 return FIO_Q_COMPLETED;
/*
 * Engine cleanup hook: release the per-thread spliceio_data allocated
 * in fio_spliceio_init. Body is elided in this view — presumably closes
 * the pipe fds and frees sd; TODO confirm against the full file.
 */
250 static void fio_spliceio_cleanup(struct thread_data *td)
252 struct spliceio_data *sd = td->io_ops->data;
/*
 * Engine init hook: allocate the per-thread state and create the pipe
 * used as the splice intermediary. Optimistically assumes both the
 * vmsplice-to-user and the page-mapping variants work; the queue/read
 * paths clear these flags at runtime when the kernel disagrees.
 *
 * NOTE(review): interior lines are elided in this view; comments cover
 * only the code shown. The malloc() result is not visibly checked —
 * confirm against the full file.
 */
261 static int fio_spliceio_init(struct thread_data *td)
263 struct spliceio_data *sd = malloc(sizeof(*sd));
265 if (pipe(sd->pipe) < 0) {
266 td_verror(td, errno, "pipe");
272 * Assume this works; we'll reset this if it doesn't
274 sd->vmsplice_to_user = 1;
277 * Works with "real" vmsplice to user, eg mapping pages directly.
280 sd->vmsplice_to_user_map = 1;
283 * And if vmsplice_to_user works, we definitely need aligned
284 * buffers. Just set ->odirect to force that.
	/* hand the state to the core via the io_ops private pointer */
289 td->io_ops->data = sd;
/*
 * Engine descriptor registered with fio. FIO_SYNCIO: all I/O completes
 * inline in ->queue; FIO_PIPEIO: data moves through a pipe.
 */
293 static struct ioengine_ops ioengine = {
295 .version = FIO_IOOPS_VERSION,
296 .init = fio_spliceio_init,
297 .queue = fio_spliceio_queue,
298 .cleanup = fio_spliceio_cleanup,
299 .open_file = generic_open_file,
300 .close_file = generic_close_file,
301 .get_file_size = generic_get_file_size,
302 .flags = FIO_SYNCIO | FIO_PIPEIO,
305 #else /* FIO_HAVE_SPLICE */
308 * When we have a proper configure system in place, we simply won't build
309 * and install this io engine. For now install a crippled version that
310 * just complains and fails to load.
/* Stub init for platforms without splice support: always fails with a
 * diagnostic so the engine refuses to load. */
312 static int fio_spliceio_init(struct thread_data fio_unused *td)
314 log_err("fio: splice not available\n");
/* Crippled descriptor for the no-splice build: only ->init is wired up,
 * and it always fails (see stub above). */
318 static struct ioengine_ops ioengine = {
320 .version = FIO_IOOPS_VERSION,
321 .init = fio_spliceio_init,
/* Constructor: register this engine with the fio core at load time. */
326 static void fio_init fio_spliceio_register(void)
328 register_ioengine(&ioengine);
/* Destructor: unregister the engine at unload time. */
331 static void fio_exit fio_spliceio_unregister(void)
333 unregister_ioengine(&ioengine);