4 * IO engine that transfers data by doing splices to/from pipes and
17 #ifdef FIO_HAVE_SPLICE
/*
 * Per-thread private state for the splice engine. NOTE(review): the struct
 * body is not visible in this extract; from later uses it holds at least
 * the pipe fd pair (sd->pipe[0] read end, sd->pipe[1] write end) and the
 * vmsplice_to_user flag -- confirm against the full file.
 */
19 struct spliceio_data {
25 * vmsplice didn't use to support splicing to user space, this is the old
26 * variant of getting that job done. Doesn't make a lot of sense, but it
27 * uses splices to move data from the source into a pipe.
/*
 * Old-style splice read: splice file data into the pipe, then copy it out
 * of the pipe into the io_u buffer with a plain read(). Returns the full
 * xfer_buflen on success. NOTE(review): several lines of this function
 * (declarations of 'p'/'offset', loop braces, error returns) are not
 * visible in this extract.
 */
29 static int fio_splice_read_old(struct thread_data *td, struct io_u *io_u)
31 struct spliceio_data *sd = td->io_ops->data;
32 struct fio_file *f = io_u->file;
33 int ret, ret2, buflen;
/* Start at the io_u's file offset; splice() advances 'offset' for us. */
37 offset = io_u->offset;
38 buflen = io_u->xfer_buflen;
/* Transfer in chunks of at most SPLICE_DEF_SIZE per splice call. */
41 int this_len = buflen;
43 if (this_len > SPLICE_DEF_SIZE)
44 this_len = SPLICE_DEF_SIZE;
/* file -> pipe write end. */
46 ret = splice(f->fd, &offset, sd->pipe[1], NULL, this_len, SPLICE_F_MORE);
/* ENODATA/EAGAIN look retryable rather than fatal -- presumably the
 * (not-visible) surrounding code loops on them; verify in full source. */
48 if (errno == ENODATA || errno == EAGAIN)
/* Drain what splice put into the pipe into the user buffer. */
57 ret2 = read(sd->pipe[0], p, ret);
/* Success path: report the whole buffer as transferred. */
66 return io_u->xfer_buflen;
70 * We can now vmsplice into userspace, so do the transfer by splicing into
71 * a pipe and vmsplicing that into userspace.
/*
 * Newer splice read: splice file data into the pipe, then vmsplice the
 * pipe contents directly into the user buffer (no copy via read()).
 * Returns xfer_buflen on success. NOTE(review): declarations of 'offset',
 * 'buflen', 'ret', 'iov' and the loop/error plumbing are not visible in
 * this extract.
 */
73 static int fio_splice_read(struct thread_data *td, struct io_u *io_u)
75 struct spliceio_data *sd = td->io_ops->data;
76 struct fio_file *f = io_u->file;
/* Start at the io_u's file offset; splice() advances 'offset' itself. */
82 offset = io_u->offset;
83 buflen = io_u->xfer_buflen;
/* Cap each splice call at SPLICE_DEF_SIZE. */
86 int this_len = buflen;
88 if (this_len > SPLICE_DEF_SIZE)
89 this_len = SPLICE_DEF_SIZE;
/* file -> pipe write end. */
91 ret = splice(f->fd, &offset, sd->pipe[1], NULL, this_len, SPLICE_F_MORE);
/* ENODATA/EAGAIN are treated as retryable -- handling lines not visible. */
93 if (errno == ENODATA || errno == EAGAIN)
/* pipe read end -> user memory; loop until the whole iov is consumed.
 * vmsplice may transfer less than requested per call. */
104 while (iov.iov_len) {
105 ret = vmsplice(sd->pipe[0], &iov, 1, SPLICE_F_MOVE);
/* Success path: report the whole buffer as transferred. */
116 return io_u->xfer_buflen;
121 * For splice writing, we can vmsplice our data buffer directly into a
122 * pipe and then splice that to a file.
/*
 * Splice write: vmsplice the user buffer into the pipe, then splice the
 * pipe to the file at the io_u offset. Returns xfer_buflen on success.
 * NOTE(review): the iov declaration line, loop braces and error returns
 * are not visible in this extract.
 */
124 static int fio_splice_write(struct thread_data *td, struct io_u *io_u)
126 struct spliceio_data *sd = td->io_ops->data;
128 .iov_base = io_u->xfer_buf,
129 .iov_len = io_u->xfer_buflen,
/* Poll the pipe write end for writability before each vmsplice. */
131 struct pollfd pfd = { .fd = sd->pipe[1], .events = POLLOUT, };
132 struct fio_file *f = io_u->file;
133 off_t off = io_u->offset;
/* Loop until the entire user buffer has been pushed through the pipe. */
136 while (iov.iov_len) {
/* Block (timeout -1) until the pipe can accept data. */
137 if (poll(&pfd, 1, -1) < 0)
/* user memory -> pipe write end; non-blocking since poll said writable. */
140 ret = vmsplice(sd->pipe[1], &iov, 1, SPLICE_F_NONBLOCK);
/* pipe read end -> file; splice() advances 'off' for us. */
148 ret2 = splice(sd->pipe[0], NULL, f->fd, &off, ret, 0);
/* Success path: report the whole buffer as transferred. */
156 return io_u->xfer_buflen;
/*
 * Queue entry point: dispatch the io_u to the appropriate splice helper
 * (or fsync for non-read/write directions). Always completes synchronously
 * (FIO_Q_COMPLETED). NOTE(review): short-transfer/error handling lines are
 * partially missing from this extract; also note 'ret != xfer_buflen' is
 * compared even after the fsync branch -- presumably the missing lines
 * account for that (fsync returns 0), verify in the full source.
 */
159 static int fio_spliceio_queue(struct thread_data *td, struct io_u *io_u)
161 struct spliceio_data *sd = td->io_ops->data;
164 if (io_u->ddir == DDIR_READ) {
/* Prefer the vmsplice-to-user path when the engine detected support. */
165 if (sd->vmsplice_to_user)
166 ret = fio_splice_read(td, io_u);
168 ret = fio_splice_read_old(td, io_u);
169 } else if (io_u->ddir == DDIR_WRITE)
170 ret = fio_splice_write(td, io_u);
/* Neither read nor write: treat as a sync request. */
172 ret = fsync(io_u->file->fd);
/* Short or failed transfer: record the residual byte count. */
174 if (ret != (int) io_u->xfer_buflen) {
176 io_u->resid = io_u->xfer_buflen - ret;
178 return FIO_Q_COMPLETED;
/* Error path: report the io_u error against this thread. */
184 td_verror(td, io_u->error, "xfer");
186 return FIO_Q_COMPLETED;
/*
 * Engine teardown: release the per-thread splice state allocated in
 * fio_spliceio_init. NOTE(review): the close(pipe fds)/free(sd) lines are
 * not visible in this extract, but the pointer is cleared below.
 */
189 static void fio_spliceio_cleanup(struct thread_data *td)
191 struct spliceio_data *sd = td->io_ops->data;
197 td->io_ops->data = NULL;
/*
 * Engine setup: allocate the per-thread state and create the pipe used as
 * the splice intermediary. NOTE(review): malloc's result is used unchecked
 * by pipe(sd->pipe) -- a NULL return would dereference; the missing lines
 * of this extract may or may not handle it, verify in the full source.
 */
201 static int fio_spliceio_init(struct thread_data *td)
203 struct spliceio_data *sd = malloc(sizeof(*sd));
205 if (pipe(sd->pipe) < 0) {
206 td_verror(td, errno, "pipe");
/* vmsplice-to-user needs runtime detection that isn't implemented yet,
 * so the fast read path stays off. */
212 * need some check for enabling this, for now just leave it disabled
214 sd->vmsplice_to_user = 0;
/* Publish the state for queue/cleanup. */
216 td->io_ops->data = sd;
/*
 * Engine descriptor for the real (FIO_HAVE_SPLICE) build: synchronous
 * engine using the generic open/close helpers. NOTE(review): the .name
 * field and closing lines of the initializer are not visible here.
 */
220 static struct ioengine_ops ioengine = {
222 .version = FIO_IOOPS_VERSION,
223 .init = fio_spliceio_init,
224 .queue = fio_spliceio_queue,
225 .cleanup = fio_spliceio_cleanup,
226 .open_file = generic_open_file,
227 .close_file = generic_close_file,
231 #else /* FIO_HAVE_SPLICE */
234 * When we have a proper configure system in place, we simply won't build
235 * and install this I/O engine. For now, install a crippled version that
236 * just complains and fails to load.
/*
 * Stub init for builds without splice support: always fails with a
 * message so the engine cannot be selected. NOTE(review): the failing
 * return statement is not visible in this extract.
 */
238 static int fio_spliceio_init(struct thread_data fio_unused *td)
240 fprintf(stderr, "fio: splice not available\n");
/*
 * Crippled engine descriptor for the no-splice build; only init is wired
 * up, and it always fails. NOTE(review): .name and the closing lines of
 * the initializer are not visible here.
 */
244 static struct ioengine_ops ioengine = {
246 .version = FIO_IOOPS_VERSION,
247 .init = fio_spliceio_init,
/* Constructor hook: register this engine with fio at load time. */
252 static void fio_init fio_spliceio_register(void)
254 register_ioengine(&ioengine);
/* Destructor hook: unregister the engine at unload time. */
257 static void fio_exit fio_spliceio_unregister(void)
259 unregister_ioengine(&ioengine);