4 * IO engine that transfers data by doing splices to/from pipes and
/*
 * Per-thread private state for the splice engine.
 * NOTE(review): fragmentary listing -- other fields (e.g. the pipe fd pair
 * and the vmsplice_to_user flag used below) are elided here.
 */
17 struct spliceio_data {
/* Nonzero when vmsplice can map pipe pages directly into user space. */
20 int vmsplice_to_user_map;
24 * vmsplice didn't use to support splicing to user space; this is the old
25 * variant of getting that job done. It doesn't make a lot of sense, but it
26 * uses splices to move data from the source into a pipe.
/*
 * Legacy read path: splice() from the source fd into the engine's pipe,
 * then read() the payload back out of the pipe into the io_u buffer.
 * Used when vmsplice-to-user-space is unavailable.
 * NOTE(review): fragmentary listing -- the transfer loop header and most
 * error handling are elided, so comments below hedge where needed.
 */
28 static int fio_splice_read_old(struct thread_data *td, struct io_u *io_u)
30 struct spliceio_data *sd = td->io_ops_data;
31 struct fio_file *f = io_u->file;
32 int ret, ret2, buflen;
/* Work on a local offset copy; splice() advances it as it transfers. */
36 offset = io_u->offset;
37 buflen = io_u->xfer_buflen;
/* Presumably the body of a loop draining buflen -- header elided. */
40 int this_len = buflen;
/* Cap each splice call at the default chunk size. */
42 if (this_len > SPLICE_DEF_SIZE)
43 this_len = SPLICE_DEF_SIZE;
/* Move up to this_len bytes from the file into the pipe's write end;
 * SPLICE_F_MORE hints that more data will follow. */
45 ret = splice(f->fd, &offset, sd->pipe[1], NULL, this_len, SPLICE_F_MORE);
/* ENODATA/EAGAIN are benign/retryable, not hard failures. */
47 if (errno == ENODATA || errno == EAGAIN)
/* Drain what was spliced from the pipe's read end into the user buffer. */
56 ret2 = read(sd->pipe[0], p, ret);
/* Success: report the full requested transfer length. */
65 return io_u->xfer_buflen;
69 * We can now vmsplice into userspace, so do the transfer by splicing into
70 * a pipe and vmsplicing that into userspace.
/*
 * Modern read path: splice file data into the pipe, then vmsplice the
 * pipe contents into user space. Falls back (by clearing
 * vmsplice_to_user_map) if the kernel rejects direct page mapping.
 * NOTE(review): fragmentary listing -- loop/label structure is elided.
 */
72 static int fio_splice_read(struct thread_data *td, struct io_u *io_u)
74 struct spliceio_data *sd = td->io_ops_data;
75 struct fio_file *f = io_u->file;
77 int ret , buflen, mmap_len;
83 offset = io_u->offset;
84 mmap_len = buflen = io_u->xfer_buflen;
/* If direct page mapping is believed to work, pre-map an anonymous,
 * read-only region over the io_u buffer for vmsplice to target. */
86 if (sd->vmsplice_to_user_map) {
87 map = mmap(io_u->xfer_buf, buflen, PROT_READ, MAP_PRIVATE|OS_MAP_ANON, 0, 0);
88 if (map == MAP_FAILED) {
89 td_verror(td, errno, "mmap io_u");
100 int this_len = buflen;
/* Chunk the transfer; SPLICE_F_MORE signals more data follows. */
103 if (this_len > SPLICE_DEF_SIZE) {
104 this_len = SPLICE_DEF_SIZE;
105 flags = SPLICE_F_MORE;
/* Splice from the file into the pipe's write end. */
108 ret = splice(f->fd, &offset, sd->pipe[1], NULL, this_len,flags);
110 if (errno == ENODATA || errno == EAGAIN)
113 td_verror(td, errno, "splice-from-fd");
/* Pull everything spliced into the pipe out to user space. */
121 while (iov.iov_len) {
122 ret = vmsplice(sd->pipe[0], &iov, 1, SPLICE_F_MOVE);
/* EFAULT while using the mapped variant means direct mapping is not
 * supported here: disable it and drop the temporary mapping. */
124 if (errno == EFAULT &&
125 sd->vmsplice_to_user_map) {
126 sd->vmsplice_to_user_map = 0;
127 munmap(map, mmap_len);
133 if (errno == EBADF) {
137 td_verror(td, errno, "vmsplice");
/* Pipe drained before the iov was satisfied: report no data. */
140 td_verror(td, ENODATA, "vmsplice");
/* Tear down the temporary mapping, if one was created above. */
153 if (sd->vmsplice_to_user_map && munmap(map, mmap_len) < 0) {
/* NOTE(review): "munnap" below looks like a typo for "munmap" in the
 * error label (cosmetic; it only affects the logged message). */
154 td_verror(td, errno, "munnap io_u");
160 return io_u->xfer_buflen;
164 * For splice writing, we can vmsplice our data buffer directly into a
165 * pipe and then splice that to a file.
/*
 * Write path: vmsplice the user buffer into the pipe, then splice the
 * pipe contents out to the destination file at the io_u offset.
 * NOTE(review): fragmentary listing -- error checks between the calls
 * are elided.
 */
167 static int fio_splice_write(struct thread_data *td, struct io_u *io_u)
169 struct spliceio_data *sd = td->io_ops_data;
/* iovec describing the caller's transfer buffer (initializer fragment). */
171 .iov_base = io_u->xfer_buf,
172 .iov_len = io_u->xfer_buflen,
/* Poll descriptor for the pipe's write end, used to wait for room. */
174 struct pollfd pfd = { .fd = sd->pipe[1], .events = POLLOUT, };
175 struct fio_file *f = io_u->file;
176 off_t off = io_u->offset;
/* Feed the buffer through the pipe chunk by chunk until drained. */
179 while (iov.iov_len) {
/* Block until the pipe can accept data before the non-blocking vmsplice. */
180 if (poll(&pfd, 1, -1) < 0)
183 ret = vmsplice(sd->pipe[1], &iov, 1, SPLICE_F_NONBLOCK);
/* Move what was just queued out of the pipe into the file; splice
 * advances 'off' for us. */
191 ret2 = splice(sd->pipe[0], NULL, f->fd, &off, ret, 0);
/* Success: report the full requested transfer length. */
199 return io_u->xfer_buflen;
/*
 * Queue entry point: dispatch on data direction. Reads prefer the
 * vmsplice path and fall back to the legacy read when the kernel lacks
 * vmsplice-to-user support.
 * NOTE(review): fragmentary listing -- some branches/braces are elided.
 */
202 static enum fio_q_status fio_spliceio_queue(struct thread_data *td,
205 struct spliceio_data *sd = td->io_ops_data;
/* Enforce read-only job constraints before doing any work. */
208 fio_ro_check(td, io_u);
210 if (io_u->ddir == DDIR_READ) {
211 if (sd->vmsplice_to_user) {
212 ret = fio_splice_read(td, io_u);
214 * This kernel doesn't support vmsplice to user
215 * space. Reset the vmsplice_to_user flag, so that
216 * we retry below and don't hit this path again.
219 sd->vmsplice_to_user = 0;
/* Either vmsplice was never supported, or it just failed above:
 * use the legacy splice+read path. */
221 if (!sd->vmsplice_to_user)
222 ret = fio_splice_read_old(td, io_u);
223 } else if (io_u->ddir == DDIR_WRITE)
224 ret = fio_splice_write(td, io_u);
225 else if (io_u->ddir == DDIR_TRIM)
226 ret = do_io_u_trim(td, io_u);
/* Any other direction is treated as a sync request. */
228 ret = do_io_u_sync(td, io_u);
/* Short transfer: record the residual byte count on the io_u. */
230 if (ret != (int) io_u->xfer_buflen) {
232 io_u->resid = io_u->xfer_buflen - ret;
234 return FIO_Q_COMPLETED;
240 td_verror(td, io_u->error, "xfer");
/* EINVAL typically means splice is unsupported on this fs/device. */
241 if (io_u->error == EINVAL)
242 log_err("fio: looks like splice doesn't work on this"
/* Synchronous engine: the io_u is always complete on return. */
246 return FIO_Q_COMPLETED;
/*
 * Per-thread teardown.
 * NOTE(review): fragment -- the pipe close and free of sd are elided.
 */
249 static void fio_spliceio_cleanup(struct thread_data *td)
251 struct spliceio_data *sd = td->io_ops_data;
/*
 * Per-thread setup: allocate engine state and create the transfer pipe.
 * NOTE(review): malloc result appears to be used unchecked -- confirm
 * against the elided lines / the project's OOM policy.
 */
260 static int fio_spliceio_init(struct thread_data *td)
262 struct spliceio_data *sd = malloc(sizeof(*sd));
264 if (pipe(sd->pipe) < 0) {
265 td_verror(td, errno, "pipe");
271 * Assume this works; we'll reset it later if it doesn't
273 sd->vmsplice_to_user = 1;
276 * Works with "real" vmsplice to user, eg mapping pages directly.
279 sd->vmsplice_to_user_map = 1;
282 * And if vmsplice_to_user works, we definitely need aligned
283 * buffers. Just set ->odirect to force that.
/* Publish the engine state for queue/cleanup callbacks. */
288 td->io_ops_data = sd;
/*
 * Engine descriptor: a synchronous, pipe-based engine using the generic
 * file open/close/size helpers.
 */
292 static struct ioengine_ops ioengine = {
294 .version = FIO_IOOPS_VERSION,
295 .init = fio_spliceio_init,
296 .queue = fio_spliceio_queue,
297 .cleanup = fio_spliceio_cleanup,
298 .open_file = generic_open_file,
299 .close_file = generic_close_file,
300 .get_file_size = generic_get_file_size,
/* FIO_SYNCIO: completes inline; FIO_PIPEIO: data flows through a pipe. */
301 .flags = FIO_SYNCIO | FIO_PIPEIO,
/* Constructor hook: register this engine with fio at load time. */
304 static void fio_init fio_spliceio_register(void)
306 register_ioengine(&ioengine);
/* Destructor hook: unregister the engine at unload time. */
309 static void fio_exit fio_spliceio_unregister(void)
311 unregister_ioengine(&ioengine);