Fix refill buffers overwriting verify data
[fio.git] / engines / splice.c
... / ...
CommitLineData
1/*
2 * splice engine
3 *
4 * IO engine that transfers data by doing splices to/from pipes and
5 * the files.
6 *
7 */
8#include <stdio.h>
9#include <stdlib.h>
10#include <unistd.h>
11#include <errno.h>
12#include <assert.h>
13#include <sys/poll.h>
14#include <sys/mman.h>
15
16#include "../fio.h"
17
18#ifdef FIO_HAVE_SPLICE
19
/*
 * Per-thread engine state: the pipe used as the splice intermediary and
 * two runtime-probed feature flags (assumed supported at init time and
 * cleared when the kernel reports otherwise).
 */
struct spliceio_data {
	int pipe[2];			/* pipe[0]: read end, pipe[1]: write end */
	int vmsplice_to_user;		/* cleared on EBADF: kernel lacks vmsplice-to-user */
	int vmsplice_to_user_map;	/* cleared on EFAULT: direct page mapping unusable */
};
25
26/*
27 * vmsplice didn't use to support splicing to user space, this is the old
28 * variant of getting that job done. Doesn't make a lot of sense, but it
29 * uses splices to move data from the source into a pipe.
30 */
31static int fio_splice_read_old(struct thread_data *td, struct io_u *io_u)
32{
33 struct spliceio_data *sd = td->io_ops->data;
34 struct fio_file *f = io_u->file;
35 int ret, ret2, buflen;
36 off_t offset;
37 void *p;
38
39 offset = io_u->offset;
40 buflen = io_u->xfer_buflen;
41 p = io_u->xfer_buf;
42 while (buflen) {
43 int this_len = buflen;
44
45 if (this_len > SPLICE_DEF_SIZE)
46 this_len = SPLICE_DEF_SIZE;
47
48 ret = splice(f->fd, &offset, sd->pipe[1], NULL, this_len, SPLICE_F_MORE);
49 if (ret < 0) {
50 if (errno == ENODATA || errno == EAGAIN)
51 continue;
52
53 return -errno;
54 }
55
56 buflen -= ret;
57
58 while (ret) {
59 ret2 = read(sd->pipe[0], p, ret);
60 if (ret2 < 0)
61 return -errno;
62
63 ret -= ret2;
64 p += ret2;
65 }
66 }
67
68 return io_u->xfer_buflen;
69}
70
/*
 * We can now vmsplice into userspace, so do the transfer by splicing into
 * a pipe and vmsplicing that into userspace.
 *
 * Two variants share this body: when ->vmsplice_to_user_map is set we
 * anonymous-mmap over io_u->xfer_buf and let vmsplice map pages straight
 * in; if the kernel rejects that with EFAULT we clear the flag, unmap,
 * and restart using the plain copy variant into xfer_buf itself.
 *
 * Returns io_u->xfer_buflen on success, a negative value on error
 * (-EBADF specifically signals "no vmsplice-to-user support" to the
 * caller, which then falls back to fio_splice_read_old()).
 */
static int fio_splice_read(struct thread_data *td, struct io_u *io_u)
{
	struct spliceio_data *sd = td->io_ops->data;
	struct fio_file *f = io_u->file;
	struct iovec iov;
	int ret , buflen, mmap_len;
	off_t offset;
	void *p, *map;

restart:
	ret = 0;
	offset = io_u->offset;
	mmap_len = buflen = io_u->xfer_buflen;

	if (sd->vmsplice_to_user_map) {
		/*
		 * Map fresh anonymous pages over the transfer buffer so
		 * vmsplice can replace them with the spliced pages.
		 * NOTE(review): fd 0 is passed here rather than -1; presumably
		 * OS_MAP_ANON makes the fd argument irrelevant — confirm.
		 */
		map = mmap(io_u->xfer_buf, buflen, PROT_READ, MAP_PRIVATE|OS_MAP_ANON, 0, 0);
		if (map == MAP_FAILED) {
			td_verror(td, errno, "mmap io_u");
			return -1;
		}

		p = map;
	} else {
		map = NULL;
		p = io_u->xfer_buf;
	}

	while (buflen) {
		int this_len = buflen;
		int flags = 0;

		if (this_len > SPLICE_DEF_SIZE) {
			this_len = SPLICE_DEF_SIZE;
			flags = SPLICE_F_MORE;	/* more chunks will follow */
		}

		/* stage file data in the pipe */
		ret = splice(f->fd, &offset, sd->pipe[1], NULL, this_len,flags);
		if (ret < 0) {
			/* transient: retry this chunk */
			if (errno == ENODATA || errno == EAGAIN)
				continue;

			td_verror(td, errno, "splice-from-fd");
			break;
		}

		buflen -= ret;
		iov.iov_base = p;
		iov.iov_len = ret;
		p += ret;

		/* move the staged pipe data out to user space */
		while (iov.iov_len) {
			ret = vmsplice(sd->pipe[0], &iov, 1, SPLICE_F_MOVE);
			if (ret < 0) {
				if (errno == EFAULT && sd->vmsplice_to_user_map) {
					/*
					 * Direct page mapping not supported;
					 * drop the mmap trick and redo the
					 * whole io_u with the copy variant.
					 */
					sd->vmsplice_to_user_map = 0;
					munmap(map, mmap_len);
					goto restart;
				}
				if (errno == EBADF) {
					/* no vmsplice-to-user at all */
					ret = -EBADF;
					break;
				}
				td_verror(td, errno, "vmsplice");
				break;
			} else if (!ret) {
				/* zero-byte vmsplice: treat as no-data error */
				td_verror(td, ENODATA, "vmsplice");
				ret = -1;
				break;
			}

			iov.iov_len -= ret;
			iov.iov_base += ret;
		}
		/* propagate inner-loop failure out of the chunk loop */
		if (ret < 0)
			break;
	}

	/* in map mode the spliced pages now back io_u->xfer_buf; drop our mapping */
	if (sd->vmsplice_to_user_map && munmap(map, mmap_len) < 0) {
		td_verror(td, errno, "munnap io_u");
		return -1;
	}
	if (ret < 0)
		return ret;

	return io_u->xfer_buflen;
}
161
162/*
163 * For splice writing, we can vmsplice our data buffer directly into a
164 * pipe and then splice that to a file.
165 */
166static int fio_splice_write(struct thread_data *td, struct io_u *io_u)
167{
168 struct spliceio_data *sd = td->io_ops->data;
169 struct iovec iov = {
170 .iov_base = io_u->xfer_buf,
171 .iov_len = io_u->xfer_buflen,
172 };
173 struct pollfd pfd = { .fd = sd->pipe[1], .events = POLLOUT, };
174 struct fio_file *f = io_u->file;
175 off_t off = io_u->offset;
176 int ret, ret2;
177
178 while (iov.iov_len) {
179 if (poll(&pfd, 1, -1) < 0)
180 return errno;
181
182 ret = vmsplice(sd->pipe[1], &iov, 1, SPLICE_F_NONBLOCK);
183 if (ret < 0)
184 return -errno;
185
186 iov.iov_len -= ret;
187 iov.iov_base += ret;
188
189 while (ret) {
190 ret2 = splice(sd->pipe[0], NULL, f->fd, &off, ret, 0);
191 if (ret2 < 0)
192 return -errno;
193
194 ret -= ret2;
195 }
196 }
197
198 return io_u->xfer_buflen;
199}
200
201static int fio_spliceio_queue(struct thread_data *td, struct io_u *io_u)
202{
203 struct spliceio_data *sd = td->io_ops->data;
204 int ret;
205
206 fio_ro_check(td, io_u);
207
208 if (io_u->ddir == DDIR_READ) {
209 if (sd->vmsplice_to_user) {
210 ret = fio_splice_read(td, io_u);
211 /*
212 * This kernel doesn't support vmsplice to user
213 * space. Reset the vmsplice_to_user flag, so that
214 * we retry below and don't hit this path again.
215 */
216 if (ret == -EBADF)
217 sd->vmsplice_to_user = 0;
218 }
219 if (!sd->vmsplice_to_user)
220 ret = fio_splice_read_old(td, io_u);
221 } else if (io_u->ddir == DDIR_WRITE)
222 ret = fio_splice_write(td, io_u);
223 else
224 ret = fsync(io_u->file->fd);
225
226 if (ret != (int) io_u->xfer_buflen) {
227 if (ret >= 0) {
228 io_u->resid = io_u->xfer_buflen - ret;
229 io_u->error = 0;
230 return FIO_Q_COMPLETED;
231 } else
232 io_u->error = errno;
233 }
234
235 if (io_u->error)
236 td_verror(td, io_u->error, "xfer");
237
238 return FIO_Q_COMPLETED;
239}
240
241static void fio_spliceio_cleanup(struct thread_data *td)
242{
243 struct spliceio_data *sd = td->io_ops->data;
244
245 if (sd) {
246 close(sd->pipe[0]);
247 close(sd->pipe[1]);
248 free(sd);
249 td->io_ops->data = NULL;
250 }
251}
252
253static int fio_spliceio_init(struct thread_data *td)
254{
255 struct spliceio_data *sd = malloc(sizeof(*sd));
256
257 if (pipe(sd->pipe) < 0) {
258 td_verror(td, errno, "pipe");
259 free(sd);
260 return 1;
261 }
262
263 /*
264 * Assume this work, we'll reset this if it doesn't
265 */
266 sd->vmsplice_to_user = 1;
267
268 /*
269 * Works with "real" vmsplice to user, eg mapping pages directly.
270 * Reset if we fail.
271 */
272 sd->vmsplice_to_user_map = 1;
273
274 /*
275 * And if vmsplice_to_user works, we definitely need aligned
276 * buffers. Just set ->odirect to force that.
277 */
278 if (td_read(td))
279 td->o.odirect = 1;
280
281 td->io_ops->data = sd;
282 return 0;
283}
284
/* Engine descriptor: synchronous splice-based I/O with generic file ops. */
static struct ioengine_ops ioengine = {
	.name		= "splice",
	.version	= FIO_IOOPS_VERSION,
	.init		= fio_spliceio_init,
	.queue		= fio_spliceio_queue,
	.cleanup	= fio_spliceio_cleanup,
	.open_file	= generic_open_file,
	.close_file	= generic_close_file,
	.flags		= FIO_SYNCIO,
};
295
296#else /* FIO_HAVE_SPLICE */
297
298/*
299 * When we have a proper configure system in place, we simply wont build
300 * and install this io engine. For now install a crippled version that
301 * just complains and fails to load.
302 */
303static int fio_spliceio_init(struct thread_data fio_unused *td)
304{
305 fprintf(stderr, "fio: splice not available\n");
306 return 1;
307}
308
/* Stub descriptor: init always fails, so the engine never loads. */
static struct ioengine_ops ioengine = {
	.name		= "splice",
	.version	= FIO_IOOPS_VERSION,
	.init		= fio_spliceio_init,
};
314
315#endif
316
/* Constructor hook (fio_init): register this engine at program start. */
static void fio_init fio_spliceio_register(void)
{
	register_ioengine(&ioengine);
}
321
/* Destructor hook (fio_exit): deregister the engine at program exit. */
static void fio_exit fio_spliceio_unregister(void)
{
	unregister_ioengine(&ioengine);
}