/*
 * splice io engine  (commit 2866c82d)
 *
 */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <assert.h>
#include <sys/poll.h>

#include "fio.h"
#include "os.h"
13 | ||
/*
 * Per-thread private state for the splice engine. The engine is sync
 * (depth 1), so a single pending io_u is all we ever track.
 */
struct spliceio_data {
	struct io_u *last_io_u;	/* last successfully completed io_u */
	int pipe[2];		/* pipe used to shuttle data via splice/vmsplice */
};
18 | ||
19 | static int fio_spliceio_sync(struct thread_data *td) | |
20 | { | |
21 | return fsync(td->fd); | |
22 | } | |
23 | ||
24 | static int fio_spliceio_getevents(struct thread_data *td, int fio_unused min, | |
25 | int max, struct timespec fio_unused *t) | |
26 | { | |
27 | assert(max <= 1); | |
28 | ||
29 | /* | |
30 | * we can only have one finished io_u for sync io, since the depth | |
31 | * is always 1 | |
32 | */ | |
33 | if (list_empty(&td->io_u_busylist)) | |
34 | return 0; | |
35 | ||
36 | return 1; | |
37 | } | |
38 | ||
39 | static struct io_u *fio_spliceio_event(struct thread_data *td, int event) | |
40 | { | |
41 | struct spliceio_data *sd = td->io_ops->data; | |
42 | ||
43 | assert(event == 0); | |
44 | ||
45 | return sd->last_io_u; | |
46 | } | |
47 | ||
48 | /* | |
49 | * For splice reading, we unfortunately cannot (yet) vmsplice the other way. | |
50 | * So just splice the data from the file into the pipe, and use regular | |
51 | * read to fill the buffer. Doesn't make a lot of sense, but... | |
52 | */ | |
53 | static int fio_splice_read(struct thread_data *td, struct io_u *io_u) | |
54 | { | |
55 | struct spliceio_data *sd = td->io_ops->data; | |
56 | int ret, ret2, buflen; | |
57 | off_t offset; | |
58 | void *p; | |
59 | ||
60 | offset = io_u->offset; | |
61 | buflen = io_u->buflen; | |
62 | p = io_u->buf; | |
63 | while (buflen) { | |
64 | int this_len = buflen; | |
65 | ||
66 | if (this_len > SPLICE_DEF_SIZE) | |
67 | this_len = SPLICE_DEF_SIZE; | |
68 | ||
69 | ret = splice(td->fd, &offset, sd->pipe[1], NULL, this_len, SPLICE_F_MORE); | |
70 | if (ret < 0) { | |
71 | if (errno == ENODATA || errno == EAGAIN) | |
72 | continue; | |
73 | ||
74 | return errno; | |
75 | } | |
76 | ||
77 | buflen -= ret; | |
78 | ||
79 | while (ret) { | |
80 | ret2 = read(sd->pipe[0], p, ret); | |
81 | if (ret2 < 0) | |
82 | return errno; | |
83 | ||
84 | ret -= ret2; | |
85 | p += ret2; | |
86 | } | |
87 | } | |
88 | ||
89 | return io_u->buflen; | |
90 | } | |
91 | ||
/*
 * For splice writing, we can vmsplice our data buffer directly into a
 * pipe and then splice that to a file.
 *
 * Returns io_u->buflen on success, or a positive errno value on failure.
 */
static int fio_splice_write(struct thread_data *td, struct io_u *io_u)
{
	struct spliceio_data *sd = td->io_ops->data;
	/* single iovec tracking the not-yet-consumed part of the buffer */
	struct iovec iov[1] = {
		{
			.iov_base = io_u->buf,
			.iov_len = io_u->buflen,
		}
	};
	struct pollfd pfd = { .fd = sd->pipe[1], .events = POLLOUT, };
	off_t off = io_u->offset;
	int ret, ret2;

	while (iov[0].iov_len) {
		/* block until the pipe has room for more data */
		if (poll(&pfd, 1, -1) < 0)
			return errno;

		/*
		 * Map buffer pages into the pipe without copying; NONBLOCK
		 * so a full pipe returns instead of blocking (poll above
		 * should have ensured writability).
		 */
		ret = vmsplice(sd->pipe[1], iov, 1, SPLICE_F_NONBLOCK);
		if (ret < 0)
			return errno;

		/* advance past what vmsplice consumed */
		iov[0].iov_len -= ret;
		iov[0].iov_base += ret;

		/*
		 * Push everything just vmspliced on to the file before
		 * touching the buffer again, so the pages are not reused
		 * while still referenced by the pipe.
		 */
		while (ret) {
			ret2 = splice(sd->pipe[0], NULL, td->fd, &off, ret, 0);
			if (ret2 < 0)
				return errno;

			ret -= ret2;
		}
	}

	return io_u->buflen;
}
131 | ||
/*
 * Queue (and, as this is a sync engine, immediately complete) one io_u.
 * Dispatches to the splice read or write helper, records residual/error
 * state on short or failed transfers, and returns io_u->error (0 on
 * success).
 */
static int fio_spliceio_queue(struct thread_data *td, struct io_u *io_u)
{
	struct spliceio_data *sd = td->io_ops->data;
	int ret;

	if (io_u->ddir == DDIR_READ)
		ret = fio_splice_read(td, io_u);
	else
		ret = fio_splice_write(td, io_u);

	if ((unsigned int) ret != io_u->buflen) {
		if (ret > 0) {
			/* short transfer: record the residual byte count */
			io_u->resid = io_u->buflen - ret;
			io_u->error = ENODATA;
		} else
			/*
			 * NOTE(review): the helpers return a positive errno
			 * value on failure, so ret <= 0 looks unreachable and
			 * errno here may be stale by the time we read it —
			 * verify the helper return convention.
			 */
			io_u->error = errno;
	}

	if (!io_u->error)
		sd->last_io_u = io_u;

	return io_u->error;
}
155 | ||
156 | static void fio_spliceio_cleanup(struct thread_data *td) | |
157 | { | |
158 | struct spliceio_data *sd = td->io_ops->data; | |
159 | ||
160 | if (sd) { | |
161 | close(sd->pipe[0]); | |
162 | close(sd->pipe[1]); | |
163 | free(sd); | |
164 | td->io_ops->data = NULL; | |
165 | } | |
166 | } | |
167 | ||
168 | static int fio_spliceio_init(struct thread_data *td) | |
169 | { | |
170 | struct spliceio_data *sd = malloc(sizeof(*sd)); | |
171 | ||
172 | sd->last_io_u = NULL; | |
173 | if (pipe(sd->pipe) < 0) { | |
174 | td_verror(td, errno); | |
175 | free(sd); | |
176 | return 1; | |
177 | } | |
178 | ||
179 | td->io_ops->data = sd; | |
180 | return 0; | |
181 | } | |
182 | ||
/*
 * Engine registration table. FIO_SYNCIO marks this as a synchronous
 * engine (queue depth 1), which is what the getevents/event handlers
 * above rely on.
 */
struct ioengine_ops ioengine = {
	.name = "splice",
	.version = FIO_IOOPS_VERSION,
	.init = fio_spliceio_init,
	.queue = fio_spliceio_queue,
	.getevents = fio_spliceio_getevents,
	.event = fio_spliceio_event,
	.cleanup = fio_spliceio_cleanup,
	.sync = fio_spliceio_sync,
	.flags = FIO_SYNCIO,
};