Commit | Line | Data |
---|---|---|
79a43187 JA |
1 | /* |
2 | * binject engine | |
3 | * | |
4 | * IO engine that uses the Linux binject interface to directly inject | |
5 | * bio's to block devices. | |
6 | * | |
7 | */ | |
8 | #include <stdio.h> | |
9 | #include <stdlib.h> | |
10 | #include <unistd.h> | |
11 | #include <errno.h> | |
12 | #include <assert.h> | |
13 | #include <string.h> | |
14 | #include <sys/poll.h> | |
83c107b7 JA |
15 | #include <sys/types.h> |
16 | #include <sys/stat.h> | |
79a43187 JA |
17 | |
18 | #include "../fio.h" | |
19 | ||
20 | #ifdef FIO_HAVE_BINJECT | |
21 | ||
/*
 * Per-thread engine state, allocated in fio_binject_init().
 */
struct binject_data {
	struct b_user_cmd *cmds;	/* iodepth-sized buffer that read(2) fills with completed commands */
	struct io_u **events;		/* completed io_u's, filled by getevents, read by event() */
	struct pollfd *pfds;		/* one pollfd per file, used when polling non-blocking */
	int *fd_flags;			/* saved F_GETFL flags, restored after a min==0 getevents */
};
28 | ||
0e238572 JA |
/*
 * Per-file state, hung off fio_file->file_data by the open path.
 */
struct binject_file {
	unsigned int bs;	/* logical sector size from BLKSSZGET */
	int minor;		/* minor of the mapped /dev/binject<minor> node, -1 if unmapped */
	int fd;			/* fd of the opened /dev/binject<minor> node, -1 if closed */
};
34 | ||
79a43187 JA |
35 | static void binject_buc_init(struct binject_data *bd, struct io_u *io_u) |
36 | { | |
37 | struct b_user_cmd *buc = &io_u->buc; | |
38 | ||
39 | memset(buc, 0, sizeof(*buc)); | |
40 | binject_buc_set_magic(buc); | |
41 | ||
42 | buc->buf = (unsigned long) io_u->xfer_buf; | |
43 | buc->len = io_u->xfer_buflen; | |
44 | buc->offset = io_u->offset; | |
45 | buc->usr_ptr = (unsigned long) io_u; | |
46 | ||
47 | buc->flags = B_FLAG_NOIDLE | B_FLAG_UNPLUG; | |
48 | assert(buc->buf); | |
49 | } | |
50 | ||
/*
 * Scan @fds entries of @pfds and return 1 as soon as one has POLLIN
 * set in its revents, 0 if none do.
 */
static int pollin_events(struct pollfd *pfds, int fds)
{
	struct pollfd *p = pfds;
	struct pollfd *end = pfds + fds;

	for (; p != end; p++) {
		if (p->revents & POLLIN)
			return 1;
	}

	return 0;
}
61 | ||
d01c404b JA |
62 | static unsigned int binject_read_commands(struct thread_data *td, void *p, |
63 | int left, int *err) | |
64 | { | |
65 | struct binject_file *bf; | |
66 | struct fio_file *f; | |
67 | int i, ret, events; | |
68 | ||
69 | one_more: | |
70 | events = 0; | |
71 | for_each_file(td, f, i) { | |
72 | bf = f->file_data; | |
73 | ret = read(bf->fd, p, left * sizeof(struct b_user_cmd)); | |
74 | if (ret < 0) { | |
75 | if (errno == EAGAIN) | |
76 | continue; | |
77 | *err = -errno; | |
78 | td_verror(td, errno, "read"); | |
79 | break; | |
80 | } else if (ret) { | |
81 | p += ret; | |
82 | events += ret / sizeof(struct b_user_cmd); | |
83 | } | |
84 | } | |
85 | ||
86 | if (*err || events) | |
87 | return events; | |
88 | ||
89 | usleep(1000); | |
90 | goto one_more; | |
91 | } | |
92 | ||
79a43187 JA |
/*
 * Reap up to @max completion events. When @min is 0 the device fds
 * are switched to non-blocking and we poll until something is
 * readable; otherwise binject_read_commands() blocks until commands
 * arrive. Completed io_u's are stashed in bd->events for
 * fio_binject_event(). Returns the number of events reaped, or a
 * negative errno on poll/read failure.
 */
static int fio_binject_getevents(struct thread_data *td, unsigned int min,
				 unsigned int max, struct timespec fio_unused *t)
{
	struct binject_data *bd = td->io_ops->data;
	int left = max, ret, r = 0, ev_index = 0;
	void *buf = bd->cmds;
	unsigned int i, events;
	struct fio_file *f;
	struct binject_file *bf;

	/*
	 * Fill in the file descriptors
	 */
	for_each_file(td, f, i) {
		bf = f->file_data;

		/*
		 * don't block for min events == 0
		 */
		if (!min) {
			/* save current flags so they can be restored below */
			bd->fd_flags[i] = fcntl(bf->fd, F_GETFL);
			fcntl(bf->fd, F_SETFL, bd->fd_flags[i] | O_NONBLOCK);
		}
		bd->pfds[i].fd = bf->fd;
		bd->pfds[i].events = POLLIN;
	}

	while (left) {
		/* non-blocking mode: wait until poll reports a readable fd */
		while (!min) {
			ret = poll(bd->pfds, td->o.nr_files, -1);
			if (ret < 0) {
				if (!r)
					r = -errno;
				td_verror(td, errno, "poll");
				break;
			} else if (!ret)
				continue;

			if (pollin_events(bd->pfds, td->o.nr_files))
				break;
		}

		if (r < 0)
			break;

		/* on read error, binject_read_commands sets r negative */
		events = binject_read_commands(td, buf, left, &r);

		if (r < 0)
			break;

		left -= events;
		r += events;

		/* map each returned command back to its submitting io_u */
		for (i = 0; i < events; i++) {
			struct b_user_cmd *buc = (struct b_user_cmd *) buf + i;

			bd->events[ev_index] = (struct io_u *) (unsigned long) buc->usr_ptr;
			ev_index++;
		}
	}

	/* restore the original fd flags saved above */
	if (!min) {
		for_each_file(td, f, i) {
			bf = f->file_data;
			fcntl(bf->fd, F_SETFL, bd->fd_flags[i]);
		}
	}

	if (r > 0)
		assert(ev_index == r);

	return r;
}
166 | ||
167 | static int fio_binject_doio(struct thread_data *td, struct io_u *io_u) | |
168 | { | |
169 | struct b_user_cmd *buc = &io_u->buc; | |
d01c404b | 170 | struct binject_file *bf = io_u->file->file_data; |
79a43187 JA |
171 | int ret; |
172 | ||
0e238572 | 173 | ret = write(bf->fd, buc, sizeof(*buc)); |
79a43187 JA |
174 | if (ret < 0) |
175 | return ret; | |
176 | ||
177 | return FIO_Q_QUEUED; | |
178 | } | |
179 | ||
180 | static int fio_binject_prep(struct thread_data *td, struct io_u *io_u) | |
181 | { | |
182 | struct binject_data *bd = td->io_ops->data; | |
183 | struct b_user_cmd *buc = &io_u->buc; | |
d01c404b | 184 | struct binject_file *bf = io_u->file->file_data; |
79a43187 | 185 | |
0e238572 | 186 | if (io_u->xfer_buflen & (bf->bs - 1)) { |
79a43187 JA |
187 | log_err("read/write not sector aligned\n"); |
188 | return EINVAL; | |
189 | } | |
190 | ||
191 | if (io_u->ddir == DDIR_READ) { | |
192 | binject_buc_init(bd, io_u); | |
193 | buc->type = B_TYPE_READ; | |
194 | } else if (io_u->ddir == DDIR_WRITE) { | |
195 | binject_buc_init(bd, io_u); | |
1ef2b6be JA |
196 | if (io_u->flags & IO_U_F_BARRIER) |
197 | buc->type = B_TYPE_WRITEBARRIER; | |
198 | else | |
199 | buc->type = B_TYPE_WRITE; | |
79a43187 JA |
200 | } else if (io_u->ddir == DDIR_TRIM) { |
201 | binject_buc_init(bd, io_u); | |
202 | buc->type = B_TYPE_DISCARD; | |
203 | } else { | |
204 | assert(0); | |
205 | } | |
206 | ||
207 | return 0; | |
208 | } | |
209 | ||
210 | static int fio_binject_queue(struct thread_data *td, struct io_u *io_u) | |
211 | { | |
212 | int ret; | |
213 | ||
214 | fio_ro_check(td, io_u); | |
215 | ||
216 | ret = fio_binject_doio(td, io_u); | |
217 | ||
218 | if (ret < 0) | |
219 | io_u->error = errno; | |
220 | ||
221 | if (io_u->error) { | |
222 | td_verror(td, io_u->error, "xfer"); | |
223 | return FIO_Q_COMPLETED; | |
224 | } | |
225 | ||
226 | return ret; | |
227 | } | |
228 | ||
/*
 * Return the io_u for completion slot @event, as stored by
 * fio_binject_getevents().
 */
static struct io_u *fio_binject_event(struct thread_data *td, int event)
{
	struct binject_data *bd = td->io_ops->data;

	return bd->events[event];
}
235 | ||
0e238572 JA |
236 | static void binject_unmap_dev(struct thread_data *td, struct binject_file *bf) |
237 | { | |
238 | struct b_ioctl_cmd bic; | |
239 | int fdb; | |
240 | ||
241 | if (bf->fd >= 0) { | |
242 | close(bf->fd); | |
243 | bf->fd = -1; | |
244 | } | |
245 | ||
246 | fdb = open("/dev/binject-ctl", O_RDWR); | |
247 | if (fdb < 0) { | |
248 | td_verror(td, errno, "open binject-ctl"); | |
249 | return; | |
250 | } | |
251 | ||
252 | bic.minor = bf->minor; | |
253 | ||
254 | if (ioctl(fdb, 1, &bic) < 0) { | |
255 | td_verror(td, errno, "binject dev unmap"); | |
256 | close(fdb); | |
257 | return; | |
258 | } | |
259 | ||
260 | close(fdb); | |
261 | } | |
262 | ||
263 | static int binject_map_dev(struct thread_data *td, struct binject_file *bf, | |
264 | int fd) | |
265 | { | |
266 | struct b_ioctl_cmd bic; | |
267 | char name[80]; | |
268 | struct stat sb; | |
269 | int fdb, dev_there, loops; | |
270 | ||
271 | fdb = open("/dev/binject-ctl", O_RDWR); | |
272 | if (fdb < 0) { | |
273 | td_verror(td, errno, "binject ctl open"); | |
274 | return 1; | |
275 | } | |
276 | ||
277 | bic.fd = fd; | |
278 | ||
279 | if (ioctl(fdb, 0, &bic) < 0) { | |
280 | td_verror(td, errno, "binject dev map"); | |
281 | close(fdb); | |
282 | return 1; | |
283 | } | |
284 | ||
285 | bf->minor = bic.minor; | |
286 | ||
287 | sprintf(name, "/dev/binject%u", bf->minor); | |
288 | ||
289 | /* | |
290 | * Wait for udev to create the node... | |
291 | */ | |
292 | dev_there = loops = 0; | |
293 | do { | |
294 | if (!stat(name, &sb)) { | |
295 | dev_there = 1; | |
296 | break; | |
297 | } | |
298 | ||
299 | usleep(10000); | |
300 | } while (++loops < 100); | |
301 | ||
302 | close(fdb); | |
303 | ||
304 | if (!dev_there) { | |
305 | log_err("fio: timed out waiting for binject dev\n"); | |
306 | goto err_unmap; | |
307 | } | |
308 | ||
309 | bf->fd = open(name, O_RDWR); | |
310 | if (bf->fd < 0) { | |
311 | td_verror(td, errno, "binject dev open"); | |
312 | err_unmap: | |
313 | binject_unmap_dev(td, bf); | |
314 | return 1; | |
315 | } | |
316 | ||
317 | return 0; | |
318 | } | |
319 | ||
320 | static int fio_binject_close_file(struct thread_data *td, struct fio_file *f) | |
321 | { | |
d01c404b | 322 | struct binject_file *bf = f->file_data; |
0e238572 JA |
323 | |
324 | if (bf) { | |
325 | binject_unmap_dev(td, bf); | |
326 | free(bf); | |
d01c404b | 327 | f->file_data = NULL; |
0e238572 JA |
328 | return generic_close_file(td, f); |
329 | } | |
330 | ||
331 | return 0; | |
332 | } | |
333 | ||
4a435dac JA |
334 | static int fio_binject_open_file(struct thread_data *td, struct fio_file *f) |
335 | { | |
0e238572 | 336 | struct binject_file *bf; |
4a435dac JA |
337 | unsigned int bs; |
338 | int ret; | |
339 | ||
340 | ret = generic_open_file(td, f); | |
341 | if (ret) | |
342 | return 1; | |
343 | ||
344 | if (f->filetype != FIO_TYPE_BD) { | |
345 | log_err("fio: binject only works with block devices\n"); | |
0e238572 | 346 | goto err_close; |
4a435dac JA |
347 | } |
348 | if (ioctl(f->fd, BLKSSZGET, &bs) < 0) { | |
349 | td_verror(td, errno, "BLKSSZGET"); | |
0e238572 JA |
350 | goto err_close; |
351 | } | |
352 | ||
353 | bf = malloc(sizeof(*bf)); | |
354 | bf->bs = bs; | |
355 | bf->minor = bf->fd = -1; | |
d01c404b | 356 | f->file_data = bf; |
0e238572 JA |
357 | |
358 | if (binject_map_dev(td, bf, f->fd)) { | |
359 | err_close: | |
360 | ret = generic_close_file(td, f); | |
4a435dac JA |
361 | return 1; |
362 | } | |
363 | ||
4a435dac JA |
364 | return 0; |
365 | } | |
366 | ||
79a43187 JA |
367 | static void fio_binject_cleanup(struct thread_data *td) |
368 | { | |
369 | struct binject_data *bd = td->io_ops->data; | |
370 | ||
371 | if (bd) { | |
372 | free(bd->events); | |
373 | free(bd->cmds); | |
374 | free(bd->fd_flags); | |
375 | free(bd->pfds); | |
376 | free(bd); | |
377 | } | |
378 | } | |
379 | ||
380 | static int fio_binject_init(struct thread_data *td) | |
381 | { | |
382 | struct binject_data *bd; | |
383 | ||
384 | bd = malloc(sizeof(*bd)); | |
385 | memset(bd, 0, sizeof(*bd)); | |
386 | ||
387 | bd->cmds = malloc(td->o.iodepth * sizeof(struct b_user_cmd)); | |
388 | memset(bd->cmds, 0, td->o.iodepth * sizeof(struct b_user_cmd)); | |
389 | ||
390 | bd->events = malloc(td->o.iodepth * sizeof(struct io_u *)); | |
391 | memset(bd->events, 0, td->o.iodepth * sizeof(struct io_u *)); | |
392 | ||
393 | bd->pfds = malloc(sizeof(struct pollfd) * td->o.nr_files); | |
394 | memset(bd->pfds, 0, sizeof(struct pollfd) * td->o.nr_files); | |
395 | ||
396 | bd->fd_flags = malloc(sizeof(int) * td->o.nr_files); | |
397 | memset(bd->fd_flags, 0, sizeof(int) * td->o.nr_files); | |
398 | ||
399 | td->io_ops->data = bd; | |
400 | return 0; | |
401 | } | |
402 | ||
/*
 * Engine ops for the real binject engine. FIO_RAWIO: raw device
 * access; FIO_BARRIER: barrier writes supported (see
 * B_TYPE_WRITEBARRIER in fio_binject_prep).
 */
static struct ioengine_ops ioengine = {
	.name		= "binject",
	.version	= FIO_IOOPS_VERSION,
	.init		= fio_binject_init,
	.prep		= fio_binject_prep,
	.queue		= fio_binject_queue,
	.getevents	= fio_binject_getevents,
	.event		= fio_binject_event,
	.cleanup	= fio_binject_cleanup,
	.open_file	= fio_binject_open_file,
	.close_file	= fio_binject_close_file,
	.get_file_size	= generic_get_file_size,
	.flags		= FIO_RAWIO | FIO_BARRIER,
};
417 | ||
418 | #else /* FIO_HAVE_BINJECT */ | |
419 | ||
420 | /* | |
421 | * When we have a proper configure system in place, we simply wont build | |
422 | * and install this io engine. For now install a crippled version that | |
423 | * just complains and fails to load. | |
424 | */ | |
/* Stub init: always fails so the engine cannot actually be used. */
static int fio_binject_init(struct thread_data fio_unused *td)
{
	log_err("fio: ioengine binject not available\n");
	return 1;
}
430 | ||
/* Crippled ops table: only init is provided, and it always fails. */
static struct ioengine_ops ioengine = {
	.name		= "binject",
	.version	= FIO_IOOPS_VERSION,
	.init		= fio_binject_init,
};
436 | ||
437 | #endif | |
438 | ||
/* Registered via fio's init hook when the engine is loaded. */
static void fio_init fio_binject_register(void)
{
	register_ioengine(&ioengine);
}
443 | ||
/* Unregistered via fio's exit hook on unload. */
static void fio_exit fio_binject_unregister(void)
{
	unregister_ioengine(&ioengine);
}