sg: check for error in fcntl() restore of flags
[fio.git] / engines / binject.c
/*
 * binject engine
 *
 * IO engine that uses the Linux binject interface to directly inject
 * bio's to block devices.
 *
 */
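/*
 * Example job invocation (illustrative sketch only; the device path and
 * option values below are placeholders): with fio built with
 * FIO_HAVE_BINJECT, the engine is selected like any other, e.g.
 *
 *   fio --name=binject-test --ioengine=binject --filename=/dev/sdX \
 *       --rw=randread --bs=4k --iodepth=32
 */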
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <assert.h>
#include <string.h>
#include <sys/poll.h>
#include <sys/types.h>
#include <sys/stat.h>

#include "../fio.h"

#ifdef FIO_HAVE_BINJECT

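/*
 * Per-job engine state: a buffer for reaping completed b_user_cmds, the
 * io_u events handed back to fio, and the pollfds / saved fd flags used
 * while waiting for completions.
 */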
struct binject_data {
	struct b_user_cmd *cmds;
	struct io_u **events;
	struct pollfd *pfds;
	int *fd_flags;
};

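/*
 * Per-file state: the device block size, the minor of the binject device
 * this file was mapped to, and the fd used to submit commands to it.
 */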
struct binject_file {
	unsigned int bs;
	int minor;
	int fd;
};

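/*
 * Fill out the b_user_cmd embedded in the io_u: point it at the transfer
 * buffer, set length/offset, and stash the io_u pointer in usr_ptr so the
 * completion can be matched back to it.
 */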
static void binject_buc_init(struct binject_data *bd, struct io_u *io_u)
{
	struct b_user_cmd *buc = &io_u->buc;

	memset(buc, 0, sizeof(*buc));
	binject_buc_set_magic(buc);

	buc->buf = (unsigned long) io_u->xfer_buf;
	buc->len = io_u->xfer_buflen;
	buc->offset = io_u->offset;
	buc->usr_ptr = (unsigned long) io_u;

	buc->flags = B_FLAG_NOIDLE | B_FLAG_UNPLUG;
	assert(buc->buf);
}

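/*
 * Return 1 if any of the polled binject fds has completions ready.
 */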
static int pollin_events(struct pollfd *pfds, int fds)
{
	int i;

	for (i = 0; i < fds; i++)
		if (pfds[i].revents & POLLIN)
			return 1;

	return 0;
}

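/*
 * Read completed commands from each device into 'p'. Keep retrying (with a
 * short sleep) until at least one completion or an error is seen; returns
 * the number of completions, with *err set on failure.
 */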
static unsigned int binject_read_commands(struct thread_data *td, void *p,
					  int left, int *err)
{
	struct binject_file *bf;
	struct fio_file *f;
	int i, ret, events;

one_more:
	events = 0;
	for_each_file(td, f, i) {
		bf = (struct binject_file *) (uintptr_t) f->engine_data;
		ret = read(bf->fd, p, left * sizeof(struct b_user_cmd));
		if (ret < 0) {
			if (errno == EAGAIN)
				continue;
			*err = -errno;
			td_verror(td, errno, "read");
			break;
		} else if (ret) {
			p += ret;
			events += ret / sizeof(struct b_user_cmd);
		}
	}

	if (*err || events)
		return events;

	usleep(1000);
	goto one_more;
}

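/*
 * Reap completions. With min == 0 the device fds are switched to
 * non-blocking and polled so the call cannot stall; the saved fd flags are
 * restored before returning.
 */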
static int fio_binject_getevents(struct thread_data *td, unsigned int min,
				 unsigned int max, struct timespec fio_unused *t)
{
	struct binject_data *bd = td->io_ops->data;
	int left = max, ret, r = 0, ev_index = 0;
	void *buf = bd->cmds;
	unsigned int i, events;
	struct fio_file *f;
	struct binject_file *bf;

	/*
	 * Fill in the file descriptors
	 */
	for_each_file(td, f, i) {
		bf = (struct binject_file *) (uintptr_t) f->engine_data;

		/*
		 * don't block for min events == 0
		 */
		if (!min)
			bd->fd_flags[i] = fio_set_fd_nonblocking(bf->fd, "binject");

		bd->pfds[i].fd = bf->fd;
		bd->pfds[i].events = POLLIN;
	}

	while (left) {
		while (!min) {
			ret = poll(bd->pfds, td->o.nr_files, -1);
			if (ret < 0) {
				if (!r)
					r = -errno;
				td_verror(td, errno, "poll");
				break;
			} else if (!ret)
				continue;

			if (pollin_events(bd->pfds, td->o.nr_files))
				break;
		}

		if (r < 0)
			break;

		events = binject_read_commands(td, buf, left, &r);

		if (r < 0)
			break;

		left -= events;
		r += events;

		for (i = 0; i < events; i++) {
			struct b_user_cmd *buc = (struct b_user_cmd *) buf + i;

			bd->events[ev_index] = (struct io_u *) (unsigned long) buc->usr_ptr;
			ev_index++;
		}
	}

	if (!min) {
		for_each_file(td, f, i) {
			bf = (struct binject_file *) (uintptr_t) f->engine_data;

			if (fcntl(bf->fd, F_SETFL, bd->fd_flags[i]) < 0)
				log_err("fio: binject failed to restore fcntl flags: %s\n", strerror(errno));
		}
	}

	if (r > 0)
		assert(ev_index == r);

	return r;
}

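/*
 * Submission is a plain write() of the prepared b_user_cmd to the mapped
 * binject device.
 */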
static int fio_binject_doio(struct thread_data *td, struct io_u *io_u)
{
	struct b_user_cmd *buc = &io_u->buc;
	struct binject_file *bf = (struct binject_file *) (uintptr_t) io_u->file->engine_data;
	int ret;

	ret = write(bf->fd, buc, sizeof(*buc));
	if (ret < 0)
		return ret;

	return FIO_Q_QUEUED;
}

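/*
 * Check alignment against the device block size and translate the io_u
 * data direction into the matching binject command type.
 */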
static int fio_binject_prep(struct thread_data *td, struct io_u *io_u)
{
	struct binject_data *bd = td->io_ops->data;
	struct b_user_cmd *buc = &io_u->buc;
	struct binject_file *bf = (struct binject_file *) (uintptr_t) io_u->file->engine_data;

	if (io_u->xfer_buflen & (bf->bs - 1)) {
		log_err("read/write not sector aligned\n");
		return EINVAL;
	}

	if (io_u->ddir == DDIR_READ) {
		binject_buc_init(bd, io_u);
		buc->type = B_TYPE_READ;
	} else if (io_u->ddir == DDIR_WRITE) {
		binject_buc_init(bd, io_u);
		if (io_u->flags & IO_U_F_BARRIER)
			buc->type = B_TYPE_WRITEBARRIER;
		else
			buc->type = B_TYPE_WRITE;
	} else if (io_u->ddir == DDIR_TRIM) {
		binject_buc_init(bd, io_u);
		buc->type = B_TYPE_DISCARD;
	} else {
		assert(0);
	}

	return 0;
}

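/*
 * Queue path: submit via fio_binject_doio() and map any error back into
 * the io_u.
 */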
static int fio_binject_queue(struct thread_data *td, struct io_u *io_u)
{
	int ret;

	fio_ro_check(td, io_u);

	ret = fio_binject_doio(td, io_u);

	if (ret < 0)
		io_u->error = errno;

	if (io_u->error) {
		td_verror(td, io_u->error, "xfer");
		return FIO_Q_COMPLETED;
	}

	return ret;
}

static struct io_u *fio_binject_event(struct thread_data *td, int event)
{
	struct binject_data *bd = td->io_ops->data;

	return bd->events[event];
}

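/*
 * Open the binject control device; all map/unmap ioctls go through it.
 */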
static int binject_open_ctl(struct thread_data *td)
{
	int fd;

	fd = open("/dev/binject-ctl", O_RDWR);
	if (fd < 0)
		td_verror(td, errno, "open binject-ctl");

	return fd;
}

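/*
 * Tear down a mapping: close the per-device node and ask the control
 * device to delete the minor.
 */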
static void binject_unmap_dev(struct thread_data *td, struct binject_file *bf)
{
	struct b_ioctl_cmd bic;
	int fdb;

	if (bf->fd >= 0) {
		close(bf->fd);
		bf->fd = -1;
	}

	fdb = binject_open_ctl(td);
	if (fdb < 0)
		return;

	bic.minor = bf->minor;

	if (ioctl(fdb, B_IOCTL_DEL, &bic) < 0)
		td_verror(td, errno, "binject dev unmap");

	close(fdb);
}

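/*
 * Map the job's block device through binject: register its fd with the
 * control device, wait for udev to create /dev/binject<minor>, then open
 * that node for command submission.
 */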
static int binject_map_dev(struct thread_data *td, struct binject_file *bf,
			   int fd)
{
	struct b_ioctl_cmd bic;
	char name[80];
	struct stat sb;
	int fdb, dev_there, loops;

	fdb = binject_open_ctl(td);
	if (fdb < 0)
		return 1;

	bic.fd = fd;

	if (ioctl(fdb, B_IOCTL_ADD, &bic) < 0) {
		td_verror(td, errno, "binject dev map");
		close(fdb);
		return 1;
	}

	bf->minor = bic.minor;

	sprintf(name, "/dev/binject%u", bf->minor);

	/*
	 * Wait for udev to create the node...
	 */
	dev_there = loops = 0;
	do {
		if (!stat(name, &sb)) {
			dev_there = 1;
			break;
		}

		usleep(10000);
	} while (++loops < 100);

	close(fdb);

	if (!dev_there) {
		log_err("fio: timed out waiting for binject dev\n");
		goto err_unmap;
	}

	bf->fd = open(name, O_RDWR);
	if (bf->fd < 0) {
		td_verror(td, errno, "binject dev open");
err_unmap:
		binject_unmap_dev(td, bf);
		return 1;
	}

	return 0;
}

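/*
 * Undo fio_binject_open_file(): unmap the binject device, free the
 * per-file state and close the underlying file.
 */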
static int fio_binject_close_file(struct thread_data *td, struct fio_file *f)
{
	struct binject_file *bf = (struct binject_file *) (uintptr_t) f->engine_data;

	if (bf) {
		binject_unmap_dev(td, bf);
		free(bf);
		f->engine_data = 0;
		return generic_close_file(td, f);
	}

	return 0;
}

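/*
 * Open the file via the generic helper, verify it is a block device,
 * record its sector size and map it through binject.
 */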
static int fio_binject_open_file(struct thread_data *td, struct fio_file *f)
{
	struct binject_file *bf;
	unsigned int bs;
	int ret;

	ret = generic_open_file(td, f);
	if (ret)
		return 1;

	if (f->filetype != FIO_TYPE_BD) {
		log_err("fio: binject only works with block devices\n");
		goto err_close;
	}
	if (ioctl(f->fd, BLKSSZGET, &bs) < 0) {
		td_verror(td, errno, "BLKSSZGET");
		goto err_close;
	}

	bf = malloc(sizeof(*bf));
	bf->bs = bs;
	bf->minor = bf->fd = -1;
	f->engine_data = (uintptr_t) bf;

	if (binject_map_dev(td, bf, f->fd)) {
err_close:
		ret = generic_close_file(td, f);
		return 1;
	}

	return 0;
}

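/*
 * Free everything allocated in fio_binject_init().
 */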
static void fio_binject_cleanup(struct thread_data *td)
{
	struct binject_data *bd = td->io_ops->data;

	if (bd) {
		free(bd->events);
		free(bd->cmds);
		free(bd->fd_flags);
		free(bd->pfds);
		free(bd);
	}
}

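/*
 * Allocate per-job state: command/event buffers sized by iodepth, and
 * pollfd/flag arrays sized by the number of files.
 */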
static int fio_binject_init(struct thread_data *td)
{
	struct binject_data *bd;

	bd = malloc(sizeof(*bd));
	memset(bd, 0, sizeof(*bd));

	bd->cmds = malloc(td->o.iodepth * sizeof(struct b_user_cmd));
	memset(bd->cmds, 0, td->o.iodepth * sizeof(struct b_user_cmd));

	bd->events = malloc(td->o.iodepth * sizeof(struct io_u *));
	memset(bd->events, 0, td->o.iodepth * sizeof(struct io_u *));

	bd->pfds = malloc(sizeof(struct pollfd) * td->o.nr_files);
	memset(bd->pfds, 0, sizeof(struct pollfd) * td->o.nr_files);

	bd->fd_flags = malloc(sizeof(int) * td->o.nr_files);
	memset(bd->fd_flags, 0, sizeof(int) * td->o.nr_files);

	td->io_ops->data = bd;
	return 0;
}

static struct ioengine_ops ioengine = {
	.name		= "binject",
	.version	= FIO_IOOPS_VERSION,
	.init		= fio_binject_init,
	.prep		= fio_binject_prep,
	.queue		= fio_binject_queue,
	.getevents	= fio_binject_getevents,
	.event		= fio_binject_event,
	.cleanup	= fio_binject_cleanup,
	.open_file	= fio_binject_open_file,
	.close_file	= fio_binject_close_file,
	.get_file_size	= generic_get_file_size,
	.flags		= FIO_RAWIO | FIO_BARRIER | FIO_MEMALIGN,
};

#else /* FIO_HAVE_BINJECT */

/*
 * When we have a proper configure system in place, we simply won't build
 * and install this io engine. For now install a crippled version that
 * just complains and fails to load.
 */
static int fio_binject_init(struct thread_data fio_unused *td)
{
	log_err("fio: ioengine binject not available\n");
	return 1;
}

static struct ioengine_ops ioengine = {
	.name		= "binject",
	.version	= FIO_IOOPS_VERSION,
	.init		= fio_binject_init,
};

#endif

static void fio_init fio_binject_register(void)
{
	register_ioengine(&ioengine);
}

static void fio_exit fio_binject_unregister(void)
{
	unregister_ioengine(&ioengine);
}