 * IO engine that uses the Linux binject interface to directly inject
 * bio's to block devices.
18 #ifdef FIO_HAVE_BINJECT
21 struct b_user_cmd *cmds;
33 static void binject_buc_init(struct binject_data *bd, struct io_u *io_u)
35 struct b_user_cmd *buc = &io_u->buc;
37 memset(buc, 0, sizeof(*buc));
38 binject_buc_set_magic(buc);
40 buc->buf = (unsigned long) io_u->xfer_buf;
41 buc->len = io_u->xfer_buflen;
42 buc->offset = io_u->offset;
43 buc->usr_ptr = (unsigned long) io_u;
45 buc->flags = B_FLAG_NOIDLE | B_FLAG_UNPLUG;
/*
 * Return 1 if any of the first 'fds' descriptors in pfds has input
 * pending (POLLIN set in revents), 0 otherwise.
 */
static int pollin_events(struct pollfd *pfds, int fds)
{
	int idx = 0;

	while (idx < fds) {
		if (pfds[idx].revents & POLLIN)
			return 1;
		idx++;
	}

	return 0;
}
60 static unsigned int binject_read_commands(struct thread_data *td, void *p,
63 struct binject_file *bf;
69 for_each_file(td, f, i) {
71 ret = read(bf->fd, p, left * sizeof(struct b_user_cmd));
76 td_verror(td, errno, "read");
80 events += ret / sizeof(struct b_user_cmd);
91 static int fio_binject_getevents(struct thread_data *td, unsigned int min,
92 unsigned int max, struct timespec fio_unused *t)
94 struct binject_data *bd = td->io_ops->data;
95 int left = max, ret, r = 0, ev_index = 0;
97 unsigned int i, events;
99 struct binject_file *bf;
102 * Fill in the file descriptors
104 for_each_file(td, f, i) {
108 * don't block for min events == 0
111 bd->fd_flags[i] = fcntl(bf->fd, F_GETFL);
112 fcntl(bf->fd, F_SETFL, bd->fd_flags[i] | O_NONBLOCK);
114 bd->pfds[i].fd = bf->fd;
115 bd->pfds[i].events = POLLIN;
120 ret = poll(bd->pfds, td->o.nr_files, -1);
124 td_verror(td, errno, "poll");
129 if (pollin_events(bd->pfds, td->o.nr_files))
136 events = binject_read_commands(td, buf, left, &r);
144 for (i = 0; i < events; i++) {
145 struct b_user_cmd *buc = (struct b_user_cmd *) buf + i;
147 bd->events[ev_index] = (struct io_u *) buc->usr_ptr;
153 for_each_file(td, f, i) {
155 fcntl(bf->fd, F_SETFL, bd->fd_flags[i]);
160 assert(ev_index == r);
165 static int fio_binject_doio(struct thread_data *td, struct io_u *io_u)
167 struct b_user_cmd *buc = &io_u->buc;
168 struct binject_file *bf = io_u->file->file_data;
171 ret = write(bf->fd, buc, sizeof(*buc));
178 static int fio_binject_prep(struct thread_data *td, struct io_u *io_u)
180 struct binject_data *bd = td->io_ops->data;
181 struct b_user_cmd *buc = &io_u->buc;
182 struct binject_file *bf = io_u->file->file_data;
184 if (io_u->xfer_buflen & (bf->bs - 1)) {
185 log_err("read/write not sector aligned\n");
189 if (io_u->ddir == DDIR_READ) {
190 binject_buc_init(bd, io_u);
191 buc->type = B_TYPE_READ;
192 } else if (io_u->ddir == DDIR_WRITE) {
193 binject_buc_init(bd, io_u);
194 buc->type = B_TYPE_WRITE;
195 } else if (io_u->ddir == DDIR_TRIM) {
196 binject_buc_init(bd, io_u);
197 buc->type = B_TYPE_DISCARD;
205 static int fio_binject_queue(struct thread_data *td, struct io_u *io_u)
209 fio_ro_check(td, io_u);
211 ret = fio_binject_doio(td, io_u);
217 td_verror(td, io_u->error, "xfer");
218 return FIO_Q_COMPLETED;
224 static struct io_u *fio_binject_event(struct thread_data *td, int event)
226 struct binject_data *bd = td->io_ops->data;
228 return bd->events[event];
231 static void binject_unmap_dev(struct thread_data *td, struct binject_file *bf)
233 struct b_ioctl_cmd bic;
241 fdb = open("/dev/binject-ctl", O_RDWR);
243 td_verror(td, errno, "open binject-ctl");
247 bic.minor = bf->minor;
249 if (ioctl(fdb, 1, &bic) < 0) {
250 td_verror(td, errno, "binject dev unmap");
258 static int binject_map_dev(struct thread_data *td, struct binject_file *bf,
261 struct b_ioctl_cmd bic;
264 int fdb, dev_there, loops;
266 fdb = open("/dev/binject-ctl", O_RDWR);
268 td_verror(td, errno, "binject ctl open");
274 if (ioctl(fdb, 0, &bic) < 0) {
275 td_verror(td, errno, "binject dev map");
280 bf->minor = bic.minor;
282 sprintf(name, "/dev/binject%u", bf->minor);
285 * Wait for udev to create the node...
287 dev_there = loops = 0;
289 if (!stat(name, &sb)) {
295 } while (++loops < 100);
300 log_err("fio: timed out waiting for binject dev\n");
304 bf->fd = open(name, O_RDWR);
306 td_verror(td, errno, "binject dev open");
308 binject_unmap_dev(td, bf);
315 static int fio_binject_close_file(struct thread_data *td, struct fio_file *f)
317 struct binject_file *bf = f->file_data;
320 binject_unmap_dev(td, bf);
323 return generic_close_file(td, f);
329 static int fio_binject_open_file(struct thread_data *td, struct fio_file *f)
331 struct binject_file *bf;
335 ret = generic_open_file(td, f);
339 if (f->filetype != FIO_TYPE_BD) {
340 log_err("fio: binject only works with block devices\n");
343 if (ioctl(f->fd, BLKSSZGET, &bs) < 0) {
344 td_verror(td, errno, "BLKSSZGET");
348 bf = malloc(sizeof(*bf));
350 bf->minor = bf->fd = -1;
353 if (binject_map_dev(td, bf, f->fd)) {
355 ret = generic_close_file(td, f);
362 static void fio_binject_cleanup(struct thread_data *td)
364 struct binject_data *bd = td->io_ops->data;
375 static int fio_binject_init(struct thread_data *td)
377 struct binject_data *bd;
379 bd = malloc(sizeof(*bd));
380 memset(bd, 0, sizeof(*bd));
382 bd->cmds = malloc(td->o.iodepth * sizeof(struct b_user_cmd));
383 memset(bd->cmds, 0, td->o.iodepth * sizeof(struct b_user_cmd));
385 bd->events = malloc(td->o.iodepth * sizeof(struct io_u *));
386 memset(bd->events, 0, td->o.iodepth * sizeof(struct io_u *));
388 bd->pfds = malloc(sizeof(struct pollfd) * td->o.nr_files);
389 memset(bd->pfds, 0, sizeof(struct pollfd) * td->o.nr_files);
391 bd->fd_flags = malloc(sizeof(int) * td->o.nr_files);
392 memset(bd->fd_flags, 0, sizeof(int) * td->o.nr_files);
394 td->io_ops->data = bd;
398 static struct ioengine_ops ioengine = {
400 .version = FIO_IOOPS_VERSION,
401 .init = fio_binject_init,
402 .prep = fio_binject_prep,
403 .queue = fio_binject_queue,
404 .getevents = fio_binject_getevents,
405 .event = fio_binject_event,
406 .cleanup = fio_binject_cleanup,
407 .open_file = fio_binject_open_file,
408 .close_file = fio_binject_close_file,
409 .get_file_size = generic_get_file_size,
413 #else /* FIO_HAVE_BINJECT */
 * When we have a proper configure system in place, we simply won't build
 * and install this io engine. For now install a crippled version that
 * just complains and fails to load.
420 static int fio_binject_init(struct thread_data fio_unused *td)
422 log_err("fio: ioengine binject not available\n");
426 static struct ioengine_ops ioengine = {
428 .version = FIO_IOOPS_VERSION,
429 .init = fio_binject_init,
/* Constructor (fio_init): registers this engine with fio at load time. */
static void fio_init fio_binject_register(void)
{
	register_ioengine(&ioengine);
}
/* Destructor (fio_exit): unregisters the engine at unload time. */
static void fio_exit fio_binject_unregister(void)
{
	unregister_ioengine(&ioengine);
}