/*
 * binject engine
 *
 * IO engine that uses the Linux binject interface to directly inject
 * bios to block devices.
 *
 */
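
/*
 * For reference, a minimal job file using this engine might look like the
 * following (the device path is only an example; the binject kernel module
 * must be loaded and the target must be a real block device):
 *
 *	[binject-test]
 *	ioengine=binject
 *	filename=/dev/sdb
 *	rw=randread
 *	bs=4k
 *	iodepth=32
 */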
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <assert.h>
#include <string.h>
#include <sys/poll.h>
#include <sys/types.h>
#include <sys/stat.h>

#include "../fio.h"

#ifdef FIO_HAVE_BINJECT

struct binject_data {
	struct b_user_cmd *cmds;	/* command buffer, one per queued io_u */
	struct io_u **events;		/* completed io_us, returned by ->event() */
	struct pollfd *pfds;		/* one pollfd per open device node */
	int *fd_flags;			/* saved fcntl flags per file */
};

struct binject_file {
	unsigned int bs;		/* device logical sector size */
	int minor;			/* minor of the mapped /dev/binjectN node */
	int fd;				/* fd of the mapped device node */
};

static void binject_buc_init(struct binject_data *bd, struct io_u *io_u)
{
	struct b_user_cmd *buc = &io_u->buc;

	memset(buc, 0, sizeof(*buc));
	binject_buc_set_magic(buc);

	buc->buf = (unsigned long) io_u->xfer_buf;
	buc->len = io_u->xfer_buflen;
	buc->offset = io_u->offset;
	/*
	 * Stash the io_u pointer in the command, so the completion can be
	 * matched back to it in getevents.
	 */
	buc->usr_ptr = (unsigned long) io_u;

	buc->flags = B_FLAG_NOIDLE | B_FLAG_UNPLUG;
}

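/*
 * Command lifecycle: prep fills in a b_user_cmd, queue submits it with a
 * write(2) to the mapped /dev/binjectN node, and getevents reads completed
 * commands back, using usr_ptr to recover the originating io_u.
 */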
static int pollin_events(struct pollfd *pfds, int fds)
{
	int i;

	for (i = 0; i < fds; i++)
		if (pfds[i].revents & POLLIN)
			return 1;

	return 0;
}

static unsigned int binject_read_commands(struct thread_data *td, void *p,
					  int left, int *err)
{
	struct binject_file *bf;
	struct fio_file *f;
	int i, ret, events = 0;

	for_each_file(td, f, i) {
		bf = (struct binject_file *) (uintptr_t) f->engine_data;
		ret = read(bf->fd, p, left * sizeof(struct b_user_cmd));
		if (ret < 0) {
			if (errno == EAGAIN)
				continue;
			*err = -errno;
			td_verror(td, errno, "read");
			break;
		} else if (ret) {
			p += ret;
			events += ret / sizeof(struct b_user_cmd);
		}
	}

	return events;
}

static int fio_binject_getevents(struct thread_data *td, unsigned int min,
				 unsigned int max, struct timespec fio_unused *t)
{
	struct binject_data *bd = td->io_ops->data;
	int left = max, ret, r = 0, ev_index = 0;
	void *buf = bd->cmds;
	unsigned int i, events;
	struct fio_file *f;
	struct binject_file *bf;

	/*
	 * Fill in the file descriptors
	 */
	for_each_file(td, f, i) {
		bf = (struct binject_file *) (uintptr_t) f->engine_data;

		/*
		 * don't block for min events == 0
		 */
		if (!min) {
			bd->fd_flags[i] = fcntl(bf->fd, F_GETFL);
			fcntl(bf->fd, F_SETFL, bd->fd_flags[i] | O_NONBLOCK);
		}
		bd->pfds[i].fd = bf->fd;
		bd->pfds[i].events = POLLIN;
	}

	while (left) {
		ret = poll(bd->pfds, td->o.nr_files, -1);
		if (ret < 0) {
			if (!r) {
				r = -errno;
				td_verror(td, errno, "poll");
			}
			break;
		} else if (!ret)
			continue;

		if (pollin_events(bd->pfds, td->o.nr_files))
			break;
	}

	if (r < 0)
		goto out;

	events = binject_read_commands(td, buf, left, &r);
	if (r < 0)
		goto out;

	left -= events;
	r += events;

	for (i = 0; i < events; i++) {
		struct b_user_cmd *buc = (struct b_user_cmd *) buf + i;

		bd->events[ev_index] = (struct io_u *) (unsigned long) buc->usr_ptr;
		ev_index++;
	}

out:
	if (!min) {
		for_each_file(td, f, i) {
			bf = (struct binject_file *) (uintptr_t) f->engine_data;
			fcntl(bf->fd, F_SETFL, bd->fd_flags[i]);
		}
	}

	if (r > 0)
		assert(ev_index == r);

	return r;
}

static int fio_binject_doio(struct thread_data *td, struct io_u *io_u)
{
	struct b_user_cmd *buc = &io_u->buc;
	struct binject_file *bf = (struct binject_file *) (uintptr_t) io_u->file->engine_data;
	int ret;

	ret = write(bf->fd, buc, sizeof(*buc));
	if (ret < 0)
		return ret;

	return FIO_Q_QUEUED;
}

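/*
 * Note that submission is a single write(2) of sizeof(struct b_user_cmd)
 * to the mapped device node; the driver injects the bio and completes the
 * command asynchronously, so a successful write only means "queued".
 */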
static int fio_binject_prep(struct thread_data *td, struct io_u *io_u)
{
	struct binject_data *bd = td->io_ops->data;
	struct b_user_cmd *buc = &io_u->buc;
	struct binject_file *bf = (struct binject_file *) (uintptr_t) io_u->file->engine_data;

	if (io_u->xfer_buflen & (bf->bs - 1)) {
		log_err("read/write not sector aligned\n");
		return EINVAL;
	}

	if (io_u->ddir == DDIR_READ) {
		binject_buc_init(bd, io_u);
		buc->type = B_TYPE_READ;
	} else if (io_u->ddir == DDIR_WRITE) {
		binject_buc_init(bd, io_u);
		if (io_u->flags & IO_U_F_BARRIER)
			buc->type = B_TYPE_WRITEBARRIER;
		else
			buc->type = B_TYPE_WRITE;
	} else if (io_u->ddir == DDIR_TRIM) {
		binject_buc_init(bd, io_u);
		buc->type = B_TYPE_DISCARD;
	} else {
		assert(0);
	}

	return 0;
}

static int fio_binject_queue(struct thread_data *td, struct io_u *io_u)
{
	int ret;

	fio_ro_check(td, io_u);

	ret = fio_binject_doio(td, io_u);
	if (ret < 0)
		io_u->error = errno;

	if (io_u->error) {
		td_verror(td, io_u->error, "xfer");
		return FIO_Q_COMPLETED;
	}

	return ret;
}

static struct io_u *fio_binject_event(struct thread_data *td, int event)
{
	struct binject_data *bd = td->io_ops->data;

	return bd->events[event];
}

static int binject_open_ctl(struct thread_data *td)
{
	int fd;

	fd = open("/dev/binject-ctl", O_RDWR);
	if (fd < 0)
		td_verror(td, errno, "open binject-ctl");

	return fd;
}

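/*
 * Device mapping: a B_IOCTL_ADD on /dev/binject-ctl maps an open block
 * device fd to a new binject minor, which udev then exposes as a
 * /dev/binject<minor> character node. B_IOCTL_DEL undoes the mapping.
 */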
static void binject_unmap_dev(struct thread_data *td, struct binject_file *bf)
{
	struct b_ioctl_cmd bic;
	int fdb;

	if (bf->fd >= 0) {
		close(bf->fd);
		bf->fd = -1;
	}

	fdb = binject_open_ctl(td);
	if (fdb < 0)
		return;

	bic.minor = bf->minor;

	if (ioctl(fdb, B_IOCTL_DEL, &bic) < 0)
		td_verror(td, errno, "binject dev unmap");

	close(fdb);
}

static int binject_map_dev(struct thread_data *td, struct binject_file *bf,
			   int fd)
{
	struct b_ioctl_cmd bic;
	char name[80];
	struct stat sb;
	int fdb, dev_there, loops;

	fdb = binject_open_ctl(td);
	if (fdb < 0)
		return 1;

	bic.fd = fd;

	if (ioctl(fdb, B_IOCTL_ADD, &bic) < 0) {
		td_verror(td, errno, "binject dev map");
		close(fdb);
		return 1;
	}

	bf->minor = bic.minor;

	sprintf(name, "/dev/binject%u", bf->minor);

	/*
	 * Wait for udev to create the node...
	 */
	dev_there = loops = 0;
	do {
		if (!stat(name, &sb)) {
			dev_there = 1;
			break;
		}

		usleep(10000);
	} while (++loops < 100);

	close(fdb);

	if (!dev_there) {
		log_err("fio: timed out waiting for binject dev\n");
		goto err_unmap;
	}

	bf->fd = open(name, O_RDWR);
	if (bf->fd < 0) {
		td_verror(td, errno, "binject dev open");
err_unmap:
		binject_unmap_dev(td, bf);
		return 1;
	}

	return 0;
}

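/*
 * As an illustrative example, after a successful mapping the device nodes
 * would look something like this (the minor number will vary):
 *
 *	$ ls /dev/binject*
 *	/dev/binject-ctl  /dev/binject0
 */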
static int fio_binject_close_file(struct thread_data *td, struct fio_file *f)
{
	struct binject_file *bf = (struct binject_file *) (uintptr_t) f->engine_data;

	if (bf) {
		binject_unmap_dev(td, bf);
		free(bf);
		f->engine_data = 0;
		return generic_close_file(td, f);
	}

	return 0;
}

static int fio_binject_open_file(struct thread_data *td, struct fio_file *f)
{
	struct binject_file *bf;
	unsigned int bs;
	int ret;

	ret = generic_open_file(td, f);
	if (ret)
		return 1;

	if (f->filetype != FIO_TYPE_BD) {
		log_err("fio: binject only works with block devices\n");
		goto err_close;
	}
	if (ioctl(f->fd, BLKSSZGET, &bs) < 0) {
		td_verror(td, errno, "BLKSSZGET");
		goto err_close;
	}

	bf = malloc(sizeof(*bf));
	bf->bs = bs;
	bf->minor = bf->fd = -1;
	f->engine_data = (uintptr_t) bf;

	if (binject_map_dev(td, bf, f->fd)) {
err_close:
		ret = generic_close_file(td, f);
		return 1;
	}

	return 0;
}

static void fio_binject_cleanup(struct thread_data *td)
{
	struct binject_data *bd = td->io_ops->data;

	if (bd) {
		free(bd->events);
		free(bd->cmds);
		free(bd->fd_flags);
		free(bd->pfds);
		free(bd);
	}
}

static int fio_binject_init(struct thread_data *td)
{
	struct binject_data *bd;

	bd = malloc(sizeof(*bd));
	memset(bd, 0, sizeof(*bd));

	bd->cmds = malloc(td->o.iodepth * sizeof(struct b_user_cmd));
	memset(bd->cmds, 0, td->o.iodepth * sizeof(struct b_user_cmd));

	bd->events = malloc(td->o.iodepth * sizeof(struct io_u *));
	memset(bd->events, 0, td->o.iodepth * sizeof(struct io_u *));

	bd->pfds = malloc(sizeof(struct pollfd) * td->o.nr_files);
	memset(bd->pfds, 0, sizeof(struct pollfd) * td->o.nr_files);

	bd->fd_flags = malloc(sizeof(int) * td->o.nr_files);
	memset(bd->fd_flags, 0, sizeof(int) * td->o.nr_files);

	td->io_ops->data = bd;
	return 0;
}

static struct ioengine_ops ioengine = {
	.name		= "binject",
	.version	= FIO_IOOPS_VERSION,
	.init		= fio_binject_init,
	.prep		= fio_binject_prep,
	.queue		= fio_binject_queue,
	.getevents	= fio_binject_getevents,
	.event		= fio_binject_event,
	.cleanup	= fio_binject_cleanup,
	.open_file	= fio_binject_open_file,
	.close_file	= fio_binject_close_file,
	.get_file_size	= generic_get_file_size,
	.flags		= FIO_RAWIO | FIO_BARRIER | FIO_MEMALIGN,
};

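/*
 * Flag notes: FIO_BARRIER is set because prep maps IO_U_F_BARRIER to
 * B_TYPE_WRITEBARRIER, and FIO_RAWIO/FIO_MEMALIGN reflect that the driver
 * wants sector-aligned, suitably aligned buffers (prep rejects transfer
 * lengths that are not a multiple of the device sector size).
 */
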
#else /* FIO_HAVE_BINJECT */

/*
 * When we have a proper configure system in place, we simply won't build
 * and install this io engine. For now, install a crippled version that
 * just complains and fails to load.
 */
static int fio_binject_init(struct thread_data fio_unused *td)
{
	log_err("fio: ioengine binject not available\n");
	return 1;
}

static struct ioengine_ops ioengine = {
	.name		= "binject",
	.version	= FIO_IOOPS_VERSION,
	.init		= fio_binject_init,
};

#endif

static void fio_init fio_binject_register(void)
{
	register_ioengine(&ioengine);
}

static void fio_exit fio_binject_unregister(void)
{
	unregister_ioengine(&ioengine);
}