/*
 * IO engine that uses the Linux binject interface to directly inject
 * bio's to block devices.
 */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <assert.h>
#include <string.h>
#include <fcntl.h>
#include <poll.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/ioctl.h>

#include "../fio.h"
#ifdef FIO_HAVE_BINJECT

struct binject_data {
	struct b_user_cmd *cmds;
	struct io_u **events;
	struct pollfd *pfds;
	int *fd_flags;
};

struct binject_file {
	unsigned int bs;
	int minor;
	int fd;
};
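
/*
 * Fill out a b_user_cmd for this io_u: buffer, length, offset, and a
 * user pointer that lets us map a completed command back to its io_u.
 */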
static void binject_buc_init(struct binject_data *bd, struct io_u *io_u)
{
	struct b_user_cmd *buc = &io_u->buc;

	memset(buc, 0, sizeof(*buc));
	binject_buc_set_magic(buc);

	buc->buf = (unsigned long) io_u->xfer_buf;
	buc->len = io_u->xfer_buflen;
	buc->offset = io_u->offset;
	buc->usr_ptr = (unsigned long) io_u;

	buc->flags = B_FLAG_NOIDLE | B_FLAG_UNPLUG;
}
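
/*
 * Return whether any of the polled descriptors have data to read.
 */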
static int pollin_events(struct pollfd *pfds, int fds)
{
	int i;

	for (i = 0; i < fds; i++)
		if (pfds[i].revents & POLLIN)
			return 1;

	return 0;
}
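
/*
 * Reap completions: read b_user_cmd structures back from each mapped
 * binject device into 'p', returning how many commands were read. On a
 * read error other than EAGAIN, the negative errno is stored in *err.
 */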
static unsigned int binject_read_commands(struct thread_data *td, void *p,
					  int left, int *err)
{
	struct binject_file *bf;
	struct fio_file *f;
	unsigned int i;
	int ret, events = 0;

	for_each_file(td, f, i) {
		bf = (struct binject_file *) (uintptr_t) f->engine_data;
		ret = read(bf->fd, p, left * sizeof(struct b_user_cmd));
		if (ret < 0) {
			if (errno == EAGAIN)
				continue;
			*err = -errno;
			td_verror(td, errno, "read");
			break;
		}
		p += ret;
		events += ret / sizeof(struct b_user_cmd);
	}

	return events;
}
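
/*
 * Event reaping: poll the per-file binject descriptors until at least
 * 'min' completions have been read, mapping each completed b_user_cmd
 * back to its io_u via the stashed user pointer.
 */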
static int fio_binject_getevents(struct thread_data *td, unsigned int min,
				 unsigned int max,
				 const struct timespec fio_unused *t)
{
	struct binject_data *bd = td->io_ops->data;
	int left = max, ret, r = 0, ev_index = 0;
	void *buf = bd->cmds;
	unsigned int i, events;
	struct fio_file *f;
	struct binject_file *bf;

	/*
	 * Fill in the file descriptors
	 */
	for_each_file(td, f, i) {
		bf = (struct binject_file *) (uintptr_t) f->engine_data;

		/*
		 * don't block for min events == 0
		 */
		if (!min)
			bd->fd_flags[i] = fio_set_fd_nonblocking(bf->fd, "binject");
		else
			bd->fd_flags[i] = -1;

		bd->pfds[i].fd = bf->fd;
		bd->pfds[i].events = POLLIN;
	}

	while (left) {
		while (!min) {
			ret = poll(bd->pfds, td->o.nr_files, -1);
			if (ret < 0) {
				if (!r)
					r = -errno;
				td_verror(td, errno, "poll");
				break;
			} else if (!ret)
				continue;

			if (pollin_events(bd->pfds, td->o.nr_files))
				break;
		}

		if (r < 0)
			break;

		events = binject_read_commands(td, buf, left, &r);
		if (r < 0)
			break;

		left -= events;
		r += events;

		for (i = 0; i < events; i++) {
			struct b_user_cmd *buc = (struct b_user_cmd *) buf + i;

			bd->events[ev_index++] = (struct io_u *) (unsigned long) buc->usr_ptr;
		}
	}

	/*
	 * Restore the original flags on any descriptors we made non-blocking.
	 */
	if (!min) {
		for_each_file(td, f, i) {
			bf = (struct binject_file *) (uintptr_t) f->engine_data;

			if (bd->fd_flags[i] == -1)
				continue;

			if (fcntl(bf->fd, F_SETFL, bd->fd_flags[i]) < 0)
				log_err("fio: binject failed to restore fcntl flags: %s\n",
					strerror(errno));
		}
	}

	if (r > 0)
		assert(ev_index == r);

	return r;
}
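
/*
 * Submission is a write of the command descriptor to the binject device
 * node; the data transfer itself then happens asynchronously in the kernel.
 */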
static int fio_binject_doio(struct thread_data *td, struct io_u *io_u)
{
	struct b_user_cmd *buc = &io_u->buc;
	struct binject_file *bf = (struct binject_file *) (uintptr_t)
					io_u->file->engine_data;
	int ret;

	ret = write(bf->fd, buc, sizeof(*buc));
	if (ret < 0)
		return ret;

	return FIO_Q_QUEUED;
}
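
/*
 * Per-io_u setup: reject transfers that aren't a multiple of the device
 * sector size, then translate the fio data direction into a binject
 * command type.
 */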
static int fio_binject_prep(struct thread_data *td, struct io_u *io_u)
{
	struct binject_data *bd = td->io_ops->data;
	struct b_user_cmd *buc = &io_u->buc;
	struct binject_file *bf = (struct binject_file *) (uintptr_t)
					io_u->file->engine_data;

	if (io_u->xfer_buflen & (bf->bs - 1)) {
		log_err("read/write not sector aligned\n");
		return EINVAL;
	}

	if (io_u->ddir == DDIR_READ) {
		binject_buc_init(bd, io_u);
		buc->type = B_TYPE_READ;
	} else if (io_u->ddir == DDIR_WRITE) {
		binject_buc_init(bd, io_u);
		if (io_u->flags & IO_U_F_BARRIER)
			buc->type = B_TYPE_WRITEBARRIER;
		else
			buc->type = B_TYPE_WRITE;
	} else if (io_u->ddir == DDIR_TRIM) {
		binject_buc_init(bd, io_u);
		buc->type = B_TYPE_DISCARD;
	} else {
		assert(0);
	}

	return 0;
}
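
/*
 * Queue an io_u: a failed submission completes immediately with the
 * error set, otherwise the command stays in flight until getevents
 * reaps it.
 */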
static int fio_binject_queue(struct thread_data *td, struct io_u *io_u)
{
	int ret;

	fio_ro_check(td, io_u);

	ret = fio_binject_doio(td, io_u);
	if (ret < 0)
		io_u->error = errno;

	if (io_u->error) {
		td_verror(td, io_u->error, "xfer");
		return FIO_Q_COMPLETED;
	}

	return ret;
}
static struct io_u *fio_binject_event(struct thread_data *td, int event)
{
	struct binject_data *bd = td->io_ops->data;

	return bd->events[event];
}
static int binject_open_ctl(struct thread_data *td)
{
	int fd;

	fd = open("/dev/binject-ctl", O_RDWR);
	if (fd < 0)
		td_verror(td, errno, "open binject-ctl");

	return fd;
}
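
/*
 * Tear down a binject mapping: ask the control device to delete the
 * per-device node we added in binject_map_dev().
 */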
static void binject_unmap_dev(struct thread_data *td, struct binject_file *bf)
{
	struct b_ioctl_cmd bic;
	int fdb;

	if (bf->fd >= 0) {
		close(bf->fd);
		bf->fd = -1;
	}

	fdb = binject_open_ctl(td);
	if (fdb < 0)
		return;

	bic.minor = bf->minor;

	if (ioctl(fdb, B_IOCTL_DEL, &bic) < 0)
		td_verror(td, errno, "binject dev unmap");

	close(fdb);
}
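
/*
 * Map a block device through binject: hand the open device fd to the
 * control node, which creates a /dev/binjectN node that we then open
 * for command submission and completion.
 */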
static int binject_map_dev(struct thread_data *td, struct binject_file *bf,
			   int fd)
{
	struct b_ioctl_cmd bic;
	char name[80];
	struct stat sb;
	int fdb, dev_there, loops;

	fdb = binject_open_ctl(td);
	if (fdb < 0)
		return 1;

	bic.fd = fd;

	if (ioctl(fdb, B_IOCTL_ADD, &bic) < 0) {
		td_verror(td, errno, "binject dev map");
		close(fdb);
		return 1;
	}

	bf->minor = bic.minor;

	sprintf(name, "/dev/binject%u", bf->minor);

	/*
	 * Wait for udev to create the node...
	 */
	dev_there = loops = 0;
	do {
		if (!stat(name, &sb)) {
			dev_there = 1;
			break;
		}
		usleep(10000);
	} while (++loops < 100);

	close(fdb);

	if (!dev_there) {
		log_err("fio: timed out waiting for binject dev\n");
		return 1;
	}

	bf->fd = open(name, O_RDWR);
	if (bf->fd < 0) {
		td_verror(td, errno, "binject dev open");
		binject_unmap_dev(td, bf);
		return 1;
	}

	return 0;
}
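
/*
 * Close: unmap the binject node before closing the underlying device.
 */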
static int fio_binject_close_file(struct thread_data *td, struct fio_file *f)
{
	struct binject_file *bf = (struct binject_file *) (uintptr_t) f->engine_data;

	if (bf) {
		binject_unmap_dev(td, bf);
		free(bf);
		f->engine_data = 0;
	}

	return generic_close_file(td, f);
}
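
/*
 * Open the target: it must be a block device, since we need its sector
 * size (BLKSSZGET) and a device fd to map through binject.
 */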
static int fio_binject_open_file(struct thread_data *td, struct fio_file *f)
{
	struct binject_file *bf;
	unsigned int bs;
	int ret;

	ret = generic_open_file(td, f);
	if (ret)
		return 1;

	if (f->filetype != FIO_TYPE_BD) {
		log_err("fio: binject only works with block devices\n");
		goto err_close;
	}
	if (ioctl(f->fd, BLKSSZGET, &bs) < 0) {
		td_verror(td, errno, "BLKSSZGET");
		goto err_close;
	}

	bf = malloc(sizeof(*bf));
	bf->bs = bs;
	bf->minor = bf->fd = -1;
	f->engine_data = (uintptr_t) bf;

	if (binject_map_dev(td, bf, f->fd)) {
err_close:
		ret = generic_close_file(td, f);
		return 1;
	}

	return 0;
}
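
/*
 * Free everything allocated in fio_binject_init().
 */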
static void fio_binject_cleanup(struct thread_data *td)
{
	struct binject_data *bd = td->io_ops->data;

	if (bd) {
		free(bd->events);
		free(bd->cmds);
		free(bd->fd_flags);
		free(bd->pfds);
		free(bd);
	}
}
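
/*
 * Allocate per-thread state: one command slot and one event slot per
 * queued io_u, plus pollfd and saved-flag arrays sized to nr_files.
 */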
static int fio_binject_init(struct thread_data *td)
{
	struct binject_data *bd;

	bd = malloc(sizeof(*bd));
	memset(bd, 0, sizeof(*bd));

	bd->cmds = malloc(td->o.iodepth * sizeof(struct b_user_cmd));
	memset(bd->cmds, 0, td->o.iodepth * sizeof(struct b_user_cmd));

	bd->events = malloc(td->o.iodepth * sizeof(struct io_u *));
	memset(bd->events, 0, td->o.iodepth * sizeof(struct io_u *));

	bd->pfds = malloc(sizeof(struct pollfd) * td->o.nr_files);
	memset(bd->pfds, 0, sizeof(struct pollfd) * td->o.nr_files);

	bd->fd_flags = malloc(sizeof(int) * td->o.nr_files);
	memset(bd->fd_flags, 0, sizeof(int) * td->o.nr_files);

	td->io_ops->data = bd;
	return 0;
}
static struct ioengine_ops ioengine = {
	.name		= "binject",
	.version	= FIO_IOOPS_VERSION,
	.init		= fio_binject_init,
	.prep		= fio_binject_prep,
	.queue		= fio_binject_queue,
	.getevents	= fio_binject_getevents,
	.event		= fio_binject_event,
	.cleanup	= fio_binject_cleanup,
	.open_file	= fio_binject_open_file,
	.close_file	= fio_binject_close_file,
	.get_file_size	= generic_get_file_size,
	.flags		= FIO_RAWIO | FIO_BARRIER | FIO_MEMALIGN,
};
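
/*
 * A minimal example job file for this engine. This is a sketch: the
 * device path below is a placeholder, and it assumes fio was built with
 * FIO_HAVE_BINJECT and that /dev/binject-ctl exists. The block size must
 * be a multiple of the device sector size (see fio_binject_prep()):
 *
 *	[binject-randread]
 *	ioengine=binject
 *	filename=/dev/sdX
 *	rw=randread
 *	bs=4k
 *	iodepth=32
 */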
#else /* FIO_HAVE_BINJECT */

/*
 * When we have a proper configure system in place, we simply won't build
 * and install this io engine. For now, install a crippled version that
 * just complains and fails to load.
 */
static int fio_binject_init(struct thread_data fio_unused *td)
{
	log_err("fio: ioengine binject not available\n");
	return 1;
}
static struct ioengine_ops ioengine = {
	.name		= "binject",
	.version	= FIO_IOOPS_VERSION,
	.init		= fio_binject_init,
};

#endif
static void fio_init fio_binject_register(void)
{
	register_ioengine(&ioengine);
}
static void fio_exit fio_binject_unregister(void)
{
	unregister_ioengine(&ioengine);
}