/*
 * binject engine
 *
 * IO engine that uses the Linux binject interface to directly inject
 * bio's to block devices.
 *
 */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <assert.h>
#include <string.h>
#include <sys/poll.h>

#include "../fio.h"

#ifdef FIO_HAVE_BINJECT
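/*
 * Example job, assuming the binject kernel module is loaded and has
 * created /dev/binject-ctl (the device path below is illustrative):
 *
 *	[binject-test]
 *	ioengine=binject
 *	filename=/dev/sdb
 *	rw=randread
 *	iodepth=32
 */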
struct binject_data {
	struct b_user_cmd *cmds;
	struct io_u **events;
	struct pollfd *pfds;
	int *fd_flags;
};

struct binject_file {
	unsigned int bs;
	int minor;
	int fd;
};
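/*
 * Fill in a b_user_cmd from an io_u: buffer address, transfer length,
 * device offset, and a usr_ptr cookie so a completed command can be
 * matched back to its originating io_u.
 */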
static void binject_buc_init(struct binject_data *bd, struct io_u *io_u)
{
	struct b_user_cmd *buc = &io_u->buc;

	memset(buc, 0, sizeof(*buc));
	binject_buc_set_magic(buc);

	buc->buf = (unsigned long) io_u->xfer_buf;
	buc->len = io_u->xfer_buflen;
	buc->offset = io_u->offset;
	buc->usr_ptr = (unsigned long) io_u;

	buc->flags = B_FLAG_NOIDLE | B_FLAG_UNPLUG;
	assert(buc->buf);
}
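/*
 * Return 1 if any of the polled binject devices has completions to reap.
 */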
static int pollin_events(struct pollfd *pfds, int fds)
{
	int i;

	for (i = 0; i < fds; i++)
		if (pfds[i].revents & POLLIN)
			return 1;

	return 0;
}
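/*
 * Reap completed commands: poll only when we must wait for a minimum
 * number of events, then read completed b_user_cmds back from each
 * binject device and map them to io_u's via the stored usr_ptr cookie.
 * With min == 0 the fds are flipped to non-blocking so we never stall.
 */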
static int fio_binject_getevents(struct thread_data *td, unsigned int min,
				 unsigned int max, struct timespec fio_unused *t)
{
	struct binject_data *bd = td->io_ops->data;
	int left = max, ret, r = 0, ev_index = 0;
	void *buf = bd->cmds;
	unsigned int i, events;
	struct fio_file *f;
	struct binject_file *bf;

	/*
	 * Fill in the file descriptors
	 */
	for_each_file(td, f, i) {
		bf = (struct binject_file *) f->file_data;

		/*
		 * don't block for min events == 0
		 */
		if (!min) {
			bd->fd_flags[i] = fcntl(bf->fd, F_GETFL);
			fcntl(bf->fd, F_SETFL, bd->fd_flags[i] | O_NONBLOCK);
		}
		bd->pfds[i].fd = bf->fd;
		bd->pfds[i].events = POLLIN;
	}

	while (left) {
		void *p;

		do {
			if (!min)
				break;

			ret = poll(bd->pfds, td->o.nr_files, -1);
			if (ret < 0) {
				if (!r)
					r = -errno;
				td_verror(td, errno, "poll");
				break;
			} else if (!ret)
				continue;

			if (pollin_events(bd->pfds, td->o.nr_files))
				break;
		} while (1);

		if (r < 0)
			break;

re_read:
		p = buf;
		events = 0;
		for_each_file(td, f, i) {
			bf = (struct binject_file *) f->file_data;

			ret = read(bf->fd, p, left * sizeof(struct b_user_cmd));
			if (ret < 0) {
				if (errno == EAGAIN)
					continue;
				r = -errno;
				td_verror(td, errno, "read");
				break;
			} else if (ret) {
				p += ret;
				events += ret / sizeof(struct b_user_cmd);
			}
		}

		if (r < 0)
			break;
		if (!events) {
			usleep(1000);
			goto re_read;
		}

		left -= events;
		r += events;

		for (i = 0; i < events; i++) {
			struct b_user_cmd *buc = (struct b_user_cmd *) buf + i;

			bd->events[ev_index] = (struct io_u *) buc->usr_ptr;
			ev_index++;
		}
	}

	if (!min) {
		for_each_file(td, f, i) {
			bf = (struct binject_file *) f->file_data;
			fcntl(bf->fd, F_SETFL, bd->fd_flags[i]);
		}
	}

	if (r > 0)
		assert(ev_index == r);

	return r;
}
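/*
 * Submission is simply a write of the prepared command to the binject
 * device node; completion is reaped later in ->getevents().
 */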
static int fio_binject_doio(struct thread_data *td, struct io_u *io_u)
{
	struct b_user_cmd *buc = &io_u->buc;
	struct fio_file *f = io_u->file;
	struct binject_file *bf = (struct binject_file *) f->file_data;
	int ret;

	ret = write(bf->fd, buc, sizeof(*buc));
	if (ret < 0)
		return ret;

	return FIO_Q_QUEUED;
}
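/*
 * Prepare an io_u: enforce alignment against the device's logical sector
 * size and translate the fio data direction to a binject command type.
 */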
static int fio_binject_prep(struct thread_data *td, struct io_u *io_u)
{
	struct binject_data *bd = td->io_ops->data;
	struct b_user_cmd *buc = &io_u->buc;
	struct binject_file *bf = (struct binject_file *) io_u->file->file_data;

	if (io_u->xfer_buflen & (bf->bs - 1)) {
		log_err("read/write not sector aligned\n");
		return EINVAL;
	}

	if (io_u->ddir == DDIR_READ) {
		binject_buc_init(bd, io_u);
		buc->type = B_TYPE_READ;
	} else if (io_u->ddir == DDIR_WRITE) {
		binject_buc_init(bd, io_u);
		buc->type = B_TYPE_WRITE;
	} else if (io_u->ddir == DDIR_TRIM) {
		binject_buc_init(bd, io_u);
		buc->type = B_TYPE_DISCARD;
	} else {
		assert(0);
	}

	return 0;
}
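/*
 * Queueing is asynchronous: a successful submission returns FIO_Q_QUEUED
 * and the io_u completes later; a failed write surfaces the error
 * immediately as a completed io_u.
 */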
static int fio_binject_queue(struct thread_data *td, struct io_u *io_u)
{
	int ret;

	fio_ro_check(td, io_u);

	ret = fio_binject_doio(td, io_u);

	if (ret < 0)
		io_u->error = errno;

	if (io_u->error) {
		td_verror(td, io_u->error, "xfer");
		return FIO_Q_COMPLETED;
	}

	return ret;
}
static struct io_u *fio_binject_event(struct thread_data *td, int event)
{
	struct binject_data *bd = td->io_ops->data;

	return bd->events[event];
}
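/*
 * Detach our binject device through the control node. The control ioctls
 * are raw command numbers here: 0 attaches a device, 1 detaches it.
 */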
static void binject_unmap_dev(struct thread_data *td, struct binject_file *bf)
{
	struct b_ioctl_cmd bic;
	int fdb;

	if (bf->fd >= 0) {
		close(bf->fd);
		bf->fd = -1;
	}

	fdb = open("/dev/binject-ctl", O_RDWR);
	if (fdb < 0) {
		td_verror(td, errno, "open binject-ctl");
		return;
	}

	bic.minor = bf->minor;

	if (ioctl(fdb, 1, &bic) < 0) {
		td_verror(td, errno, "binject dev unmap");
	}

	close(fdb);
}
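/*
 * Attach the opened block device through /dev/binject-ctl, then wait for
 * udev to create the per-device /dev/binject<minor> node and open it.
 */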
static int binject_map_dev(struct thread_data *td, struct binject_file *bf,
			   int fd)
{
	struct b_ioctl_cmd bic;
	char name[80];
	struct stat sb;
	int fdb, dev_there, loops;

	fdb = open("/dev/binject-ctl", O_RDWR);
	if (fdb < 0) {
		td_verror(td, errno, "binject ctl open");
		return 1;
	}

	bic.fd = fd;

	if (ioctl(fdb, 0, &bic) < 0) {
		td_verror(td, errno, "binject dev map");
		close(fdb);
		return 1;
	}

	bf->minor = bic.minor;

	sprintf(name, "/dev/binject%u", bf->minor);

	/*
	 * Wait for udev to create the node...
	 */
	dev_there = loops = 0;
	do {
		if (!stat(name, &sb)) {
			dev_there = 1;
			break;
		}

		usleep(10000);
	} while (++loops < 100);

	close(fdb);

	if (!dev_there) {
		log_err("fio: timed out waiting for binject dev\n");
		return 1;
	}

	bf->fd = open(name, O_RDWR);
	if (bf->fd < 0) {
		td_verror(td, errno, "binject dev open");
		binject_unmap_dev(td, bf);
		return 1;
	}

	return 0;
}
static int fio_binject_close_file(struct thread_data *td, struct fio_file *f)
{
	struct binject_file *bf = (struct binject_file *) f->file_data;

	if (bf) {
		binject_unmap_dev(td, bf);
		free(bf);
		f->file_data = 0;
		return generic_close_file(td, f);
	}

	return 0;
}
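/*
 * Open the real block device, query its logical sector size with BLKSSZGET
 * (used by ->prep() for alignment checks), then map a binject device on
 * top of it.
 */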
static int fio_binject_open_file(struct thread_data *td, struct fio_file *f)
{
	struct binject_file *bf;
	unsigned int bs;
	int ret;

	ret = generic_open_file(td, f);
	if (ret)
		return 1;

	if (f->filetype != FIO_TYPE_BD) {
		log_err("fio: binject only works with block devices\n");
		goto err_close;
	}
	if (ioctl(f->fd, BLKSSZGET, &bs) < 0) {
		td_verror(td, errno, "BLKSSZGET");
		goto err_close;
	}

	bf = malloc(sizeof(*bf));
	bf->bs = bs;
	bf->minor = bf->fd = -1;
	f->file_data = (unsigned long) bf;

	if (binject_map_dev(td, bf, f->fd)) {
err_close:
		ret = generic_close_file(td, f);
		return 1;
	}

	return 0;
}
static void fio_binject_cleanup(struct thread_data *td)
{
	struct binject_data *bd = td->io_ops->data;

	if (bd) {
		free(bd->events);
		free(bd->cmds);
		free(bd->fd_flags);
		free(bd->pfds);
		free(bd);
	}
}
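/*
 * Allocate per-job state: iodepth-sized command and event arrays, plus
 * per-file pollfd and saved fd-flag arrays.
 */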
static int fio_binject_init(struct thread_data *td)
{
	struct binject_data *bd;

	bd = malloc(sizeof(*bd));
	memset(bd, 0, sizeof(*bd));

	bd->cmds = malloc(td->o.iodepth * sizeof(struct b_user_cmd));
	memset(bd->cmds, 0, td->o.iodepth * sizeof(struct b_user_cmd));

	bd->events = malloc(td->o.iodepth * sizeof(struct io_u *));
	memset(bd->events, 0, td->o.iodepth * sizeof(struct io_u *));

	bd->pfds = malloc(sizeof(struct pollfd) * td->o.nr_files);
	memset(bd->pfds, 0, sizeof(struct pollfd) * td->o.nr_files);

	bd->fd_flags = malloc(sizeof(int) * td->o.nr_files);
	memset(bd->fd_flags, 0, sizeof(int) * td->o.nr_files);

	td->io_ops->data = bd;
	return 0;
}
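/*
 * Ops table wiring the engine into fio. FIO_RAWIO marks this as a
 * direct/raw access engine.
 */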
static struct ioengine_ops ioengine = {
	.name		= "binject",
	.version	= FIO_IOOPS_VERSION,
	.init		= fio_binject_init,
	.prep		= fio_binject_prep,
	.queue		= fio_binject_queue,
	.getevents	= fio_binject_getevents,
	.event		= fio_binject_event,
	.cleanup	= fio_binject_cleanup,
	.open_file	= fio_binject_open_file,
	.close_file	= fio_binject_close_file,
	.get_file_size	= generic_get_file_size,
	.flags		= FIO_RAWIO,
};
#else /* FIO_HAVE_BINJECT */

/*
 * When we have a proper configure system in place, we simply won't build
 * and install this io engine. For now install a crippled version that
 * just complains and fails to load.
 */
static int fio_binject_init(struct thread_data fio_unused *td)
{
	log_err("fio: ioengine binject not available\n");
	return 1;
}

static struct ioengine_ops ioengine = {
	.name		= "binject",
	.version	= FIO_IOOPS_VERSION,
	.init		= fio_binject_init,
};

#endif
static void fio_init fio_binject_register(void)
{
	register_ioengine(&ioengine);
}

static void fio_exit fio_binject_unregister(void)
{
	unregister_ioengine(&ioengine);
}