 * IO engine that uses the Linux binject interface to directly inject
 * bio's to block devices.
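 *
 * For illustration only, a job file using this engine might look roughly
 * like the sketch below; the job name, block size, and depth are arbitrary,
 * and the filename is a placeholder for the binject device node of the
 * target disk:
 *
 *   [binject-example]
 *   ioengine=binject
 *   filename=/dev/binject0
 *   rw=randread
 *   bs=4k
 *   iodepth=32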
#ifdef FIO_HAVE_BINJECT

struct binject_data {
        struct b_user_cmd *cmds;
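
/*
 * Fill in a binject user command for this io_u: magic, data buffer, length,
 * offset, and a user pointer back to the io_u so that a completed command
 * can be matched up with its io_u again when it is reaped.
 */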
static void binject_buc_init(struct binject_data *bd, struct io_u *io_u)
{
        struct b_user_cmd *buc = &io_u->buc;

        memset(buc, 0, sizeof(*buc));
        binject_buc_set_magic(buc);

        buc->buf = (unsigned long) io_u->xfer_buf;
        buc->len = io_u->xfer_buflen;
        buc->offset = io_u->offset;
        buc->usr_ptr = (unsigned long) io_u;

        buc->flags = B_FLAG_NOIDLE | B_FLAG_UNPLUG;
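
/*
 * Return whether any of the polled file descriptors reported POLLIN, i.e.
 * whether completed commands are ready to be read back.
 */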
static int pollin_events(struct pollfd *pfds, int fds)
{
        for (i = 0; i < fds; i++)
                if (pfds[i].revents & POLLIN)
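
/*
 * Reap completed commands: poll the open device files, read back finished
 * b_user_cmd structures, and map each one to its io_u through the stored
 * usr_ptr. For min == 0 the descriptors are put into non-blocking mode so
 * the call will not wait for events.
 */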
static int fio_binject_getevents(struct thread_data *td, unsigned int min,
                unsigned int max, struct timespec fio_unused *t)
{
        struct binject_data *bd = td->io_ops->data;
        int left = max, ret, r = 0, ev_index = 0;
        unsigned int i, events;

        /* Fill in the file descriptors */
        for_each_file(td, f, i) {
                /* don't block for min events == 0 */
                bd->fd_flags[i] = fcntl(f->fd, F_GETFL);
                fcntl(f->fd, F_SETFL, bd->fd_flags[i] | O_NONBLOCK);

                bd->pfds[i].fd = f->fd;
                bd->pfds[i].events = POLLIN;

        ret = poll(bd->pfds, td->o.nr_files, -1);
        td_verror(td, errno, "poll");

        if (pollin_events(bd->pfds, td->o.nr_files))

        for_each_file(td, f, i) {
                ret = read(f->fd, p, left * sizeof(struct b_user_cmd));
                td_verror(td, errno, "read");
                events += ret / sizeof(struct b_user_cmd);

        for (i = 0; i < events; i++) {
                struct b_user_cmd *buc = (struct b_user_cmd *) buf + i;

                bd->events[ev_index] = (struct io_u *) buc->usr_ptr;

        for_each_file(td, f, i)
                fcntl(f->fd, F_SETFL, bd->fd_flags[i]);

        assert(ev_index == r);
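
/*
 * Submit a single command: the b_user_cmd is handed to the kernel by
 * writing it to the binject device file.
 */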
static int fio_binject_doio(struct thread_data *td, struct io_u *io_u)
{
        struct b_user_cmd *buc = &io_u->buc;
        struct fio_file *f = io_u->file;

        ret = write(f->fd, buc, sizeof(*buc));
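
/*
 * Prepare an io_u for submission: verify that the transfer length is
 * aligned to the device block size and set the binject command type that
 * matches the data direction.
 */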
static int fio_binject_prep(struct thread_data *td, struct io_u *io_u)
{
        struct binject_data *bd = td->io_ops->data;
        struct b_user_cmd *buc = &io_u->buc;

        if (io_u->xfer_buflen & (bd->bs - 1)) {
                log_err("read/write not sector aligned\n");

        if (io_u->ddir == DDIR_READ) {
                binject_buc_init(bd, io_u);
                buc->type = B_TYPE_READ;
        } else if (io_u->ddir == DDIR_WRITE) {
                binject_buc_init(bd, io_u);
                buc->type = B_TYPE_WRITE;
        } else if (io_u->ddir == DDIR_TRIM) {
                binject_buc_init(bd, io_u);
                buc->type = B_TYPE_DISCARD;
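
/*
 * Queue an io_u: hand it to fio_binject_doio() for submission. If the
 * submission fails, the io_u is reported as completed with an error;
 * otherwise it stays outstanding until fio_binject_getevents() reaps it.
 */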
static int fio_binject_queue(struct thread_data *td, struct io_u *io_u)
{
        fio_ro_check(td, io_u);

        ret = fio_binject_doio(td, io_u);

        td_verror(td, io_u->error, "xfer");
        return FIO_Q_COMPLETED;
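
/*
 * Return the io_u stored for the given reaped event index.
 */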
static struct io_u *fio_binject_event(struct thread_data *td, int event)
{
        struct binject_data *bd = td->io_ops->data;

        return bd->events[event];
}
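
/*
 * Release the per-thread engine data that fio_binject_init() allocated.
 */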
static void fio_binject_cleanup(struct thread_data *td)
{
        struct binject_data *bd = td->io_ops->data;
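
/*
 * Allocate and zero the per-thread engine data: command and event arrays
 * sized for the job's iodepth, plus pollfd and saved fd-flag arrays sized
 * for the number of files.
 */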
static int fio_binject_init(struct thread_data *td)
{
        struct binject_data *bd;

        bd = malloc(sizeof(*bd));
        memset(bd, 0, sizeof(*bd));

        bd->cmds = malloc(td->o.iodepth * sizeof(struct b_user_cmd));
        memset(bd->cmds, 0, td->o.iodepth * sizeof(struct b_user_cmd));

        bd->events = malloc(td->o.iodepth * sizeof(struct io_u *));
        memset(bd->events, 0, td->o.iodepth * sizeof(struct io_u *));

        bd->pfds = malloc(sizeof(struct pollfd) * td->o.nr_files);
        memset(bd->pfds, 0, sizeof(struct pollfd) * td->o.nr_files);

        bd->fd_flags = malloc(sizeof(int) * td->o.nr_files);
        memset(bd->fd_flags, 0, sizeof(int) * td->o.nr_files);

        td->io_ops->data = bd;
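
/*
 * Hook the engine into fio's pluggable ioengine interface; file handling
 * uses the generic open/close/size helpers.
 */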
static struct ioengine_ops ioengine = {
        .version        = FIO_IOOPS_VERSION,
        .init           = fio_binject_init,
        .prep           = fio_binject_prep,
        .queue          = fio_binject_queue,
        .getevents      = fio_binject_getevents,
        .event          = fio_binject_event,
        .cleanup        = fio_binject_cleanup,
        .open_file      = generic_open_file,
        .close_file     = generic_close_file,
        .get_file_size  = generic_get_file_size,
#else /* FIO_HAVE_BINJECT */
 * When we have a proper configure system in place, we simply won't build
 * and install this io engine. For now, install a crippled version that
 * just complains and fails to load.
static int fio_binject_init(struct thread_data fio_unused *td)
{
        fprintf(stderr, "fio: ioengine binject not available\n");
static struct ioengine_ops ioengine = {
        .version        = FIO_IOOPS_VERSION,
        .init           = fio_binject_init,
};
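
/*
 * Constructor/destructor hooks: register the engine with fio when the
 * binary is loaded and unregister it again on exit.
 */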
static void fio_init fio_binject_register(void)
{
        register_ioengine(&ioengine);
}

static void fio_exit fio_binject_unregister(void)
{
        unregister_ioengine(&ioengine);
}