/*
 * binject engine
 *
 * IO engine that uses the Linux binject interface to directly inject
 * bio's to block devices.
 *
 */
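/*
 * A minimal example job using this engine (illustrative only; the device
 * path and sizes below are placeholders, not part of this file):
 *
 *	[binject-test]
 *	ioengine=binject
 *	filename=/dev/sdX
 *	rw=randread
 *	bs=4k
 *	iodepth=32
 */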
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <assert.h>
#include <string.h>
#include <sys/poll.h>
#include <sys/types.h>
#include <sys/stat.h>

#include "../fio.h"

#ifdef FIO_HAVE_BINJECT

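/*
 * Per-thread engine state: command and event arrays sized by iodepth,
 * plus pollfd and fd flag arrays with one entry per file.
 */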
struct binject_data {
	struct b_user_cmd *cmds;
	struct io_u **events;
	struct pollfd *pfds;
	int *fd_flags;
};

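/*
 * Per-file state: the device's logical sector size, the binject minor the
 * device was mapped to, and the fd of the /dev/binjectN node.
 */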
struct binject_file {
	unsigned int bs;
	int minor;
	int fd;
};

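/*
 * Fill in a b_user_cmd from an io_u: buffer, length and offset, with the
 * io_u pointer stashed in usr_ptr so completions can be matched back up.
 */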
static void binject_buc_init(struct binject_data *bd, struct io_u *io_u)
{
	struct b_user_cmd *buc = &io_u->buc;

	memset(buc, 0, sizeof(*buc));
	binject_buc_set_magic(buc);

	buc->buf = (unsigned long) io_u->xfer_buf;
	buc->len = io_u->xfer_buflen;
	buc->offset = io_u->offset;
	buc->usr_ptr = (unsigned long) io_u;

	buc->flags = B_FLAG_NOIDLE | B_FLAG_UNPLUG;
	assert(buc->buf);
}

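/*
 * Return 1 if any of the polled fds is ready for reading.
 */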
static int pollin_events(struct pollfd *pfds, int fds)
{
	int i;

	for (i = 0; i < fds; i++)
		if (pfds[i].revents & POLLIN)
			return 1;

	return 0;
}

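/*
 * Read completed commands from each mapped device into 'p'. Returns the
 * number of completions found, retrying with a short sleep until at least
 * one completion arrives or an error is seen (reported through *err).
 */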
static unsigned int binject_read_commands(struct thread_data *td, void *p,
					  int left, int *err)
{
	struct binject_file *bf;
	struct fio_file *f;
	int i, ret, events;

one_more:
	events = 0;
	for_each_file(td, f, i) {
		bf = (struct binject_file *) (uintptr_t) f->engine_data;
		ret = read(bf->fd, p, left * sizeof(struct b_user_cmd));
		if (ret < 0) {
			if (errno == EAGAIN)
				continue;
			*err = -errno;
			td_verror(td, errno, "read");
			break;
		} else if (ret) {
			p += ret;
			events += ret / sizeof(struct b_user_cmd);
		}
	}

	if (*err || events)
		return events;

	usleep(1000);
	goto one_more;
}

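/*
 * Reap up to 'max' completions. With min == 0 the device fds are put into
 * non-blocking mode, poll() is used to wait for readable fds before
 * reading, and the fd flags are reset again before returning.
 */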
static int fio_binject_getevents(struct thread_data *td, unsigned int min,
				 unsigned int max, struct timespec fio_unused *t)
{
	struct binject_data *bd = td->io_ops->data;
	int left = max, ret, r = 0, ev_index = 0;
	void *buf = bd->cmds;
	unsigned int i, events;
	struct fio_file *f;
	struct binject_file *bf;

	/*
	 * Fill in the file descriptors
	 */
	for_each_file(td, f, i) {
		bf = (struct binject_file *) (uintptr_t) f->engine_data;

		/*
		 * don't block for min events == 0
		 */
		if (!min)
			fio_set_fd_nonblocking(bf->fd, "binject");

		bd->pfds[i].fd = bf->fd;
		bd->pfds[i].events = POLLIN;
	}

	while (left) {
		while (!min) {
			ret = poll(bd->pfds, td->o.nr_files, -1);
			if (ret < 0) {
				if (!r)
					r = -errno;
				td_verror(td, errno, "poll");
				break;
			} else if (!ret)
				continue;

			if (pollin_events(bd->pfds, td->o.nr_files))
				break;
		}

		if (r < 0)
			break;

		events = binject_read_commands(td, buf, left, &r);

		if (r < 0)
			break;

		left -= events;
		r += events;

		for (i = 0; i < events; i++) {
			struct b_user_cmd *buc = (struct b_user_cmd *) buf + i;

			bd->events[ev_index] = (struct io_u *) (unsigned long) buc->usr_ptr;
			ev_index++;
		}
	}

	if (!min) {
		for_each_file(td, f, i) {
			bf = (struct binject_file *) (uintptr_t) f->engine_data;
			fcntl(bf->fd, F_SETFL, bd->fd_flags[i]);
		}
	}

	if (r > 0)
		assert(ev_index == r);

	return r;
}

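/*
 * Submit a single command by writing the b_user_cmd to the mapped binject
 * device. The IO completes asynchronously and is reaped via getevents.
 */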
static int fio_binject_doio(struct thread_data *td, struct io_u *io_u)
{
	struct b_user_cmd *buc = &io_u->buc;
	struct binject_file *bf = (struct binject_file *) (uintptr_t) io_u->file->engine_data;
	int ret;

	ret = write(bf->fd, buc, sizeof(*buc));
	if (ret < 0)
		return ret;

	return FIO_Q_QUEUED;
}

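/*
 * Check that the transfer is a multiple of the device sector size and set
 * up the command type: read, write (or write barrier), or discard.
 */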
static int fio_binject_prep(struct thread_data *td, struct io_u *io_u)
{
	struct binject_data *bd = td->io_ops->data;
	struct b_user_cmd *buc = &io_u->buc;
	struct binject_file *bf = (struct binject_file *) (uintptr_t) io_u->file->engine_data;

	if (io_u->xfer_buflen & (bf->bs - 1)) {
		log_err("read/write not sector aligned\n");
		return EINVAL;
	}

	if (io_u->ddir == DDIR_READ) {
		binject_buc_init(bd, io_u);
		buc->type = B_TYPE_READ;
	} else if (io_u->ddir == DDIR_WRITE) {
		binject_buc_init(bd, io_u);
		if (io_u->flags & IO_U_F_BARRIER)
			buc->type = B_TYPE_WRITEBARRIER;
		else
			buc->type = B_TYPE_WRITE;
	} else if (io_u->ddir == DDIR_TRIM) {
		binject_buc_init(bd, io_u);
		buc->type = B_TYPE_DISCARD;
	} else {
		assert(0);
	}

	return 0;
}

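/*
 * Queue an io_u: hand it to the device and report it as queued, or flag
 * the error and complete it immediately.
 */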
static int fio_binject_queue(struct thread_data *td, struct io_u *io_u)
{
	int ret;

	fio_ro_check(td, io_u);

	ret = fio_binject_doio(td, io_u);

	if (ret < 0)
		io_u->error = errno;

	if (io_u->error) {
		td_verror(td, io_u->error, "xfer");
		return FIO_Q_COMPLETED;
	}

	return ret;
}

static struct io_u *fio_binject_event(struct thread_data *td, int event)
{
	struct binject_data *bd = td->io_ops->data;

	return bd->events[event];
}

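/*
 * Open the binject control device used for mapping and unmapping minors.
 */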
static int binject_open_ctl(struct thread_data *td)
{
	int fd;

	fd = open("/dev/binject-ctl", O_RDWR);
	if (fd < 0)
		td_verror(td, errno, "open binject-ctl");

	return fd;
}

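/*
 * Tear down a mapping: close the per-file binject node and ask the control
 * device to delete the minor again.
 */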
static void binject_unmap_dev(struct thread_data *td, struct binject_file *bf)
{
	struct b_ioctl_cmd bic;
	int fdb;

	if (bf->fd >= 0) {
		close(bf->fd);
		bf->fd = -1;
	}

	fdb = binject_open_ctl(td);
	if (fdb < 0)
		return;

	bic.minor = bf->minor;

	if (ioctl(fdb, B_IOCTL_DEL, &bic) < 0)
		td_verror(td, errno, "binject dev unmap");

	close(fdb);
}

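/*
 * Map a block device through binject: B_IOCTL_ADD hands back a minor,
 * then wait for udev to create /dev/binject<minor> and open it.
 */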
static int binject_map_dev(struct thread_data *td, struct binject_file *bf,
			   int fd)
{
	struct b_ioctl_cmd bic;
	char name[80];
	struct stat sb;
	int fdb, dev_there, loops;

	fdb = binject_open_ctl(td);
	if (fdb < 0)
		return 1;

	bic.fd = fd;

	if (ioctl(fdb, B_IOCTL_ADD, &bic) < 0) {
		td_verror(td, errno, "binject dev map");
		close(fdb);
		return 1;
	}

	bf->minor = bic.minor;

	sprintf(name, "/dev/binject%u", bf->minor);

	/*
	 * Wait for udev to create the node...
	 */
	dev_there = loops = 0;
	do {
		if (!stat(name, &sb)) {
			dev_there = 1;
			break;
		}

		usleep(10000);
	} while (++loops < 100);

	close(fdb);

	if (!dev_there) {
		log_err("fio: timed out waiting for binject dev\n");
		goto err_unmap;
	}

	bf->fd = open(name, O_RDWR);
	if (bf->fd < 0) {
		td_verror(td, errno, "binject dev open");
err_unmap:
		binject_unmap_dev(td, bf);
		return 1;
	}

	return 0;
}

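/*
 * Undo the binject mapping and free the per-file state before closing the
 * underlying block device.
 */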
static int fio_binject_close_file(struct thread_data *td, struct fio_file *f)
{
	struct binject_file *bf = (struct binject_file *) (uintptr_t) f->engine_data;

	if (bf) {
		binject_unmap_dev(td, bf);
		free(bf);
		f->engine_data = 0;
		return generic_close_file(td, f);
	}

	return 0;
}

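/*
 * Open the block device, query its logical sector size with BLKSSZGET and
 * map it through binject. Fails for anything that isn't a block device.
 */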
static int fio_binject_open_file(struct thread_data *td, struct fio_file *f)
{
	struct binject_file *bf;
	unsigned int bs;
	int ret;

	ret = generic_open_file(td, f);
	if (ret)
		return 1;

	if (f->filetype != FIO_TYPE_BD) {
		log_err("fio: binject only works with block devices\n");
		goto err_close;
	}
	if (ioctl(f->fd, BLKSSZGET, &bs) < 0) {
		td_verror(td, errno, "BLKSSZGET");
		goto err_close;
	}

	bf = malloc(sizeof(*bf));
	bf->bs = bs;
	bf->minor = bf->fd = -1;
	f->engine_data = (uintptr_t) bf;

	if (binject_map_dev(td, bf, f->fd)) {
err_close:
		ret = generic_close_file(td, f);
		return 1;
	}

	return 0;
}

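/*
 * Free the per-thread state allocated in fio_binject_init().
 */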
static void fio_binject_cleanup(struct thread_data *td)
{
	struct binject_data *bd = td->io_ops->data;

	if (bd) {
		free(bd->events);
		free(bd->cmds);
		free(bd->fd_flags);
		free(bd->pfds);
		free(bd);
	}
}

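/*
 * Allocate the per-thread state: iodepth-sized command and event arrays,
 * and per-file pollfd and fd flag arrays.
 */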
static int fio_binject_init(struct thread_data *td)
{
	struct binject_data *bd;

	bd = malloc(sizeof(*bd));
	memset(bd, 0, sizeof(*bd));

	bd->cmds = malloc(td->o.iodepth * sizeof(struct b_user_cmd));
	memset(bd->cmds, 0, td->o.iodepth * sizeof(struct b_user_cmd));

	bd->events = malloc(td->o.iodepth * sizeof(struct io_u *));
	memset(bd->events, 0, td->o.iodepth * sizeof(struct io_u *));

	bd->pfds = malloc(sizeof(struct pollfd) * td->o.nr_files);
	memset(bd->pfds, 0, sizeof(struct pollfd) * td->o.nr_files);

	bd->fd_flags = malloc(sizeof(int) * td->o.nr_files);
	memset(bd->fd_flags, 0, sizeof(int) * td->o.nr_files);

	td->io_ops->data = bd;
	return 0;
}

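/*
 * Raw block device IO: buffers must be properly aligned, and the engine
 * can issue barrier writes (see B_TYPE_WRITEBARRIER above).
 */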
static struct ioengine_ops ioengine = {
	.name		= "binject",
	.version	= FIO_IOOPS_VERSION,
	.init		= fio_binject_init,
	.prep		= fio_binject_prep,
	.queue		= fio_binject_queue,
	.getevents	= fio_binject_getevents,
	.event		= fio_binject_event,
	.cleanup	= fio_binject_cleanup,
	.open_file	= fio_binject_open_file,
	.close_file	= fio_binject_close_file,
	.get_file_size	= generic_get_file_size,
	.flags		= FIO_RAWIO | FIO_BARRIER | FIO_MEMALIGN,
};

#else /* FIO_HAVE_BINJECT */

/*
 * When we have a proper configure system in place, we simply won't build
 * and install this io engine. For now install a crippled version that
 * just complains and fails to load.
 */
static int fio_binject_init(struct thread_data fio_unused *td)
{
	log_err("fio: ioengine binject not available\n");
	return 1;
}

static struct ioengine_ops ioengine = {
	.name		= "binject",
	.version	= FIO_IOOPS_VERSION,
	.init		= fio_binject_init,
};

#endif

static void fio_init fio_binject_register(void)
{
	register_ioengine(&ioengine);
}

static void fio_exit fio_binject_unregister(void)
{
	unregister_ioengine(&ioengine);
}