/*
 * binject engine
 *
 * IO engine that uses the Linux binject interface to directly inject
 * bios into block devices.
 *
 */
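/*
 * A minimal example job (illustrative only; /dev/sdX is a placeholder
 * for a scratch block device you can afford to overwrite):
 *
 *	[binject-test]
 *	ioengine=binject
 *	filename=/dev/sdX
 *	rw=randread
 *	bs=4k
 *	iodepth=32
 */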
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <assert.h>
#include <string.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/poll.h>
#include <sys/types.h>
#include <sys/stat.h>

#include "../fio.h"

#ifdef FIO_HAVE_BINJECT

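/*
 * Per-job state: a reap buffer for completed commands, the completed
 * io_u list handed back through ->event(), pollfds for each open
 * device, and saved fd flags for restoring blocking mode.
 */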
struct binject_data {
	struct b_user_cmd *cmds;
	struct io_u **events;
	struct pollfd *pfds;
	int *fd_flags;
};

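/*
 * Per-file state: logical block size of the underlying device, the
 * binject minor assigned by the control device, and the fd of the
 * /dev/binjectN node we queue commands through.
 */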
struct binject_file {
	unsigned int bs;
	int minor;
	int fd;
};

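/*
 * Fill a b_user_cmd describing this io_u: buffer, length, offset, and
 * a user pointer so the io_u can be recovered at reap time.
 */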
static void binject_buc_init(struct binject_data *bd, struct io_u *io_u)
{
	struct b_user_cmd *buc = &io_u->buc;

	memset(buc, 0, sizeof(*buc));
	binject_buc_set_magic(buc);

	buc->buf = (unsigned long) io_u->xfer_buf;
	buc->len = io_u->xfer_buflen;
	buc->offset = io_u->offset;
	buc->usr_ptr = (unsigned long) io_u;

	buc->flags = B_FLAG_NOIDLE | B_FLAG_UNPLUG;
	assert(buc->buf);
}

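/*
 * Return 1 if any of the polled devices has completions to read.
 */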
static int pollin_events(struct pollfd *pfds, int fds)
{
	int i;

	for (i = 0; i < fds; i++)
		if (pfds[i].revents & POLLIN)
			return 1;

	return 0;
}

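/*
 * Reap completed commands by reading b_user_cmd structs back from each
 * device. Loops with a short sleep until at least one event arrives or
 * an error occurs.
 */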
static unsigned int binject_read_commands(struct thread_data *td, void *p,
					  int left, int *err)
{
	struct binject_file *bf;
	struct fio_file *f;
	int i, ret, events;

one_more:
	events = 0;
	for_each_file(td, f, i) {
		bf = (struct binject_file *) (uintptr_t) f->engine_data;
		ret = read(bf->fd, p, left * sizeof(struct b_user_cmd));
		if (ret < 0) {
			if (errno == EAGAIN)
				continue;
			*err = -errno;
			td_verror(td, errno, "read");
			break;
		} else if (ret) {
			p += ret;
			events += ret / sizeof(struct b_user_cmd);
		}
	}

	if (*err || events)
		return events;

	usleep(1000);
	goto one_more;
}

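/*
 * Wait for and collect up to max completions. For min == 0 the device
 * fds are switched to non-blocking so the reads below cannot stall,
 * and poll() is used to find devices with completions pending.
 */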
static int fio_binject_getevents(struct thread_data *td, unsigned int min,
				 unsigned int max, struct timespec fio_unused *t)
{
	struct binject_data *bd = td->io_ops->data;
	int left = max, ret, r = 0, ev_index = 0;
	void *buf = bd->cmds;
	unsigned int i, events;
	struct fio_file *f;
	struct binject_file *bf;

	/*
	 * Fill in the file descriptors
	 */
	for_each_file(td, f, i) {
		bf = (struct binject_file *) (uintptr_t) f->engine_data;

		/*
		 * don't block for min events == 0
		 */
		if (!min) {
			bd->fd_flags[i] = fcntl(bf->fd, F_GETFL);
			fcntl(bf->fd, F_SETFL, bd->fd_flags[i] | O_NONBLOCK);
		}
		bd->pfds[i].fd = bf->fd;
		bd->pfds[i].events = POLLIN;
	}

	while (left) {
		while (!min) {
			ret = poll(bd->pfds, td->o.nr_files, -1);
			if (ret < 0) {
				if (!r)
					r = -errno;
				td_verror(td, errno, "poll");
				break;
			} else if (!ret)
				continue;

			if (pollin_events(bd->pfds, td->o.nr_files))
				break;
		}

		if (r < 0)
			break;

		events = binject_read_commands(td, buf, left, &r);

		if (r < 0)
			break;

		left -= events;
		r += events;

		for (i = 0; i < events; i++) {
			struct b_user_cmd *buc = (struct b_user_cmd *) buf + i;

			bd->events[ev_index] = (struct io_u *) (unsigned long) buc->usr_ptr;
			ev_index++;
		}
	}

	if (!min) {
		for_each_file(td, f, i) {
			bf = (struct binject_file *) (uintptr_t) f->engine_data;
			fcntl(bf->fd, F_SETFL, bd->fd_flags[i]);
		}
	}

	if (r > 0)
		assert(ev_index == r);

	return r;
}

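/*
 * Queue a single command by writing its b_user_cmd to the device node.
 */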
static int fio_binject_doio(struct thread_data *td, struct io_u *io_u)
{
	struct b_user_cmd *buc = &io_u->buc;
	struct binject_file *bf = (struct binject_file *) (uintptr_t) io_u->file->engine_data;
	int ret;

	ret = write(bf->fd, buc, sizeof(*buc));
	if (ret < 0)
		return ret;

	return FIO_Q_QUEUED;
}

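/*
 * Prep: check that the transfer is aligned to the device sector size
 * and pick the command type from the io_u data direction.
 */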
static int fio_binject_prep(struct thread_data *td, struct io_u *io_u)
{
	struct binject_data *bd = td->io_ops->data;
	struct b_user_cmd *buc = &io_u->buc;
	struct binject_file *bf = (struct binject_file *) (uintptr_t) io_u->file->engine_data;

	if (io_u->xfer_buflen & (bf->bs - 1)) {
		log_err("read/write not sector aligned\n");
		return EINVAL;
	}

	if (io_u->ddir == DDIR_READ) {
		binject_buc_init(bd, io_u);
		buc->type = B_TYPE_READ;
	} else if (io_u->ddir == DDIR_WRITE) {
		binject_buc_init(bd, io_u);
		if (io_u->flags & IO_U_F_BARRIER)
			buc->type = B_TYPE_WRITEBARRIER;
		else
			buc->type = B_TYPE_WRITE;
	} else if (io_u->ddir == DDIR_TRIM) {
		binject_buc_init(bd, io_u);
		buc->type = B_TYPE_DISCARD;
	} else {
		assert(0);
	}

	return 0;
}

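/*
 * Queue path: hand the io_u to the device and report it as queued, or
 * complete it immediately with the error.
 */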
static int fio_binject_queue(struct thread_data *td, struct io_u *io_u)
{
	int ret;

	fio_ro_check(td, io_u);

	ret = fio_binject_doio(td, io_u);

	if (ret < 0)
		io_u->error = errno;

	if (io_u->error) {
		td_verror(td, io_u->error, "xfer");
		return FIO_Q_COMPLETED;
	}

	return ret;
}

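/*
 * Hand a previously reaped event back to fio.
 */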
static struct io_u *fio_binject_event(struct thread_data *td, int event)
{
	struct binject_data *bd = td->io_ops->data;

	return bd->events[event];
}

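/*
 * Open the binject control device, through which device mappings are
 * added and removed.
 */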
static int binject_open_ctl(struct thread_data *td)
{
	int fd;

	fd = open("/dev/binject-ctl", O_RDWR);
	if (fd < 0)
		td_verror(td, errno, "open binject-ctl");

	return fd;
}

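/*
 * Tear down a mapping: close our handle on the binject node and ask
 * the control device to delete the minor.
 */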
static void binject_unmap_dev(struct thread_data *td, struct binject_file *bf)
{
	struct b_ioctl_cmd bic;
	int fdb;

	if (bf->fd >= 0) {
		close(bf->fd);
		bf->fd = -1;
	}

	fdb = binject_open_ctl(td);
	if (fdb < 0)
		return;

	bic.minor = bf->minor;

	if (ioctl(fdb, B_IOCTL_DEL, &bic) < 0)
		td_verror(td, errno, "binject dev unmap");

	close(fdb);
}

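/*
 * Map a block device fd through the control device, then wait (up to
 * roughly a second) for udev to create the /dev/binjectN node, and
 * open it.
 */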
static int binject_map_dev(struct thread_data *td, struct binject_file *bf,
			   int fd)
{
	struct b_ioctl_cmd bic;
	char name[80];
	struct stat sb;
	int fdb, dev_there, loops;

	fdb = binject_open_ctl(td);
	if (fdb < 0)
		return 1;

	bic.fd = fd;

	if (ioctl(fdb, B_IOCTL_ADD, &bic) < 0) {
		td_verror(td, errno, "binject dev map");
		close(fdb);
		return 1;
	}

	bf->minor = bic.minor;

	sprintf(name, "/dev/binject%u", bf->minor);

	/*
	 * Wait for udev to create the node...
	 */
	dev_there = loops = 0;
	do {
		if (!stat(name, &sb)) {
			dev_there = 1;
			break;
		}

		usleep(10000);
	} while (++loops < 100);

	close(fdb);

	if (!dev_there) {
		log_err("fio: timed out waiting for binject dev\n");
		goto err_unmap;
	}

	bf->fd = open(name, O_RDWR);
	if (bf->fd < 0) {
		td_verror(td, errno, "binject dev open");
err_unmap:
		binject_unmap_dev(td, bf);
		return 1;
	}

	return 0;
}

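/*
 * Unmap and free the per-file state before closing the file itself.
 */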
static int fio_binject_close_file(struct thread_data *td, struct fio_file *f)
{
	struct binject_file *bf = (struct binject_file *) (uintptr_t) f->engine_data;

	if (bf) {
		binject_unmap_dev(td, bf);
		free(bf);
		f->engine_data = 0;
		return generic_close_file(td, f);
	}

	return 0;
}

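/*
 * Open the target, verify it is a block device, record its sector
 * size, and map it through binject.
 */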
static int fio_binject_open_file(struct thread_data *td, struct fio_file *f)
{
	struct binject_file *bf;
	unsigned int bs;
	int ret;

	ret = generic_open_file(td, f);
	if (ret)
		return 1;

	if (f->filetype != FIO_TYPE_BD) {
		log_err("fio: binject only works with block devices\n");
		goto err_close;
	}
	if (ioctl(f->fd, BLKSSZGET, &bs) < 0) {
		td_verror(td, errno, "BLKSSZGET");
		goto err_close;
	}

	bf = malloc(sizeof(*bf));
	bf->bs = bs;
	bf->minor = bf->fd = -1;
	f->engine_data = (uintptr_t) bf;

	if (binject_map_dev(td, bf, f->fd)) {
err_close:
		ret = generic_close_file(td, f);
		return 1;
	}

	return 0;
}

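/*
 * Free all per-job state allocated in init.
 */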
static void fio_binject_cleanup(struct thread_data *td)
{
	struct binject_data *bd = td->io_ops->data;

	if (bd) {
		free(bd->events);
		free(bd->cmds);
		free(bd->fd_flags);
		free(bd->pfds);
		free(bd);
	}
}

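/*
 * Allocate per-job state, sized by the job's iodepth and file count.
 */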
static int fio_binject_init(struct thread_data *td)
{
	struct binject_data *bd;

	bd = malloc(sizeof(*bd));
	memset(bd, 0, sizeof(*bd));

	bd->cmds = malloc(td->o.iodepth * sizeof(struct b_user_cmd));
	memset(bd->cmds, 0, td->o.iodepth * sizeof(struct b_user_cmd));

	bd->events = malloc(td->o.iodepth * sizeof(struct io_u *));
	memset(bd->events, 0, td->o.iodepth * sizeof(struct io_u *));

	bd->pfds = malloc(sizeof(struct pollfd) * td->o.nr_files);
	memset(bd->pfds, 0, sizeof(struct pollfd) * td->o.nr_files);

	bd->fd_flags = malloc(sizeof(int) * td->o.nr_files);
	memset(bd->fd_flags, 0, sizeof(int) * td->o.nr_files);

	td->io_ops->data = bd;
	return 0;
}

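/*
 * FIO_RAWIO: raw device access; FIO_BARRIER: the engine supports write
 * barriers; FIO_MEMALIGN: IO buffers must stay memory aligned.
 */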
static struct ioengine_ops ioengine = {
	.name		= "binject",
	.version	= FIO_IOOPS_VERSION,
	.init		= fio_binject_init,
	.prep		= fio_binject_prep,
	.queue		= fio_binject_queue,
	.getevents	= fio_binject_getevents,
	.event		= fio_binject_event,
	.cleanup	= fio_binject_cleanup,
	.open_file	= fio_binject_open_file,
	.close_file	= fio_binject_close_file,
	.get_file_size	= generic_get_file_size,
	.flags		= FIO_RAWIO | FIO_BARRIER | FIO_MEMALIGN,
};

| 421 | |
| 422 | #else /* FIO_HAVE_BINJECT */ |
| 423 | |
| 424 | /* |
| 425 | * When we have a proper configure system in place, we simply wont build |
| 426 | * and install this io engine. For now install a crippled version that |
| 427 | * just complains and fails to load. |
| 428 | */ |
| 429 | static int fio_binject_init(struct thread_data fio_unused *td) |
| 430 | { |
| 431 | log_err("fio: ioengine binject not available\n"); |
| 432 | return 1; |
| 433 | } |
| 434 | |
| 435 | static struct ioengine_ops ioengine = { |
| 436 | .name = "binject", |
| 437 | .version = FIO_IOOPS_VERSION, |
| 438 | .init = fio_binject_init, |
| 439 | }; |
| 440 | |
| 441 | #endif |
| 442 | |
| 443 | static void fio_init fio_binject_register(void) |
| 444 | { |
| 445 | register_ioengine(&ioengine); |
| 446 | } |
| 447 | |
| 448 | static void fio_exit fio_binject_unregister(void) |
| 449 | { |
| 450 | unregister_ioengine(&ioengine); |
| 451 | } |