4 * IO engine that uses the Linux SG v3 interface to talk to SCSI devices
14 #include "../optgroup.h"
20 FIO_SG_WRITE_VERIFY = 2,
27 unsigned int writefua;
28 unsigned int write_mode;
31 static struct fio_option options[] = {
	/*
	 * Engine-private options for the sg ioengine.
	 * NOTE(review): this excerpt is sparsely sampled; the .name/.type
	 * lines for the FUA options and the terminating empty entry are
	 * not visible here.
	 */
34 .lname = "sg engine read fua flag support",
36 .off1 = offsetof(struct sg_options, readfua),
37 .help = "Set FUA flag (force unit access) for all Read operations",
39 .category = FIO_OPT_C_ENGINE,
40 .group = FIO_OPT_G_SG,
44 .lname = "sg engine write fua flag support",
46 .off1 = offsetof(struct sg_options, writefua),
47 .help = "Set FUA flag (force unit access) for all Write operations",
49 .category = FIO_OPT_C_ENGINE,
50 .group = FIO_OPT_G_SG,
	/* sg_write_mode selects which SCSI WRITE opcode family is issued */
53 .name = "sg_write_mode",
54 .lname = "specify sg write mode",
56 .off1 = offsetof(struct sg_options, write_mode),
57 .help = "Specify SCSI WRITE mode",
62 .help = "Issue standard SCSI WRITE commands",
65 .oval = FIO_SG_WRITE_VERIFY,
66 .help = "Issue SCSI WRITE AND VERIFY commands",
69 .oval = FIO_SG_WRITE_SAME,
70 .help = "Issue SCSI WRITE SAME commands",
73 .category = FIO_OPT_C_ENGINE,
74 .group = FIO_OPT_G_SG,
81 #define MAX_10B_LBA 0xFFFFFFFFULL
82 #define SCSI_TIMEOUT_MS 30000 // 30 second timeout; currently no method to override
83 #define MAX_SB 64 // sense block maximum return size
86 unsigned char cdb[16]; // enhanced from 10 to support 16 byte commands
87 unsigned char sb[MAX_SB]; // add sense block to commands
92 struct sgio_cmd *cmds;
/*
 * Prepare the SG v3 header (and this io_u's CDB slot in sd->cmds[]) for
 * submission: zero both, set the v3 magic interface id, the CDB and
 * sense-buffer sizes, and pack_id = io_u->index.  When 'fs' is non-zero
 * the data transfer pointer/length are taken from the io_u.
 * NOTE(review): the lines wiring hdr->cmdp/hdr->sbp to sc->cdb/sc->sb
 * are not visible in this excerpt — confirm against the full file.
 */
101 static void sgio_hdr_init(struct sgio_data *sd, struct sg_io_hdr *hdr,
102 struct io_u *io_u, int fs)
104 struct sgio_cmd *sc = &sd->cmds[io_u->index];
106 memset(hdr, 0, sizeof(*hdr));
107 memset(sc->cdb, 0, sizeof(sc->cdb));
	/* 'S' is the mandatory interface_id for the SG v3 sg_io_hdr */
109 hdr->interface_id = 'S';
111 hdr->cmd_len = sizeof(sc->cdb);
113 hdr->mx_sb_len = sizeof(sc->sb);
	/* pack_id lets completions be matched back to the issuing io_u */
114 hdr->pack_id = io_u->index;
118 hdr->dxferp = io_u->xfer_buf;
119 hdr->dxfer_len = io_u->xfer_buflen;
/*
 * Scan the pollfd array and report whether any descriptor has POLLIN
 * set in revents.  NOTE(review): the return statements are not visible
 * in this excerpt; presumably returns non-zero on the first POLLIN hit.
 */
123 static int pollin_events(struct pollfd *pfds, int fds)
127 for (i = 0; i < fds; i++)
128 if (pfds[i].revents & POLLIN)
/*
 * read() wrapper for an sg fd that retries on EAGAIN/EINTR.
 * NOTE(review): the surrounding retry loop and return-value handling
 * are not visible in this excerpt.
 */
134 static int sg_fd_read(int fd, void *data, size_t size)
141 ret = read(fd, data, size);
	/* transient conditions: retry rather than fail the read */
143 if (errno == EAGAIN || errno == EINTR)
/*
 * Reap completed sg commands for the char-device (read/write) path:
 * set the per-file fds non-blocking, poll() for POLLIN, then read
 * completed sg_io_hdr structures back from each fd into sd->sgbuf.
 * Completed io_u's are recovered from hdr->usr_ptr into sd->events[],
 * with EIO recorded when the header has SG_INFO_CHECK set.  The
 * original fcntl flags are restored before returning.
 * NOTE(review): this excerpt omits many lines (min-events handling,
 * the retry loop, and the final return), so the control flow shown
 * here is partial.
 */
163 static int fio_sgio_getevents(struct thread_data *td, unsigned int min,
165 const struct timespec fio_unused *t)
167 struct sgio_data *sd = td->io_ops_data;
168 int left = max, eventNum, ret, r = 0;
169 void *buf = sd->sgbuf;
170 unsigned int i, events;
174 * Fill in the file descriptors
176 for_each_file(td, f, i) {
178 * don't block for min events == 0
181 sd->fd_flags[i] = fio_set_fd_nonblocking(f->fd, "sg");
	/* -1 marks "flags unknown/unchanged" so restore below skips it */
183 sd->fd_flags[i] = -1;
185 sd->pfds[i].fd = f->fd;
186 sd->pfds[i].events = POLLIN;
192 dprint(FD_IO, "sgio_getevents: sd %p: left=%d\n", sd, left);
	/* block indefinitely until at least one fd is readable */
198 ret = poll(sd->pfds, td->o.nr_files, -1);
202 td_verror(td, errno, "poll");
207 if (pollin_events(sd->pfds, td->o.nr_files))
	/* drain up to 'left' completed headers from each file */
217 for_each_file(td, f, i) {
218 for (eventNum = 0; eventNum < left; eventNum++) {
219 ret = sg_fd_read(f->fd, p, sizeof(struct sg_io_hdr));
220 dprint(FD_IO, "sgio_getevents: ret: %d\n", ret);
223 td_verror(td, r, "sg_read");
226 p += sizeof(struct sg_io_hdr);
228 dprint(FD_IO, "sgio_getevents: events: %d\n", events);
	/* hard error with nothing reaped: bail out */
232 if (r < 0 && !events)
	/* translate the reaped headers into sd->events[] entries */
242 for (i = 0; i < events; i++) {
243 struct sg_io_hdr *hdr = (struct sg_io_hdr *) buf + i;
244 sd->events[i] = hdr->usr_ptr;
246 /* record if an io error occurred, ignore resid */
247 if (hdr->info & SG_INFO_CHECK) {
249 io_u = (struct io_u *)(hdr->usr_ptr);
	/* keep the failing header with the io_u for later errdetails */
250 memcpy(&io_u->hdr, hdr, sizeof(struct sg_io_hdr));
251 sd->events[i]->error = EIO;
	/* restore each fd's original fcntl flags (skip the -1 sentinels) */
257 for_each_file(td, f, i) {
258 if (sd->fd_flags[i] == -1)
261 if (fcntl(f->fd, F_SETFL, sd->fd_flags[i]) < 0)
262 log_err("fio: sg failed to restore fcntl flags: %s\n", strerror(errno));
/*
 * Synchronous submission path used for block devices: issue the command
 * via the SG_IO ioctl, which blocks until completion.  The io_u is
 * stashed in events[0] so fio_sgio_event() can return it, and EIO-class
 * failures are flagged via SG_INFO_CHECK.  Always completes inline.
 */
269 static enum fio_q_status fio_sgio_ioctl_doio(struct thread_data *td,
273 struct sgio_data *sd = td->io_ops_data;
274 struct sg_io_hdr *hdr = &io_u->hdr;
	/* only one in-flight command on this path, so slot 0 suffices */
277 sd->events[0] = io_u;
279 ret = ioctl(f->fd, SG_IO, hdr);
283 /* record if an io error occurred */
284 if (hdr->info & SG_INFO_CHECK)
287 return FIO_Q_COMPLETED;
/*
 * Submission path for sg character devices: write() the sg_io_hdr to
 * submit, and — when do_sync is set — read() it straight back to wait
 * for completion, checking SG_INFO_CHECK for an I/O error.
 * NOTE(review): the error-return lines between the write and read are
 * not visible in this excerpt.
 */
290 static int fio_sgio_rw_doio(struct fio_file *f, struct io_u *io_u, int do_sync)
292 struct sg_io_hdr *hdr = &io_u->hdr;
295 ret = write(f->fd, hdr, sizeof(*hdr));
	/* synchronous mode: block until the same command completes */
300 ret = read(f->fd, hdr, sizeof(*hdr));
304 /* record if an io error occurred */
305 if (hdr->info & SG_INFO_CHECK)
308 return FIO_Q_COMPLETED;
/*
 * Dispatch one io_u: block devices must use the SG_IO ioctl path,
 * sg char devices use the write/read path.  On error the io_u's error
 * is reported through td_verror() with the function name as context.
 */
314 static int fio_sgio_doio(struct thread_data *td, struct io_u *io_u, int do_sync)
316 struct fio_file *f = io_u->file;
319 if (f->filetype == FIO_TYPE_BLOCK) {
320 ret = fio_sgio_ioctl_doio(td, f, io_u);
321 td_verror(td, io_u->error, __func__);
323 ret = fio_sgio_rw_doio(f, io_u, do_sync);
325 td_verror(td, io_u->error, __func__);
/*
 * Build the SCSI CDB for this io_u.  Rejects transfers that are not a
 * multiple of the device block size, converts offset/length to LBA and
 * block count, then selects a 10-byte opcode when the LBA fits in 32
 * bits (MAX_10B_LBA) or the 16-byte variant otherwise.  Reads/writes
 * may set the FUA bit (0x08 in CDB byte 1) per the readfua/writefua
 * options; writes honor sg_write_mode (WRITE / WRITE AND VERIFY /
 * WRITE SAME).  Anything else becomes a SYNCHRONIZE CACHE with no data
 * transfer.  Finally the LBA and block count are encoded big-endian
 * into the CDB and the timeout is set.
 * NOTE(review): several lines (break statements, the readfua/writefua
 * condition checks, the return) are not visible in this excerpt.
 */
331 static int fio_sgio_prep(struct thread_data *td, struct io_u *io_u)
333 struct sg_io_hdr *hdr = &io_u->hdr;
334 struct sg_options *o = td->eo;
335 struct sgio_data *sd = td->io_ops_data;
336 long long nr_blocks, lba;
	/* sd->bs is a power of two, so this masks the sub-sector bits */
338 if (io_u->xfer_buflen & (sd->bs - 1)) {
339 log_err("read/write not sector aligned\n");
343 nr_blocks = io_u->xfer_buflen / sd->bs;
344 lba = io_u->offset / sd->bs;
346 if (io_u->ddir == DDIR_READ) {
347 sgio_hdr_init(sd, hdr, io_u, 1);
349 hdr->dxfer_direction = SG_DXFER_FROM_DEV;
350 if (lba < MAX_10B_LBA)
351 hdr->cmdp[0] = 0x28; // read(10)
353 hdr->cmdp[0] = 0x88; // read(16)
	/* FUA: force unit access, bypass the drive cache */
356 hdr->cmdp[1] |= 0x08;
358 } else if (io_u->ddir == DDIR_WRITE) {
359 sgio_hdr_init(sd, hdr, io_u, 1);
361 hdr->dxfer_direction = SG_DXFER_TO_DEV;
362 switch(o->write_mode) {
364 if (lba < MAX_10B_LBA)
365 hdr->cmdp[0] = 0x2a; // write(10)
367 hdr->cmdp[0] = 0x8a; // write(16)
369 hdr->cmdp[1] |= 0x08;
371 case FIO_SG_WRITE_VERIFY:
372 if (lba < MAX_10B_LBA)
373 hdr->cmdp[0] = 0x2e; // write and verify(10)
375 hdr->cmdp[0] = 0x8e; // write and verify(16)
377 // BYTCHK is disabled by virtue of the memset in sgio_hdr_init
378 case FIO_SG_WRITE_SAME:
	/* WRITE SAME transfers a single block that the device replicates */
379 hdr->dxfer_len = sd->bs;
380 if (lba < MAX_10B_LBA)
381 hdr->cmdp[0] = 0x41; // write same(10)
383 hdr->cmdp[0] = 0x93; // write same(16)
	/* neither read nor write: issue a cache sync, no data phase */
387 sgio_hdr_init(sd, hdr, io_u, 0);
388 hdr->dxfer_direction = SG_DXFER_NONE;
389 if (lba < MAX_10B_LBA)
390 hdr->cmdp[0] = 0x35; // synccache(10)
392 hdr->cmdp[0] = 0x91; // synccache(16)
396 * for synccache, we leave lba and length to 0 to sync all
	/* encode LBA + transfer length big-endian per SBC CDB layout */
399 if (hdr->dxfer_direction != SG_DXFER_NONE) {
400 if (lba < MAX_10B_LBA) {
401 hdr->cmdp[2] = (unsigned char) ((lba >> 24) & 0xff);
402 hdr->cmdp[3] = (unsigned char) ((lba >> 16) & 0xff);
403 hdr->cmdp[4] = (unsigned char) ((lba >> 8) & 0xff);
404 hdr->cmdp[5] = (unsigned char) (lba & 0xff);
405 hdr->cmdp[7] = (unsigned char) ((nr_blocks >> 8) & 0xff);
406 hdr->cmdp[8] = (unsigned char) (nr_blocks & 0xff);
408 hdr->cmdp[2] = (unsigned char) ((lba >> 56) & 0xff);
409 hdr->cmdp[3] = (unsigned char) ((lba >> 48) & 0xff);
410 hdr->cmdp[4] = (unsigned char) ((lba >> 40) & 0xff);
411 hdr->cmdp[5] = (unsigned char) ((lba >> 32) & 0xff);
412 hdr->cmdp[6] = (unsigned char) ((lba >> 24) & 0xff);
413 hdr->cmdp[7] = (unsigned char) ((lba >> 16) & 0xff);
414 hdr->cmdp[8] = (unsigned char) ((lba >> 8) & 0xff);
415 hdr->cmdp[9] = (unsigned char) (lba & 0xff);
416 hdr->cmdp[10] = (unsigned char) ((nr_blocks >> 32) & 0xff);
417 hdr->cmdp[11] = (unsigned char) ((nr_blocks >> 16) & 0xff);
418 hdr->cmdp[12] = (unsigned char) ((nr_blocks >> 8) & 0xff);
419 hdr->cmdp[13] = (unsigned char) (nr_blocks & 0xff);
423 hdr->timeout = SCSI_TIMEOUT_MS;
/*
 * Queue entry point.  Forces synchronous submission when the job asks
 * for sync I/O / O_DIRECT or the op is itself a sync, then hands off to
 * fio_sgio_doio().  A non-zero SCSI status records the residual count;
 * errors are surfaced via td_verror().  Always returns completed.
 * NOTE(review): the lines setting do_sync=1 and io_u->error are not
 * visible in this excerpt.
 */
427 static enum fio_q_status fio_sgio_queue(struct thread_data *td,
430 struct sg_io_hdr *hdr = &io_u->hdr;
431 int ret, do_sync = 0;
433 fio_ro_check(td, io_u);
435 if (td->o.sync_io || td->o.odirect || ddir_sync(io_u->ddir))
438 ret = fio_sgio_doio(td, io_u, do_sync);
442 else if (hdr->status) {
	/* device reported status: propagate short-transfer residual */
443 io_u->resid = hdr->resid;
448 td_verror(td, io_u->error, "xfer");
449 return FIO_Q_COMPLETED;
/* Return the io_u reaped into slot 'event' by the previous getevents. */
455 static struct io_u *fio_sgio_event(struct thread_data *td, int event)
457 struct sgio_data *sd = td->io_ops_data;
459 return sd->events[event];
/*
 * Issue SCSI READ CAPACITY to learn the device block size and max LBA.
 * Runs before sd/io_u structures exist, so it opens its own fd and
 * builds a standalone sg_io_hdr on the stack.  Tries READ CAPACITY(10)
 * first; if the reported max LBA saturates at MAX_10B_LBA, retries with
 * READ CAPACITY(16) (opcode 0x9e, service action 0x10), whose response
 * carries a 64-bit LBA and 32-bit block size.
 * NOTE(review): the ioctl error paths, close(fd) calls and returns are
 * not visible in this excerpt.
 */
462 static int fio_sgio_read_capacity(struct thread_data *td, unsigned int *bs,
463 unsigned long long *max_lba)
466 * need to do read capacity operation w/o benefit of sd or
467 * io_u structures, which are not initialized until later.
469 struct sg_io_hdr hdr;
470 unsigned char cmd[16];
471 unsigned char sb[64];
472 unsigned char buf[32]; // read capacity return
476 struct fio_file *f = td->files[0];
478 /* open file independent of rest of application */
479 fd = open(f->file_name, O_RDONLY);
483 memset(&hdr, 0, sizeof(hdr));
484 memset(cmd, 0, sizeof(cmd));
485 memset(sb, 0, sizeof(sb));
486 memset(buf, 0, sizeof(buf));
488 /* First let's try a 10 byte read capacity. */
489 hdr.interface_id = 'S';
493 hdr.mx_sb_len = sizeof(sb);
494 hdr.timeout = SCSI_TIMEOUT_MS;
495 hdr.cmdp[0] = 0x25; // Read Capacity(10)
496 hdr.dxfer_direction = SG_DXFER_FROM_DEV;
498 hdr.dxfer_len = sizeof(buf);
500 ret = ioctl(fd, SG_IO, &hdr);
	/* RC(10) response: bytes 0-3 = max LBA, bytes 4-7 = block size,
	 * both big-endian */
506 *bs = ((unsigned long) buf[4] << 24) | ((unsigned long) buf[5] << 16) |
507 ((unsigned long) buf[6] << 8) | (unsigned long) buf[7];
508 *max_lba = ((unsigned long) buf[0] << 24) | ((unsigned long) buf[1] << 16) |
509 ((unsigned long) buf[2] << 8) | (unsigned long) buf[3];
512 * If max lba masked by MAX_10B_LBA equals MAX_10B_LBA,
513 * then need to retry with 16 byte Read Capacity command.
515 if (*max_lba == MAX_10B_LBA) {
517 hdr.cmdp[0] = 0x9e; // service action
518 hdr.cmdp[1] = 0x10; // Read Capacity(16)
	/* allocation length for RC(16) lives in CDB bytes 10-13 */
519 hdr.cmdp[10] = (unsigned char) ((sizeof(buf) >> 24) & 0xff);
520 hdr.cmdp[11] = (unsigned char) ((sizeof(buf) >> 16) & 0xff);
521 hdr.cmdp[12] = (unsigned char) ((sizeof(buf) >> 8) & 0xff);
522 hdr.cmdp[13] = (unsigned char) (sizeof(buf) & 0xff);
524 hdr.dxfer_direction = SG_DXFER_FROM_DEV;
526 hdr.dxfer_len = sizeof(buf);
528 ret = ioctl(fd, SG_IO, &hdr);
534 /* record if an io error occurred */
535 if (hdr.info & SG_INFO_CHECK)
536 td_verror(td, EIO, "fio_sgio_read_capacity");
	/* RC(16) response: bytes 0-7 = max LBA, bytes 8-11 = block size */
538 *bs = (buf[8] << 24) | (buf[9] << 16) | (buf[10] << 8) | buf[11];
539 *max_lba = ((unsigned long long)buf[0] << 56) |
540 ((unsigned long long)buf[1] << 48) |
541 ((unsigned long long)buf[2] << 40) |
542 ((unsigned long long)buf[3] << 32) |
543 ((unsigned long long)buf[4] << 24) |
544 ((unsigned long long)buf[5] << 16) |
545 ((unsigned long long)buf[6] << 8) |
546 (unsigned long long)buf[7];
/*
 * Per-thread teardown, releasing the sgio_data allocated in init.
 * NOTE(review): the frees of sd->cmds/events/pfds/fd_flags/sgbuf and
 * of sd itself are not visible in this excerpt.
 */
553 static void fio_sgio_cleanup(struct thread_data *td)
555 struct sgio_data *sd = td->io_ops_data;
/*
 * Per-thread setup: allocate and zero the sgio_data bookkeeping —
 * per-iodepth command slots (CDB + sense), event pointers and sg_io_hdr
 * reap buffer, plus per-file pollfds and saved fcntl flags — then hang
 * it on td->io_ops_data.
 * NOTE(review): the malloc() results are not checked here; an OOM would
 * crash in the memsets.  Worth fixing in the full file.
 */
567 static int fio_sgio_init(struct thread_data *td)
569 struct sgio_data *sd;
571 sd = malloc(sizeof(*sd));
572 memset(sd, 0, sizeof(*sd));
573 sd->cmds = malloc(td->o.iodepth * sizeof(struct sgio_cmd));
574 memset(sd->cmds, 0, td->o.iodepth * sizeof(struct sgio_cmd));
575 sd->events = malloc(td->o.iodepth * sizeof(struct io_u *));
576 memset(sd->events, 0, td->o.iodepth * sizeof(struct io_u *));
577 sd->pfds = malloc(sizeof(struct pollfd) * td->o.nr_files);
578 memset(sd->pfds, 0, sizeof(struct pollfd) * td->o.nr_files);
579 sd->fd_flags = malloc(sizeof(int) * td->o.nr_files);
580 memset(sd->fd_flags, 0, sizeof(int) * td->o.nr_files);
581 sd->sgbuf = malloc(sizeof(struct sg_io_hdr) * td->o.iodepth);
582 memset(sd->sgbuf, 0, sizeof(struct sg_io_hdr) * td->o.iodepth);
	/* type check (block size / max LBA probe) deferred to first open */
583 sd->type_checked = 0;
584 td->io_ops_data = sd;
587 * we want to do it, regardless of whether odirect is set or not
589 td->o.override_sync = 1;
/*
 * One-time per-file validation: block devices get their sector size via
 * BLKSSZGET; sg char devices are checked for the SG driver version and
 * probed with READ CAPACITY for block size and max LBA.  Any other file
 * type is rejected.  Block devices complete inline through the SG_IO
 * ioctl, so their getevents/event hooks are cleared.
 * NOTE(review): the lines storing bs into sd and the 10/16-byte command
 * decision flag are not visible in this excerpt.
 */
593 static int fio_sgio_type_check(struct thread_data *td, struct fio_file *f)
595 struct sgio_data *sd = td->io_ops_data;
597 unsigned long long max_lba = 0;
599 if (f->filetype == FIO_TYPE_BLOCK) {
600 if (ioctl(f->fd, BLKSSZGET, &bs) < 0) {
601 td_verror(td, errno, "ioctl");
604 } else if (f->filetype == FIO_TYPE_CHAR) {
	/* verify this really is an sg node before issuing commands */
607 if (ioctl(f->fd, SG_GET_VERSION_NUM, &version) < 0) {
608 td_verror(td, errno, "ioctl");
612 ret = fio_sgio_read_capacity(td, &bs, &max_lba);
614 td_verror(td, td->error, "fio_sgio_read_capacity");
615 log_err("ioengine sg unable to read capacity successfully\n");
619 td_verror(td, EINVAL, "wrong file type");
620 log_err("ioengine sg only works on block or character devices\n");
625 // Determine size of commands needed based on max_lba
626 if (max_lba >= MAX_10B_LBA) {
627 dprint(FD_IO, "sgio_type_check: using 16 byte read/write "
628 "commands for lba above 0x%016llx/0x%016llx\n",
629 MAX_10B_LBA, max_lba);
	/* block devices complete synchronously via ioctl: no async reap */
632 if (f->filetype == FIO_TYPE_BLOCK) {
633 td->io_ops->getevents = NULL;
634 td->io_ops->event = NULL;
	/* remember so subsequent opens skip the probe */
636 sd->type_checked = 1;
/*
 * Open hook: delegate to generic_open_file(), then run the one-time
 * type check on the first open; if the check fails the file is closed
 * again.  NOTE(review): the return statements are not visible in this
 * excerpt.
 */
641 static int fio_sgio_open(struct thread_data *td, struct fio_file *f)
643 struct sgio_data *sd = td->io_ops_data;
646 ret = generic_open_file(td, f);
650 if (sd && !sd->type_checked && fio_sgio_type_check(td, f)) {
651 ret = generic_close_file(td, f);
659 * Build an error string with details about the driver, host or scsi
660 * error contained in the sg header Caller will use as necessary.
662 static char *fio_sgio_errdetails(struct io_u *io_u)
664 struct sg_io_hdr *hdr = &io_u->hdr;
665 #define MAXERRDETAIL 1024
666 #define MAXMSGCHUNK 128
667 char *msg, msgchunk[MAXMSGCHUNK];
	/* caller owns the returned buffer; zeroed so strlcat starts empty */
670 msg = calloc(1, MAXERRDETAIL);
674 * can't seem to find sg_err.h, so I'll just echo the define values
675 * so others can search on internet to find clearer clues of meaning.
	/* only decode when the kernel flagged an abnormal completion */
677 if (hdr->info & SG_INFO_CHECK) {
678 if (hdr->host_status) {
679 snprintf(msgchunk, MAXMSGCHUNK, "SG Host Status: 0x%02x; ", hdr->host_status);
680 strlcat(msg, msgchunk, MAXERRDETAIL);
	/* NOTE(review): the case labels of this switch are not visible in
	 * this excerpt; each arm appends the symbolic host-status name. */
681 switch (hdr->host_status) {
683 strlcat(msg, "SG_ERR_DID_NO_CONNECT", MAXERRDETAIL);
686 strlcat(msg, "SG_ERR_DID_BUS_BUSY", MAXERRDETAIL);
689 strlcat(msg, "SG_ERR_DID_TIME_OUT", MAXERRDETAIL);
692 strlcat(msg, "SG_ERR_DID_BAD_TARGET", MAXERRDETAIL);
695 strlcat(msg, "SG_ERR_DID_ABORT", MAXERRDETAIL);
698 strlcat(msg, "SG_ERR_DID_PARITY", MAXERRDETAIL);
701 strlcat(msg, "SG_ERR_DID_ERROR (internal error)", MAXERRDETAIL);
704 strlcat(msg, "SG_ERR_DID_RESET", MAXERRDETAIL);
707 strlcat(msg, "SG_ERR_DID_BAD_INTR (unexpected)", MAXERRDETAIL);
710 strlcat(msg, "SG_ERR_DID_PASSTHROUGH", MAXERRDETAIL);
713 strlcat(msg, "SG_ERR_DID_SOFT_ERROR (driver retry?)", MAXERRDETAIL);
716 strlcat(msg, "SG_ERR_DID_IMM_RETRY", MAXERRDETAIL);
719 strlcat(msg, "SG_ERR_DID_REQUEUE", MAXERRDETAIL);
722 strlcat(msg, "SG_ERR_DID_TRANSPORT_DISRUPTED", MAXERRDETAIL);
725 strlcat(msg, "SG_ERR_DID_TRANSPORT_FAILFAST", MAXERRDETAIL);
728 strlcat(msg, "SG_ERR_DID_TARGET_FAILURE", MAXERRDETAIL);
731 strlcat(msg, "SG_ERR_DID_NEXUS_FAILURE", MAXERRDETAIL);
734 strlcat(msg, "SG_ERR_DID_ALLOC_FAILURE", MAXERRDETAIL);
737 strlcat(msg, "SG_ERR_DID_MEDIUM_ERROR", MAXERRDETAIL);
740 strlcat(msg, "Unknown", MAXERRDETAIL);
743 strlcat(msg, ". ", MAXERRDETAIL);
745 if (hdr->driver_status) {
746 snprintf(msgchunk, MAXMSGCHUNK, "SG Driver Status: 0x%02x; ", hdr->driver_status);
747 strlcat(msg, msgchunk, MAXERRDETAIL);
	/* low nibble: driver status code */
748 switch (hdr->driver_status & 0x0F) {
750 strlcat(msg, "SG_ERR_DRIVER_BUSY", MAXERRDETAIL);
753 strlcat(msg, "SG_ERR_DRIVER_SOFT", MAXERRDETAIL);
756 strlcat(msg, "SG_ERR_DRIVER_MEDIA", MAXERRDETAIL);
759 strlcat(msg, "SG_ERR_DRIVER_ERROR", MAXERRDETAIL);
762 strlcat(msg, "SG_ERR_DRIVER_INVALID", MAXERRDETAIL);
765 strlcat(msg, "SG_ERR_DRIVER_TIMEOUT", MAXERRDETAIL);
768 strlcat(msg, "SG_ERR_DRIVER_HARD", MAXERRDETAIL);
771 strlcat(msg, "SG_ERR_DRIVER_SENSE", MAXERRDETAIL);
774 strlcat(msg, "Unknown", MAXERRDETAIL);
777 strlcat(msg, "; ", MAXERRDETAIL);
	/* high nibble: driver's suggested recovery action */
778 switch (hdr->driver_status & 0xF0) {
780 strlcat(msg, "SG_ERR_SUGGEST_RETRY", MAXERRDETAIL);
783 strlcat(msg, "SG_ERR_SUGGEST_ABORT", MAXERRDETAIL);
786 strlcat(msg, "SG_ERR_SUGGEST_REMAP", MAXERRDETAIL);
789 strlcat(msg, "SG_ERR_SUGGEST_DIE", MAXERRDETAIL);
792 strlcat(msg, "SG_ERR_SUGGEST_SENSE", MAXERRDETAIL);
795 strlcat(msg, ". ", MAXERRDETAIL);
798 snprintf(msgchunk, MAXMSGCHUNK, "SG SCSI Status: 0x%02x; ", hdr->status);
799 strlcat(msg, msgchunk, MAXERRDETAIL);
800 // SCSI 3 status codes
801 switch (hdr->status) {
803 strlcat(msg, "CHECK_CONDITION", MAXERRDETAIL);
806 strlcat(msg, "CONDITION_MET", MAXERRDETAIL);
809 strlcat(msg, "BUSY", MAXERRDETAIL);
812 strlcat(msg, "INTERMEDIATE", MAXERRDETAIL);
815 strlcat(msg, "INTERMEDIATE_CONDITION_MET", MAXERRDETAIL);
818 strlcat(msg, "RESERVATION_CONFLICT", MAXERRDETAIL);
821 strlcat(msg, "COMMAND_TERMINATED", MAXERRDETAIL);
824 strlcat(msg, "TASK_SET_FULL", MAXERRDETAIL);
827 strlcat(msg, "ACA_ACTIVE", MAXERRDETAIL);
830 strlcat(msg, "TASK_ABORTED", MAXERRDETAIL);
833 strlcat(msg, "Unknown", MAXERRDETAIL);
836 strlcat(msg, ". ", MAXERRDETAIL);
	/* dump raw sense bytes when the device returned sense data */
838 if (hdr->sb_len_wr) {
839 snprintf(msgchunk, MAXMSGCHUNK, "Sense Data (%d bytes):", hdr->sb_len_wr);
840 strlcat(msg, msgchunk, MAXERRDETAIL);
841 for (i = 0; i < hdr->sb_len_wr; i++) {
842 snprintf(msgchunk, MAXMSGCHUNK, " %02x", hdr->sbp[i]);
843 strlcat(msg, msgchunk, MAXERRDETAIL);
845 strlcat(msg, ". ", MAXERRDETAIL);
847 if (hdr->resid != 0) {
848 snprintf(msgchunk, MAXMSGCHUNK, "SG Driver: %d bytes out of %d not transferred. ", hdr->resid, hdr->dxfer_len);
849 strlcat(msg, msgchunk, MAXERRDETAIL);
	/* fallback text when nothing above produced output */
853 if (!(hdr->info & SG_INFO_CHECK) && !strlen(msg))
	/* NOTE(review): strncpy does not guarantee NUL-termination; the
	 * length argument is not visible here -- verify in the full file
	 * (snprintf would be the safer choice). */
854 strncpy(msg, "SG Driver did not report a Host, Driver or Device check",
861 * get max file size from read capacity.
863 static int fio_sgio_get_file_size(struct thread_data *td, struct fio_file *f)
866 * get_file_size is being called even before sgio_init is
867 * called, so none of the sg_io structures are
868 * initialized in the thread_data yet. So we need to do the
869 * ReadCapacity without any of those helpers. One of the effects
870 * is that ReadCapacity may get called 4 times on each open:
871 * readcap(10) followed by readcap(16) if needed - just to get
872 * the file size after the init occurs - it will be called
873 * again when "type_check" is called during structure
874 * initialization I'm not sure how to prevent this little
878 unsigned long long max_lba = 0;
	/* already probed on an earlier open: nothing to do */
881 if (fio_file_size_known(f))
884 if (f->filetype != FIO_TYPE_BLOCK && f->filetype != FIO_TYPE_CHAR) {
885 td_verror(td, EINVAL, "wrong file type");
886 log_err("ioengine sg only works on block or character devices\n");
890 ret = fio_sgio_read_capacity(td, &bs, &max_lba);
892 td_verror(td, td->error, "fio_sgio_read_capacity");
893 log_err("ioengine sg unable to successfully execute read capacity to get block size and maximum lba\n");
	/* capacity in bytes = (highest LBA + 1) * block size */
897 f->real_file_size = (max_lba + 1) * bs;
898 fio_file_set_size_known(f);
/*
 * ioengine registration table for the real (FIO_HAVE_SGIO) build.
 * FIO_SYNCIO | FIO_RAWIO: engine is treated as synchronous and operates
 * on raw devices.  Note getevents/event may be cleared at runtime for
 * block devices in fio_sgio_type_check().
 */
903 static struct ioengine_ops ioengine = {
905 .version = FIO_IOOPS_VERSION,
906 .init = fio_sgio_init,
907 .prep = fio_sgio_prep,
908 .queue = fio_sgio_queue,
909 .getevents = fio_sgio_getevents,
910 .errdetails = fio_sgio_errdetails,
911 .event = fio_sgio_event,
912 .cleanup = fio_sgio_cleanup,
913 .open_file = fio_sgio_open,
914 .close_file = generic_close_file,
915 .get_file_size = fio_sgio_get_file_size,
916 .flags = FIO_SYNCIO | FIO_RAWIO,
918 .option_struct_size = sizeof(struct sg_options)
921 #else /* FIO_HAVE_SGIO */
924 * When we have a proper configure system in place, we simply wont build
925 * and install this io engine. For now install a crippled version that
926 * just complains and fails to load.
/* Stub init for builds without SG support: log and fail engine load. */
928 static int fio_sgio_init(struct thread_data fio_unused *td)
930 log_err("fio: ioengine sg not available\n");
/* Crippled registration table: only init is wired, and it fails. */
934 static struct ioengine_ops ioengine = {
936 .version = FIO_IOOPS_VERSION,
937 .init = fio_sgio_init,
/* Constructor: register this engine with fio at program load. */
942 static void fio_init fio_sgio_register(void)
944 register_ioengine(&ioengine);
/* Destructor: deregister the engine at program exit. */
947 static void fio_exit fio_sgio_unregister(void)
949 unregister_ioengine(&ioengine);