4 * IO engine that uses the Linux SG v3 interface to talk to SCSI devices
6 * This ioengine can operate in two modes:
7 * sync with block devices (/dev/sdX) or
8 * with character devices (/dev/sgY) with direct=1 or sync=1
9 * async with character devices with direct=0 and sync=0
11 * What value does queue() return for the different cases?
12 * queue() return value
14 * /dev/sdX RWT FIO_Q_COMPLETED
15 * /dev/sgY RWT FIO_Q_COMPLETED
16 * with direct=1 or sync=1
19 * /dev/sgY RWT FIO_Q_QUEUED
22 * Because FIO_SYNCIO is set for this ioengine td_io_queue() will fill in
23 * issue_time *before* each IO is sent to queue()
25 * Where are the IO counting functions called for the different cases?
28 * /dev/sdX (commit==NULL)
30 * io_u_mark_depth() called in td_io_queue()
31 * io_u_mark_submit/complete() called in td_io_queue()
32 * issue_time set in td_io_queue()
34 * /dev/sgY with direct=1 or sync=1 (commit does nothing)
36 * io_u_mark_depth() called in td_io_queue()
37 * io_u_mark_submit/complete() called in queue()
38 * issue_time set in td_io_queue()
41 * /dev/sgY with direct=0 and sync=0
42 * RW: read and write operations are submitted in queue()
43 * io_u_mark_depth() called in td_io_commit()
44 * io_u_mark_submit() called in queue()
45 * issue_time set in td_io_queue()
46 * T: trim operations are queued in queue() and submitted in commit()
47 * io_u_mark_depth() called in td_io_commit()
48 * io_u_mark_submit() called in commit()
49 * issue_time set in commit()
59 #include "../optgroup.h"
63 #ifndef SGV4_FLAG_HIPRI
64 #define SGV4_FLAG_HIPRI 0x800
71 FIO_SG_WRITE_SAME_NDOB,
72 FIO_SG_VERIFY_BYTCHK_00,
73 FIO_SG_VERIFY_BYTCHK_01,
74 FIO_SG_VERIFY_BYTCHK_11,
81 unsigned int writefua;
82 unsigned int write_mode;
85 static struct fio_option options[] = {
/* "hipri": submit with SGV4_FLAG_HIPRI so completions can be polled */
88 .lname = "High Priority",
89 .type = FIO_OPT_STR_SET,
90 .off1 = offsetof(struct sg_options, hipri),
91 .help = "Use polled IO completions",
92 .category = FIO_OPT_C_ENGINE,
93 .group = FIO_OPT_G_SG,
/* "readfua": set the FUA bit in READ CDBs */
97 .lname = "sg engine read fua flag support",
99 .off1 = offsetof(struct sg_options, readfua),
100 .help = "Set FUA flag (force unit access) for all Read operations",
102 .category = FIO_OPT_C_ENGINE,
103 .group = FIO_OPT_G_SG,
/* "writefua": set the FUA bit in WRITE CDBs */
107 .lname = "sg engine write fua flag support",
108 .type = FIO_OPT_BOOL,
109 .off1 = offsetof(struct sg_options, writefua),
110 .help = "Set FUA flag (force unit access) for all Write operations",
112 .category = FIO_OPT_C_ENGINE,
113 .group = FIO_OPT_G_SG,
/* "sg_write_mode": selects which SCSI write-family command prep() builds */
116 .name = "sg_write_mode",
117 .lname = "specify sg write mode",
119 .off1 = offsetof(struct sg_options, write_mode),
120 .help = "Specify SCSI WRITE mode",
124 .oval = FIO_SG_WRITE,
125 .help = "Issue standard SCSI WRITE commands",
127 { .ival = "write_and_verify",
128 .oval = FIO_SG_WRITE_VERIFY,
129 .help = "Issue SCSI WRITE AND VERIFY commands",
/* deprecated alias for write_and_verify */
132 .oval = FIO_SG_WRITE_VERIFY,
133 .help = "Issue SCSI WRITE AND VERIFY commands. This "
134 "option is deprecated. Use write_and_verify instead.",
136 { .ival = "write_same",
137 .oval = FIO_SG_WRITE_SAME,
138 .help = "Issue SCSI WRITE SAME commands",
/* deprecated alias for write_same */
141 .oval = FIO_SG_WRITE_SAME,
142 .help = "Issue SCSI WRITE SAME commands. This "
143 "option is deprecated. Use write_same instead.",
145 { .ival = "write_same_ndob",
146 .oval = FIO_SG_WRITE_SAME_NDOB,
147 .help = "Issue SCSI WRITE SAME(16) commands with NDOB flag set",
149 { .ival = "verify_bytchk_00",
150 .oval = FIO_SG_VERIFY_BYTCHK_00,
151 .help = "Issue SCSI VERIFY commands with BYTCHK set to 00",
153 { .ival = "verify_bytchk_01",
154 .oval = FIO_SG_VERIFY_BYTCHK_01,
155 .help = "Issue SCSI VERIFY commands with BYTCHK set to 01",
157 { .ival = "verify_bytchk_11",
158 .oval = FIO_SG_VERIFY_BYTCHK_11,
159 .help = "Issue SCSI VERIFY commands with BYTCHK set to 11",
162 .category = FIO_OPT_C_ENGINE,
163 .group = FIO_OPT_G_SG,
170 #define MAX_10B_LBA 0xFFFFFFFFULL
171 #define SCSI_TIMEOUT_MS 30000 // 30 second timeout; currently no method to override
172 #define MAX_SB 64 // sense block maximum return size
174 #define FIO_SGIO_DEBUG
178 unsigned char cdb[16]; // enhanced from 10 to support 16 byte commands
179 unsigned char sb[MAX_SB]; // add sense block to commands
184 uint8_t *unmap_param;
185 unsigned int unmap_range_count;
186 struct io_u **trim_io_us;
190 struct sgio_cmd *cmds;
191 struct io_u **events;
197 struct sgio_trim **trim_queues;
199 #ifdef FIO_SGIO_DEBUG
200 unsigned int *trim_queue_map;
/*
 * Decode a big-endian 32-bit value from a CDB/data buffer.
 * NOTE(review): the cast assumes buf is suitably aligned for a
 * uint32_t load — callers pass offsets into CDB buffers; confirm.
 */
static inline uint32_t sgio_get_be32(uint8_t *buf)
{
	return be32_to_cpu(*((uint32_t *) buf));
}
/*
 * Decode a big-endian 64-bit value from a CDB/data buffer.
 * NOTE(review): same alignment assumption as sgio_get_be32.
 */
static inline uint64_t sgio_get_be64(uint8_t *buf)
{
	return be64_to_cpu(*((uint64_t *) buf));
}
/* Store val into buf in big-endian byte order (memcpy avoids alignment UB). */
static inline void sgio_set_be16(uint16_t val, uint8_t *buf)
{
	uint16_t t = cpu_to_be16(val);

	memcpy(buf, &t, sizeof(uint16_t));
}
/* Store val into buf in big-endian byte order (memcpy avoids alignment UB). */
static inline void sgio_set_be32(uint32_t val, uint8_t *buf)
{
	uint32_t t = cpu_to_be32(val);

	memcpy(buf, &t, sizeof(uint32_t));
}
/* Store val into buf in big-endian byte order (memcpy avoids alignment UB). */
static inline void sgio_set_be64(uint64_t val, uint8_t *buf)
{
	uint64_t t = cpu_to_be64(val);

	memcpy(buf, &t, sizeof(uint64_t));
}
235 static inline bool sgio_unbuffered(struct thread_data *td)
237 return (td->o.odirect || td->o.sync_io);
240 static void sgio_hdr_init(struct sgio_data *sd, struct sg_io_hdr *hdr,
241 struct io_u *io_u, int fs)
243 struct sgio_cmd *sc = &sd->cmds[io_u->index];
245 memset(hdr, 0, sizeof(*hdr));
246 memset(sc->cdb, 0, sizeof(sc->cdb));
248 hdr->interface_id = 'S';
250 hdr->cmd_len = sizeof(sc->cdb);
252 hdr->mx_sb_len = sizeof(sc->sb);
253 hdr->pack_id = io_u->index;
255 hdr->timeout = SCSI_TIMEOUT_MS;
258 hdr->dxferp = io_u->xfer_buf;
259 hdr->dxfer_len = io_u->xfer_buflen;
/*
 * Scan the pollfd array and report whether any descriptor signalled
 * POLLIN (i.e. a completed sg command is ready to be read back).
 *
 * Returns 1 if at least one fd is readable, 0 otherwise.
 */
static int pollin_events(struct pollfd *pfds, int fds)
{
	int i;

	for (i = 0; i < fds; i++)
		if (pfds[i].revents & POLLIN)
			return 1;

	return 0;
}
/*
 * Read exactly `size` bytes from fd, retrying on EAGAIN/EINTR so short
 * reads of sg_io_hdr completions never tear a header in half.
 *
 * Returns 0 on success, the read(2) errno on hard failure, or EAGAIN
 * when EOF/short data was hit before `size` bytes arrived.
 */
static int sg_fd_read(int fd, void *data, size_t size)
{
	int err = 0;
	char *p = data;	/* byte cursor; avoids void* arithmetic */

	while (size) {
		ssize_t ret;

		ret = read(fd, p, size);
		if (ret < 0) {
			if (errno == EAGAIN || errno == EINTR)
				continue;
			err = errno;
			break;
		} else if (!ret)
			break;
		else {
			p += ret;
			size -= ret;
		}
	}

	if (err)
		return err;
	if (size)
		return EAGAIN;

	return 0;
}
303 static int fio_sgio_getevents(struct thread_data *td, unsigned int min,
305 const struct timespec fio_unused *t)
307 struct sgio_data *sd = td->io_ops_data;
308 int left = max, eventNum, ret, r = 0, trims = 0;
309 void *buf = sd->sgbuf;
310 unsigned int i, j, events;
/*
 * Reap completed sg headers by read(2)-ing them back from each file
 * descriptor and translate them into io_u events for fio.
 */
315 * Fill in the file descriptors
317 for_each_file(td, f, i) {
319 * don't block for min events == 0
322 sd->fd_flags[i] = fio_set_fd_nonblocking(f->fd, "sg");
324 sd->fd_flags[i] = -1;
326 sd->pfds[i].fd = f->fd;
327 sd->pfds[i].events = POLLIN;
331 ** There are two counters here:
332 ** - number of SCSI commands completed
333 ** - number of io_us completed
335 ** These are the same with reads and writes, but
336 ** could differ with trim/unmap commands because
337 ** a single unmap can include multiple io_us
343 dprint(FD_IO, "sgio_getevents: sd %p: min=%d, max=%d, left=%d\n", sd, min, max, left);
/* wait in poll() until at least one fd has a completion to reap */
349 ret = poll(sd->pfds, td->o.nr_files, -1);
353 td_verror(td, errno, "poll");
358 if (pollin_events(sd->pfds, td->o.nr_files))
/* drain completed headers from every file */
368 for_each_file(td, f, i) {
369 for (eventNum = 0; eventNum < left; eventNum++) {
370 ret = sg_fd_read(f->fd, p, sizeof(struct sg_io_hdr));
371 dprint(FD_IO, "sgio_getevents: sg_fd_read ret: %d\n", ret);
374 td_verror(td, r, "sg_read");
377 io_u = ((struct sg_io_hdr *)p)->usr_ptr;
378 if (io_u->ddir == DDIR_TRIM) {
/* one reaped unmap header completes several queued trim io_us */
379 events += sd->trim_queues[io_u->index]->unmap_range_count;
380 eventNum += sd->trim_queues[io_u->index]->unmap_range_count - 1;
384 p += sizeof(struct sg_io_hdr);
385 dprint(FD_IO, "sgio_getevents: events: %d, eventNum: %d, left: %d\n", events, eventNum, left);
389 if (r < 0 && !events)
/* convert each reaped header into entries in sd->events[] */
399 for (i = 0; i < events; i++) {
400 struct sg_io_hdr *hdr = (struct sg_io_hdr *) buf + i;
401 sd->events[i + trims] = hdr->usr_ptr;
402 io_u = (struct io_u *)(hdr->usr_ptr);
404 if (hdr->info & SG_INFO_CHECK) {
405 /* record if an io error occurred, ignore resid */
406 memcpy(&io_u->hdr, hdr, sizeof(struct sg_io_hdr));
407 sd->events[i + trims]->error = EIO;
410 if (io_u->ddir == DDIR_TRIM) {
411 struct sgio_trim *st = sd->trim_queues[io_u->index];
412 #ifdef FIO_SGIO_DEBUG
413 assert(st->trim_io_us[0] == io_u);
414 assert(sd->trim_queue_map[io_u->index] == io_u->index);
415 dprint(FD_IO, "sgio_getevents: reaping %d io_us from trim queue %d\n", st->unmap_range_count, io_u->index);
416 dprint(FD_IO, "sgio_getevents: reaped io_u %d and stored in events[%d]\n", io_u->index, i+trims);
/* fan the remaining trim io_us of this queue out as extra events */
418 for (j = 1; j < st->unmap_range_count; j++) {
420 sd->events[i + trims] = st->trim_io_us[j];
421 #ifdef FIO_SGIO_DEBUG
422 dprint(FD_IO, "sgio_getevents: reaped io_u %d and stored in events[%d]\n", st->trim_io_us[j]->index, i+trims);
423 assert(sd->trim_queue_map[st->trim_io_us[j]->index] == io_u->index);
425 if (hdr->info & SG_INFO_CHECK) {
426 /* record if an io error occurred, ignore resid */
427 memcpy(&st->trim_io_us[j]->hdr, hdr, sizeof(struct sg_io_hdr));
428 sd->events[i + trims]->error = EIO;
431 events -= st->unmap_range_count - 1;
432 st->unmap_range_count = 0;
/* restore the fd flags that were switched to non-blocking above */
438 for_each_file(td, f, i) {
439 if (sd->fd_flags[i] == -1)
442 if (fcntl(f->fd, F_SETFL, sd->fd_flags[i]) < 0)
443 log_err("fio: sg failed to restore fcntl flags: %s\n", strerror(errno));
450 static enum fio_q_status fio_sgio_ioctl_doio(struct thread_data *td,
454 struct sgio_data *sd = td->io_ops_data;
455 struct sg_io_hdr *hdr = &io_u->hdr;
458 sd->events[0] = io_u;
460 ret = ioctl(f->fd, SG_IO, hdr);
464 /* record if an io error occurred */
465 if (hdr->info & SG_INFO_CHECK)
468 return FIO_Q_COMPLETED;
471 static enum fio_q_status fio_sgio_rw_doio(struct thread_data *td,
473 struct io_u *io_u, int do_sync)
475 struct sg_io_hdr *hdr = &io_u->hdr;
478 ret = write(f->fd, hdr, sizeof(*hdr));
484 * We can't just read back the first command that completes
485 * and assume it's the one we need, it could be any command
491 ret = read(f->fd, hdr, sizeof(*hdr));
495 __io_u = hdr->usr_ptr;
497 /* record if an io error occurred */
498 if (hdr->info & SG_INFO_CHECK)
504 if (io_u_sync_complete(td, __io_u))
509 return FIO_Q_COMPLETED;
515 static enum fio_q_status fio_sgio_doio(struct thread_data *td,
516 struct io_u *io_u, int do_sync)
518 struct fio_file *f = io_u->file;
519 enum fio_q_status ret;
521 if (f->filetype == FIO_TYPE_BLOCK) {
522 ret = fio_sgio_ioctl_doio(td, f, io_u);
524 td_verror(td, io_u->error, __func__);
526 ret = fio_sgio_rw_doio(td, f, io_u, do_sync);
527 if (io_u->error && do_sync)
528 td_verror(td, io_u->error, __func__);
534 static void fio_sgio_rw_lba(struct sg_io_hdr *hdr, unsigned long long lba,
535 unsigned long long nr_blocks, bool override16)
537 if (lba < MAX_10B_LBA && !override16) {
538 sgio_set_be32((uint32_t) lba, &hdr->cmdp[2]);
539 sgio_set_be16((uint16_t) nr_blocks, &hdr->cmdp[7]);
541 sgio_set_be64(lba, &hdr->cmdp[2]);
542 sgio_set_be32((uint32_t) nr_blocks, &hdr->cmdp[10]);
548 static int fio_sgio_prep(struct thread_data *td, struct io_u *io_u)
550 struct sg_io_hdr *hdr = &io_u->hdr;
551 struct sg_options *o = td->eo;
552 struct sgio_data *sd = td->io_ops_data;
553 unsigned long long nr_blocks, lba;
/* reject transfers that are not a multiple of the device block size */
556 if (io_u->xfer_buflen & (sd->bs - 1)) {
557 log_err("read/write not sector aligned\n");
561 nr_blocks = io_u->xfer_buflen / sd->bs;
562 lba = io_u->offset / sd->bs;
/* READ: read(10) while the LBA fits in 32 bits, else read(16) */
564 if (io_u->ddir == DDIR_READ) {
565 sgio_hdr_init(sd, hdr, io_u, 1);
567 hdr->dxfer_direction = SG_DXFER_FROM_DEV;
568 if (lba < MAX_10B_LBA)
569 hdr->cmdp[0] = 0x28; // read(10)
571 hdr->cmdp[0] = 0x88; // read(16)
574 hdr->flags |= SGV4_FLAG_HIPRI;
576 hdr->cmdp[1] |= 0x08;
578 fio_sgio_rw_lba(hdr, lba, nr_blocks, false);
/* WRITE: opcode depends on the configured sg_write_mode */
580 } else if (io_u->ddir == DDIR_WRITE) {
581 sgio_hdr_init(sd, hdr, io_u, 1);
583 hdr->dxfer_direction = SG_DXFER_TO_DEV;
584 switch(o->write_mode) {
586 if (lba < MAX_10B_LBA)
587 hdr->cmdp[0] = 0x2a; // write(10)
589 hdr->cmdp[0] = 0x8a; // write(16)
591 hdr->flags |= SGV4_FLAG_HIPRI;
593 hdr->cmdp[1] |= 0x08;
595 case FIO_SG_WRITE_VERIFY:
596 if (lba < MAX_10B_LBA)
597 hdr->cmdp[0] = 0x2e; // write and verify(10)
599 hdr->cmdp[0] = 0x8e; // write and verify(16)
601 // BYTCHK is disabled by virtue of the memset in sgio_hdr_init
602 case FIO_SG_WRITE_SAME:
603 hdr->dxfer_len = sd->bs;
604 if (lba < MAX_10B_LBA)
605 hdr->cmdp[0] = 0x41; // write same(10)
607 hdr->cmdp[0] = 0x93; // write same(16)
609 case FIO_SG_WRITE_SAME_NDOB:
610 hdr->cmdp[0] = 0x93; // write same(16)
611 hdr->cmdp[1] |= 0x1; // no data output buffer
614 case FIO_SG_VERIFY_BYTCHK_00:
615 if (lba < MAX_10B_LBA)
616 hdr->cmdp[0] = 0x2f; // VERIFY(10)
618 hdr->cmdp[0] = 0x8f; // VERIFY(16)
621 case FIO_SG_VERIFY_BYTCHK_01:
622 if (lba < MAX_10B_LBA)
623 hdr->cmdp[0] = 0x2f; // VERIFY(10)
625 hdr->cmdp[0] = 0x8f; // VERIFY(16)
626 hdr->cmdp[1] |= 0x02; // BYTCHK = 01b
628 case FIO_SG_VERIFY_BYTCHK_11:
629 if (lba < MAX_10B_LBA)
630 hdr->cmdp[0] = 0x2f; // VERIFY(10)
632 hdr->cmdp[0] = 0x8f; // VERIFY(16)
633 hdr->cmdp[1] |= 0x06; // BYTCHK = 11b
634 hdr->dxfer_len = sd->bs;
/* NDOB carries no payload, so force the 16-byte CDB layout */
638 fio_sgio_rw_lba(hdr, lba, nr_blocks,
639 o->write_mode == FIO_SG_WRITE_SAME_NDOB);
/* TRIM: accumulate ranges into a shared UNMAP parameter list */
641 } else if (io_u->ddir == DDIR_TRIM) {
642 struct sgio_trim *st;
644 if (sd->current_queue == -1) {
645 sgio_hdr_init(sd, hdr, io_u, 0);
648 hdr->dxfer_direction = SG_DXFER_TO_DEV;
649 hdr->cmdp[0] = 0x42; // unmap
650 sd->current_queue = io_u->index;
651 st = sd->trim_queues[sd->current_queue];
652 hdr->dxferp = st->unmap_param;
653 #ifdef FIO_SGIO_DEBUG
654 assert(sd->trim_queues[io_u->index]->unmap_range_count == 0);
655 dprint(FD_IO, "sg: creating new queue based on io_u %d\n", io_u->index);
659 st = sd->trim_queues[sd->current_queue];
661 dprint(FD_IO, "sg: adding io_u %d to trim queue %d\n", io_u->index, sd->current_queue);
662 st->trim_io_us[st->unmap_range_count] = io_u;
663 #ifdef FIO_SGIO_DEBUG
664 sd->trim_queue_map[io_u->index] = sd->current_queue;
/* each UNMAP block descriptor is 16 bytes, after an 8-byte list header */
667 offset = 8 + 16 * st->unmap_range_count;
668 sgio_set_be64(lba, &st->unmap_param[offset]);
669 sgio_set_be32((uint32_t) nr_blocks, &st->unmap_param[offset + 8]);
671 st->unmap_range_count++;
/* SYNC: SYNCHRONIZE CACHE(10) or (16) depending on device size */
673 } else if (ddir_sync(io_u->ddir)) {
674 sgio_hdr_init(sd, hdr, io_u, 0);
675 hdr->dxfer_direction = SG_DXFER_NONE;
676 if (lba < MAX_10B_LBA)
677 hdr->cmdp[0] = 0x35; // synccache(10)
679 hdr->cmdp[0] = 0x91; // synccache(16)
686 static void fio_sgio_unmap_setup(struct sg_io_hdr *hdr, struct sgio_trim *st)
688 uint16_t cnt = st->unmap_range_count * 16;
690 hdr->dxfer_len = cnt + 8;
691 sgio_set_be16(cnt + 8, &hdr->cmdp[7]);
692 sgio_set_be16(cnt + 6, st->unmap_param);
693 sgio_set_be16(cnt, &st->unmap_param[2]);
698 static enum fio_q_status fio_sgio_queue(struct thread_data *td,
701 struct sg_io_hdr *hdr = &io_u->hdr;
702 struct sgio_data *sd = td->io_ops_data;
703 int ret, do_sync = 0;
705 fio_ro_check(td, io_u);
/* direct/sync jobs and cache flushes are always submitted synchronously */
707 if (sgio_unbuffered(td) || ddir_sync(io_u->ddir))
710 if (io_u->ddir == DDIR_TRIM) {
711 if (do_sync || io_u->file->filetype == FIO_TYPE_BLOCK) {
712 struct sgio_trim *st = sd->trim_queues[sd->current_queue];
714 /* finish cdb setup for unmap because we are
715 ** doing unmap commands synchronously */
716 #ifdef FIO_SGIO_DEBUG
717 assert(st->unmap_range_count == 1);
718 assert(io_u == st->trim_io_us[0]);
722 fio_sgio_unmap_setup(hdr, st);
724 st->unmap_range_count = 0;
725 sd->current_queue = -1;
727 /* queue up trim ranges and submit in commit() */
731 ret = fio_sgio_doio(td, io_u, do_sync);
/* a non-zero SCSI status means the transfer failed; keep the residual */
735 else if (hdr->status) {
736 io_u->resid = hdr->resid;
/* account submit/complete here for the cases commit() won't handle */
738 } else if (td->io_ops->commit != NULL) {
739 if (do_sync && !ddir_sync(io_u->ddir)) {
740 io_u_mark_submit(td, 1);
741 io_u_mark_complete(td, 1);
742 } else if (io_u->ddir == DDIR_READ || io_u->ddir == DDIR_WRITE) {
743 io_u_mark_submit(td, 1);
744 io_u_queued(td, io_u);
749 td_verror(td, io_u->error, "xfer");
750 return FIO_Q_COMPLETED;
756 static int fio_sgio_commit(struct thread_data *td)
758 struct sgio_data *sd = td->io_ops_data;
759 struct sgio_trim *st;
761 struct sg_io_hdr *hdr;
/* nothing to do unless trim ranges are pending submission */
766 if (sd->current_queue == -1)
769 st = sd->trim_queues[sd->current_queue];
770 io_u = st->trim_io_us[0];
/* finalize the UNMAP parameter list and submit the whole batch */
773 fio_sgio_unmap_setup(hdr, st);
775 sd->current_queue = -1;
777 ret = fio_sgio_rw_doio(td, io_u->file, io_u, 0);
/* on failure, propagate the error to every io_u in the batch */
779 if (ret < 0 || hdr->status) {
789 for (i = 0; i < st->unmap_range_count; i++) {
790 st->trim_io_us[i]->error = error;
791 clear_io_u(td, st->trim_io_us[i]);
793 st->trim_io_us[i]->resid = hdr->resid;
796 td_verror(td, error, "xfer");
/* trims are submitted here, so issue_time is set in commit() */
800 if (fio_fill_issue_time(td)) {
801 fio_gettime(&now, NULL);
802 for (i = 0; i < st->unmap_range_count; i++) {
803 memcpy(&st->trim_io_us[i]->issue_time, &now, sizeof(now));
804 io_u_queued(td, io_u);
807 io_u_mark_submit(td, st->unmap_range_count);
812 static struct io_u *fio_sgio_event(struct thread_data *td, int event)
814 struct sgio_data *sd = td->io_ops_data;
816 return sd->events[event];
819 static int fio_sgio_read_capacity(struct thread_data *td, unsigned int *bs,
820 unsigned long long *max_lba)
823 * need to do read capacity operation w/o benefit of sd or
824 * io_u structures, which are not initialized until later.
826 struct sg_io_hdr hdr;
827 unsigned long long hlba;
828 unsigned int blksz = 0;
829 unsigned char cmd[16];
830 unsigned char sb[64];
831 unsigned char buf[32]; // read capacity return
835 struct fio_file *f = td->files[0];
837 /* open file independent of rest of application */
838 fd = open(f->file_name, O_RDONLY);
842 memset(&hdr, 0, sizeof(hdr));
843 memset(cmd, 0, sizeof(cmd));
844 memset(sb, 0, sizeof(sb));
845 memset(buf, 0, sizeof(buf));
847 /* First let's try a 10 byte read capacity. */
848 hdr.interface_id = 'S';
852 hdr.mx_sb_len = sizeof(sb);
853 hdr.timeout = SCSI_TIMEOUT_MS;
854 hdr.cmdp[0] = 0x25; // Read Capacity(10)
855 hdr.dxfer_direction = SG_DXFER_FROM_DEV;
857 hdr.dxfer_len = sizeof(buf);
859 ret = ioctl(fd, SG_IO, &hdr);
865 if (hdr.info & SG_INFO_CHECK) {
866 /* RCAP(10) might be unsupported by device. Force RCAP(16) */
/* RCAP(10) data: max LBA at bytes 0-3, block length at bytes 4-7 */
869 blksz = sgio_get_be32(&buf[4]);
870 hlba = sgio_get_be32(buf);
874 * If max lba masked by MAX_10B_LBA equals MAX_10B_LBA,
875 * then need to retry with 16 byte Read Capacity command.
877 if (hlba == MAX_10B_LBA) {
879 hdr.cmdp[0] = 0x9e; // service action
880 hdr.cmdp[1] = 0x10; // Read Capacity(16)
881 sgio_set_be32(sizeof(buf), &hdr.cmdp[10]);
883 hdr.dxfer_direction = SG_DXFER_FROM_DEV;
885 hdr.dxfer_len = sizeof(buf);
887 ret = ioctl(fd, SG_IO, &hdr);
893 /* record if an io error occurred */
894 if (hdr.info & SG_INFO_CHECK)
895 td_verror(td, EIO, "fio_sgio_read_capacity");
/* RCAP(16) data: max LBA at bytes 0-7, block length at bytes 8-11 */
897 blksz = sgio_get_be32(&buf[8]);
898 hlba = sgio_get_be64(buf);
913 static void fio_sgio_cleanup(struct thread_data *td)
915 struct sgio_data *sd = td->io_ops_data;
924 #ifdef FIO_SGIO_DEBUG
925 free(sd->trim_queue_map);
928 for (i = 0; i < td->o.iodepth; i++) {
929 free(sd->trim_queues[i]->unmap_param);
930 free(sd->trim_queues[i]->trim_io_us);
931 free(sd->trim_queues[i]);
934 free(sd->trim_queues);
939 static int fio_sgio_init(struct thread_data *td)
941 struct sgio_data *sd;
942 struct sgio_trim *st;
943 struct sg_io_hdr *h3p;
946 sd = calloc(1, sizeof(*sd));
947 sd->cmds = calloc(td->o.iodepth, sizeof(struct sgio_cmd));
948 sd->sgbuf = calloc(td->o.iodepth, sizeof(struct sg_io_hdr));
949 sd->events = calloc(td->o.iodepth, sizeof(struct io_u *));
950 sd->pfds = calloc(td->o.nr_files, sizeof(struct pollfd));
951 sd->fd_flags = calloc(td->o.nr_files, sizeof(int));
952 sd->type_checked = 0;
954 sd->trim_queues = calloc(td->o.iodepth, sizeof(struct sgio_trim *));
955 sd->current_queue = -1;
956 #ifdef FIO_SGIO_DEBUG
957 sd->trim_queue_map = calloc(td->o.iodepth, sizeof(int));
959 for (i = 0, h3p = sd->sgbuf; i < td->o.iodepth; i++, ++h3p) {
960 sd->trim_queues[i] = calloc(1, sizeof(struct sgio_trim));
961 st = sd->trim_queues[i];
962 st->unmap_param = calloc(td->o.iodepth + 1, sizeof(char[16]));
963 st->unmap_range_count = 0;
964 st->trim_io_us = calloc(td->o.iodepth, sizeof(struct io_u *));
965 h3p->interface_id = 'S';
968 td->io_ops_data = sd;
971 * we want to do it, regardless of whether odirect is set or not
973 td->o.override_sync = 1;
977 static int fio_sgio_type_check(struct thread_data *td, struct fio_file *f)
979 struct sgio_data *sd = td->io_ops_data;
981 unsigned long long max_lba = 0;
/* block devices: sector size comes straight from the BLKSSZGET ioctl */
983 if (f->filetype == FIO_TYPE_BLOCK) {
984 if (ioctl(f->fd, BLKSSZGET, &bs) < 0) {
985 td_verror(td, errno, "ioctl");
/* char devices: must speak SG v3; geometry comes from READ CAPACITY */
988 } else if (f->filetype == FIO_TYPE_CHAR) {
991 if (ioctl(f->fd, SG_GET_VERSION_NUM, &version) < 0) {
992 td_verror(td, errno, "ioctl");
996 ret = fio_sgio_read_capacity(td, &bs, &max_lba);
998 td_verror(td, td->error, "fio_sgio_read_capacity");
999 log_err("ioengine sg unable to read capacity successfully\n");
1003 td_verror(td, EINVAL, "wrong file type");
1004 log_err("ioengine sg only works on block or character devices\n");
1009 // Determine size of commands needed based on max_lba
1010 if (max_lba >= MAX_10B_LBA) {
1011 dprint(FD_IO, "sgio_type_check: using 16 byte read/write "
1012 "commands for lba above 0x%016llx/0x%016llx\n",
1013 MAX_10B_LBA, max_lba);
/* block devices complete inline, so drop the async callbacks */
1016 if (f->filetype == FIO_TYPE_BLOCK) {
1017 td->io_ops->getevents = NULL;
1018 td->io_ops->event = NULL;
1019 td->io_ops->commit = NULL;
1021 ** Setting these functions to null may cause problems
1022 ** with filename=/dev/sda:/dev/sg0 since we are only
1023 ** considering a single file
1026 sd->type_checked = 1;
1031 static int fio_sgio_open(struct thread_data *td, struct fio_file *f)
1033 struct sgio_data *sd = td->io_ops_data;
1036 ret = generic_open_file(td, f);
1040 if (sd && !sd->type_checked && fio_sgio_type_check(td, f)) {
1041 ret = generic_close_file(td, f);
1049 * Build an error string with details about the driver, host or scsi
1050 * error contained in the sg header Caller will use as necessary.
1052 static char *fio_sgio_errdetails(struct io_u *io_u)
1054 struct sg_io_hdr *hdr = &io_u->hdr;
1055 #define MAXERRDETAIL 1024
1056 #define MAXMSGCHUNK 128
1057 char *msg, msgchunk[MAXMSGCHUNK];
1060 msg = calloc(1, MAXERRDETAIL);
1064 * can't seem to find sg_err.h, so I'll just echo the define values
1065 * so others can search on internet to find clearer clues of meaning.
/* decode the transport/host error byte */
1067 if (hdr->info & SG_INFO_CHECK) {
1068 if (hdr->host_status) {
1069 snprintf(msgchunk, MAXMSGCHUNK, "SG Host Status: 0x%02x; ", hdr->host_status);
1070 strlcat(msg, msgchunk, MAXERRDETAIL);
1071 switch (hdr->host_status) {
1073 strlcat(msg, "SG_ERR_DID_NO_CONNECT", MAXERRDETAIL);
1076 strlcat(msg, "SG_ERR_DID_BUS_BUSY", MAXERRDETAIL);
1079 strlcat(msg, "SG_ERR_DID_TIME_OUT", MAXERRDETAIL);
1082 strlcat(msg, "SG_ERR_DID_BAD_TARGET", MAXERRDETAIL);
1085 strlcat(msg, "SG_ERR_DID_ABORT", MAXERRDETAIL);
1088 strlcat(msg, "SG_ERR_DID_PARITY", MAXERRDETAIL);
1091 strlcat(msg, "SG_ERR_DID_ERROR (internal error)", MAXERRDETAIL);
1094 strlcat(msg, "SG_ERR_DID_RESET", MAXERRDETAIL);
1097 strlcat(msg, "SG_ERR_DID_BAD_INTR (unexpected)", MAXERRDETAIL);
1100 strlcat(msg, "SG_ERR_DID_PASSTHROUGH", MAXERRDETAIL);
1103 strlcat(msg, "SG_ERR_DID_SOFT_ERROR (driver retry?)", MAXERRDETAIL);
1106 strlcat(msg, "SG_ERR_DID_IMM_RETRY", MAXERRDETAIL);
1109 strlcat(msg, "SG_ERR_DID_REQUEUE", MAXERRDETAIL);
1112 strlcat(msg, "SG_ERR_DID_TRANSPORT_DISRUPTED", MAXERRDETAIL);
1115 strlcat(msg, "SG_ERR_DID_TRANSPORT_FAILFAST", MAXERRDETAIL);
1118 strlcat(msg, "SG_ERR_DID_TARGET_FAILURE", MAXERRDETAIL);
1121 strlcat(msg, "SG_ERR_DID_NEXUS_FAILURE", MAXERRDETAIL);
1124 strlcat(msg, "SG_ERR_DID_ALLOC_FAILURE", MAXERRDETAIL);
1127 strlcat(msg, "SG_ERR_DID_MEDIUM_ERROR", MAXERRDETAIL);
1130 strlcat(msg, "Unknown", MAXERRDETAIL);
1133 strlcat(msg, ". ", MAXERRDETAIL);
/* decode the low nibble of the driver status */
1135 if (hdr->driver_status) {
1136 snprintf(msgchunk, MAXMSGCHUNK, "SG Driver Status: 0x%02x; ", hdr->driver_status);
1137 strlcat(msg, msgchunk, MAXERRDETAIL);
1138 switch (hdr->driver_status & 0x0F) {
1140 strlcat(msg, "SG_ERR_DRIVER_BUSY", MAXERRDETAIL);
1143 strlcat(msg, "SG_ERR_DRIVER_SOFT", MAXERRDETAIL);
1146 strlcat(msg, "SG_ERR_DRIVER_MEDIA", MAXERRDETAIL);
1149 strlcat(msg, "SG_ERR_DRIVER_ERROR", MAXERRDETAIL);
1152 strlcat(msg, "SG_ERR_DRIVER_INVALID", MAXERRDETAIL);
1155 strlcat(msg, "SG_ERR_DRIVER_TIMEOUT", MAXERRDETAIL);
1158 strlcat(msg, "SG_ERR_DRIVER_HARD", MAXERRDETAIL);
1161 strlcat(msg, "SG_ERR_DRIVER_SENSE", MAXERRDETAIL);
1164 strlcat(msg, "Unknown", MAXERRDETAIL);
1167 strlcat(msg, "; ", MAXERRDETAIL);
/* decode the "suggestion" bits in the high nibble */
1168 switch (hdr->driver_status & 0xF0) {
1170 strlcat(msg, "SG_ERR_SUGGEST_RETRY", MAXERRDETAIL);
1173 strlcat(msg, "SG_ERR_SUGGEST_ABORT", MAXERRDETAIL);
1176 strlcat(msg, "SG_ERR_SUGGEST_REMAP", MAXERRDETAIL);
1179 strlcat(msg, "SG_ERR_SUGGEST_DIE", MAXERRDETAIL);
1182 strlcat(msg, "SG_ERR_SUGGEST_SENSE", MAXERRDETAIL);
1185 strlcat(msg, ". ", MAXERRDETAIL);
/* decode the SCSI status byte itself */
1188 snprintf(msgchunk, MAXMSGCHUNK, "SG SCSI Status: 0x%02x; ", hdr->status);
1189 strlcat(msg, msgchunk, MAXERRDETAIL);
1190 // SCSI 3 status codes
1191 switch (hdr->status) {
1193 strlcat(msg, "CHECK_CONDITION", MAXERRDETAIL);
1196 strlcat(msg, "CONDITION_MET", MAXERRDETAIL);
1199 strlcat(msg, "BUSY", MAXERRDETAIL);
1202 strlcat(msg, "INTERMEDIATE", MAXERRDETAIL);
1205 strlcat(msg, "INTERMEDIATE_CONDITION_MET", MAXERRDETAIL);
1208 strlcat(msg, "RESERVATION_CONFLICT", MAXERRDETAIL);
1211 strlcat(msg, "COMMAND_TERMINATED", MAXERRDETAIL);
1214 strlcat(msg, "TASK_SET_FULL", MAXERRDETAIL);
1217 strlcat(msg, "ACA_ACTIVE", MAXERRDETAIL);
1220 strlcat(msg, "TASK_ABORTED", MAXERRDETAIL);
1223 strlcat(msg, "Unknown", MAXERRDETAIL);
1226 strlcat(msg, ". ", MAXERRDETAIL);
/* dump the raw sense data bytes if the device returned any */
1228 if (hdr->sb_len_wr) {
1229 snprintf(msgchunk, MAXMSGCHUNK, "Sense Data (%d bytes):", hdr->sb_len_wr);
1230 strlcat(msg, msgchunk, MAXERRDETAIL);
1231 for (i = 0; i < hdr->sb_len_wr; i++) {
1232 snprintf(msgchunk, MAXMSGCHUNK, " %02x", hdr->sbp[i]);
1233 strlcat(msg, msgchunk, MAXERRDETAIL);
1235 strlcat(msg, ". ", MAXERRDETAIL);
1237 if (hdr->resid != 0) {
1238 snprintf(msgchunk, MAXMSGCHUNK, "SG Driver: %d bytes out of %d not transferred. ", hdr->resid, hdr->dxfer_len);
1239 strlcat(msg, msgchunk, MAXERRDETAIL);
/* always append the CDB bytes for reference */
1242 strlcat(msg, "cdb:", MAXERRDETAIL);
1243 for (i = 0; i < hdr->cmd_len; i++) {
1244 snprintf(msgchunk, MAXMSGCHUNK, " %02x", hdr->cmdp[i]);
1245 strlcat(msg, msgchunk, MAXERRDETAIL);
1247 strlcat(msg, ". ", MAXERRDETAIL);
/* for trims, also dump the UNMAP parameter list that was sent */
1248 if (io_u->ddir == DDIR_TRIM) {
1249 unsigned char *param_list = hdr->dxferp;
1250 strlcat(msg, "dxferp:", MAXERRDETAIL);
1251 for (i = 0; i < hdr->dxfer_len; i++) {
1252 snprintf(msgchunk, MAXMSGCHUNK, " %02x", param_list[i]);
1253 strlcat(msg, msgchunk, MAXERRDETAIL);
1255 strlcat(msg, ". ", MAXERRDETAIL);
1260 if (!(hdr->info & SG_INFO_CHECK) && !strlen(msg))
1261 snprintf(msg, MAXERRDETAIL, "%s",
1262 "SG Driver did not report a Host, Driver or Device check");
1268 * get max file size from read capacity.
1270 static int fio_sgio_get_file_size(struct thread_data *td, struct fio_file *f)
1273 * get_file_size is being called even before sgio_init is
1274 * called, so none of the sg_io structures are
1275 * initialized in the thread_data yet. So we need to do the
1276 * ReadCapacity without any of those helpers. One of the effects
1277 * is that ReadCapacity may get called 4 times on each open:
1278 * readcap(10) followed by readcap(16) if needed - just to get
1279 * the file size after the init occurs - it will be called
1280 * again when "type_check" is called during structure
1281 * initialization I'm not sure how to prevent this little
1284 unsigned int bs = 0;
1285 unsigned long long max_lba = 0;
1288 if (fio_file_size_known(f))
1291 if (f->filetype != FIO_TYPE_BLOCK && f->filetype != FIO_TYPE_CHAR) {
1292 td_verror(td, EINVAL, "wrong file type");
1293 log_err("ioengine sg only works on block or character devices\n");
1297 ret = fio_sgio_read_capacity(td, &bs, &max_lba);
1299 td_verror(td, td->error, "fio_sgio_read_capacity");
1300 log_err("ioengine sg unable to successfully execute read capacity to get block size and maximum lba\n");
1304 f->real_file_size = (max_lba + 1) * bs;
1305 fio_file_set_size_known(f);
1310 static struct ioengine_ops ioengine = {
1312 .version = FIO_IOOPS_VERSION,
1313 .init = fio_sgio_init,
1314 .prep = fio_sgio_prep,
1315 .queue = fio_sgio_queue,
1316 .commit = fio_sgio_commit,
1317 .getevents = fio_sgio_getevents,
1318 .errdetails = fio_sgio_errdetails,
1319 .event = fio_sgio_event,
1320 .cleanup = fio_sgio_cleanup,
1321 .open_file = fio_sgio_open,
1322 .close_file = generic_close_file,
1323 .get_file_size = fio_sgio_get_file_size,
1324 .flags = FIO_SYNCIO | FIO_RAWIO,
1326 .option_struct_size = sizeof(struct sg_options)
1329 #else /* FIO_HAVE_SGIO */
1332 * When we have a proper configure system in place, we simply wont build
1333 * and install this io engine. For now install a crippled version that
1334 * just complains and fails to load.
1336 static int fio_sgio_init(struct thread_data fio_unused *td)
1338 log_err("fio: ioengine sg not available\n");
1342 static struct ioengine_ops ioengine = {
1344 .version = FIO_IOOPS_VERSION,
1345 .init = fio_sgio_init,
1350 static void fio_init fio_sgio_register(void)
1352 register_ioengine(&ioengine);
1355 static void fio_exit fio_sgio_unregister(void)
1357 unregister_ioengine(&ioengine);