engines/sg: add support for WRITE AND VERIFY, WRITE SAME
[fio.git] / engines / sg.c
... / ...
CommitLineData
1/*
2 * sg engine
3 *
4 * IO engine that uses the Linux SG v3 interface to talk to SCSI devices
5 *
6 */
7#include <stdio.h>
8#include <stdlib.h>
9#include <unistd.h>
10#include <errno.h>
11#include <poll.h>
12
13#include "../fio.h"
14#include "../optgroup.h"
15
16#ifdef FIO_HAVE_SGIO
17
/*
 * Write mode selected by the "sg_write_mode" option; fio_sgio_prep()
 * uses this to pick which SCSI write opcode goes into the CDB.
 */
enum {
	FIO_SG_WRITE		= 1,	// plain WRITE(10)/WRITE(16)
	FIO_SG_WRITE_VERIFY	= 2,	// WRITE AND VERIFY(10)/(16)
	FIO_SG_WRITE_SAME	= 3	// WRITE SAME(10)/(16)
};
23
/*
 * Per-thread engine options, filled in from the option table below.
 */
struct sg_options {
	void *pad;		// reserved pad required by fio's option parsing
	unsigned int readfua;	// set FUA bit on read commands
	unsigned int writefua;	// set FUA bit on write commands
	unsigned int write_mode;	// one of FIO_SG_WRITE* above
};
30
/*
 * Engine-specific option table (group FIO_OPT_G_SG), terminated by a
 * NULL-named sentinel entry as fio's option parser requires.
 */
static struct fio_option options[] = {
	{
		.name	= "readfua",
		.lname	= "sg engine read fua flag support",
		.type	= FIO_OPT_BOOL,
		.off1	= offsetof(struct sg_options, readfua),
		.help	= "Set FUA flag (force unit access) for all Read operations",
		.def	= "0",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_SG,
	},
	{
		.name	= "writefua",
		.lname	= "sg engine write fua flag support",
		.type	= FIO_OPT_BOOL,
		.off1	= offsetof(struct sg_options, writefua),
		.help	= "Set FUA flag (force unit access) for all Write operations",
		.def	= "0",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_SG,
	},
	{
		.name	= "sg_write_mode",
		.lname	= "specify sg write mode",
		.type	= FIO_OPT_STR,
		.off1	= offsetof(struct sg_options, write_mode),
		.help	= "Specify SCSI WRITE mode",
		.def	= "write",
		.posval = {
			  { .ival = "write",
			    .oval = FIO_SG_WRITE,
			    .help = "Issue standard SCSI WRITE commands",
			  },
			  { .ival = "verify",
			    .oval = FIO_SG_WRITE_VERIFY,
			    .help = "Issue SCSI WRITE AND VERIFY commands",
			  },
			  { .ival = "same",
			    .oval = FIO_SG_WRITE_SAME,
			    .help = "Issue SCSI WRITE SAME commands",
			  },
		},
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_SG,
	},
	{
		.name	= NULL,	// sentinel: end of option table
	},
};
80
81#define MAX_10B_LBA 0xFFFFFFFFULL
82#define SCSI_TIMEOUT_MS 30000 // 30 second timeout; currently no method to override
83#define MAX_SB 64 // sense block maximum return size
84
/*
 * Per-io_u command state: one slot per iodepth entry, indexed by
 * io_u->index (see sgio_hdr_init()).
 */
struct sgio_cmd {
	unsigned char cdb[16];      // enhanced from 10 to support 16 byte commands
	unsigned char sb[MAX_SB];   // add sense block to commands
	int nr;
};
90
/*
 * Per-thread engine state, allocated in fio_sgio_init() and hung off
 * td->io_ops_data.
 */
struct sgio_data {
	struct sgio_cmd *cmds;	// iodepth CDB/sense slots
	struct io_u **events;	// completed io_us, filled by getevents/ioctl_doio
	struct pollfd *pfds;	// one pollfd per file, for async reaping
	int *fd_flags;		// saved fcntl flags, restored after non-blocking poll
	void *sgbuf;		// scratch buffer: iodepth sg_io_hdrs read back from sg
	unsigned int bs;	// device logical block size, set by type_check
	int type_checked;	// device type/capacity validated on first open
};
100
101static void sgio_hdr_init(struct sgio_data *sd, struct sg_io_hdr *hdr,
102 struct io_u *io_u, int fs)
103{
104 struct sgio_cmd *sc = &sd->cmds[io_u->index];
105
106 memset(hdr, 0, sizeof(*hdr));
107 memset(sc->cdb, 0, sizeof(sc->cdb));
108
109 hdr->interface_id = 'S';
110 hdr->cmdp = sc->cdb;
111 hdr->cmd_len = sizeof(sc->cdb);
112 hdr->sbp = sc->sb;
113 hdr->mx_sb_len = sizeof(sc->sb);
114 hdr->pack_id = io_u->index;
115 hdr->usr_ptr = io_u;
116
117 if (fs) {
118 hdr->dxferp = io_u->xfer_buf;
119 hdr->dxfer_len = io_u->xfer_buflen;
120 }
121}
122
/*
 * Return 1 if any of the @fds pollfds has POLLIN set in revents,
 * 0 otherwise.
 */
static int pollin_events(struct pollfd *pfds, int fds)
{
	struct pollfd *p = pfds, *end = pfds + fds;

	while (p < end) {
		if (p->revents & POLLIN)
			return 1;
		p++;
	}

	return 0;
}
133
/*
 * Read exactly @size bytes from @fd into @data, retrying on
 * EAGAIN/EINTR and continuing across short reads.
 *
 * Returns 0 on success, the read() errno on a hard failure, or
 * EAGAIN if EOF was reached before @size bytes arrived.
 */
static int sg_fd_read(int fd, void *data, size_t size)
{
	/*
	 * Walk with a char pointer: arithmetic on void * is a GNU
	 * extension, not standard C.
	 */
	char *p = data;
	int err = 0;

	while (size) {
		ssize_t ret;

		ret = read(fd, p, size);
		if (ret < 0) {
			if (errno == EAGAIN || errno == EINTR)
				continue;
			err = errno;
			break;
		} else if (!ret)
			break;		/* EOF before we got everything */
		else {
			/* short read: advance and keep going */
			p += ret;
			size -= ret;
		}
	}

	if (err)
		return err;
	if (size)
		return EAGAIN;

	return 0;
}
162
/*
 * Reap between @min and @max completed commands from the sg character
 * devices. Completions are collected by read()ing sg_io_hdrs back from
 * each file descriptor into sd->sgbuf; the associated io_u pointers
 * (stored in hdr->usr_ptr at prep time) are stashed in sd->events for
 * fio_sgio_event() to hand back. Returns the number of events reaped,
 * or a negative error.
 */
static int fio_sgio_getevents(struct thread_data *td, unsigned int min,
			      unsigned int max,
			      const struct timespec fio_unused *t)
{
	struct sgio_data *sd = td->io_ops_data;
	int left = max, eventNum, ret, r = 0;
	void *buf = sd->sgbuf;
	unsigned int i, events;
	struct fio_file *f;

	/*
	 * Fill in the file descriptors
	 */
	for_each_file(td, f, i) {
		/*
		 * don't block for min events == 0
		 */
		if (!min)
			sd->fd_flags[i] = fio_set_fd_nonblocking(f->fd, "sg");
		else
			sd->fd_flags[i] = -1;

		sd->pfds[i].fd = f->fd;
		sd->pfds[i].events = POLLIN;
	}

	while (left) {
		char *p;

		dprint(FD_IO, "sgio_getevents: sd %p: left=%d\n", sd, left);

		do {
			if (!min)
				break;

			/* block until at least one fd has data to reap */
			ret = poll(sd->pfds, td->o.nr_files, -1);
			if (ret < 0) {
				if (!r)
					r = -errno;
				td_verror(td, errno, "poll");
				break;
			} else if (!ret)
				continue;

			if (pollin_events(sd->pfds, td->o.nr_files))
				break;
		} while (1);

		if (r < 0)
			break;

re_read:
		p = buf;
		events = 0;
		for_each_file(td, f, i) {
			/* pull back one sg_io_hdr per completed command */
			for (eventNum = 0; eventNum < left; eventNum++) {
				ret = sg_fd_read(f->fd, p, sizeof(struct sg_io_hdr));
				dprint(FD_IO, "sgio_getevents: ret: %d\n", ret);
				if (ret) {
					r = -ret;
					td_verror(td, r, "sg_read");
					break;
				}
				p += sizeof(struct sg_io_hdr);
				events++;
				dprint(FD_IO, "sgio_getevents: events: %d\n", events);
			}
		}

		if (r < 0 && !events)
			break;
		if (!events) {
			/* nothing reaped yet: back off briefly and retry */
			usleep(1000);
			goto re_read;
		}

		left -= events;
		r += events;

		for (i = 0; i < events; i++) {
			struct sg_io_hdr *hdr = (struct sg_io_hdr *) buf + i;
			sd->events[i] = hdr->usr_ptr;

			/* record if an io error occurred, ignore resid */
			if (hdr->info & SG_INFO_CHECK) {
				struct io_u *io_u;
				io_u = (struct io_u *)(hdr->usr_ptr);
				/* keep the full header so errdetails can decode it */
				memcpy(&io_u->hdr, hdr, sizeof(struct sg_io_hdr));
				sd->events[i]->error = EIO;
			}
		}
	}

	if (!min) {
		/* restore the fd flags changed for the non-blocking poll */
		for_each_file(td, f, i) {
			if (sd->fd_flags[i] == -1)
				continue;

			if (fcntl(f->fd, F_SETFL, sd->fd_flags[i]) < 0)
				log_err("fio: sg failed to restore fcntl flags: %s\n", strerror(errno));
		}
	}

	return r;
}
268
/*
 * Issue one command synchronously through the SG_IO ioctl (used for
 * block devices). Returns FIO_Q_COMPLETED on success; a failed ioctl
 * returns its (negative) return value.
 */
static enum fio_q_status fio_sgio_ioctl_doio(struct thread_data *td,
					     struct fio_file *f,
					     struct io_u *io_u)
{
	struct sgio_data *sd = td->io_ops_data;
	struct sg_io_hdr *hdr = &io_u->hdr;
	int ret;

	/* SG_IO completes inline, so slot 0 is always this io_u */
	sd->events[0] = io_u;

	ret = ioctl(f->fd, SG_IO, hdr);
	if (ret < 0)
		return ret;

	/* record if an io error occurred */
	if (hdr->info & SG_INFO_CHECK)
		io_u->error = EIO;

	return FIO_Q_COMPLETED;
}
289
/*
 * Issue one command by write()ing its sg_io_hdr to the sg character
 * device. With @do_sync set, immediately read() the response back and
 * complete the io_u inline (FIO_Q_COMPLETED); otherwise it is reaped
 * later by fio_sgio_getevents() (FIO_Q_QUEUED).
 *
 * NOTE(review): only ret < 0 is treated as failure — a short
 * write()/read() of the header would go undetected here.
 */
static int fio_sgio_rw_doio(struct fio_file *f, struct io_u *io_u, int do_sync)
{
	struct sg_io_hdr *hdr = &io_u->hdr;
	int ret;

	ret = write(f->fd, hdr, sizeof(*hdr));
	if (ret < 0)
		return ret;

	if (do_sync) {
		ret = read(f->fd, hdr, sizeof(*hdr));
		if (ret < 0)
			return ret;

		/* record if an io error occurred */
		if (hdr->info & SG_INFO_CHECK)
			io_u->error = EIO;

		return FIO_Q_COMPLETED;
	}

	return FIO_Q_QUEUED;
}
313
/*
 * Dispatch one io_u: block devices go through the synchronous SG_IO
 * ioctl, character (sg) devices through write()/read().
 *
 * NOTE(review): td_verror() is invoked even when io_u->error is 0 on
 * the synchronous paths — confirm that a zero error here is benign.
 */
static int fio_sgio_doio(struct thread_data *td, struct io_u *io_u, int do_sync)
{
	struct fio_file *f = io_u->file;
	int ret;

	if (f->filetype == FIO_TYPE_BLOCK) {
		ret = fio_sgio_ioctl_doio(td, f, io_u);
		td_verror(td, io_u->error, __func__);
	} else {
		ret = fio_sgio_rw_doio(f, io_u, do_sync);
		if (do_sync)
			td_verror(td, io_u->error, __func__);
	}

	return ret;
}
330
/*
 * Build the SCSI CDB for @io_u inside its embedded sg_io_hdr.
 * Reads/writes pick 10- vs 16-byte command forms based on the starting
 * LBA; writes honor the sg_write_mode option (plain WRITE, WRITE AND
 * VERIFY, or WRITE SAME), and the FUA options set bit 3 of CDB byte 1.
 * Everything else (flush) becomes a SYNCHRONIZE CACHE.
 *
 * Returns 0 on success, EINVAL for transfers not aligned to the device
 * block size.
 */
static int fio_sgio_prep(struct thread_data *td, struct io_u *io_u)
{
	struct sg_io_hdr *hdr = &io_u->hdr;
	struct sg_options *o = td->eo;
	struct sgio_data *sd = td->io_ops_data;
	long long nr_blocks, lba;

	if (io_u->xfer_buflen & (sd->bs - 1)) {
		log_err("read/write not sector aligned\n");
		return EINVAL;
	}

	nr_blocks = io_u->xfer_buflen / sd->bs;
	lba = io_u->offset / sd->bs;

	if (io_u->ddir == DDIR_READ) {
		sgio_hdr_init(sd, hdr, io_u, 1);

		hdr->dxfer_direction = SG_DXFER_FROM_DEV;
		if (lba < MAX_10B_LBA)
			hdr->cmdp[0] = 0x28; // read(10)
		else
			hdr->cmdp[0] = 0x88; // read(16)

		if (o->readfua)
			hdr->cmdp[1] |= 0x08; // FUA bit

	} else if (io_u->ddir == DDIR_WRITE) {
		sgio_hdr_init(sd, hdr, io_u, 1);

		hdr->dxfer_direction = SG_DXFER_TO_DEV;
		switch(o->write_mode) {
		case FIO_SG_WRITE:
			if (lba < MAX_10B_LBA)
				hdr->cmdp[0] = 0x2a; // write(10)
			else
				hdr->cmdp[0] = 0x8a; // write(16)
			if (o->writefua)
				hdr->cmdp[1] |= 0x08; // FUA bit
			break;
		case FIO_SG_WRITE_VERIFY:
			if (lba < MAX_10B_LBA)
				hdr->cmdp[0] = 0x2e; // write and verify(10)
			else
				hdr->cmdp[0] = 0x8e; // write and verify(16)
			break;
			// BYTCHK is disabled by virtue of the memset in sgio_hdr_init
		case FIO_SG_WRITE_SAME:
			// only one block of data is transferred; the device
			// repeats it over the nr_blocks range set below
			hdr->dxfer_len = sd->bs;
			if (lba < MAX_10B_LBA)
				hdr->cmdp[0] = 0x41; // write same(10)
			else
				hdr->cmdp[0] = 0x93; // write same(16)
			break;
		};
	} else {
		sgio_hdr_init(sd, hdr, io_u, 0);
		hdr->dxfer_direction = SG_DXFER_NONE;
		if (lba < MAX_10B_LBA)
			hdr->cmdp[0] = 0x35; // synccache(10)
		else
			hdr->cmdp[0] = 0x91; // synccache(16)
	}

	/*
	 * for synccache, we leave lba and length to 0 to sync all
	 * blocks on medium.
	 */
	if (hdr->dxfer_direction != SG_DXFER_NONE) {
		if (lba < MAX_10B_LBA) {
			/* 10-byte CDB: 32-bit big-endian LBA, 16-bit length */
			hdr->cmdp[2] = (unsigned char) ((lba >> 24) & 0xff);
			hdr->cmdp[3] = (unsigned char) ((lba >> 16) & 0xff);
			hdr->cmdp[4] = (unsigned char) ((lba >> 8) & 0xff);
			hdr->cmdp[5] = (unsigned char) (lba & 0xff);
			hdr->cmdp[7] = (unsigned char) ((nr_blocks >> 8) & 0xff);
			hdr->cmdp[8] = (unsigned char) (nr_blocks & 0xff);
		} else {
			/* 16-byte CDB: 64-bit big-endian LBA, 32-bit length */
			hdr->cmdp[2] = (unsigned char) ((lba >> 56) & 0xff);
			hdr->cmdp[3] = (unsigned char) ((lba >> 48) & 0xff);
			hdr->cmdp[4] = (unsigned char) ((lba >> 40) & 0xff);
			hdr->cmdp[5] = (unsigned char) ((lba >> 32) & 0xff);
			hdr->cmdp[6] = (unsigned char) ((lba >> 24) & 0xff);
			hdr->cmdp[7] = (unsigned char) ((lba >> 16) & 0xff);
			hdr->cmdp[8] = (unsigned char) ((lba >> 8) & 0xff);
			hdr->cmdp[9] = (unsigned char) (lba & 0xff);
			hdr->cmdp[10] = (unsigned char) ((nr_blocks >> 32) & 0xff);
			hdr->cmdp[11] = (unsigned char) ((nr_blocks >> 16) & 0xff);
			hdr->cmdp[12] = (unsigned char) ((nr_blocks >> 8) & 0xff);
			hdr->cmdp[13] = (unsigned char) (nr_blocks & 0xff);
		}
	}

	hdr->timeout = SCSI_TIMEOUT_MS;
	return 0;
}
426
/*
 * Queue one io_u. Sync jobs, O_DIRECT jobs and all flushes are issued
 * synchronously; otherwise the command is queued to the sg driver and
 * reaped later. A nonzero SCSI status is mapped to EIO with the
 * residual byte count recorded.
 */
static enum fio_q_status fio_sgio_queue(struct thread_data *td,
					struct io_u *io_u)
{
	struct sg_io_hdr *hdr = &io_u->hdr;
	int ret, do_sync = 0;

	fio_ro_check(td, io_u);

	if (td->o.sync_io || td->o.odirect || ddir_sync(io_u->ddir))
		do_sync = 1;

	ret = fio_sgio_doio(td, io_u, do_sync);

	if (ret < 0)
		io_u->error = errno;
	else if (hdr->status) {
		/* device reported a SCSI status; surface as EIO */
		io_u->resid = hdr->resid;
		io_u->error = EIO;
	}

	if (io_u->error) {
		td_verror(td, io_u->error, "xfer");
		return FIO_Q_COMPLETED;
	}

	return ret;
}
454
455static struct io_u *fio_sgio_event(struct thread_data *td, int event)
456{
457 struct sgio_data *sd = td->io_ops_data;
458
459 return sd->events[event];
460}
461
/*
 * Issue READ CAPACITY against td's first file to discover the device
 * block size (*bs) and highest addressable LBA (*max_lba). Tries the
 * 10-byte form first and falls back to READ CAPACITY(16) when the
 * 10-byte result saturates at MAX_10B_LBA.
 *
 * Opens its own fd because this runs before the engine's sd/io_u state
 * exists (see fio_sgio_get_file_size). Returns 0 on success or a
 * negative errno/ioctl failure.
 *
 * NOTE(review): unlike the 16-byte retry, the first ioctl's result is
 * not checked for SG_INFO_CHECK — a CHECK CONDITION there would be
 * silently accepted; confirm whether that is intended.
 */
static int fio_sgio_read_capacity(struct thread_data *td, unsigned int *bs,
				  unsigned long long *max_lba)
{
	/*
	 * need to do read capacity operation w/o benefit of sd or
	 * io_u structures, which are not initialized until later.
	 */
	struct sg_io_hdr hdr;
	unsigned char cmd[16];
	unsigned char sb[64];
	unsigned char buf[32]; // read capacity return
	int ret;
	int fd = -1;

	struct fio_file *f = td->files[0];

	/* open file independent of rest of application */
	fd = open(f->file_name, O_RDONLY);
	if (fd < 0)
		return -errno;

	memset(&hdr, 0, sizeof(hdr));
	memset(cmd, 0, sizeof(cmd));
	memset(sb, 0, sizeof(sb));
	memset(buf, 0, sizeof(buf));

	/* First let's try a 10 byte read capacity. */
	hdr.interface_id = 'S';
	hdr.cmdp = cmd;
	hdr.cmd_len = 10;
	hdr.sbp = sb;
	hdr.mx_sb_len = sizeof(sb);
	hdr.timeout = SCSI_TIMEOUT_MS;
	hdr.cmdp[0] = 0x25;  // Read Capacity(10)
	hdr.dxfer_direction = SG_DXFER_FROM_DEV;
	hdr.dxferp = buf;
	hdr.dxfer_len = sizeof(buf);

	ret = ioctl(fd, SG_IO, &hdr);
	if (ret < 0) {
		close(fd);
		return ret;
	}

	/* big-endian 32-bit block length at bytes 4-7, max LBA at 0-3 */
	*bs	 = ((unsigned long) buf[4] << 24) | ((unsigned long) buf[5] << 16) |
		   ((unsigned long) buf[6] << 8) | (unsigned long) buf[7];
	*max_lba = ((unsigned long) buf[0] << 24) | ((unsigned long) buf[1] << 16) |
		   ((unsigned long) buf[2] << 8) | (unsigned long) buf[3];

	/*
	 * If max lba masked by MAX_10B_LBA equals MAX_10B_LBA,
	 * then need to retry with 16 byte Read Capacity command.
	 */
	if (*max_lba == MAX_10B_LBA) {
		hdr.cmd_len = 16;
		hdr.cmdp[0] = 0x9e; // service action
		hdr.cmdp[1] = 0x10; // Read Capacity(16)
		/* allocation length, bytes 10-13, big-endian */
		hdr.cmdp[10] = (unsigned char) ((sizeof(buf) >> 24) & 0xff);
		hdr.cmdp[11] = (unsigned char) ((sizeof(buf) >> 16) & 0xff);
		hdr.cmdp[12] = (unsigned char) ((sizeof(buf) >> 8) & 0xff);
		hdr.cmdp[13] = (unsigned char) (sizeof(buf) & 0xff);

		hdr.dxfer_direction = SG_DXFER_FROM_DEV;
		hdr.dxferp = buf;
		hdr.dxfer_len = sizeof(buf);

		ret = ioctl(fd, SG_IO, &hdr);
		if (ret < 0) {
			close(fd);
			return ret;
		}

		/* record if an io error occurred */
		if (hdr.info & SG_INFO_CHECK)
			td_verror(td, EIO, "fio_sgio_read_capacity");

		/* RC(16) payload: 64-bit max LBA at 0-7, block length at 8-11 */
		*bs = (buf[8] << 24) | (buf[9] << 16) | (buf[10] << 8) | buf[11];
		*max_lba = ((unsigned long long)buf[0] << 56) |
			   ((unsigned long long)buf[1] << 48) |
			   ((unsigned long long)buf[2] << 40) |
			   ((unsigned long long)buf[3] << 32) |
			   ((unsigned long long)buf[4] << 24) |
			   ((unsigned long long)buf[5] << 16) |
			   ((unsigned long long)buf[6] << 8) |
			   (unsigned long long)buf[7];
	}

	close(fd);
	return 0;
}
552
553static void fio_sgio_cleanup(struct thread_data *td)
554{
555 struct sgio_data *sd = td->io_ops_data;
556
557 if (sd) {
558 free(sd->events);
559 free(sd->cmds);
560 free(sd->fd_flags);
561 free(sd->pfds);
562 free(sd->sgbuf);
563 free(sd);
564 }
565}
566
567static int fio_sgio_init(struct thread_data *td)
568{
569 struct sgio_data *sd;
570
571 sd = malloc(sizeof(*sd));
572 memset(sd, 0, sizeof(*sd));
573 sd->cmds = malloc(td->o.iodepth * sizeof(struct sgio_cmd));
574 memset(sd->cmds, 0, td->o.iodepth * sizeof(struct sgio_cmd));
575 sd->events = malloc(td->o.iodepth * sizeof(struct io_u *));
576 memset(sd->events, 0, td->o.iodepth * sizeof(struct io_u *));
577 sd->pfds = malloc(sizeof(struct pollfd) * td->o.nr_files);
578 memset(sd->pfds, 0, sizeof(struct pollfd) * td->o.nr_files);
579 sd->fd_flags = malloc(sizeof(int) * td->o.nr_files);
580 memset(sd->fd_flags, 0, sizeof(int) * td->o.nr_files);
581 sd->sgbuf = malloc(sizeof(struct sg_io_hdr) * td->o.iodepth);
582 memset(sd->sgbuf, 0, sizeof(struct sg_io_hdr) * td->o.iodepth);
583 sd->type_checked = 0;
584 td->io_ops_data = sd;
585
586 /*
587 * we want to do it, regardless of whether odirect is set or not
588 */
589 td->o.override_sync = 1;
590 return 0;
591}
592
/*
 * Validate the target on first open and cache the device block size in
 * sd->bs. Block devices get it via BLKSSZGET; sg character devices are
 * probed with SG_GET_VERSION_NUM and READ CAPACITY. Anything else is
 * rejected. Returns 0 on success, 1 on failure (with td error set).
 */
static int fio_sgio_type_check(struct thread_data *td, struct fio_file *f)
{
	struct sgio_data *sd = td->io_ops_data;
	unsigned int bs = 0;
	unsigned long long max_lba = 0;

	if (f->filetype == FIO_TYPE_BLOCK) {
		if (ioctl(f->fd, BLKSSZGET, &bs) < 0) {
			td_verror(td, errno, "ioctl");
			return 1;
		}
	} else if (f->filetype == FIO_TYPE_CHAR) {
		int version, ret;

		if (ioctl(f->fd, SG_GET_VERSION_NUM, &version) < 0) {
			td_verror(td, errno, "ioctl");
			return 1;
		}

		ret = fio_sgio_read_capacity(td, &bs, &max_lba);
		if (ret) {
			td_verror(td, td->error, "fio_sgio_read_capacity");
			log_err("ioengine sg unable to read capacity successfully\n");
			return 1;
		}
	} else {
		td_verror(td, EINVAL, "wrong file type");
		log_err("ioengine sg only works on block or character devices\n");
		return 1;
	}

	sd->bs = bs;
	// Determine size of commands needed based on max_lba
	if (max_lba >= MAX_10B_LBA) {
		dprint(FD_IO, "sgio_type_check: using 16 byte read/write "
			"commands for lba above 0x%016llx/0x%016llx\n",
			MAX_10B_LBA, max_lba);
	}

	/*
	 * Block devices complete inline via the SG_IO ioctl (see
	 * fio_sgio_ioctl_doio), so disable the async hooks.
	 * NOTE(review): this mutates the shared io_ops structure.
	 */
	if (f->filetype == FIO_TYPE_BLOCK) {
		td->io_ops->getevents = NULL;
		td->io_ops->event = NULL;
	}
	sd->type_checked = 1;

	return 0;
}
640
641static int fio_sgio_open(struct thread_data *td, struct fio_file *f)
642{
643 struct sgio_data *sd = td->io_ops_data;
644 int ret;
645
646 ret = generic_open_file(td, f);
647 if (ret)
648 return ret;
649
650 if (sd && !sd->type_checked && fio_sgio_type_check(td, f)) {
651 ret = generic_close_file(td, f);
652 return 1;
653 }
654
655 return 0;
656}
657
658/*
659 * Build an error string with details about the driver, host or scsi
660 * error contained in the sg header Caller will use as necessary.
661 */
662static char *fio_sgio_errdetails(struct io_u *io_u)
663{
664 struct sg_io_hdr *hdr = &io_u->hdr;
665#define MAXERRDETAIL 1024
666#define MAXMSGCHUNK 128
667 char *msg, msgchunk[MAXMSGCHUNK];
668 int i;
669
670 msg = calloc(1, MAXERRDETAIL);
671 strcpy(msg, "");
672
673 /*
674 * can't seem to find sg_err.h, so I'll just echo the define values
675 * so others can search on internet to find clearer clues of meaning.
676 */
677 if (hdr->info & SG_INFO_CHECK) {
678 if (hdr->host_status) {
679 snprintf(msgchunk, MAXMSGCHUNK, "SG Host Status: 0x%02x; ", hdr->host_status);
680 strlcat(msg, msgchunk, MAXERRDETAIL);
681 switch (hdr->host_status) {
682 case 0x01:
683 strlcat(msg, "SG_ERR_DID_NO_CONNECT", MAXERRDETAIL);
684 break;
685 case 0x02:
686 strlcat(msg, "SG_ERR_DID_BUS_BUSY", MAXERRDETAIL);
687 break;
688 case 0x03:
689 strlcat(msg, "SG_ERR_DID_TIME_OUT", MAXERRDETAIL);
690 break;
691 case 0x04:
692 strlcat(msg, "SG_ERR_DID_BAD_TARGET", MAXERRDETAIL);
693 break;
694 case 0x05:
695 strlcat(msg, "SG_ERR_DID_ABORT", MAXERRDETAIL);
696 break;
697 case 0x06:
698 strlcat(msg, "SG_ERR_DID_PARITY", MAXERRDETAIL);
699 break;
700 case 0x07:
701 strlcat(msg, "SG_ERR_DID_ERROR (internal error)", MAXERRDETAIL);
702 break;
703 case 0x08:
704 strlcat(msg, "SG_ERR_DID_RESET", MAXERRDETAIL);
705 break;
706 case 0x09:
707 strlcat(msg, "SG_ERR_DID_BAD_INTR (unexpected)", MAXERRDETAIL);
708 break;
709 case 0x0a:
710 strlcat(msg, "SG_ERR_DID_PASSTHROUGH", MAXERRDETAIL);
711 break;
712 case 0x0b:
713 strlcat(msg, "SG_ERR_DID_SOFT_ERROR (driver retry?)", MAXERRDETAIL);
714 break;
715 case 0x0c:
716 strlcat(msg, "SG_ERR_DID_IMM_RETRY", MAXERRDETAIL);
717 break;
718 case 0x0d:
719 strlcat(msg, "SG_ERR_DID_REQUEUE", MAXERRDETAIL);
720 break;
721 case 0x0e:
722 strlcat(msg, "SG_ERR_DID_TRANSPORT_DISRUPTED", MAXERRDETAIL);
723 break;
724 case 0x0f:
725 strlcat(msg, "SG_ERR_DID_TRANSPORT_FAILFAST", MAXERRDETAIL);
726 break;
727 case 0x10:
728 strlcat(msg, "SG_ERR_DID_TARGET_FAILURE", MAXERRDETAIL);
729 break;
730 case 0x11:
731 strlcat(msg, "SG_ERR_DID_NEXUS_FAILURE", MAXERRDETAIL);
732 break;
733 case 0x12:
734 strlcat(msg, "SG_ERR_DID_ALLOC_FAILURE", MAXERRDETAIL);
735 break;
736 case 0x13:
737 strlcat(msg, "SG_ERR_DID_MEDIUM_ERROR", MAXERRDETAIL);
738 break;
739 default:
740 strlcat(msg, "Unknown", MAXERRDETAIL);
741 break;
742 }
743 strlcat(msg, ". ", MAXERRDETAIL);
744 }
745 if (hdr->driver_status) {
746 snprintf(msgchunk, MAXMSGCHUNK, "SG Driver Status: 0x%02x; ", hdr->driver_status);
747 strlcat(msg, msgchunk, MAXERRDETAIL);
748 switch (hdr->driver_status & 0x0F) {
749 case 0x01:
750 strlcat(msg, "SG_ERR_DRIVER_BUSY", MAXERRDETAIL);
751 break;
752 case 0x02:
753 strlcat(msg, "SG_ERR_DRIVER_SOFT", MAXERRDETAIL);
754 break;
755 case 0x03:
756 strlcat(msg, "SG_ERR_DRIVER_MEDIA", MAXERRDETAIL);
757 break;
758 case 0x04:
759 strlcat(msg, "SG_ERR_DRIVER_ERROR", MAXERRDETAIL);
760 break;
761 case 0x05:
762 strlcat(msg, "SG_ERR_DRIVER_INVALID", MAXERRDETAIL);
763 break;
764 case 0x06:
765 strlcat(msg, "SG_ERR_DRIVER_TIMEOUT", MAXERRDETAIL);
766 break;
767 case 0x07:
768 strlcat(msg, "SG_ERR_DRIVER_HARD", MAXERRDETAIL);
769 break;
770 case 0x08:
771 strlcat(msg, "SG_ERR_DRIVER_SENSE", MAXERRDETAIL);
772 break;
773 default:
774 strlcat(msg, "Unknown", MAXERRDETAIL);
775 break;
776 }
777 strlcat(msg, "; ", MAXERRDETAIL);
778 switch (hdr->driver_status & 0xF0) {
779 case 0x10:
780 strlcat(msg, "SG_ERR_SUGGEST_RETRY", MAXERRDETAIL);
781 break;
782 case 0x20:
783 strlcat(msg, "SG_ERR_SUGGEST_ABORT", MAXERRDETAIL);
784 break;
785 case 0x30:
786 strlcat(msg, "SG_ERR_SUGGEST_REMAP", MAXERRDETAIL);
787 break;
788 case 0x40:
789 strlcat(msg, "SG_ERR_SUGGEST_DIE", MAXERRDETAIL);
790 break;
791 case 0x80:
792 strlcat(msg, "SG_ERR_SUGGEST_SENSE", MAXERRDETAIL);
793 break;
794 }
795 strlcat(msg, ". ", MAXERRDETAIL);
796 }
797 if (hdr->status) {
798 snprintf(msgchunk, MAXMSGCHUNK, "SG SCSI Status: 0x%02x; ", hdr->status);
799 strlcat(msg, msgchunk, MAXERRDETAIL);
800 // SCSI 3 status codes
801 switch (hdr->status) {
802 case 0x02:
803 strlcat(msg, "CHECK_CONDITION", MAXERRDETAIL);
804 break;
805 case 0x04:
806 strlcat(msg, "CONDITION_MET", MAXERRDETAIL);
807 break;
808 case 0x08:
809 strlcat(msg, "BUSY", MAXERRDETAIL);
810 break;
811 case 0x10:
812 strlcat(msg, "INTERMEDIATE", MAXERRDETAIL);
813 break;
814 case 0x14:
815 strlcat(msg, "INTERMEDIATE_CONDITION_MET", MAXERRDETAIL);
816 break;
817 case 0x18:
818 strlcat(msg, "RESERVATION_CONFLICT", MAXERRDETAIL);
819 break;
820 case 0x22:
821 strlcat(msg, "COMMAND_TERMINATED", MAXERRDETAIL);
822 break;
823 case 0x28:
824 strlcat(msg, "TASK_SET_FULL", MAXERRDETAIL);
825 break;
826 case 0x30:
827 strlcat(msg, "ACA_ACTIVE", MAXERRDETAIL);
828 break;
829 case 0x40:
830 strlcat(msg, "TASK_ABORTED", MAXERRDETAIL);
831 break;
832 default:
833 strlcat(msg, "Unknown", MAXERRDETAIL);
834 break;
835 }
836 strlcat(msg, ". ", MAXERRDETAIL);
837 }
838 if (hdr->sb_len_wr) {
839 snprintf(msgchunk, MAXMSGCHUNK, "Sense Data (%d bytes):", hdr->sb_len_wr);
840 strlcat(msg, msgchunk, MAXERRDETAIL);
841 for (i = 0; i < hdr->sb_len_wr; i++) {
842 snprintf(msgchunk, MAXMSGCHUNK, " %02x", hdr->sbp[i]);
843 strlcat(msg, msgchunk, MAXERRDETAIL);
844 }
845 strlcat(msg, ". ", MAXERRDETAIL);
846 }
847 if (hdr->resid != 0) {
848 snprintf(msgchunk, MAXMSGCHUNK, "SG Driver: %d bytes out of %d not transferred. ", hdr->resid, hdr->dxfer_len);
849 strlcat(msg, msgchunk, MAXERRDETAIL);
850 }
851 }
852
853 if (!(hdr->info & SG_INFO_CHECK) && !strlen(msg))
854 strncpy(msg, "SG Driver did not report a Host, Driver or Device check",
855 MAXERRDETAIL - 1);
856
857 return msg;
858}
859
/*
 * get max file size from read capacity.
 * Computes f->real_file_size as (max_lba + 1) * block size and marks
 * the size known. Returns 0 on success, 1 on unsupported file type or
 * a failed READ CAPACITY.
 */
static int fio_sgio_get_file_size(struct thread_data *td, struct fio_file *f)
{
	/*
	 * get_file_size is being called even before sgio_init is
	 * called, so none of the sg_io structures are
	 * initialized in the thread_data yet.  So we need to do the
	 * ReadCapacity without any of those helpers.  One of the effects
	 * is that ReadCapacity may get called 4 times on each open:
	 * readcap(10) followed by readcap(16) if needed - just to get
	 * the file size after the init occurs - it will be called
	 * again when "type_check" is called during structure
	 * initialization I'm not sure how to prevent this little
	 * inefficiency.
	 */
	unsigned int bs = 0;
	unsigned long long max_lba = 0;
	int ret;

	if (fio_file_size_known(f))
		return 0;

	if (f->filetype != FIO_TYPE_BLOCK && f->filetype != FIO_TYPE_CHAR) {
		td_verror(td, EINVAL, "wrong file type");
		log_err("ioengine sg only works on block or character devices\n");
		return 1;
	}

	ret = fio_sgio_read_capacity(td, &bs, &max_lba);
	if (ret ) {
		td_verror(td, td->error, "fio_sgio_read_capacity");
		log_err("ioengine sg unable to successfully execute read capacity to get block size and maximum lba\n");
		return 1;
	}

	/* max_lba is the highest addressable block, hence the +1 */
	f->real_file_size = (max_lba + 1) * bs;
	fio_file_set_size_known(f);
	return 0;
}
901
902
/*
 * Engine descriptor registered with fio. FIO_SYNCIO | FIO_RAWIO: the
 * ioctl path completes synchronously and I/O bypasses the page cache.
 */
static struct ioengine_ops ioengine = {
	.name		= "sg",
	.version	= FIO_IOOPS_VERSION,
	.init		= fio_sgio_init,
	.prep		= fio_sgio_prep,
	.queue		= fio_sgio_queue,
	.getevents	= fio_sgio_getevents,
	.errdetails	= fio_sgio_errdetails,
	.event		= fio_sgio_event,
	.cleanup	= fio_sgio_cleanup,
	.open_file	= fio_sgio_open,
	.close_file	= generic_close_file,
	.get_file_size	= fio_sgio_get_file_size,
	.flags		= FIO_SYNCIO | FIO_RAWIO,
	.options	= options,
	.option_struct_size	= sizeof(struct sg_options)
};
920
921#else /* FIO_HAVE_SGIO */
922
923/*
924 * When we have a proper configure system in place, we simply wont build
925 * and install this io engine. For now install a crippled version that
926 * just complains and fails to load.
927 */
/* Stub init: always fails so the crippled engine refuses to load. */
static int fio_sgio_init(struct thread_data fio_unused *td)
{
	log_err("fio: ioengine sg not available\n");
	return 1;
}
933
/* Stub descriptor: only .init is wired up, and it always fails. */
static struct ioengine_ops ioengine = {
	.name		= "sg",
	.version	= FIO_IOOPS_VERSION,
	.init		= fio_sgio_init,
};
939
940#endif
941
/* Register the sg engine with fio when the module is loaded. */
static void fio_init fio_sgio_register(void)
{
	register_ioengine(&ioengine);
}
946
/* Unregister the sg engine at unload time. */
static void fio_exit fio_sgio_unregister(void)
{
	unregister_ioengine(&ioengine);
}