[fio.git] / engines / sg.c
/*
 * sg engine
 *
 * IO engine that uses the Linux SG v3 interface to talk to SCSI devices
 *
 */
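/*
 * Illustrative job file (a sketch only; the device path and sizes below are
 * assumptions, adjust them for the SCSI device under test):
 *
 *	[sg-read]
 *	ioengine=sg
 *	filename=/dev/sg0
 *	rw=read
 *	bs=4k
 *	iodepth=16
 */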
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <assert.h>
#include <sys/poll.h>

#include "../fio.h"

#ifdef FIO_HAVE_SGIO

#define MAX_10B_LBA 0xFFFFFFFFULL
#define SCSI_TIMEOUT_MS 30000	// 30 second timeout; currently no method to override
#define MAX_SB 64		// sense block maximum return size

struct sgio_cmd {
	unsigned char cdb[16];		// 16 bytes to accommodate 16-byte CDBs
	unsigned char sb[MAX_SB];	// sense block returned with each command
	int nr;
};
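
/*
 * Per-thread private data for the sg engine: per-command state, completion
 * event pointers, poll descriptors, and the device block size / max LBA
 * discovered at open time.
 */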
struct sgio_data {
	struct sgio_cmd *cmds;
	struct io_u **events;
	struct pollfd *pfds;
	int *fd_flags;
	void *sgbuf;
	unsigned int bs;
	long long max_lba;
	int type_checked;
};
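
/*
 * Fill in the common parts of the sg_io_hdr for this io_u: command and
 * sense buffers, pack id, and, if "fs" is set, the data transfer buffer.
 */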
static void sgio_hdr_init(struct sgio_data *sd, struct sg_io_hdr *hdr,
			  struct io_u *io_u, int fs)
{
	struct sgio_cmd *sc = &sd->cmds[io_u->index];

	memset(hdr, 0, sizeof(*hdr));
	memset(sc->cdb, 0, sizeof(sc->cdb));

	hdr->interface_id = 'S';
	hdr->cmdp = sc->cdb;
	hdr->cmd_len = sizeof(sc->cdb);
	hdr->sbp = sc->sb;
	hdr->mx_sb_len = sizeof(sc->sb);
	hdr->pack_id = io_u->index;
	hdr->usr_ptr = io_u;

	if (fs) {
		hdr->dxferp = io_u->xfer_buf;
		hdr->dxfer_len = io_u->xfer_buflen;
	}
}
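
/*
 * Return 1 if any of the polled descriptors has completed requests ready
 * to be read back.
 */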
static int pollin_events(struct pollfd *pfds, int fds)
{
	int i;

	for (i = 0; i < fds; i++)
		if (pfds[i].revents & POLLIN)
			return 1;

	return 0;
}
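
/*
 * Read exactly "size" bytes from the sg fd, retrying on EAGAIN/EINTR.
 * Returns 0 on success, or a positive errno-style value on failure or
 * short read.
 */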
static int sg_fd_read(int fd, void *data, size_t size)
{
	int err = 0;

	while (size) {
		ssize_t ret;

		ret = read(fd, data, size);
		if (ret < 0) {
			if (errno == EAGAIN || errno == EINTR)
				continue;
			err = errno;
			break;
		} else if (!ret)
			break;
		else {
			data += ret;
			size -= ret;
		}
	}

	if (err)
		return err;
	if (size)
		return EAGAIN;

	return 0;
}
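
/*
 * Reap completions by reading finished sg_io_hdr structures back from the
 * sg file descriptors. With min == 0 the descriptors are switched to
 * non-blocking mode so the call only picks up what is already complete.
 */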
static int fio_sgio_getevents(struct thread_data *td, unsigned int min,
			      unsigned int max,
			      const struct timespec fio_unused *t)
{
	struct sgio_data *sd = td->io_ops_data;
	int left = max, eventNum, ret, r = 0;
	void *buf = sd->sgbuf;
	unsigned int i, events;
	struct fio_file *f;

	/*
	 * Fill in the file descriptors
	 */
	for_each_file(td, f, i) {
		/*
		 * don't block for min events == 0
		 */
		if (!min)
			sd->fd_flags[i] = fio_set_fd_nonblocking(f->fd, "sg");
		else
			sd->fd_flags[i] = -1;

		sd->pfds[i].fd = f->fd;
		sd->pfds[i].events = POLLIN;
	}

	while (left) {
		void *p;

		dprint(FD_IO, "sgio_getevents: sd %p: left=%d\n", sd, left);

		do {
			if (!min)
				break;

			ret = poll(sd->pfds, td->o.nr_files, -1);
			if (ret < 0) {
				if (!r)
					r = -errno;
				td_verror(td, errno, "poll");
				break;
			} else if (!ret)
				continue;

			if (pollin_events(sd->pfds, td->o.nr_files))
				break;
		} while (1);

		if (r < 0)
			break;

re_read:
		p = buf;
		events = 0;
		for_each_file(td, f, i) {
			for (eventNum = 0; eventNum < left; eventNum++) {
				ret = sg_fd_read(f->fd, p, sizeof(struct sg_io_hdr));
				dprint(FD_IO, "sgio_getevents: ret: %d\n", ret);
				if (ret) {
					r = -ret;
					td_verror(td, r, "sg_read");
					break;
				}
				p += sizeof(struct sg_io_hdr);
				events++;
				dprint(FD_IO, "sgio_getevents: events: %d\n", events);
			}
		}

		if (r < 0 && !events)
			break;
		if (!events) {
			usleep(1000);
			goto re_read;
		}

		left -= events;
		r += events;

		for (i = 0; i < events; i++) {
			struct sg_io_hdr *hdr = (struct sg_io_hdr *) buf + i;
			sd->events[i] = hdr->usr_ptr;

			/* record if an io error occurred, ignore resid */
			if (hdr->info & SG_INFO_CHECK) {
				struct io_u *io_u;
				io_u = (struct io_u *)(hdr->usr_ptr);
				memcpy((void*)&(io_u->hdr), (void*)hdr, sizeof(struct sg_io_hdr));
				sd->events[i]->error = EIO;
			}
		}
	}

	if (!min) {
		for_each_file(td, f, i) {
			if (sd->fd_flags[i] == -1)
				continue;

			if (fcntl(f->fd, F_SETFL, sd->fd_flags[i]) < 0)
				log_err("fio: sg failed to restore fcntl flags: %s\n", strerror(errno));
		}
	}

	return r;
}
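
/*
 * Issue the request synchronously with the SG_IO ioctl. This path is used
 * for block devices, where the ioctl does not return until the command
 * completes.
 */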
static int fio_sgio_ioctl_doio(struct thread_data *td,
			       struct fio_file *f, struct io_u *io_u)
{
	struct sgio_data *sd = td->io_ops_data;
	struct sg_io_hdr *hdr = &io_u->hdr;
	int ret;

	sd->events[0] = io_u;

	ret = ioctl(f->fd, SG_IO, hdr);
	if (ret < 0)
		return ret;

	/* record if an io error occurred */
	if (hdr->info & SG_INFO_CHECK)
		io_u->error = EIO;

	return FIO_Q_COMPLETED;
}
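
/*
 * Submit the request by writing the sg_io_hdr to the sg character device.
 * For synchronous IO, immediately read the completed header back; otherwise
 * the request is reaped later in getevents.
 */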
static int fio_sgio_rw_doio(struct fio_file *f, struct io_u *io_u, int do_sync)
{
	struct sg_io_hdr *hdr = &io_u->hdr;
	int ret;

	ret = write(f->fd, hdr, sizeof(*hdr));
	if (ret < 0)
		return ret;

	if (do_sync) {
		ret = read(f->fd, hdr, sizeof(*hdr));
		if (ret < 0)
			return ret;

		/* record if an io error occurred */
		if (hdr->info & SG_INFO_CHECK)
			io_u->error = EIO;

		return FIO_Q_COMPLETED;
	}

	return FIO_Q_QUEUED;
}
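
/*
 * Dispatch to the SG_IO ioctl path for block devices, or the read/write
 * path for sg character devices.
 */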
static int fio_sgio_doio(struct thread_data *td, struct io_u *io_u, int do_sync)
{
	struct fio_file *f = io_u->file;
	int ret;

	if (f->filetype == FIO_TYPE_BD) {
		ret = fio_sgio_ioctl_doio(td, f, io_u);
		td->error = io_u->error;
	} else {
		ret = fio_sgio_rw_doio(f, io_u, do_sync);
		if (do_sync)
			td->error = io_u->error;
	}

	return ret;
}
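
/*
 * Build the CDB for this io_u: READ/WRITE/SYNCHRONIZE CACHE, using the
 * 10-byte form when the LBA fits in 32 bits and the 16-byte form otherwise.
 */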
static int fio_sgio_prep(struct thread_data *td, struct io_u *io_u)
{
	struct sg_io_hdr *hdr = &io_u->hdr;
	struct sgio_data *sd = td->io_ops_data;
	long long nr_blocks, lba;

	if (io_u->xfer_buflen & (sd->bs - 1)) {
		log_err("read/write not sector aligned\n");
		return EINVAL;
	}

	nr_blocks = io_u->xfer_buflen / sd->bs;
	lba = io_u->offset / sd->bs;

	if (io_u->ddir == DDIR_READ) {
		sgio_hdr_init(sd, hdr, io_u, 1);

		hdr->dxfer_direction = SG_DXFER_FROM_DEV;
		if (lba < MAX_10B_LBA)
			hdr->cmdp[0] = 0x28; // read(10)
		else
			hdr->cmdp[0] = 0x88; // read(16)
	} else if (io_u->ddir == DDIR_WRITE) {
		sgio_hdr_init(sd, hdr, io_u, 1);

		hdr->dxfer_direction = SG_DXFER_TO_DEV;
		if (lba < MAX_10B_LBA)
			hdr->cmdp[0] = 0x2a; // write(10)
		else
			hdr->cmdp[0] = 0x8a; // write(16)
	} else {
		sgio_hdr_init(sd, hdr, io_u, 0);
		hdr->dxfer_direction = SG_DXFER_NONE;
		if (lba < MAX_10B_LBA)
			hdr->cmdp[0] = 0x35; // synccache(10)
		else
			hdr->cmdp[0] = 0x91; // synccache(16)
	}

	/*
	 * for synccache, we leave lba and length at 0 to sync all
	 * blocks on the medium.
	 */
	if (hdr->dxfer_direction != SG_DXFER_NONE) {
		if (lba < MAX_10B_LBA) {
			hdr->cmdp[2] = (unsigned char) ((lba >> 24) & 0xff);
			hdr->cmdp[3] = (unsigned char) ((lba >> 16) & 0xff);
			hdr->cmdp[4] = (unsigned char) ((lba >> 8) & 0xff);
			hdr->cmdp[5] = (unsigned char) (lba & 0xff);
			hdr->cmdp[7] = (unsigned char) ((nr_blocks >> 8) & 0xff);
			hdr->cmdp[8] = (unsigned char) (nr_blocks & 0xff);
		} else {
			hdr->cmdp[2] = (unsigned char) ((lba >> 56) & 0xff);
			hdr->cmdp[3] = (unsigned char) ((lba >> 48) & 0xff);
			hdr->cmdp[4] = (unsigned char) ((lba >> 40) & 0xff);
			hdr->cmdp[5] = (unsigned char) ((lba >> 32) & 0xff);
			hdr->cmdp[6] = (unsigned char) ((lba >> 24) & 0xff);
			hdr->cmdp[7] = (unsigned char) ((lba >> 16) & 0xff);
			hdr->cmdp[8] = (unsigned char) ((lba >> 8) & 0xff);
			hdr->cmdp[9] = (unsigned char) (lba & 0xff);
			hdr->cmdp[10] = (unsigned char) ((nr_blocks >> 32) & 0xff);
			hdr->cmdp[11] = (unsigned char) ((nr_blocks >> 16) & 0xff);
			hdr->cmdp[12] = (unsigned char) ((nr_blocks >> 8) & 0xff);
			hdr->cmdp[13] = (unsigned char) (nr_blocks & 0xff);
		}
	}

	hdr->timeout = SCSI_TIMEOUT_MS;
	return 0;
}
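
/*
 * Queue entry point: block devices and synchronous jobs complete inline,
 * while asynchronous IO to sg character devices returns FIO_Q_QUEUED.
 */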
static int fio_sgio_queue(struct thread_data *td, struct io_u *io_u)
{
	struct sg_io_hdr *hdr = &io_u->hdr;
	int ret, do_sync = 0;

	fio_ro_check(td, io_u);

	if (td->o.sync_io || td->o.odirect || ddir_sync(io_u->ddir))
		do_sync = 1;

	ret = fio_sgio_doio(td, io_u, do_sync);

	if (ret < 0)
		io_u->error = errno;
	else if (hdr->status) {
		io_u->resid = hdr->resid;
		io_u->error = EIO;
	}

	if (io_u->error) {
		td_verror(td, io_u->error, "xfer");
		return FIO_Q_COMPLETED;
	}

	return ret;
}

static struct io_u *fio_sgio_event(struct thread_data *td, int event)
{
	struct sgio_data *sd = td->io_ops_data;

	return sd->events[event];
}
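
/*
 * Issue READ CAPACITY on a private fd to discover the device block size
 * and maximum LBA, falling back to the 16-byte form when the 10-byte
 * reply overflows.
 */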
static int fio_sgio_read_capacity(struct thread_data *td, unsigned int *bs,
				  unsigned long long *max_lba)
{
	/*
	 * need to do read capacity operation w/o benefit of sd or
	 * io_u structures, which are not initialized until later.
	 */
	struct sg_io_hdr hdr;
	unsigned char cmd[16];
	unsigned char sb[64];
	unsigned char buf[32]; // read capacity return
	int ret;
	int fd = -1;

	struct fio_file *f = td->files[0];

	/* open file independent of rest of application */
	fd = open(f->file_name, O_RDONLY);
	if (fd < 0)
		return -errno;

	memset(&hdr, 0, sizeof(hdr));
	memset(cmd, 0, sizeof(cmd));
	memset(sb, 0, sizeof(sb));
	memset(buf, 0, sizeof(buf));

	/* First let's try a 10 byte read capacity. */
	hdr.interface_id = 'S';
	hdr.cmdp = cmd;
	hdr.cmd_len = 10;
	hdr.sbp = sb;
	hdr.mx_sb_len = sizeof(sb);
	hdr.timeout = SCSI_TIMEOUT_MS;
	hdr.cmdp[0] = 0x25; // Read Capacity(10)
	hdr.dxfer_direction = SG_DXFER_FROM_DEV;
	hdr.dxferp = buf;
	hdr.dxfer_len = sizeof(buf);

	ret = ioctl(fd, SG_IO, &hdr);
	if (ret < 0) {
		close(fd);
		return ret;
	}

	*bs = (buf[4] << 24) | (buf[5] << 16) | (buf[6] << 8) | buf[7];
	*max_lba = ((buf[0] << 24) | (buf[1] << 16) | (buf[2] << 8) | buf[3]) & 0x00000000FFFFFFFFULL; // for some reason max_lba is being sign extended even though unsigned

	/*
	 * If max lba is 0xFFFFFFFF, then we need to retry with
	 * a 16 byte read capacity.
	 */
	if (*max_lba == MAX_10B_LBA) {
		hdr.cmd_len = 16;
		hdr.cmdp[0] = 0x9e; // Read Capacity(16)
		hdr.cmdp[1] = 0x10; // service action
		hdr.cmdp[10] = (unsigned char) ((sizeof(buf) >> 24) & 0xff);
		hdr.cmdp[11] = (unsigned char) ((sizeof(buf) >> 16) & 0xff);
		hdr.cmdp[12] = (unsigned char) ((sizeof(buf) >> 8) & 0xff);
		hdr.cmdp[13] = (unsigned char) (sizeof(buf) & 0xff);

		hdr.dxfer_direction = SG_DXFER_FROM_DEV;
		hdr.dxferp = buf;
		hdr.dxfer_len = sizeof(buf);

		ret = ioctl(fd, SG_IO, &hdr);
		if (ret < 0) {
			close(fd);
			return ret;
		}

		/* record if an io error occurred */
		if (hdr.info & SG_INFO_CHECK)
			td_verror(td, EIO, "fio_sgio_read_capacity");

		*bs = (buf[8] << 24) | (buf[9] << 16) | (buf[10] << 8) | buf[11];
		*max_lba = ((unsigned long long)buf[0] << 56) |
			   ((unsigned long long)buf[1] << 48) |
			   ((unsigned long long)buf[2] << 40) |
			   ((unsigned long long)buf[3] << 32) |
			   ((unsigned long long)buf[4] << 24) |
			   ((unsigned long long)buf[5] << 16) |
			   ((unsigned long long)buf[6] << 8) |
			   (unsigned long long)buf[7];
	}

	close(fd);
	return 0;
}
static void fio_sgio_cleanup(struct thread_data *td)
{
	struct sgio_data *sd = td->io_ops_data;

	if (sd) {
		free(sd->events);
		free(sd->cmds);
		free(sd->fd_flags);
		free(sd->pfds);
		free(sd->sgbuf);
		free(sd);
	}
}
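
/*
 * Allocate the per-thread engine data: per-io_u command state and event
 * slots sized by iodepth, and poll descriptors sized by the number of files.
 */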
static int fio_sgio_init(struct thread_data *td)
{
	struct sgio_data *sd;

	sd = malloc(sizeof(*sd));
	memset(sd, 0, sizeof(*sd));
	sd->cmds = malloc(td->o.iodepth * sizeof(struct sgio_cmd));
	memset(sd->cmds, 0, td->o.iodepth * sizeof(struct sgio_cmd));
	sd->events = malloc(td->o.iodepth * sizeof(struct io_u *));
	memset(sd->events, 0, td->o.iodepth * sizeof(struct io_u *));
	sd->pfds = malloc(sizeof(struct pollfd) * td->o.nr_files);
	memset(sd->pfds, 0, sizeof(struct pollfd) * td->o.nr_files);
	sd->fd_flags = malloc(sizeof(int) * td->o.nr_files);
	memset(sd->fd_flags, 0, sizeof(int) * td->o.nr_files);
	sd->sgbuf = malloc(sizeof(struct sg_io_hdr) * td->o.iodepth);
	memset(sd->sgbuf, 0, sizeof(struct sg_io_hdr) * td->o.iodepth);
	sd->type_checked = 0;
	td->io_ops_data = sd;

	/*
	 * we want to do it, regardless of whether odirect is set or not
	 */
	td->o.override_sync = 1;
	return 0;
}
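
/*
 * Probe the device at open time: block size via BLKSSZGET for block devices,
 * or READ CAPACITY for sg character devices. Block devices use the
 * synchronous SG_IO ioctl, so their getevents/event hooks are cleared.
 */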
static int fio_sgio_type_check(struct thread_data *td, struct fio_file *f)
{
	struct sgio_data *sd = td->io_ops_data;
	unsigned int bs = 0;
	unsigned long long max_lba = 0;

	if (f->filetype == FIO_TYPE_BD) {
		if (ioctl(f->fd, BLKSSZGET, &bs) < 0) {
			td_verror(td, errno, "ioctl");
			return 1;
		}
	} else if (f->filetype == FIO_TYPE_CHAR) {
		int version, ret;

		if (ioctl(f->fd, SG_GET_VERSION_NUM, &version) < 0) {
			td_verror(td, errno, "ioctl");
			return 1;
		}

		ret = fio_sgio_read_capacity(td, &bs, &max_lba);
		if (ret) {
			td_verror(td, td->error, "fio_sgio_read_capacity");
			log_err("ioengine sg unable to read capacity successfully\n");
			return 1;
		}
	} else {
		td_verror(td, EINVAL, "wrong file type");
		log_err("ioengine sg only works on block or character devices\n");
		return 1;
	}

	sd->bs = bs;
	// Determine the size of commands needed based on max_lba
	sd->max_lba = max_lba;
	if (max_lba > MAX_10B_LBA) {
		dprint(FD_IO, "sgio_type_check: using 16 byte operations: max_lba = 0x%016llx\n", max_lba);
	}

	if (f->filetype == FIO_TYPE_BD) {
		td->io_ops->getevents = NULL;
		td->io_ops->event = NULL;
	}
	sd->type_checked = 1;

	return 0;
}
static int fio_sgio_open(struct thread_data *td, struct fio_file *f)
{
	struct sgio_data *sd = td->io_ops_data;
	int ret;

	ret = generic_open_file(td, f);
	if (ret)
		return ret;

	if (sd && !sd->type_checked && fio_sgio_type_check(td, f)) {
		ret = generic_close_file(td, f);
		return 1;
	}

	return 0;
}
/*
 * Build an error string with details about the driver, host or SCSI
 * error contained in the sg header. The caller will use it as necessary.
 */
static char *fio_sgio_errdetails(struct io_u *io_u)
{
	struct sg_io_hdr *hdr = &io_u->hdr;
#define MAXERRDETAIL 1024
#define MAXMSGCHUNK 128
	char *msg, msgchunk[MAXMSGCHUNK], *ret = NULL;
	int i;

	msg = calloc(MAXERRDETAIL, 1);

	/*
	 * can't seem to find sg_err.h, so I'll just echo the define values
	 * so others can search the internet to find clearer clues of meaning.
	 */
	if (hdr->info & SG_INFO_CHECK) {
		ret = msg;
		if (hdr->host_status) {
			snprintf(msgchunk, MAXMSGCHUNK, "SG Host Status: 0x%02x; ", hdr->host_status);
			strlcat(msg, msgchunk, MAXERRDETAIL);
			switch (hdr->host_status) {
			case 0x01:
				strlcat(msg, "SG_ERR_DID_NO_CONNECT", MAXERRDETAIL);
				break;
			case 0x02:
				strlcat(msg, "SG_ERR_DID_BUS_BUSY", MAXERRDETAIL);
				break;
			case 0x03:
				strlcat(msg, "SG_ERR_DID_TIME_OUT", MAXERRDETAIL);
				break;
			case 0x04:
				strlcat(msg, "SG_ERR_DID_BAD_TARGET", MAXERRDETAIL);
				break;
			case 0x05:
				strlcat(msg, "SG_ERR_DID_ABORT", MAXERRDETAIL);
				break;
			case 0x06:
				strlcat(msg, "SG_ERR_DID_PARITY", MAXERRDETAIL);
				break;
			case 0x07:
				strlcat(msg, "SG_ERR_DID_ERROR (internal error)", MAXERRDETAIL);
				break;
			case 0x08:
				strlcat(msg, "SG_ERR_DID_RESET", MAXERRDETAIL);
				break;
			case 0x09:
				strlcat(msg, "SG_ERR_DID_BAD_INTR (unexpected)", MAXERRDETAIL);
				break;
			case 0x0a:
				strlcat(msg, "SG_ERR_DID_PASSTHROUGH", MAXERRDETAIL);
				break;
			case 0x0b:
				strlcat(msg, "SG_ERR_DID_SOFT_ERROR (driver retry?)", MAXERRDETAIL);
				break;
			case 0x0c:
				strlcat(msg, "SG_ERR_DID_IMM_RETRY", MAXERRDETAIL);
				break;
			case 0x0d:
				strlcat(msg, "SG_ERR_DID_REQUEUE", MAXERRDETAIL);
				break;
			default:
				strlcat(msg, "Unknown", MAXERRDETAIL);
				break;
			}
			strlcat(msg, ". ", MAXERRDETAIL);
		}
		if (hdr->driver_status) {
			snprintf(msgchunk, MAXMSGCHUNK, "SG Driver Status: 0x%02x; ", hdr->driver_status);
			strlcat(msg, msgchunk, MAXERRDETAIL);
			switch (hdr->driver_status & 0x0F) {
			case 0x01:
				strlcat(msg, "SG_ERR_DRIVER_BUSY", MAXERRDETAIL);
				break;
			case 0x02:
				strlcat(msg, "SG_ERR_DRIVER_SOFT", MAXERRDETAIL);
				break;
			case 0x03:
				strlcat(msg, "SG_ERR_DRIVER_MEDIA", MAXERRDETAIL);
				break;
			case 0x04:
				strlcat(msg, "SG_ERR_DRIVER_ERROR", MAXERRDETAIL);
				break;
			case 0x05:
				strlcat(msg, "SG_ERR_DRIVER_INVALID", MAXERRDETAIL);
				break;
			case 0x06:
				strlcat(msg, "SG_ERR_DRIVER_TIMEOUT", MAXERRDETAIL);
				break;
			case 0x07:
				strlcat(msg, "SG_ERR_DRIVER_HARD", MAXERRDETAIL);
				break;
			case 0x08:
				strlcat(msg, "SG_ERR_DRIVER_SENSE", MAXERRDETAIL);
				break;
			default:
				strlcat(msg, "Unknown", MAXERRDETAIL);
				break;
			}
			strlcat(msg, "; ", MAXERRDETAIL);
			switch (hdr->driver_status & 0xF0) {
			case 0x10:
				strlcat(msg, "SG_ERR_SUGGEST_RETRY", MAXERRDETAIL);
				break;
			case 0x20:
				strlcat(msg, "SG_ERR_SUGGEST_ABORT", MAXERRDETAIL);
				break;
			case 0x30:
				strlcat(msg, "SG_ERR_SUGGEST_REMAP", MAXERRDETAIL);
				break;
			case 0x40:
				strlcat(msg, "SG_ERR_SUGGEST_DIE", MAXERRDETAIL);
				break;
			case 0x80:
				strlcat(msg, "SG_ERR_SUGGEST_SENSE", MAXERRDETAIL);
				break;
			}
			strlcat(msg, ". ", MAXERRDETAIL);
		}
		if (hdr->status) {
			snprintf(msgchunk, MAXMSGCHUNK, "SG SCSI Status: 0x%02x; ", hdr->status);
			strlcat(msg, msgchunk, MAXERRDETAIL);
			// SCSI 3 status codes
			switch (hdr->status) {
			case 0x02:
				strlcat(msg, "CHECK_CONDITION", MAXERRDETAIL);
				break;
			case 0x04:
				strlcat(msg, "CONDITION_MET", MAXERRDETAIL);
				break;
			case 0x08:
				strlcat(msg, "BUSY", MAXERRDETAIL);
				break;
			case 0x10:
				strlcat(msg, "INTERMEDIATE", MAXERRDETAIL);
				break;
			case 0x14:
				strlcat(msg, "INTERMEDIATE_CONDITION_MET", MAXERRDETAIL);
				break;
			case 0x18:
				strlcat(msg, "RESERVATION_CONFLICT", MAXERRDETAIL);
				break;
			case 0x22:
				strlcat(msg, "COMMAND_TERMINATED", MAXERRDETAIL);
				break;
			case 0x28:
				strlcat(msg, "TASK_SET_FULL", MAXERRDETAIL);
				break;
			case 0x30:
				strlcat(msg, "ACA_ACTIVE", MAXERRDETAIL);
				break;
			case 0x40:
				strlcat(msg, "TASK_ABORTED", MAXERRDETAIL);
				break;
			default:
				strlcat(msg, "Unknown", MAXERRDETAIL);
				break;
			}
			strlcat(msg, ". ", MAXERRDETAIL);
		}
		if (hdr->sb_len_wr) {
			snprintf(msgchunk, MAXMSGCHUNK, "Sense Data (%d bytes):", hdr->sb_len_wr);
			strlcat(msg, msgchunk, MAXERRDETAIL);
			for (i = 0; i < hdr->sb_len_wr; i++) {
				snprintf(msgchunk, MAXMSGCHUNK, " %02x", hdr->sbp[i]);
				strlcat(msg, msgchunk, MAXERRDETAIL);
			}
			strlcat(msg, ". ", MAXERRDETAIL);
		}
		if (hdr->resid != 0) {
			snprintf(msgchunk, MAXMSGCHUNK, "SG Driver: %d bytes out of %d not transferred. ", hdr->resid, hdr->dxfer_len);
			strlcat(msg, msgchunk, MAXERRDETAIL);
			ret = msg;
		}
	}

	if (!ret)
		ret = strdup("SG Driver did not report a Host, Driver or Device check");

	return ret;
}
/*
 * get max file size from read capacity.
 */
static int fio_sgio_get_file_size(struct thread_data *td, struct fio_file *f)
{
	/*
	 * get_file_size is called even before sgio_init, so none of the
	 * sg_io structures are initialized in the thread_data yet. We
	 * therefore have to do the ReadCapacity without any of those
	 * helpers. One side effect is that ReadCapacity may get called
	 * 4 times on each open: readcap(10) followed by readcap(16) if
	 * needed, just to get the file size, and it will be called again
	 * when "type_check" runs during structure initialization. I'm not
	 * sure how to prevent this little inefficiency.
	 */
	unsigned int bs = 0;
	unsigned long long max_lba = 0;
	int ret;

	if (fio_file_size_known(f))
		return 0;

	ret = fio_sgio_read_capacity(td, &bs, &max_lba);
	if (ret) {
		td_verror(td, td->error, "fio_sgio_read_capacity");
		log_err("ioengine sg unable to successfully execute read capacity to get block size and maximum lba\n");
		return 1;
	}

	f->real_file_size = (max_lba + 1) * bs;
	fio_file_set_size_known(f);
	return 0;
}

static struct ioengine_ops ioengine = {
	.name		= "sg",
	.version	= FIO_IOOPS_VERSION,
	.init		= fio_sgio_init,
	.prep		= fio_sgio_prep,
	.queue		= fio_sgio_queue,
	.getevents	= fio_sgio_getevents,
	.errdetails	= fio_sgio_errdetails,
	.event		= fio_sgio_event,
	.cleanup	= fio_sgio_cleanup,
	.open_file	= fio_sgio_open,
	.close_file	= generic_close_file,
	.get_file_size	= fio_sgio_get_file_size,	// generic_get_file_size
	.flags		= FIO_SYNCIO | FIO_RAWIO,
};
#else /* FIO_HAVE_SGIO */

/*
 * When we have a proper configure system in place, we simply won't build
 * and install this io engine. For now, install a crippled version that
 * just complains and fails to load.
 */
static int fio_sgio_init(struct thread_data fio_unused *td)
{
	log_err("fio: ioengine sg not available\n");
	return 1;
}

static struct ioengine_ops ioengine = {
	.name		= "sg",
	.version	= FIO_IOOPS_VERSION,
	.init		= fio_sgio_init,
};

#endif

static void fio_init fio_sgio_register(void)
{
	register_ioengine(&ioengine);
}

static void fio_exit fio_sgio_unregister(void)
{
	unregister_ioengine(&ioengine);
}