sg: add support for VERIFY command using write modes
[fio.git] / engines / sg.c
CommitLineData
2866c82d 1/*
da751ca9
JA
2 * sg engine
3 *
4 * IO engine that uses the Linux SG v3 interface to talk to SCSI devices
2866c82d 5 *
b4b9665e
VF
6 * This ioengine can operate in two modes:
7 * sync with block devices (/dev/sdX) or
8 * with character devices (/dev/sgY) with direct=1 or sync=1
9 * async with character devices with direct=0 and sync=0
10 *
11 * What value does queue() return for the different cases?
12 * queue() return value
13 * In sync mode:
14 * /dev/sdX RWT FIO_Q_COMPLETED
15 * /dev/sgY RWT FIO_Q_COMPLETED
16 * with direct=1 or sync=1
17 *
18 * In async mode:
19 * /dev/sgY RWT FIO_Q_QUEUED
20 * direct=0 and sync=0
21 *
22 * Because FIO_SYNCIO is set for this ioengine td_io_queue() will fill in
23 * issue_time *before* each IO is sent to queue()
24 *
25 * Where are the IO counting functions called for the different cases?
26 *
27 * In sync mode:
28 * /dev/sdX (commit==NULL)
29 * RWT
30 * io_u_mark_depth() called in td_io_queue()
31 * io_u_mark_submit/complete() called in td_io_queue()
32 * issue_time set in td_io_queue()
33 *
34 * /dev/sgY with direct=1 or sync=1 (commit does nothing)
35 * RWT
36 * io_u_mark_depth() called in td_io_queue()
37 * io_u_mark_submit/complete() called in queue()
38 * issue_time set in td_io_queue()
39 *
40 * In async mode:
41 * /dev/sgY with direct=0 and sync=0
42 * RW: read and write operations are submitted in queue()
43 * io_u_mark_depth() called in td_io_commit()
44 * io_u_mark_submit() called in queue()
45 * issue_time set in td_io_queue()
46 * T: trim operations are queued in queue() and submitted in commit()
47 * io_u_mark_depth() called in td_io_commit()
48 * io_u_mark_submit() called in commit()
49 * issue_time set in commit()
50 *
2866c82d
JA
51 */
52#include <stdio.h>
53#include <stdlib.h>
54#include <unistd.h>
55#include <errno.h>
8393ca93 56#include <poll.h>
5f350952
JA
57
58#include "../fio.h"
52b81b7c 59#include "../optgroup.h"
2866c82d 60
34cfcdaf
JA
61#ifdef FIO_HAVE_SGIO
62
b0dc148e
DG
63#ifndef SGV4_FLAG_HIPRI
64#define SGV4_FLAG_HIPRI 0x800
65#endif
66
cbdc9353
VF
/*
 * SCSI write-path command variants, selected with the "sg_write_mode"
 * option. FIO_SG_WRITE (plain WRITE(10)/(16)) is the default; the
 * VERIFY_BYTCHK_* entries issue SCSI VERIFY commands with the given
 * BYTCHK field value (see fio_sgio_prep() for the CDB details).
 */
enum {
	FIO_SG_WRITE = 1,
	FIO_SG_WRITE_VERIFY,
	FIO_SG_WRITE_SAME,
	FIO_SG_VERIFY_BYTCHK_00,
	FIO_SG_VERIFY_BYTCHK_01,
	FIO_SG_VERIFY_BYTCHK_11,
};
52b81b7c
KD
75
/* Per-thread engine options parsed from the job file. */
struct sg_options {
	void *pad;		/* mandatory leading pad for fio engine options */
	unsigned int hipri;	/* use polled IO completions (SGV4_FLAG_HIPRI) */
	unsigned int readfua;	/* set FUA bit on all reads */
	unsigned int writefua;	/* set FUA bit on all writes */
	unsigned int write_mode;	/* one of the FIO_SG_* variants above */
};
83
/* Job-file option table exposed by the sg ioengine. */
static struct fio_option options[] = {
	{
		.name	= "hipri",
		.lname	= "High Priority",
		.type	= FIO_OPT_STR_SET,
		.off1	= offsetof(struct sg_options, hipri),
		.help	= "Use polled IO completions",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_SG,
	},
	{
		.name	= "readfua",
		.lname	= "sg engine read fua flag support",
		.type	= FIO_OPT_BOOL,
		.off1	= offsetof(struct sg_options, readfua),
		.help	= "Set FUA flag (force unit access) for all Read operations",
		.def	= "0",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_SG,
	},
	{
		.name	= "writefua",
		.lname	= "sg engine write fua flag support",
		.type	= FIO_OPT_BOOL,
		.off1	= offsetof(struct sg_options, writefua),
		.help	= "Set FUA flag (force unit access) for all Write operations",
		.def	= "0",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_SG,
	},
	{
		.name	= "sg_write_mode",
		.lname	= "specify sg write mode",
		.type	= FIO_OPT_STR,
		.off1	= offsetof(struct sg_options, write_mode),
		.help	= "Specify SCSI WRITE mode",
		.def	= "write",
		.posval = {
			  { .ival = "write",
			    .oval = FIO_SG_WRITE,
			    .help = "Issue standard SCSI WRITE commands",
			  },
			  { .ival = "verify",
			    .oval = FIO_SG_WRITE_VERIFY,
			    .help = "Issue SCSI WRITE AND VERIFY commands",
			  },
			  { .ival = "same",
			    .oval = FIO_SG_WRITE_SAME,
			    .help = "Issue SCSI WRITE SAME commands",
			  },
			  { .ival = "verify_bytchk_00",
			    .oval = FIO_SG_VERIFY_BYTCHK_00,
			    .help = "Issue SCSI VERIFY commands with BYTCHK set to 00",
			  },
			  { .ival = "verify_bytchk_01",
			    .oval = FIO_SG_VERIFY_BYTCHK_01,
			    .help = "Issue SCSI VERIFY commands with BYTCHK set to 01",
			  },
			  { .ival = "verify_bytchk_11",
			    .oval = FIO_SG_VERIFY_BYTCHK_11,
			    .help = "Issue SCSI VERIFY commands with BYTCHK set to 11",
			  },
		},
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_SG,
	},
	{
		.name	= NULL,
	},
};
154
5ad7be56
KD
155#define MAX_10B_LBA 0xFFFFFFFFULL
156#define SCSI_TIMEOUT_MS 30000 // 30 second timeout; currently no method to override
157#define MAX_SB 64 // sense block maximum return size
aa18e0ec
VF
158/*
159#define FIO_SGIO_DEBUG
160*/
5ad7be56 161
2866c82d 162struct sgio_cmd {
fde57152 163 unsigned char cdb[16]; // enhanced from 10 to support 16 byte commands
5ad7be56 164 unsigned char sb[MAX_SB]; // add sense block to commands
2866c82d
JA
165 int nr;
166};
167
b4b9665e 168struct sgio_trim {
a824149a 169 uint8_t *unmap_param;
b4b9665e
VF
170 unsigned int unmap_range_count;
171 struct io_u **trim_io_us;
172};
173
2866c82d
JA
174struct sgio_data {
175 struct sgio_cmd *cmds;
176 struct io_u **events;
dc0deca2
JA
177 struct pollfd *pfds;
178 int *fd_flags;
179 void *sgbuf;
2866c82d 180 unsigned int bs;
b5af8293 181 int type_checked;
b4b9665e
VF
182 struct sgio_trim **trim_queues;
183 int current_queue;
aa18e0ec 184#ifdef FIO_SGIO_DEBUG
b4b9665e 185 unsigned int *trim_queue_map;
aa18e0ec 186#endif
2866c82d
JA
187};
188
a824149a
DF
/*
 * Load a 32-bit big-endian value from @buf.
 *
 * Use memcpy rather than dereferencing buf through a cast: the buffer
 * is a raw uint8_t array (CDB/parameter data) with no alignment
 * guarantee, and a uint32_t* dereference would be a strict-aliasing
 * violation. The sgio_set_be* helpers already use memcpy; this makes
 * the getters symmetric.
 */
static inline uint32_t sgio_get_be32(uint8_t *buf)
{
	uint32_t t;

	memcpy(&t, buf, sizeof(t));
	return be32_to_cpu(t);
}
193
/*
 * Load a 64-bit big-endian value from @buf.
 *
 * memcpy instead of a uint64_t* cast: avoids undefined behavior from
 * misaligned access and strict-aliasing violations on the raw byte
 * buffers this is used with.
 */
static inline uint64_t sgio_get_be64(uint8_t *buf)
{
	uint64_t t;

	memcpy(&t, buf, sizeof(t));
	return be64_to_cpu(t);
}
198
/* Store @val into @buf in big-endian byte order (16-bit). */
static inline void sgio_set_be16(uint16_t val, uint8_t *buf)
{
	const uint16_t be = cpu_to_be16(val);

	memcpy(buf, &be, sizeof(be));
}
205
/* Store @val into @buf in big-endian byte order (32-bit). */
static inline void sgio_set_be32(uint32_t val, uint8_t *buf)
{
	const uint32_t be = cpu_to_be32(val);

	memcpy(buf, &be, sizeof(be));
}
212
/* Store @val into @buf in big-endian byte order (64-bit). */
static inline void sgio_set_be64(uint64_t val, uint8_t *buf)
{
	const uint64_t be = cpu_to_be64(val);

	memcpy(buf, &be, sizeof(be));
}
219
b4b9665e
VF
220static inline bool sgio_unbuffered(struct thread_data *td)
221{
222 return (td->o.odirect || td->o.sync_io);
223}
224
2866c82d
JA
/*
 * Initialize the sg_io_hdr for @io_u: zero the header and this slot's
 * CDB, wire up the CDB and sense buffers, and stash the io_u pointer
 * in usr_ptr so completions can be matched back. When @fs is nonzero,
 * also point the transfer at the io_u's xfer buffer (commands such as
 * UNMAP and SYNCHRONIZE CACHE pass fs=0 and set up dxferp themselves).
 */
static void sgio_hdr_init(struct sgio_data *sd, struct sg_io_hdr *hdr,
			  struct io_u *io_u, int fs)
{
	struct sgio_cmd *sc = &sd->cmds[io_u->index];

	memset(hdr, 0, sizeof(*hdr));
	memset(sc->cdb, 0, sizeof(sc->cdb));

	hdr->interface_id = 'S';
	hdr->cmdp = sc->cdb;
	hdr->cmd_len = sizeof(sc->cdb);
	hdr->sbp = sc->sb;
	hdr->mx_sb_len = sizeof(sc->sb);
	hdr->pack_id = io_u->index;
	hdr->usr_ptr = io_u;
	hdr->timeout = SCSI_TIMEOUT_MS;

	if (fs) {
		hdr->dxferp = io_u->xfer_buf;
		hdr->dxfer_len = io_u->xfer_buflen;
	}
}
247
adee86c5
JA
/*
 * Return 1 if any of the @fds descriptors polled readable (POLLIN
 * set in revents), 0 otherwise.
 */
static int pollin_events(struct pollfd *pfds, int fds)
{
	int idx;

	for (idx = 0; idx < fds; idx++) {
		if (pfds[idx].revents & POLLIN)
			return 1;
	}

	return 0;
}
2866c82d 258
14d0261e
JA
/*
 * Read exactly @size bytes from @fd into @data, retrying on
 * EINTR/EAGAIN. Returns 0 on success, the read() errno on failure, or
 * EAGAIN if EOF was reached before @size bytes arrived.
 *
 * Use a char * cursor for the advancing pointer: arithmetic on void *
 * is a GCC extension, not standard C.
 */
static int sg_fd_read(int fd, void *data, size_t size)
{
	char *p = data;
	int err = 0;

	while (size) {
		ssize_t ret;

		ret = read(fd, p, size);
		if (ret < 0) {
			if (errno == EAGAIN || errno == EINTR)
				continue;
			err = errno;
			break;
		} else if (!ret)
			break;
		else {
			p += ret;
			size -= ret;
		}
	}

	if (err)
		return err;
	if (size)
		return EAGAIN;

	return 0;
}
287
e7d2e616 288static int fio_sgio_getevents(struct thread_data *td, unsigned int min,
1f440ece
JA
289 unsigned int max,
290 const struct timespec fio_unused *t)
2866c82d 291{
565e784d 292 struct sgio_data *sd = td->io_ops_data;
b4b9665e 293 int left = max, eventNum, ret, r = 0, trims = 0;
dc0deca2 294 void *buf = sd->sgbuf;
b4b9665e 295 unsigned int i, j, events;
946ff865 296 struct fio_file *f;
b4b9665e 297 struct io_u *io_u;
2866c82d
JA
298
299 /*
adee86c5 300 * Fill in the file descriptors
2866c82d 301 */
adee86c5
JA
302 for_each_file(td, f, i) {
303 /*
304 * don't block for min events == 0
305 */
4a851614 306 if (!min)
3a35845f
JA
307 sd->fd_flags[i] = fio_set_fd_nonblocking(f->fd, "sg");
308 else
309 sd->fd_flags[i] = -1;
4a851614 310
dc0deca2
JA
311 sd->pfds[i].fd = f->fd;
312 sd->pfds[i].events = POLLIN;
2866c82d
JA
313 }
314
b4b9665e
VF
315 /*
316 ** There are two counters here:
317 ** - number of SCSI commands completed
318 ** - number of io_us completed
319 **
320 ** These are the same with reads and writes, but
321 ** could differ with trim/unmap commands because
322 ** a single unmap can include multiple io_us
323 */
324
325 while (left > 0) {
c97e3cb0 326 char *p;
adee86c5 327
b4b9665e 328 dprint(FD_IO, "sgio_getevents: sd %p: min=%d, max=%d, left=%d\n", sd, min, max, left);
5ad7be56 329
2866c82d
JA
330 do {
331 if (!min)
332 break;
adee86c5 333
2dc1bbeb 334 ret = poll(sd->pfds, td->o.nr_files, -1);
adee86c5 335 if (ret < 0) {
adee86c5 336 if (!r)
22819ec2 337 r = -errno;
e1161c32 338 td_verror(td, errno, "poll");
adee86c5
JA
339 break;
340 } else if (!ret)
341 continue;
342
2dc1bbeb 343 if (pollin_events(sd->pfds, td->o.nr_files))
2866c82d
JA
344 break;
345 } while (1);
346
adee86c5 347 if (r < 0)
2866c82d 348 break;
adee86c5
JA
349
350re_read:
351 p = buf;
352 events = 0;
353 for_each_file(td, f, i) {
5ad7be56 354 for (eventNum = 0; eventNum < left; eventNum++) {
14d0261e 355 ret = sg_fd_read(f->fd, p, sizeof(struct sg_io_hdr));
b4b9665e 356 dprint(FD_IO, "sgio_getevents: sg_fd_read ret: %d\n", ret);
14d0261e
JA
357 if (ret) {
358 r = -ret;
359 td_verror(td, r, "sg_read");
5ad7be56 360 break;
5ad7be56 361 }
b4b9665e
VF
362 io_u = ((struct sg_io_hdr *)p)->usr_ptr;
363 if (io_u->ddir == DDIR_TRIM) {
364 events += sd->trim_queues[io_u->index]->unmap_range_count;
365 eventNum += sd->trim_queues[io_u->index]->unmap_range_count - 1;
366 } else
367 events++;
368
14d0261e 369 p += sizeof(struct sg_io_hdr);
b4b9665e 370 dprint(FD_IO, "sgio_getevents: events: %d, eventNum: %d, left: %d\n", events, eventNum, left);
adee86c5
JA
371 }
372 }
373
14d0261e 374 if (r < 0 && !events)
2866c82d 375 break;
adee86c5
JA
376 if (!events) {
377 usleep(1000);
378 goto re_read;
379 }
2866c82d 380
2866c82d
JA
381 left -= events;
382 r += events;
383
384 for (i = 0; i < events; i++) {
385 struct sg_io_hdr *hdr = (struct sg_io_hdr *) buf + i;
b4b9665e
VF
386 sd->events[i + trims] = hdr->usr_ptr;
387 io_u = (struct io_u *)(hdr->usr_ptr);
5ad7be56 388
5ad7be56 389 if (hdr->info & SG_INFO_CHECK) {
b4b9665e 390 /* record if an io error occurred, ignore resid */
be660713 391 memcpy(&io_u->hdr, hdr, sizeof(struct sg_io_hdr));
b4b9665e
VF
392 sd->events[i + trims]->error = EIO;
393 }
394
395 if (io_u->ddir == DDIR_TRIM) {
396 struct sgio_trim *st = sd->trim_queues[io_u->index];
aa18e0ec 397#ifdef FIO_SGIO_DEBUG
b4b9665e 398 assert(st->trim_io_us[0] == io_u);
aa18e0ec 399 assert(sd->trim_queue_map[io_u->index] == io_u->index);
b4b9665e
VF
400 dprint(FD_IO, "sgio_getevents: reaping %d io_us from trim queue %d\n", st->unmap_range_count, io_u->index);
401 dprint(FD_IO, "sgio_getevents: reaped io_u %d and stored in events[%d]\n", io_u->index, i+trims);
aa18e0ec 402#endif
b4b9665e
VF
403 for (j = 1; j < st->unmap_range_count; j++) {
404 ++trims;
405 sd->events[i + trims] = st->trim_io_us[j];
aa18e0ec 406#ifdef FIO_SGIO_DEBUG
b4b9665e 407 dprint(FD_IO, "sgio_getevents: reaped io_u %d and stored in events[%d]\n", st->trim_io_us[j]->index, i+trims);
aa18e0ec
VF
408 assert(sd->trim_queue_map[st->trim_io_us[j]->index] == io_u->index);
409#endif
b4b9665e
VF
410 if (hdr->info & SG_INFO_CHECK) {
411 /* record if an io error occurred, ignore resid */
412 memcpy(&st->trim_io_us[j]->hdr, hdr, sizeof(struct sg_io_hdr));
413 sd->events[i + trims]->error = EIO;
414 }
415 }
416 events -= st->unmap_range_count - 1;
417 st->unmap_range_count = 0;
5ad7be56 418 }
2866c82d
JA
419 }
420 }
421
adee86c5 422 if (!min) {
affe05a9 423 for_each_file(td, f, i) {
3a35845f
JA
424 if (sd->fd_flags[i] == -1)
425 continue;
426
affe05a9
JA
427 if (fcntl(f->fd, F_SETFL, sd->fd_flags[i]) < 0)
428 log_err("fio: sg failed to restore fcntl flags: %s\n", strerror(errno));
429 }
adee86c5 430 }
2866c82d 431
2866c82d
JA
432 return r;
433}
434
2e4ef4fb
JA
/*
 * Synchronous submission path for block devices: issue the command via
 * the SG_IO ioctl, which blocks until completion. Flags EIO on the
 * io_u if the device reported a check condition. Always completes
 * inline (or returns the negative ioctl error).
 */
static enum fio_q_status fio_sgio_ioctl_doio(struct thread_data *td,
					     struct fio_file *f,
					     struct io_u *io_u)
{
	struct sgio_data *sd = td->io_ops_data;
	struct sg_io_hdr *hdr = &io_u->hdr;
	int ret;

	sd->events[0] = io_u;

	ret = ioctl(f->fd, SG_IO, hdr);
	if (ret < 0)
		return ret;

	/* record if an io error occurred */
	if (hdr->info & SG_INFO_CHECK)
		io_u->error = EIO;

	return FIO_Q_COMPLETED;
}
455
a999bc49
JA
/*
 * Submit one command by write()ing its sg_io_hdr to the sg character
 * device. In async mode the command is left in flight (FIO_Q_QUEUED);
 * in sync mode, read() back completions until our own command returns,
 * handing any other in-flight completions to io_u_sync_complete().
 */
static enum fio_q_status fio_sgio_rw_doio(struct thread_data *td,
					  struct fio_file *f,
					  struct io_u *io_u, int do_sync)
{
	struct sg_io_hdr *hdr = &io_u->hdr;
	int ret;

	ret = write(f->fd, hdr, sizeof(*hdr));
	if (ret < 0)
		return ret;

	if (do_sync) {
		/*
		 * We can't just read back the first command that completes
		 * and assume it's the one we need, it could be any command
		 * that is inflight.
		 */
		do {
			struct io_u *__io_u;

			ret = read(f->fd, hdr, sizeof(*hdr));
			if (ret < 0)
				return ret;

			__io_u = hdr->usr_ptr;

			/* record if an io error occurred */
			if (hdr->info & SG_INFO_CHECK)
				__io_u->error = EIO;

			if (__io_u == io_u)
				break;

			if (io_u_sync_complete(td, __io_u))
				break;

		} while (1);

		return FIO_Q_COMPLETED;
	}

	return FIO_Q_QUEUED;
}
499
b4b9665e
VF
500static enum fio_q_status fio_sgio_doio(struct thread_data *td,
501 struct io_u *io_u, int do_sync)
2866c82d 502{
53cdc686 503 struct fio_file *f = io_u->file;
b4b9665e 504 enum fio_q_status ret;
53cdc686 505
686fbd31 506 if (f->filetype == FIO_TYPE_BLOCK) {
5ad7be56 507 ret = fio_sgio_ioctl_doio(td, f, io_u);
a999bc49
JA
508 if (io_u->error)
509 td_verror(td, io_u->error, __func__);
5ad7be56 510 } else {
a999bc49
JA
511 ret = fio_sgio_rw_doio(td, f, io_u, do_sync);
512 if (io_u->error && do_sync)
c9aeb797 513 td_verror(td, io_u->error, __func__);
5ad7be56 514 }
2866c82d 515
5ad7be56 516 return ret;
2866c82d
JA
517}
518
b4b9665e
VF
519static void fio_sgio_rw_lba(struct sg_io_hdr *hdr, unsigned long long lba,
520 unsigned long long nr_blocks)
521{
522 if (lba < MAX_10B_LBA) {
a824149a
DF
523 sgio_set_be32((uint32_t) lba, &hdr->cmdp[2]);
524 sgio_set_be16((uint16_t) nr_blocks, &hdr->cmdp[7]);
b4b9665e 525 } else {
a824149a
DF
526 sgio_set_be64(lba, &hdr->cmdp[2]);
527 sgio_set_be32((uint32_t) nr_blocks, &hdr->cmdp[10]);
b4b9665e
VF
528 }
529
530 return;
531}
532
2866c82d
JA
/*
 * Build the SCSI CDB for @io_u ahead of submission.
 *
 * Reads use READ(10)/(16); writes pick the opcode from the
 * sg_write_mode option (WRITE, WRITE AND VERIFY, WRITE SAME, or
 * VERIFY with BYTCHK 00/01/11); trims are accumulated into an UNMAP
 * parameter list and finalized later (in queue() or commit());
 * syncs become SYNCHRONIZE CACHE(10)/(16).
 *
 * Returns 0 on success or EINVAL if the buffer length is not a
 * multiple of the device block size.
 */
static int fio_sgio_prep(struct thread_data *td, struct io_u *io_u)
{
	struct sg_io_hdr *hdr = &io_u->hdr;
	struct sg_options *o = td->eo;
	struct sgio_data *sd = td->io_ops_data;
	unsigned long long nr_blocks, lba;
	int offset;

	if (io_u->xfer_buflen & (sd->bs - 1)) {
		log_err("read/write not sector aligned\n");
		return EINVAL;
	}

	nr_blocks = io_u->xfer_buflen / sd->bs;
	lba = io_u->offset / sd->bs;

	if (io_u->ddir == DDIR_READ) {
		sgio_hdr_init(sd, hdr, io_u, 1);

		hdr->dxfer_direction = SG_DXFER_FROM_DEV;
		if (lba < MAX_10B_LBA)
			hdr->cmdp[0] = 0x28; // read(10)
		else
			hdr->cmdp[0] = 0x88; // read(16)

		if (o->hipri)
			hdr->flags |= SGV4_FLAG_HIPRI;
		if (o->readfua)
			hdr->cmdp[1] |= 0x08;

		fio_sgio_rw_lba(hdr, lba, nr_blocks);

	} else if (io_u->ddir == DDIR_WRITE) {
		sgio_hdr_init(sd, hdr, io_u, 1);

		hdr->dxfer_direction = SG_DXFER_TO_DEV;
		switch(o->write_mode) {
		case FIO_SG_WRITE:
			if (lba < MAX_10B_LBA)
				hdr->cmdp[0] = 0x2a; // write(10)
			else
				hdr->cmdp[0] = 0x8a; // write(16)
			if (o->hipri)
				hdr->flags |= SGV4_FLAG_HIPRI;
			if (o->writefua)
				hdr->cmdp[1] |= 0x08;
			break;
		case FIO_SG_WRITE_VERIFY:
			if (lba < MAX_10B_LBA)
				hdr->cmdp[0] = 0x2e; // write and verify(10)
			else
				hdr->cmdp[0] = 0x8e; // write and verify(16)
			break;
			// BYTCHK is disabled by virtue of the memset in sgio_hdr_init
		case FIO_SG_WRITE_SAME:
			/* WRITE SAME transfers a single block of data */
			hdr->dxfer_len = sd->bs;
			if (lba < MAX_10B_LBA)
				hdr->cmdp[0] = 0x41; // write same(10)
			else
				hdr->cmdp[0] = 0x93; // write same(16)
			break;
		case FIO_SG_VERIFY_BYTCHK_00:
			/* media check only: no data is transferred */
			if (lba < MAX_10B_LBA)
				hdr->cmdp[0] = 0x2f; // VERIFY(10)
			else
				hdr->cmdp[0] = 0x8f; // VERIFY(16)
			hdr->dxfer_len = 0;
			break;
		case FIO_SG_VERIFY_BYTCHK_01:
			/* compare full transfer buffer against media */
			if (lba < MAX_10B_LBA)
				hdr->cmdp[0] = 0x2f; // VERIFY(10)
			else
				hdr->cmdp[0] = 0x8f; // VERIFY(16)
			hdr->cmdp[1] |= 0x02; // BYTCHK = 01b
			break;
		case FIO_SG_VERIFY_BYTCHK_11:
			/* compare a single block repeated across the range */
			if (lba < MAX_10B_LBA)
				hdr->cmdp[0] = 0x2f; // VERIFY(10)
			else
				hdr->cmdp[0] = 0x8f; // VERIFY(16)
			hdr->cmdp[1] |= 0x06; // BYTCHK = 11b
			hdr->dxfer_len = sd->bs;
			break;
		};

		fio_sgio_rw_lba(hdr, lba, nr_blocks);

	} else if (io_u->ddir == DDIR_TRIM) {
		struct sgio_trim *st;

		/* open a new UNMAP batch if none is being built */
		if (sd->current_queue == -1) {
			sgio_hdr_init(sd, hdr, io_u, 0);

			hdr->cmd_len = 10;
			hdr->dxfer_direction = SG_DXFER_TO_DEV;
			hdr->cmdp[0] = 0x42; // unmap
			sd->current_queue = io_u->index;
			st = sd->trim_queues[sd->current_queue];
			hdr->dxferp = st->unmap_param;
#ifdef FIO_SGIO_DEBUG
			assert(sd->trim_queues[io_u->index]->unmap_range_count == 0);
			dprint(FD_IO, "sg: creating new queue based on io_u %d\n", io_u->index);
#endif
		}
		else
			st = sd->trim_queues[sd->current_queue];

		dprint(FD_IO, "sg: adding io_u %d to trim queue %d\n", io_u->index, sd->current_queue);
		st->trim_io_us[st->unmap_range_count] = io_u;
#ifdef FIO_SGIO_DEBUG
		sd->trim_queue_map[io_u->index] = sd->current_queue;
#endif

		/* append one 16-byte block descriptor after the 8-byte
		 * parameter list header */
		offset = 8 + 16 * st->unmap_range_count;
		sgio_set_be64(lba, &st->unmap_param[offset]);
		sgio_set_be32((uint32_t) nr_blocks, &st->unmap_param[offset + 8]);

		st->unmap_range_count++;

	} else if (ddir_sync(io_u->ddir)) {
		sgio_hdr_init(sd, hdr, io_u, 0);
		hdr->dxfer_direction = SG_DXFER_NONE;
		if (lba < MAX_10B_LBA)
			hdr->cmdp[0] = 0x35; // synccache(10)
		else
			hdr->cmdp[0] = 0x91; // synccache(16)
	} else
		assert(0);

	return 0;
}
664
b4b9665e
VF
665static void fio_sgio_unmap_setup(struct sg_io_hdr *hdr, struct sgio_trim *st)
666{
a824149a 667 uint16_t cnt = st->unmap_range_count * 16;
b4b9665e 668
a824149a
DF
669 hdr->dxfer_len = cnt + 8;
670 sgio_set_be16(cnt + 8, &hdr->cmdp[7]);
671 sgio_set_be16(cnt + 6, st->unmap_param);
672 sgio_set_be16(cnt, &st->unmap_param[2]);
b4b9665e
VF
673
674 return;
675}
676
2e4ef4fb
JA
/*
 * Queue one io_u. Reads/writes/syncs are submitted immediately via
 * fio_sgio_doio(). Trims are finalized and submitted inline when
 * running synchronously (or on a block device); otherwise the UNMAP
 * batch built in prep() is left for commit(). Because FIO_SYNCIO is
 * set, submit/complete accounting is done here for the paths where
 * td_io_queue() won't do it (see the file-header comment).
 */
static enum fio_q_status fio_sgio_queue(struct thread_data *td,
					struct io_u *io_u)
{
	struct sg_io_hdr *hdr = &io_u->hdr;
	struct sgio_data *sd = td->io_ops_data;
	int ret, do_sync = 0;

	fio_ro_check(td, io_u);

	if (sgio_unbuffered(td) || ddir_sync(io_u->ddir))
		do_sync = 1;

	if (io_u->ddir == DDIR_TRIM) {
		if (do_sync || io_u->file->filetype == FIO_TYPE_BLOCK) {
			struct sgio_trim *st = sd->trim_queues[sd->current_queue];

			/* finish cdb setup for unmap because we are
			** doing unmap commands synchronously */
#ifdef FIO_SGIO_DEBUG
			assert(st->unmap_range_count == 1);
			assert(io_u == st->trim_io_us[0]);
#endif
			hdr = &io_u->hdr;

			fio_sgio_unmap_setup(hdr, st);

			st->unmap_range_count = 0;
			sd->current_queue = -1;
		} else
			/* queue up trim ranges and submit in commit() */
			return FIO_Q_QUEUED;
	}

	ret = fio_sgio_doio(td, io_u, do_sync);

	if (ret < 0)
		io_u->error = errno;
	else if (hdr->status) {
		/* device returned non-GOOD status: surface as EIO */
		io_u->resid = hdr->resid;
		io_u->error = EIO;
	} else if (td->io_ops->commit != NULL) {
		/* character-device path (commit is non-NULL): do the
		 * accounting td_io_queue() won't do for us */
		if (do_sync && !ddir_sync(io_u->ddir)) {
			io_u_mark_submit(td, 1);
			io_u_mark_complete(td, 1);
		} else if (io_u->ddir == DDIR_READ || io_u->ddir == DDIR_WRITE) {
			io_u_mark_submit(td, 1);
			io_u_queued(td, io_u);
		}
	}

	if (io_u->error) {
		td_verror(td, io_u->error, "xfer");
		return FIO_Q_COMPLETED;
	}

	return ret;
}
734
b4b9665e
VF
735static int fio_sgio_commit(struct thread_data *td)
736{
737 struct sgio_data *sd = td->io_ops_data;
738 struct sgio_trim *st;
739 struct io_u *io_u;
740 struct sg_io_hdr *hdr;
741 struct timespec now;
742 unsigned int i;
743 int ret;
744
745 if (sd->current_queue == -1)
746 return 0;
747
748 st = sd->trim_queues[sd->current_queue];
749 io_u = st->trim_io_us[0];
750 hdr = &io_u->hdr;
751
752 fio_sgio_unmap_setup(hdr, st);
753
754 sd->current_queue = -1;
755
a999bc49 756 ret = fio_sgio_rw_doio(td, io_u->file, io_u, 0);
b4b9665e 757
53ee8c17
VF
758 if (ret < 0 || hdr->status) {
759 int error;
760
761 if (ret < 0)
762 error = errno;
763 else {
764 error = EIO;
765 ret = -EIO;
b4b9665e 766 }
53ee8c17
VF
767
768 for (i = 0; i < st->unmap_range_count; i++) {
769 st->trim_io_us[i]->error = error;
770 clear_io_u(td, st->trim_io_us[i]);
771 if (hdr->status)
772 st->trim_io_us[i]->resid = hdr->resid;
b4b9665e 773 }
53ee8c17
VF
774
775 td_verror(td, error, "xfer");
776 return ret;
b4b9665e
VF
777 }
778
53ee8c17
VF
779 if (fio_fill_issue_time(td)) {
780 fio_gettime(&now, NULL);
781 for (i = 0; i < st->unmap_range_count; i++) {
782 memcpy(&st->trim_io_us[i]->issue_time, &now, sizeof(now));
783 io_u_queued(td, io_u);
784 }
b4b9665e 785 }
53ee8c17 786 io_u_mark_submit(td, st->unmap_range_count);
b4b9665e 787
53ee8c17 788 return 0;
b4b9665e
VF
789}
790
2866c82d
JA
791static struct io_u *fio_sgio_event(struct thread_data *td, int event)
792{
565e784d 793 struct sgio_data *sd = td->io_ops_data;
2866c82d
JA
794
795 return sd->events[event];
796}
797
5ad7be56
KD
/*
 * Probe the device block size and highest LBA via READ CAPACITY,
 * opening the file independently since sd/io_u state is not yet
 * initialized when this runs. Tries READ CAPACITY(10) first and falls
 * back to READ CAPACITY(16) when the device rejects it or reports the
 * 10-byte maximum. Returns 0 and fills *bs / *max_lba on success,
 * EIO or a negative errno on failure.
 */
static int fio_sgio_read_capacity(struct thread_data *td, unsigned int *bs,
				  unsigned long long *max_lba)
{
	/*
	 * need to do read capacity operation w/o benefit of sd or
	 * io_u structures, which are not initialized until later.
	 */
	struct sg_io_hdr hdr;
	unsigned long long hlba;
	unsigned int blksz = 0;
	unsigned char cmd[16];
	unsigned char sb[64];
	unsigned char buf[32]; // read capacity return
	int ret;
	int fd = -1;

	struct fio_file *f = td->files[0];

	/* open file independent of rest of application */
	fd = open(f->file_name, O_RDONLY);
	if (fd < 0)
		return -errno;

	memset(&hdr, 0, sizeof(hdr));
	memset(cmd, 0, sizeof(cmd));
	memset(sb, 0, sizeof(sb));
	memset(buf, 0, sizeof(buf));

	/* First let's try a 10 byte read capacity. */
	hdr.interface_id = 'S';
	hdr.cmdp = cmd;
	hdr.cmd_len = 10;
	hdr.sbp = sb;
	hdr.mx_sb_len = sizeof(sb);
	hdr.timeout = SCSI_TIMEOUT_MS;
	hdr.cmdp[0] = 0x25;  // Read Capacity(10)
	hdr.dxfer_direction = SG_DXFER_FROM_DEV;
	hdr.dxferp = buf;
	hdr.dxfer_len = sizeof(buf);

	ret = ioctl(fd, SG_IO, &hdr);
	if (ret < 0) {
		close(fd);
		return ret;
	}

	if (hdr.info & SG_INFO_CHECK) {
		/* RCAP(10) might be unsupported by device. Force RCAP(16) */
		hlba = MAX_10B_LBA;
	} else {
		blksz = sgio_get_be32(&buf[4]);
		hlba = sgio_get_be32(buf);
	}

	/*
	 * If max lba masked by MAX_10B_LBA equals MAX_10B_LBA,
	 * then need to retry with 16 byte Read Capacity command.
	 */
	if (hlba == MAX_10B_LBA) {
		hdr.cmd_len = 16;
		hdr.cmdp[0] = 0x9e; // service action
		hdr.cmdp[1] = 0x10; // Read Capacity(16)
		sgio_set_be32(sizeof(buf), &hdr.cmdp[10]);

		hdr.dxfer_direction = SG_DXFER_FROM_DEV;
		hdr.dxferp = buf;
		hdr.dxfer_len = sizeof(buf);

		ret = ioctl(fd, SG_IO, &hdr);
		if (ret < 0) {
			close(fd);
			return ret;
		}

		/* record if an io error occurred */
		if (hdr.info & SG_INFO_CHECK)
			td_verror(td, EIO, "fio_sgio_read_capacity");

		blksz = sgio_get_be32(&buf[8]);
		hlba = sgio_get_be64(buf);
	}

	if (blksz) {
		*bs = blksz;
		*max_lba = hlba;
		ret = 0;
	} else {
		ret = EIO;
	}

	close(fd);
	return ret;
}
891
892static void fio_sgio_cleanup(struct thread_data *td)
893{
565e784d 894 struct sgio_data *sd = td->io_ops_data;
b4b9665e 895 int i;
dc0deca2
JA
896
897 if (sd) {
898 free(sd->events);
899 free(sd->cmds);
900 free(sd->fd_flags);
901 free(sd->pfds);
902 free(sd->sgbuf);
aa18e0ec 903#ifdef FIO_SGIO_DEBUG
b4b9665e 904 free(sd->trim_queue_map);
aa18e0ec 905#endif
b4b9665e
VF
906
907 for (i = 0; i < td->o.iodepth; i++) {
908 free(sd->trim_queues[i]->unmap_param);
909 free(sd->trim_queues[i]->trim_io_us);
910 free(sd->trim_queues[i]);
911 }
912
913 free(sd->trim_queues);
dc0deca2 914 free(sd);
2866c82d
JA
915 }
916}
917
/*
 * Allocate per-thread engine state: command slots, completion buffers,
 * event array, poll descriptors, and one trim queue per iodepth slot.
 * Always returns 0.
 *
 * NOTE(review): calloc() return values are not checked here; an
 * allocation failure would surface later as a NULL dereference --
 * confirm whether fio's init convention should report ENOMEM instead.
 */
static int fio_sgio_init(struct thread_data *td)
{
	struct sgio_data *sd;
	struct sgio_trim *st;
	struct sg_io_hdr *h3p;
	int i;

	sd = calloc(1, sizeof(*sd));
	sd->cmds = calloc(td->o.iodepth, sizeof(struct sgio_cmd));
	sd->sgbuf = calloc(td->o.iodepth, sizeof(struct sg_io_hdr));
	sd->events = calloc(td->o.iodepth, sizeof(struct io_u *));
	sd->pfds = calloc(td->o.nr_files, sizeof(struct pollfd));
	sd->fd_flags = calloc(td->o.nr_files, sizeof(int));
	sd->type_checked = 0;

	sd->trim_queues = calloc(td->o.iodepth, sizeof(struct sgio_trim *));
	sd->current_queue = -1;
#ifdef FIO_SGIO_DEBUG
	sd->trim_queue_map = calloc(td->o.iodepth, sizeof(int));
#endif
	/* pre-set the interface id on every reusable sg_io_hdr, and give
	 * each trim queue room for iodepth UNMAP ranges (8-byte header +
	 * 16 bytes per range fits in (iodepth + 1) * 16) */
	for (i = 0, h3p = sd->sgbuf; i < td->o.iodepth; i++, ++h3p) {
		sd->trim_queues[i] = calloc(1, sizeof(struct sgio_trim));
		st = sd->trim_queues[i];
		st->unmap_param = calloc(td->o.iodepth + 1, sizeof(char[16]));
		st->unmap_range_count = 0;
		st->trim_io_us = calloc(td->o.iodepth, sizeof(struct io_u *));
		h3p->interface_id = 'S';
	}

	td->io_ops_data = sd;

	/*
	 * we want to do it, regardless of whether odirect is set or not
	 */
	td->o.override_sync = 1;
	return 0;
}
955
/*
 * One-time per-file probe: determine the device block size (BLKSSZGET
 * for block devices, READ CAPACITY for sg character devices) and
 * verify the file is a usable block/character device. For block
 * devices the async hooks are disabled so all IO goes through the
 * synchronous SG_IO path. Returns 0 on success, 1 on error.
 */
static int fio_sgio_type_check(struct thread_data *td, struct fio_file *f)
{
	struct sgio_data *sd = td->io_ops_data;
	unsigned int bs = 0;
	unsigned long long max_lba = 0;

	if (f->filetype == FIO_TYPE_BLOCK) {
		if (ioctl(f->fd, BLKSSZGET, &bs) < 0) {
			td_verror(td, errno, "ioctl");
			return 1;
		}
	} else if (f->filetype == FIO_TYPE_CHAR) {
		int version, ret;

		if (ioctl(f->fd, SG_GET_VERSION_NUM, &version) < 0) {
			td_verror(td, errno, "ioctl");
			return 1;
		}

		ret = fio_sgio_read_capacity(td, &bs, &max_lba);
		if (ret) {
			td_verror(td, td->error, "fio_sgio_read_capacity");
			log_err("ioengine sg unable to read capacity successfully\n");
			return 1;
		}
	} else {
		td_verror(td, EINVAL, "wrong file type");
		log_err("ioengine sg only works on block or character devices\n");
		return 1;
	}

	sd->bs = bs;
	// Determine size of commands needed based on max_lba
	if (max_lba >= MAX_10B_LBA) {
		dprint(FD_IO, "sgio_type_check: using 16 byte read/write "
			"commands for lba above 0x%016llx/0x%016llx\n",
			MAX_10B_LBA, max_lba);
	}

	if (f->filetype == FIO_TYPE_BLOCK) {
		td->io_ops->getevents = NULL;
		td->io_ops->event = NULL;
		td->io_ops->commit = NULL;
		/*
		** Setting these functions to null may cause problems
		** with filename=/dev/sda:/dev/sg0 since we are only
		** considering a single file
		*/
	}
	sd->type_checked = 1;

	return 0;
}
1009
1010static int fio_sgio_open(struct thread_data *td, struct fio_file *f)
1011{
565e784d 1012 struct sgio_data *sd = td->io_ops_data;
b5af8293
JA
1013 int ret;
1014
1015 ret = generic_open_file(td, f);
1016 if (ret)
1017 return ret;
1018
15ba640a 1019 if (sd && !sd->type_checked && fio_sgio_type_check(td, f)) {
6977bcd0 1020 ret = generic_close_file(td, f);
67486df3 1021 return ret;
b5af8293
JA
1022 }
1023
1024 return 0;
2866c82d
JA
1025}
1026
5ad7be56
KD
1027/*
1028 * Build an error string with details about the driver, host or scsi
1029 * error contained in the sg header Caller will use as necessary.
1030 */
1031static char *fio_sgio_errdetails(struct io_u *io_u)
1032{
1033 struct sg_io_hdr *hdr = &io_u->hdr;
1034#define MAXERRDETAIL 1024
1035#define MAXMSGCHUNK 128
fd04fa03 1036 char *msg, msgchunk[MAXMSGCHUNK];
5ad7be56
KD
1037 int i;
1038
efa72f25 1039 msg = calloc(1, MAXERRDETAIL);
fd04fa03 1040 strcpy(msg, "");
5ad7be56
KD
1041
1042 /*
1043 * can't seem to find sg_err.h, so I'll just echo the define values
1044 * so others can search on internet to find clearer clues of meaning.
1045 */
1046 if (hdr->info & SG_INFO_CHECK) {
5ad7be56
KD
1047 if (hdr->host_status) {
1048 snprintf(msgchunk, MAXMSGCHUNK, "SG Host Status: 0x%02x; ", hdr->host_status);
1049 strlcat(msg, msgchunk, MAXERRDETAIL);
1050 switch (hdr->host_status) {
1051 case 0x01:
1052 strlcat(msg, "SG_ERR_DID_NO_CONNECT", MAXERRDETAIL);
1053 break;
1054 case 0x02:
1055 strlcat(msg, "SG_ERR_DID_BUS_BUSY", MAXERRDETAIL);
1056 break;
1057 case 0x03:
1058 strlcat(msg, "SG_ERR_DID_TIME_OUT", MAXERRDETAIL);
1059 break;
1060 case 0x04:
1061 strlcat(msg, "SG_ERR_DID_BAD_TARGET", MAXERRDETAIL);
1062 break;
1063 case 0x05:
1064 strlcat(msg, "SG_ERR_DID_ABORT", MAXERRDETAIL);
1065 break;
1066 case 0x06:
1067 strlcat(msg, "SG_ERR_DID_PARITY", MAXERRDETAIL);
1068 break;
1069 case 0x07:
1070 strlcat(msg, "SG_ERR_DID_ERROR (internal error)", MAXERRDETAIL);
1071 break;
1072 case 0x08:
1073 strlcat(msg, "SG_ERR_DID_RESET", MAXERRDETAIL);
1074 break;
1075 case 0x09:
1076 strlcat(msg, "SG_ERR_DID_BAD_INTR (unexpected)", MAXERRDETAIL);
1077 break;
1078 case 0x0a:
1079 strlcat(msg, "SG_ERR_DID_PASSTHROUGH", MAXERRDETAIL);
1080 break;
1081 case 0x0b:
1082 strlcat(msg, "SG_ERR_DID_SOFT_ERROR (driver retry?)", MAXERRDETAIL);
1083 break;
1084 case 0x0c:
1085 strlcat(msg, "SG_ERR_DID_IMM_RETRY", MAXERRDETAIL);
1086 break;
1087 case 0x0d:
1088 strlcat(msg, "SG_ERR_DID_REQUEUE", MAXERRDETAIL);
1089 break;
2ce6c6e5
TK
1090 case 0x0e:
1091 strlcat(msg, "SG_ERR_DID_TRANSPORT_DISRUPTED", MAXERRDETAIL);
1092 break;
1093 case 0x0f:
1094 strlcat(msg, "SG_ERR_DID_TRANSPORT_FAILFAST", MAXERRDETAIL);
1095 break;
1096 case 0x10:
1097 strlcat(msg, "SG_ERR_DID_TARGET_FAILURE", MAXERRDETAIL);
1098 break;
1099 case 0x11:
1100 strlcat(msg, "SG_ERR_DID_NEXUS_FAILURE", MAXERRDETAIL);
1101 break;
1102 case 0x12:
1103 strlcat(msg, "SG_ERR_DID_ALLOC_FAILURE", MAXERRDETAIL);
1104 break;
1105 case 0x13:
1106 strlcat(msg, "SG_ERR_DID_MEDIUM_ERROR", MAXERRDETAIL);
1107 break;
5ad7be56
KD
1108 default:
1109 strlcat(msg, "Unknown", MAXERRDETAIL);
1110 break;
1111 }
1112 strlcat(msg, ". ", MAXERRDETAIL);
1113 }
1114 if (hdr->driver_status) {
1115 snprintf(msgchunk, MAXMSGCHUNK, "SG Driver Status: 0x%02x; ", hdr->driver_status);
1116 strlcat(msg, msgchunk, MAXERRDETAIL);
1117 switch (hdr->driver_status & 0x0F) {
1118 case 0x01:
1119 strlcat(msg, "SG_ERR_DRIVER_BUSY", MAXERRDETAIL);
1120 break;
1121 case 0x02:
1122 strlcat(msg, "SG_ERR_DRIVER_SOFT", MAXERRDETAIL);
1123 break;
1124 case 0x03:
1125 strlcat(msg, "SG_ERR_DRIVER_MEDIA", MAXERRDETAIL);
1126 break;
1127 case 0x04:
1128 strlcat(msg, "SG_ERR_DRIVER_ERROR", MAXERRDETAIL);
1129 break;
1130 case 0x05:
1131 strlcat(msg, "SG_ERR_DRIVER_INVALID", MAXERRDETAIL);
1132 break;
1133 case 0x06:
1134 strlcat(msg, "SG_ERR_DRIVER_TIMEOUT", MAXERRDETAIL);
1135 break;
1136 case 0x07:
1137 strlcat(msg, "SG_ERR_DRIVER_HARD", MAXERRDETAIL);
1138 break;
1139 case 0x08:
1140 strlcat(msg, "SG_ERR_DRIVER_SENSE", MAXERRDETAIL);
1141 break;
1142 default:
1143 strlcat(msg, "Unknown", MAXERRDETAIL);
1144 break;
1145 }
1146 strlcat(msg, "; ", MAXERRDETAIL);
1147 switch (hdr->driver_status & 0xF0) {
1148 case 0x10:
1149 strlcat(msg, "SG_ERR_SUGGEST_RETRY", MAXERRDETAIL);
1150 break;
1151 case 0x20:
1152 strlcat(msg, "SG_ERR_SUGGEST_ABORT", MAXERRDETAIL);
1153 break;
1154 case 0x30:
1155 strlcat(msg, "SG_ERR_SUGGEST_REMAP", MAXERRDETAIL);
1156 break;
1157 case 0x40:
1158 strlcat(msg, "SG_ERR_SUGGEST_DIE", MAXERRDETAIL);
1159 break;
1160 case 0x80:
1161 strlcat(msg, "SG_ERR_SUGGEST_SENSE", MAXERRDETAIL);
1162 break;
1163 }
1164 strlcat(msg, ". ", MAXERRDETAIL);
1165 }
1166 if (hdr->status) {
1167 snprintf(msgchunk, MAXMSGCHUNK, "SG SCSI Status: 0x%02x; ", hdr->status);
1168 strlcat(msg, msgchunk, MAXERRDETAIL);
1169 // SCSI 3 status codes
1170 switch (hdr->status) {
1171 case 0x02:
1172 strlcat(msg, "CHECK_CONDITION", MAXERRDETAIL);
1173 break;
1174 case 0x04:
1175 strlcat(msg, "CONDITION_MET", MAXERRDETAIL);
1176 break;
1177 case 0x08:
1178 strlcat(msg, "BUSY", MAXERRDETAIL);
1179 break;
1180 case 0x10:
1181 strlcat(msg, "INTERMEDIATE", MAXERRDETAIL);
1182 break;
1183 case 0x14:
1184 strlcat(msg, "INTERMEDIATE_CONDITION_MET", MAXERRDETAIL);
1185 break;
1186 case 0x18:
1187 strlcat(msg, "RESERVATION_CONFLICT", MAXERRDETAIL);
1188 break;
1189 case 0x22:
1190 strlcat(msg, "COMMAND_TERMINATED", MAXERRDETAIL);
1191 break;
1192 case 0x28:
1193 strlcat(msg, "TASK_SET_FULL", MAXERRDETAIL);
1194 break;
1195 case 0x30:
1196 strlcat(msg, "ACA_ACTIVE", MAXERRDETAIL);
1197 break;
1198 case 0x40:
1199 strlcat(msg, "TASK_ABORTED", MAXERRDETAIL);
1200 break;
1201 default:
1202 strlcat(msg, "Unknown", MAXERRDETAIL);
1203 break;
1204 }
1205 strlcat(msg, ". ", MAXERRDETAIL);
1206 }
1207 if (hdr->sb_len_wr) {
1208 snprintf(msgchunk, MAXMSGCHUNK, "Sense Data (%d bytes):", hdr->sb_len_wr);
1209 strlcat(msg, msgchunk, MAXERRDETAIL);
1210 for (i = 0; i < hdr->sb_len_wr; i++) {
1211 snprintf(msgchunk, MAXMSGCHUNK, " %02x", hdr->sbp[i]);
1212 strlcat(msg, msgchunk, MAXERRDETAIL);
1213 }
1214 strlcat(msg, ". ", MAXERRDETAIL);
1215 }
1216 if (hdr->resid != 0) {
1217 snprintf(msgchunk, MAXMSGCHUNK, "SG Driver: %d bytes out of %d not transferred. ", hdr->resid, hdr->dxfer_len);
1218 strlcat(msg, msgchunk, MAXERRDETAIL);
5ad7be56 1219 }
b4dbb3ce
VF
1220 if (hdr->cmdp) {
1221 strlcat(msg, "cdb:", MAXERRDETAIL);
1222 for (i = 0; i < hdr->cmd_len; i++) {
1223 snprintf(msgchunk, MAXMSGCHUNK, " %02x", hdr->cmdp[i]);
1224 strlcat(msg, msgchunk, MAXERRDETAIL);
1225 }
1226 strlcat(msg, ". ", MAXERRDETAIL);
1227 if (io_u->ddir == DDIR_TRIM) {
1228 unsigned char *param_list = hdr->dxferp;
1229 strlcat(msg, "dxferp:", MAXERRDETAIL);
1230 for (i = 0; i < hdr->dxfer_len; i++) {
1231 snprintf(msgchunk, MAXMSGCHUNK, " %02x", param_list[i]);
1232 strlcat(msg, msgchunk, MAXERRDETAIL);
1233 }
1234 strlcat(msg, ". ", MAXERRDETAIL);
1235 }
1236 }
5ad7be56
KD
1237 }
1238
fd04fa03 1239 if (!(hdr->info & SG_INFO_CHECK) && !strlen(msg))
36833fb0
BVA
1240 snprintf(msg, MAXERRDETAIL, "%s",
1241 "SG Driver did not report a Host, Driver or Device check");
5ad7be56 1242
fd04fa03 1243 return msg;
5ad7be56
KD
1244}
1245
1246/*
1247 * get max file size from read capacity.
1248 */
1249static int fio_sgio_get_file_size(struct thread_data *td, struct fio_file *f)
1250{
1251 /*
1252 * get_file_size is being called even before sgio_init is
1253 * called, so none of the sg_io structures are
1254 * initialized in the thread_data yet. So we need to do the
1255 * ReadCapacity without any of those helpers. One of the effects
1256 * is that ReadCapacity may get called 4 times on each open:
1257 * readcap(10) followed by readcap(16) if needed - just to get
1258 * the file size after the init occurs - it will be called
1259 * again when "type_check" is called during structure
1260 * initialization I'm not sure how to prevent this little
1261 * inefficiency.
1262 */
1263 unsigned int bs = 0;
1264 unsigned long long max_lba = 0;
1265 int ret;
1266
1267 if (fio_file_size_known(f))
1268 return 0;
1269
686fbd31 1270 if (f->filetype != FIO_TYPE_BLOCK && f->filetype != FIO_TYPE_CHAR) {
30dac136
TK
1271 td_verror(td, EINVAL, "wrong file type");
1272 log_err("ioengine sg only works on block or character devices\n");
1273 return 1;
1274 }
1275
5ad7be56
KD
1276 ret = fio_sgio_read_capacity(td, &bs, &max_lba);
1277 if (ret ) {
1278 td_verror(td, td->error, "fio_sgio_read_capacity");
1279 log_err("ioengine sg unable to successfully execute read capacity to get block size and maximum lba\n");
1280 return 1;
1281 }
1282
1283 f->real_file_size = (max_lba + 1) * bs;
1284 fio_file_set_size_known(f);
1285 return 0;
1286}
1287
1288
/*
 * Main sg engine ops table. FIO_SYNCIO is set, so issue_time is filled
 * in by td_io_queue() before each IO reaches queue(); async behavior
 * (character devices with direct=0 and sync=0) is handled inside
 * queue()/commit()/getevents() themselves.
 */
static struct ioengine_ops ioengine = {
	.name		= "sg",
	.version	= FIO_IOOPS_VERSION,
	.init		= fio_sgio_init,
	.prep		= fio_sgio_prep,
	.queue		= fio_sgio_queue,
	.commit		= fio_sgio_commit,
	.getevents	= fio_sgio_getevents,
	.errdetails	= fio_sgio_errdetails,
	.event		= fio_sgio_event,
	.cleanup	= fio_sgio_cleanup,
	.open_file	= fio_sgio_open,
	.close_file	= generic_close_file,
	.get_file_size	= fio_sgio_get_file_size,
	.flags		= FIO_SYNCIO | FIO_RAWIO,
	.options	= options,
	.option_struct_size	= sizeof(struct sg_options)
};
34cfcdaf
JA
1307
1308#else /* FIO_HAVE_SGIO */
1309
1310/*
 * When we have a proper configure system in place, we simply won't build
1312 * and install this io engine. For now install a crippled version that
1313 * just complains and fails to load.
1314 */
/* Stub init used when SG support is not compiled in: log and fail to load. */
static int fio_sgio_init(struct thread_data fio_unused *td)
{
	log_err("fio: ioengine sg not available\n");
	return 1;
}
1320
/* Crippled ops table for builds without FIO_HAVE_SGIO: init always fails. */
static struct ioengine_ops ioengine = {
	.name		= "sg",
	.version	= FIO_IOOPS_VERSION,
	.init		= fio_sgio_init,
};
1326
1327#endif
5f350952
JA
1328
/* Register the sg engine with fio's engine list at program load (fio_init). */
static void fio_init fio_sgio_register(void)
{
	register_ioengine(&ioengine);
}
1333
/* Remove the sg engine from fio's engine list at program exit (fio_exit). */
static void fio_exit fio_sgio_unregister(void)
{
	unregister_ioengine(&ioengine);
}