stat: make add lat percentile functions inline
[fio.git] / engines / sg.c
CommitLineData
2866c82d 1/*
da751ca9
JA
2 * sg engine
3 *
4 * IO engine that uses the Linux SG v3 interface to talk to SCSI devices
2866c82d 5 *
b4b9665e
VF
6 * This ioengine can operate in two modes:
7 * sync with block devices (/dev/sdX) or
8 * with character devices (/dev/sgY) with direct=1 or sync=1
9 * async with character devices with direct=0 and sync=0
10 *
11 * What value does queue() return for the different cases?
12 * queue() return value
13 * In sync mode:
14 * /dev/sdX RWT FIO_Q_COMPLETED
15 * /dev/sgY RWT FIO_Q_COMPLETED
16 * with direct=1 or sync=1
17 *
18 * In async mode:
19 * /dev/sgY RWT FIO_Q_QUEUED
20 * direct=0 and sync=0
21 *
22 * Because FIO_SYNCIO is set for this ioengine td_io_queue() will fill in
23 * issue_time *before* each IO is sent to queue()
24 *
25 * Where are the IO counting functions called for the different cases?
26 *
27 * In sync mode:
28 * /dev/sdX (commit==NULL)
29 * RWT
30 * io_u_mark_depth() called in td_io_queue()
31 * io_u_mark_submit/complete() called in td_io_queue()
32 * issue_time set in td_io_queue()
33 *
34 * /dev/sgY with direct=1 or sync=1 (commit does nothing)
35 * RWT
36 * io_u_mark_depth() called in td_io_queue()
37 * io_u_mark_submit/complete() called in queue()
38 * issue_time set in td_io_queue()
39 *
40 * In async mode:
41 * /dev/sgY with direct=0 and sync=0
42 * RW: read and write operations are submitted in queue()
43 * io_u_mark_depth() called in td_io_commit()
44 * io_u_mark_submit() called in queue()
45 * issue_time set in td_io_queue()
46 * T: trim operations are queued in queue() and submitted in commit()
47 * io_u_mark_depth() called in td_io_commit()
48 * io_u_mark_submit() called in commit()
49 * issue_time set in commit()
50 *
2866c82d
JA
51 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <poll.h>
5f350952
JA
57
58#include "../fio.h"
52b81b7c 59#include "../optgroup.h"
2866c82d 60
34cfcdaf
JA
61#ifdef FIO_HAVE_SGIO
62
b0dc148e
DG
63#ifndef SGV4_FLAG_HIPRI
64#define SGV4_FLAG_HIPRI 0x800
65#endif
66
cbdc9353
VF
67enum {
68 FIO_SG_WRITE = 1,
69 FIO_SG_WRITE_VERIFY = 2,
70 FIO_SG_WRITE_SAME = 3
71};
52b81b7c
KD
72
73struct sg_options {
74 void *pad;
b0dc148e 75 unsigned int hipri;
52b81b7c
KD
76 unsigned int readfua;
77 unsigned int writefua;
cbdc9353 78 unsigned int write_mode;
52b81b7c
KD
79};
80
81static struct fio_option options[] = {
b0dc148e
DG
82 {
83 .name = "hipri",
84 .lname = "High Priority",
85 .type = FIO_OPT_STR_SET,
86 .off1 = offsetof(struct sg_options, hipri),
87 .help = "Use polled IO completions",
88 .category = FIO_OPT_C_ENGINE,
89 .group = FIO_OPT_G_SG,
90 },
52b81b7c
KD
91 {
92 .name = "readfua",
93 .lname = "sg engine read fua flag support",
94 .type = FIO_OPT_BOOL,
95 .off1 = offsetof(struct sg_options, readfua),
96 .help = "Set FUA flag (force unit access) for all Read operations",
97 .def = "0",
98 .category = FIO_OPT_C_ENGINE,
99 .group = FIO_OPT_G_SG,
100 },
101 {
102 .name = "writefua",
103 .lname = "sg engine write fua flag support",
104 .type = FIO_OPT_BOOL,
105 .off1 = offsetof(struct sg_options, writefua),
106 .help = "Set FUA flag (force unit access) for all Write operations",
107 .def = "0",
108 .category = FIO_OPT_C_ENGINE,
109 .group = FIO_OPT_G_SG,
110 },
cbdc9353
VF
111 {
112 .name = "sg_write_mode",
113 .lname = "specify sg write mode",
114 .type = FIO_OPT_STR,
115 .off1 = offsetof(struct sg_options, write_mode),
116 .help = "Specify SCSI WRITE mode",
117 .def = "write",
118 .posval = {
119 { .ival = "write",
120 .oval = FIO_SG_WRITE,
121 .help = "Issue standard SCSI WRITE commands",
122 },
123 { .ival = "verify",
124 .oval = FIO_SG_WRITE_VERIFY,
125 .help = "Issue SCSI WRITE AND VERIFY commands",
126 },
127 { .ival = "same",
128 .oval = FIO_SG_WRITE_SAME,
129 .help = "Issue SCSI WRITE SAME commands",
130 },
131 },
132 .category = FIO_OPT_C_ENGINE,
133 .group = FIO_OPT_G_SG,
134 },
52b81b7c
KD
135 {
136 .name = NULL,
137 },
138};
139
5ad7be56
KD
140#define MAX_10B_LBA 0xFFFFFFFFULL
141#define SCSI_TIMEOUT_MS 30000 // 30 second timeout; currently no method to override
142#define MAX_SB 64 // sense block maximum return size
aa18e0ec
VF
143/*
144#define FIO_SGIO_DEBUG
145*/
5ad7be56 146
2866c82d 147struct sgio_cmd {
fde57152 148 unsigned char cdb[16]; // enhanced from 10 to support 16 byte commands
5ad7be56 149 unsigned char sb[MAX_SB]; // add sense block to commands
2866c82d
JA
150 int nr;
151};
152
b4b9665e 153struct sgio_trim {
a824149a 154 uint8_t *unmap_param;
b4b9665e
VF
155 unsigned int unmap_range_count;
156 struct io_u **trim_io_us;
157};
158
2866c82d
JA
159struct sgio_data {
160 struct sgio_cmd *cmds;
161 struct io_u **events;
dc0deca2
JA
162 struct pollfd *pfds;
163 int *fd_flags;
164 void *sgbuf;
2866c82d 165 unsigned int bs;
b5af8293 166 int type_checked;
b4b9665e
VF
167 struct sgio_trim **trim_queues;
168 int current_queue;
aa18e0ec 169#ifdef FIO_SGIO_DEBUG
b4b9665e 170 unsigned int *trim_queue_map;
aa18e0ec 171#endif
2866c82d
JA
172};
173
a824149a
DF
174static inline uint32_t sgio_get_be32(uint8_t *buf)
175{
176 return be32_to_cpu(*((uint32_t *) buf));
177}
178
179static inline uint64_t sgio_get_be64(uint8_t *buf)
180{
181 return be64_to_cpu(*((uint64_t *) buf));
182}
183
184static inline void sgio_set_be16(uint16_t val, uint8_t *buf)
185{
186 uint16_t t = cpu_to_be16(val);
187
188 memcpy(buf, &t, sizeof(uint16_t));
189}
190
191static inline void sgio_set_be32(uint32_t val, uint8_t *buf)
192{
193 uint32_t t = cpu_to_be32(val);
194
195 memcpy(buf, &t, sizeof(uint32_t));
196}
197
198static inline void sgio_set_be64(uint64_t val, uint8_t *buf)
199{
200 uint64_t t = cpu_to_be64(val);
201
202 memcpy(buf, &t, sizeof(uint64_t));
203}
204
b4b9665e
VF
205static inline bool sgio_unbuffered(struct thread_data *td)
206{
207 return (td->o.odirect || td->o.sync_io);
208}
209
2866c82d
JA
210static void sgio_hdr_init(struct sgio_data *sd, struct sg_io_hdr *hdr,
211 struct io_u *io_u, int fs)
212{
213 struct sgio_cmd *sc = &sd->cmds[io_u->index];
214
215 memset(hdr, 0, sizeof(*hdr));
216 memset(sc->cdb, 0, sizeof(sc->cdb));
217
218 hdr->interface_id = 'S';
219 hdr->cmdp = sc->cdb;
220 hdr->cmd_len = sizeof(sc->cdb);
5ad7be56
KD
221 hdr->sbp = sc->sb;
222 hdr->mx_sb_len = sizeof(sc->sb);
2866c82d
JA
223 hdr->pack_id = io_u->index;
224 hdr->usr_ptr = io_u;
b4b9665e 225 hdr->timeout = SCSI_TIMEOUT_MS;
2866c82d
JA
226
227 if (fs) {
cec6b55d
JA
228 hdr->dxferp = io_u->xfer_buf;
229 hdr->dxfer_len = io_u->xfer_buflen;
2866c82d
JA
230 }
231}
232
adee86c5
JA
233static int pollin_events(struct pollfd *pfds, int fds)
234{
235 int i;
236
237 for (i = 0; i < fds; i++)
238 if (pfds[i].revents & POLLIN)
239 return 1;
240
241 return 0;
242}
2866c82d 243
14d0261e
JA
244static int sg_fd_read(int fd, void *data, size_t size)
245{
246 int err = 0;
247
248 while (size) {
249 ssize_t ret;
250
251 ret = read(fd, data, size);
252 if (ret < 0) {
253 if (errno == EAGAIN || errno == EINTR)
254 continue;
255 err = errno;
256 break;
257 } else if (!ret)
258 break;
259 else {
260 data += ret;
261 size -= ret;
262 }
263 }
264
265 if (err)
266 return err;
267 if (size)
268 return EAGAIN;
269
270 return 0;
271}
272
e7d2e616 273static int fio_sgio_getevents(struct thread_data *td, unsigned int min,
1f440ece
JA
274 unsigned int max,
275 const struct timespec fio_unused *t)
2866c82d 276{
565e784d 277 struct sgio_data *sd = td->io_ops_data;
b4b9665e 278 int left = max, eventNum, ret, r = 0, trims = 0;
dc0deca2 279 void *buf = sd->sgbuf;
b4b9665e 280 unsigned int i, j, events;
946ff865 281 struct fio_file *f;
b4b9665e 282 struct io_u *io_u;
2866c82d
JA
283
284 /*
adee86c5 285 * Fill in the file descriptors
2866c82d 286 */
adee86c5
JA
287 for_each_file(td, f, i) {
288 /*
289 * don't block for min events == 0
290 */
4a851614 291 if (!min)
3a35845f
JA
292 sd->fd_flags[i] = fio_set_fd_nonblocking(f->fd, "sg");
293 else
294 sd->fd_flags[i] = -1;
4a851614 295
dc0deca2
JA
296 sd->pfds[i].fd = f->fd;
297 sd->pfds[i].events = POLLIN;
2866c82d
JA
298 }
299
b4b9665e
VF
300 /*
301 ** There are two counters here:
302 ** - number of SCSI commands completed
303 ** - number of io_us completed
304 **
305 ** These are the same with reads and writes, but
306 ** could differ with trim/unmap commands because
307 ** a single unmap can include multiple io_us
308 */
309
310 while (left > 0) {
c97e3cb0 311 char *p;
adee86c5 312
b4b9665e 313 dprint(FD_IO, "sgio_getevents: sd %p: min=%d, max=%d, left=%d\n", sd, min, max, left);
5ad7be56 314
2866c82d
JA
315 do {
316 if (!min)
317 break;
adee86c5 318
2dc1bbeb 319 ret = poll(sd->pfds, td->o.nr_files, -1);
adee86c5 320 if (ret < 0) {
adee86c5 321 if (!r)
22819ec2 322 r = -errno;
e1161c32 323 td_verror(td, errno, "poll");
adee86c5
JA
324 break;
325 } else if (!ret)
326 continue;
327
2dc1bbeb 328 if (pollin_events(sd->pfds, td->o.nr_files))
2866c82d
JA
329 break;
330 } while (1);
331
adee86c5 332 if (r < 0)
2866c82d 333 break;
adee86c5
JA
334
335re_read:
336 p = buf;
337 events = 0;
338 for_each_file(td, f, i) {
5ad7be56 339 for (eventNum = 0; eventNum < left; eventNum++) {
14d0261e 340 ret = sg_fd_read(f->fd, p, sizeof(struct sg_io_hdr));
b4b9665e 341 dprint(FD_IO, "sgio_getevents: sg_fd_read ret: %d\n", ret);
14d0261e
JA
342 if (ret) {
343 r = -ret;
344 td_verror(td, r, "sg_read");
5ad7be56 345 break;
5ad7be56 346 }
b4b9665e
VF
347 io_u = ((struct sg_io_hdr *)p)->usr_ptr;
348 if (io_u->ddir == DDIR_TRIM) {
349 events += sd->trim_queues[io_u->index]->unmap_range_count;
350 eventNum += sd->trim_queues[io_u->index]->unmap_range_count - 1;
351 } else
352 events++;
353
14d0261e 354 p += sizeof(struct sg_io_hdr);
b4b9665e 355 dprint(FD_IO, "sgio_getevents: events: %d, eventNum: %d, left: %d\n", events, eventNum, left);
adee86c5
JA
356 }
357 }
358
14d0261e 359 if (r < 0 && !events)
2866c82d 360 break;
adee86c5
JA
361 if (!events) {
362 usleep(1000);
363 goto re_read;
364 }
2866c82d 365
2866c82d
JA
366 left -= events;
367 r += events;
368
369 for (i = 0; i < events; i++) {
370 struct sg_io_hdr *hdr = (struct sg_io_hdr *) buf + i;
b4b9665e
VF
371 sd->events[i + trims] = hdr->usr_ptr;
372 io_u = (struct io_u *)(hdr->usr_ptr);
5ad7be56 373
5ad7be56 374 if (hdr->info & SG_INFO_CHECK) {
b4b9665e 375 /* record if an io error occurred, ignore resid */
be660713 376 memcpy(&io_u->hdr, hdr, sizeof(struct sg_io_hdr));
b4b9665e
VF
377 sd->events[i + trims]->error = EIO;
378 }
379
380 if (io_u->ddir == DDIR_TRIM) {
381 struct sgio_trim *st = sd->trim_queues[io_u->index];
aa18e0ec 382#ifdef FIO_SGIO_DEBUG
b4b9665e 383 assert(st->trim_io_us[0] == io_u);
aa18e0ec 384 assert(sd->trim_queue_map[io_u->index] == io_u->index);
b4b9665e
VF
385 dprint(FD_IO, "sgio_getevents: reaping %d io_us from trim queue %d\n", st->unmap_range_count, io_u->index);
386 dprint(FD_IO, "sgio_getevents: reaped io_u %d and stored in events[%d]\n", io_u->index, i+trims);
aa18e0ec 387#endif
b4b9665e
VF
388 for (j = 1; j < st->unmap_range_count; j++) {
389 ++trims;
390 sd->events[i + trims] = st->trim_io_us[j];
aa18e0ec 391#ifdef FIO_SGIO_DEBUG
b4b9665e 392 dprint(FD_IO, "sgio_getevents: reaped io_u %d and stored in events[%d]\n", st->trim_io_us[j]->index, i+trims);
aa18e0ec
VF
393 assert(sd->trim_queue_map[st->trim_io_us[j]->index] == io_u->index);
394#endif
b4b9665e
VF
395 if (hdr->info & SG_INFO_CHECK) {
396 /* record if an io error occurred, ignore resid */
397 memcpy(&st->trim_io_us[j]->hdr, hdr, sizeof(struct sg_io_hdr));
398 sd->events[i + trims]->error = EIO;
399 }
400 }
401 events -= st->unmap_range_count - 1;
402 st->unmap_range_count = 0;
5ad7be56 403 }
2866c82d
JA
404 }
405 }
406
adee86c5 407 if (!min) {
affe05a9 408 for_each_file(td, f, i) {
3a35845f
JA
409 if (sd->fd_flags[i] == -1)
410 continue;
411
affe05a9
JA
412 if (fcntl(f->fd, F_SETFL, sd->fd_flags[i]) < 0)
413 log_err("fio: sg failed to restore fcntl flags: %s\n", strerror(errno));
414 }
adee86c5 415 }
2866c82d 416
2866c82d
JA
417 return r;
418}
419
2e4ef4fb
JA
420static enum fio_q_status fio_sgio_ioctl_doio(struct thread_data *td,
421 struct fio_file *f,
422 struct io_u *io_u)
2866c82d 423{
565e784d 424 struct sgio_data *sd = td->io_ops_data;
2866c82d 425 struct sg_io_hdr *hdr = &io_u->hdr;
36167d82 426 int ret;
2866c82d
JA
427
428 sd->events[0] = io_u;
429
36167d82
JA
430 ret = ioctl(f->fd, SG_IO, hdr);
431 if (ret < 0)
a05bd42d 432 return ret;
36167d82 433
5ad7be56
KD
434 /* record if an io error occurred */
435 if (hdr->info & SG_INFO_CHECK)
436 io_u->error = EIO;
437
36167d82 438 return FIO_Q_COMPLETED;
2866c82d
JA
439}
440
a999bc49
JA
441static enum fio_q_status fio_sgio_rw_doio(struct thread_data *td,
442 struct fio_file *f,
b4b9665e 443 struct io_u *io_u, int do_sync)
2866c82d
JA
444{
445 struct sg_io_hdr *hdr = &io_u->hdr;
446 int ret;
447
53cdc686 448 ret = write(f->fd, hdr, sizeof(*hdr));
2866c82d 449 if (ret < 0)
a05bd42d 450 return ret;
2866c82d 451
2b13e716 452 if (do_sync) {
a999bc49
JA
453 /*
454 * We can't just read back the first command that completes
455 * and assume it's the one we need, it could be any command
456 * that is inflight.
457 */
458 do {
459 struct io_u *__io_u;
460
461 ret = read(f->fd, hdr, sizeof(*hdr));
462 if (ret < 0)
463 return ret;
464
7508b394
JA
465 __io_u = hdr->usr_ptr;
466
a999bc49
JA
467 /* record if an io error occurred */
468 if (hdr->info & SG_INFO_CHECK)
7508b394 469 __io_u->error = EIO;
5ad7be56 470
a999bc49
JA
471 if (__io_u == io_u)
472 break;
473
691df596 474 if (io_u_sync_complete(td, __io_u))
a999bc49 475 break;
691df596 476
a999bc49 477 } while (1);
5ad7be56 478
36167d82 479 return FIO_Q_COMPLETED;
2866c82d
JA
480 }
481
36167d82 482 return FIO_Q_QUEUED;
2866c82d
JA
483}
484
b4b9665e
VF
485static enum fio_q_status fio_sgio_doio(struct thread_data *td,
486 struct io_u *io_u, int do_sync)
2866c82d 487{
53cdc686 488 struct fio_file *f = io_u->file;
b4b9665e 489 enum fio_q_status ret;
53cdc686 490
686fbd31 491 if (f->filetype == FIO_TYPE_BLOCK) {
5ad7be56 492 ret = fio_sgio_ioctl_doio(td, f, io_u);
a999bc49
JA
493 if (io_u->error)
494 td_verror(td, io_u->error, __func__);
5ad7be56 495 } else {
a999bc49
JA
496 ret = fio_sgio_rw_doio(td, f, io_u, do_sync);
497 if (io_u->error && do_sync)
c9aeb797 498 td_verror(td, io_u->error, __func__);
5ad7be56 499 }
2866c82d 500
5ad7be56 501 return ret;
2866c82d
JA
502}
503
b4b9665e
VF
504static void fio_sgio_rw_lba(struct sg_io_hdr *hdr, unsigned long long lba,
505 unsigned long long nr_blocks)
506{
507 if (lba < MAX_10B_LBA) {
a824149a
DF
508 sgio_set_be32((uint32_t) lba, &hdr->cmdp[2]);
509 sgio_set_be16((uint16_t) nr_blocks, &hdr->cmdp[7]);
b4b9665e 510 } else {
a824149a
DF
511 sgio_set_be64(lba, &hdr->cmdp[2]);
512 sgio_set_be32((uint32_t) nr_blocks, &hdr->cmdp[10]);
b4b9665e
VF
513 }
514
515 return;
516}
517
2866c82d
JA
518static int fio_sgio_prep(struct thread_data *td, struct io_u *io_u)
519{
520 struct sg_io_hdr *hdr = &io_u->hdr;
52b81b7c 521 struct sg_options *o = td->eo;
565e784d 522 struct sgio_data *sd = td->io_ops_data;
b4b9665e
VF
523 unsigned long long nr_blocks, lba;
524 int offset;
2866c82d 525
cec6b55d 526 if (io_u->xfer_buflen & (sd->bs - 1)) {
2866c82d
JA
527 log_err("read/write not sector aligned\n");
528 return EINVAL;
529 }
530
5ad7be56
KD
531 nr_blocks = io_u->xfer_buflen / sd->bs;
532 lba = io_u->offset / sd->bs;
533
2866c82d 534 if (io_u->ddir == DDIR_READ) {
87dc1ab1
JA
535 sgio_hdr_init(sd, hdr, io_u, 1);
536
2866c82d 537 hdr->dxfer_direction = SG_DXFER_FROM_DEV;
5ad7be56
KD
538 if (lba < MAX_10B_LBA)
539 hdr->cmdp[0] = 0x28; // read(10)
540 else
541 hdr->cmdp[0] = 0x88; // read(16)
52b81b7c 542
b0dc148e
DG
543 if (o->hipri)
544 hdr->flags |= SGV4_FLAG_HIPRI;
52b81b7c
KD
545 if (o->readfua)
546 hdr->cmdp[1] |= 0x08;
547
b4b9665e
VF
548 fio_sgio_rw_lba(hdr, lba, nr_blocks);
549
87dc1ab1
JA
550 } else if (io_u->ddir == DDIR_WRITE) {
551 sgio_hdr_init(sd, hdr, io_u, 1);
552
2866c82d 553 hdr->dxfer_direction = SG_DXFER_TO_DEV;
cbdc9353
VF
554 switch(o->write_mode) {
555 case FIO_SG_WRITE:
556 if (lba < MAX_10B_LBA)
557 hdr->cmdp[0] = 0x2a; // write(10)
558 else
559 hdr->cmdp[0] = 0x8a; // write(16)
b0dc148e
DG
560 if (o->hipri)
561 hdr->flags |= SGV4_FLAG_HIPRI;
cbdc9353
VF
562 if (o->writefua)
563 hdr->cmdp[1] |= 0x08;
564 break;
565 case FIO_SG_WRITE_VERIFY:
566 if (lba < MAX_10B_LBA)
567 hdr->cmdp[0] = 0x2e; // write and verify(10)
568 else
569 hdr->cmdp[0] = 0x8e; // write and verify(16)
570 break;
571 // BYTCHK is disabled by virtue of the memset in sgio_hdr_init
572 case FIO_SG_WRITE_SAME:
573 hdr->dxfer_len = sd->bs;
574 if (lba < MAX_10B_LBA)
575 hdr->cmdp[0] = 0x41; // write same(10)
576 else
577 hdr->cmdp[0] = 0x93; // write same(16)
578 break;
579 };
b4b9665e
VF
580
581 fio_sgio_rw_lba(hdr, lba, nr_blocks);
582
583 } else if (io_u->ddir == DDIR_TRIM) {
584 struct sgio_trim *st;
585
586 if (sd->current_queue == -1) {
587 sgio_hdr_init(sd, hdr, io_u, 0);
588
589 hdr->cmd_len = 10;
590 hdr->dxfer_direction = SG_DXFER_TO_DEV;
591 hdr->cmdp[0] = 0x42; // unmap
592 sd->current_queue = io_u->index;
593 st = sd->trim_queues[sd->current_queue];
594 hdr->dxferp = st->unmap_param;
aa18e0ec 595#ifdef FIO_SGIO_DEBUG
b4b9665e
VF
596 assert(sd->trim_queues[io_u->index]->unmap_range_count == 0);
597 dprint(FD_IO, "sg: creating new queue based on io_u %d\n", io_u->index);
aa18e0ec 598#endif
b4b9665e
VF
599 }
600 else
601 st = sd->trim_queues[sd->current_queue];
602
603 dprint(FD_IO, "sg: adding io_u %d to trim queue %d\n", io_u->index, sd->current_queue);
604 st->trim_io_us[st->unmap_range_count] = io_u;
aa18e0ec 605#ifdef FIO_SGIO_DEBUG
b4b9665e 606 sd->trim_queue_map[io_u->index] = sd->current_queue;
aa18e0ec 607#endif
b4b9665e
VF
608
609 offset = 8 + 16 * st->unmap_range_count;
a824149a
DF
610 sgio_set_be64(lba, &st->unmap_param[offset]);
611 sgio_set_be32((uint32_t) nr_blocks, &st->unmap_param[offset + 8]);
b4b9665e
VF
612
613 st->unmap_range_count++;
614
615 } else if (ddir_sync(io_u->ddir)) {
87dc1ab1 616 sgio_hdr_init(sd, hdr, io_u, 0);
87dc1ab1 617 hdr->dxfer_direction = SG_DXFER_NONE;
5ad7be56
KD
618 if (lba < MAX_10B_LBA)
619 hdr->cmdp[0] = 0x35; // synccache(10)
620 else
621 hdr->cmdp[0] = 0x91; // synccache(16)
b4b9665e
VF
622 } else
623 assert(0);
2866c82d 624
2866c82d
JA
625 return 0;
626}
627
b4b9665e
VF
628static void fio_sgio_unmap_setup(struct sg_io_hdr *hdr, struct sgio_trim *st)
629{
a824149a 630 uint16_t cnt = st->unmap_range_count * 16;
b4b9665e 631
a824149a
DF
632 hdr->dxfer_len = cnt + 8;
633 sgio_set_be16(cnt + 8, &hdr->cmdp[7]);
634 sgio_set_be16(cnt + 6, st->unmap_param);
635 sgio_set_be16(cnt, &st->unmap_param[2]);
b4b9665e
VF
636
637 return;
638}
639
2e4ef4fb
JA
640static enum fio_q_status fio_sgio_queue(struct thread_data *td,
641 struct io_u *io_u)
2866c82d
JA
642{
643 struct sg_io_hdr *hdr = &io_u->hdr;
b4b9665e 644 struct sgio_data *sd = td->io_ops_data;
f6db4fa5 645 int ret, do_sync = 0;
2866c82d 646
7101d9c2
JA
647 fio_ro_check(td, io_u);
648
b4b9665e 649 if (sgio_unbuffered(td) || ddir_sync(io_u->ddir))
f6db4fa5
JA
650 do_sync = 1;
651
b4b9665e
VF
652 if (io_u->ddir == DDIR_TRIM) {
653 if (do_sync || io_u->file->filetype == FIO_TYPE_BLOCK) {
654 struct sgio_trim *st = sd->trim_queues[sd->current_queue];
655
656 /* finish cdb setup for unmap because we are
657 ** doing unmap commands synchronously */
aa18e0ec 658#ifdef FIO_SGIO_DEBUG
b4b9665e
VF
659 assert(st->unmap_range_count == 1);
660 assert(io_u == st->trim_io_us[0]);
aa18e0ec 661#endif
b4b9665e
VF
662 hdr = &io_u->hdr;
663
664 fio_sgio_unmap_setup(hdr, st);
665
666 st->unmap_range_count = 0;
667 sd->current_queue = -1;
668 } else
669 /* queue up trim ranges and submit in commit() */
670 return FIO_Q_QUEUED;
671 }
672
f6db4fa5 673 ret = fio_sgio_doio(td, io_u, do_sync);
2866c82d
JA
674
675 if (ret < 0)
676 io_u->error = errno;
677 else if (hdr->status) {
678 io_u->resid = hdr->resid;
679 io_u->error = EIO;
b4b9665e
VF
680 } else if (td->io_ops->commit != NULL) {
681 if (do_sync && !ddir_sync(io_u->ddir)) {
682 io_u_mark_submit(td, 1);
683 io_u_mark_complete(td, 1);
684 } else if (io_u->ddir == DDIR_READ || io_u->ddir == DDIR_WRITE) {
685 io_u_mark_submit(td, 1);
686 io_u_queued(td, io_u);
687 }
2866c82d
JA
688 }
689
95bcd815 690 if (io_u->error) {
e1161c32 691 td_verror(td, io_u->error, "xfer");
36167d82 692 return FIO_Q_COMPLETED;
95bcd815
JA
693 }
694
36167d82 695 return ret;
2866c82d
JA
696}
697
b4b9665e
VF
698static int fio_sgio_commit(struct thread_data *td)
699{
700 struct sgio_data *sd = td->io_ops_data;
701 struct sgio_trim *st;
702 struct io_u *io_u;
703 struct sg_io_hdr *hdr;
704 struct timespec now;
705 unsigned int i;
706 int ret;
707
708 if (sd->current_queue == -1)
709 return 0;
710
711 st = sd->trim_queues[sd->current_queue];
712 io_u = st->trim_io_us[0];
713 hdr = &io_u->hdr;
714
715 fio_sgio_unmap_setup(hdr, st);
716
717 sd->current_queue = -1;
718
a999bc49 719 ret = fio_sgio_rw_doio(td, io_u->file, io_u, 0);
b4b9665e 720
53ee8c17
VF
721 if (ret < 0 || hdr->status) {
722 int error;
723
724 if (ret < 0)
725 error = errno;
726 else {
727 error = EIO;
728 ret = -EIO;
b4b9665e 729 }
53ee8c17
VF
730
731 for (i = 0; i < st->unmap_range_count; i++) {
732 st->trim_io_us[i]->error = error;
733 clear_io_u(td, st->trim_io_us[i]);
734 if (hdr->status)
735 st->trim_io_us[i]->resid = hdr->resid;
b4b9665e 736 }
53ee8c17
VF
737
738 td_verror(td, error, "xfer");
739 return ret;
b4b9665e
VF
740 }
741
53ee8c17
VF
742 if (fio_fill_issue_time(td)) {
743 fio_gettime(&now, NULL);
744 for (i = 0; i < st->unmap_range_count; i++) {
745 memcpy(&st->trim_io_us[i]->issue_time, &now, sizeof(now));
746 io_u_queued(td, io_u);
747 }
b4b9665e 748 }
53ee8c17 749 io_u_mark_submit(td, st->unmap_range_count);
b4b9665e 750
53ee8c17 751 return 0;
b4b9665e
VF
752}
753
2866c82d
JA
754static struct io_u *fio_sgio_event(struct thread_data *td, int event)
755{
565e784d 756 struct sgio_data *sd = td->io_ops_data;
2866c82d
JA
757
758 return sd->events[event];
759}
760
5ad7be56
KD
761static int fio_sgio_read_capacity(struct thread_data *td, unsigned int *bs,
762 unsigned long long *max_lba)
2866c82d 763{
5ad7be56
KD
764 /*
765 * need to do read capacity operation w/o benefit of sd or
766 * io_u structures, which are not initialized until later.
767 */
768 struct sg_io_hdr hdr;
02ae7bd8
DF
769 unsigned long long hlba;
770 unsigned int blksz = 0;
5ad7be56
KD
771 unsigned char cmd[16];
772 unsigned char sb[64];
773 unsigned char buf[32]; // read capacity return
2866c82d 774 int ret;
5ad7be56 775 int fd = -1;
2866c82d 776
5ad7be56 777 struct fio_file *f = td->files[0];
2866c82d 778
5ad7be56
KD
779 /* open file independent of rest of application */
780 fd = open(f->file_name, O_RDONLY);
781 if (fd < 0)
782 return -errno;
2866c82d 783
5ad7be56
KD
784 memset(&hdr, 0, sizeof(hdr));
785 memset(cmd, 0, sizeof(cmd));
786 memset(sb, 0, sizeof(sb));
787 memset(buf, 0, sizeof(buf));
2866c82d 788
5ad7be56
KD
789 /* First let's try a 10 byte read capacity. */
790 hdr.interface_id = 'S';
791 hdr.cmdp = cmd;
792 hdr.cmd_len = 10;
793 hdr.sbp = sb;
794 hdr.mx_sb_len = sizeof(sb);
795 hdr.timeout = SCSI_TIMEOUT_MS;
796 hdr.cmdp[0] = 0x25; // Read Capacity(10)
797 hdr.dxfer_direction = SG_DXFER_FROM_DEV;
798 hdr.dxferp = buf;
799 hdr.dxfer_len = sizeof(buf);
800
801 ret = ioctl(fd, SG_IO, &hdr);
802 if (ret < 0) {
803 close(fd);
2866c82d 804 return ret;
5ad7be56 805 }
2866c82d 806
02ae7bd8
DF
807 if (hdr.info & SG_INFO_CHECK) {
808 /* RCAP(10) might be unsupported by device. Force RCAP(16) */
809 hlba = MAX_10B_LBA;
810 } else {
a824149a
DF
811 blksz = sgio_get_be32(&buf[4]);
812 hlba = sgio_get_be32(buf);
02ae7bd8 813 }
5ad7be56
KD
814
815 /*
fde57152
TK
816 * If max lba masked by MAX_10B_LBA equals MAX_10B_LBA,
817 * then need to retry with 16 byte Read Capacity command.
5ad7be56 818 */
02ae7bd8 819 if (hlba == MAX_10B_LBA) {
5ad7be56 820 hdr.cmd_len = 16;
28c43a89
TK
821 hdr.cmdp[0] = 0x9e; // service action
822 hdr.cmdp[1] = 0x10; // Read Capacity(16)
a824149a 823 sgio_set_be32(sizeof(buf), &hdr.cmdp[10]);
5ad7be56
KD
824
825 hdr.dxfer_direction = SG_DXFER_FROM_DEV;
826 hdr.dxferp = buf;
827 hdr.dxfer_len = sizeof(buf);
828
829 ret = ioctl(fd, SG_IO, &hdr);
830 if (ret < 0) {
831 close(fd);
832 return ret;
833 }
834
835 /* record if an io error occurred */
836 if (hdr.info & SG_INFO_CHECK)
837 td_verror(td, EIO, "fio_sgio_read_capacity");
838
a824149a
DF
839 blksz = sgio_get_be32(&buf[8]);
840 hlba = sgio_get_be64(buf);
02ae7bd8
DF
841 }
842
843 if (blksz) {
844 *bs = blksz;
845 *max_lba = hlba;
846 ret = 0;
847 } else {
848 ret = EIO;
5ad7be56
KD
849 }
850
851 close(fd);
02ae7bd8 852 return ret;
2866c82d
JA
853}
854
855static void fio_sgio_cleanup(struct thread_data *td)
856{
565e784d 857 struct sgio_data *sd = td->io_ops_data;
b4b9665e 858 int i;
dc0deca2
JA
859
860 if (sd) {
861 free(sd->events);
862 free(sd->cmds);
863 free(sd->fd_flags);
864 free(sd->pfds);
865 free(sd->sgbuf);
aa18e0ec 866#ifdef FIO_SGIO_DEBUG
b4b9665e 867 free(sd->trim_queue_map);
aa18e0ec 868#endif
b4b9665e
VF
869
870 for (i = 0; i < td->o.iodepth; i++) {
871 free(sd->trim_queues[i]->unmap_param);
872 free(sd->trim_queues[i]->trim_io_us);
873 free(sd->trim_queues[i]);
874 }
875
876 free(sd->trim_queues);
dc0deca2 877 free(sd);
2866c82d
JA
878 }
879}
880
881static int fio_sgio_init(struct thread_data *td)
882{
883 struct sgio_data *sd;
b4b9665e 884 struct sgio_trim *st;
b0dc148e 885 struct sg_io_hdr *h3p;
b4b9665e 886 int i;
2866c82d 887
b4b9665e
VF
888 sd = calloc(1, sizeof(*sd));
889 sd->cmds = calloc(td->o.iodepth, sizeof(struct sgio_cmd));
890 sd->sgbuf = calloc(td->o.iodepth, sizeof(struct sg_io_hdr));
891 sd->events = calloc(td->o.iodepth, sizeof(struct io_u *));
892 sd->pfds = calloc(td->o.nr_files, sizeof(struct pollfd));
893 sd->fd_flags = calloc(td->o.nr_files, sizeof(int));
5ad7be56 894 sd->type_checked = 0;
b4b9665e
VF
895
896 sd->trim_queues = calloc(td->o.iodepth, sizeof(struct sgio_trim *));
897 sd->current_queue = -1;
aa18e0ec 898#ifdef FIO_SGIO_DEBUG
b4b9665e 899 sd->trim_queue_map = calloc(td->o.iodepth, sizeof(int));
aa18e0ec 900#endif
b0dc148e 901 for (i = 0, h3p = sd->sgbuf; i < td->o.iodepth; i++, ++h3p) {
b4b9665e
VF
902 sd->trim_queues[i] = calloc(1, sizeof(struct sgio_trim));
903 st = sd->trim_queues[i];
904 st->unmap_param = calloc(td->o.iodepth + 1, sizeof(char[16]));
905 st->unmap_range_count = 0;
906 st->trim_io_us = calloc(td->o.iodepth, sizeof(struct io_u *));
b0dc148e 907 h3p->interface_id = 'S';
b4b9665e
VF
908 }
909
565e784d 910 td->io_ops_data = sd;
2866c82d 911
b5af8293
JA
912 /*
913 * we want to do it, regardless of whether odirect is set or not
914 */
2dc1bbeb 915 td->o.override_sync = 1;
b5af8293
JA
916 return 0;
917}
918
919static int fio_sgio_type_check(struct thread_data *td, struct fio_file *f)
920{
565e784d 921 struct sgio_data *sd = td->io_ops_data;
5ad7be56
KD
922 unsigned int bs = 0;
923 unsigned long long max_lba = 0;
924
686fbd31 925 if (f->filetype == FIO_TYPE_BLOCK) {
53cdc686 926 if (ioctl(f->fd, BLKSSZGET, &bs) < 0) {
e1161c32 927 td_verror(td, errno, "ioctl");
b5af8293 928 return 1;
2866c82d 929 }
af52b345 930 } else if (f->filetype == FIO_TYPE_CHAR) {
b5af8293 931 int version, ret;
2866c82d 932
53cdc686 933 if (ioctl(f->fd, SG_GET_VERSION_NUM, &version) < 0) {
e1161c32 934 td_verror(td, errno, "ioctl");
b5af8293 935 return 1;
2866c82d
JA
936 }
937
5ad7be56
KD
938 ret = fio_sgio_read_capacity(td, &bs, &max_lba);
939 if (ret) {
940 td_verror(td, td->error, "fio_sgio_read_capacity");
941 log_err("ioengine sg unable to read capacity successfully\n");
b5af8293 942 return 1;
5ad7be56 943 }
2866c82d 944 } else {
16ada754 945 td_verror(td, EINVAL, "wrong file type");
30dac136 946 log_err("ioengine sg only works on block or character devices\n");
b5af8293 947 return 1;
2866c82d
JA
948 }
949
950 sd->bs = bs;
5ad7be56 951 // Determine size of commands needed based on max_lba
166c6b42
TK
952 if (max_lba >= MAX_10B_LBA) {
953 dprint(FD_IO, "sgio_type_check: using 16 byte read/write "
954 "commands for lba above 0x%016llx/0x%016llx\n",
955 MAX_10B_LBA, max_lba);
5ad7be56
KD
956 }
957
686fbd31 958 if (f->filetype == FIO_TYPE_BLOCK) {
36167d82
JA
959 td->io_ops->getevents = NULL;
960 td->io_ops->event = NULL;
b4b9665e
VF
961 td->io_ops->commit = NULL;
962 /*
963 ** Setting these functions to null may cause problems
964 ** with filename=/dev/sda:/dev/sg0 since we are only
965 ** considering a single file
966 */
36167d82 967 }
5ad7be56 968 sd->type_checked = 1;
2866c82d 969
2866c82d 970 return 0;
b5af8293
JA
971}
972
973static int fio_sgio_open(struct thread_data *td, struct fio_file *f)
974{
565e784d 975 struct sgio_data *sd = td->io_ops_data;
b5af8293
JA
976 int ret;
977
978 ret = generic_open_file(td, f);
979 if (ret)
980 return ret;
981
15ba640a 982 if (sd && !sd->type_checked && fio_sgio_type_check(td, f)) {
6977bcd0 983 ret = generic_close_file(td, f);
67486df3 984 return ret;
b5af8293
JA
985 }
986
987 return 0;
2866c82d
JA
988}
989
5ad7be56
KD
990/*
991 * Build an error string with details about the driver, host or scsi
992 * error contained in the sg header Caller will use as necessary.
993 */
994static char *fio_sgio_errdetails(struct io_u *io_u)
995{
996 struct sg_io_hdr *hdr = &io_u->hdr;
997#define MAXERRDETAIL 1024
998#define MAXMSGCHUNK 128
fd04fa03 999 char *msg, msgchunk[MAXMSGCHUNK];
5ad7be56
KD
1000 int i;
1001
efa72f25 1002 msg = calloc(1, MAXERRDETAIL);
fd04fa03 1003 strcpy(msg, "");
5ad7be56
KD
1004
1005 /*
1006 * can't seem to find sg_err.h, so I'll just echo the define values
1007 * so others can search on internet to find clearer clues of meaning.
1008 */
1009 if (hdr->info & SG_INFO_CHECK) {
5ad7be56
KD
1010 if (hdr->host_status) {
1011 snprintf(msgchunk, MAXMSGCHUNK, "SG Host Status: 0x%02x; ", hdr->host_status);
1012 strlcat(msg, msgchunk, MAXERRDETAIL);
1013 switch (hdr->host_status) {
1014 case 0x01:
1015 strlcat(msg, "SG_ERR_DID_NO_CONNECT", MAXERRDETAIL);
1016 break;
1017 case 0x02:
1018 strlcat(msg, "SG_ERR_DID_BUS_BUSY", MAXERRDETAIL);
1019 break;
1020 case 0x03:
1021 strlcat(msg, "SG_ERR_DID_TIME_OUT", MAXERRDETAIL);
1022 break;
1023 case 0x04:
1024 strlcat(msg, "SG_ERR_DID_BAD_TARGET", MAXERRDETAIL);
1025 break;
1026 case 0x05:
1027 strlcat(msg, "SG_ERR_DID_ABORT", MAXERRDETAIL);
1028 break;
1029 case 0x06:
1030 strlcat(msg, "SG_ERR_DID_PARITY", MAXERRDETAIL);
1031 break;
1032 case 0x07:
1033 strlcat(msg, "SG_ERR_DID_ERROR (internal error)", MAXERRDETAIL);
1034 break;
1035 case 0x08:
1036 strlcat(msg, "SG_ERR_DID_RESET", MAXERRDETAIL);
1037 break;
1038 case 0x09:
1039 strlcat(msg, "SG_ERR_DID_BAD_INTR (unexpected)", MAXERRDETAIL);
1040 break;
1041 case 0x0a:
1042 strlcat(msg, "SG_ERR_DID_PASSTHROUGH", MAXERRDETAIL);
1043 break;
1044 case 0x0b:
1045 strlcat(msg, "SG_ERR_DID_SOFT_ERROR (driver retry?)", MAXERRDETAIL);
1046 break;
1047 case 0x0c:
1048 strlcat(msg, "SG_ERR_DID_IMM_RETRY", MAXERRDETAIL);
1049 break;
1050 case 0x0d:
1051 strlcat(msg, "SG_ERR_DID_REQUEUE", MAXERRDETAIL);
1052 break;
2ce6c6e5
TK
1053 case 0x0e:
1054 strlcat(msg, "SG_ERR_DID_TRANSPORT_DISRUPTED", MAXERRDETAIL);
1055 break;
1056 case 0x0f:
1057 strlcat(msg, "SG_ERR_DID_TRANSPORT_FAILFAST", MAXERRDETAIL);
1058 break;
1059 case 0x10:
1060 strlcat(msg, "SG_ERR_DID_TARGET_FAILURE", MAXERRDETAIL);
1061 break;
1062 case 0x11:
1063 strlcat(msg, "SG_ERR_DID_NEXUS_FAILURE", MAXERRDETAIL);
1064 break;
1065 case 0x12:
1066 strlcat(msg, "SG_ERR_DID_ALLOC_FAILURE", MAXERRDETAIL);
1067 break;
1068 case 0x13:
1069 strlcat(msg, "SG_ERR_DID_MEDIUM_ERROR", MAXERRDETAIL);
1070 break;
5ad7be56
KD
1071 default:
1072 strlcat(msg, "Unknown", MAXERRDETAIL);
1073 break;
1074 }
1075 strlcat(msg, ". ", MAXERRDETAIL);
1076 }
1077 if (hdr->driver_status) {
1078 snprintf(msgchunk, MAXMSGCHUNK, "SG Driver Status: 0x%02x; ", hdr->driver_status);
1079 strlcat(msg, msgchunk, MAXERRDETAIL);
1080 switch (hdr->driver_status & 0x0F) {
1081 case 0x01:
1082 strlcat(msg, "SG_ERR_DRIVER_BUSY", MAXERRDETAIL);
1083 break;
1084 case 0x02:
1085 strlcat(msg, "SG_ERR_DRIVER_SOFT", MAXERRDETAIL);
1086 break;
1087 case 0x03:
1088 strlcat(msg, "SG_ERR_DRIVER_MEDIA", MAXERRDETAIL);
1089 break;
1090 case 0x04:
1091 strlcat(msg, "SG_ERR_DRIVER_ERROR", MAXERRDETAIL);
1092 break;
1093 case 0x05:
1094 strlcat(msg, "SG_ERR_DRIVER_INVALID", MAXERRDETAIL);
1095 break;
1096 case 0x06:
1097 strlcat(msg, "SG_ERR_DRIVER_TIMEOUT", MAXERRDETAIL);
1098 break;
1099 case 0x07:
1100 strlcat(msg, "SG_ERR_DRIVER_HARD", MAXERRDETAIL);
1101 break;
1102 case 0x08:
1103 strlcat(msg, "SG_ERR_DRIVER_SENSE", MAXERRDETAIL);
1104 break;
1105 default:
1106 strlcat(msg, "Unknown", MAXERRDETAIL);
1107 break;
1108 }
1109 strlcat(msg, "; ", MAXERRDETAIL);
1110 switch (hdr->driver_status & 0xF0) {
1111 case 0x10:
1112 strlcat(msg, "SG_ERR_SUGGEST_RETRY", MAXERRDETAIL);
1113 break;
1114 case 0x20:
1115 strlcat(msg, "SG_ERR_SUGGEST_ABORT", MAXERRDETAIL);
1116 break;
1117 case 0x30:
1118 strlcat(msg, "SG_ERR_SUGGEST_REMAP", MAXERRDETAIL);
1119 break;
1120 case 0x40:
1121 strlcat(msg, "SG_ERR_SUGGEST_DIE", MAXERRDETAIL);
1122 break;
1123 case 0x80:
1124 strlcat(msg, "SG_ERR_SUGGEST_SENSE", MAXERRDETAIL);
1125 break;
1126 }
1127 strlcat(msg, ". ", MAXERRDETAIL);
1128 }
1129 if (hdr->status) {
1130 snprintf(msgchunk, MAXMSGCHUNK, "SG SCSI Status: 0x%02x; ", hdr->status);
1131 strlcat(msg, msgchunk, MAXERRDETAIL);
1132 // SCSI 3 status codes
1133 switch (hdr->status) {
1134 case 0x02:
1135 strlcat(msg, "CHECK_CONDITION", MAXERRDETAIL);
1136 break;
1137 case 0x04:
1138 strlcat(msg, "CONDITION_MET", MAXERRDETAIL);
1139 break;
1140 case 0x08:
1141 strlcat(msg, "BUSY", MAXERRDETAIL);
1142 break;
1143 case 0x10:
1144 strlcat(msg, "INTERMEDIATE", MAXERRDETAIL);
1145 break;
1146 case 0x14:
1147 strlcat(msg, "INTERMEDIATE_CONDITION_MET", MAXERRDETAIL);
1148 break;
1149 case 0x18:
1150 strlcat(msg, "RESERVATION_CONFLICT", MAXERRDETAIL);
1151 break;
1152 case 0x22:
1153 strlcat(msg, "COMMAND_TERMINATED", MAXERRDETAIL);
1154 break;
1155 case 0x28:
1156 strlcat(msg, "TASK_SET_FULL", MAXERRDETAIL);
1157 break;
1158 case 0x30:
1159 strlcat(msg, "ACA_ACTIVE", MAXERRDETAIL);
1160 break;
1161 case 0x40:
1162 strlcat(msg, "TASK_ABORTED", MAXERRDETAIL);
1163 break;
1164 default:
1165 strlcat(msg, "Unknown", MAXERRDETAIL);
1166 break;
1167 }
1168 strlcat(msg, ". ", MAXERRDETAIL);
1169 }
1170 if (hdr->sb_len_wr) {
1171 snprintf(msgchunk, MAXMSGCHUNK, "Sense Data (%d bytes):", hdr->sb_len_wr);
1172 strlcat(msg, msgchunk, MAXERRDETAIL);
1173 for (i = 0; i < hdr->sb_len_wr; i++) {
1174 snprintf(msgchunk, MAXMSGCHUNK, " %02x", hdr->sbp[i]);
1175 strlcat(msg, msgchunk, MAXERRDETAIL);
1176 }
1177 strlcat(msg, ". ", MAXERRDETAIL);
1178 }
1179 if (hdr->resid != 0) {
1180 snprintf(msgchunk, MAXMSGCHUNK, "SG Driver: %d bytes out of %d not transferred. ", hdr->resid, hdr->dxfer_len);
1181 strlcat(msg, msgchunk, MAXERRDETAIL);
5ad7be56 1182 }
b4dbb3ce
VF
1183 if (hdr->cmdp) {
1184 strlcat(msg, "cdb:", MAXERRDETAIL);
1185 for (i = 0; i < hdr->cmd_len; i++) {
1186 snprintf(msgchunk, MAXMSGCHUNK, " %02x", hdr->cmdp[i]);
1187 strlcat(msg, msgchunk, MAXERRDETAIL);
1188 }
1189 strlcat(msg, ". ", MAXERRDETAIL);
1190 if (io_u->ddir == DDIR_TRIM) {
1191 unsigned char *param_list = hdr->dxferp;
1192 strlcat(msg, "dxferp:", MAXERRDETAIL);
1193 for (i = 0; i < hdr->dxfer_len; i++) {
1194 snprintf(msgchunk, MAXMSGCHUNK, " %02x", param_list[i]);
1195 strlcat(msg, msgchunk, MAXERRDETAIL);
1196 }
1197 strlcat(msg, ". ", MAXERRDETAIL);
1198 }
1199 }
5ad7be56
KD
1200 }
1201
fd04fa03 1202 if (!(hdr->info & SG_INFO_CHECK) && !strlen(msg))
36833fb0
BVA
1203 snprintf(msg, MAXERRDETAIL, "%s",
1204 "SG Driver did not report a Host, Driver or Device check");
5ad7be56 1205
fd04fa03 1206 return msg;
5ad7be56
KD
1207}
1208
1209/*
1210 * get max file size from read capacity.
1211 */
1212static int fio_sgio_get_file_size(struct thread_data *td, struct fio_file *f)
1213{
1214 /*
1215 * get_file_size is being called even before sgio_init is
1216 * called, so none of the sg_io structures are
1217 * initialized in the thread_data yet. So we need to do the
1218 * ReadCapacity without any of those helpers. One of the effects
1219 * is that ReadCapacity may get called 4 times on each open:
1220 * readcap(10) followed by readcap(16) if needed - just to get
1221 * the file size after the init occurs - it will be called
1222 * again when "type_check" is called during structure
1223 * initialization I'm not sure how to prevent this little
1224 * inefficiency.
1225 */
1226 unsigned int bs = 0;
1227 unsigned long long max_lba = 0;
1228 int ret;
1229
1230 if (fio_file_size_known(f))
1231 return 0;
1232
686fbd31 1233 if (f->filetype != FIO_TYPE_BLOCK && f->filetype != FIO_TYPE_CHAR) {
30dac136
TK
1234 td_verror(td, EINVAL, "wrong file type");
1235 log_err("ioengine sg only works on block or character devices\n");
1236 return 1;
1237 }
1238
5ad7be56
KD
1239 ret = fio_sgio_read_capacity(td, &bs, &max_lba);
1240 if (ret ) {
1241 td_verror(td, td->error, "fio_sgio_read_capacity");
1242 log_err("ioengine sg unable to successfully execute read capacity to get block size and maximum lba\n");
1243 return 1;
1244 }
1245
1246 f->real_file_size = (max_lba + 1) * bs;
1247 fio_file_set_size_known(f);
1248 return 0;
1249}
1250
1251
/*
 * Operations table for the SG v3 engine. FIO_SYNCIO is set so that
 * td_io_queue() fills in issue_time before queue() runs (see the
 * mode discussion at the top of this file); FIO_RAWIO marks the
 * engine as operating on raw block/character devices.
 */
static struct ioengine_ops ioengine = {
	.name		= "sg",
	.version	= FIO_IOOPS_VERSION,
	.init		= fio_sgio_init,
	.prep		= fio_sgio_prep,
	.queue		= fio_sgio_queue,
	.commit		= fio_sgio_commit,
	.getevents	= fio_sgio_getevents,
	.errdetails	= fio_sgio_errdetails,
	.event		= fio_sgio_event,
	.cleanup	= fio_sgio_cleanup,
	.open_file	= fio_sgio_open,
	.close_file	= generic_close_file,
	.get_file_size	= fio_sgio_get_file_size,
	.flags		= FIO_SYNCIO | FIO_RAWIO,
	.options	= options,
	.option_struct_size	= sizeof(struct sg_options)
};
34cfcdaf
JA
1270
1271#else /* FIO_HAVE_SGIO */
1272
/*
 * When we have a proper configure system in place, we simply wont build
 * and install this io engine. For now install a crippled version that
 * just complains and fails to load.
 */
static int fio_sgio_init(struct thread_data fio_unused *td)
{
	/* Always fails: SG support was not compiled in on this platform. */
	log_err("fio: ioengine sg not available\n");
	return 1;
}
1283
/*
 * Stub operations table for builds without FIO_HAVE_SGIO: only init is
 * provided, and it always fails so the engine refuses to load.
 */
static struct ioengine_ops ioengine = {
	.name		= "sg",
	.version	= FIO_IOOPS_VERSION,
	.init		= fio_sgio_init,
};
1289
1290#endif
5f350952
JA
1291
/* fio_init hook: registers this engine's ops table with fio's engine list. */
static void fio_init fio_sgio_register(void)
{
	register_ioengine(&ioengine);
}
1296
/* fio_exit hook: removes this engine from fio's engine list on teardown. */
static void fio_exit fio_sgio_unregister(void)
{
	unregister_ioengine(&ioengine);
}