Optimize the code that copies strings
[fio.git] / engines / sg.c
CommitLineData
2866c82d 1/*
da751ca9
JA
2 * sg engine
3 *
4 * IO engine that uses the Linux SG v3 interface to talk to SCSI devices
2866c82d 5 *
b4b9665e
VF
6 * This ioengine can operate in two modes:
7 * sync with block devices (/dev/sdX) or
8 * with character devices (/dev/sgY) with direct=1 or sync=1
9 * async with character devices with direct=0 and sync=0
10 *
11 * What value does queue() return for the different cases?
12 * queue() return value
13 * In sync mode:
14 * /dev/sdX RWT FIO_Q_COMPLETED
15 * /dev/sgY RWT FIO_Q_COMPLETED
16 * with direct=1 or sync=1
17 *
18 * In async mode:
19 * /dev/sgY RWT FIO_Q_QUEUED
20 * direct=0 and sync=0
21 *
22 * Because FIO_SYNCIO is set for this ioengine td_io_queue() will fill in
23 * issue_time *before* each IO is sent to queue()
24 *
25 * Where are the IO counting functions called for the different cases?
26 *
27 * In sync mode:
28 * /dev/sdX (commit==NULL)
29 * RWT
30 * io_u_mark_depth() called in td_io_queue()
31 * io_u_mark_submit/complete() called in td_io_queue()
32 * issue_time set in td_io_queue()
33 *
34 * /dev/sgY with direct=1 or sync=1 (commit does nothing)
35 * RWT
36 * io_u_mark_depth() called in td_io_queue()
37 * io_u_mark_submit/complete() called in queue()
38 * issue_time set in td_io_queue()
39 *
40 * In async mode:
41 * /dev/sgY with direct=0 and sync=0
42 * RW: read and write operations are submitted in queue()
43 * io_u_mark_depth() called in td_io_commit()
44 * io_u_mark_submit() called in queue()
45 * issue_time set in td_io_queue()
46 * T: trim operations are queued in queue() and submitted in commit()
47 * io_u_mark_depth() called in td_io_commit()
48 * io_u_mark_submit() called in commit()
49 * issue_time set in commit()
50 *
2866c82d
JA
51 */
52#include <stdio.h>
53#include <stdlib.h>
54#include <unistd.h>
55#include <errno.h>
8393ca93 56#include <poll.h>
5f350952
JA
57
58#include "../fio.h"
52b81b7c 59#include "../optgroup.h"
2866c82d 60
34cfcdaf
JA
61#ifdef FIO_HAVE_SGIO
62
cbdc9353
VF
63enum {
64 FIO_SG_WRITE = 1,
65 FIO_SG_WRITE_VERIFY = 2,
66 FIO_SG_WRITE_SAME = 3
67};
52b81b7c
KD
68
/*
 * Engine-private option values filled in by fio's option parser.
 */
struct sg_options {
	void *pad;			/* NOTE(review): presumably required by fio's
					 * engine-option layout — confirm against other engines */
	unsigned int readfua;		/* set FUA bit on all reads */
	unsigned int writefua;		/* set FUA bit on all writes */
	unsigned int write_mode;	/* one of FIO_SG_WRITE* */
};
75
/*
 * Option table for the sg engine: readfua, writefua and sg_write_mode.
 * Offsets index into struct sg_options above.
 */
static struct fio_option options[] = {
	{
		.name	= "readfua",
		.lname	= "sg engine read fua flag support",
		.type	= FIO_OPT_BOOL,
		.off1	= offsetof(struct sg_options, readfua),
		.help	= "Set FUA flag (force unit access) for all Read operations",
		.def	= "0",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_SG,
	},
	{
		.name	= "writefua",
		.lname	= "sg engine write fua flag support",
		.type	= FIO_OPT_BOOL,
		.off1	= offsetof(struct sg_options, writefua),
		.help	= "Set FUA flag (force unit access) for all Write operations",
		.def	= "0",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_SG,
	},
	{
		.name	= "sg_write_mode",
		.lname	= "specify sg write mode",
		.type	= FIO_OPT_STR,
		.off1	= offsetof(struct sg_options, write_mode),
		.help	= "Specify SCSI WRITE mode",
		.def	= "write",
		.posval = {
			  { .ival = "write",
			    .oval = FIO_SG_WRITE,
			    .help = "Issue standard SCSI WRITE commands",
			  },
			  { .ival = "verify",
			    .oval = FIO_SG_WRITE_VERIFY,
			    .help = "Issue SCSI WRITE AND VERIFY commands",
			  },
			  { .ival = "same",
			    .oval = FIO_SG_WRITE_SAME,
			    .help = "Issue SCSI WRITE SAME commands",
			  },
		},
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_SG,
	},
	{
		.name	= NULL,
	},
};
125
5ad7be56
KD
126#define MAX_10B_LBA 0xFFFFFFFFULL
127#define SCSI_TIMEOUT_MS 30000 // 30 second timeout; currently no method to override
128#define MAX_SB 64 // sense block maximum return size
aa18e0ec
VF
129/*
130#define FIO_SGIO_DEBUG
131*/
5ad7be56 132
/*
 * Per-io_u SCSI command state: the CDB and sense buffer handed to the
 * sg driver via the sg_io_hdr built in sgio_hdr_init().
 */
struct sgio_cmd {
	unsigned char cdb[16];      // enhanced from 10 to support 16 byte commands
	unsigned char sb[MAX_SB];   // add sense block to commands
	int nr;
};
138
/*
 * State for one pending UNMAP (trim) command.  A single UNMAP may carry
 * several io_us, each contributing one 16-byte LBA-range descriptor to
 * unmap_param (see fio_sgio_prep / fio_sgio_unmap_setup).
 */
struct sgio_trim {
	uint8_t *unmap_param;		/* UNMAP parameter list buffer */
	unsigned int unmap_range_count;	/* number of ranges queued so far */
	struct io_u **trim_io_us;	/* io_us backing each range */
};
144
2866c82d
JA
/*
 * Per-thread engine state, hung off td->io_ops_data in fio_sgio_init().
 */
struct sgio_data {
	struct sgio_cmd *cmds;		/* one CDB/sense slot per io_u index */
	struct io_u **events;		/* reaped io_us returned by event() */
	struct pollfd *pfds;		/* one pollfd per file */
	int *fd_flags;			/* saved fcntl flags to restore after non-blocking reap */
	void *sgbuf;			/* sg_io_hdr read-back buffer, iodepth entries */
	unsigned int bs;		/* device logical block size */
	int type_checked;		/* set once fio_sgio_type_check() ran */
	struct sgio_trim **trim_queues;	/* per-io_u-index trim batching state */
	int current_queue;		/* index of trim queue being filled, -1 if none */
#ifdef FIO_SGIO_DEBUG
	unsigned int *trim_queue_map;	/* io_u index -> owning trim queue (debug only) */
#endif
};
159
a824149a
DF
/*
 * Read a big-endian 32-bit value from an arbitrarily-aligned buffer.
 *
 * Use memcpy (as the sgio_set_be* helpers already do) instead of
 * dereferencing a cast pointer: buf frequently points into byte buffers
 * at odd offsets (e.g. &buf[4] in fio_sgio_read_capacity), so the cast
 * was a strict-aliasing violation and a potentially misaligned access.
 */
static inline uint32_t sgio_get_be32(uint8_t *buf)
{
	uint32_t t;

	memcpy(&t, buf, sizeof(t));
	return be32_to_cpu(t);
}
164
/*
 * Read a big-endian 64-bit value from an arbitrarily-aligned buffer.
 *
 * memcpy avoids the strict-aliasing/misalignment hazard of casting
 * the byte pointer to uint64_t* and dereferencing it.
 */
static inline uint64_t sgio_get_be64(uint8_t *buf)
{
	uint64_t t;

	memcpy(&t, buf, sizeof(t));
	return be64_to_cpu(t);
}
169
/*
 * Store @val big-endian into the (possibly unaligned) buffer @buf.
 */
static inline void sgio_set_be16(uint16_t val, uint8_t *buf)
{
	const uint16_t be = cpu_to_be16(val);

	memcpy(buf, &be, sizeof(be));
}
176
/*
 * Store @val big-endian into the (possibly unaligned) buffer @buf.
 */
static inline void sgio_set_be32(uint32_t val, uint8_t *buf)
{
	const uint32_t be = cpu_to_be32(val);

	memcpy(buf, &be, sizeof(be));
}
183
/*
 * Store @val big-endian into the (possibly unaligned) buffer @buf.
 */
static inline void sgio_set_be64(uint64_t val, uint8_t *buf)
{
	const uint64_t be = cpu_to_be64(val);

	memcpy(buf, &be, sizeof(be));
}
190
b4b9665e
VF
191static inline bool sgio_unbuffered(struct thread_data *td)
192{
193 return (td->o.odirect || td->o.sync_io);
194}
195
2866c82d
JA
/*
 * Initialize the sg_io_hdr for one io_u: point it at the per-index CDB
 * and sense buffers, tag it with the io_u (pack_id/usr_ptr) so completions
 * can be matched back, and set the transfer buffer when @fs is non-zero
 * (read/write commands; non-data commands like sync pass fs=0).
 */
static void sgio_hdr_init(struct sgio_data *sd, struct sg_io_hdr *hdr,
			  struct io_u *io_u, int fs)
{
	struct sgio_cmd *sc = &sd->cmds[io_u->index];

	/* zero both header and CDB so stale command bytes never leak through */
	memset(hdr, 0, sizeof(*hdr));
	memset(sc->cdb, 0, sizeof(sc->cdb));

	hdr->interface_id = 'S';	/* required magic for the sg v3 interface */
	hdr->cmdp = sc->cdb;
	hdr->cmd_len = sizeof(sc->cdb);
	hdr->sbp = sc->sb;
	hdr->mx_sb_len = sizeof(sc->sb);
	hdr->pack_id = io_u->index;
	hdr->usr_ptr = io_u;		/* completion path maps hdr back to io_u */
	hdr->timeout = SCSI_TIMEOUT_MS;

	if (fs) {
		hdr->dxferp = io_u->xfer_buf;
		hdr->dxfer_len = io_u->xfer_buflen;
	}
}
218
adee86c5
JA
/*
 * Return 1 if any of the first @fds entries has POLLIN set in revents,
 * 0 otherwise.
 */
static int pollin_events(struct pollfd *pfds, int fds)
{
	struct pollfd *p = pfds;
	struct pollfd *const end = pfds + fds;

	while (p != end) {
		if (p->revents & POLLIN)
			return 1;
		p++;
	}

	return 0;
}
2866c82d 229
14d0261e
JA
/*
 * Read exactly @size bytes from @fd into @data, retrying on EAGAIN/EINTR.
 *
 * Returns 0 on success, the errno value on a hard read error, or EAGAIN
 * if EOF was hit before @size bytes arrived.
 *
 * Fix: advance a char * cursor instead of doing arithmetic on the
 * void * parameter — void pointer arithmetic is a GCC extension, not
 * standard C.
 */
static int sg_fd_read(int fd, void *data, size_t size)
{
	char *p = data;
	int err = 0;

	while (size) {
		ssize_t ret;

		ret = read(fd, p, size);
		if (ret < 0) {
			if (errno == EAGAIN || errno == EINTR)
				continue;
			err = errno;
			break;
		} else if (!ret)
			break;		/* EOF: report leftover via EAGAIN below */
		else {
			p += ret;
			size -= ret;
		}
	}

	if (err)
		return err;
	if (size)
		return EAGAIN;

	return 0;
}
258
/*
 * Reap completed commands from all sg character devices.
 *
 * Two counts are tracked: completed SCSI commands and completed io_us.
 * They match for reads/writes, but one UNMAP command can complete
 * several trim io_us, which is why "events" and "trims" are adjusted
 * separately below.
 *
 * Returns the number of io_us reaped (stored in sd->events for event()),
 * or a negative error.
 */
static int fio_sgio_getevents(struct thread_data *td, unsigned int min,
			      unsigned int max,
			      const struct timespec fio_unused *t)
{
	struct sgio_data *sd = td->io_ops_data;
	int left = max, eventNum, ret, r = 0, trims = 0;
	void *buf = sd->sgbuf;
	unsigned int i, j, events;
	struct fio_file *f;
	struct io_u *io_u;

	/*
	 * Fill in the file descriptors
	 */
	for_each_file(td, f, i) {
		/*
		 * don't block for min events == 0
		 */
		if (!min)
			sd->fd_flags[i] = fio_set_fd_nonblocking(f->fd, "sg");
		else
			sd->fd_flags[i] = -1;

		sd->pfds[i].fd = f->fd;
		sd->pfds[i].events = POLLIN;
	}

	/*
	** There are two counters here:
	**  - number of SCSI commands completed
	**  - number of io_us completed
	**
	** These are the same with reads and writes, but
	** could differ with trim/unmap commands because
	** a single unmap can include multiple io_us
	*/

	while (left > 0) {
		char *p;

		dprint(FD_IO, "sgio_getevents: sd %p: min=%d, max=%d, left=%d\n", sd, min, max, left);

		/* block in poll() until at least one fd is readable (skipped when min==0) */
		do {
			if (!min)
				break;

			ret = poll(sd->pfds, td->o.nr_files, -1);
			if (ret < 0) {
				if (!r)
					r = -errno;
				td_verror(td, errno, "poll");
				break;
			} else if (!ret)
				continue;

			if (pollin_events(sd->pfds, td->o.nr_files))
				break;
		} while (1);

		if (r < 0)
			break;

re_read:
		/* drain completed sg_io_hdrs from every file into sgbuf */
		p = buf;
		events = 0;
		for_each_file(td, f, i) {
			for (eventNum = 0; eventNum < left; eventNum++) {
				ret = sg_fd_read(f->fd, p, sizeof(struct sg_io_hdr));
				dprint(FD_IO, "sgio_getevents: sg_fd_read ret: %d\n", ret);
				if (ret) {
					r = -ret;
					td_verror(td, r, "sg_read");
					break;
				}
				io_u = ((struct sg_io_hdr *)p)->usr_ptr;
				if (io_u->ddir == DDIR_TRIM) {
					/* one UNMAP completion accounts for all its ranges */
					events += sd->trim_queues[io_u->index]->unmap_range_count;
					eventNum += sd->trim_queues[io_u->index]->unmap_range_count - 1;
				} else
					events++;

				p += sizeof(struct sg_io_hdr);
				dprint(FD_IO, "sgio_getevents: events: %d, eventNum: %d, left: %d\n", events, eventNum, left);
			}
		}

		if (r < 0 && !events)
			break;
		if (!events) {
			usleep(1000);
			goto re_read;
		}

		left -= events;
		r += events;

		/* fan completed headers out into sd->events, expanding trims */
		for (i = 0; i < events; i++) {
			struct sg_io_hdr *hdr = (struct sg_io_hdr *) buf + i;
			sd->events[i + trims] = hdr->usr_ptr;
			io_u = (struct io_u *)(hdr->usr_ptr);

			if (hdr->info & SG_INFO_CHECK) {
				/* record if an io error occurred, ignore resid */
				memcpy(&io_u->hdr, hdr, sizeof(struct sg_io_hdr));
				sd->events[i + trims]->error = EIO;
			}

			if (io_u->ddir == DDIR_TRIM) {
				struct sgio_trim *st = sd->trim_queues[io_u->index];
#ifdef FIO_SGIO_DEBUG
				assert(st->trim_io_us[0] == io_u);
				assert(sd->trim_queue_map[io_u->index] == io_u->index);
				dprint(FD_IO, "sgio_getevents: reaping %d io_us from trim queue %d\n", st->unmap_range_count, io_u->index);
				dprint(FD_IO, "sgio_getevents: reaped io_u %d and stored in events[%d]\n", io_u->index, i+trims);
#endif
				/* remaining io_us of this UNMAP share the one completion */
				for (j = 1; j < st->unmap_range_count; j++) {
					++trims;
					sd->events[i + trims] = st->trim_io_us[j];
#ifdef FIO_SGIO_DEBUG
					dprint(FD_IO, "sgio_getevents: reaped io_u %d and stored in events[%d]\n", st->trim_io_us[j]->index, i+trims);
					assert(sd->trim_queue_map[st->trim_io_us[j]->index] == io_u->index);
#endif
					if (hdr->info & SG_INFO_CHECK) {
						/* record if an io error occurred, ignore resid */
						memcpy(&st->trim_io_us[j]->hdr, hdr, sizeof(struct sg_io_hdr));
						sd->events[i + trims]->error = EIO;
					}
				}
				events -= st->unmap_range_count - 1;
				st->unmap_range_count = 0;
			}
		}
	}

	if (!min) {
		/* restore the fd flags we overrode for the non-blocking reap */
		for_each_file(td, f, i) {
			if (sd->fd_flags[i] == -1)
				continue;

			if (fcntl(f->fd, F_SETFL, sd->fd_flags[i]) < 0)
				log_err("fio: sg failed to restore fcntl flags: %s\n", strerror(errno));
		}
	}

	return r;
}
405
2e4ef4fb
JA
/*
 * Issue one command synchronously via the SG_IO ioctl (block devices).
 * The ioctl blocks until the command finishes, so the io_u is complete
 * on return; it is parked in events[0] for the event() hook.
 */
static enum fio_q_status fio_sgio_ioctl_doio(struct thread_data *td,
					     struct fio_file *f,
					     struct io_u *io_u)
{
	struct sgio_data *sd = td->io_ops_data;
	struct sg_io_hdr *hdr = &io_u->hdr;
	int ret;

	sd->events[0] = io_u;

	ret = ioctl(f->fd, SG_IO, hdr);
	if (ret < 0)
		return ret;

	/* record if an io error occurred */
	if (hdr->info & SG_INFO_CHECK)
		io_u->error = EIO;

	return FIO_Q_COMPLETED;
}
426
a999bc49
JA
/*
 * Submit one command to an sg character device by write()ing its header.
 *
 * In async mode (do_sync == 0) the command is merely queued and reaped
 * later by getevents().  In sync mode we must read completions back until
 * we see *this* io_u, completing any other inflight io_us we drain along
 * the way.
 */
static enum fio_q_status fio_sgio_rw_doio(struct thread_data *td,
					  struct fio_file *f,
					  struct io_u *io_u, int do_sync)
{
	struct sg_io_hdr *hdr = &io_u->hdr;
	int ret;

	ret = write(f->fd, hdr, sizeof(*hdr));
	if (ret < 0)
		return ret;

	if (do_sync) {
		/*
		 * We can't just read back the first command that completes
		 * and assume it's the one we need, it could be any command
		 * that is inflight.
		 */
		do {
			struct io_u *__io_u;

			ret = read(f->fd, hdr, sizeof(*hdr));
			if (ret < 0)
				return ret;

			__io_u = hdr->usr_ptr;

			/* record if an io error occurred */
			if (hdr->info & SG_INFO_CHECK)
				__io_u->error = EIO;

			if (__io_u == io_u)
				break;

			/* some other inflight io_u completed first: retire it */
			if (io_u_sync_complete(td, __io_u)) {
				ret = -1;
				break;
			}
		} while (1);

		return FIO_Q_COMPLETED;
	}

	return FIO_Q_QUEUED;
}
471
b4b9665e
VF
/*
 * Dispatch one io_u: block devices always go through the synchronous
 * SG_IO ioctl; sg character devices go through the read/write interface,
 * synchronously or not depending on @do_sync.
 */
static enum fio_q_status fio_sgio_doio(struct thread_data *td,
				       struct io_u *io_u, int do_sync)
{
	struct fio_file *f = io_u->file;
	enum fio_q_status ret;

	if (f->filetype == FIO_TYPE_BLOCK) {
		ret = fio_sgio_ioctl_doio(td, f, io_u);
		if (io_u->error)
			td_verror(td, io_u->error, __func__);
	} else {
		ret = fio_sgio_rw_doio(td, f, io_u, do_sync);
		/* async errors surface later via getevents(), so only
		 * report here when the command already completed */
		if (io_u->error && do_sync)
			td_verror(td, io_u->error, __func__);
	}

	return ret;
}
490
b4b9665e
VF
491static void fio_sgio_rw_lba(struct sg_io_hdr *hdr, unsigned long long lba,
492 unsigned long long nr_blocks)
493{
494 if (lba < MAX_10B_LBA) {
a824149a
DF
495 sgio_set_be32((uint32_t) lba, &hdr->cmdp[2]);
496 sgio_set_be16((uint16_t) nr_blocks, &hdr->cmdp[7]);
b4b9665e 497 } else {
a824149a
DF
498 sgio_set_be64(lba, &hdr->cmdp[2]);
499 sgio_set_be32((uint32_t) nr_blocks, &hdr->cmdp[10]);
b4b9665e
VF
500 }
501
502 return;
503}
504
2866c82d
JA
/*
 * Build the SCSI CDB for an io_u before submission.
 *
 * Reads/writes get READ/WRITE(10) or (16) depending on LBA size, with
 * optional FUA and the configured write variant.  Trims are not issued
 * here: their ranges are batched into the current UNMAP parameter list
 * and submitted in queue()/commit().  Sync becomes SYNCHRONIZE CACHE.
 *
 * Returns 0, or EINVAL if the buffer is not block-size aligned.
 */
static int fio_sgio_prep(struct thread_data *td, struct io_u *io_u)
{
	struct sg_io_hdr *hdr = &io_u->hdr;
	struct sg_options *o = td->eo;
	struct sgio_data *sd = td->io_ops_data;
	unsigned long long nr_blocks, lba;
	int offset;

	if (io_u->xfer_buflen & (sd->bs - 1)) {
		log_err("read/write not sector aligned\n");
		return EINVAL;
	}

	nr_blocks = io_u->xfer_buflen / sd->bs;
	lba = io_u->offset / sd->bs;

	if (io_u->ddir == DDIR_READ) {
		sgio_hdr_init(sd, hdr, io_u, 1);

		hdr->dxfer_direction = SG_DXFER_FROM_DEV;
		if (lba < MAX_10B_LBA)
			hdr->cmdp[0] = 0x28; // read(10)
		else
			hdr->cmdp[0] = 0x88; // read(16)

		if (o->readfua)
			hdr->cmdp[1] |= 0x08;

		fio_sgio_rw_lba(hdr, lba, nr_blocks);

	} else if (io_u->ddir == DDIR_WRITE) {
		sgio_hdr_init(sd, hdr, io_u, 1);

		hdr->dxfer_direction = SG_DXFER_TO_DEV;
		switch(o->write_mode) {
		case FIO_SG_WRITE:
			if (lba < MAX_10B_LBA)
				hdr->cmdp[0] = 0x2a; // write(10)
			else
				hdr->cmdp[0] = 0x8a; // write(16)
			if (o->writefua)
				hdr->cmdp[1] |= 0x08;
			break;
		case FIO_SG_WRITE_VERIFY:
			if (lba < MAX_10B_LBA)
				hdr->cmdp[0] = 0x2e; // write and verify(10)
			else
				hdr->cmdp[0] = 0x8e; // write and verify(16)
			break;
			// BYTCHK is disabled by virtue of the memset in sgio_hdr_init
		case FIO_SG_WRITE_SAME:
			hdr->dxfer_len = sd->bs;
			if (lba < MAX_10B_LBA)
				hdr->cmdp[0] = 0x41; // write same(10)
			else
				hdr->cmdp[0] = 0x93; // write same(16)
			break;
		};

		fio_sgio_rw_lba(hdr, lba, nr_blocks);

	} else if (io_u->ddir == DDIR_TRIM) {
		struct sgio_trim *st;

		if (sd->current_queue == -1) {
			/* start a new UNMAP batch keyed on this io_u's index */
			sgio_hdr_init(sd, hdr, io_u, 0);

			hdr->cmd_len = 10;
			hdr->dxfer_direction = SG_DXFER_TO_DEV;
			hdr->cmdp[0] = 0x42; // unmap
			sd->current_queue = io_u->index;
			st = sd->trim_queues[sd->current_queue];
			hdr->dxferp = st->unmap_param;
#ifdef FIO_SGIO_DEBUG
			assert(sd->trim_queues[io_u->index]->unmap_range_count == 0);
			dprint(FD_IO, "sg: creating new queue based on io_u %d\n", io_u->index);
#endif
		}
		else
			st = sd->trim_queues[sd->current_queue];

		dprint(FD_IO, "sg: adding io_u %d to trim queue %d\n", io_u->index, sd->current_queue);
		st->trim_io_us[st->unmap_range_count] = io_u;
#ifdef FIO_SGIO_DEBUG
		sd->trim_queue_map[io_u->index] = sd->current_queue;
#endif

		/* 16-byte range descriptors start after the 8-byte list header */
		offset = 8 + 16 * st->unmap_range_count;
		sgio_set_be64(lba, &st->unmap_param[offset]);
		sgio_set_be32((uint32_t) nr_blocks, &st->unmap_param[offset + 8]);

		st->unmap_range_count++;

	} else if (ddir_sync(io_u->ddir)) {
		sgio_hdr_init(sd, hdr, io_u, 0);
		hdr->dxfer_direction = SG_DXFER_NONE;
		if (lba < MAX_10B_LBA)
			hdr->cmdp[0] = 0x35; // synccache(10)
		else
			hdr->cmdp[0] = 0x91; // synccache(16)
	} else
		assert(0);

	return 0;
}
610
b4b9665e
VF
/*
 * Finalize an UNMAP command once its range count is known: fill in the
 * CDB parameter-list length (bytes 7-8) and the parameter list header
 * (bytes 0-1: data length, bytes 2-3: block descriptor data length),
 * where each queued range contributes one 16-byte descriptor.
 */
static void fio_sgio_unmap_setup(struct sg_io_hdr *hdr, struct sgio_trim *st)
{
	uint16_t cnt = st->unmap_range_count * 16;

	hdr->dxfer_len = cnt + 8;
	sgio_set_be16(cnt + 8, &hdr->cmdp[7]);
	sgio_set_be16(cnt + 6, st->unmap_param);
	sgio_set_be16(cnt, &st->unmap_param[2]);

	return;
}
622
2e4ef4fb
JA
/*
 * Queue one io_u.  Reads/writes are submitted immediately (sync or
 * async depending on the job options); trims are either completed
 * synchronously here (single-range UNMAP) or left queued for commit().
 * Submit/complete accounting is done here only in the cases where
 * td_io_queue() does not do it for us (see the file header comment).
 */
static enum fio_q_status fio_sgio_queue(struct thread_data *td,
					struct io_u *io_u)
{
	struct sg_io_hdr *hdr = &io_u->hdr;
	struct sgio_data *sd = td->io_ops_data;
	int ret, do_sync = 0;

	fio_ro_check(td, io_u);

	if (sgio_unbuffered(td) || ddir_sync(io_u->ddir))
		do_sync = 1;

	if (io_u->ddir == DDIR_TRIM) {
		if (do_sync || io_u->file->filetype == FIO_TYPE_BLOCK) {
			struct sgio_trim *st = sd->trim_queues[sd->current_queue];

			/* finish cdb setup for unmap because we are
			** doing unmap commands synchronously */
#ifdef FIO_SGIO_DEBUG
			assert(st->unmap_range_count == 1);
			assert(io_u == st->trim_io_us[0]);
#endif
			hdr = &io_u->hdr;

			fio_sgio_unmap_setup(hdr, st);

			st->unmap_range_count = 0;
			sd->current_queue = -1;
		} else
			/* queue up trim ranges and submit in commit() */
			return FIO_Q_QUEUED;
	}

	ret = fio_sgio_doio(td, io_u, do_sync);

	if (ret < 0)
		io_u->error = errno;
	else if (hdr->status) {
		/* device reported a SCSI status error */
		io_u->resid = hdr->resid;
		io_u->error = EIO;
	} else if (td->io_ops->commit != NULL) {
		/* char-device path: do the accounting td_io_queue() skips */
		if (do_sync && !ddir_sync(io_u->ddir)) {
			io_u_mark_submit(td, 1);
			io_u_mark_complete(td, 1);
		} else if (io_u->ddir == DDIR_READ || io_u->ddir == DDIR_WRITE) {
			io_u_mark_submit(td, 1);
			io_u_queued(td, io_u);
		}
	}

	if (io_u->error) {
		td_verror(td, io_u->error, "xfer");
		return FIO_Q_COMPLETED;
	}

	return ret;
}
680
b4b9665e
VF
681static int fio_sgio_commit(struct thread_data *td)
682{
683 struct sgio_data *sd = td->io_ops_data;
684 struct sgio_trim *st;
685 struct io_u *io_u;
686 struct sg_io_hdr *hdr;
687 struct timespec now;
688 unsigned int i;
689 int ret;
690
691 if (sd->current_queue == -1)
692 return 0;
693
694 st = sd->trim_queues[sd->current_queue];
695 io_u = st->trim_io_us[0];
696 hdr = &io_u->hdr;
697
698 fio_sgio_unmap_setup(hdr, st);
699
700 sd->current_queue = -1;
701
a999bc49 702 ret = fio_sgio_rw_doio(td, io_u->file, io_u, 0);
b4b9665e 703
53ee8c17
VF
704 if (ret < 0 || hdr->status) {
705 int error;
706
707 if (ret < 0)
708 error = errno;
709 else {
710 error = EIO;
711 ret = -EIO;
b4b9665e 712 }
53ee8c17
VF
713
714 for (i = 0; i < st->unmap_range_count; i++) {
715 st->trim_io_us[i]->error = error;
716 clear_io_u(td, st->trim_io_us[i]);
717 if (hdr->status)
718 st->trim_io_us[i]->resid = hdr->resid;
b4b9665e 719 }
53ee8c17
VF
720
721 td_verror(td, error, "xfer");
722 return ret;
b4b9665e
VF
723 }
724
53ee8c17
VF
725 if (fio_fill_issue_time(td)) {
726 fio_gettime(&now, NULL);
727 for (i = 0; i < st->unmap_range_count; i++) {
728 memcpy(&st->trim_io_us[i]->issue_time, &now, sizeof(now));
729 io_u_queued(td, io_u);
730 }
b4b9665e 731 }
53ee8c17 732 io_u_mark_submit(td, st->unmap_range_count);
b4b9665e 733
53ee8c17 734 return 0;
b4b9665e
VF
735}
736
2866c82d
JA
737static struct io_u *fio_sgio_event(struct thread_data *td, int event)
738{
565e784d 739 struct sgio_data *sd = td->io_ops_data;
2866c82d
JA
740
741 return sd->events[event];
742}
743
5ad7be56
KD
/*
 * Query the device's block size and highest LBA via READ CAPACITY,
 * trying the 10-byte form first and falling back to the 16-byte form
 * when the 10-byte reply saturates (or the device rejects it).
 *
 * Runs on its own fd because it is called before sd/io_u state exists.
 * Returns 0 with *bs and *max_lba set, -errno on open/ioctl failure,
 * or EIO when no valid block size was obtained.
 */
static int fio_sgio_read_capacity(struct thread_data *td, unsigned int *bs,
				  unsigned long long *max_lba)
{
	/*
	 * need to do read capacity operation w/o benefit of sd or
	 * io_u structures, which are not initialized until later.
	 */
	struct sg_io_hdr hdr;
	unsigned long long hlba;
	unsigned int blksz = 0;
	unsigned char cmd[16];
	unsigned char sb[64];
	unsigned char buf[32]; // read capacity return
	int ret;
	int fd = -1;

	struct fio_file *f = td->files[0];

	/* open file independent of rest of application */
	fd = open(f->file_name, O_RDONLY);
	if (fd < 0)
		return -errno;

	memset(&hdr, 0, sizeof(hdr));
	memset(cmd, 0, sizeof(cmd));
	memset(sb, 0, sizeof(sb));
	memset(buf, 0, sizeof(buf));

	/* First let's try a 10 byte read capacity. */
	hdr.interface_id = 'S';
	hdr.cmdp = cmd;
	hdr.cmd_len = 10;
	hdr.sbp = sb;
	hdr.mx_sb_len = sizeof(sb);
	hdr.timeout = SCSI_TIMEOUT_MS;
	hdr.cmdp[0] = 0x25;  // Read Capacity(10)
	hdr.dxfer_direction = SG_DXFER_FROM_DEV;
	hdr.dxferp = buf;
	hdr.dxfer_len = sizeof(buf);

	ret = ioctl(fd, SG_IO, &hdr);
	if (ret < 0) {
		close(fd);
		return ret;
	}

	if (hdr.info & SG_INFO_CHECK) {
		/* RCAP(10) might be unsupported by device. Force RCAP(16) */
		hlba = MAX_10B_LBA;
	} else {
		/* RCAP(10) reply: bytes 0-3 last LBA, bytes 4-7 block size */
		blksz = sgio_get_be32(&buf[4]);
		hlba = sgio_get_be32(buf);
	}

	/*
	 * If max lba masked by MAX_10B_LBA equals MAX_10B_LBA,
	 * then need to retry with 16 byte Read Capacity command.
	 */
	if (hlba == MAX_10B_LBA) {
		hdr.cmd_len = 16;
		hdr.cmdp[0] = 0x9e; // service action
		hdr.cmdp[1] = 0x10; // Read Capacity(16)
		sgio_set_be32(sizeof(buf), &hdr.cmdp[10]);

		hdr.dxfer_direction = SG_DXFER_FROM_DEV;
		hdr.dxferp = buf;
		hdr.dxfer_len = sizeof(buf);

		ret = ioctl(fd, SG_IO, &hdr);
		if (ret < 0) {
			close(fd);
			return ret;
		}

		/* record if an io error occurred */
		if (hdr.info & SG_INFO_CHECK)
			td_verror(td, EIO, "fio_sgio_read_capacity");

		/* RCAP(16) reply: bytes 0-7 last LBA, bytes 8-11 block size */
		blksz = sgio_get_be32(&buf[8]);
		hlba = sgio_get_be64(buf);
	}

	if (blksz) {
		*bs = blksz;
		*max_lba = hlba;
		ret = 0;
	} else {
		ret = EIO;
	}

	close(fd);
	return ret;
}
837
/*
 * Tear down all per-thread state allocated by fio_sgio_init(),
 * including every per-io_u trim queue and its buffers.
 */
static void fio_sgio_cleanup(struct thread_data *td)
{
	struct sgio_data *sd = td->io_ops_data;
	int i;

	if (sd) {
		free(sd->events);
		free(sd->cmds);
		free(sd->fd_flags);
		free(sd->pfds);
		free(sd->sgbuf);
#ifdef FIO_SGIO_DEBUG
		free(sd->trim_queue_map);
#endif

		for (i = 0; i < td->o.iodepth; i++) {
			free(sd->trim_queues[i]->unmap_param);
			free(sd->trim_queues[i]->trim_io_us);
			free(sd->trim_queues[i]);
		}

		free(sd->trim_queues);
		free(sd);
	}
}
863
/*
 * Allocate all per-thread state: one command slot, sg header, event
 * slot and trim queue per io_u, plus per-file pollfd/flag arrays.
 * Always returns 0.
 *
 * NOTE(review): calloc() results are not checked here — consistent with
 * the engine's existing behavior, but worth confirming against fio's
 * smalloc/OOM conventions.
 */
static int fio_sgio_init(struct thread_data *td)
{
	struct sgio_data *sd;
	struct sgio_trim *st;
	int i;

	sd = calloc(1, sizeof(*sd));
	sd->cmds = calloc(td->o.iodepth, sizeof(struct sgio_cmd));
	sd->sgbuf = calloc(td->o.iodepth, sizeof(struct sg_io_hdr));
	sd->events = calloc(td->o.iodepth, sizeof(struct io_u *));
	sd->pfds = calloc(td->o.nr_files, sizeof(struct pollfd));
	sd->fd_flags = calloc(td->o.nr_files, sizeof(int));
	sd->type_checked = 0;

	sd->trim_queues = calloc(td->o.iodepth, sizeof(struct sgio_trim *));
	sd->current_queue = -1;
#ifdef FIO_SGIO_DEBUG
	sd->trim_queue_map = calloc(td->o.iodepth, sizeof(int));
#endif
	for (i = 0; i < td->o.iodepth; i++) {
		sd->trim_queues[i] = calloc(1, sizeof(struct sgio_trim));
		st = sd->trim_queues[i];
		/* 8-byte UNMAP list header + one 16-byte descriptor per io_u */
		st->unmap_param = calloc(td->o.iodepth + 1, sizeof(char[16]));
		st->unmap_range_count = 0;
		st->trim_io_us = calloc(td->o.iodepth, sizeof(struct io_u *));
	}

	td->io_ops_data = sd;

	/*
	 * we want to do it, regardless of whether odirect is set or not
	 */
	td->o.override_sync = 1;
	return 0;
}
899
/*
 * Validate the file type on first open and discover the device block
 * size: BLKSSZGET for block devices, READ CAPACITY for sg character
 * devices.  Block devices are forced into the pure-sync path by nulling
 * the async hooks.  Returns 0 on success, 1 on any failure.
 */
static int fio_sgio_type_check(struct thread_data *td, struct fio_file *f)
{
	struct sgio_data *sd = td->io_ops_data;
	unsigned int bs = 0;
	unsigned long long max_lba = 0;

	if (f->filetype == FIO_TYPE_BLOCK) {
		if (ioctl(f->fd, BLKSSZGET, &bs) < 0) {
			td_verror(td, errno, "ioctl");
			return 1;
		}
	} else if (f->filetype == FIO_TYPE_CHAR) {
		int version, ret;

		/* confirm this really is an sg node before trusting it */
		if (ioctl(f->fd, SG_GET_VERSION_NUM, &version) < 0) {
			td_verror(td, errno, "ioctl");
			return 1;
		}

		ret = fio_sgio_read_capacity(td, &bs, &max_lba);
		if (ret) {
			td_verror(td, td->error, "fio_sgio_read_capacity");
			log_err("ioengine sg unable to read capacity successfully\n");
			return 1;
		}
	} else {
		td_verror(td, EINVAL, "wrong file type");
		log_err("ioengine sg only works on block or character devices\n");
		return 1;
	}

	sd->bs = bs;
	// Determine size of commands needed based on max_lba
	if (max_lba >= MAX_10B_LBA) {
		dprint(FD_IO, "sgio_type_check: using 16 byte read/write "
			"commands for lba above 0x%016llx/0x%016llx\n",
			MAX_10B_LBA, max_lba);
	}

	if (f->filetype == FIO_TYPE_BLOCK) {
		td->io_ops->getevents = NULL;
		td->io_ops->event = NULL;
		td->io_ops->commit = NULL;
		/*
		** Setting these functions to null may cause problems
		** with filename=/dev/sda:/dev/sg0 since we are only
		** considering a single file
		*/
	}
	sd->type_checked = 1;

	return 0;
}
953
954static int fio_sgio_open(struct thread_data *td, struct fio_file *f)
955{
565e784d 956 struct sgio_data *sd = td->io_ops_data;
b5af8293
JA
957 int ret;
958
959 ret = generic_open_file(td, f);
960 if (ret)
961 return ret;
962
15ba640a 963 if (sd && !sd->type_checked && fio_sgio_type_check(td, f)) {
6977bcd0 964 ret = generic_close_file(td, f);
b5af8293
JA
965 return 1;
966 }
967
968 return 0;
2866c82d
JA
969}
970
5ad7be56
KD
971/*
972 * Build an error string with details about the driver, host or scsi
973 * error contained in the sg header Caller will use as necessary.
974 */
975static char *fio_sgio_errdetails(struct io_u *io_u)
976{
977 struct sg_io_hdr *hdr = &io_u->hdr;
978#define MAXERRDETAIL 1024
979#define MAXMSGCHUNK 128
fd04fa03 980 char *msg, msgchunk[MAXMSGCHUNK];
5ad7be56
KD
981 int i;
982
efa72f25 983 msg = calloc(1, MAXERRDETAIL);
fd04fa03 984 strcpy(msg, "");
5ad7be56
KD
985
986 /*
987 * can't seem to find sg_err.h, so I'll just echo the define values
988 * so others can search on internet to find clearer clues of meaning.
989 */
990 if (hdr->info & SG_INFO_CHECK) {
5ad7be56
KD
991 if (hdr->host_status) {
992 snprintf(msgchunk, MAXMSGCHUNK, "SG Host Status: 0x%02x; ", hdr->host_status);
993 strlcat(msg, msgchunk, MAXERRDETAIL);
994 switch (hdr->host_status) {
995 case 0x01:
996 strlcat(msg, "SG_ERR_DID_NO_CONNECT", MAXERRDETAIL);
997 break;
998 case 0x02:
999 strlcat(msg, "SG_ERR_DID_BUS_BUSY", MAXERRDETAIL);
1000 break;
1001 case 0x03:
1002 strlcat(msg, "SG_ERR_DID_TIME_OUT", MAXERRDETAIL);
1003 break;
1004 case 0x04:
1005 strlcat(msg, "SG_ERR_DID_BAD_TARGET", MAXERRDETAIL);
1006 break;
1007 case 0x05:
1008 strlcat(msg, "SG_ERR_DID_ABORT", MAXERRDETAIL);
1009 break;
1010 case 0x06:
1011 strlcat(msg, "SG_ERR_DID_PARITY", MAXERRDETAIL);
1012 break;
1013 case 0x07:
1014 strlcat(msg, "SG_ERR_DID_ERROR (internal error)", MAXERRDETAIL);
1015 break;
1016 case 0x08:
1017 strlcat(msg, "SG_ERR_DID_RESET", MAXERRDETAIL);
1018 break;
1019 case 0x09:
1020 strlcat(msg, "SG_ERR_DID_BAD_INTR (unexpected)", MAXERRDETAIL);
1021 break;
1022 case 0x0a:
1023 strlcat(msg, "SG_ERR_DID_PASSTHROUGH", MAXERRDETAIL);
1024 break;
1025 case 0x0b:
1026 strlcat(msg, "SG_ERR_DID_SOFT_ERROR (driver retry?)", MAXERRDETAIL);
1027 break;
1028 case 0x0c:
1029 strlcat(msg, "SG_ERR_DID_IMM_RETRY", MAXERRDETAIL);
1030 break;
1031 case 0x0d:
1032 strlcat(msg, "SG_ERR_DID_REQUEUE", MAXERRDETAIL);
1033 break;
2ce6c6e5
TK
1034 case 0x0e:
1035 strlcat(msg, "SG_ERR_DID_TRANSPORT_DISRUPTED", MAXERRDETAIL);
1036 break;
1037 case 0x0f:
1038 strlcat(msg, "SG_ERR_DID_TRANSPORT_FAILFAST", MAXERRDETAIL);
1039 break;
1040 case 0x10:
1041 strlcat(msg, "SG_ERR_DID_TARGET_FAILURE", MAXERRDETAIL);
1042 break;
1043 case 0x11:
1044 strlcat(msg, "SG_ERR_DID_NEXUS_FAILURE", MAXERRDETAIL);
1045 break;
1046 case 0x12:
1047 strlcat(msg, "SG_ERR_DID_ALLOC_FAILURE", MAXERRDETAIL);
1048 break;
1049 case 0x13:
1050 strlcat(msg, "SG_ERR_DID_MEDIUM_ERROR", MAXERRDETAIL);
1051 break;
5ad7be56
KD
1052 default:
1053 strlcat(msg, "Unknown", MAXERRDETAIL);
1054 break;
1055 }
1056 strlcat(msg, ". ", MAXERRDETAIL);
1057 }
1058 if (hdr->driver_status) {
1059 snprintf(msgchunk, MAXMSGCHUNK, "SG Driver Status: 0x%02x; ", hdr->driver_status);
1060 strlcat(msg, msgchunk, MAXERRDETAIL);
1061 switch (hdr->driver_status & 0x0F) {
1062 case 0x01:
1063 strlcat(msg, "SG_ERR_DRIVER_BUSY", MAXERRDETAIL);
1064 break;
1065 case 0x02:
1066 strlcat(msg, "SG_ERR_DRIVER_SOFT", MAXERRDETAIL);
1067 break;
1068 case 0x03:
1069 strlcat(msg, "SG_ERR_DRIVER_MEDIA", MAXERRDETAIL);
1070 break;
1071 case 0x04:
1072 strlcat(msg, "SG_ERR_DRIVER_ERROR", MAXERRDETAIL);
1073 break;
1074 case 0x05:
1075 strlcat(msg, "SG_ERR_DRIVER_INVALID", MAXERRDETAIL);
1076 break;
1077 case 0x06:
1078 strlcat(msg, "SG_ERR_DRIVER_TIMEOUT", MAXERRDETAIL);
1079 break;
1080 case 0x07:
1081 strlcat(msg, "SG_ERR_DRIVER_HARD", MAXERRDETAIL);
1082 break;
1083 case 0x08:
1084 strlcat(msg, "SG_ERR_DRIVER_SENSE", MAXERRDETAIL);
1085 break;
1086 default:
1087 strlcat(msg, "Unknown", MAXERRDETAIL);
1088 break;
1089 }
1090 strlcat(msg, "; ", MAXERRDETAIL);
1091 switch (hdr->driver_status & 0xF0) {
1092 case 0x10:
1093 strlcat(msg, "SG_ERR_SUGGEST_RETRY", MAXERRDETAIL);
1094 break;
1095 case 0x20:
1096 strlcat(msg, "SG_ERR_SUGGEST_ABORT", MAXERRDETAIL);
1097 break;
1098 case 0x30:
1099 strlcat(msg, "SG_ERR_SUGGEST_REMAP", MAXERRDETAIL);
1100 break;
1101 case 0x40:
1102 strlcat(msg, "SG_ERR_SUGGEST_DIE", MAXERRDETAIL);
1103 break;
1104 case 0x80:
1105 strlcat(msg, "SG_ERR_SUGGEST_SENSE", MAXERRDETAIL);
1106 break;
1107 }
1108 strlcat(msg, ". ", MAXERRDETAIL);
1109 }
1110 if (hdr->status) {
1111 snprintf(msgchunk, MAXMSGCHUNK, "SG SCSI Status: 0x%02x; ", hdr->status);
1112 strlcat(msg, msgchunk, MAXERRDETAIL);
1113 // SCSI 3 status codes
1114 switch (hdr->status) {
1115 case 0x02:
1116 strlcat(msg, "CHECK_CONDITION", MAXERRDETAIL);
1117 break;
1118 case 0x04:
1119 strlcat(msg, "CONDITION_MET", MAXERRDETAIL);
1120 break;
1121 case 0x08:
1122 strlcat(msg, "BUSY", MAXERRDETAIL);
1123 break;
1124 case 0x10:
1125 strlcat(msg, "INTERMEDIATE", MAXERRDETAIL);
1126 break;
1127 case 0x14:
1128 strlcat(msg, "INTERMEDIATE_CONDITION_MET", MAXERRDETAIL);
1129 break;
1130 case 0x18:
1131 strlcat(msg, "RESERVATION_CONFLICT", MAXERRDETAIL);
1132 break;
1133 case 0x22:
1134 strlcat(msg, "COMMAND_TERMINATED", MAXERRDETAIL);
1135 break;
1136 case 0x28:
1137 strlcat(msg, "TASK_SET_FULL", MAXERRDETAIL);
1138 break;
1139 case 0x30:
1140 strlcat(msg, "ACA_ACTIVE", MAXERRDETAIL);
1141 break;
1142 case 0x40:
1143 strlcat(msg, "TASK_ABORTED", MAXERRDETAIL);
1144 break;
1145 default:
1146 strlcat(msg, "Unknown", MAXERRDETAIL);
1147 break;
1148 }
1149 strlcat(msg, ". ", MAXERRDETAIL);
1150 }
1151 if (hdr->sb_len_wr) {
1152 snprintf(msgchunk, MAXMSGCHUNK, "Sense Data (%d bytes):", hdr->sb_len_wr);
1153 strlcat(msg, msgchunk, MAXERRDETAIL);
1154 for (i = 0; i < hdr->sb_len_wr; i++) {
1155 snprintf(msgchunk, MAXMSGCHUNK, " %02x", hdr->sbp[i]);
1156 strlcat(msg, msgchunk, MAXERRDETAIL);
1157 }
1158 strlcat(msg, ". ", MAXERRDETAIL);
1159 }
1160 if (hdr->resid != 0) {
1161 snprintf(msgchunk, MAXMSGCHUNK, "SG Driver: %d bytes out of %d not transferred. ", hdr->resid, hdr->dxfer_len);
1162 strlcat(msg, msgchunk, MAXERRDETAIL);
5ad7be56 1163 }
b4dbb3ce
VF
1164 if (hdr->cmdp) {
1165 strlcat(msg, "cdb:", MAXERRDETAIL);
1166 for (i = 0; i < hdr->cmd_len; i++) {
1167 snprintf(msgchunk, MAXMSGCHUNK, " %02x", hdr->cmdp[i]);
1168 strlcat(msg, msgchunk, MAXERRDETAIL);
1169 }
1170 strlcat(msg, ". ", MAXERRDETAIL);
1171 if (io_u->ddir == DDIR_TRIM) {
1172 unsigned char *param_list = hdr->dxferp;
1173 strlcat(msg, "dxferp:", MAXERRDETAIL);
1174 for (i = 0; i < hdr->dxfer_len; i++) {
1175 snprintf(msgchunk, MAXMSGCHUNK, " %02x", param_list[i]);
1176 strlcat(msg, msgchunk, MAXERRDETAIL);
1177 }
1178 strlcat(msg, ". ", MAXERRDETAIL);
1179 }
1180 }
5ad7be56
KD
1181 }
1182
fd04fa03 1183 if (!(hdr->info & SG_INFO_CHECK) && !strlen(msg))
36833fb0
BVA
1184 snprintf(msg, MAXERRDETAIL, "%s",
1185 "SG Driver did not report a Host, Driver or Device check");
5ad7be56 1186
fd04fa03 1187 return msg;
5ad7be56
KD
1188}
1189
1190/*
1191 * get max file size from read capacity.
1192 */
1193static int fio_sgio_get_file_size(struct thread_data *td, struct fio_file *f)
1194{
1195 /*
1196 * get_file_size is being called even before sgio_init is
1197 * called, so none of the sg_io structures are
1198 * initialized in the thread_data yet. So we need to do the
1199 * ReadCapacity without any of those helpers. One of the effects
1200 * is that ReadCapacity may get called 4 times on each open:
1201 * readcap(10) followed by readcap(16) if needed - just to get
1202 * the file size after the init occurs - it will be called
1203 * again when "type_check" is called during structure
1204 * initialization I'm not sure how to prevent this little
1205 * inefficiency.
1206 */
1207 unsigned int bs = 0;
1208 unsigned long long max_lba = 0;
1209 int ret;
1210
1211 if (fio_file_size_known(f))
1212 return 0;
1213
686fbd31 1214 if (f->filetype != FIO_TYPE_BLOCK && f->filetype != FIO_TYPE_CHAR) {
30dac136
TK
1215 td_verror(td, EINVAL, "wrong file type");
1216 log_err("ioengine sg only works on block or character devices\n");
1217 return 1;
1218 }
1219
5ad7be56
KD
1220 ret = fio_sgio_read_capacity(td, &bs, &max_lba);
1221 if (ret ) {
1222 td_verror(td, td->error, "fio_sgio_read_capacity");
1223 log_err("ioengine sg unable to successfully execute read capacity to get block size and maximum lba\n");
1224 return 1;
1225 }
1226
1227 f->real_file_size = (max_lba + 1) * bs;
1228 fio_file_set_size_known(f);
1229 return 0;
1230}
1231
1232
5f350952 1233static struct ioengine_ops ioengine = {
2866c82d
JA
1234 .name = "sg",
1235 .version = FIO_IOOPS_VERSION,
1236 .init = fio_sgio_init,
1237 .prep = fio_sgio_prep,
1238 .queue = fio_sgio_queue,
b4b9665e 1239 .commit = fio_sgio_commit,
2866c82d 1240 .getevents = fio_sgio_getevents,
5ad7be56 1241 .errdetails = fio_sgio_errdetails,
2866c82d
JA
1242 .event = fio_sgio_event,
1243 .cleanup = fio_sgio_cleanup,
b5af8293
JA
1244 .open_file = fio_sgio_open,
1245 .close_file = generic_close_file,
fde57152 1246 .get_file_size = fio_sgio_get_file_size,
b2a15192 1247 .flags = FIO_SYNCIO | FIO_RAWIO,
52b81b7c
KD
1248 .options = options,
1249 .option_struct_size = sizeof(struct sg_options)
2866c82d 1250};
34cfcdaf
JA
1251
1252#else /* FIO_HAVE_SGIO */
1253
/*
 * When we have a proper configure system in place, we simply won't build
 * and install this io engine. For now, install a crippled version that
 * just complains and fails to load.
 */
1259static int fio_sgio_init(struct thread_data fio_unused *td)
1260{
a3edaf76 1261 log_err("fio: ioengine sg not available\n");
34cfcdaf
JA
1262 return 1;
1263}
1264
5f350952 1265static struct ioengine_ops ioengine = {
d0c70934 1266 .name = "sg",
34cfcdaf
JA
1267 .version = FIO_IOOPS_VERSION,
1268 .init = fio_sgio_init,
1269};
1270
1271#endif
5f350952
JA
1272
1273static void fio_init fio_sgio_register(void)
1274{
1275 register_ioengine(&ioengine);
1276}
1277
1278static void fio_exit fio_sgio_unregister(void)
1279{
1280 unregister_ioengine(&ioengine);
1281}