sg: add support for WRITE STREAM(16) commands
[fio.git] / engines / sg.c
/*
 * sg engine
 *
 * IO engine that uses the Linux SG v3 interface to talk to SCSI devices
 *
 * This ioengine can operate in two modes:
 *	sync	with block devices (/dev/sdX), or with character devices
 *		(/dev/sgY) when direct=1 or sync=1 is set
 *	async	with character devices (/dev/sgY) when direct=0 and sync=0
 *
 * What value does queue() return for the different cases?
 *				queue() return value
 * In sync mode:
 *  /dev/sdX		RWT	FIO_Q_COMPLETED
 *  /dev/sgY		RWT	FIO_Q_COMPLETED
 *   with direct=1 or sync=1
 *
 * In async mode:
 *  /dev/sgY		RWT	FIO_Q_QUEUED
 *   direct=0 and sync=0
 *
 * Because FIO_SYNCIO is set for this ioengine, td_io_queue() will fill in
 * issue_time *before* each IO is sent to queue()
 *
 * Where are the IO counting functions called for the different cases?
 *
 * In sync mode:
 *  /dev/sdX (commit==NULL)
 *   RWT
 *    io_u_mark_depth()			called in td_io_queue()
 *    io_u_mark_submit/complete()	called in td_io_queue()
 *    issue_time			set in td_io_queue()
 *
 *  /dev/sgY with direct=1 or sync=1 (commit does nothing)
 *   RWT
 *    io_u_mark_depth()			called in td_io_queue()
 *    io_u_mark_submit/complete()	called in queue()
 *    issue_time			set in td_io_queue()
 *
 * In async mode:
 *  /dev/sgY with direct=0 and sync=0
 *   RW: read and write operations are submitted in queue()
 *    io_u_mark_depth()			called in td_io_commit()
 *    io_u_mark_submit()		called in queue()
 *    issue_time			set in td_io_queue()
 *   T: trim operations are queued in queue() and submitted in commit()
 *    io_u_mark_depth()			called in td_io_commit()
 *    io_u_mark_submit()		called in commit()
 *    issue_time			set in commit()
 *
 */
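/*
 * A minimal sketch (for orientation, not part of the engine) of the SG v3
 * character-device flow used below: commands are submitted by write()ing a
 * struct sg_io_hdr to the fd and reaped by read()ing completed headers back:
 *
 *	struct sg_io_hdr hdr = { .interface_id = 'S', ... };
 *	write(fd, &hdr, sizeof(hdr));	// submit, see fio_sgio_rw_doio()
 *	poll(&pfd, 1, -1);		// wait for a completion (POLLIN)
 *	read(fd, &hdr, sizeof(hdr));	// reap it, see fio_sgio_getevents()
 *
 * The kernel echoes usr_ptr back untouched, which is how completed headers
 * are matched to their io_us.
 */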
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <poll.h>

#include "../fio.h"
#include "../optgroup.h"

#ifdef FIO_HAVE_SGIO

#ifndef SGV4_FLAG_HIPRI
#define SGV4_FLAG_HIPRI 0x800
#endif

enum {
	FIO_SG_WRITE = 1,
	FIO_SG_WRITE_VERIFY,
	FIO_SG_WRITE_SAME,
	FIO_SG_WRITE_SAME_NDOB,
	FIO_SG_WRITE_STREAM,
	FIO_SG_VERIFY_BYTCHK_00,
	FIO_SG_VERIFY_BYTCHK_01,
	FIO_SG_VERIFY_BYTCHK_11,
};

struct sg_options {
	void *pad;
	unsigned int hipri;
	unsigned int readfua;
	unsigned int writefua;
	unsigned int write_mode;
	uint16_t stream_id;
};

static struct fio_option options[] = {
	{
		.name	= "hipri",
		.lname	= "High Priority",
		.type	= FIO_OPT_STR_SET,
		.off1	= offsetof(struct sg_options, hipri),
		.help	= "Use polled IO completions",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_SG,
	},
	{
		.name	= "readfua",
		.lname	= "sg engine read fua flag support",
		.type	= FIO_OPT_BOOL,
		.off1	= offsetof(struct sg_options, readfua),
		.help	= "Set FUA flag (force unit access) for all Read operations",
		.def	= "0",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_SG,
	},
	{
		.name	= "writefua",
		.lname	= "sg engine write fua flag support",
		.type	= FIO_OPT_BOOL,
		.off1	= offsetof(struct sg_options, writefua),
		.help	= "Set FUA flag (force unit access) for all Write operations",
		.def	= "0",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_SG,
	},
	{
		.name	= "sg_write_mode",
		.lname	= "specify sg write mode",
		.type	= FIO_OPT_STR,
		.off1	= offsetof(struct sg_options, write_mode),
		.help	= "Specify SCSI WRITE mode",
		.def	= "write",
		.posval = {
			  { .ival = "write",
			    .oval = FIO_SG_WRITE,
			    .help = "Issue standard SCSI WRITE commands",
			  },
			  { .ival = "write_and_verify",
			    .oval = FIO_SG_WRITE_VERIFY,
			    .help = "Issue SCSI WRITE AND VERIFY commands",
			  },
			  { .ival = "verify",
			    .oval = FIO_SG_WRITE_VERIFY,
			    .help = "Issue SCSI WRITE AND VERIFY commands. This "
				    "option is deprecated. Use write_and_verify instead.",
			  },
			  { .ival = "write_same",
			    .oval = FIO_SG_WRITE_SAME,
			    .help = "Issue SCSI WRITE SAME commands",
			  },
			  { .ival = "same",
			    .oval = FIO_SG_WRITE_SAME,
			    .help = "Issue SCSI WRITE SAME commands. This "
				    "option is deprecated. Use write_same instead.",
			  },
			  { .ival = "write_same_ndob",
			    .oval = FIO_SG_WRITE_SAME_NDOB,
			    .help = "Issue SCSI WRITE SAME(16) commands with NDOB flag set",
			  },
			  { .ival = "verify_bytchk_00",
			    .oval = FIO_SG_VERIFY_BYTCHK_00,
			    .help = "Issue SCSI VERIFY commands with BYTCHK set to 00",
			  },
			  { .ival = "verify_bytchk_01",
			    .oval = FIO_SG_VERIFY_BYTCHK_01,
			    .help = "Issue SCSI VERIFY commands with BYTCHK set to 01",
			  },
			  { .ival = "verify_bytchk_11",
			    .oval = FIO_SG_VERIFY_BYTCHK_11,
			    .help = "Issue SCSI VERIFY commands with BYTCHK set to 11",
			  },
			  { .ival = "write_stream",
			    .oval = FIO_SG_WRITE_STREAM,
			    .help = "Issue SCSI WRITE STREAM(16) commands",
			  },
		},
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_SG,
	},
	{
		.name	= "stream_id",
		.lname	= "stream id for WRITE STREAM(16) commands",
		.type	= FIO_OPT_INT,
		.off1	= offsetof(struct sg_options, stream_id),
		.help	= "Stream ID for WRITE STREAM(16) commands",
		.def	= "0",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_SG,
	},
	{
		.name	= NULL,
	},
};
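/*
 * Illustrative job section (not from this file) exercising the options
 * above to issue WRITE STREAM(16) commands with a specific stream ID:
 *
 *	[write-stream]
 *	ioengine=sg
 *	filename=/dev/sg0
 *	rw=write
 *	sg_write_mode=write_stream
 *	stream_id=1
 */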

#define MAX_10B_LBA  0xFFFFFFFFULL
#define SCSI_TIMEOUT_MS 30000   // 30 second timeout; currently no method to override
#define MAX_SB 64               // sense block maximum return size
/*
#define FIO_SGIO_DEBUG
*/

struct sgio_cmd {
	unsigned char cdb[16];      // enhanced from 10 to support 16 byte commands
	unsigned char sb[MAX_SB];   // add sense block to commands
	int nr;
};

struct sgio_trim {
	uint8_t *unmap_param;
	unsigned int unmap_range_count;
	struct io_u **trim_io_us;
};

struct sgio_data {
	struct sgio_cmd *cmds;
	struct io_u **events;
	struct pollfd *pfds;
	int *fd_flags;
	void *sgbuf;
	unsigned int bs;
	int type_checked;
	struct sgio_trim **trim_queues;
	int current_queue;
#ifdef FIO_SGIO_DEBUG
	unsigned int *trim_queue_map;
#endif
};

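/*
 * SCSI CDBs and returned parameter data carry multi-byte fields in
 * big-endian byte order. The helpers below convert between CPU and
 * big-endian order, copying with memcpy() to avoid unaligned accesses.
 */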
static inline uint32_t sgio_get_be32(uint8_t *buf)
{
	return be32_to_cpu(*((uint32_t *) buf));
}

static inline uint64_t sgio_get_be64(uint8_t *buf)
{
	return be64_to_cpu(*((uint64_t *) buf));
}

static inline void sgio_set_be16(uint16_t val, uint8_t *buf)
{
	uint16_t t = cpu_to_be16(val);

	memcpy(buf, &t, sizeof(uint16_t));
}

static inline void sgio_set_be32(uint32_t val, uint8_t *buf)
{
	uint32_t t = cpu_to_be32(val);

	memcpy(buf, &t, sizeof(uint32_t));
}

static inline void sgio_set_be64(uint64_t val, uint8_t *buf)
{
	uint64_t t = cpu_to_be64(val);

	memcpy(buf, &t, sizeof(uint64_t));
}

static inline bool sgio_unbuffered(struct thread_data *td)
{
	return (td->o.odirect || td->o.sync_io);
}

static void sgio_hdr_init(struct sgio_data *sd, struct sg_io_hdr *hdr,
			  struct io_u *io_u, int fs)
{
	struct sgio_cmd *sc = &sd->cmds[io_u->index];

	memset(hdr, 0, sizeof(*hdr));
	memset(sc->cdb, 0, sizeof(sc->cdb));

	hdr->interface_id = 'S';
	hdr->cmdp = sc->cdb;
	hdr->cmd_len = sizeof(sc->cdb);
	hdr->sbp = sc->sb;
	hdr->mx_sb_len = sizeof(sc->sb);
	hdr->pack_id = io_u->index;
	hdr->usr_ptr = io_u;
	hdr->timeout = SCSI_TIMEOUT_MS;

	if (fs) {
		hdr->dxferp = io_u->xfer_buf;
		hdr->dxfer_len = io_u->xfer_buflen;
	}
}

static int pollin_events(struct pollfd *pfds, int fds)
{
	int i;

	for (i = 0; i < fds; i++)
		if (pfds[i].revents & POLLIN)
			return 1;

	return 0;
}

static int sg_fd_read(int fd, void *data, size_t size)
{
	int err = 0;

	while (size) {
		ssize_t ret;

		ret = read(fd, data, size);
		if (ret < 0) {
			if (errno == EAGAIN || errno == EINTR)
				continue;
			err = errno;
			break;
		} else if (!ret)
			break;
		else {
			data += ret;
			size -= ret;
		}
	}

	if (err)
		return err;
	if (size)
		return EAGAIN;

	return 0;
}

static int fio_sgio_getevents(struct thread_data *td, unsigned int min,
			      unsigned int max,
			      const struct timespec fio_unused *t)
{
	struct sgio_data *sd = td->io_ops_data;
	int left = max, eventNum, ret, r = 0, trims = 0;
	void *buf = sd->sgbuf;
	unsigned int i, j, events;
	struct fio_file *f;
	struct io_u *io_u;

	/*
	 * Fill in the file descriptors
	 */
	for_each_file(td, f, i) {
		/*
		 * don't block for min events == 0
		 */
		if (!min)
			sd->fd_flags[i] = fio_set_fd_nonblocking(f->fd, "sg");
		else
			sd->fd_flags[i] = -1;

		sd->pfds[i].fd = f->fd;
		sd->pfds[i].events = POLLIN;
	}

	/*
	** There are two counters here:
	**  - number of SCSI commands completed
	**  - number of io_us completed
	**
	** These are the same with reads and writes, but
	** could differ with trim/unmap commands because
	** a single unmap can include multiple io_us
	*/

	while (left > 0) {
		char *p;

		dprint(FD_IO, "sgio_getevents: sd %p: min=%d, max=%d, left=%d\n", sd, min, max, left);

		do {
			if (!min)
				break;

			ret = poll(sd->pfds, td->o.nr_files, -1);
			if (ret < 0) {
				if (!r)
					r = -errno;
				td_verror(td, errno, "poll");
				break;
			} else if (!ret)
				continue;

			if (pollin_events(sd->pfds, td->o.nr_files))
				break;
		} while (1);

		if (r < 0)
			break;

re_read:
		p = buf;
		events = 0;
		for_each_file(td, f, i) {
			for (eventNum = 0; eventNum < left; eventNum++) {
				ret = sg_fd_read(f->fd, p, sizeof(struct sg_io_hdr));
				dprint(FD_IO, "sgio_getevents: sg_fd_read ret: %d\n", ret);
				if (ret) {
					r = -ret;
					td_verror(td, r, "sg_read");
					break;
				}
				io_u = ((struct sg_io_hdr *)p)->usr_ptr;
				if (io_u->ddir == DDIR_TRIM) {
					events += sd->trim_queues[io_u->index]->unmap_range_count;
					eventNum += sd->trim_queues[io_u->index]->unmap_range_count - 1;
				} else
					events++;

				p += sizeof(struct sg_io_hdr);
				dprint(FD_IO, "sgio_getevents: events: %d, eventNum: %d, left: %d\n", events, eventNum, left);
			}
		}

		if (r < 0 && !events)
			break;
		if (!events) {
			usleep(1000);
			goto re_read;
		}

		left -= events;
		r += events;

		for (i = 0; i < events; i++) {
			struct sg_io_hdr *hdr = (struct sg_io_hdr *) buf + i;
			sd->events[i + trims] = hdr->usr_ptr;
			io_u = (struct io_u *)(hdr->usr_ptr);

			if (hdr->info & SG_INFO_CHECK) {
				/* record if an io error occurred, ignore resid */
				memcpy(&io_u->hdr, hdr, sizeof(struct sg_io_hdr));
				sd->events[i + trims]->error = EIO;
			}

			if (io_u->ddir == DDIR_TRIM) {
				struct sgio_trim *st = sd->trim_queues[io_u->index];
#ifdef FIO_SGIO_DEBUG
				assert(st->trim_io_us[0] == io_u);
				assert(sd->trim_queue_map[io_u->index] == io_u->index);
				dprint(FD_IO, "sgio_getevents: reaping %d io_us from trim queue %d\n", st->unmap_range_count, io_u->index);
				dprint(FD_IO, "sgio_getevents: reaped io_u %d and stored in events[%d]\n", io_u->index, i+trims);
#endif
				for (j = 1; j < st->unmap_range_count; j++) {
					++trims;
					sd->events[i + trims] = st->trim_io_us[j];
#ifdef FIO_SGIO_DEBUG
					dprint(FD_IO, "sgio_getevents: reaped io_u %d and stored in events[%d]\n", st->trim_io_us[j]->index, i+trims);
					assert(sd->trim_queue_map[st->trim_io_us[j]->index] == io_u->index);
#endif
					if (hdr->info & SG_INFO_CHECK) {
						/* record if an io error occurred, ignore resid */
						memcpy(&st->trim_io_us[j]->hdr, hdr, sizeof(struct sg_io_hdr));
						sd->events[i + trims]->error = EIO;
					}
				}
				events -= st->unmap_range_count - 1;
				st->unmap_range_count = 0;
			}
		}
	}

	if (!min) {
		for_each_file(td, f, i) {
			if (sd->fd_flags[i] == -1)
				continue;

			if (fcntl(f->fd, F_SETFL, sd->fd_flags[i]) < 0)
				log_err("fio: sg failed to restore fcntl flags: %s\n", strerror(errno));
		}
	}

	return r;
}

static enum fio_q_status fio_sgio_ioctl_doio(struct thread_data *td,
					     struct fio_file *f,
					     struct io_u *io_u)
{
	struct sgio_data *sd = td->io_ops_data;
	struct sg_io_hdr *hdr = &io_u->hdr;
	int ret;

	sd->events[0] = io_u;

	ret = ioctl(f->fd, SG_IO, hdr);
	if (ret < 0)
		return ret;

	/* record if an io error occurred */
	if (hdr->info & SG_INFO_CHECK)
		io_u->error = EIO;

	return FIO_Q_COMPLETED;
}

static enum fio_q_status fio_sgio_rw_doio(struct thread_data *td,
					  struct fio_file *f,
					  struct io_u *io_u, int do_sync)
{
	struct sg_io_hdr *hdr = &io_u->hdr;
	int ret;

	ret = write(f->fd, hdr, sizeof(*hdr));
	if (ret < 0)
		return ret;

	if (do_sync) {
		/*
		 * We can't just read back the first command that completes
		 * and assume it's the one we need, it could be any command
		 * that is inflight.
		 */
		do {
			struct io_u *__io_u;

			ret = read(f->fd, hdr, sizeof(*hdr));
			if (ret < 0)
				return ret;

			__io_u = hdr->usr_ptr;

			/* record if an io error occurred */
			if (hdr->info & SG_INFO_CHECK)
				__io_u->error = EIO;

			if (__io_u == io_u)
				break;

			if (io_u_sync_complete(td, __io_u))
				break;

		} while (1);

		return FIO_Q_COMPLETED;
	}

	return FIO_Q_QUEUED;
}

static enum fio_q_status fio_sgio_doio(struct thread_data *td,
				       struct io_u *io_u, int do_sync)
{
	struct fio_file *f = io_u->file;
	enum fio_q_status ret;

	if (f->filetype == FIO_TYPE_BLOCK) {
		ret = fio_sgio_ioctl_doio(td, f, io_u);
		if (io_u->error)
			td_verror(td, io_u->error, __func__);
	} else {
		ret = fio_sgio_rw_doio(td, f, io_u, do_sync);
		if (io_u->error && do_sync)
			td_verror(td, io_u->error, __func__);
	}

	return ret;
}

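/*
 * Fill in the LBA and transfer length fields of a READ/WRITE CDB. The
 * field offsets differ between the 10- and 16-byte variants:
 *
 *	(10): LBA in bytes 2-5 (32 bits), length in bytes 7-8 (16 bits)
 *	(16): LBA in bytes 2-9 (64 bits), length in bytes 10-13 (32 bits)
 */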
static void fio_sgio_rw_lba(struct sg_io_hdr *hdr, unsigned long long lba,
			    unsigned long long nr_blocks, bool override16)
{
	if (lba < MAX_10B_LBA && !override16) {
		sgio_set_be32((uint32_t) lba, &hdr->cmdp[2]);
		sgio_set_be16((uint16_t) nr_blocks, &hdr->cmdp[7]);
	} else {
		sgio_set_be64(lba, &hdr->cmdp[2]);
		sgio_set_be32((uint32_t) nr_blocks, &hdr->cmdp[10]);
	}

	return;
}

static int fio_sgio_prep(struct thread_data *td, struct io_u *io_u)
{
	struct sg_io_hdr *hdr = &io_u->hdr;
	struct sg_options *o = td->eo;
	struct sgio_data *sd = td->io_ops_data;
	unsigned long long nr_blocks, lba;
	int offset;

	if (io_u->xfer_buflen & (sd->bs - 1)) {
		log_err("read/write not sector aligned\n");
		return EINVAL;
	}

	nr_blocks = io_u->xfer_buflen / sd->bs;
	lba = io_u->offset / sd->bs;

	if (io_u->ddir == DDIR_READ) {
		sgio_hdr_init(sd, hdr, io_u, 1);

		hdr->dxfer_direction = SG_DXFER_FROM_DEV;
		if (lba < MAX_10B_LBA)
			hdr->cmdp[0] = 0x28; // read(10)
		else
			hdr->cmdp[0] = 0x88; // read(16)

		if (o->hipri)
			hdr->flags |= SGV4_FLAG_HIPRI;
		if (o->readfua)
			hdr->cmdp[1] |= 0x08;

		fio_sgio_rw_lba(hdr, lba, nr_blocks, false);

	} else if (io_u->ddir == DDIR_WRITE) {
		sgio_hdr_init(sd, hdr, io_u, 1);

		hdr->dxfer_direction = SG_DXFER_TO_DEV;
		switch(o->write_mode) {
		case FIO_SG_WRITE:
			if (lba < MAX_10B_LBA)
				hdr->cmdp[0] = 0x2a; // write(10)
			else
				hdr->cmdp[0] = 0x8a; // write(16)
			if (o->hipri)
				hdr->flags |= SGV4_FLAG_HIPRI;
			if (o->writefua)
				hdr->cmdp[1] |= 0x08;
			break;
		case FIO_SG_WRITE_VERIFY:
			if (lba < MAX_10B_LBA)
				hdr->cmdp[0] = 0x2e; // write and verify(10)
			else
				hdr->cmdp[0] = 0x8e; // write and verify(16)
			break;
		// BYTCHK is disabled by virtue of the memset in sgio_hdr_init
		case FIO_SG_WRITE_SAME:
			hdr->dxfer_len = sd->bs;
			if (lba < MAX_10B_LBA)
				hdr->cmdp[0] = 0x41; // write same(10)
			else
				hdr->cmdp[0] = 0x93; // write same(16)
			break;
		case FIO_SG_WRITE_SAME_NDOB:
			hdr->cmdp[0] = 0x93; // write same(16)
			hdr->cmdp[1] |= 0x1; // no data output buffer
			hdr->dxfer_len = 0;
			break;
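		/*
		 * WRITE STREAM(16) CDB layout (per SBC-4): opcode 0x9a in
		 * byte 0, LBA in bytes 2-9, stream ID in bytes 10-11 and
		 * transfer length in bytes 12-13 -- which is why this case
		 * sets its own fields and bypasses fio_sgio_rw_lba() below.
		 */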
		case FIO_SG_WRITE_STREAM:
			hdr->cmdp[0] = 0x9a; // write stream (16)
			if (o->writefua)
				hdr->cmdp[1] |= 0x08;
			sgio_set_be64(lba, &hdr->cmdp[2]);
			sgio_set_be16(o->stream_id, &hdr->cmdp[10]);
			sgio_set_be16((uint16_t) nr_blocks, &hdr->cmdp[12]);
			break;
		case FIO_SG_VERIFY_BYTCHK_00:
			if (lba < MAX_10B_LBA)
				hdr->cmdp[0] = 0x2f; // VERIFY(10)
			else
				hdr->cmdp[0] = 0x8f; // VERIFY(16)
			hdr->dxfer_len = 0;
			break;
		case FIO_SG_VERIFY_BYTCHK_01:
			if (lba < MAX_10B_LBA)
				hdr->cmdp[0] = 0x2f; // VERIFY(10)
			else
				hdr->cmdp[0] = 0x8f; // VERIFY(16)
			hdr->cmdp[1] |= 0x02; // BYTCHK = 01b
			break;
		case FIO_SG_VERIFY_BYTCHK_11:
			if (lba < MAX_10B_LBA)
				hdr->cmdp[0] = 0x2f; // VERIFY(10)
			else
				hdr->cmdp[0] = 0x8f; // VERIFY(16)
			hdr->cmdp[1] |= 0x06; // BYTCHK = 11b
			hdr->dxfer_len = sd->bs;
			break;
		}

		if (o->write_mode != FIO_SG_WRITE_STREAM)
			fio_sgio_rw_lba(hdr, lba, nr_blocks,
					o->write_mode == FIO_SG_WRITE_SAME_NDOB);

	} else if (io_u->ddir == DDIR_TRIM) {
		struct sgio_trim *st;

		if (sd->current_queue == -1) {
			sgio_hdr_init(sd, hdr, io_u, 0);

			hdr->cmd_len = 10;
			hdr->dxfer_direction = SG_DXFER_TO_DEV;
			hdr->cmdp[0] = 0x42; // unmap
			sd->current_queue = io_u->index;
			st = sd->trim_queues[sd->current_queue];
			hdr->dxferp = st->unmap_param;
#ifdef FIO_SGIO_DEBUG
			assert(sd->trim_queues[io_u->index]->unmap_range_count == 0);
			dprint(FD_IO, "sg: creating new queue based on io_u %d\n", io_u->index);
#endif
		} else
			st = sd->trim_queues[sd->current_queue];

		dprint(FD_IO, "sg: adding io_u %d to trim queue %d\n", io_u->index, sd->current_queue);
		st->trim_io_us[st->unmap_range_count] = io_u;
#ifdef FIO_SGIO_DEBUG
		sd->trim_queue_map[io_u->index] = sd->current_queue;
#endif

		offset = 8 + 16 * st->unmap_range_count;
		sgio_set_be64(lba, &st->unmap_param[offset]);
		sgio_set_be32((uint32_t) nr_blocks, &st->unmap_param[offset + 8]);

		st->unmap_range_count++;

	} else if (ddir_sync(io_u->ddir)) {
		sgio_hdr_init(sd, hdr, io_u, 0);
		hdr->dxfer_direction = SG_DXFER_NONE;
		if (lba < MAX_10B_LBA)
			hdr->cmdp[0] = 0x35; // synccache(10)
		else
			hdr->cmdp[0] = 0x91; // synccache(16)
	} else
		assert(0);

	return 0;
}

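/*
 * Fill in the UNMAP parameter list header once all block descriptors for
 * the current trim queue have been added. Layout per SBC: bytes 0-1 hold
 * the unmap data length (descriptor bytes + 6), bytes 2-3 the block
 * descriptor data length, bytes 4-7 are reserved, and each 16-byte
 * descriptor carries an 8-byte LBA and a 4-byte block count (filled in
 * by fio_sgio_prep()). Bytes 7-8 of the UNMAP CDB carry the total
 * parameter list length.
 */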
static void fio_sgio_unmap_setup(struct sg_io_hdr *hdr, struct sgio_trim *st)
{
	uint16_t cnt = st->unmap_range_count * 16;

	hdr->dxfer_len = cnt + 8;
	sgio_set_be16(cnt + 8, &hdr->cmdp[7]);
	sgio_set_be16(cnt + 6, st->unmap_param);
	sgio_set_be16(cnt, &st->unmap_param[2]);

	return;
}

static enum fio_q_status fio_sgio_queue(struct thread_data *td,
					struct io_u *io_u)
{
	struct sg_io_hdr *hdr = &io_u->hdr;
	struct sgio_data *sd = td->io_ops_data;
	int ret, do_sync = 0;

	fio_ro_check(td, io_u);

	if (sgio_unbuffered(td) || ddir_sync(io_u->ddir))
		do_sync = 1;

	if (io_u->ddir == DDIR_TRIM) {
		if (do_sync || io_u->file->filetype == FIO_TYPE_BLOCK) {
			struct sgio_trim *st = sd->trim_queues[sd->current_queue];

			/* finish cdb setup for unmap because we are
			** doing unmap commands synchronously */
#ifdef FIO_SGIO_DEBUG
			assert(st->unmap_range_count == 1);
			assert(io_u == st->trim_io_us[0]);
#endif
			hdr = &io_u->hdr;

			fio_sgio_unmap_setup(hdr, st);

			st->unmap_range_count = 0;
			sd->current_queue = -1;
		} else
			/* queue up trim ranges and submit in commit() */
			return FIO_Q_QUEUED;
	}

	ret = fio_sgio_doio(td, io_u, do_sync);

	if (ret < 0)
		io_u->error = errno;
	else if (hdr->status) {
		io_u->resid = hdr->resid;
		io_u->error = EIO;
	} else if (td->io_ops->commit != NULL) {
		if (do_sync && !ddir_sync(io_u->ddir)) {
			io_u_mark_submit(td, 1);
			io_u_mark_complete(td, 1);
		} else if (io_u->ddir == DDIR_READ || io_u->ddir == DDIR_WRITE) {
			io_u_mark_submit(td, 1);
			io_u_queued(td, io_u);
		}
	}

	if (io_u->error) {
		td_verror(td, io_u->error, "xfer");
		return FIO_Q_COMPLETED;
	}

	return ret;
}

static int fio_sgio_commit(struct thread_data *td)
{
	struct sgio_data *sd = td->io_ops_data;
	struct sgio_trim *st;
	struct io_u *io_u;
	struct sg_io_hdr *hdr;
	struct timespec now;
	unsigned int i;
	int ret;

	if (sd->current_queue == -1)
		return 0;

	st = sd->trim_queues[sd->current_queue];
	io_u = st->trim_io_us[0];
	hdr = &io_u->hdr;

	fio_sgio_unmap_setup(hdr, st);

	sd->current_queue = -1;

	ret = fio_sgio_rw_doio(td, io_u->file, io_u, 0);

	if (ret < 0 || hdr->status) {
		int error;

		if (ret < 0)
			error = errno;
		else {
			error = EIO;
			ret = -EIO;
		}

		for (i = 0; i < st->unmap_range_count; i++) {
			st->trim_io_us[i]->error = error;
			clear_io_u(td, st->trim_io_us[i]);
			if (hdr->status)
				st->trim_io_us[i]->resid = hdr->resid;
		}

		td_verror(td, error, "xfer");
		return ret;
	}

	if (fio_fill_issue_time(td)) {
		fio_gettime(&now, NULL);
		for (i = 0; i < st->unmap_range_count; i++) {
			memcpy(&st->trim_io_us[i]->issue_time, &now, sizeof(now));
			/* account queued time for each io_u in the batch,
			   not just the head io_u */
			io_u_queued(td, st->trim_io_us[i]);
		}
	}
	io_u_mark_submit(td, st->unmap_range_count);

	return 0;
}

static struct io_u *fio_sgio_event(struct thread_data *td, int event)
{
	struct sgio_data *sd = td->io_ops_data;

	return sd->events[event];
}

static int fio_sgio_read_capacity(struct thread_data *td, unsigned int *bs,
				  unsigned long long *max_lba)
{
	/*
	 * need to do read capacity operation w/o benefit of sd or
	 * io_u structures, which are not initialized until later.
	 */
	struct sg_io_hdr hdr;
	unsigned long long hlba;
	unsigned int blksz = 0;
	unsigned char cmd[16];
	unsigned char sb[64];
	unsigned char buf[32]; // read capacity return
	int ret;
	int fd = -1;

	struct fio_file *f = td->files[0];

	/* open file independent of rest of application */
	fd = open(f->file_name, O_RDONLY);
	if (fd < 0)
		return -errno;

	memset(&hdr, 0, sizeof(hdr));
	memset(cmd, 0, sizeof(cmd));
	memset(sb, 0, sizeof(sb));
	memset(buf, 0, sizeof(buf));

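	/*
	 * READ CAPACITY(10) returns the max LBA in bytes 0-3 and the block
	 * size in bytes 4-7 of its response; READ CAPACITY(16) returns an
	 * 8-byte max LBA in bytes 0-7 and the block size in bytes 8-11.
	 * The parsing below relies on exactly that layout.
	 */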
	/* First let's try a 10 byte read capacity. */
	hdr.interface_id = 'S';
	hdr.cmdp = cmd;
	hdr.cmd_len = 10;
	hdr.sbp = sb;
	hdr.mx_sb_len = sizeof(sb);
	hdr.timeout = SCSI_TIMEOUT_MS;
	hdr.cmdp[0] = 0x25;  // Read Capacity(10)
	hdr.dxfer_direction = SG_DXFER_FROM_DEV;
	hdr.dxferp = buf;
	hdr.dxfer_len = sizeof(buf);

	ret = ioctl(fd, SG_IO, &hdr);
	if (ret < 0) {
		close(fd);
		return ret;
	}

	if (hdr.info & SG_INFO_CHECK) {
		/* RCAP(10) might be unsupported by device. Force RCAP(16) */
		hlba = MAX_10B_LBA;
	} else {
		blksz = sgio_get_be32(&buf[4]);
		hlba = sgio_get_be32(buf);
	}

	/*
	 * If max lba masked by MAX_10B_LBA equals MAX_10B_LBA,
	 * then need to retry with 16 byte Read Capacity command.
	 */
	if (hlba == MAX_10B_LBA) {
		hdr.cmd_len = 16;
		hdr.cmdp[0] = 0x9e; // service action
		hdr.cmdp[1] = 0x10; // Read Capacity(16)
		sgio_set_be32(sizeof(buf), &hdr.cmdp[10]);

		hdr.dxfer_direction = SG_DXFER_FROM_DEV;
		hdr.dxferp = buf;
		hdr.dxfer_len = sizeof(buf);

		ret = ioctl(fd, SG_IO, &hdr);
		if (ret < 0) {
			close(fd);
			return ret;
		}

		/* record if an io error occurred */
		if (hdr.info & SG_INFO_CHECK)
			td_verror(td, EIO, "fio_sgio_read_capacity");

		blksz = sgio_get_be32(&buf[8]);
		hlba = sgio_get_be64(buf);
	}

	if (blksz) {
		*bs = blksz;
		*max_lba = hlba;
		ret = 0;
	} else {
		ret = EIO;
	}

	close(fd);
	return ret;
}

static void fio_sgio_cleanup(struct thread_data *td)
{
	struct sgio_data *sd = td->io_ops_data;
	int i;

	if (sd) {
		free(sd->events);
		free(sd->cmds);
		free(sd->fd_flags);
		free(sd->pfds);
		free(sd->sgbuf);
#ifdef FIO_SGIO_DEBUG
		free(sd->trim_queue_map);
#endif

		for (i = 0; i < td->o.iodepth; i++) {
			free(sd->trim_queues[i]->unmap_param);
			free(sd->trim_queues[i]->trim_io_us);
			free(sd->trim_queues[i]);
		}

		free(sd->trim_queues);
		free(sd);
	}
}

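/*
 * Per-thread setup. Note the trim bookkeeping: every io_u index gets its
 * own sgio_trim slot, and each unmap_param buffer is sized for an 8-byte
 * parameter list header plus one 16-byte block descriptor per queued
 * io_u (the calloc(iodepth + 1, 16) below covers that with a few bytes
 * to spare).
 */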
static int fio_sgio_init(struct thread_data *td)
{
	struct sgio_data *sd;
	struct sgio_trim *st;
	struct sg_io_hdr *h3p;
	int i;

	sd = calloc(1, sizeof(*sd));
	sd->cmds = calloc(td->o.iodepth, sizeof(struct sgio_cmd));
	sd->sgbuf = calloc(td->o.iodepth, sizeof(struct sg_io_hdr));
	sd->events = calloc(td->o.iodepth, sizeof(struct io_u *));
	sd->pfds = calloc(td->o.nr_files, sizeof(struct pollfd));
	sd->fd_flags = calloc(td->o.nr_files, sizeof(int));
	sd->type_checked = 0;

	sd->trim_queues = calloc(td->o.iodepth, sizeof(struct sgio_trim *));
	sd->current_queue = -1;
#ifdef FIO_SGIO_DEBUG
	sd->trim_queue_map = calloc(td->o.iodepth, sizeof(int));
#endif
	for (i = 0, h3p = sd->sgbuf; i < td->o.iodepth; i++, ++h3p) {
		sd->trim_queues[i] = calloc(1, sizeof(struct sgio_trim));
		st = sd->trim_queues[i];
		st->unmap_param = calloc(td->o.iodepth + 1, sizeof(char[16]));
		st->unmap_range_count = 0;
		st->trim_io_us = calloc(td->o.iodepth, sizeof(struct io_u *));
		h3p->interface_id = 'S';
	}

	td->io_ops_data = sd;

	/*
	 * we want to do it, regardless of whether odirect is set or not
	 */
	td->o.override_sync = 1;
	return 0;
}

static int fio_sgio_type_check(struct thread_data *td, struct fio_file *f)
{
	struct sgio_data *sd = td->io_ops_data;
	unsigned int bs = 0;
	unsigned long long max_lba = 0;

	if (f->filetype == FIO_TYPE_BLOCK) {
		if (ioctl(f->fd, BLKSSZGET, &bs) < 0) {
			td_verror(td, errno, "ioctl");
			return 1;
		}
	} else if (f->filetype == FIO_TYPE_CHAR) {
		int version, ret;

		if (ioctl(f->fd, SG_GET_VERSION_NUM, &version) < 0) {
			td_verror(td, errno, "ioctl");
			return 1;
		}

		ret = fio_sgio_read_capacity(td, &bs, &max_lba);
		if (ret) {
			td_verror(td, td->error, "fio_sgio_read_capacity");
			log_err("ioengine sg unable to read capacity successfully\n");
			return 1;
		}
	} else {
		td_verror(td, EINVAL, "wrong file type");
		log_err("ioengine sg only works on block or character devices\n");
		return 1;
	}

	sd->bs = bs;
	// Determine size of commands needed based on max_lba
	if (max_lba >= MAX_10B_LBA) {
		dprint(FD_IO, "sgio_type_check: using 16 byte read/write "
			"commands for lba above 0x%016llx/0x%016llx\n",
			MAX_10B_LBA, max_lba);
	}

	if (f->filetype == FIO_TYPE_BLOCK) {
		td->io_ops->getevents = NULL;
		td->io_ops->event = NULL;
		td->io_ops->commit = NULL;
		/*
		** Setting these functions to null may cause problems
		** with filename=/dev/sda:/dev/sg0 since we are only
		** considering a single file
		*/
	}
	sd->type_checked = 1;

	return 0;
}

static int fio_sgio_open(struct thread_data *td, struct fio_file *f)
{
	struct sgio_data *sd = td->io_ops_data;
	int ret;

	ret = generic_open_file(td, f);
	if (ret)
		return ret;

	if (sd && !sd->type_checked && fio_sgio_type_check(td, f)) {
		ret = generic_close_file(td, f);
		return ret;
	}

	return 0;
}

/*
 * Build an error string with details about the driver, host or scsi
 * error contained in the sg header. The caller will use it as necessary.
 */
static char *fio_sgio_errdetails(struct io_u *io_u)
{
	struct sg_io_hdr *hdr = &io_u->hdr;
#define MAXERRDETAIL 1024
#define MAXMSGCHUNK  128
	char *msg, msgchunk[MAXMSGCHUNK];
	int i;

	msg = calloc(1, MAXERRDETAIL);
	strcpy(msg, "");

	/*
	 * can't seem to find sg_err.h, so echo the define values here
	 * so others can search the internet for clearer clues to their
	 * meaning.
	 */
	if (hdr->info & SG_INFO_CHECK) {
		if (hdr->host_status) {
			snprintf(msgchunk, MAXMSGCHUNK, "SG Host Status: 0x%02x; ", hdr->host_status);
			strlcat(msg, msgchunk, MAXERRDETAIL);
			switch (hdr->host_status) {
			case 0x01:
				strlcat(msg, "SG_ERR_DID_NO_CONNECT", MAXERRDETAIL);
				break;
			case 0x02:
				strlcat(msg, "SG_ERR_DID_BUS_BUSY", MAXERRDETAIL);
				break;
			case 0x03:
				strlcat(msg, "SG_ERR_DID_TIME_OUT", MAXERRDETAIL);
				break;
			case 0x04:
				strlcat(msg, "SG_ERR_DID_BAD_TARGET", MAXERRDETAIL);
				break;
			case 0x05:
				strlcat(msg, "SG_ERR_DID_ABORT", MAXERRDETAIL);
				break;
			case 0x06:
				strlcat(msg, "SG_ERR_DID_PARITY", MAXERRDETAIL);
				break;
			case 0x07:
				strlcat(msg, "SG_ERR_DID_ERROR (internal error)", MAXERRDETAIL);
				break;
			case 0x08:
				strlcat(msg, "SG_ERR_DID_RESET", MAXERRDETAIL);
				break;
			case 0x09:
				strlcat(msg, "SG_ERR_DID_BAD_INTR (unexpected)", MAXERRDETAIL);
				break;
			case 0x0a:
				strlcat(msg, "SG_ERR_DID_PASSTHROUGH", MAXERRDETAIL);
				break;
			case 0x0b:
				strlcat(msg, "SG_ERR_DID_SOFT_ERROR (driver retry?)", MAXERRDETAIL);
				break;
			case 0x0c:
				strlcat(msg, "SG_ERR_DID_IMM_RETRY", MAXERRDETAIL);
				break;
			case 0x0d:
				strlcat(msg, "SG_ERR_DID_REQUEUE", MAXERRDETAIL);
				break;
			case 0x0e:
				strlcat(msg, "SG_ERR_DID_TRANSPORT_DISRUPTED", MAXERRDETAIL);
				break;
			case 0x0f:
				strlcat(msg, "SG_ERR_DID_TRANSPORT_FAILFAST", MAXERRDETAIL);
				break;
			case 0x10:
				strlcat(msg, "SG_ERR_DID_TARGET_FAILURE", MAXERRDETAIL);
				break;
			case 0x11:
				strlcat(msg, "SG_ERR_DID_NEXUS_FAILURE", MAXERRDETAIL);
				break;
			case 0x12:
				strlcat(msg, "SG_ERR_DID_ALLOC_FAILURE", MAXERRDETAIL);
				break;
			case 0x13:
				strlcat(msg, "SG_ERR_DID_MEDIUM_ERROR", MAXERRDETAIL);
				break;
			default:
				strlcat(msg, "Unknown", MAXERRDETAIL);
				break;
			}
			strlcat(msg, ". ", MAXERRDETAIL);
		}
		if (hdr->driver_status) {
			snprintf(msgchunk, MAXMSGCHUNK, "SG Driver Status: 0x%02x; ", hdr->driver_status);
			strlcat(msg, msgchunk, MAXERRDETAIL);
			switch (hdr->driver_status & 0x0F) {
			case 0x01:
				strlcat(msg, "SG_ERR_DRIVER_BUSY", MAXERRDETAIL);
				break;
			case 0x02:
				strlcat(msg, "SG_ERR_DRIVER_SOFT", MAXERRDETAIL);
				break;
			case 0x03:
				strlcat(msg, "SG_ERR_DRIVER_MEDIA", MAXERRDETAIL);
				break;
			case 0x04:
				strlcat(msg, "SG_ERR_DRIVER_ERROR", MAXERRDETAIL);
				break;
			case 0x05:
				strlcat(msg, "SG_ERR_DRIVER_INVALID", MAXERRDETAIL);
				break;
			case 0x06:
				strlcat(msg, "SG_ERR_DRIVER_TIMEOUT", MAXERRDETAIL);
				break;
			case 0x07:
				strlcat(msg, "SG_ERR_DRIVER_HARD", MAXERRDETAIL);
				break;
			case 0x08:
				strlcat(msg, "SG_ERR_DRIVER_SENSE", MAXERRDETAIL);
				break;
			default:
				strlcat(msg, "Unknown", MAXERRDETAIL);
				break;
			}
			strlcat(msg, "; ", MAXERRDETAIL);
			switch (hdr->driver_status & 0xF0) {
			case 0x10:
				strlcat(msg, "SG_ERR_SUGGEST_RETRY", MAXERRDETAIL);
				break;
			case 0x20:
				strlcat(msg, "SG_ERR_SUGGEST_ABORT", MAXERRDETAIL);
				break;
			case 0x30:
				strlcat(msg, "SG_ERR_SUGGEST_REMAP", MAXERRDETAIL);
				break;
			case 0x40:
				strlcat(msg, "SG_ERR_SUGGEST_DIE", MAXERRDETAIL);
				break;
			case 0x80:
				strlcat(msg, "SG_ERR_SUGGEST_SENSE", MAXERRDETAIL);
				break;
			}
			strlcat(msg, ". ", MAXERRDETAIL);
		}
		if (hdr->status) {
			snprintf(msgchunk, MAXMSGCHUNK, "SG SCSI Status: 0x%02x; ", hdr->status);
			strlcat(msg, msgchunk, MAXERRDETAIL);
			// SCSI 3 status codes
			switch (hdr->status) {
			case 0x02:
				strlcat(msg, "CHECK_CONDITION", MAXERRDETAIL);
				break;
			case 0x04:
				strlcat(msg, "CONDITION_MET", MAXERRDETAIL);
				break;
			case 0x08:
				strlcat(msg, "BUSY", MAXERRDETAIL);
				break;
			case 0x10:
				strlcat(msg, "INTERMEDIATE", MAXERRDETAIL);
				break;
			case 0x14:
				strlcat(msg, "INTERMEDIATE_CONDITION_MET", MAXERRDETAIL);
				break;
			case 0x18:
				strlcat(msg, "RESERVATION_CONFLICT", MAXERRDETAIL);
				break;
			case 0x22:
				strlcat(msg, "COMMAND_TERMINATED", MAXERRDETAIL);
				break;
			case 0x28:
				strlcat(msg, "TASK_SET_FULL", MAXERRDETAIL);
				break;
			case 0x30:
				strlcat(msg, "ACA_ACTIVE", MAXERRDETAIL);
				break;
			case 0x40:
				strlcat(msg, "TASK_ABORTED", MAXERRDETAIL);
				break;
			default:
				strlcat(msg, "Unknown", MAXERRDETAIL);
				break;
			}
			strlcat(msg, ". ", MAXERRDETAIL);
		}
		if (hdr->sb_len_wr) {
			snprintf(msgchunk, MAXMSGCHUNK, "Sense Data (%d bytes):", hdr->sb_len_wr);
			strlcat(msg, msgchunk, MAXERRDETAIL);
			for (i = 0; i < hdr->sb_len_wr; i++) {
				snprintf(msgchunk, MAXMSGCHUNK, " %02x", hdr->sbp[i]);
				strlcat(msg, msgchunk, MAXERRDETAIL);
			}
			strlcat(msg, ". ", MAXERRDETAIL);
		}
		if (hdr->resid != 0) {
			snprintf(msgchunk, MAXMSGCHUNK, "SG Driver: %d bytes out of %d not transferred. ", hdr->resid, hdr->dxfer_len);
			strlcat(msg, msgchunk, MAXERRDETAIL);
		}
		if (hdr->cmdp) {
			strlcat(msg, "cdb:", MAXERRDETAIL);
			for (i = 0; i < hdr->cmd_len; i++) {
				snprintf(msgchunk, MAXMSGCHUNK, " %02x", hdr->cmdp[i]);
				strlcat(msg, msgchunk, MAXERRDETAIL);
			}
			strlcat(msg, ". ", MAXERRDETAIL);
			if (io_u->ddir == DDIR_TRIM) {
				unsigned char *param_list = hdr->dxferp;
				strlcat(msg, "dxferp:", MAXERRDETAIL);
				for (i = 0; i < hdr->dxfer_len; i++) {
					snprintf(msgchunk, MAXMSGCHUNK, " %02x", param_list[i]);
					strlcat(msg, msgchunk, MAXERRDETAIL);
				}
				strlcat(msg, ". ", MAXERRDETAIL);
			}
		}
	}

	if (!(hdr->info & SG_INFO_CHECK) && !strlen(msg))
		snprintf(msg, MAXERRDETAIL, "%s",
			 "SG Driver did not report a Host, Driver or Device check");

	return msg;
}

/*
 * get max file size from read capacity.
 */
static int fio_sgio_get_file_size(struct thread_data *td, struct fio_file *f)
{
	/*
	 * get_file_size is being called even before sgio_init is
	 * called, so none of the sg_io structures are
	 * initialized in the thread_data yet. So we need to do the
	 * ReadCapacity without any of those helpers. One of the effects
	 * is that ReadCapacity may get called 4 times on each open:
	 * readcap(10) followed by readcap(16) if needed - just to get
	 * the file size after the init occurs - and it will be called
	 * again when "type_check" is called during structure
	 * initialization. I'm not sure how to prevent this little
	 * inefficiency.
	 */
	unsigned int bs = 0;
	unsigned long long max_lba = 0;
	int ret;

	if (fio_file_size_known(f))
		return 0;

	if (f->filetype != FIO_TYPE_BLOCK && f->filetype != FIO_TYPE_CHAR) {
		td_verror(td, EINVAL, "wrong file type");
		log_err("ioengine sg only works on block or character devices\n");
		return 1;
	}

	ret = fio_sgio_read_capacity(td, &bs, &max_lba);
	if (ret) {
		td_verror(td, td->error, "fio_sgio_read_capacity");
		log_err("ioengine sg unable to successfully execute read capacity to get block size and maximum lba\n");
		return 1;
	}

	f->real_file_size = (max_lba + 1) * bs;
	fio_file_set_size_known(f);
	return 0;
}

static struct ioengine_ops ioengine = {
	.name		= "sg",
	.version	= FIO_IOOPS_VERSION,
	.init		= fio_sgio_init,
	.prep		= fio_sgio_prep,
	.queue		= fio_sgio_queue,
	.commit		= fio_sgio_commit,
	.getevents	= fio_sgio_getevents,
	.errdetails	= fio_sgio_errdetails,
	.event		= fio_sgio_event,
	.cleanup	= fio_sgio_cleanup,
	.open_file	= fio_sgio_open,
	.close_file	= generic_close_file,
	.get_file_size	= fio_sgio_get_file_size,
	.flags		= FIO_SYNCIO | FIO_RAWIO,
	.options	= options,
	.option_struct_size = sizeof(struct sg_options),
};

#else /* FIO_HAVE_SGIO */

/*
 * When we have a proper configure system in place, we simply won't build
 * and install this io engine. For now, install a crippled version that
 * just complains and fails to load.
 */
static int fio_sgio_init(struct thread_data fio_unused *td)
{
	log_err("fio: ioengine sg not available\n");
	return 1;
}

static struct ioengine_ops ioengine = {
	.name		= "sg",
	.version	= FIO_IOOPS_VERSION,
	.init		= fio_sgio_init,
};

#endif

static void fio_init fio_sgio_register(void)
{
	register_ioengine(&ioengine);
}

static void fio_exit fio_sgio_unregister(void)
{
	unregister_ioengine(&ioengine);
}