engines/sg: add cmdp and dxferp for trims to sg error string
[fio.git] / engines / sg.c
CommitLineData
2866c82d 1/*
da751ca9
JA
2 * sg engine
3 *
4 * IO engine that uses the Linux SG v3 interface to talk to SCSI devices
2866c82d 5 *
b4b9665e
VF
6 * This ioengine can operate in two modes:
7 * sync with block devices (/dev/sdX) or
8 * with character devices (/dev/sgY) with direct=1 or sync=1
9 * async with character devices with direct=0 and sync=0
10 *
11 * What value does queue() return for the different cases?
12 * queue() return value
13 * In sync mode:
14 * /dev/sdX RWT FIO_Q_COMPLETED
15 * /dev/sgY RWT FIO_Q_COMPLETED
16 * with direct=1 or sync=1
17 *
18 * In async mode:
19 * /dev/sgY RWT FIO_Q_QUEUED
20 * direct=0 and sync=0
21 *
22 * Because FIO_SYNCIO is set for this ioengine td_io_queue() will fill in
23 * issue_time *before* each IO is sent to queue()
24 *
25 * Where are the IO counting functions called for the different cases?
26 *
27 * In sync mode:
28 * /dev/sdX (commit==NULL)
29 * RWT
30 * io_u_mark_depth() called in td_io_queue()
31 * io_u_mark_submit/complete() called in td_io_queue()
32 * issue_time set in td_io_queue()
33 *
34 * /dev/sgY with direct=1 or sync=1 (commit does nothing)
35 * RWT
36 * io_u_mark_depth() called in td_io_queue()
37 * io_u_mark_submit/complete() called in queue()
38 * issue_time set in td_io_queue()
39 *
40 * In async mode:
41 * /dev/sgY with direct=0 and sync=0
42 * RW: read and write operations are submitted in queue()
43 * io_u_mark_depth() called in td_io_commit()
44 * io_u_mark_submit() called in queue()
45 * issue_time set in td_io_queue()
46 * T: trim operations are queued in queue() and submitted in commit()
47 * io_u_mark_depth() called in td_io_commit()
48 * io_u_mark_submit() called in commit()
49 * issue_time set in commit()
50 *
2866c82d
JA
51 */
52#include <stdio.h>
53#include <stdlib.h>
54#include <unistd.h>
55#include <errno.h>
8393ca93 56#include <poll.h>
5f350952
JA
57
58#include "../fio.h"
52b81b7c 59#include "../optgroup.h"
2866c82d 60
34cfcdaf
JA
61#ifdef FIO_HAVE_SGIO
62
cbdc9353
VF
63enum {
64 FIO_SG_WRITE = 1,
65 FIO_SG_WRITE_VERIFY = 2,
66 FIO_SG_WRITE_SAME = 3
67};
52b81b7c
KD
68
69struct sg_options {
70 void *pad;
71 unsigned int readfua;
72 unsigned int writefua;
cbdc9353 73 unsigned int write_mode;
52b81b7c
KD
74};
75
76static struct fio_option options[] = {
77 {
78 .name = "readfua",
79 .lname = "sg engine read fua flag support",
80 .type = FIO_OPT_BOOL,
81 .off1 = offsetof(struct sg_options, readfua),
82 .help = "Set FUA flag (force unit access) for all Read operations",
83 .def = "0",
84 .category = FIO_OPT_C_ENGINE,
85 .group = FIO_OPT_G_SG,
86 },
87 {
88 .name = "writefua",
89 .lname = "sg engine write fua flag support",
90 .type = FIO_OPT_BOOL,
91 .off1 = offsetof(struct sg_options, writefua),
92 .help = "Set FUA flag (force unit access) for all Write operations",
93 .def = "0",
94 .category = FIO_OPT_C_ENGINE,
95 .group = FIO_OPT_G_SG,
96 },
cbdc9353
VF
97 {
98 .name = "sg_write_mode",
99 .lname = "specify sg write mode",
100 .type = FIO_OPT_STR,
101 .off1 = offsetof(struct sg_options, write_mode),
102 .help = "Specify SCSI WRITE mode",
103 .def = "write",
104 .posval = {
105 { .ival = "write",
106 .oval = FIO_SG_WRITE,
107 .help = "Issue standard SCSI WRITE commands",
108 },
109 { .ival = "verify",
110 .oval = FIO_SG_WRITE_VERIFY,
111 .help = "Issue SCSI WRITE AND VERIFY commands",
112 },
113 { .ival = "same",
114 .oval = FIO_SG_WRITE_SAME,
115 .help = "Issue SCSI WRITE SAME commands",
116 },
117 },
118 .category = FIO_OPT_C_ENGINE,
119 .group = FIO_OPT_G_SG,
120 },
52b81b7c
KD
121 {
122 .name = NULL,
123 },
124};
125
5ad7be56
KD
126#define MAX_10B_LBA 0xFFFFFFFFULL
127#define SCSI_TIMEOUT_MS 30000 // 30 second timeout; currently no method to override
128#define MAX_SB 64 // sense block maximum return size
129
2866c82d 130struct sgio_cmd {
fde57152 131 unsigned char cdb[16]; // enhanced from 10 to support 16 byte commands
5ad7be56 132 unsigned char sb[MAX_SB]; // add sense block to commands
2866c82d
JA
133 int nr;
134};
135
b4b9665e
VF
136struct sgio_trim {
137 char *unmap_param;
138 unsigned int unmap_range_count;
139 struct io_u **trim_io_us;
140};
141
2866c82d
JA
142struct sgio_data {
143 struct sgio_cmd *cmds;
144 struct io_u **events;
dc0deca2
JA
145 struct pollfd *pfds;
146 int *fd_flags;
147 void *sgbuf;
2866c82d 148 unsigned int bs;
b5af8293 149 int type_checked;
b4b9665e
VF
150 struct sgio_trim **trim_queues;
151 int current_queue;
152 unsigned int *trim_queue_map;
2866c82d
JA
153};
154
b4b9665e
VF
155static inline bool sgio_unbuffered(struct thread_data *td)
156{
157 return (td->o.odirect || td->o.sync_io);
158}
159
2866c82d
JA
160static void sgio_hdr_init(struct sgio_data *sd, struct sg_io_hdr *hdr,
161 struct io_u *io_u, int fs)
162{
163 struct sgio_cmd *sc = &sd->cmds[io_u->index];
164
165 memset(hdr, 0, sizeof(*hdr));
166 memset(sc->cdb, 0, sizeof(sc->cdb));
167
168 hdr->interface_id = 'S';
169 hdr->cmdp = sc->cdb;
170 hdr->cmd_len = sizeof(sc->cdb);
5ad7be56
KD
171 hdr->sbp = sc->sb;
172 hdr->mx_sb_len = sizeof(sc->sb);
2866c82d
JA
173 hdr->pack_id = io_u->index;
174 hdr->usr_ptr = io_u;
b4b9665e 175 hdr->timeout = SCSI_TIMEOUT_MS;
2866c82d
JA
176
177 if (fs) {
cec6b55d
JA
178 hdr->dxferp = io_u->xfer_buf;
179 hdr->dxfer_len = io_u->xfer_buflen;
2866c82d
JA
180 }
181}
182
adee86c5
JA
183static int pollin_events(struct pollfd *pfds, int fds)
184{
185 int i;
186
187 for (i = 0; i < fds; i++)
188 if (pfds[i].revents & POLLIN)
189 return 1;
190
191 return 0;
192}
2866c82d 193
14d0261e
JA
194static int sg_fd_read(int fd, void *data, size_t size)
195{
196 int err = 0;
197
198 while (size) {
199 ssize_t ret;
200
201 ret = read(fd, data, size);
202 if (ret < 0) {
203 if (errno == EAGAIN || errno == EINTR)
204 continue;
205 err = errno;
206 break;
207 } else if (!ret)
208 break;
209 else {
210 data += ret;
211 size -= ret;
212 }
213 }
214
215 if (err)
216 return err;
217 if (size)
218 return EAGAIN;
219
220 return 0;
221}
222
e7d2e616 223static int fio_sgio_getevents(struct thread_data *td, unsigned int min,
1f440ece
JA
224 unsigned int max,
225 const struct timespec fio_unused *t)
2866c82d 226{
565e784d 227 struct sgio_data *sd = td->io_ops_data;
b4b9665e 228 int left = max, eventNum, ret, r = 0, trims = 0;
dc0deca2 229 void *buf = sd->sgbuf;
b4b9665e 230 unsigned int i, j, events;
946ff865 231 struct fio_file *f;
b4b9665e 232 struct io_u *io_u;
2866c82d
JA
233
234 /*
adee86c5 235 * Fill in the file descriptors
2866c82d 236 */
adee86c5
JA
237 for_each_file(td, f, i) {
238 /*
239 * don't block for min events == 0
240 */
4a851614 241 if (!min)
3a35845f
JA
242 sd->fd_flags[i] = fio_set_fd_nonblocking(f->fd, "sg");
243 else
244 sd->fd_flags[i] = -1;
4a851614 245
dc0deca2
JA
246 sd->pfds[i].fd = f->fd;
247 sd->pfds[i].events = POLLIN;
2866c82d
JA
248 }
249
b4b9665e
VF
250 /*
251 ** There are two counters here:
252 ** - number of SCSI commands completed
253 ** - number of io_us completed
254 **
255 ** These are the same with reads and writes, but
256 ** could differ with trim/unmap commands because
257 ** a single unmap can include multiple io_us
258 */
259
260 while (left > 0) {
c97e3cb0 261 char *p;
adee86c5 262
b4b9665e 263 dprint(FD_IO, "sgio_getevents: sd %p: min=%d, max=%d, left=%d\n", sd, min, max, left);
5ad7be56 264
2866c82d
JA
265 do {
266 if (!min)
267 break;
adee86c5 268
2dc1bbeb 269 ret = poll(sd->pfds, td->o.nr_files, -1);
adee86c5 270 if (ret < 0) {
adee86c5 271 if (!r)
22819ec2 272 r = -errno;
e1161c32 273 td_verror(td, errno, "poll");
adee86c5
JA
274 break;
275 } else if (!ret)
276 continue;
277
2dc1bbeb 278 if (pollin_events(sd->pfds, td->o.nr_files))
2866c82d
JA
279 break;
280 } while (1);
281
adee86c5 282 if (r < 0)
2866c82d 283 break;
adee86c5
JA
284
285re_read:
286 p = buf;
287 events = 0;
288 for_each_file(td, f, i) {
5ad7be56 289 for (eventNum = 0; eventNum < left; eventNum++) {
14d0261e 290 ret = sg_fd_read(f->fd, p, sizeof(struct sg_io_hdr));
b4b9665e 291 dprint(FD_IO, "sgio_getevents: sg_fd_read ret: %d\n", ret);
14d0261e
JA
292 if (ret) {
293 r = -ret;
294 td_verror(td, r, "sg_read");
5ad7be56 295 break;
5ad7be56 296 }
b4b9665e
VF
297 io_u = ((struct sg_io_hdr *)p)->usr_ptr;
298 if (io_u->ddir == DDIR_TRIM) {
299 events += sd->trim_queues[io_u->index]->unmap_range_count;
300 eventNum += sd->trim_queues[io_u->index]->unmap_range_count - 1;
301 } else
302 events++;
303
14d0261e 304 p += sizeof(struct sg_io_hdr);
b4b9665e 305 dprint(FD_IO, "sgio_getevents: events: %d, eventNum: %d, left: %d\n", events, eventNum, left);
adee86c5
JA
306 }
307 }
308
14d0261e 309 if (r < 0 && !events)
2866c82d 310 break;
adee86c5
JA
311 if (!events) {
312 usleep(1000);
313 goto re_read;
314 }
2866c82d 315
2866c82d
JA
316 left -= events;
317 r += events;
318
319 for (i = 0; i < events; i++) {
320 struct sg_io_hdr *hdr = (struct sg_io_hdr *) buf + i;
b4b9665e
VF
321 sd->events[i + trims] = hdr->usr_ptr;
322 io_u = (struct io_u *)(hdr->usr_ptr);
5ad7be56 323
5ad7be56 324 if (hdr->info & SG_INFO_CHECK) {
b4b9665e 325 /* record if an io error occurred, ignore resid */
be660713 326 memcpy(&io_u->hdr, hdr, sizeof(struct sg_io_hdr));
b4b9665e
VF
327 sd->events[i + trims]->error = EIO;
328 }
329
330 if (io_u->ddir == DDIR_TRIM) {
331 struct sgio_trim *st = sd->trim_queues[io_u->index];
332 assert(st->trim_io_us[0] == io_u);
333 dprint(FD_IO, "sgio_getevents: reaping %d io_us from trim queue %d\n", st->unmap_range_count, io_u->index);
334 dprint(FD_IO, "sgio_getevents: reaped io_u %d and stored in events[%d]\n", io_u->index, i+trims);
335 for (j = 1; j < st->unmap_range_count; j++) {
336 ++trims;
337 sd->events[i + trims] = st->trim_io_us[j];
338 dprint(FD_IO, "sgio_getevents: reaped io_u %d and stored in events[%d]\n", st->trim_io_us[j]->index, i+trims);
339 if (hdr->info & SG_INFO_CHECK) {
340 /* record if an io error occurred, ignore resid */
341 memcpy(&st->trim_io_us[j]->hdr, hdr, sizeof(struct sg_io_hdr));
342 sd->events[i + trims]->error = EIO;
343 }
344 }
345 events -= st->unmap_range_count - 1;
346 st->unmap_range_count = 0;
5ad7be56 347 }
2866c82d
JA
348 }
349 }
350
adee86c5 351 if (!min) {
affe05a9 352 for_each_file(td, f, i) {
3a35845f
JA
353 if (sd->fd_flags[i] == -1)
354 continue;
355
affe05a9
JA
356 if (fcntl(f->fd, F_SETFL, sd->fd_flags[i]) < 0)
357 log_err("fio: sg failed to restore fcntl flags: %s\n", strerror(errno));
358 }
adee86c5 359 }
2866c82d 360
2866c82d
JA
361 return r;
362}
363
2e4ef4fb
JA
364static enum fio_q_status fio_sgio_ioctl_doio(struct thread_data *td,
365 struct fio_file *f,
366 struct io_u *io_u)
2866c82d 367{
565e784d 368 struct sgio_data *sd = td->io_ops_data;
2866c82d 369 struct sg_io_hdr *hdr = &io_u->hdr;
36167d82 370 int ret;
2866c82d
JA
371
372 sd->events[0] = io_u;
373
36167d82
JA
374 ret = ioctl(f->fd, SG_IO, hdr);
375 if (ret < 0)
a05bd42d 376 return ret;
36167d82 377
5ad7be56
KD
378 /* record if an io error occurred */
379 if (hdr->info & SG_INFO_CHECK)
380 io_u->error = EIO;
381
36167d82 382 return FIO_Q_COMPLETED;
2866c82d
JA
383}
384
b4b9665e
VF
385static enum fio_q_status fio_sgio_rw_doio(struct fio_file *f,
386 struct io_u *io_u, int do_sync)
2866c82d
JA
387{
388 struct sg_io_hdr *hdr = &io_u->hdr;
389 int ret;
390
53cdc686 391 ret = write(f->fd, hdr, sizeof(*hdr));
2866c82d 392 if (ret < 0)
a05bd42d 393 return ret;
2866c82d 394
2b13e716 395 if (do_sync) {
53cdc686 396 ret = read(f->fd, hdr, sizeof(*hdr));
2866c82d 397 if (ret < 0)
a05bd42d 398 return ret;
5ad7be56
KD
399
400 /* record if an io error occurred */
401 if (hdr->info & SG_INFO_CHECK)
402 io_u->error = EIO;
403
36167d82 404 return FIO_Q_COMPLETED;
2866c82d
JA
405 }
406
36167d82 407 return FIO_Q_QUEUED;
2866c82d
JA
408}
409
b4b9665e
VF
410static enum fio_q_status fio_sgio_doio(struct thread_data *td,
411 struct io_u *io_u, int do_sync)
2866c82d 412{
53cdc686 413 struct fio_file *f = io_u->file;
b4b9665e 414 enum fio_q_status ret;
53cdc686 415
686fbd31 416 if (f->filetype == FIO_TYPE_BLOCK) {
5ad7be56 417 ret = fio_sgio_ioctl_doio(td, f, io_u);
c9aeb797 418 td_verror(td, io_u->error, __func__);
5ad7be56
KD
419 } else {
420 ret = fio_sgio_rw_doio(f, io_u, do_sync);
421 if (do_sync)
c9aeb797 422 td_verror(td, io_u->error, __func__);
5ad7be56 423 }
2866c82d 424
5ad7be56 425 return ret;
2866c82d
JA
426}
427
b4b9665e
VF
428static void fio_sgio_rw_lba(struct sg_io_hdr *hdr, unsigned long long lba,
429 unsigned long long nr_blocks)
430{
431 if (lba < MAX_10B_LBA) {
432 hdr->cmdp[2] = (unsigned char) ((lba >> 24) & 0xff);
433 hdr->cmdp[3] = (unsigned char) ((lba >> 16) & 0xff);
434 hdr->cmdp[4] = (unsigned char) ((lba >> 8) & 0xff);
435 hdr->cmdp[5] = (unsigned char) (lba & 0xff);
436 hdr->cmdp[7] = (unsigned char) ((nr_blocks >> 8) & 0xff);
437 hdr->cmdp[8] = (unsigned char) (nr_blocks & 0xff);
438 } else {
439 hdr->cmdp[2] = (unsigned char) ((lba >> 56) & 0xff);
440 hdr->cmdp[3] = (unsigned char) ((lba >> 48) & 0xff);
441 hdr->cmdp[4] = (unsigned char) ((lba >> 40) & 0xff);
442 hdr->cmdp[5] = (unsigned char) ((lba >> 32) & 0xff);
443 hdr->cmdp[6] = (unsigned char) ((lba >> 24) & 0xff);
444 hdr->cmdp[7] = (unsigned char) ((lba >> 16) & 0xff);
445 hdr->cmdp[8] = (unsigned char) ((lba >> 8) & 0xff);
446 hdr->cmdp[9] = (unsigned char) (lba & 0xff);
447 hdr->cmdp[10] = (unsigned char) ((nr_blocks >> 32) & 0xff);
448 hdr->cmdp[11] = (unsigned char) ((nr_blocks >> 16) & 0xff);
449 hdr->cmdp[12] = (unsigned char) ((nr_blocks >> 8) & 0xff);
450 hdr->cmdp[13] = (unsigned char) (nr_blocks & 0xff);
451 }
452
453 return;
454}
455
2866c82d
JA
456static int fio_sgio_prep(struct thread_data *td, struct io_u *io_u)
457{
458 struct sg_io_hdr *hdr = &io_u->hdr;
52b81b7c 459 struct sg_options *o = td->eo;
565e784d 460 struct sgio_data *sd = td->io_ops_data;
b4b9665e
VF
461 unsigned long long nr_blocks, lba;
462 int offset;
2866c82d 463
cec6b55d 464 if (io_u->xfer_buflen & (sd->bs - 1)) {
2866c82d
JA
465 log_err("read/write not sector aligned\n");
466 return EINVAL;
467 }
468
5ad7be56
KD
469 nr_blocks = io_u->xfer_buflen / sd->bs;
470 lba = io_u->offset / sd->bs;
471
2866c82d 472 if (io_u->ddir == DDIR_READ) {
87dc1ab1
JA
473 sgio_hdr_init(sd, hdr, io_u, 1);
474
2866c82d 475 hdr->dxfer_direction = SG_DXFER_FROM_DEV;
5ad7be56
KD
476 if (lba < MAX_10B_LBA)
477 hdr->cmdp[0] = 0x28; // read(10)
478 else
479 hdr->cmdp[0] = 0x88; // read(16)
52b81b7c
KD
480
481 if (o->readfua)
482 hdr->cmdp[1] |= 0x08;
483
b4b9665e
VF
484 fio_sgio_rw_lba(hdr, lba, nr_blocks);
485
87dc1ab1
JA
486 } else if (io_u->ddir == DDIR_WRITE) {
487 sgio_hdr_init(sd, hdr, io_u, 1);
488
2866c82d 489 hdr->dxfer_direction = SG_DXFER_TO_DEV;
cbdc9353
VF
490 switch(o->write_mode) {
491 case FIO_SG_WRITE:
492 if (lba < MAX_10B_LBA)
493 hdr->cmdp[0] = 0x2a; // write(10)
494 else
495 hdr->cmdp[0] = 0x8a; // write(16)
496 if (o->writefua)
497 hdr->cmdp[1] |= 0x08;
498 break;
499 case FIO_SG_WRITE_VERIFY:
500 if (lba < MAX_10B_LBA)
501 hdr->cmdp[0] = 0x2e; // write and verify(10)
502 else
503 hdr->cmdp[0] = 0x8e; // write and verify(16)
504 break;
505 // BYTCHK is disabled by virtue of the memset in sgio_hdr_init
506 case FIO_SG_WRITE_SAME:
507 hdr->dxfer_len = sd->bs;
508 if (lba < MAX_10B_LBA)
509 hdr->cmdp[0] = 0x41; // write same(10)
510 else
511 hdr->cmdp[0] = 0x93; // write same(16)
512 break;
513 };
b4b9665e
VF
514
515 fio_sgio_rw_lba(hdr, lba, nr_blocks);
516
517 } else if (io_u->ddir == DDIR_TRIM) {
518 struct sgio_trim *st;
519
520 if (sd->current_queue == -1) {
521 sgio_hdr_init(sd, hdr, io_u, 0);
522
523 hdr->cmd_len = 10;
524 hdr->dxfer_direction = SG_DXFER_TO_DEV;
525 hdr->cmdp[0] = 0x42; // unmap
526 sd->current_queue = io_u->index;
527 st = sd->trim_queues[sd->current_queue];
528 hdr->dxferp = st->unmap_param;
529 assert(sd->trim_queues[io_u->index]->unmap_range_count == 0);
530 dprint(FD_IO, "sg: creating new queue based on io_u %d\n", io_u->index);
531 }
532 else
533 st = sd->trim_queues[sd->current_queue];
534
535 dprint(FD_IO, "sg: adding io_u %d to trim queue %d\n", io_u->index, sd->current_queue);
536 st->trim_io_us[st->unmap_range_count] = io_u;
537 sd->trim_queue_map[io_u->index] = sd->current_queue;
538
539 offset = 8 + 16 * st->unmap_range_count;
540 st->unmap_param[offset] = (unsigned char) ((lba >> 56) & 0xff);
541 st->unmap_param[offset+1] = (unsigned char) ((lba >> 48) & 0xff);
542 st->unmap_param[offset+2] = (unsigned char) ((lba >> 40) & 0xff);
543 st->unmap_param[offset+3] = (unsigned char) ((lba >> 32) & 0xff);
544 st->unmap_param[offset+4] = (unsigned char) ((lba >> 24) & 0xff);
545 st->unmap_param[offset+5] = (unsigned char) ((lba >> 16) & 0xff);
546 st->unmap_param[offset+6] = (unsigned char) ((lba >> 8) & 0xff);
547 st->unmap_param[offset+7] = (unsigned char) (lba & 0xff);
548 st->unmap_param[offset+8] = (unsigned char) ((nr_blocks >> 32) & 0xff);
549 st->unmap_param[offset+9] = (unsigned char) ((nr_blocks >> 16) & 0xff);
550 st->unmap_param[offset+10] = (unsigned char) ((nr_blocks >> 8) & 0xff);
551 st->unmap_param[offset+11] = (unsigned char) (nr_blocks & 0xff);
552
553 st->unmap_range_count++;
554
555 } else if (ddir_sync(io_u->ddir)) {
87dc1ab1 556 sgio_hdr_init(sd, hdr, io_u, 0);
87dc1ab1 557 hdr->dxfer_direction = SG_DXFER_NONE;
5ad7be56
KD
558 if (lba < MAX_10B_LBA)
559 hdr->cmdp[0] = 0x35; // synccache(10)
560 else
561 hdr->cmdp[0] = 0x91; // synccache(16)
b4b9665e
VF
562 } else
563 assert(0);
2866c82d 564
2866c82d
JA
565 return 0;
566}
567
b4b9665e
VF
568static void fio_sgio_unmap_setup(struct sg_io_hdr *hdr, struct sgio_trim *st)
569{
570 hdr->dxfer_len = st->unmap_range_count * 16 + 8;
571 hdr->cmdp[7] = (unsigned char) (((st->unmap_range_count * 16 + 8) >> 8) & 0xff);
572 hdr->cmdp[8] = (unsigned char) ((st->unmap_range_count * 16 + 8) & 0xff);
573
574 st->unmap_param[0] = (unsigned char) (((16 * st->unmap_range_count + 6) >> 8) & 0xff);
575 st->unmap_param[1] = (unsigned char) ((16 * st->unmap_range_count + 6) & 0xff);
576 st->unmap_param[2] = (unsigned char) (((16 * st->unmap_range_count) >> 8) & 0xff);
577 st->unmap_param[3] = (unsigned char) ((16 * st->unmap_range_count) & 0xff);
578
579 return;
580}
581
2e4ef4fb
JA
582static enum fio_q_status fio_sgio_queue(struct thread_data *td,
583 struct io_u *io_u)
2866c82d
JA
584{
585 struct sg_io_hdr *hdr = &io_u->hdr;
b4b9665e 586 struct sgio_data *sd = td->io_ops_data;
f6db4fa5 587 int ret, do_sync = 0;
2866c82d 588
7101d9c2
JA
589 fio_ro_check(td, io_u);
590
b4b9665e 591 if (sgio_unbuffered(td) || ddir_sync(io_u->ddir))
f6db4fa5
JA
592 do_sync = 1;
593
b4b9665e
VF
594 if (io_u->ddir == DDIR_TRIM) {
595 if (do_sync || io_u->file->filetype == FIO_TYPE_BLOCK) {
596 struct sgio_trim *st = sd->trim_queues[sd->current_queue];
597
598 /* finish cdb setup for unmap because we are
599 ** doing unmap commands synchronously */
600 assert(st->unmap_range_count == 1);
601 assert(io_u == st->trim_io_us[0]);
602 hdr = &io_u->hdr;
603
604 fio_sgio_unmap_setup(hdr, st);
605
606 st->unmap_range_count = 0;
607 sd->current_queue = -1;
608 } else
609 /* queue up trim ranges and submit in commit() */
610 return FIO_Q_QUEUED;
611 }
612
f6db4fa5 613 ret = fio_sgio_doio(td, io_u, do_sync);
2866c82d
JA
614
615 if (ret < 0)
616 io_u->error = errno;
617 else if (hdr->status) {
618 io_u->resid = hdr->resid;
619 io_u->error = EIO;
b4b9665e
VF
620 } else if (td->io_ops->commit != NULL) {
621 if (do_sync && !ddir_sync(io_u->ddir)) {
622 io_u_mark_submit(td, 1);
623 io_u_mark_complete(td, 1);
624 } else if (io_u->ddir == DDIR_READ || io_u->ddir == DDIR_WRITE) {
625 io_u_mark_submit(td, 1);
626 io_u_queued(td, io_u);
627 }
2866c82d
JA
628 }
629
95bcd815 630 if (io_u->error) {
e1161c32 631 td_verror(td, io_u->error, "xfer");
36167d82 632 return FIO_Q_COMPLETED;
95bcd815
JA
633 }
634
36167d82 635 return ret;
2866c82d
JA
636}
637
b4b9665e
VF
638static int fio_sgio_commit(struct thread_data *td)
639{
640 struct sgio_data *sd = td->io_ops_data;
641 struct sgio_trim *st;
642 struct io_u *io_u;
643 struct sg_io_hdr *hdr;
644 struct timespec now;
645 unsigned int i;
646 int ret;
647
648 if (sd->current_queue == -1)
649 return 0;
650
651 st = sd->trim_queues[sd->current_queue];
652 io_u = st->trim_io_us[0];
653 hdr = &io_u->hdr;
654
655 fio_sgio_unmap_setup(hdr, st);
656
657 sd->current_queue = -1;
658
659 ret = fio_sgio_rw_doio(io_u->file, io_u, 0);
660
661 if (ret < 0)
662 for (i = 0; i < st->unmap_range_count; i++)
663 st->trim_io_us[i]->error = errno;
664 else if (hdr->status)
665 for (i = 0; i < st->unmap_range_count; i++) {
666 st->trim_io_us[i]->resid = hdr->resid;
667 st->trim_io_us[i]->error = EIO;
668 }
669 else {
670 if (fio_fill_issue_time(td)) {
671 fio_gettime(&now, NULL);
672 for (i = 0; i < st->unmap_range_count; i++) {
673 struct io_u *io_u = st->trim_io_us[i];
674
675 memcpy(&io_u->issue_time, &now, sizeof(now));
676 io_u_queued(td, io_u);
677 }
678 }
679 io_u_mark_submit(td, st->unmap_range_count);
680 }
681
682 if (io_u->error) {
683 td_verror(td, io_u->error, "xfer");
684 return 0;
685 }
686
687 if (ret == FIO_Q_QUEUED)
688 return 0;
689 else
690 return ret;
691}
692
2866c82d
JA
693static struct io_u *fio_sgio_event(struct thread_data *td, int event)
694{
565e784d 695 struct sgio_data *sd = td->io_ops_data;
2866c82d
JA
696
697 return sd->events[event];
698}
699
5ad7be56
KD
700static int fio_sgio_read_capacity(struct thread_data *td, unsigned int *bs,
701 unsigned long long *max_lba)
2866c82d 702{
5ad7be56
KD
703 /*
704 * need to do read capacity operation w/o benefit of sd or
705 * io_u structures, which are not initialized until later.
706 */
707 struct sg_io_hdr hdr;
708 unsigned char cmd[16];
709 unsigned char sb[64];
710 unsigned char buf[32]; // read capacity return
2866c82d 711 int ret;
5ad7be56 712 int fd = -1;
2866c82d 713
5ad7be56 714 struct fio_file *f = td->files[0];
2866c82d 715
5ad7be56
KD
716 /* open file independent of rest of application */
717 fd = open(f->file_name, O_RDONLY);
718 if (fd < 0)
719 return -errno;
2866c82d 720
5ad7be56
KD
721 memset(&hdr, 0, sizeof(hdr));
722 memset(cmd, 0, sizeof(cmd));
723 memset(sb, 0, sizeof(sb));
724 memset(buf, 0, sizeof(buf));
2866c82d 725
5ad7be56
KD
726 /* First let's try a 10 byte read capacity. */
727 hdr.interface_id = 'S';
728 hdr.cmdp = cmd;
729 hdr.cmd_len = 10;
730 hdr.sbp = sb;
731 hdr.mx_sb_len = sizeof(sb);
732 hdr.timeout = SCSI_TIMEOUT_MS;
733 hdr.cmdp[0] = 0x25; // Read Capacity(10)
734 hdr.dxfer_direction = SG_DXFER_FROM_DEV;
735 hdr.dxferp = buf;
736 hdr.dxfer_len = sizeof(buf);
737
738 ret = ioctl(fd, SG_IO, &hdr);
739 if (ret < 0) {
740 close(fd);
2866c82d 741 return ret;
5ad7be56 742 }
2866c82d 743
7ad2ddff
JA
744 *bs = ((unsigned long) buf[4] << 24) | ((unsigned long) buf[5] << 16) |
745 ((unsigned long) buf[6] << 8) | (unsigned long) buf[7];
746 *max_lba = ((unsigned long) buf[0] << 24) | ((unsigned long) buf[1] << 16) |
747 ((unsigned long) buf[2] << 8) | (unsigned long) buf[3];
5ad7be56
KD
748
749 /*
fde57152
TK
750 * If max lba masked by MAX_10B_LBA equals MAX_10B_LBA,
751 * then need to retry with 16 byte Read Capacity command.
5ad7be56
KD
752 */
753 if (*max_lba == MAX_10B_LBA) {
754 hdr.cmd_len = 16;
28c43a89
TK
755 hdr.cmdp[0] = 0x9e; // service action
756 hdr.cmdp[1] = 0x10; // Read Capacity(16)
5ad7be56
KD
757 hdr.cmdp[10] = (unsigned char) ((sizeof(buf) >> 24) & 0xff);
758 hdr.cmdp[11] = (unsigned char) ((sizeof(buf) >> 16) & 0xff);
759 hdr.cmdp[12] = (unsigned char) ((sizeof(buf) >> 8) & 0xff);
760 hdr.cmdp[13] = (unsigned char) (sizeof(buf) & 0xff);
761
762 hdr.dxfer_direction = SG_DXFER_FROM_DEV;
763 hdr.dxferp = buf;
764 hdr.dxfer_len = sizeof(buf);
765
766 ret = ioctl(fd, SG_IO, &hdr);
767 if (ret < 0) {
768 close(fd);
769 return ret;
770 }
771
772 /* record if an io error occurred */
773 if (hdr.info & SG_INFO_CHECK)
774 td_verror(td, EIO, "fio_sgio_read_capacity");
775
776 *bs = (buf[8] << 24) | (buf[9] << 16) | (buf[10] << 8) | buf[11];
777 *max_lba = ((unsigned long long)buf[0] << 56) |
778 ((unsigned long long)buf[1] << 48) |
779 ((unsigned long long)buf[2] << 40) |
780 ((unsigned long long)buf[3] << 32) |
781 ((unsigned long long)buf[4] << 24) |
782 ((unsigned long long)buf[5] << 16) |
783 ((unsigned long long)buf[6] << 8) |
784 (unsigned long long)buf[7];
785 }
786
787 close(fd);
2866c82d
JA
788 return 0;
789}
790
791static void fio_sgio_cleanup(struct thread_data *td)
792{
565e784d 793 struct sgio_data *sd = td->io_ops_data;
b4b9665e 794 int i;
dc0deca2
JA
795
796 if (sd) {
797 free(sd->events);
798 free(sd->cmds);
799 free(sd->fd_flags);
800 free(sd->pfds);
801 free(sd->sgbuf);
b4b9665e
VF
802 free(sd->trim_queue_map);
803
804 for (i = 0; i < td->o.iodepth; i++) {
805 free(sd->trim_queues[i]->unmap_param);
806 free(sd->trim_queues[i]->trim_io_us);
807 free(sd->trim_queues[i]);
808 }
809
810 free(sd->trim_queues);
dc0deca2 811 free(sd);
2866c82d
JA
812 }
813}
814
815static int fio_sgio_init(struct thread_data *td)
816{
817 struct sgio_data *sd;
b4b9665e
VF
818 struct sgio_trim *st;
819 int i;
2866c82d 820
b4b9665e
VF
821 sd = calloc(1, sizeof(*sd));
822 sd->cmds = calloc(td->o.iodepth, sizeof(struct sgio_cmd));
823 sd->sgbuf = calloc(td->o.iodepth, sizeof(struct sg_io_hdr));
824 sd->events = calloc(td->o.iodepth, sizeof(struct io_u *));
825 sd->pfds = calloc(td->o.nr_files, sizeof(struct pollfd));
826 sd->fd_flags = calloc(td->o.nr_files, sizeof(int));
5ad7be56 827 sd->type_checked = 0;
b4b9665e
VF
828
829 sd->trim_queues = calloc(td->o.iodepth, sizeof(struct sgio_trim *));
830 sd->current_queue = -1;
831 sd->trim_queue_map = calloc(td->o.iodepth, sizeof(int));
832 for (i = 0; i < td->o.iodepth; i++) {
833 sd->trim_queues[i] = calloc(1, sizeof(struct sgio_trim));
834 st = sd->trim_queues[i];
835 st->unmap_param = calloc(td->o.iodepth + 1, sizeof(char[16]));
836 st->unmap_range_count = 0;
837 st->trim_io_us = calloc(td->o.iodepth, sizeof(struct io_u *));
838 }
839
565e784d 840 td->io_ops_data = sd;
2866c82d 841
b5af8293
JA
842 /*
843 * we want to do it, regardless of whether odirect is set or not
844 */
2dc1bbeb 845 td->o.override_sync = 1;
b5af8293
JA
846 return 0;
847}
848
849static int fio_sgio_type_check(struct thread_data *td, struct fio_file *f)
850{
565e784d 851 struct sgio_data *sd = td->io_ops_data;
5ad7be56
KD
852 unsigned int bs = 0;
853 unsigned long long max_lba = 0;
854
686fbd31 855 if (f->filetype == FIO_TYPE_BLOCK) {
53cdc686 856 if (ioctl(f->fd, BLKSSZGET, &bs) < 0) {
e1161c32 857 td_verror(td, errno, "ioctl");
b5af8293 858 return 1;
2866c82d 859 }
af52b345 860 } else if (f->filetype == FIO_TYPE_CHAR) {
b5af8293 861 int version, ret;
2866c82d 862
53cdc686 863 if (ioctl(f->fd, SG_GET_VERSION_NUM, &version) < 0) {
e1161c32 864 td_verror(td, errno, "ioctl");
b5af8293 865 return 1;
2866c82d
JA
866 }
867
5ad7be56
KD
868 ret = fio_sgio_read_capacity(td, &bs, &max_lba);
869 if (ret) {
870 td_verror(td, td->error, "fio_sgio_read_capacity");
871 log_err("ioengine sg unable to read capacity successfully\n");
b5af8293 872 return 1;
5ad7be56 873 }
2866c82d 874 } else {
16ada754 875 td_verror(td, EINVAL, "wrong file type");
30dac136 876 log_err("ioengine sg only works on block or character devices\n");
b5af8293 877 return 1;
2866c82d
JA
878 }
879
880 sd->bs = bs;
5ad7be56 881 // Determine size of commands needed based on max_lba
166c6b42
TK
882 if (max_lba >= MAX_10B_LBA) {
883 dprint(FD_IO, "sgio_type_check: using 16 byte read/write "
884 "commands for lba above 0x%016llx/0x%016llx\n",
885 MAX_10B_LBA, max_lba);
5ad7be56
KD
886 }
887
686fbd31 888 if (f->filetype == FIO_TYPE_BLOCK) {
36167d82
JA
889 td->io_ops->getevents = NULL;
890 td->io_ops->event = NULL;
b4b9665e
VF
891 td->io_ops->commit = NULL;
892 /*
893 ** Setting these functions to null may cause problems
894 ** with filename=/dev/sda:/dev/sg0 since we are only
895 ** considering a single file
896 */
36167d82 897 }
5ad7be56 898 sd->type_checked = 1;
2866c82d 899
2866c82d 900 return 0;
b5af8293
JA
901}
902
903static int fio_sgio_open(struct thread_data *td, struct fio_file *f)
904{
565e784d 905 struct sgio_data *sd = td->io_ops_data;
b5af8293
JA
906 int ret;
907
908 ret = generic_open_file(td, f);
909 if (ret)
910 return ret;
911
15ba640a 912 if (sd && !sd->type_checked && fio_sgio_type_check(td, f)) {
6977bcd0 913 ret = generic_close_file(td, f);
b5af8293
JA
914 return 1;
915 }
916
917 return 0;
2866c82d
JA
918}
919
5ad7be56
KD
920/*
921 * Build an error string with details about the driver, host or scsi
922 * error contained in the sg header Caller will use as necessary.
923 */
924static char *fio_sgio_errdetails(struct io_u *io_u)
925{
926 struct sg_io_hdr *hdr = &io_u->hdr;
927#define MAXERRDETAIL 1024
928#define MAXMSGCHUNK 128
fd04fa03 929 char *msg, msgchunk[MAXMSGCHUNK];
5ad7be56
KD
930 int i;
931
efa72f25 932 msg = calloc(1, MAXERRDETAIL);
fd04fa03 933 strcpy(msg, "");
5ad7be56
KD
934
935 /*
936 * can't seem to find sg_err.h, so I'll just echo the define values
937 * so others can search on internet to find clearer clues of meaning.
938 */
939 if (hdr->info & SG_INFO_CHECK) {
5ad7be56
KD
940 if (hdr->host_status) {
941 snprintf(msgchunk, MAXMSGCHUNK, "SG Host Status: 0x%02x; ", hdr->host_status);
942 strlcat(msg, msgchunk, MAXERRDETAIL);
943 switch (hdr->host_status) {
944 case 0x01:
945 strlcat(msg, "SG_ERR_DID_NO_CONNECT", MAXERRDETAIL);
946 break;
947 case 0x02:
948 strlcat(msg, "SG_ERR_DID_BUS_BUSY", MAXERRDETAIL);
949 break;
950 case 0x03:
951 strlcat(msg, "SG_ERR_DID_TIME_OUT", MAXERRDETAIL);
952 break;
953 case 0x04:
954 strlcat(msg, "SG_ERR_DID_BAD_TARGET", MAXERRDETAIL);
955 break;
956 case 0x05:
957 strlcat(msg, "SG_ERR_DID_ABORT", MAXERRDETAIL);
958 break;
959 case 0x06:
960 strlcat(msg, "SG_ERR_DID_PARITY", MAXERRDETAIL);
961 break;
962 case 0x07:
963 strlcat(msg, "SG_ERR_DID_ERROR (internal error)", MAXERRDETAIL);
964 break;
965 case 0x08:
966 strlcat(msg, "SG_ERR_DID_RESET", MAXERRDETAIL);
967 break;
968 case 0x09:
969 strlcat(msg, "SG_ERR_DID_BAD_INTR (unexpected)", MAXERRDETAIL);
970 break;
971 case 0x0a:
972 strlcat(msg, "SG_ERR_DID_PASSTHROUGH", MAXERRDETAIL);
973 break;
974 case 0x0b:
975 strlcat(msg, "SG_ERR_DID_SOFT_ERROR (driver retry?)", MAXERRDETAIL);
976 break;
977 case 0x0c:
978 strlcat(msg, "SG_ERR_DID_IMM_RETRY", MAXERRDETAIL);
979 break;
980 case 0x0d:
981 strlcat(msg, "SG_ERR_DID_REQUEUE", MAXERRDETAIL);
982 break;
2ce6c6e5
TK
983 case 0x0e:
984 strlcat(msg, "SG_ERR_DID_TRANSPORT_DISRUPTED", MAXERRDETAIL);
985 break;
986 case 0x0f:
987 strlcat(msg, "SG_ERR_DID_TRANSPORT_FAILFAST", MAXERRDETAIL);
988 break;
989 case 0x10:
990 strlcat(msg, "SG_ERR_DID_TARGET_FAILURE", MAXERRDETAIL);
991 break;
992 case 0x11:
993 strlcat(msg, "SG_ERR_DID_NEXUS_FAILURE", MAXERRDETAIL);
994 break;
995 case 0x12:
996 strlcat(msg, "SG_ERR_DID_ALLOC_FAILURE", MAXERRDETAIL);
997 break;
998 case 0x13:
999 strlcat(msg, "SG_ERR_DID_MEDIUM_ERROR", MAXERRDETAIL);
1000 break;
5ad7be56
KD
1001 default:
1002 strlcat(msg, "Unknown", MAXERRDETAIL);
1003 break;
1004 }
1005 strlcat(msg, ". ", MAXERRDETAIL);
1006 }
1007 if (hdr->driver_status) {
1008 snprintf(msgchunk, MAXMSGCHUNK, "SG Driver Status: 0x%02x; ", hdr->driver_status);
1009 strlcat(msg, msgchunk, MAXERRDETAIL);
1010 switch (hdr->driver_status & 0x0F) {
1011 case 0x01:
1012 strlcat(msg, "SG_ERR_DRIVER_BUSY", MAXERRDETAIL);
1013 break;
1014 case 0x02:
1015 strlcat(msg, "SG_ERR_DRIVER_SOFT", MAXERRDETAIL);
1016 break;
1017 case 0x03:
1018 strlcat(msg, "SG_ERR_DRIVER_MEDIA", MAXERRDETAIL);
1019 break;
1020 case 0x04:
1021 strlcat(msg, "SG_ERR_DRIVER_ERROR", MAXERRDETAIL);
1022 break;
1023 case 0x05:
1024 strlcat(msg, "SG_ERR_DRIVER_INVALID", MAXERRDETAIL);
1025 break;
1026 case 0x06:
1027 strlcat(msg, "SG_ERR_DRIVER_TIMEOUT", MAXERRDETAIL);
1028 break;
1029 case 0x07:
1030 strlcat(msg, "SG_ERR_DRIVER_HARD", MAXERRDETAIL);
1031 break;
1032 case 0x08:
1033 strlcat(msg, "SG_ERR_DRIVER_SENSE", MAXERRDETAIL);
1034 break;
1035 default:
1036 strlcat(msg, "Unknown", MAXERRDETAIL);
1037 break;
1038 }
1039 strlcat(msg, "; ", MAXERRDETAIL);
1040 switch (hdr->driver_status & 0xF0) {
1041 case 0x10:
1042 strlcat(msg, "SG_ERR_SUGGEST_RETRY", MAXERRDETAIL);
1043 break;
1044 case 0x20:
1045 strlcat(msg, "SG_ERR_SUGGEST_ABORT", MAXERRDETAIL);
1046 break;
1047 case 0x30:
1048 strlcat(msg, "SG_ERR_SUGGEST_REMAP", MAXERRDETAIL);
1049 break;
1050 case 0x40:
1051 strlcat(msg, "SG_ERR_SUGGEST_DIE", MAXERRDETAIL);
1052 break;
1053 case 0x80:
1054 strlcat(msg, "SG_ERR_SUGGEST_SENSE", MAXERRDETAIL);
1055 break;
1056 }
1057 strlcat(msg, ". ", MAXERRDETAIL);
1058 }
1059 if (hdr->status) {
1060 snprintf(msgchunk, MAXMSGCHUNK, "SG SCSI Status: 0x%02x; ", hdr->status);
1061 strlcat(msg, msgchunk, MAXERRDETAIL);
1062 // SCSI 3 status codes
1063 switch (hdr->status) {
1064 case 0x02:
1065 strlcat(msg, "CHECK_CONDITION", MAXERRDETAIL);
1066 break;
1067 case 0x04:
1068 strlcat(msg, "CONDITION_MET", MAXERRDETAIL);
1069 break;
1070 case 0x08:
1071 strlcat(msg, "BUSY", MAXERRDETAIL);
1072 break;
1073 case 0x10:
1074 strlcat(msg, "INTERMEDIATE", MAXERRDETAIL);
1075 break;
1076 case 0x14:
1077 strlcat(msg, "INTERMEDIATE_CONDITION_MET", MAXERRDETAIL);
1078 break;
1079 case 0x18:
1080 strlcat(msg, "RESERVATION_CONFLICT", MAXERRDETAIL);
1081 break;
1082 case 0x22:
1083 strlcat(msg, "COMMAND_TERMINATED", MAXERRDETAIL);
1084 break;
1085 case 0x28:
1086 strlcat(msg, "TASK_SET_FULL", MAXERRDETAIL);
1087 break;
1088 case 0x30:
1089 strlcat(msg, "ACA_ACTIVE", MAXERRDETAIL);
1090 break;
1091 case 0x40:
1092 strlcat(msg, "TASK_ABORTED", MAXERRDETAIL);
1093 break;
1094 default:
1095 strlcat(msg, "Unknown", MAXERRDETAIL);
1096 break;
1097 }
1098 strlcat(msg, ". ", MAXERRDETAIL);
1099 }
1100 if (hdr->sb_len_wr) {
1101 snprintf(msgchunk, MAXMSGCHUNK, "Sense Data (%d bytes):", hdr->sb_len_wr);
1102 strlcat(msg, msgchunk, MAXERRDETAIL);
1103 for (i = 0; i < hdr->sb_len_wr; i++) {
1104 snprintf(msgchunk, MAXMSGCHUNK, " %02x", hdr->sbp[i]);
1105 strlcat(msg, msgchunk, MAXERRDETAIL);
1106 }
1107 strlcat(msg, ". ", MAXERRDETAIL);
1108 }
1109 if (hdr->resid != 0) {
1110 snprintf(msgchunk, MAXMSGCHUNK, "SG Driver: %d bytes out of %d not transferred. ", hdr->resid, hdr->dxfer_len);
1111 strlcat(msg, msgchunk, MAXERRDETAIL);
5ad7be56 1112 }
b4dbb3ce
VF
1113 if (hdr->cmdp) {
1114 strlcat(msg, "cdb:", MAXERRDETAIL);
1115 for (i = 0; i < hdr->cmd_len; i++) {
1116 snprintf(msgchunk, MAXMSGCHUNK, " %02x", hdr->cmdp[i]);
1117 strlcat(msg, msgchunk, MAXERRDETAIL);
1118 }
1119 strlcat(msg, ". ", MAXERRDETAIL);
1120 if (io_u->ddir == DDIR_TRIM) {
1121 unsigned char *param_list = hdr->dxferp;
1122 strlcat(msg, "dxferp:", MAXERRDETAIL);
1123 for (i = 0; i < hdr->dxfer_len; i++) {
1124 snprintf(msgchunk, MAXMSGCHUNK, " %02x", param_list[i]);
1125 strlcat(msg, msgchunk, MAXERRDETAIL);
1126 }
1127 strlcat(msg, ". ", MAXERRDETAIL);
1128 }
1129 }
5ad7be56
KD
1130 }
1131
fd04fa03
TK
1132 if (!(hdr->info & SG_INFO_CHECK) && !strlen(msg))
1133 strncpy(msg, "SG Driver did not report a Host, Driver or Device check",
1134 MAXERRDETAIL - 1);
5ad7be56 1135
fd04fa03 1136 return msg;
5ad7be56
KD
1137}
1138
1139/*
1140 * get max file size from read capacity.
1141 */
1142static int fio_sgio_get_file_size(struct thread_data *td, struct fio_file *f)
1143{
1144 /*
1145 * get_file_size is being called even before sgio_init is
1146 * called, so none of the sg_io structures are
1147 * initialized in the thread_data yet. So we need to do the
1148 * ReadCapacity without any of those helpers. One of the effects
1149 * is that ReadCapacity may get called 4 times on each open:
1150 * readcap(10) followed by readcap(16) if needed - just to get
1151 * the file size after the init occurs - it will be called
1152 * again when "type_check" is called during structure
1153 * initialization I'm not sure how to prevent this little
1154 * inefficiency.
1155 */
1156 unsigned int bs = 0;
1157 unsigned long long max_lba = 0;
1158 int ret;
1159
1160 if (fio_file_size_known(f))
1161 return 0;
1162
686fbd31 1163 if (f->filetype != FIO_TYPE_BLOCK && f->filetype != FIO_TYPE_CHAR) {
30dac136
TK
1164 td_verror(td, EINVAL, "wrong file type");
1165 log_err("ioengine sg only works on block or character devices\n");
1166 return 1;
1167 }
1168
5ad7be56
KD
1169 ret = fio_sgio_read_capacity(td, &bs, &max_lba);
1170 if (ret ) {
1171 td_verror(td, td->error, "fio_sgio_read_capacity");
1172 log_err("ioengine sg unable to successfully execute read capacity to get block size and maximum lba\n");
1173 return 1;
1174 }
1175
1176 f->real_file_size = (max_lba + 1) * bs;
1177 fio_file_set_size_known(f);
1178 return 0;
1179}
1180
1181
5f350952 1182static struct ioengine_ops ioengine = {
2866c82d
JA
1183 .name = "sg",
1184 .version = FIO_IOOPS_VERSION,
1185 .init = fio_sgio_init,
1186 .prep = fio_sgio_prep,
1187 .queue = fio_sgio_queue,
b4b9665e 1188 .commit = fio_sgio_commit,
2866c82d 1189 .getevents = fio_sgio_getevents,
5ad7be56 1190 .errdetails = fio_sgio_errdetails,
2866c82d
JA
1191 .event = fio_sgio_event,
1192 .cleanup = fio_sgio_cleanup,
b5af8293
JA
1193 .open_file = fio_sgio_open,
1194 .close_file = generic_close_file,
fde57152 1195 .get_file_size = fio_sgio_get_file_size,
b2a15192 1196 .flags = FIO_SYNCIO | FIO_RAWIO,
52b81b7c
KD
1197 .options = options,
1198 .option_struct_size = sizeof(struct sg_options)
2866c82d 1199};
34cfcdaf
JA
1200
1201#else /* FIO_HAVE_SGIO */
1202
1203/*
1204 * When we have a proper configure system in place, we simply wont build
1205 * and install this io engine. For now install a crippled version that
1206 * just complains and fails to load.
1207 */
1208static int fio_sgio_init(struct thread_data fio_unused *td)
1209{
a3edaf76 1210 log_err("fio: ioengine sg not available\n");
34cfcdaf
JA
1211 return 1;
1212}
1213
5f350952 1214static struct ioengine_ops ioengine = {
d0c70934 1215 .name = "sg",
34cfcdaf
JA
1216 .version = FIO_IOOPS_VERSION,
1217 .init = fio_sgio_init,
1218};
1219
1220#endif
5f350952
JA
1221
1222static void fio_init fio_sgio_register(void)
1223{
1224 register_ioengine(&ioengine);
1225}
1226
1227static void fio_exit fio_sgio_unregister(void)
1228{
1229 unregister_ioengine(&ioengine);
1230}