zbd: Fix zone locking for async I/O engines
[fio.git] / engines / sg.c
CommitLineData
/*
 * sg engine
 *
 * IO engine that uses the Linux SG v3 interface to talk to SCSI devices
 *
 * This ioengine can operate in two modes:
 *	sync	with block devices (/dev/sdX) or
 *		with character devices (/dev/sgY) with direct=1 or sync=1
 *	async	with character devices with direct=0 and sync=0
 *
 * What value does queue() return for the different cases?
 *				queue() return value
 * In sync mode:
 *  /dev/sdX		RWT	FIO_Q_COMPLETED
 *  /dev/sgY		RWT	FIO_Q_COMPLETED
 *   with direct=1 or sync=1
 *
 * In async mode:
 *  /dev/sgY		RWT	FIO_Q_QUEUED
 *   direct=0 and sync=0
 *
 * Because FIO_SYNCIO is set for this ioengine td_io_queue() will fill in
 * issue_time *before* each IO is sent to queue()
 *
 * Where are the IO counting functions called for the different cases?
 *
 * In sync mode:
 *  /dev/sdX (commit==NULL)
 *   RWT
 *    io_u_mark_depth()			called in td_io_queue()
 *    io_u_mark_submit/complete()	called in td_io_queue()
 *    issue_time			set in td_io_queue()
 *
 *  /dev/sgY with direct=1 or sync=1 (commit does nothing)
 *   RWT
 *    io_u_mark_depth()			called in td_io_queue()
 *    io_u_mark_submit/complete()	called in queue()
 *    issue_time			set in td_io_queue()
 *
 * In async mode:
 *  /dev/sgY with direct=0 and sync=0
 *   RW: read and write operations are submitted in queue()
 *    io_u_mark_depth()			called in td_io_commit()
 *    io_u_mark_submit()		called in queue()
 *    issue_time			set in td_io_queue()
 *   T: trim operations are queued in queue() and submitted in commit()
 *    io_u_mark_depth()			called in td_io_commit()
 *    io_u_mark_submit()		called in commit()
 *    issue_time			set in commit()
 *
 */
52#include <stdio.h>
53#include <stdlib.h>
54#include <unistd.h>
55#include <errno.h>
8393ca93 56#include <poll.h>
5f350952
JA
57
58#include "../fio.h"
52b81b7c 59#include "../optgroup.h"
2866c82d 60
34cfcdaf
JA
61#ifdef FIO_HAVE_SGIO
62
cbdc9353
VF
/* Values accepted by the sg_write_mode option (see options[] below). */
enum {
	FIO_SG_WRITE		= 1,	/* standard SCSI WRITE */
	FIO_SG_WRITE_VERIFY	= 2,	/* SCSI WRITE AND VERIFY */
	FIO_SG_WRITE_SAME	= 3	/* SCSI WRITE SAME */
};
52b81b7c
KD
68
/* Engine-private options, reachable through td->eo. */
struct sg_options {
	void *pad;		/* NOTE(review): presumably reserved for the option
				 * parsing machinery, as in other engines — confirm */
	unsigned int readfua;	/* set FUA bit on reads */
	unsigned int writefua;	/* set FUA bit on writes */
	unsigned int write_mode;	/* one of FIO_SG_WRITE* */
};
75
/* Option table for this engine; terminated by a NULL .name entry. */
static struct fio_option options[] = {
	{
		.name	= "readfua",
		.lname	= "sg engine read fua flag support",
		.type	= FIO_OPT_BOOL,
		.off1	= offsetof(struct sg_options, readfua),
		.help	= "Set FUA flag (force unit access) for all Read operations",
		.def	= "0",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_SG,
	},
	{
		.name	= "writefua",
		.lname	= "sg engine write fua flag support",
		.type	= FIO_OPT_BOOL,
		.off1	= offsetof(struct sg_options, writefua),
		.help	= "Set FUA flag (force unit access) for all Write operations",
		.def	= "0",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_SG,
	},
	{
		.name	= "sg_write_mode",
		.lname	= "specify sg write mode",
		.type	= FIO_OPT_STR,
		.off1	= offsetof(struct sg_options, write_mode),
		.help	= "Specify SCSI WRITE mode",
		.def	= "write",
		.posval = {
			  { .ival = "write",
			    .oval = FIO_SG_WRITE,
			    .help = "Issue standard SCSI WRITE commands",
			  },
			  { .ival = "verify",
			    .oval = FIO_SG_WRITE_VERIFY,
			    .help = "Issue SCSI WRITE AND VERIFY commands",
			  },
			  { .ival = "same",
			    .oval = FIO_SG_WRITE_SAME,
			    .help = "Issue SCSI WRITE SAME commands",
			  },
		},
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_SG,
	},
	{
		.name	= NULL,
	},
};
125
5ad7be56
KD
126#define MAX_10B_LBA 0xFFFFFFFFULL
127#define SCSI_TIMEOUT_MS 30000 // 30 second timeout; currently no method to override
128#define MAX_SB 64 // sense block maximum return size
aa18e0ec
VF
129/*
130#define FIO_SGIO_DEBUG
131*/
5ad7be56 132
2866c82d 133struct sgio_cmd {
fde57152 134 unsigned char cdb[16]; // enhanced from 10 to support 16 byte commands
5ad7be56 135 unsigned char sb[MAX_SB]; // add sense block to commands
2866c82d
JA
136 int nr;
137};
138
b4b9665e 139struct sgio_trim {
a824149a 140 uint8_t *unmap_param;
b4b9665e
VF
141 unsigned int unmap_range_count;
142 struct io_u **trim_io_us;
143};
144
2866c82d
JA
/* Engine-private per-thread state, reachable through td->io_ops_data. */
struct sgio_data {
	struct sgio_cmd *cmds;		/* per-io_u CDB/sense storage, indexed by io_u->index */
	struct io_u **events;		/* completed io_us handed back via fio_sgio_event() */
	struct pollfd *pfds;		/* one pollfd per file, used in getevents() */
	int *fd_flags;			/* saved fcntl() flags to restore after non-blocking poll */
	void *sgbuf;			/* iodepth sg_io_hdrs read back from the sg fd */
	unsigned int bs;		/* device logical block size */
	int type_checked;		/* set once fio_sgio_type_check() has run */
	struct sgio_trim **trim_queues;	/* per-io_u-index UNMAP batches */
	int current_queue;		/* index of the open trim batch, or -1 if none */
#ifdef FIO_SGIO_DEBUG
	unsigned int *trim_queue_map;	/* io_u index -> trim queue it was added to */
#endif
};
159
a824149a
DF
/*
 * Big-endian (SCSI wire order) load/store helpers for CDBs and parameter
 * lists. Both directions go through memcpy: the buffers these operate on
 * are raw byte arrays at arbitrary offsets (e.g. &buf[4], &cmdp[7]), so a
 * direct dereference through a cast would be an unaligned access and a
 * strict-aliasing violation. memcpy is safe and compiles to the same code.
 */
static inline uint16_t sgio_get_be16(uint8_t *buf)
{
	uint16_t t;

	memcpy(&t, buf, sizeof(t));
	return be16_to_cpu(t);
}

static inline uint32_t sgio_get_be32(uint8_t *buf)
{
	uint32_t t;

	memcpy(&t, buf, sizeof(t));
	return be32_to_cpu(t);
}

static inline uint64_t sgio_get_be64(uint8_t *buf)
{
	uint64_t t;

	memcpy(&t, buf, sizeof(t));
	return be64_to_cpu(t);
}

static inline void sgio_set_be16(uint16_t val, uint8_t *buf)
{
	uint16_t t = cpu_to_be16(val);

	memcpy(buf, &t, sizeof(uint16_t));
}

static inline void sgio_set_be32(uint32_t val, uint8_t *buf)
{
	uint32_t t = cpu_to_be32(val);

	memcpy(buf, &t, sizeof(uint32_t));
}

static inline void sgio_set_be64(uint64_t val, uint8_t *buf)
{
	uint64_t t = cpu_to_be64(val);

	memcpy(buf, &t, sizeof(uint64_t));
}
195
b4b9665e
VF
196static inline bool sgio_unbuffered(struct thread_data *td)
197{
198 return (td->o.odirect || td->o.sync_io);
199}
200
2866c82d
JA
/*
 * Initialize the sg_io_hdr for an io_u: zero it, point it at this io_u's
 * private CDB and sense buffer, tag it with the io_u index so completions
 * can be matched back up, and — when fs is non-zero — attach the io_u's
 * transfer buffer for data-carrying commands.
 */
static void sgio_hdr_init(struct sgio_data *sd, struct sg_io_hdr *hdr,
			  struct io_u *io_u, int fs)
{
	struct sgio_cmd *sc = &sd->cmds[io_u->index];

	memset(hdr, 0, sizeof(*hdr));
	memset(sc->cdb, 0, sizeof(sc->cdb));

	hdr->interface_id = 'S';
	hdr->cmdp = sc->cdb;
	hdr->cmd_len = sizeof(sc->cdb);
	hdr->sbp = sc->sb;
	hdr->mx_sb_len = sizeof(sc->sb);
	hdr->pack_id = io_u->index;	/* lets completions be paired with io_us */
	hdr->usr_ptr = io_u;		/* carried through the kernel and back */
	hdr->timeout = SCSI_TIMEOUT_MS;

	if (fs) {
		hdr->dxferp = io_u->xfer_buf;
		hdr->dxfer_len = io_u->xfer_buflen;
	}
}
223
adee86c5
JA
/*
 * Scan a pollfd array and report whether any descriptor became readable.
 * Returns 1 on the first POLLIN hit, 0 if none of the fds are ready.
 */
static int pollin_events(struct pollfd *pfds, int fds)
{
	const struct pollfd *p = pfds;
	const struct pollfd *end = pfds + fds;

	for (; p != end; p++) {
		if (p->revents & POLLIN)
			return 1;
	}

	return 0;
}
2866c82d 234
14d0261e
JA
/*
 * Read exactly `size` bytes from `fd`, retrying on EINTR/EAGAIN.
 * Returns 0 on success, a positive errno on a hard read error, or
 * EAGAIN if EOF was hit before the full size was consumed.
 */
static int sg_fd_read(int fd, void *data, size_t size)
{
	char *dst = data;
	size_t left = size;

	while (left) {
		ssize_t n = read(fd, dst, left);

		if (n > 0) {
			dst += n;
			left -= n;
			continue;
		}
		if (n == 0)
			break;		/* EOF: report short read below */
		if (errno == EAGAIN || errno == EINTR)
			continue;	/* transient, retry */
		return errno;		/* hard error */
	}

	return left ? EAGAIN : 0;
}
263
/*
 * Reap between min and max completions from the sg character devices.
 *
 * Two counters are tracked per pass: the number of SCSI commands read
 * back from the fds, and the number of io_us completed. They match for
 * reads/writes, but one completed UNMAP command can represent several
 * trim io_us, which is what the `trims` bookkeeping below unpacks.
 *
 * Returns the number of io_us placed into sd->events[], or a negative
 * error. With min == 0 the fds are flipped to non-blocking for the
 * duration and restored before returning.
 */
static int fio_sgio_getevents(struct thread_data *td, unsigned int min,
			      unsigned int max,
			      const struct timespec fio_unused *t)
{
	struct sgio_data *sd = td->io_ops_data;
	int left = max, eventNum, ret, r = 0, trims = 0;
	void *buf = sd->sgbuf;
	unsigned int i, j, events;
	struct fio_file *f;
	struct io_u *io_u;

	/*
	 * Fill in the file descriptors
	 */
	for_each_file(td, f, i) {
		/*
		 * don't block for min events == 0
		 */
		if (!min)
			sd->fd_flags[i] = fio_set_fd_nonblocking(f->fd, "sg");
		else
			sd->fd_flags[i] = -1;

		sd->pfds[i].fd = f->fd;
		sd->pfds[i].events = POLLIN;
	}

	/*
	** There are two counters here:
	**  - number of SCSI commands completed
	**  - number of io_us completed
	**
	** These are the same with reads and writes, but
	** could differ with trim/unmap commands because
	** a single unmap can include multiple io_us
	*/

	while (left > 0) {
		char *p;

		dprint(FD_IO, "sgio_getevents: sd %p: min=%d, max=%d, left=%d\n", sd, min, max, left);

		/* when min > 0, block in poll() until something is readable */
		do {
			if (!min)
				break;

			ret = poll(sd->pfds, td->o.nr_files, -1);
			if (ret < 0) {
				if (!r)
					r = -errno;
				td_verror(td, errno, "poll");
				break;
			} else if (!ret)
				continue;

			if (pollin_events(sd->pfds, td->o.nr_files))
				break;
		} while (1);

		if (r < 0)
			break;

re_read:
		p = buf;
		events = 0;
		for_each_file(td, f, i) {
			for (eventNum = 0; eventNum < left; eventNum++) {
				ret = sg_fd_read(f->fd, p, sizeof(struct sg_io_hdr));
				dprint(FD_IO, "sgio_getevents: sg_fd_read ret: %d\n", ret);
				if (ret) {
					r = -ret;
					td_verror(td, r, "sg_read");
					break;
				}
				io_u = ((struct sg_io_hdr *)p)->usr_ptr;
				if (io_u->ddir == DDIR_TRIM) {
					/* one UNMAP completion stands in for
					 * unmap_range_count io_us */
					events += sd->trim_queues[io_u->index]->unmap_range_count;
					eventNum += sd->trim_queues[io_u->index]->unmap_range_count - 1;
				} else
					events++;

				p += sizeof(struct sg_io_hdr);
				dprint(FD_IO, "sgio_getevents: events: %d, eventNum: %d, left: %d\n", events, eventNum, left);
			}
		}

		if (r < 0 && !events)
			break;
		if (!events) {
			/* nothing reaped yet; back off briefly and retry */
			usleep(1000);
			goto re_read;
		}

		left -= events;
		r += events;

		/* fan the reaped sg_io_hdrs out into sd->events[], expanding
		 * each trim completion into its constituent io_us */
		for (i = 0; i < events; i++) {
			struct sg_io_hdr *hdr = (struct sg_io_hdr *) buf + i;
			sd->events[i + trims] = hdr->usr_ptr;
			io_u = (struct io_u *)(hdr->usr_ptr);

			if (hdr->info & SG_INFO_CHECK) {
				/* record if an io error occurred, ignore resid */
				memcpy(&io_u->hdr, hdr, sizeof(struct sg_io_hdr));
				sd->events[i + trims]->error = EIO;
			}

			if (io_u->ddir == DDIR_TRIM) {
				struct sgio_trim *st = sd->trim_queues[io_u->index];
#ifdef FIO_SGIO_DEBUG
				assert(st->trim_io_us[0] == io_u);
				assert(sd->trim_queue_map[io_u->index] == io_u->index);
				dprint(FD_IO, "sgio_getevents: reaping %d io_us from trim queue %d\n", st->unmap_range_count, io_u->index);
				dprint(FD_IO, "sgio_getevents: reaped io_u %d and stored in events[%d]\n", io_u->index, i+trims);
#endif
				/* the remaining io_us of this batch share the
				 * single command's status */
				for (j = 1; j < st->unmap_range_count; j++) {
					++trims;
					sd->events[i + trims] = st->trim_io_us[j];
#ifdef FIO_SGIO_DEBUG
					dprint(FD_IO, "sgio_getevents: reaped io_u %d and stored in events[%d]\n", st->trim_io_us[j]->index, i+trims);
					assert(sd->trim_queue_map[st->trim_io_us[j]->index] == io_u->index);
#endif
					if (hdr->info & SG_INFO_CHECK) {
						/* record if an io error occurred, ignore resid */
						memcpy(&st->trim_io_us[j]->hdr, hdr, sizeof(struct sg_io_hdr));
						sd->events[i + trims]->error = EIO;
					}
				}
				events -= st->unmap_range_count - 1;
				st->unmap_range_count = 0;
			}
		}
	}

	if (!min) {
		/* restore the original blocking mode on every fd we touched */
		for_each_file(td, f, i) {
			if (sd->fd_flags[i] == -1)
				continue;

			if (fcntl(f->fd, F_SETFL, sd->fd_flags[i]) < 0)
				log_err("fio: sg failed to restore fcntl flags: %s\n", strerror(errno));
		}
	}

	return r;
}
410
2e4ef4fb
JA
/*
 * Issue one command synchronously through the SG_IO ioctl — the path used
 * for block devices (/dev/sdX). The command always completes inline, so
 * the io_u is staged in events[0] and FIO_Q_COMPLETED is returned on
 * success; a negative ioctl result is passed straight back.
 */
static enum fio_q_status fio_sgio_ioctl_doio(struct thread_data *td,
					     struct fio_file *f,
					     struct io_u *io_u)
{
	struct sgio_data *sd = td->io_ops_data;
	struct sg_io_hdr *hdr = &io_u->hdr;
	int ret;

	sd->events[0] = io_u;

	ret = ioctl(f->fd, SG_IO, hdr);
	if (ret < 0)
		return ret;

	/* record if an io error occurred */
	if (hdr->info & SG_INFO_CHECK)
		io_u->error = EIO;

	return FIO_Q_COMPLETED;
}
431
b4b9665e
VF
/*
 * Submit one command to an sg character device by write()ing its
 * sg_io_hdr. With do_sync set, immediately read() the header back —
 * which blocks until the command completes — and return
 * FIO_Q_COMPLETED; otherwise leave the command in flight and return
 * FIO_Q_QUEUED for getevents() to reap later.
 */
static enum fio_q_status fio_sgio_rw_doio(struct fio_file *f,
					  struct io_u *io_u, int do_sync)
{
	struct sg_io_hdr *hdr = &io_u->hdr;
	int ret;

	ret = write(f->fd, hdr, sizeof(*hdr));
	if (ret < 0)
		return ret;

	if (do_sync) {
		ret = read(f->fd, hdr, sizeof(*hdr));
		if (ret < 0)
			return ret;

		/* record if an io error occurred */
		if (hdr->info & SG_INFO_CHECK)
			io_u->error = EIO;

		return FIO_Q_COMPLETED;
	}

	return FIO_Q_QUEUED;
}
456
b4b9665e
VF
/*
 * Dispatch one io_u: block devices always go through the synchronous
 * SG_IO ioctl path; character devices go through write()/read() on the
 * sg fd, synchronously or not per do_sync. td_verror() is only invoked
 * on paths where io_u->error has been filled in (i.e. completed IO).
 */
static enum fio_q_status fio_sgio_doio(struct thread_data *td,
				       struct io_u *io_u, int do_sync)
{
	struct fio_file *f = io_u->file;
	enum fio_q_status ret;

	if (f->filetype == FIO_TYPE_BLOCK) {
		ret = fio_sgio_ioctl_doio(td, f, io_u);
		td_verror(td, io_u->error, __func__);
	} else {
		ret = fio_sgio_rw_doio(f, io_u, do_sync);
		if (do_sync)
			td_verror(td, io_u->error, __func__);
	}

	return ret;
}
474
b4b9665e
VF
475static void fio_sgio_rw_lba(struct sg_io_hdr *hdr, unsigned long long lba,
476 unsigned long long nr_blocks)
477{
478 if (lba < MAX_10B_LBA) {
a824149a
DF
479 sgio_set_be32((uint32_t) lba, &hdr->cmdp[2]);
480 sgio_set_be16((uint16_t) nr_blocks, &hdr->cmdp[7]);
b4b9665e 481 } else {
a824149a
DF
482 sgio_set_be64(lba, &hdr->cmdp[2]);
483 sgio_set_be32((uint32_t) nr_blocks, &hdr->cmdp[10]);
b4b9665e
VF
484 }
485
486 return;
487}
488
2866c82d
JA
/*
 * Build the SCSI CDB for an io_u before submission.
 *
 * Reads/writes get a 10- or 16-byte CDB depending on the LBA (opcode
 * selected per direction and sg_write_mode). Trims are accumulated into
 * the current UNMAP batch: the first trim of a batch sets up the UNMAP
 * CDB and claims sd->current_queue, subsequent trims only append a
 * 16-byte range descriptor; the parameter-list lengths are filled in
 * later by fio_sgio_unmap_setup(). Sync directions become SYNCHRONIZE
 * CACHE. Returns 0, or EINVAL for buffers not aligned to the block size.
 */
static int fio_sgio_prep(struct thread_data *td, struct io_u *io_u)
{
	struct sg_io_hdr *hdr = &io_u->hdr;
	struct sg_options *o = td->eo;
	struct sgio_data *sd = td->io_ops_data;
	unsigned long long nr_blocks, lba;
	int offset;

	if (io_u->xfer_buflen & (sd->bs - 1)) {
		log_err("read/write not sector aligned\n");
		return EINVAL;
	}

	nr_blocks = io_u->xfer_buflen / sd->bs;
	lba = io_u->offset / sd->bs;

	if (io_u->ddir == DDIR_READ) {
		sgio_hdr_init(sd, hdr, io_u, 1);

		hdr->dxfer_direction = SG_DXFER_FROM_DEV;
		if (lba < MAX_10B_LBA)
			hdr->cmdp[0] = 0x28; // read(10)
		else
			hdr->cmdp[0] = 0x88; // read(16)

		if (o->readfua)
			hdr->cmdp[1] |= 0x08;

		fio_sgio_rw_lba(hdr, lba, nr_blocks);

	} else if (io_u->ddir == DDIR_WRITE) {
		sgio_hdr_init(sd, hdr, io_u, 1);

		hdr->dxfer_direction = SG_DXFER_TO_DEV;
		switch(o->write_mode) {
		case FIO_SG_WRITE:
			if (lba < MAX_10B_LBA)
				hdr->cmdp[0] = 0x2a; // write(10)
			else
				hdr->cmdp[0] = 0x8a; // write(16)
			if (o->writefua)
				hdr->cmdp[1] |= 0x08;
			break;
		case FIO_SG_WRITE_VERIFY:
			if (lba < MAX_10B_LBA)
				hdr->cmdp[0] = 0x2e; // write and verify(10)
			else
				hdr->cmdp[0] = 0x8e; // write and verify(16)
			break;
			// BYTCHK is disabled by virtue of the memset in sgio_hdr_init
		case FIO_SG_WRITE_SAME:
			/* WRITE SAME transfers a single block of data */
			hdr->dxfer_len = sd->bs;
			if (lba < MAX_10B_LBA)
				hdr->cmdp[0] = 0x41; // write same(10)
			else
				hdr->cmdp[0] = 0x93; // write same(16)
			break;
		};

		fio_sgio_rw_lba(hdr, lba, nr_blocks);

	} else if (io_u->ddir == DDIR_TRIM) {
		struct sgio_trim *st;

		if (sd->current_queue == -1) {
			/* no open batch: this io_u starts a new UNMAP */
			sgio_hdr_init(sd, hdr, io_u, 0);

			hdr->cmd_len = 10;
			hdr->dxfer_direction = SG_DXFER_TO_DEV;
			hdr->cmdp[0] = 0x42; // unmap
			sd->current_queue = io_u->index;
			st = sd->trim_queues[sd->current_queue];
			hdr->dxferp = st->unmap_param;
#ifdef FIO_SGIO_DEBUG
			assert(sd->trim_queues[io_u->index]->unmap_range_count == 0);
			dprint(FD_IO, "sg: creating new queue based on io_u %d\n", io_u->index);
#endif
		}
		else
			st = sd->trim_queues[sd->current_queue];

		dprint(FD_IO, "sg: adding io_u %d to trim queue %d\n", io_u->index, sd->current_queue);
		st->trim_io_us[st->unmap_range_count] = io_u;
#ifdef FIO_SGIO_DEBUG
		sd->trim_queue_map[io_u->index] = sd->current_queue;
#endif

		/* append a 16-byte range descriptor after the 8-byte header */
		offset = 8 + 16 * st->unmap_range_count;
		sgio_set_be64(lba, &st->unmap_param[offset]);
		sgio_set_be32((uint32_t) nr_blocks, &st->unmap_param[offset + 8]);

		st->unmap_range_count++;

	} else if (ddir_sync(io_u->ddir)) {
		sgio_hdr_init(sd, hdr, io_u, 0);
		hdr->dxfer_direction = SG_DXFER_NONE;
		if (lba < MAX_10B_LBA)
			hdr->cmdp[0] = 0x35; // synccache(10)
		else
			hdr->cmdp[0] = 0x91; // synccache(16)
	} else
		assert(0);

	return 0;
}
594
b4b9665e
VF
/*
 * Finalize an UNMAP command once the number of ranges is known: the
 * transfer and CDB allocation length cover the 8-byte parameter-list
 * header plus 16 bytes per range; the list header itself carries the
 * data length (ranges*16 + 6) and block-descriptor length (ranges*16),
 * per the SBC UNMAP parameter list layout.
 */
static void fio_sgio_unmap_setup(struct sg_io_hdr *hdr, struct sgio_trim *st)
{
	uint16_t cnt = st->unmap_range_count * 16;

	hdr->dxfer_len = cnt + 8;
	sgio_set_be16(cnt + 8, &hdr->cmdp[7]);		/* CDB allocation length */
	sgio_set_be16(cnt + 6, st->unmap_param);	/* unmap data length */
	sgio_set_be16(cnt, &st->unmap_param[2]);	/* block descriptor length */

	return;
}
606
2e4ef4fb
JA
/*
 * Queue entry point. Trims on the async character-device path are only
 * batched here and submitted later in commit(); everything else (and
 * trims on the sync/block path, whose UNMAP CDB is finalized inline) is
 * dispatched via fio_sgio_doio(). On the async engine variant
 * (commit != NULL) the submit/complete accounting that td_io_queue()
 * would otherwise do is performed here — see the file-top comment.
 */
static enum fio_q_status fio_sgio_queue(struct thread_data *td,
					struct io_u *io_u)
{
	struct sg_io_hdr *hdr = &io_u->hdr;
	struct sgio_data *sd = td->io_ops_data;
	int ret, do_sync = 0;

	fio_ro_check(td, io_u);

	if (sgio_unbuffered(td) || ddir_sync(io_u->ddir))
		do_sync = 1;

	if (io_u->ddir == DDIR_TRIM) {
		if (do_sync || io_u->file->filetype == FIO_TYPE_BLOCK) {
			struct sgio_trim *st = sd->trim_queues[sd->current_queue];

			/* finish cdb setup for unmap because we are
			** doing unmap commands synchronously */
#ifdef FIO_SGIO_DEBUG
			assert(st->unmap_range_count == 1);
			assert(io_u == st->trim_io_us[0]);
#endif
			hdr = &io_u->hdr;

			fio_sgio_unmap_setup(hdr, st);

			st->unmap_range_count = 0;
			sd->current_queue = -1;
		} else
			/* queue up trim ranges and submit in commit() */
			return FIO_Q_QUEUED;
	}

	ret = fio_sgio_doio(td, io_u, do_sync);

	if (ret < 0)
		io_u->error = errno;
	else if (hdr->status) {
		/* SCSI-level failure: remember residual and flag EIO */
		io_u->resid = hdr->resid;
		io_u->error = EIO;
	} else if (td->io_ops->commit != NULL) {
		/* async engine variant: do the accounting ourselves */
		if (do_sync && !ddir_sync(io_u->ddir)) {
			io_u_mark_submit(td, 1);
			io_u_mark_complete(td, 1);
		} else if (io_u->ddir == DDIR_READ || io_u->ddir == DDIR_WRITE) {
			io_u_mark_submit(td, 1);
			io_u_queued(td, io_u);
		}
	}

	if (io_u->error) {
		td_verror(td, io_u->error, "xfer");
		return FIO_Q_COMPLETED;
	}

	return ret;
}
664
b4b9665e
VF
665static int fio_sgio_commit(struct thread_data *td)
666{
667 struct sgio_data *sd = td->io_ops_data;
668 struct sgio_trim *st;
669 struct io_u *io_u;
670 struct sg_io_hdr *hdr;
671 struct timespec now;
672 unsigned int i;
673 int ret;
674
675 if (sd->current_queue == -1)
676 return 0;
677
678 st = sd->trim_queues[sd->current_queue];
679 io_u = st->trim_io_us[0];
680 hdr = &io_u->hdr;
681
682 fio_sgio_unmap_setup(hdr, st);
683
684 sd->current_queue = -1;
685
686 ret = fio_sgio_rw_doio(io_u->file, io_u, 0);
687
53ee8c17
VF
688 if (ret < 0 || hdr->status) {
689 int error;
690
691 if (ret < 0)
692 error = errno;
693 else {
694 error = EIO;
695 ret = -EIO;
b4b9665e 696 }
53ee8c17
VF
697
698 for (i = 0; i < st->unmap_range_count; i++) {
699 st->trim_io_us[i]->error = error;
700 clear_io_u(td, st->trim_io_us[i]);
701 if (hdr->status)
702 st->trim_io_us[i]->resid = hdr->resid;
b4b9665e 703 }
53ee8c17
VF
704
705 td_verror(td, error, "xfer");
706 return ret;
b4b9665e
VF
707 }
708
53ee8c17
VF
709 if (fio_fill_issue_time(td)) {
710 fio_gettime(&now, NULL);
711 for (i = 0; i < st->unmap_range_count; i++) {
712 memcpy(&st->trim_io_us[i]->issue_time, &now, sizeof(now));
713 io_u_queued(td, io_u);
714 }
b4b9665e 715 }
53ee8c17 716 io_u_mark_submit(td, st->unmap_range_count);
b4b9665e 717
53ee8c17 718 return 0;
b4b9665e
VF
719}
720
2866c82d
JA
721static struct io_u *fio_sgio_event(struct thread_data *td, int event)
722{
565e784d 723 struct sgio_data *sd = td->io_ops_data;
2866c82d
JA
724
725 return sd->events[event];
726}
727
5ad7be56
KD
/*
 * Determine device block size and max LBA with READ CAPACITY, issued on
 * a private fd because this runs before sd/io_u structures exist. Tries
 * RCAP(10) first; if that check-conditions or reports the 0xFFFFFFFF
 * sentinel, retries with RCAP(16) via the SERVICE ACTION IN opcode.
 * Returns 0 with *bs/*max_lba filled, -errno on open/ioctl failure, or
 * EIO if no valid block size could be obtained.
 */
static int fio_sgio_read_capacity(struct thread_data *td, unsigned int *bs,
				  unsigned long long *max_lba)
{
	/*
	 * need to do read capacity operation w/o benefit of sd or
	 * io_u structures, which are not initialized until later.
	 */
	struct sg_io_hdr hdr;
	unsigned long long hlba;
	unsigned int blksz = 0;
	unsigned char cmd[16];
	unsigned char sb[64];
	unsigned char buf[32]; // read capacity return
	int ret;
	int fd = -1;

	struct fio_file *f = td->files[0];

	/* open file independent of rest of application */
	fd = open(f->file_name, O_RDONLY);
	if (fd < 0)
		return -errno;

	memset(&hdr, 0, sizeof(hdr));
	memset(cmd, 0, sizeof(cmd));
	memset(sb, 0, sizeof(sb));
	memset(buf, 0, sizeof(buf));

	/* First let's try a 10 byte read capacity. */
	hdr.interface_id = 'S';
	hdr.cmdp = cmd;
	hdr.cmd_len = 10;
	hdr.sbp = sb;
	hdr.mx_sb_len = sizeof(sb);
	hdr.timeout = SCSI_TIMEOUT_MS;
	hdr.cmdp[0] = 0x25;  // Read Capacity(10)
	hdr.dxfer_direction = SG_DXFER_FROM_DEV;
	hdr.dxferp = buf;
	hdr.dxfer_len = sizeof(buf);

	ret = ioctl(fd, SG_IO, &hdr);
	if (ret < 0) {
		close(fd);
		return ret;
	}

	if (hdr.info & SG_INFO_CHECK) {
		/* RCAP(10) might be unsupported by device. Force RCAP(16) */
		hlba = MAX_10B_LBA;
	} else {
		blksz = sgio_get_be32(&buf[4]);
		hlba = sgio_get_be32(buf);
	}

	/*
	 * If max lba masked by MAX_10B_LBA equals MAX_10B_LBA,
	 * then need to retry with 16 byte Read Capacity command.
	 */
	if (hlba == MAX_10B_LBA) {
		hdr.cmd_len = 16;
		hdr.cmdp[0] = 0x9e; // service action
		hdr.cmdp[1] = 0x10; // Read Capacity(16)
		sgio_set_be32(sizeof(buf), &hdr.cmdp[10]);

		hdr.dxfer_direction = SG_DXFER_FROM_DEV;
		hdr.dxferp = buf;
		hdr.dxfer_len = sizeof(buf);

		ret = ioctl(fd, SG_IO, &hdr);
		if (ret < 0) {
			close(fd);
			return ret;
		}

		/* record if an io error occurred */
		if (hdr.info & SG_INFO_CHECK)
			td_verror(td, EIO, "fio_sgio_read_capacity");

		blksz = sgio_get_be32(&buf[8]);
		hlba = sgio_get_be64(buf);
	}

	if (blksz) {
		*bs = blksz;
		*max_lba = hlba;
		ret = 0;
	} else {
		ret = EIO;
	}

	close(fd);
	return ret;
}
821
/*
 * Release everything fio_sgio_init() allocated: the flat per-io_u/per-file
 * arrays, then each of the iodepth trim queues and their buffers, and
 * finally the sgio_data itself.
 */
static void fio_sgio_cleanup(struct thread_data *td)
{
	struct sgio_data *sd = td->io_ops_data;
	int i;

	if (sd) {
		free(sd->events);
		free(sd->cmds);
		free(sd->fd_flags);
		free(sd->pfds);
		free(sd->sgbuf);
#ifdef FIO_SGIO_DEBUG
		free(sd->trim_queue_map);
#endif

		for (i = 0; i < td->o.iodepth; i++) {
			free(sd->trim_queues[i]->unmap_param);
			free(sd->trim_queues[i]->trim_io_us);
			free(sd->trim_queues[i]);
		}

		free(sd->trim_queues);
		free(sd);
	}
}
847
/*
 * Allocate per-thread engine state sized by iodepth/nr_files and hang it
 * off td->io_ops_data. Each of the iodepth trim queues gets an UNMAP
 * parameter buffer big enough for an 8-byte header plus one 16-byte range
 * per possible io_u (hence iodepth + 1 entries of 16 bytes).
 * NOTE(review): calloc() results are not checked here — fio appears to
 * rely on aborting elsewhere on OOM; confirm before adding checks.
 */
static int fio_sgio_init(struct thread_data *td)
{
	struct sgio_data *sd;
	struct sgio_trim *st;
	int i;

	sd = calloc(1, sizeof(*sd));
	sd->cmds = calloc(td->o.iodepth, sizeof(struct sgio_cmd));
	sd->sgbuf = calloc(td->o.iodepth, sizeof(struct sg_io_hdr));
	sd->events = calloc(td->o.iodepth, sizeof(struct io_u *));
	sd->pfds = calloc(td->o.nr_files, sizeof(struct pollfd));
	sd->fd_flags = calloc(td->o.nr_files, sizeof(int));
	sd->type_checked = 0;

	sd->trim_queues = calloc(td->o.iodepth, sizeof(struct sgio_trim *));
	sd->current_queue = -1;
#ifdef FIO_SGIO_DEBUG
	sd->trim_queue_map = calloc(td->o.iodepth, sizeof(int));
#endif
	for (i = 0; i < td->o.iodepth; i++) {
		sd->trim_queues[i] = calloc(1, sizeof(struct sgio_trim));
		st = sd->trim_queues[i];
		st->unmap_param = calloc(td->o.iodepth + 1, sizeof(char[16]));
		st->unmap_range_count = 0;
		st->trim_io_us = calloc(td->o.iodepth, sizeof(struct io_u *));
	}

	td->io_ops_data = sd;

	/*
	 * we want to do it, regardless of whether odirect is set or not
	 */
	td->o.override_sync = 1;
	return 0;
}
883
/*
 * One-time per-file validation: learn the block size (BLKSSZGET for
 * block devices, READ CAPACITY for sg character devices), reject other
 * file types, and — for block devices — strip the async hooks from the
 * io_ops so the engine runs fully synchronously. Returns 0 on success,
 * 1 on failure (with td error set).
 */
static int fio_sgio_type_check(struct thread_data *td, struct fio_file *f)
{
	struct sgio_data *sd = td->io_ops_data;
	unsigned int bs = 0;
	unsigned long long max_lba = 0;

	if (f->filetype == FIO_TYPE_BLOCK) {
		if (ioctl(f->fd, BLKSSZGET, &bs) < 0) {
			td_verror(td, errno, "ioctl");
			return 1;
		}
	} else if (f->filetype == FIO_TYPE_CHAR) {
		int version, ret;

		/* verify this really is an sg device */
		if (ioctl(f->fd, SG_GET_VERSION_NUM, &version) < 0) {
			td_verror(td, errno, "ioctl");
			return 1;
		}

		ret = fio_sgio_read_capacity(td, &bs, &max_lba);
		if (ret) {
			td_verror(td, td->error, "fio_sgio_read_capacity");
			log_err("ioengine sg unable to read capacity successfully\n");
			return 1;
		}
	} else {
		td_verror(td, EINVAL, "wrong file type");
		log_err("ioengine sg only works on block or character devices\n");
		return 1;
	}

	sd->bs = bs;
	// Determine size of commands needed based on max_lba
	if (max_lba >= MAX_10B_LBA) {
		dprint(FD_IO, "sgio_type_check: using 16 byte read/write "
			"commands for lba above 0x%016llx/0x%016llx\n",
			MAX_10B_LBA, max_lba);
	}

	if (f->filetype == FIO_TYPE_BLOCK) {
		td->io_ops->getevents = NULL;
		td->io_ops->event = NULL;
		td->io_ops->commit = NULL;
		/*
		** Setting these functions to null may cause problems
		** with filename=/dev/sda:/dev/sg0 since we are only
		** considering a single file
		*/
	}
	sd->type_checked = 1;

	return 0;
}
937
/*
 * Open hook: do the generic open, then run the one-time type check on
 * the first file opened. If the check fails the file is closed again
 * and 1 returned so fio aborts the job.
 */
static int fio_sgio_open(struct thread_data *td, struct fio_file *f)
{
	struct sgio_data *sd = td->io_ops_data;
	int ret;

	ret = generic_open_file(td, f);
	if (ret)
		return ret;

	if (sd && !sd->type_checked && fio_sgio_type_check(td, f)) {
		ret = generic_close_file(td, f);
		return 1;
	}

	return 0;
}
954
5ad7be56
KD
955/*
956 * Build an error string with details about the driver, host or scsi
957 * error contained in the sg header Caller will use as necessary.
958 */
959static char *fio_sgio_errdetails(struct io_u *io_u)
960{
961 struct sg_io_hdr *hdr = &io_u->hdr;
962#define MAXERRDETAIL 1024
963#define MAXMSGCHUNK 128
fd04fa03 964 char *msg, msgchunk[MAXMSGCHUNK];
5ad7be56
KD
965 int i;
966
efa72f25 967 msg = calloc(1, MAXERRDETAIL);
fd04fa03 968 strcpy(msg, "");
5ad7be56
KD
969
970 /*
971 * can't seem to find sg_err.h, so I'll just echo the define values
972 * so others can search on internet to find clearer clues of meaning.
973 */
974 if (hdr->info & SG_INFO_CHECK) {
5ad7be56
KD
975 if (hdr->host_status) {
976 snprintf(msgchunk, MAXMSGCHUNK, "SG Host Status: 0x%02x; ", hdr->host_status);
977 strlcat(msg, msgchunk, MAXERRDETAIL);
978 switch (hdr->host_status) {
979 case 0x01:
980 strlcat(msg, "SG_ERR_DID_NO_CONNECT", MAXERRDETAIL);
981 break;
982 case 0x02:
983 strlcat(msg, "SG_ERR_DID_BUS_BUSY", MAXERRDETAIL);
984 break;
985 case 0x03:
986 strlcat(msg, "SG_ERR_DID_TIME_OUT", MAXERRDETAIL);
987 break;
988 case 0x04:
989 strlcat(msg, "SG_ERR_DID_BAD_TARGET", MAXERRDETAIL);
990 break;
991 case 0x05:
992 strlcat(msg, "SG_ERR_DID_ABORT", MAXERRDETAIL);
993 break;
994 case 0x06:
995 strlcat(msg, "SG_ERR_DID_PARITY", MAXERRDETAIL);
996 break;
997 case 0x07:
998 strlcat(msg, "SG_ERR_DID_ERROR (internal error)", MAXERRDETAIL);
999 break;
1000 case 0x08:
1001 strlcat(msg, "SG_ERR_DID_RESET", MAXERRDETAIL);
1002 break;
1003 case 0x09:
1004 strlcat(msg, "SG_ERR_DID_BAD_INTR (unexpected)", MAXERRDETAIL);
1005 break;
1006 case 0x0a:
1007 strlcat(msg, "SG_ERR_DID_PASSTHROUGH", MAXERRDETAIL);
1008 break;
1009 case 0x0b:
1010 strlcat(msg, "SG_ERR_DID_SOFT_ERROR (driver retry?)", MAXERRDETAIL);
1011 break;
1012 case 0x0c:
1013 strlcat(msg, "SG_ERR_DID_IMM_RETRY", MAXERRDETAIL);
1014 break;
1015 case 0x0d:
1016 strlcat(msg, "SG_ERR_DID_REQUEUE", MAXERRDETAIL);
1017 break;
2ce6c6e5
TK
1018 case 0x0e:
1019 strlcat(msg, "SG_ERR_DID_TRANSPORT_DISRUPTED", MAXERRDETAIL);
1020 break;
1021 case 0x0f:
1022 strlcat(msg, "SG_ERR_DID_TRANSPORT_FAILFAST", MAXERRDETAIL);
1023 break;
1024 case 0x10:
1025 strlcat(msg, "SG_ERR_DID_TARGET_FAILURE", MAXERRDETAIL);
1026 break;
1027 case 0x11:
1028 strlcat(msg, "SG_ERR_DID_NEXUS_FAILURE", MAXERRDETAIL);
1029 break;
1030 case 0x12:
1031 strlcat(msg, "SG_ERR_DID_ALLOC_FAILURE", MAXERRDETAIL);
1032 break;
1033 case 0x13:
1034 strlcat(msg, "SG_ERR_DID_MEDIUM_ERROR", MAXERRDETAIL);
1035 break;
5ad7be56
KD
1036 default:
1037 strlcat(msg, "Unknown", MAXERRDETAIL);
1038 break;
1039 }
1040 strlcat(msg, ". ", MAXERRDETAIL);
1041 }
1042 if (hdr->driver_status) {
1043 snprintf(msgchunk, MAXMSGCHUNK, "SG Driver Status: 0x%02x; ", hdr->driver_status);
1044 strlcat(msg, msgchunk, MAXERRDETAIL);
1045 switch (hdr->driver_status & 0x0F) {
1046 case 0x01:
1047 strlcat(msg, "SG_ERR_DRIVER_BUSY", MAXERRDETAIL);
1048 break;
1049 case 0x02:
1050 strlcat(msg, "SG_ERR_DRIVER_SOFT", MAXERRDETAIL);
1051 break;
1052 case 0x03:
1053 strlcat(msg, "SG_ERR_DRIVER_MEDIA", MAXERRDETAIL);
1054 break;
1055 case 0x04:
1056 strlcat(msg, "SG_ERR_DRIVER_ERROR", MAXERRDETAIL);
1057 break;
1058 case 0x05:
1059 strlcat(msg, "SG_ERR_DRIVER_INVALID", MAXERRDETAIL);
1060 break;
1061 case 0x06:
1062 strlcat(msg, "SG_ERR_DRIVER_TIMEOUT", MAXERRDETAIL);
1063 break;
1064 case 0x07:
1065 strlcat(msg, "SG_ERR_DRIVER_HARD", MAXERRDETAIL);
1066 break;
1067 case 0x08:
1068 strlcat(msg, "SG_ERR_DRIVER_SENSE", MAXERRDETAIL);
1069 break;
1070 default:
1071 strlcat(msg, "Unknown", MAXERRDETAIL);
1072 break;
1073 }
1074 strlcat(msg, "; ", MAXERRDETAIL);
1075 switch (hdr->driver_status & 0xF0) {
1076 case 0x10:
1077 strlcat(msg, "SG_ERR_SUGGEST_RETRY", MAXERRDETAIL);
1078 break;
1079 case 0x20:
1080 strlcat(msg, "SG_ERR_SUGGEST_ABORT", MAXERRDETAIL);
1081 break;
1082 case 0x30:
1083 strlcat(msg, "SG_ERR_SUGGEST_REMAP", MAXERRDETAIL);
1084 break;
1085 case 0x40:
1086 strlcat(msg, "SG_ERR_SUGGEST_DIE", MAXERRDETAIL);
1087 break;
1088 case 0x80:
1089 strlcat(msg, "SG_ERR_SUGGEST_SENSE", MAXERRDETAIL);
1090 break;
1091 }
1092 strlcat(msg, ". ", MAXERRDETAIL);
1093 }
1094 if (hdr->status) {
1095 snprintf(msgchunk, MAXMSGCHUNK, "SG SCSI Status: 0x%02x; ", hdr->status);
1096 strlcat(msg, msgchunk, MAXERRDETAIL);
1097 // SCSI 3 status codes
1098 switch (hdr->status) {
1099 case 0x02:
1100 strlcat(msg, "CHECK_CONDITION", MAXERRDETAIL);
1101 break;
1102 case 0x04:
1103 strlcat(msg, "CONDITION_MET", MAXERRDETAIL);
1104 break;
1105 case 0x08:
1106 strlcat(msg, "BUSY", MAXERRDETAIL);
1107 break;
1108 case 0x10:
1109 strlcat(msg, "INTERMEDIATE", MAXERRDETAIL);
1110 break;
1111 case 0x14:
1112 strlcat(msg, "INTERMEDIATE_CONDITION_MET", MAXERRDETAIL);
1113 break;
1114 case 0x18:
1115 strlcat(msg, "RESERVATION_CONFLICT", MAXERRDETAIL);
1116 break;
1117 case 0x22:
1118 strlcat(msg, "COMMAND_TERMINATED", MAXERRDETAIL);
1119 break;
1120 case 0x28:
1121 strlcat(msg, "TASK_SET_FULL", MAXERRDETAIL);
1122 break;
1123 case 0x30:
1124 strlcat(msg, "ACA_ACTIVE", MAXERRDETAIL);
1125 break;
1126 case 0x40:
1127 strlcat(msg, "TASK_ABORTED", MAXERRDETAIL);
1128 break;
1129 default:
1130 strlcat(msg, "Unknown", MAXERRDETAIL);
1131 break;
1132 }
1133 strlcat(msg, ". ", MAXERRDETAIL);
1134 }
1135 if (hdr->sb_len_wr) {
1136 snprintf(msgchunk, MAXMSGCHUNK, "Sense Data (%d bytes):", hdr->sb_len_wr);
1137 strlcat(msg, msgchunk, MAXERRDETAIL);
1138 for (i = 0; i < hdr->sb_len_wr; i++) {
1139 snprintf(msgchunk, MAXMSGCHUNK, " %02x", hdr->sbp[i]);
1140 strlcat(msg, msgchunk, MAXERRDETAIL);
1141 }
1142 strlcat(msg, ". ", MAXERRDETAIL);
1143 }
1144 if (hdr->resid != 0) {
1145 snprintf(msgchunk, MAXMSGCHUNK, "SG Driver: %d bytes out of %d not transferred. ", hdr->resid, hdr->dxfer_len);
1146 strlcat(msg, msgchunk, MAXERRDETAIL);
5ad7be56 1147 }
b4dbb3ce
VF
1148 if (hdr->cmdp) {
1149 strlcat(msg, "cdb:", MAXERRDETAIL);
1150 for (i = 0; i < hdr->cmd_len; i++) {
1151 snprintf(msgchunk, MAXMSGCHUNK, " %02x", hdr->cmdp[i]);
1152 strlcat(msg, msgchunk, MAXERRDETAIL);
1153 }
1154 strlcat(msg, ". ", MAXERRDETAIL);
1155 if (io_u->ddir == DDIR_TRIM) {
1156 unsigned char *param_list = hdr->dxferp;
1157 strlcat(msg, "dxferp:", MAXERRDETAIL);
1158 for (i = 0; i < hdr->dxfer_len; i++) {
1159 snprintf(msgchunk, MAXMSGCHUNK, " %02x", param_list[i]);
1160 strlcat(msg, msgchunk, MAXERRDETAIL);
1161 }
1162 strlcat(msg, ". ", MAXERRDETAIL);
1163 }
1164 }
5ad7be56
KD
1165 }
1166
fd04fa03
TK
1167 if (!(hdr->info & SG_INFO_CHECK) && !strlen(msg))
1168 strncpy(msg, "SG Driver did not report a Host, Driver or Device check",
1169 MAXERRDETAIL - 1);
5ad7be56 1170
fd04fa03 1171 return msg;
5ad7be56
KD
1172}
1173
1174/*
1175 * get max file size from read capacity.
1176 */
1177static int fio_sgio_get_file_size(struct thread_data *td, struct fio_file *f)
1178{
1179 /*
1180 * get_file_size is being called even before sgio_init is
1181 * called, so none of the sg_io structures are
1182 * initialized in the thread_data yet. So we need to do the
1183 * ReadCapacity without any of those helpers. One of the effects
1184 * is that ReadCapacity may get called 4 times on each open:
1185 * readcap(10) followed by readcap(16) if needed - just to get
1186 * the file size after the init occurs - it will be called
1187 * again when "type_check" is called during structure
1188 * initialization I'm not sure how to prevent this little
1189 * inefficiency.
1190 */
1191 unsigned int bs = 0;
1192 unsigned long long max_lba = 0;
1193 int ret;
1194
1195 if (fio_file_size_known(f))
1196 return 0;
1197
686fbd31 1198 if (f->filetype != FIO_TYPE_BLOCK && f->filetype != FIO_TYPE_CHAR) {
30dac136
TK
1199 td_verror(td, EINVAL, "wrong file type");
1200 log_err("ioengine sg only works on block or character devices\n");
1201 return 1;
1202 }
1203
5ad7be56
KD
1204 ret = fio_sgio_read_capacity(td, &bs, &max_lba);
1205 if (ret ) {
1206 td_verror(td, td->error, "fio_sgio_read_capacity");
1207 log_err("ioengine sg unable to successfully execute read capacity to get block size and maximum lba\n");
1208 return 1;
1209 }
1210
1211 f->real_file_size = (max_lba + 1) * bs;
1212 fio_file_set_size_known(f);
1213 return 0;
1214}
1215
1216
/*
 * Ops table for the real SG engine (FIO_HAVE_SGIO build).
 * FIO_SYNCIO is set even though the engine can queue asynchronously on
 * /dev/sgY; see the mode discussion in the header comment of this file.
 */
static struct ioengine_ops ioengine = {
	.name = "sg",
	.version = FIO_IOOPS_VERSION,
	.init = fio_sgio_init,
	.prep = fio_sgio_prep,
	.queue = fio_sgio_queue,
	.commit = fio_sgio_commit,
	.getevents = fio_sgio_getevents,
	.errdetails = fio_sgio_errdetails,
	.event = fio_sgio_event,
	.cleanup = fio_sgio_cleanup,
	.open_file = fio_sgio_open,
	.close_file = generic_close_file,	/* no engine-specific close needed */
	.get_file_size = fio_sgio_get_file_size,
	.flags = FIO_SYNCIO | FIO_RAWIO,
	.options = options,
	.option_struct_size = sizeof(struct sg_options)
};
34cfcdaf
JA
1235
1236#else /* FIO_HAVE_SGIO */
1237
/*
 * When we have a proper configure system in place, we simply won't build
 * and install this io engine. For now install a crippled version that
 * just complains and fails to load.
 */
/*
 * Stub init for builds without SG support: log an error and return
 * non-zero so the engine refuses to load.
 */
static int fio_sgio_init(struct thread_data fio_unused *td)
{
	log_err("fio: ioengine sg not available\n");
	return 1;
}
1248
/*
 * Crippled ops table for builds without SG support; only .init is set,
 * and it always fails (see the stub fio_sgio_init above this table).
 */
static struct ioengine_ops ioengine = {
	.name = "sg",
	.version = FIO_IOOPS_VERSION,
	.init = fio_sgio_init,
};
1254
1255#endif
5f350952
JA
1256
/* Runs at load time (fio_init constructor) to make the engine selectable. */
static void fio_init fio_sgio_register(void)
{
	register_ioengine(&ioengine);
}
1261
/* Runs at unload time (fio_exit destructor) to remove the engine. */
static void fio_exit fio_sgio_unregister(void)
{
	unregister_ioengine(&ioengine);
}