4 * IO engine that uses the Linux SG v3 interface to talk to SCSI devices
19 unsigned char cdb[10];
24 struct sgio_cmd *cmds;
/*
 * Initialize the sg_io_hdr for one io_u: zero the header and this
 * slot's CDB storage, then fill in the fields common to all commands.
 * 'fs' selects whether a data transfer buffer is attached (read/write)
 * or not (no-data commands).
 */
static void sgio_hdr_init(struct sgio_data *sd, struct sg_io_hdr *hdr,
	struct io_u *io_u, int fs)
	/* per-io_u command slot, indexed by io_u->index; owns the CDB bytes */
	struct sgio_cmd *sc = &sd->cmds[io_u->index];

	memset(hdr, 0, sizeof(*hdr));
	memset(sc->cdb, 0, sizeof(sc->cdb));

	hdr->interface_id = 'S';	/* mandatory magic for the SG v3 interface */
	hdr->cmd_len = sizeof(sc->cdb);	/* 10-byte CDB */
	hdr->pack_id = io_u->index;	/* lets completions be matched back to io_us */
	/* attach the fio transfer buffer; NOTE(review): presumably guarded
	 * by 'fs' — the conditional is not visible in this view, confirm */
	hdr->dxferp = io_u->xfer_buf;
	hdr->dxfer_len = io_u->xfer_buflen;
/*
 * Scan a pollfd array and report whether any descriptor is readable.
 *
 * @pfds: array of pollfd entries, revents filled in by poll()
 * @fds:  number of entries in @pfds
 *
 * Returns 1 if at least one entry has POLLIN set in revents, 0 otherwise.
 * Fix: the visible original fell off the end of a non-void function
 * (undefined behavior); both outcomes are now returned explicitly.
 */
static int pollin_events(struct pollfd *pfds, int fds)
{
	int i;

	for (i = 0; i < fds; i++)
		if (pfds[i].revents & POLLIN)
			return 1;

	return 0;
}
/*
 * Reap completed commands. Puts all job files into nonblocking mode,
 * polls them for readable completion headers, reads sg_io_hdrs back
 * from each fd, maps them to io_us via usr_ptr, then restores the
 * original fd flags before returning the number of events.
 */
static int fio_sgio_getevents(struct thread_data *td, unsigned int min,
	unsigned int max, struct timespec fio_unused *t)
	struct sgio_data *sd = td->io_ops->data;
	int left = max, ret, r = 0;	/* left: completions still wanted */
	void *buf = sd->sgbuf;		/* scratch area for read-back headers */
	unsigned int i, events;

	/*
	 * Fill in the file descriptors
	 */
	for_each_file(td, f, i) {
		/*
		 * don't block for min events == 0
		 */
		sd->fd_flags[i] = fio_set_fd_nonblocking(f->fd, "sg");

		sd->pfds[i].fd = f->fd;
		sd->pfds[i].events = POLLIN;

	/* wait indefinitely for at least one fd to have a completion */
	ret = poll(sd->pfds, td->o.nr_files, -1);
		td_verror(td, errno, "poll");	/* NOTE(review): guard for ret < 0 is elided here */
	if (pollin_events(sd->pfds, td->o.nr_files))

	/* drain completed sg_io_hdrs from every file */
	for_each_file(td, f, i) {
		ret = read(f->fd, p, left * sizeof(struct sg_io_hdr));
			td_verror(td, errno, "read");	/* presumably only for errors other than EAGAIN — confirm */
		events += ret / sizeof(struct sg_io_hdr);	/* whole headers read back */

	/* translate read-back headers into completed io_us */
	for (i = 0; i < events; i++) {
		struct sg_io_hdr *hdr = (struct sg_io_hdr *) buf + i;

		/* usr_ptr was set to the submitting io_u in sgio_hdr_init() */
		sd->events[i] = hdr->usr_ptr;

	/* restore the descriptor flags saved before polling */
	for_each_file(td, f, i) {
		if (sd->fd_flags[i] == -1)	/* flags could not be saved; skip */

		if (fcntl(f->fd, F_SETFL, sd->fd_flags[i]) < 0)
			log_err("fio: sg failed to restore fcntl flags: %s\n", strerror(errno));
/*
 * Issue one command via the SG_IO ioctl — the path used for block
 * devices. SG_IO is synchronous, so the io_u is parked in sd->events[0]
 * and the request is complete when the ioctl returns.
 */
static int fio_sgio_ioctl_doio(struct thread_data *td,
	struct fio_file *f, struct io_u *io_u)
	struct sgio_data *sd = td->io_ops->data;
	struct sg_io_hdr *hdr = &io_u->hdr;

	/* single in-flight command on this path; event slot 0 is ours */
	sd->events[0] = io_u;

	ret = ioctl(f->fd, SG_IO, hdr);	/* blocks until the command finishes */

	return FIO_Q_COMPLETED;
/*
 * Issue one command through the sg character device: writing the
 * sg_io_hdr submits the command, reading it back reaps the completion.
 */
static int fio_sgio_rw_doio(struct fio_file *f, struct io_u *io_u, int do_sync)
	struct sg_io_hdr *hdr = &io_u->hdr;

	ret = write(f->fd, hdr, sizeof(*hdr));	/* submit */

	/* NOTE(review): the read-back presumably happens only when do_sync
	 * is set (async completions are reaped in getevents) — confirm */
	ret = read(f->fd, hdr, sizeof(*hdr));

	return FIO_Q_COMPLETED;
194 static int fio_sgio_doio(struct thread_data *td, struct io_u *io_u, int do_sync)
196 struct fio_file *f = io_u->file;
198 if (f->filetype == FIO_TYPE_BD)
199 return fio_sgio_ioctl_doio(td, f, io_u);
201 return fio_sgio_rw_doio(f, io_u, do_sync);
/*
 * Build the SCSI CDB for an io_u. Reads and writes get a 10-byte
 * READ/WRITE-style CDB with big-endian LBA and block count; anything
 * else becomes a command with no data transfer.
 */
static int fio_sgio_prep(struct thread_data *td, struct io_u *io_u)
	struct sg_io_hdr *hdr = &io_u->hdr;
	struct sgio_data *sd = td->io_ops->data;

	/* sd->bs is the device sector size; transfers must be aligned */
	if (io_u->xfer_buflen & (sd->bs - 1)) {
		log_err("read/write not sector aligned\n");

	if (io_u->ddir == DDIR_READ) {
		sgio_hdr_init(sd, hdr, io_u, 1);	/* 1 => attach data buffer */

		hdr->dxfer_direction = SG_DXFER_FROM_DEV;
	} else if (io_u->ddir == DDIR_WRITE) {
		sgio_hdr_init(sd, hdr, io_u, 1);

		hdr->dxfer_direction = SG_DXFER_TO_DEV;
		/* non-read/write (e.g. sync): no payload */
		sgio_hdr_init(sd, hdr, io_u, 0);

		hdr->dxfer_direction = SG_DXFER_NONE;

	if (hdr->dxfer_direction != SG_DXFER_NONE) {
		nr_blocks = io_u->xfer_buflen / sd->bs;
		lba = io_u->offset / sd->bs;
		/*
		 * 10-byte CDB layout: bytes 2-5 carry the LBA (big endian),
		 * bytes 7-8 the transfer length in blocks. NOTE(review):
		 * this caps the engine at 32-bit LBAs and 16-bit block
		 * counts; larger values would silently truncate here.
		 */
		hdr->cmdp[2] = (unsigned char) ((lba >> 24) & 0xff);
		hdr->cmdp[3] = (unsigned char) ((lba >> 16) & 0xff);
		hdr->cmdp[4] = (unsigned char) ((lba >> 8) & 0xff);
		hdr->cmdp[5] = (unsigned char) (lba & 0xff);
		hdr->cmdp[7] = (unsigned char) ((nr_blocks >> 8) & 0xff);
		hdr->cmdp[8] = (unsigned char) (nr_blocks & 0xff);
/*
 * Submit one io_u. Completion is forced synchronous when the job asks
 * for sync/odirect I/O or the io_u itself is a sync operation.
 */
static int fio_sgio_queue(struct thread_data *td, struct io_u *io_u)
	struct sg_io_hdr *hdr = &io_u->hdr;
	int ret, do_sync = 0;

	fio_ro_check(td, io_u);	/* catch writes issued on read-only jobs */

	if (td->o.sync_io || td->o.odirect || ddir_sync(io_u->ddir))

	ret = fio_sgio_doio(td, io_u, do_sync);

	else if (hdr->status) {
		/* device-reported failure: record how much was not transferred */
		io_u->resid = hdr->resid;

		td_verror(td, io_u->error, "xfer");	/* NOTE(review): presumably only when io_u->error is set */
	return FIO_Q_COMPLETED;
273 static struct io_u *fio_sgio_event(struct thread_data *td, int event)
275 struct sgio_data *sd = td->io_ops->data;
277 return sd->events[event];
/*
 * Query the device block size with a raw SCSI command (presumably
 * READ CAPACITY(10) — the opcode setup is not visible in this view).
 * The block length is decoded big-endian from bytes 4-7 of the
 * response and stored in *bs.
 */
static int fio_sgio_get_bs(struct thread_data *td, unsigned int *bs)
	struct sgio_data *sd = td->io_ops->data;
	struct sg_io_hdr *hdr;
	unsigned char buf[8];	/* response buffer: 8 bytes of capacity data */

	/* use a stack io_u so the normal doio path can be reused */
	memset(&io_u, 0, sizeof(io_u));
	io_u.file = td->files[0];

	sgio_hdr_init(sd, hdr, &io_u, 0);	/* fs=0: no fio transfer buffer */
	memset(buf, 0, sizeof(buf));

	hdr->dxfer_direction = SG_DXFER_FROM_DEV;	/* response comes from device */
	hdr->dxfer_len = sizeof(buf);

	ret = fio_sgio_doio(td, &io_u, 1);	/* synchronous */

	/* block length in bytes, big endian, from bytes 4-7 */
	*bs = (buf[4] << 24) | (buf[5] << 16) | (buf[6] << 8) | buf[7];
/*
 * Per-job teardown: release the sgio_data bookkeeping allocated in
 * fio_sgio_init() (the individual frees are elided from this view).
 */
static void fio_sgio_cleanup(struct thread_data *td)
	struct sgio_data *sd = td->io_ops->data;
322 static int fio_sgio_init(struct thread_data *td)
324 struct sgio_data *sd;
326 sd = malloc(sizeof(*sd));
327 memset(sd, 0, sizeof(*sd));
328 sd->cmds = malloc(td->o.iodepth * sizeof(struct sgio_cmd));
329 memset(sd->cmds, 0, td->o.iodepth * sizeof(struct sgio_cmd));
330 sd->events = malloc(td->o.iodepth * sizeof(struct io_u *));
331 memset(sd->events, 0, td->o.iodepth * sizeof(struct io_u *));
332 sd->pfds = malloc(sizeof(struct pollfd) * td->o.nr_files);
333 memset(sd->pfds, 0, sizeof(struct pollfd) * td->o.nr_files);
334 sd->fd_flags = malloc(sizeof(int) * td->o.nr_files);
335 memset(sd->fd_flags, 0, sizeof(int) * td->o.nr_files);
336 sd->sgbuf = malloc(sizeof(struct sg_io_hdr) * td->o.iodepth);
337 memset(sd->sgbuf, 0, sizeof(struct sg_io_hdr) * td->o.iodepth);
339 td->io_ops->data = sd;
342 * we want to do it, regardless of whether odirect is set or not
344 td->o.override_sync = 1;
/*
 * One-time validation of an opened file: learn the device block size
 * and make sure the file type is usable by this engine. Block devices
 * are queried via BLKSSZGET; sg character devices must pass the SG
 * version check and answer a raw block-size query. Anything else is
 * rejected.
 */
static int fio_sgio_type_check(struct thread_data *td, struct fio_file *f)
	struct sgio_data *sd = td->io_ops->data;

	if (f->filetype == FIO_TYPE_BD) {
		/* plain block device: ask the kernel for the sector size */
		if (ioctl(f->fd, BLKSSZGET, &bs) < 0) {
			td_verror(td, errno, "ioctl");
	} else if (f->filetype == FIO_TYPE_CHAR) {
		/* sg char device: verify the SG driver answers the version ioctl */
		if (ioctl(f->fd, SG_GET_VERSION_NUM, &version) < 0) {
			td_verror(td, errno, "ioctl");

		ret = fio_sgio_get_bs(td, &bs);	/* block size via raw SCSI command */

		log_err("ioengine sg only works on block devices\n");

	if (f->filetype == FIO_TYPE_BD) {
		/*
		 * SG_IO on block devices completes synchronously, so the
		 * async getevents/event hooks are dropped and the generic
		 * sync completion path is used instead.
		 */
		td->io_ops->getevents = NULL;
		td->io_ops->event = NULL;
/*
 * Open hook: open the file generically, then run the one-time device
 * type/block-size check; close the file again if it is unsuitable.
 */
static int fio_sgio_open(struct thread_data *td, struct fio_file *f)
	struct sgio_data *sd = td->io_ops->data;

	ret = generic_open_file(td, f);

	/* only type-check once per job; a nonzero check result means reject */
	if (sd && !sd->type_checked && fio_sgio_type_check(td, f)) {
		ret = generic_close_file(td, f);	/* undo the open on failure */
/*
 * Ops table for the real SG v3 engine. FIO_SYNCIO | FIO_RAWIO: queue()
 * may complete requests synchronously, and I/O is raw (sector-aligned,
 * bypassing the page cache).
 */
static struct ioengine_ops ioengine = {
	.version = FIO_IOOPS_VERSION,
	.init = fio_sgio_init,
	.prep = fio_sgio_prep,
	.queue = fio_sgio_queue,
	.getevents = fio_sgio_getevents,	/* cleared for block devices in type_check */
	.event = fio_sgio_event,
	.cleanup = fio_sgio_cleanup,
	.open_file = fio_sgio_open,
	.close_file = generic_close_file,
	.get_file_size = generic_get_file_size,
	.flags = FIO_SYNCIO | FIO_RAWIO,
416 #else /* FIO_HAVE_SGIO */
 * When we have a proper configure system in place, we simply won't build
 * and install this io engine. For now install a crippled version that
 * just complains and fails to load.
/*
 * Stub init used when FIO_HAVE_SGIO is not defined: logs an error so
 * the engine refuses to load on platforms without SG support.
 */
static int fio_sgio_init(struct thread_data fio_unused *td)
	log_err("fio: ioengine sg not available\n");
/*
 * Crippled ops table installed when SG support is not compiled in;
 * only init is provided, and it always fails.
 */
static struct ioengine_ops ioengine = {
	.version = FIO_IOOPS_VERSION,
	.init = fio_sgio_init,
/* Constructor hook: register the engine so jobs can select it by name. */
static void fio_init fio_sgio_register(void)
	register_ioengine(&ioengine);
/* Destructor hook: remove the engine from the registry at unload. */
static void fio_exit fio_sgio_unregister(void)
	unregister_ioengine(&ioengine);