/*
 * scsi generic sg v3 io engine
 *
 */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <assert.h>
#include <sys/poll.h>

/*
 * fio core headers; the relative paths assume this file lives in the
 * engines/ directory of the fio tree.
 */
#include "../fio.h"
#include "../os.h"

#ifdef FIO_HAVE_SGIO
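/*
 * This engine bypasses the regular block layer read/write path and issues
 * SCSI commands directly. Block devices are driven through the SG_IO
 * ioctl, while /dev/sg character devices are driven by writing and
 * reading struct sg_io_hdr on the file descriptor.
 *
 * Illustrative job file snippet (the device node is just an example):
 *
 *	ioengine=sg
 *	filename=/dev/sg0
 */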
struct sgio_cmd {
	unsigned char cdb[10];
};

struct sgio_data {
	struct sgio_cmd *cmds;
	struct io_u **events;
	struct pollfd *pfds;
	int *fd_flags;
	void *sgbuf;
	unsigned int bs;
};
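/*
 * Set up the sg v3 header for an io_u: clear the header and the
 * per-command CDB, point cmdp at that CDB, and stash the io_u in usr_ptr
 * so completions can be mapped back to it. "fs" says whether the command
 * transfers data at all.
 */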
static void sgio_hdr_init(struct sgio_data *sd, struct sg_io_hdr *hdr,
			  struct io_u *io_u, int fs)
{
	struct sgio_cmd *sc = &sd->cmds[io_u->index];

	memset(hdr, 0, sizeof(*hdr));
	memset(sc->cdb, 0, sizeof(sc->cdb));

	hdr->interface_id = 'S';
	hdr->cmdp = sc->cdb;
	hdr->cmd_len = sizeof(sc->cdb);
	hdr->pack_id = io_u->index;
	hdr->usr_ptr = io_u;

	if (fs) {
		hdr->dxferp = io_u->xfer_buf;
		hdr->dxfer_len = io_u->xfer_buflen;
	}
}
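/*
 * Event reaping for the SG_IO ioctl path. The ioctl completes
 * synchronously, so there is never more than one finished io_u to report.
 */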
static int fio_sgio_ioctl_getevents(struct thread_data *td, int fio_unused min,
				    int max, struct timespec fio_unused *t)
{
	assert(max <= 1);

	/*
	 * we can only have one finished io_u for sync io, since the depth
	 * is always 1
	 */
	if (list_empty(&td->io_u_busylist))
		return 0;

	return 1;
}
static int pollin_events(struct pollfd *pfds, int fds)
{
	int i;

	for (i = 0; i < fds; i++)
		if (pfds[i].revents & POLLIN)
			return 1;

	return 0;
}
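/*
 * Event reaping for the sg read/write path: poll() the descriptors for
 * POLLIN, then read() completed sg_io_hdr structs back and map each one
 * to its io_u through usr_ptr.
 */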
static int fio_sgio_getevents(struct thread_data *td, int min, int max,
			      struct timespec fio_unused *t)
{
	/*
	 * normally hard coding &td->files[0] is a bug that needs to be fixed,
	 * but it's ok here as all files should point to the same device.
	 */
	struct fio_file *f = &td->files[0];
	struct sgio_data *sd = td->io_ops->data;
	int left = max, ret, events, i, r = 0;
	void *buf = sd->sgbuf;

	/*
	 * Fill in the file descriptors
	 */
	for_each_file(td, f, i) {
		/*
		 * don't block for min events == 0
		 */
		if (!min) {
			sd->fd_flags[i] = fcntl(f->fd, F_GETFL);
			fcntl(f->fd, F_SETFL, sd->fd_flags[i] | O_NONBLOCK);
		}

		sd->pfds[i].fd = f->fd;
		sd->pfds[i].events = POLLIN;
	}

	while (left) {
		void *p;

		do {
			if (!min)
				break;

			ret = poll(sd->pfds, td->nr_files, -1);
			if (ret < 0) {
				td_verror(td, errno);
				if (!r)
					r = -1;
				break;
			} else if (!ret)
				continue;

			if (pollin_events(sd->pfds, td->nr_files))
				break;
		} while (1);

		if (r < 0)
			break;

		p = buf;
		events = 0;
		for_each_file(td, f, i) {
			ret = read(f->fd, p, left * sizeof(struct sg_io_hdr));
			if (ret < 0) {
				if (errno == EAGAIN)
					continue;
				td_verror(td, errno);
				r = -1;
				break;
			} else if (ret) {
				p += ret;
				events += ret / sizeof(struct sg_io_hdr);
			}
		}

		if (r < 0)
			break;

		left -= events;
		r += events;

		for (i = 0; i < events; i++) {
			struct sg_io_hdr *hdr = (struct sg_io_hdr *) buf + i;

			sd->events[i] = hdr->usr_ptr;
		}

		/*
		 * a non-blocking reap does a single sweep
		 */
		if (!min)
			break;
	}

	if (!min) {
		for_each_file(td, f, i)
			fcntl(f->fd, F_SETFL, sd->fd_flags[i]);
	}

	return r;
}
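/*
 * Submission via the synchronous SG_IO ioctl, used for block devices.
 */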
static int fio_sgio_ioctl_doio(struct thread_data *td,
			       struct fio_file *f, struct io_u *io_u)
{
	struct sgio_data *sd = td->io_ops->data;
	struct sg_io_hdr *hdr = &io_u->hdr;

	sd->events[0] = io_u;

	return ioctl(f->fd, SG_IO, hdr);
}
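/*
 * Submission via the sg v3 read/write interface: writing the header
 * queues the command, and reading it back waits for completion when
 * running synchronously.
 */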
static int fio_sgio_rw_doio(struct fio_file *f, struct io_u *io_u, int sync)
{
	struct sg_io_hdr *hdr = &io_u->hdr;
	int ret;

	ret = write(f->fd, hdr, sizeof(*hdr));
	if (ret < 0)
		return ret;

	if (sync) {
		ret = read(f->fd, hdr, sizeof(*hdr));
		if (ret < 0)
			return ret;
	}

	return 0;
}
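/*
 * Dispatch to the submission path matching the file type.
 */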
static int fio_sgio_doio(struct thread_data *td, struct io_u *io_u, int sync)
{
	struct fio_file *f = io_u->file;

	if (td->filetype == FIO_TYPE_BD)
		return fio_sgio_ioctl_doio(td, f, io_u);

	return fio_sgio_rw_doio(f, io_u, sync);
}
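/*
 * Build the CDB for this io_u using the standard 10-byte SCSI commands:
 * READ(10) is opcode 0x28, WRITE(10) is 0x2a, and SYNCHRONIZE CACHE(10)
 * is 0x35. These match the 10-byte CDB layout of the LBA and transfer
 * length fields filled in below.
 */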
static int fio_sgio_prep(struct thread_data *td, struct io_u *io_u)
{
	struct sg_io_hdr *hdr = &io_u->hdr;
	struct sgio_data *sd = td->io_ops->data;
	int nr_blocks, lba;

	if (io_u->xfer_buflen & (sd->bs - 1)) {
		log_err("read/write not sector aligned\n");
		return EINVAL;
	}

	if (io_u->ddir == DDIR_READ) {
		sgio_hdr_init(sd, hdr, io_u, 1);

		hdr->dxfer_direction = SG_DXFER_FROM_DEV;
		hdr->cmdp[0] = 0x28;	/* READ(10) */
	} else if (io_u->ddir == DDIR_WRITE) {
		sgio_hdr_init(sd, hdr, io_u, 1);

		hdr->dxfer_direction = SG_DXFER_TO_DEV;
		hdr->cmdp[0] = 0x2a;	/* WRITE(10) */
	} else {
		sgio_hdr_init(sd, hdr, io_u, 0);

		hdr->dxfer_direction = SG_DXFER_NONE;
		hdr->cmdp[0] = 0x35;	/* SYNCHRONIZE CACHE(10) */
	}

	/*
	 * The LBA goes in bytes 2-5 of the CDB, the transfer length in
	 * blocks in bytes 7-8, both big-endian.
	 */
	if (hdr->dxfer_direction != SG_DXFER_NONE) {
		nr_blocks = io_u->xfer_buflen / sd->bs;
		lba = io_u->offset / sd->bs;
		hdr->cmdp[2] = (unsigned char) ((lba >> 24) & 0xff);
		hdr->cmdp[3] = (unsigned char) ((lba >> 16) & 0xff);
		hdr->cmdp[4] = (unsigned char) ((lba >> 8) & 0xff);
		hdr->cmdp[5] = (unsigned char) (lba & 0xff);
		hdr->cmdp[7] = (unsigned char) ((nr_blocks >> 8) & 0xff);
		hdr->cmdp[8] = (unsigned char) (nr_blocks & 0xff);
	}

	return 0;
}
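/*
 * Queue (and, since this is a sync engine, complete) one io_u. A negative
 * submission return maps to errno; a non-zero SCSI status is reported as
 * EIO along with the residual byte count.
 */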
static int fio_sgio_queue(struct thread_data *td, struct io_u *io_u)
{
	struct sg_io_hdr *hdr = &io_u->hdr;
	int ret;

	ret = fio_sgio_doio(td, io_u, io_u->ddir == DDIR_SYNC);

	if (ret < 0)
		io_u->error = errno;
	else if (hdr->status) {
		io_u->resid = hdr->resid;
		io_u->error = EIO;
	}

	if (io_u->error)
		td_verror(td, io_u->error);

	return io_u->error;
}
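/*
 * Return the io_u recorded for the given completion event index.
 */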
static struct io_u *fio_sgio_event(struct thread_data *td, int event)
{
	struct sgio_data *sd = td->io_ops->data;

	return sd->events[event];
}
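/*
 * Probe the device block size with READ CAPACITY(10), opcode 0x25: the
 * block length in bytes comes back big-endian in bytes 4-7 of the 8-byte
 * response.
 */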
static int fio_sgio_get_bs(struct thread_data *td, unsigned int *bs)
{
	struct sgio_data *sd = td->io_ops->data;
	struct io_u *io_u;
	struct sg_io_hdr *hdr;
	unsigned char buf[8];
	int ret;

	io_u = __get_io_u(td);
	assert(io_u);
	io_u->file = &td->files[0];

	hdr = &io_u->hdr;
	sgio_hdr_init(sd, hdr, io_u, 0);
	memset(buf, 0, sizeof(buf));

	hdr->cmdp[0] = 0x25;	/* READ CAPACITY(10) */
	hdr->dxfer_direction = SG_DXFER_FROM_DEV;
	hdr->dxferp = buf;
	hdr->dxfer_len = sizeof(buf);

	ret = fio_sgio_doio(td, io_u, 1);
	if (ret) {
		put_io_u(td, io_u);
		return ret;
	}

	*bs = (buf[4] << 24) | (buf[5] << 16) | (buf[6] << 8) | buf[7];
	put_io_u(td, io_u);
	return 0;
}
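/*
 * Free everything allocated in fio_sgio_init().
 */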
static void fio_sgio_cleanup(struct thread_data *td)
{
	struct sgio_data *sd = td->io_ops->data;

	if (sd) {
		free(sd->events);
		free(sd->cmds);
		free(sd->fd_flags);
		free(sd->pfds);
		free(sd->sgbuf);
		free(sd);

		td->io_ops->data = NULL;
	}
}
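/*
 * Per-thread setup: allocate command/event/poll bookkeeping, determine
 * the device block size (BLKSSZGET for block devices, READ CAPACITY for
 * sg character devices) and pick the getevents handler that matches the
 * submission path.
 */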
static int fio_sgio_init(struct thread_data *td)
{
	struct fio_file *f = &td->files[0];
	struct sgio_data *sd;
	unsigned int bs;
	int ret;

	sd = malloc(sizeof(*sd));
	memset(sd, 0, sizeof(*sd));
	sd->cmds = malloc(td->iodepth * sizeof(struct sgio_cmd));
	memset(sd->cmds, 0, td->iodepth * sizeof(struct sgio_cmd));
	sd->events = malloc(td->iodepth * sizeof(struct io_u *));
	memset(sd->events, 0, td->iodepth * sizeof(struct io_u *));
	sd->pfds = malloc(sizeof(struct pollfd) * td->nr_files);
	memset(sd->pfds, 0, sizeof(struct pollfd) * td->nr_files);
	sd->fd_flags = malloc(sizeof(int) * td->nr_files);
	memset(sd->fd_flags, 0, sizeof(int) * td->nr_files);
	sd->sgbuf = malloc(sizeof(struct sg_io_hdr) * td->iodepth);
	memset(sd->sgbuf, 0, sizeof(struct sg_io_hdr) * td->iodepth);

	td->io_ops->data = sd;

	if (td->filetype == FIO_TYPE_BD) {
		if (ioctl(f->fd, BLKSSZGET, &bs) < 0) {
			td_verror(td, errno);
			goto err;
		}
	} else if (td->filetype == FIO_TYPE_CHAR) {
		int version;

		if (ioctl(f->fd, SG_GET_VERSION_NUM, &version) < 0) {
			td_verror(td, errno);
			goto err;
		}

		ret = fio_sgio_get_bs(td, &bs);
		if (ret)
			goto err;
	} else {
		log_err("ioengine sgio only works on block devices\n");
		goto err;
	}

	sd->bs = bs;

	if (td->filetype == FIO_TYPE_BD)
		td->io_ops->getevents = fio_sgio_ioctl_getevents;
	else
		td->io_ops->getevents = fio_sgio_getevents;

	/*
	 * we want to run sync io, regardless of whether odirect is set or not
	 */
	td->override_sync = 1;
	return 0;
err:
	free(sd->events);
	free(sd->cmds);
	free(sd->fd_flags);
	free(sd->pfds);
	free(sd->sgbuf);
	free(sd);
	td->io_ops->data = NULL;
	return 1;
}
static struct ioengine_ops ioengine = {
	.name		= "sg",
	.version	= FIO_IOOPS_VERSION,
	.init		= fio_sgio_init,
	.prep		= fio_sgio_prep,
	.queue		= fio_sgio_queue,
	.getevents	= fio_sgio_getevents,
	.event		= fio_sgio_event,
	.cleanup	= fio_sgio_cleanup,
	.flags		= FIO_SYNCIO | FIO_RAWIO,
};
#else /* FIO_HAVE_SGIO */

/*
 * When we have a proper configure system in place, we simply won't build
 * and install this io engine. For now install a crippled version that
 * just complains and fails to load.
 */
static int fio_sgio_init(struct thread_data fio_unused *td)
{
	fprintf(stderr, "fio: sgio not available\n");
	return 1;
}

static struct ioengine_ops ioengine = {
	.name		= "sg",
	.version	= FIO_IOOPS_VERSION,
	.init		= fio_sgio_init,
};

#endif
static void fio_init fio_sgio_register(void)
{
	register_ioengine(&ioengine);
}

static void fio_exit fio_sgio_unregister(void)
{
	unregister_ioengine(&ioengine);
}