/*
 * scsi generic sg v3 io engine
 */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <fcntl.h>
#include <sys/poll.h>

#include "fio.h"
#include "os.h"

#ifdef FIO_HAVE_SGIO

struct sgio_cmd {
        unsigned char cdb[10];
};

struct sgio_data {
        struct sgio_cmd *cmds;
        struct io_u **events;
        unsigned int bs;
};

static void sgio_hdr_init(struct sgio_data *sd, struct sg_io_hdr *hdr,
                          struct io_u *io_u, int fs)
{
        struct sgio_cmd *sc = &sd->cmds[io_u->index];

        memset(hdr, 0, sizeof(*hdr));
        memset(sc->cdb, 0, sizeof(sc->cdb));

        hdr->interface_id = 'S';
        hdr->cmdp = sc->cdb;
        hdr->cmd_len = sizeof(sc->cdb);
        hdr->pack_id = io_u->index;
        hdr->usr_ptr = io_u;

        if (fs) {
                hdr->dxferp = io_u->xfer_buf;
                hdr->dxfer_len = io_u->xfer_buflen;
        }
}
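
/*
 * Illustrative sketch, not part of fio: the same sg v3 header that
 * sgio_hdr_init() fills can also drive a one-off synchronous command through
 * the SG_IO ioctl. The helper below issues a 6-byte INQUIRY and prints the
 * vendor string; the name sg_inquiry_example() and the 96-byte response size
 * are arbitrary choices for this demo.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>

static int sg_inquiry_example(int fd)
{
        unsigned char cdb[6] = { 0x12, 0, 0, 0, 96, 0 };    /* INQUIRY, 96 byte allocation length */
        unsigned char resp[96], sense[32];
        struct sg_io_hdr hdr;

        memset(&hdr, 0, sizeof(hdr));
        memset(resp, 0, sizeof(resp));

        hdr.interface_id = 'S';                 /* always 'S' for the sg v3 interface */
        hdr.cmdp = cdb;
        hdr.cmd_len = sizeof(cdb);
        hdr.dxfer_direction = SG_DXFER_FROM_DEV;
        hdr.dxferp = resp;
        hdr.dxfer_len = sizeof(resp);
        hdr.sbp = sense;                        /* sense data lands here on failure */
        hdr.mx_sb_len = sizeof(sense);
        hdr.timeout = 5000;                     /* milliseconds */

        if (ioctl(fd, SG_IO, &hdr) < 0 || hdr.status)
                return -1;

        printf("vendor: %.8s\n", (char *) resp + 8);    /* bytes 8-15 of INQUIRY data */
        return 0;
}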

static int fio_sgio_ioctl_getevents(struct thread_data *td, int fio_unused min,
                                    int max, struct timespec fio_unused *t)
{
        /*
         * we can only have one finished io_u for sync io, since the depth
         * is always 1
         */
        if (list_empty(&td->io_u_busylist))
                return 0;

        return 1;
}

static int pollin_events(struct pollfd *pfds, int fds)
{
        int i;

        for (i = 0; i < fds; i++)
                if (pfds[i].revents & POLLIN)
                        return 1;

        return 0;
}

static int fio_sgio_getevents(struct thread_data *td, int min, int max,
                              struct timespec fio_unused *t)
{
        /*
         * normally hard coding &td->files[0] is a bug that needs to be fixed,
         * but it's ok here as all files should point to the same device.
         */
        struct fio_file *f = &td->files[0];
        struct sgio_data *sd = td->io_ops->data;
        int left = max, ret, events, i, r = 0, *fl;
        struct pollfd *pfds;
        void *buf, *p;

        /*
         * Fill in the file descriptors
         */
        pfds = malloc(sizeof(struct pollfd) * td->nr_files);
        fl = malloc(sizeof(int) * td->nr_files);

        for_each_file(td, f, i) {
                /*
                 * don't block for min events == 0
                 */
                if (!min) {
                        fl[i] = fcntl(f->fd, F_GETFL);
                        fcntl(f->fd, F_SETFL, fl[i] | O_NONBLOCK);
                }
                pfds[i].fd = f->fd;
                pfds[i].events = POLLIN;
        }

        buf = malloc(max * sizeof(struct sg_io_hdr));

        ret = poll(pfds, td->nr_files, -1);
        if (ret < 0)
                td_verror(td, errno);

        p = buf;
        events = 0;
        if (pollin_events(pfds, td->nr_files)) {
                for_each_file(td, f, i) {
                        ret = read(f->fd, p, left * sizeof(struct sg_io_hdr));
                        if (ret < 0)
                                td_verror(td, errno);
                        else {
                                p += ret;
                                events += ret / sizeof(struct sg_io_hdr);
                        }
                }
        }

        r += events;

        for (i = 0; i < events; i++) {
                struct sg_io_hdr *hdr = (struct sg_io_hdr *) buf + i;

                sd->events[i] = hdr->usr_ptr;
        }

        if (!min) {
                for_each_file(td, f, i)
                        fcntl(f->fd, F_SETFL, fl[i]);
        }

        free(buf);
        free(pfds);
        free(fl);
        return r;
}

static int fio_sgio_ioctl_doio(struct thread_data *td,
                               struct fio_file *f, struct io_u *io_u)
{
        struct sgio_data *sd = td->io_ops->data;
        struct sg_io_hdr *hdr = &io_u->hdr;

        sd->events[0] = io_u;

        return ioctl(f->fd, SG_IO, hdr);
}

static int fio_sgio_rw_doio(struct fio_file *f, struct io_u *io_u, int sync)
{
        struct sg_io_hdr *hdr = &io_u->hdr;
        int ret;

        ret = write(f->fd, hdr, sizeof(*hdr));
        if (ret < 0)
                return ret;

        if (sync)
                ret = read(f->fd, hdr, sizeof(*hdr));

        return ret;
}
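
/*
 * Illustrative sketch, not part of fio: the write()/read() pair above is the
 * sg driver's queued interface. write() of a struct sg_io_hdr submits the
 * command, read() of the same size later returns a completed header with
 * status and residual filled in; pack_id/usr_ptr tell the caller which
 * request came back. The name submit_and_reap_example() is made up, and the
 * sketch assumes the same headers as the engine (<unistd.h>, <errno.h>,
 * <scsi/sg.h>).
 */
static int submit_and_reap_example(int sg_fd, struct sg_io_hdr *hdr)
{
        ssize_t ret;

        /* queue the command; returns as soon as the driver accepts it */
        ret = write(sg_fd, hdr, sizeof(*hdr));
        if (ret < 0)
                return -errno;

        /* reap one completed command; *hdr is overwritten with the result */
        ret = read(sg_fd, hdr, sizeof(*hdr));
        if (ret < 0)
                return -errno;

        /* status fields are only meaningful after the read completes */
        return hdr->status ? -EIO : 0;
}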

static int fio_sgio_doio(struct thread_data *td, struct io_u *io_u, int sync)
{
        struct fio_file *f = io_u->file;

        if (td->filetype == FIO_TYPE_BD)
                return fio_sgio_ioctl_doio(td, f, io_u);

        return fio_sgio_rw_doio(f, io_u, sync);
}

static int fio_sgio_prep(struct thread_data *td, struct io_u *io_u)
{
        struct sg_io_hdr *hdr = &io_u->hdr;
        struct sgio_data *sd = td->io_ops->data;
        int nr_blocks, lba;

        if (io_u->xfer_buflen & (sd->bs - 1)) {
                log_err("read/write not sector aligned\n");
                return EINVAL;
        }

        if (io_u->ddir == DDIR_READ) {
                sgio_hdr_init(sd, hdr, io_u, 1);

                hdr->dxfer_direction = SG_DXFER_FROM_DEV;
        } else if (io_u->ddir == DDIR_WRITE) {
                sgio_hdr_init(sd, hdr, io_u, 1);

                hdr->dxfer_direction = SG_DXFER_TO_DEV;
        } else {
                sgio_hdr_init(sd, hdr, io_u, 0);

                hdr->dxfer_direction = SG_DXFER_NONE;
        }

        if (hdr->dxfer_direction != SG_DXFER_NONE) {
                nr_blocks = io_u->xfer_buflen / sd->bs;
                lba = io_u->offset / sd->bs;
                hdr->cmdp[2] = (unsigned char) ((lba >> 24) & 0xff);
                hdr->cmdp[3] = (unsigned char) ((lba >> 16) & 0xff);
                hdr->cmdp[4] = (unsigned char) ((lba >> 8) & 0xff);
                hdr->cmdp[5] = (unsigned char) (lba & 0xff);
                hdr->cmdp[7] = (unsigned char) ((nr_blocks >> 8) & 0xff);
                hdr->cmdp[8] = (unsigned char) (nr_blocks & 0xff);
        }

        return 0;
}
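
/*
 * Illustrative sketch, not part of fio: the byte shifting above fills a
 * 10-byte READ/WRITE CDB. In that format the logical block address sits
 * big-endian in bytes 2-5 and the transfer length in blocks big-endian in
 * bytes 7-8; a complete CDB also needs the opcode in byte 0 (0x28 for
 * READ(10), 0x2a for WRITE(10)), which this excerpt does not show being set.
 * build_rw10_cdb_example() is a made-up helper name and assumes <string.h>.
 */
static void build_rw10_cdb_example(unsigned char cdb[10], int write,
                                   unsigned int lba, unsigned int nr_blocks)
{
        memset(cdb, 0, 10);
        cdb[0] = write ? 0x2a : 0x28;           /* WRITE(10) : READ(10) */
        cdb[2] = (lba >> 24) & 0xff;            /* LBA, most significant byte first */
        cdb[3] = (lba >> 16) & 0xff;
        cdb[4] = (lba >> 8) & 0xff;
        cdb[5] = lba & 0xff;
        cdb[7] = (nr_blocks >> 8) & 0xff;       /* transfer length, in blocks */
        cdb[8] = nr_blocks & 0xff;
}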

static int fio_sgio_queue(struct thread_data *td, struct io_u *io_u)
{
        struct sg_io_hdr *hdr = &io_u->hdr;
        int ret;

        ret = fio_sgio_doio(td, io_u, io_u->ddir == DDIR_SYNC);

        if (ret < 0)
                io_u->error = errno;
        else if (hdr->status) {
                io_u->resid = hdr->resid;
                io_u->error = EIO;
        }

        return io_u->error;
}

static struct io_u *fio_sgio_event(struct thread_data *td, int event)
{
        struct sgio_data *sd = td->io_ops->data;

        return sd->events[event];
}

static int fio_sgio_get_bs(struct thread_data *td, unsigned int *bs)
{
        struct sgio_data *sd = td->io_ops->data;
        struct io_u *io_u;
        struct sg_io_hdr *hdr;
        unsigned char buf[8];
        int ret;

        io_u = __get_io_u(td);
        io_u->file = &td->files[0];

        hdr = &io_u->hdr;
        sgio_hdr_init(sd, hdr, io_u, 0);
        memset(buf, 0, sizeof(buf));

        hdr->dxfer_direction = SG_DXFER_FROM_DEV;
        hdr->dxferp = buf;
        hdr->dxfer_len = sizeof(buf);

        ret = fio_sgio_doio(td, io_u, 1);
        if (ret) {
                put_io_u(td, io_u);
                return ret;
        }

        *bs = (buf[4] << 24) | (buf[5] << 16) | (buf[6] << 8) | buf[7];
        put_io_u(td, io_u);
        return 0;
}
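
/*
 * Illustrative sketch, not part of fio: the 8-byte buffer parsed above
 * matches the READ CAPACITY(10) response format (the command opcode, 0x25,
 * is not shown in this excerpt). Bytes 0-3 hold the last logical block
 * address and bytes 4-7 the block length, both big-endian, which is why the
 * shifts above recover the sector size. parse_read_capacity_example() is a
 * made-up helper name.
 */
static void parse_read_capacity_example(const unsigned char buf[8],
                                        unsigned int *last_lba,
                                        unsigned int *block_len)
{
        *last_lba  = (buf[0] << 24) | (buf[1] << 16) | (buf[2] << 8) | buf[3];
        *block_len = (buf[4] << 24) | (buf[5] << 16) | (buf[6] << 8) | buf[7];
}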

static void fio_sgio_cleanup(struct thread_data *td)
{
        if (td->io_ops->data) {
                free(td->io_ops->data);
                td->io_ops->data = NULL;
        }
}

static int fio_sgio_init(struct thread_data *td)
{
        struct fio_file *f = &td->files[0];
        struct sgio_data *sd;
        unsigned int bs;
        int ret;

        sd = malloc(sizeof(*sd));
        memset(sd, 0, sizeof(*sd));
        sd->cmds = malloc(td->iodepth * sizeof(struct sgio_cmd));
        memset(sd->cmds, 0, td->iodepth * sizeof(struct sgio_cmd));
        sd->events = malloc(td->iodepth * sizeof(struct io_u *));
        memset(sd->events, 0, td->iodepth * sizeof(struct io_u *));
        td->io_ops->data = sd;

        if (td->filetype == FIO_TYPE_BD) {
                if (ioctl(f->fd, BLKSSZGET, &bs) < 0) {
                        td_verror(td, errno);
                        goto err;
                }
        } else if (td->filetype == FIO_TYPE_CHAR) {
                int version;

                if (ioctl(f->fd, SG_GET_VERSION_NUM, &version) < 0) {
                        td_verror(td, errno);
                        goto err;
                }

                ret = fio_sgio_get_bs(td, &bs);
                if (ret)
                        goto err;
        } else {
                log_err("ioengine sgio only works on block devices\n");
                goto err;
        }

        sd->bs = bs;

        if (td->filetype == FIO_TYPE_BD)
                td->io_ops->getevents = fio_sgio_ioctl_getevents;
        else
                td->io_ops->getevents = fio_sgio_getevents;

        /*
         * we want to do it, regardless of whether odirect is set or not
         */
        td->override_sync = 1;
        return 0;
err:
        free(sd->events);
        free(sd->cmds);
        free(sd);
        td->io_ops->data = NULL;
        return 1;
}
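
/*
 * Illustrative sketch, not part of fio: on a block device the logical sector
 * size used above comes straight from the BLKSSZGET ioctl, with no SCSI
 * command involved. get_block_size_example() is a made-up name; BLKSSZGET is
 * declared in <linux/fs.h> and the sketch assumes <sys/ioctl.h> and
 * <errno.h> as well.
 */
static int get_block_size_example(int fd, unsigned int *bs)
{
        int sector_size;

        /* BLKSSZGET reports the device's logical sector size in bytes */
        if (ioctl(fd, BLKSSZGET, &sector_size) < 0)
                return -errno;

        *bs = sector_size;
        return 0;
}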

static struct ioengine_ops ioengine = {
        .name           = "sg",
        .version        = FIO_IOOPS_VERSION,
        .init           = fio_sgio_init,
        .prep           = fio_sgio_prep,
        .queue          = fio_sgio_queue,
        .getevents      = fio_sgio_getevents,
        .event          = fio_sgio_event,
        .cleanup        = fio_sgio_cleanup,
        .flags          = FIO_SYNCIO | FIO_RAWIO,
};

#else /* FIO_HAVE_SGIO */

/*
 * When we have a proper configure system in place, we simply won't build
 * and install this io engine. For now install a crippled version that
 * just complains and fails to load.
 */
static int fio_sgio_init(struct thread_data fio_unused *td)
{
        fprintf(stderr, "fio: sgio not available\n");
        return 1;
}

static struct ioengine_ops ioengine = {
        .version        = FIO_IOOPS_VERSION,
        .init           = fio_sgio_init,
};

#endif /* FIO_HAVE_SGIO */

static void fio_init fio_sgio_register(void)
{
        register_ioengine(&ioengine);
}

static void fio_exit fio_sgio_unregister(void)
{
        unregister_ioengine(&ioengine);
}