/*
 * scsi generic sg v3 io engine
 *
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <fcntl.h>
#include <sys/poll.h>
#include <sys/ioctl.h>

#include "fio.h"	/* fio-internal types: thread_data, io_u, fio_file, ioengine_ops */
#include "os.h"		/* FIO_HAVE_SGIO and the sg headers on platforms that have them */

#ifdef FIO_HAVE_SGIO

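/*
 * This engine drives the Linux SCSI generic (sg) v3 interface directly:
 * on raw /dev/sg* character devices, commands are submitted by write()ing
 * a struct sg_io_hdr to the fd and reaped by read()ing completed headers
 * back; on block devices the synchronous SG_IO ioctl is used instead.
 *
 * Usage sketch (a hypothetical job file, assuming the engine registers
 * under the name "sg"; option names are standard fio options):
 *
 *	[sg-seq-read]
 *	ioengine=sg
 *	filename=/dev/sg0
 *	rw=read
 *	bs=4k
 */
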
struct sgio_cmd {
	unsigned char cdb[10];	/* 10-byte SCSI command descriptor block */
};

struct sgio_data {
	struct sgio_cmd *cmds;	/* one CDB slot per io_u, indexed by io_u->index */
	struct io_u **events;	/* completed io_u's, filled in by getevents */
	unsigned int bs;	/* device block size in bytes */
};

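/*
 * Fill in the sg v3 header for an io_u: the CDB storage comes from the
 * per-io_u slot in sd->cmds, pack_id/usr_ptr carry the io_u index and
 * pointer so completions can be matched back to the io_u, and the data
 * buffer is only attached when @fs is set (data-carrying commands).
 */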
static void sgio_hdr_init(struct sgio_data *sd, struct sg_io_hdr *hdr,
			  struct io_u *io_u, int fs)
{
	struct sgio_cmd *sc = &sd->cmds[io_u->index];

	memset(hdr, 0, sizeof(*hdr));
	memset(sc->cdb, 0, sizeof(sc->cdb));

	hdr->interface_id = 'S';
	hdr->cmdp = sc->cdb;
	hdr->cmd_len = sizeof(sc->cdb);
	hdr->pack_id = io_u->index;
	hdr->usr_ptr = io_u;

	if (fs) {
		hdr->dxferp = io_u->buf;
		hdr->dxfer_len = io_u->buflen;
	}
}

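/*
 * Event reaping for the SG_IO ioctl path: the ioctl completes the command
 * before returning, so at most one io_u can be pending, and "reaping" is
 * just a check of whether anything is still in flight.
 */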
static int fio_sgio_ioctl_getevents(struct thread_data *td, int fio_unused min,
				    int max, struct timespec fio_unused *t)
{
	/*
	 * we can only have one finished io_u for sync io, since the depth
	 * is always 1
	 */
	if (list_empty(&td->io_u_busylist))
		return 0;

	return 1;
}

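/*
 * Event reaping for the /dev/sg* read/write path: completed sg_io_hdr
 * structures are read() back from the fd, and each header's usr_ptr is
 * the io_u it belongs to. When no events are required (min == 0) the fd
 * is temporarily switched to O_NONBLOCK so the read does not stall.
 */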
static int fio_sgio_getevents(struct thread_data *td, int min, int max,
			      struct timespec fio_unused *t)
{
	struct fio_file *f = &td->files[0];
	struct sgio_data *sd = td->io_ops->data;
	struct pollfd pfd = { .fd = f->fd, .events = POLLIN };
	void *buf = malloc(max * sizeof(struct sg_io_hdr));
	int left = max, ret, events, i, r = 0, fl = 0;

	/*
	 * don't block for !events
	 */
	if (!min) {
		fl = fcntl(f->fd, F_GETFL);
		fcntl(f->fd, F_SETFL, fl | O_NONBLOCK);
	}

	while (left) {
		if (min) {
			/*
			 * wait for the fd to signal completions
			 */
			poll(&pfd, 1, -1);
			if (!(pfd.revents & POLLIN))
				continue;
		}

		ret = read(f->fd, buf, left * sizeof(struct sg_io_hdr));
		if (ret < 0) {
			if (errno != EAGAIN) {
				td_verror(td, errno);
				r = -1;
			}
			break;
		} else if (!ret)
			break;

		events = ret / sizeof(struct sg_io_hdr);

		for (i = 0; i < events; i++) {
			struct sg_io_hdr *hdr = (struct sg_io_hdr *) buf + i;

			sd->events[r + i] = hdr->usr_ptr;
		}

		left -= events;
		r += events;
	}

	if (!min)
		fcntl(f->fd, F_SETFL, fl);

	free(buf);
	return r;
}

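/*
 * Block device path: SG_IO is a synchronous ioctl, so the io_u is
 * complete by the time it returns and can be recorded as event 0
 * right away.
 */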
static int fio_sgio_ioctl_doio(struct thread_data *td,
			       struct fio_file *f, struct io_u *io_u)
{
	struct sgio_data *sd = td->io_ops->data;
	struct sg_io_hdr *hdr = &io_u->hdr;

	sd->events[0] = io_u;

	return ioctl(f->fd, SG_IO, hdr);
}

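/*
 * Character device (/dev/sg*) path: write() of the sg_io_hdr queues the
 * command; for sync operation the matching read() waits for and returns
 * the completed header.
 */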
static int fio_sgio_rw_doio(struct fio_file *f, struct io_u *io_u, int sync)
{
	struct sg_io_hdr *hdr = &io_u->hdr;
	int ret;

	ret = write(f->fd, hdr, sizeof(*hdr));
	if (ret < 0)
		return ret;

	if (sync) {
		ret = read(f->fd, hdr, sizeof(*hdr));
		if (ret < 0)
			return ret;
	}

	return 0;
}

static int fio_sgio_doio(struct thread_data *td, struct io_u *io_u, int sync)
{
	struct fio_file *f = io_u->file;

	if (td->filetype == FIO_TYPE_BD)
		return fio_sgio_ioctl_doio(td, f, io_u);

	return fio_sgio_rw_doio(f, io_u, sync);
}

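/*
 * Build the SCSI CDB for this io_u. Reads and writes use 10-byte
 * READ(10)/WRITE(10) commands: the logical block address goes in CDB
 * bytes 2-5 (big-endian) and the transfer length, in blocks, in bytes
 * 7-8. Note that a 10-byte CDB limits the LBA to 32 bits, i.e. roughly
 * 2TB with 512-byte sectors.
 */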
static int fio_sgio_prep(struct thread_data *td, struct io_u *io_u)
{
	struct sg_io_hdr *hdr = &io_u->hdr;
	struct sgio_data *sd = td->io_ops->data;
	int nr_blocks, lba;

	if (io_u->buflen & (sd->bs - 1)) {
		log_err("read/write not sector aligned\n");
		return EINVAL;
	}

	if (io_u->ddir == DDIR_READ) {
		sgio_hdr_init(sd, hdr, io_u, 1);

		hdr->dxfer_direction = SG_DXFER_FROM_DEV;
		hdr->cmdp[0] = 0x28;	/* READ(10) */
	} else if (io_u->ddir == DDIR_WRITE) {
		sgio_hdr_init(sd, hdr, io_u, 1);

		hdr->dxfer_direction = SG_DXFER_TO_DEV;
		hdr->cmdp[0] = 0x2a;	/* WRITE(10) */
	} else {
		/*
		 * no data transfer, sync the device cache
		 */
		sgio_hdr_init(sd, hdr, io_u, 0);

		hdr->dxfer_direction = SG_DXFER_NONE;
		hdr->cmdp[0] = 0x35;	/* SYNCHRONIZE CACHE(10) */
	}

	if (hdr->dxfer_direction != SG_DXFER_NONE) {
		nr_blocks = io_u->buflen / sd->bs;
		lba = io_u->offset / sd->bs;
		hdr->cmdp[2] = (unsigned char) ((lba >> 24) & 0xff);
		hdr->cmdp[3] = (unsigned char) ((lba >> 16) & 0xff);
		hdr->cmdp[4] = (unsigned char) ((lba >> 8) & 0xff);
		hdr->cmdp[5] = (unsigned char) (lba & 0xff);
		hdr->cmdp[7] = (unsigned char) ((nr_blocks >> 8) & 0xff);
		hdr->cmdp[8] = (unsigned char) (nr_blocks & 0xff);
	}

	return 0;
}

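/*
 * Issue the prepared command. The engine is FIO_SYNCIO, so the command
 * has completed (or failed) by the time queue returns; a non-zero SCSI
 * status is reported as EIO along with the residual byte count.
 */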
static int fio_sgio_queue(struct thread_data *td, struct io_u *io_u)
{
	struct sg_io_hdr *hdr = &io_u->hdr;
	int ret;

	ret = fio_sgio_doio(td, io_u, io_u->ddir == DDIR_SYNC);

	if (ret < 0)
		io_u->error = errno;
	else if (hdr->status) {
		io_u->resid = hdr->resid;
		io_u->error = EIO;
	}

	return io_u->error;
}

static struct io_u *fio_sgio_event(struct thread_data *td, int event)
{
	struct sgio_data *sd = td->io_ops->data;

	return sd->events[event];
}

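/*
 * Probe the device block size with READ CAPACITY(10) (opcode 0x25);
 * bytes 4-7 of the 8-byte response hold the block length in bytes.
 */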
static int fio_sgio_get_bs(struct thread_data *td, unsigned int *bs)
{
	struct sgio_data *sd = td->io_ops->data;
	struct io_u *io_u;
	struct sg_io_hdr *hdr;
	unsigned char buf[8];
	int ret;

	io_u = __get_io_u(td);

	hdr = &io_u->hdr;
	sgio_hdr_init(sd, hdr, io_u, 0);
	memset(buf, 0, sizeof(buf));

	hdr->cmdp[0] = 0x25;	/* READ CAPACITY(10) */
	hdr->dxfer_direction = SG_DXFER_FROM_DEV;
	hdr->dxferp = buf;
	hdr->dxfer_len = sizeof(buf);

	ret = fio_sgio_doio(td, io_u, 1);
	if (!ret)
		*bs = (buf[4] << 24) | (buf[5] << 16) | (buf[6] << 8) | buf[7];

	put_io_u(td, io_u);
	return ret;
}

static void fio_sgio_cleanup(struct thread_data *td)
{
	struct sgio_data *sd = td->io_ops->data;

	if (sd) {
		free(sd->events);
		free(sd->cmds);
		free(sd);
		td->io_ops->data = NULL;
	}
}

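/*
 * Engine setup: allocate the per-io_u CDB slots and the event table,
 * then determine the block size: via BLKSSZGET for block devices, or
 * via the sg interface (after checking SG_GET_VERSION_NUM) for SCSI
 * character devices. Block devices also get the ioctl-based getevents
 * handler wired in, since they complete synchronously through SG_IO.
 */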
static int fio_sgio_init(struct thread_data *td)
{
	struct fio_file *f = &td->files[0];
	struct sgio_data *sd;
	unsigned int bs;
	int ret;

	sd = malloc(sizeof(*sd));
	memset(sd, 0, sizeof(*sd));
	sd->cmds = malloc(td->iodepth * sizeof(struct sgio_cmd));
	memset(sd->cmds, 0, td->iodepth * sizeof(struct sgio_cmd));
	sd->events = malloc(td->iodepth * sizeof(struct io_u *));
	memset(sd->events, 0, td->iodepth * sizeof(struct io_u *));
	td->io_ops->data = sd;

	if (td->filetype == FIO_TYPE_BD) {
		if (ioctl(f->fd, BLKSSZGET, &bs) < 0) {
			td_verror(td, errno);
			goto err;
		}
	} else if (td->filetype == FIO_TYPE_CHAR) {
		int version;

		if (ioctl(f->fd, SG_GET_VERSION_NUM, &version) < 0) {
			td_verror(td, errno);
			goto err;
		}

		ret = fio_sgio_get_bs(td, &bs);
		if (ret)
			goto err;
	} else {
		log_err("ioengine sgio only works on block devices\n");
		goto err;
	}

	sd->bs = bs;

	if (td->filetype == FIO_TYPE_BD)
		td->io_ops->getevents = fio_sgio_ioctl_getevents;
	else
		td->io_ops->getevents = fio_sgio_getevents;

	/*
	 * we want to issue sync requests, regardless of whether O_DIRECT
	 * is set or not
	 */
	td->override_sync = 1;
	return 0;
err:
	fio_sgio_cleanup(td);
	return 1;
}

static struct ioengine_ops ioengine = {
	.name		= "sg",
	.version	= FIO_IOOPS_VERSION,
	.init		= fio_sgio_init,
	.prep		= fio_sgio_prep,
	.queue		= fio_sgio_queue,
	.getevents	= fio_sgio_getevents,
	.event		= fio_sgio_event,
	.cleanup	= fio_sgio_cleanup,
	.flags		= FIO_SYNCIO | FIO_RAWIO,
};

#else /* FIO_HAVE_SGIO */

/*
 * When we have a proper configure system in place, we simply won't build
 * and install this io engine. For now install a crippled version that
 * just complains and fails to load.
 */
static int fio_sgio_init(struct thread_data fio_unused *td)
{
	fprintf(stderr, "fio: sgio not available\n");
	return 1;
}

static struct ioengine_ops ioengine = {
	.name		= "sg",
	.version	= FIO_IOOPS_VERSION,
	.init		= fio_sgio_init,
};

#endif /* FIO_HAVE_SGIO */

static void fio_init fio_sgio_register(void)
{
	register_ioengine(&ioengine);
}

static void fio_exit fio_sgio_unregister(void)
{
	unregister_ioengine(&ioengine);
}