2 * scsi generic sg v3 io engine
15 unsigned char cdb[10];
20 struct sgio_cmd *cmds;
/*
 * Prepare the sg_io_hdr for one io_u: zero the header and this io_u's
 * dedicated CDB slot, then point the header at the io_u's data buffer.
 * NOTE(review): this view of the file is incomplete — the lines that hook
 * sc->cdb into hdr->cmdp, set hdr->usr_ptr, and use the 'fs' flag are not
 * visible here; confirm against the full source.
 */
25 static void sgio_hdr_init(struct sgio_data *sd, struct sg_io_hdr *hdr,
26 struct io_u *io_u, int fs)
/* one sgio_cmd (CDB storage) per in-flight io_u, indexed by io_u->index */
28 struct sgio_cmd *sc = &sd->cmds[io_u->index];
30 memset(hdr, 0, sizeof(*hdr));
31 memset(sc->cdb, 0, sizeof(sc->cdb));
/* 'S' identifies the sg v3 interface to the kernel */
33 hdr->interface_id = 'S';
35 hdr->cmd_len = sizeof(sc->cdb);
/* pack_id lets completions be matched back to the issuing io_u */
36 hdr->pack_id = io_u->index;
/* transfer straight to/from the io_u buffer */
40 hdr->dxferp = io_u->buf;
41 hdr->dxfer_len = io_u->buflen;
/*
 * Event reaping for the SG_IO ioctl path. The ioctl is synchronous, so at
 * most one io_u can be outstanding; presence on the busylist is the only
 * signal needed. 'min' and 't' are unused by design (fio_unused).
 */
45 static int fio_sgio_ioctl_getevents(struct thread_data *td, int fio_unused min,
46 int max, struct timespec fio_unused *t)
51 * we can only have one finished io_u for sync io, since the depth
/* nothing in flight -> nothing to reap */
54 if (list_empty(&td->io_u_busylist))
/*
 * Event reaping for the read/write (character device) path: completed
 * sg_io_hdrs are read back from the sg fd and mapped to their io_us via
 * usr_ptr. NOTE(review): several lines (poll call, loop structure, error
 * handling, and presumably free(buf)) are not visible in this view —
 * confirm the temporary buffer is freed before return.
 */
61 static int fio_sgio_getevents(struct thread_data *td, int min, int max,
62 struct timespec fio_unused *t)
64 struct fio_file *f = &td->files[0];
65 struct sgio_data *sd = td->io_ops->data;
66 struct pollfd pfd = { .fd = f->fd, .events = POLLIN };
/* scratch space to read back up to 'max' completed headers at once */
67 void *buf = malloc(max * sizeof(struct sg_io_hdr));
68 int left = max, ret, events, i, r = 0, fl = 0;
71 * don't block for !events
/* temporarily switch the fd to non-blocking so the read can't stall */
74 fl = fcntl(f->fd, F_GETFL);
75 fcntl(f->fd, F_SETFL, fl | O_NONBLOCK);
83 if (pfd.revents & POLLIN)
/* each completed request returns exactly one sg_io_hdr */
87 ret = read(f->fd, buf, left * sizeof(struct sg_io_hdr));
97 events = ret / sizeof(struct sg_io_hdr);
101 for (i = 0; i < events; i++) {
102 struct sg_io_hdr *hdr = (struct sg_io_hdr *) buf + i;
/* usr_ptr carries the io_u pointer through the kernel and back */
104 sd->events[i] = hdr->usr_ptr;
/* restore the original (blocking) fd flags */
109 fcntl(f->fd, F_SETFL, fl);
/*
 * Issue one request synchronously via the SG_IO ioctl (block device path).
 * The io_u is recorded in events[0] up front since the ioctl completes
 * inline; returns the ioctl's result.
 */
115 static int fio_sgio_ioctl_doio(struct thread_data *td, struct fio_file *f,
118 struct sgio_data *sd = td->io_ops->data;
119 struct sg_io_hdr *hdr = &io_u->hdr;
121 sd->events[0] = io_u;
123 return ioctl(f->fd, SG_IO, hdr);
/*
 * Issue one request via the sg read/write interface (character device
 * path): writing the sg_io_hdr submits the command. When 'sync' is set
 * the completed header is immediately read back, making the call behave
 * synchronously. NOTE(review): the return-value checks between these
 * lines are not visible in this view.
 */
126 static int fio_sgio_rw_doio(struct thread_data *td, struct fio_file *f,
127 struct io_u *io_u, int sync)
129 struct sg_io_hdr *hdr = &io_u->hdr;
/* submit: write() of a header queues the SCSI command */
132 ret = write(f->fd, hdr, sizeof(*hdr));
/* sync completion: read() blocks until the command finishes */
137 ret = read(f->fd, hdr, sizeof(*hdr));
/*
 * Dispatch one io_u to the proper submission mechanism: SG_IO ioctl for
 * block devices, sg read/write for character (sg) devices.
 */
145 static int fio_sgio_doio(struct thread_data *td, struct io_u *io_u, int sync)
147 struct fio_file *f = io_u->file;
149 if (td->filetype == FIO_TYPE_BD)
150 return fio_sgio_ioctl_doio(td, f, io_u);
152 return fio_sgio_rw_doio(td, f, io_u, sync);
/*
 * Issue a synchronous cache-flush style command using a data-less
 * (SG_DXFER_NONE) request on a spare io_u. NOTE(review): the lines that
 * assign 'hdr', set the CDB opcode, and put back the io_u are not visible
 * in this view — confirm the io_u is released on all paths.
 */
155 static int fio_sgio_sync(struct thread_data *td, struct fio_file *f)
157 struct sgio_data *sd = td->io_ops->data;
158 struct sg_io_hdr *hdr;
162 io_u = __get_io_u(td);
167 sgio_hdr_init(sd, hdr, io_u, 0);
/* no data phase: command only */
168 hdr->dxfer_direction = SG_DXFER_NONE;
/* issue synchronously (last arg = 1) */
172 ret = fio_sgio_doio(td, io_u, 1);
/*
 * Build the SCSI CDB for one io_u. The buffer must be a multiple of the
 * device sector size; LBA and block count are derived from io_u offset
 * and length and stored big-endian in the CDB, matching the 10-byte
 * READ/WRITE layout (4-byte LBA at bytes 2-5, 2-byte count at 7-8).
 * NOTE(review): the lines setting the opcode (cmdp[0]) and hooking cmdp
 * to the per-io_u cdb are not visible in this view.
 */
177 static int fio_sgio_prep(struct thread_data *td, struct io_u *io_u)
179 struct sg_io_hdr *hdr = &io_u->hdr;
180 struct sgio_data *sd = td->io_ops->data;
/* sd->bs is a power of two, so the mask tests sector alignment */
183 if (io_u->buflen & (sd->bs - 1)) {
184 log_err("read/write not sector aligned\n");
188 sgio_hdr_init(sd, hdr, io_u, 1);
190 if (io_u->ddir == DDIR_READ) {
191 hdr->dxfer_direction = SG_DXFER_FROM_DEV;
194 hdr->dxfer_direction = SG_DXFER_TO_DEV;
198 nr_blocks = io_u->buflen / sd->bs;
199 lba = io_u->offset / sd->bs;
/* big-endian 32-bit LBA, as the 10-byte CDB requires */
200 hdr->cmdp[2] = (lba >> 24) & 0xff;
201 hdr->cmdp[3] = (lba >> 16) & 0xff;
202 hdr->cmdp[4] = (lba >> 8) & 0xff;
203 hdr->cmdp[5] = lba & 0xff;
/* big-endian 16-bit transfer length, in blocks */
204 hdr->cmdp[7] = (nr_blocks >> 8) & 0xff;
205 hdr->cmdp[8] = nr_blocks & 0xff;
/*
 * Submit one prepared io_u (async: sync arg = 0). On a SCSI status error
 * the residual byte count is propagated to the io_u. NOTE(review): the
 * surrounding error-handling branches are not visible in this view.
 */
209 static int fio_sgio_queue(struct thread_data *td, struct io_u *io_u)
211 struct sg_io_hdr *hdr = &io_u->hdr;
214 ret = fio_sgio_doio(td, io_u, 0);
/* non-zero SCSI status: record how much did not transfer */
218 else if (hdr->status) {
219 io_u->resid = hdr->resid;
/*
 * Return the io_u for a reaped event slot, as filled in by the
 * getevents/doio paths above.
 */
226 static struct io_u *fio_sgio_event(struct thread_data *td, int event)
228 struct sgio_data *sd = td->io_ops->data;
230 return sd->events[event];
/*
 * Query the device block size via a SCSI command returning an 8-byte
 * payload; bytes 4-7 hold the big-endian block length (the READ
 * CAPACITY(10) layout). NOTE(review): the lines setting the CDB opcode,
 * hooking dxferp to 'buf', and releasing the io_u are not visible here —
 * note 'buf' is a stack array, so dxferp must be set before issue.
 */
233 static int fio_sgio_get_bs(struct thread_data *td, unsigned int *bs)
235 struct sgio_data *sd = td->io_ops->data;
237 struct sg_io_hdr *hdr;
238 unsigned char buf[8];
241 io_u = __get_io_u(td);
245 sgio_hdr_init(sd, hdr, io_u, 0);
246 memset(buf, 0, sizeof(buf));
/* device-to-host transfer of the capacity data */
249 hdr->dxfer_direction = SG_DXFER_FROM_DEV;
251 hdr->dxfer_len = sizeof(buf);
/* synchronous issue */
253 ret = fio_sgio_doio(td, io_u, 1);
/* block length: big-endian 32-bit value at bytes 4-7 */
259 *bs = (buf[4] << 24) | (buf[5] << 16) | (buf[6] << 8) | buf[7];
/*
 * Release the engine's private data and clear the pointer so a double
 * cleanup is harmless. NOTE(review): only free(sd) is visible here;
 * sd->cmds and sd->events allocated in fio_sgio_init appear to leak —
 * confirm against the full source (freeing lines may be hidden).
 */
264 static void fio_sgio_cleanup(struct thread_data *td)
266 if (td->io_ops->data) {
267 free(td->io_ops->data);
268 td->io_ops->data = NULL;
/*
 * Engine setup: allocate per-job state (one CDB slot and one event slot
 * per queue depth entry), determine the device block size, and select the
 * getevents implementation matching the submission path. NOTE(review):
 * the malloc results are used unchecked here, and the sg version test /
 * error cleanup lines are not visible in this view — confirm OOM and
 * error paths against the full source.
 */
272 static int fio_sgio_init(struct thread_data *td)
274 struct fio_file *f = &td->files[0];
275 struct sgio_data *sd;
279 sd = malloc(sizeof(*sd));
280 sd->cmds = malloc(td->iodepth * sizeof(struct sgio_cmd));
281 sd->events = malloc(td->iodepth * sizeof(struct io_u *));
282 td->io_ops->data = sd;
/* block device: the kernel can report the sector size directly */
284 if (td->filetype == FIO_TYPE_BD) {
285 if (ioctl(f->fd, BLKSSZGET, &bs) < 0) {
286 td_verror(td, errno);
289 } else if (td->filetype == FIO_TYPE_CHAR) {
/* char device: verify it really is an sg node before probing */
292 if (ioctl(f->fd, SG_GET_VERSION_NUM, &version) < 0) {
293 td_verror(td, errno);
/* sg node: ask the device itself for its block size */
297 ret = fio_sgio_get_bs(td, &bs);
301 log_err("ioengine sgio only works on block devices\n");
/* pick the reap strategy that matches the submit path chosen above */
307 if (td->filetype == FIO_TYPE_BD)
308 td->io_ops->getevents = fio_sgio_ioctl_getevents;
310 td->io_ops->getevents = fio_sgio_getevents;
313 * we want to do it, regardless of whether odirect is set or not
315 td->override_sync = 1;
/*
 * Engine registration table. .getevents holds the rw-path default and is
 * overridden at init time for block devices (see fio_sgio_init). FIO_RAWIO
 * plus FIO_SYNCIO marks this as a synchronous raw-device engine.
 * NOTE(review): the struct's closing lines are beyond this view.
 */
319 struct ioengine_ops ioengine = {
321 .version = FIO_IOOPS_VERSION,
322 .init = fio_sgio_init,
323 .prep = fio_sgio_prep,
324 .queue = fio_sgio_queue,
325 .getevents = fio_sgio_getevents,
326 .event = fio_sgio_event,
327 .cleanup = fio_sgio_cleanup,
328 .sync = fio_sgio_sync,
329 .flags = FIO_SYNCIO | FIO_RAWIO,