An option need not include ->posval[] entries
[fio.git] / engines / sg.c
... / ...
CommitLineData
1/*
2 * scsi generic sg v3 io engine
3 *
4 */
5#include <stdio.h>
6#include <stdlib.h>
7#include <unistd.h>
8#include <errno.h>
9#include <assert.h>
10#include <sys/poll.h>
11
12#include "../fio.h"
13#include "../os.h"
14
15#ifdef FIO_HAVE_SGIO
16
/*
 * Per-io_u SCSI command state: the 10-byte CDB handed to the sg driver
 * via sg_io_hdr->cmdp (this engine only issues 10-byte commands).
 */
struct sgio_cmd {
	unsigned char cdb[10];	/* command descriptor block */
	int nr;			/* NOTE(review): not referenced in this file — purpose unclear from here */
};
21
/*
 * Per-thread engine state, hung off td->io_ops->data by fio_sgio_init()
 * and torn down by fio_sgio_cleanup().
 */
struct sgio_data {
	struct sgio_cmd *cmds;	/* one command block per io_u, indexed by io_u->index */
	struct io_u **events;	/* completed io_us, returned through ->event() */
	struct pollfd *pfds;	/* poll set used while reaping, one slot per file */
	int *fd_flags;		/* saved F_GETFL flags, restored after non-blocking reap */
	void *sgbuf;		/* scratch buffer for reading back sg_io_hdr completions */
	unsigned int bs;	/* device block size in bytes (from BLKSSZGET or READ CAPACITY) */
	int type_checked;	/* consulted by fio_sgio_open() to skip re-running the type check */
};
31
32static void sgio_hdr_init(struct sgio_data *sd, struct sg_io_hdr *hdr,
33 struct io_u *io_u, int fs)
34{
35 struct sgio_cmd *sc = &sd->cmds[io_u->index];
36
37 memset(hdr, 0, sizeof(*hdr));
38 memset(sc->cdb, 0, sizeof(sc->cdb));
39
40 hdr->interface_id = 'S';
41 hdr->cmdp = sc->cdb;
42 hdr->cmd_len = sizeof(sc->cdb);
43 hdr->pack_id = io_u->index;
44 hdr->usr_ptr = io_u;
45
46 if (fs) {
47 hdr->dxferp = io_u->xfer_buf;
48 hdr->dxfer_len = io_u->xfer_buflen;
49 }
50}
51
/*
 * Return 1 if any of the 'fds' entries in 'pfds' reported POLLIN in
 * its revents, 0 otherwise.
 */
static int pollin_events(struct pollfd *pfds, int fds)
{
	struct pollfd *p = pfds;
	struct pollfd *end = pfds + fds;

	while (p != end) {
		if (p->revents & POLLIN)
			return 1;
		p++;
	}

	return 0;
}
62
63static int fio_sgio_getevents(struct thread_data *td, int min, int max,
64 struct timespec fio_unused *t)
65{
66 /*
67 * normally hard coding &td->files[0] is a bug that needs to be fixed,
68 * but it's ok here as all files should point to the same device.
69 */
70 struct fio_file *f = &td->files[0];
71 struct sgio_data *sd = td->io_ops->data;
72 int left = max, ret, events, i, r = 0;
73 void *buf = sd->sgbuf;
74
75 /*
76 * Fill in the file descriptors
77 */
78 for_each_file(td, f, i) {
79 /*
80 * don't block for min events == 0
81 */
82 if (!min) {
83 sd->fd_flags[i] = fcntl(f->fd, F_GETFL);
84 fcntl(f->fd, F_SETFL, sd->fd_flags[i] | O_NONBLOCK);
85 }
86 sd->pfds[i].fd = f->fd;
87 sd->pfds[i].events = POLLIN;
88 }
89
90 while (left) {
91 void *p;
92
93 do {
94 if (!min)
95 break;
96
97 ret = poll(sd->pfds, td->nr_files, -1);
98 if (ret < 0) {
99 if (!r)
100 r = -errno;
101 td_verror(td, errno, "poll");
102 break;
103 } else if (!ret)
104 continue;
105
106 if (pollin_events(sd->pfds, td->nr_files))
107 break;
108 } while (1);
109
110 if (r < 0)
111 break;
112
113re_read:
114 p = buf;
115 events = 0;
116 for_each_file(td, f, i) {
117 ret = read(f->fd, p, left * sizeof(struct sg_io_hdr));
118 if (ret < 0) {
119 if (errno == EAGAIN)
120 continue;
121 r = -errno;
122 td_verror(td, errno, "read");
123 break;
124 } else if (ret) {
125 p += ret;
126 events += ret / sizeof(struct sg_io_hdr);
127 }
128 }
129
130 if (r < 0)
131 break;
132 if (!events) {
133 usleep(1000);
134 goto re_read;
135 }
136
137 left -= events;
138 r += events;
139
140 for (i = 0; i < events; i++) {
141 struct sg_io_hdr *hdr = (struct sg_io_hdr *) buf + i;
142
143 sd->events[i] = hdr->usr_ptr;
144 }
145 }
146
147 if (!min) {
148 for_each_file(td, f, i)
149 fcntl(f->fd, F_SETFL, sd->fd_flags[i]);
150 }
151
152 return r;
153}
154
155static int fio_sgio_ioctl_doio(struct thread_data *td,
156 struct fio_file *f, struct io_u *io_u)
157{
158 struct sgio_data *sd = td->io_ops->data;
159 struct sg_io_hdr *hdr = &io_u->hdr;
160 int ret;
161
162 sd->events[0] = io_u;
163
164 ret = ioctl(f->fd, SG_IO, hdr);
165 if (ret < 0)
166 return -errno;
167
168 return FIO_Q_COMPLETED;
169}
170
171static int fio_sgio_rw_doio(struct fio_file *f, struct io_u *io_u, int sync)
172{
173 struct sg_io_hdr *hdr = &io_u->hdr;
174 int ret;
175
176 ret = write(f->fd, hdr, sizeof(*hdr));
177 if (ret < 0)
178 return errno;
179
180 if (sync) {
181 ret = read(f->fd, hdr, sizeof(*hdr));
182 if (ret < 0)
183 return -errno;
184 return FIO_Q_COMPLETED;
185 }
186
187 return FIO_Q_QUEUED;
188}
189
190static int fio_sgio_doio(struct thread_data *td, struct io_u *io_u, int sync)
191{
192 struct fio_file *f = io_u->file;
193
194 if (td->filetype == FIO_TYPE_BD)
195 return fio_sgio_ioctl_doio(td, f, io_u);
196
197 return fio_sgio_rw_doio(f, io_u, sync);
198}
199
200static int fio_sgio_prep(struct thread_data *td, struct io_u *io_u)
201{
202 struct sg_io_hdr *hdr = &io_u->hdr;
203 struct sgio_data *sd = td->io_ops->data;
204 int nr_blocks, lba;
205
206 if (io_u->xfer_buflen & (sd->bs - 1)) {
207 log_err("read/write not sector aligned\n");
208 return EINVAL;
209 }
210
211 if (io_u->ddir == DDIR_READ) {
212 sgio_hdr_init(sd, hdr, io_u, 1);
213
214 hdr->dxfer_direction = SG_DXFER_FROM_DEV;
215 hdr->cmdp[0] = 0x28;
216 } else if (io_u->ddir == DDIR_WRITE) {
217 sgio_hdr_init(sd, hdr, io_u, 1);
218
219 hdr->dxfer_direction = SG_DXFER_TO_DEV;
220 hdr->cmdp[0] = 0x2a;
221 } else {
222 sgio_hdr_init(sd, hdr, io_u, 0);
223
224 hdr->dxfer_direction = SG_DXFER_NONE;
225 hdr->cmdp[0] = 0x35;
226 }
227
228 if (hdr->dxfer_direction != SG_DXFER_NONE) {
229 nr_blocks = io_u->xfer_buflen / sd->bs;
230 lba = io_u->offset / sd->bs;
231 hdr->cmdp[2] = (unsigned char) ((lba >> 24) & 0xff);
232 hdr->cmdp[3] = (unsigned char) ((lba >> 16) & 0xff);
233 hdr->cmdp[4] = (unsigned char) ((lba >> 8) & 0xff);
234 hdr->cmdp[5] = (unsigned char) (lba & 0xff);
235 hdr->cmdp[7] = (unsigned char) ((nr_blocks >> 8) & 0xff);
236 hdr->cmdp[8] = (unsigned char) (nr_blocks & 0xff);
237 }
238
239 return 0;
240}
241
242static int fio_sgio_queue(struct thread_data *td, struct io_u *io_u)
243{
244 struct sg_io_hdr *hdr = &io_u->hdr;
245 int ret;
246
247 ret = fio_sgio_doio(td, io_u, io_u->ddir == DDIR_SYNC);
248
249 if (ret < 0)
250 io_u->error = errno;
251 else if (hdr->status) {
252 io_u->resid = hdr->resid;
253 io_u->error = EIO;
254 }
255
256 if (io_u->error) {
257 td_verror(td, io_u->error, "xfer");
258 return FIO_Q_COMPLETED;
259 }
260
261 return ret;
262}
263
264static struct io_u *fio_sgio_event(struct thread_data *td, int event)
265{
266 struct sgio_data *sd = td->io_ops->data;
267
268 return sd->events[event];
269}
270
271static int fio_sgio_get_bs(struct thread_data *td, unsigned int *bs)
272{
273 struct sgio_data *sd = td->io_ops->data;
274 struct io_u *io_u;
275 struct sg_io_hdr *hdr;
276 unsigned char buf[8];
277 int ret;
278
279 io_u = __get_io_u(td);
280 io_u->file = &td->files[0];
281 assert(io_u);
282
283 hdr = &io_u->hdr;
284 sgio_hdr_init(sd, hdr, io_u, 0);
285 memset(buf, 0, sizeof(buf));
286
287 hdr->cmdp[0] = 0x25;
288 hdr->dxfer_direction = SG_DXFER_FROM_DEV;
289 hdr->dxferp = buf;
290 hdr->dxfer_len = sizeof(buf);
291
292 ret = fio_sgio_doio(td, io_u, 1);
293 if (ret) {
294 put_io_u(td, io_u);
295 return ret;
296 }
297
298 *bs = (buf[4] << 24) | (buf[5] << 16) | (buf[6] << 8) | buf[7];
299 put_io_u(td, io_u);
300 return 0;
301}
302
303static void fio_sgio_cleanup(struct thread_data *td)
304{
305 struct sgio_data *sd = td->io_ops->data;
306
307 if (sd) {
308 free(sd->events);
309 free(sd->cmds);
310 free(sd->fd_flags);
311 free(sd->pfds);
312 free(sd->sgbuf);
313 free(sd);
314
315 td->io_ops->data = NULL;
316 }
317}
318
319static int fio_sgio_init(struct thread_data *td)
320{
321 struct sgio_data *sd;
322
323 sd = malloc(sizeof(*sd));
324 memset(sd, 0, sizeof(*sd));
325 sd->cmds = malloc(td->iodepth * sizeof(struct sgio_cmd));
326 memset(sd->cmds, 0, td->iodepth * sizeof(struct sgio_cmd));
327 sd->events = malloc(td->iodepth * sizeof(struct io_u *));
328 memset(sd->events, 0, td->iodepth * sizeof(struct io_u *));
329 sd->pfds = malloc(sizeof(struct pollfd) * td->nr_files);
330 memset(sd->pfds, 0, sizeof(struct pollfd) * td->nr_files);
331 sd->fd_flags = malloc(sizeof(int) * td->nr_files);
332 memset(sd->fd_flags, 0, sizeof(int) * td->nr_files);
333 sd->sgbuf = malloc(sizeof(struct sg_io_hdr) * td->iodepth);
334 memset(sd->sgbuf, 0, sizeof(struct sg_io_hdr) * td->iodepth);
335
336 td->io_ops->data = sd;
337
338 /*
339 * we want to do it, regardless of whether odirect is set or not
340 */
341 td->override_sync = 1;
342 return 0;
343}
344
345static int fio_sgio_type_check(struct thread_data *td, struct fio_file *f)
346{
347 struct sgio_data *sd = td->io_ops->data;
348 unsigned int bs;
349
350 if (td->filetype == FIO_TYPE_BD) {
351 if (ioctl(f->fd, BLKSSZGET, &bs) < 0) {
352 td_verror(td, errno, "ioctl");
353 return 1;
354 }
355 } else if (td->filetype == FIO_TYPE_CHAR) {
356 int version, ret;
357
358 if (ioctl(f->fd, SG_GET_VERSION_NUM, &version) < 0) {
359 td_verror(td, errno, "ioctl");
360 return 1;
361 }
362
363 ret = fio_sgio_get_bs(td, &bs);
364 if (ret)
365 return 1;
366 } else {
367 log_err("ioengine sgio only works on block devices\n");
368 return 1;
369 }
370
371 sd->bs = bs;
372
373 if (td->filetype == FIO_TYPE_BD) {
374 td->io_ops->getevents = NULL;
375 td->io_ops->event = NULL;
376 }
377
378 return 0;
379}
380
381static int fio_sgio_open(struct thread_data *td, struct fio_file *f)
382{
383 struct sgio_data *sd = td->io_ops->data;
384 int ret;
385
386 ret = generic_open_file(td, f);
387 if (ret)
388 return ret;
389
390 if (!sd->type_checked && fio_sgio_type_check(td, f)) {
391 generic_close_file(td, f);
392 return 1;
393 }
394
395 return 0;
396}
397
/*
 * Engine entry points. For block devices, fio_sgio_type_check() clears
 * ->getevents/->event at runtime because the SG_IO ioctl completes
 * inline (see fio_sgio_ioctl_doio()).
 */
static struct ioengine_ops ioengine = {
	.name		= "sg",
	.version	= FIO_IOOPS_VERSION,
	.init		= fio_sgio_init,
	.prep		= fio_sgio_prep,
	.queue		= fio_sgio_queue,
	.getevents	= fio_sgio_getevents,
	.event		= fio_sgio_event,
	.cleanup	= fio_sgio_cleanup,
	.open_file	= fio_sgio_open,
	.close_file	= generic_close_file,
	.flags		= FIO_SYNCIO | FIO_RAWIO,
};
411
412#else /* FIO_HAVE_SGIO */
413
414/*
415 * When we have a proper configure system in place, we simply wont build
416 * and install this io engine. For now install a crippled version that
417 * just complains and fails to load.
418 */
419static int fio_sgio_init(struct thread_data fio_unused *td)
420{
421 fprintf(stderr, "fio: sgio not available\n");
422 return 1;
423}
424
/*
 * Stub ops table: only ->init is set, and it always fails, so a job
 * requesting this engine errors out cleanly.
 *
 * NOTE(review): registered as "sgio" while the real engine above is
 * "sg" — looks unintentional; confirm which name jobs actually use.
 */
static struct ioengine_ops ioengine = {
	.name		= "sgio",
	.version	= FIO_IOOPS_VERSION,
	.init		= fio_sgio_init,
};
430
431#endif
432
/* Constructor (fio_init): register this engine with the fio core at load time. */
static void fio_init fio_sgio_register(void)
{
	register_ioengine(&ioengine);
}
437
/* Destructor (fio_exit): unregister the engine when fio shuts down. */
static void fio_exit fio_sgio_unregister(void)
{
	unregister_ioengine(&ioengine);
}