Make profile io op overrides a dedicated structure
[fio.git] / engines / sg.c
... / ...
CommitLineData
1/*
2 * sg engine
3 *
4 * IO engine that uses the Linux SG v3 interface to talk to SCSI devices
5 *
6 */
7#include <stdio.h>
8#include <stdlib.h>
9#include <unistd.h>
10#include <errno.h>
11#include <assert.h>
12#include <sys/poll.h>
13
14#include "../fio.h"
15
16#ifdef FIO_HAVE_SGIO
17
/*
 * Per-io_u SCSI command slot: backing storage for the CDB that the
 * in-flight sg_io_hdr's cmdp points at (see sgio_hdr_init()).
 */
struct sgio_cmd {
	unsigned char cdb[10];	/* 10-byte CDB: READ/WRITE(10) etc */
	int nr;			/* NOTE(review): not referenced in this file */
};
22
/*
 * Per-thread engine state, hung off td->io_ops->data by fio_sgio_init().
 */
struct sgio_data {
	struct sgio_cmd *cmds;	/* one CDB slot per io_u, indexed by io_u->index */
	struct io_u **events;	/* completed io_us handed out by fio_sgio_event() */
	struct pollfd *pfds;	/* poll set, one entry per file */
	int *fd_flags;		/* saved fcntl F_GETFL flags, restored after non-blocking reap */
	void *sgbuf;		/* scratch buffer for reading completed sg_io_hdrs back */
	unsigned int bs;	/* device sector size, filled in by fio_sgio_type_check() */
	int type_checked;	/* nonzero once the device type has been validated */
};
32
33static void sgio_hdr_init(struct sgio_data *sd, struct sg_io_hdr *hdr,
34 struct io_u *io_u, int fs)
35{
36 struct sgio_cmd *sc = &sd->cmds[io_u->index];
37
38 memset(hdr, 0, sizeof(*hdr));
39 memset(sc->cdb, 0, sizeof(sc->cdb));
40
41 hdr->interface_id = 'S';
42 hdr->cmdp = sc->cdb;
43 hdr->cmd_len = sizeof(sc->cdb);
44 hdr->pack_id = io_u->index;
45 hdr->usr_ptr = io_u;
46
47 if (fs) {
48 hdr->dxferp = io_u->xfer_buf;
49 hdr->dxfer_len = io_u->xfer_buflen;
50 }
51}
52
/*
 * Return 1 if any of the 'fds' entries in 'pfds' reports POLLIN,
 * 0 otherwise.
 */
static int pollin_events(struct pollfd *pfds, int fds)
{
	struct pollfd *p, *end = pfds + fds;

	for (p = pfds; p < end; p++) {
		if (p->revents & POLLIN)
			return 1;
	}

	return 0;
}
63
64static int fio_sgio_getevents(struct thread_data *td, unsigned int min,
65 unsigned int max, struct timespec fio_unused *t)
66{
67 struct sgio_data *sd = td->io_ops->data;
68 int left = max, ret, r = 0;
69 void *buf = sd->sgbuf;
70 unsigned int i, events;
71 struct fio_file *f;
72
73 /*
74 * Fill in the file descriptors
75 */
76 for_each_file(td, f, i) {
77 /*
78 * don't block for min events == 0
79 */
80 if (!min) {
81 sd->fd_flags[i] = fcntl(f->fd, F_GETFL);
82 fcntl(f->fd, F_SETFL, sd->fd_flags[i] | O_NONBLOCK);
83 }
84 sd->pfds[i].fd = f->fd;
85 sd->pfds[i].events = POLLIN;
86 }
87
88 while (left) {
89 void *p;
90
91 do {
92 if (!min)
93 break;
94
95 ret = poll(sd->pfds, td->o.nr_files, -1);
96 if (ret < 0) {
97 if (!r)
98 r = -errno;
99 td_verror(td, errno, "poll");
100 break;
101 } else if (!ret)
102 continue;
103
104 if (pollin_events(sd->pfds, td->o.nr_files))
105 break;
106 } while (1);
107
108 if (r < 0)
109 break;
110
111re_read:
112 p = buf;
113 events = 0;
114 for_each_file(td, f, i) {
115 ret = read(f->fd, p, left * sizeof(struct sg_io_hdr));
116 if (ret < 0) {
117 if (errno == EAGAIN)
118 continue;
119 r = -errno;
120 td_verror(td, errno, "read");
121 break;
122 } else if (ret) {
123 p += ret;
124 events += ret / sizeof(struct sg_io_hdr);
125 }
126 }
127
128 if (r < 0)
129 break;
130 if (!events) {
131 usleep(1000);
132 goto re_read;
133 }
134
135 left -= events;
136 r += events;
137
138 for (i = 0; i < events; i++) {
139 struct sg_io_hdr *hdr = (struct sg_io_hdr *) buf + i;
140
141 sd->events[i] = hdr->usr_ptr;
142 }
143 }
144
145 if (!min) {
146 for_each_file(td, f, i)
147 fcntl(f->fd, F_SETFL, sd->fd_flags[i]);
148 }
149
150 return r;
151}
152
/*
 * Issue one command synchronously via the SG_IO ioctl (block device
 * path). Returns FIO_Q_COMPLETED on success, the negative ioctl return
 * value on failure.
 */
static int fio_sgio_ioctl_doio(struct thread_data *td,
			       struct fio_file *f, struct io_u *io_u)
{
	struct sgio_data *sd = td->io_ops->data;
	struct sg_io_hdr *hdr = &io_u->hdr;
	int ret;

	/* SG_IO completes inline, so park the io_u for fio_sgio_event() */
	sd->events[0] = io_u;

	ret = ioctl(f->fd, SG_IO, hdr);
	if (ret < 0)
		return ret;

	return FIO_Q_COMPLETED;
}
168
/*
 * Issue one command via the sg read/write interface (character device
 * path). Writing the header submits the command; if 'sync' is set the
 * completion is reaped immediately with a blocking read, otherwise the
 * command is left queued for fio_sgio_getevents() to collect.
 */
static int fio_sgio_rw_doio(struct fio_file *f, struct io_u *io_u, int sync)
{
	struct sg_io_hdr *hdr = &io_u->hdr;
	int ret;

	ret = write(f->fd, hdr, sizeof(*hdr));
	if (ret < 0)
		return ret;

	if (sync) {
		/* blocking read hands back the completed sg_io_hdr */
		ret = read(f->fd, hdr, sizeof(*hdr));
		if (ret < 0)
			return ret;
		return FIO_Q_COMPLETED;
	}

	return FIO_Q_QUEUED;
}
187
188static int fio_sgio_doio(struct thread_data *td, struct io_u *io_u, int sync)
189{
190 struct fio_file *f = io_u->file;
191
192 if (f->filetype == FIO_TYPE_BD)
193 return fio_sgio_ioctl_doio(td, f, io_u);
194
195 return fio_sgio_rw_doio(f, io_u, sync);
196}
197
198static int fio_sgio_prep(struct thread_data *td, struct io_u *io_u)
199{
200 struct sg_io_hdr *hdr = &io_u->hdr;
201 struct sgio_data *sd = td->io_ops->data;
202 int nr_blocks, lba;
203
204 if (io_u->xfer_buflen & (sd->bs - 1)) {
205 log_err("read/write not sector aligned\n");
206 return EINVAL;
207 }
208
209 if (io_u->ddir == DDIR_READ) {
210 sgio_hdr_init(sd, hdr, io_u, 1);
211
212 hdr->dxfer_direction = SG_DXFER_FROM_DEV;
213 hdr->cmdp[0] = 0x28;
214 } else if (io_u->ddir == DDIR_WRITE) {
215 sgio_hdr_init(sd, hdr, io_u, 1);
216
217 hdr->dxfer_direction = SG_DXFER_TO_DEV;
218 hdr->cmdp[0] = 0x2a;
219 } else {
220 sgio_hdr_init(sd, hdr, io_u, 0);
221
222 hdr->dxfer_direction = SG_DXFER_NONE;
223 hdr->cmdp[0] = 0x35;
224 }
225
226 if (hdr->dxfer_direction != SG_DXFER_NONE) {
227 nr_blocks = io_u->xfer_buflen / sd->bs;
228 lba = io_u->offset / sd->bs;
229 hdr->cmdp[2] = (unsigned char) ((lba >> 24) & 0xff);
230 hdr->cmdp[3] = (unsigned char) ((lba >> 16) & 0xff);
231 hdr->cmdp[4] = (unsigned char) ((lba >> 8) & 0xff);
232 hdr->cmdp[5] = (unsigned char) (lba & 0xff);
233 hdr->cmdp[7] = (unsigned char) ((nr_blocks >> 8) & 0xff);
234 hdr->cmdp[8] = (unsigned char) (nr_blocks & 0xff);
235 }
236
237 return 0;
238}
239
/*
 * Queue one io_u. The command is forced synchronous when the job asks
 * for sync/odirect I/O or the op itself is a sync. Returns a FIO_Q_*
 * status; errors are recorded in io_u->error.
 */
static int fio_sgio_queue(struct thread_data *td, struct io_u *io_u)
{
	struct sg_io_hdr *hdr = &io_u->hdr;
	int ret, do_sync = 0;

	fio_ro_check(td, io_u);

	if (td->o.sync_io || td->o.odirect || ddir_sync(io_u->ddir))
		do_sync = 1;

	ret = fio_sgio_doio(td, io_u, do_sync);

	if (ret < 0)
		io_u->error = errno;	/* submission itself failed */
	else if (hdr->status) {
		/* transport OK but bad SCSI status: report residual + EIO */
		io_u->resid = hdr->resid;
		io_u->error = EIO;
	}

	if (io_u->error) {
		td_verror(td, io_u->error, "xfer");
		return FIO_Q_COMPLETED;
	}

	return ret;
}
266
267static struct io_u *fio_sgio_event(struct thread_data *td, int event)
268{
269 struct sgio_data *sd = td->io_ops->data;
270
271 return sd->events[event];
272}
273
274static int fio_sgio_get_bs(struct thread_data *td, unsigned int *bs)
275{
276 struct sgio_data *sd = td->io_ops->data;
277 struct io_u *io_u;
278 struct sg_io_hdr *hdr;
279 unsigned char buf[8];
280 int ret;
281
282 io_u = __get_io_u(td);
283 io_u->file = td->files[0];
284 assert(io_u);
285
286 hdr = &io_u->hdr;
287 sgio_hdr_init(sd, hdr, io_u, 0);
288 memset(buf, 0, sizeof(buf));
289
290 hdr->cmdp[0] = 0x25;
291 hdr->dxfer_direction = SG_DXFER_FROM_DEV;
292 hdr->dxferp = buf;
293 hdr->dxfer_len = sizeof(buf);
294
295 ret = fio_sgio_doio(td, io_u, 1);
296 if (ret) {
297 put_io_u(td, io_u);
298 return ret;
299 }
300
301 *bs = (buf[4] << 24) | (buf[5] << 16) | (buf[6] << 8) | buf[7];
302 put_io_u(td, io_u);
303 return 0;
304}
305
306static void fio_sgio_cleanup(struct thread_data *td)
307{
308 struct sgio_data *sd = td->io_ops->data;
309
310 if (sd) {
311 free(sd->events);
312 free(sd->cmds);
313 free(sd->fd_flags);
314 free(sd->pfds);
315 free(sd->sgbuf);
316 free(sd);
317 }
318}
319
320static int fio_sgio_init(struct thread_data *td)
321{
322 struct sgio_data *sd;
323
324 sd = malloc(sizeof(*sd));
325 memset(sd, 0, sizeof(*sd));
326 sd->cmds = malloc(td->o.iodepth * sizeof(struct sgio_cmd));
327 memset(sd->cmds, 0, td->o.iodepth * sizeof(struct sgio_cmd));
328 sd->events = malloc(td->o.iodepth * sizeof(struct io_u *));
329 memset(sd->events, 0, td->o.iodepth * sizeof(struct io_u *));
330 sd->pfds = malloc(sizeof(struct pollfd) * td->o.nr_files);
331 memset(sd->pfds, 0, sizeof(struct pollfd) * td->o.nr_files);
332 sd->fd_flags = malloc(sizeof(int) * td->o.nr_files);
333 memset(sd->fd_flags, 0, sizeof(int) * td->o.nr_files);
334 sd->sgbuf = malloc(sizeof(struct sg_io_hdr) * td->o.iodepth);
335 memset(sd->sgbuf, 0, sizeof(struct sg_io_hdr) * td->o.iodepth);
336
337 td->io_ops->data = sd;
338
339 /*
340 * we want to do it, regardless of whether odirect is set or not
341 */
342 td->o.override_sync = 1;
343 return 0;
344}
345
/*
 * Validate that 'f' is a device this engine can drive and cache its
 * sector size in sd->bs. Block devices get the size via BLKSSZGET; sg
 * character devices are probed with SG_GET_VERSION_NUM and then asked
 * via READ CAPACITY (fio_sgio_get_bs). Returns 0 on success, 1 on
 * failure.
 */
static int fio_sgio_type_check(struct thread_data *td, struct fio_file *f)
{
	struct sgio_data *sd = td->io_ops->data;
	unsigned int bs;

	if (f->filetype == FIO_TYPE_BD) {
		if (ioctl(f->fd, BLKSSZGET, &bs) < 0) {
			td_verror(td, errno, "ioctl");
			return 1;
		}
	} else if (f->filetype == FIO_TYPE_CHAR) {
		int version, ret;

		/* confirm this char device really is an sg node */
		if (ioctl(f->fd, SG_GET_VERSION_NUM, &version) < 0) {
			td_verror(td, errno, "ioctl");
			return 1;
		}

		ret = fio_sgio_get_bs(td, &bs);
		if (ret)
			return 1;
	} else {
		log_err("ioengine sg only works on block devices\n");
		return 1;
	}

	sd->bs = bs;

	if (f->filetype == FIO_TYPE_BD) {
		/*
		 * Block devices complete SG_IO inline, so disable the async
		 * reap hooks. NOTE(review): this mutates the io_ops struct
		 * itself — verify it is per-thread before sharing engines.
		 */
		td->io_ops->getevents = NULL;
		td->io_ops->event = NULL;
	}

	return 0;
}
381
382static int fio_sgio_open(struct thread_data *td, struct fio_file *f)
383{
384 struct sgio_data *sd = td->io_ops->data;
385 int ret;
386
387 ret = generic_open_file(td, f);
388 if (ret)
389 return ret;
390
391 if (sd && !sd->type_checked && fio_sgio_type_check(td, f)) {
392 ret = generic_close_file(td, f);
393 return 1;
394 }
395
396 return 0;
397}
398
/*
 * sg engine ops. Registered as a sync raw-I/O engine; the getevents/
 * event hooks are cleared at runtime for block devices, where SG_IO
 * completes inline (see fio_sgio_type_check).
 */
static struct ioengine_ops ioengine = {
	.name		= "sg",
	.version	= FIO_IOOPS_VERSION,
	.init		= fio_sgio_init,
	.prep		= fio_sgio_prep,
	.queue		= fio_sgio_queue,
	.getevents	= fio_sgio_getevents,
	.event		= fio_sgio_event,
	.cleanup	= fio_sgio_cleanup,
	.open_file	= fio_sgio_open,
	.close_file	= generic_close_file,
	.get_file_size	= generic_get_file_size,
	.flags		= FIO_SYNCIO | FIO_RAWIO,
};
413
414#else /* FIO_HAVE_SGIO */
415
416/*
417 * When we have a proper configure system in place, we simply wont build
418 * and install this io engine. For now install a crippled version that
419 * just complains and fails to load.
420 */
421static int fio_sgio_init(struct thread_data fio_unused *td)
422{
423 fprintf(stderr, "fio: ioengine sg not available\n");
424 return 1;
425}
426
/*
 * Crippled stub ops for builds without SGIO: registers under the same
 * name so job files selecting "sg" fail at init instead of at parse.
 */
static struct ioengine_ops ioengine = {
	.name		= "sg",
	.version	= FIO_IOOPS_VERSION,
	.init		= fio_sgio_init,
};
432
433#endif
434
/* constructor: register this engine with fio core at load time */
static void fio_init fio_sgio_register(void)
{
	register_ioengine(&ioengine);
}
439
/* destructor: drop the registration when the binary/module unloads */
static void fio_exit fio_sgio_unregister(void)
{
	unregister_ioengine(&ioengine);
}