/*
 * fio: engines/sg.c
 * Commit: "Error check fcntl() calls"
 */
1/*
2 * sg engine
3 *
4 * IO engine that uses the Linux SG v3 interface to talk to SCSI devices
5 *
6 */
7#include <stdio.h>
8#include <stdlib.h>
9#include <unistd.h>
10#include <errno.h>
11#include <assert.h>
12#include <sys/poll.h>
13
14#include "../fio.h"
15
16#ifdef FIO_HAVE_SGIO
17
/*
 * Per-io_u SCSI command state: the 10-byte CDB handed to the sg
 * driver through sg_io_hdr->cmdp. One slot per io_u, indexed by
 * io_u->index (see sgio_hdr_init()).
 */
struct sgio_cmd {
	unsigned char cdb[10];
	int nr;		/* NOTE(review): not referenced anywhere in this file - confirm if dead */
};
22
/*
 * Per-thread engine state, allocated in fio_sgio_init() and stored in
 * td->io_ops->data.
 */
struct sgio_data {
	struct sgio_cmd *cmds;	/* one CDB slot per io_u, indexed by io_u->index */
	struct io_u **events;	/* completed io_us handed out via fio_sgio_event() */
	struct pollfd *pfds;	/* poll set, one entry per file */
	int *fd_flags;		/* fd flags written back with F_SETFL in fio_sgio_getevents() */
	void *sgbuf;		/* reply buffer: iodepth sg_io_hdr structs */
	unsigned int bs;	/* device block size (BLKSSZGET or READ CAPACITY) */
	int type_checked;	/* set once the device probe has run */
};
32
33static void sgio_hdr_init(struct sgio_data *sd, struct sg_io_hdr *hdr,
34 struct io_u *io_u, int fs)
35{
36 struct sgio_cmd *sc = &sd->cmds[io_u->index];
37
38 memset(hdr, 0, sizeof(*hdr));
39 memset(sc->cdb, 0, sizeof(sc->cdb));
40
41 hdr->interface_id = 'S';
42 hdr->cmdp = sc->cdb;
43 hdr->cmd_len = sizeof(sc->cdb);
44 hdr->pack_id = io_u->index;
45 hdr->usr_ptr = io_u;
46
47 if (fs) {
48 hdr->dxferp = io_u->xfer_buf;
49 hdr->dxfer_len = io_u->xfer_buflen;
50 }
51}
52
/*
 * Return 1 if any of the @fds entries in @pfds has POLLIN pending,
 * 0 otherwise.
 */
static int pollin_events(struct pollfd *pfds, int fds)
{
	struct pollfd *p = pfds, *end = pfds + fds;

	while (p < end) {
		if (p->revents & POLLIN)
			return 1;
		p++;
	}

	return 0;
}
63
64static int fio_sgio_getevents(struct thread_data *td, unsigned int min,
65 unsigned int max, struct timespec fio_unused *t)
66{
67 struct sgio_data *sd = td->io_ops->data;
68 int left = max, ret, r = 0;
69 void *buf = sd->sgbuf;
70 unsigned int i, events;
71 struct fio_file *f;
72
73 /*
74 * Fill in the file descriptors
75 */
76 for_each_file(td, f, i) {
77 /*
78 * don't block for min events == 0
79 */
80 if (!min)
81 fio_set_fd_nonblocking(f->fd, "sg");
82
83 sd->pfds[i].fd = f->fd;
84 sd->pfds[i].events = POLLIN;
85 }
86
87 while (left) {
88 void *p;
89
90 do {
91 if (!min)
92 break;
93
94 ret = poll(sd->pfds, td->o.nr_files, -1);
95 if (ret < 0) {
96 if (!r)
97 r = -errno;
98 td_verror(td, errno, "poll");
99 break;
100 } else if (!ret)
101 continue;
102
103 if (pollin_events(sd->pfds, td->o.nr_files))
104 break;
105 } while (1);
106
107 if (r < 0)
108 break;
109
110re_read:
111 p = buf;
112 events = 0;
113 for_each_file(td, f, i) {
114 ret = read(f->fd, p, left * sizeof(struct sg_io_hdr));
115 if (ret < 0) {
116 if (errno == EAGAIN)
117 continue;
118 r = -errno;
119 td_verror(td, errno, "read");
120 break;
121 } else if (ret) {
122 p += ret;
123 events += ret / sizeof(struct sg_io_hdr);
124 }
125 }
126
127 if (r < 0)
128 break;
129 if (!events) {
130 usleep(1000);
131 goto re_read;
132 }
133
134 left -= events;
135 r += events;
136
137 for (i = 0; i < events; i++) {
138 struct sg_io_hdr *hdr = (struct sg_io_hdr *) buf + i;
139
140 sd->events[i] = hdr->usr_ptr;
141 }
142 }
143
144 if (!min) {
145 for_each_file(td, f, i)
146 fcntl(f->fd, F_SETFL, sd->fd_flags[i]);
147 }
148
149 return r;
150}
151
152static int fio_sgio_ioctl_doio(struct thread_data *td,
153 struct fio_file *f, struct io_u *io_u)
154{
155 struct sgio_data *sd = td->io_ops->data;
156 struct sg_io_hdr *hdr = &io_u->hdr;
157 int ret;
158
159 sd->events[0] = io_u;
160
161 ret = ioctl(f->fd, SG_IO, hdr);
162 if (ret < 0)
163 return ret;
164
165 return FIO_Q_COMPLETED;
166}
167
168static int fio_sgio_rw_doio(struct fio_file *f, struct io_u *io_u, int do_sync)
169{
170 struct sg_io_hdr *hdr = &io_u->hdr;
171 int ret;
172
173 ret = write(f->fd, hdr, sizeof(*hdr));
174 if (ret < 0)
175 return ret;
176
177 if (do_sync) {
178 ret = read(f->fd, hdr, sizeof(*hdr));
179 if (ret < 0)
180 return ret;
181 return FIO_Q_COMPLETED;
182 }
183
184 return FIO_Q_QUEUED;
185}
186
187static int fio_sgio_doio(struct thread_data *td, struct io_u *io_u, int do_sync)
188{
189 struct fio_file *f = io_u->file;
190
191 if (f->filetype == FIO_TYPE_BD)
192 return fio_sgio_ioctl_doio(td, f, io_u);
193
194 return fio_sgio_rw_doio(f, io_u, do_sync);
195}
196
197static int fio_sgio_prep(struct thread_data *td, struct io_u *io_u)
198{
199 struct sg_io_hdr *hdr = &io_u->hdr;
200 struct sgio_data *sd = td->io_ops->data;
201 int nr_blocks, lba;
202
203 if (io_u->xfer_buflen & (sd->bs - 1)) {
204 log_err("read/write not sector aligned\n");
205 return EINVAL;
206 }
207
208 if (io_u->ddir == DDIR_READ) {
209 sgio_hdr_init(sd, hdr, io_u, 1);
210
211 hdr->dxfer_direction = SG_DXFER_FROM_DEV;
212 hdr->cmdp[0] = 0x28;
213 } else if (io_u->ddir == DDIR_WRITE) {
214 sgio_hdr_init(sd, hdr, io_u, 1);
215
216 hdr->dxfer_direction = SG_DXFER_TO_DEV;
217 hdr->cmdp[0] = 0x2a;
218 } else {
219 sgio_hdr_init(sd, hdr, io_u, 0);
220
221 hdr->dxfer_direction = SG_DXFER_NONE;
222 hdr->cmdp[0] = 0x35;
223 }
224
225 if (hdr->dxfer_direction != SG_DXFER_NONE) {
226 nr_blocks = io_u->xfer_buflen / sd->bs;
227 lba = io_u->offset / sd->bs;
228 hdr->cmdp[2] = (unsigned char) ((lba >> 24) & 0xff);
229 hdr->cmdp[3] = (unsigned char) ((lba >> 16) & 0xff);
230 hdr->cmdp[4] = (unsigned char) ((lba >> 8) & 0xff);
231 hdr->cmdp[5] = (unsigned char) (lba & 0xff);
232 hdr->cmdp[7] = (unsigned char) ((nr_blocks >> 8) & 0xff);
233 hdr->cmdp[8] = (unsigned char) (nr_blocks & 0xff);
234 }
235
236 return 0;
237}
238
239static int fio_sgio_queue(struct thread_data *td, struct io_u *io_u)
240{
241 struct sg_io_hdr *hdr = &io_u->hdr;
242 int ret, do_sync = 0;
243
244 fio_ro_check(td, io_u);
245
246 if (td->o.sync_io || td->o.odirect || ddir_sync(io_u->ddir))
247 do_sync = 1;
248
249 ret = fio_sgio_doio(td, io_u, do_sync);
250
251 if (ret < 0)
252 io_u->error = errno;
253 else if (hdr->status) {
254 io_u->resid = hdr->resid;
255 io_u->error = EIO;
256 }
257
258 if (io_u->error) {
259 td_verror(td, io_u->error, "xfer");
260 return FIO_Q_COMPLETED;
261 }
262
263 return ret;
264}
265
266static struct io_u *fio_sgio_event(struct thread_data *td, int event)
267{
268 struct sgio_data *sd = td->io_ops->data;
269
270 return sd->events[event];
271}
272
273static int fio_sgio_get_bs(struct thread_data *td, unsigned int *bs)
274{
275 struct sgio_data *sd = td->io_ops->data;
276 struct io_u io_u;
277 struct sg_io_hdr *hdr;
278 unsigned char buf[8];
279 int ret;
280
281 memset(&io_u, 0, sizeof(io_u));
282 io_u.file = td->files[0];
283
284 hdr = &io_u.hdr;
285 sgio_hdr_init(sd, hdr, &io_u, 0);
286 memset(buf, 0, sizeof(buf));
287
288 hdr->cmdp[0] = 0x25;
289 hdr->dxfer_direction = SG_DXFER_FROM_DEV;
290 hdr->dxferp = buf;
291 hdr->dxfer_len = sizeof(buf);
292
293 ret = fio_sgio_doio(td, &io_u, 1);
294 if (ret)
295 return ret;
296
297 *bs = (buf[4] << 24) | (buf[5] << 16) | (buf[6] << 8) | buf[7];
298 return 0;
299}
300
301static void fio_sgio_cleanup(struct thread_data *td)
302{
303 struct sgio_data *sd = td->io_ops->data;
304
305 if (sd) {
306 free(sd->events);
307 free(sd->cmds);
308 free(sd->fd_flags);
309 free(sd->pfds);
310 free(sd->sgbuf);
311 free(sd);
312 }
313}
314
315static int fio_sgio_init(struct thread_data *td)
316{
317 struct sgio_data *sd;
318
319 sd = malloc(sizeof(*sd));
320 memset(sd, 0, sizeof(*sd));
321 sd->cmds = malloc(td->o.iodepth * sizeof(struct sgio_cmd));
322 memset(sd->cmds, 0, td->o.iodepth * sizeof(struct sgio_cmd));
323 sd->events = malloc(td->o.iodepth * sizeof(struct io_u *));
324 memset(sd->events, 0, td->o.iodepth * sizeof(struct io_u *));
325 sd->pfds = malloc(sizeof(struct pollfd) * td->o.nr_files);
326 memset(sd->pfds, 0, sizeof(struct pollfd) * td->o.nr_files);
327 sd->fd_flags = malloc(sizeof(int) * td->o.nr_files);
328 memset(sd->fd_flags, 0, sizeof(int) * td->o.nr_files);
329 sd->sgbuf = malloc(sizeof(struct sg_io_hdr) * td->o.iodepth);
330 memset(sd->sgbuf, 0, sizeof(struct sg_io_hdr) * td->o.iodepth);
331
332 td->io_ops->data = sd;
333
334 /*
335 * we want to do it, regardless of whether odirect is set or not
336 */
337 td->o.override_sync = 1;
338 return 0;
339}
340
341static int fio_sgio_type_check(struct thread_data *td, struct fio_file *f)
342{
343 struct sgio_data *sd = td->io_ops->data;
344 unsigned int bs;
345
346 if (f->filetype == FIO_TYPE_BD) {
347 if (ioctl(f->fd, BLKSSZGET, &bs) < 0) {
348 td_verror(td, errno, "ioctl");
349 return 1;
350 }
351 } else if (f->filetype == FIO_TYPE_CHAR) {
352 int version, ret;
353
354 if (ioctl(f->fd, SG_GET_VERSION_NUM, &version) < 0) {
355 td_verror(td, errno, "ioctl");
356 return 1;
357 }
358
359 ret = fio_sgio_get_bs(td, &bs);
360 if (ret)
361 return 1;
362 } else {
363 log_err("ioengine sg only works on block devices\n");
364 return 1;
365 }
366
367 sd->bs = bs;
368
369 if (f->filetype == FIO_TYPE_BD) {
370 td->io_ops->getevents = NULL;
371 td->io_ops->event = NULL;
372 }
373
374 return 0;
375}
376
377static int fio_sgio_open(struct thread_data *td, struct fio_file *f)
378{
379 struct sgio_data *sd = td->io_ops->data;
380 int ret;
381
382 ret = generic_open_file(td, f);
383 if (ret)
384 return ret;
385
386 if (sd && !sd->type_checked && fio_sgio_type_check(td, f)) {
387 ret = generic_close_file(td, f);
388 return 1;
389 }
390
391 return 0;
392}
393
/*
 * sg engine ops. Note that .getevents/.event are cleared at runtime
 * for block devices (see fio_sgio_type_check) since SG_IO completes
 * inline.
 */
static struct ioengine_ops ioengine = {
	.name = "sg",
	.version = FIO_IOOPS_VERSION,
	.init = fio_sgio_init,
	.prep = fio_sgio_prep,
	.queue = fio_sgio_queue,
	.getevents = fio_sgio_getevents,
	.event = fio_sgio_event,
	.cleanup = fio_sgio_cleanup,
	.open_file = fio_sgio_open,
	.close_file = generic_close_file,
	.get_file_size = generic_get_file_size,
	.flags = FIO_SYNCIO | FIO_RAWIO,
};
408
409#else /* FIO_HAVE_SGIO */
410
411/*
412 * When we have a proper configure system in place, we simply wont build
413 * and install this io engine. For now install a crippled version that
414 * just complains and fails to load.
415 */
/*
 * Stub init for builds without SG support: always fails, so selecting
 * the sg engine produces a clear runtime error instead of silently
 * doing nothing.
 */
static int fio_sgio_init(struct thread_data fio_unused *td)
{
	log_err("fio: ioengine sg not available\n");
	return 1;
}
421
/* Crippled ops table: only init is wired up, and it always fails. */
static struct ioengine_ops ioengine = {
	.name = "sg",
	.version = FIO_IOOPS_VERSION,
	.init = fio_sgio_init,
};
427
428#endif
429
/* Register the sg engine with fio core at load time (fio_init ctor). */
static void fio_init fio_sgio_register(void)
{
	register_ioengine(&ioengine);
}
434
/* Unregister the sg engine at unload time (fio_exit dtor). */
static void fio_exit fio_sgio_unregister(void)
{
	unregister_ioengine(&ioengine);
}