Allow explicit setting of a number of files
[fio.git] / engines / sg.c
... / ...
CommitLineData
1/*
2 * scsi generic sg v3 io engine
3 *
4 */
5#include <stdio.h>
6#include <stdlib.h>
7#include <unistd.h>
8#include <errno.h>
9#include <assert.h>
10#include <sys/poll.h>
11
12#include "../fio.h"
13#include "../os.h"
14
15#ifdef FIO_HAVE_SGIO
16
/*
 * Per-io_u command state: one slot per queue entry, indexed by io_u->index.
 */
struct sgio_cmd {
	unsigned char cdb[10];	/* SCSI command descriptor block (10-byte CDB commands only) */
	int nr;			/* NOTE(review): appears unused in this file — confirm before removing */
};
21
/*
 * Engine-private data, hung off td->io_ops->data.
 */
struct sgio_data {
	struct sgio_cmd *cmds;	/* iodepth CDB slots, indexed by io_u->index */
	struct io_u **events;	/* completed io_u's, filled by getevents/doio */
	struct pollfd *pfds;	/* one pollfd per file, for the char-dev read path */
	int *fd_flags;		/* saved O_NONBLOCK state per fd (min == 0 polling) */
	void *sgbuf;		/* scratch buffer for reading back sg_io_hdr completions */
	unsigned int bs;	/* device sector size, from BLKSSZGET or READ CAPACITY */
	int type_checked;	/* set once the device type/blocksize has been verified */
};
31
32static void sgio_hdr_init(struct sgio_data *sd, struct sg_io_hdr *hdr,
33 struct io_u *io_u, int fs)
34{
35 struct sgio_cmd *sc = &sd->cmds[io_u->index];
36
37 memset(hdr, 0, sizeof(*hdr));
38 memset(sc->cdb, 0, sizeof(sc->cdb));
39
40 hdr->interface_id = 'S';
41 hdr->cmdp = sc->cdb;
42 hdr->cmd_len = sizeof(sc->cdb);
43 hdr->pack_id = io_u->index;
44 hdr->usr_ptr = io_u;
45
46 if (fs) {
47 hdr->dxferp = io_u->xfer_buf;
48 hdr->dxfer_len = io_u->xfer_buflen;
49 }
50}
51
/*
 * Return 1 if any of the @fds descriptors in @pfds has POLLIN set
 * in its revents, 0 otherwise.
 */
static int pollin_events(struct pollfd *pfds, int fds)
{
	int idx = 0;

	while (idx < fds) {
		if (pfds[idx].revents & POLLIN)
			return 1;
		idx++;
	}

	return 0;
}
62
/*
 * Reap completions from the sg character devices. Polls all file
 * descriptors, then reads back sg_io_hdr completion records into
 * sd->sgbuf and stores the associated io_u's in sd->events.
 * Returns the number of events reaped, or a negative error.
 */
static int fio_sgio_getevents(struct thread_data *td, int min, int max,
			      struct timespec fio_unused *t)
{
	/*
	 * normally hard coding &td->files[0] is a bug that needs to be fixed,
	 * but it's ok here as all files should point to the same device.
	 */
	struct fio_file *f = &td->files[0];
	struct sgio_data *sd = td->io_ops->data;
	int left = max, ret, r = 0;
	void *buf = sd->sgbuf;
	unsigned int i, events;

	/*
	 * Fill in the file descriptors
	 */
	for_each_file(td, f, i) {
		/*
		 * don't block for min events == 0
		 */
		if (!min) {
			/* save flags so they can be restored below */
			sd->fd_flags[i] = fcntl(f->fd, F_GETFL);
			fcntl(f->fd, F_SETFL, sd->fd_flags[i] | O_NONBLOCK);
		}
		sd->pfds[i].fd = f->fd;
		sd->pfds[i].events = POLLIN;
	}

	while (left) {
		void *p;

		/* block in poll until at least one fd is readable (skipped if min == 0) */
		do {
			if (!min)
				break;

			ret = poll(sd->pfds, td->nr_files, -1);
			if (ret < 0) {
				if (!r)
					r = -errno;
				td_verror(td, errno, "poll");
				break;
			} else if (!ret)
				continue;

			if (pollin_events(sd->pfds, td->nr_files))
				break;
		} while (1);

		if (r < 0)
			break;

re_read:
		/* drain completed sg_io_hdr's from every fd into sgbuf */
		p = buf;
		events = 0;
		for_each_file(td, f, i) {
			ret = read(f->fd, p, left * sizeof(struct sg_io_hdr));
			if (ret < 0) {
				if (errno == EAGAIN)
					continue;
				r = -errno;
				td_verror(td, errno, "read");
				break;
			} else if (ret) {
				p += ret;
				events += ret / sizeof(struct sg_io_hdr);
			}
		}

		if (r < 0)
			break;
		/*
		 * NOTE(review): with min == 0 and non-blocking fds this
		 * usleep/goto pair busy-waits until something completes —
		 * confirm that callers never ask for max > in-flight here.
		 */
		if (!events) {
			usleep(1000);
			goto re_read;
		}

		left -= events;
		r += events;

		/*
		 * NOTE(review): events are stored starting at index 0 on
		 * every pass of the outer while loop, so a second pass would
		 * overwrite the io_u's reaped by the first. Looks safe only
		 * if each call completes in a single pass — verify.
		 */
		for (i = 0; i < events; i++) {
			struct sg_io_hdr *hdr = (struct sg_io_hdr *) buf + i;

			sd->events[i] = hdr->usr_ptr;
		}
	}

	/* restore the original fd flags saved above */
	if (!min) {
		for_each_file(td, f, i)
			fcntl(f->fd, F_SETFL, sd->fd_flags[i]);
	}

	return r;
}
155
156static int fio_sgio_ioctl_doio(struct thread_data *td,
157 struct fio_file *f, struct io_u *io_u)
158{
159 struct sgio_data *sd = td->io_ops->data;
160 struct sg_io_hdr *hdr = &io_u->hdr;
161 int ret;
162
163 sd->events[0] = io_u;
164
165 ret = ioctl(f->fd, SG_IO, hdr);
166 if (ret < 0)
167 return -errno;
168
169 return FIO_Q_COMPLETED;
170}
171
172static int fio_sgio_rw_doio(struct fio_file *f, struct io_u *io_u, int sync)
173{
174 struct sg_io_hdr *hdr = &io_u->hdr;
175 int ret;
176
177 ret = write(f->fd, hdr, sizeof(*hdr));
178 if (ret < 0)
179 return errno;
180
181 if (sync) {
182 ret = read(f->fd, hdr, sizeof(*hdr));
183 if (ret < 0)
184 return -errno;
185 return FIO_Q_COMPLETED;
186 }
187
188 return FIO_Q_QUEUED;
189}
190
191static int fio_sgio_doio(struct thread_data *td, struct io_u *io_u, int sync)
192{
193 struct fio_file *f = io_u->file;
194
195 if (f->filetype == FIO_TYPE_BD)
196 return fio_sgio_ioctl_doio(td, f, io_u);
197
198 return fio_sgio_rw_doio(f, io_u, sync);
199}
200
201static int fio_sgio_prep(struct thread_data *td, struct io_u *io_u)
202{
203 struct sg_io_hdr *hdr = &io_u->hdr;
204 struct sgio_data *sd = td->io_ops->data;
205 int nr_blocks, lba;
206
207 if (io_u->xfer_buflen & (sd->bs - 1)) {
208 log_err("read/write not sector aligned\n");
209 return EINVAL;
210 }
211
212 if (io_u->ddir == DDIR_READ) {
213 sgio_hdr_init(sd, hdr, io_u, 1);
214
215 hdr->dxfer_direction = SG_DXFER_FROM_DEV;
216 hdr->cmdp[0] = 0x28;
217 } else if (io_u->ddir == DDIR_WRITE) {
218 sgio_hdr_init(sd, hdr, io_u, 1);
219
220 hdr->dxfer_direction = SG_DXFER_TO_DEV;
221 hdr->cmdp[0] = 0x2a;
222 } else {
223 sgio_hdr_init(sd, hdr, io_u, 0);
224
225 hdr->dxfer_direction = SG_DXFER_NONE;
226 hdr->cmdp[0] = 0x35;
227 }
228
229 if (hdr->dxfer_direction != SG_DXFER_NONE) {
230 nr_blocks = io_u->xfer_buflen / sd->bs;
231 lba = io_u->offset / sd->bs;
232 hdr->cmdp[2] = (unsigned char) ((lba >> 24) & 0xff);
233 hdr->cmdp[3] = (unsigned char) ((lba >> 16) & 0xff);
234 hdr->cmdp[4] = (unsigned char) ((lba >> 8) & 0xff);
235 hdr->cmdp[5] = (unsigned char) (lba & 0xff);
236 hdr->cmdp[7] = (unsigned char) ((nr_blocks >> 8) & 0xff);
237 hdr->cmdp[8] = (unsigned char) (nr_blocks & 0xff);
238 }
239
240 return 0;
241}
242
243static int fio_sgio_queue(struct thread_data *td, struct io_u *io_u)
244{
245 struct sg_io_hdr *hdr = &io_u->hdr;
246 int ret;
247
248 ret = fio_sgio_doio(td, io_u, io_u->ddir == DDIR_SYNC);
249
250 if (ret < 0)
251 io_u->error = errno;
252 else if (hdr->status) {
253 io_u->resid = hdr->resid;
254 io_u->error = EIO;
255 }
256
257 if (io_u->error) {
258 td_verror(td, io_u->error, "xfer");
259 return FIO_Q_COMPLETED;
260 }
261
262 return ret;
263}
264
265static struct io_u *fio_sgio_event(struct thread_data *td, int event)
266{
267 struct sgio_data *sd = td->io_ops->data;
268
269 return sd->events[event];
270}
271
272static int fio_sgio_get_bs(struct thread_data *td, unsigned int *bs)
273{
274 struct sgio_data *sd = td->io_ops->data;
275 struct io_u *io_u;
276 struct sg_io_hdr *hdr;
277 unsigned char buf[8];
278 int ret;
279
280 io_u = __get_io_u(td);
281 io_u->file = &td->files[0];
282 assert(io_u);
283
284 hdr = &io_u->hdr;
285 sgio_hdr_init(sd, hdr, io_u, 0);
286 memset(buf, 0, sizeof(buf));
287
288 hdr->cmdp[0] = 0x25;
289 hdr->dxfer_direction = SG_DXFER_FROM_DEV;
290 hdr->dxferp = buf;
291 hdr->dxfer_len = sizeof(buf);
292
293 ret = fio_sgio_doio(td, io_u, 1);
294 if (ret) {
295 put_io_u(td, io_u);
296 return ret;
297 }
298
299 *bs = (buf[4] << 24) | (buf[5] << 16) | (buf[6] << 8) | buf[7];
300 put_io_u(td, io_u);
301 return 0;
302}
303
304static void fio_sgio_cleanup(struct thread_data *td)
305{
306 struct sgio_data *sd = td->io_ops->data;
307
308 if (sd) {
309 free(sd->events);
310 free(sd->cmds);
311 free(sd->fd_flags);
312 free(sd->pfds);
313 free(sd->sgbuf);
314 free(sd);
315
316 td->io_ops->data = NULL;
317 }
318}
319
320static int fio_sgio_init(struct thread_data *td)
321{
322 struct sgio_data *sd;
323
324 sd = malloc(sizeof(*sd));
325 memset(sd, 0, sizeof(*sd));
326 sd->cmds = malloc(td->iodepth * sizeof(struct sgio_cmd));
327 memset(sd->cmds, 0, td->iodepth * sizeof(struct sgio_cmd));
328 sd->events = malloc(td->iodepth * sizeof(struct io_u *));
329 memset(sd->events, 0, td->iodepth * sizeof(struct io_u *));
330 sd->pfds = malloc(sizeof(struct pollfd) * td->nr_files);
331 memset(sd->pfds, 0, sizeof(struct pollfd) * td->nr_files);
332 sd->fd_flags = malloc(sizeof(int) * td->nr_files);
333 memset(sd->fd_flags, 0, sizeof(int) * td->nr_files);
334 sd->sgbuf = malloc(sizeof(struct sg_io_hdr) * td->iodepth);
335 memset(sd->sgbuf, 0, sizeof(struct sg_io_hdr) * td->iodepth);
336
337 td->io_ops->data = sd;
338
339 /*
340 * we want to do it, regardless of whether odirect is set or not
341 */
342 td->override_sync = 1;
343 return 0;
344}
345
346static int fio_sgio_type_check(struct thread_data *td, struct fio_file *f)
347{
348 struct sgio_data *sd = td->io_ops->data;
349 unsigned int bs;
350
351 if (f->filetype == FIO_TYPE_BD) {
352 if (ioctl(f->fd, BLKSSZGET, &bs) < 0) {
353 td_verror(td, errno, "ioctl");
354 return 1;
355 }
356 } else if (f->filetype == FIO_TYPE_CHAR) {
357 int version, ret;
358
359 if (ioctl(f->fd, SG_GET_VERSION_NUM, &version) < 0) {
360 td_verror(td, errno, "ioctl");
361 return 1;
362 }
363
364 ret = fio_sgio_get_bs(td, &bs);
365 if (ret)
366 return 1;
367 } else {
368 log_err("ioengine sgio only works on block devices\n");
369 return 1;
370 }
371
372 sd->bs = bs;
373
374 if (f->filetype == FIO_TYPE_BD) {
375 td->io_ops->getevents = NULL;
376 td->io_ops->event = NULL;
377 }
378
379 return 0;
380}
381
382static int fio_sgio_open(struct thread_data *td, struct fio_file *f)
383{
384 struct sgio_data *sd = td->io_ops->data;
385 int ret;
386
387 ret = generic_open_file(td, f);
388 if (ret)
389 return ret;
390
391 if (!sd->type_checked && fio_sgio_type_check(td, f)) {
392 generic_close_file(td, f);
393 return 1;
394 }
395
396 return 0;
397}
398
/*
 * Engine ops table for the real sg engine: synchronous, raw-device I/O.
 */
static struct ioengine_ops ioengine = {
	.name		= "sg",
	.version	= FIO_IOOPS_VERSION,
	.init		= fio_sgio_init,
	.prep		= fio_sgio_prep,
	.queue		= fio_sgio_queue,
	.getevents	= fio_sgio_getevents,	/* cleared for block devices in type_check */
	.event		= fio_sgio_event,
	.cleanup	= fio_sgio_cleanup,
	.open_file	= fio_sgio_open,
	.close_file	= generic_close_file,
	.flags		= FIO_SYNCIO | FIO_RAWIO,
};
412
413#else /* FIO_HAVE_SGIO */
414
415/*
416 * When we have a proper configure system in place, we simply wont build
417 * and install this io engine. For now install a crippled version that
418 * just complains and fails to load.
419 */
/* Stub init for builds without sg support: always fails to load. */
static int fio_sgio_init(struct thread_data fio_unused *td)
{
	fprintf(stderr, "fio: sgio not available\n");
	return 1;
}
425
/*
 * Crippled ops table registered when FIO_HAVE_SGIO is not defined.
 * NOTE(review): name is "sgio" here but "sg" in the real engine —
 * confirm whether jobs select it by either name.
 */
static struct ioengine_ops ioengine = {
	.name		= "sgio",
	.version	= FIO_IOOPS_VERSION,
	.init		= fio_sgio_init,
};
431
432#endif
433
/* Constructor: register whichever ioengine table was compiled in. */
static void fio_init fio_sgio_register(void)
{
	register_ioengine(&ioengine);
}
438
/* Destructor: drop the engine from the registry at unload. */
static void fio_exit fio_sgio_unregister(void)
{
	unregister_ioengine(&ioengine);
}