Rate must always calculate bytes done
[fio.git] / engines / sg.c
... / ...
CommitLineData
1/*
2 * scsi generic sg v3 io engine
3 *
4 */
5#include <stdio.h>
6#include <stdlib.h>
7#include <unistd.h>
8#include <errno.h>
9#include <assert.h>
10#include <sys/poll.h>
11
12#include "../fio.h"
13#include "../os.h"
14
15#ifdef FIO_HAVE_SGIO
16
/*
 * Per-io_u command state: backing storage for the 10-byte SCSI CDB that
 * the io_u's sg_io_hdr points at (see sgio_hdr_init()).
 */
struct sgio_cmd {
	unsigned char cdb[10];	/* CDB built by fio_sgio_prep(); 10-byte commands only */
	int nr;			/* not referenced in this file — possibly reserved; TODO confirm */
};
21
/*
 * Per-thread engine state, hung off td->io_ops->data by fio_sgio_init().
 */
struct sgio_data {
	struct sgio_cmd *cmds;	/* one CDB slot per iodepth entry, indexed by io_u->index */
	struct io_u **events;	/* completed io_us handed back via fio_sgio_event() */
	struct pollfd *pfds;	/* poll set covering all open files */
	int *fd_flags;		/* saved F_GETFL flags, restored after non-blocking reaps */
	void *sgbuf;		/* scratch array of sg_io_hdrs for reaping completions */
	unsigned int bs;	/* device logical block size in bytes */
};
30
31static void sgio_hdr_init(struct sgio_data *sd, struct sg_io_hdr *hdr,
32 struct io_u *io_u, int fs)
33{
34 struct sgio_cmd *sc = &sd->cmds[io_u->index];
35
36 memset(hdr, 0, sizeof(*hdr));
37 memset(sc->cdb, 0, sizeof(sc->cdb));
38
39 hdr->interface_id = 'S';
40 hdr->cmdp = sc->cdb;
41 hdr->cmd_len = sizeof(sc->cdb);
42 hdr->pack_id = io_u->index;
43 hdr->usr_ptr = io_u;
44
45 if (fs) {
46 hdr->dxferp = io_u->xfer_buf;
47 hdr->dxfer_len = io_u->xfer_buflen;
48 }
49}
50
/*
 * Return 1 if any of the @fds descriptors in @pfds has POLLIN set in its
 * revents (i.e. at least one completion can be read), 0 otherwise.
 */
static int pollin_events(struct pollfd *pfds, int fds)
{
	while (fds--) {
		if (pfds->revents & POLLIN)
			return 1;
		pfds++;
	}

	return 0;
}
61
/*
 * Reap between @min and @max completions. For min == 0 the sg fds are
 * flipped to O_NONBLOCK so the reap does not stall; otherwise poll() is
 * used to wait for at least one readable fd. Completed sg_io_hdrs are
 * read back into sd->sgbuf and their usr_ptr (the io_u) is stashed in
 * sd->events[] for fio_sgio_event(). Returns the number of events reaped,
 * or a negative errno on poll/read failure.
 */
static int fio_sgio_getevents(struct thread_data *td, int min, int max,
			      struct timespec fio_unused *t)
{
	/*
	 * normally hard coding &td->files[0] is a bug that needs to be fixed,
	 * but it's ok here as all files should point to the same device.
	 */
	struct fio_file *f = &td->files[0];
	struct sgio_data *sd = td->io_ops->data;
	int left = max, ret, events, i, r = 0;
	void *buf = sd->sgbuf;

	/*
	 * Fill in the file descriptors
	 */
	for_each_file(td, f, i) {
		/*
		 * don't block for min events == 0
		 */
		if (!min) {
			sd->fd_flags[i] = fcntl(f->fd, F_GETFL);
			fcntl(f->fd, F_SETFL, sd->fd_flags[i] | O_NONBLOCK);
		}
		sd->pfds[i].fd = f->fd;
		sd->pfds[i].events = POLLIN;
	}

	while (left) {
		void *p;

		do {
			/* min == 0: skip the poll wait, go straight to read */
			if (!min)
				break;

			ret = poll(sd->pfds, td->nr_files, -1);
			if (ret < 0) {
				if (!r)
					r = -errno;
				td_verror(td, errno, "poll");
				break;
			} else if (!ret)
				continue;

			if (pollin_events(sd->pfds, td->nr_files))
				break;
		} while (1);

		if (r < 0)
			break;

re_read:
		/* drain completed sg_io_hdrs from every fd into sgbuf */
		p = buf;
		events = 0;
		for_each_file(td, f, i) {
			ret = read(f->fd, p, left * sizeof(struct sg_io_hdr));
			if (ret < 0) {
				if (errno == EAGAIN)
					continue;
				r = -errno;
				td_verror(td, errno, "read");
				break;
			} else if (ret) {
				p += ret;
				events += ret / sizeof(struct sg_io_hdr);
			}
		}

		if (r < 0)
			break;
		/*
		 * NOTE(review): with min == 0 and nothing pending, this
		 * usleep/goto pair appears to spin until at least one event
		 * arrives rather than returning 0 — confirm intended.
		 */
		if (!events) {
			usleep(1000);
			goto re_read;
		}

		left -= events;
		r += events;

		/*
		 * NOTE(review): the events[] index restarts at 0 on every
		 * outer-loop iteration while r keeps accumulating; a second
		 * batch would seemingly overwrite the first batch's entries.
		 * Verify against how callers consume fio_sgio_event().
		 */
		for (i = 0; i < events; i++) {
			struct sg_io_hdr *hdr = (struct sg_io_hdr *) buf + i;

			sd->events[i] = hdr->usr_ptr;
		}
	}

	/* restore the original (blocking) fd flags */
	if (!min) {
		for_each_file(td, f, i)
			fcntl(f->fd, F_SETFL, sd->fd_flags[i]);
	}

	return r;
}
153
154static int fio_sgio_ioctl_doio(struct thread_data *td,
155 struct fio_file *f, struct io_u *io_u)
156{
157 struct sgio_data *sd = td->io_ops->data;
158 struct sg_io_hdr *hdr = &io_u->hdr;
159 int ret;
160
161 sd->events[0] = io_u;
162
163 ret = ioctl(f->fd, SG_IO, hdr);
164 if (ret < 0)
165 return -errno;
166
167 return FIO_Q_COMPLETED;
168}
169
170static int fio_sgio_rw_doio(struct fio_file *f, struct io_u *io_u, int sync)
171{
172 struct sg_io_hdr *hdr = &io_u->hdr;
173 int ret;
174
175 ret = write(f->fd, hdr, sizeof(*hdr));
176 if (ret < 0)
177 return errno;
178
179 if (sync) {
180 ret = read(f->fd, hdr, sizeof(*hdr));
181 if (ret < 0)
182 return -errno;
183 return FIO_Q_COMPLETED;
184 }
185
186 return FIO_Q_QUEUED;
187}
188
189static int fio_sgio_doio(struct thread_data *td, struct io_u *io_u, int sync)
190{
191 struct fio_file *f = io_u->file;
192
193 if (td->filetype == FIO_TYPE_BD)
194 return fio_sgio_ioctl_doio(td, f, io_u);
195
196 return fio_sgio_rw_doio(f, io_u, sync);
197}
198
199static int fio_sgio_prep(struct thread_data *td, struct io_u *io_u)
200{
201 struct sg_io_hdr *hdr = &io_u->hdr;
202 struct sgio_data *sd = td->io_ops->data;
203 int nr_blocks, lba;
204
205 if (io_u->xfer_buflen & (sd->bs - 1)) {
206 log_err("read/write not sector aligned\n");
207 return EINVAL;
208 }
209
210 if (io_u->ddir == DDIR_READ) {
211 sgio_hdr_init(sd, hdr, io_u, 1);
212
213 hdr->dxfer_direction = SG_DXFER_FROM_DEV;
214 hdr->cmdp[0] = 0x28;
215 } else if (io_u->ddir == DDIR_WRITE) {
216 sgio_hdr_init(sd, hdr, io_u, 1);
217
218 hdr->dxfer_direction = SG_DXFER_TO_DEV;
219 hdr->cmdp[0] = 0x2a;
220 } else {
221 sgio_hdr_init(sd, hdr, io_u, 0);
222
223 hdr->dxfer_direction = SG_DXFER_NONE;
224 hdr->cmdp[0] = 0x35;
225 }
226
227 if (hdr->dxfer_direction != SG_DXFER_NONE) {
228 nr_blocks = io_u->xfer_buflen / sd->bs;
229 lba = io_u->offset / sd->bs;
230 hdr->cmdp[2] = (unsigned char) ((lba >> 24) & 0xff);
231 hdr->cmdp[3] = (unsigned char) ((lba >> 16) & 0xff);
232 hdr->cmdp[4] = (unsigned char) ((lba >> 8) & 0xff);
233 hdr->cmdp[5] = (unsigned char) (lba & 0xff);
234 hdr->cmdp[7] = (unsigned char) ((nr_blocks >> 8) & 0xff);
235 hdr->cmdp[8] = (unsigned char) (nr_blocks & 0xff);
236 }
237
238 return 0;
239}
240
241static int fio_sgio_queue(struct thread_data *td, struct io_u *io_u)
242{
243 struct sg_io_hdr *hdr = &io_u->hdr;
244 int ret;
245
246 ret = fio_sgio_doio(td, io_u, io_u->ddir == DDIR_SYNC);
247
248 if (ret < 0)
249 io_u->error = errno;
250 else if (hdr->status) {
251 io_u->resid = hdr->resid;
252 io_u->error = EIO;
253 }
254
255 if (io_u->error) {
256 td_verror(td, io_u->error, "xfer");
257 return FIO_Q_COMPLETED;
258 }
259
260 return ret;
261}
262
/*
 * Return the io_u stored in the given event slot by the most recent
 * fio_sgio_getevents()/fio_sgio_ioctl_doio() call.
 */
static struct io_u *fio_sgio_event(struct thread_data *td, int event)
{
	struct sgio_data *sd = td->io_ops->data;

	return sd->events[event];
}
269
270static int fio_sgio_get_bs(struct thread_data *td, unsigned int *bs)
271{
272 struct sgio_data *sd = td->io_ops->data;
273 struct io_u *io_u;
274 struct sg_io_hdr *hdr;
275 unsigned char buf[8];
276 int ret;
277
278 io_u = __get_io_u(td);
279 io_u->file = &td->files[0];
280 assert(io_u);
281
282 hdr = &io_u->hdr;
283 sgio_hdr_init(sd, hdr, io_u, 0);
284 memset(buf, 0, sizeof(buf));
285
286 hdr->cmdp[0] = 0x25;
287 hdr->dxfer_direction = SG_DXFER_FROM_DEV;
288 hdr->dxferp = buf;
289 hdr->dxfer_len = sizeof(buf);
290
291 ret = fio_sgio_doio(td, io_u, 1);
292 if (ret) {
293 put_io_u(td, io_u);
294 return ret;
295 }
296
297 *bs = (buf[4] << 24) | (buf[5] << 16) | (buf[6] << 8) | buf[7];
298 put_io_u(td, io_u);
299 return 0;
300}
301
302static void fio_sgio_cleanup(struct thread_data *td)
303{
304 struct sgio_data *sd = td->io_ops->data;
305
306 if (sd) {
307 free(sd->events);
308 free(sd->cmds);
309 free(sd->fd_flags);
310 free(sd->pfds);
311 free(sd->sgbuf);
312 free(sd);
313
314 td->io_ops->data = NULL;
315 }
316}
317
318static int fio_sgio_init(struct thread_data *td)
319{
320 struct fio_file *f = &td->files[0];
321 struct sgio_data *sd;
322 unsigned int bs;
323 int ret;
324
325 sd = malloc(sizeof(*sd));
326 memset(sd, 0, sizeof(*sd));
327 sd->cmds = malloc(td->iodepth * sizeof(struct sgio_cmd));
328 memset(sd->cmds, 0, td->iodepth * sizeof(struct sgio_cmd));
329 sd->events = malloc(td->iodepth * sizeof(struct io_u *));
330 memset(sd->events, 0, td->iodepth * sizeof(struct io_u *));
331 sd->pfds = malloc(sizeof(struct pollfd) * td->nr_files);
332 memset(sd->pfds, 0, sizeof(struct pollfd) * td->nr_files);
333 sd->fd_flags = malloc(sizeof(int) * td->nr_files);
334 memset(sd->fd_flags, 0, sizeof(int) * td->nr_files);
335 sd->sgbuf = malloc(sizeof(struct sg_io_hdr) * td->iodepth);
336 memset(sd->sgbuf, 0, sizeof(struct sg_io_hdr) * td->iodepth);
337
338 td->io_ops->data = sd;
339
340 if (td->filetype == FIO_TYPE_BD) {
341 if (ioctl(f->fd, BLKSSZGET, &bs) < 0) {
342 td_verror(td, errno, "ioctl");
343 goto err;
344 }
345 } else if (td->filetype == FIO_TYPE_CHAR) {
346 int version;
347
348 if (ioctl(f->fd, SG_GET_VERSION_NUM, &version) < 0) {
349 td_verror(td, errno, "ioctl");
350 goto err;
351 }
352
353 ret = fio_sgio_get_bs(td, &bs);
354 if (ret)
355 goto err;
356 } else {
357 log_err("ioengine sgio only works on block devices\n");
358 goto err;
359 }
360
361 sd->bs = bs;
362
363 if (td->filetype == FIO_TYPE_BD) {
364 td->io_ops->getevents = NULL;
365 td->io_ops->event = NULL;
366 }
367
368 /*
369 * we want to do it, regardless of whether odirect is set or not
370 */
371 td->override_sync = 1;
372 return 0;
373err:
374 free(sd->events);
375 free(sd->cmds);
376 free(sd->fd_flags);
377 free(sd->pfds);
378 free(sd->sgbuf);
379 free(sd);
380 td->io_ops->data = NULL;
381 return 1;
382}
383
/*
 * Engine descriptor handed to register_ioengine(). FIO_SYNCIO | FIO_RAWIO:
 * I/O is issued synchronously from the job thread against a raw device.
 */
static struct ioengine_ops ioengine = {
	.name = "sg",
	.version = FIO_IOOPS_VERSION,
	.init = fio_sgio_init,
	.prep = fio_sgio_prep,
	.queue = fio_sgio_queue,
	.getevents = fio_sgio_getevents,	/* cleared in init for block devices */
	.event = fio_sgio_event,		/* cleared in init for block devices */
	.cleanup = fio_sgio_cleanup,
	.flags = FIO_SYNCIO | FIO_RAWIO,
};
395
396#else /* FIO_HAVE_SGIO */
397
/*
 * When we have a proper configure system in place, we simply won't build
 * and install this io engine. For now, install a crippled version that
 * just complains and fails to load.
 */
403static int fio_sgio_init(struct thread_data fio_unused *td)
404{
405 fprintf(stderr, "fio: sgio not available\n");
406 return 1;
407}
408
409static struct ioengine_ops ioengine = {
410 .name = "sgio",
411 .version = FIO_IOOPS_VERSION,
412 .init = fio_sgio_init,
413};
414
415#endif
416
/* Constructor hook: register this engine when the module is loaded. */
static void fio_init fio_sgio_register(void)
{
	register_ioengine(&ioengine);
}
421
/* Destructor hook: unregister this engine on unload. */
static void fio_exit fio_sgio_unregister(void)
{
	unregister_ioengine(&ioengine);
}