/*
 * From fio.git, engines/sg.c
 * Patch: "SG IO engine: reduce allocations and memory leaks"
 */
1/*
2 * scsi generic sg v3 io engine
3 *
4 */
5#include <stdio.h>
6#include <stdlib.h>
7#include <unistd.h>
8#include <errno.h>
9#include <assert.h>
10#include <sys/poll.h>
11
12#include "../fio.h"
13#include "../os.h"
14
15#ifdef FIO_HAVE_SGIO
16
/*
 * Per-io_u SCSI command block: a 10-byte CDB filled in by fio_sgio_prep().
 */
struct sgio_cmd {
	unsigned char cdb[10];	/* CDB-10: READ(10)/WRITE(10)/SYNCHRONIZE CACHE(10) */
	int nr;			/* NOTE(review): not referenced anywhere in this file */
};
21
/*
 * Engine-private state, hung off td->io_ops->data by fio_sgio_init().
 */
struct sgio_data {
	struct sgio_cmd *cmds;	/* one command block per io_u, indexed by io_u->index */
	struct io_u **events;	/* completed io_us handed back via fio_sgio_event() */
	struct pollfd *pfds;	/* poll set, one entry per file */
	int *fd_flags;		/* saved F_GETFL flags, restored after non-blocking poll */
	void *sgbuf;		/* buffer for reading back sg_io_hdr completions */
	unsigned int bs;	/* device sector size in bytes */
};
30
31static void sgio_hdr_init(struct sgio_data *sd, struct sg_io_hdr *hdr,
32 struct io_u *io_u, int fs)
33{
34 struct sgio_cmd *sc = &sd->cmds[io_u->index];
35
36 memset(hdr, 0, sizeof(*hdr));
37 memset(sc->cdb, 0, sizeof(sc->cdb));
38
39 hdr->interface_id = 'S';
40 hdr->cmdp = sc->cdb;
41 hdr->cmd_len = sizeof(sc->cdb);
42 hdr->pack_id = io_u->index;
43 hdr->usr_ptr = io_u;
44
45 if (fs) {
46 hdr->dxferp = io_u->xfer_buf;
47 hdr->dxfer_len = io_u->xfer_buflen;
48 }
49}
50
51static int fio_sgio_ioctl_getevents(struct thread_data *td, int fio_unused min,
52 int max, struct timespec fio_unused *t)
53{
54 assert(max <= 1);
55
56 /*
57 * we can only have one finished io_u for sync io, since the depth
58 * is always 1
59 */
60 if (list_empty(&td->io_u_busylist))
61 return 0;
62
63 return 1;
64}
65
/*
 * Return 1 if any of the first @fds entries in @pfds has POLLIN set
 * in its revents, 0 otherwise.
 */
static int pollin_events(struct pollfd *pfds, int fds)
{
	int idx;

	for (idx = 0; idx < fds; idx++) {
		if (pfds[idx].revents & POLLIN)
			return 1;
	}

	return 0;
}
76
77static int fio_sgio_getevents(struct thread_data *td, int min, int max,
78 struct timespec fio_unused *t)
79{
80 /*
81 * normally hard coding &td->files[0] is a bug that needs to be fixed,
82 * but it's ok here as all files should point to the same device.
83 */
84 struct fio_file *f = &td->files[0];
85 struct sgio_data *sd = td->io_ops->data;
86 int left = max, ret, events, i, r = 0;
87 void *buf = sd->sgbuf;
88
89 /*
90 * Fill in the file descriptors
91 */
92 for_each_file(td, f, i) {
93 /*
94 * don't block for min events == 0
95 */
96 if (!min) {
97 sd->fd_flags[i] = fcntl(f->fd, F_GETFL);
98 fcntl(f->fd, F_SETFL, sd->fd_flags[i] | O_NONBLOCK);
99 }
100 sd->pfds[i].fd = f->fd;
101 sd->pfds[i].events = POLLIN;
102 }
103
104 while (left) {
105 void *p;
106
107 do {
108 if (!min)
109 break;
110
111 ret = poll(sd->pfds, td->nr_files, -1);
112 if (ret < 0) {
113 td_verror(td, errno);
114 if (!r)
115 r = -1;
116 break;
117 } else if (!ret)
118 continue;
119
120 if (pollin_events(sd->pfds, td->nr_files))
121 break;
122 } while (1);
123
124 if (r < 0)
125 break;
126
127re_read:
128 p = buf;
129 events = 0;
130 for_each_file(td, f, i) {
131 ret = read(f->fd, p, left * sizeof(struct sg_io_hdr));
132 if (ret < 0) {
133 if (errno == EAGAIN)
134 continue;
135 td_verror(td, errno);
136 r = -1;
137 break;
138 } else if (ret) {
139 p += ret;
140 events += ret / sizeof(struct sg_io_hdr);
141 }
142 }
143
144 if (r < 0)
145 break;
146 if (!events) {
147 usleep(1000);
148 goto re_read;
149 }
150
151 left -= events;
152 r += events;
153
154 for (i = 0; i < events; i++) {
155 struct sg_io_hdr *hdr = (struct sg_io_hdr *) buf + i;
156
157 sd->events[i] = hdr->usr_ptr;
158 }
159 }
160
161 if (!min) {
162 for_each_file(td, f, i)
163 fcntl(f->fd, F_SETFL, sd->fd_flags[i]);
164 }
165
166 return r;
167}
168
169static int fio_sgio_ioctl_doio(struct thread_data *td,
170 struct fio_file *f, struct io_u *io_u)
171{
172 struct sgio_data *sd = td->io_ops->data;
173 struct sg_io_hdr *hdr = &io_u->hdr;
174
175 sd->events[0] = io_u;
176
177 return ioctl(f->fd, SG_IO, hdr);
178}
179
180static int fio_sgio_rw_doio(struct fio_file *f, struct io_u *io_u, int sync)
181{
182 struct sg_io_hdr *hdr = &io_u->hdr;
183 int ret;
184
185 ret = write(f->fd, hdr, sizeof(*hdr));
186 if (ret < 0)
187 return errno;
188
189 if (sync) {
190 ret = read(f->fd, hdr, sizeof(*hdr));
191 if (ret < 0)
192 return errno;
193 }
194
195 return 0;
196}
197
198static int fio_sgio_doio(struct thread_data *td, struct io_u *io_u, int sync)
199{
200 struct fio_file *f = io_u->file;
201
202 if (td->filetype == FIO_TYPE_BD)
203 return fio_sgio_ioctl_doio(td, f, io_u);
204
205 return fio_sgio_rw_doio(f, io_u, sync);
206}
207
208static int fio_sgio_prep(struct thread_data *td, struct io_u *io_u)
209{
210 struct sg_io_hdr *hdr = &io_u->hdr;
211 struct sgio_data *sd = td->io_ops->data;
212 int nr_blocks, lba;
213
214 if (io_u->xfer_buflen & (sd->bs - 1)) {
215 log_err("read/write not sector aligned\n");
216 return EINVAL;
217 }
218
219 if (io_u->ddir == DDIR_READ) {
220 sgio_hdr_init(sd, hdr, io_u, 1);
221
222 hdr->dxfer_direction = SG_DXFER_FROM_DEV;
223 hdr->cmdp[0] = 0x28;
224 } else if (io_u->ddir == DDIR_WRITE) {
225 sgio_hdr_init(sd, hdr, io_u, 1);
226
227 hdr->dxfer_direction = SG_DXFER_TO_DEV;
228 hdr->cmdp[0] = 0x2a;
229 } else {
230 sgio_hdr_init(sd, hdr, io_u, 0);
231
232 hdr->dxfer_direction = SG_DXFER_NONE;
233 hdr->cmdp[0] = 0x35;
234 }
235
236 if (hdr->dxfer_direction != SG_DXFER_NONE) {
237 nr_blocks = io_u->xfer_buflen / sd->bs;
238 lba = io_u->offset / sd->bs;
239 hdr->cmdp[2] = (unsigned char) ((lba >> 24) & 0xff);
240 hdr->cmdp[3] = (unsigned char) ((lba >> 16) & 0xff);
241 hdr->cmdp[4] = (unsigned char) ((lba >> 8) & 0xff);
242 hdr->cmdp[5] = (unsigned char) (lba & 0xff);
243 hdr->cmdp[7] = (unsigned char) ((nr_blocks >> 8) & 0xff);
244 hdr->cmdp[8] = (unsigned char) (nr_blocks & 0xff);
245 }
246
247 return 0;
248}
249
250static int fio_sgio_queue(struct thread_data *td, struct io_u *io_u)
251{
252 struct sg_io_hdr *hdr = &io_u->hdr;
253 int ret;
254
255 ret = fio_sgio_doio(td, io_u, io_u->ddir == DDIR_SYNC);
256
257 if (ret < 0)
258 io_u->error = errno;
259 else if (hdr->status) {
260 io_u->resid = hdr->resid;
261 io_u->error = EIO;
262 }
263
264 return io_u->error;
265}
266
267static struct io_u *fio_sgio_event(struct thread_data *td, int event)
268{
269 struct sgio_data *sd = td->io_ops->data;
270
271 return sd->events[event];
272}
273
274static int fio_sgio_get_bs(struct thread_data *td, unsigned int *bs)
275{
276 struct sgio_data *sd = td->io_ops->data;
277 struct io_u *io_u;
278 struct sg_io_hdr *hdr;
279 unsigned char buf[8];
280 int ret;
281
282 io_u = __get_io_u(td);
283 io_u->file = &td->files[0];
284 assert(io_u);
285
286 hdr = &io_u->hdr;
287 sgio_hdr_init(sd, hdr, io_u, 0);
288 memset(buf, 0, sizeof(buf));
289
290 hdr->cmdp[0] = 0x25;
291 hdr->dxfer_direction = SG_DXFER_FROM_DEV;
292 hdr->dxferp = buf;
293 hdr->dxfer_len = sizeof(buf);
294
295 ret = fio_sgio_doio(td, io_u, 1);
296 if (ret) {
297 put_io_u(td, io_u);
298 return ret;
299 }
300
301 *bs = (buf[4] << 24) | (buf[5] << 16) | (buf[6] << 8) | buf[7];
302 put_io_u(td, io_u);
303 return 0;
304}
305
306static void fio_sgio_cleanup(struct thread_data *td)
307{
308 struct sgio_data *sd = td->io_ops->data;
309
310 if (sd) {
311 free(sd->events);
312 free(sd->cmds);
313 free(sd->fd_flags);
314 free(sd->pfds);
315 free(sd->sgbuf);
316 free(sd);
317
318 td->io_ops->data = NULL;
319 }
320}
321
322static int fio_sgio_init(struct thread_data *td)
323{
324 struct fio_file *f = &td->files[0];
325 struct sgio_data *sd;
326 unsigned int bs;
327 int ret;
328
329 sd = malloc(sizeof(*sd));
330 memset(sd, 0, sizeof(*sd));
331 sd->cmds = malloc(td->iodepth * sizeof(struct sgio_cmd));
332 memset(sd->cmds, 0, td->iodepth * sizeof(struct sgio_cmd));
333 sd->events = malloc(td->iodepth * sizeof(struct io_u *));
334 memset(sd->events, 0, td->iodepth * sizeof(struct io_u *));
335 sd->pfds = malloc(sizeof(struct pollfd) * td->nr_files);
336 memset(sd->pfds, 0, sizeof(struct pollfd) * td->nr_files);
337 sd->fd_flags = malloc(sizeof(int) * td->nr_files);
338 memset(sd->fd_flags, 0, sizeof(int) * td->nr_files);
339 sd->sgbuf = malloc(sizeof(struct sg_io_hdr) * td->iodepth);
340 memset(sd->sgbuf, 0, sizeof(struct sg_io_hdr) * td->iodepth);
341
342 td->io_ops->data = sd;
343
344 if (td->filetype == FIO_TYPE_BD) {
345 if (ioctl(f->fd, BLKSSZGET, &bs) < 0) {
346 td_verror(td, errno);
347 goto err;
348 }
349 } else if (td->filetype == FIO_TYPE_CHAR) {
350 int version;
351
352 if (ioctl(f->fd, SG_GET_VERSION_NUM, &version) < 0) {
353 td_verror(td, errno);
354 goto err;
355 }
356
357 ret = fio_sgio_get_bs(td, &bs);
358 if (ret)
359 goto err;
360 } else {
361 log_err("ioengine sgio only works on block devices\n");
362 goto err;
363 }
364
365 sd->bs = bs;
366
367 if (td->filetype == FIO_TYPE_BD)
368 td->io_ops->getevents = fio_sgio_ioctl_getevents;
369 else
370 td->io_ops->getevents = fio_sgio_getevents;
371
372 /*
373 * we want to do it, regardless of whether odirect is set or not
374 */
375 td->override_sync = 1;
376 return 0;
377err:
378 free(sd->events);
379 free(sd->cmds);
380 free(sd->fd_flags);
381 free(sd->pfds);
382 free(sd->sgbuf);
383 free(sd);
384 td->io_ops->data = NULL;
385 return 1;
386}
387
/*
 * Engine registration table. getevents defaults to the poll/read based
 * implementation; fio_sgio_init() swaps in the ioctl variant for block
 * devices. FIO_SYNCIO: submission completes inline; FIO_RAWIO: the
 * engine drives the device directly.
 */
static struct ioengine_ops ioengine = {
	.name = "sg",
	.version = FIO_IOOPS_VERSION,
	.init = fio_sgio_init,
	.prep = fio_sgio_prep,
	.queue = fio_sgio_queue,
	.getevents = fio_sgio_getevents,
	.event = fio_sgio_event,
	.cleanup = fio_sgio_cleanup,
	.flags = FIO_SYNCIO | FIO_RAWIO,
};
399
400#else /* FIO_HAVE_SGIO */
401
402/*
403 * When we have a proper configure system in place, we simply wont build
404 * and install this io engine. For now install a crippled version that
405 * just complains and fails to load.
406 */
/*
 * Stub init for builds without SG support: always fails after telling
 * the user why, so the engine can never actually be used.
 */
static int fio_sgio_init(struct thread_data fio_unused *td)
{
	fprintf(stderr, "fio: sgio not available\n");
	return 1;
}
412
413static struct ioengine_ops ioengine = {
414 .name = "sgio",
415 .version = FIO_IOOPS_VERSION,
416 .init = fio_sgio_init,
417};
418
419#endif
420
/* Register the engine with fio (fio_init presumably marks this to run at load — per fio convention) */
static void fio_init fio_sgio_register(void)
{
	register_ioengine(&ioengine);
}
425
/* Unregister the engine on teardown (fio_exit counterpart to fio_init above) */
static void fio_exit fio_sgio_unregister(void)
{
	unregister_ioengine(&ioengine);
}