/* Source: fio.git, engines/fio-engine-sg.c (from patch "Various fixes") */
1/*
2 * scsi generic sg v3 io engine
3 *
4 */
5#include <stdio.h>
6#include <stdlib.h>
7#include <unistd.h>
8#include <errno.h>
9#include <assert.h>
10#include <sys/poll.h>
11#include "fio.h"
12#include "os.h"
13
/*
 * Per-io_u SCSI command state; one slot per queue depth entry,
 * indexed by io_u->index.
 */
struct sgio_cmd {
	unsigned char cdb[10];	/* 10-byte SCSI command descriptor block */
	int nr;			/* NOTE(review): not referenced anywhere in this file -- confirm it is needed */
};
18
/*
 * Engine-private state, hung off td->io_ops->data by fio_sgio_init().
 */
struct sgio_data {
	struct sgio_cmd *cmds;	/* one CDB slot per io_u (td->iodepth entries) */
	struct io_u **events;	/* completed io_u's collected by getevents */
	unsigned int bs;	/* device logical block size in bytes */
};
24
25static void sgio_hdr_init(struct sgio_data *sd, struct sg_io_hdr *hdr,
26 struct io_u *io_u, int fs)
27{
28 struct sgio_cmd *sc = &sd->cmds[io_u->index];
29
30 memset(hdr, 0, sizeof(*hdr));
31 memset(sc->cdb, 0, sizeof(sc->cdb));
32
33 hdr->interface_id = 'S';
34 hdr->cmdp = sc->cdb;
35 hdr->cmd_len = sizeof(sc->cdb);
36 hdr->pack_id = io_u->index;
37 hdr->usr_ptr = io_u;
38
39 if (fs) {
40 hdr->dxferp = io_u->buf;
41 hdr->dxfer_len = io_u->buflen;
42 }
43}
44
45static int fio_sgio_ioctl_getevents(struct thread_data *td, int fio_unused min,
46 int max, struct timespec fio_unused *t)
47{
48 assert(max <= 1);
49
50 /*
51 * we can only have one finished io_u for sync io, since the depth
52 * is always 1
53 */
54 if (list_empty(&td->io_u_busylist))
55 return 0;
56
57 return 1;
58}
59
60
61static int fio_sgio_getevents(struct thread_data *td, int min, int max,
62 struct timespec fio_unused *t)
63{
64 struct fio_file *f = &td->files[0];
65 struct sgio_data *sd = td->io_ops->data;
66 struct pollfd pfd = { .fd = f->fd, .events = POLLIN };
67 void *buf = malloc(max * sizeof(struct sg_io_hdr));
68 int left = max, ret, events, i, r = 0, fl = 0;
69
70 /*
71 * don't block for !events
72 */
73 if (!min) {
74 fl = fcntl(f->fd, F_GETFL);
75 fcntl(f->fd, F_SETFL, fl | O_NONBLOCK);
76 }
77
78 while (left) {
79 do {
80 if (!min)
81 break;
82 poll(&pfd, 1, -1);
83 if (pfd.revents & POLLIN)
84 break;
85 } while (1);
86
87 ret = read(f->fd, buf, left * sizeof(struct sg_io_hdr));
88 if (ret < 0) {
89 if (errno == EAGAIN)
90 break;
91 td_verror(td, errno);
92 r = -1;
93 break;
94 } else if (!ret)
95 break;
96
97 events = ret / sizeof(struct sg_io_hdr);
98 left -= events;
99 r += events;
100
101 for (i = 0; i < events; i++) {
102 struct sg_io_hdr *hdr = (struct sg_io_hdr *) buf + i;
103
104 sd->events[i] = hdr->usr_ptr;
105 }
106 }
107
108 if (!min)
109 fcntl(f->fd, F_SETFL, fl);
110
111 free(buf);
112 return r;
113}
114
115static int fio_sgio_ioctl_doio(struct thread_data *td, struct fio_file *f,
116 struct io_u *io_u)
117{
118 struct sgio_data *sd = td->io_ops->data;
119 struct sg_io_hdr *hdr = &io_u->hdr;
120
121 sd->events[0] = io_u;
122
123 return ioctl(f->fd, SG_IO, hdr);
124}
125
126static int fio_sgio_rw_doio(struct thread_data *td, struct fio_file *f,
127 struct io_u *io_u, int sync)
128{
129 struct sg_io_hdr *hdr = &io_u->hdr;
130 int ret;
131
132 ret = write(f->fd, hdr, sizeof(*hdr));
133 if (ret < 0)
134 return errno;
135
136 if (sync) {
137 ret = read(f->fd, hdr, sizeof(*hdr));
138 if (ret < 0)
139 return errno;
140 }
141
142 return 0;
143}
144
145static int fio_sgio_doio(struct thread_data *td, struct io_u *io_u, int sync)
146{
147 struct fio_file *f = io_u->file;
148
149 if (td->filetype == FIO_TYPE_BD)
150 return fio_sgio_ioctl_doio(td, f, io_u);
151
152 return fio_sgio_rw_doio(td, f, io_u, sync);
153}
154
155static int fio_sgio_sync(struct thread_data *td, struct fio_file *f)
156{
157 struct sgio_data *sd = td->io_ops->data;
158 struct sg_io_hdr *hdr;
159 struct io_u *io_u;
160 int ret;
161
162 io_u = __get_io_u(td);
163 if (!io_u)
164 return ENOMEM;
165
166 hdr = &io_u->hdr;
167 sgio_hdr_init(sd, hdr, io_u, 0);
168 hdr->dxfer_direction = SG_DXFER_NONE;
169
170 hdr->cmdp[0] = 0x35;
171
172 ret = fio_sgio_doio(td, io_u, 1);
173 put_io_u(td, io_u);
174 return ret;
175}
176
177static int fio_sgio_prep(struct thread_data *td, struct io_u *io_u)
178{
179 struct sg_io_hdr *hdr = &io_u->hdr;
180 struct sgio_data *sd = td->io_ops->data;
181 int nr_blocks, lba;
182
183 if (io_u->buflen & (sd->bs - 1)) {
184 log_err("read/write not sector aligned\n");
185 return EINVAL;
186 }
187
188 sgio_hdr_init(sd, hdr, io_u, 1);
189
190 if (io_u->ddir == DDIR_READ) {
191 hdr->dxfer_direction = SG_DXFER_FROM_DEV;
192 hdr->cmdp[0] = 0x28;
193 } else {
194 hdr->dxfer_direction = SG_DXFER_TO_DEV;
195 hdr->cmdp[0] = 0x2a;
196 }
197
198 nr_blocks = io_u->buflen / sd->bs;
199 lba = io_u->offset / sd->bs;
200 hdr->cmdp[2] = (lba >> 24) & 0xff;
201 hdr->cmdp[3] = (lba >> 16) & 0xff;
202 hdr->cmdp[4] = (lba >> 8) & 0xff;
203 hdr->cmdp[5] = lba & 0xff;
204 hdr->cmdp[7] = (nr_blocks >> 8) & 0xff;
205 hdr->cmdp[8] = nr_blocks & 0xff;
206 return 0;
207}
208
209static int fio_sgio_queue(struct thread_data *td, struct io_u *io_u)
210{
211 struct sg_io_hdr *hdr = &io_u->hdr;
212 int ret;
213
214 ret = fio_sgio_doio(td, io_u, 0);
215
216 if (ret < 0)
217 io_u->error = errno;
218 else if (hdr->status) {
219 io_u->resid = hdr->resid;
220 io_u->error = EIO;
221 }
222
223 return io_u->error;
224}
225
226static struct io_u *fio_sgio_event(struct thread_data *td, int event)
227{
228 struct sgio_data *sd = td->io_ops->data;
229
230 return sd->events[event];
231}
232
233static int fio_sgio_get_bs(struct thread_data *td, unsigned int *bs)
234{
235 struct sgio_data *sd = td->io_ops->data;
236 struct io_u *io_u;
237 struct sg_io_hdr *hdr;
238 unsigned char buf[8];
239 int ret;
240
241 io_u = __get_io_u(td);
242 assert(io_u);
243
244 hdr = &io_u->hdr;
245 sgio_hdr_init(sd, hdr, io_u, 0);
246 memset(buf, 0, sizeof(buf));
247
248 hdr->cmdp[0] = 0x25;
249 hdr->dxfer_direction = SG_DXFER_FROM_DEV;
250 hdr->dxferp = buf;
251 hdr->dxfer_len = sizeof(buf);
252
253 ret = fio_sgio_doio(td, io_u, 1);
254 if (ret) {
255 put_io_u(td, io_u);
256 return ret;
257 }
258
259 *bs = (buf[4] << 24) | (buf[5] << 16) | (buf[6] << 8) | buf[7];
260 put_io_u(td, io_u);
261 return 0;
262}
263
264static void fio_sgio_cleanup(struct thread_data *td)
265{
266 if (td->io_ops->data) {
267 free(td->io_ops->data);
268 td->io_ops->data = NULL;
269 }
270}
271
272static int fio_sgio_init(struct thread_data *td)
273{
274 struct fio_file *f = &td->files[0];
275 struct sgio_data *sd;
276 unsigned int bs;
277 int ret;
278
279 sd = malloc(sizeof(*sd));
280 sd->cmds = malloc(td->iodepth * sizeof(struct sgio_cmd));
281 sd->events = malloc(td->iodepth * sizeof(struct io_u *));
282 td->io_ops->data = sd;
283
284 if (td->filetype == FIO_TYPE_BD) {
285 if (ioctl(f->fd, BLKSSZGET, &bs) < 0) {
286 td_verror(td, errno);
287 return 1;
288 }
289 } else if (td->filetype == FIO_TYPE_CHAR) {
290 int version;
291
292 if (ioctl(f->fd, SG_GET_VERSION_NUM, &version) < 0) {
293 td_verror(td, errno);
294 return 1;
295 }
296
297 ret = fio_sgio_get_bs(td, &bs);
298 if (ret)
299 return ret;
300 } else {
301 log_err("ioengine sgio only works on block devices\n");
302 return 1;
303 }
304
305 sd->bs = bs;
306
307 if (td->filetype == FIO_TYPE_BD)
308 td->io_ops->getevents = fio_sgio_ioctl_getevents;
309 else
310 td->io_ops->getevents = fio_sgio_getevents;
311
312 /*
313 * we want to do it, regardless of whether odirect is set or not
314 */
315 td->override_sync = 1;
316 return 0;
317}
318
/*
 * Engine registration: "sg" drives raw SCSI commands through the
 * sg v3 interface, synchronously (FIO_SYNCIO | FIO_RAWIO).
 * Note: .getevents set here is the rw-path handler; fio_sgio_init()
 * overrides it with fio_sgio_ioctl_getevents for block devices.
 */
struct ioengine_ops ioengine = {
	.name = "sg",
	.version = FIO_IOOPS_VERSION,
	.init = fio_sgio_init,
	.prep = fio_sgio_prep,
	.queue = fio_sgio_queue,
	.getevents = fio_sgio_getevents,
	.event = fio_sgio_event,
	.cleanup = fio_sgio_cleanup,
	.sync = fio_sgio_sync,
	.flags = FIO_SYNCIO | FIO_RAWIO,
};