/*
 * engines/fio-engine-sg.c: scsi generic sg v3 io engine
 *
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <errno.h>
#include <assert.h>
#include <sys/ioctl.h>
#include <sys/poll.h>

#include "fio.h"
#include "os.h"

struct sgio_cmd {
	unsigned char cdb[10];
	int nr;
};

struct sgio_data {
	struct sgio_cmd *cmds;
	struct io_u **events;
	unsigned int bs;
};

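/*
 * Fill in the common parts of an sg_io_hdr for this io_u: point it at the
 * per-io_u CDB storage and tag it with the io_u index/pointer so the
 * completion path can find it again. If 'fs' is set, also wire up the
 * data buffer for a block transfer.
 */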
static void sgio_hdr_init(struct sgio_data *sd, struct sg_io_hdr *hdr,
			  struct io_u *io_u, int fs)
{
	struct sgio_cmd *sc = &sd->cmds[io_u->index];

	memset(hdr, 0, sizeof(*hdr));
	memset(sc->cdb, 0, sizeof(sc->cdb));

	hdr->interface_id = 'S';
	hdr->cmdp = sc->cdb;
	hdr->cmd_len = sizeof(sc->cdb);
	hdr->pack_id = io_u->index;
	hdr->usr_ptr = io_u;

	if (fs) {
		hdr->dxferp = io_u->buf;
		hdr->dxfer_len = io_u->buflen;
	}
}

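/*
 * Event reaping for the SG_IO ioctl path: the ioctl is synchronous, so at
 * most one io_u can be in flight and it has already completed by the time
 * we get here.
 */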
static int fio_sgio_ioctl_getevents(struct thread_data *td, int fio_unused min,
				    int max, struct timespec fio_unused *t)
{
	assert(max <= 1);

	/*
	 * we can only have one finished io_u for sync io, since the depth
	 * is always 1
	 */
	if (list_empty(&td->io_u_busylist))
		return 0;

	return 1;
}

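/*
 * Event reaping for the read/write path on an sg character device: completed
 * commands are read back as sg_io_hdr structures. If no minimum number of
 * events is required, the fd is temporarily switched to non-blocking so we
 * only pick up what is already there.
 */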
static int fio_sgio_getevents(struct thread_data *td, int min, int max,
			      struct timespec fio_unused *t)
{
	struct sgio_data *sd = td->io_ops->data;
	struct pollfd pfd = { .fd = td->fd, .events = POLLIN };
	void *buf = malloc(max * sizeof(struct sg_io_hdr));
	int left = max, ret, events, i, r = 0, fl = 0;

	/*
	 * don't block for !events
	 */
	if (!min) {
		fl = fcntl(td->fd, F_GETFL);
		fcntl(td->fd, F_SETFL, fl | O_NONBLOCK);
	}

	while (left) {
		do {
			if (!min)
				break;
			poll(&pfd, 1, -1);
			if (pfd.revents & POLLIN)
				break;
		} while (1);

		ret = read(td->fd, buf, left * sizeof(struct sg_io_hdr));
		if (ret < 0) {
			if (errno == EAGAIN)
				break;
			td_verror(td, errno);
			r = -1;
			break;
		} else if (!ret)
			break;

		events = ret / sizeof(struct sg_io_hdr);
		left -= events;

		for (i = 0; i < events; i++) {
			struct sg_io_hdr *hdr = (struct sg_io_hdr *) buf + i;

			/*
			 * store completions after the ones reaped in earlier
			 * loop iterations, so they aren't overwritten
			 */
			sd->events[r + i] = hdr->usr_ptr;
		}

		r += events;
	}

	if (!min)
		fcntl(td->fd, F_SETFL, fl);

	free(buf);
	return r;
}

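/*
 * Issue the command synchronously through the SG_IO ioctl (block device
 * path) and record the io_u as the single pending event.
 */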
static int fio_sgio_ioctl_doio(struct thread_data *td, struct io_u *io_u)
{
	struct sgio_data *sd = td->io_ops->data;
	struct sg_io_hdr *hdr = &io_u->hdr;

	sd->events[0] = io_u;

	return ioctl(td->fd, SG_IO, hdr);
}

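/*
 * Issue the command by writing the sg_io_hdr to the sg character device.
 * If 'sync' is set, immediately read the header back to wait for completion.
 */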
static int fio_sgio_rw_doio(struct thread_data *td, struct io_u *io_u, int sync)
{
	struct sg_io_hdr *hdr = &io_u->hdr;
	int ret;

	ret = write(td->fd, hdr, sizeof(*hdr));
	if (ret < 0)
		return errno;

	if (sync) {
		ret = read(td->fd, hdr, sizeof(*hdr));
		if (ret < 0)
			return errno;
	}

	return 0;
}

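/*
 * Dispatch an io_u: block devices go through the SG_IO ioctl, character
 * devices through the sg read/write interface.
 */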
static int fio_sgio_doio(struct thread_data *td, struct io_u *io_u, int sync)
{
	if (td->filetype == FIO_TYPE_BD)
		return fio_sgio_ioctl_doio(td, io_u);

	return fio_sgio_rw_doio(td, io_u, sync);
}

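/*
 * Flush the device write cache by issuing a SYNCHRONIZE CACHE(10) command
 * (opcode 0x35) with no data transfer.
 */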
static int fio_sgio_sync(struct thread_data *td)
{
	struct sgio_data *sd = td->io_ops->data;
	struct sg_io_hdr *hdr;
	struct io_u *io_u;
	int ret;

	io_u = __get_io_u(td);
	if (!io_u)
		return ENOMEM;

	hdr = &io_u->hdr;
	sgio_hdr_init(sd, hdr, io_u, 0);
	hdr->dxfer_direction = SG_DXFER_NONE;

	hdr->cmdp[0] = 0x35;

	ret = fio_sgio_doio(td, io_u, 1);
	put_io_u(td, io_u);
	return ret;
}

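/*
 * Build a READ(10) or WRITE(10) CDB for the io_u: the transfer must be a
 * multiple of the device block size, the LBA goes in bytes 2-5 and the
 * block count in bytes 7-8 of the CDB.
 */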
static int fio_sgio_prep(struct thread_data *td, struct io_u *io_u)
{
	struct sg_io_hdr *hdr = &io_u->hdr;
	struct sgio_data *sd = td->io_ops->data;
	int nr_blocks, lba;

	if (io_u->buflen & (sd->bs - 1)) {
		log_err("read/write not sector aligned\n");
		return EINVAL;
	}

	sgio_hdr_init(sd, hdr, io_u, 1);

	if (io_u->ddir == DDIR_READ) {
		hdr->dxfer_direction = SG_DXFER_FROM_DEV;
		hdr->cmdp[0] = 0x28;	/* READ(10) */
	} else {
		hdr->dxfer_direction = SG_DXFER_TO_DEV;
		hdr->cmdp[0] = 0x2a;	/* WRITE(10) */
	}

	nr_blocks = io_u->buflen / sd->bs;
	lba = io_u->offset / sd->bs;
	hdr->cmdp[2] = (lba >> 24) & 0xff;
	hdr->cmdp[3] = (lba >> 16) & 0xff;
	hdr->cmdp[4] = (lba >> 8) & 0xff;
	hdr->cmdp[5] = lba & 0xff;
	hdr->cmdp[7] = (nr_blocks >> 8) & 0xff;
	hdr->cmdp[8] = nr_blocks & 0xff;
	return 0;
}

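/*
 * Queue a prepared io_u. A non-zero SCSI status is reported back as EIO,
 * along with any residual byte count from the sg header.
 */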
static int fio_sgio_queue(struct thread_data *td, struct io_u *io_u)
{
	struct sg_io_hdr *hdr = &io_u->hdr;
	int ret;

	ret = fio_sgio_doio(td, io_u, 0);

	if (ret < 0)
		io_u->error = errno;
	else if (hdr->status) {
		io_u->resid = hdr->resid;
		io_u->error = EIO;
	}

	return io_u->error;
}

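/*
 * Return the io_u that completed in the given event slot.
 */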
static struct io_u *fio_sgio_event(struct thread_data *td, int event)
{
	struct sgio_data *sd = td->io_ops->data;

	return sd->events[event];
}

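/*
 * Determine the device block size by issuing a READ CAPACITY(10) command
 * (opcode 0x25); bytes 4-7 of the returned data hold the block length.
 */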
static int fio_sgio_get_bs(struct thread_data *td, unsigned int *bs)
{
	struct sgio_data *sd = td->io_ops->data;
	struct io_u *io_u;
	struct sg_io_hdr *hdr;
	unsigned char buf[8];
	int ret;

	io_u = __get_io_u(td);
	assert(io_u);

	hdr = &io_u->hdr;
	sgio_hdr_init(sd, hdr, io_u, 0);
	memset(buf, 0, sizeof(buf));

	hdr->cmdp[0] = 0x25;
	hdr->dxfer_direction = SG_DXFER_FROM_DEV;
	hdr->dxferp = buf;
	hdr->dxfer_len = sizeof(buf);

	ret = fio_sgio_doio(td, io_u, 1);
	if (ret) {
		put_io_u(td, io_u);
		return ret;
	}

	*bs = (buf[4] << 24) | (buf[5] << 16) | (buf[6] << 8) | buf[7];
	put_io_u(td, io_u);
	return 0;
}

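/*
 * Free the per-thread engine data allocated in fio_sgio_init().
 */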
static void fio_sgio_cleanup(struct thread_data *td)
{
	struct sgio_data *sd = td->io_ops->data;

	if (sd) {
		free(sd->cmds);
		free(sd->events);
		free(sd);
		td->io_ops->data = NULL;
	}
}

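/*
 * Per-thread setup: allocate the command and event arrays and determine the
 * device block size, via BLKSSZGET for block devices or READ CAPACITY for sg
 * character devices (after checking that the sg driver answers
 * SG_GET_VERSION_NUM).
 */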
static int fio_sgio_init(struct thread_data *td)
{
	struct sgio_data *sd;
	unsigned int bs;
	int ret;

	sd = malloc(sizeof(*sd));
	sd->cmds = malloc(td->iodepth * sizeof(struct sgio_cmd));
	sd->events = malloc(td->iodepth * sizeof(struct io_u *));
	td->io_ops->data = sd;

	if (td->filetype == FIO_TYPE_BD) {
		if (ioctl(td->fd, BLKSSZGET, &bs) < 0) {
			td_verror(td, errno);
			return 1;
		}
	} else if (td->filetype == FIO_TYPE_CHAR) {
		int version;

		if (ioctl(td->fd, SG_GET_VERSION_NUM, &version) < 0) {
			td_verror(td, errno);
			return 1;
		}

		ret = fio_sgio_get_bs(td, &bs);
		if (ret)
			return ret;
	} else {
		log_err("ioengine sg only works on block or character devices\n");
		return 1;
	}

	sd->bs = bs;

	if (td->filetype == FIO_TYPE_BD)
		td->io_ops->getevents = fio_sgio_ioctl_getevents;
	else
		td->io_ops->getevents = fio_sgio_getevents;

	/*
	 * we want to do the sync regardless of whether odirect is set or not
	 */
	td->override_sync = 1;
	return 0;
}

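/*
 * Engine registration. Note that ->getevents is replaced at init time with
 * the ioctl variant when the target is a block device.
 */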
struct ioengine_ops ioengine = {
	.name		= "sg",
	.version	= FIO_IOOPS_VERSION,
	.init		= fio_sgio_init,
	.prep		= fio_sgio_prep,
	.queue		= fio_sgio_queue,
	.getevents	= fio_sgio_getevents,
	.event		= fio_sgio_event,
	.cleanup	= fio_sgio_cleanup,
	.sync		= fio_sgio_sync,
	.flags		= FIO_SYNCIO,
};