binject: correctly retrieve block size of device
[fio.git] / engines / binject.c
1/*
2 * binject engine
3 *
4 * IO engine that uses the Linux binject interface to directly inject
5 * bio's to block devices.
6 *
7 */
8#include <stdio.h>
9#include <stdlib.h>
10#include <unistd.h>
11#include <errno.h>
12#include <assert.h>
13#include <string.h>
14#include <sys/poll.h>
15
16#include "../fio.h"
17
18#ifdef FIO_HAVE_BINJECT
19
/* Per-thread engine state, allocated in fio_binject_init(). */
struct binject_data {
	struct b_user_cmd *cmds;	/* reap buffer: up to iodepth commands read back per pass */
	struct io_u **events;		/* completed io_u's handed out by ->event() */
	struct pollfd *pfds;		/* poll set, one slot per open file */
	int *fd_flags;			/* saved F_GETFL flags, restored after non-blocking reaps */
};
26
27static void binject_buc_init(struct binject_data *bd, struct io_u *io_u)
28{
29 struct b_user_cmd *buc = &io_u->buc;
30
31 memset(buc, 0, sizeof(*buc));
32 binject_buc_set_magic(buc);
33
34 buc->buf = (unsigned long) io_u->xfer_buf;
35 buc->len = io_u->xfer_buflen;
36 buc->offset = io_u->offset;
37 buc->usr_ptr = (unsigned long) io_u;
38
39 buc->flags = B_FLAG_NOIDLE | B_FLAG_UNPLUG;
40 assert(buc->buf);
41}
42
/*
 * Return 1 if any descriptor in the poll set has data ready (POLLIN),
 * 0 otherwise.
 */
static int pollin_events(struct pollfd *pfds, int fds)
{
	struct pollfd *p = pfds;
	struct pollfd *end = pfds + fds;

	while (p < end) {
		if (p->revents & POLLIN)
			return 1;
		p++;
	}

	return 0;
}
53
/*
 * Reap between min and max completed commands from the binject fds.
 * Completed b_user_cmd's are read(2) back from each file; their usr_ptr
 * fields are unpacked into bd->events[] for ->event() to hand out.
 * Returns the number of events collected, or a negative error.
 */
static int fio_binject_getevents(struct thread_data *td, unsigned int min,
				 unsigned int max, struct timespec fio_unused *t)
{
	struct binject_data *bd = td->io_ops->data;
	int left = max, ret, r = 0, ev_index = 0;
	void *buf = bd->cmds;
	unsigned int i, events;
	struct fio_file *f;

	/*
	 * Fill in the file descriptors
	 */
	for_each_file(td, f, i) {
		/*
		 * don't block for min events == 0
		 */
		if (!min) {
			/* save flags so they can be restored at the bottom */
			bd->fd_flags[i] = fcntl(f->fd, F_GETFL);
			fcntl(f->fd, F_SETFL, bd->fd_flags[i] | O_NONBLOCK);
		}
		bd->pfds[i].fd = f->fd;
		bd->pfds[i].events = POLLIN;
	}

	while (left) {
		void *p;

		/* wait (indefinitely) until at least one fd is readable */
		do {
			if (!min)
				break;

			ret = poll(bd->pfds, td->o.nr_files, -1);
			if (ret < 0) {
				if (!r)
					r = -errno;
				td_verror(td, errno, "poll");
				break;
			} else if (!ret)
				continue;

			if (pollin_events(bd->pfds, td->o.nr_files))
				break;
		} while (1);

		if (r < 0)
			break;

re_read:
		/* drain completed commands from every file into buf */
		p = buf;
		events = 0;
		for_each_file(td, f, i) {
			ret = read(f->fd, p, left * sizeof(struct b_user_cmd));
			if (ret < 0) {
				if (errno == EAGAIN)
					continue;
				r = -errno;
				td_verror(td, errno, "read");
				break;
			} else if (ret) {
				/* NOTE: arithmetic on void * is a GCC extension */
				p += ret;
				events += ret / sizeof(struct b_user_cmd);
			}
		}

		if (r < 0)
			break;
		/*
		 * Nothing reaped yet: back off briefly and retry.
		 * NOTE(review): this retries even when min == 0, so a
		 * "non-blocking" call can still spin here — confirm intended.
		 */
		if (!events) {
			usleep(1000);
			goto re_read;
		}

		left -= events;
		r += events;

		/* unpack the io_u pointers stashed in usr_ptr at submit time */
		for (i = 0; i < events; i++) {
			struct b_user_cmd *buc = (struct b_user_cmd *) buf + i;

			bd->events[ev_index] = (struct io_u *) buc->usr_ptr;
			ev_index++;
		}
	}

	/* restore original (blocking) fd flags */
	if (!min) {
		for_each_file(td, f, i)
			fcntl(f->fd, F_SETFL, bd->fd_flags[i]);
	}

	if (r > 0)
		assert(ev_index == r);

	return r;
}
146
147static int fio_binject_doio(struct thread_data *td, struct io_u *io_u)
148{
149 struct b_user_cmd *buc = &io_u->buc;
150 struct fio_file *f = io_u->file;
151 int ret;
152
153 ret = write(f->fd, buc, sizeof(*buc));
154 if (ret < 0)
155 return ret;
156
157 return FIO_Q_QUEUED;
158}
159
160static int fio_binject_prep(struct thread_data *td, struct io_u *io_u)
161{
162 struct binject_data *bd = td->io_ops->data;
4a435dac 163 unsigned int bs = io_u->file->file_data;
79a43187
JA
164 struct b_user_cmd *buc = &io_u->buc;
165
4a435dac 166 if (io_u->xfer_buflen & (bs - 1)) {
79a43187
JA
167 log_err("read/write not sector aligned\n");
168 return EINVAL;
169 }
170
171 if (io_u->ddir == DDIR_READ) {
172 binject_buc_init(bd, io_u);
173 buc->type = B_TYPE_READ;
174 } else if (io_u->ddir == DDIR_WRITE) {
175 binject_buc_init(bd, io_u);
3410599a 176 buc->type = B_TYPE_WRITE;
79a43187
JA
177 } else if (io_u->ddir == DDIR_TRIM) {
178 binject_buc_init(bd, io_u);
179 buc->type = B_TYPE_DISCARD;
180 } else {
181 assert(0);
182 }
183
184 return 0;
185}
186
187static int fio_binject_queue(struct thread_data *td, struct io_u *io_u)
188{
189 int ret;
190
191 fio_ro_check(td, io_u);
192
193 ret = fio_binject_doio(td, io_u);
194
195 if (ret < 0)
196 io_u->error = errno;
197
198 if (io_u->error) {
199 td_verror(td, io_u->error, "xfer");
200 return FIO_Q_COMPLETED;
201 }
202
203 return ret;
204}
205
206static struct io_u *fio_binject_event(struct thread_data *td, int event)
207{
208 struct binject_data *bd = td->io_ops->data;
209
210 return bd->events[event];
211}
212
4a435dac
JA
213static int fio_binject_open_file(struct thread_data *td, struct fio_file *f)
214{
215 unsigned int bs;
216 int ret;
217
218 ret = generic_open_file(td, f);
219 if (ret)
220 return 1;
221
222 if (f->filetype != FIO_TYPE_BD) {
223 log_err("fio: binject only works with block devices\n");
224 return 1;
225 }
226 if (ioctl(f->fd, BLKSSZGET, &bs) < 0) {
227 td_verror(td, errno, "BLKSSZGET");
228 return 1;
229 }
230
231 f->file_data = bs;
232 return 0;
233}
234
79a43187
JA
235static void fio_binject_cleanup(struct thread_data *td)
236{
237 struct binject_data *bd = td->io_ops->data;
238
239 if (bd) {
240 free(bd->events);
241 free(bd->cmds);
242 free(bd->fd_flags);
243 free(bd->pfds);
244 free(bd);
245 }
246}
247
248static int fio_binject_init(struct thread_data *td)
249{
250 struct binject_data *bd;
251
252 bd = malloc(sizeof(*bd));
253 memset(bd, 0, sizeof(*bd));
254
255 bd->cmds = malloc(td->o.iodepth * sizeof(struct b_user_cmd));
256 memset(bd->cmds, 0, td->o.iodepth * sizeof(struct b_user_cmd));
257
258 bd->events = malloc(td->o.iodepth * sizeof(struct io_u *));
259 memset(bd->events, 0, td->o.iodepth * sizeof(struct io_u *));
260
261 bd->pfds = malloc(sizeof(struct pollfd) * td->o.nr_files);
262 memset(bd->pfds, 0, sizeof(struct pollfd) * td->o.nr_files);
263
264 bd->fd_flags = malloc(sizeof(int) * td->o.nr_files);
265 memset(bd->fd_flags, 0, sizeof(int) * td->o.nr_files);
266
267 td->io_ops->data = bd;
268 return 0;
269}
270
/*
 * Engine hook table. Commands are queued by write(2)'ing b_user_cmds to
 * the binject device file and reaped with poll(2)/read(2); file open is
 * overridden to cache the device's sector size.
 */
static struct ioengine_ops ioengine = {
	.name		= "binject",
	.version	= FIO_IOOPS_VERSION,
	.init		= fio_binject_init,
	.prep		= fio_binject_prep,
	.queue		= fio_binject_queue,
	.getevents	= fio_binject_getevents,
	.event		= fio_binject_event,
	.cleanup	= fio_binject_cleanup,
	.open_file	= fio_binject_open_file,
	.close_file	= generic_close_file,
	.get_file_size	= generic_get_file_size,
	.flags		= FIO_RAWIO,	/* raw block I/O, no buffering by fio */
};
285
286#else /* FIO_HAVE_BINJECT */
287
288/*
289 * When we have a proper configure system in place, we simply wont build
290 * and install this io engine. For now install a crippled version that
291 * just complains and fails to load.
292 */
/* Stub init: always fails, so selecting "binject" reports unavailability. */
static int fio_binject_init(struct thread_data fio_unused *td)
{
	fprintf(stderr, "fio: ioengine binject not available\n");
	return 1;
}
298
/* Crippled hook table: only init is provided, and it refuses to load. */
static struct ioengine_ops ioengine = {
	.name		= "binject",
	.version	= FIO_IOOPS_VERSION,
	.init		= fio_binject_init,
};
304
305#endif
306
/* Runs at load time (fio_init constructor) to register the engine. */
static void fio_init fio_binject_register(void)
{
	register_ioengine(&ioengine);
}
311
/* Runs at unload time (fio_exit destructor) to unregister the engine. */
static void fio_exit fio_binject_unregister(void)
{
	unregister_ioengine(&ioengine);
}