/* from commit 79a43187 (JA) */
1 | /* |
2 | * binject engine | |
3 | * | |
4 | * IO engine that uses the Linux binject interface to directly inject | |
5 | * bio's to block devices. | |
6 | * | |
7 | */ | |
8 | #include <stdio.h> | |
9 | #include <stdlib.h> | |
10 | #include <unistd.h> | |
11 | #include <errno.h> | |
12 | #include <assert.h> | |
13 | #include <string.h> | |
14 | #include <sys/poll.h> | |
15 | ||
16 | #include "../fio.h" | |
17 | ||
18 | #ifdef FIO_HAVE_BINJECT | |
19 | ||
20 | struct binject_data { | |
21 | struct b_user_cmd *cmds; | |
22 | struct io_u **events; | |
23 | struct pollfd *pfds; | |
24 | int *fd_flags; | |
25 | unsigned int bs; | |
26 | }; | |
27 | ||
28 | static void binject_buc_init(struct binject_data *bd, struct io_u *io_u) | |
29 | { | |
30 | struct b_user_cmd *buc = &io_u->buc; | |
31 | ||
32 | memset(buc, 0, sizeof(*buc)); | |
33 | binject_buc_set_magic(buc); | |
34 | ||
35 | buc->buf = (unsigned long) io_u->xfer_buf; | |
36 | buc->len = io_u->xfer_buflen; | |
37 | buc->offset = io_u->offset; | |
38 | buc->usr_ptr = (unsigned long) io_u; | |
39 | ||
40 | buc->flags = B_FLAG_NOIDLE | B_FLAG_UNPLUG; | |
41 | assert(buc->buf); | |
42 | } | |
43 | ||
/*
 * Scan the poll results and report whether any descriptor became
 * readable.  Returns 1 as soon as one POLLIN revent is found, 0 if none.
 */
static int pollin_events(struct pollfd *pfds, int fds)
{
	int idx;

	for (idx = 0; idx < fds; idx++) {
		if (pfds[idx].revents & POLLIN)
			return 1;
	}

	return 0;
}
54 | ||
/*
 * Reap between 'min' and 'max' completions across all of the job's files.
 * Completed b_user_cmd structures are read back from the binject fds and
 * the io_u pointers stashed in usr_ptr (see binject_buc_init()) are saved
 * into bd->events for later retrieval by fio_binject_event().
 * Returns the number of events collected, or a negative errno on error.
 */
static int fio_binject_getevents(struct thread_data *td, unsigned int min,
				 unsigned int max, struct timespec fio_unused *t)
{
	struct binject_data *bd = td->io_ops->data;
	int left = max, ret, r = 0, ev_index = 0;
	void *buf = bd->cmds;
	unsigned int i, events;
	struct fio_file *f;

	/*
	 * Fill in the file descriptors
	 */
	for_each_file(td, f, i) {
		/*
		 * don't block for min events == 0; save the current fd
		 * flags so they can be restored before returning
		 */
		if (!min) {
			bd->fd_flags[i] = fcntl(f->fd, F_GETFL);
			fcntl(f->fd, F_SETFL, bd->fd_flags[i] | O_NONBLOCK);
		}
		bd->pfds[i].fd = f->fd;
		bd->pfds[i].events = POLLIN;
	}

	while (left) {
		void *p;

		/* block until at least one fd has something to read */
		do {
			if (!min)
				break;

			ret = poll(bd->pfds, td->o.nr_files, -1);
			if (ret < 0) {
				/* keep the first error for the return value */
				if (!r)
					r = -errno;
				td_verror(td, errno, "poll");
				break;
			} else if (!ret)
				continue;

			if (pollin_events(bd->pfds, td->o.nr_files))
				break;
		} while (1);

		if (r < 0)
			break;

re_read:
		/* drain completed commands from every file into bd->cmds */
		p = buf;
		events = 0;
		for_each_file(td, f, i) {
			ret = read(f->fd, p, left * sizeof(struct b_user_cmd));
			if (ret < 0) {
				/* non-blocking fd with nothing ready: try next file */
				if (errno == EAGAIN)
					continue;
				r = -errno;
				td_verror(td, errno, "read");
				break;
			} else if (ret) {
				p += ret;
				events += ret / sizeof(struct b_user_cmd);
			}
		}

		if (r < 0)
			break;
		/* nothing reaped yet: back off briefly, then retry the reads */
		if (!events) {
			usleep(1000);
			goto re_read;
		}

		left -= events;
		r += events;

		/* map each completed command back to its originating io_u */
		for (i = 0; i < events; i++) {
			struct b_user_cmd *buc = (struct b_user_cmd *) buf + i;

			bd->events[ev_index] = (struct io_u *) buc->usr_ptr;
			ev_index++;
		}
	}

	/* restore the original fd flags saved above */
	if (!min) {
		for_each_file(td, f, i)
			fcntl(f->fd, F_SETFL, bd->fd_flags[i]);
	}

	if (r > 0)
		assert(ev_index == r);

	return r;
}
147 | ||
148 | static int fio_binject_doio(struct thread_data *td, struct io_u *io_u) | |
149 | { | |
150 | struct b_user_cmd *buc = &io_u->buc; | |
151 | struct fio_file *f = io_u->file; | |
152 | int ret; | |
153 | ||
154 | ret = write(f->fd, buc, sizeof(*buc)); | |
155 | if (ret < 0) | |
156 | return ret; | |
157 | ||
158 | return FIO_Q_QUEUED; | |
159 | } | |
160 | ||
161 | static int fio_binject_prep(struct thread_data *td, struct io_u *io_u) | |
162 | { | |
163 | struct binject_data *bd = td->io_ops->data; | |
164 | struct b_user_cmd *buc = &io_u->buc; | |
165 | ||
166 | bd->bs = 512; | |
167 | ||
168 | if (io_u->xfer_buflen & (bd->bs - 1)) { | |
169 | log_err("read/write not sector aligned\n"); | |
170 | return EINVAL; | |
171 | } | |
172 | ||
173 | if (io_u->ddir == DDIR_READ) { | |
174 | binject_buc_init(bd, io_u); | |
175 | buc->type = B_TYPE_READ; | |
176 | } else if (io_u->ddir == DDIR_WRITE) { | |
177 | binject_buc_init(bd, io_u); | |
178 | buc->type = B_TYPE_WRITEZERO; | |
179 | } else if (io_u->ddir == DDIR_TRIM) { | |
180 | binject_buc_init(bd, io_u); | |
181 | buc->type = B_TYPE_DISCARD; | |
182 | } else { | |
183 | assert(0); | |
184 | } | |
185 | ||
186 | return 0; | |
187 | } | |
188 | ||
189 | static int fio_binject_queue(struct thread_data *td, struct io_u *io_u) | |
190 | { | |
191 | int ret; | |
192 | ||
193 | fio_ro_check(td, io_u); | |
194 | ||
195 | ret = fio_binject_doio(td, io_u); | |
196 | ||
197 | if (ret < 0) | |
198 | io_u->error = errno; | |
199 | ||
200 | if (io_u->error) { | |
201 | td_verror(td, io_u->error, "xfer"); | |
202 | return FIO_Q_COMPLETED; | |
203 | } | |
204 | ||
205 | return ret; | |
206 | } | |
207 | ||
208 | static struct io_u *fio_binject_event(struct thread_data *td, int event) | |
209 | { | |
210 | struct binject_data *bd = td->io_ops->data; | |
211 | ||
212 | return bd->events[event]; | |
213 | } | |
214 | ||
215 | static void fio_binject_cleanup(struct thread_data *td) | |
216 | { | |
217 | struct binject_data *bd = td->io_ops->data; | |
218 | ||
219 | if (bd) { | |
220 | free(bd->events); | |
221 | free(bd->cmds); | |
222 | free(bd->fd_flags); | |
223 | free(bd->pfds); | |
224 | free(bd); | |
225 | } | |
226 | } | |
227 | ||
228 | static int fio_binject_init(struct thread_data *td) | |
229 | { | |
230 | struct binject_data *bd; | |
231 | ||
232 | bd = malloc(sizeof(*bd)); | |
233 | memset(bd, 0, sizeof(*bd)); | |
234 | ||
235 | bd->cmds = malloc(td->o.iodepth * sizeof(struct b_user_cmd)); | |
236 | memset(bd->cmds, 0, td->o.iodepth * sizeof(struct b_user_cmd)); | |
237 | ||
238 | bd->events = malloc(td->o.iodepth * sizeof(struct io_u *)); | |
239 | memset(bd->events, 0, td->o.iodepth * sizeof(struct io_u *)); | |
240 | ||
241 | bd->pfds = malloc(sizeof(struct pollfd) * td->o.nr_files); | |
242 | memset(bd->pfds, 0, sizeof(struct pollfd) * td->o.nr_files); | |
243 | ||
244 | bd->fd_flags = malloc(sizeof(int) * td->o.nr_files); | |
245 | memset(bd->fd_flags, 0, sizeof(int) * td->o.nr_files); | |
246 | ||
247 | td->io_ops->data = bd; | |
248 | return 0; | |
249 | } | |
250 | ||
/*
 * Engine ops for the real binject implementation.  Open/close and size
 * probing need nothing binject-specific, so the generic helpers are used.
 * NOTE(review): FIO_RAWIO presumably flags raw block-device I/O — confirm
 * exact semantics against fio.h.
 */
static struct ioengine_ops ioengine = {
	.name = "binject",
	.version = FIO_IOOPS_VERSION,
	.init = fio_binject_init,
	.prep = fio_binject_prep,
	.queue = fio_binject_queue,
	.getevents = fio_binject_getevents,
	.event = fio_binject_event,
	.cleanup = fio_binject_cleanup,
	.open_file = generic_open_file,
	.close_file = generic_close_file,
	.get_file_size = generic_get_file_size,
	.flags = FIO_RAWIO,
};
265 | ||
266 | #else /* FIO_HAVE_BINJECT */ | |
267 | ||
268 | /* | |
269 | * When we have a proper configure system in place, we simply wont build | |
270 | * and install this io engine. For now install a crippled version that | |
271 | * just complains and fails to load. | |
272 | */ | |
/* Stub init: always fails so the crippled engine refuses to load. */
static int fio_binject_init(struct thread_data fio_unused *td)
{
	fprintf(stderr, "fio: ioengine binject not available\n");
	return 1;
}
278 | ||
/* Crippled ops table: only init is provided, and it always fails. */
static struct ioengine_ops ioengine = {
	.name = "binject",
	.version = FIO_IOOPS_VERSION,
	.init = fio_binject_init,
};
284 | ||
285 | #endif | |
286 | ||
/* Register the engine with fio (fio_init presumably marks this as a
 * load-time constructor — defined in fio headers). */
static void fio_init fio_binject_register(void)
{
	register_ioengine(&ioengine);
}
291 | ||
/* Unregister the engine on teardown (fio_exit counterpart to fio_init). */
static void fio_exit fio_binject_unregister(void)
{
	unregister_ioengine(&ioengine);
}