Fix refill buffers overwriting verify data
[fio.git] / ioengines.c
/*
 * The io parts of the fio tool, includes workers for sync and mmap'ed
 * io, as well as both posix and linux libaio support.
 *
 * sync io is implemented on top of aio.
 *
 * This is not really specific to fio; if the get_io_u/put_io_u routines
 * and structures were pulled into this as well, it would be a perfectly
 * generic io engine that could be used for other projects.
 *
 */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <dlfcn.h>
#include <assert.h>

#include "fio.h"

static LIST_HEAD(engine_list);

static int check_engine_ops(struct ioengine_ops *ops)
{
        if (ops->version != FIO_IOOPS_VERSION) {
                log_err("bad ioops version %d (want %d)\n", ops->version,
                        FIO_IOOPS_VERSION);
                return 1;
        }

        if (!ops->queue) {
                log_err("%s: no queue handler\n", ops->name);
                return 1;
        }

        /*
         * sync engines only need a ->queue()
         */
        if (ops->flags & FIO_SYNCIO)
                return 0;

        if (!ops->event) {
                log_err("%s: no event handler\n", ops->name);
                return 1;
        }
        if (!ops->getevents) {
                log_err("%s: no getevents handler\n", ops->name);
                return 1;
        }

        return 0;
}
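
/*
 * Illustrative sketch, not part of fio proper: a minimal sync engine
 * that passes the checks above only needs a ->queue() handler. The
 * names example_queue and ioengine_example are hypothetical.
 *
 *      static int example_queue(struct thread_data *td, struct io_u *io_u)
 *      {
 *              io_u->error = 0;
 *              return FIO_Q_COMPLETED;
 *      }
 *
 *      static struct ioengine_ops ioengine_example = {
 *              .name           = "example",
 *              .version        = FIO_IOOPS_VERSION,
 *              .queue          = example_queue,
 *              .flags          = FIO_SYNCIO,
 *      };
 *
 * A built-in engine hands such a table to register_ioengine() at
 * startup so that find_ioengine() can later locate it by name.
 */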

void unregister_ioengine(struct ioengine_ops *ops)
{
        dprint(FD_IO, "ioengine %s unregistered\n", ops->name);
        list_del(&ops->list);
        INIT_LIST_HEAD(&ops->list);
}

void register_ioengine(struct ioengine_ops *ops)
{
        dprint(FD_IO, "ioengine %s registered\n", ops->name);
        INIT_LIST_HEAD(&ops->list);
        list_add_tail(&ops->list, &engine_list);
}

static struct ioengine_ops *find_ioengine(const char *name)
{
        struct ioengine_ops *ops;
        struct list_head *entry;

        list_for_each(entry, &engine_list) {
                ops = list_entry(entry, struct ioengine_ops, list);
                if (!strcmp(name, ops->name))
                        return ops;
        }

        return NULL;
}

static struct ioengine_ops *dlopen_ioengine(struct thread_data *td,
                                            const char *engine_lib)
{
        struct ioengine_ops *ops;
        void *dlhandle;

        dprint(FD_IO, "dload engine %s\n", engine_lib);

        dlerror();
        dlhandle = dlopen(engine_lib, RTLD_LAZY);
        if (!dlhandle) {
                td_vmsg(td, -1, dlerror(), "dlopen");
                return NULL;
        }

        /*
         * Unlike the included modules, external engines should have a
         * non-static ioengine structure that we can reference.
         */
        ops = dlsym(dlhandle, "ioengine");
        if (!ops) {
                td_vmsg(td, -1, dlerror(), "dlsym");
                dlclose(dlhandle);
                return NULL;
        }

        ops->dlhandle = dlhandle;
        return ops;
}
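
/*
 * Illustrative sketch, not part of fio proper: for the dlsym() lookup
 * above to succeed, an external engine built as a shared object must
 * export its ops table under the exact, non-static symbol name
 * "ioengine". The queue handler ext_queue is hypothetical; see the
 * minimal handler sketched after check_engine_ops().
 *
 *      struct ioengine_ops ioengine = {
 *              .name           = "ext",
 *              .version        = FIO_IOOPS_VERSION,
 *              .queue          = ext_queue,
 *              .flags          = FIO_SYNCIO,
 *      };
 */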

struct ioengine_ops *load_ioengine(struct thread_data *td, const char *name)
{
        struct ioengine_ops *ops, *ret;
        char engine[16];

        dprint(FD_IO, "load ioengine %s\n", name);

        strncpy(engine, name, sizeof(engine) - 1);
        engine[sizeof(engine) - 1] = '\0';

        /*
         * linux libaio has alias names, so convert to what we want
         */
        if (!strncmp(engine, "linuxaio", 8) || !strncmp(engine, "aio", 3))
                strcpy(engine, "libaio");

        ops = find_ioengine(engine);
        if (!ops)
                ops = dlopen_ioengine(td, name);

        if (!ops) {
                log_err("fio: engine %s not loadable\n", name);
                return NULL;
        }

        /*
         * Check that the required methods are there.
         */
        if (check_engine_ops(ops))
                return NULL;

        ret = malloc(sizeof(*ret));
        if (!ret)
                return NULL;

        memcpy(ret, ops, sizeof(*ret));
        ret->data = NULL;

        return ret;
}

void close_ioengine(struct thread_data *td)
{
        dprint(FD_IO, "close ioengine %s\n", td->io_ops->name);

        if (td->io_ops->cleanup)
                td->io_ops->cleanup(td);

        if (td->io_ops->dlhandle)
                dlclose(td->io_ops->dlhandle);

        free(td->io_ops);
        td->io_ops = NULL;
}

int td_io_prep(struct thread_data *td, struct io_u *io_u)
{
        dprint_io_u(io_u, "prep");
        fio_ro_check(td, io_u);

        lock_file(td, io_u->file, io_u->ddir);

        if (td->io_ops->prep) {
                int ret = td->io_ops->prep(td, io_u);

                dprint(FD_IO, "->prep(%p)=%d\n", io_u, ret);
                if (ret)
                        unlock_file(td, io_u->file);
                return ret;
        }

        return 0;
}

int td_io_getevents(struct thread_data *td, unsigned int min, unsigned int max,
                    struct timespec *t)
{
        int r = 0;

        if (min > 0 && td->io_ops->commit) {
                r = td->io_ops->commit(td);
                if (r < 0)
                        goto out;
        }

        r = 0;
        if (td->io_ops->getevents)
                r = td->io_ops->getevents(td, min, max, t);
out:
        if (r >= 0)
                io_u_mark_complete(td, r);
        dprint(FD_IO, "getevents: %d\n", r);
        return r;
}

int td_io_queue(struct thread_data *td, struct io_u *io_u)
{
        int ret;

        dprint_io_u(io_u, "queue");
        fio_ro_check(td, io_u);

        assert((io_u->flags & IO_U_F_FLIGHT) == 0);
        io_u->flags |= IO_U_F_FLIGHT;

        assert(io_u->file->flags & FIO_FILE_OPEN);

        io_u->error = 0;
        io_u->resid = 0;

        if (td->io_ops->flags & FIO_SYNCIO) {
                fio_gettime(&io_u->issue_time, NULL);
                memcpy(&td->last_issue, &io_u->issue_time,
                        sizeof(struct timeval));

                /*
                 * for a sync engine, set the timeout upfront
                 */
                if (mtime_since(&td->timeout_end, &io_u->issue_time)
                    < IO_U_TIMEOUT)
                        io_u_set_timeout(td);
        }

        if (io_u->ddir != DDIR_SYNC)
                td->io_issues[io_u->ddir]++;

        ret = td->io_ops->queue(td, io_u);

        unlock_file(td, io_u->file);

        if (!td->io_ops->commit) {
                io_u_mark_submit(td, 1);
                io_u_mark_complete(td, 1);
        }

        if (ret == FIO_Q_COMPLETED) {
                if (io_u->ddir != DDIR_SYNC) {
                        io_u_mark_depth(td, 1);
                        td->ts.total_io_u[io_u->ddir]++;
                }
        } else if (ret == FIO_Q_QUEUED) {
                int r;

                if (io_u->ddir != DDIR_SYNC) {
                        td->io_u_queued++;
                        td->ts.total_io_u[io_u->ddir]++;
                }

                if (td->io_u_queued >= td->o.iodepth_batch) {
                        r = td_io_commit(td);
                        if (r < 0)
                                return r;
                }
        }

        if ((td->io_ops->flags & FIO_SYNCIO) == 0) {
                fio_gettime(&io_u->issue_time, NULL);
                memcpy(&td->last_issue, &io_u->issue_time,
                        sizeof(struct timeval));

                /*
                 * async engine, set the timeout here
                 */
                if (ret == FIO_Q_QUEUED &&
                    (mtime_since(&td->timeout_end, &io_u->issue_time)
                     < IO_U_TIMEOUT)) {
                        io_u_set_timeout(td);
                }
        }

        return ret;
}
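
/*
 * Illustrative sketch, not part of fio proper: a caller typically keys
 * off the td_io_queue() return value to decide when to flush and reap,
 * roughly like this (error handling elided):
 *
 *      ret = td_io_queue(td, io_u);
 *      switch (ret) {
 *      case FIO_Q_COMPLETED:
 *              ret = io_u->error;
 *              break;
 *      case FIO_Q_QUEUED:
 *              ret = td_io_commit(td);
 *              if (!ret)
 *                      ret = td_io_getevents(td, 1, td->cur_depth, NULL);
 *              break;
 *      case FIO_Q_BUSY:
 *              ret = td_io_commit(td);
 *              break;
 *      }
 */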

int td_io_init(struct thread_data *td)
{
        int ret = 0;

        if (td->io_ops->init) {
                ret = td->io_ops->init(td);
                if (ret && td->o.iodepth > 1) {
                        log_err("fio: io engine init failed. Perhaps try"
                                " reducing io depth?\n");
                }
        }

        return ret;
}

int td_io_commit(struct thread_data *td)
{
        dprint(FD_IO, "calling ->commit(), depth %d\n", td->cur_depth);

        if (!td->cur_depth || !td->io_u_queued)
                return 0;

        io_u_mark_depth(td, td->io_u_queued);
        td->io_u_queued = 0;

        if (td->io_ops->commit)
                return td->io_ops->commit(td);

        return 0;
}

int td_io_open_file(struct thread_data *td, struct fio_file *f)
{
        if (td->io_ops->open_file(td, f)) {
                if (td->error == EINVAL && td->o.odirect)
                        log_err("fio: destination does not support O_DIRECT\n");
                if (td->error == EMFILE) {
                        log_err("fio: try reducing/setting openfiles (failed"
                                " at %u of %u)\n", td->nr_open_files,
                                td->o.nr_files);
                }

                return 1;
        }

        if (f->filetype == FIO_TYPE_PIPE) {
                if (td_random(td)) {
                        log_err("fio: can't seek on pipes (no random io)\n");
                        goto err;
                }
        }

        fio_file_reset(f);
        f->flags |= FIO_FILE_OPEN;
        f->flags &= ~FIO_FILE_CLOSING;

        if (td->io_ops->flags & FIO_DISKLESSIO)
                goto done;

        if (td->o.invalidate_cache && file_invalidate_cache(td, f))
                goto err;

        if (td->o.fadvise_hint &&
            (f->filetype == FIO_TYPE_BD || f->filetype == FIO_TYPE_FILE)) {
                int flags;

                if (td_random(td))
                        flags = POSIX_FADV_RANDOM;
                else
                        flags = POSIX_FADV_SEQUENTIAL;

                if (fadvise(f->fd, f->file_offset, f->io_size, flags) < 0) {
                        td_verror(td, errno, "fadvise");
                        goto err;
                }
        }

        if (f->file_map)
                memset(f->file_map, 0, f->num_maps * sizeof(long));

done:
        log_file(td, f, FIO_LOG_OPEN_FILE);
        td->nr_open_files++;
        get_file(f);
        return 0;
err:
        if (td->io_ops->close_file)
                td->io_ops->close_file(td, f);
        return 1;
}

int td_io_close_file(struct thread_data *td, struct fio_file *f)
{
        if (!(f->flags & FIO_FILE_CLOSING))
                log_file(td, f, FIO_LOG_CLOSE_FILE);

        /*
         * mark as closing, do real close when last io on it has completed
         */
        f->flags |= FIO_FILE_CLOSING;

        unlock_file_all(td, f);

        return put_file(td, f);
}