fix dynamic engine build
[fio.git] / ioengines.c
/*
 * The io parts of the fio tool; includes workers for sync and mmap'ed
 * io, as well as both posix and linux libaio support.
 *
 * sync io is implemented on top of aio.
 *
 * This is not really specific to fio: if the get_io_u/put_io_u helpers
 * and structures were pulled into this as well, it would be a perfectly
 * generic io engine that could be used for other projects.
 *
 */
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <dlfcn.h>
#include <fcntl.h>
#include <assert.h>

#include "fio.h"
#include "diskutil.h"
#include "zbd.h"

static FLIST_HEAD(engine_list);

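/*
 * Sanity check an engine's ops table: the ioops version must match, a
 * ->queue() handler must exist, and async engines must also provide
 * ->event()/->getevents(). Returns true if the engine cannot be used.
 */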
static bool check_engine_ops(struct thread_data *td, struct ioengine_ops *ops)
{
        if (ops->version != FIO_IOOPS_VERSION) {
                log_err("bad ioops version %d (want %d)\n", ops->version,
                        FIO_IOOPS_VERSION);
                return true;
        }

        if (!ops->queue) {
                log_err("%s: no queue handler\n", ops->name);
                return true;
        }

        /*
         * sync engines only need a ->queue()
         */
        if (ops->flags & FIO_SYNCIO)
                return false;

        /*
         * async engines aren't reliable with offload
         */
        if ((td->o.io_submit_mode == IO_MODE_OFFLOAD) &&
            (ops->flags & FIO_NO_OFFLOAD)) {
                log_err("%s: can't be used with offloaded submit. Use a sync "
                        "engine\n", ops->name);
                return true;
        }

        if (!ops->event || !ops->getevents) {
                log_err("%s: no event/getevents handler\n", ops->name);
                return true;
        }

        return false;
}

void unregister_ioengine(struct ioengine_ops *ops)
{
        dprint(FD_IO, "ioengine %s unregistered\n", ops->name);
        flist_del_init(&ops->list);
}

void register_ioengine(struct ioengine_ops *ops)
{
        dprint(FD_IO, "ioengine %s registered\n", ops->name);
        flist_add_tail(&ops->list, &engine_list);
}

static struct ioengine_ops *find_ioengine(const char *name)
{
        struct ioengine_ops *ops;
        struct flist_head *entry;

        flist_for_each(entry, &engine_list) {
                ops = flist_entry(entry, struct ioengine_ops, list);
                if (!strcmp(name, ops->name))
                        return ops;
        }

        return NULL;
}

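/*
 * With CONFIG_DYNAMIC_ENGINES, some engines are built as separate shared
 * objects installed under FIO_EXT_ENG_DIR (set at build time), and
 * dlopen_external() loads lib<engine>.so from that directory.
 */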
#ifdef CONFIG_DYNAMIC_ENGINES
static void *dlopen_external(struct thread_data *td, const char *engine)
{
        char engine_path[PATH_MAX];
        void *dlhandle;

        sprintf(engine_path, "%s/lib%s.so", FIO_EXT_ENG_DIR, engine);

        dlhandle = dlopen(engine_path, RTLD_LAZY);
        if (!dlhandle)
                log_info("Engine %s not found; either the name is invalid, it was not built, or the fio-engine-%s package is missing.\n",
                         engine, engine);

        return dlhandle;
}
#else
#define dlopen_external(td, engine) (NULL)
#endif

static struct ioengine_ops *dlopen_ioengine(struct thread_data *td,
                                            const char *engine_lib)
{
        struct ioengine_ops *ops;
        void *dlhandle;

        dprint(FD_IO, "dload engine %s\n", engine_lib);

        dlerror();
        dlhandle = dlopen(engine_lib, RTLD_LAZY);
        if (!dlhandle) {
                dlhandle = dlopen_external(td, engine_lib);
                if (!dlhandle) {
                        td_vmsg(td, -1, dlerror(), "dlopen");
                        return NULL;
                }
        }

        /*
         * Unlike the included modules, external engines should have a
         * non-static ioengine structure that we can reference.
         */
        ops = dlsym(dlhandle, engine_lib);
        if (!ops)
                ops = dlsym(dlhandle, "ioengine");

        /*
         * For some external engines (like C++ ones) it is not that trivial
         * to provide a non-static ioengine structure that we can reference.
         * Instead we call a method which allocates the required ioengine
         * structure.
         */
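        /*
         * Illustrative sketch only (names below are not from this file): an
         * external engine typically exports either a global ops table,
         *
         *      struct ioengine_ops ioengine = { .name = "myengine", ... };
         *
         * or, where a static definition is impractical (e.g. C++),
         *
         *      void get_ioengine(struct ioengine_ops **ops) { *ops = &my_ops; }
         */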
        if (!ops) {
                get_ioengine_t get_ioengine = dlsym(dlhandle, "get_ioengine");

                if (get_ioengine)
                        get_ioengine(&ops);
        }

        if (!ops) {
                td_vmsg(td, -1, dlerror(), "dlsym");
                dlclose(dlhandle);
                return NULL;
        }

        td->io_ops_dlhandle = dlhandle;
        return ops;
}

static struct ioengine_ops *__load_ioengine(const char *engine)
{
        /*
         * linux libaio has alias names, so convert to what we want
         */
        if (!strncmp(engine, "linuxaio", 8)) {
                dprint(FD_IO, "converting ioengine name: %s -> libaio\n",
                       engine);
                engine = "libaio";
        }

        dprint(FD_IO, "load ioengine %s\n", engine);
        return find_ioengine(engine);
}

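/*
 * Resolve the engine named in the job options: look it up in the list of
 * registered built-in engines first, then fall back to dlopen(3)ing it as
 * an external engine.
 */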
struct ioengine_ops *load_ioengine(struct thread_data *td)
{
        struct ioengine_ops *ops = NULL;
        const char *name;

        /*
         * Use ->ioengine_so_path if an external ioengine path is specified.
         * In this case, ->ioengine is "external" which also means the prefix
         * for external ioengines "external:" is properly used.
         */
        name = td->o.ioengine_so_path ?: td->o.ioengine;

        /*
         * Try to load ->ioengine first, and if that fails try to dlopen(3)
         * either ->ioengine or ->ioengine_so_path. This is redundant for an
         * external ioengine with prefix, and also leaves the possibility of
         * unexpected behavior (e.g. if the "external" ioengine exists), but
         * we do this so as not to break job files not using the prefix.
         */
        ops = __load_ioengine(td->o.ioengine);
        if (!ops)
                ops = dlopen_ioengine(td, name);

        /*
         * If ops is NULL, we failed to load ->ioengine, and also failed to
         * dlopen(3) either ->ioengine or ->ioengine_so_path as a path.
         */
        if (!ops) {
                log_err("fio: engine %s not loadable\n", name);
                return NULL;
        }

        /*
         * Check that the required methods are there.
         */
        if (check_engine_ops(td, ops))
                return NULL;

        return ops;
}

/*
 * For cleaning up an ioengine which never made it to init().
 */
void free_ioengine(struct thread_data *td)
{
        dprint(FD_IO, "free ioengine %s\n", td->io_ops->name);

        if (td->eo && td->io_ops->options) {
                options_free(td->io_ops->options, td->eo);
                free(td->eo);
                td->eo = NULL;
        }

        if (td->io_ops_dlhandle) {
                dlclose(td->io_ops_dlhandle);
                td->io_ops_dlhandle = NULL;
        }

        td->io_ops = NULL;
}

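/*
 * Full teardown of an initialized engine: run its ->cleanup() hook, then
 * drop options, dlopen handle and ops via free_ioengine().
 */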
void close_ioengine(struct thread_data *td)
{
        dprint(FD_IO, "close ioengine %s\n", td->io_ops->name);

        if (td->io_ops->cleanup) {
                td->io_ops->cleanup(td);
                td->io_ops_data = NULL;
        }

        free_ioengine(td);
}

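/*
 * Prepare an io_u for submission: take the file lock and run the engine's
 * ->prep() hook, dropping the lock again if prep fails. On success the lock
 * is released later, in td_io_queue().
 */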
int td_io_prep(struct thread_data *td, struct io_u *io_u)
{
        dprint_io_u(io_u, "prep");
        fio_ro_check(td, io_u);

        lock_file(td, io_u->file, io_u->ddir);

        if (td->io_ops->prep) {
                int ret = td->io_ops->prep(td, io_u);

                dprint(FD_IO, "prep: io_u %p: ret=%d\n", io_u, ret);

                if (ret)
                        unlock_file(td, io_u->file);
                return ret;
        }

        return 0;
}

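/*
 * Reap between min and max completion events from the engine, committing
 * any pending submissions first if a minimum number of events is required.
 */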
int td_io_getevents(struct thread_data *td, unsigned int min, unsigned int max,
                    const struct timespec *t)
{
        int r = 0;

        /*
         * For the ioengine=rdma one-sided operations RDMA_WRITE or RDMA_READ,
         * the server side gets a message from the client side that the task
         * is finished, and td->done is set to 1 after td_io_commit(). In this
         * case, there is no need to reap completion events on the server side.
         */
        if (td->done)
                return 0;

        if (min > 0 && td->io_ops->commit) {
                r = td->io_ops->commit(td);
                if (r < 0)
                        goto out;
        }
        if (max > td->cur_depth)
                max = td->cur_depth;
        if (min > max)
                max = min;

        r = 0;
        if (max && td->io_ops->getevents)
                r = td->io_ops->getevents(td, min, max, t);
out:
        if (r >= 0) {
                /*
                 * Reflect that our submitted requests were retrieved with
                 * whatever OS async calls are in the underlying engine.
                 */
                td->io_u_in_flight -= r;
                io_u_mark_complete(td, r);
        } else
                td_verror(td, r, "get_events");

        dprint(FD_IO, "getevents: %d\n", r);
        return r;
}

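/*
 * Hand one io_u to the engine's ->queue() handler. Also takes care of issue
 * accounting, zoned-device bookkeeping, rollback on FIO_Q_BUSY, and batched
 * commit of queued requests via td_io_commit().
 */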
enum fio_q_status td_io_queue(struct thread_data *td, struct io_u *io_u)
{
        const enum fio_ddir ddir = acct_ddir(io_u);
        unsigned long long buflen = io_u->xfer_buflen;
        enum fio_q_status ret;

        dprint_io_u(io_u, "queue");
        fio_ro_check(td, io_u);

        assert((io_u->flags & IO_U_F_FLIGHT) == 0);
        io_u_set(td, io_u, IO_U_F_FLIGHT);

        /*
         * If overlap checking was enabled in offload mode we
         * can release this lock that was acquired when we
         * started the overlap check because the IO_U_F_FLIGHT
         * flag is now set
         */
        if (td_offload_overlap(td)) {
                int res = pthread_mutex_unlock(&overlap_check);
                assert(res == 0);
        }

        assert(fio_file_open(io_u->file));

        /*
         * If using a write iolog, store this entry.
         */
        log_io_u(td, io_u);

        io_u->error = 0;
        io_u->resid = 0;

        if (td_ioengine_flagged(td, FIO_SYNCIO) ||
            (td_ioengine_flagged(td, FIO_ASYNCIO_SYNC_TRIM) &&
             io_u->ddir == DDIR_TRIM)) {
                if (fio_fill_issue_time(td))
                        fio_gettime(&io_u->issue_time, NULL);

                /*
                 * only used for iolog
                 */
                if (td->o.read_iolog_file)
                        memcpy(&td->last_issue, &io_u->issue_time,
                               sizeof(io_u->issue_time));
        }

        if (ddir_rw(ddir)) {
                if (!(io_u->flags & IO_U_F_VER_LIST)) {
                        td->io_issues[ddir]++;
                        td->io_issue_bytes[ddir] += buflen;
                }
                td->rate_io_issue_bytes[ddir] += buflen;
        }

        ret = td->io_ops->queue(td, io_u);
        zbd_queue_io_u(td, io_u, ret);

        unlock_file(td, io_u->file);

        if (ret == FIO_Q_BUSY && ddir_rw(ddir)) {
                td->io_issues[ddir]--;
                td->io_issue_bytes[ddir] -= buflen;
                td->rate_io_issue_bytes[ddir] -= buflen;
                io_u_clear(td, io_u, IO_U_F_FLIGHT);
        }

        /*
         * If an error was seen and the io engine didn't propagate it
         * back to 'td', do so.
         */
        if (io_u->error && !td->error)
                td_verror(td, io_u->error, "td_io_queue");

        /*
         * Add warning for O_DIRECT so that users have an easier time
         * spotting potentially bad alignment. If this triggers for the first
         * IO, then it's likely an alignment problem or because the host fs
         * does not support O_DIRECT
         */
        if (io_u->error == EINVAL && td->io_issues[io_u->ddir & 1] == 1 &&
            td->o.odirect) {

                log_info("fio: first direct IO errored. File system may not "
                         "support direct IO, or iomem_align= is bad, or "
                         "invalid block size. Try setting direct=0.\n");
        }

        if (zbd_unaligned_write(io_u->error) &&
            td->io_issues[io_u->ddir & 1] == 1 &&
            td->o.zone_mode != ZONE_MODE_ZBD) {
                log_info("fio: first I/O failed. If %s is a zoned block device, consider --zonemode=zbd\n",
                         io_u->file->file_name);
        }

        if (!td->io_ops->commit) {
                io_u_mark_submit(td, 1);
                io_u_mark_complete(td, 1);
                zbd_put_io_u(td, io_u);
        }

        if (ret == FIO_Q_COMPLETED) {
                if (ddir_rw(io_u->ddir) ||
                    (ddir_sync(io_u->ddir) && td->runstate != TD_FSYNCING)) {
                        io_u_mark_depth(td, 1);
                        td->ts.total_io_u[io_u->ddir]++;
                }
        } else if (ret == FIO_Q_QUEUED) {
                td->io_u_queued++;

                if (ddir_rw(io_u->ddir) ||
                    (ddir_sync(io_u->ddir) && td->runstate != TD_FSYNCING))
                        td->ts.total_io_u[io_u->ddir]++;

                if (td->io_u_queued >= td->o.iodepth_batch)
                        td_io_commit(td);
        }

        if (!td_ioengine_flagged(td, FIO_SYNCIO) &&
            (!td_ioengine_flagged(td, FIO_ASYNCIO_SYNC_TRIM) ||
             io_u->ddir != DDIR_TRIM)) {
                if (fio_fill_issue_time(td))
                        fio_gettime(&io_u->issue_time, NULL);

                /*
                 * only used for iolog
                 */
                if (td->o.read_iolog_file)
                        memcpy(&td->last_issue, &io_u->issue_time,
                               sizeof(io_u->issue_time));
        }

        return ret;
}

int td_io_init(struct thread_data *td)
{
        int ret = 0;

        if (td->io_ops->init) {
                ret = td->io_ops->init(td);
                if (ret)
                        log_err("fio: io engine %s init failed.%s\n",
                                td->io_ops->name,
                                td->o.iodepth > 1 ?
                                " Perhaps try reducing io depth?" : "");
                else
                        td->io_ops_init = 1;
                if (!td->error)
                        td->error = ret;
        }

        return ret;
}

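/*
 * Push all queued but not yet submitted io_us to the engine via ->commit(),
 * and account them as being in flight.
 */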
void td_io_commit(struct thread_data *td)
{
        int ret;

        dprint(FD_IO, "calling ->commit(), depth %d\n", td->cur_depth);

        if (!td->cur_depth || !td->io_u_queued)
                return;

        io_u_mark_depth(td, td->io_u_queued);

        if (td->io_ops->commit) {
                ret = td->io_ops->commit(td);
                if (ret)
                        td_verror(td, -ret, "io commit");
        }

        /*
         * Reflect that events were submitted as async IO requests.
         */
        td->io_u_in_flight += td->io_u_queued;
        td->io_u_queued = 0;
}

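/*
 * Open a file through the engine's ->open_file() handler and apply the
 * per-file policies from the job options: cache invalidation, fadvise and
 * write hints, and direct I/O setup on platforms without O_DIRECT.
 */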
int td_io_open_file(struct thread_data *td, struct fio_file *f)
{
        if (fio_file_closing(f)) {
                /*
                 * Open translates to undo closing.
                 */
                fio_file_clear_closing(f);
                get_file(f);
                return 0;
        }
        assert(!fio_file_open(f));
        assert(f->fd == -1);
        assert(td->io_ops->open_file);

        if (td->io_ops->open_file(td, f)) {
                if (td->error == EINVAL && td->o.odirect)
                        log_err("fio: destination does not support O_DIRECT\n");
                if (td->error == EMFILE) {
                        log_err("fio: try reducing/setting openfiles (failed"
                                " at %u of %u)\n", td->nr_open_files,
                                td->o.nr_files);
                }

                assert(f->fd == -1);
                assert(!fio_file_open(f));
                return 1;
        }

        fio_file_reset(td, f);
        fio_file_set_open(f);
        fio_file_clear_closing(f);
        disk_util_inc(f->du);

        td->nr_open_files++;
        get_file(f);

        if (f->filetype == FIO_TYPE_PIPE) {
                if (td_random(td)) {
                        log_err("fio: can't seek on pipes (no random io)\n");
                        goto err;
                }
        }

        if (td_ioengine_flagged(td, FIO_DISKLESSIO))
                goto done;

        if (td->o.invalidate_cache && file_invalidate_cache(td, f))
                goto err;

        if (td->o.fadvise_hint != F_ADV_NONE &&
            (f->filetype == FIO_TYPE_BLOCK || f->filetype == FIO_TYPE_FILE)) {
                int flags;

                if (td->o.fadvise_hint == F_ADV_TYPE) {
                        if (td_random(td))
                                flags = POSIX_FADV_RANDOM;
                        else
                                flags = POSIX_FADV_SEQUENTIAL;
                } else if (td->o.fadvise_hint == F_ADV_RANDOM)
                        flags = POSIX_FADV_RANDOM;
                else if (td->o.fadvise_hint == F_ADV_SEQUENTIAL)
                        flags = POSIX_FADV_SEQUENTIAL;
                else {
                        log_err("fio: unknown fadvise type %d\n",
                                td->o.fadvise_hint);
                        flags = POSIX_FADV_NORMAL;
                }

                if (posix_fadvise(f->fd, f->file_offset, f->io_size, flags) < 0) {
                        if (!fio_did_warn(FIO_WARN_FADVISE))
                                log_err("fio: fadvise hint failed\n");
                }
        }
#ifdef FIO_HAVE_WRITE_HINT
        if (fio_option_is_set(&td->o, write_hint) &&
            (f->filetype == FIO_TYPE_BLOCK || f->filetype == FIO_TYPE_FILE)) {
                uint64_t hint = td->o.write_hint;
                int cmd;

                /*
                 * For direct IO, we just need/want to set the hint on
                 * the file descriptor. For buffered IO, we need to set
                 * it on the inode.
                 */
                if (td->o.odirect)
                        cmd = F_SET_FILE_RW_HINT;
                else
                        cmd = F_SET_RW_HINT;

                if (fcntl(f->fd, cmd, &hint) < 0) {
                        td_verror(td, errno, "fcntl write hint");
                        goto err;
                }
        }
#endif

        if (td->o.odirect && !OS_O_DIRECT && fio_set_directio(td, f))
                goto err;

done:
        log_file(td, f, FIO_LOG_OPEN_FILE);
        return 0;
err:
        disk_util_dec(f->du);
        if (td->io_ops->close_file)
                td->io_ops->close_file(td, f);
        return 1;
}

int td_io_close_file(struct thread_data *td, struct fio_file *f)
{
        if (!fio_file_closing(f))
                log_file(td, f, FIO_LOG_CLOSE_FILE);

        /*
         * mark as closing, do real close when last io on it has completed
         */
        fio_file_set_closing(f);

        return put_file(td, f);
}

int td_io_unlink_file(struct thread_data *td, struct fio_file *f)
{
        if (td->io_ops->unlink_file)
                return td->io_ops->unlink_file(td, f);
        else {
                int ret;

                ret = unlink(f->file_name);
                if (ret < 0)
                        return errno;

                return 0;
        }
}

int td_io_get_file_size(struct thread_data *td, struct fio_file *f)
{
        if (!td->io_ops->get_file_size)
                return 0;

        return td->io_ops->get_file_size(td, f);
}

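/*
 * List the available ioengines, or show the option help for one engine
 * (optionally narrowed to a single option after a comma), e.g. for fio's
 * --enghelp command line option.
 */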
int fio_show_ioengine_help(const char *engine)
{
        struct flist_head *entry;
        struct thread_data td;
        struct ioengine_ops *io_ops;
        char *sep;
        int ret = 1;

        if (!engine || !*engine) {
                log_info("Available IO engines:\n");
                flist_for_each(entry, &engine_list) {
                        io_ops = flist_entry(entry, struct ioengine_ops, list);
                        log_info("\t%s\n", io_ops->name);
                }
                return 0;
        }
        sep = strchr(engine, ',');
        if (sep) {
                *sep = 0;
                sep++;
        }

        memset(&td, 0, sizeof(struct thread_data));
        td.o.ioengine = (char *)engine;
        io_ops = load_ioengine(&td);

        if (!io_ops) {
                log_info("IO engine %s not found\n", engine);
                return 1;
        }

        if (io_ops->options)
                ret = show_cmd_help(io_ops->options, sep);
        else
                log_info("IO engine %s has no options\n", io_ops->name);

        free_ioengine(&td);
        return ret;
}