Disable io_submit_mode=offload with async engines
[fio.git] / ioengines.c
CommitLineData
ebac4655
JA
1/*
2 * The io parts of the fio tool, includes workers for sync and mmap'ed
3 * io, as well as both posix and linux libaio support.
4 *
5 * sync io is implemented on top of aio.
6 *
7 * This is not really specific to fio, if the get_io_u/put_io_u and
8 * structures was pulled into this as well it would be a perfectly
9 * generic io engine that could be used for other projects.
10 *
11 */
ebac4655
JA
12#include <stdlib.h>
13#include <unistd.h>
5c4e1dbc 14#include <string.h>
2866c82d 15#include <dlfcn.h>
ecc314ba 16#include <fcntl.h>
0c6e7517 17#include <assert.h>
8c16d840 18
ebac4655 19#include "fio.h"
7c9b1bce 20#include "diskutil.h"
bfbdd35b 21#include "zbd.h"
ebac4655 22
01743ee1 23static FLIST_HEAD(engine_list);
5f350952 24
abfd235a 25static bool check_engine_ops(struct thread_data *td, struct ioengine_ops *ops)
8c16d840 26{
5f350952 27 if (ops->version != FIO_IOOPS_VERSION) {
5ec10eaa
JA
28 log_err("bad ioops version %d (want %d)\n", ops->version,
29 FIO_IOOPS_VERSION);
1cc954ba 30 return true;
5f350952
JA
31 }
32
36167d82
JA
33 if (!ops->queue) {
34 log_err("%s: no queue handler\n", ops->name);
1cc954ba 35 return true;
36167d82
JA
36 }
37
38 /*
39 * sync engines only need a ->queue()
40 */
41 if (ops->flags & FIO_SYNCIO)
1cc954ba 42 return false;
5ec10eaa 43
abfd235a
JA
44 /*
45 * async engines aren't reliable with offload
46 */
47 if (td->o.io_submit_mode == IO_MODE_OFFLOAD) {
48 log_err("%s: can't be used with offloaded submit. Use a sync "
49 "engine\n", ops->name);
50 return true;
51 }
52
1cc954ba
JA
53 if (!ops->event || !ops->getevents) {
54 log_err("%s: no event/getevents handler\n", ops->name);
55 return true;
8c16d840 56 }
5ec10eaa 57
1cc954ba 58 return false;
8c16d840
JA
59}
60
/*
 * Remove an ioengine from the global registration list.
 * flist_del_init() re-initializes the node, so the ops structure can be
 * safely re-registered later.
 */
void unregister_ioengine(struct ioengine_ops *ops)
{
	dprint(FD_IO, "ioengine %s unregistered\n", ops->name);
	flist_del_init(&ops->list);
}
66
/*
 * Add an ioengine to the tail of the global registration list, making it
 * discoverable by name through find_ioengine().
 */
void register_ioengine(struct ioengine_ops *ops)
{
	dprint(FD_IO, "ioengine %s registered\n", ops->name);
	flist_add_tail(&ops->list, &engine_list);
}
72
73static struct ioengine_ops *find_ioengine(const char *name)
74{
75 struct ioengine_ops *ops;
01743ee1 76 struct flist_head *entry;
ebac4655 77
01743ee1
JA
78 flist_for_each(entry, &engine_list) {
79 ops = flist_entry(entry, struct ioengine_ops, list);
bc5b77a8 80 if (!strcmp(name, ops->name))
5f350952
JA
81 return ops;
82 }
83
84 return NULL;
85}
86
5a8a6a03
YK
#ifdef CONFIG_DYNAMIC_ENGINES
/*
 * Try to dlopen() an engine that ships as a separate fio-engine-* package,
 * looked up as lib<engine>.so under the fixed external engine directory.
 * Returns the dlopen() handle, or NULL (with an informational log message)
 * on failure.
 */
static void *dlopen_external(struct thread_data *td, const char *engine)
{
	char engine_path[PATH_MAX];
	void *dlhandle;

	/*
	 * Use snprintf rather than sprintf: an overly long engine name must
	 * not overflow the on-stack path buffer. A truncated path can never
	 * name the right library, so fail it outright.
	 */
	if (snprintf(engine_path, sizeof(engine_path), "%s/lib%s.so",
		     FIO_EXT_ENG_DIR, engine) >= (int) sizeof(engine_path)) {
		log_info("Engine name %s is too long\n", engine);
		return NULL;
	}

	dlhandle = dlopen(engine_path, RTLD_LAZY);
	if (!dlhandle)
		log_info("Engine %s not found; Either name is invalid, was not built, or fio-engine-%s package is missing.\n",
			engine, engine);

	return dlhandle;
}
#else
#define dlopen_external(td, engine) (NULL)
#endif
105
5f350952
JA
/*
 * Load an ioengine from a shared object, trying the given name first as a
 * direct dlopen() path and then as an external engine package. Resolves the
 * ioengine ops structure from the loaded object and stashes the dlopen
 * handle in td->io_ops_dlhandle for later dlclose(). Returns NULL on any
 * failure (error reported via td_vmsg()).
 */
static struct ioengine_ops *dlopen_ioengine(struct thread_data *td,
					    const char *engine_lib)
{
	struct ioengine_ops *ops;
	void *dlhandle;

	dprint(FD_IO, "dload engine %s\n", engine_lib);

	/* Clear any stale error state so dlerror() below is meaningful. */
	dlerror();
	dlhandle = dlopen(engine_lib, RTLD_LAZY);
	if (!dlhandle) {
		/* Fall back to the packaged external engine directory. */
		dlhandle = dlopen_external(td, engine_lib);
		if (!dlhandle) {
			td_vmsg(td, -1, dlerror(), "dlopen");
			return NULL;
		}
	}

	/*
	 * Unlike the included modules, external engines should have a
	 * non-static ioengine structure that we can reference. Try the
	 * engine name first, then the generic "ioengine" symbol.
	 */
	ops = dlsym(dlhandle, engine_lib);
	if (!ops)
		ops = dlsym(dlhandle, "ioengine");

	/*
	 * For some external engines (like C++ ones) it is not that trivial
	 * to provide a non-static ioengine structure that we can reference.
	 * Instead we call a method which allocates the required ioengine
	 * structure.
	 */
	if (!ops) {
		get_ioengine_t get_ioengine = dlsym(dlhandle, "get_ioengine");

		if (get_ioengine)
			get_ioengine(&ops);
	}

	if (!ops) {
		td_vmsg(td, -1, dlerror(), "dlsym");
		dlclose(dlhandle);
		return NULL;
	}

	/* Keep the handle so free_ioengine() can dlclose() it later. */
	td->io_ops_dlhandle = dlhandle;
	return ops;
}
154
044be36e 155static struct ioengine_ops *__load_ioengine(const char *engine)
97bb54c9 156{
97bb54c9
TK
157 /*
158 * linux libaio has alias names, so convert to what we want
159 */
59cd9313 160 if (!strncmp(engine, "linuxaio", 8)) {
044be36e
BVA
161 dprint(FD_IO, "converting ioengine name: %s -> libaio\n",
162 engine);
163 engine = "libaio";
966fcbd4 164 }
97bb54c9
TK
165
166 dprint(FD_IO, "load ioengine %s\n", engine);
167 return find_ioengine(engine);
168}
169
4fedf59a 170struct ioengine_ops *load_ioengine(struct thread_data *td)
5f350952 171{
81647a9a 172 struct ioengine_ops *ops = NULL;
ba872a0b 173 const char *name;
81647a9a 174
ba872a0b
TK
175 /*
176 * Use ->ioengine_so_path if an external ioengine path is specified.
177 * In this case, ->ioengine is "external" which also means the prefix
178 * for external ioengines "external:" is properly used.
179 */
180 name = td->o.ioengine_so_path ?: td->o.ioengine;
181
182 /*
183 * Try to load ->ioengine first, and if failed try to dlopen(3) either
184 * ->ioengine or ->ioengine_so_path. This is redundant for an external
185 * ioengine with prefix, and also leaves the possibility of unexpected
186 * behavior (e.g. if the "external" ioengine exists), but we do this
187 * so as not to break job files not using the prefix.
188 */
189 ops = __load_ioengine(td->o.ioengine);
190 if (!ops)
5f350952
JA
191 ops = dlopen_ioengine(td, name);
192
ba872a0b
TK
193 /*
194 * If ops is NULL, we failed to load ->ioengine, and also failed to
195 * dlopen(3) either ->ioengine or ->ioengine_so_path as a path.
196 */
5f350952
JA
197 if (!ops) {
198 log_err("fio: engine %s not loadable\n", name);
b902ceb5
JA
199 return NULL;
200 }
201
8c16d840
JA
202 /*
203 * Check that the required methods are there.
204 */
abfd235a 205 if (check_engine_ops(td, ops))
8c16d840 206 return NULL;
8c16d840 207
565e784d 208 return ops;
8756e4d4
JA
209}
210
de890a1e
SL
211/*
212 * For cleaning up an ioengine which never made it to init().
213 */
214void free_ioengine(struct thread_data *td)
8756e4d4 215{
de890a1e 216 dprint(FD_IO, "free ioengine %s\n", td->io_ops->name);
ee56ad50 217
de890a1e
SL
218 if (td->eo && td->io_ops->options) {
219 options_free(td->io_ops->options, td->eo);
220 free(td->eo);
221 td->eo = NULL;
2992b059 222 }
b990b5c0 223
9b50942e 224 if (td->io_ops_dlhandle) {
565e784d 225 dlclose(td->io_ops_dlhandle);
9b50942e
JA
226 td->io_ops_dlhandle = NULL;
227 }
5f350952 228
84585003 229 td->io_ops = NULL;
b990b5c0 230}
10ba535a 231
de890a1e
SL
/*
 * Shut down an initialized ioengine: run its ->cleanup() hook (dropping
 * the engine-private data pointer), then release the engine itself via
 * free_ioengine().
 */
void close_ioengine(struct thread_data *td)
{
	dprint(FD_IO, "close ioengine %s\n", td->io_ops->name);

	if (td->io_ops->cleanup) {
		td->io_ops->cleanup(td);
		/* ->cleanup() owns and frees the engine-private data */
		td->io_ops_data = NULL;
	}

	free_ioengine(td);
}
243
10ba535a
JA
244int td_io_prep(struct thread_data *td, struct io_u *io_u)
245{
ee56ad50 246 dprint_io_u(io_u, "prep");
7101d9c2
JA
247 fio_ro_check(td, io_u);
248
4d4e80f2 249 lock_file(td, io_u->file, io_u->ddir);
b2bd2bd9 250
2ba1c290
JA
251 if (td->io_ops->prep) {
252 int ret = td->io_ops->prep(td, io_u);
253
e5f9a813
RE
254 dprint(FD_IO, "prep: io_u %p: ret=%d\n", io_u, ret);
255
b2bd2bd9 256 if (ret)
4d4e80f2 257 unlock_file(td, io_u->file);
2ba1c290
JA
258 return ret;
259 }
10ba535a
JA
260
261 return 0;
262}
263
/*
 * Reap between min and max completion events from the async engine,
 * committing any pending submissions first when a minimum is required.
 * Updates the in-flight accounting by however many events were reaped.
 * Returns the number of events reaped, or a negative error.
 */
int td_io_getevents(struct thread_data *td, unsigned int min, unsigned int max,
	const struct timespec *t)
{
	int r = 0;

	/*
	 * For ioengine=rdma one side operation RDMA_WRITE or RDMA_READ,
	 * server side gets a message from the client
	 * side that the task is finished, and
	 * td->done is set to 1 after td_io_commit(). In this case,
	 * there is no need to reap complete event in server side.
	 */
	if (td->done)
		return 0;

	/*
	 * If we have to wait for at least one event, make sure everything
	 * queued so far has actually been submitted to the engine.
	 */
	if (min > 0 && td->io_ops->commit) {
		r = td->io_ops->commit(td);
		if (r < 0)
			goto out;
	}

	/* Clamp max to what is actually outstanding, but never below min. */
	if (max > td->cur_depth)
		max = td->cur_depth;
	if (min > max)
		max = min;

	r = 0;
	if (max && td->io_ops->getevents)
		r = td->io_ops->getevents(td, min, max, t);
out:
	if (r >= 0) {
		/*
		 * Reflect that our submitted requests were retrieved with
		 * whatever OS async calls are in the underlying engine.
		 */
		td->io_u_in_flight -= r;
		io_u_mark_complete(td, r);
	} else
		td_verror(td, r, "get_events");

	dprint(FD_IO, "getevents: %d\n", r);
	return r;
}
306
/*
 * Submit one io_u to the ioengine. Handles all the bookkeeping around the
 * actual ->queue() call: flight flags, issue-time stamping, issue/byte
 * accounting (rolled back again on FIO_Q_BUSY), error propagation, and
 * batched commit for queued async IO. Returns the engine's queue status.
 */
enum fio_q_status td_io_queue(struct thread_data *td, struct io_u *io_u)
{
	const enum fio_ddir ddir = acct_ddir(io_u);
	unsigned long long buflen = io_u->xfer_buflen;
	enum fio_q_status ret;

	dprint_io_u(io_u, "queue");
	fio_ro_check(td, io_u);

	/* An io_u must not be queued while it is already in flight. */
	assert((io_u->flags & IO_U_F_FLIGHT) == 0);
	io_u_set(td, io_u, IO_U_F_FLIGHT);

	/*
	 * If overlap checking was enabled in offload mode we
	 * can release this lock that was acquired when we
	 * started the overlap check because the IO_U_F_FLIGHT
	 * flag is now set
	 */
	if (td_offload_overlap(td)) {
		int res = pthread_mutex_unlock(&overlap_check);
		assert(res == 0);
	}

	assert(fio_file_open(io_u->file));

	/*
	 * If using a write iolog, store this entry.
	 */
	log_io_u(td, io_u);

	io_u->error = 0;
	io_u->resid = 0;

	/*
	 * Sync engines (and trims on engines that handle trim
	 * synchronously) complete inside ->queue(), so stamp the issue
	 * time before the call.
	 */
	if (td_ioengine_flagged(td, FIO_SYNCIO) ||
	    (td_ioengine_flagged(td, FIO_ASYNCIO_SYNC_TRIM) &&
	     io_u->ddir == DDIR_TRIM)) {
		if (fio_fill_issue_time(td))
			fio_gettime(&io_u->issue_time, NULL);

		/*
		 * only used for iolog
		 */
		if (td->o.read_iolog_file)
			memcpy(&td->last_issue, &io_u->issue_time,
					sizeof(io_u->issue_time));
	}

	/* Account the issue up front; rolled back below on FIO_Q_BUSY. */
	if (ddir_rw(ddir)) {
		if (!(io_u->flags & IO_U_F_VER_LIST)) {
			td->io_issues[ddir]++;
			td->io_issue_bytes[ddir] += buflen;
		}
		td->rate_io_issue_bytes[ddir] += buflen;
	}

	ret = td->io_ops->queue(td, io_u);
	zbd_queue_io_u(td, io_u, ret);

	unlock_file(td, io_u->file);

	/* The engine couldn't take it: undo the issue accounting. */
	if (ret == FIO_Q_BUSY && ddir_rw(ddir)) {
		td->io_issues[ddir]--;
		td->io_issue_bytes[ddir] -= buflen;
		td->rate_io_issue_bytes[ddir] -= buflen;
		io_u_clear(td, io_u, IO_U_F_FLIGHT);
	}

	/*
	 * If an error was seen and the io engine didn't propagate it
	 * back to 'td', do so.
	 */
	if (io_u->error && !td->error)
		td_verror(td, io_u->error, "td_io_queue");

	/*
	 * Add warning for O_DIRECT so that users have an easier time
	 * spotting potentially bad alignment. If this triggers for the first
	 * IO, then it's likely an alignment problem or because the host fs
	 * does not support O_DIRECT
	 */
	if (io_u->error == EINVAL && td->io_issues[io_u->ddir & 1] == 1 &&
	    td->o.odirect) {

		log_info("fio: first direct IO errored. File system may not "
			"support direct IO, or iomem_align= is bad, or "
			"invalid block size. Try setting direct=0.\n");
	}

	if (zbd_unaligned_write(io_u->error) &&
	    td->io_issues[io_u->ddir & 1] == 1 &&
	    td->o.zone_mode != ZONE_MODE_ZBD) {
		log_info("fio: first I/O failed. If %s is a zoned block device, consider --zonemode=zbd\n",
			 io_u->file->file_name);
	}

	/*
	 * Engines without a ->commit() complete inline; mark submit and
	 * completion right away.
	 */
	if (!td->io_ops->commit) {
		io_u_mark_submit(td, 1);
		io_u_mark_complete(td, 1);
		zbd_put_io_u(td, io_u);
	}

	if (ret == FIO_Q_COMPLETED) {
		if (ddir_rw(io_u->ddir) ||
		    (ddir_sync(io_u->ddir) && td->runstate != TD_FSYNCING)) {
			io_u_mark_depth(td, 1);
			td->ts.total_io_u[io_u->ddir]++;
		}
	} else if (ret == FIO_Q_QUEUED) {
		td->io_u_queued++;

		if (ddir_rw(io_u->ddir) ||
		    (ddir_sync(io_u->ddir) && td->runstate != TD_FSYNCING))
			td->ts.total_io_u[io_u->ddir]++;

		/* Flush to the engine once a full batch has accumulated. */
		if (td->io_u_queued >= td->o.iodepth_batch)
			td_io_commit(td);
	}

	/*
	 * Async engines: the issue time is taken after ->queue() returns,
	 * since submission only happens then.
	 */
	if (!td_ioengine_flagged(td, FIO_SYNCIO) &&
	    (!td_ioengine_flagged(td, FIO_ASYNCIO_SYNC_TRIM) ||
	     io_u->ddir != DDIR_TRIM)) {
		if (fio_fill_issue_time(td))
			fio_gettime(&io_u->issue_time, NULL);

		/*
		 * only used for iolog
		 */
		if (td->o.read_iolog_file)
			memcpy(&td->last_issue, &io_u->issue_time,
					sizeof(io_u->issue_time));
	}

	return ret;
}
8c16d840
JA
442
443int td_io_init(struct thread_data *td)
444{
eeb12160 445 int ret = 0;
8c16d840 446
eeb12160
JA
447 if (td->io_ops->init) {
448 ret = td->io_ops->init(td);
356ef1a1
TK
449 if (ret)
450 log_err("fio: io engine %s init failed.%s\n",
451 td->io_ops->name,
452 td->o.iodepth > 1 ?
453 " Perhaps try reducing io depth?" : "");
454 else
455 td->io_ops_init = 1;
7c973896
JA
456 if (!td->error)
457 td->error = ret;
eeb12160
JA
458 }
459
460 return ret;
8c16d840 461}
755200a3 462
/*
 * Push all locally queued io_us down to the async engine via its
 * ->commit() hook, moving them from the "queued" count into the
 * "in flight" count. A no-op when nothing is queued.
 */
void td_io_commit(struct thread_data *td)
{
	int ret;

	dprint(FD_IO, "calling ->commit(), depth %d\n", td->cur_depth);

	if (!td->cur_depth || !td->io_u_queued)
		return;

	io_u_mark_depth(td, td->io_u_queued);

	if (td->io_ops->commit) {
		ret = td->io_ops->commit(td);
		/* engines return a negative errno on commit failure */
		if (ret)
			td_verror(td, -ret, "io commit");
	}

	/*
	 * Reflect that events were submitted as async IO requests.
	 */
	td->io_u_in_flight += td->io_u_queued;
	td->io_u_queued = 0;
}
b5af8293
JA
486
/*
 * Open a file through the engine's ->open_file() hook and apply the
 * post-open policy: cache invalidation, fadvise hints, write hints and
 * direct-IO setup as configured. Takes a reference on the file and bumps
 * the open-file count. Returns 0 on success, 1 on failure (with the
 * partially opened file torn down again via the err path).
 */
int td_io_open_file(struct thread_data *td, struct fio_file *f)
{
	if (fio_file_closing(f)) {
		/*
		 * Open translates to undo closing.
		 */
		fio_file_clear_closing(f);
		get_file(f);
		return 0;
	}
	assert(!fio_file_open(f));
	assert(f->fd == -1);
	assert(td->io_ops->open_file);

	if (td->io_ops->open_file(td, f)) {
		if (td->error == EINVAL && td->o.odirect)
			log_err("fio: destination does not support O_DIRECT\n");
		if (td->error == EMFILE) {
			log_err("fio: try reducing/setting openfiles (failed"
				" at %u of %u)\n", td->nr_open_files,
				td->o.nr_files);
		}

		/* a failed ->open_file() must leave the file untouched */
		assert(f->fd == -1);
		assert(!fio_file_open(f));
		return 1;
	}

	fio_file_reset(td, f);
	fio_file_set_open(f);
	fio_file_clear_closing(f);
	disk_util_inc(f->du);

	td->nr_open_files++;
	get_file(f);

	/* Pipes cannot seek, which random IO requires. */
	if (f->filetype == FIO_TYPE_PIPE) {
		if (td_random(td)) {
			log_err("fio: can't seek on pipes (no random io)\n");
			goto err;
		}
	}

	/* Diskless engines have no backing fd to tune; we're done. */
	if (td_ioengine_flagged(td, FIO_DISKLESSIO))
		goto done;

	if (td->o.invalidate_cache && file_invalidate_cache(td, f))
		goto err;

	/* Map the configured fadvise policy onto a POSIX_FADV_* flag. */
	if (td->o.fadvise_hint != F_ADV_NONE &&
	    (f->filetype == FIO_TYPE_BLOCK || f->filetype == FIO_TYPE_FILE)) {
		int flags;

		if (td->o.fadvise_hint == F_ADV_TYPE) {
			if (td_random(td))
				flags = POSIX_FADV_RANDOM;
			else
				flags = POSIX_FADV_SEQUENTIAL;
		} else if (td->o.fadvise_hint == F_ADV_RANDOM)
			flags = POSIX_FADV_RANDOM;
		else if (td->o.fadvise_hint == F_ADV_SEQUENTIAL)
			flags = POSIX_FADV_SEQUENTIAL;
		else {
			log_err("fio: unknown fadvise type %d\n",
				td->o.fadvise_hint);
			flags = POSIX_FADV_NORMAL;
		}

		/* advisory only: warn once, but don't fail the open */
		if (posix_fadvise(f->fd, f->file_offset, f->io_size, flags) < 0) {
			if (!fio_did_warn(FIO_WARN_FADVISE))
				log_err("fio: fadvise hint failed\n");
		}
	}
#ifdef FIO_HAVE_WRITE_HINT
	if (fio_option_is_set(&td->o, write_hint) &&
	    (f->filetype == FIO_TYPE_BLOCK || f->filetype == FIO_TYPE_FILE)) {
		uint64_t hint = td->o.write_hint;
		int cmd;

		/*
		 * For direct IO, we just need/want to set the hint on
		 * the file descriptor. For buffered IO, we need to set
		 * it on the inode.
		 */
		if (td->o.odirect)
			cmd = F_SET_FILE_RW_HINT;
		else
			cmd = F_SET_RW_HINT;

		if (fcntl(f->fd, cmd, &hint) < 0) {
			td_verror(td, errno, "fcntl write hint");
			goto err;
		}
	}
#endif

	/* Platforms without O_DIRECT may still support direct IO via ioctl. */
	if (td->o.odirect && !OS_O_DIRECT && fio_set_directio(td, f))
		goto err;

done:
	log_file(td, f, FIO_LOG_OPEN_FILE);
	return 0;
err:
	disk_util_dec(f->du);
	if (td->io_ops->close_file)
		td->io_ops->close_file(td, f);
	return 1;
}
595
/*
 * Request a file close. The file is only marked as closing here; the real
 * close happens when the last IO against it completes and the reference
 * count drops (via put_file()). Returns the put_file() result.
 */
int td_io_close_file(struct thread_data *td, struct fio_file *f)
{
	if (!fio_file_closing(f))
		log_file(td, f, FIO_LOG_CLOSE_FILE);

	/*
	 * mark as closing, do real close when last io on it has completed
	 */
	fio_file_set_closing(f);

	return put_file(td, f);
}
df9c26b1 608
38ef9c90
CF
609int td_io_unlink_file(struct thread_data *td, struct fio_file *f)
610{
611 if (td->io_ops->unlink_file)
612 return td->io_ops->unlink_file(td, f);
2442c935
JA
613 else {
614 int ret;
615
616 ret = unlink(f->file_name);
617 if (ret < 0)
618 return errno;
619
620 return 0;
621 }
38ef9c90
CF
622}
623
df9c26b1
JA
624int td_io_get_file_size(struct thread_data *td, struct fio_file *f)
625{
626 if (!td->io_ops->get_file_size)
627 return 0;
628
629 return td->io_ops->get_file_size(td, f);
630}
44f29692 631
de890a1e
SL
/*
 * Show help for ioengines. With no engine given, list all registered
 * engines. With "engine" or "engine,option", load that engine and show
 * its option help (or all options when none is named). Returns 0 on
 * success, non-zero on failure.
 */
int fio_show_ioengine_help(const char *engine)
{
	struct flist_head *entry;
	struct thread_data td;
	struct ioengine_ops *io_ops;
	char *sep;
	int ret = 1;

	if (!engine || !*engine) {
		log_info("Available IO engines:\n");
		flist_for_each(entry, &engine_list) {
			io_ops = flist_entry(entry, struct ioengine_ops, list);
			log_info("\t%s\n", io_ops->name);
		}
		return 0;
	}

	/*
	 * Split "engine,option" at the comma.
	 * NOTE(review): *sep = 0 writes through the const-qualified caller
	 * string — callers must pass writable storage; confirm this holds
	 * for all call sites.
	 */
	sep = strchr(engine, ',');
	if (sep) {
		*sep = 0;
		sep++;
	}

	/* A throwaway thread_data is enough to drive load_ioengine(). */
	memset(&td, 0, sizeof(struct thread_data));
	td.o.ioengine = (char *)engine;
	io_ops = load_ioengine(&td);

	if (!io_ops) {
		log_info("IO engine %s not found\n", engine);
		return 1;
	}

	if (io_ops->options)
		ret = show_cmd_help(io_ops->options, sep);
	else
		log_info("IO engine %s has no options\n", io_ops->name);

	free_ioengine(&td);
	return ret;
}