engines/io_uring: mark as not compatible with io_submit_mode=offload
[fio.git] / ioengines.c
/*
 * The io parts of the fio tool, includes workers for sync and mmap'ed
 * io, as well as both posix and linux libaio support.
 *
 * sync io is implemented on top of aio.
 *
 * This is not really specific to fio; if the get_io_u/put_io_u helpers
 * and related structures were pulled into this as well, it would be a
 * perfectly generic io engine that could be used for other projects.
 *
 */
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <dlfcn.h>
#include <fcntl.h>
#include <assert.h>

#include "fio.h"
#include "diskutil.h"
#include "zbd.h"

static FLIST_HEAD(engine_list);

static bool check_engine_ops(struct ioengine_ops *ops)
{
	if (ops->version != FIO_IOOPS_VERSION) {
		log_err("bad ioops version %d (want %d)\n", ops->version,
			FIO_IOOPS_VERSION);
		return true;
	}

	if (!ops->queue) {
		log_err("%s: no queue handler\n", ops->name);
		return true;
	}

	/*
	 * sync engines only need a ->queue()
	 */
	if (ops->flags & FIO_SYNCIO)
		return false;

	if (!ops->event || !ops->getevents) {
		log_err("%s: no event/getevents handler\n", ops->name);
		return true;
	}

	return false;
}

void unregister_ioengine(struct ioengine_ops *ops)
{
	dprint(FD_IO, "ioengine %s unregistered\n", ops->name);
	flist_del_init(&ops->list);
}

void register_ioengine(struct ioengine_ops *ops)
{
	dprint(FD_IO, "ioengine %s registered\n", ops->name);
	flist_add_tail(&ops->list, &engine_list);
}

static struct ioengine_ops *find_ioengine(const char *name)
{
	struct ioengine_ops *ops;
	struct flist_head *entry;

	flist_for_each(entry, &engine_list) {
		ops = flist_entry(entry, struct ioengine_ops, list);
		if (!strcmp(name, ops->name))
			return ops;
	}

	return NULL;
}

#ifdef CONFIG_DYNAMIC_ENGINES
static void *dlopen_external(struct thread_data *td, const char *engine)
{
	char engine_path[PATH_MAX];
	void *dlhandle;

	sprintf(engine_path, "%s/lib%s.so", FIO_EXT_ENG_DIR, engine);

	dlhandle = dlopen(engine_path, RTLD_LAZY);
	if (!dlhandle)
		log_info("Engine %s not found; Either name is invalid, was not built, or fio-engine-%s package is missing.\n",
			 engine, engine);

	return dlhandle;
}
#else
#define dlopen_external(td, engine) (NULL)
#endif

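/*
 * A minimal sketch (illustrative names only, not part of this file) of what
 * an external engine is expected to export so that dlopen_ioengine() below
 * can resolve it: either a non-static struct ioengine_ops, named "ioengine"
 * or after the library itself, or a get_ioengine() function that hands back
 * a pointer to one:
 *
 *	struct ioengine_ops ioengine = {
 *		.name		= "ext_example",
 *		.version	= FIO_IOOPS_VERSION,
 *		.flags		= FIO_SYNCIO,
 *		.queue		= ext_example_queue,
 *	};
 *
 *	void get_ioengine(struct ioengine_ops **ops)
 *	{
 *		*ops = &ioengine;
 *	}
 */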
static struct ioengine_ops *dlopen_ioengine(struct thread_data *td,
					    const char *engine_lib)
{
	struct ioengine_ops *ops;
	void *dlhandle;

	dprint(FD_IO, "dload engine %s\n", engine_lib);

	dlerror();
	dlhandle = dlopen(engine_lib, RTLD_LAZY);
	if (!dlhandle) {
		dlhandle = dlopen_external(td, engine_lib);
		if (!dlhandle) {
			td_vmsg(td, -1, dlerror(), "dlopen");
			return NULL;
		}
	}

	/*
	 * Unlike the included modules, external engines should have a
	 * non-static ioengine structure that we can reference.
	 */
	ops = dlsym(dlhandle, engine_lib);
	if (!ops)
		ops = dlsym(dlhandle, "ioengine");

	/*
	 * For some external engines (like C++ ones) it is not that trivial
	 * to provide a non-static ioengine structure that we can reference.
	 * Instead we call a method which allocates the required ioengine
	 * structure.
	 */
	if (!ops) {
		get_ioengine_t get_ioengine = dlsym(dlhandle, "get_ioengine");

		if (get_ioengine)
			get_ioengine(&ops);
	}

	if (!ops) {
		td_vmsg(td, -1, dlerror(), "dlsym");
		dlclose(dlhandle);
		return NULL;
	}

	td->io_ops_dlhandle = dlhandle;
	return ops;
}

static struct ioengine_ops *__load_ioengine(const char *engine)
{
	/*
	 * linux libaio has alias names, so convert to what we want
	 */
	if (!strncmp(engine, "linuxaio", 8)) {
		dprint(FD_IO, "converting ioengine name: %s -> libaio\n",
		       engine);
		engine = "libaio";
	}

	dprint(FD_IO, "load ioengine %s\n", engine);
	return find_ioengine(engine);
}

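/*
 * An external engine is typically selected in the job file with the
 * "external:" prefix, e.g. (hypothetical path):
 *
 *	ioengine=external:/path/to/libfoo.so
 *
 * in which case ->ioengine is "external" and the library path ends up in
 * ->ioengine_so_path, as described below.
 */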
struct ioengine_ops *load_ioengine(struct thread_data *td)
{
	struct ioengine_ops *ops = NULL;
	const char *name;

	/*
	 * Use ->ioengine_so_path if an external ioengine path is specified.
	 * In this case, ->ioengine is "external" which also means the prefix
	 * for external ioengines "external:" is properly used.
	 */
	name = td->o.ioengine_so_path ?: td->o.ioengine;

	/*
	 * Try to load ->ioengine first, and if that fails try to dlopen(3)
	 * either ->ioengine or ->ioengine_so_path. This is redundant for an
	 * external ioengine with prefix, and also leaves the possibility of
	 * unexpected behavior (e.g. if the "external" ioengine exists), but
	 * we do this so as not to break job files not using the prefix.
	 */
	ops = __load_ioengine(td->o.ioengine);
	if (!ops)
		ops = dlopen_ioengine(td, name);

	/*
	 * If ops is NULL, we failed to load ->ioengine, and also failed to
	 * dlopen(3) either ->ioengine or ->ioengine_so_path as a path.
	 */
	if (!ops) {
		log_err("fio: engine %s not loadable\n", name);
		return NULL;
	}

	/*
	 * Check that the required methods are there.
	 */
	if (check_engine_ops(ops))
		return NULL;

	return ops;
}

/*
 * For cleaning up an ioengine which never made it to init().
 */
void free_ioengine(struct thread_data *td)
{
	dprint(FD_IO, "free ioengine %s\n", td->io_ops->name);

	if (td->eo && td->io_ops->options) {
		options_free(td->io_ops->options, td->eo);
		free(td->eo);
		td->eo = NULL;
	}

	if (td->io_ops_dlhandle) {
		dlclose(td->io_ops_dlhandle);
		td->io_ops_dlhandle = NULL;
	}

	td->io_ops = NULL;
}

void close_ioengine(struct thread_data *td)
{
	dprint(FD_IO, "close ioengine %s\n", td->io_ops->name);

	if (td->io_ops->cleanup) {
		td->io_ops->cleanup(td);
		td->io_ops_data = NULL;
	}

	free_ioengine(td);
}

int td_io_prep(struct thread_data *td, struct io_u *io_u)
{
	dprint_io_u(io_u, "prep");
	fio_ro_check(td, io_u);

	lock_file(td, io_u->file, io_u->ddir);

	if (td->io_ops->prep) {
		int ret = td->io_ops->prep(td, io_u);

		dprint(FD_IO, "prep: io_u %p: ret=%d\n", io_u, ret);

		if (ret)
			unlock_file(td, io_u->file);
		return ret;
	}

	return 0;
}

int td_io_getevents(struct thread_data *td, unsigned int min, unsigned int max,
		    const struct timespec *t)
{
	int r = 0;

	/*
	 * For ioengine=rdma one-sided operations (RDMA_WRITE or RDMA_READ),
	 * the server side gets a message from the client side that the task
	 * is finished, and td->done is set to 1 after td_io_commit(). In
	 * this case, there is no need to reap completion events on the
	 * server side.
	 */
	if (td->done)
		return 0;

	if (min > 0 && td->io_ops->commit) {
		r = td->io_ops->commit(td);
		if (r < 0)
			goto out;
	}
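	/*
	 * Never reap more events than are currently pending, but still
	 * honor the caller's minimum if it exceeds that.
	 */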
	if (max > td->cur_depth)
		max = td->cur_depth;
	if (min > max)
		max = min;

	r = 0;
	if (max && td->io_ops->getevents)
		r = td->io_ops->getevents(td, min, max, t);
out:
	if (r >= 0) {
		/*
		 * Reflect that our submitted requests were retrieved with
		 * whatever OS async calls are in the underlying engine.
		 */
		td->io_u_in_flight -= r;
		io_u_mark_complete(td, r);
	} else
		td_verror(td, r, "get_events");

	dprint(FD_IO, "getevents: %d\n", r);
	return r;
}

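/*
 * Hand one io_u to the io engine. The engine reports back whether the io
 * completed inline (FIO_Q_COMPLETED), was queued for async completion
 * (FIO_Q_QUEUED), or could not be accepted right now (FIO_Q_BUSY).
 */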
enum fio_q_status td_io_queue(struct thread_data *td, struct io_u *io_u)
{
	const enum fio_ddir ddir = acct_ddir(io_u);
	unsigned long long buflen = io_u->xfer_buflen;
	enum fio_q_status ret;

	dprint_io_u(io_u, "queue");
	fio_ro_check(td, io_u);

	assert((io_u->flags & IO_U_F_FLIGHT) == 0);
	io_u_set(td, io_u, IO_U_F_FLIGHT);

	/*
	 * If overlap checking was enabled in offload mode we
	 * can release this lock that was acquired when we
	 * started the overlap check because the IO_U_F_FLIGHT
	 * flag is now set
	 */
	if (td_offload_overlap(td)) {
		int res = pthread_mutex_unlock(&overlap_check);
		assert(res == 0);
	}

	assert(fio_file_open(io_u->file));

	/*
	 * If using a write iolog, store this entry.
	 */
	log_io_u(td, io_u);

	io_u->error = 0;
	io_u->resid = 0;

	if (td_ioengine_flagged(td, FIO_SYNCIO) ||
	    (td_ioengine_flagged(td, FIO_ASYNCIO_SYNC_TRIM) &&
	     io_u->ddir == DDIR_TRIM)) {
		if (fio_fill_issue_time(td))
			fio_gettime(&io_u->issue_time, NULL);

		/*
		 * only used for iolog
		 */
		if (td->o.read_iolog_file)
			memcpy(&td->last_issue, &io_u->issue_time,
			       sizeof(io_u->issue_time));
	}

	if (ddir_rw(ddir)) {
		if (!(io_u->flags & IO_U_F_VER_LIST)) {
			td->io_issues[ddir]++;
			td->io_issue_bytes[ddir] += buflen;
		}
		td->rate_io_issue_bytes[ddir] += buflen;
	}

	ret = td->io_ops->queue(td, io_u);
	zbd_queue_io_u(td, io_u, ret);

	unlock_file(td, io_u->file);

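	/*
	 * The engine could not accept the io_u (FIO_Q_BUSY), so back out
	 * the issue accounting done above; the io_u will be retried later.
	 */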
	if (ret == FIO_Q_BUSY && ddir_rw(ddir)) {
		td->io_issues[ddir]--;
		td->io_issue_bytes[ddir] -= buflen;
		td->rate_io_issue_bytes[ddir] -= buflen;
		io_u_clear(td, io_u, IO_U_F_FLIGHT);
	}

	/*
	 * If an error was seen and the io engine didn't propagate it
	 * back to 'td', do so.
	 */
	if (io_u->error && !td->error)
		td_verror(td, io_u->error, "td_io_queue");

	/*
	 * Add warning for O_DIRECT so that users have an easier time
	 * spotting potentially bad alignment. If this triggers for the first
	 * IO, then it's likely an alignment problem or because the host fs
	 * does not support O_DIRECT
	 */
	if (io_u->error == EINVAL && td->io_issues[io_u->ddir & 1] == 1 &&
	    td->o.odirect) {

		log_info("fio: first direct IO errored. File system may not "
			 "support direct IO, or iomem_align= is bad, or "
			 "invalid block size. Try setting direct=0.\n");
	}

	if (zbd_unaligned_write(io_u->error) &&
	    td->io_issues[io_u->ddir & 1] == 1 &&
	    td->o.zone_mode != ZONE_MODE_ZBD) {
		log_info("fio: first I/O failed. If %s is a zoned block device, consider --zonemode=zbd\n",
			 io_u->file->file_name);
	}

	if (!td->io_ops->commit) {
		io_u_mark_submit(td, 1);
		io_u_mark_complete(td, 1);
		zbd_put_io_u(td, io_u);
	}

	if (ret == FIO_Q_COMPLETED) {
		if (ddir_rw(io_u->ddir) ||
		    (ddir_sync(io_u->ddir) && td->runstate != TD_FSYNCING)) {
			io_u_mark_depth(td, 1);
			td->ts.total_io_u[io_u->ddir]++;
		}
	} else if (ret == FIO_Q_QUEUED) {
		td->io_u_queued++;

		if (ddir_rw(io_u->ddir) ||
		    (ddir_sync(io_u->ddir) && td->runstate != TD_FSYNCING))
			td->ts.total_io_u[io_u->ddir]++;

		if (td->io_u_queued >= td->o.iodepth_batch)
			td_io_commit(td);
	}

	if (!td_ioengine_flagged(td, FIO_SYNCIO) &&
	    (!td_ioengine_flagged(td, FIO_ASYNCIO_SYNC_TRIM) ||
	     io_u->ddir != DDIR_TRIM)) {
		if (fio_fill_issue_time(td))
			fio_gettime(&io_u->issue_time, NULL);

		/*
		 * only used for iolog
		 */
		if (td->o.read_iolog_file)
			memcpy(&td->last_issue, &io_u->issue_time,
			       sizeof(io_u->issue_time));
	}

	return ret;
}

int td_io_init(struct thread_data *td)
{
	int ret = 0;

	if (td->io_ops->init) {
		ret = td->io_ops->init(td);
		if (ret)
			log_err("fio: io engine %s init failed.%s\n",
				td->io_ops->name,
				td->o.iodepth > 1 ?
				" Perhaps try reducing io depth?" : "");
		else
			td->io_ops_init = 1;
		if (!td->error)
			td->error = ret;
	}

	return ret;
}

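/*
 * Push any io_us that have been queued via td_io_queue() but not yet
 * submitted down to the engine, and account them as in flight.
 */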
void td_io_commit(struct thread_data *td)
{
	int ret;

	dprint(FD_IO, "calling ->commit(), depth %d\n", td->cur_depth);

	if (!td->cur_depth || !td->io_u_queued)
		return;

	io_u_mark_depth(td, td->io_u_queued);

	if (td->io_ops->commit) {
		ret = td->io_ops->commit(td);
		if (ret)
			td_verror(td, -ret, "io commit");
	}

	/*
	 * Reflect that events were submitted as async IO requests.
	 */
	td->io_u_in_flight += td->io_u_queued;
	td->io_u_queued = 0;
}

int td_io_open_file(struct thread_data *td, struct fio_file *f)
{
	if (fio_file_closing(f)) {
		/*
		 * Open translates to undo closing.
		 */
		fio_file_clear_closing(f);
		get_file(f);
		return 0;
	}
	assert(!fio_file_open(f));
	assert(f->fd == -1);
	assert(td->io_ops->open_file);

	if (td->io_ops->open_file(td, f)) {
		if (td->error == EINVAL && td->o.odirect)
			log_err("fio: destination does not support O_DIRECT\n");
		if (td->error == EMFILE) {
			log_err("fio: try reducing/setting openfiles (failed"
				" at %u of %u)\n", td->nr_open_files,
				td->o.nr_files);
		}

		assert(f->fd == -1);
		assert(!fio_file_open(f));
		return 1;
	}

	fio_file_reset(td, f);
	fio_file_set_open(f);
	fio_file_clear_closing(f);
	disk_util_inc(f->du);

	td->nr_open_files++;
	get_file(f);

	if (f->filetype == FIO_TYPE_PIPE) {
		if (td_random(td)) {
			log_err("fio: can't seek on pipes (no random io)\n");
			goto err;
		}
	}

	if (td_ioengine_flagged(td, FIO_DISKLESSIO))
		goto done;

	if (td->o.invalidate_cache && file_invalidate_cache(td, f))
		goto err;

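	/*
	 * Pass an access pattern hint to the kernel via posix_fadvise():
	 * either derived from whether the job does random or sequential io,
	 * or forced by the fadvise_hint option.
	 */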
	if (td->o.fadvise_hint != F_ADV_NONE &&
	    (f->filetype == FIO_TYPE_BLOCK || f->filetype == FIO_TYPE_FILE)) {
		int flags;

		if (td->o.fadvise_hint == F_ADV_TYPE) {
			if (td_random(td))
				flags = POSIX_FADV_RANDOM;
			else
				flags = POSIX_FADV_SEQUENTIAL;
		} else if (td->o.fadvise_hint == F_ADV_RANDOM)
			flags = POSIX_FADV_RANDOM;
		else if (td->o.fadvise_hint == F_ADV_SEQUENTIAL)
			flags = POSIX_FADV_SEQUENTIAL;
		else {
			log_err("fio: unknown fadvise type %d\n",
				td->o.fadvise_hint);
			flags = POSIX_FADV_NORMAL;
		}

		if (posix_fadvise(f->fd, f->file_offset, f->io_size, flags) < 0) {
			if (!fio_did_warn(FIO_WARN_FADVISE))
				log_err("fio: fadvise hint failed\n");
		}
	}
#ifdef FIO_HAVE_WRITE_HINT
	if (fio_option_is_set(&td->o, write_hint) &&
	    (f->filetype == FIO_TYPE_BLOCK || f->filetype == FIO_TYPE_FILE)) {
		uint64_t hint = td->o.write_hint;
		int cmd;

		/*
		 * For direct IO, we just need/want to set the hint on
		 * the file descriptor. For buffered IO, we need to set
		 * it on the inode.
		 */
		if (td->o.odirect)
			cmd = F_SET_FILE_RW_HINT;
		else
			cmd = F_SET_RW_HINT;

		if (fcntl(f->fd, cmd, &hint) < 0) {
			td_verror(td, errno, "fcntl write hint");
			goto err;
		}
	}
#endif

	if (td->o.odirect && !OS_O_DIRECT && fio_set_directio(td, f))
		goto err;

done:
	log_file(td, f, FIO_LOG_OPEN_FILE);
	return 0;
err:
	disk_util_dec(f->du);
	if (td->io_ops->close_file)
		td->io_ops->close_file(td, f);
	return 1;
}

int td_io_close_file(struct thread_data *td, struct fio_file *f)
{
	if (!fio_file_closing(f))
		log_file(td, f, FIO_LOG_CLOSE_FILE);

	/*
	 * mark as closing, do real close when last io on it has completed
	 */
	fio_file_set_closing(f);

	return put_file(td, f);
}

int td_io_unlink_file(struct thread_data *td, struct fio_file *f)
{
	if (td->io_ops->unlink_file)
		return td->io_ops->unlink_file(td, f);
	else {
		int ret;

		ret = unlink(f->file_name);
		if (ret < 0)
			return errno;

		return 0;
	}
}

int td_io_get_file_size(struct thread_data *td, struct fio_file *f)
{
	if (!td->io_ops->get_file_size)
		return 0;

	return td->io_ops->get_file_size(td, f);
}

int fio_show_ioengine_help(const char *engine)
{
	struct flist_head *entry;
	struct thread_data td;
	struct ioengine_ops *io_ops;
	char *sep;
	int ret = 1;

	if (!engine || !*engine) {
		log_info("Available IO engines:\n");
		flist_for_each(entry, &engine_list) {
			io_ops = flist_entry(entry, struct ioengine_ops, list);
			log_info("\t%s\n", io_ops->name);
		}
		return 0;
	}
	sep = strchr(engine, ',');
	if (sep) {
		*sep = 0;
		sep++;
	}

	memset(&td, 0, sizeof(struct thread_data));
	td.o.ioengine = (char *)engine;
	io_ops = load_ioengine(&td);

	if (!io_ops) {
		log_info("IO engine %s not found\n", engine);
		return 1;
	}

	if (io_ops->options)
		ret = show_cmd_help(io_ops->options, sep);
	else
		log_info("IO engine %s has no options\n", io_ops->name);

	free_ioengine(&td);
	return ret;
}