/*
 * options.c - fio job/command line option table and parsing callbacks
 */

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <ctype.h>
#include <string.h>
#include <getopt.h>
#include <assert.h>

#include "fio.h"
#include "parse.h"

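/*
 * Byte offset of a member inside struct thread_options; a hand-rolled
 * offsetof(), used to tell the generic parser where to store each option.
 */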
#define td_var_offset(var) ((size_t) &((struct thread_options *)0)->var)

/*
 * Check if mmap/mmaphuge has a :/foo/bar/file at the end. If so, return that.
 */
static char *get_opt_postfix(const char *str)
{
	char *p = strstr(str, ":");

	if (!p)
		return NULL;

	p++;
	strip_blank_front(&p);
	strip_blank_end(p);
	return strdup(p);
}

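/*
 * 'mem'/'iomem' callback: for the mmap and mmaphuge backends an optional
 * ":/path" postfix names the backing file, and mmaphuge requires one.
 */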
static int str_mem_cb(void *data, const char *mem)
{
	struct thread_data *td = data;

	if (td->o.mem_type == MEM_MMAPHUGE || td->o.mem_type == MEM_MMAP) {
		td->mmapfile = get_opt_postfix(mem);
		if (td->o.mem_type == MEM_MMAPHUGE && !td->mmapfile) {
			log_err("fio: mmaphuge needs a backing file, e.g. mem=mmaphuge:/path/to/file\n");
			return 1;
		}
	}

	return 0;
}

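/*
 * 'lockmem' is a global setting, so the value goes into the shared
 * mlock_size variable instead of the per-job options.
 */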
static int str_lockmem_cb(void fio_unused *data, unsigned long *val)
{
	mlock_size = *val;
	return 0;
}

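/*
 * 'prio' and 'prioclass' are OR'ed together into td->ioprio, with the
 * class occupying the upper bits (IOPRIO_CLASS_SHIFT).
 */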
#ifdef FIO_HAVE_IOPRIO
static int str_prioclass_cb(void *data, unsigned int *val)
{
	struct thread_data *td = data;

	td->ioprio |= *val << IOPRIO_CLASS_SHIFT;
	return 0;
}

static int str_prio_cb(void *data, unsigned int *val)
{
	struct thread_data *td = data;

	td->ioprio |= *val;
	return 0;
}
#endif

static int str_exitall_cb(void)
{
	exitall_on_terminate = 1;
	return 0;
}

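/*
 * Turn an integer CPU bitmap into an os_cpu_mask_t. The mask is filled in
 * through a pointer so the bits set here actually reach the caller's copy.
 */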
static void fill_cpu_mask(os_cpu_mask_t *cpumask, int cpu)
{
#ifdef FIO_HAVE_CPU_AFFINITY
	unsigned int i;

	CPU_ZERO(cpumask);

	for (i = 0; i < sizeof(int) * 8; i++) {
		if ((1 << i) & cpu)
			CPU_SET(i, cpumask);
	}
#endif
}

static int str_cpumask_cb(void *data, unsigned int *val)
{
	struct thread_data *td = data;

	fill_cpu_mask(&td->o.cpumask, *val);
	return 0;
}

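/*
 * 'file_service_type' accepts an optional ":nr" postfix: the number of
 * IOs to issue to one file before switching to the next.
 */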
static int str_fst_cb(void *data, const char *str)
{
	struct thread_data *td = data;
	char *nr = get_opt_postfix(str);

	td->file_service_nr = 1;
	if (nr) {
		td->file_service_nr = atoi(nr);
		free(nr);
	}

	return 0;
}

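/*
 * 'filename' takes a ':'-separated list; every entry is added to the job
 * and counted in nr_files.
 */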
static int str_filename_cb(void *data, const char *input)
{
	struct thread_data *td = data;
	char *fname, *str, *p;

	p = str = strdup(input);

	strip_blank_front(&str);
	strip_blank_end(str);

	if (!td->files_index)
		td->o.nr_files = 0;

	while ((fname = strsep(&str, ":")) != NULL) {
		if (!strlen(fname))
			break;
		add_file(td, fname);
		td->o.nr_files++;
	}

	free(p);
	return 0;
}

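/*
 * Sanity check the 'directory' option: the path must exist and must be
 * a directory.
 */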
static int str_directory_cb(void *data, const char fio_unused *str)
{
	struct thread_data *td = data;
	struct stat sb;

	if (lstat(td->o.directory, &sb) < 0) {
		log_err("fio: lstat() on %s failed\n", td->o.directory);
		td_verror(td, errno, "lstat");
		return 1;
	}
	if (!S_ISDIR(sb.st_mode)) {
		log_err("fio: %s is not a directory\n", td->o.directory);
		return 1;
	}

	return 0;
}

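/*
 * 'opendir' recursively adds every file found under the given directory.
 */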
static int str_opendir_cb(void *data, const char fio_unused *str)
{
	struct thread_data *td = data;

	if (!td->files_index)
		td->o.nr_files = 0;

	return add_dir_files(td, td->o.opendir);
}


#define __stringify_1(x) #x
#define __stringify(x) __stringify_1(x)

/*
 * Map of job/command line options
 */
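/*
 * Each entry maps an option name to a parse type, optional default and
 * help text, and the offset(s) of its destination field(s) in struct
 * thread_options (via td_var_offset). Options needing extra handling
 * also set a ->cb callback; string options can restrict their accepted
 * values through ->posval.
 */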
static struct fio_option options[] = {
	{
		.name = "description",
		.type = FIO_OPT_STR_STORE,
		.off1 = td_var_offset(description),
		.help = "Text job description",
	},
	{
		.name = "name",
		.type = FIO_OPT_STR_STORE,
		.off1 = td_var_offset(name),
		.help = "Name of this job",
	},
	{
		.name = "directory",
		.type = FIO_OPT_STR_STORE,
		.off1 = td_var_offset(directory),
		.cb = str_directory_cb,
		.help = "Directory to store files in",
	},
	{
		.name = "filename",
		.type = FIO_OPT_STR_STORE,
		.off1 = td_var_offset(filename),
		.cb = str_filename_cb,
		.help = "File(s) to use for the workload",
	},
	{
		.name = "opendir",
		.type = FIO_OPT_STR_STORE,
		.off1 = td_var_offset(opendir),
		.cb = str_opendir_cb,
		.help = "Recursively add files from this directory and down",
	},
	{
		.name = "rw",
		.alias = "readwrite",
		.type = FIO_OPT_STR,
		.off1 = td_var_offset(td_ddir),
		.help = "IO direction",
		.def = "read",
		.posval = {
			{ .ival = "read",
			  .oval = TD_DDIR_READ,
			  .help = "Sequential read",
			},
			{ .ival = "write",
			  .oval = TD_DDIR_WRITE,
			  .help = "Sequential write",
			},
			{ .ival = "randread",
			  .oval = TD_DDIR_RANDREAD,
			  .help = "Random read",
			},
			{ .ival = "randwrite",
			  .oval = TD_DDIR_RANDWRITE,
			  .help = "Random write",
			},
			{ .ival = "rw",
			  .oval = TD_DDIR_RW,
			  .help = "Sequential read and write mix",
			},
			{ .ival = "randrw",
			  .oval = TD_DDIR_RANDRW,
			  .help = "Random read and write mix",
			},
		},
	},
	{
		.name = "ioengine",
		.type = FIO_OPT_STR_STORE,
		.off1 = td_var_offset(ioengine),
		.help = "IO engine to use",
		.def = "sync",
		.posval = {
			{ .ival = "sync",
			  .help = "Use read/write",
			},
#ifdef FIO_HAVE_LIBAIO
			{ .ival = "libaio",
			  .help = "Linux native asynchronous IO",
			},
#endif
#ifdef FIO_HAVE_POSIXAIO
			{ .ival = "posixaio",
			  .help = "POSIX asynchronous IO",
			},
#endif
			{ .ival = "mmap",
			  .help = "Memory mapped IO",
			},
#ifdef FIO_HAVE_SPLICE
			{ .ival = "splice",
			  .help = "splice/vmsplice based IO",
			},
#endif
#ifdef FIO_HAVE_SGIO
			{ .ival = "sg",
			  .help = "SCSI generic v3 IO",
			},
#endif
			{ .ival = "null",
			  .help = "Testing engine (no data transfer)",
			},
			{ .ival = "net",
			  .help = "Network IO",
			},
#ifdef FIO_HAVE_SYSLET
			{ .ival = "syslet-rw",
			  .help = "syslet enabled async pread/pwrite IO",
			},
#endif
			{ .ival = "cpuio",
			  .help = "CPU cycle burner engine",
			},
			{ .ival = "external",
			  .help = "Load external engine (append name)",
			},
		},
	},
	{
		.name = "iodepth",
		.type = FIO_OPT_INT,
		.off1 = td_var_offset(iodepth),
		.help = "Number of IO buffers to keep in flight",
		.def = "1",
	},
	{
		.name = "iodepth_batch",
		.type = FIO_OPT_INT,
		.off1 = td_var_offset(iodepth_batch),
		.help = "Number of IOs to submit in one go",
	},
	{
		.name = "iodepth_low",
		.type = FIO_OPT_INT,
		.off1 = td_var_offset(iodepth_low),
		.help = "Low water mark for queuing depth",
	},
	{
		.name = "size",
		.type = FIO_OPT_STR_VAL,
		.off1 = td_var_offset(size),
		.help = "Total size of device or files",
	},
	{
		.name = "filesize",
		.type = FIO_OPT_STR_VAL,
		.off1 = td_var_offset(file_size_low),
		.off2 = td_var_offset(file_size_high),
		.help = "Size of individual files",
	},
	{
		.name = "bs",
		.alias = "blocksize",
		.type = FIO_OPT_STR_VAL_INT,
		.off1 = td_var_offset(bs[DDIR_READ]),
		.off2 = td_var_offset(bs[DDIR_WRITE]),
		.help = "Block size unit",
		.def = "4k",
	},
	{
		.name = "bsrange",
		.alias = "blocksize_range",
		.type = FIO_OPT_RANGE,
		.off1 = td_var_offset(min_bs[DDIR_READ]),
		.off2 = td_var_offset(max_bs[DDIR_READ]),
		.off3 = td_var_offset(min_bs[DDIR_WRITE]),
		.off4 = td_var_offset(max_bs[DDIR_WRITE]),
		.help = "Set block size range (in more detail than bs)",
	},
	{
		.name = "bs_unaligned",
		.alias = "blocksize_unaligned",
		.type = FIO_OPT_STR_SET,
		.off1 = td_var_offset(bs_unaligned),
		.help = "Don't sector align IO buffer sizes",
	},
	{
		.name = "offset",
		.type = FIO_OPT_STR_VAL,
		.off1 = td_var_offset(start_offset),
		.help = "Start IO from this offset",
		.def = "0",
	},
	{
		.name = "randrepeat",
		.type = FIO_OPT_BOOL,
		.off1 = td_var_offset(rand_repeatable),
		.help = "Use repeatable random IO pattern",
		.def = "1",
	},
	{
		.name = "norandommap",
		.type = FIO_OPT_STR_SET,
		.off1 = td_var_offset(norandommap),
		.help = "Accept potential duplicate random blocks",
	},
	{
		.name = "nrfiles",
		.type = FIO_OPT_INT,
		.off1 = td_var_offset(nr_files),
		.help = "Split job workload between this number of files",
		.def = "1",
	},
	{
		.name = "openfiles",
		.type = FIO_OPT_INT,
		.off1 = td_var_offset(open_files),
		.help = "Number of files to keep open at the same time",
	},
	{
		.name = "file_service_type",
		.type = FIO_OPT_STR,
		.cb = str_fst_cb,
		.off1 = td_var_offset(file_service_type),
		.help = "How to select which file to service next",
		.def = "roundrobin",
		.posval = {
			{ .ival = "random",
			  .oval = FIO_FSERVICE_RANDOM,
			  .help = "Choose a file at random",
			},
			{ .ival = "roundrobin",
			  .oval = FIO_FSERVICE_RR,
			  .help = "Round robin select files",
			},
		},
	},
	{
		.name = "fsync",
		.type = FIO_OPT_INT,
		.off1 = td_var_offset(fsync_blocks),
		.help = "Issue fsync for writes every given number of blocks",
		.def = "0",
	},
	{
		.name = "direct",
		.type = FIO_OPT_BOOL,
		.off1 = td_var_offset(odirect),
		.help = "Use O_DIRECT IO (negates buffered)",
		.def = "0",
	},
	{
		.name = "buffered",
		.type = FIO_OPT_BOOL,
		.off1 = td_var_offset(odirect),
		.neg = 1,
		.help = "Use buffered IO (negates direct)",
		.def = "1",
	},
	{
		.name = "overwrite",
		.type = FIO_OPT_BOOL,
		.off1 = td_var_offset(overwrite),
		.help = "When writing, set whether to overwrite current data",
		.def = "0",
	},
	{
		.name = "loops",
		.type = FIO_OPT_INT,
		.off1 = td_var_offset(loops),
		.help = "Number of times to run the job",
		.def = "1",
	},
	{
		.name = "numjobs",
		.type = FIO_OPT_INT,
		.off1 = td_var_offset(numjobs),
		.help = "Duplicate this job this many times",
		.def = "1",
	},
	{
		.name = "startdelay",
		.type = FIO_OPT_INT,
		.off1 = td_var_offset(start_delay),
		.help = "Only start job when this period has passed",
		.def = "0",
	},
	{
		.name = "runtime",
		.alias = "timeout",
		.type = FIO_OPT_STR_VAL_TIME,
		.off1 = td_var_offset(timeout),
		.help = "Stop workload when this amount of time has passed",
		.def = "0",
	},
	{
		.name = "mem",
		.alias = "iomem",
		.type = FIO_OPT_STR,
		.cb = str_mem_cb,
		.off1 = td_var_offset(mem_type),
		.help = "Backing type for IO buffers",
		.def = "malloc",
		.posval = {
			{ .ival = "malloc",
			  .oval = MEM_MALLOC,
			  .help = "Use malloc(3) for IO buffers",
			},
			{ .ival = "shm",
			  .oval = MEM_SHM,
			  .help = "Use shared memory segments for IO buffers",
			},
#ifdef FIO_HAVE_HUGETLB
			{ .ival = "shmhuge",
			  .oval = MEM_SHMHUGE,
			  .help = "Like shm, but use huge pages",
			},
#endif
			{ .ival = "mmap",
			  .oval = MEM_MMAP,
			  .help = "Use mmap(2) (file or anon) for IO buffers",
			},
#ifdef FIO_HAVE_HUGETLB
			{ .ival = "mmaphuge",
			  .oval = MEM_MMAPHUGE,
			  .help = "Like mmap, but use huge pages",
			},
#endif
		},
	},
	{
		.name = "verify",
		.type = FIO_OPT_STR,
		.off1 = td_var_offset(verify),
		.help = "Verify data written",
		.def = "0",
		.posval = {
			{ .ival = "0",
			  .oval = VERIFY_NONE,
			  .help = "Don't do IO verification",
			},
			{ .ival = "crc32",
			  .oval = VERIFY_CRC32,
			  .help = "Use crc32 checksums for verification",
			},
			{ .ival = "md5",
			  .oval = VERIFY_MD5,
			  .help = "Use md5 checksums for verification",
			},
		},
	},
	{
		.name = "write_iolog",
		.type = FIO_OPT_STR_STORE,
		.off1 = td_var_offset(write_iolog_file),
		.help = "Store IO pattern to file",
	},
	{
		.name = "read_iolog",
		.type = FIO_OPT_STR_STORE,
		.off1 = td_var_offset(read_iolog_file),
		.help = "Playback IO pattern from file",
	},
	{
		.name = "exec_prerun",
		.type = FIO_OPT_STR_STORE,
		.off1 = td_var_offset(exec_prerun),
		.help = "Execute this file prior to running job",
	},
	{
		.name = "exec_postrun",
		.type = FIO_OPT_STR_STORE,
		.off1 = td_var_offset(exec_postrun),
		.help = "Execute this file after running job",
	},
#ifdef FIO_HAVE_IOSCHED_SWITCH
	{
		.name = "ioscheduler",
		.type = FIO_OPT_STR_STORE,
		.off1 = td_var_offset(ioscheduler),
		.help = "Use this IO scheduler on the backing device",
	},
#endif
	{
		.name = "zonesize",
		.type = FIO_OPT_STR_VAL,
		.off1 = td_var_offset(zone_size),
		.help = "Give size of an IO zone",
		.def = "0",
	},
	{
		.name = "zoneskip",
		.type = FIO_OPT_STR_VAL,
		.off1 = td_var_offset(zone_skip),
		.help = "Space between IO zones",
		.def = "0",
	},
	{
		.name = "lockmem",
		.type = FIO_OPT_STR_VAL,
		.cb = str_lockmem_cb,
		.help = "Lock down this amount of memory",
		.def = "0",
	},
	{
		.name = "rwmixcycle",
		.type = FIO_OPT_INT,
		.off1 = td_var_offset(rwmixcycle),
		.help = "Cycle period for mixed read/write workloads (msec)",
		.def = "500",
	},
	{
		.name = "rwmixread",
		.type = FIO_OPT_INT,
		.off1 = td_var_offset(rwmixread),
		.maxval = 100,
		.help = "Percentage of mixed workload that is reads",
		.def = "50",
	},
	{
		.name = "rwmixwrite",
		.type = FIO_OPT_INT,
		.off1 = td_var_offset(rwmixwrite),
		.maxval = 100,
		.help = "Percentage of mixed workload that is writes",
		.def = "50",
	},
	{
		.name = "nice",
		.type = FIO_OPT_INT,
		.off1 = td_var_offset(nice),
		.help = "Set job CPU nice value",
		.minval = -19,
		.maxval = 20,
		.def = "0",
	},
#ifdef FIO_HAVE_IOPRIO
	{
		.name = "prio",
		.type = FIO_OPT_INT,
		.cb = str_prio_cb,
		.help = "Set job IO priority value",
		.minval = 0,
		.maxval = 7,
	},
	{
		.name = "prioclass",
		.type = FIO_OPT_INT,
		.cb = str_prioclass_cb,
		.help = "Set job IO priority class",
		.minval = 0,
		.maxval = 3,
	},
#endif
	{
		.name = "thinktime",
		.type = FIO_OPT_INT,
		.off1 = td_var_offset(thinktime),
		.help = "Idle time between IO buffers (usec)",
		.def = "0",
	},
	{
		.name = "thinktime_spin",
		.type = FIO_OPT_INT,
		.off1 = td_var_offset(thinktime_spin),
		.help = "Start think time by spinning this amount (usec)",
		.def = "0",
	},
	{
		.name = "thinktime_blocks",
		.type = FIO_OPT_INT,
		.off1 = td_var_offset(thinktime_blocks),
		.help = "IO buffer period between 'thinktime'",
		.def = "1",
	},
	{
		.name = "rate",
		.type = FIO_OPT_INT,
		.off1 = td_var_offset(rate),
		.help = "Set bandwidth rate",
	},
	{
		.name = "ratemin",
		.type = FIO_OPT_INT,
		.off1 = td_var_offset(ratemin),
		.help = "Job must meet this rate or it will be shut down",
	},
	{
		.name = "rate_iops",
		.type = FIO_OPT_INT,
		.off1 = td_var_offset(rate_iops),
		.help = "Limit IO used to this number of IO operations/sec",
	},
	{
		.name = "rate_iops_min",
		.type = FIO_OPT_INT,
		.off1 = td_var_offset(rate_iops_min),
		.help = "Job must meet this rate or it will be shut down",
	},
	{
		.name = "ratecycle",
		.type = FIO_OPT_INT,
		.off1 = td_var_offset(ratecycle),
		.help = "Window average for rate limits (msec)",
		.def = "1000",
	},
	{
		.name = "invalidate",
		.type = FIO_OPT_BOOL,
		.off1 = td_var_offset(invalidate_cache),
		.help = "Invalidate buffer/page cache prior to running job",
		.def = "1",
	},
	{
		.name = "sync",
		.type = FIO_OPT_BOOL,
		.off1 = td_var_offset(sync_io),
		.help = "Use O_SYNC for buffered writes",
		.def = "0",
	},
	{
		.name = "bwavgtime",
		.type = FIO_OPT_INT,
		.off1 = td_var_offset(bw_avg_time),
		.help = "Time window over which to calculate bandwidth (msec)",
		.def = "500",
	},
	{
		.name = "create_serialize",
		.type = FIO_OPT_BOOL,
		.off1 = td_var_offset(create_serialize),
		.help = "Serialize creation of job files",
		.def = "1",
	},
	{
		.name = "create_fsync",
		.type = FIO_OPT_BOOL,
		.off1 = td_var_offset(create_fsync),
		.help = "Fsync file after creation",
		.def = "1",
	},
	{
		.name = "cpuload",
		.type = FIO_OPT_INT,
		.off1 = td_var_offset(cpuload),
		.help = "Use this percentage of CPU",
	},
	{
		.name = "cpuchunks",
		.type = FIO_OPT_INT,
		.off1 = td_var_offset(cpucycle),
		.help = "Length of the CPU burn cycles (usecs)",
		.def = "50000",
	},
#ifdef FIO_HAVE_CPU_AFFINITY
	{
		.name = "cpumask",
		.type = FIO_OPT_INT,
		.cb = str_cpumask_cb,
		.help = "CPU affinity mask",
	},
#endif
	{
		.name = "end_fsync",
		.type = FIO_OPT_BOOL,
		.off1 = td_var_offset(end_fsync),
		.help = "Include fsync at the end of job",
		.def = "0",
	},
	{
		.name = "fsync_on_close",
		.type = FIO_OPT_BOOL,
		.off1 = td_var_offset(fsync_on_close),
		.help = "fsync files on close",
		.def = "0",
	},
	{
		.name = "unlink",
		.type = FIO_OPT_BOOL,
		.off1 = td_var_offset(unlink),
		.help = "Unlink created files after job has completed",
		.def = "0",
	},
	{
		.name = "exitall",
		.type = FIO_OPT_STR_SET,
		.cb = str_exitall_cb,
		.help = "Terminate all jobs when one exits",
	},
	{
		.name = "stonewall",
		.type = FIO_OPT_STR_SET,
		.off1 = td_var_offset(stonewall),
		.help = "Insert a hard barrier between this job and previous",
	},
	{
		.name = "thread",
		.type = FIO_OPT_STR_SET,
		.off1 = td_var_offset(use_thread),
		.help = "Use threads instead of forks",
	},
	{
		.name = "write_bw_log",
		.type = FIO_OPT_STR_SET,
		.off1 = td_var_offset(write_bw_log),
		.help = "Write log of bandwidth during run",
	},
	{
		.name = "write_lat_log",
		.type = FIO_OPT_STR_SET,
		.off1 = td_var_offset(write_lat_log),
		.help = "Write log of latency during run",
	},
	{
		.name = "hugepage-size",
		.type = FIO_OPT_STR_VAL,
		.off1 = td_var_offset(hugepage_size),
		.help = "When using hugepages, specify size of each page",
		.def = __stringify(FIO_HUGE_PAGE),
	},
	{
		.name = "group_reporting",
		.type = FIO_OPT_STR_SET,
		.off1 = td_var_offset(group_reporting),
		.help = "Do reporting on a per-group basis",
	},
	{
		.name = NULL,
	},
};

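/*
 * Append the job option table to the getopt long-option array so that
 * every job option can also be given on the command line. Set-type
 * options (FIO_OPT_STR_SET) take no argument; everything else does.
 */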
void fio_options_dup_and_init(struct option *long_options)
{
	struct fio_option *o;
	unsigned int i;

	options_init(options);

	i = 0;
	while (long_options[i].name)
		i++;

	o = &options[0];
	while (o->name) {
		long_options[i].name = o->name;
		long_options[i].val = FIO_GETOPT_JOB;
		if (o->type == FIO_OPT_STR_SET)
			long_options[i].has_arg = no_argument;
		else
			long_options[i].has_arg = required_argument;

		i++;
		o++;
		assert(i < FIO_NR_OPTIONS);
	}
}

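/*
 * Thin wrappers that hand this file's option table to the generic parser.
 */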
int fio_option_parse(struct thread_data *td, const char *opt)
{
	return parse_option(opt, options, td);
}

int fio_cmd_option_parse(struct thread_data *td, const char *opt, char *val)
{
	return parse_cmd_option(opt, val, options, td);
}

void fio_fill_default_options(struct thread_data *td)
{
	fill_default_options(td, options);
}

int fio_show_option_help(const char *opt)
{
	return show_cmd_help(options, opt);
}