Have the job set ->terminate when runtime is exceeded
[fio.git] / options.c
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <ctype.h>
#include <string.h>
#include <getopt.h>
#include <assert.h>

#include "fio.h"
#include "parse.h"

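/*
 * Offset of a member within struct thread_options; the option table below
 * uses this to record where each parsed value should be stored.
 */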
#define td_var_offset(var) ((size_t) &((struct thread_options *)0)->var)

/*
 * Check if mmap/mmaphuge has a :/foo/bar/file at the end. If so, return that.
 */
static char *get_opt_postfix(const char *str)
{
	char *p = strstr(str, ":");

	if (!p)
		return NULL;

	p++;
	strip_blank_front(&p);
	strip_blank_end(p);
	return strdup(p);
}

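/*
 * "rw=" callback: parse an optional ":<nr>" postfix (e.g. rw=randrw:8)
 * into td->o.ddir_nr (default 1).
 */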
static int str_rw_cb(void *data, const char *str)
{
	struct thread_data *td = data;
	char *nr = get_opt_postfix(str);

	td->o.ddir_nr = 1;
	if (nr) {
		td->o.ddir_nr = atoi(nr);
		free(nr);
	}

	return 0;
}

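/*
 * "mem=" callback: for mmap and mmaphuge backing, an optional ":/path"
 * postfix names the file to map; mmaphuge requires one.
 */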
static int str_mem_cb(void *data, const char *mem)
{
	struct thread_data *td = data;

	if (td->o.mem_type == MEM_MMAPHUGE || td->o.mem_type == MEM_MMAP) {
		td->mmapfile = get_opt_postfix(mem);
		if (td->o.mem_type == MEM_MMAPHUGE && !td->mmapfile) {
			log_err("fio: mmaphuge:/path/to/file\n");
			return 1;
		}
	}

	return 0;
}

static int str_lockmem_cb(void fio_unused *data, unsigned long *val)
{
	mlock_size = *val;
	return 0;
}

#ifdef FIO_HAVE_IOPRIO
static int str_prioclass_cb(void *data, unsigned int *val)
{
	struct thread_data *td = data;

	td->ioprio |= *val << IOPRIO_CLASS_SHIFT;
	return 0;
}

static int str_prio_cb(void *data, unsigned int *val)
{
	struct thread_data *td = data;

	td->ioprio |= *val;
	return 0;
}
#endif

static int str_exitall_cb(void)
{
	exitall_on_terminate = 1;
	return 0;
}

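/*
 * Convert the integer bitmask given by "cpumask=" into an os_cpu_mask_t:
 * each set bit in 'cpu' marks that CPU number in the mask.
 */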
static void fill_cpu_mask(os_cpu_mask_t *cpumask, int cpu)
{
#ifdef FIO_HAVE_CPU_AFFINITY
	unsigned int i;

	CPU_ZERO(cpumask);

	for (i = 0; i < sizeof(int) * 8; i++) {
		if ((1 << i) & cpu)
			CPU_SET(i, cpumask);
	}
#endif
}

static int str_cpumask_cb(void *data, unsigned int *val)
{
	struct thread_data *td = data;

	/* pass the mask by pointer so the job's actual mask is updated */
	fill_cpu_mask(&td->o.cpumask, *val);
	return 0;
}

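/*
 * "file_service_type=" callback: an optional ":<nr>" postfix is parsed
 * into td->file_service_nr (default 1).
 */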
static int str_fst_cb(void *data, const char *str)
{
	struct thread_data *td = data;
	char *nr = get_opt_postfix(str);

	td->file_service_nr = 1;
	if (nr) {
		td->file_service_nr = atoi(nr);
		free(nr);
	}

	return 0;
}

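/*
 * "filename=" callback: split the ":"-separated list and add each file
 * to the job, counting them in td->o.nr_files.
 */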
static int str_filename_cb(void *data, const char *input)
{
	struct thread_data *td = data;
	char *fname, *str, *p;

	p = str = strdup(input);

	strip_blank_front(&str);
	strip_blank_end(str);

	if (!td->files_index)
		td->o.nr_files = 0;

	while ((fname = strsep(&str, ":")) != NULL) {
		if (!strlen(fname))
			break;
		add_file(td, fname);
		td->o.nr_files++;
	}

	free(p);
	return 0;
}

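/*
 * "directory=" callback: verify that the given path exists and really is
 * a directory.
 */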
static int str_directory_cb(void *data, const char fio_unused *str)
{
	struct thread_data *td = data;
	struct stat sb;

	if (lstat(td->o.directory, &sb) < 0) {
		log_err("fio: %s is not a directory\n", td->o.directory);
		td_verror(td, errno, "lstat");
		return 1;
	}
	if (!S_ISDIR(sb.st_mode)) {
		log_err("fio: %s is not a directory\n", td->o.directory);
		return 1;
	}

	return 0;
}

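/*
 * "opendir=" callback: recursively add all files found under
 * td->o.opendir to the job.
 */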
static int str_opendir_cb(void *data, const char fio_unused *str)
{
	struct thread_data *td = data;

	if (!td->files_index)
		td->o.nr_files = 0;

	return add_dir_files(td, td->o.opendir);
}

#define __stringify_1(x) #x
#define __stringify(x) __stringify_1(x)

/*
 * Map of job/command line options
 */
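/*
 * Each entry names an option, its parse type, where the parsed value is
 * stored in struct thread_options (via td_var_offset), an optional
 * callback, help text, a default, and for FIO_OPT_STR options the list
 * of accepted values (.posval).
 */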
static struct fio_option options[] = {
	{
		.name = "description",
		.type = FIO_OPT_STR_STORE,
		.off1 = td_var_offset(description),
		.help = "Text job description",
	},
	{
		.name = "name",
		.type = FIO_OPT_STR_STORE,
		.off1 = td_var_offset(name),
		.help = "Name of this job",
	},
	{
		.name = "directory",
		.type = FIO_OPT_STR_STORE,
		.off1 = td_var_offset(directory),
		.cb = str_directory_cb,
		.help = "Directory to store files in",
	},
	{
		.name = "filename",
		.type = FIO_OPT_STR_STORE,
		.off1 = td_var_offset(filename),
		.cb = str_filename_cb,
		.help = "File(s) to use for the workload",
	},
	{
		.name = "opendir",
		.type = FIO_OPT_STR_STORE,
		.off1 = td_var_offset(opendir),
		.cb = str_opendir_cb,
		.help = "Recursively add files from this directory and down",
	},
	{
		.name = "rw",
		.alias = "readwrite",
		.type = FIO_OPT_STR,
		.cb = str_rw_cb,
		.off1 = td_var_offset(td_ddir),
		.help = "IO direction",
		.def = "read",
		.posval = {
			{ .ival = "read",
			  .oval = TD_DDIR_READ,
			  .help = "Sequential read",
			},
			{ .ival = "write",
			  .oval = TD_DDIR_WRITE,
			  .help = "Sequential write",
			},
			{ .ival = "randread",
			  .oval = TD_DDIR_RANDREAD,
			  .help = "Random read",
			},
			{ .ival = "randwrite",
			  .oval = TD_DDIR_RANDWRITE,
			  .help = "Random write",
			},
			{ .ival = "rw",
			  .oval = TD_DDIR_RW,
			  .help = "Sequential read and write mix",
			},
			{ .ival = "randrw",
			  .oval = TD_DDIR_RANDRW,
			  .help = "Random read and write mix",
			},
		},
	},
	{
		.name = "fadvise_hint",
		.type = FIO_OPT_BOOL,
		.off1 = td_var_offset(fadvise_hint),
		.help = "Use fadvise() to advise the kernel on IO pattern",
		.def = "1",
	},
	{
		.name = "ioengine",
		.type = FIO_OPT_STR_STORE,
		.off1 = td_var_offset(ioengine),
		.help = "IO engine to use",
		.def = "sync",
		.posval = {
			{ .ival = "sync",
			  .help = "Use read/write",
			},
#ifdef FIO_HAVE_LIBAIO
			{ .ival = "libaio",
			  .help = "Linux native asynchronous IO",
			},
#endif
#ifdef FIO_HAVE_POSIXAIO
			{ .ival = "posixaio",
			  .help = "POSIX asynchronous IO",
			},
#endif
			{ .ival = "mmap",
			  .help = "Memory mapped IO",
			},
#ifdef FIO_HAVE_SPLICE
			{ .ival = "splice",
			  .help = "splice/vmsplice based IO",
			},
#endif
#ifdef FIO_HAVE_SGIO
			{ .ival = "sg",
			  .help = "SCSI generic v3 IO",
			},
#endif
			{ .ival = "null",
			  .help = "Testing engine (no data transfer)",
			},
			{ .ival = "net",
			  .help = "Network IO",
			},
#ifdef FIO_HAVE_SYSLET
			{ .ival = "syslet-rw",
			  .help = "syslet enabled async pread/pwrite IO",
			},
#endif
			{ .ival = "cpuio",
			  .help = "CPU cycler burner engine",
			},
#ifdef FIO_HAVE_GUASI
			{ .ival = "guasi",
			  .help = "GUASI IO engine",
			},
#endif
			{ .ival = "external",
			  .help = "Load external engine (append name)",
			},
		},
	},
	{
		.name = "iodepth",
		.type = FIO_OPT_INT,
		.off1 = td_var_offset(iodepth),
		.help = "Amount of IO buffers to keep in flight",
		.def = "1",
	},
	{
		.name = "iodepth_batch",
		.type = FIO_OPT_INT,
		.off1 = td_var_offset(iodepth_batch),
		.help = "Number of IO to submit in one go",
	},
	{
		.name = "iodepth_low",
		.type = FIO_OPT_INT,
		.off1 = td_var_offset(iodepth_low),
		.help = "Low water mark for queuing depth",
	},
	{
		.name = "size",
		.type = FIO_OPT_STR_VAL,
		.off1 = td_var_offset(size),
		.help = "Total size of device or files",
	},
	{
		.name = "filesize",
		.type = FIO_OPT_STR_VAL,
		.off1 = td_var_offset(file_size_low),
		.off2 = td_var_offset(file_size_high),
		.help = "Size of individual files",
	},
	{
		.name = "bs",
		.alias = "blocksize",
		.type = FIO_OPT_STR_VAL_INT,
		.off1 = td_var_offset(bs[DDIR_READ]),
		.off2 = td_var_offset(bs[DDIR_WRITE]),
		.help = "Block size unit",
		.def = "4k",
	},
	{
		.name = "bsrange",
		.alias = "blocksize_range",
		.type = FIO_OPT_RANGE,
		.off1 = td_var_offset(min_bs[DDIR_READ]),
		.off2 = td_var_offset(max_bs[DDIR_READ]),
		.off3 = td_var_offset(min_bs[DDIR_WRITE]),
		.off4 = td_var_offset(max_bs[DDIR_WRITE]),
		.help = "Set block size range (in more detail than bs)",
	},
	{
		.name = "bs_unaligned",
		.alias = "blocksize_unaligned",
		.type = FIO_OPT_STR_SET,
		.off1 = td_var_offset(bs_unaligned),
		.help = "Don't sector align IO buffer sizes",
	},
	{
		.name = "offset",
		.type = FIO_OPT_STR_VAL,
		.off1 = td_var_offset(start_offset),
		.help = "Start IO from this offset",
		.def = "0",
	},
	{
		.name = "randrepeat",
		.type = FIO_OPT_BOOL,
		.off1 = td_var_offset(rand_repeatable),
		.help = "Use repeatable random IO pattern",
		.def = "1",
	},
	{
		.name = "norandommap",
		.type = FIO_OPT_STR_SET,
		.off1 = td_var_offset(norandommap),
		.help = "Accept potential duplicate random blocks",
	},
	{
		.name = "nrfiles",
		.type = FIO_OPT_INT,
		.off1 = td_var_offset(nr_files),
		.help = "Split job workload between this number of files",
		.def = "1",
	},
	{
		.name = "openfiles",
		.type = FIO_OPT_INT,
		.off1 = td_var_offset(open_files),
		.help = "Number of files to keep open at the same time",
	},
	{
		.name = "file_service_type",
		.type = FIO_OPT_STR,
		.cb = str_fst_cb,
		.off1 = td_var_offset(file_service_type),
		.help = "How to select which file to service next",
		.def = "roundrobin",
		.posval = {
			{ .ival = "random",
			  .oval = FIO_FSERVICE_RANDOM,
			  .help = "Choose a file at random",
			},
			{ .ival = "roundrobin",
			  .oval = FIO_FSERVICE_RR,
			  .help = "Round robin select files",
			},
		},
	},
	{
		.name = "fsync",
		.type = FIO_OPT_INT,
		.off1 = td_var_offset(fsync_blocks),
		.help = "Issue fsync for writes every given number of blocks",
		.def = "0",
	},
	{
		.name = "direct",
		.type = FIO_OPT_BOOL,
		.off1 = td_var_offset(odirect),
		.help = "Use O_DIRECT IO (negates buffered)",
		.def = "0",
	},
	{
		.name = "buffered",
		.type = FIO_OPT_BOOL,
		.off1 = td_var_offset(odirect),
		.neg = 1,
		.help = "Use buffered IO (negates direct)",
		.def = "1",
	},
	{
		.name = "overwrite",
		.type = FIO_OPT_BOOL,
		.off1 = td_var_offset(overwrite),
		.help = "When writing, set whether to overwrite current data",
		.def = "0",
	},
	{
		.name = "loops",
		.type = FIO_OPT_INT,
		.off1 = td_var_offset(loops),
		.help = "Number of times to run the job",
		.def = "1",
	},
	{
		.name = "numjobs",
		.type = FIO_OPT_INT,
		.off1 = td_var_offset(numjobs),
		.help = "Duplicate this job this many times",
		.def = "1",
	},
	{
		.name = "startdelay",
		.type = FIO_OPT_INT,
		.off1 = td_var_offset(start_delay),
		.help = "Only start job when this period has passed",
		.def = "0",
	},
	{
		.name = "runtime",
		.alias = "timeout",
		.type = FIO_OPT_STR_VAL_TIME,
		.off1 = td_var_offset(timeout),
		.help = "Stop workload when this amount of time has passed",
		.def = "0",
	},
	{
		.name = "time_based",
		.type = FIO_OPT_STR_SET,
		.off1 = td_var_offset(time_based),
		.help = "Keep running until runtime/timeout is met",
	},
	{
		.name = "mem",
		.alias = "iomem",
		.type = FIO_OPT_STR,
		.cb = str_mem_cb,
		.off1 = td_var_offset(mem_type),
		.help = "Backing type for IO buffers",
		.def = "malloc",
		.posval = {
			{ .ival = "malloc",
			  .oval = MEM_MALLOC,
			  .help = "Use malloc(3) for IO buffers",
			},
			{ .ival = "shm",
			  .oval = MEM_SHM,
			  .help = "Use shared memory segments for IO buffers",
			},
#ifdef FIO_HAVE_HUGETLB
			{ .ival = "shmhuge",
			  .oval = MEM_SHMHUGE,
			  .help = "Like shm, but use huge pages",
			},
#endif
			{ .ival = "mmap",
			  .oval = MEM_MMAP,
			  .help = "Use mmap(2) (file or anon) for IO buffers",
			},
#ifdef FIO_HAVE_HUGETLB
			{ .ival = "mmaphuge",
			  .oval = MEM_MMAPHUGE,
			  .help = "Like mmap, but use huge pages",
			},
#endif
		},
	},
	{
		.name = "verify",
		.type = FIO_OPT_STR,
		.off1 = td_var_offset(verify),
		.help = "Verify data written",
		.def = "0",
		.posval = {
			{ .ival = "0",
			  .oval = VERIFY_NONE,
			  .help = "Don't do IO verification",
			},
			{ .ival = "crc32",
			  .oval = VERIFY_CRC32,
			  .help = "Use crc32 checksums for verification",
			},
			{ .ival = "md5",
			  .oval = VERIFY_MD5,
			  .help = "Use md5 checksums for verification",
			},
			{ .ival = "null",
			  .oval = VERIFY_NULL,
			  .help = "Pretend to verify",
			},
		},
	},
	{
		.name = "verifysort",
		.type = FIO_OPT_BOOL,
		.off1 = td_var_offset(verifysort),
		.help = "Sort written verify blocks for read back",
		.def = "1",
	},
	{
		.name = "write_iolog",
		.type = FIO_OPT_STR_STORE,
		.off1 = td_var_offset(write_iolog_file),
		.help = "Store IO pattern to file",
	},
	{
		.name = "read_iolog",
		.type = FIO_OPT_STR_STORE,
		.off1 = td_var_offset(read_iolog_file),
		.help = "Playback IO pattern from file",
	},
	{
		.name = "exec_prerun",
		.type = FIO_OPT_STR_STORE,
		.off1 = td_var_offset(exec_prerun),
		.help = "Execute this file prior to running job",
	},
	{
		.name = "exec_postrun",
		.type = FIO_OPT_STR_STORE,
		.off1 = td_var_offset(exec_postrun),
		.help = "Execute this file after running job",
	},
#ifdef FIO_HAVE_IOSCHED_SWITCH
	{
		.name = "ioscheduler",
		.type = FIO_OPT_STR_STORE,
		.off1 = td_var_offset(ioscheduler),
		.help = "Use this IO scheduler on the backing device",
	},
#endif
	{
		.name = "zonesize",
		.type = FIO_OPT_STR_VAL,
		.off1 = td_var_offset(zone_size),
		.help = "Give size of an IO zone",
		.def = "0",
	},
	{
		.name = "zoneskip",
		.type = FIO_OPT_STR_VAL,
		.off1 = td_var_offset(zone_skip),
		.help = "Space between IO zones",
		.def = "0",
	},
	{
		.name = "lockmem",
		.type = FIO_OPT_STR_VAL,
		.cb = str_lockmem_cb,
		.help = "Lock down this amount of memory",
		.def = "0",
	},
	{
		.name = "rwmixcycle",
		.type = FIO_OPT_INT,
		.off1 = td_var_offset(rwmixcycle),
		.help = "Cycle period for mixed read/write workloads (msec)",
		.def = "500",
	},
	{
		.name = "rwmixread",
		.type = FIO_OPT_INT,
		.off1 = td_var_offset(rwmix[DDIR_READ]),
		.maxval = 100,
		.help = "Percentage of mixed workload that is reads",
		.def = "50",
	},
	{
		.name = "rwmixwrite",
		.type = FIO_OPT_INT,
		.off1 = td_var_offset(rwmix[DDIR_WRITE]),
		.maxval = 100,
		.help = "Percentage of mixed workload that is writes",
		.def = "50",
	},
	{
		.name = "nice",
		.type = FIO_OPT_INT,
		.off1 = td_var_offset(nice),
		.help = "Set job CPU nice value",
		.minval = -19,
		.maxval = 20,
		.def = "0",
	},
#ifdef FIO_HAVE_IOPRIO
	{
		.name = "prio",
		.type = FIO_OPT_INT,
		.cb = str_prio_cb,
		.help = "Set job IO priority value",
		.minval = 0,
		.maxval = 7,
	},
	{
		.name = "prioclass",
		.type = FIO_OPT_INT,
		.cb = str_prioclass_cb,
		.help = "Set job IO priority class",
		.minval = 0,
		.maxval = 3,
	},
#endif
	{
		.name = "thinktime",
		.type = FIO_OPT_INT,
		.off1 = td_var_offset(thinktime),
		.help = "Idle time between IO buffers (usec)",
		.def = "0",
	},
	{
		.name = "thinktime_spin",
		.type = FIO_OPT_INT,
		.off1 = td_var_offset(thinktime_spin),
		.help = "Start think time by spinning this amount (usec)",
		.def = "0",
	},
	{
		.name = "thinktime_blocks",
		.type = FIO_OPT_INT,
		.off1 = td_var_offset(thinktime_blocks),
		.help = "IO buffer period between 'thinktime'",
		.def = "1",
	},
	{
		.name = "rate",
		.type = FIO_OPT_INT,
		.off1 = td_var_offset(rate),
		.help = "Set bandwidth rate",
	},
	{
		.name = "ratemin",
		.type = FIO_OPT_INT,
		.off1 = td_var_offset(ratemin),
		.help = "Job must meet this rate or it will be shutdown",
	},
	{
		.name = "rate_iops",
		.type = FIO_OPT_INT,
		.off1 = td_var_offset(rate_iops),
		.help = "Limit IO used to this number of IO operations/sec",
	},
	{
		.name = "rate_iops_min",
		.type = FIO_OPT_INT,
		.off1 = td_var_offset(rate_iops_min),
		.help = "Job must meet this rate or it will be shutdown",
	},
	{
		.name = "ratecycle",
		.type = FIO_OPT_INT,
		.off1 = td_var_offset(ratecycle),
		.help = "Window average for rate limits (msec)",
		.def = "1000",
	},
	{
		.name = "invalidate",
		.type = FIO_OPT_BOOL,
		.off1 = td_var_offset(invalidate_cache),
		.help = "Invalidate buffer/page cache prior to running job",
		.def = "1",
	},
	{
		.name = "sync",
		.type = FIO_OPT_BOOL,
		.off1 = td_var_offset(sync_io),
		.help = "Use O_SYNC for buffered writes",
		.def = "0",
	},
	{
		.name = "bwavgtime",
		.type = FIO_OPT_INT,
		.off1 = td_var_offset(bw_avg_time),
		.help = "Time window over which to calculate bandwidth (msec)",
		.def = "500",
	},
	{
		.name = "create_serialize",
		.type = FIO_OPT_BOOL,
		.off1 = td_var_offset(create_serialize),
		.help = "Serialize creating of job files",
		.def = "1",
	},
	{
		.name = "create_fsync",
		.type = FIO_OPT_BOOL,
		.off1 = td_var_offset(create_fsync),
		.help = "Fsync file after creation",
		.def = "1",
	},
	{
		.name = "cpuload",
		.type = FIO_OPT_INT,
		.off1 = td_var_offset(cpuload),
		.help = "Use this percentage of CPU",
	},
	{
		.name = "cpuchunks",
		.type = FIO_OPT_INT,
		.off1 = td_var_offset(cpucycle),
		.help = "Length of the CPU burn cycles (usecs)",
		.def = "50000",
	},
#ifdef FIO_HAVE_CPU_AFFINITY
	{
		.name = "cpumask",
		.type = FIO_OPT_INT,
		.cb = str_cpumask_cb,
		.help = "CPU affinity mask",
	},
#endif
	{
		.name = "end_fsync",
		.type = FIO_OPT_BOOL,
		.off1 = td_var_offset(end_fsync),
		.help = "Include fsync at the end of job",
		.def = "0",
	},
	{
		.name = "fsync_on_close",
		.type = FIO_OPT_BOOL,
		.off1 = td_var_offset(fsync_on_close),
		.help = "fsync files on close",
		.def = "0",
	},
	{
		.name = "unlink",
		.type = FIO_OPT_BOOL,
		.off1 = td_var_offset(unlink),
		.help = "Unlink created files after job has completed",
		.def = "0",
	},
	{
		.name = "exitall",
		.type = FIO_OPT_STR_SET,
		.cb = str_exitall_cb,
		.help = "Terminate all jobs when one exits",
	},
	{
		.name = "stonewall",
		.type = FIO_OPT_STR_SET,
		.off1 = td_var_offset(stonewall),
		.help = "Insert a hard barrier between this job and previous",
	},
	{
		.name = "new_group",
		.type = FIO_OPT_STR_SET,
		.off1 = td_var_offset(new_group),
		.help = "Mark the start of a new group (for reporting)",
	},
	{
		.name = "thread",
		.type = FIO_OPT_STR_SET,
		.off1 = td_var_offset(use_thread),
		.help = "Use threads instead of forks",
	},
	{
		.name = "write_bw_log",
		.type = FIO_OPT_STR_SET,
		.off1 = td_var_offset(write_bw_log),
		.help = "Write log of bandwidth during run",
	},
	{
		.name = "write_lat_log",
		.type = FIO_OPT_STR_SET,
		.off1 = td_var_offset(write_lat_log),
		.help = "Write log of latency during run",
	},
	{
		.name = "hugepage-size",
		.type = FIO_OPT_STR_VAL,
		.off1 = td_var_offset(hugepage_size),
		.help = "When using hugepages, specify size of each page",
		.def = __stringify(FIO_HUGE_PAGE),
	},
	{
		.name = "group_reporting",
		.type = FIO_OPT_STR_SET,
		.off1 = td_var_offset(group_reporting),
		.help = "Do reporting on a per-group basis",
	},
	{
		.name = "zero_buffers",
		.type = FIO_OPT_STR_SET,
		.off1 = td_var_offset(zero_buffers),
		.help = "Init IO buffers to all zeroes",
	},
	{
		.name = NULL,
	},
};

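/*
 * Append the fio options to the getopt() long option table, marking each
 * entry with FIO_GETOPT_JOB and noting whether it takes an argument.
 */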
void fio_options_dup_and_init(struct option *long_options)
{
	struct fio_option *o;
	unsigned int i;

	options_init(options);

	i = 0;
	while (long_options[i].name)
		i++;

	o = &options[0];
	while (o->name) {
		long_options[i].name = o->name;
		long_options[i].val = FIO_GETOPT_JOB;
		if (o->type == FIO_OPT_STR_SET)
			long_options[i].has_arg = no_argument;
		else
			long_options[i].has_arg = required_argument;

		i++;
		o++;
		assert(i < FIO_NR_OPTIONS);
	}
}

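/*
 * Thin wrappers that hand the shared option table to the generic option
 * parser for job files, command line options, defaults and help output.
 */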
int fio_option_parse(struct thread_data *td, const char *opt)
{
	return parse_option(opt, options, td);
}

int fio_cmd_option_parse(struct thread_data *td, const char *opt, char *val)
{
	return parse_cmd_option(opt, val, options, td);
}

void fio_fill_default_options(struct thread_data *td)
{
	fill_default_options(td, options);
}

int fio_show_option_help(const char *opt)
{
	return show_cmd_help(options, opt);
}

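/*
 * Walk the option table and either strdup() (alloc) or free() every
 * FIO_OPT_STR_STORE string hanging off td->o.
 */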
static void __options_mem(struct thread_data *td, int alloc)
{
	struct thread_options *o = &td->o;
	struct fio_option *opt;
	char **ptr;
	int i;

	for (i = 0, opt = &options[0]; opt->name; i++, opt = &options[i]) {
		if (opt->type != FIO_OPT_STR_STORE)
			continue;

		ptr = (void *) o + opt->off1;
		if (*ptr) {
			if (alloc)
				*ptr = strdup(*ptr);
			else {
				free(*ptr);
				*ptr = NULL;
			}
		}
	}
}

/*
 * dupe FIO_OPT_STR_STORE options
 */
void options_mem_dupe(struct thread_data *td)
{
	__options_mem(td, 1);
}

void options_mem_free(struct thread_data fio_unused *td)
{
#if 0
	__options_mem(td, 0);
#endif
}