[PATCH] Allow verification to be done at finer granularity
[fio.git] / options.c
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <ctype.h>
#include <string.h>
#include <getopt.h>
#include <assert.h>

#include "fio.h"
#include "parse.h"

#define td_var_offset(var)	((size_t) &((struct thread_options *)0)->var)

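/*
 * td_var_offset() is the usual offsetof() trick: it yields the byte offset
 * of a member inside struct thread_options, e.g. td_var_offset(name) is
 * what the option table below stores in .off1 so the parser can write the
 * parsed value into the right field without knowing its type.
 */
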
/*
 * Check if mmap/mmaphuge has a :/foo/bar/file at the end. If so, return that.
 */
static char *get_opt_postfix(const char *str)
{
	char *p = strstr(str, ":");

	if (!p)
		return NULL;

	p++;
	strip_blank_front(&p);
	strip_blank_end(p);
	return strdup(p);
}

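/*
 * Example: get_opt_postfix("mmaphuge:/mnt/hugetlbfs/file") returns a
 * heap-allocated "/mnt/hugetlbfs/file"; without a ':' postfix it
 * returns NULL.
 */
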
static int str_rw_cb(void *data, const char *str)
{
	struct thread_data *td = data;
	char *nr = get_opt_postfix(str);

	td->o.ddir_nr = 1;
	if (nr) {
		td->o.ddir_nr = atoi(nr);
		free(nr);
	}

	return 0;
}

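/*
 * The optional ":<nr>" postfix on rw= ends up in o.ddir_nr, e.g.
 * "rw=randrw:8" parses to ddir_nr == 8; how that count is used is
 * decided where it is consumed in the IO path, not here.
 */
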
static int str_mem_cb(void *data, const char *mem)
{
	struct thread_data *td = data;

	if (td->o.mem_type == MEM_MMAPHUGE || td->o.mem_type == MEM_MMAP) {
		td->mmapfile = get_opt_postfix(mem);
		if (td->o.mem_type == MEM_MMAPHUGE && !td->mmapfile) {
			log_err("fio: mmaphuge:/path/to/file\n");
			return 1;
		}
	}

	return 0;
}

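/*
 * Example: "mem=mmaphuge:/mnt/hugetlbfs/fio.buf" backs the IO buffers
 * with a mapping of that file; a bare "mem=mmaphuge" is rejected since
 * the huge page mapping needs a file (typically on a hugetlbfs mount).
 */
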
static int str_lockmem_cb(void fio_unused *data, unsigned long *val)
{
	mlock_size = *val;
	return 0;
}

#ifdef FIO_HAVE_IOPRIO
static int str_prioclass_cb(void *data, unsigned int *val)
{
	struct thread_data *td = data;
	unsigned short mask;

	/*
	 * mask off old class bits, str_prio_cb() may have set a default class
	 */
	mask = (1 << IOPRIO_CLASS_SHIFT) - 1;
	td->ioprio &= mask;

	td->ioprio |= *val << IOPRIO_CLASS_SHIFT;
	return 0;
}

static int str_prio_cb(void *data, unsigned int *val)
{
	struct thread_data *td = data;

	td->ioprio |= *val;

	/*
	 * If no class is set, assume BE
	 */
	if ((td->ioprio >> IOPRIO_CLASS_SHIFT) == 0)
		td->ioprio |= IOPRIO_CLASS_BE << IOPRIO_CLASS_SHIFT;

	return 0;
}
#endif

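/*
 * The kernel encodes IO priorities as (class << IOPRIO_CLASS_SHIFT) | level,
 * so "prioclass=2 prio=4" leaves td->ioprio holding best-effort class 2,
 * level 4, while a bare "prio=" falls back to the best-effort class.
 */
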
static int str_exitall_cb(void)
{
	exitall_on_terminate = 1;
	return 0;
}

#ifdef FIO_HAVE_CPU_AFFINITY
static int str_cpumask_cb(void *data, unsigned int *val)
{
	struct thread_data *td = data;
	unsigned int i;

	CPU_ZERO(&td->o.cpumask);

	for (i = 0; i < sizeof(int) * 8; i++)
		if ((1 << i) & *val)
			CPU_SET(i, &td->o.cpumask);

	td->o.cpumask_set = 1;
	return 0;
}

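/*
 * cpumask= takes a bit mask, e.g. "cpumask=3" allows CPUs 0 and 1:
 * bit i of the value selects CPU i in the affinity set.
 */
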
static int str_cpus_allowed_cb(void *data, const char *input)
{
	struct thread_data *td = data;
	char *cpu, *str, *p;

	CPU_ZERO(&td->o.cpumask);

	p = str = strdup(input);

	strip_blank_front(&str);
	strip_blank_end(str);

	while ((cpu = strsep(&str, ",")) != NULL) {
		if (!strlen(cpu))
			break;
		CPU_SET(atoi(cpu), &td->o.cpumask);
	}

	free(p);
	td->o.cpumask_set = 1;
	return 0;
}
#endif

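/*
 * cpus_allowed= is the list form of the same thing, e.g.
 * "cpus_allowed=0,5,9" sets exactly CPUs 0, 5 and 9 in the affinity set.
 */
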
static int str_fst_cb(void *data, const char *str)
{
	struct thread_data *td = data;
	char *nr = get_opt_postfix(str);

	td->file_service_nr = 1;
	if (nr) {
		td->file_service_nr = atoi(nr);
		free(nr);
	}

	return 0;
}

static int str_filename_cb(void *data, const char *input)
{
	struct thread_data *td = data;
	char *fname, *str, *p;

	p = str = strdup(input);

	strip_blank_front(&str);
	strip_blank_end(str);

	if (!td->files_index)
		td->o.nr_files = 0;

	while ((fname = strsep(&str, ":")) != NULL) {
		if (!strlen(fname))
			break;
		add_file(td, fname);
		td->o.nr_files++;
	}

	free(p);
	return 0;
}

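/*
 * filename= accepts a ':' separated list, e.g. "filename=/dev/sdb:/dev/sdc"
 * adds both devices to the job and bumps o.nr_files for each one.
 */
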
static int str_directory_cb(void *data, const char fio_unused *str)
{
	struct thread_data *td = data;
	struct stat sb;

	if (lstat(td->o.directory, &sb) < 0) {
		log_err("fio: failed to lstat directory %s\n", td->o.directory);
		td_verror(td, errno, "lstat");
		return 1;
	}
	if (!S_ISDIR(sb.st_mode)) {
		log_err("fio: %s is not a directory\n", td->o.directory);
		return 1;
	}

	return 0;
}

static int str_opendir_cb(void *data, const char fio_unused *str)
{
	struct thread_data *td = data;

	if (!td->files_index)
		td->o.nr_files = 0;

	return add_dir_files(td, td->o.opendir);
}

#define __stringify_1(x)	#x
#define __stringify(x)		__stringify_1(x)

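/*
 * Two-level stringification so macro arguments get expanded first:
 * __stringify(FIO_HUGE_PAGE) turns the value of FIO_HUGE_PAGE into a
 * string, which is how hugepage-size gets its default below.
 */
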
/*
 * Map of job/command line options
 */
static struct fio_option options[] = {
	{
		.name = "description",
		.type = FIO_OPT_STR_STORE,
		.off1 = td_var_offset(description),
		.help = "Text job description",
	},
	{
		.name = "name",
		.type = FIO_OPT_STR_STORE,
		.off1 = td_var_offset(name),
		.help = "Name of this job",
	},
	{
		.name = "directory",
		.type = FIO_OPT_STR_STORE,
		.off1 = td_var_offset(directory),
		.cb   = str_directory_cb,
		.help = "Directory to store files in",
	},
	{
		.name = "filename",
		.type = FIO_OPT_STR_STORE,
		.off1 = td_var_offset(filename),
		.cb   = str_filename_cb,
		.help = "File(s) to use for the workload",
	},
	{
		.name = "opendir",
		.type = FIO_OPT_STR_STORE,
		.off1 = td_var_offset(opendir),
		.cb   = str_opendir_cb,
		.help = "Recursively add files from this directory and down",
	},
	{
		.name  = "rw",
		.alias = "readwrite",
		.type  = FIO_OPT_STR,
		.cb    = str_rw_cb,
		.off1  = td_var_offset(td_ddir),
		.help  = "IO direction",
		.def   = "read",
		.posval = {
			{ .ival = "read",
			  .oval = TD_DDIR_READ,
			  .help = "Sequential read",
			},
			{ .ival = "write",
			  .oval = TD_DDIR_WRITE,
			  .help = "Sequential write",
			},
			{ .ival = "randread",
			  .oval = TD_DDIR_RANDREAD,
			  .help = "Random read",
			},
			{ .ival = "randwrite",
			  .oval = TD_DDIR_RANDWRITE,
			  .help = "Random write",
			},
			{ .ival = "rw",
			  .oval = TD_DDIR_RW,
			  .help = "Sequential read and write mix",
			},
			{ .ival = "randrw",
			  .oval = TD_DDIR_RANDRW,
			  .help = "Random read and write mix",
			},
		},
	},
	{
		.name = "fadvise_hint",
		.type = FIO_OPT_BOOL,
		.off1 = td_var_offset(fadvise_hint),
		.help = "Use fadvise() to advise the kernel on IO pattern",
		.def  = "1",
	},
	{
		.name = "ioengine",
		.type = FIO_OPT_STR_STORE,
		.off1 = td_var_offset(ioengine),
		.help = "IO engine to use",
		.def  = "sync",
		.posval = {
			{ .ival = "sync",
			  .help = "Use read/write",
			},
#ifdef FIO_HAVE_LIBAIO
			{ .ival = "libaio",
			  .help = "Linux native asynchronous IO",
			},
#endif
#ifdef FIO_HAVE_POSIXAIO
			{ .ival = "posixaio",
			  .help = "POSIX asynchronous IO",
			},
#endif
			{ .ival = "mmap",
			  .help = "Memory mapped IO",
			},
#ifdef FIO_HAVE_SPLICE
			{ .ival = "splice",
			  .help = "splice/vmsplice based IO",
			},
			{ .ival = "netsplice",
			  .help = "splice/vmsplice to/from the network",
			},
#endif
#ifdef FIO_HAVE_SGIO
			{ .ival = "sg",
			  .help = "SCSI generic v3 IO",
			},
#endif
			{ .ival = "null",
			  .help = "Testing engine (no data transfer)",
			},
			{ .ival = "net",
			  .help = "Network IO",
			},
#ifdef FIO_HAVE_SYSLET
			{ .ival = "syslet-rw",
			  .help = "syslet enabled async pread/pwrite IO",
			},
#endif
			{ .ival = "cpuio",
			  .help = "CPU cycle burner engine",
			},
#ifdef FIO_HAVE_GUASI
			{ .ival = "guasi",
			  .help = "GUASI IO engine",
			},
#endif
			{ .ival = "external",
			  .help = "Load external engine (append name)",
			},
		},
	},
	{
		.name = "iodepth",
		.type = FIO_OPT_INT,
		.off1 = td_var_offset(iodepth),
		.help = "Amount of IO buffers to keep in flight",
		.def  = "1",
	},
	{
		.name = "iodepth_batch",
		.type = FIO_OPT_INT,
		.off1 = td_var_offset(iodepth_batch),
		.help = "Number of IO to submit in one go",
	},
	{
		.name = "iodepth_low",
		.type = FIO_OPT_INT,
		.off1 = td_var_offset(iodepth_low),
		.help = "Low water mark for queuing depth",
	},
	{
		.name   = "size",
		.type   = FIO_OPT_STR_VAL,
		.off1   = td_var_offset(size),
		.minval = 1,
		.help   = "Total size of device or files",
	},
	{
		.name   = "filesize",
		.type   = FIO_OPT_STR_VAL,
		.off1   = td_var_offset(file_size_low),
		.off2   = td_var_offset(file_size_high),
		.minval = 1,
		.help   = "Size of individual files",
	},
	{
		.name   = "bs",
		.alias  = "blocksize",
		.type   = FIO_OPT_STR_VAL_INT,
		.off1   = td_var_offset(bs[DDIR_READ]),
		.off2   = td_var_offset(bs[DDIR_WRITE]),
		.minval = 1,
		.help   = "Block size unit",
		.def    = "4k",
	},
	{
		.name   = "bsrange",
		.alias  = "blocksize_range",
		.type   = FIO_OPT_RANGE,
		.off1   = td_var_offset(min_bs[DDIR_READ]),
		.off2   = td_var_offset(max_bs[DDIR_READ]),
		.off3   = td_var_offset(min_bs[DDIR_WRITE]),
		.off4   = td_var_offset(max_bs[DDIR_WRITE]),
		.minval = 1,
		.help   = "Set block size range (in more detail than bs)",
	},
	{
		.name  = "bs_unaligned",
		.alias = "blocksize_unaligned",
		.type  = FIO_OPT_STR_SET,
		.off1  = td_var_offset(bs_unaligned),
		.help  = "Don't sector align IO buffer sizes",
	},
	{
		.name = "offset",
		.type = FIO_OPT_STR_VAL,
		.off1 = td_var_offset(start_offset),
		.help = "Start IO from this offset",
		.def  = "0",
	},
	{
		.name = "randrepeat",
		.type = FIO_OPT_BOOL,
		.off1 = td_var_offset(rand_repeatable),
		.help = "Use repeatable random IO pattern",
		.def  = "1",
	},
	{
		.name = "norandommap",
		.type = FIO_OPT_STR_SET,
		.off1 = td_var_offset(norandommap),
		.help = "Accept potential duplicate random blocks",
	},
	{
		.name = "nrfiles",
		.type = FIO_OPT_INT,
		.off1 = td_var_offset(nr_files),
		.help = "Split job workload between this number of files",
		.def  = "1",
	},
	{
		.name = "openfiles",
		.type = FIO_OPT_INT,
		.off1 = td_var_offset(open_files),
		.help = "Number of files to keep open at the same time",
	},
	{
		.name = "file_service_type",
		.type = FIO_OPT_STR,
		.cb   = str_fst_cb,
		.off1 = td_var_offset(file_service_type),
		.help = "How to select which file to service next",
		.def  = "roundrobin",
		.posval = {
			{ .ival = "random",
			  .oval = FIO_FSERVICE_RANDOM,
			  .help = "Choose a file at random",
			},
			{ .ival = "roundrobin",
			  .oval = FIO_FSERVICE_RR,
			  .help = "Round robin select files",
			},
		},
	},
	{
		.name = "fsync",
		.type = FIO_OPT_INT,
		.off1 = td_var_offset(fsync_blocks),
		.help = "Issue fsync for writes every given number of blocks",
		.def  = "0",
	},
	{
		.name = "direct",
		.type = FIO_OPT_BOOL,
		.off1 = td_var_offset(odirect),
		.help = "Use O_DIRECT IO (negates buffered)",
		.def  = "0",
	},
	{
		.name = "buffered",
		.type = FIO_OPT_BOOL,
		.off1 = td_var_offset(odirect),
		.neg  = 1,
		.help = "Use buffered IO (negates direct)",
		.def  = "1",
	},
	{
		.name = "overwrite",
		.type = FIO_OPT_BOOL,
		.off1 = td_var_offset(overwrite),
		.help = "When writing, set whether to overwrite current data",
		.def  = "0",
	},
	{
		.name = "loops",
		.type = FIO_OPT_INT,
		.off1 = td_var_offset(loops),
		.help = "Number of times to run the job",
		.def  = "1",
	},
	{
		.name = "numjobs",
		.type = FIO_OPT_INT,
		.off1 = td_var_offset(numjobs),
		.help = "Duplicate this job this many times",
		.def  = "1",
	},
	{
		.name = "startdelay",
		.type = FIO_OPT_INT,
		.off1 = td_var_offset(start_delay),
		.help = "Only start job when this period has passed",
		.def  = "0",
	},
	{
		.name  = "runtime",
		.alias = "timeout",
		.type  = FIO_OPT_STR_VAL_TIME,
		.off1  = td_var_offset(timeout),
		.help  = "Stop workload when this amount of time has passed",
		.def   = "0",
	},
	{
		.name = "time_based",
		.type = FIO_OPT_STR_SET,
		.off1 = td_var_offset(time_based),
		.help = "Keep running until runtime/timeout is met",
	},
	{
		.name  = "mem",
		.alias = "iomem",
		.type  = FIO_OPT_STR,
		.cb    = str_mem_cb,
		.off1  = td_var_offset(mem_type),
		.help  = "Backing type for IO buffers",
		.def   = "malloc",
		.posval = {
			{ .ival = "malloc",
			  .oval = MEM_MALLOC,
			  .help = "Use malloc(3) for IO buffers",
			},
			{ .ival = "shm",
			  .oval = MEM_SHM,
			  .help = "Use shared memory segments for IO buffers",
			},
#ifdef FIO_HAVE_HUGETLB
			{ .ival = "shmhuge",
			  .oval = MEM_SHMHUGE,
			  .help = "Like shm, but use huge pages",
			},
#endif
			{ .ival = "mmap",
			  .oval = MEM_MMAP,
			  .help = "Use mmap(2) (file or anon) for IO buffers",
			},
#ifdef FIO_HAVE_HUGETLB
			{ .ival = "mmaphuge",
			  .oval = MEM_MMAPHUGE,
			  .help = "Like mmap, but use huge pages",
			},
#endif
		},
	},
	{
		.name = "verify",
		.type = FIO_OPT_STR,
		.off1 = td_var_offset(verify),
		.help = "Verify data written",
		.def  = "0",
		.posval = {
			{ .ival = "0",
			  .oval = VERIFY_NONE,
			  .help = "Don't do IO verification",
			},
			{ .ival = "md5",
			  .oval = VERIFY_MD5,
			  .help = "Use md5 checksums for verification",
			},
			{ .ival = "crc64",
			  .oval = VERIFY_CRC64,
			  .help = "Use crc64 checksums for verification",
			},
			{ .ival = "crc32",
			  .oval = VERIFY_CRC32,
			  .help = "Use crc32 checksums for verification",
			},
			{ .ival = "crc16",
			  .oval = VERIFY_CRC16,
			  .help = "Use crc16 checksums for verification",
			},
			{ .ival = "crc7",
			  .oval = VERIFY_CRC7,
			  .help = "Use crc7 checksums for verification",
			},
			{ .ival = "null",
			  .oval = VERIFY_NULL,
			  .help = "Pretend to verify",
			},
		},
	},
	{
		.name = "verifysort",
		.type = FIO_OPT_BOOL,
		.off1 = td_var_offset(verifysort),
		.help = "Sort written verify blocks for read back",
		.def  = "1",
	},
	{
		.name = "header_interval",
		.type = FIO_OPT_STR_VAL_INT,
		.off1 = td_var_offset(header_interval),
		.help = "Store buffer header every N bytes",
		.def  = "0",
	},
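	/*
	 * header_interval is the new knob from this patch: e.g. "verify=md5"
	 * plus "header_interval=4096" stores a verify header every 4k of each
	 * written buffer instead of only once per buffer, which is what lets
	 * verification run at a finer granularity than the block size
	 * (the default of 0 presumably keeps the old one-header-per-buffer
	 * behaviour).
	 */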
	{
		.name = "write_iolog",
		.type = FIO_OPT_STR_STORE,
		.off1 = td_var_offset(write_iolog_file),
		.help = "Store IO pattern to file",
	},
	{
		.name = "read_iolog",
		.type = FIO_OPT_STR_STORE,
		.off1 = td_var_offset(read_iolog_file),
		.help = "Playback IO pattern from file",
	},
	{
		.name = "exec_prerun",
		.type = FIO_OPT_STR_STORE,
		.off1 = td_var_offset(exec_prerun),
		.help = "Execute this file prior to running job",
	},
	{
		.name = "exec_postrun",
		.type = FIO_OPT_STR_STORE,
		.off1 = td_var_offset(exec_postrun),
		.help = "Execute this file after running job",
	},
#ifdef FIO_HAVE_IOSCHED_SWITCH
	{
		.name = "ioscheduler",
		.type = FIO_OPT_STR_STORE,
		.off1 = td_var_offset(ioscheduler),
		.help = "Use this IO scheduler on the backing device",
	},
#endif
	{
		.name = "zonesize",
		.type = FIO_OPT_STR_VAL,
		.off1 = td_var_offset(zone_size),
		.help = "Give size of an IO zone",
		.def  = "0",
	},
	{
		.name = "zoneskip",
		.type = FIO_OPT_STR_VAL,
		.off1 = td_var_offset(zone_skip),
		.help = "Space between IO zones",
		.def  = "0",
	},
	{
		.name = "lockmem",
		.type = FIO_OPT_STR_VAL,
		.cb   = str_lockmem_cb,
		.help = "Lock down this amount of memory",
		.def  = "0",
	},
	{
		.name = "rwmixcycle",
		.type = FIO_OPT_INT,
		.off1 = td_var_offset(rwmixcycle),
		.help = "Cycle period for mixed read/write workloads (msec)",
		.def  = "500",
	},
	{
		.name   = "rwmixread",
		.type   = FIO_OPT_INT,
		.off1   = td_var_offset(rwmix[DDIR_READ]),
		.maxval = 100,
		.help   = "Percentage of mixed workload that is reads",
		.def    = "50",
	},
	{
		.name   = "rwmixwrite",
		.type   = FIO_OPT_INT,
		.off1   = td_var_offset(rwmix[DDIR_WRITE]),
		.maxval = 100,
		.help   = "Percentage of mixed workload that is writes",
		.def    = "50",
	},
	{
		.name   = "nice",
		.type   = FIO_OPT_INT,
		.off1   = td_var_offset(nice),
		.help   = "Set job CPU nice value",
		.minval = -19,
		.maxval = 20,
		.def    = "0",
	},
#ifdef FIO_HAVE_IOPRIO
	{
		.name   = "prio",
		.type   = FIO_OPT_INT,
		.cb     = str_prio_cb,
		.help   = "Set job IO priority value",
		.minval = 0,
		.maxval = 7,
	},
	{
		.name   = "prioclass",
		.type   = FIO_OPT_INT,
		.cb     = str_prioclass_cb,
		.help   = "Set job IO priority class",
		.minval = 0,
		.maxval = 3,
	},
#endif
	{
		.name = "thinktime",
		.type = FIO_OPT_INT,
		.off1 = td_var_offset(thinktime),
		.help = "Idle time between IO buffers (usec)",
		.def  = "0",
	},
	{
		.name = "thinktime_spin",
		.type = FIO_OPT_INT,
		.off1 = td_var_offset(thinktime_spin),
		.help = "Start think time by spinning this amount (usec)",
		.def  = "0",
	},
	{
		.name = "thinktime_blocks",
		.type = FIO_OPT_INT,
		.off1 = td_var_offset(thinktime_blocks),
		.help = "IO buffer period between 'thinktime'",
		.def  = "1",
	},
	{
		.name = "rate",
		.type = FIO_OPT_INT,
		.off1 = td_var_offset(rate),
		.help = "Set bandwidth rate",
	},
	{
		.name = "ratemin",
		.type = FIO_OPT_INT,
		.off1 = td_var_offset(ratemin),
		.help = "Job must meet this rate or it will be shut down",
	},
	{
		.name = "rate_iops",
		.type = FIO_OPT_INT,
		.off1 = td_var_offset(rate_iops),
		.help = "Limit IO used to this number of IO operations/sec",
	},
	{
		.name = "rate_iops_min",
		.type = FIO_OPT_INT,
		.off1 = td_var_offset(rate_iops_min),
		.help = "Job must meet this rate or it will be shut down",
	},
	{
		.name = "ratecycle",
		.type = FIO_OPT_INT,
		.off1 = td_var_offset(ratecycle),
		.help = "Window average for rate limits (msec)",
		.def  = "1000",
	},
	{
		.name = "invalidate",
		.type = FIO_OPT_BOOL,
		.off1 = td_var_offset(invalidate_cache),
		.help = "Invalidate buffer/page cache prior to running job",
		.def  = "1",
	},
	{
		.name = "sync",
		.type = FIO_OPT_BOOL,
		.off1 = td_var_offset(sync_io),
		.help = "Use O_SYNC for buffered writes",
		.def  = "0",
	},
	{
		.name = "bwavgtime",
		.type = FIO_OPT_INT,
		.off1 = td_var_offset(bw_avg_time),
		.help = "Time window over which to calculate bandwidth (msec)",
		.def  = "500",
	},
	{
		.name = "create_serialize",
		.type = FIO_OPT_BOOL,
		.off1 = td_var_offset(create_serialize),
		.help = "Serialize creation of job files",
		.def  = "1",
	},
	{
		.name = "create_fsync",
		.type = FIO_OPT_BOOL,
		.off1 = td_var_offset(create_fsync),
		.help = "Fsync file after creation",
		.def  = "1",
	},
	{
		.name = "cpuload",
		.type = FIO_OPT_INT,
		.off1 = td_var_offset(cpuload),
		.help = "Use this percentage of CPU",
	},
	{
		.name = "cpuchunks",
		.type = FIO_OPT_INT,
		.off1 = td_var_offset(cpucycle),
		.help = "Length of the CPU burn cycles (usecs)",
		.def  = "50000",
	},
#ifdef FIO_HAVE_CPU_AFFINITY
	{
		.name = "cpumask",
		.type = FIO_OPT_INT,
		.cb   = str_cpumask_cb,
		.help = "CPU affinity mask",
	},
	{
		.name = "cpus_allowed",
		.type = FIO_OPT_STR,
		.cb   = str_cpus_allowed_cb,
		.help = "Set CPUs allowed",
	},
#endif
	{
		.name = "end_fsync",
		.type = FIO_OPT_BOOL,
		.off1 = td_var_offset(end_fsync),
		.help = "Include fsync at the end of job",
		.def  = "0",
	},
	{
		.name = "fsync_on_close",
		.type = FIO_OPT_BOOL,
		.off1 = td_var_offset(fsync_on_close),
		.help = "fsync files on close",
		.def  = "0",
	},
	{
		.name = "unlink",
		.type = FIO_OPT_BOOL,
		.off1 = td_var_offset(unlink),
		.help = "Unlink created files after job has completed",
		.def  = "0",
	},
	{
		.name = "exitall",
		.type = FIO_OPT_STR_SET,
		.cb   = str_exitall_cb,
		.help = "Terminate all jobs when one exits",
	},
	{
		.name = "stonewall",
		.type = FIO_OPT_STR_SET,
		.off1 = td_var_offset(stonewall),
		.help = "Insert a hard barrier between this job and previous",
	},
	{
		.name = "new_group",
		.type = FIO_OPT_STR_SET,
		.off1 = td_var_offset(new_group),
		.help = "Mark the start of a new group (for reporting)",
	},
	{
		.name = "thread",
		.type = FIO_OPT_STR_SET,
		.off1 = td_var_offset(use_thread),
		.help = "Use threads instead of forks",
	},
	{
		.name = "write_bw_log",
		.type = FIO_OPT_STR_SET,
		.off1 = td_var_offset(write_bw_log),
		.help = "Write log of bandwidth during run",
	},
	{
		.name = "write_lat_log",
		.type = FIO_OPT_STR_SET,
		.off1 = td_var_offset(write_lat_log),
		.help = "Write log of latency during run",
	},
	{
		.name = "hugepage-size",
		.type = FIO_OPT_STR_VAL,
		.off1 = td_var_offset(hugepage_size),
		.help = "When using hugepages, specify size of each page",
		.def  = __stringify(FIO_HUGE_PAGE),
	},
	{
		.name = "group_reporting",
		.type = FIO_OPT_STR_SET,
		.off1 = td_var_offset(group_reporting),
		.help = "Do reporting on a per-group basis",
	},
	{
		.name = "zero_buffers",
		.type = FIO_OPT_STR_SET,
		.off1 = td_var_offset(zero_buffers),
		.help = "Init IO buffers to all zeroes",
	},
#ifdef FIO_HAVE_DISK_UTIL
	{
		.name = "disk_util",
		.type = FIO_OPT_BOOL,
		.off1 = td_var_offset(do_disk_util),
		.help = "Log disk utilization stats",
		.def  = "1",
	},
#endif
	{
		.name = NULL,
	},
};

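/*
 * Splice the job option table into the getopt_long() table, so every
 * entry above is also accepted as a "--name" command line option.
 * FIO_OPT_STR_SET options take no argument, everything else requires one.
 */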
void fio_options_dup_and_init(struct option *long_options)
{
	struct fio_option *o;
	unsigned int i;

	options_init(options);

	i = 0;
	while (long_options[i].name)
		i++;

	o = &options[0];
	while (o->name) {
		long_options[i].name = o->name;
		long_options[i].val = FIO_GETOPT_JOB;
		if (o->type == FIO_OPT_STR_SET)
			long_options[i].has_arg = no_argument;
		else
			long_options[i].has_arg = required_argument;

		i++;
		o++;
		assert(i < FIO_NR_OPTIONS);
	}
}

int fio_option_parse(struct thread_data *td, const char *opt)
{
	return parse_option(opt, options, td);
}

int fio_cmd_option_parse(struct thread_data *td, const char *opt, char *val)
{
	return parse_cmd_option(opt, val, options, td);
}

void fio_fill_default_options(struct thread_data *td)
{
	fill_default_options(td, options);
}

int fio_show_option_help(const char *opt)
{
	return show_cmd_help(options, opt);
}

static void __options_mem(struct thread_data *td, int alloc)
{
	struct thread_options *o = &td->o;
	struct fio_option *opt;
	char **ptr;
	int i;

	for (i = 0, opt = &options[0]; opt->name; i++, opt = &options[i]) {
		if (opt->type != FIO_OPT_STR_STORE)
			continue;

		ptr = (void *) o + opt->off1;
		if (*ptr) {
			if (alloc)
				*ptr = strdup(*ptr);
			else {
				free(*ptr);
				*ptr = NULL;
			}
		}
	}
}
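/*
 * __options_mem() only touches FIO_OPT_STR_STORE options: it treats
 * (char *)&td->o + off1 as a char ** and either strdup()s the string or
 * frees it, so a duplicated job ends up owning its own copies of string
 * options like "directory" instead of sharing the parent's pointers.
 */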

/*
 * dupe FIO_OPT_STR_STORE options
 */
void options_mem_dupe(struct thread_data *td)
{
	__options_mem(td, 1);
}

void options_mem_free(struct thread_data fio_unused *td)
{
#if 0
	__options_mem(td, 0);
#endif
}