/*
 * NOTE(review): this extract embeds the original file's line numbers as a
 * leading token on each line and omits many intervening lines, so it is not
 * compilable as-is.  Comments below describe only what is visible here.
 */
2 * This file contains job initialization and setup functions.
15 #include <sys/types.h>
/* Seed multiplier for repeatable random sequences (used with randrepeat). */
21 #define FIO_RANDSEED (0xb1899bedUL)
/*
 * offsetof()-style macro: byte offset of a member inside struct thread_data.
 * The null-pointer-deref idiom predates universal offsetof() use; it is kept
 * byte-identical here.
 */
23 #define td_var_offset(var) ((size_t) &((struct thread_data *)0)->var)
/* Forward declarations of option-parse callbacks defined later in this file. */
25 static int str_mem_cb(void *, const char *);
26 static int str_lockmem_cb(void *, unsigned long *);
27 #ifdef FIO_HAVE_IOPRIO
28 static int str_prio_cb(void *, unsigned int *);
29 static int str_prioclass_cb(void *, unsigned int *);
31 static int str_exitall_cb(void);
32 static int str_cpumask_cb(void *, unsigned int *);
/* Two-step stringification so macro arguments are expanded before quoting. */
34 #define __stringify_1(x) #x
35 #define __stringify(x) __stringify_1(x)
/*
 * Table of all job/command-line options understood by the parser.  Each entry
 * names the option, its parse type, the destination offset(s) inside
 * struct thread_data (via td_var_offset), help text, and for enumerated
 * options the legal value strings.  NOTE(review): the per-entry braces and
 * several fields (.name, .type, defaults, .neg flags) are elided from this
 * extract; documentation below is limited to the visible fields.
 */
38 * Map of job/command line options
40 static struct fio_option options[] = {
42 .name = "description",
43 .type = FIO_OPT_STR_STORE,
44 .off1 = td_var_offset(description),
45 .help = "Text job description",
49 .type = FIO_OPT_STR_STORE,
50 .off1 = td_var_offset(name),
51 .help = "Name of this job",
55 .type = FIO_OPT_STR_STORE,
56 .off1 = td_var_offset(directory),
57 .help = "Directory to store files in",
61 .type = FIO_OPT_STR_STORE,
62 .off1 = td_var_offset(filename),
63 .help = "Force the use of a specific file",
/* IO direction: stored as a TD_DDIR_* enum selected from the strings below. */
68 .off1 = td_var_offset(td_ddir),
69 .help = "IO direction",
72 { .ival = "read", .oval = TD_DDIR_READ },
73 { .ival = "write", .oval = TD_DDIR_WRITE },
74 { .ival = "randread", .oval = TD_DDIR_RANDREAD },
75 { .ival = "randwrite", .oval = TD_DDIR_RANDWRITE },
76 { .ival = "rw", .oval = TD_DDIR_RW },
77 { .ival = "randrw", .oval = TD_DDIR_RANDRW },
/* IO engine: free-form string; the candidates below are compile-time gated. */
82 .type = FIO_OPT_STR_STORE,
83 .off1 = td_var_offset(ioengine),
84 .help = "IO engine to use",
88 #ifdef FIO_HAVE_LIBAIO
89 { .ival = "libaio", },
91 #ifdef FIO_HAVE_POSIXAIO
92 { .ival = "posixaio", },
95 #ifdef FIO_HAVE_SPLICE
96 { .ival = "splice", },
101 { .ival = "null", }, { .ival = "net", },
102 #ifdef FIO_HAVE_SYSLET
103 { .ival = "syslet-rw", },
105 { .ival = "cpuio", },
106 { .ival = "external", },
/* Queue-depth knobs; fixup_options() later clamps batch/low against iodepth. */
112 .off1 = td_var_offset(iodepth),
113 .help = "Amount of IO buffers to keep in flight",
117 .name = "iodepth_batch",
119 .off1 = td_var_offset(iodepth_batch),
120 .help = "Number of IO to submit in one go",
123 .name = "iodepth_low",
125 .off1 = td_var_offset(iodepth_low),
126 .help = "Low water mark for queuing depth",
130 .type = FIO_OPT_STR_VAL,
131 .off1 = td_var_offset(total_file_size),
132 .help = "Size of device or file",
/* Block size: one option writes both the read and write slots. */
136 .type = FIO_OPT_STR_VAL_INT,
137 .off1 = td_var_offset(bs[DDIR_READ]),
138 .off2 = td_var_offset(bs[DDIR_WRITE]),
139 .help = "Block size unit",
/* Block size range: four offsets, min/max for each data direction. */
144 .type = FIO_OPT_RANGE,
145 .off1 = td_var_offset(min_bs[DDIR_READ]),
146 .off2 = td_var_offset(max_bs[DDIR_READ]),
147 .off3 = td_var_offset(min_bs[DDIR_WRITE]),
148 .off4 = td_var_offset(max_bs[DDIR_WRITE]),
149 .help = "Set block size range (in more detail than bs)",
152 .name = "bs_unaligned",
153 .type = FIO_OPT_STR_SET,
154 .off1 = td_var_offset(bs_unaligned),
155 .help = "Don't sector align IO buffer sizes",
159 .type = FIO_OPT_STR_VAL,
160 .off1 = td_var_offset(start_offset),
161 .help = "Start IO from this offset",
165 .name = "randrepeat",
166 .type = FIO_OPT_BOOL,
167 .off1 = td_var_offset(rand_repeatable),
168 .help = "Use repeatable random IO pattern",
172 .name = "norandommap",
173 .type = FIO_OPT_STR_SET,
174 .off1 = td_var_offset(norandommap),
175 .help = "Accept potential duplicate random blocks",
180 .off1 = td_var_offset(nr_files),
181 .help = "Split job workload between this number of files",
187 .off1 = td_var_offset(open_files),
188 .help = "Number of files to keep open at the same time",
191 .name = "file_service_type",
193 .off1 = td_var_offset(file_service_type),
194 .help = "How to select which file to service next",
197 { .ival = "random", .oval = FIO_FSERVICE_RANDOM },
198 { .ival = "roundrobin", .oval = FIO_FSERVICE_RR },
204 .off1 = td_var_offset(fsync_blocks),
205 .help = "Issue fsync for writes every given number of blocks",
210 .type = FIO_OPT_BOOL,
211 .off1 = td_var_offset(odirect),
212 .help = "Use O_DIRECT IO (negates buffered)",
/*
 * "buffered" and "direct" intentionally share the odirect field; the entry
 * that inverts the stored value (a .neg flag, original line 219) appears to
 * be elided from this extract -- TODO confirm against upstream.
 */
217 .type = FIO_OPT_BOOL,
218 .off1 = td_var_offset(odirect),
220 .help = "Use buffered IO (negates direct)",
225 .type = FIO_OPT_BOOL,
226 .off1 = td_var_offset(overwrite),
227 .help = "When writing, set whether to overwrite current data",
233 .off1 = td_var_offset(loops),
234 .help = "Number of times to run the job",
240 .off1 = td_var_offset(numjobs),
241 .help = "Duplicate this job this many times",
245 .name = "startdelay",
247 .off1 = td_var_offset(start_delay),
248 .help = "Only start job when this period has passed",
254 .type = FIO_OPT_STR_VAL_TIME,
255 .off1 = td_var_offset(timeout),
256 .help = "Stop workload when this amount of time has passed",
/* IO buffer backing store; huge-page variants are compile-time gated. */
263 .off1 = td_var_offset(mem_type),
264 .help = "Backing type for IO buffers",
267 { .ival = "malloc", .oval = MEM_MALLOC },
268 { .ival = "shm", .oval = MEM_SHM },
269 #ifdef FIO_HAVE_HUGETLB
270 { .ival = "shmhuge", .oval = MEM_SHMHUGE },
272 { .ival = "mmap", .oval = MEM_MMAP },
273 #ifdef FIO_HAVE_HUGETLB
274 { .ival = "mmaphuge", .oval = MEM_MMAPHUGE },
281 .off1 = td_var_offset(verify),
282 .help = "Verify sum function",
285 { .ival = "0", .oval = VERIFY_NONE },
286 { .ival = "crc32", .oval = VERIFY_CRC32 },
287 { .ival = "md5", .oval = VERIFY_MD5 },
291 .name = "write_iolog",
292 .type = FIO_OPT_STR_STORE,
293 .off1 = td_var_offset(write_iolog_file),
294 .help = "Store IO pattern to file",
297 .name = "read_iolog",
298 .type = FIO_OPT_STR_STORE,
299 .off1 = td_var_offset(read_iolog_file),
300 .help = "Playback IO pattern from file",
303 .name = "exec_prerun",
304 .type = FIO_OPT_STR_STORE,
305 .off1 = td_var_offset(exec_prerun),
306 .help = "Execute this file prior to running job",
309 .name = "exec_postrun",
310 .type = FIO_OPT_STR_STORE,
311 .off1 = td_var_offset(exec_postrun),
312 .help = "Execute this file after running job",
314 #ifdef FIO_HAVE_IOSCHED_SWITCH
316 .name = "ioscheduler",
317 .type = FIO_OPT_STR_STORE,
318 .off1 = td_var_offset(ioscheduler),
319 .help = "Use this IO scheduler on the backing device",
324 .type = FIO_OPT_STR_VAL,
325 .off1 = td_var_offset(zone_size),
326 .help = "Give size of an IO zone",
331 .type = FIO_OPT_STR_VAL,
332 .off1 = td_var_offset(zone_skip),
333 .help = "Space between IO zones",
/* lockmem is handled by callback, not a struct offset (global mlock_size). */
338 .type = FIO_OPT_STR_VAL,
339 .cb = str_lockmem_cb,
340 .help = "Lock down this amount of memory",
344 .name = "rwmixcycle",
346 .off1 = td_var_offset(rwmixcycle),
347 .help = "Cycle period for mixed read/write workloads (msec)",
353 .off1 = td_var_offset(rwmixread),
355 .help = "Percentage of mixed workload that is reads",
359 .name = "rwmixwrite",
361 .off1 = td_var_offset(rwmixwrite),
363 .help = "Percentage of mixed workload that is writes",
369 .off1 = td_var_offset(nice),
370 .help = "Set job CPU nice value",
375 #ifdef FIO_HAVE_IOPRIO
380 .help = "Set job IO priority value",
387 .cb = str_prioclass_cb,
388 .help = "Set job IO priority class",
396 .off1 = td_var_offset(thinktime),
397 .help = "Idle time between IO buffers (usec)",
401 .name = "thinktime_spin",
403 .off1 = td_var_offset(thinktime_spin),
404 .help = "Start think time by spinning this amount (usec)",
408 .name = "thinktime_blocks",
410 .off1 = td_var_offset(thinktime_blocks),
411 .help = "IO buffer period between 'thinktime'",
417 .off1 = td_var_offset(rate),
418 .help = "Set bandwidth rate",
423 .off1 = td_var_offset(ratemin),
424 .help = "The bottom limit accepted",
429 .off1 = td_var_offset(ratecycle),
430 .help = "Window average for rate limits (msec)",
434 .name = "invalidate",
435 .type = FIO_OPT_BOOL,
436 .off1 = td_var_offset(invalidate_cache),
437 .help = "Invalidate buffer/page cache prior to running job",
442 .type = FIO_OPT_BOOL,
443 .off1 = td_var_offset(sync_io),
444 .help = "Use O_SYNC for buffered writes",
450 .off1 = td_var_offset(bw_avg_time),
451 .help = "Time window over which to calculate bandwidth (msec)",
455 .name = "create_serialize",
456 .type = FIO_OPT_BOOL,
457 .off1 = td_var_offset(create_serialize),
458 .help = "Serialize creating of job files",
462 .name = "create_fsync",
463 .type = FIO_OPT_BOOL,
464 .off1 = td_var_offset(create_fsync),
465 .help = "Fsync file after creation",
471 .off1 = td_var_offset(cpuload),
472 .help = "Use this percentage of CPU",
477 .off1 = td_var_offset(cpucycle),
478 .help = "Length of the CPU burn cycles (usecs)",
481 #ifdef FIO_HAVE_CPU_AFFINITY
485 .cb = str_cpumask_cb,
486 .help = "CPU affinity mask",
491 .type = FIO_OPT_BOOL,
492 .off1 = td_var_offset(end_fsync),
493 .help = "Include fsync at the end of job",
498 .type = FIO_OPT_BOOL,
499 .off1 = td_var_offset(unlink),
500 .help = "Unlink created files after job has completed",
505 .type = FIO_OPT_STR_SET,
506 .cb = str_exitall_cb,
507 .help = "Terminate all jobs when one exits",
511 .type = FIO_OPT_STR_SET,
512 .off1 = td_var_offset(stonewall),
513 .help = "Insert a hard barrier between this job and previous",
517 .type = FIO_OPT_STR_SET,
518 .off1 = td_var_offset(use_thread),
519 .help = "Use threads instead of forks",
522 .name = "write_bw_log",
523 .type = FIO_OPT_STR_SET,
524 .off1 = td_var_offset(write_bw_log),
525 .help = "Write log of bandwidth during run",
528 .name = "write_lat_log",
529 .type = FIO_OPT_STR_SET,
530 .off1 = td_var_offset(write_lat_log),
531 .help = "Write log of latency during run",
534 .name = "hugepage-size",
535 .type = FIO_OPT_STR_VAL,
536 .off1 = td_var_offset(hugepage_size),
537 .help = "When using hugepages, specify size of each page",
538 .def = __stringify(FIO_HUGE_PAGE),
541 .name = "group_reporting",
542 .type = FIO_OPT_STR_SET,
543 .off1 = td_var_offset(group_reporting),
544 .help = "Do reporting on a per-group basis",
/* Number of entries in options[]; computed from the array itself. */
551 #define FIO_JOB_OPTS (sizeof(options) / sizeof(struct fio_option))
/* Fixed headroom in long_options[] for fio-only (non-job) switches. */
552 #define FIO_CMD_OPTS (16)
/* Sentinel .val marking a getopt entry that maps back to a job option. */
553 #define FIO_GETOPT_JOB (0x89988998)
556 * Command line options. These will contain the above, plus a few
557 * extra that only pertain to fio itself and not jobs.
/*
 * Statically holds the fio-only switches; dupe_job_options() later appends
 * one entry per job option.  NOTE(review): the .name/.val fields of several
 * entries are elided from this extract.
 */
559 static struct option long_options[FIO_JOB_OPTS + FIO_CMD_OPTS] = {
562 .has_arg = required_argument,
567 .has_arg = required_argument,
571 .name = "latency-log",
572 .has_arg = required_argument,
576 .name = "bandwidth-log",
577 .has_arg = required_argument,
582 .has_arg = optional_argument,
587 .has_arg = no_argument,
592 .has_arg = no_argument,
597 .has_arg = optional_argument,
/* Default --timeout value copied into def_thread by fill_def_thread(). */
605 static int def_timeout = 0;
607 static char fio_version_string[] = "fio 1.13";
/* Array of job-file paths collected from argv by parse_cmd_line(). */
609 static char **ini_file;
610 static int max_jobs = MAX_JOBS;
/* Template job holding defaults; cloned for every real job. */
612 struct thread_data def_thread;
/* Shared-memory array of all jobs (see setup_thread_area()). */
613 struct thread_data *threads = NULL;
615 int exitall_on_terminate = 0;
616 int terse_output = 0;
617 unsigned long long mlock_size = 0;
621 static int write_lat_log = 0;
622 int write_bw_log = 0;
635 * Return a free job structure.
/*
 * Hand out the next unused slot in the shared threads[] array, or fail when
 * max_jobs is reached.  thread_number is 1-based after the post-increment.
 * NOTE(review): the global/parent initialization lines and the return are
 * elided from this extract.
 */
637 static struct thread_data *get_new_job(int global, struct thread_data *parent)
639 struct thread_data *td;
643 if (thread_number >= max_jobs)
646 td = &threads[thread_number++];
649 td->thread_number = thread_number;
/*
 * Release a job slot obtained from get_new_job(): print any pending error
 * text and zero the slot.  The def_thread template is never released.
 * NOTE(review): the thread_number decrement is elided from this extract.
 */
653 static void put_job(struct thread_data *td)
655 if (td == &def_thread)
659 fprintf(f_out, "fio: %s\n", td->verror);
661 memset(&threads[td->thread_number - 1], 0, sizeof(*td));
666 * Lazy way of fixing up options that depend on each other. We could also
667 * define option callback handlers, but this is easier.
/*
 * Reconcile interdependent options after parsing: derive rwmixread from
 * rwmixwrite, resolve iolog conflicts, fill block-size min/max defaults,
 * disable incompatible combinations, and clamp queue-depth knobs.
 */
669 static void fixup_options(struct thread_data *td)
671 if (!td->rwmixread && td->rwmixwrite)
672 td->rwmixread = 100 - td->rwmixwrite;
/* A read iolog replays a fixed pattern, so writing one makes no sense. */
674 if (td->write_iolog_file && td->read_iolog_file) {
675 log_err("fio: read iolog overrides write_iolog\n");
676 free(td->write_iolog_file);
677 td->write_iolog_file = NULL;
680 if (td->io_ops->flags & FIO_SYNCIO)
684 td->iodepth = td->open_files;
688 * only really works for sequential io for now, and with 1 file
690 if (td->zone_size && td_random(td) && td->open_files == 1)
694 * Reads can do overwrites, we always need to pre-create the file
696 if (td_read(td) || td_rw(td))
/* Fall back to the plain bs value for any unset min/max block size. */
699 if (!td->min_bs[DDIR_READ])
700 td->min_bs[DDIR_READ]= td->bs[DDIR_READ];
701 if (!td->max_bs[DDIR_READ])
702 td->max_bs[DDIR_READ] = td->bs[DDIR_READ];
703 if (!td->min_bs[DDIR_WRITE])
704 td->min_bs[DDIR_WRITE]= td->bs[DDIR_WRITE];
705 if (!td->max_bs[DDIR_WRITE])
706 td->max_bs[DDIR_WRITE] = td->bs[DDIR_WRITE];
708 td->rw_min_bs = min(td->min_bs[DDIR_READ], td->min_bs[DDIR_WRITE]);
710 if (td_read(td) && !td_rw(td))
/* Verification depends on the random map to avoid duplicate blocks. */
713 if (td->norandommap && td->verify != VERIFY_NONE) {
714 log_err("fio: norandommap given, verify disabled\n");
715 td->verify = VERIFY_NONE;
717 if (td->bs_unaligned && (td->odirect || td->io_ops->flags & FIO_RAWIO))
718 log_err("fio: bs_unaligned may not work with raw io\n");
721 * O_DIRECT and char doesn't mix, clear that flag if necessary.
723 if (td->filetype == FIO_TYPE_CHAR && td->odirect)
727 * thinktime_spin must be less than thinktime
729 if (td->thinktime_spin > td->thinktime)
730 td->thinktime_spin = td->thinktime;
733 * The low water mark cannot be bigger than the iodepth
735 if (td->iodepth_low > td->iodepth || !td->iodepth_low) {
737 * syslet work around - if the workload is sequential,
738 * we want to let the queue drain all the way down to
739 * avoid seeking between async threads
741 if (!strcmp(td->io_ops->name, "syslet-rw") && !td_random(td))
744 td->iodepth_low = td->iodepth;
748 * If batch number isn't set, default to the same as iodepth
750 if (td->iodepth_batch > td->iodepth || !td->iodepth_batch)
751 td->iodepth_batch = td->iodepth;
753 if (td->open_files > td->nr_files || !td->open_files)
754 td->open_files = td->nr_files;
758 * This function leaks the buffer
/*
 * Format val as a human-readable size with a K/M/G/... suffix into a freshly
 * malloc'd string.  Callers never free it (acceptable for one-shot job-setup
 * output, per the comment above).  NOTE(review): the loop that scales val and
 * advances *p, and the malloc NULL check, are elided from this extract.
 */
760 static char *to_kmg(unsigned int val)
762 char *buf = malloc(32);
763 char post[] = { 0, 'K', 'M', 'G', 'P', 'E', 0 };
774 snprintf(buf, 31, "%u%c", val, *p);
778 /* External engines are specified by "external:name.o") */
/*
 * Return the engine name portion of an "external:/path/to/engine.o" string,
 * or (per the elided fallthrough) presumably the input itself when there is
 * no ':' -- TODO confirm against upstream.
 */
779 static const char *get_engine_name(const char *str)
781 char *p = strstr(str, ":");
787 strip_blank_front(&p);
793 * Adds a job to the list of things todo. Sanitizes the various options
794 * to make sure we don't have conflicts, and initializes various
/*
 * Finalize one parsed job: load its IO engine, classify the target
 * (file/block/char device), build the per-file array and names, seed stats,
 * print the job banner, and recursively clone the job numjobs-1 times.
 * Returns non-zero on failure (per the elided error paths).
 */
797 static int add_job(struct thread_data *td, const char *jobname, int job_add_num)
/* Indexed by td->td_ddir for the banner line printed below. */
799 const char *ddir_str[] = { NULL, "read", "write", "rw", NULL,
800 "randread", "randwrite", "randrw" };
807 * the def_thread is just for options, it's not a real job
809 if (td == &def_thread)
812 engine = get_engine_name(td->ioengine);
813 td->io_ops = load_ioengine(td, engine);
815 log_err("fio: failed to load engine %s\n", engine);
820 td->io_ops->flags |= FIO_RAWIO;
/* Classify target: plain file unless lstat says block or char device. */
822 td->filetype = FIO_TYPE_FILE;
823 if (td->filename && !lstat(td->filename, &sb)) {
824 if (S_ISBLK(sb.st_mode))
825 td->filetype = FIO_TYPE_BD;
826 else if (S_ISCHR(sb.st_mode))
827 td->filetype = FIO_TYPE_CHAR;
833 td->nr_uniq_files = 1;
835 td->nr_uniq_files = td->open_files;
837 if (td->filetype == FIO_TYPE_FILE || td->filename) {
/* Prefix generated file names with the job directory, when given. */
841 if (td->directory && td->directory[0] != '\0') {
842 if (lstat(td->directory, &sb) < 0) {
843 log_err("fio: %s is not a directory\n", td->directory);
844 td_verror(td, errno, "lstat");
847 if (!S_ISDIR(sb.st_mode)) {
848 log_err("fio: %s is not a directory\n", td->directory);
851 len = sprintf(tmp, "%s/", td->directory);
854 td->files = malloc(sizeof(struct fio_file) * td->open_files);
856 for_each_file(td, f, i) {
857 memset(f, 0, sizeof(*f));
/* Explicit filename wins; otherwise synthesize jobname.thread.index. */
861 sprintf(tmp + len, "%s", td->filename);
863 sprintf(tmp + len, "%s.%d.%d", jobname, td->thread_number, i);
864 f->file_name = strdup(tmp);
/* Device/raw target: a single file named after the job. */
867 td->open_files = td->nr_files = 1;
868 td->files = malloc(sizeof(struct fio_file));
871 memset(f, 0, sizeof(*f));
873 f->file_name = strdup(jobname);
/* Total size is split evenly across the job's files. */
876 for_each_file(td, f, i) {
877 f->file_size = td->total_file_size / td->nr_files;
878 f->file_offset = td->start_offset;
881 td->mutex = fio_sem_init(0);
/* Stat minimums start at ULONG_MAX so the first sample always wins. */
883 td->ts.clat_stat[0].min_val = td->ts.clat_stat[1].min_val = ULONG_MAX;
884 td->ts.slat_stat[0].min_val = td->ts.slat_stat[1].min_val = ULONG_MAX;
885 td->ts.bw_stat[0].min_val = td->ts.bw_stat[1].min_val = ULONG_MAX;
887 if (td->stonewall && td->thread_number > 1)
890 td->groupid = groupid;
895 if (td->write_lat_log) {
896 setup_log(&td->ts.slat_log);
897 setup_log(&td->ts.clat_log);
899 if (td->write_bw_log)
900 setup_log(&td->ts.bw_log);
903 td->name = strdup(jobname);
907 if (!strcmp(td->io_ops->name, "cpuio"))
908 fprintf(f_out, "%s: ioengine=cpu, cpuload=%u, cpucycle=%u\n", td->name, td->cpuload, td->cpucycle);
910 char *c1, *c2, *c3, *c4;
/* to_kmg() returns leaked buffers by design (see its header comment). */
912 c1 = to_kmg(td->min_bs[DDIR_READ]);
913 c2 = to_kmg(td->max_bs[DDIR_READ]);
914 c3 = to_kmg(td->min_bs[DDIR_WRITE]);
915 c4 = to_kmg(td->max_bs[DDIR_WRITE]);
917 fprintf(f_out, "%s: (g=%d): rw=%s, bs=%s-%s/%s-%s, ioengine=%s, iodepth=%u\n", td->name, td->groupid, ddir_str[td->td_ddir], c1, c2, c3, c4, td->io_ops->name, td->iodepth);
924 } else if (job_add_num == 1)
925 fprintf(f_out, "...\n");
929 * recurse add identical jobs, clear numjobs and stonewall options
930 * as they don't apply to sub-jobs
932 numjobs = td->numjobs;
934 struct thread_data *td_new = get_new_job(0, td);
940 td_new->stonewall = 0;
941 job_add_num = numjobs - 1;
943 if (add_job(td_new, jobname, job_add_num))
953 * Initialize the various random states we need (random io, block size ranges,
954 * read/write mix, etc).
/*
 * Seed all per-job PRNG states from /dev/urandom and allocate the per-file
 * block bitmaps used to avoid duplicate random IO.  Returns non-zero on
 * open/read failure (per the elided error returns).
 */
956 int init_random_state(struct thread_data *td)
958 unsigned long seeds[5];
959 int fd, num_maps, blocks, i;
962 if (td->io_ops->flags & FIO_DISKLESSIO)
965 fd = open("/dev/urandom", O_RDONLY);
967 td_verror(td, errno, "open");
/* Require the full seed block; a short read is treated as an IO error. */
971 if (read(fd, seeds, sizeof(seeds)) < (int) sizeof(seeds)) {
972 td_verror(td, EIO, "read");
979 os_random_seed(seeds[0], &td->bsrange_state);
980 os_random_seed(seeds[1], &td->verify_state);
981 os_random_seed(seeds[2], &td->rwmix_state);
983 if (td->file_service_type == FIO_FSERVICE_RANDOM)
984 os_random_seed(seeds[3], &td->next_file_state);
/* randrepeat: derive a deterministic per-thread seed instead of urandom's. */
989 if (td->rand_repeatable)
990 seeds[4] = FIO_RANDSEED * td->thread_number;
992 if (!td->norandommap) {
993 for_each_file(td, f, i) {
/* One bit per rw_min_bs block, grouped BLOCKS_PER_MAP bits per long. */
994 blocks = (f->real_file_size + td->rw_min_bs - 1) / td->rw_min_bs;
995 num_maps = (blocks + BLOCKS_PER_MAP-1)/ BLOCKS_PER_MAP;
996 f->file_map = malloc(num_maps * sizeof(long));
997 f->num_maps = num_maps;
998 memset(f->file_map, 0, num_maps * sizeof(long));
1002 os_random_seed(seeds[4], &td->random_state);
/*
 * Translate a bitmask of CPU numbers (one bit per CPU in an int) into the
 * OS cpu-set type.  No-op when CPU affinity support is compiled out.
 * NOTE(review): the per-bit test inside the loop is elided from this extract.
 */
1006 static void fill_cpu_mask(os_cpu_mask_t cpumask, int cpu)
1008 #ifdef FIO_HAVE_CPU_AFFINITY
1013 for (i = 0; i < sizeof(int) * 8; i++) {
1015 CPU_SET(i, &cpumask);
/*
 * Return non-zero when a job-file line carries no content worth parsing
 * (blank/control characters only; the comment-character check is elided
 * from this extract).  NOTE(review): strlen() in the loop condition is
 * re-evaluated every iteration -- harmless for config lines, but O(n^2).
 */
1020 static int is_empty_or_comment(char *line)
1024 for (i = 0; i < strlen(line); i++) {
1029 if (!isspace(line[i]) && !iscntrl(line[i]))
1037 * Check if mmap/mmaphuge has a :/foo/bar/file at the end. If so, return that.
/*
 * Extract the optional backing-file path from "mmap:/path" style values;
 * presumably returns NULL when no ':' is present (return lines elided).
 */
1039 static char *get_mmap_file(const char *str)
1041 char *p = strstr(str, ":");
1047 strip_blank_front(&p);
/*
 * Option callback for "mem": for mmap-backed buffer types, capture the
 * optional backing-file path.  mmaphuge requires an explicit file, so a
 * missing path is rejected with a usage hint.
 */
1052 static int str_mem_cb(void *data, const char *mem)
1054 struct thread_data *td = data;
1056 if (td->mem_type == MEM_MMAPHUGE || td->mem_type == MEM_MMAP) {
1057 td->mmapfile = get_mmap_file(mem);
1058 if (td->mem_type == MEM_MMAPHUGE && !td->mmapfile) {
1059 log_err("fio: mmaphuge:/path/to/file\n");
/*
 * Remaining option callbacks.  NOTE(review): bodies are partially elided
 * from this extract; comments describe only the visible statements.
 */
/* "lockmem": value is stored in the global mlock_size (assignment elided). */
1067 static int str_lockmem_cb(void fio_unused *data, unsigned long *val)
1073 #ifdef FIO_HAVE_IOPRIO
/* "prioclass": OR the class bits into the job's ioprio word. */
1074 static int str_prioclass_cb(void *data, unsigned int *val)
1076 struct thread_data *td = data;
1078 td->ioprio |= *val << IOPRIO_CLASS_SHIFT;
/* "prio": presumably ORs the priority value into td->ioprio (body elided). */
1082 static int str_prio_cb(void *data, unsigned int *val)
1084 struct thread_data *td = data;
/* "exitall": flips the global that terminates all jobs when one exits. */
1091 static int str_exitall_cb(void)
1093 exitall_on_terminate = 1;
/* "cpumask": expand the integer bitmask into the job's OS CPU set. */
1097 static int str_cpumask_cb(void *data, unsigned int *val)
1099 struct thread_data *td = data;
1101 fill_cpu_mask(td->cpumask, *val);
1106 * This is our [ini] type file parser.
/*
 * Parse one job file: each "[name]" section becomes a job cloned from
 * def_thread, its options are applied line by line, and the completed job is
 * registered via add_job().  stonewall_flag inserts a barrier between
 * successive job files.  Returns non-zero if any option or job failed.
 */
1108 static int parse_jobs_ini(char *file, int stonewall_flag)
1110 unsigned int global;
1111 struct thread_data *td;
1112 char *string, *name;
1116 int ret = 0, stonewall;
1118 f = fopen(file, "r");
1120 perror("fopen job file");
1124 string = malloc(4096);
1126 memset(name, 0, 256);
1128 stonewall = stonewall_flag;
/* Scan for the next "[section]" header, skipping blanks and comments. */
1130 p = fgets(string, 4095, f)
1133 if (is_empty_or_comment(p))
1135 if (sscanf(p, "[%255s]", name) != 1)
1138 global = !strncmp(name, "global", 6);
/* Drop the trailing ']' captured by the %s scan above. */
1140 name[strlen(name) - 1] = '\0';
1142 td = get_new_job(global, &def_thread);
1149 * Seperate multiple job files by a stonewall
1151 if (!global && stonewall) {
1152 td->stonewall = stonewall;
/* Consume option lines for this section until the next header/EOF. */
1157 while ((p = fgets(string, 4096, f)) != NULL) {
1158 if (is_empty_or_comment(p))
1161 strip_blank_front(&p);
1171 * Don't break here, continue parsing options so we
1172 * dump all the bad ones. Makes trial/error fixups
1173 * easier on the user.
1175 ret |= parse_option(p, options, td);
1180 ret = add_job(td, name, 0);
1182 log_err("fio: job %s dropped\n", name);
/*
 * Initialize the def_thread template that every job inherits from: zero it,
 * capture the current CPU affinity, apply option defaults, then copy in the
 * command-line-level settings (timeout, log flags, disk-util).
 */
1193 static int fill_def_thread(void)
1195 memset(&def_thread, 0, sizeof(def_thread));
1197 if (fio_getaffinity(getpid(), &def_thread.cpumask) == -1) {
1198 perror("sched_getaffinity");
1203 * fill default options
1205 fill_default_options(&def_thread, options);
1207 def_thread.timeout = def_timeout;
1208 def_thread.write_bw_log = write_bw_log;
1209 def_thread.write_lat_log = write_lat_log;
1211 #ifdef FIO_HAVE_DISK_UTIL
1212 def_thread.do_disk_util = 1;
/* Print the command-line switch summary (version line first) to stdout. */
1218 static void usage(void)
1220 printf("%s\n", fio_version_string);
1221 printf("\t--output\tWrite output to file\n");
1222 printf("\t--timeout\tRuntime in seconds\n");
1223 printf("\t--latency-log\tGenerate per-job latency logs\n");
1224 printf("\t--bandwidth-log\tGenerate per-job bandwidth logs\n");
1225 printf("\t--minimal\tMinimal (terse) output\n");
1226 printf("\t--version\tPrint version info and exit\n");
1227 printf("\t--help\t\tPrint this page\n");
1228 printf("\t--cmdhelp=cmd\tPrint command help, \"all\" for all of them\n");
/*
 * Parse argv: handle fio-only switches directly, and route FIO_GETOPT_JOB
 * entries (added by dupe_job_options()) through the job option parser so
 * full jobs can be built on the command line.  Remaining positional args
 * are collected into ini_file[] as job files; returns their count (per the
 * elided return).
 */
1231 static int parse_cmd_line(int argc, char *argv[])
1233 struct thread_data *td = NULL;
1234 int c, ini_idx = 0, lidx, ret, dont_add_job = 0;
1236 while ((c = getopt_long_only(argc, argv, "", long_options, &lidx)) != -1) {
1239 def_timeout = atoi(optarg);
1248 f_out = fopen(optarg, "w+");
1250 perror("fopen output");
1262 ret = show_cmd_help(options, optarg);
1265 printf("%s\n", fio_version_string);
1267 case FIO_GETOPT_JOB: {
1268 const char *opt = long_options[lidx].name;
/* A new "name=" starts the next job; flush the one built so far. */
1271 if (!strncmp(opt, "name", 4) && td) {
1272 ret = add_job(td, td->name ?: "fio", 0);
1280 int global = !strncmp(val, "global", 6);
1282 td = get_new_job(global, &def_thread);
1287 ret = parse_cmd_option(opt, val, options, td);
/* Flush any trailing command-line job that was never terminated. */
1301 ret = add_job(td, td->name ?: "fio", 0);
1307 while (optind < argc) {
/*
 * NOTE(review): classic realloc leak-on-failure pattern (result assigned
 * straight back to ini_file); also unchecked.  Left as-is in this extract.
 */
1309 ini_file = realloc(ini_file, ini_idx * sizeof(char *));
1310 ini_file[ini_idx - 1] = strdup(argv[optind]);
/*
 * Detach and remove the shared-memory segment holding threads[]
 * (created by setup_thread_area()).
 */
1317 static void free_shm(void)
1319 struct shmid_ds sbuf;
1322 shmdt((void *) threads);
1324 shmctl(shm_id, IPC_RMID, &sbuf);
1329 * The thread area is shared between the main process and the job
1330 * threads/processes. So setup a shared memory segment that will hold
/*
 * Allocate threads[] in a SysV shared-memory segment so forked jobs and the
 * main process see the same state.  On segment-too-large failures the elided
 * retry path presumably scales max_jobs down -- TODO confirm.
 */
1333 static int setup_thread_area(void)
1336 * 1024 is too much on some machines, scale max_jobs if
1337 * we get a failure that looks like too large a shm segment
1340 size_t size = max_jobs * sizeof(struct thread_data);
1342 shm_id = shmget(0, size, IPC_CREAT | 0600);
1345 if (errno != EINVAL) {
1356 threads = shmat(shm_id, NULL, 0);
1357 if (threads == (void *) -1) {
1367 * Copy the fio options into the long options map, so we mirror
1368 * job and cmd line options.
/*
 * Append one getopt entry per job option after the static fio-only switches.
 * Each mirrored entry gets .val = FIO_GETOPT_JOB so parse_cmd_line() can
 * route it to the job option parser; flag-style options take no argument.
 */
1370 static void dupe_job_options(void)
1372 struct fio_option *o;
/* Find the first free slot after the statically initialized entries. */
1376 while (long_options[i].name)
1381 long_options[i].name = o->name;
1382 long_options[i].val = FIO_GETOPT_JOB;
1383 if (o->type == FIO_OPT_STR_SET)
1384 long_options[i].has_arg = no_argument;
1386 long_options[i].has_arg = required_argument;
/* Guard against overflowing the FIO_CMD_OPTS headroom. */
1390 assert(i < FIO_JOB_OPTS + FIO_CMD_OPTS);
1394 int parse_options(int argc, char *argv[])
1401 options_init(options);
1405 if (setup_thread_area())
1407 if (fill_def_thread())
1410 job_files = parse_cmd_line(argc, argv);
1412 for (i = 0; i < job_files; i++) {
1413 if (fill_def_thread())
1415 if (parse_jobs_ini(ini_file[i], i))
1422 if (!thread_number) {
1423 log_err("No jobs defined(s)\n");