2 * This file contains job initialization and setup functions.
15 #include <sys/types.h>
21 #define FIO_RANDSEED (0xb1899bedUL)
/*
 * Byte offset of a struct thread_data member, computed via the classic
 * null-pointer offsetof idiom. Used by the .off1/.off2/... fields of the
 * option table below so generic parsing code can store parsed values
 * directly into a thread_data.
 */
23 #define td_var_offset(var) ((size_t) &((struct thread_data *)0)->var)
/* Option-parsing callbacks; definitions appear later in this file. */
25 static int str_rw_cb(void *, const char *);
26 static int str_ioengine_cb(void *, const char *);
27 static int str_mem_cb(void *, const char *);
28 static int str_verify_cb(void *, const char *);
29 static int str_lockmem_cb(void *, unsigned long *);
30 #ifdef FIO_HAVE_IOPRIO
31 static int str_prio_cb(void *, unsigned int *);
32 static int str_prioclass_cb(void *, unsigned int *);
/* NOTE(review): the matching #endif for FIO_HAVE_IOPRIO is not visible in this chunk. */
34 static int str_exitall_cb(void);
35 static int str_cpumask_cb(void *, unsigned int *);
/* Two-level stringify: __stringify(FOO) expands FOO first, then quotes it. */
37 #define __stringify_1(x) #x
38 #define __stringify(x) __stringify_1(x)
41 * Map of job/command line options
/*
 * Job option table. Each entry names one job-file/command-line option and
 * either a thread_data offset (td_var_offset) that the generic parser fills
 * in, or a callback (.cb) for options needing custom handling.
 * NOTE(review): many entries' surrounding braces and some .name/.type lines
 * are not visible in this chunk of the file.
 */
43 static struct fio_option options[] = {
45 .name = "description",
46 .type = FIO_OPT_STR_STORE,
47 .off1 = td_var_offset(description),
48 .help = "Text job description",
52 .type = FIO_OPT_STR_STORE,
53 .off1 = td_var_offset(name),
54 .help = "Name of this job",
58 .type = FIO_OPT_STR_STORE,
59 .off1 = td_var_offset(directory),
60 .help = "Directory to store files in",
64 .type = FIO_OPT_STR_STORE,
65 .off1 = td_var_offset(filename),
66 .help = "Force the use of a specific file",
/* IO direction: parsed by str_rw_cb (defined below). */
72 .help = "IO direction",
74 .posval = { "read", "write", "randwrite", "randread", "rw",
/* IO engine selection goes through a callback so the engine can be loaded. */
80 .cb = str_ioengine_cb,
81 .help = "IO engine to use",
83 .posval = { "sync", "libaio", "posixaio", "mmap", "splice",
84 "sg", "null", "net", "syslet-rw" },
89 .off1 = td_var_offset(iodepth),
90 .help = "Amount of IO buffers to keep in flight",
94 .name = "iodepth_low",
96 .off1 = td_var_offset(iodepth_low),
97 .help = "Low water mark for queuing depth",
101 .type = FIO_OPT_STR_VAL,
102 .off1 = td_var_offset(total_file_size),
103 .help = "Size of device or file",
/* bs: one parsed value stored to both the read and write block sizes. */
107 .type = FIO_OPT_STR_VAL_INT,
108 .off1 = td_var_offset(bs[DDIR_READ]),
109 .off2 = td_var_offset(bs[DDIR_WRITE]),
110 .help = "Block size unit",
/* bsrange: a range option filling min/max for both data directions. */
115 .type = FIO_OPT_RANGE,
116 .off1 = td_var_offset(min_bs[DDIR_READ]),
117 .off2 = td_var_offset(max_bs[DDIR_READ]),
118 .off3 = td_var_offset(min_bs[DDIR_WRITE]),
119 .off4 = td_var_offset(max_bs[DDIR_WRITE]),
120 .help = "Set block size range (in more detail than bs)",
123 .name = "bs_unaligned",
124 .type = FIO_OPT_STR_SET,
125 .off1 = td_var_offset(bs_unaligned),
126 .help = "Don't sector align IO buffer sizes",
130 .type = FIO_OPT_STR_VAL,
131 .off1 = td_var_offset(start_offset),
132 .help = "Start IO from this offset",
136 .name = "randrepeat",
137 .type = FIO_OPT_BOOL,
138 .off1 = td_var_offset(rand_repeatable),
139 .help = "Use repeatable random IO pattern",
143 .name = "norandommap",
144 .type = FIO_OPT_STR_SET,
145 .off1 = td_var_offset(norandommap),
146 .help = "Accept potential duplicate random blocks",
151 .off1 = td_var_offset(nr_files),
152 .help = "Split job workload between this number of files",
158 .off1 = td_var_offset(fsync_blocks),
159 .help = "Issue fsync for writes every given number of blocks",
/* direct and buffered both map to the odirect field (one negates the other). */
164 .type = FIO_OPT_BOOL,
165 .off1 = td_var_offset(odirect),
166 .help = "Use O_DIRECT IO (negates buffered)",
171 .type = FIO_OPT_BOOL,
172 .off1 = td_var_offset(odirect),
174 .help = "Use buffered IO (negates direct)",
179 .type = FIO_OPT_BOOL,
180 .off1 = td_var_offset(overwrite),
181 .help = "When writing, set whether to overwrite current data",
187 .off1 = td_var_offset(loops),
188 .help = "Number of times to run the job",
194 .off1 = td_var_offset(numjobs),
195 .help = "Duplicate this job this many times",
199 .name = "startdelay",
201 .off1 = td_var_offset(start_delay),
202 .help = "Only start job when this period has passed",
208 .type = FIO_OPT_STR_VAL_TIME,
209 .off1 = td_var_offset(timeout),
210 .help = "Stop workload when this amount of time has passed",
217 .help = "Backing type for IO buffers",
219 .posval = { "malloc", "shm", "shmhuge", "mmap", "mmaphuge", },
225 .help = "Verify sum function",
227 .posval = { "crc32", "md5", },
230 .name = "write_iolog",
231 .type = FIO_OPT_STR_STORE,
232 .off1 = td_var_offset(write_iolog_file),
233 .help = "Store IO pattern to file",
236 .name = "read_iolog",
237 .type = FIO_OPT_STR_STORE,
238 .off1 = td_var_offset(read_iolog_file),
239 .help = "Playback IO pattern from file",
242 .name = "exec_prerun",
243 .type = FIO_OPT_STR_STORE,
244 .off1 = td_var_offset(exec_prerun),
245 .help = "Execute this file prior to running job",
248 .name = "exec_postrun",
249 .type = FIO_OPT_STR_STORE,
250 .off1 = td_var_offset(exec_postrun),
251 .help = "Execute this file after running job",
253 #ifdef FIO_HAVE_IOSCHED_SWITCH
255 .name = "ioscheduler",
256 .type = FIO_OPT_STR_STORE,
257 .off1 = td_var_offset(ioscheduler),
258 .help = "Use this IO scheduler on the backing device",
263 .type = FIO_OPT_STR_VAL,
264 .off1 = td_var_offset(zone_size),
265 .help = "Give size of an IO zone",
270 .type = FIO_OPT_STR_VAL,
271 .off1 = td_var_offset(zone_skip),
272 .help = "Space between IO zones",
/* lockmem uses a callback because it sets the global mlock_size. */
277 .type = FIO_OPT_STR_VAL,
278 .cb = str_lockmem_cb,
279 .help = "Lock down this amount of memory",
283 .name = "rwmixcycle",
285 .off1 = td_var_offset(rwmixcycle),
286 .help = "Cycle period for mixed read/write workloads (msec)",
292 .off1 = td_var_offset(rwmixread),
294 .help = "Percentage of mixed workload that is reads",
298 .name = "rwmixwrite",
300 .off1 = td_var_offset(rwmixwrite),
302 .help = "Percentage of mixed workload that is writes",
308 .off1 = td_var_offset(nice),
309 .help = "Set job CPU nice value",
/* IO priority options only exist on platforms with ioprio support. */
314 #ifdef FIO_HAVE_IOPRIO
319 .help = "Set job IO priority value",
326 .cb = str_prioclass_cb,
327 .help = "Set job IO priority class",
335 .off1 = td_var_offset(thinktime),
336 .help = "Idle time between IO buffers (usec)",
340 .name = "thinktime_spin",
342 .off1 = td_var_offset(thinktime_spin),
343 .help = "Start thinktime by spinning this amount (usec)",
347 .name = "thinktime_blocks",
349 .off1 = td_var_offset(thinktime_blocks),
350 .help = "IO buffer period between 'thinktime'",
356 .off1 = td_var_offset(rate),
357 .help = "Set bandwidth rate",
362 .off1 = td_var_offset(ratemin),
363 .help = "The bottom limit accepted",
368 .off1 = td_var_offset(ratecycle),
369 .help = "Window average for rate limits (msec)",
373 .name = "invalidate",
374 .type = FIO_OPT_BOOL,
375 .off1 = td_var_offset(invalidate_cache),
376 .help = "Invalidate buffer/page cache prior to running job",
381 .type = FIO_OPT_BOOL,
382 .off1 = td_var_offset(sync_io),
383 .help = "Use O_SYNC for buffered writes",
389 .off1 = td_var_offset(bw_avg_time),
390 .help = "Time window over which to calculate bandwidth (msec)",
394 .name = "create_serialize",
395 .type = FIO_OPT_BOOL,
396 .off1 = td_var_offset(create_serialize),
397 .help = "Serialize creating of job files",
401 .name = "create_fsync",
402 .type = FIO_OPT_BOOL,
403 .off1 = td_var_offset(create_fsync),
404 .help = "Fsync file after creation",
405 .def = "1",
410 .off1 = td_var_offset(cpuload),
411 .help = "Use this percentage of CPU",
416 .off1 = td_var_offset(cpucycle),
417 .help = "Length of the CPU burn cycles",
419 #ifdef FIO_HAVE_CPU_AFFINITY
423 .cb = str_cpumask_cb,
424 .help = "CPU affinity mask",
429 .type = FIO_OPT_BOOL,
430 .off1 = td_var_offset(end_fsync),
431 .help = "Include fsync at the end of job",
436 .type = FIO_OPT_BOOL,
437 .off1 = td_var_offset(unlink),
438 .help = "Unlink created files after job has completed",
443 .type = FIO_OPT_STR_SET,
444 .cb = str_exitall_cb,
445 .help = "Terminate all jobs when one exits",
449 .type = FIO_OPT_STR_SET,
450 .off1 = td_var_offset(stonewall),
451 .help = "Insert a hard barrier between this job and previous",
455 .type = FIO_OPT_STR_SET,
456 .off1 = td_var_offset(use_thread),
457 .help = "Use threads instead of forks",
460 .name = "write_bw_log",
461 .type = FIO_OPT_STR_SET,
462 .off1 = td_var_offset(write_bw_log),
463 .help = "Write log of bandwidth during run",
466 .name = "write_lat_log",
467 .type = FIO_OPT_STR_SET,
468 .off1 = td_var_offset(write_lat_log),
469 .help = "Write log of latency during run",
472 .name = "hugepage-size",
473 .type = FIO_OPT_STR_VAL,
474 .off1 = td_var_offset(hugepage_size),
475 .help = "When using hugepages, specify size of each page",
476 .def = __stringify(FIO_HUGE_PAGE),
/* Number of entries in the job option table above. */
483 #define FIO_JOB_OPTS (sizeof(options) / sizeof(struct fio_option))
/* Reserved slots in long_options for fio's own command-line-only flags. */
484 #define FIO_CMD_OPTS (16)
/* Sentinel getopt_long .val marking an entry mirrored from the job options. */
485 #define FIO_GETOPT_JOB (0x89988998)
488 * Command line options. These will contain the above, plus a few
489 * extra that only pertain to fio itself and not jobs.
/*
 * Sized to hold every job option (copied in by dupe_job_options()) plus the
 * fio-only entries initialized here.
 * NOTE(review): the .name/.val fields of most entries below are not visible
 * in this chunk; only the .has_arg lines survived.
 */
491 static struct option long_options[FIO_JOB_OPTS + FIO_CMD_OPTS] = {
494 .has_arg = required_argument,
499 .has_arg = required_argument,
503 .name = "latency-log",
504 .has_arg = required_argument,
508 .name = "bandwidth-log",
509 .has_arg = required_argument,
514 .has_arg = optional_argument,
519 .has_arg = no_argument,
524 .has_arg = no_argument,
529 .has_arg = required_argument,
/* Default job timeout (seconds); settable via the --timeout command line flag. */
537 static int def_timeout = 0;
539 static char fio_version_string[] = "fio 1.11";
/* List of job .ini files collected from the command line (see parse_cmd_line). */
541 static char **ini_file;
542 static int max_jobs = MAX_JOBS;
/* Template thread holding default option values; real jobs are cloned from it. */
544 struct thread_data def_thread;
/* Shared-memory array of all job slots; allocated in setup_thread_area(). */
545 struct thread_data *threads = NULL;
547 int exitall_on_terminate = 0;
548 int terse_output = 0;
549 unsigned long long mlock_size = 0;
/* Defaults for per-job log flags, set by --latency-log / --bandwidth-log. */
553 static int write_lat_log = 0;
554 int write_bw_log = 0;
557 * Return a free job structure.
559 static struct thread_data *get_new_job(int global, struct thread_data *parent)
561 struct thread_data *td;
/* Refuse to hand out a slot beyond the shared-memory area's capacity. */
565 if (thread_number >= max_jobs)
568 td = &threads[thread_number++];
/* Thread numbers are 1-based: slot index + 1. */
571 td->thread_number = thread_number;
/* Return a job slot to the pool, reporting any pending job error first. */
575 static void put_job(struct thread_data *td)
/* The global defaults holder is never part of the slot array; don't recycle it. */
577 if (td == &def_thread)
581 fprintf(f_out, "fio: %s\n", td->verror);
/* thread_number is 1-based, hence the -1 when indexing the slot array. */
583 memset(&threads[td->thread_number - 1], 0, sizeof(*td));
588 * Lazy way of fixing up options that depend on each other. We could also
589 * define option callback handlers, but this is easier.
591 static void fixup_options(struct thread_data *td)
/* rwmixread/rwmixwrite are complementary percentages; derive the missing one. */
593 if (!td->rwmixread && td->rwmixwrite)
594 td->rwmixread = 100 - td->rwmixwrite;
/* Reading back an iolog while also writing one is contradictory; read wins. */
596 if (td->write_iolog_file && td->read_iolog_file) {
597 log_err("fio: read iolog overrides write_iolog\n");
598 free(td->write_iolog_file);
599 td->write_iolog_file = NULL;
602 if (td->io_ops->flags & FIO_SYNCIO)
606 td->iodepth = td->nr_files;
610 * only really works for sequential io for now, and with 1 file
612 if (td->zone_size && !td->sequential && td->nr_files == 1)
616 * Reads can do overwrites, we always need to pre-create the file
618 if (td_read(td) || td_rw(td))
/* Unset min/max block sizes default to the plain bs value, per direction. */
621 if (!td->min_bs[DDIR_READ])
622 td->min_bs[DDIR_READ]= td->bs[DDIR_READ];
623 if (!td->max_bs[DDIR_READ])
624 td->max_bs[DDIR_READ] = td->bs[DDIR_READ];
625 if (!td->min_bs[DDIR_WRITE])
626 td->min_bs[DDIR_WRITE]= td->bs[DDIR_WRITE];
627 if (!td->max_bs[DDIR_WRITE])
628 td->max_bs[DDIR_WRITE] = td->bs[DDIR_WRITE];
630 td->rw_min_bs = min(td->min_bs[DDIR_READ], td->min_bs[DDIR_WRITE]);
632 if (td_read(td) && !td_rw(td))
/* The random-block tracking map is what verify relies on; without it, no verify. */
635 if (td->norandommap && td->verify != VERIFY_NONE) {
636 log_err("fio: norandommap given, verify disabled\n");
637 td->verify = VERIFY_NONE;
639 if (td->bs_unaligned && (td->odirect || td->io_ops->flags & FIO_RAWIO))
640 log_err("fio: bs_unaligned may not work with raw io\n");
643 * O_DIRECT and char doesn't mix, clear that flag if necessary.
645 if (td->filetype == FIO_TYPE_CHAR && td->odirect)
649 * thinktime_spin must be less than thinktime
651 if (td->thinktime_spin > td->thinktime)
652 td->thinktime_spin = td->thinktime;
655 * The low water mark cannot be bigger than the iodepth
657 if (td->iodepth_low > td->iodepth || !td->iodepth_low)
658 td->iodepth_low = td->iodepth;
662 * This function leaks the buffer
/* Format val with a K/M/G/... suffix into a freshly malloc'ed string. */
664 static char *to_kmg(unsigned int val)
666 char *buf = malloc(32);
667 char post[] = { 0, 'K', 'M', 'G', 'P', 'E', 0 };
/* NOTE(review): the scaling loop that advances p is not visible in this chunk. */
678 snprintf(buf, 31, "%u%c", val, *p);
683 * Adds a job to the list of things todo. Sanitizes the various options
684 * to make sure we don't have conflicts, and initializes various
687 static int add_job(struct thread_data *td, const char *jobname, int job_add_num)
/* Indexed by ddir + (random << 1) + (iomix << 2) computed below. */
689 const char *ddir_str[] = { "read", "write", "randread", "randwrite",
690 "rw", NULL, "randrw" };
692 int numjobs, ddir, i;
696 * the def_thread is just for options, it's not a real job
698 if (td == &def_thread)
704 td->io_ops->flags |= FIO_RAWIO;
/* Classify the target: regular file by default, block/char device via lstat. */
706 td->filetype = FIO_TYPE_FILE;
707 if (td->filename && !lstat(td->filename, &sb)) {
708 if (S_ISBLK(sb.st_mode))
709 td->filetype = FIO_TYPE_BD;
710 else if (S_ISCHR(sb.st_mode))
711 td->filetype = FIO_TYPE_CHAR;
717 td->nr_uniq_files = 1;
719 td->nr_uniq_files = td->nr_files;
721 if (td->filetype == FIO_TYPE_FILE || td->filename) {
/* Validate the target directory and build the "dir/" path prefix in tmp. */
725 if (td->directory && td->directory[0] != '\0') {
726 if (lstat(td->directory, &sb) < 0) {
727 log_err("fio: %s is not a directory\n", td->directory);
728 td_verror(td, errno);
731 if (!S_ISDIR(sb.st_mode)) {
732 log_err("fio: %s is not a directory\n", td->directory);
735 len = sprintf(tmp, "%s/", td->directory);
738 td->files = malloc(sizeof(struct fio_file) * td->nr_files);
740 for_each_file(td, f, i) {
741 memset(f, 0, sizeof(*f));
/* Either the user-forced filename, or jobname.threadnum.filenum. */
745 sprintf(tmp + len, "%s", td->filename);
747 sprintf(tmp + len, "%s.%d.%d", jobname, td->thread_number, i);
748 f->file_name = strdup(tmp);
/* Device / no-filename case: a single fio_file named after the job. */
752 td->files = malloc(sizeof(struct fio_file));
755 memset(f, 0, sizeof(*f));
757 f->file_name = strdup(jobname);
/* Split the total size evenly over the files; same start offset for each. */
760 for_each_file(td, f, i) {
761 f->file_size = td->total_file_size / td->nr_files;
762 f->file_offset = td->start_offset;
765 fio_sem_init(&td->mutex, 0);
/* Min-trackers must start at the maximum so the first sample wins. */
767 td->ts.clat_stat[0].min_val = td->ts.clat_stat[1].min_val = ULONG_MAX;
768 td->ts.slat_stat[0].min_val = td->ts.slat_stat[1].min_val = ULONG_MAX;
769 td->ts.bw_stat[0].min_val = td->ts.bw_stat[1].min_val = ULONG_MAX;
771 if (td->stonewall && td->thread_number > 1)
774 td->groupid = groupid;
779 if (td->write_lat_log) {
780 setup_log(&td->ts.slat_log);
781 setup_log(&td->ts.clat_log);
783 if (td->write_bw_log)
784 setup_log(&td->ts.bw_log);
787 td->name = strdup(jobname);
/* Encode direction + randomness + mix into an index for ddir_str above. */
789 ddir = td->ddir + (!td->sequential << 1) + (td->iomix << 2);
793 if (td->io_ops->flags & FIO_CPUIO)
794 fprintf(f_out, "%s: ioengine=cpu, cpuload=%u, cpucycle=%u\n", td->name, td->cpuload, td->cpucycle);
796 char *c1, *c2, *c3, *c4;
/* to_kmg() allocates; the frees are presumably below (not visible here). */
798 c1 = to_kmg(td->min_bs[DDIR_READ]);
799 c2 = to_kmg(td->max_bs[DDIR_READ]);
800 c3 = to_kmg(td->min_bs[DDIR_WRITE]);
801 c4 = to_kmg(td->max_bs[DDIR_WRITE]);
803 fprintf(f_out, "%s: (g=%d): rw=%s, bs=%s-%s/%s-%s, ioengine=%s, iodepth=%u\n", td->name, td->groupid, ddir_str[ddir], c1, c2, c3, c4, td->io_ops->name, td->iodepth);
810 } else if (job_add_num == 1)
811 fprintf(f_out, "...\n");
815 * recurse add identical jobs, clear numjobs and stonewall options
816 * as they don't apply to sub-jobs
818 numjobs = td->numjobs;
820 struct thread_data *td_new = get_new_job(0, td);
826 td_new->stonewall = 0;
827 job_add_num = numjobs - 1;
829 if (add_job(td_new, jobname, job_add_num))
839 * Initialize the various random states we need (random io, block size ranges,
840 * read/write mix, etc).
842 int init_random_state(struct thread_data *td)
844 unsigned long seeds[4];
845 int fd, num_maps, blocks, i;
/* CPU burn jobs do no IO, so they need no random state. */
848 if (td->io_ops->flags & FIO_CPUIO)
/* Seed all generators from the kernel entropy pool. */
851 fd = open("/dev/urandom", O_RDONLY);
853 td_verror(td, errno);
857 if (read(fd, seeds, sizeof(seeds)) < (int) sizeof(seeds)) {
865 os_random_seed(seeds[0], &td->bsrange_state);
866 os_random_seed(seeds[1], &td->verify_state);
867 os_random_seed(seeds[2], &td->rwmix_state);
/* Repeatable runs replace the IO-offset seed with a fixed per-thread value. */
872 if (td->rand_repeatable)
873 seeds[3] = FIO_RANDSEED * td->thread_number;
/* Allocate the per-file bitmap tracking which blocks have been touched. */
875 if (!td->norandommap) {
876 for_each_file(td, f, i) {
877 blocks = (f->real_file_size + td->rw_min_bs - 1) / td->rw_min_bs;
878 num_maps = (blocks + BLOCKS_PER_MAP-1)/ BLOCKS_PER_MAP;
879 f->file_map = malloc(num_maps * sizeof(long));
880 f->num_maps = num_maps;
881 memset(f->file_map, 0, num_maps * sizeof(long));
885 os_random_seed(seeds[3], &td->random_state);
/*
 * Populate cpumask from the bitmask in 'cpu': each set bit selects that CPU.
 * No-op on platforms without CPU affinity support.
 */
889 static void fill_cpu_mask(os_cpu_mask_t cpumask, int cpumask, int cpu)
891 #ifdef FIO_HAVE_CPU_AFFINITY
/* Walk every bit position of an int. */
896 for (i = 0; i < sizeof(int) * 8; i++) {
898 CPU_SET(i, &cpumask);
/* Return non-zero if the ini line holds no content (blank or comment-only). */
903 static int is_empty_or_comment(char *line)
907 for (i = 0; i < strlen(line); i++) {
/* Any printable, non-whitespace character means the line has content. */
912 if (!isspace(line[i]) && !iscntrl(line[i]))
/*
 * Option callback for "rw": map the direction string (or legacy 0/1 digits)
 * onto td->ddir plus the sequential/iomix flags set in the elided lines.
 * Note the ordering: longer prefixes ("randread") are tested before their
 * shorter prefixes ("rw") so strncmp matching stays unambiguous.
 */
919 static int str_rw_cb(void *data, const char *mem)
921 struct thread_data *td = data;
923 if (!strncmp(mem, "read", 4) || !strncmp(mem, "0", 1)) {
924 td->ddir = DDIR_READ;
927 } else if (!strncmp(mem, "randread", 8)) {
928 td->ddir = DDIR_READ;
931 } else if (!strncmp(mem, "write", 5) || !strncmp(mem, "1", 1)) {
932 td->ddir = DDIR_WRITE;
935 } else if (!strncmp(mem, "randwrite", 9)) {
936 td->ddir = DDIR_WRITE;
939 } else if (!strncmp(mem, "rw", 2)) {
940 td->ddir = DDIR_READ;
944 } else if (!strncmp(mem, "randrw", 6)) {
945 td->ddir = DDIR_READ;
/* Fall-through: unrecognized value, report the accepted choices. */
951 log_err("fio: data direction: read, write, randread, randwrite, rw, randrw\n");
/*
 * Option callback for "verify": accepts md5/crc32 by name, plus legacy
 * numeric values 0 (off) and 1 (md5).
 */
955 static int str_verify_cb(void *data, const char *mem)
957 struct thread_data *td = data;
959 if (!strncmp(mem, "0", 1)) {
960 td->verify = VERIFY_NONE;
962 } else if (!strncmp(mem, "md5", 3) || !strncmp(mem, "1", 1)) {
963 td->verify = VERIFY_MD5;
965 } else if (!strncmp(mem, "crc32", 5)) {
966 td->verify = VERIFY_CRC32;
/* Unrecognized value: list the supported checksum types. */
970 log_err("fio: verify types: md5, crc32\n");
975 * Check if mmap/mmaphuge has a :/foo/bar/file at the end. If so, return that.
977 static char *get_mmap_file(const char *str)
/* Locate the ':' separator; everything after it is the backing file path. */
979 char *p = strstr(str, ":");
985 strip_blank_front(&p);
/*
 * Option callback for "mem": select the IO buffer backing type. Longer
 * prefixes (mmaphuge, shmhuge) are matched before their shorter forms
 * (mmap, shm). Hugepage variants require FIO_HAVE_HUGETLB.
 */
990 static int str_mem_cb(void *data, const char *mem)
992 struct thread_data *td = data;
994 if (!strncmp(mem, "malloc", 6)) {
995 td->mem_type = MEM_MALLOC;
997 } else if (!strncmp(mem, "mmaphuge", 8)) {
998 #ifdef FIO_HAVE_HUGETLB
1000 * mmaphuge must be appended with the actual file
1002 td->mmapfile = get_mmap_file(mem);
1003 if (!td->mmapfile) {
1004 log_err("fio: mmaphuge:/path/to/file\n");
1008 td->mem_type = MEM_MMAPHUGE;
1011 log_err("fio: mmaphuge not available\n");
1014 } else if (!strncmp(mem, "mmap", 4)) {
1016 * Check if the user wants file backed memory. It's ok
1017 * if there's no file given, we'll just use anon mmap then.
1019 td->mmapfile = get_mmap_file(mem);
1020 td->mem_type = MEM_MMAP;
1022 } else if (!strncmp(mem, "shmhuge", 7)) {
1023 #ifdef FIO_HAVE_HUGETLB
1024 td->mem_type = MEM_SHMHUGE;
1027 log_err("fio: shmhuge not available\n");
1030 } else if (!strncmp(mem, "shm", 3)) {
1031 td->mem_type = MEM_SHM;
/* Unrecognized value: list the supported backing types. */
1035 log_err("fio: mem type: malloc, shm, shmhuge, mmap, mmaphuge\n");
/* Option callback for "ioengine": resolve the engine name to an io_ops. */
1039 static int str_ioengine_cb(void *data, const char *str)
1041 struct thread_data *td = data;
1043 td->io_ops = load_ioengine(td, str);
/* Load failure: list built-in engines and note the dynamic-module option. */
1047 log_err("fio: ioengine= libaio, posixaio, sync, mmap, sgio, splice, cpu, null\n");
1048 log_err("fio: or specify path to dynamic ioengine module\n");
/*
 * Option callback for "lockmem". Body not visible in this chunk; presumably
 * stores *val into the global mlock_size — TODO confirm against full source.
 */
1052 static int str_lockmem_cb(void fio_unused *data, unsigned long *val)
1058 #ifdef FIO_HAVE_IOPRIO
/* Option callback for "prioclass": OR the class into the upper ioprio bits. */
1059 static int str_prioclass_cb(void *data, unsigned int *val)
1061 struct thread_data *td = data;
1063 td->ioprio |= *val << IOPRIO_CLASS_SHIFT;
/* Option callback for "prio": the priority-data update itself is not visible here. */
1067 static int str_prio_cb(void *data, unsigned int *val)
1069 struct thread_data *td = data;
/* Option callback for "exitall": flag that one job exiting terminates all. */
1076 static int str_exitall_cb(void)
1078 exitall_on_terminate = 1;
/* Option callback for "cpumask": expand the bitmask into td->cpumask. */
1082 static int str_cpumask_cb(void *data, unsigned int *val)
1084 struct thread_data *td = data;
1086 fill_cpu_mask(td->cpumask, *val);
1091 * This is our [ini] type file parser.
/*
 * Parse one job file: each [section] starts a job ([global] updates the
 * defaults template), and following lines are handed to the generic option
 * parser. Returns non-zero if any job had to be dropped.
 */
1093 static int parse_jobs_ini(char *file, int stonewall_flag)
1095 unsigned int global;
1096 struct thread_data *td;
1097 char *string, *name;
1101 int ret = 0, stonewall;
1103 f = fopen(file, "r");
1105 perror("fopen job file");
1109 string = malloc(4096);
1111 memset(name, 0, 256);
1113 stonewall = stonewall_flag;
/* Outer loop: scan for the next [section] header. */
1115 p = fgets(string, 4095, f);
1118 if (is_empty_or_comment(p))
1120 if (sscanf(p, "[%255s]", name) != 1)
1123 global = !strncmp(name, "global", 6);
/* Drop the trailing ']' captured by the %s conversion above. */
1125 name[strlen(name) - 1] = '\0';
1127 td = get_new_job(global, &def_thread);
1134 * Separate multiple job files by a stonewall
1136 if (!global && stonewall) {
1137 td->stonewall = stonewall;
/* Inner loop: consume option lines until the next section or EOF. */
1142 while ((p = fgets(string, 4096, f)) != NULL) {
1143 if (is_empty_or_comment(p))
1146 strip_blank_front(&p);
1156 * Don't break here, continue parsing options so we
1157 * dump all the bad ones. Makes trial/error fixups
1158 * easier on the user.
1160 ret |= parse_option(p, options, td);
1165 ret = add_job(td, name, 0);
1167 log_err("fio: job %s dropped\n", name);
/*
 * Initialize the defaults template (def_thread) that every new job is
 * cloned from: zero it, inherit the process CPU affinity, then apply
 * option defaults and the command-line overrides.
 */
1178 static int fill_def_thread(void)
1180 memset(&def_thread, 0, sizeof(def_thread));
1182 if (fio_getaffinity(getpid(), &def_thread.cpumask) == -1) {
1183 perror("sched_getaffinity");
1188 * fill default options
1190 fill_default_options(&def_thread, options);
/* Command-line-wide settings become the per-job defaults. */
1192 def_thread.timeout = def_timeout;
1193 def_thread.write_bw_log = write_bw_log;
1194 def_thread.write_lat_log = write_lat_log;
1196 #ifdef FIO_HAVE_DISK_UTIL
1197 def_thread.do_disk_util = 1;
/* Print version banner and the fio-specific command line flags. */
1203 static void usage(void)
1205 printf("%s\n", fio_version_string);
1206 printf("\t--output\tWrite output to file\n");
1207 printf("\t--timeout\tRuntime in seconds\n");
1208 printf("\t--latency-log\tGenerate per-job latency logs\n");
1209 printf("\t--bandwidth-log\tGenerate per-job bandwidth logs\n");
1210 printf("\t--minimal\tMinimal (terse) output\n");
1211 printf("\t--version\tPrint version info and exit\n");
1212 printf("\t--help\t\tPrint this page\n");
1213 printf("\t--cmdhelp=cmd\tPrint command help, \"all\" for all of them\n");
/*
 * Parse fio's command line. Fio-only flags are handled by their getopt
 * values; job options arrive as FIO_GETOPT_JOB entries (mirrored into
 * long_options by dupe_job_options). Returns the number of job files
 * collected into ini_file[].
 */
1216 static int parse_cmd_line(int argc, char *argv[])
1218 struct thread_data *td = NULL;
1219 int c, ini_idx = 0, lidx, ret;
1221 while ((c = getopt_long(argc, argv, "", long_options, &lidx)) != -1) {
1224 def_timeout = atoi(optarg);
1233 f_out = fopen(optarg, "w+");
1235 perror("fopen output");
1247 ret = show_cmd_help(options, optarg);
1250 printf("%s\n", fio_version_string);
1252 case FIO_GETOPT_JOB: {
1253 const char *opt = long_options[lidx].name;
/* A new "name" option finishes the job being built and starts another. */
1256 if (!strncmp(opt, "name", 4) && td) {
1257 ret = add_job(td, td->name ?: "fio", 0);
1265 int global = !strncmp(val, "global", 6);
1267 td = get_new_job(global, &def_thread);
1272 ret = parse_cmd_option(opt, val, options, td);
1274 log_err("fio: job dropped\n");
/* Flush the last command-line-defined job, if any. */
1286 ret = add_job(td, td->name ?: "fio", 0);
/* Remaining non-option arguments are job files. */
1291 while (optind < argc) {
1293 ini_file = realloc(ini_file, ini_idx * sizeof(char *));
1294 ini_file[ini_idx - 1] = strdup(argv[optind]);
/* Detach and remove the shared-memory segment holding the job slots. */
1301 static void free_shm(void)
1303 struct shmid_ds sbuf;
1306 shmdt((void *) threads);
1308 shmctl(shm_id, IPC_RMID, &sbuf);
1313 * The thread area is shared between the main process and the job
1314 * threads/processes. So setup a shared memory segment that will hold
1317 static int setup_thread_area(void)
1320 * 1024 is too much on some machines, scale max_jobs if
1321 * we get a failure that looks like too large a shm segment
1324 size_t size = max_jobs * sizeof(struct thread_data);
1326 shm_id = shmget(0, size, IPC_CREAT | 0600);
/* EINVAL is the too-large-segment case the retry comment above refers to. */
1329 if (errno != EINVAL) {
1340 threads = shmat(shm_id, NULL, 0);
/* shmat signals failure with (void *) -1, not NULL. */
1341 if (threads == (void *) -1) {
1351 * Copy the fio options into the long options map, so we mirror
1352 * job and cmd line options.
1354 static void dupe_job_options(void)
1356 struct fio_option *o;
/* Skip past the fio-only entries already present in long_options. */
1360 while (long_options[i].name)
/* Mirror each job option; FIO_GETOPT_JOB routes it back to the job parser. */
1365 long_options[i].name = o->name;
1366 long_options[i].val = FIO_GETOPT_JOB;
/* Flag-style options take no argument; everything else requires one. */
1367 if (o->type == FIO_OPT_STR_SET)
1368 long_options[i].has_arg = no_argument;
1370 long_options[i].has_arg = required_argument;
/* Guard against overflowing the fixed-size long_options array. */
1374 assert(i < FIO_JOB_OPTS + FIO_CMD_OPTS);
/*
 * Top-level option entry point: set up the option table and shared job
 * area, apply defaults, parse the command line, then parse each job file
 * (resetting the defaults template before every file).
 */
1378 int parse_options(int argc, char *argv[])
1385 options_init(options);
1389 if (setup_thread_area())
1391 if (fill_def_thread())
1394 job_files = parse_cmd_line(argc, argv);
1396 for (i = 0; i < job_files; i++) {
/* Re-seed defaults so one file's [global] can't leak into the next file. */
1397 if (fill_def_thread())
1399 if (parse_jobs_ini(ini_file[i], i))
1406 if (!thread_number) {
1407 log_err("No jobs defined(s)\n");