2 * This file contains job initialization and setup functions.
15 #include <sys/types.h>
#include <stddef.h>	/* offsetof(), used by td_var_offset() */
25 #define DEF_TIMEOUT (0)
26 #define DEF_RATE_CYCLE (1000)
27 #define DEF_ODIRECT (1)
28 #define DEF_IO_ENGINE (FIO_SYNCIO)
29 #define DEF_IO_ENGINE_NAME "sync"
30 #define DEF_SEQUENTIAL (1)
31 #define DEF_RAND_REPEAT (1)
32 #define DEF_OVERWRITE (1)
33 #define DEF_INVALIDATE (1)
34 #define DEF_SYNCIO (0)
35 #define DEF_RANDSEED (0xb1899bedUL)
36 #define DEF_BWAVGTIME (500)
37 #define DEF_CREATE_SER (1)
38 #define DEF_CREATE_FSYNC (1)
40 #define DEF_VERIFY (0)
41 #define DEF_STONEWALL (0)
42 #define DEF_NUMJOBS (1)
43 #define DEF_USE_THREAD (0)
44 #define DEF_FILE_SIZE (1024 * 1024 * 1024UL)
45 #define DEF_ZONE_SIZE (0)
46 #define DEF_ZONE_SKIP (0)
47 #define DEF_RWMIX_CYCLE (500)
48 #define DEF_RWMIX_READ (50)
50 #define DEF_NR_FILES (1)
51 #define DEF_UNLINK (0)
52 #define DEF_WRITE_BW_LOG (0)
53 #define DEF_WRITE_LAT_LOG (0)
54 #define DEF_NO_RAND_MAP (0)
56 #define td_var_offset(var) ((size_t) &((struct thread_data *)0)->var)
58 static int str_rw_cb(void *, const char *);
59 static int str_ioengine_cb(void *, const char *);
60 static int str_mem_cb(void *, const char *);
61 static int str_verify_cb(void *, const char *);
62 static int str_lockmem_cb(void *, unsigned long *);
63 static int str_prio_cb(void *, unsigned int *);
64 static int str_prioclass_cb(void *, unsigned int *);
65 static int str_exitall_cb(void);
66 static int str_cpumask_cb(void *, unsigned int *);
69 * Map of job/command line options
71 static struct fio_option options[] = {
74 .type = FIO_OPT_STR_STORE,
75 .off1 = td_var_offset(name),
79 .type = FIO_OPT_STR_STORE,
80 .off1 = td_var_offset(directory),
84 .type = FIO_OPT_STR_STORE,
85 .off1 = td_var_offset(filename),
95 .cb = str_ioengine_cb,
108 .name = "write_iolog",
109 .type = FIO_OPT_STR_STORE,
110 .off1 = td_var_offset(write_iolog_file),
113 .name = "read_iolog",
114 .type = FIO_OPT_STR_STORE,
115 .off1 = td_var_offset(read_iolog_file),
118 .name = "exec_prerun",
119 .type = FIO_OPT_STR_STORE,
120 .off1 = td_var_offset(exec_prerun),
123 .name = "exec_postrun",
124 .type = FIO_OPT_STR_STORE,
125 .off1 = td_var_offset(exec_postrun),
127 #ifdef FIO_HAVE_IOSCHED_SWITCH
129 .name = "ioscheduler",
130 .type = FIO_OPT_STR_STORE,
131 .off1 = td_var_offset(ioscheduler),
136 .type = FIO_OPT_STR_VAL,
137 .off1 = td_var_offset(total_file_size),
141 .type = FIO_OPT_STR_VAL_INT,
142 .off1 = td_var_offset(bs[DDIR_READ]),
146 .type = FIO_OPT_STR_VAL_INT,
147 .off1 = td_var_offset(bs[DDIR_READ]),
151 .type = FIO_OPT_STR_VAL_INT,
152 .off1 = td_var_offset(bs[DDIR_WRITE]),
156 .type = FIO_OPT_STR_VAL,
157 .off1 = td_var_offset(start_offset),
161 .type = FIO_OPT_STR_VAL,
162 .off1 = td_var_offset(zone_size),
166 .type = FIO_OPT_STR_VAL,
167 .off1 = td_var_offset(zone_skip),
171 .type = FIO_OPT_STR_VAL,
172 .cb = str_lockmem_cb,
176 .type = FIO_OPT_RANGE,
177 .off1 = td_var_offset(min_bs[DDIR_READ]),
178 .off2 = td_var_offset(max_bs[DDIR_READ]),
181 .name = "read_bsrange",
182 .type = FIO_OPT_RANGE,
183 .off1 = td_var_offset(min_bs[DDIR_READ]),
184 .off2 = td_var_offset(max_bs[DDIR_READ]),
187 .name = "write_bsrange",
188 .type = FIO_OPT_RANGE,
189 .off1 = td_var_offset(min_bs[DDIR_WRITE]),
190 .off2 = td_var_offset(max_bs[DDIR_WRITE]),
195 .off1 = td_var_offset(nr_files),
200 .off1 = td_var_offset(iodepth),
205 .off1 = td_var_offset(fsync_blocks),
208 .name = "rwmixcycle",
210 .off1 = td_var_offset(rwmixcycle),
215 .off1 = td_var_offset(rwmixread),
219 .name = "rwmixwrite",
221 .off1 = td_var_offset(rwmixwrite),
227 .off1 = td_var_offset(nice),
229 #ifdef FIO_HAVE_IOPRIO
238 .cb = str_prioclass_cb,
244 .off1 = td_var_offset(thinktime)
249 .off1 = td_var_offset(rate)
254 .off1 = td_var_offset(ratemin)
259 .off1 = td_var_offset(ratecycle)
262 .name = "startdelay",
264 .off1 = td_var_offset(start_delay)
268 .type = FIO_OPT_STR_VAL_TIME,
269 .off1 = td_var_offset(timeout)
272 .name = "invalidate",
274 .off1 = td_var_offset(invalidate_cache)
279 .off1 = td_var_offset(sync_io)
284 .off1 = td_var_offset(bw_avg_time)
287 .name = "create_serialize",
289 .off1 = td_var_offset(create_serialize)
292 .name = "create_fsync",
294 .off1 = td_var_offset(create_fsync)
299 .off1 = td_var_offset(loops)
304 .off1 = td_var_offset(numjobs)
309 .off1 = td_var_offset(cpuload)
314 .off1 = td_var_offset(cpucycle)
319 .off1 = td_var_offset(odirect)
324 .off1 = td_var_offset(overwrite)
326 #ifdef FIO_HAVE_CPU_AFFINITY
330 .cb = str_cpumask_cb,
336 .off1 = td_var_offset(end_fsync)
340 .type = FIO_OPT_STR_SET,
341 .off1 = td_var_offset(unlink),
345 .type = FIO_OPT_STR_SET,
346 .cb = str_exitall_cb,
350 .type = FIO_OPT_STR_SET,
351 .off1 = td_var_offset(stonewall),
355 .type = FIO_OPT_STR_SET,
356 .off1 = td_var_offset(thread),
359 .name = "write_bw_log",
360 .type = FIO_OPT_STR_SET,
361 .off1 = td_var_offset(write_bw_log),
364 .name = "write_lat_log",
365 .type = FIO_OPT_STR_SET,
366 .off1 = td_var_offset(write_lat_log),
369 .name = "norandommap",
370 .type = FIO_OPT_STR_SET,
371 .off1 = td_var_offset(norandommap),
374 .name = "bs_unaligned",
375 .type = FIO_OPT_STR_SET,
376 .off1 = td_var_offset(bs_unaligned),
383 #define FIO_JOB_OPTS (sizeof(options) / sizeof(struct fio_option))
384 #define FIO_CMD_OPTS (16)
385 #define FIO_GETOPT_JOB (0x89988998)
388 * Command line options. These will contain the above, plus a few
389 * extra that only pertain to fio itself and not jobs.
391 static struct option long_options[FIO_JOB_OPTS + FIO_CMD_OPTS] = {
394 .has_arg = required_argument,
399 .has_arg = required_argument,
403 .name = "latency-log",
404 .has_arg = required_argument,
408 .name = "bandwidth-log",
409 .has_arg = required_argument,
414 .has_arg = optional_argument,
419 .has_arg = no_argument,
427 static int def_timeout = DEF_TIMEOUT;
429 static char fio_version_string[] = "fio 1.7";
431 static char **ini_file;
432 static int max_jobs = MAX_JOBS;
434 struct thread_data def_thread;
435 struct thread_data *threads = NULL;
438 int exitall_on_terminate = 0;
439 int terse_output = 0;
440 unsigned long long mlock_size = 0;
444 static int write_lat_log = DEF_WRITE_LAT_LOG;
445 static int write_bw_log = DEF_WRITE_BW_LOG;
448 * Return a free job structure.
450 static struct thread_data *get_new_job(int global, struct thread_data *parent)
452 struct thread_data *td;
456 if (thread_number >= max_jobs)
459 td = &threads[thread_number++];
462 td->thread_number = thread_number;
466 static void put_job(struct thread_data *td)
468 if (td == &def_thread)
471 memset(&threads[td->thread_number - 1], 0, sizeof(*td));
476 * Lazy way of fixing up options that depend on each other. We could also
477 * define option callback handlers, but this is easier.
479 static void fixup_options(struct thread_data *td)
481 if (!td->rwmixread && td->rwmixwrite)
482 td->rwmixread = 100 - td->rwmixwrite;
484 if (td->write_iolog_file && td->read_iolog_file) {
485 log_err("fio: read iolog overrides write_iolog\n");
486 free(td->write_iolog_file);
487 td->write_iolog_file = NULL;
490 if (td->io_ops->flags & FIO_SYNCIO)
494 td->iodepth = td->nr_files;
498 * only really works for sequential io for now, and with 1 file
500 if (td->zone_size && !td->sequential && td->nr_files == 1)
504 * Reads can do overwrites, we always need to pre-create the file
506 if (td_read(td) || td_rw(td))
509 if (td->bs[DDIR_READ] != DEF_BS)
510 td->bs[DDIR_WRITE] = td->bs[DDIR_READ];
511 if (!td->min_bs[DDIR_READ])
512 td->min_bs[DDIR_READ]= td->bs[DDIR_READ];
513 if (!td->max_bs[DDIR_READ])
514 td->max_bs[DDIR_READ] = td->bs[DDIR_READ];
515 if (!td->min_bs[DDIR_WRITE])
516 td->min_bs[DDIR_WRITE]= td->bs[DDIR_WRITE];
517 if (!td->max_bs[DDIR_WRITE])
518 td->max_bs[DDIR_WRITE] = td->bs[DDIR_WRITE];
520 td->rw_min_bs = min(td->min_bs[DDIR_READ], td->min_bs[DDIR_WRITE]);
522 if (td_read(td) && !td_rw(td))
525 if (td->norandommap && td->verify != VERIFY_NONE) {
526 log_err("fio: norandommap given, verify disabled\n");
527 td->verify = VERIFY_NONE;
529 if (td->bs_unaligned && (td->odirect || td->io_ops->flags & FIO_RAWIO))
530 log_err("fio: bs_unaligned may not work with raw io\n");
534 * Adds a job to the list of things todo. Sanitizes the various options
535 * to make sure we don't have conflicts, and initializes various
538 static int add_job(struct thread_data *td, const char *jobname, int job_add_num)
540 char *ddir_str[] = { "read", "write", "randread", "randwrite",
541 "rw", NULL, "randrw" };
543 int numjobs, ddir, i;
546 #ifndef FIO_HAVE_LIBAIO
547 if (td->io_engine == FIO_LIBAIO) {
548 log_err("Linux libaio not available\n");
552 #ifndef FIO_HAVE_POSIXAIO
553 if (td->io_engine == FIO_POSIXAIO) {
554 log_err("posix aio not available\n");
560 * the def_thread is just for options, it's not a real job
562 if (td == &def_thread)
566 * Set default io engine, if none set
569 td->io_ops = load_ioengine(td, DEF_IO_ENGINE_NAME);
571 log_err("default engine %s not there?\n", DEF_IO_ENGINE_NAME);
577 td->io_ops->flags |= FIO_RAWIO;
581 td->filetype = FIO_TYPE_FILE;
582 if (!stat(jobname, &sb)) {
583 if (S_ISBLK(sb.st_mode))
584 td->filetype = FIO_TYPE_BD;
585 else if (S_ISCHR(sb.st_mode))
586 td->filetype = FIO_TYPE_CHAR;
590 td->nr_uniq_files = 1;
592 td->nr_uniq_files = td->nr_files;
594 if (td->filetype == FIO_TYPE_FILE || td->filename) {
599 if (td->directory && td->directory[0] != '\0')
600 sprintf(tmp, "%s/", td->directory);
602 td->files = malloc(sizeof(struct fio_file) * td->nr_files);
604 for_each_file(td, f, i) {
605 memset(f, 0, sizeof(*f));
609 sprintf(tmp + len, "%s", td->filename);
611 sprintf(tmp + len, "%s.%d.%d", jobname, td->thread_number, i);
612 f->file_name = strdup(tmp);
616 td->files = malloc(sizeof(struct fio_file));
619 memset(f, 0, sizeof(*f));
621 f->file_name = strdup(jobname);
624 for_each_file(td, f, i) {
625 f->file_size = td->total_file_size / td->nr_files;
626 f->file_offset = td->start_offset;
629 fio_sem_init(&td->mutex, 0);
631 td->clat_stat[0].min_val = td->clat_stat[1].min_val = ULONG_MAX;
632 td->slat_stat[0].min_val = td->slat_stat[1].min_val = ULONG_MAX;
633 td->bw_stat[0].min_val = td->bw_stat[1].min_val = ULONG_MAX;
635 if (td->stonewall && td->thread_number > 1)
638 td->groupid = groupid;
643 if (td->write_lat_log) {
644 setup_log(&td->slat_log);
645 setup_log(&td->clat_log);
647 if (td->write_bw_log)
648 setup_log(&td->bw_log);
651 td->name = strdup(jobname);
653 ddir = td->ddir + (!td->sequential << 1) + (td->iomix << 2);
657 if (td->io_ops->flags & FIO_CPUIO)
658 fprintf(f_out, "%s: ioengine=cpu, cpuload=%u, cpucycle=%u\n", td->name, td->cpuload, td->cpucycle);
660 fprintf(f_out, "%s: (g=%d): rw=%s, odir=%d, bs=%d-%d/%d-%d, rate=%d, ioengine=%s, iodepth=%d\n", td->name, td->groupid, ddir_str[ddir], td->odirect, td->min_bs[DDIR_READ], td->max_bs[DDIR_READ], td->min_bs[DDIR_WRITE], td->max_bs[DDIR_WRITE], td->rate, td->io_ops->name, td->iodepth);
661 } else if (job_add_num == 1)
662 fprintf(f_out, "...\n");
666 * recurse add identical jobs, clear numjobs and stonewall options
667 * as they don't apply to sub-jobs
669 numjobs = td->numjobs;
671 struct thread_data *td_new = get_new_job(0, td);
677 td_new->stonewall = 0;
678 job_add_num = numjobs - 1;
680 if (add_job(td_new, jobname, job_add_num))
690 * Initialize the various random states we need (random io, block size ranges,
691 * read/write mix, etc).
693 int init_random_state(struct thread_data *td)
695 unsigned long seeds[4];
696 int fd, num_maps, blocks, i;
699 if (td->io_ops->flags & FIO_CPUIO)
702 fd = open("/dev/urandom", O_RDONLY);
704 td_verror(td, errno);
708 if (read(fd, seeds, sizeof(seeds)) < (int) sizeof(seeds)) {
716 os_random_seed(seeds[0], &td->bsrange_state);
717 os_random_seed(seeds[1], &td->verify_state);
718 os_random_seed(seeds[2], &td->rwmix_state);
723 if (td->rand_repeatable)
724 seeds[3] = DEF_RANDSEED;
726 if (!td->norandommap) {
727 for_each_file(td, f, i) {
728 blocks = (f->file_size + td->rw_min_bs - 1) / td->rw_min_bs;
729 num_maps = (blocks + BLOCKS_PER_MAP-1)/ BLOCKS_PER_MAP;
730 f->file_map = malloc(num_maps * sizeof(long));
731 f->num_maps = num_maps;
732 memset(f->file_map, 0, num_maps * sizeof(long));
736 os_random_seed(seeds[3], &td->random_state);
740 static void fill_cpu_mask(os_cpu_mask_t cpumask, int cpu)
742 #ifdef FIO_HAVE_CPU_AFFINITY
747 for (i = 0; i < sizeof(int) * 8; i++) {
749 CPU_SET(i, &cpumask);
754 static int is_empty_or_comment(char *line)
758 for (i = 0; i < strlen(line); i++) {
761 if (!isspace(line[i]) && !iscntrl(line[i]))
768 static int str_rw_cb(void *data, const char *mem)
770 struct thread_data *td = data;
772 if (!strncmp(mem, "read", 4) || !strncmp(mem, "0", 1)) {
773 td->ddir = DDIR_READ;
776 } else if (!strncmp(mem, "randread", 8)) {
777 td->ddir = DDIR_READ;
780 } else if (!strncmp(mem, "write", 5) || !strncmp(mem, "1", 1)) {
781 td->ddir = DDIR_WRITE;
784 } else if (!strncmp(mem, "randwrite", 9)) {
785 td->ddir = DDIR_WRITE;
788 } else if (!strncmp(mem, "rw", 2)) {
793 } else if (!strncmp(mem, "randrw", 6)) {
800 log_err("fio: data direction: read, write, randread, randwrite, rw, randrw\n");
804 static int str_verify_cb(void *data, const char *mem)
806 struct thread_data *td = data;
808 if (!strncmp(mem, "0", 1)) {
809 td->verify = VERIFY_NONE;
811 } else if (!strncmp(mem, "md5", 3) || !strncmp(mem, "1", 1)) {
812 td->verify = VERIFY_MD5;
814 } else if (!strncmp(mem, "crc32", 5)) {
815 td->verify = VERIFY_CRC32;
819 log_err("fio: verify types: md5, crc32\n");
823 static int str_mem_cb(void *data, const char *mem)
825 struct thread_data *td = data;
827 if (!strncmp(mem, "malloc", 6)) {
828 td->mem_type = MEM_MALLOC;
830 } else if (!strncmp(mem, "shm", 3)) {
831 td->mem_type = MEM_SHM;
833 } else if (!strncmp(mem, "mmap", 4)) {
834 td->mem_type = MEM_MMAP;
838 log_err("fio: mem type: malloc, shm, mmap\n");
842 static int str_ioengine_cb(void *data, const char *str)
844 struct thread_data *td = data;
846 td->io_ops = load_ioengine(td, str);
850 log_err("fio: ioengine: { linuxaio, aio, libaio }, posixaio, sync, mmap, sgio, splice, cpu\n");
854 static int str_lockmem_cb(void fio_unused *data, unsigned long *val)
860 static int str_prioclass_cb(void *data, unsigned int *val)
862 struct thread_data *td = data;
864 td->ioprio |= *val << IOPRIO_CLASS_SHIFT;
868 static int str_prio_cb(void *data, unsigned int *val)
870 struct thread_data *td = data;
876 static int str_exitall_cb(void)
878 exitall_on_terminate = 1;
882 static int str_cpumask_cb(void *data, unsigned int *val)
884 struct thread_data *td = data;
886 fill_cpu_mask(td->cpumask, *val);
891 * This is our [ini] type file parser.
893 int parse_jobs_ini(char *file, int stonewall_flag)
896 struct thread_data *td;
901 int ret = 0, stonewall;
903 f = fopen(file, "r");
905 perror("fopen job file");
909 string = malloc(4096);
911 memset(name, 0, 256);
913 stonewall = stonewall_flag;
915 p = fgets(string, 4095, f);
918 if (is_empty_or_comment(p))
920 if (sscanf(p, "[%255s]", name) != 1)
923 global = !strncmp(name, "global", 6);
925 name[strlen(name) - 1] = '\0';
927 td = get_new_job(global, &def_thread);
934 * Seperate multiple job files by a stonewall
936 if (!global && stonewall) {
937 td->stonewall = stonewall;
942 while ((p = fgets(string, 4096, f)) != NULL) {
943 if (is_empty_or_comment(p))
946 strip_blank_front(&p);
956 * Don't break here, continue parsing options so we
957 * dump all the bad ones. Makes trial/error fixups
958 * easier on the user.
960 ret |= parse_option(p, options, td);
965 ret = add_job(td, name, 0);
967 log_err("fio: job %s dropped\n", name);
978 static int fill_def_thread(void)
980 memset(&def_thread, 0, sizeof(def_thread));
982 if (fio_getaffinity(getpid(), &def_thread.cpumask) == -1) {
983 perror("sched_getaffinity");
990 def_thread.ddir = DDIR_READ;
991 def_thread.iomix = 0;
992 def_thread.bs[DDIR_READ] = DEF_BS;
993 def_thread.bs[DDIR_WRITE] = DEF_BS;
994 def_thread.min_bs[DDIR_READ] = def_thread.min_bs[DDIR_WRITE] = 0;
995 def_thread.max_bs[DDIR_READ] = def_thread.max_bs[DDIR_WRITE] = 0;
996 def_thread.odirect = DEF_ODIRECT;
997 def_thread.ratecycle = DEF_RATE_CYCLE;
998 def_thread.sequential = DEF_SEQUENTIAL;
999 def_thread.timeout = def_timeout;
1000 def_thread.overwrite = DEF_OVERWRITE;
1001 def_thread.invalidate_cache = DEF_INVALIDATE;
1002 def_thread.sync_io = DEF_SYNCIO;
1003 def_thread.mem_type = MEM_MALLOC;
1004 def_thread.bw_avg_time = DEF_BWAVGTIME;
1005 def_thread.create_serialize = DEF_CREATE_SER;
1006 def_thread.create_fsync = DEF_CREATE_FSYNC;
1007 def_thread.loops = DEF_LOOPS;
1008 def_thread.verify = DEF_VERIFY;
1009 def_thread.stonewall = DEF_STONEWALL;
1010 def_thread.numjobs = DEF_NUMJOBS;
1011 def_thread.use_thread = DEF_USE_THREAD;
1012 def_thread.rwmixcycle = DEF_RWMIX_CYCLE;
1013 def_thread.rwmixread = DEF_RWMIX_READ;
1014 def_thread.nice = DEF_NICE;
1015 def_thread.rand_repeatable = DEF_RAND_REPEAT;
1016 def_thread.nr_files = DEF_NR_FILES;
1017 def_thread.unlink = DEF_UNLINK;
1018 def_thread.write_bw_log = write_bw_log;
1019 def_thread.write_lat_log = write_lat_log;
1020 def_thread.norandommap = DEF_NO_RAND_MAP;
1021 #ifdef FIO_HAVE_DISK_UTIL
1022 def_thread.do_disk_util = 1;
1028 static void usage(void)
1030 printf("%s\n", fio_version_string);
1031 printf("\t--output\tWrite output to file\n");
1032 printf("\t--timeout\tRuntime in seconds\n");
1033 printf("\t--latency-log\tGenerate per-job latency logs\n");
1034 printf("\t--bandwidth-log\tGenerate per-job bandwidth logs\n");
1035 printf("\t--minimal\tMinimal (terse) output\n");
1036 printf("\t--version\tPrint version info and exit\n");
1039 static int parse_cmd_line(int argc, char *argv[])
1041 struct thread_data *td = NULL;
1042 int c, ini_idx = 0, lidx, ret;
1044 while ((c = getopt_long(argc, argv, "", long_options, &lidx)) != -1) {
1047 def_timeout = atoi(optarg);
1056 f_out = fopen(optarg, "w+");
1058 perror("fopen output");
1070 printf("%s\n", fio_version_string);
1072 case FIO_GETOPT_JOB: {
1073 const char *opt = long_options[lidx].name;
1076 if (!strncmp(opt, "name", 4) && td) {
1077 ret = add_job(td, td->name ?: "fio", 0);
1085 int global = !strncmp(val, "global", 6);
1087 td = get_new_job(global, &def_thread);
1092 ret = parse_cmd_option(opt, val, options, td);
1094 log_err("fio: job dropped\n");
1101 printf("optarg <<%s>>\n", argv[optind]);
1107 ret = add_job(td, td->name ?: "fio", 0);
1112 while (optind < argc) {
1114 ini_file = realloc(ini_file, ini_idx * sizeof(char *));
1115 ini_file[ini_idx - 1] = strdup(argv[optind]);
1122 static void free_shm(void)
1124 struct shmid_ds sbuf;
1127 shmdt((void *) threads);
1129 shmctl(shm_id, IPC_RMID, &sbuf);
1134 * The thread area is shared between the main process and the job
1135 * threads/processes. So setup a shared memory segment that will hold
1138 static int setup_thread_area(void)
1141 * 1024 is too much on some machines, scale max_jobs if
1142 * we get a failure that looks like too large a shm segment
1145 size_t size = max_jobs * sizeof(struct thread_data);
1147 shm_id = shmget(0, size, IPC_CREAT | 0600);
1150 if (errno != EINVAL) {
1161 threads = shmat(shm_id, NULL, 0);
1162 if (threads == (void *) -1) {
1172 * Copy the fio options into the long options map, so we mirror
1173 * job and cmd line options.
1175 static void dupe_job_options(void)
1177 struct fio_option *o;
1181 while (long_options[i].name)
1186 long_options[i].name = o->name;
1187 long_options[i].val = FIO_GETOPT_JOB;
1188 if (o->type == FIO_OPT_STR_SET)
1189 long_options[i].has_arg = no_argument;
1191 long_options[i].has_arg = required_argument;
1195 assert(i < FIO_JOB_OPTS + FIO_CMD_OPTS);
1199 int parse_options(int argc, char *argv[])
1208 if (setup_thread_area())
1210 if (fill_def_thread())
1213 job_files = parse_cmd_line(argc, argv);
1215 for (i = 0; i < job_files; i++) {
1216 if (fill_def_thread())
1218 if (parse_jobs_ini(ini_file[i], i))
1225 if (!thread_number) {
1226 log_err("No jobs defined(s)\n");