2 * This file contains job initialization and setup functions.
13 #include <sys/types.h>
#include <stddef.h>
/*
 * Compile-time defaults for the per-job options.  Each DEF_* constant
 * seeds the matching field of def_thread in fill_def_thread() below,
 * which is then the template every parsed job starts from.
 */
23 #define DEF_TIMEOUT (0)
24 #define DEF_RATE_CYCLE (1000)
25 #define DEF_ODIRECT (1)
26 #define DEF_IO_ENGINE (FIO_SYNCIO)
27 #define DEF_IO_ENGINE_NAME "sync"
28 #define DEF_SEQUENTIAL (1)
29 #define DEF_RAND_REPEAT (1)
30 #define DEF_OVERWRITE (1)
31 #define DEF_INVALIDATE (1)
32 #define DEF_SYNCIO (0)
/* Fixed seed used when rand_repeatable is set -- see init_random_state(). */
33 #define DEF_RANDSEED (0xb1899bedUL)
34 #define DEF_BWAVGTIME (500)
35 #define DEF_CREATE_SER (1)
36 #define DEF_CREATE_FSYNC (1)
38 #define DEF_VERIFY (0)
39 #define DEF_STONEWALL (0)
40 #define DEF_NUMJOBS (1)
41 #define DEF_USE_THREAD (0)
/* 1 GiB default per-job file size */
42 #define DEF_FILE_SIZE (1024 * 1024 * 1024UL)
43 #define DEF_ZONE_SIZE (0)
44 #define DEF_ZONE_SKIP (0)
45 #define DEF_RWMIX_CYCLE (500)
46 #define DEF_RWMIX_READ (50)
48 #define DEF_NR_FILES (1)
49 #define DEF_UNLINK (0)
50 #define DEF_WRITE_BW_LOG (0)
51 #define DEF_WRITE_LAT_LOG (0)
/*
 * Byte offset of member "var" within struct thread_data; the option
 * table below uses it so the parser can store parsed values directly
 * into a job structure.  Use the standard offsetof() instead of the
 * old "(size_t) &((struct thread_data *)0)->var" idiom: forming a
 * member access through a null pointer is undefined behavior, while
 * offsetof() is guaranteed by the C standard to yield the same value.
 */
#define td_var_offset(var) (offsetof(struct thread_data, var))
/*
 * Forward declarations for the option-parsing callbacks referenced by
 * the options[] table below; the definitions follow later in the file.
 */
55 static int str_rw_cb(void *, char *);
56 static int str_ioengine_cb(void *, char *);
57 static int str_mem_cb(void *, char *);
58 static int str_verify_cb(void *, char *);
59 static int str_lockmem_cb(void *, unsigned long *);
60 static int str_prio_cb(void *, unsigned int *);
61 static int str_prioclass_cb(void *, unsigned int *);
62 static int str_exitall_cb(void);
63 static int str_cpumask_cb(void *, unsigned int *);
66 * Map of job/command line options
/*
 * Each entry maps an option keyword to either a direct store into a
 * struct thread_data field (via td_var_offset) or a callback.  Option
 * types observed here: STR_STORE (strdup'd string), STR_VAL and
 * STR_VAL_TIME (value with suffix parsing), RANGE (min/max pair via
 * off1/off2), STR_SET (presence flag).
 */
68 static struct fio_option options[] = {
/* string options: job name, target directory, explicit filename */
71 .type = FIO_OPT_STR_STORE,
72 .off1 = td_var_offset(name),
76 .type = FIO_OPT_STR_STORE,
77 .off1 = td_var_offset(directory),
81 .type = FIO_OPT_STR_STORE,
82 .off1 = td_var_offset(filename),
92 .cb = str_ioengine_cb,
/* iolog replay/record and pre/post job shell hooks */
105 .name = "write_iolog",
106 .type = FIO_OPT_STR_STORE,
107 .off1 = td_var_offset(write_iolog_file),
110 .name = "read_iolog",
111 .type = FIO_OPT_STR_STORE,
112 .off1 = td_var_offset(read_iolog_file),
115 .name = "exec_prerun",
116 .type = FIO_OPT_STR_STORE,
117 .off1 = td_var_offset(exec_prerun),
120 .name = "exec_postrun",
121 .type = FIO_OPT_STR_STORE,
122 .off1 = td_var_offset(exec_postrun),
/* only compiled in when the platform can switch io schedulers */
124 #ifdef FIO_HAVE_IOSCHED_SWITCH
126 .name = "ioscheduler",
127 .type = FIO_OPT_STR_STORE,
128 .off1 = td_var_offset(ioscheduler),
/* size/offset style values (accept k/m/g style suffixes) */
133 .type = FIO_OPT_STR_VAL,
134 .off1 = td_var_offset(total_file_size),
138 .type = FIO_OPT_STR_VAL,
139 .off1 = td_var_offset(bs),
143 .type = FIO_OPT_STR_VAL,
144 .off1 = td_var_offset(start_offset),
148 .type = FIO_OPT_STR_VAL,
149 .off1 = td_var_offset(zone_size),
153 .type = FIO_OPT_STR_VAL,
154 .off1 = td_var_offset(zone_skip),
158 .type = FIO_OPT_STR_VAL,
159 .cb = str_lockmem_cb,
/* block size range: fills both min_bs and max_bs */
163 .type = FIO_OPT_RANGE,
164 .off1 = td_var_offset(min_bs),
165 .off2 = td_var_offset(max_bs),
170 .off1 = td_var_offset(nr_files),
175 .off1 = td_var_offset(iodepth),
180 .off1 = td_var_offset(fsync_blocks),
/* read/write mix tuning for mixed workloads */
183 .name = "rwmixcycle",
185 .off1 = td_var_offset(rwmixcycle),
190 .off1 = td_var_offset(rwmixread),
194 .name = "rwmixwrite",
196 .off1 = td_var_offset(rwmixwrite),
202 .off1 = td_var_offset(nice),
204 #ifdef FIO_HAVE_IOPRIO
213 .cb = str_prioclass_cb,
219 .off1 = td_var_offset(thinktime)
/* rate limiting, in KiB/sec presumably -- confirm against parser */
224 .off1 = td_var_offset(rate)
229 .off1 = td_var_offset(ratemin)
234 .off1 = td_var_offset(ratecycle)
237 .name = "startdelay",
239 .off1 = td_var_offset(start_delay)
243 .type = FIO_OPT_STR_VAL_TIME,
244 .off1 = td_var_offset(timeout)
247 .name = "invalidate",
249 .off1 = td_var_offset(invalidate_cache)
254 .off1 = td_var_offset(sync_io)
259 .off1 = td_var_offset(bw_avg_time)
262 .name = "create_serialize",
264 .off1 = td_var_offset(create_serialize)
267 .name = "create_fsync",
269 .off1 = td_var_offset(create_fsync)
274 .off1 = td_var_offset(loops)
279 .off1 = td_var_offset(numjobs)
/* cpu io-engine knobs */
284 .off1 = td_var_offset(cpuload)
289 .off1 = td_var_offset(cpucycle)
294 .off1 = td_var_offset(odirect)
299 .off1 = td_var_offset(overwrite)
301 #ifdef FIO_HAVE_CPU_AFFINITY
305 .cb = str_cpumask_cb,
311 .off1 = td_var_offset(end_fsync)
/* boolean "presence" flags */
315 .type = FIO_OPT_STR_SET,
316 .off1 = td_var_offset(unlink),
320 .type = FIO_OPT_STR_SET,
321 .cb = str_exitall_cb,
325 .type = FIO_OPT_STR_SET,
326 .off1 = td_var_offset(stonewall),
330 .type = FIO_OPT_STR_SET,
331 .off1 = td_var_offset(thread),
334 .name = "write_bw_log",
335 .type = FIO_OPT_STR_SET,
336 .off1 = td_var_offset(write_bw_log),
339 .name = "write_lat_log",
340 .type = FIO_OPT_STR_SET,
341 .off1 = td_var_offset(write_lat_log),
/* Default runtime; overridden by the -t command line switch. */
348 static int def_timeout = DEF_TIMEOUT;
350 static char fio_version_string[] = "fio 1.5";
/* Job files collected from the command line (see parse_cmd_line). */
352 static char **ini_file;
353 static int max_jobs = MAX_JOBS;
/* def_thread holds option defaults; threads[] is the shared job area. */
355 struct thread_data def_thread;
356 struct thread_data *threads = NULL;
359 int exitall_on_terminate = 0;
360 int terse_output = 0;
361 unsigned long long mlock_size = 0;
/* Global log flags copied into def_thread by fill_def_thread(). */
365 static int write_lat_log = DEF_WRITE_LAT_LOG;
366 static int write_bw_log = DEF_WRITE_BW_LOG;
369 * Return a free job structure.
/*
 * Hands out the next free slot in the shared threads[] area, bumping
 * the global thread_number.  Fails (visible bound check below) once
 * max_jobs slots are in use.  "global" and "parent" control how the
 * new job is seeded -- initialization not visible in this view.
 */
371 static struct thread_data *get_new_job(int global, struct thread_data *parent)
373 struct thread_data *td;
377 if (thread_number >= max_jobs)
380 td = &threads[thread_number++];
/* thread_number is 1-based from the job's point of view */
384 td->thread_number = thread_number;
/*
 * Release a job slot obtained from get_new_job() by zeroing it.
 * Indexing uses thread_number - 1 because thread_number is 1-based.
 */
388 static void put_job(struct thread_data *td)
390 memset(&threads[td->thread_number - 1], 0, sizeof(*td));
395 * Lazy way of fixing up options that depend on each other. We could also
396 * define option callback handlers, but this is easier.
398 static void fixup_options(struct thread_data *td)
/* derive rwmixread when only rwmixwrite was given */
405 if (!td->rwmixread && td->rwmixwrite)
406 td->rwmixread = 100 - td->rwmixwrite;
/* the two iolog modes are mutually exclusive; reading wins */
408 if (td->write_iolog_file && td->read_iolog_file) {
409 log_err("fio: read iolog overrides write_iolog\n");
410 free(td->write_iolog_file);
411 td->write_iolog_file = NULL;
416 * Adds a job to the list of things todo. Sanitizes the various options
417 * to make sure we don't have conflicts, and initializes various
420 static int add_job(struct thread_data *td, const char *jobname, int job_add_num)
/* indexed by the ddir/sequential/iomix encoding computed below */
422 char *ddir_str[] = { "read", "write", "randread", "randwrite",
423 "rw", NULL, "randrw" };
425 int numjobs, ddir, i;
/* reject engines that were not compiled in on this platform */
428 #ifndef FIO_HAVE_LIBAIO
429 if (td->io_engine == FIO_LIBAIO) {
430 log_err("Linux libaio not available\n");
434 #ifndef FIO_HAVE_POSIXAIO
435 if (td->io_engine == FIO_POSIXAIO) {
436 log_err("posix aio not available\n");
444 * the def_thread is just for options, it's not a real job
446 if (td == &def_thread)
450 * Set default io engine, if none set
453 td->io_ops = load_ioengine(td, DEF_IO_ENGINE_NAME);
455 log_err("default engine %s not there?\n", DEF_IO_ENGINE_NAME);
/* sync engines cannot queue, so iodepth is bounded by file count */
460 if (td->io_ops->flags & FIO_SYNCIO)
464 td->iodepth = td->nr_files;
468 * only really works for sequential io for now, and with 1 file
470 if (td->zone_size && !td->sequential && td->nr_files == 1)
474 * Reads can do overwrites, we always need to pre-create the file
476 if (td_read(td) || td_rw(td))
/* classify target: regular file, block device or char device */
479 td->filetype = FIO_TYPE_FILE;
480 if (!stat(jobname, &sb)) {
481 if (S_ISBLK(sb.st_mode))
482 td->filetype = FIO_TYPE_BD;
483 else if (S_ISCHR(sb.st_mode))
484 td->filetype = FIO_TYPE_CHAR;
488 td->io_ops->flags |= FIO_RAWIO;
491 td->nr_uniq_files = 1;
493 td->nr_uniq_files = td->nr_files;
/* build one fio_file per job file, naming each on disk */
495 if (td->filetype == FIO_TYPE_FILE || td->filename) {
500 if (td->directory && td->directory[0] != '\0')
/* NOTE(review): sprintf into tmp with no bound -- long directory or
 * filename could overflow; confirm tmp's size upstream. */
501 sprintf(tmp, "%s/", td->directory);
503 td->files = malloc(sizeof(struct fio_file) * td->nr_files);
505 for_each_file(td, f, i) {
506 memset(f, 0, sizeof(*f));
510 sprintf(tmp + len, "%s", td->filename);
/* default name: jobname.threadnumber.fileindex */
512 sprintf(tmp + len, "%s.%d.%d", jobname, td->thread_number, i);
513 f->file_name = strdup(tmp);
/* device case: a single fio_file named after the device itself */
517 td->files = malloc(sizeof(struct fio_file));
520 memset(f, 0, sizeof(*f));
522 f->file_name = strdup(jobname);
/* split total size evenly across files; same starting offset each */
525 for_each_file(td, f, i) {
526 f->file_size = td->total_file_size / td->nr_files;
527 f->file_offset = td->start_offset;
530 fio_sem_init(&td->mutex, 0);
/* min-trackers start at ULONG_MAX so the first sample always wins */
532 td->clat_stat[0].min_val = td->clat_stat[1].min_val = ULONG_MAX;
533 td->slat_stat[0].min_val = td->slat_stat[1].min_val = ULONG_MAX;
534 td->bw_stat[0].min_val = td->bw_stat[1].min_val = ULONG_MAX;
/* -1U means "not set"; defaults applied below (not visible here) */
536 if (td->min_bs == -1U)
538 if (td->max_bs == -1U)
540 if (td_read(td) && !td_rw(td))
543 if (td->stonewall && td->thread_number > 1)
546 td->groupid = groupid;
/* per-job latency/bandwidth logs, if requested */
551 if (td->write_lat_log) {
552 setup_log(&td->slat_log);
553 setup_log(&td->clat_log);
555 if (td->write_bw_log)
556 setup_log(&td->bw_log);
558 if (td->name[0] == '\0')
559 snprintf(td->name, sizeof(td->name)-1, "client%d", td->thread_number);
/* encode direction + random/sequential + mix into a ddir_str index */
561 ddir = td->ddir + (!td->sequential << 1) + (td->iomix << 2);
565 if (td->io_ops->flags & FIO_CPUIO)
566 fprintf(f_out, "%s: ioengine=cpu, cpuload=%u, cpucycle=%u\n", td->name, td->cpuload, td->cpucycle);
568 fprintf(f_out, "%s: (g=%d): rw=%s, odir=%d, bs=%d-%d, rate=%d, ioengine=%s, iodepth=%d\n", td->name, td->groupid, ddir_str[ddir], td->odirect, td->min_bs, td->max_bs, td->rate, td->io_ops->name, td->iodepth);
/* only print the "..." continuation once for repeated sub-jobs */
569 } else if (job_add_num == 1)
570 fprintf(f_out, "...\n");
574 * recurse add identical jobs, clear numjobs and stonewall options
575 * as they don't apply to sub-jobs
577 numjobs = td->numjobs;
579 struct thread_data *td_new = get_new_job(0, td);
585 td_new->stonewall = 0;
586 job_add_num = numjobs - 1;
588 if (add_job(td_new, jobname, job_add_num))
598 * Initialize the various random states we need (random io, block size ranges,
599 * read/write mix, etc).
601 int init_random_state(struct thread_data *td)
603 unsigned long seeds[4];
604 int fd, num_maps, blocks, i;
/* the cpu engine does no io, so it needs no random state */
607 if (td->io_ops->flags & FIO_CPUIO)
610 fd = open("/dev/urandom", O_RDONLY);
612 td_verror(td, errno);
/* need all four seeds in one read; short read is an error */
616 if (read(fd, seeds, sizeof(seeds)) < (int) sizeof(seeds)) {
624 os_random_seed(seeds[0], &td->bsrange_state);
625 os_random_seed(seeds[1], &td->verify_state);
626 os_random_seed(seeds[2], &td->rwmix_state);
/* repeatable runs pin the io-pattern seed to a fixed constant */
631 if (td->rand_repeatable)
632 seeds[3] = DEF_RANDSEED;
634 for_each_file(td, f, i) {
/* number of min_bs-sized blocks, rounded up */
635 blocks = (f->file_size + td->min_bs - 1) / td->min_bs;
/* NOTE(review): integer division truncates here -- blocks not a
 * multiple of BLOCKS_PER_MAP appear to lose the tail map; confirm
 * whether this should round up like the blocks computation above. */
636 num_maps = blocks / BLOCKS_PER_MAP;
637 f->file_map = malloc(num_maps * sizeof(long));
638 f->num_maps = num_maps;
639 memset(f->file_map, 0, num_maps * sizeof(long));
642 os_random_seed(seeds[3], &td->random_state);
/*
 * Populate a cpu mask from the bitmask in "cpu": each set bit selects
 * the corresponding CPU.  No-op unless FIO_HAVE_CPU_AFFINITY.
 * NOTE(review): cpumask is received by value, so CPU_SET below updates
 * a local copy -- the caller's mask appears unchanged; confirm whether
 * this should take an os_cpu_mask_t pointer instead.
 */
646 static void fill_cpu_mask(os_cpu_mask_t cpumask, int cpu)
648 #ifdef FIO_HAVE_CPU_AFFINITY
/* walk each bit of the int-sized cpu bitmask */
653 for (i = 0; i < sizeof(int) * 8; i++) {
655 CPU_SET(i, &cpumask);
/*
 * Return nonzero for a line containing only whitespace/control chars
 * (comment detection not visible in this view).
 * NOTE(review): strlen() is re-evaluated every iteration -- harmless
 * for ini lines, but hoisting it would be cleaner.
 */
660 static int is_empty_or_comment(char *line)
664 for (i = 0; i < strlen(line); i++) {
667 if (!isspace(line[i]) && !iscntrl(line[i]))
/*
 * "rw"/"readwrite" option callback: map the textual io direction onto
 * td->ddir (plus sequential/iomix flags set on lines not visible here).
 * Longer literals are matched before their prefixes ("randread" before
 * "read", "randwrite" before "write") since strncmp only checks the
 * leading bytes.
 */
674 static int str_rw_cb(void *data, char *mem)
676 struct thread_data *td = data;
/* legacy numeric forms: "0" = read, "1" = write */
678 if (!strncmp(mem, "read", 4) || !strncmp(mem, "0", 1)) {
679 td->ddir = DDIR_READ;
682 } else if (!strncmp(mem, "randread", 8)) {
683 td->ddir = DDIR_READ;
686 } else if (!strncmp(mem, "write", 5) || !strncmp(mem, "1", 1)) {
687 td->ddir = DDIR_WRITE;
690 } else if (!strncmp(mem, "randwrite", 9)) {
691 td->ddir = DDIR_WRITE;
694 } else if (!strncmp(mem, "rw", 2)) {
699 } else if (!strncmp(mem, "randrw", 6)) {
/* fall-through: unknown value, report the accepted spellings */
706 log_err("fio: data direction: read, write, randread, randwrite, rw, randrw\n");
/*
 * "verify" option callback: select the data verification method.
 * Accepts "0" (off), "md5"/"1", or "crc32".
 */
710 static int str_verify_cb(void *data, char *mem)
712 struct thread_data *td = data;
714 if (!strncmp(mem, "0", 1)) {
715 td->verify = VERIFY_NONE;
717 } else if (!strncmp(mem, "md5", 3) || !strncmp(mem, "1", 1)) {
718 td->verify = VERIFY_MD5;
720 } else if (!strncmp(mem, "crc32", 5)) {
721 td->verify = VERIFY_CRC32;
/* unknown value: report accepted spellings */
725 log_err("fio: verify types: md5, crc32\n");
/*
 * "mem" option callback: choose the io buffer backing -- plain
 * malloc, SysV shared memory, or an mmap region.
 */
729 static int str_mem_cb(void *data, char *mem)
731 struct thread_data *td = data;
733 if (!strncmp(mem, "malloc", 6)) {
734 td->mem_type = MEM_MALLOC;
736 } else if (!strncmp(mem, "shm", 3)) {
737 td->mem_type = MEM_SHM;
739 } else if (!strncmp(mem, "mmap", 4)) {
740 td->mem_type = MEM_MMAP;
/* unknown value: report accepted spellings */
744 log_err("fio: mem type: malloc, shm, mmap\n");
/*
 * "ioengine" option callback: resolve the engine by name via
 * load_ioengine(); failure path prints the known engine names.
 */
748 static int str_ioengine_cb(void *data, char *str)
750 struct thread_data *td = data;
752 td->io_ops = load_ioengine(td, str);
756 log_err("fio: ioengine: { linuxaio, aio, libaio }, posixaio, sync, mmap, sgio, splice, cpu\n");
/*
 * "lockmem" option callback; stores into the global mlock_size
 * (body not visible in this view).  The data argument is unused.
 */
760 static int str_lockmem_cb(void fio_unused *data, unsigned long *val)
/*
 * "prioclass" option callback: OR the scheduling class into the upper
 * bits of td->ioprio (the class field sits above IOPRIO_CLASS_SHIFT).
 */
766 static int str_prioclass_cb(void *data, unsigned int *val)
768 struct thread_data *td = data;
770 td->ioprio |= *val << IOPRIO_CLASS_SHIFT;
/*
 * "prio" option callback: merges the priority value into td->ioprio
 * (assignment not visible in this view).
 */
774 static int str_prio_cb(void *data, unsigned int *val)
776 struct thread_data *td = data;
/*
 * "exitall" option callback: flag that all jobs should be terminated
 * when any one of them exits.
 */
782 static int str_exitall_cb(void)
784 exitall_on_terminate = 1;
/*
 * "cpumask" option callback: translate the integer bitmask into the
 * job's cpu affinity mask.
 * NOTE(review): fill_cpu_mask() takes the mask by value (see above),
 * so td->cpumask may not actually be updated here -- confirm.
 */
788 static int str_cpumask_cb(void *data, unsigned int *val)
790 struct thread_data *td = data;
792 fill_cpu_mask(td->cpumask, *val);
797 * This is our [ini] type file parser.
/*
 * Reads job definitions from an ini-style file: each [section] opens a
 * job (the special name "global" updates defaults instead), option
 * lines are handed to parse_option(), and add_job() finalizes each
 * section.  Returns 0 on success.
 */
799 int parse_jobs_ini(char *file, int stonewall_flag)
802 struct thread_data *td;
803 char *string, *name, *tmpbuf;
807 int ret = 0, stonewall;
809 f = fopen(file, "r");
811 perror("fopen job file");
815 string = malloc(4096);
817 tmpbuf = malloc(4096);
819 stonewall = stonewall_flag;
/* outer loop: scan for the next [jobname] section header */
820 while ((p = fgets(string, 4096, f)) != NULL) {
823 if (is_empty_or_comment(p))
/* NOTE(review): "[%s]" -- %s stops at whitespace, and the closing
 * bracket ends up inside name (stripped below); confirm name's buffer
 * is sized for a full 4096-byte line. */
825 if (sscanf(p, "[%s]", name) != 1)
828 global = !strncmp(name, "global", 6);
/* drop the trailing ']' captured by the sscanf above */
830 name[strlen(name) - 1] = '\0';
832 td = get_new_job(global, &def_thread);
839 * Seperate multiple job files by a stonewall
841 if (!global && stonewall) {
842 td->stonewall = stonewall;
/* inner loop: option lines until the next section/blank */
847 while ((p = fgets(string, 4096, f)) != NULL) {
848 if (is_empty_or_comment(p))
853 strip_blank_front(&p);
859 * Don't break here, continue parsing options so we
860 * dump all the bad ones. Makes trial/error fixups
861 * easier on the user.
863 ret = parse_option(p, options, td);
868 ret = add_job(td, name, 0);
/*
 * Reset def_thread and load it with the DEF_* compile-time defaults.
 * Every job parsed afterwards starts as a copy of this template, so a
 * [global] section in the job file effectively edits these values.
 * Returns nonzero if the current cpu affinity cannot be read.
 */
881 static int fill_def_thread(void)
883 memset(&def_thread, 0, sizeof(def_thread));
/* inherit the process affinity as the default job mask */
885 if (fio_getaffinity(getpid(), &def_thread.cpumask) == -1) {
886 perror("sched_getaffinity");
893 def_thread.ddir = DDIR_READ;
894 def_thread.iomix = 0;
895 def_thread.bs = DEF_BS;
/* -1 marks min/max block size as "unset"; fixed up in add_job() */
896 def_thread.min_bs = -1;
897 def_thread.max_bs = -1;
898 def_thread.odirect = DEF_ODIRECT;
899 def_thread.ratecycle = DEF_RATE_CYCLE;
900 def_thread.sequential = DEF_SEQUENTIAL;
/* def_timeout may have been set by the -t command line switch */
901 def_thread.timeout = def_timeout;
902 def_thread.overwrite = DEF_OVERWRITE;
903 def_thread.invalidate_cache = DEF_INVALIDATE;
904 def_thread.sync_io = DEF_SYNCIO;
905 def_thread.mem_type = MEM_MALLOC;
906 def_thread.bw_avg_time = DEF_BWAVGTIME;
907 def_thread.create_serialize = DEF_CREATE_SER;
908 def_thread.create_fsync = DEF_CREATE_FSYNC;
909 def_thread.loops = DEF_LOOPS;
910 def_thread.verify = DEF_VERIFY;
911 def_thread.stonewall = DEF_STONEWALL;
912 def_thread.numjobs = DEF_NUMJOBS;
913 def_thread.use_thread = DEF_USE_THREAD;
914 def_thread.rwmixcycle = DEF_RWMIX_CYCLE;
915 def_thread.rwmixread = DEF_RWMIX_READ;
916 def_thread.nice = DEF_NICE;
917 def_thread.rand_repeatable = DEF_RAND_REPEAT;
918 def_thread.nr_files = DEF_NR_FILES;
919 def_thread.unlink = DEF_UNLINK;
/* log flags come from the -w/-l command line switches */
920 def_thread.write_bw_log = write_bw_log;
921 def_thread.write_lat_log = write_lat_log;
922 #ifdef FIO_HAVE_DISK_UTIL
923 def_thread.do_disk_util = 1;
/*
 * Print version banner and command line help.
 * NOTE(review): getopt below also accepts -h and -m's long tail;
 * -h itself is not listed here -- confirm whether that is intended.
 */
929 static void usage(void)
931 printf("%s\n", fio_version_string);
932 printf("\t-o Write output to file\n");
933 printf("\t-t Runtime in seconds\n");
934 printf("\t-l Generate per-job latency logs\n");
935 printf("\t-w Generate per-job bandwidth logs\n");
936 printf("\t-m Minimal (terse) output\n");
937 printf("\t-v Print version info and exit\n");
/*
 * Process command line switches, then collect the remaining arguments
 * as job files into the global ini_file array.  Returns the number of
 * job files found (assembly of the return not visible in this view).
 */
940 static int parse_cmd_line(int argc, char *argv[])
942 int c, idx = 1, ini_idx = 0;
944 while ((c = getopt(argc, argv, "t:o:lwvhm")) != EOF) {
/* NOTE(review): atoi() silently yields 0 on bad input; strtol with
 * error checking would reject garbage runtimes. */
947 def_timeout = atoi(optarg);
959 f_out = fopen(optarg, "w+");
961 perror("fopen output");
975 printf("%s\n", fio_version_string);
/* grow the job-file list by one slot per non-option argument */
/* NOTE(review): p = realloc(p, ...) loses the old pointer on failure;
 * result is also unchecked -- confirm acceptable for startup code. */
982 ini_file = realloc(ini_file, ini_idx * sizeof(char *));
983 ini_file[ini_idx - 1] = strdup(argv[idx]);
/*
 * Detach and remove the shared memory segment backing threads[]
 * (created in setup_thread_area below).
 */
995 static void free_shm(void)
997 struct shmid_ds sbuf;
1000 shmdt((void *) threads);
1002 shmctl(shm_id, IPC_RMID, &sbuf);
1007 * The thread area is shared between the main process and the job
1008 * threads/processes. So setup a shared memory segment that will hold
1011 static int setup_thread_area(void)
1014 * 1024 is too much on some machines, scale max_jobs if
1015 * we get a failure that looks like too large a shm segment
1018 size_t size = max_jobs * sizeof(struct thread_data);
/* private key (0), create if absent, owner read/write only */
1020 shm_id = shmget(0, size, IPC_CREAT | 0600);
/* EINVAL suggests the segment was too large -- retried with a
 * smaller max_jobs (retry path not visible in this view) */
1023 if (errno != EINVAL) {
1034 threads = shmat(shm_id, NULL, 0);
/* shmat signals failure with (void *) -1, not NULL */
1035 if (threads == (void *) -1) {
1044 int parse_options(int argc, char *argv[])
1048 if (setup_thread_area())
1050 if (fill_def_thread())
1053 job_files = parse_cmd_line(argc, argv);
1055 log_err("Need job file(s)\n");
1060 for (i = 0; i < job_files; i++) {
1061 if (fill_def_thread())
1063 if (parse_jobs_ini(ini_file[i], i))