#include <string.h>
#include <errno.h>
#include <getopt.h>
-#include <assert.h>
#include <sys/ipc.h>
#include <sys/shm.h>
#include <sys/types.h>
#include "fio.h"
#include "parse.h"
+static char fio_version_string[] = "fio 1.14a";
+
#define FIO_RANDSEED (0xb1899bedUL)
-#define td_var_offset(var) ((size_t) &((struct thread_data *)0)->var)
+static char **ini_file;
+static int max_jobs = MAX_JOBS;
-static int str_mem_cb(void *, const char *);
-static int str_lockmem_cb(void *, unsigned long *);
-#ifdef FIO_HAVE_IOPRIO
-static int str_prio_cb(void *, unsigned int *);
-static int str_prioclass_cb(void *, unsigned int *);
-#endif
-static int str_exitall_cb(void);
-static int str_cpumask_cb(void *, unsigned int *);
-static int str_fst_cb(void *, const char *);
-static int str_filename_cb(void *, const char *);
-static int str_directory_cb(void *, const char *);
-static int str_opendir_cb(void *, const char *);
+struct thread_data def_thread;
+struct thread_data *threads = NULL;
-#define __stringify_1(x) #x
-#define __stringify(x) __stringify_1(x)
+int exitall_on_terminate = 0;
+int terse_output = 0;
+unsigned long long mlock_size = 0;
+FILE *f_out = NULL;
+FILE *f_err = NULL;
-/*
- * Map of job/command line options
- */
-static struct fio_option options[] = {
- {
- .name = "description",
- .type = FIO_OPT_STR_STORE,
- .off1 = td_var_offset(description),
- .help = "Text job description",
- },
- {
- .name = "name",
- .type = FIO_OPT_STR_STORE,
- .off1 = td_var_offset(name),
- .help = "Name of this job",
- },
- {
- .name = "directory",
- .type = FIO_OPT_STR_STORE,
- .off1 = td_var_offset(directory),
- .cb = str_directory_cb,
- .help = "Directory to store files in",
- },
- {
- .name = "filename",
- .type = FIO_OPT_STR_STORE,
- .off1 = td_var_offset(filename),
- .cb = str_filename_cb,
- .help = "File(s) to use for the workload",
- },
- {
- .name = "opendir",
- .type = FIO_OPT_STR_STORE,
- .off1 = td_var_offset(opendir),
- .cb = str_opendir_cb,
- .help = "Recursively add files from this directory and down",
- },
- {
- .name = "rw",
- .type = FIO_OPT_STR,
- .off1 = td_var_offset(td_ddir),
- .help = "IO direction",
- .def = "read",
- .posval = {
- { .ival = "read",
- .oval = TD_DDIR_READ,
- .help = "Sequential read",
- },
- { .ival = "write",
- .oval = TD_DDIR_WRITE,
- .help = "Sequential write",
- },
- { .ival = "randread",
- .oval = TD_DDIR_RANDREAD,
- .help = "Random read",
- },
- { .ival = "randwrite",
- .oval = TD_DDIR_RANDWRITE,
- .help = "Random write",
- },
- { .ival = "rw",
- .oval = TD_DDIR_RW,
- .help = "Sequential read and write mix",
- },
- { .ival = "randrw",
- .oval = TD_DDIR_RANDRW,
- .help = "Random read and write mix"
- },
- },
- },
- {
- .name = "ioengine",
- .type = FIO_OPT_STR_STORE,
- .off1 = td_var_offset(ioengine),
- .help = "IO engine to use",
- .def = "sync",
- .posval = {
- { .ival = "sync",
- .help = "Use read/write",
- },
-#ifdef FIO_HAVE_LIBAIO
- { .ival = "libaio",
- .help = "Linux native asynchronous IO",
- },
-#endif
-#ifdef FIO_HAVE_POSIXAIO
- { .ival = "posixaio",
- .help = "POSIX asynchronous IO",
- },
-#endif
- { .ival = "mmap",
- .help = "Memory mapped IO",
- },
-#ifdef FIO_HAVE_SPLICE
- { .ival = "splice",
- .help = "splice/vmsplice based IO",
- },
-#endif
-#ifdef FIO_HAVE_SGIO
- { .ival = "sg",
- .help = "SCSI generic v3 IO",
- },
-#endif
- { .ival = "null",
- .help = "Testing engine (no data transfer)",
- },
- { .ival = "net",
- .help = "Network IO",
- },
-#ifdef FIO_HAVE_SYSLET
- { .ival = "syslet-rw",
- .help = "syslet enabled async pread/pwrite IO",
- },
-#endif
- { .ival = "cpuio",
- .help = "CPU cycler burner engine",
- },
- { .ival = "external",
- .help = "Load external engine (append name)",
- },
- },
- },
- {
- .name = "iodepth",
- .type = FIO_OPT_INT,
- .off1 = td_var_offset(iodepth),
- .help = "Amount of IO buffers to keep in flight",
- .def = "1",
- },
- {
- .name = "iodepth_batch",
- .type = FIO_OPT_INT,
- .off1 = td_var_offset(iodepth_batch),
- .help = "Number of IO to submit in one go",
- },
- {
- .name = "iodepth_low",
- .type = FIO_OPT_INT,
- .off1 = td_var_offset(iodepth_low),
- .help = "Low water mark for queuing depth",
- },
- {
- .name = "size",
- .type = FIO_OPT_STR_VAL,
- .off1 = td_var_offset(total_file_size),
- .help = "Total size of device or files",
- },
- {
- .name = "filesize",
- .type = FIO_OPT_STR_VAL,
- .off1 = td_var_offset(file_size_low),
- .off2 = td_var_offset(file_size_high),
- .help = "Size of individual files",
- },
- {
- .name = "bs",
- .type = FIO_OPT_STR_VAL_INT,
- .off1 = td_var_offset(bs[DDIR_READ]),
- .off2 = td_var_offset(bs[DDIR_WRITE]),
- .help = "Block size unit",
- .def = "4k",
- },
- {
- .name = "bsrange",
- .type = FIO_OPT_RANGE,
- .off1 = td_var_offset(min_bs[DDIR_READ]),
- .off2 = td_var_offset(max_bs[DDIR_READ]),
- .off3 = td_var_offset(min_bs[DDIR_WRITE]),
- .off4 = td_var_offset(max_bs[DDIR_WRITE]),
- .help = "Set block size range (in more detail than bs)",
- },
- {
- .name = "bs_unaligned",
- .type = FIO_OPT_STR_SET,
- .off1 = td_var_offset(bs_unaligned),
- .help = "Don't sector align IO buffer sizes",
- },
- {
- .name = "offset",
- .type = FIO_OPT_STR_VAL,
- .off1 = td_var_offset(start_offset),
- .help = "Start IO from this offset",
- .def = "0",
- },
- {
- .name = "randrepeat",
- .type = FIO_OPT_BOOL,
- .off1 = td_var_offset(rand_repeatable),
- .help = "Use repeatable random IO pattern",
- .def = "1",
- },
- {
- .name = "norandommap",
- .type = FIO_OPT_STR_SET,
- .off1 = td_var_offset(norandommap),
- .help = "Accept potential duplicate random blocks",
- },
- {
- .name = "nrfiles",
- .type = FIO_OPT_INT,
- .off1 = td_var_offset(nr_files),
- .help = "Split job workload between this number of files",
- .def = "1",
- },
- {
- .name = "openfiles",
- .type = FIO_OPT_INT,
- .off1 = td_var_offset(open_files),
- .help = "Number of files to keep open at the same time",
- },
- {
- .name = "file_service_type",
- .type = FIO_OPT_STR,
- .cb = str_fst_cb,
- .off1 = td_var_offset(file_service_type),
- .help = "How to select which file to service next",
- .def = "roundrobin",
- .posval = {
- { .ival = "random",
- .oval = FIO_FSERVICE_RANDOM,
- .help = "Choose a file at random",
- },
- { .ival = "roundrobin",
- .oval = FIO_FSERVICE_RR,
- .help = "Round robin select files",
- },
- },
- },
- {
- .name = "fsync",
- .type = FIO_OPT_INT,
- .off1 = td_var_offset(fsync_blocks),
- .help = "Issue fsync for writes every given number of blocks",
- .def = "0",
- },
- {
- .name = "direct",
- .type = FIO_OPT_BOOL,
- .off1 = td_var_offset(odirect),
- .help = "Use O_DIRECT IO (negates buffered)",
- .def = "0",
- },
- {
- .name = "buffered",
- .type = FIO_OPT_BOOL,
- .off1 = td_var_offset(odirect),
- .neg = 1,
- .help = "Use buffered IO (negates direct)",
- .def = "1",
- },
- {
- .name = "overwrite",
- .type = FIO_OPT_BOOL,
- .off1 = td_var_offset(overwrite),
- .help = "When writing, set whether to overwrite current data",
- .def = "0",
- },
- {
- .name = "loops",
- .type = FIO_OPT_INT,
- .off1 = td_var_offset(loops),
- .help = "Number of times to run the job",
- .def = "1",
- },
- {
- .name = "numjobs",
- .type = FIO_OPT_INT,
- .off1 = td_var_offset(numjobs),
- .help = "Duplicate this job this many times",
- .def = "1",
- },
- {
- .name = "startdelay",
- .type = FIO_OPT_INT,
- .off1 = td_var_offset(start_delay),
- .help = "Only start job when this period has passed",
- .def = "0",
- },
- {
- .name = "runtime",
- .alias = "timeout",
- .type = FIO_OPT_STR_VAL_TIME,
- .off1 = td_var_offset(timeout),
- .help = "Stop workload when this amount of time has passed",
- .def = "0",
- },
- {
- .name = "mem",
- .type = FIO_OPT_STR,
- .cb = str_mem_cb,
- .off1 = td_var_offset(mem_type),
- .help = "Backing type for IO buffers",
- .def = "malloc",
- .posval = {
- { .ival = "malloc",
- .oval = MEM_MALLOC,
- .help = "Use malloc(3) for IO buffers",
- },
- { .ival = "shm",
- .oval = MEM_SHM,
- .help = "Use shared memory segments for IO buffers",
- },
-#ifdef FIO_HAVE_HUGETLB
- { .ival = "shmhuge",
- .oval = MEM_SHMHUGE,
- .help = "Like shm, but use huge pages",
- },
-#endif
- { .ival = "mmap",
- .oval = MEM_MMAP,
- .help = "Use mmap(2) (file or anon) for IO buffers",
- },
-#ifdef FIO_HAVE_HUGETLB
- { .ival = "mmaphuge",
- .oval = MEM_MMAPHUGE,
- .help = "Like mmap, but use huge pages",
- },
-#endif
- },
- },
- {
- .name = "verify",
- .type = FIO_OPT_STR,
- .off1 = td_var_offset(verify),
- .help = "Verify data written",
- .def = "0",
- .posval = {
- { .ival = "0",
- .oval = VERIFY_NONE,
- .help = "Don't do IO verification",
- },
- { .ival = "crc32",
- .oval = VERIFY_CRC32,
- .help = "Use crc32 checksums for verification",
- },
- { .ival = "md5",
- .oval = VERIFY_MD5,
- .help = "Use md5 checksums for verification",
- },
- },
- },
- {
- .name = "write_iolog",
- .type = FIO_OPT_STR_STORE,
- .off1 = td_var_offset(write_iolog_file),
- .help = "Store IO pattern to file",
- },
- {
- .name = "read_iolog",
- .type = FIO_OPT_STR_STORE,
- .off1 = td_var_offset(read_iolog_file),
- .help = "Playback IO pattern from file",
- },
- {
- .name = "exec_prerun",
- .type = FIO_OPT_STR_STORE,
- .off1 = td_var_offset(exec_prerun),
- .help = "Execute this file prior to running job",
- },
- {
- .name = "exec_postrun",
- .type = FIO_OPT_STR_STORE,
- .off1 = td_var_offset(exec_postrun),
- .help = "Execute this file after running job",
- },
-#ifdef FIO_HAVE_IOSCHED_SWITCH
- {
- .name = "ioscheduler",
- .type = FIO_OPT_STR_STORE,
- .off1 = td_var_offset(ioscheduler),
- .help = "Use this IO scheduler on the backing device",
- },
-#endif
- {
- .name = "zonesize",
- .type = FIO_OPT_STR_VAL,
- .off1 = td_var_offset(zone_size),
- .help = "Give size of an IO zone",
- .def = "0",
- },
- {
- .name = "zoneskip",
- .type = FIO_OPT_STR_VAL,
- .off1 = td_var_offset(zone_skip),
- .help = "Space between IO zones",
- .def = "0",
- },
- {
- .name = "lockmem",
- .type = FIO_OPT_STR_VAL,
- .cb = str_lockmem_cb,
- .help = "Lock down this amount of memory",
- .def = "0",
- },
- {
- .name = "rwmixcycle",
- .type = FIO_OPT_INT,
- .off1 = td_var_offset(rwmixcycle),
- .help = "Cycle period for mixed read/write workloads (msec)",
- .def = "500",
- },
- {
- .name = "rwmixread",
- .type = FIO_OPT_INT,
- .off1 = td_var_offset(rwmixread),
- .maxval = 100,
- .help = "Percentage of mixed workload that is reads",
- .def = "50",
- },
- {
- .name = "rwmixwrite",
- .type = FIO_OPT_INT,
- .off1 = td_var_offset(rwmixwrite),
- .maxval = 100,
- .help = "Percentage of mixed workload that is writes",
- .def = "50",
- },
- {
- .name = "nice",
- .type = FIO_OPT_INT,
- .off1 = td_var_offset(nice),
- .help = "Set job CPU nice value",
- .minval = -19,
- .maxval = 20,
- .def = "0",
- },
-#ifdef FIO_HAVE_IOPRIO
- {
- .name = "prio",
- .type = FIO_OPT_INT,
- .cb = str_prio_cb,
- .help = "Set job IO priority value",
- .minval = 0,
- .maxval = 7,
- },
- {
- .name = "prioclass",
- .type = FIO_OPT_INT,
- .cb = str_prioclass_cb,
- .help = "Set job IO priority class",
- .minval = 0,
- .maxval = 3,
- },
-#endif
- {
- .name = "thinktime",
- .type = FIO_OPT_INT,
- .off1 = td_var_offset(thinktime),
- .help = "Idle time between IO buffers (usec)",
- .def = "0",
- },
- {
- .name = "thinktime_spin",
- .type = FIO_OPT_INT,
- .off1 = td_var_offset(thinktime_spin),
- .help = "Start think time by spinning this amount (usec)",
- .def = "0",
- },
- {
- .name = "thinktime_blocks",
- .type = FIO_OPT_INT,
- .off1 = td_var_offset(thinktime_blocks),
- .help = "IO buffer period between 'thinktime'",
- .def = "1",
- },
- {
- .name = "rate",
- .type = FIO_OPT_INT,
- .off1 = td_var_offset(rate),
- .help = "Set bandwidth rate",
- },
- {
- .name = "ratemin",
- .type = FIO_OPT_INT,
- .off1 = td_var_offset(ratemin),
- .help = "The bottom limit accepted",
- },
- {
- .name = "ratecycle",
- .type = FIO_OPT_INT,
- .off1 = td_var_offset(ratecycle),
- .help = "Window average for rate limits (msec)",
- .def = "1000",
- },
- {
- .name = "invalidate",
- .type = FIO_OPT_BOOL,
- .off1 = td_var_offset(invalidate_cache),
- .help = "Invalidate buffer/page cache prior to running job",
- .def = "1",
- },
- {
- .name = "sync",
- .type = FIO_OPT_BOOL,
- .off1 = td_var_offset(sync_io),
- .help = "Use O_SYNC for buffered writes",
- .def = "0",
- },
- {
- .name = "bwavgtime",
- .type = FIO_OPT_INT,
- .off1 = td_var_offset(bw_avg_time),
- .help = "Time window over which to calculate bandwidth (msec)",
- .def = "500",
- },
- {
- .name = "create_serialize",
- .type = FIO_OPT_BOOL,
- .off1 = td_var_offset(create_serialize),
- .help = "Serialize creating of job files",
- .def = "1",
- },
- {
- .name = "create_fsync",
- .type = FIO_OPT_BOOL,
- .off1 = td_var_offset(create_fsync),
- .help = "Fsync file after creation",
- .def = "1",
- },
- {
- .name = "cpuload",
- .type = FIO_OPT_INT,
- .off1 = td_var_offset(cpuload),
- .help = "Use this percentage of CPU",
- },
- {
- .name = "cpuchunks",
- .type = FIO_OPT_INT,
- .off1 = td_var_offset(cpucycle),
- .help = "Length of the CPU burn cycles (usecs)",
- .def = "50000",
- },
-#ifdef FIO_HAVE_CPU_AFFINITY
- {
- .name = "cpumask",
- .type = FIO_OPT_INT,
- .cb = str_cpumask_cb,
- .help = "CPU affinity mask",
- },
-#endif
- {
- .name = "end_fsync",
- .type = FIO_OPT_BOOL,
- .off1 = td_var_offset(end_fsync),
- .help = "Include fsync at the end of job",
- .def = "0",
- },
- {
- .name = "fsync_on_close",
- .type = FIO_OPT_BOOL,
- .off1 = td_var_offset(fsync_on_close),
- .help = "fsync files on close",
- .def = "0",
- },
- {
- .name = "unlink",
- .type = FIO_OPT_BOOL,
- .off1 = td_var_offset(unlink),
- .help = "Unlink created files after job has completed",
- .def = "0",
- },
- {
- .name = "exitall",
- .type = FIO_OPT_STR_SET,
- .cb = str_exitall_cb,
- .help = "Terminate all jobs when one exits",
- },
- {
- .name = "stonewall",
- .type = FIO_OPT_STR_SET,
- .off1 = td_var_offset(stonewall),
- .help = "Insert a hard barrier between this job and previous",
- },
- {
- .name = "thread",
- .type = FIO_OPT_STR_SET,
- .off1 = td_var_offset(use_thread),
- .help = "Use threads instead of forks",
- },
- {
- .name = "write_bw_log",
- .type = FIO_OPT_STR_SET,
- .off1 = td_var_offset(write_bw_log),
- .help = "Write log of bandwidth during run",
- },
- {
- .name = "write_lat_log",
- .type = FIO_OPT_STR_SET,
- .off1 = td_var_offset(write_lat_log),
- .help = "Write log of latency during run",
- },
- {
- .name = "hugepage-size",
- .type = FIO_OPT_STR_VAL,
- .off1 = td_var_offset(hugepage_size),
- .help = "When using hugepages, specify size of each page",
- .def = __stringify(FIO_HUGE_PAGE),
- },
- {
- .name = "group_reporting",
- .type = FIO_OPT_STR_SET,
- .off1 = td_var_offset(group_reporting),
- .help = "Do reporting on a per-group basis",
- },
- {
- .name = NULL,
- },
-};
+int write_bw_log = 0;
+
+static int def_timeout = 0;
+static int write_lat_log = 0;
-#define FIO_JOB_OPTS (sizeof(options) / sizeof(struct fio_option))
-#define FIO_CMD_OPTS (16)
-#define FIO_GETOPT_JOB (0x89988998)
+static int prev_group_jobs;
/*
* Command line options. These will contain the above, plus a few
* extra that only pertain to fio itself and not jobs.
*/
-static struct option long_options[FIO_JOB_OPTS + FIO_CMD_OPTS] = {
+static struct option long_options[FIO_NR_OPTIONS] = {
{
.name = "output",
.has_arg = required_argument,
},
};
-static int def_timeout = 0;
-
-static char fio_version_string[] = "fio 1.14a";
-
-static char **ini_file;
-static int max_jobs = MAX_JOBS;
-
-struct thread_data def_thread;
-struct thread_data *threads = NULL;
-
-int exitall_on_terminate = 0;
-int terse_output = 0;
-unsigned long long mlock_size = 0;
-FILE *f_out = NULL;
-FILE *f_err = NULL;
-
-static int write_lat_log = 0;
-int write_bw_log = 0;
-
-static int prev_group_jobs;
-
FILE *get_f_out()
{
return f_out;
thread_number--;
}
+/*
+ * Translate the job's bandwidth rate (->o.rate, KiB/sec) or IOPS rate
+ * (->o.rate_iops) option into the per-IO pacing cycle stored in
+ * td->rate_usec_cycle.  Returns 0 on success (or when no rate limit
+ * was requested), -1 when the requested rate is too low to represent.
+ */
+static int setup_rate(struct thread_data *td)
+{
+	unsigned long nr_reads_per_msec;
+	unsigned long long rate;
+	unsigned int bs;
+
+	/* No bandwidth or IOPS limit set: nothing to do */
+	if (!td->o.rate && !td->o.rate_iops)
+		return 0;
+
+	/*
+	 * Pacing is computed against the minimum block size for the
+	 * job's data direction.
+	 */
+	if (td_rw(td))
+		bs = td->o.rw_min_bs;
+	else if (td_read(td))
+		bs = td->o.min_bs[DDIR_READ];
+	else
+		bs = td->o.min_bs[DDIR_WRITE];
+
+	if (td->o.rate) {
+		/* rate is KiB/sec: convert to IOs per second at size bs */
+		rate = td->o.rate;
+		nr_reads_per_msec = (rate * 1024 * 1000LL) / bs;
+	} else
+		nr_reads_per_msec = td->o.rate_iops * 1000UL;
+
+	if (!nr_reads_per_msec) {
+		log_err("rate lower than supported\n");
+		return -1;
+	}
+
+	/* Nanoseconds between successive IOs to hit the target rate */
+	td->rate_usec_cycle = 1000000000ULL / nr_reads_per_msec;
+	td->rate_pending_usleep = 0;
+	return 0;
+}
+
/*
* Lazy way of fixing up options that depend on each other. We could also
* define option callback handlers, but this is easier.
*/
-static void fixup_options(struct thread_data *td)
+static int fixup_options(struct thread_data *td)
{
- if (!td->rwmixread && td->rwmixwrite)
- td->rwmixread = 100 - td->rwmixwrite;
+ struct thread_options *o = &td->o;
+
+ if (!o->rwmixread && o->rwmixwrite)
+ o->rwmixread = 100 - o->rwmixwrite;
- if (td->write_iolog_file && td->read_iolog_file) {
+ if (o->write_iolog_file && o->read_iolog_file) {
log_err("fio: read iolog overrides write_iolog\n");
- free(td->write_iolog_file);
- td->write_iolog_file = NULL;
+ free(o->write_iolog_file);
+ o->write_iolog_file = NULL;
}
if (td->io_ops->flags & FIO_SYNCIO)
- td->iodepth = 1;
+ o->iodepth = 1;
else {
- if (!td->iodepth)
- td->iodepth = td->open_files;
+ if (!o->iodepth)
+ o->iodepth = o->open_files;
}
/*
* only really works for sequential io for now, and with 1 file
*/
- if (td->zone_size && td_random(td) && td->open_files == 1)
- td->zone_size = 0;
+ if (o->zone_size && td_random(td) && o->open_files == 1)
+ o->zone_size = 0;
/*
* Reads can do overwrites, we always need to pre-create the file
*/
if (td_read(td) || td_rw(td))
- td->overwrite = 1;
+ o->overwrite = 1;
- if (!td->min_bs[DDIR_READ])
- td->min_bs[DDIR_READ]= td->bs[DDIR_READ];
- if (!td->max_bs[DDIR_READ])
- td->max_bs[DDIR_READ] = td->bs[DDIR_READ];
- if (!td->min_bs[DDIR_WRITE])
- td->min_bs[DDIR_WRITE]= td->bs[DDIR_WRITE];
- if (!td->max_bs[DDIR_WRITE])
- td->max_bs[DDIR_WRITE] = td->bs[DDIR_WRITE];
+ if (!o->min_bs[DDIR_READ])
+ o->min_bs[DDIR_READ]= o->bs[DDIR_READ];
+ if (!o->max_bs[DDIR_READ])
+ o->max_bs[DDIR_READ] = o->bs[DDIR_READ];
+ if (!o->min_bs[DDIR_WRITE])
+ o->min_bs[DDIR_WRITE]= o->bs[DDIR_WRITE];
+ if (!o->max_bs[DDIR_WRITE])
+ o->max_bs[DDIR_WRITE] = o->bs[DDIR_WRITE];
- td->rw_min_bs = min(td->min_bs[DDIR_READ], td->min_bs[DDIR_WRITE]);
+ o->rw_min_bs = min(o->min_bs[DDIR_READ], o->min_bs[DDIR_WRITE]);
- if (!td->file_size_high)
- td->file_size_high = td->file_size_low;
+ if (!o->file_size_high)
+ o->file_size_high = o->file_size_low;
if (td_read(td) && !td_rw(td))
- td->verify = 0;
+ o->verify = 0;
- if (td->norandommap && td->verify != VERIFY_NONE) {
+ if (o->norandommap && o->verify != VERIFY_NONE) {
log_err("fio: norandommap given, verify disabled\n");
- td->verify = VERIFY_NONE;
+ o->verify = VERIFY_NONE;
}
- if (td->bs_unaligned && (td->odirect || td->io_ops->flags & FIO_RAWIO))
+ if (o->bs_unaligned && (o->odirect || td->io_ops->flags & FIO_RAWIO))
log_err("fio: bs_unaligned may not work with raw io\n");
/*
* thinktime_spin must be less than thinktime
*/
- if (td->thinktime_spin > td->thinktime)
- td->thinktime_spin = td->thinktime;
+ if (o->thinktime_spin > o->thinktime)
+ o->thinktime_spin = o->thinktime;
/*
* The low water mark cannot be bigger than the iodepth
*/
- if (td->iodepth_low > td->iodepth || !td->iodepth_low) {
+ if (o->iodepth_low > o->iodepth || !o->iodepth_low) {
/*
* syslet work around - if the workload is sequential,
* we want to let the queue drain all the way down to
* avoid seeking between async threads
*/
if (!strcmp(td->io_ops->name, "syslet-rw") && !td_random(td))
- td->iodepth_low = 1;
+ o->iodepth_low = 1;
else
- td->iodepth_low = td->iodepth;
+ o->iodepth_low = o->iodepth;
}
/*
* If batch number isn't set, default to the same as iodepth
*/
- if (td->iodepth_batch > td->iodepth || !td->iodepth_batch)
- td->iodepth_batch = td->iodepth;
+ if (o->iodepth_batch > o->iodepth || !o->iodepth_batch)
+ o->iodepth_batch = o->iodepth;
- if (td->nr_files > td->files_index)
- td->nr_files = td->files_index;
+ if (o->nr_files > td->files_index)
+ o->nr_files = td->files_index;
+
+ if (o->open_files > o->nr_files || !o->open_files)
+ o->open_files = o->nr_files;
+
+ if ((o->rate && o->rate_iops) || (o->ratemin && o->rate_iops_min)) {
+ log_err("fio: rate and rate_iops are mutually exclusive\n");
+ return 1;
+ }
+ if ((o->rate < o->ratemin) || (o->rate_iops < o->rate_iops_min)) {
+ log_err("fio: minimum rate exceeds rate\n");
+ return 1;
+ }
- if (td->open_files > td->nr_files || !td->open_files)
- td->open_files = td->nr_files;
+ return 0;
}
/*
static int init_random_state(struct thread_data *td)
{
unsigned long seeds[6];
- int fd, num_maps, blocks;
- struct fio_file *f;
- unsigned int i;
+ int fd;
fd = open("/dev/urandom", O_RDONLY);
if (fd == -1) {
os_random_seed(seeds[1], &td->verify_state);
os_random_seed(seeds[2], &td->rwmix_state);
- if (td->file_service_type == FIO_FSERVICE_RANDOM)
+ if (td->o.file_service_type == FIO_FSERVICE_RANDOM)
os_random_seed(seeds[3], &td->next_file_state);
os_random_seed(seeds[5], &td->file_size_state);
if (!td_random(td))
return 0;
- if (td->rand_repeatable)
+ if (td->o.rand_repeatable)
seeds[4] = FIO_RANDSEED * td->thread_number;
- if (!td->norandommap) {
- for_each_file(td, f, i) {
- blocks = (f->real_file_size + td->rw_min_bs - 1) / td->rw_min_bs;
- num_maps = (blocks + BLOCKS_PER_MAP-1)/ BLOCKS_PER_MAP;
- f->file_map = malloc(num_maps * sizeof(long));
- if (!f->file_map) {
- log_err("fio: failed allocating random map. If running a large number of jobs, try the 'norandommap' option\n");
- return 1;
- }
- f->num_maps = num_maps;
- memset(f->file_map, 0, num_maps * sizeof(long));
- }
- }
-
os_random_seed(seeds[4], &td->random_state);
return 0;
}
if (td == &def_thread)
return 0;
- engine = get_engine_name(td->ioengine);
+ engine = get_engine_name(td->o.ioengine);
td->io_ops = load_ioengine(td, engine);
if (!td->io_ops) {
log_err("fio: failed to load engine %s\n", engine);
- return 1;
+ goto err;
}
- if (td->use_thread)
+ if (td->o.use_thread)
nr_thread++;
else
nr_process++;
- if (td->odirect)
+ if (td->o.odirect)
td->io_ops->flags |= FIO_RAWIO;
file_alloced = 0;
- if (!td->filename && !td->files_index) {
+ if (!td->o.filename && !td->files_index) {
file_alloced = 1;
- if (td->nr_files == 1 && exists_and_not_file(jobname))
+ if (td->o.nr_files == 1 && exists_and_not_file(jobname))
add_file(td, jobname);
else {
- for (i = 0; i < td->nr_files; i++) {
+ for (i = 0; i < td->o.nr_files; i++) {
sprintf(fname, "%s.%d.%d", jobname, td->thread_number, i);
add_file(td, fname);
}
}
}
- fixup_options(td);
+ if (fixup_options(td))
+ goto err;
for_each_file(td, f, i) {
- if (td->directory && f->filetype == FIO_TYPE_FILE) {
- sprintf(fname, "%s/%s", td->directory, f->file_name);
+ if (td->o.directory && f->filetype == FIO_TYPE_FILE) {
+ sprintf(fname, "%s/%s", td->o.directory, f->file_name);
f->file_name = strdup(fname);
}
}
td->ts.slat_stat[0].min_val = td->ts.slat_stat[1].min_val = ULONG_MAX;
td->ts.bw_stat[0].min_val = td->ts.bw_stat[1].min_val = ULONG_MAX;
- if ((td->stonewall || td->numjobs > 1) && prev_group_jobs) {
+ if ((td->o.stonewall || td->o.numjobs > 1 || td->o.new_group)
+ && prev_group_jobs) {
prev_group_jobs = 0;
groupid++;
}
if (setup_rate(td))
goto err;
- if (td->write_lat_log) {
+ if (td->o.write_lat_log) {
setup_log(&td->ts.slat_log);
setup_log(&td->ts.clat_log);
}
- if (td->write_bw_log)
+ if (td->o.write_bw_log)
setup_log(&td->ts.bw_log);
- if (!td->name)
- td->name = strdup(jobname);
+ if (!td->o.name)
+ td->o.name = strdup(jobname);
if (!terse_output) {
if (!job_add_num) {
if (!strcmp(td->io_ops->name, "cpuio"))
- log_info("%s: ioengine=cpu, cpuload=%u, cpucycle=%u\n", td->name, td->cpuload, td->cpucycle);
+ log_info("%s: ioengine=cpu, cpuload=%u, cpucycle=%u\n", td->o.name, td->o.cpuload, td->o.cpucycle);
else {
char *c1, *c2, *c3, *c4;
- c1 = to_kmg(td->min_bs[DDIR_READ]);
- c2 = to_kmg(td->max_bs[DDIR_READ]);
- c3 = to_kmg(td->min_bs[DDIR_WRITE]);
- c4 = to_kmg(td->max_bs[DDIR_WRITE]);
+ c1 = to_kmg(td->o.min_bs[DDIR_READ]);
+ c2 = to_kmg(td->o.max_bs[DDIR_READ]);
+ c3 = to_kmg(td->o.min_bs[DDIR_WRITE]);
+ c4 = to_kmg(td->o.max_bs[DDIR_WRITE]);
- log_info("%s: (g=%d): rw=%s, bs=%s-%s/%s-%s, ioengine=%s, iodepth=%u\n", td->name, td->groupid, ddir_str[td->td_ddir], c1, c2, c3, c4, td->io_ops->name, td->iodepth);
+ log_info("%s: (g=%d): rw=%s, bs=%s-%s/%s-%s, ioengine=%s, iodepth=%u\n", td->o.name, td->groupid, ddir_str[td->o.td_ddir], c1, c2, c3, c4, td->io_ops->name, td->o.iodepth);
free(c1);
free(c2);
* recurse add identical jobs, clear numjobs and stonewall options
* as they don't apply to sub-jobs
*/
- numjobs = td->numjobs;
+ numjobs = td->o.numjobs;
while (--numjobs) {
struct thread_data *td_new = get_new_job(0, td);
if (!td_new)
goto err;
- td_new->numjobs = 1;
- td_new->stonewall = 0;
+ td_new->o.numjobs = 1;
+ td_new->o.stonewall = 0;
if (file_alloced) {
- td_new->filename = NULL;
+ td_new->o.filename = NULL;
td_new->files_index = 0;
td_new->files = NULL;
}
goto err;
}
- if (td->numjobs > 1) {
- groupid++;
- prev_group_jobs = 0;
- }
-
return 0;
err:
put_job(td);
return -1;
}
-static void fill_cpu_mask(os_cpu_mask_t cpumask, int cpu)
-{
-#ifdef FIO_HAVE_CPU_AFFINITY
- unsigned int i;
-
- CPU_ZERO(&cpumask);
-
- for (i = 0; i < sizeof(int) * 8; i++) {
- if ((1 << i) & cpu)
- CPU_SET(i, &cpumask);
- }
-#endif
-}
-
static int is_empty_or_comment(char *line)
{
unsigned int i;
return 1;
}
-/*
- * Check if mmap/mmaphuge has a :/foo/bar/file at the end. If so, return that.
- */
-static char *get_opt_postfix(const char *str)
-{
- char *p = strstr(str, ":");
-
- if (!p)
- return NULL;
-
- p++;
- strip_blank_front(&p);
- strip_blank_end(p);
- return strdup(p);
-}
-
-static int str_mem_cb(void *data, const char *mem)
-{
- struct thread_data *td = data;
-
- if (td->mem_type == MEM_MMAPHUGE || td->mem_type == MEM_MMAP) {
- td->mmapfile = get_opt_postfix(mem);
- if (td->mem_type == MEM_MMAPHUGE && !td->mmapfile) {
- log_err("fio: mmaphuge:/path/to/file\n");
- return 1;
- }
- }
-
- return 0;
-}
-
-static int str_lockmem_cb(void fio_unused *data, unsigned long *val)
-{
- mlock_size = *val;
- return 0;
-}
-
-#ifdef FIO_HAVE_IOPRIO
-static int str_prioclass_cb(void *data, unsigned int *val)
-{
- struct thread_data *td = data;
-
- td->ioprio |= *val << IOPRIO_CLASS_SHIFT;
- return 0;
-}
-
-static int str_prio_cb(void *data, unsigned int *val)
-{
- struct thread_data *td = data;
-
- td->ioprio |= *val;
- return 0;
-}
-#endif
-
-static int str_exitall_cb(void)
-{
- exitall_on_terminate = 1;
- return 0;
-}
-
-static int str_cpumask_cb(void *data, unsigned int *val)
-{
- struct thread_data *td = data;
-
- fill_cpu_mask(td->cpumask, *val);
- return 0;
-}
-
-static int str_fst_cb(void *data, const char *str)
-{
- struct thread_data *td = data;
- char *nr = get_opt_postfix(str);
-
- td->file_service_nr = 1;
- if (nr)
- td->file_service_nr = atoi(nr);
-
- return 0;
-}
-
-static int str_filename_cb(void *data, const char *input)
-{
- struct thread_data *td = data;
- char *fname, *str, *p;
-
- p = str = strdup(input);
-
- strip_blank_front(&str);
- strip_blank_end(str);
-
- if (!td->files_index)
- td->nr_files = 0;
-
- while ((fname = strsep(&str, ":")) != NULL) {
- if (!strlen(fname))
- break;
- add_file(td, fname);
- td->nr_files++;
- }
-
- free(p);
- return 0;
-}
-
-static int str_directory_cb(void *data, const char fio_unused *str)
-{
- struct thread_data *td = data;
- struct stat sb;
-
- if (lstat(td->directory, &sb) < 0) {
- log_err("fio: %s is not a directory\n", td->directory);
- td_verror(td, errno, "lstat");
- return 1;
- }
- if (!S_ISDIR(sb.st_mode)) {
- log_err("fio: %s is not a directory\n", td->directory);
- return 1;
- }
-
- return 0;
-}
-
-static int str_opendir_cb(void *data, const char fio_unused *str)
-{
- struct thread_data *td = data;
-
- if (!td->files_index)
- td->nr_files = 0;
-
- return add_dir_files(td, td->opendir);
-}
-
/*
* This is our [ini] type file parser.
*/
* Seperate multiple job files by a stonewall
*/
if (!global && stonewall) {
- td->stonewall = stonewall;
+ td->o.stonewall = stonewall;
stonewall = 0;
}
* dump all the bad ones. Makes trial/error fixups
* easier on the user.
*/
- ret |= parse_option(p, options, td);
+ ret |= fio_option_parse(td, p);
}
if (!ret) {
{
memset(&def_thread, 0, sizeof(def_thread));
- if (fio_getaffinity(getpid(), &def_thread.cpumask) == -1) {
+ if (fio_getaffinity(getpid(), &def_thread.o.cpumask) == -1) {
perror("sched_getaffinity");
return 1;
}
/*
* fill default options
*/
- fill_default_options(&def_thread, options);
+ fio_fill_default_options(&def_thread);
- def_thread.timeout = def_timeout;
- def_thread.write_bw_log = write_bw_log;
- def_thread.write_lat_log = write_lat_log;
+ def_thread.o.timeout = def_timeout;
+ def_thread.o.write_bw_log = write_bw_log;
+ def_thread.o.write_lat_log = write_lat_log;
#ifdef FIO_HAVE_DISK_UTIL
- def_thread.do_disk_util = 1;
+ def_thread.o.do_disk_util = 1;
#endif
return 0;
}
+/*
+ * Detach and remove the shared memory segment that holds the
+ * thread_data array.  Registered via atexit() in setup_thread_area()
+ * so the segment is always cleaned up on normal process exit.
+ */
+static void free_shm(void)
+{
+	struct shmid_ds sbuf;
+
+	if (threads) {
+		shmdt((void *) threads);
+		threads = NULL;
+		shmctl(shm_id, IPC_RMID, &sbuf);
+	}
+}
+
+/*
+ * The thread area is shared between the main process and the job
+ * threads/processes. So setup a shared memory segment that will hold
+ * all the job info.
+ */
+static int setup_thread_area(void)
+{
+	/*
+	 * 1024 is too much on some machines, scale max_jobs if
+	 * we get a failure that looks like too large a shm segment
+	 */
+	do {
+		size_t size = max_jobs * sizeof(struct thread_data);
+
+		shm_id = shmget(0, size, IPC_CREAT | 0600);
+		if (shm_id != -1)
+			break;
+		/* Any error other than "segment too big" is fatal */
+		if (errno != EINVAL) {
+			perror("shmget");
+			break;
+		}
+
+		/* EINVAL: halve the supported job count and retry */
+		max_jobs >>= 1;
+	} while (max_jobs);
+
+	/* Ran out of retries without getting a segment */
+	if (shm_id == -1)
+		return 1;
+
+	/* Attach the segment; shared with the job threads/processes */
+	threads = shmat(shm_id, NULL, 0);
+	if (threads == (void *) -1) {
+		perror("shmat");
+		return 1;
+	}
+
+	/* Tear the segment down automatically on process exit */
+	atexit(free_shm);
+	return 0;
+}
+
static void usage(void)
{
printf("%s\n", fio_version_string);
usage();
exit(0);
case 'c':
- ret = show_cmd_help(options, optarg);
- exit(ret);
+ exit(fio_show_option_help(optarg));
case 'v':
printf("%s\n", fio_version_string);
exit(0);
char *val = optarg;
if (!strncmp(opt, "name", 4) && td) {
- ret = add_job(td, td->name ?: "fio", 0);
+ ret = add_job(td, td->o.name ?: "fio", 0);
if (ret) {
put_job(td);
return 0;
return 0;
}
- ret = parse_cmd_option(opt, val, options, td);
+ ret = fio_cmd_option_parse(td, opt, val);
if (ret)
dont_add_job = 1;
break;
if (dont_add_job)
put_job(td);
else {
- ret = add_job(td, td->name ?: "fio", 0);
+ ret = add_job(td, td->o.name ?: "fio", 0);
if (ret)
put_job(td);
}
return ini_idx;
}
-static void free_shm(void)
-{
- struct shmid_ds sbuf;
-
- if (threads) {
- shmdt((void *) threads);
- threads = NULL;
- shmctl(shm_id, IPC_RMID, &sbuf);
- }
-}
-
-/*
- * The thread area is shared between the main process and the job
- * threads/processes. So setup a shared memory segment that will hold
- * all the job info.
- */
-static int setup_thread_area(void)
-{
- /*
- * 1024 is too much on some machines, scale max_jobs if
- * we get a failure that looks like too large a shm segment
- */
- do {
- size_t size = max_jobs * sizeof(struct thread_data);
-
- shm_id = shmget(0, size, IPC_CREAT | 0600);
- if (shm_id != -1)
- break;
- if (errno != EINVAL) {
- perror("shmget");
- break;
- }
-
- max_jobs >>= 1;
- } while (max_jobs);
-
- if (shm_id == -1)
- return 1;
-
- threads = shmat(shm_id, NULL, 0);
- if (threads == (void *) -1) {
- perror("shmat");
- return 1;
- }
-
- atexit(free_shm);
- return 0;
-}
-
-/*
- * Copy the fio options into the long options map, so we mirror
- * job and cmd line options.
- */
-static void dupe_job_options(void)
-{
- struct fio_option *o;
- unsigned int i;
-
- i = 0;
- while (long_options[i].name)
- i++;
-
- o = &options[0];
- while (o->name) {
- long_options[i].name = o->name;
- long_options[i].val = FIO_GETOPT_JOB;
- if (o->type == FIO_OPT_STR_SET)
- long_options[i].has_arg = no_argument;
- else
- long_options[i].has_arg = required_argument;
-
- i++;
- o++;
- assert(i < FIO_JOB_OPTS + FIO_CMD_OPTS);
- }
-}
int parse_options(int argc, char *argv[])
{
f_out = stdout;
f_err = stderr;
- options_init(options);
-
- dupe_job_options();
+ fio_options_dup_and_init(long_options);
if (setup_thread_area())
return 1;