#include "fio.h"
#include "parse.h"
-/*
- * The default options
- */
-#define DEF_BS (4096)
-#define DEF_TIMEOUT (0)
-#define DEF_RATE_CYCLE (1000)
-#define DEF_ODIRECT (1)
-#define DEF_IO_ENGINE (FIO_SYNCIO)
-#define DEF_IO_ENGINE_NAME "sync"
-#define DEF_SEQUENTIAL (1)
-#define DEF_RAND_REPEAT (1)
-#define DEF_OVERWRITE (0)
-#define DEF_INVALIDATE (1)
-#define DEF_SYNCIO (0)
-#define DEF_RANDSEED (0xb1899bedUL)
-#define DEF_BWAVGTIME (500)
-#define DEF_CREATE_SER (1)
-#define DEF_CREATE_FSYNC (1)
-#define DEF_LOOPS (1)
-#define DEF_VERIFY (0)
-#define DEF_STONEWALL (0)
-#define DEF_NUMJOBS (1)
-#define DEF_USE_THREAD (0)
-#define DEF_FILE_SIZE (1024 * 1024 * 1024UL)
-#define DEF_ZONE_SIZE (0)
-#define DEF_ZONE_SKIP (0)
-#define DEF_RWMIX_CYCLE (500)
-#define DEF_RWMIX_READ (50)
-#define DEF_NICE (0)
-#define DEF_NR_FILES (1)
-#define DEF_UNLINK (1)
-#define DEF_WRITE_BW_LOG (0)
-#define DEF_WRITE_LAT_LOG (0)
-#define DEF_NO_RAND_MAP (0)
-#define DEF_HUGEPAGE_SIZE FIO_HUGE_PAGE
+#define FIO_RANDSEED (0xb1899bedUL)
#define td_var_offset(var) ((size_t) &((struct thread_data *)0)->var)
-static int str_rw_cb(void *, const char *);
-static int str_ioengine_cb(void *, const char *);
static int str_mem_cb(void *, const char *);
-static int str_verify_cb(void *, const char *);
static int str_lockmem_cb(void *, unsigned long *);
#ifdef FIO_HAVE_IOPRIO
static int str_prio_cb(void *, unsigned int *);
#endif
static int str_exitall_cb(void);
static int str_cpumask_cb(void *, unsigned int *);
+static int str_fst_cb(void *, const char *);
+static int str_filename_cb(void *, const char *);
+static int str_directory_cb(void *, const char *);
+static int str_opendir_cb(void *, const char *);
+
+#define __stringify_1(x) #x
+#define __stringify(x) __stringify_1(x)
/*
* Map of job/command line options
*/
static struct fio_option options[] = {
+ {
+ .name = "description",
+ .type = FIO_OPT_STR_STORE,
+ .off1 = td_var_offset(description),
+ .help = "Text job description",
+ },
{
.name = "name",
.type = FIO_OPT_STR_STORE,
.off1 = td_var_offset(name),
+ .help = "Name of this job",
},
{
.name = "directory",
.type = FIO_OPT_STR_STORE,
.off1 = td_var_offset(directory),
+ .cb = str_directory_cb,
+ .help = "Directory to store files in",
},
{
.name = "filename",
.type = FIO_OPT_STR_STORE,
.off1 = td_var_offset(filename),
+ .cb = str_filename_cb,
+ .help = "File(s) to use for the workload",
+ },
+ {
+ .name = "opendir",
+ .type = FIO_OPT_STR_STORE,
+ .off1 = td_var_offset(opendir),
+ .cb = str_opendir_cb,
+ .help = "Recursively add files from this directory and down",
},
{
.name = "rw",
.type = FIO_OPT_STR,
- .cb = str_rw_cb,
+ .off1 = td_var_offset(td_ddir),
+ .help = "IO direction",
+ .def = "read",
+ .posval = {
+ { .ival = "read", .oval = TD_DDIR_READ },
+ { .ival = "write", .oval = TD_DDIR_WRITE },
+ { .ival = "randread", .oval = TD_DDIR_RANDREAD },
+ { .ival = "randwrite", .oval = TD_DDIR_RANDWRITE },
+ { .ival = "rw", .oval = TD_DDIR_RW },
+ { .ival = "randrw", .oval = TD_DDIR_RANDRW },
+ },
},
{
.name = "ioengine",
+ .type = FIO_OPT_STR_STORE,
+ .off1 = td_var_offset(ioengine),
+ .help = "IO engine to use",
+ .def = "sync",
+ .posval = {
+ { .ival = "sync", },
+#ifdef FIO_HAVE_LIBAIO
+ { .ival = "libaio", },
+#endif
+#ifdef FIO_HAVE_POSIXAIO
+ { .ival = "posixaio", },
+#endif
+ { .ival = "mmap", },
+#ifdef FIO_HAVE_SPLICE
+ { .ival = "splice", },
+#endif
+#ifdef FIO_HAVE_SGIO
+ { .ival = "sg", },
+#endif
+ { .ival = "null", }, { .ival = "net", },
+#ifdef FIO_HAVE_SYSLET
+ { .ival = "syslet-rw", },
+#endif
+ { .ival = "cpuio", },
+ { .ival = "external", },
+ },
+ },
+ {
+ .name = "iodepth",
+ .type = FIO_OPT_INT,
+ .off1 = td_var_offset(iodepth),
+ .help = "Amount of IO buffers to keep in flight",
+ .def = "1",
+ },
+ {
+ .name = "iodepth_batch",
+ .type = FIO_OPT_INT,
+ .off1 = td_var_offset(iodepth_batch),
+ .help = "Number of IO to submit in one go",
+ },
+ {
+ .name = "iodepth_low",
+ .type = FIO_OPT_INT,
+ .off1 = td_var_offset(iodepth_low),
+ .help = "Low water mark for queuing depth",
+ },
+ {
+ .name = "size",
+ .type = FIO_OPT_STR_VAL,
+ .off1 = td_var_offset(total_file_size),
+ .help = "Size of device or file",
+ },
+ {
+ .name = "bs",
+ .type = FIO_OPT_STR_VAL_INT,
+ .off1 = td_var_offset(bs[DDIR_READ]),
+ .off2 = td_var_offset(bs[DDIR_WRITE]),
+ .help = "Block size unit",
+ .def = "4k",
+ },
+ {
+ .name = "bsrange",
+ .type = FIO_OPT_RANGE,
+ .off1 = td_var_offset(min_bs[DDIR_READ]),
+ .off2 = td_var_offset(max_bs[DDIR_READ]),
+ .off3 = td_var_offset(min_bs[DDIR_WRITE]),
+ .off4 = td_var_offset(max_bs[DDIR_WRITE]),
+ .help = "Set block size range (in more detail than bs)",
+ },
+ {
+ .name = "bs_unaligned",
+ .type = FIO_OPT_STR_SET,
+ .off1 = td_var_offset(bs_unaligned),
+ .help = "Don't sector align IO buffer sizes",
+ },
+ {
+ .name = "offset",
+ .type = FIO_OPT_STR_VAL,
+ .off1 = td_var_offset(start_offset),
+ .help = "Start IO from this offset",
+ .def = "0",
+ },
+ {
+ .name = "randrepeat",
+ .type = FIO_OPT_BOOL,
+ .off1 = td_var_offset(rand_repeatable),
+ .help = "Use repeatable random IO pattern",
+ .def = "1",
+ },
+ {
+ .name = "norandommap",
+ .type = FIO_OPT_STR_SET,
+ .off1 = td_var_offset(norandommap),
+ .help = "Accept potential duplicate random blocks",
+ },
+ {
+ .name = "nrfiles",
+ .type = FIO_OPT_INT,
+ .off1 = td_var_offset(nr_files),
+ .help = "Split job workload between this number of files",
+ .def = "1",
+ },
+ {
+ .name = "openfiles",
+ .type = FIO_OPT_INT,
+ .off1 = td_var_offset(open_files),
+ .help = "Number of files to keep open at the same time",
+ },
+ {
+ .name = "file_service_type",
.type = FIO_OPT_STR,
- .cb = str_ioengine_cb,
+ .cb = str_fst_cb,
+ .off1 = td_var_offset(file_service_type),
+ .help = "How to select which file to service next",
+ .def = "roundrobin",
+ .posval = {
+ { .ival = "random", .oval = FIO_FSERVICE_RANDOM },
+ { .ival = "roundrobin", .oval = FIO_FSERVICE_RR },
+ },
+ },
+ {
+ .name = "fsync",
+ .type = FIO_OPT_INT,
+ .off1 = td_var_offset(fsync_blocks),
+ .help = "Issue fsync for writes every given number of blocks",
+ .def = "0",
+ },
+ {
+ .name = "direct",
+ .type = FIO_OPT_BOOL,
+ .off1 = td_var_offset(odirect),
+ .help = "Use O_DIRECT IO (negates buffered)",
+ .def = "0",
+ },
+ {
+ .name = "buffered",
+ .type = FIO_OPT_BOOL,
+ .off1 = td_var_offset(odirect),
+ .neg = 1,
+ .help = "Use buffered IO (negates direct)",
+ .def = "1",
+ },
+ {
+ .name = "overwrite",
+ .type = FIO_OPT_BOOL,
+ .off1 = td_var_offset(overwrite),
+ .help = "When writing, set whether to overwrite current data",
+ .def = "0",
+ },
+ {
+ .name = "loops",
+ .type = FIO_OPT_INT,
+ .off1 = td_var_offset(loops),
+ .help = "Number of times to run the job",
+ .def = "1",
+ },
+ {
+ .name = "numjobs",
+ .type = FIO_OPT_INT,
+ .off1 = td_var_offset(numjobs),
+ .help = "Duplicate this job this many times",
+ .def = "1",
+ },
+ {
+ .name = "startdelay",
+ .type = FIO_OPT_INT,
+ .off1 = td_var_offset(start_delay),
+ .help = "Only start job when this period has passed",
+ .def = "0",
+ },
+ {
+ .name = "runtime",
+ .alias = "timeout",
+ .type = FIO_OPT_STR_VAL_TIME,
+ .off1 = td_var_offset(timeout),
+ .help = "Stop workload when this amount of time has passed",
+ .def = "0",
},
{
.name = "mem",
.type = FIO_OPT_STR,
.cb = str_mem_cb,
+ .off1 = td_var_offset(mem_type),
+ .help = "Backing type for IO buffers",
+ .def = "malloc",
+ .posval = {
+ { .ival = "malloc", .oval = MEM_MALLOC },
+ { .ival = "shm", .oval = MEM_SHM },
+#ifdef FIO_HAVE_HUGETLB
+ { .ival = "shmhuge", .oval = MEM_SHMHUGE },
+#endif
+ { .ival = "mmap", .oval = MEM_MMAP },
+#ifdef FIO_HAVE_HUGETLB
+ { .ival = "mmaphuge", .oval = MEM_MMAPHUGE },
+#endif
+ },
},
{
.name = "verify",
.type = FIO_OPT_STR,
- .cb = str_verify_cb,
+ .off1 = td_var_offset(verify),
+ .help = "Verify sum function",
+ .def = "0",
+ .posval = {
+ { .ival = "0", .oval = VERIFY_NONE },
+ { .ival = "crc32", .oval = VERIFY_CRC32 },
+ { .ival = "md5", .oval = VERIFY_MD5 },
+ },
},
{
.name = "write_iolog",
.type = FIO_OPT_STR_STORE,
.off1 = td_var_offset(write_iolog_file),
+ .help = "Store IO pattern to file",
},
{
.name = "read_iolog",
.type = FIO_OPT_STR_STORE,
.off1 = td_var_offset(read_iolog_file),
+ .help = "Playback IO pattern from file",
},
{
.name = "exec_prerun",
.type = FIO_OPT_STR_STORE,
.off1 = td_var_offset(exec_prerun),
+ .help = "Execute this file prior to running job",
},
{
.name = "exec_postrun",
.type = FIO_OPT_STR_STORE,
.off1 = td_var_offset(exec_postrun),
+ .help = "Execute this file after running job",
},
#ifdef FIO_HAVE_IOSCHED_SWITCH
{
.name = "ioscheduler",
.type = FIO_OPT_STR_STORE,
.off1 = td_var_offset(ioscheduler),
+ .help = "Use this IO scheduler on the backing device",
},
#endif
- {
- .name = "size",
- .type = FIO_OPT_STR_VAL,
- .off1 = td_var_offset(total_file_size),
- },
- {
- .name = "bs",
- .type = FIO_OPT_STR_VAL_INT,
- .off1 = td_var_offset(bs[DDIR_READ]),
- .off2 = td_var_offset(bs[DDIR_WRITE]),
- },
- {
- .name = "offset",
- .type = FIO_OPT_STR_VAL,
- .off1 = td_var_offset(start_offset),
- },
{
.name = "zonesize",
.type = FIO_OPT_STR_VAL,
.off1 = td_var_offset(zone_size),
+ .help = "Give size of an IO zone",
+ .def = "0",
},
{
.name = "zoneskip",
.type = FIO_OPT_STR_VAL,
.off1 = td_var_offset(zone_skip),
+ .help = "Space between IO zones",
+ .def = "0",
},
{
.name = "lockmem",
.type = FIO_OPT_STR_VAL,
.cb = str_lockmem_cb,
- },
- {
- .name = "bsrange",
- .type = FIO_OPT_RANGE,
- .off1 = td_var_offset(min_bs[DDIR_READ]),
- .off2 = td_var_offset(max_bs[DDIR_READ]),
- .off3 = td_var_offset(min_bs[DDIR_WRITE]),
- .off4 = td_var_offset(max_bs[DDIR_WRITE]),
- },
- {
- .name = "nrfiles",
- .type = FIO_OPT_INT,
- .off1 = td_var_offset(nr_files),
- },
- {
- .name = "iodepth",
- .type = FIO_OPT_INT,
- .off1 = td_var_offset(iodepth),
- },
- {
- .name = "fsync",
- .type = FIO_OPT_INT,
- .off1 = td_var_offset(fsync_blocks),
+ .help = "Lock down this amount of memory",
+ .def = "0",
},
{
.name = "rwmixcycle",
.type = FIO_OPT_INT,
.off1 = td_var_offset(rwmixcycle),
+ .help = "Cycle period for mixed read/write workloads (msec)",
+ .def = "500",
},
{
.name = "rwmixread",
.type = FIO_OPT_INT,
.off1 = td_var_offset(rwmixread),
- .max_val= 100,
+ .maxval = 100,
+ .help = "Percentage of mixed workload that is reads",
+ .def = "50",
},
{
.name = "rwmixwrite",
.type = FIO_OPT_INT,
.off1 = td_var_offset(rwmixwrite),
- .max_val= 100,
+ .maxval = 100,
+ .help = "Percentage of mixed workload that is writes",
+ .def = "50",
},
{
.name = "nice",
.type = FIO_OPT_INT,
.off1 = td_var_offset(nice),
+ .help = "Set job CPU nice value",
+ .minval = -19,
+ .maxval = 20,
+ .def = "0",
},
#ifdef FIO_HAVE_IOPRIO
{
.name = "prio",
.type = FIO_OPT_INT,
.cb = str_prio_cb,
+ .help = "Set job IO priority value",
+ .minval = 0,
+ .maxval = 7,
},
{
.name = "prioclass",
.type = FIO_OPT_INT,
.cb = str_prioclass_cb,
+ .help = "Set job IO priority class",
+ .minval = 0,
+ .maxval = 3,
},
#endif
{
.name = "thinktime",
.type = FIO_OPT_INT,
- .off1 = td_var_offset(thinktime)
+ .off1 = td_var_offset(thinktime),
+ .help = "Idle time between IO buffers (usec)",
+ .def = "0",
},
{
- .name = "rate",
+ .name = "thinktime_spin",
.type = FIO_OPT_INT,
- .off1 = td_var_offset(rate)
+ .off1 = td_var_offset(thinktime_spin),
+ .help = "Start think time by spinning this amount (usec)",
+ .def = "0",
},
{
- .name = "ratemin",
+ .name = "thinktime_blocks",
.type = FIO_OPT_INT,
- .off1 = td_var_offset(ratemin)
+ .off1 = td_var_offset(thinktime_blocks),
+ .help = "IO buffer period between 'thinktime'",
+ .def = "1",
},
{
- .name = "ratecycle",
+ .name = "rate",
.type = FIO_OPT_INT,
- .off1 = td_var_offset(ratecycle)
+ .off1 = td_var_offset(rate),
+ .help = "Set bandwidth rate",
},
{
- .name = "startdelay",
+ .name = "ratemin",
.type = FIO_OPT_INT,
- .off1 = td_var_offset(start_delay)
+ .off1 = td_var_offset(ratemin),
+ .help = "The bottom limit accepted",
},
{
- .name = "timeout",
- .type = FIO_OPT_STR_VAL_TIME,
- .off1 = td_var_offset(timeout)
+ .name = "ratecycle",
+ .type = FIO_OPT_INT,
+ .off1 = td_var_offset(ratecycle),
+ .help = "Window average for rate limits (msec)",
+ .def = "1000",
},
{
.name = "invalidate",
- .type = FIO_OPT_INT,
- .off1 = td_var_offset(invalidate_cache)
+ .type = FIO_OPT_BOOL,
+ .off1 = td_var_offset(invalidate_cache),
+ .help = "Invalidate buffer/page cache prior to running job",
+ .def = "1",
},
{
.name = "sync",
- .type = FIO_OPT_INT,
- .off1 = td_var_offset(sync_io)
+ .type = FIO_OPT_BOOL,
+ .off1 = td_var_offset(sync_io),
+ .help = "Use O_SYNC for buffered writes",
+ .def = "0",
},
{
.name = "bwavgtime",
.type = FIO_OPT_INT,
- .off1 = td_var_offset(bw_avg_time)
+ .off1 = td_var_offset(bw_avg_time),
+ .help = "Time window over which to calculate bandwidth (msec)",
+ .def = "500",
},
{
.name = "create_serialize",
- .type = FIO_OPT_INT,
- .off1 = td_var_offset(create_serialize)
+ .type = FIO_OPT_BOOL,
+ .off1 = td_var_offset(create_serialize),
+ .help = "Serialize creating of job files",
+ .def = "1",
},
{
.name = "create_fsync",
- .type = FIO_OPT_INT,
- .off1 = td_var_offset(create_fsync)
- },
- {
- .name = "loops",
- .type = FIO_OPT_INT,
- .off1 = td_var_offset(loops)
- },
- {
- .name = "numjobs",
- .type = FIO_OPT_INT,
- .off1 = td_var_offset(numjobs)
+ .type = FIO_OPT_BOOL,
+ .off1 = td_var_offset(create_fsync),
+ .help = "Fsync file after creation",
+ .def = "1",
},
{
.name = "cpuload",
.type = FIO_OPT_INT,
- .off1 = td_var_offset(cpuload)
+ .off1 = td_var_offset(cpuload),
+ .help = "Use this percentage of CPU",
},
{
.name = "cpuchunks",
.type = FIO_OPT_INT,
- .off1 = td_var_offset(cpucycle)
- },
- {
- .name = "direct",
- .type = FIO_OPT_INT,
- .off1 = td_var_offset(odirect)
- },
- {
- .name = "overwrite",
- .type = FIO_OPT_INT,
- .off1 = td_var_offset(overwrite)
+ .off1 = td_var_offset(cpucycle),
+ .help = "Length of the CPU burn cycles (usecs)",
+ .def = "50000",
},
#ifdef FIO_HAVE_CPU_AFFINITY
{
.name = "cpumask",
.type = FIO_OPT_INT,
.cb = str_cpumask_cb,
+ .help = "CPU affinity mask",
},
#endif
{
.name = "end_fsync",
- .type = FIO_OPT_INT,
- .off1 = td_var_offset(end_fsync)
+ .type = FIO_OPT_BOOL,
+ .off1 = td_var_offset(end_fsync),
+ .help = "Include fsync at the end of job",
+ .def = "0",
+ },
+ {
+ .name = "fsync_on_close",
+ .type = FIO_OPT_BOOL,
+ .off1 = td_var_offset(fsync_on_close),
+ .help = "fsync files on close",
+ .def = "0",
},
{
.name = "unlink",
- .type = FIO_OPT_STR_SET,
+ .type = FIO_OPT_BOOL,
.off1 = td_var_offset(unlink),
+ .help = "Unlink created files after job has completed",
+ .def = "0",
},
{
.name = "exitall",
.type = FIO_OPT_STR_SET,
.cb = str_exitall_cb,
+ .help = "Terminate all jobs when one exits",
},
{
.name = "stonewall",
.type = FIO_OPT_STR_SET,
.off1 = td_var_offset(stonewall),
+ .help = "Insert a hard barrier between this job and previous",
},
{
.name = "thread",
.type = FIO_OPT_STR_SET,
- .off1 = td_var_offset(thread),
+ .off1 = td_var_offset(use_thread),
+ .help = "Use threads instead of forks",
},
{
.name = "write_bw_log",
.type = FIO_OPT_STR_SET,
.off1 = td_var_offset(write_bw_log),
+ .help = "Write log of bandwidth during run",
},
{
.name = "write_lat_log",
.type = FIO_OPT_STR_SET,
.off1 = td_var_offset(write_lat_log),
- },
- {
- .name = "norandommap",
- .type = FIO_OPT_STR_SET,
- .off1 = td_var_offset(norandommap),
- },
- {
- .name = "bs_unaligned",
- .type = FIO_OPT_STR_SET,
- .off1 = td_var_offset(bs_unaligned),
+ .help = "Write log of latency during run",
},
{
.name = "hugepage-size",
.type = FIO_OPT_STR_VAL,
.off1 = td_var_offset(hugepage_size),
+ .help = "When using hugepages, specify size of each page",
+ .def = __stringify(FIO_HUGE_PAGE),
+ },
+ {
+ .name = "group_reporting",
+ .type = FIO_OPT_STR_SET,
+ .off1 = td_var_offset(group_reporting),
+ .help = "Do reporting on a per-group basis",
},
{
.name = NULL,
.has_arg = no_argument,
.val = 'v',
},
+ {
+ .name = "help",
+ .has_arg = no_argument,
+ .val = 'h',
+ },
+ {
+ .name = "cmdhelp",
+ .has_arg = optional_argument,
+ .val = 'c',
+ },
{
.name = NULL,
},
};
-static int def_timeout = DEF_TIMEOUT;
+static int def_timeout = 0;
-static char fio_version_string[] = "fio 1.10";
+static char fio_version_string[] = "fio 1.14";
static char **ini_file;
static int max_jobs = MAX_JOBS;
FILE *f_out = NULL;
FILE *f_err = NULL;
-static int write_lat_log = DEF_WRITE_LAT_LOG;
-static int write_bw_log = DEF_WRITE_BW_LOG;
+static int write_lat_log = 0;
+int write_bw_log = 0;
+
+static int prev_group_jobs;
+
+FILE *get_f_out()
+{
+ return f_out;
+}
+
+FILE *get_f_err()
+{
+ return f_err;
+}
/*
* Return a free job structure.
if (td == &def_thread)
return;
+ if (td->error)
+ fprintf(f_out, "fio: %s\n", td->verror);
+
memset(&threads[td->thread_number - 1], 0, sizeof(*td));
thread_number--;
}
td->iodepth = 1;
else {
if (!td->iodepth)
- td->iodepth = td->nr_files;
+ td->iodepth = td->open_files;
}
/*
* only really works for sequential io for now, and with 1 file
*/
- if (td->zone_size && !td->sequential && td->nr_files == 1)
+ if (td->zone_size && td_random(td) && td->open_files == 1)
td->zone_size = 0;
/*
log_err("fio: bs_unaligned may not work with raw io\n");
/*
- * O_DIRECT and char doesn't mix, clear that flag if necessary.
+ * thinktime_spin must be less than thinktime
+ */
+ if (td->thinktime_spin > td->thinktime)
+ td->thinktime_spin = td->thinktime;
+
+ /*
+ * The low water mark cannot be bigger than the iodepth
+ */
+ if (td->iodepth_low > td->iodepth || !td->iodepth_low) {
+ /*
+ * syslet work around - if the workload is sequential,
+ * we want to let the queue drain all the way down to
+ * avoid seeking between async threads
+ */
+ if (!strcmp(td->io_ops->name, "syslet-rw") && !td_random(td))
+ td->iodepth_low = 1;
+ else
+ td->iodepth_low = td->iodepth;
+ }
+
+ /*
+ * If batch number isn't set, default to the same as iodepth
*/
- if (td->filetype == FIO_TYPE_CHAR && td->odirect)
- td->odirect = 0;
+ if (td->iodepth_batch > td->iodepth || !td->iodepth_batch)
+ td->iodepth_batch = td->iodepth;
+
+ if (td->nr_files > td->files_index)
+ td->nr_files = td->files_index;
+
+ if (td->open_files > td->nr_files || !td->open_files)
+ td->open_files = td->nr_files;
}
/*
static char *to_kmg(unsigned int val)
{
char *buf = malloc(32);
- char post[] = { 0, 'K', 'M', 'G', 'P', 0 };
+ char post[] = { 0, 'K', 'M', 'G', 'P', 'E', 0 };
char *p = post;
do {
return buf;
}
+/* External engines are specified by "external:name.o" */
+static const char *get_engine_name(const char *str)
+{
+ char *p = strstr(str, ":");
+
+ if (!p)
+ return str;
+
+ p++;
+ strip_blank_front(&p);
+ strip_blank_end(p);
+ return p;
+}
+
/*
* Adds a job to the list of things todo. Sanitizes the various options
* to make sure we don't have conflicts, and initializes various
*/
static int add_job(struct thread_data *td, const char *jobname, int job_add_num)
{
- const char *ddir_str[] = { "read", "write", "randread", "randwrite",
- "rw", NULL, "randrw" };
- struct stat sb;
- int numjobs, ddir, i;
+ const char *ddir_str[] = { NULL, "read", "write", "rw", NULL,
+ "randread", "randwrite", "randrw" };
+ unsigned int i;
struct fio_file *f;
+ const char *engine;
+ char fname[PATH_MAX];
+ int numjobs;
/*
* the def_thread is just for options, it's not a real job
if (td == &def_thread)
return 0;
- /*
- * Set default io engine, if none set
- */
+ engine = get_engine_name(td->ioengine);
+ td->io_ops = load_ioengine(td, engine);
if (!td->io_ops) {
- td->io_ops = load_ioengine(td, DEF_IO_ENGINE_NAME);
- if (!td->io_ops) {
- log_err("default engine %s not there?\n", DEF_IO_ENGINE_NAME);
- return 1;
- }
- }
-
- if (td->odirect)
- td->io_ops->flags |= FIO_RAWIO;
-
- td->filetype = FIO_TYPE_FILE;
- if (!stat(jobname, &sb)) {
- if (S_ISBLK(sb.st_mode))
- td->filetype = FIO_TYPE_BD;
- else if (S_ISCHR(sb.st_mode))
- td->filetype = FIO_TYPE_CHAR;
+ log_err("fio: failed to load engine %s\n", engine);
+ return 1;
}
- fixup_options(td);
-
- if (td->filename)
- td->nr_uniq_files = 1;
+ if (td->use_thread)
+ nr_thread++;
else
- td->nr_uniq_files = td->nr_files;
+ nr_process++;
- if (td->filetype == FIO_TYPE_FILE || td->filename) {
- char tmp[PATH_MAX];
- int len = 0;
-
- if (td->directory && td->directory[0] != '\0')
- sprintf(tmp, "%s/", td->directory);
+ if (td->odirect)
+ td->io_ops->flags |= FIO_RAWIO;
- td->files = malloc(sizeof(struct fio_file) * td->nr_files);
+ if (!td->filename && !td->files_index) {
+ td->filename = strdup(jobname);
- for_each_file(td, f, i) {
- memset(f, 0, sizeof(*f));
- f->fd = -1;
-
- if (td->filename)
- sprintf(tmp + len, "%s", td->filename);
- else
- sprintf(tmp + len, "%s.%d.%d", jobname, td->thread_number, i);
- f->file_name = strdup(tmp);
+ if (td->nr_files == 1)
+ add_file(td, td->filename);
+ else {
+ for (i = 0; i < td->nr_files; i++) {
+ sprintf(fname, "%s.%d.%d", td->filename, td->thread_number, i);
+ add_file(td, fname);
+ }
}
- } else {
- td->nr_files = 1;
- td->files = malloc(sizeof(struct fio_file));
- f = &td->files[0];
-
- memset(f, 0, sizeof(*f));
- f->fd = -1;
- f->file_name = strdup(jobname);
}
+ fixup_options(td);
+
for_each_file(td, f, i) {
- f->file_size = td->total_file_size / td->nr_files;
- f->file_offset = td->start_offset;
+ if (td->directory && f->filetype == FIO_TYPE_FILE) {
+ sprintf(fname, "%s/%s", td->directory, f->file_name);
+ f->file_name = strdup(fname);
+ }
}
- fio_sem_init(&td->mutex, 0);
+ td->mutex = fio_sem_init(0);
- td->clat_stat[0].min_val = td->clat_stat[1].min_val = ULONG_MAX;
- td->slat_stat[0].min_val = td->slat_stat[1].min_val = ULONG_MAX;
- td->bw_stat[0].min_val = td->bw_stat[1].min_val = ULONG_MAX;
+ td->ts.clat_stat[0].min_val = td->ts.clat_stat[1].min_val = ULONG_MAX;
+ td->ts.slat_stat[0].min_val = td->ts.slat_stat[1].min_val = ULONG_MAX;
+ td->ts.bw_stat[0].min_val = td->ts.bw_stat[1].min_val = ULONG_MAX;
- if (td->stonewall && td->thread_number > 1)
+ if ((td->stonewall || td->numjobs > 1) && prev_group_jobs) {
+ prev_group_jobs = 0;
groupid++;
+ }
td->groupid = groupid;
+ prev_group_jobs++;
if (setup_rate(td))
goto err;
if (td->write_lat_log) {
- setup_log(&td->slat_log);
- setup_log(&td->clat_log);
+ setup_log(&td->ts.slat_log);
+ setup_log(&td->ts.clat_log);
}
if (td->write_bw_log)
- setup_log(&td->bw_log);
+ setup_log(&td->ts.bw_log);
if (!td->name)
td->name = strdup(jobname);
- ddir = td->ddir + (!td->sequential << 1) + (td->iomix << 2);
-
if (!terse_output) {
if (!job_add_num) {
- if (td->io_ops->flags & FIO_CPUIO)
+ if (!strcmp(td->io_ops->name, "cpuio"))
fprintf(f_out, "%s: ioengine=cpu, cpuload=%u, cpucycle=%u\n", td->name, td->cpuload, td->cpucycle);
else {
char *c1, *c2, *c3, *c4;
c3 = to_kmg(td->min_bs[DDIR_WRITE]);
c4 = to_kmg(td->max_bs[DDIR_WRITE]);
- fprintf(f_out, "%s: (g=%d): rw=%s, odir=%u, bs=%s-%s/%s-%s, rate=%u, ioengine=%s, iodepth=%u\n", td->name, td->groupid, ddir_str[ddir], td->odirect, c1, c2, c3, c4, td->rate, td->io_ops->name, td->iodepth);
+ fprintf(f_out, "%s: (g=%d): rw=%s, bs=%s-%s/%s-%s, ioengine=%s, iodepth=%u\n", td->name, td->groupid, ddir_str[td->td_ddir], c1, c2, c3, c4, td->io_ops->name, td->iodepth);
free(c1);
free(c2);
if (add_job(td_new, jobname, job_add_num))
goto err;
}
+
+ if (td->numjobs > 1) {
+ groupid++;
+ prev_group_jobs = 0;
+ }
+
return 0;
err:
put_job(td);
*/
int init_random_state(struct thread_data *td)
{
- unsigned long seeds[4];
- int fd, num_maps, blocks, i;
+ unsigned long seeds[5];
+ int fd, num_maps, blocks;
struct fio_file *f;
+ unsigned int i;
- if (td->io_ops->flags & FIO_CPUIO)
+ if (td->io_ops->flags & FIO_DISKLESSIO)
return 0;
fd = open("/dev/urandom", O_RDONLY);
if (fd == -1) {
- td_verror(td, errno);
+ td_verror(td, errno, "open");
return 1;
}
if (read(fd, seeds, sizeof(seeds)) < (int) sizeof(seeds)) {
- td_verror(td, EIO);
+ td_verror(td, EIO, "read");
close(fd);
return 1;
}
os_random_seed(seeds[1], &td->verify_state);
os_random_seed(seeds[2], &td->rwmix_state);
- if (td->sequential)
+ if (td->file_service_type == FIO_FSERVICE_RANDOM)
+ os_random_seed(seeds[3], &td->next_file_state);
+
+ if (!td_random(td))
return 0;
if (td->rand_repeatable)
- seeds[3] = DEF_RANDSEED;
+ seeds[4] = FIO_RANDSEED * td->thread_number;
if (!td->norandommap) {
for_each_file(td, f, i) {
- blocks = (f->file_size + td->rw_min_bs - 1) / td->rw_min_bs;
+ blocks = (f->real_file_size + td->rw_min_bs - 1) / td->rw_min_bs;
num_maps = (blocks + BLOCKS_PER_MAP-1)/ BLOCKS_PER_MAP;
f->file_map = malloc(num_maps * sizeof(long));
+ if (!f->file_map) {
+ log_err("fio: failed allocating random map. If running a large number of jobs, try the 'norandommap' option\n");
+ return 1;
+ }
f->num_maps = num_maps;
memset(f->file_map, 0, num_maps * sizeof(long));
}
}
- os_random_seed(seeds[3], &td->random_state);
+ os_random_seed(seeds[4], &td->random_state);
return 0;
}
for (i = 0; i < strlen(line); i++) {
if (line[i] == ';')
return 1;
+ if (line[i] == '#')
+ return 1;
if (!isspace(line[i]) && !iscntrl(line[i]))
return 0;
}
return 1;
}
-static int str_rw_cb(void *data, const char *mem)
-{
- struct thread_data *td = data;
-
- if (!strncmp(mem, "read", 4) || !strncmp(mem, "0", 1)) {
- td->ddir = DDIR_READ;
- td->sequential = 1;
- return 0;
- } else if (!strncmp(mem, "randread", 8)) {
- td->ddir = DDIR_READ;
- td->sequential = 0;
- return 0;
- } else if (!strncmp(mem, "write", 5) || !strncmp(mem, "1", 1)) {
- td->ddir = DDIR_WRITE;
- td->sequential = 1;
- return 0;
- } else if (!strncmp(mem, "randwrite", 9)) {
- td->ddir = DDIR_WRITE;
- td->sequential = 0;
- return 0;
- } else if (!strncmp(mem, "rw", 2)) {
- td->ddir = DDIR_READ;
- td->iomix = 1;
- td->sequential = 1;
- return 0;
- } else if (!strncmp(mem, "randrw", 6)) {
- td->ddir = DDIR_READ;
- td->iomix = 1;
- td->sequential = 0;
- return 0;
- }
-
- log_err("fio: data direction: read, write, randread, randwrite, rw, randrw\n");
- return 1;
-}
-
-static int str_verify_cb(void *data, const char *mem)
-{
- struct thread_data *td = data;
-
- if (!strncmp(mem, "0", 1)) {
- td->verify = VERIFY_NONE;
- return 0;
- } else if (!strncmp(mem, "md5", 3) || !strncmp(mem, "1", 1)) {
- td->verify = VERIFY_MD5;
- return 0;
- } else if (!strncmp(mem, "crc32", 5)) {
- td->verify = VERIFY_CRC32;
- return 0;
- }
-
- log_err("fio: verify types: md5, crc32\n");
- return 1;
-}
-
/*
* Check if mmap/mmaphuge has a :/foo/bar/file at the end. If so, return that.
*/
-static char *get_mmap_file(const char *str)
+static char *get_opt_postfix(const char *str)
{
char *p = strstr(str, ":");
{
struct thread_data *td = data;
- if (!strncmp(mem, "malloc", 6)) {
- td->mem_type = MEM_MALLOC;
- return 0;
- } else if (!strncmp(mem, "mmaphuge", 8)) {
-#ifdef FIO_HAVE_HUGETLB
- /*
- * mmaphuge must be appended with the actual file
- */
- td->mmapfile = get_mmap_file(mem);
- if (!td->mmapfile) {
+ if (td->mem_type == MEM_MMAPHUGE || td->mem_type == MEM_MMAP) {
+ td->mmapfile = get_opt_postfix(mem);
+ if (td->mem_type == MEM_MMAPHUGE && !td->mmapfile) {
log_err("fio: mmaphuge:/path/to/file\n");
return 1;
}
-
- td->mem_type = MEM_MMAPHUGE;
- return 0;
-#else
- log_err("fio: mmaphuge not available\n");
- return 1;
-#endif
- } else if (!strncmp(mem, "mmap", 4)) {
- /*
- * Check if the user wants file backed memory. It's ok
- * if there's no file given, we'll just use anon mamp then.
- */
- td->mmapfile = get_mmap_file(mem);
- td->mem_type = MEM_MMAP;
- return 0;
- } else if (!strncmp(mem, "shmhuge", 7)) {
-#ifdef FIO_HAVE_HUGETLB
- td->mem_type = MEM_SHMHUGE;
- return 0;
-#else
- log_err("fio: shmhuge not available\n");
- return 1;
-#endif
- } else if (!strncmp(mem, "shm", 3)) {
- td->mem_type = MEM_SHM;
- return 0;
}
- log_err("fio: mem type: malloc, shm, shmhuge, mmap, mmaphuge\n");
- return 1;
-}
-
-static int str_ioengine_cb(void *data, const char *str)
-{
- struct thread_data *td = data;
-
- td->io_ops = load_ioengine(td, str);
- if (td->io_ops)
- return 0;
-
- log_err("fio: ioengine= libaio, posixaio, sync, mmap, sgio, splice, cpu, null\n");
- log_err("fio: or specify path to dynamic ioengine module\n");
- return 1;
+ return 0;
}
static int str_lockmem_cb(void fio_unused *data, unsigned long *val)
return 0;
}
+static int str_fst_cb(void *data, const char *str)
+{
+ struct thread_data *td = data;
+ char *nr = get_opt_postfix(str);
+
+ td->file_service_nr = 1;
+ if (nr)
+ td->file_service_nr = atoi(nr);
+
+ return 0;
+}
+
+static int str_filename_cb(void *data, const char *input)
+{
+ struct thread_data *td = data;
+ char *fname, *str, *p;
+
+ p = str = strdup(input);
+
+ strip_blank_front(&str);
+ strip_blank_end(str);
+
+ if (!td->files_index)
+ td->nr_files = 0;
+
+ while ((fname = strsep(&str, ":")) != NULL) {
+ if (!strlen(fname))
+ break;
+ add_file(td, fname);
+ td->nr_files++;
+ }
+
+ free(p);
+ return 0;
+}
+
+static int str_directory_cb(void *data, const char fio_unused *str)
+{
+ struct thread_data *td = data;
+ struct stat sb;
+
+ if (lstat(td->directory, &sb) < 0) {
+ log_err("fio: %s is not a directory\n", td->directory);
+ td_verror(td, errno, "lstat");
+ return 1;
+ }
+ if (!S_ISDIR(sb.st_mode)) {
+ log_err("fio: %s is not a directory\n", td->directory);
+ return 1;
+ }
+
+ return 0;
+}
+
+static int str_opendir_cb(void *data, const char fio_unused *str)
+{
+ struct thread_data *td = data;
+
+ if (!td->files_index)
+ td->nr_files = 0;
+
+ return add_dir_files(td, td->opendir);
+}
+
/*
* This is our [ini] type file parser.
*/
}
/*
- * fill globals
+ * fill default options
*/
- def_thread.ddir = DDIR_READ;
- def_thread.iomix = 0;
- def_thread.bs[DDIR_READ] = DEF_BS;
- def_thread.bs[DDIR_WRITE] = DEF_BS;
- def_thread.min_bs[DDIR_READ] = def_thread.min_bs[DDIR_WRITE] = 0;
- def_thread.max_bs[DDIR_READ] = def_thread.max_bs[DDIR_WRITE] = 0;
- def_thread.odirect = DEF_ODIRECT;
- def_thread.ratecycle = DEF_RATE_CYCLE;
- def_thread.sequential = DEF_SEQUENTIAL;
+ fill_default_options(&def_thread, options);
+
def_thread.timeout = def_timeout;
- def_thread.overwrite = DEF_OVERWRITE;
- def_thread.invalidate_cache = DEF_INVALIDATE;
- def_thread.sync_io = DEF_SYNCIO;
- def_thread.mem_type = MEM_MALLOC;
- def_thread.bw_avg_time = DEF_BWAVGTIME;
- def_thread.create_serialize = DEF_CREATE_SER;
- def_thread.create_fsync = DEF_CREATE_FSYNC;
- def_thread.loops = DEF_LOOPS;
- def_thread.verify = DEF_VERIFY;
- def_thread.stonewall = DEF_STONEWALL;
- def_thread.numjobs = DEF_NUMJOBS;
- def_thread.use_thread = DEF_USE_THREAD;
- def_thread.rwmixcycle = DEF_RWMIX_CYCLE;
- def_thread.rwmixread = DEF_RWMIX_READ;
- def_thread.nice = DEF_NICE;
- def_thread.rand_repeatable = DEF_RAND_REPEAT;
- def_thread.nr_files = DEF_NR_FILES;
- def_thread.unlink = DEF_UNLINK;
def_thread.write_bw_log = write_bw_log;
def_thread.write_lat_log = write_lat_log;
- def_thread.norandommap = DEF_NO_RAND_MAP;
- def_thread.hugepage_size = DEF_HUGEPAGE_SIZE;
+
#ifdef FIO_HAVE_DISK_UTIL
def_thread.do_disk_util = 1;
#endif
printf("\t--bandwidth-log\tGenerate per-job bandwidth logs\n");
printf("\t--minimal\tMinimal (terse) output\n");
printf("\t--version\tPrint version info and exit\n");
+ printf("\t--help\t\tPrint this page\n");
+ printf("\t--cmdhelp=cmd\tPrint command help, \"all\" for all of them\n");
}
static int parse_cmd_line(int argc, char *argv[])
{
struct thread_data *td = NULL;
- int c, ini_idx = 0, lidx, ret;
+ int c, ini_idx = 0, lidx, ret, dont_add_job = 0;
- while ((c = getopt_long(argc, argv, "", long_options, &lidx)) != -1) {
+ while ((c = getopt_long_only(argc, argv, "", long_options, &lidx)) != -1) {
switch (c) {
case 't':
def_timeout = atoi(optarg);
case 'h':
usage();
exit(0);
+ case 'c':
+ ret = show_cmd_help(options, optarg);
+ exit(ret);
case 'v':
printf("%s\n", fio_version_string);
exit(0);
}
ret = parse_cmd_option(opt, val, options, td);
- if (ret) {
- log_err("fio: job dropped\n");
- put_job(td);
- td = NULL;
- }
+ if (ret)
+ dont_add_job = 1;
break;
}
default:
}
if (td) {
- ret = add_job(td, td->name ?: "fio", 0);
- if (ret)
+ if (dont_add_job)
put_job(td);
+ else {
+ ret = add_job(td, td->name ?: "fio", 0);
+ if (ret)
+ put_job(td);
+ }
}
while (optind < argc) {
f_out = stdout;
f_err = stderr;
+ options_init(options);
+
dupe_job_options();
if (setup_thread_area())