X-Git-Url: https://git.kernel.dk/?p=fio.git;a=blobdiff_plain;f=options.c;h=74c24d02d5d4bc200da3cf671c64d363d02e92a4;hp=dda7cba1bdf16b17f6ea009b722ce8ed79ecc38c;hb=6925dd356191bc40e8a1ebc8fd92a40b476658c3;hpb=3843deb322eb7b54d2d19d7b1ce19c5dc44d57ff

diff --git a/options.c b/options.c
index dda7cba1..74c24d02 100644
--- a/options.c
+++ b/options.c
@@ -3,7 +3,6 @@
 #include 
 #include 
 #include 
-#include 
 #include 
 #include 
 #include 
@@ -16,6 +15,8 @@
 #include "lib/fls.h"
 #include "options.h"
 
+#include "crc/crc32c.h"
+
 /*
  * Check if mmap/mmaphuge has a :/foo/bar/file at the end. If so, return that.
  */
@@ -201,15 +202,45 @@ static int str_rw_cb(void *data, const char *str)
 	struct thread_data *td = data;
 	char *nr = get_opt_postfix(str);
 
-	td->o.ddir_nr = 1;
-	if (nr) {
-		td->o.ddir_nr = atoi(nr);
-		free(nr);
+	td->o.ddir_seq_nr = 1;
+	td->o.ddir_seq_add = 0;
+
+	if (!nr)
+		return 0;
+
+	if (td_random(td))
+		td->o.ddir_seq_nr = atoi(nr);
+	else {
+		long long val;
+
+		if (str_to_decimal(nr, &val, 1, td)) {
+			log_err("fio: rw postfix parsing failed\n");
+			free(nr);
+			return 1;
+		}
+
+		td->o.ddir_seq_add = val;
 	}
 
+	free(nr);
 	return 0;
 }
 
+#ifdef FIO_HAVE_LIBAIO
+static int str_libaio_cb(void *data, const char *str)
+{
+	struct thread_data *td = data;
+
+	if (!strcmp(str, "userspace_reap")) {
+		td->o.userspace_libaio_reap = 1;
+		return 0;
+	}
+
+	log_err("fio: bad libaio sub-option: %s\n", str);
+	return 1;
+}
+#endif
+
 static int str_mem_cb(void *data, const char *mem)
 {
 	struct thread_data *td = data;
@@ -225,13 +256,37 @@ static int str_mem_cb(void *data, const char *mem)
 	return 0;
 }
 
-static int str_lockmem_cb(void fio_unused *data, unsigned long *val)
+static int str_verify_cb(void *data, const char *mem)
+{
+	struct thread_data *td = data;
+
+	if (td->o.verify != VERIFY_CRC32C_INTEL)
+		return 0;
+
+	if (!crc32c_intel_works()) {
+		log_info("fio: System does not support hw accelerated crc32c. Falling back to sw crc32c.\n");
+		td->o.verify = VERIFY_CRC32C;
+	}
+
+	return 0;
+}
+
+static int fio_clock_source_cb(void *data, const char *str)
+{
+	struct thread_data *td = data;
+
+	fio_clock_source = td->o.clocksource;
+	fio_time_init();
+	return 0;
+}
+
+static int str_lockmem_cb(void fio_unused *data, unsigned long long *val)
 {
 	mlock_size = *val;
 	return 0;
 }
 
-static int str_rwmix_read_cb(void *data, unsigned int *val)
+static int str_rwmix_read_cb(void *data, unsigned long long *val)
 {
 	struct thread_data *td = data;
 
@@ -240,7 +295,7 @@ static int str_rwmix_read_cb(void *data, unsigned int *val)
 	return 0;
 }
 
-static int str_rwmix_write_cb(void *data, unsigned int *val)
+static int str_rwmix_write_cb(void *data, unsigned long long *val)
 {
 	struct thread_data *td = data;
 
@@ -250,7 +305,7 @@ static int str_rwmix_write_cb(void *data, unsigned int *val)
 	return 0;
 }
 
 #ifdef FIO_HAVE_IOPRIO
-static int str_prioclass_cb(void *data, unsigned int *val)
+static int str_prioclass_cb(void *data, unsigned long long *val)
 {
 	struct thread_data *td = data;
 	unsigned short mask;
@@ -266,7 +321,7 @@ static int str_prioclass_cb(void *data, unsigned int *val)
 	return 0;
 }
 
-static int str_prio_cb(void *data, unsigned int *val)
+static int str_prio_cb(void *data, unsigned long long *val)
 {
 	struct thread_data *td = data;
 
@@ -290,7 +345,7 @@ static int str_exitall_cb(void)
 }
 
 #ifdef FIO_HAVE_CPU_AFFINITY
-static int str_cpumask_cb(void *data, unsigned int *val)
+static int str_cpumask_cb(void *data, unsigned long long *val)
 {
 	struct thread_data *td = data;
 	unsigned int i;
@@ -304,7 +359,7 @@ static int str_cpumask_cb(void *data, unsigned int *val)
 		return 1;
 	}
 
-	max_cpu = sysconf(_SC_NPROCESSORS_ONLN);
+	max_cpu = cpus_online();
 
 	for (i = 0; i < sizeof(int) * 8; i++) {
 		if ((1 << i) & *val) {
@@ -341,7 +396,7 @@ static int set_cpus_allowed(struct thread_data *td, os_cpu_mask_t *mask,
 	strip_blank_front(&str);
 	strip_blank_end(str);
 
-	max_cpu = sysconf(_SC_NPROCESSORS_ONLN);
+	max_cpu = cpus_online();
 
 	while ((cpu = strsep(&str, ",")) != NULL) {
 		char *str2, *cpu2;
@@ -415,6 +470,16 @@ static int str_verify_cpus_allowed_cb(void *data, const char *input)
 }
 #endif
 
+#ifdef FIO_HAVE_TRIM
+static int str_verify_trim_cb(void *data, unsigned long long *val)
+{
+	struct thread_data *td = data;
+
+	td->o.trim_percentage = *val;
+	return 0;
+}
+#endif
+
 static int str_fst_cb(void *data, const char *str)
 {
 	struct thread_data *td = data;
@@ -429,6 +494,7 @@ static int str_fst_cb(void *data, const char *str)
 	return 0;
 }
 
+#ifdef FIO_HAVE_SYNC_FILE_RANGE
 static int str_sfr_cb(void *data, const char *str)
 {
 	struct thread_data *td = data;
@@ -442,9 +508,11 @@ static int str_sfr_cb(void *data, const char *str)
 
 	return 0;
 }
+#endif
 
 static int check_dir(struct thread_data *td, char *fname)
 {
+#if 0
 	char file[PATH_MAX], *dir;
 	int elen = 0;
 
@@ -457,7 +525,6 @@ static int check_dir(struct thread_data *td, char *fname)
 	sprintf(file + elen, "%s", fname);
 	dir = dirname(file);
 
-#if 0
 	{
 		struct stat sb;
 		/*
@@ -586,7 +653,7 @@ static int str_opendir_cb(void *data, const char fio_unused *str)
 	return add_dir_files(td, td->o.opendir);
 }
 
-static int str_verify_offset_cb(void *data, unsigned int *off)
+static int str_verify_offset_cb(void *data, unsigned long long *off)
 {
 	struct thread_data *td = data;
 
@@ -638,6 +705,11 @@ static int str_verify_pattern_cb(void *data, const char *input)
 		}
 	}
 	td->o.verify_pattern_bytes = i;
+	/*
+	 * VERIFY_META could already be set
+	 */
+	if (td->o.verify == VERIFY_NONE)
+		td->o.verify = VERIFY_PATTERN;
 
 	return 0;
 }
@@ -682,6 +754,7 @@ static int str_gtod_reduce_cb(void *data, int *il)
 	struct thread_data *td = data;
 	int val = *il;
 
+	td->o.disable_lat = !!val;
 	td->o.disable_clat = !!val;
 	td->o.disable_slat = !!val;
 	td->o.disable_bw = !!val;
@@ -691,7 +764,7 @@ static int str_gtod_reduce_cb(void *data, int *il)
 	return 0;
 }
 
-static int str_gtod_cpu_cb(void *data, int *il)
+static int str_gtod_cpu_cb(void *data, long long *il)
 {
 	struct thread_data *td = data;
 	int val = *il;
@@ -701,6 +774,20 @@ static int str_gtod_cpu_cb(void *data, int *il)
 	return 0;
 }
 
+static int str_size_cb(void *data, unsigned long long *__val)
+{
+	struct thread_data *td = data;
+	unsigned long long v = *__val;
+
+	if (parse_is_percent(v)) {
+		td->o.size = 0;
+		td->o.size_percent = -1ULL - v;
+	} else
+		td->o.size = v;
+
+	return 0;
+}
+
 static int rw_verify(struct fio_option *o, void *data)
 {
 	struct thread_data *td = data;
@@ -852,12 +939,30 @@ static struct fio_option options[FIO_MAX_OPTS] = {
 			  },
 		},
 	},
+	{
+		.name = "rw_sequencer",
+		.type = FIO_OPT_STR,
+		.off1 = td_var_offset(rw_seq),
+		.help = "IO offset generator modifier",
+		.def = "sequential",
+		.posval = {
+			  { .ival = "sequential",
+			    .oval = RW_SEQ_SEQ,
+			    .help = "Generate sequential offsets",
+			  },
+			  { .ival = "identical",
+			    .oval = RW_SEQ_IDENT,
+			    .help = "Generate identical offsets",
+			  },
+		},
+	},
+
 	{
 		.name = "ioengine",
 		.type = FIO_OPT_STR_STORE,
 		.off1 = td_var_offset(ioengine),
 		.help = "IO engine to use",
-		.def = "sync",
+		.def = FIO_PREFERRED_ENGINE,
 		.posval = {
 			  { .ival = "sync",
 			    .help = "Use read/write",
@@ -866,11 +971,12 @@ static struct fio_option options[FIO_MAX_OPTS] = {
 			    .help = "Use pread/pwrite",
 			  },
 			  { .ival = "vsync",
-			    .help = "Use readv/writev",
+			    .help = "Use readv/writev",
 			  },
 #ifdef FIO_HAVE_LIBAIO
 			  { .ival = "libaio",
 			    .help = "Linux native asynchronous IO",
+			    .cb = str_libaio_cb,
 			  },
 #endif
 #ifdef FIO_HAVE_POSIXAIO
@@ -882,9 +988,14 @@ static struct fio_option options[FIO_MAX_OPTS] = {
 			  { .ival = "solarisaio",
 			    .help = "Solaris native asynchronous IO",
 			  },
+#endif
+#ifdef FIO_HAVE_WINDOWSAIO
+			  { .ival = "windowsaio",
+			    .help = "Windows native asynchronous IO"
+			  },
 #endif
 			  { .ival = "mmap",
-			    .help = "Memory mapped IO",
+			    .help = "Memory mapped IO"
 			  },
 #ifdef FIO_HAVE_SPLICE
 			  { .ival = "splice",
@@ -911,12 +1022,22 @@ static struct fio_option options[FIO_MAX_OPTS] = {
 			  },
 #endif
 			  { .ival = "cpuio",
-			    .help = "CPU cycler burner engine",
+			    .help = "CPU cycle burner engine",
 			  },
 #ifdef FIO_HAVE_GUASI
 			  { .ival = "guasi",
 			    .help = "GUASI IO engine",
 			  },
+#endif
+#ifdef FIO_HAVE_BINJECT
+			  { .ival = "binject",
+			    .help = "binject direct inject block engine",
+			  },
+#endif
+#ifdef FIO_HAVE_RDMA
+			  { .ival = "rdma",
+			    .help = "RDMA IO engine",
+			  },
 #endif
 			  { .ival = "external",
 			    .help = "Load external engine (append name)",
@@ -927,7 +1048,7 @@ static struct fio_option options[FIO_MAX_OPTS] = {
 		.name = "iodepth",
 		.type = FIO_OPT_INT,
 		.off1 = td_var_offset(iodepth),
-		.help = "Amount of IO buffers to keep in flight",
+		.help = "Number of IO buffers to keep in flight",
 		.minval = 1,
 		.def = "1",
 	},
@@ -960,12 +1081,12 @@ static struct fio_option options[FIO_MAX_OPTS] = {
 	{
 		.name = "size",
 		.type = FIO_OPT_STR_VAL,
-		.off1 = td_var_offset(size),
-		.minval = 1,
+		.cb = str_size_cb,
 		.help = "Total size of device or files",
 	},
 	{
 		.name = "fill_device",
+		.alias = "fill_fs",
 		.type = FIO_OPT_BOOL,
 		.off1 = td_var_offset(fill_device),
 		.help = "Write until an ENOSPC error occurs",
@@ -1043,6 +1164,14 @@ static struct fio_option options[FIO_MAX_OPTS] = {
 		.def = "1",
 		.parent = "rw",
 	},
+	{
+		.name = "use_os_rand",
+		.type = FIO_OPT_BOOL,
+		.off1 = td_var_offset(use_os_rand),
+		.help = "Set to use OS random generator",
+		.def = "0",
+		.parent = "rw",
+	},
 	{
 		.name = "norandommap",
 		.type = FIO_OPT_STR_SET,
@@ -1060,6 +1189,7 @@ static struct fio_option options[FIO_MAX_OPTS] = {
 	},
 	{
 		.name = "nrfiles",
+		.alias = "nr_files",
 		.type = FIO_OPT_INT,
 		.off1 = td_var_offset(nr_files),
 		.help = "Split job workload between this number of files",
@@ -1097,12 +1227,37 @@ static struct fio_option options[FIO_MAX_OPTS] = {
 #ifdef FIO_HAVE_FALLOCATE
 	{
 		.name = "fallocate",
-		.type = FIO_OPT_BOOL,
-		.off1 = td_var_offset(fallocate),
-		.help = "Use fallocate() when laying out files",
-		.def = "1",
-	},
+		.type = FIO_OPT_STR,
+		.off1 = td_var_offset(fallocate_mode),
+		.help = "Whether pre-allocation is performed when laying out files",
+		.def = "posix",
+		.posval = {
+			  { .ival = "none",
+			    .oval = FIO_FALLOCATE_NONE,
+			    .help = "Do not pre-allocate space",
+			  },
+			  { .ival = "posix",
+			    .oval = FIO_FALLOCATE_POSIX,
+			    .help = "Use posix_fallocate()",
+			  },
+#ifdef FIO_HAVE_LINUX_FALLOCATE
+			  { .ival = "keep",
+			    .oval = FIO_FALLOCATE_KEEP_SIZE,
+			    .help = "Use fallocate(..., FALLOC_FL_KEEP_SIZE, ...)",
+			  },
 #endif
+			  /* Compatibility with former boolean values */
+			  { .ival = "0",
+			    .oval = FIO_FALLOCATE_NONE,
+			    .help = "Alias for 'none'",
+			  },
+			  { .ival = "1",
+			    .oval = FIO_FALLOCATE_POSIX,
+			    .help = "Alias for 'posix'",
+			  },
+		},
+	},
+#endif	/* FIO_HAVE_FALLOCATE */
 	{
 		.name = "fadvise_hint",
 		.type = FIO_OPT_BOOL,
@@ -1124,6 +1279,13 @@ static struct fio_option options[FIO_MAX_OPTS] = {
 		.help = "Issue fdatasync for writes every given number of blocks",
 		.def = "0",
 	},
+	{
+		.name = "write_barrier",
+		.type = FIO_OPT_INT,
+		.off1 = td_var_offset(barrier_blocks),
+		.help = "Make every Nth write a barrier write",
+		.def = "0",
+	},
 #ifdef FIO_HAVE_SYNC_FILE_RANGE
 	{
 		.name = "sync_file_range",
@@ -1189,7 +1351,7 @@ static struct fio_option options[FIO_MAX_OPTS] = {
 	},
 	{
 		.name = "startdelay",
-		.type = FIO_OPT_INT,
+		.type = FIO_OPT_STR_VAL_TIME,
 		.off1 = td_var_offset(start_delay),
 		.help = "Only start job when this period has passed",
 		.def = "0",
@@ -1214,6 +1376,29 @@ static struct fio_option options[FIO_MAX_OPTS] = {
 		.off1 = td_var_offset(ramp_time),
 		.help = "Ramp up time before measuring performance",
 	},
+	{
+		.name = "clocksource",
+		.type = FIO_OPT_STR,
+		.cb = fio_clock_source_cb,
+		.off1 = td_var_offset(clocksource),
+		.help = "What type of timing source to use",
+		.posval = {
+			  { .ival = "gettimeofday",
+			    .oval = CS_GTOD,
+			    .help = "Use gettimeofday(2) for timing",
+			  },
+			  { .ival = "clock_gettime",
+			    .oval = CS_CGETTIME,
+			    .help = "Use clock_gettime(2) for timing",
+			  },
+#ifdef ARCH_HAVE_CPU_CLOCK
+			  { .ival = "cpu",
+			    .oval = CS_CPUCLOCK,
+			    .help = "Use CPU private clock",
+			  },
+#endif
+		},
+	},
 	{
 		.name = "mem",
 		.alias = "iomem",
@@ -1264,6 +1449,7 @@ static struct fio_option options[FIO_MAX_OPTS] = {
 		.type = FIO_OPT_STR,
 		.off1 = td_var_offset(verify),
 		.help = "Verify data written",
+		.cb = str_verify_cb,
 		.def = "0",
 		.posval = {
 			  { .ival = "0",
@@ -1368,6 +1554,14 @@ static struct fio_option options[FIO_MAX_OPTS] = {
 		.help = "Exit on a single verify failure, don't continue",
 		.parent = "verify",
 	},
+	{
+		.name = "verify_dump",
+		.type = FIO_OPT_BOOL,
+		.off1 = td_var_offset(verify_dump),
+		.def = "1",
+		.help = "Dump contents of good and bad blocks on failure",
+		.parent = "verify",
+	},
 	{
 		.name = "verify_async",
 		.type = FIO_OPT_INT,
@@ -1376,6 +1570,20 @@ static struct fio_option options[FIO_MAX_OPTS] = {
 		.help = "Number of async verifier threads to use",
 		.parent = "verify",
 	},
+	{
+		.name = "verify_backlog",
+		.type = FIO_OPT_STR_VAL,
+		.off1 = td_var_offset(verify_backlog),
+		.help = "Verify after this number of blocks are written",
+		.parent = "verify",
+	},
+	{
+		.name = "verify_backlog_batch",
+		.type = FIO_OPT_INT,
+		.off1 = td_var_offset(verify_batch),
+		.help = "Verify this number of IO blocks",
+		.parent = "verify",
+	},
 #ifdef FIO_HAVE_CPU_AFFINITY
 	{
 		.name = "verify_async_cpus",
@@ -1384,6 +1592,39 @@ static struct fio_option options[FIO_MAX_OPTS] = {
 		.help = "Set CPUs allowed for async verify threads",
 		.parent = "verify_async",
 	},
+#endif
+#ifdef FIO_HAVE_TRIM
+	{
+		.name = "trim_percentage",
+		.type = FIO_OPT_INT,
+		.cb = str_verify_trim_cb,
+		.maxval = 100,
+		.help = "Number of verify blocks to discard/trim",
+		.parent = "verify",
+		.def = "0",
+	},
+	{
+		.name = "trim_verify_zero",
+		.type = FIO_OPT_INT,
+		.help = "Verify that trim/discarded blocks are returned as zeroes",
+		.off1 = td_var_offset(trim_zero),
+		.parent = "trim_percentage",
+		.def = "1",
+	},
+	{
+		.name = "trim_backlog",
+		.type = FIO_OPT_STR_VAL,
+		.off1 = td_var_offset(trim_backlog),
+		.help = "Trim after this number of blocks are written",
+		.parent = "trim_percentage",
+	},
+	{
+		.name = "trim_backlog_batch",
+		.type = FIO_OPT_INT,
+		.off1 = td_var_offset(trim_batch),
+		.help = "Trim this number of IO blocks",
+		.parent = "trim_percentage",
+	},
 #endif
 	{
 		.name = "write_iolog",
@@ -1397,6 +1638,21 @@ static struct fio_option options[FIO_MAX_OPTS] = {
 		.off1 = td_var_offset(read_iolog_file),
 		.help = "Playback IO pattern from file",
 	},
+	{
+		.name = "replay_no_stall",
+		.type = FIO_OPT_INT,
+		.off1 = td_var_offset(no_stall),
+		.def = "0",
+		.parent = "read_iolog",
+		.help = "Playback IO pattern file as fast as possible without stalls",
+	},
+	{
+		.name = "replay_redirect",
+		.type = FIO_OPT_STR_STORE,
+		.off1 = td_var_offset(replay_redirect),
+		.parent = "read_iolog",
+		.help = "Replay all I/O onto this device, regardless of trace device",
+	},
 	{
 		.name = "exec_prerun",
 		.type = FIO_OPT_STR_STORE,
@@ -1535,7 +1791,7 @@ static struct fio_option options[FIO_MAX_OPTS] = {
 		.type = FIO_OPT_INT,
 		.off1 = td_var_offset(rate_iops_min[0]),
 		.off2 = td_var_offset(rate_iops_min[1]),
-		.help = "Job must meet this rate or it will be shutdown",
+		.help = "Job must meet this rate or it will be shut down",
 		.parent = "rate_iops",
 	},
 	{
@@ -1580,7 +1836,7 @@ static struct fio_option options[FIO_MAX_OPTS] = {
 		.name = "create_fsync",
 		.type = FIO_OPT_BOOL,
 		.off1 = td_var_offset(create_fsync),
-		.help = "Fsync file after creation",
+		.help = "fsync file after creation",
 		.def = "1",
 	},
 	{
@@ -1594,7 +1850,7 @@ static struct fio_option options[FIO_MAX_OPTS] = {
 		.name = "pre_read",
 		.type = FIO_OPT_BOOL,
 		.off1 = td_var_offset(pre_read),
-		.help = "Preread files before starting official testing",
+		.help = "Pre-read files before starting official testing",
 		.def = "0",
 	},
 	{
@@ -1654,6 +1910,7 @@ static struct fio_option options[FIO_MAX_OPTS] = {
 	},
 	{
 		.name = "stonewall",
+		.alias = "wait_for_previous",
 		.type = FIO_OPT_STR_SET,
 		.off1 = td_var_offset(stonewall),
 		.help = "Insert a hard barrier between this job and previous",
@@ -1709,6 +1966,24 @@ static struct fio_option options[FIO_MAX_OPTS] = {
 		.off1 = td_var_offset(refill_buffers),
 		.help = "Refill IO buffers on every IO submit",
 	},
+	{
+		.name = "clat_percentiles",
+		.type = FIO_OPT_BOOL,
+		.off1 = td_var_offset(clat_percentiles),
+		.help = "Enable the reporting of completion latency percentiles",
+		.def = "0",
+	},
+	{
+		.name = "percentile_list",
+		.type = FIO_OPT_FLOAT_LIST,
+		.off1 = td_var_offset(percentile_list),
+		.off2 = td_var_offset(overwrite_plist),
+		.help = "Specify a custom list of percentiles to report",
+		.maxlen = FIO_IO_U_LIST_MAX_LEN,
+		.minfp = 0.0,
+		.maxfp = 100.0,
+	},
+
 #ifdef FIO_HAVE_DISK_UTIL
 	{
 		.name = "disk_util",
@@ -1725,6 +2000,14 @@ static struct fio_option options[FIO_MAX_OPTS] = {
 		.cb = str_gtod_reduce_cb,
 		.def = "0",
 	},
+	{
+		.name = "disable_lat",
+		.type = FIO_OPT_BOOL,
+		.off1 = td_var_offset(disable_lat),
+		.help = "Disable latency numbers",
+		.parent = "gtod_reduce",
+		.def = "0",
+	},
 	{
 		.name = "disable_clat",
 		.type = FIO_OPT_BOOL,
@@ -1737,7 +2020,7 @@ static struct fio_option options[FIO_MAX_OPTS] = {
 		.name = "disable_slat",
 		.type = FIO_OPT_BOOL,
 		.off1 = td_var_offset(disable_slat),
-		.help = "Disable submissionn latency numbers",
+		.help = "Disable submission latency numbers",
 		.parent = "gtod_reduce",
 		.def = "0",
 	},
@@ -1753,14 +2036,14 @@ static struct fio_option options[FIO_MAX_OPTS] = {
 		.name = "gtod_cpu",
 		.type = FIO_OPT_INT,
 		.cb = str_gtod_cpu_cb,
-		.help = "Setup dedicated gettimeofday() thread on this CPU",
+		.help = "Set up dedicated gettimeofday() thread on this CPU",
 		.verify = gtod_cpu_verify,
 	},
 	{
 		.name = "continue_on_error",
 		.type = FIO_OPT_BOOL,
 		.off1 = td_var_offset(continue_on_error),
-		.help = "Continue on non-fatal errors during I/O",
+		.help = "Continue on non-fatal errors during IO",
		.def = "0",
 	},
 	{
@@ -1783,6 +2066,13 @@ static struct fio_option options[FIO_MAX_OPTS] = {
 		.minval = 100,
 		.maxval = 1000,
 	},
+	{
+		.name = "cgroup_nodelete",
+		.type = FIO_OPT_BOOL,
+		.off1 = td_var_offset(cgroup_nodelete),
+		.help = "Do not delete cgroups after job completion",
+		.def = "0",
+	},
 	{
 		.name = "uid",
 		.type = FIO_OPT_INT,
@@ -1800,9 +2090,10 @@ static struct fio_option options[FIO_MAX_OPTS] = {
 	},
 };
 
-static void add_to_lopt(struct option *lopt, struct fio_option *o)
+static void add_to_lopt(struct option *lopt, struct fio_option *o,
+			const char *name)
 {
-	lopt->name = (char *) o->name;
+	lopt->name = (char *) name;
 	lopt->val = FIO_GETOPT_JOB;
 	if (o->type == FIO_OPT_STR_SET)
 		lopt->has_arg = no_argument;
@@ -1823,7 +2114,11 @@ void fio_options_dup_and_init(struct option *long_options)
 
 	o = &options[0];
 	while (o->name) {
-		add_to_lopt(&long_options[i], o);
+		add_to_lopt(&long_options[i], o, o->name);
+		if (o->alias) {
+			i++;
+			add_to_lopt(&long_options[i], o, o->alias);
+		}
 
 		i++;
 		o++;
@@ -1864,11 +2159,11 @@ void fio_keywords_init(void)
 	sprintf(buf, "%lu", page_size);
 	fio_keywords[0].replace = strdup(buf);
 
-	mb_memory = os_phys_mem() / page_size;
+	mb_memory = os_phys_mem() / (1024 * 1024);
 	sprintf(buf, "%llu", mb_memory);
 	fio_keywords[1].replace = strdup(buf);
 
-	l = sysconf(_SC_NPROCESSORS_ONLN);
+	l = cpus_online();
 	sprintf(buf, "%lu", l);
 	fio_keywords[2].replace = strdup(buf);
 }