same time, but writes get exclusive
access.
- The option may be post-fixed with a lock batch number. If
- set, then each thread/process may do that amount of IOs to
- the file before giving up the lock. Since lock acquisition is
- expensive, batching the lock/unlocks will speed up IO.
-
readwrite=str
rw=str Type of io pattern. Accepted values are:
ioscheduler=str Attempt to switch the device hosting the file to the specified
io scheduler before running.
-cpuload=int If the job is a CPU cycle eater, attempt to use the specified
- percentage of CPU cycles.
-
-cpuchunks=int If the job is a CPU cycle eater, split the load into
- cycles of the given time. In microseconds.
-
disk_util=bool Generate disk utilization statistics, if the platform
supports it. Defaults to on.
enabled when polling for a minimum of 0 events (eg when
iodepth_batch_complete=0).
+[cpu] cpuload=int Attempt to use the specified percentage of CPU cycles.
+
+[cpu] cpuchunks=int Split the load into cycles of the given time. In
+ microseconds.
+
[netsplice] hostname=str
[net] hostname=str The host name or IP address to use for TCP or UDP based IO.
If the job is a TCP listener or UDP reader, the hostname is not
CPPFLAGS= -D_LARGEFILE_SOURCE -D_FILE_OFFSET_BITS=64 $(DEBUGFLAGS)
OPTFLAGS= -O3 -g -ffast-math $(EXTFLAGS)
CFLAGS = -std=gnu99 -Wwrite-strings -Wall $(OPTFLAGS)
-LIBS = -lm $(EXTLIBS)
+LIBS = -lm -lz $(EXTLIBS)
PROGS = fio
SCRIPTS = fio_generate_plots
- UNAME := $(shell uname)
ifneq ($(wildcard config-host.mak),)
all:
include config-host.mak
endif
-SOURCE := gettime.c fio.c ioengines.c init.c stat.c log.c time.c filesetup.c \
+ifdef CONFIG_GFIO
+ PROGS += gfio
+endif
+
+SOURCE := gettime.c ioengines.c init.c stat.c log.c time.c filesetup.c \
eta.c verify.c memory.c io_u.c parse.c mutex.c options.c \
- rbtree.c smalloc.c filehash.c profile.c debug.c lib/rand.c \
+ lib/rbtree.c smalloc.c filehash.c profile.c debug.c lib/rand.c \
lib/num2str.c lib/ieee754.c $(wildcard crc/*.c) engines/cpu.c \
engines/mmap.c engines/sync.c engines/null.c engines/net.c \
memalign.c server.c client.c iolog.c backend.c libfio.c flow.c \
- json.c lib/zipf.c lib/axmap.c lib/lfsr.c gettime-thread.c \
- helpers.c lib/flist_sort.c lib/hweight.c lib/getrusage.c \
- idletime.c
+ cconv.c lib/prio_tree.c json.c lib/zipf.c lib/axmap.c \
+ lib/lfsr.c gettime-thread.c helpers.c lib/flist_sort.c \
+ lib/hweight.c lib/getrusage.c idletime.c
ifdef CONFIG_64BIT_LLP64
CFLAGS += -DBITS_PER_LONG=32
SOURCE += lib/inet_aton.c
endif
- ifeq ($(UNAME), Linux)
+ ifeq ($(CONFIG_TARGET_OS), Linux)
SOURCE += diskutil.c fifo.c blktrace.c cgroup.c trim.c engines/sg.c \
engines/binject.c profiles/tiobench.c
LIBS += -lpthread -ldl
LDFLAGS += -rdynamic
endif
- ifeq ($(UNAME), Android)
+ ifeq ($(CONFIG_TARGET_OS), Android)
SOURCE += diskutil.c fifo.c blktrace.c trim.c profiles/tiobench.c
LIBS += -ldl
LDFLAGS += -rdynamic
endif
- ifeq ($(UNAME), SunOS)
+ ifeq ($(CONFIG_TARGET_OS), SunOS)
LIBS += -lpthread -ldl
CPPFLAGS += -D__EXTENSIONS__
endif
- ifeq ($(UNAME), FreeBSD)
+ ifeq ($(CONFIG_TARGET_OS), FreeBSD)
LIBS += -lpthread -lrt
LDFLAGS += -rdynamic
endif
- ifeq ($(UNAME), NetBSD)
+ ifeq ($(CONFIG_TARGET_OS), NetBSD)
LIBS += -lpthread -lrt
LDFLAGS += -rdynamic
endif
- ifeq ($(UNAME), AIX)
+ ifeq ($(CONFIG_TARGET_OS), AIX)
LIBS += -lpthread -ldl -lrt
CPPFLAGS += -D_LARGE_FILES -D__ppc__
LDFLAGS += -L/opt/freeware/lib -Wl,-blibpath:/opt/freeware/lib:/usr/lib:/lib -Wl,-bmaxdata:0x80000000
endif
- ifeq ($(UNAME), HP-UX)
+ ifeq ($(CONFIG_TARGET_OS), HP-UX)
LIBS += -lpthread -ldl -lrt
CFLAGS += -D_LARGEFILE64_SOURCE -D_XOPEN_SOURCE_EXTENDED
endif
- ifeq ($(UNAME), Darwin)
+ ifeq ($(CONFIG_TARGET_OS), Darwin)
LIBS += -lpthread -ldl
endif
- ifneq (,$(findstring CYGWIN,$(UNAME)))
+ ifneq (,$(findstring CYGWIN,$(CONFIG_TARGET_OS)))
SOURCE := $(filter-out engines/mmap.c,$(SOURCE))
SOURCE += os/windows/posix.c
LIBS += -lpthread -lpsapi -lws2_32
endif
OBJS = $(SOURCE:.c=.o)
+
+FIO_OBJS = $(OBJS) fio.o
+GFIO_OBJS = $(OBJS) gfio.o graph.o tickmarks.o ghelpers.o goptions.o gerror.o \
+ gclient.o gcompat.o cairo_text_helpers.o printing.o
+
-include $(OBJS:.o=.d)
T_SMALLOC_OBJS = t/stest.o
T_AXMAP_OBJS += lib/lfsr.o lib/axmap.o
T_AXMAP_PROGS = t/axmap
+ T_LFSR_TEST_OBJS = t/lfsr-test.o
+ T_LFSR_TEST_OBJS += lib/lfsr.o
+ T_LFSR_TEST_PROGS = t/lfsr-test
+
T_OBJS = $(T_SMALLOC_OBJS)
T_OBJS += $(T_IEEE_OBJS)
T_OBJS += $(T_ZIPF_OBJS)
T_OBJS += $(T_AXMAP_OBJS)
+ T_OBJS += $(T_LFSR_TEST_OBJS)
T_PROGS = $(T_SMALLOC_PROGS)
T_PROGS += $(T_IEEE_PROGS)
T_PROGS += $(T_ZIPF_PROGS)
T_PROGS += $(T_AXMAP_PROGS)
+ T_PROGS += $(T_LFSR_TEST_PROGS)
ifneq ($(findstring $(MAKEFLAGS),s),s)
ifndef V
prefix = /usr/local
bindir = $(prefix)/bin
- ifeq ($(UNAME), Darwin)
+ ifeq ($(CONFIG_TARGET_OS), Darwin)
mandir = /usr/share/man
else
mandir = $(prefix)/man
# init.o depends on FIO-VERSION-FILE so the embedded version string is
# regenerated whenever the version changes.
init.o: FIO-VERSION-FILE init.c
	$(QUIET_CC)$(CC) -o init.o $(CFLAGS) $(CPPFLAGS) -c init.c
# GTK front-end (gfio) objects: each is compiled with the extra GTK_CFLAGS
# produced by configure (pkg-config gtk+-2.0/gthread-2.0), in addition to
# the regular CFLAGS/CPPFLAGS.
gcompat.o: gcompat.c gcompat.h
	$(QUIET_CC)$(CC) $(CFLAGS) $(GTK_CFLAGS) $(CPPFLAGS) -c gcompat.c

goptions.o: goptions.c goptions.h
	$(QUIET_CC)$(CC) $(CFLAGS) $(GTK_CFLAGS) $(CPPFLAGS) -c goptions.c

ghelpers.o: ghelpers.c ghelpers.h
	$(QUIET_CC)$(CC) $(CFLAGS) $(GTK_CFLAGS) $(CPPFLAGS) -c ghelpers.c

gerror.o: gerror.c gerror.h
	$(QUIET_CC)$(CC) $(CFLAGS) $(GTK_CFLAGS) $(CPPFLAGS) -c gerror.c

gclient.o: gclient.c gclient.h
	$(QUIET_CC)$(CC) $(CFLAGS) $(GTK_CFLAGS) $(CPPFLAGS) -c gclient.c

# NOTE(review): prerequisite "ghelpers.c" breaks the foo.c/foo.h pattern of
# the sibling rules — confirm it should not be gfio.h/ghelpers.h.
gfio.o: gfio.c ghelpers.c
	$(QUIET_CC)$(CC) $(CFLAGS) $(GTK_CFLAGS) $(CPPFLAGS) -c gfio.c

graph.o: graph.c graph.h
	$(QUIET_CC)$(CC) $(CFLAGS) $(GTK_CFLAGS) $(CPPFLAGS) -c graph.c

cairo_text_helpers.o: cairo_text_helpers.c cairo_text_helpers.h
	$(QUIET_CC)$(CC) $(CFLAGS) $(GTK_CFLAGS) $(CPPFLAGS) -c cairo_text_helpers.c

printing.o: printing.c printing.h
	$(QUIET_CC)$(CC) $(CFLAGS) $(GTK_CFLAGS) $(CPPFLAGS) -c printing.c

# Unit-test binaries: smalloc tester and IEEE 754 conversion tester.
t/stest: $(T_SMALLOC_OBJS)
	$(QUIET_LINK)$(CC) $(LDFLAGS) $(CFLAGS) -o $@ $(T_SMALLOC_OBJS) $(LIBS) $(LDFLAGS)
t/ieee754: $(T_IEEE_OBJS)
	$(QUIET_LINK)$(CC) $(LDFLAGS) $(CFLAGS) -o $@ $(T_IEEE_OBJS) $(LIBS) $(LDFLAGS)
# Link the command-line fio binary from the shared objects plus fio.o.
fio: $(FIO_OBJS)
	$(QUIET_LINK)$(CC) $(LDFLAGS) $(CFLAGS) -o $@ $(FIO_OBJS) $(LIBS) $(LDFLAGS)
+
# Link the GTK front-end. Libraries must come AFTER the object files on the
# link line (traditional/--as-needed linkers resolve symbols left to right);
# the original put $(LIBS) first and listed it twice, dropped $(LDFLAGS) and
# $(CFLAGS), and hard-coded the output name instead of using $@.
gfio: $(GFIO_OBJS)
	$(QUIET_LINK)$(CC) $(LDFLAGS) $(CFLAGS) -o $@ $(GFIO_OBJS) $(LIBS) $(GTK_LDFLAGS)
+
# Unit-test binaries: zipf distribution generator and axmap tester.
t/genzipf: $(T_ZIPF_OBJS)
	$(QUIET_LINK)$(CC) $(LDFLAGS) $(CFLAGS) -o $@ $(T_ZIPF_OBJS) $(LIBS) $(LDFLAGS)
t/axmap: $(T_AXMAP_OBJS)
	$(QUIET_LINK)$(CC) $(LDFLAGS) $(CFLAGS) -o $@ $(T_AXMAP_OBJS) $(LIBS) $(LDFLAGS)
-fio: $(OBJS)
- $(QUIET_LINK)$(CC) $(LDFLAGS) $(CFLAGS) -o $@ $(OBJS) $(LIBS) $(LDFLAGS)
-
# Unit-test binary: LFSR random-sequence tester.
t/lfsr-test: $(T_LFSR_TEST_OBJS)
	$(QUIET_LINK)$(CC) $(LDFLAGS) $(CFLAGS) -o $@ $(T_LFSR_TEST_OBJS) $(LIBS) $(LDFLAGS)
+
# Remove everything the build can produce. The original had two conflicting
# rm lines left over from a botched merge (one missing the gfio objects, one
# missing config-host.h) plus a stray "++" marker; this is the union of both.
clean: FORCE
	-rm -f .depend $(GFIO_OBJS) $(OBJS) $(T_OBJS) $(PROGS) $(T_PROGS) core.* core gfio FIO-VERSION-FILE config-host.mak config-host.h cscope.out *.d
# Rebuild the cscope cross-reference database for the whole tree.
cscope:
	@cscope -b -R
#include <sys/stat.h>
#include <sys/wait.h>
#include <sys/ipc.h>
- #ifndef FIO_NO_HAVE_SHM_H
- #include <sys/shm.h>
- #endif
#include <sys/mman.h>
#include "fio.h"
+ #ifndef FIO_NO_HAVE_SHM_H
+ #include <sys/shm.h>
+ #endif
#include "hash.h"
#include "smalloc.h"
#include "verify.h"
static char *cgroup_mnt;
static int exit_value;
static volatile int fio_abort;
+static unsigned int nr_process = 0;
+static unsigned int nr_thread = 0;
struct io_log *agg_io_log[DDIR_RWDIR_CNT];
int groupid = 0;
unsigned int thread_number = 0;
unsigned int stat_number = 0;
-unsigned int nr_process = 0;
-unsigned int nr_thread = 0;
int shm_id = 0;
int temp_stall_ts;
unsigned long done_secs = 0;
return 1;
}
- if (ddir_rw_sum(td->io_bytes) < td->o.size) {
+ if (td->o.size != -1ULL && ddir_rw_sum(td->io_bytes) < td->o.size) {
uint64_t diff;
/*
{
unsigned long long elapsed;
struct thread_data *td = data;
+ struct thread_options *o = &td->o;
pthread_condattr_t attr;
int clear_state;
+ int ret;
- if (!td->o.use_thread) {
+ if (!o->use_thread) {
setsid();
td->pid = getpid();
} else
dprint(FD_PROCESS, "jobs pid=%d started\n", (int) td->pid);
+ if (is_backend)
+ fio_server_send_start(td);
+
INIT_FLIST_HEAD(&td->io_u_freelist);
INIT_FLIST_HEAD(&td->io_u_busylist);
INIT_FLIST_HEAD(&td->io_u_requeues);
* eating a file descriptor
*/
fio_mutex_remove(td->mutex);
+ td->mutex = NULL;
/*
* A new gid requires privilege, so we need to do this before setting
* the uid.
*/
- if (td->o.gid != -1U && setgid(td->o.gid)) {
+ if (o->gid != -1U && setgid(o->gid)) {
td_verror(td, errno, "setgid");
goto err;
}
- if (td->o.uid != -1U && setuid(td->o.uid)) {
+ if (o->uid != -1U && setuid(o->uid)) {
td_verror(td, errno, "setuid");
goto err;
}
* If we have a gettimeofday() thread, make sure we exclude that
* thread from this job
*/
- if (td->o.gtod_cpu)
- fio_cpu_clear(&td->o.cpumask, td->o.gtod_cpu);
+ if (o->gtod_cpu)
+ fio_cpu_clear(&o->cpumask, o->gtod_cpu);
/*
* Set affinity first, in case it has an impact on the memory
* allocations.
*/
- if (td->o.cpumask_set && fio_setaffinity(td->pid, td->o.cpumask) == -1) {
- td_verror(td, errno, "cpu_set_affinity");
- goto err;
+ if (o->cpumask_set) {
+ ret = fio_setaffinity(td->pid, o->cpumask);
+ if (ret == -1) {
+ td_verror(td, errno, "cpu_set_affinity");
+ goto err;
+ }
}
#ifdef CONFIG_LIBNUMA
if (init_io_u(td))
goto err;
- if (td->o.verify_async && verify_async_init(td))
+ if (o->verify_async && verify_async_init(td))
goto err;
- if (td->ioprio_set) {
- if (ioprio_set(IOPRIO_WHO_PROCESS, 0, td->ioprio) == -1) {
+ if (o->ioprio) {
+ ret = ioprio_set(IOPRIO_WHO_PROCESS, 0, o->ioprio_class, o->ioprio);
+ if (ret == -1) {
td_verror(td, errno, "ioprio_set");
goto err;
}
goto err;
errno = 0;
- if (nice(td->o.nice) == -1 && errno != 0) {
+ if (nice(o->nice) == -1 && errno != 0) {
td_verror(td, errno, "nice");
goto err;
}
- if (td->o.ioscheduler && switch_ioscheduler(td))
+ if (o->ioscheduler && switch_ioscheduler(td))
goto err;
- if (!td->o.create_serialize && setup_files(td))
+ if (!o->create_serialize && setup_files(td))
goto err;
if (td_io_init(td))
if (init_random_map(td))
goto err;
- if (td->o.exec_prerun) {
- if (exec_string(td->o.exec_prerun))
- goto err;
- }
+ if (o->exec_prerun && exec_string(o->exec_prerun))
+ goto err;
- if (td->o.pre_read) {
+ if (o->pre_read) {
if (pre_read_files(td) < 0)
goto err;
}
+ fio_verify_init(td);
+
fio_gettime(&td->epoch, NULL);
fio_getrusage(&td->ru_start);
clear_state = 0;
td->ts.io_bytes[DDIR_WRITE] = td->io_bytes[DDIR_WRITE];
td->ts.io_bytes[DDIR_TRIM] = td->io_bytes[DDIR_TRIM];
+ fio_unpin_memory(td);
+
fio_mutex_down(writeout_mutex);
if (td->bw_log) {
if (td->o.bw_log_file) {
close_ioengine(td);
cgroup_shutdown(td, &cgroup_mnt);
- if (td->o.cpumask_set) {
- int ret = fio_cpuset_exit(&td->o.cpumask);
+ if (o->cpumask_set) {
+ int ret = fio_cpuset_exit(&o->cpumask);
td_verror(td, ret, "fio_cpuset_exit");
}
unsigned long spent;
unsigned int i, todo, nr_running, m_rate, t_rate, nr_started;
- if (fio_pin_memory())
- return;
-
if (fio_gtod_offload && fio_start_gtod_thread())
return;
set_sig_handlers();
+ nr_thread = nr_process = 0;
+ for_each_td(td, i) {
+ if (td->o.use_thread)
+ nr_thread++;
+ else
+ nr_process++;
+ }
+
if (output_format == FIO_OUTPUT_NORMAL) {
log_info("Starting ");
if (nr_thread)
reap_threads(&nr_running, &t_rate, &m_rate);
- if (todo) {
- if (is_backend)
- fio_server_idle_loop();
- else
- usleep(100000);
- }
+ if (todo)
+ usleep(100000);
}
while (nr_running) {
reap_threads(&nr_running, &t_rate, &m_rate);
-
- if (is_backend)
- fio_server_idle_loop();
- else
- usleep(10000);
+ usleep(10000);
}
fio_idle_prof_stop();
update_io_ticks();
- fio_unpin_memory();
}
void wait_for_disk_thread_exit(void)
return 0;
if (write_bw_log) {
- setup_log(&agg_io_log[DDIR_READ], 0);
- setup_log(&agg_io_log[DDIR_WRITE], 0);
- setup_log(&agg_io_log[DDIR_TRIM], 0);
+ setup_log(&agg_io_log[DDIR_READ], 0, IO_LOG_TYPE_BW);
+ setup_log(&agg_io_log[DDIR_WRITE], 0, IO_LOG_TYPE_BW);
+ setup_log(&agg_io_log[DDIR_TRIM], 0, IO_LOG_TYPE_BW);
}
startup_mutex = fio_mutex_init(FIO_MUTEX_LOCKED);
--- /dev/null
- o->lockfile_batch = le32_to_cpu(top->lockfile_batch);
+#include <string.h>
+
+#include "thread_options.h"
+
+static void string_to_cpu(char **dst, const uint8_t *src)
+{
+ const char *__src = (const char *) src;
+
+ if (strlen(__src))
+ *dst = strdup(__src);
+}
+
/*
 * Serialize an optional host-side C string into the fixed-size wire buffer.
 * A NULL source is encoded as the empty string.
 */
static void string_to_net(uint8_t *dst, const char *src)
{
	char *out = (char *) dst;

	if (!src) {
		out[0] = '\0';
		return;
	}

	strcpy(out, src);
}
+
+void convert_thread_options_to_cpu(struct thread_options *o,
+ struct thread_options_pack *top)
+{
+ int i, j;
+
+ string_to_cpu(&o->description, top->description);
+ string_to_cpu(&o->name, top->name);
+ string_to_cpu(&o->directory, top->directory);
+ string_to_cpu(&o->filename, top->filename);
+ string_to_cpu(&o->opendir, top->opendir);
+ string_to_cpu(&o->ioengine, top->ioengine);
+ string_to_cpu(&o->mmapfile, top->mmapfile);
+ string_to_cpu(&o->read_iolog_file, top->read_iolog_file);
+ string_to_cpu(&o->write_iolog_file, top->write_iolog_file);
+ string_to_cpu(&o->bw_log_file, top->bw_log_file);
+ string_to_cpu(&o->lat_log_file, top->lat_log_file);
+ string_to_cpu(&o->iops_log_file, top->iops_log_file);
+ string_to_cpu(&o->replay_redirect, top->replay_redirect);
+ string_to_cpu(&o->exec_prerun, top->exec_prerun);
+ string_to_cpu(&o->exec_postrun, top->exec_postrun);
+ string_to_cpu(&o->ioscheduler, top->ioscheduler);
+ string_to_cpu(&o->profile, top->profile);
+ string_to_cpu(&o->cgroup, top->cgroup);
+
+ o->td_ddir = le32_to_cpu(top->td_ddir);
+ o->rw_seq = le32_to_cpu(top->rw_seq);
+ o->kb_base = le32_to_cpu(top->kb_base);
+ o->ddir_seq_nr = le32_to_cpu(top->ddir_seq_nr);
+ o->ddir_seq_add = le64_to_cpu(top->ddir_seq_add);
+ o->iodepth = le32_to_cpu(top->iodepth);
+ o->iodepth_low = le32_to_cpu(top->iodepth_low);
+ o->iodepth_batch = le32_to_cpu(top->iodepth_batch);
+ o->iodepth_batch_complete = le32_to_cpu(top->iodepth_batch_complete);
+ o->size = le64_to_cpu(top->size);
+ o->size_percent = le32_to_cpu(top->size_percent);
+ o->fill_device = le32_to_cpu(top->fill_device);
+ o->file_size_low = le64_to_cpu(top->file_size_low);
+ o->file_size_high = le64_to_cpu(top->file_size_high);
+ o->start_offset = le64_to_cpu(top->start_offset);
+
+ for (i = 0; i < DDIR_RWDIR_CNT; i++) {
+ o->bs[i] = le32_to_cpu(top->bs[i]);
+ o->ba[i] = le32_to_cpu(top->ba[i]);
+ o->min_bs[i] = le32_to_cpu(top->min_bs[i]);
+ o->max_bs[i] = le32_to_cpu(top->max_bs[i]);
+ o->bssplit_nr[i] = le32_to_cpu(top->bssplit_nr[i]);
+
+ if (o->bssplit_nr[i]) {
+ o->bssplit[i] = malloc(o->bssplit_nr[i] * sizeof(struct bssplit));
+ for (j = 0; j < o->bssplit_nr[i]; j++) {
+ o->bssplit[i][j].bs = le32_to_cpu(top->bssplit[i][j].bs);
+ o->bssplit[i][j].perc = le32_to_cpu(top->bssplit[i][j].perc);
+ }
+ }
+
+ o->rwmix[i] = le32_to_cpu(top->rwmix[i]);
+ o->rate[i] = le32_to_cpu(top->rate[i]);
+ o->ratemin[i] = le32_to_cpu(top->ratemin[i]);
+ o->rate_iops[i] = le32_to_cpu(top->rate_iops[i]);
+ o->rate_iops_min[i] = le32_to_cpu(top->rate_iops_min[i]);
+ }
+
+ o->ratecycle = le32_to_cpu(top->ratecycle);
+ o->nr_files = le32_to_cpu(top->nr_files);
+ o->open_files = le32_to_cpu(top->open_files);
+ o->file_lock_mode = le32_to_cpu(top->file_lock_mode);
- top->lockfile_batch = cpu_to_le32(o->lockfile_batch);
+ o->odirect = le32_to_cpu(top->odirect);
+ o->invalidate_cache = le32_to_cpu(top->invalidate_cache);
+ o->create_serialize = le32_to_cpu(top->create_serialize);
+ o->create_fsync = le32_to_cpu(top->create_fsync);
+ o->create_on_open = le32_to_cpu(top->create_on_open);
+ o->create_only = le32_to_cpu(top->create_only);
+ o->end_fsync = le32_to_cpu(top->end_fsync);
+ o->pre_read = le32_to_cpu(top->pre_read);
+ o->sync_io = le32_to_cpu(top->sync_io);
+ o->verify = le32_to_cpu(top->verify);
+ o->do_verify = le32_to_cpu(top->do_verify);
+ o->verifysort = le32_to_cpu(top->verifysort);
+ o->verifysort_nr = le32_to_cpu(top->verifysort_nr);
+ o->experimental_verify = le32_to_cpu(top->experimental_verify);
+ o->verify_interval = le32_to_cpu(top->verify_interval);
+ o->verify_offset = le32_to_cpu(top->verify_offset);
+
+ memcpy(o->verify_pattern, top->verify_pattern, MAX_PATTERN_SIZE);
+
+ o->verify_pattern_bytes = le32_to_cpu(top->verify_pattern_bytes);
+ o->verify_fatal = le32_to_cpu(top->verify_fatal);
+ o->verify_dump = le32_to_cpu(top->verify_dump);
+ o->verify_async = le32_to_cpu(top->verify_async);
+ o->verify_batch = le32_to_cpu(top->verify_batch);
+ o->use_thread = le32_to_cpu(top->use_thread);
+ o->unlink = le32_to_cpu(top->unlink);
+ o->do_disk_util = le32_to_cpu(top->do_disk_util);
+ o->override_sync = le32_to_cpu(top->override_sync);
+ o->rand_repeatable = le32_to_cpu(top->rand_repeatable);
+ o->use_os_rand = le32_to_cpu(top->use_os_rand);
+ o->log_avg_msec = le32_to_cpu(top->log_avg_msec);
+ o->norandommap = le32_to_cpu(top->norandommap);
+ o->softrandommap = le32_to_cpu(top->softrandommap);
+ o->bs_unaligned = le32_to_cpu(top->bs_unaligned);
+ o->fsync_on_close = le32_to_cpu(top->fsync_on_close);
+ o->random_distribution = le32_to_cpu(top->random_distribution);
+ o->zipf_theta.u.f = fio_uint64_to_double(le64_to_cpu(top->zipf_theta.u.i));
+ o->pareto_h.u.f = fio_uint64_to_double(le64_to_cpu(top->pareto_h.u.i));
+ o->random_generator = le32_to_cpu(top->random_generator);
+ o->hugepage_size = le32_to_cpu(top->hugepage_size);
+ o->rw_min_bs = le32_to_cpu(top->rw_min_bs);
+ o->thinktime = le32_to_cpu(top->thinktime);
+ o->thinktime_spin = le32_to_cpu(top->thinktime_spin);
+ o->thinktime_blocks = le32_to_cpu(top->thinktime_blocks);
+ o->fsync_blocks = le32_to_cpu(top->fsync_blocks);
+ o->fdatasync_blocks = le32_to_cpu(top->fdatasync_blocks);
+ o->barrier_blocks = le32_to_cpu(top->barrier_blocks);
+
+ o->verify_backlog = le64_to_cpu(top->verify_backlog);
+ o->start_delay = le64_to_cpu(top->start_delay);
+ o->timeout = le64_to_cpu(top->timeout);
+ o->ramp_time = le64_to_cpu(top->ramp_time);
+ o->zone_range = le64_to_cpu(top->zone_range);
+ o->zone_size = le64_to_cpu(top->zone_size);
+ o->zone_skip = le64_to_cpu(top->zone_skip);
+ o->lockmem = le64_to_cpu(top->lockmem);
+ o->offset_increment = le64_to_cpu(top->offset_increment);
+
+ o->overwrite = le32_to_cpu(top->overwrite);
+ o->bw_avg_time = le32_to_cpu(top->bw_avg_time);
+ o->iops_avg_time = le32_to_cpu(top->iops_avg_time);
+ o->loops = le32_to_cpu(top->loops);
+ o->mem_type = le32_to_cpu(top->mem_type);
+ o->mem_align = le32_to_cpu(top->mem_align);
+ o->max_latency = le32_to_cpu(top->max_latency);
+ o->stonewall = le32_to_cpu(top->stonewall);
+ o->new_group = le32_to_cpu(top->new_group);
+ o->numjobs = le32_to_cpu(top->numjobs);
+ o->cpumask_set = le32_to_cpu(top->cpumask_set);
+ o->verify_cpumask_set = le32_to_cpu(top->verify_cpumask_set);
+ o->iolog = le32_to_cpu(top->iolog);
+ o->rwmixcycle = le32_to_cpu(top->rwmixcycle);
+ o->nice = le32_to_cpu(top->nice);
+ o->ioprio = le32_to_cpu(top->ioprio);
+ o->ioprio_class = le32_to_cpu(top->ioprio_class);
+ o->file_service_type = le32_to_cpu(top->file_service_type);
+ o->group_reporting = le32_to_cpu(top->group_reporting);
+ o->fadvise_hint = le32_to_cpu(top->fadvise_hint);
+ o->fallocate_mode = le32_to_cpu(top->fallocate_mode);
+ o->zero_buffers = le32_to_cpu(top->zero_buffers);
+ o->refill_buffers = le32_to_cpu(top->refill_buffers);
+ o->scramble_buffers = le32_to_cpu(top->scramble_buffers);
+ o->time_based = le32_to_cpu(top->time_based);
+ o->disable_lat = le32_to_cpu(top->disable_lat);
+ o->disable_clat = le32_to_cpu(top->disable_clat);
+ o->disable_slat = le32_to_cpu(top->disable_slat);
+ o->disable_bw = le32_to_cpu(top->disable_bw);
+ o->unified_rw_rep = le32_to_cpu(top->unified_rw_rep);
+ o->gtod_reduce = le32_to_cpu(top->gtod_reduce);
+ o->gtod_cpu = le32_to_cpu(top->gtod_cpu);
+ o->gtod_offload = le32_to_cpu(top->gtod_offload);
+ o->clocksource = le32_to_cpu(top->clocksource);
+ o->no_stall = le32_to_cpu(top->no_stall);
+ o->trim_percentage = le32_to_cpu(top->trim_percentage);
+ o->trim_batch = le32_to_cpu(top->trim_batch);
+ o->trim_zero = le32_to_cpu(top->trim_zero);
+ o->clat_percentiles = le32_to_cpu(top->clat_percentiles);
+ o->percentile_precision = le32_to_cpu(top->percentile_precision);
+ o->continue_on_error = le32_to_cpu(top->continue_on_error);
+ o->cgroup_weight = le32_to_cpu(top->cgroup_weight);
+ o->cgroup_nodelete = le32_to_cpu(top->cgroup_nodelete);
+ o->uid = le32_to_cpu(top->uid);
+ o->gid = le32_to_cpu(top->gid);
+ o->flow_id = __le32_to_cpu(top->flow_id);
+ o->flow = __le32_to_cpu(top->flow);
+ o->flow_watermark = __le32_to_cpu(top->flow_watermark);
+ o->flow_sleep = le32_to_cpu(top->flow_sleep);
+ o->sync_file_range = le32_to_cpu(top->sync_file_range);
+ o->compress_percentage = le32_to_cpu(top->compress_percentage);
+ o->compress_chunk = le32_to_cpu(top->compress_chunk);
+
+ o->trim_backlog = le64_to_cpu(top->trim_backlog);
+
+ for (i = 0; i < FIO_IO_U_LIST_MAX_LEN; i++)
+ o->percentile_list[i].u.f = fio_uint64_to_double(le64_to_cpu(top->percentile_list[i].u.i));
+#if 0
+ uint8_t cpumask[FIO_TOP_STR_MAX];
+ uint8_t verify_cpumask[FIO_TOP_STR_MAX];
+#endif
+}
+
/*
 * Pack host-CPU thread_options into the little-endian wire format for
 * transmission to a remote fio server/client. Inverse of
 * convert_thread_options_to_cpu(); strings are copied into fixed wire
 * buffers (NULL encoded as ""), doubles travel as raw uint64 bit patterns.
 */
void convert_thread_options_to_net(struct thread_options_pack *top,
				   struct thread_options *o)
{
	int i, j;

	string_to_net(top->description, o->description);
	string_to_net(top->name, o->name);
	string_to_net(top->directory, o->directory);
	string_to_net(top->filename, o->filename);
	string_to_net(top->opendir, o->opendir);
	string_to_net(top->ioengine, o->ioengine);
	string_to_net(top->mmapfile, o->mmapfile);
	string_to_net(top->read_iolog_file, o->read_iolog_file);
	string_to_net(top->write_iolog_file, o->write_iolog_file);
	string_to_net(top->bw_log_file, o->bw_log_file);
	string_to_net(top->lat_log_file, o->lat_log_file);
	string_to_net(top->iops_log_file, o->iops_log_file);
	string_to_net(top->replay_redirect, o->replay_redirect);
	string_to_net(top->exec_prerun, o->exec_prerun);
	string_to_net(top->exec_postrun, o->exec_postrun);
	string_to_net(top->ioscheduler, o->ioscheduler);
	string_to_net(top->profile, o->profile);
	string_to_net(top->cgroup, o->cgroup);

	top->td_ddir = cpu_to_le32(o->td_ddir);
	top->rw_seq = cpu_to_le32(o->rw_seq);
	top->kb_base = cpu_to_le32(o->kb_base);
	top->ddir_seq_nr = cpu_to_le32(o->ddir_seq_nr);
	top->iodepth = cpu_to_le32(o->iodepth);
	top->iodepth_low = cpu_to_le32(o->iodepth_low);
	top->iodepth_batch = cpu_to_le32(o->iodepth_batch);
	top->iodepth_batch_complete = cpu_to_le32(o->iodepth_batch_complete);
	top->size_percent = cpu_to_le32(o->size_percent);
	top->fill_device = cpu_to_le32(o->fill_device);
	top->ratecycle = cpu_to_le32(o->ratecycle);
	top->nr_files = cpu_to_le32(o->nr_files);
	top->open_files = cpu_to_le32(o->open_files);
	top->file_lock_mode = cpu_to_le32(o->file_lock_mode);
	top->odirect = cpu_to_le32(o->odirect);
	top->invalidate_cache = cpu_to_le32(o->invalidate_cache);
	top->create_serialize = cpu_to_le32(o->create_serialize);
	top->create_fsync = cpu_to_le32(o->create_fsync);
	top->create_on_open = cpu_to_le32(o->create_on_open);
	top->create_only = cpu_to_le32(o->create_only);
	top->end_fsync = cpu_to_le32(o->end_fsync);
	top->pre_read = cpu_to_le32(o->pre_read);
	top->sync_io = cpu_to_le32(o->sync_io);
	top->verify = cpu_to_le32(o->verify);
	top->do_verify = cpu_to_le32(o->do_verify);
	top->verifysort = cpu_to_le32(o->verifysort);
	top->verifysort_nr = cpu_to_le32(o->verifysort_nr);
	top->experimental_verify = cpu_to_le32(o->experimental_verify);
	top->verify_interval = cpu_to_le32(o->verify_interval);
	top->verify_offset = cpu_to_le32(o->verify_offset);
	top->verify_pattern_bytes = cpu_to_le32(o->verify_pattern_bytes);
	top->verify_fatal = cpu_to_le32(o->verify_fatal);
	top->verify_dump = cpu_to_le32(o->verify_dump);
	top->verify_async = cpu_to_le32(o->verify_async);
	top->verify_batch = cpu_to_le32(o->verify_batch);
	top->use_thread = cpu_to_le32(o->use_thread);
	top->unlink = cpu_to_le32(o->unlink);
	top->do_disk_util = cpu_to_le32(o->do_disk_util);
	top->override_sync = cpu_to_le32(o->override_sync);
	top->rand_repeatable = cpu_to_le32(o->rand_repeatable);
	top->use_os_rand = cpu_to_le32(o->use_os_rand);
	top->log_avg_msec = cpu_to_le32(o->log_avg_msec);
	top->norandommap = cpu_to_le32(o->norandommap);
	top->softrandommap = cpu_to_le32(o->softrandommap);
	top->bs_unaligned = cpu_to_le32(o->bs_unaligned);
	top->fsync_on_close = cpu_to_le32(o->fsync_on_close);
	top->random_distribution = cpu_to_le32(o->random_distribution);
	top->zipf_theta.u.i = __cpu_to_le64(fio_double_to_uint64(o->zipf_theta.u.f));
	top->pareto_h.u.i = __cpu_to_le64(fio_double_to_uint64(o->pareto_h.u.f));
	top->random_generator = cpu_to_le32(o->random_generator);
	top->hugepage_size = cpu_to_le32(o->hugepage_size);
	top->rw_min_bs = cpu_to_le32(o->rw_min_bs);
	top->thinktime = cpu_to_le32(o->thinktime);
	top->thinktime_spin = cpu_to_le32(o->thinktime_spin);
	top->thinktime_blocks = cpu_to_le32(o->thinktime_blocks);
	top->fsync_blocks = cpu_to_le32(o->fsync_blocks);
	top->fdatasync_blocks = cpu_to_le32(o->fdatasync_blocks);
	top->barrier_blocks = cpu_to_le32(o->barrier_blocks);
	top->overwrite = cpu_to_le32(o->overwrite);
	top->bw_avg_time = cpu_to_le32(o->bw_avg_time);
	top->iops_avg_time = cpu_to_le32(o->iops_avg_time);
	top->loops = cpu_to_le32(o->loops);
	top->mem_type = cpu_to_le32(o->mem_type);
	top->mem_align = cpu_to_le32(o->mem_align);
	top->max_latency = cpu_to_le32(o->max_latency);
	top->stonewall = cpu_to_le32(o->stonewall);
	top->new_group = cpu_to_le32(o->new_group);
	top->numjobs = cpu_to_le32(o->numjobs);
	top->cpumask_set = cpu_to_le32(o->cpumask_set);
	top->verify_cpumask_set = cpu_to_le32(o->verify_cpumask_set);
	top->iolog = cpu_to_le32(o->iolog);
	top->rwmixcycle = cpu_to_le32(o->rwmixcycle);
	top->nice = cpu_to_le32(o->nice);
	top->ioprio = cpu_to_le32(o->ioprio);
	top->ioprio_class = cpu_to_le32(o->ioprio_class);
	top->file_service_type = cpu_to_le32(o->file_service_type);
	top->group_reporting = cpu_to_le32(o->group_reporting);
	top->fadvise_hint = cpu_to_le32(o->fadvise_hint);
	top->fallocate_mode = cpu_to_le32(o->fallocate_mode);
	top->zero_buffers = cpu_to_le32(o->zero_buffers);
	top->refill_buffers = cpu_to_le32(o->refill_buffers);
	top->scramble_buffers = cpu_to_le32(o->scramble_buffers);
	top->time_based = cpu_to_le32(o->time_based);
	top->disable_lat = cpu_to_le32(o->disable_lat);
	top->disable_clat = cpu_to_le32(o->disable_clat);
	top->disable_slat = cpu_to_le32(o->disable_slat);
	top->disable_bw = cpu_to_le32(o->disable_bw);
	top->unified_rw_rep = cpu_to_le32(o->unified_rw_rep);
	top->gtod_reduce = cpu_to_le32(o->gtod_reduce);
	top->gtod_cpu = cpu_to_le32(o->gtod_cpu);
	top->gtod_offload = cpu_to_le32(o->gtod_offload);
	top->clocksource = cpu_to_le32(o->clocksource);
	top->no_stall = cpu_to_le32(o->no_stall);
	top->trim_percentage = cpu_to_le32(o->trim_percentage);
	top->trim_batch = cpu_to_le32(o->trim_batch);
	top->trim_zero = cpu_to_le32(o->trim_zero);
	top->clat_percentiles = cpu_to_le32(o->clat_percentiles);
	top->percentile_precision = cpu_to_le32(o->percentile_precision);
	top->continue_on_error = cpu_to_le32(o->continue_on_error);
	top->cgroup_weight = cpu_to_le32(o->cgroup_weight);
	top->cgroup_nodelete = cpu_to_le32(o->cgroup_nodelete);
	top->uid = cpu_to_le32(o->uid);
	top->gid = cpu_to_le32(o->gid);
	top->flow_id = __cpu_to_le32(o->flow_id);
	top->flow = __cpu_to_le32(o->flow);
	top->flow_watermark = __cpu_to_le32(o->flow_watermark);
	top->flow_sleep = cpu_to_le32(o->flow_sleep);
	top->sync_file_range = cpu_to_le32(o->sync_file_range);
	top->compress_percentage = cpu_to_le32(o->compress_percentage);
	top->compress_chunk = cpu_to_le32(o->compress_chunk);

	/* Per-direction (read/write/trim) settings. */
	for (i = 0; i < DDIR_RWDIR_CNT; i++) {
		top->bs[i] = cpu_to_le32(o->bs[i]);
		top->ba[i] = cpu_to_le32(o->ba[i]);
		top->min_bs[i] = cpu_to_le32(o->min_bs[i]);
		top->max_bs[i] = cpu_to_le32(o->max_bs[i]);
		top->bssplit_nr[i] = cpu_to_le32(o->bssplit_nr[i]);

		if (o->bssplit_nr[i]) {
			unsigned int bssplit_nr = o->bssplit_nr[i];

			/* Wire array is fixed size; clamp and warn on overflow. */
			if (bssplit_nr > BSSPLIT_MAX) {
				log_err("fio: BSSPLIT_MAX is too small\n");
				bssplit_nr = BSSPLIT_MAX;
			}
			for (j = 0; j < bssplit_nr; j++) {
				top->bssplit[i][j].bs = cpu_to_le32(o->bssplit[i][j].bs);
				top->bssplit[i][j].perc = cpu_to_le32(o->bssplit[i][j].perc);
			}
		}

		top->rwmix[i] = cpu_to_le32(o->rwmix[i]);
		top->rate[i] = cpu_to_le32(o->rate[i]);
		top->ratemin[i] = cpu_to_le32(o->ratemin[i]);
		top->rate_iops[i] = cpu_to_le32(o->rate_iops[i]);
		top->rate_iops_min[i] = cpu_to_le32(o->rate_iops_min[i]);
	}

	/* Raw byte pattern, no endianness to fix up. */
	memcpy(top->verify_pattern, o->verify_pattern, MAX_PATTERN_SIZE);

	top->size = __cpu_to_le64(o->size);
	top->verify_backlog = __cpu_to_le64(o->verify_backlog);
	top->start_delay = __cpu_to_le64(o->start_delay);
	top->timeout = __cpu_to_le64(o->timeout);
	top->ramp_time = __cpu_to_le64(o->ramp_time);
	top->zone_range = __cpu_to_le64(o->zone_range);
	top->zone_size = __cpu_to_le64(o->zone_size);
	top->zone_skip = __cpu_to_le64(o->zone_skip);
	top->lockmem = __cpu_to_le64(o->lockmem);
	top->ddir_seq_add = __cpu_to_le64(o->ddir_seq_add);
	top->file_size_low = __cpu_to_le64(o->file_size_low);
	top->file_size_high = __cpu_to_le64(o->file_size_high);
	top->start_offset = __cpu_to_le64(o->start_offset);
	top->trim_backlog = __cpu_to_le64(o->trim_backlog);
	top->offset_increment = __cpu_to_le64(o->offset_increment);

	for (i = 0; i < FIO_IO_U_LIST_MAX_LEN; i++)
		top->percentile_list[i].u.i = __cpu_to_le64(fio_double_to_uint64(o->percentile_list[i].u.f));
#if 0
	uint8_t cpumask[FIO_TOP_STR_MAX];
	uint8_t verify_cpumask[FIO_TOP_STR_MAX];
#endif

}
+
/*
 * Basic conversion test. We'd really need to fill in more of the options
 * to have a thorough test. Even better, we should auto-generate the
 * converter functions...
 */
int fio_test_cconv(struct thread_options *__o)
{
	struct thread_options o;
	struct thread_options_pack top1, top2;

	/* Zero both packs so bytes the converters never touch compare equal. */
	memset(&top1, 0, sizeof(top1));
	memset(&top2, 0, sizeof(top2));

	/* Round-trip: cpu -> net -> cpu -> net; the two net images must match. */
	convert_thread_options_to_net(&top1, __o);
	memset(&o, 0, sizeof(o));
	convert_thread_options_to_cpu(&o, &top1);
	convert_thread_options_to_net(&top2, &o);

	/* Returns 0 on a faithful round-trip, non-zero on any mismatch. */
	return memcmp(&top1, &top2, sizeof(top1));
}
targetos=""
cpu=""
+ cross_prefix=${cross_prefix-${CROSS_COMPILE}}
cc="${CC-${cross_prefix}gcc}"
+# default options
show_help="no"
exit_val=0
+gfio="no"
# parse options
for opt do
;;
--build-32bit-win=*) build_32bit_win="$optarg"
;;
- --help)
- show_help="yes"
+ --enable-gfio)
+ gfio="yes"
;;
+ --help)
+ show_help="yes"
+ ;;
*)
echo "Bad option $opt"
show_help="yes"
echo "--cc= Specify compiler to use"
echo "--extra-cflags= Specify extra CFLAGS to pass to compiler"
echo "--build-32bit-win= Specify yes for a 32-bit build on Windows"
+ echo "--enable-gfio Enable building of gtk gfio"
exit $exit_val
fi
targetos=`uname -s`
fi
+ echo "# Automatically generated by configure - do not modify" > $config_host_mak
+ printf "# Configured with:" >> $config_host_mak
+ printf " '%s'" "$0" "$@" >> $config_host_mak
+ echo >> $config_host_mak
+ echo "CONFIG_TARGET_OS=$targetos" >> $config_host_mak
+
# Some host OSes need non-standard checks for which CPU to use.
# Note that these checks are broken for cross-compilation: if you're
# cross-compiling to one of these OSes then you'll need to specify
echo "EXTFLAGS=$CFLAGS -include config-host.h -D_GNU_SOURCE" >> $config_host_mak
exit 0
;;
- Android)
- output_sym "CONFIG_32BIT"
- output_sym "CONFIG_LITTLE_ENDIAN"
- output_sym "CONFIG_SOCKLEN_T"
- output_sym "CONFIG_GETTIMEOFDAY"
- output_sym "CONFIG_CLOCK_GETTIME"
- output_sym "CONFIG_CLOCK_MONOTONIC"
- echo "CC=$cc" >> $config_host_mak
- echo "EXTFLAGS=$CFLAGS -include config-host.h -DFIO_NO_HAVE_SHM_H -D_GNU_SOURCE" >> $config_host_mak
- exit 0
esac
if test ! -z "$cpu" ; then
cc="${CC-${cross_prefix}gcc}"
+ ##########################################
+ # check cross compile
+
+ cross_compile="no"
+ cat > $TMPC <<EOF
+ int main(void)
+ {
+ return 0;
+ }
+ EOF
+ if compile_prog "" "" "cross"; then
+ $TMPE 2>/dev/null || cross_compile="yes"
+ else
+ fatal "compile test failed"
+ fi
+
##########################################
# check endianness
bigendian="no"
- cat > $TMPC <<EOF
+ if test "$cross_compile" = "no" ; then
+ cat > $TMPC <<EOF
#include <inttypes.h>
int main(void)
{
return (*((uint8_t*)(&i))) == 0x67;
}
EOF
- if compile_prog "" "" "endian"; then
- $TMPE && bigendian="yes"
+ if compile_prog "" "" "endian"; then
+ $TMPE && bigendian="yes"
+ fi
+ else
+ # If we're cross compiling, try our best to work it out and rely on the
+ # run-time check to fail if we get it wrong.
+ cat > $TMPC <<EOF
+ #include <endian.h>
+ int main(void)
+ {
+ #if __BYTE_ORDER != __BIG_ENDIAN
+ # error "Unknown endianness"
+ #endif
+ }
+ EOF
+ compile_prog "" "" "endian" && bigendian="yes"
+ check_define "__ARMEB__" && bigendian="yes"
+ check_define "__MIPSEB__" && bigendian="yes"
fi
echo "CPU $cpu"
echo "Big endian $bigendian"
echo "Compiler $cc"
+ echo "Cross compile $cross_compile"
echo
##########################################
# check for wordsize
wordsize="0"
cat > $TMPC <<EOF
- #include <stdio.h>
+ #include <limits.h>
+ #define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
int main(void)
{
- unsigned int wsize = sizeof(long) * 8;
- printf("%d\n", wsize);
+ BUILD_BUG_ON(sizeof(long)*CHAR_BIT != WORDSIZE);
return 0;
}
EOF
- if compile_prog "" "" "wordsize"; then
- wordsize=`$TMPE`
+ if compile_prog "-DWORDSIZE=32" "" "wordsize"; then
+ wordsize="32"
+ elif compile_prog "-DWORDSIZE=64" "" "wordsize"; then
+ wordsize="64"
+ else
+ fatal "Unknown wordsize"
fi
echo "Wordsize $wordsize"
# socklen_t probe
socklen_t="no"
cat > $TMPC << EOF
- #include <string.h>
- #include <netinet/in.h>
+ #include <sys/socket.h>
int main(int argc, char **argv)
{
socklen_t len = 0;
echo "__thread $tls_thread"
##########################################
+# Check whether gtk and gthread are available for building gfio
+if test "$gfio" = "yes" ; then
+ cat > $TMPC << EOF
+#include <glib.h>
+#include <cairo.h>
+#include <gtk/gtk.h>
+int main(void)
+{
+ gdk_threads_enter();
+ gdk_threads_leave();
+
+ printf("%d", GTK_CHECK_VERSION(2, 18, 0));
+}
+EOF
+GTK_CFLAGS=$(pkg-config --cflags gtk+-2.0 gthread-2.0)
+if test "$?" != "0" ; then
+ echo "configure: gtk and gthread not found"
+ exit 1
+fi
+GTK_LIBS=$(pkg-config --libs gtk+-2.0 gthread-2.0)
+if test "$?" != "0" ; then
+ echo "configure: gtk and gthread not found"
+ exit 1
+fi
+if compile_prog "$GTK_CFLAGS" "$GTK_LIBS" "gfio" ; then
+ r=$($TMPE)
+ if test "$r" != "0" ; then
+ gfio="yes"
+ LIBS="$LIBS $GTK_LIBS"
+ CFLAGS="$CFLAGS $GTK_CFLAGS"
+ else
+ echo "GTK found, but need version 2.18 or higher"
+ gfio="no"
+ fi
+else
+ echo "Please install gtk and gdk libraries"
+ gfio="no"
+fi
+fi
+
+echo "gfio $gfio"
+
# Check whether we have getrusage(RUSAGE_THREAD)
rusage_thread="no"
cat > $TMPC << EOF
#############################################################################
- echo "# Automatically generated by configure - do not modify" > $config_host_mak
- printf "# Configured with:" >> $config_host_mak
- printf " '%s'" "$0" "$@" >> $config_host_mak
- echo >> $config_host_mak
-
if test "$wordsize" = "64" ; then
output_sym "CONFIG_64BIT"
elif test "$wordsize" = "32" ; then
if test "$rusage_thread" = "yes" ; then
output_sym "CONFIG_RUSAGE_THREAD"
fi
+if test "$gfio" = "yes" ; then
+ echo "CONFIG_GFIO=y" >> $config_host_mak
+fi
if test "$sched_idle" = "yes" ; then
output_sym "CONFIG_SCHED_IDLE"
fi
fi
echo "LIBS+=$LIBS" >> $config_host_mak
+echo "CFLAGS+=$CFLAGS" >> $config_host_mak
echo "CC=$cc" >> $config_host_mak
echo "EXTFLAGS=$EXTFLAGS $CFLAGS" >> $config_host_mak
r = fallocate(f->fd, FALLOC_FL_KEEP_SIZE, 0,
f->real_file_size);
- if (r != 0) {
+ if (r != 0)
td_verror(td, errno, "fallocate");
- }
+
break;
#endif /* CONFIG_LINUX_FALLOCATE */
default:
* racy, need the __f->lock locked
*/
f->lock = __f->lock;
- f->lock_owner = __f->lock_owner;
- f->lock_batch = __f->lock_batch;
- f->lock_ddir = __f->lock_ddir;
from_hash = 1;
} else {
dprint(FD_FILE, "file not found in hash %s\n", f->file_name);
seed = td->rand_seeds[4];
if (td->o.random_distribution == FIO_RAND_DIST_ZIPF)
- zipf_init(&f->zipf, nranges, td->o.zipf_theta, seed);
+ zipf_init(&f->zipf, nranges, td->o.zipf_theta.u.f, seed);
else
- pareto_init(&f->zipf, nranges, td->o.pareto_h, seed);
+ pareto_init(&f->zipf, nranges, td->o.pareto_h.u.f, seed);
return 1;
}
seed = td->rand_seeds[FIO_RAND_BLOCK_OFF];
- if (!lfsr_init(&f->lfsr, blocks, seed))
+ if (!lfsr_init(&f->lfsr, blocks, seed, seed & 0xF))
continue;
} else if (!td->o.norandommap) {
f->io_axmap = axmap_new(blocks);
td->o.filename = NULL;
free(td->files);
+ free(td->file_locks);
td->files_index = 0;
td->files = NULL;
+ td->file_locks = NULL;
td->o.nr_files = 0;
}
log_err("fio: realloc OOM\n");
assert(0);
}
+ if (td->o.file_lock_mode != FILE_LOCK_NONE) {
+ td->file_locks = realloc(td->file_locks, new_size);
+ if (!td->file_locks) {
+ log_err("fio: realloc OOM\n");
+ assert(0);
+ }
+ td->file_locks[cur_files] = FILE_LOCK_NONE;
+ }
td->files_size = new_size;
}
td->files[cur_files] = f;
case FILE_LOCK_NONE:
break;
case FILE_LOCK_READWRITE:
- f->lock = fio_mutex_rw_init();
+ f->rwlock = fio_rwlock_init();
break;
case FILE_LOCK_EXCLUSIVE:
f->lock = fio_mutex_init(FIO_MUTEX_UNLOCKED);
if (!f->lock || td->o.file_lock_mode == FILE_LOCK_NONE)
return;
- if (f->lock_owner == td && f->lock_batch--)
- return;
-
if (td->o.file_lock_mode == FILE_LOCK_READWRITE) {
if (ddir == DDIR_READ)
- fio_mutex_down_read(f->lock);
+ fio_rwlock_read(f->rwlock);
else
- fio_mutex_down_write(f->lock);
+ fio_rwlock_write(f->rwlock);
} else if (td->o.file_lock_mode == FILE_LOCK_EXCLUSIVE)
fio_mutex_down(f->lock);
- f->lock_owner = td;
- f->lock_batch = td->o.lockfile_batch;
- f->lock_ddir = ddir;
+ td->file_locks[f->fileno] = td->o.file_lock_mode;
}
void unlock_file(struct thread_data *td, struct fio_file *f)
{
if (!f->lock || td->o.file_lock_mode == FILE_LOCK_NONE)
return;
- if (f->lock_batch)
- return;
-
- if (td->o.file_lock_mode == FILE_LOCK_READWRITE) {
- const int is_read = f->lock_ddir == DDIR_READ;
- int val = fio_mutex_getval(f->lock);
-
- if ((is_read && val == 1) || (!is_read && val == -1))
- f->lock_owner = NULL;
-
- if (is_read)
- fio_mutex_up_read(f->lock);
- else
- fio_mutex_up_write(f->lock);
- } else if (td->o.file_lock_mode == FILE_LOCK_EXCLUSIVE) {
- int val = fio_mutex_getval(f->lock);
-
- if (val == 0)
- f->lock_owner = NULL;
+ if (td->o.file_lock_mode == FILE_LOCK_READWRITE)
+ fio_rwlock_unlock(f->rwlock);
+ else if (td->o.file_lock_mode == FILE_LOCK_EXCLUSIVE)
fio_mutex_up(f->lock);
- }
+
+ td->file_locks[f->fileno] = FILE_LOCK_NONE;
}
void unlock_file_all(struct thread_data *td, struct fio_file *f)
{
- if (f->lock_owner != td)
- return;
-
- f->lock_batch = 0;
- unlock_file(td, f);
+ if (td->file_locks[f->fileno] != FILE_LOCK_NONE)
+ unlock_file(td, f);
}
static int recurse_dir(struct thread_data *td, const char *dirname)
td->files = malloc(org->files_index * sizeof(f));
+ if (td->o.file_lock_mode != FILE_LOCK_NONE)
+ td->file_locks = malloc(org->files_index);
+
for_each_file(org, f, i) {
struct fio_file *__f;
time, but writes get exclusive access.
.RE
.P
- The option may be post-fixed with a lock batch number. If set, then each
- thread/process may do that amount of IOs to the file before giving up the lock.
- Since lock acquisition is expensive, batching the lock/unlocks will speed up IO.
- .RE
- .P
.BI opendir \fR=\fPstr
Recursively open any files below directory \fIstr\fR.
.TP
used identically to normal parameters, with the caveat that when used on the
command line, the must come after the ioengine that defines them is selected.
.TP
+.BI (cpu)cpuload \fR=\fPint
+Attempt to use the specified percentage of CPU cycles.
+.TP
+.BI (cpu)cpuchunks \fR=\fPint
+Split the load into cycles of the given time. In microseconds.
+.TP
.BI (libaio)userspace_reap
Normally, with the libaio engine in use, fio will use
the io_getevents system call to reap newly returned events.
struct thread_data;
#include "compiler/compiler.h"
+#include "thread_options.h"
#include "flist.h"
#include "fifo.h"
-#include "rbtree.h"
+#include "lib/rbtree.h"
#include "arch/arch.h"
#include "os/os.h"
#include "mutex.h"
#include "gettime.h"
#include "lib/getopt.h"
#include "lib/rand.h"
+#include "client.h"
#include "server.h"
#include "stat.h"
#include "flow.h"
#define MPOL_LOCAL MPOL_MAX
#endif
-/*
- * What type of allocation to use for io buffers
- */
-enum fio_memtype {
- MEM_MALLOC = 0, /* ordinary malloc */
- MEM_SHM, /* use shared memory segments */
- MEM_SHMHUGE, /* use shared memory segments with huge pages */
- MEM_MMAP, /* use anonynomous mmap */
- MEM_MMAPHUGE, /* memory mapped huge file */
-};
-
/*
* offset generator types
*/
RW_SEQ_IDENT,
};
-/*
- * What type of errors to continue on when continue_on_error is used
- */
-enum error_type_bit {
- ERROR_TYPE_READ_BIT = 0,
- ERROR_TYPE_WRITE_BIT = 1,
- ERROR_TYPE_VERIFY_BIT = 2,
- ERROR_TYPE_CNT = 3,
-};
-
-enum error_type {
- ERROR_TYPE_NONE = 0,
- ERROR_TYPE_READ = 1 << ERROR_TYPE_READ_BIT,
- ERROR_TYPE_WRITE = 1 << ERROR_TYPE_WRITE_BIT,
- ERROR_TYPE_VERIFY = 1 << ERROR_TYPE_VERIFY_BIT,
- ERROR_TYPE_ANY = 0xffff,
-};
-
-struct bssplit {
- unsigned int bs;
- unsigned char perc;
-};
-
-struct thread_options {
- int pad;
- char *description;
- char *name;
- char *directory;
- char *filename;
- char *opendir;
- char *ioengine;
- enum td_ddir td_ddir;
- unsigned int rw_seq;
- unsigned int kb_base;
- unsigned int ddir_seq_nr;
- long ddir_seq_add;
- unsigned int iodepth;
- unsigned int iodepth_low;
- unsigned int iodepth_batch;
- unsigned int iodepth_batch_complete;
-
- unsigned long long size;
- unsigned int size_percent;
- unsigned int fill_device;
- unsigned long long file_size_low;
- unsigned long long file_size_high;
- unsigned long long start_offset;
-
- unsigned int bs[DDIR_RWDIR_CNT];
- unsigned int ba[DDIR_RWDIR_CNT];
- unsigned int min_bs[DDIR_RWDIR_CNT];
- unsigned int max_bs[DDIR_RWDIR_CNT];
- struct bssplit *bssplit[DDIR_RWDIR_CNT];
- unsigned int bssplit_nr[DDIR_RWDIR_CNT];
-
- int *ignore_error[ERROR_TYPE_CNT];
- unsigned int ignore_error_nr[ERROR_TYPE_CNT];
- unsigned int error_dump;
-
- unsigned int nr_files;
- unsigned int open_files;
- enum file_lock_mode file_lock_mode;
-
- unsigned int odirect;
- unsigned int invalidate_cache;
- unsigned int create_serialize;
- unsigned int create_fsync;
- unsigned int create_on_open;
- unsigned int create_only;
- unsigned int end_fsync;
- unsigned int pre_read;
- unsigned int sync_io;
- unsigned int verify;
- unsigned int do_verify;
- unsigned int verifysort;
- unsigned int verifysort_nr;
- unsigned int verify_interval;
- unsigned int verify_offset;
- char verify_pattern[MAX_PATTERN_SIZE];
- unsigned int verify_pattern_bytes;
- unsigned int verify_fatal;
- unsigned int verify_dump;
- unsigned int verify_async;
- unsigned long long verify_backlog;
- unsigned int verify_batch;
- unsigned int experimental_verify;
- unsigned int use_thread;
- unsigned int unlink;
- unsigned int do_disk_util;
- unsigned int override_sync;
- unsigned int rand_repeatable;
- unsigned int use_os_rand;
- unsigned int write_lat_log;
- unsigned int write_bw_log;
- unsigned int write_iops_log;
- unsigned int log_avg_msec;
- unsigned int norandommap;
- unsigned int softrandommap;
- unsigned int bs_unaligned;
- unsigned int fsync_on_close;
-
- unsigned int random_distribution;
- double zipf_theta;
- double pareto_h;
-
- unsigned int random_generator;
-
- unsigned int hugepage_size;
- unsigned int rw_min_bs;
- unsigned int thinktime;
- unsigned int thinktime_spin;
- unsigned int thinktime_blocks;
- unsigned int fsync_blocks;
- unsigned int fdatasync_blocks;
- unsigned int barrier_blocks;
- unsigned long long start_delay;
- unsigned long long timeout;
- unsigned long long ramp_time;
- unsigned int overwrite;
- unsigned int bw_avg_time;
- unsigned int iops_avg_time;
- unsigned int loops;
- unsigned long long zone_range;
- unsigned long long zone_size;
- unsigned long long zone_skip;
- enum fio_memtype mem_type;
- unsigned int mem_align;
-
- unsigned int max_latency;
-
- unsigned int stonewall;
- unsigned int new_group;
- unsigned int numjobs;
- os_cpu_mask_t cpumask;
- unsigned int cpumask_set;
- os_cpu_mask_t verify_cpumask;
- unsigned int verify_cpumask_set;
-#ifdef CONFIG_LIBNUMA
- struct bitmask *numa_cpunodesmask;
- unsigned int numa_cpumask_set;
- unsigned short numa_mem_mode;
- unsigned int numa_mem_prefer_node;
- struct bitmask *numa_memnodesmask;
- unsigned int numa_memmask_set;
-#endif
- unsigned int iolog;
- unsigned int rwmixcycle;
- unsigned int rwmix[2];
- unsigned int nice;
- unsigned int file_service_type;
- unsigned int group_reporting;
- unsigned int fadvise_hint;
- enum fio_fallocate_mode fallocate_mode;
- unsigned int zero_buffers;
- unsigned int refill_buffers;
- unsigned int scramble_buffers;
- unsigned int compress_percentage;
- unsigned int compress_chunk;
- unsigned int time_based;
- unsigned int disable_lat;
- unsigned int disable_clat;
- unsigned int disable_slat;
- unsigned int disable_bw;
- unsigned int unified_rw_rep;
- unsigned int gtod_reduce;
- unsigned int gtod_cpu;
- unsigned int gtod_offload;
- enum fio_cs clocksource;
- unsigned int no_stall;
- unsigned int trim_percentage;
- unsigned int trim_batch;
- unsigned int trim_zero;
- unsigned long long trim_backlog;
- unsigned int clat_percentiles;
- unsigned int percentile_precision; /* digits after decimal for percentiles */
- fio_fp64_t percentile_list[FIO_IO_U_LIST_MAX_LEN];
-
- char *read_iolog_file;
- char *write_iolog_file;
- char *bw_log_file;
- char *lat_log_file;
- char *iops_log_file;
- char *replay_redirect;
-
- /*
- * Pre-run and post-run shell
- */
- char *exec_prerun;
- char *exec_postrun;
-
- unsigned int rate[DDIR_RWDIR_CNT];
- unsigned int ratemin[DDIR_RWDIR_CNT];
- unsigned int ratecycle;
- unsigned int rate_iops[DDIR_RWDIR_CNT];
- unsigned int rate_iops_min[DDIR_RWDIR_CNT];
-
- char *ioscheduler;
-
- /*
- * CPU "io" cycle burner
- */
- unsigned int cpuload;
- unsigned int cpucycle;
-
- /*
- * I/O Error handling
- */
- enum error_type continue_on_error;
-
- /*
- * Benchmark profile type
- */
- char *profile;
-
- /*
- * blkio cgroup support
- */
- char *cgroup;
- unsigned int cgroup_weight;
- unsigned int cgroup_nodelete;
-
- unsigned int uid;
- unsigned int gid;
-
- int flow_id;
- int flow;
- int flow_watermark;
- unsigned int flow_sleep;
-
- unsigned long long offset_increment;
-
- unsigned int sync_file_range;
-};
-
enum {
TD_F_VER_BACKLOG = 1,
TD_F_TRIM_BACKLOG = 2,
void *eo;
char verror[FIO_VERROR_SIZE];
pthread_t thread;
- int thread_number;
- int groupid;
+ unsigned int thread_number;
+ unsigned int groupid;
struct thread_stat ts;
+ int client_type;
+
struct io_log *slat_log;
struct io_log *clat_log;
struct io_log *lat_log;
struct rusage ru_end;
struct fio_file **files;
+ unsigned char *file_locks;
unsigned int files_size;
unsigned int files_index;
unsigned int nr_open_files;
size_t orig_buffer_size;
volatile int terminate;
volatile int runstate;
- unsigned int ioprio;
- unsigned int ioprio_set;
unsigned int last_was_sync;
enum fio_ddir last_ddir;
- char *mmapfile;
int mmapfd;
void *iolog_buf;
/*
* Rate state
*/
- unsigned long long rate_bps[DDIR_RWDIR_CNT];
+ uint64_t rate_bps[DDIR_RWDIR_CNT];
long rate_pending_usleep[DDIR_RWDIR_CNT];
unsigned long rate_bytes[DDIR_RWDIR_CNT];
unsigned long rate_blocks[DDIR_RWDIR_CNT];
struct timeval lastrate[DDIR_RWDIR_CNT];
- unsigned long long total_io_size;
- unsigned long long fill_device_size;
+ uint64_t total_io_size;
+ uint64_t fill_device_size;
unsigned long io_issues[DDIR_RWDIR_CNT];
- unsigned long long io_blocks[DDIR_RWDIR_CNT];
- unsigned long long this_io_blocks[DDIR_RWDIR_CNT];
- unsigned long long io_bytes[DDIR_RWDIR_CNT];
- unsigned long long io_skip_bytes;
- unsigned long long this_io_bytes[DDIR_RWDIR_CNT];
- unsigned long long zone_bytes;
+ uint64_t io_blocks[DDIR_RWDIR_CNT];
+ uint64_t this_io_blocks[DDIR_RWDIR_CNT];
+ uint64_t io_bytes[DDIR_RWDIR_CNT];
+ uint64_t io_skip_bytes;
+ uint64_t this_io_bytes[DDIR_RWDIR_CNT];
+ uint64_t zone_bytes;
struct fio_mutex *mutex;
/*
*/
struct prof_io_ops prof_io_ops;
void *prof_data;
+
+ void *pinned_mem;
};
/*
extern int exitall_on_terminate;
extern unsigned int thread_number;
extern unsigned int stat_number;
-extern unsigned int nr_process, nr_thread;
extern int shm_id;
extern int groupid;
extern int output_format;
extern int temp_stall_ts;
-extern unsigned long long mlock_size;
extern uintptr_t page_mask, page_size;
extern int read_only;
extern int eta_print;
/*
* Init/option functions
*/
+extern int __must_check fio_init_options(void);
extern int __must_check parse_options(int, char **);
-extern int parse_jobs_ini(char *, int, int);
-extern int parse_cmd_line(int, char **);
+extern int parse_jobs_ini(char *, int, int, int);
+extern int parse_cmd_line(int, char **, int);
extern int fio_backend(void);
extern void reset_fio_state(void);
extern void clear_io_state(struct thread_data *);
extern void fio_options_mem_dupe(struct thread_data *);
extern void options_mem_dupe(void *data, struct fio_option *options);
extern void td_fill_rand_seeds(struct thread_data *);
-extern void add_job_opts(const char **);
+extern void add_job_opts(const char **, int);
extern char *num2str(unsigned long, int, int, int);
extern int ioengine_load(struct thread_data *);
+extern unsigned long page_mask;
+extern unsigned long page_size;
+extern int initialize_fio(char *envp[]);
+
#define FIO_GETOPT_JOB 0x89000000
#define FIO_GETOPT_IOENGINE 0x98000000
#define FIO_NR_OPTIONS (FIO_MAX_OPTS + 128)
*/
extern void print_thread_status(void);
extern void print_status_init(int);
+extern char *fio_uint_to_kmg(unsigned int val);
/*
* Thread life cycle. Once a thread has a runstate beyond TD_INITIALIZED, it
/*
* Memory helpers
*/
-extern int __must_check fio_pin_memory(void);
-extern void fio_unpin_memory(void);
+extern int __must_check fio_pin_memory(struct thread_data *);
+extern void fio_unpin_memory(struct thread_data *);
extern int __must_check allocate_io_mem(struct thread_data *);
extern void free_io_mem(struct thread_data *);
+extern void free_threads_shm(void);
/*
* Reset stats after ramp time completes
extern const char *fio_get_arch_string(int);
extern const char *fio_get_os_string(int);
+#define ARRAY_SIZE(x) (sizeof((x)) / (sizeof((x)[0])))
+
enum {
FIO_OUTPUT_TERSE = 0,
FIO_OUTPUT_JSON,
#include <string.h>
#include <errno.h>
#include <sys/ipc.h>
- #ifndef FIO_NO_HAVE_SHM_H
- #include <sys/shm.h>
- #endif
#include <sys/types.h>
#include <sys/stat.h>
#include "fio.h"
+ #ifndef FIO_NO_HAVE_SHM_H
+ #include <sys/shm.h>
+ #endif
+
#include "parse.h"
#include "smalloc.h"
#include "filehash.h"
static int max_jobs = FIO_MAX_JOBS;
static int dump_cmdline;
static int def_timeout;
+ static int parse_only;
static struct thread_data def_thread;
struct thread_data *threads = NULL;
.has_arg = required_argument,
.val = 'd' | FIO_CLIENT_FLAG,
},
+ {
+ .name = (char *) "parse-only",
+ .has_arg = no_argument,
+ .val = 'P' | FIO_CLIENT_FLAG,
+ },
{
.name = (char *) "section",
.has_arg = required_argument,
},
};
-static void free_shm(void)
+void free_threads_shm(void)
{
struct shmid_ds sbuf;
void *tp = threads;
threads = NULL;
+ shmdt(tp);
+ shmctl(shm_id, IPC_RMID, &sbuf);
+ shm_id = -1;
+ }
+}
+
+void free_shm(void)
+{
+ if (threads) {
file_hash_exit();
flow_exit();
fio_debug_jobp = NULL;
- shmdt(tp);
- shmctl(shm_id, IPC_RMID, &sbuf);
+ free_threads_shm();
}
scleanup();
/*
* This function leaks the buffer
*/
-static char *to_kmg(unsigned int val)
+char *fio_uint_to_kmg(unsigned int val)
{
char *buf = malloc(32);
char post[] = { 0, 'K', 'M', 'G', 'P', 'E', 0 };
* to make sure we don't have conflicts, and initializes various
* members of td.
*/
-static int add_job(struct thread_data *td, const char *jobname, int job_add_num)
+static int add_job(struct thread_data *td, const char *jobname, int job_add_num,
+ int recursed, int client_type)
{
- const char *ddir_str[] = { NULL, "read", "write", "rw", NULL,
- "randread", "randwrite", "randrw",
- "trim", NULL, NULL, NULL, "randtrim" };
unsigned int i;
char fname[PATH_MAX];
int numjobs, file_alloced;
/*
* if we are just dumping the output command line, don't add the job
*/
- if (dump_cmdline) {
+ if (dump_cmdline || parse_only) {
put_job(td);
return 0;
}
+ td->client_type = client_type;
+
if (profile_td_init(td))
goto err;
if (ioengine_load(td))
goto err;
- if (td->o.use_thread)
- nr_thread++;
- else
- nr_process++;
-
if (td->o.odirect)
td->io_ops->flags |= FIO_RAWIO;
if (setup_rate(td))
goto err;
- if (td->o.write_lat_log) {
- setup_log(&td->lat_log, td->o.log_avg_msec);
- setup_log(&td->slat_log, td->o.log_avg_msec);
- setup_log(&td->clat_log, td->o.log_avg_msec);
+ if (td->o.lat_log_file) {
+ setup_log(&td->lat_log, td->o.log_avg_msec, IO_LOG_TYPE_LAT);
+ setup_log(&td->slat_log, td->o.log_avg_msec, IO_LOG_TYPE_SLAT);
+ setup_log(&td->clat_log, td->o.log_avg_msec, IO_LOG_TYPE_CLAT);
}
- if (td->o.write_bw_log)
- setup_log(&td->bw_log, td->o.log_avg_msec);
- if (td->o.write_iops_log)
- setup_log(&td->iops_log, td->o.log_avg_msec);
+ if (td->o.bw_log_file)
+ setup_log(&td->bw_log, td->o.log_avg_msec, IO_LOG_TYPE_BW);
+ if (td->o.iops_log_file)
+ setup_log(&td->iops_log, td->o.log_avg_msec, IO_LOG_TYPE_IOPS);
if (!td->o.name)
td->o.name = strdup(jobname);
if (output_format == FIO_OUTPUT_NORMAL) {
if (!job_add_num) {
- if (!strcmp(td->io_ops->name, "cpuio")) {
- log_info("%s: ioengine=cpu, cpuload=%u,"
- " cpucycle=%u\n", td->o.name,
- td->o.cpuload,
- td->o.cpucycle);
- } else {
+ if (is_backend && !recursed)
+ fio_server_send_add_job(td);
+
+ if (!(td->io_ops->flags & FIO_NOIO)) {
char *c1, *c2, *c3, *c4, *c5, *c6;
- c1 = to_kmg(td->o.min_bs[DDIR_READ]);
- c2 = to_kmg(td->o.max_bs[DDIR_READ]);
- c3 = to_kmg(td->o.min_bs[DDIR_WRITE]);
- c4 = to_kmg(td->o.max_bs[DDIR_WRITE]);
- c5 = to_kmg(td->o.min_bs[DDIR_TRIM]);
- c6 = to_kmg(td->o.max_bs[DDIR_TRIM]);
+ c1 = fio_uint_to_kmg(td->o.min_bs[DDIR_READ]);
+ c2 = fio_uint_to_kmg(td->o.max_bs[DDIR_READ]);
+ c3 = fio_uint_to_kmg(td->o.min_bs[DDIR_WRITE]);
+ c4 = fio_uint_to_kmg(td->o.max_bs[DDIR_WRITE]);
+ c5 = fio_uint_to_kmg(td->o.min_bs[DDIR_TRIM]);
+ c6 = fio_uint_to_kmg(td->o.max_bs[DDIR_TRIM]);
log_info("%s: (g=%d): rw=%s, bs=%s-%s/%s-%s/%s-%s,"
" ioengine=%s, iodepth=%u\n",
td->o.name, td->groupid,
- ddir_str[td->o.td_ddir],
+ ddir_str(td->o.td_ddir),
c1, c2, c3, c4, c5, c6,
td->io_ops->name,
td->o.iodepth);
job_add_num = numjobs - 1;
- if (add_job(td_new, jobname, job_add_num))
+ if (add_job(td_new, jobname, job_add_num, 1, client_type))
goto err;
}
/*
* Parse as if 'o' was a command line
*/
-void add_job_opts(const char **o)
+void add_job_opts(const char **o, int client_type)
{
struct thread_data *td, *td_parent;
int i, in_global = 1;
if (!strncmp(o[i], "name", 4)) {
in_global = 0;
if (td)
- add_job(td, jobname, 0);
+ add_job(td, jobname, 0, 0, client_type);
td = NULL;
sprintf(jobname, "%s", o[i] + 5);
}
}
if (td)
- add_job(td, jobname, 0);
+ add_job(td, jobname, 0, 0, client_type);
}
static int skip_this_section(const char *name)
/*
* This is our [ini] type file parser.
*/
-int parse_jobs_ini(char *file, int is_buf, int stonewall_flag)
+int parse_jobs_ini(char *file, int is_buf, int stonewall_flag, int type)
{
unsigned int global;
struct thread_data *td;
for (i = 0; i < num_opts; i++)
log_info("--%s ", opts[i]);
- ret = add_job(td, name, 0);
+ ret = add_job(td, name, 0, 0, type);
} else {
log_err("fio: job %s dropped\n", name);
put_job(td);
printf(" --debug=options\tEnable debug logging. May be one/more of:\n"
"\t\t\tprocess,file,io,mem,blktrace,verify,random,parse,\n"
"\t\t\tdiskutil,job,mutex,profile,time,net\n");
+ printf(" --parse-only\t\tParse options only, don't start any IO\n");
printf(" --output\t\tWrite output to file\n");
printf(" --runtime\t\tRuntime in seconds\n");
printf(" --latency-log\t\tGenerate per-job latency logs\n");
#ifdef FIO_INC_DEBUG
struct debug_level debug_levels[] = {
- { .name = "process", .shift = FD_PROCESS, },
- { .name = "file", .shift = FD_FILE, },
- { .name = "io", .shift = FD_IO, },
- { .name = "mem", .shift = FD_MEM, },
- { .name = "blktrace", .shift = FD_BLKTRACE },
- { .name = "verify", .shift = FD_VERIFY },
- { .name = "random", .shift = FD_RANDOM },
- { .name = "parse", .shift = FD_PARSE },
- { .name = "diskutil", .shift = FD_DISKUTIL },
- { .name = "job", .shift = FD_JOB },
- { .name = "mutex", .shift = FD_MUTEX },
- { .name = "profile", .shift = FD_PROFILE },
- { .name = "time", .shift = FD_TIME },
- { .name = "net", .shift = FD_NET },
+ { .name = "process",
+ .help = "Process creation/exit logging",
+ .shift = FD_PROCESS,
+ },
+ { .name = "file",
+ .help = "File related action logging",
+ .shift = FD_FILE,
+ },
+ { .name = "io",
+ .help = "IO and IO engine action logging (offsets, queue, completions, etc)",
+ .shift = FD_IO,
+ },
+ { .name = "mem",
+ .help = "Memory allocation/freeing logging",
+ .shift = FD_MEM,
+ },
+ { .name = "blktrace",
+ .help = "blktrace action logging",
+ .shift = FD_BLKTRACE,
+ },
+ { .name = "verify",
+ .help = "IO verification action logging",
+ .shift = FD_VERIFY,
+ },
+ { .name = "random",
+ .help = "Random generation logging",
+ .shift = FD_RANDOM,
+ },
+ { .name = "parse",
+ .help = "Parser logging",
+ .shift = FD_PARSE,
+ },
+ { .name = "diskutil",
+ .help = "Disk utility logging actions",
+ .shift = FD_DISKUTIL,
+ },
+ { .name = "job",
+ .help = "Logging related to creating/destroying jobs",
+ .shift = FD_JOB,
+ },
+ { .name = "mutex",
+ .help = "Mutex logging",
+ .shift = FD_MUTEX
+ },
+ { .name = "profile",
+ .help = "Logging related to profiles",
+ .shift = FD_PROFILE,
+ },
+ { .name = "time",
+ .help = "Logging related to time keeping functions",
+ .shift = FD_TIME,
+ },
+ { .name = "net",
+ .help = "Network logging",
+ .shift = FD_NET,
+ },
{ .name = NULL, },
};
fio_client_add_cmd_option(client, opt);
}
-int parse_cmd_line(int argc, char *argv[])
+int parse_cmd_line(int argc, char *argv[], int client_type)
{
struct thread_data *td = NULL;
int c, ini_idx = 0, lidx, ret = 0, do_exit = 0, exit_val = 0;
if (set_debug(optarg))
do_exit++;
break;
+ case 'P':
+ parse_only = 1;
+ break;
case 'x': {
size_t new_size;
char *val = optarg;
if (!strncmp(opt, "name", 4) && td) {
- ret = add_job(td, td->o.name ?: "fio", 0);
+ ret = add_job(td, td->o.name ?: "fio", 0, 0, client_type);
if (ret)
return 0;
td = NULL;
exit_val = 1;
break;
}
- if (fio_client_add(optarg, &cur_client)) {
+ if (fio_client_add(&fio_client_ops, optarg, &cur_client)) {
log_err("fio: failed adding client %s\n", optarg);
do_exit++;
exit_val = 1;
if (td) {
if (!ret)
- ret = add_job(td, td->o.name ?: "fio", 0);
+ ret = add_job(td, td->o.name ?: "fio", 0, 0, client_type);
}
while (!ret && optind < argc) {
return ini_idx;
}
-int parse_options(int argc, char *argv[])
+int fio_init_options(void)
{
- int job_files, i;
-
f_out = stdout;
f_err = stderr;
if (fill_def_thread())
return 1;
- job_files = parse_cmd_line(argc, argv);
+ return 0;
+}
+
+extern int fio_check_options(struct thread_options *);
+
+int parse_options(int argc, char *argv[])
+{
+ const int type = FIO_CLIENT_TYPE_CLI;
+ int job_files, i;
+
+ if (fio_init_options())
+ return 1;
+ if (fio_test_cconv(&def_thread.o))
+ log_err("fio: failed internal cconv test\n");
+
+ job_files = parse_cmd_line(argc, argv, type);
if (job_files > 0) {
for (i = 0; i < job_files; i++) {
return 1;
free(ini_file[i]);
} else if (!is_backend) {
- if (parse_jobs_ini(ini_file[i], 0, i))
+ if (parse_jobs_ini(ini_file[i], 0, i, type))
return 1;
free(ini_file[i]);
}
fio_options_free(&def_thread);
if (!thread_number) {
- if (dump_cmdline)
+ if (dump_cmdline || parse_only)
return 0;
if (exec_profile)
return 0;
return 0;
}
+
+void options_default_fill(struct thread_options *o)
+{
+ memcpy(o, &def_thread.o, sizeof(*o));
+}
#include <string.h>
#include <sys/types.h>
#include <signal.h>
+#include <stdint.h>
+#include <locale.h>
+
#include "fio.h"
+#include "smalloc.h"
+#include "os/os.h"
/*
* Just expose an empty list, if the OS does not support disk util stats
unsigned long arch_flags = 0;
+uintptr_t page_mask;
+uintptr_t page_size;
+
static const char *fio_os_strings[os_nr] = {
"Invalid",
"Linux",
td->io_issues[i] = 0;
td->ts.total_io_u[i] = 0;
td->ts.runtime[i] = 0;
+ td->rwmix_issues = 0;
}
fio_gettime(&tv, NULL);
groupid = 0;
thread_number = 0;
stat_number = 0;
- nr_process = 0;
- nr_thread = 0;
done_secs = 0;
}
}
}
+/*
+ * Verify at run time that the configure-time endianness setting
+ * (CONFIG_LITTLE_ENDIAN / CONFIG_BIG_ENDIAN) matches the host we
+ * are actually running on. Returns 0 if consistent, 1 on any
+ * mismatch, if neither config define was set, or if the byte
+ * order could not be detected at all.
+ */
+static int endian_check(void)
+{
+	union {
+		uint8_t c[8];
+		uint64_t v;
+	} u;
+	int le = 0, be = 0;
+
+	/* Store a 64-bit value and see which byte ends up holding it */
+	u.v = 0x12;
+	if (u.c[7] == 0x12)
+		be = 1;
+	else if (u.c[0] == 0x12)
+		le = 1;
+
+#if defined(CONFIG_LITTLE_ENDIAN)
+	if (be)
+		return 1;
+#elif defined(CONFIG_BIG_ENDIAN)
+	if (le)
+		return 1;
+#else
+	/* configure set neither endianness define */
+	return 1;
+#endif
+
+	/* detected neither order at run time (should not happen) */
+	if (!le && !be)
+		return 1;
+
+	return 0;
+}
+
+/*
+ * One-time process setup shared by fio frontends: validates the
+ * endianness configuration, runs arch and smalloc initialization,
+ * sets a numeric locale, caches the system page size/mask, and
+ * initializes the option keyword table.
+ * Returns 0 on success, 1 on failure.
+ */
+int initialize_fio(char *envp[])
+{
+	long ps;
+
+	if (endian_check()) {
+		log_err("fio: endianness settings appear wrong.\n");
+		log_err("fio: please report this to fio@vger.kernel.org\n");
+		return 1;
+	}
+
+	arch_init(envp);
+	sinit();
+
+	/*
+	 * We need a locale for number printing; if one isn't set,
+	 * just go with the US format.
+	 */
+	if (!getenv("LC_NUMERIC"))
+		setlocale(LC_NUMERIC, "en_US");
+
+	ps = sysconf(_SC_PAGESIZE);
+	if (ps < 0) {
+		log_err("Failed to get page size\n");
+		return 1;
+	}
+
+	page_size = ps;
+	page_mask = ps - 1;
+
+	fio_keywords_init();
+	return 0;
+}
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
- #ifndef FIO_NO_HAVE_SHM_H
- #include <sys/shm.h>
- #endif
#include <sys/mman.h>
#include "fio.h"
+ #ifndef FIO_NO_HAVE_SHM_H
+ #include <sys/shm.h>
+ #endif
-static void *pinned_mem;
-
-void fio_unpin_memory(void)
+void fio_unpin_memory(struct thread_data *td)
{
- if (pinned_mem) {
- dprint(FD_MEM, "unpinning %llu bytes\n", mlock_size);
- if (munlock(pinned_mem, mlock_size) < 0)
+ if (td->pinned_mem) {
+ dprint(FD_MEM, "unpinning %llu bytes\n", td->o.lockmem);
+ if (munlock(td->pinned_mem, td->o.lockmem) < 0)
perror("munlock");
- munmap(pinned_mem, mlock_size);
- pinned_mem = NULL;
+ munmap(td->pinned_mem, td->o.lockmem);
+ td->pinned_mem = NULL;
}
}
-int fio_pin_memory(void)
+int fio_pin_memory(struct thread_data *td)
{
unsigned long long phys_mem;
- if (!mlock_size)
+ if (!td->o.lockmem)
return 0;
- dprint(FD_MEM, "pinning %llu bytes\n", mlock_size);
+ dprint(FD_MEM, "pinning %llu bytes\n", td->o.lockmem);
/*
* Don't allow mlock of more than real_mem-128MB
*/
phys_mem = os_phys_mem();
if (phys_mem) {
- if ((mlock_size + 128 * 1024 * 1024) > phys_mem) {
- mlock_size = phys_mem - 128 * 1024 * 1024;
+ if ((td->o.lockmem + 128 * 1024 * 1024) > phys_mem) {
+ td->o.lockmem = phys_mem - 128 * 1024 * 1024;
log_info("fio: limiting mlocked memory to %lluMB\n",
- mlock_size >> 20);
+ td->o.lockmem >> 20);
}
}
- pinned_mem = mmap(NULL, mlock_size, PROT_READ | PROT_WRITE,
+ td->pinned_mem = mmap(NULL, td->o.lockmem, PROT_READ | PROT_WRITE,
MAP_PRIVATE | OS_MAP_ANON, -1, 0);
- if (pinned_mem == MAP_FAILED) {
+ if (td->pinned_mem == MAP_FAILED) {
perror("malloc locked mem");
- pinned_mem = NULL;
+ td->pinned_mem = NULL;
return 1;
}
- if (mlock(pinned_mem, mlock_size) < 0) {
+ if (mlock(td->pinned_mem, td->o.lockmem) < 0) {
perror("mlock");
- munmap(pinned_mem, mlock_size);
- pinned_mem = NULL;
+ munmap(td->pinned_mem, td->o.lockmem);
+ td->pinned_mem = NULL;
return 1;
}
unsigned long mask = td->o.hugepage_size - 1;
/* TODO: make sure the file is a real hugetlbfs file */
- if (!td->mmapfile)
+ if (!td->o.mmapfile)
flags |= MAP_HUGETLB;
total_mem = (total_mem + mask) & ~mask;
}
- if (td->mmapfile) {
- td->mmapfd = open(td->mmapfile, O_RDWR|O_CREAT, 0644);
+ if (td->o.mmapfile) {
+ td->mmapfd = open(td->o.mmapfile, O_RDWR|O_CREAT, 0644);
if (td->mmapfd < 0) {
td_verror(td, errno, "open mmap file");
td->orig_buffer = NULL;
if (td->mmapfd) {
close(td->mmapfd);
- unlink(td->mmapfile);
+ unlink(td->o.mmapfile);
}
return 1;
dprint(FD_MEM, "munmap %llu %p\n", (unsigned long long) total_mem,
td->orig_buffer);
munmap(td->orig_buffer, td->orig_buffer_size);
- if (td->mmapfile) {
+ if (td->o.mmapfile) {
close(td->mmapfd);
- unlink(td->mmapfile);
- free(td->mmapfile);
+ unlink(td->o.mmapfile);
+ free(td->o.mmapfile);
}
}
{
int base;
- switch(a) {
+ switch (a) {
case '0'...'9':
base = '0';
break;
default:
base = 0;
}
- return (a - base);
+ return a - base;
}
static int bs_cmp(const void *p1, const void *p2)
return bsp1->perc < bsp2->perc;
}
-static int bssplit_ddir(struct thread_data *td, int ddir, char *str)
+static int bssplit_ddir(struct thread_options *o, int ddir, char *str)
{
struct bssplit *bssplit;
unsigned int i, perc, perc_missing;
long long val;
char *fname;
- td->o.bssplit_nr[ddir] = 4;
+ o->bssplit_nr[ddir] = 4;
bssplit = malloc(4 * sizeof(struct bssplit));
i = 0;
/*
* grow struct buffer, if needed
*/
- if (i == td->o.bssplit_nr[ddir]) {
- td->o.bssplit_nr[ddir] <<= 1;
- bssplit = realloc(bssplit, td->o.bssplit_nr[ddir]
+ if (i == o->bssplit_nr[ddir]) {
+ o->bssplit_nr[ddir] <<= 1;
+ bssplit = realloc(bssplit, o->bssplit_nr[ddir]
* sizeof(struct bssplit));
}
} else
perc = -1;
- if (str_to_decimal(fname, &val, 1, td)) {
+ if (str_to_decimal(fname, &val, 1, o)) {
log_err("fio: bssplit conversion failed\n");
- free(td->o.bssplit);
+ free(o->bssplit);
return 1;
}
i++;
}
- td->o.bssplit_nr[ddir] = i;
+ o->bssplit_nr[ddir] = i;
/*
* Now check if the percentages add up, and how much is missing
*/
perc = perc_missing = 0;
- for (i = 0; i < td->o.bssplit_nr[ddir]; i++) {
+ for (i = 0; i < o->bssplit_nr[ddir]; i++) {
struct bssplit *bsp = &bssplit[i];
if (bsp->perc == (unsigned char) -1)
* them.
*/
if (perc_missing) {
- for (i = 0; i < td->o.bssplit_nr[ddir]; i++) {
+ for (i = 0; i < o->bssplit_nr[ddir]; i++) {
struct bssplit *bsp = &bssplit[i];
if (bsp->perc == (unsigned char) -1)
}
}
- td->o.min_bs[ddir] = min_bs;
- td->o.max_bs[ddir] = max_bs;
+ o->min_bs[ddir] = min_bs;
+ o->max_bs[ddir] = max_bs;
/*
* now sort based on percentages, for ease of lookup
*/
- qsort(bssplit, td->o.bssplit_nr[ddir], sizeof(struct bssplit), bs_cmp);
- td->o.bssplit[ddir] = bssplit;
+ qsort(bssplit, o->bssplit_nr[ddir], sizeof(struct bssplit), bs_cmp);
+ o->bssplit[ddir] = bssplit;
return 0;
-
}
static int str_bssplit_cb(void *data, const char *input)
if (odir) {
ddir = strchr(odir + 1, ',');
if (ddir) {
- ret = bssplit_ddir(td, DDIR_TRIM, ddir + 1);
+ ret = bssplit_ddir(&td->o, DDIR_TRIM, ddir + 1);
if (!ret)
*ddir = '\0';
} else {
char *op;
op = strdup(odir + 1);
- ret = bssplit_ddir(td, DDIR_TRIM, op);
+ ret = bssplit_ddir(&td->o, DDIR_TRIM, op);
free(op);
}
- if (!ret)
- ret = bssplit_ddir(td, DDIR_WRITE, odir + 1);
+ if (!ret)
+ ret = bssplit_ddir(&td->o, DDIR_WRITE, odir + 1);
if (!ret) {
*odir = '\0';
- ret = bssplit_ddir(td, DDIR_READ, str);
+ ret = bssplit_ddir(&td->o, DDIR_READ, str);
}
} else {
char *op;
op = strdup(str);
- ret = bssplit_ddir(td, DDIR_WRITE, op);
+ ret = bssplit_ddir(&td->o, DDIR_WRITE, op);
free(op);
if (!ret) {
op = strdup(str);
- ret = bssplit_ddir(td, DDIR_TRIM, op);
+ ret = bssplit_ddir(&td->o, DDIR_TRIM, op);
free(op);
}
- ret = bssplit_ddir(td, DDIR_READ, str);
+ ret = bssplit_ddir(&td->o, DDIR_READ, str);
}
free(p);
static int str2error(char *str)
{
- const char * err[] = {"EPERM", "ENOENT", "ESRCH", "EINTR", "EIO",
+ const char *err[] = { "EPERM", "ENOENT", "ESRCH", "EINTR", "EIO",
"ENXIO", "E2BIG", "ENOEXEC", "EBADF",
"ECHILD", "EAGAIN", "ENOMEM", "EACCES",
"EFAULT", "ENOTBLK", "EBUSY", "EEXIST",
"EXDEV", "ENODEV", "ENOTDIR", "EISDIR",
"EINVAL", "ENFILE", "EMFILE", "ENOTTY",
"ETXTBSY","EFBIG", "ENOSPC", "ESPIPE",
- "EROFS","EMLINK", "EPIPE", "EDOM", "ERANGE"};
+ "EROFS", "EMLINK", "EPIPE", "EDOM", "ERANGE" };
int i = 0, num = sizeof(err) / sizeof(void *);
- while( i < num) {
+ while (i < num) {
if (!strcmp(err[i], str))
return i + 1;
i++;
static int str_rw_cb(void *data, const char *str)
{
struct thread_data *td = data;
+ struct thread_options *o = &td->o;
char *nr = get_opt_postfix(str);
- td->o.ddir_seq_nr = 1;
- td->o.ddir_seq_add = 0;
+ o->ddir_seq_nr = 1;
+ o->ddir_seq_add = 0;
if (!nr)
return 0;
if (td_random(td))
- td->o.ddir_seq_nr = atoi(nr);
+ o->ddir_seq_nr = atoi(nr);
else {
long long val;
- if (str_to_decimal(nr, &val, 1, td)) {
+ if (str_to_decimal(nr, &val, 1, o)) {
log_err("fio: rw postfix parsing failed\n");
free(nr);
return 1;
}
- td->o.ddir_seq_add = val;
+ o->ddir_seq_add = val;
}
free(nr);
struct thread_data *td = data;
if (td->o.mem_type == MEM_MMAPHUGE || td->o.mem_type == MEM_MMAP)
- td->mmapfile = get_opt_postfix(mem);
-
- return 0;
-}
-
-static int str_verify_cb(void *data, const char *mem)
-{
- struct thread_data *td = data;
-
- if (td->o.verify == VERIFY_CRC32C_INTEL ||
- td->o.verify == VERIFY_CRC32C) {
- crc32c_intel_probe();
- }
+ td->o.mmapfile = get_opt_postfix(mem);
return 0;
}
return 0;
}
-static int str_lockmem_cb(void fio_unused *data, unsigned long long *val)
-{
- mlock_size = *val;
- return 0;
-}
-
static int str_rwmix_read_cb(void *data, unsigned long long *val)
{
struct thread_data *td = data;
return 0;
}
-#ifdef FIO_HAVE_IOPRIO
-static int str_prioclass_cb(void *data, unsigned long long *val)
-{
- struct thread_data *td = data;
- unsigned short mask;
-
- /*
- * mask off old class bits, str_prio_cb() may have set a default class
- */
- mask = (1 << IOPRIO_CLASS_SHIFT) - 1;
- td->ioprio &= mask;
-
- td->ioprio |= *val << IOPRIO_CLASS_SHIFT;
- td->ioprio_set = 1;
- return 0;
-}
-
-static int str_prio_cb(void *data, unsigned long long *val)
-{
- struct thread_data *td = data;
-
- td->ioprio |= *val;
-
- /*
- * If no class is set, assume BE
- */
- if ((td->ioprio >> IOPRIO_CLASS_SHIFT) == 0)
- td->ioprio |= IOPRIO_CLASS_BE << IOPRIO_CLASS_SHIFT;
-
- td->ioprio_set = 1;
- return 0;
-}
-#endif
-
static int str_exitall_cb(void)
{
exitall_on_terminate = 1;
}
#endif
-#ifdef FIO_HAVE_TRIM
-static int str_verify_trim_cb(void *data, unsigned long long *val)
-{
- struct thread_data *td = data;
-
- td->o.trim_percentage = *val;
- return 0;
-}
-#endif
-
static int str_fst_cb(void *data, const char *str)
{
struct thread_data *td = data;
log_err("fio: zipf theta must different than 1.0\n");
return 1;
}
- td->o.zipf_theta = val;
+ td->o.zipf_theta.u.f = val;
} else {
if (val <= 0.00 || val >= 1.00) {
log_err("fio: pareto input out of range (0 < input < 1.0)\n");
return 1;
}
- td->o.pareto_h = val;
- }
-
- return 0;
-}
-
-static int check_dir(struct thread_data *td, char *fname)
-{
-#if 0
- char file[PATH_MAX], *dir;
- int elen = 0;
-
- if (td->o.directory) {
- strcpy(file, td->o.directory);
- strcat(file, "/");
- elen = strlen(file);
- }
-
- sprintf(file + elen, "%s", fname);
- dir = dirname(file);
-
- {
- struct stat sb;
- /*
- * We can't do this on FIO_DISKLESSIO engines. The engine isn't loaded
- * yet, so we can't do this check right here...
- */
- if (lstat(dir, &sb) < 0) {
- int ret = errno;
-
- log_err("fio: %s is not a directory\n", dir);
- td_verror(td, ret, "lstat");
- return 1;
- }
-
- if (!S_ISDIR(sb.st_mode)) {
- log_err("fio: %s is not a directory\n", dir);
- return 1;
- }
+ td->o.pareto_h.u.f = val;
}
-#endif
return 0;
}
while ((fname = get_next_file_name(&str)) != NULL) {
if (!strlen(fname))
break;
- if (check_dir(td, fname)) {
- free(p);
- return 1;
- }
add_file(td, fname);
td->o.nr_files++;
}
return add_dir_files(td, td->o.opendir);
}
-static int str_verify_offset_cb(void *data, unsigned long long *off)
-{
- struct thread_data *td = data;
-
- if (*off && *off < sizeof(struct verify_header)) {
- log_err("fio: verify_offset too small\n");
- return 1;
- }
-
- td->o.verify_offset = *off;
- return 0;
-}
-
static int str_verify_pattern_cb(void *data, const char *input)
{
struct thread_data *td = data;
long off;
int i = 0, j = 0, len, k, base = 10;
- char* loc1, * loc2;
+ char *loc1, *loc2;
loc1 = strstr(input, "0x");
loc2 = strstr(input, "0X");
return 0;
}
- static int str_lockfile_cb(void *data, const char *str)
-static int str_write_bw_log_cb(void *data, const char *str)
--{
-- struct thread_data *td = data;
- char *nr = get_opt_postfix(str);
--
- td->o.lockfile_batch = 1;
- if (nr) {
- td->o.lockfile_batch = atoi(nr);
- free(nr);
- }
- if (str)
- td->o.bw_log_file = strdup(str);
-
- td->o.write_bw_log = 1;
- return 0;
-}
-
-static int str_write_lat_log_cb(void *data, const char *str)
-{
- struct thread_data *td = data;
-
- if (str)
- td->o.lat_log_file = strdup(str);
-
- td->o.write_lat_log = 1;
- return 0;
-}
-
-static int str_write_iops_log_cb(void *data, const char *str)
-{
- struct thread_data *td = data;
-
- if (str)
- td->o.iops_log_file = strdup(str);
--
- td->o.write_iops_log = 1;
-- return 0;
--}
--
static int str_gtod_reduce_cb(void *data, int *il)
{
struct thread_data *td = data;
return 0;
}
+/*
+ * Option grouping
+ */
+static struct opt_group fio_opt_groups[] = {
+ {
+ .name = "General",
+ .mask = FIO_OPT_C_GENERAL,
+ },
+ {
+ .name = "I/O",
+ .mask = FIO_OPT_C_IO,
+ },
+ {
+ .name = "File",
+ .mask = FIO_OPT_C_FILE,
+ },
+ {
+ .name = "Statistics",
+ .mask = FIO_OPT_C_STAT,
+ },
+ {
+ .name = "Logging",
+ .mask = FIO_OPT_C_LOG,
+ },
+ {
+ .name = "Profiles",
+ .mask = FIO_OPT_C_PROFILE,
+ },
+ {
+ .name = NULL,
+ },
+};
+
+static struct opt_group *__opt_group_from_mask(struct opt_group *ogs, unsigned int *mask,
+ unsigned int inv_mask)
+{
+ struct opt_group *og;
+ int i;
+
+ if (*mask == inv_mask || !*mask)
+ return NULL;
+
+ for (i = 0; ogs[i].name; i++) {
+ og = &ogs[i];
+
+ if (*mask & og->mask) {
+ *mask &= ~(og->mask);
+ return og;
+ }
+ }
+
+ return NULL;
+}
+
+struct opt_group *opt_group_from_mask(unsigned int *mask)
+{
+ return __opt_group_from_mask(fio_opt_groups, mask, FIO_OPT_C_INVALID);
+}
+
+static struct opt_group fio_opt_cat_groups[] = {
+ {
+ .name = "Rate",
+ .mask = FIO_OPT_G_RATE,
+ },
+ {
+ .name = "Zone",
+ .mask = FIO_OPT_G_ZONE,
+ },
+ {
+ .name = "Read/write mix",
+ .mask = FIO_OPT_G_RWMIX,
+ },
+ {
+ .name = "Verify",
+ .mask = FIO_OPT_G_VERIFY,
+ },
+ {
+ .name = "Trim",
+ .mask = FIO_OPT_G_TRIM,
+ },
+ {
+ .name = "I/O Logging",
+ .mask = FIO_OPT_G_IOLOG,
+ },
+ {
+ .name = "I/O Depth",
+ .mask = FIO_OPT_G_IO_DEPTH,
+ },
+ {
+ .name = "I/O Flow",
+ .mask = FIO_OPT_G_IO_FLOW,
+ },
+ {
+ .name = "Description",
+ .mask = FIO_OPT_G_DESC,
+ },
+ {
+ .name = "Filename",
+ .mask = FIO_OPT_G_FILENAME,
+ },
+ {
+ .name = "General I/O",
+ .mask = FIO_OPT_G_IO_BASIC,
+ },
+ {
+ .name = "Cgroups",
+ .mask = FIO_OPT_G_CGROUP,
+ },
+ {
+ .name = "Runtime",
+ .mask = FIO_OPT_G_RUNTIME,
+ },
+ {
+ .name = "Process",
+ .mask = FIO_OPT_G_PROCESS,
+ },
+ {
+ .name = "Job credentials / priority",
+ .mask = FIO_OPT_G_CRED,
+ },
+ {
+ .name = "Clock settings",
+ .mask = FIO_OPT_G_CLOCK,
+ },
+ {
+ .name = "I/O Type",
+ .mask = FIO_OPT_G_IO_TYPE,
+ },
+ {
+ .name = "I/O Thinktime",
+ .mask = FIO_OPT_G_THINKTIME,
+ },
+ {
+ .name = "Randomizations",
+ .mask = FIO_OPT_G_RANDOM,
+ },
+ {
+ .name = "I/O buffers",
+ .mask = FIO_OPT_G_IO_BUF,
+ },
+ {
+ .name = "Tiobench profile",
+ .mask = FIO_OPT_G_TIOBENCH,
+ },
+
+ {
+ .name = NULL,
+ }
+};
+
+struct opt_group *opt_group_cat_from_mask(unsigned int *mask)
+{
+ return __opt_group_from_mask(fio_opt_cat_groups, mask, FIO_OPT_G_INVALID);
+}
+
/*
* Map of job/command line options
*/
-static struct fio_option options[FIO_MAX_OPTS] = {
+struct fio_option fio_options[FIO_MAX_OPTS] = {
{
.name = "description",
+ .lname = "Description of job",
.type = FIO_OPT_STR_STORE,
.off1 = td_var_offset(description),
.help = "Text job description",
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_DESC,
},
{
.name = "name",
+ .lname = "Job name",
.type = FIO_OPT_STR_STORE,
.off1 = td_var_offset(name),
.help = "Name of this job",
- },
- {
- .name = "directory",
- .type = FIO_OPT_STR_STORE,
- .off1 = td_var_offset(directory),
- .cb = str_directory_cb,
- .help = "Directory to store files in",
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_DESC,
},
{
.name = "filename",
+ .lname = "Filename(s)",
.type = FIO_OPT_STR_STORE,
.off1 = td_var_offset(filename),
.cb = str_filename_cb,
.prio = -1, /* must come after "directory" */
.help = "File(s) to use for the workload",
+ .category = FIO_OPT_C_FILE,
+ .group = FIO_OPT_G_FILENAME,
},
{
- .name = "kb_base",
- .type = FIO_OPT_INT,
- .off1 = td_var_offset(kb_base),
- .verify = kb_base_verify,
- .prio = 1,
- .def = "1024",
- .help = "How many bytes per KB for reporting (1000 or 1024)",
+ .name = "directory",
+ .lname = "Directory",
+ .type = FIO_OPT_STR_STORE,
+ .off1 = td_var_offset(directory),
+ .cb = str_directory_cb,
+ .help = "Directory to store files in",
+ .category = FIO_OPT_C_FILE,
+ .group = FIO_OPT_G_FILENAME,
},
{
.name = "lockfile",
+ .lname = "Lockfile",
.type = FIO_OPT_STR,
- .cb = str_lockfile_cb,
.off1 = td_var_offset(file_lock_mode),
.help = "Lock file when doing IO to it",
.parent = "filename",
+ .hide = 0,
.def = "none",
+ .category = FIO_OPT_C_FILE,
+ .group = FIO_OPT_G_FILENAME,
.posval = {
{ .ival = "none",
.oval = FILE_LOCK_NONE,
},
{
.name = "opendir",
+ .lname = "Open directory",
.type = FIO_OPT_STR_STORE,
.off1 = td_var_offset(opendir),
.cb = str_opendir_cb,
.help = "Recursively add files from this directory and down",
+ .category = FIO_OPT_C_FILE,
+ .group = FIO_OPT_G_FILENAME,
},
{
.name = "rw",
+ .lname = "Read/write",
.alias = "readwrite",
.type = FIO_OPT_STR,
.cb = str_rw_cb,
.help = "IO direction",
.def = "read",
.verify = rw_verify,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_IO_BASIC,
.posval = {
{ .ival = "read",
.oval = TD_DDIR_READ,
},
{
.name = "rw_sequencer",
+ .lname = "RW Sequencer",
.type = FIO_OPT_STR,
.off1 = td_var_offset(rw_seq),
.help = "IO offset generator modifier",
.def = "sequential",
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_IO_BASIC,
.posval = {
{ .ival = "sequential",
.oval = RW_SEQ_SEQ,
{
.name = "ioengine",
+ .lname = "IO Engine",
.type = FIO_OPT_STR_STORE,
.off1 = td_var_offset(ioengine),
.help = "IO engine to use",
.def = FIO_PREFERRED_ENGINE,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_IO_BASIC,
.posval = {
{ .ival = "sync",
.help = "Use read/write",
},
{
.name = "iodepth",
+ .lname = "IO Depth",
.type = FIO_OPT_INT,
.off1 = td_var_offset(iodepth),
.help = "Number of IO buffers to keep in flight",
.minval = 1,
+ .interval = 1,
.def = "1",
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_IO_BASIC,
},
{
.name = "iodepth_batch",
+ .lname = "IO Depth batch",
.alias = "iodepth_batch_submit",
.type = FIO_OPT_INT,
.off1 = td_var_offset(iodepth_batch),
.help = "Number of IO buffers to submit in one go",
.parent = "iodepth",
+ .hide = 1,
.minval = 1,
+ .interval = 1,
.def = "1",
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_IO_BASIC,
},
{
.name = "iodepth_batch_complete",
+ .lname = "IO Depth batch complete",
.type = FIO_OPT_INT,
.off1 = td_var_offset(iodepth_batch_complete),
.help = "Number of IO buffers to retrieve in one go",
.parent = "iodepth",
+ .hide = 1,
.minval = 0,
+ .interval = 1,
.def = "1",
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_IO_BASIC,
},
{
.name = "iodepth_low",
+ .lname = "IO Depth batch low",
.type = FIO_OPT_INT,
.off1 = td_var_offset(iodepth_low),
.help = "Low water mark for queuing depth",
.parent = "iodepth",
+ .hide = 1,
+ .interval = 1,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_IO_BASIC,
},
{
.name = "size",
+ .lname = "Size",
.type = FIO_OPT_STR_VAL,
.cb = str_size_cb,
.help = "Total size of device or files",
+ .interval = 1024 * 1024,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "fill_device",
+ .lname = "Fill device",
.alias = "fill_fs",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(fill_device),
.help = "Write until an ENOSPC error occurs",
.def = "0",
+ .category = FIO_OPT_C_FILE,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "filesize",
+ .lname = "File size",
.type = FIO_OPT_STR_VAL,
.off1 = td_var_offset(file_size_low),
.off2 = td_var_offset(file_size_high),
.minval = 1,
.help = "Size of individual files",
+ .interval = 1024 * 1024,
+ .category = FIO_OPT_C_FILE,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "offset",
+ .lname = "IO offset",
.alias = "fileoffset",
.type = FIO_OPT_STR_VAL,
.off1 = td_var_offset(start_offset),
.help = "Start IO from this offset",
.def = "0",
+ .interval = 1024 * 1024,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "offset_increment",
+ .lname = "IO offset increment",
.type = FIO_OPT_STR_VAL,
.off1 = td_var_offset(offset_increment),
.help = "What is the increment from one offset to the next",
.parent = "offset",
+ .hide = 1,
.def = "0",
+ .interval = 1024 * 1024,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "bs",
+ .lname = "Block size",
.alias = "blocksize",
.type = FIO_OPT_INT,
.off1 = td_var_offset(bs[DDIR_READ]),
.help = "Block size unit",
.def = "4k",
.parent = "rw",
+ .hide = 1,
+ .interval = 512,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "ba",
+ .lname = "Block size align",
.alias = "blockalign",
.type = FIO_OPT_INT,
.off1 = td_var_offset(ba[DDIR_READ]),
.minval = 1,
.help = "IO block offset alignment",
.parent = "rw",
+ .hide = 1,
+ .interval = 512,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "bsrange",
+ .lname = "Block size range",
.alias = "blocksize_range",
.type = FIO_OPT_RANGE,
.off1 = td_var_offset(min_bs[DDIR_READ]),
.minval = 1,
.help = "Set block size range (in more detail than bs)",
.parent = "rw",
+ .hide = 1,
+ .interval = 4096,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "bssplit",
+ .lname = "Block size split",
.type = FIO_OPT_STR,
.cb = str_bssplit_cb,
.help = "Set a specific mix of block sizes",
.parent = "rw",
+ .hide = 1,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "bs_unaligned",
+ .lname = "Block size unaligned",
.alias = "blocksize_unaligned",
.type = FIO_OPT_STR_SET,
.off1 = td_var_offset(bs_unaligned),
.help = "Don't sector align IO buffer sizes",
.parent = "rw",
+ .hide = 1,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "randrepeat",
+ .lname = "Random repeatable",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(rand_repeatable),
.help = "Use repeatable random IO pattern",
.def = "1",
.parent = "rw",
+ .hide = 1,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_RANDOM,
},
{
.name = "use_os_rand",
+ .lname = "Use OS random",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(use_os_rand),
.help = "Set to use OS random generator",
.def = "0",
.parent = "rw",
+ .hide = 1,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_RANDOM,
},
{
.name = "norandommap",
+ .lname = "No randommap",
.type = FIO_OPT_STR_SET,
.off1 = td_var_offset(norandommap),
.help = "Accept potential duplicate random blocks",
.parent = "rw",
+ .hide = 1,
+ .hide_on_set = 1,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_RANDOM,
},
{
.name = "softrandommap",
+ .lname = "Soft randommap",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(softrandommap),
.help = "Set norandommap if randommap allocation fails",
.parent = "norandommap",
+ .hide = 1,
.def = "0",
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_RANDOM,
},
{
.name = "random_generator",
.help = "Variable length LFSR",
},
},
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_RANDOM,
},
{
.name = "random_distribution",
.help = "Pareto distribution",
},
},
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_RANDOM,
},
{
.name = "nrfiles",
+ .lname = "Number of files",
.alias = "nr_files",
.type = FIO_OPT_INT,
.off1 = td_var_offset(nr_files),
.help = "Split job workload between this number of files",
.def = "1",
+ .interval = 1,
+ .category = FIO_OPT_C_FILE,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "openfiles",
+ .lname = "Number of open files",
.type = FIO_OPT_INT,
.off1 = td_var_offset(open_files),
.help = "Number of files to keep open at the same time",
+ .category = FIO_OPT_C_FILE,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "file_service_type",
+ .lname = "File service type",
.type = FIO_OPT_STR,
.cb = str_fst_cb,
.off1 = td_var_offset(file_service_type),
.help = "How to select which file to service next",
.def = "roundrobin",
+ .category = FIO_OPT_C_FILE,
+ .group = FIO_OPT_G_INVALID,
.posval = {
{ .ival = "random",
.oval = FIO_FSERVICE_RANDOM,
},
},
.parent = "nrfiles",
+ .hide = 1,
},
#ifdef CONFIG_POSIX_FALLOCATE
{
.name = "fallocate",
+ .lname = "Fallocate",
.type = FIO_OPT_STR,
.off1 = td_var_offset(fallocate_mode),
.help = "Whether pre-allocation is performed when laying out files",
.def = "posix",
+ .category = FIO_OPT_C_FILE,
+ .group = FIO_OPT_G_INVALID,
.posval = {
{ .ival = "none",
.oval = FIO_FALLOCATE_NONE,
#endif /* CONFIG_POSIX_FALLOCATE */
{
.name = "fadvise_hint",
+ .lname = "Fadvise hint",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(fadvise_hint),
.help = "Use fadvise() to advise the kernel on IO pattern",
.def = "1",
+ .category = FIO_OPT_C_FILE,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "fsync",
+ .lname = "Fsync",
.type = FIO_OPT_INT,
.off1 = td_var_offset(fsync_blocks),
.help = "Issue fsync for writes every given number of blocks",
.def = "0",
+ .interval = 1,
+ .category = FIO_OPT_C_FILE,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "fdatasync",
+ .lname = "Fdatasync",
.type = FIO_OPT_INT,
.off1 = td_var_offset(fdatasync_blocks),
.help = "Issue fdatasync for writes every given number of blocks",
.def = "0",
+ .interval = 1,
+ .category = FIO_OPT_C_FILE,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "write_barrier",
+ .lname = "Write barrier",
.type = FIO_OPT_INT,
.off1 = td_var_offset(barrier_blocks),
.help = "Make every Nth write a barrier write",
.def = "0",
+ .interval = 1,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_INVALID,
},
#ifdef CONFIG_SYNC_FILE_RANGE
{
.name = "sync_file_range",
+ .lname = "Sync file range",
.posval = {
{ .ival = "wait_before",
.oval = SYNC_FILE_RANGE_WAIT_BEFORE,
.cb = str_sfr_cb,
.off1 = td_var_offset(sync_file_range),
.help = "Use sync_file_range()",
+ .category = FIO_OPT_C_FILE,
+ .group = FIO_OPT_G_INVALID,
},
#endif
{
.name = "direct",
+ .lname = "Direct I/O",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(odirect),
.help = "Use O_DIRECT IO (negates buffered)",
.def = "0",
+ .inverse = "buffered",
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_IO_TYPE,
},
{
.name = "buffered",
+ .lname = "Buffered I/O",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(odirect),
.neg = 1,
.help = "Use buffered IO (negates direct)",
.def = "1",
+ .inverse = "direct",
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_IO_TYPE,
},
{
.name = "overwrite",
+ .lname = "Overwrite",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(overwrite),
.help = "When writing, set whether to overwrite current data",
.def = "0",
+ .category = FIO_OPT_C_FILE,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "loops",
+ .lname = "Loops",
.type = FIO_OPT_INT,
.off1 = td_var_offset(loops),
.help = "Number of times to run the job",
.def = "1",
+ .interval = 1,
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_RUNTIME,
},
{
.name = "numjobs",
+ .lname = "Number of jobs",
.type = FIO_OPT_INT,
.off1 = td_var_offset(numjobs),
.help = "Duplicate this job this many times",
.def = "1",
+ .interval = 1,
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_RUNTIME,
},
{
.name = "startdelay",
+ .lname = "Start delay",
.type = FIO_OPT_STR_VAL_TIME,
.off1 = td_var_offset(start_delay),
.help = "Only start job when this period has passed",
.def = "0",
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_RUNTIME,
},
{
.name = "runtime",
+ .lname = "Runtime",
.alias = "timeout",
.type = FIO_OPT_STR_VAL_TIME,
.off1 = td_var_offset(timeout),
.help = "Stop workload when this amount of time has passed",
.def = "0",
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_RUNTIME,
},
{
.name = "time_based",
+ .lname = "Time based",
.type = FIO_OPT_STR_SET,
.off1 = td_var_offset(time_based),
.help = "Keep running until runtime/timeout is met",
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_RUNTIME,
},
{
.name = "ramp_time",
+ .lname = "Ramp time",
.type = FIO_OPT_STR_VAL_TIME,
.off1 = td_var_offset(ramp_time),
.help = "Ramp up time before measuring performance",
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_RUNTIME,
},
{
.name = "clocksource",
+ .lname = "Clock source",
.type = FIO_OPT_STR,
.cb = fio_clock_source_cb,
.off1 = td_var_offset(clocksource),
.help = "What type of timing source to use",
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_CLOCK,
.posval = {
#ifdef CONFIG_GETTIMEOFDAY
{ .ival = "gettimeofday",
{
.name = "mem",
.alias = "iomem",
+ .lname = "I/O Memory",
.type = FIO_OPT_STR,
.cb = str_mem_cb,
.off1 = td_var_offset(mem_type),
.help = "Backing type for IO buffers",
.def = "malloc",
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_INVALID,
.posval = {
{ .ival = "malloc",
.oval = MEM_MALLOC,
{
.name = "iomem_align",
.alias = "mem_align",
+ .lname = "I/O memory alignment",
.type = FIO_OPT_INT,
.off1 = td_var_offset(mem_align),
.minval = 0,
.help = "IO memory buffer offset alignment",
.def = "0",
.parent = "iomem",
+ .hide = 1,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "verify",
+ .lname = "Verify",
.type = FIO_OPT_STR,
.off1 = td_var_offset(verify),
.help = "Verify data written",
- .cb = str_verify_cb,
.def = "0",
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_VERIFY,
.posval = {
{ .ival = "0",
.oval = VERIFY_NONE,
},
{
.name = "do_verify",
+ .lname = "Perform verify step",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(do_verify),
.help = "Run verification stage after write",
.def = "1",
.parent = "verify",
+ .hide = 1,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_VERIFY,
},
{
.name = "verifysort",
+ .lname = "Verify sort",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(verifysort),
.help = "Sort written verify blocks for read back",
.def = "1",
.parent = "verify",
+ .hide = 1,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_VERIFY,
},
{
.name = "verifysort_nr",
.maxval = 131072,
.def = "1024",
.parent = "verify",
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_VERIFY,
},
{
.name = "verify_interval",
+ .lname = "Verify interval",
.type = FIO_OPT_INT,
.off1 = td_var_offset(verify_interval),
.minval = 2 * sizeof(struct verify_header),
.help = "Store verify buffer header every N bytes",
.parent = "verify",
+ .hide = 1,
+ .interval = 2 * sizeof(struct verify_header),
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_VERIFY,
},
{
.name = "verify_offset",
+ .lname = "Verify offset",
.type = FIO_OPT_INT,
.help = "Offset verify header location by N bytes",
- .def = "0",
- .cb = str_verify_offset_cb,
+ .off1 = td_var_offset(verify_offset),
+ .minval = sizeof(struct verify_header),
.parent = "verify",
+ .hide = 1,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_VERIFY,
},
{
.name = "verify_pattern",
+ .lname = "Verify pattern",
.type = FIO_OPT_STR,
.cb = str_verify_pattern_cb,
.help = "Fill pattern for IO buffers",
.parent = "verify",
+ .hide = 1,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_VERIFY,
},
{
.name = "verify_fatal",
+ .lname = "Verify fatal",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(verify_fatal),
.def = "0",
.help = "Exit on a single verify failure, don't continue",
.parent = "verify",
+ .hide = 1,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_VERIFY,
},
{
.name = "verify_dump",
+ .lname = "Verify dump",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(verify_dump),
.def = "0",
.help = "Dump contents of good and bad blocks on failure",
.parent = "verify",
+ .hide = 1,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_VERIFY,
},
{
.name = "verify_async",
+ .lname = "Verify asynchronously",
.type = FIO_OPT_INT,
.off1 = td_var_offset(verify_async),
.def = "0",
.help = "Number of async verifier threads to use",
.parent = "verify",
+ .hide = 1,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_VERIFY,
},
{
.name = "verify_backlog",
+ .lname = "Verify backlog",
.type = FIO_OPT_STR_VAL,
.off1 = td_var_offset(verify_backlog),
.help = "Verify after this number of blocks are written",
.parent = "verify",
+ .hide = 1,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_VERIFY,
},
{
.name = "verify_backlog_batch",
+ .lname = "Verify backlog batch",
.type = FIO_OPT_INT,
.off1 = td_var_offset(verify_batch),
.help = "Verify this number of IO blocks",
.parent = "verify",
+ .hide = 1,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_VERIFY,
},
#ifdef FIO_HAVE_CPU_AFFINITY
{
.name = "verify_async_cpus",
+ .lname = "Async verify CPUs",
.type = FIO_OPT_STR,
.cb = str_verify_cpus_allowed_cb,
.help = "Set CPUs allowed for async verify threads",
.parent = "verify_async",
+ .hide = 1,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_VERIFY,
},
#endif
{
.off1 = td_var_offset(experimental_verify),
.type = FIO_OPT_BOOL,
.help = "Enable experimental verification",
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_VERIFY,
},
#ifdef FIO_HAVE_TRIM
{
.name = "trim_percentage",
+ .lname = "Trim percentage",
.type = FIO_OPT_INT,
- .cb = str_verify_trim_cb,
+ .off1 = td_var_offset(trim_percentage),
+ .minval = 0,
.maxval = 100,
.help = "Number of verify blocks to discard/trim",
.parent = "verify",
.def = "0",
+ .interval = 1,
+ .hide = 1,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_TRIM,
},
{
.name = "trim_verify_zero",
- .type = FIO_OPT_INT,
+ .lname = "Verify trim zero",
+ .type = FIO_OPT_BOOL,
.help = "Verify that trim/discarded blocks are returned as zeroes",
.off1 = td_var_offset(trim_zero),
.parent = "trim_percentage",
+ .hide = 1,
.def = "1",
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_TRIM,
},
{
.name = "trim_backlog",
+ .lname = "Trim backlog",
.type = FIO_OPT_STR_VAL,
.off1 = td_var_offset(trim_backlog),
.help = "Trim after this number of blocks are written",
.parent = "trim_percentage",
+ .hide = 1,
+ .interval = 1,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_TRIM,
},
{
.name = "trim_backlog_batch",
+ .lname = "Trim backlog batch",
.type = FIO_OPT_INT,
.off1 = td_var_offset(trim_batch),
.help = "Trim this number of IO blocks",
.parent = "trim_percentage",
+ .hide = 1,
+ .interval = 1,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_TRIM,
},
#endif
{
.name = "write_iolog",
+ .lname = "Write I/O log",
.type = FIO_OPT_STR_STORE,
.off1 = td_var_offset(write_iolog_file),
.help = "Store IO pattern to file",
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_IOLOG,
},
{
.name = "read_iolog",
+ .lname = "Read I/O log",
.type = FIO_OPT_STR_STORE,
.off1 = td_var_offset(read_iolog_file),
.help = "Playback IO pattern from file",
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_IOLOG,
},
{
.name = "replay_no_stall",
- .type = FIO_OPT_INT,
+ .lname = "Don't stall on replay",
+ .type = FIO_OPT_BOOL,
.off1 = td_var_offset(no_stall),
.def = "0",
.parent = "read_iolog",
+ .hide = 1,
.help = "Playback IO pattern file as fast as possible without stalls",
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_IOLOG,
},
{
.name = "replay_redirect",
+ .lname = "Redirect device for replay",
.type = FIO_OPT_STR_STORE,
.off1 = td_var_offset(replay_redirect),
.parent = "read_iolog",
+ .hide = 1,
.help = "Replay all I/O onto this device, regardless of trace device",
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_IOLOG,
},
{
.name = "exec_prerun",
+ .lname = "Pre-execute runnable",
.type = FIO_OPT_STR_STORE,
.off1 = td_var_offset(exec_prerun),
.help = "Execute this file prior to running job",
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "exec_postrun",
+ .lname = "Post-execute runnable",
.type = FIO_OPT_STR_STORE,
.off1 = td_var_offset(exec_postrun),
.help = "Execute this file after running job",
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_INVALID,
},
#ifdef FIO_HAVE_IOSCHED_SWITCH
{
.name = "ioscheduler",
+ .lname = "I/O scheduler",
.type = FIO_OPT_STR_STORE,
.off1 = td_var_offset(ioscheduler),
.help = "Use this IO scheduler on the backing device",
+ .category = FIO_OPT_C_FILE,
+ .group = FIO_OPT_G_INVALID,
},
#endif
{
.name = "zonesize",
+ .lname = "Zone size",
.type = FIO_OPT_STR_VAL,
.off1 = td_var_offset(zone_size),
.help = "Amount of data to read per zone",
.def = "0",
+ .interval = 1024 * 1024,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_ZONE,
},
{
.name = "zonerange",
+ .lname = "Zone range",
.type = FIO_OPT_STR_VAL,
.off1 = td_var_offset(zone_range),
.help = "Give size of an IO zone",
.def = "0",
+ .interval = 1024 * 1024,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_ZONE,
},
{
.name = "zoneskip",
+ .lname = "Zone skip",
.type = FIO_OPT_STR_VAL,
.off1 = td_var_offset(zone_skip),
.help = "Space between IO zones",
.def = "0",
+ .interval = 1024 * 1024,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_ZONE,
},
{
.name = "lockmem",
+ .lname = "Lock memory",
.type = FIO_OPT_STR_VAL,
- .cb = str_lockmem_cb,
+ .off1 = td_var_offset(lockmem),
.help = "Lock down this amount of memory",
.def = "0",
+ .interval = 1024 * 1024,
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "rwmixread",
+ .lname = "Read/write mix read",
.type = FIO_OPT_INT,
.cb = str_rwmix_read_cb,
.maxval = 100,
.help = "Percentage of mixed workload that is reads",
.def = "50",
+ .interval = 5,
+ .inverse = "rwmixwrite",
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_RWMIX,
},
{
.name = "rwmixwrite",
+ .lname = "Read/write mix write",
.type = FIO_OPT_INT,
.cb = str_rwmix_write_cb,
.maxval = 100,
.help = "Percentage of mixed workload that is writes",
.def = "50",
+ .interval = 5,
+ .inverse = "rwmixread",
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_RWMIX,
},
{
.name = "rwmixcycle",
+ .lname = "Read/write mix cycle",
.type = FIO_OPT_DEPRECATED,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_RWMIX,
},
{
.name = "nice",
+ .lname = "Nice",
.type = FIO_OPT_INT,
.off1 = td_var_offset(nice),
.help = "Set job CPU nice value",
.minval = -19,
.maxval = 20,
.def = "0",
+ .interval = 1,
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_CRED,
},
#ifdef FIO_HAVE_IOPRIO
{
.name = "prio",
+ .lname = "I/O nice priority",
.type = FIO_OPT_INT,
- .cb = str_prio_cb,
+ .off1 = td_var_offset(ioprio),
.help = "Set job IO priority value",
.minval = 0,
.maxval = 7,
+ .interval = 1,
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_CRED,
},
{
.name = "prioclass",
+ .lname = "I/O nice priority class",
.type = FIO_OPT_INT,
- .cb = str_prioclass_cb,
+ .off1 = td_var_offset(ioprio_class),
.help = "Set job IO priority class",
.minval = 0,
.maxval = 3,
+ .interval = 1,
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_CRED,
},
#endif
{
.name = "thinktime",
+ .lname = "Thinktime",
.type = FIO_OPT_INT,
.off1 = td_var_offset(thinktime),
.help = "Idle time between IO buffers (usec)",
.def = "0",
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_THINKTIME,
},
{
.name = "thinktime_spin",
+ .lname = "Thinktime spin",
.type = FIO_OPT_INT,
.off1 = td_var_offset(thinktime_spin),
.help = "Start think time by spinning this amount (usec)",
.def = "0",
.parent = "thinktime",
+ .hide = 1,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_THINKTIME,
},
{
.name = "thinktime_blocks",
+ .lname = "Thinktime blocks",
.type = FIO_OPT_INT,
.off1 = td_var_offset(thinktime_blocks),
.help = "IO buffer period between 'thinktime'",
.def = "1",
.parent = "thinktime",
+ .hide = 1,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_THINKTIME,
},
{
.name = "rate",
+ .lname = "I/O rate",
.type = FIO_OPT_INT,
.off1 = td_var_offset(rate[DDIR_READ]),
.off2 = td_var_offset(rate[DDIR_WRITE]),
.off3 = td_var_offset(rate[DDIR_TRIM]),
.help = "Set bandwidth rate",
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_RATE,
},
{
.name = "ratemin",
+ .lname = "I/O min rate",
.type = FIO_OPT_INT,
.off1 = td_var_offset(ratemin[DDIR_READ]),
.off2 = td_var_offset(ratemin[DDIR_WRITE]),
.off3 = td_var_offset(ratemin[DDIR_TRIM]),
.help = "Job must meet this rate or it will be shutdown",
.parent = "rate",
+ .hide = 1,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_RATE,
},
{
.name = "rate_iops",
+ .lname = "I/O rate IOPS",
.type = FIO_OPT_INT,
.off1 = td_var_offset(rate_iops[DDIR_READ]),
.off2 = td_var_offset(rate_iops[DDIR_WRITE]),
.off3 = td_var_offset(rate_iops[DDIR_TRIM]),
.help = "Limit IO used to this number of IO operations/sec",
+ .hide = 1,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_RATE,
},
{
.name = "rate_iops_min",
+ .lname = "I/O min rate IOPS",
.type = FIO_OPT_INT,
.off1 = td_var_offset(rate_iops_min[DDIR_READ]),
.off2 = td_var_offset(rate_iops_min[DDIR_WRITE]),
.off3 = td_var_offset(rate_iops_min[DDIR_TRIM]),
.help = "Job must meet this rate or it will be shut down",
.parent = "rate_iops",
+ .hide = 1,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_RATE,
},
{
.name = "ratecycle",
+ .lname = "I/O rate cycle",
.type = FIO_OPT_INT,
.off1 = td_var_offset(ratecycle),
.help = "Window average for rate limits (msec)",
.def = "1000",
.parent = "rate",
+ .hide = 1,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_RATE,
},
{
.name = "max_latency",
.type = FIO_OPT_INT,
.off1 = td_var_offset(max_latency),
.help = "Maximum tolerated IO latency (usec)",
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_RATE,
},
{
.name = "invalidate",
+ .lname = "Cache invalidate",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(invalidate_cache),
.help = "Invalidate buffer/page cache prior to running job",
.def = "1",
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_IO_TYPE,
},
{
.name = "sync",
+ .lname = "Synchronous I/O",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(sync_io),
.help = "Use O_SYNC for buffered writes",
.def = "0",
.parent = "buffered",
- },
- {
- .name = "bwavgtime",
- .type = FIO_OPT_INT,
- .off1 = td_var_offset(bw_avg_time),
- .help = "Time window over which to calculate bandwidth"
- " (msec)",
- .def = "500",
- .parent = "write_bw_log",
- },
- {
- .name = "iopsavgtime",
- .type = FIO_OPT_INT,
- .off1 = td_var_offset(iops_avg_time),
- .help = "Time window over which to calculate IOPS (msec)",
- .def = "500",
- .parent = "write_iops_log",
+ .hide = 1,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_IO_TYPE,
},
{
.name = "create_serialize",
+ .lname = "Create serialize",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(create_serialize),
.help = "Serialize creating of job files",
.def = "1",
+ .category = FIO_OPT_C_FILE,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "create_fsync",
+ .lname = "Create fsync",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(create_fsync),
.help = "fsync file after creation",
.def = "1",
+ .category = FIO_OPT_C_FILE,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "create_on_open",
+ .lname = "Create on open",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(create_on_open),
.help = "Create files when they are opened for IO",
.def = "0",
+ .category = FIO_OPT_C_FILE,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "create_only",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(create_only),
.help = "Only perform file creation phase",
+ .category = FIO_OPT_C_FILE,
.def = "0",
},
{
.name = "pre_read",
+ .lname = "Pre-read files",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(pre_read),
.help = "Pre-read files before starting official testing",
.def = "0",
- },
- {
- .name = "cpuload",
- .type = FIO_OPT_INT,
- .off1 = td_var_offset(cpuload),
- .help = "Use this percentage of CPU",
- },
- {
- .name = "cpuchunks",
- .type = FIO_OPT_INT,
- .off1 = td_var_offset(cpucycle),
- .help = "Length of the CPU burn cycles (usecs)",
- .def = "50000",
- .parent = "cpuload",
+ .category = FIO_OPT_C_FILE,
+ .group = FIO_OPT_G_INVALID,
},
#ifdef FIO_HAVE_CPU_AFFINITY
{
.name = "cpumask",
+ .lname = "CPU mask",
.type = FIO_OPT_INT,
.cb = str_cpumask_cb,
.help = "CPU affinity mask",
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_CRED,
},
{
.name = "cpus_allowed",
+ .lname = "CPUs allowed",
.type = FIO_OPT_STR,
.cb = str_cpus_allowed_cb,
.help = "Set CPUs allowed",
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_CRED,
},
#endif
#ifdef CONFIG_LIBNUMA
#endif
{
.name = "end_fsync",
+ .lname = "End fsync",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(end_fsync),
.help = "Include fsync at the end of job",
.def = "0",
+ .category = FIO_OPT_C_FILE,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "fsync_on_close",
+ .lname = "Fsync on close",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(fsync_on_close),
.help = "fsync files on close",
.def = "0",
+ .category = FIO_OPT_C_FILE,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "unlink",
+ .lname = "Unlink file",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(unlink),
.help = "Unlink created files after job has completed",
.def = "0",
+ .category = FIO_OPT_C_FILE,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "exitall",
+ .lname = "Exit-all on terminate",
.type = FIO_OPT_STR_SET,
.cb = str_exitall_cb,
.help = "Terminate all jobs when one exits",
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_PROCESS,
},
{
.name = "stonewall",
+ .lname = "Wait for previous",
.alias = "wait_for_previous",
.type = FIO_OPT_STR_SET,
.off1 = td_var_offset(stonewall),
.help = "Insert a hard barrier between this job and previous",
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_PROCESS,
},
{
.name = "new_group",
+ .lname = "New group",
.type = FIO_OPT_STR_SET,
.off1 = td_var_offset(new_group),
.help = "Mark the start of a new group (for reporting)",
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_PROCESS,
},
{
.name = "thread",
+ .lname = "Thread",
.type = FIO_OPT_STR_SET,
.off1 = td_var_offset(use_thread),
- .help = "Use threads instead of forks",
+ .help = "Use threads instead of processes",
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_PROCESS,
},
{
.name = "write_bw_log",
- .type = FIO_OPT_STR,
- .off1 = td_var_offset(write_bw_log),
- .cb = str_write_bw_log_cb,
+ .lname = "Write bandwidth log",
+ .type = FIO_OPT_STR_STORE,
+ .off1 = td_var_offset(bw_log_file),
.help = "Write log of bandwidth during run",
+ .category = FIO_OPT_C_LOG,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "write_lat_log",
- .type = FIO_OPT_STR,
- .off1 = td_var_offset(write_lat_log),
- .cb = str_write_lat_log_cb,
+ .lname = "Write latency log",
+ .type = FIO_OPT_STR_STORE,
+ .off1 = td_var_offset(lat_log_file),
.help = "Write log of latency during run",
+ .category = FIO_OPT_C_LOG,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "write_iops_log",
+ .lname = "Write IOPS log",
.type = FIO_OPT_STR,
- .off1 = td_var_offset(write_iops_log),
- .cb = str_write_iops_log_cb,
+ .off1 = td_var_offset(iops_log_file),
.help = "Write log of IOPS during run",
+ .category = FIO_OPT_C_LOG,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "log_avg_msec",
+ .lname = "Log averaging (msec)",
.type = FIO_OPT_INT,
.off1 = td_var_offset(log_avg_msec),
.help = "Average bw/iops/lat logs over this period of time",
.def = "0",
+ .category = FIO_OPT_C_LOG,
+ .group = FIO_OPT_G_INVALID,
},
{
- .name = "hugepage-size",
+ .name = "bwavgtime",
+ .lname = "Bandwidth average time",
.type = FIO_OPT_INT,
- .off1 = td_var_offset(hugepage_size),
- .help = "When using hugepages, specify size of each page",
- .def = __fio_stringify(FIO_HUGE_PAGE),
+ .off1 = td_var_offset(bw_avg_time),
+ .help = "Time window over which to calculate bandwidth"
+ " (msec)",
+ .def = "500",
+ .parent = "write_bw_log",
+ .hide = 1,
+ .interval = 100,
+ .category = FIO_OPT_C_LOG,
+ .group = FIO_OPT_G_INVALID,
+ },
+ {
+ .name = "iopsavgtime",
+ .lname = "IOPS average time",
+ .type = FIO_OPT_INT,
+ .off1 = td_var_offset(iops_avg_time),
+ .help = "Time window over which to calculate IOPS (msec)",
+ .def = "500",
+ .parent = "write_iops_log",
+ .hide = 1,
+ .interval = 100,
+ .category = FIO_OPT_C_LOG,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "group_reporting",
- .type = FIO_OPT_STR_SET,
+ .lname = "Group reporting",
+ .type = FIO_OPT_BOOL,
.off1 = td_var_offset(group_reporting),
.help = "Do reporting on a per-group basis",
+ .def = "1",
+ .category = FIO_OPT_C_STAT,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "zero_buffers",
+ .lname = "Zero I/O buffers",
.type = FIO_OPT_STR_SET,
.off1 = td_var_offset(zero_buffers),
.help = "Init IO buffers to all zeroes",
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_IO_BUF,
},
{
.name = "refill_buffers",
+ .lname = "Refill I/O buffers",
.type = FIO_OPT_STR_SET,
.off1 = td_var_offset(refill_buffers),
.help = "Refill IO buffers on every IO submit",
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_IO_BUF,
},
{
.name = "scramble_buffers",
+ .lname = "Scramble I/O buffers",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(scramble_buffers),
.help = "Slightly scramble buffers on every IO submit",
.def = "1",
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_IO_BUF,
},
{
.name = "buffer_compress_percentage",
+ .lname = "Buffer compression percentage",
.type = FIO_OPT_INT,
.off1 = td_var_offset(compress_percentage),
.maxval = 100,
.minval = 1,
.help = "How compressible the buffer is (approximately)",
+ .interval = 5,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_IO_BUF,
},
{
.name = "buffer_compress_chunk",
+ .lname = "Buffer compression chunk size",
.type = FIO_OPT_INT,
.off1 = td_var_offset(compress_chunk),
.parent = "buffer_compress_percentage",
+ .hide = 1,
.help = "Size of compressible region in buffer",
+ .interval = 256,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_IO_BUF,
},
{
.name = "clat_percentiles",
+ .lname = "Completion latency percentiles",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(clat_percentiles),
.help = "Enable the reporting of completion latency percentiles",
.def = "1",
+ .category = FIO_OPT_C_STAT,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "percentile_list",
+ .lname = "Completion latency percentile list",
.type = FIO_OPT_FLOAT_LIST,
.off1 = td_var_offset(percentile_list),
.off2 = td_var_offset(percentile_precision),
.maxlen = FIO_IO_U_LIST_MAX_LEN,
.minfp = 0.0,
.maxfp = 100.0,
+ .category = FIO_OPT_C_STAT,
+ .group = FIO_OPT_G_INVALID,
},
#ifdef FIO_HAVE_DISK_UTIL
{
.name = "disk_util",
+ .lname = "Disk utilization",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(do_disk_util),
.help = "Log disk utilization statistics",
.def = "1",
+ .category = FIO_OPT_C_STAT,
+ .group = FIO_OPT_G_INVALID,
},
#endif
{
.name = "gtod_reduce",
+ .lname = "Reduce gettimeofday() calls",
.type = FIO_OPT_BOOL,
.help = "Greatly reduce number of gettimeofday() calls",
.cb = str_gtod_reduce_cb,
.def = "0",
+ .hide_on_set = 1,
+ .category = FIO_OPT_C_STAT,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "disable_lat",
+ .lname = "Disable all latency stats",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(disable_lat),
.help = "Disable latency numbers",
.parent = "gtod_reduce",
+ .hide = 1,
.def = "0",
+ .category = FIO_OPT_C_STAT,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "disable_clat",
+ .lname = "Disable completion latency stats",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(disable_clat),
.help = "Disable completion latency numbers",
.parent = "gtod_reduce",
+ .hide = 1,
.def = "0",
+ .category = FIO_OPT_C_STAT,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "disable_slat",
+ .lname = "Disable submission latency stats",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(disable_slat),
.help = "Disable submission latency numbers",
.parent = "gtod_reduce",
+ .hide = 1,
.def = "0",
+ .category = FIO_OPT_C_STAT,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "disable_bw_measurement",
+ .lname = "Disable bandwidth stats",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(disable_bw),
.help = "Disable bandwidth logging",
.parent = "gtod_reduce",
+ .hide = 1,
.def = "0",
+ .category = FIO_OPT_C_STAT,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "gtod_cpu",
+ .lname = "Dedicated gettimeofday() CPU",
.type = FIO_OPT_INT,
.cb = str_gtod_cpu_cb,
.help = "Set up dedicated gettimeofday() thread on this CPU",
.verify = gtod_cpu_verify,
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_CLOCK,
},
{
.name = "unified_rw_reporting",
.off1 = td_var_offset(unified_rw_rep),
.help = "Unify reporting across data direction",
.def = "0",
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "continue_on_error",
+ .lname = "Continue on error",
.type = FIO_OPT_STR,
.off1 = td_var_offset(continue_on_error),
.help = "Continue on non-fatal errors during IO",
.def = "none",
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_ERR,
.posval = {
{ .ival = "none",
.oval = ERROR_TYPE_NONE,
.cb = str_ignore_error_cb,
.help = "Set a specific list of errors to ignore",
.parent = "rw",
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_ERR,
},
{
.name = "error_dump",
.off1 = td_var_offset(error_dump),
.def = "0",
.help = "Dump info on each error",
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_ERR,
},
-
{
.name = "profile",
+ .lname = "Profile",
.type = FIO_OPT_STR_STORE,
.off1 = td_var_offset(profile),
.help = "Select a specific builtin performance test",
+ .category = FIO_OPT_C_PROFILE,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "cgroup",
+ .lname = "Cgroup",
.type = FIO_OPT_STR_STORE,
.off1 = td_var_offset(cgroup),
.help = "Add job to cgroup of this name",
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_CGROUP,
+ },
+ {
+ .name = "cgroup_nodelete",
+ .lname = "Cgroup no-delete",
+ .type = FIO_OPT_BOOL,
+ .off1 = td_var_offset(cgroup_nodelete),
+ .help = "Do not delete cgroups after job completion",
+ .def = "0",
+ .parent = "cgroup",
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_CGROUP,
},
{
.name = "cgroup_weight",
+ .lname = "Cgroup weight",
.type = FIO_OPT_INT,
.off1 = td_var_offset(cgroup_weight),
.help = "Use given weight for cgroup",
.minval = 100,
.maxval = 1000,
- },
- {
- .name = "cgroup_nodelete",
- .type = FIO_OPT_BOOL,
- .off1 = td_var_offset(cgroup_nodelete),
- .help = "Do not delete cgroups after job completion",
- .def = "0",
+ .parent = "cgroup",
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_CGROUP,
},
{
.name = "uid",
+ .lname = "User ID",
.type = FIO_OPT_INT,
.off1 = td_var_offset(uid),
.help = "Run job with this user ID",
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_CRED,
},
{
.name = "gid",
+ .lname = "Group ID",
.type = FIO_OPT_INT,
.off1 = td_var_offset(gid),
.help = "Run job with this group ID",
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_CRED,
+ },
+ {
+ .name = "kb_base",
+ .lname = "KB Base",
+ .type = FIO_OPT_INT,
+ .off1 = td_var_offset(kb_base),
+ .verify = kb_base_verify,
+ .prio = 1,
+ .def = "1024",
+ .help = "How many bytes per KB for reporting (1000 or 1024)",
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_INVALID,
+ },
+ {
+ .name = "hugepage-size",
+ .lname = "Hugepage size",
+ .type = FIO_OPT_INT,
+ .off1 = td_var_offset(hugepage_size),
+ .help = "When using hugepages, specify size of each page",
+ .def = __fio_stringify(FIO_HUGE_PAGE),
+ .interval = 1024 * 1024,
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "flow_id",
+ .lname = "I/O flow ID",
.type = FIO_OPT_INT,
.off1 = td_var_offset(flow_id),
.help = "The flow index ID to use",
.def = "0",
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_IO_FLOW,
},
{
.name = "flow",
+ .lname = "I/O flow weight",
.type = FIO_OPT_INT,
.off1 = td_var_offset(flow),
.help = "Weight for flow control of this job",
.parent = "flow_id",
+ .hide = 1,
.def = "0",
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_IO_FLOW,
},
{
.name = "flow_watermark",
+ .lname = "I/O flow watermark",
.type = FIO_OPT_INT,
.off1 = td_var_offset(flow_watermark),
.help = "High watermark for flow control. This option"
" should be set to the same value for all threads"
" with non-zero flow.",
.parent = "flow_id",
+ .hide = 1,
.def = "1024",
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_IO_FLOW,
},
{
.name = "flow_sleep",
+ .lname = "I/O flow sleep",
.type = FIO_OPT_INT,
.off1 = td_var_offset(flow_sleep),
.help = "How many microseconds to sleep after being held"
" back by the flow control mechanism",
.parent = "flow_id",
+ .hide = 1,
.def = "0",
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_IO_FLOW,
},
{
.name = NULL,
{
unsigned int i;
- options_init(options);
+ options_init(fio_options);
i = 0;
while (long_options[i].name)
i++;
- options_to_lopts(options, long_options, i, FIO_GETOPT_JOB);
+ options_to_lopts(fio_options, long_options, i, FIO_GETOPT_JOB);
}
struct fio_keyword {
sprintf(buf, "echo '%s' | %s", tmp, BC_APP);
f = popen(buf, "r");
- if (!f) {
+ if (!f)
return NULL;
- }
ret = fread(&buf[tmp - str], 1, 128 - (tmp - str), f);
- if (ret <= 0) {
+ if (ret <= 0)
return NULL;
- }
pclose(f);
buf[(tmp - str) + ret - 1] = '\0';
int i, ret, unknown;
char **opts_copy;
- sort_options(opts, options, num_opts);
+ sort_options(opts, fio_options, num_opts);
opts_copy = dup_and_sub_options(opts, num_opts);
for (ret = 0, i = 0, unknown = 0; i < num_opts; i++) {
struct fio_option *o;
- int newret = parse_option(opts_copy[i], opts[i], options, &o,
- td);
+ int newret = parse_option(opts_copy[i], opts[i], fio_options,
+ &o, td);
if (opts_copy[i]) {
if (newret && !o) {
int fio_cmd_option_parse(struct thread_data *td, const char *opt, char *val)
{
- return parse_cmd_option(opt, val, options, td);
+ return parse_cmd_option(opt, val, fio_options, td);
}
int fio_cmd_ioengine_option_parse(struct thread_data *td, const char *opt,
void fio_fill_default_options(struct thread_data *td)
{
- fill_default_options(td, options);
+ fill_default_options(td, fio_options);
}
int fio_show_option_help(const char *opt)
{
- return show_cmd_help(options, opt);
+ return show_cmd_help(fio_options, opt);
}
void options_mem_dupe(void *data, struct fio_option *options)
*/
void fio_options_mem_dupe(struct thread_data *td)
{
- options_mem_dupe(&td->o, options);
+ options_mem_dupe(&td->o, fio_options);
if (td->eo && td->io_ops) {
void *oldeo = td->eo;
unsigned int fio_get_kb_base(void *data)
{
- struct thread_data *td = data;
+ struct thread_options *o = data;
unsigned int kb_base = 0;
- if (td)
- kb_base = td->o.kb_base;
+ if (o)
+ kb_base = o->kb_base;
if (!kb_base)
kb_base = 1024;
struct fio_option *__o;
int opt_index = 0;
- __o = options;
+ __o = fio_options;
while (__o->name) {
opt_index++;
__o++;
}
- memcpy(&options[opt_index], o, sizeof(*o));
+ memcpy(&fio_options[opt_index], o, sizeof(*o));
return 0;
}
{
struct fio_option *o;
- o = options;
+ o = fio_options;
while (o->name) {
if (o->prof_name && !strcmp(o->prof_name, prof_name)) {
o->type = FIO_OPT_INVALID;
struct fio_option *o;
unsigned int i;
- o = find_option(options, optname);
+ o = find_option(fio_options, optname);
if (!o)
return;
struct fio_option *o;
unsigned int i;
- o = find_option(options, optname);
+ o = find_option(fio_options, optname);
if (!o)
return;
void fio_options_free(struct thread_data *td)
{
- options_free(options, td);
+ options_free(fio_options, td);
if (td->eo && td->io_ops && td->io_ops->options) {
options_free(td->io_ops->options, td->eo);
free(td->eo);
td->eo = NULL;
}
}
+
+/*
+ * Thin public wrapper around find_option() on the global fio_options
+ * table: look up a job option entry by its option name.
+ */
+struct fio_option *fio_option_find(const char *name)
+{
+	return find_option(fio_options, name);
+}
+
--- /dev/null
+#ifndef FIO_THREAD_OPTIONS_H
+#define FIO_THREAD_OPTIONS_H
+
+#include "arch/arch.h"
+#include "os/os.h"
+#include "stat.h"
+#include "gettime.h"
+
+/*
+ * What type of allocation to use for io buffers
+ */
+enum fio_memtype {
+ MEM_MALLOC = 0, /* ordinary malloc */
+ MEM_SHM, /* use shared memory segments */
+ MEM_SHMHUGE, /* use shared memory segments with huge pages */
+ MEM_MMAP, /* use anonynomous mmap */
+ MEM_MMAPHUGE, /* memory mapped huge file */
+};
+
+/*
+ * What type of errors to continue on when continue_on_error is used
+ */
+enum error_type_bit {
+ ERROR_TYPE_READ_BIT = 0,
+ ERROR_TYPE_WRITE_BIT = 1,
+ ERROR_TYPE_VERIFY_BIT = 2,
+ ERROR_TYPE_CNT = 3,
+};
+
+#define ERROR_STR_MAX 128
+
+enum error_type {
+ ERROR_TYPE_NONE = 0,
+ ERROR_TYPE_READ = 1 << ERROR_TYPE_READ_BIT,
+ ERROR_TYPE_WRITE = 1 << ERROR_TYPE_WRITE_BIT,
+ ERROR_TYPE_VERIFY = 1 << ERROR_TYPE_VERIFY_BIT,
+ ERROR_TYPE_ANY = 0xffff,
+};
+
+#define BSSPLIT_MAX 64
+
+struct bssplit {
+ uint32_t bs;
+ uint32_t perc;
+};
+
+struct thread_options {
+ int pad;
+ char *description;
+ char *name;
+ char *directory;
+ char *filename;
+ char *opendir;
+ char *ioengine;
+ char *mmapfile;
+ enum td_ddir td_ddir;
+ unsigned int rw_seq;
+ unsigned int kb_base;
+ unsigned int ddir_seq_nr;
+ long ddir_seq_add;
+ unsigned int iodepth;
+ unsigned int iodepth_low;
+ unsigned int iodepth_batch;
+ unsigned int iodepth_batch_complete;
+
+ unsigned long long size;
+ unsigned int size_percent;
+ unsigned int fill_device;
+ unsigned long long file_size_low;
+ unsigned long long file_size_high;
+ unsigned long long start_offset;
+
+ unsigned int bs[DDIR_RWDIR_CNT];
+ unsigned int ba[DDIR_RWDIR_CNT];
+ unsigned int min_bs[DDIR_RWDIR_CNT];
+ unsigned int max_bs[DDIR_RWDIR_CNT];
+ struct bssplit *bssplit[DDIR_RWDIR_CNT];
+ unsigned int bssplit_nr[DDIR_RWDIR_CNT];
+
+ int *ignore_error[ERROR_TYPE_CNT];
+ unsigned int ignore_error_nr[ERROR_TYPE_CNT];
+ unsigned int error_dump;
+
+ unsigned int nr_files;
+ unsigned int open_files;
+	enum file_lock_mode file_lock_mode;
+
+ unsigned int odirect;
+ unsigned int invalidate_cache;
+ unsigned int create_serialize;
+ unsigned int create_fsync;
+ unsigned int create_on_open;
+ unsigned int create_only;
+ unsigned int end_fsync;
+ unsigned int pre_read;
+ unsigned int sync_io;
+ unsigned int verify;
+ unsigned int do_verify;
+ unsigned int verifysort;
+ unsigned int verifysort_nr;
+ unsigned int verify_interval;
+ unsigned int verify_offset;
+ char verify_pattern[MAX_PATTERN_SIZE];
+ unsigned int verify_pattern_bytes;
+ unsigned int verify_fatal;
+ unsigned int verify_dump;
+ unsigned int verify_async;
+ unsigned long long verify_backlog;
+ unsigned int verify_batch;
+ unsigned int experimental_verify;
+ unsigned int use_thread;
+ unsigned int unlink;
+ unsigned int do_disk_util;
+ unsigned int override_sync;
+ unsigned int rand_repeatable;
+ unsigned int use_os_rand;
+ unsigned int log_avg_msec;
+ unsigned int norandommap;
+ unsigned int softrandommap;
+ unsigned int bs_unaligned;
+ unsigned int fsync_on_close;
+
+ unsigned int random_distribution;
+ fio_fp64_t zipf_theta;
+ fio_fp64_t pareto_h;
+
+ unsigned int random_generator;
+
+ unsigned int hugepage_size;
+ unsigned int rw_min_bs;
+ unsigned int thinktime;
+ unsigned int thinktime_spin;
+ unsigned int thinktime_blocks;
+ unsigned int fsync_blocks;
+ unsigned int fdatasync_blocks;
+ unsigned int barrier_blocks;
+ unsigned long long start_delay;
+ unsigned long long timeout;
+ unsigned long long ramp_time;
+ unsigned int overwrite;
+ unsigned int bw_avg_time;
+ unsigned int iops_avg_time;
+ unsigned int loops;
+ unsigned long long zone_range;
+ unsigned long long zone_size;
+ unsigned long long zone_skip;
+ unsigned long long lockmem;
+ enum fio_memtype mem_type;
+ unsigned int mem_align;
+
+ unsigned max_latency;
+
+ unsigned int stonewall;
+ unsigned int new_group;
+ unsigned int numjobs;
+ os_cpu_mask_t cpumask;
+ unsigned int cpumask_set;
+ os_cpu_mask_t verify_cpumask;
+ unsigned int verify_cpumask_set;
+#ifdef CONFIG_LIBNUMA
+ struct bitmask *numa_cpunodesmask;
+ unsigned int numa_cpumask_set;
+ unsigned short numa_mem_mode;
+ unsigned int numa_mem_prefer_node;
+ struct bitmask *numa_memnodesmask;
+ unsigned int numa_memmask_set;
+#endif
+ unsigned int iolog;
+ unsigned int rwmixcycle;
+ unsigned int rwmix[2];
+ unsigned int nice;
+ unsigned int ioprio;
+ unsigned int ioprio_class;
+ unsigned int file_service_type;
+ unsigned int group_reporting;
+ unsigned int fadvise_hint;
+ enum fio_fallocate_mode fallocate_mode;
+ unsigned int zero_buffers;
+ unsigned int refill_buffers;
+ unsigned int scramble_buffers;
+ unsigned int compress_percentage;
+ unsigned int compress_chunk;
+ unsigned int time_based;
+ unsigned int disable_lat;
+ unsigned int disable_clat;
+ unsigned int disable_slat;
+ unsigned int disable_bw;
+ unsigned int unified_rw_rep;
+ unsigned int gtod_reduce;
+ unsigned int gtod_cpu;
+ unsigned int gtod_offload;
+ enum fio_cs clocksource;
+ unsigned int no_stall;
+ unsigned int trim_percentage;
+ unsigned int trim_batch;
+ unsigned int trim_zero;
+ unsigned long long trim_backlog;
+ unsigned int clat_percentiles;
+ unsigned int percentile_precision; /* digits after decimal for percentiles */
+ fio_fp64_t percentile_list[FIO_IO_U_LIST_MAX_LEN];
+
+ char *read_iolog_file;
+ char *write_iolog_file;
+ char *bw_log_file;
+ char *lat_log_file;
+ char *iops_log_file;
+ char *replay_redirect;
+
+ /*
+ * Pre-run and post-run shell
+ */
+ char *exec_prerun;
+ char *exec_postrun;
+
+ unsigned int rate[DDIR_RWDIR_CNT];
+ unsigned int ratemin[DDIR_RWDIR_CNT];
+ unsigned int ratecycle;
+ unsigned int rate_iops[DDIR_RWDIR_CNT];
+ unsigned int rate_iops_min[DDIR_RWDIR_CNT];
+
+ char *ioscheduler;
+
+ /*
+ * I/O Error handling
+ */
+ enum error_type continue_on_error;
+
+ /*
+ * Benchmark profile type
+ */
+ char *profile;
+
+ /*
+ * blkio cgroup support
+ */
+ char *cgroup;
+ unsigned int cgroup_weight;
+ unsigned int cgroup_nodelete;
+
+ unsigned int uid;
+ unsigned int gid;
+
+ int flow_id;
+ int flow;
+ int flow_watermark;
+ unsigned int flow_sleep;
+
+ unsigned long long offset_increment;
+
+ unsigned int sync_file_range;
+};
+
+#define FIO_TOP_STR_MAX 256
+
+struct thread_options_pack {
+ uint8_t description[FIO_TOP_STR_MAX];
+ uint8_t name[FIO_TOP_STR_MAX];
+ uint8_t directory[FIO_TOP_STR_MAX];
+ uint8_t filename[FIO_TOP_STR_MAX];
+ uint8_t opendir[FIO_TOP_STR_MAX];
+ uint8_t ioengine[FIO_TOP_STR_MAX];
+ uint8_t mmapfile[FIO_TOP_STR_MAX];
+ uint32_t td_ddir;
+ uint32_t rw_seq;
+ uint32_t kb_base;
+ uint32_t ddir_seq_nr;
+ uint64_t ddir_seq_add;
+ uint32_t iodepth;
+ uint32_t iodepth_low;
+ uint32_t iodepth_batch;
+ uint32_t iodepth_batch_complete;
+
+ uint64_t size;
+ uint32_t size_percent;
+ uint32_t fill_device;
+ uint64_t file_size_low;
+ uint64_t file_size_high;
+ uint64_t start_offset;
+
+ uint32_t bs[DDIR_RWDIR_CNT];
+ uint32_t ba[DDIR_RWDIR_CNT];
+ uint32_t min_bs[DDIR_RWDIR_CNT];
+ uint32_t max_bs[DDIR_RWDIR_CNT];
+ struct bssplit bssplit[DDIR_RWDIR_CNT][BSSPLIT_MAX];
+ uint32_t bssplit_nr[DDIR_RWDIR_CNT];
+
+ uint32_t ignore_error[ERROR_TYPE_CNT][ERROR_STR_MAX];
+ uint32_t ignore_error_nr[ERROR_TYPE_CNT];
+ uint32_t error_dump;
+
+ uint32_t nr_files;
+ uint32_t open_files;
+ uint32_t file_lock_mode;
+
+ uint32_t odirect;
+ uint32_t invalidate_cache;
+ uint32_t create_serialize;
+ uint32_t create_fsync;
+ uint32_t create_on_open;
+ uint32_t create_only;
+ uint32_t end_fsync;
+ uint32_t pre_read;
+ uint32_t sync_io;
+ uint32_t verify;
+ uint32_t do_verify;
+ uint32_t verifysort;
+ uint32_t verifysort_nr;
+ uint32_t verify_interval;
+ uint32_t verify_offset;
+ uint8_t verify_pattern[MAX_PATTERN_SIZE];
+ uint32_t verify_pattern_bytes;
+ uint32_t verify_fatal;
+ uint32_t verify_dump;
+ uint32_t verify_async;
+ uint64_t verify_backlog;
+ uint32_t verify_batch;
+ uint32_t experimental_verify;
+ uint32_t use_thread;
+ uint32_t unlink;
+ uint32_t do_disk_util;
+ uint32_t override_sync;
+ uint32_t rand_repeatable;
+ uint32_t use_os_rand;
+ uint32_t log_avg_msec;
+ uint32_t norandommap;
+ uint32_t softrandommap;
+ uint32_t bs_unaligned;
+ uint32_t fsync_on_close;
+
+ uint32_t random_distribution;
+ fio_fp64_t zipf_theta;
+ fio_fp64_t pareto_h;
+
+ uint32_t random_generator;
+
+ uint32_t hugepage_size;
+ uint32_t rw_min_bs;
+ uint32_t thinktime;
+ uint32_t thinktime_spin;
+ uint32_t thinktime_blocks;
+ uint32_t fsync_blocks;
+ uint32_t fdatasync_blocks;
+ uint32_t barrier_blocks;
+ uint64_t start_delay;
+ uint64_t timeout;
+ uint64_t ramp_time;
+ uint32_t overwrite;
+ uint32_t bw_avg_time;
+ uint32_t iops_avg_time;
+ uint32_t loops;
+ uint64_t zone_range;
+ uint64_t zone_size;
+ uint64_t zone_skip;
+ uint64_t lockmem;
+ uint32_t mem_type;
+ uint32_t mem_align;
+
+ uint32_t max_latency;
+
+ uint32_t stonewall;
+ uint32_t new_group;
+ uint32_t numjobs;
+ uint8_t cpumask[FIO_TOP_STR_MAX];
+ uint32_t cpumask_set;
+ uint8_t verify_cpumask[FIO_TOP_STR_MAX];
+ uint32_t verify_cpumask_set;
+ uint32_t iolog;
+ uint32_t rwmixcycle;
+ uint32_t rwmix[2];
+ uint32_t nice;
+ uint32_t ioprio;
+ uint32_t ioprio_class;
+ uint32_t file_service_type;
+ uint32_t group_reporting;
+ uint32_t fadvise_hint;
+ uint32_t fallocate_mode;
+ uint32_t zero_buffers;
+ uint32_t refill_buffers;
+ uint32_t scramble_buffers;
+ unsigned int compress_percentage;
+ unsigned int compress_chunk;
+ uint32_t time_based;
+ uint32_t disable_lat;
+ uint32_t disable_clat;
+ uint32_t disable_slat;
+ uint32_t disable_bw;
+ uint32_t unified_rw_rep;
+ uint32_t gtod_reduce;
+ uint32_t gtod_cpu;
+ uint32_t gtod_offload;
+ uint32_t clocksource;
+ uint32_t no_stall;
+ uint32_t trim_percentage;
+ uint32_t trim_batch;
+ uint32_t trim_zero;
+ uint64_t trim_backlog;
+ uint32_t clat_percentiles;
+ uint32_t percentile_precision;
+ fio_fp64_t percentile_list[FIO_IO_U_LIST_MAX_LEN];
+
+ uint8_t read_iolog_file[FIO_TOP_STR_MAX];
+ uint8_t write_iolog_file[FIO_TOP_STR_MAX];
+ uint8_t bw_log_file[FIO_TOP_STR_MAX];
+ uint8_t lat_log_file[FIO_TOP_STR_MAX];
+ uint8_t iops_log_file[FIO_TOP_STR_MAX];
+ uint8_t replay_redirect[FIO_TOP_STR_MAX];
+
+ /*
+ * Pre-run and post-run shell
+ */
+ uint8_t exec_prerun[FIO_TOP_STR_MAX];
+ uint8_t exec_postrun[FIO_TOP_STR_MAX];
+
+ uint32_t rate[DDIR_RWDIR_CNT];
+ uint32_t ratemin[DDIR_RWDIR_CNT];
+ uint32_t ratecycle;
+ uint32_t rate_iops[DDIR_RWDIR_CNT];
+ uint32_t rate_iops_min[DDIR_RWDIR_CNT];
+
+ uint8_t ioscheduler[FIO_TOP_STR_MAX];
+
+ /*
+ * I/O Error handling
+ */
+ uint32_t continue_on_error;
+
+ /*
+ * Benchmark profile type
+ */
+ uint8_t profile[FIO_TOP_STR_MAX];
+
+ /*
+ * blkio cgroup support
+ */
+ uint8_t cgroup[FIO_TOP_STR_MAX];
+ uint32_t cgroup_weight;
+ uint32_t cgroup_nodelete;
+
+ uint32_t uid;
+ uint32_t gid;
+
+ int32_t flow_id;
+ int32_t flow;
+ int32_t flow_watermark;
+ uint32_t flow_sleep;
+
+ uint64_t offset_increment;
+
+ uint32_t sync_file_range;
+} __attribute__((packed));
+
+extern void convert_thread_options_to_cpu(struct thread_options *o, struct thread_options_pack *top);
+extern void convert_thread_options_to_net(struct thread_options_pack *top, struct thread_options *);
+extern int fio_test_cconv(struct thread_options *);
+extern void options_default_fill(struct thread_options *o);
+
+#endif