stdin or stdout. Which of the two depends on the read/write
direction set.
+ filename_format=str
+ If sharing multiple files between jobs, it is usually necessary
+ to have fio generate the exact names that you want. By default,
+ fio will name a file based on the default file format
+ specification of jobname.jobnumber.filenumber. With this
+ option, that can be customized. Fio will recognize and replace
+ the following keywords in this string:
+
+ $jobname
+ The name of the worker thread or process.
+
+ $jobnum
+ The incremental number of the worker thread or
+ process.
+
+ $filenum
+ The incremental number of the file for that worker
+ thread or process.
+
+ To have dependent jobs share a set of files, this option can
+ be set to have fio generate filenames that are shared between
+ the two. For instance, if testfiles.$filenum is specified,
+ file number 4 for any job will be named testfiles.4. The
+ default of $jobname.$jobnum.$filenum will be used if
+ no other format specifier is given.
+
opendir=str Tell fio to recursively add any file it can find in this
directory and down the file system tree.
fill_device=bool
fill_fs=bool Sets size to something really large and waits for ENOSPC (no
space left on device) as the terminating condition. Only makes
- sense with sequential write. For a read workload, the mount
+ sense with sequential write. For a read workload, the mount
point will be filled first then IO started on the result. This
option doesn't make sense if operating on a raw device node,
since the size of that is already known by the file system.
ioscheduler=str Attempt to switch the device hosting the file to the specified
io scheduler before running.
-cpuload=int If the job is a CPU cycle eater, attempt to use the specified
- percentage of CPU cycles.
-
-cpuchunks=int If the job is a CPU cycle eater, split the load into
- cycles of the given time. In microseconds.
-
disk_util=bool Generate disk utilization statistics, if the platform
supports it. Defaults to on.
enabled when polling for a minimum of 0 events (eg when
iodepth_batch_complete=0).
+[cpu] cpuload=int Attempt to use the specified percentage of CPU cycles.
+
+[cpu] cpuchunks=int Split the load into cycles of the given time. In
+ microseconds.
+
[netsplice] hostname=str
[net] hostname=str The host name or IP address to use for TCP or UDP based IO.
If the job is a TCP listener or UDP reader, the hostname is not
PROGS = fio
SCRIPTS = fio_generate_plots
-SOURCE := gettime.c fio.c ioengines.c init.c stat.c log.c time.c filesetup.c \
+ifdef CONFIG_GFIO
+ PROGS += gfio
+endif
+
+SOURCE := gettime.c ioengines.c init.c stat.c log.c time.c filesetup.c \
eta.c verify.c memory.c io_u.c parse.c mutex.c options.c \
- rbtree.c smalloc.c filehash.c profile.c debug.c lib/rand.c \
+ lib/rbtree.c smalloc.c filehash.c profile.c debug.c lib/rand.c \
lib/num2str.c lib/ieee754.c $(wildcard crc/*.c) engines/cpu.c \
engines/mmap.c engines/sync.c engines/null.c engines/net.c \
memalign.c server.c client.c iolog.c backend.c libfio.c flow.c \
- json.c lib/zipf.c lib/axmap.c lib/lfsr.c gettime-thread.c \
- helpers.c lib/flist_sort.c lib/hweight.c lib/getrusage.c \
- idletime.c
+ cconv.c lib/prio_tree.c json.c lib/zipf.c lib/axmap.c \
+ lib/lfsr.c gettime-thread.c helpers.c lib/flist_sort.c \
+ lib/hweight.c lib/getrusage.c idletime.c
ifdef CONFIG_64BIT_LLP64
CFLAGS += -DBITS_PER_LONG=32
ifndef CONFIG_STRSEP
SOURCE += lib/strsep.c
endif
+ ifndef CONFIG_STRCASESTR
+ SOURCE += lib/strcasestr.c
+ endif
ifndef CONFIG_GETOPT_LONG_ONLY
SOURCE += lib/getopt_long.c
endif
endif
OBJS = $(SOURCE:.c=.o)
+
+FIO_OBJS = $(OBJS) fio.o
+GFIO_OBJS = $(OBJS) gfio.o graph.o tickmarks.o ghelpers.o goptions.o gerror.o \
+ gclient.o gcompat.o cairo_text_helpers.o printing.o
+
-include $(OBJS:.o=.d)
T_SMALLOC_OBJS = t/stest.o
endif
endif
- INSTALL = install
+ ifeq ($(CONFIG_TARGET_OS), SunOS)
+ INSTALL = ginstall
+ else
+ INSTALL = install
+ endif
prefix = /usr/local
bindir = $(prefix)/bin
init.o: FIO-VERSION-FILE init.c
$(QUIET_CC)$(CC) -o init.o $(CFLAGS) $(CPPFLAGS) -c init.c
+gcompat.o: gcompat.c gcompat.h
+ $(QUIET_CC)$(CC) $(CFLAGS) $(GTK_CFLAGS) $(CPPFLAGS) -c gcompat.c
+
+goptions.o: goptions.c goptions.h
+ $(QUIET_CC)$(CC) $(CFLAGS) $(GTK_CFLAGS) $(CPPFLAGS) -c goptions.c
+
+ghelpers.o: ghelpers.c ghelpers.h
+ $(QUIET_CC)$(CC) $(CFLAGS) $(GTK_CFLAGS) $(CPPFLAGS) -c ghelpers.c
+
+gerror.o: gerror.c gerror.h
+ $(QUIET_CC)$(CC) $(CFLAGS) $(GTK_CFLAGS) $(CPPFLAGS) -c gerror.c
+
+gclient.o: gclient.c gclient.h
+ $(QUIET_CC)$(CC) $(CFLAGS) $(GTK_CFLAGS) $(CPPFLAGS) -c gclient.c
+
+gfio.o: gfio.c ghelpers.c
+ $(QUIET_CC)$(CC) $(CFLAGS) $(GTK_CFLAGS) $(CPPFLAGS) -c gfio.c
+
+graph.o: graph.c graph.h
+ $(QUIET_CC)$(CC) $(CFLAGS) $(GTK_CFLAGS) $(CPPFLAGS) -c graph.c
+
+cairo_text_helpers.o: cairo_text_helpers.c cairo_text_helpers.h
+ $(QUIET_CC)$(CC) $(CFLAGS) $(GTK_CFLAGS) $(CPPFLAGS) -c cairo_text_helpers.c
+
+printing.o: printing.c printing.h
+ $(QUIET_CC)$(CC) $(CFLAGS) $(GTK_CFLAGS) $(CPPFLAGS) -c printing.c
+
t/stest: $(T_SMALLOC_OBJS)
$(QUIET_LINK)$(CC) $(LDFLAGS) $(CFLAGS) -o $@ $(T_SMALLOC_OBJS) $(LIBS) $(LDFLAGS)
t/ieee754: $(T_IEEE_OBJS)
$(QUIET_LINK)$(CC) $(LDFLAGS) $(CFLAGS) -o $@ $(T_IEEE_OBJS) $(LIBS) $(LDFLAGS)
+fio: $(FIO_OBJS)
+ $(QUIET_LINK)$(CC) $(LDFLAGS) $(CFLAGS) -o $@ $(FIO_OBJS) $(LIBS) $(LDFLAGS)
+
+gfio: $(GFIO_OBJS)
+	$(QUIET_LINK)$(CC) $(LDFLAGS) $(CFLAGS) -o $@ $(GFIO_OBJS) $(LIBS) $(GTK_LDFLAGS)
+
t/genzipf: $(T_ZIPF_OBJS)
$(QUIET_LINK)$(CC) $(LDFLAGS) $(CFLAGS) -o $@ $(T_ZIPF_OBJS) $(LIBS) $(LDFLAGS)
t/lfsr-test: $(T_LFSR_TEST_OBJS)
$(QUIET_LINK)$(CC) $(LDFLAGS) $(CFLAGS) -o $@ $(T_LFSR_TEST_OBJS) $(LIBS) $(LDFLAGS)
-fio: $(OBJS)
- $(QUIET_LINK)$(CC) $(LDFLAGS) $(CFLAGS) -o $@ $(OBJS) $(LIBS) $(LDFLAGS)
-
clean: FORCE
- -rm -f .depend $(OBJS) $(T_OBJS) $(PROGS) $(T_PROGS) core.* core FIO-VERSION-FILE config-host.mak config-host.h cscope.out *.d
+ -rm -f .depend $(GFIO_OBJS) $(OBJS) $(T_OBJS) $(PROGS) $(T_PROGS) core.* core gfio FIO-VERSION-FILE config-host.mak config-host.h cscope.out *.d
cscope:
@cscope -b -R
--- /dev/null
+#include <string.h>
+
+#include "thread_options.h"
+
+/*
+ * Unpack a string field received off the wire: duplicate it into a
+ * freshly malloc'ed host-side string.  *dst is left untouched when the
+ * wire string is empty, preserving whatever value it already holds.
+ */
+static void string_to_cpu(char **dst, const uint8_t *src)
+{
+	const char *__src = (const char *) src;
+
+	if (strlen(__src))
+		*dst = strdup(__src);
+}
+
+/*
+ * Pack a host-side string into the fixed-size on-the-wire buffer.
+ * A NULL source is encoded as an empty string.
+ * NOTE(review): strcpy() here is unbounded -- presumably every source
+ * string already fits within the pack buffer; confirm this against the
+ * option string length limits before trusting it with longer values.
+ */
+static void string_to_net(uint8_t *dst, const char *src)
+{
+	if (src)
+		strcpy((char *) dst, src);
+	else
+		dst[0] = '\0';
+}
+
+void convert_thread_options_to_cpu(struct thread_options *o,
+ struct thread_options_pack *top)
+{
+ int i, j;
+
+ string_to_cpu(&o->description, top->description);
+ string_to_cpu(&o->name, top->name);
+ string_to_cpu(&o->directory, top->directory);
+ string_to_cpu(&o->filename, top->filename);
++ string_to_cpu(&o->filename_format, top->filename_format);
+ string_to_cpu(&o->opendir, top->opendir);
+ string_to_cpu(&o->ioengine, top->ioengine);
+ string_to_cpu(&o->mmapfile, top->mmapfile);
+ string_to_cpu(&o->read_iolog_file, top->read_iolog_file);
+ string_to_cpu(&o->write_iolog_file, top->write_iolog_file);
+ string_to_cpu(&o->bw_log_file, top->bw_log_file);
+ string_to_cpu(&o->lat_log_file, top->lat_log_file);
+ string_to_cpu(&o->iops_log_file, top->iops_log_file);
+ string_to_cpu(&o->replay_redirect, top->replay_redirect);
+ string_to_cpu(&o->exec_prerun, top->exec_prerun);
+ string_to_cpu(&o->exec_postrun, top->exec_postrun);
+ string_to_cpu(&o->ioscheduler, top->ioscheduler);
+ string_to_cpu(&o->profile, top->profile);
+ string_to_cpu(&o->cgroup, top->cgroup);
+
+	o->td_ddir = le32_to_cpu(top->td_ddir);
+	o->rw_seq = le32_to_cpu(top->rw_seq);
+	o->kb_base = le32_to_cpu(top->kb_base);
+	/* fix: read unit_base from its own field, not a copy of kb_base */
++	o->unit_base = le32_to_cpu(top->unit_base);
+	o->ddir_seq_nr = le32_to_cpu(top->ddir_seq_nr);
+	o->ddir_seq_add = le64_to_cpu(top->ddir_seq_add);
+ o->iodepth = le32_to_cpu(top->iodepth);
+ o->iodepth_low = le32_to_cpu(top->iodepth_low);
+ o->iodepth_batch = le32_to_cpu(top->iodepth_batch);
+ o->iodepth_batch_complete = le32_to_cpu(top->iodepth_batch_complete);
+ o->size = le64_to_cpu(top->size);
+ o->size_percent = le32_to_cpu(top->size_percent);
+ o->fill_device = le32_to_cpu(top->fill_device);
+ o->file_size_low = le64_to_cpu(top->file_size_low);
+ o->file_size_high = le64_to_cpu(top->file_size_high);
+ o->start_offset = le64_to_cpu(top->start_offset);
+
+ for (i = 0; i < DDIR_RWDIR_CNT; i++) {
+ o->bs[i] = le32_to_cpu(top->bs[i]);
+ o->ba[i] = le32_to_cpu(top->ba[i]);
+ o->min_bs[i] = le32_to_cpu(top->min_bs[i]);
+ o->max_bs[i] = le32_to_cpu(top->max_bs[i]);
+ o->bssplit_nr[i] = le32_to_cpu(top->bssplit_nr[i]);
+
+ if (o->bssplit_nr[i]) {
+ o->bssplit[i] = malloc(o->bssplit_nr[i] * sizeof(struct bssplit));
+ for (j = 0; j < o->bssplit_nr[i]; j++) {
+ o->bssplit[i][j].bs = le32_to_cpu(top->bssplit[i][j].bs);
+ o->bssplit[i][j].perc = le32_to_cpu(top->bssplit[i][j].perc);
+ }
+ }
+
+ o->rwmix[i] = le32_to_cpu(top->rwmix[i]);
+ o->rate[i] = le32_to_cpu(top->rate[i]);
+ o->ratemin[i] = le32_to_cpu(top->ratemin[i]);
+ o->rate_iops[i] = le32_to_cpu(top->rate_iops[i]);
+ o->rate_iops_min[i] = le32_to_cpu(top->rate_iops_min[i]);
+ }
+
+ o->ratecycle = le32_to_cpu(top->ratecycle);
+ o->nr_files = le32_to_cpu(top->nr_files);
+ o->open_files = le32_to_cpu(top->open_files);
+ o->file_lock_mode = le32_to_cpu(top->file_lock_mode);
+ o->odirect = le32_to_cpu(top->odirect);
+ o->invalidate_cache = le32_to_cpu(top->invalidate_cache);
+ o->create_serialize = le32_to_cpu(top->create_serialize);
+ o->create_fsync = le32_to_cpu(top->create_fsync);
+ o->create_on_open = le32_to_cpu(top->create_on_open);
+ o->create_only = le32_to_cpu(top->create_only);
+ o->end_fsync = le32_to_cpu(top->end_fsync);
+ o->pre_read = le32_to_cpu(top->pre_read);
+ o->sync_io = le32_to_cpu(top->sync_io);
+ o->verify = le32_to_cpu(top->verify);
+ o->do_verify = le32_to_cpu(top->do_verify);
+ o->verifysort = le32_to_cpu(top->verifysort);
+ o->verifysort_nr = le32_to_cpu(top->verifysort_nr);
+ o->experimental_verify = le32_to_cpu(top->experimental_verify);
+ o->verify_interval = le32_to_cpu(top->verify_interval);
+ o->verify_offset = le32_to_cpu(top->verify_offset);
+
+ memcpy(o->verify_pattern, top->verify_pattern, MAX_PATTERN_SIZE);
+
+ o->verify_pattern_bytes = le32_to_cpu(top->verify_pattern_bytes);
+ o->verify_fatal = le32_to_cpu(top->verify_fatal);
+ o->verify_dump = le32_to_cpu(top->verify_dump);
+ o->verify_async = le32_to_cpu(top->verify_async);
+ o->verify_batch = le32_to_cpu(top->verify_batch);
+ o->use_thread = le32_to_cpu(top->use_thread);
+ o->unlink = le32_to_cpu(top->unlink);
+ o->do_disk_util = le32_to_cpu(top->do_disk_util);
+ o->override_sync = le32_to_cpu(top->override_sync);
+ o->rand_repeatable = le32_to_cpu(top->rand_repeatable);
+ o->use_os_rand = le32_to_cpu(top->use_os_rand);
+ o->log_avg_msec = le32_to_cpu(top->log_avg_msec);
+ o->norandommap = le32_to_cpu(top->norandommap);
+ o->softrandommap = le32_to_cpu(top->softrandommap);
+ o->bs_unaligned = le32_to_cpu(top->bs_unaligned);
+ o->fsync_on_close = le32_to_cpu(top->fsync_on_close);
+ o->random_distribution = le32_to_cpu(top->random_distribution);
+ o->zipf_theta.u.f = fio_uint64_to_double(le64_to_cpu(top->zipf_theta.u.i));
+ o->pareto_h.u.f = fio_uint64_to_double(le64_to_cpu(top->pareto_h.u.i));
+ o->random_generator = le32_to_cpu(top->random_generator);
+ o->hugepage_size = le32_to_cpu(top->hugepage_size);
+ o->rw_min_bs = le32_to_cpu(top->rw_min_bs);
+ o->thinktime = le32_to_cpu(top->thinktime);
+ o->thinktime_spin = le32_to_cpu(top->thinktime_spin);
+ o->thinktime_blocks = le32_to_cpu(top->thinktime_blocks);
+ o->fsync_blocks = le32_to_cpu(top->fsync_blocks);
+ o->fdatasync_blocks = le32_to_cpu(top->fdatasync_blocks);
+ o->barrier_blocks = le32_to_cpu(top->barrier_blocks);
+
+ o->verify_backlog = le64_to_cpu(top->verify_backlog);
+ o->start_delay = le64_to_cpu(top->start_delay);
+ o->timeout = le64_to_cpu(top->timeout);
+ o->ramp_time = le64_to_cpu(top->ramp_time);
+ o->zone_range = le64_to_cpu(top->zone_range);
+ o->zone_size = le64_to_cpu(top->zone_size);
+ o->zone_skip = le64_to_cpu(top->zone_skip);
+ o->lockmem = le64_to_cpu(top->lockmem);
+ o->offset_increment = le64_to_cpu(top->offset_increment);
+
+ o->overwrite = le32_to_cpu(top->overwrite);
+ o->bw_avg_time = le32_to_cpu(top->bw_avg_time);
+ o->iops_avg_time = le32_to_cpu(top->iops_avg_time);
+ o->loops = le32_to_cpu(top->loops);
+ o->mem_type = le32_to_cpu(top->mem_type);
+ o->mem_align = le32_to_cpu(top->mem_align);
+ o->max_latency = le32_to_cpu(top->max_latency);
+ o->stonewall = le32_to_cpu(top->stonewall);
+ o->new_group = le32_to_cpu(top->new_group);
+ o->numjobs = le32_to_cpu(top->numjobs);
+ o->cpumask_set = le32_to_cpu(top->cpumask_set);
+ o->verify_cpumask_set = le32_to_cpu(top->verify_cpumask_set);
+ o->iolog = le32_to_cpu(top->iolog);
+ o->rwmixcycle = le32_to_cpu(top->rwmixcycle);
+ o->nice = le32_to_cpu(top->nice);
+ o->ioprio = le32_to_cpu(top->ioprio);
+ o->ioprio_class = le32_to_cpu(top->ioprio_class);
+ o->file_service_type = le32_to_cpu(top->file_service_type);
+ o->group_reporting = le32_to_cpu(top->group_reporting);
+ o->fadvise_hint = le32_to_cpu(top->fadvise_hint);
+ o->fallocate_mode = le32_to_cpu(top->fallocate_mode);
+ o->zero_buffers = le32_to_cpu(top->zero_buffers);
+ o->refill_buffers = le32_to_cpu(top->refill_buffers);
+ o->scramble_buffers = le32_to_cpu(top->scramble_buffers);
+ o->time_based = le32_to_cpu(top->time_based);
+ o->disable_lat = le32_to_cpu(top->disable_lat);
+ o->disable_clat = le32_to_cpu(top->disable_clat);
+ o->disable_slat = le32_to_cpu(top->disable_slat);
+ o->disable_bw = le32_to_cpu(top->disable_bw);
+ o->unified_rw_rep = le32_to_cpu(top->unified_rw_rep);
+ o->gtod_reduce = le32_to_cpu(top->gtod_reduce);
+ o->gtod_cpu = le32_to_cpu(top->gtod_cpu);
+ o->gtod_offload = le32_to_cpu(top->gtod_offload);
+ o->clocksource = le32_to_cpu(top->clocksource);
+ o->no_stall = le32_to_cpu(top->no_stall);
+ o->trim_percentage = le32_to_cpu(top->trim_percentage);
+ o->trim_batch = le32_to_cpu(top->trim_batch);
+ o->trim_zero = le32_to_cpu(top->trim_zero);
+ o->clat_percentiles = le32_to_cpu(top->clat_percentiles);
+ o->percentile_precision = le32_to_cpu(top->percentile_precision);
+ o->continue_on_error = le32_to_cpu(top->continue_on_error);
+ o->cgroup_weight = le32_to_cpu(top->cgroup_weight);
+ o->cgroup_nodelete = le32_to_cpu(top->cgroup_nodelete);
+ o->uid = le32_to_cpu(top->uid);
+ o->gid = le32_to_cpu(top->gid);
+ o->flow_id = __le32_to_cpu(top->flow_id);
+ o->flow = __le32_to_cpu(top->flow);
+ o->flow_watermark = __le32_to_cpu(top->flow_watermark);
+ o->flow_sleep = le32_to_cpu(top->flow_sleep);
+ o->sync_file_range = le32_to_cpu(top->sync_file_range);
+ o->compress_percentage = le32_to_cpu(top->compress_percentage);
+ o->compress_chunk = le32_to_cpu(top->compress_chunk);
+
+ o->trim_backlog = le64_to_cpu(top->trim_backlog);
+
+ for (i = 0; i < FIO_IO_U_LIST_MAX_LEN; i++)
+ o->percentile_list[i].u.f = fio_uint64_to_double(le64_to_cpu(top->percentile_list[i].u.i));
+#if 0
+ uint8_t cpumask[FIO_TOP_STR_MAX];
+ uint8_t verify_cpumask[FIO_TOP_STR_MAX];
+#endif
+}
+
+void convert_thread_options_to_net(struct thread_options_pack *top,
+ struct thread_options *o)
+{
+ int i, j;
+
+ string_to_net(top->description, o->description);
+ string_to_net(top->name, o->name);
+ string_to_net(top->directory, o->directory);
+ string_to_net(top->filename, o->filename);
++ string_to_net(top->filename_format, o->filename_format);
+ string_to_net(top->opendir, o->opendir);
+ string_to_net(top->ioengine, o->ioengine);
+ string_to_net(top->mmapfile, o->mmapfile);
+ string_to_net(top->read_iolog_file, o->read_iolog_file);
+ string_to_net(top->write_iolog_file, o->write_iolog_file);
+ string_to_net(top->bw_log_file, o->bw_log_file);
+ string_to_net(top->lat_log_file, o->lat_log_file);
+ string_to_net(top->iops_log_file, o->iops_log_file);
+ string_to_net(top->replay_redirect, o->replay_redirect);
+ string_to_net(top->exec_prerun, o->exec_prerun);
+ string_to_net(top->exec_postrun, o->exec_postrun);
+ string_to_net(top->ioscheduler, o->ioscheduler);
+ string_to_net(top->profile, o->profile);
+ string_to_net(top->cgroup, o->cgroup);
+
+	top->td_ddir = cpu_to_le32(o->td_ddir);
+	top->rw_seq = cpu_to_le32(o->rw_seq);
+	top->kb_base = cpu_to_le32(o->kb_base);
+	/* fix: pack unit_base from its own field, not a copy of kb_base */
++	top->unit_base = cpu_to_le32(o->unit_base);
+	top->ddir_seq_nr = cpu_to_le32(o->ddir_seq_nr);
+ top->iodepth = cpu_to_le32(o->iodepth);
+ top->iodepth_low = cpu_to_le32(o->iodepth_low);
+ top->iodepth_batch = cpu_to_le32(o->iodepth_batch);
+ top->iodepth_batch_complete = cpu_to_le32(o->iodepth_batch_complete);
+ top->size_percent = cpu_to_le32(o->size_percent);
+ top->fill_device = cpu_to_le32(o->fill_device);
+ top->ratecycle = cpu_to_le32(o->ratecycle);
+ top->nr_files = cpu_to_le32(o->nr_files);
+ top->open_files = cpu_to_le32(o->open_files);
+ top->file_lock_mode = cpu_to_le32(o->file_lock_mode);
+ top->odirect = cpu_to_le32(o->odirect);
+ top->invalidate_cache = cpu_to_le32(o->invalidate_cache);
+ top->create_serialize = cpu_to_le32(o->create_serialize);
+ top->create_fsync = cpu_to_le32(o->create_fsync);
+ top->create_on_open = cpu_to_le32(o->create_on_open);
+ top->create_only = cpu_to_le32(o->create_only);
+ top->end_fsync = cpu_to_le32(o->end_fsync);
+ top->pre_read = cpu_to_le32(o->pre_read);
+ top->sync_io = cpu_to_le32(o->sync_io);
+ top->verify = cpu_to_le32(o->verify);
+ top->do_verify = cpu_to_le32(o->do_verify);
+ top->verifysort = cpu_to_le32(o->verifysort);
+ top->verifysort_nr = cpu_to_le32(o->verifysort_nr);
+ top->experimental_verify = cpu_to_le32(o->experimental_verify);
+ top->verify_interval = cpu_to_le32(o->verify_interval);
+ top->verify_offset = cpu_to_le32(o->verify_offset);
+ top->verify_pattern_bytes = cpu_to_le32(o->verify_pattern_bytes);
+ top->verify_fatal = cpu_to_le32(o->verify_fatal);
+ top->verify_dump = cpu_to_le32(o->verify_dump);
+ top->verify_async = cpu_to_le32(o->verify_async);
+ top->verify_batch = cpu_to_le32(o->verify_batch);
+ top->use_thread = cpu_to_le32(o->use_thread);
+ top->unlink = cpu_to_le32(o->unlink);
+ top->do_disk_util = cpu_to_le32(o->do_disk_util);
+ top->override_sync = cpu_to_le32(o->override_sync);
+ top->rand_repeatable = cpu_to_le32(o->rand_repeatable);
+ top->use_os_rand = cpu_to_le32(o->use_os_rand);
+ top->log_avg_msec = cpu_to_le32(o->log_avg_msec);
+ top->norandommap = cpu_to_le32(o->norandommap);
+ top->softrandommap = cpu_to_le32(o->softrandommap);
+ top->bs_unaligned = cpu_to_le32(o->bs_unaligned);
+ top->fsync_on_close = cpu_to_le32(o->fsync_on_close);
+ top->random_distribution = cpu_to_le32(o->random_distribution);
+ top->zipf_theta.u.i = __cpu_to_le64(fio_double_to_uint64(o->zipf_theta.u.f));
+ top->pareto_h.u.i = __cpu_to_le64(fio_double_to_uint64(o->pareto_h.u.f));
+ top->random_generator = cpu_to_le32(o->random_generator);
+ top->hugepage_size = cpu_to_le32(o->hugepage_size);
+ top->rw_min_bs = cpu_to_le32(o->rw_min_bs);
+ top->thinktime = cpu_to_le32(o->thinktime);
+ top->thinktime_spin = cpu_to_le32(o->thinktime_spin);
+ top->thinktime_blocks = cpu_to_le32(o->thinktime_blocks);
+ top->fsync_blocks = cpu_to_le32(o->fsync_blocks);
+ top->fdatasync_blocks = cpu_to_le32(o->fdatasync_blocks);
+ top->barrier_blocks = cpu_to_le32(o->barrier_blocks);
+ top->overwrite = cpu_to_le32(o->overwrite);
+ top->bw_avg_time = cpu_to_le32(o->bw_avg_time);
+ top->iops_avg_time = cpu_to_le32(o->iops_avg_time);
+ top->loops = cpu_to_le32(o->loops);
+ top->mem_type = cpu_to_le32(o->mem_type);
+ top->mem_align = cpu_to_le32(o->mem_align);
+ top->max_latency = cpu_to_le32(o->max_latency);
+ top->stonewall = cpu_to_le32(o->stonewall);
+ top->new_group = cpu_to_le32(o->new_group);
+ top->numjobs = cpu_to_le32(o->numjobs);
+ top->cpumask_set = cpu_to_le32(o->cpumask_set);
+ top->verify_cpumask_set = cpu_to_le32(o->verify_cpumask_set);
+ top->iolog = cpu_to_le32(o->iolog);
+ top->rwmixcycle = cpu_to_le32(o->rwmixcycle);
+ top->nice = cpu_to_le32(o->nice);
+ top->ioprio = cpu_to_le32(o->ioprio);
+ top->ioprio_class = cpu_to_le32(o->ioprio_class);
+ top->file_service_type = cpu_to_le32(o->file_service_type);
+ top->group_reporting = cpu_to_le32(o->group_reporting);
+ top->fadvise_hint = cpu_to_le32(o->fadvise_hint);
+ top->fallocate_mode = cpu_to_le32(o->fallocate_mode);
+ top->zero_buffers = cpu_to_le32(o->zero_buffers);
+ top->refill_buffers = cpu_to_le32(o->refill_buffers);
+ top->scramble_buffers = cpu_to_le32(o->scramble_buffers);
+ top->time_based = cpu_to_le32(o->time_based);
+ top->disable_lat = cpu_to_le32(o->disable_lat);
+ top->disable_clat = cpu_to_le32(o->disable_clat);
+ top->disable_slat = cpu_to_le32(o->disable_slat);
+ top->disable_bw = cpu_to_le32(o->disable_bw);
+ top->unified_rw_rep = cpu_to_le32(o->unified_rw_rep);
+ top->gtod_reduce = cpu_to_le32(o->gtod_reduce);
+ top->gtod_cpu = cpu_to_le32(o->gtod_cpu);
+ top->gtod_offload = cpu_to_le32(o->gtod_offload);
+ top->clocksource = cpu_to_le32(o->clocksource);
+ top->no_stall = cpu_to_le32(o->no_stall);
+ top->trim_percentage = cpu_to_le32(o->trim_percentage);
+ top->trim_batch = cpu_to_le32(o->trim_batch);
+ top->trim_zero = cpu_to_le32(o->trim_zero);
+ top->clat_percentiles = cpu_to_le32(o->clat_percentiles);
+ top->percentile_precision = cpu_to_le32(o->percentile_precision);
+ top->continue_on_error = cpu_to_le32(o->continue_on_error);
+ top->cgroup_weight = cpu_to_le32(o->cgroup_weight);
+ top->cgroup_nodelete = cpu_to_le32(o->cgroup_nodelete);
+ top->uid = cpu_to_le32(o->uid);
+ top->gid = cpu_to_le32(o->gid);
+ top->flow_id = __cpu_to_le32(o->flow_id);
+ top->flow = __cpu_to_le32(o->flow);
+ top->flow_watermark = __cpu_to_le32(o->flow_watermark);
+ top->flow_sleep = cpu_to_le32(o->flow_sleep);
+ top->sync_file_range = cpu_to_le32(o->sync_file_range);
+ top->compress_percentage = cpu_to_le32(o->compress_percentage);
+ top->compress_chunk = cpu_to_le32(o->compress_chunk);
+
+ for (i = 0; i < DDIR_RWDIR_CNT; i++) {
+ top->bs[i] = cpu_to_le32(o->bs[i]);
+ top->ba[i] = cpu_to_le32(o->ba[i]);
+ top->min_bs[i] = cpu_to_le32(o->min_bs[i]);
+ top->max_bs[i] = cpu_to_le32(o->max_bs[i]);
+ top->bssplit_nr[i] = cpu_to_le32(o->bssplit_nr[i]);
+
+ if (o->bssplit_nr[i]) {
+ unsigned int bssplit_nr = o->bssplit_nr[i];
+
+ if (bssplit_nr > BSSPLIT_MAX) {
+ log_err("fio: BSSPLIT_MAX is too small\n");
+ bssplit_nr = BSSPLIT_MAX;
+ }
+ for (j = 0; j < bssplit_nr; j++) {
+ top->bssplit[i][j].bs = cpu_to_le32(o->bssplit[i][j].bs);
+ top->bssplit[i][j].perc = cpu_to_le32(o->bssplit[i][j].perc);
+ }
+ }
+
+ top->rwmix[i] = cpu_to_le32(o->rwmix[i]);
+ top->rate[i] = cpu_to_le32(o->rate[i]);
+ top->ratemin[i] = cpu_to_le32(o->ratemin[i]);
+ top->rate_iops[i] = cpu_to_le32(o->rate_iops[i]);
+ top->rate_iops_min[i] = cpu_to_le32(o->rate_iops_min[i]);
+ }
+
+ memcpy(top->verify_pattern, o->verify_pattern, MAX_PATTERN_SIZE);
+
+ top->size = __cpu_to_le64(o->size);
+ top->verify_backlog = __cpu_to_le64(o->verify_backlog);
+ top->start_delay = __cpu_to_le64(o->start_delay);
+ top->timeout = __cpu_to_le64(o->timeout);
+ top->ramp_time = __cpu_to_le64(o->ramp_time);
+ top->zone_range = __cpu_to_le64(o->zone_range);
+ top->zone_size = __cpu_to_le64(o->zone_size);
+ top->zone_skip = __cpu_to_le64(o->zone_skip);
+ top->lockmem = __cpu_to_le64(o->lockmem);
+ top->ddir_seq_add = __cpu_to_le64(o->ddir_seq_add);
+ top->file_size_low = __cpu_to_le64(o->file_size_low);
+ top->file_size_high = __cpu_to_le64(o->file_size_high);
+ top->start_offset = __cpu_to_le64(o->start_offset);
+ top->trim_backlog = __cpu_to_le64(o->trim_backlog);
+ top->offset_increment = __cpu_to_le64(o->offset_increment);
+
+ for (i = 0; i < FIO_IO_U_LIST_MAX_LEN; i++)
+ top->percentile_list[i].u.i = __cpu_to_le64(fio_double_to_uint64(o->percentile_list[i].u.f));
+#if 0
+ uint8_t cpumask[FIO_TOP_STR_MAX];
+ uint8_t verify_cpumask[FIO_TOP_STR_MAX];
+#endif
+
+}
+
+/*
+ * Basic conversion test. We'd really need to fill in more of the options
+ * to have a thorough test. Even better, we should auto-generate the
+ * converter functions...
+ */
+int fio_test_cconv(struct thread_options *__o)
+{
+	struct thread_options o;
+	struct thread_options_pack pack1, pack2;
+
+	/* Start from all-zero state so unconverted padding compares equal */
+	memset(&pack1, 0, sizeof(pack1));
+	memset(&pack2, 0, sizeof(pack2));
+	memset(&o, 0, sizeof(o));
+
+	/* cpu -> net -> cpu -> net: the two packed forms must be identical */
+	convert_thread_options_to_net(&pack1, __o);
+	convert_thread_options_to_cpu(&o, &pack1);
+	convert_thread_options_to_net(&pack2, &o);
+
+	return memcmp(&pack1, &pack2, sizeof(pack1));
+}
#include <arpa/inet.h>
#include <netdb.h>
#include <signal.h>
+#include <zlib.h>
#include "fio.h"
+#include "client.h"
#include "server.h"
#include "flist.h"
#include "hash.h"
-struct client_eta {
- unsigned int pending;
- struct jobs_eta eta;
-};
-
-struct fio_client {
- struct flist_head list;
- struct flist_head hash_list;
- struct flist_head arg_list;
- union {
- struct sockaddr_in addr;
- struct sockaddr_in6 addr6;
- struct sockaddr_un addr_un;
- };
- char *hostname;
- int port;
- int fd;
- unsigned int refs;
-
- char *name;
-
- int state;
-
- int skip_newline;
- int is_sock;
- int disk_stats_shown;
- unsigned int jobs;
- unsigned int nr_stat;
- int error;
- int ipv6;
- int sent_job;
- int did_stat;
-
- struct flist_head eta_list;
- struct client_eta *eta_in_flight;
-
- struct flist_head cmd_list;
-
- uint16_t argc;
- char **argv;
-
- char **ini_file;
- unsigned int nr_ini_file;
+static void handle_du(struct fio_client *client, struct fio_net_cmd *cmd);
+static void handle_ts(struct fio_client *client, struct fio_net_cmd *cmd);
+static void handle_gs(struct fio_client *client, struct fio_net_cmd *cmd);
+static void handle_probe(struct fio_client *client, struct fio_net_cmd *cmd);
+static void handle_text(struct fio_client *client, struct fio_net_cmd *cmd);
+static void handle_stop(struct fio_client *client, struct fio_net_cmd *cmd);
+static void handle_start(struct fio_client *client, struct fio_net_cmd *cmd);
+
+struct client_ops fio_client_ops = {
+ .text = handle_text,
+ .disk_util = handle_du,
+ .thread_status = handle_ts,
+ .group_stats = handle_gs,
+ .stop = handle_stop,
+ .start = handle_start,
+ .eta = display_thread_status,
+ .probe = handle_probe,
+ .eta_msec = FIO_CLIENT_DEF_ETA_MSEC,
+ .client_type = FIO_CLIENT_TYPE_CLI,
};
static struct timeval eta_tv;
-enum {
- Client_created = 0,
- Client_connected = 1,
- Client_started = 2,
- Client_running = 3,
- Client_stopped = 4,
- Client_exited = 5,
-};
-
static FLIST_HEAD(client_list);
static FLIST_HEAD(eta_list);
static FLIST_HEAD(arg_list);
-static struct thread_stat client_ts;
-static struct group_run_stats client_gs;
-static int sum_stat_clients = 0;
+struct thread_stat client_ts;
+struct group_run_stats client_gs;
+int sum_stat_clients;
+
static int sum_stat_nr;
static int do_output_all_clients;
#define FIO_CLIENT_HASH_MASK (FIO_CLIENT_HASH_SZ - 1)
static struct flist_head client_hash[FIO_CLIENT_HASH_SZ];
-static int handle_client(struct fio_client *client);
-static void dec_jobs_eta(struct client_eta *eta);
-
static void fio_client_add_hash(struct fio_client *client)
{
int bucket = hash_long(client->fd, FIO_CLIENT_HASH_BITS);
return NULL;
}
-static void remove_client(struct fio_client *client)
+void fio_put_client(struct fio_client *client)
{
- assert(client->refs);
-
if (--client->refs)
return;
- dprint(FD_NET, "client: removed <%s>\n", client->hostname);
- flist_del(&client->list);
-
- fio_client_remove_hash(client);
-
- if (!flist_empty(&client->eta_list)) {
- flist_del_init(&client->eta_list);
- dec_jobs_eta(client->eta_in_flight);
- }
-
free(client->hostname);
if (client->argv)
free(client->argv);
sum_stat_clients -= client->nr_stat;
free(client);
+}
+
+static void remove_client(struct fio_client *client)
+{
+ assert(client->refs);
+
+ dprint(FD_NET, "client: removed <%s>\n", client->hostname);
+
+ if (!flist_empty(&client->list))
+ flist_del_init(&client->list);
+
+ fio_client_remove_hash(client);
+
+ if (!flist_empty(&client->eta_list)) {
+ flist_del_init(&client->eta_list);
+ fio_client_dec_jobs_eta(client->eta_in_flight, client->ops->eta);
+ }
+
+ close(client->fd);
+ client->fd = -1;
+
+ if (client->ops->removed)
+ client->ops->removed(client);
+
nr_clients--;
+ fio_put_client(client);
}
-static void put_client(struct fio_client *client)
+struct fio_client *fio_get_client(struct fio_client *client)
{
- remove_client(client);
+ client->refs++;
+ return client;
}
static void __fio_client_add_cmd_option(struct fio_client *client,
}
}
+/*
+ * Allocate and register a client for an explicitly specified endpoint.
+ * type selects socket vs. IPv4/IPv6 addressing; port is ignored for
+ * Fio_client_socket.  Returns the new client, or NULL if the hostname
+ * cannot be parsed.
+ */
+struct fio_client *fio_client_add_explicit(struct client_ops *ops,
+					   const char *hostname, int type,
+					   int port)
+{
+	struct fio_client *client;
+
+	client = malloc(sizeof(*client));
+	memset(client, 0, sizeof(*client));
+
+	INIT_FLIST_HEAD(&client->list);
+	INIT_FLIST_HEAD(&client->hash_list);
+	INIT_FLIST_HEAD(&client->arg_list);
+	INIT_FLIST_HEAD(&client->eta_list);
+	INIT_FLIST_HEAD(&client->cmd_list);
+
+	client->hostname = strdup(hostname);
+
+	if (type == Fio_client_socket)
+		client->is_sock = 1;
+	else {
+		int ipv6;
+
+		ipv6 = type == Fio_client_ipv6;
+		if (fio_server_parse_host(hostname, &ipv6,
+						&client->addr.sin_addr,
+						&client->addr6.sin6_addr))
+			goto err;
+
+		client->port = port;
+	}
+
+	client->fd = -1;
+	client->ops = ops;
+	client->refs = 1;
+	client->type = ops->client_type;
+
+	__fio_client_add_cmd_option(client, "fio");
+
+	flist_add(&client->list, &client_list);
+	nr_clients++;
+	dprint(FD_NET, "client: added <%s>\n", client->hostname);
+	return client;
+err:
+	/* fix: hostname was strdup'ed above and leaked on this path */
+	free(client->hostname);
+	free(client);
+	return NULL;
+}
+
void fio_client_add_ini_file(void *cookie, const char *ini_file)
{
struct fio_client *client = cookie;
client->nr_ini_file++;
}
-int fio_client_add(const char *hostname, void **cookie)
+int fio_client_add(struct client_ops *ops, const char *hostname, void **cookie)
{
struct fio_client *existing = *cookie;
struct fio_client *client;
return -1;
client->fd = -1;
+ client->ops = ops;
client->refs = 1;
+ client->type = ops->client_type;
__fio_client_add_cmd_option(client, "fio");
return 0;
}
+static void probe_client(struct fio_client *client)
+{
+ dprint(FD_NET, "client: send probe\n");
+
+ fio_net_send_simple_cmd(client->fd, FIO_NET_CMD_PROBE, 0, &client->cmd_list);
+}
+
static int fio_client_connect_ip(struct fio_client *client)
{
struct sockaddr *addr;
fd = socket(domain, SOCK_STREAM, 0);
if (fd < 0) {
+ int ret = -errno;
+
log_err("fio: socket: %s\n", strerror(errno));
- return -1;
+ return ret;
}
if (connect(fd, addr, socklen) < 0) {
+ int ret = -errno;
+
log_err("fio: connect: %s\n", strerror(errno));
log_err("fio: failed to connect to %s:%u\n", client->hostname,
client->port);
close(fd);
- return -1;
+ return ret;
}
return fd;
fd = socket(AF_UNIX, SOCK_STREAM, 0);
if (fd < 0) {
+ int ret = -errno;
+
log_err("fio: socket: %s\n", strerror(errno));
- return -1;
+ return ret;
}
len = sizeof(addr->sun_family) + strlen(addr->sun_path) + 1;
if (connect(fd, (struct sockaddr *) addr, len) < 0) {
+ int ret = -errno;
+
log_err("fio: connect; %s\n", strerror(errno));
close(fd);
- return -1;
+ return ret;
}
return fd;
}
-static int fio_client_connect(struct fio_client *client)
+int fio_client_connect(struct fio_client *client)
{
int fd;
dprint(FD_NET, "client: %s connected %d\n", client->hostname, fd);
if (fd < 0)
- return 1;
+ return fd;
client->fd = fd;
fio_client_add_hash(client);
client->state = Client_connected;
+
+ probe_client(client);
return 0;
}
+int fio_client_terminate(struct fio_client *client)
+{
+ return fio_net_send_quit(client->fd);
+}
+
void fio_clients_terminate(void)
{
struct flist_head *entry;
flist_for_each(entry, &client_list) {
client = flist_entry(entry, struct fio_client, list);
-
- fio_net_send_simple_cmd(client->fd, FIO_NET_CMD_QUIT, 0, NULL);
+ fio_client_terminate(client);
}
}
sigaction(SIGUSR1, &act, NULL);
}
-static void probe_client(struct fio_client *client)
-{
- dprint(FD_NET, "client: send probe\n");
-
- fio_net_send_simple_cmd(client->fd, FIO_NET_CMD_PROBE, 0, &client->cmd_list);
-}
-
static int send_client_cmd_line(struct fio_client *client)
{
struct cmd_single_line_pdu *cslp;
free(lens);
clp->lines = cpu_to_le16(client->argc);
- ret = fio_net_send_cmd(client->fd, FIO_NET_CMD_JOBLINE, pdu, mem, 0);
+ clp->client_type = __cpu_to_le16(client->type);
+ ret = fio_net_send_cmd(client->fd, FIO_NET_CMD_JOBLINE, pdu, mem, NULL, NULL);
free(pdu);
return ret;
}
#ifdef WIN32
WSADATA wsd;
- WSAStartup(MAKEWORD(2,2), &wsd);
+ WSAStartup(MAKEWORD(2, 2), &wsd);
#endif
dprint(FD_NET, "client: connect all\n");
continue;
}
- probe_client(client);
-
if (client->argc > 1)
send_client_cmd_line(client);
}
return !nr_clients;
}
+int fio_start_client(struct fio_client *client)
+{
+ dprint(FD_NET, "client: start %s\n", client->hostname);
+ return fio_net_send_simple_cmd(client->fd, FIO_NET_CMD_RUN, 0, NULL);
+}
+
+int fio_start_all_clients(void)
+{
+ struct fio_client *client;
+ struct flist_head *entry, *tmp;
+ int ret;
+
+ dprint(FD_NET, "client: start all\n");
+
+ flist_for_each_safe(entry, tmp, &client_list) {
+ client = flist_entry(entry, struct fio_client, list);
+
+ ret = fio_start_client(client);
+ if (ret) {
+ remove_client(client);
+ continue;
+ }
+ }
+
+ return flist_empty(&client_list);
+}
+
/*
* Send file contents to server backend. We could use sendfile(), but to remain
* more portable lets just read/write the darn thing.
*/
-static int fio_client_send_ini(struct fio_client *client, const char *filename)
+static int __fio_client_send_ini(struct fio_client *client, const char *filename)
{
+ struct cmd_job_pdu *pdu;
+ size_t p_size;
struct stat sb;
- char *p, *buf;
+ char *p;
+ void *buf;
off_t len;
int fd, ret;
fd = open(filename, O_RDONLY);
if (fd < 0) {
+ int ret = -errno;
+
log_err("fio: job file <%s> open: %s\n", filename, strerror(errno));
- return 1;
+ return ret;
}
if (fstat(fd, &sb) < 0) {
+ int ret = -errno;
+
log_err("fio: job file stat: %s\n", strerror(errno));
close(fd);
- return 1;
+ return ret;
}
- buf = malloc(sb.st_size);
+ p_size = sb.st_size + sizeof(*pdu);
+ pdu = malloc(p_size);
+ buf = pdu->buf;
len = sb.st_size;
p = buf;
return 1;
}
+ pdu->buf_len = __cpu_to_le32(sb.st_size);
+ pdu->client_type = cpu_to_le32(client->type);
+
client->sent_job = 1;
- ret = fio_net_send_cmd(client->fd, FIO_NET_CMD_JOB, buf, sb.st_size, 0);
- free(buf);
+ ret = fio_net_send_cmd(client->fd, FIO_NET_CMD_JOB, pdu, p_size, NULL, NULL);
+ free(pdu);
close(fd);
return ret;
}
+int fio_client_send_ini(struct fio_client *client, const char *filename)
+{
+ int ret;
+
+ ret = __fio_client_send_ini(client, filename);
+ if (!ret)
+ client->sent_job = 1;
+
+ return ret;
+}
+
int fio_clients_send_ini(const char *filename)
{
struct fio_client *client;
}
} else if (!filename || fio_client_send_ini(client, filename))
remove_client(client);
-
- client->sent_job = 1;
}
return !nr_clients;
}
+int fio_client_update_options(struct fio_client *client,
+ struct thread_options *o, uint64_t *tag)
+{
+ struct cmd_add_job_pdu pdu;
+
+ pdu.thread_number = cpu_to_le32(client->thread_number);
+ pdu.groupid = cpu_to_le32(client->groupid);
+ convert_thread_options_to_net(&pdu.top, o);
+
+ return fio_net_send_cmd(client->fd, FIO_NET_CMD_UPDATE_JOB, &pdu, sizeof(pdu), tag, &client->cmd_list);
+}
+
static void convert_io_stat(struct io_stat *dst, struct io_stat *src)
{
dst->max_val = le64_to_cpu(src->max_val);
{
int i, j;
- dst->error = le32_to_cpu(src->error);
- dst->groupid = le32_to_cpu(src->groupid);
- dst->pid = le32_to_cpu(src->pid);
- dst->members = le32_to_cpu(src->members);
+ dst->error = le32_to_cpu(src->error);
+ dst->thread_number = le32_to_cpu(src->thread_number);
+ dst->groupid = le32_to_cpu(src->groupid);
+ dst->pid = le32_to_cpu(src->pid);
+ dst->members = le32_to_cpu(src->members);
dst->unified_rw_rep = le32_to_cpu(src->unified_rw_rep);
for (i = 0; i < DDIR_RWDIR_CNT; i++) {
dst->total_err_count = le64_to_cpu(src->total_err_count);
dst->first_error = le32_to_cpu(src->first_error);
dst->kb_base = le32_to_cpu(src->kb_base);
+ dst->unit_base = le32_to_cpu(src->unit_base);
}
static void convert_gs(struct group_run_stats *dst, struct group_run_stats *src)
}
dst->kb_base = le32_to_cpu(src->kb_base);
+ dst->unit_base = le32_to_cpu(src->unit_base);
dst->groupid = le32_to_cpu(src->groupid);
dst->unified_rw_rep = le32_to_cpu(src->unified_rw_rep);
}
{
struct cmd_ts_pdu *p = (struct cmd_ts_pdu *) cmd->payload;
- convert_ts(&p->ts, &p->ts);
- convert_gs(&p->rs, &p->rs);
-
show_thread_status(&p->ts, &p->rs);
client->did_stat = 1;
sum_group_stats(&client_gs, &p->rs);
client_ts.members++;
+ client_ts.thread_number = p->ts.thread_number;
client_ts.groupid = p->ts.groupid;
client_ts.unified_rw_rep = p->ts.unified_rw_rep;
}
}
-static void handle_gs(struct fio_net_cmd *cmd)
+static void handle_gs(struct fio_client *client, struct fio_net_cmd *cmd)
{
struct group_run_stats *gs = (struct group_run_stats *) cmd->payload;
- convert_gs(gs, gs);
show_group_stats(gs);
}
+static void handle_text(struct fio_client *client, struct fio_net_cmd *cmd)
+{
+ struct cmd_text_pdu *pdu = (struct cmd_text_pdu *) cmd->payload;
+ const char *buf = (const char *) pdu->buf;
+ const char *name;
+ int fio_unused ret;
+
+ name = client->name ? client->name : client->hostname;
+
+ if (!client->skip_newline)
+ fprintf(f_out, "<%s> ", name);
+ ret = fwrite(buf, pdu->buf_len, 1, f_out);
+ fflush(f_out);
+ client->skip_newline = strchr(buf, '\n') == NULL;
+}
+
static void convert_agg(struct disk_util_agg *agg)
{
int i;
{
struct cmd_du_pdu *du = (struct cmd_du_pdu *) cmd->payload;
- convert_dus(&du->dus);
- convert_agg(&du->agg);
-
if (!client->disk_stats_shown) {
client->disk_stats_shown = 1;
log_info("\nDisk stats (read/write):\n");
je->nr_ramp = le32_to_cpu(je->nr_ramp);
je->nr_pending = le32_to_cpu(je->nr_pending);
je->files_open = le32_to_cpu(je->files_open);
- je->m_rate = le32_to_cpu(je->m_rate);
- je->t_rate = le32_to_cpu(je->t_rate);
- je->m_iops = le32_to_cpu(je->m_iops);
- je->t_iops = le32_to_cpu(je->t_iops);
for (i = 0; i < DDIR_RWDIR_CNT; i++) {
- je->rate[i] = le32_to_cpu(je->rate[i]);
- je->iops[i] = le32_to_cpu(je->iops[i]);
+ je->m_rate[i] = le32_to_cpu(je->m_rate[i]);
+ je->t_rate[i] = le32_to_cpu(je->t_rate[i]);
+ je->m_iops[i] = le32_to_cpu(je->m_iops[i]);
+ je->t_iops[i] = le32_to_cpu(je->t_iops[i]);
}
je->elapsed_sec = le64_to_cpu(je->elapsed_sec);
je->eta_sec = le64_to_cpu(je->eta_sec);
+ je->nr_threads = le32_to_cpu(je->nr_threads);
je->is_pow2 = le32_to_cpu(je->is_pow2);
+ je->unit_base = le32_to_cpu(je->unit_base);
}
-static void sum_jobs_eta(struct jobs_eta *dst, struct jobs_eta *je)
+void fio_client_sum_jobs_eta(struct jobs_eta *dst, struct jobs_eta *je)
{
int i;
dst->nr_ramp += je->nr_ramp;
dst->nr_pending += je->nr_pending;
dst->files_open += je->files_open;
- dst->m_rate += je->m_rate;
- dst->t_rate += je->t_rate;
- dst->m_iops += je->m_iops;
- dst->t_iops += je->t_iops;
for (i = 0; i < DDIR_RWDIR_CNT; i++) {
- dst->rate[i] += je->rate[i];
- dst->iops[i] += je->iops[i];
+ dst->m_rate[i] += je->m_rate[i];
+ dst->t_rate[i] += je->t_rate[i];
+ dst->m_iops[i] += je->m_iops[i];
+ dst->t_iops[i] += je->t_iops[i];
}
dst->elapsed_sec += je->elapsed_sec;
if (je->eta_sec > dst->eta_sec)
dst->eta_sec = je->eta_sec;
+
+ dst->nr_threads += je->nr_threads;
+ /* we need to handle je->run_str too ... */
}
-static void dec_jobs_eta(struct client_eta *eta)
+void fio_client_dec_jobs_eta(struct client_eta *eta, client_eta_op eta_fn)
{
if (!--eta->pending) {
- display_thread_status(&eta->eta);
+ eta_fn(&eta->eta);
free(eta);
}
}
static void remove_reply_cmd(struct fio_client *client, struct fio_net_cmd *cmd)
{
- struct fio_net_int_cmd *icmd = NULL;
+ struct fio_net_cmd_reply *reply = NULL;
struct flist_head *entry;
flist_for_each(entry, &client->cmd_list) {
- icmd = flist_entry(entry, struct fio_net_int_cmd, list);
+ reply = flist_entry(entry, struct fio_net_cmd_reply, list);
- if (cmd->tag == (uintptr_t) icmd)
+ if (cmd->tag == (uintptr_t) reply)
break;
- icmd = NULL;
+ reply = NULL;
}
- if (!icmd) {
- log_err("fio: client: unable to find matching tag\n");
+ if (!reply) {
+ log_err("fio: client: unable to find matching tag (%lx)\n", cmd->tag);
return;
}
- flist_del(&icmd->list);
- cmd->tag = icmd->saved_tag;
- free(icmd);
+ flist_del(&reply->list);
+ cmd->tag = reply->saved_tag;
+ free(reply);
+}
+
+int fio_client_wait_for_reply(struct fio_client *client, uint64_t tag)
+{
+ do {
+ struct fio_net_cmd_reply *reply = NULL;
+ struct flist_head *entry;
+
+ flist_for_each(entry, &client->cmd_list) {
+ reply = flist_entry(entry, struct fio_net_cmd_reply, list);
+
+ if (tag == (uintptr_t) reply)
+ break;
+
+ reply = NULL;
+ }
+
+ if (!reply)
+ break;
+
+ usleep(1000);
+ } while (1);
+
+ return 0;
}
static void handle_eta(struct fio_client *client, struct fio_net_cmd *cmd)
client->eta_in_flight = NULL;
flist_del_init(&client->eta_list);
- convert_jobs_eta(je);
- sum_jobs_eta(&eta->eta, je);
- dec_jobs_eta(eta);
+ if (client->ops->jobs_eta)
+ client->ops->jobs_eta(client, je);
+
+ fio_client_sum_jobs_eta(&eta->eta, je);
+ fio_client_dec_jobs_eta(eta, client->ops->eta);
}
static void handle_probe(struct fio_client *client, struct fio_net_cmd *cmd)
}
static void handle_stop(struct fio_client *client, struct fio_net_cmd *cmd)
+{
+ if (client->error)
+ log_info("client <%s>: exited with error %d\n", client->hostname, client->error);
+}
+
+static void convert_stop(struct fio_net_cmd *cmd)
{
struct cmd_end_pdu *pdu = (struct cmd_end_pdu *) cmd->payload;
- client->state = Client_stopped;
- client->error = le32_to_cpu(pdu->error);
+ pdu->error = le32_to_cpu(pdu->error);
+}
- if (client->error)
- log_info("client <%s>: exited with error %d\n", client->hostname, client->error);
+static void convert_text(struct fio_net_cmd *cmd)
+{
+ struct cmd_text_pdu *pdu = (struct cmd_text_pdu *) cmd->payload;
+
+ pdu->level = le32_to_cpu(pdu->level);
+ pdu->buf_len = le32_to_cpu(pdu->buf_len);
+ pdu->log_sec = le64_to_cpu(pdu->log_sec);
+ pdu->log_usec = le64_to_cpu(pdu->log_usec);
+}
+
+/*
+ * This has been compressed on the server side, since it can be big.
+ * Uncompress here.
+ */
+static struct cmd_iolog_pdu *convert_iolog(struct fio_net_cmd *cmd)
+{
+ struct cmd_iolog_pdu *pdu = (struct cmd_iolog_pdu *) cmd->payload;
+ struct cmd_iolog_pdu *ret;
+ uint32_t nr_samples;
+ unsigned long total;
+ z_stream stream;
+ void *p;
+ int i;
+
+ stream.zalloc = Z_NULL;
+ stream.zfree = Z_NULL;
+ stream.opaque = Z_NULL;
+ stream.avail_in = 0;
+ stream.next_in = Z_NULL;
+
+ if (inflateInit(&stream) != Z_OK)
+ return NULL;
+
+ /*
+ * Get header first, it's not compressed
+ */
+ nr_samples = le32_to_cpu(pdu->nr_samples);
+
+ total = nr_samples * sizeof(struct io_sample);
+ ret = malloc(total + sizeof(*pdu));
+ ret->thread_number = le32_to_cpu(pdu->thread_number);
+ ret->nr_samples = nr_samples;
+ ret->log_type = le32_to_cpu(pdu->log_type);
+ strcpy((char *) ret->name, (char *) pdu->name);
+
+ p = (void *) ret + sizeof(*pdu);
+
+ stream.avail_in = cmd->pdu_len - sizeof(*pdu);
+ stream.next_in = (void *) pdu + sizeof(*pdu);
+ while (stream.avail_in) {
+ unsigned int this_chunk = 65536;
+ unsigned int this_len;
+ int err;
+
+ if (this_chunk > total)
+ this_chunk = total;
+
+ stream.avail_out = this_chunk;
+ stream.next_out = p;
+ err = inflate(&stream, Z_NO_FLUSH);
+ /* may be Z_OK, or Z_STREAM_END */
+ if (err < 0) {
+ log_err("fio: inflate error %d\n", err);
+ free(ret);
+ ret = NULL;
+ goto out;
+ }
+
+ this_len = this_chunk - stream.avail_out;
+ p += this_len;
+ total -= this_len;
+ }
+
+ for (i = 0; i < ret->nr_samples; i++) {
+ struct io_sample *s = &ret->samples[i];
+
+ s->time = le64_to_cpu(s->time);
+ s->val = le64_to_cpu(s->val);
+ s->ddir = le32_to_cpu(s->ddir);
+ s->bs = le32_to_cpu(s->bs);
+ }
+
+out:
+ inflateEnd(&stream);
+ return ret;
}
-static int handle_client(struct fio_client *client)
+int fio_handle_client(struct fio_client *client)
{
+ struct client_ops *ops = client->ops;
struct fio_net_cmd *cmd;
dprint(FD_NET, "client: handle %s\n", client->hostname);
if (!cmd)
return 0;
- dprint(FD_NET, "client: got cmd op %s from %s\n",
- fio_server_op(cmd->opcode), client->hostname);
+ dprint(FD_NET, "client: got cmd op %s from %s (pdu=%u)\n",
+ fio_server_op(cmd->opcode), client->hostname, cmd->pdu_len);
switch (cmd->opcode) {
case FIO_NET_CMD_QUIT:
+ if (ops->quit)
+ ops->quit(client, cmd);
remove_client(client);
free(cmd);
break;
- case FIO_NET_CMD_TEXT: {
- const char *buf = (const char *) cmd->payload;
- const char *name;
- int fio_unused ret;
-
- name = client->name ? client->name : client->hostname;
-
- if (!client->skip_newline)
- fprintf(f_out, "<%s> ", name);
- ret = fwrite(buf, cmd->pdu_len, 1, f_out);
- fflush(f_out);
- client->skip_newline = strchr(buf, '\n') == NULL;
+ case FIO_NET_CMD_TEXT:
+ convert_text(cmd);
+ ops->text(client, cmd);
free(cmd);
break;
- }
- case FIO_NET_CMD_DU:
- handle_du(client, cmd);
+ case FIO_NET_CMD_DU: {
+ struct cmd_du_pdu *du = (struct cmd_du_pdu *) cmd->payload;
+
+ convert_dus(&du->dus);
+ convert_agg(&du->agg);
+
+ ops->disk_util(client, cmd);
free(cmd);
break;
- case FIO_NET_CMD_TS:
- handle_ts(client, cmd);
+ }
+ case FIO_NET_CMD_TS: {
+ struct cmd_ts_pdu *p = (struct cmd_ts_pdu *) cmd->payload;
+
+ convert_ts(&p->ts, &p->ts);
+ convert_gs(&p->rs, &p->rs);
+
+ ops->thread_status(client, cmd);
free(cmd);
break;
- case FIO_NET_CMD_GS:
- handle_gs(cmd);
+ }
+ case FIO_NET_CMD_GS: {
+ struct group_run_stats *gs = (struct group_run_stats *) cmd->payload;
+
+ convert_gs(gs, gs);
+
+ ops->group_stats(client, cmd);
free(cmd);
break;
- case FIO_NET_CMD_ETA:
+ }
+ case FIO_NET_CMD_ETA: {
+ struct jobs_eta *je = (struct jobs_eta *) cmd->payload;
+
remove_reply_cmd(client, cmd);
+ convert_jobs_eta(je);
handle_eta(client, cmd);
free(cmd);
break;
+ }
case FIO_NET_CMD_PROBE:
remove_reply_cmd(client, cmd);
- handle_probe(client, cmd);
+ ops->probe(client, cmd);
free(cmd);
break;
- case FIO_NET_CMD_RUN:
+ case FIO_NET_CMD_SERVER_START:
client->state = Client_running;
+ if (ops->job_start)
+ ops->job_start(client, cmd);
free(cmd);
break;
- case FIO_NET_CMD_START:
- handle_start(client, cmd);
+ case FIO_NET_CMD_START: {
+ struct cmd_start_pdu *pdu = (struct cmd_start_pdu *) cmd->payload;
+
+ pdu->jobs = le32_to_cpu(pdu->jobs);
+ ops->start(client, cmd);
free(cmd);
break;
- case FIO_NET_CMD_STOP:
- handle_stop(client, cmd);
+ }
+ case FIO_NET_CMD_STOP: {
+ struct cmd_end_pdu *pdu = (struct cmd_end_pdu *) cmd->payload;
+
+ convert_stop(cmd);
+ client->state = Client_stopped;
+		client->error = pdu->error;	/* convert_stop() already byte-swapped it */
+ client->signal = le32_to_cpu(pdu->signal);
+ ops->stop(client, cmd);
+ free(cmd);
+ break;
+ }
+ case FIO_NET_CMD_ADD_JOB: {
+ struct cmd_add_job_pdu *pdu = (struct cmd_add_job_pdu *) cmd->payload;
+
+ client->thread_number = le32_to_cpu(pdu->thread_number);
+ client->groupid = le32_to_cpu(pdu->groupid);
+
+ if (ops->add_job)
+ ops->add_job(client, cmd);
+ free(cmd);
+ break;
+ }
+ case FIO_NET_CMD_IOLOG:
+ if (ops->iolog) {
+ struct cmd_iolog_pdu *pdu;
+
+ pdu = convert_iolog(cmd);
+ ops->iolog(client, pdu);
+ }
+ free(cmd);
+ break;
+ case FIO_NET_CMD_UPDATE_JOB:
+ ops->update_job(client, cmd);
+ remove_reply_cmd(client, cmd);
free(cmd);
break;
default:
return 1;
}
-static void request_client_etas(void)
+static void request_client_etas(struct client_ops *ops)
{
struct fio_client *client;
struct flist_head *entry;
}
while (skipped--)
- dec_jobs_eta(eta);
+ fio_client_dec_jobs_eta(eta, ops->eta);
dprint(FD_NET, "client: requested eta tag %p\n", eta);
}
static int client_check_cmd_timeout(struct fio_client *client,
struct timeval *now)
{
- struct fio_net_int_cmd *cmd;
+ struct fio_net_cmd_reply *reply;
struct flist_head *entry, *tmp;
int ret = 0;
flist_for_each_safe(entry, tmp, &client->cmd_list) {
- cmd = flist_entry(entry, struct fio_net_int_cmd, list);
+ reply = flist_entry(entry, struct fio_net_cmd_reply, list);
- if (mtime_since(&cmd->tv, now) < FIO_NET_CLIENT_TIMEOUT)
+ if (mtime_since(&reply->tv, now) < FIO_NET_CLIENT_TIMEOUT)
continue;
log_err("fio: client %s, timeout on cmd %s\n", client->hostname,
- fio_server_op(cmd->cmd.opcode));
- flist_del(&cmd->list);
- free(cmd);
+ fio_server_op(reply->opcode));
+ flist_del(&reply->list);
+ free(reply);
ret = 1;
}
return flist_empty(&client->cmd_list) && ret;
}
-static int fio_client_timed_out(void)
+static int fio_check_clients_timed_out(void)
{
struct fio_client *client;
struct flist_head *entry, *tmp;
if (!client_check_cmd_timeout(client, &tv))
continue;
- log_err("fio: client %s timed out\n", client->hostname);
+ if (client->ops->timed_out)
+ client->ops->timed_out(client);
+ else
+ log_err("fio: client %s timed out\n", client->hostname);
+
remove_client(client);
ret = 1;
}
return ret;
}
-int fio_handle_clients(void)
+int fio_handle_clients(struct client_ops *ops)
{
struct pollfd *pfds;
int i, ret = 0, retval = 0;
flist_for_each_safe(entry, tmp, &client_list) {
client = flist_entry(entry, struct fio_client, list);
- if (!client->sent_job &&
+ if (!client->sent_job && !client->ops->stay_connected &&
flist_empty(&client->cmd_list)) {
remove_client(client);
continue;
fio_gettime(&tv, NULL);
if (mtime_since(&eta_tv, &tv) >= 900) {
- request_client_etas();
+ request_client_etas(ops);
memcpy(&eta_tv, &tv, sizeof(tv));
- if (fio_client_timed_out())
+ if (fio_check_clients_timed_out())
break;
}
- ret = poll(pfds, nr_clients, 100);
+ ret = poll(pfds, nr_clients, ops->eta_msec);
if (ret < 0) {
if (errno == EINTR)
continue;
log_err("fio: unknown client fd %d\n", pfds[i].fd);
continue;
}
- if (!handle_client(client)) {
+ if (!fio_handle_client(client)) {
log_info("client: host=%s disconnected\n",
client->hostname);
remove_client(client);
retval = 1;
} else if (client->error)
retval = 1;
- put_client(client);
+ fio_put_client(client);
}
}
cross_prefix=${cross_prefix-${CROSS_COMPILE}}
cc="${CC-${cross_prefix}gcc}"
+# default options
show_help="no"
exit_val=0
+gfio="no"
# parse options
for opt do
;;
--build-32bit-win=*) build_32bit_win="$optarg"
;;
- --help)
- show_help="yes"
+ --enable-gfio)
+ gfio="yes"
;;
+ --help)
+ show_help="yes"
+ ;;
*)
echo "Bad option $opt"
show_help="yes"
echo "--cc= Specify compiler to use"
echo "--extra-cflags= Specify extra CFLAGS to pass to compiler"
echo "--build-32bit-win= Specify yes for a 32-bit build on Windows"
+ echo "--enable-gfio Enable building of gtk gfio"
exit $exit_val
fi
fi
echo "Wordsize $wordsize"
+##########################################
+# zlib probe
+zlib="no"
+cat > $TMPC <<EOF
+#include <zlib.h>
+int main(void)
+{
+ z_stream stream;
+ if (inflateInit(&stream) != Z_OK)
+ return 1;
+ return 0;
+}
+EOF
+if compile_prog "" "-lz" "zlib" ; then
+ zlib=yes
+ LIBS="-lz $LIBS"
+else
+ feature_not_found "zlib"
+ zlib=no
+fi
+echo "zlib $zlib"
+
##########################################
# linux-aio probe
libaio="no"
fi
echo "strsep $strsep"
+##########################################
+# strcasestr() probe
+strcasestr="no"
+cat > $TMPC << EOF
+#include <string.h>
+int main(int argc, char **argv)
+{
+  strcasestr(NULL, NULL);
+  return 0;
+}
+EOF
+if compile_prog "" "" "strcasestr"; then
+  strcasestr="yes"
+fi
+echo "strcasestr $strcasestr"
+
##########################################
# getopt_long_only() probe
getopt_long_only="no"
echo "__thread $tls_thread"
##########################################
+# gtk+-2.0 / gthread-2.0 probe (required to build gfio)
+if test "$gfio" = "yes" ; then
+ cat > $TMPC << EOF
+#include <glib.h>
+#include <cairo.h>
+#include <gtk/gtk.h>
+int main(void)
+{
+ gdk_threads_enter();
+ gdk_threads_leave();
+
+ printf("%d", GTK_CHECK_VERSION(2, 18, 0));
+}
+EOF
+GTK_CFLAGS=$(pkg-config --cflags gtk+-2.0 gthread-2.0)
+if test "$?" != "0" ; then
+ echo "configure: gtk and gthread not found"
+ exit 1
+fi
+GTK_LIBS=$(pkg-config --libs gtk+-2.0 gthread-2.0)
+if test "$?" != "0" ; then
+ echo "configure: gtk and gthread not found"
+ exit 1
+fi
+if compile_prog "$GTK_CFLAGS" "$GTK_LIBS" "gfio" ; then
+ r=$($TMPE)
+ if test "$r" != "0" ; then
+ gfio="yes"
+ LIBS="$LIBS $GTK_LIBS"
+ CFLAGS="$CFLAGS $GTK_CFLAGS"
+ else
+ echo "GTK found, but need version 2.18 or higher"
+ gfio="no"
+ fi
+else
+ echo "Please install gtk and gdk libraries"
+ gfio="no"
+fi
+fi
+
+echo "gfio $gfio"
+
# Check whether we have getrusage(RUSAGE_THREAD)
rusage_thread="no"
cat > $TMPC << EOF
if test "$strsep" = "yes" ; then
output_sym "CONFIG_STRSEP"
fi
+if test "$strcasestr" = "yes" ; then
+  output_sym "CONFIG_STRCASESTR"
+fi
if test "$getopt_long_only" = "yes" ; then
output_sym "CONFIG_GETOPT_LONG_ONLY"
fi
if test "$rusage_thread" = "yes" ; then
output_sym "CONFIG_RUSAGE_THREAD"
fi
+if test "$gfio" = "yes" ; then
+ echo "CONFIG_GFIO=y" >> $config_host_mak
+fi
if test "$sched_idle" = "yes" ; then
output_sym "CONFIG_SCHED_IDLE"
fi
fi
echo "LIBS+=$LIBS" >> $config_host_mak
+echo "CFLAGS+=$CFLAGS" >> $config_host_mak
echo "CC=$cc" >> $config_host_mak
echo "BUILD_CFLAGS=$BUILD_CFLAGS $CFLAGS" >> $config_host_mak
static struct fio_option options[] = {
{
.name = "hostname",
+ .lname = "net engine hostname",
.type = FIO_OPT_STR_STORE,
.cb = str_hostname_cb,
.help = "Hostname for net IO engine",
+ .category = FIO_OPT_C_IO,
},
{
.name = "port",
+ .lname = "net engine port",
.type = FIO_OPT_INT,
.off1 = offsetof(struct netio_options, port),
.minval = 1,
.maxval = 65535,
.help = "Port to use for TCP or UDP net connections",
+ .category = FIO_OPT_C_IO,
},
{
.name = "protocol",
+ .lname = "net engine protocol",
.alias = "proto",
.type = FIO_OPT_STR,
.off1 = offsetof(struct netio_options, proto),
.help = "Network protocol to use",
.def = "tcp",
+ .category = FIO_OPT_C_IO,
.posval = {
{ .ival = "tcp",
.oval = FIO_TYPE_TCP,
#endif
{
.name = "listen",
+ .lname = "net engine listen",
.type = FIO_OPT_STR_SET,
.off1 = offsetof(struct netio_options, listen),
.help = "Listen for incoming TCP connections",
+ .category = FIO_OPT_C_IO,
},
{
.name = "pingpong",
.options = options,
.option_struct_size = sizeof(struct netio_options),
.flags = FIO_SYNCIO | FIO_DISKLESSIO | FIO_UNIDIR |
- FIO_PIPEIO,
+ FIO_PIPEIO | FIO_BIT_BASED,
};
static int str_hostname_cb(void *data, const char *input)
/*
* Convert seconds to a printable string.
*/
-static void eta_to_str(char *str, unsigned long eta_sec)
+void eta_to_str(char *str, unsigned long eta_sec)
{
unsigned int d, h, m, s;
int disp_hour = 0;
unified_rw_rep += td->o.unified_rw_rep;
if (is_power_of_2(td->o.kb_base))
je->is_pow2 = 1;
+ je->unit_base = td->o.unit_base;
if (td->o.bw_avg_time < bw_avg_time)
bw_avg_time = td->o.bw_avg_time;
if (td->runstate == TD_RUNNING || td->runstate == TD_VERIFYING
|| td->runstate == TD_PRE_READING) {
je->nr_running++;
if (td_read(td)) {
- je->t_rate += td->o.rate[DDIR_READ];
- je->t_iops += td->o.rate_iops[DDIR_READ];
- je->m_rate += td->o.ratemin[DDIR_READ];
- je->m_iops += td->o.rate_iops_min[DDIR_READ];
+ je->t_rate[0] += td->o.rate[DDIR_READ];
+ je->t_iops[0] += td->o.rate_iops[DDIR_READ];
+ je->m_rate[0] += td->o.ratemin[DDIR_READ];
+ je->m_iops[0] += td->o.rate_iops_min[DDIR_READ];
}
if (td_write(td)) {
- je->t_rate += td->o.rate[DDIR_WRITE];
- je->t_iops += td->o.rate_iops[DDIR_WRITE];
- je->m_rate += td->o.ratemin[DDIR_WRITE];
- je->m_iops += td->o.rate_iops_min[DDIR_WRITE];
+ je->t_rate[1] += td->o.rate[DDIR_WRITE];
+ je->t_iops[1] += td->o.rate_iops[DDIR_WRITE];
+ je->m_rate[1] += td->o.ratemin[DDIR_WRITE];
+ je->m_iops[1] += td->o.rate_iops_min[DDIR_WRITE];
}
if (td_trim(td)) {
- je->t_rate += td->o.rate[DDIR_TRIM];
- je->t_iops += td->o.rate_iops[DDIR_TRIM];
- je->m_rate += td->o.ratemin[DDIR_TRIM];
- je->m_iops += td->o.rate_iops_min[DDIR_TRIM];
+ je->t_rate[2] += td->o.rate[DDIR_TRIM];
+ je->t_iops[2] += td->o.rate_iops[DDIR_TRIM];
+ je->m_rate[2] += td->o.ratemin[DDIR_TRIM];
+ je->m_iops[2] += td->o.rate_iops_min[DDIR_TRIM];
}
je->files_open += td->nr_open_files;
}
p += sprintf(p, "Jobs: %d (f=%d)", je->nr_running, je->files_open);
- if (je->m_rate || je->t_rate) {
+ if (je->m_rate[0] || je->m_rate[1] || je->t_rate[0] || je->t_rate[1]) {
char *tr, *mr;
- mr = num2str(je->m_rate[0] + je->m_rate[1], 4, 0, je->is_pow2);
- tr = num2str(je->t_rate[0] + je->t_rate[1], 4, 0, je->is_pow2);
- mr = num2str(je->m_rate, 4, 0, je->is_pow2, 8);
- tr = num2str(je->t_rate, 4, 0, je->is_pow2, 8);
- p += sprintf(p, ", CR=%s/%s /s", tr, mr);
++ mr = num2str(je->m_rate[0] + je->m_rate[1], 4, 0, je->is_pow2, 8);
++ tr = num2str(je->t_rate[0] + je->t_rate[1], 4, 0, je->is_pow2, 8);
+ p += sprintf(p, ", CR=%s/%s KB/s", tr, mr);
free(tr);
free(mr);
- } else if (je->m_iops || je->t_iops)
- p += sprintf(p, ", CR=%d/%d IOPS", je->t_iops, je->m_iops);
+ } else if (je->m_iops[0] || je->m_iops[1] || je->t_iops[0] || je->t_iops[1]) {
+ p += sprintf(p, ", CR=%d/%d IOPS",
+ je->t_iops[0] + je->t_iops[1],
+ je->m_iops[0] + je->m_iops[1]);
+ }
if (je->eta_sec != INT_MAX && je->nr_running) {
char perc_str[32];
char *iops_str[DDIR_RWDIR_CNT];
for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++) {
rate_str[ddir] = num2str(je->rate[ddir], 5,
- 1024, je->is_pow2);
- iops_str[ddir] = num2str(je->iops[ddir], 4, 1, 0);
+ 1024, je->is_pow2, je->unit_base);
+ iops_str[ddir] = num2str(je->iops[ddir], 4, 1, 0, 0);
}
left = sizeof(output) - (p - output) - 1;
r = fallocate(f->fd, FALLOC_FL_KEEP_SIZE, 0,
f->real_file_size);
- if (r != 0) {
+ if (r != 0)
td_verror(td, errno, "fallocate");
- }
+
break;
#endif /* CONFIG_LINUX_FALLOCATE */
default:
int setup_files(struct thread_data *td)
{
unsigned long long total_size, extend_size;
+ struct thread_options *o = &td->o;
struct fio_file *f;
unsigned int i;
int err = 0, need_extend;
dprint(FD_FILE, "setup files\n");
- if (td->o.read_iolog_file)
+ if (o->read_iolog_file)
goto done;
/*
total_size += f->real_file_size;
}
- if (td->o.fill_device)
+ if (o->fill_device)
td->fill_device_size = get_fs_free_counts(td);
/*
* device/file sizes are zero and no size given, punt
*/
- if ((!total_size || total_size == -1ULL) && !td->o.size &&
- !(td->io_ops->flags & FIO_NOIO) && !td->o.fill_device) {
- log_err("%s: you need to specify size=\n", td->o.name);
+ if ((!total_size || total_size == -1ULL) && !o->size &&
+ !(td->io_ops->flags & FIO_NOIO) && !o->fill_device &&
+ !(o->nr_files && (o->file_size_low || o->file_size_high))) {
+ log_err("%s: you need to specify size=\n", o->name);
td_verror(td, EINVAL, "total_file_size");
return 1;
}
for_each_file(td, f, i) {
f->file_offset = get_start_offset(td);
- if (!td->o.file_size_low) {
+ if (!o->file_size_low) {
/*
* no file size range given, file size is equal to
* total size divided by number of files. if that is
* zero, set it to the real file size.
*/
- f->io_size = td->o.size / td->o.nr_files;
+ f->io_size = o->size / o->nr_files;
if (!f->io_size)
f->io_size = f->real_file_size - f->file_offset;
- } else if (f->real_file_size < td->o.file_size_low ||
- f->real_file_size > td->o.file_size_high) {
- if (f->file_offset > td->o.file_size_low)
+ } else if (f->real_file_size < o->file_size_low ||
+ f->real_file_size > o->file_size_high) {
+ if (f->file_offset > o->file_size_low)
goto err_offset;
/*
* file size given. if it's fixed, use that. if it's a
* range, generate a random size in-between.
*/
- if (td->o.file_size_low == td->o.file_size_high) {
- f->io_size = td->o.file_size_low
- - f->file_offset;
- } else {
+ if (o->file_size_low == o->file_size_high)
+ f->io_size = o->file_size_low - f->file_offset;
+ else {
f->io_size = get_rand_file_size(td)
- f->file_offset;
}
if (f->io_size == -1ULL)
total_size = -1ULL;
else {
- if (td->o.size_percent)
- f->io_size = (f->io_size * td->o.size_percent) / 100;
+ if (o->size_percent)
+ f->io_size = (f->io_size * o->size_percent) / 100;
total_size += f->io_size;
}
if (f->filetype == FIO_TYPE_FILE &&
(f->io_size + f->file_offset) > f->real_file_size &&
!(td->io_ops->flags & FIO_DISKLESSIO)) {
- if (!td->o.create_on_open) {
+ if (!o->create_on_open) {
need_extend++;
extend_size += (f->io_size + f->file_offset);
} else
}
}
- if (!td->o.size || td->o.size > total_size)
- td->o.size = total_size;
+ if (!o->size || o->size > total_size)
+ o->size = total_size;
/*
* See if we need to extend some files
temp_stall_ts = 1;
if (output_format == FIO_OUTPUT_NORMAL)
log_info("%s: Laying out IO file(s) (%u file(s) /"
- " %lluMB)\n", td->o.name, need_extend,
+ " %lluMB)\n", o->name, need_extend,
extend_size >> 20);
for_each_file(td, f, i) {
assert(f->filetype == FIO_TYPE_FILE);
fio_file_clear_extend(f);
- if (!td->o.fill_device) {
+ if (!o->fill_device) {
old_len = f->real_file_size;
extend_len = f->io_size + f->file_offset -
old_len;
if (err)
return err;
- if (!td->o.zone_size)
- td->o.zone_size = td->o.size;
+ if (!o->zone_size)
+ o->zone_size = o->size;
/*
* iolog already set the total io size, if we read back
* stored entries.
*/
- if (!td->o.read_iolog_file)
- td->total_io_size = td->o.size * td->o.loops;
+ if (!o->read_iolog_file)
+ td->total_io_size = o->size * o->loops;
done:
- if (td->o.create_only)
+ if (o->create_only)
td->done = 1;
return 0;
err_offset:
- log_err("%s: you need to specify valid offset=\n", td->o.name);
+ log_err("%s: you need to specify valid offset=\n", o->name);
return 1;
}
seed = td->rand_seeds[4];
if (td->o.random_distribution == FIO_RAND_DIST_ZIPF)
- zipf_init(&f->zipf, nranges, td->o.zipf_theta, seed);
+ zipf_init(&f->zipf, nranges, td->o.zipf_theta.u.f, seed);
else
- pareto_init(&f->zipf, nranges, td->o.pareto_h, seed);
+ pareto_init(&f->zipf, nranges, td->o.pareto_h.u.f, seed);
return 1;
}
reserved name, meaning stdin or stdout, depending on the read/write direction
set.
.TP
+.BI filename_format \fR=\fPstr
+If sharing multiple files between jobs, it is usually necessary to have
+fio generate the exact names that you want. By default, fio will name a file
+based on the default file format specification of
+\fBjobname.jobnumber.filenumber\fP. With this option, that can be
+customized. Fio will recognize and replace the following keywords in this
+string:
+.RS
+.RS
+.TP
+.B $jobname
+The name of the worker thread or process.
+.TP
+.B $jobnum
+The incremental number of the worker thread or process.
+.TP
+.B $filenum
+The incremental number of the file for that worker thread or process.
+.RE
+.P
+To have dependent jobs share a set of files, this option can be set to
+have fio generate filenames that are shared between the two. For instance,
+if \fBtestfiles.$filenum\fR is specified, file number 4 for any job will
+be named \fBtestfiles.4\fR. The default of \fB$jobname.$jobnum.$filenum\fR
+will be used if no other format specifier is given.
+.RE
+.P
+.TP
.BI lockfile \fR=\fPstr
Fio defaults to not locking any files before it does IO to them. If a file or
file descriptor is shared, fio can serialize IO to that file to make the end
Read-write locking on the file. Many readers may access the file at the same
time, but writes get exclusive access.
.RE
+.RE
.P
.BI opendir \fR=\fPstr
Recursively open any files below directory \fIstr\fR.
used identically to normal parameters, with the caveat that when used on the
command line, the must come after the ioengine that defines them is selected.
.TP
+.BI (cpu)cpuload \fR=\fPint
+Attempt to use the specified percentage of CPU cycles.
+.TP
+.BI (cpu)cpuchunks \fR=\fPint
+Split the load into cycles of the given time. In microseconds.
+.TP
.BI (libaio)userspace_reap
Normally, with the libaio engine in use, fio will use
the io_getevents system call to reap newly returned events.
struct thread_data;
#include "compiler/compiler.h"
+#include "thread_options.h"
#include "flist.h"
#include "fifo.h"
-#include "rbtree.h"
+#include "lib/rbtree.h"
#include "arch/arch.h"
#include "os/os.h"
#include "mutex.h"
#include "gettime.h"
#include "lib/getopt.h"
#include "lib/rand.h"
+#include "client.h"
#include "server.h"
#include "stat.h"
#include "flow.h"
#define MPOL_LOCAL MPOL_MAX
#endif
-/*
- * What type of allocation to use for io buffers
- */
-enum fio_memtype {
- MEM_MALLOC = 0, /* ordinary malloc */
- MEM_SHM, /* use shared memory segments */
- MEM_SHMHUGE, /* use shared memory segments with huge pages */
- MEM_MMAP, /* use anonynomous mmap */
- MEM_MMAPHUGE, /* memory mapped huge file */
-};
-
/*
* offset generator types
*/
RW_SEQ_IDENT,
};
-/*
- * What type of errors to continue on when continue_on_error is used
- */
-enum error_type_bit {
- ERROR_TYPE_READ_BIT = 0,
- ERROR_TYPE_WRITE_BIT = 1,
- ERROR_TYPE_VERIFY_BIT = 2,
- ERROR_TYPE_CNT = 3,
-};
-
-enum error_type {
- ERROR_TYPE_NONE = 0,
- ERROR_TYPE_READ = 1 << ERROR_TYPE_READ_BIT,
- ERROR_TYPE_WRITE = 1 << ERROR_TYPE_WRITE_BIT,
- ERROR_TYPE_VERIFY = 1 << ERROR_TYPE_VERIFY_BIT,
- ERROR_TYPE_ANY = 0xffff,
-};
-
-struct bssplit {
- unsigned int bs;
- unsigned char perc;
-};
-
-struct thread_options {
- int pad;
- char *description;
- char *name;
- char *directory;
- char *filename;
- char *filename_format;
- char *opendir;
- char *ioengine;
- enum td_ddir td_ddir;
- unsigned int rw_seq;
- unsigned int kb_base;
- unsigned int unit_base;
- unsigned int ddir_seq_nr;
- long ddir_seq_add;
- unsigned int iodepth;
- unsigned int iodepth_low;
- unsigned int iodepth_batch;
- unsigned int iodepth_batch_complete;
-
- unsigned long long size;
- unsigned int size_percent;
- unsigned int fill_device;
- unsigned long long file_size_low;
- unsigned long long file_size_high;
- unsigned long long start_offset;
-
- unsigned int bs[DDIR_RWDIR_CNT];
- unsigned int ba[DDIR_RWDIR_CNT];
- unsigned int min_bs[DDIR_RWDIR_CNT];
- unsigned int max_bs[DDIR_RWDIR_CNT];
- struct bssplit *bssplit[DDIR_RWDIR_CNT];
- unsigned int bssplit_nr[DDIR_RWDIR_CNT];
-
- int *ignore_error[ERROR_TYPE_CNT];
- unsigned int ignore_error_nr[ERROR_TYPE_CNT];
- unsigned int error_dump;
-
- unsigned int nr_files;
- unsigned int open_files;
- enum file_lock_mode file_lock_mode;
-
- unsigned int odirect;
- unsigned int invalidate_cache;
- unsigned int create_serialize;
- unsigned int create_fsync;
- unsigned int create_on_open;
- unsigned int create_only;
- unsigned int end_fsync;
- unsigned int pre_read;
- unsigned int sync_io;
- unsigned int verify;
- unsigned int do_verify;
- unsigned int verifysort;
- unsigned int verifysort_nr;
- unsigned int verify_interval;
- unsigned int verify_offset;
- char verify_pattern[MAX_PATTERN_SIZE];
- unsigned int verify_pattern_bytes;
- unsigned int verify_fatal;
- unsigned int verify_dump;
- unsigned int verify_async;
- unsigned long long verify_backlog;
- unsigned int verify_batch;
- unsigned int experimental_verify;
- unsigned int use_thread;
- unsigned int unlink;
- unsigned int do_disk_util;
- unsigned int override_sync;
- unsigned int rand_repeatable;
- unsigned int use_os_rand;
- unsigned int write_lat_log;
- unsigned int write_bw_log;
- unsigned int write_iops_log;
- unsigned int log_avg_msec;
- unsigned int norandommap;
- unsigned int softrandommap;
- unsigned int bs_unaligned;
- unsigned int fsync_on_close;
-
- unsigned int random_distribution;
- double zipf_theta;
- double pareto_h;
-
- unsigned int random_generator;
-
- unsigned int hugepage_size;
- unsigned int rw_min_bs;
- unsigned int thinktime;
- unsigned int thinktime_spin;
- unsigned int thinktime_blocks;
- unsigned int fsync_blocks;
- unsigned int fdatasync_blocks;
- unsigned int barrier_blocks;
- unsigned long long start_delay;
- unsigned long long timeout;
- unsigned long long ramp_time;
- unsigned int overwrite;
- unsigned int bw_avg_time;
- unsigned int iops_avg_time;
- unsigned int loops;
- unsigned long long zone_range;
- unsigned long long zone_size;
- unsigned long long zone_skip;
- enum fio_memtype mem_type;
- unsigned int mem_align;
-
- unsigned int max_latency;
-
- unsigned int stonewall;
- unsigned int new_group;
- unsigned int numjobs;
- os_cpu_mask_t cpumask;
- unsigned int cpumask_set;
- os_cpu_mask_t verify_cpumask;
- unsigned int verify_cpumask_set;
-#ifdef CONFIG_LIBNUMA
- struct bitmask *numa_cpunodesmask;
- unsigned int numa_cpumask_set;
- unsigned short numa_mem_mode;
- unsigned int numa_mem_prefer_node;
- struct bitmask *numa_memnodesmask;
- unsigned int numa_memmask_set;
-#endif
- unsigned int iolog;
- unsigned int rwmixcycle;
- unsigned int rwmix[2];
- unsigned int nice;
- unsigned int file_service_type;
- unsigned int group_reporting;
- unsigned int fadvise_hint;
- enum fio_fallocate_mode fallocate_mode;
- unsigned int zero_buffers;
- unsigned int refill_buffers;
- unsigned int scramble_buffers;
- unsigned int compress_percentage;
- unsigned int compress_chunk;
- unsigned int time_based;
- unsigned int disable_lat;
- unsigned int disable_clat;
- unsigned int disable_slat;
- unsigned int disable_bw;
- unsigned int unified_rw_rep;
- unsigned int gtod_reduce;
- unsigned int gtod_cpu;
- unsigned int gtod_offload;
- enum fio_cs clocksource;
- unsigned int no_stall;
- unsigned int trim_percentage;
- unsigned int trim_batch;
- unsigned int trim_zero;
- unsigned long long trim_backlog;
- unsigned int clat_percentiles;
- unsigned int percentile_precision; /* digits after decimal for percentiles */
- fio_fp64_t percentile_list[FIO_IO_U_LIST_MAX_LEN];
-
- char *read_iolog_file;
- char *write_iolog_file;
- char *bw_log_file;
- char *lat_log_file;
- char *iops_log_file;
- char *replay_redirect;
-
- /*
- * Pre-run and post-run shell
- */
- char *exec_prerun;
- char *exec_postrun;
-
- unsigned int rate[DDIR_RWDIR_CNT];
- unsigned int ratemin[DDIR_RWDIR_CNT];
- unsigned int ratecycle;
- unsigned int rate_iops[DDIR_RWDIR_CNT];
- unsigned int rate_iops_min[DDIR_RWDIR_CNT];
-
- char *ioscheduler;
-
- /*
- * CPU "io" cycle burner
- */
- unsigned int cpuload;
- unsigned int cpucycle;
-
- /*
- * I/O Error handling
- */
- enum error_type continue_on_error;
-
- /*
- * Benchmark profile type
- */
- char *profile;
-
- /*
- * blkio cgroup support
- */
- char *cgroup;
- unsigned int cgroup_weight;
- unsigned int cgroup_nodelete;
-
- unsigned int uid;
- unsigned int gid;
-
- int flow_id;
- int flow;
- int flow_watermark;
- unsigned int flow_sleep;
-
- unsigned long long offset_increment;
-
- unsigned int sync_file_range;
-};
-
enum {
TD_F_VER_BACKLOG = 1,
TD_F_TRIM_BACKLOG = 2,
void *eo;
char verror[FIO_VERROR_SIZE];
pthread_t thread;
- int thread_number;
- int groupid;
+ unsigned int thread_number;
+ unsigned int groupid;
struct thread_stat ts;
+ int client_type;
+
struct io_log *slat_log;
struct io_log *clat_log;
struct io_log *lat_log;
size_t orig_buffer_size;
volatile int terminate;
volatile int runstate;
- unsigned int ioprio;
- unsigned int ioprio_set;
unsigned int last_was_sync;
enum fio_ddir last_ddir;
- char *mmapfile;
int mmapfd;
void *iolog_buf;
*/
struct prof_io_ops prof_io_ops;
void *prof_data;
+
+ void *pinned_mem;
};
/*
extern int exitall_on_terminate;
extern unsigned int thread_number;
extern unsigned int stat_number;
-extern unsigned int nr_process, nr_thread;
extern int shm_id;
extern int groupid;
extern int output_format;
extern int temp_stall_ts;
-extern unsigned long long mlock_size;
extern uintptr_t page_mask, page_size;
extern int read_only;
extern int eta_print;
/*
* Init/option functions
*/
+extern int __must_check fio_init_options(void);
extern int __must_check parse_options(int, char **);
-extern int parse_jobs_ini(char *, int, int);
-extern int parse_cmd_line(int, char **);
+extern int parse_jobs_ini(char *, int, int, int);
+extern int parse_cmd_line(int, char **, int);
extern int fio_backend(void);
extern void reset_fio_state(void);
extern void clear_io_state(struct thread_data *);
extern void fio_options_mem_dupe(struct thread_data *);
extern void options_mem_dupe(void *data, struct fio_option *options);
extern void td_fill_rand_seeds(struct thread_data *);
-extern void add_job_opts(const char **);
+extern void add_job_opts(const char **, int);
- extern char *num2str(unsigned long, int, int, int);
+ extern char *num2str(unsigned long, int, int, int, int);
extern int ioengine_load(struct thread_data *);
+extern unsigned long page_mask;
+extern unsigned long page_size;
+extern int initialize_fio(char *envp[]);
+
#define FIO_GETOPT_JOB 0x89000000
#define FIO_GETOPT_IOENGINE 0x98000000
#define FIO_NR_OPTIONS (FIO_MAX_OPTS + 128)
*/
extern void print_thread_status(void);
extern void print_status_init(int);
+extern char *fio_uint_to_kmg(unsigned int val);
/*
* Thread life cycle. Once a thread has a runstate beyond TD_INITIALIZED, it
/*
* Memory helpers
*/
-extern int __must_check fio_pin_memory(void);
-extern void fio_unpin_memory(void);
+extern int __must_check fio_pin_memory(struct thread_data *);
+extern void fio_unpin_memory(struct thread_data *);
extern int __must_check allocate_io_mem(struct thread_data *);
extern void free_io_mem(struct thread_data *);
+extern void free_threads_shm(void);
/*
* Reset stats after ramp time completes
extern const char *fio_get_arch_string(int);
extern const char *fio_get_os_string(int);
+#define ARRAY_SIZE(x) (sizeof((x)) / (sizeof((x)[0])))
+
enum {
FIO_OUTPUT_TERSE = 0,
FIO_OUTPUT_JSON,
#include "idletime.h"
#include "lib/getopt.h"
+ #include "lib/strcasestr.h"
const char fio_version_string[] = FIO_VERSION;
},
};
-static void free_shm(void)
+void free_threads_shm(void)
{
struct shmid_ds sbuf;
void *tp = threads;
threads = NULL;
+ shmdt(tp);
+ shmctl(shm_id, IPC_RMID, &sbuf);
+ shm_id = -1;
+ }
+}
+
+void free_shm(void)
+{
+ if (threads) {
file_hash_exit();
flow_exit();
fio_debug_jobp = NULL;
- shmdt(tp);
- shmctl(shm_id, IPC_RMID, &sbuf);
+ free_threads_shm();
}
scleanup();
}
}
+ if (!o->unit_base) {
+ if (td->io_ops->flags & FIO_BIT_BASED)
+ o->unit_base = 1;
+ else
+ o->unit_base = 8;
+ }
+
#ifndef CONFIG_FDATASYNC
if (o->fdatasync_blocks) {
log_info("fio: this platform does not support fdatasync()"
/*
* This function leaks the buffer
*/
-static char *to_kmg(unsigned int val)
+char *fio_uint_to_kmg(unsigned int val)
{
char *buf = malloc(32);
char post[] = { 0, 'K', 'M', 'G', 'P', 'E', 0 };
return 0;
}
+ enum {
+ FPRE_NONE = 0,
+ FPRE_JOBNAME,
+ FPRE_JOBNUM,
+ FPRE_FILENUM
+ };
+
+ static struct fpre_keyword {
+ const char *keyword;
+ size_t strlen;
+ int key;
+ } fpre_keywords[] = {
+ { .keyword = "$jobname", .key = FPRE_JOBNAME, },
+ { .keyword = "$jobnum", .key = FPRE_JOBNUM, },
+ { .keyword = "$filenum", .key = FPRE_FILENUM, },
+ { .keyword = NULL, },
+ };
+
+ static char *make_filename(char *buf, struct thread_options *o,
+ const char *jobname, int jobnum, int filenum)
+ {
+ struct fpre_keyword *f;
+ char copy[PATH_MAX];
+
+ if (!o->filename_format || !strlen(o->filename_format)) {
+ sprintf(buf, "%s.%d.%d", jobname, jobnum, filenum);
+ return NULL;
+ }
+
+ for (f = &fpre_keywords[0]; f->keyword; f++)
+ f->strlen = strlen(f->keyword);
+
+ strcpy(buf, o->filename_format);
+ memset(copy, 0, sizeof(copy));
+ for (f = &fpre_keywords[0]; f->keyword; f++) {
+ do {
+ size_t pre_len, post_start = 0;
+ char *str, *dst = copy;
+
+ str = strcasestr(buf, f->keyword);
+ if (!str)
+ break;
+
+ pre_len = str - buf;
+ if (strlen(str) != f->strlen)
+ post_start = pre_len + f->strlen;
+
+ if (pre_len) {
+ strncpy(dst, buf, pre_len);
+ dst += pre_len;
+ }
+
+ switch (f->key) {
+ case FPRE_JOBNAME:
+ dst += sprintf(dst, "%s", jobname);
+ break;
+ case FPRE_JOBNUM:
+ dst += sprintf(dst, "%d", jobnum);
+ break;
+ case FPRE_FILENUM:
+ dst += sprintf(dst, "%d", filenum);
+ break;
+ default:
+ assert(0);
+ break;
+ }
+
+ if (post_start)
+ strcpy(dst, buf + post_start);
+
+ strcpy(buf, copy);
+ } while (1);
+ }
+
+ return buf;
+ }
/*
* Adds a job to the list of things todo. Sanitizes the various options
* to make sure we don't have conflicts, and initializes various
* members of td.
*/
-static int add_job(struct thread_data *td, const char *jobname, int job_add_num)
+static int add_job(struct thread_data *td, const char *jobname, int job_add_num,
+ int recursed, int client_type)
{
- const char *ddir_str[] = { NULL, "read", "write", "rw", NULL,
- "randread", "randwrite", "randrw",
- "trim", NULL, NULL, NULL, "randtrim" };
unsigned int i;
char fname[PATH_MAX];
int numjobs, file_alloced;
+ struct thread_options *o = &td->o;
/*
* the def_thread is just for options, it's not a real job
return 0;
}
+ td->client_type = client_type;
+
if (profile_td_init(td))
goto err;
if (ioengine_load(td))
goto err;
- if (td->o.odirect)
- if (o->use_thread)
- nr_thread++;
- else
- nr_process++;
-
+ if (o->odirect)
td->io_ops->flags |= FIO_RAWIO;
file_alloced = 0;
- if (!td->o.filename && !td->files_index && !td->o.read_iolog_file) {
+ if (!o->filename && !td->files_index && !o->read_iolog_file) {
file_alloced = 1;
- if (td->o.nr_files == 1 && exists_and_not_file(jobname))
+ if (o->nr_files == 1 && exists_and_not_file(jobname))
add_file(td, jobname);
else {
- for (i = 0; i < td->o.nr_files; i++) {
- sprintf(fname, "%s.%d.%d", jobname,
- td->thread_number, i);
- add_file(td, fname);
- }
+ for (i = 0; i < o->nr_files; i++)
+ add_file(td, make_filename(fname, o, jobname, td->thread_number, i));
}
}
td->mutex = fio_mutex_init(FIO_MUTEX_LOCKED);
- td->ts.clat_percentiles = td->o.clat_percentiles;
- td->ts.percentile_precision = td->o.percentile_precision;
- memcpy(td->ts.percentile_list, td->o.percentile_list, sizeof(td->o.percentile_list));
+ td->ts.clat_percentiles = o->clat_percentiles;
+ td->ts.percentile_precision = o->percentile_precision;
+ memcpy(td->ts.percentile_list, o->percentile_list, sizeof(o->percentile_list));
for (i = 0; i < DDIR_RWDIR_CNT; i++) {
td->ts.clat_stat[i].min_val = ULONG_MAX;
td->ts.lat_stat[i].min_val = ULONG_MAX;
td->ts.bw_stat[i].min_val = ULONG_MAX;
}
- td->ddir_seq_nr = td->o.ddir_seq_nr;
+ td->ddir_seq_nr = o->ddir_seq_nr;
- if ((td->o.stonewall || td->o.new_group) && prev_group_jobs) {
+ if ((o->stonewall || o->new_group) && prev_group_jobs) {
prev_group_jobs = 0;
groupid++;
}
if (setup_rate(td))
goto err;
- if (td->o.lat_log_file) {
- setup_log(&td->lat_log, td->o.log_avg_msec, IO_LOG_TYPE_LAT);
- setup_log(&td->slat_log, td->o.log_avg_msec, IO_LOG_TYPE_SLAT);
- setup_log(&td->clat_log, td->o.log_avg_msec, IO_LOG_TYPE_CLAT);
- if (o->write_lat_log) {
- setup_log(&td->lat_log, o->log_avg_msec);
- setup_log(&td->slat_log, o->log_avg_msec);
- setup_log(&td->clat_log, o->log_avg_msec);
++ if (o->lat_log_file) {
++ setup_log(&td->lat_log, o->log_avg_msec, IO_LOG_TYPE_LAT);
++ setup_log(&td->slat_log, o->log_avg_msec, IO_LOG_TYPE_SLAT);
++ setup_log(&td->clat_log, o->log_avg_msec, IO_LOG_TYPE_CLAT);
}
- if (td->o.bw_log_file)
- setup_log(&td->bw_log, td->o.log_avg_msec, IO_LOG_TYPE_BW);
- if (td->o.iops_log_file)
- setup_log(&td->iops_log, td->o.log_avg_msec, IO_LOG_TYPE_IOPS);
- if (o->write_bw_log)
- setup_log(&td->bw_log, o->log_avg_msec);
- if (o->write_iops_log)
- setup_log(&td->iops_log, o->log_avg_msec);
++ if (o->bw_log_file)
++ setup_log(&td->bw_log, o->log_avg_msec, IO_LOG_TYPE_BW);
++ if (o->iops_log_file)
++ setup_log(&td->iops_log, o->log_avg_msec, IO_LOG_TYPE_IOPS);
- if (!td->o.name)
- td->o.name = strdup(jobname);
+ if (!o->name)
+ o->name = strdup(jobname);
if (output_format == FIO_OUTPUT_NORMAL) {
if (!job_add_num) {
- if (!strcmp(td->io_ops->name, "cpuio")) {
- log_info("%s: ioengine=cpu, cpuload=%u,"
- " cpucycle=%u\n", o->name,
- o->cpuload, o->cpucycle);
- } else {
+ if (is_backend && !recursed)
+ fio_server_send_add_job(td);
+
+ if (!(td->io_ops->flags & FIO_NOIO)) {
char *c1, *c2, *c3, *c4, *c5, *c6;
- c1 = fio_uint_to_kmg(td->o.min_bs[DDIR_READ]);
- c2 = fio_uint_to_kmg(td->o.max_bs[DDIR_READ]);
- c3 = fio_uint_to_kmg(td->o.min_bs[DDIR_WRITE]);
- c4 = fio_uint_to_kmg(td->o.max_bs[DDIR_WRITE]);
- c5 = fio_uint_to_kmg(td->o.min_bs[DDIR_TRIM]);
- c6 = fio_uint_to_kmg(td->o.max_bs[DDIR_TRIM]);
- c1 = to_kmg(o->min_bs[DDIR_READ]);
- c2 = to_kmg(o->max_bs[DDIR_READ]);
- c3 = to_kmg(o->min_bs[DDIR_WRITE]);
- c4 = to_kmg(o->max_bs[DDIR_WRITE]);
- c5 = to_kmg(o->min_bs[DDIR_TRIM]);
- c6 = to_kmg(o->max_bs[DDIR_TRIM]);
++ c1 = fio_uint_to_kmg(o->min_bs[DDIR_READ]);
++ c2 = fio_uint_to_kmg(o->max_bs[DDIR_READ]);
++ c3 = fio_uint_to_kmg(o->min_bs[DDIR_WRITE]);
++ c4 = fio_uint_to_kmg(o->max_bs[DDIR_WRITE]);
++ c5 = fio_uint_to_kmg(o->min_bs[DDIR_TRIM]);
++ c6 = fio_uint_to_kmg(o->max_bs[DDIR_TRIM]);
log_info("%s: (g=%d): rw=%s, bs=%s-%s/%s-%s/%s-%s,"
" ioengine=%s, iodepth=%u\n",
- o->name, td->groupid,
- ddir_str[o->td_ddir],
+ td->o.name, td->groupid,
- ddir_str(td->o.td_ddir),
++ ddir_str(o->td_ddir),
c1, c2, c3, c4, c5, c6,
- td->io_ops->name,
- td->o.iodepth);
+ td->io_ops->name, o->iodepth);
free(c1);
free(c2);
* recurse add identical jobs, clear numjobs and stonewall options
* as they don't apply to sub-jobs
*/
- numjobs = td->o.numjobs;
+ numjobs = o->numjobs;
while (--numjobs) {
struct thread_data *td_new = get_new_job(0, td, 1);
job_add_num = numjobs - 1;
- if (add_job(td_new, jobname, job_add_num))
+ if (add_job(td_new, jobname, job_add_num, 1, client_type))
goto err;
}
/*
* Parse as if 'o' was a command line
*/
-void add_job_opts(const char **o)
+void add_job_opts(const char **o, int client_type)
{
struct thread_data *td, *td_parent;
int i, in_global = 1;
if (!strncmp(o[i], "name", 4)) {
in_global = 0;
if (td)
- add_job(td, jobname, 0);
+ add_job(td, jobname, 0, 0, client_type);
td = NULL;
sprintf(jobname, "%s", o[i] + 5);
}
}
if (td)
- add_job(td, jobname, 0);
+ add_job(td, jobname, 0, 0, client_type);
}
static int skip_this_section(const char *name)
/*
* This is our [ini] type file parser.
*/
-int parse_jobs_ini(char *file, int is_buf, int stonewall_flag)
+int parse_jobs_ini(char *file, int is_buf, int stonewall_flag, int type)
{
unsigned int global;
struct thread_data *td;
for (i = 0; i < num_opts; i++)
log_info("--%s ", opts[i]);
- ret = add_job(td, name, 0);
+ ret = add_job(td, name, 0, 0, type);
} else {
log_err("fio: job %s dropped\n", name);
put_job(td);
#ifdef FIO_INC_DEBUG
struct debug_level debug_levels[] = {
- { .name = "process", .shift = FD_PROCESS, },
- { .name = "file", .shift = FD_FILE, },
- { .name = "io", .shift = FD_IO, },
- { .name = "mem", .shift = FD_MEM, },
- { .name = "blktrace", .shift = FD_BLKTRACE },
- { .name = "verify", .shift = FD_VERIFY },
- { .name = "random", .shift = FD_RANDOM },
- { .name = "parse", .shift = FD_PARSE },
- { .name = "diskutil", .shift = FD_DISKUTIL },
- { .name = "job", .shift = FD_JOB },
- { .name = "mutex", .shift = FD_MUTEX },
- { .name = "profile", .shift = FD_PROFILE },
- { .name = "time", .shift = FD_TIME },
- { .name = "net", .shift = FD_NET },
+ { .name = "process",
+ .help = "Process creation/exit logging",
+ .shift = FD_PROCESS,
+ },
+ { .name = "file",
+ .help = "File related action logging",
+ .shift = FD_FILE,
+ },
+ { .name = "io",
+ .help = "IO and IO engine action logging (offsets, queue, completions, etc)",
+ .shift = FD_IO,
+ },
+ { .name = "mem",
+ .help = "Memory allocation/freeing logging",
+ .shift = FD_MEM,
+ },
+ { .name = "blktrace",
+ .help = "blktrace action logging",
+ .shift = FD_BLKTRACE,
+ },
+ { .name = "verify",
+ .help = "IO verification action logging",
+ .shift = FD_VERIFY,
+ },
+ { .name = "random",
+ .help = "Random generation logging",
+ .shift = FD_RANDOM,
+ },
+ { .name = "parse",
+ .help = "Parser logging",
+ .shift = FD_PARSE,
+ },
+ { .name = "diskutil",
+ .help = "Disk utility logging actions",
+ .shift = FD_DISKUTIL,
+ },
+ { .name = "job",
+ .help = "Logging related to creating/destroying jobs",
+ .shift = FD_JOB,
+ },
+ { .name = "mutex",
+ .help = "Mutex logging",
+ .shift = FD_MUTEX
+ },
+ { .name = "profile",
+ .help = "Logging related to profiles",
+ .shift = FD_PROFILE,
+ },
+ { .name = "time",
+ .help = "Logging related to time keeping functions",
+ .shift = FD_TIME,
+ },
+ { .name = "net",
+ .help = "Network logging",
+ .shift = FD_NET,
+ },
{ .name = NULL, },
};
fio_client_add_cmd_option(client, opt);
}
-int parse_cmd_line(int argc, char *argv[])
+int parse_cmd_line(int argc, char *argv[], int client_type)
{
struct thread_data *td = NULL;
int c, ini_idx = 0, lidx, ret = 0, do_exit = 0, exit_val = 0;
char *val = optarg;
if (!strncmp(opt, "name", 4) && td) {
- ret = add_job(td, td->o.name ?: "fio", 0);
+ ret = add_job(td, td->o.name ?: "fio", 0, 0, client_type);
if (ret)
return 0;
td = NULL;
exit_val = 1;
break;
}
- if (fio_client_add(optarg, &cur_client)) {
+ if (fio_client_add(&fio_client_ops, optarg, &cur_client)) {
log_err("fio: failed adding client %s\n", optarg);
do_exit++;
exit_val = 1;
if (td) {
if (!ret)
- ret = add_job(td, td->o.name ?: "fio", 0);
+ ret = add_job(td, td->o.name ?: "fio", 0, 0, client_type);
}
while (!ret && optind < argc) {
return ini_idx;
}
-int parse_options(int argc, char *argv[])
+int fio_init_options(void)
{
- int job_files, i;
-
f_out = stdout;
f_err = stderr;
if (fill_def_thread())
return 1;
- job_files = parse_cmd_line(argc, argv);
+ return 0;
+}
+
+extern int fio_check_options(struct thread_options *);
+
+int parse_options(int argc, char *argv[])
+{
+ const int type = FIO_CLIENT_TYPE_CLI;
+ int job_files, i;
+
+ if (fio_init_options())
+ return 1;
+ if (fio_test_cconv(&def_thread.o))
+ log_err("fio: failed internal cconv test\n");
+
+ job_files = parse_cmd_line(argc, argv, type);
if (job_files > 0) {
for (i = 0; i < job_files; i++) {
return 1;
free(ini_file[i]);
} else if (!is_backend) {
- if (parse_jobs_ini(ini_file[i], 0, i))
+ if (parse_jobs_ini(ini_file[i], 0, i, type))
return 1;
free(ini_file[i]);
}
return 0;
}
+
+void options_default_fill(struct thread_options *o)
+{
+ memcpy(o, &def_thread.o, sizeof(*o));
+}
#ifndef FIO_IOENGINE_H
#define FIO_IOENGINE_H
+#include "compiler/compiler.h"
+#include "os/os.h"
+#include "log.h"
+#include "io_ddir.h"
+#include "debug.h"
+#include "file.h"
+
#ifdef CONFIG_LIBAIO
#include <libaio.h>
#endif
IO_U_F_VER_LIST = 1 << 7,
};
+struct thread_data;
+
/*
* The io unit
*/
FIO_PIPEIO = 1 << 7, /* input/output no seekable */
FIO_BARRIER = 1 << 8, /* engine supports barriers */
FIO_MEMALIGN = 1 << 9, /* engine wants aligned memory */
+ FIO_BIT_BASED = 1 << 10, /* engine uses a bit base (e.g. uses Kbit as opposed to KB) */
};
/*
{
int base;
- switch(a) {
+ switch (a) {
case '0'...'9':
base = '0';
break;
default:
base = 0;
}
- return (a - base);
+ return a - base;
}
static int bs_cmp(const void *p1, const void *p2)
return bsp1->perc < bsp2->perc;
}
-static int bssplit_ddir(struct thread_data *td, int ddir, char *str)
+static int bssplit_ddir(struct thread_options *o, int ddir, char *str)
{
struct bssplit *bssplit;
unsigned int i, perc, perc_missing;
long long val;
char *fname;
- td->o.bssplit_nr[ddir] = 4;
+ o->bssplit_nr[ddir] = 4;
bssplit = malloc(4 * sizeof(struct bssplit));
i = 0;
/*
* grow struct buffer, if needed
*/
- if (i == td->o.bssplit_nr[ddir]) {
- td->o.bssplit_nr[ddir] <<= 1;
- bssplit = realloc(bssplit, td->o.bssplit_nr[ddir]
+ if (i == o->bssplit_nr[ddir]) {
+ o->bssplit_nr[ddir] <<= 1;
+ bssplit = realloc(bssplit, o->bssplit_nr[ddir]
* sizeof(struct bssplit));
}
} else
perc = -1;
- if (str_to_decimal(fname, &val, 1, td)) {
+ if (str_to_decimal(fname, &val, 1, o)) {
log_err("fio: bssplit conversion failed\n");
- free(td->o.bssplit);
+ free(o->bssplit);
return 1;
}
i++;
}
- td->o.bssplit_nr[ddir] = i;
+ o->bssplit_nr[ddir] = i;
/*
* Now check if the percentages add up, and how much is missing
*/
perc = perc_missing = 0;
- for (i = 0; i < td->o.bssplit_nr[ddir]; i++) {
+ for (i = 0; i < o->bssplit_nr[ddir]; i++) {
struct bssplit *bsp = &bssplit[i];
if (bsp->perc == (unsigned char) -1)
* them.
*/
if (perc_missing) {
- for (i = 0; i < td->o.bssplit_nr[ddir]; i++) {
+ for (i = 0; i < o->bssplit_nr[ddir]; i++) {
struct bssplit *bsp = &bssplit[i];
if (bsp->perc == (unsigned char) -1)
}
}
- td->o.min_bs[ddir] = min_bs;
- td->o.max_bs[ddir] = max_bs;
+ o->min_bs[ddir] = min_bs;
+ o->max_bs[ddir] = max_bs;
/*
* now sort based on percentages, for ease of lookup
*/
- qsort(bssplit, td->o.bssplit_nr[ddir], sizeof(struct bssplit), bs_cmp);
- td->o.bssplit[ddir] = bssplit;
+ qsort(bssplit, o->bssplit_nr[ddir], sizeof(struct bssplit), bs_cmp);
+ o->bssplit[ddir] = bssplit;
return 0;
-
}
static int str_bssplit_cb(void *data, const char *input)
if (odir) {
ddir = strchr(odir + 1, ',');
if (ddir) {
- ret = bssplit_ddir(td, DDIR_TRIM, ddir + 1);
+ ret = bssplit_ddir(&td->o, DDIR_TRIM, ddir + 1);
if (!ret)
*ddir = '\0';
} else {
char *op;
op = strdup(odir + 1);
- ret = bssplit_ddir(td, DDIR_TRIM, op);
+ ret = bssplit_ddir(&td->o, DDIR_TRIM, op);
free(op);
}
- if (!ret)
- ret = bssplit_ddir(td, DDIR_WRITE, odir + 1);
+ if (!ret)
+ ret = bssplit_ddir(&td->o, DDIR_WRITE, odir + 1);
if (!ret) {
*odir = '\0';
- ret = bssplit_ddir(td, DDIR_READ, str);
+ ret = bssplit_ddir(&td->o, DDIR_READ, str);
}
} else {
char *op;
op = strdup(str);
- ret = bssplit_ddir(td, DDIR_WRITE, op);
+ ret = bssplit_ddir(&td->o, DDIR_WRITE, op);
free(op);
if (!ret) {
op = strdup(str);
- ret = bssplit_ddir(td, DDIR_TRIM, op);
+ ret = bssplit_ddir(&td->o, DDIR_TRIM, op);
free(op);
}
- ret = bssplit_ddir(td, DDIR_READ, str);
+ ret = bssplit_ddir(&td->o, DDIR_READ, str);
}
free(p);
static int str2error(char *str)
{
- const char * err[] = {"EPERM", "ENOENT", "ESRCH", "EINTR", "EIO",
+ const char *err[] = { "EPERM", "ENOENT", "ESRCH", "EINTR", "EIO",
"ENXIO", "E2BIG", "ENOEXEC", "EBADF",
"ECHILD", "EAGAIN", "ENOMEM", "EACCES",
"EFAULT", "ENOTBLK", "EBUSY", "EEXIST",
"EXDEV", "ENODEV", "ENOTDIR", "EISDIR",
"EINVAL", "ENFILE", "EMFILE", "ENOTTY",
"ETXTBSY","EFBIG", "ENOSPC", "ESPIPE",
- "EROFS","EMLINK", "EPIPE", "EDOM", "ERANGE"};
+ "EROFS","EMLINK", "EPIPE", "EDOM", "ERANGE" };
int i = 0, num = sizeof(err) / sizeof(void *);
- while( i < num) {
+ while (i < num) {
if (!strcmp(err[i], str))
return i + 1;
i++;
static int str_rw_cb(void *data, const char *str)
{
struct thread_data *td = data;
+ struct thread_options *o = &td->o;
char *nr = get_opt_postfix(str);
- td->o.ddir_seq_nr = 1;
- td->o.ddir_seq_add = 0;
+ o->ddir_seq_nr = 1;
+ o->ddir_seq_add = 0;
if (!nr)
return 0;
if (td_random(td))
- td->o.ddir_seq_nr = atoi(nr);
+ o->ddir_seq_nr = atoi(nr);
else {
long long val;
- if (str_to_decimal(nr, &val, 1, td)) {
+ if (str_to_decimal(nr, &val, 1, o)) {
log_err("fio: rw postfix parsing failed\n");
free(nr);
return 1;
}
- td->o.ddir_seq_add = val;
+ o->ddir_seq_add = val;
}
free(nr);
struct thread_data *td = data;
if (td->o.mem_type == MEM_MMAPHUGE || td->o.mem_type == MEM_MMAP)
- td->mmapfile = get_opt_postfix(mem);
-
- return 0;
-}
-
-static int str_verify_cb(void *data, const char *mem)
-{
- struct thread_data *td = data;
-
- if (td->o.verify == VERIFY_CRC32C_INTEL ||
- td->o.verify == VERIFY_CRC32C) {
- crc32c_intel_probe();
- }
+ td->o.mmapfile = get_opt_postfix(mem);
return 0;
}
return 0;
}
-static int str_lockmem_cb(void fio_unused *data, unsigned long long *val)
-{
- mlock_size = *val;
- return 0;
-}
-
static int str_rwmix_read_cb(void *data, unsigned long long *val)
{
struct thread_data *td = data;
return 0;
}
-#ifdef FIO_HAVE_IOPRIO
-static int str_prioclass_cb(void *data, unsigned long long *val)
-{
- struct thread_data *td = data;
- unsigned short mask;
-
- /*
- * mask off old class bits, str_prio_cb() may have set a default class
- */
- mask = (1 << IOPRIO_CLASS_SHIFT) - 1;
- td->ioprio &= mask;
-
- td->ioprio |= *val << IOPRIO_CLASS_SHIFT;
- td->ioprio_set = 1;
- return 0;
-}
-
-static int str_prio_cb(void *data, unsigned long long *val)
-{
- struct thread_data *td = data;
-
- td->ioprio |= *val;
-
- /*
- * If no class is set, assume BE
- */
- if ((td->ioprio >> IOPRIO_CLASS_SHIFT) == 0)
- td->ioprio |= IOPRIO_CLASS_BE << IOPRIO_CLASS_SHIFT;
-
- td->ioprio_set = 1;
- return 0;
-}
-#endif
-
static int str_exitall_cb(void)
{
exitall_on_terminate = 1;
}
#endif
-#ifdef FIO_HAVE_TRIM
-static int str_verify_trim_cb(void *data, unsigned long long *val)
-{
- struct thread_data *td = data;
-
- td->o.trim_percentage = *val;
- return 0;
-}
-#endif
-
static int str_fst_cb(void *data, const char *str)
{
struct thread_data *td = data;
log_err("fio: zipf theta must different than 1.0\n");
return 1;
}
- td->o.zipf_theta = val;
+ td->o.zipf_theta.u.f = val;
} else {
if (val <= 0.00 || val >= 1.00) {
log_err("fio: pareto input out of range (0 < input < 1.0)\n");
return 1;
}
- td->o.pareto_h = val;
+ td->o.pareto_h.u.f = val;
}
return 0;
}
-static int check_dir(struct thread_data *td, char *fname)
-{
-#if 0
- char file[PATH_MAX], *dir;
- int elen = 0;
-
- if (td->o.directory) {
- strcpy(file, td->o.directory);
- strcat(file, "/");
- elen = strlen(file);
- }
-
- sprintf(file + elen, "%s", fname);
- dir = dirname(file);
-
- {
- struct stat sb;
- /*
- * We can't do this on FIO_DISKLESSIO engines. The engine isn't loaded
- * yet, so we can't do this check right here...
- */
- if (lstat(dir, &sb) < 0) {
- int ret = errno;
-
- log_err("fio: %s is not a directory\n", dir);
- td_verror(td, ret, "lstat");
- return 1;
- }
-
- if (!S_ISDIR(sb.st_mode)) {
- log_err("fio: %s is not a directory\n", dir);
- return 1;
- }
- }
-#endif
-
- return 0;
-}
-
/*
* Return next file in the string. Files are separated with ':'. If the ':'
* is escaped with a '\', then that ':' is part of the filename and does not
while ((fname = get_next_file_name(&str)) != NULL) {
if (!strlen(fname))
break;
- if (check_dir(td, fname)) {
- free(p);
- return 1;
- }
add_file(td, fname);
td->o.nr_files++;
}
return add_dir_files(td, td->o.opendir);
}
-static int str_verify_offset_cb(void *data, unsigned long long *off)
-{
- struct thread_data *td = data;
-
- if (*off && *off < sizeof(struct verify_header)) {
- log_err("fio: verify_offset too small\n");
- return 1;
- }
-
- td->o.verify_offset = *off;
- return 0;
-}
-
static int str_verify_pattern_cb(void *data, const char *input)
{
struct thread_data *td = data;
long off;
int i = 0, j = 0, len, k, base = 10;
- char* loc1, * loc2;
+ char *loc1, *loc2;
loc1 = strstr(input, "0x");
loc2 = strstr(input, "0X");
return 0;
}
-static int str_write_bw_log_cb(void *data, const char *str)
-{
- struct thread_data *td = data;
-
- if (str)
- td->o.bw_log_file = strdup(str);
-
- td->o.write_bw_log = 1;
- return 0;
-}
-
-static int str_write_lat_log_cb(void *data, const char *str)
-{
- struct thread_data *td = data;
-
- if (str)
- td->o.lat_log_file = strdup(str);
-
- td->o.write_lat_log = 1;
- return 0;
-}
-
-static int str_write_iops_log_cb(void *data, const char *str)
-{
- struct thread_data *td = data;
-
- if (str)
- td->o.iops_log_file = strdup(str);
-
- td->o.write_iops_log = 1;
- return 0;
-}
-
static int str_gtod_reduce_cb(void *data, int *il)
{
struct thread_data *td = data;
return 0;
}
+static int unit_base_verify(struct fio_option *o, void *data)
+{
+	struct thread_data *td = data;
+
+	/* 0 = default, pick based on engine
+	 * 1 = use bits
+	 * 8 = use bytes
+	 */
+	if (td->o.unit_base != 0 &&
+	    td->o.unit_base != 1 &&
+	    td->o.unit_base != 8) {
+		log_err("fio: unit_base set to nonsensical value: %u\n",
+			td->o.unit_base);
+		return 1;
+	}
+
+	return 0;
+}
+
+/*
+ * Option grouping
+ */
+static struct opt_group fio_opt_groups[] = {
+ {
+ .name = "General",
+ .mask = FIO_OPT_C_GENERAL,
+ },
+ {
+ .name = "I/O",
+ .mask = FIO_OPT_C_IO,
+ },
+ {
+ .name = "File",
+ .mask = FIO_OPT_C_FILE,
+ },
+ {
+ .name = "Statistics",
+ .mask = FIO_OPT_C_STAT,
+ },
+ {
+ .name = "Logging",
+ .mask = FIO_OPT_C_LOG,
+ },
+ {
+ .name = "Profiles",
+ .mask = FIO_OPT_C_PROFILE,
+ },
+ {
+ .name = NULL,
+ },
+};
+
+static struct opt_group *__opt_group_from_mask(struct opt_group *ogs, unsigned int *mask,
+ unsigned int inv_mask)
+{
+ struct opt_group *og;
+ int i;
+
+ if (*mask == inv_mask || !*mask)
+ return NULL;
+
+ for (i = 0; ogs[i].name; i++) {
+ og = &ogs[i];
+
+ if (*mask & og->mask) {
+ *mask &= ~(og->mask);
+ return og;
+ }
+ }
+
+ return NULL;
+}
+
+struct opt_group *opt_group_from_mask(unsigned int *mask)
+{
+ return __opt_group_from_mask(fio_opt_groups, mask, FIO_OPT_C_INVALID);
+}
+
+static struct opt_group fio_opt_cat_groups[] = {
+ {
+ .name = "Rate",
+ .mask = FIO_OPT_G_RATE,
+ },
+ {
+ .name = "Zone",
+ .mask = FIO_OPT_G_ZONE,
+ },
+ {
+ .name = "Read/write mix",
+ .mask = FIO_OPT_G_RWMIX,
+ },
+ {
+ .name = "Verify",
+ .mask = FIO_OPT_G_VERIFY,
+ },
+ {
+ .name = "Trim",
+ .mask = FIO_OPT_G_TRIM,
+ },
+ {
+ .name = "I/O Logging",
+ .mask = FIO_OPT_G_IOLOG,
+ },
+ {
+ .name = "I/O Depth",
+ .mask = FIO_OPT_G_IO_DEPTH,
+ },
+ {
+ .name = "I/O Flow",
+ .mask = FIO_OPT_G_IO_FLOW,
+ },
+ {
+ .name = "Description",
+ .mask = FIO_OPT_G_DESC,
+ },
+ {
+ .name = "Filename",
+ .mask = FIO_OPT_G_FILENAME,
+ },
+ {
+ .name = "General I/O",
+ .mask = FIO_OPT_G_IO_BASIC,
+ },
+ {
+ .name = "Cgroups",
+ .mask = FIO_OPT_G_CGROUP,
+ },
+ {
+ .name = "Runtime",
+ .mask = FIO_OPT_G_RUNTIME,
+ },
+ {
+ .name = "Process",
+ .mask = FIO_OPT_G_PROCESS,
+ },
+ {
+ .name = "Job credentials / priority",
+ .mask = FIO_OPT_G_CRED,
+ },
+ {
+ .name = "Clock settings",
+ .mask = FIO_OPT_G_CLOCK,
+ },
+ {
+ .name = "I/O Type",
+ .mask = FIO_OPT_G_IO_TYPE,
+ },
+ {
+ .name = "I/O Thinktime",
+ .mask = FIO_OPT_G_THINKTIME,
+ },
+ {
+ .name = "Randomizations",
+ .mask = FIO_OPT_G_RANDOM,
+ },
+ {
+ .name = "I/O buffers",
+ .mask = FIO_OPT_G_IO_BUF,
+ },
+ {
+ .name = "Tiobench profile",
+ .mask = FIO_OPT_G_TIOBENCH,
+ },
+
+ {
+ .name = NULL,
+ }
+};
+
+struct opt_group *opt_group_cat_from_mask(unsigned int *mask)
+{
+ return __opt_group_from_mask(fio_opt_cat_groups, mask, FIO_OPT_G_INVALID);
+}
+
/*
* Map of job/command line options
*/
-static struct fio_option options[FIO_MAX_OPTS] = {
+struct fio_option fio_options[FIO_MAX_OPTS] = {
{
.name = "description",
+ .lname = "Description of job",
.type = FIO_OPT_STR_STORE,
.off1 = td_var_offset(description),
.help = "Text job description",
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_DESC,
},
{
.name = "name",
+ .lname = "Job name",
.type = FIO_OPT_STR_STORE,
.off1 = td_var_offset(name),
.help = "Name of this job",
- },
- {
- .name = "directory",
- .type = FIO_OPT_STR_STORE,
- .off1 = td_var_offset(directory),
- .cb = str_directory_cb,
- .help = "Directory to store files in",
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_DESC,
},
{
.name = "filename",
+ .lname = "Filename(s)",
.type = FIO_OPT_STR_STORE,
.off1 = td_var_offset(filename),
.cb = str_filename_cb,
.prio = -1, /* must come after "directory" */
.help = "File(s) to use for the workload",
+ .category = FIO_OPT_C_FILE,
+ .group = FIO_OPT_G_FILENAME,
+ },
+ {
+ .name = "directory",
+ .lname = "Directory",
+ .type = FIO_OPT_STR_STORE,
+ .off1 = td_var_offset(directory),
+ .cb = str_directory_cb,
+ .help = "Directory to store files in",
+ .category = FIO_OPT_C_FILE,
+ .group = FIO_OPT_G_FILENAME,
},
+ {
+ .name = "filename_format",
+ .type = FIO_OPT_STR_STORE,
+ .off1 = td_var_offset(filename_format),
+ .prio = -1, /* must come after "directory" */
+ .help = "Override default $jobname.$jobnum.$filenum naming",
+ .def = "$jobname.$jobnum.$filenum",
+ },
+ {
+ .name = "kb_base",
+ .type = FIO_OPT_INT,
+ .off1 = td_var_offset(kb_base),
+ .verify = kb_base_verify,
+ .prio = 1,
+ .def = "1024",
+ .help = "How many bytes per KB for reporting (1000 or 1024)",
+ },
+ {
+ .name = "unit_base",
+ .type = FIO_OPT_INT,
+ .off1 = td_var_offset(unit_base),
+ .verify = unit_base_verify,
+ .prio = 1,
+ .def = "0",
+ .help = "Bit multiple of result summary data (8 for byte, 1 for bit)",
+ },
{
.name = "lockfile",
+ .lname = "Lockfile",
.type = FIO_OPT_STR,
.off1 = td_var_offset(file_lock_mode),
.help = "Lock file when doing IO to it",
.parent = "filename",
+ .hide = 0,
.def = "none",
+ .category = FIO_OPT_C_FILE,
+ .group = FIO_OPT_G_FILENAME,
.posval = {
{ .ival = "none",
.oval = FILE_LOCK_NONE,
},
{
.name = "opendir",
+ .lname = "Open directory",
.type = FIO_OPT_STR_STORE,
.off1 = td_var_offset(opendir),
.cb = str_opendir_cb,
.help = "Recursively add files from this directory and down",
+ .category = FIO_OPT_C_FILE,
+ .group = FIO_OPT_G_FILENAME,
},
{
.name = "rw",
+ .lname = "Read/write",
.alias = "readwrite",
.type = FIO_OPT_STR,
.cb = str_rw_cb,
.help = "IO direction",
.def = "read",
.verify = rw_verify,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_IO_BASIC,
.posval = {
{ .ival = "read",
.oval = TD_DDIR_READ,
},
{
.name = "rw_sequencer",
+ .lname = "RW Sequencer",
.type = FIO_OPT_STR,
.off1 = td_var_offset(rw_seq),
.help = "IO offset generator modifier",
.def = "sequential",
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_IO_BASIC,
.posval = {
{ .ival = "sequential",
.oval = RW_SEQ_SEQ,
{
.name = "ioengine",
+ .lname = "IO Engine",
.type = FIO_OPT_STR_STORE,
.off1 = td_var_offset(ioengine),
.help = "IO engine to use",
.def = FIO_PREFERRED_ENGINE,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_IO_BASIC,
.posval = {
{ .ival = "sync",
.help = "Use read/write",
},
{
.name = "iodepth",
+ .lname = "IO Depth",
.type = FIO_OPT_INT,
.off1 = td_var_offset(iodepth),
.help = "Number of IO buffers to keep in flight",
.minval = 1,
+ .interval = 1,
.def = "1",
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_IO_BASIC,
},
{
.name = "iodepth_batch",
+ .lname = "IO Depth batch",
.alias = "iodepth_batch_submit",
.type = FIO_OPT_INT,
.off1 = td_var_offset(iodepth_batch),
.help = "Number of IO buffers to submit in one go",
.parent = "iodepth",
+ .hide = 1,
.minval = 1,
+ .interval = 1,
.def = "1",
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_IO_BASIC,
},
{
.name = "iodepth_batch_complete",
+ .lname = "IO Depth batch complete",
.type = FIO_OPT_INT,
.off1 = td_var_offset(iodepth_batch_complete),
.help = "Number of IO buffers to retrieve in one go",
.parent = "iodepth",
+ .hide = 1,
.minval = 0,
+ .interval = 1,
.def = "1",
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_IO_BASIC,
},
{
.name = "iodepth_low",
+ .lname = "IO Depth batch low",
.type = FIO_OPT_INT,
.off1 = td_var_offset(iodepth_low),
.help = "Low water mark for queuing depth",
.parent = "iodepth",
+ .hide = 1,
+ .interval = 1,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_IO_BASIC,
},
{
.name = "size",
+ .lname = "Size",
.type = FIO_OPT_STR_VAL,
.cb = str_size_cb,
.help = "Total size of device or files",
+ .interval = 1024 * 1024,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "fill_device",
+ .lname = "Fill device",
.alias = "fill_fs",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(fill_device),
.help = "Write until an ENOSPC error occurs",
.def = "0",
+ .category = FIO_OPT_C_FILE,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "filesize",
+ .lname = "File size",
.type = FIO_OPT_STR_VAL,
.off1 = td_var_offset(file_size_low),
.off2 = td_var_offset(file_size_high),
.minval = 1,
.help = "Size of individual files",
+ .interval = 1024 * 1024,
+ .category = FIO_OPT_C_FILE,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "offset",
+ .lname = "IO offset",
.alias = "fileoffset",
.type = FIO_OPT_STR_VAL,
.off1 = td_var_offset(start_offset),
.help = "Start IO from this offset",
.def = "0",
+ .interval = 1024 * 1024,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "offset_increment",
+ .lname = "IO offset increment",
.type = FIO_OPT_STR_VAL,
.off1 = td_var_offset(offset_increment),
.help = "What is the increment from one offset to the next",
.parent = "offset",
+ .hide = 1,
.def = "0",
+ .interval = 1024 * 1024,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "bs",
+ .lname = "Block size",
.alias = "blocksize",
.type = FIO_OPT_INT,
.off1 = td_var_offset(bs[DDIR_READ]),
.help = "Block size unit",
.def = "4k",
.parent = "rw",
+ .hide = 1,
+ .interval = 512,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "ba",
+ .lname = "Block size align",
.alias = "blockalign",
.type = FIO_OPT_INT,
.off1 = td_var_offset(ba[DDIR_READ]),
.minval = 1,
.help = "IO block offset alignment",
.parent = "rw",
+ .hide = 1,
+ .interval = 512,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "bsrange",
+ .lname = "Block size range",
.alias = "blocksize_range",
.type = FIO_OPT_RANGE,
.off1 = td_var_offset(min_bs[DDIR_READ]),
.minval = 1,
.help = "Set block size range (in more detail than bs)",
.parent = "rw",
+ .hide = 1,
+ .interval = 4096,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "bssplit",
+ .lname = "Block size split",
.type = FIO_OPT_STR,
.cb = str_bssplit_cb,
.help = "Set a specific mix of block sizes",
.parent = "rw",
+ .hide = 1,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "bs_unaligned",
+ .lname = "Block size unaligned",
.alias = "blocksize_unaligned",
.type = FIO_OPT_STR_SET,
.off1 = td_var_offset(bs_unaligned),
.help = "Don't sector align IO buffer sizes",
.parent = "rw",
+ .hide = 1,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "randrepeat",
+ .lname = "Random repeatable",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(rand_repeatable),
.help = "Use repeatable random IO pattern",
.def = "1",
.parent = "rw",
+ .hide = 1,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_RANDOM,
},
{
.name = "use_os_rand",
+ .lname = "Use OS random",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(use_os_rand),
.help = "Set to use OS random generator",
.def = "0",
.parent = "rw",
+ .hide = 1,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_RANDOM,
},
{
.name = "norandommap",
+ .lname = "No randommap",
.type = FIO_OPT_STR_SET,
.off1 = td_var_offset(norandommap),
.help = "Accept potential duplicate random blocks",
.parent = "rw",
+ .hide = 1,
+ .hide_on_set = 1,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_RANDOM,
},
{
.name = "softrandommap",
+ .lname = "Soft randommap",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(softrandommap),
.help = "Set norandommap if randommap allocation fails",
.parent = "norandommap",
+ .hide = 1,
.def = "0",
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_RANDOM,
},
{
.name = "random_generator",
.help = "Variable length LFSR",
},
},
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_RANDOM,
},
{
.name = "random_distribution",
.help = "Pareto distribution",
},
},
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_RANDOM,
},
{
.name = "nrfiles",
+ .lname = "Number of files",
.alias = "nr_files",
.type = FIO_OPT_INT,
.off1 = td_var_offset(nr_files),
.help = "Split job workload between this number of files",
.def = "1",
+ .interval = 1,
+ .category = FIO_OPT_C_FILE,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "openfiles",
+ .lname = "Number of open files",
.type = FIO_OPT_INT,
.off1 = td_var_offset(open_files),
.help = "Number of files to keep open at the same time",
+ .category = FIO_OPT_C_FILE,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "file_service_type",
+ .lname = "File service type",
.type = FIO_OPT_STR,
.cb = str_fst_cb,
.off1 = td_var_offset(file_service_type),
.help = "How to select which file to service next",
.def = "roundrobin",
+ .category = FIO_OPT_C_FILE,
+ .group = FIO_OPT_G_INVALID,
.posval = {
{ .ival = "random",
.oval = FIO_FSERVICE_RANDOM,
},
},
.parent = "nrfiles",
+ .hide = 1,
},
#ifdef CONFIG_POSIX_FALLOCATE
{
.name = "fallocate",
+ .lname = "Fallocate",
.type = FIO_OPT_STR,
.off1 = td_var_offset(fallocate_mode),
.help = "Whether pre-allocation is performed when laying out files",
.def = "posix",
+ .category = FIO_OPT_C_FILE,
+ .group = FIO_OPT_G_INVALID,
.posval = {
{ .ival = "none",
.oval = FIO_FALLOCATE_NONE,
#endif /* CONFIG_POSIX_FALLOCATE */
{
.name = "fadvise_hint",
+ .lname = "Fadvise hint",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(fadvise_hint),
.help = "Use fadvise() to advise the kernel on IO pattern",
.def = "1",
+ .category = FIO_OPT_C_FILE,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "fsync",
+ .lname = "Fsync",
.type = FIO_OPT_INT,
.off1 = td_var_offset(fsync_blocks),
.help = "Issue fsync for writes every given number of blocks",
.def = "0",
+ .interval = 1,
+ .category = FIO_OPT_C_FILE,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "fdatasync",
+ .lname = "Fdatasync",
.type = FIO_OPT_INT,
.off1 = td_var_offset(fdatasync_blocks),
.help = "Issue fdatasync for writes every given number of blocks",
.def = "0",
+ .interval = 1,
+ .category = FIO_OPT_C_FILE,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "write_barrier",
+ .lname = "Write barrier",
.type = FIO_OPT_INT,
.off1 = td_var_offset(barrier_blocks),
.help = "Make every Nth write a barrier write",
.def = "0",
+ .interval = 1,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_INVALID,
},
#ifdef CONFIG_SYNC_FILE_RANGE
{
.name = "sync_file_range",
+ .lname = "Sync file range",
.posval = {
{ .ival = "wait_before",
.oval = SYNC_FILE_RANGE_WAIT_BEFORE,
.cb = str_sfr_cb,
.off1 = td_var_offset(sync_file_range),
.help = "Use sync_file_range()",
+ .category = FIO_OPT_C_FILE,
+ .group = FIO_OPT_G_INVALID,
},
#endif
{
.name = "direct",
+ .lname = "Direct I/O",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(odirect),
.help = "Use O_DIRECT IO (negates buffered)",
.def = "0",
+ .inverse = "buffered",
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_IO_TYPE,
},
{
.name = "buffered",
+ .lname = "Buffered I/O",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(odirect),
.neg = 1,
.help = "Use buffered IO (negates direct)",
.def = "1",
+ .inverse = "direct",
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_IO_TYPE,
},
{
.name = "overwrite",
+ .lname = "Overwrite",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(overwrite),
.help = "When writing, set whether to overwrite current data",
.def = "0",
+ .category = FIO_OPT_C_FILE,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "loops",
+ .lname = "Loops",
.type = FIO_OPT_INT,
.off1 = td_var_offset(loops),
.help = "Number of times to run the job",
.def = "1",
+ .interval = 1,
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_RUNTIME,
},
{
.name = "numjobs",
+ .lname = "Number of jobs",
.type = FIO_OPT_INT,
.off1 = td_var_offset(numjobs),
.help = "Duplicate this job this many times",
.def = "1",
+ .interval = 1,
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_RUNTIME,
},
{
.name = "startdelay",
+ .lname = "Start delay",
.type = FIO_OPT_STR_VAL_TIME,
.off1 = td_var_offset(start_delay),
.help = "Only start job when this period has passed",
.def = "0",
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_RUNTIME,
},
{
.name = "runtime",
+ .lname = "Runtime",
.alias = "timeout",
.type = FIO_OPT_STR_VAL_TIME,
.off1 = td_var_offset(timeout),
.help = "Stop workload when this amount of time has passed",
.def = "0",
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_RUNTIME,
},
{
.name = "time_based",
+ .lname = "Time based",
.type = FIO_OPT_STR_SET,
.off1 = td_var_offset(time_based),
.help = "Keep running until runtime/timeout is met",
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_RUNTIME,
},
{
.name = "ramp_time",
+ .lname = "Ramp time",
.type = FIO_OPT_STR_VAL_TIME,
.off1 = td_var_offset(ramp_time),
.help = "Ramp up time before measuring performance",
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_RUNTIME,
},
{
.name = "clocksource",
+ .lname = "Clock source",
.type = FIO_OPT_STR,
.cb = fio_clock_source_cb,
.off1 = td_var_offset(clocksource),
.help = "What type of timing source to use",
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_CLOCK,
.posval = {
#ifdef CONFIG_GETTIMEOFDAY
{ .ival = "gettimeofday",
{
.name = "mem",
.alias = "iomem",
+ .lname = "I/O Memory",
.type = FIO_OPT_STR,
.cb = str_mem_cb,
.off1 = td_var_offset(mem_type),
.help = "Backing type for IO buffers",
.def = "malloc",
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_INVALID,
.posval = {
{ .ival = "malloc",
.oval = MEM_MALLOC,
{
.name = "iomem_align",
.alias = "mem_align",
+ .lname = "I/O memory alignment",
.type = FIO_OPT_INT,
.off1 = td_var_offset(mem_align),
.minval = 0,
.help = "IO memory buffer offset alignment",
.def = "0",
.parent = "iomem",
+ .hide = 1,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "verify",
+ .lname = "Verify",
.type = FIO_OPT_STR,
.off1 = td_var_offset(verify),
.help = "Verify data written",
- .cb = str_verify_cb,
.def = "0",
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_VERIFY,
.posval = {
{ .ival = "0",
.oval = VERIFY_NONE,
},
{
.name = "do_verify",
+ .lname = "Perform verify step",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(do_verify),
.help = "Run verification stage after write",
.def = "1",
.parent = "verify",
+ .hide = 1,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_VERIFY,
},
{
.name = "verifysort",
+ .lname = "Verify sort",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(verifysort),
.help = "Sort written verify blocks for read back",
.def = "1",
.parent = "verify",
+ .hide = 1,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_VERIFY,
},
{
.name = "verifysort_nr",
.maxval = 131072,
.def = "1024",
.parent = "verify",
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_VERIFY,
},
{
.name = "verify_interval",
+ .lname = "Verify interval",
.type = FIO_OPT_INT,
.off1 = td_var_offset(verify_interval),
.minval = 2 * sizeof(struct verify_header),
.help = "Store verify buffer header every N bytes",
.parent = "verify",
+ .hide = 1,
+ .interval = 2 * sizeof(struct verify_header),
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_VERIFY,
},
{
.name = "verify_offset",
+ .lname = "Verify offset",
.type = FIO_OPT_INT,
.help = "Offset verify header location by N bytes",
- .def = "0",
- .cb = str_verify_offset_cb,
+ .off1 = td_var_offset(verify_offset),
+ .minval = sizeof(struct verify_header),
.parent = "verify",
+ .hide = 1,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_VERIFY,
},
{
.name = "verify_pattern",
+ .lname = "Verify pattern",
.type = FIO_OPT_STR,
.cb = str_verify_pattern_cb,
.help = "Fill pattern for IO buffers",
.parent = "verify",
+ .hide = 1,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_VERIFY,
},
{
.name = "verify_fatal",
+ .lname = "Verify fatal",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(verify_fatal),
.def = "0",
.help = "Exit on a single verify failure, don't continue",
.parent = "verify",
+ .hide = 1,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_VERIFY,
},
{
.name = "verify_dump",
+ .lname = "Verify dump",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(verify_dump),
.def = "0",
.help = "Dump contents of good and bad blocks on failure",
.parent = "verify",
+ .hide = 1,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_VERIFY,
},
{
.name = "verify_async",
+ .lname = "Verify asynchronously",
.type = FIO_OPT_INT,
.off1 = td_var_offset(verify_async),
.def = "0",
.help = "Number of async verifier threads to use",
.parent = "verify",
+ .hide = 1,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_VERIFY,
},
{
.name = "verify_backlog",
+ .lname = "Verify backlog",
.type = FIO_OPT_STR_VAL,
.off1 = td_var_offset(verify_backlog),
.help = "Verify after this number of blocks are written",
.parent = "verify",
+ .hide = 1,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_VERIFY,
},
{
.name = "verify_backlog_batch",
+ .lname = "Verify backlog batch",
.type = FIO_OPT_INT,
.off1 = td_var_offset(verify_batch),
.help = "Verify this number of IO blocks",
.parent = "verify",
+ .hide = 1,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_VERIFY,
},
#ifdef FIO_HAVE_CPU_AFFINITY
{
.name = "verify_async_cpus",
+ .lname = "Async verify CPUs",
.type = FIO_OPT_STR,
.cb = str_verify_cpus_allowed_cb,
.help = "Set CPUs allowed for async verify threads",
.parent = "verify_async",
+ .hide = 1,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_VERIFY,
},
#endif
{
.off1 = td_var_offset(experimental_verify),
.type = FIO_OPT_BOOL,
.help = "Enable experimental verification",
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_VERIFY,
},
#ifdef FIO_HAVE_TRIM
{
.name = "trim_percentage",
+ .lname = "Trim percentage",
.type = FIO_OPT_INT,
- .cb = str_verify_trim_cb,
+ .off1 = td_var_offset(trim_percentage),
+ .minval = 0,
.maxval = 100,
.help = "Number of verify blocks to discard/trim",
.parent = "verify",
.def = "0",
+ .interval = 1,
+ .hide = 1,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_TRIM,
},
{
.name = "trim_verify_zero",
- .type = FIO_OPT_INT,
+ .lname = "Verify trim zero",
+ .type = FIO_OPT_BOOL,
.help = "Verify that trim/discarded blocks are returned as zeroes",
.off1 = td_var_offset(trim_zero),
.parent = "trim_percentage",
+ .hide = 1,
.def = "1",
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_TRIM,
},
{
.name = "trim_backlog",
+ .lname = "Trim backlog",
.type = FIO_OPT_STR_VAL,
.off1 = td_var_offset(trim_backlog),
.help = "Trim after this number of blocks are written",
.parent = "trim_percentage",
+ .hide = 1,
+ .interval = 1,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_TRIM,
},
{
.name = "trim_backlog_batch",
+ .lname = "Trim backlog batch",
.type = FIO_OPT_INT,
.off1 = td_var_offset(trim_batch),
.help = "Trim this number of IO blocks",
.parent = "trim_percentage",
+ .hide = 1,
+ .interval = 1,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_TRIM,
},
#endif
{
.name = "write_iolog",
+ .lname = "Write I/O log",
.type = FIO_OPT_STR_STORE,
.off1 = td_var_offset(write_iolog_file),
.help = "Store IO pattern to file",
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_IOLOG,
},
{
.name = "read_iolog",
+ .lname = "Read I/O log",
.type = FIO_OPT_STR_STORE,
.off1 = td_var_offset(read_iolog_file),
.help = "Playback IO pattern from file",
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_IOLOG,
},
{
.name = "replay_no_stall",
- .type = FIO_OPT_INT,
+ .lname = "Don't stall on replay",
+ .type = FIO_OPT_BOOL,
.off1 = td_var_offset(no_stall),
.def = "0",
.parent = "read_iolog",
+ .hide = 1,
.help = "Playback IO pattern file as fast as possible without stalls",
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_IOLOG,
},
{
.name = "replay_redirect",
+ .lname = "Redirect device for replay",
.type = FIO_OPT_STR_STORE,
.off1 = td_var_offset(replay_redirect),
.parent = "read_iolog",
+ .hide = 1,
.help = "Replay all I/O onto this device, regardless of trace device",
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_IOLOG,
},
{
.name = "exec_prerun",
+ .lname = "Pre-execute runnable",
.type = FIO_OPT_STR_STORE,
.off1 = td_var_offset(exec_prerun),
.help = "Execute this file prior to running job",
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "exec_postrun",
+ .lname = "Post-execute runnable",
.type = FIO_OPT_STR_STORE,
.off1 = td_var_offset(exec_postrun),
.help = "Execute this file after running job",
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_INVALID,
},
#ifdef FIO_HAVE_IOSCHED_SWITCH
{
.name = "ioscheduler",
+ .lname = "I/O scheduler",
.type = FIO_OPT_STR_STORE,
.off1 = td_var_offset(ioscheduler),
.help = "Use this IO scheduler on the backing device",
+ .category = FIO_OPT_C_FILE,
+ .group = FIO_OPT_G_INVALID,
},
#endif
{
.name = "zonesize",
+ .lname = "Zone size",
.type = FIO_OPT_STR_VAL,
.off1 = td_var_offset(zone_size),
.help = "Amount of data to read per zone",
.def = "0",
+ .interval = 1024 * 1024,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_ZONE,
},
{
.name = "zonerange",
+ .lname = "Zone range",
.type = FIO_OPT_STR_VAL,
.off1 = td_var_offset(zone_range),
.help = "Give size of an IO zone",
.def = "0",
+ .interval = 1024 * 1024,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_ZONE,
},
{
.name = "zoneskip",
+ .lname = "Zone skip",
.type = FIO_OPT_STR_VAL,
.off1 = td_var_offset(zone_skip),
.help = "Space between IO zones",
.def = "0",
+ .interval = 1024 * 1024,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_ZONE,
},
{
.name = "lockmem",
+ .lname = "Lock memory",
.type = FIO_OPT_STR_VAL,
- .cb = str_lockmem_cb,
+ .off1 = td_var_offset(lockmem),
.help = "Lock down this amount of memory",
.def = "0",
+ .interval = 1024 * 1024,
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "rwmixread",
+ .lname = "Read/write mix read",
.type = FIO_OPT_INT,
.cb = str_rwmix_read_cb,
.maxval = 100,
.help = "Percentage of mixed workload that is reads",
.def = "50",
+ .interval = 5,
+ .inverse = "rwmixwrite",
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_RWMIX,
},
{
.name = "rwmixwrite",
+ .lname = "Read/write mix write",
.type = FIO_OPT_INT,
.cb = str_rwmix_write_cb,
.maxval = 100,
.help = "Percentage of mixed workload that is writes",
.def = "50",
+ .interval = 5,
+ .inverse = "rwmixread",
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_RWMIX,
},
{
.name = "rwmixcycle",
+ .lname = "Read/write mix cycle",
.type = FIO_OPT_DEPRECATED,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_RWMIX,
},
{
.name = "nice",
+ .lname = "Nice",
.type = FIO_OPT_INT,
.off1 = td_var_offset(nice),
.help = "Set job CPU nice value",
.minval = -19,
.maxval = 20,
.def = "0",
+ .interval = 1,
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_CRED,
},
#ifdef FIO_HAVE_IOPRIO
{
.name = "prio",
+ .lname = "I/O nice priority",
.type = FIO_OPT_INT,
- .cb = str_prio_cb,
+ .off1 = td_var_offset(ioprio),
.help = "Set job IO priority value",
.minval = 0,
.maxval = 7,
+ .interval = 1,
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_CRED,
},
{
.name = "prioclass",
+ .lname = "I/O nice priority class",
.type = FIO_OPT_INT,
- .cb = str_prioclass_cb,
+ .off1 = td_var_offset(ioprio_class),
.help = "Set job IO priority class",
.minval = 0,
.maxval = 3,
+ .interval = 1,
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_CRED,
},
#endif
{
.name = "thinktime",
+ .lname = "Thinktime",
.type = FIO_OPT_INT,
.off1 = td_var_offset(thinktime),
.help = "Idle time between IO buffers (usec)",
.def = "0",
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_THINKTIME,
},
{
.name = "thinktime_spin",
+ .lname = "Thinktime spin",
.type = FIO_OPT_INT,
.off1 = td_var_offset(thinktime_spin),
.help = "Start think time by spinning this amount (usec)",
.def = "0",
.parent = "thinktime",
+ .hide = 1,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_THINKTIME,
},
{
.name = "thinktime_blocks",
+ .lname = "Thinktime blocks",
.type = FIO_OPT_INT,
.off1 = td_var_offset(thinktime_blocks),
.help = "IO buffer period between 'thinktime'",
.def = "1",
.parent = "thinktime",
+ .hide = 1,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_THINKTIME,
},
{
.name = "rate",
+ .lname = "I/O rate",
.type = FIO_OPT_INT,
.off1 = td_var_offset(rate[DDIR_READ]),
.off2 = td_var_offset(rate[DDIR_WRITE]),
.off3 = td_var_offset(rate[DDIR_TRIM]),
.help = "Set bandwidth rate",
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_RATE,
},
{
.name = "ratemin",
+ .lname = "I/O min rate",
.type = FIO_OPT_INT,
.off1 = td_var_offset(ratemin[DDIR_READ]),
.off2 = td_var_offset(ratemin[DDIR_WRITE]),
.off3 = td_var_offset(ratemin[DDIR_TRIM]),
.help = "Job must meet this rate or it will be shutdown",
.parent = "rate",
+ .hide = 1,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_RATE,
},
{
.name = "rate_iops",
+ .lname = "I/O rate IOPS",
.type = FIO_OPT_INT,
.off1 = td_var_offset(rate_iops[DDIR_READ]),
.off2 = td_var_offset(rate_iops[DDIR_WRITE]),
.off3 = td_var_offset(rate_iops[DDIR_TRIM]),
.help = "Limit IO used to this number of IO operations/sec",
+ .hide = 1,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_RATE,
},
{
.name = "rate_iops_min",
+ .lname = "I/O min rate IOPS",
.type = FIO_OPT_INT,
.off1 = td_var_offset(rate_iops_min[DDIR_READ]),
.off2 = td_var_offset(rate_iops_min[DDIR_WRITE]),
.off3 = td_var_offset(rate_iops_min[DDIR_TRIM]),
.help = "Job must meet this rate or it will be shut down",
.parent = "rate_iops",
+ .hide = 1,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_RATE,
},
{
.name = "ratecycle",
+ .lname = "I/O rate cycle",
.type = FIO_OPT_INT,
.off1 = td_var_offset(ratecycle),
.help = "Window average for rate limits (msec)",
.def = "1000",
.parent = "rate",
+ .hide = 1,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_RATE,
},
{
.name = "max_latency",
.type = FIO_OPT_INT,
.off1 = td_var_offset(max_latency),
.help = "Maximum tolerated IO latency (usec)",
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_RATE,
},
{
.name = "invalidate",
+ .lname = "Cache invalidate",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(invalidate_cache),
.help = "Invalidate buffer/page cache prior to running job",
.def = "1",
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_IO_TYPE,
},
{
.name = "sync",
+ .lname = "Synchronous I/O",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(sync_io),
.help = "Use O_SYNC for buffered writes",
.def = "0",
.parent = "buffered",
- },
- {
- .name = "bwavgtime",
- .type = FIO_OPT_INT,
- .off1 = td_var_offset(bw_avg_time),
- .help = "Time window over which to calculate bandwidth"
- " (msec)",
- .def = "500",
- .parent = "write_bw_log",
- },
- {
- .name = "iopsavgtime",
- .type = FIO_OPT_INT,
- .off1 = td_var_offset(iops_avg_time),
- .help = "Time window over which to calculate IOPS (msec)",
- .def = "500",
- .parent = "write_iops_log",
+ .hide = 1,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_IO_TYPE,
},
{
.name = "create_serialize",
+ .lname = "Create serialize",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(create_serialize),
.help = "Serialize creating of job files",
.def = "1",
+ .category = FIO_OPT_C_FILE,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "create_fsync",
+ .lname = "Create fsync",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(create_fsync),
.help = "fsync file after creation",
.def = "1",
+ .category = FIO_OPT_C_FILE,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "create_on_open",
+ .lname = "Create on open",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(create_on_open),
.help = "Create files when they are opened for IO",
.def = "0",
+ .category = FIO_OPT_C_FILE,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "create_only",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(create_only),
.help = "Only perform file creation phase",
+ .category = FIO_OPT_C_FILE,
.def = "0",
},
{
.name = "pre_read",
+ .lname = "Pre-read files",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(pre_read),
.help = "Pre-read files before starting official testing",
.def = "0",
- },
- {
- .name = "cpuload",
- .type = FIO_OPT_INT,
- .off1 = td_var_offset(cpuload),
- .help = "Use this percentage of CPU",
- },
- {
- .name = "cpuchunks",
- .type = FIO_OPT_INT,
- .off1 = td_var_offset(cpucycle),
- .help = "Length of the CPU burn cycles (usecs)",
- .def = "50000",
- .parent = "cpuload",
+ .category = FIO_OPT_C_FILE,
+ .group = FIO_OPT_G_INVALID,
},
#ifdef FIO_HAVE_CPU_AFFINITY
{
.name = "cpumask",
+ .lname = "CPU mask",
.type = FIO_OPT_INT,
.cb = str_cpumask_cb,
.help = "CPU affinity mask",
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_CRED,
},
{
.name = "cpus_allowed",
+ .lname = "CPUs allowed",
.type = FIO_OPT_STR,
.cb = str_cpus_allowed_cb,
.help = "Set CPUs allowed",
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_CRED,
},
#endif
#ifdef CONFIG_LIBNUMA
#endif
{
.name = "end_fsync",
+ .lname = "End fsync",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(end_fsync),
.help = "Include fsync at the end of job",
.def = "0",
+ .category = FIO_OPT_C_FILE,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "fsync_on_close",
+ .lname = "Fsync on close",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(fsync_on_close),
.help = "fsync files on close",
.def = "0",
+ .category = FIO_OPT_C_FILE,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "unlink",
+ .lname = "Unlink file",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(unlink),
.help = "Unlink created files after job has completed",
.def = "0",
+ .category = FIO_OPT_C_FILE,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "exitall",
+ .lname = "Exit-all on terminate",
.type = FIO_OPT_STR_SET,
.cb = str_exitall_cb,
.help = "Terminate all jobs when one exits",
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_PROCESS,
},
{
.name = "stonewall",
+ .lname = "Wait for previous",
.alias = "wait_for_previous",
.type = FIO_OPT_STR_SET,
.off1 = td_var_offset(stonewall),
.help = "Insert a hard barrier between this job and previous",
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_PROCESS,
},
{
.name = "new_group",
+ .lname = "New group",
.type = FIO_OPT_STR_SET,
.off1 = td_var_offset(new_group),
.help = "Mark the start of a new group (for reporting)",
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_PROCESS,
},
{
.name = "thread",
+ .lname = "Thread",
.type = FIO_OPT_STR_SET,
.off1 = td_var_offset(use_thread),
- .help = "Use threads instead of forks",
+ .help = "Use threads instead of processes",
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_PROCESS,
},
{
.name = "write_bw_log",
- .type = FIO_OPT_STR,
- .off1 = td_var_offset(write_bw_log),
- .cb = str_write_bw_log_cb,
+ .lname = "Write bandwidth log",
+ .type = FIO_OPT_STR_STORE,
+ .off1 = td_var_offset(bw_log_file),
.help = "Write log of bandwidth during run",
+ .category = FIO_OPT_C_LOG,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "write_lat_log",
- .type = FIO_OPT_STR,
- .off1 = td_var_offset(write_lat_log),
- .cb = str_write_lat_log_cb,
+ .lname = "Write latency log",
+ .type = FIO_OPT_STR_STORE,
+ .off1 = td_var_offset(lat_log_file),
.help = "Write log of latency during run",
+ .category = FIO_OPT_C_LOG,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "write_iops_log",
+ .lname = "Write IOPS log",
.type = FIO_OPT_STR,
- .off1 = td_var_offset(write_iops_log),
- .cb = str_write_iops_log_cb,
+ .off1 = td_var_offset(iops_log_file),
.help = "Write log of IOPS during run",
+ .category = FIO_OPT_C_LOG,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "log_avg_msec",
+ .lname = "Log averaging (msec)",
.type = FIO_OPT_INT,
.off1 = td_var_offset(log_avg_msec),
.help = "Average bw/iops/lat logs over this period of time",
.def = "0",
+ .category = FIO_OPT_C_LOG,
+ .group = FIO_OPT_G_INVALID,
},
{
- .name = "hugepage-size",
+ .name = "bwavgtime",
+ .lname = "Bandwidth average time",
.type = FIO_OPT_INT,
- .off1 = td_var_offset(hugepage_size),
- .help = "When using hugepages, specify size of each page",
- .def = __fio_stringify(FIO_HUGE_PAGE),
+ .off1 = td_var_offset(bw_avg_time),
+ .help = "Time window over which to calculate bandwidth"
+ " (msec)",
+ .def = "500",
+ .parent = "write_bw_log",
+ .hide = 1,
+ .interval = 100,
+ .category = FIO_OPT_C_LOG,
+ .group = FIO_OPT_G_INVALID,
+ },
+ {
+ .name = "iopsavgtime",
+ .lname = "IOPS average time",
+ .type = FIO_OPT_INT,
+ .off1 = td_var_offset(iops_avg_time),
+ .help = "Time window over which to calculate IOPS (msec)",
+ .def = "500",
+ .parent = "write_iops_log",
+ .hide = 1,
+ .interval = 100,
+ .category = FIO_OPT_C_LOG,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "group_reporting",
- .type = FIO_OPT_STR_SET,
+ .lname = "Group reporting",
+ .type = FIO_OPT_BOOL,
.off1 = td_var_offset(group_reporting),
.help = "Do reporting on a per-group basis",
+ .def = "1",
+ .category = FIO_OPT_C_STAT,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "zero_buffers",
+ .lname = "Zero I/O buffers",
.type = FIO_OPT_STR_SET,
.off1 = td_var_offset(zero_buffers),
.help = "Init IO buffers to all zeroes",
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_IO_BUF,
},
{
.name = "refill_buffers",
+ .lname = "Refill I/O buffers",
.type = FIO_OPT_STR_SET,
.off1 = td_var_offset(refill_buffers),
.help = "Refill IO buffers on every IO submit",
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_IO_BUF,
},
{
.name = "scramble_buffers",
+ .lname = "Scramble I/O buffers",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(scramble_buffers),
.help = "Slightly scramble buffers on every IO submit",
.def = "1",
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_IO_BUF,
},
{
.name = "buffer_compress_percentage",
+ .lname = "Buffer compression percentage",
.type = FIO_OPT_INT,
.off1 = td_var_offset(compress_percentage),
.maxval = 100,
.minval = 1,
.help = "How compressible the buffer is (approximately)",
+ .interval = 5,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_IO_BUF,
},
{
.name = "buffer_compress_chunk",
+ .lname = "Buffer compression chunk size",
.type = FIO_OPT_INT,
.off1 = td_var_offset(compress_chunk),
.parent = "buffer_compress_percentage",
+ .hide = 1,
.help = "Size of compressible region in buffer",
+ .interval = 256,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_IO_BUF,
},
{
.name = "clat_percentiles",
+ .lname = "Completion latency percentiles",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(clat_percentiles),
.help = "Enable the reporting of completion latency percentiles",
.def = "1",
+ .category = FIO_OPT_C_STAT,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "percentile_list",
+ .lname = "Completion latency percentile list",
.type = FIO_OPT_FLOAT_LIST,
.off1 = td_var_offset(percentile_list),
.off2 = td_var_offset(percentile_precision),
.maxlen = FIO_IO_U_LIST_MAX_LEN,
.minfp = 0.0,
.maxfp = 100.0,
+ .category = FIO_OPT_C_STAT,
+ .group = FIO_OPT_G_INVALID,
},
#ifdef FIO_HAVE_DISK_UTIL
{
.name = "disk_util",
+ .lname = "Disk utilization",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(do_disk_util),
.help = "Log disk utilization statistics",
.def = "1",
+ .category = FIO_OPT_C_STAT,
+ .group = FIO_OPT_G_INVALID,
},
#endif
{
.name = "gtod_reduce",
+ .lname = "Reduce gettimeofday() calls",
.type = FIO_OPT_BOOL,
.help = "Greatly reduce number of gettimeofday() calls",
.cb = str_gtod_reduce_cb,
.def = "0",
+ .hide_on_set = 1,
+ .category = FIO_OPT_C_STAT,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "disable_lat",
+ .lname = "Disable all latency stats",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(disable_lat),
.help = "Disable latency numbers",
.parent = "gtod_reduce",
+ .hide = 1,
.def = "0",
+ .category = FIO_OPT_C_STAT,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "disable_clat",
+ .lname = "Disable completion latency stats",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(disable_clat),
.help = "Disable completion latency numbers",
.parent = "gtod_reduce",
+ .hide = 1,
.def = "0",
+ .category = FIO_OPT_C_STAT,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "disable_slat",
+ .lname = "Disable submission latency stats",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(disable_slat),
.help = "Disable submission latency numbers",
.parent = "gtod_reduce",
+ .hide = 1,
.def = "0",
+ .category = FIO_OPT_C_STAT,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "disable_bw_measurement",
+ .lname = "Disable bandwidth stats",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(disable_bw),
.help = "Disable bandwidth logging",
.parent = "gtod_reduce",
+ .hide = 1,
.def = "0",
+ .category = FIO_OPT_C_STAT,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "gtod_cpu",
+ .lname = "Dedicated gettimeofday() CPU",
.type = FIO_OPT_INT,
.cb = str_gtod_cpu_cb,
.help = "Set up dedicated gettimeofday() thread on this CPU",
.verify = gtod_cpu_verify,
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_CLOCK,
},
{
.name = "unified_rw_reporting",
.off1 = td_var_offset(unified_rw_rep),
.help = "Unify reporting across data direction",
.def = "0",
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "continue_on_error",
+ .lname = "Continue on error",
.type = FIO_OPT_STR,
.off1 = td_var_offset(continue_on_error),
.help = "Continue on non-fatal errors during IO",
.def = "none",
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_ERR,
.posval = {
{ .ival = "none",
.oval = ERROR_TYPE_NONE,
.cb = str_ignore_error_cb,
.help = "Set a specific list of errors to ignore",
.parent = "rw",
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_ERR,
},
{
.name = "error_dump",
.off1 = td_var_offset(error_dump),
.def = "0",
.help = "Dump info on each error",
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_ERR,
},
-
{
.name = "profile",
+ .lname = "Profile",
.type = FIO_OPT_STR_STORE,
.off1 = td_var_offset(profile),
.help = "Select a specific builtin performance test",
+ .category = FIO_OPT_C_PROFILE,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "cgroup",
+ .lname = "Cgroup",
.type = FIO_OPT_STR_STORE,
.off1 = td_var_offset(cgroup),
.help = "Add job to cgroup of this name",
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_CGROUP,
+ },
+ {
+ .name = "cgroup_nodelete",
+ .lname = "Cgroup no-delete",
+ .type = FIO_OPT_BOOL,
+ .off1 = td_var_offset(cgroup_nodelete),
+ .help = "Do not delete cgroups after job completion",
+ .def = "0",
+ .parent = "cgroup",
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_CGROUP,
},
{
.name = "cgroup_weight",
+ .lname = "Cgroup weight",
.type = FIO_OPT_INT,
.off1 = td_var_offset(cgroup_weight),
.help = "Use given weight for cgroup",
.minval = 100,
.maxval = 1000,
- },
- {
- .name = "cgroup_nodelete",
- .type = FIO_OPT_BOOL,
- .off1 = td_var_offset(cgroup_nodelete),
- .help = "Do not delete cgroups after job completion",
- .def = "0",
+ .parent = "cgroup",
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_CGROUP,
},
{
.name = "uid",
+ .lname = "User ID",
.type = FIO_OPT_INT,
.off1 = td_var_offset(uid),
.help = "Run job with this user ID",
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_CRED,
},
{
.name = "gid",
+ .lname = "Group ID",
.type = FIO_OPT_INT,
.off1 = td_var_offset(gid),
.help = "Run job with this group ID",
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_CRED,
+ },
+ {
+ .name = "kb_base",
+ .lname = "KB Base",
+ .type = FIO_OPT_INT,
+ .off1 = td_var_offset(kb_base),
+ .verify = kb_base_verify,
+ .prio = 1,
+ .def = "1024",
+ .help = "How many bytes per KB for reporting (1000 or 1024)",
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_INVALID,
+ },
+ {
+ .name = "hugepage-size",
+ .lname = "Hugepage size",
+ .type = FIO_OPT_INT,
+ .off1 = td_var_offset(hugepage_size),
+ .help = "When using hugepages, specify size of each page",
+ .def = __fio_stringify(FIO_HUGE_PAGE),
+ .interval = 1024 * 1024,
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "flow_id",
+ .lname = "I/O flow ID",
.type = FIO_OPT_INT,
.off1 = td_var_offset(flow_id),
.help = "The flow index ID to use",
.def = "0",
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_IO_FLOW,
},
{
.name = "flow",
+ .lname = "I/O flow weight",
.type = FIO_OPT_INT,
.off1 = td_var_offset(flow),
.help = "Weight for flow control of this job",
.parent = "flow_id",
+ .hide = 1,
.def = "0",
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_IO_FLOW,
},
{
.name = "flow_watermark",
+ .lname = "I/O flow watermark",
.type = FIO_OPT_INT,
.off1 = td_var_offset(flow_watermark),
.help = "High watermark for flow control. This option"
" should be set to the same value for all threads"
" with non-zero flow.",
.parent = "flow_id",
+ .hide = 1,
.def = "1024",
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_IO_FLOW,
},
{
.name = "flow_sleep",
+ .lname = "I/O flow sleep",
.type = FIO_OPT_INT,
.off1 = td_var_offset(flow_sleep),
.help = "How many microseconds to sleep after being held"
" back by the flow control mechanism",
.parent = "flow_id",
+ .hide = 1,
.def = "0",
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_IO_FLOW,
},
{
.name = NULL,
{
unsigned int i;
- options_init(options);
+ options_init(fio_options);
i = 0;
while (long_options[i].name)
i++;
- options_to_lopts(options, long_options, i, FIO_GETOPT_JOB);
+ options_to_lopts(fio_options, long_options, i, FIO_GETOPT_JOB);
}
struct fio_keyword {
sprintf(buf, "echo '%s' | %s", tmp, BC_APP);
f = popen(buf, "r");
- if (!f) {
+ if (!f)
return NULL;
- }
ret = fread(&buf[tmp - str], 1, 128 - (tmp - str), f);
- if (ret <= 0) {
+ if (ret <= 0)
return NULL;
- }
pclose(f);
buf[(tmp - str) + ret - 1] = '\0';
int i, ret, unknown;
char **opts_copy;
- sort_options(opts, options, num_opts);
+ sort_options(opts, fio_options, num_opts);
opts_copy = dup_and_sub_options(opts, num_opts);
for (ret = 0, i = 0, unknown = 0; i < num_opts; i++) {
struct fio_option *o;
- int newret = parse_option(opts_copy[i], opts[i], options, &o,
- td);
+ int newret = parse_option(opts_copy[i], opts[i], fio_options,
+ &o, td);
if (opts_copy[i]) {
if (newret && !o) {
int fio_cmd_option_parse(struct thread_data *td, const char *opt, char *val)
{
- return parse_cmd_option(opt, val, options, td);
+ return parse_cmd_option(opt, val, fio_options, td);
}
int fio_cmd_ioengine_option_parse(struct thread_data *td, const char *opt,
void fio_fill_default_options(struct thread_data *td)
{
- fill_default_options(td, options);
+ fill_default_options(td, fio_options);
}
int fio_show_option_help(const char *opt)
{
- return show_cmd_help(options, opt);
+ return show_cmd_help(fio_options, opt);
}
void options_mem_dupe(void *data, struct fio_option *options)
*/
void fio_options_mem_dupe(struct thread_data *td)
{
- options_mem_dupe(&td->o, options);
+ options_mem_dupe(&td->o, fio_options);
if (td->eo && td->io_ops) {
void *oldeo = td->eo;
unsigned int fio_get_kb_base(void *data)
{
- struct thread_data *td = data;
+ struct thread_options *o = data;
unsigned int kb_base = 0;
- if (td)
- kb_base = td->o.kb_base;
+ if (o)
+ kb_base = o->kb_base;
if (!kb_base)
kb_base = 1024;
struct fio_option *__o;
int opt_index = 0;
- __o = options;
+ __o = fio_options;
while (__o->name) {
opt_index++;
__o++;
}
- memcpy(&options[opt_index], o, sizeof(*o));
+ memcpy(&fio_options[opt_index], o, sizeof(*o));
return 0;
}
{
struct fio_option *o;
- o = options;
+ o = fio_options;
while (o->name) {
if (o->prof_name && !strcmp(o->prof_name, prof_name)) {
o->type = FIO_OPT_INVALID;
struct fio_option *o;
unsigned int i;
- o = find_option(options, optname);
+ o = find_option(fio_options, optname);
if (!o)
return;
struct fio_option *o;
unsigned int i;
- o = find_option(options, optname);
+ o = find_option(fio_options, optname);
if (!o)
return;
void fio_options_free(struct thread_data *td)
{
- options_free(options, td);
+ options_free(fio_options, td);
if (td->eo && td->io_ops && td->io_ops->options) {
options_free(td->io_ops->options, td->eo);
free(td->eo);
td->eo = NULL;
}
}
+
+struct fio_option *fio_option_find(const char *name)
+{
+ return find_option(fio_options, name);
+}
+
#include <netdb.h>
#include <syslog.h>
#include <signal.h>
+#include <zlib.h>
#include "fio.h"
#include "server.h"
#include "crc/crc16.h"
#include "lib/ieee754.h"
-int fio_net_port = 8765;
+int fio_net_port = FIO_NET_PORT;
int exit_backend = 0;
static char *bind_sock;
static struct sockaddr_in saddr_in;
static struct sockaddr_in6 saddr_in6;
-static int first_cmd_check;
static int use_ipv6;
+struct fio_fork_item {
+ struct flist_head list;
+ int exitval;
+ int signal;
+ int exited;
+ pid_t pid;
+};
+
+/* Created on fork on new connection */
+static FLIST_HEAD(conn_list);
+
+/* Created on job fork from connection */
+static FLIST_HEAD(job_list);
+
static const char *fio_server_ops[FIO_NET_CMD_NR] = {
"",
"QUIT",
"START",
"STOP",
"DISK_UTIL",
- "RUN",
+ "SERVER_START",
+ "ADD_JOB",
+ "CMD_RUN",
+ "CMD_IOLOG",
};
const char *fio_server_op(unsigned int op)
return buf;
}
-int fio_send_data(int sk, const void *p, unsigned int len)
+static ssize_t iov_total_len(const struct iovec *iov, int count)
{
- assert(len <= sizeof(struct fio_net_cmd) + FIO_SERVER_MAX_PDU);
+ ssize_t ret = 0;
- do {
- int ret = send(sk, p, len, 0);
+ while (count--) {
+ ret += iov->iov_len;
+ iov++;
+ }
+
+ return ret;
+}
+
+static int fio_sendv_data(int sk, struct iovec *iov, int count)
+{
+ ssize_t total_len = iov_total_len(iov, count);
+ ssize_t ret;
+ do {
+ ret = writev(sk, iov, count);
if (ret > 0) {
- len -= ret;
- if (!len)
+ total_len -= ret;
+ if (!total_len)
break;
- p += ret;
- continue;
+
+ while (ret) {
+ if (ret >= iov->iov_len) {
+ ret -= iov->iov_len;
+ iov++;
+ continue;
+ }
+ iov->iov_base += ret;
+ iov->iov_len -= ret;
+ ret = 0;
+ }
} else if (!ret)
break;
else if (errno == EAGAIN || errno == EINTR)
break;
} while (!exit_backend);
- if (!len)
+ if (!total_len)
return 0;
+ if (errno)
+ return -errno;
+
return 1;
}
+int fio_send_data(int sk, const void *p, unsigned int len)
+{
+ struct iovec iov = { .iov_base = (void *) p, .iov_len = len };
+
+ assert(len <= sizeof(struct fio_net_cmd) + FIO_SERVER_MAX_FRAGMENT_PDU);
+
+ return fio_sendv_data(sk, &iov, 1);
+}
+
int fio_recv_data(int sk, void *p, unsigned int len)
{
do {
return 1;
}
- if (cmd->pdu_len > FIO_SERVER_MAX_PDU) {
+ if (cmd->pdu_len > FIO_SERVER_MAX_FRAGMENT_PDU) {
log_err("fio: command payload too large: %u\n", cmd->pdu_len);
return 1;
}
cmdret = NULL;
} else if (cmdret) {
/* zero-terminate text input */
- if (cmdret->pdu_len && (cmdret->opcode == FIO_NET_CMD_TEXT ||
- cmdret->opcode == FIO_NET_CMD_JOB)) {
- char *buf = (char *) cmdret->payload;
-
- buf[cmdret->pdu_len ] = '\0';
+ if (cmdret->pdu_len) {
+ if (cmdret->opcode == FIO_NET_CMD_TEXT) {
+ struct cmd_text_pdu *pdu = (struct cmd_text_pdu *) cmdret->payload;
+ char *buf = (char *) pdu->buf;
+
+ buf[pdu->buf_len] = '\0';
+ } else if (cmdret->opcode == FIO_NET_CMD_JOB) {
+ struct cmd_job_pdu *pdu = (struct cmd_job_pdu *) cmdret->payload;
+ char *buf = (char *) pdu->buf;
+ int len = le32_to_cpu(pdu->buf_len);
+
+ buf[len] = '\0';
+ }
}
+
/* frag flag is internal */
cmdret->flags &= ~FIO_NET_CMD_F_MORE;
}
return cmdret;
}
-void fio_net_cmd_crc(struct fio_net_cmd *cmd)
+static void add_reply(uint64_t tag, struct flist_head *list)
+{
+ struct fio_net_cmd_reply *reply = (struct fio_net_cmd_reply *) tag;
+
+ flist_add_tail(&reply->list, list);
+}
+
+static uint64_t alloc_reply(uint64_t tag, uint16_t opcode)
+{
+ struct fio_net_cmd_reply *reply;
+
+ reply = calloc(1, sizeof(*reply));
+ INIT_FLIST_HEAD(&reply->list);
+ gettimeofday(&reply->tv, NULL);
+ reply->saved_tag = tag;
+ reply->opcode = opcode;
+
+ return (uintptr_t) reply;
+}
+
+static void free_reply(uint64_t tag)
+{
+ struct fio_net_cmd_reply *reply = (struct fio_net_cmd_reply *) tag;
+
+ free(reply);
+}
+
+void fio_net_cmd_crc_pdu(struct fio_net_cmd *cmd, const void *pdu)
{
uint32_t pdu_len;
cmd->cmd_crc16 = __cpu_to_le16(fio_crc16(cmd, FIO_NET_CMD_CRC_SZ));
pdu_len = le32_to_cpu(cmd->pdu_len);
- if (pdu_len)
- cmd->pdu_crc16 = __cpu_to_le16(fio_crc16(cmd->payload, pdu_len));
+ cmd->pdu_crc16 = __cpu_to_le16(fio_crc16(pdu, pdu_len));
+}
+
+void fio_net_cmd_crc(struct fio_net_cmd *cmd)
+{
+ fio_net_cmd_crc_pdu(cmd, cmd->payload);
}
int fio_net_send_cmd(int fd, uint16_t opcode, const void *buf, off_t size,
- uint64_t tag)
+ uint64_t *tagptr, struct flist_head *list)
{
struct fio_net_cmd *cmd = NULL;
size_t this_len, cur_len = 0;
+ uint64_t tag;
int ret;
+ if (list) {
+ assert(tagptr);
+ tag = *tagptr = alloc_reply(*tagptr, opcode);
+ } else
+ tag = tagptr ? *tagptr : 0;
+
do {
this_len = size;
- if (this_len > FIO_SERVER_MAX_PDU)
- this_len = FIO_SERVER_MAX_PDU;
+ if (this_len > FIO_SERVER_MAX_FRAGMENT_PDU)
+ this_len = FIO_SERVER_MAX_FRAGMENT_PDU;
if (!cmd || cur_len < sizeof(*cmd) + this_len) {
if (cmd)
buf += this_len;
} while (!ret && size);
+ if (list) {
+ if (ret)
+ free_reply(tag);
+ else
+ add_reply(tag, list);
+ }
+
if (cmd)
free(cmd);
int fio_net_send_simple_cmd(int sk, uint16_t opcode, uint64_t tag,
struct flist_head *list)
{
- struct fio_net_int_cmd *cmd;
int ret;
- if (!list)
- return fio_net_send_simple_stack_cmd(sk, opcode, tag);
-
- cmd = malloc(sizeof(*cmd));
-
- fio_init_net_cmd(&cmd->cmd, opcode, NULL, 0, (uintptr_t) cmd);
- fio_net_cmd_crc(&cmd->cmd);
+ if (list)
+ tag = alloc_reply(tag, opcode);
- INIT_FLIST_HEAD(&cmd->list);
- fio_gettime(&cmd->tv, NULL);
- cmd->saved_tag = tag;
-
- ret = fio_send_data(sk, &cmd->cmd, sizeof(cmd->cmd));
+ ret = fio_net_send_simple_stack_cmd(sk, opcode, tag);
if (ret) {
- free(cmd);
+ if (list)
+ free_reply(tag);
+
return ret;
}
- flist_add_tail(&cmd->list, list);
+ if (list)
+ add_reply(tag, list);
+
return 0;
}
-static int fio_server_send_quit_cmd(void)
+int fio_net_send_quit(int sk)
{
dprint(FD_NET, "server: sending quit\n");
- return fio_net_send_simple_cmd(server_fd, FIO_NET_CMD_QUIT, 0, NULL);
+
+ return fio_net_send_simple_cmd(sk, FIO_NET_CMD_QUIT, 0, NULL);
}
-static int handle_job_cmd(struct fio_net_cmd *cmd)
+static int fio_net_send_ack(int sk, struct fio_net_cmd *cmd, int error,
+ int signal)
{
- char *buf = (char *) cmd->payload;
- struct cmd_start_pdu spdu;
struct cmd_end_pdu epdu;
- int ret;
+ uint64_t tag = 0;
- if (parse_jobs_ini(buf, 1, 0)) {
- fio_server_send_quit_cmd();
- return -1;
+ if (cmd)
+ tag = cmd->tag;
+
+ epdu.error = __cpu_to_le32(error);
+ epdu.signal = __cpu_to_le32(signal);
+ return fio_net_send_cmd(sk, FIO_NET_CMD_STOP, &epdu, sizeof(epdu), &tag, NULL);
+}
+
+int fio_net_send_stop(int sk, int error, int signal)
+{
+ dprint(FD_NET, "server: sending stop (%d, %d)\n", error, signal);
+ return fio_net_send_ack(sk, NULL, error, signal);
+}
+
+static void fio_server_add_fork_item(pid_t pid, struct flist_head *list)
+{
+ struct fio_fork_item *ffi;
+
+ ffi = malloc(sizeof(*ffi));
+ ffi->exitval = 0;
+ ffi->signal = 0;
+ ffi->exited = 0;
+ ffi->pid = pid;
+ flist_add_tail(&ffi->list, list);
+}
+
+static void fio_server_add_conn_pid(pid_t pid)
+{
+ dprint(FD_NET, "server: forked off connection job (pid=%u)\n", pid);
+ fio_server_add_fork_item(pid, &conn_list);
+}
+
+static void fio_server_add_job_pid(pid_t pid)
+{
+ dprint(FD_NET, "server: forked off job job (pid=%u)\n", pid);
+ fio_server_add_fork_item(pid, &job_list);
+}
+
+static void fio_server_check_fork_item(struct fio_fork_item *ffi)
+{
+ int ret, status;
+
+ ret = waitpid(ffi->pid, &status, WNOHANG);
+ if (ret < 0) {
+ if (errno == ECHILD) {
+ log_err("fio: connection pid %u disappeared\n", ffi->pid);
+ ffi->exited = 1;
+ } else
+ log_err("fio: waitpid: %s\n", strerror(errno));
+ } else if (ret == ffi->pid) {
+ if (WIFSIGNALED(status)) {
+ ffi->signal = WTERMSIG(status);
+ ffi->exited = 1;
+ }
+ if (WIFEXITED(status)) {
+ if (WEXITSTATUS(status))
+ ffi->exitval = WEXITSTATUS(status);
+ ffi->exited = 1;
+ }
}
+}
- spdu.jobs = cpu_to_le32(thread_number);
- spdu.stat_outputs = cpu_to_le32(stat_number);
- fio_net_send_cmd(server_fd, FIO_NET_CMD_START, &spdu, sizeof(spdu), 0);
+static void fio_server_fork_item_done(struct fio_fork_item *ffi)
+{
+ dprint(FD_NET, "pid %u exited, sig=%u, exitval=%d\n", ffi->pid, ffi->signal, ffi->exitval);
+
+ /*
+ * Fold STOP and QUIT...
+ */
+ fio_net_send_stop(server_fd, ffi->exitval, ffi->signal);
+ fio_net_send_quit(server_fd);
+ flist_del(&ffi->list);
+ free(ffi);
+}
+
+static void fio_server_check_fork_items(struct flist_head *list)
+{
+ struct flist_head *entry, *tmp;
+ struct fio_fork_item *ffi;
+
+ flist_for_each_safe(entry, tmp, list) {
+ ffi = flist_entry(entry, struct fio_fork_item, list);
+
+ fio_server_check_fork_item(ffi);
+
+ if (ffi->exited)
+ fio_server_fork_item_done(ffi);
+ }
+}
+
+static void fio_server_check_jobs(void)
+{
+ fio_server_check_fork_items(&job_list);
+}
+
+static void fio_server_check_conns(void)
+{
+ fio_server_check_fork_items(&conn_list);
+}
+
+static int handle_run_cmd(struct fio_net_cmd *cmd)
+{
+ pid_t pid;
+ int ret;
+
+ set_genesis_time();
+
+ pid = fork();
+ if (pid) {
+ fio_server_add_job_pid(pid);
+ return 0;
+ }
ret = fio_backend();
+ free_threads_shm();
+ _exit(ret);
+}
- epdu.error = ret;
- fio_net_send_cmd(server_fd, FIO_NET_CMD_STOP, &epdu, sizeof(epdu), 0);
+static int handle_job_cmd(struct fio_net_cmd *cmd)
+{
+ struct cmd_job_pdu *pdu = (struct cmd_job_pdu *) cmd->payload;
+ void *buf = pdu->buf;
+ struct cmd_start_pdu spdu;
- fio_server_send_quit_cmd();
- reset_fio_state();
- return ret;
+ pdu->buf_len = le32_to_cpu(pdu->buf_len);
+ pdu->client_type = le32_to_cpu(pdu->client_type);
+
+ if (parse_jobs_ini(buf, 1, 0, pdu->client_type)) {
+ fio_net_send_quit(server_fd);
+ return -1;
+ }
+
+ spdu.jobs = cpu_to_le32(thread_number);
+ spdu.stat_outputs = cpu_to_le32(stat_number);
+ fio_net_send_cmd(server_fd, FIO_NET_CMD_START, &spdu, sizeof(spdu), NULL, NULL);
+ return 0;
}
static int handle_jobline_cmd(struct fio_net_cmd *cmd)
struct cmd_single_line_pdu *cslp;
struct cmd_line_pdu *clp;
unsigned long offset;
+ struct cmd_start_pdu spdu;
char **argv;
- int ret, i;
+ int i;
clp = pdu;
clp->lines = le16_to_cpu(clp->lines);
+ clp->client_type = le16_to_cpu(clp->client_type);
argv = malloc(clp->lines * sizeof(char *));
offset = sizeof(*clp);
dprint(FD_NET, "server: %d: %s\n", i, argv[i]);
}
- if (parse_cmd_line(clp->lines, argv)) {
- fio_server_send_quit_cmd();
+ if (parse_cmd_line(clp->lines, argv, clp->client_type)) {
+ fio_net_send_quit(server_fd);
free(argv);
return -1;
}
free(argv);
- fio_net_send_simple_cmd(server_fd, FIO_NET_CMD_START, 0, NULL);
-
- ret = fio_backend();
- fio_server_send_quit_cmd();
- reset_fio_state();
- return ret;
+ spdu.jobs = cpu_to_le32(thread_number);
+ spdu.stat_outputs = cpu_to_le32(stat_number);
+ fio_net_send_cmd(server_fd, FIO_NET_CMD_START, &spdu, sizeof(spdu), NULL, NULL);
+ return 0;
}
static int handle_probe_cmd(struct fio_net_cmd *cmd)
{
struct cmd_probe_pdu probe;
+ uint64_t tag = cmd->tag;
dprint(FD_NET, "server: sending probe reply\n");
probe.os = FIO_OS;
probe.arch = FIO_ARCH;
-
probe.bpp = sizeof(void *);
+ probe.cpus = __cpu_to_le32(cpus_online());
+ probe.flags = 0;
- return fio_net_send_cmd(server_fd, FIO_NET_CMD_PROBE, &probe, sizeof(probe), cmd->tag);
+ return fio_net_send_cmd(server_fd, FIO_NET_CMD_PROBE, &probe, sizeof(probe), &tag, NULL);
}
static int handle_send_eta_cmd(struct fio_net_cmd *cmd)
{
struct jobs_eta *je;
size_t size;
+ uint64_t tag = cmd->tag;
int i;
if (!thread_number)
je->nr_ramp = cpu_to_le32(je->nr_ramp);
je->nr_pending = cpu_to_le32(je->nr_pending);
je->files_open = cpu_to_le32(je->files_open);
- je->m_rate = cpu_to_le32(je->m_rate);
- je->t_rate = cpu_to_le32(je->t_rate);
- je->m_iops = cpu_to_le32(je->m_iops);
- je->t_iops = cpu_to_le32(je->t_iops);
for (i = 0; i < DDIR_RWDIR_CNT; i++) {
- je->rate[i] = cpu_to_le32(je->rate[i]);
- je->iops[i] = cpu_to_le32(je->iops[i]);
+ je->m_rate[i] = cpu_to_le32(je->m_rate[i]);
+ je->t_rate[i] = cpu_to_le32(je->t_rate[i]);
+ je->m_iops[i] = cpu_to_le32(je->m_iops[i]);
+ je->t_iops[i] = cpu_to_le32(je->t_iops[i]);
}
je->elapsed_sec = cpu_to_le64(je->elapsed_sec);
je->eta_sec = cpu_to_le64(je->eta_sec);
+ je->nr_threads = cpu_to_le32(je->nr_threads);
je->is_pow2 = cpu_to_le32(je->is_pow2);
- fio_net_send_cmd(server_fd, FIO_NET_CMD_ETA, je, size, cmd->tag);
+ fio_net_send_cmd(server_fd, FIO_NET_CMD_ETA, je, size, &tag, NULL);
free(je);
return 0;
}
+static int send_update_job_reply(int fd, uint64_t __tag, int error)
+{
+ uint64_t tag = __tag;
+ uint32_t pdu_error;
+
+ pdu_error = __cpu_to_le32(error);
+ return fio_net_send_cmd(fd, FIO_NET_CMD_UPDATE_JOB, &pdu_error, sizeof(pdu_error), &tag, NULL);
+}
+
+static int handle_update_job_cmd(struct fio_net_cmd *cmd)
+{
+ struct cmd_add_job_pdu *pdu = (struct cmd_add_job_pdu *) cmd->payload;
+ struct thread_data *td;
+ uint32_t tnumber;
+
+ tnumber = le32_to_cpu(pdu->thread_number);
+
+ dprint(FD_NET, "server: updating options for job %u\n", tnumber);
+
+ if (!tnumber || tnumber > thread_number) {
+ send_update_job_reply(server_fd, cmd->tag, ENODEV);
+ return 0;
+ }
+
+ td = &threads[tnumber - 1];
+ convert_thread_options_to_cpu(&td->o, &pdu->top);
+ send_update_job_reply(server_fd, cmd->tag, 0);
+ return 0;
+}
+
static int handle_command(struct fio_net_cmd *cmd)
{
int ret;
case FIO_NET_CMD_SEND_ETA:
ret = handle_send_eta_cmd(cmd);
break;
+ case FIO_NET_CMD_RUN:
+ ret = handle_run_cmd(cmd);
+ break;
+ case FIO_NET_CMD_UPDATE_JOB:
+ ret = handle_update_job_cmd(cmd);
+ break;
default:
- log_err("fio: unknown opcode: %s\n",fio_server_op(cmd->opcode));
+ log_err("fio: unknown opcode: %s\n", fio_server_op(cmd->opcode));
ret = 1;
}
return ret;
}
-static int handle_connection(int sk, int block)
+static int handle_connection(int sk)
{
struct fio_net_cmd *cmd = NULL;
int ret = 0;
+ reset_fio_state();
+ INIT_FLIST_HEAD(&job_list);
+ server_fd = sk;
+
/* read forever */
while (!exit_backend) {
struct pollfd pfd = {
ret = 0;
do {
- ret = poll(&pfd, 1, 100);
+ int timeout = 1000;
+
+ if (!flist_empty(&job_list))
+ timeout = 100;
+
+ ret = poll(&pfd, 1, timeout);
if (ret < 0) {
if (errno == EINTR)
break;
log_err("fio: poll: %s\n", strerror(errno));
break;
} else if (!ret) {
- if (!block)
- return 0;
+ fio_server_check_jobs();
continue;
}
}
} while (!exit_backend);
+ fio_server_check_jobs();
+
if (ret < 0)
break;
if (cmd)
free(cmd);
- return ret;
-}
-
-void fio_server_idle_loop(void)
-{
- if (!first_cmd_check)
- fio_net_send_simple_cmd(server_fd, FIO_NET_CMD_RUN, 0, NULL);
- if (server_fd != -1)
- handle_connection(server_fd, 0);
+ close(sk);
+ _exit(ret);
}
static int accept_loop(int listen_sk)
struct sockaddr_in addr;
socklen_t len = sizeof(addr);
struct pollfd pfd;
- int ret, sk, flags, exitval = 0;
+ int ret = 0, sk, flags, exitval = 0;
dprint(FD_NET, "server enter accept loop\n");
flags = fcntl(listen_sk, F_GETFL);
flags |= O_NONBLOCK;
fcntl(listen_sk, F_SETFL, flags);
-again:
- pfd.fd = listen_sk;
- pfd.events = POLLIN;
- do {
- ret = poll(&pfd, 1, 100);
- if (ret < 0) {
- if (errno == EINTR)
- break;
- log_err("fio: poll: %s\n", strerror(errno));
- goto out;
- } else if (!ret)
- continue;
- if (pfd.revents & POLLIN)
- break;
- } while (!exit_backend);
+ while (!exit_backend) {
+ pid_t pid;
- if (exit_backend)
- goto out;
+ pfd.fd = listen_sk;
+ pfd.events = POLLIN;
+ do {
+ int timeout = 1000;
- sk = accept(listen_sk, (struct sockaddr *) &addr, &len);
- if (sk < 0) {
- log_err("fio: accept: %s\n", strerror(errno));
- return -1;
- }
+ if (!flist_empty(&conn_list))
+ timeout = 100;
- dprint(FD_NET, "server: connect from %s\n", inet_ntoa(addr.sin_addr));
+ ret = poll(&pfd, 1, timeout);
+ if (ret < 0) {
+ if (errno == EINTR)
+ break;
+ log_err("fio: poll: %s\n", strerror(errno));
+ break;
+ } else if (!ret) {
+ fio_server_check_conns();
+ continue;
+ }
- server_fd = sk;
+ if (pfd.revents & POLLIN)
+ break;
+ } while (!exit_backend);
- exitval = handle_connection(sk, 1);
+ fio_server_check_conns();
- server_fd = -1;
- close(sk);
+ if (exit_backend || ret < 0)
+ break;
- if (!exit_backend)
- goto again;
+ sk = accept(listen_sk, (struct sockaddr *) &addr, &len);
+ if (sk < 0) {
+ log_err("fio: accept: %s\n", strerror(errno));
+ return -1;
+ }
+
+ dprint(FD_NET, "server: connect from %s\n", inet_ntoa(addr.sin_addr));
+
+ pid = fork();
+ if (pid) {
+ close(sk);
+ fio_server_add_conn_pid(pid);
+ continue;
+ }
+
+ /* exits */
+ handle_connection(sk);
+ }
-out:
return exitval;
}
-int fio_server_text_output(const char *buf, size_t len)
+int fio_server_text_output(int level, const char *buf, size_t len)
{
- if (server_fd != -1)
- return fio_net_send_cmd(server_fd, FIO_NET_CMD_TEXT, buf, len, 0);
+ struct cmd_text_pdu *pdu;
+ unsigned int tlen;
+ struct timeval tv;
+
+ if (server_fd == -1)
+ return log_local_buf(buf, len);
+
+ tlen = sizeof(*pdu) + len;
+ pdu = malloc(tlen);
+
+ pdu->level = __cpu_to_le32(level);
+ pdu->buf_len = __cpu_to_le32(len);
- return log_local_buf(buf, len);
+ gettimeofday(&tv, NULL);
+ pdu->log_sec = __cpu_to_le64(tv.tv_sec);
+ pdu->log_usec = __cpu_to_le64(tv.tv_usec);
+
+ memcpy(pdu->buf, buf, len);
+
+ fio_net_send_cmd(server_fd, FIO_NET_CMD_TEXT, pdu, tlen, NULL, NULL);
+ free(pdu);
+ return len;
}
static void convert_io_stat(struct io_stat *dst, struct io_stat *src)
}
dst->kb_base = cpu_to_le32(src->kb_base);
+ dst->unit_base = cpu_to_le32(src->unit_base);
dst->groupid = cpu_to_le32(src->groupid);
dst->unified_rw_rep = cpu_to_le32(src->unified_rw_rep);
}
strcpy(p.ts.verror, ts->verror);
strcpy(p.ts.description, ts->description);
- p.ts.error = cpu_to_le32(ts->error);
- p.ts.groupid = cpu_to_le32(ts->groupid);
- p.ts.unified_rw_rep = cpu_to_le32(ts->unified_rw_rep);
- p.ts.pid = cpu_to_le32(ts->pid);
- p.ts.members = cpu_to_le32(ts->members);
+ p.ts.error = cpu_to_le32(ts->error);
+ p.ts.thread_number = cpu_to_le32(ts->thread_number);
+ p.ts.groupid = cpu_to_le32(ts->groupid);
+ p.ts.pid = cpu_to_le32(ts->pid);
+ p.ts.members = cpu_to_le32(ts->members);
p.ts.unified_rw_rep = cpu_to_le32(ts->unified_rw_rep);
for (i = 0; i < DDIR_RWDIR_CNT; i++) {
p.ts.total_err_count = cpu_to_le64(ts->total_err_count);
p.ts.first_error = cpu_to_le32(ts->first_error);
p.ts.kb_base = cpu_to_le32(ts->kb_base);
+ p.ts.unit_base = cpu_to_le32(ts->unit_base);
convert_gs(&p.rs, rs);
- fio_net_send_cmd(server_fd, FIO_NET_CMD_TS, &p, sizeof(p), 0);
+ fio_net_send_cmd(server_fd, FIO_NET_CMD_TS, &p, sizeof(p), NULL, NULL);
}
void fio_server_send_gs(struct group_run_stats *rs)
dprint(FD_NET, "server sending group run stats\n");
convert_gs(&gs, rs);
- fio_net_send_cmd(server_fd, FIO_NET_CMD_GS, &gs, sizeof(gs), 0);
+ fio_net_send_cmd(server_fd, FIO_NET_CMD_GS, &gs, sizeof(gs), NULL, NULL);
}
static void convert_agg(struct disk_util_agg *dst, struct disk_util_agg *src)
convert_dus(&pdu.dus, &du->dus);
convert_agg(&pdu.agg, &du->agg);
- fio_net_send_cmd(server_fd, FIO_NET_CMD_DU, &pdu, sizeof(pdu), 0);
+ fio_net_send_cmd(server_fd, FIO_NET_CMD_DU, &pdu, sizeof(pdu), NULL, NULL);
+ }
+}
+
+/*
+ * Send a command with a separate PDU, not inlined in the command
+ */
+static int fio_send_cmd_ext_pdu(int sk, uint16_t opcode, const void *buf,
+ off_t size, uint64_t tag, uint32_t flags)
+{
+ struct fio_net_cmd cmd;
+ struct iovec iov[2];
+
+ iov[0].iov_base = &cmd;
+ iov[0].iov_len = sizeof(cmd);
+ iov[1].iov_base = (void *) buf;
+ iov[1].iov_len = size;
+
+ __fio_init_net_cmd(&cmd, opcode, size, tag);
+ cmd.flags = __cpu_to_le32(flags);
+ fio_net_cmd_crc_pdu(&cmd, buf);
+
+ return fio_sendv_data(sk, iov, 2);
+}
+
+int fio_send_iolog(struct thread_data *td, struct io_log *log, const char *name)
+{
+ struct cmd_iolog_pdu pdu;
+ z_stream stream;
+ void *out_pdu;
+ int i, ret = 0;
+
+ pdu.thread_number = cpu_to_le32(td->thread_number);
+ pdu.nr_samples = __cpu_to_le32(log->nr_samples);
+ pdu.log_type = cpu_to_le32(log->log_type);
+ strcpy((char *) pdu.name, name);
+
+ for (i = 0; i < log->nr_samples; i++) {
+ struct io_sample *s = &log->log[i];
+
+ s->time = cpu_to_le64(s->time);
+ s->val = cpu_to_le64(s->val);
+ s->ddir = cpu_to_le32(s->ddir);
+ s->bs = cpu_to_le32(s->bs);
}
+
+ /*
+ * Dirty - since the log is potentially huge, compress it into
+ * FIO_SERVER_MAX_FRAGMENT_PDU chunks and let the receiving
+ * side defragment it.
+ */
+ out_pdu = malloc(FIO_SERVER_MAX_FRAGMENT_PDU);
+
+ stream.zalloc = Z_NULL;
+ stream.zfree = Z_NULL;
+ stream.opaque = Z_NULL;
+
+ if (deflateInit(&stream, Z_DEFAULT_COMPRESSION) != Z_OK) {
+ ret = 1;
+ goto err;
+ }
+
+ /*
+ * Send header first, it's not compressed.
+ */
+ ret = fio_send_cmd_ext_pdu(server_fd, FIO_NET_CMD_IOLOG, &pdu,
+ sizeof(pdu), 0, FIO_NET_CMD_F_MORE);
+ if (ret)
+ goto err_zlib;
+
+ stream.next_in = (void *) log->log;
+ stream.avail_in = log->nr_samples * sizeof(struct io_sample);
+
+ do {
+ unsigned int this_len, flags = 0;
+ int ret;
+
+ stream.avail_out = FIO_SERVER_MAX_FRAGMENT_PDU;
+ stream.next_out = out_pdu;
+ ret = deflate(&stream, Z_FINISH);
+ /* may be Z_OK, or Z_STREAM_END */
+ if (ret < 0)
+ goto err_zlib;
+
+ this_len = FIO_SERVER_MAX_FRAGMENT_PDU - stream.avail_out;
+
+ if (stream.avail_in)
+ flags = FIO_NET_CMD_F_MORE;
+
+ ret = fio_send_cmd_ext_pdu(server_fd, FIO_NET_CMD_IOLOG,
+ out_pdu, this_len, 0, flags);
+ if (ret)
+ goto err_zlib;
+ } while (stream.avail_in);
+
+err_zlib:
+ deflateEnd(&stream);
+err:
+ free(out_pdu);
+ return ret;
}
-int fio_server_log(const char *format, ...)
+void fio_server_send_add_job(struct thread_data *td)
{
- char buffer[1024];
- va_list args;
- size_t len;
+ struct cmd_add_job_pdu pdu;
+
+ memset(&pdu, 0, sizeof(pdu));
+ pdu.thread_number = cpu_to_le32(td->thread_number);
+ pdu.groupid = cpu_to_le32(td->groupid);
+ convert_thread_options_to_net(&pdu.top, &td->o);
- dprint(FD_NET, "server log\n");
+ fio_net_send_cmd(server_fd, FIO_NET_CMD_ADD_JOB, &pdu, sizeof(pdu), NULL, NULL);
+}
- va_start(args, format);
- len = vsnprintf(buffer, sizeof(buffer), format, args);
- va_end(args);
- len = min(len, sizeof(buffer) - 1);
+void fio_server_send_start(struct thread_data *td)
+{
+ assert(server_fd != -1);
- return fio_server_text_output(buffer, len);
+ fio_net_send_simple_cmd(server_fd, FIO_NET_CMD_SERVER_START, 0, NULL);
}
static int fio_init_server_ip(void)
return sk;
}
+int fio_server_parse_host(const char *host, int *ipv6, struct in_addr *inp,
+ struct in6_addr *inp6)
+
+{
+ int ret = 0;
+
+ if (*ipv6)
+ ret = inet_pton(AF_INET6, host, inp6);
+ else
+ ret = inet_pton(AF_INET, host, inp);
+
+ if (ret != 1) {
+ struct hostent *hent;
+
+ hent = gethostbyname(host);
+ if (!hent) {
+ log_err("fio: failed to resolve <%s>\n", host);
+ return 0;
+ }
+
+ if (*ipv6) {
+ if (hent->h_addrtype != AF_INET6) {
+ log_info("fio: falling back to IPv4\n");
+ *ipv6 = 0;
+ } else
+ memcpy(inp6, hent->h_addr_list[0], 16);
+ }
+ if (!*ipv6) {
+ if (hent->h_addrtype != AF_INET) {
+ log_err("fio: lookup type mismatch\n");
+ return 0;
+ }
+ memcpy(inp, hent->h_addr_list[0], 4);
+ }
+ ret = 1;
+ }
+
+ return !(ret == 1);
+}
+
/*
* Parse a host/ip/port string. Reads from 'str'.
*
{
const char *host = str;
char *portp;
- int ret, lport = 0;
+ int lport = 0;
*ptr = NULL;
*is_sock = 0;
*ptr = strdup(host);
- if (*ipv6)
- ret = inet_pton(AF_INET6, host, inp6);
- else
- ret = inet_pton(AF_INET, host, inp);
-
- if (ret != 1) {
- struct hostent *hent;
-
- hent = gethostbyname(host);
- if (!hent) {
- log_err("fio: failed to resolve <%s>\n", host);
- free(*ptr);
- *ptr = NULL;
- return 1;
- }
-
- if (*ipv6) {
- if (hent->h_addrtype != AF_INET6) {
- log_info("fio: falling back to IPv4\n");
- *ipv6 = 0;
- } else
- memcpy(inp6, hent->h_addr_list[0], 16);
- }
- if (!*ipv6) {
- if (hent->h_addrtype != AF_INET) {
- log_err("fio: lookup type mismatch\n");
- free(*ptr);
- *ptr = NULL;
- return 1;
- }
- memcpy(inp, hent->h_addr_list[0], 4);
- }
+ if (fio_server_parse_host(*ptr, ipv6, inp, inp6)) {
+ free(*ptr);
+ *ptr = NULL;
+ return 1;
}
if (*port == 0)
#if defined(WIN32)
WSADATA wsd;
- WSAStartup(MAKEWORD(2,2), &wsd);
+ WSAStartup(MAKEWORD(2, 2), &wsd);
#endif
if (!pidfile)
/*
* Discard the error bits and apply the mask to find the
- * index for the buckets in the group
+ * index for the buckets in the group
*/
offset = (FIO_IO_U_PLAT_VAL - 1) & (val >> error_bits);
/* Make sure the index does not exceed (array size - 1) */
- idx = (base + offset) < (FIO_IO_U_PLAT_NR - 1)?
+ idx = (base + offset) < (FIO_IO_U_PLAT_NR - 1) ?
(base + offset) : (FIO_IO_U_PLAT_NR - 1);
return idx;
/* MSB <= (FIO_IO_U_PLAT_BITS-1), cannot be rounded off. Use
* all bits of the sample as index */
- if (idx < (FIO_IO_U_PLAT_VAL << 1) )
+ if (idx < (FIO_IO_U_PLAT_VAL << 1))
return idx;
/* Find the group and compute the minimum value of that group */
- error_bits = (idx >> FIO_IO_U_PLAT_BITS) -1;
+ error_bits = (idx >> FIO_IO_U_PLAT_BITS) - 1;
base = 1 << (error_bits + FIO_IO_U_PLAT_BITS);
/* Find its bucket number of the group */
return cmp;
}
-static unsigned int calc_clat_percentiles(unsigned int *io_u_plat,
- unsigned long nr, fio_fp64_t *plist,
- unsigned int **output,
- unsigned int *maxv,
- unsigned int *minv)
+unsigned int calc_clat_percentiles(unsigned int *io_u_plat, unsigned long nr,
+ fio_fp64_t *plist, unsigned int **output,
+ unsigned int *maxv, unsigned int *minv)
{
unsigned long sum = 0;
unsigned int len, i, j = 0;
* isn't a worry. Also note that this does not work for NaN values.
*/
if (len > 1)
- qsort((void*)plist, len, sizeof(plist[0]), double_cmp);
+ qsort((void *)plist, len, sizeof(plist[0]), double_cmp);
/*
* Calculate bucket values, note down max and min values
free(ovals);
}
-static int calc_lat(struct io_stat *is, unsigned long *min, unsigned long *max,
- double *mean, double *dev)
+int calc_lat(struct io_stat *is, unsigned long *min, unsigned long *max,
+ double *mean, double *dev)
{
double n = is->samples;
if (!rs->max_run[i])
continue;
- p1 = num2str(rs->io_kb[i], 6, rs->kb_base, i2p);
- p2 = num2str(rs->agg[i], 6, rs->kb_base, i2p);
- p3 = num2str(rs->min_bw[i], 6, rs->kb_base, i2p);
- p4 = num2str(rs->max_bw[i], 6, rs->kb_base, i2p);
+ p1 = num2str(rs->io_kb[i], 6, rs->kb_base, i2p, 8);
+ p2 = num2str(rs->agg[i], 6, rs->kb_base, i2p, rs->unit_base);
+ p3 = num2str(rs->min_bw[i], 6, rs->kb_base, i2p, rs->unit_base);
+ p4 = num2str(rs->max_bw[i], 6, rs->kb_base, i2p, rs->unit_base);
- log_info("%s: io=%sB, aggrb=%sB/s, minb=%sB/s, maxb=%sB/s,"
+ log_info("%s: io=%s, aggrb=%s/s, minb=%s/s, maxb=%s/s,"
" mint=%llumsec, maxt=%llumsec\n",
rs->unified_rw_rep ? " MIXED" : ddir_str[i],
p1, p2, p3, p4, rs->min_run[i], rs->max_run[i]);
}
}
-#define ts_total_io_u(ts) \
- ((ts)->total_io_u[DDIR_READ] + (ts)->total_io_u[DDIR_WRITE] +\
- (ts)->total_io_u[DDIR_TRIM])
-
-static void stat_calc_dist(unsigned int *map, unsigned long total,
- double *io_u_dist)
+void stat_calc_dist(unsigned int *map, unsigned long total, double *io_u_dist)
{
int i;
static void stat_calc_lat(struct thread_stat *ts, double *dst,
unsigned int *src, int nr)
{
- unsigned long total = ts_total_io_u(ts);
+ unsigned long total = ddir_rw_sum(ts->total_io_u);
int i;
/*
}
}
-static void stat_calc_lat_u(struct thread_stat *ts, double *io_u_lat)
+void stat_calc_lat_u(struct thread_stat *ts, double *io_u_lat)
{
stat_calc_lat(ts, io_u_lat, ts->io_u_lat_u, FIO_IO_U_LAT_U_NR);
}
-static void stat_calc_lat_m(struct thread_stat *ts, double *io_u_lat)
+void stat_calc_lat_m(struct thread_stat *ts, double *io_u_lat)
{
stat_calc_lat(ts, io_u_lat, ts->io_u_lat_m, FIO_IO_U_LAT_M_NR);
}
-static int usec_to_msec(unsigned long *min, unsigned long *max, double *mean,
- double *dev)
+static void display_lat(const char *name, unsigned long min, unsigned long max,
+ double mean, double dev)
{
- if (*min > 1000 && *max > 1000 && *mean > 1000.0 && *dev > 1000.0) {
- *min /= 1000;
- *max /= 1000;
- *mean /= 1000.0;
- *dev /= 1000.0;
- return 0;
- }
+ const char *base = "(usec)";
+ char *minp, *maxp;
- return 1;
+ if (!usec_to_msec(&min, &max, &mean, &dev))
+ base = "(msec)";
+
- minp = num2str(min, 6, 1, 0);
- maxp = num2str(max, 6, 1, 0);
++ minp = num2str(min, 6, 1, 0, 0);
++ maxp = num2str(max, 6, 1, 0, 0);
+
+ log_info(" %s %s: min=%s, max=%s, avg=%5.02f,"
+ " stdev=%5.02f\n", name, base, minp, maxp, mean, dev);
+
+ free(minp);
+ free(maxp);
}
static void show_ddir_status(struct group_run_stats *rs, struct thread_stat *ts,
runt = ts->runtime[ddir];
bw = (1000 * ts->io_bytes[ddir]) / runt;
- io_p = num2str(ts->io_bytes[ddir], 6, 1, i2p);
- bw_p = num2str(bw, 6, 1, i2p);
+ io_p = num2str(ts->io_bytes[ddir], 6, 1, i2p, 8);
+ bw_p = num2str(bw, 6, 1, i2p, ts->unit_base);
iops = (1000 * (uint64_t)ts->total_io_u[ddir]) / runt;
- iops_p = num2str(iops, 6, 1, 0);
+ iops_p = num2str(iops, 6, 1, 0, 0);
- log_info(" %s: io=%sB, bw=%sB/s, iops=%s, runt=%6llumsec\n",
+ log_info(" %s: io=%s, bw=%s/s, iops=%s, runt=%6llumsec\n",
rs->unified_rw_rep ? "mixed" : ddir_str[ddir],
io_p, bw_p, iops_p, ts->runtime[ddir]);
free(bw_p);
free(iops_p);
- if (calc_lat(&ts->slat_stat[ddir], &min, &max, &mean, &dev)) {
- const char *base = "(usec)";
- char *minp, *maxp;
-
- if (!usec_to_msec(&min, &max, &mean, &dev))
- base = "(msec)";
-
- minp = num2str(min, 6, 1, 0, 0);
- maxp = num2str(max, 6, 1, 0, 0);
-
- log_info(" slat %s: min=%s, max=%s, avg=%5.02f,"
- " stdev=%5.02f\n", base, minp, maxp, mean, dev);
-
- free(minp);
- free(maxp);
- }
- if (calc_lat(&ts->clat_stat[ddir], &min, &max, &mean, &dev)) {
- const char *base = "(usec)";
- char *minp, *maxp;
-
- if (!usec_to_msec(&min, &max, &mean, &dev))
- base = "(msec)";
-
- minp = num2str(min, 6, 1, 0, 0);
- maxp = num2str(max, 6, 1, 0, 0);
-
- log_info(" clat %s: min=%s, max=%s, avg=%5.02f,"
- " stdev=%5.02f\n", base, minp, maxp, mean, dev);
-
- free(minp);
- free(maxp);
- }
- if (calc_lat(&ts->lat_stat[ddir], &min, &max, &mean, &dev)) {
- const char *base = "(usec)";
- char *minp, *maxp;
-
- if (!usec_to_msec(&min, &max, &mean, &dev))
- base = "(msec)";
-
- minp = num2str(min, 6, 1, 0, 0);
- maxp = num2str(max, 6, 1, 0, 0);
-
- log_info(" lat %s: min=%s, max=%s, avg=%5.02f,"
- " stdev=%5.02f\n", base, minp, maxp, mean, dev);
+ if (calc_lat(&ts->slat_stat[ddir], &min, &max, &mean, &dev))
+ display_lat("slat", min, max, mean, dev);
+ if (calc_lat(&ts->clat_stat[ddir], &min, &max, &mean, &dev))
+ display_lat("clat", min, max, mean, dev);
+ if (calc_lat(&ts->lat_stat[ddir], &min, &max, &mean, &dev))
+ display_lat(" lat", min, max, mean, dev);
- free(minp);
- free(maxp);
- }
if (ts->clat_percentiles) {
show_clat_percentiles(ts->io_u_plat[ddir],
ts->clat_stat[ddir].samples,
ts->percentile_precision);
}
if (calc_lat(&ts->bw_stat[ddir], &min, &max, &mean, &dev)) {
- double p_of_agg = 100.0;
- const char *bw_str = "KB";
+ double p_of_agg = 100.0, fkb_base = (double)rs->kb_base;
+ const char *bw_str = (rs->unit_base == 1 ? "Kbit" : "KB");
+
+ if (rs->unit_base == 1) {
+ min *= 8.0;
+ max *= 8.0;
+ mean *= 8.0;
+ dev *= 8.0;
+ }
if (rs->agg[ddir]) {
p_of_agg = mean * 100 / (double) rs->agg[ddir];
p_of_agg = 100.0;
}
- if (mean > 999999.9) {
- min /= 1000.0;
- max /= 1000.0;
- mean /= 1000.0;
- dev /= 1000.0;
- bw_str = "MB";
+ if (mean > fkb_base * fkb_base) {
+ min /= fkb_base;
+ max /= fkb_base;
+ mean /= fkb_base;
+ dev /= fkb_base;
+ bw_str = (rs->unit_base == 1 ? "Mbit" : "MB");
}
- log_info(" bw (%s/s) : min=%5lu, max=%5lu, per=%3.2f%%,"
+ log_info(" bw (%-4s/s): min=%5lu, max=%5lu, per=%3.2f%%,"
" avg=%5.02f, stdev=%5.02f\n", bw_str, min, max,
p_of_agg, mean, dev);
}
show_lat(io_u_lat_m, FIO_IO_U_LAT_M_NR, ranges, "msec");
}
-static void show_latencies(double *io_u_lat_u, double *io_u_lat_m)
+static void show_latencies(struct thread_stat *ts)
{
+ double io_u_lat_u[FIO_IO_U_LAT_U_NR];
+ double io_u_lat_m[FIO_IO_U_LAT_M_NR];
+
+ stat_calc_lat_u(ts, io_u_lat_u);
+ stat_calc_lat_m(ts, io_u_lat_m);
+
show_lat_u(io_u_lat_u);
show_lat_m(io_u_lat_m);
}
double usr_cpu, sys_cpu;
unsigned long runtime;
double io_u_dist[FIO_IO_U_MAP_NR];
- double io_u_lat_u[FIO_IO_U_LAT_U_NR];
- double io_u_lat_m[FIO_IO_U_LAT_M_NR];
time_t time_p;
char time_buf[64];
if (ts->io_bytes[DDIR_TRIM])
show_ddir_status(rs, ts, DDIR_TRIM);
- stat_calc_lat_u(ts, io_u_lat_u);
- stat_calc_lat_m(ts, io_u_lat_m);
- show_latencies(io_u_lat_u, io_u_lat_m);
+ show_latencies(ts);
runtime = ts->total_run_time;
if (runtime) {
log_info(" cpu : usr=%3.2f%%, sys=%3.2f%%, ctx=%lu, majf=%lu,"
" minf=%lu\n", usr_cpu, sys_cpu, ts->ctx, ts->majf, ts->minf);
- stat_calc_dist(ts->io_u_map, ts_total_io_u(ts), io_u_dist);
+ stat_calc_dist(ts->io_u_map, ddir_rw_sum(ts->total_io_u), io_u_dist);
log_info(" IO depths : 1=%3.1f%%, 2=%3.1f%%, 4=%3.1f%%, 8=%3.1f%%,"
" 16=%3.1f%%, 32=%3.1f%%, >=64=%3.1f%%\n", io_u_dist[0],
io_u_dist[1], io_u_dist[2],
}
static void show_thread_status_terse_v2(struct thread_stat *ts,
- struct group_run_stats *rs)
+ struct group_run_stats *rs)
{
double io_u_dist[FIO_IO_U_MAP_NR];
double io_u_lat_u[FIO_IO_U_LAT_U_NR];
ts->minf);
/* Calc % distribution of IO depths, usecond, msecond latency */
- stat_calc_dist(ts->io_u_map, ts_total_io_u(ts), io_u_dist);
+ stat_calc_dist(ts->io_u_map, ddir_rw_sum(ts->total_io_u), io_u_dist);
stat_calc_lat_u(ts, io_u_lat_u);
stat_calc_lat_m(ts, io_u_lat_m);
ts->minf);
/* Calc % distribution of IO depths, usecond, msecond latency */
- stat_calc_dist(ts->io_u_map, ts_total_io_u(ts), io_u_dist);
+ stat_calc_dist(ts->io_u_map, ddir_rw_sum(ts->total_io_u), io_u_dist);
stat_calc_lat_u(ts, io_u_lat_u);
stat_calc_lat_m(ts, io_u_lat_m);
/* Calc % distribution of IO depths, usecond, msecond latency */
- stat_calc_dist(ts->io_u_map, ts_total_io_u(ts), io_u_dist);
+ stat_calc_dist(ts->io_u_map, ddir_rw_sum(ts->total_io_u), io_u_dist);
stat_calc_lat_u(ts, io_u_lat_u);
stat_calc_lat_m(ts, io_u_lat_m);
struct thread_stat *threadstats, *ts;
int i, j, nr_ts, last_ts, idx;
int kb_base_warned = 0;
+ int unit_base_warned = 0;
struct json_object *root = NULL;
struct json_array *array = NULL;
else
memset(ts->description, 0, FIO_JOBNAME_SIZE);
+ /*
+ * If multiple entries in this group, this is
+ * the first member.
+ */
+ ts->thread_number = td->thread_number;
ts->groupid = td->groupid;
/*
ts->pid = td->pid;
ts->kb_base = td->o.kb_base;
+ ts->unit_base = td->o.unit_base;
ts->unified_rw_rep = td->o.unified_rw_rep;
} else if (ts->kb_base != td->o.kb_base && !kb_base_warned) {
log_info("fio: kb_base differs for jobs in group, using"
" %u as the base\n", ts->kb_base);
kb_base_warned = 1;
+ } else if (ts->unit_base != td->o.unit_base && !unit_base_warned) {
+ log_info("fio: unit_base differs for jobs in group, using"
+ " %u as the base\n", ts->unit_base);
+ unit_base_warned = 1;
}
ts->continue_on_error = td->o.continue_on_error;
ts = &threadstats[i];
rs = &runstats[ts->groupid];
rs->kb_base = ts->kb_base;
+ rs->unit_base = ts->unit_base;
rs->unified_rw_rep += ts->unified_rw_rep;
for (j = 0; j < DDIR_RWDIR_CNT; j++) {
#ifndef FIO_STAT_H
#define FIO_STAT_H
+#include "iolog.h"
+
struct group_run_stats {
uint64_t max_run[DDIR_RWDIR_CNT], min_run[DDIR_RWDIR_CNT];
uint64_t max_bw[DDIR_RWDIR_CNT], min_bw[DDIR_RWDIR_CNT];
uint64_t io_kb[DDIR_RWDIR_CNT];
uint64_t agg[DDIR_RWDIR_CNT];
uint32_t kb_base;
+ uint32_t unit_base;
uint32_t groupid;
uint32_t unified_rw_rep;
};
char name[FIO_JOBNAME_SIZE];
char verror[FIO_VERROR_SIZE];
uint32_t error;
+ uint32_t thread_number;
uint32_t groupid;
uint32_t pid;
char description[FIO_JOBNAME_SIZE];
uint32_t first_error;
uint32_t kb_base;
+ uint32_t unit_base;
};
struct jobs_eta {
uint32_t nr_ramp;
uint32_t nr_pending;
uint32_t files_open;
- uint32_t m_rate, t_rate;
- uint32_t m_iops, t_iops;
+ uint32_t m_rate[DDIR_RWDIR_CNT], t_rate[DDIR_RWDIR_CNT];
+ uint32_t m_iops[DDIR_RWDIR_CNT], t_iops[DDIR_RWDIR_CNT];
uint32_t rate[DDIR_RWDIR_CNT];
uint32_t iops[DDIR_RWDIR_CNT];
uint64_t elapsed_sec;
uint64_t eta_sec;
uint32_t is_pow2;
+ uint32_t unit_base;
/*
* Network 'copy' of run_str[]
extern void sum_group_stats(struct group_run_stats *dst, struct group_run_stats *src);
extern void init_thread_stat(struct thread_stat *ts);
extern void init_group_run_stat(struct group_run_stats *gs);
+extern void eta_to_str(char *str, unsigned long eta_sec);
+extern int calc_lat(struct io_stat *is, unsigned long *min, unsigned long *max, double *mean, double *dev);
+extern unsigned int calc_clat_percentiles(unsigned int *io_u_plat, unsigned long nr, fio_fp64_t *plist, unsigned int **output, unsigned int *maxv, unsigned int *minv);
+extern void stat_calc_lat_m(struct thread_stat *ts, double *io_u_lat);
+extern void stat_calc_lat_u(struct thread_stat *ts, double *io_u_lat);
+extern void stat_calc_dist(unsigned int *map, unsigned long total, double *io_u_dist);
+
+static inline int usec_to_msec(unsigned long *min, unsigned long *max,
+ double *mean, double *dev)
+{
+ if (*min > 1000 && *max > 1000 && *mean > 1000.0 && *dev > 1000.0) {
+ *min /= 1000;
+ *max /= 1000;
+ *mean /= 1000.0;
+ *dev /= 1000.0;
+ return 0;
+ }
+
+ return 1;
+}
#endif
--- /dev/null
+#ifndef FIO_THREAD_OPTIONS_H
+#define FIO_THREAD_OPTIONS_H
+
+#include "arch/arch.h"
+#include "os/os.h"
+#include "stat.h"
+#include "gettime.h"
+
+/*
+ * What type of allocation to use for io buffers
+ */
+enum fio_memtype {
+ MEM_MALLOC = 0, /* ordinary malloc */
+ MEM_SHM, /* use shared memory segments */
+ MEM_SHMHUGE, /* use shared memory segments with huge pages */
+	MEM_MMAP,		/* use anonymous mmap */
+ MEM_MMAPHUGE, /* memory mapped huge file */
+};
+
+/*
+ * What type of errors to continue on when continue_on_error is used
+ */
+enum error_type_bit {
+ ERROR_TYPE_READ_BIT = 0,
+ ERROR_TYPE_WRITE_BIT = 1,
+ ERROR_TYPE_VERIFY_BIT = 2,
+ ERROR_TYPE_CNT = 3,
+};
+
+#define ERROR_STR_MAX 128
+
+enum error_type {
+ ERROR_TYPE_NONE = 0,
+ ERROR_TYPE_READ = 1 << ERROR_TYPE_READ_BIT,
+ ERROR_TYPE_WRITE = 1 << ERROR_TYPE_WRITE_BIT,
+ ERROR_TYPE_VERIFY = 1 << ERROR_TYPE_VERIFY_BIT,
+ ERROR_TYPE_ANY = 0xffff,
+};
+
+#define BSSPLIT_MAX 64
+
+struct bssplit {
+ uint32_t bs;
+ uint32_t perc;
+};
+
+struct thread_options {
+ int pad;
+ char *description;
+ char *name;
+ char *directory;
+ char *filename;
++ char *filename_format;
+ char *opendir;
+ char *ioengine;
+ char *mmapfile;
+ enum td_ddir td_ddir;
+ unsigned int rw_seq;
+ unsigned int kb_base;
++ unsigned int unit_base;
+ unsigned int ddir_seq_nr;
+ long ddir_seq_add;
+ unsigned int iodepth;
+ unsigned int iodepth_low;
+ unsigned int iodepth_batch;
+ unsigned int iodepth_batch_complete;
+
+ unsigned long long size;
+ unsigned int size_percent;
+ unsigned int fill_device;
+ unsigned long long file_size_low;
+ unsigned long long file_size_high;
+ unsigned long long start_offset;
+
+ unsigned int bs[DDIR_RWDIR_CNT];
+ unsigned int ba[DDIR_RWDIR_CNT];
+ unsigned int min_bs[DDIR_RWDIR_CNT];
+ unsigned int max_bs[DDIR_RWDIR_CNT];
+ struct bssplit *bssplit[DDIR_RWDIR_CNT];
+ unsigned int bssplit_nr[DDIR_RWDIR_CNT];
+
+ int *ignore_error[ERROR_TYPE_CNT];
+ unsigned int ignore_error_nr[ERROR_TYPE_CNT];
+ unsigned int error_dump;
+
+ unsigned int nr_files;
+ unsigned int open_files;
+ enum file_lock_mode file_lock_mode;
+
+ unsigned int odirect;
+ unsigned int invalidate_cache;
+ unsigned int create_serialize;
+ unsigned int create_fsync;
+ unsigned int create_on_open;
+ unsigned int create_only;
+ unsigned int end_fsync;
+ unsigned int pre_read;
+ unsigned int sync_io;
+ unsigned int verify;
+ unsigned int do_verify;
+ unsigned int verifysort;
+ unsigned int verifysort_nr;
+ unsigned int verify_interval;
+ unsigned int verify_offset;
+ char verify_pattern[MAX_PATTERN_SIZE];
+ unsigned int verify_pattern_bytes;
+ unsigned int verify_fatal;
+ unsigned int verify_dump;
+ unsigned int verify_async;
+ unsigned long long verify_backlog;
+ unsigned int verify_batch;
+ unsigned int experimental_verify;
+ unsigned int use_thread;
+ unsigned int unlink;
+ unsigned int do_disk_util;
+ unsigned int override_sync;
+ unsigned int rand_repeatable;
+ unsigned int use_os_rand;
+ unsigned int log_avg_msec;
+ unsigned int norandommap;
+ unsigned int softrandommap;
+ unsigned int bs_unaligned;
+ unsigned int fsync_on_close;
+
+ unsigned int random_distribution;
+ fio_fp64_t zipf_theta;
+ fio_fp64_t pareto_h;
+
+ unsigned int random_generator;
+
+ unsigned int hugepage_size;
+ unsigned int rw_min_bs;
+ unsigned int thinktime;
+ unsigned int thinktime_spin;
+ unsigned int thinktime_blocks;
+ unsigned int fsync_blocks;
+ unsigned int fdatasync_blocks;
+ unsigned int barrier_blocks;
+ unsigned long long start_delay;
+ unsigned long long timeout;
+ unsigned long long ramp_time;
+ unsigned int overwrite;
+ unsigned int bw_avg_time;
+ unsigned int iops_avg_time;
+ unsigned int loops;
+ unsigned long long zone_range;
+ unsigned long long zone_size;
+ unsigned long long zone_skip;
+ unsigned long long lockmem;
+ enum fio_memtype mem_type;
+ unsigned int mem_align;
+
+ unsigned max_latency;
+
+ unsigned int stonewall;
+ unsigned int new_group;
+ unsigned int numjobs;
+ os_cpu_mask_t cpumask;
+ unsigned int cpumask_set;
+ os_cpu_mask_t verify_cpumask;
+ unsigned int verify_cpumask_set;
+#ifdef CONFIG_LIBNUMA
+ struct bitmask *numa_cpunodesmask;
+ unsigned int numa_cpumask_set;
+ unsigned short numa_mem_mode;
+ unsigned int numa_mem_prefer_node;
+ struct bitmask *numa_memnodesmask;
+ unsigned int numa_memmask_set;
+#endif
+ unsigned int iolog;
+ unsigned int rwmixcycle;
+ unsigned int rwmix[2];
+ unsigned int nice;
+ unsigned int ioprio;
+ unsigned int ioprio_class;
+ unsigned int file_service_type;
+ unsigned int group_reporting;
+ unsigned int fadvise_hint;
+ enum fio_fallocate_mode fallocate_mode;
+ unsigned int zero_buffers;
+ unsigned int refill_buffers;
+ unsigned int scramble_buffers;
+ unsigned int compress_percentage;
+ unsigned int compress_chunk;
+ unsigned int time_based;
+ unsigned int disable_lat;
+ unsigned int disable_clat;
+ unsigned int disable_slat;
+ unsigned int disable_bw;
+ unsigned int unified_rw_rep;
+ unsigned int gtod_reduce;
+ unsigned int gtod_cpu;
+ unsigned int gtod_offload;
+ enum fio_cs clocksource;
+ unsigned int no_stall;
+ unsigned int trim_percentage;
+ unsigned int trim_batch;
+ unsigned int trim_zero;
+ unsigned long long trim_backlog;
+ unsigned int clat_percentiles;
+ unsigned int percentile_precision; /* digits after decimal for percentiles */
+ fio_fp64_t percentile_list[FIO_IO_U_LIST_MAX_LEN];
+
+ char *read_iolog_file;
+ char *write_iolog_file;
+ char *bw_log_file;
+ char *lat_log_file;
+ char *iops_log_file;
+ char *replay_redirect;
+
+ /*
+ * Pre-run and post-run shell
+ */
+ char *exec_prerun;
+ char *exec_postrun;
+
+ unsigned int rate[DDIR_RWDIR_CNT];
+ unsigned int ratemin[DDIR_RWDIR_CNT];
+ unsigned int ratecycle;
+ unsigned int rate_iops[DDIR_RWDIR_CNT];
+ unsigned int rate_iops_min[DDIR_RWDIR_CNT];
+
+ char *ioscheduler;
+
+ /*
+ * I/O Error handling
+ */
+ enum error_type continue_on_error;
+
+ /*
+ * Benchmark profile type
+ */
+ char *profile;
+
+ /*
+ * blkio cgroup support
+ */
+ char *cgroup;
+ unsigned int cgroup_weight;
+ unsigned int cgroup_nodelete;
+
+ unsigned int uid;
+ unsigned int gid;
+
+ int flow_id;
+ int flow;
+ int flow_watermark;
+ unsigned int flow_sleep;
+
+ unsigned long long offset_increment;
+
+ unsigned int sync_file_range;
+};
+
+#define FIO_TOP_STR_MAX 256
+
+struct thread_options_pack {
+ uint8_t description[FIO_TOP_STR_MAX];
+ uint8_t name[FIO_TOP_STR_MAX];
+ uint8_t directory[FIO_TOP_STR_MAX];
+ uint8_t filename[FIO_TOP_STR_MAX];
++ uint8_t filename_format[FIO_TOP_STR_MAX];
+ uint8_t opendir[FIO_TOP_STR_MAX];
+ uint8_t ioengine[FIO_TOP_STR_MAX];
+ uint8_t mmapfile[FIO_TOP_STR_MAX];
+ uint32_t td_ddir;
+ uint32_t rw_seq;
+ uint32_t kb_base;
++ uint32_t unit_base;
+ uint32_t ddir_seq_nr;
+ uint64_t ddir_seq_add;
+ uint32_t iodepth;
+ uint32_t iodepth_low;
+ uint32_t iodepth_batch;
+ uint32_t iodepth_batch_complete;
+
+ uint64_t size;
+ uint32_t size_percent;
+ uint32_t fill_device;
+ uint64_t file_size_low;
+ uint64_t file_size_high;
+ uint64_t start_offset;
+
+ uint32_t bs[DDIR_RWDIR_CNT];
+ uint32_t ba[DDIR_RWDIR_CNT];
+ uint32_t min_bs[DDIR_RWDIR_CNT];
+ uint32_t max_bs[DDIR_RWDIR_CNT];
+ struct bssplit bssplit[DDIR_RWDIR_CNT][BSSPLIT_MAX];
+ uint32_t bssplit_nr[DDIR_RWDIR_CNT];
+
+ uint32_t ignore_error[ERROR_TYPE_CNT][ERROR_STR_MAX];
+ uint32_t ignore_error_nr[ERROR_TYPE_CNT];
+ uint32_t error_dump;
+
+ uint32_t nr_files;
+ uint32_t open_files;
+ uint32_t file_lock_mode;
+
+ uint32_t odirect;
+ uint32_t invalidate_cache;
+ uint32_t create_serialize;
+ uint32_t create_fsync;
+ uint32_t create_on_open;
+ uint32_t create_only;
+ uint32_t end_fsync;
+ uint32_t pre_read;
+ uint32_t sync_io;
+ uint32_t verify;
+ uint32_t do_verify;
+ uint32_t verifysort;
+ uint32_t verifysort_nr;
+ uint32_t verify_interval;
+ uint32_t verify_offset;
+ uint8_t verify_pattern[MAX_PATTERN_SIZE];
+ uint32_t verify_pattern_bytes;
+ uint32_t verify_fatal;
+ uint32_t verify_dump;
+ uint32_t verify_async;
+ uint64_t verify_backlog;
+ uint32_t verify_batch;
+ uint32_t experimental_verify;
+ uint32_t use_thread;
+ uint32_t unlink;
+ uint32_t do_disk_util;
+ uint32_t override_sync;
+ uint32_t rand_repeatable;
+ uint32_t use_os_rand;
+ uint32_t log_avg_msec;
+ uint32_t norandommap;
+ uint32_t softrandommap;
+ uint32_t bs_unaligned;
+ uint32_t fsync_on_close;
+
+ uint32_t random_distribution;
+ fio_fp64_t zipf_theta;
+ fio_fp64_t pareto_h;
+
+ uint32_t random_generator;
+
+ uint32_t hugepage_size;
+ uint32_t rw_min_bs;
+ uint32_t thinktime;
+ uint32_t thinktime_spin;
+ uint32_t thinktime_blocks;
+ uint32_t fsync_blocks;
+ uint32_t fdatasync_blocks;
+ uint32_t barrier_blocks;
+ uint64_t start_delay;
+ uint64_t timeout;
+ uint64_t ramp_time;
+ uint32_t overwrite;
+ uint32_t bw_avg_time;
+ uint32_t iops_avg_time;
+ uint32_t loops;
+ uint64_t zone_range;
+ uint64_t zone_size;
+ uint64_t zone_skip;
+ uint64_t lockmem;
+ uint32_t mem_type;
+ uint32_t mem_align;
+
+ uint32_t max_latency;
+
+ uint32_t stonewall;
+ uint32_t new_group;
+ uint32_t numjobs;
+ uint8_t cpumask[FIO_TOP_STR_MAX];
+ uint32_t cpumask_set;
+ uint8_t verify_cpumask[FIO_TOP_STR_MAX];
+ uint32_t verify_cpumask_set;
+ uint32_t iolog;
+ uint32_t rwmixcycle;
+ uint32_t rwmix[2];
+ uint32_t nice;
+ uint32_t ioprio;
+ uint32_t ioprio_class;
+ uint32_t file_service_type;
+ uint32_t group_reporting;
+ uint32_t fadvise_hint;
+ uint32_t fallocate_mode;
+ uint32_t zero_buffers;
+ uint32_t refill_buffers;
+ uint32_t scramble_buffers;
+ unsigned int compress_percentage;
+ unsigned int compress_chunk;
+ uint32_t time_based;
+ uint32_t disable_lat;
+ uint32_t disable_clat;
+ uint32_t disable_slat;
+ uint32_t disable_bw;
+ uint32_t unified_rw_rep;
+ uint32_t gtod_reduce;
+ uint32_t gtod_cpu;
+ uint32_t gtod_offload;
+ uint32_t clocksource;
+ uint32_t no_stall;
+ uint32_t trim_percentage;
+ uint32_t trim_batch;
+ uint32_t trim_zero;
+ uint64_t trim_backlog;
+ uint32_t clat_percentiles;
+ uint32_t percentile_precision;
+ fio_fp64_t percentile_list[FIO_IO_U_LIST_MAX_LEN];
+
+ uint8_t read_iolog_file[FIO_TOP_STR_MAX];
+ uint8_t write_iolog_file[FIO_TOP_STR_MAX];
+ uint8_t bw_log_file[FIO_TOP_STR_MAX];
+ uint8_t lat_log_file[FIO_TOP_STR_MAX];
+ uint8_t iops_log_file[FIO_TOP_STR_MAX];
+ uint8_t replay_redirect[FIO_TOP_STR_MAX];
+
+ /*
+ * Pre-run and post-run shell
+ */
+ uint8_t exec_prerun[FIO_TOP_STR_MAX];
+ uint8_t exec_postrun[FIO_TOP_STR_MAX];
+
+ uint32_t rate[DDIR_RWDIR_CNT];
+ uint32_t ratemin[DDIR_RWDIR_CNT];
+ uint32_t ratecycle;
+ uint32_t rate_iops[DDIR_RWDIR_CNT];
+ uint32_t rate_iops_min[DDIR_RWDIR_CNT];
+
+ uint8_t ioscheduler[FIO_TOP_STR_MAX];
+
+ /*
+ * I/O Error handling
+ */
+ uint32_t continue_on_error;
+
+ /*
+ * Benchmark profile type
+ */
+ uint8_t profile[FIO_TOP_STR_MAX];
+
+ /*
+ * blkio cgroup support
+ */
+ uint8_t cgroup[FIO_TOP_STR_MAX];
+ uint32_t cgroup_weight;
+ uint32_t cgroup_nodelete;
+
+ uint32_t uid;
+ uint32_t gid;
+
+ int32_t flow_id;
+ int32_t flow;
+ int32_t flow_watermark;
+ uint32_t flow_sleep;
+
+ uint64_t offset_increment;
+
+ uint32_t sync_file_range;
+} __attribute__((packed));
+
+extern void convert_thread_options_to_cpu(struct thread_options *o, struct thread_options_pack *top);
+extern void convert_thread_options_to_net(struct thread_options_pack *top, struct thread_options *);
+extern int fio_test_cconv(struct thread_options *);
+extern void options_default_fill(struct thread_options *o);
+
+#endif