--- /dev/null
+#include <string.h>
+
+#include "thread_options.h"
+
+/*
+ * Unpack a wire-format string into a freshly strdup()'d CPU-side string.
+ * An empty source leaves *dst untouched (presumably the destination was
+ * zeroed by the caller -- NOTE(review): confirm callers memset first).
+ * NOTE(review): strdup() failure is not checked; *dst stays unset on OOM.
+ */
+static void string_to_cpu(char **dst, const uint8_t *src)
+{
+ const char *__src = (const char *) src;
+
+ if (strlen(__src))
+ *dst = strdup(__src);
+}
+
+/*
+ * Pack a CPU-side string into the wire buffer; a NULL source becomes the
+ * empty string.
+ * NOTE(review): strcpy() is unbounded -- dst is assumed to be at least
+ * FIO_TOP_STR_MAX bytes (the pack-struct field size); verify that all
+ * packed sources are length-limited before reaching this point.
+ */
+static void string_to_net(uint8_t *dst, const char *src)
+{
+ if (src)
+ strcpy((char *) dst, src);
+ else
+ dst[0] = '\0';
+}
+
+/*
+ * Convert a packed little-endian thread_options_pack (wire format, as
+ * received from the network) into a native-endian struct thread_options.
+ * Inverse of convert_thread_options_to_net(). Strings are strdup()'d and
+ * bssplit tables are malloc()'d.
+ * NOTE(review): allocation failures are not checked -- presumably fio
+ * aborts on OOM elsewhere; confirm.
+ */
+void convert_thread_options_to_cpu(struct thread_options *o,
+ struct thread_options_pack *top)
+{
+ int i, j;
+
+ string_to_cpu(&o->description, top->description);
+ string_to_cpu(&o->name, top->name);
+ string_to_cpu(&o->directory, top->directory);
+ string_to_cpu(&o->filename, top->filename);
+ string_to_cpu(&o->opendir, top->opendir);
+ string_to_cpu(&o->ioengine, top->ioengine);
+ string_to_cpu(&o->read_iolog_file, top->read_iolog_file);
+ string_to_cpu(&o->write_iolog_file, top->write_iolog_file);
+ string_to_cpu(&o->bw_log_file, top->bw_log_file);
+ string_to_cpu(&o->lat_log_file, top->lat_log_file);
+ string_to_cpu(&o->iops_log_file, top->iops_log_file);
+ string_to_cpu(&o->replay_redirect, top->replay_redirect);
+ string_to_cpu(&o->exec_prerun, top->exec_prerun);
+ string_to_cpu(&o->exec_postrun, top->exec_postrun);
+ string_to_cpu(&o->ioscheduler, top->ioscheduler);
+ string_to_cpu(&o->profile, top->profile);
+ string_to_cpu(&o->cgroup, top->cgroup);
+
+ o->td_ddir = le32_to_cpu(top->td_ddir);
+ o->rw_seq = le32_to_cpu(top->rw_seq);
+ o->kb_base = le32_to_cpu(top->kb_base);
+ o->ddir_seq_nr = le32_to_cpu(top->ddir_seq_nr);
+ o->ddir_seq_add = le64_to_cpu(top->ddir_seq_add);
+ o->iodepth = le32_to_cpu(top->iodepth);
+ o->iodepth_low = le32_to_cpu(top->iodepth_low);
+ o->iodepth_batch = le32_to_cpu(top->iodepth_batch);
+ o->iodepth_batch_complete = le32_to_cpu(top->iodepth_batch_complete);
+ o->size = le64_to_cpu(top->size);
+ o->size_percent = le32_to_cpu(top->size_percent);
+ o->fill_device = le32_to_cpu(top->fill_device);
+ o->file_size_low = le64_to_cpu(top->file_size_low);
+ o->file_size_high = le64_to_cpu(top->file_size_high);
+ o->start_offset = le64_to_cpu(top->start_offset);
+
+ /* per-data-direction (read/write) fields */
+ for (i = 0; i < 2; i++) {
+ o->bs[i] = le32_to_cpu(top->bs[i]);
+ o->ba[i] = le32_to_cpu(top->ba[i]);
+ o->min_bs[i] = le32_to_cpu(top->min_bs[i]);
+ o->max_bs[i] = le32_to_cpu(top->max_bs[i]);
+ o->bssplit_nr[i] = le32_to_cpu(top->bssplit_nr[i]);
+
+ if (o->bssplit_nr[i]) {
+ unsigned int bssplit_nr = o->bssplit_nr[i];
+
+ /*
+ * bssplit_nr comes off the wire; clamp it to the
+ * fixed size of top->bssplit[i][] (mirrors the clamp
+ * in convert_thread_options_to_net) so we never read
+ * past the packed array or malloc an attacker-chosen
+ * size.
+ */
+ if (bssplit_nr > BSSPLIT_MAX) {
+ log_err("fio: BSSPLIT_MAX is too small\n");
+ bssplit_nr = BSSPLIT_MAX;
+ o->bssplit_nr[i] = bssplit_nr;
+ }
+ o->bssplit[i] = malloc(bssplit_nr * sizeof(struct bssplit));
+ for (j = 0; j < bssplit_nr; j++) {
+ o->bssplit[i][j].bs = le32_to_cpu(top->bssplit[i][j].bs);
+ o->bssplit[i][j].perc = le32_to_cpu(top->bssplit[i][j].perc);
+ }
+ }
+
+ o->rwmix[i] = le32_to_cpu(top->rwmix[i]);
+ o->rate[i] = le32_to_cpu(top->rate[i]);
+ o->ratemin[i] = le32_to_cpu(top->ratemin[i]);
+ o->rate_iops[i] = le32_to_cpu(top->rate_iops[i]);
+ o->rate_iops_min[i] = le32_to_cpu(top->rate_iops_min[i]);
+ }
+
+ o->ratecycle = le32_to_cpu(top->ratecycle);
+ o->nr_files = le32_to_cpu(top->nr_files);
+ o->open_files = le32_to_cpu(top->open_files);
+ o->file_lock_mode = le32_to_cpu(top->file_lock_mode);
+ o->lockfile_batch = le32_to_cpu(top->lockfile_batch);
+ o->odirect = le32_to_cpu(top->odirect);
+ o->invalidate_cache = le32_to_cpu(top->invalidate_cache);
+ o->create_serialize = le32_to_cpu(top->create_serialize);
+ o->create_fsync = le32_to_cpu(top->create_fsync);
+ o->create_on_open = le32_to_cpu(top->create_on_open);
+ o->end_fsync = le32_to_cpu(top->end_fsync);
+ o->pre_read = le32_to_cpu(top->pre_read);
+ o->sync_io = le32_to_cpu(top->sync_io);
+ o->verify = le32_to_cpu(top->verify);
+ o->do_verify = le32_to_cpu(top->do_verify);
+ o->verifysort = le32_to_cpu(top->verifysort);
+ o->verify_interval = le32_to_cpu(top->verify_interval);
+ o->verify_offset = le32_to_cpu(top->verify_offset);
+
+ /* raw byte pattern, no endian conversion needed */
+ memcpy(o->verify_pattern, top->verify_pattern, MAX_PATTERN_SIZE);
+
+ o->verify_pattern_bytes = le32_to_cpu(top->verify_pattern_bytes);
+ o->verify_fatal = le32_to_cpu(top->verify_fatal);
+ o->verify_dump = le32_to_cpu(top->verify_dump);
+ o->verify_async = le32_to_cpu(top->verify_async);
+ o->verify_batch = le32_to_cpu(top->verify_batch);
+ o->use_thread = le32_to_cpu(top->use_thread);
+ o->unlink = le32_to_cpu(top->unlink);
+ o->do_disk_util = le32_to_cpu(top->do_disk_util);
+ o->override_sync = le32_to_cpu(top->override_sync);
+ o->rand_repeatable = le32_to_cpu(top->rand_repeatable);
+ o->use_os_rand = le32_to_cpu(top->use_os_rand);
+ o->write_lat_log = le32_to_cpu(top->write_lat_log);
+ o->write_bw_log = le32_to_cpu(top->write_bw_log);
+ o->write_iops_log = le32_to_cpu(top->write_iops_log);
+ o->log_avg_msec = le32_to_cpu(top->log_avg_msec);
+ o->norandommap = le32_to_cpu(top->norandommap);
+ o->softrandommap = le32_to_cpu(top->softrandommap);
+ o->bs_unaligned = le32_to_cpu(top->bs_unaligned);
+ o->fsync_on_close = le32_to_cpu(top->fsync_on_close);
+ o->hugepage_size = le32_to_cpu(top->hugepage_size);
+ o->rw_min_bs = le32_to_cpu(top->rw_min_bs);
+ o->thinktime = le32_to_cpu(top->thinktime);
+ o->thinktime_spin = le32_to_cpu(top->thinktime_spin);
+ o->thinktime_blocks = le32_to_cpu(top->thinktime_blocks);
+ o->fsync_blocks = le32_to_cpu(top->fsync_blocks);
+ o->fdatasync_blocks = le32_to_cpu(top->fdatasync_blocks);
+ o->barrier_blocks = le32_to_cpu(top->barrier_blocks);
+
+ o->verify_backlog = le64_to_cpu(top->verify_backlog);
+ o->start_delay = le64_to_cpu(top->start_delay);
+ o->timeout = le64_to_cpu(top->timeout);
+ o->ramp_time = le64_to_cpu(top->ramp_time);
+ o->zone_range = le64_to_cpu(top->zone_range);
+ o->zone_size = le64_to_cpu(top->zone_size);
+ o->zone_skip = le64_to_cpu(top->zone_skip);
+ o->offset_increment = le64_to_cpu(top->offset_increment);
+
+ o->overwrite = le32_to_cpu(top->overwrite);
+ o->bw_avg_time = le32_to_cpu(top->bw_avg_time);
+ o->iops_avg_time = le32_to_cpu(top->iops_avg_time);
+ o->loops = le32_to_cpu(top->loops);
+ o->mem_type = le32_to_cpu(top->mem_type);
+ o->mem_align = le32_to_cpu(top->mem_align);
+ o->stonewall = le32_to_cpu(top->stonewall);
+ o->new_group = le32_to_cpu(top->new_group);
+ o->numjobs = le32_to_cpu(top->numjobs);
+ o->cpumask_set = le32_to_cpu(top->cpumask_set);
+ o->verify_cpumask_set = le32_to_cpu(top->verify_cpumask_set);
+ o->iolog = le32_to_cpu(top->iolog);
+ o->rwmixcycle = le32_to_cpu(top->rwmixcycle);
+ o->nice = le32_to_cpu(top->nice);
+ o->file_service_type = le32_to_cpu(top->file_service_type);
+ o->group_reporting = le32_to_cpu(top->group_reporting);
+ o->fadvise_hint = le32_to_cpu(top->fadvise_hint);
+ o->fallocate_mode = le32_to_cpu(top->fallocate_mode);
+ o->zero_buffers = le32_to_cpu(top->zero_buffers);
+ o->refill_buffers = le32_to_cpu(top->refill_buffers);
+ o->scramble_buffers = le32_to_cpu(top->scramble_buffers);
+ o->time_based = le32_to_cpu(top->time_based);
+ o->disable_lat = le32_to_cpu(top->disable_lat);
+ o->disable_clat = le32_to_cpu(top->disable_clat);
+ o->disable_slat = le32_to_cpu(top->disable_slat);
+ o->disable_bw = le32_to_cpu(top->disable_bw);
+ o->gtod_reduce = le32_to_cpu(top->gtod_reduce);
+ o->gtod_cpu = le32_to_cpu(top->gtod_cpu);
+ o->gtod_offload = le32_to_cpu(top->gtod_offload);
+ o->clocksource = le32_to_cpu(top->clocksource);
+ o->no_stall = le32_to_cpu(top->no_stall);
+ o->trim_percentage = le32_to_cpu(top->trim_percentage);
+ o->trim_batch = le32_to_cpu(top->trim_batch);
+ o->trim_zero = le32_to_cpu(top->trim_zero);
+ o->clat_percentiles = le32_to_cpu(top->clat_percentiles);
+ o->overwrite_plist = le32_to_cpu(top->overwrite_plist);
+ o->cpuload = le32_to_cpu(top->cpuload);
+ o->cpucycle = le32_to_cpu(top->cpucycle);
+ o->continue_on_error = le32_to_cpu(top->continue_on_error);
+ o->cgroup_weight = le32_to_cpu(top->cgroup_weight);
+ o->cgroup_nodelete = le32_to_cpu(top->cgroup_nodelete);
+ o->uid = le32_to_cpu(top->uid);
+ o->gid = le32_to_cpu(top->gid);
+ o->flow_id = __le32_to_cpu(top->flow_id);
+ o->flow = __le32_to_cpu(top->flow);
+ o->flow_watermark = __le32_to_cpu(top->flow_watermark);
+ o->flow_sleep = le32_to_cpu(top->flow_sleep);
+ o->sync_file_range = le32_to_cpu(top->sync_file_range);
+ o->compress_percentage = le32_to_cpu(top->compress_percentage);
+ o->compress_chunk = le32_to_cpu(top->compress_chunk);
+
+ o->trim_backlog = le64_to_cpu(top->trim_backlog);
+
+ for (i = 0; i < FIO_IO_U_LIST_MAX_LEN; i++)
+ o->percentile_list[i].u.f = fio_uint64_to_double(le64_to_cpu(top->percentile_list[i].u.i));
+#if 0
+ uint8_t cpumask[FIO_TOP_STR_MAX];
+ uint8_t verify_cpumask[FIO_TOP_STR_MAX];
+#endif
+}
+
+/*
+ * Convert a native-endian struct thread_options into the packed
+ * little-endian thread_options_pack wire format, for sending over the
+ * network. Inverse of convert_thread_options_to_cpu(). bssplit tables
+ * larger than BSSPLIT_MAX are truncated with an error message.
+ */
+void convert_thread_options_to_net(struct thread_options_pack *top,
+ struct thread_options *o)
+{
+ int i, j;
+
+ string_to_net(top->description, o->description);
+ string_to_net(top->name, o->name);
+ string_to_net(top->directory, o->directory);
+ string_to_net(top->filename, o->filename);
+ string_to_net(top->opendir, o->opendir);
+ string_to_net(top->ioengine, o->ioengine);
+ string_to_net(top->read_iolog_file, o->read_iolog_file);
+ string_to_net(top->write_iolog_file, o->write_iolog_file);
+ string_to_net(top->bw_log_file, o->bw_log_file);
+ string_to_net(top->lat_log_file, o->lat_log_file);
+ string_to_net(top->iops_log_file, o->iops_log_file);
+ string_to_net(top->replay_redirect, o->replay_redirect);
+ string_to_net(top->exec_prerun, o->exec_prerun);
+ string_to_net(top->exec_postrun, o->exec_postrun);
+ string_to_net(top->ioscheduler, o->ioscheduler);
+ string_to_net(top->profile, o->profile);
+ string_to_net(top->cgroup, o->cgroup);
+
+ top->td_ddir = cpu_to_le32(o->td_ddir);
+ top->rw_seq = cpu_to_le32(o->rw_seq);
+ top->kb_base = cpu_to_le32(o->kb_base);
+ top->ddir_seq_nr = cpu_to_le32(o->ddir_seq_nr);
+ top->iodepth = cpu_to_le32(o->iodepth);
+ top->iodepth_low = cpu_to_le32(o->iodepth_low);
+ top->iodepth_batch = cpu_to_le32(o->iodepth_batch);
+ top->iodepth_batch_complete = cpu_to_le32(o->iodepth_batch_complete);
+ top->size_percent = cpu_to_le32(o->size_percent);
+ top->fill_device = cpu_to_le32(o->fill_device);
+ top->ratecycle = cpu_to_le32(o->ratecycle);
+ top->nr_files = cpu_to_le32(o->nr_files);
+ top->open_files = cpu_to_le32(o->open_files);
+ top->file_lock_mode = cpu_to_le32(o->file_lock_mode);
+ top->lockfile_batch = cpu_to_le32(o->lockfile_batch);
+ top->odirect = cpu_to_le32(o->odirect);
+ top->invalidate_cache = cpu_to_le32(o->invalidate_cache);
+ top->create_serialize = cpu_to_le32(o->create_serialize);
+ top->create_fsync = cpu_to_le32(o->create_fsync);
+ top->create_on_open = cpu_to_le32(o->create_on_open);
+ top->end_fsync = cpu_to_le32(o->end_fsync);
+ top->pre_read = cpu_to_le32(o->pre_read);
+ top->sync_io = cpu_to_le32(o->sync_io);
+ top->verify = cpu_to_le32(o->verify);
+ top->do_verify = cpu_to_le32(o->do_verify);
+ top->verifysort = cpu_to_le32(o->verifysort);
+ top->verify_interval = cpu_to_le32(o->verify_interval);
+ top->verify_offset = cpu_to_le32(o->verify_offset);
+ top->verify_pattern_bytes = cpu_to_le32(o->verify_pattern_bytes);
+ top->verify_fatal = cpu_to_le32(o->verify_fatal);
+ top->verify_dump = cpu_to_le32(o->verify_dump);
+ top->verify_async = cpu_to_le32(o->verify_async);
+ top->verify_batch = cpu_to_le32(o->verify_batch);
+ top->use_thread = cpu_to_le32(o->use_thread);
+ top->unlink = cpu_to_le32(o->unlink);
+ top->do_disk_util = cpu_to_le32(o->do_disk_util);
+ top->override_sync = cpu_to_le32(o->override_sync);
+ top->rand_repeatable = cpu_to_le32(o->rand_repeatable);
+ top->use_os_rand = cpu_to_le32(o->use_os_rand);
+ top->write_lat_log = cpu_to_le32(o->write_lat_log);
+ top->write_bw_log = cpu_to_le32(o->write_bw_log);
+ top->write_iops_log = cpu_to_le32(o->write_iops_log);
+ top->log_avg_msec = cpu_to_le32(o->log_avg_msec);
+ top->norandommap = cpu_to_le32(o->norandommap);
+ top->softrandommap = cpu_to_le32(o->softrandommap);
+ top->bs_unaligned = cpu_to_le32(o->bs_unaligned);
+ top->fsync_on_close = cpu_to_le32(o->fsync_on_close);
+ top->hugepage_size = cpu_to_le32(o->hugepage_size);
+ top->rw_min_bs = cpu_to_le32(o->rw_min_bs);
+ top->thinktime = cpu_to_le32(o->thinktime);
+ top->thinktime_spin = cpu_to_le32(o->thinktime_spin);
+ top->thinktime_blocks = cpu_to_le32(o->thinktime_blocks);
+ top->fsync_blocks = cpu_to_le32(o->fsync_blocks);
+ top->fdatasync_blocks = cpu_to_le32(o->fdatasync_blocks);
+ top->barrier_blocks = cpu_to_le32(o->barrier_blocks);
+ top->overwrite = cpu_to_le32(o->overwrite);
+ top->bw_avg_time = cpu_to_le32(o->bw_avg_time);
+ top->iops_avg_time = cpu_to_le32(o->iops_avg_time);
+ top->loops = cpu_to_le32(o->loops);
+ top->mem_type = cpu_to_le32(o->mem_type);
+ top->mem_align = cpu_to_le32(o->mem_align);
+ top->stonewall = cpu_to_le32(o->stonewall);
+ top->new_group = cpu_to_le32(o->new_group);
+ top->numjobs = cpu_to_le32(o->numjobs);
+ top->cpumask_set = cpu_to_le32(o->cpumask_set);
+ top->verify_cpumask_set = cpu_to_le32(o->verify_cpumask_set);
+ top->iolog = cpu_to_le32(o->iolog);
+ top->rwmixcycle = cpu_to_le32(o->rwmixcycle);
+ top->nice = cpu_to_le32(o->nice);
+ top->file_service_type = cpu_to_le32(o->file_service_type);
+ top->group_reporting = cpu_to_le32(o->group_reporting);
+ top->fadvise_hint = cpu_to_le32(o->fadvise_hint);
+ top->fallocate_mode = cpu_to_le32(o->fallocate_mode);
+ top->zero_buffers = cpu_to_le32(o->zero_buffers);
+ top->refill_buffers = cpu_to_le32(o->refill_buffers);
+ top->scramble_buffers = cpu_to_le32(o->scramble_buffers);
+ top->time_based = cpu_to_le32(o->time_based);
+ top->disable_lat = cpu_to_le32(o->disable_lat);
+ top->disable_clat = cpu_to_le32(o->disable_clat);
+ top->disable_slat = cpu_to_le32(o->disable_slat);
+ top->disable_bw = cpu_to_le32(o->disable_bw);
+ top->gtod_reduce = cpu_to_le32(o->gtod_reduce);
+ top->gtod_cpu = cpu_to_le32(o->gtod_cpu);
+ top->gtod_offload = cpu_to_le32(o->gtod_offload);
+ top->clocksource = cpu_to_le32(o->clocksource);
+ top->no_stall = cpu_to_le32(o->no_stall);
+ top->trim_percentage = cpu_to_le32(o->trim_percentage);
+ top->trim_batch = cpu_to_le32(o->trim_batch);
+ top->trim_zero = cpu_to_le32(o->trim_zero);
+ top->clat_percentiles = cpu_to_le32(o->clat_percentiles);
+ top->overwrite_plist = cpu_to_le32(o->overwrite_plist);
+ top->cpuload = cpu_to_le32(o->cpuload);
+ top->cpucycle = cpu_to_le32(o->cpucycle);
+ top->continue_on_error = cpu_to_le32(o->continue_on_error);
+ top->cgroup_weight = cpu_to_le32(o->cgroup_weight);
+ top->cgroup_nodelete = cpu_to_le32(o->cgroup_nodelete);
+ top->uid = cpu_to_le32(o->uid);
+ top->gid = cpu_to_le32(o->gid);
+ top->flow_id = __cpu_to_le32(o->flow_id);
+ top->flow = __cpu_to_le32(o->flow);
+ top->flow_watermark = __cpu_to_le32(o->flow_watermark);
+ top->flow_sleep = cpu_to_le32(o->flow_sleep);
+ top->sync_file_range = cpu_to_le32(o->sync_file_range);
+ top->compress_percentage = cpu_to_le32(o->compress_percentage);
+ top->compress_chunk = cpu_to_le32(o->compress_chunk);
+
+ /* per-data-direction (read/write) fields */
+ for (i = 0; i < 2; i++) {
+ top->bs[i] = cpu_to_le32(o->bs[i]);
+ top->ba[i] = cpu_to_le32(o->ba[i]);
+ top->min_bs[i] = cpu_to_le32(o->min_bs[i]);
+ top->max_bs[i] = cpu_to_le32(o->max_bs[i]);
+ top->bssplit_nr[i] = cpu_to_le32(o->bssplit_nr[i]);
+
+ if (o->bssplit_nr[i]) {
+ unsigned int bssplit_nr = o->bssplit_nr[i];
+
+ /* the packed array is fixed-size; truncate if needed */
+ if (bssplit_nr > BSSPLIT_MAX) {
+ log_err("fio: BSSPLIT_MAX is too small\n");
+ bssplit_nr = BSSPLIT_MAX;
+ }
+ for (j = 0; j < bssplit_nr; j++) {
+ top->bssplit[i][j].bs = cpu_to_le32(o->bssplit[i][j].bs);
+ top->bssplit[i][j].perc = cpu_to_le32(o->bssplit[i][j].perc);
+ }
+ }
+
+ top->rwmix[i] = cpu_to_le32(o->rwmix[i]);
+ top->rate[i] = cpu_to_le32(o->rate[i]);
+ top->ratemin[i] = cpu_to_le32(o->ratemin[i]);
+ top->rate_iops[i] = cpu_to_le32(o->rate_iops[i]);
+ top->rate_iops_min[i] = cpu_to_le32(o->rate_iops_min[i]);
+ }
+
+ /* raw byte pattern, no endian conversion needed */
+ memcpy(top->verify_pattern, o->verify_pattern, MAX_PATTERN_SIZE);
+
+ top->size = __cpu_to_le64(o->size);
+ top->verify_backlog = __cpu_to_le64(o->verify_backlog);
+ top->start_delay = __cpu_to_le64(o->start_delay);
+ top->timeout = __cpu_to_le64(o->timeout);
+ top->ramp_time = __cpu_to_le64(o->ramp_time);
+ top->zone_range = __cpu_to_le64(o->zone_range);
+ top->zone_size = __cpu_to_le64(o->zone_size);
+ top->zone_skip = __cpu_to_le64(o->zone_skip);
+ top->ddir_seq_add = __cpu_to_le64(o->ddir_seq_add);
+ top->file_size_low = __cpu_to_le64(o->file_size_low);
+ top->file_size_high = __cpu_to_le64(o->file_size_high);
+ top->start_offset = __cpu_to_le64(o->start_offset);
+ top->trim_backlog = __cpu_to_le64(o->trim_backlog);
+ top->offset_increment = __cpu_to_le64(o->offset_increment);
+
+ for (i = 0; i < FIO_IO_U_LIST_MAX_LEN; i++)
+ top->percentile_list[i].u.i = __cpu_to_le64(fio_double_to_uint64(o->percentile_list[i].u.f));
+#if 0
+ uint8_t cpumask[FIO_TOP_STR_MAX];
+ uint8_t verify_cpumask[FIO_TOP_STR_MAX];
+#endif
+
+}
+
+/*
+ * Basic conversion test. We'd really need to fill in more of the options
+ * to have a thorough test. Even better, we should auto-generate the
+ * converter functions...
+ */
+int fio_test_cconv(struct thread_options *__o)
+{
+ struct thread_options o;
+ struct thread_options_pack top1, top2;
+
+ memset(&top1, 0, sizeof(top1));
+ memset(&top2, 0, sizeof(top2));
+
+ /* round-trip: cpu -> net -> cpu -> net; both packs must match */
+ convert_thread_options_to_net(&top1, __o);
+ memset(&o, 0, sizeof(o));
+ convert_thread_options_to_cpu(&o, &top1);
+ convert_thread_options_to_net(&top2, &o);
+
+ /* 0 on success (packs bit-identical), non-zero on conversion loss */
+ return memcmp(&top1, &top2, sizeof(top1));
+}
qsort(bssplit, td->o.bssplit_nr[ddir], sizeof(struct bssplit), bs_cmp);
td->o.bssplit[ddir] = bssplit;
return 0;
-
}
static int str_bssplit_cb(void *data, const char *input)
return 0;
}
+/*
+ * Option grouping
+ */
+/* Table of option groups; iterated by opt_group_from_mask(). */
+static struct opt_group fio_opt_groups[] = {
+ {
+ .name = "Description",
+ .mask = FIO_OPT_G_DESC,
+ },
+ {
+ .name = "File",
+ .mask = FIO_OPT_G_FILE,
+ },
+ {
+ .name = "Misc",
+ .mask = FIO_OPT_G_MISC,
+ },
+ {
+ .name = "IO (main)",
+ .mask = FIO_OPT_G_IO,
+ },
+ {
+ .name = "IO direction",
+ .mask = FIO_OPT_G_IO_DDIR,
+ },
+ {
+ .name = "IO buffer",
+ .mask = FIO_OPT_G_IO_BUF,
+ },
+ {
+ .name = "Random",
+ .mask = FIO_OPT_G_RAND,
+ },
+ {
+ .name = "OS",
+ .mask = FIO_OPT_G_OS,
+ },
+ {
+ .name = "Memory",
+ .mask = FIO_OPT_G_MEM,
+ },
+ {
+ .name = "Verify",
+ .mask = FIO_OPT_G_VERIFY,
+ },
+ {
+ .name = "CPU",
+ .mask = FIO_OPT_G_CPU,
+ },
+ {
+ .name = "Log",
+ .mask = FIO_OPT_G_LOG,
+ },
+ {
+ .name = "Zone",
+ .mask = FIO_OPT_G_ZONE,
+ },
+ {
+ .name = "Cache",
+ .mask = FIO_OPT_G_CACHE,
+ },
+ {
+ .name = "Stat",
+ .mask = FIO_OPT_G_STAT,
+ },
+ {
+ .name = "Error",
+ .mask = FIO_OPT_G_ERR,
+ },
+ {
+ .name = "Job",
+ .mask = FIO_OPT_G_JOB,
+ },
+ /* sentinel: a NULL name terminates the table */
+ {
+ .name = NULL,
+ },
+};
+
+/*
+ * Return the first option group whose bit is set in *mask, and clear
+ * that bit from *mask -- so repeated calls consume the mask one group
+ * at a time until NULL is returned. Returns NULL for an invalid mask
+ * or when no (more) group bits match.
+ */
+struct opt_group *opt_group_from_mask(unsigned int *mask)
+{
+ struct opt_group *og;
+ int i;
+
+ if (*mask == FIO_OPT_G_INVALID)
+ return NULL;
+
+ for (i = 0; fio_opt_groups[i].name; i++) {
+ og = &fio_opt_groups[i];
+
+ if (*mask & og->mask) {
+ *mask &= ~(og->mask);
+ return og;
+ }
+ }
+
+ return NULL;
+}
+
/*
* Map of job/command line options
*/
-static struct fio_option options[FIO_MAX_OPTS] = {
+struct fio_option fio_options[FIO_MAX_OPTS] = {
{
.name = "description",
.type = FIO_OPT_STR_STORE,
.off1 = td_var_offset(description),
.help = "Text job description",
+ .category = FIO_OPT_G_DESC,
},
{
.name = "name",
.type = FIO_OPT_STR_STORE,
.off1 = td_var_offset(name),
.help = "Name of this job",
+ .category = FIO_OPT_G_DESC,
},
{
.name = "directory",
.off1 = td_var_offset(directory),
.cb = str_directory_cb,
.help = "Directory to store files in",
+ .category = FIO_OPT_G_FILE,
},
{
.name = "filename",
.cb = str_filename_cb,
.prio = -1, /* must come after "directory" */
.help = "File(s) to use for the workload",
+ .category = FIO_OPT_G_FILE,
},
{
.name = "kb_base",
.prio = 1,
.def = "1024",
.help = "How many bytes per KB for reporting (1000 or 1024)",
+ .category = FIO_OPT_G_MISC,
},
{
.name = "lockfile",
.help = "Lock file when doing IO to it",
.parent = "filename",
.def = "none",
+ .category = FIO_OPT_G_FILE,
.posval = {
{ .ival = "none",
.oval = FILE_LOCK_NONE,
.off1 = td_var_offset(opendir),
.cb = str_opendir_cb,
.help = "Recursively add files from this directory and down",
+ .category = FIO_OPT_G_FILE,
},
{
.name = "rw",
.help = "IO direction",
.def = "read",
.verify = rw_verify,
+ .category = FIO_OPT_G_IO_DDIR,
.posval = {
{ .ival = "read",
.oval = TD_DDIR_READ,
.off1 = td_var_offset(rw_seq),
.help = "IO offset generator modifier",
.def = "sequential",
+ .category = FIO_OPT_G_IO_DDIR,
.posval = {
{ .ival = "sequential",
.oval = RW_SEQ_SEQ,
.off1 = td_var_offset(ioengine),
.help = "IO engine to use",
.def = FIO_PREFERRED_ENGINE,
+ .category = FIO_OPT_G_IO,
.posval = {
{ .ival = "sync",
.help = "Use read/write",
.help = "Number of IO buffers to keep in flight",
.minval = 1,
.def = "1",
+ .category = FIO_OPT_G_IO,
},
{
.name = "iodepth_batch",
.parent = "iodepth",
.minval = 1,
.def = "1",
+ .category = FIO_OPT_G_IO,
},
{
.name = "iodepth_batch_complete",
.parent = "iodepth",
.minval = 0,
.def = "1",
+ .category = FIO_OPT_G_IO,
},
{
.name = "iodepth_low",
.off1 = td_var_offset(iodepth_low),
.help = "Low water mark for queuing depth",
.parent = "iodepth",
+ .category = FIO_OPT_G_IO,
},
{
.name = "size",
.type = FIO_OPT_STR_VAL,
.cb = str_size_cb,
.help = "Total size of device or files",
+ .category = FIO_OPT_G_IO,
},
{
.name = "fill_device",
.off1 = td_var_offset(fill_device),
.help = "Write until an ENOSPC error occurs",
.def = "0",
+ .category = FIO_OPT_G_IO,
},
{
.name = "filesize",
.off2 = td_var_offset(file_size_high),
.minval = 1,
.help = "Size of individual files",
+ .category = FIO_OPT_G_IO | FIO_OPT_G_FILE,
},
{
.name = "offset",
.off1 = td_var_offset(start_offset),
.help = "Start IO from this offset",
.def = "0",
+ .category = FIO_OPT_G_IO,
},
+ {
+ .name = "offset_increment",
+ .type = FIO_OPT_STR_VAL,
+ .off1 = td_var_offset(offset_increment),
+ .help = "What is the increment from one offset to the next",
+ .parent = "offset",
+ .def = "0",
+ /* same group as the parent "offset" option, for consistency */
+ .category = FIO_OPT_G_IO,
+ },
{
.name = "bs",
.alias = "blocksize",
.help = "Block size unit",
.def = "4k",
.parent = "rw",
+ .category = FIO_OPT_G_IO,
},
{
.name = "ba",
.minval = 1,
.help = "IO block offset alignment",
.parent = "rw",
+ .category = FIO_OPT_G_IO | FIO_OPT_G_IO_BUF,
},
{
.name = "bsrange",
.minval = 1,
.help = "Set block size range (in more detail than bs)",
.parent = "rw",
+ .category = FIO_OPT_G_IO,
},
{
.name = "bssplit",
.cb = str_bssplit_cb,
.help = "Set a specific mix of block sizes",
.parent = "rw",
+ .category = FIO_OPT_G_IO,
},
{
.name = "bs_unaligned",
.off1 = td_var_offset(bs_unaligned),
.help = "Don't sector align IO buffer sizes",
.parent = "rw",
+ .category = FIO_OPT_G_IO,
},
{
.name = "randrepeat",
.help = "Use repeatable random IO pattern",
.def = "1",
.parent = "rw",
+ .category = FIO_OPT_G_IO | FIO_OPT_G_RAND,
},
{
.name = "use_os_rand",
.help = "Set to use OS random generator",
.def = "0",
.parent = "rw",
+ .category = FIO_OPT_G_RAND,
},
{
.name = "norandommap",
.off1 = td_var_offset(norandommap),
.help = "Accept potential duplicate random blocks",
.parent = "rw",
+ .category = FIO_OPT_G_RAND,
},
{
.name = "softrandommap",
.help = "Set norandommap if randommap allocation fails",
.parent = "norandommap",
.def = "0",
+ .category = FIO_OPT_G_RAND,
},
{
.name = "nrfiles",
.off1 = td_var_offset(nr_files),
.help = "Split job workload between this number of files",
.def = "1",
+ .category = FIO_OPT_G_FILE,
},
{
.name = "openfiles",
.type = FIO_OPT_INT,
.off1 = td_var_offset(open_files),
.help = "Number of files to keep open at the same time",
+ .category = FIO_OPT_G_FILE,
},
{
.name = "file_service_type",
.off1 = td_var_offset(file_service_type),
.help = "How to select which file to service next",
.def = "roundrobin",
+ .category = FIO_OPT_G_FILE,
.posval = {
{ .ival = "random",
.oval = FIO_FSERVICE_RANDOM,
.off1 = td_var_offset(fallocate_mode),
.help = "Whether pre-allocation is performed when laying out files",
.def = "posix",
+ .category = FIO_OPT_G_FILE,
.posval = {
{ .ival = "none",
.oval = FIO_FALLOCATE_NONE,
.off1 = td_var_offset(fadvise_hint),
.help = "Use fadvise() to advise the kernel on IO pattern",
.def = "1",
+ .category = FIO_OPT_G_FILE,
},
{
.name = "fsync",
.off1 = td_var_offset(fsync_blocks),
.help = "Issue fsync for writes every given number of blocks",
.def = "0",
+ .category = FIO_OPT_G_FILE,
},
{
.name = "fdatasync",
.off1 = td_var_offset(fdatasync_blocks),
.help = "Issue fdatasync for writes every given number of blocks",
.def = "0",
+ .category = FIO_OPT_G_FILE,
},
{
.name = "write_barrier",
.off1 = td_var_offset(barrier_blocks),
.help = "Make every Nth write a barrier write",
.def = "0",
+ .category = FIO_OPT_G_IO,
},
#ifdef FIO_HAVE_SYNC_FILE_RANGE
{
.cb = str_sfr_cb,
.off1 = td_var_offset(sync_file_range),
.help = "Use sync_file_range()",
+ .category = FIO_OPT_G_FILE,
},
#endif
{
.off1 = td_var_offset(odirect),
.help = "Use O_DIRECT IO (negates buffered)",
.def = "0",
+ .category = FIO_OPT_G_IO,
},
{
.name = "buffered",
.neg = 1,
.help = "Use buffered IO (negates direct)",
.def = "1",
+ .category = FIO_OPT_G_IO,
},
{
.name = "overwrite",
.off1 = td_var_offset(overwrite),
.help = "When writing, set whether to overwrite current data",
.def = "0",
+ .category = FIO_OPT_G_IO | FIO_OPT_G_FILE,
},
{
.name = "loops",
.off1 = td_var_offset(loops),
.help = "Number of times to run the job",
.def = "1",
+ .category = FIO_OPT_G_MISC,
},
{
.name = "numjobs",
.off1 = td_var_offset(numjobs),
.help = "Duplicate this job this many times",
.def = "1",
+ .category = FIO_OPT_G_MISC,
},
{
.name = "startdelay",
.off1 = td_var_offset(start_delay),
.help = "Only start job when this period has passed",
.def = "0",
+ .category = FIO_OPT_G_MISC,
},
{
.name = "runtime",
.off1 = td_var_offset(timeout),
.help = "Stop workload when this amount of time has passed",
.def = "0",
+ .category = FIO_OPT_G_MISC,
},
{
.name = "time_based",
.type = FIO_OPT_STR_SET,
.off1 = td_var_offset(time_based),
.help = "Keep running until runtime/timeout is met",
+ .category = FIO_OPT_G_MISC,
},
{
.name = "ramp_time",
.type = FIO_OPT_STR_VAL_TIME,
.off1 = td_var_offset(ramp_time),
.help = "Ramp up time before measuring performance",
+ .category = FIO_OPT_G_MISC,
},
{
.name = "clocksource",
.cb = fio_clock_source_cb,
.off1 = td_var_offset(clocksource),
.help = "What type of timing source to use",
+ .category = FIO_OPT_G_OS,
.posval = {
{ .ival = "gettimeofday",
.oval = CS_GTOD,
.off1 = td_var_offset(mem_type),
.help = "Backing type for IO buffers",
.def = "malloc",
+ .category = FIO_OPT_G_IO_BUF | FIO_OPT_G_MEM,
.posval = {
{ .ival = "malloc",
.oval = MEM_MALLOC,
.help = "IO memory buffer offset alignment",
.def = "0",
.parent = "iomem",
+ .category = FIO_OPT_G_IO_BUF | FIO_OPT_G_MEM,
},
{
.name = "verify",
.help = "Verify data written",
.cb = str_verify_cb,
.def = "0",
+ .category = FIO_OPT_G_IO | FIO_OPT_G_VERIFY,
.posval = {
{ .ival = "0",
.oval = VERIFY_NONE,
.help = "Run verification stage after write",
.def = "1",
.parent = "verify",
+ .category = FIO_OPT_G_IO | FIO_OPT_G_VERIFY,
},
{
.name = "verifysort",
.help = "Sort written verify blocks for read back",
.def = "1",
.parent = "verify",
+ .category = FIO_OPT_G_IO | FIO_OPT_G_VERIFY,
},
{
.name = "verify_interval",
.minval = 2 * sizeof(struct verify_header),
.help = "Store verify buffer header every N bytes",
.parent = "verify",
+ .category = FIO_OPT_G_IO | FIO_OPT_G_VERIFY,
},
{
.name = "verify_offset",
.def = "0",
.cb = str_verify_offset_cb,
.parent = "verify",
+ .category = FIO_OPT_G_IO | FIO_OPT_G_VERIFY,
},
{
.name = "verify_pattern",
.cb = str_verify_pattern_cb,
.help = "Fill pattern for IO buffers",
.parent = "verify",
+ .category = FIO_OPT_G_IO | FIO_OPT_G_VERIFY,
},
{
.name = "verify_fatal",
.def = "0",
.help = "Exit on a single verify failure, don't continue",
.parent = "verify",
+ .category = FIO_OPT_G_IO | FIO_OPT_G_VERIFY | FIO_OPT_G_ERR,
},
{
.name = "verify_dump",
.def = "0",
.help = "Dump contents of good and bad blocks on failure",
.parent = "verify",
+ .category = FIO_OPT_G_IO | FIO_OPT_G_VERIFY | FIO_OPT_G_ERR,
},
{
.name = "verify_async",
.def = "0",
.help = "Number of async verifier threads to use",
.parent = "verify",
+ .category = FIO_OPT_G_IO | FIO_OPT_G_VERIFY,
},
{
.name = "verify_backlog",
.off1 = td_var_offset(verify_backlog),
.help = "Verify after this number of blocks are written",
.parent = "verify",
+ .category = FIO_OPT_G_IO | FIO_OPT_G_VERIFY,
},
{
.name = "verify_backlog_batch",
.off1 = td_var_offset(verify_batch),
.help = "Verify this number of IO blocks",
.parent = "verify",
+ .category = FIO_OPT_G_IO | FIO_OPT_G_VERIFY,
},
#ifdef FIO_HAVE_CPU_AFFINITY
{
.cb = str_verify_cpus_allowed_cb,
.help = "Set CPUs allowed for async verify threads",
.parent = "verify_async",
+ .category = FIO_OPT_G_OS | FIO_OPT_G_CPU | FIO_OPT_G_VERIFY,
},
#endif
#ifdef FIO_HAVE_TRIM
.help = "Number of verify blocks to discard/trim",
.parent = "verify",
.def = "0",
+ .category = FIO_OPT_G_IO,
},
{
.name = "trim_verify_zero",
.off1 = td_var_offset(trim_zero),
.parent = "trim_percentage",
.def = "1",
+ .category = FIO_OPT_G_IO,
},
{
.name = "trim_backlog",
.off1 = td_var_offset(trim_backlog),
.help = "Trim after this number of blocks are written",
.parent = "trim_percentage",
+ .category = FIO_OPT_G_IO,
},
{
.name = "trim_backlog_batch",
.off1 = td_var_offset(trim_batch),
.help = "Trim this number of IO blocks",
.parent = "trim_percentage",
+ .category = FIO_OPT_G_IO,
},
#endif
{
.type = FIO_OPT_STR_STORE,
.off1 = td_var_offset(write_iolog_file),
.help = "Store IO pattern to file",
+ .category = FIO_OPT_G_IO | FIO_OPT_G_LOG,
},
{
.name = "read_iolog",
.type = FIO_OPT_STR_STORE,
.off1 = td_var_offset(read_iolog_file),
.help = "Playback IO pattern from file",
+ .category = FIO_OPT_G_IO | FIO_OPT_G_LOG,
},
{
.name = "replay_no_stall",
.def = "0",
.parent = "read_iolog",
.help = "Playback IO pattern file as fast as possible without stalls",
+ .category = FIO_OPT_G_IO | FIO_OPT_G_LOG,
},
{
.name = "replay_redirect",
.off1 = td_var_offset(replay_redirect),
.parent = "read_iolog",
.help = "Replay all I/O onto this device, regardless of trace device",
+ .category = FIO_OPT_G_IO | FIO_OPT_G_LOG,
},
{
.name = "exec_prerun",
.type = FIO_OPT_STR_STORE,
.off1 = td_var_offset(exec_prerun),
.help = "Execute this file prior to running job",
+ .category = FIO_OPT_G_MISC | FIO_OPT_G_OS,
},
{
.name = "exec_postrun",
.type = FIO_OPT_STR_STORE,
.off1 = td_var_offset(exec_postrun),
.help = "Execute this file after running job",
+ .category = FIO_OPT_G_MISC | FIO_OPT_G_OS,
},
#ifdef FIO_HAVE_IOSCHED_SWITCH
{
.type = FIO_OPT_STR_STORE,
.off1 = td_var_offset(ioscheduler),
.help = "Use this IO scheduler on the backing device",
+ .category = FIO_OPT_G_OS | FIO_OPT_G_IO,
},
#endif
{
.off1 = td_var_offset(zone_size),
.help = "Amount of data to read per zone",
.def = "0",
+ .category = FIO_OPT_G_IO | FIO_OPT_G_ZONE,
},
{
.name = "zonerange",
.off1 = td_var_offset(zone_range),
.help = "Give size of an IO zone",
.def = "0",
+ .category = FIO_OPT_G_IO | FIO_OPT_G_ZONE,
},
{
.name = "zoneskip",
.off1 = td_var_offset(zone_skip),
.help = "Space between IO zones",
.def = "0",
+ .category = FIO_OPT_G_IO | FIO_OPT_G_ZONE,
},
{
.name = "lockmem",
.cb = str_lockmem_cb,
.help = "Lock down this amount of memory",
.def = "0",
+ .category = FIO_OPT_G_OS | FIO_OPT_G_MEM,
},
{
.name = "rwmixread",
.maxval = 100,
.help = "Percentage of mixed workload that is reads",
.def = "50",
+ .category = FIO_OPT_G_IO,
},
{
.name = "rwmixwrite",
.maxval = 100,
.help = "Percentage of mixed workload that is writes",
.def = "50",
+ .category = FIO_OPT_G_IO,
},
{
.name = "rwmixcycle",
.type = FIO_OPT_DEPRECATED,
+ .category = FIO_OPT_G_IO,
},
{
.name = "nice",
.minval = -19,
.maxval = 20,
.def = "0",
+ .category = FIO_OPT_G_OS | FIO_OPT_G_CPU,
},
#ifdef FIO_HAVE_IOPRIO
{
.help = "Set job IO priority value",
.minval = 0,
.maxval = 7,
+ .category = FIO_OPT_G_OS | FIO_OPT_G_CPU,
},
{
.name = "prioclass",
.help = "Set job IO priority class",
.minval = 0,
.maxval = 3,
+ .category = FIO_OPT_G_OS | FIO_OPT_G_CPU,
},
#endif
{
.off1 = td_var_offset(thinktime),
.help = "Idle time between IO buffers (usec)",
.def = "0",
+ .category = FIO_OPT_G_MISC,
},
{
.name = "thinktime_spin",
.help = "Start think time by spinning this amount (usec)",
.def = "0",
.parent = "thinktime",
+ .category = FIO_OPT_G_MISC,
},
{
.name = "thinktime_blocks",
.help = "IO buffer period between 'thinktime'",
.def = "1",
.parent = "thinktime",
+ .category = FIO_OPT_G_MISC,
},
{
.name = "rate",
.off1 = td_var_offset(rate[0]),
.off2 = td_var_offset(rate[1]),
.help = "Set bandwidth rate",
+ .category = FIO_OPT_G_IO,
},
{
.name = "ratemin",
.off2 = td_var_offset(ratemin[1]),
.help = "Job must meet this rate or it will be shutdown",
.parent = "rate",
+ .category = FIO_OPT_G_IO,
},
{
.name = "rate_iops",
.off1 = td_var_offset(rate_iops[0]),
.off2 = td_var_offset(rate_iops[1]),
.help = "Limit IO used to this number of IO operations/sec",
+ .category = FIO_OPT_G_IO,
},
{
.name = "rate_iops_min",
.off2 = td_var_offset(rate_iops_min[1]),
.help = "Job must meet this rate or it will be shut down",
.parent = "rate_iops",
+ .category = FIO_OPT_G_IO,
},
{
.name = "ratecycle",
.help = "Window average for rate limits (msec)",
.def = "1000",
.parent = "rate",
+ .category = FIO_OPT_G_IO,
},
{
.name = "invalidate",
.off1 = td_var_offset(invalidate_cache),
.help = "Invalidate buffer/page cache prior to running job",
.def = "1",
+ .category = FIO_OPT_G_IO | FIO_OPT_G_CACHE,
},
{
.name = "sync",
.help = "Use O_SYNC for buffered writes",
.def = "0",
.parent = "buffered",
+ .category = FIO_OPT_G_IO | FIO_OPT_G_FILE,
},
{
.name = "bwavgtime",
" (msec)",
.def = "500",
.parent = "write_bw_log",
+ .category = FIO_OPT_G_LOG | FIO_OPT_G_STAT,
},
{
.name = "iopsavgtime",
.help = "Time window over which to calculate IOPS (msec)",
.def = "500",
.parent = "write_iops_log",
+ .category = FIO_OPT_G_LOG | FIO_OPT_G_STAT,
},
{
.name = "create_serialize",
.off1 = td_var_offset(create_serialize),
.help = "Serialize creating of job files",
.def = "1",
+ .category = FIO_OPT_G_FILE,
},
{
.name = "create_fsync",
.off1 = td_var_offset(create_fsync),
.help = "fsync file after creation",
.def = "1",
+ .category = FIO_OPT_G_FILE,
},
{
.name = "create_on_open",
.off1 = td_var_offset(create_on_open),
.help = "Create files when they are opened for IO",
.def = "0",
+ .category = FIO_OPT_G_FILE,
},
{
.name = "pre_read",
.off1 = td_var_offset(pre_read),
.help = "Pre-read files before starting official testing",
.def = "0",
+ .category = FIO_OPT_G_FILE | FIO_OPT_G_CACHE,
},
{
.name = "cpuload",
.type = FIO_OPT_INT,
.off1 = td_var_offset(cpuload),
.help = "Use this percentage of CPU",
+ .category = FIO_OPT_G_CPU,
},
{
.name = "cpuchunks",
.help = "Length of the CPU burn cycles (usecs)",
.def = "50000",
.parent = "cpuload",
+ .category = FIO_OPT_G_CPU,
},
#ifdef FIO_HAVE_CPU_AFFINITY
{
.type = FIO_OPT_INT,
.cb = str_cpumask_cb,
.help = "CPU affinity mask",
+ .category = FIO_OPT_G_CPU | FIO_OPT_G_OS,
},
{
.name = "cpus_allowed",
.type = FIO_OPT_STR,
.cb = str_cpus_allowed_cb,
.help = "Set CPUs allowed",
+ .category = FIO_OPT_G_CPU | FIO_OPT_G_OS,
},
#endif
{
.off1 = td_var_offset(end_fsync),
.help = "Include fsync at the end of job",
.def = "0",
+ .category = FIO_OPT_G_FILE,
},
{
.name = "fsync_on_close",
.off1 = td_var_offset(fsync_on_close),
.help = "fsync files on close",
.def = "0",
+ .category = FIO_OPT_G_FILE,
},
{
.name = "unlink",
.off1 = td_var_offset(unlink),
.help = "Unlink created files after job has completed",
.def = "0",
+ .category = FIO_OPT_G_FILE,
},
{
.name = "exitall",
.type = FIO_OPT_STR_SET,
.cb = str_exitall_cb,
.help = "Terminate all jobs when one exits",
+ .category = FIO_OPT_G_MISC | FIO_OPT_G_JOB,
},
{
.name = "stonewall",
.type = FIO_OPT_STR_SET,
.off1 = td_var_offset(stonewall),
.help = "Insert a hard barrier between this job and previous",
+ .category = FIO_OPT_G_MISC | FIO_OPT_G_JOB,
},
{
.name = "new_group",
.type = FIO_OPT_STR_SET,
.off1 = td_var_offset(new_group),
.help = "Mark the start of a new group (for reporting)",
+ .category = FIO_OPT_G_MISC | FIO_OPT_G_JOB,
},
{
.name = "thread",
.type = FIO_OPT_STR_SET,
.off1 = td_var_offset(use_thread),
.help = "Use threads instead of forks",
+ .category = FIO_OPT_G_MISC | FIO_OPT_G_OS | FIO_OPT_G_JOB,
},
{
.name = "write_bw_log",
.off1 = td_var_offset(write_bw_log),
.cb = str_write_bw_log_cb,
.help = "Write log of bandwidth during run",
+ .category = FIO_OPT_G_LOG,
},
{
.name = "write_lat_log",
.off1 = td_var_offset(write_lat_log),
.cb = str_write_lat_log_cb,
.help = "Write log of latency during run",
+ .category = FIO_OPT_G_LOG,
},
{
.name = "write_iops_log",
.off1 = td_var_offset(write_iops_log),
.cb = str_write_iops_log_cb,
.help = "Write log of IOPS during run",
+ .category = FIO_OPT_G_LOG,
},
{
.name = "log_avg_msec",
.off1 = td_var_offset(log_avg_msec),
.help = "Average bw/iops/lat logs over this period of time",
.def = "0",
+ .category = FIO_OPT_G_LOG,
},
{
.name = "hugepage-size",
.off1 = td_var_offset(hugepage_size),
.help = "When using hugepages, specify size of each page",
.def = __fio_stringify(FIO_HUGE_PAGE),
+ .category = FIO_OPT_G_OS | FIO_OPT_G_MEM,
},
{
.name = "group_reporting",
.type = FIO_OPT_STR_SET,
.off1 = td_var_offset(group_reporting),
.help = "Do reporting on a per-group basis",
+ .category = FIO_OPT_G_MISC,
},
{
.name = "zero_buffers",
.type = FIO_OPT_STR_SET,
.off1 = td_var_offset(zero_buffers),
.help = "Init IO buffers to all zeroes",
+ .category = FIO_OPT_G_IO_BUF,
},
{
.name = "refill_buffers",
.type = FIO_OPT_STR_SET,
.off1 = td_var_offset(refill_buffers),
.help = "Refill IO buffers on every IO submit",
+ .category = FIO_OPT_G_IO_BUF,
},
{
.name = "scramble_buffers",
.off1 = td_var_offset(scramble_buffers),
.help = "Slightly scramble buffers on every IO submit",
.def = "1",
+ .category = FIO_OPT_G_IO_BUF,
},
{
.name = "buffer_compress_percentage",
.off1 = td_var_offset(clat_percentiles),
.help = "Enable the reporting of completion latency percentiles",
.def = "1",
+ .category = FIO_OPT_G_STAT,
},
{
.name = "percentile_list",
.maxlen = FIO_IO_U_LIST_MAX_LEN,
.minfp = 0.0,
.maxfp = 100.0,
+ .category = FIO_OPT_G_STAT,
},
#ifdef FIO_HAVE_DISK_UTIL
.off1 = td_var_offset(do_disk_util),
.help = "Log disk utilization statistics",
.def = "1",
+ .category = FIO_OPT_G_OS | FIO_OPT_G_STAT,
},
#endif
{
.help = "Greatly reduce number of gettimeofday() calls",
.cb = str_gtod_reduce_cb,
.def = "0",
+ .category = FIO_OPT_G_OS | FIO_OPT_G_MISC | FIO_OPT_G_STAT,
},
{
.name = "disable_lat",
.help = "Disable latency numbers",
.parent = "gtod_reduce",
.def = "0",
+ .category = FIO_OPT_G_OS | FIO_OPT_G_MISC | FIO_OPT_G_STAT,
},
{
.name = "disable_clat",
.help = "Disable completion latency numbers",
.parent = "gtod_reduce",
.def = "0",
+ .category = FIO_OPT_G_OS | FIO_OPT_G_MISC | FIO_OPT_G_STAT,
},
{
.name = "disable_slat",
.help = "Disable submission latency numbers",
.parent = "gtod_reduce",
.def = "0",
+ .category = FIO_OPT_G_OS | FIO_OPT_G_MISC | FIO_OPT_G_STAT,
},
{
.name = "disable_bw_measurement",
.help = "Disable bandwidth logging",
.parent = "gtod_reduce",
.def = "0",
+ .category = FIO_OPT_G_OS | FIO_OPT_G_MISC | FIO_OPT_G_STAT,
},
{
.name = "gtod_cpu",
.cb = str_gtod_cpu_cb,
.help = "Set up dedicated gettimeofday() thread on this CPU",
.verify = gtod_cpu_verify,
+ .category = FIO_OPT_G_OS | FIO_OPT_G_MISC | FIO_OPT_G_STAT,
},
{
.name = "continue_on_error",
.off1 = td_var_offset(continue_on_error),
.help = "Continue on non-fatal errors during IO",
.def = "none",
+ .category = FIO_OPT_G_MISC | FIO_OPT_G_ERR,
.posval = {
{ .ival = "none",
.oval = ERROR_TYPE_NONE,
.type = FIO_OPT_STR_STORE,
.off1 = td_var_offset(profile),
.help = "Select a specific builtin performance test",
+ .category = FIO_OPT_G_MISC | FIO_OPT_G_JOB,
},
{
.name = "cgroup",
.type = FIO_OPT_STR_STORE,
.off1 = td_var_offset(cgroup),
.help = "Add job to cgroup of this name",
+ .category = FIO_OPT_G_MISC | FIO_OPT_G_OS,
},
{
.name = "cgroup_weight",
.help = "Use given weight for cgroup",
.minval = 100,
.maxval = 1000,
+ .category = FIO_OPT_G_MISC | FIO_OPT_G_OS,
},
{
.name = "cgroup_nodelete",
.off1 = td_var_offset(cgroup_nodelete),
.help = "Do not delete cgroups after job completion",
.def = "0",
+ .category = FIO_OPT_G_MISC | FIO_OPT_G_OS,
},
{
.name = "uid",
.type = FIO_OPT_INT,
.off1 = td_var_offset(uid),
.help = "Run job with this user ID",
+ .category = FIO_OPT_G_OS | FIO_OPT_G_JOB,
},
{
.name = "gid",
.type = FIO_OPT_INT,
.off1 = td_var_offset(gid),
.help = "Run job with this group ID",
+ .category = FIO_OPT_G_OS | FIO_OPT_G_JOB,
},
{
.name = "flow_id",
.off1 = td_var_offset(flow_id),
.help = "The flow index ID to use",
.def = "0",
+ .category = FIO_OPT_G_IO,
},
{
.name = "flow",
.help = "Weight for flow control of this job",
.parent = "flow_id",
.def = "0",
+ .category = FIO_OPT_G_IO,
},
{
.name = "flow_watermark",
" with non-zero flow.",
.parent = "flow_id",
.def = "1024",
+ .category = FIO_OPT_G_IO,
},
{
.name = "flow_sleep",
" back by the flow control mechanism",
.parent = "flow_id",
.def = "0",
+ .category = FIO_OPT_G_IO,
},
{
.name = NULL,
{
unsigned int i;
- options_init(options);
+ options_init(fio_options);
i = 0;
while (long_options[i].name)
i++;
- options_to_lopts(options, long_options, i, FIO_GETOPT_JOB);
+ options_to_lopts(fio_options, long_options, i, FIO_GETOPT_JOB);
}
struct fio_keyword {
int i, ret, unknown;
char **opts_copy;
- sort_options(opts, options, num_opts);
+ sort_options(opts, fio_options, num_opts);
opts_copy = dup_and_sub_options(opts, num_opts);
for (ret = 0, i = 0, unknown = 0; i < num_opts; i++) {
struct fio_option *o;
- int newret = parse_option(opts_copy[i], opts[i], options, &o,
- td);
+ int newret = parse_option(opts_copy[i], opts[i], fio_options,
+ &o, td);
if (opts_copy[i]) {
if (newret && !o) {
int fio_cmd_option_parse(struct thread_data *td, const char *opt, char *val)
{
- return parse_cmd_option(opt, val, options, td);
+ return parse_cmd_option(opt, val, fio_options, td);
}
int fio_cmd_ioengine_option_parse(struct thread_data *td, const char *opt,
void fio_fill_default_options(struct thread_data *td)
{
- fill_default_options(td, options);
+ fill_default_options(td, fio_options);
}
int fio_show_option_help(const char *opt)
{
- return show_cmd_help(options, opt);
+ return show_cmd_help(fio_options, opt);
}
void options_mem_dupe(void *data, struct fio_option *options)
*/
void fio_options_mem_dupe(struct thread_data *td)
{
- options_mem_dupe(&td->o, options);
+ options_mem_dupe(&td->o, fio_options);
if (td->eo && td->io_ops) {
void *oldeo = td->eo;
struct fio_option *__o;
int opt_index = 0;
- __o = options;
+ __o = fio_options;
while (__o->name) {
opt_index++;
__o++;
}
- memcpy(&options[opt_index], o, sizeof(*o));
+ memcpy(&fio_options[opt_index], o, sizeof(*o));
return 0;
}
{
struct fio_option *o;
- o = options;
+ o = fio_options;
while (o->name) {
if (o->prof_name && !strcmp(o->prof_name, prof_name)) {
o->type = FIO_OPT_INVALID;
struct fio_option *o;
unsigned int i;
- o = find_option(options, optname);
+ o = find_option(fio_options, optname);
if (!o)
return;
struct fio_option *o;
unsigned int i;
- o = find_option(options, optname);
+ o = find_option(fio_options, optname);
if (!o)
return;
void fio_options_free(struct thread_data *td)
{
- options_free(options, td);
+ options_free(fio_options, td);
if (td->eo && td->io_ops && td->io_ops->options) {
options_free(td->io_ops->options, td->eo);
free(td->eo);
--- /dev/null
+#ifndef FIO_THREAD_OPTIONS_H
+#define FIO_THREAD_OPTIONS_H
+
+#include "arch/arch.h"
+#include "os/os.h"
+#include "stat.h"
+#include "gettime.h"
+
+/*
+ * What type of allocation to use for io buffers
+ */
+enum fio_memtype {
+ MEM_MALLOC = 0, /* ordinary malloc */
+ MEM_SHM, /* use shared memory segments */
+ MEM_SHMHUGE, /* use shared memory segments with huge pages */
+ MEM_MMAP, /* use anonymous mmap */
+ MEM_MMAPHUGE, /* memory mapped huge file */
+};
+
+/*
+ * What type of errors to continue on when continue_on_error is used
+ */
+enum error_type {
+ ERROR_TYPE_NONE = 0,
+ ERROR_TYPE_READ = 1 << 0,
+ ERROR_TYPE_WRITE = 1 << 1,
+ ERROR_TYPE_VERIFY = 1 << 2,
+ ERROR_TYPE_ANY = 0xffff,
+};
+
+#define BSSPLIT_MAX 64
+
+struct bssplit {
+ uint32_t bs;
+ uint32_t perc;
+};
+
+struct thread_options {
+ int pad;
+ char *description;
+ char *name;
+ char *directory;
+ char *filename;
+ char *opendir;
+ char *ioengine;
+ enum td_ddir td_ddir;
+ unsigned int rw_seq;
+ unsigned int kb_base;
+ unsigned int ddir_seq_nr;
+ long ddir_seq_add;
+ unsigned int iodepth;
+ unsigned int iodepth_low;
+ unsigned int iodepth_batch;
+ unsigned int iodepth_batch_complete;
+
+ unsigned long long size;
+ unsigned int size_percent;
+ unsigned int fill_device;
+ unsigned long long file_size_low;
+ unsigned long long file_size_high;
+ unsigned long long start_offset;
+
+ unsigned int bs[2];
+ unsigned int ba[2];
+ unsigned int min_bs[2];
+ unsigned int max_bs[2];
+ struct bssplit *bssplit[2];
+ unsigned int bssplit_nr[2];
+
+ unsigned int nr_files;
+ unsigned int open_files;
+ enum file_lock_mode file_lock_mode;
+ unsigned int lockfile_batch;
+
+ unsigned int odirect;
+ unsigned int invalidate_cache;
+ unsigned int create_serialize;
+ unsigned int create_fsync;
+ unsigned int create_on_open;
+ unsigned int end_fsync;
+ unsigned int pre_read;
+ unsigned int sync_io;
+ unsigned int verify;
+ unsigned int do_verify;
+ unsigned int verifysort;
+ unsigned int verify_interval;
+ unsigned int verify_offset;
+ char verify_pattern[MAX_PATTERN_SIZE];
+ unsigned int verify_pattern_bytes;
+ unsigned int verify_fatal;
+ unsigned int verify_dump;
+ unsigned int verify_async;
+ unsigned long long verify_backlog;
+ unsigned int verify_batch;
+ unsigned int use_thread;
+ unsigned int unlink;
+ unsigned int do_disk_util;
+ unsigned int override_sync;
+ unsigned int rand_repeatable;
+ unsigned int use_os_rand;
+ unsigned int write_lat_log;
+ unsigned int write_bw_log;
+ unsigned int write_iops_log;
+ unsigned int log_avg_msec;
+ unsigned int norandommap;
+ unsigned int softrandommap;
+ unsigned int bs_unaligned;
+ unsigned int fsync_on_close;
+
+ unsigned int hugepage_size;
+ unsigned int rw_min_bs;
+ unsigned int thinktime;
+ unsigned int thinktime_spin;
+ unsigned int thinktime_blocks;
+ unsigned int fsync_blocks;
+ unsigned int fdatasync_blocks;
+ unsigned int barrier_blocks;
+ unsigned long long start_delay;
+ unsigned long long timeout;
+ unsigned long long ramp_time;
+ unsigned int overwrite;
+ unsigned int bw_avg_time;
+ unsigned int iops_avg_time;
+ unsigned int loops;
+ unsigned long long zone_range;
+ unsigned long long zone_size;
+ unsigned long long zone_skip;
+ enum fio_memtype mem_type;
+ unsigned int mem_align;
+
+ unsigned int stonewall;
+ unsigned int new_group;
+ unsigned int numjobs;
+ os_cpu_mask_t cpumask;
+ unsigned int cpumask_set;
+ os_cpu_mask_t verify_cpumask;
+ unsigned int verify_cpumask_set;
+ unsigned int iolog;
+ unsigned int rwmixcycle;
+ unsigned int rwmix[2];
+ unsigned int nice;
+ unsigned int file_service_type;
+ unsigned int group_reporting;
+ unsigned int fadvise_hint;
+ enum fio_fallocate_mode fallocate_mode;
+ unsigned int zero_buffers;
+ unsigned int refill_buffers;
+ unsigned int scramble_buffers;
+ unsigned int compress_percentage;
+ unsigned int compress_chunk;
+ unsigned int time_based;
+ unsigned int disable_lat;
+ unsigned int disable_clat;
+ unsigned int disable_slat;
+ unsigned int disable_bw;
+ unsigned int gtod_reduce;
+ unsigned int gtod_cpu;
+ unsigned int gtod_offload;
+ enum fio_cs clocksource;
+ unsigned int no_stall;
+ unsigned int trim_percentage;
+ unsigned int trim_batch;
+ unsigned int trim_zero;
+ unsigned long long trim_backlog;
+ unsigned int clat_percentiles;
+ unsigned int overwrite_plist;
+ fio_fp64_t percentile_list[FIO_IO_U_LIST_MAX_LEN];
+
+ char *read_iolog_file;
+ char *write_iolog_file;
+ char *bw_log_file;
+ char *lat_log_file;
+ char *iops_log_file;
+ char *replay_redirect;
+
+ /*
+ * Pre-run and post-run shell
+ */
+ char *exec_prerun;
+ char *exec_postrun;
+
+ unsigned int rate[2];
+ unsigned int ratemin[2];
+ unsigned int ratecycle;
+ unsigned int rate_iops[2];
+ unsigned int rate_iops_min[2];
+
+ char *ioscheduler;
+
+ /*
+ * CPU "io" cycle burner
+ */
+ unsigned int cpuload;
+ unsigned int cpucycle;
+
+ /*
+ * I/O Error handling
+ */
+ enum error_type continue_on_error;
+
+ /*
+ * Benchmark profile type
+ */
+ char *profile;
+
+ /*
+ * blkio cgroup support
+ */
+ char *cgroup;
+ unsigned int cgroup_weight;
+ unsigned int cgroup_nodelete;
+
+ unsigned int uid;
+ unsigned int gid;
+
+ int flow_id;
+ int flow;
+ int flow_watermark;
+ unsigned int flow_sleep;
+
+ unsigned long long offset_increment;
+
+ unsigned int sync_file_range;
+};
+
+#define FIO_TOP_STR_MAX 256
+
+struct thread_options_pack {
+ uint8_t description[FIO_TOP_STR_MAX];
+ uint8_t name[FIO_TOP_STR_MAX];
+ uint8_t directory[FIO_TOP_STR_MAX];
+ uint8_t filename[FIO_TOP_STR_MAX];
+ uint8_t opendir[FIO_TOP_STR_MAX];
+ uint8_t ioengine[FIO_TOP_STR_MAX];
+ uint32_t td_ddir;
+ uint32_t rw_seq;
+ uint32_t kb_base;
+ uint32_t ddir_seq_nr;
+ uint64_t ddir_seq_add;
+ uint32_t iodepth;
+ uint32_t iodepth_low;
+ uint32_t iodepth_batch;
+ uint32_t iodepth_batch_complete;
+
+ uint64_t size;
+ uint32_t size_percent;
+ uint32_t fill_device;
+ uint64_t file_size_low;
+ uint64_t file_size_high;
+ uint64_t start_offset;
+
+ uint32_t bs[2];
+ uint32_t ba[2];
+ uint32_t min_bs[2];
+ uint32_t max_bs[2];
+ struct bssplit bssplit[2][BSSPLIT_MAX];
+ uint32_t bssplit_nr[2];
+
+ uint32_t nr_files;
+ uint32_t open_files;
+ uint32_t file_lock_mode;
+ uint32_t lockfile_batch;
+
+ uint32_t odirect;
+ uint32_t invalidate_cache;
+ uint32_t create_serialize;
+ uint32_t create_fsync;
+ uint32_t create_on_open;
+ uint32_t end_fsync;
+ uint32_t pre_read;
+ uint32_t sync_io;
+ uint32_t verify;
+ uint32_t do_verify;
+ uint32_t verifysort;
+ uint32_t verify_interval;
+ uint32_t verify_offset;
+ uint8_t verify_pattern[MAX_PATTERN_SIZE];
+ uint32_t verify_pattern_bytes;
+ uint32_t verify_fatal;
+ uint32_t verify_dump;
+ uint32_t verify_async;
+ uint64_t verify_backlog;
+ uint32_t verify_batch;
+ uint32_t use_thread;
+ uint32_t unlink;
+ uint32_t do_disk_util;
+ uint32_t override_sync;
+ uint32_t rand_repeatable;
+ uint32_t use_os_rand;
+ uint32_t write_lat_log;
+ uint32_t write_bw_log;
+ uint32_t write_iops_log;
+ uint32_t log_avg_msec;
+ uint32_t norandommap;
+ uint32_t softrandommap;
+ uint32_t bs_unaligned;
+ uint32_t fsync_on_close;
+
+ uint32_t hugepage_size;
+ uint32_t rw_min_bs;
+ uint32_t thinktime;
+ uint32_t thinktime_spin;
+ uint32_t thinktime_blocks;
+ uint32_t fsync_blocks;
+ uint32_t fdatasync_blocks;
+ uint32_t barrier_blocks;
+ uint64_t start_delay;
+ uint64_t timeout;
+ uint64_t ramp_time;
+ uint32_t overwrite;
+ uint32_t bw_avg_time;
+ uint32_t iops_avg_time;
+ uint32_t loops;
+ uint64_t zone_range;
+ uint64_t zone_size;
+ uint64_t zone_skip;
+ uint32_t mem_type;
+ uint32_t mem_align;
+
+ uint32_t stonewall;
+ uint32_t new_group;
+ uint32_t numjobs;
+ uint8_t cpumask[FIO_TOP_STR_MAX];
+ uint32_t cpumask_set;
+ uint8_t verify_cpumask[FIO_TOP_STR_MAX];
+ uint32_t verify_cpumask_set;
+ uint32_t iolog;
+ uint32_t rwmixcycle;
+ uint32_t rwmix[2];
+ uint32_t nice;
+ uint32_t file_service_type;
+ uint32_t group_reporting;
+ uint32_t fadvise_hint;
+ uint32_t fallocate_mode;
+ uint32_t zero_buffers;
+ uint32_t refill_buffers;
+ uint32_t scramble_buffers;
+ unsigned int compress_percentage;
+ unsigned int compress_chunk;
+ uint32_t time_based;
+ uint32_t disable_lat;
+ uint32_t disable_clat;
+ uint32_t disable_slat;
+ uint32_t disable_bw;
+ uint32_t gtod_reduce;
+ uint32_t gtod_cpu;
+ uint32_t gtod_offload;
+ uint32_t clocksource;
+ uint32_t no_stall;
+ uint32_t trim_percentage;
+ uint32_t trim_batch;
+ uint32_t trim_zero;
+ uint64_t trim_backlog;
+ uint32_t clat_percentiles;
+ uint32_t overwrite_plist;
+ fio_fp64_t percentile_list[FIO_IO_U_LIST_MAX_LEN];
+
+ uint8_t read_iolog_file[FIO_TOP_STR_MAX];
+ uint8_t write_iolog_file[FIO_TOP_STR_MAX];
+ uint8_t bw_log_file[FIO_TOP_STR_MAX];
+ uint8_t lat_log_file[FIO_TOP_STR_MAX];
+ uint8_t iops_log_file[FIO_TOP_STR_MAX];
+ uint8_t replay_redirect[FIO_TOP_STR_MAX];
+
+ /*
+ * Pre-run and post-run shell
+ */
+ uint8_t exec_prerun[FIO_TOP_STR_MAX];
+ uint8_t exec_postrun[FIO_TOP_STR_MAX];
+
+ uint32_t rate[2];
+ uint32_t ratemin[2];
+ uint32_t ratecycle;
+ uint32_t rate_iops[2];
+ uint32_t rate_iops_min[2];
+
+ uint8_t ioscheduler[FIO_TOP_STR_MAX];
+
+ /*
+ * CPU "io" cycle burner
+ */
+ uint32_t cpuload;
+ uint32_t cpucycle;
+
+ /*
+ * I/O Error handling
+ */
+ uint32_t continue_on_error;
+
+ /*
+ * Benchmark profile type
+ */
+ uint8_t profile[FIO_TOP_STR_MAX];
+
+ /*
+ * blkio cgroup support
+ */
+ uint8_t cgroup[FIO_TOP_STR_MAX];
+ uint32_t cgroup_weight;
+ uint32_t cgroup_nodelete;
+
+ uint32_t uid;
+ uint32_t gid;
+
+ int32_t flow_id;
+ int32_t flow;
+ int32_t flow_watermark;
+ uint32_t flow_sleep;
+
+ uint64_t offset_increment;
+
+ uint32_t sync_file_range;
+} __attribute__((packed));
+
+extern void convert_thread_options_to_cpu(struct thread_options *o, struct thread_options_pack *top);
+extern void convert_thread_options_to_net(struct thread_options_pack *top, struct thread_options *);
+extern int fio_test_cconv(struct thread_options *);
+
+#endif