--- /dev/null
+#include <string.h>
+
+#include "thread_options.h"
+
+static void string_to_cpu(char **dst, const uint8_t *src)
+{
+ const char *__src = (const char *) src;
+
+ if (strlen(__src))
+ *dst = strdup(__src);
+}
+
+static void string_to_net(uint8_t *dst, const char *src)
+{
+ if (src)
+ strcpy((char *) dst, src);
+ else
+ dst[0] = '\0';
+}
+
+void convert_thread_options_to_cpu(struct thread_options *o,
+ struct thread_options_pack *top)
+{
+ int i, j;
+
+ string_to_cpu(&o->description, top->description);
+ string_to_cpu(&o->name, top->name);
+ string_to_cpu(&o->directory, top->directory);
+ string_to_cpu(&o->filename, top->filename);
+ string_to_cpu(&o->opendir, top->opendir);
+ string_to_cpu(&o->ioengine, top->ioengine);
+ string_to_cpu(&o->read_iolog_file, top->read_iolog_file);
+ string_to_cpu(&o->write_iolog_file, top->write_iolog_file);
+ string_to_cpu(&o->bw_log_file, top->bw_log_file);
+ string_to_cpu(&o->lat_log_file, top->lat_log_file);
+ string_to_cpu(&o->iops_log_file, top->iops_log_file);
+ string_to_cpu(&o->replay_redirect, top->replay_redirect);
+ string_to_cpu(&o->exec_prerun, top->exec_prerun);
+ string_to_cpu(&o->exec_postrun, top->exec_postrun);
+ string_to_cpu(&o->ioscheduler, top->ioscheduler);
+ string_to_cpu(&o->profile, top->profile);
+ string_to_cpu(&o->cgroup, top->cgroup);
+
+ o->td_ddir = le32_to_cpu(top->td_ddir);
+ o->rw_seq = le32_to_cpu(top->rw_seq);
+ o->kb_base = le32_to_cpu(top->kb_base);
+ o->ddir_seq_nr = le32_to_cpu(top->ddir_seq_nr);
+ o->ddir_seq_add = le64_to_cpu(top->ddir_seq_add);
+ o->iodepth = le32_to_cpu(top->iodepth);
+ o->iodepth_low = le32_to_cpu(top->iodepth_low);
+ o->iodepth_batch = le32_to_cpu(top->iodepth_batch);
+ o->iodepth_batch_complete = le32_to_cpu(top->iodepth_batch_complete);
+ o->size = le64_to_cpu(top->size);
+ o->size_percent = le32_to_cpu(top->size_percent);
+ o->fill_device = le32_to_cpu(top->fill_device);
+ o->file_size_low = le64_to_cpu(top->file_size_low);
+ o->file_size_high = le64_to_cpu(top->file_size_high);
+ o->start_offset = le64_to_cpu(top->start_offset);
+
+ for (i = 0; i < 2; i++) {
+ o->bs[i] = le32_to_cpu(top->bs[i]);
+ o->ba[i] = le32_to_cpu(top->ba[i]);
+ o->min_bs[i] = le32_to_cpu(top->min_bs[i]);
+ o->max_bs[i] = le32_to_cpu(top->max_bs[i]);
+ o->bssplit_nr[i] = le32_to_cpu(top->bssplit_nr[i]);
+
+ if (o->bssplit_nr[i]) {
+ o->bssplit[i] = malloc(o->bssplit_nr[i] * sizeof(struct bssplit));
+ for (j = 0; j < o->bssplit_nr[i]; j++) {
+ o->bssplit[i][j].bs = le32_to_cpu(top->bssplit[i][j].bs);
+ o->bssplit[i][j].perc = le32_to_cpu(top->bssplit[i][j].perc);
+ }
+ }
+
+ o->rwmix[i] = le32_to_cpu(top->rwmix[i]);
+ o->rate[i] = le32_to_cpu(top->rate[i]);
+ o->ratemin[i] = le32_to_cpu(top->ratemin[i]);
+ o->rate_iops[i] = le32_to_cpu(top->rate_iops[i]);
+ o->rate_iops_min[i] = le32_to_cpu(top->rate_iops_min[i]);
+ }
+
+ o->ratecycle = le32_to_cpu(top->ratecycle);
+ o->nr_files = le32_to_cpu(top->nr_files);
+ o->open_files = le32_to_cpu(top->open_files);
+ o->file_lock_mode = le32_to_cpu(top->file_lock_mode);
+ o->lockfile_batch = le32_to_cpu(top->lockfile_batch);
+ o->odirect = le32_to_cpu(top->odirect);
+ o->invalidate_cache = le32_to_cpu(top->invalidate_cache);
+ o->create_serialize = le32_to_cpu(top->create_serialize);
+ o->create_fsync = le32_to_cpu(top->create_fsync);
+ o->create_on_open = le32_to_cpu(top->create_on_open);
+ o->end_fsync = le32_to_cpu(top->end_fsync);
+ o->pre_read = le32_to_cpu(top->pre_read);
+ o->sync_io = le32_to_cpu(top->sync_io);
+ o->verify = le32_to_cpu(top->verify);
+ o->do_verify = le32_to_cpu(top->do_verify);
+ o->verifysort = le32_to_cpu(top->verifysort);
+ o->verify_interval = le32_to_cpu(top->verify_interval);
+ o->verify_offset = le32_to_cpu(top->verify_offset);
+
+ memcpy(o->verify_pattern, top->verify_pattern, MAX_PATTERN_SIZE);
+
+ o->verify_pattern_bytes = le32_to_cpu(top->verify_pattern_bytes);
+ o->verify_fatal = le32_to_cpu(top->verify_fatal);
+ o->verify_dump = le32_to_cpu(top->verify_dump);
+ o->verify_async = le32_to_cpu(top->verify_async);
+ o->verify_batch = le32_to_cpu(top->verify_batch);
+ o->use_thread = le32_to_cpu(top->use_thread);
+ o->unlink = le32_to_cpu(top->unlink);
+ o->do_disk_util = le32_to_cpu(top->do_disk_util);
+ o->override_sync = le32_to_cpu(top->override_sync);
+ o->rand_repeatable = le32_to_cpu(top->rand_repeatable);
+ o->use_os_rand = le32_to_cpu(top->use_os_rand);
+ o->write_lat_log = le32_to_cpu(top->write_lat_log);
+ o->write_bw_log = le32_to_cpu(top->write_bw_log);
+ o->write_iops_log = le32_to_cpu(top->write_iops_log);
+ o->log_avg_msec = le32_to_cpu(top->log_avg_msec);
+ o->norandommap = le32_to_cpu(top->norandommap);
+ o->softrandommap = le32_to_cpu(top->softrandommap);
+ o->bs_unaligned = le32_to_cpu(top->bs_unaligned);
+ o->fsync_on_close = le32_to_cpu(top->fsync_on_close);
+ o->hugepage_size = le32_to_cpu(top->hugepage_size);
+ o->rw_min_bs = le32_to_cpu(top->rw_min_bs);
+ o->thinktime = le32_to_cpu(top->thinktime);
+ o->thinktime_spin = le32_to_cpu(top->thinktime_spin);
+ o->thinktime_blocks = le32_to_cpu(top->thinktime_blocks);
+ o->fsync_blocks = le32_to_cpu(top->fsync_blocks);
+ o->fdatasync_blocks = le32_to_cpu(top->fdatasync_blocks);
+ o->barrier_blocks = le32_to_cpu(top->barrier_blocks);
+
+ o->verify_backlog = le64_to_cpu(top->verify_backlog);
+ o->start_delay = le64_to_cpu(top->start_delay);
+ o->timeout = le64_to_cpu(top->timeout);
+ o->ramp_time = le64_to_cpu(top->ramp_time);
+ o->zone_range = le64_to_cpu(top->zone_range);
+ o->zone_size = le64_to_cpu(top->zone_size);
+ o->zone_skip = le64_to_cpu(top->zone_skip);
+
+ o->overwrite = le32_to_cpu(top->overwrite);
+ o->bw_avg_time = le32_to_cpu(top->bw_avg_time);
+ o->iops_avg_time = le32_to_cpu(top->iops_avg_time);
+ o->loops = le32_to_cpu(top->loops);
+ o->mem_type = le32_to_cpu(top->mem_type);
+ o->mem_align = le32_to_cpu(top->mem_align);
+ o->stonewall = le32_to_cpu(top->stonewall);
+ o->new_group = le32_to_cpu(top->new_group);
+ o->numjobs = le32_to_cpu(top->numjobs);
+ o->cpumask_set = le32_to_cpu(top->cpumask_set);
+ o->verify_cpumask_set = le32_to_cpu(top->verify_cpumask_set);
+ o->iolog = le32_to_cpu(top->iolog);
+ o->rwmixcycle = le32_to_cpu(top->rwmixcycle);
+ o->nice = le32_to_cpu(top->nice);
+ o->file_service_type = le32_to_cpu(top->file_service_type);
+ o->group_reporting = le32_to_cpu(top->group_reporting);
+ o->fadvise_hint = le32_to_cpu(top->fadvise_hint);
+ o->fallocate_mode = le32_to_cpu(top->fallocate_mode);
+ o->zero_buffers = le32_to_cpu(top->zero_buffers);
+ o->refill_buffers = le32_to_cpu(top->refill_buffers);
+ o->scramble_buffers = le32_to_cpu(top->scramble_buffers);
+ o->time_based = le32_to_cpu(top->time_based);
+ o->disable_lat = le32_to_cpu(top->disable_lat);
+ o->disable_clat = le32_to_cpu(top->disable_clat);
+ o->disable_slat = le32_to_cpu(top->disable_slat);
+ o->disable_bw = le32_to_cpu(top->disable_bw);
+ o->gtod_reduce = le32_to_cpu(top->gtod_reduce);
+ o->gtod_cpu = le32_to_cpu(top->gtod_cpu);
+ o->gtod_offload = le32_to_cpu(top->gtod_offload);
+ o->clocksource = le32_to_cpu(top->clocksource);
+ o->no_stall = le32_to_cpu(top->no_stall);
+ o->trim_percentage = le32_to_cpu(top->trim_percentage);
+ o->trim_batch = le32_to_cpu(top->trim_batch);
+ o->trim_zero = le32_to_cpu(top->trim_zero);
+ o->clat_percentiles = le32_to_cpu(top->clat_percentiles);
+ o->overwrite_plist = le32_to_cpu(top->overwrite_plist);
+ o->cpuload = le32_to_cpu(top->cpuload);
+ o->cpucycle = le32_to_cpu(top->cpucycle);
+ o->continue_on_error = le32_to_cpu(top->continue_on_error);
+ o->cgroup_weight = le32_to_cpu(top->cgroup_weight);
+ o->cgroup_nodelete = le32_to_cpu(top->cgroup_nodelete);
+ o->uid = le32_to_cpu(top->uid);
+ o->gid = le32_to_cpu(top->gid);
+ o->flow_id = __le32_to_cpu(top->flow_id);
+ o->flow = __le32_to_cpu(top->flow);
+ o->flow_watermark = __le32_to_cpu(top->flow_watermark);
+ o->flow_sleep = le32_to_cpu(top->flow_sleep);
+ o->sync_file_range = le32_to_cpu(top->sync_file_range);
+ o->compress_percentage = le32_to_cpu(top->compress_percentage);
+ o->compress_chunk = le32_to_cpu(top->compress_chunk);
+
+ o->trim_backlog = le64_to_cpu(top->trim_backlog);
+
+ for (i = 0; i < FIO_IO_U_LIST_MAX_LEN; i++)
+ o->percentile_list[i].u.f = fio_uint64_to_double(le64_to_cpu(top->percentile_list[i].u.i));
+#if 0
+ uint8_t cpumask[FIO_TOP_STR_MAX];
+ uint8_t verify_cpumask[FIO_TOP_STR_MAX];
+#endif
+}
+
+void convert_thread_options_to_net(struct thread_options_pack *top,
+ struct thread_options *o)
+{
+ int i, j;
+
+ string_to_net(top->description, o->description);
+ string_to_net(top->name, o->name);
+ string_to_net(top->directory, o->directory);
+ string_to_net(top->filename, o->filename);
+ string_to_net(top->opendir, o->opendir);
+ string_to_net(top->ioengine, o->ioengine);
+ string_to_net(top->read_iolog_file, o->read_iolog_file);
+ string_to_net(top->write_iolog_file, o->write_iolog_file);
+ string_to_net(top->bw_log_file, o->bw_log_file);
+ string_to_net(top->lat_log_file, o->lat_log_file);
+ string_to_net(top->iops_log_file, o->iops_log_file);
+ string_to_net(top->replay_redirect, o->replay_redirect);
+ string_to_net(top->exec_prerun, o->exec_prerun);
+ string_to_net(top->exec_postrun, o->exec_postrun);
+ string_to_net(top->ioscheduler, o->ioscheduler);
+ string_to_net(top->profile, o->profile);
+ string_to_net(top->cgroup, o->cgroup);
+
+ top->td_ddir = cpu_to_le32(o->td_ddir);
+ top->rw_seq = cpu_to_le32(o->rw_seq);
+ top->kb_base = cpu_to_le32(o->kb_base);
+ top->ddir_seq_nr = cpu_to_le32(o->ddir_seq_nr);
+ top->iodepth = cpu_to_le32(o->iodepth);
+ top->iodepth_low = cpu_to_le32(o->iodepth_low);
+ top->iodepth_batch = cpu_to_le32(o->iodepth_batch);
+ top->iodepth_batch_complete = cpu_to_le32(o->iodepth_batch_complete);
+ top->size_percent = cpu_to_le32(o->size_percent);
+ top->fill_device = cpu_to_le32(o->fill_device);
+ top->ratecycle = cpu_to_le32(o->ratecycle);
+ top->nr_files = cpu_to_le32(o->nr_files);
+ top->open_files = cpu_to_le32(o->open_files);
+ top->file_lock_mode = cpu_to_le32(o->file_lock_mode);
+ top->lockfile_batch = cpu_to_le32(o->lockfile_batch);
+ top->odirect = cpu_to_le32(o->odirect);
+ top->invalidate_cache = cpu_to_le32(o->invalidate_cache);
+ top->create_serialize = cpu_to_le32(o->create_serialize);
+ top->create_fsync = cpu_to_le32(o->create_fsync);
+ top->create_on_open = cpu_to_le32(o->create_on_open);
+ top->end_fsync = cpu_to_le32(o->end_fsync);
+ top->pre_read = cpu_to_le32(o->pre_read);
+ top->sync_io = cpu_to_le32(o->sync_io);
+ top->verify = cpu_to_le32(o->verify);
+ top->do_verify = cpu_to_le32(o->do_verify);
+ top->verifysort = cpu_to_le32(o->verifysort);
+ top->verify_interval = cpu_to_le32(o->verify_interval);
+ top->verify_offset = cpu_to_le32(o->verify_offset);
+ top->verify_pattern_bytes = cpu_to_le32(o->verify_pattern_bytes);
+ top->verify_fatal = cpu_to_le32(o->verify_fatal);
+ top->verify_dump = cpu_to_le32(o->verify_dump);
+ top->verify_async = cpu_to_le32(o->verify_async);
+ top->verify_batch = cpu_to_le32(o->verify_batch);
+ top->use_thread = cpu_to_le32(o->use_thread);
+ top->unlink = cpu_to_le32(o->unlink);
+ top->do_disk_util = cpu_to_le32(o->do_disk_util);
+ top->override_sync = cpu_to_le32(o->override_sync);
+ top->rand_repeatable = cpu_to_le32(o->rand_repeatable);
+ top->use_os_rand = cpu_to_le32(o->use_os_rand);
+ top->write_lat_log = cpu_to_le32(o->write_lat_log);
+ top->write_bw_log = cpu_to_le32(o->write_bw_log);
+ top->write_iops_log = cpu_to_le32(o->write_iops_log);
+ top->log_avg_msec = cpu_to_le32(o->log_avg_msec);
+ top->norandommap = cpu_to_le32(o->norandommap);
+ top->softrandommap = cpu_to_le32(o->softrandommap);
+ top->bs_unaligned = cpu_to_le32(o->bs_unaligned);
+ top->fsync_on_close = cpu_to_le32(o->fsync_on_close);
+ top->hugepage_size = cpu_to_le32(o->hugepage_size);
+ top->rw_min_bs = cpu_to_le32(o->rw_min_bs);
+ top->thinktime = cpu_to_le32(o->thinktime);
+ top->thinktime_spin = cpu_to_le32(o->thinktime_spin);
+ top->thinktime_blocks = cpu_to_le32(o->thinktime_blocks);
+ top->fsync_blocks = cpu_to_le32(o->fsync_blocks);
+ top->fdatasync_blocks = cpu_to_le32(o->fdatasync_blocks);
+ top->barrier_blocks = cpu_to_le32(o->barrier_blocks);
+ top->overwrite = cpu_to_le32(o->overwrite);
+ top->bw_avg_time = cpu_to_le32(o->bw_avg_time);
+ top->iops_avg_time = cpu_to_le32(o->iops_avg_time);
+ top->loops = cpu_to_le32(o->loops);
+ top->mem_type = cpu_to_le32(o->mem_type);
+ top->mem_align = cpu_to_le32(o->mem_align);
+ top->stonewall = cpu_to_le32(o->stonewall);
+ top->new_group = cpu_to_le32(o->new_group);
+ top->numjobs = cpu_to_le32(o->numjobs);
+ top->cpumask_set = cpu_to_le32(o->cpumask_set);
+ top->verify_cpumask_set = cpu_to_le32(o->verify_cpumask_set);
+ top->iolog = cpu_to_le32(o->iolog);
+ top->rwmixcycle = cpu_to_le32(o->rwmixcycle);
+ top->nice = cpu_to_le32(o->nice);
+ top->file_service_type = cpu_to_le32(o->file_service_type);
+ top->group_reporting = cpu_to_le32(o->group_reporting);
+ top->fadvise_hint = cpu_to_le32(o->fadvise_hint);
+ top->fallocate_mode = cpu_to_le32(o->fallocate_mode);
+ top->zero_buffers = cpu_to_le32(o->zero_buffers);
+ top->refill_buffers = cpu_to_le32(o->refill_buffers);
+ top->scramble_buffers = cpu_to_le32(o->scramble_buffers);
+ top->time_based = cpu_to_le32(o->time_based);
+ top->disable_lat = cpu_to_le32(o->disable_lat);
+ top->disable_clat = cpu_to_le32(o->disable_clat);
+ top->disable_slat = cpu_to_le32(o->disable_slat);
+ top->disable_bw = cpu_to_le32(o->disable_bw);
+ top->gtod_reduce = cpu_to_le32(o->gtod_reduce);
+ top->gtod_cpu = cpu_to_le32(o->gtod_cpu);
+ top->gtod_offload = cpu_to_le32(o->gtod_offload);
+ top->clocksource = cpu_to_le32(o->clocksource);
+ top->no_stall = cpu_to_le32(o->no_stall);
+ top->trim_percentage = cpu_to_le32(o->trim_percentage);
+ top->trim_batch = cpu_to_le32(o->trim_batch);
+ top->trim_zero = cpu_to_le32(o->trim_zero);
+ top->clat_percentiles = cpu_to_le32(o->clat_percentiles);
+ top->overwrite_plist = cpu_to_le32(o->overwrite_plist);
+ top->cpuload = cpu_to_le32(o->cpuload);
+ top->cpucycle = cpu_to_le32(o->cpucycle);
+ top->continue_on_error = cpu_to_le32(o->continue_on_error);
+ top->cgroup_weight = cpu_to_le32(o->cgroup_weight);
+ top->cgroup_nodelete = cpu_to_le32(o->cgroup_nodelete);
+ top->uid = cpu_to_le32(o->uid);
+ top->gid = cpu_to_le32(o->gid);
+ top->flow_id = __cpu_to_le32(o->flow_id);
+ top->flow = __cpu_to_le32(o->flow);
+ top->flow_watermark = __cpu_to_le32(o->flow_watermark);
+ top->flow_sleep = cpu_to_le32(o->flow_sleep);
+ top->sync_file_range = cpu_to_le32(o->sync_file_range);
+ top->compress_percentage = cpu_to_le32(o->compress_percentage);
+ top->compress_chunk = cpu_to_le32(o->compress_chunk);
+
+ for (i = 0; i < 2; i++) {
+ top->bs[i] = cpu_to_le32(o->bs[i]);
+ top->ba[i] = cpu_to_le32(o->ba[i]);
+ top->min_bs[i] = cpu_to_le32(o->min_bs[i]);
+ top->max_bs[i] = cpu_to_le32(o->max_bs[i]);
+ top->bssplit_nr[i] = cpu_to_le32(o->bssplit_nr[i]);
+
+ if (o->bssplit_nr[i]) {
+ unsigned int bssplit_nr = o->bssplit_nr[i];
+
+ if (bssplit_nr > BSSPLIT_MAX) {
+ log_err("fio: BSSPLIT_MAX is too small\n");
+ bssplit_nr = BSSPLIT_MAX;
+ }
+ for (j = 0; j < bssplit_nr; j++) {
+ top->bssplit[i][j].bs = cpu_to_le32(o->bssplit[i][j].bs);
+ top->bssplit[i][j].perc = cpu_to_le32(o->bssplit[i][j].perc);
+ }
+ }
+
+ top->rwmix[i] = cpu_to_le32(o->rwmix[i]);
+ top->rate[i] = cpu_to_le32(o->rate[i]);
+ top->ratemin[i] = cpu_to_le32(o->ratemin[i]);
+ top->rate_iops[i] = cpu_to_le32(o->rate_iops[i]);
+ top->rate_iops_min[i] = cpu_to_le32(o->rate_iops_min[i]);
+ }
+
+ memcpy(top->verify_pattern, o->verify_pattern, MAX_PATTERN_SIZE);
+
+ top->size = __cpu_to_le64(o->size);
+ top->verify_backlog = __cpu_to_le64(o->verify_backlog);
+ top->start_delay = __cpu_to_le64(o->start_delay);
+ top->timeout = __cpu_to_le64(o->timeout);
+ top->ramp_time = __cpu_to_le64(o->ramp_time);
+ top->zone_range = __cpu_to_le64(o->zone_range);
+ top->zone_size = __cpu_to_le64(o->zone_size);
+ top->zone_skip = __cpu_to_le64(o->zone_skip);
+ top->ddir_seq_add = __cpu_to_le64(o->ddir_seq_add);
+ top->file_size_low = __cpu_to_le64(o->file_size_low);
+ top->file_size_high = __cpu_to_le64(o->file_size_high);
+ top->start_offset = __cpu_to_le64(o->start_offset);
+ top->trim_backlog = __cpu_to_le64(o->trim_backlog);
+
+ for (i = 0; i < FIO_IO_U_LIST_MAX_LEN; i++)
+ top->percentile_list[i].u.i = __cpu_to_le64(fio_double_to_uint64(o->percentile_list[i].u.f));
+#if 0
+ uint8_t cpumask[FIO_TOP_STR_MAX];
+ uint8_t verify_cpumask[FIO_TOP_STR_MAX];
+#endif
+
+}
+
+/*
+ * Basic conversion test. We'd really need to fill in more of the options
+ * to have a thorough test. Even better, we should auto-generate the
+ * converter functions...
+ */
+int fio_test_cconv(struct thread_options *__o)
+{
+ struct thread_options o;
+ struct thread_options_pack top1, top2;
+
+ memset(&top1, 0, sizeof(top1));
+ memset(&top2, 0, sizeof(top2));
+
+ convert_thread_options_to_net(&top1, __o);
+ memset(&o, 0, sizeof(o));
+ convert_thread_options_to_cpu(&o, &top1);
+ convert_thread_options_to_net(&top2, &o);
+
+ return memcmp(&top1, &top2, sizeof(top1));
+}
#include <signal.h>
#include "fio.h"
+#include "client.h"
#include "server.h"
#include "flist.h"
#include "hash.h"
-struct client_eta {
- struct jobs_eta eta;
- unsigned int pending;
-};
-
-struct fio_client {
- struct flist_head list;
- struct flist_head hash_list;
- struct flist_head arg_list;
- union {
- struct sockaddr_in addr;
- struct sockaddr_in6 addr6;
- struct sockaddr_un addr_un;
- };
- char *hostname;
- int port;
- int fd;
- unsigned int refs;
-
- char *name;
-
- int state;
-
- int skip_newline;
- int is_sock;
- int disk_stats_shown;
- unsigned int jobs;
- int error;
- int ipv6;
- int sent_job;
-
- struct flist_head eta_list;
- struct client_eta *eta_in_flight;
-
- struct flist_head cmd_list;
-
- uint16_t argc;
- char **argv;
+static void handle_du(struct fio_client *client, struct fio_net_cmd *cmd);
+static void handle_ts(struct fio_client *client, struct fio_net_cmd *cmd);
+static void handle_gs(struct fio_client *client, struct fio_net_cmd *cmd);
+static void handle_probe(struct fio_client *client, struct fio_net_cmd *cmd);
+static void handle_text(struct fio_client *client, struct fio_net_cmd *cmd);
+static void handle_stop(struct fio_client *client, struct fio_net_cmd *cmd);
+static void handle_start(struct fio_client *client, struct fio_net_cmd *cmd);
+
+struct client_ops fio_client_ops = {
+ .text_op = handle_text,
+ .disk_util = handle_du,
+ .thread_status = handle_ts,
+ .group_stats = handle_gs,
+ .stop = handle_stop,
+ .start = handle_start,
+ .eta = display_thread_status,
+ .probe = handle_probe,
+ .eta_msec = FIO_CLIENT_DEF_ETA_MSEC,
};
static struct timeval eta_tv;
-enum {
- Client_created = 0,
- Client_connected = 1,
- Client_started = 2,
- Client_running = 3,
- Client_stopped = 4,
- Client_exited = 5,
-};
-
static FLIST_HEAD(client_list);
static FLIST_HEAD(eta_list);
static FLIST_HEAD(arg_list);
-static struct thread_stat client_ts;
-static struct group_run_stats client_gs;
-static int sum_stat_clients;
+struct thread_stat client_ts;
+struct group_run_stats client_gs;
+int sum_stat_clients;
+
static int sum_stat_nr;
#define FIO_CLIENT_HASH_BITS 7
#define FIO_CLIENT_HASH_MASK (FIO_CLIENT_HASH_SZ - 1)
static struct flist_head client_hash[FIO_CLIENT_HASH_SZ];
-static int handle_client(struct fio_client *client);
-static void dec_jobs_eta(struct client_eta *eta);
-
static void fio_client_add_hash(struct fio_client *client)
{
int bucket = hash_long(client->fd, FIO_CLIENT_HASH_BITS);
flist_for_each(entry, &client_hash[bucket]) {
client = flist_entry(entry, struct fio_client, hash_list);
- if (client->fd == fd)
- return fio_get_client(client);
+ if (client->fd == fd) {
+ client->refs++;
+ return client;
+ }
}
return NULL;
}
-static void remove_client(struct fio_client *client)
+void fio_put_client(struct fio_client *client)
{
- assert(client->refs);
-
if (--client->refs)
return;
+ free(client->hostname);
+ if (client->argv)
+ free(client->argv);
+ if (client->name)
+ free(client->name);
+
+ free(client);
+}
+
+static void remove_client(struct fio_client *client)
+{
+ assert(client->refs);
+
dprint(FD_NET, "client: removed <%s>\n", client->hostname);
- flist_del(&client->list);
+
+ if (!flist_empty(&client->list))
+ flist_del_init(&client->list);
fio_client_remove_hash(client);
if (!flist_empty(&client->eta_list)) {
flist_del_init(&client->eta_list);
- dec_jobs_eta(client->eta_in_flight);
+ fio_client_dec_jobs_eta(client->eta_in_flight, client->ops->eta);
}
- free(client->hostname);
- if (client->argv)
- free(client->argv);
- if (client->name)
- free(client->name);
-
- free(client);
nr_clients--;
sum_stat_clients--;
+
+ fio_put_client(client);
}
-static void put_client(struct fio_client *client)
+struct fio_client *fio_get_client(struct fio_client *client)
{
- remove_client(client);
+ client->refs++;
+ return client;
}
static void __fio_client_add_cmd_option(struct fio_client *client,
}
}
-int fio_client_add(const char *hostname, void **cookie)
+struct fio_client *fio_client_add_explicit(struct client_ops *ops,
+ const char *hostname, int type,
+ int port)
+{
+ struct fio_client *client;
+
+ client = malloc(sizeof(*client));
+ memset(client, 0, sizeof(*client));
+
+ INIT_FLIST_HEAD(&client->list);
+ INIT_FLIST_HEAD(&client->hash_list);
+ INIT_FLIST_HEAD(&client->arg_list);
+ INIT_FLIST_HEAD(&client->eta_list);
+ INIT_FLIST_HEAD(&client->cmd_list);
+
+ client->hostname = strdup(hostname);
+
+ if (type == Fio_client_socket)
+ client->is_sock = 1;
+ else {
+ int ipv6;
+
+ ipv6 = type == Fio_client_ipv6;
+ if (fio_server_parse_host(hostname, &ipv6,
+ &client->addr.sin_addr,
+ &client->addr6.sin6_addr))
+ goto err;
+
+ client->port = port;
+ }
+
+ client->fd = -1;
+ client->ops = ops;
+ client->refs = 1;
+
+ __fio_client_add_cmd_option(client, "fio");
+
+ flist_add(&client->list, &client_list);
+ nr_clients++;
+ dprint(FD_NET, "client: added <%s>\n", client->hostname);
+ return client;
+err:
+ free(client);
+ return NULL;
+}
+
+int fio_client_add(struct client_ops *ops, const char *hostname, void **cookie)
{
struct fio_client *existing = *cookie;
struct fio_client *client;
return -1;
client->fd = -1;
+ client->ops = ops;
client->refs = 1;
__fio_client_add_cmd_option(client, "fio");
return 0;
}
+static void probe_client(struct fio_client *client)
+{
+ dprint(FD_NET, "client: send probe\n");
+
+ fio_net_send_simple_cmd(client->fd, FIO_NET_CMD_PROBE, 0, &client->cmd_list);
+}
+
static int fio_client_connect_ip(struct fio_client *client)
{
struct sockaddr *addr;
fd = socket(domain, SOCK_STREAM, 0);
if (fd < 0) {
+ int ret = -errno;
+
log_err("fio: socket: %s\n", strerror(errno));
- return -1;
+ return ret;
}
if (connect(fd, addr, socklen) < 0) {
+ int ret = -errno;
+
log_err("fio: connect: %s\n", strerror(errno));
log_err("fio: failed to connect to %s:%u\n", client->hostname,
client->port);
close(fd);
- return -1;
+ return ret;
}
return fd;
fd = socket(AF_UNIX, SOCK_STREAM, 0);
if (fd < 0) {
+ int ret = -errno;
+
log_err("fio: socket: %s\n", strerror(errno));
- return -1;
+ return ret;
}
len = sizeof(addr->sun_family) + strlen(addr->sun_path) + 1;
if (connect(fd, (struct sockaddr *) addr, len) < 0) {
+ int ret = -errno;
+
log_err("fio: connect; %s\n", strerror(errno));
close(fd);
- return -1;
+ return ret;
}
return fd;
}
-static int fio_client_connect(struct fio_client *client)
+int fio_client_connect(struct fio_client *client)
{
int fd;
dprint(FD_NET, "client: %s connected %d\n", client->hostname, fd);
if (fd < 0)
- return 1;
+ return fd;
client->fd = fd;
fio_client_add_hash(client);
client->state = Client_connected;
+
+ probe_client(client);
return 0;
}
+void fio_client_terminate(struct fio_client *client)
+{
+ fio_net_send_simple_cmd(client->fd, FIO_NET_CMD_QUIT, 0, NULL);
+}
+
void fio_clients_terminate(void)
{
struct flist_head *entry;
flist_for_each(entry, &client_list) {
client = flist_entry(entry, struct fio_client, list);
-
- fio_net_send_simple_cmd(client->fd, FIO_NET_CMD_QUIT, 0, NULL);
+ fio_client_terminate(client);
}
}
sigaction(SIGTERM, &act, NULL);
}
-static void probe_client(struct fio_client *client)
-{
- dprint(FD_NET, "client: send probe\n");
-
- fio_net_send_simple_cmd(client->fd, FIO_NET_CMD_PROBE, 0, &client->cmd_list);
-}
-
static int send_client_cmd_line(struct fio_client *client)
{
struct cmd_single_line_pdu *cslp;
continue;
}
- probe_client(client);
-
if (client->argc > 1)
send_client_cmd_line(client);
}
return !nr_clients;
}
+int fio_start_client(struct fio_client *client)
+{
+ dprint(FD_NET, "client: start %s\n", client->hostname);
+ return fio_net_send_simple_cmd(client->fd, FIO_NET_CMD_RUN, 0, NULL);
+}
+
+int fio_start_all_clients(void)
+{
+ struct fio_client *client;
+ struct flist_head *entry, *tmp;
+ int ret;
+
+ dprint(FD_NET, "client: start all\n");
+
+ flist_for_each_safe(entry, tmp, &client_list) {
+ client = flist_entry(entry, struct fio_client, list);
+
+ ret = fio_start_client(client);
+ if (ret) {
+ remove_client(client);
+ continue;
+ }
+ }
+
+ return flist_empty(&client_list);
+}
+
/*
* Send file contents to server backend. We could use sendfile(), but to remain
* more portable lets just read/write the darn thing.
*/
-static int fio_client_send_ini(struct fio_client *client, const char *filename)
+static int __fio_client_send_ini(struct fio_client *client, const char *filename)
{
struct stat sb;
char *p, *buf;
fd = open(filename, O_RDONLY);
if (fd < 0) {
+ int ret = -errno;
+
log_err("fio: job file <%s> open: %s\n", filename, strerror(errno));
- return 1;
+ return ret;
}
if (fstat(fd, &sb) < 0) {
+ int ret = -errno;
+
log_err("fio: job file stat: %s\n", strerror(errno));
close(fd);
- return 1;
+ return ret;
}
buf = malloc(sb.st_size);
return ret;
}
+int fio_client_send_ini(struct fio_client *client, const char *filename)
+{
+ int ret;
+
+ ret = __fio_client_send_ini(client, filename);
+ if (!ret)
+ client->sent_job = 1;
+
+ return ret;
+}
+
int fio_clients_send_ini(const char *filename)
{
struct fio_client *client;
if (fio_client_send_ini(client, filename))
remove_client(client);
-
- client->sent_job = 1;
}
return !nr_clients;
dst->groupid = le32_to_cpu(src->groupid);
}
-static void handle_ts(struct fio_net_cmd *cmd)
+static void handle_ts(struct fio_client *client, struct fio_net_cmd *cmd)
{
struct cmd_ts_pdu *p = (struct cmd_ts_pdu *) cmd->payload;
- convert_ts(&p->ts, &p->ts);
- convert_gs(&p->rs, &p->rs);
-
show_thread_status(&p->ts, &p->rs);
if (sum_stat_clients == 1)
}
}
-static void handle_gs(struct fio_net_cmd *cmd)
+static void handle_gs(struct fio_client *client, struct fio_net_cmd *cmd)
{
struct group_run_stats *gs = (struct group_run_stats *) cmd->payload;
- convert_gs(gs, gs);
show_group_stats(gs);
}
+static void handle_text(struct fio_client *client, struct fio_net_cmd *cmd)
+{
+ struct cmd_text_pdu *pdu = (struct cmd_text_pdu *) cmd->payload;
+ const char *buf = (const char *) pdu->buf;
+ const char *name;
+ int fio_unused ret;
+
+ name = client->name ? client->name : client->hostname;
+
+ if (!client->skip_newline)
+ fprintf(f_out, "<%s> ", name);
+ ret = fwrite(buf, pdu->buf_len, 1, f_out);
+ fflush(f_out);
+ client->skip_newline = strchr(buf, '\n') == NULL;
+}
+
static void convert_agg(struct disk_util_agg *agg)
{
int i;
{
struct cmd_du_pdu *du = (struct cmd_du_pdu *) cmd->payload;
- convert_dus(&du->dus);
- convert_agg(&du->agg);
-
if (!client->disk_stats_shown) {
client->disk_stats_shown = 1;
log_info("\nDisk stats (read/write):\n");
je->nr_ramp = le32_to_cpu(je->nr_ramp);
je->nr_pending = le32_to_cpu(je->nr_pending);
je->files_open = le32_to_cpu(je->files_open);
- je->m_rate = le32_to_cpu(je->m_rate);
- je->t_rate = le32_to_cpu(je->t_rate);
- je->m_iops = le32_to_cpu(je->m_iops);
- je->t_iops = le32_to_cpu(je->t_iops);
for (i = 0; i < 2; i++) {
+ je->m_rate[i] = le32_to_cpu(je->m_rate[i]);
+ je->t_rate[i] = le32_to_cpu(je->t_rate[i]);
+ je->m_iops[i] = le32_to_cpu(je->m_iops[i]);
+ je->t_iops[i] = le32_to_cpu(je->t_iops[i]);
je->rate[i] = le32_to_cpu(je->rate[i]);
je->iops[i] = le32_to_cpu(je->iops[i]);
}
je->elapsed_sec = le64_to_cpu(je->elapsed_sec);
je->eta_sec = le64_to_cpu(je->eta_sec);
+ je->nr_threads = le32_to_cpu(je->nr_threads);
}
-static void sum_jobs_eta(struct jobs_eta *dst, struct jobs_eta *je)
+void fio_client_sum_jobs_eta(struct jobs_eta *dst, struct jobs_eta *je)
{
int i;
dst->nr_ramp += je->nr_ramp;
dst->nr_pending += je->nr_pending;
dst->files_open += je->files_open;
- dst->m_rate += je->m_rate;
- dst->t_rate += je->t_rate;
- dst->m_iops += je->m_iops;
- dst->t_iops += je->t_iops;
for (i = 0; i < 2; i++) {
+ dst->m_rate[i] += je->m_rate[i];
+ dst->t_rate[i] += je->t_rate[i];
+ dst->m_iops[i] += je->m_iops[i];
+ dst->t_iops[i] += je->t_iops[i];
dst->rate[i] += je->rate[i];
dst->iops[i] += je->iops[i];
}
if (je->eta_sec > dst->eta_sec)
dst->eta_sec = je->eta_sec;
+
+ dst->nr_threads += je->nr_threads;
+ /* we need to handle je->run_str too ... */
}
-static void dec_jobs_eta(struct client_eta *eta)
+void fio_client_dec_jobs_eta(struct client_eta *eta, client_eta_op eta_fn)
{
if (!--eta->pending) {
- display_thread_status(&eta->eta);
+ eta_fn(&eta->eta);
free(eta);
}
}
client->eta_in_flight = NULL;
flist_del_init(&client->eta_list);
- convert_jobs_eta(je);
- sum_jobs_eta(&eta->eta, je);
- dec_jobs_eta(eta);
+ if (client->ops->jobs_eta)
+ client->ops->jobs_eta(client, je);
+
+ fio_client_sum_jobs_eta(&eta->eta, je);
+ fio_client_dec_jobs_eta(eta, client->ops->eta);
}
static void handle_probe(struct fio_client *client, struct fio_net_cmd *cmd)
struct cmd_start_pdu *pdu = (struct cmd_start_pdu *) cmd->payload;
client->state = Client_started;
- client->jobs = le32_to_cpu(pdu->jobs);
+ client->jobs = pdu->jobs;
}
static void handle_stop(struct fio_client *client, struct fio_net_cmd *cmd)
+{
+ if (client->error)
+ log_info("client <%s>: exited with error %d\n", client->hostname, client->error);
+}
+
+static void convert_stop(struct fio_net_cmd *cmd)
{
struct cmd_end_pdu *pdu = (struct cmd_end_pdu *) cmd->payload;
- client->state = Client_stopped;
- client->error = le32_to_cpu(pdu->error);
+ pdu->error = le32_to_cpu(pdu->error);
+}
- if (client->error)
- log_info("client <%s>: exited with error %d\n", client->hostname, client->error);
+static void convert_text(struct fio_net_cmd *cmd)
+{
+ struct cmd_text_pdu *pdu = (struct cmd_text_pdu *) cmd->payload;
+
+ pdu->level = le32_to_cpu(pdu->level);
+ pdu->buf_len = le32_to_cpu(pdu->buf_len);
+ pdu->log_sec = le64_to_cpu(pdu->log_sec);
+ pdu->log_usec = le64_to_cpu(pdu->log_usec);
}
-static int handle_client(struct fio_client *client)
+int fio_handle_client(struct fio_client *client)
{
+ struct client_ops *ops = client->ops;
struct fio_net_cmd *cmd;
dprint(FD_NET, "client: handle %s\n", client->hostname);
if (!cmd)
return 0;
- dprint(FD_NET, "client: got cmd op %s from %s\n",
- fio_server_op(cmd->opcode), client->hostname);
+ dprint(FD_NET, "client: got cmd op %s from %s (pdu=%u)\n",
+ fio_server_op(cmd->opcode), client->hostname, cmd->pdu_len);
switch (cmd->opcode) {
case FIO_NET_CMD_QUIT:
+ if (ops->quit)
+ ops->quit(client);
remove_client(client);
free(cmd);
break;
- case FIO_NET_CMD_TEXT: {
- const char *buf = (const char *) cmd->payload;
- const char *name;
- int fio_unused ret;
-
- name = client->name ? client->name : client->hostname;
-
- if (!client->skip_newline)
- fprintf(f_out, "<%s> ", name);
- ret = fwrite(buf, cmd->pdu_len, 1, f_out);
- fflush(f_out);
- client->skip_newline = strchr(buf, '\n') == NULL;
+ case FIO_NET_CMD_TEXT:
+ convert_text(cmd);
+ ops->text_op(client, cmd);
free(cmd);
break;
- }
- case FIO_NET_CMD_DU:
- handle_du(client, cmd);
+ case FIO_NET_CMD_DU: {
+ struct cmd_du_pdu *du = (struct cmd_du_pdu *) cmd->payload;
+
+ convert_dus(&du->dus);
+ convert_agg(&du->agg);
+
+ ops->disk_util(client, cmd);
free(cmd);
break;
- case FIO_NET_CMD_TS:
- handle_ts(cmd);
+ }
+ case FIO_NET_CMD_TS: {
+ struct cmd_ts_pdu *p = (struct cmd_ts_pdu *) cmd->payload;
+
+ convert_ts(&p->ts, &p->ts);
+ convert_gs(&p->rs, &p->rs);
+
+ ops->thread_status(client, cmd);
free(cmd);
break;
- case FIO_NET_CMD_GS:
- handle_gs(cmd);
+ }
+ case FIO_NET_CMD_GS: {
+ struct group_run_stats *gs = (struct group_run_stats *) cmd->payload;
+
+ convert_gs(gs, gs);
+
+ ops->group_stats(client, cmd);
free(cmd);
break;
- case FIO_NET_CMD_ETA:
+ }
+ case FIO_NET_CMD_ETA: {
+ struct jobs_eta *je = (struct jobs_eta *) cmd->payload;
+
remove_reply_cmd(client, cmd);
+ convert_jobs_eta(je);
handle_eta(client, cmd);
free(cmd);
break;
+ }
case FIO_NET_CMD_PROBE:
remove_reply_cmd(client, cmd);
- handle_probe(client, cmd);
+ ops->probe(client, cmd);
free(cmd);
break;
- case FIO_NET_CMD_RUN:
+ case FIO_NET_CMD_SERVER_START:
client->state = Client_running;
+ if (ops->job_start)
+ ops->job_start(client, cmd);
+ free(cmd);
+ break;
+ case FIO_NET_CMD_START: {
+ struct cmd_start_pdu *pdu = (struct cmd_start_pdu *) cmd->payload;
+
+ pdu->jobs = le32_to_cpu(pdu->jobs);
+ ops->start(client, cmd);
free(cmd);
break;
- case FIO_NET_CMD_START:
- handle_start(client, cmd);
+ }
+ case FIO_NET_CMD_STOP: {
+ struct cmd_end_pdu *pdu = (struct cmd_end_pdu *) cmd->payload;
+
+ convert_stop(cmd);
+ client->state = Client_stopped;
+ client->error = pdu->error;
+ ops->stop(client, cmd);
free(cmd);
break;
- case FIO_NET_CMD_STOP:
- handle_stop(client, cmd);
+ }
+ case FIO_NET_CMD_ADD_JOB:
+ if (ops->add_job)
+ ops->add_job(client, cmd);
free(cmd);
break;
default:
return 1;
}
-static void request_client_etas(void)
+static void request_client_etas(struct client_ops *ops)
{
struct fio_client *client;
struct flist_head *entry;
}
while (skipped--)
- dec_jobs_eta(eta);
+ fio_client_dec_jobs_eta(eta, ops->eta);
dprint(FD_NET, "client: requested eta tag %p\n", eta);
}
return flist_empty(&client->cmd_list) && ret;
}
-static int fio_client_timed_out(void)
+static int fio_check_clients_timed_out(void)
{
struct fio_client *client;
struct flist_head *entry, *tmp;
if (!client_check_cmd_timeout(client, &tv))
continue;
- log_err("fio: client %s timed out\n", client->hostname);
+ if (client->ops->timed_out)
+ client->ops->timed_out(client);
+ else
+ log_err("fio: client %s timed out\n", client->hostname);
+
remove_client(client);
ret = 1;
}
return ret;
}
-int fio_handle_clients(void)
+int fio_handle_clients(struct client_ops *ops)
{
struct pollfd *pfds;
int i, ret = 0, retval = 0;
flist_for_each_safe(entry, tmp, &client_list) {
client = flist_entry(entry, struct fio_client, list);
- if (!client->sent_job &&
+ if (!client->sent_job && !client->ops->stay_connected &&
flist_empty(&client->cmd_list)) {
remove_client(client);
continue;
struct timeval tv;
gettimeofday(&tv, NULL);
- if (mtime_since(&eta_tv, &tv) >= 900) {
- request_client_etas();
+ if (mtime_since(&eta_tv, &tv) >= ops->eta_msec) {
+ request_client_etas(ops);
memcpy(&eta_tv, &tv, sizeof(tv));
- if (fio_client_timed_out())
+ if (fio_check_clients_timed_out())
break;
}
log_err("fio: unknown client fd %d\n", pfds[i].fd);
continue;
}
- if (!handle_client(client)) {
+ if (!fio_handle_client(client)) {
log_info("client: host=%s disconnected\n",
client->hostname);
remove_client(client);
retval = 1;
} else if (client->error)
retval = 1;
- put_client(client);
+ fio_put_client(client);
}
}
}
#endif
+ /*
+ * For fully compressible data, just zero them at init time.
+ * It's faster than repeatedly filling it.
+ */
+ if (td->o.compress_percentage == 100) {
+ td->o.zero_buffers = 1;
+ td->o.compress_percentage = 0;
+ }
+
return ret;
}
* to make sure we don't have conflicts, and initializes various
* members of td.
*/
-static int add_job(struct thread_data *td, const char *jobname, int job_add_num)
+static int add_job(struct thread_data *td, const char *jobname, int job_add_num,
+ int recursed)
{
- const char *ddir_str[] = { NULL, "read", "write", "rw", NULL,
- "randread", "randwrite", "randrw" };
unsigned int i;
char fname[PATH_MAX];
int numjobs, file_alloced;
if (!terse_output) {
if (!job_add_num) {
+ if (is_backend && !recursed)
+ fio_server_send_add_job(&td->o, td->io_ops->name);
+
if (!strcmp(td->io_ops->name, "cpuio")) {
log_info("%s: ioengine=cpu, cpuload=%u,"
" cpucycle=%u\n", td->o.name,
log_info("%s: (g=%d): rw=%s, bs=%s-%s/%s-%s,"
" ioengine=%s, iodepth=%u\n",
td->o.name, td->groupid,
- ddir_str[td->o.td_ddir],
+ ddir_str(td->o.td_ddir),
c1, c2, c3, c4,
td->io_ops->name,
td->o.iodepth);
job_add_num = numjobs - 1;
- if (add_job(td_new, jobname, job_add_num))
+ if (add_job(td_new, jobname, job_add_num, 1))
goto err;
}
if (!strncmp(o[i], "name", 4)) {
in_global = 0;
if (td)
- add_job(td, jobname, 0);
+ add_job(td, jobname, 0, 0);
td = NULL;
sprintf(jobname, "%s", o[i] + 5);
}
}
if (td)
- add_job(td, jobname, 0);
+ add_job(td, jobname, 0, 0);
}
static int skip_this_section(const char *name)
for (i = 0; i < num_opts; i++)
log_info("--%s ", opts[i]);
- ret = add_job(td, name, 0);
+ ret = add_job(td, name, 0, 0);
} else {
log_err("fio: job %s dropped\n", name);
put_job(td);
#ifdef FIO_INC_DEBUG
struct debug_level debug_levels[] = {
- { .name = "process", .shift = FD_PROCESS, },
- { .name = "file", .shift = FD_FILE, },
- { .name = "io", .shift = FD_IO, },
- { .name = "mem", .shift = FD_MEM, },
- { .name = "blktrace", .shift = FD_BLKTRACE },
- { .name = "verify", .shift = FD_VERIFY },
- { .name = "random", .shift = FD_RANDOM },
- { .name = "parse", .shift = FD_PARSE },
- { .name = "diskutil", .shift = FD_DISKUTIL },
- { .name = "job", .shift = FD_JOB },
- { .name = "mutex", .shift = FD_MUTEX },
- { .name = "profile", .shift = FD_PROFILE },
- { .name = "time", .shift = FD_TIME },
- { .name = "net", .shift = FD_NET },
+ /*
+ * Runtime debug categories (--debug=<name>); each entry now carries a
+ * .help string — presumably surfaced by the option help output, confirm
+ * against the consumer of this table.
+ */
+ { .name = "process",
+ .help = "Process creation/exit logging",
+ .shift = FD_PROCESS,
+ },
+ { .name = "file",
+ .help = "File related action logging",
+ .shift = FD_FILE,
+ },
+ { .name = "io",
+ .help = "IO and IO engine action logging (offsets, queue, completions, etc)",
+ .shift = FD_IO,
+ },
+ { .name = "mem",
+ .help = "Memory allocation/freeing logging",
+ .shift = FD_MEM,
+ },
+ { .name = "blktrace",
+ .help = "blktrace action logging",
+ .shift = FD_BLKTRACE,
+ },
+ { .name = "verify",
+ .help = "IO verification action logging",
+ .shift = FD_VERIFY,
+ },
+ { .name = "random",
+ .help = "Random generation logging",
+ .shift = FD_RANDOM,
+ },
+ { .name = "parse",
+ .help = "Parser logging",
+ .shift = FD_PARSE,
+ },
+ { .name = "diskutil",
+ .help = "Disk utility logging actions",
+ .shift = FD_DISKUTIL,
+ },
+ { .name = "job",
+ .help = "Logging related to creating/destroying jobs",
+ .shift = FD_JOB,
+ },
+ { .name = "mutex",
+ .help = "Mutex logging",
+ .shift = FD_MUTEX
+ },
+ { .name = "profile",
+ .help = "Logging related to profiles",
+ .shift = FD_PROFILE,
+ },
+ { .name = "time",
+ .help = "Logging related to time keeping functions",
+ .shift = FD_TIME,
+ },
+ { .name = "net",
+ .help = "Network logging",
+ .shift = FD_NET,
+ },
{ .name = NULL, },
};
char *val = optarg;
if (!strncmp(opt, "name", 4) && td) {
- ret = add_job(td, td->o.name ?: "fio", 0);
+ ret = add_job(td, td->o.name ?: "fio", 0, 0);
if (ret)
return 0;
td = NULL;
exit_val = 1;
break;
}
- if (fio_client_add(optarg, &cur_client)) {
+ if (fio_client_add(&fio_client_ops, optarg, &cur_client)) {
log_err("fio: failed adding client %s\n", optarg);
do_exit++;
exit_val = 1;
if (td) {
if (!ret)
- ret = add_job(td, td->o.name ?: "fio", 0);
+ ret = add_job(td, td->o.name ?: "fio", 0, 0);
}
while (!ret && optind < argc) {
return ini_idx;
}
-int parse_options(int argc, char *argv[])
+int fio_init_options(void)
{
- int job_files, i;
-
f_out = stdout;
f_err = stderr;
if (fill_def_thread())
return 1;
+ return 0;
+}
+
+extern int fio_check_options(struct thread_options *);
+
+int parse_options(int argc, char *argv[])
+{
+ int job_files, i;
+
+ if (fio_init_options())
+ return 1;
+ if (fio_test_cconv(&def_thread.o))
+ log_err("fio: failed internal cconv test\n");
+
job_files = parse_cmd_line(argc, argv);
if (job_files > 0) {
#ifndef FIO_IOENGINE_H
#define FIO_IOENGINE_H
+#include "debug.h"
+
#define FIO_IOOPS_VERSION 13
enum {
IO_U_F_BUSY_OK = 1 << 4,
IO_U_F_TRIMMED = 1 << 5,
IO_U_F_BARRIER = 1 << 6,
+ IO_U_F_VER_LIST = 1 << 7,
};
/*
extern void io_u_queued(struct thread_data *, struct io_u *);
extern void io_u_log_error(struct thread_data *, struct io_u *);
extern void io_u_mark_depth(struct thread_data *, unsigned int);
- extern void io_u_fill_buffer(struct thread_data *td, struct io_u *, unsigned int);
+ extern void io_u_fill_buffer(struct thread_data *td, struct io_u *, unsigned int, unsigned int);
void io_u_mark_complete(struct thread_data *, unsigned int);
void io_u_mark_submit(struct thread_data *, unsigned int);
qsort(bssplit, td->o.bssplit_nr[ddir], sizeof(struct bssplit), bs_cmp);
td->o.bssplit[ddir] = bssplit;
return 0;
-
}
static int str_bssplit_cb(void *data, const char *input)
.type = FIO_OPT_STR_STORE,
.off1 = td_var_offset(description),
.help = "Text job description",
+ .category = FIO_OPT_G_DESC,
},
{
.name = "name",
.type = FIO_OPT_STR_STORE,
.off1 = td_var_offset(name),
.help = "Name of this job",
+ .category = FIO_OPT_G_DESC,
},
{
.name = "directory",
.off1 = td_var_offset(directory),
.cb = str_directory_cb,
.help = "Directory to store files in",
+ .category = FIO_OPT_G_FILE,
},
{
.name = "filename",
.cb = str_filename_cb,
.prio = -1, /* must come after "directory" */
.help = "File(s) to use for the workload",
+ .category = FIO_OPT_G_FILE,
},
{
.name = "kb_base",
.prio = 1,
.def = "1024",
.help = "How many bytes per KB for reporting (1000 or 1024)",
+ .category = FIO_OPT_G_MISC,
},
{
.name = "lockfile",
.help = "Lock file when doing IO to it",
.parent = "filename",
.def = "none",
+ .category = FIO_OPT_G_FILE,
.posval = {
{ .ival = "none",
.oval = FILE_LOCK_NONE,
.off1 = td_var_offset(opendir),
.cb = str_opendir_cb,
.help = "Recursively add files from this directory and down",
+ .category = FIO_OPT_G_FILE,
},
{
.name = "rw",
.help = "IO direction",
.def = "read",
.verify = rw_verify,
+ .category = FIO_OPT_G_IO_DDIR,
.posval = {
{ .ival = "read",
.oval = TD_DDIR_READ,
.off1 = td_var_offset(rw_seq),
.help = "IO offset generator modifier",
.def = "sequential",
+ .category = FIO_OPT_G_IO_DDIR,
.posval = {
{ .ival = "sequential",
.oval = RW_SEQ_SEQ,
.off1 = td_var_offset(ioengine),
.help = "IO engine to use",
.def = FIO_PREFERRED_ENGINE,
+ .category = FIO_OPT_G_IO,
.posval = {
{ .ival = "sync",
.help = "Use read/write",
.help = "Number of IO buffers to keep in flight",
.minval = 1,
.def = "1",
+ .category = FIO_OPT_G_IO,
},
{
.name = "iodepth_batch",
.parent = "iodepth",
.minval = 1,
.def = "1",
+ .category = FIO_OPT_G_IO,
},
{
.name = "iodepth_batch_complete",
.parent = "iodepth",
.minval = 0,
.def = "1",
+ .category = FIO_OPT_G_IO,
},
{
.name = "iodepth_low",
.off1 = td_var_offset(iodepth_low),
.help = "Low water mark for queuing depth",
.parent = "iodepth",
+ .category = FIO_OPT_G_IO,
},
{
.name = "size",
.type = FIO_OPT_STR_VAL,
.cb = str_size_cb,
.help = "Total size of device or files",
+ .category = FIO_OPT_G_IO,
},
{
.name = "fill_device",
.off1 = td_var_offset(fill_device),
.help = "Write until an ENOSPC error occurs",
.def = "0",
+ .category = FIO_OPT_G_IO,
},
{
.name = "filesize",
.off2 = td_var_offset(file_size_high),
.minval = 1,
.help = "Size of individual files",
+ .category = FIO_OPT_G_IO | FIO_OPT_G_FILE,
},
{
.name = "offset",
.off1 = td_var_offset(start_offset),
.help = "Start IO from this offset",
.def = "0",
+ .category = FIO_OPT_G_IO,
},
{
.name = "bs",
.help = "Block size unit",
.def = "4k",
.parent = "rw",
+ .category = FIO_OPT_G_IO,
},
{
.name = "ba",
.minval = 1,
.help = "IO block offset alignment",
.parent = "rw",
+ .category = FIO_OPT_G_IO | FIO_OPT_G_IO_BUF,
},
{
.name = "bsrange",
.minval = 1,
.help = "Set block size range (in more detail than bs)",
.parent = "rw",
+ .category = FIO_OPT_G_IO,
},
{
.name = "bssplit",
.cb = str_bssplit_cb,
.help = "Set a specific mix of block sizes",
.parent = "rw",
+ .category = FIO_OPT_G_IO,
},
{
.name = "bs_unaligned",
.off1 = td_var_offset(bs_unaligned),
.help = "Don't sector align IO buffer sizes",
.parent = "rw",
+ .category = FIO_OPT_G_IO,
},
{
.name = "randrepeat",
.help = "Use repeatable random IO pattern",
.def = "1",
.parent = "rw",
+ .category = FIO_OPT_G_IO | FIO_OPT_G_RAND,
},
{
.name = "use_os_rand",
.help = "Set to use OS random generator",
.def = "0",
.parent = "rw",
+ .category = FIO_OPT_G_RAND,
},
{
.name = "norandommap",
.off1 = td_var_offset(norandommap),
.help = "Accept potential duplicate random blocks",
.parent = "rw",
+ .category = FIO_OPT_G_RAND,
},
{
.name = "softrandommap",
.help = "Set norandommap if randommap allocation fails",
.parent = "norandommap",
.def = "0",
+ .category = FIO_OPT_G_RAND,
},
{
.name = "nrfiles",
.off1 = td_var_offset(nr_files),
.help = "Split job workload between this number of files",
.def = "1",
+ .category = FIO_OPT_G_FILE,
},
{
.name = "openfiles",
.type = FIO_OPT_INT,
.off1 = td_var_offset(open_files),
.help = "Number of files to keep open at the same time",
+ .category = FIO_OPT_G_FILE,
},
{
.name = "file_service_type",
.off1 = td_var_offset(file_service_type),
.help = "How to select which file to service next",
.def = "roundrobin",
+ .category = FIO_OPT_G_FILE,
.posval = {
{ .ival = "random",
.oval = FIO_FSERVICE_RANDOM,
.off1 = td_var_offset(fallocate_mode),
.help = "Whether pre-allocation is performed when laying out files",
.def = "posix",
+ .category = FIO_OPT_G_FILE,
.posval = {
{ .ival = "none",
.oval = FIO_FALLOCATE_NONE,
.off1 = td_var_offset(fadvise_hint),
.help = "Use fadvise() to advise the kernel on IO pattern",
.def = "1",
+ .category = FIO_OPT_G_FILE,
},
{
.name = "fsync",
.off1 = td_var_offset(fsync_blocks),
.help = "Issue fsync for writes every given number of blocks",
.def = "0",
+ .category = FIO_OPT_G_FILE,
},
{
.name = "fdatasync",
.off1 = td_var_offset(fdatasync_blocks),
.help = "Issue fdatasync for writes every given number of blocks",
.def = "0",
+ .category = FIO_OPT_G_FILE,
},
{
.name = "write_barrier",
.off1 = td_var_offset(barrier_blocks),
.help = "Make every Nth write a barrier write",
.def = "0",
+ .category = FIO_OPT_G_IO,
},
#ifdef FIO_HAVE_SYNC_FILE_RANGE
{
.cb = str_sfr_cb,
.off1 = td_var_offset(sync_file_range),
.help = "Use sync_file_range()",
+ .category = FIO_OPT_G_FILE,
},
#endif
{
.off1 = td_var_offset(odirect),
.help = "Use O_DIRECT IO (negates buffered)",
.def = "0",
+ .category = FIO_OPT_G_IO,
},
{
.name = "buffered",
.neg = 1,
.help = "Use buffered IO (negates direct)",
.def = "1",
+ .category = FIO_OPT_G_IO,
},
{
.name = "overwrite",
.off1 = td_var_offset(overwrite),
.help = "When writing, set whether to overwrite current data",
.def = "0",
+ .category = FIO_OPT_G_IO | FIO_OPT_G_FILE,
},
{
.name = "loops",
.off1 = td_var_offset(loops),
.help = "Number of times to run the job",
.def = "1",
+ .category = FIO_OPT_G_MISC,
},
{
.name = "numjobs",
.off1 = td_var_offset(numjobs),
.help = "Duplicate this job this many times",
.def = "1",
+ .category = FIO_OPT_G_MISC,
},
{
.name = "startdelay",
.off1 = td_var_offset(start_delay),
.help = "Only start job when this period has passed",
.def = "0",
+ .category = FIO_OPT_G_MISC,
},
{
.name = "runtime",
.off1 = td_var_offset(timeout),
.help = "Stop workload when this amount of time has passed",
.def = "0",
+ .category = FIO_OPT_G_MISC,
},
{
.name = "time_based",
.type = FIO_OPT_STR_SET,
.off1 = td_var_offset(time_based),
.help = "Keep running until runtime/timeout is met",
+ .category = FIO_OPT_G_MISC,
},
{
.name = "ramp_time",
.type = FIO_OPT_STR_VAL_TIME,
.off1 = td_var_offset(ramp_time),
.help = "Ramp up time before measuring performance",
+ .category = FIO_OPT_G_MISC,
},
{
.name = "clocksource",
.cb = fio_clock_source_cb,
.off1 = td_var_offset(clocksource),
.help = "What type of timing source to use",
+ .category = FIO_OPT_G_OS,
.posval = {
{ .ival = "gettimeofday",
.oval = CS_GTOD,
.off1 = td_var_offset(mem_type),
.help = "Backing type for IO buffers",
.def = "malloc",
+ .category = FIO_OPT_G_IO_BUF | FIO_OPT_G_MEM,
.posval = {
{ .ival = "malloc",
.oval = MEM_MALLOC,
.help = "IO memory buffer offset alignment",
.def = "0",
.parent = "iomem",
+ .category = FIO_OPT_G_IO_BUF | FIO_OPT_G_MEM,
},
{
.name = "verify",
.help = "Verify data written",
.cb = str_verify_cb,
.def = "0",
+ .category = FIO_OPT_G_IO | FIO_OPT_G_VERIFY,
.posval = {
{ .ival = "0",
.oval = VERIFY_NONE,
.help = "Run verification stage after write",
.def = "1",
.parent = "verify",
+ .category = FIO_OPT_G_IO | FIO_OPT_G_VERIFY,
},
{
.name = "verifysort",
.help = "Sort written verify blocks for read back",
.def = "1",
.parent = "verify",
+ .category = FIO_OPT_G_IO | FIO_OPT_G_VERIFY,
},
{
.name = "verify_interval",
.minval = 2 * sizeof(struct verify_header),
.help = "Store verify buffer header every N bytes",
.parent = "verify",
+ .category = FIO_OPT_G_IO | FIO_OPT_G_VERIFY,
},
{
.name = "verify_offset",
.def = "0",
.cb = str_verify_offset_cb,
.parent = "verify",
+ .category = FIO_OPT_G_IO | FIO_OPT_G_VERIFY,
},
{
.name = "verify_pattern",
.cb = str_verify_pattern_cb,
.help = "Fill pattern for IO buffers",
.parent = "verify",
+ .category = FIO_OPT_G_IO | FIO_OPT_G_VERIFY,
},
{
.name = "verify_fatal",
.def = "0",
.help = "Exit on a single verify failure, don't continue",
.parent = "verify",
+ .category = FIO_OPT_G_IO | FIO_OPT_G_VERIFY | FIO_OPT_G_ERR,
},
{
.name = "verify_dump",
.def = "0",
.help = "Dump contents of good and bad blocks on failure",
.parent = "verify",
+ .category = FIO_OPT_G_IO | FIO_OPT_G_VERIFY | FIO_OPT_G_ERR,
},
{
.name = "verify_async",
.def = "0",
.help = "Number of async verifier threads to use",
.parent = "verify",
+ .category = FIO_OPT_G_IO | FIO_OPT_G_VERIFY,
},
{
.name = "verify_backlog",
.off1 = td_var_offset(verify_backlog),
.help = "Verify after this number of blocks are written",
.parent = "verify",
+ .category = FIO_OPT_G_IO | FIO_OPT_G_VERIFY,
},
{
.name = "verify_backlog_batch",
.off1 = td_var_offset(verify_batch),
.help = "Verify this number of IO blocks",
.parent = "verify",
+ .category = FIO_OPT_G_IO | FIO_OPT_G_VERIFY,
},
#ifdef FIO_HAVE_CPU_AFFINITY
{
.cb = str_verify_cpus_allowed_cb,
.help = "Set CPUs allowed for async verify threads",
.parent = "verify_async",
+ .category = FIO_OPT_G_OS | FIO_OPT_G_CPU | FIO_OPT_G_VERIFY,
},
#endif
#ifdef FIO_HAVE_TRIM
.help = "Number of verify blocks to discard/trim",
.parent = "verify",
.def = "0",
+ .category = FIO_OPT_G_IO,
},
{
.name = "trim_verify_zero",
.off1 = td_var_offset(trim_zero),
.parent = "trim_percentage",
.def = "1",
+ .category = FIO_OPT_G_IO,
},
{
.name = "trim_backlog",
.off1 = td_var_offset(trim_backlog),
.help = "Trim after this number of blocks are written",
.parent = "trim_percentage",
+ .category = FIO_OPT_G_IO,
},
{
.name = "trim_backlog_batch",
.off1 = td_var_offset(trim_batch),
.help = "Trim this number of IO blocks",
.parent = "trim_percentage",
+ .category = FIO_OPT_G_IO,
},
#endif
{
.type = FIO_OPT_STR_STORE,
.off1 = td_var_offset(write_iolog_file),
.help = "Store IO pattern to file",
+ .category = FIO_OPT_G_IO | FIO_OPT_G_LOG,
},
{
.name = "read_iolog",
.type = FIO_OPT_STR_STORE,
.off1 = td_var_offset(read_iolog_file),
.help = "Playback IO pattern from file",
+ .category = FIO_OPT_G_IO | FIO_OPT_G_LOG,
},
{
.name = "replay_no_stall",
.def = "0",
.parent = "read_iolog",
.help = "Playback IO pattern file as fast as possible without stalls",
+ .category = FIO_OPT_G_IO | FIO_OPT_G_LOG,
},
{
.name = "replay_redirect",
.off1 = td_var_offset(replay_redirect),
.parent = "read_iolog",
.help = "Replay all I/O onto this device, regardless of trace device",
+ .category = FIO_OPT_G_IO | FIO_OPT_G_LOG,
},
{
.name = "exec_prerun",
.type = FIO_OPT_STR_STORE,
.off1 = td_var_offset(exec_prerun),
.help = "Execute this file prior to running job",
+ .category = FIO_OPT_G_MISC | FIO_OPT_G_OS,
},
{
.name = "exec_postrun",
.type = FIO_OPT_STR_STORE,
.off1 = td_var_offset(exec_postrun),
.help = "Execute this file after running job",
+ .category = FIO_OPT_G_MISC | FIO_OPT_G_OS,
},
#ifdef FIO_HAVE_IOSCHED_SWITCH
{
.type = FIO_OPT_STR_STORE,
.off1 = td_var_offset(ioscheduler),
.help = "Use this IO scheduler on the backing device",
+ .category = FIO_OPT_G_OS | FIO_OPT_G_IO,
},
#endif
{
.off1 = td_var_offset(zone_size),
.help = "Amount of data to read per zone",
.def = "0",
+ .category = FIO_OPT_G_IO | FIO_OPT_G_ZONE,
},
{
.name = "zonerange",
.off1 = td_var_offset(zone_range),
.help = "Give size of an IO zone",
.def = "0",
+ .category = FIO_OPT_G_IO | FIO_OPT_G_ZONE,
},
{
.name = "zoneskip",
.off1 = td_var_offset(zone_skip),
.help = "Space between IO zones",
.def = "0",
+ .category = FIO_OPT_G_IO | FIO_OPT_G_ZONE,
},
{
.name = "lockmem",
.cb = str_lockmem_cb,
.help = "Lock down this amount of memory",
.def = "0",
+ .category = FIO_OPT_G_OS | FIO_OPT_G_MEM,
},
{
.name = "rwmixread",
.maxval = 100,
.help = "Percentage of mixed workload that is reads",
.def = "50",
+ .category = FIO_OPT_G_IO,
},
{
.name = "rwmixwrite",
.maxval = 100,
.help = "Percentage of mixed workload that is writes",
.def = "50",
+ .category = FIO_OPT_G_IO,
},
{
.name = "rwmixcycle",
.type = FIO_OPT_DEPRECATED,
+ .category = FIO_OPT_G_IO,
},
{
.name = "nice",
.minval = -19,
.maxval = 20,
.def = "0",
+ .category = FIO_OPT_G_OS | FIO_OPT_G_CPU,
},
#ifdef FIO_HAVE_IOPRIO
{
.help = "Set job IO priority value",
.minval = 0,
.maxval = 7,
+ .category = FIO_OPT_G_OS | FIO_OPT_G_CPU,
},
{
.name = "prioclass",
.help = "Set job IO priority class",
.minval = 0,
.maxval = 3,
+ .category = FIO_OPT_G_OS | FIO_OPT_G_CPU,
},
#endif
{
.off1 = td_var_offset(thinktime),
.help = "Idle time between IO buffers (usec)",
.def = "0",
+ .category = FIO_OPT_G_MISC,
},
{
.name = "thinktime_spin",
.help = "Start think time by spinning this amount (usec)",
.def = "0",
.parent = "thinktime",
+ .category = FIO_OPT_G_MISC,
},
{
.name = "thinktime_blocks",
.help = "IO buffer period between 'thinktime'",
.def = "1",
.parent = "thinktime",
+ .category = FIO_OPT_G_MISC,
},
{
.name = "rate",
.off1 = td_var_offset(rate[0]),
.off2 = td_var_offset(rate[1]),
.help = "Set bandwidth rate",
+ .category = FIO_OPT_G_IO,
},
{
.name = "ratemin",
.off2 = td_var_offset(ratemin[1]),
.help = "Job must meet this rate or it will be shutdown",
.parent = "rate",
+ .category = FIO_OPT_G_IO,
},
{
.name = "rate_iops",
.off1 = td_var_offset(rate_iops[0]),
.off2 = td_var_offset(rate_iops[1]),
.help = "Limit IO used to this number of IO operations/sec",
+ .category = FIO_OPT_G_IO,
},
{
.name = "rate_iops_min",
.off2 = td_var_offset(rate_iops_min[1]),
.help = "Job must meet this rate or it will be shut down",
.parent = "rate_iops",
+ .category = FIO_OPT_G_IO,
},
{
.name = "ratecycle",
.help = "Window average for rate limits (msec)",
.def = "1000",
.parent = "rate",
+ .category = FIO_OPT_G_IO,
},
{
.name = "invalidate",
.off1 = td_var_offset(invalidate_cache),
.help = "Invalidate buffer/page cache prior to running job",
.def = "1",
+ .category = FIO_OPT_G_IO | FIO_OPT_G_CACHE,
},
{
.name = "sync",
.help = "Use O_SYNC for buffered writes",
.def = "0",
.parent = "buffered",
+ .category = FIO_OPT_G_IO | FIO_OPT_G_FILE,
},
{
.name = "bwavgtime",
" (msec)",
.def = "500",
.parent = "write_bw_log",
+ .category = FIO_OPT_G_LOG | FIO_OPT_G_STAT,
},
{
.name = "iopsavgtime",
.help = "Time window over which to calculate IOPS (msec)",
.def = "500",
.parent = "write_iops_log",
+ .category = FIO_OPT_G_LOG | FIO_OPT_G_STAT,
},
{
.name = "create_serialize",
.off1 = td_var_offset(create_serialize),
.help = "Serialize creating of job files",
.def = "1",
+ .category = FIO_OPT_G_FILE,
},
{
.name = "create_fsync",
.off1 = td_var_offset(create_fsync),
.help = "fsync file after creation",
.def = "1",
+ .category = FIO_OPT_G_FILE,
},
{
.name = "create_on_open",
.off1 = td_var_offset(create_on_open),
.help = "Create files when they are opened for IO",
.def = "0",
+ .category = FIO_OPT_G_FILE,
},
{
.name = "pre_read",
.off1 = td_var_offset(pre_read),
.help = "Pre-read files before starting official testing",
.def = "0",
+ .category = FIO_OPT_G_FILE | FIO_OPT_G_CACHE,
},
{
.name = "cpuload",
.type = FIO_OPT_INT,
.off1 = td_var_offset(cpuload),
.help = "Use this percentage of CPU",
+ .category = FIO_OPT_G_CPU,
},
{
.name = "cpuchunks",
.help = "Length of the CPU burn cycles (usecs)",
.def = "50000",
.parent = "cpuload",
+ .category = FIO_OPT_G_CPU,
},
#ifdef FIO_HAVE_CPU_AFFINITY
{
.type = FIO_OPT_INT,
.cb = str_cpumask_cb,
.help = "CPU affinity mask",
+ .category = FIO_OPT_G_CPU | FIO_OPT_G_OS,
},
{
.name = "cpus_allowed",
.type = FIO_OPT_STR,
.cb = str_cpus_allowed_cb,
.help = "Set CPUs allowed",
+ .category = FIO_OPT_G_CPU | FIO_OPT_G_OS,
},
#endif
{
.off1 = td_var_offset(end_fsync),
.help = "Include fsync at the end of job",
.def = "0",
+ .category = FIO_OPT_G_FILE,
},
{
.name = "fsync_on_close",
.off1 = td_var_offset(fsync_on_close),
.help = "fsync files on close",
.def = "0",
+ .category = FIO_OPT_G_FILE,
},
{
.name = "unlink",
.off1 = td_var_offset(unlink),
.help = "Unlink created files after job has completed",
.def = "0",
+ .category = FIO_OPT_G_FILE,
},
{
.name = "exitall",
.type = FIO_OPT_STR_SET,
.cb = str_exitall_cb,
.help = "Terminate all jobs when one exits",
+ .category = FIO_OPT_G_MISC | FIO_OPT_G_JOB,
},
{
.name = "stonewall",
.type = FIO_OPT_STR_SET,
.off1 = td_var_offset(stonewall),
.help = "Insert a hard barrier between this job and previous",
+ .category = FIO_OPT_G_MISC | FIO_OPT_G_JOB,
},
{
.name = "new_group",
.type = FIO_OPT_STR_SET,
.off1 = td_var_offset(new_group),
.help = "Mark the start of a new group (for reporting)",
+ .category = FIO_OPT_G_MISC | FIO_OPT_G_JOB,
},
{
.name = "thread",
.type = FIO_OPT_STR_SET,
.off1 = td_var_offset(use_thread),
.help = "Use threads instead of forks",
+ .category = FIO_OPT_G_MISC | FIO_OPT_G_OS | FIO_OPT_G_JOB,
},
{
.name = "write_bw_log",
.off1 = td_var_offset(write_bw_log),
.cb = str_write_bw_log_cb,
.help = "Write log of bandwidth during run",
+ .category = FIO_OPT_G_LOG,
},
{
.name = "write_lat_log",
.off1 = td_var_offset(write_lat_log),
.cb = str_write_lat_log_cb,
.help = "Write log of latency during run",
+ .category = FIO_OPT_G_LOG,
},
{
.name = "write_iops_log",
.off1 = td_var_offset(write_iops_log),
.cb = str_write_iops_log_cb,
.help = "Write log of IOPS during run",
+ .category = FIO_OPT_G_LOG,
},
{
.name = "log_avg_msec",
.off1 = td_var_offset(log_avg_msec),
.help = "Average bw/iops/lat logs over this period of time",
.def = "0",
+ .category = FIO_OPT_G_LOG,
},
{
.name = "hugepage-size",
.off1 = td_var_offset(hugepage_size),
.help = "When using hugepages, specify size of each page",
.def = __fio_stringify(FIO_HUGE_PAGE),
+ .category = FIO_OPT_G_OS | FIO_OPT_G_MEM,
},
{
.name = "group_reporting",
.type = FIO_OPT_STR_SET,
.off1 = td_var_offset(group_reporting),
.help = "Do reporting on a per-group basis",
+ .category = FIO_OPT_G_MISC,
},
{
.name = "zero_buffers",
.type = FIO_OPT_STR_SET,
.off1 = td_var_offset(zero_buffers),
.help = "Init IO buffers to all zeroes",
+ .category = FIO_OPT_G_IO_BUF,
},
{
.name = "refill_buffers",
.type = FIO_OPT_STR_SET,
.off1 = td_var_offset(refill_buffers),
.help = "Refill IO buffers on every IO submit",
+ .category = FIO_OPT_G_IO_BUF,
},
{
.name = "scramble_buffers",
.off1 = td_var_offset(scramble_buffers),
.help = "Slightly scramble buffers on every IO submit",
.def = "1",
+ .category = FIO_OPT_G_IO_BUF,
},
+ {
+ .name = "buffer_compress_percentage",
+ .type = FIO_OPT_INT,
+ .off1 = td_var_offset(compress_percentage),
+ .maxval = 100,
+ .minval = 1,
+ .help = "How compressible the buffer is (approximately)",
+ },
+ {
+ .name = "buffer_compress_chunk",
+ .type = FIO_OPT_INT,
+ .off1 = td_var_offset(compress_chunk),
+ .parent = "buffer_compress_percentage",
+ .help = "Size of compressible region in buffer",
+ },
{
.name = "clat_percentiles",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(clat_percentiles),
.help = "Enable the reporting of completion latency percentiles",
.def = "1",
+ .category = FIO_OPT_G_STAT,
},
{
.name = "percentile_list",
.maxlen = FIO_IO_U_LIST_MAX_LEN,
.minfp = 0.0,
.maxfp = 100.0,
+ .category = FIO_OPT_G_STAT,
},
#ifdef FIO_HAVE_DISK_UTIL
.off1 = td_var_offset(do_disk_util),
.help = "Log disk utilization statistics",
.def = "1",
+ .category = FIO_OPT_G_OS | FIO_OPT_G_STAT,
},
#endif
{
.help = "Greatly reduce number of gettimeofday() calls",
.cb = str_gtod_reduce_cb,
.def = "0",
+ .category = FIO_OPT_G_OS | FIO_OPT_G_MISC | FIO_OPT_G_STAT,
},
{
.name = "disable_lat",
.help = "Disable latency numbers",
.parent = "gtod_reduce",
.def = "0",
+ .category = FIO_OPT_G_OS | FIO_OPT_G_MISC | FIO_OPT_G_STAT,
},
{
.name = "disable_clat",
.help = "Disable completion latency numbers",
.parent = "gtod_reduce",
.def = "0",
+ .category = FIO_OPT_G_OS | FIO_OPT_G_MISC | FIO_OPT_G_STAT,
},
{
.name = "disable_slat",
.help = "Disable submission latency numbers",
.parent = "gtod_reduce",
.def = "0",
+ .category = FIO_OPT_G_OS | FIO_OPT_G_MISC | FIO_OPT_G_STAT,
},
{
.name = "disable_bw_measurement",
.help = "Disable bandwidth logging",
.parent = "gtod_reduce",
.def = "0",
+ .category = FIO_OPT_G_OS | FIO_OPT_G_MISC | FIO_OPT_G_STAT,
},
{
.name = "gtod_cpu",
.cb = str_gtod_cpu_cb,
.help = "Set up dedicated gettimeofday() thread on this CPU",
.verify = gtod_cpu_verify,
+ .category = FIO_OPT_G_OS | FIO_OPT_G_MISC | FIO_OPT_G_STAT,
},
{
.name = "continue_on_error",
.off1 = td_var_offset(continue_on_error),
.help = "Continue on non-fatal errors during IO",
.def = "none",
+ .category = FIO_OPT_G_MISC | FIO_OPT_G_ERR,
.posval = {
{ .ival = "none",
.oval = ERROR_TYPE_NONE,
.type = FIO_OPT_STR_STORE,
.off1 = td_var_offset(profile),
.help = "Select a specific builtin performance test",
+ .category = FIO_OPT_G_MISC | FIO_OPT_G_JOB,
},
{
.name = "cgroup",
.type = FIO_OPT_STR_STORE,
.off1 = td_var_offset(cgroup),
.help = "Add job to cgroup of this name",
+ .category = FIO_OPT_G_MISC | FIO_OPT_G_OS,
},
{
.name = "cgroup_weight",
.help = "Use given weight for cgroup",
.minval = 100,
.maxval = 1000,
+ .category = FIO_OPT_G_MISC | FIO_OPT_G_OS,
},
{
.name = "cgroup_nodelete",
.off1 = td_var_offset(cgroup_nodelete),
.help = "Do not delete cgroups after job completion",
.def = "0",
+ .category = FIO_OPT_G_MISC | FIO_OPT_G_OS,
},
{
.name = "uid",
.type = FIO_OPT_INT,
.off1 = td_var_offset(uid),
.help = "Run job with this user ID",
+ .category = FIO_OPT_G_OS | FIO_OPT_G_JOB,
},
{
.name = "gid",
.type = FIO_OPT_INT,
.off1 = td_var_offset(gid),
.help = "Run job with this group ID",
+ .category = FIO_OPT_G_OS | FIO_OPT_G_JOB,
},
{
.name = "flow_id",
.off1 = td_var_offset(flow_id),
.help = "The flow index ID to use",
.def = "0",
+ .category = FIO_OPT_G_IO,
},
{
.name = "flow",
.help = "Weight for flow control of this job",
.parent = "flow_id",
.def = "0",
+ .category = FIO_OPT_G_IO,
},
{
.name = "flow_watermark",
" with non-zero flow.",
.parent = "flow_id",
.def = "1024",
+ .category = FIO_OPT_G_IO,
},
{
.name = "flow_sleep",
" back by the flow control mechanism",
.parent = "flow_id",
.def = "0",
+ .category = FIO_OPT_G_IO,
},
{
.name = NULL,
#include "fio_version.h"
-int fio_net_port = 8765;
+int fio_net_port = FIO_NET_PORT;
int exit_backend = 0;
"START",
"STOP",
"DISK_UTIL",
- "RUN",
+ "SERVER_START",
+ "ADD_JOB",
+ "CMD_RUN"
};
const char *fio_server_op(unsigned int op)
int fio_send_data(int sk, const void *p, unsigned int len)
{
- assert(len <= sizeof(struct fio_net_cmd) + FIO_SERVER_MAX_PDU);
+ assert(len <= sizeof(struct fio_net_cmd) + FIO_SERVER_MAX_FRAGMENT_PDU);
do {
int ret = send(sk, p, len, 0);
if (!len)
return 0;
+ if (errno)
+ return -errno;
+
return 1;
}
return 1;
}
- if (cmd->pdu_len > FIO_SERVER_MAX_PDU) {
+ if (cmd->pdu_len > FIO_SERVER_MAX_FRAGMENT_PDU) {
log_err("fio: command payload too large: %u\n", cmd->pdu_len);
return 1;
}
cmdret = NULL;
} else if (cmdret) {
/* zero-terminate text input */
- if (cmdret->pdu_len && (cmdret->opcode == FIO_NET_CMD_TEXT ||
- cmdret->opcode == FIO_NET_CMD_JOB)) {
- char *buf = (char *) cmdret->payload;
+ if (cmdret->pdu_len) {
+		if (cmdret->opcode == FIO_NET_CMD_TEXT) {
+			struct cmd_text_pdu *pdu = (struct cmd_text_pdu *) cmdret->payload;
+			char *buf = (char *) pdu->buf;
+
+			buf[le32_to_cpu(pdu->buf_len)] = '\0';
+ } else if (cmdret->opcode == FIO_NET_CMD_JOB) {
+ char *buf = (char *) cmdret->payload;
- buf[cmdret->pdu_len ] = '\0';
+			buf[cmdret->pdu_len] = '\0';
+ }
}
+
/* frag flag is internal */
cmdret->flags &= ~FIO_NET_CMD_F_MORE;
}
do {
this_len = size;
- if (this_len > FIO_SERVER_MAX_PDU)
- this_len = FIO_SERVER_MAX_PDU;
+ if (this_len > FIO_SERVER_MAX_FRAGMENT_PDU)
+ this_len = FIO_SERVER_MAX_FRAGMENT_PDU;
if (!cmd || cur_len < sizeof(*cmd) + this_len) {
if (cmd)
return fio_net_send_simple_cmd(server_fd, FIO_NET_CMD_QUIT, 0, NULL);
}
-static int handle_job_cmd(struct fio_net_cmd *cmd)
+static int handle_run_cmd(struct fio_net_cmd *cmd)
{
- char *buf = (char *) cmd->payload;
- struct cmd_start_pdu spdu;
struct cmd_end_pdu epdu;
int ret;
- if (parse_jobs_ini(buf, 1, 0)) {
- fio_server_send_quit_cmd();
- return -1;
- }
-
- spdu.jobs = cpu_to_le32(thread_number);
- fio_net_send_cmd(server_fd, FIO_NET_CMD_START, &spdu, sizeof(spdu), 0);
-
ret = fio_backend();
epdu.error = ret;
fio_server_send_quit_cmd();
reset_fio_state();
+ first_cmd_check = 0;
return ret;
}
+/*
+ * FIO_NET_CMD_JOB handler: parse the ini-format job payload sent by the
+ * client and, on success, reply with a START pdu carrying the number of
+ * jobs that were set up. The run itself is started later, when the
+ * client sends FIO_NET_CMD_RUN (see handle_run_cmd).
+ */
+static int handle_job_cmd(struct fio_net_cmd *cmd)
+{
+	char *buf = (char *) cmd->payload;
+	struct cmd_start_pdu spdu;
+
+	if (parse_jobs_ini(buf, 1, 0)) {
+		/* bad job file - tell the client we are bailing out */
+		fio_server_send_quit_cmd();
+		return -1;
+	}
+
+	/* job count travels little-endian on the wire */
+	spdu.jobs = cpu_to_le32(thread_number);
+	fio_net_send_cmd(server_fd, FIO_NET_CMD_START, &spdu, sizeof(spdu), 0);
+	return 0;
+}
+
static int handle_jobline_cmd(struct fio_net_cmd *cmd)
{
void *pdu = cmd->payload;
struct cmd_single_line_pdu *cslp;
struct cmd_line_pdu *clp;
unsigned long offset;
+ struct cmd_start_pdu spdu;
char **argv;
- int ret, i;
+ int i;
clp = pdu;
clp->lines = le16_to_cpu(clp->lines);
free(argv);
- fio_net_send_simple_cmd(server_fd, FIO_NET_CMD_START, 0, NULL);
-
- ret = fio_backend();
- fio_server_send_quit_cmd();
- reset_fio_state();
- return ret;
+ spdu.jobs = cpu_to_le32(thread_number);
+ fio_net_send_cmd(server_fd, FIO_NET_CMD_START, &spdu, sizeof(spdu), 0);
+ return 0;
}
static int handle_probe_cmd(struct fio_net_cmd *cmd)
je->nr_ramp = cpu_to_le32(je->nr_ramp);
je->nr_pending = cpu_to_le32(je->nr_pending);
je->files_open = cpu_to_le32(je->files_open);
- je->m_rate = cpu_to_le32(je->m_rate);
- je->t_rate = cpu_to_le32(je->t_rate);
- je->m_iops = cpu_to_le32(je->m_iops);
- je->t_iops = cpu_to_le32(je->t_iops);
for (i = 0; i < 2; i++) {
+ je->m_rate[i] = cpu_to_le32(je->m_rate[i]);
+ je->t_rate[i] = cpu_to_le32(je->t_rate[i]);
+ je->m_iops[i] = cpu_to_le32(je->m_iops[i]);
+ je->t_iops[i] = cpu_to_le32(je->t_iops[i]);
je->rate[i] = cpu_to_le32(je->rate[i]);
je->iops[i] = cpu_to_le32(je->iops[i]);
}
je->elapsed_sec = cpu_to_le64(je->elapsed_sec);
je->eta_sec = cpu_to_le64(je->eta_sec);
+ je->nr_threads = cpu_to_le32(je->nr_threads);
fio_net_send_cmd(server_fd, FIO_NET_CMD_ETA, je, size, cmd->tag);
free(je);
case FIO_NET_CMD_SEND_ETA:
ret = handle_send_eta_cmd(cmd);
break;
+ case FIO_NET_CMD_RUN:
+ ret = handle_run_cmd(cmd);
+ break;
default:
log_err("fio: unknown opcode: %s\n",fio_server_op(cmd->opcode));
ret = 1;
void fio_server_idle_loop(void)
{
- if (!first_cmd_check)
- fio_net_send_simple_cmd(server_fd, FIO_NET_CMD_RUN, 0, NULL);
+ if (!first_cmd_check) {
+ fio_net_send_simple_cmd(server_fd, FIO_NET_CMD_SERVER_START, 0, NULL);
+ first_cmd_check = 1;
+ }
if (server_fd != -1)
handle_connection(server_fd, 0);
}
return exitval;
}
-int fio_server_text_output(const char *buf, size_t len)
+/*
+ * Ship a log line to the connected client as a FIO_NET_CMD_TEXT command,
+ * prefixed with a cmd_text_pdu header (severity level, payload length,
+ * timestamp). When no client is connected, buffer the text locally.
+ * Returns the number of bytes handled (or log_local_buf()'s result).
+ */
+int fio_server_text_output(int level, const char *buf, size_t len)
 {
-	if (server_fd != -1)
-		return fio_net_send_cmd(server_fd, FIO_NET_CMD_TEXT, buf, len, 0);
+	struct cmd_text_pdu *pdu;
+	unsigned int tlen;
+	struct timeval tv;
-	return log_local_buf(buf, len);
+	if (server_fd == -1)
+		return log_local_buf(buf, len);
+
+	tlen = sizeof(*pdu) + len;
+	pdu = malloc(tlen);
+	if (!pdu) {
+		/* out of memory - fall back to local buffering */
+		return log_local_buf(buf, len);
+	}
+
+	/* all pdu fields travel little-endian on the wire */
+	pdu->level = __cpu_to_le32(level);
+	pdu->buf_len = __cpu_to_le32(len);
+
+	gettimeofday(&tv, NULL);
+	pdu->log_sec = __cpu_to_le64(tv.tv_sec);
+	pdu->log_usec = __cpu_to_le64(tv.tv_usec);
+
+	memcpy(pdu->buf, buf, len);
+
+	fio_net_send_cmd(server_fd, FIO_NET_CMD_TEXT, pdu, tlen, 0);
+	free(pdu);
+	return len;
 }
static void convert_io_stat(struct io_stat *dst, struct io_stat *src)
}
}
-int fio_server_log(const char *format, ...)
+/*
+ * Pack a job's thread_options into the little-endian, packed wire
+ * representation and send it to the client as FIO_NET_CMD_ADD_JOB.
+ * NOTE(review): the 'ioengine' argument is unused in this view - confirm
+ * whether it is meant to be carried in cmd_add_job_pdu.
+ */
+void fio_server_send_add_job(struct thread_options *o, const char *ioengine)
 {
-	char buffer[1024];
-	va_list args;
-	size_t len;
-
-	dprint(FD_NET, "server log\n");
+	struct cmd_add_job_pdu pdu;
-	va_start(args, format);
-	len = vsnprintf(buffer, sizeof(buffer), format, args);
-	va_end(args);
+	convert_thread_options_to_net(&pdu.top, o);
-	return fio_server_text_output(buffer, len);
+	fio_net_send_cmd(server_fd, FIO_NET_CMD_ADD_JOB, &pdu, sizeof(pdu), 0);
 }
static int fio_init_server_ip(void)
return sk;
}
+/*
+ * Resolve 'host' into a binary network address. *ipv6 selects whether an
+ * IPv6 literal/lookup is attempted first; it is cleared if the lookup
+ * falls back to IPv4. On success the result is stored in *inp6 (IPv6) or
+ * *inp (IPv4). Returns 0 on success, 1 on failure (callers treat any
+ * nonzero return as an error).
+ */
+int fio_server_parse_host(const char *host, int *ipv6, struct in_addr *inp,
+			  struct in6_addr *inp6)
+
+{
+	int ret = 0;
+
+	/* first try 'host' as a numeric address literal */
+	if (*ipv6)
+		ret = inet_pton(AF_INET6, host, inp6);
+	else
+		ret = inet_pton(AF_INET, host, inp);
+
+	if (ret != 1) {
+		struct hostent *hent;
+
+		/*
+		 * Not a literal - resolve it as a hostname.
+		 * NOTE(review): gethostbyname() is not thread-safe; confirm
+		 * this only runs from the single server thread.
+		 */
+		hent = gethostbyname(host);
+		if (!hent) {
+			log_err("fio: failed to resolve <%s>\n", host);
+			return 1;
+		}
+
+		if (*ipv6) {
+			if (hent->h_addrtype != AF_INET6) {
+				log_info("fio: falling back to IPv4\n");
+				*ipv6 = 0;
+			} else
+				memcpy(inp6, hent->h_addr_list[0], 16);
+		}
+		if (!*ipv6) {
+			if (hent->h_addrtype != AF_INET) {
+				log_err("fio: lookup type mismatch\n");
+				return 1;
+			}
+			memcpy(inp, hent->h_addr_list[0], 4);
+		}
+		ret = 1;
+	}
+
+	return !(ret == 1);
+}
+
/*
* Parse a host/ip/port string. Reads from 'str'.
*
{
const char *host = str;
char *portp;
- int ret, lport = 0;
+ int lport = 0;
*ptr = NULL;
*is_sock = 0;
*ptr = strdup(host);
- if (*ipv6)
- ret = inet_pton(AF_INET6, host, inp6);
- else
- ret = inet_pton(AF_INET, host, inp);
-
- if (ret != 1) {
- struct hostent *hent;
-
- hent = gethostbyname(host);
- if (!hent) {
- log_err("fio: failed to resolve <%s>\n", host);
- free(*ptr);
- *ptr = NULL;
- return 1;
- }
-
- if (*ipv6) {
- if (hent->h_addrtype != AF_INET6) {
- log_info("fio: falling back to IPv4\n");
- *ipv6 = 0;
- } else
- memcpy(inp6, hent->h_addr_list[0], 16);
- }
- if (!*ipv6) {
- if (hent->h_addrtype != AF_INET) {
- log_err("fio: lookup type mismatch\n");
- free(*ptr);
- *ptr = NULL;
- return 1;
- }
- memcpy(inp, hent->h_addr_list[0], 4);
- }
+ if (fio_server_parse_host(*ptr, ipv6, inp, inp6)) {
+ free(*ptr);
+ *ptr = NULL;
+ return 1;
}
if (*port == 0)
int ret;
#if defined(WIN32)
- WSADATA wsd;
- WSAStartup(MAKEWORD(2,2), &wsd);
+ WSADATA wsd;
+ WSAStartup(MAKEWORD(2,2), &wsd);
#endif
if (!pidfile)
--- /dev/null
+#ifndef FIO_THREAD_OPTIONS_H
+#define FIO_THREAD_OPTIONS_H
+
+#include "arch/arch.h"
+#include "os/os.h"
+#include "stat.h"
+#include "gettime.h"
+
+/*
+ * What type of allocation to use for io buffers
+ */
+enum fio_memtype {
+ MEM_MALLOC = 0, /* ordinary malloc */
+ MEM_SHM, /* use shared memory segments */
+ MEM_SHMHUGE, /* use shared memory segments with huge pages */
+ MEM_MMAP, /* use anonynomous mmap */
+ MEM_MMAPHUGE, /* memory mapped huge file */
+};
+
+/*
+ * What type of errors to continue on when continue_on_error is used
+ */
+enum error_type {
+ ERROR_TYPE_NONE = 0,
+ ERROR_TYPE_READ = 1 << 0,
+ ERROR_TYPE_WRITE = 1 << 1,
+ ERROR_TYPE_VERIFY = 1 << 2,
+ ERROR_TYPE_ANY = 0xffff,
+};
+
+#define BSSPLIT_MAX 64
+
+struct bssplit {
+ uint32_t bs;
+ uint32_t perc;
+};
+
+struct thread_options {
+ int pad;
+ char *description;
+ char *name;
+ char *directory;
+ char *filename;
+ char *opendir;
+ char *ioengine;
+ enum td_ddir td_ddir;
+ unsigned int rw_seq;
+ unsigned int kb_base;
+ unsigned int ddir_seq_nr;
+ long ddir_seq_add;
+ unsigned int iodepth;
+ unsigned int iodepth_low;
+ unsigned int iodepth_batch;
+ unsigned int iodepth_batch_complete;
+
+ unsigned long long size;
+ unsigned int size_percent;
+ unsigned int fill_device;
+ unsigned long long file_size_low;
+ unsigned long long file_size_high;
+ unsigned long long start_offset;
+
+ unsigned int bs[2];
+ unsigned int ba[2];
+ unsigned int min_bs[2];
+ unsigned int max_bs[2];
+ struct bssplit *bssplit[2];
+ unsigned int bssplit_nr[2];
+
+ unsigned int nr_files;
+ unsigned int open_files;
+ enum file_lock_mode file_lock_mode;
+ unsigned int lockfile_batch;
+
+ unsigned int odirect;
+ unsigned int invalidate_cache;
+ unsigned int create_serialize;
+ unsigned int create_fsync;
+ unsigned int create_on_open;
+ unsigned int end_fsync;
+ unsigned int pre_read;
+ unsigned int sync_io;
+ unsigned int verify;
+ unsigned int do_verify;
+ unsigned int verifysort;
+ unsigned int verify_interval;
+ unsigned int verify_offset;
+ char verify_pattern[MAX_PATTERN_SIZE];
+ unsigned int verify_pattern_bytes;
+ unsigned int verify_fatal;
+ unsigned int verify_dump;
+ unsigned int verify_async;
+ unsigned long long verify_backlog;
+ unsigned int verify_batch;
+ unsigned int use_thread;
+ unsigned int unlink;
+ unsigned int do_disk_util;
+ unsigned int override_sync;
+ unsigned int rand_repeatable;
+ unsigned int use_os_rand;
+ unsigned int write_lat_log;
+ unsigned int write_bw_log;
+ unsigned int write_iops_log;
+ unsigned int log_avg_msec;
+ unsigned int norandommap;
+ unsigned int softrandommap;
+ unsigned int bs_unaligned;
+ unsigned int fsync_on_close;
+
+ unsigned int hugepage_size;
+ unsigned int rw_min_bs;
+ unsigned int thinktime;
+ unsigned int thinktime_spin;
+ unsigned int thinktime_blocks;
+ unsigned int fsync_blocks;
+ unsigned int fdatasync_blocks;
+ unsigned int barrier_blocks;
+ unsigned long long start_delay;
+ unsigned long long timeout;
+ unsigned long long ramp_time;
+ unsigned int overwrite;
+ unsigned int bw_avg_time;
+ unsigned int iops_avg_time;
+ unsigned int loops;
+ unsigned long long zone_range;
+ unsigned long long zone_size;
+ unsigned long long zone_skip;
+ enum fio_memtype mem_type;
+ unsigned int mem_align;
+
+ unsigned int stonewall;
+ unsigned int new_group;
+ unsigned int numjobs;
+ os_cpu_mask_t cpumask;
+ unsigned int cpumask_set;
+ os_cpu_mask_t verify_cpumask;
+ unsigned int verify_cpumask_set;
+ unsigned int iolog;
+ unsigned int rwmixcycle;
+ unsigned int rwmix[2];
+ unsigned int nice;
+ unsigned int file_service_type;
+ unsigned int group_reporting;
+ unsigned int fadvise_hint;
+ enum fio_fallocate_mode fallocate_mode;
+ unsigned int zero_buffers;
+ unsigned int refill_buffers;
+ unsigned int scramble_buffers;
+	unsigned int compress_percentage;
+	unsigned int compress_chunk;
+ unsigned int time_based;
+ unsigned int disable_lat;
+ unsigned int disable_clat;
+ unsigned int disable_slat;
+ unsigned int disable_bw;
+ unsigned int gtod_reduce;
+ unsigned int gtod_cpu;
+ unsigned int gtod_offload;
+ enum fio_cs clocksource;
+ unsigned int no_stall;
+ unsigned int trim_percentage;
+ unsigned int trim_batch;
+ unsigned int trim_zero;
+ unsigned long long trim_backlog;
+ unsigned int clat_percentiles;
+ unsigned int overwrite_plist;
+ fio_fp64_t percentile_list[FIO_IO_U_LIST_MAX_LEN];
+
+ char *read_iolog_file;
+ char *write_iolog_file;
+ char *bw_log_file;
+ char *lat_log_file;
+ char *iops_log_file;
+ char *replay_redirect;
+
+ /*
+ * Pre-run and post-run shell
+ */
+ char *exec_prerun;
+ char *exec_postrun;
+
+ unsigned int rate[2];
+ unsigned int ratemin[2];
+ unsigned int ratecycle;
+ unsigned int rate_iops[2];
+ unsigned int rate_iops_min[2];
+
+ char *ioscheduler;
+
+ /*
+ * CPU "io" cycle burner
+ */
+ unsigned int cpuload;
+ unsigned int cpucycle;
+
+ /*
+ * I/O Error handling
+ */
+ enum error_type continue_on_error;
+
+ /*
+ * Benchmark profile type
+ */
+ char *profile;
+
+ /*
+ * blkio cgroup support
+ */
+ char *cgroup;
+ unsigned int cgroup_weight;
+ unsigned int cgroup_nodelete;
+
+ unsigned int uid;
+ unsigned int gid;
+
+ int flow_id;
+ int flow;
+ int flow_watermark;
+ unsigned int flow_sleep;
+
+ unsigned int sync_file_range;
+};
+
+#define FIO_TOP_STR_MAX 256
+
+struct thread_options_pack {
+ uint8_t description[FIO_TOP_STR_MAX];
+ uint8_t name[FIO_TOP_STR_MAX];
+ uint8_t directory[FIO_TOP_STR_MAX];
+ uint8_t filename[FIO_TOP_STR_MAX];
+ uint8_t opendir[FIO_TOP_STR_MAX];
+ uint8_t ioengine[FIO_TOP_STR_MAX];
+ uint32_t td_ddir;
+ uint32_t rw_seq;
+ uint32_t kb_base;
+ uint32_t ddir_seq_nr;
+ uint64_t ddir_seq_add;
+ uint32_t iodepth;
+ uint32_t iodepth_low;
+ uint32_t iodepth_batch;
+ uint32_t iodepth_batch_complete;
+
+ uint64_t size;
+ uint32_t size_percent;
+ uint32_t fill_device;
+ uint64_t file_size_low;
+ uint64_t file_size_high;
+ uint64_t start_offset;
+
+ uint32_t bs[2];
+ uint32_t ba[2];
+ uint32_t min_bs[2];
+ uint32_t max_bs[2];
+ struct bssplit bssplit[2][BSSPLIT_MAX];
+ uint32_t bssplit_nr[2];
+
+ uint32_t nr_files;
+ uint32_t open_files;
+ uint32_t file_lock_mode;
+ uint32_t lockfile_batch;
+
+ uint32_t odirect;
+ uint32_t invalidate_cache;
+ uint32_t create_serialize;
+ uint32_t create_fsync;
+ uint32_t create_on_open;
+ uint32_t end_fsync;
+ uint32_t pre_read;
+ uint32_t sync_io;
+ uint32_t verify;
+ uint32_t do_verify;
+ uint32_t verifysort;
+ uint32_t verify_interval;
+ uint32_t verify_offset;
+ uint8_t verify_pattern[MAX_PATTERN_SIZE];
+ uint32_t verify_pattern_bytes;
+ uint32_t verify_fatal;
+ uint32_t verify_dump;
+ uint32_t verify_async;
+ uint64_t verify_backlog;
+ uint32_t verify_batch;
+ uint32_t use_thread;
+ uint32_t unlink;
+ uint32_t do_disk_util;
+ uint32_t override_sync;
+ uint32_t rand_repeatable;
+ uint32_t use_os_rand;
+ uint32_t write_lat_log;
+ uint32_t write_bw_log;
+ uint32_t write_iops_log;
+ uint32_t log_avg_msec;
+ uint32_t norandommap;
+ uint32_t softrandommap;
+ uint32_t bs_unaligned;
+ uint32_t fsync_on_close;
+
+ uint32_t hugepage_size;
+ uint32_t rw_min_bs;
+ uint32_t thinktime;
+ uint32_t thinktime_spin;
+ uint32_t thinktime_blocks;
+ uint32_t fsync_blocks;
+ uint32_t fdatasync_blocks;
+ uint32_t barrier_blocks;
+ uint64_t start_delay;
+ uint64_t timeout;
+ uint64_t ramp_time;
+ uint32_t overwrite;
+ uint32_t bw_avg_time;
+ uint32_t iops_avg_time;
+ uint32_t loops;
+ uint64_t zone_range;
+ uint64_t zone_size;
+ uint64_t zone_skip;
+ uint32_t mem_type;
+ uint32_t mem_align;
+
+ uint32_t stonewall;
+ uint32_t new_group;
+ uint32_t numjobs;
+ uint8_t cpumask[FIO_TOP_STR_MAX];
+ uint32_t cpumask_set;
+ uint8_t verify_cpumask[FIO_TOP_STR_MAX];
+ uint32_t verify_cpumask_set;
+ uint32_t iolog;
+ uint32_t rwmixcycle;
+ uint32_t rwmix[2];
+ uint32_t nice;
+ uint32_t file_service_type;
+ uint32_t group_reporting;
+ uint32_t fadvise_hint;
+ uint32_t fallocate_mode;
+ uint32_t zero_buffers;
+ uint32_t refill_buffers;
+ uint32_t scramble_buffers;
+	uint32_t compress_percentage;
+	uint32_t compress_chunk;
+ uint32_t time_based;
+ uint32_t disable_lat;
+ uint32_t disable_clat;
+ uint32_t disable_slat;
+ uint32_t disable_bw;
+ uint32_t gtod_reduce;
+ uint32_t gtod_cpu;
+ uint32_t gtod_offload;
+ uint32_t clocksource;
+ uint32_t no_stall;
+ uint32_t trim_percentage;
+ uint32_t trim_batch;
+ uint32_t trim_zero;
+ uint64_t trim_backlog;
+ uint32_t clat_percentiles;
+ uint32_t overwrite_plist;
+ fio_fp64_t percentile_list[FIO_IO_U_LIST_MAX_LEN];
+
+ uint8_t read_iolog_file[FIO_TOP_STR_MAX];
+ uint8_t write_iolog_file[FIO_TOP_STR_MAX];
+ uint8_t bw_log_file[FIO_TOP_STR_MAX];
+ uint8_t lat_log_file[FIO_TOP_STR_MAX];
+ uint8_t iops_log_file[FIO_TOP_STR_MAX];
+ uint8_t replay_redirect[FIO_TOP_STR_MAX];
+
+ /*
+ * Pre-run and post-run shell
+ */
+ uint8_t exec_prerun[FIO_TOP_STR_MAX];
+ uint8_t exec_postrun[FIO_TOP_STR_MAX];
+
+ uint32_t rate[2];
+ uint32_t ratemin[2];
+ uint32_t ratecycle;
+ uint32_t rate_iops[2];
+ uint32_t rate_iops_min[2];
+
+ uint8_t ioscheduler[FIO_TOP_STR_MAX];
+
+ /*
+ * CPU "io" cycle burner
+ */
+ uint32_t cpuload;
+ uint32_t cpucycle;
+
+ /*
+ * I/O Error handling
+ */
+ uint32_t continue_on_error;
+
+ /*
+ * Benchmark profile type
+ */
+ uint8_t profile[FIO_TOP_STR_MAX];
+
+ /*
+ * blkio cgroup support
+ */
+ uint8_t cgroup[FIO_TOP_STR_MAX];
+ uint32_t cgroup_weight;
+ uint32_t cgroup_nodelete;
+
+ uint32_t uid;
+ uint32_t gid;
+
+ int32_t flow_id;
+ int32_t flow;
+ int32_t flow_watermark;
+ uint32_t flow_sleep;
+
+ uint32_t sync_file_range;
+} __attribute__((packed));
+
+extern void convert_thread_options_to_cpu(struct thread_options *o, struct thread_options_pack *top);
+extern void convert_thread_options_to_net(struct thread_options_pack *top, struct thread_options *);
+extern int fio_test_cconv(struct thread_options *);
+
+#endif