log: add support for logging max instead of averages
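
cconv.c only packs and unpacks job options for fio's client/server wire format, so this change simply carries the new log_max flag (along with the other options being synced here: wait_for, allow_create, allow_mounted_write, iodepth_batch_complete_min/max, exitall_error, rate_process, per_job_logs) across the wire in both directions; the max-vs-average accumulation itself lives in the logging code, not in this file. As a concept-only illustration, hypothetical and not fio's actual implementation, a windowed logger honouring a log_max flag could accumulate samples like this:

#include <stdio.h>
#include <stdint.h>

struct window_acc {
        uint64_t sum;           /* running sum, used for the average */
        uint64_t max;           /* largest sample, used when log_max is set */
        uint64_t samples;
};

static void acc_add(struct window_acc *w, uint64_t val)
{
        w->sum += val;
        w->samples++;
        if (val > w->max)
                w->max = val;
}

/* Value that would be written out when the log_avg_msec window expires. */
static uint64_t acc_value(const struct window_acc *w, int log_max)
{
        if (!w->samples)
                return 0;
        return log_max ? w->max : w->sum / w->samples;
}

int main(void)
{
        /* made-up latency samples, in usec */
        uint64_t lat[] = { 120, 95, 4800, 110 };
        struct window_acc w = { 0, 0, 0 };
        size_t i;

        for (i = 0; i < sizeof(lat) / sizeof(lat[0]); i++)
                acc_add(&w, lat[i]);

        /* averaging hides the 4800us outlier; log_max preserves it */
        printf("avg=%llu max=%llu\n",
               (unsigned long long)acc_value(&w, 0),
               (unsigned long long)acc_value(&w, 1));
        return 0;
}
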
diff --git a/cconv.c b/cconv.c
index 1e095afdc7ccc8960adba0ce311054b0c7f4e407..6f57d90ca8e2f137ab6f2f72c3e6d311223c71ee 100644
--- a/cconv.c
+++ b/cconv.c
@@ -25,6 +25,7 @@ static void free_thread_options_to_cpu(struct thread_options *o)
 {
        free(o->description);
        free(o->name);
+       free(o->wait_for);
        free(o->directory);
        free(o->filename);
        free(o->filename_format);
@@ -54,6 +55,7 @@ void convert_thread_options_to_cpu(struct thread_options *o,
 
        string_to_cpu(&o->description, top->description);
        string_to_cpu(&o->name, top->name);
+       string_to_cpu(&o->wait_for, top->wait_for);
        string_to_cpu(&o->directory, top->directory);
        string_to_cpu(&o->filename, top->filename);
        string_to_cpu(&o->filename_format, top->filename_format);
@@ -72,6 +74,8 @@ void convert_thread_options_to_cpu(struct thread_options *o,
        string_to_cpu(&o->profile, top->profile);
        string_to_cpu(&o->cgroup, top->cgroup);
 
+       o->allow_create = le32_to_cpu(top->allow_create);
+       o->allow_mounted_write = le32_to_cpu(top->allow_mounted_write);
        o->td_ddir = le32_to_cpu(top->td_ddir);
        o->rw_seq = le32_to_cpu(top->rw_seq);
        o->kb_base = le32_to_cpu(top->kb_base);
@@ -81,7 +85,8 @@ void convert_thread_options_to_cpu(struct thread_options *o,
        o->iodepth = le32_to_cpu(top->iodepth);
        o->iodepth_low = le32_to_cpu(top->iodepth_low);
        o->iodepth_batch = le32_to_cpu(top->iodepth_batch);
-       o->iodepth_batch_complete = le32_to_cpu(top->iodepth_batch_complete);
+       o->iodepth_batch_complete_min = le32_to_cpu(top->iodepth_batch_complete_min);
+       o->iodepth_batch_complete_max = le32_to_cpu(top->iodepth_batch_complete_max);
        o->size = le64_to_cpu(top->size);
        o->io_limit = le64_to_cpu(top->io_limit);
        o->size_percent = le32_to_cpu(top->size_percent);
@@ -155,6 +160,7 @@ void convert_thread_options_to_cpu(struct thread_options *o,
        o->allrand_repeatable = le32_to_cpu(top->allrand_repeatable);
        o->rand_seed = le64_to_cpu(top->rand_seed);
        o->log_avg_msec = le32_to_cpu(top->log_avg_msec);
+       o->log_max = le32_to_cpu(top->log_max);
        o->log_offset = le32_to_cpu(top->log_offset);
        o->log_gz = le32_to_cpu(top->log_gz);
        o->log_gz_store = le32_to_cpu(top->log_gz_store);
@@ -164,6 +170,7 @@ void convert_thread_options_to_cpu(struct thread_options *o,
        o->fsync_on_close = le32_to_cpu(top->fsync_on_close);
        o->bs_is_seq_rand = le32_to_cpu(top->bs_is_seq_rand);
        o->random_distribution = le32_to_cpu(top->random_distribution);
+       o->exitall_error = le32_to_cpu(top->exitall_error);
        o->zipf_theta.u.f = fio_uint64_to_double(le64_to_cpu(top->zipf_theta.u.i));
        o->pareto_h.u.f = fio_uint64_to_double(le64_to_cpu(top->pareto_h.u.i));
        o->gauss_dev.u.f = fio_uint64_to_double(le64_to_cpu(top->gauss_dev.u.i));
@@ -248,14 +255,17 @@ void convert_thread_options_to_cpu(struct thread_options *o,
        o->block_error_hist = le32_to_cpu(top->block_error_hist);
        o->replay_align = le32_to_cpu(top->replay_align);
        o->replay_scale = le32_to_cpu(top->replay_scale);
+       o->per_job_logs = le32_to_cpu(top->per_job_logs);
 
        o->trim_backlog = le64_to_cpu(top->trim_backlog);
+       o->rate_process = le32_to_cpu(top->rate_process);
 
        for (i = 0; i < FIO_IO_U_LIST_MAX_LEN; i++)
                o->percentile_list[i].u.f = fio_uint64_to_double(le64_to_cpu(top->percentile_list[i].u.i));
 #if 0
        uint8_t cpumask[FIO_TOP_STR_MAX];
        uint8_t verify_cpumask[FIO_TOP_STR_MAX];
+       uint8_t log_gz_cpumask[FIO_TOP_STR_MAX];
 #endif
 }
 
@@ -269,6 +279,7 @@ void convert_thread_options_to_net(struct thread_options_pack *top,
 
        string_to_net(top->description, o->description);
        string_to_net(top->name, o->name);
+       string_to_net(top->wait_for, o->wait_for);
        string_to_net(top->directory, o->directory);
        string_to_net(top->filename, o->filename);
        string_to_net(top->filename_format, o->filename_format);
@@ -287,6 +298,8 @@ void convert_thread_options_to_net(struct thread_options_pack *top,
        string_to_net(top->profile, o->profile);
        string_to_net(top->cgroup, o->cgroup);
 
+       top->allow_create = cpu_to_le32(o->allow_create);
+       top->allow_mounted_write = cpu_to_le32(o->allow_mounted_write);
        top->td_ddir = cpu_to_le32(o->td_ddir);
        top->rw_seq = cpu_to_le32(o->rw_seq);
        top->kb_base = cpu_to_le32(o->kb_base);
@@ -295,7 +308,8 @@ void convert_thread_options_to_net(struct thread_options_pack *top,
        top->iodepth = cpu_to_le32(o->iodepth);
        top->iodepth_low = cpu_to_le32(o->iodepth_low);
        top->iodepth_batch = cpu_to_le32(o->iodepth_batch);
-       top->iodepth_batch_complete = cpu_to_le32(o->iodepth_batch_complete);
+       top->iodepth_batch_complete_min = cpu_to_le32(o->iodepth_batch_complete_min);
+       top->iodepth_batch_complete_max = cpu_to_le32(o->iodepth_batch_complete_max);
        top->size_percent = cpu_to_le32(o->size_percent);
        top->fill_device = cpu_to_le32(o->fill_device);
        top->file_append = cpu_to_le32(o->file_append);
@@ -335,6 +349,7 @@ void convert_thread_options_to_net(struct thread_options_pack *top,
        top->allrand_repeatable = cpu_to_le32(o->allrand_repeatable);
        top->rand_seed = __cpu_to_le64(o->rand_seed);
        top->log_avg_msec = cpu_to_le32(o->log_avg_msec);
+       top->log_max = cpu_to_le32(o->log_max);
        top->log_offset = cpu_to_le32(o->log_offset);
        top->log_gz = cpu_to_le32(o->log_gz);
        top->log_gz_store = cpu_to_le32(o->log_gz_store);
@@ -344,6 +359,7 @@ void convert_thread_options_to_net(struct thread_options_pack *top,
        top->fsync_on_close = cpu_to_le32(o->fsync_on_close);
        top->bs_is_seq_rand = cpu_to_le32(o->bs_is_seq_rand);
        top->random_distribution = cpu_to_le32(o->random_distribution);
+       top->exitall_error = cpu_to_le32(o->exitall_error);
        top->zipf_theta.u.i = __cpu_to_le64(fio_double_to_uint64(o->zipf_theta.u.f));
        top->pareto_h.u.i = __cpu_to_le64(fio_double_to_uint64(o->pareto_h.u.f));
        top->gauss_dev.u.i = __cpu_to_le64(fio_double_to_uint64(o->gauss_dev.u.f));
@@ -415,6 +431,7 @@ void convert_thread_options_to_net(struct thread_options_pack *top,
        top->skip_bad = cpu_to_le32(o->skip_bad);
        top->replay_align = cpu_to_le32(o->replay_align);
        top->replay_scale = cpu_to_le32(o->replay_scale);
+       top->per_job_logs = cpu_to_le32(o->per_job_logs);
 
        for (i = 0; i < DDIR_RWDIR_CNT; i++) {
                top->bs[i] = cpu_to_le32(o->bs[i]);
@@ -466,12 +483,14 @@ void convert_thread_options_to_net(struct thread_options_pack *top,
        top->trim_backlog = __cpu_to_le64(o->trim_backlog);
        top->offset_increment = __cpu_to_le64(o->offset_increment);
        top->number_ios = __cpu_to_le64(o->number_ios);
+       top->rate_process = cpu_to_le32(o->rate_process);
 
        for (i = 0; i < FIO_IO_U_LIST_MAX_LEN; i++)
                top->percentile_list[i].u.i = __cpu_to_le64(fio_double_to_uint64(o->percentile_list[i].u.f));
 #if 0
        uint8_t cpumask[FIO_TOP_STR_MAX];
        uint8_t verify_cpumask[FIO_TOP_STR_MAX];
+       uint8_t log_gz_cpumask[FIO_TOP_STR_MAX];
 #endif
 
 }
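
Every scalar option handled in this file follows the same two-way conversion pattern, and the new fields above are no exception; condensing the log_max hunks from this diff:

        /* convert_thread_options_to_cpu(): wire (little-endian) -> host */
        o->log_max = le32_to_cpu(top->log_max);

        /* convert_thread_options_to_net(): host -> wire (little-endian) */
        top->log_max = cpu_to_le32(o->log_max);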