X-Git-Url: https://git.kernel.dk/?p=fio.git;a=blobdiff_plain;f=cconv.c;h=0c3a36c2a5590fed059fab033a3b5017fa9e0241;hp=0fca764e97865b8e4bd9fbaf869a6d9c489c71ec;hb=152ea0355412da5d02c322393252673653866f9e;hpb=79c896a122a7a39b840257215e622bdeff8272f1

diff --git a/cconv.c b/cconv.c
index 0fca764e..0c3a36c2 100644
--- a/cconv.c
+++ b/cconv.c
@@ -23,8 +23,11 @@ static void __string_to_net(uint8_t *dst, const char *src, size_t dst_size)
 
 static void free_thread_options_to_cpu(struct thread_options *o)
 {
+	int i;
+
 	free(o->description);
 	free(o->name);
+	free(o->wait_for);
 	free(o->directory);
 	free(o->filename);
 	free(o->filename_format);
@@ -42,6 +45,11 @@ static void free_thread_options_to_cpu(struct thread_options *o)
 	free(o->ioscheduler);
 	free(o->profile);
 	free(o->cgroup);
+
+	for (i = 0; i < DDIR_RWDIR_CNT; i++) {
+		free(o->bssplit[i]);
+		free(o->zone_split[i]);
+	}
 }
 
 void convert_thread_options_to_cpu(struct thread_options *o,
@@ -54,6 +62,7 @@ void convert_thread_options_to_cpu(struct thread_options *o,
 
 	string_to_cpu(&o->description, top->description);
 	string_to_cpu(&o->name, top->name);
+	string_to_cpu(&o->wait_for, top->wait_for);
 	string_to_cpu(&o->directory, top->directory);
 	string_to_cpu(&o->filename, top->filename);
 	string_to_cpu(&o->filename_format, top->filename_format);
@@ -72,6 +81,8 @@ void convert_thread_options_to_cpu(struct thread_options *o,
 	string_to_cpu(&o->profile, top->profile);
 	string_to_cpu(&o->cgroup, top->cgroup);
 
+	o->allow_create = le32_to_cpu(top->allow_create);
+	o->allow_mounted_write = le32_to_cpu(top->allow_mounted_write);
 	o->td_ddir = le32_to_cpu(top->td_ddir);
 	o->rw_seq = le32_to_cpu(top->rw_seq);
 	o->kb_base = le32_to_cpu(top->kb_base);
@@ -81,7 +92,8 @@ void convert_thread_options_to_cpu(struct thread_options *o,
 	o->iodepth = le32_to_cpu(top->iodepth);
 	o->iodepth_low = le32_to_cpu(top->iodepth_low);
 	o->iodepth_batch = le32_to_cpu(top->iodepth_batch);
-	o->iodepth_batch_complete = le32_to_cpu(top->iodepth_batch_complete);
+	o->iodepth_batch_complete_min = le32_to_cpu(top->iodepth_batch_complete_min);
+	o->iodepth_batch_complete_max = le32_to_cpu(top->iodepth_batch_complete_max);
 	o->size = le64_to_cpu(top->size);
 	o->io_limit = le64_to_cpu(top->io_limit);
 	o->size_percent = le32_to_cpu(top->size_percent);
@@ -106,6 +118,16 @@ void convert_thread_options_to_cpu(struct thread_options *o,
 			}
 		}
 
+		o->zone_split_nr[i] = le32_to_cpu(top->zone_split_nr[i]);
+
+		if (o->zone_split_nr[i]) {
+			o->zone_split[i] = malloc(o->zone_split_nr[i] * sizeof(struct zone_split));
+			for (j = 0; j < o->zone_split_nr[i]; j++) {
+				o->zone_split[i][j].access_perc = top->zone_split[i][j].access_perc;
+				o->zone_split[i][j].size_perc = top->zone_split[i][j].size_perc;
+			}
+		}
+
 		o->rwmix[i] = le32_to_cpu(top->rwmix[i]);
 		o->rate[i] = le32_to_cpu(top->rate[i]);
 		o->ratemin[i] = le32_to_cpu(top->ratemin[i]);
@@ -116,6 +138,7 @@ void convert_thread_options_to_cpu(struct thread_options *o,
 	}
 
 	o->ratecycle = le32_to_cpu(top->ratecycle);
+	o->io_submit_mode = le32_to_cpu(top->io_submit_mode);
 	o->nr_files = le32_to_cpu(top->nr_files);
 	o->open_files = le32_to_cpu(top->open_files);
 	o->file_lock_mode = le32_to_cpu(top->file_lock_mode);
@@ -154,6 +177,7 @@ void convert_thread_options_to_cpu(struct thread_options *o,
 	o->allrand_repeatable = le32_to_cpu(top->allrand_repeatable);
 	o->rand_seed = le64_to_cpu(top->rand_seed);
 	o->log_avg_msec = le32_to_cpu(top->log_avg_msec);
+	o->log_max = le32_to_cpu(top->log_max);
 	o->log_offset = le32_to_cpu(top->log_offset);
 	o->log_gz = le32_to_cpu(top->log_gz);
 	o->log_gz_store = le32_to_cpu(top->log_gz_store);
@@ -163,8 +187,10 @@ void convert_thread_options_to_cpu(struct thread_options *o,
 	o->fsync_on_close = le32_to_cpu(top->fsync_on_close);
 	o->bs_is_seq_rand = le32_to_cpu(top->bs_is_seq_rand);
 	o->random_distribution = le32_to_cpu(top->random_distribution);
+	o->exitall_error = le32_to_cpu(top->exitall_error);
 	o->zipf_theta.u.f = fio_uint64_to_double(le64_to_cpu(top->zipf_theta.u.i));
 	o->pareto_h.u.f = fio_uint64_to_double(le64_to_cpu(top->pareto_h.u.i));
+	o->gauss_dev.u.f = fio_uint64_to_double(le64_to_cpu(top->gauss_dev.u.i));
 	o->random_generator = le32_to_cpu(top->random_generator);
 	o->hugepage_size = le32_to_cpu(top->hugepage_size);
 	o->rw_min_bs = le32_to_cpu(top->rw_min_bs);
@@ -242,14 +268,21 @@ void convert_thread_options_to_cpu(struct thread_options *o,
 	o->compress_percentage = le32_to_cpu(top->compress_percentage);
 	o->compress_chunk = le32_to_cpu(top->compress_chunk);
 	o->dedupe_percentage = le32_to_cpu(top->dedupe_percentage);
+	o->skip_bad = le32_to_cpu(top->skip_bad);
+	o->block_error_hist = le32_to_cpu(top->block_error_hist);
+	o->replay_align = le32_to_cpu(top->replay_align);
+	o->replay_scale = le32_to_cpu(top->replay_scale);
+	o->per_job_logs = le32_to_cpu(top->per_job_logs);
 	o->trim_backlog = le64_to_cpu(top->trim_backlog);
+	o->rate_process = le32_to_cpu(top->rate_process);
 
 	for (i = 0; i < FIO_IO_U_LIST_MAX_LEN; i++)
 		o->percentile_list[i].u.f =
 			fio_uint64_to_double(le64_to_cpu(top->percentile_list[i].u.i));
 #if 0
 	uint8_t cpumask[FIO_TOP_STR_MAX];
 	uint8_t verify_cpumask[FIO_TOP_STR_MAX];
+	uint8_t log_gz_cpumask[FIO_TOP_STR_MAX];
 #endif
 }
 
@@ -263,6 +296,7 @@ void convert_thread_options_to_net(struct thread_options_pack *top,
 
 	string_to_net(top->description, o->description);
 	string_to_net(top->name, o->name);
+	string_to_net(top->wait_for, o->wait_for);
 	string_to_net(top->directory, o->directory);
 	string_to_net(top->filename, o->filename);
 	string_to_net(top->filename_format, o->filename_format);
@@ -281,6 +315,8 @@ void convert_thread_options_to_net(struct thread_options_pack *top,
 	string_to_net(top->profile, o->profile);
 	string_to_net(top->cgroup, o->cgroup);
 
+	top->allow_create = cpu_to_le32(o->allow_create);
+	top->allow_mounted_write = cpu_to_le32(o->allow_mounted_write);
 	top->td_ddir = cpu_to_le32(o->td_ddir);
 	top->rw_seq = cpu_to_le32(o->rw_seq);
 	top->kb_base = cpu_to_le32(o->kb_base);
@@ -289,11 +325,13 @@ void convert_thread_options_to_net(struct thread_options_pack *top,
 	top->iodepth = cpu_to_le32(o->iodepth);
 	top->iodepth_low = cpu_to_le32(o->iodepth_low);
 	top->iodepth_batch = cpu_to_le32(o->iodepth_batch);
-	top->iodepth_batch_complete = cpu_to_le32(o->iodepth_batch_complete);
+	top->iodepth_batch_complete_min = cpu_to_le32(o->iodepth_batch_complete_min);
+	top->iodepth_batch_complete_max = cpu_to_le32(o->iodepth_batch_complete_max);
 	top->size_percent = cpu_to_le32(o->size_percent);
 	top->fill_device = cpu_to_le32(o->fill_device);
 	top->file_append = cpu_to_le32(o->file_append);
 	top->ratecycle = cpu_to_le32(o->ratecycle);
+	top->io_submit_mode = cpu_to_le32(o->io_submit_mode);
 	top->nr_files = cpu_to_le32(o->nr_files);
 	top->open_files = cpu_to_le32(o->open_files);
 	top->file_lock_mode = cpu_to_le32(o->file_lock_mode);
@@ -328,6 +366,7 @@ void convert_thread_options_to_net(struct thread_options_pack *top,
 	top->allrand_repeatable = cpu_to_le32(o->allrand_repeatable);
 	top->rand_seed = __cpu_to_le64(o->rand_seed);
 	top->log_avg_msec = cpu_to_le32(o->log_avg_msec);
+	top->log_max = cpu_to_le32(o->log_max);
 	top->log_offset = cpu_to_le32(o->log_offset);
 	top->log_gz = cpu_to_le32(o->log_gz);
 	top->log_gz_store = cpu_to_le32(o->log_gz_store);
@@ -337,8 +376,10 @@ void convert_thread_options_to_net(struct thread_options_pack *top,
 	top->fsync_on_close = cpu_to_le32(o->fsync_on_close);
 	top->bs_is_seq_rand = cpu_to_le32(o->bs_is_seq_rand);
 	top->random_distribution = cpu_to_le32(o->random_distribution);
+	top->exitall_error = cpu_to_le32(o->exitall_error);
 	top->zipf_theta.u.i = __cpu_to_le64(fio_double_to_uint64(o->zipf_theta.u.f));
 	top->pareto_h.u.i = __cpu_to_le64(fio_double_to_uint64(o->pareto_h.u.f));
+	top->gauss_dev.u.i = __cpu_to_le64(fio_double_to_uint64(o->gauss_dev.u.f));
 	top->random_generator = cpu_to_le32(o->random_generator);
 	top->hugepage_size = cpu_to_le32(o->hugepage_size);
 	top->rw_min_bs = cpu_to_le32(o->rw_min_bs);
@@ -403,6 +444,11 @@ void convert_thread_options_to_net(struct thread_options_pack *top,
 	top->compress_percentage = cpu_to_le32(o->compress_percentage);
 	top->compress_chunk = cpu_to_le32(o->compress_chunk);
 	top->dedupe_percentage = cpu_to_le32(o->dedupe_percentage);
+	top->block_error_hist = cpu_to_le32(o->block_error_hist);
+	top->skip_bad = cpu_to_le32(o->skip_bad);
+	top->replay_align = cpu_to_le32(o->replay_align);
+	top->replay_scale = cpu_to_le32(o->replay_scale);
+	top->per_job_logs = cpu_to_le32(o->per_job_logs);
 
 	for (i = 0; i < DDIR_RWDIR_CNT; i++) {
 		top->bs[i] = cpu_to_le32(o->bs[i]);
@@ -424,6 +470,21 @@ void convert_thread_options_to_net(struct thread_options_pack *top,
 			}
 		}
 
+		top->zone_split_nr[i] = cpu_to_le32(o->zone_split_nr[i]);
+
+		if (o->zone_split_nr[i]) {
+			unsigned int zone_split_nr = o->zone_split_nr[i];
+
+			if (zone_split_nr > ZONESPLIT_MAX) {
+				log_err("fio: ZONESPLIT_MAX is too small\n");
+				zone_split_nr = ZONESPLIT_MAX;
+			}
+			for (j = 0; j < zone_split_nr; j++) {
+				top->zone_split[i][j].access_perc = o->zone_split[i][j].access_perc;
+				top->zone_split[i][j].size_perc = o->zone_split[i][j].size_perc;
+			}
+		}
+
 		top->rwmix[i] = cpu_to_le32(o->rwmix[i]);
 		top->rate[i] = cpu_to_le32(o->rate[i]);
 		top->ratemin[i] = cpu_to_le32(o->ratemin[i]);
@@ -454,12 +515,14 @@ void convert_thread_options_to_net(struct thread_options_pack *top,
 	top->trim_backlog = __cpu_to_le64(o->trim_backlog);
 	top->offset_increment = __cpu_to_le64(o->offset_increment);
 	top->number_ios = __cpu_to_le64(o->number_ios);
+	top->rate_process = cpu_to_le32(o->rate_process);
 
 	for (i = 0; i < FIO_IO_U_LIST_MAX_LEN; i++)
 		top->percentile_list[i].u.i =
 			__cpu_to_le64(fio_double_to_uint64(o->percentile_list[i].u.f));
 #if 0
 	uint8_t cpumask[FIO_TOP_STR_MAX];
 	uint8_t verify_cpumask[FIO_TOP_STR_MAX];
+	uint8_t log_gz_cpumask[FIO_TOP_STR_MAX];
 #endif
 }
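
Note on the pattern this diff extends: every option added to struct thread_options needs a matching pair of conversions, a cpu_to_leXX() store in convert_thread_options_to_net() and an leXX_to_cpu() load in convert_thread_options_to_cpu(), so that the value survives the little-endian packed representation exchanged between client and server. The stand-alone C sketch below illustrates that round trip for one 32-bit field (io_submit_mode is taken from the diff only as an example name); pack_u32()/unpack_u32() are hypothetical helpers standing in for fio's cpu_to_le32()/le32_to_cpu() macros, not fio code.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for cpu_to_le32()/le32_to_cpu(): store a
 * host-order value as explicit little-endian bytes and load it back,
 * independent of the host's native byte order. */
static void pack_u32(uint8_t *dst, uint32_t v)
{
	dst[0] = v & 0xff;
	dst[1] = (v >> 8) & 0xff;
	dst[2] = (v >> 16) & 0xff;
	dst[3] = (v >> 24) & 0xff;
}

static uint32_t unpack_u32(const uint8_t *src)
{
	return (uint32_t)src[0] | ((uint32_t)src[1] << 8) |
	       ((uint32_t)src[2] << 16) | ((uint32_t)src[3] << 24);
}

int main(void)
{
	uint32_t io_submit_mode = 1;	/* host-order option value */
	uint8_t wire[4];		/* little-endian "on the wire" form */
	uint32_t decoded;

	pack_u32(wire, io_submit_mode);	/* convert_..._to_net() direction */
	decoded = unpack_u32(wire);	/* convert_..._to_cpu() direction */

	printf("decoded=%u match=%d\n", decoded, decoded == io_submit_mode);
	return 0;
}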