X-Git-Url: https://git.kernel.dk/?p=fio.git;a=blobdiff_plain;f=cconv.c;h=ebbac0aa1906c795b40ab6a13330b18c76c13f82;hp=d2539753c76f7a673a93cc4b7f7f4321fa6c92bc;hb=2c5d94bc117583336b23e39c18ed3fd492f02eeb;hpb=d6c0b40939e7184d47f5a0ac5b38440c34260eea

diff --git a/cconv.c b/cconv.c
index d2539753..ebbac0aa 100644
--- a/cconv.c
+++ b/cconv.c
@@ -23,8 +23,11 @@ static void __string_to_net(uint8_t *dst, const char *src, size_t dst_size)
 
 static void free_thread_options_to_cpu(struct thread_options *o)
 {
+	int i;
+
 	free(o->description);
 	free(o->name);
+	free(o->wait_for);
 	free(o->directory);
 	free(o->filename);
 	free(o->filename_format);
@@ -36,12 +39,18 @@ static void free_thread_options_to_cpu(struct thread_options *o)
 	free(o->bw_log_file);
 	free(o->lat_log_file);
 	free(o->iops_log_file);
+	free(o->hist_log_file);
 	free(o->replay_redirect);
 	free(o->exec_prerun);
 	free(o->exec_postrun);
 	free(o->ioscheduler);
 	free(o->profile);
 	free(o->cgroup);
+
+	for (i = 0; i < DDIR_RWDIR_CNT; i++) {
+		free(o->bssplit[i]);
+		free(o->zone_split[i]);
+	}
 }
 
 void convert_thread_options_to_cpu(struct thread_options *o,
@@ -49,8 +58,12 @@ void convert_thread_options_to_cpu(struct thread_options *o,
 {
 	int i, j;
 
+	for (i = 0; i < NR_OPTS_SZ; i++)
+		o->set_options[i] = le64_to_cpu(top->set_options[i]);
+
 	string_to_cpu(&o->description, top->description);
 	string_to_cpu(&o->name, top->name);
+	string_to_cpu(&o->wait_for, top->wait_for);
 	string_to_cpu(&o->directory, top->directory);
 	string_to_cpu(&o->filename, top->filename);
 	string_to_cpu(&o->filename_format, top->filename_format);
@@ -62,6 +75,7 @@ void convert_thread_options_to_cpu(struct thread_options *o,
 	string_to_cpu(&o->bw_log_file, top->bw_log_file);
 	string_to_cpu(&o->lat_log_file, top->lat_log_file);
 	string_to_cpu(&o->iops_log_file, top->iops_log_file);
+	string_to_cpu(&o->hist_log_file, top->hist_log_file);
 	string_to_cpu(&o->replay_redirect, top->replay_redirect);
 	string_to_cpu(&o->exec_prerun, top->exec_prerun);
 	string_to_cpu(&o->exec_postrun, top->exec_postrun);
@@ -69,6 +83,8 @@ void convert_thread_options_to_cpu(struct thread_options *o,
 	string_to_cpu(&o->profile, top->profile);
 	string_to_cpu(&o->cgroup, top->cgroup);
 
+	o->allow_create = le32_to_cpu(top->allow_create);
+	o->allow_mounted_write = le32_to_cpu(top->allow_mounted_write);
 	o->td_ddir = le32_to_cpu(top->td_ddir);
 	o->rw_seq = le32_to_cpu(top->rw_seq);
 	o->kb_base = le32_to_cpu(top->kb_base);
@@ -78,7 +94,8 @@ void convert_thread_options_to_cpu(struct thread_options *o,
 	o->iodepth = le32_to_cpu(top->iodepth);
 	o->iodepth_low = le32_to_cpu(top->iodepth_low);
 	o->iodepth_batch = le32_to_cpu(top->iodepth_batch);
-	o->iodepth_batch_complete = le32_to_cpu(top->iodepth_batch_complete);
+	o->iodepth_batch_complete_min = le32_to_cpu(top->iodepth_batch_complete_min);
+	o->iodepth_batch_complete_max = le32_to_cpu(top->iodepth_batch_complete_max);
 	o->size = le64_to_cpu(top->size);
 	o->io_limit = le64_to_cpu(top->io_limit);
 	o->size_percent = le32_to_cpu(top->size_percent);
@@ -103,6 +120,16 @@ void convert_thread_options_to_cpu(struct thread_options *o,
 			}
 		}
 
+		o->zone_split_nr[i] = le32_to_cpu(top->zone_split_nr[i]);
+
+		if (o->zone_split_nr[i]) {
+			o->zone_split[i] = malloc(o->zone_split_nr[i] * sizeof(struct zone_split));
+			for (j = 0; j < o->zone_split_nr[i]; j++) {
+				o->zone_split[i][j].access_perc = top->zone_split[i][j].access_perc;
+				o->zone_split[i][j].size_perc = top->zone_split[i][j].size_perc;
+			}
+		}
+
 		o->rwmix[i] = le32_to_cpu(top->rwmix[i]);
 		o->rate[i] = le32_to_cpu(top->rate[i]);
 		o->ratemin[i] = le32_to_cpu(top->ratemin[i]);
@@ -113,6 +140,8 @@ void convert_thread_options_to_cpu(struct thread_options *o,
 	}
 
 	o->ratecycle = le32_to_cpu(top->ratecycle);
+	o->io_submit_mode = le32_to_cpu(top->io_submit_mode);
+	o->unique_filename = le32_to_cpu(top->unique_filename);
 	o->nr_files = le32_to_cpu(top->nr_files);
 	o->open_files = le32_to_cpu(top->open_files);
 	o->file_lock_mode = le32_to_cpu(top->file_lock_mode);
@@ -131,6 +160,7 @@ void convert_thread_options_to_cpu(struct thread_options *o,
 	o->verifysort = le32_to_cpu(top->verifysort);
 	o->verifysort_nr = le32_to_cpu(top->verifysort_nr);
 	o->experimental_verify = le32_to_cpu(top->experimental_verify);
+	o->verify_state = le32_to_cpu(top->verify_state);
 	o->verify_interval = le32_to_cpu(top->verify_interval);
 	o->verify_offset = le32_to_cpu(top->verify_offset);
 
@@ -144,22 +174,29 @@ void convert_thread_options_to_cpu(struct thread_options *o,
 	o->verify_batch = le32_to_cpu(top->verify_batch);
 	o->use_thread = le32_to_cpu(top->use_thread);
 	o->unlink = le32_to_cpu(top->unlink);
+	o->unlink_each_loop = le32_to_cpu(top->unlink_each_loop);
 	o->do_disk_util = le32_to_cpu(top->do_disk_util);
 	o->override_sync = le32_to_cpu(top->override_sync);
 	o->rand_repeatable = le32_to_cpu(top->rand_repeatable);
 	o->allrand_repeatable = le32_to_cpu(top->allrand_repeatable);
 	o->rand_seed = le64_to_cpu(top->rand_seed);
-	o->use_os_rand = le32_to_cpu(top->use_os_rand);
 	o->log_avg_msec = le32_to_cpu(top->log_avg_msec);
+	o->log_hist_msec = le32_to_cpu(top->log_hist_msec);
+	o->log_hist_coarseness = le32_to_cpu(top->log_hist_coarseness);
+	o->log_max = le32_to_cpu(top->log_max);
 	o->log_offset = le32_to_cpu(top->log_offset);
+	o->log_gz = le32_to_cpu(top->log_gz);
+	o->log_gz_store = le32_to_cpu(top->log_gz_store);
 	o->norandommap = le32_to_cpu(top->norandommap);
 	o->softrandommap = le32_to_cpu(top->softrandommap);
 	o->bs_unaligned = le32_to_cpu(top->bs_unaligned);
 	o->fsync_on_close = le32_to_cpu(top->fsync_on_close);
 	o->bs_is_seq_rand = le32_to_cpu(top->bs_is_seq_rand);
 	o->random_distribution = le32_to_cpu(top->random_distribution);
+	o->exitall_error = le32_to_cpu(top->exitall_error);
 	o->zipf_theta.u.f = fio_uint64_to_double(le64_to_cpu(top->zipf_theta.u.i));
 	o->pareto_h.u.f = fio_uint64_to_double(le64_to_cpu(top->pareto_h.u.i));
+	o->gauss_dev.u.f = fio_uint64_to_double(le64_to_cpu(top->gauss_dev.u.i));
 	o->random_generator = le32_to_cpu(top->random_generator);
 	o->hugepage_size = le32_to_cpu(top->hugepage_size);
 	o->rw_min_bs = le32_to_cpu(top->rw_min_bs);
@@ -175,6 +212,10 @@ void convert_thread_options_to_cpu(struct thread_options *o,
 	o->start_delay_high = le64_to_cpu(top->start_delay_high);
 	o->timeout = le64_to_cpu(top->timeout);
 	o->ramp_time = le64_to_cpu(top->ramp_time);
+	o->ss_dur = le64_to_cpu(top->ss_dur);
+	o->ss_ramp_time = le64_to_cpu(top->ss_ramp_time);
+	o->ss_state = le32_to_cpu(top->ss_state);
+	o->ss_limit.u.f = fio_uint64_to_double(le64_to_cpu(top->ss_limit.u.i));
 	o->zone_range = le64_to_cpu(top->zone_range);
 	o->zone_size = le64_to_cpu(top->zone_size);
 	o->zone_skip = le64_to_cpu(top->zone_skip);
@@ -192,8 +233,6 @@ void convert_thread_options_to_cpu(struct thread_options *o,
 	o->stonewall = le32_to_cpu(top->stonewall);
 	o->new_group = le32_to_cpu(top->new_group);
 	o->numjobs = le32_to_cpu(top->numjobs);
-	o->cpumask_set = le32_to_cpu(top->cpumask_set);
-	o->verify_cpumask_set = le32_to_cpu(top->verify_cpumask_set);
 	o->cpus_allowed_policy = le32_to_cpu(top->cpus_allowed_policy);
 	o->iolog = le32_to_cpu(top->iolog);
 	o->rwmixcycle = le32_to_cpu(top->rwmixcycle);
@@ -216,7 +255,6 @@ void convert_thread_options_to_cpu(struct thread_options *o,
 	o->unified_rw_rep = le32_to_cpu(top->unified_rw_rep);
 	o->gtod_reduce = le32_to_cpu(top->gtod_reduce);
 	o->gtod_cpu = le32_to_cpu(top->gtod_cpu);
-	o->gtod_offload = le32_to_cpu(top->gtod_offload);
 	o->clocksource = le32_to_cpu(top->clocksource);
 	o->no_stall = le32_to_cpu(top->no_stall);
 	o->trim_percentage = le32_to_cpu(top->trim_percentage);
@@ -239,14 +277,22 @@ void convert_thread_options_to_cpu(struct thread_options *o,
 	o->latency_percentile.u.f = fio_uint64_to_double(le64_to_cpu(top->latency_percentile.u.i));
 	o->compress_percentage = le32_to_cpu(top->compress_percentage);
 	o->compress_chunk = le32_to_cpu(top->compress_chunk);
+	o->dedupe_percentage = le32_to_cpu(top->dedupe_percentage);
+	o->skip_bad = le32_to_cpu(top->skip_bad);
+	o->block_error_hist = le32_to_cpu(top->block_error_hist);
+	o->replay_align = le32_to_cpu(top->replay_align);
+	o->replay_scale = le32_to_cpu(top->replay_scale);
+	o->per_job_logs = le32_to_cpu(top->per_job_logs);
 	o->trim_backlog = le64_to_cpu(top->trim_backlog);
+	o->rate_process = le32_to_cpu(top->rate_process);
 
 	for (i = 0; i < FIO_IO_U_LIST_MAX_LEN; i++)
 		o->percentile_list[i].u.f =
 			fio_uint64_to_double(le64_to_cpu(top->percentile_list[i].u.i));
 #if 0
 	uint8_t cpumask[FIO_TOP_STR_MAX];
 	uint8_t verify_cpumask[FIO_TOP_STR_MAX];
+	uint8_t log_gz_cpumask[FIO_TOP_STR_MAX];
 #endif
 }
 
@@ -255,8 +301,12 @@ void convert_thread_options_to_net(struct thread_options_pack *top,
 {
 	int i, j;
 
+	for (i = 0; i < NR_OPTS_SZ; i++)
+		top->set_options[i] = cpu_to_le64(o->set_options[i]);
+
 	string_to_net(top->description, o->description);
 	string_to_net(top->name, o->name);
+	string_to_net(top->wait_for, o->wait_for);
 	string_to_net(top->directory, o->directory);
 	string_to_net(top->filename, o->filename);
 	string_to_net(top->filename_format, o->filename_format);
@@ -268,6 +318,7 @@ void convert_thread_options_to_net(struct thread_options_pack *top,
 	string_to_net(top->bw_log_file, o->bw_log_file);
 	string_to_net(top->lat_log_file, o->lat_log_file);
 	string_to_net(top->iops_log_file, o->iops_log_file);
+	string_to_net(top->hist_log_file, o->hist_log_file);
 	string_to_net(top->replay_redirect, o->replay_redirect);
 	string_to_net(top->exec_prerun, o->exec_prerun);
 	string_to_net(top->exec_postrun, o->exec_postrun);
@@ -275,6 +326,8 @@ void convert_thread_options_to_net(struct thread_options_pack *top,
 	string_to_net(top->profile, o->profile);
 	string_to_net(top->cgroup, o->cgroup);
 
+	top->allow_create = cpu_to_le32(o->allow_create);
+	top->allow_mounted_write = cpu_to_le32(o->allow_mounted_write);
 	top->td_ddir = cpu_to_le32(o->td_ddir);
 	top->rw_seq = cpu_to_le32(o->rw_seq);
 	top->kb_base = cpu_to_le32(o->kb_base);
@@ -283,12 +336,15 @@ void convert_thread_options_to_net(struct thread_options_pack *top,
 	top->iodepth = cpu_to_le32(o->iodepth);
 	top->iodepth_low = cpu_to_le32(o->iodepth_low);
 	top->iodepth_batch = cpu_to_le32(o->iodepth_batch);
-	top->iodepth_batch_complete = cpu_to_le32(o->iodepth_batch_complete);
+	top->iodepth_batch_complete_min = cpu_to_le32(o->iodepth_batch_complete_min);
+	top->iodepth_batch_complete_max = cpu_to_le32(o->iodepth_batch_complete_max);
 	top->size_percent = cpu_to_le32(o->size_percent);
 	top->fill_device = cpu_to_le32(o->fill_device);
 	top->file_append = cpu_to_le32(o->file_append);
 	top->ratecycle = cpu_to_le32(o->ratecycle);
+	top->io_submit_mode = cpu_to_le32(o->io_submit_mode);
 	top->nr_files = cpu_to_le32(o->nr_files);
+	top->unique_filename = cpu_to_le32(o->unique_filename);
 	top->open_files = cpu_to_le32(o->open_files);
 	top->file_lock_mode = cpu_to_le32(o->file_lock_mode);
 	top->odirect = cpu_to_le32(o->odirect);
@@ -306,6 +362,7 @@ void convert_thread_options_to_net(struct thread_options_pack *top,
 	top->verifysort = cpu_to_le32(o->verifysort);
 	top->verifysort_nr = cpu_to_le32(o->verifysort_nr);
 	top->experimental_verify = cpu_to_le32(o->experimental_verify);
+	top->verify_state = cpu_to_le32(o->verify_state);
 	top->verify_interval = cpu_to_le32(o->verify_interval);
 	top->verify_offset = cpu_to_le32(o->verify_offset);
 	top->verify_pattern_bytes = cpu_to_le32(o->verify_pattern_bytes);
@@ -315,22 +372,27 @@ void convert_thread_options_to_net(struct thread_options_pack *top,
 	top->verify_batch = cpu_to_le32(o->verify_batch);
 	top->use_thread = cpu_to_le32(o->use_thread);
 	top->unlink = cpu_to_le32(o->unlink);
+	top->unlink_each_loop = cpu_to_le32(o->unlink_each_loop);
 	top->do_disk_util = cpu_to_le32(o->do_disk_util);
 	top->override_sync = cpu_to_le32(o->override_sync);
 	top->rand_repeatable = cpu_to_le32(o->rand_repeatable);
 	top->allrand_repeatable = cpu_to_le32(o->allrand_repeatable);
 	top->rand_seed = __cpu_to_le64(o->rand_seed);
-	top->use_os_rand = cpu_to_le32(o->use_os_rand);
 	top->log_avg_msec = cpu_to_le32(o->log_avg_msec);
+	top->log_max = cpu_to_le32(o->log_max);
 	top->log_offset = cpu_to_le32(o->log_offset);
+	top->log_gz = cpu_to_le32(o->log_gz);
+	top->log_gz_store = cpu_to_le32(o->log_gz_store);
 	top->norandommap = cpu_to_le32(o->norandommap);
 	top->softrandommap = cpu_to_le32(o->softrandommap);
 	top->bs_unaligned = cpu_to_le32(o->bs_unaligned);
 	top->fsync_on_close = cpu_to_le32(o->fsync_on_close);
 	top->bs_is_seq_rand = cpu_to_le32(o->bs_is_seq_rand);
 	top->random_distribution = cpu_to_le32(o->random_distribution);
+	top->exitall_error = cpu_to_le32(o->exitall_error);
 	top->zipf_theta.u.i = __cpu_to_le64(fio_double_to_uint64(o->zipf_theta.u.f));
 	top->pareto_h.u.i = __cpu_to_le64(fio_double_to_uint64(o->pareto_h.u.f));
+	top->gauss_dev.u.i = __cpu_to_le64(fio_double_to_uint64(o->gauss_dev.u.f));
 	top->random_generator = cpu_to_le32(o->random_generator);
 	top->hugepage_size = cpu_to_le32(o->hugepage_size);
 	top->rw_min_bs = cpu_to_le32(o->rw_min_bs);
@@ -350,8 +412,6 @@ void convert_thread_options_to_net(struct thread_options_pack *top,
 	top->stonewall = cpu_to_le32(o->stonewall);
 	top->new_group = cpu_to_le32(o->new_group);
 	top->numjobs = cpu_to_le32(o->numjobs);
-	top->cpumask_set = cpu_to_le32(o->cpumask_set);
-	top->verify_cpumask_set = cpu_to_le32(o->verify_cpumask_set);
 	top->cpus_allowed_policy = cpu_to_le32(o->cpus_allowed_policy);
 	top->iolog = cpu_to_le32(o->iolog);
 	top->rwmixcycle = cpu_to_le32(o->rwmixcycle);
@@ -374,7 +434,6 @@ void convert_thread_options_to_net(struct thread_options_pack *top,
 	top->unified_rw_rep = cpu_to_le32(o->unified_rw_rep);
 	top->gtod_reduce = cpu_to_le32(o->gtod_reduce);
 	top->gtod_cpu = cpu_to_le32(o->gtod_cpu);
-	top->gtod_offload = cpu_to_le32(o->gtod_offload);
 	top->clocksource = cpu_to_le32(o->clocksource);
 	top->no_stall = cpu_to_le32(o->no_stall);
 	top->trim_percentage = cpu_to_le32(o->trim_percentage);
@@ -397,6 +456,12 @@ void convert_thread_options_to_net(struct thread_options_pack *top,
 	top->latency_percentile.u.i = __cpu_to_le64(fio_double_to_uint64(o->latency_percentile.u.f));
 	top->compress_percentage = cpu_to_le32(o->compress_percentage);
 	top->compress_chunk = cpu_to_le32(o->compress_chunk);
+	top->dedupe_percentage = cpu_to_le32(o->dedupe_percentage);
+	top->block_error_hist = cpu_to_le32(o->block_error_hist);
+	top->skip_bad = cpu_to_le32(o->skip_bad);
+	top->replay_align = cpu_to_le32(o->replay_align);
+	top->replay_scale = cpu_to_le32(o->replay_scale);
+	top->per_job_logs = cpu_to_le32(o->per_job_logs);
 
 	for (i = 0; i < DDIR_RWDIR_CNT; i++) {
 		top->bs[i] = cpu_to_le32(o->bs[i]);
@@ -418,6 +483,21 @@ void convert_thread_options_to_net(struct thread_options_pack *top,
 			}
 		}
 
+		top->zone_split_nr[i] = cpu_to_le32(o->zone_split_nr[i]);
+
+		if (o->zone_split_nr[i]) {
+			unsigned int zone_split_nr = o->zone_split_nr[i];
+
+			if (zone_split_nr > ZONESPLIT_MAX) {
+				log_err("fio: ZONESPLIT_MAX is too small\n");
+				zone_split_nr = ZONESPLIT_MAX;
+			}
+			for (j = 0; j < zone_split_nr; j++) {
+				top->zone_split[i][j].access_perc = o->zone_split[i][j].access_perc;
+				top->zone_split[i][j].size_perc = o->zone_split[i][j].size_perc;
+			}
+		}
+
 		top->rwmix[i] = cpu_to_le32(o->rwmix[i]);
 		top->rate[i] = cpu_to_le32(o->rate[i]);
 		top->ratemin[i] = cpu_to_le32(o->ratemin[i]);
@@ -437,6 +517,10 @@ void convert_thread_options_to_net(struct thread_options_pack *top,
 	top->start_delay_high = __cpu_to_le64(o->start_delay_high);
 	top->timeout = __cpu_to_le64(o->timeout);
 	top->ramp_time = __cpu_to_le64(o->ramp_time);
+	top->ss_dur = __cpu_to_le64(o->ss_dur);
+	top->ss_ramp_time = __cpu_to_le64(o->ss_ramp_time);
+	top->ss_state = cpu_to_le32(o->ss_state);
+	top->ss_limit.u.i = __cpu_to_le64(fio_double_to_uint64(o->ss_limit.u.f));
 	top->zone_range = __cpu_to_le64(o->zone_range);
 	top->zone_size = __cpu_to_le64(o->zone_size);
 	top->zone_skip = __cpu_to_le64(o->zone_skip);
@@ -448,12 +532,14 @@ void convert_thread_options_to_net(struct thread_options_pack *top,
 	top->trim_backlog = __cpu_to_le64(o->trim_backlog);
 	top->offset_increment = __cpu_to_le64(o->offset_increment);
 	top->number_ios = __cpu_to_le64(o->number_ios);
+	top->rate_process = cpu_to_le32(o->rate_process);
 
 	for (i = 0; i < FIO_IO_U_LIST_MAX_LEN; i++)
 		top->percentile_list[i].u.i =
 			__cpu_to_le64(fio_double_to_uint64(o->percentile_list[i].u.f));
 #if 0
 	uint8_t cpumask[FIO_TOP_STR_MAX];
 	uint8_t verify_cpumask[FIO_TOP_STR_MAX];
+	uint8_t log_gz_cpumask[FIO_TOP_STR_MAX];
 #endif
 }
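
Background note on the pattern the diff extends: cconv.c mirrors every field of struct thread_options into the packed, fixed-width struct thread_options_pack so that fio's client/server mode can ship job options between hosts of different endianness; integers are stored little-endian and converted with the cpu_to_le*/le*_to_cpu helpers, while floating-point options travel through the union's integer member via fio_double_to_uint64()/fio_uint64_to_double(). The standalone C sketch below is an editor's illustration of that round trip only; the struct names, the "example_opt" field, and the my_cpu_to_le32/my_le32_to_cpu helpers are simplified stand-ins and are not fio code.

/*
 * Illustrative sketch of the pack/unpack pattern used in cconv.c.
 * Not part of the commit above; helpers are stand-ins for fio's macros.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct opts      { uint32_t example_opt; };	/* host-endian view of an option */
struct opts_pack { uint32_t example_opt; };	/* little-endian wire representation */

/* host -> little endian: lay the bytes out LSB-first, reread them verbatim */
static uint32_t my_cpu_to_le32(uint32_t v)
{
	uint8_t b[4] = { v & 0xff, (v >> 8) & 0xff, (v >> 16) & 0xff, (v >> 24) & 0xff };
	uint32_t r;

	memcpy(&r, b, sizeof(r));
	return r;
}

/* little endian -> host: reassemble the value from LSB-first bytes */
static uint32_t my_le32_to_cpu(uint32_t v)
{
	uint8_t b[4];

	memcpy(b, &v, sizeof(v));
	return (uint32_t)b[0] | ((uint32_t)b[1] << 8) |
	       ((uint32_t)b[2] << 16) | ((uint32_t)b[3] << 24);
}

int main(void)
{
	struct opts o = { .example_opt = 0x12345678 }, back;
	struct opts_pack top;

	/* analogous to convert_thread_options_to_net(): host -> wire */
	top.example_opt = my_cpu_to_le32(o.example_opt);

	/* analogous to convert_thread_options_to_cpu(): wire -> host */
	back.example_opt = my_le32_to_cpu(top.example_opt);

	assert(back.example_opt == o.example_opt);
	printf("round trip ok: 0x%x\n", (unsigned int)back.example_opt);
	return 0;
}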