/*
 * Tail of the wire->host option conversion: each fixed-width field is
 * byte-swapped from little-endian network order into native CPU order.
 * Lines prefixed with '+' are additions introduced by this diff.
 * NOTE(review): the enclosing function header is above this excerpt.
 */
o->fsync_on_close = le32_to_cpu(top->fsync_on_close);
o->bs_is_seq_rand = le32_to_cpu(top->bs_is_seq_rand);
o->random_distribution = le32_to_cpu(top->random_distribution);
/* new field added by this change; must be mirrored in the to_net path */
+ o->exitall_error = le32_to_cpu(top->exitall_error);
/*
 * Floating-point options travel on the wire as uint64 bit patterns
 * (the .u.i member of the union); fio_uint64_to_double() recovers the
 * double after the endian swap.
 */
o->zipf_theta.u.f = fio_uint64_to_double(le64_to_cpu(top->zipf_theta.u.i));
o->pareto_h.u.f = fio_uint64_to_double(le64_to_cpu(top->pareto_h.u.i));
o->gauss_dev.u.f = fio_uint64_to_double(le64_to_cpu(top->gauss_dev.u.i));
o->per_job_logs = le32_to_cpu(top->per_job_logs);
o->trim_backlog = le64_to_cpu(top->trim_backlog);
/* new field added by this change; must be mirrored in the to_net path */
+ o->rate_process = le32_to_cpu(top->rate_process);
/* percentile list entries are doubles, shipped the same fixed-uint64 way */
for (i = 0; i < FIO_IO_U_LIST_MAX_LEN; i++)
o->percentile_list[i].u.f = fio_uint64_to_double(le64_to_cpu(top->percentile_list[i].u.i));
/*
 * Disabled block: cpumask fields are deliberately not converted here —
 * presumably they are handled (or intentionally skipped) elsewhere.
 * TODO(review): confirm the masks really need no endian handling.
 */
#if 0
uint8_t cpumask[FIO_TOP_STR_MAX];
uint8_t verify_cpumask[FIO_TOP_STR_MAX];
+ uint8_t log_gz_cpumask[FIO_TOP_STR_MAX];
#endif
}
/*
 * Tail of the host->wire option conversion: the mirror image of the
 * to_cpu path above — native CPU values are serialized to little-endian
 * network order.  Lines prefixed with '+' are additions from this diff.
 * NOTE(review): the enclosing function header is above this excerpt;
 * every field converted in the to_cpu direction should have a matching
 * line here (per_job_logs is not visible in this excerpt — verify it is
 * handled elsewhere in the full function).
 */
top->fsync_on_close = cpu_to_le32(o->fsync_on_close);
top->bs_is_seq_rand = cpu_to_le32(o->bs_is_seq_rand);
top->random_distribution = cpu_to_le32(o->random_distribution);
/* counterpart of the new exitall_error conversion in the to_cpu path */
+ top->exitall_error = cpu_to_le32(o->exitall_error);
/*
 * Doubles are serialized as uint64 bit patterns via
 * fio_double_to_uint64() before the endian swap (union .u.i member).
 */
top->zipf_theta.u.i = __cpu_to_le64(fio_double_to_uint64(o->zipf_theta.u.f));
top->pareto_h.u.i = __cpu_to_le64(fio_double_to_uint64(o->pareto_h.u.f));
top->gauss_dev.u.i = __cpu_to_le64(fio_double_to_uint64(o->gauss_dev.u.f));
top->trim_backlog = __cpu_to_le64(o->trim_backlog);
top->offset_increment = __cpu_to_le64(o->offset_increment);
top->number_ios = __cpu_to_le64(o->number_ios);
/* counterpart of the new rate_process conversion in the to_cpu path */
+ top->rate_process = cpu_to_le32(o->rate_process);
/* percentile list entries serialized the same fixed-uint64 way */
for (i = 0; i < FIO_IO_U_LIST_MAX_LEN; i++)
top->percentile_list[i].u.i = __cpu_to_le64(fio_double_to_uint64(o->percentile_list[i].u.f));
/*
 * Disabled block mirroring the to_cpu side: cpumask fields are not
 * serialized here.  TODO(review): confirm this omission is intentional.
 */
#if 0
uint8_t cpumask[FIO_TOP_STR_MAX];
uint8_t verify_cpumask[FIO_TOP_STR_MAX];
+ uint8_t log_gz_cpumask[FIO_TOP_STR_MAX];
#endif
}