client: defer local trigger execute until after state is received
diff --git a/cconv.c b/cconv.c
index 2f7177d9b1e6e4e23cbecc6c37706b4ce3ed2bfd..d0a124ec6ee000026bf8787b55c4a50774e28b2b 100644
--- a/cconv.c
+++ b/cconv.c
@@ -131,6 +131,7 @@ void convert_thread_options_to_cpu(struct thread_options *o,
        o->verifysort = le32_to_cpu(top->verifysort);
        o->verifysort_nr = le32_to_cpu(top->verifysort_nr);
        o->experimental_verify = le32_to_cpu(top->experimental_verify);
+       o->verify_state = le32_to_cpu(top->verify_state);
        o->verify_interval = le32_to_cpu(top->verify_interval);
        o->verify_offset = le32_to_cpu(top->verify_offset);
 
@@ -149,8 +150,10 @@ void convert_thread_options_to_cpu(struct thread_options *o,
        o->rand_repeatable = le32_to_cpu(top->rand_repeatable);
        o->allrand_repeatable = le32_to_cpu(top->allrand_repeatable);
        o->rand_seed = le64_to_cpu(top->rand_seed);
-       o->use_os_rand = le32_to_cpu(top->use_os_rand);
        o->log_avg_msec = le32_to_cpu(top->log_avg_msec);
+       o->log_offset = le32_to_cpu(top->log_offset);
+       o->log_gz = le32_to_cpu(top->log_gz);
+       o->log_gz_store = le32_to_cpu(top->log_gz_store);
        o->norandommap = le32_to_cpu(top->norandommap);
        o->softrandommap = le32_to_cpu(top->softrandommap);
        o->bs_unaligned = le32_to_cpu(top->bs_unaligned);
@@ -238,6 +241,7 @@ void convert_thread_options_to_cpu(struct thread_options *o,
        o->latency_percentile.u.f = fio_uint64_to_double(le64_to_cpu(top->latency_percentile.u.i));
        o->compress_percentage = le32_to_cpu(top->compress_percentage);
        o->compress_chunk = le32_to_cpu(top->compress_chunk);
+       o->dedupe_percentage = le32_to_cpu(top->dedupe_percentage);
 
        o->trim_backlog = le64_to_cpu(top->trim_backlog);
 
@@ -305,6 +309,7 @@ void convert_thread_options_to_net(struct thread_options_pack *top,
        top->verifysort = cpu_to_le32(o->verifysort);
        top->verifysort_nr = cpu_to_le32(o->verifysort_nr);
        top->experimental_verify = cpu_to_le32(o->experimental_verify);
+       top->verify_state = cpu_to_le32(o->verify_state);
        top->verify_interval = cpu_to_le32(o->verify_interval);
        top->verify_offset = cpu_to_le32(o->verify_offset);
        top->verify_pattern_bytes = cpu_to_le32(o->verify_pattern_bytes);
@@ -319,8 +324,10 @@ void convert_thread_options_to_net(struct thread_options_pack *top,
        top->rand_repeatable = cpu_to_le32(o->rand_repeatable);
        top->allrand_repeatable = cpu_to_le32(o->allrand_repeatable);
        top->rand_seed = __cpu_to_le64(o->rand_seed);
-       top->use_os_rand = cpu_to_le32(o->use_os_rand);
        top->log_avg_msec = cpu_to_le32(o->log_avg_msec);
+       top->log_offset = cpu_to_le32(o->log_offset);
+       top->log_gz = cpu_to_le32(o->log_gz);
+       top->log_gz_store = cpu_to_le32(o->log_gz_store);
        top->norandommap = cpu_to_le32(o->norandommap);
        top->softrandommap = cpu_to_le32(o->softrandommap);
        top->bs_unaligned = cpu_to_le32(o->bs_unaligned);
@@ -395,6 +402,7 @@ void convert_thread_options_to_net(struct thread_options_pack *top,
        top->latency_percentile.u.i = __cpu_to_le64(fio_double_to_uint64(o->latency_percentile.u.f));
        top->compress_percentage = cpu_to_le32(o->compress_percentage);
        top->compress_chunk = cpu_to_le32(o->compress_chunk);
+       top->dedupe_percentage = cpu_to_le32(o->dedupe_percentage);
 
        for (i = 0; i < DDIR_RWDIR_CNT; i++) {
                top->bs[i] = cpu_to_le32(o->bs[i]);
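
Note: cconv.c packs struct thread_options into the little-endian struct thread_options_pack exchanged between fio's client and server, so every new option (here verify_state, log_offset, log_gz, log_gz_store and dedupe_percentage) needs one line in convert_thread_options_to_cpu() and a matching line in convert_thread_options_to_net(). A minimal sketch of that pattern follows; it is not fio code, the example_option field is hypothetical, and the glibc <endian.h> helpers stand in for fio's le32_to_cpu()/cpu_to_le32():

#include <stdint.h>
#include <endian.h>     /* le32toh()/htole32(): glibc-style byte-order helpers */

struct thread_options      { uint32_t example_option; };  /* host-order copy   */
struct thread_options_pack { uint32_t example_option; };  /* on-wire LE layout */

/* wire -> host, mirroring convert_thread_options_to_cpu() */
static void example_to_cpu(struct thread_options *o,
                           const struct thread_options_pack *top)
{
        o->example_option = le32toh(top->example_option);
}

/* host -> wire, mirroring convert_thread_options_to_net() */
static void example_to_net(struct thread_options_pack *top,
                           const struct thread_options *o)
{
        top->example_option = htole32(o->example_option);
}

Keeping both directions in lockstep is what keeps an older client and a newer server (or vice versa) from silently misinterpreting the option stream.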