diff --git a/cconv.c b/cconv.c
index 2843a71ca555c664961dfa88826c46ae5f46ed68..4a40ed0d647ba863a27e1f751fdf71b7ac658174 100644
--- a/cconv.c
+++ b/cconv.c
@@ -10,15 +10,18 @@ static void string_to_cpu(char **dst, const uint8_t *src)
                *dst = strdup(__src);
 }
 
-static void string_to_net(uint8_t *dst, const char *src)
+static void __string_to_net(uint8_t *dst, const char *src, size_t dst_size)
 {
-       if (src)
-               strcpy((char *) dst, src);
-       else
+       if (src) {
+               dst[dst_size - 1] = '\0';
+               strncpy((char *) dst, src, dst_size - 1);
+       } else
                dst[0] = '\0';
 }
 
-void free_thread_options_to_cpu(struct thread_options *o)
+#define string_to_net(dst, src)        __string_to_net((dst), (src), sizeof(dst))
+
+static void free_thread_options_to_cpu(struct thread_options *o)
 {
        free(o->description);
        free(o->name);
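
The hunk above replaces the unbounded strcpy() in string_to_net() with a length-bounded copy: __string_to_net() NUL-terminates the last byte first, then strncpy()s at most dst_size - 1 bytes, and the string_to_net() macro derives dst_size from sizeof(dst). That only works because every destination passed to the macro is a fixed-size array embedded in the pack structure; handing it a plain pointer would make sizeof(dst) the size of the pointer instead. A minimal standalone sketch of the pattern (struct pack_example and its 16-byte field are illustrative, not fio's):

/*
 * Standalone sketch of the sizeof()-based bounding used above.
 * struct pack_example and its name[] field are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void __string_to_net(uint8_t *dst, const char *src, size_t dst_size)
{
	if (src) {
		dst[dst_size - 1] = '\0';	/* guarantee termination up front */
		strncpy((char *) dst, src, dst_size - 1);
	} else
		dst[0] = '\0';
}

/* sizeof(dst) is the buffer size only when dst is a real array at the call site */
#define string_to_net(dst, src)	__string_to_net((dst), (src), sizeof(dst))

struct pack_example {
	uint8_t name[16];
};

int main(void)
{
	struct pack_example p;

	string_to_net(p.name, "a-rather-long-job-name");	/* 22 chars */
	printf("%s\n", (char *) p.name);	/* truncated to 15 chars + NUL */
	return 0;
}
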
@@ -77,6 +80,7 @@ void convert_thread_options_to_cpu(struct thread_options *o,
        o->iodepth_batch = le32_to_cpu(top->iodepth_batch);
        o->iodepth_batch_complete = le32_to_cpu(top->iodepth_batch_complete);
        o->size = le64_to_cpu(top->size);
+       o->io_limit = le64_to_cpu(top->io_limit);
        o->size_percent = le32_to_cpu(top->size_percent);
        o->fill_device = le32_to_cpu(top->fill_device);
        o->file_append = le32_to_cpu(top->file_append);
@@ -147,6 +151,9 @@ void convert_thread_options_to_cpu(struct thread_options *o,
        o->rand_seed = le64_to_cpu(top->rand_seed);
        o->use_os_rand = le32_to_cpu(top->use_os_rand);
        o->log_avg_msec = le32_to_cpu(top->log_avg_msec);
+       o->log_offset = le32_to_cpu(top->log_offset);
+       o->log_gz = le32_to_cpu(top->log_gz);
+       o->log_gz_store = le32_to_cpu(top->log_gz_store);
        o->norandommap = le32_to_cpu(top->norandommap);
        o->softrandommap = le32_to_cpu(top->softrandommap);
        o->bs_unaligned = le32_to_cpu(top->bs_unaligned);
@@ -234,6 +241,7 @@ void convert_thread_options_to_cpu(struct thread_options *o,
        o->latency_percentile.u.f = fio_uint64_to_double(le64_to_cpu(top->latency_percentile.u.i));
        o->compress_percentage = le32_to_cpu(top->compress_percentage);
        o->compress_chunk = le32_to_cpu(top->compress_chunk);
+       o->dedupe_percentage = le32_to_cpu(top->dedupe_percentage);
 
        o->trim_backlog = le64_to_cpu(top->trim_backlog);
 
@@ -317,6 +325,9 @@ void convert_thread_options_to_net(struct thread_options_pack *top,
        top->rand_seed = __cpu_to_le64(o->rand_seed);
        top->use_os_rand = cpu_to_le32(o->use_os_rand);
        top->log_avg_msec = cpu_to_le32(o->log_avg_msec);
+       top->log_offset = cpu_to_le32(o->log_offset);
+       top->log_gz = cpu_to_le32(o->log_gz);
+       top->log_gz_store = cpu_to_le32(o->log_gz_store);
        top->norandommap = cpu_to_le32(o->norandommap);
        top->softrandommap = cpu_to_le32(o->softrandommap);
        top->bs_unaligned = cpu_to_le32(o->bs_unaligned);
@@ -391,6 +402,7 @@ void convert_thread_options_to_net(struct thread_options_pack *top,
        top->latency_percentile.u.i = __cpu_to_le64(fio_double_to_uint64(o->latency_percentile.u.f));
        top->compress_percentage = cpu_to_le32(o->compress_percentage);
        top->compress_chunk = cpu_to_le32(o->compress_chunk);
+       top->dedupe_percentage = cpu_to_le32(o->dedupe_percentage);
 
        for (i = 0; i < DDIR_RWDIR_CNT; i++) {
                top->bs[i] = cpu_to_le32(o->bs[i]);
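
Both conversion directions also carry floating-point options such as latency_percentile (context lines above): the double is first turned into a 64-bit integer via fio_double_to_uint64()/fio_uint64_to_double() and only then byte-swapped like any other u64. A minimal sketch of that kind of bit-level round trip, assuming a memcpy-based type pun; the helpers below are simplified stand-ins for illustration, not fio's own implementations:

/*
 * Illustrative double <-> uint64_t bit round trip, analogous to the
 * fio_double_to_uint64()/fio_uint64_to_double() calls above. Stand-in
 * helpers only; fio defines its own versions elsewhere in the tree.
 */
#include <assert.h>
#include <stdint.h>
#include <string.h>

static uint64_t double_to_u64_bits(double d)
{
	uint64_t u;

	memcpy(&u, &d, sizeof(u));	/* copy the raw bit pattern */
	return u;
}

static double u64_bits_to_double(uint64_t u)
{
	double d;

	memcpy(&d, &u, sizeof(d));
	return d;
}

int main(void)
{
	double pct = 99.5;

	/* the bits survive the integer detour unchanged */
	assert(u64_bits_to_double(double_to_u64_bits(pct)) == pct);
	return 0;
}
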
@@ -425,6 +437,7 @@ void convert_thread_options_to_net(struct thread_options_pack *top,
        memcpy(top->buffer_pattern, o->buffer_pattern, MAX_PATTERN_SIZE);
 
        top->size = __cpu_to_le64(o->size);
+       top->io_limit = __cpu_to_le64(o->io_limit);
        top->verify_backlog = __cpu_to_le64(o->verify_backlog);
        top->start_delay = __cpu_to_le64(o->start_delay);
        top->start_delay_high = __cpu_to_le64(o->start_delay_high);
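
Every option added to the wire format needs this symmetric treatment: packed with cpu_to_le32()/__cpu_to_le64() in convert_thread_options_to_net() and unpacked with le32_to_cpu()/le64_to_cpu() in convert_thread_options_to_cpu(), as the io_limit, log_offset, log_gz, log_gz_store and dedupe_percentage hunks above do for both directions. A minimal sketch of such a 32-bit round trip, using stand-in helpers (fio's real conversion macros live in its own headers, and __builtin_bswap32() assumes GCC or Clang):

/*
 * Illustrative little-endian round trip for a 32-bit option field,
 * mirroring the to_net/to_cpu pairing in the diff. my_cpu_to_le32()
 * and my_le32_to_cpu() are stand-ins, not fio's helpers.
 */
#include <assert.h>
#include <stdint.h>

static uint32_t my_cpu_to_le32(uint32_t v)
{
	const union { uint16_t u16; uint8_t u8[2]; } probe = { .u16 = 1 };

	if (probe.u8[0])		/* little-endian host: already wire order */
		return v;
	return __builtin_bswap32(v);	/* big-endian host: byte-swap */
}

static uint32_t my_le32_to_cpu(uint32_t v)
{
	return my_cpu_to_le32(v);	/* the swap is its own inverse */
}

int main(void)
{
	uint32_t dedupe_percentage = 40;			/* CPU-order value */
	uint32_t wire = my_cpu_to_le32(dedupe_percentage);	/* packed for the wire */

	assert(my_le32_to_cpu(wire) == dedupe_percentage);	/* unpacked intact */
	return 0;
}
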