}
td->o.ignore_error_nr[etype] = 4;
- error = malloc(4 * sizeof(struct bssplit));
+ error = calloc(4, sizeof(int));
i = 0;
while ((fname = strsep(&str, ":")) != NULL) {
strip_blank_front(&str);
strip_blank_end(str);
+ /*
+ * Ignore what we may already have from nrfiles option.
+ */
if (!td->files_index)
td->o.nr_files = 0;
assert(ret != 0);
td->o.buffer_pattern_bytes = ret;
- if (!td->o.compress_percentage)
+
+ /*
+ * If this job is doing any reading or has compression set,
+ * ensure that we refill buffers for writes or we could be
+ * invalidating the pattern through reads.
+ */
+ if (!td->o.compress_percentage && !td_read(td))
td->o.refill_buffers = 0;
+ else
+ td->o.refill_buffers = 1;
+
td->o.scramble_buffers = 0;
td->o.zero_buffers = 0;
return 0;
}
+/*
+ * Option callback for "offset": accepts either an absolute byte value or a
+ * percentage of the file/device size.  parse_is_percent() detects the
+ * percentage encoding produced by the option parser; in that case the raw
+ * percentage is recovered as (-1ULL - v) — presumably the parser's sentinel
+ * encoding, TODO confirm against the parser implementation.  Exactly one of
+ * start_offset / start_offset_percent ends up set.  Always returns 0.
+ */
+static int str_offset_cb(void *data, unsigned long long *__val)
+{
+	struct thread_data *td = cb_data_to_td(data);
+	unsigned long long v = *__val;
+
+	if (parse_is_percent(v)) {
+		/* Percentage mode: zero the absolute offset so it cannot shadow it */
+		td->o.start_offset = 0;
+		td->o.start_offset_percent = -1ULL - v;
+		/*
+		 * NOTE(review): %d paired with start_offset_percent — field's
+		 * declared type is not visible here; if it is unsigned, %u
+		 * would be the matching specifier.  Harmless for the 0..100
+		 * range, but verify against thread_options.h.
+		 */
+		dprint(FD_PARSE, "SET start_offset_percent %d\n",
+			td->o.start_offset_percent);
+	} else
+		td->o.start_offset = v;
+
+	return 0;
+}
+
static int str_size_cb(void *data, unsigned long long *__val)
{
struct thread_data *td = cb_data_to_td(data);
.alias = "io_limit",
.lname = "IO Size",
.type = FIO_OPT_STR_VAL,
- .off1 = offsetof(struct thread_options, io_limit),
+ .off1 = offsetof(struct thread_options, io_size),
.help = "Total size of I/O to be performed",
.interval = 1024 * 1024,
.category = FIO_OPT_C_IO,
.lname = "IO offset",
.alias = "fileoffset",
.type = FIO_OPT_STR_VAL,
+ .cb = str_offset_cb,
.off1 = offsetof(struct thread_options, start_offset),
.help = "Start IO from this offset",
.def = "0",
},
{ .ival = "gauss",
.oval = FIO_FSERVICE_GAUSS,
- .help = "Normal (gaussian) distribution",
+ .help = "Normal (Gaussian) distribution",
},
{ .ival = "roundrobin",
.oval = FIO_FSERVICE_RR,
.oval = MEM_MMAPHUGE,
.help = "Like mmap, but use huge pages",
},
+#endif
+#ifdef CONFIG_CUDA
+ { .ival = "cudamalloc",
+ .oval = MEM_CUDA_MALLOC,
+ .help = "Allocate GPU device memory for GPUDirect RDMA",
+ },
#endif
},
},
.oval = VERIFY_SHA512,
.help = "Use sha512 checksums for verification",
},
+ { .ival = "sha3-224",
+ .oval = VERIFY_SHA3_224,
+ .help = "Use sha3-224 checksums for verification",
+ },
+ { .ival = "sha3-256",
+ .oval = VERIFY_SHA3_256,
+ .help = "Use sha3-256 checksums for verification",
+ },
+ { .ival = "sha3-384",
+ .oval = VERIFY_SHA3_384,
+ .help = "Use sha3-384 checksums for verification",
+ },
+ { .ival = "sha3-512",
+ .oval = VERIFY_SHA3_512,
+ .help = "Use sha3-512 checksums for verification",
+ },
{ .ival = "xxhash",
.oval = VERIFY_XXHASH,
.help = "Use xxhash checksums for verification",
.type = FIO_OPT_UNSUPPORTED,
.help = "Build fio with libnuma-dev(el) to enable this option",
},
+#endif
+#ifdef CONFIG_CUDA
+ {
+ .name = "gpu_dev_id",
+ .lname = "GPU device ID",
+ .type = FIO_OPT_INT,
+ .off1 = offsetof(struct thread_options, gpu_dev_id),
+ .help = "Set GPU device ID for GPUDirect RDMA",
+ .def = "0",
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_INVALID,
+ },
#endif
{
.name = "end_fsync",
.category = FIO_OPT_C_STAT,
.group = FIO_OPT_G_INVALID,
},
+ {
+ .name = "stats",
+ .lname = "Stats",
+ .type = FIO_OPT_BOOL,
+ .off1 = offsetof(struct thread_options, stats),
+ .help = "Enable collection of stats",
+ .def = "1",
+ .category = FIO_OPT_C_STAT,
+ .group = FIO_OPT_G_INVALID,
+ },
{
.name = "zero_buffers",
.lname = "Zero I/O buffers",