assert(ret != 0);
td->o.buffer_pattern_bytes = ret;
- if (!td->o.compress_percentage)
+
+ /*
+ * If this job is doing any reading or has compression set,
+ * ensure that we refill buffers for writes or we could be
+ * invalidating the pattern through reads.
+ */
+ if (!td->o.compress_percentage && !td_read(td))
td->o.refill_buffers = 0;
+ else
+ td->o.refill_buffers = 1;
+
td->o.scramble_buffers = 0;
td->o.zero_buffers = 0;
return 0;
}
+/*
+ * Option-parsing callback for the "offset" option: interpret the parsed
+ * value either as an absolute byte offset, or — when the parser flagged
+ * it via parse_is_percent() — as a percentage of the file/device size.
+ * Always returns 0 (success).
+ */
+static int str_offset_cb(void *data, unsigned long long *__val)
+{
+	struct thread_data *td = cb_data_to_td(data);
+	unsigned long long v = *__val;
+
+	if (parse_is_percent(v)) {
+		/*
+		 * Percentages arrive encoded as -1ULL - pct; undo that
+		 * encoding and clear the byte offset so only the
+		 * percentage form is in effect.
+		 */
+		td->o.start_offset = 0;
+		td->o.start_offset_percent = -1ULL - v;
+		/*
+		 * NOTE(review): "%d" assumes start_offset_percent is an
+		 * int in struct thread_options — confirm; a wider type
+		 * would make this format specifier mismatched (UB).
+		 */
+		dprint(FD_PARSE, "SET start_offset_percent %d\n",
+				td->o.start_offset_percent);
+	} else
+		td->o.start_offset = v;
+
+	return 0;
+}
+
static int str_size_cb(void *data, unsigned long long *__val)
{
struct thread_data *td = cb_data_to_td(data);
.lname = "IO offset",
.alias = "fileoffset",
.type = FIO_OPT_STR_VAL,
+ .cb = str_offset_cb,
.off1 = offsetof(struct thread_options, start_offset),
.help = "Start IO from this offset",
.def = "0",
.oval = MEM_MMAPHUGE,
.help = "Like mmap, but use huge pages",
},
+#endif
+#ifdef CONFIG_CUDA
+ { .ival = "cudamalloc",
+ .oval = MEM_CUDA_MALLOC,
+ .help = "Allocate GPU device memory for GPUDirect RDMA",
+ },
#endif
},
},
.type = FIO_OPT_UNSUPPORTED,
.help = "Build fio with libnuma-dev(el) to enable this option",
},
+#endif
+#ifdef CONFIG_CUDA
+ {
+ .name = "gpu_dev_id",
+ .lname = "GPU device ID",
+ .type = FIO_OPT_INT,
+ .off1 = offsetof(struct thread_options, gpu_dev_id),
+ .help = "Set GPU device ID for GPUDirect RDMA",
+ .def = "0",
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_INVALID,
+ },
#endif
{
.name = "end_fsync",