Unlink job files after each iteration or loop. Default: false.
-.. option:: zonesize=int
+.. option:: zonerange=int
- Divide a file into zones of the specified size. See :option:`zoneskip`.
+ Size of a single zone in which I/O occurs. See also :option:`zonesize`
+ and :option:`zoneskip`.
-.. option:: zonerange=int
+.. option:: zonesize=int
- Give size of an I/O zone. See :option:`zoneskip`.
+ Number of bytes to transfer before skipping :option:`zoneskip`
+ bytes. If this parameter is smaller than :option:`zonerange` then only
+ a fraction of each zone with :option:`zonerange` bytes will be
+ accessed. If this parameter is larger than :option:`zonerange` then
+	each zone will be accessed multiple times before skipping to the next
+	zone.
.. option:: zoneskip=int
- Skip the specified number of bytes when :option:`zonesize` data has been
- read. The two zone options can be used to only do I/O on zones of a file.
+ Skip the specified number of bytes when :option:`zonesize` data have
+ been transferred. The three zone options can be used to do strided I/O
+ on a file.
I/O type
gettime-thread.c helpers.c json.c idletime.c td_error.c \
profiles/tiobench.c profiles/act.c io_u_queue.c filelock.c \
workqueue.c rate-submit.c optgroup.c helper_thread.c \
- steadystate.c
+ steadystate.c zone-dist.c
ifdef CONFIG_LIBHDFS
HDFSFLAGS= -I $(JAVA_HOME)/include -I $(JAVA_HOME)/include/linux -I $(FIO_LIBHDFS_INCLUDE)
#include "rate-submit.h"
#include "helper_thread.h"
#include "pshared.h"
+#include "zone-dist.h"
static struct fio_sem *startup_sem;
static struct flist_head *cgroup_list;
goto err;
}
+ td_zone_gen_index(td);
+
/*
* Do this early, we don't want the compress threads to be limited
* to the same CPUs as the IO workers. So do this before we set
close_ioengine(td);
cgroup_shutdown(td, cgroup_mnt);
verify_free_state(td);
-
- if (td->zone_state_index) {
- int i;
-
- for (i = 0; i < DDIR_RWDIR_CNT; i++)
- free(td->zone_state_index[i]);
- free(td->zone_state_index);
- td->zone_state_index = NULL;
- }
+ td_zone_free_index(td);
if (fio_option_is_set(o, cpumask)) {
ret = fio_cpuset_exit(&o->cpumask);
.BI unlink_each_loop \fR=\fPbool
Unlink job files after each iteration or loop. Default: false.
.TP
-.BI zonesize \fR=\fPint
-Divide a file into zones of the specified size. See \fBzoneskip\fR.
+Fio supports strided data access. After having read \fBzonesize\fR bytes from an area that is \fBzonerange\fR bytes big, \fBzoneskip\fR bytes are skipped.
.TP
.BI zonerange \fR=\fPint
-Give size of an I/O zone. See \fBzoneskip\fR.
+Size of a single zone in which I/O occurs.
+.TP
+.BI zonesize \fR=\fPint
+Number of bytes to transfer before skipping \fBzoneskip\fR bytes. If this
+parameter is smaller than \fBzonerange\fR then only a fraction of each zone
+with \fBzonerange\fR bytes will be accessed. If this parameter is larger than
+\fBzonerange\fR then each zone will be accessed multiple times before skipping
+to the next zone.
.TP
.BI zoneskip \fR=\fPint
-Skip the specified number of bytes when \fBzonesize\fR data has been
-read. The two zone options can be used to only do I/O on zones of a file.
+Skip the specified number of bytes after \fBzonesize\fR bytes of data have been
+transferred.
+
.SS "I/O type"
.TP
.BI direct \fR=\fPbool
GtkTreeIter iter;
struct tm *tm;
time_t sec;
- char tmp[64], timebuf[80];
+ char tmp[64], timebuf[96];
sec = p->log_sec;
tm = localtime(&sec);
return 0;
}
-static void __td_zone_gen_index(struct thread_data *td, enum fio_ddir ddir)
-{
- unsigned int i, j, sprev, aprev;
- uint64_t sprev_sz;
-
- td->zone_state_index[ddir] = malloc(sizeof(struct zone_split_index) * 100);
-
- sprev_sz = sprev = aprev = 0;
- for (i = 0; i < td->o.zone_split_nr[ddir]; i++) {
- struct zone_split *zsp = &td->o.zone_split[ddir][i];
-
- for (j = aprev; j < aprev + zsp->access_perc; j++) {
- struct zone_split_index *zsi = &td->zone_state_index[ddir][j];
-
- zsi->size_perc = sprev + zsp->size_perc;
- zsi->size_perc_prev = sprev;
-
- zsi->size = sprev_sz + zsp->size;
- zsi->size_prev = sprev_sz;
- }
-
- aprev += zsp->access_perc;
- sprev += zsp->size_perc;
- sprev_sz += zsp->size;
- }
-}
-
-/*
- * Generate state table for indexes, so we don't have to do it inline from
- * the hot IO path
- */
-static void td_zone_gen_index(struct thread_data *td)
-{
- int i;
-
- td->zone_state_index = malloc(DDIR_RWDIR_CNT *
- sizeof(struct zone_split_index *));
-
- for (i = 0; i < DDIR_RWDIR_CNT; i++)
- __td_zone_gen_index(td, i);
-}
-
static int parse_zoned_distribution(struct thread_data *td, const char *input,
bool absolute)
{
return ret;
}
- if (!ret)
- td_zone_gen_index(td);
- else {
+ if (ret) {
for (i = 0; i < DDIR_RWDIR_CNT; i++)
td->o.zone_split_nr[i] = 0;
}
#ifndef CONFIG_HAVE_VASPRINTF
int vasprintf(char **strp, const char *fmt, va_list ap)
{
- va_list ap_copy;
- char *str;
- int len;
+ va_list ap_copy;
+ char *str;
+ int len;
#ifdef va_copy
- va_copy(ap_copy, ap);
+ va_copy(ap_copy, ap);
#else
- __va_copy(ap_copy, ap);
+ __va_copy(ap_copy, ap);
#endif
- len = vsnprintf(NULL, 0, fmt, ap_copy);
- va_end(ap_copy);
+ len = vsnprintf(NULL, 0, fmt, ap_copy);
+ va_end(ap_copy);
- if (len < 0)
- return len;
+ if (len < 0)
+ return len;
- len++;
- str = malloc(len);
- *strp = str;
- return str ? vsnprintf(str, len, fmt, ap) : -1;
+ len++;
+ str = malloc(len);
+ *strp = str;
+ return str ? vsnprintf(str, len, fmt, ap) : -1;
}
#endif
#ifndef CONFIG_HAVE_ASPRINTF
int asprintf(char **strp, const char *fmt, ...)
{
- va_list arg;
- int done;
+ va_list arg;
+ int done;
- va_start(arg, fmt);
- done = vasprintf(strp, fmt, arg);
- va_end(arg);
+ va_start(arg, fmt);
+ done = vasprintf(strp, fmt, arg);
+ va_end(arg);
- return done;
+ return done;
}
#endif
--- /dev/null
+#include <stdlib.h>
+#include "fio.h"
+#include "zone-dist.h"
+
+/*
+ * Build the zone split lookup table for one data direction. The table has
+ * one slot per access percentage point, each recording the cumulative size
+ * percentage and byte offset of the split it falls into, so the hot I/O
+ * path can index it directly instead of walking the split list.
+ * NOTE(review): assumes the access_perc values across the splits sum to at
+ * most 100 — presumably enforced by option parsing; verify in options.c.
+ */
+static void __td_zone_gen_index(struct thread_data *td, enum fio_ddir ddir)
+{
+	unsigned int i, j, sprev, aprev;
+	uint64_t sprev_sz;
+
+	/* 100 slots: one per access percentage point */
+	td->zone_state_index[ddir] = malloc(sizeof(struct zone_split_index) * 100);
+
+	sprev_sz = sprev = aprev = 0;
+	for (i = 0; i < td->o.zone_split_nr[ddir]; i++) {
+		struct zone_split *zsp = &td->o.zone_split[ddir][i];
+
+		/* Fill every percentage slot this split covers */
+		for (j = aprev; j < aprev + zsp->access_perc; j++) {
+			struct zone_split_index *zsi = &td->zone_state_index[ddir][j];
+
+			zsi->size_perc = sprev + zsp->size_perc;
+			zsi->size_perc_prev = sprev;
+
+			zsi->size = sprev_sz + zsp->size;
+			zsi->size_prev = sprev_sz;
+		}
+
+		/* Advance the cumulative access%, size% and byte counters */
+		aprev += zsp->access_perc;
+		sprev += zsp->size_perc;
+		sprev_sz += zsp->size;
+	}
+}
+
+/*
+ * Return true if any data direction has zone split entries configured.
+ */
+static bool has_zones(struct thread_data *td)
+{
+	int i, zones = 0;
+
+	for (i = 0; i < DDIR_RWDIR_CNT; i++)
+		zones += td->o.zone_split_nr[i];
+
+	return zones != 0;
+}
+
+/*
+ * Generate state table for indexes, so we don't have to do it inline from
+ * the hot IO path. No-op when no zone splits are configured; the matching
+ * cleanup is td_zone_free_index().
+ */
+void td_zone_gen_index(struct thread_data *td)
+{
+	int i;
+
+	/* Nothing to build — leave zone_state_index NULL */
+	if (!has_zones(td))
+		return;
+
+	td->zone_state_index = malloc(DDIR_RWDIR_CNT *
+					sizeof(struct zone_split_index *));
+
+	for (i = 0; i < DDIR_RWDIR_CNT; i++)
+		__td_zone_gen_index(td, i);
+}
+
+/*
+ * Free the per-direction zone split tables built by td_zone_gen_index().
+ * Safe to call when no table was built (zone_state_index stays NULL then).
+ */
+void td_zone_free_index(struct thread_data *td)
+{
+	int i;
+
+	if (!td->zone_state_index)
+		return;
+
+	for (i = 0; i < DDIR_RWDIR_CNT; i++) {
+		free(td->zone_state_index[i]);
+		td->zone_state_index[i] = NULL;
+	}
+
+	free(td->zone_state_index);
+	/* Clear so a repeat call is a no-op and stale use is caught */
+	td->zone_state_index = NULL;
+}
--- /dev/null
+#ifndef FIO_ZONE_DIST_H
+#define FIO_ZONE_DIST_H
+
+/* Build the zone split state tables for td (no-op without zone splits). */
+void td_zone_gen_index(struct thread_data *td);
+/* Free tables built by td_zone_gen_index(); safe if none were built. */
+void td_zone_free_index(struct thread_data *td);
+
+#endif