--- /dev/null
+clone_depth: 1 # NB: this stops FIO-VERSION-GEN making tag based versions
+
+environment:
+ CYG_MIRROR: http://cygwin.mirror.constant.com
+ CYG_ROOT: C:\cygwin64
+ MAKEFLAGS: -j 2
+ matrix:
+ - platform: x64
+ PACKAGE_ARCH: x86_64
+ CONFIGURE_OPTIONS:
+ - platform: x86
+ PACKAGE_ARCH: i686
+ CONFIGURE_OPTIONS: --build-32bit-win --target-win-ver=xp
+
+install:
+ - '%CYG_ROOT%\setup-x86_64.exe --quiet-mode --no-shortcuts --only-site --site "%CYG_MIRROR%" --packages "mingw64-%PACKAGE_ARCH%-zlib" > NUL'
+ - SET PATH=%CYG_ROOT%\bin;%PATH% # NB: Changed env variables persist to later sections
+
+build_script:
+ - 'bash.exe -lc "cd \"${APPVEYOR_BUILD_FOLDER}\" && ./configure --disable-native --extra-cflags=\"-Werror\" ${CONFIGURE_OPTIONS} && make.exe'
+
+after_build:
+ - cd os\windows && dobuild.cmd %PLATFORM%
+
+test_script:
+ - 'bash.exe -lc "cd \"${APPVEYOR_BUILD_FOLDER}\" && file.exe fio.exe && make.exe test'
+
+artifacts:
+ - path: os\windows\*.msi
+ name: msi
#!/bin/sh
GVF=FIO-VERSION-FILE
-DEF_VER=fio-3.6
+DEF_VER=fio-3.8
LF='
'
.. option:: --readonly
- Turn on safety read-only checks, preventing writes. The ``--readonly``
- option is an extra safety guard to prevent users from accidentally starting
- a write workload when that is not desired. Fio will only write if
- `rw=write/randwrite/rw/randrw` is given. This extra safety net can be used
- as an extra precaution as ``--readonly`` will also enable a write check in
- the I/O engine core to prevent writes due to unknown user space bug(s).
+ Turn on safety read-only checks, preventing writes and trims. The
+ ``--readonly`` option is an extra safety guard to prevent users from
+ accidentally starting a write or trim workload when that is not desired.
+ Fio will only modify the device under test if
+ `rw=write/randwrite/rw/randrw/trim/randtrim/trimwrite` is given. This
+ safety net can be used as an extra precaution.
.. option:: --eta=when
and that some blocks may be read/written more than once. If this option is
used with :option:`verify` and multiple blocksizes (via :option:`bsrange`),
only intact blocks are verified, i.e., partially-overwritten blocks are
- ignored.
+ ignored. With an async I/O engine and an I/O depth > 1, it is possible for
+ the same block to be overwritten, which can cause verification errors. Either
+ do not use norandommap in this case, or also use the lfsr random generator.
.. option:: softrandommap=bool
If you want a workload that has 50% 2k reads and 50% 4k reads, while
having 90% 4k writes and 10% 8k writes, you would specify::
- bssplit=2k/50:4k/50,4k/90,8k/10
+ bssplit=2k/50:4k/50,4k/90:8k/10
Fio supports defining up to 64 different weights for each data
direction.
With writefua option set to 1, write operations include
the force unit access (fua) flag. Default is 0.
+.. option:: sg_write_mode=str : [sg]
+ Specify the type of write commands to issue. This option can take three values:
+
+ **write**
+ This is the default where write opcodes are issued as usual.
+ **verify**
+ Issue WRITE AND VERIFY commands. The BYTCHK bit is set to 0. This
+ directs the device to carry out a medium verification with no data
+ comparison. The writefua option is ignored with this selection.
+ **same**
+ Issue WRITE SAME commands. This transfers a single block to the device
+ and writes this same block of data to a contiguous sequence of LBAs
+ beginning at the specified offset. fio's block size parameter specifies
+ the amount of data written with each command. However, the amount of data
+ actually transferred to the device is equal to the device's block
+ (sector) size. For a device with 512 byte sectors, blocksize=8k will
+ write 16 sectors with each command. fio will still generate 8k of data
+ for each command but only the first 512 bytes will be used and
+ transferred to the device. The writefua option is ignored with this
+ selection.
I/O depth
~~~~~~~~~
previously written file. If the data direction includes any form of write,
the verify will be of the newly written data.
+ To avoid false verification errors, do not use the norandommap option when
+ verifying data with async I/O engines and I/O depths > 1. Or use the
+ norandommap and the lfsr random generator together to avoid writing to the
+ same offset with multiple outstanding I/Os.
+
.. option:: verify_offset=int
Swap the verification header with data somewhere else in the block before
+++ /dev/null
-clone_depth: 1 # NB: this stops FIO-VERSION-GEN making tag based versions
-
-environment:
- CYG_MIRROR: http://cygwin.mirror.constant.com
- CYG_ROOT: C:\cygwin64
- MAKEFLAGS: -j 2
- matrix:
- - platform: x64
- PACKAGE_ARCH: x86_64
- CONFIGURE_OPTIONS:
- - platform: x86
- PACKAGE_ARCH: i686
- CONFIGURE_OPTIONS: --build-32bit-win --target-win-ver=xp
-
-install:
- - '%CYG_ROOT%\setup-x86_64.exe --quiet-mode --no-shortcuts --only-site --site "%CYG_MIRROR%" --packages "mingw64-%PACKAGE_ARCH%-zlib" > NUL'
- - SET PATH=%CYG_ROOT%\bin;%PATH% # NB: Changed env variables persist to later sections
-
-build_script:
- - 'bash.exe -lc "cd \"${APPVEYOR_BUILD_FOLDER}\" && ./configure --disable-native --extra-cflags=\"-Werror\" ${CONFIGURE_OPTIONS} && make.exe'
-
-after_build:
- - cd os\windows && dobuild.cmd %PLATFORM%
-
-test_script:
- - 'bash.exe -lc "cd \"${APPVEYOR_BUILD_FOLDER}\" && file.exe fio.exe && make.exe test'
-
-artifacts:
- - path: os\windows\*.msi
- name: msi
extern bool tsc_reliable;
extern int arch_random;
-static inline void arch_init_intel(unsigned int level)
+static inline void arch_init_intel(void)
{
unsigned int eax, ebx, ecx = 0, edx;
arch_random = (ecx & (1U << 30)) != 0;
}
-static inline void arch_init_amd(unsigned int level)
+static inline void arch_init_amd(void)
{
unsigned int eax, ebx, ecx, edx;
str[12] = '\0';
if (!strcmp(str, "GenuineIntel"))
- arch_init_intel(level);
+ arch_init_intel();
else if (!strcmp(str, "AuthenticAMD"))
- arch_init_amd(level);
+ arch_init_amd();
}
#endif
static struct fio_sem *startup_sem;
static struct flist_head *cgroup_list;
-static char *cgroup_mnt;
+static struct cgroup_mnt *cgroup_mnt;
static int exit_value;
static volatile int fio_abort;
static unsigned int nr_process = 0;
if ((full && !min_evts) || !td->o.iodepth_batch_complete_min)
min_evts = 1;
- if (time && (__should_check_rate(td, DDIR_READ) ||
- __should_check_rate(td, DDIR_WRITE) ||
- __should_check_rate(td, DDIR_TRIM)))
+ if (time && __should_check_rate(td))
fio_gettime(time, NULL);
do {
*ret = -io_u->error;
clear_io_u(td, io_u);
} else if (io_u->resid) {
- int bytes = io_u->xfer_buflen - io_u->resid;
+ long long bytes = io_u->xfer_buflen - io_u->resid;
struct fio_file *f = io_u->file;
if (bytes_issued)
*bytes_issued += bytes;
if (!from_verify)
- trim_io_piece(td, io_u);
+ trim_io_piece(io_u);
/*
* zero read, fail
requeue_io_u(td, &io_u);
} else {
sync_done:
- if (comp_time && (__should_check_rate(td, DDIR_READ) ||
- __should_check_rate(td, DDIR_WRITE) ||
- __should_check_rate(td, DDIR_TRIM)))
+ if (comp_time && __should_check_rate(td))
fio_gettime(comp_time, NULL);
*ret = io_u_sync_complete(td, io_u);
if (x1 < y2 && y1 < x2) {
overlap = true;
- dprint(FD_IO, "in-flight overlap: %llu/%lu, %llu/%lu\n",
+ dprint(FD_IO, "in-flight overlap: %llu/%llu, %llu/%llu\n",
x1, io_u->buflen,
y1, check_io_u->buflen);
break;
over = (usperop - total) / usperop * -bs;
td->rate_io_issue_bytes[ddir] += (missed - over);
+ /* adjust for rate_process=poisson */
+ td->last_usec[ddir] += total;
}
}
log_io_piece(td, io_u);
if (td->o.io_submit_mode == IO_MODE_OFFLOAD) {
- const unsigned long blen = io_u->xfer_buflen;
- const enum fio_ddir ddir = acct_ddir(io_u);
+ const unsigned long long blen = io_u->xfer_buflen;
+ const enum fio_ddir __ddir = acct_ddir(io_u);
if (td->error)
break;
workqueue_enqueue(&td->io_wq, &io_u->work);
ret = FIO_Q_QUEUED;
- if (ddir_rw(ddir)) {
- td->io_issues[ddir]++;
- td->io_issue_bytes[ddir] += blen;
- td->rate_io_issue_bytes[ddir] += blen;
+ if (ddir_rw(__ddir)) {
+ td->io_issues[__ddir]++;
+ td->io_issue_bytes[__ddir] += blen;
+ td->rate_io_issue_bytes[__ddir] += blen;
}
if (should_check_rate(td))
- td->rate_next_io_time[ddir] = usec_for_io(td, ddir);
+ td->rate_next_io_time[__ddir] = usec_for_io(td, __ddir);
} else {
ret = io_u_submit(td, io_u);
static int init_io_u(struct thread_data *td)
{
struct io_u *io_u;
- unsigned int max_bs, min_write;
+ unsigned long long max_bs, min_write;
int cl_align, i, max_units;
int data_xfer = 1, err;
char *p;
td->orig_buffer_size += page_mask + td->o.mem_align;
if (td->o.mem_type == MEM_SHMHUGE || td->o.mem_type == MEM_MMAPHUGE) {
- unsigned long bs;
+ unsigned long long bs;
bs = td->orig_buffer_size + td->o.hugepage_size - 1;
td->orig_buffer_size = bs & ~(td->o.hugepage_size - 1);
} else
td->pid = gettid();
- fio_local_clock_init(o->use_thread);
+ fio_local_clock_init();
dprint(FD_PROCESS, "jobs pid=%d started\n", (int) td->pid);
close_and_free_files(td);
cleanup_io_u(td);
close_ioengine(td);
- cgroup_shutdown(td, &cgroup_mnt);
+ cgroup_shutdown(td, cgroup_mnt);
verify_free_state(td);
if (td->zone_state_index) {
cgroup_kill(cgroup_list);
sfree(cgroup_list);
}
- sfree(cgroup_mnt);
fio_sem_remove(startup_sem);
stat_exit();
queue_io_piece(td, ipo);
}
+static void dump_trace(struct blk_io_trace *t)
+{
+ log_err("blktrace: ignoring zero byte trace: action=%x\n", t->action);
+}
+
static void handle_trace_fs(struct thread_data *td, struct blk_io_trace *t,
unsigned long long ttime, unsigned long *ios,
unsigned int *rw_bs)
return;
}
- assert(t->bytes);
+ if (!t->bytes) {
+ if (!fio_did_warn(FIO_WARN_BTRACE_ZERO))
+ dump_trace(t);
+ return;
+ }
if (t->bytes > rw_bs[rw])
rw_bs[rw] = t->bytes;
o->start_offset_percent = le32_to_cpu(top->start_offset_percent);
for (i = 0; i < DDIR_RWDIR_CNT; i++) {
- o->bs[i] = le32_to_cpu(top->bs[i]);
- o->ba[i] = le32_to_cpu(top->ba[i]);
- o->min_bs[i] = le32_to_cpu(top->min_bs[i]);
- o->max_bs[i] = le32_to_cpu(top->max_bs[i]);
+ o->bs[i] = le64_to_cpu(top->bs[i]);
+ o->ba[i] = le64_to_cpu(top->ba[i]);
+ o->min_bs[i] = le64_to_cpu(top->min_bs[i]);
+ o->max_bs[i] = le64_to_cpu(top->max_bs[i]);
o->bssplit_nr[i] = le32_to_cpu(top->bssplit_nr[i]);
if (o->bssplit_nr[i]) {
o->bssplit[i] = malloc(o->bssplit_nr[i] * sizeof(struct bssplit));
for (j = 0; j < o->bssplit_nr[i]; j++) {
- o->bssplit[i][j].bs = le32_to_cpu(top->bssplit[i][j].bs);
+ o->bssplit[i][j].bs = le64_to_cpu(top->bssplit[i][j].bs);
o->bssplit[i][j].perc = le32_to_cpu(top->bssplit[i][j].perc);
}
}
o->gauss_dev.u.f = fio_uint64_to_double(le64_to_cpu(top->gauss_dev.u.i));
o->random_generator = le32_to_cpu(top->random_generator);
o->hugepage_size = le32_to_cpu(top->hugepage_size);
- o->rw_min_bs = le32_to_cpu(top->rw_min_bs);
+ o->rw_min_bs = le64_to_cpu(top->rw_min_bs);
o->thinktime = le32_to_cpu(top->thinktime);
o->thinktime_spin = le32_to_cpu(top->thinktime_spin);
o->thinktime_blocks = le32_to_cpu(top->thinktime_blocks);
top->gauss_dev.u.i = __cpu_to_le64(fio_double_to_uint64(o->gauss_dev.u.f));
top->random_generator = cpu_to_le32(o->random_generator);
top->hugepage_size = cpu_to_le32(o->hugepage_size);
- top->rw_min_bs = cpu_to_le32(o->rw_min_bs);
+ top->rw_min_bs = __cpu_to_le64(o->rw_min_bs);
top->thinktime = cpu_to_le32(o->thinktime);
top->thinktime_spin = cpu_to_le32(o->thinktime_spin);
top->thinktime_blocks = cpu_to_le32(o->thinktime_blocks);
top->write_hist_log = cpu_to_le32(o->write_hist_log);
for (i = 0; i < DDIR_RWDIR_CNT; i++) {
- top->bs[i] = cpu_to_le32(o->bs[i]);
- top->ba[i] = cpu_to_le32(o->ba[i]);
- top->min_bs[i] = cpu_to_le32(o->min_bs[i]);
- top->max_bs[i] = cpu_to_le32(o->max_bs[i]);
+ top->bs[i] = __cpu_to_le64(o->bs[i]);
+ top->ba[i] = __cpu_to_le64(o->ba[i]);
+ top->min_bs[i] = __cpu_to_le64(o->min_bs[i]);
+ top->max_bs[i] = __cpu_to_le64(o->max_bs[i]);
top->bssplit_nr[i] = cpu_to_le32(o->bssplit_nr[i]);
if (o->bssplit_nr[i]) {
bssplit_nr = BSSPLIT_MAX;
}
for (j = 0; j < bssplit_nr; j++) {
- top->bssplit[i][j].bs = cpu_to_le32(o->bssplit[i][j].bs);
+ top->bssplit[i][j].bs = cpu_to_le64(o->bssplit[i][j].bs);
top->bssplit[i][j].perc = cpu_to_le32(o->bssplit[i][j].perc);
}
}
unsigned int cgroup_nodelete;
};
-static char *find_cgroup_mnt(struct thread_data *td)
+static struct cgroup_mnt *find_cgroup_mnt(struct thread_data *td)
{
- char *mntpoint = NULL;
+ struct cgroup_mnt *cgroup_mnt = NULL;
struct mntent *mnt, dummy;
char buf[256] = {0};
FILE *f;
+ bool cgroup2 = false;
f = setmntent("/proc/mounts", "r");
if (!f) {
if (!strcmp(mnt->mnt_type, "cgroup") &&
strstr(mnt->mnt_opts, "blkio"))
break;
+ if (!strcmp(mnt->mnt_type, "cgroup2")) {
+ cgroup2 = true;
+ break;
+ }
}
- if (mnt)
- mntpoint = smalloc_strdup(mnt->mnt_dir);
- else
+ if (mnt) {
+ cgroup_mnt = smalloc(sizeof(*cgroup_mnt));
+ if (cgroup_mnt) {
+ cgroup_mnt->path = smalloc_strdup(mnt->mnt_dir);
+ if (!cgroup_mnt->path) {
+ sfree(cgroup_mnt);
+ log_err("fio: could not allocate memory\n");
+ } else {
+ cgroup_mnt->cgroup2 = cgroup2;
+ }
+ }
+ } else {
log_err("fio: cgroup blkio does not appear to be mounted\n");
+ }
endmntent(f);
- return mntpoint;
+ return cgroup_mnt;
}
static void add_cgroup(struct thread_data *td, const char *name,
fio_sem_up(lock);
}
-static char *get_cgroup_root(struct thread_data *td, char *mnt)
+static char *get_cgroup_root(struct thread_data *td, struct cgroup_mnt *mnt)
{
char *str = malloc(64);
if (td->o.cgroup)
- sprintf(str, "%s/%s", mnt, td->o.cgroup);
+ sprintf(str, "%s/%s", mnt->path, td->o.cgroup);
else
- sprintf(str, "%s/%s", mnt, td->o.name);
+ sprintf(str, "%s/%s", mnt->path, td->o.name);
return str;
}
}
-static int cgroup_write_pid(struct thread_data *td, const char *root)
+static int cgroup_write_pid(struct thread_data *td, char *path, bool cgroup2)
{
unsigned int val = td->pid;
- return write_int_to_file(td, root, "tasks", val, "cgroup write pid");
+ if (cgroup2)
+ return write_int_to_file(td, path, "cgroup.procs",
+ val, "cgroup write pid");
+ return write_int_to_file(td, path, "tasks", val, "cgroup write pid");
}
/*
* Move pid to root class
*/
-static int cgroup_del_pid(struct thread_data *td, char *mnt)
+static int cgroup_del_pid(struct thread_data *td, struct cgroup_mnt *mnt)
{
- return cgroup_write_pid(td, mnt);
+ return cgroup_write_pid(td, mnt->path, mnt->cgroup2);
}
-int cgroup_setup(struct thread_data *td, struct flist_head *clist, char **mnt)
+int cgroup_setup(struct thread_data *td, struct flist_head *clist, struct cgroup_mnt **mnt)
{
char *root;
add_cgroup(td, root, clist);
if (td->o.cgroup_weight) {
+ if ((*mnt)->cgroup2) {
+ log_err("fio: cgroup weight doesn't work with cgroup2\n");
+ goto err;
+ }
if (write_int_to_file(td, root, "blkio.weight",
td->o.cgroup_weight,
"cgroup open weight"))
goto err;
}
- if (!cgroup_write_pid(td, root)) {
+ if (!cgroup_write_pid(td, root, (*mnt)->cgroup2)) {
free(root);
return 0;
}
return 1;
}
-void cgroup_shutdown(struct thread_data *td, char **mnt)
+void cgroup_shutdown(struct thread_data *td, struct cgroup_mnt *mnt)
{
- if (*mnt == NULL)
+ if (mnt == NULL)
return;
if (!td->o.cgroup_weight && !td->o.cgroup)
- return;
+ goto out;
- cgroup_del_pid(td, *mnt);
+ cgroup_del_pid(td, mnt);
+out:
+ if (mnt->path)
+ sfree(mnt->path);
+ sfree(mnt);
}
static void fio_init cgroup_init(void)
#ifdef FIO_HAVE_CGROUPS
-int cgroup_setup(struct thread_data *, struct flist_head *, char **);
-void cgroup_shutdown(struct thread_data *, char **);
+struct cgroup_mnt {
+ char *path;
+ bool cgroup2;
+};
+
+int cgroup_setup(struct thread_data *, struct flist_head *, struct cgroup_mnt **);
+void cgroup_shutdown(struct thread_data *, struct cgroup_mnt *);
void cgroup_kill(struct flist_head *list);
#else
+struct cgroup_mnt;
+
static inline int cgroup_setup(struct thread_data *td, struct flist_head *list,
- char **mnt)
+ struct cgroup_mnt **mnt)
{
td_verror(td, EINVAL, "cgroup_setup");
return 1;
}
-static inline void cgroup_shutdown(struct thread_data *td, char **mnt)
+static inline void cgroup_shutdown(struct thread_data *td, struct cgroup_mnt *mnt)
{
}
static void handle_gs(struct fio_client *client, struct fio_net_cmd *cmd);
static void handle_probe(struct fio_client *client, struct fio_net_cmd *cmd);
static void handle_text(struct fio_client *client, struct fio_net_cmd *cmd);
-static void handle_stop(struct fio_client *client, struct fio_net_cmd *cmd);
+static void handle_stop(struct fio_client *client);
static void handle_start(struct fio_client *client, struct fio_net_cmd *cmd);
static void convert_text(struct fio_net_cmd *cmd);
return 0;
}
+static int read_ini_data(int fd, void *data, size_t size)
+{
+ char *p = data;
+ int ret = 0;
+ FILE *fp;
+ int dupfd;
+
+ dupfd = dup(fd);
+ if (dupfd < 0)
+ return errno;
+
+ fp = fdopen(dupfd, "r");
+ if (!fp) {
+ ret = errno;
+ close(dupfd);
+ goto out;
+ }
+
+ while (1) {
+ ssize_t len;
+ char buf[OPT_LEN_MAX+1], *sub;
+
+ if (!fgets(buf, sizeof(buf), fp)) {
+ if (ferror(fp)) {
+ if (errno == EAGAIN || errno == EINTR)
+ continue;
+ ret = errno;
+ }
+ break;
+ }
+
+ sub = fio_option_dup_subs(buf);
+ len = strlen(sub);
+ if (len + 1 > size) {
+ log_err("fio: no space left to read data\n");
+ free(sub);
+ ret = ENOSPC;
+ break;
+ }
+
+ memcpy(p, sub, len);
+ free(sub);
+ p += len;
+ *p = '\0';
+ size -= len;
+ }
+
+ fclose(fp);
+out:
+ return ret;
+}
+
static void fio_client_json_init(void)
{
char time_buf[32];
return ret;
}
+ /*
+ * Add extra space for variable expansion, but doesn't guarantee.
+ */
+ sb.st_size += OPT_LEN_MAX;
p_size = sb.st_size + sizeof(*pdu);
pdu = malloc(p_size);
buf = pdu->buf;
len = sb.st_size;
p = buf;
- if (read_data(fd, p, len)) {
+ if (read_ini_data(fd, p, len)) {
log_err("fio: failed reading job file %s\n", filename);
close(fd);
free(pdu);
entry = s->data.plat_entry;
io_u_plat = entry->io_u_plat;
- fprintf(f, "%lu, %u, %u, ", (unsigned long) s->time,
- io_sample_ddir(s), s->bs);
+ fprintf(f, "%lu, %u, %llu, ", (unsigned long) s->time,
+ io_sample_ddir(s), (unsigned long long) s->bs);
for (j = 0; j < FIO_IO_U_PLAT_NR - stride; j += stride) {
fprintf(f, "%llu, ", (unsigned long long)hist_sum(j, stride, io_u_plat, NULL));
}
sum_stat_clients += client->nr_stat;
}
-static void handle_stop(struct fio_client *client, struct fio_net_cmd *cmd)
+static void handle_stop(struct fio_client *client)
{
if (client->error)
log_info("client <%s>: exited with error %d\n", client->hostname, client->error);
s->time = le64_to_cpu(s->time);
s->data.val = le64_to_cpu(s->data.val);
s->__ddir = le32_to_cpu(s->__ddir);
- s->bs = le32_to_cpu(s->bs);
+ s->bs = le64_to_cpu(s->bs);
if (ret->log_offset) {
struct io_sample_offset *so = (void *) s;
client->state = Client_stopped;
client->error = le32_to_cpu(pdu->error);
client->signal = le32_to_cpu(pdu->signal);
- ops->stop(client, cmd);
+ ops->stop(client);
break;
}
case FIO_NET_CMD_ADD_JOB: {
};
typedef void (client_cmd_op)(struct fio_client *, struct fio_net_cmd *);
+typedef void (client_op)(struct fio_client *);
typedef void (client_eta_op)(struct jobs_eta *je);
typedef void (client_timed_out_op)(struct fio_client *);
typedef void (client_jobs_eta_op)(struct fio_client *client, struct jobs_eta *je);
client_cmd_op *add_job;
client_cmd_op *update_job;
client_timed_out_op *timed_out;
- client_cmd_op *stop;
+ client_op *stop;
client_cmd_op *start;
client_cmd_op *job_start;
client_timed_out_op *removed;
FIO_WARN_ZONED_BUG = 4,
FIO_WARN_IOLOG_DROP = 8,
FIO_WARN_FADVISE = 16,
+ FIO_WARN_BTRACE_ZERO = 32,
};
#ifdef FIO_INC_DEBUG
!strcmp(dirent->d_name, ".."))
continue;
- sprintf(temppath, "%s/%s", slavesdir, dirent->d_name);
+ nowarn_snprintf(temppath, sizeof(temppath), "%s/%s", slavesdir,
+ dirent->d_name);
/* Can we always assume that the slaves device entries
* are links to the real directories for the slave
* devices?
}
slavepath[linklen] = '\0';
- sprintf(temppath, "%s/%s/dev", slavesdir, slavepath);
+ nowarn_snprintf(temppath, sizeof(temppath), "%s/%s/dev",
+ slavesdir, slavepath);
if (access(temppath, F_OK) != 0)
- sprintf(temppath, "%s/%s/device/dev", slavesdir, slavepath);
+ nowarn_snprintf(temppath, sizeof(temppath),
+ "%s/%s/device/dev", slavesdir,
+ slavepath);
if (read_block_dev_entry(temppath, &majdev, &mindev)) {
perror("Error getting slave device numbers");
closedir(dirhandle);
if (slavedu)
continue;
- sprintf(temppath, "%s/%s", slavesdir, slavepath);
+ nowarn_snprintf(temppath, sizeof(temppath), "%s/%s", slavesdir,
+ slavepath);
__init_per_file_disk_util(td, majdev, mindev, temppath);
slavedu = disk_util_exists(majdev, mindev);
# -- General configuration ------------------------------------------------
+from __future__ import absolute_import
+from __future__ import print_function
+
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
void *pad;
char *gf_vol;
char *gf_brick;
+ int gf_single_instance;
};
struct gf_data {
.category = FIO_OPT_C_ENGINE,
.group = FIO_OPT_G_GFAPI,
},
+ {
+ .name = "single-instance",
+ .lname = "Single glusterfs instance",
+ .type = FIO_OPT_BOOL,
+ .help = "Only one glusterfs instance",
+ .off1 = offsetof(struct gf_options, gf_single_instance),
+ .category = FIO_OPT_C_ENGINE,
+ .group = FIO_OPT_G_GFAPI,
+ },
{
.name = NULL,
},
};
-int fio_gf_setup(struct thread_data *td)
+struct glfs_info {
+ struct flist_head list;
+ char *volume;
+ char *brick;
+ glfs_t *fs;
+ int refcount;
+};
+
+static pthread_mutex_t glfs_lock = PTHREAD_MUTEX_INITIALIZER;
+static FLIST_HEAD(glfs_list_head);
+
+static glfs_t *fio_gf_new_fs(char *volume, char *brick)
{
int r = 0;
+ glfs_t *fs;
+ struct stat sb = { 0, };
+
+ fs = glfs_new(volume);
+ if (!fs) {
+ log_err("glfs_new failed.\n");
+ goto out;
+ }
+ glfs_set_logging(fs, "/tmp/fio_gfapi.log", 7);
+ /* default to tcp */
+ r = glfs_set_volfile_server(fs, "tcp", brick, 0);
+ if (r) {
+ log_err("glfs_set_volfile_server failed.\n");
+ goto out;
+ }
+ r = glfs_init(fs);
+ if (r) {
+ log_err("glfs_init failed. Is glusterd running on brick?\n");
+ goto out;
+ }
+ sleep(2);
+ r = glfs_lstat(fs, ".", &sb);
+ if (r) {
+ log_err("glfs_lstat failed.\n");
+ goto out;
+ }
+
+out:
+ if (r) {
+ glfs_fini(fs);
+ fs = NULL;
+ }
+ return fs;
+}
+
+static glfs_t *fio_gf_get_glfs(struct gf_options *opt,
+ char *volume, char *brick)
+{
+ struct glfs_info *glfs = NULL;
+ struct glfs_info *tmp;
+ struct flist_head *entry;
+
+ if (!opt->gf_single_instance)
+ return fio_gf_new_fs(volume, brick);
+
+ pthread_mutex_lock (&glfs_lock);
+
+ flist_for_each(entry, &glfs_list_head) {
+ tmp = flist_entry(entry, struct glfs_info, list);
+ if (!strcmp(volume, tmp->volume) &&
+ !strcmp(brick, tmp->brick)) {
+ glfs = tmp;
+ break;
+ }
+ }
+
+ if (glfs) {
+ glfs->refcount++;
+ } else {
+ glfs = malloc(sizeof(*glfs));
+ if (!glfs)
+ goto out;
+ INIT_FLIST_HEAD(&glfs->list);
+ glfs->refcount = 0;
+ glfs->volume = strdup(volume);
+ glfs->brick = strdup(brick);
+ glfs->fs = fio_gf_new_fs(volume, brick);
+ if (!glfs->fs) {
+ free(glfs);
+ glfs = NULL;
+ goto out;
+ }
+
+ flist_add_tail(&glfs->list, &glfs_list_head);
+ glfs->refcount = 1;
+ }
+
+out:
+ pthread_mutex_unlock (&glfs_lock);
+
+ if (glfs)
+ return glfs->fs;
+ return NULL;
+}
+
+static void fio_gf_put_glfs(struct gf_options *opt, glfs_t *fs)
+{
+ struct glfs_info *glfs = NULL;
+ struct glfs_info *tmp;
+ struct flist_head *entry;
+
+ if (!opt->gf_single_instance) {
+ glfs_fini(fs);
+ return;
+ }
+
+ pthread_mutex_lock (&glfs_lock);
+
+ flist_for_each(entry, &glfs_list_head) {
+ tmp = flist_entry(entry, struct glfs_info, list);
+ if (tmp->fs == fs) {
+ glfs = tmp;
+ break;
+ }
+ }
+
+ if (!glfs) {
+ log_err("glfs not found to fini.\n");
+ } else {
+ glfs->refcount--;
+
+ if (glfs->refcount == 0) {
+ glfs_fini(glfs->fs);
+ free(glfs->volume);
+ free(glfs->brick);
+ flist_del(&glfs->list);
+ }
+ }
+
+ pthread_mutex_unlock (&glfs_lock);
+}
+
+int fio_gf_setup(struct thread_data *td)
+{
struct gf_data *g = NULL;
struct gf_options *opt = td->eo;
- struct stat sb = { 0, };
dprint(FD_IO, "fio setup\n");
log_err("malloc failed.\n");
return -ENOMEM;
}
- g->fs = NULL;
g->fd = NULL;
g->aio_events = NULL;
- g->fs = glfs_new(opt->gf_vol);
- if (!g->fs) {
- log_err("glfs_new failed.\n");
- goto cleanup;
- }
- glfs_set_logging(g->fs, "/tmp/fio_gfapi.log", 7);
- /* default to tcp */
- r = glfs_set_volfile_server(g->fs, "tcp", opt->gf_brick, 0);
- if (r) {
- log_err("glfs_set_volfile_server failed.\n");
+ g->fs = fio_gf_get_glfs(opt, opt->gf_vol, opt->gf_brick);
+ if (!g->fs)
goto cleanup;
- }
- r = glfs_init(g->fs);
- if (r) {
- log_err("glfs_init failed. Is glusterd running on brick?\n");
- goto cleanup;
- }
- sleep(2);
- r = glfs_lstat(g->fs, ".", &sb);
- if (r) {
- log_err("glfs_lstat failed.\n");
- goto cleanup;
- }
+
dprint(FD_FILE, "fio setup %p\n", g->fs);
td->io_ops_data = g;
return 0;
cleanup:
- if (g->fs)
- glfs_fini(g->fs);
free(g);
td->io_ops_data = NULL;
- return r;
+ return -EIO;
}
void fio_gf_cleanup(struct thread_data *td)
if (g->fd)
glfs_close(g->fd);
if (g->fs)
- glfs_fini(g->fs);
+ fio_gf_put_glfs(td->eo, g->fs);
free(g);
td->io_ops_data = NULL;
}
struct gf_data *g = td->io_ops_data;
int ret = 0;
- dprint(FD_FILE, "fio queue len %lu\n", io_u->xfer_buflen);
+ dprint(FD_FILE, "fio queue len %llu\n", io_u->xfer_buflen);
fio_ro_check(td, io_u);
if (io_u->ddir == DDIR_READ)
io_u->error = EINVAL;
return FIO_Q_COMPLETED;
}
- dprint(FD_FILE, "fio len %lu ret %d\n", io_u->xfer_buflen, ret);
+ dprint(FD_FILE, "fio len %llu ret %d\n", io_u->xfer_buflen, ret);
if (io_u->file && ret >= 0 && ddir_rw(io_u->ddir))
LAST_POS(io_u->file) = io_u->offset + ret;
{
struct thread_options *o = &td->o;
- dprint(FD_IO,"o->rw_min_bs %d \n o->fsync_blocks %d \n o->fdatasync_blocks %d \n",
+ dprint(FD_IO,"o->rw_min_bs %llu \n o->fsync_blocks %d \n o->fdatasync_blocks %d \n",
o->rw_min_bs,o->fsync_blocks,o->fdatasync_blocks);
dprint(FD_IO, "DEBUG fio_libpmem_init\n");
#ifdef FIO_HAVE_SGIO
+enum {
+ FIO_SG_WRITE = 1,
+ FIO_SG_WRITE_VERIFY = 2,
+ FIO_SG_WRITE_SAME = 3
+};
struct sg_options {
void *pad;
unsigned int readfua;
unsigned int writefua;
+ unsigned int write_mode;
};
static struct fio_option options[] = {
.category = FIO_OPT_C_ENGINE,
.group = FIO_OPT_G_SG,
},
+ {
+ .name = "sg_write_mode",
+ .lname = "specify sg write mode",
+ .type = FIO_OPT_STR,
+ .off1 = offsetof(struct sg_options, write_mode),
+ .help = "Specify SCSI WRITE mode",
+ .def = "write",
+ .posval = {
+ { .ival = "write",
+ .oval = FIO_SG_WRITE,
+ .help = "Issue standard SCSI WRITE commands",
+ },
+ { .ival = "verify",
+ .oval = FIO_SG_WRITE_VERIFY,
+ .help = "Issue SCSI WRITE AND VERIFY commands",
+ },
+ { .ival = "same",
+ .oval = FIO_SG_WRITE_SAME,
+ .help = "Issue SCSI WRITE SAME commands",
+ },
+ },
+ .category = FIO_OPT_C_ENGINE,
+ .group = FIO_OPT_G_SG,
+ },
{
.name = NULL,
},
sgio_hdr_init(sd, hdr, io_u, 1);
hdr->dxfer_direction = SG_DXFER_TO_DEV;
- if (lba < MAX_10B_LBA)
- hdr->cmdp[0] = 0x2a; // write(10)
- else
- hdr->cmdp[0] = 0x8a; // write(16)
-
- if (o->writefua)
- hdr->cmdp[1] |= 0x08;
-
+ switch(o->write_mode) {
+ case FIO_SG_WRITE:
+ if (lba < MAX_10B_LBA)
+ hdr->cmdp[0] = 0x2a; // write(10)
+ else
+ hdr->cmdp[0] = 0x8a; // write(16)
+ if (o->writefua)
+ hdr->cmdp[1] |= 0x08;
+ break;
+ case FIO_SG_WRITE_VERIFY:
+ if (lba < MAX_10B_LBA)
+ hdr->cmdp[0] = 0x2e; // write and verify(10)
+ else
+ hdr->cmdp[0] = 0x8e; // write and verify(16)
+ break;
+ // BYTCHK is disabled by virtue of the memset in sgio_hdr_init
+ case FIO_SG_WRITE_SAME:
+ hdr->dxfer_len = sd->bs;
+ if (lba < MAX_10B_LBA)
+ hdr->cmdp[0] = 0x41; // write same(10)
+ else
+ hdr->cmdp[0] = 0x93; // write same(16)
+ break;
+ };
} else {
sgio_hdr_init(sd, hdr, io_u, 0);
hdr->dxfer_direction = SG_DXFER_NONE;
fio_ro_check(td, io_u);
if (o->hipri &&
- (rand32_between(&sd->rand_state, 1, 100) <= o->hipri_percentage))
+ (rand_between(&sd->rand_state, 1, 100) <= o->hipri_percentage))
flags |= RWF_HIPRI;
iov->iov_base = io_u->xfer_buf;
*/
unsigned int major, minor;
int fileno;
- int bs;
+ unsigned long long bs;
char *file_name;
/*
{
int new_layout = 0, unlink_file = 0, flags;
unsigned long long left;
- unsigned int bs;
+ unsigned long long bs;
char *b = NULL;
if (read_only) {
{
int r, did_open = 0, old_runstate;
unsigned long long left;
- unsigned int bs;
+ unsigned long long bs;
bool ret = true;
char *b;
from_hash = file_lookup_open(f, flags);
} else if (td_trim(td)) {
assert(!td_rw(td)); /* should have matched above */
- flags |= O_RDWR;
+ if (!read_only)
+ flags |= O_RDWR;
from_hash = file_lookup_open(f, flags);
}
unsigned int i, nr_fs_extra = 0;
int err = 0, need_extend;
int old_state;
- const unsigned int bs = td_min_bs(td);
+ const unsigned long long bs = td_min_bs(td);
uint64_t fs = 0;
dprint(FD_FILE, "setup files\n");
Convert \fIjobfile\fR to a set of command\-line options.
.TP
.BI \-\-readonly
-Turn on safety read\-only checks, preventing writes. The \fB\-\-readonly\fR
+Turn on safety read\-only checks, preventing writes and trims. The \fB\-\-readonly\fR
option is an extra safety guard to prevent users from accidentally starting
-a write workload when that is not desired. Fio will only write if
-`rw=write/randwrite/rw/randrw' is given. This extra safety net can be used
-as an extra precaution as \fB\-\-readonly\fR will also enable a write check in
-the I/O engine core to prevent writes due to unknown user space bug(s).
+a write or trim workload when that is not desired. Fio will only modify the
+device under test if `rw=write/randwrite/rw/randrw/trim/randtrim/trimwrite'
+is given. This safety net can be used as an extra precaution.
.TP
.BI \-\-eta \fR=\fPwhen
Specifies when real\-time ETA estimate should be printed. \fIwhen\fR may
and that some blocks may be read/written more than once. If this option is
used with \fBverify\fR and multiple blocksizes (via \fBbsrange\fR),
only intact blocks are verified, i.e., partially\-overwritten blocks are
-ignored.
+ignored. With an async I/O engine and an I/O depth > 1, it is possible for
+the same block to be overwritten, which can cause verification errors. Either
+do not use norandommap in this case, or also use the lfsr random generator.
.TP
.BI softrandommap \fR=\fPbool
See \fBnorandommap\fR. If fio runs with the random block map enabled and
90% 4k writes and 10% 8k writes, you would specify:
.RS
.P
-bssplit=2k/50:4k/50,4k/90,8k/10
+bssplit=2k/50:4k/50,4k/90:8k/10
.RE
.P
Fio supports defining up to 64 different weights for each data direction.
.BI (sg)writefua \fR=\fPbool
With writefua option set to 1, write operations include the force
unit access (fua) flag. Default: 0.
+.TP
+.BI (sg)sg_write_mode \fR=\fPstr
+Specify the type of write commands to issue. This option can take three
+values:
+.RS
+.RS
+.TP
+.B write (default)
+Write opcodes are issued as usual
+.TP
+.B verify
+Issue WRITE AND VERIFY commands. The BYTCHK bit is set to 0. This
+directs the device to carry out a medium verification with no data
+comparison. The writefua option is ignored with this selection.
+.TP
+.B same
+Issue WRITE SAME commands. This transfers a single block to the device
+and writes this same block of data to a contiguous sequence of LBAs
+beginning at the specified offset. fio's block size parameter
+specifies the amount of data written with each command. However, the
+amount of data actually transferred to the device is equal to the
+device's block (sector) size. For a device with 512 byte sectors,
+blocksize=8k will write 16 sectors with each command. fio will still
+generate 8k of data for each command but only the first 512 bytes will
+be used and transferred to the device. The writefua option is ignored
+with this selection.
+
.SS "I/O depth"
.TP
.BI iodepth \fR=\fPint
given is a read or random read, fio will assume that it should verify a
previously written file. If the data direction includes any form of write,
the verify will be of the newly written data.
+.P
+To avoid false verification errors, do not use the norandommap option when
+verifying data with async I/O engines and I/O depths > 1. Or use the
+norandommap and the lfsr random generator together to avoid writing to the
+same offset with multiple outstanding I/Os.
.RE
.TP
.BI verify_offset \fR=\fPint
#include "io_u_queue.h"
#include "workqueue.h"
#include "steadystate.h"
+#include "lib/nowarn_snprintf.h"
#ifdef CONFIG_SOLARISAIO
#include <sys/asynch.h>
break; \
(td)->error = ____e; \
if (!(td)->first_error) \
- snprintf(td->verror, sizeof(td->verror), "file:%s:%d, func=%s, error=%s", __FILE__, __LINE__, (func), (msg)); \
+ nowarn_snprintf(td->verror, sizeof(td->verror), \
+ "file:%s:%d, func=%s, error=%s", \
+ __FILE__, __LINE__, (func), (msg)); \
} while (0)
static inline void fio_ro_check(const struct thread_data *td, struct io_u *io_u)
{
- assert(!(io_u->ddir == DDIR_WRITE && !td_write(td)));
+ assert(!(io_u->ddir == DDIR_WRITE && !td_write(td)) &&
+ !(io_u->ddir == DDIR_TRIM && !td_trim(td)));
}
#define REAL_MAX_JOBS 4096
-static inline int should_fsync(struct thread_data *td)
+static inline bool should_fsync(struct thread_data *td)
{
if (td->last_was_sync)
- return 0;
+ return false;
if (td_write(td) || td->o.override_sync)
- return 1;
+ return true;
- return 0;
+ return false;
}
/*
extern int fio_show_option_help(const char *);
extern void fio_options_set_ioengine_opts(struct option *long_options, struct thread_data *td);
extern void fio_options_dup_and_init(struct option *);
+extern char *fio_option_dup_subs(const char *);
extern void fio_options_mem_dupe(struct thread_data *);
extern void td_fill_rand_seeds(struct thread_data *);
extern void td_fill_verify_state_seed(struct thread_data *);
return false;
}
-static inline bool __should_check_rate(struct thread_data *td,
- enum fio_ddir ddir)
+static inline bool __should_check_rate(struct thread_data *td)
{
return (td->flags & TD_F_CHECK_RATE) != 0;
}
static inline bool should_check_rate(struct thread_data *td)
{
- if (__should_check_rate(td, DDIR_READ) && td->bytes_done[DDIR_READ])
- return true;
- if (__should_check_rate(td, DDIR_WRITE) && td->bytes_done[DDIR_WRITE])
- return true;
- if (__should_check_rate(td, DDIR_TRIM) && td->bytes_done[DDIR_TRIM])
- return true;
+ if (!__should_check_rate(td))
+ return false;
- return false;
+ return ddir_rw_sum(td->bytes_done) != 0;
}
-static inline unsigned int td_max_bs(struct thread_data *td)
+static inline unsigned long long td_max_bs(struct thread_data *td)
{
- unsigned int max_bs;
+ unsigned long long max_bs;
max_bs = max(td->o.max_bs[DDIR_READ], td->o.max_bs[DDIR_WRITE]);
return max(td->o.max_bs[DDIR_TRIM], max_bs);
}
-static inline unsigned int td_min_bs(struct thread_data *td)
+static inline unsigned long long td_min_bs(struct thread_data *td)
{
- unsigned int min_bs;
+ unsigned long long min_bs;
min_bs = min(td->o.min_bs[DDIR_READ], td->o.min_bs[DDIR_WRITE]);
return min(td->o.min_bs[DDIR_TRIM], min_bs);
-#include <malloc.h>
+#include <stdlib.h>
#include <string.h>
#include <glib.h>
gdk_threads_leave();
}
-static void gfio_client_stop(struct fio_client *client, struct fio_net_cmd *cmd)
+static void gfio_client_stop(struct fio_client *client)
{
struct gfio_client *gc = client->client_data;
#include <locale.h>
-#include <malloc.h>
+#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#endif // ARCH_HAVE_CPU_CLOCK
#ifndef CONFIG_TLS_THREAD
-void fio_local_clock_init(int is_thread)
+void fio_local_clock_init(void)
{
struct tv_valid *t;
free(data);
}
#else
-void fio_local_clock_init(int is_thread)
+void fio_local_clock_init(void)
{
}
#endif
extern void fio_clock_init(void);
extern int fio_start_gtod_thread(void);
extern int fio_monotonic_clocktest(int debug);
-extern void fio_local_clock_init(int);
+extern void fio_local_clock_init(void);
extern struct timespec *fio_ts;
*
*/
#include <locale.h>
-#include <malloc.h>
+#include <stdlib.h>
#include <string.h>
+#include <libgen.h>
#include <glib.h>
#include <cairo.h>
#include <locale.h>
-#include <malloc.h>
+#include <stdlib.h>
#include <string.h>
#include <glib.h>
*
*/
#include <string.h>
-#include <malloc.h>
+#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <stdlib.h>
return p * 100.0;
}
-static void fio_idle_prof_cleanup(void)
+void fio_idle_prof_cleanup(void)
{
if (ipc.ipts) {
free(ipc.ipts);
log_buf(out, " stddev=%3.2f\n", ipc.cali_stddev);
}
- /* dynamic mem allocations can now be freed */
- if (ipc.opt != IDLE_PROF_OPT_NONE)
- fio_idle_prof_cleanup();
-
return;
}
json_object_add_value_float(tmp, "unit_mean", ipc.cali_mean);
json_object_add_value_float(tmp, "unit_stddev", ipc.cali_stddev);
-
- fio_idle_prof_cleanup();
}
}
extern void show_idle_prof_stats(int, struct json_object *, struct buf_output *);
+extern void fio_idle_prof_cleanup(void);
+
#endif
static int __setup_rate(struct thread_data *td, enum fio_ddir ddir)
{
- unsigned int bs = td->o.min_bs[ddir];
+ unsigned long long bs = td->o.min_bs[ddir];
assert(ddir_rw(ddir));
o->min_bs[DDIR_READ] == o->min_bs[DDIR_TRIM];
}
-
-static unsigned long long get_rand_start_delay(struct thread_data *td)
-{
- unsigned long long delayrange;
- uint64_t frand_max;
- unsigned long r;
-
- delayrange = td->o.start_delay_high - td->o.start_delay;
-
- frand_max = rand_max(&td->delay_state);
- r = __rand(&td->delay_state);
- delayrange = (unsigned long long) ((double) delayrange * (r / (frand_max + 1.0)));
-
- delayrange += td->o.start_delay;
- return delayrange;
-}
-
/*
* <3 Johannes
*/
struct thread_options *o = &td->o;
int ret = 0;
+ if (read_only && (td_write(td) || td_trim(td))) {
+ log_err("fio: trim and write operations are not allowed"
+ " with the --readonly parameter.\n");
+ ret |= 1;
+ }
+
#ifndef CONFIG_PSHARED
if (!o->use_thread) {
log_info("fio: this platform does not support process shared"
" mutexes, forcing use of threads. Use the 'thread'"
" option to get rid of this warning.\n");
o->use_thread = 1;
- ret = warnings_fatal;
+ ret |= warnings_fatal;
}
#endif
log_err("fio: read iolog overrides write_iolog\n");
free(o->write_iolog_file);
o->write_iolog_file = NULL;
- ret = warnings_fatal;
+ ret |= warnings_fatal;
}
/*
!o->norandommap) {
log_err("fio: Any use of blockalign= turns off randommap\n");
o->norandommap = 1;
- ret = warnings_fatal;
+ ret |= warnings_fatal;
}
if (!o->file_size_high)
o->file_size_high = o->file_size_low;
- if (o->start_delay_high)
- o->start_delay = get_rand_start_delay(td);
+ if (o->start_delay_high) {
+ if (!o->start_delay_orig)
+ o->start_delay_orig = o->start_delay;
+ o->start_delay = rand_between(&td->delay_state,
+ o->start_delay_orig,
+ o->start_delay_high);
+ }
if (o->norandommap && o->verify != VERIFY_NONE
&& !fixed_block_size(o)) {
log_err("fio: norandommap given for variable block sizes, "
"verify limited\n");
- ret = warnings_fatal;
+ ret |= warnings_fatal;
}
if (o->bs_unaligned && (o->odirect || td_ioengine_flagged(td, FIO_RAWIO)))
log_err("fio: bs_unaligned may not work with raw io\n");
log_err("fio: checking for in-flight overlaps when the "
"io_submit_mode is offload is not supported\n");
o->serialize_overlap = 0;
- ret = warnings_fatal;
+ ret |= warnings_fatal;
}
if (o->nr_files > td->files_index)
((o->ratemin[DDIR_READ] + o->ratemin[DDIR_WRITE] + o->ratemin[DDIR_TRIM]) &&
(o->rate_iops_min[DDIR_READ] + o->rate_iops_min[DDIR_WRITE] + o->rate_iops_min[DDIR_TRIM]))) {
log_err("fio: rate and rate_iops are mutually exclusive\n");
- ret = 1;
+ ret |= 1;
}
if ((o->rate[DDIR_READ] && (o->rate[DDIR_READ] < o->ratemin[DDIR_READ])) ||
(o->rate[DDIR_WRITE] && (o->rate[DDIR_WRITE] < o->ratemin[DDIR_WRITE])) ||
(o->rate_iops[DDIR_WRITE] && (o->rate_iops[DDIR_WRITE] < o->rate_iops_min[DDIR_WRITE])) ||
(o->rate_iops[DDIR_TRIM] && (o->rate_iops[DDIR_TRIM] < o->rate_iops_min[DDIR_TRIM]))) {
log_err("fio: minimum rate exceeds rate\n");
- ret = 1;
+ ret |= 1;
}
if (!o->timeout && o->time_based) {
log_err("fio: time_based requires a runtime/timeout setting\n");
o->time_based = 0;
- ret = warnings_fatal;
+ ret |= warnings_fatal;
}
if (o->fill_device && !o->size)
log_info("fio: multiple writers may overwrite blocks "
"that belong to other jobs. This can cause "
"verification failures.\n");
- ret = warnings_fatal;
+ ret |= warnings_fatal;
}
/*
log_info("fio: verification read phase will never "
"start because write phase uses all of "
"runtime\n");
- ret = warnings_fatal;
+ ret |= warnings_fatal;
}
if (!fio_option_is_set(o, refill_buffers))
if (td_ioengine_flagged(td, FIO_PIPEIO)) {
log_info("fio: cannot pre-read files with an IO engine"
" that isn't seekable. Pre-read disabled.\n");
- ret = warnings_fatal;
+ ret |= warnings_fatal;
}
}
" this warning\n");
o->fsync_blocks = o->fdatasync_blocks;
o->fdatasync_blocks = 0;
- ret = warnings_fatal;
+ ret |= warnings_fatal;
}
#endif
log_err("fio: Windows does not support direct or non-buffered io with"
" the synchronous ioengines. Use the 'windowsaio' ioengine"
" with 'direct=1' and 'iodepth=1' instead.\n");
- ret = 1;
+ ret |= 1;
}
#endif
* If size is set but less than the min block size, complain
*/
if (o->size && o->size < td_min_bs(td)) {
- log_err("fio: size too small, must not be less than minimum block size: %llu < %u\n",
+ log_err("fio: size too small, must not be less than minimum block size: %llu < %llu\n",
(unsigned long long) o->size, td_min_bs(td));
- ret = 1;
+ ret |= 1;
}
/*
if (td_ioengine_flagged(td, FIO_NOEXTEND) && o->file_append) {
log_err("fio: can't append/extent with IO engine %s\n", td->io_ops->name);
- ret = 1;
+ ret |= 1;
}
if (fio_option_is_set(o, gtod_cpu)) {
log_err("fio: block error histogram only available "
"with a single file per job, but %d files "
"provided\n", o->nr_files);
- ret = 1;
+ ret |= 1;
}
if (fio_option_is_set(o, clat_percentiles) &&
o->lat_percentiles && o->clat_percentiles) {
log_err("fio: lat_percentiles and clat_percentiles are "
"mutually exclusive\n");
- ret = 1;
+ ret |= 1;
}
if (o->disable_lat)
}
}
+ if (setup_random_seeds(td)) {
+ td_verror(td, errno, "setup_random_seeds");
+ goto err;
+ }
+
if (fixup_options(td))
goto err;
td->groupid = groupid;
prev_group_jobs++;
- if (setup_random_seeds(td)) {
- td_verror(td, errno, "setup_random_seeds");
- goto err;
- }
-
if (setup_rate(td))
goto err;
printf(" --showcmd\t\tTurn a job file into command line options\n");
printf(" --eta=when\t\tWhen ETA estimate should be printed\n");
printf(" \t\tMay be \"always\", \"never\" or \"auto\"\n");
- printf(" --eta-newline=time\tForce a new line for every 'time'");
+ printf(" --eta-newline=t\tForce a new line for every 't'");
printf(" period passed\n");
printf(" --status-interval=t\tForce full status dump every");
printf(" 't' period passed\n");
*/
static void mark_random_map(struct thread_data *td, struct io_u *io_u)
{
- unsigned int min_bs = td->o.min_bs[io_u->ddir];
+ unsigned long long min_bs = td->o.min_bs[io_u->ddir];
struct fio_file *f = io_u->file;
- unsigned int nr_blocks;
+ unsigned long long nr_blocks;
uint64_t block;
block = (io_u->offset - f->file_offset) / (uint64_t) min_bs;
/*
* Generate a value, v, between 1 and 100, both inclusive
*/
- v = rand32_between(&td->zone_state, 1, 100);
+ v = rand_between(&td->zone_state, 1, 100);
/*
* Find our generated table. 'send' is the end block of this zone,
/*
* Generate a value, v, between 1 and 100, both inclusive
*/
- v = rand32_between(&td->zone_state, 1, 100);
+ v = rand_between(&td->zone_state, 1, 100);
zsi = &td->zone_state_index[ddir][v - 1];
stotal = zsi->size_perc_prev;
if (td->o.perc_rand[ddir] == 100)
return true;
- v = rand32_between(&td->seq_rand_state[ddir], 1, 100);
+ v = rand_between(&td->seq_rand_state[ddir], 1, 100);
return v <= td->o.perc_rand[ddir];
}
if (td->o.time_based ||
(td->o.file_service_type & __FIO_FSERVICE_NONUNIFORM)) {
fio_file_reset(td, f);
+ loop_cache_invalidate(td, f);
if (!get_next_rand_offset(td, f, ddir, b))
return 0;
- loop_cache_invalidate(td, f);
}
dprint(FD_IO, "%s: rand offset failed, last=%llu, size=%llu\n",
}
static inline bool io_u_fits(struct thread_data *td, struct io_u *io_u,
- unsigned int buflen)
+ unsigned long long buflen)
{
struct fio_file *f = io_u->file;
return io_u->offset + buflen <= f->io_size + get_start_offset(td, f);
}
-static unsigned int get_next_buflen(struct thread_data *td, struct io_u *io_u,
+static unsigned long long get_next_buflen(struct thread_data *td, struct io_u *io_u,
bool is_random)
{
int ddir = io_u->ddir;
- unsigned int buflen = 0;
- unsigned int minbs, maxbs;
+ unsigned long long buflen = 0;
+ unsigned long long minbs, maxbs;
uint64_t frand_max, r;
bool power_2;
r = __rand(&td->bsrange_state[ddir]);
if (!td->o.bssplit_nr[ddir]) {
- buflen = 1 + (unsigned int) ((double) maxbs *
+ buflen = minbs + (unsigned long long) ((double) maxbs *
(r / (frand_max + 1.0)));
- if (buflen < minbs)
- buflen = minbs;
} else {
long long perc = 0;
unsigned int i;
{
unsigned int v;
- v = rand32_between(&td->rwmix_state, 1, 100);
+ v = rand_between(&td->rwmix_state, 1, 100);
if (v <= td->o.rwmix[DDIR_READ])
return DDIR_READ;
* Wrap from the beginning, if we exceed the file size
*/
if (f->file_offset >= f->real_file_size)
- f->file_offset = f->real_file_size - f->file_offset;
+ f->file_offset = get_start_offset(td, f);
+
f->last_pos[io_u->ddir] = f->file_offset;
td->io_skip_bytes += td->o.zone_skip;
}
/*
- * If zone_size > zone_range, then maintain the same zone until
- * zone_bytes >= zone_size.
- */
+ * If zone_size > zone_range, then maintain the same zone until
+ * zone_bytes >= zone_size.
+ */
if (f->last_pos[io_u->ddir] >= (f->file_offset + td->o.zone_range)) {
dprint(FD_IO, "io_u maintain zone offset=%" PRIu64 "/last_pos=%" PRIu64 "\n",
f->file_offset, f->last_pos[io_u->ddir]);
* For random: if 'norandommap' is not set and zone_size > zone_range,
* map needs to be reset as it's done with zone_range everytime.
*/
- if ((td->zone_bytes % td->o.zone_range) == 0) {
+ if ((td->zone_bytes % td->o.zone_range) == 0)
fio_file_reset(td, f);
- }
}
static int fill_io_u(struct thread_data *td, struct io_u *io_u)
/*
* When file is zoned zone_range is always positive
*/
- if (td->o.zone_range) {
+ if (td->o.zone_range)
__fill_io_u_zone(td, io_u);
- }
/*
* No log, let the seq/rand engine retrieve the next buflen and
}
if (io_u->offset + io_u->buflen > io_u->file->real_file_size) {
- dprint(FD_IO, "io_u %p, off=0x%llx + len=0x%lx exceeds file size=0x%llx\n",
+ dprint(FD_IO, "io_u %p, off=0x%llx + len=0x%llx exceeds file size=0x%llx\n",
io_u,
(unsigned long long) io_u->offset, io_u->buflen,
(unsigned long long) io_u->file->real_file_size);
*/
static void small_content_scramble(struct io_u *io_u)
{
- unsigned int i, nr_blocks = io_u->buflen >> 9;
+ unsigned long long i, nr_blocks = io_u->buflen >> 9;
unsigned int offset;
uint64_t boffset, *iptr;
char *p;
if (td_non_fatal_error(td, eb, io_u->error) && !td->o.error_dump)
return;
- log_err("fio: io_u error%s%s: %s: %s offset=%llu, buflen=%lu\n",
+ log_err("fio: io_u error%s%s: %s: %s offset=%llu, buflen=%llu\n",
io_u->file ? " on file " : "",
io_u->file ? io_u->file->file_name : "",
strerror(io_u->error),
td->last_ddir = ddir;
if (!io_u->error && ddir_rw(ddir)) {
- unsigned int bytes = io_u->buflen - io_u->resid;
+ unsigned long long bytes = io_u->buflen - io_u->resid;
int ret;
td->io_blocks[ddir]++;
return &td->buf_state;
}
- v = rand32_between(&td->dedupe_state, 1, 100);
+ v = rand_between(&td->dedupe_state, 1, 100);
if (v <= td->o.dedupe_percentage)
return &td->buf_state_prev;
frand_copy(&td->buf_state_prev, rs);
}
-void fill_io_buffer(struct thread_data *td, void *buf, unsigned int min_write,
- unsigned int max_bs)
+void fill_io_buffer(struct thread_data *td, void *buf, unsigned long long min_write,
+ unsigned long long max_bs)
{
struct thread_options *o = &td->o;
if (o->compress_percentage || o->dedupe_percentage) {
unsigned int perc = td->o.compress_percentage;
struct frand_state *rs;
- unsigned int left = max_bs;
- unsigned int this_write;
+ unsigned long long left = max_bs;
+ unsigned long long this_write;
do {
rs = get_buf_state(td);
if (perc) {
this_write = min_not_zero(min_write,
- td->o.compress_chunk);
+ (unsigned long long) td->o.compress_chunk);
fill_random_buf_percentage(rs, buf, perc,
this_write, this_write,
* "randomly" fill the buffer contents
*/
void io_u_fill_buffer(struct thread_data *td, struct io_u *io_u,
- unsigned int min_write, unsigned int max_bs)
+ unsigned long long min_write, unsigned long long max_bs)
{
io_u->buf_filled_len = 0;
fill_io_buffer(td, io_u->buf, min_write, max_bs);
/*
* Allocated/set buffer and length
*/
- unsigned long buflen;
+ unsigned long long buflen;
unsigned long long offset;
void *buf;
* partial transfers / residual data counts
*/
void *xfer_buf;
- unsigned long xfer_buflen;
+ unsigned long long xfer_buflen;
/*
* Parameter related to pre-filled buffers and
* their size to handle variable block sizes.
*/
- unsigned long buf_filled_len;
+ unsigned long long buf_filled_len;
struct io_piece *ipo;
extern int io_u_quiesce(struct thread_data *);
extern void io_u_log_error(struct thread_data *, struct io_u *);
extern void io_u_mark_depth(struct thread_data *, unsigned int);
-extern void fill_io_buffer(struct thread_data *, void *, unsigned int, unsigned int);
-extern void io_u_fill_buffer(struct thread_data *td, struct io_u *, unsigned int, unsigned int);
+extern void fill_io_buffer(struct thread_data *, void *, unsigned long long, unsigned long long);
+extern void io_u_fill_buffer(struct thread_data *td, struct io_u *, unsigned long long, unsigned long long);
void io_u_mark_complete(struct thread_data *, unsigned int);
void io_u_mark_submit(struct thread_data *, unsigned int);
bool queue_full(const struct thread_data *);
struct fio_file *f = io_u->file;
if (f)
- dprint(FD_IO, "%s: io_u %p: off=0x%llx,len=0x%lx,ddir=%d,file=%s\n",
+ dprint(FD_IO, "%s: io_u %p: off=0x%llx,len=0x%llx,ddir=%d,file=%s\n",
p, io_u,
(unsigned long long) io_u->offset,
io_u->buflen, io_u->ddir,
f->file_name);
else
- dprint(FD_IO, "%s: io_u %p: off=0x%llx,len=0x%lx,ddir=%d\n",
+ dprint(FD_IO, "%s: io_u %p: off=0x%llx,len=0x%llx,ddir=%d\n",
p, io_u,
(unsigned long long) io_u->offset,
io_u->buflen, io_u->ddir);
enum fio_q_status td_io_queue(struct thread_data *td, struct io_u *io_u)
{
const enum fio_ddir ddir = acct_ddir(io_u);
- unsigned long buflen = io_u->xfer_buflen;
+ unsigned long long buflen = io_u->xfer_buflen;
enum fio_q_status ret;
dprint_io_u(io_u, "queue");
if (!td->o.write_iolog_file)
return;
- fprintf(td->iolog_f, "%s %s %llu %lu\n", io_u->file->file_name,
+ fprintf(td->iolog_f, "%s %s %llu %llu\n", io_u->file->file_name,
io_ddir_name(io_u->ddir),
io_u->offset, io_u->buflen);
}
io_u->buflen = ipo->len;
io_u->file = td->files[ipo->fileno];
get_file(io_u->file);
- dprint(FD_IO, "iolog: get %llu/%lu/%s\n", io_u->offset,
+ dprint(FD_IO, "iolog: get %llu/%llu/%s\n", io_u->offset,
io_u->buflen, io_u->file->file_name);
if (ipo->delay)
iolog_delay(td, ipo->delay);
td->io_hist_len--;
}
-void trim_io_piece(struct thread_data *td, const struct io_u *io_u)
+void trim_io_piece(const struct io_u *io_u)
{
struct io_piece *ipo = io_u->ipo;
}
if (l->td && l->td->o.io_submit_mode != IO_MODE_OFFLOAD) {
- struct io_logs *p;
+ struct io_logs *__p;
- p = calloc(1, sizeof(*l->pending));
- p->max_samples = DEF_LOG_ENTRIES;
- p->log = calloc(p->max_samples, log_entry_sz(l));
- l->pending = p;
+ __p = calloc(1, sizeof(*l->pending));
+ __p->max_samples = DEF_LOG_ENTRIES;
+ __p->log = calloc(__p->max_samples, log_entry_sz(l));
+ l->pending = __p;
}
if (l->log_offset)
entry_before = flist_first_entry(&entry->list, struct io_u_plat_entry, list);
io_u_plat_before = entry_before->io_u_plat;
- fprintf(f, "%lu, %u, %u, ", (unsigned long) s->time,
- io_sample_ddir(s), s->bs);
+ fprintf(f, "%lu, %u, %llu, ", (unsigned long) s->time,
+ io_sample_ddir(s), (unsigned long long) s->bs);
for (j = 0; j < FIO_IO_U_PLAT_NR - stride; j += stride) {
fprintf(f, "%llu, ", (unsigned long long)
hist_sum(j, stride, io_u_plat, io_u_plat_before));
s = __get_sample(samples, log_offset, i);
if (!log_offset) {
- fprintf(f, "%lu, %" PRId64 ", %u, %u\n",
+ fprintf(f, "%lu, %" PRId64 ", %u, %llu\n",
(unsigned long) s->time,
s->data.val,
- io_sample_ddir(s), s->bs);
+ io_sample_ddir(s), (unsigned long long) s->bs);
} else {
struct io_sample_offset *so = (void *) s;
- fprintf(f, "%lu, %" PRId64 ", %u, %u, %llu\n",
+ fprintf(f, "%lu, %" PRId64 ", %u, %llu, %llu\n",
(unsigned long) s->time,
s->data.val,
- io_sample_ddir(s), s->bs,
+ io_sample_ddir(s), (unsigned long long) s->bs,
(unsigned long long) so->offset);
}
}
uint64_t time;
union io_sample_data data;
uint32_t __ddir;
- uint32_t bs;
+ uint64_t bs;
};
struct io_sample_offset {
extern bool __must_check init_iolog(struct thread_data *td);
extern void log_io_piece(struct thread_data *, struct io_u *);
extern void unlog_io_piece(struct thread_data *, struct io_u *);
-extern void trim_io_piece(struct thread_data *, const struct io_u *);
+extern void trim_io_piece(const struct io_u *);
extern void queue_io_piece(struct thread_data *, struct io_piece *);
extern void prune_io_piece_log(struct thread_data *);
extern void write_iolog_close(struct thread_data *);
#define firstfree_valid(b) ((b)->first_free != (uint64_t) -1)
+static const unsigned long bit_masks[] = {
+ 0x0000000000000000, 0x0000000000000001, 0x0000000000000003, 0x0000000000000007,
+ 0x000000000000000f, 0x000000000000001f, 0x000000000000003f, 0x000000000000007f,
+ 0x00000000000000ff, 0x00000000000001ff, 0x00000000000003ff, 0x00000000000007ff,
+ 0x0000000000000fff, 0x0000000000001fff, 0x0000000000003fff, 0x0000000000007fff,
+ 0x000000000000ffff, 0x000000000001ffff, 0x000000000003ffff, 0x000000000007ffff,
+ 0x00000000000fffff, 0x00000000001fffff, 0x00000000003fffff, 0x00000000007fffff,
+ 0x0000000000ffffff, 0x0000000001ffffff, 0x0000000003ffffff, 0x0000000007ffffff,
+ 0x000000000fffffff, 0x000000001fffffff, 0x000000003fffffff, 0x000000007fffffff,
+ 0x00000000ffffffff,
+#if BITS_PER_LONG == 64
+ 0x00000001ffffffff, 0x00000003ffffffff, 0x00000007ffffffff, 0x0000000fffffffff,
+ 0x0000001fffffffff, 0x0000003fffffffff, 0x0000007fffffffff, 0x000000ffffffffff,
+ 0x000001ffffffffff, 0x000003ffffffffff, 0x000007ffffffffff, 0x00000fffffffffff,
+ 0x00001fffffffffff, 0x00003fffffffffff, 0x00007fffffffffff, 0x0000ffffffffffff,
+ 0x0001ffffffffffff, 0x0003ffffffffffff, 0x0007ffffffffffff, 0x000fffffffffffff,
+ 0x001fffffffffffff, 0x003fffffffffffff, 0x007fffffffffffff, 0x00ffffffffffffff,
+ 0x01ffffffffffffff, 0x03ffffffffffffff, 0x07ffffffffffffff, 0x0fffffffffffffff,
+ 0x1fffffffffffffff, 0x3fffffffffffffff, 0x7fffffffffffffff, 0xffffffffffffffff
+#endif
+};
+
struct axmap_level {
int level;
unsigned long map_size;
uint64_t nr_bits;
};
-static unsigned long ulog64(unsigned long val, unsigned int log)
+static inline unsigned long ulog64(unsigned long val, unsigned int log)
{
while (log-- && val)
val >>= UNIT_SHIFT;
void *), void *data)
{
struct axmap_level *al;
+ uint64_t index = bit_nr;
int i;
for (i = 0; i < axmap->nr_levels; i++) {
- unsigned long index = ulog64(bit_nr, i);
unsigned long offset = index >> UNIT_SHIFT;
unsigned int bit = index & BLOCKS_PER_UNIT_MASK;
if (func(al, offset, bit, data))
return true;
+
+ if (index)
+ index >>= UNIT_SHIFT;
}
return false;
}
static bool axmap_handler_topdown(struct axmap *axmap, uint64_t bit_nr,
- bool (*func)(struct axmap_level *, unsigned long, unsigned int, void *),
- void *data)
+ bool (*func)(struct axmap_level *, unsigned long, unsigned int, void *))
{
- struct axmap_level *al;
- int i, level = axmap->nr_levels;
+ int i;
for (i = axmap->nr_levels - 1; i >= 0; i--) {
- unsigned long index = ulog64(bit_nr, --level);
+ unsigned long index = ulog64(bit_nr, i);
unsigned long offset = index >> UNIT_SHIFT;
unsigned int bit = index & BLOCKS_PER_UNIT_MASK;
- al = &axmap->levels[i];
-
- if (func(al, offset, bit, data))
+ if (func(&axmap->levels[i], offset, bit, NULL))
return true;
}
unsigned int set_bits;
};
-static const unsigned long bit_masks[] = {
- 0x0000000000000000, 0x0000000000000001, 0x0000000000000003, 0x0000000000000007,
- 0x000000000000000f, 0x000000000000001f, 0x000000000000003f, 0x000000000000007f,
- 0x00000000000000ff, 0x00000000000001ff, 0x00000000000003ff, 0x00000000000007ff,
- 0x0000000000000fff, 0x0000000000001fff, 0x0000000000003fff, 0x0000000000007fff,
- 0x000000000000ffff, 0x000000000001ffff, 0x000000000003ffff, 0x000000000007ffff,
- 0x00000000000fffff, 0x00000000001fffff, 0x00000000003fffff, 0x00000000007fffff,
- 0x0000000000ffffff, 0x0000000001ffffff, 0x0000000003ffffff, 0x0000000007ffffff,
- 0x000000000fffffff, 0x000000001fffffff, 0x000000003fffffff, 0x000000007fffffff,
- 0x00000000ffffffff,
-#if BITS_PER_LONG == 64
- 0x00000001ffffffff, 0x00000003ffffffff, 0x00000007ffffffff, 0x0000000fffffffff,
- 0x0000001fffffffff, 0x0000003fffffffff, 0x0000007fffffffff, 0x000000ffffffffff,
- 0x000001ffffffffff, 0x000003ffffffffff, 0x000007ffffffffff, 0x00000fffffffffff,
- 0x00001fffffffffff, 0x00003fffffffffff, 0x00007fffffffffff, 0x0000ffffffffffff,
- 0x0001ffffffffffff, 0x0003ffffffffffff, 0x0007ffffffffffff, 0x000fffffffffffff,
- 0x001fffffffffffff, 0x003fffffffffffff, 0x007fffffffffffff, 0x00ffffffffffffff,
- 0x01ffffffffffffff, 0x03ffffffffffffff, 0x07ffffffffffffff, 0x0fffffffffffffff,
- 0x1fffffffffffffff, 0x3fffffffffffffff, 0x7fffffffffffffff, 0xffffffffffffffff
-#endif
-};
-
static bool axmap_set_fn(struct axmap_level *al, unsigned long offset,
unsigned int bit, void *__data)
{
* Mask off any potential overlap, only sets contig regions
*/
overlap = al->map[offset] & mask;
- if (overlap == mask)
+ if (overlap == mask) {
+done:
+ data->set_bits = 0;
return true;
+ }
- while (overlap) {
- unsigned long clear_mask = ~(1UL << ffz(~overlap));
+ if (overlap) {
+ const int __bit = ffz(~overlap);
- mask &= clear_mask;
- overlap &= clear_mask;
- nr_bits--;
+ nr_bits = __bit - bit;
+ if (!nr_bits)
+ goto done;
+
+ mask = bit_masks[nr_bits] << bit;
}
assert(mask);
assert(!(al->map[offset] & mask));
-
al->map[offset] |= mask;
if (!al->level)
unsigned int max_bits, this_set;
max_bits = BLOCKS_PER_UNIT - (bit_nr & BLOCKS_PER_UNIT_MASK);
- if (max_bits < nr_bits)
+ if (nr_bits > max_bits)
data.nr_bits = max_bits;
this_set = data.nr_bits;
bool axmap_isset(struct axmap *axmap, uint64_t bit_nr)
{
if (bit_nr <= axmap->nr_bits)
- return axmap_handler_topdown(axmap, bit_nr, axmap_isset_fn, NULL);
+ return axmap_handler_topdown(axmap, bit_nr, axmap_isset_fn);
return false;
}
for (i = level; i >= 0; i--) {
struct axmap_level *al = &axmap->levels[i];
- /*
- * Clear 'ret', this is a bug condition.
- */
- if (index >= al->map_size) {
- ret = -1ULL;
- break;
- }
+ if (index >= al->map_size)
+ goto err;
for (j = index; j < al->map_size; j++) {
if (al->map[j] == -1UL)
if (ret < axmap->nr_bits)
return ret;
+err:
return (uint64_t) -1ULL;
}
--- /dev/null
+#ifndef _NOWARN_SNPRINTF_H_
+#define _NOWARN_SNPRINTF_H_
+
+#include <stdio.h>
+#include <stdarg.h>
+
+static inline int nowarn_snprintf(char *str, size_t size, const char *format,
+ ...)
+{
+ va_list args;
+ int res;
+
+ va_start(args, format);
+#if __GNUC__ -0 >= 8
+#pragma GCC diagnostic push "-Wformat-truncation"
+#pragma GCC diagnostic ignored "-Wformat-truncation"
+#endif
+ res = vsnprintf(str, size, format, args);
+#if __GNUC__ -0 >= 8
+#pragma GCC diagnostic pop "-Wformat-truncation"
+#endif
+ va_end(args);
+
+ return res;
+}
+
+#endif
}
}
-/*
- * Generate a random value between 'start' and 'end', both inclusive
- */
-static inline int rand32_between(struct frand_state *state, int start, int end)
+static inline uint32_t rand32_upto(struct frand_state *state, uint32_t end)
{
uint32_t r;
assert(!state->use64);
r = __rand32(&state->state32);
- return start + (int) ((double)end * (r / (FRAND32_MAX + 1.0)));
+ end++;
+ return (int) ((double)end * (r / (FRAND32_MAX + 1.0)));
+}
+
+static inline uint64_t rand64_upto(struct frand_state *state, uint64_t end)
+{
+ uint64_t r;
+
+ assert(state->use64);
+
+ r = __rand64(&state->state64);
+ end++;
+ return (uint64_t) ((double)end * (r / (FRAND64_MAX + 1.0)));
+}
+
+/*
+ * Generate a random value between 'start' and 'end', both inclusive
+ */
+static inline uint64_t rand_between(struct frand_state *state, uint64_t start,
+ uint64_t end)
+{
+ if (state->use64)
+ return start + rand64_upto(state, end - start);
+ else
+ return start + rand32_upto(state, end - start);
}
extern void init_rand(struct frand_state *, bool);
td->bytes_done[ddir] = 0;
td->rate_io_issue_bytes[ddir] = 0;
td->rate_next_io_time[ddir] = 0;
+ td->last_usec[ddir] = 0;
}
}
return 0;
if (is_backend) {
- size_t ret = fio_server_text_output(FIO_LOG_INFO, buf, len);
+ ssize_t ret = fio_server_text_output(FIO_LOG_INFO, buf, len);
if (ret != -1)
return ret;
}
free(buf2);
}
-size_t log_info(const char *format, ...)
+ssize_t log_info(const char *format, ...)
{
va_list args;
- size_t ret;
+ ssize_t ret;
va_start(args, format);
ret = log_valist(format, args);
return fflush(f_out);
}
-size_t log_err(const char *format, ...)
+ssize_t log_err(const char *format, ...)
{
- size_t ret;
+ ssize_t ret;
int len;
char *buffer;
va_list args;
extern FILE *f_out;
extern FILE *f_err;
-extern size_t log_err(const char *format, ...) __attribute__ ((__format__ (__printf__, 1, 2)));
-extern size_t log_info(const char *format, ...) __attribute__ ((__format__ (__printf__, 1, 2)));
+extern ssize_t log_err(const char *format, ...) __attribute__ ((__format__ (__printf__, 1, 2)));
+extern ssize_t log_info(const char *format, ...) __attribute__ ((__format__ (__printf__, 1, 2)));
extern size_t __log_buf(struct buf_output *, const char *format, ...) __attribute__ ((__format__ (__printf__, 2, 3)));
extern size_t log_valist(const char *str, va_list);
extern void log_prevalist(int type, const char *str, va_list);
struct split {
unsigned int nr;
- unsigned int val1[ZONESPLIT_MAX];
+ unsigned long long val1[ZONESPLIT_MAX];
unsigned long long val2[ZONESPLIT_MAX];
};
static int split_parse_ddir(struct thread_options *o, struct split *split,
- enum fio_ddir ddir, char *str, bool absolute,
- unsigned int max_splits)
+ char *str, bool absolute, unsigned int max_splits)
{
unsigned long long perc;
unsigned int i;
bool data)
{
unsigned int i, perc, perc_missing;
- unsigned int max_bs, min_bs;
+ unsigned long long max_bs, min_bs;
struct split split;
memset(&split, 0, sizeof(split));
- if (split_parse_ddir(o, &split, ddir, str, data, BSSPLIT_MAX))
+ if (split_parse_ddir(o, &split, str, data, BSSPLIT_MAX))
return 1;
if (!split.nr)
return 0;
memset(&split, 0, sizeof(split));
- if (split_parse_ddir(o, &split, ddir, str, absolute, ZONESPLIT_MAX))
+ if (split_parse_ddir(o, &split, str, absolute, ZONESPLIT_MAX))
return 1;
if (!split.nr)
return 0;
}
if (parse_dryrun()) {
- int i;
-
for (i = 0; i < DDIR_RWDIR_CNT; i++) {
free(td->o.zone_split[i]);
td->o.zone_split[i] = NULL;
{
struct thread_data *td = cb_data_to_td(data);
- if (read_only && td_write(td)) {
- log_err("fio: job <%s> has write bit set, but fio is in"
- " read-only mode\n", td->o.name);
+ if (read_only && (td_write(td) || td_trim(td))) {
+ log_err("fio: job <%s> has write or trim bit set, but"
+ " fio is in read-only mode\n", td->o.name);
return 1;
}
.name = "bs",
.lname = "Block size",
.alias = "blocksize",
- .type = FIO_OPT_INT,
+ .type = FIO_OPT_ULL,
.off1 = offsetof(struct thread_options, bs[DDIR_READ]),
.off2 = offsetof(struct thread_options, bs[DDIR_WRITE]),
.off3 = offsetof(struct thread_options, bs[DDIR_TRIM]),
.name = "ba",
.lname = "Block size align",
.alias = "blockalign",
- .type = FIO_OPT_INT,
+ .type = FIO_OPT_ULL,
.off1 = offsetof(struct thread_options, ba[DDIR_READ]),
.off2 = offsetof(struct thread_options, ba[DDIR_WRITE]),
.off3 = offsetof(struct thread_options, ba[DDIR_TRIM]),
{
.name = "bssplit",
.lname = "Block size split",
- .type = FIO_OPT_STR,
+ .type = FIO_OPT_STR_ULL,
.cb = str_bssplit_cb,
.off1 = offsetof(struct thread_options, bssplit),
.help = "Set a specific mix of block sizes",
* substitution always occurs, even if VARNAME is empty or the corresponding
* environment variable undefined.
*/
-static char *option_dup_subs(const char *opt)
+char *fio_option_dup_subs(const char *opt)
{
char out[OPT_LEN_MAX+1];
char in[OPT_LEN_MAX+1];
int i;
char **opts_copy = malloc(num_opts * sizeof(*opts));
for (i = 0; i < num_opts; i++) {
- opts_copy[i] = option_dup_subs(opts[i]);
+ opts_copy[i] = fio_option_dup_subs(opts[i]);
if (!opts_copy[i])
continue;
opts_copy[i] = fio_keyword_replace(opts_copy[i]);
return find_option(fio_options, name);
}
-static struct fio_option *find_next_opt(struct thread_options *o,
- struct fio_option *from,
+static struct fio_option *find_next_opt(struct fio_option *from,
unsigned int off1)
{
struct fio_option *opt;
struct fio_option *opt, *next;
next = NULL;
- while ((opt = find_next_opt(o, next, off1)) != NULL) {
+ while ((opt = find_next_opt(next, off1)) != NULL) {
if (opt_is_set(o, opt))
return true;
uint64_t len)
{
int ret;
- ret = fallocate(f->fd, 0, 0, len);
+ ret = fallocate(f->fd, 0, offset, len);
if (ret == 0)
return true;
static const char *opt_type_names[] = {
"OPT_INVALID",
"OPT_STR",
+ "OPT_STR_ULL",
"OPT_STR_MULTI",
"OPT_STR_VAL",
"OPT_STR_VAL_TIME",
"OPT_STR_STORE",
"OPT_RANGE",
"OPT_INT",
+ "OPT_ULL",
"OPT_BOOL",
"OPT_FLOAT_LIST",
"OPT_STR_SET",
}
static void show_option_range(const struct fio_option *o,
- size_t (*logger)(const char *format, ...))
+ ssize_t (*logger)(const char *format, ...))
{
if (o->type == FIO_OPT_FLOAT_LIST) {
const char *sep = "";
"deprecated",
"unsupported",
};
- size_t (*logger)(const char *format, ...);
+ ssize_t (*logger)(const char *format, ...);
if (is_err)
logger = log_err;
*(s + 1) = '\0';
}
-static int check_range_bytes(const char *str, long *val, void *data)
+static int check_range_bytes(const char *str, long long *val, void *data)
{
long long __val;
int il=0, *ilp;
fio_fp64_t *flp;
long long ull, *ullp;
- long ul1, ul2;
+ long ul2;
+ long long ull1, ull2;
double uf;
char **cp = NULL;
int ret = 0, is_time = 0;
switch (o->type) {
case FIO_OPT_STR:
+ case FIO_OPT_STR_ULL:
case FIO_OPT_STR_MULTI: {
fio_opt_str_fn *fn = o->cb;
break;
if (!strncmp(vp->ival, ptr, str_match_len(vp, ptr))) {
ret = 0;
- if (o->off1)
+ if (!o->off1)
+ continue;
+ if (o->type == FIO_OPT_STR_ULL)
+ val_store(ullp, vp->oval, o->off1, vp->orval, data, o);
+ else
val_store(ilp, vp->oval, o->off1, vp->orval, data, o);
continue;
}
}
case FIO_OPT_STR_VAL_TIME:
is_time = 1;
+ /* fall through */
+ case FIO_OPT_ULL:
case FIO_OPT_INT:
case FIO_OPT_STR_VAL: {
fio_opt_str_val_fn *fn = o->cb;
if (o->maxval && ull > o->maxval) {
log_err("max value out of range: %llu"
- " (%u max)\n", ull, o->maxval);
+ " (%llu max)\n", ull, o->maxval);
return 1;
}
if (o->minval && ull < o->minval) {
val_store(ilp, ull, o->off3, 0, data, o);
}
}
+ } else if (o->type == FIO_OPT_ULL) {
+ if (first)
+ val_store(ullp, ull, o->off1, 0, data, o);
+ if (curr == 1) {
+ if (o->off2)
+ val_store(ullp, ull, o->off2, 0, data, o);
+ }
+ if (curr == 2) {
+ if (o->off3)
+ val_store(ullp, ull, o->off3, 0, data, o);
+ }
+ if (!more) {
+ if (curr < 1) {
+ if (o->off2)
+ val_store(ullp, ull, o->off2, 0, data, o);
+ }
+ if (curr < 2) {
+ if (o->off3)
+ val_store(ullp, ull, o->off3, 0, data, o);
+ }
+ }
} else {
if (first)
val_store(ullp, ull, o->off1, 0, data, o);
p1 = tmp;
ret = 1;
- if (!check_range_bytes(p1, &ul1, data) &&
- !check_range_bytes(p2, &ul2, data)) {
+ if (!check_range_bytes(p1, &ull1, data) &&
+ !check_range_bytes(p2, &ull2, data)) {
ret = 0;
- if (ul1 > ul2) {
- unsigned long foo = ul1;
+ if (ull1 > ull2) {
+ unsigned long long foo = ull1;
- ul1 = ul2;
- ul2 = foo;
+ ull1 = ull2;
+ ull2 = foo;
}
if (first) {
- val_store(ilp, ul1, o->off1, 0, data, o);
- val_store(ilp, ul2, o->off2, 0, data, o);
+ val_store(ullp, ull1, o->off1, 0, data, o);
+ val_store(ullp, ull2, o->off2, 0, data, o);
}
if (curr == 1) {
if (o->off3 && o->off4) {
- val_store(ilp, ul1, o->off3, 0, data, o);
- val_store(ilp, ul2, o->off4, 0, data, o);
+ val_store(ullp, ull1, o->off3, 0, data, o);
+ val_store(ullp, ull2, o->off4, 0, data, o);
}
}
if (curr == 2) {
if (o->off5 && o->off6) {
- val_store(ilp, ul1, o->off5, 0, data, o);
- val_store(ilp, ul2, o->off6, 0, data, o);
+ val_store(ullp, ull1, o->off5, 0, data, o);
+ val_store(ullp, ull2, o->off6, 0, data, o);
}
}
if (!more) {
if (curr < 1) {
if (o->off3 && o->off4) {
- val_store(ilp, ul1, o->off3, 0, data, o);
- val_store(ilp, ul2, o->off4, 0, data, o);
+ val_store(ullp, ull1, o->off3, 0, data, o);
+ val_store(ullp, ull2, o->off4, 0, data, o);
}
}
if (curr < 2) {
if (o->off5 && o->off6) {
- val_store(ilp, ul1, o->off5, 0, data, o);
- val_store(ilp, ul2, o->off6, 0, data, o);
+ val_store(ullp, ull1, o->off5, 0, data, o);
+ val_store(ullp, ull2, o->off6, 0, data, o);
}
}
}
break;
if (o->maxval && il > (int) o->maxval) {
- log_err("max value out of range: %d (%d max)\n",
+ log_err("max value out of range: %d (%llu max)\n",
il, o->maxval);
return 1;
}
}
case FIO_OPT_DEPRECATED:
ret = 1;
+ /* fall through */
case FIO_OPT_SOFT_DEPRECATED:
log_info("Option %s is deprecated\n", o->name);
break;
if (!o->maxval)
o->maxval = UINT_MAX;
}
+ if (o->type == FIO_OPT_ULL) {
+ if (!o->maxval)
+ o->maxval = ULLONG_MAX;
+ }
if (o->type == FIO_OPT_STR_SET && o->def && !o->no_warn_def) {
log_err("Option %s: string set option with"
" default will always be true\n", o->name);
enum fio_opt_type {
FIO_OPT_INVALID = 0,
FIO_OPT_STR,
+ FIO_OPT_STR_ULL,
FIO_OPT_STR_MULTI,
FIO_OPT_STR_VAL,
FIO_OPT_STR_VAL_TIME,
FIO_OPT_STR_STORE,
FIO_OPT_RANGE,
FIO_OPT_INT,
+ FIO_OPT_ULL,
FIO_OPT_BOOL,
FIO_OPT_FLOAT_LIST,
FIO_OPT_STR_SET,
*/
struct value_pair {
const char *ival; /* string option */
- unsigned int oval; /* output value */
+ unsigned long long oval;/* output value */
const char *help; /* help text for sub option */
int orval; /* OR value */
void *cb; /* sub-option callback */
unsigned int off4;
unsigned int off5;
unsigned int off6;
- unsigned int maxval; /* max and min value */
+ unsigned long long maxval; /* max and min value */
int minval;
double maxfp; /* max and min floating value */
double minfp;
}
void fio_server_send_job_options(struct flist_head *opt_list,
- unsigned int groupid)
+ unsigned int gid)
{
struct cmd_job_option pdu;
struct flist_head *entry;
p = flist_entry(entry, struct print_option, list);
memset(&pdu, 0, sizeof(pdu));
- if (groupid == -1U) {
+ if (gid == -1U) {
pdu.global = __cpu_to_le16(1);
pdu.groupid = 0;
} else {
pdu.global = 0;
- pdu.groupid = cpu_to_le32(groupid);
+ pdu.groupid = cpu_to_le32(gid);
}
len = strlen(p->name);
if (len >= sizeof(pdu.name)) {
s->time = cpu_to_le64(s->time);
s->data.val = cpu_to_le64(s->data.val);
s->__ddir = cpu_to_le32(s->__ddir);
- s->bs = cpu_to_le32(s->bs);
+ s->bs = cpu_to_le64(s->bs);
if (log->log_offset) {
struct io_sample_offset *so = (void *) s;
};
enum {
- FIO_SERVER_VER = 73,
+ FIO_SERVER_VER = 74,
FIO_SERVER_MAX_FRAGMENT_PDU = 1024,
FIO_SERVER_MAX_CMD_MB = 2048,
static int compare_block_infos(const void *bs1, const void *bs2)
{
- uint32_t block1 = *(uint32_t *)bs1;
- uint32_t block2 = *(uint32_t *)bs2;
+ uint64_t block1 = *(uint64_t *)bs1;
+ uint64_t block2 = *(uint64_t *)bs2;
int state1 = BLOCK_INFO_STATE(block1);
int state2 = BLOCK_INFO_STATE(block2);
int bscat1 = block_state_category(state1);
usr_cpu = 0;
sys_cpu = 0;
}
+ json_object_add_value_int(root, "job_runtime", ts->total_run_time);
json_object_add_value_float(root, "usr_cpu", usr_cpu);
json_object_add_value_float(root, "sys_cpu", sys_cpu);
json_object_add_value_int(root, "ctx", ts->ctx);
if (ts->ss_dur) {
struct json_object *data;
struct json_array *iops, *bw;
- int i, j, k;
+ int j, k, l;
char ss_buf[64];
snprintf(ss_buf, sizeof(ss_buf), "%s%s:%f%s",
j = ts->ss_head;
else
j = ts->ss_head == 0 ? ts->ss_dur - 1 : ts->ss_head - 1;
- for (i = 0; i < ts->ss_dur; i++) {
- k = (j + i) % ts->ss_dur;
+ for (l = 0; l < ts->ss_dur; l++) {
+ k = (j + l) % ts->ss_dur;
json_array_add_value_int(bw, ts->ss_bw_data[k]);
json_array_add_value_int(iops, ts->ss_iops_data[k]);
}
buf_output_free(out);
}
+ fio_idle_prof_cleanup();
+
log_info_flush();
free(runstats);
free(threadstats);
free(opt_lists);
}
-void show_run_stats(void)
-{
- fio_sem_down(stat_sem);
- __show_run_stats();
- fio_sem_up(stat_sem);
-}
-
void __show_running_run_stats(void)
{
struct thread_data *td;
* submissions, flag 'td' as needing a log regrow and we'll take
* care of it on the submission side.
*/
- if (iolog->td->o.io_submit_mode == IO_MODE_OFFLOAD ||
+ if ((iolog->td && iolog->td->o.io_submit_mode == IO_MODE_OFFLOAD) ||
!per_unit_log(iolog))
return regrow_log(iolog);
- iolog->td->flags |= TD_F_REGROW_LOGS;
- assert(iolog->pending->nr_samples < iolog->pending->max_samples);
+ if (iolog->td)
+ iolog->td->flags |= TD_F_REGROW_LOGS;
+ if (iolog->pending)
+ assert(iolog->pending->nr_samples < iolog->pending->max_samples);
return iolog->pending;
}
static void __add_log_sample(struct io_log *iolog, union io_sample_data data,
- enum fio_ddir ddir, unsigned int bs,
+ enum fio_ddir ddir, unsigned long long bs,
unsigned long t, uint64_t offset)
{
struct io_logs *cur_log;
static unsigned long add_log_sample(struct thread_data *td,
struct io_log *iolog,
union io_sample_data data,
- enum fio_ddir ddir, unsigned int bs,
+ enum fio_ddir ddir, unsigned long long bs,
uint64_t offset)
{
unsigned long elapsed, this_window;
_add_stat_to_log(td->iops_log, elapsed, td->o.log_max != 0);
}
-void add_agg_sample(union io_sample_data data, enum fio_ddir ddir, unsigned int bs)
+void add_agg_sample(union io_sample_data data, enum fio_ddir ddir, unsigned long long bs)
{
struct io_log *iolog;
}
void add_clat_sample(struct thread_data *td, enum fio_ddir ddir,
- unsigned long long nsec, unsigned int bs, uint64_t offset)
+ unsigned long long nsec, unsigned long long bs,
+ uint64_t offset)
{
unsigned long elapsed, this_window;
struct thread_stat *ts = &td->ts;
}
void add_slat_sample(struct thread_data *td, enum fio_ddir ddir,
- unsigned long usec, unsigned int bs, uint64_t offset)
+ unsigned long usec, unsigned long long bs, uint64_t offset)
{
struct thread_stat *ts = &td->ts;
}
void add_lat_sample(struct thread_data *td, enum fio_ddir ddir,
- unsigned long long nsec, unsigned int bs, uint64_t offset)
+ unsigned long long nsec, unsigned long long bs,
+ uint64_t offset)
{
struct thread_stat *ts = &td->ts;
add_stat_sample(&stat[ddir], rate);
if (log) {
- unsigned int bs = 0;
+ unsigned long long bs = 0;
if (td->o.min_bs[ddir] == td->o.max_bs[ddir])
bs = td->o.min_bs[ddir];
extern void show_group_stats(struct group_run_stats *rs, struct buf_output *);
extern bool calc_thread_status(struct jobs_eta *je, int force);
extern void display_thread_status(struct jobs_eta *je);
-extern void show_run_stats(void);
extern void __show_run_stats(void);
extern void __show_running_run_stats(void);
extern void show_running_run_stats(void);
extern void clear_rusage_stat(struct thread_data *);
extern void add_lat_sample(struct thread_data *, enum fio_ddir, unsigned long long,
- unsigned int, uint64_t);
+ unsigned long long, uint64_t);
extern void add_clat_sample(struct thread_data *, enum fio_ddir, unsigned long long,
- unsigned int, uint64_t);
+ unsigned long long, uint64_t);
extern void add_slat_sample(struct thread_data *, enum fio_ddir, unsigned long,
- unsigned int, uint64_t);
-extern void add_agg_sample(union io_sample_data, enum fio_ddir, unsigned int);
+ unsigned long long, uint64_t);
+extern void add_agg_sample(union io_sample_data, enum fio_ddir, unsigned long long);
extern void add_iops_sample(struct thread_data *, struct io_u *,
unsigned int);
extern void add_bw_sample(struct thread_data *, struct io_u *,
return err;
}
+struct overlap_test {
+ unsigned int start;
+ unsigned int nr;
+ unsigned int ret;
+};
+
+static int test_overlap(void)
+{
+ struct overlap_test tests[] = {
+ {
+ .start = 0,
+ .nr = 0,
+ .ret = 0,
+ },
+ {
+ .start = 16,
+ .nr = 16,
+ .ret = 16,
+ },
+ {
+ .start = 16,
+ .nr = 0,
+ .ret = 0,
+ },
+ {
+ .start = 0,
+ .nr = 32,
+ .ret = 16,
+ },
+ {
+ .start = 48,
+ .nr = 32,
+ .ret = 32,
+ },
+ {
+ .start = 32,
+ .nr = 32,
+ .ret = 16,
+ },
+ {
+ .start = 79,
+ .nr = 1,
+ .ret = 0,
+ },
+ {
+ .start = 80,
+ .nr = 21,
+ .ret = 21,
+ },
+ {
+ .start = 102,
+ .nr = 1,
+ .ret = 1,
+ },
+ {
+ .start = 101,
+ .nr = 3,
+ .ret = 1,
+ },
+ {
+ .start = 106,
+ .nr = 4,
+ .ret = 4,
+ },
+ {
+ .start = 105,
+ .nr = 3,
+ .ret = 1,
+ },
+ {
+ .start = 120,
+ .nr = 4,
+ .ret = 4,
+ },
+ {
+ .start = 118,
+ .nr = 2,
+ .ret = 2,
+ },
+ {
+ .start = 118,
+ .nr = 2,
+ .ret = 0,
+ },
+ {
+ .start = 1100,
+ .nr = 1,
+ .ret = 1,
+ },
+ {
+ .start = 1000,
+ .nr = 256,
+ .ret = 100,
+ },
+ {
+ .start = 22684,
+ .nr = 1,
+ .ret = 1,
+ },
+ {
+ .start = 22670,
+ .nr = 60,
+ .ret = 14,
+ },
+ {
+ .start = -1U,
+ },
+ };
+ struct axmap *map;
+ int entries, i, ret, err = 0;
+
+ entries = 0;
+ for (i = 0; tests[i].start != -1U; i++) {
+ unsigned int this = tests[i].start + tests[i].nr;
+
+ if (this > entries)
+ entries = this;
+ }
+
+ printf("Test overlaps...");
+ fflush(stdout);
+
+ map = axmap_new(entries);
+
+ for (i = 0; tests[i].start != -1U; i++) {
+ struct overlap_test *t = &tests[i];
+
+ ret = axmap_set_nr(map, t->start, t->nr);
+ if (ret != t->ret) {
+ printf("fail\n");
+ printf("start=%u, nr=%d, ret=%d: %d\n", t->start, t->nr,
+ t->ret, ret);
+ err = 1;
+ break;
+ }
+ }
+
+ if (!err)
+ printf("pass!\n");
+ axmap_free(map);
+ return err;
+}
+
int main(int argc, char *argv[])
{
size_t size = (1UL << 23) - 200;
return 2;
if (test_multi(size, 17))
return 3;
+ if (test_overlap())
+ return 4;
return 0;
}
init_rand(&s, false);
for (i = 0; i < nvalues; i++) {
- int v = rand32_between(&s, start, end);
+ int v = rand_between(&s, start, end);
buckets[v - start]++;
}
--- /dev/null
+[test]
+filename=${DUT}
+rw=randread
+time_based
+runtime=1s
--- /dev/null
+[test]
+filename=${DUT}
+rw=randtrim
+time_based
+runtime=1s
--- /dev/null
+[test]
+filename=${DUT}
+rw=randwrite
+time_based
+runtime=1s
--- /dev/null
+#!/bin/bash
+#
+# Do some basic test of the --readonly parameter
+#
+# DUT should be a device that accepts read, write, and trim operations
+#
+# Example usage:
+#
+# DUT=/dev/fioa t/readonly.sh
+#
+TESTNUM=1
+
+#
+# The first parameter is the return code
+# The second parameter is 0 if the return code should be 0
+# positive if the return code should be positive
+#
+check () {
+ echo "********************"
+
+ if [ $2 -gt 0 ]; then
+ if [ $1 -eq 0 ]; then
+ echo "Test $TESTNUM failed"
+ echo "********************"
+ exit 1
+ else
+ echo "Test $TESTNUM passed"
+ fi
+ else
+ if [ $1 -gt 0 ]; then
+ echo "Test $TESTNUM failed"
+ echo "********************"
+ exit 1
+ else
+ echo "Test $TESTNUM passed"
+ fi
+ fi
+
+ echo "********************"
+ echo
+ TESTNUM=$((TESTNUM+1))
+}
+
+./fio --name=test --filename=$DUT --rw=randread --readonly --time_based --runtime=1s &> /dev/null
+check $? 0
+./fio --name=test --filename=$DUT --rw=randwrite --readonly --time_based --runtime=1s &> /dev/null
+check $? 1
+./fio --name=test --filename=$DUT --rw=randtrim --readonly --time_based --runtime=1s &> /dev/null
+check $? 1
+
+./fio --name=test --filename=$DUT --readonly --rw=randread --time_based --runtime=1s &> /dev/null
+check $? 0
+./fio --name=test --filename=$DUT --readonly --rw=randwrite --time_based --runtime=1s &> /dev/null
+check $? 1
+./fio --name=test --filename=$DUT --readonly --rw=randtrim --time_based --runtime=1s &> /dev/null
+check $? 1
+
+./fio --name=test --filename=$DUT --rw=randread --time_based --runtime=1s &> /dev/null
+check $? 0
+./fio --name=test --filename=$DUT --rw=randwrite --time_based --runtime=1s &> /dev/null
+check $? 0
+./fio --name=test --filename=$DUT --rw=randtrim --time_based --runtime=1s &> /dev/null
+check $? 0
+
+./fio t/jobs/readonly-r.fio --readonly &> /dev/null
+check $? 0
+./fio t/jobs/readonly-w.fio --readonly &> /dev/null
+check $? 1
+./fio t/jobs/readonly-t.fio --readonly &> /dev/null
+check $? 1
+
+./fio --readonly t/jobs/readonly-r.fio &> /dev/null
+check $? 0
+./fio --readonly t/jobs/readonly-w.fio &> /dev/null
+check $? 1
+./fio --readonly t/jobs/readonly-t.fio &> /dev/null
+check $? 1
+
+./fio t/jobs/readonly-r.fio &> /dev/null
+check $? 0
+./fio t/jobs/readonly-w.fio &> /dev/null
+check $? 0
+./fio t/jobs/readonly-t.fio &> /dev/null
+check $? 0
#define ZONESPLIT_MAX 256
struct bssplit {
- uint32_t bs;
+ uint64_t bs;
uint32_t perc;
};
unsigned long long start_offset;
unsigned long long start_offset_align;
- unsigned int bs[DDIR_RWDIR_CNT];
- unsigned int ba[DDIR_RWDIR_CNT];
- unsigned int min_bs[DDIR_RWDIR_CNT];
- unsigned int max_bs[DDIR_RWDIR_CNT];
+ unsigned long long bs[DDIR_RWDIR_CNT];
+ unsigned long long ba[DDIR_RWDIR_CNT];
+ unsigned long long min_bs[DDIR_RWDIR_CNT];
+ unsigned long long max_bs[DDIR_RWDIR_CNT];
struct bssplit *bssplit[DDIR_RWDIR_CNT];
unsigned int bssplit_nr[DDIR_RWDIR_CNT];
unsigned int perc_rand[DDIR_RWDIR_CNT];
unsigned int hugepage_size;
- unsigned int rw_min_bs;
+ unsigned long long rw_min_bs;
+ unsigned int pad2;
unsigned int thinktime;
unsigned int thinktime_spin;
unsigned int thinktime_blocks;
unsigned int fdatasync_blocks;
unsigned int barrier_blocks;
unsigned long long start_delay;
+ unsigned long long start_delay_orig;
unsigned long long start_delay_high;
unsigned long long timeout;
unsigned long long ramp_time;
uint64_t start_offset;
uint64_t start_offset_align;
- uint32_t bs[DDIR_RWDIR_CNT];
- uint32_t ba[DDIR_RWDIR_CNT];
- uint32_t min_bs[DDIR_RWDIR_CNT];
- uint32_t max_bs[DDIR_RWDIR_CNT];
+ uint64_t bs[DDIR_RWDIR_CNT];
+ uint64_t ba[DDIR_RWDIR_CNT];
+ uint64_t min_bs[DDIR_RWDIR_CNT];
+ uint64_t max_bs[DDIR_RWDIR_CNT];
struct bssplit bssplit[DDIR_RWDIR_CNT][BSSPLIT_MAX];
uint32_t bssplit_nr[DDIR_RWDIR_CNT];
uint32_t perc_rand[DDIR_RWDIR_CNT];
uint32_t hugepage_size;
- uint32_t rw_min_bs;
+ uint64_t rw_min_bs;
+ uint32_t pad2;
uint32_t thinktime;
uint32_t thinktime_spin;
uint32_t thinktime_blocks;
#include <stdio.h>
#include <math.h>
-#include <malloc.h>
+#include <stdlib.h>
#include <string.h>
/*
#!/usr/bin/python2.7
+# Note: this script is python2 and python3 compatible.
#
# fio_jsonplus_clat2csv
#
# 10304ns is the 100th percentile for read latency
#
+from __future__ import absolute_import
+from __future__ import print_function
import os
import json
import argparse
+import six
+from six.moves import range
def parse_args():
def more_lines(indices, bins):
- for key, value in indices.iteritems():
+ for key, value in six.iteritems(indices):
if value < len(bins[key]):
return True
"Are you sure you are using json+ output?")
bins[ddir] = [[int(key), value] for key, value in
- jsondata['jobs'][jobnum][ddir][bins_loc]
- ['bins'].iteritems()]
+ six.iteritems(jsondata['jobs'][jobnum][ddir][bins_loc]
+ ['bins'])]
bins[ddir] = sorted(bins[ddir], key=lambda bin: bin[0])
run_total[ddir] = [0 for x in range(0, len(bins[ddir]))]
output.write(", , , ")
output.write("\n")
- print "{0} generated".format(outfile)
+ print("{0} generated".format(outfile))
if __name__ == '__main__':
#!/usr/bin/python2.7
+# Note: this script is python2 and python 3 compatible.
#
# fiologparser.py
#
#
# to see per-interval average completion latency.
+from __future__ import absolute_import
+from __future__ import print_function
import argparse
import math
#!/usr/bin/python2.7
+# Note: this script is python2 and python3 compatible.
#
# Copyright (C) 2013 eNovance SAS <licensing@enovance.com>
# Author: Erwan Velu <erwan@enovance.com>
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+from __future__ import absolute_import
+from __future__ import print_function
import os
import fnmatch
import sys
import re
import math
import shutil
+from six.moves import map
+from six.moves import range
def find_file(path, pattern):
fio_data_file=[]
return fio_data_file
def generate_gnuplot_script(fio_data_file,title,gnuplot_output_filename,gnuplot_output_dir,mode,disk_perf,gpm_dir):
- if verbose: print "Generating rendering scripts"
+ if verbose: print("Generating rendering scripts")
filename=gnuplot_output_dir+'mygraph'
temporary_files.append(filename)
f=open(filename,'w')
f.close()
def compute_aggregated_file(fio_data_file, gnuplot_output_filename, gnuplot_output_dir):
- if verbose: print "Processing data file 2/2"
+ if verbose: print("Processing data file 2/2")
temp_files=[]
pos=0
end_time=max_time
if end_time == -1:
end_time="infinite"
- if verbose: print "Processing data file 1/2 with %s<time<%s" % (min_time,end_time)
+ if verbose: print("Processing data file 1/2 with %s<time<%s" % (min_time,end_time))
files=[]
temp_outfile=[]
blk_size=0
try:
blk_size=int(block_size)
except:
- print "Error while reading the following line :"
- print line
+ print("Error while reading the following line :")
+ print(line)
sys.exit(1);
# We ignore the first 500msec as it doesn't seems to be part of the real benchmark
return blk_size
def compute_math(fio_data_file, title,gnuplot_output_filename,gnuplot_output_dir,mode,disk_perf,gpm_dir):
- if verbose: print "Computing Maths"
+ if verbose: print("Computing Maths")
global_min=[]
global_max=[]
average_file=open(gnuplot_output_dir+gnuplot_output_filename+'.average', 'w')
max_file.write('DiskName %s\n'% mode)
average_file.write('DiskName %s\n'% mode)
stddev_file.write('DiskName %s\n'% mode )
- for disk in xrange(len(fio_data_file)):
+ for disk in range(len(fio_data_file)):
# print disk_perf[disk]
min_file.write("# Disk%d was coming from %s\n" % (disk,fio_data_file[disk]))
max_file.write("# Disk%d was coming from %s\n" % (disk,fio_data_file[disk]))
average_file.write("# Disk%d was coming from %s\n" % (disk,fio_data_file[disk]))
stddev_file.write("# Disk%d was coming from %s\n" % (disk,fio_data_file[disk]))
avg = average(disk_perf[disk])
- variance = map(lambda x: (x - avg)**2, disk_perf[disk])
+ variance = [(x - avg)**2 for x in disk_perf[disk]]
standard_deviation = math.sqrt(average(variance))
# print "Disk%d [ min=%.2f max=%.2f avg=%.2f stddev=%.2f \n" % (disk,min(disk_perf[disk]),max(disk_perf[disk]),avg, standard_deviation)
average_file.write('%d %d\n' % (disk, avg))
global_disk_perf = sum(disk_perf, [])
avg = average(global_disk_perf)
- variance = map(lambda x: (x - avg)**2, global_disk_perf)
+ variance = [(x - avg)**2 for x in global_disk_perf]
standard_deviation = math.sqrt(average(variance))
global_file.write('min=%.2f\n' % min(global_disk_perf))
max_file=file
# Let's print the avg output
if global_search == "avg":
- print "Biggest aggregated value of %s was %2.f in file %s\n" % (global_search, max_result, max_file)
+ print("Biggest aggregated value of %s was %2.f in file %s\n" % (global_search, max_result, max_file))
else:
- print "Global search %s is not yet implemented\n" % global_search
+ print("Global search %s is not yet implemented\n" % global_search)
def render_gnuplot(fio_data_file, gnuplot_output_dir):
- print "Running gnuplot Rendering"
+ print("Running gnuplot Rendering")
try:
# Let's render all the compared files if some
if len(fio_data_file) > 1:
- if verbose: print " |-> Rendering comparing traces"
+ if verbose: print(" |-> Rendering comparing traces")
os.system("cd %s; for i in *.gnuplot; do gnuplot $i; done" % gnuplot_output_dir)
- if verbose: print " |-> Rendering math traces"
+ if verbose: print(" |-> Rendering math traces")
os.system("cd %s; gnuplot mymath" % gnuplot_output_dir)
- if verbose: print " |-> Rendering 2D & 3D traces"
+ if verbose: print(" |-> Rendering 2D & 3D traces")
os.system("cd %s; gnuplot mygraph" % gnuplot_output_dir)
name_of_directory="the current"
if gnuplot_output_dir != "./":
name_of_directory=gnuplot_output_dir
- print "\nRendering traces are available in %s directory" % name_of_directory
+ print("\nRendering traces are available in %s directory" % name_of_directory)
global keep_temp_files
keep_temp_files=False
except:
- print "Could not run gnuplot on mymath or mygraph !\n"
+ print("Could not run gnuplot on mymath or mygraph !\n")
sys.exit(1);
def print_help():
- print 'fio2gnuplot -ghbiodvk -t <title> -o <outputfile> -p <pattern> -G <type> -m <time> -M <time>'
- print
- print '-h --help : Print this help'
- print '-p <pattern> or --pattern <pattern> : A glob pattern to select fio input files'
- print '-b or --bandwidth : A predefined pattern for selecting *_bw.log files'
- print '-i or --iops : A predefined pattern for selecting *_iops.log files'
- print '-g or --gnuplot : Render gnuplot traces before exiting'
- print '-o or --outputfile <file> : The basename for gnuplot traces'
- print ' - Basename is set with the pattern if defined'
- print '-d or --outputdir <dir> : The directory where gnuplot shall render files'
- print '-t or --title <title> : The title of the gnuplot traces'
- print ' - Title is set with the block size detected in fio traces'
- print '-G or --Global <type> : Search for <type> in .global files match by a pattern'
- print ' - Available types are : min, max, avg, stddev'
- print ' - The .global extension is added automatically to the pattern'
- print '-m or --min_time <time> : Only consider data starting from <time> seconds (default is 0)'
- print '-M or --max_time <time> : Only consider data ending before <time> seconds (default is -1 aka nolimit)'
- print '-v or --verbose : Increasing verbosity'
- print '-k or --keep : Keep all temporary files from gnuplot\'s output dir'
+ print('fio2gnuplot -ghbiodvk -t <title> -o <outputfile> -p <pattern> -G <type> -m <time> -M <time>')
+ print()
+ print('-h --help : Print this help')
+ print('-p <pattern> or --pattern <pattern> : A glob pattern to select fio input files')
+ print('-b or --bandwidth : A predefined pattern for selecting *_bw.log files')
+ print('-i or --iops : A predefined pattern for selecting *_iops.log files')
+ print('-g or --gnuplot : Render gnuplot traces before exiting')
+ print('-o or --outputfile <file> : The basename for gnuplot traces')
+ print(' - Basename is set with the pattern if defined')
+ print('-d or --outputdir <dir> : The directory where gnuplot shall render files')
+ print('-t or --title <title> : The title of the gnuplot traces')
+ print(' - Title is set with the block size detected in fio traces')
+ print('-G or --Global <type> : Search for <type> in .global files match by a pattern')
+ print(' - Available types are : min, max, avg, stddev')
+ print(' - The .global extension is added automatically to the pattern')
+ print('-m or --min_time <time> : Only consider data starting from <time> seconds (default is 0)')
+ print('-M or --max_time <time> : Only consider data ending before <time> seconds (default is -1 aka nolimit)')
+ print('-v or --verbose : Increasing verbosity')
+ print('-k or --keep : Keep all temporary files from gnuplot\'s output dir')
def main(argv):
mode='unknown'
if not os.path.isfile(gpm_dir+'math.gpm'):
gpm_dir="/usr/local/share/fio/"
if not os.path.isfile(gpm_dir+'math.gpm'):
- print "Looks like fio didn't get installed properly as no gpm files found in '/usr/share/fio' or '/usr/local/share/fio'\n"
+ print("Looks like fio didn't get installed properly as no gpm files found in '/usr/share/fio' or '/usr/local/share/fio'\n")
sys.exit(3)
try:
opts, args = getopt.getopt(argv[1:],"ghkbivo:d:t:p:G:m:M:",['bandwidth', 'iops', 'pattern', 'outputfile', 'outputdir', 'title', 'min_time', 'max_time', 'gnuplot', 'Global', 'help', 'verbose','keep'])
except getopt.GetoptError:
- print "Error: One of the options passed to the cmdline was not supported"
- print "Please fix your command line or read the help (-h option)"
+ print("Error: One of the options passed to the cmdline was not supported")
+ print("Please fix your command line or read the help (-h option)")
sys.exit(2)
for opt, arg in opts:
fio_data_file=find_file('.',pattern)
if len(fio_data_file) == 0:
- print "No log file found with pattern %s!" % pattern
+ print("No log file found with pattern %s!" % pattern)
# Try numjob log file format if per_numjob_logs=1
if (pattern == '*_bw.log'):
fio_data_file=find_file('.','*_bw.*.log')
if len(fio_data_file) == 0:
sys.exit(1)
else:
- print "Using log file per job format instead"
+ print("Using log file per job format instead")
else:
- print "%d files Selected with pattern '%s'" % (len(fio_data_file), pattern)
+ print("%d files Selected with pattern '%s'" % (len(fio_data_file), pattern))
fio_data_file=sorted(fio_data_file, key=str.lower)
for file in fio_data_file:
- print ' |-> %s' % file
+ print(' |-> %s' % file)
if "_bw.log" in file :
mode="Bandwidth (KB/sec)"
if "_iops.log" in file :
if "IO" in mode:
title='IO benchmark with %d fio results' % len(fio_data_file)
- print
+ print()
#We need to adjust the output filename regarding the pattern required by the user
if (pattern_set_by_user == True):
gnuplot_output_filename=pattern
# Shall we clean the temporary files ?
if keep_temp_files==False and force_keep_temp_files==False:
# Cleaning temporary files
- if verbose: print "Cleaning temporary files"
+ if verbose: print("Cleaning temporary files")
for f in enumerate(temporary_files):
- if verbose: print " -> %s"%f[1]
+ if verbose: print(" -> %s"%f[1])
try:
os.remove(f[1])
except:
#!/usr/bin/python2.7
+# Note: this script is python2 and python 3 compatible.
#
# steadystate_tests.py
#
# if ss attained: min runtime = ss_dur + ss_ramp
# if not attained: runtime = timeout
+from __future__ import absolute_import
+from __future__ import print_function
import os
import sys
import json
import argparse
import subprocess
from scipy import stats
+from six.moves import range
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('fio',
- help='path to fio executable');
+ help='path to fio executable')
parser.add_argument('--read',
help='target for read testing')
parser.add_argument('--write',
data = data[measurement]
mean = sum(data) / len(data)
if slope:
- x = range(len(data))
+ x = list(range(len(data)))
m, intercept, r_value, p_value, std_err = stats.linregress(x,data)
m = abs(m)
if pct:
'output': "set steady state BW threshold to 12" },
]
for test in parsing:
- output = subprocess.check_output([args.fio] + test['args']);
- if test['output'] in output:
- print "PASSED '{0}' found with arguments {1}".format(test['output'], test['args'])
+ output = subprocess.check_output([args.fio] + test['args'])
+ if test['output'] in output.decode():
+ print("PASSED '{0}' found with arguments {1}".format(test['output'], test['args']))
else:
- print "FAILED '{0}' NOT found with arguments {1}".format(test['output'], test['args'])
+ print("FAILED '{0}' NOT found with arguments {1}".format(test['output'], test['args']))
#
# test some read workloads
args.read = '/dev/zero'
extra = [ "--size=134217728" ] # 128 MiB
else:
- print "ERROR: file for read testing must be specified on non-posix systems"
+ print("ERROR: file for read testing must be specified on non-posix systems")
sys.exit(1)
else:
extra = []
else:
result = 'FAILED '
line = result + line + ' no ss, expected runtime {0} ~= actual runtime {1}'.format(expected, actual)
- print line
+ print(line)
if 'steadystate' in jsonjob:
pp.pprint(jsonjob['steadystate'])
jobnum += 1
#include <stdint.h>
#include <string.h>
#include <limits.h>
+#include "lib/nowarn_snprintf.h"
struct thread_rand32_state {
uint32_t s[4];
name++;
} while (1);
- snprintf(out, size, "%s-%s-%d-verify.state", prefix, ename, num);
+ nowarn_snprintf(out, size, "%s-%s-%d-verify.state", prefix, ename, num);
out[size - 1] = '\0';
}
mem_is_zero_slow(io_u->buf, io_u->buflen, &offset);
- log_err("trim: verify failed at file %s offset %llu, length %lu"
+ log_err("trim: verify failed at file %s offset %llu, length %llu"
", block offset %lu\n",
io_u->file->file_name, io_u->offset, io_u->buflen,
(unsigned long) offset);