#include <sys/stat.h>
#include <sys/wait.h>
#include <sys/ipc.h>
-#include <sys/shm.h>
#include <sys/mman.h>
#include "fio.h"
+#ifndef FIO_NO_HAVE_SHM_H
+#include <sys/shm.h>
+#endif
#include "hash.h"
#include "smalloc.h"
#include "verify.h"
#include "lib/rand.h"
#include "memalign.h"
#include "server.h"
+#include "lib/getrusage.h"
+#include "idletime.h"
+#include "err.h"
static pthread_t disk_util_thread;
static struct fio_mutex *disk_thread_mutex;
static struct fio_mutex *startup_mutex;
-static struct fio_mutex *writeout_mutex;
static struct flist_head *cgroup_list;
static char *cgroup_mnt;
static int exit_value;
ratemin);
return 1;
} else {
- rate = ((bytes - td->rate_bytes[ddir]) * 1000) / spent;
+ if (spent)
+ rate = ((bytes - td->rate_bytes[ddir]) * 1000) / spent;
+ else
+ rate = 0;
+
if (rate < ratemin ||
bytes < td->rate_bytes[ddir]) {
log_err("%s: min rate %u not met, got"
td->o.name, rate_iops);
return 1;
} else {
- rate = ((iops - td->rate_blocks[ddir]) * 1000) / spent;
+ if (spent)
+ rate = ((iops - td->rate_blocks[ddir]) * 1000) / spent;
+ else
+ rate = 0;
+
if (rate < rate_iops_min ||
iops < td->rate_blocks[ddir]) {
log_err("%s: min iops rate %u not met,"
}
static int check_min_rate(struct thread_data *td, struct timeval *now,
- unsigned long *bytes_done)
+ uint64_t *bytes_done)
{
int ret = 0;
*/
static void cleanup_pending_aio(struct thread_data *td)
{
- struct flist_head *entry, *n;
- struct io_u *io_u;
int r;
/*
* now cancel remaining active events
*/
if (td->io_ops->cancel) {
- flist_for_each_safe(entry, n, &td->io_u_busylist) {
- io_u = flist_entry(entry, struct io_u, list);
+ struct io_u *io_u;
+ int i;
- /*
- * if the io_u isn't in flight, then that generally
- * means someone leaked an io_u. complain but fix
- * it up, so we don't stall here.
- */
- if ((io_u->flags & IO_U_F_FLIGHT) == 0) {
- log_err("fio: non-busy IO on busy list\n");
- put_io_u(td, io_u);
- } else {
+ io_u_qiter(&td->io_u_all, io_u, i) {
+ if (io_u->flags & IO_U_F_FLIGHT) {
r = td->io_ops->cancel(td, io_u);
if (!r)
put_io_u(td, io_u);
return 0;
}
+static int fio_file_fsync(struct thread_data *td, struct fio_file *f)
+{
+ int ret;
+
+ if (fio_file_open(f))
+ return fio_io_sync(td, f);
+
+ if (td_io_open_file(td, f))
+ return 1;
+
+ ret = fio_io_sync(td, f);
+ td_io_close_file(td, f);
+ return ret;
+}
+
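The new fio_file_fsync() helper syncs a file even when it is not currently open: it opens the file on demand, issues the sync, closes the file again, and returns non-zero on failure. A rough usage sketch, mirroring the end_fsync loop changed later in this patch:

	for_each_file(td, f, i) {
		if (fio_file_fsync(td, f))
			log_err("fio: end_fsync failed for file %s\n", f->file_name);
	}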
static inline void __update_tv_cache(struct thread_data *td)
{
fio_gettime(&td->tv_cache, NULL);
return 0;
if (!td->o.timeout)
return 0;
- if (mtime_since(&td->epoch, t) >= td->o.timeout * 1000)
+ if (utime_since(&td->epoch, t) >= td->o.timeout)
return 1;
return 0;
return 0;
}
+static void check_update_rusage(struct thread_data *td)
+{
+ if (td->update_rusage) {
+ td->update_rusage = 0;
+ update_rusage_stat(td);
+ fio_mutex_up(td->rusage_sem);
+ }
+}
+
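check_update_rusage() is the job-thread half of a small handshake with whichever context wants fresh resource-usage numbers (e.g. the interim stats display): the requester sets td->update_rusage and blocks on td->rusage_sem, while the job thread refreshes its rusage at a safe point and releases the semaphore. A minimal sketch of the requesting side, using only the fields and mutex calls visible in this patch:

	td->update_rusage = 1;          /* ask the job thread to refresh its rusage */
	fio_mutex_down(td->rusage_sem); /* released by check_update_rusage() */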
/*
* The main verify engine. Runs over the writes we previously submitted,
* reads the blocks back in, and checks the crc/md5 of the data.
*/
-static void do_verify(struct thread_data *td)
+static void do_verify(struct thread_data *td, uint64_t verify_bytes)
{
+ uint64_t bytes_done[DDIR_RWDIR_CNT] = { 0, 0, 0 };
struct fio_file *f;
struct io_u *io_u;
int ret, min_events;
break;
}
+ check_update_rusage(td);
+
if (td->error)
return;
io_u = NULL;
while (!td->terminate) {
+ enum fio_ddir ddir;
int ret2, full;
update_tv_cache(td);
+ check_update_rusage(td);
if (runtime_exceeded(td, &td->tv_cache)) {
__update_tv_cache(td);
if (flow_threshold_exceeded(td))
continue;
- io_u = __get_io_u(td);
- if (!io_u)
- break;
+ if (!td->o.experimental_verify) {
+ io_u = __get_io_u(td);
+ if (!io_u)
+ break;
- if (get_next_verify(td, io_u)) {
- put_io_u(td, io_u);
- break;
- }
+ if (get_next_verify(td, io_u)) {
+ put_io_u(td, io_u);
+ break;
+ }
- if (td_io_prep(td, io_u)) {
- put_io_u(td, io_u);
- break;
+ if (td_io_prep(td, io_u)) {
+ put_io_u(td, io_u);
+ break;
+ }
+ } else {
+ if (ddir_rw_sum(bytes_done) + td->o.rw_min_bs > verify_bytes)
+ break;
+
+ while ((io_u = get_io_u(td)) != NULL) {
+ if (IS_ERR(io_u)) {
+ io_u = NULL;
+ ret = FIO_Q_BUSY;
+ goto reap;
+ }
+
+ /*
+ * We are only interested in the places where
+ * we wrote or trimmed IOs. Turn those into
+ * reads for verification purposes.
+ */
+ if (io_u->ddir == DDIR_READ) {
+ /*
+ * Pretend we issued it for rwmix
+ * accounting
+ */
+ td->io_issues[DDIR_READ]++;
+ put_io_u(td, io_u);
+ continue;
+ } else if (io_u->ddir == DDIR_TRIM) {
+ io_u->ddir = DDIR_READ;
+ io_u->flags |= IO_U_F_TRIMMED;
+ break;
+ } else if (io_u->ddir == DDIR_WRITE) {
+ io_u->ddir = DDIR_READ;
+ break;
+ } else {
+ put_io_u(td, io_u);
+ continue;
+ }
+ }
+
+ if (!io_u)
+ break;
}
if (td->o.verify_async)
else
io_u->end_io = verify_io_u;
+ ddir = io_u->ddir;
+
ret = td_io_queue(td, io_u);
switch (ret) {
case FIO_Q_COMPLETED:
requeue_io_u(td, &io_u);
} else {
sync_done:
- ret = io_u_sync_complete(td, io_u, NULL);
+ ret = io_u_sync_complete(td, io_u, bytes_done);
if (ret < 0)
break;
}
break;
}
- if (break_on_this_error(td, io_u->ddir, &ret))
+ if (break_on_this_error(td, ddir, &ret))
break;
/*
* completed io_u's first. Note that we can get BUSY even
* without IO queued, if the system is resource starved.
*/
+reap:
full = queue_full(td) || (ret == FIO_Q_BUSY && td->cur_depth);
if (full || !td->o.iodepth_batch_complete) {
min_events = min(td->o.iodepth_batch_complete,
* and do the verification on them through
* the callback handler
*/
- if (io_u_queued_complete(td, min_events, NULL) < 0) {
+ if (io_u_queued_complete(td, min_events, bytes_done) < 0) {
ret = -1;
break;
}
break;
}
+ check_update_rusage(td);
+
if (!td->error) {
min_events = td->cur_depth;
dprint(FD_VERIFY, "exiting loop\n");
}
+static unsigned int exceeds_number_ios(struct thread_data *td)
+{
+ unsigned long long number_ios;
+
+ if (!td->o.number_ios)
+ return 0;
+
+ number_ios = ddir_rw_sum(td->this_io_blocks);
+ number_ios += td->io_u_queued + td->io_u_in_flight;
+
+ return number_ios >= td->o.number_ios;
+}
+
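exceeds_number_ios() counts IOs that are already queued or in flight in addition to completed blocks, so submission stops as soon as the configured number_ios can be satisfied by what is already outstanding. For example, with number_ios=1000, 992 blocks completed and 10 IOs queued or in flight, 992 + 10 = 1002 >= 1000 and the limit is reported as reached.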
static int io_bytes_exceeded(struct thread_data *td)
{
- unsigned long long bytes;
+ unsigned long long bytes, limit;
if (td_rw(td))
bytes = td->this_io_bytes[DDIR_READ] + td->this_io_bytes[DDIR_WRITE];
else
bytes = td->this_io_bytes[DDIR_TRIM];
- return bytes >= td->o.size;
+ if (td->o.io_limit)
+ limit = td->o.io_limit;
+ else
+ limit = td->o.size;
+
+ return bytes >= limit || exceeds_number_ios(td);
}
/*
* Main IO worker function. It retrieves io_u's to process and queues
* and reaps them, checking for rate and errors along the way.
+ *
+ * Returns number of bytes written and trimmed.
*/
-static void do_io(struct thread_data *td)
+static uint64_t do_io(struct thread_data *td)
{
+ uint64_t bytes_done[DDIR_RWDIR_CNT] = { 0, 0, 0 };
unsigned int i;
int ret = 0;
+ uint64_t total_bytes, bytes_issued = 0;
if (in_ramp_time(td))
td_set_runstate(td, TD_RAMP);
else
td_set_runstate(td, TD_RUNNING);
+ lat_target_init(td);
+
+ /*
+ * If verify_backlog is enabled, we'll run the verify in this
+ * handler as well. For that case, we may need up to twice the
+ * amount of bytes.
+ */
+ total_bytes = td->o.size;
+ if (td->o.verify != VERIFY_NONE &&
+ (td_write(td) && td->o.verify_backlog))
+ total_bytes += td->o.size;
+
while ((td->o.read_iolog_file && !flist_empty(&td->io_log_list)) ||
(!flist_empty(&td->trim_list)) || !io_bytes_exceeded(td) ||
td->o.time_based) {
struct timeval comp_time;
- unsigned long bytes_done[DDIR_RWDIR_CNT] = { 0, 0, 0 };
int min_evts = 0;
struct io_u *io_u;
int ret2, full;
enum fio_ddir ddir;
+ check_update_rusage(td);
+
if (td->terminate || td->done)
break;
if (flow_threshold_exceeded(td))
continue;
+ if (bytes_issued >= total_bytes)
+ break;
+
io_u = get_io_u(td);
- if (!io_u)
+ if (IS_ERR_OR_NULL(io_u)) {
+ int err = PTR_ERR(io_u);
+
+ io_u = NULL;
+ if (err == -EBUSY) {
+ ret = FIO_Q_BUSY;
+ goto reap;
+ }
+ if (td->o.latency_target)
+ goto reap;
break;
+ }
ddir = io_u->ddir;
*/
if (td->o.verify != VERIFY_NONE && io_u->ddir == DDIR_READ &&
((io_u->flags & IO_U_F_VER_LIST) || !td_rw(td))) {
+
+ if (!td->o.verify_pattern_bytes) {
+ io_u->rand_seed = __rand(&td->__verify_state);
+ if (sizeof(int) != sizeof(long *))
+ io_u->rand_seed *= __rand(&td->__verify_state);
+ }
+
if (td->o.verify_async)
io_u->end_io = verify_io_u_async;
else
else
td_set_runstate(td, TD_RUNNING);
+ /*
+ * Always log the IO before it is issued, so we know the exact
+ * order in which it was issued. The logged unit will track when
+ * the IO has completed.
+ */
+ if (td_write(td) && io_u->ddir == DDIR_WRITE &&
+ td->o.do_verify &&
+ td->o.verify != VERIFY_NONE &&
+ !td->o.experimental_verify)
+ log_io_piece(td, io_u);
+
ret = td_io_queue(td, io_u);
switch (ret) {
case FIO_Q_COMPLETED:
if (io_u->error) {
ret = -io_u->error;
+ unlog_io_piece(td, io_u);
clear_io_u(td, io_u);
} else if (io_u->resid) {
int bytes = io_u->xfer_buflen - io_u->resid;
struct fio_file *f = io_u->file;
+ bytes_issued += bytes;
+
+ trim_io_piece(td, io_u);
+
/*
* zero read, fail
*/
if (!bytes) {
+ unlog_io_piece(td, io_u);
td_verror(td, EIO, "full resid");
put_io_u(td, io_u);
break;
ret = io_u_sync_complete(td, io_u, bytes_done);
if (ret < 0)
break;
+ bytes_issued += io_u->xfer_buflen;
}
break;
case FIO_Q_QUEUED:
*/
if (td->io_ops->commit == NULL)
io_u_queued(td, io_u);
+ bytes_issued += io_u->xfer_buflen;
break;
case FIO_Q_BUSY:
+ unlog_io_piece(td, io_u);
requeue_io_u(td, &io_u);
ret2 = td_io_commit(td);
if (ret2 < 0)
* can get BUSY even without IO queued, if the system is
* resource starved.
*/
+reap:
full = queue_full(td) || (ret == FIO_Q_BUSY && td->cur_depth);
if (full || !td->o.iodepth_batch_complete) {
min_evts = min(td->o.iodepth_batch_complete,
break;
}
}
+ if (!in_ramp_time(td) && td->o.latency_target)
+ lat_target_check(td);
if (td->o.thinktime) {
unsigned long long b;
if (!(b % td->o.thinktime_blocks)) {
int left;
+ io_u_quiesce(td);
+
if (td->o.thinktime_spin)
usec_spin(td->o.thinktime_spin);
}
}
+ check_update_rusage(td);
+
if (td->trim_entries)
- log_err("fio: %d trim entries leaked?\n", td->trim_entries);
+ log_err("fio: %lu trim entries leaked?\n", td->trim_entries);
if (td->o.fill_device && td->error == ENOSPC) {
td->error = 0;
i = td->cur_depth;
if (i) {
- ret = io_u_queued_complete(td, i, NULL);
+ ret = io_u_queued_complete(td, i, bytes_done);
if (td->o.fill_device && td->error == ENOSPC)
td->error = 0;
}
td_set_runstate(td, TD_FSYNCING);
for_each_file(td, f, i) {
- if (!fio_file_open(f))
+ if (!fio_file_fsync(td, f))
continue;
- fio_io_sync(td, f);
+
+ log_err("fio: end_fsync failed for file %s\n",
+ f->file_name);
}
}
} else
*/
if (!ddir_rw_sum(td->this_io_bytes))
td->done = 1;
+
+ return bytes_done[DDIR_WRITE] + bytes_done[DDIR_TRIM];
}
static void cleanup_io_u(struct thread_data *td)
{
- struct flist_head *entry, *n;
struct io_u *io_u;
- flist_for_each_safe(entry, n, &td->io_u_freelist) {
- io_u = flist_entry(entry, struct io_u, list);
+ while ((io_u = io_u_qpop(&td->io_u_freelist)) != NULL) {
+
+ if (td->io_ops->io_u_free)
+ td->io_ops->io_u_free(td, io_u);
- flist_del(&io_u->list);
fio_memfree(io_u, sizeof(*io_u));
}
free_io_mem(td);
+
+ io_u_rexit(&td->io_u_requeues);
+ io_u_qexit(&td->io_u_freelist);
+ io_u_qexit(&td->io_u_all);
}
static int init_io_u(struct thread_data *td)
struct io_u *io_u;
unsigned int max_bs, min_write;
int cl_align, i, max_units;
- int data_xfer = 1;
+ int data_xfer = 1, err;
char *p;
max_units = td->o.iodepth;
- max_bs = max(td->o.max_bs[DDIR_READ], td->o.max_bs[DDIR_WRITE]);
- max_bs = max(td->o.max_bs[DDIR_TRIM], max_bs);
+ max_bs = td_max_bs(td);
min_write = td->o.min_bs[DDIR_WRITE];
td->orig_buffer_size = (unsigned long long) max_bs
* (unsigned long long) max_units;
if ((td->io_ops->flags & FIO_NOIO) || !(td_read(td) || td_write(td)))
data_xfer = 0;
+ err = 0;
+ err += io_u_rinit(&td->io_u_requeues, td->o.iodepth);
+ err += io_u_qinit(&td->io_u_freelist, td->o.iodepth);
+ err += io_u_qinit(&td->io_u_all, td->o.iodepth);
+
+ if (err) {
+ log_err("fio: failed setting up IO queues\n");
+ return 1;
+ }
+
+ /*
+ * if we may later need to do address alignment, then add any
+ * possible adjustment here so that we don't cause a buffer
+ * overflow later. this adjustment may be too much if we get
+ * lucky and the allocator gives us an aligned address.
+ */
+ if (td->o.odirect || td->o.mem_align || td->o.oatomic ||
+ (td->io_ops->flags & FIO_RAWIO))
+ td->orig_buffer_size += page_mask + td->o.mem_align;
+
if (td->o.mem_type == MEM_SHMHUGE || td->o.mem_type == MEM_MMAPHUGE) {
unsigned long bs;
if (data_xfer && allocate_io_mem(td))
return 1;
- if (td->o.odirect || td->o.mem_align ||
+ if (td->o.odirect || td->o.mem_align || td->o.oatomic ||
(td->io_ops->flags & FIO_RAWIO))
p = PAGE_ALIGN(td->orig_buffer) + td->o.mem_align;
else
io_u = ptr;
memset(io_u, 0, sizeof(*io_u));
- INIT_FLIST_HEAD(&io_u->list);
+ INIT_FLIST_HEAD(&io_u->verify_list);
dprint(FD_MEM, "io_u alloc %p, index %u\n", io_u, i);
if (data_xfer) {
* Fill the buffer with the pattern if we are
* going to be doing writes.
*/
- fill_pattern(td, io_u->buf, max_bs, io_u, 0, 0);
+ fill_verify_pattern(td, io_u->buf, max_bs, io_u, 0, 0);
}
}
io_u->index = i;
io_u->flags = IO_U_F_FREE;
- flist_add(&io_u->list, &td->io_u_freelist);
+ io_u_qpush(&td->io_u_freelist, io_u);
+
+ /*
+ * io_u never leaves this stack, used for iteration of all
+ * io_u buffers.
+ */
+ io_u_qpush(&td->io_u_all, io_u);
+
+ if (td->io_ops->io_u_init) {
+ int ret = td->io_ops->io_u_init(td, io_u);
+
+ if (ret) {
+ log_err("fio: failed to init engine data: %d\n", ret);
+ return 1;
+ }
+ }
+
p += max_bs;
}
/*
* Read back and check that the selected scheduler is now the default.
*/
- ret = fread(tmp, 1, sizeof(tmp), f);
+ ret = fread(tmp, sizeof(tmp), 1, f);
if (ferror(f) || ret < 0) {
td_verror(td, errno, "fread");
fclose(f);
return 1;
}
+ tmp[sizeof(tmp) - 1] = '\0';
+
sprintf(tmp2, "[%s]", td->o.ioscheduler);
if (!strstr(tmp, tmp2)) {
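With the size and count arguments swapped, fread() now returns the number of complete sizeof(tmp)-sized elements read (0 or 1) rather than a byte count, and the added NUL termination keeps tmp a valid string for the strstr() check. A small illustration of the two calling conventions, given an open FILE *f:

	char tmp[256];
	size_t n;

	n = fread(tmp, 1, sizeof(tmp), f);  /* n = number of bytes read */
	n = fread(tmp, sizeof(tmp), 1, f);  /* n = 1 only if all sizeof(tmp) bytes were read */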
static int keep_running(struct thread_data *td)
{
+ unsigned long long limit;
+
if (td->done)
return 0;
if (td->o.time_based)
td->o.loops--;
return 1;
}
+ if (exceeds_number_ios(td))
+ return 0;
+
+ if (td->o.io_limit)
+ limit = td->o.io_limit;
+ else
+ limit = td->o.size;
+
+ if (limit != -1ULL && ddir_rw_sum(td->io_bytes) < limit) {
+ uint64_t diff;
+
+ /*
+ * If the difference is less than the minimum IO size, we
+ * are done.
+ */
+ diff = limit - ddir_rw_sum(td->io_bytes);
+ if (diff < td_max_bs(td))
+ return 0;
+
+ if (fio_files_done(td))
+ return 0;
- if (ddir_rw_sum(td->io_bytes) < td->o.size)
return 1;
+ }
return 0;
}
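keep_running() now honours io_limit (falling back to size) and treats the job as done once the remaining budget is smaller than the largest configured block size. As a worked example: with a 1024 KiB limit, 1020 KiB already transferred and a 16 KiB maximum block size, diff = 1024 - 1020 = 4 KiB < 16 KiB, so keep_running() returns 0 and the job stops.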
-static int exec_string(const char *string)
+static int exec_string(struct thread_options *o, const char *string, const char *mode)
{
- int ret, newlen = strlen(string) + 1 + 8;
+ int ret, newlen = strlen(string) + strlen(o->name) + strlen(mode) + 9 + 1;
char *str;
str = malloc(newlen);
- sprintf(str, "sh -c %s", string);
+ sprintf(str, "%s &> %s.%s.txt", string, o->name, mode);
+ log_info("%s : Saving output of %s in %s.%s.txt\n", o->name, mode, o->name, mode);
ret = system(str);
if (ret == -1)
log_err("fio: exec of cmd <%s> failed\n", str);
return ret;
}
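exec_string() drops the redundant explicit "sh -c " prefix (system() already runs the command through the shell) and instead redirects the command's output into a per-job, per-phase file. With a job named randwrite and exec_prerun=./warmup.sh (hypothetical values), system() ends up running:

	sh -c "./warmup.sh &> randwrite.prerun.txt"

The 9 + 1 in the length calculation covers the literal " &> ", "." and ".txt" characters plus the terminating NUL.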
+/*
+ * Dry run to compute correct state of numberio for verification.
+ */
+static uint64_t do_dry_run(struct thread_data *td)
+{
+ uint64_t bytes_done[DDIR_RWDIR_CNT] = { 0, 0, 0 };
+
+ td_set_runstate(td, TD_RUNNING);
+
+ while ((td->o.read_iolog_file && !flist_empty(&td->io_log_list)) ||
+ (!flist_empty(&td->trim_list)) || !io_bytes_exceeded(td)) {
+ struct io_u *io_u;
+ int ret;
+
+ if (td->terminate || td->done)
+ break;
+
+ io_u = get_io_u(td);
+ if (!io_u)
+ break;
+
+ io_u->flags |= IO_U_F_FLIGHT;
+ io_u->error = 0;
+ io_u->resid = 0;
+ if (ddir_rw(acct_ddir(io_u)))
+ td->io_issues[acct_ddir(io_u)]++;
+ if (ddir_rw(io_u->ddir)) {
+ io_u_mark_depth(td, 1);
+ td->ts.total_io_u[io_u->ddir]++;
+ }
+
+ if (td_write(td) && io_u->ddir == DDIR_WRITE &&
+ td->o.do_verify &&
+ td->o.verify != VERIFY_NONE &&
+ !td->o.experimental_verify)
+ log_io_piece(td, io_u);
+
+ ret = io_u_sync_complete(td, io_u, bytes_done);
+ (void) ret;
+ }
+
+ return bytes_done[DDIR_WRITE] + bytes_done[DDIR_TRIM];
+}
+
/*
* Entry point for the thread based jobs. The process based jobs end up
* here as well, after a little setup.
} else
td->pid = gettid();
+ fio_local_clock_init(o->use_thread);
+
dprint(FD_PROCESS, "jobs pid=%d started\n", (int) td->pid);
if (is_backend)
fio_server_send_start(td);
- INIT_FLIST_HEAD(&td->io_u_freelist);
- INIT_FLIST_HEAD(&td->io_u_busylist);
- INIT_FLIST_HEAD(&td->io_u_requeues);
INIT_FLIST_HEAD(&td->io_log_list);
INIT_FLIST_HEAD(&td->io_hist_list);
INIT_FLIST_HEAD(&td->verify_list);
INIT_FLIST_HEAD(&td->trim_list);
+ INIT_FLIST_HEAD(&td->next_rand_list);
pthread_mutex_init(&td->io_u_lock, NULL);
td->io_hist_tree = RB_ROOT;
fio_mutex_down(td->mutex);
dprint(FD_MUTEX, "done waiting on td->mutex\n");
- /*
- * the ->mutex mutex is now no longer used, close it to avoid
- * eating a file descriptor
- */
- fio_mutex_remove(td->mutex);
- td->mutex = NULL;
-
/*
* A new gid requires privilege, so we need to do this before setting
* the uid.
* allocations.
*/
if (o->cpumask_set) {
+ if (o->cpus_allowed_policy == FIO_CPUS_SPLIT) {
+ ret = fio_cpus_split(&o->cpumask, td->thread_number - 1);
+ if (!ret) {
+ log_err("fio: no CPUs set\n");
+ log_err("fio: Try increasing number of available CPUs\n");
+ td_verror(td, EINVAL, "cpus_split");
+ goto err;
+ }
+ }
ret = fio_setaffinity(td->pid, o->cpumask);
if (ret == -1) {
td_verror(td, errno, "cpu_set_affinity");
}
}
- if (fio_pin_memory(td))
- goto err;
-
-#ifdef FIO_HAVE_LIBNUMA
+#ifdef CONFIG_LIBNUMA
/* numa node setup */
- if (td->o.numa_cpumask_set || td->o.numa_memmask_set) {
+ if (o->numa_cpumask_set || o->numa_memmask_set) {
+ struct bitmask *mask;
int ret;
if (numa_available() < 0) {
goto err;
}
- if (td->o.numa_cpumask_set) {
- ret = numa_run_on_node_mask(td->o.numa_cpunodesmask);
+ if (o->numa_cpumask_set) {
+ mask = numa_parse_nodestring(o->numa_cpunodes);
+ ret = numa_run_on_node_mask(mask);
+ numa_free_nodemask(mask);
if (ret == -1) {
td_verror(td, errno, \
"numa_run_on_node_mask failed\n");
}
}
- if (td->o.numa_memmask_set) {
+ if (o->numa_memmask_set) {
+
+ mask = NULL;
+ if (o->numa_memnodes)
+ mask = numa_parse_nodestring(o->numa_memnodes);
- switch (td->o.numa_mem_mode) {
+ switch (o->numa_mem_mode) {
case MPOL_INTERLEAVE:
- numa_set_interleave_mask(td->o.numa_memnodesmask);
+ numa_set_interleave_mask(mask);
break;
case MPOL_BIND:
- numa_set_membind(td->o.numa_memnodesmask);
+ numa_set_membind(mask);
break;
case MPOL_LOCAL:
numa_set_localalloc();
break;
case MPOL_PREFERRED:
- numa_set_preferred(td->o.numa_mem_prefer_node);
+ numa_set_preferred(o->numa_mem_prefer_node);
break;
case MPOL_DEFAULT:
default:
break;
}
+ if (mask)
+ numa_free_nodemask(mask);
+
}
}
#endif
+ if (fio_pin_memory(td))
+ goto err;
+
/*
* May alter parameters that init_io_u() will use, so we need to
* do this first.
}
}
- if (td->o.cgroup && cgroup_setup(td, cgroup_list, &cgroup_mnt))
+ if (o->cgroup && cgroup_setup(td, cgroup_list, &cgroup_mnt))
goto err;
errno = 0;
if (init_random_map(td))
goto err;
- if (o->exec_prerun && exec_string(o->exec_prerun))
+ if (o->exec_prerun && exec_string(o, o->exec_prerun, (const char *)"prerun"))
goto err;
if (o->pre_read) {
fio_verify_init(td);
fio_gettime(&td->epoch, NULL);
- getrusage(RUSAGE_SELF, &td->ru_start);
-
+ fio_getrusage(&td->ru_start);
clear_state = 0;
while (keep_running(td)) {
+ uint64_t verify_bytes;
+
fio_gettime(&td->start, NULL);
memcpy(&td->bw_sample_time, &td->start, sizeof(td->start));
memcpy(&td->iops_sample_time, &td->start, sizeof(td->start));
memcpy(&td->tv_cache, &td->start, sizeof(td->start));
- if (td->o.ratemin[DDIR_READ] || td->o.ratemin[DDIR_WRITE] ||
- td->o.ratemin[DDIR_TRIM]) {
+ if (o->ratemin[DDIR_READ] || o->ratemin[DDIR_WRITE] ||
+ o->ratemin[DDIR_TRIM]) {
memcpy(&td->lastrate[DDIR_READ], &td->bw_sample_time,
sizeof(td->bw_sample_time));
memcpy(&td->lastrate[DDIR_WRITE], &td->bw_sample_time,
prune_io_piece_log(td);
- do_io(td);
+ if (td->o.verify_only && (td_write(td) || td_rw(td)))
+ verify_bytes = do_dry_run(td);
+ else
+ verify_bytes = do_io(td);
clear_state = 1;
if (td->error || td->terminate)
break;
- if (!td->o.do_verify ||
- td->o.verify == VERIFY_NONE ||
+ if (!o->do_verify ||
+ o->verify == VERIFY_NONE ||
(td->io_ops->flags & FIO_UNIDIR))
continue;
fio_gettime(&td->start, NULL);
- do_verify(td);
+ do_verify(td, verify_bytes);
td->ts.runtime[DDIR_READ] += utime_since_now(&td->start);
fio_unpin_memory(td);
- fio_mutex_down(writeout_mutex);
- if (td->bw_log) {
- if (td->o.bw_log_file) {
- finish_log_named(td, td->bw_log,
- td->o.bw_log_file, "bw");
- } else
- finish_log(td, td->bw_log, "bw");
- }
- if (td->lat_log) {
- if (td->o.lat_log_file) {
- finish_log_named(td, td->lat_log,
- td->o.lat_log_file, "lat");
- } else
- finish_log(td, td->lat_log, "lat");
- }
- if (td->slat_log) {
- if (td->o.lat_log_file) {
- finish_log_named(td, td->slat_log,
- td->o.lat_log_file, "slat");
- } else
- finish_log(td, td->slat_log, "slat");
- }
- if (td->clat_log) {
- if (td->o.lat_log_file) {
- finish_log_named(td, td->clat_log,
- td->o.lat_log_file, "clat");
- } else
- finish_log(td, td->clat_log, "clat");
- }
- if (td->iops_log) {
- if (td->o.iops_log_file) {
- finish_log_named(td, td->iops_log,
- td->o.iops_log_file, "iops");
- } else
- finish_log(td, td->iops_log, "iops");
- }
+ fio_writeout_logs(td);
- fio_mutex_up(writeout_mutex);
- if (td->o.exec_postrun)
- exec_string(td->o.exec_postrun);
+ if (o->exec_postrun)
+ exec_string(o, o->exec_postrun, (const char *)"postrun");
if (exitall_on_terminate)
fio_terminate_threads(td->groupid);
log_info("fio: pid=%d, err=%d/%s\n", (int) td->pid, td->error,
td->verror);
- if (td->o.verify_async)
+ if (o->verify_async)
verify_async_exit(td);
close_and_free_files(td);
- close_ioengine(td);
cleanup_io_u(td);
+ close_ioengine(td);
cgroup_shutdown(td, &cgroup_mnt);
if (o->cpumask_set) {
/*
* do this very late, it will log file closing as well
*/
- if (td->o.write_iolog_file)
+ if (o->write_iolog_file)
write_iolog_close(td);
+ fio_mutex_remove(td->rusage_sem);
+ td->rusage_sem = NULL;
+
+ fio_mutex_remove(td->mutex);
+ td->mutex = NULL;
+
td_set_runstate(td, TD_EXITED);
return (void *) (uintptr_t) td->error;
}
struct thread_data *td;
void *data, *ret;
-#ifndef __hpux
+#if !defined(__hpux) && !defined(CONFIG_NO_SHM)
data = shmat(shmid, NULL, 0);
if (data == (void *) -1) {
int __err = errno;
if (WIFSIGNALED(status)) {
int sig = WTERMSIG(status);
- if (sig != SIGTERM)
+ if (sig != SIGTERM && sig != SIGUSR2)
log_err("fio: pid=%d, got signal=%d\n",
(int) td->pid, sig);
td->sig = sig;
exit_value++;
done_secs += mtime_since_now(&td->epoch) / 1000;
+ profile_td_exit(td);
}
if (*nr_running == cputhreads && !pending && realthreads)
fio_terminate_threads(TERMINATE_ALL);
}
+static void do_usleep(unsigned int usecs)
+{
+ check_for_running_stats();
+ usleep(usecs);
+}
+
/*
* Main function for kicking off and reaping jobs, as needed.
*/
static void run_threads(void)
{
struct thread_data *td;
- unsigned long spent;
unsigned int i, todo, nr_running, m_rate, t_rate, nr_started;
+ uint64_t spent;
if (fio_gtod_offload && fio_start_gtod_thread())
return;
+ fio_idle_prof_init();
+
set_sig_handlers();
nr_thread = nr_process = 0;
}
}
+ /* start idle threads before io threads start to run */
+ fio_idle_prof_start();
+
set_genesis_time();
while (todo) {
}
if (td->o.start_delay) {
- spent = mtime_since_genesis();
+ spent = utime_since_genesis();
- if (td->o.start_delay * 1000 > spent)
+ if (td->o.start_delay > spent)
continue;
}
init_disk_util(td);
+ td->rusage_sem = fio_mutex_init(FIO_MUTEX_LOCKED);
+ td->update_rusage = 0;
+
/*
* Set state to created. Thread will transition
* to TD_INITIALIZED when it's done setting up.
if (mtime_since_now(&this_start) > JOB_START_TIMEOUT)
break;
- usleep(100000);
+ do_usleep(100000);
for (i = 0; i < this_jobs; i++) {
td = map[i];
reap_threads(&nr_running, &t_rate, &m_rate);
if (todo)
- usleep(100000);
+ do_usleep(100000);
}
while (nr_running) {
reap_threads(&nr_running, &t_rate, &m_rate);
- usleep(10000);
+ do_usleep(10000);
}
+ fio_idle_prof_stop();
+
update_io_ticks();
}
startup_mutex = fio_mutex_init(FIO_MUTEX_LOCKED);
if (startup_mutex == NULL)
return 1;
- writeout_mutex = fio_mutex_init(FIO_MUTEX_UNLOCKED);
- if (writeout_mutex == NULL)
- return 1;
set_genesis_time();
+ stat_init();
create_disk_util_thread();
cgroup_list = smalloc(sizeof(*cgroup_list));
sfree(cgroup_mnt);
fio_mutex_remove(startup_mutex);
- fio_mutex_remove(writeout_mutex);
fio_mutex_remove(disk_thread_mutex);
+ stat_exit();
return exit_value;
}