return 0;
}
-static int check_min_rate(struct thread_data *td, struct timeval *now,
- uint64_t *bytes_done)
+static int check_min_rate(struct thread_data *td, struct timeval *now)
{
int ret = 0;
- if (bytes_done[DDIR_READ])
+ if (td->bytes_done[DDIR_READ])
ret |= __check_min_rate(td, now, DDIR_READ);
- if (bytes_done[DDIR_WRITE])
+ if (td->bytes_done[DDIR_WRITE])
ret |= __check_min_rate(td, now, DDIR_WRITE);
- if (bytes_done[DDIR_TRIM])
+ if (td->bytes_done[DDIR_TRIM])
ret |= __check_min_rate(td, now, DDIR_TRIM);
return ret;
/*
* get immediately available events, if any
*/
- r = io_u_queued_complete(td, 0, NULL);
+ r = io_u_queued_complete(td, 0);
if (r < 0)
return;
}
if (td->cur_depth)
- r = io_u_queued_complete(td, td->cur_depth, NULL);
+ r = io_u_queued_complete(td, td->cur_depth);
}
/*
put_io_u(td, io_u);
return 1;
} else if (ret == FIO_Q_QUEUED) {
- if (io_u_queued_complete(td, 1, NULL) < 0)
+ if (io_u_queued_complete(td, 1) < 0)
return 1;
} else if (ret == FIO_Q_COMPLETED) {
if (io_u->error) {
return 1;
}
- if (io_u_sync_complete(td, io_u, NULL) < 0)
+ if (io_u_sync_complete(td, io_u) < 0)
return 1;
} else if (ret == FIO_Q_BUSY) {
if (td_io_commit(td))
}
}
-static int wait_for_completions(struct thread_data *td, struct timeval *time,
- uint64_t *bytes_done)
+static int wait_for_completions(struct thread_data *td, struct timeval *time)
{
const int full = queue_full(td);
int min_evts = 0;
if (full && !min_evts)
min_evts = 1;
- if (time && (should_check_rate(td, DDIR_READ) ||
+ if (time && (__should_check_rate(td, DDIR_READ) ||
__should_check_rate(td, DDIR_WRITE) ||
__should_check_rate(td, DDIR_TRIM)))
fio_gettime(time, NULL);
do {
- ret = io_u_queued_complete(td, min_evts, bytes_done);
+ ret = io_u_queued_complete(td, min_evts);
if (ret < 0)
break;
} while (full && (td->cur_depth > td->o.iodepth_low));
*/
static void do_verify(struct thread_data *td, uint64_t verify_bytes)
{
- uint64_t bytes_done[DDIR_RWDIR_CNT] = { 0, 0, 0 };
struct fio_file *f;
struct io_u *io_u;
int ret, min_events;
break;
}
} else {
- if (ddir_rw_sum(bytes_done) + td->o.rw_min_bs > verify_bytes)
+ if (ddir_rw_sum(td->bytes_done) + td->o.rw_min_bs > verify_bytes)
break;
while ((io_u = get_io_u(td)) != NULL) {
io_u->end_io = verify_io_u;
ddir = io_u->ddir;
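+		/* record the start time so submission latency (slat) is tracked for verify I/O as well */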
+ if (!td->o.disable_slat)
+ fio_gettime(&io_u->start_time, NULL);
ret = td_io_queue(td, io_u);
switch (ret) {
requeue_io_u(td, &io_u);
} else {
sync_done:
- ret = io_u_sync_complete(td, io_u, bytes_done);
+ ret = io_u_sync_complete(td, io_u);
if (ret < 0)
break;
}
reap:
full = queue_full(td) || (ret == FIO_Q_BUSY && td->cur_depth);
if (full || !td->o.iodepth_batch_complete)
- ret = wait_for_completions(td, NULL, bytes_done);
+ ret = wait_for_completions(td, NULL);
if (ret < 0)
break;
min_events = td->cur_depth;
if (min_events)
- ret = io_u_queued_complete(td, min_events, NULL);
+ ret = io_u_queued_complete(td, min_events);
} else
cleanup_pending_aio(td);
if (!td->o.number_ios)
return 0;
- number_ios = ddir_rw_sum(td->this_io_blocks);
+ number_ios = ddir_rw_sum(td->io_blocks);
number_ios += td->io_u_queued + td->io_u_in_flight;
- return number_ios >= td->o.number_ios;
+ return number_ios >= (td->o.number_ios * td->loops);
}
-static int io_bytes_exceeded(struct thread_data *td)
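+/*
+ * Has the amount of issued I/O (or the number of issued I/Os) reached the
+ * limit for this run, i.e. io_limit/size scaled by the loop count?
+ */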
+static int io_issue_bytes_exceeded(struct thread_data *td)
+{
+ unsigned long long bytes, limit;
+
+ if (td_rw(td))
+ bytes = td->io_issue_bytes[DDIR_READ] + td->io_issue_bytes[DDIR_WRITE];
+ else if (td_write(td))
+ bytes = td->io_issue_bytes[DDIR_WRITE];
+ else if (td_read(td))
+ bytes = td->io_issue_bytes[DDIR_READ];
+ else
+ bytes = td->io_issue_bytes[DDIR_TRIM];
+
+ if (td->o.io_limit)
+ limit = td->o.io_limit;
+ else
+ limit = td->o.size;
+
+ limit *= td->loops;
+ return bytes >= limit || exceeds_number_ios(td);
+}
+
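+/*
+ * Same limit check as above, but against the amount of completed I/O.
+ */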
+static int io_complete_bytes_exceeded(struct thread_data *td)
{
unsigned long long bytes, limit;
else
limit = td->o.size;
+ limit *= td->loops;
return bytes >= limit || exceeds_number_ios(td);
}
*/
static uint64_t do_io(struct thread_data *td)
{
- uint64_t bytes_done[DDIR_RWDIR_CNT] = { 0, 0, 0 };
unsigned int i;
int ret = 0;
uint64_t total_bytes, bytes_issued = 0;
lat_target_init(td);
+ total_bytes = td->o.size;
+ /*
+ * Allow random overwrite workloads to write up to io_limit
+ * before starting verification phase as 'size' doesn't apply.
+	 * before starting the verification phase, as 'size' doesn't apply.
+ if (td_write(td) && td_random(td) && td->o.norandommap)
+ total_bytes = max(total_bytes, (uint64_t) td->o.io_limit);
/*
* If verify_backlog is enabled, we'll run the verify in this
* handler as well. For that case, we may need up to twice the
* amount of bytes.
*/
- total_bytes = td->o.size;
if (td->o.verify != VERIFY_NONE &&
(td_write(td) && td->o.verify_backlog))
total_bytes += td->o.size;
+	/*
+	 * In trimwrite mode, each byte is trimmed and then written, so
+	 * allow total_bytes to be twice as big.
+	 */
+ if (td_trimwrite(td))
+ total_bytes += td->total_io_size;
+
while ((td->o.read_iolog_file && !flist_empty(&td->io_log_list)) ||
- (!flist_empty(&td->trim_list)) || !io_bytes_exceeded(td) ||
+ (!flist_empty(&td->trim_list)) || !io_issue_bytes_exceeded(td) ||
td->o.time_based) {
struct timeval comp_time;
struct io_u *io_u;
__should_check_rate(td, DDIR_TRIM))
fio_gettime(&comp_time, NULL);
- ret = io_u_sync_complete(td, io_u, bytes_done);
+ ret = io_u_sync_complete(td, io_u);
if (ret < 0)
break;
bytes_issued += io_u->xfer_buflen;
reap:
full = queue_full(td) || (ret == FIO_Q_BUSY && td->cur_depth);
if (full || !td->o.iodepth_batch_complete)
- ret = wait_for_completions(td, &comp_time, bytes_done);
+ ret = wait_for_completions(td, &comp_time);
if (ret < 0)
break;
- if (!ddir_rw_sum(bytes_done) && !(td->io_ops->flags & FIO_NOIO))
+ if (!ddir_rw_sum(td->bytes_done) &&
+ !(td->io_ops->flags & FIO_NOIO))
continue;
- if (!in_ramp_time(td) && should_check_rate(td, bytes_done)) {
- if (check_min_rate(td, &comp_time, bytes_done)) {
+ if (!in_ramp_time(td) && should_check_rate(td)) {
+ if (check_min_rate(td, &comp_time)) {
if (exitall_on_terminate)
fio_terminate_threads(td->groupid);
td_verror(td, EIO, "check_min_rate");
i = td->cur_depth;
if (i) {
- ret = io_u_queued_complete(td, i, bytes_done);
+ ret = io_u_queued_complete(td, i);
if (td->o.fill_device && td->error == ENOSPC)
td->error = 0;
}
if (!ddir_rw_sum(td->this_io_bytes))
td->done = 1;
- return bytes_done[DDIR_WRITE] + bytes_done[DDIR_TRIM];
+ return td->bytes_done[DDIR_WRITE] + td->bytes_done[DDIR_TRIM];
}
static void cleanup_io_u(struct thread_data *td)
*/
static uint64_t do_dry_run(struct thread_data *td)
{
- uint64_t bytes_done[DDIR_RWDIR_CNT] = { 0, 0, 0 };
-
td_set_runstate(td, TD_RUNNING);
while ((td->o.read_iolog_file && !flist_empty(&td->io_log_list)) ||
- (!flist_empty(&td->trim_list)) || !io_bytes_exceeded(td)) {
+ (!flist_empty(&td->trim_list)) || !io_complete_bytes_exceeded(td)) {
struct io_u *io_u;
int ret;
!td->o.experimental_verify)
log_io_piece(td, io_u);
- ret = io_u_sync_complete(td, io_u, bytes_done);
+ ret = io_u_sync_complete(td, io_u);
(void) ret;
}
- return bytes_done[DDIR_WRITE] + bytes_done[DDIR_TRIM];
+ return td->bytes_done[DDIR_WRITE] + td->bytes_done[DDIR_TRIM];
}
/*
clear_state = 1;
+ /*
+ * Make sure we've successfully updated the rusage stats
+ * before waiting on the stat mutex. Otherwise we could have
+ * the stat thread holding stat mutex and waiting for
+ * the rusage_sem, which would never get upped because
+ * this thread is waiting for the stat mutex.
+ */
+ check_update_rusage(td);
+
fio_mutex_down(stat_mutex);
if (td_read(td) && td->io_bytes[DDIR_READ]) {
elapsed = mtime_since_now(&td->start);
do_verify(td, verify_bytes);
+ /*
+ * See comment further up for why this is done here.
+ */
+ check_update_rusage(td);
+
fio_mutex_down(stat_mutex);
td->ts.runtime[DDIR_READ] += mtime_since_now(&td->start);
fio_gettime(&td->start, NULL);
gettimeofday(&tv, NULL);
ts.tv_sec = tv.tv_sec + sec;
ts.tv_nsec = (tv.tv_usec * 1000) + nsec;
- if (ts.tv_nsec > 1000000000ULL) {
+
+ if (ts.tv_nsec >= 1000000000ULL) {
ts.tv_nsec -= 1000000000ULL;
ts.tv_sec++;
}