#include "server.h"
#include "lib/getrusage.h"
#include "idletime.h"
+#include "err.h"
static pthread_t disk_util_thread;
static struct fio_mutex *disk_thread_mutex;
break;
while ((io_u = get_io_u(td)) != NULL) {
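+ /*
+ * get_io_u() can now return an ERR_PTR-encoded error instead of
+ * NULL; treat any such error as BUSY and fall through to reaping
+ * completed IOs.
+ */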
+ if (IS_ERR(io_u)) {
+ io_u = NULL;
+ ret = FIO_Q_BUSY;
+ goto reap;
+ }
+
/*
 * We are only interested in the places where
 * we wrote or trimmed IOs. Turn those into
 * reads for verification purposes.
 */
/*
 * if we can queue more, do so. but check if there are
 * completed io_u's first. Note that we can get BUSY even
 * without IO queued, if the system is resource starved.
 */
+reap:
full = queue_full(td) || (ret == FIO_Q_BUSY && td->cur_depth);
if (full || !td->o.iodepth_batch_complete) {
min_events = min(td->o.iodepth_batch_complete,
uint64_t bytes_done[DDIR_RWDIR_CNT] = { 0, 0, 0 };
unsigned int i;
int ret = 0;
- uint64_t bytes_issued = 0;
+ uint64_t total_bytes, bytes_issued = 0;
if (in_ramp_time(td))
td_set_runstate(td, TD_RAMP);
else
td_set_runstate(td, TD_RUNNING);
+ lat_target_init(td);
+
+ /*
+ * If verify_backlog is enabled, we'll run the verify in this
+ * handler as well. For that case, we may need up to twice the
+ * amount of bytes.
+ */
+ total_bytes = td->o.size;
+ if (td->o.verify != VERIFY_NONE &&
+ (td_write(td) && td->o.verify_backlog))
+ total_bytes += td->o.size;
+
while ((td->o.read_iolog_file && !flist_empty(&td->io_log_list)) ||
(!flist_empty(&td->trim_list)) || !io_bytes_exceeded(td) ||
td->o.time_based) {
if (flow_threshold_exceeded(td))
continue;
- if (bytes_issued >= (uint64_t) td->o.size)
+ if (bytes_issued >= total_bytes)
break;
io_u = get_io_u(td);
- if (!io_u)
+ if (IS_ERR_OR_NULL(io_u)) {
+ int err = PTR_ERR(io_u);
+
+ io_u = NULL;
+ if (err == -EBUSY) {
+ ret = FIO_Q_BUSY;
+ goto reap;
+ }
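+ /*
+ * With a latency target in effect, a NULL io_u usually means
+ * queue depth is being throttled; reap completions instead of
+ * exiting the loop.
+ */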
+ if (td->o.latency_target)
+ goto reap;
break;
+ }
ddir = io_u->ddir;
/*
 * Add verification end_io handler if:
 *	- Asked to verify (!td_rw(td))
 *	- Or the io_u is from our verify list (mixed write/ver)
 */
if (td->o.verify != VERIFY_NONE && io_u->ddir == DDIR_READ &&
((io_u->flags & IO_U_F_VER_LIST) || !td_rw(td))) {
+
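+ /*
+ * With no fixed pattern, the expected buffer contents are
+ * derived from a seeded RNG; draw the seed from the shared
+ * verify state.
+ */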
+ if (!td->o.verify_pattern_bytes) {
+ io_u->rand_seed = __rand(&td->__verify_state);
+ if (sizeof(int) != sizeof(long *))
+ io_u->rand_seed *= __rand(&td->__verify_state);
+ }
+
if (td->o.verify_async)
io_u->end_io = verify_io_u_async;
else
io_u->end_io = verify_io_u;
td_set_runstate(td, TD_VERIFYING);
} else if (in_ramp_time(td))
td_set_runstate(td, TD_RAMP);
else
td_set_runstate(td, TD_RUNNING);
+ /*
+ * Always log IO before it's issued, so we know the specific
+ * order of it. The logged unit will track when the IO has
+ * completed.
+ */
+ if (td_write(td) && io_u->ddir == DDIR_WRITE &&
+ td->o.do_verify &&
+ td->o.verify != VERIFY_NONE &&
+ !td->o.experimental_verify)
+ log_io_piece(td, io_u);
+
ret = td_io_queue(td, io_u);
switch (ret) {
case FIO_Q_COMPLETED:
/*
 * See if we need to complete some commands. Note that we
 * can get BUSY even without IO queued, if the system is
 * resource starved.
 */
+reap:
full = queue_full(td) || (ret == FIO_Q_BUSY && td->cur_depth);
if (full || !td->o.iodepth_batch_complete) {
min_evts = min(td->o.iodepth_batch_complete,
break;
}
}
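+ /*
+ * Once out of ramp time, check whether we are meeting the
+ * configured latency target.
+ */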
+ if (!in_ramp_time(td) && td->o.latency_target)
+ lat_target_check(td);
if (td->o.thinktime) {
unsigned long long b;
/*
 * if we may later need to do address alignment, then add any
 * possible adjustment here so that we don't cause a buffer
 * overflow later. this adjustment may be too much if we get
 * lucky and the allocator gives us an aligned address.
 */
- if (td->o.odirect || td->o.mem_align || (td->io_ops->flags & FIO_RAWIO))
+ if (td->o.odirect || td->o.mem_align || td->o.oatomic ||
+ (td->io_ops->flags & FIO_RAWIO))
td->orig_buffer_size += page_mask + td->o.mem_align;
if (td->o.mem_type == MEM_SHMHUGE || td->o.mem_type == MEM_MMAPHUGE) {
if (data_xfer && allocate_io_mem(td))
return 1;
- if (td->o.odirect || td->o.mem_align ||
+ if (td->o.odirect || td->o.mem_align || td->o.oatomic ||
(td->io_ops->flags & FIO_RAWIO))
p = PAGE_ALIGN(td->orig_buffer) + td->o.mem_align;
else
/*
 * Fill the buffer with the pattern if we are
* going to be doing writes.
*/
- fill_pattern(td, io_u->buf, max_bs, io_u, 0, 0);
+ fill_verify_pattern(td, io_u->buf, max_bs, io_u, 0, 0);
}
}
if (diff < td_max_bs(td))
return 0;
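+ /*
+ * Also stop if every file has already been fully processed,
+ * even when the overall byte count has not been reached.
+ */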
+ if (fio_files_done(td))
+ return 0;
+
return 1;
}
return ret;
}
+/*
+ * Dry run to compute correct state of numberio for verification.
+ */
+static uint64_t do_dry_run(struct thread_data *td)
+{
+ uint64_t bytes_done[DDIR_RWDIR_CNT] = { 0, 0, 0 };
+
+ td_set_runstate(td, TD_RUNNING);
+
+ while ((td->o.read_iolog_file && !flist_empty(&td->io_log_list)) ||
+ (!flist_empty(&td->trim_list)) || !io_bytes_exceeded(td)) {
+ struct io_u *io_u;
+ int ret;
+
+ if (td->terminate || td->done)
+ break;
+
+ io_u = get_io_u(td);
+ if (!io_u)
+ break;
+
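+ /*
+ * Mark the io_u as issued and account for it without touching
+ * the device, then complete it synchronously so the io counters
+ * match a real run.
+ */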
+ io_u->flags |= IO_U_F_FLIGHT;
+ io_u->error = 0;
+ io_u->resid = 0;
+ if (ddir_rw(acct_ddir(io_u)))
+ td->io_issues[acct_ddir(io_u)]++;
+ if (ddir_rw(io_u->ddir)) {
+ io_u_mark_depth(td, 1);
+ td->ts.total_io_u[io_u->ddir]++;
+ }
+
+ ret = io_u_sync_complete(td, io_u, bytes_done);
+ (void) ret;
+ }
+
+ return bytes_done[DDIR_WRITE] + bytes_done[DDIR_TRIM];
+}
+
/*
* Entry point for the thread based jobs. The process based jobs end up
 * here as well, after a little setup.
 */
} else
td->pid = gettid();
+ /*
+ * fio_time_init() may not have been called yet if running as a server
+ */
+ fio_time_init();
+
fio_local_clock_init(o->use_thread);
dprint(FD_PROCESS, "jobs pid=%d started\n", (int) td->pid);
fio_mutex_down(td->mutex);
dprint(FD_MUTEX, "done waiting on td->mutex\n");
- /*
- * the ->mutex mutex is now no longer used, close it to avoid
- * eating a file descriptor
- */
- fio_mutex_remove(td->mutex);
- td->mutex = NULL;
-
/*
* A new gid requires privilege, so we need to do this before setting
 * the uid.
 */
prune_io_piece_log(td);
- verify_bytes = do_io(td);
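+ /*
+ * verify_only: replay the write workload without doing any real
+ * IO, just to reconstruct the state needed for verification.
+ */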
+ if (td->o.verify_only && (td_write(td) || td_rw(td)))
+ verify_bytes = do_dry_run(td);
+ else
+ verify_bytes = do_io(td);
clear_state = 1;
fio_mutex_remove(td->rusage_sem);
td->rusage_sem = NULL;
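+ /*
+ * Freeing of td->mutex is deferred to here, as it remains in
+ * use past job startup.
+ */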
+ fio_mutex_remove(td->mutex);
+ td->mutex = NULL;
+
td_set_runstate(td, TD_EXITED);
return (void *) (uintptr_t) td->error;
}
if (fio_gtod_offload && fio_start_gtod_thread())
return;
fio_idle_prof_init();
set_sig_handlers();