static char *cgroup_mnt;
static int exit_value;
static volatile int fio_abort;
+static unsigned int nr_process = 0;
+static unsigned int nr_thread = 0;
struct io_log *agg_io_log[2];
int groupid = 0;
unsigned int thread_number = 0;
-unsigned int nr_process = 0;
-unsigned int nr_thread = 0;
int shm_id = 0;
int temp_stall_ts;
unsigned long done_secs = 0;
if (full || !td->o.iodepth_batch_complete) {
min_events = min(td->o.iodepth_batch_complete,
td->cur_depth);
- if (full && !min_events && td->o.iodepth_batch_complete != 0)
+ /*
+ * if the queue is full, we MUST reap at least 1 event
+ */
+ if (full && !min_events)
min_events = 1;
do {
dprint(FD_VERIFY, "exiting loop\n");
}
+/*
+ * Return non-zero once this job has transferred at least its configured
+ * ->o.size worth of data.  Which byte counter(s) apply depends on the
+ * job's data direction: reads+writes for mixed jobs, writes only for
+ * write jobs, reads only otherwise.
+ * (this_io_bytes[0] = read bytes, this_io_bytes[1] = write bytes —
+ * inferred from the td_write() branch; confirm against struct thread_data)
+ */
+static int io_bytes_exceeded(struct thread_data *td)
+{
+ unsigned long long bytes;
+
+ if (td_rw(td))
+ bytes = td->this_io_bytes[0] + td->this_io_bytes[1];
+ else if (td_write(td))
+ bytes = td->this_io_bytes[1];
+ else
+ bytes = td->this_io_bytes[0];
+
+ return bytes >= td->o.size;
+}
+
/*
* Main IO worker function. It retrieves io_u's to process and queues
* and reaps them, checking for rate and errors along the way.
else
td_set_runstate(td, TD_RUNNING);
- while ( (td->o.read_iolog_file && !flist_empty(&td->io_log_list)) ||
- (!flist_empty(&td->trim_list)) ||
- ((td->this_io_bytes[0] + td->this_io_bytes[1]) < td->o.size) ) {
+ while ((td->o.read_iolog_file && !flist_empty(&td->io_log_list)) ||
+ (!flist_empty(&td->trim_list)) || !io_bytes_exceeded(td) ||
+ td->o.time_based) {
struct timeval comp_time;
unsigned long bytes_done[2] = { 0, 0 };
int min_evts = 0;
ddir = io_u->ddir;
/*
- * Add verification end_io handler, if asked to verify
- * a previously written file.
+ * Add verification end_io handler if:
+ * - Asked to verify (!td_rw(td))
+ * - Or the io_u is from our verify list (mixed write/ver)
*/
if (td->o.verify != VERIFY_NONE && io_u->ddir == DDIR_READ &&
- !td_rw(td)) {
+ ((io_u->flags & IO_U_F_VER_LIST) || !td_rw(td))) {
if (td->o.verify_async)
io_u->end_io = verify_io_u_async;
else
if (full || !td->o.iodepth_batch_complete) {
min_evts = min(td->o.iodepth_batch_complete,
td->cur_depth);
- if (full && !min_evts && td->o.iodepth_batch_complete != 0)
+ /*
+ * if the queue is full, we MUST reap at least 1 event
+ */
+ if (full && !min_evts)
min_evts = 1;
if (__should_check_rate(td, 0) ||
dprint(FD_PROCESS, "jobs pid=%d started\n", (int) td->pid);
+ if (is_backend)
+ fio_server_send_start(td);
+
INIT_FLIST_HEAD(&td->io_u_freelist);
INIT_FLIST_HEAD(&td->io_u_busylist);
INIT_FLIST_HEAD(&td->io_u_requeues);
* eating a file descriptor
*/
fio_mutex_remove(td->mutex);
+ td->mutex = NULL;
/*
* A new gid requires privilege, so we need to do this before setting
set_sig_handlers();
+ nr_thread = nr_process = 0;
+ for_each_td(td, i) {
+ if (td->o.use_thread)
+ nr_thread++;
+ else
+ nr_process++;
+ }
+
if (!terse_output) {
log_info("Starting ");
if (nr_thread)
reap_threads(&nr_running, &t_rate, &m_rate);
- if (todo) {
- if (is_backend)
- fio_server_idle_loop();
- else
- usleep(100000);
- }
+ if (todo)
+ usleep(100000);
}
while (nr_running) {
reap_threads(&nr_running, &t_rate, &m_rate);
-
- if (is_backend)
- fio_server_idle_loop();
- else
- usleep(10000);
+ usleep(10000);
}
update_io_ticks();
return 0;
if (write_bw_log) {
- setup_log(&agg_io_log[DDIR_READ], 0);
- setup_log(&agg_io_log[DDIR_WRITE], 0);
+ setup_log(&agg_io_log[DDIR_READ], 0, IO_LOG_TYPE_BW);
+ setup_log(&agg_io_log[DDIR_WRITE], 0, IO_LOG_TYPE_BW);
}
startup_mutex = fio_mutex_init(0);