X-Git-Url: https://git.kernel.dk/?a=blobdiff_plain;f=backend.c;h=319bd253342e164405d04cc5d1506a1e6f29337e;hb=649cee91722e8e582b30175a1493f2815ae75f06;hp=f3f103038474e3c7e19bd2ece97203080bdab857;hpb=9c42684e32325da26e862280388798343c5f1305;p=fio.git

diff --git a/backend.c b/backend.c
index f3f10303..319bd253 100644
--- a/backend.c
+++ b/backend.c
@@ -56,13 +56,13 @@ static struct flist_head *cgroup_list;
 static char *cgroup_mnt;
 static int exit_value;
 static volatile int fio_abort;
+static unsigned int nr_process = 0;
+static unsigned int nr_thread = 0;
 
 struct io_log *agg_io_log[2];
 
 int groupid = 0;
 unsigned int thread_number = 0;
-unsigned int nr_process = 0;
-unsigned int nr_thread = 0;
 int shm_id = 0;
 int temp_stall_ts;
 unsigned long done_secs = 0;
@@ -335,8 +335,8 @@ static int break_on_this_error(struct thread_data *td, enum fio_ddir ddir,
 			return 1;
 
 		if (td_non_fatal_error(err)) {
-			/* 
-			 * Continue with the I/Os in case of 
+			/*
+			 * Continue with the I/Os in case of
 			 * a non fatal error.
 			 */
 			update_error_count(td, err);
@@ -494,7 +494,10 @@ sync_done:
 		if (full || !td->o.iodepth_batch_complete) {
 			min_events = min(td->o.iodepth_batch_complete,
 					 td->cur_depth);
-			if (full && !min_events && td->o.iodepth_batch_complete != 0)
+			/*
+			 * if the queue is full, we MUST reap at least 1 event
+			 */
+			if (full && !min_events)
 				min_events = 1;
 
 			do {
@@ -526,6 +529,20 @@ sync_done:
 	dprint(FD_VERIFY, "exiting loop\n");
 }
 
+static int io_bytes_exceeded(struct thread_data *td)
+{
+	unsigned long long bytes;
+
+	if (td_rw(td))
+		bytes = td->this_io_bytes[0] + td->this_io_bytes[1];
+	else if (td_write(td))
+		bytes = td->this_io_bytes[1];
+	else
+		bytes = td->this_io_bytes[0];
+
+	return bytes >= td->o.size;
+}
+
 /*
  * Main IO worker function. It retrieves io_u's to process and queues
  * and reaps them, checking for rate and errors along the way.
@@ -540,9 +557,9 @@ static void do_io(struct thread_data *td)
 	else
 		td_set_runstate(td, TD_RUNNING);
 
-	while ( (td->o.read_iolog_file && !flist_empty(&td->io_log_list)) ||
-		(!flist_empty(&td->trim_list)) ||
-		((td->this_io_bytes[0] + td->this_io_bytes[1]) < td->o.size) ) {
+	while ((td->o.read_iolog_file && !flist_empty(&td->io_log_list)) ||
+		(!flist_empty(&td->trim_list)) || !io_bytes_exceeded(td) ||
+		td->o.time_based) {
 		struct timeval comp_time;
 		unsigned long bytes_done[2] = { 0, 0 };
 		int min_evts = 0;
@@ -573,11 +590,12 @@ static void do_io(struct thread_data *td)
 		ddir = io_u->ddir;
 
 		/*
-		 * Add verification end_io handler, if asked to verify
-		 * a previously written file.
+		 * Add verification end_io handler if:
+		 *	- Asked to verify (!td_rw(td))
+		 *	- Or the io_u is from our verify list (mixed write/ver)
 		 */
 		if (td->o.verify != VERIFY_NONE && io_u->ddir == DDIR_READ &&
-		    !td_rw(td)) {
+		    ((io_u->flags & IO_U_F_VER_LIST) || !td_rw(td))) {
 			if (td->o.verify_async)
 				io_u->end_io = verify_io_u_async;
 			else
@@ -662,7 +680,10 @@ sync_done:
 		if (full || !td->o.iodepth_batch_complete) {
 			min_evts = min(td->o.iodepth_batch_complete,
 					td->cur_depth);
-			if (full && !min_evts && td->o.iodepth_batch_complete != 0)
+			/*
+			 * if the queue is full, we MUST reap at least 1 event
+			 */
+			if (full && !min_evts)
 				min_evts = 1;
 
 			if (__should_check_rate(td, 0) ||
@@ -948,6 +969,9 @@ static void *thread_main(void *data)
 
 	dprint(FD_PROCESS, "jobs pid=%d started\n", (int) td->pid);
 
+	if (is_backend)
+		fio_server_send_start(td);
+
 	INIT_FLIST_HEAD(&td->io_u_freelist);
 	INIT_FLIST_HEAD(&td->io_u_busylist);
 	INIT_FLIST_HEAD(&td->io_u_requeues);
@@ -974,6 +998,7 @@ static void *thread_main(void *data)
 	 * eating a file descriptor
 	 */
 	fio_mutex_remove(td->mutex);
+	td->mutex = NULL;
 
 	/*
 	 * A new gid requires privilege, so we need to do this before setting
@@ -1066,9 +1091,9 @@ static void *thread_main(void *data)
 		memcpy(&td->tv_cache, &td->start, sizeof(td->start));
 
 		if (td->o.ratemin[0] || td->o.ratemin[1]) {
-			memcpy(&td->lastrate[0], &td->bw_sample_time, 
+			memcpy(&td->lastrate[0], &td->bw_sample_time,
 				sizeof(td->bw_sample_time));
-			memcpy(&td->lastrate[1], &td->bw_sample_time, 
+			memcpy(&td->lastrate[1], &td->bw_sample_time,
 				sizeof(td->bw_sample_time));
 		}
 
@@ -1335,6 +1360,14 @@ static void run_threads(void)
 
 	set_sig_handlers();
 
+	nr_thread = nr_process = 0;
+	for_each_td(td, i) {
+		if (td->o.use_thread)
+			nr_thread++;
+		else
+			nr_process++;
+	}
+
 	if (!terse_output) {
 		log_info("Starting ");
 		if (nr_thread)
@@ -1534,21 +1567,13 @@ static void run_threads(void)
 
 		reap_threads(&nr_running, &t_rate, &m_rate);
 
-		if (todo) {
-			if (is_backend)
-				fio_server_idle_loop();
-			else
-				usleep(100000);
-		}
+		if (todo)
+			usleep(100000);
 	}
 
 	while (nr_running) {
 		reap_threads(&nr_running, &t_rate, &m_rate);
-
-		if (is_backend)
-			fio_server_idle_loop();
-		else
-			usleep(10000);
+		usleep(10000);
 	}
 
 	update_io_ticks();
@@ -1609,8 +1634,8 @@ int fio_backend(void)
 		return 0;
 
 	if (write_bw_log) {
-		setup_log(&agg_io_log[DDIR_READ], 0);
-		setup_log(&agg_io_log[DDIR_WRITE], 0);
+		setup_log(&agg_io_log[DDIR_READ], 0, IO_LOG_TYPE_BW);
+		setup_log(&agg_io_log[DDIR_WRITE], 0, IO_LOG_TYPE_BW);
 	}
 
 	startup_mutex = fio_mutex_init(0);
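Note on the iodepth_batch_complete hunks above: do_verify() and do_io() previously skipped the forced reap when iodepth_batch_complete was 0, so a full queue could be left asking for zero completions; the patch drops the extra check so a full queue always reaps at least one event. The following stand-alone sketch (not fio code; events_to_reap() and min_uint() are illustrative names) shows the rule the patch applies in both paths:

#include <stdio.h>

/*
 * Sketch of the reap-count rule from the patch: when the queue is full,
 * at least 1 event must be reaped, even if the configured batch size is 0.
 */
static unsigned int min_uint(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

static unsigned int events_to_reap(int full, unsigned int batch_complete,
				   unsigned int cur_depth)
{
	unsigned int min_events = 0;

	if (full || !batch_complete) {
		min_events = min_uint(batch_complete, cur_depth);
		/*
		 * if the queue is full, we MUST reap at least 1 event
		 */
		if (full && !min_events)
			min_events = 1;
	}

	return min_events;
}

int main(void)
{
	/* full queue, batch size 0: old code asked for 0, new code asks for 1 */
	printf("%u\n", events_to_reap(1, 0, 16));
	/* queue not full, batch size 0: still 0, i.e. take whatever completed */
	printf("%u\n", events_to_reap(0, 0, 16));
	return 0;
}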