X-Git-Url: https://git.kernel.dk/?p=fio.git;a=blobdiff_plain;f=backend.c;h=4c271ba92aba4e71493f53d1ba62c6c0da177240;hp=52791040ff21384d283b2a09de3ff9718e19d866;hb=a462baefd211d1847cefbaa500c8aaad2128374b;hpb=9e684a4976b7934f5ce011ea281dfef3352e5738

diff --git a/backend.c b/backend.c
index 52791040..4c271ba9 100644
--- a/backend.c
+++ b/backend.c
@@ -494,7 +494,10 @@ sync_done:
 		if (full || !td->o.iodepth_batch_complete) {
 			min_events = min(td->o.iodepth_batch_complete,
 					 td->cur_depth);
-			if (full && !min_events && td->o.iodepth_batch_complete != 0)
+			/*
+			 * if the queue is full, we MUST reap at least 1 event
+			 */
+			if (full && !min_events)
 				min_events = 1;
 
 			do {
@@ -526,6 +529,20 @@ sync_done:
 	dprint(FD_VERIFY, "exiting loop\n");
 }
 
+static int io_bytes_exceeded(struct thread_data *td)
+{
+	unsigned long long bytes;
+
+	if (td_rw(td))
+		bytes = td->this_io_bytes[0] + td->this_io_bytes[1];
+	else if (td_write(td))
+		bytes = td->this_io_bytes[1];
+	else
+		bytes = td->this_io_bytes[0];
+
+	return bytes >= td->o.size;
+}
+
 /*
  * Main IO worker function. It retrieves io_u's to process and queues
  * and reaps them, checking for rate and errors along the way.
@@ -540,9 +557,9 @@ static void do_io(struct thread_data *td)
 	else
 		td_set_runstate(td, TD_RUNNING);
 
-	while ( (td->o.read_iolog_file && !flist_empty(&td->io_log_list)) ||
-		(!flist_empty(&td->trim_list)) ||
-		((td->this_io_bytes[0] + td->this_io_bytes[1]) < td->o.size) ) {
+	while ((td->o.read_iolog_file && !flist_empty(&td->io_log_list)) ||
+		(!flist_empty(&td->trim_list)) || !io_bytes_exceeded(td) ||
+		td->o.time_based) {
 		struct timeval comp_time;
 		unsigned long bytes_done[2] = { 0, 0 };
 		int min_evts = 0;
@@ -573,11 +590,12 @@ static void do_io(struct thread_data *td)
 		ddir = io_u->ddir;
 
 		/*
-		 * Add verification end_io handler, if asked to verify
-		 * a previously written file.
+		 * Add verification end_io handler if:
+		 *	- Asked to verify (!td_rw(td))
+		 *	- Or the io_u is from our verify list (mixed write/ver)
 		 */
 		if (td->o.verify != VERIFY_NONE && io_u->ddir == DDIR_READ &&
-		    !td_rw(td)) {
+		    ((io_u->flags & IO_U_F_VER_LIST) || !td_rw(td))) {
 			if (td->o.verify_async)
 				io_u->end_io = verify_io_u_async;
 			else
@@ -662,7 +680,10 @@ sync_done:
 		if (full || !td->o.iodepth_batch_complete) {
 			min_evts = min(td->o.iodepth_batch_complete,
 					td->cur_depth);
-			if (full && !min_evts && td->o.iodepth_batch_complete != 0)
+			/*
+			 * if the queue is full, we MUST reap at least 1 event
+			 */
+			if (full && !min_evts)
 				min_evts = 1;
 
 			if (__should_check_rate(td, 0) ||
@@ -762,12 +783,13 @@ static void cleanup_io_u(struct thread_data *td)
 static int init_io_u(struct thread_data *td)
 {
 	struct io_u *io_u;
-	unsigned int max_bs;
+	unsigned int max_bs, min_write;
 	int cl_align, i, max_units;
 	char *p;
 
 	max_units = td->o.iodepth;
 	max_bs = max(td->o.max_bs[DDIR_READ], td->o.max_bs[DDIR_WRITE]);
+	min_write = td->o.min_bs[DDIR_WRITE];
 	td->orig_buffer_size = (unsigned long long) max_bs
 					* (unsigned long long) max_units;
 
@@ -816,7 +838,7 @@ static int init_io_u(struct thread_data *td)
 		dprint(FD_MEM, "io_u %p, mem %p\n", io_u, io_u->buf);
 
 		if (td_write(td))
-			io_u_fill_buffer(td, io_u, max_bs);
+			io_u_fill_buffer(td, io_u, min_write, max_bs);
 		if (td_write(td) && td->o.verify_pattern_bytes) {
 			/*
 			 * Fill the buffer with the pattern if we are
@@ -1272,6 +1294,7 @@ static void reap_threads(unsigned int *nr_running, unsigned int *t_rate,
 			if (errno == ECHILD) {
 				log_err("fio: pid=%d disappeared %d\n",
 						(int) td->pid, td->runstate);
+				td->sig = ECHILD;
 				td_set_runstate(td, TD_REAPED);
 				goto reaped;
 			}
@@ -1283,6 +1306,7 @@ static void reap_threads(unsigned int *nr_running, unsigned int *t_rate,
 				if (sig != SIGTERM)
 					log_err("fio: pid=%d, got signal=%d\n",
 							(int) td->pid, sig);
+				td->sig = sig;
 				td_set_runstate(td, TD_REAPED);
 				goto reaped;
 			}
@@ -1639,6 +1663,7 @@ int fio_backend(void)
 	for_each_td(td, i)
 		fio_options_free(td);
 
+	free_disk_util();
 	cgroup_kill(cgroup_list);
 	sfree(cgroup_list);
 	sfree(cgroup_mnt);
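
The two reaping hunks apply the same rule in do_verify() and do_io(): with iodepth_batch_complete=0, fio reaps opportunistically and may collect zero events, but once the queue is full it must wait for at least one completion before another io_u can be issued. Below is a minimal sketch of just that floor calculation; the function and parameter names are illustrative stand-ins, not fio's own:

static unsigned int reap_min_events(unsigned int batch_complete,
				    unsigned int cur_depth, int queue_full)
{
	unsigned int min_events;

	/* min(iodepth_batch_complete, cur_depth), spelled out */
	min_events = batch_complete < cur_depth ? batch_complete : cur_depth;

	/* if the queue is full, we MUST reap at least 1 event */
	if (queue_full && !min_events)
		min_events = 1;

	return min_events;
}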
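
The new io_bytes_exceeded() helper makes the do_io() loop condition direction-aware: a mixed job counts reads plus writes against td->o.size, a write-only job counts only writes, and a read-only job counts only reads; the added td->o.time_based term then keeps a time-based job looping even after the size budget is spent. A standalone sketch of that predicate follows, assuming a simplified stand-in struct rather than fio's real thread_data:

#include <stdio.h>

/* Illustrative stand-in for fio's thread_data; only the [0]=read /
 * [1]=write indexing and the option names mirror the patch. */
struct sketch_td {
	unsigned long long this_io_bytes[2];	/* [0] = read, [1] = write */
	unsigned long long size;		/* byte budget for the job */
	int is_read, is_write;			/* td_read()/td_write() stand-ins */
	int time_based;				/* td->o.time_based stand-in */
};

static int io_bytes_exceeded(struct sketch_td *td)
{
	unsigned long long bytes;

	if (td->is_read && td->is_write)	/* td_rw(): count both */
		bytes = td->this_io_bytes[0] + td->this_io_bytes[1];
	else if (td->is_write)			/* write-only job */
		bytes = td->this_io_bytes[1];
	else					/* read-only job */
		bytes = td->this_io_bytes[0];

	return bytes >= td->size;
}

int main(void)
{
	/* 8k read + 8k written against a 16k budget, time_based set */
	struct sketch_td td = { { 8192, 8192 }, 16384, 1, 1, 1 };

	/* tail of the new while condition: the budget is spent, but
	 * the time_based term keeps the loop alive */
	printf("keep running: %d\n", !io_bytes_exceeded(&td) || td.time_based);
	return 0;
}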
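
Finally, the verify hunk widens which completed reads get a verification end_io handler: before the patch only pure verify jobs (!td_rw(td)) qualified; afterwards an io_u carrying IO_U_F_VER_LIST, i.e. one drawn from the verify list of a mixed read/write job, also does. A boolean restatement of the patched condition, hedged in that the parameters here flatten fio's td/io_u state into plain flags:

/* Does this completed io_u need verify handling?  Mirrors the patched
 * condition: verify enabled, direction is READ, and either the io_u is
 * on the verify list or the job is a pure verify (non-rw) job. */
static int needs_verify_end_io(int verify_enabled, int is_read,
			       int on_verify_list, int is_rw_job)
{
	return verify_enabled && is_read && (on_verify_list || !is_rw_job);
}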