X-Git-Url: https://git.kernel.dk/?p=fio.git;a=blobdiff_plain;f=backend.c;h=8fd55359b6e40ce97885c2e92ffdc0f998acfd33;hp=3eafff6e6bbbcca9bc8ef3594ff3f74e0e222013;hb=e250c0a967d683e40889cc33ffc4c4003adc8d35;hpb=fd727d9de9f22a7ad3e026bcca80f58a65410ad6

diff --git a/backend.c b/backend.c
index 3eafff6e..8fd55359 100644
--- a/backend.c
+++ b/backend.c
@@ -100,7 +100,7 @@ static void sig_int(int sig)
 	}
 }
 
-static void sig_show_status(int sig)
+void sig_show_status(int sig)
 {
 	show_running_run_stats();
 }
@@ -179,7 +179,7 @@ static int __check_min_rate(struct thread_data *td, struct timeval *now,
 	if (spent < td->o.ratecycle)
 		return 0;
 
-	if (td->o.rate[ddir]) {
+	if (td->o.rate[ddir] || td->o.ratemin[ddir]) {
 		/*
 		 * check bandwidth specified rate
 		 */
@@ -220,6 +220,7 @@ static int __check_min_rate(struct thread_data *td, struct timeval *now,
 				log_err("%s: min iops rate %u not met,"
 					" got %lu\n", td->o.name,
 					rate_iops_min, rate);
+				return 1;
 			}
 		}
 	}
@@ -445,8 +446,8 @@ static int wait_for_completions(struct thread_data *td, struct timeval *time)
 	/*
 	 * if the queue is full, we MUST reap at least 1 event
 	 */
-	min_evts = min(td->o.iodepth_batch_complete, td->cur_depth);
-	if ((full && !min_evts) || !td->o.iodepth_batch_complete)
+	min_evts = min(td->o.iodepth_batch_complete_min, td->cur_depth);
+	if ((full && !min_evts) || !td->o.iodepth_batch_complete_min)
 		min_evts = 1;
 
 	if (time && (__should_check_rate(td, DDIR_READ) ||
@@ -539,7 +540,7 @@ sync_done:
 		*ret = ret2;
 		break;
 	default:
-		assert(ret < 0);
+		assert(*ret < 0);
 		td_verror(td, -(*ret), "td_io_queue");
 		break;
 	}
@@ -550,6 +551,12 @@ sync_done:
 	return 0;
 }
 
+static inline int io_in_polling(struct thread_data *td)
+{
+	return !td->o.iodepth_batch_complete_min &&
+		!td->o.iodepth_batch_complete_max;
+}
+
 /*
  * The main verify engine. Runs over the writes we previously submitted,
  * reads the blocks back in, and checks the crc/md5 of the data.
@@ -683,7 +690,7 @@ static void do_verify(struct thread_data *td, uint64_t verify_bytes)
 		 */
 reap:
 		full = queue_full(td) || (ret == FIO_Q_BUSY && td->cur_depth);
-		if (full || !td->o.iodepth_batch_complete)
+		if (full || io_in_polling(td))
 			ret = wait_for_completions(td, NULL);
 
 		if (ret < 0)
@@ -762,6 +769,25 @@ static int io_complete_bytes_exceeded(struct thread_data *td)
 	return bytes >= limit || exceeds_number_ios(td);
 }
 
+/*
+ * used to calculate the next io time for rate control
+ *
+ */
+static long long usec_for_io(struct thread_data *td, enum fio_ddir ddir)
+{
+	uint64_t secs, remainder, bps, bytes;
+
+	assert(!(td->flags & TD_F_CHILD));
+	bytes = td->rate_io_issue_bytes[ddir];
+	bps = td->rate_bps[ddir];
+	if (bps) {
+		secs = bytes / bps;
+		remainder = bytes % bps;
+		return remainder * 1000000 / bps + secs * 1000000;
+	} else
+		return 0;
+}
+
 /*
  * Main IO worker function. It retrieves io_u's to process and queues
  * and reaps them, checking for rate and errors along the way.
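Aside: the following standalone sketch is not part of the patch; it only illustrates the arithmetic performed by the newly added usec_for_io() above. Given the bytes issued so far and a target bytes-per-second rate, it computes the microsecond mark at which the next I/O may be issued; splitting the division into whole seconds plus a sub-second remainder keeps the intermediate products within 64 bits. The function name next_issue_usec and the 1 MiB/s example values are illustrative, not fio code.

/* sketch of the rate arithmetic used by usec_for_io(); illustrative only */
#include <stdint.h>
#include <stdio.h>

static long long next_issue_usec(uint64_t issued_bytes, uint64_t bps)
{
	uint64_t secs, remainder;

	if (!bps)
		return 0;

	/* whole seconds worth of data already issued at this rate */
	secs = issued_bytes / bps;
	/* leftover bytes converted to usec without overflowing 64 bits */
	remainder = issued_bytes % bps;
	return remainder * 1000000 / bps + secs * 1000000;
}

int main(void)
{
	/* 1.5 MiB issued at a 1 MiB/s target rate -> next issue at ~1.5s */
	printf("%lld usec\n",
	       next_issue_usec(3 * 512 * 1024ULL, 1024 * 1024ULL));
	return 0;
}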
@@ -891,9 +917,16 @@ static uint64_t do_io(struct thread_data *td)
 			if (td->error)
 				break;
 			ret = workqueue_enqueue(&td->io_wq, io_u);
+
+			if (should_check_rate(td))
+				td->rate_next_io_time[ddir] = usec_for_io(td, ddir);
+
 		} else {
 			ret = td_io_queue(td, io_u);
 
+			if (should_check_rate(td))
+				td->rate_next_io_time[ddir] = usec_for_io(td, ddir);
+
 			if (io_queue_event(td, io_u, &ret, ddir, &bytes_issued,
 					   0, &comp_time))
 				break;
@@ -905,7 +938,7 @@ static uint64_t do_io(struct thread_data *td)
 reap:
 			full = queue_full(td) ||
 				(ret == FIO_Q_BUSY && td->cur_depth);
-			if (full || !td->o.iodepth_batch_complete)
+			if (full || io_in_polling(td))
 				ret = wait_for_completions(td, &comp_time);
 		}
 		if (ret < 0)
@@ -1611,16 +1644,8 @@ static void *thread_main(void *data)
 	td->ts.io_bytes[DDIR_TRIM] = td->io_bytes[DDIR_TRIM];
 
 	if (td->o.verify_state_save && !(td->flags & TD_F_VSTATE_SAVED) &&
-	    (td->o.verify != VERIFY_NONE && td_write(td))) {
-		struct all_io_list *state;
-		size_t sz;
-
-		state = get_all_io_list(td->thread_number, &sz);
-		if (state) {
-			__verify_save_state(state, "local");
-			free(state);
-		}
-	}
+	    (td->o.verify != VERIFY_NONE && td_write(td)))
+		verify_save_state(td->thread_number);
 
 	fio_unpin_memory(td);
 
@@ -1870,7 +1895,7 @@ void check_trigger_file(void)
 		if (nr_clients)
 			fio_clients_send_trigger(trigger_remote_cmd);
 		else {
-			verify_save_state();
+			verify_save_state(IO_LIST_ALL);
 			fio_terminate_threads(TERMINATE_ALL);
 			exec_trigger(trigger_cmd);
 		}
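Aside: a standalone sketch, not taken from fio, of the completion-reaping policy the hunks above move to once iodepth_batch_complete is split into a _min/_max pair. Leaving both at zero is the "polling" case tested by the new io_in_polling() helper; when the reap path does run, it asks for at least one event, and a full queue always reaps at least one event to make room. The names below (reap_opts, polling_mode, min_events_to_reap) are illustrative, not fio's API.

/* illustrative model of the min/max batch-complete reaping decision */
#include <stdio.h>

struct reap_opts {
	unsigned int batch_complete_min;
	unsigned int batch_complete_max;
};

/* both limits left at 0: check for completions on every pass */
static int polling_mode(const struct reap_opts *o)
{
	return !o->batch_complete_min && !o->batch_complete_max;
}

static unsigned int min_events_to_reap(const struct reap_opts *o,
				       unsigned int cur_depth, int queue_full)
{
	unsigned int min_evts;

	/* never wait for more events than are currently in flight */
	min_evts = o->batch_complete_min < cur_depth ?
			o->batch_complete_min : cur_depth;

	/* a full queue, or no configured minimum, reaps at least one event */
	if ((queue_full && !min_evts) || !o->batch_complete_min)
		min_evts = 1;

	return min_evts;
}

int main(void)
{
	struct reap_opts o = { .batch_complete_min = 0, .batch_complete_max = 0 };

	printf("polling: %d, min events with a full queue of 16: %u\n",
	       polling_mode(&o), min_events_to_reap(&o, 16, 1));
	return 0;
}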