X-Git-Url: https://git.kernel.dk/?a=blobdiff_plain;f=rate-submit.c;h=103a80aa13f8e671f06d2b568f7fa8bc1cc342df;hb=0dc6e911832fca2c5cf9a5ac2663b468ef2c4341;hp=cf00d9bc75c56f057b41a337ae8bdc740aebaa66;hpb=b7119c596912fae8f101e0c1cdf36119f29fe182;p=fio.git

diff --git a/rate-submit.c b/rate-submit.c
index cf00d9bc..103a80aa 100644
--- a/rate-submit.c
+++ b/rate-submit.c
@@ -4,6 +4,7 @@
  * Copyright (C) 2015 Jens Axboe <axboe@kernel.dk>
  *
  */
+#include <assert.h>
 #include "fio.h"
 #include "ioengines.h"
 #include "lib/getrusage.h"
@@ -11,40 +12,40 @@
 
 static void check_overlap(struct io_u *io_u)
 {
-	int i;
-	struct thread_data *td;
-	bool overlap = false;
-
-	do {
-		/*
-		 * Allow only one thread to check for overlap at a
-		 * time to prevent two threads from thinking the coast
-		 * is clear and then submitting IOs that overlap with
-		 * each other
-		 *
-		 * If an overlap is found, release the lock and
-		 * re-acquire it before checking again to give other
-		 * threads a chance to make progress
-		 *
-		 * If an overlap is not found, release the lock when the
-		 * io_u's IO_U_F_FLIGHT flag is set so that this io_u
-		 * can be checked by other threads as they assess overlap
-		 */
-		pthread_mutex_lock(&overlap_check);
-		for_each_td(td, i) {
-			if (td->runstate <= TD_SETTING_UP ||
-			    td->runstate >= TD_FINISHING ||
-			    !td->o.serialize_overlap ||
-			    td->o.io_submit_mode != IO_MODE_OFFLOAD)
-				continue;
-
-			overlap = in_flight_overlap(&td->io_u_all, io_u);
-			if (overlap) {
-				pthread_mutex_unlock(&overlap_check);
-				break;
-			}
-		}
-	} while (overlap);
+	int res;
+
+	/*
+	 * Allow only one thread to check for overlap at a time to prevent two
+	 * threads from thinking the coast is clear and then submitting IOs
+	 * that overlap with each other.
+	 *
+	 * If an overlap is found, release the lock and re-acquire it before
+	 * checking again to give other threads a chance to make progress.
+	 *
+	 * If no overlap is found, release the lock when the io_u's
+	 * IO_U_F_FLIGHT flag is set so that this io_u can be checked by other
+	 * threads as they assess overlap.
+	 */
+	res = pthread_mutex_lock(&overlap_check);
+	assert(res == 0);
+
+retry:
+	for_each_td(td) {
+		if (td->runstate <= TD_SETTING_UP ||
+		    td->runstate >= TD_FINISHING ||
+		    !td->o.serialize_overlap ||
+		    td->o.io_submit_mode != IO_MODE_OFFLOAD)
+			continue;
+
+		if (!in_flight_overlap(&td->io_u_all, io_u))
+			continue;
+
+		res = pthread_mutex_unlock(&overlap_check);
+		assert(res == 0);
+		res = pthread_mutex_lock(&overlap_check);
+		assert(res == 0);
+		goto retry;
+	} end_for_each();
 }
 
 static int io_workqueue_fn(struct submit_worker *sw,
@@ -95,8 +96,11 @@ static int io_workqueue_fn(struct submit_worker *sw,
 		td->cur_depth -= ret;
 	}
 
-	if (error || td->error)
+	if (error || td->error) {
+		pthread_mutex_lock(&td->io_u_lock);
 		pthread_cond_signal(&td->parent->free_cond);
+		pthread_mutex_unlock(&td->io_u_lock);
+	}
 
 	return 0;
 }
@@ -149,6 +153,7 @@ static int io_workqueue_init_worker_fn(struct submit_worker *sw)
 	dup_files(td, parent);
 	td->eo = parent->eo;
 	fio_options_mem_dupe(td);
+	td->iolog_f = parent->iolog_f;
 
 	if (ioengine_load(td))
 		goto err;
@@ -168,7 +173,7 @@ static int io_workqueue_init_worker_fn(struct submit_worker *sw)
 	if (td->io_ops->post_init && td->io_ops->post_init(td))
 		goto err_io_init;
 
-	set_epoch_time(td, td->o.log_unix_epoch);
+	set_epoch_time(td, td->o.log_unix_epoch | td->o.log_alternate_epoch, td->o.log_alternate_epoch_clock_id);
 	fio_getrusage(&td->ru_start);
 	clear_io_state(td, 1);
 
@@ -190,7 +195,16 @@ static void io_workqueue_exit_worker_fn(struct submit_worker *sw,
 	struct thread_data *td = sw->priv;
 
 	(*sum_cnt)++;
-	sum_thread_stats(&sw->wq->td->ts, &td->ts, *sum_cnt == 1);
+
+	/*
+	 * io_workqueue_update_acct_fn() doesn't support per prio stats, and
+	 * even if it did, offload can't be used with all async IO engines.
+	 * If group reporting is set in the parent td, the group result
+	 * generated by __show_run_stats() can still contain multiple prios
+	 * from different offloaded jobs.
+	 */
+	sw->wq->td->ts.disable_prio_stat = 1;
+	sum_thread_stats(&sw->wq->td->ts, &td->ts);
 
 	fio_options_free(td);
 	close_and_free_files(td);
@@ -249,6 +263,8 @@ static void sum_ddir(struct thread_data *dst, struct thread_data *src,
 	sum_val(&dst->this_io_blocks[ddir], &src->this_io_blocks[ddir]);
 	sum_val(&dst->this_io_bytes[ddir], &src->this_io_bytes[ddir]);
 	sum_val(&dst->bytes_done[ddir], &src->bytes_done[ddir]);
+	if (ddir == DDIR_READ)
+		sum_val(&dst->bytes_verified, &src->bytes_verified);
 
 	pthread_double_unlock(&dst->io_wq.stat_lock, &src->io_wq.stat_lock);
 }
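A note on the io_workqueue_fn() hunk above: pthread_cond_signal() is now issued with td->io_u_lock held. Signalling while holding the mutex that protects the waiter's predicate is the usual way to close the window in which a wakeup can be lost (the waiter checks the predicate, the signal fires, then the waiter blocks). The sketch below is a minimal standalone illustration of that pattern; it is not fio code, and the names (lock, cond, done, worker) are invented for the example.

/*
 * sketch.c - illustrative only; assumes nothing beyond POSIX threads.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool done;

static void *worker(void *arg)
{
	(void)arg;

	/*
	 * Change the predicate and signal while holding the mutex. If the
	 * signal were sent without the lock, it could fire between the
	 * waiter's predicate check and its pthread_cond_wait(), and the
	 * wakeup would be lost.
	 */
	pthread_mutex_lock(&lock);
	done = true;
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	if (pthread_create(&t, NULL, worker, NULL))
		return 1;

	pthread_mutex_lock(&lock);
	while (!done)	/* re-check the predicate after every wakeup */
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);

	pthread_join(t, NULL);
	printf("woken after predicate was set\n");
	return 0;
}

Built with something like "cc -pthread sketch.c", this prints only after the waiter has observed the predicate set by the worker, regardless of which thread reaches the lock first.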