X-Git-Url: https://git.kernel.dk/?a=blobdiff_plain;f=rate-submit.c;h=103a80aa13f8e671f06d2b568f7fa8bc1cc342df;hb=0dc6e911832fca2c5cf9a5ac2663b468ef2c4341;hp=3bcb5053d8be1b69d6f68906239ceb6a939a2c44;hpb=c76b661c50fdd085f8ac08c0028b5083b238b8e8;p=fio.git

diff --git a/rate-submit.c b/rate-submit.c
index 3bcb5053..103a80aa 100644
--- a/rate-submit.c
+++ b/rate-submit.c
@@ -4,6 +4,7 @@
  * Copyright (C) 2015 Jens Axboe <axboe@kernel.dk>
  *
  */
+#include <assert.h>
 #include "fio.h"
 #include "ioengines.h"
 #include "lib/getrusage.h"
@@ -11,8 +12,7 @@
 
 static void check_overlap(struct io_u *io_u)
 {
-	int i;
-	struct thread_data *td;
+	int res;
 
 	/*
 	 * Allow only one thread to check for overlap at a time to prevent two
@@ -26,10 +26,11 @@ static void check_overlap(struct io_u *io_u)
 	 * IO_U_F_FLIGHT flag is set so that this io_u can be checked by other
 	 * threads as they assess overlap.
 	 */
-	pthread_mutex_lock(&overlap_check);
+	res = pthread_mutex_lock(&overlap_check);
+	assert(res == 0);
 
 retry:
-	for_each_td(td, i) {
+	for_each_td(td) {
 		if (td->runstate <= TD_SETTING_UP ||
 		    td->runstate >= TD_FINISHING ||
 		    !td->o.serialize_overlap ||
@@ -39,10 +40,12 @@ retry:
 		if (!in_flight_overlap(&td->io_u_all, io_u))
 			continue;
 
-		pthread_mutex_unlock(&overlap_check);
-		pthread_mutex_lock(&overlap_check);
+		res = pthread_mutex_unlock(&overlap_check);
+		assert(res == 0);
+		res = pthread_mutex_lock(&overlap_check);
+		assert(res == 0);
 		goto retry;
-	}
+	} end_for_each();
 }
 
 static int io_workqueue_fn(struct submit_worker *sw,
@@ -93,8 +96,11 @@ static int io_workqueue_fn(struct submit_worker *sw,
 		td->cur_depth -= ret;
 	}
 
-	if (error || td->error)
+	if (error || td->error) {
+		pthread_mutex_lock(&td->io_u_lock);
 		pthread_cond_signal(&td->parent->free_cond);
+		pthread_mutex_unlock(&td->io_u_lock);
+	}
 
 	return 0;
 }
@@ -147,6 +153,7 @@ static int io_workqueue_init_worker_fn(struct submit_worker *sw)
 	dup_files(td, parent);
 	td->eo = parent->eo;
 	fio_options_mem_dupe(td);
+	td->iolog_f = parent->iolog_f;
 
 	if (ioengine_load(td))
 		goto err;
@@ -166,7 +173,7 @@ static int io_workqueue_init_worker_fn(struct submit_worker *sw)
 	if (td->io_ops->post_init && td->io_ops->post_init(td))
 		goto err_io_init;
 
-	set_epoch_time(td, td->o.log_unix_epoch);
+	set_epoch_time(td, td->o.log_unix_epoch | td->o.log_alternate_epoch, td->o.log_alternate_epoch_clock_id);
 	fio_getrusage(&td->ru_start);
 
 	clear_io_state(td, 1);
@@ -188,7 +195,16 @@ static void io_workqueue_exit_worker_fn(struct submit_worker *sw,
 	struct thread_data *td = sw->priv;
 
 	(*sum_cnt)++;
-	sum_thread_stats(&sw->wq->td->ts, &td->ts, *sum_cnt == 1);
+
+	/*
+	 * io_workqueue_update_acct_fn() doesn't support per prio stats, and
+	 * even if it did, offload can't be used with all async IO engines.
+	 * If group reporting is set in the parent td, the group result
+	 * generated by __show_run_stats() can still contain multiple prios
+	 * from different offloaded jobs.
+	 */
+	sw->wq->td->ts.disable_prio_stat = 1;
+	sum_thread_stats(&sw->wq->td->ts, &td->ts);
 
 	fio_options_free(td);
 	close_and_free_files(td);
@@ -247,6 +263,8 @@ static void sum_ddir(struct thread_data *dst, struct thread_data *src,
 	sum_val(&dst->this_io_blocks[ddir], &src->this_io_blocks[ddir]);
 	sum_val(&dst->this_io_bytes[ddir], &src->this_io_bytes[ddir]);
 	sum_val(&dst->bytes_done[ddir], &src->bytes_done[ddir]);
+	if (ddir == DDIR_READ)
+		sum_val(&dst->bytes_verified, &src->bytes_verified);
 
 	pthread_double_unlock(&dst->io_wq.stat_lock, &src->io_wq.stat_lock);
 }