* Copyright (C) 2015 Jens Axboe <axboe@kernel.dk>
*
*/
+#include <assert.h>
+#include <errno.h>
+#include <pthread.h>
+
#include "fio.h"
#include "ioengines.h"
#include "lib/getrusage.h"
#include "rate-submit.h"
+
+static void check_overlap(struct io_u *io_u)
+{
+ int res;
+
+ /*
+ * Allow only one thread to check for overlap at a time to prevent two
+ * threads from thinking the coast is clear and then submitting IOs
+ * that overlap with each other.
+ *
+ * If an overlap is found, release the lock and re-acquire it before
+ * checking again to give other threads a chance to make progress.
+ *
+ * If no overlap is found, release the lock when the io_u's
+ * IO_U_F_FLIGHT flag is set so that this io_u can be checked by other
+ * threads as they assess overlap.
+ */
+ res = pthread_mutex_lock(&overlap_check);
+ if (fio_unlikely(res != 0)) {
+ log_err("failed to lock overlap check mutex, err: %i:%s", errno, strerror(errno));
+ abort();
+ }
+
+retry:
+ for_each_td(td) {
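+ /* Skip jobs that are not running, do not serialize overlap, or do not use offload submission. */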
+ if (td->runstate <= TD_SETTING_UP ||
+ td->runstate >= TD_FINISHING ||
+ !td->o.serialize_overlap ||
+ td->o.io_submit_mode != IO_MODE_OFFLOAD)
+ continue;
+
+ if (!in_flight_overlap(&td->io_u_all, io_u))
+ continue;
+
+ res = pthread_mutex_unlock(&overlap_check);
+ if (fio_unlikely(res != 0)) {
+ log_err("failed to unlock overlap check mutex, err: %i:%s", errno, strerror(errno));
+ abort();
+ }
+ res = pthread_mutex_lock(&overlap_check);
+ if (fio_unlikely(res != 0)) {
+ log_err("failed to lock overlap check mutex, err: %i:%s", errno, strerror(errno));
+ abort();
+ }
+ goto retry;
+ } end_for_each();
+}
+
static int io_workqueue_fn(struct submit_worker *sw,
struct workqueue_work *work)
{
struct io_u *io_u = container_of(work, struct io_u, work);
const enum fio_ddir ddir = io_u->ddir;
struct thread_data *td = sw->priv;
- int ret;
+ int ret, error;
+
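+ /* With serialize_overlap set, hold off queueing this io_u until it no longer overlaps any in-flight IO. */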
+ if (td->o.serialize_overlap)
+ check_overlap(io_u);
dprint(FD_RATE, "io_u %p queued by %u\n", io_u, gettid());
ret = io_u_queued_complete(td, 1);
if (ret > 0)
td->cur_depth -= ret;
+ else if (ret < 0)
+ break;
io_u_clear(td, io_u, IO_U_F_FLIGHT);
} while (1);
dprint(FD_RATE, "io_u %p ret %d by %u\n", io_u, ret, gettid());
- io_queue_event(td, io_u, &ret, ddir, NULL, 0, NULL);
+ error = io_queue_event(td, io_u, &ret, ddir, NULL, 0, NULL);
if (ret == FIO_Q_COMPLETED)
td->cur_depth--;
ret = io_u_queued_complete(td, min_evts);
if (ret > 0)
td->cur_depth -= ret;
- } else if (ret == FIO_Q_BUSY) {
- ret = io_u_queued_complete(td, td->cur_depth);
- if (ret > 0)
- td->cur_depth -= ret;
+ }
+
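+ /* On error, signal the parent's free_cond so a parent possibly waiting for a free io_u wakes up and can observe the failure. */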
+ if (error || td->error) {
+ pthread_mutex_lock(&td->io_u_lock);
+ pthread_cond_signal(&td->parent->free_cond);
+ pthread_mutex_unlock(&td->io_u_lock);
}
return 0;
{
struct thread_data *td = sw->priv;
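+ /* Skip the pre-sleep flush entirely if this worker has already hit an error. */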
+ if (td->error)
+ return false;
if (td->io_u_queued || td->cur_depth || td->io_u_in_flight)
return true;
dup_files(td, parent);
td->eo = parent->eo;
fio_options_mem_dupe(td);
+ td->iolog_f = parent->iolog_f;
if (ioengine_load(td))
goto err;
if (td_io_init(td))
goto err_io_init;
- set_epoch_time(td, td->o.log_unix_epoch);
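+ /* Run the ioengine's post_init hook, if it has one, now that td_io_init() has succeeded. */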
+ if (td->io_ops->post_init && td->io_ops->post_init(td))
+ goto err_io_init;
+
+ set_epoch_time(td, td->o.log_alternate_epoch_clock_id, td->o.job_start_clock_id);
fio_getrusage(&td->ru_start);
clear_io_state(td, 1);
struct thread_data *td = sw->priv;
(*sum_cnt)++;
- sum_thread_stats(&sw->wq->td->ts, &td->ts, *sum_cnt == 1);
+
+ /*
+ * io_workqueue_update_acct_fn() doesn't support per prio stats, and
+ * even if it did, offload can't be used with all async IO engines.
+ * If group reporting is set in the parent td, the group result
+ * generated by __show_run_stats() can still contain multiple prios
+ * from different offloaded jobs.
+ */
+ sw->wq->td->ts.disable_prio_stat = 1;
+ sum_thread_stats(&sw->wq->td->ts, &td->ts);
fio_options_free(td);
close_and_free_files(td);
sum_val(&dst->this_io_blocks[ddir], &src->this_io_blocks[ddir]);
sum_val(&dst->this_io_bytes[ddir], &src->this_io_bytes[ddir]);
sum_val(&dst->bytes_done[ddir], &src->bytes_done[ddir]);
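+ /* Verified byte counts are only accumulated for the read direction. */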
+ if (ddir == DDIR_READ)
+ sum_val(&dst->bytes_verified, &src->bytes_verified);
pthread_double_unlock(&dst->io_wq.stat_lock, &src->io_wq.stat_lock);
}