X-Git-Url: https://git.kernel.dk/?p=fio.git;a=blobdiff_plain;f=fio.c;h=ba9e384b6609950a8c500dab96de39d0c8bce904;hp=63d9a1b7ab434ca4eba416ab2fe71bbc9e71385a;hb=90fef2d162fba55ad684c2c80b3b9739b8d16e72;hpb=d529ee1932bc85598900a3ef62f01293af87fbd8

diff --git a/fio.c b/fio.c
index 63d9a1b7..ba9e384b 100644
--- a/fio.c
+++ b/fio.c
@@ -223,7 +223,7 @@ static int __check_min_rate(struct thread_data *td, struct timeval *now,
 			if (rate < ratemin ||
 			    bytes < td->rate_bytes[ddir]) {
 				log_err("%s: min rate %u not met, got"
-					" %luKiB/sec\n", td->o.name,
+					" %luKB/sec\n", td->o.name,
 					ratemin, rate);
 				return 1;
 			}
@@ -388,21 +388,29 @@ static int break_on_this_error(struct thread_data *td, int *retptr)
 		else
 			err = td->error;
 
-		update_error_count(td, err);
-
 		if (td_non_fatal_error(err)) {
 			/*
			 * Continue with the I/Os in case of
			 * a non fatal error.
			 */
+			update_error_count(td, err);
 			td_clear_error(td);
 			*retptr = 0;
 			return 0;
+		} else if (td->o.fill_device && err == ENOSPC) {
+			/*
+			 * We expect to hit this error if
+			 * fill_device option is set.
+			 */
+			td_clear_error(td);
+			td->terminate = 1;
+			return 1;
 		} else {
 			/*
			 * Stop the I/O in case of a fatal
			 * error.
			 */
+			update_error_count(td, err);
 			return 1;
 		}
 	}
@@ -464,7 +472,10 @@ static void do_verify(struct thread_data *td)
 			break;
 		}
 
-		io_u->end_io = verify_io_u;
+		if (td->o.verify_async)
+			io_u->end_io = verify_io_u_async;
+		else
+			io_u->end_io = verify_io_u;
 
 		ret = td_io_queue(td, io_u);
 		switch (ret) {
@@ -596,7 +607,10 @@ static void do_io(struct thread_data *td)
 		 * a previously written file.
 		 */
 		if (td->o.verify != VERIFY_NONE && io_u->ddir == DDIR_READ) {
-			io_u->end_io = verify_io_u;
+			if (td->o.verify_async)
+				io_u->end_io = verify_io_u_async;
+			else
+				io_u->end_io = verify_io_u;
 			td_set_runstate(td, TD_VERIFYING);
 		} else if (in_ramp_time(td))
 			td_set_runstate(td, TD_RAMP);
@@ -791,7 +805,7 @@ static int init_io_u(struct thread_data *td)
 	if (allocate_io_mem(td))
 		return 1;
 
-	if (td->o.mem_align)
+	if (td->o.odirect || td->o.mem_align)
 		p = PAGE_ALIGN(td->orig_buffer) + td->o.mem_align;
 	else
 		p = td->orig_buffer;
@@ -984,6 +998,7 @@ static void *thread_main(void *data)
 {
 	unsigned long long runtime[2], elapsed;
 	struct thread_data *td = data;
+	pthread_condattr_t attr;
 	int clear_state;
 
 	if (!td->o.use_thread)
@@ -998,8 +1013,14 @@ static void *thread_main(void *data)
 	INIT_FLIST_HEAD(&td->io_u_requeues);
 	INIT_FLIST_HEAD(&td->io_log_list);
 	INIT_FLIST_HEAD(&td->io_hist_list);
+	INIT_FLIST_HEAD(&td->verify_list);
+	pthread_mutex_init(&td->io_u_lock, NULL);
 	td->io_hist_tree = RB_ROOT;
 
+	pthread_condattr_init(&attr);
+	pthread_cond_init(&td->verify_cond, &attr);
+	pthread_cond_init(&td->free_cond, &attr);
+
 	td_set_runstate(td, TD_INITIALIZED);
 	dprint(FD_MUTEX, "up startup_mutex\n");
 	fio_mutex_up(startup_mutex);
@@ -1023,7 +1044,10 @@ static void *thread_main(void *data)
 	if (init_io_u(td))
 		goto err;
 
-	if (td->o.cpumask_set && fio_setaffinity(td) == -1) {
+	if (td->o.verify_async && verify_async_init(td))
+		goto err;
+
+	if (td->o.cpumask_set && fio_setaffinity(td->pid, td->o.cpumask) == -1) {
 		td_verror(td, errno, "cpu_set_affinity");
 		goto err;
 	}
@@ -1034,7 +1058,7 @@ static void *thread_main(void *data)
 	 */
 	if (td->o.gtod_cpu) {
 		fio_cpu_clear(&td->o.cpumask, td->o.gtod_cpu);
-		if (fio_setaffinity(td) == -1) {
+		if (fio_setaffinity(td->pid, td->o.cpumask) == -1) {
 			td_verror(td, errno, "cpu_set_affinity");
 			goto err;
 		}
@@ -1176,6 +1200,9 @@ err:
 		td_verror(td, ret, "fio_cpuset_exit");
 	}
 
+	if (td->o.verify_async)
+		verify_async_exit(td);
+
 	/*
	 * do this very late, it will log file closing as well
	 */
@@ -1492,7 +1519,13 @@ static void run_threads(void)
 			*fio_debug_jobp = pid;
 		}
 		dprint(FD_MUTEX, "wait on startup_mutex\n");
-		fio_mutex_down(startup_mutex);
+		if (fio_mutex_down_timeout(startup_mutex, 10)) {
+			log_err("fio: job startup hung? exiting.\n");
+			terminate_threads(TERMINATE_ALL);
+			fio_abort = 1;
+			nr_started--;
+			break;
+		}
 		dprint(FD_MUTEX, "done waiting on startup_mutex\n");
 	}
 
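
The heart of this patch is the asynchronous verify offload: completed read buffers can be handed to verify_io_u_async() instead of being checked inline, which is why thread_main() now initializes td->verify_list, td->io_u_lock, td->verify_cond and td->free_cond and calls verify_async_init()/verify_async_exit(). Below is a minimal sketch of the condition-variable hand-off such a scheme implies. It is not fio's verify.c implementation; the vq_* names and the single-direction queue are invented for illustration (fio presumably uses free_cond for the return path of verified io_us).

/*
 * Illustrative sketch only -- NOT fio's verify.c. The vq_* names are
 * invented; the real code presumably hangs io_us off td->verify_list,
 * protects it with td->io_u_lock and wakes workers via td->verify_cond.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct vq_item {
	struct vq_item *next;
	void *payload;			/* stand-in for a completed io_u */
};

struct verify_queue {
	pthread_mutex_t lock;		/* role of td->io_u_lock */
	pthread_cond_t work_cond;	/* role of td->verify_cond */
	struct vq_item *head;		/* role of td->verify_list */
	int exit;
};

/* I/O path: hand a completed buffer to the verifier and wake it. */
static void vq_push(struct verify_queue *q, struct vq_item *item)
{
	pthread_mutex_lock(&q->lock);
	item->next = q->head;
	q->head = item;
	pthread_cond_signal(&q->work_cond);
	pthread_mutex_unlock(&q->lock);
}

/* Verifier thread: sleep until work or shutdown arrives, then drain the list. */
static void *vq_worker(void *data)
{
	struct verify_queue *q = data;

	for (;;) {
		struct vq_item *item;

		pthread_mutex_lock(&q->lock);
		while (!q->head && !q->exit)
			pthread_cond_wait(&q->work_cond, &q->lock);
		if (!q->head) {
			/* exit was requested and nothing is left to verify */
			pthread_mutex_unlock(&q->lock);
			break;
		}
		item = q->head;
		q->head = item->next;
		pthread_mutex_unlock(&q->lock);

		/* real code would checksum item->payload here */
		printf("verifying %p\n", (void *) item);
		free(item);
	}
	return NULL;
}

int main(void)
{
	struct verify_queue q;
	pthread_t thr;

	q.head = NULL;
	q.exit = 0;
	pthread_mutex_init(&q.lock, NULL);
	pthread_cond_init(&q.work_cond, NULL);

	pthread_create(&thr, NULL, vq_worker, &q);
	vq_push(&q, calloc(1, sizeof(struct vq_item)));

	/* shutdown: raise the flag under the lock, wake the worker, join */
	pthread_mutex_lock(&q.lock);
	q.exit = 1;
	pthread_cond_signal(&q.work_cond);
	pthread_mutex_unlock(&q.lock);
	pthread_join(thr, NULL);

	return 0;
}

The while loop around pthread_cond_wait() is the standard guard against spurious wakeups. The fio_mutex_down_timeout() call that replaces fio_mutex_down() in run_threads() expresses the same idea as a bounded wait, presumably built on pthread_cond_timedwait(), so a hung job startup is reported after 10 seconds instead of blocking forever.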