X-Git-Url: https://git.kernel.dk/?p=fio.git;a=blobdiff_plain;f=fio.c;h=434b50372832c49952fd2195c4e4377ae3088227;hp=bb2606286650141fbc6de0b2725855a3b09e995f;hb=6a96276c901992b21c3ad93f95ab0a081c7d6bb1;hpb=1ec99eea970609e63f49982f0d052a23ac18848d;ds=sidebyside

diff --git a/fio.c b/fio.c
index bb260628..434b5037 100644
--- a/fio.c
+++ b/fio.c
@@ -42,7 +42,8 @@
 
 unsigned long page_mask;
 unsigned long page_size;
-#define ALIGN(buf) \
+
+#define PAGE_ALIGN(buf) \
 	(char *) (((unsigned long) (buf) + page_mask) & ~page_mask)
 
 int groupid = 0;
@@ -222,7 +223,7 @@ static int __check_min_rate(struct thread_data *td, struct timeval *now,
 			if (rate < ratemin ||
 			    bytes < td->rate_bytes[ddir]) {
 				log_err("%s: min rate %u not met, got"
-					" %luKiB/sec\n", td->o.name,
+					" %luKB/sec\n", td->o.name,
 							ratemin, rate);
 				return 1;
 			}
@@ -387,21 +388,29 @@ static int break_on_this_error(struct thread_data *td, int *retptr)
 		else
 			err = td->error;
 
-		update_error_count(td, err);
-
 		if (td_non_fatal_error(err)) {
 			/*
 			 * Continue with the I/Os in case of
 			 * a non fatal error.
 			 */
+			update_error_count(td, err);
 			td_clear_error(td);
 			*retptr = 0;
 			return 0;
+		} else if (td->o.fill_device && err == ENOSPC) {
+			/*
+			 * We expect to hit this error if
+			 * fill_device option is set.
+			 */
+			td_clear_error(td);
+			td->terminate = 1;
+			return 1;
 		} else {
 			/*
 			 * Stop the I/O in case of a fatal
 			 * error.
 			 */
+			update_error_count(td, err);
 			return 1;
 		}
 	}
@@ -442,18 +451,17 @@ static void do_verify(struct thread_data *td)
 	while (!td->terminate) {
 		int ret2, full;
 
-		io_u = __get_io_u(td);
-		if (!io_u)
-			break;
-
 		update_tv_cache(td);
 
 		if (runtime_exceeded(td, &td->tv_cache)) {
-			put_io_u(td, io_u);
 			td->terminate = 1;
 			break;
 		}
 
+		io_u = __get_io_u(td);
+		if (!io_u)
+			break;
+
 		if (get_next_verify(td, io_u)) {
 			put_io_u(td, io_u);
 			break;
@@ -464,7 +472,10 @@ static void do_verify(struct thread_data *td)
 			break;
 		}
 
-		io_u->end_io = verify_io_u;
+		if (td->o.verify_async)
+			io_u->end_io = verify_io_u_async;
+		else
+			io_u->end_io = verify_io_u;
 
 		ret = td_io_queue(td, io_u);
 		switch (ret) {
@@ -525,7 +536,8 @@ sync_done:
 		 */
 		full = queue_full(td) || ret == FIO_Q_BUSY;
 		if (full || !td->o.iodepth_batch_complete) {
-			min_events = td->o.iodepth_batch_complete;
+			min_events = min(td->o.iodepth_batch_complete,
+					 td->cur_depth);
 
 			if (full && !min_events)
 				min_events = 1;
@@ -570,7 +582,8 @@ static void do_io(struct thread_data *td)
 	else
 		td_set_runstate(td, TD_RUNNING);
 
-	while ((td->this_io_bytes[0] + td->this_io_bytes[1]) < td->o.size) {
+	while ( (td->o.read_iolog_file && !flist_empty(&td->io_log_list)) ||
+		((td->this_io_bytes[0] + td->this_io_bytes[1]) < td->o.size) ) {
 		struct timeval comp_time;
 		unsigned long bytes_done[2] = { 0, 0 };
 		int min_evts = 0;
@@ -580,24 +593,27 @@ static void do_io(struct thread_data *td)
 		if (td->terminate)
 			break;
 
-		io_u = get_io_u(td);
-		if (!io_u)
-			break;
-
 		update_tv_cache(td);
 
 		if (runtime_exceeded(td, &td->tv_cache)) {
-			put_io_u(td, io_u);
 			td->terminate = 1;
 			break;
 		}
 
+		io_u = get_io_u(td);
+		if (!io_u)
+			break;
+
 		/*
 		 * Add verification end_io handler, if asked to verify
 		 * a previously written file.
 		 */
-		if (td->o.verify != VERIFY_NONE && io_u->ddir == DDIR_READ) {
-			io_u->end_io = verify_io_u;
+		if (td->o.verify != VERIFY_NONE && io_u->ddir == DDIR_READ &&
+		    !td_rw(td)) {
+			if (td->o.verify_async)
+				io_u->end_io = verify_io_u_async;
+			else
+				io_u->end_io = verify_io_u;
 			td_set_runstate(td, TD_VERIFYING);
 		} else if (in_ramp_time(td))
 			td_set_runstate(td, TD_RAMP);
@@ -673,7 +689,8 @@ sync_done:
 		 */
 		full = queue_full(td) || ret == FIO_Q_BUSY;
 		if (full || !td->o.iodepth_batch_complete) {
-			min_evts = td->o.iodepth_batch_complete;
+			min_evts = min(td->o.iodepth_batch_complete,
+				       td->cur_depth);
 
 			if (full && !min_evts)
 				min_evts = 1;
@@ -792,8 +809,8 @@ static int init_io_u(struct thread_data *td)
 	if (allocate_io_mem(td))
 		return 1;
 
-	if (td->o.odirect)
-		p = ALIGN(td->orig_buffer);
+	if (td->o.odirect || td->o.mem_align)
+		p = PAGE_ALIGN(td->orig_buffer) + td->o.mem_align;
 	else
 		p = td->orig_buffer;
 
@@ -813,9 +830,11 @@ static int init_io_u(struct thread_data *td)
 		io_u = ptr;
 		memset(io_u, 0, sizeof(*io_u));
 		INIT_FLIST_HEAD(&io_u->list);
+		dprint(FD_MEM, "io_u alloc %p, index %u\n", io_u, i);
 
 		if (!(td->io_ops->flags & FIO_NOIO)) {
 			io_u->buf = p + max_bs * i;
+			dprint(FD_MEM, "io_u %p, mem %p\n", io_u, io_u->buf);
 
 			if (td_write(td) && !td->o.refill_buffers)
 				io_u_fill_buffer(td, io_u, max_bs);
@@ -983,6 +1002,7 @@ static void *thread_main(void *data)
 {
 	unsigned long long runtime[2], elapsed;
 	struct thread_data *td = data;
+	pthread_condattr_t attr;
 	int clear_state;
 
 	if (!td->o.use_thread)
@@ -997,8 +1017,14 @@ static void *thread_main(void *data)
 	INIT_FLIST_HEAD(&td->io_u_requeues);
 	INIT_FLIST_HEAD(&td->io_log_list);
 	INIT_FLIST_HEAD(&td->io_hist_list);
+	INIT_FLIST_HEAD(&td->verify_list);
+	pthread_mutex_init(&td->io_u_lock, NULL);
 	td->io_hist_tree = RB_ROOT;
 
+	pthread_condattr_init(&attr);
+	pthread_cond_init(&td->verify_cond, &attr);
+	pthread_cond_init(&td->free_cond, &attr);
+
 	td_set_runstate(td, TD_INITIALIZED);
 	dprint(FD_MUTEX, "up startup_mutex\n");
 	fio_mutex_up(startup_mutex);
@@ -1022,7 +1048,10 @@ static void *thread_main(void *data)
 	if (init_io_u(td))
 		goto err;
 
-	if (td->o.cpumask_set && fio_setaffinity(td) == -1) {
+	if (td->o.verify_async && verify_async_init(td))
+		goto err;
+
+	if (td->o.cpumask_set && fio_setaffinity(td->pid, td->o.cpumask) == -1) {
 		td_verror(td, errno, "cpu_set_affinity");
 		goto err;
 	}
@@ -1033,7 +1062,7 @@ static void *thread_main(void *data)
 	 */
 	if (td->o.gtod_cpu) {
 		fio_cpu_clear(&td->o.cpumask, td->o.gtod_cpu);
-		if (fio_setaffinity(td) == -1) {
+		if (fio_setaffinity(td->pid, td->o.cpumask) == -1) {
 			td_verror(td, errno, "cpu_set_affinity");
 			goto err;
 		}
@@ -1080,7 +1109,10 @@ static void *thread_main(void *data)
 	clear_state = 0;
 	while (keep_running(td)) {
 		fio_gettime(&td->start, NULL);
-		memcpy(&td->ts.stat_sample_time, &td->start, sizeof(td->start));
+		memcpy(&td->ts.stat_sample_time[0], &td->start,
+				sizeof(td->start));
+		memcpy(&td->ts.stat_sample_time[1], &td->start,
+				sizeof(td->start));
 		memcpy(&td->tv_cache, &td->start, sizeof(td->start));
 
 		if (td->o.ratemin[0] || td->o.ratemin[1])
@@ -1165,6 +1197,10 @@ err:
 	if (td->error)
 		printf("fio: pid=%d, err=%d/%s\n", (int) td->pid, td->error,
 							td->verror);
+
+	if (td->o.verify_async)
+		verify_async_exit(td);
+
 	close_and_free_files(td);
 	close_ioengine(td);
 	cleanup_io_u(td);
@@ -1491,7 +1527,13 @@ static void run_threads(void)
 			*fio_debug_jobp = pid;
 		}
 		dprint(FD_MUTEX, "wait on startup_mutex\n");
-		fio_mutex_down(startup_mutex);
+		if (fio_mutex_down_timeout(startup_mutex, 10)) {
+			log_err("fio: job startup hung? exiting.\n");
+			terminate_threads(TERMINATE_ALL);
+			fio_abort = 1;
+			nr_started--;
+			break;
+		}
 		dprint(FD_MUTEX, "done waiting on startup_mutex\n");
 	}
 
@@ -1581,12 +1623,6 @@ int main(int argc, char *argv[])
 	if (!getenv("LC_NUMERIC"))
 		setlocale(LC_NUMERIC, "en_US");
 
-	if (parse_options(argc, argv))
-		return 1;
-
-	if (!thread_number)
-		return 0;
-
 	ps = sysconf(_SC_PAGESIZE);
 	if (ps < 0) {
 		log_err("Failed to get page size\n");
@@ -1596,6 +1632,14 @@ int main(int argc, char *argv[])
 	page_size = ps;
 	page_mask = ps - 1;
 
+	fio_keywords_init();
+
+	if (parse_options(argc, argv))
+		return 1;
+
+	if (!thread_number)
+		return 0;
+
 	if (write_bw_log) {
 		setup_log(&agg_io_log[DDIR_READ]);
 		setup_log(&agg_io_log[DDIR_WRITE]);