X-Git-Url: https://git.kernel.dk/?a=blobdiff_plain;f=backend.c;h=d67a4a07c57ca3582ac40ec461b9b3332a242142;hb=2091760e59615146d7cce41afc8d38e6d74eda97;hp=e5bb4e259034b29af86fbaed018ae64c7bb288db;hpb=74ee19043ebb12dd6b0aa243f8cdb7ccd63af857;p=fio.git

diff --git a/backend.c b/backend.c
index e5bb4e25..d67a4a07 100644
--- a/backend.c
+++ b/backend.c
@@ -90,6 +90,22 @@ static void sig_int(int sig)
 	}
 }
 
+#ifdef WIN32
+static void sig_break(int sig)
+{
+	sig_int(sig);
+
+	/**
+	 * Windows terminates all job processes on SIGBREAK after the handler
+	 * returns, so give them time to wrap-up and give stats
+	 */
+	for_each_td(td) {
+		while (td->runstate < TD_EXITED)
+			sleep(1);
+	} end_for_each();
+}
+#endif
+
 void sig_show_status(int sig)
 {
 	show_running_run_stats();
@@ -112,7 +128,7 @@ static void set_sig_handlers(void)
 	/* Windows uses SIGBREAK as a quit signal from other applications */
 #ifdef WIN32
 	memset(&act, 0, sizeof(act));
-	act.sa_handler = sig_int;
+	act.sa_handler = sig_break;
 	act.sa_flags = SA_RESTART;
 	sigaction(SIGBREAK, &act, NULL);
 #endif
@@ -618,15 +634,6 @@ static void do_verify(struct thread_data *td, uint64_t verify_bytes)
 	if (td->error)
 		return;
 
-	/*
-	 * verify_state needs to be reset before verification
-	 * proceeds so that expected random seeds match actual
-	 * random seeds in headers. The main loop will reset
-	 * all random number generators if randrepeat is set.
-	 */
-	if (!td->o.rand_repeatable)
-		td_fill_verify_state_seed(td);
-
 	td_set_runstate(td, TD_VERIFYING);
 
 	io_u = NULL;
@@ -663,7 +670,7 @@ static void do_verify(struct thread_data *td, uint64_t verify_bytes)
 				break;
 			}
 		} else {
-			if (ddir_rw_sum(td->bytes_done) + td->o.rw_min_bs > verify_bytes)
+			if (td->bytes_verified + td->o.rw_min_bs > verify_bytes)
 				break;
 
 			while ((io_u = get_io_u(td)) != NULL) {
@@ -692,6 +699,8 @@
 					break;
 				} else if (io_u->ddir == DDIR_WRITE) {
 					io_u->ddir = DDIR_READ;
+					io_u->numberio = td->verify_read_issues;
+					td->verify_read_issues++;
 					populate_verify_io_u(td, io_u);
 					break;
 				} else {
@@ -845,6 +854,7 @@ static void handle_thinktime(struct thread_data *td, enum fio_ddir ddir,
 			       struct timespec *time)
 {
 	unsigned long long b;
+	unsigned long long runtime_left;
 	uint64_t total;
 	int left;
 	struct timespec now;
@@ -853,7 +863,7 @@ static void handle_thinktime(struct thread_data *td, enum fio_ddir ddir,
 	if (td->o.thinktime_iotime) {
 		fio_gettime(&now, NULL);
 		if (utime_since(&td->last_thinktime, &now)
-		    >= td->o.thinktime_iotime + td->o.thinktime) {
+		    >= td->o.thinktime_iotime) {
 			stall = true;
 		} else if (!fio_option_is_set(&td->o, thinktime_blocks)) {
 			/*
@@ -876,11 +886,24 @@
 
 	io_u_quiesce(td);
 
+	left = td->o.thinktime_spin;
+	if (td->o.timeout) {
+		runtime_left = td->o.timeout - utime_since_now(&td->epoch);
+		if (runtime_left < (unsigned long long)left)
+			left = runtime_left;
+	}
+
 	total = 0;
-	if (td->o.thinktime_spin)
-		total = usec_spin(td->o.thinktime_spin);
+	if (left)
+		total = usec_spin(left);
 
 	left = td->o.thinktime - total;
+	if (td->o.timeout) {
+		runtime_left = td->o.timeout - utime_since_now(&td->epoch);
+		if (runtime_left < (unsigned long long)left)
+			left = runtime_left;
+	}
+
 	if (left)
 		total += usec_sleep(td, left);
 
@@ -909,8 +932,10 @@
 		fio_gettime(time, NULL);
 
 	td->last_thinktime_blocks = b;
-	if (td->o.thinktime_iotime)
+	if (td->o.thinktime_iotime) {
+		fio_gettime(&now, NULL);
 		td->last_thinktime = now;
+	}
 }
 
 /*
@@ -952,9 +977,11 @@ static void do_io(struct thread_data *td, uint64_t *bytes_done)
 		total_bytes += td->o.size;
 
 	/* In trimwrite mode, each byte is trimmed and then written, so
-	 * allow total_bytes to be twice as big */
-	if (td_trimwrite(td))
+	 * allow total_bytes or number of ios to be twice as big */
+	if (td_trimwrite(td)) {
 		total_bytes += td->total_io_size;
+		td->o.number_ios *= 2;
+	}
 
 	while ((td->o.read_iolog_file && !flist_empty(&td->io_log_list)) ||
 		(!flist_empty(&td->trim_list)) || !io_issue_bytes_exceeded(td) ||
@@ -1009,8 +1036,13 @@
 			break;
 		}
 
-		if (io_u->ddir == DDIR_WRITE && td->flags & TD_F_DO_VERIFY)
-			populate_verify_io_u(td, io_u);
+		if (io_u->ddir == DDIR_WRITE && td->flags & TD_F_DO_VERIFY) {
+			if (!(io_u->flags & IO_U_F_PATTERN_DONE)) {
+				io_u_set(td, io_u, IO_U_F_PATTERN_DONE);
+				io_u->numberio = td->io_issues[io_u->ddir];
+				populate_verify_io_u(td, io_u);
+			}
+		}
 
 		ddir = io_u->ddir;
 
@@ -1276,7 +1308,8 @@ static int init_io_u(struct thread_data *td)
 		}
 	}
 
-	init_io_u_buffers(td);
+	if (init_io_u_buffers(td))
+		return 1;
 
 	if (init_file_completion_logging(td, max_units))
 		return 1;
@@ -1307,7 +1340,7 @@ int init_io_u_buffers(struct thread_data *td)
 	 * overflow later. this adjustment may be too much if we get
 	 * lucky and the allocator gives us an aligned address.
 	 */
-	if (td->o.odirect || td->o.mem_align || td->o.oatomic ||
+	if (td->o.odirect || td->o.mem_align ||
 	    td_ioengine_flagged(td, FIO_RAWIO))
 		td->orig_buffer_size += page_mask + td->o.mem_align;
 
@@ -1326,7 +1359,7 @@
 	if (data_xfer && allocate_io_mem(td))
 		return 1;
 
-	if (td->o.odirect || td->o.mem_align || td->o.oatomic ||
+	if (td->o.odirect || td->o.mem_align ||
 	    td_ioengine_flagged(td, FIO_RAWIO))
 		p = PTR_ALIGN(td->orig_buffer, page_mask) + td->o.mem_align;
 	else
@@ -1600,7 +1633,7 @@ static void *thread_main(void *data)
 	uint64_t bytes_done[DDIR_RWDIR_CNT];
 	int deadlock_loop_cnt;
 	bool clear_state;
-	int res, ret;
+	int ret;
 
 	sk_out_assign(sk_out);
 	free(fd);
@@ -1770,6 +1803,11 @@
 	if (td_io_init(td))
 		goto err;
 
+	if (td_ioengine_flagged(td, FIO_SYNCIO) && td->o.iodepth > 1 && td->o.io_submit_mode != IO_MODE_OFFLOAD) {
+		log_info("note: both iodepth >= 1 and synchronous I/O engine "
+			 "are selected, queue depth will be capped at 1\n");
+	}
+
 	if (init_io_u(td))
 		goto err;
 
@@ -1847,8 +1885,12 @@
 		if (td->o.verify_only && td_write(td))
 			verify_bytes = do_dry_run(td);
 		else {
+			if (!td->o.rand_repeatable)
+				/* save verify rand state to replay hdr seeds later at verify */
+				frand_copy(&td->verify_state_last_do_io, &td->verify_state);
 			do_io(td, bytes_done);
-
+			if (!td->o.rand_repeatable)
+				frand_copy(&td->verify_state, &td->verify_state_last_do_io);
 			if (!ddir_rw_sum(bytes_done)) {
 				fio_mark_td_terminate(td);
 				verify_bytes = 0;
@@ -1888,7 +1930,8 @@
 			}
 		} while (1);
 
-		if (td_read(td) && td->io_bytes[DDIR_READ])
+		if (td->io_bytes[DDIR_READ] && (td_read(td) ||
+		    ((td->flags & TD_F_VER_BACKLOG) && td_write(td))))
 			update_runtime(td, elapsed_us, DDIR_READ);
 		if (td_write(td) && td->io_bytes[DDIR_WRITE])
 			update_runtime(td, elapsed_us, DDIR_WRITE);
@@ -1931,13 +1974,23 @@
 	 * another thread is checking its io_u's for overlap
 	 */
 	if (td_offload_overlap(td)) {
-		int res = pthread_mutex_lock(&overlap_check);
-		assert(res == 0);
+		int res;
+
+		res = pthread_mutex_lock(&overlap_check);
+		if (res) {
+			td->error = errno;
+			goto err;
+		}
 	}
 	td_set_runstate(td, TD_FINISHING);
 	if (td_offload_overlap(td)) {
+		int res;
+
 		res = pthread_mutex_unlock(&overlap_check);
-		assert(res == 0);
+		if (res) {
+			td->error = errno;
+			goto err;
+		}
 	}
 
 	update_rusage_stat(td);
@@ -2010,15 +2063,14 @@
 static void reap_threads(unsigned int *nr_running, uint64_t *t_rate,
 			 uint64_t *m_rate)
 {
-	struct thread_data *td;
 	unsigned int cputhreads, realthreads, pending;
-	int i, status, ret;
+	int status, ret;
 
 	/*
 	 * reap exited threads (TD_EXITED -> TD_REAPED)
	 */
 	realthreads = pending = cputhreads = 0;
-	for_each_td(td, i) {
+	for_each_td(td) {
 		int flags = 0;
 
 		if (!strcmp(td->o.ioengine, "cpuio"))
@@ -2111,7 +2163,7 @@ reaped:
 		done_secs += mtime_since_now(&td->epoch) / 1000;
 		profile_td_exit(td);
 		flow_exit_job(td);
-	}
+	} end_for_each();
 
 	if (*nr_running == cputhreads && !pending && realthreads)
 		fio_terminate_threads(TERMINATE_ALL, TERMINATE_ALL);
@@ -2238,13 +2290,11 @@ static bool waitee_running(struct thread_data *me)
 {
 	const char *waitee = me->o.wait_for;
 	const char *self = me->o.name;
-	struct thread_data *td;
-	int i;
 
 	if (!waitee)
 		return false;
 
-	for_each_td(td, i) {
+	for_each_td(td) {
 		if (!strcmp(td->o.name, self) || strcmp(td->o.name, waitee))
 			continue;
 
@@ -2254,7 +2304,7 @@ static bool waitee_running(struct thread_data *me)
 				runstate_to_name(td->runstate));
 			return true;
 		}
-	}
+	} end_for_each();
 
 	dprint(FD_PROCESS, "%s: %s completed, can run\n", self, waitee);
 	return false;
@@ -2278,14 +2328,14 @@ static void run_threads(struct sk_out *sk_out)
 	set_sig_handlers();
 
 	nr_thread = nr_process = 0;
-	for_each_td(td, i) {
+	for_each_td(td) {
 		if (check_mount_writes(td))
 			return;
 		if (td->o.use_thread)
 			nr_thread++;
 		else
 			nr_process++;
-	}
+	} end_for_each();
 
 	if (output_format & FIO_OUTPUT_NORMAL) {
 		struct buf_output out;
@@ -2311,7 +2361,7 @@
 	nr_started = 0;
 	m_rate = t_rate = 0;
 
-	for_each_td(td, i) {
+	for_each_td(td) {
 		print_status_init(td->thread_number - 1);
 
 		if (!td->o.create_serialize)
@@ -2347,7 +2397,7 @@ reap:
 					td_io_close_file(td, f);
 			}
 		}
-	}
+	} end_for_each();
 
 	/* start idle threads before io threads start to run */
 	fio_idle_prof_start();
@@ -2363,7 +2413,7 @@ reap:
 	/*
 	 * create threads (TD_NOT_CREATED -> TD_CREATED)
	 */
-	for_each_td(td, i) {
+	for_each_td(td) {
 		if (td->runstate != TD_NOT_CREATED)
 			continue;
 
@@ -2432,10 +2482,8 @@ reap:
 					strerror(ret));
 		} else {
 			pid_t pid;
-			struct fio_file **files;
 			void *eo;
 			dprint(FD_PROCESS, "will fork\n");
-			files = td->files;
 			eo = td->eo;
 			read_barrier();
 			pid = fork();
@@ -2444,11 +2492,8 @@ reap:
 				ret = (int)(uintptr_t)thread_main(fd);
 				_exit(ret);
-			} else if (i == fio_debug_jobno)
+			} else if (__td_index == fio_debug_jobno)
 				*fio_debug_jobp = pid;
-			// freeing previously allocated memory for files
-			// this memory freed MUST NOT be shared between processes, only the pointer itself may be shared within TD
-			free(files);
 			free(eo);
 			free(fd);
 			fd = NULL;
 		}
@@ -2463,7 +2508,7 @@ reap:
 				break;
 			}
 			dprint(FD_MUTEX, "done waiting on startup_sem\n");
-		}
+		} end_for_each();
 
 		/*
 		 * Wait for the started threads to transition to
@@ -2508,7 +2553,7 @@ reap:
 	/*
 	 * start created threads (TD_INITIALIZED -> TD_RUNNING).
	 */
-	for_each_td(td, i) {
+	for_each_td(td) {
 		if (td->runstate != TD_INITIALIZED)
 			continue;
 
@@ -2522,7 +2567,7 @@ reap:
 		t_rate += ddir_rw_sum(td->o.rate);
 		todo--;
 		fio_sem_up(td->sem);
-	}
+	} end_for_each();
 
 	reap_threads(&nr_running, &t_rate, &m_rate);
 
@@ -2548,9 +2593,7 @@ static void free_disk_util(void)
 
 int fio_backend(struct sk_out *sk_out)
 {
-	struct thread_data *td;
 	int i;
-
 	if (exec_profile) {
 		if (load_profile(exec_profile))
 			return 1;
@@ -2606,7 +2649,7 @@ int fio_backend(struct sk_out *sk_out)
 		}
 	}
 
-	for_each_td(td, i) {
+	for_each_td(td) {
 		struct thread_stat *ts = &td->ts;
 
 		free_clat_prio_stats(ts);
@@ -2619,7 +2662,7 @@ int fio_backend(struct sk_out *sk_out)
 		}
 		fio_sem_remove(td->sem);
 		td->sem = NULL;
-	}
+	} end_for_each();
 
 	free_disk_util();
 	if (cgroup_list) {
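---

Most hunks above convert open-coded job loops (for_each_td(td, i) plus local
"struct thread_data *td; int i;" declarations) to the paired
for_each_td()/end_for_each() form. The sketch below is a minimal stand-in for
how such a scoped iterator macro pair can work; it is not fio's actual
definition (the real one lives in fio.h and walks segmented thread arrays),
but it shows why callers can drop their local declarations and why a hidden
__td_index is in scope to compare against fio_debug_jobno after fork():

#include <stdio.h>

struct thread_data {
	int runstate;
};

static struct thread_data segments[4];
static int thread_number = 4;

/* open a scope that owns both the loop variable and the hidden index;
 * end_for_each() closes that scope again */
#define for_each_td(td)						\
	{							\
		int __td_index;					\
		struct thread_data *(td);			\
		for (__td_index = 0, (td) = &segments[0];	\
		     __td_index < thread_number;		\
		     __td_index++, (td)++)
#define end_for_each()	}

int main(void)
{
	for_each_td(td) {
		/* __td_index is visible inside the loop body */
		printf("visiting td %d (runstate %d)\n", __td_index, td->runstate);
	} end_for_each();
	return 0;
}

Because the macro owns both declarations, this patch can delete the
"struct thread_data *td" and "int i" locals from reap_threads(),
waitee_running(), run_threads() and fio_backend().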
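Two of the handle_thinktime() hunks add the same clamp twice: before spinning
and again before sleeping, the wanted interval is capped at the runtime the
job has left under a configured timeout (td->o.timeout and utime_since_now()
are both in microseconds there). A small self-contained sketch of that
arithmetic follows; the helper name is invented for illustration and is not
fio code:

#include <stdio.h>

/* cap a wanted spin/sleep interval at the runtime remaining before timeout;
 * all values in microseconds, mirroring the patch's runtime_left logic */
static unsigned long long clamp_to_runtime_left(unsigned long long want_usec,
						unsigned long long timeout_usec,
						unsigned long long elapsed_usec)
{
	unsigned long long runtime_left;

	if (!timeout_usec)		/* no timeout set: nothing to clamp */
		return want_usec;

	runtime_left = timeout_usec - elapsed_usec;
	if (runtime_left < want_usec)
		return runtime_left;
	return want_usec;
}

int main(void)
{
	/* 5 ms of a 10 s timeout remain, so a 20 ms thinktime is cut to 5 ms
	 * and the job ends on schedule instead of overshooting: prints 5000 */
	printf("%llu\n", clamp_to_runtime_left(20000ULL, 10000000ULL, 9995000ULL));
	return 0;
}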
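Finally, the thread_main() hunk brackets do_io() with two frand_copy() calls
when rand_repeatable is off: it snapshots the verify RNG state before the
write phase advances it, then restores the snapshot so the verify phase
regenerates the same header seeds. A toy illustration of that snapshot/replay
pattern, with an invented RNG type standing in for fio's struct frand_state:

#include <stdio.h>

struct rng_state {
	unsigned long long s;
};

/* xorshift64 step: a stand-in for fio's generator */
static unsigned long long rng_next(struct rng_state *r)
{
	r->s ^= r->s << 13;
	r->s ^= r->s >> 7;
	r->s ^= r->s << 17;
	return r->s;
}

int main(void)
{
	struct rng_state verify = { .s = 0x1234 }, saved;

	saved = verify;				/* frand_copy(&saved, &verify) */
	unsigned long long a = rng_next(&verify);	/* write phase consumes state */
	verify = saved;				/* frand_copy(&verify, &saved) */
	unsigned long long b = rng_next(&verify);	/* verify phase replays it */
	printf("%d\n", a == b);			/* prints 1: sequences match */
	return 0;
}

This is also why the old td_fill_verify_state_seed() call in do_verify()
could be removed: the replayed state now comes from the saved snapshot
rather than from reseeding.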