X-Git-Url: https://git.kernel.dk/?p=fio.git;a=blobdiff_plain;f=backend.c;h=0075a733ffca7b2c5fd727f53d56d95e69592c17;hp=d82d49487c84991b8105508cd419cd557b58182b;hb=63a4b9cca4ba3aa4101051402cbbe946ced17a49;hpb=971caeb177d3bc4f65fa31381bbfb83710bfc690

diff --git a/backend.c b/backend.c
index d82d4948..0075a733 100644
--- a/backend.c
+++ b/backend.c
@@ -22,29 +22,18 @@
  *
  */
 #include <unistd.h>
-#include <fcntl.h>
 #include <string.h>
-#include <limits.h>
 #include <signal.h>
-#include <time.h>
-#include <locale.h>
 #include <assert.h>
-#include <time.h>
 #include <inttypes.h>
 #include <sys/stat.h>
 #include <sys/wait.h>
-#include <sys/ipc.h>
-#include <sys/mman.h>
 #include <math.h>
+#include <pthread.h>
 
 #include "fio.h"
-#ifndef FIO_NO_HAVE_SHM_H
-#include <sys/shm.h>
-#endif
-#include "hash.h"
 #include "smalloc.h"
 #include "verify.h"
-#include "trim.h"
 #include "diskutil.h"
 #include "cgroup.h"
 #include "profile.h"
@@ -59,12 +48,13 @@
 #include "rate-submit.h"
 #include "helper_thread.h"
 #include "pshared.h"
+#include "zone-dist.h"
 
 static struct fio_sem *startup_sem;
 static struct flist_head *cgroup_list;
-static char *cgroup_mnt;
+static struct cgroup_mnt *cgroup_mnt;
 static int exit_value;
-static volatile int fio_abort;
+static volatile bool fio_abort;
 static unsigned int nr_process = 0;
 static unsigned int nr_thread = 0;
 
@@ -76,6 +66,7 @@ unsigned int stat_number = 0;
 int shm_id = 0;
 int temp_stall_ts;
 unsigned long done_secs = 0;
+pthread_mutex_t overlap_check = PTHREAD_MUTEX_INITIALIZER;
 
 #define JOB_START_TIMEOUT	(5 * 1000)
 
@@ -90,7 +81,7 @@ static void sig_int(int sig)
 			exit_value = 128;
 		}
 
-		fio_terminate_threads(TERMINATE_ALL);
+		fio_terminate_threads(TERMINATE_ALL, TERMINATE_ALL);
 	}
 }
 
@@ -143,8 +134,8 @@ static bool __check_min_rate(struct thread_data *td, struct timespec *now,
 	unsigned long long bytes = 0;
 	unsigned long iops = 0;
 	unsigned long spent;
-	unsigned long rate;
-	unsigned int ratemin = 0;
+	unsigned long long rate;
+	unsigned long long ratemin = 0;
 	unsigned int rate_iops = 0;
 	unsigned int rate_iops_min = 0;
 
@@ -178,7 +169,7 @@ static bool __check_min_rate(struct thread_data *td, struct timespec *now,
 		 * check bandwidth specified rate
 		 */
 		if (bytes < td->rate_bytes[ddir]) {
-			log_err("%s: rate_min=%uB/s not met, only transferred %lluB\n",
+			log_err("%s: rate_min=%lluB/s not met, only transferred %lluB\n",
 				td->o.name, ratemin, bytes);
 			return true;
 		} else {
@@ -189,7 +180,7 @@ static bool __check_min_rate(struct thread_data *td, struct timespec *now,
 
 			if (rate < ratemin ||
 			    bytes < td->rate_bytes[ddir]) {
-				log_err("%s: rate_min=%uB/s not met, got %luB/s\n",
+				log_err("%s: rate_min=%lluB/s not met, got %lluB/s\n",
 					td->o.name, ratemin, rate);
 				return true;
 			}
@@ -210,7 +201,7 @@ static bool __check_min_rate(struct thread_data *td, struct timespec *now,
 
 			if (rate < rate_iops_min ||
 			    iops < td->rate_blocks[ddir]) {
-				log_err("%s: rate_iops_min=%u not met, got %lu IOPS\n",
+				log_err("%s: rate_iops_min=%u not met, got %llu IOPS\n",
 					td->o.name, rate_iops_min, rate);
 				return true;
 			}
@@ -250,8 +241,6 @@ static void cleanup_pending_aio(struct thread_data *td)
 	 * get immediately available events, if any
 	 */
 	r = io_u_queued_complete(td, 0);
-	if (r < 0)
-		return;
 
 	/*
 	 * now cancel remaining active events
@@ -280,13 +269,14 @@ static bool fio_io_sync(struct thread_data *td, struct fio_file *f)
 {
 	struct io_u *io_u = __get_io_u(td);
-	int ret;
+	enum fio_q_status ret;
 
 	if (!io_u)
 		return true;
 
 	io_u->ddir = DDIR_SYNC;
 	io_u->file = f;
+	io_u_set(td, io_u, IO_U_F_NO_FILE_PUT);
 
 	if (td_io_prep(td, io_u)) {
 		put_io_u(td, io_u);
@@ -295,16 +285,13 @@ static bool fio_io_sync(struct thread_data *td, struct fio_file *f)
 
 requeue:
 	ret = td_io_queue(td, io_u);
-	if (ret < 0) {
-		td_verror(td, io_u->error, "td_io_queue");
-		put_io_u(td, io_u);
-		return true;
-	} else if (ret == FIO_Q_QUEUED) {
-		if (td_io_commit(td))
-			return true;
+	switch (ret) {
+	case FIO_Q_QUEUED:
+		td_io_commit(td);
 		if (io_u_queued_complete(td, 1) < 0)
 			return true;
-	} else if (ret == FIO_Q_COMPLETED) {
+		break;
+	case FIO_Q_COMPLETED:
 		if (io_u->error) {
 			td_verror(td, io_u->error, "td_io_queue");
 			return true;
@@ -312,9 +299,9 @@ requeue:
 
 		if (io_u_sync_complete(td, io_u) < 0)
 			return true;
-	} else if (ret == FIO_Q_BUSY) {
-		if (td_io_commit(td))
-			return true;
+		break;
+	case FIO_Q_BUSY:
+		td_io_commit(td);
 		goto requeue;
 	}
 
@@ -323,7 +310,7 @@ requeue:
 
 static int fio_file_fsync(struct thread_data *td, struct fio_file *f)
 {
-	int ret;
+	int ret, ret2;
 
 	if (fio_file_open(f))
 		return fio_io_sync(td, f);
@@ -332,8 +319,10 @@ static int fio_file_fsync(struct thread_data *td, struct fio_file *f)
 		return 1;
 
 	ret = fio_io_sync(td, f);
-	td_io_close_file(td, f);
-	return ret;
+	ret2 = 0;
+	if (fio_file_open(f))
+		ret2 = td_io_close_file(td, f);
+	return (ret || ret2);
 }
 
 static inline void __update_ts_cache(struct thread_data *td)
@@ -447,9 +436,7 @@ static int wait_for_completions(struct thread_data *td, struct timespec *time)
 	if ((full && !min_evts) || !td->o.iodepth_batch_complete_min)
 		min_evts = 1;
 
-	if (time && (__should_check_rate(td, DDIR_READ) ||
-	    __should_check_rate(td, DDIR_WRITE) ||
-	    __should_check_rate(td, DDIR_TRIM)))
+	if (time && __should_check_rate(td))
 		fio_gettime(time, NULL);
 
 	do {
@@ -465,22 +452,20 @@ int io_queue_event(struct thread_data *td, struct io_u *io_u, int *ret,
 		   enum fio_ddir ddir, uint64_t *bytes_issued,
 		   int from_verify, struct timespec *comp_time)
 {
-	int ret2;
-
 	switch (*ret) {
 	case FIO_Q_COMPLETED:
 		if (io_u->error) {
 			*ret = -io_u->error;
 			clear_io_u(td, io_u);
 		} else if (io_u->resid) {
-			int bytes = io_u->xfer_buflen - io_u->resid;
+			long long bytes = io_u->xfer_buflen - io_u->resid;
 			struct fio_file *f = io_u->file;
 
 			if (bytes_issued)
 				*bytes_issued += bytes;
 
 			if (!from_verify)
-				trim_io_piece(td, io_u);
+				trim_io_piece(io_u);
 
 			/*
 			 * zero read, fail
@@ -506,9 +491,7 @@ int io_queue_event(struct thread_data *td, struct io_u *io_u, int *ret,
 			requeue_io_u(td, &io_u);
 		} else {
 sync_done:
-			if (comp_time && (__should_check_rate(td, DDIR_READ) ||
-			    __should_check_rate(td, DDIR_WRITE) ||
-			    __should_check_rate(td, DDIR_TRIM)))
+			if (comp_time && __should_check_rate(td))
 				fio_gettime(comp_time, NULL);
 
 			*ret = io_u_sync_complete(td, io_u);
@@ -542,9 +525,7 @@ sync_done:
 		if (!from_verify)
 			unlog_io_piece(td, io_u);
 		requeue_io_u(td, &io_u);
-		ret2 = td_io_commit(td);
-		if (ret2 < 0)
-			*ret = ret2;
+		td_io_commit(td);
 		break;
 	default:
 		assert(*ret < 0);
@@ -589,7 +570,7 @@ static int unlink_all_files(struct thread_data *td)
 /*
  * Check if io_u will overlap an in-flight IO in the queue
  */
-static bool in_flight_overlap(struct io_u_queue *q, struct io_u *io_u)
+bool in_flight_overlap(struct io_u_queue *q, struct io_u *io_u)
 {
 	bool overlap;
 	struct io_u *check_io_u;
@@ -606,7 +587,7 @@ static bool in_flight_overlap(struct io_u_queue *q, struct io_u *io_u)
 
 			if (x1 < y2 && y1 < x2) {
 				overlap = true;
-				dprint(FD_IO, "in-flight overlap: %llu/%lu, %llu/%lu\n",
+				dprint(FD_IO, "in-flight overlap: %llu/%llu, %llu/%llu\n",
 						x1, io_u->buflen,
 						y1, check_io_u->buflen);
 				break;
@@ -617,7 +598,7 @@ static bool in_flight_overlap(struct io_u_queue *q, struct io_u *io_u)
 	return overlap;
 }
 
-static int io_u_submit(struct thread_data *td, struct io_u *io_u)
+static enum fio_q_status io_u_submit(struct thread_data *td, struct io_u *io_u)
 {
 	/*
 	 * Check for overlap if the user asked us to, and we have
@@ -735,6 +716,7 @@ static void do_verify(struct thread_data *td, uint64_t verify_bytes)
 				break;
 			} else if (io_u->ddir == DDIR_WRITE) {
 				io_u->ddir = DDIR_READ;
+				populate_verify_io_u(td, io_u);
 				break;
 			} else {
 				put_io_u(td, io_u);
@@ -910,6 +892,8 @@ static void handle_thinktime(struct thread_data *td, enum fio_ddir ddir)
 			over = (usperop - total) / usperop * -bs;
 
 		td->rate_io_issue_bytes[ddir] += (missed - over);
+		/* adjust for rate_process=poisson */
+		td->last_usec[ddir] += total;
 	}
 }
 
@@ -986,8 +970,10 @@ static void do_io(struct thread_data *td, uint64_t *bytes_done)
 		 * Break if we exceeded the bytes. The exception is time
 		 * based runs, but we still need to break out of the loop
 		 * for those to run verification, if enabled.
+		 * Jobs read from iolog do not use this stop condition.
 		 */
 		if (bytes_issued >= total_bytes &&
+		    !td->o.read_iolog_file &&
 		    (!td->o.time_based ||
 		     (td->o.time_based && td->o.verify != VERIFY_NONE)))
 			break;
@@ -1007,6 +993,9 @@ static void do_io(struct thread_data *td, uint64_t *bytes_done)
 			break;
 		}
 
+		if (io_u->ddir == DDIR_WRITE && td->flags & TD_F_DO_VERIFY)
+			populate_verify_io_u(td, io_u);
+
 		ddir = io_u->ddir;
 
 		/*
@@ -1017,12 +1006,6 @@ static void do_io(struct thread_data *td, uint64_t *bytes_done)
 		if (td->o.verify != VERIFY_NONE && io_u->ddir == DDIR_READ &&
 		    ((io_u->flags & IO_U_F_VER_LIST) || !td_rw(td))) {
 
-			if (!td->o.verify_pattern_bytes) {
-				io_u->rand_seed = __rand(&td->verify_state);
-				if (sizeof(int) != sizeof(long *))
-					io_u->rand_seed *= __rand(&td->verify_state);
-			}
-
 			if (verify_state_should_stop(td, io_u)) {
 				put_io_u(td, io_u);
 				break;
@@ -1050,8 +1033,8 @@ static void do_io(struct thread_data *td, uint64_t *bytes_done)
 			log_io_piece(td, io_u);
 
 		if (td->o.io_submit_mode == IO_MODE_OFFLOAD) {
-			const unsigned long blen = io_u->xfer_buflen;
-			const enum fio_ddir ddir = acct_ddir(io_u);
+			const unsigned long long blen = io_u->xfer_buflen;
+			const enum fio_ddir __ddir = acct_ddir(io_u);
 
 			if (td->error)
 				break;
@@ -1059,14 +1042,14 @@ static void do_io(struct thread_data *td, uint64_t *bytes_done)
 			workqueue_enqueue(&td->io_wq, &io_u->work);
 			ret = FIO_Q_QUEUED;
 
-			if (ddir_rw(ddir)) {
-				td->io_issues[ddir]++;
-				td->io_issue_bytes[ddir] += blen;
-				td->rate_io_issue_bytes[ddir] += blen;
+			if (ddir_rw(__ddir)) {
+				td->io_issues[__ddir]++;
+				td->io_issue_bytes[__ddir] += blen;
+				td->rate_io_issue_bytes[__ddir] += blen;
 			}
 
 			if (should_check_rate(td))
-				td->rate_next_io_time[ddir] = usec_for_io(td, ddir);
+				td->rate_next_io_time[__ddir] = usec_for_io(td, __ddir);
 
 		} else {
 			ret = io_u_submit(td, io_u);
@@ -1097,7 +1080,7 @@ reap:
 		if (!in_ramp_time(td) && should_check_rate(td)) {
 			if (check_min_rate(td, &comp_time)) {
 				if (exitall_on_terminate || td->o.exitall_error)
-					fio_terminate_threads(td->groupid);
+					fio_terminate_threads(td->groupid, td->o.exit_what);
 				td_verror(td, EIO, "check_min_rate");
 				break;
 			}
@@ -1133,7 +1116,7 @@ reap:
 		td->error = 0;
 	}
 
-	if (should_fsync(td) && td->o.end_fsync) {
+	if (should_fsync(td) && (td->o.end_fsync || td->o.fsync_on_close)) {
 		td_set_runstate(td, TD_FSYNCING);
 
 		for_each_file(td, f, i) {
@@ -1201,14 +1184,14 @@ static void cleanup_io_u(struct thread_data *td)
 		if (td->io_ops->io_u_free)
 			td->io_ops->io_u_free(td, io_u);
 
-		fio_memfree(io_u, sizeof(*io_u));
+		fio_memfree(io_u, sizeof(*io_u), td_offload_overlap(td));
 	}
 
 	free_io_mem(td);
 
 	io_u_rexit(&td->io_u_requeues);
-	io_u_qexit(&td->io_u_freelist);
-	io_u_qexit(&td->io_u_all);
+	io_u_qexit(&td->io_u_freelist, false);
+	io_u_qexit(&td->io_u_all, td_offload_overlap(td));
 
 	free_file_completion_logging(td);
 }
@@ -1216,30 +1199,85 @@ static void cleanup_io_u(struct thread_data *td)
 static int init_io_u(struct thread_data *td)
 {
 	struct io_u *io_u;
-	unsigned int max_bs, min_write;
 	int cl_align, i, max_units;
-	int data_xfer = 1, err;
-	char *p;
+	int err;
 
 	max_units = td->o.iodepth;
-	max_bs = td_max_bs(td);
-	min_write = td->o.min_bs[DDIR_WRITE];
-	td->orig_buffer_size = (unsigned long long) max_bs
-					* (unsigned long long) max_units;
-
-	if (td_ioengine_flagged(td, FIO_NOIO) || !(td_read(td) || td_write(td)))
-		data_xfer = 0;
 
 	err = 0;
 	err += !io_u_rinit(&td->io_u_requeues, td->o.iodepth);
-	err += !io_u_qinit(&td->io_u_freelist, td->o.iodepth);
-	err += !io_u_qinit(&td->io_u_all, td->o.iodepth);
+	err += !io_u_qinit(&td->io_u_freelist, td->o.iodepth, false);
+	err += !io_u_qinit(&td->io_u_all, td->o.iodepth, td_offload_overlap(td));
 
 	if (err) {
 		log_err("fio: failed setting up IO queues\n");
 		return 1;
 	}
 
+	cl_align = os_cache_line_size();
+
+	for (i = 0; i < max_units; i++) {
+		void *ptr;
+
+		if (td->terminate)
+			return 1;
+
+		ptr = fio_memalign(cl_align, sizeof(*io_u), td_offload_overlap(td));
+		if (!ptr) {
+			log_err("fio: unable to allocate aligned memory\n");
+			return 1;
+		}
+
+		io_u = ptr;
+		memset(io_u, 0, sizeof(*io_u));
+		INIT_FLIST_HEAD(&io_u->verify_list);
+		dprint(FD_MEM, "io_u alloc %p, index %u\n", io_u, i);
+
+		io_u->index = i;
+		io_u->flags = IO_U_F_FREE;
+		io_u_qpush(&td->io_u_freelist, io_u);
+
+		/*
+		 * io_u never leaves this stack, used for iteration of all
+		 * io_u buffers.
+		 */
+		io_u_qpush(&td->io_u_all, io_u);
+
+		if (td->io_ops->io_u_init) {
+			int ret = td->io_ops->io_u_init(td, io_u);
+
+			if (ret) {
+				log_err("fio: failed to init engine data: %d\n", ret);
+				return 1;
+			}
+		}
+	}
+
+	init_io_u_buffers(td);
+
+	if (init_file_completion_logging(td, max_units))
+		return 1;
+
+	return 0;
+}
+
+int init_io_u_buffers(struct thread_data *td)
+{
+	struct io_u *io_u;
+	unsigned long long max_bs, min_write;
+	int i, max_units;
+	int data_xfer = 1;
+	char *p;
+
+	max_units = td->o.iodepth;
+	max_bs = td_max_bs(td);
+	min_write = td->o.min_bs[DDIR_WRITE];
+	td->orig_buffer_size = (unsigned long long) max_bs
+					* (unsigned long long) max_units;
+
+	if (td_ioengine_flagged(td, FIO_NOIO) || !(td_read(td) || td_write(td)))
+		data_xfer = 0;
+
 	/*
 	 * if we may later need to do address alignment, then add any
 	 * possible adjustment here so that we don't cause a buffer
@@ -1251,7 +1289,7 @@ static int init_io_u(struct thread_data *td)
 		td->orig_buffer_size += page_mask + td->o.mem_align;
 
 	if (td->o.mem_type == MEM_SHMHUGE || td->o.mem_type == MEM_MMAPHUGE) {
-		unsigned long bs;
+		unsigned long long bs;
 
 		bs = td->orig_buffer_size + td->o.hugepage_size - 1;
 		td->orig_buffer_size = bs & ~(td->o.hugepage_size - 1);
@@ -1271,23 +1309,8 @@ static int init_io_u(struct thread_data *td)
 	else
 		p = td->orig_buffer;
 
-	cl_align = os_cache_line_size();
-
 	for (i = 0; i < max_units; i++) {
-		void *ptr;
-
-		if (td->terminate)
-			return 1;
-
-		ptr = fio_memalign(cl_align, sizeof(*io_u));
-		if (!ptr) {
-			log_err("fio: unable to allocate aligned memory\n");
-			break;
-		}
-
-		io_u = ptr;
-		memset(io_u, 0, sizeof(*io_u));
-		INIT_FLIST_HEAD(&io_u->verify_list);
+		io_u = td->io_u_all.io_us[i];
 		dprint(FD_MEM, "io_u alloc %p, index %u\n", io_u, i);
 
 		if (data_xfer) {
@@ -1304,32 +1327,9 @@ static int init_io_u(struct thread_data *td)
 				fill_verify_pattern(td, io_u->buf, max_bs, io_u, 0, 0);
 			}
 		}
-
-		io_u->index = i;
-		io_u->flags = IO_U_F_FREE;
-		io_u_qpush(&td->io_u_freelist, io_u);
-
-		/*
-		 * io_u never leaves this stack, used for iteration of all
-		 * io_u buffers.
-		 */
-		io_u_qpush(&td->io_u_all, io_u);
-
-		if (td->io_ops->io_u_init) {
-			int ret = td->io_ops->io_u_init(td, io_u);
-
-			if (ret) {
-				log_err("fio: failed to init engine data: %d\n", ret);
-				return 1;
-			}
-		}
-
 		p += max_bs;
 	}
 
-	if (init_file_completion_logging(td, max_units))
-		return 1;
-
 	return 0;
 }
@@ -1340,7 +1340,7 @@ static int init_io_u(struct thread_data *td)
 static int switch_ioscheduler(struct thread_data *td)
 {
 #ifdef FIO_HAVE_IOSCHED_SWITCH
-	char tmp[256], tmp2[128];
+	char tmp[256], tmp2[128], *p;
 	FILE *f;
 	int ret;
 
@@ -1376,17 +1376,19 @@ static int switch_ioscheduler(struct thread_data *td)
 	/*
 	 * Read back and check that the selected scheduler is now the default.
 	 */
-	memset(tmp, 0, sizeof(tmp));
-	ret = fread(tmp, sizeof(tmp), 1, f);
+	ret = fread(tmp, 1, sizeof(tmp) - 1, f);
 	if (ferror(f) || ret < 0) {
 		td_verror(td, errno, "fread");
 		fclose(f);
 		return 1;
 	}
+	tmp[ret] = '\0';
 	/*
-	 * either a list of io schedulers or "none\n" is expected.
+	 * either a list of io schedulers or "none\n" is expected. Strip the
+	 * trailing newline.
 	 */
-	tmp[strlen(tmp) - 1] = '\0';
+	p = tmp;
+	strsep(&p, "\n");
 
 	/*
 	 * Write to "none" entry doesn't fail, so check the result here.
@@ -1456,12 +1458,12 @@ static bool keep_running(struct thread_data *td)
 static int exec_string(struct thread_options *o, const char *string,
 		       const char *mode)
 {
-	size_t newlen = strlen(string) + strlen(o->name) + strlen(mode) + 9 + 1;
+	size_t newlen = strlen(string) + strlen(o->name) + strlen(mode) + 13 + 1;
 	int ret;
 	char *str;
 
 	str = malloc(newlen);
-	sprintf(str, "%s &> %s.%s.txt", string, o->name, mode);
+	sprintf(str, "%s > %s.%s.txt 2>&1", string, o->name, mode);
 
 	log_info("%s : Saving output of %s in %s.%s.txt\n",o->name, mode, o->name, mode);
 	ret = system(str);
@@ -1532,7 +1534,7 @@ static void *thread_main(void *data)
 	struct sk_out *sk_out = fd->sk_out;
 	uint64_t bytes_done[DDIR_RWDIR_CNT];
 	int deadlock_loop_cnt;
-	bool clear_state, did_some_io;
+	bool clear_state;
 	int ret;
 
 	sk_out_assign(sk_out);
@@ -1544,7 +1546,7 @@ static void *thread_main(void *data)
 	} else
 		td->pid = gettid();
 
-	fio_local_clock_init(o->use_thread);
+	fio_local_clock_init();
 
 	dprint(FD_PROCESS, "jobs pid=%d started\n", (int) td->pid);
 
@@ -1555,7 +1557,6 @@ static void *thread_main(void *data)
 	INIT_FLIST_HEAD(&td->io_hist_list);
 	INIT_FLIST_HEAD(&td->verify_list);
 	INIT_FLIST_HEAD(&td->trim_list);
-	INIT_FLIST_HEAD(&td->next_rand_list);
 	td->io_hist_tree = RB_ROOT;
 
 	ret = mutex_cond_init_pshared(&td->io_u_lock, &td->free_cond);
@@ -1589,6 +1590,8 @@ static void *thread_main(void *data)
 		goto err;
 	}
 
+	td_zone_gen_index(td);
+
 	/*
 	 * Do this early, we don't want the compress threads to be limited
 	 * to the same CPUs as the IO workers. So do this before we set
@@ -1684,12 +1687,18 @@ static void *thread_main(void *data)
 	 * May alter parameters that init_io_u() will use, so we need to
 	 * do this first.
 	 */
-	if (init_iolog(td))
+	if (!init_iolog(td))
+		goto err;
+
+	if (td_io_init(td))
 		goto err;
 
 	if (init_io_u(td))
 		goto err;
 
+	if (td->io_ops->post_init && td->io_ops->post_init(td))
+		goto err;
+
 	if (o->verify_async && verify_async_init(td))
 		goto err;
@@ -1717,9 +1726,6 @@ static void *thread_main(void *data)
 	if (!o->create_serialize && setup_files(td))
 		goto err;
 
-	if (td_io_init(td))
-		goto err;
-
 	if (!init_random_map(td))
 		goto err;
 
@@ -1752,7 +1758,6 @@ static void *thread_main(void *data)
 
 	memset(bytes_done, 0, sizeof(bytes_done));
 	clear_state = false;
-	did_some_io = false;
 
 	while (keep_running(td)) {
 		uint64_t verify_bytes;
@@ -1830,9 +1835,6 @@ static void *thread_main(void *data)
 		    td_ioengine_flagged(td, FIO_UNIDIR))
 			continue;
 
-		if (ddir_rw_sum(bytes_done))
-			did_some_io = true;
-
 		clear_io_state(td, 0);
 
 		fio_gettime(&td->start, NULL);
@@ -1854,19 +1856,15 @@ static void *thread_main(void *data)
 	}
 
 	/*
-	 * If td ended up with no I/O when it should have had,
-	 * then something went wrong unless FIO_NOIO or FIO_DISKLESSIO.
-	 * (Are we not missing other flags that can be ignored ?)
+	 * Acquire this lock if we were doing overlap checking in
+	 * offload mode so that we don't clean up this job while
+	 * another thread is checking its io_u's for overlap
 	 */
-	if ((td->o.size || td->o.io_size) && !ddir_rw_sum(bytes_done) &&
-	    !did_some_io && !td->o.create_only &&
-	    !(td_ioengine_flagged(td, FIO_NOIO) ||
-	      td_ioengine_flagged(td, FIO_DISKLESSIO)))
-		log_err("%s: No I/O performed by %s, "
-			"perhaps try --debug=io option for details?\n",
-			td->o.name, td->io_ops->name);
-
+	if (td_offload_overlap(td))
+		pthread_mutex_lock(&overlap_check);
 	td_set_runstate(td, TD_FINISHING);
+	if (td_offload_overlap(td))
+		pthread_mutex_unlock(&overlap_check);
 
 	update_rusage_stat(td);
 	td->ts.total_run_time = mtime_since_now(&td->epoch);
@@ -1889,7 +1887,7 @@ static void *thread_main(void *data)
 		exec_string(o, o->exec_postrun, (const char *)"postrun");
 
 	if (exitall_on_terminate || (o->exitall_error && td->error))
-		fio_terminate_threads(td->groupid);
+		fio_terminate_threads(td->groupid, td->o.exit_what);
 
 err:
 	if (td->error)
@@ -1902,17 +1900,9 @@ err:
 	close_and_free_files(td);
 	cleanup_io_u(td);
 	close_ioengine(td);
-	cgroup_shutdown(td, &cgroup_mnt);
+	cgroup_shutdown(td, cgroup_mnt);
 	verify_free_state(td);
-
-	if (td->zone_state_index) {
-		int i;
-
-		for (i = 0; i < DDIR_RWDIR_CNT; i++)
-			free(td->zone_state_index[i]);
-		free(td->zone_state_index);
-		td->zone_state_index = NULL;
-	}
+	td_zone_free_index(td);
 
 	if (fio_option_is_set(o, cpumask)) {
 		ret = fio_cpuset_exit(&o->cpumask);
@@ -1925,6 +1915,8 @@ err:
 	 */
 	if (o->write_iolog_file)
 		write_iolog_close(td);
+	if (td->io_log_rfile)
+		fclose(td->io_log_rfile);
 
 	td_set_runstate(td, TD_EXITED);
 
@@ -2047,7 +2039,7 @@ reaped:
 	}
 
 	if (*nr_running == cputhreads && !pending && realthreads)
-		fio_terminate_threads(TERMINATE_ALL);
+		fio_terminate_threads(TERMINATE_ALL, TERMINATE_ALL);
 }
 
 static bool __check_trigger_file(void)
@@ -2097,7 +2089,7 @@ void check_trigger_file(void)
 			fio_clients_send_trigger(trigger_remote_cmd);
 		else {
 			verify_save_state(IO_LIST_ALL);
-			fio_terminate_threads(TERMINATE_ALL);
+			fio_terminate_threads(TERMINATE_ALL, TERMINATE_ALL);
 			exec_trigger(trigger_cmd);
 		}
 	}
@@ -2117,8 +2109,16 @@ static int fio_verify_load_state(struct thread_data *td)
 					td->thread_number - 1, &data);
 		if (!ret)
 			verify_assign_state(td, data);
-	} else
-		ret = verify_load_state(td, "local");
+	} else {
+		char prefix[PATH_MAX];
+
+		if (aux_path)
+			sprintf(prefix, "%s%clocal", aux_path,
+					FIO_OS_PATH_SEPARATOR);
+		else
+			strcpy(prefix, "local");
+		ret = verify_load_state(td, prefix);
+	}
 
 	return ret;
 }
@@ -2213,18 +2213,22 @@ static void run_threads(struct sk_out *sk_out)
 	}
 
 	if (output_format & FIO_OUTPUT_NORMAL) {
-		log_info("Starting ");
+		struct buf_output out;
+
+		buf_output_init(&out);
+		__log_buf(&out, "Starting ");
 		if (nr_thread)
-			log_info("%d thread%s", nr_thread,
+			__log_buf(&out, "%d thread%s", nr_thread,
 						nr_thread > 1 ? "s" : "");
 		if (nr_process) {
 			if (nr_thread)
-				log_info(" and ");
-			log_info("%d process%s", nr_process,
+				__log_buf(&out, " and ");
+			__log_buf(&out, "%d process%s", nr_process,
 						nr_process > 1 ? "es" : "");
 		}
-		log_info("\n");
-		log_info_flush();
+		__log_buf(&out, "\n");
+		log_info_buf(out.buf, out.buflen);
+		buf_output_free(&out);
 	}
 
 	todo = thread_number;
@@ -2366,8 +2370,8 @@ reap:
 			dprint(FD_MUTEX, "wait on startup_sem\n");
 			if (fio_sem_down_timeout(startup_sem, 10000)) {
 				log_err("fio: job startup hung? exiting.\n");
-				fio_terminate_threads(TERMINATE_ALL);
-				fio_abort = 1;
+				fio_terminate_threads(TERMINATE_ALL, TERMINATE_ALL);
+				fio_abort = true;
 				nr_started--;
 				free(fd);
 				break;
@@ -2481,15 +2485,19 @@ int fio_backend(struct sk_out *sk_out)
 	}
 
 	startup_sem = fio_sem_init(FIO_SEM_LOCKED);
+	if (!sk_out)
+		is_local_backend = true;
 	if (startup_sem == NULL)
 		return 1;
 
 	set_genesis_time();
 
 	stat_init();
-	helper_thread_create(startup_sem, sk_out);
+	if (helper_thread_create(startup_sem, sk_out))
+		log_err("fio: failed to create helper thread\n");
 
 	cgroup_list = smalloc(sizeof(*cgroup_list));
-	INIT_FLIST_HEAD(cgroup_list);
+	if (cgroup_list)
+		INIT_FLIST_HEAD(cgroup_list);
 
 	run_threads(sk_out);
 
@@ -2519,9 +2527,10 @@ int fio_backend(struct sk_out *sk_out)
 	}
 
 	free_disk_util();
-	cgroup_kill(cgroup_list);
-	sfree(cgroup_list);
-	sfree(cgroup_mnt);
+	if (cgroup_list) {
+		cgroup_kill(cgroup_list);
+		sfree(cgroup_list);
+	}
 
 	fio_sem_remove(startup_sem);
 	stat_exit();