X-Git-Url: https://git.kernel.dk/?a=blobdiff_plain;f=backend.c;h=3c734f0d6baf6eadd2d53deb7d963f6fe3ead385;hb=refs%2Fheads%2Flatency-probe;hp=f6cfbdd8288230be53136b8ad002439c862118df;hpb=71e6e5a2fd5c00ee42ecac36037a539fcad1efae;p=fio.git

diff --git a/backend.c b/backend.c
index f6cfbdd8..3c734f0d 100644
--- a/backend.c
+++ b/backend.c
@@ -29,6 +29,7 @@
 #include <sys/stat.h>
 #include <sys/wait.h>
 #include <math.h>
+#include <pthread.h>
 
 #include "fio.h"
 #include "smalloc.h"
@@ -47,12 +48,14 @@
 #include "rate-submit.h"
 #include "helper_thread.h"
 #include "pshared.h"
+#include "zone-dist.h"
+#include "target.h"
 
 static struct fio_sem *startup_sem;
 static struct flist_head *cgroup_list;
 static struct cgroup_mnt *cgroup_mnt;
 static int exit_value;
-static volatile int fio_abort;
+static volatile bool fio_abort;
 static unsigned int nr_process = 0;
 static unsigned int nr_thread = 0;
 
@@ -64,6 +67,7 @@ unsigned int stat_number = 0;
 int shm_id = 0;
 int temp_stall_ts;
 unsigned long done_secs = 0;
+pthread_mutex_t overlap_check = PTHREAD_MUTEX_INITIALIZER;
 
 #define JOB_START_TIMEOUT	(5 * 1000)
 
@@ -566,7 +570,7 @@ static int unlink_all_files(struct thread_data *td)
 /*
  * Check if io_u will overlap an in-flight IO in the queue
  */
-static bool in_flight_overlap(struct io_u_queue *q, struct io_u *io_u)
+bool in_flight_overlap(struct io_u_queue *q, struct io_u *io_u)
 {
 	bool overlap;
 	struct io_u *check_io_u;
@@ -1087,8 +1091,8 @@ reap:
 				break;
 			}
 		}
-		if (!in_ramp_time(td) && td->o.latency_target)
-			lat_target_check(td);
+		if (!in_ramp_time(td) && lat_target_check(td))
+			break;
 
 		if (ddir_rw(ddir) && td->o.thinktime)
 			handle_thinktime(td, ddir);
@@ -1186,14 +1190,14 @@ static void cleanup_io_u(struct thread_data *td)
 		if (td->io_ops->io_u_free)
 			td->io_ops->io_u_free(td, io_u);
 
-		fio_memfree(io_u, sizeof(*io_u));
+		fio_memfree(io_u, sizeof(*io_u), td_offload_overlap(td));
 	}
 
 	free_io_mem(td);
 
 	io_u_rexit(&td->io_u_requeues);
-	io_u_qexit(&td->io_u_freelist);
-	io_u_qexit(&td->io_u_all);
+	io_u_qexit(&td->io_u_freelist, false);
+	io_u_qexit(&td->io_u_all, td_offload_overlap(td));
 
 	free_file_completion_logging(td);
 }
@@ -1208,8 +1212,8 @@ static int init_io_u(struct thread_data *td)
 
 	err = 0;
 	err += !io_u_rinit(&td->io_u_requeues, td->o.iodepth);
-	err += !io_u_qinit(&td->io_u_freelist, td->o.iodepth);
-	err += !io_u_qinit(&td->io_u_all, td->o.iodepth);
+	err += !io_u_qinit(&td->io_u_freelist, td->o.iodepth, false);
+	err += !io_u_qinit(&td->io_u_all, td->o.iodepth, td_offload_overlap(td));
 
 	if (err) {
 		log_err("fio: failed setting up IO queues\n");
@@ -1224,7 +1228,7 @@
 		if (td->terminate)
 			return 1;
 
-		ptr = fio_memalign(cl_align, sizeof(*io_u));
+		ptr = fio_memalign(cl_align, sizeof(*io_u), td_offload_overlap(td));
 		if (!ptr) {
 			log_err("fio: unable to allocate aligned memory\n");
 			break;
@@ -1592,6 +1596,8 @@ static void *thread_main(void *data)
 		goto err;
 	}
 
+	td_zone_gen_index(td);
+
 	/*
	 * Do this early, we don't want the compress threads to be limited
	 * to the same CPUs as the IO workers. So do this before we set
@@ -1862,14 +1868,24 @@
	 * (Are we not missing other flags that can be ignored ?)
	 */
 	if ((td->o.size || td->o.io_size) && !ddir_rw_sum(bytes_done) &&
-	    !did_some_io && !td->o.create_only &&
+	    !did_some_io && (td->o.iodepth_mode != IOD_STEPPED) &&
+	    !td->o.create_only &&
 	    !(td_ioengine_flagged(td, FIO_NOIO) ||
 	      td_ioengine_flagged(td, FIO_DISKLESSIO)))
 		log_err("%s: No I/O performed by %s, "
 			"perhaps try --debug=io option for details?\n",
 			td->o.name, td->io_ops->name);
 
+	/*
+	 * Acquire this lock if we were doing overlap checking in
+	 * offload mode so that we don't clean up this job while
+	 * another thread is checking its io_u's for overlap
+	 */
+	if (td_offload_overlap(td))
+		pthread_mutex_lock(&overlap_check);
 	td_set_runstate(td, TD_FINISHING);
+	if (td_offload_overlap(td))
+		pthread_mutex_unlock(&overlap_check);
 
 	update_rusage_stat(td);
 	td->ts.total_run_time = mtime_since_now(&td->epoch);
@@ -1907,15 +1923,7 @@ err:
 	close_ioengine(td);
 	cgroup_shutdown(td, cgroup_mnt);
 	verify_free_state(td);
-
-	if (td->zone_state_index) {
-		int i;
-
-		for (i = 0; i < DDIR_RWDIR_CNT; i++)
-			free(td->zone_state_index[i]);
-		free(td->zone_state_index);
-		td->zone_state_index = NULL;
-	}
+	td_zone_free_index(td);
 
 	if (fio_option_is_set(o, cpumask)) {
 		ret = fio_cpuset_exit(&o->cpumask);
@@ -2218,18 +2226,22 @@ static void run_threads(struct sk_out *sk_out)
 	}
 
 	if (output_format & FIO_OUTPUT_NORMAL) {
-		log_info("Starting ");
+		struct buf_output out;
+
+		buf_output_init(&out);
+		__log_buf(&out, "Starting ");
 		if (nr_thread)
-			log_info("%d thread%s", nr_thread,
+			__log_buf(&out, "%d thread%s", nr_thread,
 				 nr_thread > 1 ? "s" : "");
 		if (nr_process) {
 			if (nr_thread)
-				log_info(" and ");
-			log_info("%d process%s", nr_process,
+				__log_buf(&out, " and ");
+			__log_buf(&out, "%d process%s", nr_process,
 				 nr_process > 1 ? "es" : "");
 		}
-		log_info("\n");
-		log_info_flush();
+		__log_buf(&out, "\n");
+		log_info_buf(out.buf, out.buflen);
+		buf_output_free(&out);
 	}
 
 	todo = thread_number;
@@ -2372,7 +2384,7 @@ reap:
 			if (fio_sem_down_timeout(startup_sem, 10000)) {
 				log_err("fio: job startup hung? exiting.\n");
 				fio_terminate_threads(TERMINATE_ALL);
-				fio_abort = 1;
+				fio_abort = true;
 				nr_started--;
 				free(fd);
 				break;
@@ -2486,6 +2498,8 @@ int fio_backend(struct sk_out *sk_out)
 	}
 
 	startup_sem = fio_sem_init(FIO_SEM_LOCKED);
+	if (!sk_out)
+		is_local_backend = true;
 	if (startup_sem == NULL)
 		return 1;
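
The overlap_check hunks above close a race in offload-mode overlap checking: a worker must not reach TD_FINISHING (and go on to tear down its io_u queues) while another thread is still walking those queues in in_flight_overlap(). A minimal standalone sketch of that locking pattern follows; the job struct, enum values, and function bodies are illustrative stand-ins rather than fio code — only the overlap_check mutex and the lock placement around the runstate change mirror the diff:

#include <pthread.h>
#include <stdbool.h>

enum { TD_RUNNING, TD_FINISHING };

struct job {
	bool offload_overlap;	/* stand-in for td_offload_overlap(td) */
	int runstate;
};

static pthread_mutex_t overlap_check = PTHREAD_MUTEX_INITIALIZER;

/*
 * Checker side: inspect another job's in-flight io_u's only while
 * holding the lock, and only if that job has not started finishing.
 */
static bool check_other_job(struct job *other)
{
	bool overlap = false;

	pthread_mutex_lock(&overlap_check);
	if (other->runstate != TD_FINISHING) {
		/* ... walk other's io_u list, as in_flight_overlap() does ... */
	}
	pthread_mutex_unlock(&overlap_check);
	return overlap;
}

/*
 * Worker side: take the same lock around the runstate change so a
 * checker can never observe a job mid-teardown.
 */
static void enter_finishing(struct job *td)
{
	if (td->offload_overlap)
		pthread_mutex_lock(&overlap_check);
	td->runstate = TD_FINISHING;
	if (td->offload_overlap)
		pthread_mutex_unlock(&overlap_check);
}

Because both sides take the same mutex, a checker either sees the job before it begins finishing or not at all, which is what the comment added in thread_main() describes.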
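The run_threads() hunk applies a related single-writer idea to logging: the "Starting N threads" banner is assembled in a local buf_output and emitted with one log_info_buf() call, instead of several log_info() calls whose fragments could interleave with other output. Reduced to standalone C (the buffer handling and function name below are illustrative, not fio's buf_output API):

#include <stdio.h>
#include <unistd.h>

/*
 * Build the whole banner first, then emit it with a single write(2),
 * so concurrent threads cannot interleave their fragments.  The 128
 * byte buffer comfortably holds the longest possible message.
 */
static void log_starting(unsigned int nr_thread, unsigned int nr_process)
{
	char buf[128];
	size_t len = 0;

	len += snprintf(buf + len, sizeof(buf) - len, "Starting ");
	if (nr_thread)
		len += snprintf(buf + len, sizeof(buf) - len, "%u thread%s",
				nr_thread, nr_thread > 1 ? "s" : "");
	if (nr_process) {
		if (nr_thread)
			len += snprintf(buf + len, sizeof(buf) - len, " and ");
		len += snprintf(buf + len, sizeof(buf) - len, "%u process%s",
				nr_process, nr_process > 1 ? "es" : "");
	}
	len += snprintf(buf + len, sizeof(buf) - len, "\n");

	write(STDOUT_FILENO, buf, len);	/* one flush, no interleaving */
}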