X-Git-Url: https://git.kernel.dk/?p=fio.git;a=blobdiff_plain;f=backend.c;h=fdb7413d7719ff9ff4b46af8e611c072c6918435;hp=9bb017451f6bad52c62dbba069d8184ee9e6dde9;hb=f52c9691bc8c285f3445235c69acdfd6de7f9b82;hpb=b63efd302dc3b762a21264ca66e07c74b697b608

diff --git a/backend.c b/backend.c
index 9bb01745..fdb7413d 100644
--- a/backend.c
+++ b/backend.c
@@ -418,6 +418,34 @@ static void check_update_rusage(struct thread_data *td)
 	}
 }
 
+static int wait_for_completions(struct thread_data *td, struct timeval *time,
+				uint64_t *bytes_done)
+{
+	const int full = queue_full(td);
+	int min_evts = 0;
+	int ret;
+
+	/*
+	 * if the queue is full, we MUST reap at least 1 event
+	 */
+	min_evts = min(td->o.iodepth_batch_complete, td->cur_depth);
+	if (full && !min_evts)
+		min_evts = 1;
+
+	if (time && (__should_check_rate(td, DDIR_READ) ||
+	    __should_check_rate(td, DDIR_WRITE) ||
+	    __should_check_rate(td, DDIR_TRIM)))
+		fio_gettime(time, NULL);
+
+	do {
+		ret = io_u_queued_complete(td, min_evts, bytes_done);
+		if (ret < 0)
+			break;
+	} while (full && (td->cur_depth > td->o.iodepth_low));
+
+	return ret;
+}
+
 /*
  * The main verify engine. Runs over the writes we previously submitted,
  * reads the blocks back in, and checks the crc/md5 of the data.
@@ -537,6 +565,8 @@ static void do_verify(struct thread_data *td, uint64_t verify_bytes)
 		io_u->end_io = verify_io_u;
 
 		ddir = io_u->ddir;
+		if (!td->o.disable_slat)
+			fio_gettime(&io_u->start_time, NULL);
 
 		ret = td_io_queue(td, io_u);
 		switch (ret) {
@@ -599,27 +629,9 @@ sync_done:
 		 */
 reap:
 		full = queue_full(td) || (ret == FIO_Q_BUSY && td->cur_depth);
-		if (full || !td->o.iodepth_batch_complete) {
-			min_events = min(td->o.iodepth_batch_complete,
-					 td->cur_depth);
-			/*
-			 * if the queue is full, we MUST reap at least 1 event
-			 */
-			if (full && !min_events)
-				min_events = 1;
+		if (full || !td->o.iodepth_batch_complete)
+			ret = wait_for_completions(td, NULL, bytes_done);
 
-			do {
-				/*
-				 * Reap required number of io units, if any,
-				 * and do the verification on them through
-				 * the callback handler
-				 */
-				if (io_u_queued_complete(td, min_events, bytes_done) < 0) {
-					ret = -1;
-					break;
-				}
-			} while (full && (td->cur_depth > td->o.iodepth_low));
-		}
 		if (ret < 0)
 			break;
 	}
@@ -646,13 +658,35 @@ static unsigned int exceeds_number_ios(struct thread_data *td)
 	if (!td->o.number_ios)
 		return 0;
 
-	number_ios = ddir_rw_sum(td->this_io_blocks);
+	number_ios = ddir_rw_sum(td->io_blocks);
 	number_ios += td->io_u_queued + td->io_u_in_flight;
 
-	return number_ios >= td->o.number_ios;
+	return number_ios >= (td->o.number_ios * td->loops);
+}
+
+static int io_issue_bytes_exceeded(struct thread_data *td)
+{
+	unsigned long long bytes, limit;
+
+	if (td_rw(td))
+		bytes = td->io_issue_bytes[DDIR_READ] + td->io_issue_bytes[DDIR_WRITE];
+	else if (td_write(td))
+		bytes = td->io_issue_bytes[DDIR_WRITE];
+	else if (td_read(td))
+		bytes = td->io_issue_bytes[DDIR_READ];
+	else
+		bytes = td->io_issue_bytes[DDIR_TRIM];
+
+	if (td->o.io_limit)
+		limit = td->o.io_limit;
+	else
+		limit = td->o.size;
+
+	limit *= td->loops;
+	return bytes >= limit || exceeds_number_ios(td);
 }
 
-static int io_bytes_exceeded(struct thread_data *td)
+static int io_complete_bytes_exceeded(struct thread_data *td)
 {
 	unsigned long long bytes, limit;
 
@@ -670,6 +704,7 @@ static int io_bytes_exceeded(struct thread_data *td)
 	else
 		limit = td->o.size;
 
+	limit *= td->loops;
 	return bytes >= limit || exceeds_number_ios(td);
 }
 
@@ -693,21 +728,26 @@ static uint64_t do_io(struct thread_data *td)
 
 	lat_target_init(td);
 
+	total_bytes = td->o.size;
+	/*
+	 * Allow random overwrite workloads to write up to io_limit
+	 * before starting verification phase as 'size' doesn't apply.
+	 */
+	if (td_write(td) && td_random(td) && td->o.norandommap)
+		total_bytes = max(total_bytes, (uint64_t) td->o.io_limit);
 	/*
 	 * If verify_backlog is enabled, we'll run the verify in this
 	 * handler as well. For that case, we may need up to twice the
 	 * amount of bytes.
 	 */
-	total_bytes = td->o.size;
 	if (td->o.verify != VERIFY_NONE &&
 	   (td_write(td) && td->o.verify_backlog))
 		total_bytes += td->o.size;
 
 	while ((td->o.read_iolog_file && !flist_empty(&td->io_log_list)) ||
-		(!flist_empty(&td->trim_list)) || !io_bytes_exceeded(td) ||
+		(!flist_empty(&td->trim_list)) || !io_issue_bytes_exceeded(td) ||
 		td->o.time_based) {
 		struct timeval comp_time;
-		int min_evts = 0;
 		struct io_u *io_u;
 		int ret2, full;
 		enum fio_ddir ddir;
@@ -871,28 +911,8 @@ sync_done:
 		 */
reap:
 		full = queue_full(td) || (ret == FIO_Q_BUSY && td->cur_depth);
-		if (full || !td->o.iodepth_batch_complete) {
-			min_evts = min(td->o.iodepth_batch_complete,
-					td->cur_depth);
-			/*
-			 * if the queue is full, we MUST reap at least 1 event
-			 */
-			if (full && !min_evts)
-				min_evts = 1;
-
-			if (__should_check_rate(td, DDIR_READ) ||
-			    __should_check_rate(td, DDIR_WRITE) ||
-			    __should_check_rate(td, DDIR_TRIM))
-				fio_gettime(&comp_time, NULL);
-
-			do {
-				ret = io_u_queued_complete(td, min_evts, bytes_done);
-				if (ret < 0)
-					break;
-
-			} while (full && (td->cur_depth > td->o.iodepth_low));
-		}
-
+		if (full || !td->o.iodepth_batch_complete)
+			ret = wait_for_completions(td, &comp_time, bytes_done);
 		if (ret < 0)
 			break;
 		if (!ddir_rw_sum(bytes_done) && !(td->io_ops->flags & FIO_NOIO))
@@ -1242,7 +1262,7 @@ static uint64_t do_dry_run(struct thread_data *td)
 	td_set_runstate(td, TD_RUNNING);
 
 	while ((td->o.read_iolog_file && !flist_empty(&td->io_log_list)) ||
-		(!flist_empty(&td->trim_list)) || !io_bytes_exceeded(td)) {
+		(!flist_empty(&td->trim_list)) || !io_complete_bytes_exceeded(td)) {
 		struct io_u *io_u;
 		int ret;
 
@@ -1345,7 +1365,7 @@ static void *thread_main(void *data)
 	 * Set affinity first, in case it has an impact on the memory
 	 * allocations.
 	 */
-	if (o->cpumask_set) {
+	if (fio_option_is_set(o, cpumask)) {
 		if (o->cpus_allowed_policy == FIO_CPUS_SPLIT) {
 			ret = fio_cpus_split(&o->cpumask, td->thread_number - 1);
 			if (!ret) {
@@ -1364,7 +1384,8 @@ static void *thread_main(void *data)
 
 #ifdef CONFIG_LIBNUMA
 	/* numa node setup */
-	if (o->numa_cpumask_set || o->numa_memmask_set) {
+	if (fio_option_is_set(o, numa_cpunodes) ||
+	    fio_option_is_set(o, numa_memnodes)) {
 		struct bitmask *mask;
 
 		if (numa_available() < 0) {
@@ -1372,7 +1393,7 @@ static void *thread_main(void *data)
 			goto err;
 		}
 
-		if (o->numa_cpumask_set) {
+		if (fio_option_is_set(o, numa_cpunodes)) {
 			mask = numa_parse_nodestring(o->numa_cpunodes);
 			ret = numa_run_on_node_mask(mask);
 			numa_free_nodemask(mask);
@@ -1383,8 +1404,7 @@ static void *thread_main(void *data)
 			}
 		}
 
-		if (o->numa_memmask_set) {
-
+		if (fio_option_is_set(o, numa_memnodes)) {
 			mask = NULL;
 			if (o->numa_memnodes)
 				mask = numa_parse_nodestring(o->numa_memnodes);
@@ -1430,7 +1450,8 @@ static void *thread_main(void *data)
 	if (o->verify_async && verify_async_init(td))
 		goto err;
 
-	if (o->ioprio) {
+	if (fio_option_is_set(o, ioprio) ||
+	    fio_option_is_set(o, ioprio_class)) {
 		ret = ioprio_set(IOPRIO_WHO_PROCESS, 0, o->ioprio_class, o->ioprio);
 		if (ret == -1) {
 			td_verror(td, errno, "ioprio_set");
@@ -1589,7 +1610,7 @@ err:
 	cgroup_shutdown(td, &cgroup_mnt);
 	verify_free_state(td);
 
-	if (o->cpumask_set) {
+	if (fio_option_is_set(o, cpumask)) {
 		ret = fio_cpuset_exit(&o->cpumask);
 		if (ret)
 			td_verror(td, ret, "fio_cpuset_exit");
@@ -1804,12 +1825,9 @@ void exec_trigger(const char *cmd)
 void check_trigger_file(void)
 {
 	if (__check_trigger_file() || trigger_timedout()) {
-		if (nr_clients) {
-			if (trigger_remote_cmd)
-				fio_clients_send_trigger(trigger_remote_cmd);
-			if (trigger_cmd)
-				exec_trigger(trigger_cmd);
-		} else {
+		if (nr_clients)
+			fio_clients_send_trigger(trigger_remote_cmd);
+		else {
 			verify_save_state();
 			fio_terminate_threads(TERMINATE_ALL);
 			exec_trigger(trigger_cmd);
@@ -2122,7 +2140,8 @@ static void *helper_thread_main(void *data)
 		gettimeofday(&tv, NULL);
 		ts.tv_sec = tv.tv_sec + sec;
 		ts.tv_nsec = (tv.tv_usec * 1000) + nsec;
-		if (ts.tv_nsec > 1000000000ULL) {
+
+		if (ts.tv_nsec >= 1000000000ULL) {
			ts.tv_nsec -= 1000000000ULL;
			ts.tv_sec++;
		}
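
Note on the io_issue_bytes_exceeded()/io_complete_bytes_exceeded() hunks above: as I read them, both limits are now scaled by td->loops, so a job that repeats is bounded by size (or io_limit) per pass rather than in total. A standalone sketch of just that limit arithmetic, with toy values in place of fio's thread_data fields:

#include <stdio.h>

int main(void)
{
	unsigned long long size = 1ULL << 30;	/* size=1G */
	unsigned long long io_limit = 0;	/* io_limit not given */
	unsigned int loops = 3;			/* loops=3 */
	unsigned long long limit;

	/* same shape as the patched helpers: io_limit wins over size */
	limit = io_limit ? io_limit : size;
	limit *= loops;

	/* prints 3221225472: three full 1G passes may be issued */
	printf("issue limit: %llu bytes\n", limit);
	return 0;
}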
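The o->cpumask_set / o->ioprio style checks are replaced by fio_option_is_set(), which asks whether the user actually supplied the option rather than whether its current value happens to be non-zero. A minimal sketch of that distinction, using a hypothetical ioprio_set flag instead of fio's real option-tracking machinery:

#include <stdbool.h>
#include <stdio.h>

struct opts {
	int ioprio;
	bool ioprio_set;	/* records "user supplied it", even if 0 */
};

static void set_ioprio(struct opts *o, int val)
{
	o->ioprio = val;
	o->ioprio_set = true;
}

int main(void)
{
	struct opts o = { 0 };

	set_ioprio(&o, 0);	/* an explicit "priority 0" request */

	if (o.ioprio)		/* old style: wrongly treated as unset */
		printf("value check: set\n");
	if (o.ioprio_set)	/* is-set style: correctly detected */
		printf("is-set check: set\n");
	return 0;
}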
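The last hunk tightens the nanosecond carry in helper_thread_main(): tv_nsec == 1000000000 is already out of range for a struct timespec, so the test must be >=, not >. A standalone reproduction of the normalization; abs_timeout() is an illustrative helper, not fio code, and like the patch it assumes the nsec increment is below one second:

#include <stdio.h>
#include <sys/time.h>
#include <time.h>

static void abs_timeout(struct timespec *ts, unsigned int sec,
			unsigned long nsec)
{
	struct timeval tv;

	gettimeofday(&tv, NULL);
	ts->tv_sec = tv.tv_sec + sec;
	ts->tv_nsec = (tv.tv_usec * 1000) + nsec;

	/* '>=': a sum of exactly 1000000000 must carry into tv_sec too */
	if (ts->tv_nsec >= 1000000000L) {
		ts->tv_nsec -= 1000000000L;
		ts->tv_sec++;
	}
}

int main(void)
{
	struct timespec ts;

	abs_timeout(&ts, 1, 500000000UL);
	printf("%ld.%09ld\n", (long) ts.tv_sec, (long) ts.tv_nsec);
	return 0;
}

With the old '>' test, a wakeup time landing exactly on a second boundary left tv_nsec at 1000000000, an invalid timespec for pthread_cond_timedwait().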