X-Git-Url: https://git.kernel.dk/?p=fio.git;a=blobdiff_plain;f=backend.c;h=d5b260a81c896fbcbe8c2ddf581d584225f69067;hp=65a3e184a6d77d7d5439f23c9a4c17813a405326;hb=50a8ce864e2c5bee7c44935b39b357aa8071615b;hpb=a9da8ab2169810667aeb26f857a8ac3c056e4d61

diff --git a/backend.c b/backend.c
index 65a3e184..d5b260a8 100644
--- a/backend.c
+++ b/backend.c
@@ -55,6 +55,7 @@
 #include "err.h"
 #include "lib/tp.h"
 #include "workqueue.h"
+#include "lib/mountcheck.h"
 
 static pthread_t helper_thread;
 static pthread_mutex_t helper_lock;
@@ -363,6 +364,23 @@ static inline int runtime_exceeded(struct thread_data *td, struct timeval *t)
 	return 0;
 }
 
+/*
+ * We need to update the runtime consistently in ms, but keep a running
+ * tally of the current elapsed time in microseconds for sub millisecond
+ * updates.
+ */
+static inline void update_runtime(struct thread_data *td,
+				  unsigned long long *elapsed_us,
+				  const enum fio_ddir ddir)
+{
+	if (ddir == DDIR_WRITE && td_write(td) && td->o.verify_only)
+		return;
+
+	td->ts.runtime[ddir] -= (elapsed_us[ddir] + 999) / 1000;
+	elapsed_us[ddir] += utime_since_now(&td->start);
+	td->ts.runtime[ddir] += (elapsed_us[ddir] + 999) / 1000;
+}
+
 static int break_on_this_error(struct thread_data *td, enum fio_ddir ddir,
 			       int *retptr)
 {
@@ -428,7 +446,7 @@ static int wait_for_completions(struct thread_data *td, struct timeval *time)
 	 * if the queue is full, we MUST reap at least 1 event
 	 */
 	min_evts = min(td->o.iodepth_batch_complete, td->cur_depth);
-	if (full && !min_evts)
+	if ((full && !min_evts) || !td->o.iodepth_batch_complete)
 		min_evts = 1;
 
 	if (time && (__should_check_rate(td, DDIR_READ) ||
@@ -744,6 +762,25 @@ static int io_complete_bytes_exceeded(struct thread_data *td)
 	return bytes >= limit || exceeds_number_ios(td);
 }
 
+/*
+ * used to calculate the next io time for rate control
+ *
+ */
+static long long usec_for_io(struct thread_data *td, enum fio_ddir ddir)
+{
+	uint64_t secs, remainder, bps, bytes;
+
+	assert(!(td->flags & TD_F_CHILD));
+	bytes = td->rate_io_issue_bytes[ddir];
+	bps = td->rate_bps[ddir];
+	if (bps) {
+		secs = bytes / bps;
+		remainder = bytes % bps;
+		return remainder * 1000000 / bps + secs * 1000000;
+	} else
+		return 0;
+}
+
 /*
  * Main IO worker function. It retrieves io_u's to process and queues
  * and reaps them, checking for rate and errors along the way.
@@ -873,10 +910,17 @@ static uint64_t do_io(struct thread_data *td)
 			if (td->error)
 				break;
 			ret = workqueue_enqueue(&td->io_wq, io_u);
+
+			if (should_check_rate(td))
+				td->rate_next_io_time[ddir] = usec_for_io(td, ddir);
+
 		} else {
 			ret = td_io_queue(td, io_u);
 
-			if (io_queue_event(td, io_u, &ret, ddir, &bytes_issued, 1, &comp_time))
+			if (should_check_rate(td))
+				td->rate_next_io_time[ddir] = usec_for_io(td, ddir);
+
+			if (io_queue_event(td, io_u, &ret, ddir, &bytes_issued, 0, &comp_time))
 				break;
 
 			/*
@@ -906,7 +950,7 @@ reap:
 		}
 		if (!in_ramp_time(td) && td->o.latency_target)
 			lat_target_check(td);
-
+
 		if (td->o.thinktime) {
 			unsigned long long b;
 
@@ -1157,13 +1201,17 @@ static int switch_ioscheduler(struct thread_data *td)
 	/*
	 * Read back and check that the selected scheduler is now the default.
	 */
+	memset(tmp, 0, sizeof(tmp));
 	ret = fread(tmp, sizeof(tmp), 1, f);
 	if (ferror(f) || ret < 0) {
 		td_verror(td, errno, "fread");
 		fclose(f);
 		return 1;
 	}
-	tmp[sizeof(tmp) - 1] = '\0';
+	/*
+	 * either a list of io schedulers or "none\n" is expected.
+	 */
+	tmp[strlen(tmp) - 1] = '\0';
 
 	sprintf(tmp2, "[%s]", td->o.ioscheduler);
 
@@ -1220,7 +1268,8 @@ static int keep_running(struct thread_data *td)
 
 static int exec_string(struct thread_options *o, const char *string, const char *mode)
 {
-	int ret, newlen = strlen(string) + strlen(o->name) + strlen(mode) + 9 + 1;
+	size_t newlen = strlen(string) + strlen(o->name) + strlen(mode) + 9 + 1;
+	int ret;
 	char *str;
 
 	str = malloc(newlen);
@@ -1306,7 +1355,7 @@ static void io_workqueue_fn(struct thread_data *td, struct io_u *io_u)
  */
 static void *thread_main(void *data)
 {
-	unsigned long long elapsed;
+	unsigned long long elapsed_us[DDIR_RWDIR_CNT] = { 0, };
 	struct thread_data *td = data;
 	struct thread_options *o = &td->o;
 	pthread_condattr_t attr;
@@ -1544,18 +1593,12 @@ static void *thread_main(void *data)
 		check_update_rusage(td);
 
 		fio_mutex_down(stat_mutex);
-		if (td_read(td) && td->io_bytes[DDIR_READ]) {
-			elapsed = mtime_since_now(&td->start);
-			td->ts.runtime[DDIR_READ] += elapsed;
-		}
-		if (td_write(td) && td->io_bytes[DDIR_WRITE]) {
-			elapsed = mtime_since_now(&td->start);
-			td->ts.runtime[DDIR_WRITE] += elapsed;
-		}
-		if (td_trim(td) && td->io_bytes[DDIR_TRIM]) {
-			elapsed = mtime_since_now(&td->start);
-			td->ts.runtime[DDIR_TRIM] += elapsed;
-		}
+		if (td_read(td) && td->io_bytes[DDIR_READ])
+			update_runtime(td, elapsed_us, DDIR_READ);
+		if (td_write(td) && td->io_bytes[DDIR_WRITE])
+			update_runtime(td, elapsed_us, DDIR_WRITE);
+		if (td_trim(td) && td->io_bytes[DDIR_TRIM])
+			update_runtime(td, elapsed_us, DDIR_TRIM);
 		fio_gettime(&td->start, NULL);
 		fio_mutex_up(stat_mutex);
 
@@ -1579,7 +1622,7 @@ static void *thread_main(void *data)
 		check_update_rusage(td);
 
 		fio_mutex_down(stat_mutex);
-		td->ts.runtime[DDIR_READ] += mtime_since_now(&td->start);
+		update_runtime(td, elapsed_us, DDIR_READ);
 		fio_gettime(&td->start, NULL);
 		fio_mutex_up(stat_mutex);
 
@@ -1869,11 +1912,12 @@ static int fio_verify_load_state(struct thread_data *td)
 
 	if (is_backend) {
 		void *data;
+		int ver;
 
 		ret = fio_server_get_verify_state(td->o.name,
-					td->thread_number - 1, &data);
+					td->thread_number - 1, &data, &ver);
 		if (!ret)
-			verify_convert_assign_state(td, data);
+			verify_convert_assign_state(td, data, ver);
 	} else
 		ret = verify_load_state(td, "local");
 
@@ -1887,6 +1931,27 @@ static void do_usleep(unsigned int usecs)
 	usleep(usecs);
 }
 
+static int check_mount_writes(struct thread_data *td)
+{
+	struct fio_file *f;
+	unsigned int i;
+
+	if (!td_write(td) || td->o.allow_mounted_write)
+		return 0;
+
+	for_each_file(td, f, i) {
+		if (f->filetype != FIO_TYPE_BD)
+			continue;
+		if (device_is_mounted(f->file_name))
+			goto mounted;
+	}
+
+	return 0;
+mounted:
+	log_err("fio: %s appears mounted, and 'allow_mounted_write' isn't set. Aborting.", f->file_name);
+	return 1;
+}
+
 /*
  * Main function for kicking off and reaping jobs, as needed.
  */
@@ -1905,6 +1970,8 @@ static void run_threads(void)
 
 	nr_thread = nr_process = 0;
 	for_each_td(td, i) {
+		if (check_mount_writes(td))
+			return;
 		if (td->o.use_thread)
 			nr_thread++;
 		else
@@ -2253,7 +2320,7 @@ int fio_backend(void)
 		for (i = 0; i < DDIR_RWDIR_CNT; i++) {
 			struct io_log *log = agg_io_log[i];
 
-			flush_log(log);
+			flush_log(log, 0);
 			free_log(log);
 		}
 	}
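
Note: the sketch below is not part of the patch; it is a standalone illustration of the arithmetic behind the two helpers the patch adds. next_io_usec() and tally_runtime_ms() are hypothetical stand-ins for usec_for_io() and update_runtime() with the fio-specific types stripped out: the first shows why the issued byte count is split into whole seconds plus a remainder (a naive bytes * 1000000 overflows 64 bits once roughly 18.4 TB have been issued), the second shows why the millisecond runtime is re-derived from a running microsecond tally on every update (so repeated sub-millisecond updates are not each rounded up to 1 ms).

#include <stdint.h>
#include <stdio.h>

/* Mirrors usec_for_io(): elapsed usec by which `bytes` should have issued. */
static long long next_io_usec(uint64_t bytes, uint64_t bps)
{
	uint64_t secs, remainder;

	if (!bps)
		return 0;

	secs = bytes / bps;		/* whole seconds of I/O already issued */
	remainder = bytes % bps;	/* sub-second part, always < bps */
	return remainder * 1000000 / bps + secs * 1000000;
}

/* Mirrors update_runtime(): keep a usec tally, re-round to msec each time. */
static void tally_runtime_ms(uint64_t *runtime_ms, uint64_t *elapsed_us,
			     uint64_t delta_us)
{
	*runtime_ms -= (*elapsed_us + 999) / 1000;	/* drop old rounded value */
	*elapsed_us += delta_us;			/* accumulate in usec */
	*runtime_ms += (*elapsed_us + 999) / 1000;	/* re-add, rounded up */
}

int main(void)
{
	/* 32 TiB issued at 50 MiB/s: naive bytes * 1000000 overflows 64 bits */
	uint64_t bytes = 32ULL << 40, bps = 50 << 20;
	uint64_t runtime_ms = 0, elapsed_us = 0;
	int i;

	printf("next issue time: %lld usec\n", next_io_usec(bytes, bps));

	/* ten 300 usec updates account as 3 ms total, not ten rounded-up ms */
	for (i = 0; i < 10; i++)
		tally_runtime_ms(&runtime_ms, &elapsed_us, 300);
	printf("runtime: %llu msec\n", (unsigned long long)runtime_ms);
	return 0;
}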