X-Git-Url: https://git.kernel.dk/?p=fio.git;a=blobdiff_plain;f=fio.c;h=74604929f164d0785d7f0635f6e4abfe9c27bf37;hp=78dca9a09610e3140ae5eaabc7518012bff40abe;hb=d7762cf829fd3e44e50e5e2e9889b6449772097c;hpb=73170f19408559ed7e318f93b21e5cbbf13725bd

diff --git a/fio.c b/fio.c
index 78dca9a0..74604929 100644
--- a/fio.c
+++ b/fio.c
@@ -27,6 +27,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -95,9 +96,9 @@ static void sig_handler(int sig)
  */
 static int check_min_rate(struct thread_data *td, struct timeval *now)
 {
+	unsigned long long bytes = 0;
 	unsigned long spent;
 	unsigned long rate;
-	int ddir = td->ddir;
 
 	/*
 	 * allow a 2 second settle period in the beginning
@@ -105,6 +106,11 @@ static int check_min_rate(struct thread_data *td, struct timeval *now)
 	if (mtime_since(&td->start, now) < 2000)
 		return 0;
 
+	if (td_read(td))
+		bytes += td->this_io_bytes[DDIR_READ];
+	if (td_write(td))
+		bytes += td->this_io_bytes[DDIR_WRITE];
+
 	/*
	 * if rate blocks is set, sample is running
	 */
@@ -113,14 +119,19 @@ static int check_min_rate(struct thread_data *td, struct timeval *now)
 		if (spent < td->ratecycle)
 			return 0;
 
-		rate = (td->this_io_bytes[ddir] - td->rate_bytes) / spent;
-		if (rate < td->ratemin) {
-			fprintf(f_out, "%s: min rate %u not met, got %luKiB/sec\n", td->name, td->ratemin, rate);
+		if (bytes < td->rate_bytes) {
+			fprintf(f_out, "%s: min rate %u not met\n", td->name, td->ratemin);
 			return 1;
+		} else {
+			rate = (bytes - td->rate_bytes) / spent;
+			if (rate < td->ratemin || bytes < td->rate_bytes) {
+				fprintf(f_out, "%s: min rate %u not met, got %luKiB/sec\n", td->name, td->ratemin, rate);
+				return 1;
+			}
 		}
 	}
 
-	td->rate_bytes = td->this_io_bytes[ddir];
+	td->rate_bytes = bytes;
 	memcpy(&td->lastrate, now, sizeof(*now));
 	return 0;
 }
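Note: the reworked check_min_rate() sums READ and WRITE bytes and judges the combined throughput against the configured floor. Since mtime_since() reports milliseconds and this_io_bytes[] counts bytes, the division yields bytes/msec, which is within about 2.5% of KiB/sec, hence the unadjusted KiB/sec in the log line. A standalone sketch of that arithmetic (the helper name is ours, not fio's):

/*
 * Sketch, not fio source: the rate math used above. bytes/msec is
 * within ~2.5% of KiB/sec (1000 vs 1024), so the result is printed
 * as KiB/sec without further scaling.
 */
static unsigned long rate_kib_per_sec(unsigned long long bytes_now,
				      unsigned long long bytes_then,
				      unsigned long msec_spent)
{
	if (!msec_spent || bytes_now < bytes_then)
		return 0;

	return (bytes_now - bytes_then) / msec_spent;
}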
@@ -135,47 +146,22 @@ static inline int runtime_exceeded(struct thread_data *td, struct timeval *t)
 	return 0;
 }
 
-static struct fio_file *get_next_file(struct thread_data *td)
-{
-	unsigned int old_next_file = td->next_file;
-	struct fio_file *f;
-
-	do {
-		f = &td->files[td->next_file];
-
-		td->next_file++;
-		if (td->next_file >= td->nr_files)
-			td->next_file = 0;
-
-		if (f->fd != -1)
-			break;
-
-		f = NULL;
-	} while (td->next_file != old_next_file);
-
-	return f;
-}
-
 /*
  * When job exits, we can cancel the in-flight IO if we are using async
  * io. Attempt to do so.
  */
 static void cleanup_pending_aio(struct thread_data *td)
 {
-	struct timespec ts = { .tv_sec = 0, .tv_nsec = 0};
 	struct list_head *entry, *n;
-	struct io_completion_data icd;
 	struct io_u *io_u;
 	int r;
 
 	/*
	 * get immediately available events, if any
	 */
-	r = td_io_getevents(td, 0, td->cur_depth, &ts);
-	if (r > 0) {
-		icd.nr = r;
-		ios_completed(td, &icd);
-	}
+	r = io_u_queued_complete(td, 0);
+	if (r < 0)
+		return;
 
 	/*
	 * now cancel remaining active events
@@ -184,19 +170,24 @@ static void cleanup_pending_aio(struct thread_data *td)
 		list_for_each_safe(entry, n, &td->io_u_busylist) {
 			io_u = list_entry(entry, struct io_u, list);
 
-			r = td->io_ops->cancel(td, io_u);
-			if (!r)
+			/*
+			 * if the io_u isn't in flight, then that generally
+			 * means someone leaked an io_u. complain but fix
+			 * it up, so we don't stall here.
+			 */
+			if ((io_u->flags & IO_U_F_FLIGHT) == 0) {
+				log_err("fio: non-busy IO on busy list\n");
 				put_io_u(td, io_u);
+			} else {
+				r = td->io_ops->cancel(td, io_u);
+				if (!r)
+					put_io_u(td, io_u);
+			}
 		}
 	}
 
-	if (td->cur_depth) {
-		r = td_io_getevents(td, td->cur_depth, td->cur_depth, NULL);
-		if (r > 0) {
-			icd.nr = r;
-			ios_completed(td, &icd);
-		}
-	}
+	if (td->cur_depth)
+		r = io_u_queued_complete(td, td->cur_depth);
 }
 
 /*
@@ -206,7 +197,6 @@ static void cleanup_pending_aio(struct thread_data *td)
 static int fio_io_sync(struct thread_data *td, struct fio_file *f)
 {
 	struct io_u *io_u = __get_io_u(td);
-	struct io_completion_data icd;
 	int ret;
 
 	if (!io_u)
@@ -220,24 +210,27 @@ static int fio_io_sync(struct thread_data *td, struct fio_file *f)
 		return 1;
 	}
 
+requeue:
 	ret = td_io_queue(td, io_u);
-	if (ret) {
-		td_verror(td, io_u->error);
-		put_io_u(td, io_u);
-		return 1;
-	}
-
-	ret = td_io_getevents(td, 1, td->cur_depth, NULL);
 	if (ret < 0) {
-		td_verror(td, ret);
+		td_verror(td, io_u->error, "td_io_queue");
+		put_io_u(td, io_u);
 		return 1;
-	}
+	} else if (ret == FIO_Q_QUEUED) {
+		if (io_u_queued_complete(td, 1) < 0)
+			return 1;
+	} else if (ret == FIO_Q_COMPLETED) {
+		if (io_u->error) {
+			td_verror(td, io_u->error, "td_io_queue");
+			return 1;
+		}
 
-	icd.nr = ret;
-	ios_completed(td, &icd);
-	if (icd.error) {
-		td_verror(td, icd.error);
-		return 1;
+		if (io_u_sync_complete(td, io_u) < 0)
+			return 1;
+	} else if (ret == FIO_Q_BUSY) {
+		if (td_io_commit(td))
+			return 1;
+		goto requeue;
 	}
 
 	return 0;
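Note: td_io_queue() no longer returns a simple error flag; it reports one of three outcomes. FIO_Q_COMPLETED means the engine finished the io_u inline (typical for sync engines), FIO_Q_QUEUED means it is in flight and must be reaped later, and FIO_Q_BUSY means the engine cannot accept more right now, so already-queued io_us have to be pushed out with td_io_commit() before retrying. A minimal sketch of the resulting submit loop, assuming fio's internal types and only the entry points visible above (the helper itself is illustrative, not fio source):

/*
 * Illustrative only: queue one io_u, flushing the engine's backlog
 * with td_io_commit() whenever it reports FIO_Q_BUSY, then retrying
 * the same io_u. Returns FIO_Q_COMPLETED/FIO_Q_QUEUED or a negative
 * error.
 */
static int queue_with_retry(struct thread_data *td, struct io_u *io_u)
{
	int ret;

	do {
		ret = td_io_queue(td, io_u);
		if (ret != FIO_Q_BUSY)
			return ret;

		/*
		 * engine queue is full: push out what is already
		 * queued, then try this io_u again
		 */
		if (td_io_commit(td))
			return -1;
	} while (1);
}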
@@ -249,26 +242,28 @@ static int fio_io_sync(struct thread_data *td, struct fio_file *f)
  */
 static void do_verify(struct thread_data *td)
 {
-	struct io_u *io_u, *v_io_u = NULL;
-	struct io_completion_data icd;
 	struct fio_file *f;
-	int ret, i;
+	struct io_u *io_u;
+	int ret, i, min_events;
 
 	/*
	 * sync io first and invalidate cache, to make sure we really
	 * read from disk.
	 */
 	for_each_file(td, f, i) {
-		fio_io_sync(td, f);
-		file_invalidate_cache(td, f);
+		if (fio_io_sync(td, f))
+			break;
+		if (file_invalidate_cache(td, f))
+			break;
 	}
 
-	td_set_runstate(td, TD_VERIFYING);
+	if (td->error)
+		return;
 
-	do {
-		if (td->terminate)
-			break;
+	td_set_runstate(td, TD_VERIFYING);
 
+	io_u = NULL;
+	while (!td->terminate) {
 		io_u = __get_io_u(td);
 		if (!io_u)
 			break;
@@ -283,60 +278,64 @@ static void do_verify(struct thread_data *td)
 			break;
 		}
 
-		f = get_next_file(td);
-		if (!f)
-			break;
-
-		io_u->file = f;
-
 		if (td_io_prep(td, io_u)) {
 			put_io_u(td, io_u);
 			break;
 		}
 
+		io_u->end_io = verify_io_u;
+requeue:
 		ret = td_io_queue(td, io_u);
-		if (ret) {
-			td_verror(td, io_u->error);
-			put_io_u(td, io_u);
-			break;
-		}
 
-		/*
-		 * we have one pending to verify, do that while
-		 * we are doing io on the next one
-		 */
-		if (do_io_u_verify(td, &v_io_u))
-			break;
+		switch (ret) {
+		case FIO_Q_COMPLETED:
+			if (io_u->error)
+				ret = -io_u->error;
+			if (io_u->xfer_buflen != io_u->resid && io_u->resid) {
+				int bytes = io_u->xfer_buflen - io_u->resid;
 
-		ret = td_io_getevents(td, 1, 1, NULL);
-		if (ret != 1) {
+				io_u->xfer_buflen = io_u->resid;
+				io_u->xfer_buf += bytes;
+				goto requeue;
+			}
+			ret = io_u_sync_complete(td, io_u);
 			if (ret < 0)
-				td_verror(td, ret);
+				break;
+			continue;
+		case FIO_Q_QUEUED:
+			break;
+		case FIO_Q_BUSY:
+			requeue_io_u(td, &io_u);
+			ret = td_io_commit(td);
+			break;
+		default:
+			assert(ret < 0);
+			td_verror(td, -ret, "td_io_queue");
 			break;
 		}
 
-		v_io_u = td->io_ops->event(td, 0);
-		icd.nr = 1;
-		icd.error = 0;
-		fio_gettime(&icd.time, NULL);
-		io_completed(td, v_io_u, &icd);
-
-		if (icd.error) {
-			td_verror(td, icd.error);
-			put_io_u(td, v_io_u);
-			v_io_u = NULL;
+		if (ret < 0 || td->error)
 			break;
-		}
 
 		/*
-		 * if we can't submit more io, we need to verify now
+		 * if we can queue more, do so. but check if there are
+		 * completed io_u's first.
		 */
-		if (queue_full(td) && do_io_u_verify(td, &v_io_u))
-			break;
+		min_events = 0;
+		if (queue_full(td) || ret == FIO_Q_BUSY) {
+			min_events = 1;
 
-	} while (1);
+			if (td->cur_depth > td->iodepth_low)
+				min_events = td->cur_depth - td->iodepth_low;
+		}
 
-	do_io_u_verify(td, &v_io_u);
+		/*
+		 * Reap required number of io units, if any, and do the
+		 * verification on them through the callback handler
+		 */
+		if (io_u_queued_complete(td, min_events) < 0)
+			break;
+	}
 
 	if (td->cur_depth)
 		cleanup_pending_aio(td);
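Note: verification now runs through the io_u->end_io completion callback (verify_io_u) rather than a separately tracked pending io_u, and reaping honors the iodepth_low watermark: once the queue fills, it is drained down to iodepth_low before new io_us are submitted. A sketch of that watermark computation, mirroring the code above (illustrative helper, fio's internal types assumed):

/*
 * Illustrative only: how many completions to reap before queueing
 * more. With iodepth_low set, a full queue is drained down to that
 * watermark; otherwise a single completion is enough to make room.
 */
static int events_to_reap(struct thread_data *td, int queue_was_busy)
{
	int min_events = 0;

	if (queue_full(td) || queue_was_busy) {
		min_events = 1;

		if (td->cur_depth > td->iodepth_low)
			min_events = td->cur_depth - td->iodepth_low;
	}

	return min_events;
}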
- */ + + switch (ret) { + case FIO_Q_COMPLETED: + if (io_u->error) { + ret = io_u->error; + break; + } + if (io_u->xfer_buflen != io_u->resid && io_u->resid) { + int bytes = io_u->xfer_buflen - io_u->resid; + io_u->xfer_buflen = io_u->resid; - io_u->xfer_buf += ret; + io_u->xfer_buf += bytes; goto requeue; - } else { - put_io_u(td, io_u); - break; } + fio_gettime(&comp_time, NULL); + bytes_done = io_u_sync_complete(td, io_u); + if (bytes_done < 0) + ret = bytes_done; + break; + case FIO_Q_QUEUED: + /* + * if the engine doesn't have a commit hook, + * the io_u is really queued. if it does have such + * a hook, it has to call io_u_queued() itself. + */ + if (td->io_ops->commit == NULL) + io_u_queued(td, io_u); + break; + case FIO_Q_BUSY: + requeue_io_u(td, &io_u); + ret = td_io_commit(td); + break; + default: + assert(ret < 0); + put_io_u(td, io_u); + break; } - add_slat_sample(td, io_u->ddir, mtime_since(&io_u->start_time, &io_u->issue_time)); - - if (td->cur_depth < td->iodepth) { - struct timespec ts = { .tv_sec = 0, .tv_nsec = 0}; + if (ret < 0 || td->error) + break; - timeout = &ts; + /* + * See if we need to complete some commands + */ + if (ret == FIO_Q_QUEUED || ret == FIO_Q_BUSY) { min_evts = 0; - } else { - timeout = NULL; - min_evts = 1; - } + if (queue_full(td) || ret == FIO_Q_BUSY) { + min_evts = 1; - ret = td_io_getevents(td, min_evts, td->cur_depth, timeout); - if (ret < 0) { - td_verror(td, ret); - break; - } else if (!ret) - continue; + if (td->cur_depth > td->iodepth_low) + min_evts = td->cur_depth - td->iodepth_low; + } - icd.nr = ret; - ios_completed(td, &icd); - if (icd.error) { - td_verror(td, icd.error); - break; + fio_gettime(&comp_time, NULL); + bytes_done = io_u_queued_complete(td, min_evts); + if (bytes_done < 0) + break; } + if (!bytes_done) + continue; + /* * the rate is batched for now, it should work for batches * of completions except the very first one which may look * a little bursty */ - usec = utime_since(&s, &icd.time); + usec = utime_since(&s, &comp_time); - rate_throttle(td, usec, icd.bytes_done[td->ddir], td->ddir); + rate_throttle(td, usec, bytes_done); - if (check_min_rate(td, &icd.time)) { + if (check_min_rate(td, &comp_time)) { if (exitall_on_terminate) terminate_threads(td->groupid, 0); - td_verror(td, ENODATA); + td_verror(td, ENODATA, "check_min_rate"); break; } - if (runtime_exceeded(td, &icd.time)) - break; - if (td->thinktime) { unsigned long long b; b = td->io_blocks[0] + td->io_blocks[1]; - if (!(b % td->thinktime_blocks)) - usec_sleep(td, td->thinktime); + if (!(b % td->thinktime_blocks)) { + int left; + + if (td->thinktime_spin) + __usec_sleep(td->thinktime_spin); + + left = td->thinktime - td->thinktime_spin; + if (left) + usec_sleep(td, left); + } } } if (!td->error) { + struct fio_file *f; + if (td->cur_depth) cleanup_pending_aio(td); @@ -548,9 +575,12 @@ static int init_io_u(struct thread_data *td) fill_rand_buf(io_u, max_bs); io_u->index = i; + io_u->flags = IO_U_F_FREE; list_add(&io_u->list, &td->io_u_freelist); } + io_u_init_timeout(); + return 0; } @@ -567,7 +597,7 @@ static int switch_ioscheduler(struct thread_data *td) f = fopen(tmp, "r+"); if (!f) { - td_verror(td, errno); + td_verror(td, errno, "fopen"); return 1; } @@ -576,7 +606,7 @@ static int switch_ioscheduler(struct thread_data *td) */ ret = fwrite(td->ioscheduler, strlen(td->ioscheduler), 1, f); if (ferror(f) || ret != 1) { - td_verror(td, errno); + td_verror(td, errno, "fwrite"); fclose(f); return 1; } @@ -588,7 +618,7 @@ static int switch_ioscheduler(struct 
@@ -548,9 +575,12 @@ static int init_io_u(struct thread_data *td)
 			fill_rand_buf(io_u, max_bs);
 
 		io_u->index = i;
+		io_u->flags = IO_U_F_FREE;
 		list_add(&io_u->list, &td->io_u_freelist);
 	}
 
+	io_u_init_timeout();
+
 	return 0;
 }
 
@@ -567,7 +597,7 @@ static int switch_ioscheduler(struct thread_data *td)
 
 	f = fopen(tmp, "r+");
 	if (!f) {
-		td_verror(td, errno);
+		td_verror(td, errno, "fopen");
 		return 1;
 	}
 
@@ -576,7 +606,7 @@ static int switch_ioscheduler(struct thread_data *td)
	 */
 	ret = fwrite(td->ioscheduler, strlen(td->ioscheduler), 1, f);
 	if (ferror(f) || ret != 1) {
-		td_verror(td, errno);
+		td_verror(td, errno, "fwrite");
 		fclose(f);
 		return 1;
 	}
@@ -588,7 +618,7 @@ static int switch_ioscheduler(struct thread_data *td)
	 */
 	ret = fread(tmp, 1, sizeof(tmp), f);
 	if (ferror(f) || ret < 0) {
-		td_verror(td, errno);
+		td_verror(td, errno, "fread");
 		fclose(f);
 		return 1;
 	}
@@ -596,7 +626,7 @@ static int switch_ioscheduler(struct thread_data *td)
 	sprintf(tmp2, "[%s]", td->ioscheduler);
 	if (!strstr(tmp, tmp2)) {
 		log_err("fio: io scheduler %s not found\n", td->ioscheduler);
-		td_verror(td, EINVAL);
+		td_verror(td, EINVAL, "iosched_switch");
 		fclose(f);
 		return 1;
 	}
@@ -610,11 +640,15 @@ static void clear_io_state(struct thread_data *td)
 	struct fio_file *f;
 	int i;
 
-	td->stat_io_bytes[0] = td->stat_io_bytes[1] = 0;
+	td->ts.stat_io_bytes[0] = td->ts.stat_io_bytes[1] = 0;
 	td->this_io_bytes[0] = td->this_io_bytes[1] = 0;
 	td->zone_bytes = 0;
+	td->last_was_sync = 0;
+
 	for_each_file(td, f, i) {
+		f->last_completed_pos = 0;
+
 		f->last_pos = 0;
 		if (td->io_ops->flags & FIO_SYNCIO)
 			lseek(f->fd, SEEK_SET, 0);
@@ -640,6 +674,7 @@ static void *thread_main(void *data)
 
 	INIT_LIST_HEAD(&td->io_u_freelist);
 	INIT_LIST_HEAD(&td->io_u_busylist);
+	INIT_LIST_HEAD(&td->io_u_requeues);
 	INIT_LIST_HEAD(&td->io_hist_list);
 	INIT_LIST_HEAD(&td->io_log_list);
 
@@ -647,7 +682,7 @@ static void *thread_main(void *data)
 		goto err;
 
 	if (fio_setaffinity(td) == -1) {
-		td_verror(td, errno);
+		td_verror(td, errno, "cpu_set_affinity");
 		goto err;
 	}
 
@@ -656,13 +691,13 @@ static void *thread_main(void *data)
 
 	if (td->ioprio) {
 		if (ioprio_set(IOPRIO_WHO_PROCESS, 0, td->ioprio) == -1) {
-			td_verror(td, errno);
+			td_verror(td, errno, "ioprio_set");
 			goto err;
 		}
 	}
 
 	if (nice(td->nice) == -1) {
-		td_verror(td, errno);
+		td_verror(td, errno, "nice");
 		goto err;
 	}
 
@@ -694,15 +729,16 @@ static void *thread_main(void *data)
 	}
 
 	fio_gettime(&td->epoch, NULL);
-	getrusage(RUSAGE_SELF, &td->ru_start);
+	memcpy(&td->timeout_end, &td->epoch, sizeof(td->epoch));
+	getrusage(RUSAGE_SELF, &td->ts.ru_start);
 
 	runtime[0] = runtime[1] = 0;
 	while (td->loops--) {
 		fio_gettime(&td->start, NULL);
-		memcpy(&td->stat_sample_time, &td->start, sizeof(td->start));
+		memcpy(&td->ts.stat_sample_time, &td->start, sizeof(td->start));
 
 		if (td->ratemin)
-			memcpy(&td->lastrate, &td->stat_sample_time, sizeof(td->lastrate));
+			memcpy(&td->lastrate, &td->ts.stat_sample_time, sizeof(td->lastrate));
 
 		clear_io_state(td);
 		prune_io_piece_log(td);
@@ -712,10 +748,11 @@ static void *thread_main(void *data)
 		else
 			do_io(td);
 
-		runtime[td->ddir] += utime_since_now(&td->start);
-		if (td_rw(td) && td->io_bytes[td->ddir ^ 1])
-			runtime[td->ddir ^ 1] = runtime[td->ddir];
-
+		if (td_read(td) && td->io_bytes[DDIR_READ])
+			runtime[DDIR_READ] += utime_since_now(&td->start);
+		if (td_write(td) && td->io_bytes[DDIR_WRITE])
+			runtime[DDIR_WRITE] += utime_since_now(&td->start);
+
 		if (td->error || td->terminate)
 			break;
 
@@ -738,12 +775,12 @@ static void *thread_main(void *data)
 	td->runtime[0] = runtime[0] / 1000;
 	td->runtime[1] = runtime[1] / 1000;
 
-	if (td->bw_log)
-		finish_log(td, td->bw_log, "bw");
-	if (td->slat_log)
-		finish_log(td, td->slat_log, "slat");
-	if (td->clat_log)
-		finish_log(td, td->clat_log, "clat");
+	if (td->ts.bw_log)
+		finish_log(td, td->ts.bw_log, "bw");
+	if (td->ts.slat_log)
+		finish_log(td, td->ts.slat_log, "slat");
+	if (td->ts.clat_log)
+		finish_log(td, td->ts.clat_log, "clat");
 	if (td->write_iolog_file)
 		write_iolog_close(td);
 	if (td->exec_postrun) {
@@ -761,7 +798,7 @@ err:
 	close_ioengine(td);
 	cleanup_io_u(td);
 	td_set_runstate(td, TD_EXITED);
-	return (void *) td->error;
+	return (void *) (unsigned long) td->error;
 }
 
 /*
@@ -784,7 +821,7 @@ static int fork_main(int shmid, int offset)
 	td = data + offset * sizeof(struct thread_data);
 	ret = thread_main(td);
 	shmdt(data);
-	return (int) ret;
+	return (int) (unsigned long) ret;
 }
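Note: thread_main() now returns its exit status as (void *) (unsigned long) td->error, and fork_main() unwraps it with the mirror-image cast. The intermediate unsigned long matters on LP64 platforms, where int and void * differ in width and a direct cast draws a conversion warning; a pointer-sized integer keeps the round trip value-preserving. A standalone illustration (helper names are ours, not fio's):

#include <assert.h>

/* pack a small int status into a thread return value and back */
static void *status_to_ptr(int error)
{
	return (void *) (unsigned long) error;
}

static int ptr_to_status(void *ret)
{
	return (int) (unsigned long) ret;
}

int main(void)
{
	assert(ptr_to_status(status_to_ptr(13)) == 13);
	return 0;
}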
@@ -800,6 +837,8 @@ static void reap_threads(int *nr_running, int *t_rate, int *m_rate)
	 */
 	pending = cputhreads = 0;
 	for_each_td(td, i) {
+		int flags = 0;
+
 		/*
		 * ->io_ops is NULL for a thread that has closed its
		 * io engine
@@ -807,55 +846,66 @@ static void reap_threads(int *nr_running, int *t_rate, int *m_rate)
 		if (td->io_ops && td->io_ops->flags & FIO_CPUIO)
 			cputhreads++;
 
-		if (td->runstate < TD_EXITED) {
-			/*
			 * check if someone quit or got killed in an unusual way
			 */
-			ret = waitpid(td->pid, &status, WNOHANG);
-			if (ret < 0)
-				perror("waitpid");
-			else if ((ret == td->pid) && WIFSIGNALED(status)) {
-				int sig = WTERMSIG(status);
-
-				log_err("fio: pid=%d, got signal=%d\n", td->pid, sig);
+		if (!td->pid || td->runstate == TD_REAPED)
+			continue;
+		if (td->use_thread) {
+			if (td->runstate == TD_EXITED) {
 				td_set_runstate(td, TD_REAPED);
 				goto reaped;
 			}
+			continue;
 		}
 
-		if (td->runstate != TD_EXITED) {
-			if (td->runstate < TD_RUNNING)
-				pending++;
+		flags = WNOHANG;
+		if (td->runstate == TD_EXITED)
+			flags = 0;
 
-			continue;
-		}
+		/*
		 * check if someone quit or got killed in an unusual way
		 */
+		ret = waitpid(td->pid, &status, flags);
+		if (ret < 0) {
+			if (errno == ECHILD) {
+				log_err("fio: pid=%d disappeared %d\n", td->pid, td->runstate);
+				td_set_runstate(td, TD_REAPED);
+				goto reaped;
+			}
+			perror("waitpid");
+		} else if (ret == td->pid) {
+			if (WIFSIGNALED(status)) {
+				int sig = WTERMSIG(status);
 
-		if (td->error)
-			exit_value++;
+				log_err("fio: pid=%d, got signal=%d\n", td->pid, sig);
+				td_set_runstate(td, TD_REAPED);
+				goto reaped;
+			}
+			if (WIFEXITED(status)) {
+				if (WEXITSTATUS(status) && !td->error)
+					td->error = WEXITSTATUS(status);
 
-		td_set_runstate(td, TD_REAPED);
+				td_set_runstate(td, TD_REAPED);
+				goto reaped;
+			}
+		}
 
+		/*
		 * thread is not dead, continue
		 */
+		continue;
+reaped:
 		if (td->use_thread) {
 			long ret;
 
 			if (pthread_join(td->thread, (void *) &ret))
-				perror("thread_join");
-		} else {
-			int status;
-
-			ret = waitpid(td->pid, &status, 0);
-			if (ret < 0)
-				perror("waitpid");
-			else if (WIFEXITED(status) && WEXITSTATUS(status)) {
-				if (!exit_value)
-					exit_value++;
-			}
+				perror("pthread_join");
 		}
 
-reaped:
 		(*nr_running)--;
 		(*m_rate) -= td->ratemin;
 		(*t_rate) -= td->rate;
+
+		if (td->error)
+			exit_value++;
 	}
 
 	if (*nr_running == cputhreads && !pending)
@@ -911,6 +961,8 @@ static void run_threads(void)
 		init_disk_util(td);
 	}
 
+	set_genesis_time();
+
 	while (todo) {
 		struct thread_data *map[MAX_JOBS];
 		struct timeval this_start;
@@ -1069,6 +1121,8 @@ int main(int argc, char *argv[])
 		setup_log(&agg_io_log[DDIR_WRITE]);
 	}
 
+	set_genesis_time();
+
 	disk_util_timer_arm();
 	run_threads();
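Note: reaping now separates threads from forked jobs. Threads are joined only after they reach TD_EXITED; processes are polled with waitpid(WNOHANG) while running, waited on without WNOHANG once they have flagged TD_EXITED, and treated as reaped if waitpid() fails with ECHILD. An exit status from a child is propagated into td->error when the job itself recorded none. A standalone sketch of the status decoding used above (the 128+signal return is our convention, not fio's):

#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>

/*
 * Illustrative only: decode a waitpid() status the way reap_threads()
 * does above. Returns the exit status to propagate, or -1 if the
 * child is not actually dead (stopped/continued).
 */
int decode_status(pid_t pid, int status)
{
	if (WIFSIGNALED(status)) {
		fprintf(stderr, "pid=%d, got signal=%d\n", (int) pid, WTERMSIG(status));
		return 128 + WTERMSIG(status);	/* shell convention, our choice */
	}
	if (WIFEXITED(status))
		return WEXITSTATUS(status);

	return -1;
}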