X-Git-Url: https://git.kernel.dk/?p=fio.git;a=blobdiff_plain;f=fio.c;h=355d8730862643eb02aff27a90d56f1188818572;hp=478ef28aa7e2461365f2c30775e62949eb811400;hb=84422acde41c9cf462245de115d425cf5a82124c;hpb=4e991c23d2d06484f581eb5e2105bc102cb35941

diff --git a/fio.c b/fio.c
index 478ef28a..355d8730 100644
--- a/fio.c
+++ b/fio.c
@@ -35,7 +35,7 @@
 #include
 
 #include "fio.h"
-#include "os.h"
+#include "hash.h"
 
 unsigned long page_mask;
 unsigned long page_size;
@@ -60,6 +60,8 @@ struct io_log *agg_io_log[2];
 
 static inline void td_set_runstate(struct thread_data *td, int runstate)
 {
+	dprint(FD_PROCESS, "pid=%d: runstate %d -> %d\n", td->pid, td->runstate,
+						runstate);
 	td->runstate = runstate;
 }
 
@@ -70,13 +72,18 @@ static void terminate_threads(int group_id)
 
 	for_each_td(td, i) {
 		if (group_id == TERMINATE_ALL || groupid == td->groupid) {
+			dprint(FD_PROCESS, "setting terminate on %d\n",td->pid);
+
+			td->terminate = 1;
+			td->o.start_delay = 0;
+
 			/*
 			 * if the thread is running, just let it exit
 			 */
 			if (td->runstate < TD_RUNNING)
 				kill(td->pid, SIGQUIT);
-			td->terminate = 1;
-			td->start_delay = 0;
+			else if (td->io_ops->flags & FIO_SIGQUIT)
+				kill(td->pid, SIGQUIT);
 		}
 	}
 }
@@ -110,7 +117,7 @@ static int check_min_rate(struct thread_data *td, struct timeval *now)
 	/*
 	 * No minimum rate set, always ok
 	 */
-	if (!td->ratemin && !td->rate_iops_min)
+	if (!td->o.ratemin && !td->o.rate_iops_min)
 		return 0;
 
 	/*
@@ -133,20 +140,20 @@ static int check_min_rate(struct thread_data *td, struct timeval *now)
 	 */
 	if (td->rate_bytes || td->rate_blocks) {
 		spent = mtime_since(&td->lastrate, now);
-		if (spent < td->ratecycle)
+		if (spent < td->o.ratecycle)
 			return 0;
 
-		if (td->rate) {
+		if (td->o.rate) {
 			/*
 			 * check bandwidth specified rate
 			 */
 			if (bytes < td->rate_bytes) {
-				log_err("%s: min rate %u not met\n", td->name, td->ratemin);
+				log_err("%s: min rate %u not met\n", td->o.name, td->o.ratemin);
 				return 1;
 			} else {
 				rate = (bytes - td->rate_bytes) / spent;
-				if (rate < td->ratemin || bytes < td->rate_bytes) {
-					log_err("%s: min rate %u not met, got %luKiB/sec\n", td->name, td->ratemin, rate);
+				if (rate < td->o.ratemin || bytes < td->rate_bytes) {
+					log_err("%s: min rate %u not met, got %luKiB/sec\n", td->o.name, td->o.ratemin, rate);
 					return 1;
 				}
 			}
@@ -154,13 +161,13 @@ static int check_min_rate(struct thread_data *td, struct timeval *now)
 		/*
 		 * checks iops specified rate
 		 */
-		if (iops < td->rate_iops) {
-			log_err("%s: min iops rate %u not met\n", td->name, td->rate_iops);
+		if (iops < td->o.rate_iops) {
+			log_err("%s: min iops rate %u not met\n", td->o.name, td->o.rate_iops);
 			return 1;
 		} else {
 			rate = (iops - td->rate_blocks) / spent;
-			if (rate < td->rate_iops_min || iops < td->rate_blocks) {
-				log_err("%s: min iops rate %u not met, got %lu\n", td->name, td->rate_iops_min, rate);
+			if (rate < td->o.rate_iops_min || iops < td->rate_blocks) {
+				log_err("%s: min iops rate %u not met, got %lu\n", td->o.name, td->o.rate_iops_min, rate);
 			}
 		}
 	}
@@ -174,9 +181,9 @@ static int check_min_rate(struct thread_data *td, struct timeval *now)
 
 static inline int runtime_exceeded(struct thread_data *td, struct timeval *t)
 {
-	if (!td->timeout)
+	if (!td->o.timeout)
 		return 0;
-	if (mtime_since(&td->epoch, t) >= td->timeout * 1000)
+	if (mtime_since(&td->epoch, t) >= td->o.timeout * 1000)
 		return 1;
 
 	return 0;
@@ -311,6 +318,7 @@ static void do_verify(struct thread_data *td)
 
 		if (runtime_exceeded(td, &io_u->start_time)) {
 			put_io_u(td, io_u);
+			td->terminate = 1;
 			break;
 		}
 
@@ -333,6 +341,7 @@ static void do_verify(struct thread_data *td)
 				ret = -io_u->error;
 			else if (io_u->resid) {
 				int bytes = io_u->xfer_buflen - io_u->resid;
+				struct fio_file *f = io_u->file;
 
 				/*
 				 * zero read, fail
@@ -342,10 +351,20 @@ static void do_verify(struct thread_data *td)
 					put_io_u(td, io_u);
 					break;
 				}
+
 				io_u->xfer_buflen = io_u->resid;
 				io_u->xfer_buf += bytes;
+				io_u->offset += bytes;
+				f->last_completed_pos = io_u->offset;
+
+				td->ts.short_io_u[io_u->ddir]++;
+
+				if (io_u->offset == f->real_file_size)
+					goto sync_done;
+
 				requeue_io_u(td, &io_u);
 			} else {
+sync_done:
 				ret = io_u_sync_complete(td, io_u);
 				if (ret < 0)
 					break;
@@ -376,8 +395,8 @@ static void do_verify(struct thread_data *td)
 		if (queue_full(td) || ret == FIO_Q_BUSY) {
 			min_events = 1;
 
-			if (td->cur_depth > td->iodepth_low)
-				min_events = td->cur_depth - td->iodepth_low;
+			if (td->cur_depth > td->o.iodepth_low)
+				min_events = td->cur_depth - td->o.iodepth_low;
 		}
 
 		/*
@@ -412,7 +431,7 @@ static void do_io(struct thread_data *td)
 
 	td_set_runstate(td, TD_RUNNING);
 
-	while ((td->this_io_bytes[0] + td->this_io_bytes[1]) < td->io_size) {
+	while ((td->this_io_bytes[0] + td->this_io_bytes[1]) < td->o.size) {
 		struct timeval comp_time;
 		long bytes_done = 0;
 		int min_evts = 0;
@@ -430,9 +449,17 @@ static void do_io(struct thread_data *td)
 
 		if (runtime_exceeded(td, &s)) {
 			put_io_u(td, io_u);
+			td->terminate = 1;
 			break;
 		}
 
+		/*
+		 * Add verification end_io handler, if asked to verify
+		 * a previously written file.
+		 */
+		if (td->o.verify != VERIFY_NONE)
+			io_u->end_io = verify_io_u;
+
 		ret = td_io_queue(td, io_u);
 		switch (ret) {
 		case FIO_Q_COMPLETED:
@@ -440,6 +467,7 @@ static void do_io(struct thread_data *td)
 			if (io_u->error)
 				ret = -io_u->error;
 			else if (io_u->resid) {
 				int bytes = io_u->xfer_buflen - io_u->resid;
+				struct fio_file *f = io_u->file;
 
 				/*
 				 * zero read, fail
@@ -452,8 +480,17 @@ static void do_io(struct thread_data *td)
 
 				io_u->xfer_buflen = io_u->resid;
 				io_u->xfer_buf += bytes;
+				io_u->offset += bytes;
+				f->last_completed_pos = io_u->offset;
+
+				td->ts.short_io_u[io_u->ddir]++;
+
+				if (io_u->offset == f->real_file_size)
+					goto sync_done;
+
 				requeue_io_u(td, &io_u);
 			} else {
+sync_done:
 				fio_gettime(&comp_time, NULL);
 				bytes_done = io_u_sync_complete(td, io_u);
 				if (bytes_done < 0)
@@ -492,8 +529,8 @@ static void do_io(struct thread_data *td)
 		if (queue_full(td) || ret == FIO_Q_BUSY) {
 			min_evts = 1;
 
-			if (td->cur_depth > td->iodepth_low)
-				min_evts = td->cur_depth - td->iodepth_low;
+			if (td->cur_depth > td->o.iodepth_low)
+				min_evts = td->cur_depth - td->o.iodepth_low;
 		}
 
 		fio_gettime(&comp_time, NULL);
@@ -521,23 +558,27 @@ static void do_io(struct thread_data *td)
 			break;
 		}
 
-		if (td->thinktime) {
+		if (td->o.thinktime) {
 			unsigned long long b;
 
 			b = td->io_blocks[0] + td->io_blocks[1];
-			if (!(b % td->thinktime_blocks)) {
+			if (!(b % td->o.thinktime_blocks)) {
 				int left;
 
-				if (td->thinktime_spin)
-					__usec_sleep(td->thinktime_spin);
+				if (td->o.thinktime_spin)
+					__usec_sleep(td->o.thinktime_spin);
 
-				left = td->thinktime - td->thinktime_spin;
+				left = td->o.thinktime - td->o.thinktime_spin;
 				if (left)
 					usec_sleep(td, left);
 			}
 		}
 	}
 
+	if (td->o.fill_device && td->error == ENOSPC) {
+		td->error = 0;
+		td->terminate = 1;
+	}
 	if (!td->error) {
 		struct fio_file *f;
 
@@ -545,7 +586,7 @@ static void do_io(struct thread_data *td)
 		if (i)
 			ret = io_u_queued_complete(td, i);
 
-		if (should_fsync(td) && td->end_fsync) {
+		if (should_fsync(td) && td->o.end_fsync) {
 			td_set_runstate(td, TD_FSYNCING);
 
 			for_each_file(td, f, i) {
@@ -556,6 +597,12 @@ static void do_io(struct thread_data *td)
 		}
 	} else
 		cleanup_pending_aio(td);
+
+	/*
+	 * stop job if we failed doing any IO
+	 */
+	if ((td->this_io_bytes[0] + td->this_io_bytes[1]) == 0)
+		td->done = 1;
 }
 
 static void cleanup_io_u(struct thread_data *td)
@@ -576,14 +623,17 @@
 /*
  * "randomly" fill the buffer contents
  */
-static void fill_rand_buf(struct io_u *io_u, int max_bs)
+static void fill_io_buf(struct thread_data *td, struct io_u *io_u, int max_bs)
 {
-	int *ptr = io_u->buf;
+	long *ptr = io_u->buf;
 
-	while ((void *) ptr - io_u->buf < max_bs) {
-		*ptr = rand() * 0x9e370001;
-		ptr++;
-	}
+	if (!td->o.zero_buffers) {
+		while ((void *) ptr - io_u->buf < max_bs) {
+			*ptr = rand() * GOLDEN_RATIO_PRIME;
+			ptr++;
+		}
+	} else
+		memset(ptr, 0, max_bs);
 }
 
 static int init_io_u(struct thread_data *td)
@@ -593,31 +643,39 @@
 	struct io_u *io_u;
 	unsigned int max_bs;
 	int i, max_units;
 	char *p;
 
-	if (td->io_ops->flags & FIO_SYNCIO)
-		max_units = 1;
-	else
-		max_units = td->iodepth;
+	max_units = td->o.iodepth;
+	max_bs = max(td->o.max_bs[DDIR_READ], td->o.max_bs[DDIR_WRITE]);
+	td->orig_buffer_size = (unsigned long long) max_bs * (unsigned long long) max_units;
 
-	max_bs = max(td->max_bs[DDIR_READ], td->max_bs[DDIR_WRITE]);
-	td->orig_buffer_size = max_bs * max_units;
+	if (td->o.mem_type == MEM_SHMHUGE || td->o.mem_type == MEM_MMAPHUGE)
+		td->orig_buffer_size = (td->orig_buffer_size + td->o.hugepage_size - 1) & ~(td->o.hugepage_size - 1);
 
-	if (td->mem_type == MEM_SHMHUGE || td->mem_type == MEM_MMAPHUGE)
-		td->orig_buffer_size = (td->orig_buffer_size + td->hugepage_size - 1) & ~(td->hugepage_size - 1);
-	else
-		td->orig_buffer_size += page_mask;
+	if (td->orig_buffer_size != (size_t) td->orig_buffer_size) {
+		log_err("fio: IO memory too large. Reduce max_bs or iodepth\n");
+		return 1;
+	}
 
 	if (allocate_io_mem(td))
 		return 1;
 
-	p = ALIGN(td->orig_buffer);
+	if (td->o.odirect)
+		p = ALIGN(td->orig_buffer);
+	else
+		p = td->orig_buffer;
+
 	for (i = 0; i < max_units; i++) {
+		if (td->terminate)
+			return 1;
 		io_u = malloc(sizeof(*io_u));
 		memset(io_u, 0, sizeof(*io_u));
 
 		INIT_LIST_HEAD(&io_u->list);
 
-		io_u->buf = p + max_bs * i;
-		if (td_write(td) || td_rw(td))
-			fill_rand_buf(io_u, max_bs);
+		if (!(td->io_ops->flags & FIO_NOIO)) {
+			io_u->buf = p + max_bs * i;
+
+			if (td_write(td))
+				fill_io_buf(td, io_u, max_bs);
+		}
 
 		io_u->index = i;
 		io_u->flags = IO_U_F_FREE;
@@ -642,14 +700,18 @@ static int switch_ioscheduler(struct thread_data *td)
 
 	f = fopen(tmp, "r+");
 	if (!f) {
-		td_verror(td, errno, "fopen");
+		if (errno == ENOENT) {
+			log_err("fio: os or kernel doesn't support IO scheduler switching\n");
+			return 0;
+		}
+		td_verror(td, errno, "fopen iosched");
 		return 1;
 	}
 
 	/*
 	 * Set io scheduler.
 	 */
-	ret = fwrite(td->ioscheduler, strlen(td->ioscheduler), 1, f);
+	ret = fwrite(td->o.ioscheduler, strlen(td->o.ioscheduler), 1, f);
 	if (ferror(f) || ret != 1) {
 		td_verror(td, errno, "fwrite");
 		fclose(f);
@@ -668,9 +730,9 @@ static int switch_ioscheduler(struct thread_data *td)
 		return 1;
 	}
 
-	sprintf(tmp2, "[%s]", td->ioscheduler);
+	sprintf(tmp2, "[%s]", td->o.ioscheduler);
 	if (!strstr(tmp, tmp2)) {
-		log_err("fio: io scheduler %s not found\n", td->ioscheduler);
+		log_err("fio: io scheduler %s not found\n", td->o.ioscheduler);
 		td_verror(td, EINVAL, "iosched_switch");
 		fclose(f);
 		return 1;
@@ -680,6 +742,26 @@ static int switch_ioscheduler(struct thread_data *td)
 	return 0;
 }
 
+static int keep_running(struct thread_data *td)
+{
+	unsigned long long io_done;
+
+	if (td->done)
+		return 0;
+	if (td->o.time_based)
+		return 1;
+	if (td->o.loops) {
+		td->o.loops--;
+		return 1;
+	}
+
+	io_done = td->io_bytes[DDIR_READ] + td->io_bytes[DDIR_WRITE] + td->io_skip_bytes;
+	if (io_done < td->o.size)
+		return 1;
+
+	return 0;
+}
+
 static int clear_io_state(struct thread_data *td)
 {
 	struct fio_file *f;
@@ -691,14 +773,22 @@ static int clear_io_state(struct thread_data *td)
 	td->zone_bytes = 0;
 	td->rate_bytes = 0;
 	td->rate_blocks = 0;
+	td->rw_end_set[0] = td->rw_end_set[1] = 0;
 
 	td->last_was_sync = 0;
 
+	/*
+	 * reset file done count if we are to start over
+	 */
+	if (td->o.time_based || td->o.loops)
+		td->nr_done_files = 0;
+
 	for_each_file(td, f, i)
 		td_io_close_file(td, f);
 
 	ret = 0;
 	for_each_file(td, f, i) {
+		f->flags &= ~FIO_FILE_DONE;
 		ret = td_io_open_file(td, f);
 		if (ret)
 			break;
@@ -713,58 +803,65 @@
  */
static void *thread_main(void *data)
{
-	unsigned long long runtime[2];
+	unsigned long long runtime[2], elapsed;
 	struct thread_data *td = data;
 	int clear_state;
 
-	if (!td->use_thread)
+	if (!td->o.use_thread)
 		setsid();
 
 	td->pid = getpid();
 
+	dprint(FD_PROCESS, "jobs pid=%d started\n", td->pid);
+
 	INIT_LIST_HEAD(&td->io_u_freelist);
 	INIT_LIST_HEAD(&td->io_u_busylist);
 	INIT_LIST_HEAD(&td->io_u_requeues);
-	INIT_LIST_HEAD(&td->io_hist_list);
 	INIT_LIST_HEAD(&td->io_log_list);
+	INIT_LIST_HEAD(&td->io_hist_list);
+	td->io_hist_tree = RB_ROOT;
+
+	td_set_runstate(td, TD_INITIALIZED);
+	fio_sem_up(startup_sem);
+	fio_sem_down(td->mutex);
+
+	/*
+	 * the ->mutex semaphore is now no longer used, close it to avoid
+	 * eating a file descriptor
+	 */
+	fio_sem_remove(td->mutex);
+
+	/*
+	 * May alter parameters that init_io_u() will use, so we need to
+	 * do this first.
+ */ + if (init_iolog(td)) + goto err; if (init_io_u(td)) - goto err_sem; + goto err; - if (fio_setaffinity(td) == -1) { + if (td->o.cpumask_set && fio_setaffinity(td) == -1) { td_verror(td, errno, "cpu_set_affinity"); - goto err_sem; + goto err; } - if (init_iolog(td)) - goto err_sem; - - if (td->ioprio) { + if (td->ioprio_set) { if (ioprio_set(IOPRIO_WHO_PROCESS, 0, td->ioprio) == -1) { td_verror(td, errno, "ioprio_set"); - goto err_sem; + goto err; } } - if (nice(td->nice) == -1) { + if (nice(td->o.nice) == -1) { td_verror(td, errno, "nice"); - goto err_sem; + goto err; } - if (td->ioscheduler && switch_ioscheduler(td)) - goto err_sem; - - td_set_runstate(td, TD_INITIALIZED); - fio_sem_up(startup_sem); - fio_sem_down(td->mutex); - - /* - * the ->mutex semaphore is now no longer used, close it to avoid - * eating a file descriptor - */ - fio_sem_remove(td->mutex); + if (td->o.ioscheduler && switch_ioscheduler(td)) + goto err; - if (!td->create_serialize && setup_files(td)) + if (!td->o.create_serialize && setup_files(td)) goto err; if (td_io_init(td)) @@ -773,8 +870,11 @@ static void *thread_main(void *data) if (open_files(td)) goto err; - if (td->exec_prerun) { - if (system(td->exec_prerun) < 0) + if (init_random_map(td)) + goto err; + + if (td->o.exec_prerun) { + if (system(td->o.exec_prerun) < 0) goto err; } @@ -784,11 +884,11 @@ static void *thread_main(void *data) runtime[0] = runtime[1] = 0; clear_state = 0; - while (td->loops--) { + while (keep_running(td)) { fio_gettime(&td->start, NULL); memcpy(&td->ts.stat_sample_time, &td->start, sizeof(td->start)); - if (td->ratemin) + if (td->o.ratemin) memcpy(&td->lastrate, &td->ts.stat_sample_time, sizeof(td->lastrate)); if (clear_state && clear_io_state(td)) @@ -800,15 +900,29 @@ static void *thread_main(void *data) clear_state = 1; - if (td_read(td) && td->io_bytes[DDIR_READ]) - runtime[DDIR_READ] += utime_since_now(&td->start); - if (td_write(td) && td->io_bytes[DDIR_WRITE]) - runtime[DDIR_WRITE] += utime_since_now(&td->start); + if (td_read(td) && td->io_bytes[DDIR_READ]) { + if (td->rw_end_set[DDIR_READ]) + elapsed = utime_since(&td->start, &td->rw_end[DDIR_READ]); + else + elapsed = utime_since_now(&td->start); + + runtime[DDIR_READ] += elapsed; + } + if (td_write(td) && td->io_bytes[DDIR_WRITE]) { + if (td->rw_end_set[DDIR_WRITE]) + elapsed = utime_since(&td->start, &td->rw_end[DDIR_WRITE]); + else + elapsed = utime_since_now(&td->start); + + runtime[DDIR_WRITE] += elapsed; + } if (td->error || td->terminate) break; - if (td->verify == VERIFY_NONE) + if (!td->o.do_verify || + td->o.verify == VERIFY_NONE || + (td->io_ops->flags & FIO_UNIDIR)) continue; if (clear_io_state(td)) @@ -825,8 +939,8 @@ static void *thread_main(void *data) } update_rusage_stat(td); - td->ts.runtime[0] = runtime[0] / 1000; - td->ts.runtime[1] = runtime[1] / 1000; + td->ts.runtime[0] = (runtime[0] + 999) / 1000; + td->ts.runtime[1] = (runtime[1] + 999) / 1000; td->ts.total_run_time = mtime_since_now(&td->epoch); td->ts.io_bytes[0] = td->io_bytes[0]; td->ts.io_bytes[1] = td->io_bytes[1]; @@ -837,11 +951,9 @@ static void *thread_main(void *data) finish_log(td, td->ts.slat_log, "slat"); if (td->ts.clat_log) finish_log(td, td->ts.clat_log, "clat"); - if (td->write_iolog_file) - write_iolog_close(td); - if (td->exec_postrun) { - if (system(td->exec_postrun) < 0) - log_err("fio: postrun %s failed\n", td->exec_postrun); + if (td->o.exec_postrun) { + if (system(td->o.exec_postrun) < 0) + log_err("fio: postrun %s failed\n", td->o.exec_postrun); } if 
@@ -853,11 +965,16 @@ err:
 	close_files(td);
 	close_ioengine(td);
 	cleanup_io_u(td);
+
+	/*
+	 * do this very late, it will log file closing as well
+	 */
+	if (td->o.write_iolog_file)
+		write_iolog_close(td);
+
+	options_mem_free(td);
 	td_set_runstate(td, TD_EXITED);
 	return (void *) (unsigned long) td->error;
 
-err_sem:
-	fio_sem_up(startup_sem);
-	goto err;
 }
 
@@ -889,12 +1006,12 @@ static int fork_main(int shmid, int offset)
 static void reap_threads(int *nr_running, int *t_rate, int *m_rate)
 {
 	struct thread_data *td;
-	int i, cputhreads, pending, status, ret;
+	int i, cputhreads, realthreads, pending, status, ret;
 
 	/*
 	 * reap exited threads (TD_EXITED -> TD_REAPED)
 	 */
-	pending = cputhreads = 0;
+	realthreads = pending = cputhreads = 0;
 
 	for_each_td(td, i) {
 		int flags = 0;
@@ -904,10 +1021,12 @@ static void reap_threads(int *nr_running, int *t_rate, int *m_rate)
 		 */
 		if (td->io_ops && !strcmp(td->io_ops->name, "cpuio"))
 			cputhreads++;
+		else
+			realthreads++;
 
 		if (!td->pid || td->runstate == TD_REAPED)
 			continue;
 
-		if (td->use_thread) {
+		if (td->o.use_thread) {
 			if (td->runstate == TD_EXITED) {
 				td_set_runstate(td, TD_REAPED);
 				goto reaped;
@@ -951,24 +1070,29 @@ static void reap_threads(int *nr_running, int *t_rate, int *m_rate)
 		/*
 		 * thread is not dead, continue
 		 */
+		pending++;
 		continue;
reaped:
-		if (td->use_thread) {
+		if (td->o.use_thread) {
 			long ret;
 
-			if (pthread_join(td->thread, (void *) &ret))
+			dprint(FD_PROCESS, "joining tread %d\n", td->pid);
+			if (pthread_join(td->thread, (void *) &ret)) {
+				dprint(FD_PROCESS, "join failed %ld\n", ret);
 				perror("pthread_join");
+			}
 		}
 
 		(*nr_running)--;
-		(*m_rate) -= td->ratemin;
-		(*t_rate) -= td->rate;
+		(*m_rate) -= td->o.ratemin;
+		(*t_rate) -= td->o.rate;
+		pending--;
 
 		if (td->error)
 			exit_value++;
 	}
 
-	if (*nr_running == cputhreads && !pending)
+	if (*nr_running == cputhreads && !pending && realthreads)
 		terminate_threads(TERMINATE_ALL);
 }
 
@@ -1008,7 +1132,7 @@ static void run_threads(void)
 	for_each_td(td, i) {
 		print_status_init(td->thread_number - 1);
 
-		if (!td->create_serialize) {
+		if (!td->o.create_serialize) {
 			init_disk_util(td);
 			continue;
 		}
@@ -1052,14 +1176,14 @@ static void run_threads(void)
 				continue;
 			}
 
-			if (td->start_delay) {
+			if (td->o.start_delay) {
 				spent = mtime_since_genesis();
 
-				if (td->start_delay * 1000 > spent)
+				if (td->o.start_delay * 1000 > spent)
 					continue;
 			}
 
-			if (td->stonewall && (nr_started || nr_running))
+			if (td->o.stonewall && (nr_started || nr_running))
 				break;
 
 			/*
@@ -1070,13 +1194,15 @@ static void run_threads(void)
 			map[this_jobs++] = td;
 			nr_started++;
 
-			if (td->use_thread) {
+			if (td->o.use_thread) {
+				dprint(FD_PROCESS, "will pthread_create\n");
 				if (pthread_create(&td->thread, NULL, thread_main, td)) {
 					perror("thread_create");
 					nr_started--;
 					break;
 				}
 			} else {
+				dprint(FD_PROCESS, "will fork\n");
 				if (!fork()) {
 					int ret = fork_main(shm_id, i);
@@ -1135,8 +1261,8 @@ static void run_threads(void)
 			td_set_runstate(td, TD_RUNNING);
 			nr_running++;
 			nr_started--;
-			m_rate += td->ratemin;
-			t_rate += td->rate;
+			m_rate += td->o.ratemin;
+			t_rate += td->o.rate;
 			todo--;
 			fio_sem_up(td->mutex);
 		}
@@ -1170,10 +1296,8 @@ int main(int argc, char *argv[])
 	if (parse_options(argc, argv))
 		return 1;
 
-	if (!thread_number) {
-		log_err("Nothing to do\n");
-		return 1;
-	}
+	if (!thread_number)
+		return 0;
 
 	ps = sysconf(_SC_PAGESIZE);
 	if (ps < 0) {