X-Git-Url: https://git.kernel.dk/?p=fio.git;a=blobdiff_plain;f=fio.c;h=fcf3ae80c1fd5316c0ce121434eef2426b4471fe;hp=ab84b60a59ca77ace2e9984b4353b530a569ed8c;hb=492158cf6a2e81886c43a6e696b17d7160ec5540;hpb=b2560f3ca4677e2908fc4111f4d04fae5df3b229

diff --git a/fio.c b/fio.c
index ab84b60a..fcf3ae80 100644
--- a/fio.c
+++ b/fio.c
@@ -35,18 +35,21 @@
 #include
 #include "fio.h"
-#include "os.h"
+#include "hash.h"
-static unsigned long page_mask;
+unsigned long page_mask;
+unsigned long page_size;
 #define ALIGN(buf) \
 	(char *) (((unsigned long) (buf) + page_mask) & ~page_mask)
 int groupid = 0;
 int thread_number = 0;
+int nr_process = 0;
+int nr_thread = 0;
 int shm_id = 0;
 int temp_stall_ts;
-static volatile int startup_sem;
+static struct fio_sem *startup_sem;
 static volatile int fio_abort;
 static int exit_value;
@@ -73,7 +76,7 @@ static void terminate_threads(int group_id)
 			if (td->runstate < TD_RUNNING)
 				kill(td->pid, SIGQUIT);
 			td->terminate = 1;
-			td->start_delay = 0;
+			td->o.start_delay = 0;
 		}
 	}
 }
@@ -100,13 +103,14 @@ static void sig_handler(int sig)
 static int check_min_rate(struct thread_data *td, struct timeval *now)
 {
 	unsigned long long bytes = 0;
+	unsigned long iops = 0;
 	unsigned long spent;
 	unsigned long rate;
 	/*
 	 * No minimum rate set, always ok
 	 */
-	if (!td->ratemin)
+	if (!td->o.ratemin && !td->o.rate_iops_min)
 		return 0;
 	/*
@@ -115,41 +119,64 @@ static int check_min_rate(struct thread_data *td, struct timeval *now)
 	if (mtime_since(&td->start, now) < 2000)
 		return 0;
-	if (td_read(td))
+	if (td_read(td)) {
+		iops += td->io_blocks[DDIR_READ];
 		bytes += td->this_io_bytes[DDIR_READ];
-	if (td_write(td))
+	}
+	if (td_write(td)) {
+		iops += td->io_blocks[DDIR_WRITE];
 		bytes += td->this_io_bytes[DDIR_WRITE];
+	}
 	/*
 	 * if rate blocks is set, sample is running
 	 */
-	if (td->rate_bytes) {
+	if (td->rate_bytes || td->rate_blocks) {
 		spent = mtime_since(&td->lastrate, now);
-		if (spent < td->ratecycle)
+		if (spent < td->o.ratecycle)
 			return 0;
-		if (bytes < td->rate_bytes) {
-			fprintf(f_out, "%s: min rate %u not met\n", td->name, td->ratemin);
-			return 1;
+		if (td->o.rate) {
+			/*
+			 * check bandwidth specified rate
+			 */
+			if (bytes < td->rate_bytes) {
+				log_err("%s: min rate %u not met\n", td->o.name, td->o.ratemin);
+				return 1;
+			} else {
+				rate = (bytes - td->rate_bytes) / spent;
+				if (rate < td->o.ratemin || bytes < td->rate_bytes) {
+					log_err("%s: min rate %u not met, got %luKiB/sec\n", td->o.name, td->o.ratemin, rate);
+					return 1;
+				}
+			}
 		} else {
-			rate = (bytes - td->rate_bytes) / spent;
-			if (rate < td->ratemin || bytes < td->rate_bytes) {
-				fprintf(f_out, "%s: min rate %u not met, got %luKiB/sec\n", td->name, td->ratemin, rate);
+			/*
+			 * checks iops specified rate
+			 */
+			if (iops < td->o.rate_iops) {
+				log_err("%s: min iops rate %u not met\n", td->o.name, td->o.rate_iops);
 				return 1;
+			} else {
+				rate = (iops - td->rate_blocks) / spent;
+				if (rate < td->o.rate_iops_min || iops < td->rate_blocks) {
+					log_err("%s: min iops rate %u not met, got %lu\n", td->o.name, td->o.rate_iops_min, rate);
+				}
 			}
 		}
 	}
 	td->rate_bytes = bytes;
+	td->rate_blocks = iops;
 	memcpy(&td->lastrate, now, sizeof(*now));
 	return 0;
 }
 static inline int runtime_exceeded(struct thread_data *td, struct timeval *t)
 {
-	if (!td->timeout)
+	if (!td->o.timeout)
 		return 0;
-	if (mtime_since(&td->epoch, t) >= td->timeout * 1000)
+	if (mtime_since(&td->epoch, t) >= td->o.timeout * 1000)
 		return 1;
 	return 0;
@@ -253,13 +280,16 @@ static void do_verify(struct thread_data *td)
 {
 	struct fio_file *f;
 	struct io_u *io_u;
-	int ret, i, min_events;
+	int ret, min_events;
+	unsigned int i;
 	/*
 	 * sync io first and invalidate cache, to make sure we really
 	 * read from disk.
 	 */
 	for_each_file(td, f, i) {
+		if (!(f->flags & FIO_FILE_OPEN))
+			continue;
 		if (fio_io_sync(td, f))
 			break;
 		if (file_invalidate_cache(td, f))
@@ -281,6 +311,7 @@ static void do_verify(struct thread_data *td)
 		if (runtime_exceeded(td, &io_u->start_time)) {
 			put_io_u(td, io_u);
+			td->terminate = 1;
 			break;
 		}
@@ -301,13 +332,32 @@ static void do_verify(struct thread_data *td)
 		case FIO_Q_COMPLETED:
 			if (io_u->error)
 				ret = -io_u->error;
-			else if (io_u->xfer_buflen != io_u->resid && io_u->resid) {
+			else if (io_u->resid) {
 				int bytes = io_u->xfer_buflen - io_u->resid;
+				struct fio_file *f = io_u->file;
+
+				/*
+				 * zero read, fail
+				 */
+				if (!bytes) {
+					td_verror(td, ENODATA, "full resid");
+					put_io_u(td, io_u);
+					break;
+				}
 				io_u->xfer_buflen = io_u->resid;
 				io_u->xfer_buf += bytes;
+				io_u->offset += bytes;
+				f->last_completed_pos = io_u->offset;
+
+				td->ts.short_io_u[io_u->ddir]++;
+
+				if (io_u->offset == f->real_file_size)
+					goto sync_done;
+
 				requeue_io_u(td, &io_u);
 			} else {
+sync_done:
 				ret = io_u_sync_complete(td, io_u);
 				if (ret < 0)
 					break;
@@ -338,8 +388,8 @@ static void do_verify(struct thread_data *td)
 		if (queue_full(td) || ret == FIO_Q_BUSY) {
 			min_events = 1;
-			if (td->cur_depth > td->iodepth_low)
-				min_events = td->cur_depth - td->iodepth_low;
+			if (td->cur_depth > td->o.iodepth_low)
+				min_events = td->cur_depth - td->o.iodepth_low;
 		}
 		/*
@@ -361,31 +411,6 @@ static void do_verify(struct thread_data *td)
 	td_set_runstate(td, TD_RUNNING);
 }
-/*
- * Not really an io thread, all it does is burn CPU cycles in the specified
- * manner.
- */
-static void do_cpuio(struct thread_data *td)
-{
-	struct timeval e;
-	int split = 100 / td->cpuload;
-	int i = 0;
-
-	while (!td->terminate) {
-		fio_gettime(&e, NULL);
-
-		if (runtime_exceeded(td, &e))
-			break;
-
-		if (!(i % split))
-			__usec_sleep(10000);
-		else
-			usec_sleep(td, 10000);
-
-		i++;
-	}
-}
-
 /*
  * Main IO worker function. It retrieves io_u's to process and queues
  * and reaps them, checking for rate and errors along the way.
@@ -394,11 +419,12 @@ static void do_io(struct thread_data *td)
 {
 	struct timeval s;
 	unsigned long usec;
-	int i, ret = 0;
+	unsigned int i;
+	int ret = 0;
 	td_set_runstate(td, TD_RUNNING);
-	while ((td->this_io_bytes[0] + td->this_io_bytes[1]) < td->io_size) {
+	while ((td->this_io_bytes[0] + td->this_io_bytes[1]) < td->o.size) {
 		struct timeval comp_time;
 		long bytes_done = 0;
 		int min_evts = 0;
@@ -416,6 +442,7 @@ static void do_io(struct thread_data *td)
 		if (runtime_exceeded(td, &s)) {
 			put_io_u(td, io_u);
+			td->terminate = 1;
 			break;
 		}
@@ -424,13 +451,32 @@ static void do_io(struct thread_data *td)
 		case FIO_Q_COMPLETED:
 			if (io_u->error)
 				ret = -io_u->error;
-			else if (io_u->xfer_buflen != io_u->resid && io_u->resid) {
+			else if (io_u->resid) {
 				int bytes = io_u->xfer_buflen - io_u->resid;
+				struct fio_file *f = io_u->file;
+
+				/*
+				 * zero read, fail
+				 */
+				if (!bytes) {
+					td_verror(td, ENODATA, "full resid");
+					put_io_u(td, io_u);
+					break;
+				}
 				io_u->xfer_buflen = io_u->resid;
 				io_u->xfer_buf += bytes;
+				io_u->offset += bytes;
+				f->last_completed_pos = io_u->offset;
+
+				td->ts.short_io_u[io_u->ddir]++;
+
+				if (io_u->offset == f->real_file_size)
+					goto sync_done;
+
 				requeue_io_u(td, &io_u);
 			} else {
+sync_done:
 				fio_gettime(&comp_time, NULL);
 				bytes_done = io_u_sync_complete(td, io_u);
 				if (bytes_done < 0)
@@ -469,8 +515,8 @@ static void do_io(struct thread_data *td)
 		if (queue_full(td) || ret == FIO_Q_BUSY) {
 			min_evts = 1;
-			if (td->cur_depth > td->iodepth_low)
-				min_evts = td->cur_depth - td->iodepth_low;
+			if (td->cur_depth > td->o.iodepth_low)
+				min_evts = td->cur_depth - td->o.iodepth_low;
 		}
 		fio_gettime(&comp_time, NULL);
@@ -498,17 +544,17 @@ static void do_io(struct thread_data *td)
 			break;
 		}
-		if (td->thinktime) {
+		if (td->o.thinktime) {
 			unsigned long long b;
 			b = td->io_blocks[0] + td->io_blocks[1];
-			if (!(b % td->thinktime_blocks)) {
+			if (!(b % td->o.thinktime_blocks)) {
 				int left;
-				if (td->thinktime_spin)
-					__usec_sleep(td->thinktime_spin);
+				if (td->o.thinktime_spin)
+					__usec_sleep(td->o.thinktime_spin);
-				left = td->thinktime - td->thinktime_spin;
+				left = td->o.thinktime - td->o.thinktime_spin;
 				if (left)
 					usec_sleep(td, left);
 			}
@@ -522,10 +568,14 @@ static void do_io(struct thread_data *td)
 		if (i)
 			ret = io_u_queued_complete(td, i);
-		if (should_fsync(td) && td->end_fsync) {
+		if (should_fsync(td) && td->o.end_fsync) {
 			td_set_runstate(td, TD_FSYNCING);
-			for_each_file(td, f, i)
+
+			for_each_file(td, f, i) {
+				if (!(f->flags & FIO_FILE_OPEN))
+					continue;
 				fio_io_sync(td, f);
+			}
 		}
 	} else
 		cleanup_pending_aio(td);
@@ -549,51 +599,62 @@ static void cleanup_io_u(struct thread_data *td)
 /*
  * "randomly" fill the buffer contents
  */
-static void fill_rand_buf(struct io_u *io_u, int max_bs)
+static void fill_io_buf(struct thread_data *td, struct io_u *io_u, int max_bs)
 {
-	int *ptr = io_u->buf;
+	long *ptr = io_u->buf;
-	while ((void *) ptr - io_u->buf < max_bs) {
-		*ptr = rand() * 0x9e370001;
-		ptr++;
-	}
+	if (!td->o.zero_buffers) {
+		while ((void *) ptr - io_u->buf < max_bs) {
+			*ptr = rand() * GOLDEN_RATIO_PRIME;
+			ptr++;
+		}
+	} else
+		memset(ptr, 0, max_bs);
 }
 static int init_io_u(struct thread_data *td)
 {
+	unsigned long long buf_size;
 	struct io_u *io_u;
 	unsigned int max_bs;
 	int i, max_units;
 	char *p;
-	if (td->io_ops->flags & FIO_CPUIO)
-		return 0;
-
 	if (td->io_ops->flags & FIO_SYNCIO)
 		max_units = 1;
 	else
-		max_units = td->iodepth;
+		max_units = td->o.iodepth;
-	max_bs = max(td->max_bs[DDIR_READ], td->max_bs[DDIR_WRITE]);
-	td->orig_buffer_size = max_bs * max_units;
+	max_bs = max(td->o.max_bs[DDIR_READ], td->o.max_bs[DDIR_WRITE]);
+	buf_size = (unsigned long long) max_bs * (unsigned long long) max_units;
+	buf_size += page_mask;
+	if (buf_size != (size_t) buf_size) {
+		log_err("fio: IO memory too large. Reduce max_bs or iodepth\n");
+		return 1;
+	}
-	if (td->mem_type == MEM_SHMHUGE || td->mem_type == MEM_MMAPHUGE)
-		td->orig_buffer_size = (td->orig_buffer_size + td->hugepage_size - 1) & ~(td->hugepage_size - 1);
-	else
-		td->orig_buffer_size += page_mask;
+	td->orig_buffer_size = buf_size;
+
+	if (td->o.mem_type == MEM_SHMHUGE || td->o.mem_type == MEM_MMAPHUGE)
+		td->orig_buffer_size = (td->orig_buffer_size + td->o.hugepage_size - 1) & ~(td->o.hugepage_size - 1);
+	else if (td->orig_buffer_size & page_mask)
+		td->orig_buffer_size = (td->orig_buffer_size + page_mask) & ~page_mask;
 	if (allocate_io_mem(td))
 		return 1;
 	p = ALIGN(td->orig_buffer);
 	for (i = 0; i < max_units; i++) {
+		if (td->terminate)
+			return 1;
 		io_u = malloc(sizeof(*io_u));
 		memset(io_u, 0, sizeof(*io_u));
 		INIT_LIST_HEAD(&io_u->list);
 		io_u->buf = p + max_bs * i;
-		if (td_write(td) || td_rw(td))
-			fill_rand_buf(io_u, max_bs);
+
+		if (td_write(td))
+			fill_io_buf(td, io_u, max_bs);
 		io_u->index = i;
 		io_u->flags = IO_U_F_FREE;
@@ -611,21 +672,25 @@ static int switch_ioscheduler(struct thread_data *td)
 	FILE *f;
 	int ret;
-	if (td->io_ops->flags & FIO_CPUIO)
+	if (td->io_ops->flags & FIO_DISKLESSIO)
 		return 0;
 	sprintf(tmp, "%s/queue/scheduler", td->sysfs_root);
 	f = fopen(tmp, "r+");
 	if (!f) {
-		td_verror(td, errno, "fopen");
+		if (errno == ENOENT) {
+			log_err("fio: os or kernel doesn't support IO scheduler switching\n");
+			return 0;
+		}
+		td_verror(td, errno, "fopen iosched");
 		return 1;
 	}
 	/*
 	 * Set io scheduler.
 	 */
-	ret = fwrite(td->ioscheduler, strlen(td->ioscheduler), 1, f);
+	ret = fwrite(td->o.ioscheduler, strlen(td->o.ioscheduler), 1, f);
 	if (ferror(f) || ret != 1) {
 		td_verror(td, errno, "fwrite");
 		fclose(f);
@@ -644,9 +709,9 @@ static int switch_ioscheduler(struct thread_data *td)
 		return 1;
 	}
-	sprintf(tmp2, "[%s]", td->ioscheduler);
+	sprintf(tmp2, "[%s]", td->o.ioscheduler);
 	if (!strstr(tmp, tmp2)) {
-		log_err("fio: io scheduler %s not found\n", td->ioscheduler);
+		log_err("fio: io scheduler %s not found\n", td->o.ioscheduler);
 		td_verror(td, EINVAL, "iosched_switch");
 		fclose(f);
 		return 1;
@@ -656,27 +721,53 @@ static int switch_ioscheduler(struct thread_data *td)
 	return 0;
 }
-static void clear_io_state(struct thread_data *td)
+static int keep_running(struct thread_data *td)
+{
+	unsigned long long io_done;
+
+	if (td->o.time_based)
+		return 1;
+	if (td->o.loops) {
+		td->o.loops--;
+		return 1;
+	}
+
+	io_done = td->io_bytes[DDIR_READ] + td->io_bytes[DDIR_WRITE];
+	if (io_done < td->o.size)
+		return 1;
+
+	return 0;
+}
+
+static int clear_io_state(struct thread_data *td)
 {
 	struct fio_file *f;
-	int i;
+	unsigned int i;
+	int ret;
-	td->ts->stat_io_bytes[0] = td->ts->stat_io_bytes[1] = 0;
+	td->ts.stat_io_bytes[0] = td->ts.stat_io_bytes[1] = 0;
 	td->this_io_bytes[0] = td->this_io_bytes[1] = 0;
 	td->zone_bytes = 0;
+	td->rate_bytes = 0;
+	td->rate_blocks = 0;
+	td->rw_end_set[0] = td->rw_end_set[1] = 0;
 	td->last_was_sync = 0;
-	for_each_file(td, f, i) {
-		f->last_completed_pos = 0;
+	td->nr_done_files = 0;
-		f->last_pos = 0;
-		if (td->io_ops->flags & FIO_SYNCIO)
-			lseek(f->fd, SEEK_SET, 0);
+	for_each_file(td, f, i)
+		td_io_close_file(td, f);
-		if (f->file_map)
-			memset(f->file_map, 0, f->num_maps * sizeof(long));
+	ret = 0;
+	for_each_file(td, f, i) {
+		f->flags &= ~FIO_FILE_DONE;
+		ret = td_io_open_file(td, f);
+		if (ret)
+			break;
 	}
+
+	return ret;
 }
 /*
@@ -687,8 +778,10 @@ static void *thread_main(void *data)
 {
 	unsigned long long runtime[2];
 	struct thread_data *td = data;
+	unsigned long elapsed;
+	int clear_state;
-	if (!td->use_thread)
+	if (!td->o.use_thread)
 		setsid();
 	td->pid = getpid();
@@ -696,20 +789,35 @@ static void *thread_main(void *data)
 	INIT_LIST_HEAD(&td->io_u_freelist);
 	INIT_LIST_HEAD(&td->io_u_busylist);
 	INIT_LIST_HEAD(&td->io_u_requeues);
-	INIT_LIST_HEAD(&td->io_hist_list);
 	INIT_LIST_HEAD(&td->io_log_list);
+	INIT_LIST_HEAD(&td->io_hist_list);
+	td->io_hist_tree = RB_ROOT;
+
+	td_set_runstate(td, TD_INITIALIZED);
+	fio_sem_up(startup_sem);
+	fio_sem_down(td->mutex);
+
+	/*
+	 * the ->mutex semaphore is now no longer used, close it to avoid
+	 * eating a file descriptor
+	 */
+	fio_sem_remove(td->mutex);
+
+	/*
+	 * May alter parameters that init_io_u() will use, so we need to
+	 * do this first.
+	 */
+	if (init_iolog(td))
+		goto err;
 	if (init_io_u(td))
 		goto err;
-	if (fio_setaffinity(td) == -1) {
+	if (td->o.cpumask_set && fio_setaffinity(td) == -1) {
 		td_verror(td, errno, "cpu_set_affinity");
 		goto err;
 	}
-	if (init_iolog(td))
-		goto err;
-
 	if (td->ioprio) {
 		if (ioprio_set(IOPRIO_WHO_PROCESS, 0, td->ioprio) == -1) {
 			td_verror(td, errno, "ioprio_set");
@@ -717,70 +825,79 @@ static void *thread_main(void *data)
 		}
 	}
-	if (nice(td->nice) == -1) {
+	if (nice(td->o.nice) == -1) {
 		td_verror(td, errno, "nice");
 		goto err;
 	}
-	if (init_random_state(td))
+	if (td->o.ioscheduler && switch_ioscheduler(td))
 		goto err;
-	if (td->ioscheduler && switch_ioscheduler(td))
+	if (!td->o.create_serialize && setup_files(td))
 		goto err;
-	td_set_runstate(td, TD_INITIALIZED);
-	fio_sem_up(&startup_sem);
-	fio_sem_down(&td->mutex);
-
-	if (!td->create_serialize && setup_files(td))
+	if (td_io_init(td))
 		goto err;
+
 	if (open_files(td))
 		goto err;
-	/*
-	 * Do this late, as some IO engines would like to have the
-	 * files setup prior to initializing structures.
-	 */
-	if (td_io_init(td))
+	if (init_random_map(td))
 		goto err;
-	if (td->exec_prerun) {
-		if (system(td->exec_prerun) < 0)
+	if (td->o.exec_prerun) {
+		if (system(td->o.exec_prerun) < 0)
 			goto err;
 	}
 	fio_gettime(&td->epoch, NULL);
 	memcpy(&td->timeout_end, &td->epoch, sizeof(td->epoch));
-	getrusage(RUSAGE_SELF, &td->ts->ru_start);
+	getrusage(RUSAGE_SELF, &td->ts.ru_start);
 	runtime[0] = runtime[1] = 0;
-	while (td->loops--) {
+	clear_state = 0;
+	while (keep_running(td)) {
 		fio_gettime(&td->start, NULL);
-		memcpy(&td->ts->stat_sample_time, &td->start, sizeof(td->start));
+		memcpy(&td->ts.stat_sample_time, &td->start, sizeof(td->start));
+
+		if (td->o.ratemin)
+			memcpy(&td->lastrate, &td->ts.stat_sample_time, sizeof(td->lastrate));
-		if (td->ratemin)
-			memcpy(&td->lastrate, &td->ts->stat_sample_time, sizeof(td->lastrate));
+		if (clear_state && clear_io_state(td))
+			break;
-		clear_io_state(td);
 		prune_io_piece_log(td);
-		if (td->io_ops->flags & FIO_CPUIO)
-			do_cpuio(td);
-		else
-			do_io(td);
+		do_io(td);
+
+		clear_state = 1;
-		if (td_read(td) && td->io_bytes[DDIR_READ])
-			runtime[DDIR_READ] += utime_since_now(&td->start);
-		if (td_write(td) && td->io_bytes[DDIR_WRITE])
-			runtime[DDIR_WRITE] += utime_since_now(&td->start);
+		if (td_read(td) && td->io_bytes[DDIR_READ]) {
+			if (td->rw_end_set[DDIR_READ])
+				elapsed = utime_since(&td->start, &td->rw_end[DDIR_READ]);
+			else
+				elapsed = utime_since_now(&td->start);
+
+			runtime[DDIR_READ] += elapsed;
+		}
+		if (td_write(td) && td->io_bytes[DDIR_WRITE]) {
+			if (td->rw_end_set[DDIR_WRITE])
+				elapsed = utime_since(&td->start, &td->rw_end[DDIR_WRITE]);
+			else
+				elapsed = utime_since_now(&td->start);
+
+			runtime[DDIR_WRITE] += elapsed;
+		}
 		if (td->error || td->terminate)
 			break;
-		if (td->verify == VERIFY_NONE)
+		if (td->o.verify == VERIFY_NONE)
 			continue;
-		clear_io_state(td);
+		if (clear_io_state(td))
+			break;
+
 		fio_gettime(&td->start, NULL);
 		do_verify(td);
@@ -792,21 +909,23 @@ static void *thread_main(void *data)
 	}
 	update_rusage_stat(td);
-	fio_gettime(&td->end_time, NULL);
-	td->runtime[0] = runtime[0] / 1000;
-	td->runtime[1] = runtime[1] / 1000;
-
-	if (td->ts->bw_log)
-		finish_log(td, td->ts->bw_log, "bw");
-	if (td->ts->slat_log)
-		finish_log(td, td->ts->slat_log, "slat");
-	if (td->ts->clat_log)
-		finish_log(td, td->ts->clat_log, "clat");
-	if (td->write_iolog_file)
+	td->ts.runtime[0] = runtime[0] / 1000;
+	td->ts.runtime[1] = runtime[1] / 1000;
+	td->ts.total_run_time = mtime_since_now(&td->epoch);
+	td->ts.io_bytes[0] = td->io_bytes[0];
+	td->ts.io_bytes[1] = td->io_bytes[1];
+
+	if (td->ts.bw_log)
+		finish_log(td, td->ts.bw_log, "bw");
+	if (td->ts.slat_log)
+		finish_log(td, td->ts.slat_log, "slat");
+	if (td->ts.clat_log)
+		finish_log(td, td->ts.clat_log, "clat");
+	if (td->o.write_iolog_file)
 		write_iolog_close(td);
-	if (td->exec_postrun) {
-		if (system(td->exec_postrun) < 0)
-			log_err("fio: postrun %s failed\n", td->exec_postrun);
+	if (td->o.exec_postrun) {
+		if (system(td->o.exec_postrun) < 0)
+			log_err("fio: postrun %s failed\n", td->o.exec_postrun);
 	}
 	if (exitall_on_terminate)
@@ -818,6 +937,7 @@ err:
 	close_files(td);
 	close_ioengine(td);
 	cleanup_io_u(td);
+	options_mem_free(td);
 	td_set_runstate(td, TD_EXITED);
 	return (void *) (unsigned long) td->error;
 }
@@ -864,12 +984,12 @@ static void reap_threads(int *nr_running, int *t_rate, int *m_rate)
 		 * ->io_ops is NULL for a thread that has closed its
 		 * io engine
 		 */
-		if (td->io_ops && td->io_ops->flags & FIO_CPUIO)
+		if (td->io_ops && !strcmp(td->io_ops->name, "cpuio"))
 			cputhreads++;
 		if (!td->pid || td->runstate == TD_REAPED)
 			continue;
-		if (td->use_thread) {
+		if (td->o.use_thread) {
 			if (td->runstate == TD_EXITED) {
 				td_set_runstate(td, TD_REAPED);
 				goto reaped;
 			}
@@ -913,9 +1033,10 @@ static void reap_threads(int *nr_running, int *t_rate, int *m_rate)
 		/*
 		 * thread is not dead, continue
 		 */
+		pending++;
 		continue;
 reaped:
-		if (td->use_thread) {
+		if (td->o.use_thread) {
 			long ret;
 			if (pthread_join(td->thread, (void *) &ret))
@@ -923,8 +1044,9 @@ reaped:
 		}
 		(*nr_running)--;
-		(*m_rate) -= td->ratemin;
-		(*t_rate) -= td->rate;
+		(*m_rate) -= td->o.ratemin;
+		(*t_rate) -= td->o.rate;
+		pending--;
 		if (td->error)
 			exit_value++;
@@ -947,7 +1069,15 @@ static void run_threads(void)
 		return;
 	if (!terse_output) {
-		printf("Starting %d thread%s\n", thread_number, thread_number > 1 ? "s" : "");
+		printf("Starting ");
+		if (nr_thread)
+			printf("%d thread%s", nr_thread, nr_thread > 1 ? "s" : "");
+		if (nr_process) {
+			if (nr_thread)
+				printf(" and ");
+			printf("%d process%s", nr_process, nr_process > 1 ? "es" : "");
+		}
+		printf("\n");
 		fflush(stdout);
 	}
@@ -962,7 +1092,7 @@ static void run_threads(void)
 	for_each_td(td, i) {
 		print_status_init(td->thread_number - 1);
-		if (!td->create_serialize) {
+		if (!td->o.create_serialize) {
 			init_disk_util(td);
 			continue;
 		}
@@ -1006,14 +1136,14 @@ static void run_threads(void)
 				continue;
 			}
-			if (td->start_delay) {
+			if (td->o.start_delay) {
 				spent = mtime_since_genesis();
-				if (td->start_delay * 1000 > spent)
+				if (td->o.start_delay * 1000 > spent)
 					continue;
 			}
-			if (td->stonewall && (nr_started || nr_running))
+			if (td->o.stonewall && (nr_started || nr_running))
 				break;
 			/*
@@ -1022,23 +1152,22 @@ static void run_threads(void)
 			 */
 			td_set_runstate(td, TD_CREATED);
 			map[this_jobs++] = td;
-			fio_sem_init(&startup_sem, 1);
 			nr_started++;
-			if (td->use_thread) {
+			if (td->o.use_thread) {
 				if (pthread_create(&td->thread, NULL, thread_main, td)) {
 					perror("thread_create");
 					nr_started--;
+					break;
 				}
 			} else {
-				if (fork())
-					fio_sem_down(&startup_sem);
-				else {
+				if (!fork()) {
 					int ret = fork_main(shm_id, i);
 					exit(ret);
 				}
 			}
+			fio_sem_down(startup_sem);
 		}
 		/*
@@ -1090,10 +1219,10 @@ static void run_threads(void)
 			td_set_runstate(td, TD_RUNNING);
 			nr_running++;
 			nr_started--;
-			m_rate += td->ratemin;
-			t_rate += td->rate;
+			m_rate += td->o.ratemin;
+			t_rate += td->o.rate;
 			todo--;
-			fio_sem_up(&td->mutex);
+			fio_sem_up(td->mutex);
 		}
 		reap_threads(&nr_running, &t_rate, &m_rate);
@@ -1125,10 +1254,8 @@ int main(int argc, char *argv[])
 	if (parse_options(argc, argv))
 		return 1;
-	if (!thread_number) {
-		log_err("Nothing to do\n");
-		return 1;
-	}
+	if (!thread_number)
+		return 0;
 	ps = sysconf(_SC_PAGESIZE);
 	if (ps < 0) {
@@ -1136,6 +1263,7 @@ int main(int argc, char *argv[])
 		return 1;
 	}
+	page_size = ps;
 	page_mask = ps - 1;
 	if (write_bw_log) {
@@ -1143,6 +1271,8 @@ int main(int argc, char *argv[])
 		setup_log(&agg_io_log[DDIR_WRITE]);
 	}
+	startup_sem = fio_sem_init(0);
+
 	set_genesis_time();
 	disk_util_timer_arm();
@@ -1157,5 +1287,6 @@ int main(int argc, char *argv[])
 		}
 	}
+	fio_sem_remove(startup_sem);
 	return exit_value;
 }
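
The reworked check_min_rate() in the patch gates a job on either a bandwidth floor (rate/ratemin) or, newly, an IOPS floor (rate_iops/rate_iops_min), sampled over each ratecycle window. The following is a simplified, self-contained sketch of that bookkeeping; the struct, function and parameter names here are illustrative only, not fio's real data structures.

/*
 * Simplified sketch of the rate-floor bookkeeping done by the reworked
 * check_min_rate().  Names and types are illustrative, not fio's.
 */
#include <stdio.h>

struct rate_state {
	unsigned long long last_bytes;	/* bytes completed at the last sample */
	unsigned long last_blocks;	/* I/Os completed at the last sample */
};

/*
 * Return 1 if the job fell below its configured floor over a sampling
 * window of spent_ms milliseconds, 0 otherwise.  Only one of the two
 * floors (bandwidth or IOPS) is checked, mirroring the rate/rate_iops
 * split in the patch.
 */
static int below_rate_floor(struct rate_state *rs, unsigned long long bytes,
			    unsigned long iops, unsigned long spent_ms,
			    unsigned int ratemin_kb, unsigned int iops_min)
{
	if (ratemin_kb) {
		/* bytes per millisecond is roughly KB/sec */
		unsigned long rate = (bytes - rs->last_bytes) / spent_ms;

		if (rate < ratemin_kb || bytes < rs->last_bytes)
			return 1;
	} else if (iops_min) {
		unsigned long rate = (iops - rs->last_blocks) / spent_ms;

		if (rate < iops_min || iops < rs->last_blocks)
			return 1;
	}

	rs->last_bytes = bytes;
	rs->last_blocks = iops;
	return 0;
}

int main(void)
{
	struct rate_state rs = { 0, 0 };

	/* 4 MiB and 1024 I/Os completed over a 2000 msec window */
	if (below_rate_floor(&rs, 4 * 1024 * 1024ULL, 1024, 2000, 4096, 0))
		printf("min rate not met\n");
	else
		printf("rate ok\n");
	return 0;
}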
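The other recurring theme in the patch is the switch from a volatile int startup_sem to a real struct fio_sem, with the job's TD_INITIALIZED handshake (fio_sem_up(startup_sem) / fio_sem_down(td->mutex)) moved into thread_main() before any heavy setup. fio's fio_sem is its own shared-memory semaphore implementation; the sketch below only illustrates the same handshake pattern with a plain POSIX sem_t in an anonymous shared mapping, with error handling omitted, and is not fio code.

/*
 * Parent/worker startup handshake: the worker signals readiness, then
 * blocks until the parent releases it to start running.
 */
#define _DEFAULT_SOURCE
#include <semaphore.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	/* one "startup" semaphore plus one per-job "mutex", both shared */
	sem_t *sems = mmap(NULL, 2 * sizeof(sem_t), PROT_READ | PROT_WRITE,
			   MAP_SHARED | MAP_ANONYMOUS, -1, 0);
	sem_t *startup = &sems[0], *job_mutex = &sems[1];

	sem_init(startup, 1, 0);	/* pshared = 1, initially locked */
	sem_init(job_mutex, 1, 0);

	if (fork() == 0) {
		/* worker: finish setup, tell the parent, wait to be released */
		printf("worker: initialized\n");
		sem_post(startup);	/* like fio_sem_up(startup_sem) */
		sem_wait(job_mutex);	/* like fio_sem_down(td->mutex) */
		printf("worker: running\n");
		fflush(stdout);
		_exit(0);
	}

	sem_wait(startup);		/* parent waits for the worker to be ready */
	printf("parent: releasing worker\n");
	sem_post(job_mutex);		/* like fio_sem_up(td->mutex) */
	wait(NULL);

	sem_destroy(startup);
	sem_destroy(job_mutex);
	munmap(sems, 2 * sizeof(sem_t));
	return 0;
}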