X-Git-Url: https://git.kernel.dk/?p=fio.git;a=blobdiff_plain;f=fio.c;h=9fabbe93bc53a7887cc6457371b55ea202ff3ebc;hp=1c2748e06ee4483b0cdef5457757985140a133a6;hb=38d77caebcb6f9cb33f247a341c162c0185bf604;hpb=e916b390684ec1ca6247f98138fa9c1682701d29

diff --git a/fio.c b/fio.c
index 1c2748e0..9fabbe93 100644
--- a/fio.c
+++ b/fio.c
@@ -37,16 +37,19 @@
 #include "fio.h"
 #include "os.h"
 
-static unsigned long page_mask;
+unsigned long page_mask;
+unsigned long page_size;
 #define ALIGN(buf) \
 	(char *) (((unsigned long) (buf) + page_mask) & ~page_mask)
 
 int groupid = 0;
 int thread_number = 0;
+int nr_process = 0;
+int nr_thread = 0;
 int shm_id = 0;
 int temp_stall_ts;
 
-static volatile int startup_sem;
+static struct fio_sem *startup_sem;
 static volatile int fio_abort;
 static int exit_value;
 
@@ -60,17 +63,20 @@ static inline void td_set_runstate(struct thread_data *td, int runstate)
 	td->runstate = runstate;
 }
 
-static void terminate_threads(int group_id, int forced_kill)
+static void terminate_threads(int group_id)
 {
 	struct thread_data *td;
 	int i;
 
 	for_each_td(td, i) {
 		if (group_id == TERMINATE_ALL || groupid == td->groupid) {
+			/*
+			 * if the thread is running, just let it exit
+			 */
+			if (td->runstate < TD_RUNNING)
+				kill(td->pid, SIGQUIT);
 			td->terminate = 1;
-			td->start_delay = 0;
-			if (forced_kill)
-				td_set_runstate(td, TD_EXITED);
+			td->o.start_delay = 0;
 		}
 	}
 }
@@ -86,7 +92,7 @@ static void sig_handler(int sig)
 	default:
 		printf("\nfio: terminating on signal %d\n", sig);
 		fflush(stdout);
-		terminate_threads(TERMINATE_ALL, 0);
+		terminate_threads(TERMINATE_ALL);
 		break;
 	}
 }
@@ -96,9 +102,16 @@ static void sig_handler(int sig)
  */
 static int check_min_rate(struct thread_data *td, struct timeval *now)
 {
+	unsigned long long bytes = 0;
+	unsigned long iops = 0;
 	unsigned long spent;
 	unsigned long rate;
-	int ddir = td->ddir;
+
+	/*
+	 * No minimum rate set, always ok
+	 */
+	if (!td->o.ratemin && !td->o.rate_iops_min)
+		return 0;
 
 	/*
 	 * allow a 2 second settle period in the beginning
@@ -106,31 +119,64 @@ static int check_min_rate(struct thread_data *td, struct timeval *now)
 	if (mtime_since(&td->start, now) < 2000)
 		return 0;
 
+	if (td_read(td)) {
+		iops += td->io_blocks[DDIR_READ];
+		bytes += td->this_io_bytes[DDIR_READ];
+	}
+	if (td_write(td)) {
+		iops += td->io_blocks[DDIR_WRITE];
+		bytes += td->this_io_bytes[DDIR_WRITE];
+	}
+
 	/*
 	 * if rate blocks is set, sample is running
 	 */
-	if (td->rate_bytes) {
+	if (td->rate_bytes || td->rate_blocks) {
 		spent = mtime_since(&td->lastrate, now);
-		if (spent < td->ratecycle)
+		if (spent < td->o.ratecycle)
 			return 0;
 
-		rate = (td->this_io_bytes[ddir] - td->rate_bytes) / spent;
-		if (rate < td->ratemin) {
-			fprintf(f_out, "%s: min rate %u not met, got %luKiB/sec\n", td->name, td->ratemin, rate);
-			return 1;
+		if (td->o.rate) {
+			/*
+			 * check bandwidth specified rate
+			 */
+			if (bytes < td->rate_bytes) {
+				log_err("%s: min rate %u not met\n", td->o.name, td->o.ratemin);
+				return 1;
+			} else {
+				rate = (bytes - td->rate_bytes) / spent;
+				if (rate < td->o.ratemin || bytes < td->rate_bytes) {
+					log_err("%s: min rate %u not met, got %luKiB/sec\n", td->o.name, td->o.ratemin, rate);
+					return 1;
+				}
+			}
+		} else {
+			/*
+			 * checks iops specified rate
+			 */
+			if (iops < td->o.rate_iops) {
+				log_err("%s: min iops rate %u not met\n", td->o.name, td->o.rate_iops);
+				return 1;
+			} else {
+				rate = (iops - td->rate_blocks) / spent;
+				if (rate < td->o.rate_iops_min || iops < td->rate_blocks) {
+					log_err("%s: min iops rate %u not met, got %lu\n", td->o.name, td->o.rate_iops_min, rate);
+				}
+			}
 		}
 	}
 
-	td->rate_bytes = td->this_io_bytes[ddir];
+	td->rate_bytes = bytes;
+	td->rate_blocks = iops;
 	memcpy(&td->lastrate, now, sizeof(*now));
 	return 0;
 }
 
 static inline int runtime_exceeded(struct thread_data *td, struct timeval *t)
 {
-	if (!td->timeout)
+	if (!td->o.timeout)
 		return 0;
-	if (mtime_since(&td->epoch, t) >= td->timeout * 1000)
+	if (mtime_since(&td->epoch, t) >= td->o.timeout * 1000)
 		return 1;
 
 	return 0;
@@ -149,7 +195,9 @@ static void cleanup_pending_aio(struct thread_data *td)
 	/*
 	 * get immediately available events, if any
 	 */
-	io_u_queued_complete(td, 0, NULL);
+	r = io_u_queued_complete(td, 0);
+	if (r < 0)
+		return;
 
 	/*
 	 * now cancel remaining active events
@@ -158,14 +206,24 @@ static void cleanup_pending_aio(struct thread_data *td)
 		list_for_each_safe(entry, n, &td->io_u_busylist) {
 			io_u = list_entry(entry, struct io_u, list);
 
-			r = td->io_ops->cancel(td, io_u);
-			if (!r)
+			/*
+			 * if the io_u isn't in flight, then that generally
+			 * means someone leaked an io_u. complain but fix
+			 * it up, so we don't stall here.
+			 */
+			if ((io_u->flags & IO_U_F_FLIGHT) == 0) {
+				log_err("fio: non-busy IO on busy list\n");
 				put_io_u(td, io_u);
+			} else {
+				r = td->io_ops->cancel(td, io_u);
+				if (!r)
+					put_io_u(td, io_u);
+			}
 		}
 	}
 
 	if (td->cur_depth)
-		io_u_queued_complete(td, td->cur_depth, NULL);
+		r = io_u_queued_complete(td, td->cur_depth);
 }
 
 /*
@@ -191,19 +249,20 @@ static int fio_io_sync(struct thread_data *td, struct fio_file *f)
 requeue:
 	ret = td_io_queue(td, io_u);
 	if (ret < 0) {
-		td_verror(td, io_u->error);
+		td_verror(td, io_u->error, "td_io_queue");
 		put_io_u(td, io_u);
 		return 1;
 	} else if (ret == FIO_Q_QUEUED) {
-		if (io_u_queued_complete(td, 1, NULL))
+		if (io_u_queued_complete(td, 1) < 0)
 			return 1;
 	} else if (ret == FIO_Q_COMPLETED) {
 		if (io_u->error) {
-			td_verror(td, io_u->error);
+			td_verror(td, io_u->error, "td_io_queue");
 			return 1;
 		}
 
-		io_u_sync_complete(td, io_u, NULL);
+		if (io_u_sync_complete(td, io_u) < 0)
+			return 1;
 	} else if (ret == FIO_Q_BUSY) {
 		if (td_io_commit(td))
 			return 1;
@@ -214,68 +273,95 @@ requeue:
 }
 
 /*
- * The main verify engine. Runs over the writes we previusly submitted,
+ * The main verify engine. Runs over the writes we previously submitted,
  * reads the blocks back in, and checks the crc/md5 of the data.
  */
 static void do_verify(struct thread_data *td)
 {
 	struct fio_file *f;
 	struct io_u *io_u;
-	int ret, i, min_events;
+	int ret, min_events;
+	unsigned int i;
 
 	/*
 	 * sync io first and invalidate cache, to make sure we really
 	 * read from disk.
 	 */
 	for_each_file(td, f, i) {
-		fio_io_sync(td, f);
-		file_invalidate_cache(td, f);
+		if (!(f->flags & FIO_FILE_OPEN))
+			continue;
+		if (fio_io_sync(td, f))
+			break;
+		if (file_invalidate_cache(td, f))
+			break;
 	}
 
+	if (td->error)
+		return;
+
 	td_set_runstate(td, TD_VERIFYING);
 
 	io_u = NULL;
 	while (!td->terminate) {
+		int ret2;
+
 		io_u = __get_io_u(td);
 		if (!io_u)
 			break;
 
-		if (runtime_exceeded(td, &io_u->start_time))
+		if (runtime_exceeded(td, &io_u->start_time)) {
+			put_io_u(td, io_u);
 			break;
+		}
 
-		if (get_next_verify(td, io_u))
+		if (get_next_verify(td, io_u)) {
+			put_io_u(td, io_u);
 			break;
+		}
 
-		if (td_io_prep(td, io_u))
+		if (td_io_prep(td, io_u)) {
+			put_io_u(td, io_u);
 			break;
+		}
 
-requeue:
-		ret = td_io_queue(td, io_u);
+		io_u->end_io = verify_io_u;
 
+		ret = td_io_queue(td, io_u);
 		switch (ret) {
 		case FIO_Q_COMPLETED:
 			if (io_u->error)
 				ret = -io_u->error;
-			if (io_u->xfer_buflen != io_u->resid && io_u->resid) {
+			else if (io_u->resid) {
 				int bytes = io_u->xfer_buflen - io_u->resid;
 
+				/*
+				 * zero read, fail
+				 */
+				if (!bytes) {
+					td_verror(td, ENODATA, "full resid");
+					put_io_u(td, io_u);
+					break;
+				}
+
 				io_u->xfer_buflen = io_u->resid;
 				io_u->xfer_buf += bytes;
-				goto requeue;
+				requeue_io_u(td, &io_u);
+			} else {
+				ret = io_u_sync_complete(td, io_u);
+				if (ret < 0)
+					break;
 			}
-			ret = io_u_sync_complete(td, io_u, verify_io_u);
-			if (ret)
-				break;
 			continue;
 		case FIO_Q_QUEUED:
 			break;
 		case FIO_Q_BUSY:
 			requeue_io_u(td, &io_u);
-			ret = td_io_commit(td);
+			ret2 = td_io_commit(td);
+			if (ret2 < 0)
+				ret = ret2;
 			break;
 		default:
 			assert(ret < 0);
-			td_verror(td, -ret);
+			td_verror(td, -ret, "td_io_queue");
 			break;
 		}
 
@@ -290,52 +376,29 @@ requeue:
 		if (queue_full(td) || ret == FIO_Q_BUSY) {
 			min_events = 1;
 
-			if (td->cur_depth > td->iodepth_low)
-				min_events = td->cur_depth - td->iodepth_low;
+			if (td->cur_depth > td->o.iodepth_low)
+				min_events = td->cur_depth - td->o.iodepth_low;
 		}
 
 		/*
 		 * Reap required number of io units, if any, and do the
 		 * verification on them through the callback handler
 		 */
-		if (io_u_queued_complete(td, min_events, verify_io_u))
+		if (io_u_queued_complete(td, min_events) < 0)
 			break;
 	}
 
-	if (io_u)
-		put_io_u(td, io_u);
+	if (!td->error) {
+		min_events = td->cur_depth;
 
-	if (td->cur_depth)
+		if (min_events)
+			ret = io_u_queued_complete(td, min_events);
+	} else
 		cleanup_pending_aio(td);
 
 	td_set_runstate(td, TD_RUNNING);
 }
 
-/*
- * Not really an io thread, all it does is burn CPU cycles in the specified
- * manner.
- */
-static void do_cpuio(struct thread_data *td)
-{
-	struct timeval e;
-	int split = 100 / td->cpuload;
-	int i = 0;
-
-	while (!td->terminate) {
-		fio_gettime(&e, NULL);
-
-		if (runtime_exceeded(td, &e))
-			break;
-
-		if (!(i % split))
-			__usec_sleep(10000);
-		else
-			usec_sleep(td, 10000);
-
-		i++;
-	}
-}
-
 /*
  * Main IO worker function. It retrieves io_u's to process and queues
  * and reaps them, checking for rate and errors along the way.
@@ -344,7 +407,8 @@ static void do_io(struct thread_data *td)
 {
 	struct timeval s;
 	unsigned long usec;
-	int i, ret = 0;
+	unsigned int i;
+	int ret = 0;
 
 	td_set_runstate(td, TD_RUNNING);
 
@@ -353,6 +417,7 @@ static void do_io(struct thread_data *td)
 		long bytes_done = 0;
 		int min_evts = 0;
 		struct io_u *io_u;
+		int ret2;
 
 		if (td->terminate)
 			break;
@@ -367,26 +432,33 @@ static void do_io(struct thread_data *td)
 			put_io_u(td, io_u);
 			break;
 		}
 
-requeue:
-		ret = td_io_queue(td, io_u);
+		ret = td_io_queue(td, io_u);
 		switch (ret) {
 		case FIO_Q_COMPLETED:
-			if (io_u->error) {
-				ret = io_u->error;
-				break;
-			}
-			if (io_u->xfer_buflen != io_u->resid && io_u->resid) {
+			if (io_u->error)
+				ret = -io_u->error;
+			else if (io_u->resid) {
 				int bytes = io_u->xfer_buflen - io_u->resid;
 
+				/*
+				 * zero read, fail
+				 */
+				if (!bytes) {
+					td_verror(td, ENODATA, "full resid");
+					put_io_u(td, io_u);
+					break;
+				}
+
 				io_u->xfer_buflen = io_u->resid;
 				io_u->xfer_buf += bytes;
-				goto requeue;
+				requeue_io_u(td, &io_u);
+			} else {
+				fio_gettime(&comp_time, NULL);
+				bytes_done = io_u_sync_complete(td, io_u);
+				if (bytes_done < 0)
+					ret = bytes_done;
 			}
-			fio_gettime(&comp_time, NULL);
-			bytes_done = io_u_sync_complete(td, io_u, NULL);
-			if (bytes_done < 0)
-				ret = bytes_done;
 			break;
 		case FIO_Q_QUEUED:
 			/*
@@ -399,7 +471,9 @@ requeue:
 			break;
 		case FIO_Q_BUSY:
 			requeue_io_u(td, &io_u);
-			ret = td_io_commit(td);
+			ret2 = td_io_commit(td);
+			if (ret2 < 0)
+				ret = ret2;
 			break;
 		default:
 			assert(ret < 0);
@@ -418,12 +492,12 @@ requeue:
 		if (queue_full(td) || ret == FIO_Q_BUSY) {
 			min_evts = 1;
 
-			if (td->cur_depth > td->iodepth_low)
-				min_evts = td->cur_depth - td->iodepth_low;
+			if (td->cur_depth > td->o.iodepth_low)
+				min_evts = td->cur_depth - td->o.iodepth_low;
 		}
 
 		fio_gettime(&comp_time, NULL);
-		bytes_done = io_u_queued_complete(td, min_evts, NULL);
+		bytes_done = io_u_queued_complete(td, min_evts);
 		if (bytes_done < 0)
 			break;
 	}
@@ -438,26 +512,26 @@ requeue:
 		 */
 		usec = utime_since(&s, &comp_time);
 
-		rate_throttle(td, usec, bytes_done, td->ddir);
+		rate_throttle(td, usec, bytes_done);
 
 		if (check_min_rate(td, &comp_time)) {
 			if (exitall_on_terminate)
-				terminate_threads(td->groupid, 0);
-			td_verror(td, ENODATA);
+				terminate_threads(td->groupid);
+			td_verror(td, ENODATA, "check_min_rate");
 			break;
 		}
 
-		if (td->thinktime) {
+		if (td->o.thinktime) {
 			unsigned long long b;
 
 			b = td->io_blocks[0] + td->io_blocks[1];
-			if (!(b % td->thinktime_blocks)) {
+			if (!(b % td->o.thinktime_blocks)) {
 				int left;
 
-				if (td->thinktime_spin)
-					__usec_sleep(td->thinktime_spin);
+				if (td->o.thinktime_spin)
+					__usec_sleep(td->o.thinktime_spin);
 
-				left = td->thinktime - td->thinktime_spin;
+				left = td->o.thinktime - td->o.thinktime_spin;
 				if (left)
 					usec_sleep(td, left);
 			}
@@ -467,15 +541,21 @@ requeue:
 	if (!td->error) {
 		struct fio_file *f;
 
-		if (td->cur_depth)
-			cleanup_pending_aio(td);
+		i = td->cur_depth;
+		if (i)
+			ret = io_u_queued_complete(td, i);
 
-		if (should_fsync(td) && td->end_fsync) {
+		if (should_fsync(td) && td->o.end_fsync) {
 			td_set_runstate(td, TD_FSYNCING);
-			for_each_file(td, f, i)
+
+			for_each_file(td, f, i) {
+				if (!(f->flags & FIO_FILE_OPEN))
+					continue;
 				fio_io_sync(td, f);
+			}
 		}
-	}
+	} else
+		cleanup_pending_aio(td);
 }
 
 static void cleanup_io_u(struct thread_data *td)
@@ -508,26 +588,31 @@ static void fill_rand_buf(struct io_u *io_u, int max_bs)
 
 static int init_io_u(struct thread_data *td)
 {
+	unsigned long long buf_size;
 	struct io_u *io_u;
 	unsigned int max_bs;
 	int i, max_units;
 	char *p;
 
-	if (td->io_ops->flags & FIO_CPUIO)
-		return 0;
-
 	if (td->io_ops->flags & FIO_SYNCIO)
 		max_units = 1;
 	else
-		max_units = td->iodepth;
+		max_units = td->o.iodepth;
 
-	max_bs = max(td->max_bs[DDIR_READ], td->max_bs[DDIR_WRITE]);
-	td->orig_buffer_size = max_bs * max_units;
+	max_bs = max(td->o.max_bs[DDIR_READ], td->o.max_bs[DDIR_WRITE]);
+	buf_size = (unsigned long long) max_bs * (unsigned long long) max_units;
+	buf_size += page_mask;
+	if (buf_size != (size_t) buf_size) {
+		log_err("fio: IO memory too large. Reduce max_bs or iodepth\n");
+		return 1;
+	}
 
-	if (td->mem_type == MEM_SHMHUGE || td->mem_type == MEM_MMAPHUGE)
-		td->orig_buffer_size = (td->orig_buffer_size + td->hugepage_size - 1) & ~(td->hugepage_size - 1);
-	else
-		td->orig_buffer_size += page_mask;
+	td->orig_buffer_size = buf_size;
+
+	if (td->o.mem_type == MEM_SHMHUGE || td->o.mem_type == MEM_MMAPHUGE)
+		td->orig_buffer_size = (td->orig_buffer_size + td->o.hugepage_size - 1) & ~(td->o.hugepage_size - 1);
+	else if (td->orig_buffer_size & page_mask)
+		td->orig_buffer_size = (td->orig_buffer_size + page_mask) & ~page_mask;
 
 	if (allocate_io_mem(td))
 		return 1;
@@ -543,9 +628,12 @@ static int init_io_u(struct thread_data *td)
 			fill_rand_buf(io_u, max_bs);
 
 		io_u->index = i;
+		io_u->flags = IO_U_F_FREE;
 		list_add(&io_u->list, &td->io_u_freelist);
 	}
 
+	io_u_init_timeout();
+
 	return 0;
 }
 
@@ -555,23 +643,23 @@ static int switch_ioscheduler(struct thread_data *td)
 	FILE *f;
 	int ret;
 
-	if (td->io_ops->flags & FIO_CPUIO)
+	if (td->io_ops->flags & FIO_DISKLESSIO)
 		return 0;
 
 	sprintf(tmp, "%s/queue/scheduler", td->sysfs_root);
 
 	f = fopen(tmp, "r+");
 	if (!f) {
-		td_verror(td, errno);
+		td_verror(td, errno, "fopen");
 		return 1;
 	}
 
 	/*
 	 * Set io scheduler.
 	 */
-	ret = fwrite(td->ioscheduler, strlen(td->ioscheduler), 1, f);
+	ret = fwrite(td->o.ioscheduler, strlen(td->o.ioscheduler), 1, f);
 	if (ferror(f) || ret != 1) {
-		td_verror(td, errno);
+		td_verror(td, errno, "fwrite");
 		fclose(f);
 		return 1;
 	}
@@ -583,15 +671,15 @@ static int switch_ioscheduler(struct thread_data *td)
 	 */
 	ret = fread(tmp, 1, sizeof(tmp), f);
 	if (ferror(f) || ret < 0) {
-		td_verror(td, errno);
+		td_verror(td, errno, "fread");
 		fclose(f);
 		return 1;
 	}
 
-	sprintf(tmp2, "[%s]", td->ioscheduler);
+	sprintf(tmp2, "[%s]", td->o.ioscheduler);
 	if (!strstr(tmp, tmp2)) {
-		log_err("fio: io scheduler %s not found\n", td->ioscheduler);
-		td_verror(td, EINVAL);
+		log_err("fio: io scheduler %s not found\n", td->o.ioscheduler);
+		td_verror(td, EINVAL, "iosched_switch");
 		fclose(f);
 		return 1;
 	}
@@ -600,27 +688,32 @@ static int switch_ioscheduler(struct thread_data *td)
 	return 0;
 }
 
-static void clear_io_state(struct thread_data *td)
+static int clear_io_state(struct thread_data *td)
 {
 	struct fio_file *f;
-	int i;
+	unsigned int i;
+	int ret;
 
 	td->ts.stat_io_bytes[0] = td->ts.stat_io_bytes[1] = 0;
 	td->this_io_bytes[0] = td->this_io_bytes[1] = 0;
 	td->zone_bytes = 0;
+	td->rate_bytes = 0;
+	td->rate_blocks = 0;
+	td->rw_end_set[0] = td->rw_end_set[1] = 0;
 
 	td->last_was_sync = 0;
 
-	for_each_file(td, f, i) {
-		f->last_completed_pos = 0;
-
-		f->last_pos = 0;
-		if (td->io_ops->flags & FIO_SYNCIO)
-			lseek(f->fd, SEEK_SET, 0);
+	for_each_file(td, f, i)
+		td_io_close_file(td, f);
 
-		if (f->file_map)
-			memset(f->file_map, 0, f->num_maps * sizeof(long));
+	ret = 0;
+	for_each_file(td, f, i) {
+		ret = td_io_open_file(td, f);
+		if (ret)
+			break;
 	}
+
+	return ret;
 }
 
 /*
@@ -631,8 +724,10 @@ static void *thread_main(void *data)
 {
 	unsigned long long runtime[2];
 	struct thread_data *td = data;
+	unsigned long elapsed;
+	int clear_state;
 
-	if (!td->use_thread)
+	if (!td->o.use_thread)
 		setsid();
 
 	td->pid = getpid();
@@ -644,85 +739,106 @@ static void *thread_main(void *data)
 	INIT_LIST_HEAD(&td->io_log_list);
 
 	if (init_io_u(td))
-		goto err;
+		goto err_sem;
 
 	if (fio_setaffinity(td) == -1) {
-		td_verror(td, errno);
-		goto err;
+		td_verror(td, errno, "cpu_set_affinity");
+		goto err_sem;
 	}
 
 	if (init_iolog(td))
-		goto err;
+		goto err_sem;
 
 	if (td->ioprio) {
 		if (ioprio_set(IOPRIO_WHO_PROCESS, 0, td->ioprio) == -1) {
-			td_verror(td, errno);
-			goto err;
+			td_verror(td, errno, "ioprio_set");
+			goto err_sem;
 		}
 	}
 
-	if (nice(td->nice) == -1) {
-		td_verror(td, errno);
-		goto err;
+	if (nice(td->o.nice) == -1) {
+		td_verror(td, errno, "nice");
+		goto err_sem;
 	}
 
-	if (init_random_state(td))
-		goto err;
-
-	if (td->ioscheduler && switch_ioscheduler(td))
-		goto err;
+	if (td->o.ioscheduler && switch_ioscheduler(td))
+		goto err_sem;
 
 	td_set_runstate(td, TD_INITIALIZED);
-	fio_sem_up(&startup_sem);
-	fio_sem_down(&td->mutex);
+	fio_sem_up(startup_sem);
+	fio_sem_down(td->mutex);
+
+	/*
+	 * the ->mutex semaphore is now no longer used, close it to avoid
+	 * eating a file descriptor
+	 */
+	fio_sem_remove(td->mutex);
 
-	if (!td->create_serialize && setup_files(td))
+	if (!td->o.create_serialize && setup_files(td))
 		goto err;
+
+	if (td_io_init(td))
+		goto err;
+
 	if (open_files(td))
 		goto err;
 
-	/*
-	 * Do this late, as some IO engines would like to have the
-	 * files setup prior to initializing structures.
-	 */
-	if (td_io_init(td))
+	if (init_random_map(td))
 		goto err;
 
-	if (td->exec_prerun) {
-		if (system(td->exec_prerun) < 0)
+	if (td->o.exec_prerun) {
+		if (system(td->o.exec_prerun) < 0)
 			goto err;
 	}
 
 	fio_gettime(&td->epoch, NULL);
+	memcpy(&td->timeout_end, &td->epoch, sizeof(td->epoch));
 	getrusage(RUSAGE_SELF, &td->ts.ru_start);
 
 	runtime[0] = runtime[1] = 0;
-	while (td->loops--) {
+	clear_state = 0;
+	while (td->o.loops--) {
 		fio_gettime(&td->start, NULL);
 		memcpy(&td->ts.stat_sample_time, &td->start, sizeof(td->start));
 
-		if (td->ratemin)
+		if (td->o.ratemin)
			memcpy(&td->lastrate, &td->ts.stat_sample_time, sizeof(td->lastrate));
 
-		clear_io_state(td);
+		if (clear_state && clear_io_state(td))
+			break;
+
 		prune_io_piece_log(td);
 
-		if (td->io_ops->flags & FIO_CPUIO)
-			do_cpuio(td);
-		else
-			do_io(td);
+		do_io(td);
+
+		clear_state = 1;
 
-		runtime[td->ddir] += utime_since_now(&td->start);
-		if (td_rw(td) && td->io_bytes[td->ddir ^ 1])
-			runtime[td->ddir ^ 1] = runtime[td->ddir];
+		if (td_read(td) && td->io_bytes[DDIR_READ]) {
+			if (td->rw_end_set[DDIR_READ])
+				elapsed = utime_since(&td->start, &td->rw_end[DDIR_READ]);
+			else
+				elapsed = utime_since_now(&td->start);
+			runtime[DDIR_READ] += elapsed;
+		}
+		if (td_write(td) && td->io_bytes[DDIR_WRITE]) {
+			if (td->rw_end_set[DDIR_WRITE])
+				elapsed = utime_since(&td->start, &td->rw_end[DDIR_WRITE]);
+			else
+				elapsed = utime_since_now(&td->start);
+
+			runtime[DDIR_WRITE] += elapsed;
+		}
 
 		if (td->error || td->terminate)
 			break;
 
-		if (td->verify == VERIFY_NONE)
+		if (td->o.verify == VERIFY_NONE)
 			continue;
 
-		clear_io_state(td);
+		if (clear_io_state(td))
+			break;
+
 		fio_gettime(&td->start, NULL);
 
 		do_verify(td);
@@ -734,9 +850,11 @@ static void *thread_main(void *data)
 	}
 
 	update_rusage_stat(td);
-	fio_gettime(&td->end_time, NULL);
-	td->runtime[0] = runtime[0] / 1000;
-	td->runtime[1] = runtime[1] / 1000;
+	td->ts.runtime[0] = runtime[0] / 1000;
+	td->ts.runtime[1] = runtime[1] / 1000;
+	td->ts.total_run_time = mtime_since_now(&td->epoch);
+	td->ts.io_bytes[0] = td->io_bytes[0];
+	td->ts.io_bytes[1] = td->io_bytes[1];
 
 	if (td->ts.bw_log)
 		finish_log(td, td->ts.bw_log, "bw");
@@ -744,15 +862,15 @@ static void *thread_main(void *data)
 		finish_log(td, td->ts.slat_log, "slat");
 	if (td->ts.clat_log)
 		finish_log(td, td->ts.clat_log, "clat");
-	if (td->write_iolog_file)
+	if (td->o.write_iolog_file)
 		write_iolog_close(td);
-	if (td->exec_postrun) {
-		if (system(td->exec_postrun) < 0)
-			log_err("fio: postrun %s failed\n", td->exec_postrun);
+	if (td->o.exec_postrun) {
+		if (system(td->o.exec_postrun) < 0)
+			log_err("fio: postrun %s failed\n", td->o.exec_postrun);
 	}
 
 	if (exitall_on_terminate)
-		terminate_threads(td->groupid, 0);
+		terminate_threads(td->groupid);
 
 err:
 	if (td->error)
@@ -761,7 +879,10 @@ err:
 	close_ioengine(td);
 	cleanup_io_u(td);
 	td_set_runstate(td, TD_EXITED);
-	return (void *) td->error;
+	return (void *) (unsigned long) td->error;
+err_sem:
+	fio_sem_up(startup_sem);
+	goto err;
 }
 
 /*
@@ -784,7 +905,7 @@ static int fork_main(int shmid, int offset)
 	td = data + offset * sizeof(struct thread_data);
 	ret = thread_main(td);
 	shmdt(data);
-	return (int) ret;
+	return (int) (unsigned long) ret;
 }
 
 /*
@@ -800,66 +921,80 @@ static void reap_threads(int *nr_running, int *t_rate, int *m_rate)
 	 */
 	pending = cputhreads = 0;
 	for_each_td(td, i) {
+		int flags = 0;
+
 		/*
 		 * ->io_ops is NULL for a thread that has closed its
 		 * io engine
 		 */
-		if (td->io_ops && td->io_ops->flags & FIO_CPUIO)
+		if (td->io_ops && !strcmp(td->io_ops->name, "cpuio"))
 			cputhreads++;
 
-		if (td->runstate < TD_EXITED) {
-			/*
-			 * check if someone quit or got killed in an unusual way
-			 */
-			ret = waitpid(td->pid, &status, WNOHANG);
-			if (ret < 0)
-				perror("waitpid");
-			else if ((ret == td->pid) && WIFSIGNALED(status)) {
-				int sig = WTERMSIG(status);
-
-				log_err("fio: pid=%d, got signal=%d\n", td->pid, sig);
+		if (!td->pid || td->runstate == TD_REAPED)
+			continue;
+		if (td->o.use_thread) {
+			if (td->runstate == TD_EXITED) {
 				td_set_runstate(td, TD_REAPED);
 				goto reaped;
 			}
+			continue;
 		}
 
-		if (td->runstate != TD_EXITED) {
-			if (td->runstate < TD_RUNNING)
-				pending++;
+		flags = WNOHANG;
+		if (td->runstate == TD_EXITED)
+			flags = 0;
 
-			continue;
-		}
+		/*
+		 * check if someone quit or got killed in an unusual way
+		 */
+		ret = waitpid(td->pid, &status, flags);
+		if (ret < 0) {
+			if (errno == ECHILD) {
+				log_err("fio: pid=%d disappeared %d\n", td->pid, td->runstate);
+				td_set_runstate(td, TD_REAPED);
+				goto reaped;
+			}
+			perror("waitpid");
+		} else if (ret == td->pid) {
+			if (WIFSIGNALED(status)) {
+				int sig = WTERMSIG(status);
 
-		if (td->error)
-			exit_value++;
+				if (sig != SIGQUIT)
+					log_err("fio: pid=%d, got signal=%d\n", td->pid, sig);
+				td_set_runstate(td, TD_REAPED);
+				goto reaped;
+			}
+			if (WIFEXITED(status)) {
+				if (WEXITSTATUS(status) && !td->error)
+					td->error = WEXITSTATUS(status);
 
-		td_set_runstate(td, TD_REAPED);
+				td_set_runstate(td, TD_REAPED);
+				goto reaped;
+			}
+		}
 
-		if (td->use_thread) {
+		/*
+		 * thread is not dead, continue
+		 */
+		continue;
+reaped:
+		if (td->o.use_thread) {
 			long ret;
 
 			if (pthread_join(td->thread, (void *) &ret))
-				perror("thread_join");
-		} else {
-			int status;
-
-			ret = waitpid(td->pid, &status, 0);
-			if (ret < 0)
-				perror("waitpid");
-			else if (WIFEXITED(status) && WEXITSTATUS(status)) {
-				if (!exit_value)
-					exit_value++;
-			}
+				perror("pthread_join");
 		}
 
-reaped:
 		(*nr_running)--;
-		(*m_rate) -= td->ratemin;
-		(*t_rate) -= td->rate;
+		(*m_rate) -= td->o.ratemin;
+		(*t_rate) -= td->o.rate;
+
+		if (td->error)
+			exit_value++;
 	}
 
 	if (*nr_running == cputhreads && !pending)
-		terminate_threads(TERMINATE_ALL, 0);
+		terminate_threads(TERMINATE_ALL);
 }
 
 /*
@@ -875,7 +1010,15 @@ static void run_threads(void)
 		return;
 
 	if (!terse_output) {
-		printf("Starting %d thread%s\n", thread_number, thread_number > 1 ? "s" : "");
+		printf("Starting ");
+		if (nr_thread)
+			printf("%d thread%s", nr_thread, nr_thread > 1 ? "s" : "");
+		if (nr_process) {
+			if (nr_thread)
+				printf(" and ");
+			printf("%d process%s", nr_process, nr_process > 1 ? "es" : "");
+		}
+		printf("\n");
 		fflush(stdout);
 	}
 
@@ -890,7 +1033,7 @@ static void run_threads(void)
 	for_each_td(td, i) {
 		print_status_init(td->thread_number - 1);
 
-		if (!td->create_serialize) {
+		if (!td->o.create_serialize) {
 			init_disk_util(td);
 			continue;
 		}
@@ -911,6 +1054,8 @@ static void run_threads(void)
 		init_disk_util(td);
 	}
 
+	set_genesis_time();
+
 	while (todo) {
 		struct thread_data *map[MAX_JOBS];
 		struct timeval this_start;
@@ -932,14 +1077,14 @@ static void run_threads(void)
 				continue;
 			}
 
-			if (td->start_delay) {
+			if (td->o.start_delay) {
 				spent = mtime_since_genesis();
 
-				if (td->start_delay * 1000 > spent)
+				if (td->o.start_delay * 1000 > spent)
 					continue;
 			}
 
-			if (td->stonewall && (nr_started || nr_running))
+			if (td->o.stonewall && (nr_started || nr_running))
 				break;
 
 			/*
@@ -948,23 +1093,22 @@ static void run_threads(void)
 			 */
 			td_set_runstate(td, TD_CREATED);
 			map[this_jobs++] = td;
-			fio_sem_init(&startup_sem, 1);
 			nr_started++;
 
-			if (td->use_thread) {
+			if (td->o.use_thread) {
 				if (pthread_create(&td->thread, NULL, thread_main, td)) {
 					perror("thread_create");
 					nr_started--;
+					break;
 				}
 			} else {
-				if (fork())
-					fio_sem_down(&startup_sem);
-				else {
+				if (!fork()) {
 					int ret = fork_main(shm_id, i);
 
 					exit(ret);
 				}
 			}
+			fio_sem_down(startup_sem);
 		}
 
 		/*
@@ -1016,10 +1160,10 @@ static void run_threads(void)
 			td_set_runstate(td, TD_RUNNING);
 			nr_running++;
 			nr_started--;
-			m_rate += td->ratemin;
-			t_rate += td->rate;
+			m_rate += td->o.ratemin;
+			t_rate += td->o.rate;
 			todo--;
-			fio_sem_up(&td->mutex);
+			fio_sem_up(td->mutex);
 		}
 
 		reap_threads(&nr_running, &t_rate, &m_rate);
@@ -1062,6 +1206,7 @@ int main(int argc, char *argv[])
 		return 1;
 	}
 
+	page_size = ps;
 	page_mask = ps - 1;
 
 	if (write_bw_log) {
@@ -1069,6 +1214,10 @@ int main(int argc, char *argv[])
 		setup_log(&agg_io_log[DDIR_WRITE]);
 	}
 
+	startup_sem = fio_sem_init(0);
+
+	set_genesis_time();
+
 	disk_util_timer_arm();
 	run_threads();
 
@@ -1081,5 +1230,6 @@ int main(int argc, char *argv[])
 		}
 	}
 
+	fio_sem_remove(startup_sem);
 	return exit_value;
 }