X-Git-Url: https://git.kernel.dk/?p=fio.git;a=blobdiff_plain;f=fio.c;h=b5fada8983e90c2caeb4ca7c4c9ed1bed9a63e5a;hp=72cd02b42d5ca36402bc1dda6b32c9350225b58a;hb=214e1ecae844a32bf8f66d19132f4948d922dcee;hpb=a978ba684deb758465a0ccb18a008797636e8054

diff --git a/fio.c b/fio.c
index 72cd02b4..b5fada89 100644
--- a/fio.c
+++ b/fio.c
@@ -37,16 +37,19 @@
 #include "fio.h"
 #include "os.h"
 
-static unsigned long page_mask;
+unsigned long page_mask;
+unsigned long page_size;
 #define ALIGN(buf) \
 	(char *) (((unsigned long) (buf) + page_mask) & ~page_mask)
 
 int groupid = 0;
 int thread_number = 0;
+int nr_process = 0;
+int nr_thread = 0;
 int shm_id = 0;
 int temp_stall_ts;
 
-static volatile int startup_sem;
+static struct fio_sem *startup_sem;
 static volatile int fio_abort;
 static int exit_value;
 
@@ -129,12 +132,12 @@ static int check_min_rate(struct thread_data *td, struct timeval *now)
 			return 0;
 
 		if (bytes < td->rate_bytes) {
-			fprintf(f_out, "%s: min rate %u not met\n", td->name, td->ratemin);
+			log_err("%s: min rate %u not met\n", td->name, td->ratemin);
 			return 1;
 		} else {
 			rate = (bytes - td->rate_bytes) / spent;
 			if (rate < td->ratemin || bytes < td->rate_bytes) {
-				fprintf(f_out, "%s: min rate %u not met, got %luKiB/sec\n", td->name, td->ratemin, rate);
+				log_err("%s: min rate %u not met, got %luKiB/sec\n", td->name, td->ratemin, rate);
 				return 1;
 			}
 		}
@@ -253,13 +256,16 @@ static void do_verify(struct thread_data *td)
 {
 	struct fio_file *f;
 	struct io_u *io_u;
-	int ret, i, min_events;
+	int ret, min_events;
+	unsigned int i;
 
 	/*
 	 * sync io first and invalidate cache, to make sure we really
 	 * read from disk.
 	 */
 	for_each_file(td, f, i) {
+		if (!(f->flags & FIO_FILE_OPEN))
+			continue;
 		if (fio_io_sync(td, f))
 			break;
 		if (file_invalidate_cache(td, f))
@@ -301,9 +307,17 @@ static void do_verify(struct thread_data *td)
 		case FIO_Q_COMPLETED:
 			if (io_u->error)
 				ret = -io_u->error;
-			else if (io_u->xfer_buflen != io_u->resid && io_u->resid) {
+			else if (io_u->resid) {
 				int bytes = io_u->xfer_buflen - io_u->resid;
 
+				/*
+				 * zero read, fail
+				 */
+				if (!bytes) {
+					td_verror(td, ENODATA, "full resid");
+					put_io_u(td, io_u);
+					break;
+				}
 				io_u->xfer_buflen = io_u->resid;
 				io_u->xfer_buf += bytes;
 				requeue_io_u(td, &io_u);
@@ -361,31 +375,6 @@ static void do_verify(struct thread_data *td)
 	td_set_runstate(td, TD_RUNNING);
 }
 
-/*
- * Not really an io thread, all it does is burn CPU cycles in the specified
- * manner.
- */
-static void do_cpuio(struct thread_data *td)
-{
-	struct timeval e;
-	int split = 100 / td->cpuload;
-	int i = 0;
-
-	while (!td->terminate) {
-		fio_gettime(&e, NULL);
-
-		if (runtime_exceeded(td, &e))
-			break;
-
-		if (!(i % split))
-			__usec_sleep(10000);
-		else
-			usec_sleep(td, 10000);
-
-		i++;
-	}
-}
-
 /*
  * Main IO worker function. It retrieves io_u's to process and queues
  * and reaps them, checking for rate and errors along the way.
@@ -394,7 +383,8 @@ static void do_io(struct thread_data *td)
 {
 	struct timeval s;
 	unsigned long usec;
-	int i, ret = 0;
+	unsigned int i;
+	int ret = 0;
 
 	td_set_runstate(td, TD_RUNNING);
 
@@ -424,9 +414,18 @@ static void do_io(struct thread_data *td)
 		case FIO_Q_COMPLETED:
 			if (io_u->error)
 				ret = -io_u->error;
-			else if (io_u->xfer_buflen != io_u->resid && io_u->resid) {
+			else if (io_u->resid) {
 				int bytes = io_u->xfer_buflen - io_u->resid;
 
+				/*
+				 * zero read, fail
+				 */
+				if (!bytes) {
+					td_verror(td, ENODATA, "full resid");
+					put_io_u(td, io_u);
+					break;
+				}
+
 				io_u->xfer_buflen = io_u->resid;
 				io_u->xfer_buf += bytes;
 				requeue_io_u(td, &io_u);
@@ -524,8 +523,12 @@ static void do_io(struct thread_data *td)
 
 		if (should_fsync(td) && td->end_fsync) {
 			td_set_runstate(td, TD_FSYNCING);
-			for_each_file(td, f, i)
+
+			for_each_file(td, f, i) {
+				if (!(f->flags & FIO_FILE_OPEN))
+					continue;
 				fio_io_sync(td, f);
+			}
 		}
 	} else
 		cleanup_pending_aio(td);
@@ -566,9 +569,6 @@ static int init_io_u(struct thread_data *td)
 	int i, max_units;
 	char *p;
 
-	if (td->io_ops->flags & FIO_CPUIO)
-		return 0;
-
 	if (td->io_ops->flags & FIO_SYNCIO)
 		max_units = 1;
 	else
@@ -611,7 +611,7 @@ static int switch_ioscheduler(struct thread_data *td)
 	FILE *f;
 	int ret;
 
-	if (td->io_ops->flags & FIO_CPUIO)
+	if (td->io_ops->flags & FIO_DISKLESSIO)
 		return 0;
 
 	sprintf(tmp, "%s/queue/scheduler", td->sysfs_root);
@@ -659,11 +659,13 @@ static int switch_ioscheduler(struct thread_data *td)
 static int clear_io_state(struct thread_data *td)
 {
 	struct fio_file *f;
-	int i, ret;
+	unsigned int i;
+	int ret;
 
 	td->ts.stat_io_bytes[0] = td->ts.stat_io_bytes[1] = 0;
 	td->this_io_bytes[0] = td->this_io_bytes[1] = 0;
 	td->zone_bytes = 0;
+	td->rate_bytes = 0;
 
 	td->last_was_sync = 0;
 
@@ -702,37 +704,40 @@ static void *thread_main(void *data)
 	INIT_LIST_HEAD(&td->io_log_list);
 
 	if (init_io_u(td))
-		goto err;
+		goto err_sem;
 
 	if (fio_setaffinity(td) == -1) {
 		td_verror(td, errno, "cpu_set_affinity");
-		goto err;
+		goto err_sem;
 	}
 
 	if (init_iolog(td))
-		goto err;
+		goto err_sem;
 
 	if (td->ioprio) {
 		if (ioprio_set(IOPRIO_WHO_PROCESS, 0, td->ioprio) == -1) {
 			td_verror(td, errno, "ioprio_set");
-			goto err;
+			goto err_sem;
 		}
 	}
 
 	if (nice(td->nice) == -1) {
 		td_verror(td, errno, "nice");
-		goto err;
+		goto err_sem;
 	}
 
-	if (init_random_state(td))
-		goto err;
-
 	if (td->ioscheduler && switch_ioscheduler(td))
-		goto err;
+		goto err_sem;
 
 	td_set_runstate(td, TD_INITIALIZED);
-	fio_sem_up(&startup_sem);
-	fio_sem_down(&td->mutex);
+	fio_sem_up(startup_sem);
+	fio_sem_down(td->mutex);
+
+	/*
+	 * the ->mutex semaphore is now no longer used, close it to avoid
+	 * eating a file descriptor
+	 */
+	fio_sem_remove(td->mutex);
 
 	if (!td->create_serialize && setup_files(td))
 		goto err;
@@ -766,10 +771,7 @@ static void *thread_main(void *data)
 
 		prune_io_piece_log(td);
 
-		if (td->io_ops->flags & FIO_CPUIO)
-			do_cpuio(td);
-		else
-			do_io(td);
+		do_io(td);
 
 		clear_state = 1;
 
@@ -828,6 +830,9 @@ err:
 	cleanup_io_u(td);
 	td_set_runstate(td, TD_EXITED);
 	return (void *) (unsigned long) td->error;
+err_sem:
+	fio_sem_up(startup_sem);
+	goto err;
 }
 
 /*
@@ -872,7 +877,7 @@ static void reap_threads(int *nr_running, int *t_rate, int *m_rate)
 		 * ->io_ops is NULL for a thread that has closed its
 		 * io engine
 		 */
-		if (td->io_ops && td->io_ops->flags & FIO_CPUIO)
+		if (td->io_ops && !strcmp(td->io_ops->name, "cpuio"))
 			cputhreads++;
 
 		if (!td->pid || td->runstate == TD_REAPED)
@@ -955,7 +960,15 @@ static void run_threads(void)
 		return;
 
 	if (!terse_output) {
-		printf("Starting %d thread%s\n", thread_number, thread_number > 1 ? "s" : "");
+		printf("Starting ");
+		if (nr_thread)
+			printf("%d thread%s", nr_thread, nr_thread > 1 ? "s" : "");
+		if (nr_process) {
+			if (nr_thread)
+				printf(" and ");
+			printf("%d process%s", nr_process, nr_process > 1 ? "es" : "");
+		}
+		printf("\n");
 		fflush(stdout);
 	}
 
@@ -1030,23 +1043,22 @@ static void run_threads(void)
 			 */
 			td_set_runstate(td, TD_CREATED);
 			map[this_jobs++] = td;
-			fio_sem_init(&startup_sem, 1);
 			nr_started++;
 
 			if (td->use_thread) {
 				if (pthread_create(&td->thread, NULL, thread_main, td)) {
 					perror("thread_create");
 					nr_started--;
+					break;
 				}
 			} else {
-				if (fork())
-					fio_sem_down(&startup_sem);
-				else {
+				if (!fork()) {
 					int ret = fork_main(shm_id, i);
 
 					exit(ret);
 				}
 			}
+			fio_sem_down(startup_sem);
 		}
 
 		/*
@@ -1101,7 +1113,7 @@ static void run_threads(void)
 			m_rate += td->ratemin;
 			t_rate += td->rate;
 			todo--;
-			fio_sem_up(&td->mutex);
+			fio_sem_up(td->mutex);
 		}
 
 		reap_threads(&nr_running, &t_rate, &m_rate);
@@ -1144,6 +1156,7 @@ int main(int argc, char *argv[])
 		return 1;
 	}
 
+	page_size = ps;
 	page_mask = ps - 1;
 
 	if (write_bw_log) {
@@ -1151,6 +1164,8 @@ int main(int argc, char *argv[])
 		setup_log(&agg_io_log[DDIR_WRITE]);
 	}
 
+	startup_sem = fio_sem_init(0);
+
 	set_genesis_time();
 
 	disk_util_timer_arm();
@@ -1165,5 +1180,6 @@ int main(int argc, char *argv[])
 		}
 	}
 
+	fio_sem_remove(startup_sem);
 	return exit_value;
 }