X-Git-Url: https://git.kernel.dk/?p=fio.git;a=blobdiff_plain;f=fio.c;h=ed14c41f0c645bc5c9378b84d183e89c81955d34;hp=f81aec599a6a6709be28f205c094a20a86e6bfdf;hb=21972cdef7db6b557c2d56138f8434658a9d1e49;hpb=eecf272f1b2d55c1e49aadd7f65b9a433ba04c15 diff --git a/fio.c b/fio.c index f81aec59..ed14c41f 100644 --- a/fio.c +++ b/fio.c @@ -4,10 +4,12 @@ * Copyright (C) 2005 Jens Axboe * Copyright (C) 2006 Jens Axboe * + * The license below covers all files distributed with fio unless otherwise + * noted in the file itself. + * * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -43,9 +45,7 @@ int groupid = 0; int thread_number = 0; int shm_id = 0; int temp_stall_ts; -char *fio_inst_prefix = _INST_PREFIX; - -#define should_fsync(td) ((td_write(td) || td_rw(td)) && (!(td)->odirect || (td)->override_sync)) +const char *fio_inst_prefix = _INST_PREFIX; static volatile int startup_sem; @@ -195,6 +195,50 @@ static void cleanup_pending_aio(struct thread_data *td) } } +/* + * Helper to handle the final sync of a file. Works just like the normal + * io path, just does everything sync. + */ +static int fio_io_sync(struct thread_data *td, struct fio_file *f) +{ + struct io_u *io_u = __get_io_u(td); + struct io_completion_data icd; + int ret; + + if (!io_u) + return 1; + + io_u->ddir = DDIR_SYNC; + io_u->file = f; + + if (td_io_prep(td, io_u)) { + put_io_u(td, io_u); + return 1; + } + + ret = td_io_queue(td, io_u); + if (ret) { + td_verror(td, io_u->error); + put_io_u(td, io_u); + return 1; + } + + ret = td_io_getevents(td, 1, td->cur_depth, NULL); + if (ret < 0) { + td_verror(td, ret); + return 1; + } + + icd.nr = ret; + ios_completed(td, &icd); + if (icd.error) { + td_verror(td, icd.error); + return 1; + } + + return 0; +} + /* * The main verify engine. Runs over the writes we previusly submitted, * reads the blocks back in, and checks the crc/md5 of the data. @@ -212,7 +256,7 @@ void do_verify(struct thread_data *td) * read from disk. 
*/ for_each_file(td, f, i) { - td_io_sync(td, f); + fio_io_sync(td, f); file_invalidate_cache(td, f); } @@ -248,8 +292,8 @@ void do_verify(struct thread_data *td) ret = td_io_queue(td, io_u); if (ret) { + td_verror(td, io_u->error); put_io_u(td, io_u); - td_verror(td, ret); break; } @@ -355,8 +399,8 @@ static void do_io(struct thread_data *td) ret = td_io_queue(td, io_u); if (ret) { + td_verror(td, io_u->error); put_io_u(td, io_u); - td_verror(td, ret); break; } @@ -372,7 +416,7 @@ static void do_io(struct thread_data *td) ret = td_io_getevents(td, min_evts, td->cur_depth, timeout); if (ret < 0) { - td_verror(td, -ret); + td_verror(td, ret); break; } else if (!ret) continue; @@ -392,10 +436,10 @@ static void do_io(struct thread_data *td) gettimeofday(&e, NULL); usec = utime_since(&s, &e); - rate_throttle(td, usec, icd.bytes_done[td->ddir]); + rate_throttle(td, usec, icd.bytes_done[td->ddir], td->ddir); if (check_min_rate(td, &e)) { - if (rate_quit) + if (exitall_on_terminate) terminate_threads(td->groupid); td_verror(td, ENOMEM); break; @@ -406,20 +450,16 @@ static void do_io(struct thread_data *td) if (td->thinktime) usec_sleep(td, td->thinktime); - - if (should_fsync(td) && td->fsync_blocks && - (td->io_blocks[DDIR_WRITE] % td->fsync_blocks) == 0) - td_io_sync(td, f); } - if (!ret) { + if (!td->error) { if (td->cur_depth) cleanup_pending_aio(td); if (should_fsync(td) && td->end_fsync) { td_set_runstate(td, TD_FSYNCING); for_each_file(td, f, i) - td_io_sync(td, f); + fio_io_sync(td, f); } } } @@ -439,9 +479,23 @@ static void cleanup_io_u(struct thread_data *td) free_io_mem(td); } +/* + * "randomly" fill the buffer contents + */ +static void fill_rand_buf(struct io_u *io_u, int max_bs) +{ + int *ptr = io_u->buf; + + while ((void *) ptr - io_u->buf < max_bs) { + *ptr = rand() * 0x9e370001; + ptr++; + } +} + static int init_io_u(struct thread_data *td) { struct io_u *io_u; + unsigned int max_bs; int i, max_units; char *p; @@ -453,7 +507,8 @@ static int init_io_u(struct thread_data *td) else max_units = td->iodepth; - td->orig_buffer_size = td->max_bs * max_units + MASK; + max_bs = max(td->max_bs[DDIR_READ], td->max_bs[DDIR_WRITE]); + td->orig_buffer_size = max_bs * max_units + MASK; if (allocate_io_mem(td)) return 1; @@ -464,7 +519,10 @@ static int init_io_u(struct thread_data *td) memset(io_u, 0, sizeof(*io_u)); INIT_LIST_HEAD(&io_u->list); - io_u->buf = p + td->max_bs * i; + io_u->buf = p + max_bs * i; + if (td_write(td) || td_rw(td)) + fill_rand_buf(io_u, max_bs); + io_u->index = i; list_add(&io_u->list, &td->io_u_freelist); } @@ -478,6 +536,9 @@ static int switch_ioscheduler(struct thread_data *td) FILE *f; int ret; + if (td->io_ops->flags & FIO_CPUIO) + return 0; + sprintf(tmp, "%s/queue/scheduler", td->sysfs_root); f = fopen(tmp, "r+"); @@ -586,14 +647,8 @@ static void *thread_main(void *data) if (init_random_state(td)) goto err; - if (td->ioscheduler) { - int ret = switch_ioscheduler(td); - - free(td->ioscheduler); - free(td->sysfs_root); - if (ret) - goto err; - } + if (td->ioscheduler && switch_ioscheduler(td)) + goto err; td_set_runstate(td, TD_INITIALIZED); fio_sem_up(&startup_sem); @@ -601,13 +656,13 @@ static void *thread_main(void *data) if (!td->create_serialize && setup_files(td)) goto err; + if (open_files(td)) + goto err; gettimeofday(&td->epoch, NULL); - if (td->exec_prerun) { + if (td->exec_prerun) system(td->exec_prerun); - free(td->exec_prerun); - } while (td->loops--) { getrusage(RUSAGE_SELF, &td->ru_start); @@ -654,12 +709,10 @@ static void *thread_main(void *data) 
finish_log(td, td->slat_log, "slat"); if (td->clat_log) finish_log(td, td->clat_log, "clat"); - if (td->write_iolog) + if (td->write_iolog_file) write_iolog_close(td); - if (td->exec_postrun) { + if (td->exec_postrun) system(td->exec_postrun); - free(td->exec_postrun); - } if (exitall_on_terminate) terminate_threads(td->groupid); @@ -700,12 +753,12 @@ static void *fork_main(int shmid, int offset) static void reap_threads(int *nr_running, int *t_rate, int *m_rate) { struct thread_data *td; - int i, cputhreads; + int i, cputhreads, pending; /* * reap exited threads (TD_EXITED -> TD_REAPED) */ - cputhreads = 0; + pending = cputhreads = 0; for_each_td(td, i) { /* * ->io_ops is NULL for a thread that has closed its @@ -714,8 +767,12 @@ static void reap_threads(int *nr_running, int *t_rate, int *m_rate) if (td->io_ops && td->io_ops->flags & FIO_CPUIO) cputhreads++; - if (td->runstate != TD_EXITED) + if (td->runstate != TD_EXITED) { + if (td->runstate < TD_RUNNING) + pending++; + continue; + } td_set_runstate(td, TD_REAPED); @@ -732,7 +789,7 @@ static void reap_threads(int *nr_running, int *t_rate, int *m_rate) (*t_rate) -= td->rate; } - if (*nr_running == cputhreads) + if (*nr_running == cputhreads && !pending) terminate_threads(TERMINATE_ALL); }
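
The fill_rand_buf() hunk above seeds the write-side io_u buffers at init_io_u() time: it walks each buffer an int at a time and stores rand() scaled by the large odd constant 0x9e370001, so buffers handed to write requests carry varying, non-zero contents. Below is a minimal standalone sketch of that pattern, not fio code: the function name, buffer size, and main() driver are invented for illustration, and the pointer arithmetic uses char * casts where the original relies on GCC's void * arithmetic extension.

#include <stdio.h>
#include <stdlib.h>

/* hypothetical stand-in for fio's fill_rand_buf(); same fill pattern */
static void fill_rand_buf_sketch(void *buf, int max_bs)
{
	int *ptr = buf;

	/* step through the buffer one int at a time */
	while ((char *) ptr - (char *) buf < max_bs) {
		*ptr = rand() * 0x9e370001;
		ptr++;
	}
}

int main(void)
{
	int buf[1024];

	fill_rand_buf_sketch(buf, sizeof(buf));
	printf("first word: %#x\n", (unsigned int) buf[0]);
	return 0;
}

In the diff itself this fill runs once per io_u, and only when the job issues writes (td_write() or td_rw()).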
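
The reap_threads() hunk tightens the shutdown heuristic: besides counting FIO_CPUIO burner threads, it now counts threads still below TD_RUNNING as pending, and only terminates the whole group when the running set consists solely of CPU burners and nothing is still starting up. The sketch below isolates that decision with simplified stand-in types; fio's real thread_data and runstate enum are richer, and the names here are invented for illustration.

#include <stdio.h>

/* simplified stand-ins, not fio's definitions */
enum { TD_CREATED, TD_INITIALIZED, TD_RUNNING, TD_EXITED };

struct td_stub {
	int runstate;
	int cpuio;		/* stands in for io_ops->flags & FIO_CPUIO */
};

static int terminate_all(const struct td_stub *td, int nr, int nr_running)
{
	int i, cputhreads = 0, pending = 0;

	for (i = 0; i < nr; i++) {
		if (td[i].cpuio)
			cputhreads++;
		if (td[i].runstate != TD_EXITED && td[i].runstate < TD_RUNNING)
			pending++;
	}

	/* all that runs is CPU burners, and nothing is still initializing */
	return nr_running == cputhreads && !pending;
}

int main(void)
{
	struct td_stub tds[] = {
		{ TD_RUNNING, 1 },	/* CPU burner already running */
		{ TD_CREATED, 0 },	/* io job still being set up */
	};

	/* the old nr_running == cputhreads test alone would fire here */
	printf("terminate: %d\n", terminate_all(tds, 2, 1));
	return 0;
}

The effect, as far as this hunk shows, is that threads still in their startup phase keep the group alive instead of letting the old nr_running == cputhreads test terminate everything early.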