*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
*/
#include <unistd.h>
/*
* Check if we are above the minimum rate given.
*/
-static bool __check_min_rate(struct thread_data *td, struct timeval *now,
+static bool __check_min_rate(struct thread_data *td, struct timespec *now,
enum fio_ddir ddir)
{
unsigned long long bytes = 0;
return false;
}
-static bool check_min_rate(struct thread_data *td, struct timeval *now)
+static bool check_min_rate(struct thread_data *td, struct timespec *now)
{
bool ret = false;
return ret;
}
-static inline void __update_tv_cache(struct thread_data *td)
+static inline void __update_ts_cache(struct thread_data *td)
{
- fio_gettime(&td->tv_cache, NULL);
+ fio_gettime(&td->ts_cache, NULL);
}
-static inline void update_tv_cache(struct thread_data *td)
+static inline void update_ts_cache(struct thread_data *td)
{
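+	/* only refresh the cached timestamp periodically, not on every call */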
- if ((++td->tv_cache_nr & td->tv_cache_mask) == td->tv_cache_mask)
- __update_tv_cache(td);
+ if ((++td->ts_cache_nr & td->ts_cache_mask) == td->ts_cache_mask)
+ __update_ts_cache(td);
}
-static inline bool runtime_exceeded(struct thread_data *td, struct timeval *t)
+static inline bool runtime_exceeded(struct thread_data *td, struct timespec *t)
{
if (in_ramp_time(td))
return false;
}
}
-static int wait_for_completions(struct thread_data *td, struct timeval *time)
+static int wait_for_completions(struct thread_data *td, struct timespec *time)
{
const int full = queue_full(td);
int min_evts = 0;
int io_queue_event(struct thread_data *td, struct io_u *io_u, int *ret,
enum fio_ddir ddir, uint64_t *bytes_issued, int from_verify,
- struct timeval *comp_time)
+ struct timespec *comp_time)
{
int ret2;
if (ddir_rw(io_u->ddir))
td->ts.short_io_u[io_u->ddir]++;
- f = io_u->file;
if (io_u->offset == f->real_file_size)
goto sync_done;
return ret;
}
+/*
+ * Check if io_u will overlap an in-flight IO in the queue
+ */
+static bool in_flight_overlap(struct io_u_queue *q, struct io_u *io_u)
+{
+ bool overlap;
+ struct io_u *check_io_u;
+ unsigned long long x1, x2, y1, y2;
+ int i;
+
+ x1 = io_u->offset;
+ x2 = io_u->offset + io_u->buflen;
+ overlap = false;
+ io_u_qiter(q, check_io_u, i) {
+ if (check_io_u->flags & IO_U_F_FLIGHT) {
+ y1 = check_io_u->offset;
+ y2 = check_io_u->offset + check_io_u->buflen;
+
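+			/* ranges [x1,x2) and [y1,y2) overlap iff each one starts before the other ends */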
+ if (x1 < y2 && y1 < x2) {
+ overlap = true;
+ dprint(FD_IO, "in-flight overlap: %llu/%lu, %llu/%lu\n",
+ x1, io_u->buflen,
+ y1, check_io_u->buflen);
+ break;
+ }
+ }
+ }
+
+ return overlap;
+}
+
+static int io_u_submit(struct thread_data *td, struct io_u *io_u)
+{
+ /*
+ * Check for overlap if the user asked us to, and we have
+ * at least one IO in flight besides this one.
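+ * If it does overlap, report busy so the caller requeues the io_u and retries it later.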
+ */
+ if (td->o.serialize_overlap && td->cur_depth > 1 &&
+ in_flight_overlap(&td->io_u_all, io_u))
+ return FIO_Q_BUSY;
+
+ return td_io_queue(td, io_u);
+}
+
/*
* The main verify engine. Runs over the writes we previously submitted,
* reads the blocks back in, and checks the crc/md5 of the data.
enum fio_ddir ddir;
int full;
- update_tv_cache(td);
+ update_ts_cache(td);
check_update_rusage(td);
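+	/* the cached time can lag; refresh it and re-check before deciding the runtime limit really was hit */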
- if (runtime_exceeded(td, &td->tv_cache)) {
- __update_tv_cache(td);
- if (runtime_exceeded(td, &td->tv_cache)) {
+ if (runtime_exceeded(td, &td->ts_cache)) {
+ __update_ts_cache(td);
+ if (runtime_exceeded(td, &td->ts_cache)) {
fio_mark_td_terminate(td);
break;
}
if (!td->o.disable_slat)
fio_gettime(&io_u->start_time, NULL);
- ret = td_io_queue(td, io_u);
+ ret = io_u_submit(td, io_u);
if (io_queue_event(td, io_u, &ret, ddir, NULL, 1, NULL))
break;
uint64_t val;
iops = bps / td->o.bs[ddir];
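+	/* exponential inter-arrival time with mean 1000000/iops usec, giving Poisson-distributed arrivals */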
val = (int64_t) (1000000 / iops) *
- -logf(__rand_0_1(&td->poisson_state));
+ -logf(__rand_0_1(&td->poisson_state[ddir]));
if (val) {
- dprint(FD_RATE, "poisson rate iops=%llu\n",
- (unsigned long long) 1000000 / val);
+ dprint(FD_RATE, "poisson rate iops=%llu, ddir=%d\n",
+ (unsigned long long) 1000000 / val,
+ ddir);
}
- td->last_usec += val;
- return td->last_usec;
+ td->last_usec[ddir] += val;
+ return td->last_usec[ddir];
} else if (bps) {
secs = bytes / bps;
remainder = bytes % bps;
while ((td->o.read_iolog_file && !flist_empty(&td->io_log_list)) ||
(!flist_empty(&td->trim_list)) || !io_issue_bytes_exceeded(td) ||
td->o.time_based) {
- struct timeval comp_time;
+ struct timespec comp_time;
struct io_u *io_u;
int full;
enum fio_ddir ddir;
if (td->terminate || td->done)
break;
- update_tv_cache(td);
+ update_ts_cache(td);
- if (runtime_exceeded(td, &td->tv_cache)) {
- __update_tv_cache(td);
- if (runtime_exceeded(td, &td->tv_cache)) {
+ if (runtime_exceeded(td, &td->ts_cache)) {
+ __update_ts_cache(td);
+ if (runtime_exceeded(td, &td->ts_cache)) {
fio_mark_td_terminate(td);
break;
}
td->rate_next_io_time[ddir] = usec_for_io(td, ddir);
} else {
- ret = td_io_queue(td, io_u);
+ ret = io_u_submit(td, io_u);
if (should_check_rate(td))
td->rate_next_io_time[ddir] = usec_for_io(td, ddir);
if (td->done)
return false;
+ if (td->terminate)
+ return false;
if (td->o.time_based)
return true;
if (td->o.loops) {
struct thread_data *td = fd->td;
struct thread_options *o = &td->o;
struct sk_out *sk_out = fd->sk_out;
+ uint64_t bytes_done[DDIR_RWDIR_CNT];
int deadlock_loop_cnt;
- int clear_state;
+ bool clear_state, did_some_io;
int ret;
sk_out_assign(sk_out);
sizeof(td->bw_sample_time));
}
- clear_state = 0;
+ memset(bytes_done, 0, sizeof(bytes_done));
+ clear_state = false;
+ did_some_io = false;
+
while (keep_running(td)) {
uint64_t verify_bytes;
fio_gettime(&td->start, NULL);
- memcpy(&td->tv_cache, &td->start, sizeof(td->start));
+ memcpy(&td->ts_cache, &td->start, sizeof(td->start));
if (clear_state) {
clear_io_state(td, 0);
if (td->o.verify_only && td_write(td))
verify_bytes = do_dry_run(td);
else {
- uint64_t bytes_done[DDIR_RWDIR_CNT];
-
do_io(td, bytes_done);
if (!ddir_rw_sum(bytes_done)) {
if (td->runstate >= TD_EXITED)
break;
- clear_state = 1;
+ clear_state = true;
/*
* Make sure we've successfully updated the rusage stats
td_ioengine_flagged(td, FIO_UNIDIR))
continue;
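+		/* remember that this pass actually transferred data, for the no-I/O check below */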
+ if (ddir_rw_sum(bytes_done))
+ did_some_io = true;
+
clear_io_state(td, 0);
fio_gettime(&td->start, NULL);
break;
}
+ /*
+ * If td ended up with no I/O when it should have had some,
+ * then something went wrong, unless FIO_NOIO or FIO_DISKLESSIO is set.
+ * (Are there other flags that can be ignored here?)
+ */
+ if ((td->o.size || td->o.io_size) && !ddir_rw_sum(bytes_done) &&
+ !did_some_io && !td->o.create_only &&
+ !(td_ioengine_flagged(td, FIO_NOIO) ||
+ td_ioengine_flagged(td, FIO_DISKLESSIO)))
+ log_err("%s: No I/O performed by %s, "
+ "perhaps try --debug=io option for details?\n",
+ td->o.name, td->io_ops->name);
+
td_set_runstate(td, TD_FINISHING);
update_rusage_stat(td);
if (o->write_iolog_file)
write_iolog_close(td);
- fio_mutex_remove(td->mutex);
- td->mutex = NULL;
-
td_set_runstate(td, TD_EXITED);
/*
return (void *) (uintptr_t) td->error;
}
-static void dump_td_info(struct thread_data *td)
-{
- log_err("fio: job '%s' (state=%d) hasn't exited in %lu seconds, it "
- "appears to be stuck. Doing forceful exit of this job.\n",
- td->o.name, td->runstate,
- (unsigned long) time_since_now(&td->terminate_time));
-}
-
/*
* Run over the job map and reap the threads that have exited, if any.
*/
for_each_td(td, i) {
int flags = 0;
- /*
- * ->io_ops is NULL for a thread that has closed its
- * io engine
- */
- if (td->io_ops && !strcmp(td->io_ops->name, "cpuio"))
+ if (!strcmp(td->o.ioengine, "cpuio"))
cputhreads++;
else
realthreads++;
if (td->terminate &&
td->runstate < TD_FSYNCING &&
time_since_now(&td->terminate_time) >= FIO_REAP_TIMEOUT) {
- dump_td_info(td);
+ log_err("fio: job '%s' (state=%d) hasn't exited in "
+ "%lu seconds, it appears to be stuck. Doing "
+ "forceful exit of this job.\n",
+ td->o.name, td->runstate,
+ (unsigned long) time_since_now(&td->terminate_time));
td_set_runstate(td, TD_REAPED);
goto reaped;
}
static bool trigger_timedout(void)
{
if (trigger_timeout)
- return time_since_genesis() >= trigger_timeout;
+ if (time_since_genesis() >= trigger_timeout) {
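+			/* clear the timeout so the trigger only fires once */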
+ trigger_timeout = 0;
+ return true;
+ }
return false;
}
{
int ret;
- if (!cmd)
+ if (!cmd || cmd[0] == '\0')
return;
ret = system(cmd);
if (!td_write(td) || td->o.allow_mounted_write)
return false;
+ /*
+ * If FIO_HAVE_CHARDEV_SIZE is defined, it's likely that chrdevs
+ * can be mkfs'd and mounted, so check those for mounts as well.
+ */
for_each_file(td, f, i) {
+#ifdef FIO_HAVE_CHARDEV_SIZE
+ if (f->filetype != FIO_TYPE_BLOCK && f->filetype != FIO_TYPE_CHAR)
+#else
if (f->filetype != FIO_TYPE_BLOCK)
+#endif
continue;
if (device_is_mounted(f->file_name))
goto mounted;
while (todo) {
struct thread_data *map[REAL_MAX_JOBS];
- struct timeval this_start;
+ struct timespec this_start;
int this_jobs = 0, left;
struct fork_data *fd;
fio_terminate_threads(TERMINATE_ALL);
fio_abort = 1;
nr_started--;
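+			/* the job never started, so free the fork_data allocated for it */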
+ free(fd);
break;
}
dprint(FD_MUTEX, "done waiting on startup_mutex\n");
fio_mutex_remove(td->rusage_sem);
td->rusage_sem = NULL;
}
+ fio_mutex_remove(td->mutex);
+ td->mutex = NULL;
}
free_disk_util();