int temp_stall_ts;
unsigned long done_secs = 0;
-#define PAGE_ALIGN(buf) \
- (char *) (((uintptr_t) (buf) + page_mask) & ~page_mask)
-
#define JOB_START_TIMEOUT (5 * 1000)
static void sig_int(int sig)
/*
* Check if we are above the minimum rate given.
*/
-static bool __check_min_rate(struct thread_data *td, struct timeval *now,
+static bool __check_min_rate(struct thread_data *td, struct timespec *now,
enum fio_ddir ddir)
{
unsigned long long bytes = 0;
return false;
}
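
The body of __check_min_rate() is elided in this hunk. A minimal sketch of the check it performs, with hypothetical parameter names (fio pulls these values from td and its options):

```c
#include <stdbool.h>
#include <stdint.h>

/*
 * Sketch of a minimum-rate check: fail the job if the bandwidth
 * achieved over the sampling window fell below the configured floor.
 * All names here are illustrative, not fio's.
 */
static bool below_min_rate(uint64_t bytes_done, uint64_t elapsed_ms,
			   uint64_t rate_min_bps)
{
	if (!elapsed_ms)
		return false;	/* window too short to judge */

	return bytes_done * 1000 / elapsed_ms < rate_min_bps;
}
```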
-static bool check_min_rate(struct thread_data *td, struct timeval *now)
+static bool check_min_rate(struct thread_data *td, struct timespec *now)
{
bool ret = false;
return ret;
}
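
The running theme of this patch is moving fio's cached timestamps from microsecond-resolution struct timeval to nanosecond-resolution struct timespec. A sketch of elapsed-time arithmetic on timespec (hypothetical helper, not fio's own time utilities):

```c
#include <stdint.h>
#include <time.h>

/* Nanoseconds elapsed since *start; timeval tops out at microseconds.
 * The nsec difference may be negative, but modular unsigned arithmetic
 * still yields the correct non-negative sum. */
static uint64_t ns_since(const struct timespec *start)
{
	struct timespec now;

	clock_gettime(CLOCK_MONOTONIC, &now);
	return (now.tv_sec - start->tv_sec) * 1000000000ULL +
	       (now.tv_nsec - start->tv_nsec);
}
```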
-static inline void __update_tv_cache(struct thread_data *td)
+static inline void __update_ts_cache(struct thread_data *td)
{
- fio_gettime(&td->tv_cache, NULL);
+ fio_gettime(&td->ts_cache, NULL);
}
-static inline void update_tv_cache(struct thread_data *td)
+static inline void update_ts_cache(struct thread_data *td)
{
- if ((++td->tv_cache_nr & td->tv_cache_mask) == td->tv_cache_mask)
- __update_tv_cache(td);
+ if ((++td->ts_cache_nr & td->ts_cache_mask) == td->ts_cache_mask)
+ __update_ts_cache(td);
}
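
update_ts_cache() only takes a real clock reading once every ts_cache_mask + 1 calls, amortizing the cost of fio_gettime() across the hot path. A self-contained sketch of that pattern (hypothetical struct; fio keeps these fields in struct thread_data, and the mask must be 2^n - 1):

```c
#include <time.h>

struct ts_cache {
	struct timespec ts;	/* last real clock reading */
	unsigned int nr;	/* calls since last refresh */
	unsigned int mask;	/* refresh every (mask + 1) calls */
};

static void ts_cache_get(struct ts_cache *c, struct timespec *out)
{
	/* low bits all ones <=> (mask + 1) calls have elapsed */
	if ((++c->nr & c->mask) == c->mask)
		clock_gettime(CLOCK_MONOTONIC, &c->ts);
	*out = c->ts;
}
```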
-static inline bool runtime_exceeded(struct thread_data *td, struct timeval *t)
+static inline bool runtime_exceeded(struct thread_data *td, struct timespec *t)
{
if (in_ramp_time(td))
return false;
}
}
-static int wait_for_completions(struct thread_data *td, struct timeval *time)
+static int wait_for_completions(struct thread_data *td, struct timespec *time)
{
const int full = queue_full(td);
int min_evts = 0;
int io_queue_event(struct thread_data *td, struct io_u *io_u, int *ret,
enum fio_ddir ddir, uint64_t *bytes_issued, int from_verify,
- struct timeval *comp_time)
+ struct timespec *comp_time)
{
int ret2;
enum fio_ddir ddir;
int full;
- update_tv_cache(td);
+ update_ts_cache(td);
check_update_rusage(td);
- if (runtime_exceeded(td, &td->tv_cache)) {
- __update_tv_cache(td);
- if (runtime_exceeded(td, &td->tv_cache)) {
+ if (runtime_exceeded(td, &td->ts_cache)) {
+ __update_ts_cache(td);
+ if (runtime_exceeded(td, &td->ts_cache)) {
fio_mark_td_terminate(td);
break;
}
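
Terminating a job is irreversible, so the code above tests the (possibly inaccurate) cached time first, and only trusts a positive result after forcing a cache refresh. The same double-check pattern in standalone form (hypothetical helpers, not fio's API):

```c
#include <stdbool.h>
#include <time.h>

static bool past_deadline(const struct timespec *now,
			  const struct timespec *deadline)
{
	return now->tv_sec > deadline->tv_sec ||
	       (now->tv_sec == deadline->tv_sec &&
		now->tv_nsec >= deadline->tv_nsec);
}

static bool runtime_exceeded_checked(struct timespec *cached,
				     const struct timespec *deadline)
{
	if (!past_deadline(cached, deadline))
		return false;
	/* the cache may be inaccurate: refresh and confirm */
	clock_gettime(CLOCK_MONOTONIC, cached);
	return past_deadline(cached, deadline);
}
```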
else
bytes = this_bytes[DDIR_TRIM];
- if (td->o.io_limit)
- limit = td->o.io_limit;
+ if (td->o.io_size)
+ limit = td->o.io_size;
else
limit = td->o.size;
uint64_t val;
iops = bps / td->o.bs[ddir];
val = (int64_t) (1000000 / iops) *
- -logf(__rand_0_1(&td->poisson_state));
+ -logf(__rand_0_1(&td->poisson_state[ddir]));
if (val) {
- dprint(FD_RATE, "poisson rate iops=%llu\n",
- (unsigned long long) 1000000 / val);
+ dprint(FD_RATE, "poisson rate iops=%llu, ddir=%d\n",
+ (unsigned long long) 1000000 / val,
+ ddir);
}
- td->last_usec += val;
- return td->last_usec;
+ td->last_usec[ddir] += val;
+ return td->last_usec[ddir];
} else if (bps) {
secs = bytes / bps;
remainder = bytes % bps;
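
For the poisson_state branch above: inter-arrival times of a Poisson process with mean rate iops are exponentially distributed with mean 1e6/iops microseconds, which is exactly what the -log() inverse-transform sample produces. A standalone sketch, with drand48() standing in for fio's __rand_0_1():

```c
#include <math.h>
#include <stdint.h>
#include <stdlib.h>

/* Exponential inter-arrival delay (usec) for a target of `iops`
 * events/sec; link with -lm. */
static uint64_t poisson_delay_usec(double iops)
{
	double u = 1.0 - drand48();	/* U in (0, 1], avoids log(0) */

	return (uint64_t)((1000000.0 / iops) * -log(u));
}
```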
total_bytes = td->o.size;
/*
- * Allow random overwrite workloads to write up to io_limit
+ * Allow random overwrite workloads to write up to io_size
* before starting verification phase as 'size' doesn't apply.
*/
if (td_write(td) && td_random(td) && td->o.norandommap)
- total_bytes = max(total_bytes, (uint64_t) td->o.io_limit);
+ total_bytes = max(total_bytes, (uint64_t) td->o.io_size);
/*
* If verify_backlog is enabled, we'll run the verify in this
* handler as well. For that case, we may need up to twice the
while ((td->o.read_iolog_file && !flist_empty(&td->io_log_list)) ||
(!flist_empty(&td->trim_list)) || !io_issue_bytes_exceeded(td) ||
td->o.time_based) {
- struct timeval comp_time;
+ struct timespec comp_time;
struct io_u *io_u;
int full;
enum fio_ddir ddir;
if (td->terminate || td->done)
break;
- update_tv_cache(td);
+ update_ts_cache(td);
- if (runtime_exceeded(td, &td->tv_cache)) {
- __update_tv_cache(td);
- if (runtime_exceeded(td, &td->tv_cache)) {
+ if (runtime_exceeded(td, &td->ts_cache)) {
+ __update_ts_cache(td);
+ if (runtime_exceeded(td, &td->ts_cache)) {
fio_mark_td_terminate(td);
break;
}
if (td->o.odirect || td->o.mem_align || td->o.oatomic ||
td_ioengine_flagged(td, FIO_RAWIO))
- p = PAGE_ALIGN(td->orig_buffer) + td->o.mem_align;
+ p = PTR_ALIGN(td->orig_buffer, page_mask) + td->o.mem_align;
else
p = td->orig_buffer;
return 0;
}
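
PTR_ALIGN with page_mask replaces the PAGE_ALIGN macro removed at the top of this patch: it rounds the buffer up to the next page boundary, which O_DIRECT and raw I/O require. A sketch of the assumed semantics:

```c
#include <stdint.h>
#include <unistd.h>

/* Round `ptr` up to the boundary implied by `mask`, where
 * mask = alignment - 1 and alignment is a power of two. */
static inline char *ptr_align_up(void *ptr, uintptr_t mask)
{
	return (char *)(((uintptr_t)ptr + mask) & ~mask);
}

/*
 * Usage sketch:
 *	uintptr_t page_mask = (uintptr_t) sysconf(_SC_PAGESIZE) - 1;
 *	char *p = ptr_align_up(raw_buf, page_mask);
 */
```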
+/*
+ * This function is Linux specific, since FIO_HAVE_IOSCHED_SWITCH
+ * is currently only enabled on Linux.
+ */
static int switch_ioscheduler(struct thread_data *td)
{
#ifdef FIO_HAVE_IOSCHED_SWITCH
if (td_ioengine_flagged(td, FIO_DISKLESSIO))
return 0;
- sprintf(tmp, "%s/queue/scheduler", td->sysfs_root);
+ assert(td->files && td->files[0]);
+ sprintf(tmp, "%s/queue/scheduler", td->files[0]->du->sysfs_root);
f = fopen(tmp, "r+");
if (!f) {
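
switch_ioscheduler() now derives the sysfs root from the first file's disk-util entry rather than a per-thread field. A minimal standalone sketch of the sysfs round trip it performs (Linux only, error handling trimmed; the verification step is an assumption based on how the kernel reports the active scheduler):

```c
#include <stdio.h>
#include <string.h>

/* Write the scheduler name to <sysfs_root>/queue/scheduler, then read
 * the file back; the kernel brackets the active scheduler, e.g.
 * "noop [deadline] cfq". */
static int set_iosched(const char *sysfs_root, const char *sched)
{
	char path[256], line[256];
	FILE *f;

	snprintf(path, sizeof(path), "%s/queue/scheduler", sysfs_root);
	f = fopen(path, "r+");
	if (!f)
		return -1;

	fprintf(f, "%s\n", sched);
	rewind(f);	/* flushes the write, repositions for the read */

	if (!fgets(line, sizeof(line), f) || !strstr(line, sched)) {
		fclose(f);
		return -1;	/* kernel did not accept the scheduler */
	}
	fclose(f);
	return 0;
}
```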
if (exceeds_number_ios(td))
return false;
- if (td->o.io_limit)
- limit = td->o.io_limit;
+ if (td->o.io_size)
+ limit = td->o.io_size;
else
limit = td->o.size;
uint64_t diff;
/*
- * If the difference is less than the minimum IO size, we
+ * If the difference is less than the maximum IO size, we
* are done.
*/
diff = limit - ddir_rw_sum(td->io_bytes);
if (diff < td_max_bs(td))
return false;
- if (fio_files_done(td) && !td->o.io_limit)
+ if (fio_files_done(td) && !td->o.io_size)
return false;
return true;
struct thread_data *td = fd->td;
struct thread_options *o = &td->o;
struct sk_out *sk_out = fd->sk_out;
+ uint64_t bytes_done[DDIR_RWDIR_CNT];
int deadlock_loop_cnt;
int clear_state;
int ret;
sizeof(td->bw_sample_time));
}
+ memset(bytes_done, 0, sizeof(bytes_done));
clear_state = 0;
+
while (keep_running(td)) {
uint64_t verify_bytes;
fio_gettime(&td->start, NULL);
- memcpy(&td->tv_cache, &td->start, sizeof(td->start));
+ memcpy(&td->ts_cache, &td->start, sizeof(td->start));
if (clear_state) {
clear_io_state(td, 0);
prune_io_piece_log(td);
- if (td->o.verify_only && (td_write(td) || td_rw(td)))
+ if (td->o.verify_only && td_write(td))
verify_bytes = do_dry_run(td);
else {
- uint64_t bytes_done[DDIR_RWDIR_CNT];
-
do_io(td, bytes_done);
if (!ddir_rw_sum(bytes_done)) {
break;
}
+ /*
+ * If td ended up with no I/O when it should have performed some,
+ * then something went wrong, unless FIO_NOIO or FIO_DISKLESSIO
+ * is set. (Are there other flags that could be safely ignored
+ * here?)
+ */
+ if ((td->o.size || td->o.io_size) && !ddir_rw_sum(bytes_done) &&
+ !(td_ioengine_flagged(td, FIO_NOIO) ||
+ td_ioengine_flagged(td, FIO_DISKLESSIO)))
+ log_err("%s: No I/O performed by %s, "
+ "perhaps try --debug=io option for details?\n",
+ td->o.name, td->io_ops->name);
+
td_set_runstate(td, TD_FINISHING);
update_rusage_stat(td);
if (o->write_iolog_file)
write_iolog_close(td);
- fio_mutex_remove(td->mutex);
- td->mutex = NULL;
-
td_set_runstate(td, TD_EXITED);
/*
return (void *) (uintptr_t) td->error;
}
-static void dump_td_info(struct thread_data *td)
-{
- log_err("fio: job '%s' (state=%d) hasn't exited in %lu seconds, it "
- "appears to be stuck. Doing forceful exit of this job.\n",
- td->o.name, td->runstate,
- (unsigned long) time_since_now(&td->terminate_time));
-}
-
/*
* Run over the job map and reap the threads that have exited, if any.
*/
if (td->terminate &&
td->runstate < TD_FSYNCING &&
time_since_now(&td->terminate_time) >= FIO_REAP_TIMEOUT) {
- dump_td_info(td);
+ log_err("fio: job '%s' (state=%d) hasn't exited in "
+ "%lu seconds, it appears to be stuck. Doing "
+ "forceful exit of this job.\n",
+ td->o.name, td->runstate,
+ (unsigned long) time_since_now(&td->terminate_time));
td_set_runstate(td, TD_REAPED);
goto reaped;
}
if (!td_write(td) || td->o.allow_mounted_write)
return false;
+ /*
+ * If FIO_HAVE_CHARDEV_SIZE is defined, it's likely that character
+ * devices can be mkfs'd and mounted, so check them as well.
+ */
for_each_file(td, f, i) {
- if (f->filetype != FIO_TYPE_BD)
+#ifdef FIO_HAVE_CHARDEV_SIZE
+ if (f->filetype != FIO_TYPE_BLOCK && f->filetype != FIO_TYPE_CHAR)
+#else
+ if (f->filetype != FIO_TYPE_BLOCK)
+#endif
continue;
if (device_is_mounted(f->file_name))
goto mounted;
return false;
mounted:
- log_err("fio: %s appears mounted, and 'allow_mounted_write' isn't set. Aborting.", f->file_name);
+ log_err("fio: %s appears mounted, and 'allow_mounted_write' isn't set. Aborting.\n", f->file_name);
return true;
}
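
A sketch of one way a mounted-device test like device_is_mounted() can work on Linux, by scanning /proc/mounts (hypothetical helper; fio's real check may cover more cases, such as partitions of the device):

```c
#include <mntent.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool is_mounted(const char *dev)
{
	FILE *mtab = setmntent("/proc/mounts", "r");
	struct mntent *m;
	bool found = false;

	if (!mtab)
		return false;

	while ((m = getmntent(mtab)) != NULL) {
		if (!strcmp(m->mnt_fsname, dev)) {	/* device column */
			found = true;
			break;
		}
	}
	endmntent(mtab);
	return found;
}
```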
while (todo) {
struct thread_data *map[REAL_MAX_JOBS];
- struct timeval this_start;
+ struct timespec this_start;
int this_jobs = 0, left;
struct fork_data *fd;
fio_mutex_remove(td->rusage_sem);
td->rusage_sem = NULL;
}
+ fio_mutex_remove(td->mutex);
+ td->mutex = NULL;
}
free_disk_util();