static int runtime_exceeded(struct thread_data *td, struct timeval *t)
{
	if (in_ramp_time(td))
		return 0;
	if (!td->o.timeout)
		return 0;
-	if (mtime_since(&td->epoch, t) >= td->o.timeout )
+	if (utime_since(&td->epoch, t) >= td->o.timeout)
		return 1;

	return 0;
}
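For reference: fio's mtime_* helpers report elapsed milliseconds while the utime_* helpers report microseconds, so the change above lets o.timeout be kept at microsecond granularity. Below is a minimal self-contained sketch of that check; elapsed_usec() and timed_out() are hypothetical stand-ins for utime_since() and the surrounding function, not fio code.

#include <sys/time.h>

/* Microseconds elapsed between two timevals; assumes now >= start. */
static unsigned long long elapsed_usec(const struct timeval *start,
				       const struct timeval *now)
{
	unsigned long long usec;

	usec = (unsigned long long)(now->tv_sec - start->tv_sec) * 1000000ULL;
	usec += now->tv_usec - start->tv_usec;
	return usec;
}

/* Same comparison as the patched line, with the timeout in microseconds. */
static int timed_out(const struct timeval *epoch, const struct timeval *now,
		     unsigned long long timeout_us)
{
	return elapsed_usec(epoch, now) >= timeout_us;
}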
static int io_bytes_exceeded(struct thread_data *td)
{
+	unsigned long long number_ios = 0;
	unsigned long long bytes;

	if (td_rw(td))
		bytes = td->this_io_bytes[DDIR_READ] + td->this_io_bytes[DDIR_WRITE];
	else if (td_write(td))
		bytes = td->this_io_bytes[DDIR_WRITE];
	else if (td_read(td))
		bytes = td->this_io_bytes[DDIR_READ];
	else
		bytes = td->this_io_bytes[DDIR_TRIM];

-	return bytes >= td->o.size;
+	if (td->o.number_ios) {
+		number_ios = ddir_rw_sum(td->this_io_blocks);
+		number_ios += td->io_u_queued + td->io_u_in_flight;
+	}
+
+	return bytes >= td->o.size ||
+		(number_ios && number_ios >= td->o.number_ios);
}
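ddir_rw_sum() totals one counter per data direction, so the number_ios accounting above covers completed blocks across read, write and trim. A sketch of that behavior; the enum mirrors the rw part of fio's enum fio_ddir, and ddir_rw_sum_sketch() is an illustrative stand-in.

/* Mirrors the rw slots of fio's enum fio_ddir. */
enum { DDIR_READ = 0, DDIR_WRITE, DDIR_TRIM, DDIR_RWDIR_CNT };

/* Illustrative stand-in for ddir_rw_sum(): total a per-direction array. */
static unsigned long long ddir_rw_sum_sketch(const unsigned long long val[])
{
	unsigned long long sum = 0;
	int i;

	for (i = 0; i < DDIR_RWDIR_CNT; i++)
		sum += val[i];
	return sum;
}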

static int keep_running(struct thread_data *td)
{
	if (td->done)
		return 0;
	if (td->o.time_based)
		return 1;
	if (td->o.loops) {
		td->o.loops--;
		return 1;
	}

+	if (td->o.number_ios) {
+		unsigned long long number_ios = ddir_rw_sum(td->this_io_blocks);
+
+		number_ios += td->io_u_queued + td->io_u_in_flight;
+		if (number_ios >= td->o.number_ios)
+			return 0;
+	}
+
	if (td->o.size != -1ULL && ddir_rw_sum(td->io_bytes) < td->o.size) {
		uint64_t diff;

			td->ts.total_io_u[io_u->ddir]++;
		}
+		if (td_write(td) && io_u->ddir == DDIR_WRITE &&
+		    td->o.do_verify &&
+		    td->o.verify != VERIFY_NONE &&
+		    !td->o.experimental_verify)
+			log_io_piece(td, io_u);
+
		ret = io_u_sync_complete(td, io_u, bytes_done);
		(void) ret;
	}
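log_io_piece() records the extent of each write that will later be verified, so the verify pass knows which regions to re-read and check. A rough self-contained illustration of that kind of bookkeeping; struct wr_piece and remember_write() are invented for the example, and fio's real io_piece tracking is more involved.

#include <stdlib.h>

/* Invented record type: one entry per write awaiting verification. */
struct wr_piece {
	unsigned long long offset;
	unsigned long len;
	struct wr_piece *next;
};

/* Prepend a completed write's extent to the pending-verify list. */
static int remember_write(struct wr_piece **head,
			  unsigned long long offset, unsigned long len)
{
	struct wr_piece *p = malloc(sizeof(*p));

	if (!p)
		return -1;
	p->offset = offset;
	p->len = len;
	p->next = *head;
	*head = p;
	return 0;
}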

	/*
	 * Set affinity first, in case it has an impact on the memory
	 * allocations.
	 */
	if (o->cpumask_set) {
+		if (o->cpus_allowed_policy == FIO_CPUS_SPLIT) {
+			ret = fio_cpus_split(&o->cpumask, td->thread_number - 1);
+			if (!ret) {
+				log_err("fio: no CPUs set\n");
+				log_err("fio: Try increasing number of available CPUs\n");
+				td_verror(td, EINVAL, "cpus_split");
+				goto err;
+			}
+		}
		ret = fio_setaffinity(td->pid, o->cpumask);
		if (ret == -1) {
			td_verror(td, errno, "cpu_set_affinity");
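Under FIO_CPUS_SPLIT each thread keeps only its own slice of the allowed mask rather than sharing the whole set. A minimal sketch of that policy using the plain Linux cpu_set_t API; split_cpu_mask() is a hypothetical stand-in for fio_cpus_split(), and the one-CPU-per-thread reading of the policy is an assumption.

#define _GNU_SOURCE
#include <sched.h>

/* Keep only the cpu_index'th set bit of *mask. Returns 1 if a CPU was
 * assigned and 0 if the mask ran out, matching the !ret error check above. */
static int split_cpu_mask(cpu_set_t *mask, unsigned int cpu_index)
{
	cpu_set_t out;
	unsigned int i, seen = 0;

	CPU_ZERO(&out);
	for (i = 0; i < CPU_SETSIZE; i++) {
		if (!CPU_ISSET(i, mask))
			continue;
		if (seen++ == cpu_index) {
			CPU_SET(i, &out);
			*mask = out;
			return 1;
		}
	}
	return 0;
}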
static void run_threads(void)
{
	struct thread_data *td;
-	unsigned long spent;
	unsigned int i, todo, nr_running, m_rate, t_rate, nr_started;
+	uint64_t spent;

	if (fio_gtod_offload && fio_start_gtod_thread())
		return;
		}

		if (td->o.start_delay) {
-			spent = mtime_since_genesis();
+			spent = utime_since_genesis();

			if (td->o.start_delay > spent)
				continue;