int groupid = 0;
unsigned int thread_number = 0;
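+/* the shared-memory job area is carved into segments; these track how many
+ * segments exist and which one new jobs are currently placed into */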
+unsigned int nr_segments = 0;
+unsigned int cur_segment = 0;
unsigned int stat_number = 0;
-int shm_id = 0;
int temp_stall_ts;
unsigned long done_secs = 0;
#ifdef PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP
pthread_mutex_t overlap_check = PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP;
#else
pthread_mutex_t overlap_check = PTHREAD_MUTEX_INITIALIZER;
#endif

static void sig_int(int sig)
{
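+	/*
+	 * The job area is split into shared-memory segments; a non-zero
+	 * segment count means jobs exist that may need to be signalled.
+	 */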
- if (threads) {
+ if (nr_segments) {
if (is_backend)
fio_server_got_signal(sig);
		else {
			log_info("\nfio: terminating on signal %d\n", sig);
			log_info_flush();
			fio_terminate_threads(TERMINATE_ALL, TERMINATE_ALL);
		}
	}
}

	if ((full && !min_evts) || !td->o.iodepth_batch_complete_min)
		min_evts = 1;
- if (time && __should_check_rate(td))
+ if (time && should_check_rate(td))
fio_gettime(time, NULL);

			requeue_io_u(td, &io_u);
} else {
sync_done:
- if (comp_time && __should_check_rate(td))
+ if (comp_time && should_check_rate(td))
fio_gettime(comp_time, NULL);
*ret = io_u_sync_complete(td, io_u);
return 0;
}
-static void handle_thinktime(struct thread_data *td, enum fio_ddir ddir)
+static void handle_thinktime(struct thread_data *td, enum fio_ddir ddir,
+ struct timespec *time)
{
unsigned long long b;
uint64_t total;
int left;
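+	/*
+	 * thinktime_blocks_counter points at either io_blocks or io_issues
+	 * (chosen at thread init), so b counts completed or issued blocks
+	 * per thinktime_blocks_type. Only pause once a whole
+	 * thinktime_blocks interval has elapsed, and never at b == 0.
+	 */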
- b = ddir_rw_sum(td->io_blocks);
- if (b % td->o.thinktime_blocks)
+ b = ddir_rw_sum(td->thinktime_blocks_counter);
+ if (b % td->o.thinktime_blocks || !b)
return;
io_u_quiesce(td);
/* adjust for rate_process=poisson */
td->last_usec[ddir] += total;
}
+
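+	/*
+	 * Re-stamp the caller's timestamp after quiescing and sleeping so
+	 * that later rate checks measure from the end of the thinktime
+	 * rather than charging the sleep to I/O time.
+	 */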
+ if (time && should_check_rate(td))
+ fio_gettime(time, NULL);
}

		}
if (ret < 0)
break;
+
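+	/*
+	 * Handle thinktime ahead of the bytes_done check below, so passes
+	 * that complete no new bytes still honor it; passing comp_time lets
+	 * handle_thinktime re-stamp it after sleeping.
+	 */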
+ if (ddir_rw(ddir) && td->o.thinktime)
+ handle_thinktime(td, ddir, &comp_time);
+
if (!ddir_rw_sum(td->bytes_done) &&
!td_ioengine_flagged(td, FIO_NOIO))
continue;
}
if (!in_ramp_time(td) && td->o.latency_target)
lat_target_check(td);
-
- if (ddir_rw(ddir) && td->o.thinktime)
- handle_thinktime(td, ddir);
}
check_update_rusage(td);
if (rate_submit_init(td, sk_out))
goto err;
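
+	/*
+	 * Pick what thinktime_blocks counts: completed blocks (io_blocks)
+	 * or issued blocks (io_issues), per thinktime_blocks_type.
+	 */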
+ if (td->o.thinktime_blocks_type == THINKTIME_BLOCKS_TYPE_COMPLETE)
+ td->thinktime_blocks_counter = td->io_blocks;
+ else
+ td->thinktime_blocks_counter = td->io_issues;
+
set_epoch_time(td, o->log_unix_epoch);
fio_getrusage(&td->ru_start);
memcpy(&td->bw_sample_time, &td->epoch, sizeof(td->epoch));