#include <sys/wait.h>
#include <sys/ipc.h>
#include <sys/mman.h>
+#include <math.h>
#include "fio.h"
#ifndef FIO_NO_HAVE_SHM_H
}
}
/*
 * Signal handler: dump the stats of the currently running jobs.
 * Deliberately non-static so it can be installed from other
 * translation units as well.
 */
void sig_show_status(int sig)
{
	(void) sig;	/* handler signature requires it; value unused */
	show_running_run_stats();
}
/*
* if the queue is full, we MUST reap at least 1 event
*/
- min_evts = min(td->o.iodepth_batch_complete, td->cur_depth);
- if ((full && !min_evts) || !td->o.iodepth_batch_complete)
+ min_evts = min(td->o.iodepth_batch_complete_min, td->cur_depth);
+ if ((full && !min_evts) || !td->o.iodepth_batch_complete_min)
min_evts = 1;
if (time && (__should_check_rate(td, DDIR_READ) ||
return 0;
}
+static inline int io_in_polling(struct thread_data *td)
+{
+ return !td->o.iodepth_batch_complete_min &&
+ !td->o.iodepth_batch_complete_max;
+}
+
/*
* The main verify engine. Runs over the writes we previously submitted,
* reads the blocks back in, and checks the crc/md5 of the data.
*/
reap:
full = queue_full(td) || (ret == FIO_Q_BUSY && td->cur_depth);
- if (full || !td->o.iodepth_batch_complete)
+ if (full || io_in_polling(td))
ret = wait_for_completions(td, NULL);
if (ret < 0)
*/
static long long usec_for_io(struct thread_data *td, enum fio_ddir ddir)
{
- uint64_t secs, remainder, bps, bytes;
+ uint64_t secs, remainder, bps, bytes, iops;
assert(!(td->flags & TD_F_CHILD));
bytes = td->rate_io_issue_bytes[ddir];
bps = td->rate_bps[ddir];
- if (bps) {
+
+ if (td->o.rate_process == RATE_PROCESS_POISSON) {
+ uint64_t val;
+ iops = bps / td->o.bs[ddir];
+ val = (int64_t) (1000000 / iops) *
+ -logf(__rand_0_1(&td->poisson_state));
+ if (val) {
+ dprint(FD_RATE, "poisson rate iops=%llu\n",
+ (unsigned long long) 1000000 / val);
+ }
+ td->last_usec += val;
+ return td->last_usec;
+ } else if (bps) {
secs = bytes / bps;
remainder = bytes % bps;
return remainder * 1000000 / bps + secs * 1000000;
- } else
- return 0;
+ }
+
+ return 0;
}
/*
reap:
full = queue_full(td) ||
(ret == FIO_Q_BUSY && td->cur_depth);
- if (full || !td->o.iodepth_batch_complete)
+ if (full || io_in_polling(td))
ret = wait_for_completions(td, &comp_time);
}
if (ret < 0)
fio_gettime(&td->epoch, NULL);
fio_getrusage(&td->ru_start);
+ memcpy(&td->bw_sample_time, &td->epoch, sizeof(td->epoch));
+ memcpy(&td->iops_sample_time, &td->epoch, sizeof(td->epoch));
+
+ if (o->ratemin[DDIR_READ] || o->ratemin[DDIR_WRITE] ||
+ o->ratemin[DDIR_TRIM]) {
+ memcpy(&td->lastrate[DDIR_READ], &td->bw_sample_time,
+ sizeof(td->bw_sample_time));
+ memcpy(&td->lastrate[DDIR_WRITE], &td->bw_sample_time,
+ sizeof(td->bw_sample_time));
+ memcpy(&td->lastrate[DDIR_TRIM], &td->bw_sample_time,
+ sizeof(td->bw_sample_time));
+ }
+
clear_state = 0;
while (keep_running(td)) {
uint64_t verify_bytes;
fio_gettime(&td->start, NULL);
- memcpy(&td->bw_sample_time, &td->start, sizeof(td->start));
- memcpy(&td->iops_sample_time, &td->start, sizeof(td->start));
memcpy(&td->tv_cache, &td->start, sizeof(td->start));
- if (o->ratemin[DDIR_READ] || o->ratemin[DDIR_WRITE] ||
- o->ratemin[DDIR_TRIM]) {
- memcpy(&td->lastrate[DDIR_READ], &td->bw_sample_time,
- sizeof(td->bw_sample_time));
- memcpy(&td->lastrate[DDIR_WRITE], &td->bw_sample_time,
- sizeof(td->bw_sample_time));
- memcpy(&td->lastrate[DDIR_TRIM], &td->bw_sample_time,
- sizeof(td->bw_sample_time));
- }
-
if (clear_state)
- clear_io_state(td);
+ clear_io_state(td, 0);
prune_io_piece_log(td);
(td->io_ops->flags & FIO_UNIDIR))
continue;
- clear_io_state(td);
+ clear_io_state(td, 0);
fio_gettime(&td->start, NULL);
nr_process++;
}
- if (output_format == FIO_OUTPUT_NORMAL) {
+ if (output_format & FIO_OUTPUT_NORMAL) {
log_info("Starting ");
if (nr_thread)
log_info("%d thread%s", nr_thread,