ramp_time=time If set, fio will run the specified workload for this amount
of time before logging any performance numbers. Useful for
letting performance settle before logging results, thus
- minimizing the runtime required for stable results.
+ minimizing the runtime required for stable results. Note
+ that the ramp_time is considered lead-in time for a job,
+ thus it will increase the total runtime if a timeout
+ or runtime is specified.
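
To make the added note concrete, here is a short sketch (illustrative only, not part of the patch) of how the lead-in time adds to the wall clock when an explicit runtime is also given:

/*
 * Sketch: with ramp_time=30 and runtime=60 the job performs I/O for
 * roughly 90 seconds, but only the final 60 seconds are reported.
 * Any start_delay is added on top.
 */
static unsigned long total_wall_secs(unsigned long start_delay,
				     unsigned long ramp_time,
				     unsigned long runtime)
{
	return start_delay + ramp_time + runtime;
}
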
invalidate=bool Invalidate the buffer/page cache for this file prior
to starting io. Defaults to true.
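
For background, such invalidation is commonly done on Linux with posix_fadvise(); a minimal sketch of a helper like that (hypothetical, not fio's actual implementation, which also has to handle block devices) could look like:

#include <fcntl.h>

/* Ask the kernel to discard cached pages for a byte range of the file. */
static int drop_file_cache(int fd, off_t offset, off_t len)
{
	return posix_fadvise(fd, offset, len, POSIX_FADV_DONTNEED);
}
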
case TD_EXITED:
c = 'E';
break;
+ case TD_RAMP:
+ c = '/';
+ break;
case TD_RUNNING:
if (td_rw(td)) {
if (td_random(td))
/*
* Best effort calculation of the estimated pending runtime of a job.
*/
-static int thread_eta(struct thread_data *td, unsigned long elapsed)
+static int thread_eta(struct thread_data *td)
{
unsigned long long bytes_total, bytes_done;
unsigned long eta_sec = 0;
+ unsigned long elapsed;
+
+ elapsed = (mtime_since_now(&td->epoch) + 999) / 1000;
bytes_total = td->total_io_size;
eta_sec > (td->o.timeout + done_secs - elapsed))
eta_sec = td->o.timeout + done_secs - elapsed;
} else if (td->runstate == TD_NOT_CREATED || td->runstate == TD_CREATED
- || td->runstate == TD_INITIALIZED) {
+ || td->runstate == TD_INITIALIZED
+ || td->runstate == TD_RAMP) {
int t_eta = 0, r_eta = 0;
/*
* We can only guess - assume it'll run the full timeout
* if given, otherwise assume it'll run at the specified rate.
*/
- if (td->o.timeout)
+ if (td->o.timeout) {
t_eta = td->o.timeout + td->o.start_delay;
+
+ if (in_ramp_time(td)) {
+ unsigned long ramp_left;
+
+ ramp_left = mtime_since_now(&td->start);
+ ramp_left = (ramp_left + 999) / 1000;
+ if (ramp_left <= t_eta)
+ t_eta -= ramp_left;
+ }
+ }
if (td->o.rate) {
r_eta = (bytes_total / 1024) / td->o.rate;
r_eta += td->o.start_delay;
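
As a quick sanity check on the rate-based guess above (assuming td->o.rate is in KiB/s, which the division by 1024 implies): with bytes_total of 1 GiB and rate=10240, r_eta = (1073741824 / 1024) / 10240 = 102 seconds, plus start_delay.
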
*/
void print_thread_status(void)
{
- unsigned long elapsed = mtime_since_genesis() / 1000;
- int i, nr_running, nr_pending, t_rate, m_rate;
+ unsigned long elapsed = (mtime_since_genesis() + 999) / 1000;
+ int i, nr_ramp, nr_running, nr_pending, t_rate, m_rate;
int t_iops, m_iops, files_open;
struct thread_data *td;
char eta_str[128];
io_bytes[0] = io_bytes[1] = 0;
nr_pending = nr_running = t_rate = m_rate = t_iops = m_iops = 0;
+ nr_ramp = 0;
bw_avg_time = ULONG_MAX;
files_open = 0;
for_each_td(td, i) {
t_iops += td->o.rate_iops;
m_iops += td->o.rate_iops_min;
files_open += td->nr_open_files;
+ } else if (td->runstate == TD_RAMP) {
+ nr_running++;
+ nr_ramp++;
} else if (td->runstate < TD_RUNNING)
nr_pending++;
if (elapsed >= 3)
- eta_secs[i] = thread_eta(td, elapsed);
+ eta_secs[i] = thread_eta(td);
else
eta_secs[i] = INT_MAX;
check_str_update(td);
- io_bytes[0] += td->io_bytes[0];
- io_bytes[1] += td->io_bytes[1];
+
+ if (td->runstate > TD_RAMP) {
+ io_bytes[0] += td->io_bytes[0];
+ io_bytes[1] += td->io_bytes[1];
+ }
}
if (exitall_on_terminate)
fio_gettime(&now, NULL);
rate_time = mtime_since(&rate_prev_time, &now);
- if (write_bw_log && rate_time > bw_avg_time) {
+ if (write_bw_log && rate_time > bw_avg_time && !in_ramp_time(td)) {
calc_rate(rate_time, io_bytes, rate_io_bytes, rate);
memcpy(&rate_prev_time, &now, sizeof(now));
add_agg_sample(rate[DDIR_READ], DDIR_READ);
char perc_str[32];
int ll;
- if (!eta_sec && !eta_good)
+ if ((!eta_sec && !eta_good) || nr_ramp == nr_running)
strcpy(perc_str, "-.-% done");
else {
eta_good = 1;
#define TERMINATE_ALL (-1)
#define JOB_START_TIMEOUT (5 * 1000)
-static inline void td_set_runstate(struct thread_data *td, int runstate)
+void td_set_runstate(struct thread_data *td, int runstate)
{
if (td->runstate == runstate)
return;
unsigned int i;
int ret = 0;
- td_set_runstate(td, TD_RUNNING);
+ if (in_ramp_time(td))
+ td_set_runstate(td, TD_RAMP);
+ else
+ td_set_runstate(td, TD_RUNNING);
while ((td->this_io_bytes[0] + td->this_io_bytes[1]) < td->o.size) {
struct timeval comp_time;
if (td->o.verify != VERIFY_NONE && io_u->ddir == DDIR_READ) {
io_u->end_io = verify_io_u;
td_set_runstate(td, TD_VERIFYING);
- } else
+ } else if (in_ramp_time(td))
+ td_set_runstate(td, TD_RAMP);
+ else
td_set_runstate(td, TD_RUNNING);
ret = td_io_queue(td, io_u);
* of completions except the very first one which may look
* a little bursty
*/
- if (ramp_time_over(td)) {
+ if (!in_ramp_time(td)) {
usec = utime_since(&s, &comp_time);
rate_throttle(td, usec, bytes_done);
return 0;
}
-static int clear_io_state(struct thread_data *td)
+static void reset_io_counters(struct thread_data *td)
{
- struct fio_file *f;
- unsigned int i;
- int ret;
-
td->ts.stat_io_bytes[0] = td->ts.stat_io_bytes[1] = 0;
td->this_io_bytes[0] = td->this_io_bytes[1] = 0;
td->zone_bytes = 0;
*/
if (td->o.time_based || td->o.loops)
td->nr_done_files = 0;
+}
+
+void reset_all_stats(struct thread_data *td)
+{
+ struct timeval tv;
+ int i;
+
+ reset_io_counters(td);
+
+ for (i = 0; i < 2; i++) {
+ td->io_bytes[i] = 0;
+ td->io_blocks[i] = 0;
+ td->io_issues[i] = 0;
+ td->ts.total_io_u[i] = 0;
+ }
+
+ fio_gettime(&tv, NULL);
+ memcpy(&td->epoch, &tv, sizeof(tv));
+ memcpy(&td->start, &tv, sizeof(tv));
+}
+
+static int clear_io_state(struct thread_data *td)
+{
+ struct fio_file *f;
+ unsigned int i;
+ int ret;
+
+ reset_io_counters(td);
close_files(td);
if (td->runstate != TD_INITIALIZED)
continue;
- td_set_runstate(td, TD_RUNNING);
+ if (in_ramp_time(td))
+ td_set_runstate(td, TD_RAMP);
+ else
+ td_set_runstate(td, TD_RUNNING);
nr_running++;
nr_started--;
m_rate += td->o.ratemin;
extern void fio_gettime(struct timeval *, void *);
extern void set_genesis_time(void);
extern int ramp_time_over(struct thread_data *);
+extern int in_ramp_time(struct thread_data *);
/*
* Init/option functions
TD_NOT_CREATED = 0,
TD_CREATED,
TD_INITIALIZED,
+ TD_RAMP,
TD_RUNNING,
TD_VERIFYING,
TD_FSYNCING,
TD_REAPED,
};
+extern void td_set_runstate(struct thread_data *, int);
+
/*
* Verify helpers
*/
void io_u_mark_complete(struct thread_data *, unsigned int);
void io_u_mark_submit(struct thread_data *, unsigned int);
+/*
+ * Reset stats after ramp time completes
+ */
+extern void reset_all_stats(struct thread_data *);
+
/*
* io engine entry points
*/
if (!io_u->error) {
unsigned int bytes = io_u->buflen - io_u->resid;
const enum fio_ddir idx = io_u->ddir;
- int ret, ramp_done;
+ int ret;
- ramp_done = ramp_time_over(td);
-
- if (ramp_done) {
- td->io_blocks[idx]++;
- td->io_bytes[idx] += bytes;
- td->this_io_bytes[idx] += bytes;
+ td->io_blocks[idx]++;
+ td->io_bytes[idx] += bytes;
+ td->this_io_bytes[idx] += bytes;
+ if (ramp_time_over(td)) {
usec = utime_since(&io_u->issue_time, &icd->time);
add_clat_sample(td, idx, usec);
td->o.verify != VERIFY_NONE)
log_io_piece(td, io_u);
- if (ramp_done)
- icd->bytes_done[idx] += bytes;
+ icd->bytes_done[idx] += bytes;
if (io_u->end_io) {
ret = io_u->end_io(td, io_u);
return mtime_since_now(&genesis);
}
+int in_ramp_time(struct thread_data *td)
+{
+ return td->o.ramp_time && !td->ramp_time_over;
+}
+
int ramp_time_over(struct thread_data *td)
{
struct timeval tv;
fio_gettime(&tv, NULL);
if (mtime_since(&td->epoch, &tv) >= td->o.ramp_time * 1000) {
td->ramp_time_over = 1;
- memcpy(&td->start, &tv, sizeof(tv));
+ reset_all_stats(td);
+ td_set_runstate(td, TD_RAMP);
return 1;
}
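
Taken together, the two helpers form a simple warm-up gate: I/O is counted normally, and the accumulated statistics are dropped once when the ramp period ends. A self-contained sketch of the same pattern (plain C with illustrative names, not fio code):

#include <time.h>

struct job {
	time_t epoch;		/* when the job started doing I/O */
	time_t ramp_secs;	/* configured warm-up, 0 = disabled */
	int ramp_over;		/* latched once the warm-up completes */
	long bytes_done;	/* running byte counter */
};

static int job_ramp_over(struct job *j)
{
	if (!j->ramp_secs || j->ramp_over)
		return 1;
	if (time(NULL) - j->epoch >= j->ramp_secs) {
		j->ramp_over = 1;
		j->bytes_done = 0;	/* discard warm-up totals, like reset_all_stats() */
		return 1;
	}
	return 0;
}

static void account(struct job *j, long bytes)
{
	j->bytes_done += bytes;		/* always count... */
	job_ramp_over(j);		/* ...but reset once when the ramp ends */
}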