Sometimes it's useful to let a job settle for a little while
before taking any latency and throughput measurements, since
the initial rate on e.g. a write workload may be much higher
than the longer-term sustained rate.

So add a ramp_time option that allows the user to specify a
lead-in ramp time that must have passed before fio takes any
performance numbers into account.
Suggested by "Jenkins, Lee" <Lee.Jenkins@hp.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
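
For illustration, a job file using the new option might look like the
following; the job name, test file and sizes are made up for this
example and are not part of the change:

	[settle-write]
	rw=write
	filename=/tmp/fio.test
	size=1g
	runtime=60
	ramp_time=10

With a job like this, completion latency, bandwidth samples and the
minimum-rate checks are only accounted once the 10 second ramp has
passed, as the fio.c and io_u.c hunks below show.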
written. It will simply loop over the same workload
		as many times as the runtime allows.
+ramp_time=time	If set, fio will run the specified workload for this amount
+		of time before logging any performance numbers. Useful for
+		letting performance settle before logging results, thus
+		minimizing the runtime required for stable results.
+
invalidate=bool	Invalidate the buffer/page cache parts for this file prior
		to starting io. Defaults to true.
 * of completions except the very first one which may look
 * a little bursty
 */
-		usec = utime_since(&s, &comp_time);
+		if (ramp_time_over(td)) {
+			usec = utime_since(&s, &comp_time);
-		rate_throttle(td, usec, bytes_done);
+			rate_throttle(td, usec, bytes_done);
-		if (check_min_rate(td, &comp_time)) {
-			if (exitall_on_terminate)
-				terminate_threads(td->groupid);
-			td_verror(td, EIO, "check_min_rate");
-			break;
+			if (check_min_rate(td, &comp_time)) {
+				if (exitall_on_terminate)
+					terminate_threads(td->groupid);
+				td_verror(td, EIO, "check_min_rate");
+				break;
+			}
unsigned int fsync_blocks;
unsigned int start_delay;
unsigned long long timeout;
+ unsigned long long ramp_time;
unsigned int overwrite;
unsigned int bw_avg_time;
unsigned int loops;
struct timeval rw_end[2];
struct timeval last_issue;
unsigned int rw_end_set[2];
+ unsigned int ramp_time_over;
/*
 * read/write mixed workload state
extern void fill_start_time(struct timeval *);
extern void fio_gettime(struct timeval *, void *);
extern void set_genesis_time(void);
+extern int ramp_time_over(struct thread_data *);
/*
 * Init/option functions
	if (!io_u->error) {
		unsigned int bytes = io_u->buflen - io_u->resid;
		const enum fio_ddir idx = io_u->ddir;
-		td->io_blocks[idx]++;
-		td->io_bytes[idx] += bytes;
-		td->this_io_bytes[idx] += bytes;
+		int ramp_done = ramp_time_over(td);
-		usec = utime_since(&io_u->issue_time, &icd->time);
+		if (ramp_done) {
+			td->io_blocks[idx]++;
+			td->io_bytes[idx] += bytes;
+			td->this_io_bytes[idx] += bytes;
-		add_clat_sample(td, idx, usec);
-		add_bw_sample(td, idx, &icd->time);
-		io_u_mark_latency(td, usec);
+			usec = utime_since(&io_u->issue_time, &icd->time);
+
+			add_clat_sample(td, idx, usec);
+			add_bw_sample(td, idx, &icd->time);
+			io_u_mark_latency(td, usec);
+		}
		if (td_write(td) && idx == DDIR_WRITE &&
		    td->o.do_verify &&
		    td->o.verify != VERIFY_NONE)
			log_io_piece(td, io_u);
-		icd->bytes_done[idx] += bytes;
+		if (ramp_done)
+			icd->bytes_done[idx] += bytes;
	if (io_u->end_io) {
		ret = io_u->end_io(td, io_u);
		.off1	= td_var_offset(time_based),
		.help	= "Keep running until runtime/timeout is met",
	},
+	{
+		.name	= "ramp_time",
+		.type	= FIO_OPT_STR_VAL_TIME,
+		.off1	= td_var_offset(ramp_time),
+		.help	= "Ramp up time before measuring performance",
+	},
	{
		.name	= "mem",
		.alias	= "iomem",
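
With the entry registered in the option table above, ramp_time can
also be given on the command line like any other job option, assuming
the usual command-line job-option handling; a hypothetical invocation
would be:

	fio --name=settle-write --rw=write --size=1g --runtime=60 --ramp_time=10

When no unit is given, FIO_OPT_STR_VAL_TIME values are taken as
seconds, which is why ramp_time_over() below multiplies by 1000 to
compare against elapsed milliseconds.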
	return mtime_since_now(&genesis);
}
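+/*
+ * Returns 1 once the lead-in ramp has passed, or immediately if no
+ * ramp was requested. When the ramp expires, ->start is reset so
+ * that rate checks and runtime accounting cover only the measured
+ * part of the job.
+ */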
+int ramp_time_over(struct thread_data *td)
+{
+	struct timeval tv;
+
+	if (!td->o.ramp_time || td->ramp_time_over)
+		return 1;
+
+	fio_gettime(&tv, NULL);
+	if (mtime_since(&td->epoch, &tv) >= td->o.ramp_time * 1000) {
+		td->ramp_time_over = 1;
+		memcpy(&td->start, &tv, sizeof(tv));
+		return 1;
+	}
+
+	return 0;
+}
+
static void fio_init time_init(void)
{
	int i;
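
To make the gating pattern easier to follow in isolation, here is a
small standalone C sketch of the same idea. It is not fio code: it
substitutes plain gettimeofday() for fio's fio_gettime()/mtime_since()
helpers, and the 2 second ramp is a made-up value.

	#include <stdio.h>
	#include <sys/time.h>

	/* elapsed milliseconds between *s and *e */
	static unsigned long msec_between(const struct timeval *s,
					  const struct timeval *e)
	{
		long sec = e->tv_sec - s->tv_sec;
		long usec = e->tv_usec - s->tv_usec;

		if (usec < 0) {
			sec--;
			usec += 1000000;
		}
		return sec * 1000 + usec / 1000;
	}

	int main(void)
	{
		struct timeval epoch, now;
		unsigned long long ramp_time = 2;	/* seconds, made up */

		gettimeofday(&epoch, NULL);
		do {
			/* I/O would be issued and completed here, with
			 * its stats discarded, exactly like the
			 * ramp_time_over() checks in the patch above */
			gettimeofday(&now, NULL);
		} while (msec_between(&epoch, &now) < ramp_time * 1000);

		/* equivalent to setting td->ramp_time_over = 1: from
		 * here on, samples would be accounted */
		printf("ramp done after %lu ms\n", msec_between(&epoch, &now));
		return 0;
	}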