X-Git-Url: https://git.kernel.dk/?p=fio.git;a=blobdiff_plain;f=time.c;h=f1833c7b9ea42d368c06eda180528d68db8e3c2c;hp=4fbc98bbc57f8b781d95c7a8362ce8b8ab348700;hb=df8472e12d8bdbe32ff1a4cf4f0c73224b8e8020;hpb=2bfe24bdd9ce313e5323d947952067b1475dd2cc

diff --git a/time.c b/time.c
index 4fbc98bb..f1833c7b 100644
--- a/time.c
+++ b/time.c
@@ -6,97 +6,32 @@
 static struct timeval genesis;
 static unsigned long ns_granularity;
 
-unsigned long utime_since(struct timeval *s, struct timeval *e)
-{
-	long sec, usec, ret;
-
-	sec = e->tv_sec - s->tv_sec;
-	usec = e->tv_usec - s->tv_usec;
-	if (sec > 0 && usec < 0) {
-		sec--;
-		usec += 1000000;
-	}
-
-	sec *= 1000000UL;
-	ret = sec + usec;
-
-	/*
-	 * time warp bug on some kernels?
-	 */
-	if (ret < 0)
-		ret = 0;
-
-	return ret;
-}
-
-unsigned long utime_since_now(struct timeval *s)
-{
-	struct timeval t;
-
-	fio_gettime(&t, NULL);
-	return utime_since(s, &t);
-}
-
-unsigned long mtime_since(struct timeval *s, struct timeval *e)
-{
-	long sec, usec, ret;
-
-	sec = e->tv_sec - s->tv_sec;
-	usec = e->tv_usec - s->tv_usec;
-	if (sec > 0 && usec < 0) {
-		sec--;
-		usec += 1000000;
-	}
-
-	sec *= 1000UL;
-	usec /= 1000UL;
-	ret = sec + usec;
-
-	/*
-	 * time warp bug on some kernels?
-	 */
-	if (ret < 0)
-		ret = 0;
-
-	return ret;
-}
-
-unsigned long mtime_since_now(struct timeval *s)
-{
-	struct timeval t;
-	void *p = __builtin_return_address(0);
-
-	fio_gettime(&t, p);
-	return mtime_since(s, &t);
-}
-
-unsigned long time_since_now(struct timeval *s)
-{
-	return mtime_since_now(s) / 1000;
-}
-
 /*
  * busy looping version for the last few usec
  */
-void __usec_sleep(unsigned int usec)
+uint64_t usec_spin(unsigned int usec)
 {
 	struct timeval start;
+	uint64_t t;
 
 	fio_gettime(&start, NULL);
-	while (utime_since_now(&start) < usec)
+	while ((t = utime_since_now(&start)) < usec)
 		nop;
+
+	return t;
 }
 
-void usec_sleep(struct thread_data *td, unsigned long usec)
+uint64_t usec_sleep(struct thread_data *td, unsigned long usec)
 {
 	struct timespec req;
 	struct timeval tv;
+	uint64_t t = 0;
 
 	do {
 		unsigned long ts = usec;
 
 		if (usec < ns_granularity) {
-			__usec_sleep(usec);
+			t += usec_spin(usec);
 			break;
 		}
 
@@ -115,59 +50,60 @@ void usec_sleep(struct thread_data *td, unsigned long usec)
 			break;
 
 		ts = utime_since_now(&tv);
+		t += ts;
 		if (ts >= usec)
 			break;
 
 		usec -= ts;
 	} while (!td->terminate);
+
+	return t;
 }
 
-void rate_throttle(struct thread_data *td, unsigned long time_spent,
-		   unsigned int bytes)
+uint64_t time_since_genesis(void)
 {
-	unsigned long usec_cycle;
-	unsigned int bs;
-
-	if (!td->o.rate && !td->o.rate_iops)
-		return;
-
-	if (td_rw(td))
-		bs = td->o.rw_min_bs;
-	else if (td_read(td))
-		bs = td->o.min_bs[DDIR_READ];
-	else
-		bs = td->o.min_bs[DDIR_WRITE];
+	return time_since_now(&genesis);
+}
 
-	usec_cycle = td->rate_usec_cycle * (bytes / bs);
+uint64_t mtime_since_genesis(void)
+{
+	return mtime_since_now(&genesis);
+}
 
-	if (time_spent < usec_cycle) {
-		unsigned long s = usec_cycle - time_spent;
+uint64_t utime_since_genesis(void)
+{
+	return utime_since_now(&genesis);
+}
 
-		td->rate_pending_usleep += s;
+int in_ramp_time(struct thread_data *td)
+{
+	return td->o.ramp_time && !td->ramp_time_over;
+}
 
-		if (td->rate_pending_usleep >= 100000) {
-			struct timeval t;
+int ramp_time_over(struct thread_data *td)
+{
+	struct timeval tv;
 
-			fio_gettime(&t, NULL);
-			usec_sleep(td, td->rate_pending_usleep);
-			td->rate_pending_usleep -= utime_since_now(&t);
-		}
-	} else {
-		long overtime = time_spent - usec_cycle;
+	if (!td->o.ramp_time || td->ramp_time_over)
+		return 1;
 
-		td->rate_pending_usleep -= overtime;
+	fio_gettime(&tv, NULL);
+	if (utime_since(&td->epoch, &tv) >= td->o.ramp_time) {
+		td->ramp_time_over = 1;
+		reset_all_stats(td);
+		td_set_runstate(td, TD_RAMP);
+		return 1;
 	}
-}
 
-unsigned long mtime_since_genesis(void)
-{
-	return mtime_since_now(&genesis);
+	return 0;
 }
 
-static void fio_init time_init(void)
+void fio_time_init(void)
 {
 	int i;
 
+	fio_clock_init();
+
 	/*
	 * Check the granularity of the nanosleep function
	 */
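
Note: the new usec_spin() and usec_sleep() report how long they actually waited, so a caller can observe (and later compensate for) oversleep. The snippet below is a stand-alone sketch of that pattern; it is not part of the patch and not fio code. It uses plain gettimeofday()/nanosleep(), and the helper names and the fixed 100 usec spin threshold are made up for illustration (fio instead measures the real nanosleep granularity in fio_time_init()).

#include <stdint.h>
#include <stdio.h>
#include <time.h>
#include <sys/time.h>

/*
 * Microseconds between two gettimeofday() samples, clamped at zero in
 * case the clock steps backwards (the same "time warp" guard the old
 * utime_since() carried).
 */
static uint64_t usecs_between(const struct timeval *s, const struct timeval *e)
{
	int64_t sec = e->tv_sec - s->tv_sec;
	int64_t usec = e->tv_usec - s->tv_usec;
	int64_t d = sec * 1000000 + usec;

	return d > 0 ? (uint64_t) d : 0;
}

/* Busy-wait for roughly 'usec' microseconds; return the time actually spent. */
static uint64_t spin_usec(unsigned int usec)
{
	struct timeval start, now;
	uint64_t elapsed;

	gettimeofday(&start, NULL);
	do {
		gettimeofday(&now, NULL);
		elapsed = usecs_between(&start, &now);
	} while (elapsed < usec);

	return elapsed;
}

/*
 * Sleep for 'usec' microseconds and report the elapsed time.  Requests
 * below the (made-up) granularity are spun instead of slept, mirroring
 * the usec_spin()/usec_sleep() split in the patch.
 */
static uint64_t sleep_usec(uint64_t usec)
{
	const uint64_t granularity = 100;	/* illustrative, not measured */
	struct timeval before, after;
	struct timespec req;

	if (usec < granularity)
		return spin_usec((unsigned int) usec);

	req.tv_sec = usec / 1000000;
	req.tv_nsec = (usec % 1000000) * 1000;

	gettimeofday(&before, NULL);
	nanosleep(&req, NULL);
	gettimeofday(&after, NULL);

	return usecs_between(&before, &after);
}

int main(void)
{
	printf("asked for 50 usec, waited %llu usec\n",
	       (unsigned long long) sleep_usec(50));
	printf("asked for 5000 usec, waited %llu usec\n",
	       (unsigned long long) sleep_usec(5000));
	return 0;
}

Built with e.g. cc -o sleep_sketch sleep_sketch.c (hypothetical file name), the 5000 usec request typically reports somewhat more than asked for; that slack is exactly what the returned elapsed time exposes to the caller.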