Add support for limiting rate in only one direction
[fio.git] / time.c
diff --git a/time.c b/time.c
index 4f1c13a08a5c6a77d07bb1fe9cc26bb4f84ba868..ee9d33fa20d1682e6da44f72edf46023587eaee1 100644 (file)
--- a/time.c
+++ b/time.c
@@ -78,7 +78,7 @@ unsigned long time_since_now(struct timeval *s)
 /*
  * busy looping version for the last few usec
  */
-void __usec_sleep(unsigned int usec)
+void usec_spin(unsigned int usec)
 {
        struct timeval start;
 
@@ -96,7 +96,7 @@ void usec_sleep(struct thread_data *td, unsigned long usec)
                unsigned long ts = usec;
 
                if (usec < ns_granularity) {
-                       __usec_sleep(usec);
+                       usec_spin(usec);
                        break;
                }
 
@@ -122,41 +122,28 @@ void usec_sleep(struct thread_data *td, unsigned long usec)
        } while (!td->terminate);
 }
 
-void rate_throttle(struct thread_data *td, unsigned long time_spent,
-                  unsigned int bytes)
+long rate_throttle(struct thread_data *td, unsigned long time_spent,
+                  unsigned long bytes, enum fio_ddir ddir)
 {
+       unsigned int bs = td->o.min_bs[ddir];
        unsigned long usec_cycle;
-       unsigned int bs;
 
-       if (!td->o.rate && !td->o.rate_iops)
-               return;
-
-       if (td_rw(td))
-               bs = td->o.rw_min_bs;
-       else if (td_read(td))
-               bs = td->o.min_bs[DDIR_READ];
-       else
-               bs = td->o.min_bs[DDIR_WRITE];
+       if (!td->o.rate[ddir] && !td->o.rate_iops[ddir])
+               return 0;
 
-       usec_cycle = td->rate_usec_cycle * (bytes / bs);
+       usec_cycle = td->rate_usec_cycle[ddir] * (bytes / bs);
 
        if (time_spent < usec_cycle) {
                unsigned long s = usec_cycle - time_spent;
 
-               td->rate_pending_usleep += s;
-
-               if (td->rate_pending_usleep >= 100000) {
-                       struct timeval t;
-
-                       fio_gettime(&t, NULL);
-                       usec_sleep(td, td->rate_pending_usleep);
-                       td->rate_pending_usleep -= utime_since_now(&t);
-               }
+               td->rate_pending_usleep[ddir] += s;
        } else {
                long overtime = time_spent - usec_cycle;
 
-               td->rate_pending_usleep -= overtime;
+               td->rate_pending_usleep[ddir] -= overtime;
        }
+
+       return td->rate_pending_usleep[ddir];
 }
 
 unsigned long mtime_since_genesis(void)
@@ -164,6 +151,11 @@ unsigned long mtime_since_genesis(void)
        return mtime_since_now(&genesis);
 }
 
+int in_ramp_time(struct thread_data *td)
+{
+       return td->o.ramp_time && !td->ramp_time_over;
+}
+
 int ramp_time_over(struct thread_data *td)
 {
        struct timeval tv;
@@ -174,7 +166,8 @@ int ramp_time_over(struct thread_data *td)
        fio_gettime(&tv, NULL);
        if (mtime_since(&td->epoch, &tv) >= td->o.ramp_time * 1000) {
                td->ramp_time_over = 1;
-               memcpy(&td->start, &tv, sizeof(tv));
+               reset_all_stats(td);
+               td_set_runstate(td, TD_RAMP);
                return 1;
        }