A few more kb_base fixups
diff --git a/time.c b/time.c
index e84fb013ed0caec000f10e202ca8da3fc9a33963..6397f20e01dc8e2ce0cb0067311c155e9c7dce82 100644
--- a/time.c
+++ b/time.c
@@ -78,7 +78,7 @@ unsigned long time_since_now(struct timeval *s)
 /*
  * busy looping version for the last few usec
  */
-void __usec_sleep(unsigned int usec)
+void usec_spin(unsigned int usec)
 {
        struct timeval start;
 
@@ -96,7 +96,7 @@ void usec_sleep(struct thread_data *td, unsigned long usec)
                unsigned long ts = usec;
 
                if (usec < ns_granularity) {
-                       __usec_sleep(usec);
+                       usec_spin(usec);
                        break;
                }
 
@@ -122,43 +122,6 @@ void usec_sleep(struct thread_data *td, unsigned long usec)
        } while (!td->terminate);
 }
 
-void rate_throttle(struct thread_data *td, unsigned long time_spent,
-                  unsigned int bytes)
-{
-       unsigned long usec_cycle;
-       unsigned int bs;
-
-       if (!td->o.rate && !td->o.rate_iops)
-               return;
-
-       if (td_rw(td))
-               bs = td->o.rw_min_bs;
-       else if (td_read(td))
-               bs = td->o.min_bs[DDIR_READ];
-       else
-               bs = td->o.min_bs[DDIR_WRITE];
-
-       usec_cycle = td->rate_usec_cycle * (bytes / bs);
-
-       if (time_spent < usec_cycle) {
-               unsigned long s = usec_cycle - time_spent;
-
-               td->rate_pending_usleep += s;
-
-               if (td->rate_pending_usleep >= 100000) {
-                       struct timeval t;
-
-                       fio_gettime(&t, NULL);
-                       usec_sleep(td, td->rate_pending_usleep);
-                       td->rate_pending_usleep -= utime_since_now(&t);
-               }
-       } else {
-               long overtime = time_spent - usec_cycle;
-
-               td->rate_pending_usleep -= overtime;
-       }
-}
-
 unsigned long mtime_since_genesis(void)
 {
        return mtime_since_now(&genesis);
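
For context on the rename above: the helper now called usec_spin() is the busy-wait path that usec_sleep() falls back to when the requested delay is below ns_granularity, i.e. too short for the kernel sleep path to be accurate. A minimal standalone sketch of that busy-wait technique, using plain gettimeofday() instead of fio's fio_gettime()/utime_since_now() helpers so it compiles on its own, might look like this:

#include <sys/time.h>

/*
 * Busy-wait sketch: repeatedly sample the clock until the requested
 * number of microseconds has elapsed. fio's usec_spin() does the same
 * thing with its internal fio_gettime()/utime_since_now() helpers;
 * gettimeofday() is used here only to keep the example self-contained.
 */
static void usec_spin_sketch(unsigned int usec)
{
	struct timeval start, now;
	long elapsed;

	gettimeofday(&start, NULL);
	do {
		gettimeofday(&now, NULL);
		elapsed = (now.tv_sec - start.tv_sec) * 1000000L +
			  (now.tv_usec - start.tv_usec);
	} while (elapsed < (long) usec);
}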
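
The rate_throttle() helper removed here paced I/O by charging each completed I/O the time budget for its size: usec_cycle = rate_usec_cycle * (bytes / bs), where bs is the minimum block size for the data direction. Any shortfall (usec_cycle - time_spent) was added to td->rate_pending_usleep, and the thread only slept once at least 100000 usec (100 ms) of debt had accumulated, subtracting the measured sleep time afterwards; completions that overran their budget reduced the accumulator instead. As a purely hypothetical worked example: with rate_usec_cycle at 250 usec per minimum-sized block, an I/O covering four blocks that completes in 600 usec would add 250 * 4 - 600 = 400 usec to the pending-sleep accumulator.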