[PATCH] Basic support for a cpu cycle eater job
[fio.git] / time.c
1 #include <time.h>
2 #include <sys/time.h>
3
4 #include "fio.h"
5
/*
 * Elapsed time between two timevals, in microseconds.
 * Assumes *e is not earlier than *s (a negative span is not handled).
 */
unsigned long utime_since(struct timeval *s, struct timeval *e)
{
	double secs = e->tv_sec - s->tv_sec;
	double usecs = e->tv_usec - s->tv_usec;

	/* borrow a second if the microsecond part went negative */
	if (secs > 0 && usecs < 0) {
		secs -= 1;
		usecs += 1000000;
	}

	return secs * 1000000.0 + usecs;
}
21
22 static unsigned long utime_since_now(struct timeval *s)
23 {
24         struct timeval t;
25
26         gettimeofday(&t, NULL);
27         return utime_since(s, &t);
28 }
29
/*
 * Elapsed time between two timevals, in milliseconds (truncated).
 * Assumes *e is not earlier than *s (a negative span is not handled).
 */
unsigned long mtime_since(struct timeval *s, struct timeval *e)
{
	double secs = e->tv_sec - s->tv_sec;
	double usecs = e->tv_usec - s->tv_usec;

	/* borrow a second if the microsecond part went negative */
	if (secs > 0 && usecs < 0) {
		secs -= 1;
		usecs += 1000000;
	}

	return secs * 1000.0 + usecs / 1000.0;
}
46
47 unsigned long mtime_since_now(struct timeval *s)
48 {
49         struct timeval t;
50
51         gettimeofday(&t, NULL);
52         return mtime_since(s, &t);
53 }
54
/*
 * Whole seconds elapsed since *s.
 */
unsigned long time_since_now(struct timeval *s)
{
	unsigned long msecs = mtime_since_now(s);

	return msecs / 1000;
}
59
/*
 * busy looping version for the last few usec
 *
 * Spins on the wall clock until @usec microseconds have passed; used
 * instead of nanosleep() for short waits where its granularity is too
 * coarse. `nop` presumably comes from fio.h — a no-op to keep the
 * compiler from optimizing the loop body away; TODO confirm.
 */
void __usec_sleep(unsigned int usec)
{
	struct timeval start;

	gettimeofday(&start, NULL);
	while (utime_since_now(&start) < usec)
		nop;
}
71
72 void usec_sleep(struct thread_data *td, unsigned long usec)
73 {
74         struct timespec req, rem;
75
76         req.tv_sec = usec / 1000000;
77         req.tv_nsec = usec * 1000 - req.tv_sec * 1000000;
78
79         do {
80                 if (usec < 5000) {
81                         __usec_sleep(usec);
82                         break;
83                 }
84
85                 rem.tv_sec = rem.tv_nsec = 0;
86                 if (nanosleep(&req, &rem) < 0)
87                         break;
88
89                 if ((rem.tv_sec + rem.tv_nsec) == 0)
90                         break;
91
92                 req.tv_nsec = rem.tv_nsec;
93                 req.tv_sec = rem.tv_sec;
94
95                 usec = rem.tv_sec * 1000000 + rem.tv_nsec / 1000;
96         } while (!td->terminate);
97 }
98
99 void rate_throttle(struct thread_data *td, unsigned long time_spent,
100                    unsigned int bytes)
101 {
102         unsigned long usec_cycle;
103
104         if (!td->rate)
105                 return;
106
107         usec_cycle = td->rate_usec_cycle * (bytes / td->min_bs);
108
109         if (time_spent < usec_cycle) {
110                 unsigned long s = usec_cycle - time_spent;
111
112                 td->rate_pending_usleep += s;
113                 if (td->rate_pending_usleep >= 100000) {
114                         usec_sleep(td, td->rate_pending_usleep);
115                         td->rate_pending_usleep = 0;
116                 }
117         } else {
118                 long overtime = time_spent - usec_cycle;
119
120                 td->rate_pending_usleep -= overtime;
121         }
122 }