#include <time.h>
#include <sys/time.h>
#include <string.h>	/* memcpy() */

#include "fio.h"

static struct timeval genesis;
static unsigned long ns_granularity;

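/*
 * Microseconds elapsed between two timevals; returns 0 if time
 * appears to have gone backwards.
 */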
unsigned long long utime_since(struct timeval *s, struct timeval *e)
{
	long sec, usec;
	unsigned long long ret;

	sec = e->tv_sec - s->tv_sec;
	usec = e->tv_usec - s->tv_usec;
	if (sec > 0 && usec < 0) {
		sec--;
		usec += 1000000;
	}

	/*
	 * time warp bug on some kernels?
	 */
	if (sec < 0 || (sec == 0 && usec < 0))
		return 0;

	ret = sec * 1000000ULL + usec;

	return ret;
}

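/*
 * Microseconds elapsed since *s.
 */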
unsigned long long utime_since_now(struct timeval *s)
{
	struct timeval t;

	fio_gettime(&t, NULL);
	return utime_since(s, &t);
}

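/*
 * Milliseconds elapsed between two timevals, clamped at 0.
 */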
unsigned long mtime_since(struct timeval *s, struct timeval *e)
{
	long sec, usec, ret;

	sec = e->tv_sec - s->tv_sec;
	usec = e->tv_usec - s->tv_usec;
	if (sec > 0 && usec < 0) {
		sec--;
		usec += 1000000;
	}

	sec *= 1000UL;
	usec /= 1000UL;
	ret = sec + usec;

	/*
	 * time warp bug on some kernels?
	 */
	if (ret < 0)
		ret = 0;

	return ret;
}

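/*
 * Milliseconds elapsed since *s, passing our caller's return address
 * on to fio_gettime().
 */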
unsigned long mtime_since_now(struct timeval *s)
{
	struct timeval t;
	void *p = __builtin_return_address(0);

	fio_gettime(&t, p);
	return mtime_since(s, &t);
}

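/*
 * Seconds elapsed since *s.
 */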
unsigned long time_since_now(struct timeval *s)
{
	return mtime_since_now(s) / 1000;
}

/*
 * busy looping version for the last few usec
 */
void usec_spin(unsigned int usec)
{
	struct timeval start;

	fio_gettime(&start, NULL);
	while (utime_since_now(&start) < usec)
		nop;
}

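/*
 * Sleep for roughly 'usec' microseconds: nanosleep() covers the bulk of
 * the interval, anything shorter than the measured nanosleep granularity
 * is busy-waited in usec_spin().
 */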
void usec_sleep(struct thread_data *td, unsigned long usec)
{
	struct timespec req;
	struct timeval tv;

	do {
		unsigned long ts = usec;

		if (usec < ns_granularity) {
			usec_spin(usec);
			break;
		}

		ts = usec - ns_granularity;

		if (ts >= 1000000) {
			req.tv_sec = ts / 1000000;
			ts -= 1000000 * req.tv_sec;
		} else
			req.tv_sec = 0;

		req.tv_nsec = ts * 1000;
		fio_gettime(&tv, NULL);

		if (nanosleep(&req, NULL) < 0)
			break;

		ts = utime_since_now(&tv);
		if (ts >= usec)
			break;

		usec -= ts;
	} while (!td->terminate);
}

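/*
 * Accumulate how far ahead of the configured rate/iops limit this thread
 * is after completing 'bytes' of I/O in 'time_spent' usecs, and sleep off
 * the pending debt once it reaches 100 msec.
 */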
void rate_throttle(struct thread_data *td, unsigned long time_spent,
		   unsigned int bytes)
{
	unsigned long usec_cycle;
	unsigned int bs;

	if (!td->o.rate && !td->o.rate_iops)
		return;

	if (td_rw(td))
		bs = td->o.rw_min_bs;
	else if (td_read(td))
		bs = td->o.min_bs[DDIR_READ];
	else
		bs = td->o.min_bs[DDIR_WRITE];

	usec_cycle = td->rate_usec_cycle * (bytes / bs);

	if (time_spent < usec_cycle) {
		unsigned long s = usec_cycle - time_spent;

		td->rate_pending_usleep += s;

		if (td->rate_pending_usleep >= 100000) {
			struct timeval t;

			fio_gettime(&t, NULL);
			usec_sleep(td, td->rate_pending_usleep);
			td->rate_pending_usleep -= utime_since_now(&t);
		}
	} else {
		long overtime = time_spent - usec_cycle;

		td->rate_pending_usleep -= overtime;
	}
}

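/*
 * Milliseconds since the recorded genesis (start) time.
 */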
unsigned long mtime_since_genesis(void)
{
	return mtime_since_now(&genesis);
}

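/*
 * True while a ramp_time is configured and has not yet expired.
 */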
int in_ramp_time(struct thread_data *td)
{
	return td->o.ramp_time && !td->ramp_time_over;
}

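/*
 * Returns 1 once any configured ramp_time has elapsed; on that transition
 * the accumulated stats are reset so they only reflect the post-ramp run.
 */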
int ramp_time_over(struct thread_data *td)
{
	struct timeval tv;

	if (!td->o.ramp_time || td->ramp_time_over)
		return 1;

	fio_gettime(&tv, NULL);
	if (mtime_since(&td->epoch, &tv) >= td->o.ramp_time * 1000) {
		td->ramp_time_over = 1;
		reset_all_stats(td);
		td_set_runstate(td, TD_RAMP);
		return 1;
	}

	return 0;
}

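/*
 * Runs at load time (fio_init constructor): sample the worst-case elapsed
 * time of a minimal nanosleep() so usec_sleep() knows below which point it
 * should spin instead of sleeping.
 */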
static void fio_init time_init(void)
{
	int i;

	/*
	 * Check the granularity of the nanosleep function
	 */
	for (i = 0; i < 10; i++) {
		struct timeval tv;
		struct timespec ts;
		unsigned long elapsed;

		fio_gettime(&tv, NULL);
		ts.tv_sec = 0;
		ts.tv_nsec = 1000;

		nanosleep(&ts, NULL);
		elapsed = utime_since_now(&tv);

		if (elapsed > ns_granularity)
			ns_granularity = elapsed;
	}
}

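/*
 * Record the global start ("genesis") time.
 */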
void set_genesis_time(void)
{
	fio_gettime(&genesis, NULL);
}

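/*
 * Copy the recorded genesis time into *t.
 */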
void fill_start_time(struct timeval *t)
{
	memcpy(t, &genesis, sizeof(genesis));
}