/* [fio.git] / idletime.c */
#include <math.h>
#include "fio.h"
#include "json.h"
#include "idletime.h"

static volatile struct idle_prof_common ipc;

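/*
 * Idle profiling flow: fio_idle_prof_parse_opt() selects the mode,
 * fio_idle_prof_init() spawns and calibrates one thread per online CPU,
 * fio_idle_prof_start()/fio_idle_prof_stop() bracket the measurement, and
 * show_idle_prof_stats() reports the results before fio_idle_prof_cleanup().
 */
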
/*
 * Get the time to complete a unit of work on a particular cpu.
 * The minimum time over CALIBRATE_RUNS runs is returned.
 */
static double calibrate_unit(unsigned char *data)
{
        unsigned long t, i, j, k;
        struct timespec tps;
        double tunit = 0.0;

        for (i = 0; i < CALIBRATE_RUNS; i++) {
                fio_gettime(&tps, NULL);
                /* scale up the work to reduce variance */
                for (j = 0; j < CALIBRATE_SCALE; j++) {
                        /* unit of work */
                        for (k = 0; k < page_size; k++) {
                                data[(k + j) % page_size] = k % 256;
                                /*
                                 * We won't see STOP here; this check only
                                 * mirrors the same statement in the
                                 * profiling loop.
                                 */
                                if (ipc.status == IDLE_PROF_STATUS_PROF_STOP)
                                        return 0.0;
                        }
                }

                t = utime_since_now(&tps);
                if (!t)
                        continue;

                /* keep the minimum time to complete CALIBRATE_SCALE units */
                if ((i == 0) || ((double)t < tunit))
                        tunit = (double)t;
        }

        return tunit / CALIBRATE_SCALE;
}

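/*
 * Thin wrappers around the platform CPU affinity API; when
 * FIO_HAVE_CPU_AFFINITY is not defined they become a no-op and a
 * logged failure, respectively.
 */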
static void free_cpu_affinity(struct idle_prof_thread *ipt)
{
#if defined(FIO_HAVE_CPU_AFFINITY)
        fio_cpuset_exit(&ipt->cpu_mask);
#endif
}

static int set_cpu_affinity(struct idle_prof_thread *ipt)
{
#if defined(FIO_HAVE_CPU_AFFINITY)
        if (fio_cpuset_init(&ipt->cpu_mask)) {
                log_err("fio: cpuset init failed\n");
                return -1;
        }

        fio_cpu_set(&ipt->cpu_mask, ipt->cpu);

        if (fio_setaffinity(gettid(), ipt->cpu_mask)) {
                log_err("fio: fio_setaffinity failed\n");
                fio_cpuset_exit(&ipt->cpu_mask);
                return -1;
        }

        return 0;
#else
        log_err("fio: fio_setaffinity not supported\n");
        return -1;
#endif
}

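/*
 * Per-CPU worker: pin to one cpu, calibrate the unit of work, drop to the
 * idle scheduling class, then spin on the same unit of work until told to
 * stop. The number of completed loops, times the calibrated unit cost,
 * approximates how long this cpu was otherwise idle.
 */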
static void *idle_prof_thread_fn(void *data)
{
        int retval;
        unsigned long j, k;
        struct idle_prof_thread *ipt = data;

        /* wait until all threads have been spawned */
        pthread_mutex_lock(&ipt->init_lock);

        /* exit if any other thread failed to start */
        if (ipc.status == IDLE_PROF_STATUS_ABORT) {
                pthread_mutex_unlock(&ipt->init_lock);
                return NULL;
        }

        retval = set_cpu_affinity(ipt);
        if (retval == -1) {
                ipt->state = TD_EXITED;
                pthread_mutex_unlock(&ipt->init_lock);
                return NULL;
        }

        ipt->cali_time = calibrate_unit(ipt->data);

        /* delay setting the IDLE class until now for better calibration accuracy */
#if defined(CONFIG_SCHED_IDLE)
        if ((retval = fio_set_sched_idle()))
                log_err("fio: fio_set_sched_idle failed\n");
#else
        retval = -1;
        log_err("fio: fio_set_sched_idle not supported\n");
#endif
        if (retval == -1) {
                ipt->state = TD_EXITED;
                pthread_mutex_unlock(&ipt->init_lock);
                goto do_exit;
        }

        ipt->state = TD_INITIALIZED;

        /* signal the main thread that calibration is done */
        pthread_cond_signal(&ipt->cond);
        pthread_mutex_unlock(&ipt->init_lock);

        /* wait for the other threads to finish calibration */
        pthread_mutex_lock(&ipt->start_lock);

        /* exit if other threads failed to initialize */
        if (ipc.status == IDLE_PROF_STATUS_ABORT) {
                pthread_mutex_unlock(&ipt->start_lock);
                goto do_exit;
        }

        /* exit if we are doing calibration only */
        if (ipc.status == IDLE_PROF_STATUS_CALI_STOP) {
                pthread_mutex_unlock(&ipt->start_lock);
                goto do_exit;
        }

        fio_gettime(&ipt->tps, NULL);
        ipt->state = TD_RUNNING;

        j = 0;
        while (1) {
                for (k = 0; k < page_size; k++) {
                        ipt->data[(k + j) % page_size] = k % 256;
                        if (ipc.status == IDLE_PROF_STATUS_PROF_STOP) {
                                fio_gettime(&ipt->tpe, NULL);
                                goto idle_prof_done;
                        }
                }
                j++;
        }

idle_prof_done:

        ipt->loops = j + (double) k / page_size;
        ipt->state = TD_EXITED;
        pthread_mutex_unlock(&ipt->start_lock);

do_exit:
        free_cpu_affinity(ipt);
        return NULL;
}

/*
 * Calculate the mean and standard deviation of the time to complete a
 * unit of work.
 */
static void calibration_stats(void)
{
        int i;
        double sum = 0.0, var = 0.0;
        struct idle_prof_thread *ipt;

        for (i = 0; i < ipc.nr_cpus; i++) {
                ipt = &ipc.ipts[i];
                sum += ipt->cali_time;
        }

        ipc.cali_mean = sum / ipc.nr_cpus;

        for (i = 0; i < ipc.nr_cpus; i++) {
                ipt = &ipc.ipts[i];
                var += pow(ipt->cali_time - ipc.cali_mean, 2);
        }

        ipc.cali_stddev = sqrt(var / (ipc.nr_cpus - 1));
}

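/*
 * Spawn one detached profiling thread per online CPU, wait for each to
 * finish calibration (with a timed condvar wait so a missed signal cannot
 * hang fio), and aggregate the calibration statistics. Any failure aborts
 * profiling, since the result would be skewed if a cpu were left out.
 */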
void fio_idle_prof_init(void)
{
        int i, ret;
        struct timespec ts;
        pthread_attr_t tattr;
        pthread_condattr_t cattr;
        struct idle_prof_thread *ipt;

        ipc.nr_cpus = cpus_online();
        ipc.status = IDLE_PROF_STATUS_OK;

        if (ipc.opt == IDLE_PROF_OPT_NONE)
                return;

        ret = pthread_condattr_init(&cattr);
        assert(ret == 0);
#ifdef CONFIG_PTHREAD_CONDATTR_SETCLOCK
        ret = pthread_condattr_setclock(&cattr, CLOCK_MONOTONIC);
        assert(ret == 0);
#endif

        if ((ret = pthread_attr_init(&tattr))) {
                log_err("fio: pthread_attr_init %s\n", strerror(ret));
                return;
        }
        if ((ret = pthread_attr_setscope(&tattr, PTHREAD_SCOPE_SYSTEM))) {
                log_err("fio: pthread_attr_setscope %s\n", strerror(ret));
                return;
        }

        ipc.ipts = malloc(ipc.nr_cpus * sizeof(struct idle_prof_thread));
        if (!ipc.ipts) {
                log_err("fio: malloc failed\n");
                return;
        }

        ipc.buf = malloc(ipc.nr_cpus * page_size);
        if (!ipc.buf) {
                log_err("fio: malloc failed\n");
                free(ipc.ipts);
                return;
        }

        /*
         * Profiling aborts on any single thread failure, since the
         * result won't be accurate if any cpu is not used.
         */
        for (i = 0; i < ipc.nr_cpus; i++) {
                ipt = &ipc.ipts[i];

                ipt->cpu = i;
                ipt->state = TD_NOT_CREATED;
                ipt->data = (unsigned char *)(ipc.buf + page_size * i);

                if ((ret = pthread_mutex_init(&ipt->init_lock, NULL))) {
                        ipc.status = IDLE_PROF_STATUS_ABORT;
                        log_err("fio: pthread_mutex_init %s\n", strerror(ret));
                        break;
                }

                if ((ret = pthread_mutex_init(&ipt->start_lock, NULL))) {
                        ipc.status = IDLE_PROF_STATUS_ABORT;
                        log_err("fio: pthread_mutex_init %s\n", strerror(ret));
                        break;
                }

                if ((ret = pthread_cond_init(&ipt->cond, &cattr))) {
                        ipc.status = IDLE_PROF_STATUS_ABORT;
                        log_err("fio: pthread_cond_init %s\n", strerror(ret));
                        break;
                }

                /* make sure all threads are spawned before they start */
                pthread_mutex_lock(&ipt->init_lock);

                /* make sure all threads finish init before profiling starts */
                pthread_mutex_lock(&ipt->start_lock);

                if ((ret = pthread_create(&ipt->thread, &tattr, idle_prof_thread_fn, ipt))) {
                        ipc.status = IDLE_PROF_STATUS_ABORT;
                        log_err("fio: pthread_create %s\n", strerror(ret));
                        break;
                } else
                        ipt->state = TD_CREATED;

                if ((ret = pthread_detach(ipt->thread))) {
                        /* log the error and let the thread spin */
                        log_err("fio: pthread_detach %s\n", strerror(ret));
                }
        }

        /*
         * Let the good threads continue so that they can exit if errors
         * occurred on other threads earlier.
         */
        for (i = 0; i < ipc.nr_cpus; i++) {
                ipt = &ipc.ipts[i];
                pthread_mutex_unlock(&ipt->init_lock);
        }

        if (ipc.status == IDLE_PROF_STATUS_ABORT)
                return;

        /* wait for calibration to finish */
        for (i = 0; i < ipc.nr_cpus; i++) {
                ipt = &ipc.ipts[i];
                pthread_mutex_lock(&ipt->init_lock);
                while ((ipt->state != TD_EXITED) &&
                       (ipt->state != TD_INITIALIZED)) {
#ifdef CONFIG_PTHREAD_CONDATTR_SETCLOCK
                        clock_gettime(CLOCK_MONOTONIC, &ts);
#else
                        clock_gettime(CLOCK_REALTIME, &ts);
#endif
                        ts.tv_sec += 1;
                        pthread_cond_timedwait(&ipt->cond, &ipt->init_lock, &ts);
                }
                pthread_mutex_unlock(&ipt->init_lock);

                /*
                 * Any thread that failed to initialize will make the other
                 * threads abort later, after fio_idle_prof_start.
                 */
                if (ipt->state == TD_EXITED)
                        ipc.status = IDLE_PROF_STATUS_ABORT;
        }

        if (ipc.status != IDLE_PROF_STATUS_ABORT)
                calibration_stats();
        else
                ipc.cali_mean = ipc.cali_stddev = 0.0;

        if (ipc.opt == IDLE_PROF_OPT_CALI)
                ipc.status = IDLE_PROF_STATUS_CALI_STOP;
}

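/*
 * Release every per-cpu thread's start_lock so the profiling loops can
 * begin (or so the threads can bail out if initialization was aborted).
 */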
void fio_idle_prof_start(void)
{
        int i;
        struct idle_prof_thread *ipt;

        if (ipc.opt == IDLE_PROF_OPT_NONE)
                return;

        /* unlock regardless of whether abort is set */
        for (i = 0; i < ipc.nr_cpus; i++) {
                ipt = &ipc.ipts[i];
                pthread_mutex_unlock(&ipt->start_lock);
        }
}

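/*
 * Signal the workers to stop, wait for each one to exit, and convert the
 * loop count into an idleness ratio: idleness = loops * cali_mean / runtime,
 * i.e. the fraction of the measured interval the cpu spent running the
 * SCHED_IDLE filler work and was therefore otherwise idle.
 */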
void fio_idle_prof_stop(void)
{
        int i;
        uint64_t runt;
        struct timespec ts;
        struct idle_prof_thread *ipt;

        if (ipc.opt == IDLE_PROF_OPT_NONE)
                return;

        if (ipc.opt == IDLE_PROF_OPT_CALI)
                return;

        ipc.status = IDLE_PROF_STATUS_PROF_STOP;

        /* wait for all threads to exit from profiling */
        for (i = 0; i < ipc.nr_cpus; i++) {
                ipt = &ipc.ipts[i];
                pthread_mutex_lock(&ipt->start_lock);
                while ((ipt->state != TD_EXITED) &&
                       (ipt->state != TD_NOT_CREATED)) {
                        fio_gettime(&ts, NULL);
                        ts.tv_sec += 1;
                        /* timed wait in case a signal is not received */
                        pthread_cond_timedwait(&ipt->cond, &ipt->start_lock, &ts);
                }
                pthread_mutex_unlock(&ipt->start_lock);

                /* calculate idleness */
                if (ipc.cali_mean != 0.0) {
                        runt = utime_since(&ipt->tps, &ipt->tpe);
                        if (runt)
                                ipt->idleness = ipt->loops * ipc.cali_mean / runt;
                        else
                                ipt->idleness = 0.0;
                } else
                        ipt->idleness = 0.0;
        }

        /*
         * Memory allocations are freed via an explicit fio_idle_prof_cleanup()
         * call after the profiling stats have been collected by the caller.
         */
}

/*
 * Return the system-wide idle percentage when cpu is -1,
 * or the idle percentage of one cpu otherwise.
 */
static double fio_idle_prof_cpu_stat(int cpu)
{
        int i, nr_cpus = ipc.nr_cpus;
        struct idle_prof_thread *ipt;
        double p = 0.0;

        if (ipc.opt == IDLE_PROF_OPT_NONE)
                return 0.0;

        if ((cpu >= nr_cpus) || (cpu < -1)) {
                log_err("fio: idle profiling invalid cpu index\n");
                return 0.0;
        }

        if (cpu == -1) {
                for (i = 0; i < nr_cpus; i++) {
                        ipt = &ipc.ipts[i];
                        p += ipt->idleness;
                }
                p /= nr_cpus;
        } else {
                ipt = &ipc.ipts[cpu];
                p = ipt->idleness;
        }

        return p * 100.0;
}

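/* Free the per-thread state and scratch buffers allocated in fio_idle_prof_init(). */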
void fio_idle_prof_cleanup(void)
{
        if (ipc.ipts) {
                free(ipc.ipts);
                ipc.ipts = NULL;
        }

        if (ipc.buf) {
                free(ipc.buf);
                ipc.buf = NULL;
        }
}

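/*
 * Parse the idle-prof option string. Returns 1 for the standalone
 * "calibrate" mode (calibration is run and printed immediately), 0 when
 * "system" or "percpu" profiling is enabled, and -1 on error or when the
 * platform lacks CPU affinity or SCHED_IDLE support.
 */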
int fio_idle_prof_parse_opt(const char *args)
{
        ipc.opt = IDLE_PROF_OPT_NONE; /* default */

        if (!args) {
                log_err("fio: empty idle-prof option string\n");
                return -1;
        }

#if defined(FIO_HAVE_CPU_AFFINITY) && defined(CONFIG_SCHED_IDLE)
        if (strcmp("calibrate", args) == 0) {
                ipc.opt = IDLE_PROF_OPT_CALI;
                fio_idle_prof_init();
                fio_idle_prof_start();
                fio_idle_prof_stop();
                show_idle_prof_stats(FIO_OUTPUT_NORMAL, NULL, NULL);
                return 1;
        } else if (strcmp("system", args) == 0) {
                ipc.opt = IDLE_PROF_OPT_SYSTEM;
                return 0;
        } else if (strcmp("percpu", args) == 0) {
                ipc.opt = IDLE_PROF_OPT_PERCPU;
                return 0;
        } else {
                log_err("fio: incorrect idle-prof option: %s\n", args);
                return -1;
        }
#else
        log_err("fio: idle-prof not supported on this platform\n");
        return -1;
#endif
}

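/*
 * Emit the idleness results, either as human-readable text (normal output)
 * or as a "cpu_idleness" object attached to the given JSON parent.
 */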
void show_idle_prof_stats(int output, struct json_object *parent,
                          struct buf_output *out)
{
        int i, nr_cpus = ipc.nr_cpus;
        struct json_object *tmp;
        char s[MAX_CPU_STR_LEN];

        if (output == FIO_OUTPUT_NORMAL) {
                if (ipc.opt > IDLE_PROF_OPT_CALI)
                        log_buf(out, "\nCPU idleness:\n");
                else if (ipc.opt == IDLE_PROF_OPT_CALI)
                        log_buf(out, "CPU idleness:\n");

                if (ipc.opt >= IDLE_PROF_OPT_SYSTEM)
                        log_buf(out, " system: %3.2f%%\n", fio_idle_prof_cpu_stat(-1));

                if (ipc.opt == IDLE_PROF_OPT_PERCPU) {
                        log_buf(out, " percpu: %3.2f%%", fio_idle_prof_cpu_stat(0));
                        for (i = 1; i < nr_cpus; i++)
                                log_buf(out, ", %3.2f%%", fio_idle_prof_cpu_stat(i));
                        log_buf(out, "\n");
                }

                if (ipc.opt >= IDLE_PROF_OPT_CALI) {
                        log_buf(out, " unit work: mean=%3.2fus,", ipc.cali_mean);
                        log_buf(out, " stddev=%3.2f\n", ipc.cali_stddev);
                }

                return;
        }

        if ((ipc.opt != IDLE_PROF_OPT_NONE) && (output & FIO_OUTPUT_JSON)) {
                if (!parent)
                        return;

                tmp = json_create_object();
                if (!tmp)
                        return;

                json_object_add_value_object(parent, "cpu_idleness", tmp);
                json_object_add_value_float(tmp, "system", fio_idle_prof_cpu_stat(-1));

                if (ipc.opt == IDLE_PROF_OPT_PERCPU) {
                        for (i = 0; i < nr_cpus; i++) {
                                snprintf(s, MAX_CPU_STR_LEN, "cpu-%d", i);
                                json_object_add_value_float(tmp, s, fio_idle_prof_cpu_stat(i));
                        }
                }

                json_object_add_value_float(tmp, "unit_mean", ipc.cali_mean);
                json_object_add_value_float(tmp, "unit_stddev", ipc.cali_stddev);
        }
}