#include <math.h>
#include "json.h"
#include "idletime.h"

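/*
 * Shared state for idle profiling. One SCHED_IDLE thread is pinned to
 * each CPU; the scheduler only runs such a thread when the CPU has
 * nothing else to do, so the amount of busy work the thread completes
 * measures how idle that CPU was.
 */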
static volatile struct idle_prof_common ipc;

/*
 * Get the time to complete a unit of work on a particular CPU.
 * The minimum time over CALIBRATE_RUNS runs is returned.
 */
static double calibrate_unit(unsigned char *data)
{
	unsigned long t, i, j, k;
	struct timeval tps;
	double tunit = 0.0;

	for (i = 0; i < CALIBRATE_RUNS; i++) {

		fio_gettime(&tps, NULL);
		/* scale for less variance */
		for (j = 0; j < CALIBRATE_SCALE; j++) {
			/* unit of work */
			for (k = 0; k < page_size; k++) {
				data[(k + j) % page_size] = k % 256;
				/*
				 * we won't see STOP here; this is to match
				 * the same statement in the profiling loop.
				 */
				if (ipc.status == IDLE_PROF_STATUS_PROF_STOP)
					return 0.0;
			}
		}

		t = utime_since_now(&tps);
		if (!t)
			continue;

		/* get the minimum time to complete CALIBRATE_SCALE units */
		if ((i == 0) || ((double)t < tunit))
			tunit = (double)t;
	}

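	/*
	 * tunit is the fastest observed time, in usecs, for CALIBRATE_SCALE
	 * page passes; divide to get the cost of a single unit of work.
	 */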
	return tunit / CALIBRATE_SCALE;
}

static int set_cpu_affinity(struct idle_prof_thread *ipt)
{
#if defined(FIO_HAVE_CPU_AFFINITY)
	os_cpu_mask_t cpu_mask;

	memset(&cpu_mask, 0, sizeof(cpu_mask));
	fio_cpu_set(&cpu_mask, ipt->cpu);

	if (fio_setaffinity(gettid(), cpu_mask)) {
		log_err("fio: fio_setaffinity failed\n");
		return -1;
	}

	return 0;
#else
	log_err("fio: fio_setaffinity not supported\n");
	return -1;
#endif
}

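/*
 * Per-CPU worker thread. The main thread holds both init_lock and
 * start_lock before spawning us: init_lock is dropped once every
 * thread has been created, and start_lock is dropped by
 * fio_idle_prof_start() after calibration completes on all CPUs,
 * so every thread profiles over the same interval.
 */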
static void *idle_prof_thread_fn(void *data)
{
	int retval;
	unsigned long j, k;
	struct idle_prof_thread *ipt = data;

	/* wait until all threads are spawned */
	pthread_mutex_lock(&ipt->init_lock);

	/* exit if any other thread failed to start */
	if (ipc.status == IDLE_PROF_STATUS_ABORT)
		return NULL;

	retval = set_cpu_affinity(ipt);
	if (retval == -1) {
		ipt->state = TD_EXITED;
		pthread_mutex_unlock(&ipt->init_lock);
		return NULL;
	}

	ipt->cali_time = calibrate_unit(ipt->data);

	/* delay setting the IDLE class until now for better calibration accuracy */
#if defined(CONFIG_SCHED_IDLE)
	if ((retval = fio_set_sched_idle()))
		log_err("fio: fio_set_sched_idle failed\n");
#else
	retval = -1;
	log_err("fio: fio_set_sched_idle not supported\n");
#endif
	if (retval == -1) {
		ipt->state = TD_EXITED;
		pthread_mutex_unlock(&ipt->init_lock);
		return NULL;
	}

	ipt->state = TD_INITIALIZED;

	/* signal the main thread that calibration is done */
	pthread_cond_signal(&ipt->cond);
	pthread_mutex_unlock(&ipt->init_lock);

	/* wait for calibration on the other CPUs to finish */
	pthread_mutex_lock(&ipt->start_lock);

	/* exit if other threads failed to initialize */
	if (ipc.status == IDLE_PROF_STATUS_ABORT) {
		pthread_mutex_unlock(&ipt->start_lock);
		return NULL;
	}

	/* exit if we are doing calibration only */
	if (ipc.status == IDLE_PROF_STATUS_CALI_STOP) {
		pthread_mutex_unlock(&ipt->start_lock);
		return NULL;
	}

	fio_gettime(&ipt->tps, NULL);
	ipt->state = TD_RUNNING;

	j = 0;
	while (1) {
		for (k = 0; k < page_size; k++) {
			ipt->data[(k + j) % page_size] = k % 256;
			if (ipc.status == IDLE_PROF_STATUS_PROF_STOP) {
				fio_gettime(&ipt->tpe, NULL);
				goto idle_prof_done;
			}
		}
		j++;
	}

idle_prof_done:

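	/* count the partial final pass as a fraction of a full page pass */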
	ipt->loops = j + (double) k / page_size;
	ipt->state = TD_EXITED;
	pthread_mutex_unlock(&ipt->start_lock);

	return NULL;
}

146
147/* calculate mean and standard deviation to complete an unit of work */
148static void calibration_stats(void)
149{
150 int i;
680b5abc 151 double sum = 0.0, var = 0.0;
f2a2ce0e
HL
152 struct idle_prof_thread *ipt;
153
154 for (i = 0; i < ipc.nr_cpus; i++) {
155 ipt = &ipc.ipts[i];
156 sum += ipt->cali_time;
157 }
158
159 ipc.cali_mean = sum/ipc.nr_cpus;
160
161 for (i = 0; i < ipc.nr_cpus; i++) {
162 ipt = &ipc.ipts[i];
163 var += pow(ipt->cali_time-ipc.cali_mean, 2);
164 }
165
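	/*
	 * sample standard deviation: divide by n - 1 because the mean
	 * is estimated from the same samples
	 */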
	ipc.cali_stddev = sqrt(var / (ipc.nr_cpus - 1));
}

void fio_idle_prof_init(void)
{
	int i, ret;
	struct timeval tp;
	struct timespec ts;
	pthread_attr_t tattr;
	struct idle_prof_thread *ipt;

	ipc.nr_cpus = cpus_online();
	ipc.status = IDLE_PROF_STATUS_OK;

	if (ipc.opt == IDLE_PROF_OPT_NONE)
		return;

	if ((ret = pthread_attr_init(&tattr))) {
		log_err("fio: pthread_attr_init %s\n", strerror(ret));
		return;
	}
	if ((ret = pthread_attr_setscope(&tattr, PTHREAD_SCOPE_SYSTEM))) {
		log_err("fio: pthread_attr_setscope %s\n", strerror(ret));
		return;
	}

	ipc.ipts = malloc(ipc.nr_cpus * sizeof(struct idle_prof_thread));
	if (!ipc.ipts) {
		log_err("fio: malloc failed\n");
		return;
	}

	ipc.buf = malloc(ipc.nr_cpus * page_size);
	if (!ipc.buf) {
		log_err("fio: malloc failed\n");
		free(ipc.ipts);
		return;
	}

	/*
	 * profiling aborts on any single thread failure, since the
	 * result won't be accurate if any CPU is not used.
	 */
	for (i = 0; i < ipc.nr_cpus; i++) {
		ipt = &ipc.ipts[i];

		ipt->cpu = i;
		ipt->state = TD_NOT_CREATED;
		ipt->data = (unsigned char *)(ipc.buf + page_size * i);

		if ((ret = pthread_mutex_init(&ipt->init_lock, NULL))) {
			ipc.status = IDLE_PROF_STATUS_ABORT;
			log_err("fio: pthread_mutex_init %s\n", strerror(ret));
			break;
		}

		if ((ret = pthread_mutex_init(&ipt->start_lock, NULL))) {
			ipc.status = IDLE_PROF_STATUS_ABORT;
			log_err("fio: pthread_mutex_init %s\n", strerror(ret));
			break;
		}

		if ((ret = pthread_cond_init(&ipt->cond, NULL))) {
			ipc.status = IDLE_PROF_STATUS_ABORT;
			log_err("fio: pthread_cond_init %s\n", strerror(ret));
			break;
		}

		/* make sure all threads are spawned before they start */
		pthread_mutex_lock(&ipt->init_lock);

		/* make sure all threads finish init before profiling starts */
		pthread_mutex_lock(&ipt->start_lock);

		if ((ret = pthread_create(&ipt->thread, &tattr, idle_prof_thread_fn, ipt))) {
			ipc.status = IDLE_PROF_STATUS_ABORT;
			log_err("fio: pthread_create %s\n", strerror(ret));
			break;
		} else
			ipt->state = TD_CREATED;

		if ((ret = pthread_detach(ipt->thread))) {
			/* log the error and let the thread spin */
			log_err("fio: pthread_detach %s\n", strerror(ret));
		}
	}

	/*
	 * let the good threads continue, so that they can exit
	 * if errors occurred on other threads earlier.
	 */
	for (i = 0; i < ipc.nr_cpus; i++) {
		ipt = &ipc.ipts[i];
		pthread_mutex_unlock(&ipt->init_lock);
	}

	if (ipc.status == IDLE_PROF_STATUS_ABORT)
		return;

	/* wait for calibration to finish */
	for (i = 0; i < ipc.nr_cpus; i++) {
		ipt = &ipc.ipts[i];
		pthread_mutex_lock(&ipt->init_lock);
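		/*
		 * use a 1-second timed wait so a cond_signal delivered
		 * before we started waiting cannot block us forever
		 */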
		while ((ipt->state != TD_EXITED) &&
		       (ipt->state != TD_INITIALIZED)) {
			fio_gettime(&tp, NULL);
			ts.tv_sec = tp.tv_sec + 1;
			ts.tv_nsec = tp.tv_usec * 1000;
			pthread_cond_timedwait(&ipt->cond, &ipt->init_lock, &ts);
		}
		pthread_mutex_unlock(&ipt->init_lock);

		/*
		 * if any thread failed to initialize, the other threads
		 * will be aborted later, after fio_idle_prof_start.
		 */
		if (ipt->state == TD_EXITED)
			ipc.status = IDLE_PROF_STATUS_ABORT;
	}

	if (ipc.status != IDLE_PROF_STATUS_ABORT)
		calibration_stats();
	else
		ipc.cali_mean = ipc.cali_stddev = 0.0;

	if (ipc.opt == IDLE_PROF_OPT_CALI)
		ipc.status = IDLE_PROF_STATUS_CALI_STOP;
}

void fio_idle_prof_start(void)
{
	int i;
	struct idle_prof_thread *ipt;

	if (ipc.opt == IDLE_PROF_OPT_NONE)
		return;

	/* unlock regardless of whether abort is set */
	for (i = 0; i < ipc.nr_cpus; i++) {
		ipt = &ipc.ipts[i];
		pthread_mutex_unlock(&ipt->start_lock);
	}
}

void fio_idle_prof_stop(void)
{
	int i;
	uint64_t runt;
	struct timeval tp;
	struct timespec ts;
	struct idle_prof_thread *ipt;

	if (ipc.opt == IDLE_PROF_OPT_NONE)
		return;

	if (ipc.opt == IDLE_PROF_OPT_CALI)
		return;

	ipc.status = IDLE_PROF_STATUS_PROF_STOP;

	/* wait for all threads to exit from profiling */
	for (i = 0; i < ipc.nr_cpus; i++) {
		ipt = &ipc.ipts[i];
		pthread_mutex_lock(&ipt->start_lock);
		while ((ipt->state != TD_EXITED) &&
		       (ipt->state != TD_NOT_CREATED)) {
			fio_gettime(&tp, NULL);
			ts.tv_sec = tp.tv_sec + 1;
			ts.tv_nsec = tp.tv_usec * 1000;
			/* timed wait in case a signal is not received */
			pthread_cond_timedwait(&ipt->cond, &ipt->start_lock, &ts);
		}
		pthread_mutex_unlock(&ipt->start_lock);

		/* calculate idleness */
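		/*
		 * idleness = loops * cali_mean / runt: the fraction of
		 * wall time the SCHED_IDLE thread actually got to run,
		 * i.e. the time the CPU had nothing better to do.
		 */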
		if (ipc.cali_mean != 0.0) {
			runt = utime_since(&ipt->tps, &ipt->tpe);
			if (runt)
				ipt->idleness = ipt->loops * ipc.cali_mean / runt;
			else
				ipt->idleness = 0.0;
		} else
			ipt->idleness = 0.0;
	}

	/*
	 * memory allocations are freed via an explicit fio_idle_prof_cleanup
	 * call after the profiling stats have been collected by apps.
	 */
}

/*
 * return the system idle percentage when cpu is -1;
 * return a single cpu's idle percentage otherwise.
 */
static double fio_idle_prof_cpu_stat(int cpu)
{
	int i, nr_cpus = ipc.nr_cpus;
	struct idle_prof_thread *ipt;
	double p = 0.0;

	if (ipc.opt == IDLE_PROF_OPT_NONE)
		return 0.0;

	if ((cpu >= nr_cpus) || (cpu < -1)) {
		log_err("fio: idle profiling invalid cpu index\n");
		return 0.0;
	}

	if (cpu == -1) {
		for (i = 0; i < nr_cpus; i++) {
			ipt = &ipc.ipts[i];
			p += ipt->idleness;
		}
		p /= nr_cpus;
	} else {
		ipt = &ipc.ipts[cpu];
		p = ipt->idleness;
	}

	return p * 100.0;
}

static void fio_idle_prof_cleanup(void)
{
	if (ipc.ipts) {
		free(ipc.ipts);
		ipc.ipts = NULL;
	}

	if (ipc.buf) {
		free(ipc.buf);
		ipc.buf = NULL;
	}
}

int fio_idle_prof_parse_opt(const char *args)
{
	ipc.opt = IDLE_PROF_OPT_NONE; /* default */

	if (!args) {
		log_err("fio: empty idle-prof option string\n");
		return -1;
	}

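	/*
	 * "calibrate" is fully handled here: run a standalone calibration
	 * pass, print its stats, and return 1 so the caller can skip
	 * normal profiling (it is expected to exit without running jobs).
	 */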
#if defined(FIO_HAVE_CPU_AFFINITY) && defined(CONFIG_SCHED_IDLE)
	if (strcmp("calibrate", args) == 0) {
		ipc.opt = IDLE_PROF_OPT_CALI;
		fio_idle_prof_init();
		fio_idle_prof_start();
		fio_idle_prof_stop();
		show_idle_prof_stats(FIO_OUTPUT_NORMAL, NULL);
		return 1;
	} else if (strcmp("system", args) == 0) {
		ipc.opt = IDLE_PROF_OPT_SYSTEM;
		return 0;
	} else if (strcmp("percpu", args) == 0) {
		ipc.opt = IDLE_PROF_OPT_PERCPU;
		return 0;
	} else {
		log_err("fio: incorrect idle-prof option: %s\n", args);
		return -1;
	}
#else
	log_err("fio: idle-prof not supported on this platform\n");
	return -1;
#endif
}

void show_idle_prof_stats(int output, struct json_object *parent)
{
	int i, nr_cpus = ipc.nr_cpus;
	struct json_object *tmp;
	char s[MAX_CPU_STR_LEN];

	if (output == FIO_OUTPUT_NORMAL) {
		if (ipc.opt > IDLE_PROF_OPT_CALI)
			log_info("\nCPU idleness:\n");
		else if (ipc.opt == IDLE_PROF_OPT_CALI)
			log_info("CPU idleness:\n");

		if (ipc.opt >= IDLE_PROF_OPT_SYSTEM)
			log_info("  system: %3.2f%%\n", fio_idle_prof_cpu_stat(-1));

		if (ipc.opt == IDLE_PROF_OPT_PERCPU) {
			log_info("  percpu: %3.2f%%", fio_idle_prof_cpu_stat(0));
			for (i = 1; i < nr_cpus; i++)
				log_info(", %3.2f%%", fio_idle_prof_cpu_stat(i));
			log_info("\n");
		}

		if (ipc.opt >= IDLE_PROF_OPT_CALI) {
			log_info("  unit work: mean=%3.2fus,", ipc.cali_mean);
			log_info(" stddev=%3.2f\n", ipc.cali_stddev);
		}

		/* dynamic mem allocations can now be freed */
		if (ipc.opt != IDLE_PROF_OPT_NONE)
			fio_idle_prof_cleanup();

		return;
	}

	if ((ipc.opt != IDLE_PROF_OPT_NONE) && (output == FIO_OUTPUT_JSON)) {
		if (!parent)
			return;

		tmp = json_create_object();
		if (!tmp)
			return;

		json_object_add_value_object(parent, "cpu_idleness", tmp);
		json_object_add_value_float(tmp, "system", fio_idle_prof_cpu_stat(-1));

		if (ipc.opt == IDLE_PROF_OPT_PERCPU) {
			for (i = 0; i < nr_cpus; i++) {
				snprintf(s, MAX_CPU_STR_LEN, "cpu-%d", i);
				json_object_add_value_float(tmp, s, fio_idle_prof_cpu_stat(i));
			}
		}

		json_object_add_value_float(tmp, "unit_mean", ipc.cali_mean);
		json_object_add_value_float(tmp, "unit_stddev", ipc.cali_stddev);

		fio_idle_prof_cleanup();
	}
}