idletime.c
#include <math.h>
#include "fio.h"
#include "json.h"
#include "idletime.h"

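/*
 * Shared profiling state. Declared volatile so that status changes made
 * by the main thread are seen by the worker threads polling ipc.status
 * in their busy loops.
 */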
static volatile struct idle_prof_common ipc;

/*
 * Get the time to complete a unit of work on a particular CPU.
 * The minimum over CALIBRATE_RUNS runs is returned.
 */
static double calibrate_unit(unsigned char *data)
{
        unsigned long t, i, j, k;
        struct timespec tps;
        double tunit = 0.0;

        for (i = 0; i < CALIBRATE_RUNS; i++) {

                fio_gettime(&tps, NULL);
                /* scale for less variance */
                for (j = 0; j < CALIBRATE_SCALE; j++) {
                        /* unit of work */
                        for (k = 0; k < page_size; k++) {
                                data[(k + j) % page_size] = k % 256;
                                /*
                                 * We won't see STOP here; this check just
                                 * matches the same statement in the
                                 * profiling loop.
                                 */
                                if (ipc.status == IDLE_PROF_STATUS_PROF_STOP)
                                        return 0.0;
                        }
                }

                t = utime_since_now(&tps);
                if (!t)
                        continue;

                /* get the minimum time to complete CALIBRATE_SCALE units */
                if ((i == 0) || ((double)t < tunit))
                        tunit = (double)t;
        }

        return tunit / CALIBRATE_SCALE;
}

static void free_cpu_affinity(struct idle_prof_thread *ipt)
{
#if defined(FIO_HAVE_CPU_AFFINITY)
        fio_cpuset_exit(&ipt->cpu_mask);
#endif
}

static int set_cpu_affinity(struct idle_prof_thread *ipt)
{
#if defined(FIO_HAVE_CPU_AFFINITY)
        if (fio_cpuset_init(&ipt->cpu_mask)) {
                log_err("fio: cpuset init failed\n");
                return -1;
        }

        fio_cpu_set(&ipt->cpu_mask, ipt->cpu);

        if (fio_setaffinity(gettid(), ipt->cpu_mask)) {
                log_err("fio: fio_setaffinity failed\n");
                fio_cpuset_exit(&ipt->cpu_mask);
                return -1;
        }

        return 0;
#else
        log_err("fio: fio_setaffinity not supported\n");
        return -1;
#endif
}

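/*
 * Worker thread: pin to one CPU, calibrate the unit of work, drop to the
 * IDLE scheduling class, then repeatedly touch memory until the main
 * thread sets IDLE_PROF_STATUS_PROF_STOP. The number of loops completed
 * measures how much otherwise-idle CPU time the thread was given.
 */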
static void *idle_prof_thread_fn(void *data)
{
        int retval;
        unsigned long j, k;
        struct idle_prof_thread *ipt = data;

        /* wait until all threads have been spawned */
        pthread_mutex_lock(&ipt->init_lock);

        /* exit if any other thread failed to start */
        if (ipc.status == IDLE_PROF_STATUS_ABORT) {
                pthread_mutex_unlock(&ipt->init_lock);
                return NULL;
        }

        retval = set_cpu_affinity(ipt);
        if (retval == -1) {
                ipt->state = TD_EXITED;
                pthread_mutex_unlock(&ipt->init_lock);
                return NULL;
        }

        ipt->cali_time = calibrate_unit(ipt->data);

        /*
         * delay setting the IDLE scheduling class until now for better
         * calibration accuracy
         */
#if defined(CONFIG_SCHED_IDLE)
        if ((retval = fio_set_sched_idle()))
                log_err("fio: fio_set_sched_idle failed\n");
#else
        retval = -1;
        log_err("fio: fio_set_sched_idle not supported\n");
#endif
        if (retval == -1) {
                ipt->state = TD_EXITED;
                pthread_mutex_unlock(&ipt->init_lock);
                goto do_exit;
        }

        ipt->state = TD_INITIALIZED;

        /* signal the main thread that calibration is done */
        pthread_cond_signal(&ipt->cond);
        pthread_mutex_unlock(&ipt->init_lock);

        /* wait for the other threads' calibration to finish */
        pthread_mutex_lock(&ipt->start_lock);

        /* exit if any other thread failed to initialize */
        if (ipc.status == IDLE_PROF_STATUS_ABORT) {
                pthread_mutex_unlock(&ipt->start_lock);
                goto do_exit;
        }

        /* exit if we are doing calibration only */
        if (ipc.status == IDLE_PROF_STATUS_CALI_STOP) {
                pthread_mutex_unlock(&ipt->start_lock);
                goto do_exit;
        }

        fio_gettime(&ipt->tps, NULL);
        ipt->state = TD_RUNNING;

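        /*
         * Same memory-touch loop as the calibration unit of work;
         * j counts completed passes over the page.
         */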
        j = 0;
        while (1) {
                for (k = 0; k < page_size; k++) {
                        ipt->data[(k + j) % page_size] = k % 256;
                        if (ipc.status == IDLE_PROF_STATUS_PROF_STOP) {
                                fio_gettime(&ipt->tpe, NULL);
                                goto idle_prof_done;
                        }
                }
                j++;
        }

idle_prof_done:

        ipt->loops = j + (double) k / page_size;
        ipt->state = TD_EXITED;
        pthread_mutex_unlock(&ipt->start_lock);

do_exit:
        free_cpu_affinity(ipt);
        return NULL;
}

/*
 * calculate the mean and standard deviation of the time to complete a
 * unit of work
 */
static void calibration_stats(void)
{
        int i;
        double sum = 0.0, var = 0.0;
        struct idle_prof_thread *ipt;

        for (i = 0; i < ipc.nr_cpus; i++) {
                ipt = &ipc.ipts[i];
                sum += ipt->cali_time;
        }

        ipc.cali_mean = sum / ipc.nr_cpus;

        for (i = 0; i < ipc.nr_cpus; i++) {
                ipt = &ipc.ipts[i];
                var += pow(ipt->cali_time - ipc.cali_mean, 2);
        }

        ipc.cali_stddev = sqrt(var / (ipc.nr_cpus - 1));
}

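/*
 * Spawn one profiling thread per configured CPU, wait for all of them to
 * finish calibration, and leave them parked on their start locks until
 * fio_idle_prof_start() releases them.
 */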
void fio_idle_prof_init(void)
{
        int i, ret;
        struct timespec ts;
        pthread_attr_t tattr;
        pthread_condattr_t cattr;
        struct idle_prof_thread *ipt;

        ipc.nr_cpus = cpus_configured();
        ipc.status = IDLE_PROF_STATUS_OK;

        if (ipc.opt == IDLE_PROF_OPT_NONE)
                return;

        ret = pthread_condattr_init(&cattr);
        assert(ret == 0);
#ifdef CONFIG_PTHREAD_CONDATTR_SETCLOCK
        ret = pthread_condattr_setclock(&cattr, CLOCK_MONOTONIC);
        assert(ret == 0);
#endif

        if ((ret = pthread_attr_init(&tattr))) {
                log_err("fio: pthread_attr_init %s\n", strerror(ret));
                return;
        }
        if ((ret = pthread_attr_setscope(&tattr, PTHREAD_SCOPE_SYSTEM))) {
                log_err("fio: pthread_attr_setscope %s\n", strerror(ret));
                return;
        }

        ipc.ipts = malloc(ipc.nr_cpus * sizeof(struct idle_prof_thread));
        if (!ipc.ipts) {
                log_err("fio: malloc failed\n");
                return;
        }

        ipc.buf = malloc(ipc.nr_cpus * page_size);
        if (!ipc.buf) {
                log_err("fio: malloc failed\n");
                free(ipc.ipts);
                return;
        }

        /*
         * profiling aborts on any single thread failure since the
         * result won't be accurate if any cpu is not used.
         */
        for (i = 0; i < ipc.nr_cpus; i++) {
                ipt = &ipc.ipts[i];

                ipt->cpu = i;
                ipt->state = TD_NOT_CREATED;
                ipt->data = (unsigned char *)(ipc.buf + page_size * i);

                if ((ret = pthread_mutex_init(&ipt->init_lock, NULL))) {
                        ipc.status = IDLE_PROF_STATUS_ABORT;
                        log_err("fio: pthread_mutex_init %s\n", strerror(ret));
                        break;
                }

                if ((ret = pthread_mutex_init(&ipt->start_lock, NULL))) {
                        ipc.status = IDLE_PROF_STATUS_ABORT;
                        log_err("fio: pthread_mutex_init %s\n", strerror(ret));
                        break;
                }

                if ((ret = pthread_cond_init(&ipt->cond, &cattr))) {
                        ipc.status = IDLE_PROF_STATUS_ABORT;
                        log_err("fio: pthread_cond_init %s\n", strerror(ret));
                        break;
                }

                /* make sure all threads are spawned before they start */
                pthread_mutex_lock(&ipt->init_lock);

                /* make sure all threads finish init before profiling starts */
                pthread_mutex_lock(&ipt->start_lock);

                if ((ret = pthread_create(&ipt->thread, &tattr, idle_prof_thread_fn, ipt))) {
                        ipc.status = IDLE_PROF_STATUS_ABORT;
                        log_err("fio: pthread_create %s\n", strerror(ret));
                        break;
                } else
                        ipt->state = TD_CREATED;

                if ((ret = pthread_detach(ipt->thread))) {
                        /* log error and let the thread spin */
                        log_err("fio: pthread_detach %s\n", strerror(ret));
                }
        }

        /*
         * let the good threads continue, so that they can exit cleanly
         * if errors occurred on other threads.
         */
        for (i = 0; i < ipc.nr_cpus; i++) {
                ipt = &ipc.ipts[i];
                pthread_mutex_unlock(&ipt->init_lock);
        }

        if (ipc.status == IDLE_PROF_STATUS_ABORT)
                return;

        /* wait for calibration to finish */
        for (i = 0; i < ipc.nr_cpus; i++) {
                ipt = &ipc.ipts[i];
                pthread_mutex_lock(&ipt->init_lock);
                while ((ipt->state != TD_EXITED) &&
                       (ipt->state != TD_INITIALIZED)) {
#ifdef CONFIG_PTHREAD_CONDATTR_SETCLOCK
                        clock_gettime(CLOCK_MONOTONIC, &ts);
#else
                        clock_gettime(CLOCK_REALTIME, &ts);
#endif
                        ts.tv_sec += 1;
                        pthread_cond_timedwait(&ipt->cond, &ipt->init_lock, &ts);
                }
                pthread_mutex_unlock(&ipt->init_lock);

                /*
                 * if any thread failed to initialize, it will abort the
                 * other threads later, after fio_idle_prof_start.
                 */
                if (ipt->state == TD_EXITED)
                        ipc.status = IDLE_PROF_STATUS_ABORT;
        }

        if (ipc.status != IDLE_PROF_STATUS_ABORT)
                calibration_stats();
        else
                ipc.cali_mean = ipc.cali_stddev = 0.0;

        if (ipc.opt == IDLE_PROF_OPT_CALI)
                ipc.status = IDLE_PROF_STATUS_CALI_STOP;
}

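/* release every worker; profiling begins once the start locks drop */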
void fio_idle_prof_start(void)
{
        int i;
        struct idle_prof_thread *ipt;

        if (ipc.opt == IDLE_PROF_OPT_NONE)
                return;

        /* unlock regardless of whether abort is set */
        for (i = 0; i < ipc.nr_cpus; i++) {
                ipt = &ipc.ipts[i];
                pthread_mutex_unlock(&ipt->start_lock);
        }
}

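/* stop the workers, wait for them to exit, and compute per-CPU idleness */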
void fio_idle_prof_stop(void)
{
        int i;
        uint64_t runt;
        struct timespec ts;
        struct idle_prof_thread *ipt;

        if (ipc.opt == IDLE_PROF_OPT_NONE)
                return;

        if (ipc.opt == IDLE_PROF_OPT_CALI)
                return;

        ipc.status = IDLE_PROF_STATUS_PROF_STOP;

        /* wait for all threads to exit from profiling */
        for (i = 0; i < ipc.nr_cpus; i++) {
                ipt = &ipc.ipts[i];
                pthread_mutex_lock(&ipt->start_lock);
                while ((ipt->state != TD_EXITED) &&
                       (ipt->state != TD_NOT_CREATED)) {
                        fio_gettime(&ts, NULL);
                        ts.tv_sec += 1;
                        /* timed wait in case a signal is not received */
                        pthread_cond_timedwait(&ipt->cond, &ipt->start_lock, &ts);
                }
                pthread_mutex_unlock(&ipt->start_lock);

                /*
                 * calculate idleness: the number of loops completed, times
                 * the calibrated mean time per unit of work, divided by
                 * the elapsed profiling time
                 */
                if (ipc.cali_mean != 0.0) {
                        runt = utime_since(&ipt->tps, &ipt->tpe);
                        if (runt)
                                ipt->idleness = ipt->loops * ipc.cali_mean / runt;
                        else
                                ipt->idleness = 0.0;
                } else
                        ipt->idleness = 0.0;
        }

        /*
         * memory allocations are freed via an explicit fio_idle_prof_cleanup
         * call after the profiling stats have been collected.
         */
}

/*
 * Return the system-wide idle percentage when cpu is -1;
 * return the idle percentage of one cpu otherwise.
 */
static double fio_idle_prof_cpu_stat(int cpu)
{
        int i, nr_cpus = ipc.nr_cpus;
        struct idle_prof_thread *ipt;
        double p = 0.0;

        if (ipc.opt == IDLE_PROF_OPT_NONE)
                return 0.0;

        if ((cpu >= nr_cpus) || (cpu < -1)) {
                log_err("fio: idle profiling invalid cpu index\n");
                return 0.0;
        }

        if (cpu == -1) {
                for (i = 0; i < nr_cpus; i++) {
                        ipt = &ipc.ipts[i];
                        p += ipt->idleness;
                }
                p /= nr_cpus;
        } else {
                ipt = &ipc.ipts[cpu];
                p = ipt->idleness;
        }

        return p * 100.0;
}

void fio_idle_prof_cleanup(void)
{
        if (ipc.ipts) {
                free(ipc.ipts);
                ipc.ipts = NULL;
        }

        if (ipc.buf) {
                free(ipc.buf);
                ipc.buf = NULL;
        }
}

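/*
 * Parse the --idle-prof option. Returns 1 when fio should exit after a
 * calibration-only run, 0 on success, and -1 on an invalid option or an
 * unsupported platform.
 */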
int fio_idle_prof_parse_opt(const char *args)
{
        ipc.opt = IDLE_PROF_OPT_NONE; /* default */

        if (!args) {
                log_err("fio: empty idle-prof option string\n");
                return -1;
        }

#if defined(FIO_HAVE_CPU_AFFINITY) && defined(CONFIG_SCHED_IDLE)
        if (strcmp("calibrate", args) == 0) {
                ipc.opt = IDLE_PROF_OPT_CALI;
                fio_idle_prof_init();
                fio_idle_prof_start();
                fio_idle_prof_stop();
                show_idle_prof_stats(FIO_OUTPUT_NORMAL, NULL, NULL);
                return 1;
        } else if (strcmp("system", args) == 0) {
                ipc.opt = IDLE_PROF_OPT_SYSTEM;
                return 0;
        } else if (strcmp("percpu", args) == 0) {
                ipc.opt = IDLE_PROF_OPT_PERCPU;
                return 0;
        } else {
                log_err("fio: incorrect idle-prof option: %s\n", args);
                return -1;
        }
#else
        log_err("fio: idle-prof not supported on this platform\n");
        return -1;
#endif
}

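/*
 * Report idleness stats in the normal human-readable format, or attach
 * them to the given JSON object.
 */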
void show_idle_prof_stats(int output, struct json_object *parent,
                          struct buf_output *out)
{
        int i, nr_cpus = ipc.nr_cpus;
        struct json_object *tmp;
        char s[MAX_CPU_STR_LEN];

        if (output == FIO_OUTPUT_NORMAL) {
                if (ipc.opt > IDLE_PROF_OPT_CALI)
                        log_buf(out, "\nCPU idleness:\n");
                else if (ipc.opt == IDLE_PROF_OPT_CALI)
                        log_buf(out, "CPU idleness:\n");

                if (ipc.opt >= IDLE_PROF_OPT_SYSTEM)
                        log_buf(out, "  system: %3.2f%%\n", fio_idle_prof_cpu_stat(-1));

                if (ipc.opt == IDLE_PROF_OPT_PERCPU) {
                        log_buf(out, "  percpu: %3.2f%%", fio_idle_prof_cpu_stat(0));
                        for (i = 1; i < nr_cpus; i++)
                                log_buf(out, ", %3.2f%%", fio_idle_prof_cpu_stat(i));
                        log_buf(out, "\n");
                }

                if (ipc.opt >= IDLE_PROF_OPT_CALI) {
                        log_buf(out, "  unit work: mean=%3.2fus,", ipc.cali_mean);
                        log_buf(out, " stddev=%3.2f\n", ipc.cali_stddev);
                }

                return;
        }

        if ((ipc.opt != IDLE_PROF_OPT_NONE) && (output & FIO_OUTPUT_JSON)) {
                if (!parent)
                        return;

                tmp = json_create_object();
                if (!tmp)
                        return;

                json_object_add_value_object(parent, "cpu_idleness", tmp);
                json_object_add_value_float(tmp, "system", fio_idle_prof_cpu_stat(-1));

                if (ipc.opt == IDLE_PROF_OPT_PERCPU) {
                        for (i = 0; i < nr_cpus; i++) {
                                snprintf(s, MAX_CPU_STR_LEN, "cpu-%d", i);
                                json_object_add_value_float(tmp, s, fio_idle_prof_cpu_stat(i));
                        }
                }

                json_object_add_value_float(tmp, "unit_mean", ipc.cali_mean);
                json_object_add_value_float(tmp, "unit_stddev", ipc.cali_stddev);
        }
}