#include <math.h>

#include "json.h"
#include "idletime.h"

static volatile struct idle_prof_common ipc;

/*
 * Get the time to complete a unit of work on a particular cpu.
 * The minimum over CALIBRATE_RUNS runs is returned.
 */
static double calibrate_unit(unsigned char *data)
{
        unsigned long t, i, j, k;
        struct timeval tps;
        double tunit = 0.0;

        for (i = 0; i < CALIBRATE_RUNS; i++) {
                fio_gettime(&tps, NULL);

                /* scale for less variance */
                for (j = 0; j < CALIBRATE_SCALE; j++) {
                        /* unit of work */
                        for (k = 0; k < page_size; k++) {
                                data[(k + j) % page_size] = k % 256;
                                /*
                                 * We won't see STOP here; this is to match
                                 * the same check in the profiling loop.
                                 */
                                if (ipc.status == IDLE_PROF_STATUS_PROF_STOP)
                                        return 0.0;
                        }
                }

                t = utime_since_now(&tps);
                if (!t)
                        continue;

                /* keep the minimum time to complete CALIBRATE_SCALE units */
                if ((i == 0) || ((double)t < tunit))
                        tunit = (double)t;
        }

        return tunit / CALIBRATE_SCALE;
}

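/*
 * Worked example of the calibration math (numbers are illustrative,
 * not the values defined in idletime.h): with CALIBRATE_SCALE = 100
 * and a fastest run of t = 5000us, the unit cost is 5000 / 100 = 50us
 * per pass over the page.  The minimum over CALIBRATE_RUNS is used so
 * that runs inflated by interrupts or preemption do not skew tunit;
 * it approximates the uncontended cost of one unit of work.
 */
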
static int set_cpu_affinity(struct idle_prof_thread *ipt)
{
#if defined(FIO_HAVE_CPU_AFFINITY)
        os_cpu_mask_t cpu_mask;

        memset(&cpu_mask, 0, sizeof(cpu_mask));
        fio_cpu_set(&cpu_mask, ipt->cpu);

        if (fio_setaffinity(gettid(), cpu_mask)) {
                log_err("fio: fio_setaffinity failed\n");
                return -1;
        }

        return 0;
#else
        log_err("fio: fio_setaffinity not supported\n");
        return -1;
#endif
}

static void *idle_prof_thread_fn(void *data)
{
        int retval;
        unsigned long j, k;
        struct idle_prof_thread *ipt = data;

        /* wait until all threads are spawned */
        pthread_mutex_lock(&ipt->init_lock);

        /* exit if any other thread failed to start */
        if (ipc.status == IDLE_PROF_STATUS_ABORT) {
                pthread_mutex_unlock(&ipt->init_lock);
                return NULL;
        }

        retval = set_cpu_affinity(ipt);
        if (retval == -1) {
                ipt->state = TD_EXITED;
                pthread_mutex_unlock(&ipt->init_lock);
                return NULL;
        }

        ipt->cali_time = calibrate_unit(ipt->data);

        /* delay setting the IDLE class until now for better calibration accuracy */
#if defined(CONFIG_SCHED_IDLE)
        if ((retval = fio_set_sched_idle()))
                log_err("fio: fio_set_sched_idle failed\n");
#else
        retval = -1;
        log_err("fio: fio_set_sched_idle not supported\n");
#endif

        if (retval == -1) {
                ipt->state = TD_EXITED;
                pthread_mutex_unlock(&ipt->init_lock);
                return NULL;
        }

        ipt->state = TD_INITIALIZED;

        /* signal the main thread that calibration is done */
        pthread_cond_signal(&ipt->cond);
        pthread_mutex_unlock(&ipt->init_lock);

        /* wait for the other calibrations to finish */
        pthread_mutex_lock(&ipt->start_lock);

        /* exit if other threads failed to initialize */
        if (ipc.status == IDLE_PROF_STATUS_ABORT) {
                pthread_mutex_unlock(&ipt->start_lock);
                return NULL;
        }

        /* exit if we are doing calibration only */
        if (ipc.status == IDLE_PROF_STATUS_CALI_STOP) {
                pthread_mutex_unlock(&ipt->start_lock);
                return NULL;
        }

        fio_gettime(&ipt->tps, NULL);
        ipt->state = TD_RUNNING;

        j = 0;
        while (1) {
                for (k = 0; k < page_size; k++) {
                        ipt->data[(k + j) % page_size] = k % 256;
                        if (ipc.status == IDLE_PROF_STATUS_PROF_STOP) {
                                fio_gettime(&ipt->tpe, NULL);
                                goto idle_prof_done;
                        }
                }
                j++;
        }

idle_prof_done:

        ipt->loops = j + (double) k / page_size;
        ipt->state = TD_EXITED;
        pthread_mutex_unlock(&ipt->start_lock);

        return NULL;
}

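/*
 * Summary of the handshake implemented above and in
 * fio_idle_prof_init/start/stop (a restatement of the code, not
 * additional behavior):
 *
 *   main: lock init_lock and start_lock, then pthread_create
 *   prof: blocks on init_lock
 *   main: unlocks init_lock -> prof: set affinity, calibrate,
 *         set SCHED_IDLE, signal cond, unlock init_lock
 *   main: cond_timedwait until TD_INITIALIZED (or TD_EXITED)
 *   main: unlocks start_lock in fio_idle_prof_start -> prof: loops
 *   main: sets IDLE_PROF_STATUS_PROF_STOP in fio_idle_prof_stop
 *   prof: records tpe and loops, sets TD_EXITED, unlocks start_lock
 */
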
/* calculate the mean and standard deviation of the time to complete a unit of work */
static void calibration_stats(void)
{
        int i;
        double sum = 0.0, var = 0.0;
        struct idle_prof_thread *ipt;

        for (i = 0; i < ipc.nr_cpus; i++) {
                ipt = &ipc.ipts[i];
                sum += ipt->cali_time;
        }

        ipc.cali_mean = sum / ipc.nr_cpus;

        for (i = 0; i < ipc.nr_cpus; i++) {
                ipt = &ipc.ipts[i];
                var += pow(ipt->cali_time - ipc.cali_mean, 2);
        }

        ipc.cali_stddev = sqrt(var / (ipc.nr_cpus - 1));
}

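/*
 * Equivalently: cali_mean = sum(t_i) / n and
 * cali_stddev = sqrt(sum((t_i - cali_mean)^2) / (n - 1)), with t_i the
 * per-cpu unit time and n = nr_cpus.  The n - 1 divisor is the sample
 * (Bessel-corrected) standard deviation; on a one-cpu system it would
 * divide by zero, so the stddev is only meaningful for n > 1.
 */
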
void fio_idle_prof_init(void)
{
        int i, ret;
        struct timeval tp;
        struct timespec ts;
        pthread_attr_t tattr;
        struct idle_prof_thread *ipt;

        ipc.nr_cpus = cpus_online();
        ipc.status = IDLE_PROF_STATUS_OK;

        if (ipc.opt == IDLE_PROF_OPT_NONE)
                return;

        if ((ret = pthread_attr_init(&tattr))) {
                log_err("fio: pthread_attr_init %s\n", strerror(ret));
                return;
        }

        if ((ret = pthread_attr_setscope(&tattr, PTHREAD_SCOPE_SYSTEM))) {
                log_err("fio: pthread_attr_setscope %s\n", strerror(ret));
                return;
        }

        ipc.ipts = malloc(ipc.nr_cpus * sizeof(struct idle_prof_thread));
        if (!ipc.ipts) {
                log_err("fio: malloc failed\n");
                return;
        }

        ipc.buf = malloc(ipc.nr_cpus * page_size);
        if (!ipc.buf) {
                log_err("fio: malloc failed\n");
                free(ipc.ipts);
                return;
        }

        /*
         * Profiling aborts on any single thread failure, since the
         * result won't be accurate if any cpu is not used.
         */
        for (i = 0; i < ipc.nr_cpus; i++) {
                ipt = &ipc.ipts[i];
                ipt->cpu = i;
                ipt->state = TD_NOT_CREATED;
                ipt->data = (unsigned char *)(ipc.buf + page_size * i);

                if ((ret = pthread_mutex_init(&ipt->init_lock, NULL))) {
                        ipc.status = IDLE_PROF_STATUS_ABORT;
                        log_err("fio: pthread_mutex_init %s\n", strerror(ret));
                        break;
                }

                if ((ret = pthread_mutex_init(&ipt->start_lock, NULL))) {
                        ipc.status = IDLE_PROF_STATUS_ABORT;
                        log_err("fio: pthread_mutex_init %s\n", strerror(ret));
                        break;
                }

                if ((ret = pthread_cond_init(&ipt->cond, NULL))) {
                        ipc.status = IDLE_PROF_STATUS_ABORT;
                        log_err("fio: pthread_cond_init %s\n", strerror(ret));
                        break;
                }

                /* make sure all threads are spawned before they start */
                pthread_mutex_lock(&ipt->init_lock);

                /* make sure all threads finish init before profiling starts */
                pthread_mutex_lock(&ipt->start_lock);

                if ((ret = pthread_create(&ipt->thread, &tattr, idle_prof_thread_fn, ipt))) {
                        ipc.status = IDLE_PROF_STATUS_ABORT;
                        log_err("fio: pthread_create %s\n", strerror(ret));
                        break;
                }
                ipt->state = TD_CREATED;

                if ((ret = pthread_detach(ipt->thread))) {
                        /* log the error and let the thread spin */
                        log_err("fio: pthread_detach %s\n", strerror(ret));
                }
        }

        /*
         * Let the good threads continue, so that they can exit
         * if errors occurred on other threads previously.
         */
        for (i = 0; i < ipc.nr_cpus; i++) {
                ipt = &ipc.ipts[i];
                pthread_mutex_unlock(&ipt->init_lock);
        }

        if (ipc.status == IDLE_PROF_STATUS_ABORT)
                return;

        /* wait for calibration to finish */
        for (i = 0; i < ipc.nr_cpus; i++) {
                ipt = &ipc.ipts[i];
                pthread_mutex_lock(&ipt->init_lock);
                while ((ipt->state != TD_EXITED) &&
                       (ipt->state != TD_INITIALIZED)) {
                        fio_gettime(&tp, NULL);
                        ts.tv_sec = tp.tv_sec + 1;
                        ts.tv_nsec = tp.tv_usec * 1000;
                        pthread_cond_timedwait(&ipt->cond, &ipt->init_lock, &ts);
                }
                pthread_mutex_unlock(&ipt->init_lock);

                /*
                 * If any thread failed to initialize, it will abort the
                 * other threads later, after fio_idle_prof_start.
                 */
                if (ipt->state == TD_EXITED)
                        ipc.status = IDLE_PROF_STATUS_ABORT;
        }

        if (ipc.status != IDLE_PROF_STATUS_ABORT)
                calibration_stats();
        else
                ipc.cali_mean = ipc.cali_stddev = 0.0;

        if (ipc.opt == IDLE_PROF_OPT_CALI)
                ipc.status = IDLE_PROF_STATUS_CALI_STOP;
}

void fio_idle_prof_start(void)
{
        int i;
        struct idle_prof_thread *ipt;

        if (ipc.opt == IDLE_PROF_OPT_NONE)
                return;

        /* unlock regardless of whether abort is set */
        for (i = 0; i < ipc.nr_cpus; i++) {
                ipt = &ipc.ipts[i];
                pthread_mutex_unlock(&ipt->start_lock);
        }
}

void fio_idle_prof_stop(void)
{
        int i;
        uint64_t runt;
        struct timeval tp;
        struct timespec ts;
        struct idle_prof_thread *ipt;

        if (ipc.opt == IDLE_PROF_OPT_NONE)
                return;

        if (ipc.opt == IDLE_PROF_OPT_CALI)
                return;

        ipc.status = IDLE_PROF_STATUS_PROF_STOP;

        /* wait for all threads to exit from profiling */
        for (i = 0; i < ipc.nr_cpus; i++) {
                ipt = &ipc.ipts[i];
                pthread_mutex_lock(&ipt->start_lock);
                while ((ipt->state != TD_EXITED) &&
                       (ipt->state != TD_NOT_CREATED)) {
                        fio_gettime(&tp, NULL);
                        ts.tv_sec = tp.tv_sec + 1;
                        ts.tv_nsec = tp.tv_usec * 1000;
                        /* timed wait in case a signal is not received */
                        pthread_cond_timedwait(&ipt->cond, &ipt->start_lock, &ts);
                }
                pthread_mutex_unlock(&ipt->start_lock);

                /* calculate idleness */
                if (ipc.cali_mean != 0.0) {
                        runt = utime_since(&ipt->tps, &ipt->tpe);
                        if (runt)
                                ipt->idleness = ipt->loops * ipc.cali_mean / runt;
                        else
                                ipt->idleness = 0.0;
                } else
                        ipt->idleness = 0.0;
        }

        /*
         * Memory allocations are freed via an explicit fio_idle_prof_cleanup()
         * call after the profiling stats have been collected by apps.
         */
}

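/*
 * Worked example of the idleness calculation above (illustrative
 * numbers): with cali_mean = 50us per unit of work and a thread that
 * completed loops = 10000 units over runt = 1000000us of wall time,
 * idleness = 10000 * 50 / 1000000 = 0.5, i.e. the lowest-priority
 * looper was able to run half of the time on that cpu.
 */
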
/*
 * Return the system idle percentage when cpu is -1;
 * return a single cpu's idle percentage otherwise.
 */
static double fio_idle_prof_cpu_stat(int cpu)
{
        int i, nr_cpus = ipc.nr_cpus;
        struct idle_prof_thread *ipt;
        double p = 0.0;

        if (ipc.opt == IDLE_PROF_OPT_NONE)
                return 0.0;

        if ((cpu >= nr_cpus) || (cpu < -1)) {
                log_err("fio: idle profiling invalid cpu index\n");
                return 0.0;
        }

        if (cpu == -1) {
                for (i = 0; i < nr_cpus; i++) {
                        ipt = &ipc.ipts[i];
                        p += ipt->idleness;
                }
                p *= 100.0 / nr_cpus;
        } else {
                ipt = &ipc.ipts[cpu];
                p = ipt->idleness * 100.0;
        }

        return p;
}

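/*
 * Example of the aggregation above (illustrative numbers): with two
 * cpus at idleness 0.50 and 0.46, cpu 0 reports 0.50 * 100.0 = 50.00%
 * and the system figure is (0.50 + 0.46) * 100.0 / 2 = 48.00%.
 */
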
static void fio_idle_prof_cleanup(void)
{
        if (ipc.ipts) {
                free(ipc.ipts);
                ipc.ipts = NULL;
        }

        if (ipc.buf) {
                free(ipc.buf);
                ipc.buf = NULL;
        }
}

int fio_idle_prof_parse_opt(const char *args)
{
        ipc.opt = IDLE_PROF_OPT_NONE; /* default */

        if (!args) {
                log_err("fio: empty idle-prof option string\n");
                return -1;
        }

#if defined(FIO_HAVE_CPU_AFFINITY) && defined(CONFIG_SCHED_IDLE)
        if (strcmp("calibrate", args) == 0) {
                ipc.opt = IDLE_PROF_OPT_CALI;
                fio_idle_prof_init();
                fio_idle_prof_start();
                fio_idle_prof_stop();
                show_idle_prof_stats(FIO_OUTPUT_NORMAL, NULL);
                return 1;
        } else if (strcmp("system", args) == 0) {
                ipc.opt = IDLE_PROF_OPT_SYSTEM;
                return 0;
        } else if (strcmp("percpu", args) == 0) {
                ipc.opt = IDLE_PROF_OPT_PERCPU;
                return 0;
        } else {
                log_err("fio: incorrect idle-prof option: %s\n", args);
                return -1;
        }
#else
        log_err("fio: idle-prof not supported on this platform\n");
        return -1;
#endif
}

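/*
 * Usage sketch for the options parsed above; the flag spelling is
 * assumed to match fio's documented --idle-prof option:
 *
 *   fio --idle-prof=calibrate          # print the unit work cost and exit
 *   fio --idle-prof=system <job file>  # report system-wide idleness
 *   fio --idle-prof=percpu <job file>  # report per-cpu idleness
 */
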
void show_idle_prof_stats(int output, struct json_object *parent)
{
        int i, nr_cpus = ipc.nr_cpus;
        struct json_object *tmp;
        char s[MAX_CPU_STR_LEN];

        if (output == FIO_OUTPUT_NORMAL) {
                if (ipc.opt > IDLE_PROF_OPT_CALI)
                        log_info("\nCPU idleness:\n");
                else if (ipc.opt == IDLE_PROF_OPT_CALI)
                        log_info("CPU idleness:\n");

                if (ipc.opt >= IDLE_PROF_OPT_SYSTEM)
                        log_info("  system: %3.2f%%\n", fio_idle_prof_cpu_stat(-1));

                if (ipc.opt == IDLE_PROF_OPT_PERCPU) {
                        log_info("  percpu: %3.2f%%", fio_idle_prof_cpu_stat(0));
                        for (i = 1; i < nr_cpus; i++)
                                log_info(", %3.2f%%", fio_idle_prof_cpu_stat(i));
                        log_info("\n");
                }

                if (ipc.opt >= IDLE_PROF_OPT_CALI) {
                        log_info("  unit work: mean=%3.2fus,", ipc.cali_mean);
                        log_info(" stddev=%3.2f\n", ipc.cali_stddev);
                }

                /* dynamic mem allocations can now be freed */
                if (ipc.opt != IDLE_PROF_OPT_NONE)
                        fio_idle_prof_cleanup();

                return;
        }

        if ((ipc.opt != IDLE_PROF_OPT_NONE) && (output == FIO_OUTPUT_JSON)) {
                if (!parent)
                        return;

                tmp = json_create_object();
                if (!tmp)
                        return;

                json_object_add_value_object(parent, "cpu_idleness", tmp);
                json_object_add_value_float(tmp, "system", fio_idle_prof_cpu_stat(-1));

                if (ipc.opt == IDLE_PROF_OPT_PERCPU) {
                        for (i = 0; i < nr_cpus; i++) {
                                snprintf(s, MAX_CPU_STR_LEN, "cpu-%d", i);
                                json_object_add_value_float(tmp, s, fio_idle_prof_cpu_stat(i));
                        }
                }

                json_object_add_value_float(tmp, "unit_mean", ipc.cali_mean);
                json_object_add_value_float(tmp, "unit_stddev", ipc.cali_stddev);

                fio_idle_prof_cleanup();
        }
}

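/*
 * Illustrative shape of the JSON fragment emitted above (values are
 * made up):
 *
 *   "cpu_idleness" : {
 *     "system" : 48.50,
 *     "cpu-0" : 50.12,
 *     "cpu-1" : 46.88,
 *     "unit_mean" : 51.03,
 *     "unit_stddev" : 1.27
 *   }
 */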