#include <math.h>
#include "fio.h"
#include "json.h"
#include "idletime.h"

static volatile struct idle_prof_common ipc;
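/*
 * ipc is the one piece of state shared between the main thread and the
 * per-cpu profiling threads: the selected option, the run status, the
 * calibration results and the per-thread array. The main thread
 * publishes ipc.status (OK, ABORT, CALI_STOP, PROF_STOP) and each
 * profiling thread polls it inside its work loop, hence the volatile.
 */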
/*
 * Get the time to complete a unit of work on a particular cpu.
 * The minimum time over CALIBRATE_RUNS runs is returned.
 */
static double calibrate_unit(unsigned char *data)
{
	unsigned long t, i, j, k;
	struct timespec tps;
	double tunit = 0.0;

	for (i = 0; i < CALIBRATE_RUNS; i++) {
		fio_gettime(&tps, NULL);
		/* scale for less variance */
		for (j = 0; j < CALIBRATE_SCALE; j++) {
			/* unit of work */
			for (k = 0; k < page_size; k++) {
				data[(k + j) % page_size] = k % 256;
				/*
				 * We won't see STOP here; this only mirrors
				 * the same check in the profiling loop.
				 */
				if (ipc.status == IDLE_PROF_STATUS_PROF_STOP)
					return 0.0;
			}
		}

		t = utime_since_now(&tps);
		if (!t)
			continue;

		/* get the minimum time to complete CALIBRATE_SCALE units */
		if ((i == 0) || ((double)t < tunit))
			tunit = (double)t;
	}

	return tunit / CALIBRATE_SCALE;
}
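/*
 * Illustration with hypothetical numbers: if one calibration pass of
 * CALIBRATE_SCALE = 100 units takes t = 200 usec, calibrate_unit()
 * returns 2.0 usec per unit. That per-unit cost is what later converts
 * a profiling thread's loop count back into cpu busy time.
 */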
static void free_cpu_affinity(struct idle_prof_thread *ipt)
{
#if defined(FIO_HAVE_CPU_AFFINITY)
	fio_cpuset_exit(&ipt->cpu_mask);
#endif
}
static int set_cpu_affinity(struct idle_prof_thread *ipt)
{
#if defined(FIO_HAVE_CPU_AFFINITY)
	if (fio_cpuset_init(&ipt->cpu_mask)) {
		log_err("fio: cpuset init failed\n");
		return -1;
	}

	fio_cpu_set(&ipt->cpu_mask, ipt->cpu);

	if (fio_setaffinity(gettid(), ipt->cpu_mask)) {
		log_err("fio: fio_setaffinity failed\n");
		fio_cpuset_exit(&ipt->cpu_mask);
		return -1;
	}
	return 0;
#else
	log_err("fio: fio_setaffinity not supported\n");
	return -1;
#endif
}
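/*
 * Pinning exactly one profiling thread to each cpu is what makes the
 * idleness numbers meaningful: a pinned thread can only soak up idle
 * cycles on its own cpu. This is also why profiling aborts outright
 * when any pin (or any thread) fails.
 */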
static void *idle_prof_thread_fn(void *data)
{
	int retval;
	unsigned long j, k;
	struct idle_prof_thread *ipt = data;

	/* wait until all threads are spawned */
	pthread_mutex_lock(&ipt->init_lock);

	/* exit if any other thread failed to start */
	if (ipc.status == IDLE_PROF_STATUS_ABORT) {
		pthread_mutex_unlock(&ipt->init_lock);
		return NULL;
	}

	retval = set_cpu_affinity(ipt);
	if (retval == -1) {
		ipt->state = TD_EXITED;
		pthread_mutex_unlock(&ipt->init_lock);
		return NULL;
	}

	ipt->cali_time = calibrate_unit(ipt->data);

	/* delay setting the IDLE scheduling class until now for better calibration accuracy */
#if defined(CONFIG_SCHED_IDLE)
	if ((retval = fio_set_sched_idle()))
		log_err("fio: fio_set_sched_idle failed\n");
#else
	retval = -1;
	log_err("fio: fio_set_sched_idle not supported\n");
#endif
	if (retval == -1) {
		ipt->state = TD_EXITED;
		pthread_mutex_unlock(&ipt->init_lock);
		goto do_exit;
	}

	ipt->state = TD_INITIALIZED;

	/* signal the main thread that calibration is done */
	pthread_cond_signal(&ipt->cond);
	pthread_mutex_unlock(&ipt->init_lock);

	/* wait for the other threads' calibration to finish */
	pthread_mutex_lock(&ipt->start_lock);

	/* exit if any other thread failed to initialize */
	if (ipc.status == IDLE_PROF_STATUS_ABORT) {
		pthread_mutex_unlock(&ipt->start_lock);
		goto do_exit;
	}

	/* exit if we are doing calibration only */
	if (ipc.status == IDLE_PROF_STATUS_CALI_STOP) {
		pthread_mutex_unlock(&ipt->start_lock);
		goto do_exit;
	}

	fio_gettime(&ipt->tps, NULL);
	ipt->state = TD_RUNNING;

	j = 0;
	while (1) {
		for (k = 0; k < page_size; k++) {
			ipt->data[(k + j) % page_size] = k % 256;
			if (ipc.status == IDLE_PROF_STATUS_PROF_STOP) {
				fio_gettime(&ipt->tpe, NULL);
				goto idle_prof_done;
			}
		}
		j++;
	}

idle_prof_done:
	ipt->loops = j + (double) k / page_size;
	ipt->state = TD_EXITED;
	pthread_mutex_unlock(&ipt->start_lock);

do_exit:
	free_cpu_affinity(ipt);
	return NULL;
}
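/*
 * Lifecycle recap: each thread blocks on init_lock (pre-locked by the
 * main thread) until all threads exist, calibrates at normal priority,
 * drops to the IDLE scheduling class, signals readiness via cond, then
 * blocks on start_lock until fio_idle_prof_start() releases it into
 * the profiling loop above.
 */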
/* calculate the mean and standard deviation of the time to complete a unit of work */
static void calibration_stats(void)
{
	int i;
	double sum = 0.0, var = 0.0;
	struct idle_prof_thread *ipt;

	for (i = 0; i < ipc.nr_cpus; i++) {
		ipt = &ipc.ipts[i];
		sum += ipt->cali_time;
	}
	ipc.cali_mean = sum / ipc.nr_cpus;

	for (i = 0; i < ipc.nr_cpus; i++) {
		ipt = &ipc.ipts[i];
		var += pow(ipt->cali_time - ipc.cali_mean, 2);
	}
	ipc.cali_stddev = sqrt(var / (ipc.nr_cpus - 1));
}
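/*
 * Example with hypothetical per-cpu unit times {1.8, 2.0, 2.2} usec:
 * cali_mean = 2.0 and cali_stddev = sqrt((0.04 + 0.0 + 0.04) / 2) = 0.2.
 * Note the sample (n - 1) divisor: a single-cpu system divides by zero
 * here, so nr_cpus > 1 is implicitly assumed.
 */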
void fio_idle_prof_init(void)
{
	int i, ret;
	struct timespec ts;
	pthread_attr_t tattr;
	struct idle_prof_thread *ipt;

	ipc.nr_cpus = cpus_online();
	ipc.status = IDLE_PROF_STATUS_OK;

	if (ipc.opt == IDLE_PROF_OPT_NONE)
		return;

	if ((ret = pthread_attr_init(&tattr))) {
		log_err("fio: pthread_attr_init %s\n", strerror(ret));
		return;
	}
	if ((ret = pthread_attr_setscope(&tattr, PTHREAD_SCOPE_SYSTEM))) {
		log_err("fio: pthread_attr_setscope %s\n", strerror(ret));
		return;
	}

	ipc.ipts = malloc(ipc.nr_cpus * sizeof(struct idle_prof_thread));
	if (!ipc.ipts) {
		log_err("fio: malloc failed\n");
		return;
	}

	ipc.buf = malloc(ipc.nr_cpus * page_size);
	if (!ipc.buf) {
		log_err("fio: malloc failed\n");
		free(ipc.ipts);
		return;
	}

	/*
	 * profiling aborts on any single thread failure, since the
	 * result won't be accurate if any cpu is left unused.
	 */
	for (i = 0; i < ipc.nr_cpus; i++) {
		ipt = &ipc.ipts[i];
		ipt->cpu = i;
		ipt->state = TD_NOT_CREATED;
		ipt->data = (unsigned char *)(ipc.buf + page_size * i);

		if ((ret = pthread_mutex_init(&ipt->init_lock, NULL))) {
			ipc.status = IDLE_PROF_STATUS_ABORT;
			log_err("fio: pthread_mutex_init %s\n", strerror(ret));
			break;
		}

		if ((ret = pthread_mutex_init(&ipt->start_lock, NULL))) {
			ipc.status = IDLE_PROF_STATUS_ABORT;
			log_err("fio: pthread_mutex_init %s\n", strerror(ret));
			break;
		}

		if ((ret = pthread_cond_init(&ipt->cond, NULL))) {
			ipc.status = IDLE_PROF_STATUS_ABORT;
			log_err("fio: pthread_cond_init %s\n", strerror(ret));
			break;
		}

		/* make sure all threads are spawned before they start */
		pthread_mutex_lock(&ipt->init_lock);

		/* make sure all threads finish init before profiling starts */
		pthread_mutex_lock(&ipt->start_lock);

		if ((ret = pthread_create(&ipt->thread, &tattr, idle_prof_thread_fn, ipt))) {
			ipc.status = IDLE_PROF_STATUS_ABORT;
			log_err("fio: pthread_create %s\n", strerror(ret));
			break;
		}
		ipt->state = TD_CREATED;

		if ((ret = pthread_detach(ipt->thread))) {
			/* log the error and let the thread spin */
			log_err("fio: pthread_detach %s\n", strerror(ret));
		}
	}
	/*
	 * let the good threads continue, so that they can exit if an
	 * error occurred on any other thread.
	 */
	for (i = 0; i < ipc.nr_cpus; i++) {
		ipt = &ipc.ipts[i];
		pthread_mutex_unlock(&ipt->init_lock);
	}

	if (ipc.status == IDLE_PROF_STATUS_ABORT)
		return;

	/* wait for calibration to finish */
	for (i = 0; i < ipc.nr_cpus; i++) {
		ipt = &ipc.ipts[i];
		pthread_mutex_lock(&ipt->init_lock);
		while ((ipt->state != TD_EXITED) &&
		       (ipt->state != TD_INITIALIZED)) {
			fio_gettime(&ts, NULL);
			ts.tv_sec += 1;
			pthread_cond_timedwait(&ipt->cond, &ipt->init_lock, &ts);
		}
		pthread_mutex_unlock(&ipt->init_lock);

		/*
		 * a thread that failed to initialize will make the other
		 * threads abort later, after fio_idle_prof_start.
		 */
		if (ipt->state == TD_EXITED)
			ipc.status = IDLE_PROF_STATUS_ABORT;
	}

	if (ipc.status != IDLE_PROF_STATUS_ABORT)
		calibration_stats();
	else
		ipc.cali_mean = ipc.cali_stddev = 0.0;

	if (ipc.opt == IDLE_PROF_OPT_CALI)
		ipc.status = IDLE_PROF_STATUS_CALI_STOP;
}
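/*
 * The two mutexes double as barriers: the main thread pre-locks both
 * init_lock and start_lock for every thread before pthread_create, so
 * unlocking init_lock above releases all threads into calibration at
 * once, and fio_idle_prof_start() later does the same with start_lock.
 */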
void fio_idle_prof_start(void)
{
	int i;
	struct idle_prof_thread *ipt;

	if (ipc.opt == IDLE_PROF_OPT_NONE)
		return;

	/* unlock regardless of whether abort is set */
	for (i = 0; i < ipc.nr_cpus; i++) {
		ipt = &ipc.ipts[i];
		pthread_mutex_unlock(&ipt->start_lock);
	}
}
void fio_idle_prof_stop(void)
{
	int i;
	uint64_t runt;
	struct timespec ts;
	struct idle_prof_thread *ipt;

	if (ipc.opt == IDLE_PROF_OPT_NONE)
		return;
	if (ipc.opt == IDLE_PROF_OPT_CALI)
		return;

	ipc.status = IDLE_PROF_STATUS_PROF_STOP;

	/* wait for all threads to exit from profiling */
	for (i = 0; i < ipc.nr_cpus; i++) {
		ipt = &ipc.ipts[i];
		pthread_mutex_lock(&ipt->start_lock);
		while ((ipt->state != TD_EXITED) &&
		       (ipt->state != TD_NOT_CREATED)) {
			fio_gettime(&ts, NULL);
			ts.tv_sec += 1;
			/* timed wait in case a signal is not received */
			pthread_cond_timedwait(&ipt->cond, &ipt->start_lock, &ts);
		}
		pthread_mutex_unlock(&ipt->start_lock);

		/* calculate idleness */
		if (ipc.cali_mean != 0.0) {
			runt = utime_since(&ipt->tps, &ipt->tpe);
			if (runt)
				ipt->idleness = ipt->loops * ipc.cali_mean / runt;
			else
				ipt->idleness = 0.0;
		} else
			ipt->idleness = 0.0;
	}

	/*
	 * memory allocations are freed via the explicit fio_idle_prof_cleanup()
	 * call, after the profiling stats have been collected by the callers.
	 */
}
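/*
 * Idleness math, with hypothetical numbers: a thread that completed
 * loops = 1e6 units at cali_mean = 2.0 usec/unit during a profiling
 * window of runt = 4e6 usec held the cpu for about 2e6 usec of
 * otherwise-idle time, giving idleness = 1e6 * 2.0 / 4e6 = 0.5 (50%).
 */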
/*
 * Return the system idle percentage when cpu is -1;
 * otherwise return the idle percentage of the given cpu.
 */
static double fio_idle_prof_cpu_stat(int cpu)
{
	int i, nr_cpus = ipc.nr_cpus;
	struct idle_prof_thread *ipt;
	double p = 0.0;

	if (ipc.opt == IDLE_PROF_OPT_NONE)
		return 0.0;

	if ((cpu >= nr_cpus) || (cpu < -1)) {
		log_err("fio: idle profiling invalid cpu index\n");
		return -1.0;
	}

	if (cpu == -1) {
		for (i = 0; i < nr_cpus; i++) {
			ipt = &ipc.ipts[i];
			p += ipt->idleness;
		}
		p = p * 100.0 / nr_cpus;
	} else {
		ipt = &ipc.ipts[cpu];
		p = ipt->idleness * 100.0;
	}

	return p;
}
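/*
 * E.g. with (hypothetical) idleness values 0.50 and 0.70 on a 2-cpu
 * system, the per-cpu stats are 50.00% and 70.00%, and the system-wide
 * stat is (0.50 + 0.70) * 100.0 / 2 = 60.00%.
 */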
static void fio_idle_prof_cleanup(void)
{
	free(ipc.ipts);
	ipc.ipts = NULL;
	free(ipc.buf);
	ipc.buf = NULL;
}
int fio_idle_prof_parse_opt(const char *args)
{
	ipc.opt = IDLE_PROF_OPT_NONE; /* default */

	if (!args) {
		log_err("fio: empty idle-prof option string\n");
		return -1;
	}

#if defined(FIO_HAVE_CPU_AFFINITY) && defined(CONFIG_SCHED_IDLE)
	if (strcmp("calibrate", args) == 0) {
		ipc.opt = IDLE_PROF_OPT_CALI;
		fio_idle_prof_init();
		fio_idle_prof_start();
		fio_idle_prof_stop();
		show_idle_prof_stats(FIO_OUTPUT_NORMAL, NULL, NULL);
		return 1;
	} else if (strcmp("system", args) == 0) {
		ipc.opt = IDLE_PROF_OPT_SYSTEM;
		return 0;
	} else if (strcmp("percpu", args) == 0) {
		ipc.opt = IDLE_PROF_OPT_PERCPU;
		return 0;
	} else {
		log_err("fio: incorrect idle-prof option: %s\n", args);
		return -1;
	}
#else
	log_err("fio: idle-prof not supported on this platform\n");
	return -1;
#endif
}
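/*
 * Return convention: 1 after a standalone "calibrate" run (its stats
 * were already printed above), 0 when system or percpu profiling has
 * been armed for the upcoming run, and -1 on a bad option string or an
 * unsupported platform.
 */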
void show_idle_prof_stats(int output, struct json_object *parent,
			  struct buf_output *out)
{
	int i, nr_cpus = ipc.nr_cpus;
	struct json_object *tmp;
	char s[MAX_CPU_STR_LEN];

	if (output == FIO_OUTPUT_NORMAL) {
		if (ipc.opt > IDLE_PROF_OPT_CALI)
			log_buf(out, "\nCPU idleness:\n");
		else if (ipc.opt == IDLE_PROF_OPT_CALI)
			log_buf(out, "CPU idleness:\n");

		if (ipc.opt >= IDLE_PROF_OPT_SYSTEM)
			log_buf(out, "  system: %3.2f%%\n", fio_idle_prof_cpu_stat(-1));

		if (ipc.opt == IDLE_PROF_OPT_PERCPU) {
			log_buf(out, "  percpu: %3.2f%%", fio_idle_prof_cpu_stat(0));
			for (i = 1; i < nr_cpus; i++)
				log_buf(out, ", %3.2f%%", fio_idle_prof_cpu_stat(i));
			log_buf(out, "\n");
		}

		if (ipc.opt >= IDLE_PROF_OPT_CALI) {
			log_buf(out, "  unit work: mean=%3.2fus,", ipc.cali_mean);
			log_buf(out, " stddev=%3.2f\n", ipc.cali_stddev);
		}

		/* dynamic memory allocations can now be freed */
		if (ipc.opt != IDLE_PROF_OPT_NONE)
			fio_idle_prof_cleanup();

		return;
	}
	if ((ipc.opt != IDLE_PROF_OPT_NONE) && (output & FIO_OUTPUT_JSON)) {
		if (!parent)
			return;

		tmp = json_create_object();
		if (!tmp)
			return;

		json_object_add_value_object(parent, "cpu_idleness", tmp);
		json_object_add_value_float(tmp, "system", fio_idle_prof_cpu_stat(-1));

		if (ipc.opt == IDLE_PROF_OPT_PERCPU) {
			for (i = 0; i < nr_cpus; i++) {
				snprintf(s, MAX_CPU_STR_LEN, "cpu-%d", i);
				json_object_add_value_float(tmp, s, fio_idle_prof_cpu_stat(i));
			}
		}

		json_object_add_value_float(tmp, "unit_mean", ipc.cali_mean);
		json_object_add_value_float(tmp, "unit_stddev", ipc.cali_stddev);

		fio_idle_prof_cleanup();
	}
}
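/*
 * Usage sketch (illustration only, not part of fio): the call sequence
 * a frontend would follow for idle profiling. The fio_idle_prof_* and
 * show_idle_prof_stats() names are the real ones above;
 * example_run_workload() is a hypothetical stand-in for the actual job.
 */
#ifdef IDLE_PROF_USAGE_EXAMPLE
static void example_run_workload(void);	/* hypothetical workload hook */

static void example_idle_prof_session(void)
{
	if (fio_idle_prof_parse_opt("system") == 0) {
		fio_idle_prof_init();	/* spawn and calibrate per-cpu threads */
		fio_idle_prof_start();	/* release them into the profiling loops */
		example_run_workload();	/* the measured workload runs here */
		fio_idle_prof_stop();	/* set PROF_STOP and compute idleness */
		/* print to stdout and free the profiler's allocations */
		show_idle_prof_stats(FIO_OUTPUT_NORMAL, NULL, NULL);
	}
}
#endif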