static volatile struct idle_prof_common ipc;
-/* Get time to complete an unit work on a particular cpu.
+/*
+ * Get time to complete a unit of work on a particular cpu.
- * The minimum number in CALIBRATE_RUNS runs is returned.
+ * The minimum time over CALIBRATE_RUNS runs is returned.
*/
static double calibrate_unit(unsigned char *data)
struct timeval tps;
double tunit = 0.0;
- for (i=0; i<CALIBRATE_RUNS; i++) {
+ for (i = 0; i < CALIBRATE_RUNS; i++) {
fio_gettime(&tps, NULL);
/* scale for less variance */
- for (j=0; j < CALIBRATE_SCALE; j++) {
+ for (j = 0; j < CALIBRATE_SCALE; j++) {
/* unit of work */
- for (k=0; k < page_size; k++) {
+ for (k = 0; k < page_size; k++) {
- data[(k+j)%page_size] = k%256;
- /* we won't see STOP here. this is to match
+ data[(k + j) % page_size] = k % 256;
+ /*
+ * we won't see STOP here. this is to match
* the same statement in the profiling loop.
*/
if (ipc.status == IDLE_PROF_STATUS_PROF_STOP)
continue;
/* get the minimum time to complete CALIBRATE_SCALE units */
- if ((i==0) || ((double)t < tunit))
+ if ((i == 0) || ((double)t < tunit))
tunit = (double)t;
}
- return tunit/CALIBRATE_SCALE;
+ return tunit / CALIBRATE_SCALE;
}
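The minimum-of-runs logic above is a standard calibration trick: interference from other work only ever adds time, so the fastest of CALIBRATE_RUNS runs is the best estimate of the true unloaded cost of one unit. A self-contained sketch of the same pattern, using clock_gettime() in place of fio's fio_gettime()/utime_since_now() helpers; the constants and names are illustrative, not fio's:

#include <stdio.h>
#include <time.h>

#define RUNS	10	/* stands in for CALIBRATE_RUNS */
#define SCALE	100	/* stands in for CALIBRATE_SCALE */
#define PAGE	4096

/* Time SCALE units of work, RUNS times; return the minimum per-unit cost. */
static double calibrate(unsigned char *data)
{
	struct timespec t0, t1;
	double best = 0.0;
	int i, j, k;

	for (i = 0; i < RUNS; i++) {
		clock_gettime(CLOCK_MONOTONIC, &t0);
		for (j = 0; j < SCALE; j++)
			for (k = 0; k < PAGE; k++)
				data[(k + j) % PAGE] = k % 256;
		clock_gettime(CLOCK_MONOTONIC, &t1);

		double us = (t1.tv_sec - t0.tv_sec) * 1e6 +
			    (t1.tv_nsec - t0.tv_nsec) / 1e3;
		/* the minimum across runs rejects preempted samples */
		if (i == 0 || us < best)
			best = us;
	}
	return best / SCALE;	/* microseconds per unit of work */
}

int main(void)
{
	static unsigned char buf[PAGE];

	/* a real harness may need to defeat dead-store elimination */
	printf("unit cost: %.3f usec\n", calibrate(buf));
	return 0;
}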
static void *idle_prof_thread_fn(void *data)
ipt->cali_time = calibrate_unit(ipt->data);
- /* delay to set IDLE class till now for better calibration accuracy */
+ /* delay setting the IDLE class until now for better calibration accuracy */
-#if defined(FIO_HAVE_SCHED_IDLE)
+#if defined(CONFIG_SCHED_IDLE)
if ((retval = fio_set_sched_idle()))
log_err("fio: fio_set_sched_idle failed\n");
#else
j = 0;
while (1) {
- for (k=0; k < page_size; k++) {
- ipt->data[(k+j)%page_size] = k%256;
+ for (k = 0; k < page_size; k++) {
+ ipt->data[(k + j) % page_size] = k % 256;
if (ipc.status == IDLE_PROF_STATUS_PROF_STOP) {
fio_gettime(&ipt->tpe, NULL);
goto idle_prof_done;
idle_prof_done:
- ipt->loops = j + (double)k/page_size;
+ ipt->loops = j + (double)k / page_size;
ipt->state = TD_EXITED;
pthread_mutex_unlock(&ipt->start_lock);
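Deferring the switch to the IDLE scheduling class until after calibrate_unit() (the CONFIG_SCHED_IDLE branch above) keeps the calibration numbers honest: once a thread is SCHED_IDLE it runs only when the cpu has nothing better to do. On Linux, fio_set_sched_idle() presumably reduces to sched_setscheduler() with the SCHED_IDLE policy; a minimal sketch, where the exact thread-id handling and error path may differ from fio's wrapper:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

/*
 * Move the calling thread into the SCHED_IDLE class so it is scheduled
 * only when the cpu would otherwise be idle. The priority must be 0
 * for this policy; a pid of 0 means the calling thread.
 */
static int set_sched_idle(void)
{
	struct sched_param p = { .sched_priority = 0 };

	return sched_setscheduler(0, SCHED_IDLE, &p);
}

int main(void)
{
	if (set_sched_idle())
		perror("sched_setscheduler");
	return 0;
}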
static void calibration_stats(void)
{
int i;
- double sum=0.0, var=0.0;
+ double sum = 0.0, var = 0.0;
struct idle_prof_thread *ipt;
for (i = 0; i < ipc.nr_cpus; i++) {
int i, ret;
struct timeval tp;
struct timespec ts;
- pthread_attr_t tattr;
+ pthread_attr_t tattr;
struct idle_prof_thread *ipt;
ipc.nr_cpus = cpus_online();
return;
}
- /* profiling aborts on any single thread failure since the
+ /*
+ * profiling aborts on any single thread failure since the
* result won't be accurate if any cpu is not used.
*/
for (i = 0; i < ipc.nr_cpus; i++) {
ipc.status = IDLE_PROF_STATUS_ABORT;
log_err("fio: pthread_create %s\n", strerror(ret));
break;
- } else {
+ } else
ipt->state = TD_CREATED;
- }
if ((ret = pthread_detach(ipt->thread))) {
/* log error and let the thread spin */
}
}
- /* let good threads continue so that they can exit
- * if errors on other threads occurred previously.
+ /*
+ * let good threads continue so that they can exit
+ * if errors occurred previously on other threads.
*/
for (i = 0; i < ipc.nr_cpus; i++) {
ipt = &ipc.ipts[i];
for (i = 0; i < ipc.nr_cpus; i++) {
ipt = &ipc.ipts[i];
pthread_mutex_lock(&ipt->init_lock);
- while ((ipt->state!=TD_EXITED) && (ipt->state!=TD_INITIALIZED)) {
+ while ((ipt->state != TD_EXITED) &&
+        (ipt->state != TD_INITIALIZED)) {
fio_gettime(&tp, NULL);
ts.tv_sec = tp.tv_sec + 1;
ts.tv_nsec = tp.tv_usec * 1000;
}
pthread_mutex_unlock(&ipt->init_lock);
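The elided body of this wait loop is presumably the classic absolute-deadline pattern: sample the wall clock, build a timespec one second in the future, and pthread_cond_timedwait() under the mutex until the thread state changes; the same pattern repeats below for start_lock. A sketch with illustrative mutex/cond/state names; it relies on the default condvar clock being CLOCK_REALTIME, which is why a gettimeofday-style sample is the right base:

#include <pthread.h>
#include <sys/time.h>

/*
 * Wait, in one-second slices, until *state leaves 'busy'. Spurious
 * wakeups and timeouts both simply re-check the predicate.
 */
static void wait_for_state_change(pthread_mutex_t *lock, pthread_cond_t *cond,
				  volatile int *state, int busy)
{
	struct timeval tp;
	struct timespec ts;

	pthread_mutex_lock(lock);
	while (*state == busy) {
		gettimeofday(&tp, NULL);
		ts.tv_sec = tp.tv_sec + 1;	/* absolute deadline: now + 1s */
		ts.tv_nsec = tp.tv_usec * 1000;
		pthread_cond_timedwait(cond, lock, &ts);
	}
	pthread_mutex_unlock(lock);
}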
- /* any thread failed to initialize would abort other threads
+ /*
+ * any thread that failed to initialize will abort other threads
* later after fio_idle_prof_start.
*/
if (ipt->state == TD_EXITED)
if (ipc.status != IDLE_PROF_STATUS_ABORT)
calibration_stats();
- else
+ else
ipc.cali_mean = ipc.cali_stddev = 0.0;
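calibration_stats() itself is elided here; judging from the fields it fills in (ipc.cali_mean, ipc.cali_stddev), it is a plain mean/standard-deviation pass over the per-cpu unit costs. A sketch under that assumption, with illustrative parameter names; whether the variance divisor is n or n - 1 is not visible in this excerpt:

#include <math.h>

/* Two-pass mean and standard deviation over per-cpu unit costs. */
static void calc_stats(const double *cali_time, int n,
		       double *mean, double *stddev)
{
	double sum = 0.0, var = 0.0;
	int i;

	for (i = 0; i < n; i++)
		sum += cali_time[i];
	*mean = sum / n;

	for (i = 0; i < n; i++)
		var += (cali_time[i] - *mean) * (cali_time[i] - *mean);

	*stddev = n > 1 ? sqrt(var / (n - 1)) : 0.0;
}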
if (ipc.opt == IDLE_PROF_OPT_CALI)
for (i = 0; i < ipc.nr_cpus; i++) {
ipt = &ipc.ipts[i];
pthread_mutex_lock(&ipt->start_lock);
- while ((ipt->state!=TD_EXITED) && (ipt->state!=TD_NOT_CREATED)) {
+ while ((ipt->state != TD_EXITED) &&
+        (ipt->state != TD_NOT_CREATED)) {
fio_gettime(&tp, NULL);
ts.tv_sec = tp.tv_sec + 1;
ts.tv_nsec = tp.tv_usec * 1000;
if (ipc.cali_mean != 0.0) {
runt = utime_since(&ipt->tps, &ipt->tpe);
ipt->idleness = ipt->loops * ipc.cali_mean / runt;
- } else
+ } else
ipt->idleness = 0.0;
}
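The idleness figure is simple proportional accounting: the profiling thread completed ipt->loops units, each costing ipc.cali_mean microseconds on an unloaded cpu, so loops * cali_mean is the cpu time the thread effectively received, and dividing by the wall-clock runt gives the fraction of the interval the cpu had to spare. For example, with cali_mean = 2.0 usec/unit, loops = 30,000,000 and runt = 100,000,000 usec, idleness = 30e6 * 2.0 / 100e6 = 0.6, which fio_idle_prof_cpu_stat() below reports as 60% after the * 100.0 scaling.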
- /* memory allocations are freed via explicit fio_idle_prof_cleanup
+ /*
+ * memory allocations are freed via explicit fio_idle_prof_cleanup
- * after profiling stats are collected by apps.
+ * after profiling stats are collected by the caller.
*/
-
- return;
}
-/* return system idle percentage when cpu is -1;
+/*
+ * return system idle percentage when cpu is -1;
- * return one cpu idle percentage otherwise.
+ * return the idle percentage of one cpu otherwise.
*/
static double fio_idle_prof_cpu_stat(int cpu)
p = ipt->idleness;
}
- return p*100.0;
+ return p * 100.0;
}
void fio_idle_prof_cleanup(void)
return -1;
}
-#if defined(FIO_HAVE_CPU_AFFINITY) && defined(FIO_HAVE_SCHED_IDLE)
+#if defined(FIO_HAVE_CPU_AFFINITY) && defined(CONFIG_SCHED_IDLE)
if (strcmp("calibrate", args) == 0) {
ipc.opt = IDLE_PROF_OPT_CALI;
fio_idle_prof_init();
ipc.opt = IDLE_PROF_OPT_PERCPU;
return 0;
} else {
- log_err("fio: incorrect idle-prof option\n", args);
+ log_err("fio: incorrect idle-prof option: %s\n", args);
return -1;
}
#else
int i, nr_cpus = ipc.nr_cpus;
struct json_object *tmp;
char s[MAX_CPU_STR_LEN];
-
+
if (output == FIO_OUTPUT_NORMAL) {
if (ipc.opt > IDLE_PROF_OPT_CALI)
log_info("\nCPU idleness:\n");
if (ipc.opt == IDLE_PROF_OPT_PERCPU) {
log_info(" percpu: %3.2f%%", fio_idle_prof_cpu_stat(0));
- for (i=1; i<nr_cpus; i++) {
+ for (i = 1; i < nr_cpus; i++)
log_info(", %3.2f%%", fio_idle_prof_cpu_stat(i));
- }
log_info("\n");
}
return;
}
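From the format strings above, normal-mode output takes roughly this shape (values illustrative; the elided lines may also print a system-wide figure):

CPU idleness:
 percpu: 98.12%, 97.80%, 98.51%, 97.95%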
-
+
if ((ipc.opt != IDLE_PROF_OPT_NONE) && (output == FIO_OUTPUT_JSON)) {
if (!parent)
return;
json_object_add_value_float(tmp, "system", fio_idle_prof_cpu_stat(-1));
if (ipc.opt == IDLE_PROF_OPT_PERCPU) {
- for (i=0; i<nr_cpus; i++) {
+ for (i = 0; i < nr_cpus; i++) {
snprintf(s, MAX_CPU_STR_LEN, "cpu-%d", i);
json_object_add_value_float(tmp, s, fio_idle_prof_cpu_stat(i));
}
json_object_add_value_float(tmp, "unit_stddev", ipc.cali_stddev);
fio_idle_prof_cleanup();
-
- return;
}
}
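On the JSON side, the visible json_object_add_value_float() calls produce an object of roughly this shape (values illustrative; the key under which tmp is attached to parent, and any elided fields such as a unit mean, are not shown in this excerpt):

"system": 97.50,
"cpu-0": 98.12,
"cpu-1": 96.88,
"unit_stddev": 0.030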