configure: add gettid() test
diff --git a/idletime.c b/idletime.c
index fb6f9ddcb13350815ead74fc895901151be534c0..2f59f5104b4152c7b156a1bd255f898eb1ba7c72 100644
--- a/idletime.c
+++ b/idletime.c
@@ -1,27 +1,30 @@
 #include <math.h>
+#include "fio.h"
 #include "json.h"
 #include "idletime.h"
 
 static volatile struct idle_prof_common ipc;
 
-/* Get time to complete an unit work on a particular cpu.
+/*
+ * Get time to complete a unit of work on a particular cpu.
  * The minimum number in CALIBRATE_RUNS runs is returned.
  */
 static double calibrate_unit(unsigned char *data)
 {
        unsigned long t, i, j, k;
-       struct timeval tps;
+       struct timespec tps;
        double tunit = 0.0;
 
-       for (i=0; i<CALIBRATE_RUNS; i++) {
+       for (i = 0; i < CALIBRATE_RUNS; i++) {
 
                fio_gettime(&tps, NULL);
                /* scale for less variance */
-               for (j=0; j < CALIBRATE_SCALE; j++) {
+               for (j = 0; j < CALIBRATE_SCALE; j++) {
                        /* unit of work */
                        for (k=0; k < page_size; k++) {
-                               data[(k+j)%page_size] = k%256;
-                               /* we won't see STOP here. this is to match
+                               data[(k + j) % page_size] = k % 256;
+                               /*
+                                * we won't see STOP here. this is to match
                                 * the same statement in the profiling loop.
                                 */
                                if (ipc.status == IDLE_PROF_STATUS_PROF_STOP)
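
The calibration loop above drives everything that follows: each run times CALIBRATE_SCALE passes over one page, the minimum across CALIBRATE_RUNS is kept, and the per-unit cost is that minimum divided by CALIBRATE_SCALE. With purely illustrative numbers: if the fastest run takes 1200 usec for CALIBRATE_SCALE = 100 passes, calibrate_unit() returns 1200 / 100 = 12 usec per unit of work, and calibration_stats() later folds the per-thread results into ipc.cali_mean and ipc.cali_stddev.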
@@ -34,11 +37,41 @@ static double calibrate_unit(unsigned char *data)
                        continue;
 
                /* get the minimum time to complete CALIBRATE_SCALE units */
-               if ((i==0) || ((double)t < tunit))
+               if ((i == 0) || ((double)t < tunit))
                        tunit = (double)t;
        }
 
-       return tunit/CALIBRATE_SCALE;
+       return tunit / CALIBRATE_SCALE;
+}
+
+static void free_cpu_affinity(struct idle_prof_thread *ipt)
+{
+#if defined(FIO_HAVE_CPU_AFFINITY)
+       fio_cpuset_exit(&ipt->cpu_mask);
+#endif
+}
+
+static int set_cpu_affinity(struct idle_prof_thread *ipt)
+{
+#if defined(FIO_HAVE_CPU_AFFINITY)
+       if (fio_cpuset_init(&ipt->cpu_mask)) {
+               log_err("fio: cpuset init failed\n");
+               return -1;
+       }
+
+       fio_cpu_set(&ipt->cpu_mask, ipt->cpu);
+
+       if (fio_setaffinity(gettid(), ipt->cpu_mask)) {
+               log_err("fio: fio_setaffinity failed\n");
+               fio_cpuset_exit(&ipt->cpu_mask);
+               return -1;
+       }
+
+       return 0;
+#else
+       log_err("fio: fio_setaffinity not supported\n");
+       return -1;
+#endif
 }
 
 static void *idle_prof_thread_fn(void *data)
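
The new set_cpu_affinity() helper pins the profiling thread with fio_setaffinity(gettid(), ...), which is presumably why this commit adds a configure test for gettid(): glibc only gained a gettid() wrapper in 2.30, so older systems need a fallback, pulled in here via the new "fio.h" include. A minimal sketch of such a fallback, assuming a configure-generated symbol along the lines of CONFIG_HAVE_GETTID (the exact symbol name and the header fio keeps this in may differ):

#include <unistd.h>
#include <sys/syscall.h>

#ifndef CONFIG_HAVE_GETTID
/* Illustrative Linux-only fallback: no library gettid(), use the raw syscall. */
static inline pid_t gettid(void)
{
        return (pid_t) syscall(SYS_gettid);
}
#endif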
@@ -51,20 +84,12 @@ static void *idle_prof_thread_fn(void *data)
        pthread_mutex_lock(&ipt->init_lock);
 
        /* exit if any other thread failed to start */
-       if (ipc.status == IDLE_PROF_STATUS_ABORT)
+       if (ipc.status == IDLE_PROF_STATUS_ABORT) {
+               pthread_mutex_unlock(&ipt->init_lock);
                return NULL;
+       }
 
-#if defined(FIO_HAVE_CPU_AFFINITY)
-       os_cpu_mask_t cpu_mask;
-       memset(&cpu_mask, 0, sizeof(cpu_mask));
-       fio_cpu_set(&cpu_mask, ipt->cpu);
-
-       if ((retval=fio_setaffinity(gettid(), cpu_mask)) == -1)
-               log_err("fio: fio_setaffinity failed\n");
-#else
-       retval = -1;
-       log_err("fio: fio_setaffinity not supported\n");
-#endif
+       retval = set_cpu_affinity(ipt);
        if (retval == -1) {
                ipt->state = TD_EXITED;
                pthread_mutex_unlock(&ipt->init_lock);
@@ -84,7 +109,7 @@ static void *idle_prof_thread_fn(void *data)
        if (retval == -1) {
                ipt->state = TD_EXITED;
                pthread_mutex_unlock(&ipt->init_lock);
-               return NULL;
+               goto do_exit;
        }
 
        ipt->state = TD_INITIALIZED;
@@ -97,20 +122,24 @@ static void *idle_prof_thread_fn(void *data)
        pthread_mutex_lock(&ipt->start_lock);
 
        /* exit if other threads failed to initialize */
-       if (ipc.status == IDLE_PROF_STATUS_ABORT)
-               return NULL;
+       if (ipc.status == IDLE_PROF_STATUS_ABORT) {
+               pthread_mutex_unlock(&ipt->start_lock);
+               goto do_exit;
+       }
 
        /* exit if we are doing calibration only */
-       if (ipc.status == IDLE_PROF_STATUS_CALI_STOP)
-               return NULL;
+       if (ipc.status == IDLE_PROF_STATUS_CALI_STOP) {
+               pthread_mutex_unlock(&ipt->start_lock);
+               goto do_exit;
+       }
 
        fio_gettime(&ipt->tps, NULL);
        ipt->state = TD_RUNNING;
 
        j = 0;
        while (1) {
-               for (k=0; k < page_size; k++) {
-                       ipt->data[(k+j)%page_size] = k%256;
+               for (k = 0; k < page_size; k++) {
+                       ipt->data[(k + j) % page_size] = k % 256;
                        if (ipc.status == IDLE_PROF_STATUS_PROF_STOP) {
                                fio_gettime(&ipt->tpe, NULL);
                                goto idle_prof_done;
@@ -121,10 +150,12 @@ static void *idle_prof_thread_fn(void *data)
 
 idle_prof_done:
 
-       ipt->loops = j + (double)k/page_size;
+       ipt->loops = j + (double) k / page_size;
        ipt->state = TD_EXITED;
        pthread_mutex_unlock(&ipt->start_lock);
 
+do_exit:
+       free_cpu_affinity(ipt);
        return NULL;
 }
 
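
The fractional bookkeeping above lets loops carry partial progress through the page: with page_size = 4096, stopping after j = 7 full passes and k = 1024 bytes into the next one gives loops = 7 + 1024.0/4096 = 7.25 units (numbers are only illustrative). That fraction keeps the idleness calculation in fio_idle_prof_stop() from quantizing to whole page passes.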
@@ -132,7 +163,7 @@ idle_prof_done:
 static void calibration_stats(void)
 {
        int i;
-       double sum=0.0, var=0.0;
+       double sum = 0.0, var = 0.0;
        struct idle_prof_thread *ipt;
 
        for (i = 0; i < ipc.nr_cpus; i++) {
@@ -153,9 +184,8 @@ static void calibration_stats(void)
 void fio_idle_prof_init(void)
 {
        int i, ret;
-       struct timeval tp;
        struct timespec ts;
-        pthread_attr_t tattr;
+       pthread_attr_t tattr;
        struct idle_prof_thread *ipt;
 
        ipc.nr_cpus = cpus_online();
@@ -186,7 +216,8 @@ void fio_idle_prof_init(void)
                return;
        }
 
-       /* profiling aborts on any single thread failure since the
+       /*
+        * profiling aborts on any single thread failure since the
         * result won't be accurate if any cpu is not used.
         */
        for (i = 0; i < ipc.nr_cpus; i++) {
@@ -224,18 +255,18 @@ void fio_idle_prof_init(void)
                        ipc.status = IDLE_PROF_STATUS_ABORT;
                        log_err("fio: pthread_create %s\n", strerror(ret));
                        break;
-               } else {
+               } else
                        ipt->state = TD_CREATED;
-               }
 
                if ((ret = pthread_detach(ipt->thread))) {
                        /* log error and let the thread spin */
-                       log_err("fio: pthread_detatch %s\n", strerror(ret));
+                       log_err("fio: pthread_detach %s\n", strerror(ret));
                }
        }
 
-       /* let good threads continue so that they can exit
-        * if errors on other threads occurred previously. 
+       /*
+        * let good threads continue so that they can exit
+        * if errors on other threads occurred previously.
         */
        for (i = 0; i < ipc.nr_cpus; i++) {
                ipt = &ipc.ipts[i];
@@ -249,15 +280,16 @@ void fio_idle_prof_init(void)
        for (i = 0; i < ipc.nr_cpus; i++) {
                ipt = &ipc.ipts[i];
                pthread_mutex_lock(&ipt->init_lock);
-               while ((ipt->state!=TD_EXITED) && (ipt->state!=TD_INITIALIZED)) {
-                       fio_gettime(&tp, NULL);
-                       ts.tv_sec = tp.tv_sec + 1;
-                       ts.tv_nsec = tp.tv_usec * 1000;
+               while ((ipt->state != TD_EXITED) &&
+                      (ipt->state != TD_INITIALIZED)) {
+                       fio_gettime(&ts, NULL);
+                       ts.tv_sec += 1;
                        pthread_cond_timedwait(&ipt->cond, &ipt->init_lock, &ts);
                }
                pthread_mutex_unlock(&ipt->init_lock);
        
-               /* any thread failed to initialize would abort other threads
+               /*
+                * any thread that failed to initialize would abort other threads
                 * later after fio_idle_prof_start. 
                 */     
                if (ipt->state == TD_EXITED)
@@ -266,7 +298,7 @@ void fio_idle_prof_init(void)
 
        if (ipc.status != IDLE_PROF_STATUS_ABORT)
                calibration_stats();
-       else 
+       else
                ipc.cali_mean = ipc.cali_stddev = 0.0;
 
        if (ipc.opt == IDLE_PROF_OPT_CALI)
@@ -292,7 +324,6 @@ void fio_idle_prof_stop(void)
 {
        int i;
        uint64_t runt;
-       struct timeval tp;
        struct timespec ts;
        struct idle_prof_thread *ipt;
 
@@ -308,10 +339,10 @@ void fio_idle_prof_stop(void)
        for (i = 0; i < ipc.nr_cpus; i++) {
                ipt = &ipc.ipts[i];
                pthread_mutex_lock(&ipt->start_lock);
-               while ((ipt->state!=TD_EXITED) && (ipt->state!=TD_NOT_CREATED)) {
-                       fio_gettime(&tp, NULL);
-                       ts.tv_sec = tp.tv_sec + 1;
-                       ts.tv_nsec = tp.tv_usec * 1000;
+               while ((ipt->state != TD_EXITED) &&
+                      (ipt->state != TD_NOT_CREATED)) {
+                       fio_gettime(&ts, NULL);
+                       ts.tv_sec += 1;
                        /* timed wait in case a signal is not received */
                        pthread_cond_timedwait(&ipt->cond, &ipt->start_lock, &ts);
                }
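
Both this wait and the one in fio_idle_prof_init() above follow the usual absolute-deadline idiom for pthread_cond_timedwait(): sample the clock, add one second, and retry until the thread reaches the expected state. Switching fio_gettime() to fill a struct timespec directly is what makes the old tv_usec * 1000 conversion unnecessary. A minimal, self-contained sketch of the idiom (names are illustrative, and plain clock_gettime() stands in for fio's fio_gettime() wrapper):

#include <pthread.h>
#include <time.h>

/* Wait, in 1-second slices, until *state reaches 'target'. */
static void wait_for_state(pthread_mutex_t *lock, pthread_cond_t *cond,
                           const volatile int *state, int target)
{
        struct timespec ts;

        pthread_mutex_lock(lock);
        while (*state != target) {
                clock_gettime(CLOCK_REALTIME, &ts);     /* absolute deadline base */
                ts.tv_sec += 1;                         /* wake at most 1s from now */
                pthread_cond_timedwait(cond, lock, &ts);
        }
        pthread_mutex_unlock(lock);
}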
@@ -320,19 +351,22 @@ void fio_idle_prof_stop(void)
                /* calculate idleness */
                if (ipc.cali_mean != 0.0) {
                        runt = utime_since(&ipt->tps, &ipt->tpe);
-                       ipt->idleness = ipt->loops * ipc.cali_mean / runt;
-               } else 
+                       if (runt)
+                               ipt->idleness = ipt->loops * ipc.cali_mean / runt;
+                       else
+                               ipt->idleness = 0.0;
+               } else
                        ipt->idleness = 0.0;
        }
 
-       /* memory allocations are freed via explicit fio_idle_prof_cleanup
+       /*
+        * memory allocations are freed via explicit fio_idle_prof_cleanup
         * after profiling stats are collected by apps.  
         */
-
-       return;
 }
 
-/* return system idle percentage when cpu is -1;
+/*
+ * return system idle percentage when cpu is -1;
  * return one cpu idle percentage otherwise.
  */
 static double fio_idle_prof_cpu_stat(int cpu)
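
The idleness figure computed in fio_idle_prof_stop() above is simply (idle units of work completed) x (calibrated cost per unit) / (elapsed wall time). With purely illustrative numbers: cali_mean = 12 usec per unit, loops = 50000 units completed between tps and tpe, and runt = 1,000,000 usec gives idleness = 50000 * 12 / 1000000 = 0.6, which fio_idle_prof_cpu_stat() reports as 60.00%. The new runt check merely avoids a division by zero when the start and end timestamps are identical.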
@@ -360,7 +394,7 @@ static double fio_idle_prof_cpu_stat(int cpu)
                p = ipt->idleness;
        }
 
-       return p*100.0;
+       return p * 100.0;
 }
 
 void fio_idle_prof_cleanup(void)
@@ -391,7 +425,7 @@ int fio_idle_prof_parse_opt(const char *args)
                fio_idle_prof_init();
                fio_idle_prof_start();
                fio_idle_prof_stop();
-               show_idle_prof_stats(FIO_OUTPUT_NORMAL, NULL);
+               show_idle_prof_stats(FIO_OUTPUT_NORMAL, NULL, NULL);
                return 1;
        } else if (strcmp("system", args) == 0) {
                ipc.opt = IDLE_PROF_OPT_SYSTEM;
@@ -400,7 +434,7 @@ int fio_idle_prof_parse_opt(const char *args)
                ipc.opt = IDLE_PROF_OPT_PERCPU;
                return 0;
        } else {
-               log_err("fio: incorrect idle-prof option\n", args);
+               log_err("fio: incorrect idle-prof option: %s\n", args);
                return -1;
        }       
 #else
@@ -409,42 +443,38 @@ int fio_idle_prof_parse_opt(const char *args)
 #endif
 }
 
-void show_idle_prof_stats(int output, struct json_object *parent)
+void show_idle_prof_stats(int output, struct json_object *parent,
+                         struct buf_output *out)
 {
        int i, nr_cpus = ipc.nr_cpus;
        struct json_object *tmp;
        char s[MAX_CPU_STR_LEN];
+
        if (output == FIO_OUTPUT_NORMAL) {
                if (ipc.opt > IDLE_PROF_OPT_CALI)
-                       log_info("\nCPU idleness:\n");
+                       log_buf(out, "\nCPU idleness:\n");
                else if (ipc.opt == IDLE_PROF_OPT_CALI)
-                       log_info("CPU idleness:\n");
+                       log_buf(out, "CPU idleness:\n");
 
                if (ipc.opt >= IDLE_PROF_OPT_SYSTEM)
-                       log_info("  system: %3.2f%%\n", fio_idle_prof_cpu_stat(-1));
+                       log_buf(out, "  system: %3.2f%%\n", fio_idle_prof_cpu_stat(-1));
 
                if (ipc.opt == IDLE_PROF_OPT_PERCPU) {
-                       log_info("  percpu: %3.2f%%", fio_idle_prof_cpu_stat(0));
-                       for (i=1; i<nr_cpus; i++) {
-                               log_info(", %3.2f%%", fio_idle_prof_cpu_stat(i));
-                       }
-                       log_info("\n");
+                       log_buf(out, "  percpu: %3.2f%%", fio_idle_prof_cpu_stat(0));
+                       for (i = 1; i < nr_cpus; i++)
+                               log_buf(out, ", %3.2f%%", fio_idle_prof_cpu_stat(i));
+                       log_buf(out, "\n");
                }
 
                if (ipc.opt >= IDLE_PROF_OPT_CALI) {
-                       log_info("  unit work: mean=%3.2fus,", ipc.cali_mean);
-                       log_info(" stddev=%3.2f\n", ipc.cali_stddev);
+                       log_buf(out, "  unit work: mean=%3.2fus,", ipc.cali_mean);
+                       log_buf(out, " stddev=%3.2f\n", ipc.cali_stddev);
                }
 
-               /* dynamic mem allocations can now be freed */
-               if (ipc.opt != IDLE_PROF_OPT_NONE)
-                       fio_idle_prof_cleanup();
-
                return;
        }
-       if ((ipc.opt != IDLE_PROF_OPT_NONE) && (output == FIO_OUTPUT_JSON)) {
+
+       if ((ipc.opt != IDLE_PROF_OPT_NONE) && (output & FIO_OUTPUT_JSON)) {
                if (!parent)
                        return;
 
@@ -456,7 +486,7 @@ void show_idle_prof_stats(int output, struct json_object *parent)
                json_object_add_value_float(tmp, "system", fio_idle_prof_cpu_stat(-1));
 
                if (ipc.opt == IDLE_PROF_OPT_PERCPU) {
-                       for (i=0; i<nr_cpus; i++) {
+                       for (i = 0; i < nr_cpus; i++) {
                                snprintf(s, MAX_CPU_STR_LEN, "cpu-%d", i);
                                json_object_add_value_float(tmp, s, fio_idle_prof_cpu_stat(i));
                        }
@@ -464,9 +494,5 @@ void show_idle_prof_stats(int output, struct json_object *parent)
 
                json_object_add_value_float(tmp, "unit_mean", ipc.cali_mean);
                json_object_add_value_float(tmp, "unit_stddev", ipc.cali_stddev);
-               
-               fio_idle_prof_cleanup();
-
-               return;
        }
 }
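
With the buf_output parameter added, the text report goes through log_buf() rather than log_info(), so the caller decides where it lands. A usage sketch based only on the calls visible in this diff (root is a stand-in for whatever json_object the caller has built; passing NULL for out keeps the direct-to-log behaviour, as fio_idle_prof_parse_opt() does above, and the JSON path never touches out at all):

        /* plain text report, written straight to fio's normal log output */
        show_idle_prof_stats(FIO_OUTPUT_NORMAL, NULL, NULL);

        /* JSON report: idleness values are attached to an existing object */
        show_idle_prof_stats(FIO_OUTPUT_JSON, root, NULL);

Note that the function no longer frees the profiling buffers itself; with the embedded cleanup calls removed, fio_idle_prof_cleanup() is presumably expected to be invoked separately once the stats have been consumed.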