Add more context to the error messages

td_verror() now takes a third argument naming the call that failed, so the
reported error says where it came from. While in there, plug io_u leaks on
the early-exit paths in do_verify(), treat only negative returns from the
queued-completion helpers as errors, and rework reap_threads() to handle
threads and forked jobs separately, including ECHILD and exit status
propagation.
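
The call sites in the diff below only show the new third argument being
passed. As a rough, standalone sketch (not part of this commit, and not
fio's actual definition), the extended td_verror() boils down to recording
the first error and logging which call produced it; the struct and names
here are made up for illustration:

#include <errno.h>
#include <stdio.h>
#include <string.h>

struct thread_data_sketch {
	int error;		/* first error seen by the job */
};

static void td_verror_sketch(struct thread_data_sketch *td, int err,
			     const char *ctx)
{
	if (!td->error)
		td->error = err;
	/* the new third argument: name the call that failed */
	fprintf(stderr, "fio: %s: %s\n", ctx, strerror(err));
}

int main(void)
{
	struct thread_data_sketch td = { 0 };

	td_verror_sketch(&td, EINVAL, "iosched_switch");
	return td.error != 0;
}
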
diff --git a/fio.c b/fio.c
index 95e06dd8e045136ce9954a8e759aef3dac09a7d0..920639395cbda1101ba57c36b7cc421f6a3d4f27 100644
--- a/fio.c
+++ b/fio.c
@@ -160,9 +160,19 @@ static void cleanup_pending_aio(struct thread_data *td)
                list_for_each_safe(entry, n, &td->io_u_busylist) {
                        io_u = list_entry(entry, struct io_u, list);
 
-                       r = td->io_ops->cancel(td, io_u);
-                       if (!r)
+                       /*
+                        * if the io_u isn't in flight, then that generally
+                        * means someone leaked an io_u. complain but fix
+                        * it up, so we don't stall here.
+                        */
+                       if ((io_u->flags & IO_U_F_FLIGHT) == 0) {
+                               log_err("fio: non-busy IO on busy list\n");
                                put_io_u(td, io_u);
+                       } else {
+                               r = td->io_ops->cancel(td, io_u);
+                               if (!r)
+                                       put_io_u(td, io_u);
+                       }
                }
        }
 
@@ -193,15 +203,15 @@ static int fio_io_sync(struct thread_data *td, struct fio_file *f)
 requeue:
        ret = td_io_queue(td, io_u);
        if (ret < 0) {
-               td_verror(td, io_u->error);
+               td_verror(td, io_u->error, "td_io_queue");
                put_io_u(td, io_u);
                return 1;
        } else if (ret == FIO_Q_QUEUED) {
-               if (io_u_queued_complete(td, 1, NULL))
+               if (io_u_queued_complete(td, 1, NULL) < 0)
                        return 1;
        } else if (ret == FIO_Q_COMPLETED) {
                if (io_u->error) {
-                       td_verror(td, io_u->error);
+                       td_verror(td, io_u->error, "td_io_queue");
                        return 1;
                }
 
@@ -248,15 +258,20 @@ static void do_verify(struct thread_data *td)
                if (!io_u)
                        break;
 
-               if (runtime_exceeded(td, &io_u->start_time))
+               if (runtime_exceeded(td, &io_u->start_time)) {
+                       put_io_u(td, io_u);
                        break;
+               }
 
-               if (get_next_verify(td, io_u))
+               if (get_next_verify(td, io_u)) {
+                       put_io_u(td, io_u);
                        break;
+               }
 
-               if (td_io_prep(td, io_u))
+               if (td_io_prep(td, io_u)) {
+                       put_io_u(td, io_u);
                        break;
-
+               }
 requeue:
                ret = td_io_queue(td, io_u);
 
@@ -272,7 +287,7 @@ requeue:
                                goto requeue;
                        }
                        ret = io_u_sync_complete(td, io_u, verify_io_u);
-                       if (ret)
+                       if (ret < 0)
                                break;
                        continue;
                case FIO_Q_QUEUED:
@@ -283,7 +298,7 @@ requeue:
                        break;
                default:
                        assert(ret < 0);
-                       td_verror(td, -ret);
+                       td_verror(td, -ret, "td_io_queue");
                        break;
                }
 
@@ -306,13 +321,10 @@ requeue:
                 * Reap required number of io units, if any, and do the
                 * verification on them through the callback handler
                 */
-               if (io_u_queued_complete(td, min_events, verify_io_u))
+               if (io_u_queued_complete(td, min_events, verify_io_u) < 0)
                        break;
        }
 
-       if (io_u)
-               put_io_u(td, io_u);
-
        if (td->cur_depth)
                cleanup_pending_aio(td);
 
@@ -451,7 +463,7 @@ requeue:
                if (check_min_rate(td, &comp_time)) {
                        if (exitall_on_terminate)
                                terminate_threads(td->groupid, 0);
-                       td_verror(td, ENODATA);
+                       td_verror(td, ENODATA, "check_min_rate");
                        break;
                }
 
@@ -551,9 +563,12 @@ static int init_io_u(struct thread_data *td)
                        fill_rand_buf(io_u, max_bs);
 
                io_u->index = i;
+               io_u->flags = IO_U_F_FREE;
                list_add(&io_u->list, &td->io_u_freelist);
        }
 
+       io_u_init_timeout();
+
        return 0;
 }
 
@@ -570,7 +585,7 @@ static int switch_ioscheduler(struct thread_data *td)
 
        f = fopen(tmp, "r+");
        if (!f) {
-               td_verror(td, errno);
+               td_verror(td, errno, "fopen");
                return 1;
        }
 
@@ -579,7 +594,7 @@ static int switch_ioscheduler(struct thread_data *td)
         */
        ret = fwrite(td->ioscheduler, strlen(td->ioscheduler), 1, f);
        if (ferror(f) || ret != 1) {
-               td_verror(td, errno);
+               td_verror(td, errno, "fwrite");
                fclose(f);
                return 1;
        }
@@ -591,7 +606,7 @@ static int switch_ioscheduler(struct thread_data *td)
         */
        ret = fread(tmp, 1, sizeof(tmp), f);
        if (ferror(f) || ret < 0) {
-               td_verror(td, errno);
+               td_verror(td, errno, "fread");
                fclose(f);
                return 1;
        }
@@ -599,7 +614,7 @@ static int switch_ioscheduler(struct thread_data *td)
        sprintf(tmp2, "[%s]", td->ioscheduler);
        if (!strstr(tmp, tmp2)) {
                log_err("fio: io scheduler %s not found\n", td->ioscheduler);
-               td_verror(td, EINVAL);
+               td_verror(td, EINVAL, "iosched_switch");
                fclose(f);
                return 1;
        }
@@ -655,7 +670,7 @@ static void *thread_main(void *data)
                goto err;
 
        if (fio_setaffinity(td) == -1) {
-               td_verror(td, errno);
+               td_verror(td, errno, "cpu_set_affinity");
                goto err;
        }
 
@@ -664,13 +679,13 @@ static void *thread_main(void *data)
 
        if (td->ioprio) {
                if (ioprio_set(IOPRIO_WHO_PROCESS, 0, td->ioprio) == -1) {
-                       td_verror(td, errno);
+                       td_verror(td, errno, "ioprio_set");
                        goto err;
                }
        }
 
        if (nice(td->nice) == -1) {
-               td_verror(td, errno);
+               td_verror(td, errno, "nice");
                goto err;
        }
 
@@ -702,6 +717,7 @@ static void *thread_main(void *data)
        }
 
        fio_gettime(&td->epoch, NULL);
+       memcpy(&td->timeout_end, &td->epoch, sizeof(td->epoch));
        getrusage(RUSAGE_SELF, &td->ts.ru_start);
 
        runtime[0] = runtime[1] = 0;
@@ -769,7 +785,7 @@ err:
        close_ioengine(td);
        cleanup_io_u(td);
        td_set_runstate(td, TD_EXITED);
-       return (void *) td->error;
+       return (void *) (unsigned long) td->error;
 }
 
 /*
@@ -792,7 +808,7 @@ static int fork_main(int shmid, int offset)
        td = data + offset * sizeof(struct thread_data);
        ret = thread_main(td);
        shmdt(data);
-       return (int) ret;
+       return (int) (unsigned long) ret;
 }
 
 /*
@@ -808,6 +824,8 @@ static void reap_threads(int *nr_running, int *t_rate, int *m_rate)
         */
        pending = cputhreads = 0;
        for_each_td(td, i) {
+               int flags = 0;
+
                /*
                 * ->io_ops is NULL for a thread that has closed its
                 * io engine
@@ -815,55 +833,66 @@ static void reap_threads(int *nr_running, int *t_rate, int *m_rate)
                if (td->io_ops && td->io_ops->flags & FIO_CPUIO)
                        cputhreads++;
 
-               if (td->runstate < TD_EXITED) {
-                       /*
-                        * check if someone quit or got killed in an unusual way
-                        */
-                       ret = waitpid(td->pid, &status, WNOHANG);
-                       if (ret < 0)
-                               perror("waitpid");
-                       else if ((ret == td->pid) && WIFSIGNALED(status)) {
-                               int sig = WTERMSIG(status);
-
-                               log_err("fio: pid=%d, got signal=%d\n", td->pid, sig);
+               if (!td->pid || td->runstate == TD_REAPED)
+                       continue;
+               if (td->use_thread) {
+                       if (td->runstate == TD_EXITED) {
                                td_set_runstate(td, TD_REAPED);
                                goto reaped;
                        }
+                       continue;
                }
 
-               if (td->runstate != TD_EXITED) {
-                       if (td->runstate < TD_RUNNING)
-                               pending++;
+               flags = WNOHANG;
+               if (td->runstate == TD_EXITED)
+                       flags = 0;
 
-                       continue;
-               }
+               /*
+                * check if someone quit or got killed in an unusual way
+                */
+               ret = waitpid(td->pid, &status, flags);
+               if (ret < 0) {
+                       if (errno == ECHILD) {
+                               log_err("fio: pid=%d disappeared %d\n", td->pid, td->runstate);
+                               td_set_runstate(td, TD_REAPED);
+                               goto reaped;
+                       }
+                       perror("waitpid");
+               } else if (ret == td->pid) {
+                       if (WIFSIGNALED(status)) {
+                               int sig = WTERMSIG(status);
 
-               if (td->error)
-                       exit_value++;
+                               log_err("fio: pid=%d, got signal=%d\n", td->pid, sig);
+                               td_set_runstate(td, TD_REAPED);
+                               goto reaped;
+                       }
+                       if (WIFEXITED(status)) {
+                               if (WEXITSTATUS(status) && !td->error)
+                                       td->error = WEXITSTATUS(status);
 
-               td_set_runstate(td, TD_REAPED);
+                               td_set_runstate(td, TD_REAPED);
+                               goto reaped;
+                       }
+               }
 
+               /*
+                * thread is not dead, continue
+                */
+               continue;
+reaped:
                if (td->use_thread) {
                        long ret;
 
                        if (pthread_join(td->thread, (void *) &ret))
-                               perror("thread_join");
-               } else {
-                       int status;
-
-                       ret = waitpid(td->pid, &status, 0);
-                       if (ret < 0)
-                               perror("waitpid");
-                       else if (WIFEXITED(status) && WEXITSTATUS(status)) {
-                               if (!exit_value)
-                                       exit_value++;
-                       }
+                               perror("pthread_join");
                }
 
-reaped:
                (*nr_running)--;
                (*m_rate) -= td->ratemin;
                (*t_rate) -= td->rate;
+
+               if (td->error)
+                       exit_value++;
        }
 
        if (*nr_running == cputhreads && !pending)
@@ -919,6 +948,8 @@ static void run_threads(void)
                init_disk_util(td);
        }
 
+       set_genesis_time();
+
        while (todo) {
                struct thread_data *map[MAX_JOBS];
                struct timeval this_start;
@@ -1077,6 +1108,8 @@ int main(int argc, char *argv[])
                setup_log(&agg_io_log[DDIR_WRITE]);
        }
 
+       set_genesis_time();
+
        disk_util_timer_arm();
 
        run_threads();
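
As a side note on the reap_threads() rework above, here is a small
standalone illustration (not fio code; the names are made up) of the
waitpid() pattern it adopts: poll with WNOHANG while the job may still be
running, wait unconditionally once it is known to have exited, and treat
ECHILD as already reaped:

#include <errno.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

static int reap_one(pid_t pid, int known_exited)
{
	int status, flags = known_exited ? 0 : WNOHANG;
	pid_t ret = waitpid(pid, &status, flags);

	if (ret < 0)
		return errno == ECHILD ? 1 : -1;	/* already gone, or real error */
	if (ret == 0)
		return 0;				/* still running */
	if (WIFSIGNALED(status))
		fprintf(stderr, "pid=%d got signal %d\n", (int) pid, WTERMSIG(status));
	else if (WIFEXITED(status) && WEXITSTATUS(status))
		fprintf(stderr, "pid=%d exited with %d\n", (int) pid, WEXITSTATUS(status));
	return 1;					/* reaped */
}

int main(void)
{
	pid_t pid = fork();

	if (pid == 0)
		_exit(7);			/* child: exit with a nonzero status */

	while (reap_one(pid, 0) == 0)
		usleep(1000);			/* poll, as the main loop does between reaps */
	return 0;
}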