td->runstate = runstate;
}
-static void terminate_threads(int group_id, int forced_kill)
+static void terminate_threads(int group_id)
{
struct thread_data *td;
int i;
for_each_td(td, i) {
-if (group_id == TERMINATE_ALL || groupid == td->groupid) {
+if (group_id == TERMINATE_ALL || group_id == td->groupid) {
-kill(td->pid, SIGQUIT);
+/*
+ * if the thread is running, just let it exit
+ */
+if (td->runstate < TD_RUNNING)
+kill(td->pid, SIGQUIT);
td->terminate = 1;
td->start_delay = 0;
-if (forced_kill)
-td_set_runstate(td, TD_EXITED);
}
}
}
default:
printf("\nfio: terminating on signal %d\n", sig);
fflush(stdout);
- terminate_threads(TERMINATE_ALL, 0);
+ terminate_threads(TERMINATE_ALL);
break;
}
}
}
io_u->end_io = verify_io_u;
-requeue:
- ret = td_io_queue(td, io_u);
+ ret = td_io_queue(td, io_u);
switch (ret) {
case FIO_Q_COMPLETED:
if (io_u->error)
io_u->xfer_buflen = io_u->resid;
io_u->xfer_buf += bytes;
- goto requeue;
+ requeue_io_u(td, &io_u);
+ } else {
+ ret = io_u_sync_complete(td, io_u);
+ if (ret < 0)
+ break;
}
- ret = io_u_sync_complete(td, io_u);
- if (ret < 0)
- break;
continue;
case FIO_Q_QUEUED:
break;
put_io_u(td, io_u);
break;
}
-requeue:
- ret = td_io_queue(td, io_u);
+ ret = td_io_queue(td, io_u);
switch (ret) {
case FIO_Q_COMPLETED:
if (io_u->error)
io_u->xfer_buflen = io_u->resid;
io_u->xfer_buf += bytes;
- goto requeue;
+ requeue_io_u(td, &io_u);
+ } else {
+ fio_gettime(&comp_time, NULL);
+ bytes_done = io_u_sync_complete(td, io_u);
+ if (bytes_done < 0)
+ ret = bytes_done;
}
- fio_gettime(&comp_time, NULL);
- bytes_done = io_u_sync_complete(td, io_u);
- if (bytes_done < 0)
- ret = bytes_done;
break;
case FIO_Q_QUEUED:
/*
if (check_min_rate(td, &comp_time)) {
if (exitall_on_terminate)
- terminate_threads(td->groupid, 0);
+ terminate_threads(td->groupid);
td_verror(td, ENODATA, "check_min_rate");
break;
}
}
if (exitall_on_terminate)
- terminate_threads(td->groupid, 0);
+ terminate_threads(td->groupid);
err:
if (td->error)
}
if (*nr_running == cputhreads && !pending)
- terminate_threads(TERMINATE_ALL, 0);
+ terminate_threads(TERMINATE_ALL);
}
/*