#include "fio.h"
#include "hash.h"
#include "smalloc.h"
+#include "verify.h"
+#include "diskutil.h"
unsigned long page_mask;
unsigned long page_size;
-#define ALIGN(buf) \
+
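+/*
+ * Round a buffer address up to the next page boundary: with 4KB pages
+ * (page_mask == 0xfff), 0x12345 rounds up to 0x13000.
+ */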
+#define PAGE_ALIGN(buf) \
(char *) (((unsigned long) (buf) + page_mask) & ~page_mask)
int groupid = 0;
unsigned long done_secs = 0;
static struct fio_mutex *startup_mutex;
+static struct fio_mutex *writeout_mutex;
static volatile int fio_abort;
static int exit_value;
static struct itimerval itimer;
+static pthread_t gtod_thread;
struct io_log *agg_io_log[2];
}
}
+/*
+ * Happens on threaded runs with ctrl-c; ignore our own SIGQUIT
+ */
+static void sig_quit(int sig)
+{
+}
+
static void sig_int(int sig)
{
if (threads) {
act.sa_handler = sig_ill;
act.sa_flags = SA_RESTART;
sigaction(SIGILL, &act, NULL);
-}
-static inline int should_check_rate(struct thread_data *td)
-{
- /*
- * No minimum rate set, always ok
- */
- if (!td->o.ratemin && !td->o.rate_iops_min)
- return 0;
-
- return 1;
+ memset(&act, 0, sizeof(act));
+ act.sa_handler = sig_quit;
+ act.sa_flags = SA_RESTART;
+ sigaction(SIGQUIT, &act, NULL);
}
/*
* Check if we are above the minimum rate given.
*/
-static int check_min_rate(struct thread_data *td, struct timeval *now)
+static int __check_min_rate(struct thread_data *td, struct timeval *now,
+ enum fio_ddir ddir)
{
unsigned long long bytes = 0;
unsigned long iops = 0;
unsigned long spent;
unsigned long rate;
+ unsigned int ratemin = 0;
+ unsigned int rate_iops = 0;
+ unsigned int rate_iops_min = 0;
+
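+ /*
+ * No minimum rate set for this direction, always ok
+ */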
+ if (!td->o.ratemin[ddir] && !td->o.rate_iops_min[ddir])
+ return 0;
/*
* allow a 2 second settle period in the beginning
*/
if (mtime_since(&td->start, now) < 2000)
return 0;
- if (td_read(td)) {
- iops += td->io_blocks[DDIR_READ];
- bytes += td->this_io_bytes[DDIR_READ];
- }
- if (td_write(td)) {
- iops += td->io_blocks[DDIR_WRITE];
- bytes += td->this_io_bytes[DDIR_WRITE];
- }
+ iops += td->io_blocks[ddir];
+ bytes += td->this_io_bytes[ddir];
+ ratemin += td->o.ratemin[ddir];
+ rate_iops += td->o.rate_iops[ddir];
+ rate_iops_min += td->o.rate_iops_min[ddir];
/*
* if rate blocks is set, sample is running
*/
- if (td->rate_bytes || td->rate_blocks) {
- spent = mtime_since(&td->lastrate, now);
+ if (td->rate_bytes[ddir] || td->rate_blocks[ddir]) {
+ spent = mtime_since(&td->lastrate[ddir], now);
if (spent < td->o.ratecycle)
return 0;
- if (td->o.rate) {
+ if (td->o.rate[ddir]) {
/*
* check bandwidth specified rate
*/
- if (bytes < td->rate_bytes) {
+ if (bytes < td->rate_bytes[ddir]) {
log_err("%s: min rate %u not met\n", td->o.name,
- td->o.ratemin);
+ ratemin);
return 1;
} else {
- rate = (bytes - td->rate_bytes) / spent;
- if (rate < td->o.ratemin ||
- bytes < td->rate_bytes) {
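+ /*
+ * spent is in msecs (from mtime_since()), so scale the
+ * byte delta by 1000 to get a per-second rate.
+ */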
+ rate = ((bytes - td->rate_bytes[ddir]) * 1000) / spent;
+ if (rate < ratemin ||
+ bytes < td->rate_bytes[ddir]) {
log_err("%s: min rate %u not met, got"
- " %luKiB/sec\n", td->o.name,
- td->o.ratemin, rate);
+ " %luKB/sec\n", td->o.name,
+ ratemin, rate);
return 1;
}
}
/*
* checks iops specified rate
*/
- if (iops < td->o.rate_iops) {
+ if (iops < rate_iops) {
log_err("%s: min iops rate %u not met\n",
- td->o.name, td->o.rate_iops);
+ td->o.name, rate_iops);
return 1;
} else {
- rate = (iops - td->rate_blocks) / spent;
- if (rate < td->o.rate_iops_min ||
- iops < td->rate_blocks) {
+ rate = ((iops - td->rate_blocks[ddir]) * 1000) / spent;
+ if (rate < rate_iops_min ||
+ iops < td->rate_blocks[ddir]) {
log_err("%s: min iops rate %u not met,"
" got %lu\n", td->o.name,
- td->o.rate_iops_min,
- rate);
+ rate_iops_min, rate);
+ return 1;
}
}
}
}
- td->rate_bytes = bytes;
- td->rate_blocks = iops;
- memcpy(&td->lastrate, now, sizeof(*now));
+ td->rate_bytes[ddir] = bytes;
+ td->rate_blocks[ddir] = iops;
+ memcpy(&td->lastrate[ddir], now, sizeof(*now));
return 0;
}
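+/*
+ * Check both data directions, but only those that actually completed
+ * I/O this round.
+ */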
+static int check_min_rate(struct thread_data *td, struct timeval *now,
+ unsigned long *bytes_done)
+{
+ int ret = 0;
+
+ if (bytes_done[DDIR_READ])
+ ret |= __check_min_rate(td, now, DDIR_READ);
+ if (bytes_done[DDIR_WRITE])
+ ret |= __check_min_rate(td, now, DDIR_WRITE);
+
+ return ret;
+}
+
static inline int runtime_exceeded(struct thread_data *td, struct timeval *t)
{
if (!td->o.timeout)
/*
* get immediately available events, if any
*/
- r = io_u_queued_complete(td, 0);
+ r = io_u_queued_complete(td, 0, NULL);
if (r < 0)
return;
}
if (td->cur_depth)
- r = io_u_queued_complete(td, td->cur_depth);
+ r = io_u_queued_complete(td, td->cur_depth, NULL);
}
/*
put_io_u(td, io_u);
return 1;
} else if (ret == FIO_Q_QUEUED) {
- if (io_u_queued_complete(td, 1) < 0)
+ if (io_u_queued_complete(td, 1, NULL) < 0)
return 1;
} else if (ret == FIO_Q_COMPLETED) {
if (io_u->error) {
return 1;
}
- if (io_u_sync_complete(td, io_u) < 0)
+ if (io_u_sync_complete(td, io_u, NULL) < 0)
return 1;
} else if (ret == FIO_Q_BUSY) {
if (td_io_commit(td))
fio_gettime(&td->tv_cache, NULL);
}
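+/*
+ * Decide whether an error should end the job: with continue_on_error,
+ * non-fatal errors are counted and cleared; ENOSPC ends the run cleanly
+ * when fill_device is set; anything else is fatal.
+ */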
+static int break_on_this_error(struct thread_data *td, int *retptr)
+{
+ int ret = *retptr;
+
+ if (ret < 0 || td->error) {
+ int err;
+
+ if (!td->o.continue_on_error)
+ return 1;
+
+ if (ret < 0)
+ err = -ret;
+ else
+ err = td->error;
+
+ if (td_non_fatal_error(err)) {
+ /*
+ * Continue with the I/Os in case of
+ * a non-fatal error.
+ */
+ update_error_count(td, err);
+ td_clear_error(td);
+ *retptr = 0;
+ return 0;
+ } else if (td->o.fill_device && err == ENOSPC) {
+ /*
+ * We expect to hit this error if the
+ * fill_device option is set.
+ */
+ td_clear_error(td);
+ td->terminate = 1;
+ return 1;
+ } else {
+ /*
+ * Stop the I/O in case of a fatal
+ * error.
+ */
+ update_error_count(td, err);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
/*
* The main verify engine. Runs over the writes we previously submitted,
* reads the blocks back in, and checks the crc/md5 of the data.
* read from disk.
*/
for_each_file(td, f, i) {
- if (!(f->flags & FIO_FILE_OPEN))
+ if (!fio_file_open(f))
continue;
if (fio_io_sync(td, f))
break;
while (!td->terminate) {
int ret2, full;
- io_u = __get_io_u(td);
- if (!io_u)
- break;
-
update_tv_cache(td);
if (runtime_exceeded(td, &td->tv_cache)) {
- put_io_u(td, io_u);
td->terminate = 1;
break;
}
+ io_u = __get_io_u(td);
+ if (!io_u)
+ break;
+
if (get_next_verify(td, io_u)) {
put_io_u(td, io_u);
break;
break;
}
- io_u->end_io = verify_io_u;
+ if (td->o.verify_async)
+ io_u->end_io = verify_io_u_async;
+ else
+ io_u->end_io = verify_io_u;
ret = td_io_queue(td, io_u);
switch (ret) {
case FIO_Q_COMPLETED:
- if (io_u->error)
+ if (io_u->error) {
ret = -io_u->error;
- else if (io_u->resid) {
+ clear_io_u(td, io_u);
+ } else if (io_u->resid) {
int bytes = io_u->xfer_buflen - io_u->resid;
struct fio_file *f = io_u->file;
requeue_io_u(td, &io_u);
} else {
sync_done:
- ret = io_u_sync_complete(td, io_u);
+ ret = io_u_sync_complete(td, io_u, NULL);
if (ret < 0)
break;
}
break;
}
- if (ret < 0 || td->error)
+ if (break_on_this_error(td, &ret))
break;
/*
* and do the verification on them through
* the callback handler
*/
- if (io_u_queued_complete(td, min_events) < 0) {
+ if (io_u_queued_complete(td, min_events, NULL) < 0) {
ret = -1;
break;
}
min_events = td->cur_depth;
if (min_events)
- ret = io_u_queued_complete(td, min_events);
+ ret = io_u_queued_complete(td, min_events, NULL);
} else
cleanup_pending_aio(td);
*/
static void do_io(struct thread_data *td)
{
- unsigned long usec;
unsigned int i;
int ret = 0;
while ((td->this_io_bytes[0] + td->this_io_bytes[1]) < td->o.size) {
struct timeval comp_time;
- long bytes_done = 0;
+ unsigned long bytes_done[2] = { 0, 0 };
int min_evts = 0;
struct io_u *io_u;
int ret2, full;
if (td->terminate)
break;
- io_u = get_io_u(td);
- if (!io_u)
- break;
-
update_tv_cache(td);
if (runtime_exceeded(td, &td->tv_cache)) {
- put_io_u(td, io_u);
td->terminate = 1;
break;
}
+ io_u = get_io_u(td);
+ if (!io_u)
+ break;
+
/*
* Add verification end_io handler, if asked to verify
* a previously written file.
*/
if (td->o.verify != VERIFY_NONE && io_u->ddir == DDIR_READ) {
- io_u->end_io = verify_io_u;
+ if (td->o.verify_async)
+ io_u->end_io = verify_io_u_async;
+ else
+ io_u->end_io = verify_io_u;
td_set_runstate(td, TD_VERIFYING);
} else if (in_ramp_time(td))
td_set_runstate(td, TD_RAMP);
ret = td_io_queue(td, io_u);
switch (ret) {
case FIO_Q_COMPLETED:
- if (io_u->error)
+ if (io_u->error) {
ret = -io_u->error;
- else if (io_u->resid) {
+ clear_io_u(td, io_u);
+ } else if (io_u->resid) {
int bytes = io_u->xfer_buflen - io_u->resid;
struct fio_file *f = io_u->file;
requeue_io_u(td, &io_u);
} else {
sync_done:
- if (should_check_rate(td))
+ if (__should_check_rate(td, DDIR_READ) ||
+ __should_check_rate(td, DDIR_WRITE))
fio_gettime(&comp_time, NULL);
- bytes_done = io_u_sync_complete(td, io_u);
- if (bytes_done < 0)
- ret = bytes_done;
+ ret = io_u_sync_complete(td, io_u, bytes_done);
+ if (ret < 0)
+ break;
}
break;
case FIO_Q_QUEUED:
break;
}
- if (ret < 0 || td->error)
+ if (break_on_this_error(td, &ret))
break;
/*
if (full && !min_evts)
min_evts = 1;
- if (should_check_rate(td))
+ if (__should_check_rate(td, DDIR_READ) ||
+ __should_check_rate(td, DDIR_WRITE))
fio_gettime(&comp_time, NULL);
do {
- ret = io_u_queued_complete(td, min_evts);
- if (ret <= 0)
+ ret = io_u_queued_complete(td, min_evts, bytes_done);
+ if (ret < 0)
break;
- bytes_done += ret;
} while (full && (td->cur_depth > td->o.iodepth_low));
}
if (ret < 0)
break;
- if (!bytes_done)
+ if (!(bytes_done[0] + bytes_done[1]))
continue;
- /*
- * the rate is batched for now, it should work for batches
- * of completions except the very first one which may look
- * a little bursty
- */
- if (!in_ramp_time(td) && should_check_rate(td)) {
- usec = utime_since(&td->tv_cache, &comp_time);
-
- rate_throttle(td, usec, bytes_done);
-
- if (check_min_rate(td, &comp_time)) {
+ if (!in_ramp_time(td) && should_check_rate(td, bytes_done)) {
+ if (check_min_rate(td, &comp_time, bytes_done)) {
if (exitall_on_terminate)
terminate_threads(td->groupid);
td_verror(td, EIO, "check_min_rate");
int left;
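+ /* optionally busy-spin for the first part of the think time */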
if (td->o.thinktime_spin)
- __usec_sleep(td->o.thinktime_spin);
+ usec_spin(td->o.thinktime_spin);
left = td->o.thinktime - td->o.thinktime_spin;
if (left)
i = td->cur_depth;
if (i)
- ret = io_u_queued_complete(td, i);
+ ret = io_u_queued_complete(td, i, NULL);
if (should_fsync(td) && td->o.end_fsync) {
td_set_runstate(td, TD_FSYNCING);
for_each_file(td, f, i) {
- if (!(f->flags & FIO_FILE_OPEN))
+ if (!fio_file_open(f))
continue;
fio_io_sync(td, f);
}
{
struct io_u *io_u;
unsigned int max_bs;
- int i, max_units;
+ int cl_align, i, max_units;
char *p;
max_units = td->o.iodepth;
if (allocate_io_mem(td))
return 1;
- if (td->o.odirect)
- p = ALIGN(td->orig_buffer);
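+ /*
+ * O_DIRECT typically requires page-aligned buffers; mem_align
+ * additionally offsets the buffers from that page boundary.
+ */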
+ if (td->o.odirect || td->o.mem_align)
+ p = PAGE_ALIGN(td->orig_buffer) + td->o.mem_align;
else
p = td->orig_buffer;
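+ /*
+ * Allocate each io_u aligned to the CPU cache line size, so
+ * that separate io_u structures never start on a shared line.
+ */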
+ cl_align = os_cache_line_size();
+
for (i = 0; i < max_units; i++) {
+ void *ptr;
+ int err;
+
if (td->terminate)
return 1;
- io_u = malloc(sizeof(*io_u));
+
+ err = posix_memalign(&ptr, cl_align, sizeof(*io_u));
+ if (err) {
+ log_err("fio: posix_memalign=%s\n", strerror(err));
+ break;
+ }
+
+ io_u = ptr;
memset(io_u, 0, sizeof(*io_u));
INIT_FLIST_HEAD(&io_u->list);
+ dprint(FD_MEM, "io_u alloc %p, index %u\n", io_u, i);
if (!(td->io_ops->flags & FIO_NOIO)) {
io_u->buf = p + max_bs * i;
+ dprint(FD_MEM, "io_u %p, mem %p\n", io_u, io_u->buf);
if (td_write(td) && !td->o.refill_buffers)
io_u_fill_buffer(td, io_u, max_bs);
td->ts.stat_io_bytes[0] = td->ts.stat_io_bytes[1] = 0;
td->this_io_bytes[0] = td->this_io_bytes[1] = 0;
td->zone_bytes = 0;
- td->rate_bytes = 0;
- td->rate_blocks = 0;
- td->rw_end_set[0] = td->rw_end_set[1] = 0;
+ td->rate_bytes[0] = td->rate_bytes[1] = 0;
+ td->rate_blocks[0] = td->rate_blocks[1] = 0;
td->last_was_sync = 0;
*/
if (td->o.time_based || td->o.loops)
td->nr_done_files = 0;
+
+ /*
+ * Set the same seed to get repeatable runs
+ */
+ td_fill_rand_seeds(td);
}
void reset_all_stats(struct thread_data *td)
memcpy(&td->start, &tv, sizeof(tv));
}
-static int clear_io_state(struct thread_data *td)
+static void clear_io_state(struct thread_data *td)
{
struct fio_file *f;
unsigned int i;
- int ret;
reset_io_counters(td);
close_files(td);
+ for_each_file(td, f, i)
+ fio_file_clear_done(f);
+}
- ret = 0;
- for_each_file(td, f, i) {
- f->flags &= ~FIO_FILE_DONE;
- ret = td_io_open_file(td, f);
- if (ret)
- break;
- }
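+/*
+ * Run a user-supplied exec_prerun/exec_postrun command through the
+ * shell. The string is passed to "sh -c" unquoted, so it is subject to
+ * normal shell word splitting.
+ */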
+static int exec_string(const char *string)
+{
+ int ret, newlen = strlen(string) + 1 + 8;
+ char *str;
+
+ str = malloc(newlen);
+ sprintf(str, "sh -c %s", string);
+ ret = system(str);
+ if (ret == -1)
+ log_err("fio: exec of cmd <%s> failed\n", str);
+
+ free(str);
return ret;
}
{
unsigned long long runtime[2], elapsed;
struct thread_data *td = data;
+ pthread_condattr_t attr;
int clear_state;
if (!td->o.use_thread)
INIT_FLIST_HEAD(&td->io_u_requeues);
INIT_FLIST_HEAD(&td->io_log_list);
INIT_FLIST_HEAD(&td->io_hist_list);
+ INIT_FLIST_HEAD(&td->verify_list);
+ pthread_mutex_init(&td->io_u_lock, NULL);
td->io_hist_tree = RB_ROOT;
+ pthread_condattr_init(&attr);
+ pthread_cond_init(&td->verify_cond, &attr);
+ pthread_cond_init(&td->free_cond, &attr);
+
td_set_runstate(td, TD_INITIALIZED);
+ dprint(FD_MUTEX, "up startup_mutex\n");
fio_mutex_up(startup_mutex);
+ dprint(FD_MUTEX, "wait on td->mutex\n");
fio_mutex_down(td->mutex);
+ dprint(FD_MUTEX, "done waiting on td->mutex\n");
/*
* the ->mutex mutex is now no longer used, close it to avoid
if (init_io_u(td))
goto err;
- if (td->o.cpumask_set && fio_setaffinity(td) == -1) {
+ if (td->o.verify_async && verify_async_init(td))
+ goto err;
+
+ if (td->o.cpumask_set && fio_setaffinity(td->pid, td->o.cpumask) == -1) {
td_verror(td, errno, "cpu_set_affinity");
goto err;
}
+ /*
+ * If we have a gettimeofday() thread, make sure we exclude that
+ * thread from this job
+ */
+ if (td->o.gtod_cpu) {
+ fio_cpu_clear(&td->o.cpumask, td->o.gtod_cpu);
+ if (fio_setaffinity(td->pid, td->o.cpumask) == -1) {
+ td_verror(td, errno, "cpu_set_affinity");
+ goto err;
+ }
+ }
+
if (td->ioprio_set) {
if (ioprio_set(IOPRIO_WHO_PROCESS, 0, td->ioprio) == -1) {
td_verror(td, errno, "ioprio_set");
if (td_io_init(td))
goto err;
- if (open_files(td))
- goto err;
-
if (init_random_map(td))
goto err;
if (td->o.exec_prerun) {
- if (system(td->o.exec_prerun) < 0)
+ if (exec_string(td->o.exec_prerun))
+ goto err;
+ }
+
+ if (td->o.pre_read) {
+ if (pre_read_files(td) < 0)
goto err;
}
clear_state = 0;
while (keep_running(td)) {
fio_gettime(&td->start, NULL);
- memcpy(&td->ts.stat_sample_time, &td->start, sizeof(td->start));
+ memcpy(&td->ts.stat_sample_time[0], &td->start,
+ sizeof(td->start));
+ memcpy(&td->ts.stat_sample_time[1], &td->start,
+ sizeof(td->start));
memcpy(&td->tv_cache, &td->start, sizeof(td->start));
- if (td->o.ratemin)
+ if (td->o.ratemin[0] || td->o.ratemin[1])
memcpy(&td->lastrate, &td->ts.stat_sample_time,
sizeof(td->lastrate));
- if (clear_state && clear_io_state(td))
- break;
+ if (clear_state)
+ clear_io_state(td);
prune_io_piece_log(td);
clear_state = 1;
if (td_read(td) && td->io_bytes[DDIR_READ]) {
- if (td->rw_end_set[DDIR_READ])
- elapsed = utime_since(&td->start,
- &td->rw_end[DDIR_READ]);
- else
- elapsed = utime_since_now(&td->start);
-
+ elapsed = utime_since_now(&td->start);
runtime[DDIR_READ] += elapsed;
}
if (td_write(td) && td->io_bytes[DDIR_WRITE]) {
- if (td->rw_end_set[DDIR_WRITE])
- elapsed = utime_since(&td->start,
- &td->rw_end[DDIR_WRITE]);
- else
- elapsed = utime_since_now(&td->start);
-
+ elapsed = utime_since_now(&td->start);
runtime[DDIR_WRITE] += elapsed;
}
(td->io_ops->flags & FIO_UNIDIR))
continue;
- if (clear_io_state(td))
- break;
+ clear_io_state(td);
fio_gettime(&td->start, NULL);
td->ts.io_bytes[0] = td->io_bytes[0];
td->ts.io_bytes[1] = td->io_bytes[1];
- if (td->ts.bw_log)
- finish_log(td, td->ts.bw_log, "bw");
- if (td->ts.slat_log)
- finish_log(td, td->ts.slat_log, "slat");
- if (td->ts.clat_log)
- finish_log(td, td->ts.clat_log, "clat");
- if (td->o.exec_postrun) {
- if (system(td->o.exec_postrun) < 0)
- log_err("fio: postrun %s failed\n", td->o.exec_postrun);
+ fio_mutex_down(writeout_mutex);
+ if (td->ts.bw_log) {
+ if (td->o.bw_log_file) {
+ finish_log_named(td, td->ts.bw_log,
+ td->o.bw_log_file, "bw");
+ } else
+ finish_log(td, td->ts.bw_log, "bw");
+ }
+ if (td->ts.slat_log) {
+ if (td->o.lat_log_file) {
+ finish_log_named(td, td->ts.slat_log,
+ td->o.lat_log_file, "slat");
+ } else
+ finish_log(td, td->ts.slat_log, "slat");
+ }
+ if (td->ts.clat_log) {
+ if (td->o.lat_log_file) {
+ finish_log_named(td, td->ts.clat_log,
+ td->o.lat_log_file, "clat");
+ } else
+ finish_log(td, td->ts.clat_log, "clat");
}
+ fio_mutex_up(writeout_mutex);
+ if (td->o.exec_postrun)
+ exec_string(td->o.exec_postrun);
if (exitall_on_terminate)
terminate_threads(td->groupid);
close_ioengine(td);
cleanup_io_u(td);
+ if (td->o.cpumask_set) {
+ int ret = fio_cpuset_exit(&td->o.cpumask);
+
+ if (ret)
+ td_verror(td, ret, "fio_cpuset_exit");
+ }
+
+ if (td->o.verify_async)
+ verify_async_exit(td);
+
/*
* do this very late, it will log file closing as well
*/
continue;
reaped:
(*nr_running)--;
- (*m_rate) -= td->o.ratemin;
- (*t_rate) -= td->o.rate;
+ (*m_rate) -= (td->o.ratemin[0] + td->o.ratemin[1]);
+ (*t_rate) -= (td->o.rate[0] + td->o.rate[1]);
if (!td->pid)
pending--;
terminate_threads(TERMINATE_ALL);
}
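+/*
+ * Dedicated clock thread: keeps the cached gettimeofday() value fresh
+ * for jobs that read time from it instead of calling gettimeofday()
+ * themselves.
+ */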
+static void *gtod_thread_main(void *data)
+{
+ fio_mutex_up(startup_mutex);
+
+ /*
+ * As long as we have jobs around, update the clock. It would be nice
+ * to have some way of NOT hammering that CPU with gettimeofday(),
+ * but I'm not sure what to use outside of a simple CPU nop to relax
+ * it - we don't want to lose precision.
+ */
+ while (threads) {
+ fio_gtod_update();
+ nop;
+ }
+
+ return NULL;
+}
+
+static int fio_start_gtod_thread(void)
+{
+ int ret;
+
+ ret = pthread_create(&gtod_thread, NULL, gtod_thread_main, NULL);
+ if (ret) {
+ log_err("Can't create gtod thread: %s\n", strerror(ret));
+ return 1;
+ }
+
+ ret = pthread_detach(gtod_thread);
+ if (ret) {
+ log_err("Can't detatch gtod thread: %s\n", strerror(ret));
+ return 1;
+ }
+
+ dprint(FD_MUTEX, "wait on startup_mutex\n");
+ fio_mutex_down(startup_mutex);
+ dprint(FD_MUTEX, "done waiting on startup_mutex\n");
+ return 0;
+}
+
/*
* Main function for kicking off and reaping jobs, as needed.
*/
if (fio_pin_memory())
return;
+ if (fio_gtod_offload && fio_start_gtod_thread())
+ return;
+
if (!terse_output) {
printf("Starting ");
if (nr_thread)
* its own files. so close them, if we opened them
* for creation
*/
- for_each_file(td, f, i)
- td_io_close_file(td, f);
+ for_each_file(td, f, i) {
+ if (fio_file_open(f))
+ td_io_close_file(td, f);
+ }
}
init_disk_util(td);
nr_started++;
if (td->o.use_thread) {
+ int ret;
+
dprint(FD_PROCESS, "will pthread_create\n");
- if (pthread_create(&td->thread, NULL,
- thread_main, td)) {
- perror("pthread_create");
+ ret = pthread_create(&td->thread, NULL,
+ thread_main, td);
+ if (ret) {
+ log_err("pthread_create: %s\n",
+ strerror(ret));
nr_started--;
break;
}
- if (pthread_detach(td->thread) < 0)
- perror("pthread_detach");
+ ret = pthread_detach(td->thread);
+ if (ret)
+ log_err("pthread_detach: %s",
+ strerror(ret));
} else {
pid_t pid;
dprint(FD_PROCESS, "will fork\n");
} else if (i == fio_debug_jobno)
*fio_debug_jobp = pid;
}
- fio_mutex_down(startup_mutex);
+ dprint(FD_MUTEX, "wait on startup_mutex\n");
+ if (fio_mutex_down_timeout(startup_mutex, 10)) {
+ log_err("fio: job startup hung? exiting.\n");
+ terminate_threads(TERMINATE_ALL);
+ fio_abort = 1;
+ nr_started--;
+ break;
+ }
+ dprint(FD_MUTEX, "done waiting on startup_mutex\n");
}
/*
td_set_runstate(td, TD_RUNNING);
nr_running++;
nr_started--;
- m_rate += td->o.ratemin;
- t_rate += td->o.rate;
+ m_rate += td->o.ratemin[0] + td->o.ratemin[1];
+ t_rate += td->o.rate[0] + td->o.rate[1];
todo--;
fio_mutex_up(td->mutex);
}
}
startup_mutex = fio_mutex_init(0);
+ writeout_mutex = fio_mutex_init(1);
set_genesis_time();
}
fio_mutex_remove(startup_mutex);
+ fio_mutex_remove(writeout_mutex);
return exit_value;
}