#include "workqueue.h"
#include "lib/mountcheck.h"
#include "rate-submit.h"
-
-static pthread_t helper_thread;
-static pthread_mutex_t helper_lock;
-pthread_cond_t helper_cond;
-int helper_do_stat = 0;
+#include "helper_thread.h"
static struct fio_mutex *startup_mutex;
static struct flist_head *cgroup_list;
int shm_id = 0;
int temp_stall_ts;
unsigned long done_secs = 0;
-volatile int helper_exit = 0;
#define PAGE_ALIGN(buf) \
(char *) (((uintptr_t) (buf) + page_mask) & ~page_mask)
put_io_u(td, io_u);
return true;
} else if (ret == FIO_Q_QUEUED) {
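+ /*
+ * Commit the queued request so the engine actually submits it
+ * before we wait for its completion.
+ */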
+ if (td_io_commit(td))
+ return true;
if (io_u_queued_complete(td, 1) < 0)
return true;
} else if (ret == FIO_Q_COMPLETED) {
int min_evts = 0;
int ret;
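+ /*
+ * If the logs have been flagged for regrowing, quiesce all pending
+ * I/O so they can be resized, and return what the quiesce reaped.
+ */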
+ if (td->flags & TD_F_REGROW_LOGS) {
+ ret = io_u_quiesce(td);
+ regrow_logs(td);
+ return ret;
+ }
+
/*
* if the queue is full, we MUST reap at least 1 event
*/
if (*ret < 0)
break;
}
+
+ if (td->flags & TD_F_REGROW_LOGS)
+ regrow_logs(td);
+
+ /*
+ * When doing I/O (not when verifying), check for any
+ * errors that should be ignored.
+ */
+ if (!from_verify)
+ break;
+
return 0;
case FIO_Q_QUEUED:
/*
return !td->o.iodepth_batch_complete_min &&
!td->o.iodepth_batch_complete_max;
}
+/*
+ * Unlink all regular files attached to this thread's fio_file list.
+ */
+static int unlink_all_files(struct thread_data *td)
+{
+ struct fio_file *f;
+ unsigned int i;
+ int ret = 0;
+
+ for_each_file(td, f, i) {
+ if (f->filetype != FIO_TYPE_FILE)
+ continue;
+ ret = td_io_unlink_file(td, f);
+ if (ret)
+ break;
+ }
+
+ if (ret)
+ td_verror(td, ret, "unlink_all_files");
+
+ return ret;
+}
/*
* The main verify engine. Runs over the writes we previously submitted,
if (td->error)
return;
+ /*
+ * verify_state needs to be reset before verification
+ * proceeds so that expected random seeds match actual
+ * random seeds in headers. The main loop will reset
+ * all random number generators if randrepeat is set.
+ */
+ if (!td->o.rand_repeatable)
+ td_fill_verify_state_seed(td);
+
td_set_runstate(td, TD_VERIFYING);
io_u = NULL;
break;
while ((io_u = get_io_u(td)) != NULL) {
- if (IS_ERR(io_u)) {
+ if (IS_ERR_OR_NULL(io_u)) {
io_u = NULL;
ret = FIO_Q_BUSY;
goto reap;
continue;
} else if (io_u->ddir == DDIR_TRIM) {
io_u->ddir = DDIR_READ;
- io_u_set(io_u, IO_U_F_TRIMMED);
+ io_u_set(td, io_u, IO_U_F_TRIMMED);
break;
} else if (io_u->ddir == DDIR_WRITE) {
io_u->ddir = DDIR_READ;
return number_ios >= (td->o.number_ios * td->loops);
}
-static bool io_issue_bytes_exceeded(struct thread_data *td)
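+/*
+ * Common helper for io_issue_bytes_exceeded() and io_complete_bytes_exceeded(),
+ * comparing the passed-in byte counters against the configured limit.
+ */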
+static bool io_bytes_exceeded(struct thread_data *td, uint64_t *this_bytes)
{
unsigned long long bytes, limit;
if (td_rw(td))
- bytes = td->io_issue_bytes[DDIR_READ] + td->io_issue_bytes[DDIR_WRITE];
+ bytes = this_bytes[DDIR_READ] + this_bytes[DDIR_WRITE];
else if (td_write(td))
- bytes = td->io_issue_bytes[DDIR_WRITE];
+ bytes = this_bytes[DDIR_WRITE];
else if (td_read(td))
- bytes = td->io_issue_bytes[DDIR_READ];
+ bytes = this_bytes[DDIR_READ];
else
- bytes = td->io_issue_bytes[DDIR_TRIM];
+ bytes = this_bytes[DDIR_TRIM];
if (td->o.io_limit)
limit = td->o.io_limit;
return bytes >= limit || exceeds_number_ios(td);
}
-static bool io_complete_bytes_exceeded(struct thread_data *td)
+static bool io_issue_bytes_exceeded(struct thread_data *td)
{
- unsigned long long bytes, limit;
-
- if (td_rw(td))
- bytes = td->this_io_bytes[DDIR_READ] + td->this_io_bytes[DDIR_WRITE];
- else if (td_write(td))
- bytes = td->this_io_bytes[DDIR_WRITE];
- else if (td_read(td))
- bytes = td->this_io_bytes[DDIR_READ];
- else
- bytes = td->this_io_bytes[DDIR_TRIM];
-
- if (td->o.io_limit)
- limit = td->o.io_limit;
- else
- limit = td->o.size;
+ return io_bytes_exceeded(td, td->io_issue_bytes);
+}
- limit *= td->loops;
- return bytes >= limit || exceeds_number_ios(td);
+static bool io_complete_bytes_exceeded(struct thread_data *td)
+{
+ return io_bytes_exceeded(td, td->this_io_bytes);
}
/*
if (flow_threshold_exceeded(td))
continue;
- if (!td->o.time_based && bytes_issued >= total_bytes)
+ /*
+ * Break if we exceeded the byte limit. The exception is
+ * time based runs, but we still need to break out of the
+ * loop for those to run verification, if enabled.
+ */
+ if (bytes_issued >= total_bytes &&
+ (!td->o.time_based ||
+ (td->o.time_based && td->o.verify != VERIFY_NONE)))
break;
io_u = get_io_u(td);
if (ret < 0)
break;
if (!ddir_rw_sum(td->bytes_done) &&
- !(td->io_ops->flags & FIO_NOIO))
+ !td_ioengine_flagged(td, FIO_NOIO))
continue;
if (!in_ramp_time(td) && should_check_rate(td)) {
bytes_done[i] = td->bytes_done[i] - bytes_done[i];
}
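+/*
+ * Free the per-file last_write_comp buffers used for write completion
+ * logging.
+ */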
+static void free_file_completion_logging(struct thread_data *td)
+{
+ struct fio_file *f;
+ unsigned int i;
+
+ for_each_file(td, f, i) {
+ if (!f->last_write_comp)
+ break;
+ sfree(f->last_write_comp);
+ }
+}
+
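+/*
+ * Allocate a per-file buffer to record the last completed writes, used
+ * when saving verify state (verify_state_save).
+ */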
+static int init_file_completion_logging(struct thread_data *td,
+ unsigned int depth)
+{
+ struct fio_file *f;
+ unsigned int i;
+
+ if (td->o.verify == VERIFY_NONE || !td->o.verify_state_save)
+ return 0;
+
+ for_each_file(td, f, i) {
+ f->last_write_comp = scalloc(depth, sizeof(uint64_t));
+ if (!f->last_write_comp)
+ goto cleanup;
+ }
+
+ return 0;
+
+cleanup:
+ free_file_completion_logging(td);
+ log_err("fio: failed to alloc write comp data\n");
+ return 1;
+}
+
static void cleanup_io_u(struct thread_data *td)
{
struct io_u *io_u;
io_u_qexit(&td->io_u_freelist);
io_u_qexit(&td->io_u_all);
- if (td->last_write_comp)
- sfree(td->last_write_comp);
+ free_file_completion_logging(td);
}
static int init_io_u(struct thread_data *td)
td->orig_buffer_size = (unsigned long long) max_bs
* (unsigned long long) max_units;
- if ((td->io_ops->flags & FIO_NOIO) || !(td_read(td) || td_write(td)))
+ if (td_ioengine_flagged(td, FIO_NOIO) || !(td_read(td) || td_write(td)))
data_xfer = 0;
err = 0;
* lucky and the allocator gives us an aligned address.
*/
if (td->o.odirect || td->o.mem_align || td->o.oatomic ||
- (td->io_ops->flags & FIO_RAWIO))
+ td_ioengine_flagged(td, FIO_RAWIO))
td->orig_buffer_size += page_mask + td->o.mem_align;
if (td->o.mem_type == MEM_SHMHUGE || td->o.mem_type == MEM_MMAPHUGE) {
return 1;
if (td->o.odirect || td->o.mem_align || td->o.oatomic ||
- (td->io_ops->flags & FIO_RAWIO))
+ td_ioengine_flagged(td, FIO_RAWIO))
p = PAGE_ALIGN(td->orig_buffer) + td->o.mem_align;
else
p = td->orig_buffer;
p += max_bs;
}
- if (td->o.verify != VERIFY_NONE) {
- td->last_write_comp = scalloc(max_units, sizeof(uint64_t));
- if (!td->last_write_comp) {
- log_err("fio: failed to alloc write comp data\n");
- return 1;
- }
- }
+ if (init_file_completion_logging(td, max_units))
+ return 1;
return 0;
}
static int switch_ioscheduler(struct thread_data *td)
{
+#ifdef FIO_HAVE_IOSCHED_SWITCH
char tmp[256], tmp2[128];
FILE *f;
int ret;
- if (td->io_ops->flags & FIO_DISKLESSIO)
+ if (td_ioengine_flagged(td, FIO_DISKLESSIO))
return 0;
sprintf(tmp, "%s/queue/scheduler", td->sysfs_root);
*/
tmp[strlen(tmp) - 1] = '\0';
+ /*
+ * A write to a scheduler file that only contains "none" doesn't
+ * fail, so detect that case by checking the contents read back here.
+ */
+ if (!strcmp(tmp, "none")) {
+ log_err("fio: io scheduler is not tunable\n");
+ fclose(f);
+ return 0;
+ }
sprintf(tmp2, "[%s]", td->o.ioscheduler);
if (!strstr(tmp, tmp2)) {
fclose(f);
return 0;
+#else
+ return 0;
+#endif
}
static bool keep_running(struct thread_data *td)
break;
io_u = get_io_u(td);
- if (!io_u)
+ if (IS_ERR_OR_NULL(io_u))
break;
- io_u_set(io_u, IO_U_F_FLIGHT);
+ io_u_set(td, io_u, IO_U_F_FLIGHT);
io_u->error = 0;
io_u->resid = 0;
if (ddir_rw(acct_ddir(io_u)))
struct thread_data *td = fd->td;
struct thread_options *o = &td->o;
struct sk_out *sk_out = fd->sk_out;
- pthread_condattr_t attr;
+ int deadlock_loop_cnt;
int clear_state;
int ret;
INIT_FLIST_HEAD(&td->verify_list);
INIT_FLIST_HEAD(&td->trim_list);
INIT_FLIST_HEAD(&td->next_rand_list);
- pthread_mutex_init(&td->io_u_lock, NULL);
td->io_hist_tree = RB_ROOT;
- pthread_condattr_init(&attr);
- pthread_cond_init(&td->verify_cond, &attr);
- pthread_cond_init(&td->free_cond, &attr);
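+ /*
+ * Initialize the mutex and condition variables as process shared,
+ * since the thread_data lives in shared memory and may be used
+ * from forked jobs.
+ */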
+ ret = mutex_cond_init_pshared(&td->io_u_lock, &td->free_cond);
+ if (ret) {
+ td_verror(td, ret, "mutex_cond_init_pshared");
+ goto err;
+ }
+ ret = cond_init_pshared(&td->verify_cond);
+ if (ret) {
+ td_verror(td, ret, "mutex_cond_pshared");
+ goto err;
+ }
td_set_runstate(td, TD_INITIALIZED);
dprint(FD_MUTEX, "up startup_mutex\n");
goto err;
}
+ /*
+ * Do this early; we don't want the compress threads to be limited
+ * to the same CPUs as the IO workers. So do this before we set
+ * any potential CPU affinity.
+ */
+ if (iolog_compress_init(td, sk_out))
+ goto err;
+
/*
* If we have a gettimeofday() thread, make sure we exclude that
* thread from this job
goto err;
}
- if (iolog_compress_init(td, sk_out))
- goto err;
-
fio_verify_init(td);
if (rate_submit_init(td, sk_out))
goto err;
- fio_gettime(&td->epoch, NULL);
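+ /*
+ * Record the job epoch; with log_unix_epoch set, logs are stamped
+ * with wall clock (Unix epoch) times.
+ */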
+ set_epoch_time(td, o->log_unix_epoch);
fio_getrusage(&td->ru_start);
memcpy(&td->bw_sample_time, &td->epoch, sizeof(td->epoch));
memcpy(&td->iops_sample_time, &td->epoch, sizeof(td->epoch));
fio_gettime(&td->start, NULL);
memcpy(&td->tv_cache, &td->start, sizeof(td->start));
- if (clear_state)
+ if (clear_state) {
clear_io_state(td, 0);
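+ /* with unlink_each_loop, remove the job's files before the next loop */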
+ if (o->unlink_each_loop && unlink_all_files(td))
+ break;
+ }
+
prune_io_piece_log(td);
if (td->o.verify_only && (td_write(td) || td_rw(td)))
}
}
+ /*
+ * If we took too long to shut down, the main thread could
+ * already consider us reaped/exited. If that happens, break
+ * out and clean up.
+ */
+ if (td->runstate >= TD_EXITED)
+ break;
+
clear_state = 1;
/*
* the rusage_sem, which would never get upped because
* this thread is waiting for the stat mutex.
*/
- check_update_rusage(td);
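+ /*
+ * Poll for the stat mutex instead of blocking on it, updating
+ * rusage between attempts so we don't deadlock against the thread
+ * holding it, and bail out if this goes on for too long.
+ */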
+ deadlock_loop_cnt = 0;
+ do {
+ check_update_rusage(td);
+ if (!fio_mutex_down_trylock(stat_mutex))
+ break;
+ usleep(1000);
+ if (deadlock_loop_cnt++ > 5000) {
+ log_err("fio seems to be stuck grabbing stat_mutex, forcibly exiting\n");
+ td->error = EDEADLK;
+ goto err;
+ }
+ } while (1);
- fio_mutex_down(stat_mutex);
if (td_read(td) && td->io_bytes[DDIR_READ])
update_runtime(td, elapsed_us, DDIR_READ);
if (td_write(td) && td->io_bytes[DDIR_WRITE])
if (!o->do_verify ||
o->verify == VERIFY_NONE ||
- (td->io_ops->flags & FIO_UNIDIR))
+ td_ioengine_flagged(td, FIO_UNIDIR))
continue;
clear_io_state(td, 0);
break;
}
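+ /*
+ * Mark the thread as finishing, so the reaper knows the job is
+ * shutting down rather than stuck.
+ */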
+ td_set_runstate(td, TD_FINISHING);
+
update_rusage_stat(td);
td->ts.total_run_time = mtime_since_now(&td->epoch);
td->ts.io_bytes[DDIR_READ] = td->io_bytes[DDIR_READ];
fio_unpin_memory(td);
- fio_writeout_logs(td);
+ td_writeout_logs(td, true);
iolog_compress_exit(td);
rate_submit_exit(td);
cgroup_shutdown(td, &cgroup_mnt);
verify_free_state(td);
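+ /* free the zoned random distribution state, if allocated */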
+ if (td->zone_state_index) {
+ int i;
+
+ for (i = 0; i < DDIR_RWDIR_CNT; i++)
+ free(td->zone_state_index[i]);
+ free(td->zone_state_index);
+ td->zone_state_index = NULL;
+ }
+
if (fio_option_is_set(o, cpumask)) {
ret = fio_cpuset_exit(&o->cpumask);
if (ret)
return (void *) (uintptr_t) td->error;
}
-
-/*
- * We cannot pass the td data into a forked process, so attach the td and
- * pass it to the thread worker.
- */
-static int fork_main(struct sk_out *sk_out, int shmid, int offset)
-{
- struct fork_data *fd;
- void *data, *ret;
-
-#if !defined(__hpux) && !defined(CONFIG_NO_SHM)
- data = shmat(shmid, NULL, 0);
- if (data == (void *) -1) {
- int __err = errno;
-
- perror("shmat");
- return __err;
- }
-#else
- /*
- * HP-UX inherits shm mappings?
- */
- data = threads;
-#endif
-
- fd = calloc(1, sizeof(*fd));
- fd->td = data + offset * sizeof(struct thread_data);
- fd->sk_out = sk_out;
- ret = thread_main(fd);
- shmdt(data);
- return (int) (uintptr_t) ret;
-}
-
static void dump_td_info(struct thread_data *td)
{
- log_err("fio: job '%s' hasn't exited in %lu seconds, it appears to "
- "be stuck. Doing forceful exit of this job.\n", td->o.name,
+ log_err("fio: job '%s' (state=%d) hasn't exited in %lu seconds, it "
+ "appears to be stuck. Doing forceful exit of this job.\n",
+ td->o.name, td->runstate,
(unsigned long) time_since_now(&td->terminate_time));
}
* move on.
*/
if (td->terminate &&
+ td->runstate < TD_FSYNCING &&
time_since_now(&td->terminate_time) >= FIO_REAP_TIMEOUT) {
dump_td_info(td);
td_set_runstate(td, TD_REAPED);
if (is_backend) {
void *data;
- int ver;
ret = fio_server_get_verify_state(td->o.name,
- td->thread_number - 1, &data, &ver);
+ td->thread_number - 1, &data);
if (!ret)
- verify_convert_assign_state(td, data, ver);
+ verify_assign_state(td, data);
} else
ret = verify_load_state(td, "local");
struct thread_data *map[REAL_MAX_JOBS];
struct timeval this_start;
int this_jobs = 0, left;
+ struct fork_data *fd;
/*
* create threads (TD_NOT_CREATED -> TD_CREATED)
map[this_jobs++] = td;
nr_started++;
+ fd = calloc(1, sizeof(*fd));
+ fd->td = td;
+ fd->sk_out = sk_out;
+
if (td->o.use_thread) {
- struct fork_data *fd;
int ret;
- fd = calloc(1, sizeof(*fd));
- fd->td = td;
- fd->sk_out = sk_out;
-
dprint(FD_PROCESS, "will pthread_create\n");
ret = pthread_create(&td->thread, NULL,
thread_main, fd);
dprint(FD_PROCESS, "will fork\n");
pid = fork();
if (!pid) {
- int ret = fork_main(sk_out, shm_id, i);
+ int ret;
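+ /*
+ * The forked child inherits the shared memory holding the
+ * thread_data, so it can run thread_main() directly.
+ */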
+ ret = (int)(uintptr_t)thread_main(fd);
_exit(ret);
} else if (i == fio_debug_jobno)
*fio_debug_jobp = pid;
update_io_ticks();
}
-static void wait_for_helper_thread_exit(void)
-{
- void *ret;
-
- helper_exit = 1;
- pthread_cond_signal(&helper_cond);
- pthread_join(helper_thread, &ret);
-}
-
static void free_disk_util(void)
{
disk_util_prune_entries();
-
- pthread_cond_destroy(&helper_cond);
-}
-
-static void *helper_thread_main(void *data)
-{
- struct sk_out *sk_out = data;
- int ret = 0;
-
- sk_out_assign(sk_out);
-
- fio_mutex_up(startup_mutex);
-
- while (!ret) {
- uint64_t sec = DISK_UTIL_MSEC / 1000;
- uint64_t nsec = (DISK_UTIL_MSEC % 1000) * 1000000;
- struct timespec ts;
- struct timeval tv;
-
- gettimeofday(&tv, NULL);
- ts.tv_sec = tv.tv_sec + sec;
- ts.tv_nsec = (tv.tv_usec * 1000) + nsec;
-
- if (ts.tv_nsec >= 1000000000ULL) {
- ts.tv_nsec -= 1000000000ULL;
- ts.tv_sec++;
- }
-
- pthread_cond_timedwait(&helper_cond, &helper_lock, &ts);
-
- ret = update_io_ticks();
-
- if (helper_do_stat) {
- helper_do_stat = 0;
- __show_running_run_stats();
- }
-
- if (!is_backend)
- print_thread_status();
- }
-
- sk_out_drop();
- return NULL;
-}
-
-static int create_helper_thread(struct sk_out *sk_out)
-{
- int ret;
-
- setup_disk_util();
-
- pthread_cond_init(&helper_cond, NULL);
- pthread_mutex_init(&helper_lock, NULL);
-
- ret = pthread_create(&helper_thread, NULL, helper_thread_main, sk_out);
- if (ret) {
- log_err("Can't create helper thread: %s\n", strerror(ret));
- return 1;
- }
-
- dprint(FD_MUTEX, "wait on startup_mutex\n");
- fio_mutex_down(startup_mutex);
- dprint(FD_MUTEX, "done waiting on startup_mutex\n");
- return 0;
+ helper_thread_destroy();
}
int fio_backend(struct sk_out *sk_out)
set_genesis_time();
stat_init();
- create_helper_thread(sk_out);
+ helper_thread_create(startup_mutex, sk_out);
cgroup_list = smalloc(sizeof(*cgroup_list));
INIT_FLIST_HEAD(cgroup_list);
run_threads(sk_out);
- wait_for_helper_thread_exit();
+ helper_thread_exit();
if (!fio_abort) {
__show_run_stats();
for (i = 0; i < DDIR_RWDIR_CNT; i++) {
struct io_log *log = agg_io_log[i];
- flush_log(log, 0);
+ flush_log(log, false);
free_log(log);
}
}