*/
static void cleanup_pending_aio(struct thread_data *td)
{
- struct flist_head *entry, *n;
- struct io_u *io_u;
int r;
/*
* now cancel remaining active events
*/
if (td->io_ops->cancel) {
- flist_for_each_safe(entry, n, &td->io_u_busylist) {
- io_u = flist_entry(entry, struct io_u, list);
+ struct io_u *io_u;
+ int i;
+
- /*
- * if the io_u isn't in flight, then that generally
- * means someone leaked an io_u. complain but fix
- * it up, so we don't stall here.
- */
- if ((io_u->flags & IO_U_F_FLIGHT) == 0) {
- log_err("fio: non-busy IO on busy list\n");
- put_io_u(td, io_u);
- } else {
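+ /*
+ * walk every allocated io_u and cancel the ones still marked
+ * as being in flight
+ */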
+ io_u_qiter(&td->io_u_all, io_u, i) {
+ if (io_u->flags & IO_U_F_FLIGHT) {
r = td->io_ops->cancel(td, io_u);
if (!r)
put_io_u(td, io_u);
if (!(b % td->o.thinktime_blocks)) {
int left;
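+ /*
+ * wait for in-flight IO to complete before sleeping, so its
+ * completion latency isn't inflated by the think time
+ */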
+ io_u_quiesce(td);
+
if (td->o.thinktime_spin)
usec_spin(td->o.thinktime_spin);
check_update_rusage(td);
if (td->trim_entries)
- log_err("fio: %d trim entries leaked?\n", td->trim_entries);
+ log_err("fio: %lu trim entries leaked?\n", td->trim_entries);
if (td->o.fill_device && td->error == ENOSPC) {
td->error = 0;
static void cleanup_io_u(struct thread_data *td)
{
- struct flist_head *entry, *n;
struct io_u *io_u;
- flist_for_each_safe(entry, n, &td->io_u_freelist) {
- io_u = flist_entry(entry, struct io_u, list);
-
- flist_del(&io_u->list);
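+ /*
+ * drain the freelist, letting the IO engine free any private
+ * per-io_u data it attached
+ */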
+ while ((io_u = io_u_qpop(&td->io_u_freelist)) != NULL) {
if (td->io_ops->io_u_free)
td->io_ops->io_u_free(td, io_u);
}
free_io_mem(td);
+
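+ /*
+ * tear down the requeue ring and the queues set up in
+ * init_io_u()
+ */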
+ io_u_rexit(&td->io_u_requeues);
+ io_u_qexit(&td->io_u_freelist);
+ io_u_qexit(&td->io_u_all);
}
static int init_io_u(struct thread_data *td)
struct io_u *io_u;
unsigned int max_bs, min_write;
int cl_align, i, max_units;
- int data_xfer = 1;
+ int data_xfer = 1, err;
char *p;
max_units = td->o.iodepth;
if ((td->io_ops->flags & FIO_NOIO) || !(td_read(td) || td_write(td)))
data_xfer = 0;
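+ /*
+ * the ring/queue init helpers return 0 on success, so any
+ * nonzero sum means at least one allocation failed
+ */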
+ err = 0;
+ err += io_u_rinit(&td->io_u_requeues, td->o.iodepth);
+ err += io_u_qinit(&td->io_u_freelist, td->o.iodepth);
+ err += io_u_qinit(&td->io_u_all, td->o.iodepth);
+
+ if (err) {
+ log_err("fio: failed setting up IO queues\n");
+ return 1;
+ }
+
+ /*
+ * if we may later need to do address alignment, then add any
+ * possible adjustment here so that we don't cause a buffer
+ * overflow later. this adjustment may be too much if we get
+ * lucky and the allocator gives us an aligned address.
+ */
+ if (td->o.odirect || td->o.mem_align || (td->io_ops->flags & FIO_RAWIO))
+ td->orig_buffer_size += page_mask + td->o.mem_align;
+
if (td->o.mem_type == MEM_SHMHUGE || td->o.mem_type == MEM_MMAPHUGE) {
unsigned long bs;
io_u = ptr;
memset(io_u, 0, sizeof(*io_u));
- INIT_FLIST_HEAD(&io_u->list);
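+ /*
+ * the generic io_u list entry is gone; verify is the only
+ * remaining flist user
+ */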
+ INIT_FLIST_HEAD(&io_u->verify_list);
dprint(FD_MEM, "io_u alloc %p, index %u\n", io_u, i);
if (data_xfer) {
io_u->index = i;
io_u->flags = IO_U_F_FREE;
- flist_add(&io_u->list, &td->io_u_freelist);
+ io_u_qpush(&td->io_u_freelist, io_u);
+
+ /*
+ * io_u never leaves this stack, used for iteration of all
+ * io_u buffers.
+ */
+ io_u_qpush(&td->io_u_all, io_u);
if (td->io_ops->io_u_init) {
int ret = td->io_ops->io_u_init(td, io_u);
struct thread_options *o = &td->o;
pthread_condattr_t attr;
int clear_state;
+ int ret;
if (!o->use_thread) {
setsid();
dprint(FD_PROCESS, "jobs pid=%d started\n", (int) td->pid);
- INIT_FLIST_HEAD(&td->io_u_freelist);
- INIT_FLIST_HEAD(&td->io_u_busylist);
- INIT_FLIST_HEAD(&td->io_u_requeues);
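+ /*
+ * in client/server mode, notify the client that this job is
+ * now running
+ */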
+ if (is_backend)
+ fio_server_send_start(td);
+
INIT_FLIST_HEAD(&td->io_log_list);
INIT_FLIST_HEAD(&td->io_hist_list);
INIT_FLIST_HEAD(&td->verify_list);
* eating a file descriptor
*/
fio_mutex_remove(td->mutex);
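+ /* clear the pointer, the mutex is no longer valid */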
+ td->mutex = NULL;
/*
* A new gid requires privilege, so we need to do this before setting
* allocations.
*/
if (o->cpumask_set) {
- if (fio_setaffinity(td->pid, o->cpumask) == -1) {
+ ret = fio_setaffinity(td->pid, o->cpumask);
+ if (ret == -1) {
td_verror(td, errno, "cpu_set_affinity");
goto err;
}
if (o->verify_async && verify_async_init(td))
goto err;
- if (td->ioprio_set) {
- if (ioprio_set(IOPRIO_WHO_PROCESS, 0, td->ioprio) == -1) {
+ if (o->ioprio) {
+ ret = ioprio_set(IOPRIO_WHO_PROCESS, 0, o->ioprio_class, o->ioprio);
+ if (ret == -1) {
td_verror(td, errno, "ioprio_set");
goto err;
}
exit_value++;
done_secs += mtime_since_now(&td->epoch) / 1000;
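+ /* run the profile's per-td exit hook, if one is loaded */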
+ profile_td_exit(td);
}
if (*nr_running == cputhreads && !pending && realthreads)
fio_terminate_threads(TERMINATE_ALL);
}
+static void do_usleep(unsigned int usecs)
+{
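+ /* show interim run stats if they were requested, then sleep */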
+ check_for_running_stats();
+ usleep(usecs);
+}
+
/*
* Main function for kicking off and reaping jobs, as needed.
*/
if (mtime_since_now(&this_start) > JOB_START_TIMEOUT)
break;
- usleep(100000);
+ do_usleep(100000);
for (i = 0; i < this_jobs; i++) {
td = map[i];
reap_threads(&nr_running, &t_rate, &m_rate);
- if (todo) {
- if (is_backend)
- fio_server_idle_loop();
- else
- usleep(100000);
- }
+ if (todo)
+ do_usleep(100000);
}
while (nr_running) {
reap_threads(&nr_running, &t_rate, &m_rate);
-
- if (is_backend)
- fio_server_idle_loop();
- else
- usleep(10000);
+ do_usleep(10000);
}
fio_idle_prof_stop();
return 0;
if (write_bw_log) {
- setup_log(&agg_io_log[DDIR_READ], 0);
- setup_log(&agg_io_log[DDIR_WRITE], 0);
- setup_log(&agg_io_log[DDIR_TRIM], 0);
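+ /* the aggregate logs carry bandwidth samples, so tag them as such */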
+ setup_log(&agg_io_log[DDIR_READ], 0, IO_LOG_TYPE_BW);
+ setup_log(&agg_io_log[DDIR_WRITE], 0, IO_LOG_TYPE_BW);
+ setup_log(&agg_io_log[DDIR_TRIM], 0, IO_LOG_TYPE_BW);
}
startup_mutex = fio_mutex_init(FIO_MUTEX_LOCKED);
return 1;
set_genesis_time();
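+ /* initialize global stat state before any jobs run */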
+ stat_init();
create_disk_util_thread();
cgroup_list = smalloc(sizeof(*cgroup_list));
fio_mutex_remove(startup_mutex);
fio_mutex_remove(writeout_mutex);
fio_mutex_remove(disk_thread_mutex);
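+ /* free the global stat state set up by stat_init() */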
+ stat_exit();
return exit_value;
}