#include <sys/mman.h>
#include "fio.h"
-#include "os.h"
+#include "hash.h"
unsigned long page_mask;
unsigned long page_size;
static inline void td_set_runstate(struct thread_data *td, int runstate)
{
+ dprint(FD_PROCESS, "pid=%d: runstate %d -> %d\n", td->pid, td->runstate,
+ runstate);
td->runstate = runstate;
}
for_each_td(td, i) {
if (group_id == TERMINATE_ALL || group_id == td->groupid) {
+ dprint(FD_PROCESS, "setting terminate on %d\n", td->pid);
/*
* if the thread is running, just let it exit
*/
if (runtime_exceeded(td, &io_u->start_time)) {
put_io_u(td, io_u);
+ td->terminate = 1;
break;
}
ret = -io_u->error;
else if (io_u->resid) {
int bytes = io_u->xfer_buflen - io_u->resid;
+ struct fio_file *f = io_u->file;
/*
* zero read, fail
put_io_u(td, io_u);
break;
}
+
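+ /*
+ * A short transfer was seen. Account for the completed bytes
+ * and requeue the remainder, unless we hit end of file.
+ */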
io_u->xfer_buflen = io_u->resid;
io_u->xfer_buf += bytes;
+ io_u->offset += bytes;
+ f->last_completed_pos = io_u->offset;
+
+ td->ts.short_io_u[io_u->ddir]++;
+
+ if (io_u->offset == f->real_file_size)
+ goto sync_done;
+
requeue_io_u(td, &io_u);
} else {
+sync_done:
ret = io_u_sync_complete(td, io_u);
if (ret < 0)
break;
td_set_runstate(td, TD_RUNNING);
- while ((td->this_io_bytes[0] + td->this_io_bytes[1]) < td->io_size) {
+ while ((td->this_io_bytes[0] + td->this_io_bytes[1]) < td->o.size) {
struct timeval comp_time;
long bytes_done = 0;
int min_evts = 0;
if (runtime_exceeded(td, &s)) {
put_io_u(td, io_u);
+ td->terminate = 1;
break;
}
+ /*
+ * Add verification end_io handler, if asked to verify
+ * a previously written file.
+ */
+ if (td->o.verify != VERIFY_NONE)
+ io_u->end_io = verify_io_u;
+
ret = td_io_queue(td, io_u);
switch (ret) {
case FIO_Q_COMPLETED:
ret = -io_u->error;
else if (io_u->resid) {
int bytes = io_u->xfer_buflen - io_u->resid;
+ struct fio_file *f = io_u->file;
/*
* zero read, fail
io_u->xfer_buflen = io_u->resid;
io_u->xfer_buf += bytes;
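+ /*
+ * Short transfer: advance past the completed bytes and
+ * requeue the rest, unless we reached end of file.
+ */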
+ io_u->offset += bytes;
+ f->last_completed_pos = io_u->offset;
+
+ td->ts.short_io_u[io_u->ddir]++;
+
+ if (io_u->offset == f->real_file_size)
+ goto sync_done;
+
requeue_io_u(td, &io_u);
} else {
+sync_done:
fio_gettime(&comp_time, NULL);
bytes_done = io_u_sync_complete(td, io_u);
if (bytes_done < 0)
}
}
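+ /*
+ * With fill_device set, running out of space is the expected
+ * end of the job, so clear the error and terminate cleanly.
+ */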
+ if (td->o.fill_device && td->error == ENOSPC) {
+ td->error = 0;
+ td->terminate = 1;
+ }
if (!td->error) {
struct fio_file *f;
}
} else
cleanup_pending_aio(td);
+
+ /*
+ * stop job if we failed doing any IO
+ */
+ if ((td->this_io_bytes[0] + td->this_io_bytes[1]) == 0)
+ td->done = 1;
}
static void cleanup_io_u(struct thread_data *td)
/*
- * "randomly" fill the buffer contents
+ * Fill the buffer contents with pseudo-random data, or zero the
+ * buffer if zero_buffers is set.
 */
-static void fill_rand_buf(struct io_u *io_u, int max_bs)
+static void fill_io_buf(struct thread_data *td, struct io_u *io_u, int max_bs)
{
- int *ptr = io_u->buf;
+ long *ptr = io_u->buf;
- while ((void *) ptr - io_u->buf < max_bs) {
- *ptr = rand() * 0x9e370001;
- ptr++;
- }
+ if (!td->o.zero_buffers) {
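+ /*
+ * Multiply by a large prime to scatter the rand() bits
+ * across the whole word.
+ */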
+ while ((void *) ptr - io_u->buf < max_bs) {
+ *ptr = rand() * GOLDEN_RATIO_PRIME;
+ ptr++;
+ }
+ } else
+ memset(ptr, 0, max_bs);
}
static int init_io_u(struct thread_data *td)
{
- unsigned long long buf_size;
struct io_u *io_u;
unsigned int max_bs;
int i, max_units;
max_units = td->o.iodepth;
max_bs = max(td->o.max_bs[DDIR_READ], td->o.max_bs[DDIR_WRITE]);
- buf_size = (unsigned long long) max_bs * (unsigned long long) max_units;
- buf_size += page_mask;
- if (buf_size != (size_t) buf_size) {
- log_err("fio: IO memory too large. Reduce max_bs or iodepth\n");
- return 1;
- }
-
- td->orig_buffer_size = buf_size;
+ td->orig_buffer_size = (unsigned long long) max_bs * (unsigned long long) max_units;
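+ /*
+ * Huge page backed memory must be a multiple of the huge page
+ * size, so round the buffer size up if needed.
+ */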
if (td->o.mem_type == MEM_SHMHUGE || td->o.mem_type == MEM_MMAPHUGE)
td->orig_buffer_size = (td->orig_buffer_size + td->o.hugepage_size - 1) & ~(td->o.hugepage_size - 1);
- else if (td->orig_buffer_size & page_mask)
- td->orig_buffer_size = (td->orig_buffer_size + page_mask) & ~page_mask;
+
+ if (td->orig_buffer_size != (size_t) td->orig_buffer_size) {
+ log_err("fio: IO memory too large. Reduce max_bs or iodepth\n");
+ return 1;
+ }
if (allocate_io_mem(td))
return 1;
- p = ALIGN(td->orig_buffer);
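+ /*
+ * O_DIRECT needs a suitably aligned buffer, buffered IO can use
+ * the allocation as-is.
+ */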
+ if (td->o.odirect)
+ p = ALIGN(td->orig_buffer);
+ else
+ p = td->orig_buffer;
+
for (i = 0; i < max_units; i++) {
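+ /* stop setting up io_u's if we were asked to terminate */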
+ if (td->terminate)
+ return 1;
io_u = malloc(sizeof(*io_u));
memset(io_u, 0, sizeof(*io_u));
INIT_LIST_HEAD(&io_u->list);
- io_u->buf = p + max_bs * i;
- if (td_write(td) || td_rw(td))
- fill_rand_buf(io_u, max_bs);
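+ /*
+ * Engines flagged FIO_NOIO transfer no data, so they need
+ * no IO buffer set up.
+ */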
+ if (!(td->io_ops->flags & FIO_NOIO)) {
+ io_u->buf = p + max_bs * i;
+
+ if (td_write(td))
+ fill_io_buf(td, io_u, max_bs);
+ }
io_u->index = i;
io_u->flags = IO_U_F_FREE;
f = fopen(tmp, "r+");
if (!f) {
- td_verror(td, errno, "fopen");
+ if (errno == ENOENT) {
+ log_err("fio: os or kernel doesn't support IO scheduler switching\n");
+ return 0;
+ }
+ td_verror(td, errno, "fopen iosched");
return 1;
}
return 0;
}
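+/*
+ * Return 1 if this job should keep running: time based jobs run until
+ * their runtime expires, looping jobs consume a loop count, and all
+ * others run until the requested amount of IO has completed.
+ */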
+static int keep_running(struct thread_data *td)
+{
+ unsigned long long io_done;
+
+ if (td->done)
+ return 0;
+ if (td->o.time_based)
+ return 1;
+ if (td->o.loops) {
+ td->o.loops--;
+ return 1;
+ }
+
+ io_done = td->io_bytes[DDIR_READ] + td->io_bytes[DDIR_WRITE] + td->io_skip_bytes;
+ if (io_done < td->o.size)
+ return 1;
+
+ return 0;
+}
+
static int clear_io_state(struct thread_data *td)
{
struct fio_file *f;
td->last_was_sync = 0;
+ /*
+ * reset file done count if we are to start over
+ */
+ if (td->o.time_based || td->o.loops)
+ td->nr_done_files = 0;
+
for_each_file(td, f, i)
td_io_close_file(td, f);
ret = 0;
for_each_file(td, f, i) {
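+ /* clear the done flag, the file is about to be reused */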
+ f->flags &= ~FIO_FILE_DONE;
ret = td_io_open_file(td, f);
if (ret)
break;
*/
static void *thread_main(void *data)
{
- unsigned long long runtime[2];
+ unsigned long long runtime[2], elapsed;
struct thread_data *td = data;
- unsigned long elapsed;
int clear_state;
if (!td->o.use_thread)
td->pid = getpid();
+ dprint(FD_PROCESS, "job pid=%d started\n", td->pid);
+
INIT_LIST_HEAD(&td->io_u_freelist);
INIT_LIST_HEAD(&td->io_u_busylist);
INIT_LIST_HEAD(&td->io_u_requeues);
INIT_LIST_HEAD(&td->io_log_list);
+ INIT_LIST_HEAD(&td->io_hist_list);
td->io_hist_tree = RB_ROOT;
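+ /*
+ * Tell the main thread we are initialized, then wait for the
+ * signal to actually start running.
+ */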
+ td_set_runstate(td, TD_INITIALIZED);
+ fio_sem_up(startup_sem);
+ fio_sem_down(td->mutex);
+
+ /*
+ * the ->mutex semaphore is now no longer used, close it to avoid
+ * eating a file descriptor
+ */
+ fio_sem_remove(td->mutex);
+
+ /*
+ * May alter parameters that init_io_u() will use, so we need to
+ * do this first.
+ */
+ if (init_iolog(td))
+ goto err;
+
if (init_io_u(td))
- goto err_sem;
+ goto err;
- if (fio_setaffinity(td) == -1) {
+ if (td->o.cpumask_set && fio_setaffinity(td) == -1) {
td_verror(td, errno, "cpu_set_affinity");
- goto err_sem;
+ goto err;
}
- if (init_iolog(td))
- goto err_sem;
-
- if (td->ioprio) {
+ if (td->ioprio_set) {
if (ioprio_set(IOPRIO_WHO_PROCESS, 0, td->ioprio) == -1) {
td_verror(td, errno, "ioprio_set");
- goto err_sem;
+ goto err;
}
}
if (nice(td->o.nice) == -1) {
td_verror(td, errno, "nice");
- goto err_sem;
+ goto err;
}
if (td->o.ioscheduler && switch_ioscheduler(td))
- goto err_sem;
-
- td_set_runstate(td, TD_INITIALIZED);
- fio_sem_up(startup_sem);
- fio_sem_down(td->mutex);
-
- /*
- * the ->mutex semaphore is now no longer used, close it to avoid
- * eating a file descriptor
- */
- fio_sem_remove(td->mutex);
+ goto err;
if (!td->o.create_serialize && setup_files(td))
goto err;
runtime[0] = runtime[1] = 0;
clear_state = 0;
- while (td->o.loops--) {
+ while (keep_running(td)) {
fio_gettime(&td->start, NULL);
memcpy(&td->ts.stat_sample_time, &td->start, sizeof(td->start));
if (td->error || td->terminate)
break;
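+ /*
+ * Skip the verify pass if verification is disabled, or if the
+ * IO engine only works in one direction.
+ */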
- if (td->o.verify == VERIFY_NONE)
+ if (!td->o.do_verify ||
+ td->o.verify == VERIFY_NONE ||
+ (td->io_ops->flags & FIO_UNIDIR))
continue;
if (clear_io_state(td))
}
update_rusage_stat(td);
- td->ts.runtime[0] = runtime[0] / 1000;
- td->ts.runtime[1] = runtime[1] / 1000;
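+ /* usec to msec, rounded up so short runs don't show 0 runtime */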
+ td->ts.runtime[0] = (runtime[0] + 999) / 1000;
+ td->ts.runtime[1] = (runtime[1] + 999) / 1000;
td->ts.total_run_time = mtime_since_now(&td->epoch);
td->ts.io_bytes[0] = td->io_bytes[0];
td->ts.io_bytes[1] = td->io_bytes[1];
finish_log(td, td->ts.slat_log, "slat");
if (td->ts.clat_log)
finish_log(td, td->ts.clat_log, "clat");
- if (td->o.write_iolog_file)
- write_iolog_close(td);
if (td->o.exec_postrun) {
if (system(td->o.exec_postrun) < 0)
log_err("fio: postrun %s failed\n", td->o.exec_postrun);
close_files(td);
close_ioengine(td);
cleanup_io_u(td);
+
+ /*
+ * do this very late, it will log file closing as well
+ */
+ if (td->o.write_iolog_file)
+ write_iolog_close(td);
+
options_mem_free(td);
td_set_runstate(td, TD_EXITED);
return (void *) (unsigned long) td->error;
-err_sem:
- fio_sem_up(startup_sem);
- goto err;
}
/*
static void reap_threads(int *nr_running, int *t_rate, int *m_rate)
{
struct thread_data *td;
- int i, cputhreads, pending, status, ret;
+ int i, cputhreads, realthreads, pending, status, ret;
/*
* reap exited threads (TD_EXITED -> TD_REAPED)
*/
- pending = cputhreads = 0;
+ realthreads = pending = cputhreads = 0;
for_each_td(td, i) {
int flags = 0;
*/
if (td->io_ops && !strcmp(td->io_ops->name, "cpuio"))
cputhreads++;
+ else
+ realthreads++;
if (!td->pid || td->runstate == TD_REAPED)
continue;
/*
* thread is not dead, continue
*/
+ pending++;
continue;
reaped:
if (td->o.use_thread) {
long ret;
- if (pthread_join(td->thread, (void *) &ret))
+ dprint(FD_PROCESS, "joining thread %d\n", td->pid);
+ if (pthread_join(td->thread, (void *) &ret)) {
+ dprint(FD_PROCESS, "join failed on %d\n", td->pid);
perror("pthread_join");
+ }
}
(*nr_running)--;
(*m_rate) -= td->o.ratemin;
(*t_rate) -= td->o.rate;
+ pending--;
if (td->error)
exit_value++;
}
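+ /*
+ * If only cpu burn threads (cpuio) are left running, stop them
+ * too, but don't do so for workloads consisting purely of
+ * cpuio jobs.
+ */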
- if (*nr_running == cputhreads && !pending)
+ if (*nr_running == cputhreads && !pending && realthreads)
terminate_threads(TERMINATE_ALL);
}
nr_started++;
if (td->o.use_thread) {
+ dprint(FD_PROCESS, "will pthread_create\n");
if (pthread_create(&td->thread, NULL, thread_main, td)) {
perror("thread_create");
nr_started--;
break;
}
} else {
+ dprint(FD_PROCESS, "will fork\n");
if (!fork()) {
int ret = fork_main(shm_id, i);
if (parse_options(argc, argv))
return 1;
- if (!thread_number) {
- log_err("Nothing to do\n");
- return 1;
- }
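+ /* an empty job file isn't an error, there is just nothing to do */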
+ if (!thread_number)
+ return 0;
ps = sysconf(_SC_PAGESIZE);
if (ps < 0) {