- struct fio_file *f;
- struct io_u *io_u;
- int ret, min_events;
- unsigned int i;
-
- /*
- * Sync IO first and invalidate the cache, to make sure we really
- * read from disk.
- */
- for_each_file(td, f, i) {
- if (!(f->flags & FIO_FILE_OPEN))
- continue;
- if (fio_io_sync(td, f))
- break;
- if (file_invalidate_cache(td, f))
- break;
- }
-
- if (td->error)
- return;
-
- td_set_runstate(td, TD_VERIFYING);
-
- io_u = NULL;
- while (!td->terminate) {
- int ret2;
-
- io_u = __get_io_u(td);
- if (!io_u)
- break;
-
- if (runtime_exceeded(td, &io_u->start_time)) {
- put_io_u(td, io_u);
- td->terminate = 1;
- break;
- }
-
- if (get_next_verify(td, io_u)) {
- put_io_u(td, io_u);
- break;
- }
-
- if (td_io_prep(td, io_u)) {
- put_io_u(td, io_u);
- break;
- }
-
- io_u->end_io = verify_io_u;
-
- ret = td_io_queue(td, io_u);
- switch (ret) {
- case FIO_Q_COMPLETED:
- if (io_u->error)
- ret = -io_u->error;
- else if (io_u->resid) {
- int bytes = io_u->xfer_buflen - io_u->resid;
- struct fio_file *f = io_u->file;
-
- /*
- * zero read, fail
- */
- if (!bytes) {
- td_verror(td, ENODATA, "full resid");
- put_io_u(td, io_u);
- break;
- }
-
- io_u->xfer_buflen = io_u->resid;
- io_u->xfer_buf += bytes;
- io_u->offset += bytes;
- f->last_completed_pos = io_u->offset;
-
- td->ts.short_io_u[io_u->ddir]++;
-
- if (io_u->offset == f->real_file_size)
- goto sync_done;
-
- requeue_io_u(td, &io_u);
- } else {
-sync_done:
- ret = io_u_sync_complete(td, io_u);
- if (ret < 0)
- break;
- }
- continue;
- case FIO_Q_QUEUED:
- break;
- case FIO_Q_BUSY:
- requeue_io_u(td, &io_u);
- ret2 = td_io_commit(td);
- if (ret2 < 0)
- ret = ret2;
- break;
- default:
- assert(ret < 0);
- td_verror(td, -ret, "td_io_queue");
- break;
- }
-
- if (ret < 0 || td->error)
- break;
-
- /*
- * If we can queue more, do so. But check if there are
- * completed io_u's first.
- */
- min_events = 0;
- if (queue_full(td) || ret == FIO_Q_BUSY) {
- min_events = 1;
-
- if (td->cur_depth > td->o.iodepth_low)
- min_events = td->cur_depth - td->o.iodepth_low;
- }
-
- /*
- * Reap the required number of io units, if any, and do the
- * verification on them through the callback handler.
- */
- if (io_u_queued_complete(td, min_events) < 0)
- break;
- }
-
- if (!td->error) {
- min_events = td->cur_depth;
-
- if (min_events)
- ret = io_u_queued_complete(td, min_events);
- } else
- cleanup_pending_aio(td);
-
- td_set_runstate(td, TD_RUNNING);
-}
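
Both the verify loop above and do_io() below drive every request through the same small state machine around td_io_queue(). A minimal sketch of that dispatch, using the FIO_Q_* return codes and internal helpers (td_io_queue(), io_u_sync_complete(), requeue_io_u(), td_io_commit()) exactly as the removed code does:

    ret = td_io_queue(td, io_u);
    switch (ret) {
    case FIO_Q_COMPLETED:
        /* engine did the IO inline (sync engines): reap it now */
        ret = io_u_sync_complete(td, io_u);
        break;
    case FIO_Q_QUEUED:
        /* an async engine owns the io_u: reap it later */
        break;
    case FIO_Q_BUSY:
        /* engine had no room: push back, flush, and retry */
        requeue_io_u(td, &io_u);
        ret = td_io_commit(td);
        break;
    }
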
-
-/*
- * Main IO worker function. It retrieves io_u's to process, then queues
- * and reaps them, checking for rate limits and errors along the way.
- */
-static void do_io(struct thread_data *td)
-{
- struct timeval s;
- unsigned long usec;
- unsigned int i;
- int ret = 0;
-
- td_set_runstate(td, TD_RUNNING);
-
- while ((td->this_io_bytes[0] + td->this_io_bytes[1]) < td->o.size) {
- struct timeval comp_time;
- long bytes_done = 0;
- int min_evts = 0;
- struct io_u *io_u;
- int ret2;
-
- if (td->terminate)
- break;
-
- io_u = get_io_u(td);
- if (!io_u)
- break;
-
- memcpy(&s, &io_u->start_time, sizeof(s));
-
- if (runtime_exceeded(td, &s)) {
- put_io_u(td, io_u);
- td->terminate = 1;
- break;
- }
-
- ret = td_io_queue(td, io_u);
- switch (ret) {
- case FIO_Q_COMPLETED:
- if (io_u->error)
- ret = -io_u->error;
- else if (io_u->resid) {
- int bytes = io_u->xfer_buflen - io_u->resid;
- struct fio_file *f = io_u->file;
-
- /*
- * zero read, fail
- */
- if (!bytes) {
- td_verror(td, ENODATA, "full resid");
- put_io_u(td, io_u);
- break;
- }
-
- io_u->xfer_buflen = io_u->resid;
- io_u->xfer_buf += bytes;
- io_u->offset += bytes;
- f->last_completed_pos = io_u->offset;
-
- td->ts.short_io_u[io_u->ddir]++;
-
- if (io_u->offset == f->real_file_size)
- goto sync_done;
-
- requeue_io_u(td, &io_u);
- } else {
-sync_done:
- fio_gettime(&comp_time, NULL);
- bytes_done = io_u_sync_complete(td, io_u);
- if (bytes_done < 0)
- ret = bytes_done;
- }
- break;
- case FIO_Q_QUEUED:
- /*
- * If the engine doesn't have a commit hook, the io_u is
- * really queued. If it does have such a hook, it has to
- * call io_u_queued() itself.
- */
- if (td->io_ops->commit == NULL)
- io_u_queued(td, io_u);
- break;
- case FIO_Q_BUSY:
- requeue_io_u(td, &io_u);
- ret2 = td_io_commit(td);
- if (ret2 < 0)
- ret = ret2;
- break;
- default:
- assert(ret < 0);
- put_io_u(td, io_u);
- break;
- }
-
- if (ret < 0 || td->error)
- break;
-
- /*
- * See if we need to complete some commands
- */
- if (ret == FIO_Q_QUEUED || ret == FIO_Q_BUSY) {
- min_evts = 0;
- if (queue_full(td) || ret == FIO_Q_BUSY) {
- min_evts = 1;
-
- if (td->cur_depth > td->o.iodepth_low)
- min_evts = td->cur_depth - td->o.iodepth_low;
- }
-
- fio_gettime(&comp_time, NULL);
- bytes_done = io_u_queued_complete(td, min_evts);
- if (bytes_done < 0)
- break;
- }
-
- if (!bytes_done)
- continue;
-
- /*
- * The rate is batched for now; it should work for batches
- * of completions, except the very first one, which may look
- * a little bursty.
- */
- usec = utime_since(&s, &comp_time);
-
- rate_throttle(td, usec, bytes_done);
-
- if (check_min_rate(td, &comp_time)) {
- if (exitall_on_terminate)
- terminate_threads(td->groupid);
- td_verror(td, ENODATA, "check_min_rate");
- break;
- }
-
- if (td->o.thinktime) {
- unsigned long long b;
-
- b = td->io_blocks[0] + td->io_blocks[1];
- if (!(b % td->o.thinktime_blocks)) {
- int left;
-
- if (td->o.thinktime_spin)
- __usec_sleep(td->o.thinktime_spin);
-
- left = td->o.thinktime - td->o.thinktime_spin;
- if (left)
- usec_sleep(td, left);
- }
- }
- }
-
- if (!td->error) {
- struct fio_file *f;
-
- i = td->cur_depth;
- if (i)
- ret = io_u_queued_complete(td, i);
-
- if (should_fsync(td) && td->o.end_fsync) {
- td_set_runstate(td, TD_FSYNCING);
-
- for_each_file(td, f, i) {
- if (!(f->flags & FIO_FILE_OPEN))
- continue;
- fio_io_sync(td, f);
- }
- }
- } else
- cleanup_pending_aio(td);
-}
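
The min_evts calculation in both loops implements the iodepth_low watermark: when the queue fills, fio does not reap one event at a time but drains the queue down to iodepth_low, so the next submission phase can batch. A worked example with hypothetical option values (iodepth=16, iodepth_low=4):

    /* hypothetical values, for illustration only */
    unsigned int cur_depth = 16;    /* queue_full(): depth == iodepth */
    unsigned int iodepth_low = 4;   /* low watermark from the job file */
    int min_evts = 1;

    if (cur_depth > iodepth_low)
        min_evts = cur_depth - iodepth_low;     /* 16 - 4 = 12 */

    /* io_u_queued_complete(td, 12) then waits for at least 12
     * completions, leaving 4 IOs in flight before queuing resumes. */
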
-
-static void cleanup_io_u(struct thread_data *td)
-{
- struct list_head *entry, *n;
- struct io_u *io_u;
-
- list_for_each_safe(entry, n, &td->io_u_freelist) {
- io_u = list_entry(entry, struct io_u, list);
-
- list_del(&io_u->list);
- free(io_u);
- }
-
- free_io_mem(td);
-}
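
cleanup_io_u() iterates with list_for_each_safe(), the kernel-style macro from fio's list.h, because it deletes the node it is standing on: the macro caches the successor in n before the body runs. The plain iterator would read freed memory when advancing:

    /* broken sketch: entry->next is read after free() */
    list_for_each(entry, &td->io_u_freelist)
        free(list_entry(entry, struct io_u, list));

    /* safe: 'n' already holds the successor when this node is freed */
    list_for_each_safe(entry, n, &td->io_u_freelist) {
        io_u = list_entry(entry, struct io_u, list);
        list_del(&io_u->list);
        free(io_u);
    }
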
-
-/*
- * "randomly" fill the buffer contents
- */
-static void fill_io_buf(struct thread_data *td, struct io_u *io_u, int max_bs)
-{
- long *ptr = io_u->buf;
-
- if (!td->o.zero_buffers) {
- while ((void *) ptr - io_u->buf < max_bs) {
- *ptr = rand() * GOLDEN_RATIO_PRIME;
- ptr++;
- }
- } else
- memset(ptr, 0, max_bs);
-}
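
Multiplying rand() by GOLDEN_RATIO_PRIME is the kernel hashing trick: an odd 64-bit constant near 2^64 divided by the golden ratio smears rand()'s limited entropy across the whole word, so write buffers are not trivially compressible. A standalone sketch of the non-zero path, assuming the kernel's 64-bit constant (fio's own definition may differ by platform):

    #include <stdlib.h>

    #define GOLDEN_RATIO_PRIME 0x9e37fffffffc0001UL  /* assumed value */

    /* hypothetical standalone version of the random fill path */
    static void fill_random(long *buf, unsigned int max_bs)
    {
        long *ptr = buf;

        while ((char *) ptr - (char *) buf < max_bs) {
            *ptr = rand() * GOLDEN_RATIO_PRIME;
            ptr++;
        }
    }
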
-
-static int init_io_u(struct thread_data *td)
-{
- struct io_u *io_u;
- unsigned int max_bs;
- int i, max_units;
- char *p;
-
- if (td->io_ops->flags & FIO_SYNCIO)
- max_units = 1;
- else
- max_units = td->o.iodepth;
-
- max_bs = max(td->o.max_bs[DDIR_READ], td->o.max_bs[DDIR_WRITE]);
- td->orig_buffer_size = (unsigned long long) max_bs * (unsigned long long) max_units;
-
- if (td->o.mem_type == MEM_SHMHUGE || td->o.mem_type == MEM_MMAPHUGE)
- td->orig_buffer_size = (td->orig_buffer_size + td->o.hugepage_size - 1) & ~(td->o.hugepage_size - 1);
-
- if (td->orig_buffer_size != (size_t) td->orig_buffer_size) {
- log_err("fio: IO memory too large. Reduce max_bs or iodepth\n");
- return 1;
- }
-
- if (allocate_io_mem(td))
- return 1;
-
- p = ALIGN(td->orig_buffer);
- for (i = 0; i < max_units; i++) {
- if (td->terminate)
- return 1;
- io_u = malloc(sizeof(*io_u));
- memset(io_u, 0, sizeof(*io_u));
- INIT_LIST_HEAD(&io_u->list);
-
- io_u->buf = p + max_bs * i;
-
- if (td_write(td))
- fill_io_buf(td, io_u, max_bs);
-
- io_u->index = i;
- io_u->flags = IO_U_F_FREE;
- list_add(&io_u->list, &td->io_u_freelist);
- }
-
- io_u_init_timeout();
-
- return 0;
-}
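
The hugepage branch above rounds the buffer size up with the standard power-of-two alignment identity (size + align - 1) & ~(align - 1). Worked through with a hypothetical 4 MiB hugepage_size:

    unsigned long long size = 10ULL << 20;  /* 10 MiB of io_u buffers */
    unsigned long long hp = 4ULL << 20;     /* 4 MiB hugepage_size */

    size = (size + hp - 1) & ~(hp - 1);     /* rounds up to 12 MiB */

    /* This only works because hp is a power of two: ~(hp - 1) masks
     * off the low bits, and adding hp - 1 first forces rounding up. */
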
-
-static int switch_ioscheduler(struct thread_data *td)
-{
- char tmp[256], tmp2[128];
- FILE *f;
- int ret;
-
- if (td->io_ops->flags & FIO_DISKLESSIO)
- return 0;
-
- sprintf(tmp, "%s/queue/scheduler", td->sysfs_root);
-
- f = fopen(tmp, "r+");
- if (!f) {
- if (errno == ENOENT) {
- log_err("fio: os or kernel doesn't support IO scheduler switching\n");
- return 0;
- }
- td_verror(td, errno, "fopen iosched");
- return 1;
- }
-
- /*
- * Set io scheduler.
- */
- ret = fwrite(td->o.ioscheduler, strlen(td->o.ioscheduler), 1, f);
- if (ferror(f) || ret != 1) {
- td_verror(td, errno, "fwrite");
- fclose(f);
- return 1;
- }
-
- rewind(f);
-
- /*
- * Read back and check that the selected scheduler is now the default.
- */
- ret = fread(tmp, 1, sizeof(tmp) - 1, f);
- if (ferror(f)) {
- td_verror(td, errno, "fread");
- fclose(f);
- return 1;
- }
- tmp[ret] = '\0';
-
- sprintf(tmp2, "[%s]", td->o.ioscheduler);
- if (!strstr(tmp, tmp2)) {
- log_err("fio: io scheduler %s not found\n", td->o.ioscheduler);
- td_verror(td, EINVAL, "iosched_switch");
- fclose(f);
- return 1;
- }
-
- fclose(f);
- return 0;
-}
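
The read-back verification depends on the sysfs convention that the active elevator is printed in square brackets: reading <sysfs_root>/queue/scheduler returns a line along the lines of (illustrative output):

    noop anticipatory deadline [cfq]

which is why tmp2 is formatted as "[%s]" and matched with strstr(): it proves the kernel actually switched schedulers, not merely that the fwrite() succeeded. Note the buffer must be NUL-terminated after fread() before it is handed to strstr(), as done above.
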
-
-static int keep_running(struct thread_data *td)
-{
- unsigned long long io_done;
-
- if (td->o.time_based)
- return 1;
- if (td->o.loops) {
- td->o.loops--;
- return 1;
- }