iolog: add option read_iolog_chunked to avoid reading large iologs at once
[fio.git] / backend.c
index e3ff777ba73a478bf344e400fbb2f00a196f0663..8a2b2ab8c2ebb69a50bd8c44a3cb7c1bcbd7be8e 100644
--- a/backend.c
+++ b/backend.c
@@ -50,7 +50,7 @@
 
 static struct fio_sem *startup_sem;
 static struct flist_head *cgroup_list;
-static char *cgroup_mnt;
+static struct cgroup_mnt *cgroup_mnt;
 static int exit_value;
 static volatile int fio_abort;
 static unsigned int nr_process = 0;
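
Separate from the iolog option itself, this first hunk turns the cgroup
mount point from a bare path string into an opaque struct cgroup_mnt, so
cgroup.c can carry extra state alongside the mount path. A minimal sketch
of what such a handle could hold (the field names are an assumption for
illustration, not fio's actual definition):

	/* Hypothetical shape of the opaque mount handle; the real
	 * definition lives in cgroup.c and may differ. */
	struct cgroup_mnt {
		char *path;	/* where the cgroup filesystem is mounted */
		bool cgroup2;	/* assumed flag for a v2 unified hierarchy */
	};
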
@@ -461,7 +461,7 @@ int io_queue_event(struct thread_data *td, struct io_u *io_u, int *ret,
                                *bytes_issued += bytes;
 
                        if (!from_verify)
-                               trim_io_piece(td, io_u);
+                               trim_io_piece(io_u);
 
                        /*
                         * zero read, fail
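
trim_io_piece() loses its thread_data parameter in this hunk; the io piece
can evidently be located from the io_u alone. Mirroring the call sites, the
prototype change in the header would look roughly like this (a sketch of
the before/after, assuming the declaration matches these calls):

	/* before: thread_data was passed but not needed to find the piece */
	void trim_io_piece(struct thread_data *td, struct io_u *io_u);
	/* after */
	void trim_io_piece(struct io_u *io_u);
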
@@ -966,8 +966,10 @@ static void do_io(struct thread_data *td, uint64_t *bytes_done)
                 * Break if we exceeded the bytes. The exception is time
                 * based runs, but we still need to break out of the loop
                 * for those to run verification, if enabled.
+                * Jobs that read from an iolog do not use this stop condition.
                 */
                if (bytes_issued >= total_bytes &&
+                   !td->o.read_iolog_file &&
                    (!td->o.time_based ||
                     (td->o.time_based && td->o.verify != VERIFY_NONE)))
                        break;
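
The new !td->o.read_iolog_file clause keeps iolog-driven jobs running until
the log itself is exhausted: total_bytes is not a meaningful budget when the
workload is replayed from a file. Pulled out as a standalone predicate, the
stop test could read as below (an illustrative extraction assuming fio.h for
the types; fio itself keeps the test inline):

	/* Sketch of the loop-exit test above: stop once the byte budget
	 * is spent, unless the job replays an iolog, or is time_based
	 * without verification. */
	static bool byte_budget_exhausted(struct thread_data *td,
					  uint64_t bytes_issued,
					  uint64_t total_bytes)
	{
		if (bytes_issued < total_bytes)
			return false;
		if (td->o.read_iolog_file)
			return false;	/* the iolog decides when to stop */
		if (td->o.time_based && td->o.verify == VERIFY_NONE)
			return false;	/* time-based run without verify continues */
		return true;
	}
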
@@ -1034,7 +1036,7 @@ static void do_io(struct thread_data *td, uint64_t *bytes_done)
 
                if (td->o.io_submit_mode == IO_MODE_OFFLOAD) {
                        const unsigned long blen = io_u->xfer_buflen;
-                       const enum fio_ddir ddir = acct_ddir(io_u);
+                       const enum fio_ddir __ddir = acct_ddir(io_u);
 
                        if (td->error)
                                break;
@@ -1042,14 +1044,14 @@ static void do_io(struct thread_data *td, uint64_t *bytes_done)
                        workqueue_enqueue(&td->io_wq, &io_u->work);
                        ret = FIO_Q_QUEUED;
 
-                       if (ddir_rw(ddir)) {
-                               td->io_issues[ddir]++;
-                               td->io_issue_bytes[ddir] += blen;
-                               td->rate_io_issue_bytes[ddir] += blen;
+                       if (ddir_rw(__ddir)) {
+                               td->io_issues[__ddir]++;
+                               td->io_issue_bytes[__ddir] += blen;
+                               td->rate_io_issue_bytes[__ddir] += blen;
                        }
 
                        if (should_check_rate(td))
-                               td->rate_next_io_time[ddir] = usec_for_io(td, ddir);
+                               td->rate_next_io_time[__ddir] = usec_for_io(td, __ddir);
 
                } else {
                        ret = io_u_submit(td, io_u);
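
In offload mode the io_u is handed to a worker thread, so its length and
direction are snapshotted into locals before workqueue_enqueue(), presumably
so nothing needs to touch io_u once the worker owns it. The rename from ddir
to __ddir is cosmetic here; the accounting is unchanged. Condensed, the
pattern is (a sketch, not the verbatim fio code):

	/* Capture what the accounting needs from io_u, then give
	 * io_u away to the workqueue. */
	const unsigned long blen = io_u->xfer_buflen;
	const enum fio_ddir __ddir = acct_ddir(io_u);

	workqueue_enqueue(&td->io_wq, &io_u->work);	/* worker owns io_u now */

	if (ddir_rw(__ddir)) {
		td->io_issues[__ddir]++;
		td->io_issue_bytes[__ddir] += blen;
	}
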
@@ -1529,7 +1531,7 @@ static void *thread_main(void *data)
        } else
                td->pid = gettid();
 
-       fio_local_clock_init(o->use_thread);
+       fio_local_clock_init();
 
        dprint(FD_PROCESS, "jobs pid=%d started\n", (int) td->pid);
 
@@ -1886,7 +1888,7 @@ err:
        close_and_free_files(td);
        cleanup_io_u(td);
        close_ioengine(td);
-       cgroup_shutdown(td, &cgroup_mnt);
+       cgroup_shutdown(td, cgroup_mnt);
        verify_free_state(td);
 
        if (td->zone_state_index) {
@@ -1909,6 +1911,8 @@ err:
         */
        if (o->write_iolog_file)
                write_iolog_close(td);
+       if (td->io_log_rfile)
+               fclose(td->io_log_rfile);
 
        td_set_runstate(td, TD_EXITED);
 
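The fclose() added above pairs with the read_iolog_chunked option named in
the commit title: rather than parsing the whole iolog up front, the file
handle stays open in td->io_log_rfile and entries are pulled in batches as
the job consumes them, so the handle must be released at thread exit. The
core idea, as a minimal self-contained sketch (refill_iolog_chunk and the
chunk size are illustrative, not fio's actual iolog.c API):

	#include <stdio.h>
	#include <stddef.h>

	#define IOLOG_CHUNK 64	/* entries per refill; illustrative value */

	/* Read up to 'max' raw iolog lines into 'lines'. Returns the
	 * number read; 0 means the log is exhausted and the caller
	 * may fclose() the handle. */
	static size_t refill_iolog_chunk(FILE *rfile, char lines[][256],
					 size_t max)
	{
		size_t n = 0;

		while (n < max && fgets(lines[n], sizeof(lines[n]), rfile))
			n++;
		return n;
	}
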
@@ -2508,7 +2512,6 @@ int fio_backend(struct sk_out *sk_out)
                cgroup_kill(cgroup_list);
                sfree(cgroup_list);
        }
-       sfree(cgroup_mnt);
 
        fio_sem_remove(startup_sem);
        stat_exit();
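
Dropping sfree(cgroup_mnt) here completes the type change from the first
hunk: the handle is now passed to cgroup_shutdown() by value, which suggests
cgroup.c owns and frees it. Under that assumption, the teardown would end
roughly like this (an assumed sketch, not fio's verbatim code):

	/* Assumed ownership: cgroup.c allocates the handle on first
	 * use and frees it during shutdown, so backend.c no longer
	 * frees a bare path string itself. */
	void cgroup_shutdown(struct thread_data *td, struct cgroup_mnt *mnt)
	{
		/* ... move the task back out of its cgroup ... */
		if (!mnt)
			return;
		free(mnt->path);
		free(mnt);
	}
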