Add a 'continue_on_error' option to fio
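
With this option set, an I/O error that td_non_fatal_error() classifies as
survivable no longer ends the job: io_completed() adds the error to the
thread's error count via update_error_count(), clears the error state with
td_clear_error(), and lets the workload carry on.  To support callers that
now need to tell "fatal error" apart from "bytes completed", the completion
helpers change signature: io_u_sync_complete() and io_u_queued_complete()
return 0 or -1 and report per-direction byte counts through a caller-supplied
array, and a clear_io_u() helper drops the IO_U_F_FLIGHT flag before
put_io_u(), presumably so a failed in-flight io_u can be recycled.  The
option itself is presumably enabled with continue_on_error=1 in the job file.

A rough sketch (not part of this patch) of how a caller might consume the
new completion interface; the surrounding loop and variable names are
illustrative assumptions:

	unsigned long bytes_done[2] = { 0, 0 };

	/* reap at least one completion; on failure td_verror() has already
	 * recorded the error inside io_u_queued_complete() */
	if (io_u_queued_complete(td, 1, bytes_done) < 0)
		return 1;

	/* bytes_done[DDIR_READ] / bytes_done[DDIR_WRITE] now hold the bytes
	 * completed in each direction */
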
diff --git a/io_u.c b/io_u.c
index 47c02e0ad135dea4da458da5e0116084dd7885d7..276f3b0cd2ddf41c3a007e7a4e3aa74255a2a42a 100644
--- a/io_u.c
+++ b/io_u.c
@@ -7,6 +7,7 @@
 
 #include "fio.h"
 #include "hash.h"
+#include "verify.h"
 
 struct io_completion_data {
        int nr;                         /* input */
@@ -306,6 +307,53 @@ static inline enum fio_ddir get_rand_ddir(struct thread_data *td)
        return DDIR_WRITE;
 }
 
+static enum fio_ddir rate_ddir(struct thread_data *td, enum fio_ddir ddir)
+{
+       enum fio_ddir odir = ddir ^ 1;
+       struct timeval t;
+       long usec;
+
+       if (td->rate_pending_usleep[ddir] <= 0)
+               return ddir;
+
+       /*
+        * We have too much pending sleep in this direction. See if we
+        * should switch.
+        */
+       if (td_rw(td)) {
+               /*
+                * Other direction does not have too much pending, switch
+                */
+               if (td->rate_pending_usleep[odir] < 100000)
+                       return odir;
+
+               /*
+                * Both directions have pending sleep. Sleep the minimum time
+                * and deduct from both.
+                */
+               if (td->rate_pending_usleep[ddir] <=
+                       td->rate_pending_usleep[odir]) {
+                       usec = td->rate_pending_usleep[ddir];
+               } else {
+                       usec = td->rate_pending_usleep[odir];
+                       ddir = odir;
+               }
+       } else
+               usec = td->rate_pending_usleep[ddir];
+
+       fio_gettime(&t, NULL);
+       usec_sleep(td, usec);
+       usec = utime_since_now(&t);
+
+       td->rate_pending_usleep[ddir] -= usec;
+
+       odir = ddir ^ 1;
+       if (td_rw(td) && __should_check_rate(td, odir))
+               td->rate_pending_usleep[odir] -= usec;
+
+       return ddir;
+}
+
 /*
  * Return the data direction for the next io_u. If the job is a
  * mixed read/write workload, check the rwmix cycle and switch if
@@ -313,41 +361,33 @@ static inline enum fio_ddir get_rand_ddir(struct thread_data *td)
  */
 static enum fio_ddir get_rw_ddir(struct thread_data *td)
 {
+       enum fio_ddir ddir;
+
        if (td_rw(td)) {
                /*
                 * Check if it's time to seed a new data direction.
                 */
                if (td->io_issues[td->rwmix_ddir] >= td->rwmix_issues) {
-                       unsigned long long max_bytes;
-                       enum fio_ddir ddir;
-
                        /*
                         * Put a top limit on how many bytes we do for
                         * one data direction, to avoid overflowing the
                         * ranges too much
                         */
                        ddir = get_rand_ddir(td);
-                       max_bytes = td->this_io_bytes[ddir];
-                       if (max_bytes >=
-                           (td->o.size * td->o.rwmix[ddir] / 100)) {
-                               if (!td->rw_end_set[ddir]) {
-                                       td->rw_end_set[ddir] = 1;
-                                       fio_gettime(&td->rw_end[ddir], NULL);
-                               }
-
-                               ddir ^= 1;
-                       }
 
                        if (ddir != td->rwmix_ddir)
                                set_rwmix_bytes(td);
 
                        td->rwmix_ddir = ddir;
                }
-               return td->rwmix_ddir;
+               ddir = td->rwmix_ddir;
        } else if (td_read(td))
-               return DDIR_READ;
+               ddir = DDIR_READ;
        else
-               return DDIR_WRITE;
+               ddir = DDIR_WRITE;
+
+       td->rwmix_ddir = rate_ddir(td, ddir);
+       return td->rwmix_ddir;
 }
 
 static void put_file_log(struct thread_data *td, struct fio_file *f)
@@ -372,6 +412,12 @@ void put_io_u(struct thread_data *td, struct io_u *io_u)
        td->cur_depth--;
 }
 
+void clear_io_u(struct thread_data *td, struct io_u *io_u)
+{
+       io_u->flags &= ~IO_U_F_FLIGHT;
+       put_io_u(td, io_u);
+}
+
 void requeue_io_u(struct thread_data *td, struct io_u **io_u)
 {
        struct io_u *__io_u = *io_u;
@@ -621,8 +667,9 @@ static void io_u_mark_latency(struct thread_data *td, unsigned long usec)
 /*
  * Get next file to service by choosing one at random
  */
-static struct fio_file *get_next_file_rand(struct thread_data *td, int goodf,
-                                          int badf)
+static struct fio_file *get_next_file_rand(struct thread_data *td,
+                                          enum fio_file_flags goodf,
+                                          enum fio_file_flags badf)
 {
        struct fio_file *f;
        int fno;
@@ -634,10 +681,10 @@ static struct fio_file *get_next_file_rand(struct thread_data *td, int goodf,
                fno = (unsigned int) ((double) td->o.nr_files
                        * (r / (OS_RAND_MAX + 1.0)));
                f = td->files[fno];
-               if (f->flags & FIO_FILE_DONE)
+               if (fio_file_done(f))
                        continue;
 
-               if (!(f->flags & FIO_FILE_OPEN)) {
+               if (!fio_file_open(f)) {
                        int err;
 
                        err = td_io_open_file(td, f);
@@ -674,12 +721,12 @@ static struct fio_file *get_next_file_rr(struct thread_data *td, int goodf,
                        td->next_file = 0;
 
                dprint(FD_FILE, "trying file %s %x\n", f->file_name, f->flags);
-               if (f->flags & FIO_FILE_DONE) {
+               if (fio_file_done(f)) {
                        f = NULL;
                        continue;
                }
 
-               if (!(f->flags & FIO_FILE_OPEN)) {
+               if (!fio_file_open(f)) {
                        int err;
 
                        err = td_io_open_file(td, f);
@@ -721,7 +768,7 @@ static struct fio_file *get_next_file(struct thread_data *td)
        }
 
        f = td->file_service_file;
-       if (f && (f->flags & FIO_FILE_OPEN) && !(f->flags & FIO_FILE_CLOSING)) {
+       if (f && fio_file_open(f) && !fio_file_closing(f)) {
                if (td->o.file_service_type == FIO_FSERVICE_SEQ)
                        goto out;
                if (td->file_service_left--)
@@ -730,9 +777,9 @@ static struct fio_file *get_next_file(struct thread_data *td)
 
        if (td->o.file_service_type == FIO_FSERVICE_RR ||
            td->o.file_service_type == FIO_FSERVICE_SEQ)
-               f = get_next_file_rr(td, FIO_FILE_OPEN, FIO_FILE_CLOSING);
+               f = get_next_file_rr(td, FIO_FILE_open, FIO_FILE_closing);
        else
-               f = get_next_file_rand(td, FIO_FILE_OPEN, FIO_FILE_CLOSING);
+               f = get_next_file_rand(td, FIO_FILE_open, FIO_FILE_closing);
 
        td->file_service_file = f;
        td->file_service_left = td->file_service_nr - 1;
@@ -759,7 +806,7 @@ static int set_io_u_file(struct thread_data *td, struct io_u *io_u)
                put_file_log(td, f);
                td_io_close_file(td, f);
                io_u->file = NULL;
-               f->flags |= FIO_FILE_DONE;
+               fio_file_set_done(f);
                td->nr_done_files++;
                dprint(FD_FILE, "%s: is done (%d of %d)\n", f->file_name, td->nr_done_files, td->o.nr_files);
        } while (1);
@@ -829,7 +876,7 @@ struct io_u *get_io_u(struct thread_data *td)
        }
 
        f = io_u->file;
-       assert(f->flags & FIO_FILE_OPEN);
+       assert(fio_file_open(f));
 
        if (io_u->ddir != DDIR_SYNC) {
                if (!io_u->buflen && !(td->io_ops->flags & FIO_NOIO)) {
@@ -912,16 +959,29 @@ static void io_completed(struct thread_data *td, struct io_u *io_u,
                td->this_io_bytes[idx] += bytes;
 
                if (ramp_time_over(td)) {
+                       unsigned long uninitialized_var(lusec);
+                       unsigned long uninitialized_var(rusec);
+
                        if (!td->o.disable_clat || !td->o.disable_bw)
-                               usec = utime_since(&io_u->issue_time,
+                               lusec = utime_since(&io_u->issue_time,
+                                                       &icd->time);
+                       if (__should_check_rate(td, idx) ||
+                           __should_check_rate(td, idx ^ 1))
+                               rusec = utime_since(&io_u->start_time,
                                                        &icd->time);
 
                        if (!td->o.disable_clat) {
-                               add_clat_sample(td, idx, usec, bytes);
-                               io_u_mark_latency(td, usec);
+                               add_clat_sample(td, idx, lusec, bytes);
+                               io_u_mark_latency(td, lusec);
                        }
                        if (!td->o.disable_bw)
                                add_bw_sample(td, idx, bytes, &icd->time);
+                       if (__should_check_rate(td, idx)) {
+                               td->rate_pending_usleep[idx] +=
+                                       (long) td->rate_usec_cycle[idx] - rusec;
+                       }
+                       if (__should_check_rate(td, idx ^ 1))
+                               td->rate_pending_usleep[idx ^ 1] -= rusec;
                }
 
                if (td_write(td) && idx == DDIR_WRITE &&
@@ -940,6 +1000,17 @@ static void io_completed(struct thread_data *td, struct io_u *io_u,
                icd->error = io_u->error;
                io_u_log_error(td, io_u);
        }
+       if (td->o.continue_on_error && icd->error &&
+           td_non_fatal_error(icd->error)) {
+               /*
+                * If there is a non_fatal error, then add to the error count
+                * and clear all the errors.
+                */
+               update_error_count(td, icd->error);
+               td_clear_error(td);
+               icd->error = 0;
+               io_u->error = 0;
+       }
 }
 
 static void init_icd(struct thread_data *td, struct io_completion_data *icd,
 static void init_icd(struct thread_data *td, struct io_completion_data *icd,
@@ -971,7 +1042,8 @@ static void ios_completed(struct thread_data *td,
 /*
  * Complete a single io_u for the sync engines.
  */
-long io_u_sync_complete(struct thread_data *td, struct io_u *io_u)
+int io_u_sync_complete(struct thread_data *td, struct io_u *io_u,
+                      unsigned long *bytes)
 {
        struct io_completion_data icd;
 
@@ -979,17 +1051,24 @@ long io_u_sync_complete(struct thread_data *td, struct io_u *io_u)
        io_completed(td, io_u, &icd);
        put_io_u(td, io_u);
 
-       if (!icd.error)
-               return icd.bytes_done[0] + icd.bytes_done[1];
+       if (icd.error) {
+               td_verror(td, icd.error, "io_u_sync_complete");
+               return -1;
+       }
+
+       if (bytes) {
+               bytes[0] += icd.bytes_done[0];
+               bytes[1] += icd.bytes_done[1];
+       }
 
-       td_verror(td, icd.error, "io_u_sync_complete");
-       return -1;
+       return 0;
 }
 
 /*
  * Called to complete min_events number of io for the async engines.
  */
-long io_u_queued_complete(struct thread_data *td, int min_evts)
+int io_u_queued_complete(struct thread_data *td, int min_evts,
+                        unsigned long *bytes)
 {
        struct io_completion_data icd;
        struct timespec *tvp = NULL;
@@ -1010,11 +1089,17 @@ long io_u_queued_complete(struct thread_data *td, int min_evts)
 
        init_icd(td, &icd, ret);
        ios_completed(td, &icd);
-       if (!icd.error)
-               return icd.bytes_done[0] + icd.bytes_done[1];
+       if (icd.error) {
+               td_verror(td, icd.error, "io_u_queued_complete");
+               return -1;
+       }
 
-       td_verror(td, icd.error, "io_u_queued_complete");
-       return -1;
+       if (bytes) {
+               bytes[0] += icd.bytes_done[0];
+               bytes[1] += icd.bytes_done[1];
+       }
+
+       return 0;
 }
 
 /*
@@ -1026,7 +1111,7 @@ void io_u_queued(struct thread_data *td, struct io_u *io_u)
                unsigned long slat_time;
 
                slat_time = utime_since(&io_u->start_time, &io_u->issue_time);
-               add_slat_sample(td, io_u->ddir, io_u->xfer_buflen, slat_time);
+               add_slat_sample(td, io_u->ddir, slat_time, io_u->xfer_buflen);
        }
 }
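
The new error path relies on two helpers defined outside io_u.c (in fio.h)
and therefore not visible in this diff: td_non_fatal_error(), which decides
whether an errno value is survivable, and update_error_count(), which
accumulates the errors that were ignored.  A minimal sketch of what such
helpers could look like, stated as assumptions for illustration rather than
the definitions shipped with this patch (the error set and field names are
guesses):

	/* sketch: treat media-style errors as non-fatal */
	#define td_non_fatal_error(e)	((e) == EIO || (e) == EILSEQ)

	/* sketch: count ignored errors and remember the first one seen */
	static inline void update_error_count(struct thread_data *td, int err)
	{
		td->total_err_count++;
		if (td->total_err_count == 1)
			td->first_error = err;
	}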