Support for setting rate based on IOPS
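
Alongside the existing bandwidth-based rate/ratemin options, a job's rate can
now be expressed and checked in IOPS: check_min_rate() samples the completed
block count over the usual ratecycle window and compares it against rate_iops
and rate_iops_min. As a rough sketch of how a job might use this (assuming the
job-file options are spelled after the rate_iops and rate_iops_min fields used
below; the values are only illustrative):

    [iops-limited]
    rw=randread
    bs=4k
    rate_iops=100
    rate_iops_min=50
    ratecycle=1000

As with the bandwidth minimum, the check only starts failing a job after the
first couple of seconds of the run.
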
diff --git a/fio.c b/fio.c
index 36dde3a6562ea06ce35ae07ba14c94ce2e3f5a3f..478ef28aa7e2461365f2c30775e62949eb811400 100644
--- a/fio.c
+++ b/fio.c
 #include "fio.h"
 #include "os.h"
 
-static unsigned long page_mask;
+unsigned long page_mask;
+unsigned long page_size;
 #define ALIGN(buf)     \
        (char *) (((unsigned long) (buf) + page_mask) & ~page_mask)
 
 int groupid = 0;
 int thread_number = 0;
+int nr_process = 0;
+int nr_thread = 0;
 int shm_id = 0;
 int temp_stall_ts;
 
@@ -100,13 +103,14 @@ static void sig_handler(int sig)
 static int check_min_rate(struct thread_data *td, struct timeval *now)
 {
        unsigned long long bytes = 0;
+       unsigned long iops = 0;
        unsigned long spent;
        unsigned long rate;
 
        /*
         * No minimum rate set, always ok
         */
-       if (!td->ratemin)
+       if (!td->ratemin && !td->rate_iops_min)
                return 0;
 
        /*
@@ -115,32 +119,56 @@ static int check_min_rate(struct thread_data *td, struct timeval *now)
        if (mtime_since(&td->start, now) < 2000)
                return 0;
 
-       if (td_read(td))
+       if (td_read(td)) {
+               iops += td->io_blocks[DDIR_READ];
                bytes += td->this_io_bytes[DDIR_READ];
-       if (td_write(td))
+       }
+       if (td_write(td)) {
+               iops += td->io_blocks[DDIR_WRITE];
                bytes += td->this_io_bytes[DDIR_WRITE];
+       }
 
        /*
         * if rate blocks is set, sample is running
         */
-       if (td->rate_bytes) {
+       if (td->rate_bytes || td->rate_blocks) {
                spent = mtime_since(&td->lastrate, now);
                if (spent < td->ratecycle)
                        return 0;
 
-               if (bytes < td->rate_bytes) {
-                       fprintf(f_out, "%s: min rate %u not met\n", td->name, td->ratemin);
-                       return 1;
+               if (td->rate) {
+                       /*
+                        * check bandwidth specified rate
+                        */
+                       if (bytes < td->rate_bytes) {
+                               log_err("%s: min rate %u not met\n", td->name, td->ratemin);
+                               return 1;
+                       } else {
+                               rate = (bytes - td->rate_bytes) / spent;
+                               if (rate < td->ratemin || bytes < td->rate_bytes) {
+                                       log_err("%s: min rate %u not met, got %luKiB/sec\n", td->name, td->ratemin, rate);
+                                       return 1;
+                               }
+                       }
                } else {
-                       rate = (bytes - td->rate_bytes) / spent;
-                       if (rate < td->ratemin || bytes < td->rate_bytes) {
-                               fprintf(f_out, "%s: min rate %u not met, got %luKiB/sec\n", td->name, td->ratemin, rate);
+                       /*
+                        * check iops specified rate
+                        */
+                       if (iops < td->rate_iops) {
+                               log_err("%s: min iops rate %u not met\n", td->name, td->rate_iops);
                                return 1;
+                       } else {
+                               rate = ((iops - td->rate_blocks) * 1000) / spent;
+                               if (rate < td->rate_iops_min || iops < td->rate_blocks) {
+                                       log_err("%s: min iops rate %u not met, got %lu\n", td->name, td->rate_iops_min, rate);
+                                       return 1;
+                               }
                        }
                }
        }
 
        td->rate_bytes = bytes;
+       td->rate_blocks = iops;
        memcpy(&td->lastrate, now, sizeof(*now));
        return 0;
 }
@@ -253,13 +280,16 @@ static void do_verify(struct thread_data *td)
 {
        struct fio_file *f;
        struct io_u *io_u;
-       int ret, i, min_events;
+       int ret, min_events;
+       unsigned int i;
 
        /*
         * sync io first and invalidate cache, to make sure we really
         * read from disk.
         */
        for_each_file(td, f, i) {
+               if (!(f->flags & FIO_FILE_OPEN))
+                       continue;
                if (fio_io_sync(td, f))
                        break;
                if (file_invalidate_cache(td, f))
@@ -301,9 +331,17 @@ static void do_verify(struct thread_data *td)
                case FIO_Q_COMPLETED:
                        if (io_u->error)
                                ret = -io_u->error;
-                       else if (io_u->xfer_buflen != io_u->resid && io_u->resid) {
+                       else if (io_u->resid) {
                                int bytes = io_u->xfer_buflen - io_u->resid;
 
+                               /*
+                                * zero read, fail
+                                */
+                               if (!bytes) {
+                                       td_verror(td, ENODATA, "full resid");
+                                       put_io_u(td, io_u);
+                                       break;
+                               }
                                io_u->xfer_buflen = io_u->resid;
                                io_u->xfer_buf += bytes;
                                requeue_io_u(td, &io_u);
@@ -369,7 +407,8 @@ static void do_io(struct thread_data *td)
 {
        struct timeval s;
        unsigned long usec;
-       int i, ret = 0;
+       unsigned int i;
+       int ret = 0;
 
        td_set_runstate(td, TD_RUNNING);
 
@@ -399,9 +438,18 @@ static void do_io(struct thread_data *td)
                case FIO_Q_COMPLETED:
                        if (io_u->error)
                                ret = -io_u->error;
-                       else if (io_u->xfer_buflen != io_u->resid && io_u->resid) {
+                       else if (io_u->resid) {
                                int bytes = io_u->xfer_buflen - io_u->resid;
 
+                               /*
+                                * zero read, fail
+                                */
+                               if (!bytes) {
+                                       td_verror(td, ENODATA, "full resid");
+                                       put_io_u(td, io_u);
+                                       break;
+                               }
+
                                io_u->xfer_buflen = io_u->resid;
                                io_u->xfer_buf += bytes;
                                requeue_io_u(td, &io_u);
@@ -499,8 +547,12 @@ static void do_io(struct thread_data *td)
 
                if (should_fsync(td) && td->end_fsync) {
                        td_set_runstate(td, TD_FSYNCING);
-                       for_each_file(td, f, i)
+
+                       for_each_file(td, f, i) {
+                               if (!(f->flags & FIO_FILE_OPEN))
+                                       continue;
                                fio_io_sync(td, f);
+                       }
                }
        } else
                cleanup_pending_aio(td);
@@ -631,11 +683,14 @@ static int switch_ioscheduler(struct thread_data *td)
 static int clear_io_state(struct thread_data *td)
 {
        struct fio_file *f;
-       int i, ret;
+       unsigned int i;
+       int ret;
 
        td->ts.stat_io_bytes[0] = td->ts.stat_io_bytes[1] = 0;
        td->this_io_bytes[0] = td->this_io_bytes[1] = 0;
        td->zone_bytes = 0;
+       td->rate_bytes = 0;
+       td->rate_blocks = 0;
 
        td->last_was_sync = 0;
 
@@ -674,33 +729,30 @@ static void *thread_main(void *data)
        INIT_LIST_HEAD(&td->io_log_list);
 
        if (init_io_u(td))
-               goto err;
+               goto err_sem;
 
        if (fio_setaffinity(td) == -1) {
                td_verror(td, errno, "cpu_set_affinity");
-               goto err;
+               goto err_sem;
        }
 
        if (init_iolog(td))
-               goto err;
+               goto err_sem;
 
        if (td->ioprio) {
                if (ioprio_set(IOPRIO_WHO_PROCESS, 0, td->ioprio) == -1) {
                        td_verror(td, errno, "ioprio_set");
-                       goto err;
+                       goto err_sem;
                }
        }
 
        if (nice(td->nice) == -1) {
                td_verror(td, errno, "nice");
-               goto err;
+               goto err_sem;
        }
 
-       if (init_random_state(td))
-               goto err;
-
        if (td->ioscheduler && switch_ioscheduler(td))
-               goto err;
+               goto err_sem;
 
        td_set_runstate(td, TD_INITIALIZED);
        fio_sem_up(startup_sem);
@@ -803,6 +855,9 @@ err:
        cleanup_io_u(td);
        td_set_runstate(td, TD_EXITED);
        return (void *) (unsigned long) td->error;
+err_sem:
+       fio_sem_up(startup_sem);
+       goto err;
 }
 
 /*
@@ -930,7 +985,15 @@ static void run_threads(void)
                return;
 
        if (!terse_output) {
-               printf("Starting %d thread%s\n", thread_number, thread_number > 1 ? "s" : "");
+               printf("Starting ");
+               if (nr_thread)
+                       printf("%d thread%s", nr_thread, nr_thread > 1 ? "s" : "");
+               if (nr_process) {
+                       if (nr_thread)
+                               printf(" and ");
+                       printf("%d process%s", nr_process, nr_process > 1 ? "es" : "");
+               }
+               printf("\n");
                fflush(stdout);
        }
 
@@ -1011,6 +1074,7 @@ static void run_threads(void)
                                if (pthread_create(&td->thread, NULL, thread_main, td)) {
                                        perror("thread_create");
                                        nr_started--;
+                                       break;
                                }
                        } else {
                                if (!fork()) {
@@ -1117,6 +1181,7 @@ int main(int argc, char *argv[])
                return 1;
        }
 
+       page_size = ps;
        page_mask = ps - 1;
 
        if (write_bw_log) {