Move tp.[ch] to lib/
diff --git a/backend.c b/backend.c
index c1c8f96774850a755681dffa710d0ee1550fe16b..68540abb9a2c1bd80c346d311f94646b464e2ff9 100644
--- a/backend.c
+++ b/backend.c
@@ -53,6 +53,7 @@
 #include "lib/getrusage.h"
 #include "idletime.h"
 #include "err.h"
+#include "lib/tp.h"
 
 static pthread_t disk_util_thread;
 static struct fio_mutex *disk_thread_mutex;
@@ -204,7 +205,11 @@ static int __check_min_rate(struct thread_data *td, struct timeval *now,
                                                td->o.name, rate_iops);
                                return 1;
                        } else {
-                               rate = ((iops - td->rate_blocks[ddir]) * 1000) / spent;
+                               if (spent)
+                                       rate = ((iops - td->rate_blocks[ddir]) * 1000) / spent;
+                               else
+                                       rate = 0;
+
                                if (rate < rate_iops_min ||
                                    iops < td->rate_blocks[ddir]) {
                                        log_err("%s: min iops rate %u not met,"
@@ -641,7 +646,7 @@ static unsigned int exceeds_number_ios(struct thread_data *td)
 
 static int io_bytes_exceeded(struct thread_data *td)
 {
-       unsigned long long bytes;
+       unsigned long long bytes, limit;
 
        if (td_rw(td))
                bytes = td->this_io_bytes[DDIR_READ] + td->this_io_bytes[DDIR_WRITE];
@@ -652,7 +657,12 @@ static int io_bytes_exceeded(struct thread_data *td)
        else
                bytes = td->this_io_bytes[DDIR_TRIM];
 
-       return bytes >= td->o.size || exceeds_number_ios(td);
+       if (td->o.io_limit)
+               limit = td->o.io_limit;
+       else
+               limit = td->o.size;
+
+       return bytes >= limit || exceeds_number_ios(td);
 }
 
 /*
@@ -771,16 +781,21 @@ static uint64_t do_io(struct thread_data *td)
                case FIO_Q_COMPLETED:
                        if (io_u->error) {
                                ret = -io_u->error;
+                               unlog_io_piece(td, io_u);
                                clear_io_u(td, io_u);
                        } else if (io_u->resid) {
                                int bytes = io_u->xfer_buflen - io_u->resid;
                                struct fio_file *f = io_u->file;
 
                                bytes_issued += bytes;
+
+                               trim_io_piece(td, io_u);
+
                                /*
                                 * zero read, fail
                                 */
                                if (!bytes) {
+                                       unlog_io_piece(td, io_u);
                                        td_verror(td, EIO, "full resid");
                                        put_io_u(td, io_u);
                                        break;
@@ -821,6 +836,7 @@ sync_done:
                        bytes_issued += io_u->xfer_buflen;
                        break;
                case FIO_Q_BUSY:
+                       unlog_io_piece(td, io_u);
                        requeue_io_u(td, &io_u);
                        ret2 = td_io_commit(td);
                        if (ret2 < 0)
@@ -1114,13 +1130,14 @@ static int switch_ioscheduler(struct thread_data *td)
        /*
         * Read back and check that the selected scheduler is now the default.
         */
-       memset(tmp, 0, sizeof(tmp));
-       ret = fread(tmp, 1, sizeof(tmp) - 1, f);
+       ret = fread(tmp, sizeof(tmp), 1, f);
        if (ferror(f) || ret < 0) {
                td_verror(td, errno, "fread");
                fclose(f);
                return 1;
        }
+       tmp[sizeof(tmp) - 1] = '\0';
+
 
        sprintf(tmp2, "[%s]", td->o.ioscheduler);
        if (!strstr(tmp, tmp2)) {
@@ -1136,6 +1153,8 @@ static int switch_ioscheduler(struct thread_data *td)
 
 static int keep_running(struct thread_data *td)
 {
+       unsigned long long limit;
+
        if (td->done)
                return 0;
        if (td->o.time_based)
@@ -1147,14 +1166,19 @@ static int keep_running(struct thread_data *td)
        if (exceeds_number_ios(td))
                return 0;
 
-       if (td->o.size != -1ULL && ddir_rw_sum(td->io_bytes) < td->o.size) {
+       if (td->o.io_limit)
+               limit = td->o.io_limit;
+       else
+               limit = td->o.size;
+
+       if (limit != -1ULL && ddir_rw_sum(td->io_bytes) < limit) {
                uint64_t diff;
 
                /*
                 * If the difference is less than the minimum IO size, we
                 * are done.
                 */
-               diff = td->o.size - ddir_rw_sum(td->io_bytes);
+               diff = limit - ddir_rw_sum(td->io_bytes);
                if (diff < td_max_bs(td))
                        return 0;
 
@@ -1247,11 +1271,6 @@ static void *thread_main(void *data)
        } else
                td->pid = gettid();
 
-       /*
-        * fio_time_init() may not have been called yet if running as a server
-        */
-       fio_time_init();
-
        fio_local_clock_init(o->use_thread);
 
        dprint(FD_PROCESS, "jobs pid=%d started\n", (int) td->pid);
@@ -1322,6 +1341,7 @@ static void *thread_main(void *data)
 #ifdef CONFIG_LIBNUMA
        /* numa node setup */
        if (o->numa_cpumask_set || o->numa_memmask_set) {
+               struct bitmask *mask;
                int ret;
 
                if (numa_available() < 0) {
@@ -1330,7 +1350,9 @@ static void *thread_main(void *data)
                }
 
                if (o->numa_cpumask_set) {
-                       ret = numa_run_on_node_mask(o->numa_cpunodesmask);
+                       mask = numa_parse_nodestring(o->numa_cpunodes);
+                       ret = numa_run_on_node_mask(mask);
+                       numa_free_nodemask(mask);
                        if (ret == -1) {
                                td_verror(td, errno, \
                                        "numa_run_on_node_mask failed\n");
@@ -1340,12 +1362,16 @@ static void *thread_main(void *data)
 
                if (o->numa_memmask_set) {
 
+                       mask = NULL;
+                       if (o->numa_memnodes)
+                               mask = numa_parse_nodestring(o->numa_memnodes);
+
                        switch (o->numa_mem_mode) {
                        case MPOL_INTERLEAVE:
-                               numa_set_interleave_mask(o->numa_memnodesmask);
+                               numa_set_interleave_mask(mask);
                                break;
                        case MPOL_BIND:
-                               numa_set_membind(o->numa_memnodesmask);
+                               numa_set_membind(mask);
                                break;
                        case MPOL_LOCAL:
                                numa_set_localalloc();
@@ -1358,6 +1384,9 @@ static void *thread_main(void *data)
                                break;
                        }
 
+                       if (mask)
+                               numa_free_nodemask(mask);
+
                }
        }
 #endif
@@ -1415,6 +1444,9 @@ static void *thread_main(void *data)
                        goto err;
        }
 
+       if (td->flags & TD_F_COMPRESS_LOG)
+               tp_init(&td->tp_data);
+
        fio_verify_init(td);
 
        fio_gettime(&td->epoch, NULL);
@@ -1496,6 +1528,9 @@ static void *thread_main(void *data)
 
        fio_writeout_logs(td);
 
+       if (td->flags & TD_F_COMPRESS_LOG)
+               tp_exit(&td->tp_data);
+
        if (o->exec_postrun)
                exec_string(o, o->exec_postrun, (const char *)"postrun");
 
@@ -1547,7 +1582,7 @@ static int fork_main(int shmid, int offset)
        struct thread_data *td;
        void *data, *ret;
 
-#ifndef __hpux
+#if !defined(__hpux) && !defined(CONFIG_NO_SHM)
        data = shmat(shmid, NULL, 0);
        if (data == (void *) -1) {
                int __err = errno;
@@ -1992,9 +2027,13 @@ int fio_backend(void)
                return 0;
 
        if (write_bw_log) {
-               setup_log(&agg_io_log[DDIR_READ], 0, IO_LOG_TYPE_BW);
-               setup_log(&agg_io_log[DDIR_WRITE], 0, IO_LOG_TYPE_BW);
-               setup_log(&agg_io_log[DDIR_TRIM], 0, IO_LOG_TYPE_BW);
+               struct log_params p = {
+                       .log_type = IO_LOG_TYPE_BW,
+               };
+
+               setup_log(&agg_io_log[DDIR_READ], &p, "agg-read_bw.log");
+               setup_log(&agg_io_log[DDIR_WRITE], &p, "agg-write_bw.log");
+               setup_log(&agg_io_log[DDIR_TRIM], &p, "agg-trim_bw.log");
        }
 
        startup_mutex = fio_mutex_init(FIO_MUTEX_LOCKED);
@@ -2013,11 +2052,14 @@ int fio_backend(void)
        if (!fio_abort) {
                show_run_stats();
                if (write_bw_log) {
-                       __finish_log(agg_io_log[DDIR_READ], "agg-read_bw.log");
-                       __finish_log(agg_io_log[DDIR_WRITE],
-                                       "agg-write_bw.log");
-                       __finish_log(agg_io_log[DDIR_TRIM],
-                                       "agg-write_bw.log");
+                       int i;
+
+                       for (i = 0; i < DDIR_RWDIR_CNT; i++) {
+                               struct io_log *log = agg_io_log[i];
+
+                               flush_log(log);
+                               free_log(log);
+                       }
                }
        }