Avoid using the rbtree if we don't have to
[fio.git] / log.c
diff --git a/log.c b/log.c
index 994f4971ae0967202e49c69b4ccc9002cca0622c..6c7c4d6b2680546663eeac7d179b2e11917b7997 100644 (file)
--- a/log.c
+++ b/log.c
@@ -29,11 +29,11 @@ int read_iolog_get(struct thread_data *td, struct io_u *io_u)
 void prune_io_piece_log(struct thread_data *td)
 {
        struct io_piece *ipo;
+       struct rb_node *n;
 
-       while (!list_empty(&td->io_hist_list)) {
-               ipo = list_entry(td->io_hist_list.next, struct io_piece, list);
-
-               list_del(&ipo->list);
+       while ((n = rb_first(&td->io_hist_tree)) != NULL) {
+               ipo = rb_entry(n, struct io_piece, rb_node);
+               rb_erase(n, &td->io_hist_tree);
                free(ipo);
        }
 }
@@ -43,36 +43,48 @@ void prune_io_piece_log(struct thread_data *td)
  */
 void log_io_piece(struct thread_data *td, struct io_u *io_u)
 {
-       struct io_piece *ipo = malloc(sizeof(struct io_piece));
-       struct list_head *entry;
+       struct rb_node **p, *parent;
+       struct io_piece *ipo, *__ipo;
 
-       INIT_LIST_HEAD(&ipo->list);
+       ipo = malloc(sizeof(struct io_piece));
        ipo->file = io_u->file;
        ipo->offset = io_u->offset;
        ipo->len = io_u->buflen;
 
        /*
-        * for random io where the writes extend the file, it will typically
-        * be laid out with the block scattered as written. it's faster to
-        * read them in in that order again, so don't sort
+        * We don't need to sort the entries, if:
+        *
+        *      Sequential writes, or
+        *      Random writes that lay out the file as it goes along
+        *
+        * For both these cases, just reading back data in the order we
+        * wrote it out is the fastest.
         */
-       if (!td_random(td) || !td->overwrite) {
+       if (!td_random(td) || !td->o.overwrite) {
+               INIT_LIST_HEAD(&ipo->list);
                list_add_tail(&ipo->list, &td->io_hist_list);
                return;
        }
 
+       RB_CLEAR_NODE(&ipo->rb_node);
+       p = &td->io_hist_tree.rb_node;
+       parent = NULL;
+
        /*
-        * for random io, sort the list so verify will run faster
+        * Sort the entry into the verification list
         */
-       entry = &td->io_hist_list;
-       while ((entry = entry->prev) != &td->io_hist_list) {
-               struct io_piece *__ipo = list_entry(entry, struct io_piece, list);
-
-               if (__ipo->offset < ipo->offset)
-                       break;
+       while (*p) {
+               parent = *p;
+
+               __ipo = rb_entry(parent, struct io_piece, rb_node);
+               if (ipo->offset <= __ipo->offset)
+                       p = &(*p)->rb_left;
+               else
+                       p = &(*p)->rb_right;
        }
 
-       list_add(&ipo->list, entry);
+       rb_link_node(&ipo->rb_node, parent, p);
+       rb_insert_color(&ipo->rb_node, &td->io_hist_tree);
 }
 
 void write_iolog_close(struct thread_data *td)
@@ -93,7 +105,7 @@ static int init_iolog_read(struct thread_data *td)
        FILE *f;
        int rw, reads, writes;
 
-       f = fopen(td->read_iolog_file, "r");
+       f = fopen(td->o.read_iolog_file, "r");
        if (!f) {
                perror("fopen read iolog");
                return 1;
@@ -116,7 +128,7 @@ static int init_iolog_read(struct thread_data *td)
                        reads++;
                else if (rw == DDIR_WRITE)
                        writes++;
-               else {
+               else if (rw != DDIR_SYNC) {
                        log_err("bad ddir: %d\n", rw);
                        continue;
                }
@@ -126,8 +138,8 @@ static int init_iolog_read(struct thread_data *td)
                ipo->offset = offset;
                ipo->len = bytes;
                ipo->ddir = (enum fio_ddir) rw;
-               if (bytes > td->max_bs[rw])
-                       td->max_bs[rw] = bytes;
+               if (bytes > td->o.max_bs[rw])
+                       td->o.max_bs[rw] = bytes;
                list_add_tail(&ipo->list, &td->io_log_list);
        }
 
@@ -137,11 +149,11 @@ static int init_iolog_read(struct thread_data *td)
        if (!reads && !writes)
                return 1;
        else if (reads && !writes)
-               td->td_ddir = TD_DDIR_READ;
+               td->o.td_ddir = TD_DDIR_READ;
        else if (!reads && writes)
-               td->td_ddir = TD_DDIR_READ;
+               td->o.td_ddir = TD_DDIR_WRITE;
        else
-               td->td_ddir = TD_DDIR_RW;
+               td->o.td_ddir = TD_DDIR_RW;
 
        return 0;
 }
@@ -153,7 +165,7 @@ static int init_iolog_write(struct thread_data *td)
 {
        FILE *f;
 
-       f = fopen(td->write_iolog_file, "w+");
+       f = fopen(td->o.write_iolog_file, "w+");
        if (!f) {
                perror("fopen write iolog");
                return 1;
@@ -172,50 +184,17 @@ int init_iolog(struct thread_data *td)
 {
        int ret = 0;
 
-       if (td->io_ops->flags & FIO_CPUIO)
+       if (td->io_ops->flags & FIO_DISKLESSIO)
                return 0;
 
-       if (td->read_iolog_file)
+       if (td->o.read_iolog_file)
                ret = init_iolog_read(td);
-       else if (td->write_iolog_file)
+       else if (td->o.write_iolog_file)
                ret = init_iolog_write(td);
 
        return ret;
 }
 
-int setup_rate(struct thread_data *td)
-{
-       unsigned long long rate;
-       int nr_reads_per_msec;
-       unsigned int bs;
-
-       if (!td->rate)
-               return 0;
-
-       if (td->rate < td->ratemin) {
-               log_err("min rate larger than nominal rate\n");
-               return -1;
-       }
-
-       if (td_rw(td))
-               bs = td->rw_min_bs;
-       else if (td_read(td))
-               bs = td->min_bs[DDIR_READ];
-       else
-               bs = td->min_bs[DDIR_WRITE];
-
-       rate = td->rate;
-       nr_reads_per_msec = (rate * 1024 * 1000) / bs;
-       if (!nr_reads_per_msec) {
-               log_err("rate lower than supported\n");
-               return -1;
-       }
-
-       td->rate_usec_cycle = 1000000000ULL / nr_reads_per_msec;
-       td->rate_pending_usleep = 0;
-       return 0;
-}
-
 void setup_log(struct io_log **log)
 {
        struct io_log *l = malloc(sizeof(*l));