X-Git-Url: https://git.kernel.dk/?p=fio.git;a=blobdiff_plain;f=log.c;h=6c7c4d6b2680546663eeac7d179b2e11917b7997;hp=a705e5021415ad7069e169ba06906435724d1f37;hb=8de8f047bd025f12d23cfc3fc1793434c6d8ff94;hpb=bb3884d855100fa8fa6a1d2aac79e867dfd47bf9;ds=sidebyside

diff --git a/log.c b/log.c
index a705e502..6c7c4d6b 100644
--- a/log.c
+++ b/log.c
@@ -5,7 +5,7 @@
 
 void write_iolog_put(struct thread_data *td, struct io_u *io_u)
 {
-	fprintf(td->iolog_f, "%u,%llu,%u\n", io_u->ddir, io_u->offset, io_u->buflen);
+	fprintf(td->iolog_f, "%u,%llu,%lu\n", io_u->ddir, io_u->offset, io_u->buflen);
 }
 
 int read_iolog_get(struct thread_data *td, struct io_u *io_u)
@@ -29,50 +29,62 @@ int read_iolog_get(struct thread_data *td, struct io_u *io_u)
 void prune_io_piece_log(struct thread_data *td)
 {
 	struct io_piece *ipo;
+	struct rb_node *n;
 
-	while (!list_empty(&td->io_hist_list)) {
-		ipo = list_entry(td->io_hist_list.next, struct io_piece, list);
-
-		list_del(&ipo->list);
+	while ((n = rb_first(&td->io_hist_tree)) != NULL) {
+		ipo = rb_entry(n, struct io_piece, rb_node);
+		rb_erase(n, &td->io_hist_tree);
 		free(ipo);
 	}
 }
 
 /*
- * log a succesful write, so we can unwind the log for verify
+ * log a successful write, so we can unwind the log for verify
  */
 void log_io_piece(struct thread_data *td, struct io_u *io_u)
 {
-	struct io_piece *ipo = malloc(sizeof(struct io_piece));
-	struct list_head *entry;
+	struct rb_node **p, *parent;
+	struct io_piece *ipo, *__ipo;
 
-	INIT_LIST_HEAD(&ipo->list);
+	ipo = malloc(sizeof(struct io_piece));
 	ipo->file = io_u->file;
 	ipo->offset = io_u->offset;
 	ipo->len = io_u->buflen;
 
 	/*
-	 * for random io where the writes extend the file, it will typically
-	 * be laid out with the block scattered as written. it's faster to
-	 * read them in in that order again, so don't sort
+	 * We don't need to sort the entries, if:
+	 *
+	 *	Sequential writes, or
+	 *	Random writes that lay out the file as it goes along
+	 *
+	 * For both these cases, just reading back data in the order we
+	 * wrote it out is the fastest.
 	 */
-	if (td->sequential || !td->overwrite) {
+	if (!td_random(td) || !td->o.overwrite) {
+		INIT_LIST_HEAD(&ipo->list);
 		list_add_tail(&ipo->list, &td->io_hist_list);
 		return;
 	}
 
+	RB_CLEAR_NODE(&ipo->rb_node);
+	p = &td->io_hist_tree.rb_node;
+	parent = NULL;
+
 	/*
-	 * for random io, sort the list so verify will run faster
+	 * Sort the entry into the verification list
 	 */
-	entry = &td->io_hist_list;
-	while ((entry = entry->prev) != &td->io_hist_list) {
-		struct io_piece *__ipo = list_entry(entry, struct io_piece, list);
-
-		if (__ipo->offset < ipo->offset)
-			break;
+	while (*p) {
+		parent = *p;
+
+		__ipo = rb_entry(parent, struct io_piece, rb_node);
+		if (ipo->offset <= __ipo->offset)
+			p = &(*p)->rb_left;
+		else
+			p = &(*p)->rb_right;
 	}
 
-	list_add(&ipo->list, entry);
+	rb_link_node(&ipo->rb_node, parent, p);
+	rb_insert_color(&ipo->rb_node, &td->io_hist_tree);
 }
 
 void write_iolog_close(struct thread_data *td)
@@ -93,7 +105,7 @@ static int init_iolog_read(struct thread_data *td)
 	FILE *f;
 	int rw, reads, writes;
 
-	f = fopen(td->read_iolog_file, "r");
+	f = fopen(td->o.read_iolog_file, "r");
 	if (!f) {
 		perror("fopen read iolog");
 		return 1;
@@ -116,7 +128,7 @@ static int init_iolog_read(struct thread_data *td)
 			reads++;
 		else if (rw == DDIR_WRITE)
 			writes++;
-		else {
+		else if (rw != DDIR_SYNC) {
 			log_err("bad ddir: %d\n", rw);
 			continue;
 		}
@@ -126,8 +138,8 @@ static int init_iolog_read(struct thread_data *td)
 		ipo->offset = offset;
 		ipo->len = bytes;
 		ipo->ddir = (enum fio_ddir) rw;
-		if (bytes > td->max_bs[rw])
-			td->max_bs[rw] = bytes;
+		if (bytes > td->o.max_bs[rw])
+			td->o.max_bs[rw] = bytes;
 		list_add_tail(&ipo->list, &td->io_log_list);
 	}
 
@@ -137,11 +149,11 @@ static int init_iolog_read(struct thread_data *td)
 	if (!reads && !writes)
 		return 1;
 	else if (reads && !writes)
-		td->ddir = DDIR_READ;
+		td->o.td_ddir = TD_DDIR_READ;
 	else if (!reads && writes)
-		td->ddir = DDIR_READ;
+		td->o.td_ddir = TD_DDIR_READ;
 	else
-		td->iomix = 1;
+		td->o.td_ddir = TD_DDIR_RW;
 
 	return 0;
 }
@@ -153,7 +165,7 @@ static int init_iolog_write(struct thread_data *td)
 {
 	FILE *f;
 
-	f = fopen(td->write_iolog_file, "w+");
+	f = fopen(td->o.write_iolog_file, "w+");
 	if (!f) {
 		perror("fopen write iolog");
 		return 1;
@@ -172,42 +184,17 @@ int init_iolog(struct thread_data *td)
 {
 	int ret = 0;
 
-	if (td->io_ops->flags & FIO_CPUIO)
+	if (td->io_ops->flags & FIO_DISKLESSIO)
 		return 0;
 
-	if (td->read_iolog_file)
+	if (td->o.read_iolog_file)
 		ret = init_iolog_read(td);
-	else if (td->write_iolog_file)
+	else if (td->o.write_iolog_file)
 		ret = init_iolog_write(td);
 
 	return ret;
 }
 
-int setup_rate(struct thread_data *td)
-{
-	unsigned long long rate;
-	int nr_reads_per_msec;
-
-	if (!td->rate)
-		return 0;
-
-	if (td->rate < td->ratemin) {
-		log_err("min rate larger than nominal rate\n");
-		return -1;
-	}
-
-	rate = td->rate;
-	nr_reads_per_msec = (rate * 1024 * 1000) / td->min_bs[DDIR_READ];
-	if (!nr_reads_per_msec) {
-		log_err("rate lower than supported\n");
-		return -1;
-	}
-
-	td->rate_usec_cycle = 1000000000ULL / nr_reads_per_msec;
-	td->rate_pending_usleep = 0;
-	return 0;
-}
-
 void setup_log(struct io_log **log)
 {
 	struct io_log *l = malloc(sizeof(*l));
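
Note on the second hunk: log_io_piece() previously walked the history list backwards to find a sorted insertion point (O(n) per logged write); with this change it descends td->io_hist_tree and links the new node where the search bottoms out, so verification of random writes gets its offset-sorted order from an rbtree instead. The following is a minimal standalone sketch (not fio code) of that same descend/link/recolor pattern, assuming the kernel-style rbtree API that fio bundles in rbtree.h (rb_link_node(), rb_insert_color(), rb_first(), rb_erase(), rb_entry()); the record struct and record_insert() helper are hypothetical names for illustration only.

#include <stdio.h>
#include <stdlib.h>
#include "rbtree.h"	/* fio's bundled kernel-style rbtree */

struct record {
	unsigned long long offset;
	struct rb_node rb_node;
};

/* Walk down from the root to the insertion point, then link and rebalance. */
static void record_insert(struct rb_root *root, struct record *rec)
{
	struct rb_node **p = &root->rb_node, *parent = NULL;
	struct record *__rec;

	while (*p) {
		parent = *p;
		__rec = rb_entry(parent, struct record, rb_node);
		if (rec->offset <= __rec->offset)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&rec->rb_node, parent, p);
	rb_insert_color(&rec->rb_node, root);
}

int main(void)
{
	struct rb_root root = RB_ROOT;
	unsigned long long offsets[] = { 4096, 0, 12288, 8192 };
	struct rb_node *n;
	int i;

	for (i = 0; i < 4; i++) {
		struct record *rec = malloc(sizeof(*rec));

		rec->offset = offsets[i];
		record_insert(&root, rec);
	}

	/*
	 * Repeatedly taking rb_first() and erasing it, as the reworked
	 * prune_io_piece_log() does, drains the tree in ascending offset
	 * order: 0, 4096, 8192, 12288.
	 */
	while ((n = rb_first(&root)) != NULL) {
		struct record *rec = rb_entry(n, struct record, rb_node);

		printf("offset %llu\n", rec->offset);
		rb_erase(n, &root);
		free(rec);
	}

	return 0;
}

The <= comparison mirrors the patch: equal offsets go to the left subtree, so duplicate keys are tolerated without needing a separate equality branch.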