X-Git-Url: https://git.kernel.dk/?p=fio.git;a=blobdiff_plain;f=log.c;h=5ed30eb6f103efb8ed6150d04a807a9dc018ef63;hp=dbca3ccb7f023f37df7d4a9a52aa291cfd45a7a7;hb=6b3eccb158d1f4cebc2924fc6abf84c1bf59d002;hpb=2dc1bbeb58edc85f2829eed6729862c438ea2353

diff --git a/log.c b/log.c
index dbca3ccb..5ed30eb6 100644
--- a/log.c
+++ b/log.c
@@ -8,6 +8,24 @@ void write_iolog_put(struct thread_data *td, struct io_u *io_u)
 	fprintf(td->iolog_f, "%u,%llu,%lu\n", io_u->ddir, io_u->offset, io_u->buflen);
 }
 
+static void iolog_delay(struct thread_data *td, unsigned long delay)
+{
+	unsigned long usec = utime_since_now(&td->last_issue);
+
+	if (delay < usec)
+		return;
+
+	delay -= usec;
+
+	/*
+	 * less than 100 usec delay, just regard it as noise
+	 */
+	if (delay < 100)
+		return;
+
+	usec_sleep(td, delay);
+}
+
 int read_iolog_get(struct thread_data *td, struct io_u *io_u)
 {
 	struct io_piece *ipo;
@@ -19,6 +37,15 @@ int read_iolog_get(struct thread_data *td, struct io_u *io_u)
 		io_u->buflen = ipo->len;
 		io_u->ddir = ipo->ddir;
 		io_u->file = ipo->file;
+
+		if (ipo->delay)
+			iolog_delay(td, ipo->delay);
+
+		/*
+		 * work around, this needs a format change to work for > 1 file
+		 */
+		if (!io_u->file)
+			io_u->file = &td->files[0];
 		free(ipo);
 		return 0;
 	}
@@ -29,11 +56,11 @@ int read_iolog_get(struct thread_data *td, struct io_u *io_u)
 void prune_io_piece_log(struct thread_data *td)
 {
 	struct io_piece *ipo;
+	struct rb_node *n;
 
-	while (!list_empty(&td->io_hist_list)) {
-		ipo = list_entry(td->io_hist_list.next, struct io_piece, list);
-
-		list_del(&ipo->list);
+	while ((n = rb_first(&td->io_hist_tree)) != NULL) {
+		ipo = rb_entry(n, struct io_piece, rb_node);
+		rb_erase(n, &td->io_hist_tree);
 		free(ipo);
 	}
 }
@@ -43,36 +70,49 @@ void prune_io_piece_log(struct thread_data *td)
  */
 void log_io_piece(struct thread_data *td, struct io_u *io_u)
 {
-	struct io_piece *ipo = malloc(sizeof(struct io_piece));
-	struct list_head *entry;
+	struct rb_node **p, *parent;
+	struct io_piece *ipo, *__ipo;
 
-	INIT_LIST_HEAD(&ipo->list);
+	ipo = malloc(sizeof(struct io_piece));
 	ipo->file = io_u->file;
 	ipo->offset = io_u->offset;
 	ipo->len = io_u->buflen;
 
 	/*
-	 * for random io where the writes extend the file, it will typically
-	 * be laid out with the block scattered as written. it's faster to
-	 * read them in in that order again, so don't sort
+	 * We don't need to sort the entries, if:
+	 *
+	 *	Sequential writes, or
+	 *	Random writes that lay out the file as it goes along
+	 *
+	 * For both these cases, just reading back data in the order we
+	 * wrote it out is the fastest.
 	 */
-	if (!td_random(td) || !td->o.overwrite) {
+	if (!td_random(td) || !td->o.overwrite ||
+	    (io_u->file->flags & FIO_FILE_NOSORT)) {
+		INIT_LIST_HEAD(&ipo->list);
 		list_add_tail(&ipo->list, &td->io_hist_list);
 		return;
 	}
 
+	RB_CLEAR_NODE(&ipo->rb_node);
+	p = &td->io_hist_tree.rb_node;
+	parent = NULL;
+
 	/*
-	 * for random io, sort the list so verify will run faster
+	 * Sort the entry into the verification list
 	 */
-	entry = &td->io_hist_list;
-	while ((entry = entry->prev) != &td->io_hist_list) {
-		struct io_piece *__ipo = list_entry(entry, struct io_piece, list);
-
-		if (__ipo->offset < ipo->offset)
-			break;
+	while (*p) {
+		parent = *p;
+
+		__ipo = rb_entry(parent, struct io_piece, rb_node);
+		if (ipo->offset <= __ipo->offset)
+			p = &(*p)->rb_left;
+		else
+			p = &(*p)->rb_right;
 	}
 
-	list_add(&ipo->list, entry);
+	rb_link_node(&ipo->rb_node, parent, p);
+	rb_insert_color(&ipo->rb_node, &td->io_hist_tree);
 }
 
 void write_iolog_close(struct thread_data *td)
@@ -116,12 +156,13 @@ static int init_iolog_read(struct thread_data *td)
 			reads++;
 		else if (rw == DDIR_WRITE)
 			writes++;
-		else {
+		else if (rw != DDIR_SYNC) {
 			log_err("bad ddir: %d\n", rw);
 			continue;
 		}
 
 		ipo = malloc(sizeof(*ipo));
+		memset(ipo, 0, sizeof(*ipo));
 		INIT_LIST_HEAD(&ipo->list);
 		ipo->offset = offset;
 		ipo->len = bytes;
@@ -139,7 +180,7 @@ static int init_iolog_read(struct thread_data *td)
 	else if (reads && !writes)
 		td->o.td_ddir = TD_DDIR_READ;
 	else if (!reads && writes)
-		td->o.td_ddir = TD_DDIR_READ;
+		td->o.td_ddir = TD_DDIR_WRITE;
 	else
 		td->o.td_ddir = TD_DDIR_RW;
 
@@ -153,6 +194,11 @@ static int init_iolog_write(struct thread_data *td)
 {
 	FILE *f;
 
+	if (td->o.nr_files > 1) {
+		log_err("fio: write_iolog only works with 1 file currently\n");
+		return 1;
+	}
+
 	f = fopen(td->o.write_iolog_file, "w+");
 	if (!f) {
 		perror("fopen write iolog");
@@ -175,9 +221,16 @@ int init_iolog(struct thread_data *td)
 	if (td->io_ops->flags & FIO_DISKLESSIO)
 		return 0;
 
-	if (td->o.read_iolog_file)
-		ret = init_iolog_read(td);
-	else if (td->o.write_iolog_file)
+	if (td->o.read_iolog_file) {
+		/*
+		 * Check if it's a blktrace file and load that if possible.
+		 * Otherwise assume it's a normal log file and load that.
+		 */
+		if (is_blktrace(td->o.read_iolog_file))
+			ret = load_blktrace(td, td->o.read_iolog_file);
+		else
+			ret = init_iolog_read(td);
+	} else if (td->o.write_iolog_file)
 		ret = init_iolog_write(td);
 
 	return ret;
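
Note on the replay timing added in iolog_delay(): the delay recorded with a log entry is compared against the time already spent since the previous issue (utime_since_now(&td->last_issue)), and only the remainder is slept off; remainders under 100 usec are dropped as noise. The following is a standalone sketch of that clamping only, using plain POSIX clock_gettime()/usleep() in place of fio's timing helpers; elapsed_usec() and replay_delay() are illustrative names, not fio functions.

/*
 * Illustrative only: mirrors the clamping in iolog_delay() above, but
 * with POSIX clock_gettime()/usleep() instead of fio's utime_since_now()
 * and usec_sleep().
 */
#include <stdint.h>
#include <time.h>
#include <unistd.h>

/* Microseconds elapsed since *since, on a monotonic clock. */
static uint64_t elapsed_usec(const struct timespec *since)
{
	struct timespec now;
	int64_t us;

	clock_gettime(CLOCK_MONOTONIC, &now);
	us = (int64_t)(now.tv_sec - since->tv_sec) * 1000000 +
	     (now.tv_nsec - since->tv_nsec) / 1000;
	return us > 0 ? (uint64_t)us : 0;
}

/*
 * Sleep only for the part of the logged delay (in usec) that has not
 * already passed since the previous issue; remainders below 100 usec
 * are treated as noise, as in the patch.
 */
static void replay_delay(const struct timespec *last_issue, uint64_t delay)
{
	uint64_t spent = elapsed_usec(last_issue);

	if (delay < spent)
		return;

	delay -= spent;
	if (delay < 100)
		return;

	usleep((useconds_t) delay);
}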
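
Note on the rb-tree in log_io_piece(): random overwrites are now sorted into td->io_hist_tree keyed on offset, so that verification can later consume the history lowest offset first (roughly sequential reads) instead of in issue order. Below is a minimal sketch of that consumer side, using only the kernel-style rb-tree calls the patch itself relies on (rb_first(), rb_entry(), rb_erase()) and assuming fio's bundled rbtree.h; pop_lowest() and the trimmed struct io_piece are illustrative, not fio code.

/*
 * Illustrative only: consuming the sorted write history.  Assumes fio's
 * bundled kernel-derived rbtree.h; this struct io_piece is a trimmed
 * stand-in for fio's real definition.
 */
#include <stddef.h>
#include "rbtree.h"

struct io_piece {
	struct rb_node rb_node;
	unsigned long long offset;
	unsigned long len;
};

/*
 * Pop the entry with the lowest offset, so repeated calls hand back the
 * logged writes in ascending offset order rather than issue order.
 */
static struct io_piece *pop_lowest(struct rb_root *io_hist_tree)
{
	struct rb_node *n = rb_first(io_hist_tree);
	struct io_piece *ipo;

	if (!n)
		return NULL;

	ipo = rb_entry(n, struct io_piece, rb_node);
	rb_erase(n, io_hist_tree);
	return ipo;
}

fio's own verify path consumes td->io_hist_tree in essentially this rb_first()/rb_erase() fashion, which is why the insert loop above keeps the tree ordered by offset (sending equal offsets to the left child).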