X-Git-Url: https://git.kernel.dk/?a=blobdiff_plain;f=log.c;h=8754eb051a1ecfb3ba38290b8d88d5a6bb547c8a;hb=4c3ecec4160909d7eba4acf1a07a8a0cd36a6365;hp=cc90f437041dcc6723f673c76b1c9c992d0077ec;hpb=21bd2987dc5d82a18af485cd1e7841e94137fc0c;p=fio.git

diff --git a/log.c b/log.c
index cc90f437..8754eb05
--- a/log.c
+++ b/log.c
@@ -84,6 +84,9 @@ int read_iolog_get(struct thread_data *td, struct io_u *io_u)
 		io_u->file = &td->files[ipo->fileno];
 		get_file(io_u->file);
 
+		dprint(FD_IO, "iolog: get %llu/%lu/%s\n", io_u->offset,
+					io_u->buflen, io_u->file->file_name);
+
 		if (ipo->delay)
 			iolog_delay(td, ipo->delay);
 
@@ -91,6 +94,7 @@ int read_iolog_get(struct thread_data *td, struct io_u *io_u)
 		return 0;
 	}
 
+	td->done = 1;
 	return 1;
 }
 
@@ -104,6 +108,12 @@ void prune_io_piece_log(struct thread_data *td)
 		rb_erase(n, &td->io_hist_tree);
 		free(ipo);
 	}
+
+	while (!list_empty(&td->io_hist_list)) {
+		ipo = list_entry(td->io_hist_list.next, struct io_piece, list);
+		list_del(&ipo->list);
+		free(ipo);
+	}
 }
 
 /*
@@ -128,8 +138,7 @@ void log_io_piece(struct thread_data *td, struct io_u *io_u)
 	 * For both these cases, just reading back data in the order we
 	 * wrote it out is the fastest.
 	 */
-	if (!td_random(td) || !td->o.overwrite ||
-	    (io_u->file->flags & FIO_FILE_NOSORT)) {
+	if (!td_random(td) || !td->o.overwrite) {
 		INIT_LIST_HEAD(&ipo->list);
 		list_add_tail(&ipo->list, &td->io_hist_list);
 		return;
@@ -232,9 +241,14 @@ static int read_iolog2(struct thread_data *td, FILE *f)
 
 		if (rw == DDIR_READ)
 			reads++;
-		else if (rw == DDIR_WRITE)
+		else if (rw == DDIR_WRITE) {
 			writes++;
-		else if (rw != DDIR_SYNC && rw != DDIR_INVAL) {
+			/*
+			 * Don't add a write for ro mode
+			 */
+			if (read_only)
+				continue;
+		} else if (rw != DDIR_SYNC && rw != DDIR_INVAL) {
 			log_err("bad ddir: %d\n", rw);
 			continue;
 		}
@@ -255,12 +269,18 @@ static int read_iolog2(struct thread_data *td, FILE *f)
 			ipo->file_action = file_action;
 		}
 		list_add_tail(&ipo->list, &td->io_log_list);
+		td->total_io_size += bytes;
 	}
 
 	free(str);
 	free(act);
 	free(fname);
 
+	if (writes && read_only) {
+		log_err("fio: <%s> skips replay of %d writes due to read-only\n", td->o.name, writes);
+		writes = 0;
+	}
+
 	if (!reads && !writes)
 		return 1;
 	else if (reads && !writes)
@@ -282,7 +302,7 @@ static int read_iolog(struct thread_data *td, FILE *f)
 	unsigned int bytes;
 	char *str, *p;
 	int reads, writes;
-	enum fio_ddir rw;
+	int rw;
 
 	/*
 	 * Read in the read iolog and store it, reuse the infrastructure
@@ -299,9 +319,14 @@ static int read_iolog(struct thread_data *td, FILE *f)
 		}
 		if (rw == DDIR_READ)
 			reads++;
-		else if (rw == DDIR_WRITE)
+		else if (rw == DDIR_WRITE) {
 			writes++;
-		else if (rw != DDIR_SYNC) {
+			/*
+			 * Don't add a write for ro mode
+			 */
+			if (read_only)
+				continue;
+		} else if (rw != DDIR_SYNC) {
 			log_err("bad ddir: %d\n", rw);
 			continue;
 		}
@@ -311,14 +336,20 @@ static int read_iolog(struct thread_data *td, FILE *f)
 		INIT_LIST_HEAD(&ipo->list);
 		ipo->offset = offset;
 		ipo->len = bytes;
-		ipo->ddir = rw;
+		ipo->ddir = (enum fio_ddir) rw;
 		if (bytes > td->o.max_bs[rw])
 			td->o.max_bs[rw] = bytes;
 		list_add_tail(&ipo->list, &td->io_log_list);
+		td->total_io_size += bytes;
 	}
 
 	free(str);
 
+	if (writes && read_only) {
+		log_err("fio: <%s> skips replay of %d writes due to read-only\n", td->o.name, writes);
+		writes = 0;
+	}
+
 	if (!reads && !writes)
 		return 1;
 	else if (reads && !writes)
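
Below is a small standalone sketch (not part of the patch, and not fio code) that mirrors the read-only replay policy this diff adds to read_iolog() and read_iolog2(): writes found in the log are still counted, but are not queued when read_only is set, and a single message reports how many were skipped. The DDIR_* names are borrowed from fio for readability; struct entry and replay_log() are invented for this example.

#include <stdio.h>

enum ddir { DDIR_READ, DDIR_WRITE, DDIR_SYNC };

struct entry {
	enum ddir ddir;
	unsigned long long offset;
	unsigned int len;
};

/* Count reads/writes in a log; with read_only set, writes are counted but dropped. */
static int replay_log(const struct entry *log, int nr, int read_only)
{
	int reads = 0, writes = 0, queued = 0;
	int i;

	for (i = 0; i < nr; i++) {
		if (log[i].ddir == DDIR_READ)
			reads++;
		else if (log[i].ddir == DDIR_WRITE) {
			writes++;
			/* don't queue a write when running read-only */
			if (read_only)
				continue;
		} else if (log[i].ddir != DDIR_SYNC) {
			fprintf(stderr, "bad ddir: %d\n", log[i].ddir);
			continue;
		}
		queued++;	/* a real replayer would add the entry to its io_log list here */
	}

	if (writes && read_only)
		fprintf(stderr, "skipping replay of %d writes (read-only)\n", writes);

	fprintf(stderr, "queued %d of %d entries\n", queued, nr);

	/* like read_iolog(): a log with no usable entries is an error */
	return (reads || writes) ? 0 : 1;
}

int main(void)
{
	struct entry log[] = {
		{ DDIR_READ, 0, 4096 },
		{ DDIR_WRITE, 4096, 4096 },
	};

	return replay_log(log, 2, 1 /* read_only */);
}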