2 * Code related to writing an iolog of what a thread is doing, and to
3 * later read that back and replay it
/* Magic first line written to, and expected at the top of, a v2 iolog file. */
14 static const char iolog_ver2[] = "fio version 2 iolog";
/*
 * Append a parsed io_piece to the tail of the thread's replay queue
 * (td->io_log_list) and grow the job's total expected I/O size by the
 * piece's length.
 */
16 void queue_io_piece(struct thread_data *td, struct io_piece *ipo)
18 flist_add_tail(&ipo->list, &td->io_log_list);
19 td->total_io_size += ipo->len;
/*
 * Write one "<file> <action> <offset> <length>" record for an io_u to the
 * job's iolog stream (td->iolog_f).  No-op unless the job was configured
 * with write_iolog_file.
 */
22 void log_io_u(struct thread_data *td, struct io_u *io_u)
/* Indexed by io_u->ddir; order must match the I/O direction enum, 0..6. */
24 const char *act[] = { "read", "write", "sync", "datasync",
25 "sync_file_range", "wait", "trim" };
/* Guard against a ddir outside the 7-entry act[] table above. */
27 assert(io_u->ddir <= 6);
29 if (!td->o.write_iolog_file)
/* NOTE(review): the final %lu argument is on an elided line — presumably io_u->buflen; confirm. */
32 fprintf(td->iolog_f, "%s %s %llu %lu\n", io_u->file->file_name,
33 act[io_u->ddir], io_u->offset,
/*
 * Record a file lifecycle event ("add", "open" or "close") in the iolog so
 * that a later replay can reproduce the same file handling.  No-op unless
 * write_iolog_file is set (and, per the elided guard below, when the event
 * occurs outside the running job).
 */
37 void log_file(struct thread_data *td, struct fio_file *f,
38 enum file_log_act what)
/* Indexed by 'what'; order must match enum file_log_act. */
40 const char *act[] = { "add", "open", "close" };
44 if (!td->o.write_iolog_file)
49 * this happens on the pre-open/close done before the job starts
54 fprintf(td->iolog_f, "%s %s\n", f->file_name, act[what]);
/*
 * Sleep so that replay reproduces the inter-I/O gap recorded in the iolog.
 * 'delay' is the recorded gap in usec.  The time already spent since the
 * last issue ('usec') is measured here; the adjustment of 'delay' by that
 * amount happens on elided lines before the sleep — TODO confirm.
 */
57 static void iolog_delay(struct thread_data *td, unsigned long delay)
/* usec elapsed since the previous I/O was issued. */
59 unsigned long usec = utime_since_now(&td->last_issue);
67 * less than 100 usec delay, just regard it as noise
72 usec_sleep(td, delay);
/*
 * Handle a "special" iolog entry: entries with ddir == DDIR_INVAL encode a
 * file action (open/close/unlink) rather than real I/O, and are dispatched
 * here on ipo->file_action.  Entries with a real ddir are passed through
 * untouched.  NOTE(review): the exact return-value convention (ordinary
 * entry vs. consumed action vs. error) lives on elided lines — the caller in
 * read_iolog_get() distinguishes <0, 0 and >0.
 */
75 static int ipo_special(struct thread_data *td, struct io_piece *ipo)
/* Real I/O entries are not special; bail out early. */
83 if (ipo->ddir != DDIR_INVAL)
86 f = td->files[ipo->fileno];
88 switch (ipo->file_action) {
89 case FIO_LOG_OPEN_FILE:
90 ret = td_io_open_file(td, f);
93 td_verror(td, ret, "iolog open file");
95 case FIO_LOG_CLOSE_FILE:
96 td_io_close_file(td, f);
98 case FIO_LOG_UNLINK_FILE:
102 log_err("fio: bad file action %d\n", ipo->file_action);
/*
 * Pop the next replay entry off td->io_log_list and populate 'io_u' from it.
 * File-action entries are executed via ipo_special() and skipped; DDIR_WAIT
 * entries translate into a sleep instead of real I/O.  NOTE(review): the
 * return convention (success vs. list exhausted) is on elided lines.
 */
109 int read_iolog_get(struct thread_data *td, struct io_u *io_u)
111 struct io_piece *ipo;
112 unsigned long elapsed;
/* Loop so that consumed special entries fall through to the next one. */
114 while (!flist_empty(&td->io_log_list)) {
117 ipo = flist_entry(td->io_log_list.next, struct io_piece, list);
118 flist_del(&ipo->list);
119 remove_trim_entry(td, ipo);
/* <0 = error, >0 = file action consumed, 0 = ordinary entry (branches partly elided). */
121 ret = ipo_special(td, ipo);
125 } else if (ret > 0) {
130 io_u->ddir = ipo->ddir;
/* WAIT entries carry no file/offset/length — only a delay. */
131 if (ipo->ddir != DDIR_WAIT) {
132 io_u->offset = ipo->offset;
133 io_u->buflen = ipo->len;
134 io_u->file = td->files[ipo->fileno];
135 get_file(io_u->file);
136 dprint(FD_IO, "iolog: get %llu/%lu/%s\n", io_u->offset,
137 io_u->buflen, io_u->file->file_name);
139 iolog_delay(td, ipo->delay);
/* For WAIT: ipo->delay is an absolute msec mark since job genesis — sleep the remainder. */
141 elapsed = mtime_since_genesis();
142 if (ipo->delay > elapsed)
143 usec_sleep(td, (ipo->delay - elapsed) * 1000);
148 if (io_u->ddir != DDIR_WAIT)
/*
 * Drain every pending io_piece from both history containers: the rb-tree
 * used for sorted verification entries (io_hist_tree) and the flat list
 * (io_hist_list).  Each entry is also detached from the trim list.
 * NOTE(review): the free(ipo) for each drained entry is on elided lines —
 * confirm entries are released here.
 */
156 void prune_io_piece_log(struct thread_data *td)
158 struct io_piece *ipo;
/* First drain the sorted verification tree. */
161 while ((n = rb_first(&td->io_hist_tree)) != NULL) {
162 ipo = rb_entry(n, struct io_piece, rb_node);
163 rb_erase(n, &td->io_hist_tree);
164 remove_trim_entry(td, ipo);
/* Then drain the unsorted history list. */
169 while (!flist_empty(&td->io_hist_list)) {
170 ipo = flist_entry(td->io_hist_list.next, struct io_piece, list);
171 flist_del(&ipo->list);
172 remove_trim_entry(td, ipo);
/*
179 * log a successful write, so we can unwind the log for verify
 *
 * Records the io_u in the per-thread history: either appended to the flat
 * io_hist_list (fast path, replay order == verify order) or inserted into
 * the rb-tree sorted by (file, offset) so duplicate blocks can be detected
 * and the stale entry dropped.
 */
181 void log_io_piece(struct thread_data *td, struct io_u *io_u)
183 struct rb_node **p, *parent;
184 struct io_piece *ipo, *__ipo;
/* NOTE(review): malloc result used unchecked — elided lines may or may not verify it. */
186 ipo = malloc(sizeof(struct io_piece));
188 ipo->file = io_u->file;
189 ipo->offset = io_u->offset;
190 ipo->len = io_u->buflen;
/* Track pieces eligible for trimming on a separate list. */
192 if (io_u_should_trim(td, io_u)) {
193 flist_add_tail(&ipo->trim_list, &td->trim_list);
198 * We don't need to sort the entries, if:
200 * Sequential writes, or
201 * Random writes that lay out the file as it goes along
203 * For both these cases, just reading back data in the order we
204 * wrote it out is the fastest.
206 * One exception is if we don't have a random map AND we are doing
207 * verifies, in that case we need to check for duplicate blocks and
208 * drop the old one, which we rely on the rb insert/lookup for
/* Fast path: append in issue order, no sorting needed. */
211 if ((!td_random(td) || !td->o.overwrite) &&
212 (file_randommap(td, ipo->file) || td->o.verify == VERIFY_NONE)) {
213 INIT_FLIST_HEAD(&ipo->list);
214 flist_add_tail(&ipo->list, &td->io_hist_list);
/* Flag records which container owns the entry (list vs. rb-tree). */
215 ipo->flags |= IP_F_ONLIST;
220 RB_CLEAR_NODE(&ipo->rb_node);
223 * Sort the entry into the verification list
/* Standard rb-tree descent keyed on (file, offset); elided lines set p/parent. */
226 p = &td->io_hist_tree.rb_node;
231 __ipo = rb_entry(parent, struct io_piece, rb_node);
232 if (ipo->file < __ipo->file)
234 else if (ipo->file > __ipo->file)
236 else if (ipo->offset < __ipo->offset)
238 else if (ipo->offset > __ipo->offset)
/* Same (file, offset): an overwrite — evict the stale entry so verify sees latest data. */
241 dprint(FD_IO, "iolog: overlap %llu/%lu, %llu/%lu",
242 __ipo->offset, __ipo->len,
243 ipo->offset, ipo->len);
245 rb_erase(parent, &td->io_hist_tree);
246 remove_trim_entry(td, __ipo);
252 rb_link_node(&ipo->rb_node, parent, p);
253 rb_insert_color(&ipo->rb_node, &td->io_hist_tree);
254 ipo->flags |= IP_F_ONRB;
/*
 * Tear down the write-side iolog.  NOTE(review): flushing/closing of
 * td->iolog_f and freeing of the setvbuf buffer are on elided lines;
 * clearing the pointer here prevents reuse after that free.
 */
258 void write_iolog_close(struct thread_data *td)
264 td->iolog_buf = NULL;
/*
268 * Read version 2 iolog data. It is enhanced to include per-file logging,
 *
 * Parses each "<file> <action> [offset [length]]" line, validates it
 * against the job's read/write permissions, and queues an io_piece per
 * entry for later replay.  Returns 0 on success (exact return handling
 * partially elided).
 */
271 static int read_iolog2(struct thread_data *td, FILE *f)
273 unsigned long long offset;
/* Initializers silence "may be used uninitialized" warnings on the file-action path. */
275 int reads, writes, waits, fileno = 0, file_action = 0; /* stupid gcc */
/* Replay defines the file set itself; drop any files from the job options. */
280 free_release_files(td);
283 * Read in the read iolog and store it, reuse the infrastructure
284 * for doing verifications.
/* 256 chars scanned via %256s (up to 257 bytes incl. NUL); +16 gives headroom. */
287 fname = malloc(256+16);
288 act = malloc(256+16);
290 reads = writes = waits = 0;
291 while ((p = fgets(str, 4096, f)) != NULL) {
292 struct io_piece *ipo;
/* r tells us which line form we got: 4 = I/O entry, fewer = file action (elided branch). */
295 r = sscanf(p, "%256s %256s %llu %u", fname, act, &offset,
/* Map the action word to an I/O direction. */
301 if (!strcmp(act, "wait"))
303 else if (!strcmp(act, "read"))
305 else if (!strcmp(act, "write"))
307 else if (!strcmp(act, "sync"))
309 else if (!strcmp(act, "datasync"))
311 else if (!strcmp(act, "trim"))
314 log_err("fio: bad iolog file action: %s\n",
/* File-action entries: maintain the job's file table as the log dictates. */
320 if (!strcmp(act, "add")) {
322 fileno = add_file(td, fname);
323 file_action = FIO_LOG_ADD_FILE;
325 } else if (!strcmp(act, "open")) {
326 fileno = get_fileno(td, fname);
327 file_action = FIO_LOG_OPEN_FILE;
328 } else if (!strcmp(act, "close")) {
329 fileno = get_fileno(td, fname);
330 file_action = FIO_LOG_CLOSE_FILE;
332 log_err("fio: bad iolog file action: %s\n",
337 log_err("bad iolog2: %s", p);
/* Writes are silently skipped (counted) when the job is read-only. */
343 else if (rw == DDIR_WRITE) {
345 * Don't add a write for ro mode
350 } else if (rw == DDIR_WAIT) {
352 } else if (rw == DDIR_INVAL) {
353 } else if (!ddir_sync(rw)) {
354 log_err("bad ddir: %d\n", rw);
/* Build the replay entry for this line. */
361 ipo = malloc(sizeof(*ipo));
364 if (rw == DDIR_WAIT) {
367 ipo->offset = offset;
/* Track the largest block size seen so buffers are sized correctly. */
369 if (bytes > td->o.max_bs[rw])
370 td->o.max_bs[rw] = bytes;
371 ipo->fileno = fileno;
372 ipo->file_action = file_action;
375 queue_io_piece(td, ipo);
/* Warn once about all skipped writes instead of per-line noise. */
382 if (writes && read_only) {
383 log_err("fio: <%s> skips replay of %d writes due to"
384 " read-only\n", td->o.name, writes);
/* Derive the job's effective data direction from what the log contained. */
388 if (!reads && !writes && !waits)
390 else if (reads && !writes)
391 td->o.td_ddir = TD_DDIR_READ;
392 else if (!reads && writes)
393 td->o.td_ddir = TD_DDIR_WRITE;
395 td->o.td_ddir = TD_DDIR_RW;
/*
401 * open iolog, check version, and call appropriate parser
 *
 * Opens td->o.read_iolog_file, reads the first line, and dispatches to the
 * v2 parser when the version magic matches.  Version 1 logs are rejected.
 */
403 static int init_iolog_read(struct thread_data *td)
405 char buffer[256], *p;
409 f = fopen(td->o.read_iolog_file, "r");
411 perror("fopen read iolog");
415 p = fgets(buffer, sizeof(buffer), f);
417 td_verror(td, errno, "iolog read");
418 log_err("fio: unable to read iolog\n");
424 * version 2 of the iolog stores a specific string as the
425 * first line, check for that
/* strncmp with strlen(magic) ignores the trailing newline fgets kept. */
427 if (!strncmp(iolog_ver2, buffer, strlen(iolog_ver2)))
428 ret = read_iolog2(td, f);
430 log_err("fio: iolog version 1 is no longer supported\n");
/*
439 * Set up a log for storing io patterns.
 *
 * Opens td->o.write_iolog_file for append, attaches a full buffer so log
 * writes don't hit the disk per-line, emits the version magic, and records
 * an "add" event for every file the job already knows about.
 */
441 static int init_iolog_write(struct thread_data *td)
447 f = fopen(td->o.write_iolog_file, "a");
449 perror("fopen write iolog");
454 * That's it for writing, setup a log buffer and we're done.
/* Buffer is kept in td->iolog_buf so write_iolog_close() can free it. */
457 td->iolog_buf = malloc(8192);
458 setvbuf(f, td->iolog_buf, _IOFBF, 8192);
461 * write our version line
463 if (fprintf(f, "%s\n", iolog_ver2) < 0) {
464 perror("iolog init\n");
469 * add all known files
471 for_each_file(td, ff, i)
472 log_file(td, ff, FIO_LOG_ADD_FILE);
/*
 * Top-level iolog setup: pick the replay source (blktrace or fio iolog)
 * when reading, or set up the write-side log when recording.  read and
 * write iolog modes are mutually exclusive here — read takes precedence.
 */
477 int init_iolog(struct thread_data *td)
481 if (td->o.read_iolog_file) {
483 * Check if it's a blktrace file and load that if possible.
484 * Otherwise assume it's a normal log file and load that.
486 if (is_blktrace(td->o.read_iolog_file))
487 ret = load_blktrace(td, td->o.read_iolog_file);
489 ret = init_iolog_read(td);
490 } else if (td->o.write_iolog_file)
491 ret = init_iolog_write(td);
/*
 * Allocate and initialize an io_log of the given type, with room for an
 * initial 1024 samples and the given averaging interval (msec).  The new
 * log is returned through 'log'.  NOTE(review): malloc results are used
 * unchecked — confirm elided lines (or project policy) handle OOM.
 */
496 void setup_log(struct io_log **log, unsigned long avg_msec, int log_type)
498 struct io_log *l = malloc(sizeof(*l));
500 memset(l, 0, sizeof(*l));
/* Initial capacity; presumably grown elsewhere when nr_samples catches up. */
502 l->max_samples = 1024;
503 l->log_type = log_type;
504 l->log = malloc(l->max_samples * sizeof(struct io_sample));
505 l->avg_msec = avg_msec;
/*
 * Flush an io_log's samples to the named file as CSV-style lines of
 * "time, value, ddir, blocksize".  Opened in append mode so repeated
 * flushes accumulate.  NOTE(review): fclose/free of the log are on
 * elided lines — confirm ownership ends here.
 */
509 void __finish_log(struct io_log *log, const char *name)
514 f = fopen(name, "a");
520 for (i = 0; i < log->nr_samples; i++) {
521 fprintf(f, "%lu, %lu, %u, %u\n",
522 (unsigned long) log->log[i].time,
523 (unsigned long) log->log[i].val,
524 log->log[i].ddir, log->log[i].bs);
/*
 * Flush a log under the name "<prefix>_<postfix>.log".  For GUI network
 * clients the log is shipped over the connection instead of written
 * locally.  basename() strips any directory part from the prefix so the
 * file lands in the current directory.
 */
532 void finish_log_named(struct thread_data *td, struct io_log *log,
533 const char *prefix, const char *postfix)
535 char file_name[256], *p;
/* NOTE(review): snprintf bound is 200 though the buffer holds 256 — presumably deliberate headroom. */
537 snprintf(file_name, 200, "%s_%s.log", prefix, postfix);
538 p = basename(file_name);
540 if (td->client_type == FIO_CLIENT_TYPE_GUI) {
541 fio_send_iolog(td, log, p);
545 __finish_log(log, p);
/* Convenience wrapper: flush 'log' named after the job (td->o.name) plus 'name'. */
548 void finish_log(struct thread_data *td, struct io_log *log, const char *name)
550 finish_log_named(td, log, td->o.name, name);