/*
 * Code related to writing an iolog of what a thread is doing, and to
 * later read that back and replay
 */
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#ifdef CONFIG_ZLIB
#include <zlib.h>
#endif

#include "flist.h"
#include "fio.h"
#include "verify.h"
#include "trim.h"
#include "filelock.h"
#include "smalloc.h"

static const char iolog_ver2[] = "fio version 2 iolog";
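/*
 * An illustrative (made-up) version 2 iolog, matching what log_file()
 * and log_io_u() below emit and what read_iolog2() parses:
 *
 *	fio version 2 iolog
 *	/dev/sdb add
 *	/dev/sdb open
 *	/dev/sdb write 0 4096
 *	/dev/sdb read 4096 4096
 *	/dev/sdb close
 *
 * File actions are "<filename> <add|open|close>"; data lines are
 * "<filename> <ddir> <offset> <length>".
 */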
void queue_io_piece(struct thread_data *td, struct io_piece *ipo)
{
	flist_add_tail(&ipo->list, &td->io_log_list);
	td->total_io_size += ipo->len;
}
void log_io_u(const struct thread_data *td, const struct io_u *io_u)
{
	if (!td->o.write_iolog_file)
		return;

	fprintf(td->iolog_f, "%s %s %llu %lu\n", io_u->file->file_name,
						io_ddir_name(io_u->ddir),
						io_u->offset, io_u->buflen);
}
void log_file(struct thread_data *td, struct fio_file *f,
	      enum file_log_act what)
{
	const char *act[] = { "add", "open", "close" };

	assert(what < 3);

	if (!td->o.write_iolog_file)
		return;

	/*
	 * this happens on the pre-open/close done before the job starts
	 */
	if (!td->iolog_f)
		return;

	fprintf(td->iolog_f, "%s %s\n", f->file_name, act[what]);
}
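/*
 * Sleep long enough to honor the delay recorded for the next replay
 * entry. Sleeps are issued in slices of at most 500 msec so that
 * td->terminate is noticed promptly; any oversleep is remembered in
 * td->time_offset and credited against the next delay.
 */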
static void iolog_delay(struct thread_data *td, unsigned long delay)
{
	uint64_t usec = utime_since_now(&td->last_issue);
	uint64_t this_delay;
	struct timeval tv;

	if (delay < td->time_offset) {
		td->time_offset = 0;
		return;
	}

	delay -= td->time_offset;
	if (delay < usec)
		return;

	delay -= usec;

	fio_gettime(&tv, NULL);
	while (delay && !td->terminate) {
		this_delay = delay;
		if (this_delay > 500000)
			this_delay = 500000;

		usec_sleep(td, this_delay);
		delay -= this_delay;
	}

	usec = utime_since_now(&tv);
	if (usec > delay)
		td->time_offset = usec - delay;
	else
		td->time_offset = 0;
}
static int ipo_special(struct thread_data *td, struct io_piece *ipo)
{
	struct fio_file *f;
	int ret;

	/*
	 * Not a special (file action) entry
	 */
	if (ipo->ddir != DDIR_INVAL)
		return 0;

	f = td->files[ipo->fileno];

	switch (ipo->file_action) {
	case FIO_LOG_OPEN_FILE:
		ret = td_io_open_file(td, f);
		if (!ret)
			break;
		td_verror(td, ret, "iolog open file");
		return -1;
	case FIO_LOG_CLOSE_FILE:
		td_io_close_file(td, f);
		break;
	case FIO_LOG_UNLINK_FILE:
		td_io_unlink_file(td, f);
		break;
	default:
		log_err("fio: bad file action %d\n", ipo->file_action);
		break;
	}

	return 1;
}
int read_iolog_get(struct thread_data *td, struct io_u *io_u)
{
	struct io_piece *ipo;
	unsigned long elapsed;

	while (!flist_empty(&td->io_log_list)) {
		int ret;

		ipo = flist_first_entry(&td->io_log_list, struct io_piece, list);
		flist_del(&ipo->list);
		remove_trim_entry(td, ipo);

		ret = ipo_special(td, ipo);
		if (ret < 0) {
			free(ipo);
			break;
		} else if (ret > 0) {
			free(ipo);
			continue;
		}

		io_u->ddir = ipo->ddir;
		if (ipo->ddir != DDIR_WAIT) {
			io_u->offset = ipo->offset;
			io_u->buflen = ipo->len;
			io_u->file = td->files[ipo->fileno];
			get_file(io_u->file);
			dprint(FD_IO, "iolog: get %llu/%lu/%s\n", io_u->offset,
						io_u->buflen, io_u->file->file_name);
			if (ipo->delay)
				iolog_delay(td, ipo->delay);
		} else {
			elapsed = mtime_since_genesis();
			if (ipo->delay > elapsed)
				usec_sleep(td, (ipo->delay - elapsed) * 1000);
		}

		free(ipo);

		if (io_u->ddir != DDIR_WAIT)
			return 0;
	}

	td->done = 1;
	return 1;
}
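/*
 * Note for callers: read_iolog_get() returns 0 with io_u filled in for
 * the next replay entry, or sets td->done and returns 1 once the log
 * has been consumed or a file action failed.
 */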
void prune_io_piece_log(struct thread_data *td)
{
	struct io_piece *ipo;
	struct rb_node *n;

	while ((n = rb_first(&td->io_hist_tree)) != NULL) {
		ipo = rb_entry(n, struct io_piece, rb_node);
		rb_erase(n, &td->io_hist_tree);
		remove_trim_entry(td, ipo);
		td->io_hist_len--;
		free(ipo);
	}

	while (!flist_empty(&td->io_hist_list)) {
		ipo = flist_first_entry(&td->io_hist_list, struct io_piece, list);
		flist_del(&ipo->list);
		remove_trim_entry(td, ipo);
		td->io_hist_len--;
		free(ipo);
	}
}
/*
 * log a successful write, so we can unwind the log for verify
 */
void log_io_piece(struct thread_data *td, struct io_u *io_u)
{
	struct rb_node **p, *parent;
	struct io_piece *ipo, *__ipo;

	ipo = malloc(sizeof(struct io_piece));
	init_ipo(ipo);
	ipo->file = io_u->file;
	ipo->offset = io_u->offset;
	ipo->len = io_u->buflen;
	ipo->numberio = io_u->numberio;
	ipo->flags = IP_F_IN_FLIGHT;

	io_u->ipo = ipo;

	if (io_u_should_trim(td, io_u)) {
		flist_add_tail(&ipo->trim_list, &td->trim_list);
		td->trim_entries++;
	}

	/*
	 * We don't need to sort the entries, if:
	 *
	 *	Sequential writes, or
	 *	Random writes that lay out the file as it goes along
	 *
	 * For both these cases, just reading back data in the order we
	 * wrote it out is the fastest.
	 *
	 * One exception is if we don't have a random map AND we are doing
	 * verifies, in that case we need to check for duplicate blocks and
	 * drop the old one, which we rely on the rb insert/lookup for
	 * handling.
	 */
	if (((!td->o.verifysort) || !td_random(td) || !td->o.overwrite) &&
	      (file_randommap(td, ipo->file) || td->o.verify == VERIFY_NONE)) {
		INIT_FLIST_HEAD(&ipo->list);
		flist_add_tail(&ipo->list, &td->io_hist_list);
		ipo->flags |= IP_F_ONLIST;
		td->io_hist_len++;
		return;
	}

	RB_CLEAR_NODE(&ipo->rb_node);

	/*
	 * Sort the entry into the verification list
	 */
restart:
	p = &td->io_hist_tree.rb_node;
	parent = NULL;
	while (*p) {
		int overlap = 0;
		parent = *p;

		__ipo = rb_entry(parent, struct io_piece, rb_node);
		if (ipo->file < __ipo->file)
			p = &(*p)->rb_left;
		else if (ipo->file > __ipo->file)
			p = &(*p)->rb_right;
		else if (ipo->offset < __ipo->offset) {
			p = &(*p)->rb_left;
			overlap = ipo->offset + ipo->len > __ipo->offset;
		} else if (ipo->offset > __ipo->offset) {
			p = &(*p)->rb_right;
			overlap = __ipo->offset + __ipo->len > ipo->offset;
		} else
			overlap = 1;

		if (overlap) {
			dprint(FD_IO, "iolog: overlap %llu/%lu, %llu/%lu\n",
				__ipo->offset, __ipo->len,
				ipo->offset, ipo->len);
			td->io_hist_len--;
			rb_erase(parent, &td->io_hist_tree);
			remove_trim_entry(td, __ipo);
			free(__ipo);
			goto restart;
		}
	}

	rb_link_node(&ipo->rb_node, parent, p);
	rb_insert_color(&ipo->rb_node, &td->io_hist_tree);
	ipo->flags |= IP_F_ONRB;
	td->io_hist_len++;
}
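/*
 * Worked example of the overlap checks above: an existing piece at
 * offset 4096 with len 4096 covers [4096, 8192). A new piece at offset
 * 6144 sorts to the right of it, and 4096 + 4096 > 6144, so the stale
 * entry is erased and the insertion restarted from the tree root.
 */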
void unlog_io_piece(struct thread_data *td, struct io_u *io_u)
{
	struct io_piece *ipo = io_u->ipo;

	if (td->ts.nr_block_infos) {
		uint32_t *info = io_u_block_info(td, io_u);
		if (BLOCK_INFO_STATE(*info) < BLOCK_STATE_TRIM_FAILURE) {
			if (io_u->ddir == DDIR_TRIM)
				*info = BLOCK_INFO_SET_STATE(*info,
						BLOCK_STATE_TRIM_FAILURE);
			else if (io_u->ddir == DDIR_WRITE)
				*info = BLOCK_INFO_SET_STATE(*info,
						BLOCK_STATE_WRITE_FAILURE);
		}
	}

	if (!ipo)
		return;

	if (ipo->flags & IP_F_ONRB)
		rb_erase(&ipo->rb_node, &td->io_hist_tree);
	else if (ipo->flags & IP_F_ONLIST)
		flist_del(&ipo->list);

	free(ipo);
	io_u->ipo = NULL;
	td->io_hist_len--;
}
void trim_io_piece(struct thread_data *td, const struct io_u *io_u)
{
	struct io_piece *ipo = io_u->ipo;

	if (!ipo)
		return;

	ipo->len = io_u->xfer_buflen - io_u->resid;
}
void write_iolog_close(struct thread_data *td)
{
	fflush(td->iolog_f);
	fclose(td->iolog_f);
	free(td->iolog_buf);
	td->iolog_f = NULL;
	td->iolog_buf = NULL;
}
/*
 * Read version 2 iolog data. It is enhanced to include per-file logging,
 * syncs, etc.
 */
static int read_iolog2(struct thread_data *td, FILE *f)
{
	unsigned long long offset;
	unsigned int bytes;
	int reads, writes, waits, fileno = 0, file_action = 0; /* stupid gcc */
	char *fname, *act;
	char *str, *p;
	enum fio_ddir rw;

	free_release_files(td);

	/*
	 * Read in the read iolog and store it, reuse the infrastructure
	 * for doing verifications.
	 */
	str = malloc(4096);
	fname = malloc(256+16);
	act = malloc(256+16);

	reads = writes = waits = 0;
	while ((p = fgets(str, 4096, f)) != NULL) {
		struct io_piece *ipo;
		int r;

		r = sscanf(p, "%256s %256s %llu %u", fname, act, &offset,
									&bytes);
		if (r == 4) {
			/*
			 * Check action first
			 */
			if (!strcmp(act, "wait"))
				rw = DDIR_WAIT;
			else if (!strcmp(act, "read"))
				rw = DDIR_READ;
			else if (!strcmp(act, "write"))
				rw = DDIR_WRITE;
			else if (!strcmp(act, "sync"))
				rw = DDIR_SYNC;
			else if (!strcmp(act, "datasync"))
				rw = DDIR_DATASYNC;
			else if (!strcmp(act, "trim"))
				rw = DDIR_TRIM;
			else {
				log_err("fio: bad iolog file action: %s\n",
									act);
				continue;
			}
			fileno = get_fileno(td, fname);
		} else if (r == 2) {
			rw = DDIR_INVAL;
			if (!strcmp(act, "add")) {
				fileno = add_file(td, fname, 0, 1);
				file_action = FIO_LOG_ADD_FILE;
				continue;
			} else if (!strcmp(act, "open")) {
				fileno = get_fileno(td, fname);
				file_action = FIO_LOG_OPEN_FILE;
			} else if (!strcmp(act, "close")) {
				fileno = get_fileno(td, fname);
				file_action = FIO_LOG_CLOSE_FILE;
			} else {
				log_err("fio: bad iolog file action: %s\n",
									act);
				continue;
			}
		} else {
			log_err("bad iolog2: %s", p);
			continue;
		}

		if (rw == DDIR_READ)
			reads++;
		else if (rw == DDIR_WRITE) {
			/*
			 * Don't add a write for ro mode
			 */
			if (read_only)
				continue;
			writes++;
		} else if (rw == DDIR_WAIT) {
			waits++;
		} else if (rw == DDIR_INVAL) {
		} else if (!ddir_sync(rw)) {
			log_err("bad ddir: %d\n", rw);
			continue;
		}

		/*
		 * Make note of file
		 */
		ipo = malloc(sizeof(*ipo));
		init_ipo(ipo);
		ipo->ddir = rw;
		if (rw == DDIR_WAIT) {
			ipo->delay = offset;
		} else {
			ipo->offset = offset;
			ipo->len = bytes;
			if (rw != DDIR_INVAL && bytes > td->o.max_bs[rw])
				td->o.max_bs[rw] = bytes;
			ipo->fileno = fileno;
			ipo->file_action = file_action;
			td->o.size += bytes;
		}

		queue_io_piece(td, ipo);
	}

	free(str);
	free(act);
	free(fname);

	if (writes && read_only) {
		log_err("fio: <%s> skips replay of %d writes due to"
			" read-only\n", td->o.name, writes);
		writes = 0;
	}

	if (!reads && !writes && !waits)
		return 1;
	else if (reads && !writes)
		td->o.td_ddir = TD_DDIR_READ;
	else if (!reads && writes)
		td->o.td_ddir = TD_DDIR_WRITE;
	else
		td->o.td_ddir = TD_DDIR_RW;

	return 0;
}
/*
 * open iolog, check version, and call appropriate parser
 */
static int init_iolog_read(struct thread_data *td)
{
	char buffer[256], *p;
	FILE *f;
	int ret;

	f = fopen(td->o.read_iolog_file, "r");
	if (!f) {
		perror("fopen read iolog");
		return 1;
	}

	p = fgets(buffer, sizeof(buffer), f);
	if (!p) {
		td_verror(td, errno, "iolog read");
		log_err("fio: unable to read iolog\n");
		fclose(f);
		return 1;
	}

	/*
	 * version 2 of the iolog stores a specific string as the
	 * first line, check for that
	 */
	if (!strncmp(iolog_ver2, buffer, strlen(iolog_ver2)))
		ret = read_iolog2(td, f);
	else {
		log_err("fio: iolog version 1 is no longer supported\n");
		ret = 1;
	}

	fclose(f);
	return ret;
}
/*
 * Set up a log for storing io patterns.
 */
static int init_iolog_write(struct thread_data *td)
{
	struct fio_file *ff;
	FILE *f;
	unsigned int i;

	f = fopen(td->o.write_iolog_file, "a");
	if (!f) {
		perror("fopen write iolog");
		return 1;
	}

	/*
	 * That's it for writing, setup a log buffer and we're done.
	 */
	td->iolog_f = f;
	td->iolog_buf = malloc(8192);
	setvbuf(f, td->iolog_buf, _IOFBF, 8192);

	/*
	 * write our version line
	 */
	if (fprintf(f, "%s\n", iolog_ver2) < 0) {
		perror("iolog init");
		return 1;
	}

	/*
	 * add all known files
	 */
	for_each_file(td, ff, i)
		log_file(td, ff, FIO_LOG_ADD_FILE);

	return 0;
}
int init_iolog(struct thread_data *td)
{
	int ret = 0;

	if (td->o.read_iolog_file) {
		int need_swap;

		/*
		 * Check if it's a blktrace file and load that if possible.
		 * Otherwise assume it's a normal log file and load that.
		 */
		if (is_blktrace(td->o.read_iolog_file, &need_swap))
			ret = load_blktrace(td, td->o.read_iolog_file, need_swap);
		else
			ret = init_iolog_read(td);
	} else if (td->o.write_iolog_file)
		ret = init_iolog_write(td);

	if (ret)
		td_verror(td, EINVAL, "failed initializing iolog");

	return ret;
}
void setup_log(struct io_log **log, struct log_params *p,
	       const char *filename)
{
	struct io_log *l;

	l = smalloc(sizeof(*l));
	l->log_type = p->log_type;
	l->log_offset = p->log_offset;
	l->log_gz = p->log_gz;
	l->log_gz_store = p->log_gz_store;
	l->avg_msec = p->avg_msec;
	l->filename = strdup(filename);
	l->td = p->td;

	if (l->log_offset)
		l->log_ddir_mask = LOG_OFFSET_SAMPLE_BIT;

	INIT_FLIST_HEAD(&l->chunk_list);

	if (l->log_gz && !p->td)
		l->log_gz = 0;
	else if (l->log_gz || l->log_gz_store) {
		pthread_mutex_init(&l->chunk_lock, NULL);
		p->td->flags |= TD_F_COMPRESS_LOG;
	}

	*log = l;
}
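/*
 * A minimal sketch of a caller, assuming the log_params fields used
 * above and fio's IO_LOG_TYPE_LAT log type; the real call sites live
 * in the job setup code, and the filename here is made up:
 *
 *	struct log_params lp = {
 *		.td		= td,
 *		.avg_msec	= td->o.log_avg_msec,
 *		.log_type	= IO_LOG_TYPE_LAT,
 *		.log_offset	= td->o.log_offset,
 *		.log_gz		= td->o.log_gz,
 *		.log_gz_store	= td->o.log_gz_store,
 *	};
 *	setup_log(&td->lat_log, &lp, "example_lat.log");
 */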
#ifdef CONFIG_SETVBUF
static void *set_file_buffer(FILE *f)
{
	size_t size = 1048576;
	void *buf;

	buf = malloc(size);
	setvbuf(f, buf, _IOFBF, size);
	return buf;
}

static void clear_file_buffer(void *buf)
{
	free(buf);
}
#else
static void *set_file_buffer(FILE *f)
{
	return NULL;
}

static void clear_file_buffer(void *buf)
{
}
#endif

void free_log(struct io_log *log)
{
	free(log->log);
	free(log->filename);
	sfree(log);
}
void flush_samples(FILE *f, void *samples, uint64_t sample_size)
{
	struct io_sample *s;
	int log_offset;
	uint64_t i, nr_samples;

	if (!sample_size)
		return;

	s = __get_sample(samples, 0, 0);
	log_offset = (s->__ddir & LOG_OFFSET_SAMPLE_BIT) != 0;

	nr_samples = sample_size / __log_entry_sz(log_offset);

	for (i = 0; i < nr_samples; i++) {
		s = __get_sample(samples, log_offset, i);

		if (!log_offset) {
			fprintf(f, "%lu, %lu, %u, %u\n",
					(unsigned long) s->time,
					(unsigned long) s->val,
					io_sample_ddir(s), s->bs);
		} else {
			struct io_sample_offset *so = (void *) s;

			fprintf(f, "%lu, %lu, %u, %u, %llu\n",
					(unsigned long) s->time,
					(unsigned long) s->val,
					io_sample_ddir(s), s->bs,
					(unsigned long long) so->offset);
		}
	}
}
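/*
 * Example output (made-up values), without and then with offset
 * logging enabled; columns are time in msec, value, ddir, block size,
 * and optionally offset:
 *
 *	1000, 543, 0, 4096
 *	1000, 543, 0, 4096, 8388608
 */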
#ifdef CONFIG_ZLIB

struct iolog_flush_data {
	struct workqueue_work work;
	struct io_log *log;
	void *samples;
	uint32_t nr_samples;
};

#define GZ_CHUNK	131072
static struct iolog_compress *get_new_chunk(unsigned int seq)
{
	struct iolog_compress *c;

	c = malloc(sizeof(*c));
	INIT_FLIST_HEAD(&c->list);
	c->buf = malloc(GZ_CHUNK);
	c->len = 0;
	c->seq = seq;
	return c;
}

static void free_chunk(struct iolog_compress *ic)
{
	free(ic->buf);
	free(ic);
}
static int z_stream_init(z_stream *stream, int gz_hdr)
{
	int wbits = 15;

	stream->zalloc = Z_NULL;
	stream->zfree = Z_NULL;
	stream->opaque = Z_NULL;
	stream->next_in = Z_NULL;

	/*
	 * zlib magic - add 32 for auto-detection of gz header or not,
	 * if we decide to store files in a gzip friendly format.
	 */
	if (gz_hdr)
		wbits += 32;

	if (inflateInit2(stream, wbits) != Z_OK)
		return 1;

	return 0;
}
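/*
 * Per the zlib manual, adding 32 to windowBits (here 15 + 32 = 47)
 * makes inflate() auto-detect whether the input carries a zlib or a
 * gzip header, which covers both in-memory and log_gz_store chunks.
 */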
struct inflate_chunk_iter {
	unsigned int seq;
	int err;
	void *buf;
	size_t buf_size;
	size_t buf_used;
	size_t chunk_sz;
};
static void finish_chunk(z_stream *stream, FILE *f,
			 struct inflate_chunk_iter *iter)
{
	int ret;

	ret = inflateEnd(stream);
	if (ret != Z_OK)
		log_err("fio: failed to end log inflation (%d)\n", ret);

	flush_samples(f, iter->buf, iter->buf_used);
	free(iter->buf);
	iter->buf = NULL;
	iter->buf_size = iter->buf_used = 0;
}
/*
 * Iterative chunk inflation. Handles cases where we cross into a new
 * sequence, doing flush finish of previous chunk if needed.
 */
static size_t inflate_chunk(struct iolog_compress *ic, int gz_hdr, FILE *f,
			    z_stream *stream, struct inflate_chunk_iter *iter)
{
	size_t ret;

	dprint(FD_COMPRESS, "inflate chunk size=%lu, seq=%u\n",
				(unsigned long) ic->len, ic->seq);

	if (ic->seq != iter->seq) {
		if (iter->seq)
			finish_chunk(stream, f, iter);

		z_stream_init(stream, gz_hdr);
		iter->seq = ic->seq;
	}

	stream->avail_in = ic->len;
	stream->next_in = ic->buf;

	if (!iter->buf_size) {
		iter->buf_size = iter->chunk_sz;
		iter->buf = malloc(iter->buf_size);
	}

	while (stream->avail_in) {
		size_t this_out = iter->buf_size - iter->buf_used;
		int err;

		stream->avail_out = this_out;
		stream->next_out = iter->buf + iter->buf_used;

		err = inflate(stream, Z_NO_FLUSH);
		if (err < 0) {
			log_err("fio: failed inflating log: %d\n", err);
			iter->err = err;
			break;
		}

		iter->buf_used += this_out - stream->avail_out;

		if (!stream->avail_out) {
			iter->buf_size += iter->chunk_sz;
			iter->buf = realloc(iter->buf, iter->buf_size);
			continue;
		}

		if (err == Z_STREAM_END)
			break;
	}

	ret = (void *) stream->next_in - ic->buf;

	dprint(FD_COMPRESS, "inflated to size=%lu\n", (unsigned long) ret);

	return ret;
}
/*
 * Inflate stored compressed chunks, or write them directly to the log
 * file if so instructed.
 */
static int inflate_gz_chunks(struct io_log *log, FILE *f)
{
	struct inflate_chunk_iter iter = { .chunk_sz = log->log_gz, };
	z_stream stream;

	while (!flist_empty(&log->chunk_list)) {
		struct iolog_compress *ic;

		ic = flist_first_entry(&log->chunk_list, struct iolog_compress, list);
		flist_del(&ic->list);

		if (log->log_gz_store) {
			size_t ret;

			dprint(FD_COMPRESS, "log write chunk size=%lu, "
				"seq=%u\n", (unsigned long) ic->len, ic->seq);

			ret = fwrite(ic->buf, ic->len, 1, f);
			if (ret != 1 || ferror(f)) {
				iter.err = errno;
				log_err("fio: error writing compressed log\n");
			}
		} else
			inflate_chunk(ic, log->log_gz_store, f, &stream, &iter);

		free_chunk(ic);
	}

	if (iter.seq) {
		finish_chunk(&stream, f, &iter);
		free(iter.buf);
	}

	return iter.err;
}
/*
 * Open compressed log file and decompress the stored chunks and
 * write them to stdout. The chunks are stored sequentially in the
 * file, so we iterate over them and do them one-by-one.
 */
int iolog_file_inflate(const char *file)
{
	struct inflate_chunk_iter iter = { .chunk_sz = 64 * 1024 * 1024, };
	struct iolog_compress ic;
	z_stream stream;
	struct stat sb;
	size_t ret, total;
	void *buf;
	FILE *f;

	f = fopen(file, "r");
	if (!f) {
		perror("fopen");
		return 1;
	}

	if (stat(file, &sb) < 0) {
		fclose(f);
		perror("stat");
		return 1;
	}

	ic.buf = buf = malloc(sb.st_size);
	ic.len = sb.st_size;
	ic.seq = 1;

	ret = fread(ic.buf, ic.len, 1, f);
	if (ret == 0 && ferror(f)) {
		perror("fread");
		fclose(f);
		free(buf);
		return 1;
	} else if (ret != 1) {
		log_err("fio: short read on reading log\n");
		fclose(f);
		free(buf);
		return 1;
	}

	fclose(f);

	/*
	 * Each chunk will return Z_STREAM_END. We don't know how many
	 * chunks are in the file, so we just keep looping and incrementing
	 * the sequence number until we have consumed the whole compressed
	 * file.
	 */
	total = ic.len;
	do {
		size_t iret;

		iret = inflate_chunk(&ic, 1, stdout, &stream, &iter);
		total -= iret;
		if (!total)
			break;
		if (iter.err)
			break;

		ic.seq++;
		ic.len -= iret;
		ic.buf += iret;
	} while (1);

	if (iter.seq) {
		finish_chunk(&stream, stdout, &iter);
		free(iter.buf);
	}

	free(buf);
	return iter.err;
}
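/*
 * This is what backs fio's --inflate-log=<file> command line option,
 * turning a log stored with log_gz_store back into plain text on
 * stdout.
 */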
#else

static int inflate_gz_chunks(struct io_log *log, FILE *f)
{
	return 0;
}

int iolog_file_inflate(const char *file)
{
	log_err("fio: log inflation not possible without zlib\n");
	return 1;
}

#endif
void flush_log(struct io_log *log, int do_append)
{
	void *buf;
	FILE *f;

	if (!do_append)
		f = fopen(log->filename, "w");
	else
		f = fopen(log->filename, "a");
	if (!f) {
		perror("fopen log");
		return;
	}

	buf = set_file_buffer(f);

	inflate_gz_chunks(log, f);

	flush_samples(f, log->log, log->nr_samples * log_entry_sz(log));

	fclose(f);
	clear_file_buffer(buf);
}
static int finish_log(struct thread_data *td, struct io_log *log, int trylock)
{
	if (td->flags & TD_F_COMPRESS_LOG)
		iolog_flush(log, 1);

	if (trylock) {
		if (fio_trylock_file(log->filename))
			return 1;
	} else
		fio_lock_file(log->filename);

	if (td->client_type == FIO_CLIENT_TYPE_GUI || is_backend)
		fio_send_iolog(td, log, log->filename);
	else
		flush_log(log, !td->o.per_job_logs);

	fio_unlock_file(log->filename);
	free_log(log);
	return 0;
}
size_t log_chunk_sizes(struct io_log *log)
{
	struct flist_head *entry;
	size_t ret;

	if (flist_empty(&log->chunk_list))
		return 0;

	ret = 0;
	pthread_mutex_lock(&log->chunk_lock);
	flist_for_each(entry, &log->chunk_list) {
		struct iolog_compress *c;

		c = flist_entry(entry, struct iolog_compress, list);
		ret += c->len;
	}
	pthread_mutex_unlock(&log->chunk_lock);
	return ret;
}
#ifdef CONFIG_ZLIB

static int gz_work(struct iolog_flush_data *data)
{
	struct iolog_compress *c;
	struct flist_head list;
	unsigned int seq;
	z_stream stream;
	size_t total = 0;
	int ret;

	INIT_FLIST_HEAD(&list);

	stream.zalloc = Z_NULL;
	stream.zfree = Z_NULL;
	stream.opaque = Z_NULL;

	ret = deflateInit(&stream, Z_DEFAULT_COMPRESSION);
	if (ret != Z_OK) {
		log_err("fio: failed to init gz stream\n");
		goto err;
	}

	seq = ++data->log->chunk_seq;

	stream.next_in = (void *) data->samples;
	stream.avail_in = data->nr_samples * log_entry_sz(data->log);

	dprint(FD_COMPRESS, "deflate input size=%lu, seq=%u\n",
				(unsigned long) stream.avail_in, seq);
	do {
		c = get_new_chunk(seq);
		stream.avail_out = GZ_CHUNK;
		stream.next_out = c->buf;
		ret = deflate(&stream, Z_NO_FLUSH);
		if (ret < 0) {
			log_err("fio: deflate log (%d)\n", ret);
			free_chunk(c);
			goto err;
		}

		c->len = GZ_CHUNK - stream.avail_out;
		flist_add_tail(&c->list, &list);
		total += c->len;
	} while (stream.avail_in);

	stream.next_out = c->buf + c->len;
	stream.avail_out = GZ_CHUNK - c->len;

	ret = deflate(&stream, Z_FINISH);
	if (ret == Z_STREAM_END)
		c->len = GZ_CHUNK - stream.avail_out;
	else {
		do {
			c = get_new_chunk(seq);
			stream.avail_out = GZ_CHUNK;
			stream.next_out = c->buf;
			ret = deflate(&stream, Z_FINISH);
			c->len = GZ_CHUNK - stream.avail_out;
			total += c->len;
			flist_add_tail(&c->list, &list);
		} while (ret != Z_STREAM_END);
	}

	dprint(FD_COMPRESS, "deflated to size=%lu\n", (unsigned long) total);

	ret = deflateEnd(&stream);
	if (ret != Z_OK)
		log_err("fio: deflateEnd %d\n", ret);

	free(data->samples);

	if (!flist_empty(&list)) {
		pthread_mutex_lock(&data->log->chunk_lock);
		flist_splice_tail(&list, &data->log->chunk_list);
		pthread_mutex_unlock(&data->log->chunk_lock);
	}

	ret = 0;
done:
	free(data);
	return ret;
err:
	while (!flist_empty(&list)) {
		c = flist_first_entry(&list, struct iolog_compress, list);
		flist_del(&c->list);
		free_chunk(c);
	}
	ret = 1;
	goto done;
}
/*
 * Invoked from our compress helper thread, when logging would have exceeded
 * the specified memory limitation. Compresses the previously stored
 * samples.
 */
static int gz_work_async(struct submit_worker *sw, struct workqueue_work *work)
{
	return gz_work(container_of(work, struct iolog_flush_data, work));
}

static int gz_init_worker(struct submit_worker *sw)
{
	struct thread_data *td = sw->wq->td;

	if (!fio_option_is_set(&td->o, log_gz_cpumask))
		return 0;

	if (fio_setaffinity(gettid(), td->o.log_gz_cpumask) == -1) {
		log_err("gz: failed to set CPU affinity\n");
		return 1;
	}

	return 0;
}

static struct workqueue_ops log_compress_wq_ops = {
	.fn		= gz_work_async,
	.init_worker_fn	= gz_init_worker,
	.nice		= 1,
};
int iolog_compress_init(struct thread_data *td, struct sk_out *sk_out)
{
	if (!(td->flags & TD_F_COMPRESS_LOG))
		return 0;

	workqueue_init(td, &td->log_compress_wq, &log_compress_wq_ops, 1, sk_out);
	return 0;
}

void iolog_compress_exit(struct thread_data *td)
{
	if (!(td->flags & TD_F_COMPRESS_LOG))
		return;

	workqueue_exit(&td->log_compress_wq);
}
/*
 * Queue work item to compress the existing log entries. We reset the
 * current log to a small size, and reference the existing log in the
 * data that we queue for compression. Once compression has been done,
 * this old log is freed. If called with wait == 1, will not return until
 * the log compression has completed.
 */
int iolog_flush(struct io_log *log, int wait)
{
	struct iolog_flush_data *data;

	io_u_quiesce(log->td);

	data = malloc(sizeof(*data));
	if (!data)
		return 1;

	data->log = log;

	data->samples = log->log;
	data->nr_samples = log->nr_samples;

	log->nr_samples = 0;
	log->max_samples = DEF_LOG_ENTRIES;
	log->log = malloc(log->max_samples * log_entry_sz(log));

	workqueue_enqueue(&log->td->log_compress_wq, &data->work);

	if (wait)
		workqueue_flush(&log->td->log_compress_wq);

	return 0;
}
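/*
 * iolog_flush() is driven from the sample add path once the in-memory
 * log fills up (see the TD_F_COMPRESS_LOG setup in setup_log() above);
 * the full sample buffer is handed to the compression workqueue by
 * reference and a fresh, small buffer takes its place.
 */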
#else

int iolog_flush(struct io_log *log, int wait)
{
	return 1;
}

int iolog_compress_init(struct thread_data *td, struct sk_out *sk_out)
{
	return 0;
}

void iolog_compress_exit(struct thread_data *td)
{
}

#endif
static int __write_log(struct thread_data *td, struct io_log *log, int try)
{
	if (log)
		return finish_log(td, log, try);

	return 0;
}
static int write_iops_log(struct thread_data *td, int try, bool unit_log)
{
	int ret;

	if (per_unit_log(td->iops_log) != unit_log)
		return 0;

	ret = __write_log(td, td->iops_log, try);
	if (!ret)
		td->iops_log = NULL;

	return ret;
}
static int write_slat_log(struct thread_data *td, int try, bool unit_log)
{
	int ret;

	if (!unit_log)
		return 0;

	ret = __write_log(td, td->slat_log, try);
	if (!ret)
		td->slat_log = NULL;

	return ret;
}
static int write_clat_log(struct thread_data *td, int try, bool unit_log)
{
	int ret;

	if (!unit_log)
		return 0;

	ret = __write_log(td, td->clat_log, try);
	if (!ret)
		td->clat_log = NULL;

	return ret;
}
static int write_lat_log(struct thread_data *td, int try, bool unit_log)
{
	int ret;

	if (!unit_log)
		return 0;

	ret = __write_log(td, td->lat_log, try);
	if (!ret)
		td->lat_log = NULL;

	return ret;
}
static int write_bandw_log(struct thread_data *td, int try, bool unit_log)
{
	int ret;

	if (per_unit_log(td->bw_log) != unit_log)
		return 0;

	ret = __write_log(td, td->bw_log, try);
	if (!ret)
		td->bw_log = NULL;

	return ret;
}
enum {
	BW_LOG_MASK	= 1,
	LAT_LOG_MASK	= 2,
	SLAT_LOG_MASK	= 4,
	CLAT_LOG_MASK	= 8,
	IOPS_LOG_MASK	= 16,

	ALL_LOG_NR	= 5,
};

struct log_type {
	unsigned int mask;
	int (*fn)(struct thread_data *, int, bool);
};

static struct log_type log_types[] = {
	{
		.mask	= BW_LOG_MASK,
		.fn	= write_bandw_log,
	},
	{
		.mask	= LAT_LOG_MASK,
		.fn	= write_lat_log,
	},
	{
		.mask	= SLAT_LOG_MASK,
		.fn	= write_slat_log,
	},
	{
		.mask	= CLAT_LOG_MASK,
		.fn	= write_clat_log,
	},
	{
		.mask	= IOPS_LOG_MASK,
		.fn	= write_iops_log,
	},
};
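/*
 * Write out all pending logs for a thread. While more than one log
 * remains, each writer is handed try=1 (log_left != 1 below), so it
 * only trylocks the target file; the final log blocks on the lock. If
 * a pass makes no progress, sleep briefly and retry.
 */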
void td_writeout_logs(struct thread_data *td, bool unit_logs)
{
	unsigned int log_mask = 0;
	unsigned int log_left = ALL_LOG_NR;
	int old_state, i;

	old_state = td_bump_runstate(td, TD_FINISHING);

	finalize_logs(td, unit_logs);

	while (log_left) {
		int prev_log_left = log_left;

		for (i = 0; i < ALL_LOG_NR && log_left; i++) {
			struct log_type *lt = &log_types[i];
			int ret;

			if (!(log_mask & lt->mask)) {
				ret = lt->fn(td, log_left != 1, unit_logs);
				if (!ret) {
					log_left--;
					log_mask |= lt->mask;
				}
			}
		}

		if (prev_log_left == log_left)
			usleep(5000);
	}

	td_restore_runstate(td, old_state);
}
void fio_writeout_logs(bool unit_logs)
{
	struct thread_data *td;
	int i;

	for_each_td(td, i)
		td_writeout_logs(td, unit_logs);
}