2 * Code related to writing an iolog of what a thread is doing, and to
3 * later read that back and replay
22 static const char iolog_ver2[] = "fio version 2 iolog";
/*
 * Append a parsed io_piece to the thread's replay queue, and grow the
 * job's total expected I/O size by the piece's length.
 */
24 void queue_io_piece(struct thread_data *td, struct io_piece *ipo)
26 flist_add_tail(&ipo->list, &td->io_log_list);
27 td->total_io_size += ipo->len;
/*
 * Record one I/O as "<file> <ddir> <offset> <len>" in the write iolog.
 * No-op unless the job requested write_iolog_file.
 */
30 void log_io_u(const struct thread_data *td, const struct io_u *io_u)
32 if (!td->o.write_iolog_file)
35 fprintf(td->iolog_f, "%s %s %llu %lu\n", io_u->file->file_name,
36 io_ddir_name(io_u->ddir),
37 io_u->offset, io_u->buflen);
/*
 * Record a file action ("add"/"open"/"close", indexed by 'what') in the
 * write iolog. Skipped when no iolog is being written, or for the
 * pre-job open/close pass noted below.
 */
40 void log_file(struct thread_data *td, struct fio_file *f,
41 enum file_log_act what)
43 const char *act[] = { "add", "open", "close" };
47 if (!td->o.write_iolog_file)
52 * this happens on the pre-open/close done before the job starts
57 fprintf(td->iolog_f, "%s %s\n", f->file_name, act[what]);
/*
 * Honor the iolog-recorded inter-I/O delay (in usec). Time already spent
 * since the last issue and any previous oversleep (td->time_offset) are
 * subtracted first; the sleep is sliced (<= 500ms visible below) so that
 * td->terminate is noticed promptly.
 */
60 static void iolog_delay(struct thread_data *td, unsigned long delay)
62 uint64_t usec = utime_since_now(&td->last_issue);
66 if (delay < td->time_offset) {
71 delay -= td->time_offset;
77 fio_gettime(&tv, NULL);
78 while (delay && !td->terminate) {
80 if (this_delay > 500000)
83 usec_sleep(td, this_delay);
87 usec = utime_since_now(&tv);
/* remember how far we overslept, to shorten the next delay */
89 td->time_offset = usec - delay;
/*
 * Handle a "special" (DDIR_INVAL) replay entry, i.e. a file action rather
 * than real I/O: open/close/unlink the referenced file. Entries with a
 * real data direction are not special and fall through immediately.
 */
94 static int ipo_special(struct thread_data *td, struct io_piece *ipo)
102 if (ipo->ddir != DDIR_INVAL)
105 f = td->files[ipo->fileno];
107 switch (ipo->file_action) {
108 case FIO_LOG_OPEN_FILE:
109 ret = td_io_open_file(td, f);
112 td_verror(td, ret, "iolog open file");
114 case FIO_LOG_CLOSE_FILE:
115 td_io_close_file(td, f);
117 case FIO_LOG_UNLINK_FILE:
118 td_io_unlink_file(td, f);
121 log_err("fio: bad file action %d\n", ipo->file_action);
/*
 * Pop the next entry from the replay queue and populate 'io_u' from it.
 * File actions are dispatched via ipo_special(); DDIR_WAIT entries turn
 * into a sleep (delay measured against time-since-genesis) instead of an
 * actual I/O.
 */
128 int read_iolog_get(struct thread_data *td, struct io_u *io_u)
130 struct io_piece *ipo;
131 unsigned long elapsed;
133 while (!flist_empty(&td->io_log_list)) {
136 ipo = flist_first_entry(&td->io_log_list, struct io_piece, list);
137 flist_del(&ipo->list);
138 remove_trim_entry(td, ipo);
140 ret = ipo_special(td, ipo);
144 } else if (ret > 0) {
149 io_u->ddir = ipo->ddir;
150 if (ipo->ddir != DDIR_WAIT) {
151 io_u->offset = ipo->offset;
152 io_u->buflen = ipo->len;
/* takes a reference on the file for this io_u */
153 io_u->file = td->files[ipo->fileno];
154 get_file(io_u->file);
155 dprint(FD_IO, "iolog: get %llu/%lu/%s\n", io_u->offset,
156 io_u->buflen, io_u->file->file_name);
158 iolog_delay(td, ipo->delay);
/* DDIR_WAIT: ipo->delay is an absolute msec mark, sleep the remainder */
160 elapsed = mtime_since_genesis();
161 if (ipo->delay > elapsed)
162 usec_sleep(td, (ipo->delay - elapsed) * 1000);
167 if (io_u->ddir != DDIR_WAIT)
/*
 * Tear down the verify history: drain both the rb-tree and the flat list
 * of logged io_pieces, removing each from the trim list as we go.
 */
175 void prune_io_piece_log(struct thread_data *td)
177 struct io_piece *ipo;
180 while ((n = rb_first(&td->io_hist_tree)) != NULL) {
181 ipo = rb_entry(n, struct io_piece, rb_node);
182 rb_erase(n, &td->io_hist_tree);
183 remove_trim_entry(td, ipo);
188 while (!flist_empty(&td->io_hist_list)) {
189 ipo = flist_first_entry(&td->io_hist_list, struct io_piece, list);
190 flist_del(&ipo->list);
191 remove_trim_entry(td, ipo);
198 * log a successful write, so we can unwind the log for verify
/*
 * Add a completed write to the verify history. Depending on the job
 * options the piece either goes on the flat io_hist_list (fast path, no
 * sorting needed) or is sorted into the io_hist_tree rb-tree, where
 * overlapping older entries are dropped in favor of the new write.
 */
200 void log_io_piece(struct thread_data *td, struct io_u *io_u)
202 struct rb_node **p, *parent;
203 struct io_piece *ipo, *__ipo;
205 ipo = malloc(sizeof(struct io_piece));
207 ipo->file = io_u->file;
208 ipo->offset = io_u->offset;
209 ipo->len = io_u->buflen;
210 ipo->numberio = io_u->numberio;
211 ipo->flags = IP_F_IN_FLIGHT;
215 if (io_u_should_trim(td, io_u)) {
216 flist_add_tail(&ipo->trim_list, &td->trim_list);
221 * We don't need to sort the entries, if:
223 * Sequential writes, or
224 * Random writes that lay out the file as it goes along
226 * For both these cases, just reading back data in the order we
227 * wrote it out is the fastest.
229 * One exception is if we don't have a random map AND we are doing
230 * verifies, in that case we need to check for duplicate blocks and
231 * drop the old one, which we rely on the rb insert/lookup for
234 if (((!td->o.verifysort) || !td_random(td) || !td->o.overwrite) &&
235 (file_randommap(td, ipo->file) || td->o.verify == VERIFY_NONE)) {
236 INIT_FLIST_HEAD(&ipo->list);
237 flist_add_tail(&ipo->list, &td->io_hist_list);
238 ipo->flags |= IP_F_ONLIST;
243 RB_CLEAR_NODE(&ipo->rb_node);
246 * Sort the entry into the verification list
249 p = &td->io_hist_tree.rb_node;
/* ordered by (file, offset); equal keys / overlaps evict the old entry */
255 __ipo = rb_entry(parent, struct io_piece, rb_node);
256 if (ipo->file < __ipo->file)
258 else if (ipo->file > __ipo->file)
260 else if (ipo->offset < __ipo->offset) {
262 overlap = ipo->offset + ipo->len > __ipo->offset;
264 else if (ipo->offset > __ipo->offset) {
266 overlap = __ipo->offset + __ipo->len > ipo->offset;
272 dprint(FD_IO, "iolog: overlap %llu/%lu, %llu/%lu",
273 __ipo->offset, __ipo->len,
274 ipo->offset, ipo->len);
276 rb_erase(parent, &td->io_hist_tree);
277 remove_trim_entry(td, __ipo);
283 rb_link_node(&ipo->rb_node, parent, p);
284 rb_insert_color(&ipo->rb_node, &td->io_hist_tree);
285 ipo->flags |= IP_F_ONRB;
/*
 * Undo log_io_piece() for a failed io_u: mark the block's state as a
 * trim/write failure when block-info tracking is on, then remove the
 * piece from whichever structure (rb-tree or list) it was placed on.
 */
289 void unlog_io_piece(struct thread_data *td, struct io_u *io_u)
291 struct io_piece *ipo = io_u->ipo;
293 if (td->ts.nr_block_infos) {
294 uint32_t *info = io_u_block_info(td, io_u);
/* only escalate state; don't overwrite an already-recorded failure */
295 if (BLOCK_INFO_STATE(*info) < BLOCK_STATE_TRIM_FAILURE) {
296 if (io_u->ddir == DDIR_TRIM)
297 *info = BLOCK_INFO_SET_STATE(*info,
298 BLOCK_STATE_TRIM_FAILURE);
299 else if (io_u->ddir == DDIR_WRITE)
300 *info = BLOCK_INFO_SET_STATE(*info,
301 BLOCK_STATE_WRITE_FAILURE);
308 if (ipo->flags & IP_F_ONRB)
309 rb_erase(&ipo->rb_node, &td->io_hist_tree);
310 else if (ipo->flags & IP_F_ONLIST)
311 flist_del(&ipo->list);
/*
 * Shrink the logged piece to the bytes actually transferred (residual
 * subtracted), so verify only covers what really hit the media.
 */
318 void trim_io_piece(struct thread_data *td, const struct io_u *io_u)
320 struct io_piece *ipo = io_u->ipo;
325 ipo->len = io_u->xfer_buflen - io_u->resid;
/*
 * Close out the write iolog; the stdio buffer installed at init time is
 * released and the pointer cleared so it can't be reused.
 */
328 void write_iolog_close(struct thread_data *td)
334 td->iolog_buf = NULL;
338 * Read version 2 iolog data. It is enhanced to include per-file logging,
/*
 * Parse a version 2 iolog from 'f'. Each line is either a file action
 * ("<name> add|open|close") or an I/O record
 * ("<name> <read|write|sync|datasync|trim|wait> <offset> <len>").
 * Parsed entries are queued as io_pieces for replay, and the job's data
 * direction is derived from what the log contained.
 */
341 static int read_iolog2(struct thread_data *td, FILE *f)
343 unsigned long long offset;
345 int reads, writes, waits, fileno = 0, file_action = 0; /* stupid gcc */
350 free_release_files(td);
353 * Read in the read iolog and store it, reuse the infrastructure
354 * for doing verifications.
357 fname = malloc(256+16);
358 act = malloc(256+16);
360 reads = writes = waits = 0;
361 while ((p = fgets(str, 4096, f)) != NULL) {
362 struct io_piece *ipo;
/* r == 4: full I/O record; r == 2: file action; else malformed */
365 r = sscanf(p, "%256s %256s %llu %u", fname, act, &offset,
371 if (!strcmp(act, "wait"))
373 else if (!strcmp(act, "read"))
375 else if (!strcmp(act, "write"))
377 else if (!strcmp(act, "sync"))
379 else if (!strcmp(act, "datasync"))
381 else if (!strcmp(act, "trim"))
384 log_err("fio: bad iolog file action: %s\n",
388 fileno = get_fileno(td, fname);
391 if (!strcmp(act, "add")) {
392 fileno = add_file(td, fname, 0, 1);
393 file_action = FIO_LOG_ADD_FILE;
395 } else if (!strcmp(act, "open")) {
396 fileno = get_fileno(td, fname);
397 file_action = FIO_LOG_OPEN_FILE;
398 } else if (!strcmp(act, "close")) {
399 fileno = get_fileno(td, fname);
400 file_action = FIO_LOG_CLOSE_FILE;
402 log_err("fio: bad iolog file action: %s\n",
407 log_err("bad iolog2: %s", p);
413 else if (rw == DDIR_WRITE) {
415 * Don't add a write for ro mode
420 } else if (rw == DDIR_WAIT) {
422 } else if (rw == DDIR_INVAL) {
423 } else if (!ddir_sync(rw)) {
424 log_err("bad ddir: %d\n", rw);
431 ipo = malloc(sizeof(*ipo));
434 if (rw == DDIR_WAIT) {
437 ipo->offset = offset;
/* track the largest block size seen, so buffers are sized right */
439 if (rw != DDIR_INVAL && bytes > td->o.max_bs[rw])
440 td->o.max_bs[rw] = bytes;
441 ipo->fileno = fileno;
442 ipo->file_action = file_action;
446 queue_io_piece(td, ipo);
453 if (writes && read_only) {
454 log_err("fio: <%s> skips replay of %d writes due to"
455 " read-only\n", td->o.name, writes);
/* derive td_ddir from the mix of operations actually found in the log */
459 if (!reads && !writes && !waits)
461 else if (reads && !writes)
462 td->o.td_ddir = TD_DDIR_READ;
463 else if (!reads && writes)
464 td->o.td_ddir = TD_DDIR_WRITE;
466 td->o.td_ddir = TD_DDIR_RW;
472 * open iolog, check version, and call appropriate parser
/*
 * Open the read iolog, check the version banner on the first line, and
 * hand off to the matching parser. Version 1 logs are rejected.
 */
474 static int init_iolog_read(struct thread_data *td)
476 char buffer[256], *p;
480 f = fopen(td->o.read_iolog_file, "r");
482 perror("fopen read iolog");
486 p = fgets(buffer, sizeof(buffer), f);
488 td_verror(td, errno, "iolog read");
489 log_err("fio: unable to read iolog\n");
495 * version 2 of the iolog stores a specific string as the
496 * first line, check for that
498 if (!strncmp(iolog_ver2, buffer, strlen(iolog_ver2)))
499 ret = read_iolog2(td, f);
501 log_err("fio: iolog version 1 is no longer supported\n");
510 * Set up a log for storing io patterns.
/*
 * Open the write iolog for append, install an 8KB stdio buffer, write the
 * version 2 banner, and log an "add" action for each known file.
 */
512 static int init_iolog_write(struct thread_data *td)
518 f = fopen(td->o.write_iolog_file, "a");
520 perror("fopen write iolog");
525 * That's it for writing, setup a log buffer and we're done.
528 td->iolog_buf = malloc(8192);
529 setvbuf(f, td->iolog_buf, _IOFBF, 8192);
532 * write our version line
534 if (fprintf(f, "%s\n", iolog_ver2) < 0) {
535 perror("iolog init\n");
540 * add all known files
542 for_each_file(td, ff, i)
543 log_file(td, ff, FIO_LOG_ADD_FILE);
/*
 * Entry point for iolog setup: replay from a blktrace or iolog file when
 * read_iolog_file is set, otherwise start a write iolog if requested.
 */
548 int init_iolog(struct thread_data *td)
552 if (td->o.read_iolog_file) {
556 * Check if it's a blktrace file and load that if possible.
557 * Otherwise assume it's a normal log file and load that.
559 if (is_blktrace(td->o.read_iolog_file, &need_swap))
560 ret = load_blktrace(td, td->o.read_iolog_file, need_swap);
562 ret = init_iolog_read(td);
563 } else if (td->o.write_iolog_file)
564 ret = init_iolog_write(td);
567 td_verror(td, EINVAL, "failed initializing iolog");
/*
 * Allocate and initialize an io_log from the given parameters: sample
 * buffer sized for DEF_LOG_ENTRIES, compression settings, and (when
 * compression is enabled with an owning td) the chunk list and lock.
 */
572 void setup_log(struct io_log **log, struct log_params *p,
573 const char *filename)
577 l = calloc(1, sizeof(*l));
579 l->max_samples = DEF_LOG_ENTRIES;
580 l->log_type = p->log_type;
581 l->log_offset = p->log_offset;
582 l->log_gz = p->log_gz;
583 l->log_gz_store = p->log_gz_store;
584 l->log = malloc(l->max_samples * log_entry_sz(l));
585 l->avg_msec = p->avg_msec;
586 l->filename = strdup(filename);
/* flag samples as carrying an offset field, see flush_samples() */
590 l->log_ddir_mask = LOG_OFFSET_SAMPLE_BIT;
592 INIT_FLIST_HEAD(&l->chunk_list);
594 if (l->log_gz && !p->td)
596 else if (l->log_gz || l->log_gz_store) {
597 pthread_mutex_init(&l->chunk_lock, NULL);
598 p->td->flags |= TD_F_COMPRESS_LOG;
/*
 * Optional stdio buffering helpers: when CONFIG_SETVBUF is available a
 * 1MB fully-buffered stream buffer is installed (caller frees it via
 * clear_file_buffer); otherwise both are no-op stubs.
 */
604 #ifdef CONFIG_SETVBUF
605 static void *set_file_buffer(FILE *f)
607 size_t size = 1048576;
611 setvbuf(f, buf, _IOFBF, size);
615 static void clear_file_buffer(void *buf)
620 static void *set_file_buffer(FILE *f)
625 static void clear_file_buffer(void *buf)
630 void free_log(struct io_log *log)
/*
 * Write raw samples to 'f' as CSV. Whether samples carry an offset field
 * is detected from the first sample's LOG_OFFSET_SAMPLE_BIT, which also
 * selects the per-entry size and the output format (4 vs 5 columns).
 */
637 void flush_samples(FILE *f, void *samples, uint64_t sample_size)
641 uint64_t i, nr_samples;
646 s = __get_sample(samples, 0, 0);
647 log_offset = (s->__ddir & LOG_OFFSET_SAMPLE_BIT) != 0;
649 nr_samples = sample_size / __log_entry_sz(log_offset);
651 for (i = 0; i < nr_samples; i++) {
652 s = __get_sample(samples, log_offset, i);
655 fprintf(f, "%lu, %lu, %u, %u\n",
656 (unsigned long) s->time,
657 (unsigned long) s->val,
658 io_sample_ddir(s), s->bs);
660 struct io_sample_offset *so = (void *) s;
662 fprintf(f, "%lu, %lu, %u, %u, %llu\n",
663 (unsigned long) s->time,
664 (unsigned long) s->val,
665 io_sample_ddir(s), s->bs,
666 (unsigned long long) so->offset);
/* Work item handed to the compression workqueue; owns the detached
 * sample buffer until gz_work() frees it. */
673 struct iolog_flush_data {
674 struct workqueue_work work;
/* size of each compressed output chunk buffer (128KB) */
680 #define GZ_CHUNK 131072
/*
 * Allocate a fresh GZ_CHUNK-sized compression chunk tagged with the
 * given sequence number.
 */
682 static struct iolog_compress *get_new_chunk(unsigned int seq)
684 struct iolog_compress *c;
686 c = malloc(sizeof(*c));
687 INIT_FLIST_HEAD(&c->list);
688 c->buf = malloc(GZ_CHUNK);
694 static void free_chunk(struct iolog_compress *ic)
/*
 * Prepare a zlib inflate stream. When gz_hdr is set, the window bits get
 * the +32 auto-detect magic so both zlib and gzip framing are accepted.
 */
700 static int z_stream_init(z_stream *stream, int gz_hdr)
704 stream->zalloc = Z_NULL;
705 stream->zfree = Z_NULL;
706 stream->opaque = Z_NULL;
707 stream->next_in = Z_NULL;
710 * zlib magic - add 32 for auto-detection of gz header or not,
711 * if we decide to store files in a gzip friendly format.
716 if (inflateInit2(stream, wbits) != Z_OK)
722 struct inflate_chunk_iter {
/*
 * End the current inflate stream, flush the decompressed samples to 'f',
 * and reset the iterator's buffer accounting for the next sequence.
 */
731 static void finish_chunk(z_stream *stream, FILE *f,
732 struct inflate_chunk_iter *iter)
736 ret = inflateEnd(stream);
738 log_err("fio: failed to end log inflation (%d)\n", ret);
740 flush_samples(f, iter->buf, iter->buf_used);
743 iter->buf_size = iter->buf_used = 0;
747 * Iterative chunk inflation. Handles cases where we cross into a new
748 * sequence, doing flush finish of previous chunk if needed.
/*
 * Inflate one stored chunk into iter->buf, growing the buffer by
 * iter->chunk_sz as needed. A sequence-number change marks a new zlib
 * stream: the previous one is finished/flushed and a new one started.
 * Returns the number of input bytes consumed.
 */
750 static size_t inflate_chunk(struct iolog_compress *ic, int gz_hdr, FILE *f,
751 z_stream *stream, struct inflate_chunk_iter *iter)
755 dprint(FD_COMPRESS, "inflate chunk size=%lu, seq=%u",
756 (unsigned long) ic->len, ic->seq);
758 if (ic->seq != iter->seq) {
760 finish_chunk(stream, f, iter);
762 z_stream_init(stream, gz_hdr);
766 stream->avail_in = ic->len;
767 stream->next_in = ic->buf;
769 if (!iter->buf_size) {
770 iter->buf_size = iter->chunk_sz;
771 iter->buf = malloc(iter->buf_size);
774 while (stream->avail_in) {
775 size_t this_out = iter->buf_size - iter->buf_used;
778 stream->avail_out = this_out;
779 stream->next_out = iter->buf + iter->buf_used;
781 err = inflate(stream, Z_NO_FLUSH);
783 log_err("fio: failed inflating log: %d\n", err);
788 iter->buf_used += this_out - stream->avail_out;
/* output buffer full: grow it and continue inflating */
790 if (!stream->avail_out) {
791 iter->buf_size += iter->chunk_sz;
792 iter->buf = realloc(iter->buf, iter->buf_size);
796 if (err == Z_STREAM_END)
800 ret = (void *) stream->next_in - ic->buf;
802 dprint(FD_COMPRESS, "inflated to size=%lu\n", (unsigned long) ret);
808 * Inflate stored compressed chunks, or write them directly to the log
809 * file if so instructed.
/*
 * Drain the log's chunk list: either write the compressed chunks straight
 * to 'f' (log_gz_store), or inflate them and flush the decoded samples.
 */
811 static int inflate_gz_chunks(struct io_log *log, FILE *f)
813 struct inflate_chunk_iter iter = { .chunk_sz = log->log_gz, };
816 while (!flist_empty(&log->chunk_list)) {
817 struct iolog_compress *ic;
819 ic = flist_first_entry(&log->chunk_list, struct iolog_compress, list);
820 flist_del(&ic->list);
822 if (log->log_gz_store) {
825 dprint(FD_COMPRESS, "log write chunk size=%lu, "
826 "seq=%u\n", (unsigned long) ic->len, ic->seq);
828 ret = fwrite(ic->buf, ic->len, 1, f);
829 if (ret != 1 || ferror(f)) {
831 log_err("fio: error writing compressed log\n");
834 inflate_chunk(ic, log->log_gz_store, f, &stream, &iter);
/* flush whatever the final stream produced */
840 finish_chunk(&stream, f, &iter);
848 * Open compressed log file and decompress the stored chunks and
849 * write them to stdout. The chunks are stored sequentially in the
850 * file, so we iterate over them and do them one-by-one.
/*
 * Inflate a stored compressed log file to stdout. The whole file is read
 * into one buffer; chunks are back-to-back zlib streams, so we loop,
 * bumping the sequence each Z_STREAM_END, until input is consumed.
 */
852 int iolog_file_inflate(const char *file)
854 struct inflate_chunk_iter iter = { .chunk_sz = 64 * 1024 * 1024, };
855 struct iolog_compress ic;
863 f = fopen(file, "r");
869 if (stat(file, &sb) < 0) {
875 ic.buf = buf = malloc(sb.st_size);
879 ret = fread(ic.buf, ic.len, 1, f);
885 } else if (ret != 1) {
886 log_err("fio: short read on reading log\n");
895 * Each chunk will return Z_STREAM_END. We don't know how many
896 * chunks are in the file, so we just keep looping and incrementing
897 * the sequence number until we have consumed the whole compressed
904 iret = inflate_chunk(&ic, 1, stdout, &stream, &iter);
917 finish_chunk(&stream, stdout, &iter);
/* Stubs used when fio is built without zlib: decompression of stored
 * chunks is a no-op, and file inflation reports an error. */
927 static int inflate_gz_chunks(struct io_log *log, FILE *f)
932 int iolog_file_inflate(const char *file)
934 log_err("fio: log inflation not possible without zlib\n");
/*
 * Write the log to its file (truncate or append per do_append), emitting
 * any compressed chunks first, then the uncompressed in-memory samples.
 */
940 void flush_log(struct io_log *log, int do_append)
946 f = fopen(log->filename, "w");
948 f = fopen(log->filename, "a");
954 buf = set_file_buffer(f);
956 inflate_gz_chunks(log, f);
958 flush_samples(f, log->log, log->nr_samples * log_entry_sz(log));
961 clear_file_buffer(buf);
/*
 * Finalize a log under the per-filename lock (try-lock when 'trylock' is
 * set). Networked/GUI clients ship the log over the wire; otherwise it is
 * flushed locally, appending unless per_job_logs is set.
 */
964 static int finish_log(struct thread_data *td, struct io_log *log, int trylock)
966 if (td->flags & TD_F_COMPRESS_LOG)
970 if (fio_trylock_file(log->filename))
973 fio_lock_file(log->filename);
975 if (td->client_type == FIO_CLIENT_TYPE_GUI || is_backend)
976 fio_send_iolog(td, log, log->filename);
978 flush_log(log, !td->o.per_job_logs);
980 fio_unlock_file(log->filename);
/*
 * Sum the sizes of all compressed chunks queued on the log, under the
 * chunk lock. Returns 0 immediately for an empty list.
 */
985 size_t log_chunk_sizes(struct io_log *log)
987 struct flist_head *entry;
990 if (flist_empty(&log->chunk_list))
994 pthread_mutex_lock(&log->chunk_lock);
995 flist_for_each(entry, &log->chunk_list) {
996 struct iolog_compress *c;
998 c = flist_entry(entry, struct iolog_compress, list);
1001 pthread_mutex_unlock(&log->chunk_lock);
/*
 * Deflate the sample buffer attached to 'data' into GZ_CHUNK-sized chunks
 * on a local list, then splice them onto the owning log's chunk_list
 * under its lock. Runs from the compression workqueue; the raw sample
 * buffer is freed once compressed. On a deflate error, chunks built so
 * far are torn down instead.
 */
1007 static int gz_work(struct iolog_flush_data *data)
1009 struct iolog_compress *c;
1010 struct flist_head list;
1016 INIT_FLIST_HEAD(&list);
1018 stream.zalloc = Z_NULL;
1019 stream.zfree = Z_NULL;
1020 stream.opaque = Z_NULL;
1022 ret = deflateInit(&stream, Z_DEFAULT_COMPRESSION);
1024 log_err("fio: failed to init gz stream\n");
/* per-log chunk sequence, lets the inflater find stream boundaries */
1028 seq = ++data->log->chunk_seq;
1030 stream.next_in = (void *) data->samples;
1031 stream.avail_in = data->nr_samples * log_entry_sz(data->log);
1033 dprint(FD_COMPRESS, "deflate input size=%lu, seq=%u\n",
1034 (unsigned long) stream.avail_in, seq);
1036 c = get_new_chunk(seq);
1037 stream.avail_out = GZ_CHUNK;
1038 stream.next_out = c->buf;
1039 ret = deflate(&stream, Z_NO_FLUSH);
1041 log_err("fio: deflate log (%d)\n", ret);
1046 c->len = GZ_CHUNK - stream.avail_out;
1047 flist_add_tail(&c->list, &list);
1049 } while (stream.avail_in);
/* finish the stream into the tail chunk, allocating more as needed */
1051 stream.next_out = c->buf + c->len;
1052 stream.avail_out = GZ_CHUNK - c->len;
1054 ret = deflate(&stream, Z_FINISH);
1055 if (ret == Z_STREAM_END)
1056 c->len = GZ_CHUNK - stream.avail_out;
1059 c = get_new_chunk(seq);
1060 stream.avail_out = GZ_CHUNK;
1061 stream.next_out = c->buf;
1062 ret = deflate(&stream, Z_FINISH);
1063 c->len = GZ_CHUNK - stream.avail_out;
1065 flist_add_tail(&c->list, &list);
1066 } while (ret != Z_STREAM_END);
1069 dprint(FD_COMPRESS, "deflated to size=%lu\n", (unsigned long) total);
1071 ret = deflateEnd(&stream);
1073 log_err("fio: deflateEnd %d\n", ret);
1075 free(data->samples);
1077 if (!flist_empty(&list)) {
1078 pthread_mutex_lock(&data->log->chunk_lock);
1079 flist_splice_tail(&list, &data->log->chunk_list);
1080 pthread_mutex_unlock(&data->log->chunk_lock);
/*
 * Error path: free any chunks built so far. Fix: flist_first_entry()
 * takes the list head, not head->next — passing list.next applied the
 * macro's ->next dereference twice and freed the wrong entry (or
 * treated the on-stack head itself as an entry).
 */
1088 while (!flist_empty(&list)) {
1089 c = flist_first_entry(&list, struct iolog_compress, list);
1090 flist_del(&c->list);
1098 * Invoked from our compress helper thread, when logging would have exceeded
1099 * the specified memory limitation. Compresses the previously stored
/* Workqueue adapter: recover the flush-data container and run gz_work(). */
1102 static int gz_work_async(struct submit_worker *sw, struct workqueue_work *work)
1104 return gz_work(container_of(work, struct iolog_flush_data, work));
/*
 * Per-worker init for the compression workqueue: pin the worker thread to
 * log_gz_cpumask when that option was explicitly set.
 */
1107 static int gz_init_worker(struct submit_worker *sw)
1109 struct thread_data *td = sw->wq->td;
1111 if (!fio_option_is_set(&td->o, log_gz_cpumask))
1114 if (fio_setaffinity(gettid(), td->o.log_gz_cpumask) == -1) {
1115 log_err("gz: failed to set CPU affinity\n");
/* Workqueue ops for the log-compression queue. */
1122 static struct workqueue_ops log_compress_wq_ops = {
1123 .fn = gz_work_async,
1124 .init_worker_fn = gz_init_worker,
/*
 * Start the single-worker log-compression workqueue for this thread, if
 * compressed logging was enabled via TD_F_COMPRESS_LOG.
 */
1128 int iolog_compress_init(struct thread_data *td, struct sk_out *sk_out)
1130 if (!(td->flags & TD_F_COMPRESS_LOG))
1133 workqueue_init(td, &td->log_compress_wq, &log_compress_wq_ops, 1, sk_out);
/* Tear down the log-compression workqueue (no-op if compression off). */
1137 void iolog_compress_exit(struct thread_data *td)
1139 if (!(td->flags & TD_F_COMPRESS_LOG))
1142 workqueue_exit(&td->log_compress_wq)
1146 * Queue work item to compress the existing log entries. We reset the
1147 * current log to a small size, and reference the existing log in the
1148 * data that we queue for compression. Once compression has been done,
1149 * this old log is freed. If called with wait == 1, will not return until
1150 * the log compression has completed.
/*
 * Detach the current sample buffer into a flush work item, reset the log
 * to a fresh DEF_LOG_ENTRIES-sized buffer, and enqueue compression.
 * I/O is quiesced first so no samples are produced mid-swap.
 */
1152 int iolog_flush(struct io_log *log, int wait)
1154 struct iolog_flush_data *data;
1156 io_u_quiesce(log->td);
1158 data = malloc(sizeof(*data));
/* hand the old buffer to the work item; gz_work() frees it */
1164 data->samples = log->log;
1165 data->nr_samples = log->nr_samples;
1167 log->nr_samples = 0;
1168 log->max_samples = DEF_LOG_ENTRIES;
1169 log->log = malloc(log->max_samples * log_entry_sz(log));
1172 workqueue_enqueue(&log->td->log_compress_wq, &data->work);
/* No-zlib build stubs: flush/compress init/exit compile to no-ops. */
1181 int iolog_flush(struct io_log *log, int wait)
1186 int iolog_compress_init(struct thread_data *td, struct sk_out *sk_out)
1191 void iolog_compress_exit(struct thread_data *td)
/*
 * Common finalizer plus one thin wrapper per log type (iops, slat, clat,
 * lat, bandwidth); 'try' selects try-lock behavior in finish_log().
 */
1197 static int __write_log(struct thread_data *td, struct io_log *log, int try)
1200 return finish_log(td, log, try);
1205 static int write_iops_log(struct thread_data *td, int try)
1207 return __write_log(td, td->iops_log, try);
1210 static int write_slat_log(struct thread_data *td, int try)
1212 return __write_log(td, td->slat_log, try);
1215 static int write_clat_log(struct thread_data *td, int try)
1217 return __write_log(td, td->clat_log, try);
1220 static int write_lat_log(struct thread_data *td, int try)
1222 return __write_log(td, td->lat_log, try);
1225 static int write_bandw_log(struct thread_data *td, int try)
1227 return __write_log(td, td->bw_log, try);
/* Table mapping each log-type mask bit to its writer, iterated by
 * fio_writeout_logs(). */
1242 int (*fn)(struct thread_data *, int);
1245 static struct log_type log_types[] = {
1247 .mask = BW_LOG_MASK,
1248 .fn = write_bandw_log,
1251 .mask = LAT_LOG_MASK,
1252 .fn = write_lat_log,
1255 .mask = SLAT_LOG_MASK,
1256 .fn = write_slat_log,
1259 .mask = CLAT_LOG_MASK,
1260 .fn = write_clat_log,
1263 .mask = IOPS_LOG_MASK,
1264 .fn = write_iops_log,
/*
 * Flush all log types for this thread. Runs in TD_FINISHING state and
 * retries logs whose try-lock failed (tracked via log_mask/log_left);
 * the loop bails when a full pass makes no progress.
 */
1268 void fio_writeout_logs(struct thread_data *td)
1270 unsigned int log_mask = 0;
1271 unsigned int log_left = ALL_LOG_NR;
1274 old_state = td_bump_runstate(td, TD_FINISHING);
1279 int prev_log_left = log_left;
1281 for (i = 0; i < ALL_LOG_NR && log_left; i++) {
1282 struct log_type *lt = &log_types[i];
1285 if (!(log_mask & lt->mask)) {
/* only the final remaining log blocks on the lock (try == 0) */
1286 ret = lt->fn(td, log_left != 1);
1289 log_mask |= lt->mask;
1294 if (prev_log_left == log_left)
1298 td_restore_runstate(td, old_state);