/*
 * Code related to writing an iolog of what a thread is doing, and to
 * later read that back and replay.
 */
/* Magic first line identifying a version 2 iolog file (see init_iolog_read()). */
static const char iolog_ver2[] = "fio version 2 iolog";
24 void queue_io_piece(struct thread_data *td, struct io_piece *ipo)
26 flist_add_tail(&ipo->list, &td->io_log_list);
27 td->total_io_size += ipo->len;
30 void log_io_u(const struct thread_data *td, const struct io_u *io_u)
32 if (!td->o.write_iolog_file)
35 fprintf(td->iolog_f, "%s %s %llu %lu\n", io_u->file->file_name,
36 io_ddir_name(io_u->ddir),
37 io_u->offset, io_u->buflen);
40 void log_file(struct thread_data *td, struct fio_file *f,
41 enum file_log_act what)
43 const char *act[] = { "add", "open", "close" };
47 if (!td->o.write_iolog_file)
52 * this happens on the pre-open/close done before the job starts
57 fprintf(td->iolog_f, "%s %s\n", f->file_name, act[what]);
60 static void iolog_delay(struct thread_data *td, unsigned long delay)
62 uint64_t usec = utime_since_now(&td->last_issue);
66 if (delay < td->time_offset) {
71 delay -= td->time_offset;
77 fio_gettime(&tv, NULL);
78 while (delay && !td->terminate) {
80 if (this_delay > 500000)
83 usec_sleep(td, this_delay);
87 usec = utime_since_now(&tv);
89 td->time_offset = usec - delay;
94 static int ipo_special(struct thread_data *td, struct io_piece *ipo)
102 if (ipo->ddir != DDIR_INVAL)
105 f = td->files[ipo->fileno];
107 switch (ipo->file_action) {
108 case FIO_LOG_OPEN_FILE:
109 ret = td_io_open_file(td, f);
112 td_verror(td, ret, "iolog open file");
114 case FIO_LOG_CLOSE_FILE:
115 td_io_close_file(td, f);
117 case FIO_LOG_UNLINK_FILE:
118 td_io_unlink_file(td, f);
121 log_err("fio: bad file action %d\n", ipo->file_action);
128 int read_iolog_get(struct thread_data *td, struct io_u *io_u)
130 struct io_piece *ipo;
131 unsigned long elapsed;
133 while (!flist_empty(&td->io_log_list)) {
136 ipo = flist_first_entry(&td->io_log_list, struct io_piece, list);
137 flist_del(&ipo->list);
138 remove_trim_entry(td, ipo);
140 ret = ipo_special(td, ipo);
144 } else if (ret > 0) {
149 io_u->ddir = ipo->ddir;
150 if (ipo->ddir != DDIR_WAIT) {
151 io_u->offset = ipo->offset;
152 io_u->buflen = ipo->len;
153 io_u->file = td->files[ipo->fileno];
154 get_file(io_u->file);
155 dprint(FD_IO, "iolog: get %llu/%lu/%s\n", io_u->offset,
156 io_u->buflen, io_u->file->file_name);
158 iolog_delay(td, ipo->delay);
160 elapsed = mtime_since_genesis();
161 if (ipo->delay > elapsed)
162 usec_sleep(td, (ipo->delay - elapsed) * 1000);
167 if (io_u->ddir != DDIR_WAIT)
175 void prune_io_piece_log(struct thread_data *td)
177 struct io_piece *ipo;
180 while ((n = rb_first(&td->io_hist_tree)) != NULL) {
181 ipo = rb_entry(n, struct io_piece, rb_node);
182 rb_erase(n, &td->io_hist_tree);
183 remove_trim_entry(td, ipo);
188 while (!flist_empty(&td->io_hist_list)) {
189 ipo = flist_first_entry(&td->io_hist_list, struct io_piece, list);
190 flist_del(&ipo->list);
191 remove_trim_entry(td, ipo);
198 * log a successful write, so we can unwind the log for verify
200 void log_io_piece(struct thread_data *td, struct io_u *io_u)
202 struct rb_node **p, *parent;
203 struct io_piece *ipo, *__ipo;
205 ipo = malloc(sizeof(struct io_piece));
207 ipo->file = io_u->file;
208 ipo->offset = io_u->offset;
209 ipo->len = io_u->buflen;
210 ipo->numberio = io_u->numberio;
211 ipo->flags = IP_F_IN_FLIGHT;
215 if (io_u_should_trim(td, io_u)) {
216 flist_add_tail(&ipo->trim_list, &td->trim_list);
221 * We don't need to sort the entries, if:
223 * Sequential writes, or
224 * Random writes that lay out the file as it goes along
226 * For both these cases, just reading back data in the order we
227 * wrote it out is the fastest.
229 * One exception is if we don't have a random map AND we are doing
230 * verifies, in that case we need to check for duplicate blocks and
231 * drop the old one, which we rely on the rb insert/lookup for
234 if (((!td->o.verifysort) || !td_random(td) || !td->o.overwrite) &&
235 (file_randommap(td, ipo->file) || td->o.verify == VERIFY_NONE)) {
236 INIT_FLIST_HEAD(&ipo->list);
237 flist_add_tail(&ipo->list, &td->io_hist_list);
238 ipo->flags |= IP_F_ONLIST;
243 RB_CLEAR_NODE(&ipo->rb_node);
246 * Sort the entry into the verification list
249 p = &td->io_hist_tree.rb_node;
255 __ipo = rb_entry(parent, struct io_piece, rb_node);
256 if (ipo->file < __ipo->file)
258 else if (ipo->file > __ipo->file)
260 else if (ipo->offset < __ipo->offset) {
262 overlap = ipo->offset + ipo->len > __ipo->offset;
264 else if (ipo->offset > __ipo->offset) {
266 overlap = __ipo->offset + __ipo->len > ipo->offset;
272 dprint(FD_IO, "iolog: overlap %llu/%lu, %llu/%lu",
273 __ipo->offset, __ipo->len,
274 ipo->offset, ipo->len);
276 rb_erase(parent, &td->io_hist_tree);
277 remove_trim_entry(td, __ipo);
283 rb_link_node(&ipo->rb_node, parent, p);
284 rb_insert_color(&ipo->rb_node, &td->io_hist_tree);
285 ipo->flags |= IP_F_ONRB;
289 void unlog_io_piece(struct thread_data *td, struct io_u *io_u)
291 struct io_piece *ipo = io_u->ipo;
293 if (td->ts.nr_block_infos) {
294 uint32_t *info = io_u_block_info(td, io_u);
295 if (BLOCK_INFO_STATE(*info) < BLOCK_STATE_TRIM_FAILURE) {
296 if (io_u->ddir == DDIR_TRIM)
297 *info = BLOCK_INFO_SET_STATE(*info,
298 BLOCK_STATE_TRIM_FAILURE);
299 else if (io_u->ddir == DDIR_WRITE)
300 *info = BLOCK_INFO_SET_STATE(*info,
301 BLOCK_STATE_WRITE_FAILURE);
308 if (ipo->flags & IP_F_ONRB)
309 rb_erase(&ipo->rb_node, &td->io_hist_tree);
310 else if (ipo->flags & IP_F_ONLIST)
311 flist_del(&ipo->list);
318 void trim_io_piece(struct thread_data *td, const struct io_u *io_u)
320 struct io_piece *ipo = io_u->ipo;
325 ipo->len = io_u->xfer_buflen - io_u->resid;
328 void write_iolog_close(struct thread_data *td)
334 td->iolog_buf = NULL;
338 * Read version 2 iolog data. It is enhanced to include per-file logging,
341 static int read_iolog2(struct thread_data *td, FILE *f)
343 unsigned long long offset;
345 int reads, writes, waits, fileno = 0, file_action = 0; /* stupid gcc */
350 free_release_files(td);
353 * Read in the read iolog and store it, reuse the infrastructure
354 * for doing verifications.
357 fname = malloc(256+16);
358 act = malloc(256+16);
360 reads = writes = waits = 0;
361 while ((p = fgets(str, 4096, f)) != NULL) {
362 struct io_piece *ipo;
365 r = sscanf(p, "%256s %256s %llu %u", fname, act, &offset,
371 if (!strcmp(act, "wait"))
373 else if (!strcmp(act, "read"))
375 else if (!strcmp(act, "write"))
377 else if (!strcmp(act, "sync"))
379 else if (!strcmp(act, "datasync"))
381 else if (!strcmp(act, "trim"))
384 log_err("fio: bad iolog file action: %s\n",
388 fileno = get_fileno(td, fname);
391 if (!strcmp(act, "add")) {
392 fileno = add_file(td, fname, 0, 1);
393 file_action = FIO_LOG_ADD_FILE;
395 } else if (!strcmp(act, "open")) {
396 fileno = get_fileno(td, fname);
397 file_action = FIO_LOG_OPEN_FILE;
398 } else if (!strcmp(act, "close")) {
399 fileno = get_fileno(td, fname);
400 file_action = FIO_LOG_CLOSE_FILE;
402 log_err("fio: bad iolog file action: %s\n",
407 log_err("bad iolog2: %s", p);
413 else if (rw == DDIR_WRITE) {
415 * Don't add a write for ro mode
420 } else if (rw == DDIR_WAIT) {
422 } else if (rw == DDIR_INVAL) {
423 } else if (!ddir_sync(rw)) {
424 log_err("bad ddir: %d\n", rw);
431 ipo = malloc(sizeof(*ipo));
434 if (rw == DDIR_WAIT) {
437 ipo->offset = offset;
439 if (rw != DDIR_INVAL && bytes > td->o.max_bs[rw])
440 td->o.max_bs[rw] = bytes;
441 ipo->fileno = fileno;
442 ipo->file_action = file_action;
446 queue_io_piece(td, ipo);
453 if (writes && read_only) {
454 log_err("fio: <%s> skips replay of %d writes due to"
455 " read-only\n", td->o.name, writes);
459 if (!reads && !writes && !waits)
461 else if (reads && !writes)
462 td->o.td_ddir = TD_DDIR_READ;
463 else if (!reads && writes)
464 td->o.td_ddir = TD_DDIR_WRITE;
466 td->o.td_ddir = TD_DDIR_RW;
472 * open iolog, check version, and call appropriate parser
474 static int init_iolog_read(struct thread_data *td)
476 char buffer[256], *p;
480 f = fopen(td->o.read_iolog_file, "r");
482 perror("fopen read iolog");
486 p = fgets(buffer, sizeof(buffer), f);
488 td_verror(td, errno, "iolog read");
489 log_err("fio: unable to read iolog\n");
495 * version 2 of the iolog stores a specific string as the
496 * first line, check for that
498 if (!strncmp(iolog_ver2, buffer, strlen(iolog_ver2)))
499 ret = read_iolog2(td, f);
501 log_err("fio: iolog version 1 is no longer supported\n");
510 * Set up a log for storing io patterns.
512 static int init_iolog_write(struct thread_data *td)
518 f = fopen(td->o.write_iolog_file, "a");
520 perror("fopen write iolog");
525 * That's it for writing, setup a log buffer and we're done.
528 td->iolog_buf = malloc(8192);
529 setvbuf(f, td->iolog_buf, _IOFBF, 8192);
532 * write our version line
534 if (fprintf(f, "%s\n", iolog_ver2) < 0) {
535 perror("iolog init\n");
540 * add all known files
542 for_each_file(td, ff, i)
543 log_file(td, ff, FIO_LOG_ADD_FILE);
548 int init_iolog(struct thread_data *td)
552 if (td->o.read_iolog_file) {
556 * Check if it's a blktrace file and load that if possible.
557 * Otherwise assume it's a normal log file and load that.
559 if (is_blktrace(td->o.read_iolog_file, &need_swap))
560 ret = load_blktrace(td, td->o.read_iolog_file, need_swap);
562 ret = init_iolog_read(td);
563 } else if (td->o.write_iolog_file)
564 ret = init_iolog_write(td);
567 td_verror(td, EINVAL, "failed initializing iolog");
572 void setup_log(struct io_log **log, struct log_params *p,
573 const char *filename)
577 l = calloc(1, sizeof(*l));
579 l->max_samples = 1024;
580 l->log_type = p->log_type;
581 l->log_offset = p->log_offset;
582 l->log_gz = p->log_gz;
583 l->log_gz_store = p->log_gz_store;
584 l->log = malloc(l->max_samples * log_entry_sz(l));
585 l->avg_msec = p->avg_msec;
586 l->filename = strdup(filename);
590 l->log_ddir_mask = LOG_OFFSET_SAMPLE_BIT;
592 INIT_FLIST_HEAD(&l->chunk_list);
594 if (l->log_gz && !p->td)
596 else if (l->log_gz || l->log_gz_store) {
597 pthread_mutex_init(&l->chunk_lock, NULL);
598 p->td->flags |= TD_F_COMPRESS_LOG;
#ifdef CONFIG_SETVBUF
/*
 * Give the stream a large (1MB) fully-buffered buffer to cut down on
 * small writes when flushing logs. Caller frees via clear_file_buffer().
 */
static void *set_file_buffer(FILE *f)
{
	size_t size = 1048576;
	void *buf;

	buf = malloc(size);
	setvbuf(f, buf, _IOFBF, size);
	return buf;
}

static void clear_file_buffer(void *buf)
{
	free(buf);
}
#else
/* No setvbuf() support: no buffer to manage. */
static void *set_file_buffer(FILE *f)
{
	return NULL;
}

static void clear_file_buffer(void *buf)
{
}
#endif
630 void free_log(struct io_log *log)
637 static void flush_samples(FILE *f, void *samples, uint64_t sample_size)
641 uint64_t i, nr_samples;
646 s = __get_sample(samples, 0, 0);
647 log_offset = (s->__ddir & LOG_OFFSET_SAMPLE_BIT) != 0;
649 nr_samples = sample_size / __log_entry_sz(log_offset);
651 for (i = 0; i < nr_samples; i++) {
652 s = __get_sample(samples, log_offset, i);
655 fprintf(f, "%lu, %lu, %u, %u\n",
656 (unsigned long) s->time,
657 (unsigned long) s->val,
658 io_sample_ddir(s), s->bs);
660 struct io_sample_offset *so = (void *) s;
662 fprintf(f, "%lu, %lu, %u, %u, %llu\n",
663 (unsigned long) s->time,
664 (unsigned long) s->val,
665 io_sample_ddir(s), s->bs,
666 (unsigned long long) so->offset);
673 struct iolog_flush_data {
674 struct workqueue_work work;
675 pthread_mutex_t lock;
685 struct iolog_compress {
686 struct flist_head list;
692 #define GZ_CHUNK 131072
694 static struct iolog_compress *get_new_chunk(unsigned int seq)
696 struct iolog_compress *c;
698 c = malloc(sizeof(*c));
699 INIT_FLIST_HEAD(&c->list);
700 c->buf = malloc(GZ_CHUNK);
706 static void free_chunk(struct iolog_compress *ic)
712 static int z_stream_init(z_stream *stream, int gz_hdr)
716 stream->zalloc = Z_NULL;
717 stream->zfree = Z_NULL;
718 stream->opaque = Z_NULL;
719 stream->next_in = Z_NULL;
722 * zlib magic - add 32 for auto-detection of gz header or not,
723 * if we decide to store files in a gzip friendly format.
728 if (inflateInit2(stream, wbits) != Z_OK)
/*
 * State carried across inflate_chunk() calls: current sequence number,
 * sticky error, and the growable output buffer.
 *
 * NOTE(review): field list was missing from the truncated listing;
 * restored from upstream fio — verify against the original.
 */
struct inflate_chunk_iter {
	unsigned int seq;
	int err;
	void *buf;
	size_t buf_size;
	size_t buf_used;
	size_t chunk_sz;
};
743 static void finish_chunk(z_stream *stream, FILE *f,
744 struct inflate_chunk_iter *iter)
748 ret = inflateEnd(stream);
750 log_err("fio: failed to end log inflation (%d)\n", ret);
752 flush_samples(f, iter->buf, iter->buf_used);
755 iter->buf_size = iter->buf_used = 0;
759 * Iterative chunk inflation. Handles cases where we cross into a new
760 * sequence, doing flush finish of previous chunk if needed.
762 static size_t inflate_chunk(struct iolog_compress *ic, int gz_hdr, FILE *f,
763 z_stream *stream, struct inflate_chunk_iter *iter)
767 dprint(FD_COMPRESS, "inflate chunk size=%lu, seq=%u",
768 (unsigned long) ic->len, ic->seq);
770 if (ic->seq != iter->seq) {
772 finish_chunk(stream, f, iter);
774 z_stream_init(stream, gz_hdr);
778 stream->avail_in = ic->len;
779 stream->next_in = ic->buf;
781 if (!iter->buf_size) {
782 iter->buf_size = iter->chunk_sz;
783 iter->buf = malloc(iter->buf_size);
786 while (stream->avail_in) {
787 size_t this_out = iter->buf_size - iter->buf_used;
790 stream->avail_out = this_out;
791 stream->next_out = iter->buf + iter->buf_used;
793 err = inflate(stream, Z_NO_FLUSH);
795 log_err("fio: failed inflating log: %d\n", err);
800 iter->buf_used += this_out - stream->avail_out;
802 if (!stream->avail_out) {
803 iter->buf_size += iter->chunk_sz;
804 iter->buf = realloc(iter->buf, iter->buf_size);
808 if (err == Z_STREAM_END)
812 ret = (void *) stream->next_in - ic->buf;
814 dprint(FD_COMPRESS, "inflated to size=%lu\n", (unsigned long) ret);
820 * Inflate stored compressed chunks, or write them directly to the log
821 * file if so instructed.
823 static int inflate_gz_chunks(struct io_log *log, FILE *f)
825 struct inflate_chunk_iter iter = { .chunk_sz = log->log_gz, };
828 while (!flist_empty(&log->chunk_list)) {
829 struct iolog_compress *ic;
831 ic = flist_first_entry(&log->chunk_list, struct iolog_compress, list);
832 flist_del(&ic->list);
834 if (log->log_gz_store) {
837 dprint(FD_COMPRESS, "log write chunk size=%lu, "
838 "seq=%u\n", (unsigned long) ic->len, ic->seq);
840 ret = fwrite(ic->buf, ic->len, 1, f);
841 if (ret != 1 || ferror(f)) {
843 log_err("fio: error writing compressed log\n");
846 inflate_chunk(ic, log->log_gz_store, f, &stream, &iter);
852 finish_chunk(&stream, f, &iter);
860 * Open compressed log file and decompress the stored chunks and
861 * write them to stdout. The chunks are stored sequentially in the
862 * file, so we iterate over them and do them one-by-one.
864 int iolog_file_inflate(const char *file)
866 struct inflate_chunk_iter iter = { .chunk_sz = 64 * 1024 * 1024, };
867 struct iolog_compress ic;
875 f = fopen(file, "r");
881 if (stat(file, &sb) < 0) {
887 ic.buf = buf = malloc(sb.st_size);
891 ret = fread(ic.buf, ic.len, 1, f);
897 } else if (ret != 1) {
898 log_err("fio: short read on reading log\n");
907 * Each chunk will return Z_STREAM_END. We don't know how many
908 * chunks are in the file, so we just keep looping and incrementing
909 * the sequence number until we have consumed the whole compressed
916 iret = inflate_chunk(&ic, 1, stdout, &stream, &iter);
929 finish_chunk(&stream, stdout, &iter);
/* Stubs used when fio is built without zlib support. */
static int inflate_gz_chunks(struct io_log *log, FILE *f)
{
	return 0;
}

int iolog_file_inflate(const char *file)
{
	log_err("fio: log inflation not possible without zlib\n");
	return 1;
}
952 void flush_log(struct io_log *log, int do_append)
958 f = fopen(log->filename, "w");
960 f = fopen(log->filename, "a");
966 buf = set_file_buffer(f);
968 inflate_gz_chunks(log, f);
970 flush_samples(f, log->log, log->nr_samples * log_entry_sz(log));
973 clear_file_buffer(buf);
976 static int finish_log(struct thread_data *td, struct io_log *log, int trylock)
978 if (td->flags & TD_F_COMPRESS_LOG)
982 if (fio_trylock_file(log->filename))
985 fio_lock_file(log->filename);
987 if (td->client_type == FIO_CLIENT_TYPE_GUI)
988 fio_send_iolog(td, log, log->filename);
990 flush_log(log, !td->o.per_job_logs);
992 fio_unlock_file(log->filename);
999 static void drop_data_unlock(struct iolog_flush_data *data)
1003 refs = --data->refs;
1004 pthread_mutex_unlock(&data->lock);
1008 pthread_mutex_destroy(&data->lock);
1009 pthread_cond_destroy(&data->cv);
1014 * Invoked from our compress helper thread, when logging would have exceeded
1015 * the specified memory limitation. Compresses the previously stored
1018 static int gz_work(struct submit_worker *sw, struct workqueue_work *work)
1020 struct iolog_flush_data *data;
1021 struct iolog_compress *c;
1022 struct flist_head list;
1028 INIT_FLIST_HEAD(&list);
1030 data = container_of(work, struct iolog_flush_data, work);
1032 stream.zalloc = Z_NULL;
1033 stream.zfree = Z_NULL;
1034 stream.opaque = Z_NULL;
1036 ret = deflateInit(&stream, Z_DEFAULT_COMPRESSION);
1038 log_err("fio: failed to init gz stream\n");
1042 seq = ++data->log->chunk_seq;
1044 stream.next_in = (void *) data->samples;
1045 stream.avail_in = data->nr_samples * log_entry_sz(data->log);
1047 dprint(FD_COMPRESS, "deflate input size=%lu, seq=%u\n",
1048 (unsigned long) stream.avail_in, seq);
1050 c = get_new_chunk(seq);
1051 stream.avail_out = GZ_CHUNK;
1052 stream.next_out = c->buf;
1053 ret = deflate(&stream, Z_NO_FLUSH);
1055 log_err("fio: deflate log (%d)\n", ret);
1060 c->len = GZ_CHUNK - stream.avail_out;
1061 flist_add_tail(&c->list, &list);
1063 } while (stream.avail_in);
1065 stream.next_out = c->buf + c->len;
1066 stream.avail_out = GZ_CHUNK - c->len;
1068 ret = deflate(&stream, Z_FINISH);
1069 if (ret == Z_STREAM_END)
1070 c->len = GZ_CHUNK - stream.avail_out;
1073 c = get_new_chunk(seq);
1074 stream.avail_out = GZ_CHUNK;
1075 stream.next_out = c->buf;
1076 ret = deflate(&stream, Z_FINISH);
1077 c->len = GZ_CHUNK - stream.avail_out;
1079 flist_add_tail(&c->list, &list);
1080 } while (ret != Z_STREAM_END);
1083 dprint(FD_COMPRESS, "deflated to size=%lu\n", (unsigned long) total);
1085 ret = deflateEnd(&stream);
1087 log_err("fio: deflateEnd %d\n", ret);
1089 free(data->samples);
1091 if (!flist_empty(&list)) {
1092 pthread_mutex_lock(&data->log->chunk_lock);
1093 flist_splice_tail(&list, &data->log->chunk_list);
1094 pthread_mutex_unlock(&data->log->chunk_lock);
1100 pthread_mutex_lock(&data->lock);
1102 pthread_cond_signal(&data->cv);
1104 drop_data_unlock(data);
1109 while (!flist_empty(&list)) {
1110 c = flist_first_entry(list.next, struct iolog_compress, list);
1111 flist_del(&c->list);
1118 static int gz_init_worker(struct submit_worker *sw)
1120 struct thread_data *td = sw->wq->td;
1122 if (!fio_option_is_set(&td->o, log_gz_cpumask))
1125 if (fio_setaffinity(gettid(), td->o.log_gz_cpumask) == -1) {
1126 log_err("gz: failed to set CPU affinity\n");
1133 static struct workqueue_ops log_compress_wq_ops = {
1135 .init_worker_fn = gz_init_worker,
1139 int iolog_compress_init(struct thread_data *td)
1141 if (!(td->flags & TD_F_COMPRESS_LOG))
1144 workqueue_init(td, &td->log_compress_wq, &log_compress_wq_ops, 1);
1148 void iolog_compress_exit(struct thread_data *td)
1150 if (!(td->flags & TD_F_COMPRESS_LOG))
1153 workqueue_exit(&td->log_compress_wq);
1157 * Queue work item to compress the existing log entries. We reset the
1158 * current log to a small size, and reference the existing log in the
1159 * data that we queue for compression. Once compression has been done,
1160 * this old log is freed. If called with wait == 1, will not return until
1161 * the log compression has completed.
1163 int iolog_flush(struct io_log *log, int wait)
1165 struct iolog_flush_data *data;
1167 data = malloc(sizeof(*data));
1173 data->samples = log->log;
1174 data->nr_samples = log->nr_samples;
1176 log->nr_samples = 0;
1177 log->max_samples = 128;
1178 log->log = malloc(log->max_samples * log_entry_sz(log));
1182 pthread_mutex_init(&data->lock, NULL);
1183 pthread_cond_init(&data->cv, NULL);
1188 workqueue_enqueue(&log->td->log_compress_wq, &data->work);
1191 pthread_mutex_lock(&data->lock);
1193 pthread_cond_wait(&data->cv, &data->lock);
1195 drop_data_unlock(data);
/* Stubs used when fio is built without zlib: compression is unavailable. */
int iolog_flush(struct io_log *log, int wait)
{
	return 1;
}

int iolog_compress_init(struct thread_data *td)
{
	return 0;
}

void iolog_compress_exit(struct thread_data *td)
{
}
/*
 * Finalize one log if it exists; returns finish_log()'s retry status,
 * or 0 when the log was never allocated.
 */
static int __write_log(struct thread_data *td, struct io_log *log, int try)
{
	if (log)
		return finish_log(td, log, try);

	return 0;
}
1227 static int write_iops_log(struct thread_data *td, int try)
1229 return __write_log(td, td->iops_log, try);
1232 static int write_slat_log(struct thread_data *td, int try)
1234 return __write_log(td, td->slat_log, try);
1237 static int write_clat_log(struct thread_data *td, int try)
1239 return __write_log(td, td->clat_log, try);
1242 static int write_lat_log(struct thread_data *td, int try)
1244 return __write_log(td, td->lat_log, try);
1247 static int write_bandw_log(struct thread_data *td, int try)
1249 return __write_log(td, td->bw_log, try);
1264 int (*fn)(struct thread_data *, int);
1267 static struct log_type log_types[] = {
1269 .mask = BW_LOG_MASK,
1270 .fn = write_bandw_log,
1273 .mask = LAT_LOG_MASK,
1274 .fn = write_lat_log,
1277 .mask = SLAT_LOG_MASK,
1278 .fn = write_slat_log,
1281 .mask = CLAT_LOG_MASK,
1282 .fn = write_clat_log,
1285 .mask = IOPS_LOG_MASK,
1286 .fn = write_iops_log,
1290 void fio_writeout_logs(struct thread_data *td)
1292 unsigned int log_mask = 0;
1293 unsigned int log_left = ALL_LOG_NR;
1296 old_state = td_bump_runstate(td, TD_FINISHING);
1301 int prev_log_left = log_left;
1303 for (i = 0; i < ALL_LOG_NR && log_left; i++) {
1304 struct log_type *lt = &log_types[i];
1307 if (!(log_mask & lt->mask)) {
1308 ret = lt->fn(td, log_left != 1);
1311 log_mask |= lt->mask;
1316 if (prev_log_left == log_left)
1320 td_restore_runstate(td, old_state);