2 * Code related to writing an iolog of what a thread is doing, and to
3 * later read that back and replay
/* Magic first line of a version-2 iolog file; checked in init_iolog_read(). */
22 static const char iolog_ver2[] = "fio version 2 iolog";
/*
 * Append an io_piece to the thread's replay queue and account its length
 * toward the total expected I/O size.
 * NOTE(review): interior lines of this function are elided in this view.
 */
24 void queue_io_piece(struct thread_data *td, struct io_piece *ipo)
26 flist_add_tail(&ipo->list, &td->io_log_list);
27 td->total_io_size += ipo->len;
/*
 * Write one "<file> <ddir> <offset> <len>" line to the iolog output file.
 * No-op unless the job was configured with write_iolog_file.
 */
30 void log_io_u(const struct thread_data *td, const struct io_u *io_u)
32 if (!td->o.write_iolog_file)
35 fprintf(td->iolog_f, "%s %s %llu %lu\n", io_u->file->file_name,
36 io_ddir_name(io_u->ddir),
37 io_u->offset, io_u->buflen);
/*
 * Log a file-level action (add/open/close) to the iolog output file.
 * 'what' indexes the act[] table, so it must be one of the three
 * FIO_LOG_*_FILE values corresponding to those strings.
 */
40 void log_file(struct thread_data *td, struct fio_file *f,
41 enum file_log_act what)
43 const char *act[] = { "add", "open", "close" };
47 if (!td->o.write_iolog_file)
52 * this happens on the pre-open/close done before the job starts
57 fprintf(td->iolog_f, "%s %s\n", f->file_name, act[what]);
/*
 * Sleep for a replayed inter-I/O delay, compensating for time already
 * spent since the last issue (td->time_offset carries oversleep forward).
 * Sleeps in chunks (<= 500000 usec per step, per the visible clamp) and
 * bails out early if the thread is terminating.
 * NOTE(review): several interior lines are elided here.
 */
60 static void iolog_delay(struct thread_data *td, unsigned long delay)
62 uint64_t usec = utime_since_now(&td->last_issue);
66 if (delay < td->time_offset) {
71 delay -= td->time_offset;
77 fio_gettime(&tv, NULL);
78 while (delay && !td->terminate) {
80 if (this_delay > 500000)
83 usec_sleep(td, this_delay);
87 usec = utime_since_now(&tv);
/* Remember how much we overslept so the next delay can be shortened. */
89 td->time_offset = usec - delay;
/*
 * Handle a "special" (non-data) replay entry: file open/close/unlink
 * actions carried by an io_piece with ddir == DDIR_INVAL.
 * Returns early (non-special) when the entry is a normal data direction.
 */
94 static int ipo_special(struct thread_data *td, struct io_piece *ipo)
102 if (ipo->ddir != DDIR_INVAL)
105 f = td->files[ipo->fileno];
107 switch (ipo->file_action) {
108 case FIO_LOG_OPEN_FILE:
109 ret = td_io_open_file(td, f);
112 td_verror(td, ret, "iolog open file");
114 case FIO_LOG_CLOSE_FILE:
115 td_io_close_file(td, f);
117 case FIO_LOG_UNLINK_FILE:
118 td_io_unlink_file(td, f);
/* default: unknown action is a hard error. */
121 log_err("fio: bad file action %d\n", ipo->file_action);
/*
 * Pop the next replay entry off io_log_list and populate io_u with it.
 * Special (file-action) entries are consumed in-loop via ipo_special();
 * DDIR_WAIT entries sleep instead of carrying offset/len/file.
 * NOTE(review): return paths and cleanup lines are elided in this view.
 */
128 int read_iolog_get(struct thread_data *td, struct io_u *io_u)
130 struct io_piece *ipo;
131 unsigned long elapsed;
133 while (!flist_empty(&td->io_log_list)) {
136 ipo = flist_first_entry(&td->io_log_list, struct io_piece, list);
137 flist_del(&ipo->list);
138 remove_trim_entry(td, ipo);
140 ret = ipo_special(td, ipo);
144 } else if (ret > 0) {
149 io_u->ddir = ipo->ddir;
150 if (ipo->ddir != DDIR_WAIT) {
151 io_u->offset = ipo->offset;
152 io_u->buflen = ipo->len;
153 io_u->file = td->files[ipo->fileno];
/* Take a reference on the file for the duration of this io_u. */
154 get_file(io_u->file);
155 dprint(FD_IO, "iolog: get %llu/%lu/%s\n", io_u->offset,
156 io_u->buflen, io_u->file->file_name);
157 iolog_delay(td, ipo->delay);
/* DDIR_WAIT: delay is an absolute msec-since-genesis target. */
160 elapsed = mtime_since_genesis();
161 if (ipo->delay > elapsed)
162 usec_sleep(td, (ipo->delay - elapsed) * 1000);
167 if (io_u->ddir != DDIR_WAIT)
/*
 * Free all stored io_piece history: first drain the rb-tree used for
 * sorted verify lookups, then the plain history list. Each entry is
 * also dropped from the trim list before being released.
 */
175 void prune_io_piece_log(struct thread_data *td)
177 struct io_piece *ipo;
180 while ((n = rb_first(&td->io_hist_tree)) != NULL) {
181 ipo = rb_entry(n, struct io_piece, rb_node);
182 rb_erase(n, &td->io_hist_tree);
183 remove_trim_entry(td, ipo);
188 while (!flist_empty(&td->io_hist_list)) {
189 ipo = flist_first_entry(&td->io_hist_list, struct io_piece, list);
190 flist_del(&ipo->list);
191 remove_trim_entry(td, ipo);
198  * log a successful write, so we can unwind the log for verify
/*
 * Record a completed write as an io_piece so verify can later read it
 * back. Fast path appends to io_hist_list in completion order; otherwise
 * the entry is inserted into the io_hist_tree rb-tree sorted by
 * (file, offset), dropping older overlapping entries.
 */
200 void log_io_piece(struct thread_data *td, struct io_u *io_u)
202 struct rb_node **p, *parent;
203 struct io_piece *ipo, *__ipo;
205 ipo = malloc(sizeof(struct io_piece));
207 ipo->file = io_u->file;
208 ipo->offset = io_u->offset;
209 ipo->len = io_u->buflen;
210 ipo->numberio = io_u->numberio;
211 ipo->flags = IP_F_IN_FLIGHT;
215 if (io_u_should_trim(td, io_u)) {
216 flist_add_tail(&ipo->trim_list, &td->trim_list);
221 * We don't need to sort the entries, if:
223 * Sequential writes, or
224 * Random writes that lay out the file as it goes along
226 * For both these cases, just reading back data in the order we
227 * wrote it out is the fastest.
229 * One exception is if we don't have a random map AND we are doing
230 * verifies, in that case we need to check for duplicate blocks and
231 * drop the old one, which we rely on the rb insert/lookup for
234 if (((!td->o.verifysort) || !td_random(td) || !td->o.overwrite) &&
235 (file_randommap(td, ipo->file) || td->o.verify == VERIFY_NONE)) {
236 INIT_FLIST_HEAD(&ipo->list);
237 flist_add_tail(&ipo->list, &td->io_hist_list);
238 ipo->flags |= IP_F_ONLIST;
243 RB_CLEAR_NODE(&ipo->rb_node);
246 * Sort the entry into the verification list
249 p = &td->io_hist_tree.rb_node;
255 __ipo = rb_entry(parent, struct io_piece, rb_node);
/* Order by file pointer first, then by offset within the file. */
256 if (ipo->file < __ipo->file)
258 else if (ipo->file > __ipo->file)
260 else if (ipo->offset < __ipo->offset) {
262 overlap = ipo->offset + ipo->len > __ipo->offset;
264 else if (ipo->offset > __ipo->offset) {
266 overlap = __ipo->offset + __ipo->len > ipo->offset;
/* Overlap with an existing entry: evict the stale one and retry. */
272 dprint(FD_IO, "iolog: overlap %llu/%lu, %llu/%lu",
273 __ipo->offset, __ipo->len,
274 ipo->offset, ipo->len);
276 rb_erase(parent, &td->io_hist_tree);
277 remove_trim_entry(td, __ipo);
283 rb_link_node(&ipo->rb_node, parent, p);
284 rb_insert_color(&ipo->rb_node, &td->io_hist_tree);
285 ipo->flags |= IP_F_ONRB;
/*
 * Undo log_io_piece() for a failed io_u: mark the block-info state as a
 * trim/write failure (when block info tracking is on) and remove the
 * io_piece from whichever structure it was placed on (rb-tree or list).
 */
289 void unlog_io_piece(struct thread_data *td, struct io_u *io_u)
291 struct io_piece *ipo = io_u->ipo;
293 if (td->ts.nr_block_infos) {
294 uint32_t *info = io_u_block_info(td, io_u);
/* Only downgrade state; never overwrite an existing failure mark. */
295 if (BLOCK_INFO_STATE(*info) < BLOCK_STATE_TRIM_FAILURE) {
296 if (io_u->ddir == DDIR_TRIM)
297 *info = BLOCK_INFO_SET_STATE(*info,
298 BLOCK_STATE_TRIM_FAILURE);
299 else if (io_u->ddir == DDIR_WRITE)
300 *info = BLOCK_INFO_SET_STATE(*info,
301 BLOCK_STATE_WRITE_FAILURE);
308 if (ipo->flags & IP_F_ONRB)
309 rb_erase(&ipo->rb_node, &td->io_hist_tree);
310 else if (ipo->flags & IP_F_ONLIST)
311 flist_del(&ipo->list);
/*
 * Shrink the logged length of an io_u's piece to the bytes actually
 * transferred (xfer_buflen minus residual), e.g. after a short I/O.
 */
318 void trim_io_piece(struct thread_data *td, const struct io_u *io_u)
320 struct io_piece *ipo = io_u->ipo;
325 ipo->len = io_u->xfer_buflen - io_u->resid;
/*
 * Close out the write-iolog: visible here is only the reset of the
 * stdio buffer pointer; flush/close lines are elided in this view.
 */
328 void write_iolog_close(struct thread_data *td)
334 td->iolog_buf = NULL;
338  * Read version 2 iolog data. It is enhanced to include per-file logging,
/*
 * Parse a version-2 iolog: one action per line, either a data op
 * ("<file> <ddir> <offset> <len>"), a wait, or a file action
 * ("<file> add|open|close"). Each parsed entry is queued as an io_piece
 * for replay, and the job's data direction is derived from what was seen.
 * NOTE(review): error/cleanup paths are elided in this view.
 */
341 static int read_iolog2(struct thread_data *td, FILE *f)
343 unsigned long long offset;
345 int reads, writes, waits, fileno = 0, file_action = 0; /* stupid gcc */
350 free_release_files(td);
353 * Read in the read iolog and store it, reuse the infrastructure
354 * for doing verifications.
/* +16 slack over the %256s scanf width, for terminator headroom. */
357 fname = malloc(256+16);
358 act = malloc(256+16);
360 reads = writes = waits = 0;
361 while ((p = fgets(str, 4096, f)) != NULL) {
362 struct io_piece *ipo;
365 r = sscanf(p, "%256s %256s %llu %u", fname, act, &offset,
/* 4-field match: a data/wait/sync op. */
371 if (!strcmp(act, "wait"))
373 else if (!strcmp(act, "read"))
375 else if (!strcmp(act, "write"))
377 else if (!strcmp(act, "sync"))
379 else if (!strcmp(act, "datasync"))
381 else if (!strcmp(act, "trim"))
384 log_err("fio: bad iolog file action: %s\n",
388 fileno = get_fileno(td, fname);
/* 2-field match: a file-level action. */
391 if (!strcmp(act, "add")) {
392 fileno = add_file(td, fname, 0, 1);
393 file_action = FIO_LOG_ADD_FILE;
395 } else if (!strcmp(act, "open")) {
396 fileno = get_fileno(td, fname);
397 file_action = FIO_LOG_OPEN_FILE;
398 } else if (!strcmp(act, "close")) {
399 fileno = get_fileno(td, fname);
400 file_action = FIO_LOG_CLOSE_FILE;
402 log_err("fio: bad iolog file action: %s\n",
407 log_err("bad iolog2: %s", p);
413 else if (rw == DDIR_WRITE) {
415 * Don't add a write for ro mode
420 } else if (rw == DDIR_WAIT) {
422 } else if (rw == DDIR_INVAL) {
423 } else if (!ddir_sync(rw)) {
424 log_err("bad ddir: %d\n", rw);
431 ipo = malloc(sizeof(*ipo));
434 if (rw == DDIR_WAIT) {
437 ipo->offset = offset;
/* Track the largest block size seen so buffers are sized correctly. */
439 if (rw != DDIR_INVAL && bytes > td->o.max_bs[rw])
440 td->o.max_bs[rw] = bytes;
441 ipo->fileno = fileno;
442 ipo->file_action = file_action;
446 queue_io_piece(td, ipo);
453 if (writes && read_only) {
454 log_err("fio: <%s> skips replay of %d writes due to"
455 " read-only\n", td->o.name, writes);
/* Derive the effective data direction from the ops actually queued. */
459 if (!reads && !writes && !waits)
461 else if (reads && !writes)
462 td->o.td_ddir = TD_DDIR_READ;
463 else if (!reads && writes)
464 td->o.td_ddir = TD_DDIR_WRITE;
466 td->o.td_ddir = TD_DDIR_RW;
472  * open iolog, check version, and call appropriate parser
474 static int init_iolog_read(struct thread_data *td)
476 char buffer[256], *p;
480 f = fopen(td->o.read_iolog_file, "r");
482 perror("fopen read iolog");
/* Read the first line to identify the log format version. */
486 p = fgets(buffer, sizeof(buffer), f);
488 td_verror(td, errno, "iolog read");
489 log_err("fio: unable to read iolog\n");
495 * version 2 of the iolog stores a specific string as the
496 * first line, check for that
498 if (!strncmp(iolog_ver2, buffer, strlen(iolog_ver2)))
499 ret = read_iolog2(td, f);
/* Anything else is treated as the unsupported v1 format. */
501 log_err("fio: iolog version 1 is no longer supported\n");
510  * Set up a log for storing io patterns.
512 static int init_iolog_write(struct thread_data *td)
518 f = fopen(td->o.write_iolog_file, "a");
520 perror("fopen write iolog");
525 * That's it for writing, setup a log buffer and we're done.
/* Fully-buffered 8 KiB stdio buffer; freed in write_iolog_close(). */
528 td->iolog_buf = malloc(8192);
529 setvbuf(f, td->iolog_buf, _IOFBF, 8192);
532 * write our version line
534 if (fprintf(f, "%s\n", iolog_ver2) < 0) {
535 perror("iolog init\n");
540 * add all known files
/* Pre-log an "add" entry for every file the job already knows about. */
542 for_each_file(td, ff, i)
543 log_file(td, ff, FIO_LOG_ADD_FILE);
/*
 * Entry point for iolog setup: replay path (read_iolog_file, which may
 * be a blktrace or a fio iolog) or recording path (write_iolog_file).
 */
548 int init_iolog(struct thread_data *td)
552 if (td->o.read_iolog_file) {
556 * Check if it's a blktrace file and load that if possible.
557 * Otherwise assume it's a normal log file and load that.
559 if (is_blktrace(td->o.read_iolog_file, &need_swap))
560 ret = load_blktrace(td, td->o.read_iolog_file, need_swap);
562 ret = init_iolog_read(td);
563 } else if (td->o.write_iolog_file)
564 ret = init_iolog_write(td);
567 td_verror(td, EINVAL, "failed initializing iolog");
/*
 * Allocate and initialize an io_log from the given parameters. With
 * compression requested (log_gz/log_gz_store) the chunk list and lock
 * are set up and the owning thread is flagged TD_F_COMPRESS_LOG.
 * NOTE(review): some init lines are elided in this view.
 */
572 void setup_log(struct io_log **log, struct log_params *p,
573 const char *filename)
577 l = calloc(1, sizeof(*l));
579 l->max_samples = 1024;
580 l->log_type = p->log_type;
581 l->log_offset = p->log_offset;
582 l->log_gz = p->log_gz;
583 l->log_gz_store = p->log_gz_store;
584 l->log = malloc(l->max_samples * log_entry_sz(l));
585 l->avg_msec = p->avg_msec;
586 l->filename = strdup(filename);
/* Offset logging widens each sample; flag it in the ddir mask. */
590 l->log_ddir_mask = LOG_OFFSET_SAMPLE_BIT;
592 INIT_FLIST_HEAD(&l->chunk_list);
594 if (l->log_gz && !p->td)
596 else if (l->log_gz || l->log_gz_store) {
597 pthread_mutex_init(&l->chunk_lock, NULL);
598 p->td->flags |= TD_F_COMPRESS_LOG;
/*
 * Optional large stdio buffering for log flushes: real implementation
 * when CONFIG_SETVBUF is available, no-op stubs otherwise.
 */
604 #ifdef CONFIG_SETVBUF
605 static void *set_file_buffer(FILE *f)
/* 1 MiB fully-buffered stream buffer; caller frees via clear_file_buffer(). */
607 size_t size = 1048576;
611 setvbuf(f, buf, _IOFBF, size);
615 static void clear_file_buffer(void *buf)
620 static void *set_file_buffer(FILE *f)
625 static void clear_file_buffer(void *buf)
/* Release an io_log; body elided in this view. */
630 void free_log(struct io_log *log)
/*
 * Write raw log samples as CSV text. Whether samples carry an offset
 * field is detected from the first sample's LOG_OFFSET_SAMPLE_BIT, which
 * also selects the per-entry size and the output format.
 */
637 void flush_samples(FILE *f, void *samples, uint64_t sample_size)
641 uint64_t i, nr_samples;
646 s = __get_sample(samples, 0, 0);
647 log_offset = (s->__ddir & LOG_OFFSET_SAMPLE_BIT) != 0;
649 nr_samples = sample_size / __log_entry_sz(log_offset);
651 for (i = 0; i < nr_samples; i++) {
652 s = __get_sample(samples, log_offset, i);
/* time, value, ddir, block size — plus offset when logged. */
655 fprintf(f, "%lu, %lu, %u, %u\n",
656 (unsigned long) s->time,
657 (unsigned long) s->val,
658 io_sample_ddir(s), s->bs);
660 struct io_sample_offset *so = (void *) s;
662 fprintf(f, "%lu, %lu, %u, %u, %llu\n",
663 (unsigned long) s->time,
664 (unsigned long) s->val,
665 io_sample_ddir(s), s->bs,
666 (unsigned long long) so->offset);
/*
 * Work item handed to the compression workqueue; lock/cv coordinate a
 * synchronous iolog_flush(wait=1) with the gz_work() worker.
 */
673 struct iolog_flush_data {
674 struct workqueue_work work;
675 pthread_mutex_t lock;
/* Deflate output is produced in fixed 128 KiB chunks. */
685 #define GZ_CHUNK 131072
687 static struct iolog_compress *get_new_chunk(unsigned int seq)
689 struct iolog_compress *c;
691 c = malloc(sizeof(*c));
692 INIT_FLIST_HEAD(&c->list);
693 c->buf = malloc(GZ_CHUNK);
699 static void free_chunk(struct iolog_compress *ic)
/*
 * Initialize a zlib inflate stream. gz_hdr selects window bits that
 * auto-detect a gzip header (the +32 trick mentioned below), for logs
 * stored in gzip-friendly format.
 */
705 static int z_stream_init(z_stream *stream, int gz_hdr)
709 stream->zalloc = Z_NULL;
710 stream->zfree = Z_NULL;
711 stream->opaque = Z_NULL;
712 stream->next_in = Z_NULL;
715 * zlib magic - add 32 for auto-detection of gz header or not,
716 * if we decide to store files in a gzip friendly format.
721 if (inflateInit2(stream, wbits) != Z_OK)
/* Iteration state for streaming decompression across chunks. */
727 struct inflate_chunk_iter {
/*
 * End the current inflate stream and flush the decompressed samples
 * accumulated in iter->buf out through flush_samples().
 */
736 static void finish_chunk(z_stream *stream, FILE *f,
737 struct inflate_chunk_iter *iter)
741 ret = inflateEnd(stream);
743 log_err("fio: failed to end log inflation (%d)\n", ret);
745 flush_samples(f, iter->buf, iter->buf_used);
748 iter->buf_size = iter->buf_used = 0;
752  * Iterative chunk inflation. Handles cases where we cross into a new
753  * sequence, doing flush finish of previous chunk if needed.
755 static size_t inflate_chunk(struct iolog_compress *ic, int gz_hdr, FILE *f,
756 z_stream *stream, struct inflate_chunk_iter *iter)
760 dprint(FD_COMPRESS, "inflate chunk size=%lu, seq=%u",
761 (unsigned long) ic->len, ic->seq);
/* New sequence: finish the previous stream, start a fresh one. */
763 if (ic->seq != iter->seq) {
765 finish_chunk(stream, f, iter);
767 z_stream_init(stream, gz_hdr);
771 stream->avail_in = ic->len;
772 stream->next_in = ic->buf;
774 if (!iter->buf_size) {
775 iter->buf_size = iter->chunk_sz;
776 iter->buf = malloc(iter->buf_size);
/* Inflate until input is consumed, growing the output buffer as needed. */
779 while (stream->avail_in) {
780 size_t this_out = iter->buf_size - iter->buf_used;
783 stream->avail_out = this_out;
784 stream->next_out = iter->buf + iter->buf_used;
786 err = inflate(stream, Z_NO_FLUSH);
788 log_err("fio: failed inflating log: %d\n", err);
793 iter->buf_used += this_out - stream->avail_out;
795 if (!stream->avail_out) {
796 iter->buf_size += iter->chunk_sz;
797 iter->buf = realloc(iter->buf, iter->buf_size);
801 if (err == Z_STREAM_END)
/* Return how many input bytes were consumed from this chunk. */
805 ret = (void *) stream->next_in - ic->buf;
807 dprint(FD_COMPRESS, "inflated to size=%lu\n", (unsigned long) ret);
813  * Inflate stored compressed chunks, or write them directly to the log
814  * file if so instructed.
816 static int inflate_gz_chunks(struct io_log *log, FILE *f)
818 struct inflate_chunk_iter iter = { .chunk_sz = log->log_gz, };
821 while (!flist_empty(&log->chunk_list)) {
822 struct iolog_compress *ic;
824 ic = flist_first_entry(&log->chunk_list, struct iolog_compress, list);
825 flist_del(&ic->list);
/* log_gz_store: keep data compressed, write raw chunk bytes. */
827 if (log->log_gz_store) {
830 dprint(FD_COMPRESS, "log write chunk size=%lu, "
831 "seq=%u\n", (unsigned long) ic->len, ic->seq);
833 ret = fwrite(ic->buf, ic->len, 1, f);
834 if (ret != 1 || ferror(f)) {
836 log_err("fio: error writing compressed log\n");
/* Otherwise decompress into text samples. */
839 inflate_chunk(ic, log->log_gz_store, f, &stream, &iter);
845 finish_chunk(&stream, f, &iter);
853  * Open compressed log file and decompress the stored chunks and
854  * write them to stdout. The chunks are stored sequentially in the
855  * file, so we iterate over them and do them one-by-one.
857 int iolog_file_inflate(const char *file)
859 struct inflate_chunk_iter iter = { .chunk_sz = 64 * 1024 * 1024, };
860 struct iolog_compress ic;
868 f = fopen(file, "r");
/* Size the read buffer from the file's on-disk size. */
874 if (stat(file, &sb) < 0) {
880 ic.buf = buf = malloc(sb.st_size);
884 ret = fread(ic.buf, ic.len, 1, f);
890 } else if (ret != 1) {
891 log_err("fio: short read on reading log\n");
900 * Each chunk will return Z_STREAM_END. We don't know how many
901 * chunks are in the file, so we just keep looping and incrementing
902 * the sequence number until we have consumed the whole compressed
909 iret = inflate_chunk(&ic, 1, stdout, &stream, &iter);
922 finish_chunk(&stream, stdout, &iter);
/* Non-zlib build: compression support is stubbed out. */
932 static int inflate_gz_chunks(struct io_log *log, FILE *f)
937 int iolog_file_inflate(const char *file)
939 log_err("fio: log inflation not possible without zlib\n");
/*
 * Write a log to its file: truncate or append per do_append, drain any
 * compressed chunks first, then flush the in-memory samples.
 */
945 void flush_log(struct io_log *log, int do_append)
951 f = fopen(log->filename, "w");
953 f = fopen(log->filename, "a");
/* Use a large stdio buffer (where supported) for the bulk write. */
959 buf = set_file_buffer(f);
961 inflate_gz_chunks(log, f);
963 flush_samples(f, log->log, log->nr_samples * log_entry_sz(log));
966 clear_file_buffer(buf);
/*
 * Finalize one log file under the cross-process file lock: networked
 * clients/backends ship the log over the wire, local runs flush to disk.
 * trylock avoids blocking when other logs can be attempted first.
 */
969 static int finish_log(struct thread_data *td, struct io_log *log, int trylock)
971 if (td->flags & TD_F_COMPRESS_LOG)
975 if (fio_trylock_file(log->filename))
978 fio_lock_file(log->filename);
980 if (td->client_type == FIO_CLIENT_TYPE_GUI || is_backend)
981 fio_send_iolog(td, log, log->filename);
983 flush_log(log, !td->o.per_job_logs);
985 fio_unlock_file(log->filename);
/*
 * Sum the sizes of all queued compressed chunks, under chunk_lock.
 */
990 size_t log_chunk_sizes(struct io_log *log)
992 struct flist_head *entry;
995 if (flist_empty(&log->chunk_list))
999 pthread_mutex_lock(&log->chunk_lock);
1000 flist_for_each(entry, &log->chunk_list) {
1001 struct iolog_compress *c;
1003 c = flist_entry(entry, struct iolog_compress, list);
1006 pthread_mutex_unlock(&log->chunk_lock);
/*
 * Drop a reference on a flush-data item with its lock held; the last
 * dropper destroys the mutex/cond and frees the item.
 */
1012 static void drop_data_unlock(struct iolog_flush_data *data)
1016 refs = --data->refs;
1017 pthread_mutex_unlock(&data->lock);
1021 pthread_mutex_destroy(&data->lock);
1022 pthread_cond_destroy(&data->cv);
1027  * Invoked from our compress helper thread, when logging would have exceeded
1028  * the specified memory limitation. Compresses the previously stored
/*
 * Workqueue handler: deflate the detached sample buffer into GZ_CHUNK
 * sized iolog_compress chunks and splice them onto the log's chunk list.
 * Signals a waiting iolog_flush(wait=1) caller via data->cv when done.
 * NOTE(review): error-path labels/returns are elided in this view.
 */
1031 static int gz_work(struct submit_worker *sw, struct workqueue_work *work)
1033 struct iolog_flush_data *data;
1034 struct iolog_compress *c;
1035 struct flist_head list;
1041 INIT_FLIST_HEAD(&list);
1043 data = container_of(work, struct iolog_flush_data, work);
1045 stream.zalloc = Z_NULL;
1046 stream.zfree = Z_NULL;
1047 stream.opaque = Z_NULL;
1049 ret = deflateInit(&stream, Z_DEFAULT_COMPRESSION);
1051 log_err("fio: failed to init gz stream\n");
/* Each flush gets a new sequence number for correct inflate ordering. */
1055 seq = ++data->log->chunk_seq;
1057 stream.next_in = (void *) data->samples;
1058 stream.avail_in = data->nr_samples * log_entry_sz(data->log);
1060 dprint(FD_COMPRESS, "deflate input size=%lu, seq=%u\n",
1061 (unsigned long) stream.avail_in, seq);
/* Main deflate loop: one chunk per iteration until input is drained. */
1063 c = get_new_chunk(seq);
1064 stream.avail_out = GZ_CHUNK;
1065 stream.next_out = c->buf;
1066 ret = deflate(&stream, Z_NO_FLUSH);
1068 log_err("fio: deflate log (%d)\n", ret);
1073 c->len = GZ_CHUNK - stream.avail_out;
1074 flist_add_tail(&c->list, &list);
1076 } while (stream.avail_in);
/* Finish the stream, reusing tail space of the last chunk first. */
1078 stream.next_out = c->buf + c->len;
1079 stream.avail_out = GZ_CHUNK - c->len;
1081 ret = deflate(&stream, Z_FINISH);
1082 if (ret == Z_STREAM_END)
1083 c->len = GZ_CHUNK - stream.avail_out;
1086 c = get_new_chunk(seq);
1087 stream.avail_out = GZ_CHUNK;
1088 stream.next_out = c->buf;
1089 ret = deflate(&stream, Z_FINISH);
1090 c->len = GZ_CHUNK - stream.avail_out;
1092 flist_add_tail(&c->list, &list);
1093 } while (ret != Z_STREAM_END);
1096 dprint(FD_COMPRESS, "deflated to size=%lu\n", (unsigned long) total);
1098 ret = deflateEnd(&stream);
1100 log_err("fio: deflateEnd %d\n", ret);
/* The detached sample buffer is now fully compressed; release it. */
1102 free(data->samples);
1104 if (!flist_empty(&list)) {
1105 pthread_mutex_lock(&data->log->chunk_lock);
1106 flist_splice_tail(&list, &data->log->chunk_list);
1107 pthread_mutex_unlock(&data->log->chunk_lock);
/* Wake a synchronous flusher, then drop our reference. */
1113 pthread_mutex_lock(&data->lock);
1115 pthread_cond_signal(&data->cv);
1117 drop_data_unlock(data);
/* Error path (elided above): discard any chunks built so far. */
1122 while (!flist_empty(&list)) {
1123 c = flist_first_entry(list.next, struct iolog_compress, list);
1124 flist_del(&c->list);
/*
 * Per-worker init for the compression workqueue: pin the worker to the
 * configured log_gz_cpumask, if one was set.
 */
1131 static int gz_init_worker(struct submit_worker *sw)
1133 struct thread_data *td = sw->wq->td;
1135 if (!fio_option_is_set(&td->o, log_gz_cpumask))
1138 if (fio_setaffinity(gettid(), td->o.log_gz_cpumask) == -1) {
1139 log_err("gz: failed to set CPU affinity\n");
1146 static struct workqueue_ops log_compress_wq_ops = {
1148 .init_worker_fn = gz_init_worker,
/* Single-worker workqueue for log compression; only when enabled. */
1152 int iolog_compress_init(struct thread_data *td, struct sk_out *sk_out)
1154 if (!(td->flags & TD_F_COMPRESS_LOG))
1157 workqueue_init(td, &td->log_compress_wq, &log_compress_wq_ops, 1, sk_out);
1161 void iolog_compress_exit(struct thread_data *td)
1163 if (!(td->flags & TD_F_COMPRESS_LOG))
1166 workqueue_exit(&td->log_compress_wq);
1170  * Queue work item to compress the existing log entries. We reset the
1171  * current log to a small size, and reference the existing log in the
1172  * data that we queue for compression. Once compression has been done,
1173  * this old log is freed. If called with wait == 1, will not return until
1174  * the log compression has completed.
1176 int iolog_flush(struct io_log *log, int wait)
1178 struct iolog_flush_data *data;
/* Quiesce in-flight I/O so the sample buffer is stable to hand off. */
1180 io_u_quiesce(log->td);
1182 data = malloc(sizeof(*data));
1188 data->samples = log->log;
1189 data->nr_samples = log->nr_samples;
/* Detach the old buffer and restart the log with a small fresh one. */
1191 log->nr_samples = 0;
1192 log->max_samples = 128;
1193 log->log = malloc(log->max_samples * log_entry_sz(log));
1197 pthread_mutex_init(&data->lock, NULL);
1198 pthread_cond_init(&data->cv, NULL);
1203 workqueue_enqueue(&log->td->log_compress_wq, &data->work);
/* wait==1: block until gz_work() signals completion. */
1206 pthread_mutex_lock(&data->lock);
1208 pthread_cond_wait(&data->cv, &data->lock);
1210 drop_data_unlock(data);
/* Non-zlib build: flushing/compress init are no-op stubs. */
1218 int iolog_flush(struct io_log *log, int wait)
1223 int iolog_compress_init(struct thread_data *td, struct sk_out *sk_out)
1228 void iolog_compress_exit(struct thread_data *td)
/*
 * Thin per-log-type wrappers: each finalizes one of the thread's stat
 * logs (iops/slat/clat/lat/bw) through finish_log() when present.
 */
1234 static int __write_log(struct thread_data *td, struct io_log *log, int try)
1237 return finish_log(td, log, try);
1242 static int write_iops_log(struct thread_data *td, int try)
1244 return __write_log(td, td->iops_log, try);
1247 static int write_slat_log(struct thread_data *td, int try)
1249 return __write_log(td, td->slat_log, try);
1252 static int write_clat_log(struct thread_data *td, int try)
1254 return __write_log(td, td->clat_log, try);
1257 static int write_lat_log(struct thread_data *td, int try)
1259 return __write_log(td, td->lat_log, try);
1262 static int write_bandw_log(struct thread_data *td, int try)
1264 return __write_log(td, td->bw_log, try);
/* Dispatch table mapping a completion mask bit to its log writer. */
1279 int (*fn)(struct thread_data *, int);
1282 static struct log_type log_types[] = {
1284 .mask = BW_LOG_MASK,
1285 .fn = write_bandw_log,
1288 .mask = LAT_LOG_MASK,
1289 .fn = write_lat_log,
1292 .mask = SLAT_LOG_MASK,
1293 .fn = write_slat_log,
1296 .mask = CLAT_LOG_MASK,
1297 .fn = write_clat_log,
1300 .mask = IOPS_LOG_MASK,
1301 .fn = write_iops_log,
/*
 * Write out all per-thread logs. Uses trylock-style passes (try != 0
 * while more than one log remains) so contended logs are retried, and
 * stops making passes once an iteration makes no progress.
 */
1305 void fio_writeout_logs(struct thread_data *td)
1307 unsigned int log_mask = 0;
1308 unsigned int log_left = ALL_LOG_NR;
1311 old_state = td_bump_runstate(td, TD_FINISHING);
1316 int prev_log_left = log_left;
1318 for (i = 0; i < ALL_LOG_NR && log_left; i++) {
1319 struct log_type *lt = &log_types[i];
1322 if (!(log_mask & lt->mask)) {
/* Last remaining log blocks (try=0); others use trylock. */
1323 ret = lt->fn(td, log_left != 1);
1326 log_mask |= lt->mask;
/* No progress this pass: give up to avoid spinning. */
1331 if (prev_log_left == log_left)
1335 td_restore_runstate(td, old_state);