/*
 * Code related to writing an iolog of what a thread is doing, and to
 * later read that back and replay
 */
#include <stdio.h>
#include <stdlib.h>
#include <libgen.h>
#include <assert.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#ifdef CONFIG_ZLIB
#include <zlib.h>
#endif

#include "flist.h"
#include "fio.h"
#include "verify.h"
#include "trim.h"
#include "filelock.h"
#include "lib/tp.h"
static const char iolog_ver2[] = "fio version 2 iolog";
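/*
 * A replay file in this format consists of the version line above
 * followed by one entry per line; file names and numbers below are
 * illustrative:
 *
 *	fio version 2 iolog
 *	/dev/sdb add
 *	/dev/sdb open
 *	/dev/sdb write 0 4096
 *	/dev/sdb read 4096 4096
 *	/dev/sdb close
 *
 * Data lines are "filename action offset length", matching what
 * log_io_u() below emits and read_iolog2() parses.
 */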
void queue_io_piece(struct thread_data *td, struct io_piece *ipo)
{
	flist_add_tail(&ipo->list, &td->io_log_list);
	td->total_io_size += ipo->len;
}
void log_io_u(const struct thread_data *td, const struct io_u *io_u)
{
	if (!td->o.write_iolog_file)
		return;

	fprintf(td->iolog_f, "%s %s %llu %lu\n", io_u->file->file_name,
						io_ddir_name(io_u->ddir),
						io_u->offset, io_u->buflen);
}
void log_file(struct thread_data *td, struct fio_file *f,
	      enum file_log_act what)
{
	const char *act[] = { "add", "open", "close" };

	assert(what < 3);

	if (!td->o.write_iolog_file)
		return;

	/*
	 * this happens on the pre-open/close done before the job starts
	 */
	if (!td->iolog_f)
		return;

	fprintf(td->iolog_f, "%s %s\n", f->file_name, act[what]);
}
static void iolog_delay(struct thread_data *td, unsigned long delay)
{
	uint64_t usec = utime_since_now(&td->last_issue);
	uint64_t this_delay;
	struct timeval tv;

	if (delay < td->time_offset) {
		td->time_offset = 0;
		return;
	}

	delay -= td->time_offset;
	if (delay < usec)
		return;

	delay -= usec;

	fio_gettime(&tv, NULL);
	while (delay && !td->terminate) {
		this_delay = delay;
		if (this_delay > 500000)
			this_delay = 500000;

		usec_sleep(td, this_delay);
		delay -= this_delay;
	}

	usec = utime_since_now(&tv);
	if (usec > delay)
		td->time_offset = usec - delay;
	else
		td->time_offset = 0;
}
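/*
 * File actions (add/open/close) read from the log are queued as
 * io_pieces with ddir set to DDIR_INVAL; ipo_special() below spots
 * those and performs the action instead of issuing I/O.
 */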
static int ipo_special(struct thread_data *td, struct io_piece *ipo)
{
	struct fio_file *f;
	int ret;

	/*
	 * Not a special ipo
	 */
	if (ipo->ddir != DDIR_INVAL)
		return 0;

	f = td->files[ipo->fileno];

	switch (ipo->file_action) {
	case FIO_LOG_OPEN_FILE:
		ret = td_io_open_file(td, f);
		if (!ret)
			break;
		td_verror(td, ret, "iolog open file");
		return -1;
	case FIO_LOG_CLOSE_FILE:
		td_io_close_file(td, f);
		break;
	case FIO_LOG_UNLINK_FILE:
		td_io_unlink_file(td, f);
		break;
	default:
		log_err("fio: bad file action %d\n", ipo->file_action);
		break;
	}

	return 1;
}
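/*
 * To recap the return convention used by read_iolog_get() below:
 * 0 means "not special, issue as normal I/O", 1 means the file action
 * was handled here and the entry is consumed, and -1 signals an error.
 */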
int read_iolog_get(struct thread_data *td, struct io_u *io_u)
{
	struct io_piece *ipo;
	unsigned long elapsed;

	while (!flist_empty(&td->io_log_list)) {
		int ret;

		ipo = flist_first_entry(&td->io_log_list, struct io_piece, list);
		flist_del(&ipo->list);
		remove_trim_entry(td, ipo);

		ret = ipo_special(td, ipo);
		if (ret < 0) {
			free(ipo);
			break;
		} else if (ret > 0) {
			free(ipo);
			continue;
		}

		io_u->ddir = ipo->ddir;
		if (ipo->ddir != DDIR_WAIT) {
			io_u->offset = ipo->offset;
			io_u->buflen = ipo->len;
			io_u->file = td->files[ipo->fileno];
			get_file(io_u->file);
			dprint(FD_IO, "iolog: get %llu/%lu/%s\n", io_u->offset,
						io_u->buflen, io_u->file->file_name);
			if (ipo->delay)
				iolog_delay(td, ipo->delay);
		} else {
			elapsed = mtime_since_genesis();
			if (ipo->delay > elapsed)
				usec_sleep(td, (ipo->delay - elapsed) * 1000);
		}

		free(ipo);

		if (io_u->ddir != DDIR_WAIT)
			return 0;
	}

	td->done = 1;
	return 1;
}
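/*
 * The contract with the caller: read_iolog_get() fills in io_u and
 * returns 0 while replayable entries remain, and flags td->done and
 * returns 1 once the log is exhausted (or a file action failed).
 */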
void prune_io_piece_log(struct thread_data *td)
{
	struct io_piece *ipo;
	struct rb_node *n;

	while ((n = rb_first(&td->io_hist_tree)) != NULL) {
		ipo = rb_entry(n, struct io_piece, rb_node);
		rb_erase(n, &td->io_hist_tree);
		remove_trim_entry(td, ipo);
		td->io_hist_len--;
		free(ipo);
	}

	while (!flist_empty(&td->io_hist_list)) {
		ipo = flist_first_entry(&td->io_hist_list, struct io_piece, list);
		flist_del(&ipo->list);
		remove_trim_entry(td, ipo);
		td->io_hist_len--;
		free(ipo);
	}
}
/*
 * log a successful write, so we can unwind the log for verify
 */
void log_io_piece(struct thread_data *td, struct io_u *io_u)
{
	struct rb_node **p, *parent;
	struct io_piece *ipo, *__ipo;

	ipo = malloc(sizeof(struct io_piece));
	init_ipo(ipo);
	ipo->file = io_u->file;
	ipo->offset = io_u->offset;
	ipo->len = io_u->buflen;
	ipo->numberio = io_u->numberio;
	ipo->flags = IP_F_IN_FLIGHT;

	io_u->ipo = ipo;

	if (io_u_should_trim(td, io_u)) {
		flist_add_tail(&ipo->trim_list, &td->trim_list);
		td->trim_entries++;
	}

	/*
	 * We don't need to sort the entries, if:
	 *
	 *	Sequential writes, or
	 *	Random writes that lay out the file as it goes along
	 *
	 * For both these cases, just reading back data in the order we
	 * wrote it out is the fastest.
	 *
	 * One exception is if we don't have a random map AND we are doing
	 * verifies, in that case we need to check for duplicate blocks and
	 * drop the old one, which we rely on the rb insert/lookup for
	 * handling.
	 */
	if (((!td->o.verifysort) || !td_random(td) || !td->o.overwrite) &&
	      (file_randommap(td, ipo->file) || td->o.verify == VERIFY_NONE)) {
		INIT_FLIST_HEAD(&ipo->list);
		flist_add_tail(&ipo->list, &td->io_hist_list);
		ipo->flags |= IP_F_ONLIST;
		td->io_hist_len++;
		return;
	}

	RB_CLEAR_NODE(&ipo->rb_node);

	/*
	 * Sort the entry into the verification list
	 */
restart:
	p = &td->io_hist_tree.rb_node;
	parent = NULL;
	while (*p) {
		int overlap = 0;
		parent = *p;

		__ipo = rb_entry(parent, struct io_piece, rb_node);
		if (ipo->file < __ipo->file)
			p = &(*p)->rb_left;
		else if (ipo->file > __ipo->file)
			p = &(*p)->rb_right;
		else if (ipo->offset < __ipo->offset) {
			p = &(*p)->rb_left;
			overlap = ipo->offset + ipo->len > __ipo->offset;
		} else if (ipo->offset > __ipo->offset) {
			p = &(*p)->rb_right;
			overlap = __ipo->offset + __ipo->len > ipo->offset;
		} else
			overlap = 1;

		if (overlap) {
			dprint(FD_IO, "iolog: overlap %llu/%lu, %llu/%lu\n",
				__ipo->offset, __ipo->len,
				ipo->offset, ipo->len);
			td->io_hist_len--;
			rb_erase(parent, &td->io_hist_tree);
			remove_trim_entry(td, __ipo);
			free(__ipo);
			goto restart;
		}
	}

	rb_link_node(&ipo->rb_node, parent, p);
	rb_insert_color(&ipo->rb_node, &td->io_hist_tree);
	ipo->flags |= IP_F_ONRB;
	td->io_hist_len++;
}
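/*
 * A worked example of the overlap handling above (numbers made up):
 * with an existing tree entry at offset 4096, len 8192 (bytes
 * 4096..12287), a new piece at offset 8192, len 4096 satisfies
 * 4096 + 8192 > 8192, so the old entry is erased and the scan
 * restarts. Verify thus only ever sees the newest data written to
 * any given block range.
 */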
void unlog_io_piece(struct thread_data *td, struct io_u *io_u)
{
	struct io_piece *ipo = io_u->ipo;

	if (td->ts.nr_block_infos) {
		uint32_t *info = io_u_block_info(td, io_u);
		if (BLOCK_INFO_STATE(*info) < BLOCK_STATE_TRIM_FAILURE) {
			if (io_u->ddir == DDIR_TRIM)
				*info = BLOCK_INFO_SET_STATE(*info,
						BLOCK_STATE_TRIM_FAILURE);
			else if (io_u->ddir == DDIR_WRITE)
				*info = BLOCK_INFO_SET_STATE(*info,
						BLOCK_STATE_WRITE_FAILURE);
		}
	}

	if (!ipo)
		return;

	if (ipo->flags & IP_F_ONRB)
		rb_erase(&ipo->rb_node, &td->io_hist_tree);
	else if (ipo->flags & IP_F_ONLIST)
		flist_del(&ipo->list);

	free(ipo);
	io_u->ipo = NULL;
	td->io_hist_len--;
}
void trim_io_piece(struct thread_data *td, const struct io_u *io_u)
{
	struct io_piece *ipo = io_u->ipo;

	if (!ipo)
		return;

	ipo->len = io_u->xfer_buflen - io_u->resid;
}
void write_iolog_close(struct thread_data *td)
{
	fflush(td->iolog_f);
	fclose(td->iolog_f);
	free(td->iolog_buf);
	td->iolog_f = NULL;
	td->iolog_buf = NULL;
}
/*
 * Read version 2 iolog data. It is enhanced to include per-file logging,
 * syncs, etc.
 */
static int read_iolog2(struct thread_data *td, FILE *f)
{
	unsigned long long offset;
	unsigned int bytes;
	int reads, writes, waits, fileno = 0, file_action = 0; /* stupid gcc */
	char *fname, *act;
	char *str, *p;
	enum fio_ddir rw;

	free_release_files(td);

	/*
	 * Read in the read iolog and store it, reuse the infrastructure
	 * for doing verifications.
	 */
	str = malloc(4096);
	fname = malloc(256+16);
	act = malloc(256+16);

	reads = writes = waits = 0;
	while ((p = fgets(str, 4096, f)) != NULL) {
		struct io_piece *ipo;
		int r;

		r = sscanf(p, "%256s %256s %llu %u", fname, act, &offset,
									&bytes);
		if (r == 4) {
			/*
			 * Check action first
			 */
			if (!strcmp(act, "wait"))
				rw = DDIR_WAIT;
			else if (!strcmp(act, "read"))
				rw = DDIR_READ;
			else if (!strcmp(act, "write"))
				rw = DDIR_WRITE;
			else if (!strcmp(act, "sync"))
				rw = DDIR_SYNC;
			else if (!strcmp(act, "datasync"))
				rw = DDIR_DATASYNC;
			else if (!strcmp(act, "trim"))
				rw = DDIR_TRIM;
			else {
				log_err("fio: bad iolog file action: %s\n",
									act);
				continue;
			}
			fileno = get_fileno(td, fname);
		} else if (r == 2) {
			rw = DDIR_INVAL;
			if (!strcmp(act, "add")) {
				fileno = add_file(td, fname, 0, 1);
				file_action = FIO_LOG_ADD_FILE;
				continue;
			} else if (!strcmp(act, "open")) {
				fileno = get_fileno(td, fname);
				file_action = FIO_LOG_OPEN_FILE;
			} else if (!strcmp(act, "close")) {
				fileno = get_fileno(td, fname);
				file_action = FIO_LOG_CLOSE_FILE;
			} else {
				log_err("fio: bad iolog file action: %s\n",
									act);
				continue;
			}
		} else {
			log_err("bad iolog2: %s", p);
			continue;
		}

		if (rw == DDIR_READ)
			reads++;
		else if (rw == DDIR_WRITE) {
			/*
			 * Don't add a write for ro mode
			 */
			if (read_only)
				continue;
			writes++;
		} else if (rw == DDIR_WAIT) {
			waits++;
		} else if (rw == DDIR_INVAL) {
		} else if (!ddir_sync(rw)) {
			log_err("bad ddir: %d\n", rw);
			continue;
		}

		/*
		 * Make note of file
		 */
		ipo = malloc(sizeof(*ipo));
		init_ipo(ipo);
		ipo->ddir = rw;
		if (rw == DDIR_WAIT) {
			ipo->delay = offset;
		} else {
			ipo->offset = offset;
			ipo->len = bytes;
			if (rw != DDIR_INVAL && bytes > td->o.max_bs[rw])
				td->o.max_bs[rw] = bytes;
			ipo->fileno = fileno;
			ipo->file_action = file_action;
			td->o.size += bytes;
		}

		queue_io_piece(td, ipo);
	}

	free(str);
	free(act);
	free(fname);

	if (writes && read_only) {
		log_err("fio: <%s> skips replay of %d writes due to"
			" read-only\n", td->o.name, writes);
		writes = 0;
	}

	if (!reads && !writes && !waits)
		return 1;
	else if (reads && !writes)
		td->o.td_ddir = TD_DDIR_READ;
	else if (!reads && writes)
		td->o.td_ddir = TD_DDIR_WRITE;
	else
		td->o.td_ddir = TD_DDIR_RW;

	return 0;
}
/*
 * open iolog, check version, and call appropriate parser
 */
static int init_iolog_read(struct thread_data *td)
{
	char buffer[256], *p;
	FILE *f;
	int ret;

	f = fopen(td->o.read_iolog_file, "r");
	if (!f) {
		perror("fopen read iolog");
		return 1;
	}

	p = fgets(buffer, sizeof(buffer), f);
	if (!p) {
		td_verror(td, errno, "iolog read");
		log_err("fio: unable to read iolog\n");
		fclose(f);
		return 1;
	}

	/*
	 * version 2 of the iolog stores a specific string as the
	 * first line, check for that
	 */
	if (!strncmp(iolog_ver2, buffer, strlen(iolog_ver2)))
		ret = read_iolog2(td, f);
	else {
		log_err("fio: iolog version 1 is no longer supported\n");
		ret = 1;
	}

	fclose(f);
	return ret;
}
/*
 * Set up a log for storing io patterns.
 */
static int init_iolog_write(struct thread_data *td)
{
	struct fio_file *ff;
	FILE *f;
	unsigned int i;

	f = fopen(td->o.write_iolog_file, "a");
	if (!f) {
		perror("fopen write iolog");
		return 1;
	}

	/*
	 * That's it for writing, setup a log buffer and we're done.
	 */
	td->iolog_f = f;
	td->iolog_buf = malloc(8192);
	setvbuf(f, td->iolog_buf, _IOFBF, 8192);

	/*
	 * write our version line
	 */
	if (fprintf(f, "%s\n", iolog_ver2) < 0) {
		perror("iolog init");
		return 1;
	}

	/*
	 * add all known files
	 */
	for_each_file(td, ff, i)
		log_file(td, ff, FIO_LOG_ADD_FILE);

	return 0;
}
int init_iolog(struct thread_data *td)
{
	int ret = 0;

	if (td->o.read_iolog_file) {
		int need_swap;

		/*
		 * Check if it's a blktrace file and load that if possible.
		 * Otherwise assume it's a normal log file and load that.
		 */
		if (is_blktrace(td->o.read_iolog_file, &need_swap))
			ret = load_blktrace(td, td->o.read_iolog_file, need_swap);
		else
			ret = init_iolog_read(td);
	} else if (td->o.write_iolog_file)
		ret = init_iolog_write(td);

	if (ret)
		td_verror(td, EINVAL, "failed initializing iolog");

	return ret;
}
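/*
 * The two paths above map directly to job file options, e.g.
 * (illustrative):
 *
 *	[replay]
 *	read_iolog=trace.log
 *
 * to replay, or write_iolog=trace.log to capture a trace while the
 * job runs. A binary blktrace dump may also be passed via read_iolog,
 * which is what the is_blktrace() check handles.
 */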
void setup_log(struct io_log **log, struct log_params *p,
	       const char *filename)
{
	struct io_log *l;

	l = calloc(1, sizeof(*l));
	l->max_samples = 1024;
	l->log_type = p->log_type;
	l->log_offset = p->log_offset;
	l->log_gz = p->log_gz;
	l->log_gz_store = p->log_gz_store;
	l->log = malloc(l->max_samples * log_entry_sz(l));
	l->avg_msec = p->avg_msec;
	l->filename = strdup(filename);
	l->td = p->td;

	if (l->log_offset)
		l->log_ddir_mask = LOG_OFFSET_SAMPLE_BIT;

	INIT_FLIST_HEAD(&l->chunk_list);

	if (l->log_gz && !p->td)
		l->log_gz = 0;
	else if (l->log_gz || l->log_gz_store) {
		pthread_mutex_init(&l->chunk_lock, NULL);
		p->td->flags |= TD_F_COMPRESS_LOG;
	}

	*log = l;
}
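/*
 * A sketch of a typical call site, assuming the usual fio option
 * plumbing (field values and the file name here are illustrative,
 * not the exact call made by the stat setup code):
 *
 *	struct log_params p = {
 *		.td = td,
 *		.avg_msec = td->o.log_avg_msec,
 *		.log_type = IO_LOG_TYPE_BW,
 *		.log_offset = td->o.log_offset,
 *		.log_gz = td->o.log_gz,
 *		.log_gz_store = td->o.log_gz_store,
 *	};
 *	setup_log(&td->bw_log, &p, "name_bw.log");
 */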
#ifdef CONFIG_SETVBUF
static void *set_file_buffer(FILE *f)
{
	size_t size = 1048576;
	void *buf;

	buf = malloc(size);
	setvbuf(f, buf, _IOFBF, size);
	return buf;
}

static void clear_file_buffer(void *buf)
{
	free(buf);
}
#else
static void *set_file_buffer(FILE *f)
{
	return NULL;
}

static void clear_file_buffer(void *buf)
{
}
#endif
void free_log(struct io_log *log)
{
	free(log->log);
	free(log->filename);
	free(log);
}
static void flush_samples(FILE *f, void *samples, uint64_t sample_size)
{
	struct io_sample *s;
	int log_offset;
	uint64_t i, nr_samples;

	if (!sample_size)
		return;

	s = __get_sample(samples, 0, 0);
	log_offset = (s->__ddir & LOG_OFFSET_SAMPLE_BIT) != 0;

	nr_samples = sample_size / __log_entry_sz(log_offset);

	for (i = 0; i < nr_samples; i++) {
		s = __get_sample(samples, log_offset, i);

		if (!log_offset) {
			fprintf(f, "%lu, %lu, %u, %u\n",
					(unsigned long) s->time,
					(unsigned long) s->val,
					io_sample_ddir(s), s->bs);
		} else {
			struct io_sample_offset *so = (void *) s;

			fprintf(f, "%lu, %lu, %u, %u, %llu\n",
					(unsigned long) s->time,
					(unsigned long) s->val,
					io_sample_ddir(s), s->bs,
					(unsigned long long) so->offset);
		}
	}
}
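/*
 * Example of the emitted per-sample lines (values made up):
 *
 *	500, 1024, 0, 4096
 *	1000, 2048, 1, 4096
 *
 * i.e. time in msec, value (latency, bandwidth or IOPS depending on
 * the log type), data direction, and block size. With log_offset set,
 * every line carries the I/O offset as a fifth column.
 */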
#ifdef CONFIG_ZLIB

struct iolog_flush_data {
	struct tp_work work;
	struct io_log *log;
	void *samples;
	uint64_t nr_samples;
};

struct iolog_compress {
	struct flist_head list;
	void *buf;
	size_t len;
	unsigned int seq;
};

#define GZ_CHUNK	131072
static struct iolog_compress *get_new_chunk(unsigned int seq)
{
	struct iolog_compress *c;

	c = malloc(sizeof(*c));
	INIT_FLIST_HEAD(&c->list);
	c->buf = malloc(GZ_CHUNK);
	c->len = 0;
	c->seq = seq;
	return c;
}

static void free_chunk(struct iolog_compress *ic)
{
	free(ic->buf);
	free(ic);
}
static int z_stream_init(z_stream *stream, int gz_hdr)
{
	int wbits = 15;

	stream->zalloc = Z_NULL;
	stream->zfree = Z_NULL;
	stream->opaque = Z_NULL;
	stream->next_in = Z_NULL;

	/*
	 * zlib magic - add 32 for auto-detection of gz header or not,
	 * if we decide to store files in a gzip friendly format.
	 */
	if (gz_hdr)
		wbits += 32;

	if (inflateInit2(stream, wbits) != Z_OK)
		return 1;

	return 0;
}
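/*
 * On the windowBits magic above: inflateInit2() with 15 accepts a
 * zlib-wrapped stream; adding 32 tells zlib to auto-detect zlib or
 * gzip framing from the stream header, which is needed when inflating
 * logs that were stored in the gzip friendly format.
 */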
struct inflate_chunk_iter {
	unsigned int seq;
	int err;
	void *buf;
	size_t buf_size;
	size_t buf_used;
	size_t chunk_sz;
};

static void finish_chunk(z_stream *stream, FILE *f,
			 struct inflate_chunk_iter *iter)
{
	int ret;

	ret = inflateEnd(stream);
	if (ret != Z_OK)
		log_err("fio: failed to end log inflation (%d)\n", ret);

	flush_samples(f, iter->buf, iter->buf_used);
	free(iter->buf);
	iter->buf = NULL;
	iter->buf_size = iter->buf_used = 0;
}
/*
 * Iterative chunk inflation. Handles cases where we cross into a new
 * sequence, doing flush finish of previous chunk if needed.
 */
static size_t inflate_chunk(struct iolog_compress *ic, int gz_hdr, FILE *f,
			    z_stream *stream, struct inflate_chunk_iter *iter)
{
	size_t ret;

	dprint(FD_COMPRESS, "inflate chunk size=%lu, seq=%u\n",
				(unsigned long) ic->len, ic->seq);

	if (ic->seq != iter->seq) {
		if (iter->seq)
			finish_chunk(stream, f, iter);

		z_stream_init(stream, gz_hdr);
		iter->seq = ic->seq;
	}

	stream->avail_in = ic->len;
	stream->next_in = ic->buf;

	if (!iter->buf_size) {
		iter->buf_size = iter->chunk_sz;
		iter->buf = malloc(iter->buf_size);
	}

	while (stream->avail_in) {
		size_t this_out = iter->buf_size - iter->buf_used;
		int err;

		stream->avail_out = this_out;
		stream->next_out = iter->buf + iter->buf_used;

		err = inflate(stream, Z_NO_FLUSH);
		if (err < 0) {
			log_err("fio: failed inflating log: %d\n", err);
			iter->err = err;
			break;
		}

		iter->buf_used += this_out - stream->avail_out;

		if (!stream->avail_out) {
			iter->buf_size += iter->chunk_sz;
			iter->buf = realloc(iter->buf, iter->buf_size);
			continue;
		}

		if (err == Z_STREAM_END)
			break;
	}

	ret = (void *) stream->next_in - ic->buf;

	dprint(FD_COMPRESS, "inflated to size=%lu\n", (unsigned long) ret);

	return ret;
}
/*
 * Inflate stored compressed chunks, or write them directly to the log
 * file if so instructed.
 */
static int inflate_gz_chunks(struct io_log *log, FILE *f)
{
	struct inflate_chunk_iter iter = { .chunk_sz = log->log_gz, };
	z_stream stream;

	while (!flist_empty(&log->chunk_list)) {
		struct iolog_compress *ic;

		ic = flist_first_entry(&log->chunk_list, struct iolog_compress, list);
		flist_del(&ic->list);

		if (log->log_gz_store) {
			size_t ret;

			dprint(FD_COMPRESS, "log write chunk size=%lu, "
				"seq=%u\n", (unsigned long) ic->len, ic->seq);

			ret = fwrite(ic->buf, ic->len, 1, f);
			if (ret != 1 || ferror(f)) {
				iter.err = errno;
				log_err("fio: error writing compressed log\n");
			}
		} else
			inflate_chunk(ic, log->log_gz_store, f, &stream, &iter);

		free_chunk(ic);
	}

	if (iter.seq) {
		finish_chunk(&stream, f, &iter);
		free(iter.buf);
	}

	return iter.err;
}
/*
 * Open compressed log file and decompress the stored chunks and
 * write them to stdout. The chunks are stored sequentially in the
 * file, so we iterate over them and do them one-by-one.
 */
int iolog_file_inflate(const char *file)
{
	struct inflate_chunk_iter iter = { .chunk_sz = 64 * 1024 * 1024, };
	struct iolog_compress ic;
	z_stream stream;
	struct stat sb;
	ssize_t ret;
	size_t total;
	void *buf;
	FILE *f;

	f = fopen(file, "r");
	if (!f) {
		perror("fopen");
		return 1;
	}

	if (stat(file, &sb) < 0) {
		fclose(f);
		perror("stat");
		return 1;
	}

	ic.buf = buf = malloc(sb.st_size);
	ic.len = sb.st_size;
	ic.seq = 1;

	ret = fread(ic.buf, ic.len, 1, f);
	if (ret < 0) {
		perror("fread");
		fclose(f);
		free(buf);
		return 1;
	} else if (ret != 1) {
		log_err("fio: short read on reading log\n");
		fclose(f);
		free(buf);
		return 1;
	}

	fclose(f);

	/*
	 * Each chunk will return Z_STREAM_END. We don't know how many
	 * chunks are in the file, so we just keep looping and incrementing
	 * the sequence number until we have consumed the whole compressed
	 * file.
	 */
	total = ic.len;
	do {
		size_t iret;

		iret = inflate_chunk(&ic, 1, stdout, &stream, &iter);
		total -= iret;
		if (!total)
			break;
		if (iter.err)
			break;

		ic.seq++;
		ic.len = total;
		ic.buf += iret;
	} while (1);

	if (iter.seq) {
		finish_chunk(&stream, stdout, &iter);
		free(iter.buf);
	}

	free(buf);
	return iter.err;
}
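/*
 * This is the backend for the --inflate-log command line option, so a
 * stored compressed log can be recovered for post-processing, e.g.
 * (file names illustrative):
 *
 *	fio --inflate-log=job_bw.log > job_bw.txt
 */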
#else

static int inflate_gz_chunks(struct io_log *log, FILE *f)
{
	return 0;
}

int iolog_file_inflate(const char *file)
{
	log_err("fio: log inflation not possible without zlib\n");
	return 1;
}

#endif
void flush_log(struct io_log *log, int do_append)
{
	void *buf;
	FILE *f;

	if (!do_append)
		f = fopen(log->filename, "w");
	else
		f = fopen(log->filename, "a");
	if (!f) {
		perror("fopen log");
		return;
	}

	buf = set_file_buffer(f);

	inflate_gz_chunks(log, f);

	flush_samples(f, log->log, log->nr_samples * log_entry_sz(log));

	fclose(f);
	clear_file_buffer(buf);
}
static int finish_log(struct thread_data *td, struct io_log *log, int trylock)
{
	if (trylock) {
		if (fio_trylock_file(log->filename))
			return 1;
	} else
		fio_lock_file(log->filename);

	if (td->client_type == FIO_CLIENT_TYPE_GUI)
		fio_send_iolog(td, log, log->filename);
	else
		flush_log(log, !td->o.per_job_logs);

	fio_unlock_file(log->filename);
	free_log(log);
	return 0;
}
#ifdef CONFIG_ZLIB

/*
 * Invoked from our compress helper thread, when logging would have exceeded
 * the specified memory limitation. Compresses the previously stored
 * samples.
 */
static int gz_work(struct tp_work *work)
{
	struct iolog_flush_data *data;
	struct iolog_compress *c;
	struct flist_head list;
	unsigned int seq;
	z_stream stream;
	size_t total = 0;
	int ret;

	INIT_FLIST_HEAD(&list);

	data = container_of(work, struct iolog_flush_data, work);

	stream.zalloc = Z_NULL;
	stream.zfree = Z_NULL;
	stream.opaque = Z_NULL;

	ret = deflateInit(&stream, Z_DEFAULT_COMPRESSION);
	if (ret != Z_OK) {
		log_err("fio: failed to init gz stream\n");
		return 0;
	}

	seq = ++data->log->chunk_seq;

	stream.next_in = (void *) data->samples;
	stream.avail_in = data->nr_samples * log_entry_sz(data->log);

	dprint(FD_COMPRESS, "deflate input size=%lu, seq=%u\n",
				(unsigned long) stream.avail_in, seq);
	do {
		c = get_new_chunk(seq);
		stream.avail_out = GZ_CHUNK;
		stream.next_out = c->buf;
		ret = deflate(&stream, Z_NO_FLUSH);
		if (ret < 0) {
			log_err("fio: deflate log (%d)\n", ret);
			free_chunk(c);
			goto err;
		}

		c->len = GZ_CHUNK - stream.avail_out;
		flist_add_tail(&c->list, &list);
		total += c->len;
	} while (stream.avail_in);

	stream.next_out = c->buf + c->len;
	stream.avail_out = GZ_CHUNK - c->len;

	ret = deflate(&stream, Z_FINISH);
	if (ret == Z_STREAM_END)
		c->len = GZ_CHUNK - stream.avail_out;
	else {
		do {
			c = get_new_chunk(seq);
			stream.avail_out = GZ_CHUNK;
			stream.next_out = c->buf;
			ret = deflate(&stream, Z_FINISH);
			c->len = GZ_CHUNK - stream.avail_out;
			total += c->len;
			flist_add_tail(&c->list, &list);
		} while (ret != Z_STREAM_END);
	}

	dprint(FD_COMPRESS, "deflated to size=%lu\n", (unsigned long) total);

	ret = deflateEnd(&stream);
	if (ret != Z_OK)
		log_err("fio: deflateEnd %d\n", ret);

	free(data->samples);

	if (!flist_empty(&list)) {
		pthread_mutex_lock(&data->log->chunk_lock);
		flist_splice_tail(&list, &data->log->chunk_list);
		pthread_mutex_unlock(&data->log->chunk_lock);
	}

	ret = 0;
done:
	if (work->wait) {
		work->done = 1;
		pthread_cond_signal(&work->cv);
	} else
		free(data);

	return ret;
err:
	while (!flist_empty(&list)) {
		c = flist_first_entry(&list, struct iolog_compress, list);
		flist_del(&c->list);
		free_chunk(c);
	}
	ret = 1;
	goto done;
}
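/*
 * Note the two deflate phases above: the Z_NO_FLUSH loop compresses
 * the bulk of the samples into GZ_CHUNK sized pieces, and Z_FINISH
 * then drains whatever zlib still buffers internally, allocating
 * extra chunks with the same sequence number until Z_STREAM_END.
 */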
/*
 * Queue work item to compress the existing log entries. We copy the
 * samples, and reset the log sample count to 0 (so the logging will
 * continue to use the memory associated with the log). If called with
 * wait == 1, will not return until the log compression has completed.
 */
int iolog_flush(struct io_log *log, int wait)
{
	struct tp_data *tdat = log->td->tp_data;
	struct iolog_flush_data *data;
	size_t sample_size;

	data = malloc(sizeof(*data));
	if (!data)
		return 1;

	data->log = log;

	sample_size = log->nr_samples * log_entry_sz(log);
	data->samples = malloc(sample_size);
	if (!data->samples) {
		free(data);
		return 1;
	}

	memcpy(data->samples, log->log, sample_size);
	data->nr_samples = log->nr_samples;
	data->work.fn = gz_work;
	log->nr_samples = 0;

	if (wait) {
		pthread_mutex_init(&data->work.lock, NULL);
		pthread_cond_init(&data->work.cv, NULL);
		data->work.wait = 1;
	} else
		data->work.wait = 0;

	data->work.prio = 1;
	tp_queue_work(tdat, &data->work);

	if (wait) {
		pthread_mutex_lock(&data->work.lock);
		while (!data->work.done)
			pthread_cond_wait(&data->work.cv, &data->work.lock);
		pthread_mutex_unlock(&data->work.lock);
		free(data);
	}

	return 0;
}
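/*
 * Ownership note: with wait == 0, gz_work() frees the flush data once
 * compression completes; with wait == 1, the caller above reclaims it
 * after being signalled.
 */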
#else

int iolog_flush(struct io_log *log, int wait)
{
	return 1;
}

#endif
static int write_iops_log(struct thread_data *td, int try)
{
	struct io_log *log = td->iops_log;

	if (!log)
		return 0;

	return finish_log(td, log, try);
}

static int write_slat_log(struct thread_data *td, int try)
{
	struct io_log *log = td->slat_log;

	if (!log)
		return 0;

	return finish_log(td, log, try);
}

static int write_clat_log(struct thread_data *td, int try)
{
	struct io_log *log = td->clat_log;

	if (!log)
		return 0;

	return finish_log(td, log, try);
}

static int write_lat_log(struct thread_data *td, int try)
{
	struct io_log *log = td->lat_log;

	if (!log)
		return 0;

	return finish_log(td, log, try);
}

static int write_bandw_log(struct thread_data *td, int try)
{
	struct io_log *log = td->bw_log;

	if (!log)
		return 0;

	return finish_log(td, log, try);
}
enum {
	BW_LOG_MASK	= 1,
	LAT_LOG_MASK	= 2,
	SLAT_LOG_MASK	= 4,
	CLAT_LOG_MASK	= 8,
	IOPS_LOG_MASK	= 16,

	ALL_LOG_NR	= 5,
};

struct log_type {
	unsigned int mask;
	int (*fn)(struct thread_data *, int);
};

static struct log_type log_types[] = {
	{
		.mask	= BW_LOG_MASK,
		.fn	= write_bandw_log,
	},
	{
		.mask	= LAT_LOG_MASK,
		.fn	= write_lat_log,
	},
	{
		.mask	= SLAT_LOG_MASK,
		.fn	= write_slat_log,
	},
	{
		.mask	= CLAT_LOG_MASK,
		.fn	= write_clat_log,
	},
	{
		.mask	= IOPS_LOG_MASK,
		.fn	= write_iops_log,
	},
};
void fio_writeout_logs(struct thread_data *td)
{
	unsigned int log_mask = 0;
	unsigned int log_left = ALL_LOG_NR;
	int old_state, i;

	old_state = td_bump_runstate(td, TD_FINISHING);

	while (log_left) {
		int prev_log_left = log_left;

		for (i = 0; i < ALL_LOG_NR && log_left; i++) {
			struct log_type *lt = &log_types[i];
			int ret;

			if (!(log_mask & lt->mask)) {
				ret = lt->fn(td, log_left != 1);
				if (!ret) {
					log_left--;
					log_mask |= lt->mask;
				}
			}
		}

		if (prev_log_left == log_left)
			usleep(5000);
	}

	td_restore_runstate(td, old_state);
}