/*
 * Code related to writing an iolog of what a thread is doing, and to
 * later read that back and replay.
 */
#include "lib/roundup.h"

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <arpa/inet.h>
#include <sys/socket.h>
static int iolog_flush(struct io_log *log);

static const char iolog_ver2[] = "fio version 2 iolog";
static const char iolog_ver3[] = "fio version 3 iolog";
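
/*
 * For reference, the line formats parsed and emitted below look like this
 * (sample lines are illustrative, not from a real run):
 *
 *   version 2:  <file_name> <action> <offset> <length>
 *               e.g. "/dev/sdb write 4096 4096"
 *   version 3:  <timestamp_usec> <file_name> <action> <offset> <length>
 *               e.g. "1250 /dev/sdb write 4096 4096"
 *
 * File actions ("add", "open", "close") omit the offset/length fields.
 */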
void queue_io_piece(struct thread_data *td, struct io_piece *ipo)
{
	flist_add_tail(&ipo->list, &td->io_log_list);
	td->total_io_size += ipo->len;
}
void log_io_u(const struct thread_data *td, const struct io_u *io_u)
{
	struct timespec now;

	if (!td->o.write_iolog_file)
		return;

	fio_gettime(&now, NULL);
	fprintf(td->iolog_f, "%llu %s %s %llu %llu\n",
		(unsigned long long) utime_since_now(&td->io_log_start_time),
		io_u->file->file_name, io_ddir_name(io_u->ddir), io_u->offset,
		io_u->buflen);
}
void log_file(struct thread_data *td, struct fio_file *f,
	      enum file_log_act what)
{
	const char *act[] = { "add", "open", "close" };
	struct timespec now;

	assert(what < 3);

	if (!td->o.write_iolog_file)
		return;

	/*
	 * this happens on the pre-open/close done before the job starts
	 */
	if (!td->iolog_f)
		return;

	fio_gettime(&now, NULL);
	fprintf(td->iolog_f, "%llu %s %s\n",
		(unsigned long long) utime_since_now(&td->io_log_start_time),
		f->file_name, act[what]);
}
static void iolog_delay(struct thread_data *td, unsigned long delay)
{
	uint64_t usec = utime_since_now(&td->last_issue);
	unsigned long orig_delay = delay;
	struct timespec ts;
	int ret = 0;

	if (delay < td->time_offset) {
		td->time_offset = 0;
		return;
	}

	delay -= td->time_offset;
	if (delay < usec)
		return;

	delay -= usec;

	fio_gettime(&ts, NULL);

	while (delay && !td->terminate) {
		ret = io_u_queued_complete(td, 0);
		if (ret < 0)
			td_verror(td, -ret, "io_u_queued_complete");
		if (utime_since_now(&ts) > delay)
			break;
	}

	usec = utime_since_now(&ts);
	if (usec > orig_delay)
		td->time_offset = usec - orig_delay;
	else
		td->time_offset = 0;
}
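
/*
 * Worked example of the time_offset carry-over above (illustrative numbers):
 * if a 100 usec delay was requested but the loop did not return until
 * 150 usec had passed, time_offset becomes 50. A following 120 usec delay
 * is then trimmed to 70 usec before sleeping, so the replay does not drift
 * further and further behind the recorded timeline.
 */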
static int ipo_special(struct thread_data *td, struct io_piece *ipo)
{
	struct fio_file *f;
	int ret;

	/*
	 * Not a special ipo
	 */
	if (ipo->ddir != DDIR_INVAL)
		return 0;

	f = td->files[ipo->fileno];

	iolog_delay(td, ipo->delay);
	if (fio_fill_issue_time(td))
		fio_gettime(&td->last_issue, NULL);
	switch (ipo->file_action) {
	case FIO_LOG_OPEN_FILE:
		if (td->o.replay_redirect && fio_file_open(f)) {
			dprint(FD_FILE, "iolog: ignoring re-open of file %s\n",
					f->file_name);
			break;
		}
		ret = td_io_open_file(td, f);
		if (!ret)
			break;
		td_verror(td, ret, "iolog open file");
		return -1;
	case FIO_LOG_CLOSE_FILE:
		td_io_close_file(td, f);
		break;
	case FIO_LOG_UNLINK_FILE:
		td_io_unlink_file(td, f);
		break;
	case FIO_LOG_ADD_FILE:
		/*
		 * Nothing to do
		 */
		break;
	default:
		log_err("fio: bad file action %d\n", ipo->file_action);
		break;
	}

	return 1;
}
static bool read_iolog(struct thread_data *td);

unsigned long long delay_since_ttime(const struct thread_data *td,
				     unsigned long long time)
{
	double tmp;
	double scale;
	const unsigned long long *last_ttime = &td->io_log_last_ttime;

	if (!*last_ttime || td->o.no_stall || time < *last_ttime)
		return 0;
	else if (td->o.replay_time_scale == 100)
		return time - *last_ttime;

	scale = (double) 100.0 / (double) td->o.replay_time_scale;
	tmp = time - *last_ttime;
	return tmp * scale;
}
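
/*
 * Example of the scaling above: with replay_time_scale=50, scale is
 * 100.0 / 50 = 2.0, so recorded gaps are doubled and the replay runs at
 * half speed; with replay_time_scale=200, scale is 0.5 and the replay
 * runs twice as fast. (Values are illustrative.)
 */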
int read_iolog_get(struct thread_data *td, struct io_u *io_u)
{
	struct io_piece *ipo;
	unsigned long elapsed;

	while (!flist_empty(&td->io_log_list)) {
		int ret;

		if (td->o.read_iolog_chunked) {
			if (td->io_log_checkmark == td->io_log_current) {
				if (td->io_log_blktrace) {
					if (!read_blktrace(td))
						return 1;
				} else {
					if (!read_iolog(td))
						return 1;
				}
			}
			td->io_log_current--;
		}
		ipo = flist_first_entry(&td->io_log_list, struct io_piece, list);
		flist_del(&ipo->list);
		remove_trim_entry(td, ipo);

		ret = ipo_special(td, ipo);
		if (ret < 0) {
			free(ipo);
			break;
		} else if (ret > 0) {
			free(ipo);
			continue;
		}

		io_u->ddir = ipo->ddir;
		if (ipo->ddir != DDIR_WAIT) {
			io_u->offset = ipo->offset;
			io_u->verify_offset = ipo->offset;
			io_u->buflen = ipo->len;
			io_u->file = td->files[ipo->fileno];
			get_file(io_u->file);
			dprint(FD_IO, "iolog: get %llu/%llu/%s\n", io_u->offset,
				io_u->buflen, io_u->file->file_name);
			if (ipo->delay)
				iolog_delay(td, ipo->delay);
		} else {
			elapsed = mtime_since_genesis();
			if (ipo->delay > elapsed)
				usec_sleep(td, (ipo->delay - elapsed) * 1000);
		}

		free(ipo);

		if (io_u->ddir != DDIR_WAIT)
			return 0;
	}

	td->done = 1;
	return 1;
}
void prune_io_piece_log(struct thread_data *td)
{
	struct io_piece *ipo;
	struct fio_rb_node *n;

	while ((n = rb_first(&td->io_hist_tree)) != NULL) {
		ipo = rb_entry(n, struct io_piece, rb_node);
		rb_erase(n, &td->io_hist_tree);
		remove_trim_entry(td, ipo);
		td->io_hist_len--;
		free(ipo);
	}

	while (!flist_empty(&td->io_hist_list)) {
		ipo = flist_first_entry(&td->io_hist_list, struct io_piece, list);
		flist_del(&ipo->list);
		remove_trim_entry(td, ipo);
		td->io_hist_len--;
		free(ipo);
	}
}
/*
 * log a successful write, so we can unwind the log for verify
 */
void log_io_piece(struct thread_data *td, struct io_u *io_u)
{
	struct fio_rb_node **p, *parent;
	struct io_piece *ipo, *__ipo;

	ipo = calloc(1, sizeof(struct io_piece));
	init_ipo(ipo);
	ipo->file = io_u->file;
	ipo->offset = io_u->offset;
	ipo->len = io_u->buflen;
	ipo->numberio = io_u->numberio;
	ipo->flags = IP_F_IN_FLIGHT;

	io_u->ipo = ipo;

	if (io_u_should_trim(td, io_u)) {
		flist_add_tail(&ipo->trim_list, &td->trim_list);
		td->trim_entries++;
	}

	/*
	 * Only sort writes if we don't have a random map, in which case we
	 * need to check for duplicate blocks and drop the old one, which we
	 * rely on the rb insert/lookup for handling.
	 */
	if (file_randommap(td, ipo->file)) {
		INIT_FLIST_HEAD(&ipo->list);
		flist_add_tail(&ipo->list, &td->io_hist_list);
		ipo->flags |= IP_F_ONLIST;
		td->io_hist_len++;
		return;
	}

	RB_CLEAR_NODE(&ipo->rb_node);

	/*
	 * Sort the entry into the verification list
	 */
restart:
	p = &td->io_hist_tree.rb_node;
	parent = NULL;
	while (*p) {
		int overlap = 0;
		parent = *p;

		__ipo = rb_entry(parent, struct io_piece, rb_node);
		if (ipo->file < __ipo->file)
			p = &(*p)->rb_left;
		else if (ipo->file > __ipo->file)
			p = &(*p)->rb_right;
		else if (ipo->offset < __ipo->offset) {
			p = &(*p)->rb_left;
			overlap = ipo->offset + ipo->len > __ipo->offset;
		} else if (ipo->offset > __ipo->offset) {
			p = &(*p)->rb_right;
			overlap = __ipo->offset + __ipo->len > ipo->offset;
		} else
			overlap = 1;

		if (overlap) {
			dprint(FD_IO, "iolog: overlap %llu/%lu, %llu/%lu\n",
				__ipo->offset, __ipo->len,
				ipo->offset, ipo->len);
			td->io_hist_len--;
			rb_erase(parent, &td->io_hist_tree);
			remove_trim_entry(td, __ipo);
			if (!(__ipo->flags & IP_F_IN_FLIGHT))
				free(__ipo);
			goto restart;
		}
	}

	rb_link_node(&ipo->rb_node, parent, p);
	rb_insert_color(&ipo->rb_node, &td->io_hist_tree);
	ipo->flags |= IP_F_ONRB;
	td->io_hist_len++;
}
void unlog_io_piece(struct thread_data *td, struct io_u *io_u)
{
	struct io_piece *ipo = io_u->ipo;

	if (td->ts.nr_block_infos) {
		uint32_t *info = io_u_block_info(td, io_u);
		if (BLOCK_INFO_STATE(*info) < BLOCK_STATE_TRIM_FAILURE) {
			if (io_u->ddir == DDIR_TRIM)
				*info = BLOCK_INFO_SET_STATE(*info,
						BLOCK_STATE_TRIM_FAILURE);
			else if (io_u->ddir == DDIR_WRITE)
				*info = BLOCK_INFO_SET_STATE(*info,
						BLOCK_STATE_WRITE_FAILURE);
		}
	}

	if (!ipo)
		return;

	if (ipo->flags & IP_F_ONRB)
		rb_erase(&ipo->rb_node, &td->io_hist_tree);
	else if (ipo->flags & IP_F_ONLIST)
		flist_del(&ipo->list);

	free(ipo);
	io_u->ipo = NULL;
	td->io_hist_len--;
}
void trim_io_piece(const struct io_u *io_u)
{
	struct io_piece *ipo = io_u->ipo;

	if (!ipo)
		return;

	ipo->len = io_u->xfer_buflen - io_u->resid;
}

void write_iolog_close(struct thread_data *td)
{
	if (!td->iolog_f)
		return;

	fflush(td->iolog_f);
	fclose(td->iolog_f);
	free(td->iolog_buf);
	td->iolog_f = NULL;
	td->iolog_buf = NULL;
}
int64_t iolog_items_to_fetch(struct thread_data *td)
{
	struct timespec now;
	uint64_t elapsed;
	uint64_t for_1s;
	int64_t items_to_fetch;

	if (!td->io_log_highmark)
		return 10;

	fio_gettime(&now, NULL);
	elapsed = ntime_since(&td->io_log_highmark_time, &now);
	if (elapsed) {
		for_1s = (td->io_log_highmark - td->io_log_current) * 1000000000 / elapsed;
		items_to_fetch = for_1s - td->io_log_current;
		if (items_to_fetch < 0)
			items_to_fetch = 0;
	} else
		items_to_fetch = 0;

	td->io_log_highmark = td->io_log_current + items_to_fetch;
	td->io_log_checkmark = (td->io_log_highmark + 1) / 2;
	fio_gettime(&td->io_log_highmark_time, NULL);

	return items_to_fetch;
}
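
/*
 * Illustrative numbers for the rate estimate above: if the last batch set
 * io_log_highmark=10000 and io_log_current has dropped to 4000 after
 * half a second (elapsed = 5e8 nsec), then 6000 entries were consumed,
 * for_1s extrapolates to 12000, and items_to_fetch = 12000 - 4000 = 8000
 * new entries, roughly enough to cover the next second of replay.
 */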
#define io_act(_td, _r) (((_td)->io_log_version == 3 && (_r) == 5) || \
			 ((_td)->io_log_version == 2 && (_r) == 4))
#define file_act(_td, _r) (((_td)->io_log_version == 3 && (_r) == 3) || \
			   ((_td)->io_log_version == 2 && (_r) == 2))
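
/*
 * The field counts above mirror the sscanf() calls in read_iolog(): a
 * version 3 I/O line yields 5 fields (timestamp, file, action, offset,
 * length) and a version 2 I/O line yields 4, while file-action lines
 * yield 3 and 2 fields respectively.
 */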
/*
 * Read version 2 and 3 iolog data. It is enhanced to include per-file
 * logging, syncs, etc.
 */
static bool read_iolog(struct thread_data *td)
{
	unsigned long long offset;
	unsigned int bytes;
	unsigned long long delay = 0;
	int syncs, reads, writes, trims, waits, fileno = 0, file_action = 0; /* stupid gcc */
	char *rfname, *fname, *act;
	char *str, *p;
	enum fio_ddir rw;
	bool realloc = false;
	int64_t items_to_fetch = 0;

	if (td->o.read_iolog_chunked) {
		items_to_fetch = iolog_items_to_fetch(td);
		if (!items_to_fetch)
			return true;
	}

	/*
	 * Read in the read iolog and store it, reuse the infrastructure
	 * for doing verifications.
	 */
	str = malloc(4096);
	rfname = fname = malloc(256+16);
	act = malloc(256+16);

	syncs = reads = writes = trims = waits = 0;
	while ((p = fgets(str, 4096, td->io_log_rfile)) != NULL) {
		struct io_piece *ipo;
		int r;
		unsigned long long ttime;

		if (td->io_log_version == 3) {
			r = sscanf(p, "%llu %256s %256s %llu %u", &ttime, rfname, act,
				   &offset, &bytes);
			delay = delay_since_ttime(td, ttime);
			td->io_log_last_ttime = ttime;
			/*
			 * "wait" is not allowed with version 3
			 */
			if (!strcmp(act, "wait")) {
				log_err("iolog: ignoring wait command with"
					" version 3 for file %s\n", fname);
				continue;
			}
		} else /* version 2 */
			r = sscanf(p, "%256s %256s %llu %u", rfname, act, &offset, &bytes);

		if (td->o.replay_redirect)
			fname = td->o.replay_redirect;

		if (io_act(td, r)) {
			/*
			 * Check action first
			 */
			if (!strcmp(act, "wait"))
				rw = DDIR_WAIT;
			else if (!strcmp(act, "read")) {
				if (td->o.replay_skip & (1u << DDIR_READ))
					continue;
				rw = DDIR_READ;
			} else if (!strcmp(act, "write")) {
				if (td->o.replay_skip & (1u << DDIR_WRITE))
					continue;
				rw = DDIR_WRITE;
			} else if (!strcmp(act, "sync")) {
				if (td->o.replay_skip & (1u << DDIR_SYNC))
					continue;
				rw = DDIR_SYNC;
			} else if (!strcmp(act, "datasync"))
				rw = DDIR_DATASYNC;
			else if (!strcmp(act, "trim")) {
				if (td->o.replay_skip & (1u << DDIR_TRIM))
					continue;
				rw = DDIR_TRIM;
			} else {
				log_err("fio: bad iolog file action: %s\n",
					act);
				continue;
			}
			fileno = get_fileno(td, fname);
		} else if (file_act(td, r)) {
			rw = DDIR_INVAL;
			if (!strcmp(act, "add")) {
				if (td->o.replay_redirect &&
				    get_fileno(td, fname) != -1) {
					dprint(FD_FILE, "iolog: ignoring"
						" re-add of file %s\n", fname);
				} else {
					fileno = add_file(td, fname, td->subjob_number, 1);
					file_action = FIO_LOG_ADD_FILE;
				}
			} else if (!strcmp(act, "open")) {
				fileno = get_fileno(td, fname);
				file_action = FIO_LOG_OPEN_FILE;
			} else if (!strcmp(act, "close")) {
				fileno = get_fileno(td, fname);
				file_action = FIO_LOG_CLOSE_FILE;
			} else {
				log_err("fio: bad iolog file action: %s\n",
					act);
				continue;
			}
		} else {
			log_err("bad iolog%d: %s\n", td->io_log_version, p);
			continue;
		}

		if (rw == DDIR_READ)
			reads++;
		else if (rw == DDIR_WRITE) {
			/*
			 * Don't add a write for ro mode
			 */
			if (read_only)
				continue;
			writes++;
		} else if (rw == DDIR_TRIM) {
			/*
			 * Don't add a trim for ro mode
			 */
			if (read_only)
				continue;
			trims++;
		} else if (rw == DDIR_WAIT) {
			if (td->o.no_stall)
				continue;
			waits++;
		} else if (rw == DDIR_INVAL) {
		} else if (ddir_sync(rw)) {
			syncs++;
		} else {
			log_err("bad ddir: %d\n", rw);
			continue;
		}

		/*
		 * Make note of file
		 */
		ipo = calloc(1, sizeof(*ipo));
		init_ipo(ipo);
		ipo->ddir = rw;
		if (td->io_log_version == 3)
			ipo->delay = delay;
		if (rw == DDIR_WAIT) {
			ipo->delay = offset;
		} else {
			if (td->o.replay_scale)
				ipo->offset = offset / td->o.replay_scale;
			else
				ipo->offset = offset;
			ipo_bytes_align(td->o.replay_align, ipo);

			ipo->len = bytes;
			if (rw != DDIR_INVAL && bytes > td->o.max_bs[rw]) {
				realloc = true;
				td->o.max_bs[rw] = bytes;
			}
			ipo->fileno = fileno;
			ipo->file_action = file_action;
		}

		queue_io_piece(td, ipo);

		if (td->o.read_iolog_chunked) {
			td->io_log_current++;
			items_to_fetch--;
			if (items_to_fetch == 0)
				break;
		}
	}

	free(str);
	free(act);
	free(rfname);

	if (td->o.read_iolog_chunked) {
		td->io_log_highmark = td->io_log_current;
		td->io_log_checkmark = (td->io_log_highmark + 1) / 2;
		fio_gettime(&td->io_log_highmark_time, NULL);
	}

	if (writes && read_only) {
		log_err("fio: <%s> skips replay of %d writes due to"
			" read-only\n", td->o.name, writes);
		writes = 0;
	}
	if (syncs)
		td->flags |= TD_F_SYNCS;

	if (td->o.read_iolog_chunked) {
		if (td->io_log_current == 0)
			return false;
		td->o.td_ddir = TD_DDIR_RW;
		if (realloc && td->orig_buffer) {
			io_u_quiesce(td);
			free_io_mem(td);
			if (init_io_u_buffers(td))
				return false;
		}
		return true;
	}

	if (!reads && !writes && !waits && !trims)
		return false;

	if (reads)
		td->o.td_ddir |= TD_DDIR_READ;
	if (writes)
		td->o.td_ddir |= TD_DDIR_WRITE;
	if (trims)
		td->o.td_ddir |= TD_DDIR_TRIM;

	return true;
}
static bool is_socket(const char *path)
{
	struct stat buf;
	int r;

	r = stat(path, &buf);
	if (r == -1)
		return false;

	return S_ISSOCK(buf.st_mode);
}
static int open_socket(const char *path)
{
	struct sockaddr_un addr;
	int ret, fd;

	fd = socket(AF_UNIX, SOCK_STREAM, 0);
	if (fd < 0)
		return fd;

	addr.sun_family = AF_UNIX;
	if (snprintf(addr.sun_path, sizeof(addr.sun_path), "%s", path) >=
	    sizeof(addr.sun_path)) {
		log_err("%s: path name %s is too long for a Unix socket\n",
			__func__, path);
	}

	ret = connect(fd, (const struct sockaddr *)&addr, strlen(path) + sizeof(addr.sun_family));
	if (!ret)
		return fd;

	close(fd);
	return -1;
}
/*
 * open iolog, check version, and call appropriate parser
 */
static bool init_iolog_read(struct thread_data *td, char *fname)
{
	char buffer[256], *p;
	FILE *f = NULL;

	dprint(FD_IO, "iolog: name=%s\n", fname);

	if (is_socket(fname)) {
		int fd;

		fd = open_socket(fname);
		if (fd >= 0)
			f = fdopen(fd, "r");
	} else if (!strcmp(fname, "-")) {
		f = stdin;
	} else
		f = fopen(fname, "r");

	if (!f) {
		perror("fopen read iolog");
		return false;
	}

	p = fgets(buffer, sizeof(buffer), f);
	if (!p) {
		td_verror(td, errno, "iolog read");
		log_err("fio: unable to read iolog\n");
		fclose(f);
		return false;
	}

	/*
	 * versions 2 and 3 of the iolog store a specific string as the
	 * first line, check for that
	 */
	if (!strncmp(iolog_ver2, buffer, strlen(iolog_ver2)))
		td->io_log_version = 2;
	else if (!strncmp(iolog_ver3, buffer, strlen(iolog_ver3)))
		td->io_log_version = 3;
	else {
		log_err("fio: iolog version 1 is no longer supported\n");
		fclose(f);
		return false;
	}

	free_release_files(td);
	td->io_log_rfile = f;
	return read_iolog(td);
}
/*
 * Set up a log for storing io patterns.
 */
static bool init_iolog_write(struct thread_data *td)
{
	struct fio_file *ff;
	FILE *f;
	unsigned int i;

	f = fopen(td->o.write_iolog_file, "a");
	if (!f) {
		perror("fopen write iolog");
		return false;
	}

	/*
	 * That's it for writing, setup a log buffer and we're done.
	 */
	td->iolog_f = f;
	td->iolog_buf = malloc(8192);
	setvbuf(f, td->iolog_buf, _IOFBF, 8192);
	fio_gettime(&td->io_log_start_time, NULL);

	/*
	 * write our version line
	 */
	if (fprintf(f, "%s\n", iolog_ver3) < 0) {
		perror("iolog init");
		return false;
	}

	/*
	 * add all known files
	 */
	for_each_file(td, ff, i)
		log_file(td, ff, FIO_LOG_ADD_FILE);

	return true;
}
bool init_iolog(struct thread_data *td)
{
	bool ret;

	if (td->o.read_iolog_file) {
		int need_swap;
		char *fname = get_name_by_idx(td->o.read_iolog_file, td->subjob_number);

		/*
		 * Check if it's a blktrace file and load that if possible.
		 * Otherwise assume it's a normal log file and load that.
		 */
		if (is_blktrace(fname, &need_swap)) {
			td->io_log_blktrace = 1;
			ret = init_blktrace_read(td, fname, need_swap);
		} else {
			td->io_log_blktrace = 0;
			ret = init_iolog_read(td, fname);
		}
		free(fname);
	} else if (td->o.write_iolog_file)
		ret = init_iolog_write(td);
	else
		ret = true;

	if (!ret)
		td_verror(td, EINVAL, "failed initializing iolog");

	return ret;
}
void setup_log(struct io_log **log, struct log_params *p,
	       const char *filename)
{
	struct io_log *l;
	int i;
	struct io_u_plat_entry *entry;
	struct flist_head *list;

	l = scalloc(1, sizeof(*l));
	INIT_FLIST_HEAD(&l->io_logs);
	l->log_type = p->log_type;
	l->log_offset = p->log_offset;
	l->log_prio = p->log_prio;
	l->log_gz = p->log_gz;
	l->log_gz_store = p->log_gz_store;
	l->avg_msec = p->avg_msec;
	l->hist_msec = p->hist_msec;
	l->hist_coarseness = p->hist_coarseness;
	l->filename = strdup(filename);
	l->td = p->td;

	/* Initialize histogram lists for each r/w direction,
	 * with initial io_u_plat of all zeros:
	 */
	for (i = 0; i < DDIR_RWDIR_CNT; i++) {
		list = &l->hist_window[i].list;
		INIT_FLIST_HEAD(list);
		entry = calloc(1, sizeof(struct io_u_plat_entry));
		flist_add(&entry->list, list);
	}

	if (l->td && l->td->o.io_submit_mode != IO_MODE_OFFLOAD) {
		unsigned int def_samples = DEF_LOG_ENTRIES;
		struct io_logs *__p;

		__p = calloc(1, sizeof(*l->pending));
		if (l->td->o.iodepth > DEF_LOG_ENTRIES)
			def_samples = roundup_pow2(l->td->o.iodepth);
		__p->max_samples = def_samples;
		__p->log = calloc(__p->max_samples, log_entry_sz(l));
		l->pending = __p;
	}

	if (l->log_offset)
		l->log_ddir_mask = LOG_OFFSET_SAMPLE_BIT;
	if (l->log_prio)
		l->log_ddir_mask |= LOG_PRIO_SAMPLE_BIT;

	INIT_FLIST_HEAD(&l->chunk_list);

	if (l->log_gz && !p->td)
		l->log_gz = 0;
	else if (l->log_gz || l->log_gz_store) {
		mutex_init_pshared(&l->chunk_lock);
		mutex_init_pshared(&l->deferred_free_lock);
		p->td->flags |= TD_F_COMPRESS_LOG;
	}

	*log = l;
}
#ifdef CONFIG_SETVBUF
static void *set_file_buffer(FILE *f)
{
	size_t size = 1048576;
	void *buf;

	buf = malloc(size);
	setvbuf(f, buf, _IOFBF, size);
	return buf;
}

static void clear_file_buffer(void *buf)
{
	free(buf);
}
#else
static void *set_file_buffer(FILE *f)
{
	return NULL;
}

static void clear_file_buffer(void *buf)
{
}
#endif
void free_log(struct io_log *log)
{
	while (!flist_empty(&log->io_logs)) {
		struct io_logs *cur_log;

		cur_log = flist_first_entry(&log->io_logs, struct io_logs, list);
		flist_del_init(&cur_log->list);
		free(cur_log->log);
		sfree(cur_log);
	}

	if (log->pending) {
		free(log->pending->log);
		free(log->pending);
		log->pending = NULL;
	}

	free(log->filename);
	sfree(log);
}
uint64_t hist_sum(int j, int stride, uint64_t *io_u_plat,
		  uint64_t *io_u_plat_last)
{
	uint64_t sum;
	int k;

	if (io_u_plat_last) {
		for (k = sum = 0; k < stride; k++)
			sum += io_u_plat[j + k] - io_u_plat_last[j + k];
	} else {
		for (k = sum = 0; k < stride; k++)
			sum += io_u_plat[j + k];
	}

	return sum;
}
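
/*
 * Example: with hist_coarseness=4 the caller passes stride = 1 << 4 = 16,
 * so each output column is the sum of 16 adjacent io_u_plat bins and the
 * FIO_IO_U_PLAT_NR bins collapse into FIO_IO_U_PLAT_NR / 16 columns.
 */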
static void flush_hist_samples(FILE *f, int hist_coarseness, void *samples,
			       uint64_t sample_size)
{
	struct io_sample *s;
	int log_offset;
	uint64_t i, j, nr_samples;
	struct io_u_plat_entry *entry, *entry_before;
	uint64_t *io_u_plat;
	uint64_t *io_u_plat_before;

	int stride = 1 << hist_coarseness;

	if (!sample_size)
		return;

	s = __get_sample(samples, 0, 0);
	log_offset = (s->__ddir & LOG_OFFSET_SAMPLE_BIT) != 0;

	nr_samples = sample_size / __log_entry_sz(log_offset);

	for (i = 0; i < nr_samples; i++) {
		s = __get_sample(samples, log_offset, i);

		entry = s->data.plat_entry;
		io_u_plat = entry->io_u_plat;

		entry_before = flist_first_entry(&entry->list, struct io_u_plat_entry, list);
		io_u_plat_before = entry_before->io_u_plat;

		fprintf(f, "%lu, %u, %llu, ", (unsigned long) s->time,
			io_sample_ddir(s), (unsigned long long) s->bs);
		for (j = 0; j < FIO_IO_U_PLAT_NR - stride; j += stride) {
			fprintf(f, "%llu, ", (unsigned long long)
				hist_sum(j, stride, io_u_plat, io_u_plat_before));
		}
		fprintf(f, "%llu\n", (unsigned long long)
			hist_sum(FIO_IO_U_PLAT_NR - stride, stride, io_u_plat,
				 io_u_plat_before));

		flist_del(&entry_before->list);
		free(entry_before);
	}
}
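
/*
 * Each row emitted above is CSV of the form (illustrative):
 *
 *   <time>, <ddir>, <block size>, <bin sum>, <bin sum>, ...
 *
 * with one summed column per stride of the latency histogram.
 */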
void flush_samples(FILE *f, void *samples, uint64_t sample_size)
{
	struct io_sample *s;
	int log_offset, log_prio;
	uint64_t i, nr_samples;
	unsigned int prio_val;
	const char *fmt;

	if (!sample_size)
		return;

	s = __get_sample(samples, 0, 0);
	log_offset = (s->__ddir & LOG_OFFSET_SAMPLE_BIT) != 0;
	log_prio = (s->__ddir & LOG_PRIO_SAMPLE_BIT) != 0;

	if (log_offset) {
		if (log_prio)
			fmt = "%lu, %" PRId64 ", %u, %llu, %llu, 0x%04x\n";
		else
			fmt = "%lu, %" PRId64 ", %u, %llu, %llu, %u\n";
	} else {
		if (log_prio)
			fmt = "%lu, %" PRId64 ", %u, %llu, 0x%04x\n";
		else
			fmt = "%lu, %" PRId64 ", %u, %llu, %u\n";
	}

	nr_samples = sample_size / __log_entry_sz(log_offset);

	for (i = 0; i < nr_samples; i++) {
		s = __get_sample(samples, log_offset, i);

		if (log_prio)
			prio_val = s->priority;
		else
			prio_val = ioprio_value_is_class_rt(s->priority);

		if (!log_offset) {
			fprintf(f, fmt,
				(unsigned long) s->time,
				s->data.val,
				io_sample_ddir(s), (unsigned long long) s->bs,
				prio_val);
		} else {
			struct io_sample_offset *so = (void *) s;

			fprintf(f, fmt,
				(unsigned long) s->time,
				s->data.val,
				io_sample_ddir(s), (unsigned long long) s->bs,
				(unsigned long long) so->offset,
				prio_val);
		}
	}
}
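
/*
 * Sample output lines for the formats above (illustrative values only):
 *
 *   without offset:  "12000, 250, 0, 4096, 0"
 *   with offset:     "12000, 250, 0, 4096, 8192, 0"
 *
 * i.e. time, value, ddir, block size, [offset,] priority.
 */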
#ifdef CONFIG_ZLIB

struct iolog_flush_data {
	struct workqueue_work work;
	struct io_log *log;
	void *samples;
	uint32_t nr_samples;
	bool free;
};

#define GZ_CHUNK	131072
static struct iolog_compress *get_new_chunk(unsigned int seq)
{
	struct iolog_compress *c;

	c = malloc(sizeof(*c));
	INIT_FLIST_HEAD(&c->list);
	c->buf = malloc(GZ_CHUNK);
	c->len = 0;
	c->seq = seq;
	return c;
}

static void free_chunk(struct iolog_compress *ic)
{
	free(ic->buf);
	free(ic);
}
static int z_stream_init(z_stream *stream, int gz_hdr)
{
	int wbits = 15;

	memset(stream, 0, sizeof(*stream));
	stream->zalloc = Z_NULL;
	stream->zfree = Z_NULL;
	stream->opaque = Z_NULL;
	stream->next_in = Z_NULL;

	/*
	 * zlib magic - adding 32 to the window bits enables auto-detection
	 * of a gzip or zlib header, for when we store files in a gzip
	 * friendly format.
	 */
	if (gz_hdr)
		wbits += 32;

	if (inflateInit2(stream, wbits) != Z_OK)
		return 1;

	return 0;
}
struct inflate_chunk_iter {
	unsigned int seq;
	int err;
	void *buf;
	size_t buf_size;
	size_t buf_used;
	size_t chunk_sz;
};

static void finish_chunk(z_stream *stream, FILE *f,
			 struct inflate_chunk_iter *iter)
{
	int ret;

	ret = inflateEnd(stream);
	if (ret != Z_OK)
		log_err("fio: failed to end log inflation seq %d (%d)\n",
			iter->seq, ret);

	flush_samples(f, iter->buf, iter->buf_used);
	free(iter->buf);
	iter->buf = NULL;
	iter->buf_size = iter->buf_used = 0;
}
/*
 * Iterative chunk inflation. Handles cases where we cross into a new
 * sequence, doing flush finish of previous chunk if needed.
 */
static size_t inflate_chunk(struct iolog_compress *ic, int gz_hdr, FILE *f,
			    z_stream *stream, struct inflate_chunk_iter *iter)
{
	size_t ret;

	dprint(FD_COMPRESS, "inflate chunk size=%lu, seq=%u\n",
		(unsigned long) ic->len, ic->seq);

	if (ic->seq != iter->seq) {
		if (iter->seq)
			finish_chunk(stream, f, iter);

		z_stream_init(stream, gz_hdr);
		iter->seq = ic->seq;
	}

	stream->avail_in = ic->len;
	stream->next_in = ic->buf;

	if (!iter->buf_size) {
		iter->buf_size = iter->chunk_sz;
		iter->buf = malloc(iter->buf_size);
	}

	while (stream->avail_in) {
		size_t this_out = iter->buf_size - iter->buf_used;
		int err;

		stream->avail_out = this_out;
		stream->next_out = iter->buf + iter->buf_used;

		err = inflate(stream, Z_NO_FLUSH);
		if (err < 0) {
			log_err("fio: failed inflating log: %d\n", err);
			iter->err = err;
			break;
		}

		iter->buf_used += this_out - stream->avail_out;

		if (!stream->avail_out) {
			iter->buf_size += iter->chunk_sz;
			iter->buf = realloc(iter->buf, iter->buf_size);
			continue;
		}

		if (err == Z_STREAM_END)
			break;
	}

	ret = (void *) stream->next_in - ic->buf;

	dprint(FD_COMPRESS, "inflated to size=%lu\n", (unsigned long) iter->buf_size);

	return ret;
}
/*
 * Inflate stored compressed chunks, or write them directly to the log
 * file if so instructed.
 */
static int inflate_gz_chunks(struct io_log *log, FILE *f)
{
	struct inflate_chunk_iter iter = { .chunk_sz = log->log_gz, };
	z_stream stream;

	while (!flist_empty(&log->chunk_list)) {
		struct iolog_compress *ic;

		ic = flist_first_entry(&log->chunk_list, struct iolog_compress, list);
		flist_del(&ic->list);

		if (log->log_gz_store) {
			size_t ret;

			dprint(FD_COMPRESS, "log write chunk size=%lu, "
				"seq=%u\n", (unsigned long) ic->len, ic->seq);

			ret = fwrite(ic->buf, ic->len, 1, f);
			if (ret != 1 || ferror(f)) {
				iter.err = errno;
				log_err("fio: error writing compressed log\n");
			}
		} else
			inflate_chunk(ic, log->log_gz_store, f, &stream, &iter);

		free_chunk(ic);
	}

	if (iter.seq) {
		finish_chunk(&stream, f, &iter);
		free(iter.buf);
	}

	return iter.err;
}
/*
 * Open compressed log file and decompress the stored chunks and
 * write them to stdout. The chunks are stored sequentially in the
 * file, so we iterate over them and do them one-by-one.
 */
int iolog_file_inflate(const char *file)
{
	struct inflate_chunk_iter iter = { .chunk_sz = 64 * 1024 * 1024, };
	struct iolog_compress ic;
	z_stream stream;
	struct stat sb;
	size_t ret;
	size_t total;
	void *buf;
	FILE *f;

	f = fopen(file, "rb");
	if (!f) {
		perror("fopen");
		return 1;
	}

	if (stat(file, &sb) < 0) {
		fclose(f);
		perror("stat");
		return 1;
	}

	ic.buf = buf = malloc(sb.st_size);
	ic.len = sb.st_size;
	ic.seq = 1;

	ret = fread(ic.buf, ic.len, 1, f);
	if (ret == 0 && ferror(f)) {
		perror("fread");
		fclose(f);
		free(buf);
		return 1;
	} else if (ferror(f) || (!feof(f) && ret != 1)) {
		log_err("fio: short read on reading log\n");
		fclose(f);
		free(buf);
		return 1;
	}

	fclose(f);

	/*
	 * Each chunk will return Z_STREAM_END. We don't know how many
	 * chunks are in the file, so we just keep looping and incrementing
	 * the sequence number until we have consumed the whole compressed
	 * file.
	 */
	total = ic.len;
	do {
		size_t iret;

		iret = inflate_chunk(&ic, 1, stdout, &stream, &iter);
		total -= iret;
		if (!total)
			break;
		if (iter.err)
			break;

		ic.seq++;
		ic.len -= iret;
		ic.buf += iret;
	} while (1);

	if (iter.seq) {
		finish_chunk(&stream, stdout, &iter);
		free(iter.buf);
	}

	free(buf);
	return iter.err;
}
#else

static int inflate_gz_chunks(struct io_log *log, FILE *f)
{
	return 0;
}

int iolog_file_inflate(const char *file)
{
	log_err("fio: log inflation not possible without zlib\n");
	return 1;
}

#endif
void flush_log(struct io_log *log, bool do_append)
{
	void *buf;
	FILE *f;

	/*
	 * If log_gz_store is true, we are writing a binary file.
	 * Set the mode appropriately (on all platforms) to avoid issues
	 * on windows (line-ending conversions, etc.)
	 */
	if (!do_append) {
		if (log->log_gz_store)
			f = fopen(log->filename, "wb");
		else
			f = fopen(log->filename, "w");
	} else {
		if (log->log_gz_store)
			f = fopen(log->filename, "ab");
		else
			f = fopen(log->filename, "a");
	}
	if (!f) {
		perror("fopen log");
		return;
	}

	buf = set_file_buffer(f);

	inflate_gz_chunks(log, f);

	while (!flist_empty(&log->io_logs)) {
		struct io_logs *cur_log;

		cur_log = flist_first_entry(&log->io_logs, struct io_logs, list);
		flist_del_init(&cur_log->list);

		if (log->td && log == log->td->clat_hist_log)
			flush_hist_samples(f, log->hist_coarseness, cur_log->log,
					   log_sample_sz(log, cur_log));
		else
			flush_samples(f, cur_log->log, log_sample_sz(log, cur_log));

		sfree(cur_log);
	}

	fclose(f);
	clear_file_buffer(buf);
}
static int finish_log(struct thread_data *td, struct io_log *log, int trylock)
{
	if (td->flags & TD_F_COMPRESS_LOG)
		iolog_flush(log);

	if (trylock) {
		if (fio_trylock_file(log->filename))
			return 1;
	} else
		fio_lock_file(log->filename);

	if (td->client_type == FIO_CLIENT_TYPE_GUI || is_backend)
		fio_send_iolog(td, log, log->filename);
	else
		flush_log(log, !td->o.per_job_logs);

	fio_unlock_file(log->filename);
	free_log(log);
	return 0;
}
size_t log_chunk_sizes(struct io_log *log)
{
	struct flist_head *entry;
	size_t ret;

	if (flist_empty(&log->chunk_list))
		return 0;

	ret = 0;
	pthread_mutex_lock(&log->chunk_lock);
	flist_for_each(entry, &log->chunk_list) {
		struct iolog_compress *c;

		c = flist_entry(entry, struct iolog_compress, list);
		ret += c->len;
	}
	pthread_mutex_unlock(&log->chunk_lock);
	return ret;
}
#ifdef CONFIG_ZLIB

static void iolog_put_deferred(struct io_log *log, void *ptr)
{
	if (!ptr)
		return;

	pthread_mutex_lock(&log->deferred_free_lock);
	if (log->deferred < IOLOG_MAX_DEFER) {
		log->deferred_items[log->deferred] = ptr;
		log->deferred++;
	} else if (!fio_did_warn(FIO_WARN_IOLOG_DROP))
		log_err("fio: had to drop log entry free\n");
	pthread_mutex_unlock(&log->deferred_free_lock);
}

static void iolog_free_deferred(struct io_log *log)
{
	int i;

	if (!log->deferred)
		return;

	pthread_mutex_lock(&log->deferred_free_lock);

	for (i = 0; i < log->deferred; i++) {
		free(log->deferred_items[i]);
		log->deferred_items[i] = NULL;
	}

	log->deferred = 0;
	pthread_mutex_unlock(&log->deferred_free_lock);
}
static int gz_work(struct iolog_flush_data *data)
{
	struct iolog_compress *c = NULL;
	struct flist_head list;
	unsigned int seq;
	z_stream stream;
	size_t total = 0;
	int ret;

	INIT_FLIST_HEAD(&list);

	memset(&stream, 0, sizeof(stream));
	stream.zalloc = Z_NULL;
	stream.zfree = Z_NULL;
	stream.opaque = Z_NULL;

	ret = deflateInit(&stream, Z_DEFAULT_COMPRESSION);
	if (ret != Z_OK) {
		log_err("fio: failed to init gz stream\n");
		goto err;
	}

	seq = ++data->log->chunk_seq;

	stream.next_in = (void *) data->samples;
	stream.avail_in = data->nr_samples * log_entry_sz(data->log);

	dprint(FD_COMPRESS, "deflate input size=%lu, seq=%u, log=%s\n",
		(unsigned long) stream.avail_in, seq,
		data->log->filename);
	do {
		if (c)
			dprint(FD_COMPRESS, "seq=%d, chunk=%lu\n", seq,
				(unsigned long) c->len);
		c = get_new_chunk(seq);
		stream.avail_out = GZ_CHUNK;
		stream.next_out = c->buf;
		ret = deflate(&stream, Z_NO_FLUSH);
		if (ret < 0) {
			log_err("fio: deflate log (%d)\n", ret);
			free_chunk(c);
			goto err;
		}

		c->len = GZ_CHUNK - stream.avail_out;
		flist_add_tail(&c->list, &list);
		total += c->len;
	} while (stream.avail_in);

	stream.next_out = c->buf + c->len;
	stream.avail_out = GZ_CHUNK - c->len;

	ret = deflate(&stream, Z_FINISH);
	if (ret < 0) {
		/*
		 * Z_BUF_ERROR is special, it just means we need more
		 * output space. We'll handle that below. Treat any other
		 * error as fatal.
		 */
		if (ret != Z_BUF_ERROR) {
			log_err("fio: deflate log (%d)\n", ret);
			flist_del(&c->list);
			free_chunk(c);
			goto err;
		}
	}

	total -= c->len;
	c->len = GZ_CHUNK - stream.avail_out;
	total += c->len;
	dprint(FD_COMPRESS, "seq=%d, chunk=%lu\n", seq, (unsigned long) c->len);

	if (ret != Z_STREAM_END) {
		do {
			c = get_new_chunk(seq);
			stream.avail_out = GZ_CHUNK;
			stream.next_out = c->buf;
			ret = deflate(&stream, Z_FINISH);
			c->len = GZ_CHUNK - stream.avail_out;
			total += c->len;
			flist_add_tail(&c->list, &list);
			dprint(FD_COMPRESS, "seq=%d, chunk=%lu\n", seq,
				(unsigned long) c->len);
		} while (ret != Z_STREAM_END);
	}

	dprint(FD_COMPRESS, "deflated to size=%lu\n", (unsigned long) total);

	ret = deflateEnd(&stream);
	if (ret != Z_OK)
		log_err("fio: deflateEnd %d\n", ret);

	iolog_put_deferred(data->log, data->samples);

	if (!flist_empty(&list)) {
		pthread_mutex_lock(&data->log->chunk_lock);
		flist_splice_tail(&list, &data->log->chunk_list);
		pthread_mutex_unlock(&data->log->chunk_lock);
	}

	ret = 0;
done:
	if (data->free)
		sfree(data);
	return ret;
err:
	while (!flist_empty(&list)) {
		c = flist_first_entry(&list, struct iolog_compress, list);
		flist_del(&c->list);
		free_chunk(c);
	}
	ret = 1;
	goto done;
}
/*
 * Invoked from our compress helper thread, when logging would have exceeded
 * the specified memory limitation. Compresses the previously stored
 * entries.
 */
static int gz_work_async(struct submit_worker *sw, struct workqueue_work *work)
{
	return gz_work(container_of(work, struct iolog_flush_data, work));
}

static int gz_init_worker(struct submit_worker *sw)
{
	struct thread_data *td = sw->wq->td;

	if (!fio_option_is_set(&td->o, log_gz_cpumask))
		return 0;

	if (fio_setaffinity(gettid(), td->o.log_gz_cpumask) == -1) {
		log_err("gz: failed to set CPU affinity\n");
		return 1;
	}

	return 0;
}

static struct workqueue_ops log_compress_wq_ops = {
	.fn		= gz_work_async,
	.init_worker_fn	= gz_init_worker,
	.nice		= 1,
};
int iolog_compress_init(struct thread_data *td, struct sk_out *sk_out)
{
	if (!(td->flags & TD_F_COMPRESS_LOG))
		return 0;

	workqueue_init(td, &td->log_compress_wq, &log_compress_wq_ops, 1, sk_out);
	return 0;
}

void iolog_compress_exit(struct thread_data *td)
{
	if (!(td->flags & TD_F_COMPRESS_LOG))
		return;

	workqueue_exit(&td->log_compress_wq);
}
/*
 * Queue work item to compress the existing log entries. We reset the
 * current log to a small size, and reference the existing log in the
 * data that we queue for compression. Once compression has been done,
 * this old log is freed. Will not return until the log compression
 * has completed, and will flush all previous logs too.
 */
static int iolog_flush(struct io_log *log)
{
	struct iolog_flush_data *data;

	workqueue_flush(&log->td->log_compress_wq);
	data = malloc(sizeof(*data));
	if (!data)
		return 1;

	data->log = log;
	data->free = false;

	while (!flist_empty(&log->io_logs)) {
		struct io_logs *cur_log;

		cur_log = flist_first_entry(&log->io_logs, struct io_logs, list);
		flist_del_init(&cur_log->list);

		data->samples = cur_log->log;
		data->nr_samples = cur_log->nr_samples;

		sfree(cur_log);

		gz_work(data);
	}

	free(data);
	return 0;
}
int iolog_cur_flush(struct io_log *log, struct io_logs *cur_log)
{
	struct iolog_flush_data *data;

	data = smalloc(sizeof(*data));
	if (!data)
		return 1;

	data->log = log;
	data->free = true;

	data->samples = cur_log->log;
	data->nr_samples = cur_log->nr_samples;

	cur_log->nr_samples = cur_log->max_samples = 0;
	cur_log->log = NULL;

	workqueue_enqueue(&log->td->log_compress_wq, &data->work);

	iolog_free_deferred(log);

	return 0;
}
#else

static int iolog_flush(struct io_log *log)
{
	return 1;
}

int iolog_cur_flush(struct io_log *log, struct io_logs *cur_log)
{
	return 1;
}

int iolog_compress_init(struct thread_data *td, struct sk_out *sk_out)
{
	return 0;
}

void iolog_compress_exit(struct thread_data *td)
{
}

#endif
struct io_logs *iolog_cur_log(struct io_log *log)
{
	if (flist_empty(&log->io_logs))
		return NULL;

	return flist_last_entry(&log->io_logs, struct io_logs, list);
}
uint64_t iolog_nr_samples(struct io_log *iolog)
{
	struct flist_head *entry;
	uint64_t ret = 0;

	flist_for_each(entry, &iolog->io_logs) {
		struct io_logs *cur_log;

		cur_log = flist_entry(entry, struct io_logs, list);
		ret += cur_log->nr_samples;
	}

	return ret;
}
static int __write_log(struct thread_data *td, struct io_log *log, int try)
{
	if (log)
		return finish_log(td, log, try);

	return 0;
}
static int write_iops_log(struct thread_data *td, int try, bool unit_log)
{
	int ret;

	if (per_unit_log(td->iops_log) != unit_log)
		return 0;

	ret = __write_log(td, td->iops_log, try);
	if (!ret)
		td->iops_log = NULL;

	return ret;
}

static int write_slat_log(struct thread_data *td, int try, bool unit_log)
{
	int ret;

	if (!unit_log)
		return 0;

	ret = __write_log(td, td->slat_log, try);
	if (!ret)
		td->slat_log = NULL;

	return ret;
}

static int write_clat_log(struct thread_data *td, int try, bool unit_log)
{
	int ret;

	if (!unit_log)
		return 0;

	ret = __write_log(td, td->clat_log, try);
	if (!ret)
		td->clat_log = NULL;

	return ret;
}

static int write_clat_hist_log(struct thread_data *td, int try, bool unit_log)
{
	int ret;

	if (!unit_log)
		return 0;

	ret = __write_log(td, td->clat_hist_log, try);
	if (!ret)
		td->clat_hist_log = NULL;

	return ret;
}

static int write_lat_log(struct thread_data *td, int try, bool unit_log)
{
	int ret;

	if (!unit_log)
		return 0;

	ret = __write_log(td, td->lat_log, try);
	if (!ret)
		td->lat_log = NULL;

	return ret;
}

static int write_bandw_log(struct thread_data *td, int try, bool unit_log)
{
	int ret;

	if (per_unit_log(td->bw_log) != unit_log)
		return 0;

	ret = __write_log(td, td->bw_log, try);
	if (!ret)
		td->bw_log = NULL;

	return ret;
}
enum {
	BW_LOG_MASK	= 1,
	LAT_LOG_MASK	= 2,
	SLAT_LOG_MASK	= 4,
	CLAT_LOG_MASK	= 8,
	IOPS_LOG_MASK	= 16,
	CLAT_HIST_LOG_MASK = 32,

	ALL_LOG_NR	= 6,
};

struct log_type {
	unsigned int mask;
	int (*fn)(struct thread_data *, int, bool);
};

static struct log_type log_types[] = {
	{
		.mask	= BW_LOG_MASK,
		.fn	= write_bandw_log,
	},
	{
		.mask	= LAT_LOG_MASK,
		.fn	= write_lat_log,
	},
	{
		.mask	= SLAT_LOG_MASK,
		.fn	= write_slat_log,
	},
	{
		.mask	= CLAT_LOG_MASK,
		.fn	= write_clat_log,
	},
	{
		.mask	= IOPS_LOG_MASK,
		.fn	= write_iops_log,
	},
	{
		.mask	= CLAT_HIST_LOG_MASK,
		.fn	= write_clat_hist_log,
	},
};
void td_writeout_logs(struct thread_data *td, bool unit_logs)
{
	unsigned int log_mask = 0;
	unsigned int log_left = ALL_LOG_NR;
	int old_state, i;

	old_state = td_bump_runstate(td, TD_FINISHING);

	finalize_logs(td, unit_logs);

	while (log_left) {
		int prev_log_left = log_left;

		for (i = 0; i < ALL_LOG_NR && log_left; i++) {
			struct log_type *lt = &log_types[i];
			int ret;

			if (!(log_mask & lt->mask)) {
				ret = lt->fn(td, log_left != 1, unit_logs);
				if (!ret) {
					log_left--;
					log_mask |= lt->mask;
				}
			}
		}

		if (prev_log_left == log_left)
			usleep(5000);
	}

	td_restore_runstate(td, old_state);
}
void fio_writeout_logs(bool unit_logs)
{
	for_each_td(td) {
		td_writeout_logs(td, unit_logs);
	} end_for_each();
}