[fio.git] / iolog.c
/*
 * Code related to writing an iolog of what a thread is doing, and to
 * later read that back and replay
 */
#include <stdio.h>
#include <stdlib.h>
#include <libgen.h>
#include <assert.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#ifdef CONFIG_ZLIB
#include <zlib.h>
#endif

#include "flist.h"
#include "fio.h"
#include "verify.h"
#include "trim.h"
#include "filelock.h"
#include "lib/tp.h"

static const char iolog_ver2[] = "fio version 2 iolog";

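/*
 * Queue a stored io piece for later replay, and account its length
 * against the total amount of io this job will do.
 */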
void queue_io_piece(struct thread_data *td, struct io_piece *ipo)
{
	flist_add_tail(&ipo->list, &td->io_log_list);
	td->total_io_size += ipo->len;
}

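/*
 * Append one io to the write iolog, if this job is writing one.
 */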
void log_io_u(struct thread_data *td, struct io_u *io_u)
{
	const char *act[] = { "read", "write", "sync", "datasync",
				"sync_file_range", "wait", "trim" };

	assert(io_u->ddir <= 6);

	if (!td->o.write_iolog_file)
		return;

	fprintf(td->iolog_f, "%s %s %llu %lu\n", io_u->file->file_name,
						act[io_u->ddir], io_u->offset,
						io_u->buflen);
}

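/*
 * Log a file action (add/open/close) to the write iolog.
 */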
void log_file(struct thread_data *td, struct fio_file *f,
	      enum file_log_act what)
{
	const char *act[] = { "add", "open", "close" };

	assert(what < 3);

	if (!td->o.write_iolog_file)
		return;

	/*
	 * this happens on the pre-open/close done before the job starts
	 */
	if (!td->iolog_f)
		return;

	fprintf(td->iolog_f, "%s %s\n", f->file_name, act[what]);
}

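/*
 * Honor the delay recorded for a replayed entry, relative to the last
 * issue time. Sleep in slices of at most 500 msec so termination is
 * still noticed promptly.
 */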
static void iolog_delay(struct thread_data *td, unsigned long delay)
{
	unsigned long usec = utime_since_now(&td->last_issue);
	unsigned long this_delay;

	if (delay < usec)
		return;

	delay -= usec;

	while (delay && !td->terminate) {
		this_delay = delay;
		if (this_delay > 500000)
			this_delay = 500000;

		usec_sleep(td, this_delay);
		delay -= this_delay;
	}
}

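/*
 * Handle a special (non-io) entry, i.e. a file open/close/unlink.
 * Returns 0 if the entry is regular io, 1 if it was consumed here,
 * and -1 on error.
 */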
static int ipo_special(struct thread_data *td, struct io_piece *ipo)
{
	struct fio_file *f;
	int ret;

	/*
	 * Not a special ipo
	 */
	if (ipo->ddir != DDIR_INVAL)
		return 0;

	f = td->files[ipo->fileno];

	switch (ipo->file_action) {
	case FIO_LOG_OPEN_FILE:
		ret = td_io_open_file(td, f);
		if (!ret)
			break;
		td_verror(td, ret, "iolog open file");
		return -1;
	case FIO_LOG_CLOSE_FILE:
		td_io_close_file(td, f);
		break;
	case FIO_LOG_UNLINK_FILE:
		td_io_unlink_file(td, f);
		break;
	default:
		log_err("fio: bad file action %d\n", ipo->file_action);
		break;
	}

	return 1;
}

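/*
 * Set up the next io_u from the stored log. Returns 0 when an io was
 * prepared, 1 when the log is exhausted (the job is then marked done).
 */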
int read_iolog_get(struct thread_data *td, struct io_u *io_u)
{
	struct io_piece *ipo;
	unsigned long elapsed;

	while (!flist_empty(&td->io_log_list)) {
		int ret;

		ipo = flist_first_entry(&td->io_log_list, struct io_piece, list);
		flist_del(&ipo->list);
		remove_trim_entry(td, ipo);

		ret = ipo_special(td, ipo);
		if (ret < 0) {
			free(ipo);
			break;
		} else if (ret > 0) {
			free(ipo);
			continue;
		}

		io_u->ddir = ipo->ddir;
		if (ipo->ddir != DDIR_WAIT) {
			io_u->offset = ipo->offset;
			io_u->buflen = ipo->len;
			io_u->file = td->files[ipo->fileno];
			get_file(io_u->file);
			dprint(FD_IO, "iolog: get %llu/%lu/%s\n", io_u->offset,
						io_u->buflen, io_u->file->file_name);
			if (ipo->delay)
				iolog_delay(td, ipo->delay);
		} else {
			elapsed = mtime_since_genesis();
			if (ipo->delay > elapsed)
				usec_sleep(td, (ipo->delay - elapsed) * 1000);
		}

		free(ipo);

		if (io_u->ddir != DDIR_WAIT)
			return 0;
	}

	td->done = 1;
	return 1;
}

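/*
 * Free every io piece still held in the verify tree and history list.
 */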
void prune_io_piece_log(struct thread_data *td)
{
	struct io_piece *ipo;
	struct rb_node *n;

	while ((n = rb_first(&td->io_hist_tree)) != NULL) {
		ipo = rb_entry(n, struct io_piece, rb_node);
		rb_erase(n, &td->io_hist_tree);
		remove_trim_entry(td, ipo);
		td->io_hist_len--;
		free(ipo);
	}

	while (!flist_empty(&td->io_hist_list)) {
		ipo = flist_first_entry(&td->io_hist_list, struct io_piece, list);
		flist_del(&ipo->list);
		remove_trim_entry(td, ipo);
		td->io_hist_len--;
		free(ipo);
	}
}

/*
 * log a successful write, so we can unwind the log for verify
 */
void log_io_piece(struct thread_data *td, struct io_u *io_u)
{
	struct rb_node **p, *parent;
	struct io_piece *ipo, *__ipo;

	ipo = malloc(sizeof(struct io_piece));
	init_ipo(ipo);
	ipo->file = io_u->file;
	ipo->offset = io_u->offset;
	ipo->len = io_u->buflen;
	ipo->numberio = io_u->numberio;
	ipo->flags = IP_F_IN_FLIGHT;

	io_u->ipo = ipo;

	if (io_u_should_trim(td, io_u)) {
		flist_add_tail(&ipo->trim_list, &td->trim_list);
		td->trim_entries++;
	}

	/*
	 * We don't need to sort the entries, if:
	 *
	 *	Sequential writes, or
	 *	Random writes that lay out the file as it goes along
	 *
	 * For both these cases, just reading back data in the order we
	 * wrote it out is the fastest.
	 *
	 * One exception is if we don't have a random map AND we are doing
	 * verifies, in that case we need to check for duplicate blocks and
	 * drop the old one, which we rely on the rb insert/lookup for
	 * handling.
	 */
	if (((!td->o.verifysort) || !td_random(td) || !td->o.overwrite) &&
	      (file_randommap(td, ipo->file) || td->o.verify == VERIFY_NONE)) {
		INIT_FLIST_HEAD(&ipo->list);
		flist_add_tail(&ipo->list, &td->io_hist_list);
		ipo->flags |= IP_F_ONLIST;
		td->io_hist_len++;
		return;
	}

	RB_CLEAR_NODE(&ipo->rb_node);

	/*
	 * Sort the entry into the verification list
	 */
restart:
	p = &td->io_hist_tree.rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;

		__ipo = rb_entry(parent, struct io_piece, rb_node);
		if (ipo->file < __ipo->file)
			p = &(*p)->rb_left;
		else if (ipo->file > __ipo->file)
			p = &(*p)->rb_right;
		else if (ipo->offset < __ipo->offset)
			p = &(*p)->rb_left;
		else if (ipo->offset > __ipo->offset)
			p = &(*p)->rb_right;
		else {
			dprint(FD_IO, "iolog: overlap %llu/%lu, %llu/%lu",
				__ipo->offset, __ipo->len,
				ipo->offset, ipo->len);
			td->io_hist_len--;
			rb_erase(parent, &td->io_hist_tree);
			remove_trim_entry(td, __ipo);
			free(__ipo);
			goto restart;
		}
	}

	rb_link_node(&ipo->rb_node, parent, p);
	rb_insert_color(&ipo->rb_node, &td->io_hist_tree);
	ipo->flags |= IP_F_ONRB;
	td->io_hist_len++;
}

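/*
 * Drop a previously logged io piece again, e.g. when the io it
 * described did not complete and should not be verified.
 */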
void unlog_io_piece(struct thread_data *td, struct io_u *io_u)
{
	struct io_piece *ipo = io_u->ipo;

	if (!ipo)
		return;

	if (ipo->flags & IP_F_ONRB)
		rb_erase(&ipo->rb_node, &td->io_hist_tree);
	else if (ipo->flags & IP_F_ONLIST)
		flist_del(&ipo->list);

	free(ipo);
	io_u->ipo = NULL;
	td->io_hist_len--;
}

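/*
 * Shorten the logged length to what was actually transferred.
 */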
void trim_io_piece(struct thread_data *td, struct io_u *io_u)
{
	struct io_piece *ipo = io_u->ipo;

	if (!ipo)
		return;

	ipo->len = io_u->xfer_buflen - io_u->resid;
}

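/*
 * Flush and close the write iolog, releasing its buffer.
 */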
void write_iolog_close(struct thread_data *td)
{
	fflush(td->iolog_f);
	fclose(td->iolog_f);
	free(td->iolog_buf);
	td->iolog_f = NULL;
	td->iolog_buf = NULL;
}

/*
 * Read version 2 iolog data. It is enhanced to include per-file logging,
 * syncs, etc.
 */
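/*
 * After the version header, entries are one per line: "<file> <action>"
 * for file actions and "<file> <action> <offset> <length>" for io.
 * An illustrative example:
 *
 *	/dev/sdb add
 *	/dev/sdb open
 *	/dev/sdb write 0 4096
 *	/dev/sdb close
 */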
static int read_iolog2(struct thread_data *td, FILE *f)
{
	unsigned long long offset;
	unsigned int bytes;
	int reads, writes, waits, fileno = 0, file_action = 0; /* stupid gcc */
	char *fname, *act;
	char *str, *p;
	enum fio_ddir rw;

	free_release_files(td);

	/*
	 * Read in the read iolog and store it, reuse the infrastructure
	 * for doing verifications.
	 */
	str = malloc(4096);
	fname = malloc(256+16);
	act = malloc(256+16);

	reads = writes = waits = 0;
	while ((p = fgets(str, 4096, f)) != NULL) {
		struct io_piece *ipo;
		int r;

		r = sscanf(p, "%256s %256s %llu %u", fname, act, &offset,
									&bytes);
		if (r == 4) {
			/*
			 * Check action first
			 */
			if (!strcmp(act, "wait"))
				rw = DDIR_WAIT;
			else if (!strcmp(act, "read"))
				rw = DDIR_READ;
			else if (!strcmp(act, "write"))
				rw = DDIR_WRITE;
			else if (!strcmp(act, "sync"))
				rw = DDIR_SYNC;
			else if (!strcmp(act, "datasync"))
				rw = DDIR_DATASYNC;
			else if (!strcmp(act, "trim"))
				rw = DDIR_TRIM;
			else {
				log_err("fio: bad iolog file action: %s\n",
									act);
				continue;
			}
			fileno = get_fileno(td, fname);
		} else if (r == 2) {
			rw = DDIR_INVAL;
			if (!strcmp(act, "add")) {
				fileno = add_file(td, fname, 0, 1);
				file_action = FIO_LOG_ADD_FILE;
				continue;
			} else if (!strcmp(act, "open")) {
				fileno = get_fileno(td, fname);
				file_action = FIO_LOG_OPEN_FILE;
			} else if (!strcmp(act, "close")) {
				fileno = get_fileno(td, fname);
				file_action = FIO_LOG_CLOSE_FILE;
			} else {
				log_err("fio: bad iolog file action: %s\n",
									act);
				continue;
			}
		} else {
			log_err("bad iolog2: %s", p);
			continue;
		}

		if (rw == DDIR_READ)
			reads++;
		else if (rw == DDIR_WRITE) {
			/*
			 * Don't add a write for ro mode
			 */
			if (read_only)
				continue;
			writes++;
		} else if (rw == DDIR_WAIT) {
			waits++;
		} else if (rw == DDIR_INVAL) {
		} else if (!ddir_sync(rw)) {
			log_err("bad ddir: %d\n", rw);
			continue;
		}

		/*
		 * Make note of file
		 */
		ipo = malloc(sizeof(*ipo));
		init_ipo(ipo);
		ipo->ddir = rw;
		if (rw == DDIR_WAIT) {
			ipo->delay = offset;
		} else {
			ipo->offset = offset;
			ipo->len = bytes;
			if (rw != DDIR_INVAL && bytes > td->o.max_bs[rw])
				td->o.max_bs[rw] = bytes;
			ipo->fileno = fileno;
			ipo->file_action = file_action;
			td->o.size += bytes;
		}

		queue_io_piece(td, ipo);
	}

	free(str);
	free(act);
	free(fname);

	if (writes && read_only) {
		log_err("fio: <%s> skips replay of %d writes due to"
			" read-only\n", td->o.name, writes);
		writes = 0;
	}

	if (!reads && !writes && !waits)
		return 1;
	else if (reads && !writes)
		td->o.td_ddir = TD_DDIR_READ;
	else if (!reads && writes)
		td->o.td_ddir = TD_DDIR_WRITE;
	else
		td->o.td_ddir = TD_DDIR_RW;

	return 0;
}

/*
 * open iolog, check version, and call appropriate parser
 */
static int init_iolog_read(struct thread_data *td)
{
	char buffer[256], *p;
	FILE *f;
	int ret;

	f = fopen(td->o.read_iolog_file, "r");
	if (!f) {
		perror("fopen read iolog");
		return 1;
	}

	p = fgets(buffer, sizeof(buffer), f);
	if (!p) {
		td_verror(td, errno, "iolog read");
		log_err("fio: unable to read iolog\n");
		fclose(f);
		return 1;
	}

	/*
	 * version 2 of the iolog stores a specific string as the
	 * first line, check for that
	 */
	if (!strncmp(iolog_ver2, buffer, strlen(iolog_ver2)))
		ret = read_iolog2(td, f);
	else {
		log_err("fio: iolog version 1 is no longer supported\n");
		ret = 1;
	}

	fclose(f);
	return ret;
}

/*
 * Set up a log for storing io patterns.
 */
static int init_iolog_write(struct thread_data *td)
{
	struct fio_file *ff;
	FILE *f;
	unsigned int i;

	f = fopen(td->o.write_iolog_file, "a");
	if (!f) {
		perror("fopen write iolog");
		return 1;
	}

	/*
	 * That's it for writing, setup a log buffer and we're done.
	 */
	td->iolog_f = f;
	td->iolog_buf = malloc(8192);
	setvbuf(f, td->iolog_buf, _IOFBF, 8192);

	/*
	 * write our version line
	 */
	if (fprintf(f, "%s\n", iolog_ver2) < 0) {
		perror("iolog init\n");
		return 1;
	}

	/*
	 * add all known files
	 */
	for_each_file(td, ff, i)
		log_file(td, ff, FIO_LOG_ADD_FILE);

	return 0;
}

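/*
 * Set up iolog reading or writing for this job. A read iolog may be
 * either a blktrace file or a fio iolog; a write iolog is always the
 * version 2 format.
 */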
int init_iolog(struct thread_data *td)
{
	int ret = 0;

	if (td->o.read_iolog_file) {
		int need_swap;

		/*
		 * Check if it's a blktrace file and load that if possible.
		 * Otherwise assume it's a normal log file and load that.
		 */
		if (is_blktrace(td->o.read_iolog_file, &need_swap))
			ret = load_blktrace(td, td->o.read_iolog_file, need_swap);
		else
			ret = init_iolog_read(td);
	} else if (td->o.write_iolog_file)
		ret = init_iolog_write(td);

	if (ret)
		td_verror(td, EINVAL, "failed initializing iolog");

	return ret;
}

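/*
 * Allocate and initialize an io_log from the given parameters. Log
 * compression is only kept enabled if an owning thread was passed in.
 */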
void setup_log(struct io_log **log, struct log_params *p,
	       const char *filename)
{
	struct io_log *l = malloc(sizeof(*l));

	memset(l, 0, sizeof(*l));
	l->nr_samples = 0;
	l->max_samples = 1024;
	l->log_type = p->log_type;
	l->log_offset = p->log_offset;
	l->log_gz = p->log_gz;
	l->log_gz_store = p->log_gz_store;
	l->log = malloc(l->max_samples * log_entry_sz(l));
	l->avg_msec = p->avg_msec;
	l->filename = strdup(filename);
	l->td = p->td;

	if (l->log_offset)
		l->log_ddir_mask = LOG_OFFSET_SAMPLE_BIT;

	INIT_FLIST_HEAD(&l->chunk_list);

	if (l->log_gz && !p->td)
		l->log_gz = 0;
	else if (l->log_gz) {
		pthread_mutex_init(&l->chunk_lock, NULL);
		p->td->flags |= TD_F_COMPRESS_LOG;
	}

	*log = l;
}

#ifdef CONFIG_SETVBUF
static void *set_file_buffer(FILE *f)
{
	size_t size = 1048576;
	void *buf;

	buf = malloc(size);
	setvbuf(f, buf, _IOFBF, size);
	return buf;
}

static void clear_file_buffer(void *buf)
{
	free(buf);
}
#else
static void *set_file_buffer(FILE *f)
{
	return NULL;
}

static void clear_file_buffer(void *buf)
{
}
#endif

void free_log(struct io_log *log)
{
	free(log->log);
	free(log->filename);
	free(log);
}

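/*
 * Write samples out in text form, one "time, value, ddir, bs" line per
 * sample, with the offset appended when offset logging is enabled.
 */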
static void flush_samples(FILE *f, void *samples, uint64_t sample_size)
{
	struct io_sample *s;
	int log_offset;
	uint64_t i, nr_samples;

	if (!sample_size)
		return;

	s = __get_sample(samples, 0, 0);
	log_offset = (s->__ddir & LOG_OFFSET_SAMPLE_BIT) != 0;

	nr_samples = sample_size / __log_entry_sz(log_offset);

	for (i = 0; i < nr_samples; i++) {
		s = __get_sample(samples, log_offset, i);

		if (!log_offset) {
			fprintf(f, "%lu, %lu, %u, %u\n",
					(unsigned long) s->time,
					(unsigned long) s->val,
					io_sample_ddir(s), s->bs);
		} else {
			struct io_sample_offset *so = (void *) s;

			fprintf(f, "%lu, %lu, %u, %u, %llu\n",
					(unsigned long) s->time,
					(unsigned long) s->val,
					io_sample_ddir(s), s->bs,
					(unsigned long long) so->offset);
		}
	}
}

#ifdef CONFIG_ZLIB

struct iolog_flush_data {
	struct tp_work work;
	struct io_log *log;
	void *samples;
	uint64_t nr_samples;
};

struct iolog_compress {
	struct flist_head list;
	void *buf;
	size_t len;
	unsigned int seq;
};

#define GZ_CHUNK	131072

static struct iolog_compress *get_new_chunk(unsigned int seq)
{
	struct iolog_compress *c;

	c = malloc(sizeof(*c));
	INIT_FLIST_HEAD(&c->list);
	c->buf = malloc(GZ_CHUNK);
	c->len = 0;
	c->seq = seq;
	return c;
}

static void free_chunk(struct iolog_compress *ic)
{
	free(ic->buf);
	free(ic);
}

static int z_stream_init(z_stream *stream, int gz_hdr)
{
	int wbits = 15;

	stream->zalloc = Z_NULL;
	stream->zfree = Z_NULL;
	stream->opaque = Z_NULL;
	stream->next_in = Z_NULL;

	/*
	 * zlib magic - add 32 for auto-detection of gz header or not,
	 * if we decide to store files in a gzip friendly format.
	 */
	if (gz_hdr)
		wbits += 32;

	if (inflateInit2(stream, wbits) != Z_OK)
		return 1;

	return 0;
}

struct inflate_chunk_iter {
	unsigned int seq;
	int err;
	void *buf;
	size_t buf_size;
	size_t buf_used;
	size_t chunk_sz;
};

static void finish_chunk(z_stream *stream, FILE *f,
			 struct inflate_chunk_iter *iter)
{
	int ret;

	ret = inflateEnd(stream);
	if (ret != Z_OK)
		log_err("fio: failed to end log inflation (%d)\n", ret);

	flush_samples(f, iter->buf, iter->buf_used);
	free(iter->buf);
	iter->buf = NULL;
	iter->buf_size = iter->buf_used = 0;
}

/*
 * Iterative chunk inflation. Handles cases where we cross into a new
 * sequence, doing flush finish of previous chunk if needed.
 */
static size_t inflate_chunk(struct iolog_compress *ic, int gz_hdr, FILE *f,
			    z_stream *stream, struct inflate_chunk_iter *iter)
{
	size_t ret;

	dprint(FD_COMPRESS, "inflate chunk size=%lu, seq=%u",
				(unsigned long) ic->len, ic->seq);

	if (ic->seq != iter->seq) {
		if (iter->seq)
			finish_chunk(stream, f, iter);

		z_stream_init(stream, gz_hdr);
		iter->seq = ic->seq;
	}

	stream->avail_in = ic->len;
	stream->next_in = ic->buf;

	if (!iter->buf_size) {
		iter->buf_size = iter->chunk_sz;
		iter->buf = malloc(iter->buf_size);
	}

	while (stream->avail_in) {
		size_t this_out = iter->buf_size - iter->buf_used;
		int err;

		stream->avail_out = this_out;
		stream->next_out = iter->buf + iter->buf_used;

		err = inflate(stream, Z_NO_FLUSH);
		if (err < 0) {
			log_err("fio: failed inflating log: %d\n", err);
			iter->err = err;
			break;
		}

		iter->buf_used += this_out - stream->avail_out;

		if (!stream->avail_out) {
			iter->buf_size += iter->chunk_sz;
			iter->buf = realloc(iter->buf, iter->buf_size);
			continue;
		}

		if (err == Z_STREAM_END)
			break;
	}

	ret = (void *) stream->next_in - ic->buf;

	dprint(FD_COMPRESS, "inflated to size=%lu\n", (unsigned long) ret);

	return ret;
}

/*
 * Inflate stored compressed chunks, or write them directly to the log
 * file if so instructed.
 */
static int inflate_gz_chunks(struct io_log *log, FILE *f)
{
	struct inflate_chunk_iter iter = { .chunk_sz = log->log_gz, };
	z_stream stream;

	while (!flist_empty(&log->chunk_list)) {
		struct iolog_compress *ic;

		ic = flist_first_entry(&log->chunk_list, struct iolog_compress, list);
		flist_del(&ic->list);

		if (log->log_gz_store) {
			size_t ret;

			dprint(FD_COMPRESS, "log write chunk size=%lu, "
				"seq=%u\n", (unsigned long) ic->len, ic->seq);

			ret = fwrite(ic->buf, ic->len, 1, f);
			if (ret != 1 || ferror(f)) {
				iter.err = errno;
				log_err("fio: error writing compressed log\n");
			}
		} else
			inflate_chunk(ic, log->log_gz_store, f, &stream, &iter);

		free_chunk(ic);
	}

	if (iter.seq) {
		finish_chunk(&stream, f, &iter);
		free(iter.buf);
	}

	return iter.err;
}

/*
 * Open compressed log file and decompress the stored chunks and
 * write them to stdout. The chunks are stored sequentially in the
 * file, so we iterate over them and do them one-by-one.
 */
int iolog_file_inflate(const char *file)
{
	struct inflate_chunk_iter iter = { .chunk_sz = 64 * 1024 * 1024, };
	struct iolog_compress ic;
	z_stream stream;
	struct stat sb;
	ssize_t ret;
	size_t total;
	void *buf;
	FILE *f;

	f = fopen(file, "r");
	if (!f) {
		perror("fopen");
		return 1;
	}

	if (stat(file, &sb) < 0) {
		fclose(f);
		perror("stat");
		return 1;
	}

	ic.buf = buf = malloc(sb.st_size);
	ic.len = sb.st_size;
	ic.seq = 1;

	ret = fread(ic.buf, ic.len, 1, f);
	if (ret < 0) {
		perror("fread");
		fclose(f);
		return 1;
	} else if (ret != 1) {
		log_err("fio: short read on reading log\n");
		fclose(f);
		return 1;
	}

	fclose(f);

	/*
	 * Each chunk will return Z_STREAM_END. We don't know how many
	 * chunks are in the file, so we just keep looping and incrementing
	 * the sequence number until we have consumed the whole compressed
	 * file.
	 */
	total = ic.len;
	do {
		size_t ret;

		ret = inflate_chunk(&ic, 1, stdout, &stream, &iter);
		total -= ret;
		if (!total)
			break;
		if (iter.err)
			break;

		ic.seq++;
		ic.len -= ret;
		ic.buf += ret;
	} while (1);

	if (iter.seq) {
		finish_chunk(&stream, stdout, &iter);
		free(iter.buf);
	}

	free(buf);
	return iter.err;
}

#else

static int inflate_gz_chunks(struct io_log *log, FILE *f)
{
	return 0;
}

int iolog_file_inflate(const char *file)
{
	log_err("fio: log inflation not possible without zlib\n");
	return 1;
}

#endif

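/*
 * Write a log to its file: first any stored compressed chunks, then
 * whatever samples are still held in memory.
 */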
void flush_log(struct io_log *log)
{
	void *buf;
	FILE *f;

	f = fopen(log->filename, "w");
	if (!f) {
		perror("fopen log");
		return;
	}

	buf = set_file_buffer(f);

	inflate_gz_chunks(log, f);

	flush_samples(f, log->log, log->nr_samples * log_entry_sz(log));

	fclose(f);
	clear_file_buffer(buf);
}

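/*
 * Final flush of a log: compress any pending samples if a helper pool
 * is active, then write the log out under the file lock, or ship it to
 * the client for GUI type connections.
 */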
static int finish_log(struct thread_data *td, struct io_log *log, int trylock)
{
	if (td->tp_data)
		iolog_flush(log, 1);

	if (trylock) {
		if (fio_trylock_file(log->filename))
			return 1;
	} else
		fio_lock_file(log->filename);

	if (td->client_type == FIO_CLIENT_TYPE_GUI)
		fio_send_iolog(td, log, log->filename);
	else
		flush_log(log);

	fio_unlock_file(log->filename);
	free_log(log);
	return 0;
}

#ifdef CONFIG_ZLIB

/*
 * Invoked from our compress helper thread, when logging would have exceeded
 * the specified memory limitation. Compresses the previously stored
 * entries.
 */
static int gz_work(struct tp_work *work)
{
	struct iolog_flush_data *data;
	struct iolog_compress *c;
	struct flist_head list;
	unsigned int seq;
	z_stream stream;
	size_t total = 0;
	int ret;

	INIT_FLIST_HEAD(&list);

	data = container_of(work, struct iolog_flush_data, work);

	stream.zalloc = Z_NULL;
	stream.zfree = Z_NULL;
	stream.opaque = Z_NULL;

	ret = deflateInit(&stream, Z_DEFAULT_COMPRESSION);
	if (ret != Z_OK) {
		log_err("fio: failed to init gz stream\n");
		return 0;
	}

	seq = ++data->log->chunk_seq;

	stream.next_in = (void *) data->samples;
	stream.avail_in = data->nr_samples * log_entry_sz(data->log);

	dprint(FD_COMPRESS, "deflate input size=%lu, seq=%u\n",
				(unsigned long) stream.avail_in, seq);
	do {
		c = get_new_chunk(seq);
		stream.avail_out = GZ_CHUNK;
		stream.next_out = c->buf;
		ret = deflate(&stream, Z_NO_FLUSH);
		if (ret < 0) {
			log_err("fio: deflate log (%d)\n", ret);
			free_chunk(c);
			goto err;
		}

		c->len = GZ_CHUNK - stream.avail_out;
		flist_add_tail(&c->list, &list);
		total += c->len;
	} while (stream.avail_in);

	stream.next_out = c->buf + c->len;
	stream.avail_out = GZ_CHUNK - c->len;

	ret = deflate(&stream, Z_FINISH);
	if (ret == Z_STREAM_END)
		c->len = GZ_CHUNK - stream.avail_out;
	else {
		do {
			c = get_new_chunk(seq);
			stream.avail_out = GZ_CHUNK;
			stream.next_out = c->buf;
			ret = deflate(&stream, Z_FINISH);
			c->len = GZ_CHUNK - stream.avail_out;
			total += c->len;
			flist_add_tail(&c->list, &list);
		} while (ret != Z_STREAM_END);
	}

	dprint(FD_COMPRESS, "deflated to size=%lu\n", (unsigned long) total);

	ret = deflateEnd(&stream);
	if (ret != Z_OK)
		log_err("fio: deflateEnd %d\n", ret);

	free(data->samples);

	if (!flist_empty(&list)) {
		pthread_mutex_lock(&data->log->chunk_lock);
		flist_splice_tail(&list, &data->log->chunk_list);
		pthread_mutex_unlock(&data->log->chunk_lock);
	}

	ret = 0;
done:
	if (work->wait) {
		work->done = 1;
		pthread_cond_signal(&work->cv);
	} else
		free(data);

	return ret;
err:
	while (!flist_empty(&list)) {
		c = flist_first_entry(&list, struct iolog_compress, list);
		flist_del(&c->list);
		free_chunk(c);
	}
	ret = 1;
	goto done;
}

/*
 * Queue work item to compress the existing log entries. We copy the
 * samples, and reset the log sample count to 0 (so the logging will
 * continue to use the memory associated with the log). If called with
 * wait == 1, will not return until the log compression has completed.
 */
int iolog_flush(struct io_log *log, int wait)
{
	struct tp_data *tdat = log->td->tp_data;
	struct iolog_flush_data *data;
	size_t sample_size;

	data = malloc(sizeof(*data));
	if (!data)
		return 1;

	data->log = log;

	sample_size = log->nr_samples * log_entry_sz(log);
	data->samples = malloc(sample_size);
	if (!data->samples) {
		free(data);
		return 1;
	}

	memcpy(data->samples, log->log, sample_size);
	data->nr_samples = log->nr_samples;
	data->work.fn = gz_work;
	log->nr_samples = 0;

	if (wait) {
		pthread_mutex_init(&data->work.lock, NULL);
		pthread_cond_init(&data->work.cv, NULL);
		data->work.wait = 1;
	} else
		data->work.wait = 0;

	data->work.prio = 1;
	tp_queue_work(tdat, &data->work);

	if (wait) {
		pthread_mutex_lock(&data->work.lock);
		while (!data->work.done)
			pthread_cond_wait(&data->work.cv, &data->work.lock);
		pthread_mutex_unlock(&data->work.lock);
		free(data);
	}

	return 0;
}

#else

int iolog_flush(struct io_log *log, int wait)
{
	return 1;
}

#endif

static int write_iops_log(struct thread_data *td, int try)
{
	struct io_log *log = td->iops_log;

	if (!log)
		return 0;

	return finish_log(td, log, try);
}

static int write_slat_log(struct thread_data *td, int try)
{
	struct io_log *log = td->slat_log;

	if (!log)
		return 0;

	return finish_log(td, log, try);
}

static int write_clat_log(struct thread_data *td, int try)
{
	struct io_log *log = td->clat_log;

	if (!log)
		return 0;

	return finish_log(td, log, try);
}

static int write_lat_log(struct thread_data *td, int try)
{
	struct io_log *log = td->lat_log;

	if (!log)
		return 0;

	return finish_log(td, log, try);
}

static int write_bandw_log(struct thread_data *td, int try)
{
	struct io_log *log = td->bw_log;

	if (!log)
		return 0;

	return finish_log(td, log, try);
}

enum {
	BW_LOG_MASK	= 1,
	LAT_LOG_MASK	= 2,
	SLAT_LOG_MASK	= 4,
	CLAT_LOG_MASK	= 8,
	IOPS_LOG_MASK	= 16,

	ALL_LOG_NR	= 5,
};

struct log_type {
	unsigned int mask;
	int (*fn)(struct thread_data *, int);
};

static struct log_type log_types[] = {
	{
		.mask	= BW_LOG_MASK,
		.fn	= write_bandw_log,
	},
	{
		.mask	= LAT_LOG_MASK,
		.fn	= write_lat_log,
	},
	{
		.mask	= SLAT_LOG_MASK,
		.fn	= write_slat_log,
	},
	{
		.mask	= CLAT_LOG_MASK,
		.fn	= write_clat_log,
	},
	{
		.mask	= IOPS_LOG_MASK,
		.fn	= write_iops_log,
	},
};

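/*
 * Write out all enabled logs for this thread. While more than one log
 * remains, each is attempted with a trylock so a busy file lock does
 * not stall the rest; the last log is flushed with a blocking lock.
 */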
void fio_writeout_logs(struct thread_data *td)
{
	unsigned int log_mask = 0;
	unsigned int log_left = ALL_LOG_NR;
	int old_state, i;

	old_state = td_bump_runstate(td, TD_FINISHING);

	finalize_logs(td);

	while (log_left) {
		int prev_log_left = log_left;

		for (i = 0; i < ALL_LOG_NR && log_left; i++) {
			struct log_type *lt = &log_types[i];
			int ret;

			if (!(log_mask & lt->mask)) {
				ret = lt->fn(td, log_left != 1);
				if (!ret) {
					log_left--;
					log_mask |= lt->mask;
				}
			}
		}

		if (prev_log_left == log_left)
			usleep(5000);
	}

	td_restore_runstate(td, old_state);
}