fio: fix alignment to prevent bus error on ARM
[fio.git] / iolog.c
1 /*
2  * Code related to writing an iolog of what a thread is doing, and to
3  * later read that back and replay
4  */
5 #include <stdio.h>
6 #include <stdlib.h>
7 #include <libgen.h>
8 #include <assert.h>
9 #include <sys/types.h>
10 #include <sys/stat.h>
11 #include <unistd.h>
12 #ifdef CONFIG_ZLIB
13 #include <zlib.h>
14 #endif
15
16 #include "flist.h"
17 #include "fio.h"
18 #include "verify.h"
19 #include "trim.h"
20 #include "filelock.h"
21 #include "lib/tp.h"
22
23 static const char iolog_ver2[] = "fio version 2 iolog";
24
25 void queue_io_piece(struct thread_data *td, struct io_piece *ipo)
26 {
27         flist_add_tail(&ipo->list, &td->io_log_list);
28         td->total_io_size += ipo->len;
29 }
30
31 void log_io_u(const struct thread_data *td, const struct io_u *io_u)
32 {
33         if (!td->o.write_iolog_file)
34                 return;
35
36         fprintf(td->iolog_f, "%s %s %llu %lu\n", io_u->file->file_name,
37                                                 io_ddir_name(io_u->ddir),
38                                                 io_u->offset, io_u->buflen);
39 }
40
41 void log_file(struct thread_data *td, struct fio_file *f,
42               enum file_log_act what)
43 {
44         const char *act[] = { "add", "open", "close" };
45
46         assert(what < 3);
47
48         if (!td->o.write_iolog_file)
49                 return;
50
51
52         /*
53          * this happens on the pre-open/close done before the job starts
54          */
55         if (!td->iolog_f)
56                 return;
57
58         fprintf(td->iolog_f, "%s %s\n", f->file_name, act[what]);
59 }
60
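/*
 * Sleep off whatever is left of the logged delay, after subtracting the
 * time already spent since the last issue. Sleeping is done in slices of
 * at most 500 msec so td->terminate is honored promptly.
 */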
61 static void iolog_delay(struct thread_data *td, unsigned long delay)
62 {
63         unsigned long usec = utime_since_now(&td->last_issue);
64         unsigned long this_delay;
65
66         if (delay < usec)
67                 return;
68
69         delay -= usec;
70
71         while (delay && !td->terminate) {
72                 this_delay = delay;
73                 if (this_delay > 500000)
74                         this_delay = 500000;
75
76                 usec_sleep(td, this_delay);
77                 delay -= this_delay;
78         }
79 }
80
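/*
 * Handle a special (DDIR_INVAL) io piece that encodes a file action.
 * Returns 0 if the ipo is not special, 1 if it was consumed here, and
 * -1 if the file action failed.
 */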
81 static int ipo_special(struct thread_data *td, struct io_piece *ipo)
82 {
83         struct fio_file *f;
84         int ret;
85
86         /*
87          * Not a special ipo
88          */
89         if (ipo->ddir != DDIR_INVAL)
90                 return 0;
91
92         f = td->files[ipo->fileno];
93
94         switch (ipo->file_action) {
95         case FIO_LOG_OPEN_FILE:
96                 ret = td_io_open_file(td, f);
97                 if (!ret)
98                         break;
99                 td_verror(td, ret, "iolog open file");
100                 return -1;
101         case FIO_LOG_CLOSE_FILE:
102                 td_io_close_file(td, f);
103                 break;
104         case FIO_LOG_UNLINK_FILE:
105                 td_io_unlink_file(td, f);
106                 break;
107         default:
108                 log_err("fio: bad file action %d\n", ipo->file_action);
109                 break;
110         }
111
112         return 1;
113 }
114
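/*
 * Set up io_u from the next entry in the replay log. Wait entries are
 * consumed by sleeping inline. Returns 0 once io_u has been filled in,
 * or 1 when the log is exhausted (td->done is set).
 */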
115 int read_iolog_get(struct thread_data *td, struct io_u *io_u)
116 {
117         struct io_piece *ipo;
118         unsigned long elapsed;
119
120         while (!flist_empty(&td->io_log_list)) {
121                 int ret;
122
123                 ipo = flist_first_entry(&td->io_log_list, struct io_piece, list);
124                 flist_del(&ipo->list);
125                 remove_trim_entry(td, ipo);
126
127                 ret = ipo_special(td, ipo);
128                 if (ret < 0) {
129                         free(ipo);
130                         break;
131                 } else if (ret > 0) {
132                         free(ipo);
133                         continue;
134                 }
135
136                 io_u->ddir = ipo->ddir;
137                 if (ipo->ddir != DDIR_WAIT) {
138                         io_u->offset = ipo->offset;
139                         io_u->buflen = ipo->len;
140                         io_u->file = td->files[ipo->fileno];
141                         get_file(io_u->file);
142                         dprint(FD_IO, "iolog: get %llu/%lu/%s\n", io_u->offset,
143                                                 io_u->buflen, io_u->file->file_name);
144                         if (ipo->delay)
145                                 iolog_delay(td, ipo->delay);
146                 } else {
147                         elapsed = mtime_since_genesis();
148                         if (ipo->delay > elapsed)
149                                 usec_sleep(td, (ipo->delay - elapsed) * 1000);
150                 }
151
152                 free(ipo);
153
154                 if (io_u->ddir != DDIR_WAIT)
155                         return 0;
156         }
157
158         td->done = 1;
159         return 1;
160 }
161
162 void prune_io_piece_log(struct thread_data *td)
163 {
164         struct io_piece *ipo;
165         struct rb_node *n;
166
167         while ((n = rb_first(&td->io_hist_tree)) != NULL) {
168                 ipo = rb_entry(n, struct io_piece, rb_node);
169                 rb_erase(n, &td->io_hist_tree);
170                 remove_trim_entry(td, ipo);
171                 td->io_hist_len--;
172                 free(ipo);
173         }
174
175         while (!flist_empty(&td->io_hist_list)) {
176                 ipo = flist_first_entry(&td->io_hist_list, struct io_piece, list);
177                 flist_del(&ipo->list);
178                 remove_trim_entry(td, ipo);
179                 td->io_hist_len--;
180                 free(ipo);
181         }
182 }
183
184 /*
185  * log a successful write, so we can unwind the log for verify
186  */
187 void log_io_piece(struct thread_data *td, struct io_u *io_u)
188 {
189         struct rb_node **p, *parent;
190         struct io_piece *ipo, *__ipo;
191
192         ipo = malloc(sizeof(struct io_piece));
193         init_ipo(ipo);
194         ipo->file = io_u->file;
195         ipo->offset = io_u->offset;
196         ipo->len = io_u->buflen;
197         ipo->numberio = io_u->numberio;
198         ipo->flags = IP_F_IN_FLIGHT;
199
200         io_u->ipo = ipo;
201
202         if (io_u_should_trim(td, io_u)) {
203                 flist_add_tail(&ipo->trim_list, &td->trim_list);
204                 td->trim_entries++;
205         }
206
207         /*
208          * We don't need to sort the entries, if:
209          *
210          *      Sequential writes, or
211          *      Random writes that lay out the file as it goes along
212          *
213          * For both these cases, just reading back data in the order we
214          * wrote it out is the fastest.
215          *
216          * One exception is if we don't have a random map AND we are doing
217          * verifies, in that case we need to check for duplicate blocks and
218          * drop the old one, which we rely on the rb insert/lookup for
219          * handling.
220          */
221         if (((!td->o.verifysort) || !td_random(td) || !td->o.overwrite) &&
222               (file_randommap(td, ipo->file) || td->o.verify == VERIFY_NONE)) {
223                 INIT_FLIST_HEAD(&ipo->list);
224                 flist_add_tail(&ipo->list, &td->io_hist_list);
225                 ipo->flags |= IP_F_ONLIST;
226                 td->io_hist_len++;
227                 return;
228         }
229
230         RB_CLEAR_NODE(&ipo->rb_node);
231
232         /*
233          * Sort the entry into the verification list
234          */
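        /*
         * The tree is keyed on (file, offset). If an entry with the same
         * key already exists, the old one is erased and the insert is
         * retried, so only the most recent write to a block is kept.
         */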
235 restart:
236         p = &td->io_hist_tree.rb_node;
237         parent = NULL;
238         while (*p) {
239                 parent = *p;
240
241                 __ipo = rb_entry(parent, struct io_piece, rb_node);
242                 if (ipo->file < __ipo->file)
243                         p = &(*p)->rb_left;
244                 else if (ipo->file > __ipo->file)
245                         p = &(*p)->rb_right;
246                 else if (ipo->offset < __ipo->offset)
247                         p = &(*p)->rb_left;
248                 else if (ipo->offset > __ipo->offset)
249                         p = &(*p)->rb_right;
250                 else {
251                         dprint(FD_IO, "iolog: overlap %llu/%lu, %llu/%lu\n",
252                                 __ipo->offset, __ipo->len,
253                                 ipo->offset, ipo->len);
254                         td->io_hist_len--;
255                         rb_erase(parent, &td->io_hist_tree);
256                         remove_trim_entry(td, __ipo);
257                         free(__ipo);
258                         goto restart;
259                 }
260         }
261
262         rb_link_node(&ipo->rb_node, parent, p);
263         rb_insert_color(&ipo->rb_node, &td->io_hist_tree);
264         ipo->flags |= IP_F_ONRB;
265         td->io_hist_len++;
266 }
267
268 void unlog_io_piece(struct thread_data *td, struct io_u *io_u)
269 {
270         struct io_piece *ipo = io_u->ipo;
271
272         if (!ipo)
273                 return;
274
275         if (ipo->flags & IP_F_ONRB)
276                 rb_erase(&ipo->rb_node, &td->io_hist_tree);
277         else if (ipo->flags & IP_F_ONLIST)
278                 flist_del(&ipo->list);
279
280         free(ipo);
281         io_u->ipo = NULL;
282         td->io_hist_len--;
283 }
284
285 void trim_io_piece(struct thread_data *td, const struct io_u *io_u)
286 {
287         struct io_piece *ipo = io_u->ipo;
288
289         if (!ipo)
290                 return;
291
292         ipo->len = io_u->xfer_buflen - io_u->resid;
293 }
294
295 void write_iolog_close(struct thread_data *td)
296 {
297         fflush(td->iolog_f);
298         fclose(td->iolog_f);
299         free(td->iolog_buf);
300         td->iolog_f = NULL;
301         td->iolog_buf = NULL;
302 }
303
304 /*
305  * Read version 2 iolog data. It is enhanced to include per-file logging,
306  * syncs, etc.
307  */
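/*
 * Following the "fio version 2 iolog" header line, each entry is a single
 * line. IO entries look like
 *
 *	<file name> <read|write|sync|datasync|trim|wait> <offset> <length>
 *
 * (a wait entry carries its delay in the offset field), while file
 * management entries look like
 *
 *	<file name> <add|open|close>
 *
 * as parsed by the sscanf() below.
 */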
308 static int read_iolog2(struct thread_data *td, FILE *f)
309 {
310         unsigned long long offset;
311         unsigned int bytes;
312         int reads, writes, waits, fileno = 0, file_action = 0; /* stupid gcc */
313         char *fname, *act;
314         char *str, *p;
315         enum fio_ddir rw;
316
317         free_release_files(td);
318
319         /*
320          * Read in the read iolog and store it, reuse the infrastructure
321          * for doing verifications.
322          */
323         str = malloc(4096);
324         fname = malloc(256+16);
325         act = malloc(256+16);
326
327         reads = writes = waits = 0;
328         while ((p = fgets(str, 4096, f)) != NULL) {
329                 struct io_piece *ipo;
330                 int r;
331
332                 r = sscanf(p, "%256s %256s %llu %u", fname, act, &offset,
333                                                                         &bytes);
334                 if (r == 4) {
335                         /*
336                          * Check action first
337                          */
338                         if (!strcmp(act, "wait"))
339                                 rw = DDIR_WAIT;
340                         else if (!strcmp(act, "read"))
341                                 rw = DDIR_READ;
342                         else if (!strcmp(act, "write"))
343                                 rw = DDIR_WRITE;
344                         else if (!strcmp(act, "sync"))
345                                 rw = DDIR_SYNC;
346                         else if (!strcmp(act, "datasync"))
347                                 rw = DDIR_DATASYNC;
348                         else if (!strcmp(act, "trim"))
349                                 rw = DDIR_TRIM;
350                         else {
351                                 log_err("fio: bad iolog file action: %s\n",
352                                                                         act);
353                                 continue;
354                         }
355                         fileno = get_fileno(td, fname);
356                 } else if (r == 2) {
357                         rw = DDIR_INVAL;
358                         if (!strcmp(act, "add")) {
359                                 fileno = add_file(td, fname, 0, 1);
360                                 file_action = FIO_LOG_ADD_FILE;
361                                 continue;
362                         } else if (!strcmp(act, "open")) {
363                                 fileno = get_fileno(td, fname);
364                                 file_action = FIO_LOG_OPEN_FILE;
365                         } else if (!strcmp(act, "close")) {
366                                 fileno = get_fileno(td, fname);
367                                 file_action = FIO_LOG_CLOSE_FILE;
368                         } else {
369                                 log_err("fio: bad iolog file action: %s\n",
370                                                                         act);
371                                 continue;
372                         }
373                 } else {
374                         log_err("bad iolog2: %s", p);
375                         continue;
376                 }
377
378                 if (rw == DDIR_READ)
379                         reads++;
380                 else if (rw == DDIR_WRITE) {
381                         /*
382                          * Don't add a write for ro mode
383                          */
384                         if (read_only)
385                                 continue;
386                         writes++;
387                 } else if (rw == DDIR_WAIT) {
388                         waits++;
389                 } else if (rw == DDIR_INVAL) {
390                 } else if (!ddir_sync(rw)) {
391                         log_err("bad ddir: %d\n", rw);
392                         continue;
393                 }
394
395                 /*
396                  * Make note of file
397                  */
398                 ipo = malloc(sizeof(*ipo));
399                 init_ipo(ipo);
400                 ipo->ddir = rw;
401                 if (rw == DDIR_WAIT) {
402                         ipo->delay = offset;
403                 } else {
404                         ipo->offset = offset;
405                         ipo->len = bytes;
406                         if (rw != DDIR_INVAL && bytes > td->o.max_bs[rw])
407                                 td->o.max_bs[rw] = bytes;
408                         ipo->fileno = fileno;
409                         ipo->file_action = file_action;
410                         td->o.size += bytes;
411                 }
412
413                 queue_io_piece(td, ipo);
414         }
415
416         free(str);
417         free(act);
418         free(fname);
419
420         if (writes && read_only) {
421                 log_err("fio: <%s> skips replay of %d writes due to"
422                         " read-only\n", td->o.name, writes);
423                 writes = 0;
424         }
425
426         if (!reads && !writes && !waits)
427                 return 1;
428         else if (reads && !writes)
429                 td->o.td_ddir = TD_DDIR_READ;
430         else if (!reads && writes)
431                 td->o.td_ddir = TD_DDIR_WRITE;
432         else
433                 td->o.td_ddir = TD_DDIR_RW;
434
435         return 0;
436 }
437
438 /*
439  * open iolog, check version, and call appropriate parser
440  */
441 static int init_iolog_read(struct thread_data *td)
442 {
443         char buffer[256], *p;
444         FILE *f;
445         int ret;
446
447         f = fopen(td->o.read_iolog_file, "r");
448         if (!f) {
449                 perror("fopen read iolog");
450                 return 1;
451         }
452
453         p = fgets(buffer, sizeof(buffer), f);
454         if (!p) {
455                 td_verror(td, errno, "iolog read");
456                 log_err("fio: unable to read iolog\n");
457                 fclose(f);
458                 return 1;
459         }
460
461         /*
462          * version 2 of the iolog stores a specific string as the
463          * first line, check for that
464          */
465         if (!strncmp(iolog_ver2, buffer, strlen(iolog_ver2)))
466                 ret = read_iolog2(td, f);
467         else {
468                 log_err("fio: iolog version 1 is no longer supported\n");
469                 ret = 1;
470         }
471
472         fclose(f);
473         return ret;
474 }
475
476 /*
477  * Set up a log for storing io patterns.
478  */
479 static int init_iolog_write(struct thread_data *td)
480 {
481         struct fio_file *ff;
482         FILE *f;
483         unsigned int i;
484
485         f = fopen(td->o.write_iolog_file, "a");
486         if (!f) {
487                 perror("fopen write iolog");
488                 return 1;
489         }
490
491         /*
492          * That's it for writing, setup a log buffer and we're done.
493           */
494         td->iolog_f = f;
495         td->iolog_buf = malloc(8192);
496         setvbuf(f, td->iolog_buf, _IOFBF, 8192);
497
498         /*
499          * write our version line
500          */
501         if (fprintf(f, "%s\n", iolog_ver2) < 0) {
502                 perror("iolog init");
503                 return 1;
504         }
505
506         /*
507          * add all known files
508          */
509         for_each_file(td, ff, i)
510                 log_file(td, ff, FIO_LOG_ADD_FILE);
511
512         return 0;
513 }
514
515 int init_iolog(struct thread_data *td)
516 {
517         int ret = 0;
518
519         if (td->o.read_iolog_file) {
520                 int need_swap;
521
522                 /*
523                  * Check if it's a blktrace file and load that if possible.
524                  * Otherwise assume it's a normal log file and load that.
525                  */
526                 if (is_blktrace(td->o.read_iolog_file, &need_swap))
527                         ret = load_blktrace(td, td->o.read_iolog_file, need_swap);
528                 else
529                         ret = init_iolog_read(td);
530         } else if (td->o.write_iolog_file)
531                 ret = init_iolog_write(td);
532
533         if (ret)
534                 td_verror(td, EINVAL, "failed initializing iolog");
535
536         return ret;
537 }
538
539 void setup_log(struct io_log **log, struct log_params *p,
540                const char *filename)
541 {
542         struct io_log *l;
543
544         l = calloc(1, sizeof(*l));
545         l->nr_samples = 0;
546         l->max_samples = 1024;
547         l->log_type = p->log_type;
548         l->log_offset = p->log_offset;
549         l->log_gz = p->log_gz;
550         l->log_gz_store = p->log_gz_store;
551         l->log = malloc(l->max_samples * log_entry_sz(l));
552         l->avg_msec = p->avg_msec;
553         l->filename = strdup(filename);
554         l->td = p->td;
555
556         if (l->log_offset)
557                 l->log_ddir_mask = LOG_OFFSET_SAMPLE_BIT;
558
559         INIT_FLIST_HEAD(&l->chunk_list);
560
561         if (l->log_gz && !p->td)
562                 l->log_gz = 0;
563         else if (l->log_gz) {
564                 pthread_mutex_init(&l->chunk_lock, NULL);
565                 p->td->flags |= TD_F_COMPRESS_LOG;
566         }
567
568         *log = l;
569 }
570
571 #ifdef CONFIG_SETVBUF
572 static void *set_file_buffer(FILE *f)
573 {
574         size_t size = 1048576;
575         void *buf;
576
577         buf = malloc(size);
578         setvbuf(f, buf, _IOFBF, size);
579         return buf;
580 }
581
582 static void clear_file_buffer(void *buf)
583 {
584         free(buf);
585 }
586 #else
587 static void *set_file_buffer(FILE *f)
588 {
589         return NULL;
590 }
591
592 static void clear_file_buffer(void *buf)
593 {
594 }
595 #endif
596
597 void free_log(struct io_log *log)
598 {
599         free(log->log);
600         free(log->filename);
601         free(log);
602 }
603
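/*
 * Write samples out as plain text, one "time, value, ddir, bs" line per
 * sample, with the offset appended as a fifth field when offset logging
 * was enabled (detected via LOG_OFFSET_SAMPLE_BIT in the first sample).
 */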
604 static void flush_samples(FILE *f, void *samples, uint64_t sample_size)
605 {
606         struct io_sample *s;
607         int log_offset;
608         uint64_t i, nr_samples;
609
610         if (!sample_size)
611                 return;
612
613         s = __get_sample(samples, 0, 0);
614         log_offset = (s->__ddir & LOG_OFFSET_SAMPLE_BIT) != 0;
615
616         nr_samples = sample_size / __log_entry_sz(log_offset);
617
618         for (i = 0; i < nr_samples; i++) {
619                 s = __get_sample(samples, log_offset, i);
620
621                 if (!log_offset) {
622                         fprintf(f, "%lu, %lu, %u, %u\n",
623                                         (unsigned long) s->time,
624                                         (unsigned long) s->val,
625                                         io_sample_ddir(s), s->bs);
626                 } else {
627                         struct io_sample_offset *so = (void *) s;
628
629                         fprintf(f, "%lu, %lu, %u, %u, %llu\n",
630                                         (unsigned long) s->time,
631                                         (unsigned long) s->val,
632                                         io_sample_ddir(s), s->bs,
633                                         (unsigned long long) so->offset);
634                 }
635         }
636 }
637
638 #ifdef CONFIG_ZLIB
639
640 struct iolog_flush_data {
641         struct tp_work work;
642         struct io_log *log;
643         void *samples;
644         uint64_t nr_samples;
645 };
646
647 struct iolog_compress {
648         struct flist_head list;
649         void *buf;
650         size_t len;
651         unsigned int seq;
652 };
653
654 #define GZ_CHUNK        131072
655
656 static struct iolog_compress *get_new_chunk(unsigned int seq)
657 {
658         struct iolog_compress *c;
659
660         c = malloc(sizeof(*c));
661         INIT_FLIST_HEAD(&c->list);
662         c->buf = malloc(GZ_CHUNK);
663         c->len = 0;
664         c->seq = seq;
665         return c;
666 }
667
668 static void free_chunk(struct iolog_compress *ic)
669 {
670         free(ic->buf);
671         free(ic);
672 }
673
674 static int z_stream_init(z_stream *stream, int gz_hdr)
675 {
676         int wbits = 15;
677
678         stream->zalloc = Z_NULL;
679         stream->zfree = Z_NULL;
680         stream->opaque = Z_NULL;
681         stream->next_in = Z_NULL;
682
683         /*
684          * zlib magic - add 32 for auto-detection of gz header or not,
685          * if we decide to store files in a gzip friendly format.
686          */
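        /*
         * windowBits 15 selects the maximum 32K history window; per the
         * zlib documentation, adding 32 makes inflateInit2() auto-detect
         * zlib vs gzip framing.
         */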
687         if (gz_hdr)
688                 wbits += 32;
689
690         if (inflateInit2(stream, wbits) != Z_OK)
691                 return 1;
692
693         return 0;
694 }
695
696 struct inflate_chunk_iter {
697         unsigned int seq;
698         int err;
699         void *buf;
700         size_t buf_size;
701         size_t buf_used;
702         size_t chunk_sz;
703 };
704
705 static void finish_chunk(z_stream *stream, FILE *f,
706                          struct inflate_chunk_iter *iter)
707 {
708         int ret;
709
710         ret = inflateEnd(stream);
711         if (ret != Z_OK)
712                 log_err("fio: failed to end log inflation (%d)\n", ret);
713
714         flush_samples(f, iter->buf, iter->buf_used);
715         free(iter->buf);
716         iter->buf = NULL;
717         iter->buf_size = iter->buf_used = 0;
718 }
719
720 /*
721  * Iterative chunk inflation. Handles cases where we cross into a new
722  * sequence, doing flush finish of previous chunk if needed.
723  */
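/*
 * Each gz_work() invocation deflates with its own sequence number, so a
 * change in ic->seq marks the start of a new compressed stream: the
 * previous one is finished and the inflate state is reinitialized.
 */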
724 static size_t inflate_chunk(struct iolog_compress *ic, int gz_hdr, FILE *f,
725                             z_stream *stream, struct inflate_chunk_iter *iter)
726 {
727         size_t ret;
728
729         dprint(FD_COMPRESS, "inflate chunk size=%lu, seq=%u\n",
730                                 (unsigned long) ic->len, ic->seq);
731
732         if (ic->seq != iter->seq) {
733                 if (iter->seq)
734                         finish_chunk(stream, f, iter);
735
736                 z_stream_init(stream, gz_hdr);
737                 iter->seq = ic->seq;
738         }
739
740         stream->avail_in = ic->len;
741         stream->next_in = ic->buf;
742
743         if (!iter->buf_size) {
744                 iter->buf_size = iter->chunk_sz;
745                 iter->buf = malloc(iter->buf_size);
746         }
747
748         while (stream->avail_in) {
749                 size_t this_out = iter->buf_size - iter->buf_used;
750                 int err;
751
752                 stream->avail_out = this_out;
753                 stream->next_out = iter->buf + iter->buf_used;
754
755                 err = inflate(stream, Z_NO_FLUSH);
756                 if (err < 0) {
757                         log_err("fio: failed inflating log: %d\n", err);
758                         iter->err = err;
759                         break;
760                 }
761
762                 iter->buf_used += this_out - stream->avail_out;
763
764                 if (!stream->avail_out) {
765                         iter->buf_size += iter->chunk_sz;
766                         iter->buf = realloc(iter->buf, iter->buf_size);
767                         continue;
768                 }
769
770                 if (err == Z_STREAM_END)
771                         break;
772         }
773
774         ret = (void *) stream->next_in - ic->buf;
775
776         dprint(FD_COMPRESS, "inflated to size=%lu\n", (unsigned long) ret);
777
778         return ret;
779 }
780
781 /*
782  * Inflate stored compressed chunks, or write them directly to the log
783  * file if so instructed.
784  */
785 static int inflate_gz_chunks(struct io_log *log, FILE *f)
786 {
787         struct inflate_chunk_iter iter = { .chunk_sz = log->log_gz, };
788         z_stream stream;
789
790         while (!flist_empty(&log->chunk_list)) {
791                 struct iolog_compress *ic;
792
793                 ic = flist_first_entry(&log->chunk_list, struct iolog_compress, list);
794                 flist_del(&ic->list);
795
796                 if (log->log_gz_store) {
797                         size_t ret;
798
799                         dprint(FD_COMPRESS, "log write chunk size=%lu, "
800                                 "seq=%u\n", (unsigned long) ic->len, ic->seq);
801
802                         ret = fwrite(ic->buf, ic->len, 1, f);
803                         if (ret != 1 || ferror(f)) {
804                                 iter.err = errno;
805                                 log_err("fio: error writing compressed log\n");
806                         }
807                 } else
808                         inflate_chunk(ic, log->log_gz_store, f, &stream, &iter);
809
810                 free_chunk(ic);
811         }
812
813         if (iter.seq) {
814                 finish_chunk(&stream, f, &iter);
815                 free(iter.buf);
816         }
817
818         return iter.err;
819 }
820
821 /*
822  * Open compressed log file and decompress the stored chunks and
823  * write them to stdout. The chunks are stored sequentially in the
824  * file, so we iterate over them and do them one-by-one.
825  */
826 int iolog_file_inflate(const char *file)
827 {
828         struct inflate_chunk_iter iter = { .chunk_sz = 64 * 1024 * 1024, };
829         struct iolog_compress ic;
830         z_stream stream;
831         struct stat sb;
832         ssize_t ret;
833         size_t total;
834         void *buf;
835         FILE *f;
836
837         f = fopen(file, "r");
838         if (!f) {
839                 perror("fopen");
840                 return 1;
841         }
842
843         if (stat(file, &sb) < 0) {
844                 fclose(f);
845                 perror("stat");
846                 return 1;
847         }
848
849         ic.buf = buf = malloc(sb.st_size);
850         ic.len = sb.st_size;
851         ic.seq = 1;
852
853         ret = fread(ic.buf, ic.len, 1, f);
854         if (ret < 0) {
855                 perror("fread");
856                 fclose(f);
857                 return 1;
858         } else if (ret != 1) {
859                 log_err("fio: short read on reading log\n");
860                 fclose(f);
861                 return 1;
862         }
863
864         fclose(f);
865
866         /*
867          * Each chunk will return Z_STREAM_END. We don't know how many
868          * chunks are in the file, so we just keep looping and incrementing
869          * the sequence number until we have consumed the whole compressed
870          * file.
871          */
872         total = ic.len;
873         do {
874                 size_t ret;
875
876                 ret = inflate_chunk(&ic, 1, stdout, &stream, &iter);
877                 total -= ret;
878                 if (!total)
879                         break;
880                 if (iter.err)
881                         break;
882
883                 ic.seq++;
884                 ic.len -= ret;
885                 ic.buf += ret;
886         } while (1);
887
888         if (iter.seq) {
889                 finish_chunk(&stream, stdout, &iter);
890                 free(iter.buf);
891         }
892
893         free(buf);
894         return iter.err;
895 }
896
897 #else
898
899 static int inflate_gz_chunks(struct io_log *log, FILE *f)
900 {
901         return 0;
902 }
903
904 int iolog_file_inflate(const char *file)
905 {
906         log_err("fio: log inflation not possible without zlib\n");
907         return 1;
908 }
909
910 #endif
911
912 void flush_log(struct io_log *log)
913 {
914         void *buf;
915         FILE *f;
916
917         f = fopen(log->filename, "w");
918         if (!f) {
919                 perror("fopen log");
920                 return;
921         }
922
923         buf = set_file_buffer(f);
924
925         inflate_gz_chunks(log, f);
926
927         flush_samples(f, log->log, log->nr_samples * log_entry_sz(log));
928
929         fclose(f);
930         clear_file_buffer(buf);
931 }
932
933 static int finish_log(struct thread_data *td, struct io_log *log, int trylock)
934 {
935         if (td->tp_data)
936                 iolog_flush(log, 1);
937
938         if (trylock) {
939                 if (fio_trylock_file(log->filename))
940                         return 1;
941         } else
942                 fio_lock_file(log->filename);
943
944         if (td->client_type == FIO_CLIENT_TYPE_GUI)
945                 fio_send_iolog(td, log, log->filename);
946         else
947                 flush_log(log);
948
949         fio_unlock_file(log->filename);
950         free_log(log);
951         return 0;
952 }
953
954 #ifdef CONFIG_ZLIB
955
956 /*
957  * Invoked from our compress helper thread, when logging would have exceeded
958  * the specified memory limitation. Compresses the previously stored
959  * entries.
960  */
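/*
 * The samples are deflated into GZ_CHUNK sized iolog_compress chunks and
 * spliced onto log->chunk_list under chunk_lock; inflate_gz_chunks() later
 * decompresses them (or writes them out verbatim if log_gz_store is set).
 */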
961 static int gz_work(struct tp_work *work)
962 {
963         struct iolog_flush_data *data;
964         struct iolog_compress *c;
965         struct flist_head list;
966         unsigned int seq;
967         z_stream stream;
968         size_t total = 0;
969         int ret;
970
971         INIT_FLIST_HEAD(&list);
972
973         data = container_of(work, struct iolog_flush_data, work);
974
975         stream.zalloc = Z_NULL;
976         stream.zfree = Z_NULL;
977         stream.opaque = Z_NULL;
978
979         ret = deflateInit(&stream, Z_DEFAULT_COMPRESSION);
980         if (ret != Z_OK) {
981                 log_err("fio: failed to init gz stream\n");
982                 return 0;
983         }
984
985         seq = ++data->log->chunk_seq;
986
987         stream.next_in = (void *) data->samples;
988         stream.avail_in = data->nr_samples * log_entry_sz(data->log);
989
990         dprint(FD_COMPRESS, "deflate input size=%lu, seq=%u\n",
991                                 (unsigned long) stream.avail_in, seq);
992         do {
993                 c = get_new_chunk(seq);
994                 stream.avail_out = GZ_CHUNK;
995                 stream.next_out = c->buf;
996                 ret = deflate(&stream, Z_NO_FLUSH);
997                 if (ret < 0) {
998                         log_err("fio: deflate log (%d)\n", ret);
999                         free_chunk(c);
1000                         goto err;
1001                 }
1002
1003                 c->len = GZ_CHUNK - stream.avail_out;
1004                 flist_add_tail(&c->list, &list);
1005                 total += c->len;
1006         } while (stream.avail_in);
1007
1008         stream.next_out = c->buf + c->len;
1009         stream.avail_out = GZ_CHUNK - c->len;
1010
1011         ret = deflate(&stream, Z_FINISH);
1012         if (ret == Z_STREAM_END)
1013                 c->len = GZ_CHUNK - stream.avail_out;
1014         else {
1015                 do {
1016                         c = get_new_chunk(seq);
1017                         stream.avail_out = GZ_CHUNK;
1018                         stream.next_out = c->buf;
1019                         ret = deflate(&stream, Z_FINISH);
1020                         c->len = GZ_CHUNK - stream.avail_out;
1021                         total += c->len;
1022                         flist_add_tail(&c->list, &list);
1023                 } while (ret != Z_STREAM_END);
1024         }
1025
1026         dprint(FD_COMPRESS, "deflated to size=%lu\n", (unsigned long) total);
1027
1028         ret = deflateEnd(&stream);
1029         if (ret != Z_OK)
1030                 log_err("fio: deflateEnd %d\n", ret);
1031
1032         free(data->samples);
1033
1034         if (!flist_empty(&list)) {
1035                 pthread_mutex_lock(&data->log->chunk_lock);
1036                 flist_splice_tail(&list, &data->log->chunk_list);
1037                 pthread_mutex_unlock(&data->log->chunk_lock);
1038         }
1039
1040         ret = 0;
1041 done:
1042         if (work->wait) {
1043                 work->done = 1;
1044                 pthread_cond_signal(&work->cv);
1045         } else
1046                 free(data);
1047
1048         return ret;
1049 err:
1050         while (!flist_empty(&list)) {
1051                 c = flist_first_entry(&list, struct iolog_compress, list);
1052                 flist_del(&c->list);
1053                 free_chunk(c);
1054         }
1055         ret = 1;
1056         goto done;
1057 }
1058
1059 /*
1060  * Queue work item to compress the existing log entries. We copy the
1061  * samples, and reset the log sample count to 0 (so the logging will
1062  * continue to use the memory associated with the log). If called with
1063  * wait == 1, will not return until the log compression has completed.
1064  */
1065 int iolog_flush(struct io_log *log, int wait)
1066 {
1067         struct tp_data *tdat = log->td->tp_data;
1068         struct iolog_flush_data *data;
1069         size_t sample_size;
1070
1071         data = malloc(sizeof(*data));
1072         if (!data)
1073                 return 1;
1074
1075         data->log = log;
1076
1077         sample_size = log->nr_samples * log_entry_sz(log);
1078         data->samples = malloc(sample_size);
1079         if (!data->samples) {
1080                 free(data);
1081                 return 1;
1082         }
1083
1084         memcpy(data->samples, log->log, sample_size);
1085         data->nr_samples = log->nr_samples;
1086         data->work.fn = gz_work;
1087         log->nr_samples = 0;
1088
1089         if (wait) {
1090                 pthread_mutex_init(&data->work.lock, NULL);
1091                 pthread_cond_init(&data->work.cv, NULL);
1092                 data->work.wait = 1;
1093         } else
1094                 data->work.wait = 0;
1095
1096         data->work.prio = 1;
1097         tp_queue_work(tdat, &data->work);
1098
1099         if (wait) {
1100                 pthread_mutex_lock(&data->work.lock);
1101                 while (!data->work.done)
1102                         pthread_cond_wait(&data->work.cv, &data->work.lock);
1103                 pthread_mutex_unlock(&data->work.lock);
1104                 free(data);
1105         }
1106
1107         return 0;
1108 }
1109
1110 #else
1111
1112 int iolog_flush(struct io_log *log, int wait)
1113 {
1114         return 1;
1115 }
1116
1117 #endif
1118
1119 static int write_iops_log(struct thread_data *td, int try)
1120 {
1121         struct io_log *log = td->iops_log;
1122
1123         if (!log)
1124                 return 0;
1125
1126         return finish_log(td, log, try);
1127 }
1128
1129 static int write_slat_log(struct thread_data *td, int try)
1130 {
1131         struct io_log *log = td->slat_log;
1132
1133         if (!log)
1134                 return 0;
1135
1136         return finish_log(td, log, try);
1137 }
1138
1139 static int write_clat_log(struct thread_data *td, int try)
1140 {
1141         struct io_log *log = td->clat_log;
1142
1143         if (!log)
1144                 return 0;
1145
1146         return finish_log(td, log, try);
1147 }
1148
1149 static int write_lat_log(struct thread_data *td, int try)
1150 {
1151         struct io_log *log = td->lat_log;
1152
1153         if (!log)
1154                 return 0;
1155
1156         return finish_log(td, log, try);
1157 }
1158
1159 static int write_bandw_log(struct thread_data *td, int try)
1160 {
1161         struct io_log *log = td->bw_log;
1162
1163         if (!log)
1164                 return 0;
1165
1166         return finish_log(td, log, try);
1167 }
1168
1169 enum {
1170         BW_LOG_MASK     = 1,
1171         LAT_LOG_MASK    = 2,
1172         SLAT_LOG_MASK   = 4,
1173         CLAT_LOG_MASK   = 8,
1174         IOPS_LOG_MASK   = 16,
1175
1176         ALL_LOG_NR      = 5,
1177 };
1178
1179 struct log_type {
1180         unsigned int mask;
1181         int (*fn)(struct thread_data *, int);
1182 };
1183
1184 static struct log_type log_types[] = {
1185         {
1186                 .mask   = BW_LOG_MASK,
1187                 .fn     = write_bandw_log,
1188         },
1189         {
1190                 .mask   = LAT_LOG_MASK,
1191                 .fn     = write_lat_log,
1192         },
1193         {
1194                 .mask   = SLAT_LOG_MASK,
1195                 .fn     = write_slat_log,
1196         },
1197         {
1198                 .mask   = CLAT_LOG_MASK,
1199                 .fn     = write_clat_log,
1200         },
1201         {
1202                 .mask   = IOPS_LOG_MASK,
1203                 .fn     = write_iops_log,
1204         },
1205 };
1206
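/*
 * Flush all enabled logs for this thread. While more than one log remains,
 * each writer only trylocks the log file so progress can be made elsewhere;
 * the last remaining log blocks on the lock. A pass that makes no progress
 * backs off for 5 msec before retrying.
 */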
1207 void fio_writeout_logs(struct thread_data *td)
1208 {
1209         unsigned int log_mask = 0;
1210         unsigned int log_left = ALL_LOG_NR;
1211         int old_state, i;
1212
1213         old_state = td_bump_runstate(td, TD_FINISHING);
1214
1215         finalize_logs(td);
1216
1217         while (log_left) {
1218                 int prev_log_left = log_left;
1219
1220                 for (i = 0; i < ALL_LOG_NR && log_left; i++) {
1221                         struct log_type *lt = &log_types[i];
1222                         int ret;
1223
1224                         if (!(log_mask & lt->mask)) {
1225                                 ret = lt->fn(td, log_left != 1);
1226                                 if (!ret) {
1227                                         log_left--;
1228                                         log_mask |= lt->mask;
1229                                 }
1230                         }
1231                 }
1232
1233                 if (prev_log_left == log_left)
1234                         usleep(5000);
1235         }
1236
1237         td_restore_runstate(td, old_state);
1238 }