[fio.git] / iolog.c
1/*
2 * Code related to writing an iolog of what a thread is doing, and to
3 * later read that back and replay
4 */
5#include <stdio.h>
6#include <stdlib.h>
7#include <assert.h>
8#include <sys/types.h>
9#include <sys/stat.h>
10#include <unistd.h>
11#ifdef CONFIG_ZLIB
12#include <zlib.h>
13#endif
14
15#include "flist.h"
16#include "fio.h"
17#include "trim.h"
18#include "filelock.h"
19#include "smalloc.h"
20#include "blktrace.h"
21#include "pshared.h"
22
23#include <netinet/in.h>
24#include <netinet/tcp.h>
25#include <arpa/inet.h>
26#include <sys/stat.h>
27#include <sys/socket.h>
28#include <sys/un.h>
29
30static int iolog_flush(struct io_log *log);
31
32static const char iolog_ver2[] = "fio version 2 iolog";
33
34void queue_io_piece(struct thread_data *td, struct io_piece *ipo)
35{
36 flist_add_tail(&ipo->list, &td->io_log_list);
37 td->total_io_size += ipo->len;
38}
39
40void log_io_u(const struct thread_data *td, const struct io_u *io_u)
41{
42 if (!td->o.write_iolog_file)
43 return;
44
45 fprintf(td->iolog_f, "%s %s %llu %llu\n", io_u->file->file_name,
46 io_ddir_name(io_u->ddir),
47 io_u->offset, io_u->buflen);
48}
49
50void log_file(struct thread_data *td, struct fio_file *f,
51 enum file_log_act what)
52{
53 const char *act[] = { "add", "open", "close" };
54
55 assert(what < 3);
56
57 if (!td->o.write_iolog_file)
58 return;
59
60
61 /*
62 * this happens on the pre-open/close done before the job starts
63 */
64 if (!td->iolog_f)
65 return;
66
67 fprintf(td->iolog_f, "%s %s\n", f->file_name, act[what]);
68}
69
70static void iolog_delay(struct thread_data *td, unsigned long delay)
71{
72 uint64_t usec = utime_since_now(&td->last_issue);
73 unsigned long orig_delay = delay;
74 uint64_t this_delay;
75 struct timespec ts;
76
77 if (delay < td->time_offset) {
78 td->time_offset = 0;
79 return;
80 }
81
82 delay -= td->time_offset;
83 if (delay < usec)
84 return;
85
86 delay -= usec;
87
88 fio_gettime(&ts, NULL);
89 while (delay && !td->terminate) {
90 this_delay = delay;
91 if (this_delay > 500000)
92 this_delay = 500000;
93
94 usec_sleep(td, this_delay);
95 delay -= this_delay;
96 }
97
98 usec = utime_since_now(&ts);
99 if (usec > orig_delay)
100 td->time_offset = usec - orig_delay;
101 else
102 td->time_offset = 0;
103}
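/*
 * Worked example of the delay handling above (numbers are made up for
 * illustration): if the iolog asks for a 100 usec delay but 30 usec have
 * already passed since the last issue, we only sleep for the remaining
 * ~70 usec. The time actually spent sleeping is then compared against the
 * originally requested delay, and any overshoot is parked in
 * td->time_offset so it can be deducted from the next replay delay.
 */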
104
105static int ipo_special(struct thread_data *td, struct io_piece *ipo)
106{
107 struct fio_file *f;
108 int ret;
109
110 /*
111 * Not a special ipo
112 */
113 if (ipo->ddir != DDIR_INVAL)
114 return 0;
115
116 f = td->files[ipo->fileno];
117
118 switch (ipo->file_action) {
119 case FIO_LOG_OPEN_FILE:
120 if (td->o.replay_redirect && fio_file_open(f)) {
121 dprint(FD_FILE, "iolog: ignoring re-open of file %s\n",
122 f->file_name);
123 break;
124 }
125 ret = td_io_open_file(td, f);
126 if (!ret)
127 break;
128 td_verror(td, ret, "iolog open file");
129 return -1;
130 case FIO_LOG_CLOSE_FILE:
131 td_io_close_file(td, f);
132 break;
133 case FIO_LOG_UNLINK_FILE:
134 td_io_unlink_file(td, f);
135 break;
136 default:
137 log_err("fio: bad file action %d\n", ipo->file_action);
138 break;
139 }
140
141 return 1;
142}
143
144static bool read_iolog2(struct thread_data *td);
145
146int read_iolog_get(struct thread_data *td, struct io_u *io_u)
147{
148 struct io_piece *ipo;
149 unsigned long elapsed;
150
151 while (!flist_empty(&td->io_log_list)) {
152 int ret;
153 if (td->o.read_iolog_chunked) {
154 if (td->io_log_checkmark == td->io_log_current) {
155 if (!read_iolog2(td))
156 return 1;
157 }
158 td->io_log_current--;
159 }
160 ipo = flist_first_entry(&td->io_log_list, struct io_piece, list);
161 flist_del(&ipo->list);
162 remove_trim_entry(td, ipo);
163
164 ret = ipo_special(td, ipo);
165 if (ret < 0) {
166 free(ipo);
167 break;
168 } else if (ret > 0) {
169 free(ipo);
170 continue;
171 }
172
173 io_u->ddir = ipo->ddir;
174 if (ipo->ddir != DDIR_WAIT) {
175 io_u->offset = ipo->offset;
176 io_u->buflen = ipo->len;
177 io_u->file = td->files[ipo->fileno];
178 get_file(io_u->file);
179 dprint(FD_IO, "iolog: get %llu/%llu/%s\n", io_u->offset,
180 io_u->buflen, io_u->file->file_name);
181 if (ipo->delay)
182 iolog_delay(td, ipo->delay);
183 } else {
184 elapsed = mtime_since_genesis();
185 if (ipo->delay > elapsed)
186 usec_sleep(td, (ipo->delay - elapsed) * 1000);
187 }
188
189 free(ipo);
190
191 if (io_u->ddir != DDIR_WAIT)
192 return 0;
193 }
194
195 td->done = 1;
196 return 1;
197}
198
199void prune_io_piece_log(struct thread_data *td)
200{
201 struct io_piece *ipo;
202 struct fio_rb_node *n;
203
204 while ((n = rb_first(&td->io_hist_tree)) != NULL) {
205 ipo = rb_entry(n, struct io_piece, rb_node);
206 rb_erase(n, &td->io_hist_tree);
207 remove_trim_entry(td, ipo);
208 td->io_hist_len--;
209 free(ipo);
210 }
211
212 while (!flist_empty(&td->io_hist_list)) {
213 ipo = flist_first_entry(&td->io_hist_list, struct io_piece, list);
214 flist_del(&ipo->list);
215 remove_trim_entry(td, ipo);
216 td->io_hist_len--;
217 free(ipo);
218 }
219}
220
221/*
222 * log a successful write, so we can unwind the log for verify
223 */
224void log_io_piece(struct thread_data *td, struct io_u *io_u)
225{
226 struct fio_rb_node **p, *parent;
227 struct io_piece *ipo, *__ipo;
228
229 ipo = calloc(1, sizeof(struct io_piece));
230 init_ipo(ipo);
231 ipo->file = io_u->file;
232 ipo->offset = io_u->offset;
233 ipo->len = io_u->buflen;
234 ipo->numberio = io_u->numberio;
235 ipo->flags = IP_F_IN_FLIGHT;
236
237 io_u->ipo = ipo;
238
239 if (io_u_should_trim(td, io_u)) {
240 flist_add_tail(&ipo->trim_list, &td->trim_list);
241 td->trim_entries++;
242 }
243
244 /*
245  * Only sort writes if we don't have a random map. In that case we
246  * need to check for duplicate blocks and drop the old one, which we
247  * rely on the rb insert/lookup for handling.
248  */
249 if (file_randommap(td, ipo->file)) {
250 INIT_FLIST_HEAD(&ipo->list);
251 flist_add_tail(&ipo->list, &td->io_hist_list);
252 ipo->flags |= IP_F_ONLIST;
253 td->io_hist_len++;
254 return;
255 }
256
257 RB_CLEAR_NODE(&ipo->rb_node);
258
259 /*
260 * Sort the entry into the verification list
261 */
262restart:
263 p = &td->io_hist_tree.rb_node;
264 parent = NULL;
265 while (*p) {
266 int overlap = 0;
267 parent = *p;
268
269 __ipo = rb_entry(parent, struct io_piece, rb_node);
270 if (ipo->file < __ipo->file)
271 p = &(*p)->rb_left;
272 else if (ipo->file > __ipo->file)
273 p = &(*p)->rb_right;
274 else if (ipo->offset < __ipo->offset) {
275 p = &(*p)->rb_left;
276 overlap = ipo->offset + ipo->len > __ipo->offset;
277 }
278 else if (ipo->offset > __ipo->offset) {
279 p = &(*p)->rb_right;
280 overlap = __ipo->offset + __ipo->len > ipo->offset;
281 }
282 else
283 overlap = 1;
284
285 if (overlap) {
286 dprint(FD_IO, "iolog: overlap %llu/%lu, %llu/%lu\n",
287 __ipo->offset, __ipo->len,
288 ipo->offset, ipo->len);
289 td->io_hist_len--;
290 rb_erase(parent, &td->io_hist_tree);
291 remove_trim_entry(td, __ipo);
292 if (!(__ipo->flags & IP_F_IN_FLIGHT))
293 free(__ipo);
294 goto restart;
295 }
296 }
297
298 rb_link_node(&ipo->rb_node, parent, p);
299 rb_insert_color(&ipo->rb_node, &td->io_hist_tree);
300 ipo->flags |= IP_F_ONRB;
301 td->io_hist_len++;
302}
303
304void unlog_io_piece(struct thread_data *td, struct io_u *io_u)
305{
306 struct io_piece *ipo = io_u->ipo;
307
308 if (td->ts.nr_block_infos) {
309 uint32_t *info = io_u_block_info(td, io_u);
310 if (BLOCK_INFO_STATE(*info) < BLOCK_STATE_TRIM_FAILURE) {
311 if (io_u->ddir == DDIR_TRIM)
312 *info = BLOCK_INFO_SET_STATE(*info,
313 BLOCK_STATE_TRIM_FAILURE);
314 else if (io_u->ddir == DDIR_WRITE)
315 *info = BLOCK_INFO_SET_STATE(*info,
316 BLOCK_STATE_WRITE_FAILURE);
317 }
318 }
319
320 if (!ipo)
321 return;
322
323 if (ipo->flags & IP_F_ONRB)
324 rb_erase(&ipo->rb_node, &td->io_hist_tree);
325 else if (ipo->flags & IP_F_ONLIST)
326 flist_del(&ipo->list);
327
328 free(ipo);
329 io_u->ipo = NULL;
330 td->io_hist_len--;
331}
332
333void trim_io_piece(const struct io_u *io_u)
334{
335 struct io_piece *ipo = io_u->ipo;
336
337 if (!ipo)
338 return;
339
340 ipo->len = io_u->xfer_buflen - io_u->resid;
341}
342
343void write_iolog_close(struct thread_data *td)
344{
345 if (!td->iolog_f)
346 return;
347
348 fflush(td->iolog_f);
349 fclose(td->iolog_f);
350 free(td->iolog_buf);
351 td->iolog_f = NULL;
352 td->iolog_buf = NULL;
353}
354
355static int64_t iolog_items_to_fetch(struct thread_data *td)
356{
357 struct timespec now;
358 uint64_t elapsed;
359 uint64_t for_1s;
360 int64_t items_to_fetch;
361
362 if (!td->io_log_highmark)
363 return 10;
364
365
366 fio_gettime(&now, NULL);
367 elapsed = ntime_since(&td->io_log_highmark_time, &now);
368 if (elapsed) {
369 for_1s = (td->io_log_highmark - td->io_log_current) * 1000000000 / elapsed;
370 items_to_fetch = for_1s - td->io_log_current;
371 if (items_to_fetch < 0)
372 items_to_fetch = 0;
373 } else
374 items_to_fetch = 0;
375
376 td->io_log_highmark = td->io_log_current + items_to_fetch;
377 td->io_log_checkmark = (td->io_log_highmark + 1) / 2;
378 fio_gettime(&td->io_log_highmark_time, NULL);
379
380 return items_to_fetch;
381}
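/*
 * Rough example of the sizing logic in iolog_items_to_fetch() above
 * (numbers are made up): if the queue was last topped up to a highmark of
 * 1000 entries and 250 of them were consumed in 0.5s, the projected
 * one-second consumption is 500 entries; 500 minus the 750 still queued is
 * negative, so nothing extra is fetched yet. Only once the projected
 * consumption exceeds what is left in the queue is the difference fetched.
 */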
382
383/*
384 * Read version 2 iolog data. It is enhanced to include per-file logging,
385 * syncs, etc.
386 */
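/*
 * For reference, a minimal version 2 iolog might look like this (the
 * device name and sizes below are only an example):
 *
 *	fio version 2 iolog
 *	/dev/sdb add
 *	/dev/sdb open
 *	/dev/sdb write 0 4096
 *	/dev/sdb read 4096 4096
 *	/dev/sdb close
 *
 * File actions ("add", "open", "close") take no further arguments, while
 * I/O actions ("read", "write", "trim", "sync", "datasync", "wait") are
 * followed by an offset and a byte count, matching the sscanf() below.
 */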
387static bool read_iolog2(struct thread_data *td)
388{
389 unsigned long long offset;
390 unsigned int bytes;
391 int reads, writes, waits, fileno = 0, file_action = 0; /* stupid gcc */
392 char *rfname, *fname, *act;
393 char *str, *p;
394 enum fio_ddir rw;
395 bool realloc = false;
396 int64_t items_to_fetch = 0;
397
398 if (td->o.read_iolog_chunked) {
399 items_to_fetch = iolog_items_to_fetch(td);
400 if (!items_to_fetch)
401 return true;
402 }
403
404 /*
405  * Read in the replay iolog and store it, reusing the verify
406  * infrastructure to keep track of the entries.
407 */
408 str = malloc(4096);
409 rfname = fname = malloc(256+16);
410 act = malloc(256+16);
411
412 reads = writes = waits = 0;
413 while ((p = fgets(str, 4096, td->io_log_rfile)) != NULL) {
414 struct io_piece *ipo;
415 int r;
416
417 r = sscanf(p, "%256s %256s %llu %u", rfname, act, &offset,
418 &bytes);
419
420 if (td->o.replay_redirect)
421 fname = td->o.replay_redirect;
422
423 if (r == 4) {
424 /*
425 * Check action first
426 */
427 if (!strcmp(act, "wait"))
428 rw = DDIR_WAIT;
429 else if (!strcmp(act, "read"))
430 rw = DDIR_READ;
431 else if (!strcmp(act, "write"))
432 rw = DDIR_WRITE;
433 else if (!strcmp(act, "sync"))
434 rw = DDIR_SYNC;
435 else if (!strcmp(act, "datasync"))
436 rw = DDIR_DATASYNC;
437 else if (!strcmp(act, "trim"))
438 rw = DDIR_TRIM;
439 else {
440 log_err("fio: bad iolog file action: %s\n",
441 act);
442 continue;
443 }
444 fileno = get_fileno(td, fname);
445 } else if (r == 2) {
446 rw = DDIR_INVAL;
447 if (!strcmp(act, "add")) {
448 if (td->o.replay_redirect &&
449 get_fileno(td, fname) != -1) {
450 dprint(FD_FILE, "iolog: ignoring"
451 " re-add of file %s\n", fname);
452 } else {
453 fileno = add_file(td, fname, td->subjob_number, 1);
454 file_action = FIO_LOG_ADD_FILE;
455 }
456 continue;
457 } else if (!strcmp(act, "open")) {
458 fileno = get_fileno(td, fname);
459 file_action = FIO_LOG_OPEN_FILE;
460 } else if (!strcmp(act, "close")) {
461 fileno = get_fileno(td, fname);
462 file_action = FIO_LOG_CLOSE_FILE;
463 } else {
464 log_err("fio: bad iolog file action: %s\n",
465 act);
466 continue;
467 }
468 } else {
469 log_err("bad iolog2: %s\n", p);
470 continue;
471 }
472
473 if (rw == DDIR_READ)
474 reads++;
475 else if (rw == DDIR_WRITE) {
476 /*
477 * Don't add a write for ro mode
478 */
479 if (read_only)
480 continue;
481 writes++;
482 } else if (rw == DDIR_WAIT) {
483 if (td->o.no_stall)
484 continue;
485 waits++;
486 } else if (rw == DDIR_INVAL) {
487 } else if (!ddir_sync(rw)) {
488 log_err("bad ddir: %d\n", rw);
489 continue;
490 }
491
492 /*
493 * Make note of file
494 */
495 ipo = calloc(1, sizeof(*ipo));
496 init_ipo(ipo);
497 ipo->ddir = rw;
498 if (rw == DDIR_WAIT) {
499 ipo->delay = offset;
500 } else {
501 if (td->o.replay_scale)
502 ipo->offset = offset / td->o.replay_scale;
503 else
504 ipo->offset = offset;
505 ipo_bytes_align(td->o.replay_align, ipo);
506
507 ipo->len = bytes;
 /* sync ddirs carry no block size, so don't index max_bs[] with them */
508 if (rw != DDIR_INVAL && !ddir_sync(rw) && bytes > td->o.max_bs[rw]) {
509 realloc = true;
510 td->o.max_bs[rw] = bytes;
511 }
512 ipo->fileno = fileno;
513 ipo->file_action = file_action;
514 td->o.size += bytes;
515 }
516
517 queue_io_piece(td, ipo);
518
519 if (td->o.read_iolog_chunked) {
520 td->io_log_current++;
521 items_to_fetch--;
522 if (items_to_fetch == 0)
523 break;
524 }
525 }
526
527 free(str);
528 free(act);
529 free(rfname);
530
531 if (td->o.read_iolog_chunked) {
532 td->io_log_highmark = td->io_log_current;
533 td->io_log_checkmark = (td->io_log_highmark + 1) / 2;
534 fio_gettime(&td->io_log_highmark_time, NULL);
535 }
536
537 if (writes && read_only) {
538 log_err("fio: <%s> skips replay of %d writes due to"
539 " read-only\n", td->o.name, writes);
540 writes = 0;
541 }
542
543 if (td->o.read_iolog_chunked) {
544 if (td->io_log_current == 0) {
545 return false;
546 }
547 td->o.td_ddir = TD_DDIR_RW;
548 if (realloc && td->orig_buffer) {
550 io_u_quiesce(td);
551 free_io_mem(td);
552 init_io_u_buffers(td);
553 }
554 return true;
555 }
556
557 if (!reads && !writes && !waits)
558 return false;
559 else if (reads && !writes)
560 td->o.td_ddir = TD_DDIR_READ;
561 else if (!reads && writes)
562 td->o.td_ddir = TD_DDIR_WRITE;
563 else
564 td->o.td_ddir = TD_DDIR_RW;
565
566 return true;
567}
568
569static bool is_socket(const char *path)
570{
571 struct stat buf;
572 int r;
573
574 r = stat(path, &buf);
575 if (r == -1)
576 return false;
577
578 return S_ISSOCK(buf.st_mode);
579}
580
581static int open_socket(const char *path)
582{
583 struct sockaddr_un addr;
584 int ret, fd;
585
586 fd = socket(AF_UNIX, SOCK_STREAM, 0);
587 if (fd < 0)
588 return fd;
589
590 addr.sun_family = AF_UNIX;
591 if (snprintf(addr.sun_path, sizeof(addr.sun_path), "%s", path) >=
592 sizeof(addr.sun_path)) {
593 log_err("%s: path name %s is too long for a Unix socket\n",
594 __func__, path);
595 }
596
597 ret = connect(fd, (const struct sockaddr *)&addr, strlen(path) + sizeof(addr.sun_family));
598 if (!ret)
599 return fd;
600
601 close(fd);
602 return -1;
603}
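/*
 * Note that the replay log doesn't have to be a regular file - if the
 * given path is a Unix domain socket (see is_socket()/open_socket()
 * above), the iolog entries are read from whatever is serving that
 * socket. A producer could, for instance, listen with a tool like socat
 * or "nc -lU <path>" and stream iolog lines into fio; the exact tool and
 * path here are only an illustration, not something fio mandates.
 */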
604
605/*
606 * open iolog, check version, and call appropriate parser
607 */
608static bool init_iolog_read(struct thread_data *td)
609{
610 char buffer[256], *p, *fname;
611 FILE *f = NULL;
612
613 fname = get_name_by_idx(td->o.read_iolog_file, td->subjob_number);
614 dprint(FD_IO, "iolog: name=%s\n", fname);
615
616 if (is_socket(fname)) {
617 int fd;
618
619 fd = open_socket(fname);
620 if (fd >= 0)
621 f = fdopen(fd, "r");
622 } else
623 f = fopen(fname, "r");
624
625 free(fname);
626
627 if (!f) {
628 perror("fopen read iolog");
629 return false;
630 }
631
632 p = fgets(buffer, sizeof(buffer), f);
633 if (!p) {
634 td_verror(td, errno, "iolog read");
635 log_err("fio: unable to read iolog\n");
636 fclose(f);
637 return false;
638 }
639
640 /*
641 * version 2 of the iolog stores a specific string as the
642 * first line, check for that
643 */
644 if (!strncmp(iolog_ver2, buffer, strlen(iolog_ver2))) {
645 free_release_files(td);
646 td->io_log_rfile = f;
647 return read_iolog2(td);
648 }
649
650 log_err("fio: iolog version 1 is no longer supported\n");
651 fclose(f);
652 return false;
653}
654
655/*
656 * Set up a log for storing io patterns.
657 */
658static bool init_iolog_write(struct thread_data *td)
659{
660 struct fio_file *ff;
661 FILE *f;
662 unsigned int i;
663
664 f = fopen(td->o.write_iolog_file, "a");
665 if (!f) {
666 perror("fopen write iolog");
667 return false;
668 }
669
670 /*
671  * That's it for writing; set up a log buffer and we're done.
672 */
673 td->iolog_f = f;
674 td->iolog_buf = malloc(8192);
675 setvbuf(f, td->iolog_buf, _IOFBF, 8192);
676
677 /*
678 * write our version line
679 */
680 if (fprintf(f, "%s\n", iolog_ver2) < 0) {
681 perror("iolog init\n");
682 return false;
683 }
684
685 /*
686 * add all known files
687 */
688 for_each_file(td, ff, i)
689 log_file(td, ff, FIO_LOG_ADD_FILE);
690
691 return true;
692}
693
694bool init_iolog(struct thread_data *td)
695{
696 bool ret;
697
698 if (td->o.read_iolog_file) {
699 int need_swap;
700
701 /*
702 * Check if it's a blktrace file and load that if possible.
703 * Otherwise assume it's a normal log file and load that.
704 */
705 if (is_blktrace(td->o.read_iolog_file, &need_swap))
706 ret = load_blktrace(td, td->o.read_iolog_file, need_swap);
707 else
708 ret = init_iolog_read(td);
709 } else if (td->o.write_iolog_file)
710 ret = init_iolog_write(td);
711 else
712 ret = true;
713
714 if (!ret)
715 td_verror(td, EINVAL, "failed initializing iolog");
716
717 return ret;
718}
719
720void setup_log(struct io_log **log, struct log_params *p,
721 const char *filename)
722{
723 struct io_log *l;
724 int i;
725 struct io_u_plat_entry *entry;
726 struct flist_head *list;
727
728 l = scalloc(1, sizeof(*l));
729 INIT_FLIST_HEAD(&l->io_logs);
730 l->log_type = p->log_type;
731 l->log_offset = p->log_offset;
732 l->log_gz = p->log_gz;
733 l->log_gz_store = p->log_gz_store;
734 l->avg_msec = p->avg_msec;
735 l->hist_msec = p->hist_msec;
736 l->hist_coarseness = p->hist_coarseness;
737 l->filename = strdup(filename);
738 l->td = p->td;
739
740 /* Initialize histogram lists for each r/w direction,
741 * with initial io_u_plat of all zeros:
742 */
743 for (i = 0; i < DDIR_RWDIR_CNT; i++) {
744 list = &l->hist_window[i].list;
745 INIT_FLIST_HEAD(list);
746 entry = calloc(1, sizeof(struct io_u_plat_entry));
747 flist_add(&entry->list, list);
748 }
749
750 if (l->td && l->td->o.io_submit_mode != IO_MODE_OFFLOAD) {
751 struct io_logs *__p;
752
753 __p = calloc(1, sizeof(*l->pending));
754 __p->max_samples = DEF_LOG_ENTRIES;
755 __p->log = calloc(__p->max_samples, log_entry_sz(l));
756 l->pending = __p;
757 }
758
759 if (l->log_offset)
760 l->log_ddir_mask = LOG_OFFSET_SAMPLE_BIT;
761
762 INIT_FLIST_HEAD(&l->chunk_list);
763
764 if (l->log_gz && !p->td)
765 l->log_gz = 0;
766 else if (l->log_gz || l->log_gz_store) {
767 mutex_init_pshared(&l->chunk_lock);
768 mutex_init_pshared(&l->deferred_free_lock);
769 p->td->flags |= TD_F_COMPRESS_LOG;
770 }
771
772 *log = l;
773}
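/*
 * A rough sketch of how a caller might fill in log_params for this
 * function; the concrete values and the lat_log target are purely
 * illustrative:
 *
 *	struct log_params p = {
 *		.td = td,
 *		.avg_msec = 1000,
 *		.hist_msec = 0,
 *		.hist_coarseness = 0,
 *		.log_type = IO_LOG_TYPE_LAT,
 *		.log_offset = 0,
 *		.log_gz = 0,
 *		.log_gz_store = 0,
 *	};
 *
 *	setup_log(&td->lat_log, &p, "example_lat.log");
 */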
774
775#ifdef CONFIG_SETVBUF
776static void *set_file_buffer(FILE *f)
777{
778 size_t size = 1048576;
779 void *buf;
780
781 buf = malloc(size);
782 setvbuf(f, buf, _IOFBF, size);
783 return buf;
784}
785
786static void clear_file_buffer(void *buf)
787{
788 free(buf);
789}
790#else
791static void *set_file_buffer(FILE *f)
792{
793 return NULL;
794}
795
796static void clear_file_buffer(void *buf)
797{
798}
799#endif
800
801void free_log(struct io_log *log)
802{
803 while (!flist_empty(&log->io_logs)) {
804 struct io_logs *cur_log;
805
806 cur_log = flist_first_entry(&log->io_logs, struct io_logs, list);
807 flist_del_init(&cur_log->list);
808 free(cur_log->log);
809 sfree(cur_log);
810 }
811
812 if (log->pending) {
813 free(log->pending->log);
814 free(log->pending);
815 log->pending = NULL;
816 }
817
819 free(log->filename);
820 sfree(log);
821}
822
823uint64_t hist_sum(int j, int stride, uint64_t *io_u_plat,
824 uint64_t *io_u_plat_last)
825{
826 uint64_t sum;
827 int k;
828
829 if (io_u_plat_last) {
830 for (k = sum = 0; k < stride; k++)
831 sum += io_u_plat[j + k] - io_u_plat_last[j + k];
832 } else {
833 for (k = sum = 0; k < stride; k++)
834 sum += io_u_plat[j + k];
835 }
836
837 return sum;
838}
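/*
 * hist_sum() above collapses "stride" adjacent histogram buckets into a
 * single value. For example, with hist_coarseness = 2 the caller below
 * uses stride = 1 << 2 = 4, so buckets j, j+1, j+2 and j+3 are reported
 * as one column. When a previous snapshot is passed in, the sums are
 * per-interval deltas rather than running totals.
 */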
839
840static void flush_hist_samples(FILE *f, int hist_coarseness, void *samples,
841 uint64_t sample_size)
842{
843 struct io_sample *s;
844 int log_offset;
845 uint64_t i, j, nr_samples;
846 struct io_u_plat_entry *entry, *entry_before;
847 uint64_t *io_u_plat;
848 uint64_t *io_u_plat_before;
849
850 int stride = 1 << hist_coarseness;
851
852 if (!sample_size)
853 return;
854
855 s = __get_sample(samples, 0, 0);
856 log_offset = (s->__ddir & LOG_OFFSET_SAMPLE_BIT) != 0;
857
858 nr_samples = sample_size / __log_entry_sz(log_offset);
859
860 for (i = 0; i < nr_samples; i++) {
861 s = __get_sample(samples, log_offset, i);
862
863 entry = s->data.plat_entry;
864 io_u_plat = entry->io_u_plat;
865
866 entry_before = flist_first_entry(&entry->list, struct io_u_plat_entry, list);
867 io_u_plat_before = entry_before->io_u_plat;
868
869 fprintf(f, "%lu, %u, %llu, ", (unsigned long) s->time,
870 io_sample_ddir(s), (unsigned long long) s->bs);
871 for (j = 0; j < FIO_IO_U_PLAT_NR - stride; j += stride) {
872 fprintf(f, "%llu, ", (unsigned long long)
873 hist_sum(j, stride, io_u_plat, io_u_plat_before));
874 }
875 fprintf(f, "%llu\n", (unsigned long long)
876 hist_sum(FIO_IO_U_PLAT_NR - stride, stride, io_u_plat,
877 io_u_plat_before));
878
879 flist_del(&entry_before->list);
880 free(entry_before);
881 }
882}
883
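/*
 * Each sample flushed below becomes one comma separated line of the form:
 *
 *	time, value, data direction, block size, [offset,] priority bit
 *
 * where the offset column only appears when offset logging was enabled
 * for this log (log_offset), matching the two fprintf() variants below.
 */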
884void flush_samples(FILE *f, void *samples, uint64_t sample_size)
885{
886 struct io_sample *s;
887 int log_offset;
888 uint64_t i, nr_samples;
889
890 if (!sample_size)
891 return;
892
893 s = __get_sample(samples, 0, 0);
894 log_offset = (s->__ddir & LOG_OFFSET_SAMPLE_BIT) != 0;
895
896 nr_samples = sample_size / __log_entry_sz(log_offset);
897
898 for (i = 0; i < nr_samples; i++) {
899 s = __get_sample(samples, log_offset, i);
900
901 if (!log_offset) {
902 fprintf(f, "%lu, %" PRId64 ", %u, %llu, %u\n",
903 (unsigned long) s->time,
904 s->data.val,
905 io_sample_ddir(s), (unsigned long long) s->bs, s->priority_bit);
906 } else {
907 struct io_sample_offset *so = (void *) s;
908
909 fprintf(f, "%lu, %" PRId64 ", %u, %llu, %llu, %u\n",
910 (unsigned long) s->time,
911 s->data.val,
912 io_sample_ddir(s), (unsigned long long) s->bs,
913 (unsigned long long) so->offset, s->priority_bit);
914 }
915 }
916}
917
918#ifdef CONFIG_ZLIB
919
920struct iolog_flush_data {
921 struct workqueue_work work;
922 struct io_log *log;
923 void *samples;
924 uint32_t nr_samples;
925 bool free;
926};
927
928#define GZ_CHUNK 131072
929
930static struct iolog_compress *get_new_chunk(unsigned int seq)
931{
932 struct iolog_compress *c;
933
934 c = malloc(sizeof(*c));
935 INIT_FLIST_HEAD(&c->list);
936 c->buf = malloc(GZ_CHUNK);
937 c->len = 0;
938 c->seq = seq;
939 return c;
940}
941
942static void free_chunk(struct iolog_compress *ic)
943{
944 free(ic->buf);
945 free(ic);
946}
947
948static int z_stream_init(z_stream *stream, int gz_hdr)
949{
950 int wbits = 15;
951
952 memset(stream, 0, sizeof(*stream));
953 stream->zalloc = Z_NULL;
954 stream->zfree = Z_NULL;
955 stream->opaque = Z_NULL;
956 stream->next_in = Z_NULL;
957
958 /*
959	 * zlib magic - adding 32 to the window bits enables auto-detection
960	 * of a gzip header, in case we stored the file in a gzip friendly format.
961 */
962 if (gz_hdr)
963 wbits += 32;
964
965 if (inflateInit2(stream, wbits) != Z_OK)
966 return 1;
967
968 return 0;
969}
970
971struct inflate_chunk_iter {
972 unsigned int seq;
973 int err;
974 void *buf;
975 size_t buf_size;
976 size_t buf_used;
977 size_t chunk_sz;
978};
979
980static void finish_chunk(z_stream *stream, FILE *f,
981 struct inflate_chunk_iter *iter)
982{
983 int ret;
984
985 ret = inflateEnd(stream);
986 if (ret != Z_OK)
987 log_err("fio: failed to end log inflation seq %d (%d)\n",
988 iter->seq, ret);
989
990 flush_samples(f, iter->buf, iter->buf_used);
991 free(iter->buf);
992 iter->buf = NULL;
993 iter->buf_size = iter->buf_used = 0;
994}
995
996/*
997 * Iterative chunk inflation. Handles cases where we cross into a new
998 * sequence, flushing and finishing the previous chunk if needed.
999 */
1000static size_t inflate_chunk(struct iolog_compress *ic, int gz_hdr, FILE *f,
1001 z_stream *stream, struct inflate_chunk_iter *iter)
1002{
1003 size_t ret;
1004
1005 dprint(FD_COMPRESS, "inflate chunk size=%lu, seq=%u\n",
1006 (unsigned long) ic->len, ic->seq);
1007
1008 if (ic->seq != iter->seq) {
1009 if (iter->seq)
1010 finish_chunk(stream, f, iter);
1011
1012 z_stream_init(stream, gz_hdr);
1013 iter->seq = ic->seq;
1014 }
1015
1016 stream->avail_in = ic->len;
1017 stream->next_in = ic->buf;
1018
1019 if (!iter->buf_size) {
1020 iter->buf_size = iter->chunk_sz;
1021 iter->buf = malloc(iter->buf_size);
1022 }
1023
1024 while (stream->avail_in) {
1025 size_t this_out = iter->buf_size - iter->buf_used;
1026 int err;
1027
1028 stream->avail_out = this_out;
1029 stream->next_out = iter->buf + iter->buf_used;
1030
1031 err = inflate(stream, Z_NO_FLUSH);
1032 if (err < 0) {
1033 log_err("fio: failed inflating log: %d\n", err);
1034 iter->err = err;
1035 break;
1036 }
1037
1038 iter->buf_used += this_out - stream->avail_out;
1039
1040 if (!stream->avail_out) {
1041 iter->buf_size += iter->chunk_sz;
1042 iter->buf = realloc(iter->buf, iter->buf_size);
1043 continue;
1044 }
1045
1046 if (err == Z_STREAM_END)
1047 break;
1048 }
1049
1050 ret = (void *) stream->next_in - ic->buf;
1051
1052 dprint(FD_COMPRESS, "inflated to size=%lu\n", (unsigned long) iter->buf_size);
1053
1054 return ret;
1055}
1056
1057/*
1058 * Inflate stored compressed chunks, or write them directly to the log
1059 * file if so instructed.
1060 */
1061static int inflate_gz_chunks(struct io_log *log, FILE *f)
1062{
1063 struct inflate_chunk_iter iter = { .chunk_sz = log->log_gz, };
1064 z_stream stream;
1065
1066 while (!flist_empty(&log->chunk_list)) {
1067 struct iolog_compress *ic;
1068
1069 ic = flist_first_entry(&log->chunk_list, struct iolog_compress, list);
1070 flist_del(&ic->list);
1071
1072 if (log->log_gz_store) {
1073 size_t ret;
1074
1075 dprint(FD_COMPRESS, "log write chunk size=%lu, "
1076 "seq=%u\n", (unsigned long) ic->len, ic->seq);
1077
1078 ret = fwrite(ic->buf, ic->len, 1, f);
1079 if (ret != 1 || ferror(f)) {
1080 iter.err = errno;
1081 log_err("fio: error writing compressed log\n");
1082 }
1083 } else
1084 inflate_chunk(ic, log->log_gz_store, f, &stream, &iter);
1085
1086 free_chunk(ic);
1087 }
1088
1089 if (iter.seq) {
1090 finish_chunk(&stream, f, &iter);
1091 free(iter.buf);
1092 }
1093
1094 return iter.err;
1095}
1096
1097/*
1098 * Open the compressed log file, decompress the stored chunks, and
1099 * write them to stdout. The chunks are stored sequentially in the
1100 * file, so we iterate over them and process them one-by-one.
1101 */
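/*
 * This is what fio's --inflate-log=<file> command line option ends up
 * invoking (at least in current versions - mentioned here only as a
 * pointer, the option itself is defined elsewhere).
 */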
1102int iolog_file_inflate(const char *file)
1103{
1104 struct inflate_chunk_iter iter = { .chunk_sz = 64 * 1024 * 1024, };
1105 struct iolog_compress ic;
1106 z_stream stream;
1107 struct stat sb;
1108 size_t ret;
1109 size_t total;
1110 void *buf;
1111 FILE *f;
1112
1113 f = fopen(file, "r");
1114 if (!f) {
1115 perror("fopen");
1116 return 1;
1117 }
1118
1119 if (stat(file, &sb) < 0) {
1120 fclose(f);
1121 perror("stat");
1122 return 1;
1123 }
1124
1125 ic.buf = buf = malloc(sb.st_size);
1126 ic.len = sb.st_size;
1127 ic.seq = 1;
1128
1129 ret = fread(ic.buf, ic.len, 1, f);
1130 if (ret == 0 && ferror(f)) {
1131 perror("fread");
1132 fclose(f);
1133 free(buf);
1134 return 1;
1135 } else if (ferror(f) || (!feof(f) && ret != 1)) {
1136 log_err("fio: short read on reading log\n");
1137 fclose(f);
1138 free(buf);
1139 return 1;
1140 }
1141
1142 fclose(f);
1143
1144 /*
1145 * Each chunk will return Z_STREAM_END. We don't know how many
1146 * chunks are in the file, so we just keep looping and incrementing
1147 * the sequence number until we have consumed the whole compressed
1148 * file.
1149 */
1150 total = ic.len;
1151 do {
1152 size_t iret;
1153
1154 iret = inflate_chunk(&ic, 1, stdout, &stream, &iter);
1155 total -= iret;
1156 if (!total)
1157 break;
1158 if (iter.err)
1159 break;
1160
1161 ic.seq++;
1162 ic.len -= iret;
1163 ic.buf += iret;
1164 } while (1);
1165
1166 if (iter.seq) {
1167 finish_chunk(&stream, stdout, &iter);
1168 free(iter.buf);
1169 }
1170
1171 free(buf);
1172 return iter.err;
1173}
1174
1175#else
1176
1177static int inflate_gz_chunks(struct io_log *log, FILE *f)
1178{
1179 return 0;
1180}
1181
1182int iolog_file_inflate(const char *file)
1183{
1184 log_err("fio: log inflation not possible without zlib\n");
1185 return 1;
1186}
1187
1188#endif
1189
1190void flush_log(struct io_log *log, bool do_append)
1191{
1192 void *buf;
1193 FILE *f;
1194
1195 if (!do_append)
1196 f = fopen(log->filename, "w");
1197 else
1198 f = fopen(log->filename, "a");
1199 if (!f) {
1200 perror("fopen log");
1201 return;
1202 }
1203
1204 buf = set_file_buffer(f);
1205
1206 inflate_gz_chunks(log, f);
1207
1208 while (!flist_empty(&log->io_logs)) {
1209 struct io_logs *cur_log;
1210
1211 cur_log = flist_first_entry(&log->io_logs, struct io_logs, list);
1212 flist_del_init(&cur_log->list);
1213
1214 if (log->td && log == log->td->clat_hist_log)
1215 flush_hist_samples(f, log->hist_coarseness, cur_log->log,
1216 log_sample_sz(log, cur_log));
1217 else
1218 flush_samples(f, cur_log->log, log_sample_sz(log, cur_log));
1219
1220 sfree(cur_log);
1221 }
1222
1223 fclose(f);
1224 clear_file_buffer(buf);
1225}
1226
1227static int finish_log(struct thread_data *td, struct io_log *log, int trylock)
1228{
1229 if (td->flags & TD_F_COMPRESS_LOG)
1230 iolog_flush(log);
1231
1232 if (trylock) {
1233 if (fio_trylock_file(log->filename))
1234 return 1;
1235 } else
1236 fio_lock_file(log->filename);
1237
1238 if (td->client_type == FIO_CLIENT_TYPE_GUI || is_backend)
1239 fio_send_iolog(td, log, log->filename);
1240 else
1241 flush_log(log, !td->o.per_job_logs);
1242
1243 fio_unlock_file(log->filename);
1244 free_log(log);
1245 return 0;
1246}
1247
1248size_t log_chunk_sizes(struct io_log *log)
1249{
1250 struct flist_head *entry;
1251 size_t ret;
1252
1253 if (flist_empty(&log->chunk_list))
1254 return 0;
1255
1256 ret = 0;
1257 pthread_mutex_lock(&log->chunk_lock);
1258 flist_for_each(entry, &log->chunk_list) {
1259 struct iolog_compress *c;
1260
1261 c = flist_entry(entry, struct iolog_compress, list);
1262 ret += c->len;
1263 }
1264 pthread_mutex_unlock(&log->chunk_lock);
1265 return ret;
1266}
1267
1268#ifdef CONFIG_ZLIB
1269
1270static void iolog_put_deferred(struct io_log *log, void *ptr)
1271{
1272 if (!ptr)
1273 return;
1274
1275 pthread_mutex_lock(&log->deferred_free_lock);
1276 if (log->deferred < IOLOG_MAX_DEFER) {
1277 log->deferred_items[log->deferred] = ptr;
1278 log->deferred++;
1279 } else if (!fio_did_warn(FIO_WARN_IOLOG_DROP))
1280 log_err("fio: had to drop log entry free\n");
1281 pthread_mutex_unlock(&log->deferred_free_lock);
1282}
1283
1284static void iolog_free_deferred(struct io_log *log)
1285{
1286 int i;
1287
1288 if (!log->deferred)
1289 return;
1290
1291 pthread_mutex_lock(&log->deferred_free_lock);
1292
1293 for (i = 0; i < log->deferred; i++) {
1294 free(log->deferred_items[i]);
1295 log->deferred_items[i] = NULL;
1296 }
1297
1298 log->deferred = 0;
1299 pthread_mutex_unlock(&log->deferred_free_lock);
1300}
1301
1302static int gz_work(struct iolog_flush_data *data)
1303{
1304 struct iolog_compress *c = NULL;
1305 struct flist_head list;
1306 unsigned int seq;
1307 z_stream stream;
1308 size_t total = 0;
1309 int ret;
1310
1311 INIT_FLIST_HEAD(&list);
1312
1313 memset(&stream, 0, sizeof(stream));
1314 stream.zalloc = Z_NULL;
1315 stream.zfree = Z_NULL;
1316 stream.opaque = Z_NULL;
1317
1318 ret = deflateInit(&stream, Z_DEFAULT_COMPRESSION);
1319 if (ret != Z_OK) {
1320 log_err("fio: failed to init gz stream\n");
1321 goto err;
1322 }
1323
1324 seq = ++data->log->chunk_seq;
1325
1326 stream.next_in = (void *) data->samples;
1327 stream.avail_in = data->nr_samples * log_entry_sz(data->log);
1328
1329 dprint(FD_COMPRESS, "deflate input size=%lu, seq=%u, log=%s\n",
1330 (unsigned long) stream.avail_in, seq,
1331 data->log->filename);
1332 do {
1333 if (c)
1334 dprint(FD_COMPRESS, "seq=%d, chunk=%lu\n", seq,
1335 (unsigned long) c->len);
1336 c = get_new_chunk(seq);
1337 stream.avail_out = GZ_CHUNK;
1338 stream.next_out = c->buf;
1339 ret = deflate(&stream, Z_NO_FLUSH);
1340 if (ret < 0) {
1341 log_err("fio: deflate log (%d)\n", ret);
1342 free_chunk(c);
1343 goto err;
1344 }
1345
1346 c->len = GZ_CHUNK - stream.avail_out;
1347 flist_add_tail(&c->list, &list);
1348 total += c->len;
1349 } while (stream.avail_in);
1350
1351 stream.next_out = c->buf + c->len;
1352 stream.avail_out = GZ_CHUNK - c->len;
1353
1354 ret = deflate(&stream, Z_FINISH);
1355 if (ret < 0) {
1356 /*
1357 * Z_BUF_ERROR is special, it just means we need more
1358 * output space. We'll handle that below. Treat any other
1359 * error as fatal.
1360 */
1361 if (ret != Z_BUF_ERROR) {
1362 log_err("fio: deflate log (%d)\n", ret);
1363 flist_del(&c->list);
1364 free_chunk(c);
1365 goto err;
1366 }
1367 }
1368
1369 total -= c->len;
1370 c->len = GZ_CHUNK - stream.avail_out;
1371 total += c->len;
1372 dprint(FD_COMPRESS, "seq=%d, chunk=%lu\n", seq, (unsigned long) c->len);
1373
1374 if (ret != Z_STREAM_END) {
1375 do {
1376 c = get_new_chunk(seq);
1377 stream.avail_out = GZ_CHUNK;
1378 stream.next_out = c->buf;
1379 ret = deflate(&stream, Z_FINISH);
1380 c->len = GZ_CHUNK - stream.avail_out;
1381 total += c->len;
1382 flist_add_tail(&c->list, &list);
1383 dprint(FD_COMPRESS, "seq=%d, chunk=%lu\n", seq,
1384 (unsigned long) c->len);
1385 } while (ret != Z_STREAM_END);
1386 }
1387
1388 dprint(FD_COMPRESS, "deflated to size=%lu\n", (unsigned long) total);
1389
1390 ret = deflateEnd(&stream);
1391 if (ret != Z_OK)
1392 log_err("fio: deflateEnd %d\n", ret);
1393
1394 iolog_put_deferred(data->log, data->samples);
1395
1396 if (!flist_empty(&list)) {
1397 pthread_mutex_lock(&data->log->chunk_lock);
1398 flist_splice_tail(&list, &data->log->chunk_list);
1399 pthread_mutex_unlock(&data->log->chunk_lock);
1400 }
1401
1402 ret = 0;
1403done:
1404 if (data->free)
1405 sfree(data);
1406 return ret;
1407err:
1408 while (!flist_empty(&list)) {
1409 c = flist_first_entry(&list, struct iolog_compress, list);
1410 flist_del(&c->list);
1411 free_chunk(c);
1412 }
1413 ret = 1;
1414 goto done;
1415}
1416
1417/*
1418 * Invoked from our compress helper thread, when logging would have exceeded
1419 * the specified memory limitation. Compresses the previously stored
1420 * entries.
1421 */
1422static int gz_work_async(struct submit_worker *sw, struct workqueue_work *work)
1423{
1424 return gz_work(container_of(work, struct iolog_flush_data, work));
1425}
1426
1427static int gz_init_worker(struct submit_worker *sw)
1428{
1429 struct thread_data *td = sw->wq->td;
1430
1431 if (!fio_option_is_set(&td->o, log_gz_cpumask))
1432 return 0;
1433
1434 if (fio_setaffinity(gettid(), td->o.log_gz_cpumask) == -1) {
1435 log_err("gz: failed to set CPU affinity\n");
1436 return 1;
1437 }
1438
1439 return 0;
1440}
1441
1442static struct workqueue_ops log_compress_wq_ops = {
1443 .fn = gz_work_async,
1444 .init_worker_fn = gz_init_worker,
1445 .nice = 1,
1446};
1447
1448int iolog_compress_init(struct thread_data *td, struct sk_out *sk_out)
1449{
1450 if (!(td->flags & TD_F_COMPRESS_LOG))
1451 return 0;
1452
1453 workqueue_init(td, &td->log_compress_wq, &log_compress_wq_ops, 1, sk_out);
1454 return 0;
1455}
1456
1457void iolog_compress_exit(struct thread_data *td)
1458{
1459 if (!(td->flags & TD_F_COMPRESS_LOG))
1460 return;
1461
1462 workqueue_exit(&td->log_compress_wq);
1463}
1464
1465/*
1466 * Compress the existing log entries synchronously. Each stored set of
1467 * samples is handed to gz_work() in turn and freed once it has been
1468 * compressed, so this does not return until all pending logs have been
1469 * flushed. The asynchronous, workqueue-driven variant is
1470 * iolog_cur_flush() below.
1471 */
1473static int iolog_flush(struct io_log *log)
1474{
1475 struct iolog_flush_data *data;
1476
1477 data = malloc(sizeof(*data));
1478 if (!data)
1479 return 1;
1480
1481 data->log = log;
1482 data->free = false;
1483
1484 while (!flist_empty(&log->io_logs)) {
1485 struct io_logs *cur_log;
1486
1487 cur_log = flist_first_entry(&log->io_logs, struct io_logs, list);
1488 flist_del_init(&cur_log->list);
1489
1490 data->samples = cur_log->log;
1491 data->nr_samples = cur_log->nr_samples;
1492
1493 sfree(cur_log);
1494
1495 gz_work(data);
1496 }
1497
1498 free(data);
1499 return 0;
1500}
1501
1502int iolog_cur_flush(struct io_log *log, struct io_logs *cur_log)
1503{
1504 struct iolog_flush_data *data;
1505
1506 data = smalloc(sizeof(*data));
1507 if (!data)
1508 return 1;
1509
1510 data->log = log;
1511
1512 data->samples = cur_log->log;
1513 data->nr_samples = cur_log->nr_samples;
1514 data->free = true;
1515
1516 cur_log->nr_samples = cur_log->max_samples = 0;
1517 cur_log->log = NULL;
1518
1519 workqueue_enqueue(&log->td->log_compress_wq, &data->work);
1520
1521 iolog_free_deferred(log);
1522
1523 return 0;
1524}
1525#else
1526
1527static int iolog_flush(struct io_log *log)
1528{
1529 return 1;
1530}
1531
1532int iolog_cur_flush(struct io_log *log, struct io_logs *cur_log)
1533{
1534 return 1;
1535}
1536
1537int iolog_compress_init(struct thread_data *td, struct sk_out *sk_out)
1538{
1539 return 0;
1540}
1541
1542void iolog_compress_exit(struct thread_data *td)
1543{
1544}
1545
1546#endif
1547
1548struct io_logs *iolog_cur_log(struct io_log *log)
1549{
1550 if (flist_empty(&log->io_logs))
1551 return NULL;
1552
1553 return flist_last_entry(&log->io_logs, struct io_logs, list);
1554}
1555
1556uint64_t iolog_nr_samples(struct io_log *iolog)
1557{
1558 struct flist_head *entry;
1559 uint64_t ret = 0;
1560
1561 flist_for_each(entry, &iolog->io_logs) {
1562 struct io_logs *cur_log;
1563
1564 cur_log = flist_entry(entry, struct io_logs, list);
1565 ret += cur_log->nr_samples;
1566 }
1567
1568 return ret;
1569}
1570
1571static int __write_log(struct thread_data *td, struct io_log *log, int try)
1572{
1573 if (log)
1574 return finish_log(td, log, try);
1575
1576 return 0;
1577}
1578
1579static int write_iops_log(struct thread_data *td, int try, bool unit_log)
1580{
1581 int ret;
1582
1583 if (per_unit_log(td->iops_log) != unit_log)
1584 return 0;
1585
1586 ret = __write_log(td, td->iops_log, try);
1587 if (!ret)
1588 td->iops_log = NULL;
1589
1590 return ret;
1591}
1592
1593static int write_slat_log(struct thread_data *td, int try, bool unit_log)
1594{
1595 int ret;
1596
1597 if (!unit_log)
1598 return 0;
1599
1600 ret = __write_log(td, td->slat_log, try);
1601 if (!ret)
1602 td->slat_log = NULL;
1603
1604 return ret;
1605}
1606
1607static int write_clat_log(struct thread_data *td, int try, bool unit_log)
1608{
1609 int ret;
1610
1611 if (!unit_log)
1612 return 0;
1613
1614 ret = __write_log(td, td->clat_log, try);
1615 if (!ret)
1616 td->clat_log = NULL;
1617
1618 return ret;
1619}
1620
1621static int write_clat_hist_log(struct thread_data *td, int try, bool unit_log)
1622{
1623 int ret;
1624
1625 if (!unit_log)
1626 return 0;
1627
1628 ret = __write_log(td, td->clat_hist_log, try);
1629 if (!ret)
1630 td->clat_hist_log = NULL;
1631
1632 return ret;
1633}
1634
1635static int write_lat_log(struct thread_data *td, int try, bool unit_log)
1636{
1637 int ret;
1638
1639 if (!unit_log)
1640 return 0;
1641
1642 ret = __write_log(td, td->lat_log, try);
1643 if (!ret)
1644 td->lat_log = NULL;
1645
1646 return ret;
1647}
1648
1649static int write_bandw_log(struct thread_data *td, int try, bool unit_log)
1650{
1651 int ret;
1652
1653 if (per_unit_log(td->bw_log) != unit_log)
1654 return 0;
1655
1656 ret = __write_log(td, td->bw_log, try);
1657 if (!ret)
1658 td->bw_log = NULL;
1659
1660 return ret;
1661}
1662
1663enum {
1664 BW_LOG_MASK = 1,
1665 LAT_LOG_MASK = 2,
1666 SLAT_LOG_MASK = 4,
1667 CLAT_LOG_MASK = 8,
1668 IOPS_LOG_MASK = 16,
1669 CLAT_HIST_LOG_MASK = 32,
1670
1671 ALL_LOG_NR = 6,
1672};
1673
1674struct log_type {
1675 unsigned int mask;
1676 int (*fn)(struct thread_data *, int, bool);
1677};
1678
1679static struct log_type log_types[] = {
1680 {
1681 .mask = BW_LOG_MASK,
1682 .fn = write_bandw_log,
1683 },
1684 {
1685 .mask = LAT_LOG_MASK,
1686 .fn = write_lat_log,
1687 },
1688 {
1689 .mask = SLAT_LOG_MASK,
1690 .fn = write_slat_log,
1691 },
1692 {
1693 .mask = CLAT_LOG_MASK,
1694 .fn = write_clat_log,
1695 },
1696 {
1697 .mask = IOPS_LOG_MASK,
1698 .fn = write_iops_log,
1699 },
1700 {
1701 .mask = CLAT_HIST_LOG_MASK,
1702 .fn = write_clat_hist_log,
1703 }
1704};
1705
1706void td_writeout_logs(struct thread_data *td, bool unit_logs)
1707{
1708 unsigned int log_mask = 0;
1709 unsigned int log_left = ALL_LOG_NR;
1710 int old_state, i;
1711
1712 old_state = td_bump_runstate(td, TD_FINISHING);
1713
1714 finalize_logs(td, unit_logs);
1715
1716 while (log_left) {
1717 int prev_log_left = log_left;
1718
1719 for (i = 0; i < ALL_LOG_NR && log_left; i++) {
1720 struct log_type *lt = &log_types[i];
1721 int ret;
1722
1723 if (!(log_mask & lt->mask)) {
1724 ret = lt->fn(td, log_left != 1, unit_logs);
1725 if (!ret) {
1726 log_left--;
1727 log_mask |= lt->mask;
1728 }
1729 }
1730 }
1731
1732 if (prev_log_left == log_left)
1733 usleep(5000);
1734 }
1735
1736 td_restore_runstate(td, old_state);
1737}
1738
1739void fio_writeout_logs(bool unit_logs)
1740{
1741 struct thread_data *td;
1742 int i;
1743
1744 for_each_td(td, i)
1745 td_writeout_logs(td, unit_logs);
1746}