Commit | Line | Data |
---|---|---|
ac9b9101 JA |
1 | /* |
2 | * Code related to writing an iolog of what a thread is doing, and to | |
3 | * later read that back and replay | |
4 | */ | |
5 | #include <stdio.h> | |
6 | #include <stdlib.h> | |
7 | #include <libgen.h> | |
8 | #include <assert.h> | |
b26317c9 JA |
9 | #include <sys/types.h> |
10 | #include <sys/stat.h> | |
11 | #include <unistd.h> | |
aee2ab67 JA |
12 | #ifdef CONFIG_ZLIB |
13 | #include <zlib.h> | |
14 | #endif | |
15 | ||
ac9b9101 JA |
16 | #include "flist.h" |
17 | #include "fio.h" | |
18 | #include "verify.h" | |
19 | #include "trim.h" | |
243bfe19 | 20 | #include "filelock.h" |
78d55e72 | 21 | #include "lib/tp.h" |
ac9b9101 JA |
22 | |
23 | static const char iolog_ver2[] = "fio version 2 iolog"; | |
24 | ||
/*
 * Add an io_piece to the tail of the replay queue and account its
 * length towards the total amount of io this job will perform.
 */
void queue_io_piece(struct thread_data *td, struct io_piece *ipo)
{
	flist_add_tail(&ipo->list, &td->io_log_list);
	td->total_io_size += ipo->len;
}
30 | ||
/*
 * Append one io_u to the write iolog, if this job keeps one.
 * The action table is indexed directly by io_u->ddir.
 */
void log_io_u(struct thread_data *td, struct io_u *io_u)
{
	const char *act[] = { "read", "write", "sync", "datasync",
				"sync_file_range", "wait", "trim" };

	/* ddir indexes act[]; 6 is the last valid entry ("trim") */
	assert(io_u->ddir <= 6);

	if (!td->o.write_iolog_file)
		return;

	fprintf(td->iolog_f, "%s %s %llu %lu\n", io_u->file->file_name,
						act[io_u->ddir], io_u->offset,
						io_u->buflen);
}
45 | ||
/*
 * Record a file-level event (add/open/close) in the write iolog.
 */
void log_file(struct thread_data *td, struct fio_file *f,
	      enum file_log_act what)
{
	const char *act[] = { "add", "open", "close" };

	/* 'what' indexes act[], which has exactly three entries */
	assert(what < 3);

	if (!td->o.write_iolog_file)
		return;


	/*
	 * this happens on the pre-open/close done before the job starts
	 */
	if (!td->iolog_f)
		return;

	fprintf(td->iolog_f, "%s %s\n", f->file_name, act[what]);
}
65 | ||
66 | static void iolog_delay(struct thread_data *td, unsigned long delay) | |
67 | { | |
68 | unsigned long usec = utime_since_now(&td->last_issue); | |
30b18672 | 69 | unsigned long this_delay; |
ac9b9101 JA |
70 | |
71 | if (delay < usec) | |
72 | return; | |
73 | ||
74 | delay -= usec; | |
75 | ||
76 | /* | |
77 | * less than 100 usec delay, just regard it as noise | |
78 | */ | |
79 | if (delay < 100) | |
80 | return; | |
81 | ||
30b18672 JA |
82 | while (delay && !td->terminate) { |
83 | this_delay = delay; | |
84 | if (this_delay > 500000) | |
85 | this_delay = 500000; | |
86 | ||
87 | usec_sleep(td, this_delay); | |
88 | delay -= this_delay; | |
89 | } | |
ac9b9101 JA |
90 | } |
91 | ||
/*
 * Execute a special (file action) entry from the replay log.
 * Returns 0 if the ipo was not special, 1 if it was handled here,
 * and -1 on error (failure to open the target file).
 */
static int ipo_special(struct thread_data *td, struct io_piece *ipo)
{
	struct fio_file *f;
	int ret;

	/*
	 * Not a special ipo
	 */
	if (ipo->ddir != DDIR_INVAL)
		return 0;

	f = td->files[ipo->fileno];

	switch (ipo->file_action) {
	case FIO_LOG_OPEN_FILE:
		ret = td_io_open_file(td, f);
		if (!ret)
			break;
		td_verror(td, ret, "iolog open file");
		return -1;
	case FIO_LOG_CLOSE_FILE:
		td_io_close_file(td, f);
		break;
	case FIO_LOG_UNLINK_FILE:
		/* NOTE(review): unlink() failure is silently ignored here */
		unlink(f->file_name);
		break;
	default:
		log_err("fio: bad file action %d\n", ipo->file_action);
		break;
	}

	return 1;
}
125 | ||
/*
 * Pull the next entry off the replay queue and fill 'io_u' in from it.
 * File actions are executed inline and wait entries are slept off here.
 * Returns 0 when io_u has been set up, 1 when the log is exhausted
 * (which also marks the thread as done).
 */
int read_iolog_get(struct thread_data *td, struct io_u *io_u)
{
	struct io_piece *ipo;
	unsigned long elapsed;

	while (!flist_empty(&td->io_log_list)) {
		int ret;

		ipo = flist_first_entry(&td->io_log_list, struct io_piece, list);
		flist_del(&ipo->list);
		remove_trim_entry(td, ipo);

		ret = ipo_special(td, ipo);
		if (ret < 0) {
			/* a file action failed: stop replaying */
			free(ipo);
			break;
		} else if (ret > 0) {
			/* special entry handled, move on to the next one */
			free(ipo);
			continue;
		}

		io_u->ddir = ipo->ddir;
		if (ipo->ddir != DDIR_WAIT) {
			io_u->offset = ipo->offset;
			io_u->buflen = ipo->len;
			io_u->file = td->files[ipo->fileno];
			get_file(io_u->file);
			dprint(FD_IO, "iolog: get %llu/%lu/%s\n", io_u->offset,
						io_u->buflen, io_u->file->file_name);
			if (ipo->delay)
				iolog_delay(td, ipo->delay);
		} else {
			/*
			 * wait entry: delay is compared against time since
			 * genesis (presumably in msec - note the *1000 when
			 * converting to the usec sleep)
			 */
			elapsed = mtime_since_genesis();
			if (ipo->delay > elapsed)
				usec_sleep(td, (ipo->delay - elapsed) * 1000);
		}

		free(ipo);

		if (io_u->ddir != DDIR_WAIT)
			return 0;
	}

	td->done = 1;
	return 1;
}
172 | ||
173 | void prune_io_piece_log(struct thread_data *td) | |
174 | { | |
175 | struct io_piece *ipo; | |
176 | struct rb_node *n; | |
177 | ||
178 | while ((n = rb_first(&td->io_hist_tree)) != NULL) { | |
179 | ipo = rb_entry(n, struct io_piece, rb_node); | |
180 | rb_erase(n, &td->io_hist_tree); | |
181 | remove_trim_entry(td, ipo); | |
182 | td->io_hist_len--; | |
183 | free(ipo); | |
184 | } | |
185 | ||
186 | while (!flist_empty(&td->io_hist_list)) { | |
9342d5f8 | 187 | ipo = flist_entry(&td->io_hist_list, struct io_piece, list); |
ac9b9101 JA |
188 | flist_del(&ipo->list); |
189 | remove_trim_entry(td, ipo); | |
190 | td->io_hist_len--; | |
191 | free(ipo); | |
192 | } | |
193 | } | |
194 | ||
195 | /* | |
196 | * log a successful write, so we can unwind the log for verify | |
197 | */ | |
/*
 * log a successful write, so we can unwind the log for verify
 */
void log_io_piece(struct thread_data *td, struct io_u *io_u)
{
	struct rb_node **p, *parent;
	struct io_piece *ipo, *__ipo;

	ipo = malloc(sizeof(struct io_piece));
	init_ipo(ipo);
	ipo->file = io_u->file;
	ipo->offset = io_u->offset;
	ipo->len = io_u->buflen;
	ipo->numberio = io_u->numberio;
	ipo->flags = IP_F_IN_FLIGHT;

	io_u->ipo = ipo;

	if (io_u_should_trim(td, io_u)) {
		flist_add_tail(&ipo->trim_list, &td->trim_list);
		td->trim_entries++;
	}

	/*
	 * We don't need to sort the entries, if:
	 *
	 * Sequential writes, or
	 * Random writes that lay out the file as it goes along
	 *
	 * For both these cases, just reading back data in the order we
	 * wrote it out is the fastest.
	 *
	 * One exception is if we don't have a random map AND we are doing
	 * verifies, in that case we need to check for duplicate blocks and
	 * drop the old one, which we rely on the rb insert/lookup for
	 * handling.
	 */
	if (((!td->o.verifysort) || !td_random(td) || !td->o.overwrite) &&
	    (file_randommap(td, ipo->file) || td->o.verify == VERIFY_NONE)) {
		INIT_FLIST_HEAD(&ipo->list);
		flist_add_tail(&ipo->list, &td->io_hist_list);
		ipo->flags |= IP_F_ONLIST;
		td->io_hist_len++;
		return;
	}

	RB_CLEAR_NODE(&ipo->rb_node);

	/*
	 * Sort the entry into the verification list
	 * (keyed by file pointer, then offset)
	 */
restart:
	p = &td->io_hist_tree.rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;

		__ipo = rb_entry(parent, struct io_piece, rb_node);
		if (ipo->file < __ipo->file)
			p = &(*p)->rb_left;
		else if (ipo->file > __ipo->file)
			p = &(*p)->rb_right;
		else if (ipo->offset < __ipo->offset)
			p = &(*p)->rb_left;
		else if (ipo->offset > __ipo->offset)
			p = &(*p)->rb_right;
		else {
			/*
			 * Same file/offset already logged: the old block
			 * was overwritten, drop it and re-walk the tree
			 * from the top since rb_erase may rebalance it.
			 */
			dprint(FD_IO, "iolog: overlap %llu/%lu, %llu/%lu",
				__ipo->offset, __ipo->len,
				ipo->offset, ipo->len);
			td->io_hist_len--;
			rb_erase(parent, &td->io_hist_tree);
			remove_trim_entry(td, __ipo);
			free(__ipo);
			goto restart;
		}
	}

	rb_link_node(&ipo->rb_node, parent, p);
	rb_insert_color(&ipo->rb_node, &td->io_hist_tree);
	ipo->flags |= IP_F_ONRB;
	td->io_hist_len++;
}
278 | ||
890b6656 JA |
279 | void unlog_io_piece(struct thread_data *td, struct io_u *io_u) |
280 | { | |
281 | struct io_piece *ipo = io_u->ipo; | |
282 | ||
283 | if (!ipo) | |
284 | return; | |
285 | ||
286 | if (ipo->flags & IP_F_ONRB) | |
287 | rb_erase(&ipo->rb_node, &td->io_hist_tree); | |
288 | else if (ipo->flags & IP_F_ONLIST) | |
289 | flist_del(&ipo->list); | |
290 | ||
291 | free(ipo); | |
292 | io_u->ipo = NULL; | |
293 | td->io_hist_len--; | |
294 | } | |
295 | ||
296 | void trim_io_piece(struct thread_data *td, struct io_u *io_u) | |
297 | { | |
298 | struct io_piece *ipo = io_u->ipo; | |
299 | ||
300 | if (!ipo) | |
301 | return; | |
302 | ||
303 | ipo->len = io_u->xfer_buflen - io_u->resid; | |
304 | } | |
305 | ||
ac9b9101 JA |
/*
 * Flush and close the write iolog, then release its stdio buffer.
 * The buffer is freed only after fclose(), since setvbuf() handed it
 * to the stream.
 */
void write_iolog_close(struct thread_data *td)
{
	fflush(td->iolog_f);
	fclose(td->iolog_f);
	free(td->iolog_buf);
	td->iolog_f = NULL;
	td->iolog_buf = NULL;
}
314 | ||
315 | /* | |
316 | * Read version 2 iolog data. It is enhanced to include per-file logging, | |
317 | * syncs, etc. | |
318 | */ | |
/*
 * Read version 2 iolog data. It is enhanced to include per-file logging,
 * syncs, etc.
 */
static int read_iolog2(struct thread_data *td, FILE *f)
{
	unsigned long long offset;
	unsigned int bytes;
	int reads, writes, waits, fileno = 0, file_action = 0; /* stupid gcc */
	char *fname, *act;
	char *str, *p;
	enum fio_ddir rw;

	free_release_files(td);

	/*
	 * Read in the read iolog and store it, reuse the infrastructure
	 * for doing verifications.
	 */
	str = malloc(4096);
	fname = malloc(256+16);
	act = malloc(256+16);

	reads = writes = waits = 0;
	while ((p = fgets(str, 4096, f)) != NULL) {
		struct io_piece *ipo;
		int r;

		/* a full io line reads "<file> <action> <offset> <bytes>" */
		r = sscanf(p, "%256s %256s %llu %u", fname, act, &offset,
									&bytes);
		if (r == 4) {
			/*
			 * Check action first
			 */
			if (!strcmp(act, "wait"))
				rw = DDIR_WAIT;
			else if (!strcmp(act, "read"))
				rw = DDIR_READ;
			else if (!strcmp(act, "write"))
				rw = DDIR_WRITE;
			else if (!strcmp(act, "sync"))
				rw = DDIR_SYNC;
			else if (!strcmp(act, "datasync"))
				rw = DDIR_DATASYNC;
			else if (!strcmp(act, "trim"))
				rw = DDIR_TRIM;
			else {
				log_err("fio: bad iolog file action: %s\n",
									act);
				continue;
			}
			fileno = get_fileno(td, fname);
		} else if (r == 2) {
			/* two fields: a file management action */
			rw = DDIR_INVAL;
			if (!strcmp(act, "add")) {
				fileno = add_file(td, fname, 0, 1);
				file_action = FIO_LOG_ADD_FILE;
				continue;
			} else if (!strcmp(act, "open")) {
				fileno = get_fileno(td, fname);
				file_action = FIO_LOG_OPEN_FILE;
			} else if (!strcmp(act, "close")) {
				fileno = get_fileno(td, fname);
				file_action = FIO_LOG_CLOSE_FILE;
			} else {
				log_err("fio: bad iolog file action: %s\n",
									act);
				continue;
			}
		} else {
			log_err("bad iolog2: %s", p);
			continue;
		}

		if (rw == DDIR_READ)
			reads++;
		else if (rw == DDIR_WRITE) {
			/*
			 * Don't add a write for ro mode
			 */
			if (read_only)
				continue;
			writes++;
		} else if (rw == DDIR_WAIT) {
			waits++;
		} else if (rw == DDIR_INVAL) {
		} else if (!ddir_sync(rw)) {
			log_err("bad ddir: %d\n", rw);
			continue;
		}

		/*
		 * Make note of file
		 */
		ipo = malloc(sizeof(*ipo));
		init_ipo(ipo);
		ipo->ddir = rw;
		if (rw == DDIR_WAIT) {
			/* for wait entries, 'offset' carries the delay */
			ipo->delay = offset;
		} else {
			ipo->offset = offset;
			ipo->len = bytes;
			/* keep max_bs in sync with the largest size seen */
			if (rw != DDIR_INVAL && bytes > td->o.max_bs[rw])
				td->o.max_bs[rw] = bytes;
			ipo->fileno = fileno;
			ipo->file_action = file_action;
			td->o.size += bytes;
		}

		queue_io_piece(td, ipo);
	}

	free(str);
	free(act);
	free(fname);

	if (writes && read_only) {
		log_err("fio: <%s> skips replay of %d writes due to"
			" read-only\n", td->o.name, writes);
		writes = 0;
	}

	/* derive the job's data direction from what the log contained */
	if (!reads && !writes && !waits)
		return 1;
	else if (reads && !writes)
		td->o.td_ddir = TD_DDIR_READ;
	else if (!reads && writes)
		td->o.td_ddir = TD_DDIR_WRITE;
	else
		td->o.td_ddir = TD_DDIR_RW;

	return 0;
}
448 | ||
449 | /* | |
450 | * open iolog, check version, and call appropriate parser | |
451 | */ | |
452 | static int init_iolog_read(struct thread_data *td) | |
453 | { | |
454 | char buffer[256], *p; | |
455 | FILE *f; | |
456 | int ret; | |
457 | ||
458 | f = fopen(td->o.read_iolog_file, "r"); | |
459 | if (!f) { | |
460 | perror("fopen read iolog"); | |
461 | return 1; | |
462 | } | |
463 | ||
464 | p = fgets(buffer, sizeof(buffer), f); | |
465 | if (!p) { | |
466 | td_verror(td, errno, "iolog read"); | |
467 | log_err("fio: unable to read iolog\n"); | |
468 | fclose(f); | |
469 | return 1; | |
470 | } | |
471 | ||
472 | /* | |
473 | * version 2 of the iolog stores a specific string as the | |
474 | * first line, check for that | |
475 | */ | |
476 | if (!strncmp(iolog_ver2, buffer, strlen(iolog_ver2))) | |
477 | ret = read_iolog2(td, f); | |
478 | else { | |
479 | log_err("fio: iolog version 1 is no longer supported\n"); | |
480 | ret = 1; | |
481 | } | |
482 | ||
483 | fclose(f); | |
484 | return ret; | |
485 | } | |
486 | ||
487 | /* | |
488 | * Set up a log for storing io patterns. | |
489 | */ | |
490 | static int init_iolog_write(struct thread_data *td) | |
491 | { | |
492 | struct fio_file *ff; | |
493 | FILE *f; | |
494 | unsigned int i; | |
495 | ||
496 | f = fopen(td->o.write_iolog_file, "a"); | |
497 | if (!f) { | |
498 | perror("fopen write iolog"); | |
499 | return 1; | |
500 | } | |
501 | ||
502 | /* | |
503 | * That's it for writing, setup a log buffer and we're done. | |
504 | */ | |
505 | td->iolog_f = f; | |
506 | td->iolog_buf = malloc(8192); | |
507 | setvbuf(f, td->iolog_buf, _IOFBF, 8192); | |
508 | ||
509 | /* | |
510 | * write our version line | |
511 | */ | |
512 | if (fprintf(f, "%s\n", iolog_ver2) < 0) { | |
513 | perror("iolog init\n"); | |
514 | return 1; | |
515 | } | |
516 | ||
517 | /* | |
518 | * add all known files | |
519 | */ | |
520 | for_each_file(td, ff, i) | |
521 | log_file(td, ff, FIO_LOG_ADD_FILE); | |
522 | ||
523 | return 0; | |
524 | } | |
525 | ||
/*
 * Top-level iolog init: set up replay from a blktrace or v2 iolog file,
 * or open a write iolog for recording. Returns non-zero on failure.
 */
int init_iolog(struct thread_data *td)
{
	int ret = 0;

	if (td->o.read_iolog_file) {
		int need_swap;

		/*
		 * Check if it's a blktrace file and load that if possible.
		 * Otherwise assume it's a normal log file and load that.
		 */
		if (is_blktrace(td->o.read_iolog_file, &need_swap))
			ret = load_blktrace(td, td->o.read_iolog_file, need_swap);
		else
			ret = init_iolog_read(td);
	} else if (td->o.write_iolog_file)
		ret = init_iolog_write(td);

	if (ret)
		td_verror(td, EINVAL, "failed initializing iolog");

	return ret;
}
549 | ||
aee2ab67 JA |
/*
 * Allocate and initialize an io_log from the given parameters, storing
 * the result in *log. Sample storage starts at max_samples entries.
 */
void setup_log(struct io_log **log, struct log_params *p,
	       const char *filename)
{
	struct io_log *l = malloc(sizeof(*l));

	memset(l, 0, sizeof(*l));
	l->nr_samples = 0;
	l->max_samples = 1024;
	l->log_type = p->log_type;
	l->log_offset = p->log_offset;
	l->log_gz = p->log_gz;
	l->log_gz_store = p->log_gz_store;
	l->log = malloc(l->max_samples * log_entry_sz(l));
	l->avg_msec = p->avg_msec;
	l->filename = strdup(filename);
	l->td = p->td;

	/* offset logging is flagged in the per-sample ddir field */
	if (l->log_offset)
		l->log_ddir_mask = LOG_OFFSET_SAMPLE_BIT;

	INIT_FLIST_HEAD(&l->chunk_list);

	/* log compression needs a thread_data to queue work against */
	if (l->log_gz && !p->td)
		l->log_gz = 0;
	else if (l->log_gz) {
		pthread_mutex_init(&l->chunk_lock, NULL);
		p->td->flags |= TD_F_COMPRESS_LOG;
	}

	*log = l;
}
581 | ||
2e802282 JA |
#ifdef CONFIG_SETVBUF
/*
 * Give the stream a large (1MB) fully-buffered stdio buffer. The caller
 * owns the returned buffer, must keep it alive until the stream is
 * closed, and releases it with clear_file_buffer().
 */
static void *set_file_buffer(FILE *f)
{
	size_t size = 1048576;
	void *buf;

	buf = malloc(size);
	setvbuf(f, buf, _IOFBF, size);
	return buf;
}

static void clear_file_buffer(void *buf)
{
	free(buf);
}
#else
/* no setvbuf support: keep default stdio buffering, nothing to free */
static void *set_file_buffer(FILE *f)
{
	return NULL;
}

static void clear_file_buffer(void *buf)
{
}
#endif
607 | ||
518dac09 | 608 | void free_log(struct io_log *log) |
cb7e0ace JA |
609 | { |
610 | free(log->log); | |
611 | free(log->filename); | |
612 | free(log); | |
613 | } | |
614 | ||
b26317c9 | 615 | static void flush_samples(FILE *f, void *samples, uint64_t sample_size) |
ac9b9101 | 616 | { |
b26317c9 JA |
617 | struct io_sample *s; |
618 | int log_offset; | |
619 | uint64_t i, nr_samples; | |
620 | ||
621 | if (!sample_size) | |
622 | return; | |
623 | ||
624 | s = __get_sample(samples, 0, 0); | |
49e98daa | 625 | log_offset = (s->__ddir & LOG_OFFSET_SAMPLE_BIT) != 0; |
b26317c9 JA |
626 | |
627 | nr_samples = sample_size / __log_entry_sz(log_offset); | |
ac9b9101 | 628 | |
aee2ab67 | 629 | for (i = 0; i < nr_samples; i++) { |
b26317c9 | 630 | s = __get_sample(samples, log_offset, i); |
ac9b9101 | 631 | |
aee2ab67 | 632 | if (!log_offset) { |
ae588852 JA |
633 | fprintf(f, "%lu, %lu, %u, %u\n", |
634 | (unsigned long) s->time, | |
635 | (unsigned long) s->val, | |
b26317c9 | 636 | io_sample_ddir(s), s->bs); |
ae588852 JA |
637 | } else { |
638 | struct io_sample_offset *so = (void *) s; | |
639 | ||
640 | fprintf(f, "%lu, %lu, %u, %u, %llu\n", | |
641 | (unsigned long) s->time, | |
642 | (unsigned long) s->val, | |
b26317c9 | 643 | io_sample_ddir(s), s->bs, |
ae588852 JA |
644 | (unsigned long long) so->offset); |
645 | } | |
ac9b9101 | 646 | } |
aee2ab67 JA |
647 | } |
648 | ||
649 | #ifdef CONFIG_ZLIB | |
97eabb2a JA |
650 | |
/*
 * Work item handed to the compress helper: a snapshot of the log's
 * samples to deflate.
 */
struct iolog_flush_data {
	struct tp_work work;
	struct io_log *log;
	void *samples;
	uint64_t nr_samples;
};

/*
 * One compressed chunk of log data, kept on io_log->chunk_list.
 * 'seq' groups the chunks that belong to a single deflate stream.
 */
struct iolog_compress {
	struct flist_head list;
	void *buf;
	size_t len;
	unsigned int seq;
};

/* output buffer size per compressed chunk (128KB) */
#define GZ_CHUNK 131072
666 | ||
667 | static struct iolog_compress *get_new_chunk(unsigned int seq) | |
668 | { | |
669 | struct iolog_compress *c; | |
670 | ||
671 | c = malloc(sizeof(*c)); | |
672 | INIT_FLIST_HEAD(&c->list); | |
673 | c->buf = malloc(GZ_CHUNK); | |
674 | c->len = 0; | |
675 | c->seq = seq; | |
97eabb2a JA |
676 | return c; |
677 | } | |
678 | ||
/*
 * Free a compressed chunk and its data buffer.
 */
static void free_chunk(struct iolog_compress *ic)
{
	free(ic->buf);
	free(ic);
}
684 | ||
b26317c9 | 685 | static int z_stream_init(z_stream *stream, int gz_hdr) |
aee2ab67 | 686 | { |
b26317c9 JA |
687 | int wbits = 15; |
688 | ||
aee2ab67 JA |
689 | stream->zalloc = Z_NULL; |
690 | stream->zfree = Z_NULL; | |
691 | stream->opaque = Z_NULL; | |
692 | stream->next_in = Z_NULL; | |
693 | ||
1a8e7458 JA |
694 | /* |
695 | * zlib magic - add 32 for auto-detection of gz header or not, | |
696 | * if we decide to store files in a gzip friendly format. | |
697 | */ | |
b26317c9 JA |
698 | if (gz_hdr) |
699 | wbits += 32; | |
700 | ||
701 | if (inflateInit2(stream, wbits) != Z_OK) | |
aee2ab67 JA |
702 | return 1; |
703 | ||
704 | return 0; | |
705 | } | |
706 | ||
1a8e7458 | 707 | struct inflate_chunk_iter { |
aee2ab67 JA |
708 | unsigned int seq; |
709 | void *buf; | |
710 | size_t buf_size; | |
711 | size_t buf_used; | |
712 | size_t chunk_sz; | |
713 | }; | |
714 | ||
b26317c9 | 715 | static void finish_chunk(z_stream *stream, FILE *f, |
1a8e7458 | 716 | struct inflate_chunk_iter *iter) |
aee2ab67 | 717 | { |
aee2ab67 JA |
718 | int ret; |
719 | ||
720 | ret = inflateEnd(stream); | |
721 | if (ret != Z_OK) | |
722 | log_err("fio: failed to end log inflation (%d)\n", ret); | |
723 | ||
b26317c9 | 724 | flush_samples(f, iter->buf, iter->buf_used); |
aee2ab67 JA |
725 | free(iter->buf); |
726 | iter->buf = NULL; | |
727 | iter->buf_size = iter->buf_used = 0; | |
728 | } | |
729 | ||
1a8e7458 JA |
730 | /* |
731 | * Iterative chunk inflation. Handles cases where we cross into a new | |
732 | * sequence, doing flush finish of previous chunk if needed. | |
733 | */ | |
734 | static size_t inflate_chunk(struct iolog_compress *ic, int gz_hdr, FILE *f, | |
735 | z_stream *stream, struct inflate_chunk_iter *iter) | |
aee2ab67 JA |
736 | { |
737 | if (ic->seq != iter->seq) { | |
738 | if (iter->seq) | |
b26317c9 | 739 | finish_chunk(stream, f, iter); |
aee2ab67 | 740 | |
b26317c9 | 741 | z_stream_init(stream, gz_hdr); |
aee2ab67 JA |
742 | iter->seq = ic->seq; |
743 | } | |
744 | ||
745 | stream->avail_in = ic->len; | |
746 | stream->next_in = ic->buf; | |
747 | ||
748 | if (!iter->buf_size) { | |
749 | iter->buf_size = iter->chunk_sz; | |
750 | iter->buf = malloc(iter->buf_size); | |
751 | } | |
752 | ||
753 | while (stream->avail_in) { | |
b26317c9 | 754 | size_t this_out = iter->buf_size - iter->buf_used; |
aee2ab67 JA |
755 | int err; |
756 | ||
b26317c9 | 757 | stream->avail_out = this_out; |
aee2ab67 JA |
758 | stream->next_out = iter->buf + iter->buf_used; |
759 | ||
760 | err = inflate(stream, Z_NO_FLUSH); | |
761 | if (err < 0) { | |
762 | log_err("fio: failed inflating log: %d\n", err); | |
763 | break; | |
764 | } | |
765 | ||
b26317c9 JA |
766 | iter->buf_used += this_out - stream->avail_out; |
767 | ||
768 | if (!stream->avail_out) { | |
769 | iter->buf_size += iter->chunk_sz; | |
770 | iter->buf = realloc(iter->buf, iter->buf_size); | |
771 | continue; | |
772 | } | |
773 | ||
774 | if (err == Z_STREAM_END) | |
775 | break; | |
aee2ab67 JA |
776 | } |
777 | ||
f302710c | 778 | return (void *) stream->next_in - ic->buf; |
aee2ab67 JA |
779 | } |
780 | ||
1a8e7458 JA |
/*
 * Inflate stored compressed chunks, or write them directly to the log
 * file if so instructed.
 */
static void inflate_gz_chunks(struct io_log *log, FILE *f)
{
	struct inflate_chunk_iter iter = { .chunk_sz = log->log_gz, };
	z_stream stream;

	while (!flist_empty(&log->chunk_list)) {
		struct iolog_compress *ic;

		ic = flist_first_entry(&log->chunk_list, struct iolog_compress, list);
		flist_del(&ic->list);

		if (log->log_gz_store) {
			/* storing compressed: write the chunk verbatim */
			size_t ret;

			ret = fwrite(ic->buf, ic->len, 1, f);
			if (ret != 1 || ferror(f))
				log_err("fio: error writing compressed log\n");
		} else
			inflate_chunk(ic, log->log_gz_store, f, &stream, &iter);

		free_chunk(ic);
	}

	/* flush whatever the last inflate stream produced */
	if (iter.seq) {
		finish_chunk(&stream, f, &iter);
		free(iter.buf);
	}
}
813 | ||
1a8e7458 JA |
814 | /* |
815 | * Open compressed log file and decompress the stored chunks and | |
816 | * write them to stdout. The chunks are stored sequentially in the | |
817 | * file, so we iterate over them and do them one-by-one. | |
818 | */ | |
b26317c9 JA |
819 | int iolog_file_inflate(const char *file) |
820 | { | |
1a8e7458 | 821 | struct inflate_chunk_iter iter = { .chunk_sz = 64 * 1024 * 1024, }; |
b26317c9 JA |
822 | struct iolog_compress ic; |
823 | z_stream stream; | |
824 | struct stat sb; | |
fd771971 | 825 | ssize_t ret; |
f302710c JA |
826 | size_t total; |
827 | void *buf; | |
b26317c9 JA |
828 | FILE *f; |
829 | ||
b26317c9 JA |
830 | f = fopen(file, "r"); |
831 | if (!f) { | |
832 | perror("fopen"); | |
833 | return 1; | |
834 | } | |
835 | ||
fd771971 JA |
836 | if (stat(file, &sb) < 0) { |
837 | fclose(f); | |
838 | perror("stat"); | |
839 | return 1; | |
840 | } | |
841 | ||
f302710c | 842 | ic.buf = buf = malloc(sb.st_size); |
b26317c9 | 843 | ic.len = sb.st_size; |
b26317c9 JA |
844 | ic.seq = 1; |
845 | ||
846 | ret = fread(ic.buf, ic.len, 1, f); | |
847 | if (ret < 0) { | |
848 | perror("fread"); | |
849 | fclose(f); | |
850 | return 1; | |
851 | } else if (ret != 1) { | |
852 | log_err("fio: short read on reading log\n"); | |
853 | fclose(f); | |
854 | return 1; | |
855 | } | |
856 | ||
857 | fclose(f); | |
858 | ||
1a8e7458 JA |
859 | /* |
860 | * Each chunk will return Z_STREAM_END. We don't know how many | |
861 | * chunks are in the file, so we just keep looping and incrementing | |
862 | * the sequence number until we have consumed the whole compressed | |
863 | * file. | |
864 | */ | |
f302710c JA |
865 | total = ic.len; |
866 | do { | |
867 | size_t ret; | |
868 | ||
1a8e7458 | 869 | ret = inflate_chunk(&ic, 1, stdout, &stream, &iter); |
f302710c JA |
870 | total -= ret; |
871 | if (!total) | |
872 | break; | |
873 | ||
874 | ic.seq++; | |
875 | ic.len -= ret; | |
876 | ic.buf += ret; | |
877 | } while (1); | |
b26317c9 JA |
878 | |
879 | if (iter.seq) { | |
880 | finish_chunk(&stream, stdout, &iter); | |
881 | free(iter.buf); | |
882 | } | |
883 | ||
f302710c | 884 | free(buf); |
b26317c9 JA |
885 | return 0; |
886 | } | |
887 | ||
aee2ab67 JA |
#else

/* without zlib there are never any compressed chunks to inflate */
static void inflate_gz_chunks(struct io_log *log, FILE *f)
{
}

#endif
895 | ||
/*
 * Write an io_log out to its file: first any stored compressed chunks,
 * then the still-uncompressed samples.
 */
void flush_log(struct io_log *log)
{
	void *buf;
	FILE *f;

	f = fopen(log->filename, "w");
	if (!f) {
		perror("fopen log");
		return;
	}

	buf = set_file_buffer(f);

	inflate_gz_chunks(log, f);

	flush_samples(f, log->log, log->nr_samples * log_entry_sz(log));

	fclose(f);
	/* the stdio buffer must outlive the stream: free it only now */
	clear_file_buffer(buf);
}
916 | ||
cb7e0ace | 917 | static int finish_log(struct thread_data *td, struct io_log *log, int trylock) |
ac9b9101 | 918 | { |
aee2ab67 JA |
919 | if (td->tp_data) |
920 | iolog_flush(log, 1); | |
921 | ||
243bfe19 | 922 | if (trylock) { |
cb7e0ace | 923 | if (fio_trylock_file(log->filename)) |
243bfe19 JA |
924 | return 1; |
925 | } else | |
cb7e0ace | 926 | fio_lock_file(log->filename); |
243bfe19 | 927 | |
aee2ab67 | 928 | if (td->client_type == FIO_CLIENT_TYPE_GUI) |
cb7e0ace | 929 | fio_send_iolog(td, log, log->filename); |
aee2ab67 JA |
930 | else |
931 | flush_log(log); | |
243bfe19 | 932 | |
cb7e0ace | 933 | fio_unlock_file(log->filename); |
518dac09 | 934 | free_log(log); |
243bfe19 | 935 | return 0; |
ac9b9101 JA |
936 | } |
937 | ||
aee2ab67 JA |
#ifdef CONFIG_ZLIB

/*
 * Invoked from our compress helper thread, when logging would have exceeded
 * the specified memory limitation. Compresses the previously stored
 * entries.
 */
static int gz_work(struct tp_work *work)
{
	struct iolog_flush_data *data;
	struct iolog_compress *c;
	struct flist_head list;
	unsigned int seq;
	z_stream stream;
	size_t total = 0;
	int ret;

	INIT_FLIST_HEAD(&list);

	data = container_of(work, struct iolog_flush_data, work);

	stream.zalloc = Z_NULL;
	stream.zfree = Z_NULL;
	stream.opaque = Z_NULL;

	ret = deflateInit(&stream, Z_DEFAULT_COMPRESSION);
	if (ret != Z_OK) {
		log_err("fio: failed to init gz stream\n");
		return 0;
	}

	/* all chunks produced by this flush share one sequence number */
	seq = ++data->log->chunk_seq;

	stream.next_in = (void *) data->samples;
	stream.avail_in = data->nr_samples * log_entry_sz(data->log);

	/* consume all input, one GZ_CHUNK sized output chunk at a time */
	do {
		c = get_new_chunk(seq);
		stream.avail_out = GZ_CHUNK;
		stream.next_out = c->buf;
		ret = deflate(&stream, Z_NO_FLUSH);
		if (ret < 0) {
			log_err("fio: deflate log (%d)\n", ret);
			break;
		}

		c->len = GZ_CHUNK - stream.avail_out;
		flist_add_tail(&c->list, &list);
		total += c->len;
	} while (stream.avail_in);

	/* finish the stream, first reusing the tail of the last chunk */
	stream.next_out = c->buf + c->len;
	stream.avail_out = GZ_CHUNK - c->len;

	ret = deflate(&stream, Z_FINISH);
	if (ret == Z_STREAM_END)
		c->len = GZ_CHUNK - stream.avail_out;
	else {
		/* tail didn't fit: add chunks until the stream ends */
		do {
			c = get_new_chunk(seq);
			stream.avail_out = GZ_CHUNK;
			stream.next_out = c->buf;
			ret = deflate(&stream, Z_FINISH);
			c->len = GZ_CHUNK - stream.avail_out;
			flist_add_tail(&c->list, &list);
		} while (ret != Z_STREAM_END);
	}

	ret = deflateEnd(&stream);
	if (ret != Z_OK)
		log_err("fio: deflateEnd %d\n", ret);

	free(data->samples);

	/* publish the new chunks under the log's chunk lock */
	if (!flist_empty(&list)) {
		pthread_mutex_lock(&data->log->chunk_lock);
		flist_splice_tail(&list, &data->log->chunk_list);
		pthread_mutex_unlock(&data->log->chunk_lock);
	}

	/* sync callers wait on the cv; async callers own freeing 'data' */
	if (work->wait) {
		work->done = 1;
		pthread_cond_signal(&work->cv);
	} else
		free(data);

	return 0;
}
1026 | ||
1a8e7458 JA |
1027 | /* |
1028 | * Queue work item to compress the existing log entries. We copy the | |
1029 | * samples, and reset the log sample count to 0 (so the logging will | |
1030 | * continue to use the memory associated with the log). If called with | |
1031 | * wait == 1, will not return until the log compression has completed. | |
1032 | */ | |
aee2ab67 JA |
1033 | int iolog_flush(struct io_log *log, int wait) |
1034 | { | |
b26317c9 | 1035 | struct tp_data *tdat = log->td->tp_data; |
aee2ab67 JA |
1036 | struct iolog_flush_data *data; |
1037 | size_t sample_size; | |
1038 | ||
1039 | data = malloc(sizeof(*data)); | |
1040 | if (!data) | |
1041 | return 1; | |
1042 | ||
1043 | data->log = log; | |
1044 | ||
1045 | sample_size = log->nr_samples * log_entry_sz(log); | |
1046 | data->samples = malloc(sample_size); | |
1047 | if (!data->samples) { | |
1048 | free(data); | |
1049 | return 1; | |
1050 | } | |
1051 | ||
1052 | memcpy(data->samples, log->log, sample_size); | |
1053 | data->nr_samples = log->nr_samples; | |
1054 | data->work.fn = gz_work; | |
1055 | log->nr_samples = 0; | |
1056 | ||
1057 | if (wait) { | |
1058 | pthread_mutex_init(&data->work.lock, NULL); | |
1059 | pthread_cond_init(&data->work.cv, NULL); | |
1060 | data->work.wait = 1; | |
1061 | } else | |
1062 | data->work.wait = 0; | |
1063 | ||
b26317c9 | 1064 | tp_queue_work(tdat, &data->work); |
aee2ab67 JA |
1065 | |
1066 | if (wait) { | |
1067 | pthread_mutex_lock(&data->work.lock); | |
1068 | while (!data->work.done) | |
1069 | pthread_cond_wait(&data->work.cv, &data->work.lock); | |
1070 | pthread_mutex_unlock(&data->work.lock); | |
1071 | free(data); | |
1072 | } | |
1073 | ||
1074 | return 0; | |
1075 | } | |
1076 | ||
1077 | #else | |
1078 | ||
/*
 * Stub used when fio is built without zlib (CONFIG_ZLIB unset): log
 * compression is unavailable, so always report failure and leave the
 * log untouched. Both parameters are unused here.
 */
int iolog_flush(struct io_log *log, int wait)
{
	return 1;
}
1083 | ||
1084 | #endif | |
1085 | ||
cb7e0ace | 1086 | static int write_iops_log(struct thread_data *td, int try) |
905e3d4f | 1087 | { |
cb7e0ace | 1088 | struct io_log *log = td->iops_log; |
905e3d4f JA |
1089 | |
1090 | if (!log) | |
1091 | return 0; | |
1092 | ||
cb7e0ace | 1093 | return finish_log(td, log, try); |
905e3d4f JA |
1094 | } |
1095 | ||
1096 | static int write_slat_log(struct thread_data *td, int try) | |
1097 | { | |
cb7e0ace | 1098 | struct io_log *log = td->slat_log; |
905e3d4f | 1099 | |
cb7e0ace JA |
1100 | if (!log) |
1101 | return 0; | |
1102 | ||
1103 | return finish_log(td, log, try); | |
905e3d4f JA |
1104 | } |
1105 | ||
1106 | static int write_clat_log(struct thread_data *td, int try) | |
1107 | { | |
cb7e0ace | 1108 | struct io_log *log = td->clat_log; |
905e3d4f | 1109 | |
cb7e0ace JA |
1110 | if (!log) |
1111 | return 0; | |
1112 | ||
1113 | return finish_log(td, log, try); | |
905e3d4f JA |
1114 | } |
1115 | ||
1116 | static int write_lat_log(struct thread_data *td, int try) | |
1117 | { | |
cb7e0ace JA |
1118 | struct io_log *log = td->lat_log; |
1119 | ||
1120 | if (!log) | |
1121 | return 0; | |
905e3d4f | 1122 | |
cb7e0ace | 1123 | return finish_log(td, log, try); |
905e3d4f JA |
1124 | } |
1125 | ||
1126 | static int write_bandw_log(struct thread_data *td, int try) | |
1127 | { | |
cb7e0ace JA |
1128 | struct io_log *log = td->bw_log; |
1129 | ||
1130 | if (!log) | |
1131 | return 0; | |
905e3d4f | 1132 | |
cb7e0ace | 1133 | return finish_log(td, log, try); |
905e3d4f JA |
1134 | } |
1135 | ||
/*
 * One bit per log type, used by fio_writeout_logs() to track which
 * logs have been flushed. ALL_LOG_NR is the number of entries in
 * log_types[] below.
 */
enum {
	BW_LOG_MASK	= 1 << 0,
	LAT_LOG_MASK	= 1 << 1,
	SLAT_LOG_MASK	= 1 << 2,
	CLAT_LOG_MASK	= 1 << 3,
	IOPS_LOG_MASK	= 1 << 4,

	ALL_LOG_NR	= 5,
};
1145 | ||
/*
 * Table entry tying a log's completion bit to its writer function.
 * The writer takes (td, try) and returns 0 once the log has been
 * successfully flushed.
 */
struct log_type {
	unsigned int mask;
	int (*fn)(struct thread_data *, int);
};
1150 | ||
/*
 * Dispatch table of all per-thread logs to flush at job end. The entry
 * count here must stay in sync with ALL_LOG_NR, and each mask must be
 * unique — fio_writeout_logs() relies on both.
 */
static struct log_type log_types[] = {
	{
		.mask	= BW_LOG_MASK,
		.fn	= write_bandw_log,
	},
	{
		.mask	= LAT_LOG_MASK,
		.fn	= write_lat_log,
	},
	{
		.mask	= SLAT_LOG_MASK,
		.fn	= write_slat_log,
	},
	{
		.mask	= CLAT_LOG_MASK,
		.fn	= write_clat_log,
	},
	{
		.mask	= IOPS_LOG_MASK,
		.fn	= write_iops_log,
	},
};
1173 | ||
1174 | void fio_writeout_logs(struct thread_data *td) | |
1175 | { | |
ea5409f9 | 1176 | unsigned int log_mask = 0; |
905e3d4f JA |
1177 | unsigned int log_left = ALL_LOG_NR; |
1178 | int old_state, i; | |
1179 | ||
1180 | old_state = td_bump_runstate(td, TD_FINISHING); | |
1181 | ||
1182 | finalize_logs(td); | |
1183 | ||
1184 | while (log_left) { | |
1185 | int prev_log_left = log_left; | |
1186 | ||
1187 | for (i = 0; i < ALL_LOG_NR && log_left; i++) { | |
1188 | struct log_type *lt = &log_types[i]; | |
1189 | int ret; | |
1190 | ||
ea5409f9 | 1191 | if (!(log_mask & lt->mask)) { |
905e3d4f JA |
1192 | ret = lt->fn(td, log_left != 1); |
1193 | if (!ret) { | |
1194 | log_left--; | |
ea5409f9 | 1195 | log_mask |= lt->mask; |
905e3d4f JA |
1196 | } |
1197 | } | |
1198 | } | |
1199 | ||
1200 | if (prev_log_left == log_left) | |
1201 | usleep(5000); | |
1202 | } | |
1203 | ||
1204 | td_restore_runstate(td, old_state); | |
1205 | } |