#!/usr/bin/env python

# module to parse fio histogram log files, not using pandas
# runs in python v2 or v3
# to get help with the CLI: $ python fio-histo-log-pctiles.py -h
# this can be run standalone as a script, but its functions are also callable from other python code
# assumes all threads run for the same time duration
# assumes all threads are doing the same thing for the entire run

# percentiles:
#   0 - min latency
#   50 - median
#   100 - max latency

# TO-DO:
#   separate read and write stats for randrw mixed workload
#   report average latency if needed
#   prove that it works (partially done with unit tests)

# to run unit tests, set UNITTEST environment variable to anything
# if you do this, don't pass normal CLI parameters to it
# otherwise it runs the CLI
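
# example invocation (the log filenames below are illustrative only;
# fio derives histogram log names from the write_hist_log job parameter):
#   $ python fio-histo-log-pctiles.py --time-quantum 5 \
#         fio_clat_hist.1.log fio_clat_hist.2.log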

import sys, os, math
from copy import deepcopy

import unittest2

msec_per_sec = 1000
nsec_per_usec = 1000

class FioHistoLogExc(Exception):
    pass

# if there is an error, print message, print syntax, and exit with error status

def usage(msg):
    print('ERROR: ' + msg)
    print('usage: fio-histo-log-pctiles.py ')
    print(' [ --fio-version 2|3 (default 3) ]')
    print(' [ --bucket-groups positive-int (default 29) ]')
    print(' [ --bucket-bits small-positive-int (default 6) ]')
    print(' [ --percentiles p1,p2,...,pN (default 0,50,90,95,99,100) ]')
    print(' [ --time-quantum positive-int (default 1 sec) ]')
    print(' [ --output-unit msec|usec (default usec) ]')
    print(' log-file1 log-file2 ...')
    sys.exit(1)


# convert histogram log file into a list of
# (time_ms, direction, bsz, buckets) tuples where
#  - time_ms is the time in msec at which the log record was written
#  - direction is 0 (read) or 1 (write)
#  - bsz is block size (not used)
#  - buckets is a CSV list of counters that make up the histogram
# caller decides if the expected number of counters are present
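
# for example (this record format is exercised by the unit tests below),
# the log line
#   1234, 0, 4096, 1, 2, 3, 4
# parses to the tuple (1234, 0, 4096, [1, 2, 3, 4]):
# a read histogram written 1234 msec into the run, block size 4096,
# with 4 latency bucket counters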


def exception_suffix( record_num, pathname ):
    return 'in histogram record %d file %s' % (record_num, pathname)

# log file parser raises FioHistoLogExc exceptions
# it returns histogram buckets in whatever unit fio uses

def parse_hist_file(logfn, buckets_per_interval):
    max_timestamp_ms = 0.0

    with open(logfn, 'r') as f:
        records = [ l.strip() for l in f.readlines() ]
        intervals = []
        for k, r in enumerate(records):
            if r == '':
                continue
            tokens = r.split(',')
            try:
                int_tokens = [ int(t) for t in tokens ]
            except ValueError as e:
                raise FioHistoLogExc('non-integer value %s' % exception_suffix(k+1, logfn))

            neg_ints = list(filter( lambda tk : tk < 0, int_tokens ))
            if len(neg_ints) > 0:
                raise FioHistoLogExc('negative integer value %s' % exception_suffix(k+1, logfn))

            if len(int_tokens) < 3:
                raise FioHistoLogExc('too few numbers %s' % exception_suffix(k+1, logfn))

            time_ms = int_tokens[0]
            if time_ms > max_timestamp_ms:
                max_timestamp_ms = time_ms

            direction = int_tokens[1]
            if direction != 0 and direction != 1:
                raise FioHistoLogExc('invalid I/O direction %s' % exception_suffix(k+1, logfn))

            bsz = int_tokens[2]
            if bsz > (1 << 24):
                raise FioHistoLogExc('block size too large %s' % exception_suffix(k+1, logfn))

            buckets = int_tokens[3:]
            if len(buckets) != buckets_per_interval:
                raise FioHistoLogExc('%d buckets per interval but %d expected in %s' %
                        (len(buckets), buckets_per_interval, exception_suffix(k+1, logfn)))
            intervals.append((time_ms, direction, bsz, buckets))
    if len(intervals) == 0:
        raise FioHistoLogExc('no records in %s' % logfn)
    return (intervals, max_timestamp_ms)


# compute time range for each bucket index in histogram record
# see comments in https://github.com/axboe/fio/blob/master/stat.h
# for description of bucket groups and buckets
# fio v3 bucket ranges are in nanosec (since response times are measured in nanosec)
# but we convert fio v3 nanosecs to floating-point microseconds
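# for illustration (matching the time_ranges unit test below),
# time_ranges(3, 2) for fio v3 returns bucket boundaries in usec of
#   [0.000, 0.001], [0.001, 0.002],   <- group 0, width 1 nsec per bucket
#   [0.002, 0.003], [0.003, 0.004],   <- group 1, same width
#   [0.004, 0.006], [0.006, 0.008]    <- group 2 and later double the width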

def time_ranges(groups, counters_per_group, fio_version=3):
    bucket_width = 1
    bucket_base = 0
    bucket_intervals = []
    for g in range(0, groups):
        for b in range(0, counters_per_group):
            rmin = float(bucket_base)
            rmax = rmin + bucket_width
            if fio_version == 3:
                rmin /= nsec_per_usec
                rmax /= nsec_per_usec
            bucket_intervals.append( [rmin, rmax] )
            bucket_base += bucket_width
        if g != 0:
            bucket_width *= 2
    return bucket_intervals


# compute number of time quantum intervals in the test

def get_time_intervals(time_quantum, max_timestamp_ms):
    # round down to nearest second
    max_timestamp = max_timestamp_ms // msec_per_sec
    # round up to nearest whole multiple of time_quantum
    time_interval_count = (max_timestamp + time_quantum) // time_quantum
    end_time = time_interval_count * time_quantum
    return (end_time, time_interval_count)
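
# for example (values taken from the align_histo_log unit test below):
# with time_quantum = 5 sec and max_timestamp_ms = 7000,
# max_timestamp = 7, time_interval_count = (7 + 5) // 5 = 2, end_time = 10 sec,
# so the run is covered by the two intervals [0,5) and [5,10) sec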

# align raw histogram log data to time quantum so
# we can then combine histograms from different threads with addition
# for randrw workload we count both reads and writes in same output bucket
# but we separate reads and writes for purposes of calculating
# end time for histogram record.
# this requires us to weight a raw histogram bucket by the
# fraction of time quantum that the bucket overlaps the current
# time quantum interval
# for example, if we have a bucket with 515 samples for time interval
# [ 1010, 2014 ] msec since start of test, and time quantum is 1 sec, then
# for time quantum interval [ 1000, 2000 ] msec, the overlap is
# (2000 - 1010) / (2000 - 1000) = 0.99
# so the contribution of this bucket to this time quantum is
# 515 x 0.99 = 509.85

def align_histo_log(raw_histogram_log, time_quantum, bucket_count, max_timestamp_ms):

    # slice up test time into intervals of time_quantum seconds

    (end_time, time_interval_count) = get_time_intervals(time_quantum, max_timestamp_ms)
    time_qtm_ms = time_quantum * msec_per_sec
    end_time_ms = end_time * msec_per_sec
    aligned_intervals = []
    for j in range(0, time_interval_count):
        aligned_intervals.append((
            j * time_qtm_ms,
            [ 0.0 for _ in range(0, bucket_count) ] ))

    log_record_count = len(raw_histogram_log)
    for k, record in enumerate(raw_histogram_log):

        # find next record with same direction to get end-time
        # have to avoid going past end of array
        # for fio randrw workload,
        # we have read and write records on same time interval
        # sometimes read and write records are in opposite order
        # assertion checks that next read/write record
        # can be separated by at most 2 other records

        (time_msec, direction, sz, interval_buckets) = record
        if k+1 < log_record_count:
            (time_msec_end, direction2, _, _) = raw_histogram_log[k+1]
            if direction2 != direction:
                if k+2 < log_record_count:
                    (time_msec_end, direction2, _, _) = raw_histogram_log[k+2]
                    if direction2 != direction:
                        if k+3 < log_record_count:
                            (time_msec_end, direction2, _, _) = raw_histogram_log[k+3]
                            assert direction2 == direction
                        else:
                            time_msec_end = end_time_ms
                else:
                    time_msec_end = end_time_ms
        else:
            time_msec_end = end_time_ms

        # calculate first quantum that overlaps this histogram record

        qtm_start_ms = (time_msec // time_qtm_ms) * time_qtm_ms
        qtm_end_ms = ((time_msec + time_qtm_ms) // time_qtm_ms) * time_qtm_ms
        qtm_index = qtm_start_ms // time_qtm_ms

        # for each quantum that overlaps this histogram record's time interval

        while qtm_start_ms < time_msec_end:  # while quantum overlaps record

            # calculate fraction of time that this quantum
            # overlaps histogram record's time interval

            overlap_start = max(qtm_start_ms, time_msec)
            overlap_end = min(qtm_end_ms, time_msec_end)
            weight = float(overlap_end - overlap_start)
            weight /= (time_msec_end - time_msec)
            (_, aligned_histogram) = aligned_intervals[qtm_index]
            for bx, b in enumerate(interval_buckets):
                weighted_bucket = weight * b
                aligned_histogram[bx] += weighted_bucket

            # advance to the next time quantum

            qtm_start_ms += time_qtm_ms
            qtm_end_ms += time_qtm_ms
            qtm_index += 1

    return aligned_intervals

# add histogram in "source" to histogram in "target"
# it is assumed that the 2 histograms are precisely time-aligned

def add_to_histo_from( target, source ):
    for b in range(0, len(source)):
        target[b] += source[b]

# compute percentiles
# inputs:
#   buckets: histogram bucket array
#   wanted: list of floating-pt percentiles to calculate
#   time_ranges: [tmin,tmax) time interval for each bucket
# returns None if no I/O reported.
# otherwise we would be dividing by zero
# think of buckets as probability distribution function
# and this loop is integrating to get cumulative distribution function
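
# for illustration (this mirrors the get_pctiles unit test below):
# with 128 buckets that each hold 100 samples, the CDF rises linearly,
# so the 0th percentile is reached in bucket 0, the 50th percentile in
# bucket 64, and the 100th percentile in the last bucket; get_pctiles
# reports the upper edge (tmax) of the bucket where each requested
# percentile is first reached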

def get_pctiles(buckets, wanted, time_ranges):

    # get total of IO requests done
    total_ios = 0
    for io_count in buckets:
        total_ios += io_count

    # don't return percentiles if no I/O was done during interval
    if total_ios == 0.0:
        return None

    pctile_count = len(wanted)

    # results returned as dictionary keyed by percentile
    pctile_result = {}

    # index of next percentile in list
    pctile_index = 0

    # next percentile
    next_pctile = wanted[pctile_index]

    # when the requested percentile is 100.0, treat cumulative fractions this
    # close to 100 as 100, so floating-point error cannot prevent loop exit
    almost_100 = 99.9999

    total_so_far = 0
    for b, io_count in enumerate(buckets):
        total_so_far += io_count
        pct_lt = 100.0 * float(total_so_far) / total_ios
        # a single bucket could satisfy multiple pctiles
        # so this must be a while loop
        # consider both the 0-percentile (min latency)
        # and 100-percentile (max latency) case here
        while ((next_pctile == 100.0 and pct_lt >= almost_100) or
               (next_pctile < 100.0 and pct_lt > next_pctile)):
            # FIXME: interpolate between these fractions
            range_max_time = time_ranges[b][1]
            pctile_result[next_pctile] = range_max_time
            pctile_index += 1
            if pctile_index == pctile_count:
                break
            next_pctile = wanted[pctile_index]
        if pctile_index == pctile_count:
            break
    assert pctile_index == pctile_count
    return pctile_result


# parse parameters
# returns a tuple of command line parameters
# parameters keep the default values below unless overridden on the command line

def parse_cli_params():

    # default values for input parameters

    fio_version = 3       # we are using fio 3.x now
    bucket_groups = None  # defaulting comes later
    bucket_bits = 6       # default in fio 3.x
    pctiles_wanted = [ 0, 50, 90, 95, 99, 100 ]
    time_quantum = 1
    output_unit = 'usec'

    # parse command line parameters

    argindex = 1
    argct = len(sys.argv)
    if argct < 2:
        usage('must supply at least one histogram log file')
    while argindex < argct:
        if argct < argindex + 2:
            break
        pname = sys.argv[argindex]
        pval = sys.argv[argindex+1]
        if not pname.startswith('--'):
            break
        argindex += 2
        pname = pname[2:]

        if pname == 'bucket-groups':
            bucket_groups = int(pval)
        elif pname == 'bucket-bits':
            bucket_bits = int(pval)
        elif pname == 'time-quantum':
            time_quantum = int(pval)
        elif pname == 'percentiles':
            pctiles_wanted = [ float(p) for p in pval.split(',') ]
        elif pname == 'output-unit':
            if pval == 'msec' or pval == 'usec':
                output_unit = pval
            else:
                usage('output-unit must be usec (microseconds) or msec (milliseconds)')
        elif pname == 'fio-version':
            if pval != '2' and pval != '3':
                usage('invalid fio version, must be 2 or 3')
            fio_version = int(pval)
        else:
            usage('invalid parameter name --%s' % pname)

    if not bucket_groups:
        # default changes based on fio version
        if fio_version == 2:
            bucket_groups = 19
        else:
            # default in fio 3.x
            bucket_groups = 29

    filename_list = sys.argv[argindex:]
    for f in filename_list:
        if not os.path.exists(f):
            usage('file %s does not exist' % f)
    return (bucket_groups, bucket_bits, fio_version, pctiles_wanted,
            filename_list, time_quantum, output_unit)


# this is really the main program

def compute_percentiles_from_logs():
    (bucket_groups, bucket_bits, fio_version, pctiles_wanted,
     file_list, time_quantum, output_unit) = parse_cli_params()

    print('bucket groups = %d' % bucket_groups)
    print('bucket bits = %d' % bucket_bits)
    print('time quantum = %d sec' % time_quantum)
    print('percentiles = %s' % ','.join([ str(p) for p in pctiles_wanted ]))
    buckets_per_group = 1 << bucket_bits
    print('buckets per group = %d' % buckets_per_group)
    buckets_per_interval = buckets_per_group * bucket_groups
    print('buckets per interval = %d ' % buckets_per_interval)
    bucket_index_range = range(0, buckets_per_interval)
    if time_quantum == 0:
        usage('time-quantum must be a positive number of seconds')
    print('output unit = ' + output_unit)
    if output_unit == 'msec':
        time_divisor = 1000.0
    elif output_unit == 'usec':
        time_divisor = 1.0

    # calculate response time interval associated with each histogram bucket

    bucket_times = time_ranges(bucket_groups, buckets_per_group, fio_version=fio_version)

    # construct template for each histogram bucket array with buckets all zeroes
    # we just copy this for each new histogram

    zeroed_buckets = [ 0.0 for r in bucket_index_range ]

    # print CSV header just like fiologparser_hist does

    header = 'msec, '
    for p in pctiles_wanted:
        header += '%3.1f, ' % p
    print('time (millisec), percentiles in increasing order with values in ' + output_unit)
    print(header)

    # parse the histogram logs
    # assumption: within each log, records have monotonically increasing timestamps
    # assumption: time ranges do not overlap for a single thread's records
    #   (exception: if randrw workload, then there is a read and a write
    #    record for the same time interval)

    max_timestamp_all_logs = 0
    hist_files = {}
    for fn in file_list:
        try:
            (hist_files[fn], max_timestamp_ms) = parse_hist_file(fn, buckets_per_interval)
        except FioHistoLogExc as e:
            usage(str(e))
        max_timestamp_all_logs = max(max_timestamp_all_logs, max_timestamp_ms)

    (end_time, time_interval_count) = get_time_intervals(time_quantum, max_timestamp_all_logs)
    all_threads_histograms = [ ((j*time_quantum*msec_per_sec), deepcopy(zeroed_buckets))
                               for j in range(0, time_interval_count) ]

    for logfn in hist_files.keys():
        aligned_per_thread = align_histo_log(hist_files[logfn],
                                             time_quantum,
                                             buckets_per_interval,
                                             max_timestamp_all_logs)
        for t in range(0, time_interval_count):
            (_, all_threads_histo_t) = all_threads_histograms[t]
            (_, log_histo_t) = aligned_per_thread[t]
            pct = get_pctiles(log_histo_t, pctiles_wanted, bucket_times)
            add_to_histo_from( all_threads_histo_t, log_histo_t )

    print('percentiles for entire set of threads')
    for (t_msec, all_threads_histo_t) in all_threads_histograms:
        record = '%d, ' % t_msec
        pct = get_pctiles(all_threads_histo_t, pctiles_wanted, bucket_times)
        if not pct:
            for w in pctiles_wanted:
                record += ', '
        else:
            pct_keys = [ k for k in pct.keys() ]
            pct_values = [ str(pct[wanted]/time_divisor) for wanted in sorted(pct_keys) ]
            record += ', '.join(pct_values)
        print(record)



# end of MAIN PROGRAM



##### below are unit tests ##############

import tempfile, shutil
from os.path import join
should_not_get_here = False

class Test(unittest2.TestCase):
    tempdir = None

    # a little less typing please
    def A(self, boolean_val):
        self.assertTrue(boolean_val)

    # initialize unit test environment

    @classmethod
    def setUpClass(cls):
        d = tempfile.mkdtemp()
        Test.tempdir = d

    # remove anything left by unit test environment
    # unless user sets UNITTEST_LEAVE_FILES environment variable

    @classmethod
    def tearDownClass(cls):
        if not os.getenv("UNITTEST_LEAVE_FILES"):
            shutil.rmtree(cls.tempdir)

    def setUp(self):
        self.fn = join(Test.tempdir, self.id())

    def test_a_add_histos(self):
        a = [ 1.0, 2.0 ]
        b = [ 1.5, 2.5 ]
        add_to_histo_from( a, b )
        self.A(a == [2.5, 4.5])
        self.A(b == [1.5, 2.5])

    def test_b1_parse_log(self):
        with open(self.fn, 'w') as f:
            f.write('1234, 0, 4096, 1, 2, 3, 4\n')
            f.write('5678,1,16384,5,6,7,8 \n')
        (raw_histo_log, max_timestamp) = parse_hist_file(self.fn, 4)  # 4 buckets per interval
        self.A(len(raw_histo_log) == 2 and max_timestamp == 5678)
        (time_ms, direction, bsz, histo) = raw_histo_log[0]
        self.A(time_ms == 1234 and direction == 0 and bsz == 4096 and histo == [ 1, 2, 3, 4 ])
        (time_ms, direction, bsz, histo) = raw_histo_log[1]
        self.A(time_ms == 5678 and direction == 1 and bsz == 16384 and histo == [ 5, 6, 7, 8 ])

    def test_b2_parse_empty_log(self):
        with open(self.fn, 'w') as f:
            pass
        try:
            (raw_histo_log, max_timestamp_ms) = parse_hist_file(self.fn, 4)
            self.A(should_not_get_here)
        except FioHistoLogExc as e:
            self.A(str(e).startswith('no records'))

    def test_b3_parse_empty_records(self):
        with open(self.fn, 'w') as f:
            f.write('\n')
            f.write('1234, 0, 4096, 1, 2, 3, 4\n')
            f.write('5678,1,16384,5,6,7,8 \n')
            f.write('\n')
        (raw_histo_log, max_timestamp_ms) = parse_hist_file(self.fn, 4)
        self.A(len(raw_histo_log) == 2 and max_timestamp_ms == 5678)
        (time_ms, direction, bsz, histo) = raw_histo_log[0]
        self.A(time_ms == 1234 and direction == 0 and bsz == 4096 and histo == [ 1, 2, 3, 4 ])
        (time_ms, direction, bsz, histo) = raw_histo_log[1]
        self.A(time_ms == 5678 and direction == 1 and bsz == 16384 and histo == [ 5, 6, 7, 8 ])

    def test_b4_parse_non_int(self):
        with open(self.fn, 'w') as f:
            f.write('12, 0, 4096, 1a, 2, 3, 4\n')
        try:
            (raw_histo_log, _) = parse_hist_file(self.fn, 4)
            self.A(False)
        except FioHistoLogExc as e:
            self.A(str(e).startswith('non-integer'))

    def test_b5_parse_neg_int(self):
        with open(self.fn, 'w') as f:
            f.write('-12, 0, 4096, 1, 2, 3, 4\n')
        try:
            (raw_histo_log, _) = parse_hist_file(self.fn, 4)
            self.A(False)
        except FioHistoLogExc as e:
            self.A(str(e).startswith('negative integer'))

    def test_b6_parse_too_few_int(self):
        with open(self.fn, 'w') as f:
            f.write('0, 0\n')
        try:
            (raw_histo_log, _) = parse_hist_file(self.fn, 4)
            self.A(False)
        except FioHistoLogExc as e:
            self.A(str(e).startswith('too few numbers'))

    def test_b7_parse_invalid_direction(self):
        with open(self.fn, 'w') as f:
            f.write('100, 2, 4096, 1, 2, 3, 4\n')
        try:
            (raw_histo_log, _) = parse_hist_file(self.fn, 4)
            self.A(False)
        except FioHistoLogExc as e:
            self.A(str(e).startswith('invalid I/O direction'))

    def test_b8_parse_bsz_too_big(self):
        with open(self.fn+'_good', 'w') as f:
            f.write('100, 1, %d, 1, 2, 3, 4\n' % (1<<24))
        (raw_histo_log, max_timestamp_ms) = parse_hist_file(self.fn+'_good', 4)
        with open(self.fn+'_bad', 'w') as f:
            f.write('100, 1, 20000000, 1, 2, 3, 4\n')
        try:
            (raw_histo_log, _) = parse_hist_file(self.fn+'_bad', 4)
            self.A(False)
        except FioHistoLogExc as e:
            self.A(str(e).startswith('block size too large'))

    def test_b9_parse_wrong_bucket_count(self):
        with open(self.fn, 'w') as f:
            f.write('100, 1, %d, 1, 2, 3, 4, 5\n' % (1<<24))
        try:
            (raw_histo_log, _) = parse_hist_file(self.fn, 4)
            self.A(False)
        except FioHistoLogExc as e:
            self.A('buckets per interval' in str(e))

    def test_c1_time_ranges(self):
        ranges = time_ranges(3, 2)  # fio_version defaults to 3
        expected_ranges = [  # fio_version 3 buckets are nanoseconds, returned as microseconds
            [0.000, 0.001], [0.001, 0.002],   # first group
            [0.002, 0.003], [0.003, 0.004],   # second group same width
            [0.004, 0.006], [0.006, 0.008]]   # subsequent groups double width
        self.A(ranges == expected_ranges)
        ranges = time_ranges(3, 2, fio_version=3)
        self.A(ranges == expected_ranges)
        ranges = time_ranges(3, 2, fio_version=2)
        expected_ranges_v2 = [ [ 1000.0 * min_or_max for min_or_max in time_range ]
                               for time_range in expected_ranges ]
        self.A(ranges == expected_ranges_v2)
        # see fio V3 stat.h for why 29 groups and 2^6 buckets/group
        normal_ranges_v3 = time_ranges(29, 64)
        # for v3, fio measures in nanoseconds but time_ranges returns microseconds (hence /1000.0)
        self.A(len(normal_ranges_v3) == 29 * 64 and normal_ranges_v3[-1][1] == 64*(1<<(29-1))/1000.0)
        normal_ranges_v2 = time_ranges(19, 64, fio_version=2)
        # for v2, bucket time intervals are measured in microseconds so we have fewer buckets
        self.A(len(normal_ranges_v2) == 19 * 64 and normal_ranges_v2[-1][1] == 64*(1<<(19-1)))

    def test_d1_align_histo_log_1_quantum(self):
        with open(self.fn, 'w') as f:
            f.write('100, 1, 4096, 1, 2, 3, 4')
        (raw_histo_log, max_timestamp_ms) = parse_hist_file(self.fn, 4)
        self.A(max_timestamp_ms == 100)
        aligned_log = align_histo_log(raw_histo_log, 5, 4, max_timestamp_ms)
        self.A(len(aligned_log) == 1)
        (time_ms0, h) = aligned_log[0]
        self.A(time_ms0 == 0 and h == [1.0, 2.0, 3.0, 4.0])

    # we need this to compare 2 lists of floating point numbers for equality
    # because of floating-point imprecision

    def compare_2_floats(self, x, y):
        if x == 0.0 or y == 0.0:
            return (x+y) < 0.0000001
        else:
            return (math.fabs(x-y)/x) < 0.00001

    def is_close(self, buckets, buckets_expected):
        if len(buckets) != len(buckets_expected):
            return False
        compare_buckets = lambda k: self.compare_2_floats(buckets[k], buckets_expected[k])
        indices_close = list(filter(compare_buckets, range(0, len(buckets))))
        return len(indices_close) == len(buckets)

    def test_d2_align_histo_log_2_quantum(self):
        with open(self.fn, 'w') as f:
            f.write('2000, 1, 4096, 1, 2, 3, 4\n')
            f.write('7000, 1, 4096, 1, 2, 3, 4\n')
        (raw_histo_log, max_timestamp_ms) = parse_hist_file(self.fn, 4)
        self.A(max_timestamp_ms == 7000)
        (_, _, _, raw_buckets1) = raw_histo_log[0]
        (_, _, _, raw_buckets2) = raw_histo_log[1]
        aligned_log = align_histo_log(raw_histo_log, 5, 4, max_timestamp_ms)
        self.A(len(aligned_log) == 2)
        (time_ms1, h1) = aligned_log[0]
        (time_ms2, h2) = aligned_log[1]
        # because the first record covers time interval [2000, 7000] msec,
        # we weight it by the fraction falling in each 5-sec quantum: 0.6 then 0.4
        expect1 = [float(b) * 0.6 for b in raw_buckets1]
        expect2 = [float(b) * 0.4 for b in raw_buckets1]
        for e in range(0, len(expect2)):
            expect2[e] += raw_buckets2[e]
        self.A(time_ms1 == 0 and self.is_close(h1, expect1))
        self.A(time_ms2 == 5000 and self.is_close(h2, expect2))

    def test_e1_get_pctiles(self):
        with open(self.fn, 'w') as f:
            buckets = [ 100 for j in range(0, 128) ]
            f.write('9000, 1, 4096, %s\n' % ', '.join([str(b) for b in buckets]))
        (raw_histo_log, max_timestamp_ms) = parse_hist_file(self.fn, 128)
        self.A(max_timestamp_ms == 9000)
        aligned_log = align_histo_log(raw_histo_log, 5, 128, max_timestamp_ms)
        time_intervals = time_ranges(4, 32)
        # since buckets are all equal, the median is halfway through time_intervals
        # and the max latency interval is at the end of time_intervals
        self.A(time_intervals[64][1] == 0.066 and time_intervals[127][1] == 0.256)
        pctiles_wanted = [ 0, 50, 100 ]
        pct_vs_time = []
        for (time_ms, histo) in aligned_log:
            pct_vs_time.append(get_pctiles(histo, pctiles_wanted, time_intervals))
        self.A(pct_vs_time[0] == None)  # no I/O in this time interval
        expected_pctiles = { 0:0.001, 50:0.066, 100:0.256 }
        self.A(pct_vs_time[1] == expected_pctiles)

# we are using this module as a standalone program

if __name__ == '__main__':
    if os.getenv('UNITTEST'):
        sys.exit(unittest2.main())
    else:
        compute_percentiles_from_logs()