tools/hist/fio-histo-log-pctiles.py
#!/usr/bin/env python

# module to parse fio histogram log files, not using pandas
# runs in python v2 or v3
# to get help with the CLI: $ python fio-histo-log-pctiles.py -h
# this can be run standalone as a script but is also importable as a module
# assumes all threads run for the same time duration
# assumes all threads are doing the same thing for the entire run

# percentiles:
#   0 - min latency
#   50 - median
#   100 - max latency

# TO-DO:
#   separate read and write stats for randrw mixed workload
#   report average latency if needed
#   prove that it works (partially done with unit tests)

# to run unit tests, set the UNITTEST environment variable to anything
# if you do this, don't pass normal CLI parameters to it
# otherwise it runs the CLI
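
# e.g. to run the unit tests:  $ UNITTEST=1 python fio-histo-log-pctiles.py
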
import sys, os, math
from copy import deepcopy
import argparse

unittest2_imported = True
try:
    import unittest2
except ImportError:
    unittest2_imported = False

msec_per_sec = 1000
nsec_per_usec = 1000

class FioHistoLogExc(Exception):
    pass

# if there is an error, print the message and exit with error status

def myabort(msg):
    print('ERROR: ' + msg)
    sys.exit(1)

# convert histogram log file into a list of
# (time_ms, direction, bsz, buckets) tuples where
#  - time_ms is the time in msec at which the log record was written
#  - direction is 0 (read) or 1 (write)
#  - bsz is block size (not used)
#  - buckets is a CSV list of counters that make up the histogram
# caller decides if the expected number of counters is present
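
# for example (values taken from the unit tests below), the log line
#   1234, 0, 4096, 1, 2, 3, 4
# parses to the tuple (1234, 0, 4096, [1, 2, 3, 4])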

def exception_suffix( record_num, pathname ):
    return 'in histogram record %d file %s' % (record_num+1, pathname)

# log file parser raises FioHistoLogExc exceptions
# it returns histogram buckets in whatever unit fio uses

def parse_hist_file(logfn, buckets_per_interval):
    max_timestamp_ms = 0.0

    with open(logfn, 'r') as f:
        records = [ l.strip() for l in f.readlines() ]
    intervals = []
    last_time_ms = -1
    last_direction = -1
    for k, r in enumerate(records):
        if r == '':
            continue
        tokens = r.split(',')
        try:
            int_tokens = [ int(t) for t in tokens ]
        except ValueError:
            raise FioHistoLogExc('non-integer value %s' % exception_suffix(k, logfn))

        neg_ints = list(filter( lambda tk : tk < 0, int_tokens ))
        if len(neg_ints) > 0:
            raise FioHistoLogExc('negative integer value %s' % exception_suffix(k, logfn))

        if len(int_tokens) < 3:
            raise FioHistoLogExc('too few numbers %s' % exception_suffix(k, logfn))

        time_ms = int_tokens[0]
        if time_ms > max_timestamp_ms:
            max_timestamp_ms = time_ms

        direction = int_tokens[1]
        if direction != 0 and direction != 1:
            raise FioHistoLogExc('invalid I/O direction %s' % exception_suffix(k, logfn))

        bsz = int_tokens[2]
        if bsz > (1 << 24):
            raise FioHistoLogExc('block size too large %s' % exception_suffix(k, logfn))

        buckets = int_tokens[3:]
        if len(buckets) != buckets_per_interval:
            raise FioHistoLogExc('%d buckets per interval but %d expected in %s' %
                                 (len(buckets), buckets_per_interval, exception_suffix(k, logfn)))

        # hack to filter out records with the same timestamp
        # we should not have to do this if fio logs histogram records correctly

        if time_ms == last_time_ms and direction == last_direction:
            continue
        last_time_ms = time_ms
        last_direction = direction

        intervals.append((time_ms, direction, bsz, buckets))
    if len(intervals) == 0:
        raise FioHistoLogExc('no records in %s' % logfn)
    return (intervals, max_timestamp_ms)
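
# usage sketch (the log filename here is hypothetical; 29*64 is the
# fio v3 default bucket count):
#
#   intervals, max_ms = parse_hist_file('fio_clat_hist.1.log', 29 * 64)
#   print('%d records, last timestamp %d msec' % (len(intervals), max_ms))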

# compute time range for each bucket index in histogram record
# see comments in https://github.com/axboe/fio/blob/master/stat.h
# for description of bucket groups and buckets
# fio v3 bucket ranges are in nanosec (since response times are measured in nanosec)
# but we convert fio v3 nanosecs to floating-point microseconds

def time_ranges(groups, counters_per_group, fio_version=3):
    bucket_width = 1
    bucket_base = 0
    bucket_intervals = []
    for g in range(0, groups):
        for b in range(0, counters_per_group):
            rmin = float(bucket_base)
            rmax = rmin + bucket_width
            if fio_version == 3:
                rmin /= nsec_per_usec
                rmax /= nsec_per_usec
            bucket_intervals.append( [rmin, rmax] )
            bucket_base += bucket_width
        if g != 0:
            bucket_width *= 2
    return bucket_intervals
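
# worked example (matching unit test test_c1 below): time_ranges(3, 2)
# generates nsec boundaries 0,1,2 | 2,3,4 | 4,6,8 -- the first two groups
# share bucket width 1 and each later group doubles it -- which the v3
# conversion to usec turns into [0.0, 0.001], ..., [0.006, 0.008]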

# compute number of time quantum intervals in the test

def get_time_intervals(time_quantum, max_timestamp_ms):
    # round down to nearest second
    max_timestamp = max_timestamp_ms // msec_per_sec
    # round up to nearest whole multiple of time_quantum
    time_interval_count = (max_timestamp + time_quantum) // time_quantum
    end_time = time_interval_count * time_quantum
    return (end_time, time_interval_count)
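
# worked example (numbers from unit test test_e1 below): with
# max_timestamp_ms = 9000 and time_quantum = 5,
#   max_timestamp = 9000 // 1000 = 9
#   time_interval_count = (9 + 5) // 5 = 2
#   end_time = 2 * 5 = 10 sec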

# align raw histogram log data to time quantum so
# we can then combine histograms from different threads with addition
# for randrw workload we count both reads and writes in same output bucket
# but we separate reads and writes for purposes of calculating
# end time for histogram record.
# this requires us to weight a raw histogram bucket by the
# fraction of the bucket's time interval that overlaps the current
# time quantum interval
# for example, if we have a bucket with 515 samples for time interval
# [ 1010, 2014 ] msec since start of test, and time quantum is 1 sec, then
# for time quantum interval [ 1000, 2000 ] msec, the weight is
# (2000 - 1010) / (2014 - 1010) = 0.986
# so the contribution of this bucket to this time quantum is
# 515 x 0.986 = 507.8

def align_histo_log(raw_histogram_log, time_quantum, bucket_count, max_timestamp_ms):

    # slice up test time into intervals of time_quantum seconds

    (end_time, time_interval_count) = get_time_intervals(time_quantum, max_timestamp_ms)
    time_qtm_ms = time_quantum * msec_per_sec
    end_time_ms = end_time * msec_per_sec
    aligned_intervals = []
    for j in range(0, time_interval_count):
        aligned_intervals.append((
            j * time_qtm_ms,
            [ 0.0 for x in range(0, bucket_count) ] ))

    log_record_count = len(raw_histogram_log)
    for k, record in enumerate(raw_histogram_log):

        # find next record with same direction to get end-time
        # have to avoid going past end of array
        # for fio randrw workload,
        # we have read and write records on same time interval
        # sometimes read and write records are in opposite order
        # assertion checks that next read/write record
        # can be separated by at most 2 other records

        (time_msec, direction, sz, interval_buckets) = record
        if k+1 < log_record_count:
            (time_msec_end, direction2, _, _) = raw_histogram_log[k+1]
            if direction2 != direction:
                if k+2 < log_record_count:
                    (time_msec_end, direction2, _, _) = raw_histogram_log[k+2]
                    if direction2 != direction:
                        if k+3 < log_record_count:
                            (time_msec_end, direction2, _, _) = raw_histogram_log[k+3]
                            assert direction2 == direction
                        else:
                            time_msec_end = end_time_ms
                else:
                    time_msec_end = end_time_ms
        else:
            time_msec_end = end_time_ms

        # calculate first quantum that overlaps this histogram record

        qtm_start_ms = (time_msec // time_qtm_ms) * time_qtm_ms
        qtm_end_ms = ((time_msec + time_qtm_ms) // time_qtm_ms) * time_qtm_ms
        qtm_index = qtm_start_ms // time_qtm_ms

        # for each quantum that overlaps this histogram record's time interval

        while qtm_start_ms < time_msec_end:  # while quantum overlaps record

            # calculate fraction of time that this quantum
            # overlaps histogram record's time interval

            overlap_start = max(qtm_start_ms, time_msec)
            overlap_end = min(qtm_end_ms, time_msec_end)
            weight = float(overlap_end - overlap_start)
            weight /= (time_msec_end - time_msec)
            (_, aligned_histogram) = aligned_intervals[qtm_index]
            for bx, b in enumerate(interval_buckets):
                weighted_bucket = weight * b
                aligned_histogram[bx] += weighted_bucket

            # advance to the next time quantum

            qtm_start_ms += time_qtm_ms
            qtm_end_ms += time_qtm_ms
            qtm_index += 1

    return aligned_intervals
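
# usage sketch (values from unit test test_d1 below): a single record at
# t=100 msec aligned to one 5-second quantum keeps its counts, as floats:
#
#   aligned = align_histo_log([(100, 1, 4096, [1, 2, 3, 4])], 5, 4, 100)
#   # aligned == [(0, [1.0, 2.0, 3.0, 4.0])]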

# add histogram in "source" to histogram in "target"
# it is assumed that the 2 histograms are precisely time-aligned

def add_to_histo_from( target, source ):
    for b in range(0, len(source)):
        target[b] += source[b]
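
# e.g. (from unit test test_a below): with a = [1.0, 2.0] and b = [1.5, 2.5],
# add_to_histo_from(a, b) leaves a == [2.5, 4.5] and b unchanged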

# compute percentiles
# inputs:
#  buckets: histogram bucket array
#  wanted: list of floating-pt percentiles to calculate
#  time_ranges: [tmin,tmax) time interval for each bucket
# returns None if no I/O was reported
# (otherwise we would be dividing by zero)
# think of buckets as a probability distribution function
# and this loop as integrating to get the cumulative distribution function

def get_pctiles(buckets, wanted, time_ranges):

    # get total of IO requests done
    total_ios = 0
    for io_count in buckets:
        total_ios += io_count

    # don't return percentiles if no I/O was done during interval
    if total_ios == 0.0:
        return None

    pctile_count = len(wanted)

    # results returned as dictionary keyed by percentile
    pctile_result = {}

    # index of next percentile in list
    pctile_index = 0

    # next percentile
    next_pctile = wanted[pctile_index]

    # no one is interested in percentiles bigger than this but not 100.0
    # this prevents floating-point error from preventing loop exit
    almost_100 = 99.9999

    # pct is the percentile corresponding to
    # all I/O requests up through bucket b
    pct = 0.0
    total_so_far = 0
    for b, io_count in enumerate(buckets):
        if io_count == 0:
            continue
        total_so_far += io_count
        # last_pct is the percentile corresponding to
        # all I/O requests up to, but not including, bucket b
        last_pct = pct
        pct = 100.0 * float(total_so_far) / total_ios
        # a single bucket could satisfy multiple pctiles
        # so this must be a while loop
        # for the 100-percentile (max latency) case, no bucket exceeds it
        # so we must stop there.
        while ((next_pctile == 100.0 and pct >= almost_100) or
               (next_pctile < 100.0  and pct > next_pctile)):
            # interpolate between min and max time for bucket time interval
            # we keep the time_ranges access inside this loop,
            # even though it could be above the loop,
            # because in many cases we will not even be entering
            # the loop, so we optimize out these accesses
            range_max_time = time_ranges[b][1]
            range_min_time = time_ranges[b][0]
            offset_frac = (next_pctile - last_pct)/(pct - last_pct)
            interpolation = range_min_time + (offset_frac*(range_max_time - range_min_time))
            pctile_result[next_pctile] = interpolation
            pctile_index += 1
            if pctile_index == pctile_count:
                break
            next_pctile = wanted[pctile_index]
        if pctile_index == pctile_count:
            break
    assert pctile_index == pctile_count
    return pctile_result
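
# usage sketch (numbers from unit test test_e1 below): a flat 128-bucket
# histogram measured against time_ranges(4, 32) has its median at 0.064 usec
# and its max latency at the top of the last bucket:
#
#   get_pctiles([100.0]*128, [0., 50., 100.], time_ranges(4, 32))
#   # -> {0.0: 0.0, 50.0: 0.064, 100.0: 0.256}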

# this is really the main program

def compute_percentiles_from_logs():
    parser = argparse.ArgumentParser()
    parser.add_argument("--fio-version", dest="fio_version",
        default="3", choices=[2,3], type=int,
        help="fio version (default=3)")
    parser.add_argument("--bucket-groups", dest="bucket_groups", default="29", type=int,
        help="fio histogram bucket groups (default=29)")
    parser.add_argument("--bucket-bits", dest="bucket_bits",
        default="6", type=int,
        help="fio histogram buckets-per-group bits (default=6 means 64 buckets/group)")
    parser.add_argument("--percentiles", dest="pctiles_wanted",
        default=[ 0., 50., 95., 99., 100.], type=float, nargs='+',
        help="percentiles to calculate (default=0 50 95 99 100)")
    parser.add_argument("--time-quantum", dest="time_quantum",
        default="1", type=int,
        help="time quantum in seconds (default=1)")
    parser.add_argument("--output-unit", dest="output_unit",
        default="usec", type=str,
        help="latency percentile output unit: msec|usec|nsec (default usec)")
    parser.add_argument("file_list", nargs='+',
        help='list of files, preceded by " -- " if necessary')
    args = parser.parse_args()

    # default changes based on fio version
    if args.fio_version == 2:
        args.bucket_groups = 19

    # print parameters

    print('fio version = %d' % args.fio_version)
    print('bucket groups = %d' % args.bucket_groups)
    print('bucket bits = %d' % args.bucket_bits)
    print('time quantum = %d sec' % args.time_quantum)
    print('percentiles = %s' % ','.join([ str(p) for p in args.pctiles_wanted ]))
    buckets_per_group = 1 << args.bucket_bits
    print('buckets per group = %d' % buckets_per_group)
    buckets_per_interval = buckets_per_group * args.bucket_groups
    print('buckets per interval = %d ' % buckets_per_interval)
    bucket_index_range = range(0, buckets_per_interval)
    if args.time_quantum <= 0:
        myabort('time-quantum must be a positive number of seconds')
    print('output unit = ' + args.output_unit)
    # bucket time ranges are in usec, so scale accordingly
    if args.output_unit == 'msec':
        time_divisor = 1000.0
    elif args.output_unit == 'usec':
        time_divisor = 1.0
    elif args.output_unit == 'nsec':
        time_divisor = 0.001
    else:
        myabort('unrecognized output unit: ' + args.output_unit)

    # calculate response time interval associated with each histogram bucket

    bucket_times = time_ranges(args.bucket_groups, buckets_per_group, fio_version=args.fio_version)

    # construct template for each histogram bucket array with buckets all zeroes
    # we just copy this for each new histogram

    zeroed_buckets = [ 0.0 for r in bucket_index_range ]

    # print CSV header just like fiologparser_hist does

    header = 'msec, '
    for p in args.pctiles_wanted:
        header += '%3.1f, ' % p
    print('time (millisec), percentiles in increasing order with values in ' + args.output_unit)
    print(header)

    # parse the histogram logs
    # assumption: each bucket has a monotonically increasing time
    # assumption: time ranges do not overlap for a single thread's records
    # (exception: for a randrw workload, there is both a read and a write
    #  record for the same time interval)

    max_timestamp_all_logs = 0
    hist_files = {}
    for fn in args.file_list:
        try:
            (hist_files[fn], max_timestamp_ms) = parse_hist_file(fn, buckets_per_interval)
        except FioHistoLogExc as e:
            myabort(str(e))
        max_timestamp_all_logs = max(max_timestamp_all_logs, max_timestamp_ms)

    (end_time, time_interval_count) = get_time_intervals(args.time_quantum, max_timestamp_all_logs)
    all_threads_histograms = [ ((j*args.time_quantum*msec_per_sec), deepcopy(zeroed_buckets))
                               for j in range(0, time_interval_count) ]

    for logfn in hist_files.keys():
        aligned_per_thread = align_histo_log(hist_files[logfn],
                                             args.time_quantum,
                                             buckets_per_interval,
                                             max_timestamp_all_logs)
        for t in range(0, time_interval_count):
            (_, all_threads_histo_t) = all_threads_histograms[t]
            (_, log_histo_t) = aligned_per_thread[t]
            add_to_histo_from( all_threads_histo_t, log_histo_t )

    # calculate percentiles across aggregate histogram for all threads

    for (t_msec, all_threads_histo_t) in all_threads_histograms:
        record = '%d, ' % t_msec
        pct = get_pctiles(all_threads_histo_t, args.pctiles_wanted, bucket_times)
        if not pct:
            for _ in args.pctiles_wanted:
                record += ', '
        else:
            pct_keys = [ k for k in pct.keys() ]
            pct_values = [ str(pct[wanted]/time_divisor) for wanted in sorted(pct_keys) ]
            record += ', '.join(pct_values)
        print(record)

# end of MAIN PROGRAM
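
# CLI usage sketch (log filenames are hypothetical; fio typically names
# histogram logs <prefix>_clat_hist.<threadnum>.log):
#
#   $ python fio-histo-log-pctiles.py --time-quantum 2 --output-unit msec \
#         mytest_clat_hist.*.log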

##### below are unit tests ##############

if unittest2_imported:
    import tempfile, shutil
    from os.path import join
    should_not_get_here = False

    class Test(unittest2.TestCase):
        tempdir = None

        # a little less typing please
        def A(self, boolean_val):
            self.assertTrue(boolean_val)

        # initialize unit test environment

        @classmethod
        def setUpClass(cls):
            d = tempfile.mkdtemp()
            Test.tempdir = d

        # remove anything left by unit test environment
        # unless user sets UNITTEST_LEAVE_FILES environment variable

        @classmethod
        def tearDownClass(cls):
            if not os.getenv("UNITTEST_LEAVE_FILES"):
                shutil.rmtree(cls.tempdir)

        def setUp(self):
            self.fn = join(Test.tempdir, self.id())

        def test_a_add_histos(self):
            a = [ 1.0, 2.0 ]
            b = [ 1.5, 2.5 ]
            add_to_histo_from( a, b )
            self.A(a == [2.5, 4.5])
            self.A(b == [1.5, 2.5])

        def test_b1_parse_log(self):
            with open(self.fn, 'w') as f:
                f.write('1234, 0, 4096, 1, 2, 3, 4\n')
                f.write('5678,1,16384,5,6,7,8 \n')
            (raw_histo_log, max_timestamp) = parse_hist_file(self.fn, 4) # 4 buckets per interval
            self.A(len(raw_histo_log) == 2 and max_timestamp == 5678)
            (time_ms, direction, bsz, histo) = raw_histo_log[0]
            self.A(time_ms == 1234 and direction == 0 and bsz == 4096 and histo == [ 1, 2, 3, 4 ])
            (time_ms, direction, bsz, histo) = raw_histo_log[1]
            self.A(time_ms == 5678 and direction == 1 and bsz == 16384 and histo == [ 5, 6, 7, 8 ])

        def test_b2_parse_empty_log(self):
            with open(self.fn, 'w') as f:
                pass
            try:
                (raw_histo_log, max_timestamp_ms) = parse_hist_file(self.fn, 4)
                self.A(should_not_get_here)
            except FioHistoLogExc as e:
                self.A(str(e).startswith('no records'))

        def test_b3_parse_empty_records(self):
            with open(self.fn, 'w') as f:
                f.write('\n')
                f.write('1234, 0, 4096, 1, 2, 3, 4\n')
                f.write('5678,1,16384,5,6,7,8 \n')
                f.write('\n')
            (raw_histo_log, max_timestamp_ms) = parse_hist_file(self.fn, 4)
            self.A(len(raw_histo_log) == 2 and max_timestamp_ms == 5678)
            (time_ms, direction, bsz, histo) = raw_histo_log[0]
            self.A(time_ms == 1234 and direction == 0 and bsz == 4096 and histo == [ 1, 2, 3, 4 ])
            (time_ms, direction, bsz, histo) = raw_histo_log[1]
            self.A(time_ms == 5678 and direction == 1 and bsz == 16384 and histo == [ 5, 6, 7, 8 ])

        def test_b4_parse_non_int(self):
            with open(self.fn, 'w') as f:
                f.write('12, 0, 4096, 1a, 2, 3, 4\n')
            try:
                (raw_histo_log, _) = parse_hist_file(self.fn, 4)
                self.A(should_not_get_here)
            except FioHistoLogExc as e:
                self.A(str(e).startswith('non-integer'))

        def test_b5_parse_neg_int(self):
            with open(self.fn, 'w') as f:
                f.write('-12, 0, 4096, 1, 2, 3, 4\n')
            try:
                (raw_histo_log, _) = parse_hist_file(self.fn, 4)
                self.A(should_not_get_here)
            except FioHistoLogExc as e:
                self.A(str(e).startswith('negative integer'))

        def test_b6_parse_too_few_int(self):
            with open(self.fn, 'w') as f:
                f.write('0, 0\n')
            try:
                (raw_histo_log, _) = parse_hist_file(self.fn, 4)
                self.A(should_not_get_here)
            except FioHistoLogExc as e:
                self.A(str(e).startswith('too few numbers'))

        def test_b7_parse_invalid_direction(self):
            with open(self.fn, 'w') as f:
                f.write('100, 2, 4096, 1, 2, 3, 4\n')
            try:
                (raw_histo_log, _) = parse_hist_file(self.fn, 4)
                self.A(should_not_get_here)
            except FioHistoLogExc as e:
                self.A(str(e).startswith('invalid I/O direction'))

        def test_b8_parse_bsz_too_big(self):
            with open(self.fn+'_good', 'w') as f:
                f.write('100, 1, %d, 1, 2, 3, 4\n' % (1<<24))
            (raw_histo_log, max_timestamp_ms) = parse_hist_file(self.fn+'_good', 4)
            with open(self.fn+'_bad', 'w') as f:
                f.write('100, 1, 20000000, 1, 2, 3, 4\n')
            try:
                (raw_histo_log, _) = parse_hist_file(self.fn+'_bad', 4)
                self.A(should_not_get_here)
            except FioHistoLogExc as e:
                self.A(str(e).startswith('block size too large'))

        def test_b9_parse_wrong_bucket_count(self):
            with open(self.fn, 'w') as f:
                f.write('100, 1, %d, 1, 2, 3, 4, 5\n' % (1<<24))
            try:
                (raw_histo_log, _) = parse_hist_file(self.fn, 4)
                self.A(should_not_get_here)
            except FioHistoLogExc as e:
                self.A('buckets per interval' in str(e))

        def test_c1_time_ranges(self):
            ranges = time_ranges(3, 2) # fio_version defaults to 3
            expected_ranges = [ # fio v3 boundaries are nsec, converted here to usec
                [0.000, 0.001], [0.001, 0.002], # first group
                [0.002, 0.003], [0.003, 0.004], # second group same width
                [0.004, 0.006], [0.006, 0.008]] # subsequent groups double width
            self.A(ranges == expected_ranges)
            ranges = time_ranges(3, 2, fio_version=3)
            self.A(ranges == expected_ranges)
            ranges = time_ranges(3, 2, fio_version=2)
            expected_ranges_v2 = [ [ 1000.0 * min_or_max for min_or_max in time_range ]
                                   for time_range in expected_ranges ]
            self.A(ranges == expected_ranges_v2)
            # see fio V3 stat.h for why 29 groups and 2^6 buckets/group
            normal_ranges_v3 = time_ranges(29, 64)
            # for v3, bucket time intervals are measured in nanoseconds
            self.A(len(normal_ranges_v3) == 29 * 64 and normal_ranges_v3[-1][1] == 64*(1<<(29-1))/1000.0)
            normal_ranges_v2 = time_ranges(19, 64, fio_version=2)
            # for v2, bucket time intervals are measured in microseconds so we have fewer buckets
            self.A(len(normal_ranges_v2) == 19 * 64 and normal_ranges_v2[-1][1] == 64*(1<<(19-1)))

        def test_d1_align_histo_log_1_quantum(self):
            with open(self.fn, 'w') as f:
                f.write('100, 1, 4096, 1, 2, 3, 4')
            (raw_histo_log, max_timestamp_ms) = parse_hist_file(self.fn, 4)
            self.A(max_timestamp_ms == 100)
            aligned_log = align_histo_log(raw_histo_log, 5, 4, max_timestamp_ms)
            self.A(len(aligned_log) == 1)
            (time_ms0, h) = aligned_log[0]
            self.A(time_ms0 == 0 and h == [1.0, 2.0, 3.0, 4.0])

        # we need this to compare 2 lists of floating point numbers for equality
        # because of floating-point imprecision

        def compare_2_floats(self, x, y):
            if x == 0.0 or y == 0.0:
                return (x+y) < 0.0000001
            else:
                return (math.fabs(x-y)/x) < 0.00001

        def is_close(self, buckets, buckets_expected):
            if len(buckets) != len(buckets_expected):
                return False
            compare_buckets = lambda k: self.compare_2_floats(buckets[k], buckets_expected[k])
            indices_close = list(filter(compare_buckets, range(0, len(buckets))))
            return len(indices_close) == len(buckets)

        def test_d2_align_histo_log_2_quantum(self):
            with open(self.fn, 'w') as f:
                f.write('2000, 1, 4096, 1, 2, 3, 4\n')
                f.write('7000, 1, 4096, 1, 2, 3, 4\n')
            (raw_histo_log, max_timestamp_ms) = parse_hist_file(self.fn, 4)
            self.A(max_timestamp_ms == 7000)
            (_, _, _, raw_buckets1) = raw_histo_log[0]
            (_, _, _, raw_buckets2) = raw_histo_log[1]
            aligned_log = align_histo_log(raw_histo_log, 5, 4, max_timestamp_ms)
            self.A(len(aligned_log) == 2)
            (time_ms1, h1) = aligned_log[0]
            (time_ms2, h2) = aligned_log[1]
            # because the first record covers time interval [2000, 7000]
            # we weight it accordingly
            expect1 = [float(b) * 0.6 for b in raw_buckets1]
            expect2 = [float(b) * 0.4 for b in raw_buckets1]
            for e in range(0, len(expect2)):
                expect2[e] += raw_buckets2[e]
            self.A(time_ms1 == 0 and self.is_close(h1, expect1))
            self.A(time_ms2 == 5000 and self.is_close(h2, expect2))

        # what to expect if histogram buckets are all equal
        def test_e1_get_pctiles_flat_histo(self):
            with open(self.fn, 'w') as f:
                buckets = [ 100 for j in range(0, 128) ]
                f.write('9000, 1, 4096, %s\n' % ', '.join([str(b) for b in buckets]))
            (raw_histo_log, max_timestamp_ms) = parse_hist_file(self.fn, 128)
            self.A(max_timestamp_ms == 9000)
            aligned_log = align_histo_log(raw_histo_log, 5, 128, max_timestamp_ms)
            time_intervals = time_ranges(4, 32)
            # since buckets are all equal, the median is halfway through time_intervals
            # and the max latency interval is at the end of time_intervals
            self.A(time_intervals[64][1] == 0.066 and time_intervals[127][1] == 0.256)
            pctiles_wanted = [ 0, 50, 100 ]
            pct_vs_time = []
            for (time_ms, histo) in aligned_log:
                pct_vs_time.append(get_pctiles(histo, pctiles_wanted, time_intervals))
            self.A(pct_vs_time[0] is None) # no I/O in this time interval
            expected_pctiles = { 0:0.000, 50:0.064, 100:0.256 }
            self.A(pct_vs_time[1] == expected_pctiles)

        # what to expect if just the highest histogram bucket is used
        def test_e2_get_pctiles_highest_pct(self):
            fio_v3_bucket_count = 29 * 64
            with open(self.fn, 'w') as f:
                # make an empty fio v3 histogram
                buckets = [ 0 for j in range(0, fio_v3_bucket_count) ]
                # add one I/O request to the last bucket
                buckets[-1] = 1
                f.write('9000, 1, 4096, %s\n' % ', '.join([str(b) for b in buckets]))
            (raw_histo_log, max_timestamp_ms) = parse_hist_file(self.fn, fio_v3_bucket_count)
            self.A(max_timestamp_ms == 9000)
            aligned_log = align_histo_log(raw_histo_log, 5, fio_v3_bucket_count, max_timestamp_ms)
            (time_ms, histo) = aligned_log[1]
            time_intervals = time_ranges(29, 64)
            expected_pctiles = { 100.0:(64*(1<<28))/1000.0 }
            pct = get_pctiles( histo, [ 100.0 ], time_intervals )
            self.A(pct == expected_pctiles)

# we are using this module as a standalone program

if __name__ == '__main__':
    if os.getenv('UNITTEST'):
        if unittest2_imported:
            sys.exit(unittest2.main())
        else:
            raise Exception('you must install the unittest2 module to run unit tests')
    else:
        compute_percentiles_from_logs()