#!/usr/bin/env python3

# module to parse fio histogram log files, not using pandas
# runs in python v2 or v3
# to get help with the CLI: $ python fio-histo-log-pctiles.py -h
# this can be run standalone as a script but is callable
# assumes all threads run for same time duration
# assumes all threads are doing the same thing for the entire run

# percentiles:
#  0 - min latency
#  50 - median
#  100 - max latency

# TO-DO:
#   separate read and write stats for randrw mixed workload
#   report average latency if needed
#   prove that it works (partially done with unit tests)

# to run unit tests, set UNITTEST environment variable to anything
# if you do this, don't pass normal CLI parameters to it
# otherwise it runs the CLI
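
# example invocation (log file names here are hypothetical):
#   $ python fio-histo-log-pctiles.py --time-quantum 5 fio_histo_log.1.log fio_histo_log.2.log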

import sys, os, math, copy, time
from copy import deepcopy
import argparse
from functools import reduce

unittest2_imported = True
try:
    import unittest2
except ImportError:
    unittest2_imported = False

msec_per_sec = 1000
nsec_per_usec = 1000
direction_read = 0
direction_write = 1

class FioHistoLogExc(Exception):
    pass

# if there is an error, print message, and exit with error status

def myabort(msg):
    print('ERROR: ' + msg)
    sys.exit(1)

# convert histogram log file into a list of
# (time_ms, direction, bsz, buckets) tuples where
#  - time_ms is the time in msec at which the log record was written
#  - direction is 0 (read) or 1 (write)
#  - bsz is block size (not used)
#  - buckets is a CSV list of counters that make up the histogram
# caller decides if the expected number of counters are present
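# for example (the same record used in unit test test_b1 below), the log line
#   1234, 0, 4096, 1, 2, 3, 4
# parses into the tuple (1234, 0, 4096, [1, 2, 3, 4])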


def exception_suffix(record_num, pathname):
    return 'in histogram record %d file %s' % (record_num + 1, pathname)

# log file parser raises FioHistoLogExc exceptions
# it returns histogram buckets in whatever unit fio uses
# inputs:
#  logfn: pathname to histogram log file
#  buckets_per_interval - how many histogram buckets to expect
#  log_hist_msec - if not None, expected time interval between histogram records

def parse_hist_file(logfn, buckets_per_interval, log_hist_msec):
    previous_ts_ms_read = -1
    previous_ts_ms_write = -1

    with open(logfn, 'r') as f:
        records = [ l.strip() for l in f.readlines() ]
    intervals = []
    last_time_ms = -1
    last_direction = -1
    for k, r in enumerate(records):
        if r == '':
            continue
        tokens = r.split(',')
        try:
            int_tokens = [ int(t) for t in tokens ]
        except ValueError:
            raise FioHistoLogExc('non-integer value %s' % exception_suffix(k, logfn))

        neg_ints = [ tk for tk in int_tokens if tk < 0 ]
        if len(neg_ints) > 0:
            raise FioHistoLogExc('negative integer value %s' % exception_suffix(k, logfn))

        if len(int_tokens) < 3:
            raise FioHistoLogExc('too few numbers %s' % exception_suffix(k, logfn))

        direction = int_tokens[1]
        if direction != direction_read and direction != direction_write:
            raise FioHistoLogExc('invalid I/O direction %s' % exception_suffix(k, logfn))

        time_ms = int_tokens[0]
        if direction == direction_read:
            if time_ms < previous_ts_ms_read:
                raise FioHistoLogExc('read timestamp in column 1 decreased %s' % exception_suffix(k, logfn))
            previous_ts_ms_read = time_ms
        elif direction == direction_write:
            if time_ms < previous_ts_ms_write:
                raise FioHistoLogExc('write timestamp in column 1 decreased %s' % exception_suffix(k, logfn))
            previous_ts_ms_write = time_ms

        bsz = int_tokens[2]
        if bsz > (1 << 24):
            raise FioHistoLogExc('block size too large %s' % exception_suffix(k, logfn))

        buckets = int_tokens[3:]
        if len(buckets) != buckets_per_interval:
            raise FioHistoLogExc('%d buckets per interval but %d expected in %s' %
                                 (len(buckets), buckets_per_interval, exception_suffix(k, logfn)))

        # hack to filter out records with the same timestamp
        # we should not have to do this if fio logs histogram records correctly

        if time_ms == last_time_ms and direction == last_direction:
            continue
        last_time_ms = time_ms
        last_direction = direction

        intervals.append((time_ms, direction, bsz, buckets))
    if len(intervals) == 0:
        raise FioHistoLogExc('no records in %s' % logfn)
    (first_timestamp, _, _, _) = intervals[0]
    if first_timestamp < 1000000:
        start_time = 0  # assume log_unix_epoch = 0
    elif log_hist_msec is not None:
        start_time = first_timestamp - log_hist_msec
    elif len(intervals) > 1:
        (second_timestamp, _, _, _) = intervals[1]
        start_time = first_timestamp - (second_timestamp - first_timestamp)
    else:
        raise FioHistoLogExc('no way to estimate test start time')
    (end_timestamp, _, _, _) = intervals[-1]

    return (intervals, start_time, end_timestamp)
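
# e.g. (as in unit test test_d1a below) for epoch-style timestamps 1536504002123
# and 1536504003123 with no log_hist_msec given, the start time is estimated
# from the gap between the first two records:
# 1536504002123 - (1536504003123 - 1536504002123) = 1536504001123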


# compute time range for each bucket index in histogram record
# see comments in https://github.com/axboe/fio/blob/master/stat.h
# for description of bucket groups and buckets
# fio v3 bucket ranges are in nanosec (since response times are measured in nanosec)
# but we convert fio v3 nanosecs to floating-point microseconds

def time_ranges(groups, counters_per_group, fio_version=3):
    bucket_width = 1
    bucket_base = 0
    bucket_intervals = []
    for g in range(0, groups):
        for b in range(0, counters_per_group):
            rmin = float(bucket_base)
            rmax = rmin + bucket_width
            if fio_version == 3:
                rmin /= nsec_per_usec
                rmax /= nsec_per_usec
            bucket_intervals.append( [rmin, rmax] )
            bucket_base += bucket_width
        if g != 0:
            bucket_width *= 2
    return bucket_intervals
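
# e.g. (from unit test test_c1 below) time_ranges(3, 2) returns, in usec:
#   [[0.000, 0.001], [0.001, 0.002],    # first group
#    [0.002, 0.003], [0.003, 0.004],    # second group, same width
#    [0.004, 0.006], [0.006, 0.008]]    # later groups double the bucket width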


# compute number of time quantum intervals in the test

def get_time_intervals(time_quantum, min_timestamp_ms, max_timestamp_ms):
    # round down to nearest second
    max_timestamp = max_timestamp_ms // msec_per_sec
    min_timestamp = min_timestamp_ms // msec_per_sec
    # round up to nearest whole multiple of time_quantum
    time_interval_count = ((max_timestamp - min_timestamp) + time_quantum) // time_quantum
    end_time = min_timestamp + (time_interval_count * time_quantum)
    return (end_time, time_interval_count)
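
# e.g. with min_timestamp_ms=0, max_timestamp_ms=7000 and time_quantum=5,
# this returns (end_time=10, time_interval_count=2); the same arithmetic
# produces the two aligned intervals seen in unit test test_d2 below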

# align raw histogram log data to time quantum so
# we can then combine histograms from different threads with addition
# for randrw workload we count both reads and writes in same output bucket
# but we separate reads and writes for purposes of calculating
# end time for histogram record.
# this requires us to weight a raw histogram bucket by the
# fraction of the histogram record's time interval that overlaps the
# current time quantum interval
# for example, if we have a bucket with 515 samples for time interval
# [ 1010, 2010 ] msec since start of test, and time quantum is 1 sec, then
# for time quantum interval [ 1000, 2000 ] msec, the overlap is
# (2000 - 1010) / (2010 - 1010) = 0.99
# so the contribution of this bucket to this time quantum is
# 515 x 0.99 = 509.85

def align_histo_log(raw_histogram_log, time_quantum, bucket_count, min_timestamp_ms, max_timestamp_ms):

    # slice up test time into intervals of time_quantum seconds

    (end_time, time_interval_count) = get_time_intervals(time_quantum, min_timestamp_ms, max_timestamp_ms)
    time_qtm_ms = time_quantum * msec_per_sec
    end_time_ms = end_time * msec_per_sec
    aligned_intervals = []
    for j in range(0, time_interval_count):
        aligned_intervals.append((
            min_timestamp_ms + (j * time_qtm_ms),
            [ 0.0 for b in range(0, bucket_count) ] ))

    log_record_count = len(raw_histogram_log)
    for k, record in enumerate(raw_histogram_log):

        # find next record with same direction to get end-time
        # have to avoid going past end of array
        # for fio randrw workload,
        # we have read and write records on same time interval
        # sometimes read and write records are in opposite order
        # assertion checks that next read/write record
        # can be separated by at most 2 other records

        (time_msec, direction, sz, interval_buckets) = record
        if k + 1 < log_record_count:
            (time_msec_end, direction2, _, _) = raw_histogram_log[k + 1]
            if direction2 != direction:
                if k + 2 < log_record_count:
                    (time_msec_end, direction2, _, _) = raw_histogram_log[k + 2]
                    if direction2 != direction:
                        if k + 3 < log_record_count:
                            (time_msec_end, direction2, _, _) = raw_histogram_log[k + 3]
                            assert direction2 == direction
                        else:
                            time_msec_end = end_time_ms
                else:
                    time_msec_end = end_time_ms
        else:
            time_msec_end = end_time_ms

        # calculate first quantum that overlaps this histogram record

        offset_from_min_ts = time_msec - min_timestamp_ms
        qtm_start_ms = min_timestamp_ms + (offset_from_min_ts // time_qtm_ms) * time_qtm_ms
        qtm_end_ms = min_timestamp_ms + ((offset_from_min_ts + time_qtm_ms) // time_qtm_ms) * time_qtm_ms
        qtm_index = offset_from_min_ts // time_qtm_ms
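
        # e.g. (as in unit test test_d1a below) with min_timestamp_ms = 1536504001123,
        # a record timestamped 1536504002123 and a 1-sec time quantum:
        # offset_from_min_ts = 1000, so qtm_start_ms = 1536504002123,
        # qtm_end_ms = 1536504003123, and qtm_index = 1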

        # for each quantum that overlaps this histogram record's time interval

        while qtm_start_ms < time_msec_end:  # while quantum overlaps record

            # some histogram logs may be longer than others

            if len(aligned_intervals) <= qtm_index:
                break

            # calculate fraction of time that this quantum
            # overlaps histogram record's time interval

            overlap_start = max(qtm_start_ms, time_msec)
            overlap_end = min(qtm_end_ms, time_msec_end)
            weight = float(overlap_end - overlap_start)
            weight /= (time_msec_end - time_msec)
            (_, aligned_histogram) = aligned_intervals[qtm_index]
            for bx, b in enumerate(interval_buckets):
                weighted_bucket = weight * b
                aligned_histogram[bx] += weighted_bucket

            # advance to the next time quantum

            qtm_start_ms += time_qtm_ms
            qtm_end_ms += time_qtm_ms
            qtm_index += 1

    return aligned_intervals
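
# e.g. (as in unit test test_d2 below) a record covering [2000, 7000) msec,
# aligned with a 5-sec time quantum, contributes 60% of its counts to the
# quantum [0, 5000) msec and 40% to the quantum [5000, 10000) msec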

# add histogram in "source" to histogram in "target"
# it is assumed that the 2 histograms are precisely time-aligned

def add_to_histo_from( target, source ):
    for b in range(0, len(source)):
        target[b] += source[b]
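
# e.g. with target = [1.0, 2.0] and source = [1.5, 2.5],
# add_to_histo_from(target, source) leaves target == [2.5, 4.5]
# and source unchanged (see unit test test_a below)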

# calculate total samples in the histogram buckets

def get_samples(buckets):
    return reduce( lambda x, y: x + y, buckets)


# compute percentiles
# inputs:
#  buckets: histogram bucket array
#  wanted: list of floating-pt percentiles to calculate
#  time_ranges: [tmin,tmax) time interval for each bucket
# returns None if no I/O reported,
# since otherwise we would be dividing by zero
# think of buckets as a probability distribution function;
# the loop below integrates it to get the cumulative distribution function

def get_pctiles(buckets, wanted, time_ranges):

    # get total of IO requests done
    total_ios = 0
    for io_count in buckets:
        total_ios += io_count

    # don't return percentiles if no I/O was done during interval
    if total_ios == 0.0:
        return None

    pctile_count = len(wanted)

    # results returned as dictionary keyed by percentile
    pctile_result = {}

    # index of next percentile in list
    pctile_index = 0

    # next percentile
    next_pctile = wanted[pctile_index]

    # no one is interested in percentiles bigger than this but not 100.0
    # this prevents floating-point error from preventing loop exit
    almost_100 = 99.9999

    # pct is the percentile corresponding to
    # all I/O requests up through bucket b
    pct = 0.0
    total_so_far = 0
    for b, io_count in enumerate(buckets):
        if io_count == 0:
            continue
        total_so_far += io_count
        # last_pct is the percentile corresponding to
        # all I/O requests up to, but not including, bucket b
        last_pct = pct
        pct = 100.0 * float(total_so_far) / total_ios
        # a single bucket could satisfy multiple pctiles
        # so this must be a while loop
        # for 100-percentile (max latency) case, no bucket exceeds it
        # so we must stop there.
        while ((next_pctile == 100.0 and pct >= almost_100) or
               (next_pctile < 100.0 and pct > next_pctile)):
            # interpolate between min and max time for bucket time interval
            # we keep the time_ranges access inside this loop,
            # even though it could be above the loop,
            # because in many cases we will not be even entering
            # the loop so we optimize out these accesses
            range_max_time = time_ranges[b][1]
            range_min_time = time_ranges[b][0]
            offset_frac = (next_pctile - last_pct) / (pct - last_pct)
            interpolation = range_min_time + (offset_frac * (range_max_time - range_min_time))
            pctile_result[next_pctile] = interpolation
            pctile_index += 1
            if pctile_index == pctile_count:
                break
            next_pctile = wanted[pctile_index]
        if pctile_index == pctile_count:
            break
    assert pctile_index == pctile_count
    return pctile_result
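
# e.g. (as in unit test test_e1 below) for a flat histogram of 128 buckets
# with 100 samples each and bucket times from time_ranges(4, 32),
# get_pctiles(buckets, [0, 50, 100], ranges) returns
# { 0:0.000, 50:0.064, 100:0.256 } (values in usec)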


# this is really the main program

def compute_percentiles_from_logs():
    parser = argparse.ArgumentParser()
    parser.add_argument("--fio-version", dest="fio_version",
                        default=3, choices=[2, 3], type=int,
                        help="fio version (default=3)")
    parser.add_argument("--bucket-groups", dest="bucket_groups", default=29, type=int,
                        help="fio histogram bucket groups (default=29)")
    parser.add_argument("--bucket-bits", dest="bucket_bits",
                        default=6, type=int,
                        help="fio histogram buckets-per-group bits (default=6 means 64 buckets/group)")
    parser.add_argument("--percentiles", dest="pctiles_wanted",
                        default=[ 0., 50., 95., 99., 100. ], type=float, nargs='+',
                        help="list of percentiles to calculate (default=0 50 95 99 100)")
    parser.add_argument("--time-quantum", dest="time_quantum",
                        default=1, type=int,
                        help="time quantum in seconds (default=1)")
    parser.add_argument("--log-hist-msec", dest="log_hist_msec",
                        type=int, default=None,
                        help="log_hist_msec value in fio job file")
    parser.add_argument("--output-unit", dest="output_unit",
                        default="usec", type=str,
                        help="latency percentile output unit: msec|usec|nsec (default usec)")
    parser.add_argument("file_list", nargs='+',
                        help='list of files, preceded by " -- " if necessary')
    args = parser.parse_args()

    # default changes based on fio version
    if args.fio_version == 2:
        args.bucket_groups = 19

    # print parameters

    print('fio version = %d' % args.fio_version)
    print('bucket groups = %d' % args.bucket_groups)
    print('bucket bits = %d' % args.bucket_bits)
    print('time quantum = %d sec' % args.time_quantum)
    print('percentiles = %s' % ','.join([ str(p) for p in args.pctiles_wanted ]))
    buckets_per_group = 1 << args.bucket_bits
    print('buckets per group = %d' % buckets_per_group)
    buckets_per_interval = buckets_per_group * args.bucket_groups
    print('buckets per interval = %d' % buckets_per_interval)
    bucket_index_range = range(0, buckets_per_interval)
    if args.log_hist_msec is not None:
        print('log_hist_msec = %d' % args.log_hist_msec)
    if args.time_quantum == 0:
        myabort('time-quantum must be a positive number of seconds')
    print('output unit = ' + args.output_unit)
    if args.output_unit == 'msec':
        time_divisor = float(msec_per_sec)
    elif args.output_unit == 'usec':
        time_divisor = 1.0
    elif args.output_unit == 'nsec':
        # bucket times are in usec by this point (fio v3 nanosec values are
        # converted in time_ranges above), so nsec output multiplies by 1000
        time_divisor = 1.0 / nsec_per_usec
    else:
        myabort('invalid output unit: ' + args.output_unit)

    # construct template for each histogram bucket array with buckets all zeroes
    # we just copy this for each new histogram

    zeroed_buckets = [ 0.0 for r in bucket_index_range ]

    # calculate response time interval associated with each histogram bucket

    bucket_times = time_ranges(args.bucket_groups, buckets_per_group, fio_version=args.fio_version)

    # parse the histogram logs
    # assumption: each bucket has a monotonically increasing time
    # assumption: time ranges do not overlap for a single thread's records
    # (exception: if randrw workload, then there is a read and a write
    # record for the same time interval)

    test_start_time = 0
    test_end_time = 1.0e18
    hist_files = {}
    for fn in args.file_list:
        try:
            (hist_files[fn], log_start_time, log_end_time) = parse_hist_file(fn, buckets_per_interval, args.log_hist_msec)
        except FioHistoLogExc as e:
            myabort(str(e))
        # we consider the test started when all threads have started logging
        test_start_time = max(test_start_time, log_start_time)
        # we consider the test over when one of the logs has ended
        test_end_time = min(test_end_time, log_end_time)

    if test_start_time >= test_end_time:
        myabort("no time interval when all threads' logs overlapped")
    if test_start_time > 0:
        print('all threads running as of unix epoch time %d = %s' % (
            test_start_time / float(msec_per_sec),
            time.ctime(test_start_time / 1000.0)))

    (end_time, time_interval_count) = get_time_intervals(args.time_quantum, test_start_time, test_end_time)
    all_threads_histograms = [ ((j * args.time_quantum * msec_per_sec), deepcopy(zeroed_buckets))
                               for j in range(0, time_interval_count) ]

    for logfn in hist_files.keys():
        aligned_per_thread = align_histo_log(hist_files[logfn],
                                             args.time_quantum,
                                             buckets_per_interval,
                                             test_start_time,
                                             test_end_time)
        for t in range(0, time_interval_count):
            (_, all_threads_histo_t) = all_threads_histograms[t]
            (_, log_histo_t) = aligned_per_thread[t]
            add_to_histo_from( all_threads_histo_t, log_histo_t )

    # calculate percentiles across aggregate histogram for all threads
    # print CSV header just like fiologparser_hist does

    header = 'msec-since-start, samples, '
    for p in args.pctiles_wanted:
        if p == 0.:
            next_pctile_header = 'min'
        elif p == 100.:
            next_pctile_header = 'max'
        elif p == 50.:
            next_pctile_header = 'median'
        else:
            next_pctile_header = '%3.1f' % p
        header += '%s, ' % next_pctile_header
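
    # with the default percentiles this loop produces the header
    # 'msec-since-start, samples, min, median, 95.0, 99.0, max, '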

    print('time (millisec), percentiles in increasing order with values in ' + args.output_unit)
    print(header)

    for (t_msec, all_threads_histo_t) in all_threads_histograms:
        samples = get_samples(all_threads_histo_t)
        record = '%8d, %8d, ' % (t_msec, samples)
        pct = get_pctiles(all_threads_histo_t, args.pctiles_wanted, bucket_times)
        if not pct:
            for w in args.pctiles_wanted:
                record += ', '
        else:
            pct_keys = [ k for k in pct.keys() ]
            pct_values = [ str(pct[wanted] / time_divisor) for wanted in sorted(pct_keys) ]
            record += ', '.join(pct_values)
        print(record)


# end of MAIN PROGRAM


##### below are unit tests ##############

if unittest2_imported:
    import tempfile, shutil
    from os.path import join
    should_not_get_here = False

    class Test(unittest2.TestCase):
        tempdir = None

        # a little less typing please
        def A(self, boolean_val):
            self.assertTrue(boolean_val)

        # initialize unit test environment

        @classmethod
        def setUpClass(cls):
            d = tempfile.mkdtemp()
            Test.tempdir = d

        # remove anything left by unit test environment
        # unless user sets UNITTEST_LEAVE_FILES environment variable

        @classmethod
        def tearDownClass(cls):
            if not os.getenv("UNITTEST_LEAVE_FILES"):
                shutil.rmtree(cls.tempdir)

        def setUp(self):
            self.fn = join(Test.tempdir, self.id())

        def test_a_add_histos(self):
            a = [ 1.0, 2.0 ]
            b = [ 1.5, 2.5 ]
            add_to_histo_from( a, b )
            self.A(a == [2.5, 4.5])
            self.A(b == [1.5, 2.5])

        def test_b1_parse_log(self):
            with open(self.fn, 'w') as f:
                f.write('1234, 0, 4096, 1, 2, 3, 4\n')
                f.write('5678,1,16384,5,6,7,8 \n')
            (raw_histo_log, min_timestamp, max_timestamp) = parse_hist_file(self.fn, 4, None)  # 4 buckets per interval
            # if not log_unix_epoch=1, then min_timestamp will always be set to zero
            self.A(len(raw_histo_log) == 2 and min_timestamp == 0 and max_timestamp == 5678)
            (time_ms, direction, bsz, histo) = raw_histo_log[0]
            self.A(time_ms == 1234 and direction == 0 and bsz == 4096 and histo == [ 1, 2, 3, 4 ])
            (time_ms, direction, bsz, histo) = raw_histo_log[1]
            self.A(time_ms == 5678 and direction == 1 and bsz == 16384 and histo == [ 5, 6, 7, 8 ])

        def test_b2_parse_empty_log(self):
            with open(self.fn, 'w') as f:
                pass
            try:
                (raw_histo_log, _, _) = parse_hist_file(self.fn, 4, None)
                self.A(should_not_get_here)
            except FioHistoLogExc as e:
                self.A(str(e).startswith('no records'))

        def test_b3_parse_empty_records(self):
            with open(self.fn, 'w') as f:
                f.write('\n')
                f.write('1234, 0, 4096, 1, 2, 3, 4\n')
                f.write('5678,1,16384,5,6,7,8 \n')
                f.write('\n')
            (raw_histo_log, _, max_timestamp_ms) = parse_hist_file(self.fn, 4, None)
            self.A(len(raw_histo_log) == 2 and max_timestamp_ms == 5678)
            (time_ms, direction, bsz, histo) = raw_histo_log[0]
            self.A(time_ms == 1234 and direction == 0 and bsz == 4096 and histo == [ 1, 2, 3, 4 ])
            (time_ms, direction, bsz, histo) = raw_histo_log[1]
            self.A(time_ms == 5678 and direction == 1 and bsz == 16384 and histo == [ 5, 6, 7, 8 ])

        def test_b4_parse_non_int(self):
            with open(self.fn, 'w') as f:
                f.write('12, 0, 4096, 1a, 2, 3, 4\n')
            try:
                (raw_histo_log, _, _) = parse_hist_file(self.fn, 4, None)
                self.A(False)
            except FioHistoLogExc as e:
                self.A(str(e).startswith('non-integer'))

        def test_b5_parse_neg_int(self):
            with open(self.fn, 'w') as f:
                f.write('-12, 0, 4096, 1, 2, 3, 4\n')
            try:
                (raw_histo_log, _, _) = parse_hist_file(self.fn, 4, None)
                self.A(False)
            except FioHistoLogExc as e:
                self.A(str(e).startswith('negative integer'))

        def test_b6_parse_too_few_int(self):
            with open(self.fn, 'w') as f:
                f.write('0, 0\n')
            try:
                (raw_histo_log, _, _) = parse_hist_file(self.fn, 4, None)
                self.A(False)
            except FioHistoLogExc as e:
                self.A(str(e).startswith('too few numbers'))

        def test_b7_parse_invalid_direction(self):
            with open(self.fn, 'w') as f:
                f.write('100, 2, 4096, 1, 2, 3, 4\n')
            try:
                (raw_histo_log, _, _) = parse_hist_file(self.fn, 4, None)
                self.A(False)
            except FioHistoLogExc as e:
                self.A(str(e).startswith('invalid I/O direction'))

        def test_b8_parse_bsz_too_big(self):
            with open(self.fn + '_good', 'w') as f:
                f.write('100, 1, %d, 1, 2, 3, 4\n' % (1 << 24))
            (raw_histo_log, _, _) = parse_hist_file(self.fn + '_good', 4, None)
            with open(self.fn + '_bad', 'w') as f:
                f.write('100, 1, 20000000, 1, 2, 3, 4\n')
            try:
                (raw_histo_log, _, _) = parse_hist_file(self.fn + '_bad', 4, None)
                self.A(False)
            except FioHistoLogExc as e:
                self.A(str(e).startswith('block size too large'))

        def test_b9_parse_wrong_bucket_count(self):
            with open(self.fn, 'w') as f:
                f.write('100, 1, %d, 1, 2, 3, 4, 5\n' % (1 << 24))
            try:
                (raw_histo_log, _, _) = parse_hist_file(self.fn, 4, None)
                self.A(False)
            except FioHistoLogExc as e:
                self.A('buckets per interval' in str(e))

        def test_c1_time_ranges(self):
            ranges = time_ranges(3, 2)  # fio_version defaults to 3
            expected_ranges = [  # fio v3 bucket widths are nanosec, shown converted to usec
                [0.000, 0.001], [0.001, 0.002],  # first group
                [0.002, 0.003], [0.003, 0.004],  # second group same width
                [0.004, 0.006], [0.006, 0.008]]  # subsequent groups double width
            self.A(ranges == expected_ranges)
            ranges = time_ranges(3, 2, fio_version=3)
            self.A(ranges == expected_ranges)
            ranges = time_ranges(3, 2, fio_version=2)
            expected_ranges_v2 = [ [ 1000.0 * min_or_max for min_or_max in time_range ]
                                   for time_range in expected_ranges ]
            self.A(ranges == expected_ranges_v2)
            # see fio V3 stat.h for why 29 groups and 2^6 buckets/group
            normal_ranges_v3 = time_ranges(29, 64)
            # for v3, bucket time intervals are measured in nanoseconds
            self.A(len(normal_ranges_v3) == 29 * 64 and normal_ranges_v3[-1][1] == 64 * (1 << (29 - 1)) / 1000.0)
            normal_ranges_v2 = time_ranges(19, 64, fio_version=2)
            # for v2, bucket time intervals are measured in microseconds so we have fewer buckets
            self.A(len(normal_ranges_v2) == 19 * 64 and normal_ranges_v2[-1][1] == 64 * (1 << (19 - 1)))

        def test_d1_align_histo_log_1_quantum(self):
            with open(self.fn, 'w') as f:
                f.write('100, 1, 4096, 1, 2, 3, 4')
            (raw_histo_log, min_timestamp_ms, max_timestamp_ms) = parse_hist_file(self.fn, 4, None)
            self.A(min_timestamp_ms == 0 and max_timestamp_ms == 100)
            aligned_log = align_histo_log(raw_histo_log, 5, 4, min_timestamp_ms, max_timestamp_ms)
            self.A(len(aligned_log) == 1)
            (time_ms0, h) = aligned_log[0]
            self.A(time_ms0 == 0 and h == [1., 2., 3., 4.])

        # handle case with log_unix_epoch=1 timestamps, 1-second time quantum
        # here the two records land in separate aligned intervals
        # (the first aligned interval is empty)

        def test_d1a_align_2rec_histo_log_epoch_1_quantum_1sec(self):
            with open(self.fn, 'w') as f:
                f.write('1536504002123, 1, 4096, 1, 2, 3, 4\n')
                f.write('1536504003123, 1, 4096, 4, 3, 2, 1\n')
            (raw_histo_log, min_timestamp_ms, max_timestamp_ms) = parse_hist_file(self.fn, 4, None)
            self.A(min_timestamp_ms == 1536504001123 and max_timestamp_ms == 1536504003123)
            aligned_log = align_histo_log(raw_histo_log, 1, 4, min_timestamp_ms, max_timestamp_ms)
            self.A(len(aligned_log) == 3)
            (time_ms0, h) = aligned_log[0]
            self.A(time_ms0 == 1536504001123 and h == [0., 0., 0., 0.])
            (time_ms1, h) = aligned_log[1]
            self.A(time_ms1 == 1536504002123 and h == [1., 2., 3., 4.])
            (time_ms2, h) = aligned_log[2]
            self.A(time_ms2 == 1536504003123 and h == [4., 3., 2., 1.])

        # handle case with log_unix_epoch=1 timestamps, 5-second time quantum
        # here both records will be merged into a single aligned time interval

        def test_d1b_align_2rec_histo_log_epoch_1_quantum_5sec(self):
            with open(self.fn, 'w') as f:
                f.write('1536504002123, 1, 4096, 1, 2, 3, 4\n')
                f.write('1536504003123, 1, 4096, 4, 3, 2, 1\n')
            (raw_histo_log, min_timestamp_ms, max_timestamp_ms) = parse_hist_file(self.fn, 4, None)
            self.A(min_timestamp_ms == 1536504001123 and max_timestamp_ms == 1536504003123)
            aligned_log = align_histo_log(raw_histo_log, 5, 4, min_timestamp_ms, max_timestamp_ms)
            self.A(len(aligned_log) == 1)
            (time_ms0, h) = aligned_log[0]
            self.A(time_ms0 == 1536504001123 and h == [5., 5., 5., 5.])

        # we need this to compare 2 lists of floating point numbers for equality
        # because of floating-point imprecision

        def compare_2_floats(self, x, y):
            if x == 0.0 or y == 0.0:
                return (x + y) < 0.0000001
            else:
                return (math.fabs(x - y) / x) < 0.00001

        def is_close(self, buckets, buckets_expected):
            if len(buckets) != len(buckets_expected):
                return False
            compare_buckets = lambda k: self.compare_2_floats(buckets[k], buckets_expected[k])
            indices_close = list(filter(compare_buckets, range(0, len(buckets))))
            return len(indices_close) == len(buckets)

        def test_d2_align_histo_log_2_quantum(self):
            with open(self.fn, 'w') as f:
                f.write('2000, 1, 4096, 1, 2, 3, 4\n')
                f.write('7000, 1, 4096, 1, 2, 3, 4\n')
            (raw_histo_log, min_timestamp_ms, max_timestamp_ms) = parse_hist_file(self.fn, 4, None)
            self.A(min_timestamp_ms == 0 and max_timestamp_ms == 7000)
            (_, _, _, raw_buckets1) = raw_histo_log[0]
            (_, _, _, raw_buckets2) = raw_histo_log[1]
            aligned_log = align_histo_log(raw_histo_log, 5, 4, min_timestamp_ms, max_timestamp_ms)
            self.A(len(aligned_log) == 2)
            (time_ms1, h1) = aligned_log[0]
            (time_ms2, h2) = aligned_log[1]
            # because the first record covers time interval [2000, 7000)
            # we weight it accordingly
            expect1 = [float(b) * 0.6 for b in raw_buckets1]
            expect2 = [float(b) * 0.4 for b in raw_buckets1]
            for e in range(0, len(expect2)):
                expect2[e] += raw_buckets2[e]
            self.A(time_ms1 == 0 and self.is_close(h1, expect1))
            self.A(time_ms2 == 5000 and self.is_close(h2, expect2))

        # what to expect if histogram buckets are all equal
        def test_e1_get_pctiles_flat_histo(self):
            with open(self.fn, 'w') as f:
                buckets = [ 100 for j in range(0, 128) ]
                f.write('9000, 1, 4096, %s\n' % ', '.join([str(b) for b in buckets]))
            (raw_histo_log, min_timestamp_ms, max_timestamp_ms) = parse_hist_file(self.fn, 128, None)
            self.A(min_timestamp_ms == 0 and max_timestamp_ms == 9000)
            aligned_log = align_histo_log(raw_histo_log, 5, 128, min_timestamp_ms, max_timestamp_ms)
            time_intervals = time_ranges(4, 32)
            # since buckets are all equal, the median is halfway through time_intervals
            # and the max latency interval is at the end of time_intervals
            self.A(time_intervals[64][1] == 0.066 and time_intervals[127][1] == 0.256)
            pctiles_wanted = [ 0, 50, 100 ]
            pct_vs_time = []
            for (time_ms, histo) in aligned_log:
                pct_vs_time.append(get_pctiles(histo, pctiles_wanted, time_intervals))
            self.A(pct_vs_time[0] is None)  # no I/O in this time interval
            expected_pctiles = { 0:0.000, 50:0.064, 100:0.256 }
            self.A(pct_vs_time[1] == expected_pctiles)

        # what to expect if just the highest histogram bucket is used
        def test_e2_get_pctiles_highest_pct(self):
            fio_v3_bucket_count = 29 * 64
            with open(self.fn, 'w') as f:
                # make an empty fio v3 histogram
                buckets = [ 0 for j in range(0, fio_v3_bucket_count) ]
                # add one I/O request to last bucket
                buckets[-1] = 1
                f.write('9000, 1, 4096, %s\n' % ', '.join([str(b) for b in buckets]))
            (raw_histo_log, min_timestamp_ms, max_timestamp_ms) = parse_hist_file(self.fn, fio_v3_bucket_count, None)
            self.A(min_timestamp_ms == 0 and max_timestamp_ms == 9000)
            aligned_log = align_histo_log(raw_histo_log, 5, fio_v3_bucket_count, min_timestamp_ms, max_timestamp_ms)
            (time_ms, histo) = aligned_log[1]
            time_intervals = time_ranges(29, 64)
            expected_pctiles = { 100.0: (64 * (1 << 28)) / 1000.0 }
            pct = get_pctiles( histo, [ 100.0 ], time_intervals )
            self.A(pct == expected_pctiles)

# we are using this module as a standalone program

if __name__ == '__main__':
    if os.getenv('UNITTEST'):
        if unittest2_imported:
            sys.exit(unittest2.main())
        else:
            raise Exception('you must install unittest2 module to run unit test')
    else:
        compute_percentiles_from_logs()