diff --git a/tools/hist/fio-histo-log-pctiles.py b/tools/hist/fio-histo-log-pctiles.py
index 5efa6458..a150e8e3 100755
--- a/tools/hist/fio-histo-log-pctiles.py
+++ b/tools/hist/fio-histo-log-pctiles.py
@@ -21,32 +21,25 @@
 # if you do this, don't pass normal CLI parameters to it
 # otherwise it runs the CLI

-import sys, os, math, copy
+import sys, os, math, copy, time
 from copy import deepcopy
-
+import argparse
 import unittest2

 msec_per_sec = 1000
 nsec_per_usec = 1000
+direction_read = 0
+direction_write = 1

 class FioHistoLogExc(Exception):
     pass

-# if there is an error, print message, print syntax, and exit with error status
+# if there is an error, print message, and exit with error status

-def usage(msg):
+def myabort(msg):
     print('ERROR: ' + msg)
-    print('usage: fio-histo-log-pctiles.py ')
-    print(' [ --fio-version 2|3 (default 3) ]')
-    print(' [ --bucket-groups positive-int (default 29) ]')
-    print(' [ --bucket-bits small-positive-int (default 6) ]')
-    print(' [ --percentiles p1,p2,...,pN ] (default 0,50,95,99,100)')
-    print(' [ --time-quantum positive-int (default 1 sec) ]')
-    print(' [ --output-unit msec|usec|nsec (default msec) ]')
-    print(' log-file1 log-file2 ...')
     sys.exit(1)

-
 # convert histogram log file into a list of
 # (time_ms, direction, bsz, buckets) tuples where
 # - time_ms is the time in msec at which the log record was written
@@ -61,10 +54,15 @@ def exception_suffix( record_num, pathname ):
 # log file parser raises FioHistoLogExc exceptions
 # it returns histogram buckets in whatever unit fio uses

-
-def parse_hist_file(logfn, buckets_per_interval):
-    max_timestamp_ms = 0.0
-
+# inputs:
+# logfn: pathname to histogram log file
+# buckets_per_interval: how many histogram buckets to expect
+# log_hist_msec: if not None, expected time interval between histogram records
+
+def parse_hist_file(logfn, buckets_per_interval, log_hist_msec):
+    previous_ts_ms_read = -1
+    previous_ts_ms_write = -1
+
     with open(logfn, 'r') as f:
         records = [ l.strip() for l in f.readlines() ]
     intervals = []
@@ -84,14 +82,20 @@ def parse_hist_file(logfn, buckets_per_interval):
         if len(int_tokens) < 3:
             raise FioHistoLogExc('too few numbers %s' % exception_suffix(k+1, logfn))

-        time_ms = int_tokens[0]
-        if time_ms > max_timestamp_ms:
-            max_timestamp_ms = time_ms
-
         direction = int_tokens[1]
-        if direction != 0 and direction != 1:
+        if direction != direction_read and direction != direction_write:
             raise FioHistoLogExc('invalid I/O direction %s' % exception_suffix(k+1, logfn))

+        time_ms = int_tokens[0]
+        if direction == direction_read:
+            if time_ms < previous_ts_ms_read:
+                raise FioHistoLogExc('read timestamp in column 1 decreased %s' % exception_suffix(k+1, logfn))
+            previous_ts_ms_read = time_ms
+        elif direction == direction_write:
+            if time_ms < previous_ts_ms_write:
+                raise FioHistoLogExc('write timestamp in column 1 decreased %s' % exception_suffix(k+1, logfn))
+            previous_ts_ms_write = time_ms
+
         bsz = int_tokens[2]
         if bsz > (1 << 24):
             raise FioHistoLogExc('block size too large %s' % exception_suffix(k+1, logfn))
@@ -103,7 +107,17 @@ def parse_hist_file(logfn, buckets_per_interval):

         intervals.append((time_ms, direction, bsz, buckets))
     if len(intervals) == 0:
         raise FioHistoLogExc('no records in %s' % logfn)
-    return (intervals, max_timestamp_ms)
+    (first_timestamp, _, _, _) = intervals[0]
+    start_time = first_timestamp    # fallback: lone epoch-based record, no interval hint
+    if first_timestamp < 1000000:
+        start_time = 0    # assume log_unix_epoch = 0
+    elif log_hist_msec != None:
+        start_time = first_timestamp - log_hist_msec
+    elif len(intervals) > 1:
+        start_time = first_timestamp - (intervals[1][0] - first_timestamp)
+    (end_timestamp, _, _, _) = intervals[-1]
+
+    return (intervals, start_time, end_timestamp)

 # compute time range for each bucket index in histogram record

@@ -132,12 +146,13 @@ def time_ranges(groups, counters_per_group, fio_version=3):

 # compute number of time quantum intervals in the test

-def get_time_intervals(time_quantum, max_timestamp_ms):
+def get_time_intervals(time_quantum, min_timestamp_ms, max_timestamp_ms):
     # round down to nearest second
     max_timestamp = max_timestamp_ms // msec_per_sec
+    min_timestamp = min_timestamp_ms // msec_per_sec
     # round up to nearest whole multiple of time_quantum
-    time_interval_count = (max_timestamp + time_quantum) // time_quantum
-    end_time = time_interval_count * time_quantum
+    time_interval_count = ((max_timestamp - min_timestamp) + time_quantum) // time_quantum
+    end_time = min_timestamp + (time_interval_count * time_quantum)
     return (end_time, time_interval_count)

 # align raw histogram log data to time quantum so
@@ -155,17 +170,17 @@ def get_time_intervals(time_quantum, max_timestamp_ms):
 #  so the contribution of this bucket to this time quantum is
 #  515 x 0.99 = 509.85

-def align_histo_log(raw_histogram_log, time_quantum, bucket_count, max_timestamp_ms):
+def align_histo_log(raw_histogram_log, time_quantum, bucket_count, min_timestamp_ms, max_timestamp_ms):

     # slice up test time into intervals of time_quantum seconds

-    (end_time, time_interval_count) = get_time_intervals(time_quantum, max_timestamp_ms)
+    (end_time, time_interval_count) = get_time_intervals(time_quantum, min_timestamp_ms, max_timestamp_ms)
     time_qtm_ms = time_quantum * msec_per_sec
     end_time_ms = end_time * msec_per_sec
     aligned_intervals = []
     for j in range(0, time_interval_count):
         aligned_intervals.append((
-            j * time_qtm_ms,
+            min_timestamp_ms + (j * time_qtm_ms),
             [ 0.0 for j in range(0, bucket_count) ] ))

     log_record_count = len(raw_histogram_log)
@@ -198,14 +213,20 @@

         # calculate first quantum that overlaps this histogram record

-        qtm_start_ms = (time_msec // time_qtm_ms) * time_qtm_ms
-        qtm_end_ms = ((time_msec + time_qtm_ms) // time_qtm_ms) * time_qtm_ms
-        qtm_index = qtm_start_ms // time_qtm_ms
+        offset_from_min_ts = time_msec - min_timestamp_ms
+        qtm_start_ms = min_timestamp_ms + (offset_from_min_ts // time_qtm_ms) * time_qtm_ms
+        qtm_end_ms = min_timestamp_ms + ((offset_from_min_ts + time_qtm_ms) // time_qtm_ms) * time_qtm_ms
+        qtm_index = offset_from_min_ts // time_qtm_ms

         # for each quantum that overlaps this histogram record's time interval

         while qtm_start_ms < time_msec_end:  # while quantum overlaps record

+            # some histogram logs may be longer than others
+
+            if len(aligned_intervals) <= qtm_index:
+                break
+
             # calculate fraction of time that this quantum
             # overlaps histogram record's time interval

@@ -307,112 +328,68 @@ def get_pctiles(buckets, wanted, time_ranges):
     return pctile_result

-# parse parameters
-# returns a tuple of command line parameters
-# parameters have default values unless otherwise shown
-
-def parse_cli_params():
-
-    # default values for input parameters
-
-    fio_version = 3   # we are using fio 3.x now
-    bucket_groups = None   # defaulting comes later
-    bucket_bits = 6   # default in fio 3.x
-    pctiles_wanted = [ 0, 50, 90, 95, 99, 100 ]
-    time_quantum = 1
-    output_unit = 'usec'
-
-    # parse command line parameters and display them
-
-    argindex = 1
-    argct = len(sys.argv)
-    if argct < 2:
-        usage('must supply at least one histogram log file')
-    while argindex < argct:
-        if argct < argindex + 2:
-            break
-        pname = sys.argv[argindex]
-        pval = sys.argv[argindex+1]
-        if not pname.startswith('--'):
-            break
-        argindex += 2
-        pname = pname[2:]
-
-        if pname == 'bucket-groups':
-            bucket_groups = int(pval)
-        elif pname == 'bucket-bits':
-            bucket_bits = int(pval)
-        elif pname == 'time-quantum':
-            time_quantum = int(pval)
-        elif pname == 'percentiles':
-            pctiles_wanted = [ float(p) for p in pval.split(',') ]
-        elif pname == 'output-unit':
-            if pval == 'msec' or pval == 'usec':
-                output_unit = pval
-            else:
-                usage('output-unit must be usec (microseconds) or msec (milliseconds)')
-        elif pname == 'fio-version':
-            if pval != '2' and pval != '3':
-                usage('invalid fio version, must be 2 or 3')
-            fio_version = int(pval)
-        else:
-            usage('invalid parameter name --%s' % pname)
-
-    if not bucket_groups:
-        # default changes based on fio version
-        if fio_version == 2:
-            bucket_groups = 19
-        else:
-            # default in fio 3.x
-            bucket_groups = 29
-
-    filename_list = sys.argv[argindex:]
-    for f in filename_list:
-        if not os.path.exists(f):
-            usage('file %s does not exist' % f)
-    return (bucket_groups, bucket_bits, fio_version, pctiles_wanted,
-            filename_list, time_quantum, output_unit)
-
-
 # this is really the main program

 def compute_percentiles_from_logs():
-    (bucket_groups, bucket_bits, fio_version, pctiles_wanted,
-     file_list, time_quantum, output_unit) = parse_cli_params()
-
-    print('bucket groups = %d' % bucket_groups)
-    print('bucket bits = %d' % bucket_bits)
-    print('time quantum = %d sec' % time_quantum)
-    print('percentiles = %s' % ','.join([ str(p) for p in pctiles_wanted ]))
-    buckets_per_group = 1 << bucket_bits
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--fio-version", dest="fio_version",
+                        default="3", choices=[2,3], type=int,
+                        help="fio version (default=3)")
+    parser.add_argument("--bucket-groups", dest="bucket_groups", default="29", type=int,
+                        help="fio histogram bucket groups (default=29)")
+    parser.add_argument("--bucket-bits", dest="bucket_bits",
+                        default="6", type=int,
+                        help="fio histogram buckets-per-group bits (default=6 means 64 buckets/group)")
+    parser.add_argument("--percentiles", dest="pctiles_wanted",
+                        default=[ 0., 50., 95., 99., 100.], type=float, nargs='+',
+                        help="percentiles to calculate, space-separated (default: 0 50 95 99 100)")
+    parser.add_argument("--time-quantum", dest="time_quantum",
+                        default="1", type=int,
+                        help="time quantum in seconds (default=1)")
+    parser.add_argument("--log-hist-msec", dest="log_hist_msec",
+                        type=int, default=None,
+                        help="log_hist_msec value in fio job file")
+    parser.add_argument("--output-unit", dest="output_unit",
+                        default="usec", type=str, choices=['msec', 'usec'],
+                        help="latency percentile output unit: msec|usec (default usec)")
+    parser.add_argument("file_list", nargs='+',
+                        help='list of files, preceded by " -- " if necessary')
+    args = parser.parse_args()
+
+    # default changes based on fio version
+    if args.fio_version == 2:
+        args.bucket_groups = 19
+
+    # print parameters
+
+    print('fio version = %d' % args.fio_version)
+    print('bucket groups = %d' % args.bucket_groups)
+    print('bucket bits = %d' % args.bucket_bits)
+    print('time quantum = %d sec' % args.time_quantum)
+    print('percentiles = %s' % ','.join([ str(p) for p in args.pctiles_wanted ]))
+    buckets_per_group = 1 << args.bucket_bits
     print('buckets per group = %d' % buckets_per_group)
-    buckets_per_interval = buckets_per_group * bucket_groups
+    buckets_per_interval = buckets_per_group * args.bucket_groups
     print('buckets per interval = %d ' % buckets_per_interval)
     bucket_index_range = range(0, buckets_per_interval)
-    if time_quantum == 0:
-        usage('time-quantum must be a positive number of seconds')
-    print('output unit = ' + output_unit)
-    if output_unit == 'msec':
-        time_divisor = 1000.0
-    elif output_unit == 'usec':
+    if args.log_hist_msec != None:
+        print('log_hist_msec = %d' % args.log_hist_msec)
+    if args.time_quantum == 0:
+        myabort('time-quantum must be a positive number of seconds')
+    print('output unit = ' + args.output_unit)
+    if args.output_unit == 'msec':
+        time_divisor = float(msec_per_sec)
+    elif args.output_unit == 'usec':
         time_divisor = 1.0

-    # calculate response time interval associated with each histogram bucket
-
-    bucket_times = time_ranges(bucket_groups, buckets_per_group, fio_version=fio_version)
-
     # construct template for each histogram bucket array with buckets all zeroes
     # we just copy this for each new histogram

     zeroed_buckets = [ 0.0 for r in bucket_index_range ]

-    # print CSV header just like fiologparser_hist does
+    # calculate response time interval associated with each histogram bucket

-    header = 'msec, '
-    for p in pctiles_wanted:
-        header += '%3.1f, ' % p
-    print('time (millisec), percentiles in increasing order with values in ' + output_unit)
-    print(header)
+    bucket_times = time_ranges(args.bucket_groups, buckets_per_group, fio_version=args.fio_version)

     # parse the histogram logs
     # assumption: each bucket has a monotonically increasing time
     # assumption: time ranges do not overlap for a single thread's records
@@ -420,35 +397,55 @@ def compute_percentiles_from_logs():
     # (exception: if randrw workload, then there is a read and a write
     #  record for the same time interval)

-    max_timestamp_all_logs = 0
+    test_start_time = 0
+    test_end_time = 1.0e18
     hist_files = {}
-    for fn in file_list:
+    for fn in args.file_list:
         try:
-            (hist_files[fn], max_timestamp_ms) = parse_hist_file(fn, buckets_per_interval)
+            (hist_files[fn], log_start_time, log_end_time) = parse_hist_file(fn, buckets_per_interval, args.log_hist_msec)
         except FioHistoLogExc as e:
-            usage(str(e))
-        max_timestamp_all_logs = max(max_timestamp_all_logs, max_timestamp_ms)
-
-    (end_time, time_interval_count) = get_time_intervals(time_quantum, max_timestamp_all_logs)
-    all_threads_histograms = [ ((j*time_quantum*msec_per_sec), deepcopy(zeroed_buckets))
-                            for j in range(0, time_interval_count) ]
+            myabort(str(e))
+        # we consider the test started when all threads have started logging
+        test_start_time = max(test_start_time, log_start_time)
+        # we consider the test over when one of the logs has ended
+        test_end_time = min(test_end_time, log_end_time)
+
+    if test_start_time >= test_end_time:
+        myabort('no time interval when all thread logs overlapped')
+    if test_start_time > 0:
+        print('all threads running as of unix epoch time %d = %s' % (
+               test_start_time/float(msec_per_sec),
+               time.ctime(test_start_time/1000.0)))
+
+    (end_time, time_interval_count) = get_time_intervals(args.time_quantum, test_start_time, test_end_time)
+    all_threads_histograms = [ ((j*args.time_quantum*msec_per_sec), deepcopy(zeroed_buckets))
+                                    for j in range(0, time_interval_count) ]

     for logfn in hist_files.keys():
         aligned_per_thread = align_histo_log(hist_files[logfn],
-                                             time_quantum,
+                                             args.time_quantum,
                                              buckets_per_interval,
-                                             max_timestamp_all_logs)
+                                             test_start_time,
+                                             test_end_time)
         for t in range(0, time_interval_count):
             (_, all_threads_histo_t) = all_threads_histograms[t]
             (_, log_histo_t) = aligned_per_thread[t]
             add_to_histo_from( all_threads_histo_t, log_histo_t )

-    print('percentiles for entire set of threads')
+    # calculate percentiles across aggregate histogram for all threads
+    # print CSV header just like fiologparser_hist does
+
+    header = 'msec-since-start, '
+    for p in args.pctiles_wanted:
+        header += '%3.1f, ' % p
+    print('time (millisec), percentiles in increasing order with values in ' + args.output_unit)
+    print(header)
+
     for (t_msec, all_threads_histo_t) in all_threads_histograms:
-        record = '%d, ' % t_msec
-        pct = get_pctiles(all_threads_histo_t, pctiles_wanted, bucket_times)
+        record = '%8d, ' % t_msec
+        pct = get_pctiles(all_threads_histo_t, args.pctiles_wanted, bucket_times)
         if not pct:
-            for w in pctiles_wanted:
+            for w in args.pctiles_wanted:
                 record += ', '
         else:
             pct_keys = [ k for k in pct.keys() ]
@@ -504,8 +501,9 @@ class Test(unittest2.TestCase):
         with open(self.fn, 'w') as f:
             f.write('1234, 0, 4096, 1, 2, 3, 4\n')
             f.write('5678,1,16384,5,6,7,8 \n')
-        (raw_histo_log, max_timestamp) = parse_hist_file(self.fn, 4) # 4 buckets per interval
-        self.A(len(raw_histo_log) == 2 and max_timestamp == 5678)
+        (raw_histo_log, min_timestamp, max_timestamp) = parse_hist_file(self.fn, 4, None) # 4 buckets per interval
+        # unless log_unix_epoch=1 was used, min_timestamp will always be zero
+        self.A(len(raw_histo_log) == 2 and min_timestamp == 0 and max_timestamp == 5678)
         (time_ms, direction, bsz, histo) = raw_histo_log[0]
         self.A(time_ms == 1234 and direction == 0 and bsz == 4096 and histo == [ 1, 2, 3, 4 ])
         (time_ms, direction, bsz, histo) = raw_histo_log[1]
@@ -515,7 +513,7 @@ class Test(unittest2.TestCase):
         with open(self.fn, 'w') as f:
             pass
         try:
-            (raw_histo_log, max_timestamp_ms) = parse_hist_file(self.fn, 4)
+            (raw_histo_log, _, _) = parse_hist_file(self.fn, 4, None)
             self.A(should_not_get_here)
         except FioHistoLogExc as e:
             self.A(str(e).startswith('no records'))
@@ -526,7 +524,7 @@ class Test(unittest2.TestCase):
             f.write('1234, 0, 4096, 1, 2, 3, 4\n')
             f.write('5678,1,16384,5,6,7,8 \n')
             f.write('\n')
-        (raw_histo_log, max_timestamp_ms) = parse_hist_file(self.fn, 4)
+        (raw_histo_log, _, max_timestamp_ms) = parse_hist_file(self.fn, 4, None)
         self.A(len(raw_histo_log) == 2 and max_timestamp_ms == 5678)
         (time_ms, direction, bsz, histo) = raw_histo_log[0]
         self.A(time_ms == 1234 and direction == 0 and bsz == 4096 and histo == [ 1, 2, 3, 4 ])
@@ -537,7 +535,7 @@ class Test(unittest2.TestCase):
         with open(self.fn, 'w') as f:
             f.write('12, 0, 4096, 1a, 2, 3, 4\n')
         try:
-            (raw_histo_log, _) = parse_hist_file(self.fn, 4)
+            (raw_histo_log, _, _) = parse_hist_file(self.fn, 4, None)
             self.A(False)
         except FioHistoLogExc as e:
             self.A(str(e).startswith('non-integer'))
@@ -546,7 +544,7 @@ class Test(unittest2.TestCase):
         with open(self.fn, 'w') as f:
             f.write('-12, 0, 4096, 1, 2, 3, 4\n')
         try:
-            (raw_histo_log, _) = parse_hist_file(self.fn, 4)
+            (raw_histo_log, _, _) = parse_hist_file(self.fn, 4, None)
             self.A(False)
         except FioHistoLogExc as e:
             self.A(str(e).startswith('negative integer'))
@@ -555,7 +553,7 @@ class Test(unittest2.TestCase):
         with open(self.fn, 'w') as f:
             f.write('0, 0\n')
         try:
-            (raw_histo_log, _) = parse_hist_file(self.fn, 4)
+            (raw_histo_log, _, _) = parse_hist_file(self.fn, 4, None)
             self.A(False)
         except FioHistoLogExc as e:
             self.A(str(e).startswith('too few numbers'))

@@ -564,7 +562,7 @@ class Test(unittest2.TestCase):
         with open(self.fn, 'w') as f:
             f.write('100, 2, 4096, 1, 2, 3, 4\n')
         try:
-            (raw_histo_log, _) = parse_hist_file(self.fn, 4)
+            (raw_histo_log, _, _) = parse_hist_file(self.fn, 4, None)
             self.A(False)
         except FioHistoLogExc as e:
             self.A(str(e).startswith('invalid I/O direction'))
@@ -572,11 +570,11 @@ class Test(unittest2.TestCase):
     def test_b8_parse_bsz_too_big(self):
         with open(self.fn+'_good', 'w') as f:
             f.write('100, 1, %d, 1, 2, 3, 4\n' % (1<<24))
-        (raw_histo_log, max_timestamp_ms) = parse_hist_file(self.fn+'_good', 4)
+        (raw_histo_log, _, _) = parse_hist_file(self.fn+'_good', 4, None)
         with open(self.fn+'_bad', 'w') as f:
             f.write('100, 1, 20000000, 1, 2, 3, 4\n')
         try:
-            (raw_histo_log, _) = parse_hist_file(self.fn+'_bad', 4)
+            (raw_histo_log, _, _) = parse_hist_file(self.fn+'_bad', 4, None)
             self.A(False)
         except FioHistoLogExc as e:
             self.A(str(e).startswith('block size too large'))
@@ -585,7 +583,7 @@ class Test(unittest2.TestCase):
         with open(self.fn, 'w') as f:
             f.write('100, 1, %d, 1, 2, 3, 4, 5\n' % (1<<24))
         try:
-            (raw_histo_log, _) = parse_hist_file(self.fn, 4)
+            (raw_histo_log, _, _) = parse_hist_file(self.fn, 4, None)
             self.A(False)
         except FioHistoLogExc as e:
             self.A(str(e).__contains__('buckets per interval'))
@@ -614,12 +612,44 @@ class Test(unittest2.TestCase):
     def test_d1_align_histo_log_1_quantum(self):
         with open(self.fn, 'w') as f:
             f.write('100, 1, 4096, 1, 2, 3, 4')
-        (raw_histo_log, max_timestamp_ms) = parse_hist_file(self.fn, 4)
-        self.A(max_timestamp_ms == 100)
-        aligned_log = align_histo_log(raw_histo_log, 5, 4, max_timestamp_ms)
+        (raw_histo_log, min_timestamp_ms, max_timestamp_ms) = parse_hist_file(self.fn, 4, None)
+        self.A(min_timestamp_ms == 0 and max_timestamp_ms == 100)
+        aligned_log = align_histo_log(raw_histo_log, 5, 4, min_timestamp_ms, max_timestamp_ms)
+        self.A(len(aligned_log) == 1)
+        (time_ms0, h) = aligned_log[0]
+        self.A(time_ms0 == 0 and h == [1., 2., 3., 4.])
+
+    # handle case with log_unix_epoch=1 timestamps, 1-second time quantum
+    # here the two records land in 2 separate aligned intervals (3 in all,
+    # counting the leading all-zero interval)
+
+    def test_d1a_align_2rec_histo_log_epoch_1_quantum_1sec(self):
+        with open(self.fn, 'w') as f:
+            f.write('1536504002123, 1, 4096, 1, 2, 3, 4\n')
+            f.write('1536504003123, 1, 4096, 4, 3, 2, 1\n')
+        (raw_histo_log, min_timestamp_ms, max_timestamp_ms) = parse_hist_file(self.fn, 4, None)
+        self.A(min_timestamp_ms == 1536504001123 and max_timestamp_ms == 1536504003123)
+        aligned_log = align_histo_log(raw_histo_log, 1, 4, min_timestamp_ms, max_timestamp_ms)
+        self.A(len(aligned_log) == 3)
+        (time_ms0, h) = aligned_log[0]
+        self.A(time_ms0 == 1536504001123 and h == [0., 0., 0., 0.])
+        (time_ms1, h) = aligned_log[1]
+        self.A(time_ms1 == 1536504002123 and h == [1., 2., 3., 4.])
+        (time_ms2, h) = aligned_log[2]
+        self.A(time_ms2 == 1536504003123 and h == [4., 3., 2., 1.])
+
+    # handle case with log_unix_epoch=1 timestamps, 5-second time quantum
+    # here both records will be merged into a single aligned time interval
+
+    def test_d1b_align_2rec_histo_log_epoch_1_quantum_5sec(self):
+        with open(self.fn, 'w') as f:
+            f.write('1536504002123, 1, 4096, 1, 2, 3, 4\n')
+            f.write('1536504003123, 1, 4096, 4, 3, 2, 1\n')
+        (raw_histo_log, min_timestamp_ms, max_timestamp_ms) = parse_hist_file(self.fn, 4, None)
+        self.A(min_timestamp_ms == 1536504001123 and max_timestamp_ms == 1536504003123)
+        aligned_log = align_histo_log(raw_histo_log, 5, 4, min_timestamp_ms, max_timestamp_ms)
         self.A(len(aligned_log) == 1)
         (time_ms0, h) = aligned_log[0]
-        self.A(time_ms0 == 0 and h == [1.0, 2.0, 3.0, 4.0])
+        self.A(time_ms0 == 1536504001123 and h == [5., 5., 5., 5.])

     # we need this to compare 2 lists of floating point numbers for equality
     # because of floating-point imprecision
@@ -641,11 +671,11 @@ class Test(unittest2.TestCase):
         with open(self.fn, 'w') as f:
             f.write('2000, 1, 4096, 1, 2, 3, 4\n')
             f.write('7000, 1, 4096, 1, 2, 3, 4\n')
-        (raw_histo_log, max_timestamp_ms) = parse_hist_file(self.fn, 4)
-        self.A(max_timestamp_ms == 7000)
+        (raw_histo_log, min_timestamp_ms, max_timestamp_ms) = parse_hist_file(self.fn, 4, None)
+        self.A(min_timestamp_ms == 0 and max_timestamp_ms == 7000)
         (_, _, _, raw_buckets1) = raw_histo_log[0]
         (_, _, _, raw_buckets2) = raw_histo_log[1]
-        aligned_log = align_histo_log(raw_histo_log, 5, 4, max_timestamp_ms)
+        aligned_log = align_histo_log(raw_histo_log, 5, 4, min_timestamp_ms, max_timestamp_ms)
         self.A(len(aligned_log) == 2)
         (time_ms1, h1) = aligned_log[0]
         (time_ms2, h2) = aligned_log[1]
@@ -663,9 +693,9 @@ class Test(unittest2.TestCase):
         with open(self.fn, 'w') as f:
             buckets = [ 100 for j in range(0, 128) ]
             f.write('9000, 1, 4096, %s\n' % ', '.join([str(b) for b in buckets]))
-        (raw_histo_log, max_timestamp_ms) = parse_hist_file(self.fn, 128)
-        self.A(max_timestamp_ms == 9000)
-        aligned_log = align_histo_log(raw_histo_log, 5, 128, max_timestamp_ms)
+        (raw_histo_log, min_timestamp_ms, max_timestamp_ms) = parse_hist_file(self.fn, 128, None)
+        self.A(min_timestamp_ms == 0 and max_timestamp_ms == 9000)
+        aligned_log = align_histo_log(raw_histo_log, 5, 128, min_timestamp_ms, max_timestamp_ms)
         time_intervals = time_ranges(4, 32)
         # since buckets are all equal, then median is halfway through time_intervals
         # and max latency interval is at end of time_intervals
@@ -687,9 +717,9 @@ class Test(unittest2.TestCase):
             # add one I/O request to last bucket
             buckets[-1] = 1
             f.write('9000, 1, 4096, %s\n' % ', '.join([str(b) for b in buckets]))
-        (raw_histo_log, max_timestamp_ms) = parse_hist_file(self.fn, fio_v3_bucket_count)
-        self.A(max_timestamp_ms == 9000)
-        aligned_log = align_histo_log(raw_histo_log, 5, fio_v3_bucket_count, max_timestamp_ms)
+        (raw_histo_log, min_timestamp_ms, max_timestamp_ms) = parse_hist_file(self.fn, fio_v3_bucket_count, None)
+        self.A(min_timestamp_ms == 0 and max_timestamp_ms == 9000)
+        aligned_log = align_histo_log(raw_histo_log, 5, fio_v3_bucket_count, min_timestamp_ms, max_timestamp_ms)
         (time_ms, histo) = aligned_log[1]
         time_intervals = time_ranges(29, 64)
         expected_pctiles = { 100.0:(64*(1<<28))/1000.0 }
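
The bucket-apportionment arithmetic described in the comments above align_histo_log
(the "515 x 0.99 = 509.85" example) can be checked in isolation. A minimal
standalone sketch, not part of the patch; overlap_fraction and its parameter names
are illustrative only:

    def overlap_fraction(rec_start_ms, rec_end_ms, qtm_start_ms, qtm_end_ms):
        # fraction of a histogram record's time interval that falls
        # inside one time quantum
        overlap_ms = min(rec_end_ms, qtm_end_ms) - max(rec_start_ms, qtm_start_ms)
        return max(overlap_ms, 0) / float(rec_end_ms - rec_start_ms)

    # a record spanning 1.01-2.01 sec, measured against 1-second quanta:
    assert abs(overlap_fraction(1010, 2010, 1000, 2000) - 0.99) < 1e-9
    assert abs(overlap_fraction(1010, 2010, 2000, 3000) - 0.01) < 1e-9
    # so a bucket count of 515 contributes 515 * 0.99 = 509.85 to the
    # first quantum and 515 * 0.01 = 5.15 to the second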
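
The interval arithmetic in get_time_intervals can likewise be sanity-checked
against the epoch-timestamp test cases (test_d1a, test_d1b). The function below is
copied from the patch; the asserts reuse the same timestamps as those tests:

    msec_per_sec = 1000

    def get_time_intervals(time_quantum, min_timestamp_ms, max_timestamp_ms):
        # round down to nearest second
        max_timestamp = max_timestamp_ms // msec_per_sec
        min_timestamp = min_timestamp_ms // msec_per_sec
        # round up to nearest whole multiple of time_quantum
        time_interval_count = ((max_timestamp - min_timestamp) + time_quantum) // time_quantum
        end_time = min_timestamp + (time_interval_count * time_quantum)
        return (end_time, time_interval_count)

    # two records one second apart, with unix-epoch msec timestamps:
    # a 1-second quantum yields 3 intervals, a 5-second quantum just 1
    assert get_time_intervals(1, 1536504001123, 1536504003123) == (1536504004, 3)
    assert get_time_intervals(5, 1536504001123, 1536504003123) == (1536504006, 1)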
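
Given the argparse definitions in the patch, one plausible invocation (the log
file names here are hypothetical) looks like:

    python fio-histo-log-pctiles.py --time-quantum 5 --percentiles 0 50 99 100 \
        -- job_clat_hist.1.log job_clat_hist.2.log

Because --percentiles is declared with nargs='+', its values are space-separated,
and the literal " -- " separator keeps the file list from being consumed as
percentile values, as the file_list help string notes.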