def output_interval_data(ctx,directions):
    """Accumulate histogram samples from all input files into fixed-width time
    intervals and emit one processed row per interval per requested direction.

    Args:
        ctx: parsed command-line context; uses ctx.FILE (list of input log
             paths) and ctx.interval (interval width, same units as the
             sample timestamps — presumably msec, TODO confirm).
        directions: iterable of direction keys to output; any of
             'm' (mixed), 'r' (read), 'w' (write), 't' (trim).

    Side effects: prints a header row plus per-interval output via
    process_interval(); consumes the HistFileRdr readers to exhaustion.
    """
    # One stateful reader per input file; each exposes curTS/curDir/curBins
    # and advances via nextData().
    fps = [HistFileRdr(f) for f in ctx.FILE]

    # Header row; `columns` is a module-level list (built elsewhere,
    # presumably by gen_output_columns() — see main()).
    print(', '.join(columns))

    start = 0
    end = ctx.interval
    while True:  # one iteration per output interval [start, end)

        more_data = False  # set when any reader still has samples pending

        # add bins from all files in target intervals
        arr = None       # per-direction accumulated bin arrays for this interval
        numSamples = 0   # number of samples folded into this interval
        while True:      # keep sweeping the readers until none yields a sample
            foundSamples = False
            for fp in fps:
                ts = fp.curTS  # falsy timestamp => this reader is exhausted
                if ts and ts+10 < end: # shift sample time when very close to an end time
                    curdirect = fp.curDir
                    numSamples += 1
                    foundSamples = True
                    if arr is None:
                        # Lazily allocate one zeroed bin array per direction.
                        arr = {}
                        for d in directions:
                            arr[d] = np.zeros(shape=(__HIST_COLUMNS), dtype=int)
                    # 'm' (mixed) accumulates every sample regardless of
                    # direction; 'r'/'w'/'t' only match curDir 0/1/2.
                    if 'm' in arr:
                        arr['m'] = np.add(arr['m'], fp.curBins)
                    if 'r' in arr and curdirect == 0:
                        arr['r'] = np.add(arr['r'], fp.curBins)
                    if 'w' in arr and curdirect == 1:
                        arr['w'] = np.add(arr['w'], fp.curBins)
                    if 't' in arr and curdirect == 2:
                        arr['t'] = np.add(arr['t'], fp.curBins)

                    more_data = True
                    fp.nextData()  # advance reader; mutates curTS/curDir/curBins
                elif ts:
                    # Sample belongs to a later interval — leave it queued.
                    more_data = True

            # reached end of all files
            # or gone through all files without finding sample in interval
            if not more_data or not foundSamples:
                break

        if arr is not None:
            #print("{} size({}) samples({}) nonzero({}):".format(end, arr.size, numSamples, np.count_nonzero(arr)), str(arr), )
            # Deterministic output order: sort direction keys alphabetically.
            for d in sorted(arr.keys()):
                aval = arr[d]
                process_interval(ctx, aval, end, d)

        # reach end of all files
        if not more_data:
            break

        start += ctx.interval
        end = start + ctx.interval
+
def main(ctx):
    """Drive histogram post-processing for a set of fio histogram logs.

    Optionally reads the fio job file to auto-detect the --interval value
    (log_hist_msec), fills in defaults for percentiles/interval/time divisor,
    detects the number of histogram columns from the first input file, derives
    the bin latency values, and dispatches to the weighted or unweighted
    interval printer.

    Args:
        ctx: parsed command-line context (job_file, interval, percentiles,
             directions, usbin, FILE, noweight, ...). Mutated in place with
             derived settings.

    Side effects: sets module globals bin_vals/lower_bin_vals/upper_bin_vals/
    __HIST_COLUMNS/__TOTAL_COLUMNS and prints the output table.
    """
    if ctx.job_file:
        # SafeConfigParser and readfp() were deprecated in Python 3.2 and
        # removed in 3.12; prefer the modern names and keep a Python 2
        # fallback aliased to the same name.
        try:
            from configparser import ConfigParser, NoOptionError
        except ImportError:  # Python 2
            from ConfigParser import SafeConfigParser as ConfigParser, NoOptionError

        cp = ConfigParser(allow_no_value=True)
        with open(ctx.job_file, 'r') as fp:
            if hasattr(cp, 'read_file'):
                cp.read_file(fp)
            else:  # Python 2 ConfigParser has only readfp()
                cp.readfp(fp)

        if ctx.interval is None:
            # Auto detect --interval value; if several sections define
            # log_hist_msec, the last section scanned wins.
            for s in cp.sections():
                try:
                    hist_msec = cp.get(s, 'log_hist_msec')
                    if hist_msec is not None:
                        ctx.interval = int(hist_msec)
                except NoOptionError:
                    pass

    # Fill in defaults for options the caller did not provide.
    if not hasattr(ctx, 'percentiles'):
        ctx.percentiles = "90,95,99"

    if ctx.directions:
        ctx.directions = ctx.directions.lower()

    if ctx.interval is None:
        ctx.interval = 1000

    # Histogram bins are recorded in usec or nsec; pick the divisor that
    # converts them to msec for output.
    if ctx.usbin:
        ctx.time_divisor = 1000.0 # bins are in us
    else:
        ctx.time_divisor = 1000000.0 # bins are in ns

    gen_output_columns(ctx)

    # Automatically detect how many columns are in the input files,
    # calculate the corresponding 'coarseness' parameter used to generate
    # those files, and calculate the appropriate bin latency values:
    with open(ctx.FILE[0], 'r') as fp:
        global bin_vals, lower_bin_vals, upper_bin_vals, __HIST_COLUMNS, __TOTAL_COLUMNS
        __TOTAL_COLUMNS = len(fp.readline().split(','))
        __HIST_COLUMNS = __TOTAL_COLUMNS - __NON_HIST_COLUMNS

        max_cols = guess_max_from_bins(ctx, __HIST_COLUMNS)
        coarseness = int(np.log2(float(max_cols) / __HIST_COLUMNS))
        bin_vals = np.array([plat_idx_to_val_coarse(x, coarseness) for x in np.arange(__HIST_COLUMNS)], dtype=float)
        lower_bin_vals = np.array([plat_idx_to_val_coarse(x, coarseness, 0.0) for x in np.arange(__HIST_COLUMNS)], dtype=float)
        upper_bin_vals = np.array([plat_idx_to_val_coarse(x, coarseness, 1.0) for x in np.arange(__HIST_COLUMNS)], dtype=float)

    # indicate which directions to output (read(0), write(1), trim(2), mixed(3));
    # mixed is the default when no --directions filter was given.
    directions = set()
    if not ctx.directions or 'm' in ctx.directions: directions.add('m')
    if ctx.directions and 'r' in ctx.directions: directions.add('r')
    if ctx.directions and 'w' in ctx.directions: directions.add('w')
    if ctx.directions and 't' in ctx.directions: directions.add('t')

    if ctx.noweight:
        output_interval_data(ctx, directions)
    else:
        output_weighted_interval_data(ctx, directions)