2 # SPDX-License-Identifier: GPL-2.0-only
4 # Copyright (c) 2020 Western Digital Corporation or its affiliates.
7 # latency_percentiles.py
9 # Test the code that produces latency percentiles
10 # This is mostly to test the code changes to allow reporting
11 # of slat, clat, and lat percentiles
14 # python3 latency-tests.py [-f fio-path] [-a artifact-root] [--debug]
20 # unified rw reporting
21 # compare with latency log
22 # try various combinations of the ?lat_percentile options
26 # check presence of latency bins
27 # if the json percentiles match those from the raw data
28 # then the latency bin values and counts are probably ok
30 # produce both terse, JSON output and confirm that they match
31 # lat only; both lat and clat
33 # confirm that sync_lat data appears
34 # - MANUAL TESTING normal output:
36 # enable all, but only clat and lat appear
37 # enable subset of latency types
38 # read, write, trim, unified
40 # enable all latency types
41 # enable subset of latency types
42 # read, write, trim, unified
43 # ./fio/fio --name=test --randrepeat=0 --norandommap --time_based --runtime=2s --size=512M \
44 # --ioengine=null --slat_percentiles=1 --clat_percentiles=1 --lat_percentiles=1
45 # echo confirm that clat and lat percentiles appear
46 # ./fio/fio --name=test --randrepeat=0 --norandommap --time_based --runtime=2s --size=512M \
47 # --ioengine=null --slat_percentiles=0 --clat_percentiles=0 --lat_percentiles=1
48 # echo confirm that only lat percentiles appear
49 # ./fio/fio --name=test --randrepeat=0 --norandommap --time_based --runtime=2s --size=512M \
50 # --ioengine=null --slat_percentiles=0 --clat_percentiles=1 --lat_percentiles=0
51 # echo confirm that only clat percentiles appear
52 # ./fio/fio --name=test --randrepeat=0 --norandommap --time_based --runtime=2s --size=512M \
53 # --ioengine=libaio --slat_percentiles=1 --clat_percentiles=1 --lat_percentiles=1
54 # echo confirm that slat, clat, lat percentiles appear
55 # ./fio/fio --name=test --randrepeat=0 --norandommap --time_based --runtime=2s --size=512M \
56 # --ioengine=libaio --slat_percentiles=0 --clat_percentiles=1 --lat_percentiles=1
57 # echo confirm that clat and lat percentiles appear
58 # ./fio/fio --name=test --randrepeat=0 --norandommap --time_based --runtime=2s --size=512M \
59 # --ioengine=libaio -rw=randrw
60 # echo confirm that clat percentiles appear for reads and writes
61 # ./fio/fio --name=test --randrepeat=0 --norandommap --time_based --runtime=2s --size=512M \
62 # --ioengine=libaio --slat_percentiles=1 --clat_percentiles=0 --lat_percentiles=0 --rw=randrw
63 # echo confirm that slat percentiles appear for both reads and writes
64 # ./fio/fio --name=test --randrepeat=0 --norandommap --time_based --runtime=2s --size=512M \
65 # --ioengine=libaio --slat_percentiles=1 --clat_percentiles=1 --lat_percentiles=1 \
66 # --rw=randrw --unified_rw_reporting=1
67 # echo confirm that slat, clat, and lat percentiles appear for 'mixed' IOs
68 # ./fio/fio --name=test --randrepeat=0 --norandommap --time_based --runtime=2s --size=512M \
69 # --ioengine=null --slat_percentiles=1 --clat_percentiles=1 --lat_percentiles=1 \
70 # --rw=randrw --fsync=32
71 # echo confirm that fsync latencies appear
83 from pathlib import Path
# NOTE(review): the enclosing "class FioLatTest" header and several interior
# lines are not visible in this extract (gaps in the original numbering);
# text below is kept byte-identical.
87 """fio latency percentile test."""
89 def __init__(self, artifact_root, test_options, debug):
91 artifact_root root directory for artifacts (subdirectory will be created under here)
92 test test specification
94 self.artifact_root = artifact_root
95 self.test_options = test_options
99 self.terse_data = None
# Per-test artifact directory named after the zero-padded test id.
101 self.test_dir = os.path.join(self.artifact_root,
102 "{:03d}".format(self.test_options['test_id']))
103 if not os.path.exists(self.test_dir):
104 os.mkdir(self.test_dir)
# Base name used for all output files (command, stdout, logs, etc.).
106 self.filename = "latency{:03d}".format(self.test_options['test_id'])
# Run fio with the options for this test, capturing the command line,
# stdout, stderr and exit code as artifacts, then parse the requested
# output format(s). NOTE(review): several lines (fio_args initialization,
# try/except/finally scaffolding) appear elided in this extract.
108 def run_fio(self, fio_path):
119 "--group_reporting=1",
120 "--write_lat_log={0}".format(self.filename),
121 "--output={0}.out".format(self.filename),
122 "--ioengine={ioengine}".format(**self.test_options),
123 "--rw={rw}".format(**self.test_options),
124 "--runtime={runtime}".format(**self.test_options),
125 "--output-format={output-format}".format(**self.test_options),
# Optional per-test options are appended only when present in test_options.
127 for opt in ['slat_percentiles', 'clat_percentiles', 'lat_percentiles',
128 'unified_rw_reporting', 'fsync', 'fdatasync', 'numjobs', 'cmdprio_percentage']:
129 if opt in self.test_options:
130 option = '--{0}={{{0}}}'.format(opt)
131 fio_args.append(option.format(**self.test_options))
133 command = [fio_path] + fio_args
# Record the exact command line for post-mortem debugging.
134 with open(os.path.join(self.test_dir, "{0}.command".format(self.filename)), "w+") as \
136 command_file.write("%s\n" % command)
139 stdout_file = open(os.path.join(self.test_dir, "{0}.stdout".format(self.filename)), "w+")
140 stderr_file = open(os.path.join(self.test_dir, "{0}.stderr".format(self.filename)), "w+")
141 exitcode_file = open(os.path.join(self.test_dir,
142 "{0}.exitcode".format(self.filename)), "w+")
145 # Avoid using subprocess.run() here because when a timeout occurs,
146 # fio will be stopped with SIGKILL. This does not give fio a
147 # chance to clean up and means that child processes may continue
148 # running and submitting IO.
149 proc = subprocess.Popen(command,
153 universal_newlines=True)
154 proc.communicate(timeout=300)
155 exitcode_file.write('{0}\n'.format(proc.returncode))
156 passed &= (proc.returncode == 0)
157 except subprocess.TimeoutExpired:
161 print("Timeout expired")
168 print("Exception: %s" % sys.exc_info())
173 exitcode_file.close()
# Decode whichever output format(s) the test requested.
176 if 'json' in self.test_options['output-format']:
177 if not self.get_json():
178 print('Unable to decode JSON data')
180 if 'terse' in self.test_options['output-format']:
181 if not self.get_terse():
182 print('Unable to decode terse data')
# NOTE(review): the "def get_json(self):" line and the retry loop header
# appear elided in this extract; text below kept byte-identical.
188 """Convert fio JSON output into a python JSON object"""
190 filename = os.path.join(self.test_dir, "{0}.out".format(self.filename))
191 with open(filename, 'r') as file:
192 file_data = file.read()
195 # Sometimes fio informational messages are included at the top of the
196 # JSON output, especially under Windows. Try to decode output as JSON
197 # data, lopping off up to the first four lines
199 lines = file_data.splitlines()
201 file_data = '\n'.join(lines[i:])
203 self.json_data = json.loads(file_data)
204 except json.JSONDecodeError:
# NOTE(review): the "def get_terse(self):" line appears elided; text below
# kept byte-identical.
212 """Read fio output and return terse format data."""
214 filename = os.path.join(self.test_dir, "{0}.out".format(self.filename))
215 with open(filename, 'r') as file:
216 file_data = file.read()
219 # Read the first few lines and see if any of them begin with '3;'
220 # If so, the line is probably terse output. Obviously, this only
221 # works for fio terse version 3 and it does not work for
222 # multi-line terse output
224 lines = file_data.splitlines()
# Terse v3 records are semicolon-delimited; split into fields for later
# cross-checking against the JSON percentiles.
227 if file_data.startswith('3;'):
228 self.terse_data = file_data.split(';')
# Core verification: cross-check fio's reported percentiles against the
# raw per-IO latency log using fio's own rank-based percentile algorithm.
233 def check_latencies(self, jsondata, ddir, slat=True, clat=True, tlat=True, plus=False,
235 """Check fio latency data.
237 ddir data direction to check (0=read, 1=write, 2=trim)
238 slat True if submission latency data available to check
239 clat True if completion latency data available to check
240 tlat True if total latency data available to check
241 plus True if we actually have json+ format data where additional checks can
243 unified True if fio is reporting unified r/w data
254 for lat in ['slat', 'clat', 'lat']:
# Latency types that were disabled for this test must NOT appear.
257 if 'percentile' in jsondata[lat+'_ns']:
259 print('unexpected %s percentiles found' % lat)
261 print("%s percentiles skipped" % lat)
264 if 'percentile' not in jsondata[lat+'_ns']:
266 print('%s percentiles not found in fio output' % lat)
269 # Check only for the presence/absence of json+
270 # latency bins. Future work can check the
271 # accuracy of the bin values and counts.
273 # Because the latency percentiles are based on
274 # the bins, we can be confident that the bin
275 # values and counts are correct if fio's
276 # latency percentiles match what we compute
280 if 'bins' not in jsondata[lat+'_ns']:
281 print('bins not found with json+ output format')
284 if not self.check_jsonplus(jsondata[lat+'_ns']):
287 if 'bins' in jsondata[lat+'_ns']:
288 print('json+ bins found with json output format')
# Collect raw latencies for this data direction from the per-job latency
# log files written via --write_lat_log.
293 lat_file = os.path.join(self.test_dir, "%s_%s.%s.log" % (self.filename, lat, i+1))
294 if not os.path.exists(lat_file):
296 with open(lat_file, 'r', newline='') as file:
297 reader = csv.reader(file)
299 if unified or int(line[2]) == ddir:
300 latencies.append(int(line[1]))
302 if int(jsondata['total_ios']) != len(latencies):
304 print('%s: total_ios = %s, latencies logged = %d' % \
305 (lat, jsondata['total_ios'], len(latencies)))
307 print("total_ios %s match latencies logged" % jsondata['total_ios'])
310 ptiles = jsondata[lat+'_ns']['percentile']
312 for percentile in ptiles.keys():
314 # numpy.percentile(latencies, float(percentile),
315 # interpolation='higher')
316 # produces values that mostly match what fio reports
317 # however, in the tails of the distribution, the values produced
318 # by fio's and numpy.percentile's algorithms are occasionally off
319 # by one latency measurement. So instead of relying on the canned
320 # numpy.percentile routine, implement here fio's algorithm
322 rank = math.ceil(float(percentile)/100 * len(latencies))
327 value = latencies[int(index)]
328 fio_val = int(ptiles[percentile])
329 # The theory in stat.h says that the proportional error will be
331 if not self.similar(fio_val, value):
332 delta = abs(fio_val - value) / value
333 print("Error with %s %sth percentile: "
334 "fio: %d, expected: %d, proportional delta: %f" %
335 (lat, percentile, fio_val, value, delta))
336 print("Rank: %d, index: %d" % (rank, index))
339 print('%s %sth percentile values match: %d, %d' %
340 (lat, percentile, fio_val, value))
343 print("%s percentiles match" % lat)
# NOTE(review): the @staticmethod decorator and docstring delimiters appear
# elided in this extract; text below kept byte-identical.
350 def check_empty(job):
352 Make sure JSON data is empty.
354 Some data structures should be empty. This function makes sure that they are.
356 job JSON object that we need to check for emptiness
# Empty means zero IOs and zero samples for every latency type.
359 return job['total_ios'] == 0 and \
360 job['slat_ns']['N'] == 0 and \
361 job['clat_ns']['N'] == 0 and \
362 job['lat_ns']['N'] == 0
# Verify that no cmdprio (high/low priority) latency sections leaked into
# the output of a job that did not use cmdprio_percentage.
364 def check_nocmdprio_lat(self, job):
366 Make sure no high/low priority latencies appear.
368 job JSON object to check
371 for ddir in ['read', 'write', 'trim']:
373 if 'lat_high_prio' in job[ddir] or 'lat_low_prio' in job[ddir] or \
374 'clat_high_prio' in job[ddir] or 'clat_low_prio' in job[ddir]:
375 print("Unexpected high/low priority latencies found in %s output" % ddir)
379 print("No high/low priority latencies found")
# Proportional-error comparison used throughout: fio's latency bins
# guarantee at most 1/128 relative error (see fio's stat.h).
384 def similar(approximation, actual):
386 Check whether the approximate values recorded by fio are within the theoretical bound.
388 Since it is impractical to store exact latency measurements for each and every IO, fio
389 groups similar latency measurements into variable-sized bins. The theory in stat.h says
390 that the proportional error will be less than 1/128. This function checks whether this
393 TODO This test will fail when comparing a value from the largest latency bin against its
394 actual measurement. Find some way to detect this and avoid failing.
396 approximation value of the bin used by fio to store a given latency
397 actual actual latency value
400 # Avoid a division by zero. The smallest latency values have no error.
# NOTE(review): the conditional guard ("if actual == 0:") that the comment
# above refers to appears to have been lost in extraction — the early
# return below would otherwise be unconditional. Confirm against upstream.
402 return approximation == 0
404 delta = abs(approximation - actual) / actual
405 return delta <= 1/128
# Consistency checks possible only with json+ output: reported min/max must
# match the smallest/biggest bin (within bin error) and the bin counts must
# sum to the reported sample size N.
407 def check_jsonplus(self, jsondata):
408 """Check consistency of json+ data
410 When we have json+ data we can check the min value, max value, and
411 sample size reported by fio
413 jsondata json+ data that we need to check
418 keys = [int(k) for k in jsondata['bins'].keys()]
419 values = [int(jsondata['bins'][k]) for k in jsondata['bins'].keys()]
422 sampsize = sum(values)
424 if not self.similar(jsondata['min'], smallest):
426 print('reported min %d does not match json+ min %d' % (jsondata['min'], smallest))
428 print('json+ min values match: %d' % jsondata['min'])
430 if not self.similar(jsondata['max'], biggest):
432 print('reported max %d does not match json+ max %d' % (jsondata['max'], biggest))
434 print('json+ max values match: %d' % jsondata['max'])
436 if sampsize != jsondata['N']:
438 print('reported sample size %d does not match json+ total count %d' % \
439 (jsondata['N'], sampsize))
441 print('json+ sample sizes match: %d' % sampsize)
# fsync/fdatasync latencies are reported only as 'lat_ns' under the 'sync'
# section; verify presence, sample-size consistency, and (json+) bins.
445 def check_sync_lat(self, jsondata, plus=False):
446 """Check fsync latency percentile data.
448 All we can check is that some percentiles are reported, unless we have json+ data.
449 If we actually have json+ data then we can do more checking.
451 jsondata JSON data for fsync operations
452 plus True if we actually have json+ data
456 if 'percentile' not in jsondata['lat_ns']:
457 print("Sync percentile data not found")
460 if int(jsondata['total_ios']) != int(jsondata['lat_ns']['N']):
462 print('Mismatch between total_ios and lat_ns sample size')
464 print('sync sample sizes match: %d' % jsondata['total_ios'])
# Plain json output must not contain json+ bins; json+ output gets the
# full consistency check.
467 if 'bins' in jsondata['lat_ns']:
468 print('Unexpected json+ bin data found')
471 if not self.check_jsonplus(jsondata['lat_ns']):
# Cross-check terse-format percentile fields ("pct%=value" in usec) against
# the JSON percentiles (in nsec, hence the floor-divide by 1000).
476 def check_terse(self, terse, jsondata):
477 """Compare terse latencies with JSON latencies.
479 terse terse format data for checking
480 jsondata JSON format data for checking
486 split = lat.split('%')
488 terse_val = int(split[1][1:])
489 json_val = math.floor(jsondata[pct]/1000)
490 if terse_val != json_val:
492 print('Mismatch with %sth percentile: json value=%d,%d terse value=%d' % \
493 (pct, jsondata[pct], json_val, terse_val))
495 print('Terse %sth percentile matches JSON value: %d' % (pct, terse_val))
# Verify that high- and low-priority latency data partition the combined
# latency data: sample sizes add up, min/max agree, means agree (weighted),
# and (json+) the merged high/low bins reproduce the combined bins.
499 def check_prio_latencies(self, jsondata, clat=True, plus=False):
500 """Check consistency of high/low priority latencies.
502 clat True if we should check clat data; otherwise check lat data
503 plus True if we have json+ format data where additional checks can
505 unified True if fio is reporting unified r/w data
509 high = 'clat_high_prio'
510 low = 'clat_low_prio'
513 high = 'lat_high_prio'
517 if not high in jsondata or not low in jsondata or not combined in jsondata:
518 print("Error identifying high/low priority latencies")
521 if jsondata[high]['N'] + jsondata[low]['N'] != jsondata[combined]['N']:
522 print("High %d + low %d != combined sample size %d" % \
523 (jsondata[high]['N'], jsondata[low]['N'], jsondata[combined]['N']))
526 print("High %d + low %d == combined sample size %d" % \
527 (jsondata[high]['N'], jsondata[low]['N'], jsondata[combined]['N']))
529 if min(jsondata[high]['min'], jsondata[low]['min']) != jsondata[combined]['min']:
530 print("Min of high %d, low %d min latencies does not match min %d from combined data" % \
531 (jsondata[high]['min'], jsondata[low]['min'], jsondata[combined]['min']))
534 print("Min of high %d, low %d min latencies matches min %d from combined data" % \
535 (jsondata[high]['min'], jsondata[low]['min'], jsondata[combined]['min']))
537 if max(jsondata[high]['max'], jsondata[low]['max']) != jsondata[combined]['max']:
538 print("Max of high %d, low %d max latencies does not match max %d from combined data" % \
539 (jsondata[high]['max'], jsondata[low]['max'], jsondata[combined]['max']))
542 print("Max of high %d, low %d max latencies matches max %d from combined data" % \
543 (jsondata[high]['max'], jsondata[low]['max'], jsondata[combined]['max']))
# The combined mean must equal the sample-size-weighted average of the
# high/low means, within a small tolerance for floating-point rounding.
545 weighted_avg = (jsondata[high]['mean'] * jsondata[high]['N'] + \
546 jsondata[low]['mean'] * jsondata[low]['N']) / jsondata[combined]['N']
547 delta = abs(weighted_avg - jsondata[combined]['mean'])
548 if (delta / jsondata[combined]['mean']) > 0.0001:
549 print("Difference between weighted average %f of high, low means "
550 "and actual mean %f exceeds 0.01%%" % (weighted_avg, jsondata[combined]['mean']))
553 print("Weighted average %f of high, low means matches actual mean %f" % \
554 (weighted_avg, jsondata[combined]['mean']))
557 if not self.check_jsonplus(jsondata[high]):
559 if not self.check_jsonplus(jsondata[low]):
# Merge the high/low bins, summing counts for durations present in both.
562 bins = {**jsondata[high]['bins'], **jsondata[low]['bins']}
563 for duration in bins.keys():
564 if duration in jsondata[high]['bins'] and duration in jsondata[low]['bins']:
565 bins[duration] = jsondata[high]['bins'][duration] + \
566 jsondata[low]['bins'][duration]
568 if len(bins) != len(jsondata[combined]['bins']):
569 print("Number of combined high/low bins does not match number of overall bins")
572 print("Number of bins from merged high/low data matches number of overall bins")
574 for duration in bins.keys():
575 if bins[duration] != jsondata[combined]['bins'][duration]:
576 print("Merged high/low count does not match overall count for duration %d" \
580 print("Merged high/low priority latency data match combined latency data")
# Abstract hook: each TestNNN subclass implements its own verification.
# NOTE(review): the "def check(self):" line appears elided in this extract.
584 """Check test output."""
586 raise NotImplementedError()
# Read-only job: write/trim sections must be empty, no cmdprio latencies,
# and read latencies are checked without slat (slat=False).
589 class Test001(FioLatTest):
590 """Test object for Test 1."""
593 """Check Test 1 output."""
595 job = self.json_data['jobs'][0]
598 if not self.check_empty(job['write']):
599 print("Unexpected write data found in output")
601 if not self.check_empty(job['trim']):
602 print("Unexpected trim data found in output")
604 if not self.check_nocmdprio_lat(job):
605 print("Unexpected high/low priority latencies found")
608 retval &= self.check_latencies(job['read'], 0, slat=False)
# Write-only job: read/trim must be empty; only total latency percentiles
# are expected (slat=False, clat=False).
613 class Test002(FioLatTest):
614 """Test object for Test 2."""
617 """Check Test 2 output."""
619 job = self.json_data['jobs'][0]
622 if not self.check_empty(job['read']):
623 print("Unexpected read data found in output")
625 if not self.check_empty(job['trim']):
626 print("Unexpected trim data found in output")
628 if not self.check_nocmdprio_lat(job):
629 print("Unexpected high/low priority latencies found")
632 retval &= self.check_latencies(job['write'], 1, slat=False, clat=False)
# Trim-only job: read/write must be empty; only clat percentiles are
# expected (slat=False, tlat=False).
637 class Test003(FioLatTest):
638 """Test object for Test 3."""
641 """Check Test 3 output."""
643 job = self.json_data['jobs'][0]
646 if not self.check_empty(job['read']):
647 print("Unexpected read data found in output")
649 if not self.check_empty(job['write']):
650 print("Unexpected write data found in output")
652 if not self.check_nocmdprio_lat(job):
653 print("Unexpected high/low priority latencies found")
656 retval &= self.check_latencies(job['trim'], 2, slat=False, tlat=False)
# Read-only job with json+ output: all latency types checked (plus=True
# enables the latency-bin consistency checks).
661 class Test004(FioLatTest):
662 """Test object for Tests 4, 13."""
665 """Check Test 4, 13 output."""
667 job = self.json_data['jobs'][0]
670 if not self.check_empty(job['write']):
671 print("Unexpected write data found in output")
673 if not self.check_empty(job['trim']):
674 print("Unexpected trim data found in output")
676 if not self.check_nocmdprio_lat(job):
677 print("Unexpected high/low priority latencies found")
680 retval &= self.check_latencies(job['read'], 0, plus=True)
# Write-only job with json+ output: clat and lat checked, slat disabled.
685 class Test005(FioLatTest):
686 """Test object for Test 5."""
689 """Check Test 5 output."""
691 job = self.json_data['jobs'][0]
694 if not self.check_empty(job['read']):
695 print("Unexpected read data found in output")
697 if not self.check_empty(job['trim']):
698 print("Unexpected trim data found in output")
700 if not self.check_nocmdprio_lat(job):
701 print("Unexpected high/low priority latencies found")
704 retval &= self.check_latencies(job['write'], 1, slat=False, plus=True)
# Read-only job with json+ output: only clat percentiles expected
# (slat=False, tlat=False).
709 class Test006(FioLatTest):
710 """Test object for Test 6."""
713 """Check Test 6 output."""
715 job = self.json_data['jobs'][0]
718 if not self.check_empty(job['write']):
719 print("Unexpected write data found in output")
721 if not self.check_empty(job['trim']):
722 print("Unexpected trim data found in output")
724 if not self.check_nocmdprio_lat(job):
725 print("Unexpected high/low priority latencies found")
728 retval &= self.check_latencies(job['read'], 0, slat=False, tlat=False, plus=True)
# Mixed read/write job: only slat percentiles expected for each direction
# (clat=False, tlat=False); trim must be empty.
733 class Test007(FioLatTest):
734 """Test object for Test 7."""
737 """Check Test 7 output."""
739 job = self.json_data['jobs'][0]
742 if not self.check_empty(job['trim']):
743 print("Unexpected trim data found in output")
745 if not self.check_nocmdprio_lat(job):
746 print("Unexpected high/low priority latencies found")
749 retval &= self.check_latencies(job['read'], 0, clat=False, tlat=False, plus=True)
750 retval &= self.check_latencies(job['write'], 1, clat=False, tlat=False, plus=True)
# unified_rw_reporting job: all IO appears under 'mixed'; the per-direction
# sections must be absent entirely.
755 class Test008(FioLatTest):
756 """Test object for Tests 8, 14."""
759 """Check Test 8, 14 output."""
761 job = self.json_data['jobs'][0]
764 if 'read' in job or 'write'in job or 'trim' in job:
765 print("Unexpected data direction found in fio output")
767 if not self.check_nocmdprio_lat(job):
768 print("Unexpected high/low priority latencies found")
771 retval &= self.check_latencies(job['mixed'], 0, plus=True, unified=True)
# Write job with fsync: also verifies the 'sync' (fsync) latency section
# via check_sync_lat.
776 class Test009(FioLatTest):
777 """Test object for Test 9."""
780 """Check Test 9 output."""
782 job = self.json_data['jobs'][0]
785 if not self.check_empty(job['read']):
786 print("Unexpected read data found in output")
788 if not self.check_empty(job['trim']):
789 print("Unexpected trim data found in output")
791 if not self.check_sync_lat(job['sync'], plus=True):
792 print("Error checking fsync latency data")
794 if not self.check_nocmdprio_lat(job):
795 print("Unexpected high/low priority latencies found")
798 retval &= self.check_latencies(job['write'], 1, slat=False, plus=True)
# Mixed read/write job with both terse and json+ output: latency checks
# plus cross-validation of terse total-latency percentile fields against
# the JSON percentiles (fixed field ranges assume default percentile list).
803 class Test010(FioLatTest):
804 """Test object for Test 10."""
807 """Check Test 10 output."""
809 job = self.json_data['jobs'][0]
812 if not self.check_empty(job['trim']):
813 print("Unexpected trim data found in output")
815 if not self.check_nocmdprio_lat(job):
816 print("Unexpected high/low priority latencies found")
819 retval &= self.check_latencies(job['read'], 0, plus=True)
820 retval &= self.check_latencies(job['write'], 1, plus=True)
821 retval &= self.check_terse(self.terse_data[17:34], job['read']['lat_ns']['percentile'])
822 retval &= self.check_terse(self.terse_data[58:75], job['write']['lat_ns']['percentile'])
823 # Terse data checking only works for default percentiles.
824 # This needs to be changed if something other than the default is ever used.
# Like Test010 but with only total-latency percentiles enabled
# (slat=False, clat=False); terse fields cross-checked the same way.
829 class Test011(FioLatTest):
830 """Test object for Test 11."""
833 """Check Test 11 output."""
835 job = self.json_data['jobs'][0]
838 if not self.check_empty(job['trim']):
839 print("Unexpected trim data found in output")
841 if not self.check_nocmdprio_lat(job):
842 print("Unexpected high/low priority latencies found")
845 retval &= self.check_latencies(job['read'], 0, slat=False, clat=False, plus=True)
846 retval &= self.check_latencies(job['write'], 1, slat=False, clat=False, plus=True)
847 retval &= self.check_terse(self.terse_data[17:34], job['read']['lat_ns']['percentile'])
848 retval &= self.check_terse(self.terse_data[58:75], job['write']['lat_ns']['percentile'])
849 # Terse data checking only works for default percentiles.
850 # This needs to be changed if something other than the default is ever used.
# cmdprio_percentage read job: regular latency checks plus high/low
# priority consistency on total latency (clat=False selects lat_*_prio).
855 class Test015(FioLatTest):
856 """Test object for Test 15."""
859 """Check Test 15 output."""
861 job = self.json_data['jobs'][0]
864 if not self.check_empty(job['write']):
865 print("Unexpected write data found in output")
867 if not self.check_empty(job['trim']):
868 print("Unexpected trim data found in output")
871 retval &= self.check_latencies(job['read'], 0, plus=True)
872 retval &= self.check_prio_latencies(job['read'], clat=False, plus=True)
# cmdprio_percentage write job: same priority-latency checks as Test015
# but for the write direction.
877 class Test016(FioLatTest):
878 """Test object for Test 16."""
881 """Check Test 16 output."""
883 job = self.json_data['jobs'][0]
886 if not self.check_empty(job['read']):
887 print("Unexpected read data found in output")
889 if not self.check_empty(job['trim']):
890 print("Unexpected trim data found in output")
893 retval &= self.check_latencies(job['write'], 1, slat=False, plus=True)
894 retval &= self.check_prio_latencies(job['write'], clat=False, plus=True)
# cmdprio_percentage read job with clat only: priority checks use the
# clat_*_prio sections (default clat=True).
899 class Test017(FioLatTest):
900 """Test object for Test 17."""
903 """Check Test 17 output."""
905 job = self.json_data['jobs'][0]
908 if not self.check_empty(job['write']):
909 print("Unexpected write data found in output")
911 if not self.check_empty(job['trim']):
912 print("Unexpected trim data found in output")
915 retval &= self.check_latencies(job['read'], 0, slat=False, tlat=False, plus=True)
916 retval &= self.check_prio_latencies(job['read'], plus=True)
# cmdprio_percentage mixed job with only slat enabled; priority data is
# checked via summary statistics only (plus=False — see comment below).
921 class Test018(FioLatTest):
922 """Test object for Test 18."""
925 """Check Test 18 output."""
927 job = self.json_data['jobs'][0]
930 if not self.check_empty(job['trim']):
931 print("Unexpected trim data found in output")
934 retval &= self.check_latencies(job['read'], 0, clat=False, tlat=False, plus=True)
935 retval &= self.check_latencies(job['write'], 1, clat=False, tlat=False, plus=True)
937 # We actually have json+ data but setting plus=False below avoids checking the
938 # json+ bins which did not exist for clat and lat because this job is run with
939 # clat_percentiles=0, lat_percentiles=0, However, we can still check the summary
941 retval &= self.check_prio_latencies(job['write'], plus=False)
942 retval &= self.check_prio_latencies(job['read'], plus=False)
# cmdprio_percentage + unified_rw_reporting job: all IO under 'mixed',
# per-direction sections must be absent; priority checks on total latency.
947 class Test019(FioLatTest):
948 """Test object for Tests 19, 20."""
951 """Check Test 19, 20 output."""
953 job = self.json_data['jobs'][0]
956 if 'read' in job or 'write'in job or 'trim' in job:
957 print("Unexpected data direction found in fio output")
960 retval &= self.check_latencies(job['mixed'], 0, plus=True, unified=True)
961 retval &= self.check_prio_latencies(job['mixed'], clat=False, plus=True)
967 """Parse command-line arguments."""
969 parser = argparse.ArgumentParser()
970 parser.add_argument('-f', '--fio', help='path to file executable (e.g., ./fio)')
971 parser.add_argument('-a', '--artifact-root', help='artifact root directory')
972 parser.add_argument('-d', '--debug', help='enable debug output', action='store_true')
973 parser.add_argument('-s', '--skip', nargs='+', type=int,
974 help='list of test(s) to skip')
975 parser.add_argument('-o', '--run-only', nargs='+', type=int,
976 help='list of test(s) to run, skipping all others')
977 args = parser.parse_args()
# Driver: create the artifact directory, resolve the fio binary, build the
# list of test specifications, then run and check each test, tallying
# passed/failed/skipped. Comment typos fixed (synchronous, libaio, lat);
# several elided lines (test dict braces, try bodies) are not visible here.
983 """Run tests of fio latency percentile reporting"""
987 artifact_root = args.artifact_root if args.artifact_root else \
988 "latency-test-{0}".format(time.strftime("%Y%m%d-%H%M%S"))
989 os.mkdir(artifact_root)
990 print("Artifact directory is %s" % artifact_root)
993 fio = str(Path(args.fio).absolute())
996 print("fio path is %s" % fio)
998 if platform.system() == 'Linux':
1000 elif platform.system() == 'Windows':
1008 # enable slat, clat, lat
1009 # only clat and lat will appear because
1010 # the null ioengine is synchronous
1013 "output-format": "json",
1014 "slat_percentiles": 1,
1015 "clat_percentiles": 1,
1016 "lat_percentiles": 1,
1019 "test_obj": Test001,
1026 "output-format": "json",
1027 "slat_percentiles": 0,
1028 "clat_percentiles": 0,
1029 "lat_percentiles": 1,
1032 "test_obj": Test002,
1039 "output-format": "json",
1040 "slat_percentiles": 0,
1041 "clat_percentiles": 1,
1042 "lat_percentiles": 0,
1045 "test_obj": Test003,
1049 # enable slat, clat, lat
1050 # all will appear because libaio is asynchronous
1053 "output-format": "json+",
1054 "slat_percentiles": 1,
1055 "clat_percentiles": 1,
1056 "lat_percentiles": 1,
1059 "test_obj": Test004,
1063 # enable only clat, lat
1066 "output-format": "json+",
1067 "slat_percentiles": 0,
1068 "clat_percentiles": 1,
1069 "lat_percentiles": 1,
1072 "test_obj": Test005,
1076 # by default only clat should appear
1079 "output-format": "json+",
1082 "test_obj": Test006,
1089 "output-format": "json+",
1090 "slat_percentiles": 1,
1091 "clat_percentiles": 0,
1092 "lat_percentiles": 0,
1095 "test_obj": Test007,
1098 # 50/50 r/w, aio, unified_rw_reporting
1099 # enable slat, clat, lat
1102 "output-format": "json+",
1103 "slat_percentiles": 1,
1104 "clat_percentiles": 1,
1105 "lat_percentiles": 1,
1108 'unified_rw_reporting': 1,
1109 "test_obj": Test008,
1113 # enable slat, clat, lat
1117 "output-format": "json+",
1118 "slat_percentiles": 1,
1119 "clat_percentiles": 1,
1120 "lat_percentiles": 1,
1124 "test_obj": Test009,
1128 # enable slat, clat, lat
1131 "output-format": "terse,json+",
1132 "slat_percentiles": 1,
1133 "clat_percentiles": 1,
1134 "lat_percentiles": 1,
1137 "test_obj": Test010,
1144 "output-format": "terse,json+",
1145 "slat_percentiles": 0,
1146 "clat_percentiles": 0,
1147 "lat_percentiles": 1,
1150 "test_obj": Test011,
1154 # enable slat, clat, lat
1155 # only clat and lat will appear because
1156 # the null ioengine is synchronous
1157 # same as Test 1 except
1158 # numjobs = 4 to test sum_thread_stats() changes
1161 "output-format": "json",
1162 "slat_percentiles": 1,
1163 "clat_percentiles": 1,
1164 "lat_percentiles": 1,
1168 "test_obj": Test001,
1172 # enable slat, clat, lat
1173 # all will appear because libaio is asynchronous
1174 # same as Test 4 except
1175 # numjobs = 4 to test sum_thread_stats() changes
1178 "output-format": "json+",
1179 "slat_percentiles": 1,
1180 "clat_percentiles": 1,
1181 "lat_percentiles": 1,
1185 "test_obj": Test004,
1188 # 50/50 r/w, aio, unified_rw_reporting
1189 # enable slat, clat, lat
1190 # same as Test 8 except
1191 # numjobs = 4 to test sum_thread_stats() changes
1194 "output-format": "json+",
1195 "slat_percentiles": 1,
1196 "clat_percentiles": 1,
1197 "lat_percentiles": 1,
1200 'unified_rw_reporting': 1,
1202 "test_obj": Test008,
1206 # enable slat, clat, lat
1207 # all will appear because libaio is asynchronous
1208 # same as Test 4 except add cmdprio_percentage
1211 "output-format": "json+",
1212 "slat_percentiles": 1,
1213 "clat_percentiles": 1,
1214 "lat_percentiles": 1,
1217 'cmdprio_percentage': 50,
1218 "test_obj": Test015,
1222 # enable only clat, lat
1223 # same as Test 5 except add cmdprio_percentage
1226 "output-format": "json+",
1227 "slat_percentiles": 0,
1228 "clat_percentiles": 1,
1229 "lat_percentiles": 1,
1232 'cmdprio_percentage': 50,
1233 "test_obj": Test016,
1237 # by default only clat should appear
1238 # same as Test 6 except add cmdprio_percentage
1241 "output-format": "json+",
1244 'cmdprio_percentage': 50,
1245 "test_obj": Test017,
1250 # same as Test 7 except add cmdprio_percentage
1253 "output-format": "json+",
1254 "slat_percentiles": 1,
1255 "clat_percentiles": 0,
1256 "lat_percentiles": 0,
1259 'cmdprio_percentage': 50,
1260 "test_obj": Test018,
1263 # 50/50 r/w, aio, unified_rw_reporting
1264 # enable slat, clat, lat
1265 # same as Test 8 except add cmdprio_percentage
1268 "output-format": "json+",
1269 "slat_percentiles": 1,
1270 "clat_percentiles": 1,
1271 "lat_percentiles": 1,
1274 'unified_rw_reporting': 1,
1275 'cmdprio_percentage': 50,
1276 "test_obj": Test019,
1279 # 50/50 r/w, aio, unified_rw_reporting
1280 # enable slat, clat, lat
1281 # same as Test 19 except
1282 # add numjobs = 4 to test sum_thread_stats() changes
1285 "output-format": "json+",
1286 "slat_percentiles": 1,
1287 "clat_percentiles": 1,
1288 "lat_percentiles": 1,
1291 'unified_rw_reporting': 1,
1292 'cmdprio_percentage': 50,
1294 "test_obj": Test019,
# Run each selected test: honor --skip/--run-only, and skip cmdprio tests
# unless running as root on Linux (ioprio manipulation needs privileges).
1302 for test in test_list:
1303 if (args.skip and test['test_id'] in args.skip) or \
1304 (args.run_only and test['test_id'] not in args.run_only):
1305 skipped = skipped + 1
1306 outcome = 'SKIPPED (User request)'
1307 elif (platform.system() != 'Linux' or os.geteuid() != 0) and 'cmdprio_percentage' in test:
1308 skipped = skipped + 1
1309 outcome = 'SKIPPED (Linux root required for cmdprio_percentage tests)'
1311 test_obj = test['test_obj'](artifact_root, test, args.debug)
1312 status = test_obj.run_fio(fio)
1314 status = test_obj.check()
1322 print("**********Test {0} {1}**********".format(test['test_id'], outcome))
1324 print("{0} tests passed, {1} failed, {2} skipped".format(passed, failed, skipped))
1329 if __name__ == '__main__':