2 # SPDX-License-Identifier: GPL-2.0-only
4 # Copyright (c) 2020 Western Digital Corporation or its affiliates.
7 # latency_percentiles.py
9 # Test the code that produces latency percentiles
10 # This is mostly to test the code changes to allow reporting
11 # of slat, clat, and lat percentiles
14 # python3 latency-tests.py [-f fio-path] [-a artifact-root] [--debug]
20 # unified rw reporting
21 # compare with latency log
22 # try various combinations of the ?lat_percentile options
26 # check presence of latency bins
27 # if the json percentiles match those from the raw data
28 # then the latency bin values and counts are probably ok
30 # produce both terse, JSON output and confirm that they match
31 # lat only; both lat and clat
33 # confirm that sync_lat data appears
34 # - MANUAL TESTING normal output:
36 # enable all, but only clat and lat appear
37 # enable subset of latency types
38 # read, write, trim, unified
40 # enable all latency types
41 # enable subset of latency types
42 # read, write, trim, unified
43 # ./fio/fio --name=test --randrepeat=0 --norandommap --time_based --runtime=2s --size=512M \
44 # --ioengine=null --slat_percentiles=1 --clat_percentiles=1 --lat_percentiles=1
45 # echo confirm that clat and lat percentiles appear
46 # ./fio/fio --name=test --randrepeat=0 --norandommap --time_based --runtime=2s --size=512M \
47 # --ioengine=null --slat_percentiles=0 --clat_percentiles=0 --lat_percentiles=1
48 # echo confirm that only lat percentiles appear
49 # ./fio/fio --name=test --randrepeat=0 --norandommap --time_based --runtime=2s --size=512M \
50 # --ioengine=null --slat_percentiles=0 --clat_percentiles=1 --lat_percentiles=0
51 # echo confirm that only clat percentiles appear
52 # ./fio/fio --name=test --randrepeat=0 --norandommap --time_based --runtime=2s --size=512M \
53 # --ioengine=libaio --slat_percentiles=1 --clat_percentiles=1 --lat_percentiles=1
54 # echo confirm that slat, clat, lat percentiles appear
55 # ./fio/fio --name=test --randrepeat=0 --norandommap --time_based --runtime=2s --size=512M \
56 # --ioengine=libaio --slat_percentiles=0 --clat_percentiles=1 --lat_percentiles=1
57 # echo confirm that clat and lat percentiles appear
58 # ./fio/fio --name=test --randrepeat=0 --norandommap --time_based --runtime=2s --size=512M \
59 # --ioengine=libaio --rw=randrw
60 # echo confirm that clat percentiles appear for reads and writes
61 # ./fio/fio --name=test --randrepeat=0 --norandommap --time_based --runtime=2s --size=512M \
62 # --ioengine=libaio --slat_percentiles=1 --clat_percentiles=0 --lat_percentiles=0 --rw=randrw
63 # echo confirm that slat percentiles appear for both reads and writes
64 # ./fio/fio --name=test --randrepeat=0 --norandommap --time_based --runtime=2s --size=512M \
65 # --ioengine=libaio --slat_percentiles=1 --clat_percentiles=1 --lat_percentiles=1 \
66 # --rw=randrw --unified_rw_reporting=1
67 # echo confirm that slat, clat, and lat percentiles appear for 'mixed' IOs
68 # ./fio/fio --name=test --randrepeat=0 --norandommap --time_based --runtime=2s --size=512M \
69 # --ioengine=null --slat_percentiles=1 --clat_percentiles=1 --lat_percentiles=1 \
70 # --rw=randrw --fsync=32
71 # echo confirm that fsync latencies appear
83 from pathlib import Path
87 """fio latency percentile test."""
89 def __init__(self, artifact_root, test_options, debug):
91 artifact_root root directory for artifacts (subdirectory will be created under here)
92 test test specification
94 self.artifact_root = artifact_root
95 self.test_options = test_options
99 self.terse_data = None
101 self.test_dir = os.path.join(self.artifact_root,
102 "{:03d}".format(self.test_options['test_id']))
103 if not os.path.exists(self.test_dir):
104 os.mkdir(self.test_dir)
106 self.filename = "latency{:03d}".format(self.test_options['test_id'])
108 def run_fio(self, fio_path):
118 "--group_reporting=1",
119 "--write_lat_log={0}".format(self.filename),
120 "--output={0}.out".format(self.filename),
121 "--ioengine={ioengine}".format(**self.test_options),
122 "--rw={rw}".format(**self.test_options),
123 "--runtime={runtime}".format(**self.test_options),
124 "--output-format={output-format}".format(**self.test_options),
126 for opt in ['slat_percentiles', 'clat_percentiles', 'lat_percentiles',
127 'unified_rw_reporting', 'fsync', 'fdatasync', 'numjobs', 'cmdprio_percentage']:
128 if opt in self.test_options:
129 option = '--{0}={{{0}}}'.format(opt)
130 fio_args.append(option.format(**self.test_options))
132 command = [fio_path] + fio_args
133 with open(os.path.join(self.test_dir, "{0}.command".format(self.filename)), "w+") as \
135 command_file.write("%s\n" % command)
138 stdout_file = open(os.path.join(self.test_dir, "{0}.stdout".format(self.filename)), "w+")
139 stderr_file = open(os.path.join(self.test_dir, "{0}.stderr".format(self.filename)), "w+")
140 exitcode_file = open(os.path.join(self.test_dir,
141 "{0}.exitcode".format(self.filename)), "w+")
144 # Avoid using subprocess.run() here because when a timeout occurs,
145 # fio will be stopped with SIGKILL. This does not give fio a
146 # chance to clean up and means that child processes may continue
147 # running and submitting IO.
148 proc = subprocess.Popen(command,
152 universal_newlines=True)
153 proc.communicate(timeout=300)
154 exitcode_file.write('{0}\n'.format(proc.returncode))
155 passed &= (proc.returncode == 0)
156 except subprocess.TimeoutExpired:
160 print("Timeout expired")
167 print("Exception: %s" % sys.exc_info())
172 exitcode_file.close()
175 if 'json' in self.test_options['output-format']:
176 if not self.get_json():
177 print('Unable to decode JSON data')
179 if 'terse' in self.test_options['output-format']:
180 if not self.get_terse():
181 print('Unable to decode terse data')
187 """Convert fio JSON output into a python JSON object"""
189 filename = os.path.join(self.test_dir, "{0}.out".format(self.filename))
190 with open(filename, 'r') as file:
191 file_data = file.read()
194 # Sometimes fio informational messages are included at the top of the
195 # JSON output, especially under Windows. Try to decode output as JSON
196 # data, lopping off up to the first four lines
198 lines = file_data.splitlines()
200 file_data = '\n'.join(lines[i:])
202 self.json_data = json.loads(file_data)
203 except json.JSONDecodeError:
211 """Read fio output and return terse format data."""
213 filename = os.path.join(self.test_dir, "{0}.out".format(self.filename))
214 with open(filename, 'r') as file:
215 file_data = file.read()
218 # Read the first few lines and see if any of them begin with '3;fio-'
219 # If so, the line is probably terse output. Obviously, this only
220 # works for fio terse version 3 and it does not work for
221 # multi-line terse output
223 lines = file_data.splitlines()
226 if file_data.startswith('3;fio-'):
227 self.terse_data = file_data.split(';')
232 def check_latencies(self, jsondata, ddir, slat=True, clat=True, tlat=True, plus=False,
234 """Check fio latency data.
236 ddir data direction to check (0=read, 1=write, 2=trim)
237 slat True if submission latency data available to check
238 clat True if completion latency data available to check
239 tlat True if total latency data available to check
240 plus True if we actually have json+ format data where additional checks can
242 unified True if fio is reporting unified r/w data
253 for lat in ['slat', 'clat', 'lat']:
256 if 'percentile' in jsondata[lat+'_ns']:
258 print('unexpected %s percentiles found' % lat)
260 print("%s percentiles skipped" % lat)
263 if 'percentile' not in jsondata[lat+'_ns']:
265 print('%s percentiles not found in fio output' % lat)
268 # Check only for the presence/absence of json+
269 # latency bins. Future work can check the
270 # accuracy of the bin values and counts.
272 # Because the latency percentiles are based on
273 # the bins, we can be confident that the bin
274 # values and counts are correct if fio's
275 # latency percentiles match what we compute
279 if 'bins' not in jsondata[lat+'_ns']:
280 print('bins not found with json+ output format')
283 if not self.check_jsonplus(jsondata[lat+'_ns']):
286 if 'bins' in jsondata[lat+'_ns']:
287 print('json+ bins found with json output format')
292 lat_file = os.path.join(self.test_dir, "%s_%s.%s.log" % (self.filename, lat, i+1))
293 if not os.path.exists(lat_file):
295 with open(lat_file, 'r', newline='') as file:
296 reader = csv.reader(file)
298 if unified or int(line[2]) == ddir:
299 latencies.append(int(line[1]))
301 if int(jsondata['total_ios']) != len(latencies):
303 print('%s: total_ios = %s, latencies logged = %d' % \
304 (lat, jsondata['total_ios'], len(latencies)))
306 print("total_ios %s match latencies logged" % jsondata['total_ios'])
309 ptiles = jsondata[lat+'_ns']['percentile']
311 for percentile in ptiles.keys():
313 # numpy.percentile(latencies, float(percentile),
314 # interpolation='higher')
315 # produces values that mostly match what fio reports
316 # however, in the tails of the distribution, the values produced
317 # by fio's and numpy.percentile's algorithms are occasionally off
318 # by one latency measurement. So instead of relying on the canned
319 # numpy.percentile routine, implement here fio's algorithm
321 rank = math.ceil(float(percentile)/100 * len(latencies))
326 value = latencies[int(index)]
327 fio_val = int(ptiles[percentile])
328 # The theory in stat.h says that the proportional error will be
330 if not self.similar(fio_val, value):
331 delta = abs(fio_val - value) / value
332 print("Error with %s %sth percentile: "
333 "fio: %d, expected: %d, proportional delta: %f" %
334 (lat, percentile, fio_val, value, delta))
335 print("Rank: %d, index: %d" % (rank, index))
338 print('%s %sth percentile values match: %d, %d' %
339 (lat, percentile, fio_val, value))
342 print("%s percentiles match" % lat)
349 def check_empty(job):
351 Make sure JSON data is empty.
353 Some data structures should be empty. This function makes sure that they are.
355 job JSON object that we need to check for emptiness
358 return job['total_ios'] == 0 and \
359 job['slat_ns']['N'] == 0 and \
360 job['clat_ns']['N'] == 0 and \
361 job['lat_ns']['N'] == 0
363 def check_nocmdprio_lat(self, job):
365 Make sure no high/low priority latencies appear.
367 job JSON object to check
370 for ddir in ['read', 'write', 'trim']:
372 if 'lat_high_prio' in job[ddir] or 'lat_low_prio' in job[ddir] or \
373 'clat_high_prio' in job[ddir] or 'clat_low_prio' in job[ddir]:
374 print("Unexpected high/low priority latencies found in %s output" % ddir)
378 print("No high/low priority latencies found")
383 def similar(approximation, actual):
385 Check whether the approximate values recorded by fio are within the theoretical bound.
387 Since it is impractical to store exact latency measurements for each and every IO, fio
388 groups similar latency measurements into variable-sized bins. The theory in stat.h says
389 that the proportional error will be less than 1/128. This function checks whether this
392 TODO This test will fail when comparing a value from the largest latency bin against its
393 actual measurement. Find some way to detect this and avoid failing.
395 approximation value of the bin used by fio to store a given latency
396 actual actual latency value
399 # Avoid a division by zero. The smallest latency values have no error.
401 return approximation == 0
403 delta = abs(approximation - actual) / actual
404 return delta <= 1/128
406 def check_jsonplus(self, jsondata):
407 """Check consistency of json+ data
409 When we have json+ data we can check the min value, max value, and
410 sample size reported by fio
412 jsondata json+ data that we need to check
417 keys = [int(k) for k in jsondata['bins'].keys()]
418 values = [int(jsondata['bins'][k]) for k in jsondata['bins'].keys()]
421 sampsize = sum(values)
423 if not self.similar(jsondata['min'], smallest):
425 print('reported min %d does not match json+ min %d' % (jsondata['min'], smallest))
427 print('json+ min values match: %d' % jsondata['min'])
429 if not self.similar(jsondata['max'], biggest):
431 print('reported max %d does not match json+ max %d' % (jsondata['max'], biggest))
433 print('json+ max values match: %d' % jsondata['max'])
435 if sampsize != jsondata['N']:
437 print('reported sample size %d does not match json+ total count %d' % \
438 (jsondata['N'], sampsize))
440 print('json+ sample sizes match: %d' % sampsize)
444 def check_sync_lat(self, jsondata, plus=False):
445 """Check fsync latency percentile data.
447 All we can check is that some percentiles are reported, unless we have json+ data.
448 If we actually have json+ data then we can do more checking.
450 jsondata JSON data for fsync operations
451 plus True if we actually have json+ data
455 if 'percentile' not in jsondata['lat_ns']:
456 print("Sync percentile data not found")
459 if int(jsondata['total_ios']) != int(jsondata['lat_ns']['N']):
461 print('Mismatch between total_ios and lat_ns sample size')
463 print('sync sample sizes match: %d' % jsondata['total_ios'])
466 if 'bins' in jsondata['lat_ns']:
467 print('Unexpected json+ bin data found')
470 if not self.check_jsonplus(jsondata['lat_ns']):
475 def check_terse(self, terse, jsondata):
476 """Compare terse latencies with JSON latencies.
478 terse terse format data for checking
479 jsondata JSON format data for checking
485 split = lat.split('%')
487 terse_val = int(split[1][1:])
488 json_val = math.floor(jsondata[pct]/1000)
489 if terse_val != json_val:
491 print('Mismatch with %sth percentile: json value=%d,%d terse value=%d' % \
492 (pct, jsondata[pct], json_val, terse_val))
494 print('Terse %sth percentile matches JSON value: %d' % (pct, terse_val))
498 def check_prio_latencies(self, jsondata, clat=True, plus=False):
499 """Check consistency of high/low priority latencies.
501 clat True if we should check clat data; otherwise check lat data
502 plus True if we have json+ format data where additional checks can
504 unified True if fio is reporting unified r/w data
508 high = 'clat_high_prio'
509 low = 'clat_low_prio'
512 high = 'lat_high_prio'
516 if not high in jsondata or not low in jsondata or not combined in jsondata:
517 print("Error identifying high/low priority latencies")
520 if jsondata[high]['N'] + jsondata[low]['N'] != jsondata[combined]['N']:
521 print("High %d + low %d != combined sample size %d" % \
522 (jsondata[high]['N'], jsondata[low]['N'], jsondata[combined]['N']))
525 print("High %d + low %d == combined sample size %d" % \
526 (jsondata[high]['N'], jsondata[low]['N'], jsondata[combined]['N']))
528 if min(jsondata[high]['min'], jsondata[low]['min']) != jsondata[combined]['min']:
529 print("Min of high %d, low %d min latencies does not match min %d from combined data" % \
530 (jsondata[high]['min'], jsondata[low]['min'], jsondata[combined]['min']))
533 print("Min of high %d, low %d min latencies matches min %d from combined data" % \
534 (jsondata[high]['min'], jsondata[low]['min'], jsondata[combined]['min']))
536 if max(jsondata[high]['max'], jsondata[low]['max']) != jsondata[combined]['max']:
537 print("Max of high %d, low %d max latencies does not match max %d from combined data" % \
538 (jsondata[high]['max'], jsondata[low]['max'], jsondata[combined]['max']))
541 print("Max of high %d, low %d max latencies matches max %d from combined data" % \
542 (jsondata[high]['max'], jsondata[low]['max'], jsondata[combined]['max']))
544 weighted_avg = (jsondata[high]['mean'] * jsondata[high]['N'] + \
545 jsondata[low]['mean'] * jsondata[low]['N']) / jsondata[combined]['N']
546 delta = abs(weighted_avg - jsondata[combined]['mean'])
547 if (delta / jsondata[combined]['mean']) > 0.0001:
548 print("Difference between weighted average %f of high, low means "
549 "and actual mean %f exceeds 0.01%%" % (weighted_avg, jsondata[combined]['mean']))
552 print("Weighted average %f of high, low means matches actual mean %f" % \
553 (weighted_avg, jsondata[combined]['mean']))
556 if not self.check_jsonplus(jsondata[high]):
558 if not self.check_jsonplus(jsondata[low]):
561 bins = {**jsondata[high]['bins'], **jsondata[low]['bins']}
562 for duration in bins.keys():
563 if duration in jsondata[high]['bins'] and duration in jsondata[low]['bins']:
564 bins[duration] = jsondata[high]['bins'][duration] + \
565 jsondata[low]['bins'][duration]
567 if len(bins) != len(jsondata[combined]['bins']):
568 print("Number of combined high/low bins does not match number of overall bins")
571 print("Number of bins from merged high/low data matches number of overall bins")
573 for duration in bins.keys():
574 if bins[duration] != jsondata[combined]['bins'][duration]:
575 print("Merged high/low count does not match overall count for duration %d" \
579 print("Merged high/low priority latency data match combined latency data")
583 """Check test output."""
585 raise NotImplementedError()
588 class Test001(FioLatTest):
589 """Test object for Test 1."""
592 """Check Test 1 output."""
594 job = self.json_data['jobs'][0]
597 if not self.check_empty(job['write']):
598 print("Unexpected write data found in output")
600 if not self.check_empty(job['trim']):
601 print("Unexpected trim data found in output")
603 if not self.check_nocmdprio_lat(job):
604 print("Unexpected high/low priority latencies found")
607 retval &= self.check_latencies(job['read'], 0, slat=False)
612 class Test002(FioLatTest):
613 """Test object for Test 2."""
616 """Check Test 2 output."""
618 job = self.json_data['jobs'][0]
621 if not self.check_empty(job['read']):
622 print("Unexpected read data found in output")
624 if not self.check_empty(job['trim']):
625 print("Unexpected trim data found in output")
627 if not self.check_nocmdprio_lat(job):
628 print("Unexpected high/low priority latencies found")
631 retval &= self.check_latencies(job['write'], 1, slat=False, clat=False)
636 class Test003(FioLatTest):
637 """Test object for Test 3."""
640 """Check Test 3 output."""
642 job = self.json_data['jobs'][0]
645 if not self.check_empty(job['read']):
646 print("Unexpected read data found in output")
648 if not self.check_empty(job['write']):
649 print("Unexpected write data found in output")
651 if not self.check_nocmdprio_lat(job):
652 print("Unexpected high/low priority latencies found")
655 retval &= self.check_latencies(job['trim'], 2, slat=False, tlat=False)
660 class Test004(FioLatTest):
661 """Test object for Tests 4, 13."""
664 """Check Test 4, 13 output."""
666 job = self.json_data['jobs'][0]
669 if not self.check_empty(job['write']):
670 print("Unexpected write data found in output")
672 if not self.check_empty(job['trim']):
673 print("Unexpected trim data found in output")
675 if not self.check_nocmdprio_lat(job):
676 print("Unexpected high/low priority latencies found")
679 retval &= self.check_latencies(job['read'], 0, plus=True)
684 class Test005(FioLatTest):
685 """Test object for Test 5."""
688 """Check Test 5 output."""
690 job = self.json_data['jobs'][0]
693 if not self.check_empty(job['read']):
694 print("Unexpected read data found in output")
696 if not self.check_empty(job['trim']):
697 print("Unexpected trim data found in output")
699 if not self.check_nocmdprio_lat(job):
700 print("Unexpected high/low priority latencies found")
703 retval &= self.check_latencies(job['write'], 1, slat=False, plus=True)
708 class Test006(FioLatTest):
709 """Test object for Test 6."""
712 """Check Test 6 output."""
714 job = self.json_data['jobs'][0]
717 if not self.check_empty(job['write']):
718 print("Unexpected write data found in output")
720 if not self.check_empty(job['trim']):
721 print("Unexpected trim data found in output")
723 if not self.check_nocmdprio_lat(job):
724 print("Unexpected high/low priority latencies found")
727 retval &= self.check_latencies(job['read'], 0, slat=False, tlat=False, plus=True)
732 class Test007(FioLatTest):
733 """Test object for Test 7."""
736 """Check Test 7 output."""
738 job = self.json_data['jobs'][0]
741 if not self.check_empty(job['trim']):
742 print("Unexpected trim data found in output")
744 if not self.check_nocmdprio_lat(job):
745 print("Unexpected high/low priority latencies found")
748 retval &= self.check_latencies(job['read'], 0, clat=False, tlat=False, plus=True)
749 retval &= self.check_latencies(job['write'], 1, clat=False, tlat=False, plus=True)
754 class Test008(FioLatTest):
755 """Test object for Tests 8, 14."""
758 """Check Test 8, 14 output."""
760 job = self.json_data['jobs'][0]
763 if 'read' in job or 'write'in job or 'trim' in job:
764 print("Unexpected data direction found in fio output")
766 if not self.check_nocmdprio_lat(job):
767 print("Unexpected high/low priority latencies found")
770 retval &= self.check_latencies(job['mixed'], 0, plus=True, unified=True)
775 class Test009(FioLatTest):
776 """Test object for Test 9."""
779 """Check Test 9 output."""
781 job = self.json_data['jobs'][0]
784 if not self.check_empty(job['read']):
785 print("Unexpected read data found in output")
787 if not self.check_empty(job['trim']):
788 print("Unexpected trim data found in output")
790 if not self.check_sync_lat(job['sync'], plus=True):
791 print("Error checking fsync latency data")
793 if not self.check_nocmdprio_lat(job):
794 print("Unexpected high/low priority latencies found")
797 retval &= self.check_latencies(job['write'], 1, slat=False, plus=True)
802 class Test010(FioLatTest):
803 """Test object for Test 10."""
806 """Check Test 10 output."""
808 job = self.json_data['jobs'][0]
811 if not self.check_empty(job['trim']):
812 print("Unexpected trim data found in output")
814 if not self.check_nocmdprio_lat(job):
815 print("Unexpected high/low priority latencies found")
818 retval &= self.check_latencies(job['read'], 0, plus=True)
819 retval &= self.check_latencies(job['write'], 1, plus=True)
820 retval &= self.check_terse(self.terse_data[17:34], job['read']['lat_ns']['percentile'])
821 retval &= self.check_terse(self.terse_data[58:75], job['write']['lat_ns']['percentile'])
822 # Terse data checking only works for default percentiles.
823 # This needs to be changed if something other than the default is ever used.
828 class Test011(FioLatTest):
829 """Test object for Test 11."""
832 """Check Test 11 output."""
834 job = self.json_data['jobs'][0]
837 if not self.check_empty(job['trim']):
838 print("Unexpected trim data found in output")
840 if not self.check_nocmdprio_lat(job):
841 print("Unexpected high/low priority latencies found")
844 retval &= self.check_latencies(job['read'], 0, slat=False, clat=False, plus=True)
845 retval &= self.check_latencies(job['write'], 1, slat=False, clat=False, plus=True)
846 retval &= self.check_terse(self.terse_data[17:34], job['read']['lat_ns']['percentile'])
847 retval &= self.check_terse(self.terse_data[58:75], job['write']['lat_ns']['percentile'])
848 # Terse data checking only works for default percentiles.
849 # This needs to be changed if something other than the default is ever used.
854 class Test015(FioLatTest):
855 """Test object for Test 15."""
858 """Check Test 15 output."""
860 job = self.json_data['jobs'][0]
863 if not self.check_empty(job['write']):
864 print("Unexpected write data found in output")
866 if not self.check_empty(job['trim']):
867 print("Unexpected trim data found in output")
870 retval &= self.check_latencies(job['read'], 0, plus=True)
871 retval &= self.check_prio_latencies(job['read'], clat=False, plus=True)
876 class Test016(FioLatTest):
877 """Test object for Test 16."""
880 """Check Test 16 output."""
882 job = self.json_data['jobs'][0]
885 if not self.check_empty(job['read']):
886 print("Unexpected read data found in output")
888 if not self.check_empty(job['trim']):
889 print("Unexpected trim data found in output")
892 retval &= self.check_latencies(job['write'], 1, slat=False, plus=True)
893 retval &= self.check_prio_latencies(job['write'], clat=False, plus=True)
898 class Test017(FioLatTest):
899 """Test object for Test 17."""
902 """Check Test 17 output."""
904 job = self.json_data['jobs'][0]
907 if not self.check_empty(job['write']):
908 print("Unexpected write data found in output")
910 if not self.check_empty(job['trim']):
911 print("Unexpected trim data found in output")
914 retval &= self.check_latencies(job['read'], 0, slat=False, tlat=False, plus=True)
915 retval &= self.check_prio_latencies(job['read'], plus=True)
920 class Test018(FioLatTest):
921 """Test object for Test 18."""
924 """Check Test 18 output."""
926 job = self.json_data['jobs'][0]
929 if not self.check_empty(job['trim']):
930 print("Unexpected trim data found in output")
933 retval &= self.check_latencies(job['read'], 0, clat=False, tlat=False, plus=True)
934 retval &= self.check_latencies(job['write'], 1, clat=False, tlat=False, plus=True)
936 # We actually have json+ data but setting plus=False below avoids checking the
937 # json+ bins which did not exist for clat and lat because this job is run with
938 # clat_percentiles=0, lat_percentiles=0, However, we can still check the summary
940 retval &= self.check_prio_latencies(job['write'], plus=False)
941 retval &= self.check_prio_latencies(job['read'], plus=False)
946 class Test019(FioLatTest):
947 """Test object for Tests 19, 20."""
950 """Check Test 19, 20 output."""
952 job = self.json_data['jobs'][0]
955 if 'read' in job or 'write'in job or 'trim' in job:
956 print("Unexpected data direction found in fio output")
959 retval &= self.check_latencies(job['mixed'], 0, plus=True, unified=True)
960 retval &= self.check_prio_latencies(job['mixed'], clat=False, plus=True)
966 """Parse command-line arguments."""
968 parser = argparse.ArgumentParser()
969 parser.add_argument('-f', '--fio', help='path to file executable (e.g., ./fio)')
970 parser.add_argument('-a', '--artifact-root', help='artifact root directory')
971 parser.add_argument('-d', '--debug', help='enable debug output', action='store_true')
972 parser.add_argument('-s', '--skip', nargs='+', type=int,
973 help='list of test(s) to skip')
974 parser.add_argument('-o', '--run-only', nargs='+', type=int,
975 help='list of test(s) to run, skipping all others')
976 args = parser.parse_args()
982 """Run tests of fio latency percentile reporting"""
986 artifact_root = args.artifact_root if args.artifact_root else \
987 "latency-test-{0}".format(time.strftime("%Y%m%d-%H%M%S"))
988 os.mkdir(artifact_root)
989 print("Artifact directory is %s" % artifact_root)
992 fio = str(Path(args.fio).absolute())
995 print("fio path is %s" % fio)
997 if platform.system() == 'Linux':
999 elif platform.system() == 'Windows':
1007 # enable slat, clat, lat
1008 # only clat and lat will appear
1009 # because the null ioengine is synchronous
1012 "output-format": "json",
1013 "slat_percentiles": 1,
1014 "clat_percentiles": 1,
1015 "lat_percentiles": 1,
1018 "test_obj": Test001,
1025 "output-format": "json",
1026 "slat_percentiles": 0,
1027 "clat_percentiles": 0,
1028 "lat_percentiles": 1,
1031 "test_obj": Test002,
1038 "output-format": "json",
1039 "slat_percentiles": 0,
1040 "clat_percentiles": 1,
1041 "lat_percentiles": 0,
1044 "test_obj": Test003,
1048 # enable slat, clat, lat
1049 # all will appear because libaio is asynchronous
1052 "output-format": "json+",
1053 "slat_percentiles": 1,
1054 "clat_percentiles": 1,
1055 "lat_percentiles": 1,
1058 "test_obj": Test004,
1062 # enable only clat, lat
1065 "output-format": "json+",
1066 "slat_percentiles": 0,
1067 "clat_percentiles": 1,
1068 "lat_percentiles": 1,
1071 "test_obj": Test005,
1075 # by default only clat should appear
1078 "output-format": "json+",
1081 "test_obj": Test006,
1088 "output-format": "json+",
1089 "slat_percentiles": 1,
1090 "clat_percentiles": 0,
1091 "lat_percentiles": 0,
1094 "test_obj": Test007,
1097 # 50/50 r/w, aio, unified_rw_reporting
1098 # enable slat, clat, lat
1101 "output-format": "json+",
1102 "slat_percentiles": 1,
1103 "clat_percentiles": 1,
1104 "lat_percentiles": 1,
1107 'unified_rw_reporting': 1,
1108 "test_obj": Test008,
1112 # enable slat, clat, lat
1116 "output-format": "json+",
1117 "slat_percentiles": 1,
1118 "clat_percentiles": 1,
1119 "lat_percentiles": 1,
1123 "test_obj": Test009,
1127 # enable slat, clat, lat
1130 "output-format": "terse,json+",
1131 "slat_percentiles": 1,
1132 "clat_percentiles": 1,
1133 "lat_percentiles": 1,
1136 "test_obj": Test010,
1143 "output-format": "terse,json+",
1144 "slat_percentiles": 0,
1145 "clat_percentiles": 0,
1146 "lat_percentiles": 1,
1149 "test_obj": Test011,
1153 # enable slat, clat, lat
1154 # only clat and lat will appear
1155 # because the null ioengine is synchronous
1156 # same as Test 1 except
1157 # numjobs = 4 to test sum_thread_stats() changes
1160 "output-format": "json",
1161 "slat_percentiles": 1,
1162 "clat_percentiles": 1,
1163 "lat_percentiles": 1,
1167 "test_obj": Test001,
1171 # enable slat, clat, lat
1172 # all will appear because libaio is asynchronous
1173 # same as Test 4 except
1174 # numjobs = 4 to test sum_thread_stats() changes
1177 "output-format": "json+",
1178 "slat_percentiles": 1,
1179 "clat_percentiles": 1,
1180 "lat_percentiles": 1,
1184 "test_obj": Test004,
1187 # 50/50 r/w, aio, unified_rw_reporting
1188 # enable slat, clat, lat
1189 # same as Test 8 except
1190 # numjobs = 4 to test sum_thread_stats() changes
1193 "output-format": "json+",
1194 "slat_percentiles": 1,
1195 "clat_percentiles": 1,
1196 "lat_percentiles": 1,
1199 'unified_rw_reporting': 1,
1201 "test_obj": Test008,
1205 # enable slat, clat, lat
1206 # all will appear because libaio is asynchronous
1207 # same as Test 4 except add cmdprio_percentage
1210 "output-format": "json+",
1211 "slat_percentiles": 1,
1212 "clat_percentiles": 1,
1213 "lat_percentiles": 1,
1216 'cmdprio_percentage': 50,
1217 "test_obj": Test015,
1221 # enable only clat, lat
1222 # same as Test 5 except add cmdprio_percentage
1225 "output-format": "json+",
1226 "slat_percentiles": 0,
1227 "clat_percentiles": 1,
1228 "lat_percentiles": 1,
1231 'cmdprio_percentage': 50,
1232 "test_obj": Test016,
1236 # by default only clat should appear
1237 # same as Test 6 except add cmdprio_percentage
1240 "output-format": "json+",
1243 'cmdprio_percentage': 50,
1244 "test_obj": Test017,
1249 # same as Test 7 except add cmdprio_percentage
1252 "output-format": "json+",
1253 "slat_percentiles": 1,
1254 "clat_percentiles": 0,
1255 "lat_percentiles": 0,
1258 'cmdprio_percentage': 50,
1259 "test_obj": Test018,
1262 # 50/50 r/w, aio, unified_rw_reporting
1263 # enable slat, clat, lat
1264 # same as Test 8 except add cmdprio_percentage
1267 "output-format": "json+",
1268 "slat_percentiles": 1,
1269 "clat_percentiles": 1,
1270 "lat_percentiles": 1,
1273 'unified_rw_reporting': 1,
1274 'cmdprio_percentage': 50,
1275 "test_obj": Test019,
1278 # 50/50 r/w, aio, unified_rw_reporting
1279 # enable slat, clat, lat
1280 # same as Test 19 except
1281 # add numjobs = 4 to test sum_thread_stats() changes
1284 "output-format": "json+",
1285 "slat_percentiles": 1,
1286 "clat_percentiles": 1,
1287 "lat_percentiles": 1,
1290 'unified_rw_reporting': 1,
1291 'cmdprio_percentage': 50,
1293 "test_obj": Test019,
1301 for test in test_list:
1302 if (args.skip and test['test_id'] in args.skip) or \
1303 (args.run_only and test['test_id'] not in args.run_only):
1304 skipped = skipped + 1
1305 outcome = 'SKIPPED (User request)'
1306 elif platform.system() != 'Linux' and 'cmdprio_percentage' in test:
1307 skipped = skipped + 1
1308 outcome = 'SKIPPED (Linux required for cmdprio_percentage tests)'
1310 test_obj = test['test_obj'](artifact_root, test, args.debug)
1311 status = test_obj.run_fio(fio)
1313 status = test_obj.check()
1321 print("**********Test {0} {1}**********".format(test['test_id'], outcome))
1323 print("{0} tests passed, {1} failed, {2} skipped".format(passed, failed, skipped))
1328 if __name__ == '__main__':