2 # SPDX-License-Identifier: GPL-2.0-only
4 # Copyright (c) 2020 Western Digital Corporation or its affiliates.
7 # latency_percentiles.py
9 # Test the code that produces latency percentiles
10 # This is mostly to test the code changes to allow reporting
11 # of slat, clat, and lat percentiles
14 # python3 latency-tests.py [-f fio-path] [-a artifact-root] [--debug]
20 # unified rw reporting
21 # compare with latency log
22 # try various combinations of the ?lat_percentile options
26 # check presence of latency bins
27 # if the json percentiles match those from the raw data
28 # then the latency bin values and counts are probably ok
30 # produce both terse, JSON output and confirm that they match
31 # lat only; both lat and clat
33 # confirm that sync_lat data appears
34 # - MANUAL TESTING normal output:
36 # enable all, but only clat and lat appear
37 # enable subset of latency types
38 # read, write, trim, unified
40 # enable all latency types
41 # enable subset of latency types
42 # read, write, trim, unified
43 # ./fio/fio --name=test --randrepeat=0 --norandommap --time_based --runtime=2s --size=512M \
44 # --ioengine=null --slat_percentiles=1 --clat_percentiles=1 --lat_percentiles=1
45 # echo confirm that clat and lat percentiles appear
46 # ./fio/fio --name=test --randrepeat=0 --norandommap --time_based --runtime=2s --size=512M \
47 # --ioengine=null --slat_percentiles=0 --clat_percentiles=0 --lat_percentiles=1
48 # echo confirm that only lat percentiles appear
49 # ./fio/fio --name=test --randrepeat=0 --norandommap --time_based --runtime=2s --size=512M \
50 # --ioengine=null --slat_percentiles=0 --clat_percentiles=1 --lat_percentiles=0
51 # echo confirm that only clat percentiles appear
52 # ./fio/fio --name=test --randrepeat=0 --norandommap --time_based --runtime=2s --size=512M \
53 # --ioengine=libaio --slat_percentiles=1 --clat_percentiles=1 --lat_percentiles=1
54 # echo confirm that slat, clat, lat percentiles appear
55 # ./fio/fio --name=test --randrepeat=0 --norandommap --time_based --runtime=2s --size=512M \
56 # --ioengine=libaio --slat_percentiles=0 --clat_percentiles=1 --lat_percentiles=1
57 # echo confirm that clat and lat percentiles appear
58 # ./fio/fio --name=test --randrepeat=0 --norandommap --time_based --runtime=2s --size=512M \
59 # --ioengine=libaio -rw=randrw
60 # echo confirm that clat percentiles appear for reads and writes
61 # ./fio/fio --name=test --randrepeat=0 --norandommap --time_based --runtime=2s --size=512M \
62 # --ioengine=libaio --slat_percentiles=1 --clat_percentiles=0 --lat_percentiles=0 --rw=randrw
63 # echo confirm that slat percentiles appear for both reads and writes
64 # ./fio/fio --name=test --randrepeat=0 --norandommap --time_based --runtime=2s --size=512M \
65 # --ioengine=libaio --slat_percentiles=1 --clat_percentiles=1 --lat_percentiles=1 \
66 # --rw=randrw --unified_rw_reporting=1
67 # echo confirm that slat, clat, and lat percentiles appear for 'mixed' IOs
68 # ./fio/fio --name=test --randrepeat=0 --norandommap --time_based --runtime=2s --size=512M \
69 # --ioengine=null --slat_percentiles=1 --clat_percentiles=1 --lat_percentiles=1 \
70 # --rw=randrw --fsync=32
71 # echo confirm that fsync latencies appear
83 from collections import Counter
84 from pathlib import Path
88 """fio latency percentile test."""
90 def __init__(self, artifact_root, test_options, debug):
# Store the test configuration and create a per-test artifact directory
# named after the zero-padded test_id; all fio output files for this test
# are written under that directory using the "latencyNNN" filename stem.
92 artifact_root root directory for artifacts (subdirectory will be created under here)
93 test test specification
95 self.artifact_root = artifact_root
96 self.test_options = test_options
# terse_data is populated later by get_terse() when terse output is requested
100 self.terse_data = None
102 self.test_dir = os.path.join(self.artifact_root,
103 "{:03d}".format(self.test_options['test_id']))
104 if not os.path.exists(self.test_dir):
105 os.mkdir(self.test_dir)
# Filename stem shared by the .command/.out/.stdout/.stderr/.exitcode files
107 self.filename = "latency{:03d}".format(self.test_options['test_id'])
109 def run_fio(self, fio_path):
# Build the fio command line from fixed arguments plus any optional
# per-test settings present in test_options, run fio with a timeout,
# record command/stdout/stderr/exit code to artifact files, and then
# parse the requested output format(s) (JSON and/or terse).
120 "--group_reporting=1",
121 "--write_lat_log={0}".format(self.filename),
122 "--output={0}.out".format(self.filename),
123 "--ioengine={ioengine}".format(**self.test_options),
124 "--rw={rw}".format(**self.test_options),
125 "--runtime={runtime}".format(**self.test_options),
126 "--output-format={output-format}".format(**self.test_options),
# Optional settings are only appended when the test specification sets them
128 for opt in ['slat_percentiles', 'clat_percentiles', 'lat_percentiles',
129 'unified_rw_reporting', 'fsync', 'fdatasync', 'numjobs',
130 'cmdprio_percentage', 'bssplit', 'cmdprio_bssplit']:
131 if opt in self.test_options:
# '--{0}={{{0}}}' produces e.g. '--fsync={fsync}', filled in below
132 option = '--{0}={{{0}}}'.format(opt)
133 fio_args.append(option.format(**self.test_options))
135 command = [fio_path] + fio_args
# Record the exact command line for later reproduction/debugging
136 with open(os.path.join(self.test_dir, "{0}.command".format(self.filename)), "w+") as \
138 command_file.write("%s\n" % command)
141 stdout_file = open(os.path.join(self.test_dir, "{0}.stdout".format(self.filename)), "w+")
142 stderr_file = open(os.path.join(self.test_dir, "{0}.stderr".format(self.filename)), "w+")
143 exitcode_file = open(os.path.join(self.test_dir,
144 "{0}.exitcode".format(self.filename)), "w+")
147 # Avoid using subprocess.run() here because when a timeout occurs,
148 # fio will be stopped with SIGKILL. This does not give fio a
149 # chance to clean up and means that child processes may continue
150 # running and submitting IO.
151 proc = subprocess.Popen(command,
155 universal_newlines=True)
# 300s cap: a hung fio run fails the test instead of blocking forever
156 proc.communicate(timeout=300)
157 exitcode_file.write('{0}\n'.format(proc.returncode))
158 passed &= (proc.returncode == 0)
159 except subprocess.TimeoutExpired:
163 print("Timeout expired")
170 print("Exception: %s" % sys.exc_info())
175 exitcode_file.close()
# Parse whichever output format(s) the test requested; a decode failure
# is reported and fails the run.
178 if 'json' in self.test_options['output-format']:
179 if not self.get_json():
180 print('Unable to decode JSON data')
182 if 'terse' in self.test_options['output-format']:
183 if not self.get_terse():
184 print('Unable to decode terse data')
190 """Convert fio JSON output into a python JSON object"""
192 filename = os.path.join(self.test_dir, "{0}.out".format(self.filename))
193 with open(filename, 'r') as file:
194 file_data = file.read()
197 # Sometimes fio informational messages are included at the top of the
198 # JSON output, especially under Windows. Try to decode output as JSON
199 # data, lopping off up to the first four lines
201 lines = file_data.splitlines()
203 file_data = '\n'.join(lines[i:])
205 self.json_data = json.loads(file_data)
206 except json.JSONDecodeError:
214 """Read fio output and return terse format data."""
216 filename = os.path.join(self.test_dir, "{0}.out".format(self.filename))
217 with open(filename, 'r') as file:
218 file_data = file.read()
221 # Read the first few lines and see if any of them begin with '3;'
222 # If so, the line is probably terse output. Obviously, this only
223 # works for fio terse version 3 and it does not work for
224 # multi-line terse output
226 lines = file_data.splitlines()
229 if file_data.startswith('3;'):
230 self.terse_data = file_data.split(';')
235 def check_latencies(self, jsondata, ddir, slat=True, clat=True, tlat=True, plus=False,
237 """Check fio latency data.
239 ddir data direction to check (0=read, 1=write, 2=trim)
240 slat True if submission latency data available to check
241 clat True if completion latency data available to check
242 tlat True if total latency data available to check
243 plus True if we actually have json+ format data where additional checks can
245 unified True if fio is reporting unified r/w data
# For each latency type, verify that percentile data is present exactly
# when the corresponding flag says it should be.
256 for lat in ['slat', 'clat', 'lat']:
259 if 'percentile' in jsondata[lat+'_ns']:
261 print('unexpected %s percentiles found' % lat)
263 print("%s percentiles skipped" % lat)
266 if 'percentile' not in jsondata[lat+'_ns']:
268 print('%s percentiles not found in fio output' % lat)
271 # Check only for the presence/absence of json+
272 # latency bins. Future work can check the
273 # accuracy of the bin values and counts.
275 # Because the latency percentiles are based on
276 # the bins, we can be confident that the bin
277 # values and counts are correct if fio's
278 # latency percentiles match what we compute
282 if 'bins' not in jsondata[lat+'_ns']:
283 print('bins not found with json+ output format')
286 if not self.check_jsonplus(jsondata[lat+'_ns']):
289 if 'bins' in jsondata[lat+'_ns']:
290 print('json+ bins found with json output format')
# Read the raw per-IO latency log written via --write_lat_log and collect
# the latency column for IOs matching the requested data direction
# (column 2 of the CSV is the direction; column 1 is the latency in ns).
295 lat_file = os.path.join(self.test_dir, "%s_%s.%s.log" % (self.filename, lat, i+1))
296 if not os.path.exists(lat_file):
298 with open(lat_file, 'r', newline='') as file:
299 reader = csv.reader(file)
301 if unified or int(line[2]) == ddir:
302 latencies.append(int(line[1]))
# Sanity check: the JSON total_ios must equal the number of logged samples
304 if int(jsondata['total_ios']) != len(latencies):
306 print('%s: total_ios = %s, latencies logged = %d' % \
307 (lat, jsondata['total_ios'], len(latencies)))
309 print("total_ios %s match latencies logged" % jsondata['total_ios'])
# Recompute each reported percentile from the raw samples using fio's own
# rank-based algorithm and compare against fio's value (within the
# binning error bound checked by similar()).
312 ptiles = jsondata[lat+'_ns']['percentile']
314 for percentile in ptiles.keys():
316 # numpy.percentile(latencies, float(percentile),
317 # interpolation='higher')
318 # produces values that mostly match what fio reports
319 # however, in the tails of the distribution, the values produced
320 # by fio's and numpy.percentile's algorithms are occasionally off
321 # by one latency measurement. So instead of relying on the canned
322 # numpy.percentile routine, implement here fio's algorithm
324 rank = math.ceil(float(percentile)/100 * len(latencies))
329 value = latencies[int(index)]
330 fio_val = int(ptiles[percentile])
331 # The theory in stat.h says that the proportional error will be
333 if not self.similar(fio_val, value):
334 delta = abs(fio_val - value) / value
335 print("Error with %s %sth percentile: "
336 "fio: %d, expected: %d, proportional delta: %f" %
337 (lat, percentile, fio_val, value, delta))
338 print("Rank: %d, index: %d" % (rank, index))
341 print('%s %sth percentile values match: %d, %d' %
342 (lat, percentile, fio_val, value))
345 print("%s percentiles match" % lat)
352 def check_empty(job):
354 Make sure JSON data is empty.
356 Some data structures should be empty. This function makes sure that they are.
358 job JSON object that we need to check for emptiness
# Empty means no IOs and zero samples in every latency distribution.
361 return job['total_ios'] == 0 and \
362 job['slat_ns']['N'] == 0 and \
363 job['clat_ns']['N'] == 0 and \
364 job['lat_ns']['N'] == 0
366 def check_nocmdprio_lat(self, job):
368 Make sure no per priority latencies appear.
370 job JSON object to check
# A 'prios' key under any data direction means fio emitted per-priority
# latency data, which these (non-cmdprio) jobs should not produce.
373 for ddir in ['read', 'write', 'trim']:
375 if 'prios' in job[ddir]:
376 print("Unexpected per priority latencies found in %s output" % ddir)
380 print("No per priority latencies found")
385 def similar(approximation, actual):
387 Check whether the approximate values recorded by fio are within the theoretical bound.
389 Since it is impractical to store exact latency measurements for each and every IO, fio
390 groups similar latency measurements into variable-sized bins. The theory in stat.h says
391 that the proportional error will be less than 1/128. This function checks whether this
394 TODO This test will fail when comparing a value from the largest latency bin against its
395 actual measurement. Find some way to detect this and avoid failing.
397 approximation value of the bin used by fio to store a given latency
398 actual actual latency value
401 # Avoid a division by zero. The smallest latency values have no error.
403 return approximation == 0
# Accept the approximation if its relative error is within fio's 1/128 bound
405 delta = abs(approximation - actual) / actual
406 return delta <= 1/128
408 def check_jsonplus(self, jsondata):
409 """Check consistency of json+ data
411 When we have json+ data we can check the min value, max value, and
412 sample size reported by fio
414 jsondata json+ data that we need to check
# Derive min/max/sample-count from the raw histogram bins (keys are bin
# latency values, values are counts) and compare against fio's summary.
419 keys = [int(k) for k in jsondata['bins'].keys()]
420 values = [int(jsondata['bins'][k]) for k in jsondata['bins'].keys()]
423 sampsize = sum(values)
# min/max are compared with similar() since they are bin approximations
425 if not self.similar(jsondata['min'], smallest):
427 print('reported min %d does not match json+ min %d' % (jsondata['min'], smallest))
429 print('json+ min values match: %d' % jsondata['min'])
431 if not self.similar(jsondata['max'], biggest):
433 print('reported max %d does not match json+ max %d' % (jsondata['max'], biggest))
435 print('json+ max values match: %d' % jsondata['max'])
# The sample size must match exactly — counting has no binning error
437 if sampsize != jsondata['N']:
439 print('reported sample size %d does not match json+ total count %d' % \
440 (jsondata['N'], sampsize))
442 print('json+ sample sizes match: %d' % sampsize)
446 def check_sync_lat(self, jsondata, plus=False):
447 """Check fsync latency percentile data.
449 All we can check is that some percentiles are reported, unless we have json+ data.
450 If we actually have json+ data then we can do more checking.
452 jsondata JSON data for fsync operations
453 plus True if we actually have json+ data
457 if 'percentile' not in jsondata['lat_ns']:
458 print("Sync percentile data not found")
# total_ios should equal the lat_ns sample count for sync operations
461 if int(jsondata['total_ios']) != int(jsondata['lat_ns']['N']):
463 print('Mismatch between total_ios and lat_ns sample size')
465 print('sync sample sizes match: %d' % jsondata['total_ios'])
# Without json+, bins must be absent; with json+, run the full
# min/max/sample-size consistency checks.
468 if 'bins' in jsondata['lat_ns']:
469 print('Unexpected json+ bin data found')
472 if not self.check_jsonplus(jsondata['lat_ns']):
477 def check_terse(self, terse, jsondata):
478 """Compare terse latencies with JSON latencies.
480 terse terse format data for checking
481 jsondata JSON format data for checking
# Each terse percentile field looks like "<pct>%=<value_us>"; the JSON
# values are in nanoseconds, so divide by 1000 (floor) before comparing.
487 split = lat.split('%')
489 terse_val = int(split[1][1:])
490 json_val = math.floor(jsondata[pct]/1000)
491 if terse_val != json_val:
493 print('Mismatch with %sth percentile: json value=%d,%d terse value=%d' % \
494 (pct, jsondata[pct], json_val, terse_val))
496 print('Terse %sth percentile matches JSON value: %d' % (pct, terse_val))
500 def check_prio_latencies(self, jsondata, clat=True, plus=False):
501 """Check consistency of per priority latencies.
503 clat True if we should check clat data; otherwise check lat data
504 plus True if we have json+ format data where additional checks can
506 unified True if fio is reporting unified r/w data
# Select which latency object to verify: clat_ns or lat_ns
510 obj = combined = 'clat_ns'
512 obj = combined = 'lat_ns'
514 if not 'prios' in jsondata or not combined in jsondata:
515 print("Error identifying per priority latencies")
# The per-priority sample counts must sum exactly to the combined count
518 sum_sample_size = sum([x[obj]['N'] for x in jsondata['prios']])
519 if sum_sample_size != jsondata[combined]['N']:
520 print("Per prio sample size sum %d != combined sample size %d" %
521 (sum_sample_size, jsondata[combined]['N']))
524 print("Per prio sample size sum %d == combined sample size %d" %
525 (sum_sample_size, jsondata[combined]['N']))
# The combined min/max must equal the extremes over all priorities
527 min_val = min([x[obj]['min'] for x in jsondata['prios']])
528 if min_val != jsondata[combined]['min']:
529 print("Min per prio min latency %d does not match min %d from combined data" %
530 (min_val, jsondata[combined]['min']))
533 print("Min per prio min latency %d matches min %d from combined data" %
534 (min_val, jsondata[combined]['min']))
536 max_val = max([x[obj]['max'] for x in jsondata['prios']])
537 if max_val != jsondata[combined]['max']:
538 print("Max per prio max latency %d does not match max %d from combined data" %
539 (max_val, jsondata[combined]['max']))
542 print("Max per prio max latency %d matches max %d from combined data" %
543 (max_val, jsondata[combined]['max']))
# The combined mean must match the N-weighted average of per-priority
# means to within a 0.01% relative tolerance (floating point rounding).
545 weighted_vals = [x[obj]['mean'] * x[obj]['N'] for x in jsondata['prios']]
546 weighted_avg = sum(weighted_vals) / jsondata[combined]['N']
547 delta = abs(weighted_avg - jsondata[combined]['mean'])
548 if (delta / jsondata[combined]['mean']) > 0.0001:
549 print("Difference between merged per prio weighted average %f mean "
550 "and actual mean %f exceeds 0.01%%" % (weighted_avg, jsondata[combined]['mean']))
553 print("Merged per prio weighted average %f mean matches actual mean %f" %
554 (weighted_avg, jsondata[combined]['mean']))
# With json+ data, also verify each priority's histogram and confirm the
# merged per-priority bins (summed via a Counter) reproduce the combined
# bins both in bin count and per-duration counts.
557 for prio in jsondata['prios']:
558 if not self.check_jsonplus(prio[obj]):
562 for prio in jsondata['prios']:
563 counter.update(prio[obj]['bins'])
567 if len(bins) != len(jsondata[combined]['bins']):
568 print("Number of merged bins %d does not match number of overall bins %d" %
569 (len(bins), len(jsondata[combined]['bins'])))
572 print("Number of merged bins %d matches number of overall bins %d" %
573 (len(bins), len(jsondata[combined]['bins'])))
575 for duration in bins.keys():
576 if bins[duration] != jsondata[combined]['bins'][duration]:
577 print("Merged per prio count does not match overall count for duration %d" %
581 print("Merged per priority latency data match combined latency data")
585 """Check test output."""
587 raise NotImplementedError()
590 class Test001(FioLatTest):
591 """Test object for Test 1."""
# Read-only job: write and trim sections must be empty, no per-priority
# latencies, and read latencies are checked with slat skipped.
594 """Check Test 1 output."""
596 job = self.json_data['jobs'][0]
599 if not self.check_empty(job['write']):
600 print("Unexpected write data found in output")
602 if not self.check_empty(job['trim']):
603 print("Unexpected trim data found in output")
605 if not self.check_nocmdprio_lat(job):
606 print("Unexpected per priority latencies found")
609 retval &= self.check_latencies(job['read'], 0, slat=False)
614 class Test002(FioLatTest):
615 """Test object for Test 2."""
# Write-only job: read and trim must be empty; only total (lat) percentiles
# are checked (slat and clat skipped).
618 """Check Test 2 output."""
620 job = self.json_data['jobs'][0]
623 if not self.check_empty(job['read']):
624 print("Unexpected read data found in output")
626 if not self.check_empty(job['trim']):
627 print("Unexpected trim data found in output")
629 if not self.check_nocmdprio_lat(job):
630 print("Unexpected per priority latencies found")
633 retval &= self.check_latencies(job['write'], 1, slat=False, clat=False)
638 class Test003(FioLatTest):
639 """Test object for Test 3."""
# Trim-only job: read and write must be empty; only clat percentiles are
# checked (slat and tlat skipped).
642 """Check Test 3 output."""
644 job = self.json_data['jobs'][0]
647 if not self.check_empty(job['read']):
648 print("Unexpected read data found in output")
650 if not self.check_empty(job['write']):
651 print("Unexpected write data found in output")
653 if not self.check_nocmdprio_lat(job):
654 print("Unexpected per priority latencies found")
657 retval &= self.check_latencies(job['trim'], 2, slat=False, tlat=False)
662 class Test004(FioLatTest):
663 """Test object for Tests 4, 13."""
# Read-only job with json+ output: all latency types checked including
# the json+ histogram bins (plus=True).
666 """Check Test 4, 13 output."""
668 job = self.json_data['jobs'][0]
671 if not self.check_empty(job['write']):
672 print("Unexpected write data found in output")
674 if not self.check_empty(job['trim']):
675 print("Unexpected trim data found in output")
677 if not self.check_nocmdprio_lat(job):
678 print("Unexpected per priority latencies found")
681 retval &= self.check_latencies(job['read'], 0, plus=True)
686 class Test005(FioLatTest):
687 """Test object for Test 5."""
# Write-only json+ job: clat and lat percentiles checked, slat skipped.
690 """Check Test 5 output."""
692 job = self.json_data['jobs'][0]
695 if not self.check_empty(job['read']):
696 print("Unexpected read data found in output")
698 if not self.check_empty(job['trim']):
699 print("Unexpected trim data found in output")
701 if not self.check_nocmdprio_lat(job):
702 print("Unexpected per priority latencies found")
705 retval &= self.check_latencies(job['write'], 1, slat=False, plus=True)
710 class Test006(FioLatTest):
711 """Test object for Test 6."""
# Read-only json+ job with defaults: only clat percentiles are checked
# (slat and tlat skipped).
714 """Check Test 6 output."""
716 job = self.json_data['jobs'][0]
719 if not self.check_empty(job['write']):
720 print("Unexpected write data found in output")
722 if not self.check_empty(job['trim']):
723 print("Unexpected trim data found in output")
725 if not self.check_nocmdprio_lat(job):
726 print("Unexpected per priority latencies found")
729 retval &= self.check_latencies(job['read'], 0, slat=False, tlat=False, plus=True)
734 class Test007(FioLatTest):
735 """Test object for Test 7."""
# Mixed read/write json+ job: trim must be empty; only slat percentiles
# are checked for both directions (clat and tlat skipped).
738 """Check Test 7 output."""
740 job = self.json_data['jobs'][0]
743 if not self.check_empty(job['trim']):
744 print("Unexpected trim data found in output")
746 if not self.check_nocmdprio_lat(job):
747 print("Unexpected per priority latencies found")
750 retval &= self.check_latencies(job['read'], 0, clat=False, tlat=False, plus=True)
751 retval &= self.check_latencies(job['write'], 1, clat=False, tlat=False, plus=True)
756 class Test008(FioLatTest):
757 """Test object for Tests 8, 14."""
# unified_rw_reporting job: output must contain only a 'mixed' section
# (no read/write/trim keys) and mixed latencies are checked unified.
760 """Check Test 8, 14 output."""
762 job = self.json_data['jobs'][0]
765 if 'read' in job or 'write' in job or 'trim' in job:
766 print("Unexpected data direction found in fio output")
768 if not self.check_nocmdprio_lat(job):
769 print("Unexpected per priority latencies found")
772 retval &= self.check_latencies(job['mixed'], 0, plus=True, unified=True)
777 class Test009(FioLatTest):
778 """Test object for Test 9."""
# Write job with fsync: besides the write latency checks, the 'sync'
# section's fsync latency percentiles are validated with json+ data.
781 """Check Test 9 output."""
783 job = self.json_data['jobs'][0]
786 if not self.check_empty(job['read']):
787 print("Unexpected read data found in output")
789 if not self.check_empty(job['trim']):
790 print("Unexpected trim data found in output")
792 if not self.check_sync_lat(job['sync'], plus=True):
793 print("Error checking fsync latency data")
795 if not self.check_nocmdprio_lat(job):
796 print("Unexpected per priority latencies found")
799 retval &= self.check_latencies(job['write'], 1, slat=False, plus=True)
804 class Test010(FioLatTest):
805 """Test object for Test 10."""
# Mixed r/w job with both terse and json+ output: JSON latencies are
# checked and the terse lat percentile fields (slices [17:34] for read,
# [58:75] for write) are cross-checked against the JSON lat percentiles.
808 """Check Test 10 output."""
810 job = self.json_data['jobs'][0]
813 if not self.check_empty(job['trim']):
814 print("Unexpected trim data found in output")
816 if not self.check_nocmdprio_lat(job):
817 print("Unexpected per priority latencies found")
820 retval &= self.check_latencies(job['read'], 0, plus=True)
821 retval &= self.check_latencies(job['write'], 1, plus=True)
822 retval &= self.check_terse(self.terse_data[17:34], job['read']['lat_ns']['percentile'])
823 retval &= self.check_terse(self.terse_data[58:75], job['write']['lat_ns']['percentile'])
824 # Terse data checking only works for default percentiles.
825 # This needs to be changed if something other than the default is ever used.
830 class Test011(FioLatTest):
831 """Test object for Test 11."""
# Like Test 10 but only total latency percentiles are enabled (slat and
# clat skipped); terse output is still cross-checked against JSON.
834 """Check Test 11 output."""
836 job = self.json_data['jobs'][0]
839 if not self.check_empty(job['trim']):
840 print("Unexpected trim data found in output")
842 if not self.check_nocmdprio_lat(job):
843 print("Unexpected per priority latencies found")
846 retval &= self.check_latencies(job['read'], 0, slat=False, clat=False, plus=True)
847 retval &= self.check_latencies(job['write'], 1, slat=False, clat=False, plus=True)
848 retval &= self.check_terse(self.terse_data[17:34], job['read']['lat_ns']['percentile'])
849 retval &= self.check_terse(self.terse_data[58:75], job['write']['lat_ns']['percentile'])
850 # Terse data checking only works for default percentiles.
851 # This needs to be changed if something other than the default is ever used.
856 class Test015(FioLatTest):
857 """Test object for Test 15."""
# cmdprio read job: per-priority latencies are expected, so the
# no-cmdprio check is intentionally absent; lat-based per-prio data is
# verified (clat=False).
860 """Check Test 15 output."""
862 job = self.json_data['jobs'][0]
865 if not self.check_empty(job['write']):
866 print("Unexpected write data found in output")
868 if not self.check_empty(job['trim']):
869 print("Unexpected trim data found in output")
872 retval &= self.check_latencies(job['read'], 0, plus=True)
873 retval &= self.check_prio_latencies(job['read'], clat=False, plus=True)
878 class Test016(FioLatTest):
879 """Test object for Test 16."""
# cmdprio write job: write latencies (slat skipped) plus lat-based
# per-priority consistency checks.
882 """Check Test 16 output."""
884 job = self.json_data['jobs'][0]
887 if not self.check_empty(job['read']):
888 print("Unexpected read data found in output")
890 if not self.check_empty(job['trim']):
891 print("Unexpected trim data found in output")
894 retval &= self.check_latencies(job['write'], 1, slat=False, plus=True)
895 retval &= self.check_prio_latencies(job['write'], clat=False, plus=True)
900 class Test017(FioLatTest):
901 """Test object for Test 17."""
# cmdprio read job with defaults: clat percentiles only, and clat-based
# per-priority consistency checks (clat defaults to True).
904 """Check Test 17 output."""
906 job = self.json_data['jobs'][0]
909 if not self.check_empty(job['write']):
910 print("Unexpected write data found in output")
912 if not self.check_empty(job['trim']):
913 print("Unexpected trim data found in output")
916 retval &= self.check_latencies(job['read'], 0, slat=False, tlat=False, plus=True)
917 retval &= self.check_prio_latencies(job['read'], plus=True)
922 class Test018(FioLatTest):
923 """Test object for Test 18."""
# cmdprio mixed r/w job with only slat percentiles enabled.
926 """Check Test 18 output."""
928 job = self.json_data['jobs'][0]
931 if not self.check_empty(job['trim']):
932 print("Unexpected trim data found in output")
935 retval &= self.check_latencies(job['read'], 0, clat=False, tlat=False, plus=True)
936 retval &= self.check_latencies(job['write'], 1, clat=False, tlat=False, plus=True)
938 # We actually have json+ data but setting plus=False below avoids checking the
939 # json+ bins which did not exist for clat and lat because this job is run with
940 # clat_percentiles=0, lat_percentiles=0, However, we can still check the summary
942 retval &= self.check_prio_latencies(job['write'], plus=False)
943 retval &= self.check_prio_latencies(job['read'], plus=False)
948 class Test019(FioLatTest):
949 """Test object for Tests 19, 20."""
# cmdprio + unified_rw_reporting: only the 'mixed' section may exist;
# unified latencies and lat-based per-priority data are verified.
952 """Check Test 19, 20 output."""
954 job = self.json_data['jobs'][0]
957 if 'read' in job or 'write' in job or 'trim' in job:
958 print("Unexpected data direction found in fio output")
961 retval &= self.check_latencies(job['mixed'], 0, plus=True, unified=True)
962 retval &= self.check_prio_latencies(job['mixed'], clat=False, plus=True)
967 class Test021(FioLatTest):
968 """Test object for Test 21."""
# bssplit/cmdprio_bssplit job: clat percentiles only for both directions,
# with clat-based per-priority consistency checks.
971 """Check Test 21 output."""
973 job = self.json_data['jobs'][0]
976 if not self.check_empty(job['trim']):
977 print("Unexpected trim data found in output")
980 retval &= self.check_latencies(job['read'], 0, slat=False, tlat=False, plus=True)
981 retval &= self.check_latencies(job['write'], 1, slat=False, tlat=False, plus=True)
982 retval &= self.check_prio_latencies(job['read'], clat=True, plus=True)
983 retval &= self.check_prio_latencies(job['write'], clat=True, plus=True)
989 """Parse command-line arguments."""
991 parser = argparse.ArgumentParser()
992 parser.add_argument('-f', '--fio', help='path to file executable (e.g., ./fio)')
993 parser.add_argument('-a', '--artifact-root', help='artifact root directory')
994 parser.add_argument('-d', '--debug', help='enable debug output', action='store_true')
995 parser.add_argument('-s', '--skip', nargs='+', type=int,
996 help='list of test(s) to skip')
997 parser.add_argument('-o', '--run-only', nargs='+', type=int,
998 help='list of test(s) to run, skipping all others')
999 args = parser.parse_args()
1005 """Run tests of fio latency percentile reporting"""
1009 artifact_root = args.artifact_root if args.artifact_root else \
1010 "latency-test-{0}".format(time.strftime("%Y%m%d-%H%M%S"))
1011 os.mkdir(artifact_root)
1012 print("Artifact directory is %s" % artifact_root)
1015 fio = str(Path(args.fio).absolute())
1018 print("fio path is %s" % fio)
1020 if platform.system() == 'Linux':
1022 elif platform.system() == 'Windows':
1030 # enable slat, clat, lat
1031 # only clat and lat will appear
1032 # because the null ioengine is synchronous
1035 "output-format": "json",
1036 "slat_percentiles": 1,
1037 "clat_percentiles": 1,
1038 "lat_percentiles": 1,
1041 "test_obj": Test001,
1048 "output-format": "json",
1049 "slat_percentiles": 0,
1050 "clat_percentiles": 0,
1051 "lat_percentiles": 1,
1054 "test_obj": Test002,
1061 "output-format": "json",
1062 "slat_percentiles": 0,
1063 "clat_percentiles": 1,
1064 "lat_percentiles": 0,
1067 "test_obj": Test003,
1071 # enable slat, clat, lat
1072 # all will appear because libaio is asynchronous
1075 "output-format": "json+",
1076 "slat_percentiles": 1,
1077 "clat_percentiles": 1,
1078 "lat_percentiles": 1,
1081 "test_obj": Test004,
1085 # enable only clat, lat
1088 "output-format": "json+",
1089 "slat_percentiles": 0,
1090 "clat_percentiles": 1,
1091 "lat_percentiles": 1,
1094 "test_obj": Test005,
1098 # by default only clat should appear
1101 "output-format": "json+",
1104 "test_obj": Test006,
1111 "output-format": "json+",
1112 "slat_percentiles": 1,
1113 "clat_percentiles": 0,
1114 "lat_percentiles": 0,
1117 "test_obj": Test007,
1120 # 50/50 r/w, aio, unified_rw_reporting
1121 # enable slat, clat, lat
1124 "output-format": "json+",
1125 "slat_percentiles": 1,
1126 "clat_percentiles": 1,
1127 "lat_percentiles": 1,
1130 'unified_rw_reporting': 1,
1131 "test_obj": Test008,
1135 # enable slat, clat, lat
1139 "output-format": "json+",
1140 "slat_percentiles": 1,
1141 "clat_percentiles": 1,
1142 "lat_percentiles": 1,
1146 "test_obj": Test009,
1150 # enable slat, clat, lat
1153 "output-format": "terse,json+",
1154 "slat_percentiles": 1,
1155 "clat_percentiles": 1,
1156 "lat_percentiles": 1,
1159 "test_obj": Test010,
1166 "output-format": "terse,json+",
1167 "slat_percentiles": 0,
1168 "clat_percentiles": 0,
1169 "lat_percentiles": 1,
1172 "test_obj": Test011,
1176 # enable slat, clat, lat
1177 # only clat and lat will appear
1178 # because the null ioengine is synchronous
1179 # same as Test 1 except add numjobs = 4 to test
1180 # sum_thread_stats() changes
1183 "output-format": "json",
1184 "slat_percentiles": 1,
1185 "clat_percentiles": 1,
1186 "lat_percentiles": 1,
1190 "test_obj": Test001,
1194 # enable slat, clat, lat
1195 # all will appear because libaio is asynchronous
1196 # same as Test 4 except add numjobs = 4 to test
1197 # sum_thread_stats() changes
1200 "output-format": "json+",
1201 "slat_percentiles": 1,
1202 "clat_percentiles": 1,
1203 "lat_percentiles": 1,
1207 "test_obj": Test004,
1210 # 50/50 r/w, aio, unified_rw_reporting
1211 # enable slat, clat, lat
1212 # same as Test 8 except add numjobs = 4 to test
1213 # sum_thread_stats() changes
1216 "output-format": "json+",
1217 "slat_percentiles": 1,
1218 "clat_percentiles": 1,
1219 "lat_percentiles": 1,
1222 'unified_rw_reporting': 1,
1224 "test_obj": Test008,
1228 # enable slat, clat, lat
1229 # all will appear because libaio is asynchronous
1230 # same as Test 4 except add cmdprio_percentage
1233 "output-format": "json+",
1234 "slat_percentiles": 1,
1235 "clat_percentiles": 1,
1236 "lat_percentiles": 1,
1239 'cmdprio_percentage': 50,
1240 "test_obj": Test015,
1244 # enable only clat, lat
1245 # same as Test 5 except add cmdprio_percentage
1248 "output-format": "json+",
1249 "slat_percentiles": 0,
1250 "clat_percentiles": 1,
1251 "lat_percentiles": 1,
1254 'cmdprio_percentage': 50,
1255 "test_obj": Test016,
1259 # by default only clat should appear
1260 # same as Test 6 except add cmdprio_percentage
1263 "output-format": "json+",
1266 'cmdprio_percentage': 50,
1267 "test_obj": Test017,
1272 # same as Test 7 except add cmdprio_percentage
1275 "output-format": "json+",
1276 "slat_percentiles": 1,
1277 "clat_percentiles": 0,
1278 "lat_percentiles": 0,
1281 'cmdprio_percentage': 50,
1282 "test_obj": Test018,
1285 # 50/50 r/w, aio, unified_rw_reporting
1286 # enable slat, clat, lat
1287 # same as Test 8 except add cmdprio_percentage
1290 "output-format": "json+",
1291 "slat_percentiles": 1,
1292 "clat_percentiles": 1,
1293 "lat_percentiles": 1,
1296 'unified_rw_reporting': 1,
1297 'cmdprio_percentage': 50,
1298 "test_obj": Test019,
1301 # 50/50 r/w, aio, unified_rw_reporting
1302 # enable slat, clat, lat
1303 # same as Test 19 except add numjobs = 4 to test
1304 # sum_thread_stats() changes
1307 "output-format": "json+",
1308 "slat_percentiles": 1,
1309 "clat_percentiles": 1,
1310 "lat_percentiles": 1,
1313 'unified_rw_reporting': 1,
1314 'cmdprio_percentage': 50,
1316 "test_obj": Test019,
1321 # test bssplit and cmdprio_bssplit
1324 "output-format": "json+",
1325 "slat_percentiles": 0,
1326 "clat_percentiles": 1,
1327 "lat_percentiles": 0,
1330 'bssplit': '64k/40:1024k/60',
1331 'cmdprio_bssplit': '64k/25/1/1:64k/75/3/2:1024k/0',
1332 "test_obj": Test021,
1337 # same as Test 21 except add numjobs = 4 to test
1338 # sum_thread_stats() changes
1341 "output-format": "json+",
1342 "slat_percentiles": 0,
1343 "clat_percentiles": 1,
1344 "lat_percentiles": 0,
1347 'bssplit': '64k/40:1024k/60',
1348 'cmdprio_bssplit': '64k/25/1/1:64k/75/3/2:1024k/0',
1350 "test_obj": Test021,
1358 for test in test_list:
1359 if (args.skip and test['test_id'] in args.skip) or \
1360 (args.run_only and test['test_id'] not in args.run_only):
1361 skipped = skipped + 1
1362 outcome = 'SKIPPED (User request)'
1363 elif (platform.system() != 'Linux' or os.geteuid() != 0) and \
1364 ('cmdprio_percentage' in test or 'cmdprio_bssplit' in test):
1365 skipped = skipped + 1
1366 outcome = 'SKIPPED (Linux root required for cmdprio tests)'
1368 test_obj = test['test_obj'](artifact_root, test, args.debug)
1369 status = test_obj.run_fio(fio)
1371 status = test_obj.check()
1379 print("**********Test {0} {1}**********".format(test['test_id'], outcome))
1381 print("{0} tests passed, {1} failed, {2} skipped".format(passed, failed, skipped))
1386 if __name__ == '__main__':