author    Leah Rumancik <leah.rumancik@gmail.com>  2022-11-08 11:26:21 -0800
committer Leah Rumancik <leah.rumancik@gmail.com>  2023-01-27 16:19:26 -0800
commit    fddf6eb1473333f1ce523bcbb40c9dccf3071387 (patch)
tree      f4cd5c78c02e124c02db53b57382c746ccdda659
parent    8b1f6d10aa8e050f245f821f3fd3bb072320dd1f (diff)
add scripts to compare results
get_stats.py: read in all xml files from a results directory and store
statistics about each test in a single xml file

merge_stats.py: merge two stats xml files (output of get_stats.py)

diff_stats.py: compare two stats xml files, searching for regressions from
the first to the second set

ex)
python get_stats.py results1_run1 --outfile results1_run1.xml
python get_stats.py results1_run2 --outfile results1_run2.xml
python merge_stats.py results1_run1.xml results1_run2.xml \
        --outfile results1.xml
python get_stats.py results2 --outfile results2.xml
python diff_stats.py results1.xml results2.xml

Signed-off-by: Leah Rumancik <leah.rumancik@gmail.com>
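For reference, the stats file that get_stats.py emits (and that merge_stats.py
and diff_stats.py then consume) is the small XML document built by write_stats()
below. A sketch of its shape, with hypothetical config and test names:

    <configs>
    	<config name="4k">
    		<test name="generic/001" failed="1" skipped="0" error="0" total="10"/>
    	</config>
    </configs>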
-rw-r--r--test-appliance/files/usr/lib/python3/dist-packages/diff_stats.py105
-rw-r--r--test-appliance/files/usr/lib/python3/dist-packages/gen_results_summary.py54
-rw-r--r--test-appliance/files/usr/lib/python3/dist-packages/get_stats.py68
-rw-r--r--test-appliance/files/usr/lib/python3/dist-packages/junitparser/junitparser.py6
-rw-r--r--test-appliance/files/usr/lib/python3/dist-packages/merge_stats.py45
5 files changed, 253 insertions, 25 deletions
diff --git a/test-appliance/files/usr/lib/python3/dist-packages/diff_stats.py b/test-appliance/files/usr/lib/python3/dist-packages/diff_stats.py
new file mode 100644
index 00000000..7cd218ea
--- /dev/null
+++ b/test-appliance/files/usr/lib/python3/dist-packages/diff_stats.py
@@ -0,0 +1,105 @@
+#!/usr/bin/python3
+
+import argparse
+import sys
+from gen_results_summary import TestStats
+import xml.etree.ElementTree as ET
+from junitparser import JUnitXml, Property, Properties, Failure, Error, Skipped
+
+
+# s[cfg] = cfg_stats
+# cfg_stats[test] = TestStats()
+# consider s1 the baseline
+def diff_stats(s1, s2, threshold, output_file, input_file1, input_file2):
+    """Compare the statistics between two Stats, report regressions and unexpected results"""
+    print(f"Writing results to {output_file}")
+
+    skip_str = ""
+    error_str = ""
+    file = open(output_file, 'w')
+    file.write(f'Regression check {input_file1} -> {input_file2}:\n\n')
+    for cfg in s1.keys():
+        if cfg not in s2.keys():
+            file.write(f'***Warning: missing config {cfg} in {input_file2}***\n')
+
+    for cfg in s2.keys():
+        file.write(f'{cfg:-^45}\n')
+        if cfg not in s1.keys():
+            file.write(f'***Warning: missing config {cfg} in {input_file1}***\n')
+            continue
+        for test_name in s2[cfg]:
+            test = s2[cfg][test_name]
+            if test_name not in s1[cfg]:
+                file.write(f'***Warning: {cfg}:{test_name} run on {input_file2} but not on {input_file1}***\n')
+                continue
+            if test.failed > 0:
+                test_1 = s1[cfg][test_name]
+                fail_rate_1 = 100.0 * test_1.failed / test_1.total
+                fail_rate_2 = 100.0 * test.failed / test.total
+                if fail_rate_2 >= fail_rate_1 + threshold:
+                    file.write(f'{test_name}: {test_1.failed}/{test_1.total} ({fail_rate_1:.2f}%) -> {test.failed}/{test.total} ({fail_rate_2:.2f}%)\n')
+
+            test_1 = s1[cfg][test_name]
+            skip_rate_1 = 100.0 * test_1.skipped / test_1.total
+            skip_rate_2 = 100.0 * test.skipped / test.total
+            if skip_rate_1 != skip_rate_2:
+                skip_str += f'{cfg}:{test_name} skip rate changed {test_1.skipped}/{test_1.total} ({skip_rate_1:.2f}%) -> {test.skipped}/{test.total} ({skip_rate_2:.2f}%)\n'
+
+            if test.error > 0:
+                test_1 = s1[cfg][test_name]
+                error_rate_1 = 100.0 * test_1.error / test_1.total
+                error_rate_2 = 100.0 * test.error / test.total
+                # always print error stats
+                error_str += f'{cfg}:{test_name} ERROR {test_1.error}/{test_1.total} ({error_rate_1:.2f}%) -> {test.error}/{test.total} ({error_rate_2:.2f}%)\n'
+        file.write('\n')
+
+    if len(error_str) > 0:
+        file.write('\n*** ERROR(S) occurred in new test set: ***\n')
+        file.write(error_str)
+
+    if len(skip_str) > 0:
+        file.write('\n*** WARNING: skip rate changed between test sets: ***\n')
+        file.write(skip_str)
+    file.close()
+
+
+def read_stats(input_file):
+    """Read test statistics from file"""
+    stats = {}
+    tree = ET.parse(input_file)
+    root = tree.getroot()
+
+    for cfg_element in root.findall('config'):
+        cfg = cfg_element.get('name')
+        if cfg not in stats:
+            stats[cfg] = {}
+        for test_element in cfg_element.findall('test'):
+            test = TestStats()
+
+            name = test_element.get('name')
+            test.failed = int(test_element.get('failed'))
+            test.skipped = int(test_element.get('skipped'))
+            test.error = int(test_element.get('error'))
+            test.total = int(test_element.get('total'))
+
+            stats[cfg][name] = test
+
+    return stats
+
+
+def main():
+    parser = argparse.ArgumentParser()
+    parser.add_argument('stats_file1', help='First stats file (baseline)', type=str)
+    parser.add_argument('stats_file2', help='Second stats file (file to compare to baseline)', type=str)
+    parser.add_argument('--outfile', help='Diff output file', default="stats.diff", type=str)
+    parser.add_argument('--regression_threshold', help='Percent (int) increase needed in fail rate to determine regression', type=int, default=5)
+    args = parser.parse_args()
+
+    stats1 = read_stats(args.stats_file1)
+    stats2 = read_stats(args.stats_file2)
+
+    diff_stats(stats1, stats2, args.regression_threshold, args.outfile, args.stats_file1, args.stats_file2)
+
+
+if __name__ == "__main__":
+    main()
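Given the format strings above, a report produced by diff_stats.py looks
roughly like the following (config name, test names, and counts are
hypothetical; the failure line appears because 50% exceeds the 5% baseline by
more than the default threshold of 5):

    Regression check results1.xml -> results2.xml:

    ---------------------4k----------------------
    generic/001: 1/20 (5.00%) -> 5/10 (50.00%)

    *** ERROR(S) occurred in new test set: ***
    4k:generic/002 ERROR 0/20 (0.00%) -> 1/10 (10.00%)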
diff --git a/test-appliance/files/usr/lib/python3/dist-packages/gen_results_summary.py b/test-appliance/files/usr/lib/python3/dist-packages/gen_results_summary.py
index ecf5dc1a..691147e6 100644
--- a/test-appliance/files/usr/lib/python3/dist-packages/gen_results_summary.py
+++ b/test-appliance/files/usr/lib/python3/dist-packages/gen_results_summary.py
@@ -131,6 +131,35 @@ def sum_testsuites(testsuites):
 		errors += testsuite.errors
 	return (tests, skipped, failures, errors, runtime)
+
+def get_testsuite_stats(testsuite):
+    """Aggregate stats on individual tests"""
+    Stats = {}
+    for test_case in testsuite:
+        isFail = False
+        isSkipped = False
+        isError = False
+        for entry in test_case.result:
+            if isinstance(entry, Failure):
+                isFail = True
+            if isinstance(entry, Skipped):
+                isSkipped = True
+            if isinstance(entry, Error):
+                isError = True
+        if test_case.name in Stats:
+            s = Stats[test_case.name]
+        else:
+            s = TestStats()
+            Stats[test_case.name] = s
+        s.total += 1
+        if isFail:
+            s.failed += 1
+        if isSkipped:
+            s.skipped += 1
+        if isError:
+            s.error += 1
+
+    return Stats
+
 def print_summary(out_f, testsuite, verbose):
     """Print a summary for a particular test suite
@@ -175,30 +204,7 @@ def print_summary(out_f, testsuite, verbose):
             out_f.write(" %-12s %-8s %ds\n" %
                         (test_case.name, status, test_case.time))
     else:
-        Stats = {}
-        for test_case in testsuite:
-            isFail = False
-            isSkipped = False
-            isError = False
-            for entry in test_case.result:
-                if isinstance(entry, Failure):
-                    isFail = True
-                if isinstance(entry, Skipped):
-                    isSkipped = True
-                if isinstance(entry, Error):
-                    isError = True
-            if test_case.name in Stats:
-                s = Stats[test_case.name]
-            else:
-                s = TestStats()
-                Stats[test_case.name] = s
-            s.total += 1
-            if isFail:
-                s.failed += 1
-            if isSkipped:
-                s.skipped += 1
-            if isError:
-                s.error += 1
+        Stats = get_testsuite_stats(testsuite)
     wp = wrapped_print(out_f, 'Failures', ' ')
     for t in Stats:
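get_testsuite_stats() and both new scripts rely on TestStats from
gen_results_summary.py, which this patch does not show. Judging from the
attribute accesses (failed, skipped, error, total, all counters), it is
presumably a trivial holder along these lines; this is a hypothetical sketch,
not the class as defined upstream:

    class TestStats:
        def __init__(self):
            # per-test counters accumulated across runs
            self.failed = 0
            self.skipped = 0
            self.error = 0
            self.total = 0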
diff --git a/test-appliance/files/usr/lib/python3/dist-packages/get_stats.py b/test-appliance/files/usr/lib/python3/dist-packages/get_stats.py
new file mode 100644
index 00000000..4cd62815
--- /dev/null
+++ b/test-appliance/files/usr/lib/python3/dist-packages/get_stats.py
@@ -0,0 +1,68 @@
+#!/usr/bin/python3
+
+import argparse
+import sys
+from gen_results_summary import get_property, get_testsuite_stats, get_results
+from junitparser import JUnitXml, Property, Properties, Failure, Error, Skipped
+
+try:
+    from lxml import etree
+except ImportError:
+    from xml.etree import ElementTree as etree
+
+
+# reports is list of results from each xml file
+# stats[cfg] = cfg_stats
+# cfg_stats[test] = TestStats()
+def get_stats_from_dir(results_dir):
+    """From a results dir, return per-config test statistics"""
+    reports = []
+    stats = {}
+    for filename in get_results(results_dir):
+        reports.append(JUnitXml.fromfile(filename))
+
+    if len(reports) == 0:
+        sys.stderr.write(f'Error: could not find any reports in {results_dir}\n')
+        return None
+
+    for testsuite in reports:
+        cfg = get_property(testsuite.properties(), 'TESTCFG') or get_property(testsuite.properties(), 'FSTESTCFG')
+        if cfg in stats:
+            sys.stderr.write(f'Found duplicate config {cfg}\n')
+            return None
+        stats[cfg] = get_testsuite_stats(testsuite)
+
+    return stats
+
+# writes all configs into single output file
+# condensing into entries of test->(failed, skipped, error, total)
+# this will let us store stats and easily merge from other runs
+# without having to reprocess everything
+def write_stats(s, output_file):
+    """Write the test statistics to a file"""
+    root = etree.Element("configs")
+    for cfg in s:
+        cfg_element = etree.SubElement(root, "config", name=cfg)
+        for test_name in s[cfg]:
+            test = s[cfg][test_name]
+            etree.SubElement(cfg_element, "test", name=test_name, failed=str(test.failed), skipped=str(test.skipped), error=str(test.error), total=str(test.total))
+
+    tree = etree.ElementTree(root)
+    etree.indent(tree, space="\t", level=0)
+    tree.write(output_file, encoding='utf-8')
+
+def main():
+    parser = argparse.ArgumentParser()
+    parser.add_argument('results_dir', help='Results directory to process', type=str)
+    parser.add_argument('--outfile', help='Stats output file', default='./stats.xml', type=str)
+    args = parser.parse_args()
+
+    stats = get_stats_from_dir(args.results_dir)
+
+    if stats is None:
+        return -1
+
+    write_stats(stats, args.outfile)
+
+if __name__ == "__main__":
+    main()
diff --git a/test-appliance/files/usr/lib/python3/dist-packages/junitparser/junitparser.py b/test-appliance/files/usr/lib/python3/dist-packages/junitparser/junitparser.py
index eb38b298..b3bbd853 100644
--- a/test-appliance/files/usr/lib/python3/dist-packages/junitparser/junitparser.py
+++ b/test-appliance/files/usr/lib/python3/dist-packages/junitparser/junitparser.py
@@ -310,7 +310,11 @@ class JUnitXml(Element):
         if parse_func:
             tree = parse_func(filepath)
         else:
-            tree = etree.parse(filepath)  # nosec
+            try:
+                tree = etree.parse(filepath)  # nosec
+            except etree.XMLSyntaxError:
+                p = etree.XMLParser(huge_tree=True)
+                tree = etree.parse(filepath, parser=p)
         root_elem = tree.getroot()
         if root_elem.tag == "testsuites":
             instance = cls()
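This fallback matters because lxml, which junitparser prefers when it is
installed, rejects documents with very large text nodes by default and raises
XMLSyntaxError; XMLParser(huge_tree=True) lifts that limit. The same pattern in
isolation, assuming the lxml backend (parse_large_xml is a hypothetical helper
name):

    from lxml import etree

    def parse_large_xml(path):
        # Parse normally first; retry with huge_tree only when the
        # default parser refuses an oversized junit report.
        try:
            return etree.parse(path)
        except etree.XMLSyntaxError:
            # huge_tree=True disables lxml's security-motivated size caps
            parser = etree.XMLParser(huge_tree=True)
            return etree.parse(path, parser=parser)

Note that the except clause effectively assumes lxml: the stdlib ElementTree
fallback has no XMLSyntaxError attribute and raises ParseError instead, so this
retry path only works when lxml is the backend in use.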
diff --git a/test-appliance/files/usr/lib/python3/dist-packages/merge_stats.py b/test-appliance/files/usr/lib/python3/dist-packages/merge_stats.py
new file mode 100644
index 00000000..a3148142
--- /dev/null
+++ b/test-appliance/files/usr/lib/python3/dist-packages/merge_stats.py
@@ -0,0 +1,45 @@
+#!/usr/bin/python3
+
+import argparse
+import sys
+import xml.etree.ElementTree as ET
+import get_stats
+import diff_stats
+from gen_results_summary import TestStats
+from junitparser import JUnitXml, Property, Properties, Failure, Error, Skipped
+
+
+def merge_stats(stats1, stats2):
+    """Merges stats2 into stats1"""
+    for cfg in stats2:
+        if cfg not in stats1:
+            stats1[cfg] = {}
+
+        for test_name in stats2[cfg]:
+            if test_name not in stats1[cfg]:
+                stats1[cfg][test_name] = TestStats()
+            stats1[cfg][test_name].failed += stats2[cfg][test_name].failed
+            stats1[cfg][test_name].skipped += stats2[cfg][test_name].skipped
+            stats1[cfg][test_name].error += stats2[cfg][test_name].error
+            stats1[cfg][test_name].total += stats2[cfg][test_name].total
+
+    return stats1
+
+def main():
+    parser = argparse.ArgumentParser()
+    parser.add_argument('stats_file', help='First stats file', type=str)
+    parser.add_argument('stats_files_merge', nargs='+', help='List of stats files to merge', type=str)
+    parser.add_argument('--outfile', default='merged_stats.xml', help='Output xml file', type=str)
+    args = parser.parse_args()
+
+    stats = diff_stats.read_stats(args.stats_file)
+
+    for file in args.stats_files_merge:
+        stats_merge = diff_stats.read_stats(file)
+        stats = merge_stats(stats, stats_merge)
+
+    get_stats.write_stats(stats, args.outfile)
+
+
+if __name__ == "__main__":
+    main()
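Since merge_stats() just sums the four counters per test, merging any number of
runs into one baseline is order-independent. A hypothetical sanity check,
assuming TestStats initializes its counters to zero as in the sketch earlier:

    from gen_results_summary import TestStats
    from merge_stats import merge_stats

    a, b = TestStats(), TestStats()
    a.failed, a.total = 1, 10   # run 1: failed 1 of 10
    b.failed, b.total = 2, 10   # run 2: failed 2 of 10

    merged = merge_stats({'4k': {'generic/001': a}},
                         {'4k': {'generic/001': b}})
    m = merged['4k']['generic/001']
    assert (m.failed, m.total) == (3, 20)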