path: root/utils/lit
author    Daniel Dunbar <daniel@zuster.org>    2013-09-11 17:45:11 +0000
committer Daniel Dunbar <daniel@zuster.org>    2013-09-11 17:45:11 +0000
commit    ff058f0a701b601f1593f2a9c8030acb652fdba6 (patch)
tree      4fde63c42a5ffd96c7ebf9bb3f0f00b2b10f5895 /utils/lit
parent    15f387c93ef8d5c23f110143996c8b9b4a089864 (diff)
[lit] Add support for attaching arbitrary metrics to test results.
- This is a work-in-progress and all details are subject to change, but I am trying to build up support for allowing lit to be used as a driver for performance tests (or other tests which might want to record information beyond simple PASS/FAIL).

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@190535 91177308-0d34-0410-b5e6-96231b3b80d8
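To illustrate the new API this patch introduces, here is a minimal sketch of how a test format might attach metrics to a result; the metric names and values below are hypothetical, chosen for illustration only:

import lit.Test

# Build a basic result, then attach two metrics (hypothetical names/values).
result = lit.Test.Result(lit.Test.PASS, 'Test passed.')
result.addMetric('compile_time', lit.Test.RealMetricValue(1.2345))
result.addMetric('exit_code', lit.Test.IntMetricValue(0))

# addMetric rejects duplicate names (ValueError) and values that are not
# MetricValue instances (TypeError).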
Diffstat (limited to 'utils/lit')
-rw-r--r--  utils/lit/lit/Test.py                        | 43
-rwxr-xr-x  utils/lit/lit/main.py                        | 15
-rw-r--r--  utils/lit/tests/Inputs/test-data/lit.cfg     | 44
-rw-r--r--  utils/lit/tests/Inputs/test-data/metrics.ini |  7
-rw-r--r--  utils/lit/tests/test-data.py                 | 12
5 files changed, 119 insertions, 2 deletions
diff --git a/utils/lit/lit/Test.py b/utils/lit/lit/Test.py
index 05cae99..d84eb47 100644
--- a/utils/lit/lit/Test.py
+++ b/utils/lit/lit/Test.py
@@ -1,6 +1,6 @@
import os
-# Test results.
+# Test result codes.
class ResultCode(object):
"""Test result codes."""
@@ -31,6 +31,28 @@ XPASS = ResultCode('XPASS', True)
UNRESOLVED = ResultCode('UNRESOLVED', True)
UNSUPPORTED = ResultCode('UNSUPPORTED', False)
+# Test metric values.
+
+class MetricValue(object):
+ def format(self):
+ raise RuntimeError("abstract method")
+
+class IntMetricValue(MetricValue):
+ def __init__(self, value):
+ self.value = value
+
+ def format(self):
+ return str(self.value)
+
+class RealMetricValue(MetricValue):
+ def __init__(self, value):
+ self.value = value
+
+ def format(self):
+ return '%.4f' % self.value
+
+# Test results.
+
class Result(object):
"""Wrapper for the results of executing an individual test."""
@@ -41,6 +63,25 @@ class Result(object):
self.output = output
# The wall timing to execute the test, if timing.
self.elapsed = elapsed
+ # The metrics reported by this test.
+ self.metrics = {}
+
+ def addMetric(self, name, value):
+ """
+ addMetric(name, value)
+
+ Attach a test metric to the test result, with the given name and
+ value. It is an error to attempt to attach a metric with the same
+ name multiple times.
+
+ Each value must be an instance of a MetricValue subclass.
+ """
+ if name in self.metrics:
+ raise ValueError("result already includes metrics for %r" % (
+ name,))
+ if not isinstance(value, MetricValue):
+ raise TypeError("unexpected metric value: %r" % (value,))
+ self.metrics[name] = value
# Test classes.
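The MetricValue hierarchy is intentionally open-ended; a format that wants to report, say, string-valued metrics could subclass it. A hypothetical sketch, not part of this patch:

import lit.Test

class StringMetricValue(lit.Test.MetricValue):
    # Hypothetical subclass: wraps a free-form string value.
    def __init__(self, value):
        self.value = value

    def format(self):
        # format() must return the string to display for this metric.
        return self.value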
diff --git a/utils/lit/lit/main.py b/utils/lit/lit/main.py
index 50c9a66..b93aa6f 100755
--- a/utils/lit/lit/main.py
+++ b/utils/lit/lit/main.py
@@ -45,15 +45,28 @@ class TestingProgressDisplay(object):
if self.progressBar:
self.progressBar.clear()
- print('%s: %s (%d of %d)' % (test.result.code.name, test.getFullName(),
+ # Show the test result line.
+ test_name = test.getFullName()
+ print('%s: %s (%d of %d)' % (test.result.code.name, test_name,
self.completed, self.numTests))
+ # Show the test failure output, if requested.
if test.result.code.isFailure and self.opts.showOutput:
print("%s TEST '%s' FAILED %s" % ('*'*20, test.getFullName(),
'*'*20))
print(test.result.output)
print("*" * 20)
+ # Report test metrics, if present.
+ if test.result.metrics:
+ print("%s TEST '%s' RESULTS %s" % ('*'*10, test.getFullName(),
+ '*'*10))
+ items = sorted(test.result.metrics.items())
+ for metric_name, value in items:
+ print('%s: %s' % (metric_name, value.format()))
+ print("*" * 10)
+
+ # Ensure the output is flushed.
sys.stdout.flush()
def main(builtinParameters = {}):
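With this change, a verbose run of a metrics-reporting test prints output along these lines (illustrative, based on the print statements above and the FileCheck expectations below):

PASS: test-data :: metrics.ini (1 of 1)
********** TEST 'test-data :: metrics.ini' RESULTS **********
value0: 1
value1: 2.3456
**********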
diff --git a/utils/lit/tests/Inputs/test-data/lit.cfg b/utils/lit/tests/Inputs/test-data/lit.cfg
new file mode 100644
index 0000000..f5aba7b
--- /dev/null
+++ b/utils/lit/tests/Inputs/test-data/lit.cfg
@@ -0,0 +1,44 @@
+import os
+try:
+ import ConfigParser
+except ImportError:
+ import configparser as ConfigParser
+
+import lit.formats
+import lit.Test
+
+class DummyFormat(lit.formats.FileBasedTest):
+ def execute(self, test, lit_config):
+ # In this dummy format, expect that each test file is actually just a
+ # .ini format dump of the results to report.
+
+ source_path = test.getSourcePath()
+
+ cfg = ConfigParser.ConfigParser()
+ cfg.read(source_path)
+
+ # Create the basic test result.
+ result_code = cfg.get('global', 'result_code')
+ result_output = cfg.get('global', 'result_output')
+ result = lit.Test.Result(getattr(lit.Test, result_code),
+ result_output)
+
+ # Load additional metrics.
+ for key, value_str in cfg.items('results'):
+ value = eval(value_str)
+ if isinstance(value, int):
+ metric = lit.Test.IntMetricValue(value)
+ elif isinstance(value, float):
+ metric = lit.Test.RealMetricValue(value)
+ else:
+ raise RuntimeError("unsupported result type")
+ result.addMetric(key, metric)
+
+ return result
+
+config.name = 'test-data'
+config.suffixes = ['.ini']
+config.test_format = DummyFormat()
+config.test_source_root = None
+config.test_exec_root = None
+config.target_triple = None
diff --git a/utils/lit/tests/Inputs/test-data/metrics.ini b/utils/lit/tests/Inputs/test-data/metrics.ini
new file mode 100644
index 0000000..267e516
--- /dev/null
+++ b/utils/lit/tests/Inputs/test-data/metrics.ini
@@ -0,0 +1,7 @@
+[global]
+result_code = PASS
+result_output = 'Test passed.'
+
+[results]
+value0 = 1
+value1 = 2.3456
\ No newline at end of file
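For reference, a quick sketch of how the dummy format's ConfigParser pass sees this file (the path below is the in-tree location):

try:
    import ConfigParser  # Python 2
except ImportError:
    import configparser as ConfigParser  # Python 3

cfg = ConfigParser.ConfigParser()
cfg.read('utils/lit/tests/Inputs/test-data/metrics.ini')
print(cfg.get('global', 'result_code'))  # -> PASS
print(cfg.items('results'))              # -> [('value0', '1'), ('value1', '2.3456')]

Note that all values come back as strings, which is why the lit.cfg above eval()s each one before choosing between IntMetricValue and RealMetricValue.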
diff --git a/utils/lit/tests/test-data.py b/utils/lit/tests/test-data.py
new file mode 100644
index 0000000..54909d7
--- /dev/null
+++ b/utils/lit/tests/test-data.py
@@ -0,0 +1,12 @@
+# Test features related to formats which support reporting additional test data.
+
+# RUN: %{lit} -j 1 -v %{inputs}/test-data > %t.out
+# RUN: FileCheck < %t.out %s
+
+# CHECK: -- Testing:
+
+# CHECK: PASS: test-data :: metrics.ini
+# CHECK-NEXT: *** TEST 'test-data :: metrics.ini' RESULTS ***
+# CHECK-NEXT: value0: 1
+# CHECK-NEXT: value1: 2.3456
+# CHECK-NEXT: ***