
[lit] Add support for attaching arbitrary metrics to test results.

- This is a work-in-progress and all details are subject to change, but I am
   trying to build up support for allowing lit to be used as a driver for
   performance tests (or other tests which might want to record information
   beyond simple PASS/FAIL).

llvm-svn: 190535
Daniel Dunbar 2013-09-11 17:45:11 +00:00
parent 386bd314a1
commit e5789151e6
5 changed files with 119 additions and 2 deletions
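
As a rough illustration of the intended usage (a sketch only: lit.Test.Result, IntMetricValue, RealMetricValue, and addMetric are the names added by this commit, while the helper below and its metric names are made up for the example), a test format's execute() could attach numbers to the result it returns:

import lit.Test

# Illustrative helper, not part of this commit: build a result that carries
# a couple of performance metrics alongside the usual PASS/FAIL code.
def result_with_metrics(exit_code, output, compile_time, peak_rss):
    code = lit.Test.PASS if exit_code == 0 else lit.Test.FAIL
    result = lit.Test.Result(code, output)
    result.addMetric('compile_time', lit.Test.RealMetricValue(compile_time))
    result.addMetric('peak_rss', lit.Test.IntMetricValue(peak_rss))
    return result

The verbose runner output added below then prints each attached metric on its own line after the test's result line.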


@@ -1,6 +1,6 @@
 import os
 
-# Test results.
+# Test result codes.
 
 class ResultCode(object):
     """Test result codes."""
@@ -31,6 +31,28 @@ XPASS = ResultCode('XPASS', True)
 UNRESOLVED = ResultCode('UNRESOLVED', True)
 UNSUPPORTED = ResultCode('UNSUPPORTED', False)
 
+# Test metric values.
+
+class MetricValue(object):
+    def format(self):
+        raise RuntimeError("abstract method")
+
+class IntMetricValue(MetricValue):
+    def __init__(self, value):
+        self.value = value
+
+    def format(self):
+        return str(self.value)
+
+class RealMetricValue(MetricValue):
+    def __init__(self, value):
+        self.value = value
+
+    def format(self):
+        return '%.4f' % self.value
+
+# Test results.
+
 class Result(object):
     """Wrapper for the results of executing an individual test."""
@@ -41,6 +63,25 @@ class Result(object):
         self.output = output
         # The wall timing to execute the test, if timing.
         self.elapsed = elapsed
+        # The metrics reported by this test.
+        self.metrics = {}
+
+    def addMetric(self, name, value):
+        """
+        addMetric(name, value)
+
+        Attach a test metric to the test result, with the given name and list of
+        values. It is an error to attempt to attach the metrics with the same
+        name multiple times.
+
+        Each value must be an instance of a MetricValue subclass.
+        """
+        if name in self.metrics:
+            raise ValueError("result already includes metrics for %r" % (
+                    name,))
+        if not isinstance(value, MetricValue):
+            raise TypeError("unexpected metric value: %r" % (value,))
+        self.metrics[name] = value
 
 # Test classes.
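
MetricValue only requires a format() method returning the string to display, so formats that need other value kinds can subclass it; for instance (a hypothetical subclass, not part of this commit):

import lit.Test

# Hypothetical example, not part of this commit: a metric that renders a
# ratio together with a unit, relying only on the format() hook above.
class RatioMetricValue(lit.Test.MetricValue):
    def __init__(self, numerator, denominator, unit=''):
        self.numerator = numerator
        self.denominator = denominator
        self.unit = unit

    def format(self):
        return '%.4f %s' % (float(self.numerator) / self.denominator, self.unit)

Because addMetric only checks isinstance(value, MetricValue), such a subclass passes the type check above unchanged.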


@@ -45,15 +45,28 @@ class TestingProgressDisplay(object):
         if self.progressBar:
             self.progressBar.clear()
 
-        print('%s: %s (%d of %d)' % (test.result.code.name, test.getFullName(),
+        # Show the test result line.
+        test_name = test.getFullName()
+        print('%s: %s (%d of %d)' % (test.result.code.name, test_name,
                                      self.completed, self.numTests))
 
+        # Show the test failure output, if requested.
         if test.result.code.isFailure and self.opts.showOutput:
             print("%s TEST '%s' FAILED %s" % ('*'*20, test.getFullName(),
                                               '*'*20))
             print(test.result.output)
             print("*" * 20)
 
+        # Report test metrics, if present.
+        if test.result.metrics:
+            print("%s TEST '%s' RESULTS %s" % ('*'*10, test.getFullName(),
+                                               '*'*10))
+            items = sorted(test.result.metrics.items())
+            for metric_name, value in items:
+                print('%s: %s ' % (metric_name, value.format()))
+            print("*" * 10)
+
+        # Ensure the output is flushed.
         sys.stdout.flush()
 
 def main(builtinParameters = {}):


@@ -0,0 +1,44 @@
+import os
+
+try:
+    import ConfigParser
+except ImportError:
+    import configparser as ConfigParser
+
+import lit.formats
+import lit.Test
+
+class DummyFormat(lit.formats.FileBasedTest):
+    def execute(self, test, lit_config):
+        # In this dummy format, expect that each test file is actually just a
+        # .ini format dump of the results to report.
+        source_path = test.getSourcePath()
+
+        cfg = ConfigParser.ConfigParser()
+        cfg.read(source_path)
+
+        # Create the basic test result.
+        result_code = cfg.get('global', 'result_code')
+        result_output = cfg.get('global', 'result_output')
+        result = lit.Test.Result(getattr(lit.Test, result_code),
+                                 result_output)
+
+        # Load additional metrics.
+        for key,value_str in cfg.items('results'):
+            value = eval(value_str)
+            if isinstance(value, int):
+                metric = lit.Test.IntMetricValue(value)
+            elif isinstance(value, float):
+                metric = lit.Test.RealMetricValue(value)
+            else:
+                raise RuntimeError("unsupported result type")
+            result.addMetric(key, metric)
+
+        return result
+
+config.name = 'test-data'
+config.suffixes = ['.ini']
+config.test_format = DummyFormat()
+config.test_source_root = None
+config.test_exec_root = None
+config.target_triple = None
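
The dummy format above uses eval() to recover typed values from the .ini section, which is acceptable for a test fixture; a stricter variant (an alternative sketch, not what this commit does) could restrict itself to numeric literals:

import ast
import lit.Test

# Alternative sketch, not part of this commit: parse only literal ints and
# floats instead of calling eval() on the configuration value.
def metric_from_string(value_str):
    value = ast.literal_eval(value_str)
    if isinstance(value, bool) or not isinstance(value, (int, float)):
        raise RuntimeError("unsupported result type: %r" % (value_str,))
    if isinstance(value, int):
        return lit.Test.IntMetricValue(value)
    return lit.Test.RealMetricValue(value)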


@@ -0,0 +1,7 @@
+[global]
+result_code = PASS
+result_output = 'Test passed.'
+
+[results]
+value0 = 1
+value1 = 2.3456


@@ -0,0 +1,12 @@
+# Test features related to formats which support reporting additional test data.
+
+# RUN: %{lit} -j 1 -v %{inputs}/test-data > %t.out
+# RUN: FileCheck < %t.out %s
+
+# CHECK: -- Testing:
+
+# CHECK: PASS: test-data :: metrics.ini
+# CHECK-NEXT: *** TEST 'test-data :: metrics.ini' RESULTS ***
+# CHECK-NEXT: value0: 1
+# CHECK-NEXT: value1: 2.3456
+# CHECK-NEXT: ***