X-Git-Url: http://demsky.eecs.uci.edu/git/?a=blobdiff_plain;f=utils%2Flit%2Flit%2FTest.py;h=38bb41b0252d59a3769a3be2097cbae659f6d109;hb=0d1ad626e89441c576418f92f7fd77924aa7e804;hp=c1bacb3d86d1f8b8efe00df458c411935e01f0d7;hpb=d3bf8a2c0aefe36cb4bfc6f41983aa09caae2acb;p=oota-llvm.git
diff --git a/utils/lit/lit/Test.py b/utils/lit/lit/Test.py
index c1bacb3d86d..38bb41b0252 100644
--- a/utils/lit/lit/Test.py
+++ b/utils/lit/lit/Test.py
@@ -1,10 +1,23 @@
 import os
+from xml.sax.saxutils import escape
+from json import JSONEncoder
 
-# Test results.
+# Test result codes.
 
 class ResultCode(object):
     """Test result codes."""
 
+    # We override __new__ and __getnewargs__ to ensure that pickling still
+    # provides unique ResultCode objects in any particular instance.
+    _instances = {}
+    def __new__(cls, name, isFailure):
+        res = cls._instances.get(name)
+        if res is None:
+            cls._instances[name] = res = super(ResultCode, cls).__new__(cls)
+        return res
+    def __getnewargs__(self):
+        return (self.name, self.isFailure)
+
     def __init__(self, name, isFailure):
         self.name = name
         self.isFailure = isFailure
@@ -20,6 +33,85 @@ XPASS = ResultCode('XPASS', True)
 UNRESOLVED = ResultCode('UNRESOLVED', True)
 UNSUPPORTED = ResultCode('UNSUPPORTED', False)
 
+# Test metric values.
+
+class MetricValue(object):
+    def format(self):
+        """
+        format() -> str
+
+        Convert this metric to a string suitable for displaying as part of the
+        console output.
+        """
+        raise RuntimeError("abstract method")
+
+    def todata(self):
+        """
+        todata() -> json-serializable data
+
+        Convert this metric to content suitable for serializing in the JSON test
+        output.
+        """
+        raise RuntimeError("abstract method")
+
+class IntMetricValue(MetricValue):
+    def __init__(self, value):
+        self.value = value
+
+    def format(self):
+        return str(self.value)
+
+    def todata(self):
+        return self.value
+
+class RealMetricValue(MetricValue):
+    def __init__(self, value):
+        self.value = value
+
+    def format(self):
+        return '%.4f' % self.value
+
+    def todata(self):
+        return self.value
+
+class JSONMetricValue(MetricValue):
+    """
+    JSONMetricValue is used for types that are representable in the output
+    but that are otherwise uninterpreted.
+    """
+    def __init__(self, value):
+        # Ensure the value is serializable by trying to encode it.
+        # WARNING: The value may change before it is encoded again, and may
+        #          not be encodable after the change.
+        try:
+            e = JSONEncoder()
+            e.encode(value)
+        except TypeError:
+            raise
+        self.value = value
+
+    def format(self):
+        e = JSONEncoder(indent=2, sort_keys=True)
+        return e.encode(self.value)
+
+    def todata(self):
+        return self.value
+
+def toMetricValue(value):
+    if isinstance(value, MetricValue):
+        return value
+    elif isinstance(value, int) or isinstance(value, long):
+        return IntMetricValue(value)
+    elif isinstance(value, float):
+        return RealMetricValue(value)
+    else:
+        # Try to create a JSONMetricValue and let the constructor throw
+        # if value is not a valid type.
+        return JSONMetricValue(value)
+
+
+# Test results.
+
 class Result(object):
     """Wrapper for the results of executing an individual test."""
 
@@ -30,6 +122,25 @@ class Result(object):
         self.output = output
         # The wall timing to execute the test, if timing.
         self.elapsed = elapsed
+        # The metrics reported by this test.
+        self.metrics = {}
+
+    def addMetric(self, name, value):
+        """
+        addMetric(name, value)
+
+        Attach a test metric to the test result, with the given name and
+        value. It is an error to attempt to attach metrics with the same
+        name multiple times.
+
+        Each value must be an instance of a MetricValue subclass.
+        """
+        if name in self.metrics:
+            raise ValueError("result already includes metrics for %r" % (
+                    name,))
+        if not isinstance(value, MetricValue):
+            raise TypeError("unexpected metric value: %r" % (value,))
+        self.metrics[name] = value
 
 # Test classes.
 
@@ -55,10 +166,15 @@ class TestSuite:
 class Test:
     """Test - Information on a single test instance."""
 
-    def __init__(self, suite, path_in_suite, config):
+    def __init__(self, suite, path_in_suite, config, file_path = None):
         self.suite = suite
         self.path_in_suite = path_in_suite
         self.config = config
+        self.file_path = file_path
+        # A list of conditions under which this test is expected to fail. These
+        # can optionally be provided by test format handlers, and will be
+        # honored when the test result is supplied.
+        self.xfails = []
         # The test result, once complete.
         self.result = None
 
@@ -70,11 +186,71 @@ class Test:
 
         self.result = result
 
+        # Apply the XFAIL handling to resolve the result exit code.
+        if self.isExpectedToFail():
+            if self.result.code == PASS:
+                self.result.code = XPASS
+            elif self.result.code == FAIL:
+                self.result.code = XFAIL
+
     def getFullName(self):
         return self.suite.config.name + ' :: ' + '/'.join(self.path_in_suite)
 
+    def getFilePath(self):
+        if self.file_path:
+            return self.file_path
+        return self.getSourcePath()
+
     def getSourcePath(self):
         return self.suite.getSourcePath(self.path_in_suite)
 
     def getExecPath(self):
         return self.suite.getExecPath(self.path_in_suite)
+
+    def isExpectedToFail(self):
+        """
+        isExpectedToFail() -> bool
+
+        Check whether this test is expected to fail in the current
+        configuration. This check relies on the test's xfails property, which
+        some test formats may not compute until the test has first been
+        executed.
+        """
+
+        # Check if any of the xfails match an available feature or the target.
+        for item in self.xfails:
+            # If this is the wildcard, it always fails.
+            if item == '*':
+                return True
+
+            # If this is an exact match for one of the features, it fails.
+            if item in self.config.available_features:
+                return True
+
+            # If this is a part of the target triple, it fails.
+            if item in self.suite.config.target_triple:
+                return True
+
+        return False
+
+
+    def getJUnitXML(self):
+        test_name = self.path_in_suite[-1]
+        test_path = self.path_in_suite[:-1]
+        safe_test_path = [x.replace(".","_") for x in test_path]
+        safe_name = self.suite.name.replace(".","-")
+
+        if safe_test_path:
+            class_name = safe_name + "." + "/".join(safe_test_path)
+        else:
+            class_name = safe_name + "." + safe_name
+
+        xml = "
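Usage note (not part of the patch): a minimal sketch of how a test format might drive the metrics API added above. Only Result.addMetric, IntMetricValue, RealMetricValue, toMetricValue, and the PASS/FAIL codes come from this diff; the Result constructor arguments, metric names, and numbers below are assumptions for illustration.

# Illustrative sketch only, not part of the patch. Assumes the lit.Test
# module path and a Result(code, output, elapsed) constructor; the metric
# names and values are made up.
import lit.Test as Test

def report_benchmark_result(exit_code, compile_time, num_warnings):
    code = Test.PASS if exit_code == 0 else Test.FAIL
    result = Test.Result(code, 'benchmark finished', 1.25)

    # Each metric must be a MetricValue instance; addMetric raises TypeError
    # otherwise, and ValueError if the same name is attached twice.
    result.addMetric('compile_time', Test.RealMetricValue(compile_time))
    result.addMetric('num_warnings', Test.IntMetricValue(num_warnings))

    # toMetricValue() wraps plain ints/floats automatically and falls back
    # to JSONMetricValue for other JSON-serializable data.
    result.addMetric('flags', Test.toMetricValue({'opt_level': 2}))
    return result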