"""
lit - LLVM Integrated Tester.

See lit.pod for more information.
"""
9 from __future__ import absolute_import
10 import math, os, platform, random, re, sys, time
12 import lit.ProgressBar
class TestingProgressDisplay(object):
    """Console reporter for a test run.

    Receives one update(test) call per completed test and prints result
    lines (plus, optionally, failure output and metrics) according to the
    verbosity options.  An optional curses-style progress bar is kept in
    sync and cleared before any regular output is written.

    NOTE(review): this copy of the class was truncated (missing method
    header for finish(), dangling elif, unclosed calls); the body below
    restores the obvious intended structure.
    """

    def __init__(self, opts, numTests, progressBar=None):
        # opts: parsed command-line options (quiet/succinct/showOutput/...).
        # numTests: total number of tests that will be reported.
        # progressBar: optional lit.ProgressBar-compatible object, or None.
        self.opts = opts
        self.numTests = numTests
        self.current = None
        self.progressBar = progressBar
        self.completed = 0

    def finish(self):
        # Tidy up the display once the run is over.
        if self.progressBar:
            self.progressBar.clear()
        elif self.opts.quiet:
            pass
        elif self.opts.succinct:
            sys.stdout.write('\n')

    def update(self, test):
        # Record completion; optionally refresh the incremental cache and
        # the progress bar.
        self.completed += 1

        if self.opts.incremental:
            update_incremental_cache(test)

        if self.progressBar:
            self.progressBar.update(float(self.completed)/self.numTests,
                                    test.getFullName())

        # Failures are always shown; everything else only when neither
        # --quiet nor --succinct suppresses it.
        shouldShow = test.result.code.isFailure or \
            (not self.opts.quiet and not self.opts.succinct)
        if not shouldShow:
            return

        if self.progressBar:
            self.progressBar.clear()

        # Show the test result line.
        test_name = test.getFullName()
        print('%s: %s (%d of %d)' % (test.result.code.name, test_name,
                                     self.completed, self.numTests))

        # Show the test failure output, if requested.
        if test.result.code.isFailure and self.opts.showOutput:
            print("%s TEST '%s' FAILED %s" % ('*'*20, test.getFullName(),
                                              '*'*20))
            print(test.result.output)
            print("*" * 20)

        # Report test metrics, if present.
        if test.result.metrics:
            print("%s TEST '%s' RESULTS %s" % ('*'*10, test.getFullName(),
                                               '*'*10))
            items = sorted(test.result.metrics.items())
            for metric_name, value in items:
                print('%s: %s ' % (metric_name, value.format()))
            print("*" * 10)

        # Ensure the output is flushed.
        sys.stdout.flush()
def write_test_results(run, lit_config, testing_time, output_path):
    """Serialize the results of a test run to `output_path` as JSON.

    run: the executed run object; its .tests carry per-test .result data.
    lit_config: used only to report a fatal error when json is unavailable.
    testing_time: total wall time of the run, in seconds.
    output_path: file to (over)write with the JSON document.

    NOTE(review): this copy was truncated (missing `data = {}`, missing
    try/except around the json import, no file cleanup); restored below.
    """
    try:
        import json
    except ImportError:
        # Python 2.5 ships without a json module.
        lit_config.fatal('test output unsupported with Python 2.5')

    # Construct the data we will write.
    data = {}
    # Encode the current lit version as a schema version.
    data['__version__'] = lit.__versioninfo__
    data['elapsed'] = testing_time
    # FIXME: Record some information on the lit configuration used?
    # FIXME: Record information from the individual test suites?

    # Add the per-test results.
    data['tests'] = tests_data = []
    for test in run.tests:
        test_data = {
            'name' : test.getFullName(),
            'code' : test.result.code.name,
            'output' : test.result.output,
            'elapsed' : test.result.elapsed }

        # Add test metrics, if present.
        if test.result.metrics:
            test_data['metrics'] = metrics_data = {}
            for key, value in test.result.metrics.items():
                metrics_data[key] = value.todata()

        tests_data.append(test_data)

    # Write the output; the context manager closes the file even on error.
    with open(output_path, 'w') as f:
        json.dump(data, f, indent=2, sort_keys=True)
        f.write('\n')
def update_incremental_cache(test):
    """Touch the mtime of a failing test's source file.

    --incremental mode reorders tests so that recently-modified files run
    first; bumping the mtime of a failing test therefore makes it re-run
    early on the next invocation.  Passing tests are left untouched.

    NOTE(review): the `return` under the guard was missing in this copy,
    which would have touched every test's file; restored below.
    """
    if not test.result.code.isFailure:
        return
    fname = test.getFilePath()
    # Set both atime and mtime to "now".
    os.utime(fname, None)
def sort_by_incremental_cache(run):
    """Reorder run.tests so most-recently-modified test files run first.

    Used with --incremental, together with update_incremental_cache(),
    which touches failing tests' files so they sort to the front of the
    next run.

    NOTE(review): the inner function header and the try/except around
    getmtime were missing in this copy; restored below, with the bare
    except narrowed to OSError (all os.path.getmtime can raise).
    """
    def sortIndex(test):
        fname = test.getFilePath()
        try:
            # Negate the mtime so newer files get smaller sort keys.
            return -os.path.getmtime(fname)
        except OSError:
            # Missing/unreadable file: largest key, so it sorts last.
            return 0
    run.tests.sort(key = lambda t: sortIndex(t))
def main(builtinParameters = {}):
    """Entry point for the lit driver.

    Parses command-line options, discovers tests, executes them, and
    prints a summary (optionally writing JSON and/or XUnit XML reports).

    NOTE(review): this copy of the function is missing a number of source
    lines (dangling branches, unclosed calls, names used with no visible
    initialization).  Review comments below flag the gaps; the surviving
    code is left untouched.
    """
    # NOTE(review): mutable default argument; it is only read here (copied
    # into userParams below), so the shared-state pitfall does not bite.
    # Use processes by default on Unix platforms.
    isWindows = platform.system() == 'Windows'
    useProcessesIsDefault = not isWindows

    # Build the command-line interface.
    from optparse import OptionParser, OptionGroup
    parser = OptionParser("usage: %prog [options] {file-or-path}")

    parser.add_option("", "--version", dest="show_version",
                      help="Show version and exit",
                      action="store_true", default=False)
    parser.add_option("-j", "--threads", dest="numThreads", metavar="N",
                      help="Number of testing threads",
                      type=int, action="store", default=None)
    parser.add_option("", "--config-prefix", dest="configPrefix",
                      metavar="NAME", help="Prefix for 'lit' config files",
                      action="store", default=None)
    parser.add_option("-D", "--param", dest="userParameters",
                      help="Add 'NAME' = 'VAL' to the user defined parameters",
                      type=str, action="append", default=[])

    group = OptionGroup(parser, "Output Format")
    # FIXME: I find these names very confusing, although I like the behavior.
    group.add_option("-q", "--quiet", dest="quiet",
                     help="Suppress no error output",
                     action="store_true", default=False)
    group.add_option("-s", "--succinct", dest="succinct",
                     help="Reduce amount of output",
                     action="store_true", default=False)
    group.add_option("-v", "--verbose", dest="showOutput",
                     help="Show all test output",
                     action="store_true", default=False)
    group.add_option("-o", "--output", dest="output_path",
                     help="Write test results to the provided path",
                     action="store", type=str, metavar="PATH")
    group.add_option("", "--no-progress-bar", dest="useProgressBar",
                     help="Do not use curses based progress bar",
                     action="store_false", default=True)
    group.add_option("", "--show-unsupported", dest="show_unsupported",
                     help="Show unsupported tests",
                     action="store_true", default=False)
    group.add_option("", "--show-xfail", dest="show_xfail",
                     help="Show tests that were expected to fail",
                     action="store_true", default=False)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Test Execution")
    group.add_option("", "--path", dest="path",
                     help="Additional paths to add to testing environment",
                     action="append", type=str, default=[])
    group.add_option("", "--vg", dest="useValgrind",
                     help="Run tests under valgrind",
                     action="store_true", default=False)
    group.add_option("", "--vg-leak", dest="valgrindLeakCheck",
                     help="Check for memory leaks under valgrind",
                     action="store_true", default=False)
    group.add_option("", "--vg-arg", dest="valgrindArgs", metavar="ARG",
                     help="Specify an extra argument for valgrind",
                     type=str, action="append", default=[])
    group.add_option("", "--time-tests", dest="timeTests",
                     help="Track elapsed wall time for each test",
                     action="store_true", default=False)
    group.add_option("", "--no-execute", dest="noExecute",
                     help="Don't execute any tests (assume PASS)",
                     action="store_true", default=False)
    group.add_option("", "--xunit-xml-output", dest="xunit_output_file",
                     help=("Write XUnit-compatible XML test reports to the"
                           " specified file"), default=None)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Test Selection")
    group.add_option("", "--max-tests", dest="maxTests", metavar="N",
                     help="Maximum number of tests to run",
                     action="store", type=int, default=None)
    group.add_option("", "--max-time", dest="maxTime", metavar="N",
                     help="Maximum time to spend testing (in seconds)",
                     action="store", type=float, default=None)
    group.add_option("", "--shuffle", dest="shuffle",
                     help="Run tests in random order",
                     action="store_true", default=False)
    # NOTE(review): the closing fragment of this help string appears to be
    # missing, leaving the call syntactically broken in this copy.
    group.add_option("-i", "--incremental", dest="incremental",
                     help="Run modified and failing tests first (updates "
                     action="store_true", default=False)
    group.add_option("", "--filter", dest="filter", metavar="REGEX",
                     help=("Only run tests with paths matching the given "
                           "regular expression"),
                     action="store", default=None)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Debug and Experimental Options")
    group.add_option("", "--debug", dest="debug",
                     help="Enable debugging (for 'lit' development)",
                     action="store_true", default=False)
    group.add_option("", "--show-suites", dest="showSuites",
                     help="Show discovered test suites",
                     action="store_true", default=False)
    group.add_option("", "--show-tests", dest="showTests",
                     help="Show all discovered tests",
                     action="store_true", default=False)
    group.add_option("", "--use-processes", dest="useProcesses",
                     help="Run tests in parallel with processes (not threads)",
                     action="store_true", default=useProcessesIsDefault)
    group.add_option("", "--use-threads", dest="useProcesses",
                     help="Run tests in parallel with threads (not processes)",
                     action="store_false", default=useProcessesIsDefault)
    parser.add_option_group(group)

    (opts, args) = parser.parse_args()

    if opts.show_version:
        print("lit %s" % (lit.__version__,))
        # NOTE(review): an early return and the enclosing `if not args:`
        # guard appear to be missing from this copy.
        parser.error('No inputs specified')

    if opts.numThreads is None:
# Python <2.5 has a race condition causing lit to always fail with numThreads>1
# http://bugs.python.org/issue1731717
# I haven't seen this bug occur with 2.5.2 and later, so only enable multiple
# threads by default there.
        if sys.hexversion >= 0x2050200:
            opts.numThreads = lit.util.detectCPUs()
            # NOTE(review): an `else:` fallback to a single thread appears
            # to be missing here.

    # Create the user defined parameters.
    userParams = dict(builtinParameters)
    for entry in opts.userParameters:
        # NOTE(review): the branch handling entries without '=' appears to
        # be missing from this copy.
        name,val = entry.split('=', 1)
        userParams[name] = val

    # Create the global config object.
    # NOTE(review): several keyword arguments (e.g. path, quiet, debug,
    # params) appear to be missing from this call in this copy.
    litConfig = lit.LitConfig.LitConfig(
        progname = os.path.basename(sys.argv[0]),
        useValgrind = opts.useValgrind,
        valgrindLeakCheck = opts.valgrindLeakCheck,
        valgrindArgs = opts.valgrindArgs,
        noExecute = opts.noExecute,
        isWindows = isWindows,
        config_prefix = opts.configPrefix)

    # Perform test discovery.
    # NOTE(review): `inputs` has no visible assignment in this copy
    # (presumably `inputs = args` was dropped) — confirm against upstream.
    run = lit.run.Run(litConfig,
                      lit.discovery.find_tests_for_inputs(litConfig, inputs))

    if opts.showSuites or opts.showTests:
        # Aggregate the tests by suite.
        # NOTE(review): the `suitesAndTests = {}` initialization appears to
        # be missing from this copy.
        for result_test in run.tests:
            if result_test.suite not in suitesAndTests:
                suitesAndTests[result_test.suite] = []
            suitesAndTests[result_test.suite].append(result_test)
        suitesAndTests = list(suitesAndTests.items())
        suitesAndTests.sort(key = lambda item: item[0].name)

        # Show the suites, if requested.
        # NOTE(review): the inner `if opts.showSuites:` guard appears to be
        # missing from this copy.
        print('-- Test Suites --')
        for ts,ts_tests in suitesAndTests:
            print(' %s - %d tests' %(ts.name, len(ts_tests)))
            print(' Source Root: %s' % ts.source_root)
            print(' Exec Root : %s' % ts.exec_root)

        # Show the tests, if requested.
        # NOTE(review): the inner `if opts.showTests:` guard and a trailing
        # `sys.exit(0)` appear to be missing from this copy.
        print('-- Available Tests --')
        for ts,ts_tests in suitesAndTests:
            ts_tests.sort(key = lambda test: test.path_in_suite)
            for test in ts_tests:
                print(' %s' % (test.getFullName(),))

    # Select and order the tests.
    numTotalTests = len(run.tests)

    # First, select based on the filter expression if given.
    # NOTE(review): the `if opts.filter:` guard and the try/except around
    # re.compile appear to be missing; the parser.error call below is also
    # left with an unclosed argument list in this copy.
    rex = re.compile(opts.filter)
    parser.error("invalid regular expression for --filter: %r" % (
    run.tests = [result_test for result_test in run.tests
                 if rex.search(result_test.getFullName())]

    # Then select the order.
    # NOTE(review): the `if opts.shuffle:` guard and a final `else:` branch
    # appear to be missing around the three ordering strategies below.
    random.shuffle(run.tests)
    elif opts.incremental:
        sort_by_incremental_cache(run)
    run.tests.sort(key = lambda result_test: result_test.getFullName())

    # Finally limit the number of tests, if desired.
    if opts.maxTests is not None:
        run.tests = run.tests[:opts.maxTests]

    # Don't create more threads than tests.
    opts.numThreads = min(len(run.tests), opts.numThreads)

    # NOTE(review): the `extra = ''` initialization and the closing tokens
    # of the header format call appear to be missing from this copy.
    if len(run.tests) != numTotalTests:
        extra = ' of %d' % numTotalTests
    header = '-- Testing: %d%s tests, %d threads --'%(len(run.tests), extra,

    # NOTE(review): the try/except around TerminalController (falling back
    # to SimpleProgressBar) appears to be truncated in this copy.
    if opts.succinct and opts.useProgressBar:
        tc = lit.ProgressBar.TerminalController()
        progressBar = lit.ProgressBar.ProgressBar(tc, header)
        progressBar = lit.ProgressBar.SimpleProgressBar('Testing: ')

    startTime = time.time()
    display = TestingProgressDisplay(opts, len(run.tests), progressBar)
    # NOTE(review): the enclosing `try:` for this except clause, and the
    # closing tokens of the execute_tests call, appear to be missing.
    run.execute_tests(display, opts.numThreads, opts.maxTime,
    except KeyboardInterrupt:

    testing_time = time.time() - startTime
    # NOTE(review): an `if not opts.quiet:` guard appears to be missing.
    print('Testing Time: %.2fs' % (testing_time,))

    # Write out the test data, if requested.
    if opts.output_path is not None:
        write_test_results(run, litConfig, testing_time, opts.output_path)

    # List test results organized by kind.
    # NOTE(review): the `byCode = {}` / `hasFailures = False`
    # initializations and the body of the isFailure branch appear to be
    # missing from this copy.
    for test in run.tests:
        if test.result.code not in byCode:
            byCode[test.result.code] = []
        byCode[test.result.code].append(test)
        if test.result.code.isFailure:

    # Print each test in any of the failing groups.
    for title,code in (('Unexpected Passing Tests', lit.Test.XPASS),
                       ('Failing Tests', lit.Test.FAIL),
                       ('Unresolved Tests', lit.Test.UNRESOLVED),
                       ('Unsupported Tests', lit.Test.UNSUPPORTED),
                       ('Expected Failing Tests', lit.Test.XFAIL)):
        # NOTE(review): the `continue` under this condition, an empty-group
        # check on `elts`, and the `for test in elts:` loop header appear
        # to be missing from this copy.
        if (lit.Test.XFAIL == code and not opts.show_xfail) or \
           (lit.Test.UNSUPPORTED == code and not opts.show_unsupported):
        elts = byCode.get(code)
        print('%s (%d):' % (title, len(elts)))
            print(' %s' % test.getFullName())
        sys.stdout.write('\n')

    if opts.timeTests and run.tests:
        test_times = [(test.getFullName(), test.result.elapsed)
                      for test in run.tests]
        lit.util.printHistogram(test_times, title='Tests')

    # Summary counts per result kind.
    for name,code in (('Expected Passes ', lit.Test.PASS),
                      ('Expected Failures ', lit.Test.XFAIL),
                      ('Unsupported Tests ', lit.Test.UNSUPPORTED),
                      ('Unresolved Tests ', lit.Test.UNRESOLVED),
                      ('Unexpected Passes ', lit.Test.XPASS),
                      ('Unexpected Failures', lit.Test.FAIL)):
        # NOTE(review): the `continue` under the quiet check and an
        # `if N:` guard before the print appear to be missing.
        if opts.quiet and not code.isFailure:
        N = len(byCode.get(code,[]))
            print(' %s: %d' % (name,N))

    if opts.xunit_output_file:
        # Collect the tests, indexed by test suite
        # NOTE(review): the `by_suite = {}` initialization, the dict-entry
        # creation under `if suite not in by_suite:`, and the `else:`
        # before the passes increment appear to be missing from this copy.
        for result_test in run.tests:
            suite = result_test.suite.config.name
            if suite not in by_suite:
            by_suite[suite]['tests'].append(result_test)
            if result_test.result.code.isFailure:
                by_suite[suite]['failures'] += 1
                by_suite[suite]['passes'] += 1
        xunit_output_file = open(opts.xunit_output_file, "w")
        xunit_output_file.write("<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n")
        xunit_output_file.write("<testsuites>\n")
        for suite_name, suite in by_suite.items():
            safe_suite_name = suite_name.replace(".", "-")
            xunit_output_file.write("<testsuite name='" + safe_suite_name + "'")
            xunit_output_file.write(" tests='" + str(suite['passes'] +
                suite['failures']) + "'")
            # NOTE(review): the closing tokens of this write call appear to
            # be missing from this copy.
            xunit_output_file.write(" failures='" + str(suite['failures']) +
            for result_test in suite['tests']:
                xunit_output_file.write(result_test.getJUnitXML() + "\n")
            xunit_output_file.write("</testsuite>\n")
        xunit_output_file.write("</testsuites>")
        xunit_output_file.close()

    # If we encountered any additional errors, exit abnormally.
    if litConfig.numErrors:
        sys.stderr.write('\n%d error(s), exiting.\n' % litConfig.numErrors)
        # NOTE(review): a `sys.exit(2)` appears to be missing here.

    # Warn about warnings.
    if litConfig.numWarnings:
        sys.stderr.write('\n%d warning(s) in tests.\n' % litConfig.numWarnings)
472 if __name__=='__main__':