#!/usr/bin/env python

"""
lit - LLVM Integrated Tester.

See lit.pod for more information.
"""

from __future__ import absolute_import
import math, os, platform, random, re, sys, time

import lit.ProgressBar
import lit.LitConfig
import lit.Test
import lit.run
import lit.util
import lit.discovery
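
# Renders test progress to the terminal as results arrive: it drives an
# optional curses-based progress bar and prints one result line per test,
# plus failure output and metrics when the options request them.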
class TestingProgressDisplay(object):
    def __init__(self, opts, numTests, progressBar=None):
        self.opts = opts
        self.numTests = numTests
        self.progressBar = progressBar
        self.completed = 0

    def finish(self):
        if self.progressBar:
            self.progressBar.clear()
        elif self.opts.quiet:
            pass
        elif self.opts.succinct:
            sys.stdout.write('\n')

    def update(self, test):
        self.completed += 1

        if self.opts.incremental:
            update_incremental_cache(test)

        if self.progressBar:
            self.progressBar.update(float(self.completed)/self.numTests,
                                    test.getFullName())

        shouldShow = test.result.code.isFailure or \
            self.opts.showAllOutput or \
            (not self.opts.quiet and not self.opts.succinct)
        if not shouldShow:
            return

        if self.progressBar:
            self.progressBar.clear()

        # Show the test result line.
        test_name = test.getFullName()
        print('%s: %s (%d of %d)' % (test.result.code.name, test_name,
                                     self.completed, self.numTests))

        # Show the test failure output, if requested.
        if (test.result.code.isFailure and self.opts.showOutput) or \
           self.opts.showAllOutput:
            if test.result.code.isFailure:
                print("%s TEST '%s' FAILED %s" % ('*'*20, test.getFullName(),
                                                  '*'*20))
            print(test.result.output)
            print("*" * 20)

        # Report test metrics, if present.
        if test.result.metrics:
            print("%s TEST '%s' RESULTS %s" % ('*'*10, test.getFullName(),
                                               '*'*10))
            items = sorted(test.result.metrics.items())
            for metric_name, value in items:
                print('%s: %s' % (metric_name, value.format()))
            print("*" * 10)

        # Ensure the output is flushed.
        sys.stdout.flush()
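
# write_test_results() serializes the whole run into a single JSON document.
# An illustrative sketch of its shape (all values made up):
#
#   {
#     "__version__": [...],   # lit.__versioninfo__, the schema version
#     "elapsed": 42.7,
#     "tests": [
#       {"name": "suite :: path/to/test", "code": "PASS",
#        "output": "...", "elapsed": 0.31}
#     ]
#   }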
def write_test_results(run, lit_config, testing_time, output_path):
    try:
        import json
    except ImportError:
        lit_config.fatal('test output unsupported with Python 2.5')

    # Construct the data we will write.
    data = {}

    # Encode the current lit version as a schema version.
    data['__version__'] = lit.__versioninfo__
    data['elapsed'] = testing_time
    # FIXME: Record some information on the lit configuration used?
    # FIXME: Record information from the individual test suites?

    # Encode the individual test results.
    data['tests'] = tests_data = []
    for test in run.tests:
        test_data = {
            'name': test.getFullName(),
            'code': test.result.code.name,
            'output': test.result.output,
            'elapsed': test.result.elapsed}

        # Add test metrics, if present.
        if test.result.metrics:
            test_data['metrics'] = metrics_data = {}
            for key, value in test.result.metrics.items():
                metrics_data[key] = value.todata()

        tests_data.append(test_data)

    # Write the output, closing the file even if serialization fails.
    f = open(output_path, 'w')
    try:
        json.dump(data, f, indent=2, sort_keys=True)
    finally:
        f.close()
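
# A failing test's on-disk file gets its mtime bumped right after it runs,
# so that a later --incremental run can recognize it by timestamp alone.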
def update_incremental_cache(test):
    if not test.result.code.isFailure:
        return
    fname = test.getFilePath()
    os.utime(fname, None)
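
# Orders run.tests so the most recently touched files (recently failed or
# recently edited tests) come first: negating the mtime makes a plain
# ascending sort yield newest-first, and files that cannot be stat'ed
# (key 0) sort last, after every negative key.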
def sort_by_incremental_cache(run):
    def sortIndex(test):
        fname = test.getFilePath()
        try:
            return -os.path.getmtime(fname)
        except OSError:
            return 0
    run.tests.sort(key=sortIndex)
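
# Entry point: parse options, discover tests, select and order them, run
# them in parallel, then summarize the results and choose an exit code.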
def main(builtinParameters={}):
    # Use processes by default on Unix platforms.
    isWindows = platform.system() == 'Windows'
    useProcessesIsDefault = not isWindows

    from optparse import OptionParser, OptionGroup
    parser = OptionParser("usage: %prog [options] {file-or-path}")

    parser.add_option("", "--version", dest="show_version",
                      help="Show version and exit",
                      action="store_true", default=False)
    parser.add_option("-j", "--threads", dest="numThreads", metavar="N",
                      help="Number of testing threads",
                      type=int, action="store", default=None)
    parser.add_option("", "--config-prefix", dest="configPrefix",
                      metavar="NAME", help="Prefix for 'lit' config files",
                      action="store", default=None)
    parser.add_option("-D", "--param", dest="userParameters",
                      metavar="NAME=VAL",
                      help="Add 'NAME' = 'VAL' to the user defined parameters",
                      type=str, action="append", default=[])

    group = OptionGroup(parser, "Output Format")
    # FIXME: I find these names very confusing, although I like the
    # functionality they provide.
    group.add_option("-q", "--quiet", dest="quiet",
                     help="Suppress all output except failures",
                     action="store_true", default=False)
    group.add_option("-s", "--succinct", dest="succinct",
                     help="Reduce amount of output",
                     action="store_true", default=False)
    group.add_option("-v", "--verbose", dest="showOutput",
                     help="Show test output for failures",
                     action="store_true", default=False)
    group.add_option("-a", "--show-all", dest="showAllOutput",
                     help="Display all commandlines and output",
                     action="store_true", default=False)
    group.add_option("-o", "--output", dest="output_path",
                     help="Write test results to the provided path",
                     action="store", type=str, metavar="PATH")
    group.add_option("", "--no-progress-bar", dest="useProgressBar",
                     help="Do not use curses based progress bar",
                     action="store_false", default=True)
    group.add_option("", "--show-unsupported", dest="show_unsupported",
                     help="Show unsupported tests",
                     action="store_true", default=False)
    group.add_option("", "--show-xfail", dest="show_xfail",
                     help="Show tests that were expected to fail",
                     action="store_true", default=False)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Test Execution")
    group.add_option("", "--path", dest="path",
                     help="Additional paths to add to testing environment",
                     action="append", type=str, default=[])
    group.add_option("", "--vg", dest="useValgrind",
                     help="Run tests under valgrind",
                     action="store_true", default=False)
    group.add_option("", "--vg-leak", dest="valgrindLeakCheck",
                     help="Check for memory leaks under valgrind",
                     action="store_true", default=False)
    group.add_option("", "--vg-arg", dest="valgrindArgs", metavar="ARG",
                     help="Specify an extra argument for valgrind",
                     type=str, action="append", default=[])
    group.add_option("", "--time-tests", dest="timeTests",
                     help="Track elapsed wall time for each test",
                     action="store_true", default=False)
    group.add_option("", "--no-execute", dest="noExecute",
                     help="Don't execute any tests (assume PASS)",
                     action="store_true", default=False)
    group.add_option("", "--xunit-xml-output", dest="xunit_output_file",
                     help=("Write XUnit-compatible XML test reports to the"
                           " specified file"), default=None)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Test Selection")
    group.add_option("", "--max-tests", dest="maxTests", metavar="N",
                     help="Maximum number of tests to run",
                     action="store", type=int, default=None)
    group.add_option("", "--max-time", dest="maxTime", metavar="N",
                     help="Maximum time to spend testing (in seconds)",
                     action="store", type=float, default=None)
    group.add_option("", "--shuffle", dest="shuffle",
                     help="Run tests in random order",
                     action="store_true", default=False)
    group.add_option("-i", "--incremental", dest="incremental",
                     help="Run modified and failing tests first (updates "
                          "mtimes)",
                     action="store_true", default=False)
    group.add_option("", "--filter", dest="filter", metavar="REGEX",
                     help=("Only run tests with paths matching the given "
                           "regular expression"),
                     action="store", default=None)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Debug and Experimental Options")
    group.add_option("", "--debug", dest="debug",
                     help="Enable debugging (for 'lit' development)",
                     action="store_true", default=False)
    group.add_option("", "--show-suites", dest="showSuites",
                     help="Show discovered test suites",
                     action="store_true", default=False)
    group.add_option("", "--show-tests", dest="showTests",
                     help="Show all discovered tests",
                     action="store_true", default=False)
    group.add_option("", "--use-processes", dest="useProcesses",
                     help="Run tests in parallel with processes (not threads)",
                     action="store_true", default=useProcessesIsDefault)
    group.add_option("", "--use-threads", dest="useProcesses",
                     help="Run tests in parallel with threads (not processes)",
                     action="store_false", default=useProcessesIsDefault)
    parser.add_option_group(group)
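
    # A few illustrative invocations of the options defined above:
    #   lit -sv tests/                        succinct bar, failure output
    #   lit -j 8 --filter 'X86.*' tests/      8 threads, regex-selected subset
    #   lit --shuffle --max-tests 100 tests/  a random 100-test sample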

    (opts, args) = parser.parse_args()

    if opts.show_version:
        print("lit %s" % (lit.__version__,))
        return

    if not args:
        parser.error('No inputs specified')

    if opts.numThreads is None:
        # Python <2.5.2 has a race condition that causes lit to always fail
        # with numThreads>1 (http://bugs.python.org/issue1731717), so only
        # enable multiple threads by default on 2.5.2 and later.
        if sys.hexversion >= 0x2050200:
            opts.numThreads = lit.util.detectCPUs()
        else:
            opts.numThreads = 1

    inputs = args

    # Create the user defined parameters.
    userParams = dict(builtinParameters)
    for entry in opts.userParameters:
        if '=' not in entry:
            name, val = entry, ''
        else:
            name, val = entry.split('=', 1)
        userParams[name] = val
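
    # For example, "-D foo=bar" yields userParams['foo'] == 'bar', while a
    # bare "-D foo" (no '=') maps 'foo' to the empty string.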

    # Create the global config object.
    litConfig = lit.LitConfig.LitConfig(
        progname=os.path.basename(sys.argv[0]),
        path=opts.path,
        quiet=opts.quiet,
        useValgrind=opts.useValgrind,
        valgrindLeakCheck=opts.valgrindLeakCheck,
        valgrindArgs=opts.valgrindArgs,
        noExecute=opts.noExecute,
        debug=opts.debug,
        isWindows=isWindows,
        params=userParams,
        config_prefix=opts.configPrefix)

    # Perform test discovery.
    run = lit.run.Run(litConfig,
                      lit.discovery.find_tests_for_inputs(litConfig, inputs))

    if opts.showSuites or opts.showTests:
        # Aggregate the tests by suite.
        suitesAndTests = {}
        for result_test in run.tests:
            if result_test.suite not in suitesAndTests:
                suitesAndTests[result_test.suite] = []
            suitesAndTests[result_test.suite].append(result_test)
        suitesAndTests = list(suitesAndTests.items())
        suitesAndTests.sort(key=lambda item: item[0].name)

        # Show the suites, if requested.
        if opts.showSuites:
            print('-- Test Suites --')
            for ts, ts_tests in suitesAndTests:
                print('  %s - %d tests' % (ts.name, len(ts_tests)))
                print('    Source Root: %s' % ts.source_root)
                print('    Exec Root  : %s' % ts.exec_root)

        # Show the tests, if requested.
        if opts.showTests:
            print('-- Available Tests --')
            for ts, ts_tests in suitesAndTests:
                ts_tests.sort(key=lambda test: test.path_in_suite)
                for test in ts_tests:
                    print('  %s' % (test.getFullName(),))

        # Listing is a query, not a run; exit without executing anything.
        sys.exit(0)

    # Select and order the tests.
    numTotalTests = len(run.tests)

    # First, select based on the filter expression if given.
    if opts.filter:
        try:
            rex = re.compile(opts.filter)
        except re.error:
            parser.error("invalid regular expression for --filter: %r" % (
                opts.filter,))
        run.tests = [result_test for result_test in run.tests
                     if rex.search(result_test.getFullName())]

    # Then select the order.
    if opts.shuffle:
        random.shuffle(run.tests)
    elif opts.incremental:
        sort_by_incremental_cache(run)
    else:
        run.tests.sort(key=lambda result_test: result_test.getFullName())

    # Finally limit the number of tests, if desired.
    if opts.maxTests is not None:
        run.tests = run.tests[:opts.maxTests]

    # Don't create more threads than tests.
    opts.numThreads = min(len(run.tests), opts.numThreads)

    # Because some tests use threads internally, and at least on Linux each
    # of these threads counts toward the current process limit, try to
    # raise the (soft) process limit so that tests don't fail due to
    # resource exhaustion.
    try:
        cpus = lit.util.detectCPUs()
        desired_limit = opts.numThreads * cpus * 2 # the 2 is a safety factor

        # Import the resource module here inside this try block because it
        # will likely fail on Windows.
        import resource

        max_procs_soft, max_procs_hard = resource.getrlimit(resource.RLIMIT_NPROC)
        desired_limit = min(desired_limit, max_procs_hard)

        if max_procs_soft < desired_limit:
            resource.setrlimit(resource.RLIMIT_NPROC,
                               (desired_limit, max_procs_hard))
            litConfig.note('raised the process limit from %d to %d' %
                           (max_procs_soft, desired_limit))
    except Exception:
        # Raising the limit is best effort; carry on if it fails.
        pass

    extra = ''
    if len(run.tests) != numTotalTests:
        extra = ' of %d' % numTotalTests
    header = '-- Testing: %d%s tests, %d threads --' % (len(run.tests), extra,
                                                        opts.numThreads)

    progressBar = None
    if not opts.quiet:
        if opts.succinct and opts.useProgressBar:
            try:
                tc = lit.ProgressBar.TerminalController()
                progressBar = lit.ProgressBar.ProgressBar(tc, header)
            except ValueError:
                # Fall back when the terminal lacks curses capabilities.
                print(header)
                progressBar = lit.ProgressBar.SimpleProgressBar('Testing: ')
        else:
            print(header)

    startTime = time.time()
    display = TestingProgressDisplay(opts, len(run.tests), progressBar)
    try:
        run.execute_tests(display, opts.numThreads, opts.maxTime,
                          opts.useProcesses)
    except KeyboardInterrupt:
        sys.exit(2)
    display.finish()

    testing_time = time.time() - startTime
    if not opts.quiet:
        print('Testing Time: %.2fs' % (testing_time,))

    # Write out the test data, if requested.
    if opts.output_path is not None:
        write_test_results(run, litConfig, testing_time, opts.output_path)

    # List test results organized by kind.
    hasFailures = False
    byCode = {}
    for test in run.tests:
        if test.result.code not in byCode:
            byCode[test.result.code] = []
        byCode[test.result.code].append(test)
        if test.result.code.isFailure:
            hasFailures = True

    # Print each test in any of the failing groups.
    for title, code in (('Unexpected Passing Tests', lit.Test.XPASS),
                        ('Failing Tests', lit.Test.FAIL),
                        ('Unresolved Tests', lit.Test.UNRESOLVED),
                        ('Unsupported Tests', lit.Test.UNSUPPORTED),
                        ('Expected Failing Tests', lit.Test.XFAIL)):
        if (lit.Test.XFAIL == code and not opts.show_xfail) or \
           (lit.Test.UNSUPPORTED == code and not opts.show_unsupported):
            continue
        elts = byCode.get(code)
        if not elts:
            continue
        print('*'*20)
        print('%s (%d):' % (title, len(elts)))
        for test in elts:
            print('    %s' % test.getFullName())
        sys.stdout.write('\n')
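
    # Illustrative output of the loop above (test names made up):
    #   ********************
    #   Failing Tests (2):
    #       MySuite :: foo/bar.c
    #       MySuite :: foo/baz.c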

    if opts.timeTests and run.tests:
        test_times = [(test.getFullName(), test.result.elapsed)
                      for test in run.tests]
        lit.util.printHistogram(test_times, title='Tests')

    for name, code in (('Expected Passes    ', lit.Test.PASS),
                       ('Passes With Retry  ', lit.Test.FLAKYPASS),
                       ('Expected Failures  ', lit.Test.XFAIL),
                       ('Unsupported Tests  ', lit.Test.UNSUPPORTED),
                       ('Unresolved Tests   ', lit.Test.UNRESOLVED),
                       ('Unexpected Passes  ', lit.Test.XPASS),
                       ('Unexpected Failures', lit.Test.FAIL)):
        if opts.quiet and not code.isFailure:
            continue
        N = len(byCode.get(code, []))
        if N:
            print('  %s: %d' % (name, N))
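
    # Illustrative summary (counts made up); zero-count categories are
    # skipped, and quiet mode shows only the failing ones:
    #   Expected Passes    : 1234
    #   Expected Failures  : 12
    #   Unexpected Failures: 2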

    if opts.xunit_output_file:
        # Collect the tests, indexed by test suite.
        by_suite = {}
        for result_test in run.tests:
            suite = result_test.suite.config.name
            if suite not in by_suite:
                by_suite[suite] = {
                    'passes': 0,
                    'failures': 0,
                    'tests': []}
            by_suite[suite]['tests'].append(result_test)
            if result_test.result.code.isFailure:
                by_suite[suite]['failures'] += 1
            else:
                by_suite[suite]['passes'] += 1
        xunit_output_file = open(opts.xunit_output_file, "w")
        xunit_output_file.write("<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n")
        xunit_output_file.write("<testsuites>\n")
        for suite_name, suite in by_suite.items():
            safe_suite_name = suite_name.replace(".", "-")
            xunit_output_file.write("<testsuite name='" + safe_suite_name + "'")
            xunit_output_file.write(" tests='" + str(suite['passes'] +
                                                     suite['failures']) + "'")
            xunit_output_file.write(" failures='" + str(suite['failures']) +
                                    "'>\n")
            for result_test in suite['tests']:
                xunit_output_file.write(result_test.getJUnitXML() + "\n")
            xunit_output_file.write("</testsuite>\n")
        xunit_output_file.write("</testsuites>")
        xunit_output_file.close()
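
    # Exit-code protocol: 0 on success, 1 if any test failed, and 2 on
    # lit-level errors or keyboard interrupt.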

    # If we encountered any additional errors, exit abnormally.
    if litConfig.numErrors:
        sys.stderr.write('\n%d error(s), exiting.\n' % litConfig.numErrors)
        sys.exit(2)

    # Warn about warnings.
    if litConfig.numWarnings:
        sys.stderr.write('\n%d warning(s) in tests.\n' % litConfig.numWarnings)

    if hasFailures:
        sys.exit(1)
    sys.exit(0)

if __name__ == '__main__':
    main()