#!/usr/bin/env python

"""
lit - LLVM Integrated Tester.

See lit.pod for more information.
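
Typical invocation (the test directory below is a placeholder; the available
options are those defined in main()):

  lit.py -s -v -j 4 path/to/test/suite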
7 """
8
import math, os, platform, random, re, sys, time, threading, traceback

import ProgressBar
import TestRunner
import Util

from TestingConfig import TestingConfig
import LitConfig
import Test

# Configuration files to look for when discovering test suites. These can be
# overridden with --config-prefix.
#
# FIXME: Rename to 'config.lit', 'site.lit', and 'local.lit' ?
gConfigName = 'lit.cfg'
gSiteConfigName = 'lit.site.cfg'

kLocalConfigName = 'lit.local.cfg'

class TestingProgressDisplay:
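    """Console reporter that prints per-test results (and drives the optional
    progress bar) as tests complete; output is serialized with a lock so
    multiple Tester threads can report safely."""
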
    def __init__(self, opts, numTests, progressBar=None):
        self.opts = opts
        self.numTests = numTests
        self.current = None
        self.lock = threading.Lock()
        self.progressBar = progressBar
        self.completed = 0

    def update(self, test):
        # Avoid locking overhead in quiet mode
        if self.opts.quiet and not test.result.isFailure:
            self.completed += 1
            return

        # Output lock.
        self.lock.acquire()
        try:
            self.handleUpdate(test)
        finally:
            self.lock.release()

    def finish(self):
        if self.progressBar:
            self.progressBar.clear()
        elif self.opts.quiet:
            pass
        elif self.opts.succinct:
            sys.stdout.write('\n')

    def handleUpdate(self, test):
        self.completed += 1
        if self.progressBar:
            self.progressBar.update(float(self.completed)/self.numTests,
                                    test.getFullName())

        if self.opts.succinct and not test.result.isFailure:
            return

        if self.progressBar:
            self.progressBar.clear()

        print '%s: %s (%d of %d)' % (test.result.name, test.getFullName(),
                                     self.completed, self.numTests)

        if test.result.isFailure and self.opts.showOutput:
            print "%s TEST '%s' FAILED %s" % ('*'*20, test.getFullName(),
                                              '*'*20)
            print test.output
            print "*" * 20

        sys.stdout.flush()

class TestProvider:
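    """Hands out tests to the worker threads one at a time, returning None once
    the list is exhausted or the --max-time deadline has passed."""
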
    def __init__(self, tests, maxTime):
        self.maxTime = maxTime
        self.iter = iter(tests)
        self.lock = threading.Lock()
        self.startTime = time.time()

    def get(self):
        # Check if we have run out of time.
        if self.maxTime is not None:
            if time.time() - self.startTime > self.maxTime:
                return None

        # Otherwise take the next test.
        self.lock.acquire()
        try:
            item = self.iter.next()
        except StopIteration:
            item = None
        finally:
            self.lock.release()
        return item

class Tester(threading.Thread):
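    """Worker thread that repeatedly takes a test from the provider, executes
    it via its test format, and reports the result to the shared display."""
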
    def __init__(self, litConfig, provider, display):
        threading.Thread.__init__(self)
        self.litConfig = litConfig
        self.provider = provider
        self.display = display

    def run(self):
        while True:
            item = self.provider.get()
            if item is None:
                break
            self.runTest(item)

    def runTest(self, test):
        result = None
        startTime = time.time()
        try:
            result, output = test.config.test_format.execute(test,
                                                             self.litConfig)
        except KeyboardInterrupt:
            # This is a sad hack. Unfortunately subprocess goes
            # bonkers with ctrl-c and we start forking merrily.
            print '\nCtrl-C detected, goodbye.'
            os.kill(0, 9)
        except:
            if self.litConfig.debug:
                raise
            result = Test.UNRESOLVED
            output = 'Exception during script execution:\n'
            output += traceback.format_exc()
            output += '\n'
        elapsed = time.time() - startTime

        test.setResult(result, output, elapsed)
        self.display.update(test)

def dirContainsTestSuite(path):
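    """Return the path of the site or suite config file that marks @arg path as
    a test suite root, or None if neither is present."""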
    cfgpath = os.path.join(path, gSiteConfigName)
    if os.path.exists(cfgpath):
        return cfgpath
    cfgpath = os.path.join(path, gConfigName)
    if os.path.exists(cfgpath):
        return cfgpath

def getTestSuite(item, litConfig, cache):
    """getTestSuite(item, litConfig, cache) -> (suite, relative_path)

    Find the test suite containing @arg item.

    @retval (None, ...) - Indicates no test suite contains @arg item.
    @retval (suite, relative_path) - The suite that @arg item is in, and its
    relative path inside that suite.
    """
    def search1(path):
        # Check for a site config or a lit config.
        cfgpath = dirContainsTestSuite(path)

        # If we didn't find a config file, keep looking.
        if not cfgpath:
            parent,base = os.path.split(path)
            if parent == path:
                return (None, ())

            ts, relative = search(parent)
            return (ts, relative + (base,))

        # We found a config file, load it.
        if litConfig.debug:
            litConfig.note('loading suite config %r' % cfgpath)

        cfg = TestingConfig.frompath(cfgpath, None, litConfig, mustExist = True)
        source_root = os.path.realpath(cfg.test_source_root or path)
        exec_root = os.path.realpath(cfg.test_exec_root or path)
        return Test.TestSuite(cfg.name, source_root, exec_root, cfg), ()

    def search(path):
        # Check for an already instantiated test suite.
        res = cache.get(path)
        if res is None:
            cache[path] = res = search1(path)
        return res

    # Canonicalize the path.
    item = os.path.realpath(item)

    # Skip files and virtual components.
    components = []
    while not os.path.isdir(item):
        parent,base = os.path.split(item)
        if parent == item:
            return (None, ())
        components.append(base)
        item = parent
    components.reverse()

    ts, relative = search(item)
    return ts, tuple(relative + tuple(components))

def getLocalConfig(ts, path_in_suite, litConfig, cache):
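    """Return the local configuration in effect for @arg path_in_suite within
    test suite @arg ts, loading (and caching) lit.local.cfg files along the
    path as needed."""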
    def search1(path_in_suite):
        # Get the parent config.
        if not path_in_suite:
            parent = ts.config
        else:
            parent = search(path_in_suite[:-1])

        # Load the local configuration.
        source_path = ts.getSourcePath(path_in_suite)
        cfgpath = os.path.join(source_path, kLocalConfigName)
        if litConfig.debug:
            litConfig.note('loading local config %r' % cfgpath)
        return TestingConfig.frompath(cfgpath, parent, litConfig,
                                      mustExist = False,
                                      config = parent.clone(cfgpath))

    def search(path_in_suite):
        key = (ts, path_in_suite)
        res = cache.get(key)
        if res is None:
            cache[key] = res = search1(path_in_suite)
        return res

    return search(path_in_suite)

def getTests(path, litConfig, testSuiteCache, localConfigCache):
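    """Find the test suite containing @arg path and return an iterable of the
    tests it designates; returns an empty tuple (with a warning) if no suite
    is found."""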
    # Find the test suite for this input and its relative path.
    ts,path_in_suite = getTestSuite(path, litConfig, testSuiteCache)
    if ts is None:
        litConfig.warning('unable to find test suite for %r' % path)
        return ()

    if litConfig.debug:
        litConfig.note('resolved input %r to %r::%r' % (path, ts.name,
                                                        path_in_suite))

    return getTestsInSuite(ts, path_in_suite, litConfig,
                           testSuiteCache, localConfigCache)

def getTestsInSuite(ts, path_in_suite, litConfig,
                    testSuiteCache, localConfigCache):
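    """Generator yielding the tests at @arg path_in_suite inside @arg ts,
    recursing into subdirectories and any nested test suites found there."""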
    # Check that the source path exists (errors here are reported by the
    # caller).
    source_path = ts.getSourcePath(path_in_suite)
    if not os.path.exists(source_path):
        return

    # Check if the user named a test directly.
    if not os.path.isdir(source_path):
        lc = getLocalConfig(ts, path_in_suite[:-1], litConfig, localConfigCache)
        yield Test.Test(ts, path_in_suite, lc)
        return

    # Otherwise we have a directory to search for tests, start by getting the
    # local configuration.
    lc = getLocalConfig(ts, path_in_suite, litConfig, localConfigCache)

    # Search for tests.
    for res in lc.test_format.getTestsInDirectory(ts, path_in_suite,
                                                  litConfig, lc):
        yield res

    # Search subdirectories.
    for filename in os.listdir(source_path):
        # FIXME: This doesn't belong here?
        if filename in ('Output', '.svn') or filename in lc.excludes:
            continue

        # Ignore non-directories.
        file_sourcepath = os.path.join(source_path, filename)
        if not os.path.isdir(file_sourcepath):
            continue

        # Check for nested test suites, first in the execpath in case there is a
        # site configuration and then in the source path.
        file_execpath = ts.getExecPath(path_in_suite + (filename,))
        if dirContainsTestSuite(file_execpath):
            subiter = getTests(file_execpath, litConfig,
                               testSuiteCache, localConfigCache)
        elif dirContainsTestSuite(file_sourcepath):
            subiter = getTests(file_sourcepath, litConfig,
                               testSuiteCache, localConfigCache)
        else:
            # Otherwise, continue loading from inside this test suite.
            subiter = getTestsInSuite(ts, path_in_suite + (filename,),
                                      litConfig, testSuiteCache,
                                      localConfigCache)

        for res in subiter:
            yield res

def runTests(numThreads, litConfig, provider, display):
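    """Run every test from @arg provider on @arg numThreads threads, reporting
    results through @arg display; with a single thread, run inline so
    profiling works."""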
    # If only using one testing thread, don't use threads at all; this lets us
    # profile, among other things.
    if numThreads == 1:
        t = Tester(litConfig, provider, display)
        t.run()
        return

    # Otherwise spin up the testing threads and wait for them to finish.
    testers = [Tester(litConfig, provider, display)
               for i in range(numThreads)]
    for t in testers:
        t.start()
    try:
        for t in testers:
            t.join()
    except KeyboardInterrupt:
        sys.exit(2)

def main():
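    """Command line entry point: parse options, discover the requested tests,
    run them, and print a summary of the results."""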
    global options
    from optparse import OptionParser, OptionGroup
    parser = OptionParser("usage: %prog [options] {file-or-path}")

    parser.add_option("-j", "--threads", dest="numThreads", metavar="N",
                      help="Number of testing threads",
                      type=int, action="store", default=None)
    parser.add_option("", "--config-prefix", dest="configPrefix",
                      metavar="NAME", help="Prefix for 'lit' config files",
                      action="store", default=None)

    group = OptionGroup(parser, "Output Format")
    # FIXME: I find these names very confusing, although I like the
    # functionality.
    group.add_option("-q", "--quiet", dest="quiet",
                     help="Suppress all output except test failures",
                     action="store_true", default=False)
    group.add_option("-s", "--succinct", dest="succinct",
                     help="Reduce amount of output",
                     action="store_true", default=False)
    group.add_option("-v", "--verbose", dest="showOutput",
                     help="Show all test output",
                     action="store_true", default=False)
    group.add_option("", "--no-progress-bar", dest="useProgressBar",
                     help="Do not use curses-based progress bar",
                     action="store_false", default=True)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Test Execution")
    group.add_option("", "--path", dest="path",
                     help="Additional paths to add to testing environment",
                     action="append", type=str, default=[])
    group.add_option("", "--vg", dest="useValgrind",
                     help="Run tests under valgrind",
                     action="store_true", default=False)
    group.add_option("", "--vg-arg", dest="valgrindArgs", metavar="ARG",
                     help="Specify an extra argument for valgrind",
                     type=str, action="append", default=[])
    group.add_option("", "--time-tests", dest="timeTests",
                     help="Track elapsed wall time for each test",
                     action="store_true", default=False)
    group.add_option("", "--no-execute", dest="noExecute",
                     help="Don't execute any tests (assume PASS)",
                     action="store_true", default=False)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Test Selection")
    group.add_option("", "--max-tests", dest="maxTests", metavar="N",
                     help="Maximum number of tests to run",
                     action="store", type=int, default=None)
    group.add_option("", "--max-time", dest="maxTime", metavar="N",
                     help="Maximum time to spend testing (in seconds)",
                     action="store", type=float, default=None)
    group.add_option("", "--shuffle", dest="shuffle",
                     help="Run tests in random order",
                     action="store_true", default=False)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Debug and Experimental Options")
    group.add_option("", "--debug", dest="debug",
                     help="Enable debugging (for 'lit' development)",
                     action="store_true", default=False)
    group.add_option("", "--show-suites", dest="showSuites",
                     help="Show discovered test suites",
                     action="store_true", default=False)
    group.add_option("", "--no-tcl-as-sh", dest="useTclAsSh",
                     help="Don't run Tcl scripts using 'sh'",
                     action="store_false", default=True)
    parser.add_option_group(group)

    (opts, args) = parser.parse_args()

    if not args:
        parser.error('No inputs specified')

    if opts.configPrefix is not None:
        global gConfigName, gSiteConfigName
        gConfigName = '%s.cfg' % opts.configPrefix
        gSiteConfigName = '%s.site.cfg' % opts.configPrefix

    if opts.numThreads is None:
        opts.numThreads = Util.detectCPUs()

    inputs = args

    # Create the global config object.
    litConfig = LitConfig.LitConfig(progname = os.path.basename(sys.argv[0]),
                                    path = opts.path,
                                    quiet = opts.quiet,
                                    useValgrind = opts.useValgrind,
                                    valgrindArgs = opts.valgrindArgs,
                                    useTclAsSh = opts.useTclAsSh,
                                    noExecute = opts.noExecute,
                                    debug = opts.debug,
                                    isWindows = (platform.system()=='Windows'))

    # Load the tests from the inputs.
    tests = []
    testSuiteCache = {}
    localConfigCache = {}
    for input in inputs:
        prev = len(tests)
        tests.extend(getTests(input, litConfig,
                              testSuiteCache, localConfigCache))
        if prev == len(tests):
            litConfig.warning('input %r contained no tests' % input)

    # If there were any errors during test discovery, exit now.
    if litConfig.numErrors:
        print >>sys.stderr, '%d errors, exiting.' % litConfig.numErrors
        sys.exit(2)

    if opts.showSuites:
        suitesAndTests = dict([(ts,[])
                               for ts,_ in testSuiteCache.values()
                               if ts])
        for t in tests:
            suitesAndTests[t.suite].append(t)

        print '-- Test Suites --'
        suitesAndTests = suitesAndTests.items()
        suitesAndTests.sort(key = lambda (ts,_): ts.name)
        for ts,ts_tests in suitesAndTests:
            print '  %s - %d tests' % (ts.name, len(ts_tests))
            print '    Source Root: %s' % ts.source_root
            print '    Exec Root  : %s' % ts.exec_root

    # Select and order the tests.
    numTotalTests = len(tests)
    if opts.shuffle:
        random.shuffle(tests)
    else:
        tests.sort(key = lambda t: t.getFullName())
    if opts.maxTests is not None:
        tests = tests[:opts.maxTests]

    extra = ''
    if len(tests) != numTotalTests:
        extra = ' of %d' % numTotalTests
    header = '-- Testing: %d%s tests, %d threads --' % (len(tests), extra,
                                                        opts.numThreads)

    progressBar = None
    if not opts.quiet:
        if opts.succinct and opts.useProgressBar:
            try:
                tc = ProgressBar.TerminalController()
                progressBar = ProgressBar.ProgressBar(tc, header)
            except ValueError:
                print header
                progressBar = ProgressBar.SimpleProgressBar('Testing: ')
        else:
            print header

    # Don't create more threads than tests.
    opts.numThreads = min(len(tests), opts.numThreads)

    startTime = time.time()
    display = TestingProgressDisplay(opts, len(tests), progressBar)
    provider = TestProvider(tests, opts.maxTime)
    runTests(opts.numThreads, litConfig, provider, display)
    display.finish()

    if not opts.quiet:
        print 'Testing Time: %.2fs' % (time.time() - startTime)

    # Update results for any tests which weren't run.
    for t in tests:
        if t.result is None:
            t.setResult(Test.UNRESOLVED, '', 0.0)

    # List test results organized by kind.
    hasFailures = False
    byCode = {}
    for t in tests:
        if t.result not in byCode:
            byCode[t.result] = []
        byCode[t.result].append(t)
        if t.result.isFailure:
            hasFailures = True

    # FIXME: Show unresolved and (optionally) unsupported tests.
    for title,code in (('Unexpected Passing Tests', Test.XPASS),
                       ('Failing Tests', Test.FAIL)):
        elts = byCode.get(code)
        if not elts:
            continue
        print '*'*20
        print '%s (%d):' % (title, len(elts))
        for t in elts:
            print '    %s' % t.getFullName()
        print

    if opts.timeTests:
        byTime = list(tests)
        byTime.sort(key = lambda t: t.elapsed)
        if byTime:
            Util.printHistogram([(t.getFullName(), t.elapsed) for t in byTime],
                                title='Tests')

    for name,code in (('Expected Passes    ', Test.PASS),
                      ('Expected Failures  ', Test.XFAIL),
                      ('Unsupported Tests  ', Test.UNSUPPORTED),
                      ('Unresolved Tests   ', Test.UNRESOLVED),
                      ('Unexpected Passes  ', Test.XPASS),
                      ('Unexpected Failures', Test.FAIL),):
        if opts.quiet and not code.isFailure:
            continue
        N = len(byCode.get(code,[]))
        if N:
            print '  %s: %d' % (name,N)

    # If we encountered any additional errors, exit abnormally.
    if litConfig.numErrors:
        print >>sys.stderr, '\n%d error(s), exiting.' % litConfig.numErrors
        sys.exit(2)

    # Warn about warnings.
    if litConfig.numWarnings:
        print >>sys.stderr, '\n%d warning(s) in tests.' % litConfig.numWarnings

    if hasFailures:
        sys.exit(1)
    sys.exit(0)

if __name__ == '__main__':
    # Bump the GIL check interval; it's more important to get any one thread to
    # a blocking operation (hopefully exec) than to try to unblock other
    # threads.
    sys.setcheckinterval(1000)
    main()