def update(self, test):
self.completed += 1
+
+ if self.opts.incremental:
+ update_incremental_cache(test)
+
if self.progressBar:
self.progressBar.update(float(self.completed)/self.numTests,
test.getFullName())
- if not test.result.code.isFailure and \
- (self.opts.quiet or self.opts.succinct):
+ shouldShow = test.result.code.isFailure or \
+ self.opts.showAllOutput or \
+ (not self.opts.quiet and not self.opts.succinct)
+ if not shouldShow:
return
if self.progressBar:
self.completed, self.numTests))
# Show the test failure output, if requested.
- if test.result.code.isFailure and self.opts.showOutput:
- print("%s TEST '%s' FAILED %s" % ('*'*20, test.getFullName(),
- '*'*20))
+ if (test.result.code.isFailure and self.opts.showOutput) or \
+ self.opts.showAllOutput:
+ if test.result.code.isFailure:
+ print("%s TEST '%s' FAILED %s" % ('*'*20, test.getFullName(),
+ '*'*20))
print(test.result.output)
print("*" * 20)
finally:
f.close()
+def update_incremental_cache(test):
+    # Record a test failure for --incremental mode by bumping the mtime of
+    # the test's source file; sort_by_incremental_cache later orders tests by
+    # mtime so recently-failed (and recently-edited) tests run first.
+    if not test.result.code.isFailure:
+        return
+    fname = test.getFilePath()
+    # Touch the file: utime with None sets both atime and mtime to "now".
+    os.utime(fname, None)
+
+def sort_by_incremental_cache(run):
+    # Order run.tests so the most recently modified test files run first.
+    # update_incremental_cache touches failing tests' files, so with
+    # --incremental the previously-failing and freshly-edited tests lead.
+    def sortIndex(test):
+        fname = test.getFilePath()
+        try:
+            # Negate so newer mtimes (larger values) sort earlier.
+            return -os.path.getmtime(fname)
+        except OSError:
+            # Only catch filesystem errors (missing/unreadable file); a bare
+            # except here would also swallow KeyboardInterrupt. Such tests
+            # sort last, after every real (negative) mtime key.
+            return 0
+    run.tests.sort(key=sortIndex)
+
def main(builtinParameters = {}):
- # Bump the GIL check interval, its more important to get any one thread to a
- # blocking operation (hopefully exec) than to try and unblock other threads.
- #
- # FIXME: This is a hack.
- sys.setcheckinterval(1000)
+ # Use processes by default on Unix platforms.
+ isWindows = platform.system() == 'Windows'
+ useProcessesIsDefault = not isWindows
global options
from optparse import OptionParser, OptionGroup
parser = OptionParser("usage: %prog [options] {file-or-path}")
+ parser.add_option("", "--version", dest="show_version",
+ help="Show version and exit",
+ action="store_true", default=False)
parser.add_option("-j", "--threads", dest="numThreads", metavar="N",
help="Number of testing threads",
type=int, action="store", default=None)
parser.add_option("", "--config-prefix", dest="configPrefix",
metavar="NAME", help="Prefix for 'lit' config files",
action="store", default=None)
- parser.add_option("", "--param", dest="userParameters",
+ parser.add_option("-D", "--param", dest="userParameters",
metavar="NAME=VAL",
help="Add 'NAME' = 'VAL' to the user defined parameters",
type=str, action="append", default=[])
help="Reduce amount of output",
action="store_true", default=False)
group.add_option("-v", "--verbose", dest="showOutput",
- help="Show all test output",
+ help="Show test output for failures",
+ action="store_true", default=False)
+ group.add_option("-a", "--show-all", dest="showAllOutput",
+ help="Display all commandlines and output",
action="store_true", default=False)
group.add_option("-o", "--output", dest="output_path",
help="Write test results to the provided path",
group.add_option("", "--no-progress-bar", dest="useProgressBar",
help="Do not use curses based progress bar",
action="store_false", default=True)
+ group.add_option("", "--show-unsupported", dest="show_unsupported",
+ help="Show unsupported tests",
+ action="store_true", default=False)
+ group.add_option("", "--show-xfail", dest="show_xfail",
+ help="Show tests that were expected to fail",
+ action="store_true", default=False)
parser.add_option_group(group)
group = OptionGroup(parser, "Test Execution")
group.add_option("", "--no-execute", dest="noExecute",
help="Don't execute any tests (assume PASS)",
action="store_true", default=False)
+ group.add_option("", "--xunit-xml-output", dest="xunit_output_file",
+ help=("Write XUnit-compatible XML test reports to the"
+ " specified file"), default=None)
parser.add_option_group(group)
group = OptionGroup(parser, "Test Selection")
group.add_option("", "--shuffle", dest="shuffle",
help="Run tests in random order",
action="store_true", default=False)
+ group.add_option("-i", "--incremental", dest="incremental",
+ help="Run modified and failing tests first (updates "
+ "mtimes)",
+ action="store_true", default=False)
group.add_option("", "--filter", dest="filter", metavar="REGEX",
help=("Only run tests with paths matching the given "
"regular expression"),
action="store_true", default=False)
group.add_option("", "--use-processes", dest="useProcesses",
help="Run tests in parallel with processes (not threads)",
- action="store_true", default=False)
+ action="store_true", default=useProcessesIsDefault)
group.add_option("", "--use-threads", dest="useProcesses",
help="Run tests in parallel with threads (not processes)",
- action="store_false", default=False)
+ action="store_false", default=useProcessesIsDefault)
parser.add_option_group(group)
(opts, args) = parser.parse_args()
+ if opts.show_version:
+ print("lit %s" % (lit.__version__,))
+ return
+
if not args:
parser.error('No inputs specified')
valgrindArgs = opts.valgrindArgs,
noExecute = opts.noExecute,
debug = opts.debug,
- isWindows = (platform.system()=='Windows'),
+ isWindows = isWindows,
params = userParams,
config_prefix = opts.configPrefix)
if opts.showSuites or opts.showTests:
# Aggregate the tests by suite.
suitesAndTests = {}
- for t in run.tests:
- if t.suite not in suitesAndTests:
- suitesAndTests[t.suite] = []
- suitesAndTests[t.suite].append(t)
+ for result_test in run.tests:
+ if result_test.suite not in suitesAndTests:
+ suitesAndTests[result_test.suite] = []
+ suitesAndTests[result_test.suite].append(result_test)
suitesAndTests = list(suitesAndTests.items())
suitesAndTests.sort(key = lambda item: item[0].name)
except:
parser.error("invalid regular expression for --filter: %r" % (
opts.filter))
- run.tests = [t for t in run.tests
- if rex.search(t.getFullName())]
+ run.tests = [result_test for result_test in run.tests
+ if rex.search(result_test.getFullName())]
# Then select the order.
if opts.shuffle:
random.shuffle(run.tests)
+ elif opts.incremental:
+ sort_by_incremental_cache(run)
else:
- run.tests.sort(key = lambda t: t.getFullName())
+ run.tests.sort(key = lambda result_test: result_test.getFullName())
# Finally limit the number of tests, if desired.
if opts.maxTests is not None:
# Don't create more threads than tests.
opts.numThreads = min(len(run.tests), opts.numThreads)
+ # Because some tests use threads internally, and at least on Linux each
+ # of these threads counts toward the current process limit, try to
+ # raise the (soft) process limit so that tests don't fail due to
+ # resource exhaustion.
+ try:
+ cpus = lit.util.detectCPUs()
+ desired_limit = opts.numThreads * cpus * 2 # the 2 is a safety factor
+
+ # Import the resource module here inside this try block because it
+ # will likely fail on Windows.
+ import resource
+
+ max_procs_soft, max_procs_hard = resource.getrlimit(resource.RLIMIT_NPROC)
+ desired_limit = min(desired_limit, max_procs_hard)
+
+ if max_procs_soft < desired_limit:
+ resource.setrlimit(resource.RLIMIT_NPROC, (desired_limit, max_procs_hard))
+ litConfig.note('raised the process limit from %d to %d' % \
+ (max_procs_soft, desired_limit))
+ except:
+ pass
+
extra = ''
if len(run.tests) != numTotalTests:
extra = ' of %d' % numTotalTests
# Print each test in any of the failing groups.
for title,code in (('Unexpected Passing Tests', lit.Test.XPASS),
('Failing Tests', lit.Test.FAIL),
- ('Unresolved Tests', lit.Test.UNRESOLVED)):
+ ('Unresolved Tests', lit.Test.UNRESOLVED),
+ ('Unsupported Tests', lit.Test.UNSUPPORTED),
+ ('Expected Failing Tests', lit.Test.XFAIL)):
+ if (lit.Test.XFAIL == code and not opts.show_xfail) or \
+ (lit.Test.UNSUPPORTED == code and not opts.show_unsupported):
+ continue
elts = byCode.get(code)
if not elts:
continue
lit.util.printHistogram(test_times, title='Tests')
for name,code in (('Expected Passes ', lit.Test.PASS),
+ ('Passes With Retry ', lit.Test.FLAKYPASS),
('Expected Failures ', lit.Test.XFAIL),
('Unsupported Tests ', lit.Test.UNSUPPORTED),
('Unresolved Tests ', lit.Test.UNRESOLVED),
('Unexpected Passes ', lit.Test.XPASS),
- ('Unexpected Failures', lit.Test.FAIL),):
+ ('Unexpected Failures', lit.Test.FAIL)):
if opts.quiet and not code.isFailure:
continue
N = len(byCode.get(code,[]))
if N:
print(' %s: %d' % (name,N))
+ if opts.xunit_output_file:
+ # Collect the tests, indexed by test suite
+ by_suite = {}
+ for result_test in run.tests:
+ suite = result_test.suite.config.name
+ if suite not in by_suite:
+ by_suite[suite] = {
+ 'passes' : 0,
+ 'failures' : 0,
+ 'tests' : [] }
+ by_suite[suite]['tests'].append(result_test)
+ if result_test.result.code.isFailure:
+ by_suite[suite]['failures'] += 1
+ else:
+ by_suite[suite]['passes'] += 1
+ xunit_output_file = open(opts.xunit_output_file, "w")
+ xunit_output_file.write("<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n")
+ xunit_output_file.write("<testsuites>\n")
+ for suite_name, suite in by_suite.items():
+ safe_suite_name = suite_name.replace(".", "-")
+ xunit_output_file.write("<testsuite name='" + safe_suite_name + "'")
+ xunit_output_file.write(" tests='" + str(suite['passes'] +
+ suite['failures']) + "'")
+ xunit_output_file.write(" failures='" + str(suite['failures']) +
+ "'>\n")
+ for result_test in suite['tests']:
+ xunit_output_file.write(result_test.getJUnitXML() + "\n")
+ xunit_output_file.write("</testsuite>\n")
+ xunit_output_file.write("</testsuites>")
+ xunit_output_file.close()
+
# If we encountered any additional errors, exit abnormally.
if litConfig.numErrors:
sys.stderr.write('\n%d error(s), exiting.\n' % litConfig.numErrors)