Python source code examples: testtools.TestResult()
Example 1
def _run_prepared_result(self, result):
    """
    Run the test with a result that conforms to testtools' extended
    ``TestResult`` interface.

    This overrides a method in the base ``RunTest`` that is intended to be
    overridden.
    """
    flaky = _get_flaky_annotation(self._case)
    if flaky is not None:
        return self._run_flaky_test(self._case, result, flaky)
    # No flaky attributes? Then run as normal.
    return self._run_test(self._case, result)
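The hook above is testtools-internal API, so here is only a hedged sketch of how such a ``RunTest`` subclass gets attached to a test case: testtools picks up the class-level ``run_tests_with`` attribute when the case is run. The ``LoggingRunTest`` and ``ExampleTest`` names are invented for illustration.

import testtools
from testtools import RunTest, TestResult


class LoggingRunTest(RunTest):
    # Hypothetical subclass: note that the private hook was reached, then
    # delegate to the stock testtools behaviour.
    def _run_prepared_result(self, result):
        print('running %s' % (self.case.id(),))
        return super(LoggingRunTest, self)._run_prepared_result(result)


class ExampleTest(testtools.TestCase):
    # testtools looks at ``run_tests_with`` to decide which RunTest class
    # drives the test.
    run_tests_with = LoggingRunTest

    def test_passes(self):
        self.assertEqual(1, 1)


result = TestResult()
ExampleTest('test_passes').run(result)
print(result.wasSuccessful())  # True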
Example 2
def _run_test(self, case, result):
    """
    Run ``case`` with the ``RunTest`` we are wrapping.

    :param testtools.TestCase case: The test to run.
    :param testtools.TestResult result: The test result to report to.
        Must conform to the testtools extended test result interface.
    :return: The modified ``result``.
    """
    run_test = self._run_test_factory(case, *self._args, **self._kwargs)
    return run_test._run_prepared_result(result)
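For comparison, a wrapper like this can also stick to the public ``RunTest.run()`` entry point when it does not need to hand over an already-prepared result. A rough, hypothetical sketch of the same factory pattern (``_RetryWrapper`` and ``ExampleTest`` are made up; the real class presumably carries retry logic as well):

import testtools
from testtools import RunTest, TestResult


class _RetryWrapper(object):
    # Hypothetical stand-in for the wrapper above: it remembers a RunTest
    # factory plus extra arguments and builds a fresh runner per case.
    def __init__(self, run_test_factory, *args, **kwargs):
        self._run_test_factory = run_test_factory
        self._args = args
        self._kwargs = kwargs

    def run(self, case, result):
        run_test = self._run_test_factory(case, *self._args, **self._kwargs)
        # Public entry point; testtools adapts ``result`` itself.
        return run_test.run(result)


class ExampleTest(testtools.TestCase):
    def test_passes(self):
        self.assertEqual(1, 1)


result = TestResult()
_RetryWrapper(RunTest).run(ExampleTest('test_passes'), result)
print(result.wasSuccessful())  # True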
Example 3
def _attempt_test(self, case):
    """
    Run ``case`` with a temporary result.

    :param testtools.TestCase case: The test to run.
    :return: A tuple of ``(successful, result, details)``, where
        ``successful`` is a boolean indicating whether the test was
        successful, ``result`` is a _ResultType indicating what the test
        result was, and ``details`` is a dictionary of testtools details.
    """
    tmp_result = testtools.TestResult()
    # XXX: Still using internal API of testtools despite improvements in
    # #165. Will need to do follow-up work on testtools to ensure that
    # RunTest.run(case); RunTest.run(case) is supported.
    case._reset()
    self._run_test(case, tmp_result)
    result_type = _get_result_type(tmp_result)
    details = pmap(case.getDetails())
    if result_type == _ResultType.skip:
        # XXX: Work around a testtools bug where it reports stack traces
        # for skips that aren't passed through its supported
        # SkipException: https://bugs.launchpad.net/testtools/+bug/1518100
        [reason] = list(tmp_result.skip_reasons.keys())
        details = details.discard('traceback').set(
            'reason', text_content(reason))
    return (tmp_result.wasSuccessful(), result_type, details)
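The temporary-result trick only needs public ``testtools.TestResult`` attributes. A standalone sketch (``SkippingTest`` is invented) of running one skipping test into a throwaway result and inspecting ``skip_reasons``, ``wasSuccessful()`` and the case's details:

import testtools
from testtools.content import text_content


class SkippingTest(testtools.TestCase):
    def test_skipped(self):
        # Attach a detail, then skip; both end up recorded.
        self.addDetail('note', text_content('about to skip'))
        self.skipTest('not supported on this platform')


tmp_result = testtools.TestResult()
case = SkippingTest('test_skipped')
case.run(tmp_result)

# skip_reasons maps each reason string to the tests skipped for it.
print(list(tmp_result.skip_reasons.keys()))  # ['not supported on this platform']
print(tmp_result.wasSuccessful())            # True: skips are not failures
print(sorted(case.getDetails().keys()))      # e.g. ['note', 'reason']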
Example 4
def _get_result_type(result):
    """
    Get the _ResultType for ``result``.

    :param testtools.TestResult result: A TestResult that has had exactly
        one test run on it.
    :raise ValueError: If ``result`` has run more than one test, or has more
        than one kind of result.
    :return: A _ResultType for that result.
    """
    if result.testsRun != 1:
        raise ValueError('%r has run %d tests, 1 expected' % (
            result, result.testsRun))
    total = sum(map(len, [
        result.errors, result.failures, result.unexpectedSuccesses,
        result.expectedFailures, result.skip_reasons]))
    if total > 1:
        raise ValueError(
            '%r has more than one kind of result: %r found' % (result, total))
    if len(result.errors) > 0:
        return _ResultType.error
    elif len(result.failures) > 0:
        return _ResultType.failure
    elif len(result.unexpectedSuccesses) > 0:
        return _ResultType.unexpected_success
    elif len(result.expectedFailures) > 0:
        return _ResultType.expected_failure
    elif len(result.skip_reasons) > 0:
        return _ResultType.skip
    else:
        return _ResultType.success
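The classification reads nothing but public counters on ``testtools.TestResult``. A small sketch (the test classes are invented) showing which bucket a failure and an error land in:

import testtools


class FailingTest(testtools.TestCase):
    def test_fails(self):
        self.assertEqual(1, 2)


class ErroringTest(testtools.TestCase):
    def test_errors(self):
        raise RuntimeError('boom')


for case in [FailingTest('test_fails'), ErroringTest('test_errors')]:
    result = testtools.TestResult()
    case.run(result)
    # failures and errors are lists of (test, traceback-string) pairs, so
    # for a single-outcome run exactly one bucket is non-empty.
    print(case.id(), len(result.failures), len(result.errors),
          result.testsRun, result.wasSuccessful())
# Expected shape: the failing test populates ``failures``, the erroring
# test populates ``errors``, and both leave testsRun == 1.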
Example 5
def test_attaches_twisted_log(self, base_test_case):
    """
    Flocker base test cases attach the Twisted log as a detail.
    """
    # XXX: If debugging is enabled (either by setting this to True or by
    # removing this line and running --debug-stacktraces), then the log
    # fixtures in this test are empty. However, if we run a failing test
    # manually, the logs appear in the details. Not sure what's going on,
    # so disabling debugging for now.
    self.useFixture(DebugTwisted(False))

    class SomeTest(base_test_case):
        def test_something(self):
            from twisted.python import log
            log.msg('foo')

    test = SomeTest('test_something')
    result = TestResult()
    test.run(result)
    self.expectThat(result, has_results(tests_run=Equals(1)))
    self.assertThat(
        test.getDetails(),
        ContainsDict({
            'twisted-log': match_text_content(MatchesRegex(
                r'^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}[+-]\d{4} \[-\] foo'
            )),
        }))
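The ``twisted-log`` entry checked here is attached by the Flocker base test case's fixtures; the underlying mechanism is testtools' detail API. A minimal sketch of that mechanism, with an invented detail name and hand-written log text instead of the real Twisted log:

import testtools
from testtools.content import text_content
from testtools.matchers import ContainsDict, MatchesRegex


class DetailTest(testtools.TestCase):
    def test_something(self):
        # addDetail attaches named content objects that show up in
        # getDetails() and in failure reports.
        self.addDetail('fake-log', text_content('2016-01-01 [-] foo'))


test = DetailTest('test_something')
test.run(testtools.TestResult())

# Content objects expose their text via as_text(); match on that text.
observed = {name: content.as_text()
            for name, content in test.getDetails().items()}
matcher = ContainsDict({
    'fake-log': MatchesRegex(r'^\d{4}-\d{2}-\d{2} \[-\] foo$'),
})
print(matcher.match(observed) is None)  # True: a matcher returns None on success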
Example 6
def test_separate_eliot_log(self, base_test_case):
    """
    Flocker base test cases attach the eliot log as a detail separate from
    the Twisted log.
    """
    # XXX: If debugging is enabled (either by setting this to True or by
    # removing this line and running --debug-stacktraces), then the log
    # fixtures in this test are empty. However, if we run a failing test
    # manually, the logs appear in the details. Not sure what's going on,
    # so disabling debugging for now.
    self.useFixture(DebugTwisted(False))
    message_type = MessageType(u'foo', fields(name=str), u'test message')

    class SomeTest(base_test_case):
        def test_something(self):
            from twisted.python import log
            log.msg('foo')
            message_type(name='qux').write()

    test = SomeTest('test_something')
    result = TestResult()
    test.run(result)
    self.expectThat(result, has_results(tests_run=Equals(1)))
    self.assertThat(
        test.getDetails(),
        MatchesDict({
            'twisted-log': match_text_content(MatchesRegex(
                r'^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}[+-]\d{4} \[-\] foo'
            )),
            _SplitEliotLogs._ELIOT_LOG_DETAIL_NAME: match_text_content(
                Contains(" message_type: 'foo'\n"
                         " name: 'qux'\n")
            ),
        }))
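Note the switch from ``ContainsDict`` in Example 5 to ``MatchesDict`` here: ``ContainsDict`` only requires the listed keys to be present, while ``MatchesDict`` also rejects unexpected keys, which is what proves the eliot log is a separate detail. A quick sketch of the difference using plain strings:

from testtools.matchers import ContainsDict, Equals, MatchesDict

observed = {'twisted-log': 'foo', 'eliot-log': 'bar'}

# ContainsDict only requires the listed keys to be present and matching.
print(ContainsDict({'twisted-log': Equals('foo')}).match(observed) is None)  # True

# MatchesDict additionally fails on keys it was not told about, so every
# detail has to be accounted for explicitly.
print(MatchesDict({'twisted-log': Equals('foo')}).match(observed) is None)   # False
print(MatchesDict({'twisted-log': Equals('foo'),
                   'eliot-log': Equals('bar')}).match(observed) is None)     # True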
Example 7
def test_logs_after_timeout(self):
    """
    We include logs for tests, even if they time out.
    """
    message_type = MessageType(u'foo', fields(name=str), u'test message')

    class SomeTest(AsyncTestCase):
        # Set the timeout super low, because we're not doing anything.
        run_tests_with = async_runner(timeout=timedelta(seconds=0.00005))

        def test_something(self):
            from twisted.python import log
            log.msg('foo')
            message_type(name='qux').write()
            # Return a Deferred that never fires to guarantee a timeout.
            return Deferred()

    test = SomeTest('test_something')
    result = TestResult()
    test.run(result)
    self.assertThat(
        result,
        has_results(
            tests_run=Equals(1),
            errors=MatchesListwise([MatchesListwise([
                Equals(test),
                MatchesAll(
                    Contains('[-] foo\n'),
                    Contains("message_type: 'foo'"),
                ),
            ])]),
        )
    )
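The nested ``MatchesListwise`` works because ``TestResult.errors`` is a list of ``(test, traceback_text)`` pairs: the outer matcher walks the error list, the inner one walks each pair. A reduced sketch against an ordinary erroring test (``ErroringTest`` is invented, and no timeout is involved):

import testtools
from testtools.matchers import Contains, Equals, MatchesAll, MatchesListwise


class ErroringTest(testtools.TestCase):
    def test_errors(self):
        raise RuntimeError('boom')


test = ErroringTest('test_errors')
result = testtools.TestResult()
test.run(result)

# result.errors == [(test, '<traceback text>')]; MatchesListwise pairs each
# element with the matcher at the same position.
matcher = MatchesListwise([MatchesListwise([
    Equals(test),
    MatchesAll(Contains('RuntimeError'), Contains('boom')),
])])
print(matcher.match(result.errors) is None)  # True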
Example 8
def _run_flaky_test(self, case, result, flaky):
    """
    Run a test that has been decorated with the `@flaky` decorator.

    :param TestCase case: A ``testtools.TestCase`` to run.
    :param TestResult result: A ``TestResult`` object that conforms to the
        testtools extended result interface.
    :param _FlakyAnnotation flaky: A description of the conditions of
        flakiness.
    :return: A ``TestResult`` with the result of running the flaky test.
    """
    result.startTest(case)
    successes = 0
    results = []

    # Optimization to stop running early if there's no way that we can
    # reach the minimum number of successes.
    max_fails = flaky.max_runs - flaky.min_passes
    while (successes < flaky.min_passes and
           len(results) - successes <= max_fails):
        was_successful, result_type, details = self._attempt_test(case)
        if was_successful:
            successes += 1
        results.append((result_type, details))
    successful = successes >= flaky.min_passes

    flaky_data = flaky.to_dict()
    flaky_data.update({'runs': len(results), 'passes': successes})
    flaky_details = {
        'flaky': text_content(pformat(flaky_data)),
    }
    combined_details = _combine_details(
        [flaky_details] + list(r[1] for r in results))

    if successful:
        skip_reported = False
        for result_type, details in results:
            if result_type == _ResultType.skip:
                result.addSkip(case, details=details)
                skip_reported = True
        if not skip_reported:
            Message.new(
                message_type=u"flocker:test:flaky",
                id=case.id(),
                successes=successes,
                passes=len(results),
                min_passes=flaky.min_passes,
                max_runs=flaky.max_runs,
            ).write()
            result.addSuccess(case, details=combined_details)
    else:
        # XXX: How are we going to report on tests that sometimes fail,
        # sometimes error, sometimes skip? Currently we just error.
        result.addError(case, details=combined_details)
    result.stopTest(case)
    return result
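Because the retries are run against throwaway results, the final outcome has to be reported on the real result by hand, which is what the ``startTest``/``addSuccess``/``addError``/``stopTest`` calls do. A stripped-down sketch of that extended-result reporting protocol with invented detail values:

from pprint import pformat

import testtools
from testtools.content import text_content


class ExampleTest(testtools.TestCase):
    def test_flaky(self):
        pass


case = ExampleTest('test_flaky')
result = testtools.TestResult()

# Report one outcome for the whole batch of attempts: open the test,
# attach the aggregated details, record a single success, close it.
result.startTest(case)
flaky_data = {'runs': 3, 'passes': 2, 'min_passes': 2, 'max_runs': 3}
combined_details = {'flaky': text_content(pformat(flaky_data))}
result.addSuccess(case, details=combined_details)
result.stopTest(case)

print(result.testsRun, result.wasSuccessful())  # 1 True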