add --coverage for integration tests

commit 8e4b05214a (parent 3f9f4537b9)
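In outline, the commit threads coverage measurement through every place the integration suite launches a tahoe subprocess: the pytest suite gains a `--coverage` option, the fixtures and helpers that spawn `allmydata.scripts.runner` switch to `coverage run -m` when that option is set, the tahoe runner itself accepts a `--coverage` flag that starts collection via `coverage.process_startup()`, and `.coveragerc` enables `parallel = True` so each subprocess writes its own data file for a later `coverage combine`. The recurring pattern, condensed into a single illustrative sketch (the helper name `build_tahoe_argv` is hypothetical; the diff below repeats this logic inline and in `_tahoe_runner_optional_coverage`):

import sys

def build_tahoe_argv(request, tahoe_args):
    # Hypothetical condensation of the pattern the diff applies in several
    # places: run the tahoe runner under `coverage run -m` (plus the runner's
    # own --coverage flag) when the pytest --coverage option was supplied,
    # otherwise invoke the runner directly.
    if request.config.getoption('coverage'):
        argv = [sys.executable, '-m', 'coverage', 'run',
                '-m', 'allmydata.scripts.runner', '--coverage']
    else:
        argv = [sys.executable, '-m', 'allmydata.scripts.runner']
    return argv + list(tahoe_args)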
.coveragerc
@@ -8,3 +8,5 @@ source =
 omit =
     */allmydata/test/*
     */allmydata/_version.py
+parallel = True
+branch = True
integration/conftest.py
@@ -41,6 +41,10 @@ def pytest_addoption(parser):
         "--keep-tempdir", action="store_true", dest="keep",
         help="Keep the tmpdir with the client directories (introducer, etc)",
     )
+    parser.addoption(
+        "--coverage", action="store_true", dest="coverage",
+        help="Collect coverage statistics",
+    )
 
 
 @pytest.fixture(autouse=True, scope='session')
 def eliot_logging():
@@ -154,6 +158,24 @@ def flog_gatherer(reactor, temp_dir, flog_binary, request):
     return furl
 
 
+def _tahoe_runner_optional_coverage(proto, reactor, request, other_args):
+    """
+    Internal helper. Calls spawnProcess with `-m
+    allmydata.scripts.runner` and `other_args`, optionally inserting a
+    `--coverage` option if the `request` indicates we should.
+    """
+    if request.config.getoption('coverage'):
+        args = [sys.executable, '-m', 'coverage', 'run', '-m', 'allmydata.scripts.runner', '--coverage']
+    else:
+        args = [sys.executable, '-m', 'allmydata.scripts.runner']
+    args += other_args
+    return reactor.spawnProcess(
+        proto,
+        sys.executable,
+        args,
+    )
+
+
 @pytest.fixture(scope='session')
 @log_call(
     action_type=u"integration:introducer",
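Condensed from the hunks that follow, a fixture call through this helper looks roughly like this (illustrative only; `done_proto`, `request` and `intro_dir` come from the surrounding fixture):

_tahoe_runner_optional_coverage(
    done_proto,
    reactor,
    request,
    (
        'create-introducer',
        '--listen=tcp',
        '--hostname=localhost',
        intro_dir,
    ),
)

Note that the leading `sys.executable, '-m', 'allmydata.scripts.runner'` arguments disappear from every call site; the helper supplies them.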
@@ -174,11 +196,11 @@ log_gatherer.furl = {log_furl}
     if not exists(intro_dir):
         mkdir(intro_dir)
         done_proto = _ProcessExitedProtocol()
-        reactor.spawnProcess(
+        _tahoe_runner_optional_coverage(
             done_proto,
-            sys.executable,
+            reactor,
+            request,
             (
-                sys.executable, '-m', 'allmydata.scripts.runner',
                 'create-introducer',
                 '--listen=tcp',
                 '--hostname=localhost',
@@ -195,11 +217,11 @@ log_gatherer.furl = {log_furl}
     # but on linux it means daemonize. "tahoe run" is consistent
     # between platforms.
     protocol = _MagicTextProtocol('introducer running')
-    process = reactor.spawnProcess(
+    process = _tahoe_runner_optional_coverage(
         protocol,
-        sys.executable,
+        reactor,
+        request,
         (
-            sys.executable, '-m', 'allmydata.scripts.runner',
             'run',
             intro_dir,
         ),
@@ -241,11 +263,11 @@ log_gatherer.furl = {log_furl}
     if not exists(intro_dir):
         mkdir(intro_dir)
         done_proto = _ProcessExitedProtocol()
-        reactor.spawnProcess(
+        _tahoe_runner_optional_coverage(
             done_proto,
-            sys.executable,
+            reactor,
+            request,
             (
-                sys.executable, '-m', 'allmydata.scripts.runner',
                 'create-introducer',
                 '--tor-control-port', 'tcp:localhost:8010',
                 '--listen=tor',
@@ -262,11 +284,11 @@ log_gatherer.furl = {log_furl}
     # but on linux it means daemonize. "tahoe run" is consistent
     # between platforms.
     protocol = _MagicTextProtocol('introducer running')
-    process = reactor.spawnProcess(
+    process = _tahoe_runner_optional_coverage(
         protocol,
-        sys.executable,
+        reactor,
+        request,
         (
-            sys.executable, '-m', 'allmydata.scripts.runner',
             'run',
             intro_dir,
         ),
@@ -365,11 +387,11 @@ def alice_invite(reactor, alice, temp_dir, request):
     # consistently fail if we don't hack in this pause...)
     import time ; time.sleep(5)
     proto = _CollectOutputProtocol()
-    reactor.spawnProcess(
+    _tahoe_runner_optional_coverage(
         proto,
-        sys.executable,
+        reactor,
+        request,
         [
-            sys.executable, '-m', 'allmydata.scripts.runner',
             'magic-folder', 'create',
             '--poll-interval', '2',
             '--basedir', node_dir, 'magik:', 'alice',
@@ -380,11 +402,11 @@ def alice_invite(reactor, alice, temp_dir, request):
 
     with start_action(action_type=u"integration:alice:magic_folder:invite") as a:
         proto = _CollectOutputProtocol()
-        reactor.spawnProcess(
+        _tahoe_runner_optional_coverage(
             proto,
-            sys.executable,
+            reactor,
+            request,
             [
-                sys.executable, '-m', 'allmydata.scripts.runner',
                 'magic-folder', 'invite',
                 '--basedir', node_dir, 'magik:', 'bob',
             ]
@@ -416,13 +438,13 @@ def magic_folder(reactor, alice_invite, alice, bob, temp_dir, request):
     print("pairing magic-folder")
     bob_dir = join(temp_dir, 'bob')
     proto = _CollectOutputProtocol()
-    reactor.spawnProcess(
+    _tahoe_runner_optional_coverage(
         proto,
-        sys.executable,
+        reactor,
+        request,
         [
-            sys.executable, '-m', 'allmydata.scripts.runner',
             'magic-folder', 'join',
-            '--poll-interval', '2',
+            '--poll-interval', '1',
             '--basedir', bob_dir,
             alice_invite,
             join(temp_dir, 'magic-bob'),
integration/test_magic_folder.py
@@ -408,7 +408,7 @@ def test_alice_adds_files_while_bob_is_offline(reactor, request, temp_dir, magic
     bob_node_dir = join(temp_dir, "bob")
 
     # Take Bob offline.
-    yield util.cli(reactor, bob_node_dir, "stop")
+    yield util.cli(request, reactor, bob_node_dir, "stop")
 
     # Create a couple files in Alice's local directory.
     some_files = list(
@@ -422,7 +422,7 @@ def test_alice_adds_files_while_bob_is_offline(reactor, request, temp_dir, magic
 
     good = False
     for i in range(15):
-        status = yield util.magic_folder_cli(reactor, alice_node_dir, "status")
+        status = yield util.magic_folder_cli(request, reactor, alice_node_dir, "status")
         good = status.count(".added-while-offline (36 B): good, version=0") == len(some_files) * 2
         if good:
             # We saw each file as having a local good state and a remote good
integration/util.py
@@ -117,8 +117,8 @@ def _cleanup_twistd_process(twistd_process, exited):
     :return: After the process has exited.
     """
    try:
-        print("signaling {} with KILL".format(twistd_process.pid))
-        twistd_process.signalProcess('KILL')
+        print("signaling {} with TERM".format(twistd_process.pid))
+        twistd_process.signalProcess('TERM')
         print("signaled, blocking on exit")
         pytest_twisted.blockon(exited)
         print("exited, goodbye")
@@ -134,15 +134,18 @@ def _run_node(reactor, node_dir, request, magic_text):
     # on windows, "tahoe start" means: run forever in the foreground,
     # but on linux it means daemonize. "tahoe run" is consistent
     # between platforms.
+    if request.config.getoption('coverage'):
+        args = [sys.executable, '-m', 'coverage', 'run', '-m', 'allmydata.scripts.runner', '--coverage']
+    else:
+        args = [sys.executable, '-m', 'allmydata.scripts.runner']
     process = reactor.spawnProcess(
         protocol,
         sys.executable,
-        (
-            sys.executable, '-m', 'allmydata.scripts.runner',
+        args + [
             '--eliot-destination', 'file:{}/logs/eliot.json'.format(node_dir),
             'run',
             node_dir,
-        ),
+        ],
     )
     process.exited = protocol.exited
 
@@ -178,8 +181,11 @@ def _create_node(reactor, request, temp_dir, introducer_furl, flog_gatherer, nam
     print("creating", node_dir)
     mkdir(node_dir)
     done_proto = _ProcessExitedProtocol()
-    args = [
-        sys.executable, '-m', 'allmydata.scripts.runner',
+    if request.config.getoption('coverage'):
+        args = [sys.executable, '-m', 'coverage', 'run', '-m', 'allmydata.scripts.runner', '--coverage']
+    else:
+        args = [sys.executable, '-m', 'allmydata.scripts.runner']
+    args = args + [
         'create-node',
         '--nickname', name,
         '--introducer', introducer_furl,
@@ -331,17 +337,24 @@ def await_file_vanishes(path, timeout=10):
     raise FileShouldVanishException(path, timeout)
 
 
-def cli(reactor, node_dir, *argv):
+def cli(request, reactor, node_dir, *argv):
+    """
+    Run a tahoe CLI subcommand for a given node, optionally running
+    under coverage if '--coverage' was supplied.
+    """
     proto = _CollectOutputProtocol()
+    if request.config.getoption('coverage'):
+        args = [sys.executable, '-m', 'coverage', 'run', '-m', 'allmydata.scripts.runner']
+    else:
+        args = [sys.executable, '-m', 'allmydata.scripts.runner']
     reactor.spawnProcess(
         proto,
         sys.executable,
-        [
-            sys.executable, '-m', 'allmydata.scripts.runner',
+        args + [
             '--node-directory', node_dir,
         ] + list(argv),
     )
     return proto.done
 
-def magic_folder_cli(reactor, node_dir, *argv):
-    return cli(reactor, node_dir, "magic-folder", *argv)
+def magic_folder_cli(request, reactor, node_dir, *argv):
+    return cli(request, reactor, node_dir, "magic-folder", *argv)
src/allmydata/scripts/runner.py
@@ -194,7 +194,45 @@ def run():
     # doesn't return: calls sys.exit(rc)
     task.react(_run_with_reactor)
 
+
+def _setup_coverage(reactor):
+    """
+    Arrange for coverage to be collected if the 'coverage' package is
+    installed
+    """
+    # can we put this _setup_coverage call after we hit
+    # argument-parsing?
+    if not '--coverage' in sys.argv:
+        return
+    sys.argv.remove('--coverage')
+
+    try:
+        import coverage
+    except ImportError:
+        return
+
+    os.environ["COVERAGE_PROCESS_START"] = '.coveragerc'
+    # maybe-start the global coverage, unless it already got started
+    cov = coverage.process_startup()
+    if cov is None:
+        cov = coverage.process_startup.coverage
+
+    def write_coverage_data(*args, **kw):
+        """
+        Make sure that coverage has stopped; internally, it depends on
+        atexit handlers running which doesn't always happen (Twisted's
+        shutdown hook also won't run if os._exit() is called, but it
+        runs more-often than atexit handlers).
+        """
+        cov.stop()
+        cov.save()
+    reactor.addSystemEventTrigger('after', 'shutdown', write_coverage_data)
+
+
 def _run_with_reactor(reactor):
+
+    _setup_coverage(reactor)
+
     d = defer.maybeDeferred(parse_or_exit_with_explanation, sys.argv[1:])
     d.addCallback(_maybe_enable_eliot_logging, reactor)
     d.addCallback(dispatch)
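The `_setup_coverage` helper leans on coverage.py's subprocess-measurement hooks: `COVERAGE_PROCESS_START` names the config file, `coverage.process_startup()` begins collection, and a Twisted `'after', 'shutdown'` trigger saves the data because atexit handlers are not guaranteed to run before the process exits. A minimal standalone sketch of that mechanism, assuming the `coverage` package is installed (background illustration, not part of the commit):

import os
import coverage  # coverage.py; assumed to be installed

# Point any coverage measurement started in this process at the project's
# config file.
os.environ["COVERAGE_PROCESS_START"] = ".coveragerc"

# Start measuring.  process_startup() returns the active Coverage object, or
# None if measurement was already started through this hook, in which case
# the object is stashed on the function itself.
cov = coverage.process_startup()
if cov is None:
    cov = coverage.process_startup.coverage

# ... the program under measurement runs here ...

# Flush the data explicitly; with `parallel = True` in .coveragerc this writes
# a per-process data file that a later `coverage combine` merges.
cov.stop()
cov.save()

The remaining hunks adjust the CLI unit tests, where `fake_react` now hands the runner a `Mock()` reactor instead of the string "reactor" (presumably because `_setup_coverage` calls `reactor.addSystemEventTrigger`), and tox.ini.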
@@ -2,7 +2,7 @@ import os.path
 from six.moves import cStringIO as StringIO
 import urllib, sys
 import re
-from mock import patch
+from mock import patch, Mock
 
 from twisted.trial import unittest
 from twisted.python.monkey import MonkeyPatcher
@@ -525,7 +525,8 @@ class CLI(CLITestMixin, unittest.TestCase):
         self.failUnlessEqual(exitcode, 1)
 
         def fake_react(f):
-            d = f("reactor")
+            reactor = Mock()
+            d = f(reactor)
             # normally this Deferred would be errbacked with SystemExit, but
             # since we mocked out sys.exit, it will be fired with None. So
             # it's safe to drop it on the floor.
tox.ini
@@ -49,9 +49,13 @@ commands =
     trial {env:TAHOE_LAFS_TRIAL_ARGS:--rterrors} {posargs:allmydata}
 
 [testenv:integration]
+setenv =
+    COVERAGE_PROCESS_START=.coveragerc
 commands =
     # NOTE: 'run with "py.test --keep-tempdir -s -v integration/" to debug failures'
-    py.test -v integration/
+    py.test --coverage -v integration/
+    coverage combine
+    coverage report
 
 [testenv:coverage]
 # coverage (with --branch) takes about 65% longer to run
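With these pieces in place, the integration tox environment runs `py.test --coverage`, then `coverage combine` and `coverage report` over whatever the spawned tahoe processes recorded. A small hypothetical sanity check (not part of the commit) for confirming that the subprocesses actually left parallel-mode data behind before combining:

import glob

# Every tahoe subprocess run with --coverage should leave a parallel-mode data
# file (named roughly .coverage.<host>.<pid>.<random>) in the working
# directory; `coverage combine` merges them into a single .coverage file.
data_files = glob.glob(".coverage.*")
print("found {} per-process coverage data files".format(len(data_files)))
assert data_files, "no subprocess coverage data was written"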