Merge pull request #611 from tahoe-lafs/3025.fix-test_runner-hangs

Replace "tahoe start" with "tahoe run" in some of test_runner to fix CI hangs

Fixes: ticket:3025
Jean-Paul Calderone 2019-05-14 04:13:04 -04:00 committed by GitHub
commit 31936111c5
6 changed files with 508 additions and 312 deletions


@@ -14,7 +14,7 @@ environment:
install:
- |
%PYTHON%\python.exe -m pip install -U pip
%PYTHON%\python.exe -m pip install wheel tox virtualenv
%PYTHON%\python.exe -m pip install wheel tox==3.9.0 virtualenv
# note:
# %PYTHON% has: python.exe

newsfragments/3025.minor Normal file


@@ -32,12 +32,12 @@ def stop(config):
print("%s does not look like a running node directory (no twistd.pid)" % quoted_basedir, file=err)
# we define rc=2 to mean "nothing is running, but it wasn't me who
# stopped it"
return 2
return COULD_NOT_STOP
elif pid == -1:
print("%s contains an invalid PID file" % basedir, file=err)
# we define rc=2 to mean "nothing is running, but it wasn't me who
# stopped it"
return 2
return COULD_NOT_STOP
# kill it hard (SIGKILL), delete the twistd.pid file, then wait for the
# process itself to go away. If it hasn't gone away after 20 seconds, warn
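
(Aside: the hunk above replaces the literal exit code with a named constant. A minimal sketch of what that constant presumably looks like, going only by the "rc=2" comments in this hunk; the real definition lives elsewhere in the module and may differ:)

# Hypothetical sketch, not copied from the source: the exit code meaning
# "nothing is running, but it wasn't me who stopped it", per the comments above.
COULD_NOT_STOP = 2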


@@ -0,0 +1,235 @@
__all__ = [
"CLINodeAPI",
"Expect",
"on_stdout",
"on_stdout_and_stderr",
"on_different",
"wait_for_exit",
]
import os
import sys
from errno import ENOENT
import attr
from twisted.internet.error import (
ProcessDone,
ProcessTerminated,
ProcessExitedAlready,
)
from twisted.internet.interfaces import (
IProcessProtocol,
)
from twisted.python.filepath import (
FilePath,
)
from twisted.python.runtime import (
platform,
)
from twisted.internet.protocol import (
Protocol,
ProcessProtocol,
)
from twisted.internet.defer import (
Deferred,
succeed,
)
from twisted.internet.task import (
deferLater,
)
from ..client import (
_Client,
)
from ..scripts.tahoe_stop import (
COULD_NOT_STOP,
)
from ..util.eliotutil import (
inline_callbacks,
)
class Expect(Protocol):
def __init__(self):
self._expectations = []
def get_buffered_output(self):
return self._buffer
def expect(self, expectation):
if expectation in self._buffer:
return succeed(None)
d = Deferred()
self._expectations.append((expectation, d))
return d
def connectionMade(self):
self._buffer = b""
def dataReceived(self, data):
self._buffer += data
for i in range(len(self._expectations) - 1, -1, -1):
expectation, d = self._expectations[i]
if expectation in self._buffer:
del self._expectations[i]
d.callback(None)
def connectionLost(self, reason):
for ignored, d in self._expectations:
d.errback(reason)
class _ProcessProtocolAdapter(ProcessProtocol):
def __init__(self, fds):
self._fds = fds
def connectionMade(self):
for proto in self._fds.values():
proto.makeConnection(self.transport)
def childDataReceived(self, childFD, data):
try:
proto = self._fds[childFD]
except KeyError:
pass
else:
proto.dataReceived(data)
def processEnded(self, reason):
notified = set()
for proto in self._fds.values():
if proto not in notified:
proto.connectionLost(reason)
notified.add(proto)
def on_stdout(protocol):
return _ProcessProtocolAdapter({1: protocol})
def on_stdout_and_stderr(protocol):
return _ProcessProtocolAdapter({1: protocol, 2: protocol})
def on_different(fd_mapping):
return _ProcessProtocolAdapter(fd_mapping)
@attr.s
class CLINodeAPI(object):
reactor = attr.ib()
basedir = attr.ib(type=FilePath)
process = attr.ib(default=None)
@property
def twistd_pid_file(self):
return self.basedir.child(u"twistd.pid")
@property
def node_url_file(self):
return self.basedir.child(u"node.url")
@property
def storage_furl_file(self):
return self.basedir.child(u"private").child(u"storage.furl")
@property
def introducer_furl_file(self):
return self.basedir.child(u"private").child(u"introducer.furl")
@property
def config_file(self):
return self.basedir.child(u"tahoe.cfg")
@property
def exit_trigger_file(self):
return self.basedir.child(_Client.EXIT_TRIGGER_FILE)
def _execute(self, process_protocol, argv):
exe = sys.executable
argv = [
exe,
u"-m",
u"allmydata.scripts.runner",
] + argv
return self.reactor.spawnProcess(
processProtocol=process_protocol,
executable=exe,
args=argv,
env=os.environ,
)
def run(self, protocol, extra_tahoe_args=()):
"""
Start the node running.
:param IProcessProtocol protocol: This protocol will be hooked up to
the node process and can handle output or generate input.
"""
if not IProcessProtocol.providedBy(protocol):
raise TypeError("run requires process protocol, got {}".format(protocol))
self.process = self._execute(
protocol,
list(extra_tahoe_args) + [u"run", self.basedir.asTextMode().path],
)
# Don't let the process run away forever.
try:
self.active()
except OSError as e:
if ENOENT != e.errno:
raise
def stop(self, protocol):
self._execute(
protocol,
[u"stop", self.basedir.asTextMode().path],
)
@inline_callbacks
def stop_and_wait(self):
if platform.isWindows():
# On Windows there is no PID file and no "tahoe stop".
if self.process is not None:
while True:
try:
self.process.signalProcess("TERM")
except ProcessExitedAlready:
break
else:
yield deferLater(self.reactor, 0.1, lambda: None)
else:
protocol, ended = wait_for_exit()
self.stop(protocol)
yield ended
def active(self):
# By writing this file, we get two minutes before the client will
# exit. This ensures that even if the 'stop' command doesn't work (and
# the test fails), the client should still terminate.
self.exit_trigger_file.touch()
def _check_cleanup_reason(self, reason):
# Let it fail because the process has already exited.
reason.trap(ProcessTerminated)
if reason.value.exitCode != COULD_NOT_STOP:
return reason
return None
def cleanup(self):
stopping = self.stop_and_wait()
stopping.addErrback(self._check_cleanup_reason)
return stopping
class _WaitForEnd(ProcessProtocol):
def __init__(self, ended):
self._ended = ended
def processEnded(self, reason):
if reason.check(ProcessDone):
self._ended.callback(None)
else:
self._ended.errback(reason)
def wait_for_exit():
ended = Deferred()
protocol = _WaitForEnd(ended)
return protocol, ended
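
The helpers above are exercised by the test_runner changes that follow. A minimal sketch of how they are meant to compose, assuming the module sits alongside the tests (as the relative import in test_runner suggests) and that a node has already been created in basedir; the names here are illustrative, not taken from the diff:

# Illustrative only: drive a node the way the new tests do.
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks
from twisted.python.filepath import FilePath

from allmydata.test.cli_node_api import CLINodeAPI, Expect, on_stdout

@inlineCallbacks
def run_then_stop(basedir):
    tahoe = CLINodeAPI(reactor, FilePath(basedir))
    p = Expect()
    tahoe.run(on_stdout(p))            # spawns "python -m allmydata.scripts.runner run <basedir>"
    yield p.expect(b"client running")  # wait for the startup message on stdout
    tahoe.active()                     # touch the exit trigger so a wedged node still exits
    yield tahoe.stop_and_wait()        # "tahoe stop", or TERM-and-poll on Windows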


@@ -4,24 +4,40 @@ from __future__ import (
)
import os.path, re, sys
from os import linesep
from twisted.trial import unittest
from twisted.python import usage, runtime
from twisted.internet.defer import inlineCallbacks, returnValue
from twisted.internet import reactor
from twisted.python import usage
from twisted.internet.defer import (
inlineCallbacks,
returnValue,
DeferredList,
)
from twisted.python.filepath import FilePath
from twisted.python.runtime import (
platform,
)
from allmydata.util import fileutil, pollmixin
from allmydata.util.encodingutil import unicode_to_argv, unicode_to_output, \
get_filesystem_encoding
from allmydata.client import _Client
from allmydata.test import common_util
import allmydata
from allmydata import __appname__
from .common_util import parse_cli, run_cli
from .cli_node_api import (
CLINodeAPI,
Expect,
on_stdout,
on_stdout_and_stderr,
)
from ._twisted_9607 import (
getProcessOutputAndValue,
)
from ..util.eliotutil import (
inline_callbacks,
)
def get_root_from_file(src):
srcdir = os.path.dirname(os.path.dirname(os.path.normcase(os.path.realpath(src))))
@@ -39,13 +55,7 @@ def get_root_from_file(src):
srcfile = allmydata.__file__
rootdir = get_root_from_file(srcfile)
class RunBinTahoeMixin:
def skip_if_cannot_daemonize(self):
if runtime.platformType == "win32":
# twistd on windows doesn't daemonize. cygwin should work normally.
raise unittest.SkipTest("twistd does not fork under windows")
@inlineCallbacks
def find_import_location(self):
res = yield self.run_bintahoe(["--version-and-path"])
@@ -362,354 +372,283 @@ class CreateNode(unittest.TestCase):
# can't provide all three
_test("create-stats-gatherer --hostname=foo --location=foo --port=foo D")
class RunNode(common_util.SignalMixin, unittest.TestCase, pollmixin.PollMixin,
RunBinTahoeMixin):
# exercise "tahoe start", for both introducer, client node, and
# key-generator, by spawning "tahoe start" as a subprocess. This doesn't
# get us figleaf-based line-level coverage, but it does a better job of
# confirming that the user can actually run "./bin/tahoe start" and
# expect it to work. This verifies that bin/tahoe sets up PYTHONPATH and
# the like correctly.
"""
exercise "tahoe run" for both introducer, client node, and key-generator,
by spawning "tahoe run" (or "tahoe start") as a subprocess. This doesn't
get us line-level coverage, but it does a better job of confirming that
the user can actually run "./bin/tahoe run" and expect it to work. This
verifies that bin/tahoe sets up PYTHONPATH and the like correctly.
# This doesn't work on cygwin (it hangs forever), so we skip this test
# when we're on cygwin. It is likely that "tahoe start" itself doesn't
# work on cygwin: twisted seems unable to provide a version of
# spawnProcess which really works there.
This doesn't work on cygwin (it hangs forever), so we skip this test
when we're on cygwin. It is likely that "tahoe start" itself doesn't
work on cygwin: twisted seems unable to provide a version of
spawnProcess which really works there.
"""
def workdir(self, name):
basedir = os.path.join("test_runner", "RunNode", name)
fileutil.make_dirs(basedir)
return basedir
@inline_callbacks
def test_introducer(self):
self.skip_if_cannot_daemonize()
"""
The introducer furl is stable across restarts.
"""
basedir = self.workdir("test_introducer")
c1 = os.path.join(basedir, "c1")
exit_trigger_file = os.path.join(c1, _Client.EXIT_TRIGGER_FILE)
twistd_pid_file = os.path.join(c1, "twistd.pid")
introducer_furl_file = os.path.join(c1, "private", "introducer.furl")
node_url_file = os.path.join(c1, "node.url")
config_file = os.path.join(c1, "tahoe.cfg")
tahoe = CLINodeAPI(reactor, FilePath(c1))
self.addCleanup(tahoe.stop_and_wait)
d = self.run_bintahoe(["--quiet", "create-introducer", "--basedir", c1, "--hostname", "localhost"])
def _cb(res):
out, err, rc_or_sig = res
self.failUnlessEqual(rc_or_sig, 0)
out, err, rc_or_sig = yield self.run_bintahoe([
"--quiet",
"create-introducer",
"--basedir", c1,
"--hostname", "127.0.0.1",
])
# This makes sure that node.url is written, which allows us to
# detect when the introducer restarts in _node_has_restarted below.
config = fileutil.read(config_file)
self.failUnlessIn('\nweb.port = \n', config)
fileutil.write(config_file, config.replace('\nweb.port = \n', '\nweb.port = 0\n'))
self.assertEqual(rc_or_sig, 0)
# by writing this file, we get ten seconds before the node will
# exit. This ensures that even if the test fails (and the 'stop'
# command doesn't work), the client should still terminate.
fileutil.write(exit_trigger_file, "")
# now it's safe to start the node
d.addCallback(_cb)
# This makes sure that node.url is written, which allows us to
# detect when the introducer restarts in _node_has_restarted below.
config = fileutil.read(tahoe.config_file.path)
self.assertIn('{}web.port = {}'.format(linesep, linesep), config)
fileutil.write(
tahoe.config_file.path,
config.replace(
'{}web.port = {}'.format(linesep, linesep),
'{}web.port = 0{}'.format(linesep, linesep),
)
)
def _then_start_the_node(res):
return self.run_bintahoe(["--quiet", "start", c1])
d.addCallback(_then_start_the_node)
p = Expect()
tahoe.run(on_stdout(p))
yield p.expect("introducer running")
tahoe.active()
def _cb2(res):
out, err, rc_or_sig = res
yield self.poll(tahoe.introducer_furl_file.exists)
fileutil.write(exit_trigger_file, "")
errstr = "rc=%d, OUT: '%s', ERR: '%s'" % (rc_or_sig, out, err)
self.failUnlessEqual(rc_or_sig, 0, errstr)
self.failUnlessEqual(out, "", errstr)
# self.failUnlessEqual(err, "", errstr) # See test_client_no_noise -- for now we ignore noise.
# read the introducer.furl file so we can check that the contents
# don't change on restart
furl = fileutil.read(tahoe.introducer_furl_file.path)
# the parent (twistd) has exited. However, twistd writes the pid
# from the child, not the parent, so we can't expect twistd.pid
# to exist quite yet.
tahoe.active()
# the node is running, but it might not have made it past the
# first reactor turn yet, and if we kill it too early, it won't
# remove the twistd.pid file. So wait until it does something
# that we know it won't do until after the first turn.
d.addCallback(_cb2)
# We don't keep track of PIDs in files on Windows.
if not platform.isWindows():
self.assertTrue(tahoe.twistd_pid_file.exists())
self.assertTrue(tahoe.node_url_file.exists())
def _node_has_started():
return os.path.exists(introducer_furl_file)
d.addCallback(lambda res: self.poll(_node_has_started))
# rm this so we can detect when the second incarnation is ready
tahoe.node_url_file.remove()
def _started(res):
# read the introducer.furl file so we can check that the contents
# don't change on restart
self.furl = fileutil.read(introducer_furl_file)
yield tahoe.stop_and_wait()
fileutil.write(exit_trigger_file, "")
self.failUnless(os.path.exists(twistd_pid_file))
self.failUnless(os.path.exists(node_url_file))
p = Expect()
tahoe.run(on_stdout(p))
yield p.expect("introducer running")
# rm this so we can detect when the second incarnation is ready
os.unlink(node_url_file)
return self.run_bintahoe(["--quiet", "restart", c1])
d.addCallback(_started)
# Again, the second incarnation of the node might not be ready yet, so
# poll until it is. This time introducer_furl_file already exists, so
# we check for the existence of node_url_file instead.
yield self.poll(tahoe.node_url_file.exists)
def _then(res):
out, err, rc_or_sig = res
fileutil.write(exit_trigger_file, "")
errstr = "rc=%d, OUT: '%s', ERR: '%s'" % (rc_or_sig, out, err)
self.failUnlessEqual(rc_or_sig, 0, errstr)
self.failUnlessEqual(out, "", errstr)
# self.failUnlessEqual(err, "", errstr) # See test_client_no_noise -- for now we ignore noise.
d.addCallback(_then)
# Again, the second incarnation of the node might not be ready yet,
# so poll until it is. This time introducer_furl_file already
# exists, so we check for the existence of node_url_file instead.
def _node_has_restarted():
return os.path.exists(node_url_file)
d.addCallback(lambda res: self.poll(_node_has_restarted))
def _check_same_furl(res):
self.failUnless(os.path.exists(introducer_furl_file))
self.failUnlessEqual(self.furl, fileutil.read(introducer_furl_file))
d.addCallback(_check_same_furl)
# Now we can kill it. TODO: On a slow machine, the node might kill
# itself before we get a chance to, especially if spawning the
# 'tahoe stop' command takes a while.
def _stop(res):
fileutil.write(exit_trigger_file, "")
self.failUnless(os.path.exists(twistd_pid_file))
return self.run_bintahoe(["--quiet", "stop", c1])
d.addCallback(_stop)
def _after_stopping(res):
out, err, rc_or_sig = res
fileutil.write(exit_trigger_file, "")
# the parent has exited by now
errstr = "rc=%d, OUT: '%s', ERR: '%s'" % (rc_or_sig, out, err)
self.failUnlessEqual(rc_or_sig, 0, errstr)
self.failUnlessEqual(out, "", errstr)
# self.failUnlessEqual(err, "", errstr) # See test_client_no_noise -- for now we ignore noise.
# the parent was supposed to poll and wait until it sees
# twistd.pid go away before it exits, so twistd.pid should be
# gone by now.
self.failIf(os.path.exists(twistd_pid_file))
d.addCallback(_after_stopping)
d.addBoth(self._remove, exit_trigger_file)
return d
def test_client_no_noise(self):
self.skip_if_cannot_daemonize()
basedir = self.workdir("test_client_no_noise")
c1 = os.path.join(basedir, "c1")
exit_trigger_file = os.path.join(c1, _Client.EXIT_TRIGGER_FILE)
twistd_pid_file = os.path.join(c1, "twistd.pid")
node_url_file = os.path.join(c1, "node.url")
d = self.run_bintahoe(["--quiet", "create-client", "--basedir", c1, "--webport", "0"])
def _cb(res):
out, err, rc_or_sig = res
errstr = "cc=%d, OUT: '%s', ERR: '%s'" % (rc_or_sig, out, err)
assert rc_or_sig == 0, errstr
self.failUnlessEqual(rc_or_sig, 0)
# By writing this file, we get two minutes before the client will exit. This ensures
# that even if the 'stop' command doesn't work (and the test fails), the client should
# still terminate.
fileutil.write(exit_trigger_file, "")
# now it's safe to start the node
d.addCallback(_cb)
def _start(res):
return self.run_bintahoe(["--quiet", "start", c1])
d.addCallback(_start)
def _cb2(res):
out, err, rc_or_sig = res
errstr = "cc=%d, OUT: '%s', ERR: '%s'" % (rc_or_sig, out, err)
fileutil.write(exit_trigger_file, "")
self.failUnlessEqual(rc_or_sig, 0, errstr)
self.failUnlessEqual(out, "", errstr) # If you emit noise, you fail this test.
errlines = err.split("\n")
self.failIf([True for line in errlines if (line != "" and "UserWarning: Unbuilt egg for setuptools" not in line
and "from pkg_resources import load_entry_point" not in line)], errstr)
if err != "":
raise unittest.SkipTest("This test is known not to pass on Ubuntu Lucid; see #1235.")
# the parent (twistd) has exited. However, twistd writes the pid
# from the child, not the parent, so we can't expect twistd.pid
# to exist quite yet.
# the node is running, but it might not have made it past the
# first reactor turn yet, and if we kill it too early, it won't
# remove the twistd.pid file. So wait until it does something
# that we know it won't do until after the first turn.
d.addCallback(_cb2)
def _node_has_started():
return os.path.exists(node_url_file)
d.addCallback(lambda res: self.poll(_node_has_started))
# now we can kill it. TODO: On a slow machine, the node might kill
# itself before we get a chance to, especially if spawning the
# 'tahoe stop' command takes a while.
def _stop(res):
self.failUnless(os.path.exists(twistd_pid_file),
(twistd_pid_file, os.listdir(os.path.dirname(twistd_pid_file))))
return self.run_bintahoe(["--quiet", "stop", c1])
d.addCallback(_stop)
d.addBoth(self._remove, exit_trigger_file)
return d
# The point of this test! After starting the second time the
# introducer furl file must exist and contain the same contents as it
# did before.
self.assertTrue(tahoe.introducer_furl_file.exists())
self.assertEqual(furl, fileutil.read(tahoe.introducer_furl_file.path))
@inline_callbacks
def test_client(self):
self.skip_if_cannot_daemonize()
"""
Test many things.
0) Verify that "tahoe create-node" takes a --webport option and writes
the value to the configuration file.
1) Verify that "tahoe run" writes a pid file and a node url file (on POSIX).
2) Verify that the storage furl file has a stable value across a
"tahoe run" / "tahoe stop" / "tahoe run" sequence.
3) Verify that the pid file is removed after "tahoe stop" succeeds (on POSIX).
"""
basedir = self.workdir("test_client")
c1 = os.path.join(basedir, "c1")
exit_trigger_file = os.path.join(c1, _Client.EXIT_TRIGGER_FILE)
twistd_pid_file = os.path.join(c1, "twistd.pid")
node_url_file = os.path.join(c1, "node.url")
storage_furl_file = os.path.join(c1, "private", "storage.furl")
config_file = os.path.join(c1, "tahoe.cfg")
d = self.run_bintahoe(["--quiet", "create-node", "--basedir", c1,
"--webport", "0",
"--hostname", "localhost"])
def _cb(res):
out, err, rc_or_sig = res
self.failUnlessEqual(rc_or_sig, 0)
tahoe = CLINodeAPI(reactor, FilePath(c1))
# Set this up right now so we don't forget later.
self.addCleanup(tahoe.cleanup)
# Check that the --webport option worked.
config = fileutil.read(config_file)
self.failUnlessIn('\nweb.port = 0\n', config)
out, err, rc_or_sig = yield self.run_bintahoe([
"--quiet", "create-node", "--basedir", c1,
"--webport", "0",
"--hostname", "localhost",
])
self.failUnlessEqual(rc_or_sig, 0)
# By writing this file, we get two minutes before the client will
# exit. This ensures that even if the 'stop' command doesn't work
# (and the test fails), the client should still terminate.
fileutil.write(exit_trigger_file, "")
# now it's safe to start the node
d.addCallback(_cb)
# Check that the --webport option worked.
config = fileutil.read(tahoe.config_file.path)
self.assertIn(
'{}web.port = 0{}'.format(linesep, linesep),
config,
)
def _start(res):
return self.run_bintahoe(["--quiet", "start", c1])
d.addCallback(_start)
# After this it's safe to start the node
tahoe.active()
def _cb2(res):
out, err, rc_or_sig = res
fileutil.write(exit_trigger_file, "")
errstr = "rc=%d, OUT: '%s', ERR: '%s'" % (rc_or_sig, out, err)
self.failUnlessEqual(rc_or_sig, 0, errstr)
self.failUnlessEqual(out, "", errstr)
# self.failUnlessEqual(err, "", errstr) # See test_client_no_noise -- for now we ignore noise.
p = Expect()
# This will run until we stop it.
tahoe.run(on_stdout(p))
# Wait for startup to have proceeded to a reasonable point.
yield p.expect("client running")
tahoe.active()
# the parent (twistd) has exited. However, twistd writes the pid
# from the child, not the parent, so we can't expect twistd.pid
# to exist quite yet.
# read the storage.furl file so we can check that its contents don't
# change on restart
storage_furl = fileutil.read(tahoe.storage_furl_file.path)
# the node is running, but it might not have made it past the
# first reactor turn yet, and if we kill it too early, it won't
# remove the twistd.pid file. So wait until it does something
# that we know it won't do until after the first turn.
d.addCallback(_cb2)
# We don't keep track of PIDs in files on Windows.
if not platform.isWindows():
self.assertTrue(tahoe.twistd_pid_file.exists())
def _node_has_started():
return os.path.exists(node_url_file)
d.addCallback(lambda res: self.poll(_node_has_started))
# rm this so we can detect when the second incarnation is ready
tahoe.node_url_file.remove()
yield tahoe.stop_and_wait()
def _started(res):
# read the storage.furl file so we can check that its contents
# don't change on restart
self.storage_furl = fileutil.read(storage_furl_file)
p = Expect()
# We don't have to add another cleanup for this one, the one from
# above is still registered.
tahoe.run(on_stdout(p))
yield p.expect("client running")
tahoe.active()
fileutil.write(exit_trigger_file, "")
self.failUnless(os.path.exists(twistd_pid_file))
self.assertEqual(
storage_furl,
fileutil.read(tahoe.storage_furl_file.path),
)
# rm this so we can detect when the second incarnation is ready
os.unlink(node_url_file)
return self.run_bintahoe(["--quiet", "restart", c1])
d.addCallback(_started)
if not platform.isWindows():
self.assertTrue(
tahoe.twistd_pid_file.exists(),
"PID file ({}) didn't exist when we expected it to. "
"These exist: {}".format(
tahoe.twistd_pid_file,
tahoe.twistd_pid_file.parent().listdir(),
),
)
yield tahoe.stop_and_wait()
def _cb3(res):
out, err, rc_or_sig = res
if not platform.isWindows():
# twistd.pid should be gone by now.
self.assertFalse(tahoe.twistd_pid_file.exists())
fileutil.write(exit_trigger_file, "")
errstr = "rc=%d, OUT: '%s', ERR: '%s'" % (rc_or_sig, out, err)
self.failUnlessEqual(rc_or_sig, 0, errstr)
self.failUnlessEqual(out, "", errstr)
# self.failUnlessEqual(err, "", errstr) # See test_client_no_noise -- for now we ignore noise.
d.addCallback(_cb3)
# again, the second incarnation of the node might not be ready yet,
# so poll until it is
d.addCallback(lambda res: self.poll(_node_has_started))
def _check_same_furl(res):
self.failUnlessEqual(self.storage_furl,
fileutil.read(storage_furl_file))
d.addCallback(_check_same_furl)
# now we can kill it. TODO: On a slow machine, the node might kill
# itself before we get a chance to, especially if spawning the
# 'tahoe stop' command takes a while.
def _stop(res):
fileutil.write(exit_trigger_file, "")
self.failUnless(os.path.exists(twistd_pid_file),
(twistd_pid_file, os.listdir(os.path.dirname(twistd_pid_file))))
return self.run_bintahoe(["--quiet", "stop", c1])
d.addCallback(_stop)
def _cb4(res):
out, err, rc_or_sig = res
fileutil.write(exit_trigger_file, "")
# the parent has exited by now
errstr = "rc=%d, OUT: '%s', ERR: '%s'" % (rc_or_sig, out, err)
self.failUnlessEqual(rc_or_sig, 0, errstr)
self.failUnlessEqual(out, "", errstr)
# self.failUnlessEqual(err, "", errstr) # See test_client_no_noise -- for now we ignore noise.
# the parent was supposed to poll and wait until it sees
# twistd.pid go away before it exits, so twistd.pid should be
# gone by now.
self.failIf(os.path.exists(twistd_pid_file))
d.addCallback(_cb4)
d.addBoth(self._remove, exit_trigger_file)
return d
def _remove(self, res, file):
fileutil.remove(file)
return res
def test_baddir(self):
self.skip_if_cannot_daemonize()
basedir = self.workdir("test_baddir")
def test_run_bad_directory(self):
"""
If ``tahoe run`` is pointed at a non-node directory, it reports an error
and exits.
"""
return self._bad_directory_test(
u"test_run_bad_directory",
"tahoe run",
lambda tahoe, p: tahoe.run(p),
"is not a recognizable node directory",
)
def test_run_bogus_directory(self):
"""
If ``tahoe run`` is pointed at a non-directory, it reports an error and
exits.
"""
return self._bad_directory_test(
u"test_run_bogus_directory",
"tahoe run",
lambda tahoe, p: CLINodeAPI(
tahoe.reactor,
tahoe.basedir.sibling(u"bogus"),
).run(p),
"does not look like a directory at all"
)
def test_stop_bad_directory(self):
"""
If ``tahoe run`` is pointed at a directory where no node is running, it
reports an error and exits.
"""
return self._bad_directory_test(
u"test_stop_bad_directory",
"tahoe stop",
lambda tahoe, p: tahoe.stop(p),
"does not look like a running node directory",
)
@inline_callbacks
def _bad_directory_test(self, workdir, description, operation, expected_message):
"""
Verify that a certain ``tahoe`` CLI operation produces a certain expected
message and then exits.
:param unicode workdir: A distinct path name for this test to operate
on.
:param unicode description: A description of the operation being
performed.
:param operation: A two-argument callable implementing the operation.
The first argument is a ``CLINodeAPI`` instance to use to perform
the operation. The second argument is an ``IProcessProtocol`` to
which the operation's output must be delivered.
:param unicode expected_message: Some text that is expected in the
stdout or stderr of the operation in the successful case.
:return: A ``Deferred`` that fires when the assertions have been made.
"""
basedir = self.workdir(workdir)
fileutil.make_dirs(basedir)
d = self.run_bintahoe(["--quiet", "start", "--basedir", basedir])
def _cb(res):
out, err, rc_or_sig = res
self.failUnlessEqual(rc_or_sig, 1)
self.failUnless("is not a recognizable node directory" in err, err)
d.addCallback(_cb)
tahoe = CLINodeAPI(reactor, FilePath(basedir))
# If tahoe ends up thinking it should keep running, make sure it stops
# promptly when the test is done.
self.addCleanup(tahoe.cleanup)
def _then_stop_it(res):
return self.run_bintahoe(["--quiet", "stop", "--basedir", basedir])
d.addCallback(_then_stop_it)
p = Expect()
operation(tahoe, on_stdout_and_stderr(p))
def _cb2(res):
out, err, rc_or_sig = res
self.failUnlessEqual(rc_or_sig, 2)
self.failUnless("does not look like a running node directory" in err)
d.addCallback(_cb2)
client_running = p.expect(b"client running")
def _then_start_in_bogus_basedir(res):
not_a_dir = os.path.join(basedir, "bogus")
return self.run_bintahoe(["--quiet", "start", "--basedir", not_a_dir])
d.addCallback(_then_start_in_bogus_basedir)
result, index = yield DeferredList([
p.expect(expected_message),
client_running,
], fireOnOneCallback=True, consumeErrors=True,
)
def _cb3(res):
out, err, rc_or_sig = res
self.failUnlessEqual(rc_or_sig, 1)
self.failUnlessIn("does not look like a directory at all", err)
d.addCallback(_cb3)
return d
self.assertEqual(
index,
0,
"Expected error message from '{}', got something else: {}".format(
description,
p.get_buffered_output(),
),
)
if not platform.isWindows():
# It should not be running.
self.assertFalse(tahoe.twistd_pid_file.exists())
# Wait for the operation to *complete*. If we got this far it's
# because we got the expected message so we can expect the "tahoe ..."
# child process to exit very soon. This other Deferred will fail when
# it eventually does but DeferredList above will consume the error.
# What's left is a perfect indicator that the process has exited and
# we won't get blamed for leaving the reactor dirty.
yield client_running
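
One detail of _bad_directory_test above is worth spelling out: with fireOnOneCallback=True, a DeferredList fires as soon as any of its Deferreds succeeds, and its result is a (result, index) pair identifying which one won, which is what the "index == 0" assertion checks. A standalone sketch of that behaviour (illustrative, not part of the change):

from twisted.internet.defer import Deferred, DeferredList, succeed

# Two possible outcomes; here the "expected error message" side has already fired.
error_message = succeed("does not look like a running node directory")
client_running = Deferred()

d = DeferredList(
    [error_message, client_running],
    fireOnOneCallback=True,
    consumeErrors=True,   # keep the losing Deferred's eventual failure from being logged
)

def report(result_and_index):
    result, index = result_and_index
    # index 0 means the error message won the race, as the test expects.
    print("winner:", index, "result:", result)

d.addCallback(report)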

tox.ini

@@ -20,8 +20,24 @@ passenv = TAHOE_LAFS_* PIP_* SUBUNITREPORTER_* USERPROFILE HOMEDRIVE HOMEPATH
# available to those systems. Installing it ahead of time (with pip) avoids
# this problem.
deps =
# Pin all of these versions for the same reason you ever want to pin
# anything: to prevent new releases with regressions from introducing
# spurious failures into CI runs for whatever development work is
# happening at the time. The versions selected here are just the current
# versions at the time. Bumping them to keep up with future releases is
# fine as long as those releases are known to actually work.
pip==19.1.1
setuptools==41.0.1
wheel==0.33.4
subunitreporter==19.3.2
# As an exception, we don't pin certifi because it contains CA
# certificates which necessarily change over time. Pinning this is
# guaranteed to cause things to break eventually as old certificates
# expire and as new ones are used in the wild that aren't present in
# whatever version we pin. Hopefully there won't be functionality
# regressions in new releases of this package that cause us the kind of
# suffering we're trying to avoid with the above pins.
certifi
subunitreporter
# We add usedevelop=True for speed, and extras=test to get things like "mock"
# that are required for our unit tests.
usedevelop = True
@@ -38,6 +54,12 @@ commands =
[testenv:coverage]
# coverage (with --branch) takes about 65% longer to run
commands =
# As an aid to debugging, dump all of the Python packages and their
# versions that are installed in the test environment. This is
# particularly useful to get from CI runs - though hopefully the
# version pinning we do limits the variability of this output
# somewhat.
pip freeze
tahoe --version
coverage run --branch -m twisted.trial {env:TAHOE_LAFS_TRIAL_ARGS:--rterrors --reporter=timing} {posargs:allmydata}
coverage xml