Merge remote-tracking branch 'origin/master' into 3029.install-test-plugins

Jean-Paul Calderone 2019-05-14 06:26:19 -04:00
commit b4459c2dc1
33 changed files with 629 additions and 388 deletions


@@ -14,7 +14,7 @@ environment:
install:
- |
%PYTHON%\python.exe -m pip install -U pip
%PYTHON%\python.exe -m pip install wheel tox virtualenv
%PYTHON%\python.exe -m pip install wheel tox==3.9.0 virtualenv
# note:
# %PYTHON% has: python.exe

newsfragments/3025.minor (new file, empty)

newsfragments/3036.minor (new file, empty)

newsfragments/3038.minor (new file, empty)


@@ -32,12 +32,12 @@ def stop(config):
print("%s does not look like a running node directory (no twistd.pid)" % quoted_basedir, file=err)
# we define rc=2 to mean "nothing is running, but it wasn't me who
# stopped it"
return 2
return COULD_NOT_STOP
elif pid == -1:
print("%s contains an invalid PID file" % basedir, file=err)
# we define rc=2 to mean "nothing is running, but it wasn't me who
# stopped it"
return 2
return COULD_NOT_STOP
# kill it hard (SIGKILL), delete the twistd.pid file, then wait for the
# process itself to go away. If it hasn't gone away after 20 seconds, warn
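
For reference, a hedged sketch (not shown in this hunk) of the constant the two returns above now use; the import in the new cli_node_api.py below confirms it lives in allmydata.scripts.tahoe_stop, and the replaced "return 2" lines fix its value:

    # Exit code meaning "nothing is running, but it wasn't me who stopped it".
    COULD_NOT_STOP = 2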


@@ -19,7 +19,6 @@ from .common import (
parse_options,
)
timeout = 480 # deep_check takes 360s on Zandr's linksys box, others take > 240s
def _unsupported(what):
return "{} are not supported by Python on this platform.".format(what)
@@ -307,13 +306,6 @@ class Backup(GridTestMixin, CLITestMixin, StallMixin, unittest.TestCase):
return d
# on our old dapper buildslave, this test takes a long time (usually
# 130s), so we have to bump up the default 120s timeout. The create-alias
# and initial backup alone take 60s, probably because of the handful of
# dirnodes being created (RSA key generation). The backup between check4
# and check4a takes 6s, as does the backup before check4b.
test_backup.timeout = 3000
def _check_filtering(self, filtered, all, included, excluded):
filtered = set(filtered)
all = set(all)


@@ -12,8 +12,6 @@ from allmydata.scripts import debug
from ..no_network import GridTestMixin
from .common import CLITestMixin
timeout = 480 # deep_check takes 360s on Zandr's linksys box, others take > 240s
class Check(GridTestMixin, CLITestMixin, unittest.TestCase):
def test_check(self):


@@ -43,8 +43,6 @@ from twisted.python import usage
from allmydata.util.encodingutil import listdir_unicode, get_io_encoding
timeout = 480 # deep_check takes 360s on Zandr's linksys box, others take > 240s
class CLI(CLITestMixin, unittest.TestCase):
def _dump_cap(self, *args):
config = debug.DumpCapOptions()


@@ -14,8 +14,6 @@ from ..no_network import GridTestMixin
from .common import CLITestMixin
from ..common_util import skip_if_cannot_represent_filename
timeout = 480 # deep_check takes 360s on Zandr's linksys box, others take > 240s
class Cp(GridTestMixin, CLITestMixin, unittest.TestCase):
def test_not_enough_args(self):


@@ -9,8 +9,6 @@ from ..no_network import GridTestMixin
from allmydata.util.encodingutil import quote_output, get_io_encoding
from .common import CLITestMixin
timeout = 480 # deep_check takes 360s on Zandr's linksys box, others take > 240s
class CreateAlias(GridTestMixin, CLITestMixin, unittest.TestCase):
def _test_webopen(self, args, expected_url):


@@ -8,8 +8,6 @@ from ..no_network import GridTestMixin
from allmydata.util.encodingutil import quote_output, get_io_encoding
from .common import CLITestMixin
timeout = 480 # deep_check takes 360s on Zandr's linksys box, others take > 240s
class List(GridTestMixin, CLITestMixin, unittest.TestCase):
def test_list(self):
self.basedir = "cli/List/list"


@@ -5,8 +5,6 @@ from ..no_network import GridTestMixin
from allmydata.scripts import tahoe_mv
from .common import CLITestMixin
timeout = 480 # deep_check takes 360s on Zandr's linksys box, others take > 240s
class Mv(GridTestMixin, CLITestMixin, unittest.TestCase):
def test_mv_behavior(self):
self.basedir = "cli/Mv/mv_behavior"


@@ -11,8 +11,6 @@ from allmydata.util.encodingutil import get_io_encoding, unicode_to_argv
from allmydata.util.fileutil import abspath_expanduser_unicode
from .common import CLITestMixin
timeout = 480 # deep_check takes 360s on Zandr's linksys box, others take > 240s
class Put(GridTestMixin, CLITestMixin, unittest.TestCase):
def test_unlinked_immutable_stdin(self):
@@ -471,4 +469,3 @@ class Put(GridTestMixin, CLITestMixin, unittest.TestCase):
self.failUnlessReallyEqual(rc_out_err[1], DATA))
return d


@@ -0,0 +1,235 @@
__all__ = [
"CLINodeAPI",
"Expect",
"on_stdout",
"on_stdout_and_stderr",
"on_different",
"wait_for_exit",
]
import os
import sys
from errno import ENOENT
import attr
from twisted.internet.error import (
ProcessDone,
ProcessTerminated,
ProcessExitedAlready,
)
from twisted.internet.interfaces import (
IProcessProtocol,
)
from twisted.python.filepath import (
FilePath,
)
from twisted.python.runtime import (
platform,
)
from twisted.internet.protocol import (
Protocol,
ProcessProtocol,
)
from twisted.internet.defer import (
Deferred,
succeed,
)
from twisted.internet.task import (
deferLater,
)
from ..client import (
_Client,
)
from ..scripts.tahoe_stop import (
COULD_NOT_STOP,
)
from ..util.eliotutil import (
inline_callbacks,
)
class Expect(Protocol):
def __init__(self):
self._expectations = []
def get_buffered_output(self):
return self._buffer
def expect(self, expectation):
if expectation in self._buffer:
return succeed(None)
d = Deferred()
self._expectations.append((expectation, d))
return d
def connectionMade(self):
self._buffer = b""
def dataReceived(self, data):
self._buffer += data
for i in range(len(self._expectations) - 1, -1, -1):
expectation, d = self._expectations[i]
if expectation in self._buffer:
del self._expectations[i]
d.callback(None)
def connectionLost(self, reason):
for ignored, d in self._expectations:
d.errback(reason)
class _ProcessProtocolAdapter(ProcessProtocol):
def __init__(self, fds):
self._fds = fds
def connectionMade(self):
for proto in self._fds.values():
proto.makeConnection(self.transport)
def childDataReceived(self, childFD, data):
try:
proto = self._fds[childFD]
except KeyError:
pass
else:
proto.dataReceived(data)
def processEnded(self, reason):
notified = set()
for proto in self._fds.values():
if proto not in notified:
proto.connectionLost(reason)
notified.add(proto)
def on_stdout(protocol):
return _ProcessProtocolAdapter({1: protocol})
def on_stdout_and_stderr(protocol):
return _ProcessProtocolAdapter({1: protocol, 2: protocol})
def on_different(fd_mapping):
return _ProcessProtocolAdapter(fd_mapping)
@attr.s
class CLINodeAPI(object):
reactor = attr.ib()
basedir = attr.ib(type=FilePath)
process = attr.ib(default=None)
@property
def twistd_pid_file(self):
return self.basedir.child(u"twistd.pid")
@property
def node_url_file(self):
return self.basedir.child(u"node.url")
@property
def storage_furl_file(self):
return self.basedir.child(u"private").child(u"storage.furl")
@property
def introducer_furl_file(self):
return self.basedir.child(u"private").child(u"introducer.furl")
@property
def config_file(self):
return self.basedir.child(u"tahoe.cfg")
@property
def exit_trigger_file(self):
return self.basedir.child(_Client.EXIT_TRIGGER_FILE)
def _execute(self, process_protocol, argv):
exe = sys.executable
argv = [
exe,
u"-m",
u"allmydata.scripts.runner",
] + argv
return self.reactor.spawnProcess(
processProtocol=process_protocol,
executable=exe,
args=argv,
env=os.environ,
)
def run(self, protocol, extra_tahoe_args=()):
"""
Start the node running.
:param IProcessProtocol protocol: This protocol will be hooked up to
the node process and can handle output or generate input.
"""
if not IProcessProtocol.providedBy(protocol):
raise TypeError("run requires process protocol, got {}".format(protocol))
self.process = self._execute(
protocol,
list(extra_tahoe_args) + [u"run", self.basedir.asTextMode().path],
)
# Don't let the process run away forever.
try:
self.active()
except OSError as e:
if ENOENT != e.errno:
raise
def stop(self, protocol):
self._execute(
protocol,
[u"stop", self.basedir.asTextMode().path],
)
@inline_callbacks
def stop_and_wait(self):
if platform.isWindows():
# On Windows there is no PID file and no "tahoe stop".
if self.process is not None:
while True:
try:
self.process.signalProcess("TERM")
except ProcessExitedAlready:
break
else:
yield deferLater(self.reactor, 0.1, lambda: None)
else:
protocol, ended = wait_for_exit()
self.stop(protocol)
yield ended
def active(self):
# By writing this file, we get two minutes before the client will
# exit. This ensures that even if the 'stop' command doesn't work (and
# the test fails), the client should still terminate.
self.exit_trigger_file.touch()
def _check_cleanup_reason(self, reason):
# Let it fail because the process has already exited.
reason.trap(ProcessTerminated)
if reason.value.exitCode != COULD_NOT_STOP:
return reason
return None
def cleanup(self):
stopping = self.stop_and_wait()
stopping.addErrback(self._check_cleanup_reason)
return stopping
class _WaitForEnd(ProcessProtocol):
def __init__(self, ended):
self._ended = ended
def processEnded(self, reason):
if reason.check(ProcessDone):
self._ended.callback(None)
else:
self._ended.errback(reason)
def wait_for_exit():
ended = Deferred()
protocol = _WaitForEnd(ended)
return protocol, ended
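
A hypothetical usage sketch (not part of the commit) showing how these helpers fit together; it mirrors the calls the rewritten test_runner.py below makes:

    # Inside an inline_callbacks-decorated trial test method:
    tahoe = CLINodeAPI(reactor, FilePath(self.mktemp()))
    self.addCleanup(tahoe.cleanup)

    p = Expect()
    tahoe.run(on_stdout(p))           # spawn "tahoe run <basedir>"
    yield p.expect("client running")  # wait for the startup banner
    tahoe.active()                    # refresh the exit trigger file

    yield tahoe.stop_and_wait()       # stop the node and wait for exit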


@@ -16,8 +16,6 @@ from .. import common_util as testutil
SEGSIZE = 128*1024
class Update(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin):
timeout = 400 # these tests are too big, 120s is not enough on slow
# platforms
def setUp(self):
GridTestMixin.setUp(self)
self.basedir = self.mktemp()


@@ -544,5 +544,3 @@ class TooParallel(GridTestMixin, unittest.TestCase):
return res
d.addBoth(_clean_up)
return d
test_immutable.timeout = 80


@@ -20,8 +20,6 @@ from .common_web import do_http
from allmydata.test.no_network import GridTestMixin
from .cli.common import CLITestMixin
timeout = 2400 # One of these took 1046.091s on Zandr's ARM box.
class MutableChecker(GridTestMixin, unittest.TestCase, ErrorMixin):
def test_good(self):
self.basedir = "deepcheck/MutableChecker/good"


@@ -63,7 +63,6 @@ one_nfd = u"one\u0304"
class Dirnode(GridTestMixin, unittest.TestCase,
testutil.ReallyEqualMixin, testutil.ShouldFailMixin, testutil.StallMixin, ErrorMixin):
timeout = 480 # It occasionally takes longer than 240 seconds on Francois's arm box.
def _do_create_test(self, mdmf=False):
c = self.g.clients[0]
@@ -1758,7 +1757,6 @@ class Dirnode2(testutil.ReallyEqualMixin, testutil.ShouldFailMixin, unittest.Tes
class DeepStats(testutil.ReallyEqualMixin, unittest.TestCase):
timeout = 240 # It takes longer than 120 seconds on Francois's arm box.
def test_stats(self):
ds = dirnode.DeepStats(None)
ds.add("count-files")
@@ -1823,7 +1821,6 @@ class UCWEingNodeMaker(NodeMaker):
class Deleter(GridTestMixin, testutil.ReallyEqualMixin, unittest.TestCase):
timeout = 3600 # It takes longer than 433 seconds on Zandr's ARM box.
def test_retry(self):
# ticket #550, a dirnode.delete which experiences an
# UncoordinatedWriteError will fail with an incorrect "you're


@@ -194,7 +194,6 @@ class _Base(GridTestMixin, ShouldFailMixin):
return d
class DownloadTest(_Base, unittest.TestCase):
timeout = 2400 # It takes longer than 240 seconds on Zandr's ARM box.
def test_download(self):
self.basedir = self.mktemp()
self.set_up_grid()


@@ -163,7 +163,6 @@ def make_data(length):
return data[:length]
class ValidatedExtendedURIProxy(unittest.TestCase):
timeout = 240 # It takes longer than 120 seconds on Francois's arm box.
K = 4
M = 10
SIZE = 200
@@ -260,8 +259,6 @@ class ValidatedExtendedURIProxy(unittest.TestCase):
return defer.DeferredList(dl)
class Encode(unittest.TestCase):
timeout = 2400 # It takes longer than 240 seconds on Zandr's ARM box.
def do_encode(self, max_segment_size, datalen, NUM_SHARES, NUM_SEGMENTS,
expected_block_hashes, expected_share_hashes):
data = make_data(datalen)


@@ -114,7 +114,6 @@ def upload_data(uploader, data, convergence):
return uploader.upload(u)
class AssistedUpload(unittest.TestCase):
timeout = 240 # It takes longer than 120 seconds on Francois's arm box.
def setUp(self):
self.tub = t = Tub()
t.setOption("expose-remote-exception-types", False)


@@ -19,15 +19,6 @@ mutable_plaintext = "muta" * 10000
class HungServerDownloadTest(GridTestMixin, ShouldFailMixin, PollMixin,
unittest.TestCase):
# Many of these tests take around 60 seconds on François's ARM buildslave:
# http://tahoe-lafs.org/buildbot/builders/FranXois%20lenny-armv5tel
# allmydata.test.test_hung_server.HungServerDownloadTest.test_2_good_8_broken_duplicate_share_fail
# once ERRORed after 197 seconds on Midnight Magic's NetBSD buildslave:
# http://tahoe-lafs.org/buildbot/builders/MM%20netbsd4%20i386%20warp
# MM's buildslave varies a lot in how long it takes to run tests.
timeout = 240
def _break(self, servers):
for (id, ss) in servers:
self.g.break_server(id)


@@ -715,8 +715,6 @@ class SystemTest(SystemTestMixin, AsyncTestCase):
self.basedir = "introducer/SystemTest/system_v2_server"
os.makedirs(self.basedir)
return self.do_system_test()
test_system_v2_server.timeout = 480
# occasionally takes longer than 350s on "draco"
class FakeRemoteReference:
def notifyOnDisconnect(self, *args, **kwargs): pass


@@ -2418,7 +2418,6 @@ class RealTest(SingleMagicFolderTestMixin, AsyncTestCase):
class RealTestAliceBob(MagicFolderAliceBobTestMixin, AsyncTestCase):
"""This is skipped unless both Twisted and the platform support inotify."""
inject_inotify = False
timeout = 15
def setUp(self):
d = super(RealTestAliceBob, self).setUp()


@@ -4,26 +4,40 @@ from __future__ import (
)
import os.path, re, sys
from os import linesep
from twisted.trial import unittest
from twisted.python import usage, runtime
from twisted.internet.defer import inlineCallbacks, returnValue
from twisted.internet import reactor
from twisted.python import usage
from twisted.internet.defer import (
inlineCallbacks,
returnValue,
DeferredList,
)
from twisted.python.filepath import FilePath
from twisted.python.runtime import (
platform,
)
from allmydata.util import fileutil, pollmixin
from allmydata.util.encodingutil import unicode_to_argv, unicode_to_output, \
get_filesystem_encoding
from allmydata.client import _Client
from allmydata.test import common_util
import allmydata
from allmydata import __appname__
from .common_util import parse_cli, run_cli
from .cli_node_api import (
CLINodeAPI,
Expect,
on_stdout,
on_stdout_and_stderr,
)
from ._twisted_9607 import (
getProcessOutputAndValue,
)
timeout = 240
from ..util.eliotutil import (
inline_callbacks,
)
def get_root_from_file(src):
srcdir = os.path.dirname(os.path.dirname(os.path.normcase(os.path.realpath(src))))
@@ -41,13 +55,7 @@ def get_root_from_file(src):
srcfile = allmydata.__file__
rootdir = get_root_from_file(srcfile)
class RunBinTahoeMixin:
def skip_if_cannot_daemonize(self):
if runtime.platformType == "win32":
# twistd on windows doesn't daemonize. cygwin should work normally.
raise unittest.SkipTest("twistd does not fork under windows")
@inlineCallbacks
def find_import_location(self):
res = yield self.run_bintahoe(["--version-and-path"])
@@ -364,357 +372,283 @@ class CreateNode(unittest.TestCase):
# can't provide all three
_test("create-stats-gatherer --hostname=foo --location=foo --port=foo D")
class RunNode(common_util.SignalMixin, unittest.TestCase, pollmixin.PollMixin,
RunBinTahoeMixin):
# exercise "tahoe start", for both introducer, client node, and
# key-generator, by spawning "tahoe start" as a subprocess. This doesn't
# get us figleaf-based line-level coverage, but it does a better job of
# confirming that the user can actually run "./bin/tahoe start" and
# expect it to work. This verifies that bin/tahoe sets up PYTHONPATH and
# the like correctly.
"""
exercise "tahoe run" for both introducer, client node, and key-generator,
by spawning "tahoe run" (or "tahoe start") as a subprocess. This doesn't
get us line-level coverage, but it does a better job of confirming that
the user can actually run "./bin/tahoe run" and expect it to work. This
verifies that bin/tahoe sets up PYTHONPATH and the like correctly.
# This doesn't work on cygwin (it hangs forever), so we skip this test
# when we're on cygwin. It is likely that "tahoe start" itself doesn't
# work on cygwin: twisted seems unable to provide a version of
# spawnProcess which really works there.
This doesn't work on cygwin (it hangs forever), so we skip this test
when we're on cygwin. It is likely that "tahoe start" itself doesn't
work on cygwin: twisted seems unable to provide a version of
spawnProcess which really works there.
"""
def workdir(self, name):
basedir = os.path.join("test_runner", "RunNode", name)
fileutil.make_dirs(basedir)
return basedir
@inline_callbacks
def test_introducer(self):
self.skip_if_cannot_daemonize()
"""
The introducer furl is stable across restarts.
"""
basedir = self.workdir("test_introducer")
c1 = os.path.join(basedir, "c1")
exit_trigger_file = os.path.join(c1, _Client.EXIT_TRIGGER_FILE)
twistd_pid_file = os.path.join(c1, "twistd.pid")
introducer_furl_file = os.path.join(c1, "private", "introducer.furl")
node_url_file = os.path.join(c1, "node.url")
config_file = os.path.join(c1, "tahoe.cfg")
tahoe = CLINodeAPI(reactor, FilePath(c1))
self.addCleanup(tahoe.stop_and_wait)
d = self.run_bintahoe(["--quiet", "create-introducer", "--basedir", c1, "--hostname", "localhost"])
def _cb(res):
out, err, rc_or_sig = res
self.failUnlessEqual(rc_or_sig, 0)
out, err, rc_or_sig = yield self.run_bintahoe([
"--quiet",
"create-introducer",
"--basedir", c1,
"--hostname", "127.0.0.1",
])
# This makes sure that node.url is written, which allows us to
# detect when the introducer restarts in _node_has_restarted below.
config = fileutil.read(config_file)
self.failUnlessIn('\nweb.port = \n', config)
fileutil.write(config_file, config.replace('\nweb.port = \n', '\nweb.port = 0\n'))
self.assertEqual(rc_or_sig, 0)
# by writing this file, we get ten seconds before the node will
# exit. This ensures that even if the test fails (and the 'stop'
# command doesn't work), the client should still terminate.
fileutil.write(exit_trigger_file, "")
# now it's safe to start the node
d.addCallback(_cb)
# This makes sure that node.url is written, which allows us to
# detect when the introducer restarts in _node_has_restarted below.
config = fileutil.read(tahoe.config_file.path)
self.assertIn('{}web.port = {}'.format(linesep, linesep), config)
fileutil.write(
tahoe.config_file.path,
config.replace(
'{}web.port = {}'.format(linesep, linesep),
'{}web.port = 0{}'.format(linesep, linesep),
)
)
def _then_start_the_node(res):
return self.run_bintahoe(["--quiet", "start", c1])
d.addCallback(_then_start_the_node)
p = Expect()
tahoe.run(on_stdout(p))
yield p.expect("introducer running")
tahoe.active()
def _cb2(res):
out, err, rc_or_sig = res
yield self.poll(tahoe.introducer_furl_file.exists)
fileutil.write(exit_trigger_file, "")
errstr = "rc=%d, OUT: '%s', ERR: '%s'" % (rc_or_sig, out, err)
self.failUnlessEqual(rc_or_sig, 0, errstr)
self.failUnlessEqual(out, "", errstr)
# self.failUnlessEqual(err, "", errstr) # See test_client_no_noise -- for now we ignore noise.
# read the introducer.furl file so we can check that the contents
# don't change on restart
furl = fileutil.read(tahoe.introducer_furl_file.path)
# the parent (twistd) has exited. However, twistd writes the pid
# from the child, not the parent, so we can't expect twistd.pid
# to exist quite yet.
tahoe.active()
# the node is running, but it might not have made it past the
# first reactor turn yet, and if we kill it too early, it won't
# remove the twistd.pid file. So wait until it does something
# that we know it won't do until after the first turn.
d.addCallback(_cb2)
# We don't keep track of PIDs in files on Windows.
if not platform.isWindows():
self.assertTrue(tahoe.twistd_pid_file.exists())
self.assertTrue(tahoe.node_url_file.exists())
def _node_has_started():
return os.path.exists(introducer_furl_file)
d.addCallback(lambda res: self.poll(_node_has_started))
# rm this so we can detect when the second incarnation is ready
tahoe.node_url_file.remove()
def _started(res):
# read the introducer.furl file so we can check that the contents
# don't change on restart
self.furl = fileutil.read(introducer_furl_file)
yield tahoe.stop_and_wait()
fileutil.write(exit_trigger_file, "")
self.failUnless(os.path.exists(twistd_pid_file))
self.failUnless(os.path.exists(node_url_file))
p = Expect()
tahoe.run(on_stdout(p))
yield p.expect("introducer running")
# rm this so we can detect when the second incarnation is ready
os.unlink(node_url_file)
return self.run_bintahoe(["--quiet", "restart", c1])
d.addCallback(_started)
# Again, the second incarnation of the node might not be ready yet, so
# poll until it is. This time introducer_furl_file already exists, so
# we check for the existence of node_url_file instead.
yield self.poll(tahoe.node_url_file.exists)
def _then(res):
out, err, rc_or_sig = res
fileutil.write(exit_trigger_file, "")
errstr = "rc=%d, OUT: '%s', ERR: '%s'" % (rc_or_sig, out, err)
self.failUnlessEqual(rc_or_sig, 0, errstr)
self.failUnlessEqual(out, "", errstr)
# self.failUnlessEqual(err, "", errstr) # See test_client_no_noise -- for now we ignore noise.
d.addCallback(_then)
# Again, the second incarnation of the node might not be ready yet,
# so poll until it is. This time introducer_furl_file already
# exists, so we check for the existence of node_url_file instead.
def _node_has_restarted():
return os.path.exists(node_url_file)
d.addCallback(lambda res: self.poll(_node_has_restarted))
def _check_same_furl(res):
self.failUnless(os.path.exists(introducer_furl_file))
self.failUnlessEqual(self.furl, fileutil.read(introducer_furl_file))
d.addCallback(_check_same_furl)
# Now we can kill it. TODO: On a slow machine, the node might kill
# itself before we get a chance to, especially if spawning the
# 'tahoe stop' command takes a while.
def _stop(res):
fileutil.write(exit_trigger_file, "")
self.failUnless(os.path.exists(twistd_pid_file))
return self.run_bintahoe(["--quiet", "stop", c1])
d.addCallback(_stop)
def _after_stopping(res):
out, err, rc_or_sig = res
fileutil.write(exit_trigger_file, "")
# the parent has exited by now
errstr = "rc=%d, OUT: '%s', ERR: '%s'" % (rc_or_sig, out, err)
self.failUnlessEqual(rc_or_sig, 0, errstr)
self.failUnlessEqual(out, "", errstr)
# self.failUnlessEqual(err, "", errstr) # See test_client_no_noise -- for now we ignore noise.
# the parent was supposed to poll and wait until it sees
# twistd.pid go away before it exits, so twistd.pid should be
# gone by now.
self.failIf(os.path.exists(twistd_pid_file))
d.addCallback(_after_stopping)
d.addBoth(self._remove, exit_trigger_file)
return d
# This test has hit a 240-second timeout on our feisty2.5 buildslave, and a 480-second timeout
# on Francois's Lenny-armv5tel buildslave.
test_introducer.timeout = 960
def test_client_no_noise(self):
self.skip_if_cannot_daemonize()
basedir = self.workdir("test_client_no_noise")
c1 = os.path.join(basedir, "c1")
exit_trigger_file = os.path.join(c1, _Client.EXIT_TRIGGER_FILE)
twistd_pid_file = os.path.join(c1, "twistd.pid")
node_url_file = os.path.join(c1, "node.url")
d = self.run_bintahoe(["--quiet", "create-client", "--basedir", c1, "--webport", "0"])
def _cb(res):
out, err, rc_or_sig = res
errstr = "cc=%d, OUT: '%s', ERR: '%s'" % (rc_or_sig, out, err)
assert rc_or_sig == 0, errstr
self.failUnlessEqual(rc_or_sig, 0)
# By writing this file, we get two minutes before the client will exit. This ensures
# that even if the 'stop' command doesn't work (and the test fails), the client should
# still terminate.
fileutil.write(exit_trigger_file, "")
# now it's safe to start the node
d.addCallback(_cb)
def _start(res):
return self.run_bintahoe(["--quiet", "start", c1])
d.addCallback(_start)
def _cb2(res):
out, err, rc_or_sig = res
errstr = "cc=%d, OUT: '%s', ERR: '%s'" % (rc_or_sig, out, err)
fileutil.write(exit_trigger_file, "")
self.failUnlessEqual(rc_or_sig, 0, errstr)
self.failUnlessEqual(out, "", errstr) # If you emit noise, you fail this test.
errlines = err.split("\n")
self.failIf([True for line in errlines if (line != "" and "UserWarning: Unbuilt egg for setuptools" not in line
and "from pkg_resources import load_entry_point" not in line)], errstr)
if err != "":
raise unittest.SkipTest("This test is known not to pass on Ubuntu Lucid; see #1235.")
# the parent (twistd) has exited. However, twistd writes the pid
# from the child, not the parent, so we can't expect twistd.pid
# to exist quite yet.
# the node is running, but it might not have made it past the
# first reactor turn yet, and if we kill it too early, it won't
# remove the twistd.pid file. So wait until it does something
# that we know it won't do until after the first turn.
d.addCallback(_cb2)
def _node_has_started():
return os.path.exists(node_url_file)
d.addCallback(lambda res: self.poll(_node_has_started))
# now we can kill it. TODO: On a slow machine, the node might kill
# itself before we get a chance to, especially if spawning the
# 'tahoe stop' command takes a while.
def _stop(res):
self.failUnless(os.path.exists(twistd_pid_file),
(twistd_pid_file, os.listdir(os.path.dirname(twistd_pid_file))))
return self.run_bintahoe(["--quiet", "stop", c1])
d.addCallback(_stop)
d.addBoth(self._remove, exit_trigger_file)
return d
# The point of this test! After starting the second time the
# introducer furl file must exist and contain the same contents as it
# did before.
self.assertTrue(tahoe.introducer_furl_file.exists())
self.assertEqual(furl, fileutil.read(tahoe.introducer_furl_file.path))
@inline_callbacks
def test_client(self):
self.skip_if_cannot_daemonize()
"""
Test many things.
0) Verify that "tahoe create-node" takes a --webport option and writes
the value to the configuration file.
1) Verify that "tahoe run" writes a pid file and a node url file (on POSIX).
2) Verify that the storage furl file has a stable value across a
"tahoe run" / "tahoe stop" / "tahoe run" sequence.
3) Verify that the pid file is removed after "tahoe stop" succeeds (on POSIX).
"""
basedir = self.workdir("test_client")
c1 = os.path.join(basedir, "c1")
exit_trigger_file = os.path.join(c1, _Client.EXIT_TRIGGER_FILE)
twistd_pid_file = os.path.join(c1, "twistd.pid")
node_url_file = os.path.join(c1, "node.url")
storage_furl_file = os.path.join(c1, "private", "storage.furl")
config_file = os.path.join(c1, "tahoe.cfg")
d = self.run_bintahoe(["--quiet", "create-node", "--basedir", c1,
"--webport", "0",
"--hostname", "localhost"])
def _cb(res):
out, err, rc_or_sig = res
self.failUnlessEqual(rc_or_sig, 0)
tahoe = CLINodeAPI(reactor, FilePath(c1))
# Set this up right now so we don't forget later.
self.addCleanup(tahoe.cleanup)
# Check that the --webport option worked.
config = fileutil.read(config_file)
self.failUnlessIn('\nweb.port = 0\n', config)
out, err, rc_or_sig = yield self.run_bintahoe([
"--quiet", "create-node", "--basedir", c1,
"--webport", "0",
"--hostname", "localhost",
])
self.failUnlessEqual(rc_or_sig, 0)
# By writing this file, we get two minutes before the client will
# exit. This ensures that even if the 'stop' command doesn't work
# (and the test fails), the client should still terminate.
fileutil.write(exit_trigger_file, "")
# now it's safe to start the node
d.addCallback(_cb)
# Check that the --webport option worked.
config = fileutil.read(tahoe.config_file.path)
self.assertIn(
'{}web.port = 0{}'.format(linesep, linesep),
config,
)
def _start(res):
return self.run_bintahoe(["--quiet", "start", c1])
d.addCallback(_start)
# After this it's safe to start the node
tahoe.active()
def _cb2(res):
out, err, rc_or_sig = res
fileutil.write(exit_trigger_file, "")
errstr = "rc=%d, OUT: '%s', ERR: '%s'" % (rc_or_sig, out, err)
self.failUnlessEqual(rc_or_sig, 0, errstr)
self.failUnlessEqual(out, "", errstr)
# self.failUnlessEqual(err, "", errstr) # See test_client_no_noise -- for now we ignore noise.
p = Expect()
# This will run until we stop it.
tahoe.run(on_stdout(p))
# Wait for startup to have proceeded to a reasonable point.
yield p.expect("client running")
tahoe.active()
# the parent (twistd) has exited. However, twistd writes the pid
# from the child, not the parent, so we can't expect twistd.pid
# to exist quite yet.
# read the storage.furl file so we can check that its contents don't
# change on restart
storage_furl = fileutil.read(tahoe.storage_furl_file.path)
# the node is running, but it might not have made it past the
# first reactor turn yet, and if we kill it too early, it won't
# remove the twistd.pid file. So wait until it does something
# that we know it won't do until after the first turn.
d.addCallback(_cb2)
# We don't keep track of PIDs in files on Windows.
if not platform.isWindows():
self.assertTrue(tahoe.twistd_pid_file.exists())
def _node_has_started():
return os.path.exists(node_url_file)
d.addCallback(lambda res: self.poll(_node_has_started))
# rm this so we can detect when the second incarnation is ready
tahoe.node_url_file.remove()
yield tahoe.stop_and_wait()
def _started(res):
# read the storage.furl file so we can check that its contents
# don't change on restart
self.storage_furl = fileutil.read(storage_furl_file)
p = Expect()
# We don't have to add another cleanup for this one, the one from
# above is still registered.
tahoe.run(on_stdout(p))
yield p.expect("client running")
tahoe.active()
fileutil.write(exit_trigger_file, "")
self.failUnless(os.path.exists(twistd_pid_file))
self.assertEqual(
storage_furl,
fileutil.read(tahoe.storage_furl_file.path),
)
# rm this so we can detect when the second incarnation is ready
os.unlink(node_url_file)
return self.run_bintahoe(["--quiet", "restart", c1])
d.addCallback(_started)
if not platform.isWindows():
self.assertTrue(
tahoe.twistd_pid_file.exists(),
"PID file ({}) didn't exist when we expected it to. "
"These exist: {}".format(
tahoe.twistd_pid_file,
tahoe.twistd_pid_file.parent().listdir(),
),
)
yield tahoe.stop_and_wait()
def _cb3(res):
out, err, rc_or_sig = res
if not platform.isWindows():
# twistd.pid should be gone by now.
self.assertFalse(tahoe.twistd_pid_file.exists())
fileutil.write(exit_trigger_file, "")
errstr = "rc=%d, OUT: '%s', ERR: '%s'" % (rc_or_sig, out, err)
self.failUnlessEqual(rc_or_sig, 0, errstr)
self.failUnlessEqual(out, "", errstr)
# self.failUnlessEqual(err, "", errstr) # See test_client_no_noise -- for now we ignore noise.
d.addCallback(_cb3)
# again, the second incarnation of the node might not be ready yet,
# so poll until it is
d.addCallback(lambda res: self.poll(_node_has_started))
def _check_same_furl(res):
self.failUnlessEqual(self.storage_furl,
fileutil.read(storage_furl_file))
d.addCallback(_check_same_furl)
# now we can kill it. TODO: On a slow machine, the node might kill
# itself before we get a chance to, especially if spawning the
# 'tahoe stop' command takes a while.
def _stop(res):
fileutil.write(exit_trigger_file, "")
self.failUnless(os.path.exists(twistd_pid_file),
(twistd_pid_file, os.listdir(os.path.dirname(twistd_pid_file))))
return self.run_bintahoe(["--quiet", "stop", c1])
d.addCallback(_stop)
def _cb4(res):
out, err, rc_or_sig = res
fileutil.write(exit_trigger_file, "")
# the parent has exited by now
errstr = "rc=%d, OUT: '%s', ERR: '%s'" % (rc_or_sig, out, err)
self.failUnlessEqual(rc_or_sig, 0, errstr)
self.failUnlessEqual(out, "", errstr)
# self.failUnlessEqual(err, "", errstr) # See test_client_no_noise -- for now we ignore noise.
# the parent was supposed to poll and wait until it sees
# twistd.pid go away before it exits, so twistd.pid should be
# gone by now.
self.failIf(os.path.exists(twistd_pid_file))
d.addCallback(_cb4)
d.addBoth(self._remove, exit_trigger_file)
return d
def _remove(self, res, file):
fileutil.remove(file)
return res
def test_baddir(self):
self.skip_if_cannot_daemonize()
basedir = self.workdir("test_baddir")
def test_run_bad_directory(self):
"""
If ``tahoe run`` is pointed at a non-node directory, it reports an error
and exits.
"""
return self._bad_directory_test(
u"test_run_bad_directory",
"tahoe run",
lambda tahoe, p: tahoe.run(p),
"is not a recognizable node directory",
)
def test_run_bogus_directory(self):
"""
If ``tahoe run`` is pointed at a non-directory, it reports an error and
exits.
"""
return self._bad_directory_test(
u"test_run_bogus_directory",
"tahoe run",
lambda tahoe, p: CLINodeAPI(
tahoe.reactor,
tahoe.basedir.sibling(u"bogus"),
).run(p),
"does not look like a directory at all"
)
def test_stop_bad_directory(self):
"""
If ``tahoe run`` is pointed at a directory where no node is running, it
reports an error and exits.
"""
return self._bad_directory_test(
u"test_stop_bad_directory",
"tahoe stop",
lambda tahoe, p: tahoe.stop(p),
"does not look like a running node directory",
)
@inline_callbacks
def _bad_directory_test(self, workdir, description, operation, expected_message):
"""
Verify that a certain ``tahoe`` CLI operation produces a certain expected
message and then exits.
:param unicode workdir: A distinct path name for this test to operate
on.
:param unicode description: A description of the operation being
performed.
:param operation: A two-argument callable implementing the operation.
The first argument is a ``CLINodeAPI`` instance to use to perform
the operation. The second argument is an ``IProcessProtocol`` to
which the operation's output must be delivered.
:param unicode expected_message: Some text that is expected in the
stdout or stderr of the operation in the successful case.
:return: A ``Deferred`` that fires when the assertions have been made.
"""
basedir = self.workdir(workdir)
fileutil.make_dirs(basedir)
d = self.run_bintahoe(["--quiet", "start", "--basedir", basedir])
def _cb(res):
out, err, rc_or_sig = res
self.failUnlessEqual(rc_or_sig, 1)
self.failUnless("is not a recognizable node directory" in err, err)
d.addCallback(_cb)
tahoe = CLINodeAPI(reactor, FilePath(basedir))
# If tahoe ends up thinking it should keep running, make sure it stops
# promptly when the test is done.
self.addCleanup(tahoe.cleanup)
def _then_stop_it(res):
return self.run_bintahoe(["--quiet", "stop", "--basedir", basedir])
d.addCallback(_then_stop_it)
p = Expect()
operation(tahoe, on_stdout_and_stderr(p))
def _cb2(res):
out, err, rc_or_sig = res
self.failUnlessEqual(rc_or_sig, 2)
self.failUnless("does not look like a running node directory" in err)
d.addCallback(_cb2)
client_running = p.expect(b"client running")
def _then_start_in_bogus_basedir(res):
not_a_dir = os.path.join(basedir, "bogus")
return self.run_bintahoe(["--quiet", "start", "--basedir", not_a_dir])
d.addCallback(_then_start_in_bogus_basedir)
result, index = yield DeferredList([
p.expect(expected_message),
client_running,
], fireOnOneCallback=True, consumeErrors=True,
)
def _cb3(res):
out, err, rc_or_sig = res
self.failUnlessEqual(rc_or_sig, 1)
self.failUnlessIn("does not look like a directory at all", err)
d.addCallback(_cb3)
return d
self.assertEqual(
index,
0,
"Expected error message from '{}', got something else: {}".format(
description,
p.get_buffered_output(),
),
)
if not platform.isWindows():
# It should not be running.
self.assertFalse(tahoe.twistd_pid_file.exists())
# Wait for the operation to *complete*. If we got this far it's
# because we got the expected message so we can expect the "tahoe ..."
# child process to exit very soon. This other Deferred will fail when
# it eventually does but DeferredList above will consume the error.
# What's left is a perfect indicator that the process has exited and
# we won't get blamed for leaving the reactor dirty.
yield client_running


@@ -35,8 +35,6 @@ from allmydata.test.no_network import GridTestMixin
from allmydata.test.common import ShouldFailMixin
from allmydata.test.common_util import ReallyEqualMixin
timeout = 240
class Handler(GridTestMixin, ShouldFailMixin, ReallyEqualMixin, unittest.TestCase):
"""This is a no-network unit test of the SFTPUserHandler and the abstractions it uses."""


@@ -421,12 +421,6 @@ def _render_section_values(values):
class SystemTestMixin(pollmixin.PollMixin, testutil.StallMixin):
# SystemTestMixin tests tend to be a lot of work, and we have a few
# buildslaves that are pretty slow, and every once in a while these tests
# run up against the default 120 second timeout. So increase the default
# timeout. Individual test cases can override this, of course.
timeout = 300
def setUp(self):
self.port_assigner = SameProcessStreamEndpointAssigner()
self.port_assigner.setUp()
@@ -752,7 +746,6 @@ class CountingDataUploadable(upload.Data):
return upload.Data.read(self, length)
class SystemTest(SystemTestMixin, RunBinTahoeMixin, unittest.TestCase):
timeout = 3600 # It takes longer than 960 seconds on Zandr's ARM box.
def test_connections(self):
self.basedir = "system/SystemTest/test_connections"


@@ -27,11 +27,6 @@ MiB = 1024*1024
def extract_uri(results):
return results.get_uri()
# Some of these took longer than 480 seconds on Zandr's arm box, but this may
# have been due to an earlier test ERROR'ing out due to timeout, which seems
# to screw up subsequent tests.
timeout = 960
class Uploadable(unittest.TestCase):
def shouldEqual(self, data, expected):
self.failUnless(isinstance(data, list))


@@ -1060,7 +1060,6 @@ class Abbreviate(unittest.TestCase):
self.failUnlessIn("fhtagn", str(e))
class Limiter(unittest.TestCase):
timeout = 480 # This takes longer than 240 seconds on Francois's arm box.
def job(self, i, foo):
self.calls.append( (i, foo) )


@@ -57,8 +57,6 @@ from ..status import FakeStatus
# create a fake uploader/downloader, and a couple of fake dirnodes, then
# create a webserver that works against them
timeout = 480 # Most of these take longer than 240 seconds on Francois's arm box.
class FakeStatsProvider:
def get_stats(self):
stats = {'stats': {}, 'counters': {}}


@@ -0,0 +1,63 @@
"""
Helpers for managing garbage collection.
:ivar fileDescriptorResource: A garbage-collection-informing resource tracker
for file descriptors. This is used to trigger a garbage collection when
it may be possible to reclaim a significant number of file descriptors as
a result. Register allocation and release of *bare* file descriptors with
this object (file objects, socket objects, etc, have their own integration
with the garbage collector and don't need to bother with this).
"""
__all__ = [
"fileDescriptorResource",
]
import gc
import attr
@attr.s
class _ResourceTracker(object):
"""
Keep track of some kind of resource and trigger a full garbage collection
when allocations outnumber releases by some amount.
:ivar int _counter: The number of allocations that have happened in excess
of releases since the last full collection triggered by this tracker.
:ivar int _threshold: The number of excess allocations at which point a
full collection will be triggered.
"""
_counter = attr.ib(default=0)
_threshold = attr.ib(default=25)
def allocate(self):
"""
Register the allocation of an instance of this resource.
"""
self._counter += 1
if self._counter > self._threshold:
gc.collect()
# Garbage collection of this resource has done what it can do. If
# nothing was collected, it doesn't make any sense to trigger
# another full collection the very next time the resource is
# allocated. Start the counter over again. The next collection
# happens when we again exceed the threshold.
self._counter = 0
def release(self):
"""
Register the release of an instance of this resource.
"""
if self._counter > 0:
# If there were any excess allocations at this point, account for
# there now being one fewer. It is not helpful to allow the
# counter to go below zero (as naturally would if a collection is
# triggered and then subsequently resources are released). In
# that case, we would be operating as if we had set a higher
# threshold and that is not desired.
self._counter -= 1
fileDescriptorResource = _ResourceTracker()
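
A hedged usage sketch (assumed, not in the diff; "sock" and "use" are stand-ins): callers pair allocate() with release() around the lifetime of a bare file descriptor, as the iputil.py changes below do for the dup'd listening socket:

    fileDescriptorResource.allocate()
    fd = os.dup(sock.fileno())   # a bare fd the garbage collector can't see
    try:
        use(fd)
    finally:
        os.close(fd)
        fileDescriptorResource.release()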


@@ -2,6 +2,10 @@
import os, re, socket, subprocess, errno
from sys import platform
from zope.interface import implementer
import attr
# from Twisted
from twisted.python.reflect import requireModule
from twisted.internet import defer, threads, reactor
@@ -10,7 +14,7 @@ from twisted.internet.error import CannotListenError
from twisted.python.procutils import which
from twisted.python import log
from twisted.internet.endpoints import AdoptedStreamServerEndpoint
from twisted.internet.interfaces import IReactorSocket
from twisted.internet.interfaces import (
IReactorSocket,
IStreamServerEndpoint,
)
from .gcutil import (
fileDescriptorResource,
)
fcntl = requireModule("fcntl")
@@ -268,14 +279,23 @@ def _foolscapEndpointForPortNumber(portnum):
s.bind(('', 0))
portnum = s.getsockname()[1]
s.listen(1)
# File descriptors are a relatively scarce resource. The
# cleanup process for the file descriptor we're about to dup
# is unfortunately complicated. In particular, it involves
# the Python garbage collector. See CleanupEndpoint for
# details of that. Here, we need to make sure the garbage
# collector actually runs frequently enough to make a
# difference. Normally, the garbage collector is triggered by
# allocations. It doesn't know about *file descriptor*
# allocation though. So ... we'll "teach" it about those,
# here.
fileDescriptorResource.allocate()
fd = os.dup(s.fileno())
flags = fcntl.fcntl(fd, fcntl.F_GETFD)
flags = flags | os.O_NONBLOCK | fcntl.FD_CLOEXEC
fcntl.fcntl(fd, fcntl.F_SETFD, flags)
return (
portnum,
AdoptedStreamServerEndpoint(reactor, fd, socket.AF_INET),
)
endpoint = AdoptedStreamServerEndpoint(reactor, fd, socket.AF_INET)
return (portnum, CleanupEndpoint(endpoint, fd))
finally:
s.close()
else:
@@ -287,6 +307,39 @@ def _foolscapEndpointForPortNumber(portnum):
return (portnum, "tcp:%d" % (portnum,))
@implementer(IStreamServerEndpoint)
@attr.s
class CleanupEndpoint(object):
"""
An ``IStreamServerEndpoint`` wrapper which closes a file descriptor if the
wrapped endpoint is never used.
:ivar IStreamServerEndpoint _wrapped: The wrapped endpoint. The
``listen`` implementation is delegated to this object.
:ivar int _fd: The file descriptor to close if ``listen`` is never called
by the time this object is garbage collected.
:ivar bool _listened: A flag recording whether or not ``listen`` has been
called.
"""
_wrapped = attr.ib()
_fd = attr.ib()
_listened = attr.ib(default=False)
def listen(self, protocolFactory):
self._listened = True
return self._wrapped.listen(protocolFactory)
def __del__(self):
"""
If ``listen`` was never called then close the file descriptor.
"""
if not self._listened:
os.close(self._fd)
fileDescriptorResource.release()
def listenOnUnused(tub, portnum=None):
"""
Start listening on an unused TCP port number with the given tub.

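A short sketch (an assumption, not taken from the diff) of the lifecycle CleanupEndpoint manages: if the endpoint is dropped without listen() ever being called, __del__ closes the duplicated descriptor and credits the tracker:

    endpoint = CleanupEndpoint(
        AdoptedStreamServerEndpoint(reactor, fd, socket.AF_INET),
        fd,
    )
    # If endpoint.listen(factory) is never called, collection of the object
    # closes fd and calls fileDescriptorResource.release() via __del__.
    del endpoint
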
tox.ini (24 changed lines)

@@ -19,8 +19,24 @@ passenv = TAHOE_LAFS_* PIP_* SUBUNITREPORTER_* USERPROFILE HOMEDRIVE HOMEPATH
# available to those systems. Installing it ahead of time (with pip) avoids
# this problem.
deps =
# Pin all of these versions for the same reason you ever want to pin
# anything: to prevent new releases with regressions from introducing
# spurious failures into CI runs for whatever development work is
# happening at the time. The versions selected here are just the current
# versions at the time. Bumping them to keep up with future releases is
# fine as long as those releases are known to actually work.
pip==19.1.1
setuptools==41.0.1
wheel==0.33.4
subunitreporter==19.3.2
# As an exception, we don't pin certifi because it contains CA
# certificates which necessarily change over time. Pinning this is
# guaranteed to cause things to break eventually as old certificates
# expire and as new ones are used in the wild that aren't present in
# whatever version we pin. Hopefully there won't be functionality
# regressions in new releases of this package that cause us the kind of
# suffering we're trying to avoid with the above pins.
certifi
subunitreporter
# We add usedevelop=False because testing against a true installation gives
# more useful results.
@@ -40,6 +56,12 @@ commands =
[testenv:coverage]
# coverage (with --branch) takes about 65% longer to run
commands =
# As an aid to debugging, dump all of the Python packages and their
# versions that are installed in the test environment. This is
# particularly useful to get from CI runs - though hopefully the
# version pinning we do limits the variability of this output
# somewhat.
pip freeze
tahoe --version
coverage run --branch -m twisted.trial {env:TAHOE_LAFS_TRIAL_ARGS:--rterrors --reporter=timing} {posargs:allmydata}
coverage xml