Merge remote-tracking branch 'origin/master' into 3029.install-test-plugins

This commit is contained in:
Jean-Paul Calderone 2019-05-14 06:26:19 -04:00
commit b4459c2dc1
33 changed files with 629 additions and 388 deletions

View File

@ -14,7 +14,7 @@ environment:
install: install:
- | - |
%PYTHON%\python.exe -m pip install -U pip %PYTHON%\python.exe -m pip install -U pip
%PYTHON%\python.exe -m pip install wheel tox virtualenv %PYTHON%\python.exe -m pip install wheel tox==3.9.0 virtualenv
# note: # note:
# %PYTHON% has: python.exe # %PYTHON% has: python.exe

0
newsfragments/3025.minor Normal file
View File

0
newsfragments/3036.minor Normal file
View File

0
newsfragments/3038.minor Normal file
View File

View File

@ -32,12 +32,12 @@ def stop(config):
print("%s does not look like a running node directory (no twistd.pid)" % quoted_basedir, file=err) print("%s does not look like a running node directory (no twistd.pid)" % quoted_basedir, file=err)
# we define rc=2 to mean "nothing is running, but it wasn't me who # we define rc=2 to mean "nothing is running, but it wasn't me who
# stopped it" # stopped it"
return 2 return COULD_NOT_STOP
elif pid == -1: elif pid == -1:
print("%s contains an invalid PID file" % basedir, file=err) print("%s contains an invalid PID file" % basedir, file=err)
# we define rc=2 to mean "nothing is running, but it wasn't me who # we define rc=2 to mean "nothing is running, but it wasn't me who
# stopped it" # stopped it"
return 2 return COULD_NOT_STOP
# kill it hard (SIGKILL), delete the twistd.pid file, then wait for the # kill it hard (SIGKILL), delete the twistd.pid file, then wait for the
# process itself to go away. If it hasn't gone away after 20 seconds, warn # process itself to go away. If it hasn't gone away after 20 seconds, warn

View File

@ -19,7 +19,6 @@ from .common import (
parse_options, parse_options,
) )
timeout = 480 # deep_check takes 360s on Zandr's linksys box, others take > 240s
def _unsupported(what): def _unsupported(what):
return "{} are not supported by Python on this platform.".format(what) return "{} are not supported by Python on this platform.".format(what)
@ -307,13 +306,6 @@ class Backup(GridTestMixin, CLITestMixin, StallMixin, unittest.TestCase):
return d return d
# on our old dapper buildslave, this test takes a long time (usually
# 130s), so we have to bump up the default 120s timeout. The create-alias
# and initial backup alone take 60s, probably because of the handful of
# dirnodes being created (RSA key generation). The backup between check4
# and check4a takes 6s, as does the backup before check4b.
test_backup.timeout = 3000
def _check_filtering(self, filtered, all, included, excluded): def _check_filtering(self, filtered, all, included, excluded):
filtered = set(filtered) filtered = set(filtered)
all = set(all) all = set(all)

View File

@ -12,8 +12,6 @@ from allmydata.scripts import debug
from ..no_network import GridTestMixin from ..no_network import GridTestMixin
from .common import CLITestMixin from .common import CLITestMixin
timeout = 480 # deep_check takes 360s on Zandr's linksys box, others take > 240s
class Check(GridTestMixin, CLITestMixin, unittest.TestCase): class Check(GridTestMixin, CLITestMixin, unittest.TestCase):
def test_check(self): def test_check(self):

View File

@ -43,8 +43,6 @@ from twisted.python import usage
from allmydata.util.encodingutil import listdir_unicode, get_io_encoding from allmydata.util.encodingutil import listdir_unicode, get_io_encoding
timeout = 480 # deep_check takes 360s on Zandr's linksys box, others take > 240s
class CLI(CLITestMixin, unittest.TestCase): class CLI(CLITestMixin, unittest.TestCase):
def _dump_cap(self, *args): def _dump_cap(self, *args):
config = debug.DumpCapOptions() config = debug.DumpCapOptions()

View File

@ -14,8 +14,6 @@ from ..no_network import GridTestMixin
from .common import CLITestMixin from .common import CLITestMixin
from ..common_util import skip_if_cannot_represent_filename from ..common_util import skip_if_cannot_represent_filename
timeout = 480 # deep_check takes 360s on Zandr's linksys box, others take > 240s
class Cp(GridTestMixin, CLITestMixin, unittest.TestCase): class Cp(GridTestMixin, CLITestMixin, unittest.TestCase):
def test_not_enough_args(self): def test_not_enough_args(self):

View File

@ -9,8 +9,6 @@ from ..no_network import GridTestMixin
from allmydata.util.encodingutil import quote_output, get_io_encoding from allmydata.util.encodingutil import quote_output, get_io_encoding
from .common import CLITestMixin from .common import CLITestMixin
timeout = 480 # deep_check takes 360s on Zandr's linksys box, others take > 240s
class CreateAlias(GridTestMixin, CLITestMixin, unittest.TestCase): class CreateAlias(GridTestMixin, CLITestMixin, unittest.TestCase):
def _test_webopen(self, args, expected_url): def _test_webopen(self, args, expected_url):

View File

@ -8,8 +8,6 @@ from ..no_network import GridTestMixin
from allmydata.util.encodingutil import quote_output, get_io_encoding from allmydata.util.encodingutil import quote_output, get_io_encoding
from .common import CLITestMixin from .common import CLITestMixin
timeout = 480 # deep_check takes 360s on Zandr's linksys box, others take > 240s
class List(GridTestMixin, CLITestMixin, unittest.TestCase): class List(GridTestMixin, CLITestMixin, unittest.TestCase):
def test_list(self): def test_list(self):
self.basedir = "cli/List/list" self.basedir = "cli/List/list"

View File

@ -5,8 +5,6 @@ from ..no_network import GridTestMixin
from allmydata.scripts import tahoe_mv from allmydata.scripts import tahoe_mv
from .common import CLITestMixin from .common import CLITestMixin
timeout = 480 # deep_check takes 360s on Zandr's linksys box, others take > 240s
class Mv(GridTestMixin, CLITestMixin, unittest.TestCase): class Mv(GridTestMixin, CLITestMixin, unittest.TestCase):
def test_mv_behavior(self): def test_mv_behavior(self):
self.basedir = "cli/Mv/mv_behavior" self.basedir = "cli/Mv/mv_behavior"

View File

@ -11,8 +11,6 @@ from allmydata.util.encodingutil import get_io_encoding, unicode_to_argv
from allmydata.util.fileutil import abspath_expanduser_unicode from allmydata.util.fileutil import abspath_expanduser_unicode
from .common import CLITestMixin from .common import CLITestMixin
timeout = 480 # deep_check takes 360s on Zandr's linksys box, others take > 240s
class Put(GridTestMixin, CLITestMixin, unittest.TestCase): class Put(GridTestMixin, CLITestMixin, unittest.TestCase):
def test_unlinked_immutable_stdin(self): def test_unlinked_immutable_stdin(self):
@ -471,4 +469,3 @@ class Put(GridTestMixin, CLITestMixin, unittest.TestCase):
self.failUnlessReallyEqual(rc_out_err[1], DATA)) self.failUnlessReallyEqual(rc_out_err[1], DATA))
return d return d

View File

@ -0,0 +1,235 @@
# Public interface of this module: helpers for driving a Tahoe-LAFS node
# as a child process from the test suite.
__all__ = [
    "CLINodeAPI",
    "Expect",
    "on_stdout",
    "on_stdout_and_stderr",
    "on_different",
    "wait_for_exit",
]
import os
import sys
from errno import ENOENT
import attr
from twisted.internet.error import (
ProcessDone,
ProcessTerminated,
ProcessExitedAlready,
)
from twisted.internet.interfaces import (
IProcessProtocol,
)
from twisted.python.filepath import (
FilePath,
)
from twisted.python.runtime import (
platform,
)
from twisted.internet.protocol import (
Protocol,
ProcessProtocol,
)
from twisted.internet.defer import (
Deferred,
succeed,
)
from twisted.internet.task import (
deferLater,
)
from ..client import (
_Client,
)
from ..scripts.tahoe_stop import (
COULD_NOT_STOP,
)
from ..util.eliotutil import (
inline_callbacks,
)
class Expect(Protocol):
    """
    A stream protocol that buffers every byte it receives and lets callers
    wait, via a Deferred, until a particular byte string appears anywhere
    in the accumulated output.
    """
    def __init__(self):
        # Pending (expectation, Deferred) pairs not yet satisfied.
        self._expectations = []
        # Initialize the buffer here as well as in connectionMade so that
        # get_buffered_output() and expect() are safe to call before the
        # transport is connected (previously they raised AttributeError).
        self._buffer = b""

    def get_buffered_output(self):
        """
        Return all bytes received so far.
        """
        return self._buffer

    def expect(self, expectation):
        """
        Return a Deferred that fires with None once ``expectation`` has
        appeared in the buffered output.  Fires synchronously if it is
        already present.

        :param bytes expectation: The byte string to wait for.
        """
        if expectation in self._buffer:
            return succeed(None)
        d = Deferred()
        self._expectations.append((expectation, d))
        return d

    def connectionMade(self):
        # Start a fresh buffer for this connection.
        self._buffer = b""

    def dataReceived(self, data):
        self._buffer += data
        # Iterate backwards so that deleting by index is safe mid-loop.
        for i in range(len(self._expectations) - 1, -1, -1):
            expectation, d = self._expectations[i]
            if expectation in self._buffer:
                del self._expectations[i]
                d.callback(None)

    def connectionLost(self, reason):
        # The expected output can never arrive now; fail every waiter.
        for ignored, d in self._expectations:
            d.errback(reason)
class _ProcessProtocolAdapter(ProcessProtocol):
    """
    Adapt one or more stream ``Protocol`` objects to a ``ProcessProtocol``,
    routing each child file descriptor's output to the protocol registered
    for that descriptor.
    """
    def __init__(self, fds):
        # Mapping from child fd number to the Protocol handling its data.
        self._fds = fds

    def connectionMade(self):
        # Hand the process transport to every registered protocol.
        for stream_proto in self._fds.values():
            stream_proto.makeConnection(self.transport)

    def childDataReceived(self, childFD, data):
        # Deliver output to the protocol interested in this fd; output on
        # unregistered fds is silently discarded.
        if childFD in self._fds:
            self._fds[childFD].dataReceived(data)

    def processEnded(self, reason):
        # A single protocol may be registered for several fds; make sure
        # each distinct protocol hears about the end exactly once.
        already_told = set()
        for stream_proto in self._fds.values():
            if stream_proto in already_told:
                continue
            already_told.add(stream_proto)
            stream_proto.connectionLost(reason)
def on_stdout(protocol):
    """
    Build a ProcessProtocol that delivers the child's stdout (fd 1) to
    ``protocol``.
    """
    fd_mapping = {1: protocol}
    return _ProcessProtocolAdapter(fd_mapping)
def on_stdout_and_stderr(protocol):
    """
    Build a ProcessProtocol that delivers both the child's stdout (fd 1)
    and stderr (fd 2) to the same ``protocol``.
    """
    fd_mapping = {1: protocol, 2: protocol}
    return _ProcessProtocolAdapter(fd_mapping)
def on_different(fd_mapping):
    """
    Build a ProcessProtocol that routes each child fd in ``fd_mapping``
    to the stream protocol mapped to it.

    :param dict fd_mapping: Mapping from child fd number to a Protocol.
    """
    return _ProcessProtocolAdapter(fd_mapping)
@attr.s
class CLINodeAPI(object):
    """
    Drive a Tahoe-LAFS node as a child process via the
    ``allmydata.scripts.runner`` command-line interface.
    """
    # Reactor used to spawn the child process and schedule delays.
    reactor = attr.ib()
    # The node's base directory.
    basedir = attr.ib(type=FilePath)
    # The process transport of the running node, or None before run().
    process = attr.ib(default=None)

    @property
    def twistd_pid_file(self):
        # PID file written by twistd (POSIX only; see stop_and_wait).
        return self.basedir.child(u"twistd.pid")

    @property
    def node_url_file(self):
        return self.basedir.child(u"node.url")

    @property
    def storage_furl_file(self):
        return self.basedir.child(u"private").child(u"storage.furl")

    @property
    def introducer_furl_file(self):
        return self.basedir.child(u"private").child(u"introducer.furl")

    @property
    def config_file(self):
        return self.basedir.child(u"tahoe.cfg")

    @property
    def exit_trigger_file(self):
        # Touching this file limits the node's lifetime; see active().
        return self.basedir.child(_Client.EXIT_TRIGGER_FILE)

    def _execute(self, process_protocol, argv):
        # Spawn "python -m allmydata.scripts.runner <argv>" with the same
        # interpreter and environment as the current process.
        exe = sys.executable
        argv = [
            exe,
            u"-m",
            u"allmydata.scripts.runner",
        ] + argv
        return self.reactor.spawnProcess(
            processProtocol=process_protocol,
            executable=exe,
            args=argv,
            env=os.environ,
        )

    def run(self, protocol, extra_tahoe_args=()):
        """
        Start the node running.

        :param IProcessProtocol protocol: This protocol will be hooked up to
            the node process and can handle output or generate input.
        """
        if not IProcessProtocol.providedBy(protocol):
            raise TypeError("run requires process protocol, got {}".format(protocol))
        self.process = self._execute(
            protocol,
            list(extra_tahoe_args) + [u"run", self.basedir.asTextMode().path],
        )
        # Don't let the process run away forever.
        try:
            self.active()
        except OSError as e:
            # NOTE(review): ENOENT presumably means the basedir does not
            # exist yet and is tolerated deliberately — confirm.
            if ENOENT != e.errno:
                raise

    def stop(self, protocol):
        # Spawn "tahoe stop <basedir>" attached to the given protocol.
        self._execute(
            protocol,
            [u"stop", self.basedir.asTextMode().path],
        )

    @inline_callbacks
    def stop_and_wait(self):
        # Stop the node and wait for the child process to exit.
        if platform.isWindows():
            # On Windows there is no PID file and no "tahoe stop".
            if self.process is not None:
                # Keep signalling TERM until the process has gone away,
                # polling every 0.1s.
                while True:
                    try:
                        self.process.signalProcess("TERM")
                    except ProcessExitedAlready:
                        break
                    else:
                        yield deferLater(self.reactor, 0.1, lambda: None)
        else:
            protocol, ended = wait_for_exit()
            self.stop(protocol)
            yield ended

    def active(self):
        # By writing this file, we get two minutes before the client will
        # exit. This ensures that even if the 'stop' command doesn't work (and
        # the test fails), the client should still terminate.
        self.exit_trigger_file.touch()

    def _check_cleanup_reason(self, reason):
        # Let it fail because the process has already exited.
        reason.trap(ProcessTerminated)
        if reason.value.exitCode != COULD_NOT_STOP:
            return reason
        return None

    def cleanup(self):
        # Stop the node, tolerating the "nothing was running" exit code.
        stopping = self.stop_and_wait()
        stopping.addErrback(self._check_cleanup_reason)
        return stopping
class _WaitForEnd(ProcessProtocol):
    """
    A process protocol that fires a Deferred when the child process ends:
    with None on a clean exit, with the failure otherwise.
    """
    def __init__(self, ended):
        # Deferred to fire on process termination.
        self._ended = ended

    def processEnded(self, reason):
        exited_cleanly = reason.check(ProcessDone)
        if exited_cleanly:
            self._ended.callback(None)
        else:
            self._ended.errback(reason)
def wait_for_exit():
    """
    Create a process protocol together with a Deferred that fires when
    the attached process ends.

    :return: A two-tuple of (ProcessProtocol, Deferred).
    """
    ended = Deferred()
    return _WaitForEnd(ended), ended

View File

@ -16,8 +16,6 @@ from .. import common_util as testutil
SEGSIZE = 128*1024 SEGSIZE = 128*1024
class Update(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin): class Update(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin):
timeout = 400 # these tests are too big, 120s is not enough on slow
# platforms
def setUp(self): def setUp(self):
GridTestMixin.setUp(self) GridTestMixin.setUp(self)
self.basedir = self.mktemp() self.basedir = self.mktemp()

View File

@ -544,5 +544,3 @@ class TooParallel(GridTestMixin, unittest.TestCase):
return res return res
d.addBoth(_clean_up) d.addBoth(_clean_up)
return d return d
test_immutable.timeout = 80

View File

@ -20,8 +20,6 @@ from .common_web import do_http
from allmydata.test.no_network import GridTestMixin from allmydata.test.no_network import GridTestMixin
from .cli.common import CLITestMixin from .cli.common import CLITestMixin
timeout = 2400 # One of these took 1046.091s on Zandr's ARM box.
class MutableChecker(GridTestMixin, unittest.TestCase, ErrorMixin): class MutableChecker(GridTestMixin, unittest.TestCase, ErrorMixin):
def test_good(self): def test_good(self):
self.basedir = "deepcheck/MutableChecker/good" self.basedir = "deepcheck/MutableChecker/good"

View File

@ -63,7 +63,6 @@ one_nfd = u"one\u0304"
class Dirnode(GridTestMixin, unittest.TestCase, class Dirnode(GridTestMixin, unittest.TestCase,
testutil.ReallyEqualMixin, testutil.ShouldFailMixin, testutil.StallMixin, ErrorMixin): testutil.ReallyEqualMixin, testutil.ShouldFailMixin, testutil.StallMixin, ErrorMixin):
timeout = 480 # It occasionally takes longer than 240 seconds on Francois's arm box.
def _do_create_test(self, mdmf=False): def _do_create_test(self, mdmf=False):
c = self.g.clients[0] c = self.g.clients[0]
@ -1758,7 +1757,6 @@ class Dirnode2(testutil.ReallyEqualMixin, testutil.ShouldFailMixin, unittest.Tes
class DeepStats(testutil.ReallyEqualMixin, unittest.TestCase): class DeepStats(testutil.ReallyEqualMixin, unittest.TestCase):
timeout = 240 # It takes longer than 120 seconds on Francois's arm box.
def test_stats(self): def test_stats(self):
ds = dirnode.DeepStats(None) ds = dirnode.DeepStats(None)
ds.add("count-files") ds.add("count-files")
@ -1823,7 +1821,6 @@ class UCWEingNodeMaker(NodeMaker):
class Deleter(GridTestMixin, testutil.ReallyEqualMixin, unittest.TestCase): class Deleter(GridTestMixin, testutil.ReallyEqualMixin, unittest.TestCase):
timeout = 3600 # It takes longer than 433 seconds on Zandr's ARM box.
def test_retry(self): def test_retry(self):
# ticket #550, a dirnode.delete which experiences an # ticket #550, a dirnode.delete which experiences an
# UncoordinatedWriteError will fail with an incorrect "you're # UncoordinatedWriteError will fail with an incorrect "you're

View File

@ -194,7 +194,6 @@ class _Base(GridTestMixin, ShouldFailMixin):
return d return d
class DownloadTest(_Base, unittest.TestCase): class DownloadTest(_Base, unittest.TestCase):
timeout = 2400 # It takes longer than 240 seconds on Zandr's ARM box.
def test_download(self): def test_download(self):
self.basedir = self.mktemp() self.basedir = self.mktemp()
self.set_up_grid() self.set_up_grid()

View File

@ -163,7 +163,6 @@ def make_data(length):
return data[:length] return data[:length]
class ValidatedExtendedURIProxy(unittest.TestCase): class ValidatedExtendedURIProxy(unittest.TestCase):
timeout = 240 # It takes longer than 120 seconds on Francois's arm box.
K = 4 K = 4
M = 10 M = 10
SIZE = 200 SIZE = 200
@ -260,8 +259,6 @@ class ValidatedExtendedURIProxy(unittest.TestCase):
return defer.DeferredList(dl) return defer.DeferredList(dl)
class Encode(unittest.TestCase): class Encode(unittest.TestCase):
timeout = 2400 # It takes longer than 240 seconds on Zandr's ARM box.
def do_encode(self, max_segment_size, datalen, NUM_SHARES, NUM_SEGMENTS, def do_encode(self, max_segment_size, datalen, NUM_SHARES, NUM_SEGMENTS,
expected_block_hashes, expected_share_hashes): expected_block_hashes, expected_share_hashes):
data = make_data(datalen) data = make_data(datalen)

View File

@ -114,7 +114,6 @@ def upload_data(uploader, data, convergence):
return uploader.upload(u) return uploader.upload(u)
class AssistedUpload(unittest.TestCase): class AssistedUpload(unittest.TestCase):
timeout = 240 # It takes longer than 120 seconds on Francois's arm box.
def setUp(self): def setUp(self):
self.tub = t = Tub() self.tub = t = Tub()
t.setOption("expose-remote-exception-types", False) t.setOption("expose-remote-exception-types", False)

View File

@ -19,15 +19,6 @@ mutable_plaintext = "muta" * 10000
class HungServerDownloadTest(GridTestMixin, ShouldFailMixin, PollMixin, class HungServerDownloadTest(GridTestMixin, ShouldFailMixin, PollMixin,
unittest.TestCase): unittest.TestCase):
# Many of these tests take around 60 seconds on François's ARM buildslave:
# http://tahoe-lafs.org/buildbot/builders/FranXois%20lenny-armv5tel
# allmydata.test.test_hung_server.HungServerDownloadTest.test_2_good_8_broken_duplicate_share_fail
# once ERRORed after 197 seconds on Midnight Magic's NetBSD buildslave:
# http://tahoe-lafs.org/buildbot/builders/MM%20netbsd4%20i386%20warp
# MM's buildslave varies a lot in how long it takes to run tests.
timeout = 240
def _break(self, servers): def _break(self, servers):
for (id, ss) in servers: for (id, ss) in servers:
self.g.break_server(id) self.g.break_server(id)

View File

@ -715,8 +715,6 @@ class SystemTest(SystemTestMixin, AsyncTestCase):
self.basedir = "introducer/SystemTest/system_v2_server" self.basedir = "introducer/SystemTest/system_v2_server"
os.makedirs(self.basedir) os.makedirs(self.basedir)
return self.do_system_test() return self.do_system_test()
test_system_v2_server.timeout = 480
# occasionally takes longer than 350s on "draco"
class FakeRemoteReference: class FakeRemoteReference:
def notifyOnDisconnect(self, *args, **kwargs): pass def notifyOnDisconnect(self, *args, **kwargs): pass

View File

@ -2418,7 +2418,6 @@ class RealTest(SingleMagicFolderTestMixin, AsyncTestCase):
class RealTestAliceBob(MagicFolderAliceBobTestMixin, AsyncTestCase): class RealTestAliceBob(MagicFolderAliceBobTestMixin, AsyncTestCase):
"""This is skipped unless both Twisted and the platform support inotify.""" """This is skipped unless both Twisted and the platform support inotify."""
inject_inotify = False inject_inotify = False
timeout = 15
def setUp(self): def setUp(self):
d = super(RealTestAliceBob, self).setUp() d = super(RealTestAliceBob, self).setUp()

View File

@ -4,26 +4,40 @@ from __future__ import (
) )
import os.path, re, sys import os.path, re, sys
from os import linesep
from twisted.trial import unittest from twisted.trial import unittest
from twisted.python import usage, runtime from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks, returnValue from twisted.python import usage
from twisted.internet.defer import (
inlineCallbacks,
returnValue,
DeferredList,
)
from twisted.python.filepath import FilePath
from twisted.python.runtime import (
platform,
)
from allmydata.util import fileutil, pollmixin from allmydata.util import fileutil, pollmixin
from allmydata.util.encodingutil import unicode_to_argv, unicode_to_output, \ from allmydata.util.encodingutil import unicode_to_argv, unicode_to_output, \
get_filesystem_encoding get_filesystem_encoding
from allmydata.client import _Client
from allmydata.test import common_util from allmydata.test import common_util
import allmydata import allmydata
from allmydata import __appname__ from allmydata import __appname__
from .common_util import parse_cli, run_cli from .common_util import parse_cli, run_cli
from .cli_node_api import (
CLINodeAPI,
Expect,
on_stdout,
on_stdout_and_stderr,
)
from ._twisted_9607 import ( from ._twisted_9607 import (
getProcessOutputAndValue, getProcessOutputAndValue,
) )
from ..util.eliotutil import (
timeout = 240 inline_callbacks,
)
def get_root_from_file(src): def get_root_from_file(src):
srcdir = os.path.dirname(os.path.dirname(os.path.normcase(os.path.realpath(src)))) srcdir = os.path.dirname(os.path.dirname(os.path.normcase(os.path.realpath(src))))
@ -41,13 +55,7 @@ def get_root_from_file(src):
srcfile = allmydata.__file__ srcfile = allmydata.__file__
rootdir = get_root_from_file(srcfile) rootdir = get_root_from_file(srcfile)
class RunBinTahoeMixin: class RunBinTahoeMixin:
def skip_if_cannot_daemonize(self):
if runtime.platformType == "win32":
# twistd on windows doesn't daemonize. cygwin should work normally.
raise unittest.SkipTest("twistd does not fork under windows")
@inlineCallbacks @inlineCallbacks
def find_import_location(self): def find_import_location(self):
res = yield self.run_bintahoe(["--version-and-path"]) res = yield self.run_bintahoe(["--version-and-path"])
@ -364,357 +372,283 @@ class CreateNode(unittest.TestCase):
# can't provide all three # can't provide all three
_test("create-stats-gatherer --hostname=foo --location=foo --port=foo D") _test("create-stats-gatherer --hostname=foo --location=foo --port=foo D")
class RunNode(common_util.SignalMixin, unittest.TestCase, pollmixin.PollMixin, class RunNode(common_util.SignalMixin, unittest.TestCase, pollmixin.PollMixin,
RunBinTahoeMixin): RunBinTahoeMixin):
# exercise "tahoe start", for both introducer, client node, and """
# key-generator, by spawning "tahoe start" as a subprocess. This doesn't exercise "tahoe run" for both introducer, client node, and key-generator,
# get us figleaf-based line-level coverage, but it does a better job of by spawning "tahoe run" (or "tahoe start") as a subprocess. This doesn't
# confirming that the user can actually run "./bin/tahoe start" and get us line-level coverage, but it does a better job of confirming that
# expect it to work. This verifies that bin/tahoe sets up PYTHONPATH and the user can actually run "./bin/tahoe run" and expect it to work. This
# the like correctly. verifies that bin/tahoe sets up PYTHONPATH and the like correctly.
# This doesn't work on cygwin (it hangs forever), so we skip this test This doesn't work on cygwin (it hangs forever), so we skip this test
# when we're on cygwin. It is likely that "tahoe start" itself doesn't when we're on cygwin. It is likely that "tahoe start" itself doesn't
# work on cygwin: twisted seems unable to provide a version of work on cygwin: twisted seems unable to provide a version of
# spawnProcess which really works there. spawnProcess which really works there.
"""
def workdir(self, name): def workdir(self, name):
basedir = os.path.join("test_runner", "RunNode", name) basedir = os.path.join("test_runner", "RunNode", name)
fileutil.make_dirs(basedir) fileutil.make_dirs(basedir)
return basedir return basedir
@inline_callbacks
def test_introducer(self): def test_introducer(self):
self.skip_if_cannot_daemonize() """
The introducer furl is stable across restarts.
"""
basedir = self.workdir("test_introducer") basedir = self.workdir("test_introducer")
c1 = os.path.join(basedir, "c1") c1 = os.path.join(basedir, "c1")
exit_trigger_file = os.path.join(c1, _Client.EXIT_TRIGGER_FILE) tahoe = CLINodeAPI(reactor, FilePath(c1))
twistd_pid_file = os.path.join(c1, "twistd.pid") self.addCleanup(tahoe.stop_and_wait)
introducer_furl_file = os.path.join(c1, "private", "introducer.furl")
node_url_file = os.path.join(c1, "node.url")
config_file = os.path.join(c1, "tahoe.cfg")
d = self.run_bintahoe(["--quiet", "create-introducer", "--basedir", c1, "--hostname", "localhost"]) out, err, rc_or_sig = yield self.run_bintahoe([
def _cb(res): "--quiet",
out, err, rc_or_sig = res "create-introducer",
self.failUnlessEqual(rc_or_sig, 0) "--basedir", c1,
"--hostname", "127.0.0.1",
])
# This makes sure that node.url is written, which allows us to self.assertEqual(rc_or_sig, 0)
# detect when the introducer restarts in _node_has_restarted below.
config = fileutil.read(config_file)
self.failUnlessIn('\nweb.port = \n', config)
fileutil.write(config_file, config.replace('\nweb.port = \n', '\nweb.port = 0\n'))
# by writing this file, we get ten seconds before the node will # This makes sure that node.url is written, which allows us to
# exit. This insures that even if the test fails (and the 'stop' # detect when the introducer restarts in _node_has_restarted below.
# command doesn't work), the client should still terminate. config = fileutil.read(tahoe.config_file.path)
fileutil.write(exit_trigger_file, "") self.assertIn('{}web.port = {}'.format(linesep, linesep), config)
# now it's safe to start the node fileutil.write(
d.addCallback(_cb) tahoe.config_file.path,
config.replace(
'{}web.port = {}'.format(linesep, linesep),
'{}web.port = 0{}'.format(linesep, linesep),
)
)
def _then_start_the_node(res): p = Expect()
return self.run_bintahoe(["--quiet", "start", c1]) tahoe.run(on_stdout(p))
d.addCallback(_then_start_the_node) yield p.expect("introducer running")
tahoe.active()
def _cb2(res): yield self.poll(tahoe.introducer_furl_file.exists)
out, err, rc_or_sig = res
fileutil.write(exit_trigger_file, "") # read the introducer.furl file so we can check that the contents
errstr = "rc=%d, OUT: '%s', ERR: '%s'" % (rc_or_sig, out, err) # don't change on restart
self.failUnlessEqual(rc_or_sig, 0, errstr) furl = fileutil.read(tahoe.introducer_furl_file.path)
self.failUnlessEqual(out, "", errstr)
# self.failUnlessEqual(err, "", errstr) # See test_client_no_noise -- for now we ignore noise.
# the parent (twistd) has exited. However, twistd writes the pid tahoe.active()
# from the child, not the parent, so we can't expect twistd.pid
# to exist quite yet.
# the node is running, but it might not have made it past the # We don't keep track of PIDs in files on Windows.
# first reactor turn yet, and if we kill it too early, it won't if not platform.isWindows():
# remove the twistd.pid file. So wait until it does something self.assertTrue(tahoe.twistd_pid_file.exists())
# that we know it won't do until after the first turn. self.assertTrue(tahoe.node_url_file.exists())
d.addCallback(_cb2)
def _node_has_started(): # rm this so we can detect when the second incarnation is ready
return os.path.exists(introducer_furl_file) tahoe.node_url_file.remove()
d.addCallback(lambda res: self.poll(_node_has_started))
def _started(res): yield tahoe.stop_and_wait()
# read the introducer.furl file so we can check that the contents
# don't change on restart
self.furl = fileutil.read(introducer_furl_file)
fileutil.write(exit_trigger_file, "") p = Expect()
self.failUnless(os.path.exists(twistd_pid_file)) tahoe.run(on_stdout(p))
self.failUnless(os.path.exists(node_url_file)) yield p.expect("introducer running")
# rm this so we can detect when the second incarnation is ready # Again, the second incarnation of the node might not be ready yet, so
os.unlink(node_url_file) # poll until it is. This time introducer_furl_file already exists, so
return self.run_bintahoe(["--quiet", "restart", c1]) # we check for the existence of node_url_file instead.
d.addCallback(_started) yield self.poll(tahoe.node_url_file.exists)
def _then(res): # The point of this test! After starting the second time the
out, err, rc_or_sig = res # introducer furl file must exist and contain the same contents as it
fileutil.write(exit_trigger_file, "") # did before.
errstr = "rc=%d, OUT: '%s', ERR: '%s'" % (rc_or_sig, out, err) self.assertTrue(tahoe.introducer_furl_file.exists())
self.failUnlessEqual(rc_or_sig, 0, errstr) self.assertEqual(furl, fileutil.read(tahoe.introducer_furl_file.path))
self.failUnlessEqual(out, "", errstr)
# self.failUnlessEqual(err, "", errstr) # See test_client_no_noise -- for now we ignore noise.
d.addCallback(_then)
# Again, the second incarnation of the node might not be ready yet,
# so poll until it is. This time introducer_furl_file already
# exists, so we check for the existence of node_url_file instead.
def _node_has_restarted():
return os.path.exists(node_url_file)
d.addCallback(lambda res: self.poll(_node_has_restarted))
def _check_same_furl(res):
self.failUnless(os.path.exists(introducer_furl_file))
self.failUnlessEqual(self.furl, fileutil.read(introducer_furl_file))
d.addCallback(_check_same_furl)
# Now we can kill it. TODO: On a slow machine, the node might kill
# itself before we get a chance to, especially if spawning the
# 'tahoe stop' command takes a while.
def _stop(res):
fileutil.write(exit_trigger_file, "")
self.failUnless(os.path.exists(twistd_pid_file))
return self.run_bintahoe(["--quiet", "stop", c1])
d.addCallback(_stop)
def _after_stopping(res):
out, err, rc_or_sig = res
fileutil.write(exit_trigger_file, "")
# the parent has exited by now
errstr = "rc=%d, OUT: '%s', ERR: '%s'" % (rc_or_sig, out, err)
self.failUnlessEqual(rc_or_sig, 0, errstr)
self.failUnlessEqual(out, "", errstr)
# self.failUnlessEqual(err, "", errstr) # See test_client_no_noise -- for now we ignore noise.
# the parent was supposed to poll and wait until it sees
# twistd.pid go away before it exits, so twistd.pid should be
# gone by now.
self.failIf(os.path.exists(twistd_pid_file))
d.addCallback(_after_stopping)
d.addBoth(self._remove, exit_trigger_file)
return d
# This test has hit a 240-second timeout on our feisty2.5 buildslave, and a 480-second timeout
# on Francois's Lenny-armv5tel buildslave.
test_introducer.timeout = 960
def test_client_no_noise(self):
self.skip_if_cannot_daemonize()
basedir = self.workdir("test_client_no_noise")
c1 = os.path.join(basedir, "c1")
exit_trigger_file = os.path.join(c1, _Client.EXIT_TRIGGER_FILE)
twistd_pid_file = os.path.join(c1, "twistd.pid")
node_url_file = os.path.join(c1, "node.url")
d = self.run_bintahoe(["--quiet", "create-client", "--basedir", c1, "--webport", "0"])
def _cb(res):
out, err, rc_or_sig = res
errstr = "cc=%d, OUT: '%s', ERR: '%s'" % (rc_or_sig, out, err)
assert rc_or_sig == 0, errstr
self.failUnlessEqual(rc_or_sig, 0)
# By writing this file, we get two minutes before the client will exit. This ensures
# that even if the 'stop' command doesn't work (and the test fails), the client should
# still terminate.
fileutil.write(exit_trigger_file, "")
# now it's safe to start the node
d.addCallback(_cb)
def _start(res):
return self.run_bintahoe(["--quiet", "start", c1])
d.addCallback(_start)
def _cb2(res):
out, err, rc_or_sig = res
errstr = "cc=%d, OUT: '%s', ERR: '%s'" % (rc_or_sig, out, err)
fileutil.write(exit_trigger_file, "")
self.failUnlessEqual(rc_or_sig, 0, errstr)
self.failUnlessEqual(out, "", errstr) # If you emit noise, you fail this test.
errlines = err.split("\n")
self.failIf([True for line in errlines if (line != "" and "UserWarning: Unbuilt egg for setuptools" not in line
and "from pkg_resources import load_entry_point" not in line)], errstr)
if err != "":
raise unittest.SkipTest("This test is known not to pass on Ubuntu Lucid; see #1235.")
# the parent (twistd) has exited. However, twistd writes the pid
# from the child, not the parent, so we can't expect twistd.pid
# to exist quite yet.
# the node is running, but it might not have made it past the
# first reactor turn yet, and if we kill it too early, it won't
# remove the twistd.pid file. So wait until it does something
# that we know it won't do until after the first turn.
d.addCallback(_cb2)
def _node_has_started():
return os.path.exists(node_url_file)
d.addCallback(lambda res: self.poll(_node_has_started))
# now we can kill it. TODO: On a slow machine, the node might kill
# itself before we get a chance to, especially if spawning the
# 'tahoe stop' command takes a while.
def _stop(res):
self.failUnless(os.path.exists(twistd_pid_file),
(twistd_pid_file, os.listdir(os.path.dirname(twistd_pid_file))))
return self.run_bintahoe(["--quiet", "stop", c1])
d.addCallback(_stop)
d.addBoth(self._remove, exit_trigger_file)
return d
@inline_callbacks
def test_client(self): def test_client(self):
self.skip_if_cannot_daemonize() """
Test many things.
0) Verify that "tahoe create-node" takes a --webport option and writes
the value to the configuration file.
1) Verify that "tahoe run" writes a pid file and a node url file (on POSIX).
2) Verify that the storage furl file has a stable value across a
"tahoe run" / "tahoe stop" / "tahoe run" sequence.
3) Verify that the pid file is removed after "tahoe stop" succeeds (on POSIX).
"""
basedir = self.workdir("test_client") basedir = self.workdir("test_client")
c1 = os.path.join(basedir, "c1") c1 = os.path.join(basedir, "c1")
exit_trigger_file = os.path.join(c1, _Client.EXIT_TRIGGER_FILE)
twistd_pid_file = os.path.join(c1, "twistd.pid")
node_url_file = os.path.join(c1, "node.url")
storage_furl_file = os.path.join(c1, "private", "storage.furl")
config_file = os.path.join(c1, "tahoe.cfg")
d = self.run_bintahoe(["--quiet", "create-node", "--basedir", c1, tahoe = CLINodeAPI(reactor, FilePath(c1))
"--webport", "0", # Set this up right now so we don't forget later.
"--hostname", "localhost"]) self.addCleanup(tahoe.cleanup)
def _cb(res):
out, err, rc_or_sig = res
self.failUnlessEqual(rc_or_sig, 0)
# Check that the --webport option worked. out, err, rc_or_sig = yield self.run_bintahoe([
config = fileutil.read(config_file) "--quiet", "create-node", "--basedir", c1,
self.failUnlessIn('\nweb.port = 0\n', config) "--webport", "0",
"--hostname", "localhost",
])
self.failUnlessEqual(rc_or_sig, 0)
# By writing this file, we get two minutes before the client will # Check that the --webport option worked.
# exit. This ensures that even if the 'stop' command doesn't work config = fileutil.read(tahoe.config_file.path)
# (and the test fails), the client should still terminate. self.assertIn(
fileutil.write(exit_trigger_file, "") '{}web.port = 0{}'.format(linesep, linesep),
# now it's safe to start the node config,
d.addCallback(_cb) )
def _start(res): # After this it's safe to start the node
return self.run_bintahoe(["--quiet", "start", c1]) tahoe.active()
d.addCallback(_start)
def _cb2(res): p = Expect()
out, err, rc_or_sig = res # This will run until we stop it.
fileutil.write(exit_trigger_file, "") tahoe.run(on_stdout(p))
errstr = "rc=%d, OUT: '%s', ERR: '%s'" % (rc_or_sig, out, err) # Wait for startup to have proceeded to a reasonable point.
self.failUnlessEqual(rc_or_sig, 0, errstr) yield p.expect("client running")
self.failUnlessEqual(out, "", errstr) tahoe.active()
# self.failUnlessEqual(err, "", errstr) # See test_client_no_noise -- for now we ignore noise.
# the parent (twistd) has exited. However, twistd writes the pid # read the storage.furl file so we can check that its contents don't
# from the child, not the parent, so we can't expect twistd.pid # change on restart
# to exist quite yet. storage_furl = fileutil.read(tahoe.storage_furl_file.path)
# the node is running, but it might not have made it past the # We don't keep track of PIDs in files on Windows.
# first reactor turn yet, and if we kill it too early, it won't if not platform.isWindows():
# remove the twistd.pid file. So wait until it does something self.assertTrue(tahoe.twistd_pid_file.exists())
# that we know it won't do until after the first turn.
d.addCallback(_cb2)
def _node_has_started(): # rm this so we can detect when the second incarnation is ready
return os.path.exists(node_url_file) tahoe.node_url_file.remove()
d.addCallback(lambda res: self.poll(_node_has_started)) yield tahoe.stop_and_wait()
def _started(res): p = Expect()
# read the storage.furl file so we can check that its contents # We don't have to add another cleanup for this one, the one from
# don't change on restart # above is still registered.
self.storage_furl = fileutil.read(storage_furl_file) tahoe.run(on_stdout(p))
yield p.expect("client running")
tahoe.active()
fileutil.write(exit_trigger_file, "") self.assertEqual(
self.failUnless(os.path.exists(twistd_pid_file)) storage_furl,
fileutil.read(tahoe.storage_furl_file.path),
)
# rm this so we can detect when the second incarnation is ready if not platform.isWindows():
os.unlink(node_url_file) self.assertTrue(
return self.run_bintahoe(["--quiet", "restart", c1]) tahoe.twistd_pid_file.exists(),
d.addCallback(_started) "PID file ({}) didn't exist when we expected it to. "
"These exist: {}".format(
tahoe.twistd_pid_file,
tahoe.twistd_pid_file.parent().listdir(),
),
)
yield tahoe.stop_and_wait()
def _cb3(res): if not platform.isWindows():
out, err, rc_or_sig = res # twistd.pid should be gone by now.
self.assertFalse(tahoe.twistd_pid_file.exists())
fileutil.write(exit_trigger_file, "")
errstr = "rc=%d, OUT: '%s', ERR: '%s'" % (rc_or_sig, out, err)
self.failUnlessEqual(rc_or_sig, 0, errstr)
self.failUnlessEqual(out, "", errstr)
# self.failUnlessEqual(err, "", errstr) # See test_client_no_noise -- for now we ignore noise.
d.addCallback(_cb3)
# again, the second incarnation of the node might not be ready yet,
# so poll until it is
d.addCallback(lambda res: self.poll(_node_has_started))
def _check_same_furl(res):
self.failUnlessEqual(self.storage_furl,
fileutil.read(storage_furl_file))
d.addCallback(_check_same_furl)
# now we can kill it. TODO: On a slow machine, the node might kill
# itself before we get a chance to, especially if spawning the
# 'tahoe stop' command takes a while.
def _stop(res):
fileutil.write(exit_trigger_file, "")
self.failUnless(os.path.exists(twistd_pid_file),
(twistd_pid_file, os.listdir(os.path.dirname(twistd_pid_file))))
return self.run_bintahoe(["--quiet", "stop", c1])
d.addCallback(_stop)
def _cb4(res):
out, err, rc_or_sig = res
fileutil.write(exit_trigger_file, "")
# the parent has exited by now
errstr = "rc=%d, OUT: '%s', ERR: '%s'" % (rc_or_sig, out, err)
self.failUnlessEqual(rc_or_sig, 0, errstr)
self.failUnlessEqual(out, "", errstr)
# self.failUnlessEqual(err, "", errstr) # See test_client_no_noise -- for now we ignore noise.
# the parent was supposed to poll and wait until it sees
# twistd.pid go away before it exits, so twistd.pid should be
# gone by now.
self.failIf(os.path.exists(twistd_pid_file))
d.addCallback(_cb4)
d.addBoth(self._remove, exit_trigger_file)
return d
def _remove(self, res, file): def _remove(self, res, file):
fileutil.remove(file) fileutil.remove(file)
return res return res
def test_baddir(self): def test_run_bad_directory(self):
self.skip_if_cannot_daemonize() """
basedir = self.workdir("test_baddir") If ``tahoe run`` is pointed at a non-node directory, it reports an error
and exits.
"""
return self._bad_directory_test(
u"test_run_bad_directory",
"tahoe run",
lambda tahoe, p: tahoe.run(p),
"is not a recognizable node directory",
)
def test_run_bogus_directory(self):
"""
If ``tahoe run`` is pointed at a non-directory, it reports an error and
exits.
"""
return self._bad_directory_test(
u"test_run_bogus_directory",
"tahoe run",
lambda tahoe, p: CLINodeAPI(
tahoe.reactor,
tahoe.basedir.sibling(u"bogus"),
).run(p),
"does not look like a directory at all"
)
def test_stop_bad_directory(self):
"""
If ``tahoe run`` is pointed at a directory where no node is running, it
reports an error and exits.
"""
return self._bad_directory_test(
u"test_stop_bad_directory",
"tahoe stop",
lambda tahoe, p: tahoe.stop(p),
"does not look like a running node directory",
)
@inline_callbacks
def _bad_directory_test(self, workdir, description, operation, expected_message):
"""
Verify that a certain ``tahoe`` CLI operation produces a certain expected
message and then exits.
:param unicode workdir: A distinct path name for this test to operate
on.
:param unicode description: A description of the operation being
performed.
:param operation: A two-argument callable implementing the operation.
The first argument is a ``CLINodeAPI`` instance to use to perform
the operation. The second argument is an ``IProcessProtocol`` to
which the operations output must be delivered.
:param unicode expected_message: Some text that is expected in the
stdout or stderr of the operation in the successful case.
:return: A ``Deferred`` that fires when the assertions have been made.
"""
basedir = self.workdir(workdir)
fileutil.make_dirs(basedir) fileutil.make_dirs(basedir)
d = self.run_bintahoe(["--quiet", "start", "--basedir", basedir]) tahoe = CLINodeAPI(reactor, FilePath(basedir))
def _cb(res): # If tahoe ends up thinking it should keep running, make sure it stops
out, err, rc_or_sig = res # promptly when the test is done.
self.failUnlessEqual(rc_or_sig, 1) self.addCleanup(tahoe.cleanup)
self.failUnless("is not a recognizable node directory" in err, err)
d.addCallback(_cb)
def _then_stop_it(res): p = Expect()
return self.run_bintahoe(["--quiet", "stop", "--basedir", basedir]) operation(tahoe, on_stdout_and_stderr(p))
d.addCallback(_then_stop_it)
def _cb2(res): client_running = p.expect(b"client running")
out, err, rc_or_sig = res
self.failUnlessEqual(rc_or_sig, 2)
self.failUnless("does not look like a running node directory" in err)
d.addCallback(_cb2)
def _then_start_in_bogus_basedir(res): result, index = yield DeferredList([
not_a_dir = os.path.join(basedir, "bogus") p.expect(expected_message),
return self.run_bintahoe(["--quiet", "start", "--basedir", not_a_dir]) client_running,
d.addCallback(_then_start_in_bogus_basedir) ], fireOnOneCallback=True, consumeErrors=True,
)
def _cb3(res): self.assertEqual(
out, err, rc_or_sig = res index,
self.failUnlessEqual(rc_or_sig, 1) 0,
self.failUnlessIn("does not look like a directory at all", err) "Expected error message from '{}', got something else: {}".format(
d.addCallback(_cb3) description,
return d p.get_buffered_output(),
),
)
if not platform.isWindows():
# It should not be running.
self.assertFalse(tahoe.twistd_pid_file.exists())
# Wait for the operation to *complete*. If we got this far it's
# because we got the expected message so we can expect the "tahoe ..."
# child process to exit very soon. This other Deferred will fail when
# it eventually does but DeferredList above will consume the error.
# What's left is a perfect indicator that the process has exited and
# we won't get blamed for leaving the reactor dirty.
yield client_running

View File

@ -35,8 +35,6 @@ from allmydata.test.no_network import GridTestMixin
from allmydata.test.common import ShouldFailMixin from allmydata.test.common import ShouldFailMixin
from allmydata.test.common_util import ReallyEqualMixin from allmydata.test.common_util import ReallyEqualMixin
timeout = 240
class Handler(GridTestMixin, ShouldFailMixin, ReallyEqualMixin, unittest.TestCase): class Handler(GridTestMixin, ShouldFailMixin, ReallyEqualMixin, unittest.TestCase):
"""This is a no-network unit test of the SFTPUserHandler and the abstractions it uses.""" """This is a no-network unit test of the SFTPUserHandler and the abstractions it uses."""

View File

@ -421,12 +421,6 @@ def _render_section_values(values):
class SystemTestMixin(pollmixin.PollMixin, testutil.StallMixin): class SystemTestMixin(pollmixin.PollMixin, testutil.StallMixin):
# SystemTestMixin tests tend to be a lot of work, and we have a few
# buildslaves that are pretty slow, and every once in a while these tests
# run up against the default 120 second timeout. So increase the default
# timeout. Individual test cases can override this, of course.
timeout = 300
def setUp(self): def setUp(self):
self.port_assigner = SameProcessStreamEndpointAssigner() self.port_assigner = SameProcessStreamEndpointAssigner()
self.port_assigner.setUp() self.port_assigner.setUp()
@ -752,7 +746,6 @@ class CountingDataUploadable(upload.Data):
return upload.Data.read(self, length) return upload.Data.read(self, length)
class SystemTest(SystemTestMixin, RunBinTahoeMixin, unittest.TestCase): class SystemTest(SystemTestMixin, RunBinTahoeMixin, unittest.TestCase):
timeout = 3600 # It takes longer than 960 seconds on Zandr's ARM box.
def test_connections(self): def test_connections(self):
self.basedir = "system/SystemTest/test_connections" self.basedir = "system/SystemTest/test_connections"

View File

@ -27,11 +27,6 @@ MiB = 1024*1024
def extract_uri(results): def extract_uri(results):
return results.get_uri() return results.get_uri()
# Some of these took longer than 480 seconds on Zandr's arm box, but this may
# have been due to an earlier test ERROR'ing out due to timeout, which seems
# to screw up subsequent tests.
timeout = 960
class Uploadable(unittest.TestCase): class Uploadable(unittest.TestCase):
def shouldEqual(self, data, expected): def shouldEqual(self, data, expected):
self.failUnless(isinstance(data, list)) self.failUnless(isinstance(data, list))

View File

@ -1060,7 +1060,6 @@ class Abbreviate(unittest.TestCase):
self.failUnlessIn("fhtagn", str(e)) self.failUnlessIn("fhtagn", str(e))
class Limiter(unittest.TestCase): class Limiter(unittest.TestCase):
timeout = 480 # This takes longer than 240 seconds on Francois's arm box.
def job(self, i, foo): def job(self, i, foo):
self.calls.append( (i, foo) ) self.calls.append( (i, foo) )

View File

@ -57,8 +57,6 @@ from ..status import FakeStatus
# create a fake uploader/downloader, and a couple of fake dirnodes, then # create a fake uploader/downloader, and a couple of fake dirnodes, then
# create a webserver that works against them # create a webserver that works against them
timeout = 480 # Most of these take longer than 240 seconds on Francois's arm box.
class FakeStatsProvider: class FakeStatsProvider:
def get_stats(self): def get_stats(self):
stats = {'stats': {}, 'counters': {}} stats = {'stats': {}, 'counters': {}}

View File

@ -0,0 +1,63 @@
"""
Helpers for managing garbage collection.
:ivar fileDescriptorResource: A garbage-collection-informing resource tracker
for file descriptors. This is used to trigger a garbage collection when
it may be possible to reclaim a significant number of file descriptors as
a result. Register allocation and release of *bare* file descriptors with
this object (file objects, socket objects, etc, have their own integration
with the garbage collector and don't need to bother with this).
"""
__all__ = [
"fileDescriptorResource",
]
import gc
import attr
@attr.s
class _ResourceTracker(object):
"""
Keep track of some kind of resource and trigger a full garbage collection
when allocations outnumber releases by some amount.
:ivar int _counter: The number of allocations that have happened in excess
of releases since the last full collection triggered by this tracker.
:ivar int _threshold: The number of excess allocations at which point a
full collection will be triggered.
"""
_counter = attr.ib(default=0)
_threshold = attr.ib(default=25)
def allocate(self):
"""
Register the allocation of an instance of this resource.
"""
self._counter += 1
if self._counter > self._threshold:
gc.collect()
# Garbage collection of this resource has done what it can do. If
# nothing was collected, it doesn't make any sense to trigger
# another full collection the very next time the resource is
# allocated. Start the counter over again. The next collection
# happens when we again exceed the threshold.
self._counter = 0
def release(self):
"""
Register the release of an instance of this resource.
"""
if self._counter > 0:
# If there were any excess allocations at this point, account for
# there now being one fewer. It is not helpful to allow the
# counter to go below zero (as naturally would if a collection is
# triggered and then subsequently resources are released). In
# that case, we would be operating as if we had set a higher
# threshold and that is not desired.
self._counter -= 1
fileDescriptorResource = _ResourceTracker()

View File

@ -2,6 +2,10 @@
import os, re, socket, subprocess, errno import os, re, socket, subprocess, errno
from sys import platform from sys import platform
from zope.interface import implementer
import attr
# from Twisted # from Twisted
from twisted.python.reflect import requireModule from twisted.python.reflect import requireModule
from twisted.internet import defer, threads, reactor from twisted.internet import defer, threads, reactor
@ -10,7 +14,14 @@ from twisted.internet.error import CannotListenError
from twisted.python.procutils import which from twisted.python.procutils import which
from twisted.python import log from twisted.python import log
from twisted.internet.endpoints import AdoptedStreamServerEndpoint from twisted.internet.endpoints import AdoptedStreamServerEndpoint
from twisted.internet.interfaces import IReactorSocket from twisted.internet.interfaces import (
IReactorSocket,
IStreamServerEndpoint,
)
from .gcutil import (
fileDescriptorResource,
)
fcntl = requireModule("fcntl") fcntl = requireModule("fcntl")
@ -268,14 +279,23 @@ def _foolscapEndpointForPortNumber(portnum):
s.bind(('', 0)) s.bind(('', 0))
portnum = s.getsockname()[1] portnum = s.getsockname()[1]
s.listen(1) s.listen(1)
# File descriptors are a relatively scarce resource. The
# cleanup process for the file descriptor we're about to dup
# is unfortunately complicated. In particular, it involves
# the Python garbage collector. See CleanupEndpoint for
# details of that. Here, we need to make sure the garbage
# collector actually runs frequently enough to make a
# difference. Normally, the garbage collector is triggered by
# allocations. It doesn't know about *file descriptor*
# allocation though. So ... we'll "teach" it about those,
# here.
fileDescriptorResource.allocate()
fd = os.dup(s.fileno()) fd = os.dup(s.fileno())
flags = fcntl.fcntl(fd, fcntl.F_GETFD) flags = fcntl.fcntl(fd, fcntl.F_GETFD)
flags = flags | os.O_NONBLOCK | fcntl.FD_CLOEXEC flags = flags | os.O_NONBLOCK | fcntl.FD_CLOEXEC
fcntl.fcntl(fd, fcntl.F_SETFD, flags) fcntl.fcntl(fd, fcntl.F_SETFD, flags)
return ( endpoint = AdoptedStreamServerEndpoint(reactor, fd, socket.AF_INET)
portnum, return (portnum, CleanupEndpoint(endpoint, fd))
AdoptedStreamServerEndpoint(reactor, fd, socket.AF_INET),
)
finally: finally:
s.close() s.close()
else: else:
@ -287,6 +307,39 @@ def _foolscapEndpointForPortNumber(portnum):
return (portnum, "tcp:%d" % (portnum,)) return (portnum, "tcp:%d" % (portnum,))
@implementer(IStreamServerEndpoint)
@attr.s
class CleanupEndpoint(object):
"""
An ``IStreamServerEndpoint`` wrapper which closes a file descriptor if the
wrapped endpoint is never used.
:ivar IStreamServerEndpoint _wrapped: The wrapped endpoint. The
``listen`` implementation is delegated to this object.
:ivar int _fd: The file descriptor to close if ``listen`` is never called
by the time this object is garbage collected.
:ivar bool _listened: A flag recording whether or not ``listen`` has been
called.
"""
_wrapped = attr.ib()
_fd = attr.ib()
_listened = attr.ib(default=False)
def listen(self, protocolFactory):
self._listened = True
return self._wrapped.listen(protocolFactory)
def __del__(self):
"""
If ``listen`` was never called then close the file descriptor.
"""
if not self._listened:
os.close(self._fd)
fileDescriptorResource.release()
def listenOnUnused(tub, portnum=None): def listenOnUnused(tub, portnum=None):
""" """
Start listening on an unused TCP port number with the given tub. Start listening on an unused TCP port number with the given tub.

24
tox.ini
View File

@ -19,8 +19,24 @@ passenv = TAHOE_LAFS_* PIP_* SUBUNITREPORTER_* USERPROFILE HOMEDRIVE HOMEPATH
# available to those systems. Installing it ahead of time (with pip) avoids # available to those systems. Installing it ahead of time (with pip) avoids
# this problem. # this problem.
deps = deps =
# Pin all of these versions for the same reason you ever want to pin
# anything: to prevent new releases with regressions from introducing
# spurious failures into CI runs for whatever development work is
# happening at the time. The versions selected here are just the current
# versions at the time. Bumping them to keep up with future releases is
# fine as long as those releases are known to actually work.
pip==19.1.1
setuptools==41.0.1
wheel==0.33.4
subunitreporter==19.3.2
# As an exception, we don't pin certifi because it contains CA
# certificates which necessarily change over time. Pinning this is
# guaranteed to cause things to break eventually as old certificates
# expire and as new ones are used in the wild that aren't present in
# whatever version we pin. Hopefully there won't be functionality
# regressions in new releases of this package that cause us the kind of
# suffering we're trying to avoid with the above pins.
certifi certifi
subunitreporter
# We add usedevelop=False because testing against a true installation gives # We add usedevelop=False because testing against a true installation gives
# more useful results. # more useful results.
@ -40,6 +56,12 @@ commands =
[testenv:coverage] [testenv:coverage]
# coverage (with --branch) takes about 65% longer to run # coverage (with --branch) takes about 65% longer to run
commands = commands =
# As an aid to debugging, dump all of the Python packages and their
# versions that are installed in the test environment. This is
# particularly useful to get from CI runs - though hopefully the
# version pinning we do limits the variability of this output
# somewhat.
pip freeze
tahoe --version tahoe --version
coverage run --branch -m twisted.trial {env:TAHOE_LAFS_TRIAL_ARGS:--rterrors --reporter=timing} {posargs:allmydata} coverage run --branch -m twisted.trial {env:TAHOE_LAFS_TRIAL_ARGS:--rterrors --reporter=timing} {posargs:allmydata}
coverage xml coverage xml