2021-05-07 13:44:44 +00:00
|
|
|
from past.builtins import unicode
|
|
|
|
|
2016-10-06 05:03:35 +00:00
|
|
|
import sys
|
2016-08-22 23:36:56 +00:00
|
|
|
import time
|
2019-08-09 01:13:02 +00:00
|
|
|
import json
|
2020-10-16 15:27:13 +00:00
|
|
|
from os import mkdir, environ
|
2016-10-06 05:03:35 +00:00
|
|
|
from os.path import exists, join
|
2019-03-24 14:04:00 +00:00
|
|
|
from six.moves import StringIO
|
2019-02-15 17:41:45 +00:00
|
|
|
from functools import partial
|
2021-01-11 20:26:38 +00:00
|
|
|
from subprocess import check_output
|
2016-10-06 05:03:35 +00:00
|
|
|
|
2020-11-18 23:26:52 +00:00
|
|
|
from twisted.python.filepath import (
|
|
|
|
FilePath,
|
|
|
|
)
|
2017-07-26 15:29:15 +00:00
|
|
|
from twisted.internet.defer import Deferred, succeed
|
2016-10-06 05:03:35 +00:00
|
|
|
from twisted.internet.protocol import ProcessProtocol
|
|
|
|
from twisted.internet.error import ProcessExitedAlready, ProcessDone
|
2021-01-21 18:54:22 +00:00
|
|
|
from twisted.internet.threads import deferToThread
|
2016-10-06 05:03:35 +00:00
|
|
|
|
2019-07-16 22:51:44 +00:00
|
|
|
import requests
|
|
|
|
|
2021-01-11 19:02:45 +00:00
|
|
|
from paramiko.rsakey import RSAKey
|
2021-01-21 18:54:22 +00:00
|
|
|
from boltons.funcutils import wraps
|
2021-01-11 19:02:45 +00:00
|
|
|
|
2017-07-26 14:49:43 +00:00
|
|
|
from allmydata.util.configutil import (
|
|
|
|
get_config,
|
|
|
|
set_config,
|
|
|
|
write_config,
|
|
|
|
)
|
2019-08-24 18:53:36 +00:00
|
|
|
from allmydata import client
|
2017-07-26 14:49:43 +00:00
|
|
|
|
2019-02-05 16:03:35 +00:00
|
|
|
import pytest_twisted
|
2016-10-06 05:03:35 +00:00
|
|
|
|
|
|
|
|
2021-01-26 17:40:39 +00:00
|
|
|
def block_with_timeout(deferred, reactor, timeout=120):
    """
    Arrange for ``deferred`` to time out after ``timeout`` seconds on
    ``reactor``, then block the calling thread until it has a result.

    :return: the Deferred's result (or raises its failure, including a
        timeout error if it never fires).
    """
    # addTimeout mutates the Deferred in place; blockon then waits on it.
    deferred.addTimeout(timeout, reactor)
    return pytest_twisted.blockon(deferred)
|
|
|
|
|
|
|
|
|
2016-10-06 05:03:35 +00:00
|
|
|
class _ProcessExitedProtocol(ProcessProtocol):
    """
    Internal helper: a minimal process protocol whose only job is to
    fire ``self.done`` (with ``None``) once the child process has
    exited, no matter how or why it exited.
    """

    def __init__(self):
        # Fires when the process ends, successfully or otherwise.
        self.done = Deferred()

    def processEnded(self, reason):
        # The exit reason is deliberately ignored; callers only care
        # *that* the process is gone.
        self.done.callback(None)
|
|
|
|
|
|
|
|
|
|
|
|
class _CollectOutputProtocol(ProcessProtocol):
    """
    Internal helper. Collects all output (stdout + stderr) into
    self.output, and callback's on done with all of it after the
    process exits (for any reason).
    """
    def __init__(self):
        # Fires with the accumulated output once the process is gone, or
        # errbacks first (in processExited) if the process failed.
        self.done = Deferred()
        # Accumulates stdout and stderr interleaved in arrival order.
        # NOTE(review): this is a StringIO but outReceived/errReceived
        # receive bytes on Python 3 -- presumably fine under this file's
        # py2/py3 compatibility setup; confirm before changing.
        self.output = StringIO()

    def processEnded(self, reason):
        # processExited may already have errback'd on a failed exit; only
        # fire the success path if nothing has fired yet.
        if not self.done.called:
            self.done.callback(self.output.getvalue())

    def processExited(self, reason):
        # A non-clean exit propagates the failure instead of the output.
        if not isinstance(reason.value, ProcessDone):
            self.done.errback(reason)

    def outReceived(self, data):
        self.output.write(data)

    def errReceived(self, data):
        # Echo stderr for visibility in the test log, then keep it too.
        print("ERR: {}".format(data))
        self.output.write(data)
|
|
|
|
|
|
|
|
|
|
|
|
class _DumpOutputProtocol(ProcessProtocol):
    """
    Internal helper: stream every chunk of a child's stdout and stderr
    straight to a file-like sink (``sys.stdout`` when none is given),
    firing ``self.done`` once the child exits.
    """
    def __init__(self, f):
        self.done = Deferred()
        # Fall back to our own stdout when no sink was provided.
        self._out = sys.stdout if f is None else f

    def processEnded(self, reason):
        # processExited may already have errback'd; fire success only once.
        if not self.done.called:
            self.done.callback(None)

    def processExited(self, reason):
        # A non-clean exit turns into an errback on .done.
        if not isinstance(reason.value, ProcessDone):
            self.done.errback(reason)

    def outReceived(self, data):
        self._out.write(data)

    def errReceived(self, data):
        # stderr goes to the same sink as stdout.
        self._out.write(data)
|
|
|
|
|
|
|
|
|
|
|
|
class _MagicTextProtocol(ProcessProtocol):
    """
    Internal helper. Monitors all stdout looking for a magic string,
    and then .callback()s on self.done and .errback's if the process exits
    """

    def __init__(self, magic_text):
        # Fires (with this protocol instance) the first time magic_text
        # appears in the accumulated stdout.
        self.magic_seen = Deferred()
        # Fires with None when the process ends, for any reason.
        self.exited = Deferred()
        self._magic_text = magic_text
        self._output = StringIO()

    def processEnded(self, reason):
        self.exited.callback(None)

    def outReceived(self, data):
        # Child output arrives as bytes; decode with our stdout's encoding
        # so it can be echoed and searched as text.
        data = unicode(data, sys.stdout.encoding)
        sys.stdout.write(data)
        self._output.write(data)
        # Search the whole accumulated output so a magic string split
        # across two writes is still found; fire at most once.
        if not self.magic_seen.called and self._magic_text in self._output.getvalue():
            print("Saw '{}' in the logs".format(self._magic_text))
            self.magic_seen.callback(self)

    def errReceived(self, data):
        data = unicode(data, sys.stderr.encoding)
        # stderr is deliberately echoed to stdout so all child output is
        # interleaved in a single stream.
        sys.stdout.write(data)
|
|
|
|
|
|
|
|
|
2019-08-08 21:28:54 +00:00
|
|
|
def _cleanup_tahoe_process(tahoe_transport, exited):
    """
    Terminate the given process with a kill signal (SIGKILL on POSIX,
    TerminateProcess on Windows).

    :param tahoe_transport: The `IProcessTransport` representing the process.

    :param exited: A `Deferred` which fires when the process has exited.

    :return: After the process has exited.
    """
    # Imported here so importing this module doesn't install a reactor.
    from twisted.internet import reactor
    try:
        print("signaling {} with TERM".format(tahoe_transport.pid))
        tahoe_transport.signalProcess('TERM')
        print("signaled, blocking on exit")
        # Block (with a timeout) until the process actually goes away.
        block_with_timeout(exited, reactor)
        print("exited, goodbye")
    except ProcessExitedAlready:
        # Nothing to do -- the process was already gone.
        pass
|
|
|
|
|
|
|
|
|
2019-08-07 20:03:16 +00:00
|
|
|
def _tahoe_runner_optional_coverage(proto, reactor, request, other_args):
|
|
|
|
"""
|
|
|
|
Internal helper. Calls spawnProcess with `-m
|
|
|
|
allmydata.scripts.runner` and `other_args`, optionally inserting a
|
|
|
|
`--coverage` option if the `request` indicates we should.
|
|
|
|
"""
|
|
|
|
if request.config.getoption('coverage'):
|
2021-04-16 15:58:37 +00:00
|
|
|
args = [sys.executable, '-b', '-m', 'coverage', 'run', '-m', 'allmydata.scripts.runner', '--coverage']
|
2019-08-07 20:03:16 +00:00
|
|
|
else:
|
2021-04-16 15:58:37 +00:00
|
|
|
args = [sys.executable, '-b', '-m', 'allmydata.scripts.runner']
|
2019-08-07 20:03:16 +00:00
|
|
|
args += other_args
|
|
|
|
return reactor.spawnProcess(
|
|
|
|
proto,
|
|
|
|
sys.executable,
|
|
|
|
args,
|
2020-10-16 15:27:13 +00:00
|
|
|
env=environ,
|
2019-08-07 20:03:16 +00:00
|
|
|
)
|
|
|
|
|
|
|
|
|
2019-08-08 21:28:54 +00:00
|
|
|
class TahoeProcess(object):
    """
    A running Tahoe process, with associated information.
    """

    def __init__(self, process_transport, node_dir):
        # IProcessTransport instance for the running node
        self._process_transport = process_transport
        # path to the node's base directory
        self._node_dir = node_dir

    @property
    def transport(self):
        """The IProcessTransport of the child process."""
        return self._process_transport

    @property
    def node_dir(self):
        """The node's base directory (a path string)."""
        return self._node_dir

    def get_config(self):
        """Read and return this node's configuration object."""
        return client.read_config(self._node_dir, u"portnum")

    def kill(self):
        """Kill the process, block until it's done."""
        _cleanup_tahoe_process(self.transport, self.transport.exited)

    def __str__(self):
        return "<TahoeProcess in '{}'>".format(self._node_dir)
|
|
|
|
|
2019-08-08 21:28:54 +00:00
|
|
|
|
2021-01-12 16:16:45 +00:00
|
|
|
def _run_node(reactor, node_dir, request, magic_text, finalize=True):
    """
    Run a tahoe process from its node_dir.

    :param magic_text: text to watch for on the node's stdout before
        considering it "running"; defaults to "client running" when None.

    :param finalize: when True, register a pytest finalizer that kills
        the process at fixture teardown.

    :returns: a TahoeProcess for this node
    """
    if magic_text is None:
        magic_text = "client running"
    protocol = _MagicTextProtocol(magic_text)

    # "tahoe run" is consistent across Linux/macOS/Windows, unlike the old
    # "start" command.
    transport = _tahoe_runner_optional_coverage(
        protocol,
        reactor,
        request,
        [
            '--eliot-destination', 'file:{}/logs/eliot.json'.format(node_dir),
            'run',
            node_dir,
        ],
    )
    # Stash the exit Deferred on the transport so callers (e.g.
    # TahoeProcess.kill) can block on process exit.
    transport.exited = protocol.exited

    if finalize:
        request.addfinalizer(partial(_cleanup_tahoe_process, transport, protocol.exited))

    # XXX abusing the Deferred; should use .when_magic_seen() pattern

    def got_proto(proto):
        # Once the magic text is seen, hand callers a TahoeProcess.
        transport._protocol = proto
        return TahoeProcess(
            transport,
            node_dir,
        )
    protocol.magic_seen.addCallback(got_proto)
    return protocol.magic_seen
|
|
|
|
|
|
|
|
|
2017-02-14 23:36:57 +00:00
|
|
|
def _create_node(reactor, request, temp_dir, introducer_furl, flog_gatherer, name, web_port,
                 storage=True,
                 magic_text=None,
                 needed=2,
                 happy=3,
                 total=4,
                 finalize=True):
    """
    Helper to create a single node, run it and return the instance
    spawnProcess returned (ITransport)

    :param storage: when False, pass --no-storage so the node offers no
        storage service.
    :param needed/happy/total: share-placement parameters passed to
        "tahoe create-node".
    """
    node_dir = join(temp_dir, name)
    if web_port is None:
        web_port = ''
    if exists(node_dir):
        # Node already created on a previous run; skip straight to running it.
        created_d = succeed(None)
    else:
        print("creating", node_dir)
        mkdir(node_dir)
        done_proto = _ProcessExitedProtocol()
        args = [
            'create-node',
            '--nickname', name,
            '--introducer', introducer_furl,
            '--hostname', 'localhost',
            '--listen', 'tcp',
            '--webport', web_port,
            '--shares-needed', unicode(needed),
            '--shares-happy', unicode(happy),
            '--shares-total', unicode(total),
            '--helper',
        ]
        if not storage:
            args.append('--no-storage')
        args.append(node_dir)

        _tahoe_runner_optional_coverage(done_proto, reactor, request, args)
        created_d = done_proto.done

        def created(_):
            # After creation, point the node's log gatherer at ours.
            config_path = join(node_dir, 'tahoe.cfg')
            config = get_config(config_path)
            set_config(
                config,
                u'node',
                u'log_gatherer.furl',
                flog_gatherer.decode("utf-8"),
            )
            write_config(FilePath(config_path), config)
        created_d.addCallback(created)

    d = Deferred()
    d.callback(None)
    d.addCallback(lambda _: created_d)
    d.addCallback(lambda _: _run_node(reactor, node_dir, request, magic_text, finalize=finalize))
    return d
|
2016-08-22 23:36:56 +00:00
|
|
|
|
|
|
|
|
2018-04-24 16:45:07 +00:00
|
|
|
class UnwantedFilesException(Exception):
    """
    While waiting for some files to appear, some undesired files
    appeared instead (or in addition).
    """
    def __init__(self, waiting, unwanted):
        message = u"While waiting for '{}', unwanted files appeared: {}".format(
            waiting,
            u', '.join(unwanted),
        )
        super(UnwantedFilesException, self).__init__(message)
|
|
|
|
|
|
|
|
|
2018-04-24 16:58:26 +00:00
|
|
|
class ExpectedFileMismatchException(Exception):
    """
    A file or files we wanted weren't found within the timeout.
    """
    def __init__(self, path, timeout):
        message = u"Contents of '{}' mismatched after {}s".format(path, timeout)
        super(ExpectedFileMismatchException, self).__init__(message)
|
|
|
|
|
|
|
|
|
|
|
|
class ExpectedFileUnfoundException(Exception):
    """
    A file or files we expected to find didn't appear within the
    timeout.
    """
    def __init__(self, path, timeout):
        message = u"Didn't find '{}' after {}s".format(path, timeout)
        super(ExpectedFileUnfoundException, self).__init__(message)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class FileShouldVanishException(Exception):
    """
    A file or files we expected to disappear did not within the
    timeout
    """
    def __init__(self, path, timeout):
        message = u"'{}' still exists after {}s".format(path, timeout)
        super(FileShouldVanishException, self).__init__(message)
|
|
|
|
|
|
|
|
|
2018-03-27 22:11:40 +00:00
|
|
|
def await_file_contents(path, contents, timeout=15, error_if=None):
    """
    wait up to `timeout` seconds for the file at `path` (any path-like
    object) to have the exact content `contents`.

    :param error_if: if specified, a list of additional paths; if any
        of these paths appear an Exception is raised.
    """
    deadline = time.time() + timeout
    while time.time() < deadline:
        print(" waiting for '{}'".format(path))
        unwanted = [p for p in (error_if or []) if exists(p)]
        if unwanted:
            raise UnwantedFilesException(
                waiting=path,
                unwanted=unwanted,
            )
        if exists(path):
            try:
                with open(path, 'r') as f:
                    current = f.read()
            except IOError:
                print("IOError; trying again")
            else:
                if current == contents:
                    return True
                print(" file contents still mismatched")
                print(" wanted: {}".format(contents.replace('\n', ' ')))
                print(" got: {}".format(current.replace('\n', ' ')))
        time.sleep(1)
    # Timed out: distinguish "file present but wrong" from "file absent".
    if exists(path):
        raise ExpectedFileMismatchException(path, timeout)
    raise ExpectedFileUnfoundException(path, timeout)
|
2016-08-22 23:36:56 +00:00
|
|
|
|
|
|
|
|
2018-04-20 23:33:14 +00:00
|
|
|
def await_files_exist(paths, timeout=15, await_all=False):
    """
    wait up to `timeout` seconds for any of the paths to exist; when
    any exist, a list of all found filenames is returned. Otherwise,
    an Exception is raised
    """
    deadline = time.time() + timeout
    while time.time() < deadline:
        print(" waiting for: {}".format(' '.join(paths)))
        found = [p for p in paths if exists(p)]
        print("found: {}".format(found))
        # await_all demands every path; otherwise any single hit is enough.
        satisfied = (len(found) == len(paths)) if await_all else (len(found) > 0)
        if satisfied:
            return found
        time.sleep(1)
    joiner = ' and ' if await_all else ' or '
    raise ExpectedFileUnfoundException(joiner.join(paths), timeout)
|
2018-04-20 23:33:14 +00:00
|
|
|
|
|
|
|
|
2016-08-22 23:36:56 +00:00
|
|
|
def await_file_vanishes(path, timeout=10):
    """
    Wait up to `timeout` seconds for the file at `path` to stop
    existing; raise FileShouldVanishException if it is still there.
    """
    deadline = time.time() + timeout
    while time.time() < deadline:
        print(" waiting for '{}' to vanish".format(path))
        if not exists(path):
            return
        time.sleep(1)
    raise FileShouldVanishException(path, timeout)
|
2019-03-18 20:35:35 +00:00
|
|
|
|
|
|
|
|
2021-01-07 16:25:26 +00:00
|
|
|
def cli(node, *argv):
    """
    Run a tahoe CLI subcommand for a given node in a blocking manner, returning
    the output.
    """
    command = ["tahoe", '--node-directory', node.node_dir]
    command.extend(argv)
    return check_output(command)
|
2019-03-18 20:35:35 +00:00
|
|
|
|
2019-07-16 22:51:44 +00:00
|
|
|
|
2019-07-30 21:01:37 +00:00
|
|
|
def node_url(node_dir, uri_fragment):
    """
    Create a fully qualified URL by reading config from `node_dir` and
    adding the `uri_fragment`
    """
    # The node writes its web API root into node.url at startup.
    with open(join(node_dir, "node.url"), "r") as f:
        base = f.read().strip()
    return base + uri_fragment
|
|
|
|
|
|
|
|
|
2019-08-07 19:12:52 +00:00
|
|
|
def _check_status(response):
|
|
|
|
"""
|
|
|
|
Check the response code is a 2xx (raise an exception otherwise)
|
|
|
|
"""
|
|
|
|
if response.status_code < 200 or response.status_code >= 300:
|
2019-08-08 21:33:21 +00:00
|
|
|
raise ValueError(
|
2019-08-07 19:12:52 +00:00
|
|
|
"Expected a 2xx code, got {}".format(response.status_code)
|
|
|
|
)
|
|
|
|
|
|
|
|
|
2019-08-14 20:43:17 +00:00
|
|
|
def web_get(tahoe, uri_fragment, **kwargs):
    """
    Make a GET request to the webport of `tahoe` (a `TahoeProcess`,
    usually from a fixture (e.g. `alice`). This will look like:
    `http://localhost:<webport>/<uri_fragment>`. All `kwargs` are
    passed on to `requests.get`
    """
    target = node_url(tahoe.node_dir, uri_fragment)
    response = requests.get(target, **kwargs)
    # Fail loudly on anything that isn't a 2xx.
    _check_status(response)
    return response.content
|
|
|
|
|
|
|
|
|
2019-08-14 20:43:17 +00:00
|
|
|
def web_post(tahoe, uri_fragment, **kwargs):
    """
    Make a POST request to the webport of `node` (a `TahoeProcess,
    usually from a fixture e.g. `alice`). This will look like:
    `http://localhost:<webport>/<uri_fragment>`. All `kwargs` are
    passed on to `requests.post`
    """
    target = node_url(tahoe.node_dir, uri_fragment)
    response = requests.post(target, **kwargs)
    # Fail loudly on anything that isn't a 2xx.
    _check_status(response)
    return response.content
|
|
|
|
|
|
|
|
|
2019-08-14 20:43:17 +00:00
|
|
|
def await_client_ready(tahoe, timeout=10, liveness=60*2):
    """
    Uses the status API to wait for a client-type node (in `tahoe`, a
    `TahoeProcess` instance usually from a fixture e.g. `alice`) to be
    'ready'. A client is deemed ready if:

      - it answers `http://<node_url>/statistics/?t=json/`
      - there is at least one storage-server connected
      - every storage-server has a "last_received_data" and it is
        within the last `liveness` seconds

    We will try for up to `timeout` seconds for the above conditions
    to be true. Otherwise, an exception is raised
    """
    deadline = time.time() + float(timeout)
    while time.time() < deadline:
        try:
            data = web_get(tahoe, u"", params={u"t": u"json"})
            js = json.loads(data)
        except Exception as e:
            print("waiting because '{}'".format(e))
            time.sleep(1)
            continue

        if len(js['servers']) == 0:
            print("waiting because no servers at all")
            time.sleep(1)
            continue

        last_contact = [
            srv['last_received_data']
            for srv in js['servers']
        ]
        # a null/None timestamp means that server has never been
        # contacted (so it's probably still down)
        if any(t is None for t in last_contact):
            print("waiting because at least one server not contacted")
            time.sleep(1)
            continue

        # every contact timestamp must be recent enough
        if any([time.time() - t > liveness for t in last_contact]):
            print("waiting because at least one server too old")
            time.sleep(1)
            continue

        # at least one server, all of them contacted recently: ready
        return True

    # we only fall out of the loop when we've timed out
    raise RuntimeError(
        "Waited {} seconds for {} to be 'ready' but it never was".format(
            timeout,
            tahoe,
        )
    )
|
2021-01-07 18:59:57 +00:00
|
|
|
|
|
|
|
|
|
|
|
def generate_ssh_key(path):
    """Create a new SSH private/public key pair."""
    rsa_key = RSAKey.generate(2048)
    # Private half goes to `path`, public half to `path + ".pub"`.
    rsa_key.write_private_key_file(path)
    public_line = b"%s %s" % (rsa_key.get_name(), rsa_key.get_base64())
    with open(path + ".pub", "wb") as pub_file:
        pub_file.write(public_line)
|
2021-01-21 18:54:22 +00:00
|
|
|
|
|
|
|
|
|
|
|
def run_in_thread(f):
    """Decorator for integration tests that runs code in a thread.

    Because we're using pytest_twisted, tests that rely on the reactor are
    expected to return a Deferred and use async APIs so the reactor can run.

    In the case of the integration test suite, it launches nodes in the
    background using Twisted APIs. The nodes stdout and stderr is read via
    Twisted code. If the reactor doesn't run, reads don't happen, and
    eventually the buffers fill up, and the nodes block when they try to flush
    logs.

    We can switch to Twisted APIs (treq instead of requests etc.), but
    sometimes it's easier or expedient to just have a blocking test. So this
    decorator allows you to run the test in a thread, and the reactor can keep
    running in the main thread.

    See https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3597 for tracking bug.
    """
    @wraps(f)
    def wrapper(*args, **kwargs):
        # Defer the (blocking) test body to the reactor's thread pool so
        # the reactor itself keeps running in the main thread.
        return deferToThread(lambda: f(*args, **kwargs))
    return wrapper
|