mirror of https://github.com/tahoe-lafs/tahoe-lafs.git, synced 2025-06-19 07:48:11 +00:00
refactor create_client to be async (the node runs, but some unit tests still fail)
@@ -17,7 +17,7 @@ from allmydata.immutable.offloaded import Helper
 from allmydata.control import ControlServer
 from allmydata.introducer.client import IntroducerClient
 from allmydata.util import (hashutil, base32, pollmixin, log, keyutil, idlib,
-                            yamlutil)
+                            yamlutil, fileutil)
 from allmydata.util.encodingutil import (get_filesystem_encoding,
                                          from_utf8_or_none)
 from allmydata.util.abbreviate import parse_abbreviated_size
@@ -178,6 +178,25 @@ def read_config(basedir, portnumfile, generated_files=[]):
 
 
 #@defer.inlineCallbacks
+def create_client(basedir=u"."
+    node.create_node_dir(basedir, CLIENT_README)
+    config = read_config(basedir, u"client.port")
+
+    if _client_factory is None:
+        _client_factory = _Client
+
+    #defer.returnValue(
+    return _client_factory(
+        config,
+=======
+PRIV_README="""
+This directory contains files which contain private data for the Tahoe node,
+such as private keys. On Unix-like systems, the permissions on this directory
+are set to disallow users other than its owner from reading the contents of
+the files. See the 'configuration.rst' documentation file for details."""
+
+
+@defer.inlineCallbacks
 def create_client(basedir=u".", _client_factory=None):
     """
     Creates a new client instance (a subclass of Node).
@@ -191,17 +210,41 @@ def create_client(basedir=u".", _client_factory=None):
     :returns: :class:`allmydata.client._Client` instance (or whatever
         `_client_factory` returns)
     """
+    from allmydata.node import read_config, create_connection_handlers, create_tub_options
+    from allmydata.node import create_main_tub, create_control_tub, create_tub
+
+    # should we check for this directory existing first? (this used to
+    # be in Node's constructor)
     node.create_node_dir(basedir, CLIENT_README)
     config = read_config(basedir, u"client.port")
 
     if _client_factory is None:
         _client_factory = _Client
 
-    #defer.returnValue(
-    return _client_factory(
-        config,
-    )
-    #)
+    default_connection_handlers, foolscap_connection_handlers = create_connection_handlers(reactor, basedir, config)
+    tub_options = create_tub_options(config)
+
+    yield
+
+    i2p_provider = None
+    tor_provider = None
+    reveal_ip = True # XXX FIXME
+    main_tub, is_listening = create_main_tub(
+        basedir, config, tub_options, default_connection_handlers,
+        foolscap_connection_handlers, reveal_ip=reveal_ip,
+    )
+    control_tub = create_control_tub()
+    defer.returnValue(
+        _Client(
+            config,
+            main_tub,
+            control_tub,
+            i2p_provider,
+            tor_provider,
+            basedir,
+            tub_is_listening=is_listening,
+        )
+    )
 
 
 @implementer(IStatsProducer)
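With this hunk create_client stops returning a _Client directly and instead returns a Deferred that eventually fires with one (via defer.returnValue). A minimal caller sketch in the same inlineCallbacks style the updated tests below use; the start_node name and the startService call are illustrative, not part of the patch:

from twisted.internet import defer
from allmydata.client import create_client

@defer.inlineCallbacks
def start_node(basedir):
    # create_client() now returns a Deferred that fires with the _Client
    client = yield create_client(basedir)
    client.startService()   # _Client is still a service.MultiService
    defer.returnValue(client)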
@@ -226,8 +269,8 @@ class _Client(node.Node, pollmixin.PollMixin):
                            "max_segment_size": 128*KiB,
                            }
 
-    def __init__(self, config):
-        node.Node.__init__(self, config)
+    def __init__(self, config, main_tub, control_tub, i2p_provider, tor_provider, basedir, tub_is_listening):
+        node.Node.__init__(self, config, main_tub, control_tub, i2p_provider, tor_provider, basedir, tub_is_listening)
         # All tub.registerReference must happen *after* we upcall, since
         # that's what does tub.setLocation()
         self._magic_folders = dict()
@@ -496,10 +539,24 @@ class _Client(node.Node, pollmixin.PollMixin):
         # (and everybody else who wants to use storage servers)
         ps = self.config.get_config("client", "peers.preferred", "").split(",")
         preferred_peers = tuple([p.strip() for p in ps if p != ""])
-        sb = storage_client.StorageFarmBroker(permute_peers=True,
-                                              tub_maker=self._create_tub,
-                                              preferred_peers=preferred_peers,
-                                              )
+        from allmydata.node import create_tub, create_tub_options
+
+        def tub_creator(handler_overrides={}, **kwargs):
+            tub_options = create_tub_options(self.config)
+            return create_tub(
+                tub_options,
+                self._default_connection_handlers,
+                self._foolscap_connection_handlers,
+                handler_overrides=handler_overrides,
+                **kwargs
+            )
+
+        sb = storage_client.StorageFarmBroker(
+            permute_peers=True,
+            tub_maker=tub_creator,
+            preferred_peers=preferred_peers,
+        )
         self.storage_broker = sb
         sb.setServiceParent(self)
         for ic in self.introducer_clients:
@@ -400,6 +400,273 @@ class _Config(object):
         return False
 
 
+def create_tub_options(config):
+    # XXX this is code moved from Node -- but why are some options
+    # camelCase and some snake_case? can we FIXME?
+    tub_options = {
+        "logLocalFailures": True,
+        "logRemoteFailures": True,
+        "expose-remote-exception-types": False,
+        "accept-gifts": False,
+    }
+
+    # see #521 for a discussion of how to pick these timeout values.
+    keepalive_timeout_s = config.get_config("node", "timeout.keepalive", "")
+    if keepalive_timeout_s:
+        tub_options["keepaliveTimeout"] = int(keepalive_timeout_s)
+    disconnect_timeout_s = config.get_config("node", "timeout.disconnect", "")
+    if disconnect_timeout_s:
+        # N.B.: this is in seconds, so use "1800" to get 30min
+        tub_options["disconnectTimeout"] = int(disconnect_timeout_s)
+    return tub_options
+
+
+def create_i2p_provider(reactor, basedir, config):
+    provider = i2p_provider.Provider(basedir, config, reactor)
+    provider.check_dest_config()
+    #self._i2p_provider.setServiceParent(self)
+    return provider
+
+
+def create_tor_provider(reactor, basedir, config):
+    provider = tor_provider.Provider(basedir, config, reactor)
+    provider.check_onion_config()
+    ##self._tor_provider.setServiceParent(self)
+    return provider
+
+
+def _make_tcp_handler():
+    # this is always available
+    from foolscap.connections.tcp import default
+    return default()
+
+
+# XXX shouldn't need this
+def _make_tor_handler(tor_provider):
+    tor_provider.get_tor_handler()
+
+
+# XXX shouldn't need this
+def _make_i2p_handler(i2p_provider):
+    i2p_provider.get_i2p_handler()
+
+
+def create_connection_handlers(reactor, basedir, config):
+    """
+    :returns: 2-tuple of default_connection_handlers, foolscap_connection_handlers
+    """
+    reveal_ip = config.get_config("node", "reveal-IP-address", True, boolean=True)
+    i2p_provider = create_i2p_provider(reactor, basedir, config)
+    tor_provider = create_tor_provider(reactor, basedir, config)
+
+    # We store handlers for everything. None means we were unable to
+    # create that handler, so hints which want it will be ignored.
+    handlers = foolscap_connection_handlers = {
+        "tcp": _make_tcp_handler(),
+        "tor": _make_tor_handler(tor_provider),
+        "i2p": _make_i2p_handler(i2p_provider),
+    }
+    log.msg(
+        format="built Foolscap connection handlers for: %(known_handlers)s",
+        known_handlers=sorted([k for k,v in handlers.items() if v]),
+        facility="tahoe.node",
+        umid="PuLh8g",
+    )
+
+    # then we remember the default mappings from tahoe.cfg
+    default_connection_handlers = {"tor": "tor", "i2p": "i2p"}
+    tcp_handler_name = config.get_config("connections", "tcp", "tcp").lower()
+    if tcp_handler_name == "disabled":
+        default_connection_handlers["tcp"] = None
+    else:
+        if tcp_handler_name not in handlers:
+            raise ValueError(
+                "'tahoe.cfg [connections] tcp=' uses "
+                "unknown handler type '{}'".format(
+                    tcp_handler_name
+                )
+            )
+        if not handlers[tcp_handler_name]:
+            raise ValueError(
+                "'tahoe.cfg [connections] tcp=' uses "
+                "unavailable/unimportable handler type '{}'. "
+                "Please pip install tahoe-lafs[{}] to fix.".format(
+                    tcp_handler_name,
+                    tcp_handler_name,
+                )
+            )
+        default_connection_handlers["tcp"] = tcp_handler_name
+
+    if not reveal_ip:
+        if default_connection_handlers.get("tcp") == "tcp":
+            raise PrivacyError("tcp = tcp, must be set to 'tor' or 'disabled'")
+    return default_connection_handlers, foolscap_connection_handlers
+
+
+
+def create_tub(tub_options, default_connection_handlers, foolscap_connection_handlers,
+               handler_overrides={}, **kwargs):
+    # Create a Tub with the right options and handlers. It will be
+    # ephemeral unless the caller provides certFile=
+    tub = Tub(**kwargs)
+    for (name, value) in tub_options.items():
+        tub.setOption(name, value)
+    handlers = default_connection_handlers.copy()
+    handlers.update(handler_overrides)
+    tub.removeAllConnectionHintHandlers()
+    for hint_type, handler_name in handlers.items():
+        handler = foolscap_connection_handlers.get(handler_name)
+        if handler:
+            tub.addConnectionHintHandler(hint_type, handler)
+    return tub
+
+
+def _write_config(basedir, name, value, mode="w"):
+    """
+    Write a string to a config file.
+    """
+    fn = os.path.join(basedir, name)
+    try:
+        fileutil.write(fn, value, mode)
+    except EnvironmentError, e:
+        log.msg("Unable to write config file '{}'".format(fn))
+        log.err(e)
+
+
+def _convert_tub_port(s):
+    if re.search(r'^\d+$', s):
+        return "tcp:{}".format(int(s))
+    return s
+
+
+def _tub_portlocation(config, cfg_tubport, cfg_location, reveal_ip=True):
+    # return None, or tuple of (port, location)
+
+    tubport_disabled = False
+    if cfg_tubport is not None:
+        cfg_tubport = cfg_tubport.strip()
+        if cfg_tubport == "":
+            raise ValueError("tub.port must not be empty")
+        if cfg_tubport == "disabled":
+            tubport_disabled = True
+
+    location_disabled = False
+    if cfg_location is not None:
+        cfg_location = cfg_location.strip()
+        if cfg_location == "":
+            raise ValueError("tub.location must not be empty")
+        if cfg_location == "disabled":
+            location_disabled = True
+
+    if tubport_disabled and location_disabled:
+        return None
+    if tubport_disabled and not location_disabled:
+        raise ValueError("tub.port is disabled, but not tub.location")
+    if location_disabled and not tubport_disabled:
+        raise ValueError("tub.location is disabled, but not tub.port")
+
+    if cfg_tubport is None:
+        # For 'tub.port', tahoe.cfg overrides the individual file on
+        # disk. So only read self._portnumfile if tahoe.cfg doesn't
+        # provide a value.
+        if os.path.exists(config.portnum_fname):
+            file_tubport = fileutil.read(config.portnum_fname).strip()
+            tubport = _convert_tub_port(file_tubport)
+        else:
+            tubport = "tcp:%d" % iputil.allocate_tcp_port()
+            fileutil.write_atomically(config.portnum_fname, tubport + "\n",
+                                      mode="")
+    else:
+        tubport = _convert_tub_port(cfg_tubport)
+
+    if cfg_location is None:
+        cfg_location = "AUTO"
+
+    local_portnum = None # needed to hush lgtm.com static analyzer
+    # Replace the location "AUTO", if present, with the detected local
+    # addresses. Don't probe for local addresses unless necessary.
+    split_location = cfg_location.split(",")
+    if "AUTO" in split_location:
+        if not reveal_ip:
+            raise PrivacyError("tub.location uses AUTO")
+        local_addresses = iputil.get_local_addresses_sync()
+        # tubport must be like "tcp:12345" or "tcp:12345:morestuff"
+        local_portnum = int(tubport.split(":")[1])
+    new_locations = []
+    for loc in split_location:
+        if loc == "AUTO":
+            new_locations.extend(["tcp:%s:%d" % (ip, local_portnum)
+                                  for ip in local_addresses])
+        else:
+            if not reveal_ip:
+                # Legacy hints are "host:port". We use Foolscap's utility
+                # function to convert all hints into the modern format
+                # ("tcp:host:port") because that's what the receiving
+                # client will probably do. We test the converted hint for
+                # TCP-ness, but publish the original hint because that
+                # was the user's intent.
+                from foolscap.connections.tcp import convert_legacy_hint
+                converted_hint = convert_legacy_hint(loc)
+                hint_type = converted_hint.split(":")[0]
+                if hint_type == "tcp":
+                    raise PrivacyError("tub.location includes tcp: hint")
+            new_locations.append(loc)
+    location = ",".join(new_locations)
+
+    return tubport, location
+
+
+def create_main_tub(basedir, config, tub_options, default_connection_handlers,
+                    foolscap_connection_handlers, handler_overrides={}, reveal_ip=True):
+    certfile = os.path.join(basedir, "private", "node.pem") # FIXME "node.pem" was the CERTFILE option/thing
+    tub = create_tub(tub_options, default_connection_handlers, foolscap_connection_handlers,
+                     handler_overrides=handler_overrides, certFile=certfile)
+
+    cfg_tubport = config.get_config("node", "tub.port", None)
+    cfg_location = config.get_config("node", "tub.location", None)
+    portlocation = _tub_portlocation(config, cfg_tubport, cfg_location, reveal_ip)
+    if portlocation:
+        tubport, location = portlocation
+        for port in tubport.split(","):
+            if port in ("0", "tcp:0"):
+                raise ValueError("tub.port cannot be 0: you must choose")
+            if port == "listen:i2p":
+                # the I2P provider will read its section of tahoe.cfg and
+                # return either a fully-formed Endpoint, or a descriptor
+                # that will create one, so we don't have to stuff all the
+                # options into the tub.port string (which would need a lot
+                # of escaping)
+                port_or_endpoint = self._i2p_provider.get_listener()
+            elif port == "listen:tor":
+                port_or_endpoint = self._tor_provider.get_listener()
+            else:
+                port_or_endpoint = port
+            tub.listenOn(port_or_endpoint)
+        tub.setLocation(location)
+        tub_is_listening = True
+        log.msg("Tub location set to %s" % (location,))
+        # the Tub is now ready for tub.registerReference()
+    else:
+        tub_is_listening = False
+        log.msg("Tub is not listening")
+
+    # XXX can we get rid of the tub_is_listening part?
+    return tub, tub_is_listening
+
+
+def create_control_tub():
+    # the control port uses a localhost-only ephemeral Tub, with no
+    # control over the listening port or location
+    control_tub = Tub()
+    portnum = iputil.allocate_tcp_port()
+    port = "tcp:%d:interface=127.0.0.1" % portnum
+    location = "tcp:127.0.0.1:%d" % portnum
+    control_tub.listenOn(port)
+    control_tub.setLocation(location)
+    log.msg("Control Tub location set to %s" % (location,))
+    return control_tub
+
+
 class Node(service.MultiService):
     """
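These module-level helpers replace the corresponding Node methods that are removed later in this diff, so a Tub can now be assembled without a Node instance. A rough composition sketch mirroring the new create_client body above (build_tubs is an illustrative name; provider setup and error handling are omitted):

from twisted.internet import reactor
from allmydata.node import (read_config, create_tub_options,
                            create_connection_handlers, create_main_tub,
                            create_control_tub)

def build_tubs(basedir):
    config = read_config(basedir, u"client.port")
    tub_options = create_tub_options(config)
    defaults, foolscap_handlers = create_connection_handlers(reactor, basedir, config)
    main_tub, is_listening = create_main_tub(
        basedir, config, tub_options, defaults, foolscap_handlers)
    control_tub = create_control_tub()
    return main_tub, control_tub, is_listening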
@@ -409,30 +676,42 @@ class Node(service.MultiService):
     CERTFILE = "node.pem"
     GENERATED_FILES = []
 
-    def __init__(self, config):
+    def __init__(self, config, main_tub, control_tub, i2p_provider, tor_provider, basedir, tub_is_listening):
         """
         Initialize the node with the given configuration. Its base directory
         is the current directory by default.
         """
         service.MultiService.__init__(self)
+
+        self._tub_is_listening = tub_is_listening # holdover; do we really need this?
         self.config = config
         self.get_config = config.get_config # XXX stopgap
         self.nickname = config.nickname # XXX stopgap
 
         self.init_tempdir()
-        self.check_privacy()
 
         self.create_log_tub()
         self.logSource = "Node"
         self.setup_logging()
 
-        self.create_i2p_provider()
-        self.create_tor_provider()
-        self.init_connections()
-        self.set_tub_options()
-        self.create_main_tub()
-        self.create_control_tub()
+        # XXX do we need to save these? or does just "create_client"
+        # need them? (note: look in client.py also!)
+        # (client.py DOES use them in init_client_storage_broker, but
+        # we'll want to pull that out as well...so FIXME later)
+        self._default_connection_handlers, self._foolscap_connection_handlers = create_connection_handlers(reactor, basedir, config)
+
+        self.tub = main_tub
+        if self.tub is not None:
+            self.nodeid = b32decode(self.tub.tubID.upper()) # binary format
+            self.short_nodeid = b32encode(self.nodeid).lower()[:8] # for printing
+            self.write_config("my_nodeid", b32encode(self.nodeid).lower() + "\n")
+            self.tub.setServiceParent(self) # is this okay in __init__?
+        else:
+            self.nodeid = self.short_nodeid = None
+
+        self.control_tub = control_tub
+        if self.control_tub is not None:
+            self.control_tub.setServiceParent(self) # is this okay in __init__?
+
         self.log("Node constructed. " + get_package_versions_string())
         iputil.increase_rlimits()
@@ -454,225 +733,7 @@ class Node(service.MultiService):
         _assert(os.path.dirname(test_name) == tempdir, test_name, tempdir)
         os.close(temp_fd) # avoid leak of unneeded fd
 
-    def check_privacy(self):
-        self._reveal_ip = self.config.get_config("node", "reveal-IP-address", True,
-                                                 boolean=True)
-    def create_i2p_provider(self):
-        self._i2p_provider = i2p_provider.Provider(self.config, reactor)
-        self._i2p_provider.check_dest_config()
-        self._i2p_provider.setServiceParent(self)
-
-    def create_tor_provider(self):
-        self._tor_provider = tor_provider.Provider(self.config, reactor)
-        self._tor_provider.check_onion_config()
-        self._tor_provider.setServiceParent(self)
-
-    def _make_tcp_handler(self):
-        # this is always available
-        from foolscap.connections.tcp import default
-        return default()
-
-    def _make_tor_handler(self):
-        return self._tor_provider.get_tor_handler()
-
-    def _make_i2p_handler(self):
-        return self._i2p_provider.get_i2p_handler()
-
-    def init_connections(self):
-        # We store handlers for everything. None means we were unable to
-        # create that handler, so hints which want it will be ignored.
-        handlers = self._foolscap_connection_handlers = {
-            "tcp": self._make_tcp_handler(),
-            "tor": self._make_tor_handler(),
-            "i2p": self._make_i2p_handler(),
-        }
-        self.log(format="built Foolscap connection handlers for: %(known_handlers)s",
-                 known_handlers=sorted([k for k, v in handlers.items() if v]),
-                 facility="tahoe.node", umid="PuLh8g")
-
-        # then we remember the default mappings from tahoe.cfg
-        self._default_connection_handlers = {"tor": "tor", "i2p": "i2p"}
-        tcp_handler_name = self.config.get_config("connections", "tcp", "tcp").lower()
-        if tcp_handler_name == "disabled":
-            self._default_connection_handlers["tcp"] = None
-        else:
-            if tcp_handler_name not in handlers:
-                raise ValueError("'tahoe.cfg [connections] tcp='"
-                                 " uses unknown handler type '%s'"
-                                 % tcp_handler_name)
-            if not handlers[tcp_handler_name]:
-                raise ValueError("'tahoe.cfg [connections] tcp=' uses "
-                                 "unavailable/unimportable handler type '%s'. "
-                                 "Please pip install tahoe-lafs[%s] to fix."
-                                 % (tcp_handler_name, tcp_handler_name))
-            self._default_connection_handlers["tcp"] = tcp_handler_name
-
-        if not self._reveal_ip:
-            if self._default_connection_handlers.get("tcp") == "tcp":
-                raise PrivacyError("tcp = tcp, must be set to 'tor' or 'disabled'")
-
-    def set_tub_options(self):
-        self.tub_options = {
-            "logLocalFailures": True,
-            "logRemoteFailures": True,
-            "expose-remote-exception-types": False,
-            "accept-gifts": False,
-        }
-
-        # see #521 for a discussion of how to pick these timeout values.
-        keepalive_timeout_s = self.config.get_config("node", "timeout.keepalive", "")
-        if keepalive_timeout_s:
-            self.tub_options["keepaliveTimeout"] = int(keepalive_timeout_s)
-        disconnect_timeout_s = self.config.get_config("node", "timeout.disconnect", "")
-        if disconnect_timeout_s:
-            # N.B.: this is in seconds, so use "1800" to get 30min
-            self.tub_options["disconnectTimeout"] = int(disconnect_timeout_s)
-
-    def _create_tub(self, handler_overrides={}, **kwargs):
-        # Create a Tub with the right options and handlers. It will be
-        # ephemeral unless the caller provides certFile=
-        tub = Tub(**kwargs)
-        for (name, value) in self.tub_options.items():
-            tub.setOption(name, value)
-        handlers = self._default_connection_handlers.copy()
-        handlers.update(handler_overrides)
-        tub.removeAllConnectionHintHandlers()
-        for hint_type, handler_name in handlers.items():
-            handler = self._foolscap_connection_handlers.get(handler_name)
-            if handler:
-                tub.addConnectionHintHandler(hint_type, handler)
-        return tub
-
-    def _convert_tub_port(self, s):
-        if re.search(r'^\d+$', s):
-            return "tcp:%d" % int(s)
-        return s
-
-    def get_tub_portlocation(self, cfg_tubport, cfg_location):
-        # return None, or tuple of (port, location)
-
-        tubport_disabled = False
-        if cfg_tubport is not None:
-            cfg_tubport = cfg_tubport.strip()
-            if cfg_tubport == "":
-                raise ValueError("tub.port must not be empty")
-            if cfg_tubport == "disabled":
-                tubport_disabled = True
-
-        location_disabled = False
-        if cfg_location is not None:
-            cfg_location = cfg_location.strip()
-            if cfg_location == "":
-                raise ValueError("tub.location must not be empty")
-            if cfg_location == "disabled":
-                location_disabled = True
-
-        if tubport_disabled and location_disabled:
-            return None
-        if tubport_disabled and not location_disabled:
-            raise ValueError("tub.port is disabled, but not tub.location")
-        if location_disabled and not tubport_disabled:
-            raise ValueError("tub.location is disabled, but not tub.port")
-
-        if cfg_tubport is None:
-            # For 'tub.port', tahoe.cfg overrides the individual file on
-            # disk. So only read self._portnumfile if tahoe.cfg doesn't
-            # provide a value.
-            if os.path.exists(self.config.portnum_fname):
-                file_tubport = fileutil.read(self.config.portnum_fname).strip()
-                tubport = self._convert_tub_port(file_tubport)
-            else:
-                tubport = "tcp:%d" % iputil.allocate_tcp_port()
-                fileutil.write_atomically(self.config.portnum_fname, tubport + "\n",
-                                          mode="")
-        else:
-            tubport = self._convert_tub_port(cfg_tubport)
-
-        if cfg_location is None:
-            cfg_location = "AUTO"
-
-        local_portnum = None # needed to hush lgtm.com static analyzer
-        # Replace the location "AUTO", if present, with the detected local
-        # addresses. Don't probe for local addresses unless necessary.
-        split_location = cfg_location.split(",")
-        if "AUTO" in split_location:
-            if not self._reveal_ip:
-                raise PrivacyError("tub.location uses AUTO")
-            local_addresses = iputil.get_local_addresses_sync()
-            # tubport must be like "tcp:12345" or "tcp:12345:morestuff"
-            local_portnum = int(tubport.split(":")[1])
-        new_locations = []
-        for loc in split_location:
-            if loc == "AUTO":
-                new_locations.extend(["tcp:%s:%d" % (ip, local_portnum)
-                                      for ip in local_addresses])
-            else:
-                if not self._reveal_ip:
-                    # Legacy hints are "host:port". We use Foolscap's utility
-                    # function to convert all hints into the modern format
-                    # ("tcp:host:port") because that's what the receiving
-                    # client will probably do. We test the converted hint for
-                    # TCP-ness, but publish the original hint because that
-                    # was the user's intent.
-                    from foolscap.connections.tcp import convert_legacy_hint
-                    converted_hint = convert_legacy_hint(loc)
-                    hint_type = converted_hint.split(":")[0]
-                    if hint_type == "tcp":
-                        raise PrivacyError("tub.location includes tcp: hint")
-                new_locations.append(loc)
-        location = ",".join(new_locations)
-
-        return tubport, location
-
-    def create_main_tub(self):
-        certfile = self.config.get_private_path(self.CERTFILE)
-        self.tub = self._create_tub(certFile=certfile)
-
-        self.nodeid = b32decode(self.tub.tubID.upper()) # binary format
-        self.config.write_config_file("my_nodeid", b32encode(self.nodeid).lower() + "\n")
-        self.short_nodeid = b32encode(self.nodeid).lower()[:8] # for printing
-        cfg_tubport = self.config.get_config("node", "tub.port", None)
-        cfg_location = self.config.get_config("node", "tub.location", None)
-        portlocation = self.get_tub_portlocation(cfg_tubport, cfg_location)
-        if portlocation:
-            tubport, location = portlocation
-            for port in tubport.split(","):
-                if port in ("0", "tcp:0"):
-                    raise ValueError("tub.port cannot be 0: you must choose")
-                if port == "listen:i2p":
-                    # the I2P provider will read its section of tahoe.cfg and
-                    # return either a fully-formed Endpoint, or a descriptor
-                    # that will create one, so we don't have to stuff all the
-                    # options into the tub.port string (which would need a lot
-                    # of escaping)
-                    port_or_endpoint = self._i2p_provider.get_listener()
-                elif port == "listen:tor":
-                    port_or_endpoint = self._tor_provider.get_listener()
-                else:
-                    port_or_endpoint = port
-                self.tub.listenOn(port_or_endpoint)
-            self.tub.setLocation(location)
-            self._tub_is_listening = True
-            self.log("Tub location set to %s" % (location,))
-            # the Tub is now ready for tub.registerReference()
-        else:
-            self._tub_is_listening = False
-            self.log("Tub is not listening")
-
-        self.tub.setServiceParent(self)
-
-    def create_control_tub(self):
-        # the control port uses a localhost-only ephemeral Tub, with no
-        # control over the listening port or location
-        self.control_tub = Tub()
-        portnum = iputil.allocate_tcp_port()
-        port = "tcp:%d:interface=127.0.0.1" % portnum
-        location = "tcp:127.0.0.1:%d" % portnum
-        self.control_tub.listenOn(port)
-        self.control_tub.setLocation(location)
-        self.log("Control Tub location set to %s" % (location,))
-        self.control_tub.setServiceParent(self)
-
+    # XXX probably want to pull this outside too?
 
     def create_log_tub(self):
         # The logport uses a localhost-only ephemeral Tub, with no control
         # over the listening port or location. This might change if we
@@ -688,6 +749,12 @@ class Node(service.MultiService):
         self.log("Log Tub location set to %s" % (location,))
         self.log_tub.setServiceParent(self)
 
+    # XXX this should be deprecated; no reason for it to be a method;
+    # use _write_config() instead
+    def write_config(self, name, value, mode="w"):
+        """Write a string to a config file."""
+        _write_config(self.basedir, name, value, mode=mode)
+
     def startService(self):
         # Note: this class can be started and stopped at most once.
         self.log("Node.startService")
@@ -4,12 +4,16 @@ from allmydata.scripts.common import BasedirOptions
 from twisted.scripts import twistd
 from twisted.python import usage
 from twisted.python.reflect import namedAny
+from twisted.internet.defer import maybeDeferred, fail
+from twisted.application.service import Service
 
 from allmydata.scripts.default_nodedir import _default_nodedir
 from allmydata.util import fileutil
 from allmydata.node import read_config
 from allmydata.util.encodingutil import listdir_unicode, quote_local_unicode_path
 from allmydata.util.configutil import UnknownConfigError
 from twisted.application.service import Service
+from allmydata.util.deferredutil import HookMixin
+
 
 def get_pidfile(basedir):
@@ -102,22 +106,33 @@ class MyTwistdConfig(twistd.ServerOptions):
     subCommands = [("DaemonizeTahoeNode", None, usage.Options, "node")]
 
 
-class DaemonizeTheRealService(Service):
+class DaemonizeTheRealService(Service, HookMixin):
+    """
+    this HookMixin should really be a helper; our hooks:
+
+    - 'running': triggered when startup has completed; it triggers
+        with None of successful or a Failure otherwise.
+    """
+
     def __init__(self, nodetype, basedir, options):
+        super(DaemonizeTheRealService, self).__init__()
         self.nodetype = nodetype
         self.basedir = basedir
+        # setup for HookMixin
+        self._hooks = {
+            "running": None,
+        }
 
     def startService(self):
 
         def key_generator_removed():
-            raise ValueError("key-generator support removed, see #2783")
+            return fail(ValueError("key-generator support removed, see #2783"))
 
         def start():
             node_to_instance = {
-                u"client": lambda: namedAny("allmydata.client.create_client")(self.basedir),
-                u"introducer": lambda: namedAny("allmydata.introducer.server.create_introducer")(self.basedir),
-                u"stats-gatherer": lambda: namedAny("allmydata.stats.StatsGathererService")(read_config(self.basedir, None), self.basedir, verbose=True),
+                u"client": lambda: maybeDeferred(namedAny("allmydata.client.create_client"), self.basedir),
+                u"introducer": lambda: maybeDeferred(namedAny("allmydata.introducer.server.create_introducer"), self.basedir),
+                u"stats-gatherer": lambda: maybeDeferred(namedAny("allmydata.stats.StatsGathererService"), read_config(self.basedir, None), self.basedir, verbose=True),
                 u"key-generator": key_generator_removed,
             }
 
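Wrapping each factory in maybeDeferred means the daemonizer no longer cares whether a factory returns a plain object (StatsGathererService) or a Deferred (the new create_client): either way it gets a Deferred back. A small standalone illustration, not taken from the patch:

from twisted.internet import defer
from twisted.internet.defer import maybeDeferred

def sync_factory():
    return "service"                  # plain value

def async_factory():
    return defer.succeed("service")   # already a Deferred

# both calls produce a Deferred that fires with "service"
d1 = maybeDeferred(sync_factory)
d2 = maybeDeferred(async_factory)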
@@ -126,15 +141,27 @@ class DaemonizeTheRealService(Service):
             except KeyError:
                 raise ValueError("unknown nodetype %s" % self.nodetype)
 
-            try:
-                srv = service_factory()
-                srv.setServiceParent(self.parent)
-            except UnknownConfigError as e:
-                sys.stderr.write("\nConfiguration error:\n{}\n\n".format(e))
+            def handle_config_error(fail):
+                fail.trap(UnknownConfigError)
+                sys.stderr.write("\nConfiguration error:\n{}\n\n".format(fail.value))
                 reactor.stop()
+                return
+
+            d = service_factory()
+
+            def created(srv):
+                srv.setServiceParent(self.parent)
+            d.addCallback(created)
+            d.addErrback(handle_config_error)
+            d.addBoth(self._call_hook, 'running')
+            # we've handled error via hook now (otherwise Twisted will
+            # want to fail some things)
+            d.addErrback(lambda _: None)
+            return d
 
         from twisted.internet import reactor
-        reactor.callWhenRunning(start)
+        x = reactor.callWhenRunning(start)
+        print("DING {}".format(x))
 
 
 class DaemonizeTahoeNodePlugin(object):
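Because the real startup now happens inside reactor.callWhenRunning, the 'running' hook is the only way for a caller to learn when (or whether) startup finished. A hedged sketch of waiting on it, in the same style as the updated test below; the options argument is illustrative:

service = DaemonizeTheRealService(u"client", basedir, options)  # options: illustrative
d = service.set_hook('running')   # HookMixin Deferred, fired via _call_hook(..., 'running')
service.startService()

def on_running(result):
    # per the docstring above: None on success, a Failure otherwise
    print("startup finished: {}".format(result))
d.addBoth(on_running)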
@@ -4,6 +4,7 @@ from mock import patch, Mock
 from StringIO import StringIO
 from sys import getfilesystemencoding
 from twisted.trial import unittest
+from twisted.internet import defer
 from allmydata.scripts import runner
 from allmydata.scripts.tahoe_daemonize import identify_node_type
 from allmydata.scripts.tahoe_daemonize import DaemonizeTahoeNodePlugin
@@ -45,23 +46,32 @@ class Util(unittest.TestCase):
 
         self.assertTrue(service is not None)
 
+    # @defer.inlineCallbacks
     def test_daemonize_no_keygen(self):
         tmpdir = self.mktemp()
         plug = DaemonizeTahoeNodePlugin('key-generator', tmpdir)
 
-        with patch('twisted.internet.reactor') as r:
+        if True:#with patch('twisted.internet.reactor') as r:
             def call(fn, *args, **kw):
                 fn()
-            r.callWhenRunning = call
-            r.stop = lambda: None
+            # r.callWhenRunning = call
+            # r.stop = 'foo'
             service = plug.makeService(None)
             service.parent = Mock()
-            with self.assertRaises(ValueError) as ctx:
-                service.startService()
-            self.assertIn(
-                "key-generator support removed",
-                str(ctx.exception)
-            )
+            # we'll raise ValueError because there's no key-generator
+            # .. BUT we do this in an async function called via
+            # "callWhenRunning" .. hence using a hook
+            d = service.set_hook('running')
+            service.startService()
+            def done(f):
+                print("DONE {}".format(f))
+                self.assertIn(
+                    "key-generator support removed",
+                    str(str(f)),#ctx.exception)
+                )
+                return None
+            d.addErrback(done)
+            return d
 
     def test_daemonize_unknown_nodetype(self):
         tmpdir = self.mktemp()
@@ -31,6 +31,7 @@ from allmydata.client import create_client
 from allmydata.storage.server import StorageServer, storage_index_to_dir
 from allmydata.util import fileutil, idlib, hashutil
 from allmydata.util.hashutil import permute_server_hash
+from allmydata.util.fileutil import abspath_expanduser_unicode
 from allmydata.interfaces import IStorageBroker, IServer
 from .common import TEST_RSA_KEY_SIZE
 
@@ -1,8 +1,9 @@
 import os, sys
+import mock
 import twisted
 from twisted.trial import unittest
 from twisted.application import service
-import mock
+from twisted.internet import defer
 
 import allmydata
 import allmydata.frontends.magic_folder
@@ -34,8 +35,9 @@ class Basic(testutil.ReallyEqualMixin, testutil.NonASCIIPathMixin, unittest.Test
         os.mkdir(basedir)
         fileutil.write(os.path.join(basedir, "tahoe.cfg"), \
                        BASECONFIG)
-        client.create_client(basedir)
+        return client.create_client(basedir)
 
+    @defer.inlineCallbacks
     def test_comment(self):
         should_fail = [r"test#test", r"#testtest", r"test\\#test"]
         should_not_fail = [r"test\#test", r"test\\\#test", r"testtest"]
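The rest of this test module follows the same two-step conversion shown here: decorate the test with @defer.inlineCallbacks and yield the Deferred returned by create_client. Schematically (the test name and final assertion are placeholders):

@defer.inlineCallbacks
def test_example(self):                      # placeholder test
    basedir = self.mktemp()
    os.mkdir(basedir)
    fileutil.write(os.path.join(basedir, "tahoe.cfg"), BASECONFIG)
    c = yield client.create_client(basedir)  # was: c = client.create_client(basedir)
    self.assertTrue(c is not None)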
@@ -51,13 +53,14 @@ class Basic(testutil.ReallyEqualMixin, testutil.NonASCIIPathMixin, unittest.Test
         for s in should_fail:
             self.failUnless(_Config._contains_unescaped_hash(s))
             write_config(s)
-            e = self.assertRaises(UnescapedHashError, client.create_client, basedir)
-            self.assertIn("[client]introducer.furl", str(e))
+            with self.assertRaises(UnescapedHashError) as ctx:
+                yield client.create_client(basedir)
+            self.assertIn("[client]introducer.furl", str(ctx.exception))
 
         for s in should_not_fail:
             self.failIf(_Config._contains_unescaped_hash(s))
             write_config(s)
-            client.create_client(basedir)
+            yield client.create_client(basedir)
 
     def test_unreadable_config(self):
         if sys.platform == "win32":
@@ -128,12 +131,13 @@ class Basic(testutil.ReallyEqualMixin, testutil.NonASCIIPathMixin, unittest.Test
                    ("Found pre-Tahoe-LAFS-v1.3 configuration file" in str(m) and oldfile in str(m)) ]
         self.failIf(logged, (oldfile, logged_messages))
 
+    @defer.inlineCallbacks
     def test_secrets(self):
         basedir = "test_client.Basic.test_secrets"
         os.mkdir(basedir)
         fileutil.write(os.path.join(basedir, "tahoe.cfg"), \
                        BASECONFIG)
-        c = client.create_client(basedir)
+        c = yield client.create_client(basedir)
         secret_fname = os.path.join(basedir, "private", "secret")
         self.failUnless(os.path.exists(secret_fname), secret_fname)
         renew_secret = c.get_renewal_secret()
@@ -141,22 +145,25 @@ class Basic(testutil.ReallyEqualMixin, testutil.NonASCIIPathMixin, unittest.Test
         cancel_secret = c.get_cancel_secret()
         self.failUnless(base32.b2a(cancel_secret))
 
+    @defer.inlineCallbacks
     def test_nodekey_yes_storage(self):
         basedir = "test_client.Basic.test_nodekey_yes_storage"
         os.mkdir(basedir)
         fileutil.write(os.path.join(basedir, "tahoe.cfg"),
                        BASECONFIG)
-        c = client.create_client(basedir)
+        c = yield client.create_client(basedir)
         self.failUnless(c.get_long_nodeid().startswith("v0-"))
 
+    @defer.inlineCallbacks
     def test_nodekey_no_storage(self):
         basedir = "test_client.Basic.test_nodekey_no_storage"
         os.mkdir(basedir)
         fileutil.write(os.path.join(basedir, "tahoe.cfg"),
                        BASECONFIG + "[storage]\n" + "enabled = false\n")
-        c = client.create_client(basedir)
+        c = yield client.create_client(basedir)
         self.failUnless(c.get_long_nodeid().startswith("v0-"))
 
+    @defer.inlineCallbacks
     def test_reserved_1(self):
         basedir = "client.Basic.test_reserved_1"
         os.mkdir(basedir)
@@ -165,9 +172,10 @@ class Basic(testutil.ReallyEqualMixin, testutil.NonASCIIPathMixin, unittest.Test
                        "[storage]\n" + \
                        "enabled = true\n" + \
                        "reserved_space = 1000\n")
-        c = client.create_client(basedir)
+        c = yield client.create_client(basedir)
         self.failUnlessEqual(c.getServiceNamed("storage").reserved_space, 1000)
 
+    @defer.inlineCallbacks
     def test_reserved_2(self):
         basedir = "client.Basic.test_reserved_2"
         os.mkdir(basedir)
@@ -176,9 +184,10 @@ class Basic(testutil.ReallyEqualMixin, testutil.NonASCIIPathMixin, unittest.Test
                        "[storage]\n" + \
                        "enabled = true\n" + \
                        "reserved_space = 10K\n")
-        c = client.create_client(basedir)
+        c = yield client.create_client(basedir)
         self.failUnlessEqual(c.getServiceNamed("storage").reserved_space, 10*1000)
 
+    @defer.inlineCallbacks
     def test_reserved_3(self):
         basedir = "client.Basic.test_reserved_3"
         os.mkdir(basedir)
@@ -187,10 +196,11 @@ class Basic(testutil.ReallyEqualMixin, testutil.NonASCIIPathMixin, unittest.Test
                        "[storage]\n" + \
                        "enabled = true\n" + \
                        "reserved_space = 5mB\n")
-        c = client.create_client(basedir)
+        c = yield client.create_client(basedir)
         self.failUnlessEqual(c.getServiceNamed("storage").reserved_space,
                              5*1000*1000)
 
+    @defer.inlineCallbacks
     def test_reserved_4(self):
         basedir = "client.Basic.test_reserved_4"
         os.mkdir(basedir)
@@ -199,10 +209,11 @@ class Basic(testutil.ReallyEqualMixin, testutil.NonASCIIPathMixin, unittest.Test
                        "[storage]\n" + \
                        "enabled = true\n" + \
                        "reserved_space = 78Gb\n")
-        c = client.create_client(basedir)
+        c = yield client.create_client(basedir)
         self.failUnlessEqual(c.getServiceNamed("storage").reserved_space,
                              78*1000*1000*1000)
 
+    @defer.inlineCallbacks
     def test_reserved_bad(self):
         basedir = "client.Basic.test_reserved_bad"
         os.mkdir(basedir)
@@ -211,8 +222,10 @@ class Basic(testutil.ReallyEqualMixin, testutil.NonASCIIPathMixin, unittest.Test
                        "[storage]\n" + \
                        "enabled = true\n" + \
                        "reserved_space = bogus\n")
-        self.failUnlessRaises(ValueError, client.create_client, basedir)
+        with self.assertRaises(ValueError) as ctx:
+            yield client.create_client(basedir)
 
+    @defer.inlineCallbacks
     def test_web_apiauthtoken(self):
         """
         Client loads the proper API auth token from disk
@@ -220,7 +233,7 @@ class Basic(testutil.ReallyEqualMixin, testutil.NonASCIIPathMixin, unittest.Test
         basedir = u"client.Basic.test_web_apiauthtoken"
         create_node_dir(basedir, "testing")
 
-        c = client.create_client(basedir)
+        c = yield client.create_client(basedir)
         # this must come after we create the client, as it will create
         # a new, random authtoken itself
         with open(os.path.join(basedir, "private", "api_auth_token"), "w") as f:
@@ -229,6 +242,7 @@ class Basic(testutil.ReallyEqualMixin, testutil.NonASCIIPathMixin, unittest.Test
         token = c.get_auth_token()
         self.assertEqual("deadbeef", token)
 
+    @defer.inlineCallbacks
     def test_web_staticdir(self):
         basedir = u"client.Basic.test_web_staticdir"
         os.mkdir(basedir)
@@ -237,7 +251,7 @@ class Basic(testutil.ReallyEqualMixin, testutil.NonASCIIPathMixin, unittest.Test
                        "[node]\n" +
                        "web.port = tcp:0:interface=127.0.0.1\n" +
                        "web.static = relative\n")
-        c = client.create_client(basedir)
+        c = yield client.create_client(basedir)
         w = c.getServiceNamed("webish")
         abs_basedir = fileutil.abspath_expanduser_unicode(basedir)
         expected = fileutil.abspath_expanduser_unicode(u"relative", abs_basedir)
@@ -245,6 +259,7 @@ class Basic(testutil.ReallyEqualMixin, testutil.NonASCIIPathMixin, unittest.Test
 
     # TODO: also test config options for SFTP.
 
+    @defer.inlineCallbacks
     def test_ftp_create(self):
         """
         configuration for sftpd results in it being started
@@ -260,9 +275,10 @@ class Basic(testutil.ReallyEqualMixin, testutil.NonASCIIPathMixin, unittest.Test
             'host_privkey_file = privkey\n'
         )
         with mock.patch('allmydata.frontends.sftpd.SFTPServer') as p:
-            client.create_client(basedir)
+            yield client.create_client(basedir)
         self.assertTrue(p.called)
 
+    @defer.inlineCallbacks
     def test_ftp_auth_keyfile(self):
         basedir = u"client.Basic.test_ftp_auth_keyfile"
         os.mkdir(basedir)
||||||
os.mkdir(basedir)
|
os.mkdir(basedir)
|
||||||
@ -274,9 +290,10 @@ class Basic(testutil.ReallyEqualMixin, testutil.NonASCIIPathMixin, unittest.Test
|
|||||||
"accounts.file = private/accounts\n"))
|
"accounts.file = private/accounts\n"))
|
||||||
os.mkdir(os.path.join(basedir, "private"))
|
os.mkdir(os.path.join(basedir, "private"))
|
||||||
fileutil.write(os.path.join(basedir, "private", "accounts"), "\n")
|
fileutil.write(os.path.join(basedir, "private", "accounts"), "\n")
|
||||||
c = client.create_client(basedir) # just make sure it can be instantiated
|
c = yield client.create_client(basedir) # just make sure it can be instantiated
|
||||||
del c
|
del c
|
||||||
|
|
||||||
|
@defer.inlineCallbacks
|
||||||
def test_ftp_auth_url(self):
|
def test_ftp_auth_url(self):
|
||||||
basedir = u"client.Basic.test_ftp_auth_url"
|
basedir = u"client.Basic.test_ftp_auth_url"
|
||||||
os.mkdir(basedir)
|
os.mkdir(basedir)
|
||||||
@ -286,7 +303,7 @@ class Basic(testutil.ReallyEqualMixin, testutil.NonASCIIPathMixin, unittest.Test
|
|||||||
"enabled = true\n"
|
"enabled = true\n"
|
||||||
"port = tcp:0:interface=127.0.0.1\n"
|
"port = tcp:0:interface=127.0.0.1\n"
|
||||||
"accounts.url = http://0.0.0.0/\n"))
|
"accounts.url = http://0.0.0.0/\n"))
|
||||||
c = client.create_client(basedir) # just make sure it can be instantiated
|
c = yield client.create_client(basedir) # just make sure it can be instantiated
|
||||||
del c
|
del c
|
||||||
|
|
||||||
def test_ftp_auth_no_accountfile_or_url(self):
|
def test_ftp_auth_no_accountfile_or_url(self):
|
||||||
@@ -297,7 +314,8 @@ class Basic(testutil.ReallyEqualMixin, testutil.NonASCIIPathMixin, unittest.Test
                                       "[ftpd]\n"
                                       "enabled = true\n"
                                       "port = tcp:0:interface=127.0.0.1\n"))
-        self.failUnlessRaises(NeedRootcapLookupScheme, client.create_client, basedir)
+        with self.assertRaises(NeedRootcapLookupScheme):
+            yield client.create_client(basedir)
 
     def _storage_dir_test(self, basedir, storage_path, expected_path):
         os.mkdir(basedir)
@@ -407,6 +425,7 @@ class Basic(testutil.ReallyEqualMixin, testutil.NonASCIIPathMixin, unittest.Test
         sb.servers.clear()
         self.failUnlessReallyEqual(self._permute(sb, "one"), [])
 
+    @defer.inlineCallbacks
     def test_versions(self):
         basedir = "test_client.Basic.test_versions"
         os.mkdir(basedir)
@@ -414,7 +433,7 @@ class Basic(testutil.ReallyEqualMixin, testutil.NonASCIIPathMixin, unittest.Test
                        BASECONFIG + \
                        "[storage]\n" + \
                        "enabled = true\n")
-        c = client.create_client(basedir)
+        c = yield client.create_client(basedir)
         ss = c.getServiceNamed("storage")
         verdict = ss.remote_get_version()
         self.failUnlessReallyEqual(verdict["application-version"],
@ -429,24 +448,27 @@ class Basic(testutil.ReallyEqualMixin, testutil.NonASCIIPathMixin, unittest.Test
|
|||||||
self.failUnless("node.uptime" in stats)
|
self.failUnless("node.uptime" in stats)
|
||||||
self.failUnless(isinstance(stats["node.uptime"], float))
|
self.failUnless(isinstance(stats["node.uptime"], float))
|
||||||
|
|
||||||
|
@defer.inlineCallbacks
|
||||||
def test_helper_furl(self):
|
def test_helper_furl(self):
|
||||||
basedir = "test_client.Basic.test_helper_furl"
|
basedir = "test_client.Basic.test_helper_furl"
|
||||||
os.mkdir(basedir)
|
os.mkdir(basedir)
|
||||||
|
|
||||||
|
@defer.inlineCallbacks
|
||||||
def _check(config, expected_furl):
|
def _check(config, expected_furl):
|
||||||
fileutil.write(os.path.join(basedir, "tahoe.cfg"),
|
fileutil.write(os.path.join(basedir, "tahoe.cfg"),
|
||||||
BASECONFIG + config)
|
BASECONFIG + config)
|
||||||
c = client.create_client(basedir)
|
c = yield client.create_client(basedir)
|
||||||
uploader = c.getServiceNamed("uploader")
|
uploader = c.getServiceNamed("uploader")
|
||||||
furl, connected = uploader.get_helper_info()
|
furl, connected = uploader.get_helper_info()
|
||||||
self.failUnlessEqual(furl, expected_furl)
|
self.failUnlessEqual(furl, expected_furl)
|
||||||
|
|
||||||
_check("", None)
|
yield _check("", None)
|
||||||
_check("helper.furl =\n", None)
|
yield _check("helper.furl =\n", None)
|
||||||
_check("helper.furl = \n", None)
|
yield _check("helper.furl = \n", None)
|
||||||
_check("helper.furl = None", None)
|
yield _check("helper.furl = None", None)
|
||||||
_check("helper.furl = pb://blah\n", "pb://blah")
|
yield _check("helper.furl = pb://blah\n", "pb://blah")
|
||||||
|
|
||||||
|
@defer.inlineCallbacks
|
||||||
def test_create_magic_folder_service(self):
|
def test_create_magic_folder_service(self):
|
||||||
boom = False
|
boom = False
|
||||||
class Boom(Exception):
|
class Boom(Exception):
|
||||||
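
The test_helper_furl hunk above carries one extra subtlety: the local _check() helper calls create_client() itself, so the helper is decorated with @defer.inlineCallbacks as well, and each call site must yield the Deferred it returns -- an un-yielded call would hand back an unfired Deferred and the test could pass before the assertions inside the helper ever ran. Reassembled from the hunk for readability (a sketch of the converted body, not copied verbatim from the branch, and showing only two of the five _check() calls):

        @defer.inlineCallbacks
        def _check(config, expected_furl):
            fileutil.write(os.path.join(basedir, "tahoe.cfg"),
                           BASECONFIG + config)
            # build a fresh client from the just-written config
            c = yield client.create_client(basedir)
            uploader = c.getServiceNamed("uploader")
            furl, connected = uploader.get_helper_info()
            self.failUnlessEqual(furl, expected_furl)

        # without the leading yield these checks would be fire-and-forget
        yield _check("", None)
        yield _check("helper.furl = pb://blah\n", "pb://blah")
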
@@ -496,7 +518,8 @@ class Basic(testutil.ReallyEqualMixin, testutil.NonASCIIPathMixin, unittest.Test
         # which config-entry should be missing?
         fileutil.write(os.path.join(basedir1, "tahoe.cfg"),
                        config + "local.directory = " + local_dir_utf8 + "\n")
-        self.failUnlessRaises(IOError, client.create_client, basedir1)
+        with self.assertRaises(IOError):
+            yield client.create_client(basedir1)

         # local.directory entry missing .. but that won't be an error
         # now, it'll just assume there are not magic folders
@@ -508,11 +531,13 @@ class Basic(testutil.ReallyEqualMixin, testutil.NonASCIIPathMixin, unittest.Test

         fileutil.write(os.path.join(basedir1, "tahoe.cfg"),
                        config.replace("[magic_folder]\n", "[drop_upload]\n"))
-        self.failUnlessRaises(OldConfigOptionError, client.create_client, basedir1)
+
+        with self.assertRaises(OldConfigOptionError):
+            yield client.create_client(basedir1)

         fileutil.write(os.path.join(basedir1, "tahoe.cfg"),
                        config + "local.directory = " + local_dir_utf8 + "\n")
-        c1 = client.create_client(basedir1)
+        c1 = yield client.create_client(basedir1)
         magicfolder = c1.getServiceNamed('magic-folder')
         self.failUnless(isinstance(magicfolder, MockMagicFolder), magicfolder)
         self.failUnlessReallyEqual(magicfolder.client, c1)
@@ -534,7 +559,8 @@ class Basic(testutil.ReallyEqualMixin, testutil.NonASCIIPathMixin, unittest.Test
                        "local.directory = " + local_dir_utf8 + "\n")
         fileutil.write(os.path.join(basedir2, "private", "magic_folder_dircap"), "URI:DIR2:blah")
         fileutil.write(os.path.join(basedir2, "private", "collective_dircap"), "URI:DIR2:meow")
-        self.failUnlessRaises(Boom, client.create_client, basedir2)
+        with self.assertRaises(Boom):
+            yield client.create_client(basedir2)


 def flush_but_dont_ignore(res):
@@ -554,53 +580,55 @@ class Run(unittest.TestCase, testutil.StallMixin):
         d.addBoth(flush_but_dont_ignore)
         return d

+    @defer.inlineCallbacks
     def test_loadable(self):
         basedir = "test_client.Run.test_loadable"
         os.mkdir(basedir)
         dummy = "pb://wl74cyahejagspqgy4x5ukrvfnevlknt@127.0.0.1:58889/bogus"
         fileutil.write(os.path.join(basedir, "tahoe.cfg"), BASECONFIG_I % dummy)
         fileutil.write(os.path.join(basedir, client._Client.EXIT_TRIGGER_FILE), "")
-        client.create_client(basedir)
+        yield client.create_client(basedir)

+    @defer.inlineCallbacks
     def test_reloadable(self):
         basedir = "test_client.Run.test_reloadable"
         os.mkdir(basedir)
         dummy = "pb://wl74cyahejagspqgy4x5ukrvfnevlknt@127.0.0.1:58889/bogus"
         fileutil.write(os.path.join(basedir, "tahoe.cfg"), BASECONFIG_I % dummy)
-        c1 = client.create_client(basedir)
+        c1 = yield client.create_client(basedir)
         c1.setServiceParent(self.sparent)

         # delay to let the service start up completely. I'm not entirely sure
         # this is necessary.
-        d = self.stall(delay=2.0)
-        d.addCallback(lambda res: c1.disownServiceParent())
+        yield self.stall(delay=2.0)
+        yield c1.disownServiceParent()
         # the cygwin buildslave seems to need more time to let the old
         # service completely shut down. When delay=0.1, I saw this test fail,
         # probably due to the logport trying to reclaim the old socket
         # number. This suggests that either we're dropping a Deferred
         # somewhere in the shutdown sequence, or that cygwin is just cranky.
-        d.addCallback(self.stall, delay=2.0)
-        def _restart(res):
+        yield self.stall(delay=2.0)
         # TODO: pause for slightly over one second, to let
         # Client._check_exit_trigger poll the file once. That will exercise
         # another few lines. Then add another test in which we don't
         # update the file at all, and watch to see the node shutdown.
         # (To do this, use a modified node which overrides Node.shutdown(),
         # also change _check_exit_trigger to use it instead of a raw
         # reactor.stop, also instrument the shutdown event in an
         # attribute that we can check.)
-        c2 = client.create_client(basedir)
+        c2 = yield client.create_client(basedir)
         c2.setServiceParent(self.sparent)
-        return c2.disownServiceParent()
-        d.addCallback(_restart)
-        return d
+        yield c2.disownServiceParent()

 class NodeMaker(testutil.ReallyEqualMixin, unittest.TestCase):

+    @defer.inlineCallbacks
     def test_maker(self):
         basedir = "client/NodeMaker/maker"
         fileutil.make_dirs(basedir)
         fileutil.write(os.path.join(basedir, "tahoe.cfg"), BASECONFIG)
-        c = client.create_client(basedir)
+        c = yield client.create_client(basedir)

         n = c.create_node_from_uri("URI:CHK:6nmrpsubgbe57udnexlkiwzmlu:bjt7j6hshrlmadjyr7otq3dc24end5meo5xcr5xe5r663po6itmq:3:10:7277")
         self.failUnless(IFilesystemNode.providedBy(n))
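
The Run.test_reloadable hunk is the largest structural change in this file: the explicit Deferred chain, including the nested _restart() callback, collapses into straight-line code once the test runs under @defer.inlineCallbacks. Reassembled from the hunk for readability (a sketch, not copied verbatim from the branch; BASECONFIG_I and self.stall() come from the surrounding test module and StallMixin):

    @defer.inlineCallbacks
    def test_reloadable(self):
        basedir = "test_client.Run.test_reloadable"
        os.mkdir(basedir)
        dummy = "pb://wl74cyahejagspqgy4x5ukrvfnevlknt@127.0.0.1:58889/bogus"
        fileutil.write(os.path.join(basedir, "tahoe.cfg"), BASECONFIG_I % dummy)
        c1 = yield client.create_client(basedir)   # was: c1 = client.create_client(basedir)
        c1.setServiceParent(self.sparent)

        # each step that used to be a d.addCallback(...) link becomes a yield,
        # so the generator waits for them in order
        yield self.stall(delay=2.0)
        yield c1.disownServiceParent()
        yield self.stall(delay=2.0)

        # the body of the old _restart() callback, now inlined
        c2 = yield client.create_client(basedir)
        c2.setServiceParent(self.sparent)
        yield c2.disownServiceParent()
        # no trailing "return d": inlineCallbacks already gives trial a Deferred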