import datetime
import os.path
import re
import types
import ConfigParser
import tempfile
from io import BytesIO
from base64 import b32decode, b32encode

from twisted.internet import reactor
from twisted.python import log as twlog
from twisted.application import service

from foolscap.api import Tub, app_versions
import foolscap.logging.log

from allmydata import get_package_versions, get_package_versions_string
from allmydata.util import log
from allmydata.util import fileutil, iputil
from allmydata.util.assertutil import _assert
from allmydata.util.fileutil import abspath_expanduser_unicode
from allmydata.util.encodingutil import get_filesystem_encoding, quote_output
from allmydata.util import configutil
from allmydata.util import i2p_provider, tor_provider


def _common_config_sections():
    return {
        "connections": (
            "tcp",
        ),
        "node": (
            "log_gatherer.furl",
            "nickname",
            "reveal-ip-address",
            "tempdir",
            "timeout.disconnect",
            "timeout.keepalive",
            "tub.location",
            "tub.port",
            "web.port",
            "web.static",
        ),
        "i2p": (
            "enabled",
            "i2p.configdir",
            "i2p.executable",
            "launch",
            "sam.port",
            "dest",
            "dest.port",
            "dest.private_key_file",
        ),
        "tor": (
            "control.port",
            "enabled",
            "launch",
            "socks.port",
            "tor.executable",
            "onion",
            "onion.local_port",
            "onion.external_port",
            "onion.private_key_file",
        ),
    }
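
# For illustration, a tahoe.cfg that uses only options named above might look
# like the following (the values are arbitrary examples, not defaults):
#
#   [node]
#   nickname = demo-node
#   web.port = tcp:3456:interface=127.0.0.1
#   tub.port = tcp:34567
#   tub.location = AUTO
#
#   [connections]
#   tcp = tcp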

# Add our application versions to the data that Foolscap's LogPublisher
# reports.
for thing, things_version in get_package_versions().iteritems():
    app_versions.add_version(thing, str(things_version))

# group 1 will be addr (dotted quad string), group 3 if any will be portnum (string)
ADDR_RE = re.compile(r"^([1-9][0-9]*\.[1-9][0-9]*\.[1-9][0-9]*\.[1-9][0-9]*)(:([1-9][0-9]*))?$")


def formatTimeTahoeStyle(self, when):
    # we want UTC timestamps that look like:
    #  2007-10-12 00:26:28.566Z [Client] rnp752lz: 'client running'
    d = datetime.datetime.utcfromtimestamp(when)
    if d.microsecond:
        return d.isoformat(" ")[:-3]+"Z"
    else:
        return d.isoformat(" ") + ".000Z"
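
# For example, a fractional timestamp like 1192148788.566 renders as
# "2007-10-12 00:26:28.566Z", and a whole-second timestamp gets a ".000Z"
# suffix so the field width stays constant.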


PRIV_README = """
This directory contains files which contain private data for the Tahoe node,
such as private keys. On Unix-like systems, the permissions on this directory
are set to disallow users other than its owner from reading the contents of
the files. See the 'configuration.rst' documentation file for details."""


class _None: # used as a marker in get_config()
    pass

class MissingConfigEntry(Exception):
    """ A required config entry was not found. """

class OldConfigError(Exception):
    """ An obsolete config file was found. See
    docs/historical/configuration.rst. """
    def __str__(self):
        return ("Found pre-Tahoe-LAFS-v1.3 configuration file(s):\n"
                "%s\n"
                "See docs/historical/configuration.rst."
                % "\n".join([quote_output(fname) for fname in self.args[0]]))

class OldConfigOptionError(Exception):
    pass

class UnescapedHashError(Exception):
    def __str__(self):
        return ("The configuration entry %s contained an unescaped '#' character."
                % quote_output("[%s]%s = %s" % self.args))

class PrivacyError(Exception):
    """reveal-IP-address = false, but the node is configured in such a way
    that the IP address could be revealed"""


def read_config(basedir, portnumfile, generated_files=[], _valid_config_sections=None):
    basedir = abspath_expanduser_unicode(unicode(basedir))
    if _valid_config_sections is None:
        _valid_config_sections = _common_config_sections

    # complain if there's bad stuff in the config dir
    _error_about_old_config_files(basedir, generated_files)

    # canonicalize the portnum file
    portnumfile = os.path.join(basedir, portnumfile)

    # (try to) read the main config file
    config_fname = os.path.join(basedir, "tahoe.cfg")
    parser = ConfigParser.SafeConfigParser()
    try:
        parser = configutil.get_config(config_fname)
    except EnvironmentError:
        if os.path.exists(config_fname):
            raise

    configutil.validate_config(config_fname, parser, _valid_config_sections())
    return _Config(parser, portnumfile, config_fname)
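
# Typical use, with an illustrative node directory and portnum file name:
#
#   config = read_config("/home/user/.tahoe", "client.port")
#   print config.nickname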


def config_from_string(config_str, portnumfile):
    parser = ConfigParser.SafeConfigParser()
    parser.readfp(BytesIO(config_str))
    return _Config(parser, portnumfile, '<in-memory>')
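
# A convenient way to build a _Config without touching the filesystem, e.g.
# in tests (the values here are arbitrary):
#
#   config = config_from_string("[node]\nnickname = testnode\n", "fake.port")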


def _error_about_old_config_files(basedir, generated_files):
    """
    If any old configuration files are detected, raise
    OldConfigError.
    """
    oldfnames = set()
    old_names = [
        'nickname', 'webport', 'keepalive_timeout', 'log_gatherer.furl',
        'disconnect_timeout', 'advertised_ip_addresses', 'introducer.furl',
        'helper.furl', 'key_generator.furl', 'stats_gatherer.furl',
        'no_storage', 'readonly_storage', 'sizelimit',
        'debug_discard_storage', 'run_helper'
    ]
    for fn in generated_files:
        old_names.remove(fn)
    for name in old_names:
        fullfname = os.path.join(basedir, name)
        if os.path.exists(fullfname):
            oldfnames.add(fullfname)
    if oldfnames:
        e = OldConfigError(oldfnames)
        twlog.msg(e)
        raise e


class _Config(object):
    """
    FIXME better name

    pulling out all the 'config' stuff from Node, so we can pass it in
    as a helper instead.
    """

    def __init__(self, configparser, portnum_fname, config_fname):
        # XXX I think this portnumfile thing is just legacy?
        self.portnum_fname = portnum_fname
        self._config_fname = config_fname

        self.config = configparser

        nickname_utf8 = self.get_config("node", "nickname", "<unspecified>")
        self.nickname = nickname_utf8.decode("utf-8")
        assert type(self.nickname) is unicode

    def validate(self, valid_config_sections):
        configutil.validate_config(self._config_fname, self.config, valid_config_sections)

    def read_config(self):
        try:
            self.config = configutil.get_config(self._config_fname)
        except EnvironmentError:
            if os.path.exists(self._config_fname):
                raise

    def get_config(self, section, option, default=_None, boolean=False):
        try:
            if boolean:
                return self.config.getboolean(section, option)

            item = self.config.get(section, option)
            if option.endswith(".furl") and self._contains_unescaped_hash(item):
                raise UnescapedHashError(section, option, item)

            return item
        except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
            if default is _None:
                raise MissingConfigEntry(
                    "{} is missing the [{}]{} entry".format(
                        quote_output(self._config_fname),
                        section,
                        option,
                    )
                )
            return default
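
    # Typical calls, as made elsewhere in this module via a _Config instance:
    #   config.get_config("node", "tub.port", None)    # returns None if absent
    #   config.get_config("node", "reveal-IP-address", True, boolean=True)
    #   config.get_config("node", "nickname")          # raises MissingConfigEntry if absent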

    @staticmethod
    def _contains_unescaped_hash(item):
        characters = iter(item)
        for c in characters:
            if c == '\\':
                characters.next()
            elif c == '#':
                return True

        return False
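
    # For example, "pb://abc#def" contains an unescaped '#', while
    # "pb://abc\#def" does not: the backslash consumes the following
    # character, so that '#' is treated as escaped.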


class Node(service.MultiService):
    # this implements common functionality of both Client nodes and Introducer
    # nodes.
    NODETYPE = "unknown NODETYPE"
    CERTFILE = "node.pem"
    GENERATED_FILES = []

    def __init__(self, config, basedir=u"."):
        service.MultiService.__init__(self)
        # ideally, this would only be in _Config (or otherwise abstracted)
        self.basedir = abspath_expanduser_unicode(unicode(basedir))
        # XXX don't write files in ctor!
        fileutil.make_dirs(os.path.join(self.basedir, "private"), 0700)
        with open(os.path.join(self.basedir, "private", "README"), "w") as f:
            f.write(PRIV_README)

        self.config = config
        self.get_config = config.get_config # XXX stopgap
        self.nickname = config.nickname # XXX stopgap

        self.init_tempdir()
        self.check_privacy()

        self.create_log_tub()
        self.logSource = "Node"
        self.setup_logging()

        self.create_i2p_provider()
        self.create_tor_provider()
        self.init_connections()
        self.set_tub_options()
        self.create_main_tub()
        self.create_control_tub()

        self.log("Node constructed. " + get_package_versions_string())
        iputil.increase_rlimits()

    def init_tempdir(self):
        tempdir_config = self.config.get_config("node", "tempdir", "tmp").decode('utf-8')
        tempdir = abspath_expanduser_unicode(tempdir_config, base=self.basedir)
        if not os.path.exists(tempdir):
            fileutil.make_dirs(tempdir)
        tempfile.tempdir = tempdir
        # this should cause twisted.web.http (which uses
        # tempfile.TemporaryFile) to put large request bodies in the given
        # directory. Without this, the default temp dir is usually /tmp/,
        # which is frequently too small.
        temp_fd, test_name = tempfile.mkstemp()
        _assert(os.path.dirname(test_name) == tempdir, test_name, tempdir)
        os.close(temp_fd)

    def check_privacy(self):
        self._reveal_ip = self.config.get_config("node", "reveal-IP-address", True,
                                                 boolean=True)

    def create_i2p_provider(self):
        self._i2p_provider = i2p_provider.Provider(self.basedir, self.config, reactor)
        self._i2p_provider.check_dest_config()
        self._i2p_provider.setServiceParent(self)

    def create_tor_provider(self):
        self._tor_provider = tor_provider.Provider(self.basedir, self.config, reactor)
        self._tor_provider.check_onion_config()
        self._tor_provider.setServiceParent(self)

    def _make_tcp_handler(self):
        # this is always available
        from foolscap.connections.tcp import default
        return default()

    def _make_tor_handler(self):
        return self._tor_provider.get_tor_handler()

    def _make_i2p_handler(self):
        return self._i2p_provider.get_i2p_handler()

    def init_connections(self):
        # We store handlers for everything. None means we were unable to
        # create that handler, so hints which want it will be ignored.
        handlers = self._foolscap_connection_handlers = {
            "tcp": self._make_tcp_handler(),
            "tor": self._make_tor_handler(),
            "i2p": self._make_i2p_handler(),
            }
        self.log(format="built Foolscap connection handlers for: %(known_handlers)s",
                 known_handlers=sorted([k for k, v in handlers.items() if v]),
                 facility="tahoe.node", umid="PuLh8g")

        # then we remember the default mappings from tahoe.cfg
        self._default_connection_handlers = {"tor": "tor", "i2p": "i2p"}
        tcp_handler_name = self.config.get_config("connections", "tcp", "tcp").lower()
        if tcp_handler_name == "disabled":
            self._default_connection_handlers["tcp"] = None
        else:
            if tcp_handler_name not in handlers:
                raise ValueError("'tahoe.cfg [connections] tcp='"
                                 " uses unknown handler type '%s'"
                                 % tcp_handler_name)
            if not handlers[tcp_handler_name]:
                raise ValueError("'tahoe.cfg [connections] tcp=' uses "
                                 "unavailable/unimportable handler type '%s'. "
                                 "Please pip install tahoe-lafs[%s] to fix."
                                 % (tcp_handler_name, tcp_handler_name))
            self._default_connection_handlers["tcp"] = tcp_handler_name

        if not self._reveal_ip:
            if self._default_connection_handlers.get("tcp") == "tcp":
                raise PrivacyError("tcp = tcp, must be set to 'tor' or 'disabled'")
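
    # A privacy-conscious tahoe.cfg therefore pairs these settings, for
    # example ("tcp = disabled" also satisfies the check above):
    #
    #   [node]
    #   reveal-IP-address = false
    #
    #   [connections]
    #   tcp = tor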

    def set_tub_options(self):
        self.tub_options = {
            "logLocalFailures": True,
            "logRemoteFailures": True,
            "expose-remote-exception-types": False,
            "accept-gifts": False,
            }

        # see #521 for a discussion of how to pick these timeout values.
        keepalive_timeout_s = self.config.get_config("node", "timeout.keepalive", "")
        if keepalive_timeout_s:
            self.tub_options["keepaliveTimeout"] = int(keepalive_timeout_s)
        disconnect_timeout_s = self.config.get_config("node", "timeout.disconnect", "")
        if disconnect_timeout_s:
            # N.B.: this is in seconds, so use "1800" to get 30min
            self.tub_options["disconnectTimeout"] = int(disconnect_timeout_s)
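
    # Both timeouts are plain integer seconds in tahoe.cfg, e.g.:
    #
    #   [node]
    #   timeout.keepalive = 300
    #   timeout.disconnect = 1800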

    def _create_tub(self, handler_overrides={}, **kwargs):
        # Create a Tub with the right options and handlers. It will be
        # ephemeral unless the caller provides certFile=
        tub = Tub(**kwargs)
        for (name, value) in self.tub_options.items():
            tub.setOption(name, value)
        handlers = self._default_connection_handlers.copy()
        handlers.update(handler_overrides)
        tub.removeAllConnectionHintHandlers()
        for hint_type, handler_name in handlers.items():
            handler = self._foolscap_connection_handlers.get(handler_name)
            if handler:
                tub.addConnectionHintHandler(hint_type, handler)
        return tub

    def _convert_tub_port(self, s):
        if re.search(r'^\d+$', s):
            return "tcp:%d" % int(s)
        return s
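
    # e.g. "12345" becomes "tcp:12345"; anything that is not a bare integer
    # (such as "tcp:12345:interface=127.0.0.1") is passed through unchanged.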

    def get_tub_portlocation(self, cfg_tubport, cfg_location):
        # return None, or tuple of (port, location)

        tubport_disabled = False
        if cfg_tubport is not None:
            cfg_tubport = cfg_tubport.strip()
            if cfg_tubport == "":
                raise ValueError("tub.port must not be empty")
            if cfg_tubport == "disabled":
                tubport_disabled = True

        location_disabled = False
        if cfg_location is not None:
            cfg_location = cfg_location.strip()
            if cfg_location == "":
                raise ValueError("tub.location must not be empty")
            if cfg_location == "disabled":
                location_disabled = True

        if tubport_disabled and location_disabled:
            return None
        if tubport_disabled and not location_disabled:
            raise ValueError("tub.port is disabled, but not tub.location")
        if location_disabled and not tubport_disabled:
            raise ValueError("tub.location is disabled, but not tub.port")

        if cfg_tubport is None:
            # For 'tub.port', tahoe.cfg overrides the individual file on
            # disk. So only read self._portnumfile if tahoe.cfg doesn't
            # provide a value.
            if os.path.exists(self.config.portnum_fname):
                file_tubport = fileutil.read(self.config.portnum_fname).strip()
                tubport = self._convert_tub_port(file_tubport)
            else:
                tubport = "tcp:%d" % iputil.allocate_tcp_port()
                fileutil.write_atomically(self.config.portnum_fname, tubport + "\n",
                                          mode="")
        else:
            tubport = self._convert_tub_port(cfg_tubport)

        if cfg_location is None:
            cfg_location = "AUTO"

        local_portnum = None # needed to hush lgtm.com static analyzer
        # Replace the location "AUTO", if present, with the detected local
        # addresses. Don't probe for local addresses unless necessary.
        split_location = cfg_location.split(",")
        if "AUTO" in split_location:
            if not self._reveal_ip:
                raise PrivacyError("tub.location uses AUTO")
            local_addresses = iputil.get_local_addresses_sync()
            # tubport must be like "tcp:12345" or "tcp:12345:morestuff"
            local_portnum = int(tubport.split(":")[1])
        new_locations = []
        for loc in split_location:
            if loc == "AUTO":
                new_locations.extend(["tcp:%s:%d" % (ip, local_portnum)
                                      for ip in local_addresses])
            else:
                if not self._reveal_ip:
                    # Legacy hints are "host:port". We use Foolscap's utility
                    # function to convert all hints into the modern format
                    # ("tcp:host:port") because that's what the receiving
                    # client will probably do. We test the converted hint for
                    # TCP-ness, but publish the original hint because that
                    # was the user's intent.
                    from foolscap.connections.tcp import convert_legacy_hint
                    converted_hint = convert_legacy_hint(loc)
                    hint_type = converted_hint.split(":")[0]
                    if hint_type == "tcp":
                        raise PrivacyError("tub.location includes tcp: hint")
                new_locations.append(loc)
        location = ",".join(new_locations)

        return tubport, location
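
    # Summary of outcomes: tub.port=disabled plus tub.location=disabled yields
    # None (no listener); a missing tub.port falls back to the portnum file or
    # a freshly allocated TCP port; and "AUTO" in tub.location is expanded to
    # one "tcp:<local-ip>:<portnum>" hint per detected local address.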

    def create_main_tub(self):
        certfile = os.path.join(self.basedir, "private", self.CERTFILE)
        self.tub = self._create_tub(certFile=certfile)

        self.nodeid = b32decode(self.tub.tubID.upper()) # binary format
        self.write_config("my_nodeid", b32encode(self.nodeid).lower() + "\n")
        self.short_nodeid = b32encode(self.nodeid).lower()[:8] # for printing
        cfg_tubport = self.config.get_config("node", "tub.port", None)
        cfg_location = self.config.get_config("node", "tub.location", None)
        portlocation = self.get_tub_portlocation(cfg_tubport, cfg_location)
        if portlocation:
            tubport, location = portlocation
            for port in tubport.split(","):
                if port in ("0", "tcp:0"):
                    raise ValueError("tub.port cannot be 0: you must choose")
                if port == "listen:i2p":
                    # the I2P provider will read its section of tahoe.cfg and
                    # return either a fully-formed Endpoint, or a descriptor
                    # that will create one, so we don't have to stuff all the
                    # options into the tub.port string (which would need a lot
                    # of escaping)
                    port_or_endpoint = self._i2p_provider.get_listener()
                elif port == "listen:tor":
                    port_or_endpoint = self._tor_provider.get_listener()
                else:
                    port_or_endpoint = port
                self.tub.listenOn(port_or_endpoint)
            self.tub.setLocation(location)
            self._tub_is_listening = True
            self.log("Tub location set to %s" % (location,))
            # the Tub is now ready for tub.registerReference()
        else:
            self._tub_is_listening = False
            self.log("Tub is not listening")

        self.tub.setServiceParent(self)

    def create_control_tub(self):
        # the control port uses a localhost-only ephemeral Tub, with no
        # control over the listening port or location
        self.control_tub = Tub()
        portnum = iputil.allocate_tcp_port()
        port = "tcp:%d:interface=127.0.0.1" % portnum
        location = "tcp:127.0.0.1:%d" % portnum
        self.control_tub.listenOn(port)
        self.control_tub.setLocation(location)
        self.log("Control Tub location set to %s" % (location,))
        self.control_tub.setServiceParent(self)

    def create_log_tub(self):
        # The logport uses a localhost-only ephemeral Tub, with no control
        # over the listening port or location. This might change if we
        # discover a compelling reason for it in the future (e.g. being able
        # to use "flogtool tail" against a remote server), but for now I
        # think we can live without it.
        self.log_tub = Tub()
        portnum = iputil.allocate_tcp_port()
        port = "tcp:%d:interface=127.0.0.1" % portnum
        location = "tcp:127.0.0.1:%d" % portnum
        self.log_tub.listenOn(port)
        self.log_tub.setLocation(location)
        self.log("Log Tub location set to %s" % (location,))
        self.log_tub.setServiceParent(self)

    def get_app_versions(self):
        # TODO: merge this with allmydata.get_package_versions
        return dict(app_versions.versions)

    def get_config_from_file(self, name, required=False):
        """Get the (string) contents of a config file, or None if the file
        did not exist. If required=True, raise an exception rather than
        returning None. Any leading or trailing whitespace will be stripped
        from the data."""
        fn = os.path.join(self.basedir, name)
        try:
            return fileutil.read(fn).strip()
        except EnvironmentError:
            if not required:
                return None
            raise

    def write_private_config(self, name, value):
        """Write 'value' into a private config file (a config file that
        resides within the subdirectory named 'private').
        """
        privname = os.path.join(self.basedir, "private", name)
        with open(privname, "w") as f:
            f.write(value)

    def get_private_config(self, name, default=_None):
        """Read the (string) contents of a private config file (a config file
        that resides within the subdirectory named 'private') and return it.
        Any leading or trailing whitespace will be stripped from the data.
        If the file does not exist, return the default if one was given,
        otherwise raise MissingConfigEntry.
        """
        privname = os.path.join(self.basedir, "private", name)
        try:
            return fileutil.read(privname).strip()
        except EnvironmentError:
            if os.path.exists(privname):
                raise
            if default is _None:
                raise MissingConfigEntry("The required configuration file %s is missing."
                                         % (quote_output(privname),))
            return default

    def get_or_create_private_config(self, name, default=_None):
        """Try to get the (string) contents of a private config file (which
        is a config file that resides within the subdirectory named
        'private'), and return it. Any leading or trailing whitespace will be
        stripped from the data.

        If the file does not exist, and default is not given, report an error.
        If the file does not exist and a default is specified, try to create
        it using that default, and then return the value that was written.
        If 'default' is a string, use it as a default value. If not, treat it
        as a zero-argument callable that is expected to return a string.
        """
        privname = os.path.join(self.basedir, "private", name)
        try:
            value = fileutil.read(privname)
        except EnvironmentError:
            if os.path.exists(privname):
                raise
            if default is _None:
                raise MissingConfigEntry("The required configuration file %s is missing."
                                         % (quote_output(privname),))
            if isinstance(default, basestring):
                value = default
            else:
                value = default()
            fileutil.write(privname, value)
        return value.strip()

    def write_config(self, name, value, mode="w"):
        """Write a string to a config file."""
        fn = os.path.join(self.basedir, name)
        try:
            fileutil.write(fn, value, mode)
        except EnvironmentError, e:
            self.log("Unable to write config file '%s'" % fn)
            self.log(e)

    def startService(self):
        # Note: this class can be started and stopped at most once.
        self.log("Node.startService")
        # Record the process id in the twisted log, after startService()
        # (__init__ is called before fork(), but startService is called
        # after). Note that Foolscap logs handle pid-logging by itself, no
        # need to send a pid to the foolscap log here.
        twlog.msg("My pid: %s" % os.getpid())
        try:
            os.chmod("twistd.pid", 0644)
        except EnvironmentError:
            pass

        service.MultiService.startService(self)
        self.log("%s running" % self.NODETYPE)
        twlog.msg("%s running" % self.NODETYPE)

    def stopService(self):
        self.log("Node.stopService")
        return service.MultiService.stopService(self)

    def shutdown(self):
        """Shut down the node. Returns a Deferred that fires (with None) when
        it finally stops kicking."""
        self.log("Node.shutdown")
        return self.stopService()

    def setup_logging(self):
        # we replace the formatTime() method of the log observer that
        # twistd set up for us, with a method that uses our preferred
        # timestamp format.
        for o in twlog.theLogPublisher.observers:
            # o might be a FileLogObserver's .emit method
            if type(o) is type(self.setup_logging): # bound method
                ob = o.im_self
                if isinstance(ob, twlog.FileLogObserver):
                    newmeth = types.UnboundMethodType(formatTimeTahoeStyle, ob, ob.__class__)
                    ob.formatTime = newmeth
        # TODO: twisted >2.5.0 offers maxRotatedFiles=50

        lgfurl_file = os.path.join(self.basedir, "private", "logport.furl").encode(get_filesystem_encoding())
        if os.path.exists(lgfurl_file):
            os.remove(lgfurl_file)
        self.log_tub.setOption("logport-furlfile", lgfurl_file)
        lgfurl = self.config.get_config("node", "log_gatherer.furl", "")
        if lgfurl:
            # this is in addition to the contents of log-gatherer-furlfile
            self.log_tub.setOption("log-gatherer-furl", lgfurl)
        self.log_tub.setOption("log-gatherer-furlfile",
                               os.path.join(self.basedir, "log_gatherer.furl"))

        incident_dir = os.path.join(self.basedir, "logs", "incidents")
        foolscap.logging.log.setLogDir(incident_dir.encode(get_filesystem_encoding()))
        twlog.msg("Foolscap logging initialized")
        twlog.msg("Note to developers: twistd.log does not receive very much.")
        twlog.msg("Use 'flogtool tail -c NODEDIR/private/logport.furl' instead")
        twlog.msg("and read docs/logging.rst")

    def log(self, *args, **kwargs):
        return log.msg(*args, **kwargs)

    def add_service(self, s):
        s.setServiceParent(self)
        return s
|