import datetime, os.path, re, types, ConfigParser, tempfile
from base64 import b32decode, b32encode

from twisted.python import log as twlog
from twisted.application import service
from twisted.internet import defer, reactor
from foolscap.api import Tub, eventually, app_versions
import foolscap.logging.log
from allmydata import get_package_versions, get_package_versions_string
from allmydata.util import log
from allmydata.util import fileutil, iputil, observer
from allmydata.util.assertutil import precondition, _assert
from allmydata.util.fileutil import abspath_expanduser_unicode
from allmydata.util.encodingutil import get_filesystem_encoding

# Add our application versions to the data that Foolscap's LogPublisher
# reports.
for thing, things_version in get_package_versions().iteritems():
    app_versions.add_version(thing, str(things_version))

# group 1 will be addr (dotted quad string), group 3 if any will be portnum (string)
ADDR_RE = re.compile(r"^([1-9][0-9]*\.[1-9][0-9]*\.[1-9][0-9]*\.[1-9][0-9]*)(:([1-9][0-9]*))?$")
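# Illustration (our observation, not original documentation): a string such as
# "192.168.1.12:8098" matches, with group(1) == "192.168.1.12" and
# group(3) == "8098". Because each octet pattern begins with [1-9], addresses
# containing a zero octet (e.g. "10.0.0.1") do not match this regexp.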

def formatTimeTahoeStyle(self, when):
    # we want UTC timestamps that look like:
    #  2007-10-12 00:26:28.566Z [Client] rnp752lz: 'client running'
    d = datetime.datetime.utcfromtimestamp(when)
    if d.microsecond:
        return d.isoformat(" ")[:-3]+"Z"
    else:
        return d.isoformat(" ") + ".000Z"
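# A quick illustration (not executed by this module): an integral timestamp
# has a zero microsecond field, so formatTimeTahoeStyle(None, 0) returns
# "1970-01-01 00:00:00.000Z", while formatTimeTahoeStyle(None, 0.5) returns
# "1970-01-01 00:00:00.500Z". ('self' is unused here; the function is grafted
# onto a FileLogObserver in setup_logging() below.)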

PRIV_README = """
This directory contains files which contain private data for the Tahoe node,
such as private keys. On Unix-like systems, the permissions on this directory
are set to disallow users other than its owner from reading the contents of
the files. See the 'configuration.rst' documentation file for details."""

class _None: # used as a marker in get_config()
    pass

class MissingConfigEntry(Exception):
    """ A required config entry was not found. """

class OldConfigError(Exception):
    """ An obsolete config file was found. See
    docs/historical/configuration.rst. """

class Node(service.MultiService):
    # this implements common functionality of both Client nodes and Introducer
    # nodes.
    NODETYPE = "unknown NODETYPE"
    PORTNUMFILE = None
    CERTFILE = "node.pem"

    def __init__(self, basedir=u"."):
        service.MultiService.__init__(self)
        self.basedir = abspath_expanduser_unicode(unicode(basedir))
        self._portnumfile = os.path.join(self.basedir, self.PORTNUMFILE)
        self._tub_ready_observerlist = observer.OneShotObserverList()
        fileutil.make_dirs(os.path.join(self.basedir, "private"), 0700)
        open(os.path.join(self.basedir, "private", "README"), "w").write(PRIV_README)

        # creates self.config (populated from tahoe.cfg), after checking for
        # obsolete per-option config files
        self.read_config()
        nickname_utf8 = self.get_config("node", "nickname", "<unspecified>")
        self.nickname = nickname_utf8.decode("utf-8")
        assert type(self.nickname) is unicode

        self.init_tempdir()
        self.create_tub()
        self.logSource = "Node"

        self.setup_ssh()
        self.setup_logging()
        self.log("Node constructed. " + get_package_versions_string())
        iputil.increase_rlimits()

    def init_tempdir(self):
        local_tempdir_utf8 = "tmp" # default is NODEDIR/tmp/
        tempdir = self.get_config("node", "tempdir", local_tempdir_utf8).decode('utf-8')
        tempdir = os.path.join(self.basedir, tempdir)
        if not os.path.exists(tempdir):
            fileutil.make_dirs(tempdir)
        tempfile.tempdir = abspath_expanduser_unicode(tempdir)
        # this should cause twisted.web.http (which uses
        # tempfile.TemporaryFile) to put large request bodies in the given
        # directory. Without this, the default temp dir is usually /tmp/,
        # which is frequently too small.
        test_name = tempfile.mktemp()
        _assert(os.path.dirname(test_name) == tempdir, test_name, tempdir)

    def get_config(self, section, option, default=_None, boolean=False):
        try:
            if boolean:
                return self.config.getboolean(section, option)
            return self.config.get(section, option)
        except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
            if default is _None:
                fn = os.path.join(self.basedir, "tahoe.cfg")
                raise MissingConfigEntry("%s is missing the [%s]%s entry"
                                         % (fn, section, option))
            return default
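    # Illustrative calls (our examples, not part of the class): the expression
    # self.get_config("node", "timeout.keepalive", "") evaluates to "" when
    # tahoe.cfg has no such entry, whereas omitting the default turns a
    # missing entry into a MissingConfigEntry exception.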

    def set_config(self, section, option, value):
        if not self.config.has_section(section):
            self.config.add_section(section)
        self.config.set(section, option, value)
        assert self.config.get(section, option) == value

    def read_config(self):
        self.warn_about_old_config_files()
        self.config = ConfigParser.SafeConfigParser()
        self.config.read([os.path.join(self.basedir, "tahoe.cfg")])
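    # For reference, a minimal BASEDIR/tahoe.cfg consumed by the accessors
    # above might look like this (values are illustrative, not shipped
    # defaults; only option names referenced in this class are shown):
    #
    #   [node]
    #   nickname = mynode
    #   tub.port = tcp:0
    #   timeout.keepalive = 240
    #   timeout.disconnect = 1800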

    def warn_about_old_config_files(self):
        """ If any old configuration files are detected, raise OldConfigError. """

        oldfnames = set()
        for name in [
            'nickname', 'webport', 'keepalive_timeout', 'log_gatherer.furl',
            'disconnect_timeout', 'advertised_ip_addresses', 'introducer.furl',
            'helper.furl', 'key_generator.furl', 'stats_gatherer.furl',
            'no_storage', 'readonly_storage', 'sizelimit',
            'debug_discard_storage', 'run_helper']:
            fullfname = os.path.join(self.basedir, name)
            if os.path.exists(fullfname):
                log.err("Found pre-Tahoe-LAFS-v1.3 configuration file: '%s'. See docs/historical/configuration.rst." % (fullfname,))
                oldfnames.add(fullfname)
        if oldfnames:
            raise OldConfigError(oldfnames)

    def create_tub(self):
        certfile = os.path.join(self.basedir, "private", self.CERTFILE)
        self.tub = Tub(certFile=certfile)
        self.tub.setOption("logLocalFailures", True)
        self.tub.setOption("logRemoteFailures", True)
        self.tub.setOption("expose-remote-exception-types", False)

        # see #521 for a discussion of how to pick these timeout values.
        keepalive_timeout_s = self.get_config("node", "timeout.keepalive", "")
        if keepalive_timeout_s:
            self.tub.setOption("keepaliveTimeout", int(keepalive_timeout_s))
        disconnect_timeout_s = self.get_config("node", "timeout.disconnect", "")
        if disconnect_timeout_s:
            # N.B.: this is in seconds, so use "1800" to get 30min
            self.tub.setOption("disconnectTimeout", int(disconnect_timeout_s))

        self.nodeid = b32decode(self.tub.tubID.upper()) # binary format
        self.write_config("my_nodeid", b32encode(self.nodeid).lower() + "\n")
        self.short_nodeid = b32encode(self.nodeid).lower()[:8] # ready for printing

        tubport = self.get_config("node", "tub.port", "tcp:0")
        self.tub.listenOn(tubport)
        # we must wait until our service has started before we can find out
        # our IP address and thus do tub.setLocation, and we can't register
        # any services with the Tub until after that point
        self.tub.setServiceParent(self)
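    # Note (our gloss, not original documentation): "tub.port" is handed
    # straight to Tub.listenOn() as a strports-style description; the default
    # "tcp:0" asks the kernel for any free TCP port, and _setup_tub() below
    # records whichever port was assigned so the same one can be reused on
    # the next run.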

    def setup_ssh(self):
        ssh_port = self.get_config("node", "ssh.port", "")
        if ssh_port:
            ssh_keyfile = self.get_config("node", "ssh.authorized_keys_file").decode('utf-8')
            from allmydata import manhole
            m = manhole.AuthorizedKeysManhole(ssh_port, ssh_keyfile.encode(get_filesystem_encoding()))
            m.setServiceParent(self)
            self.log("AuthorizedKeysManhole listening on %s" % ssh_port)
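    # Illustrative tahoe.cfg entries for the manhole above (example values,
    # not defaults):
    #
    #   [node]
    #   ssh.port = 8022
    #   ssh.authorized_keys_file = ~/.ssh/authorized_keys
    #
    # ssh.authorized_keys_file has no default, so enabling ssh.port without
    # it raises MissingConfigEntry.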

    def get_app_versions(self):
        # TODO: merge this with allmydata.get_package_versions
        return dict(app_versions.versions)

    def write_private_config(self, name, value):
        """Write the (string) contents of a private config file (a config
        file that resides within the subdirectory named 'private'). Any
        leading or trailing whitespace will be stripped from the data before
        it is written.
        """
        privname = os.path.join(self.basedir, "private", name)
        open(privname, "w").write(value.strip())

    def get_or_create_private_config(self, name, default):
        """Try to get the (string) contents of a private config file (which
        is a config file that resides within the subdirectory named
        'private'), and return it. Any leading or trailing whitespace will be
        stripped from the data.

        If the file does not exist, try to create it using default, and
        then return the value that was written. If 'default' is a string,
        use it as a default value. If not, treat it as a 0-argument callable
        which is expected to return a string.
        """
        privname = os.path.join(self.basedir, "private", name)
        try:
            value = fileutil.read(privname)
        except EnvironmentError:
            if isinstance(default, basestring):
                value = default
            else:
                value = default()
            fileutil.write(privname, value)
        return value.strip()
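    # Hypothetical caller sketch (the names below are ours, not part of this
    # module): a subclass could lazily create a persistent secret with
    #   secret = self.get_or_create_private_config("my_secret", _make_secret)
    # where _make_secret() returns the string to store on first use; later
    # calls simply read BASEDIR/private/my_secret back.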

    def write_config(self, name, value, mode="w"):
        """Write a string to a config file."""
        fn = os.path.join(self.basedir, name)
        try:
            open(fn, mode).write(value)
        except EnvironmentError, e:
            self.log("Unable to write config file '%s'" % fn)
            self.log(e)

    def startService(self):
        # Note: this class can be started and stopped at most once.
        self.log("Node.startService")
        # Record the process id in the twisted log, after startService()
        # (__init__ is called before fork(), but startService is called
        # after). Note that Foolscap logs handle pid-logging by itself, no
        # need to send a pid to the foolscap log here.
        twlog.msg("My pid: %s" % os.getpid())
        try:
            os.chmod("twistd.pid", 0644)
        except EnvironmentError:
            pass
        # Delay until the reactor is running.
        eventually(self._startService)

    def _startService(self):
        precondition(reactor.running)
        self.log("Node._startService")

        service.MultiService.startService(self)
        d = defer.succeed(None)
        d.addCallback(lambda res: iputil.get_local_addresses_async())
        d.addCallback(self._setup_tub)
        def _ready(res):
            self.log("%s running" % self.NODETYPE)
            self._tub_ready_observerlist.fire(self)
            return self
        d.addCallback(_ready)
        d.addErrback(self._service_startup_failed)

    def _service_startup_failed(self, failure):
        self.log('_startService() failed')
        log.err(failure)
        print "Node._startService failed, aborting"
        print failure
        #reactor.stop() # for unknown reasons, reactor.stop() isn't working. [ ] TODO
        self.log('calling os.abort()')
        twlog.msg('calling os.abort()') # make sure it gets into twistd.log
        print "calling os.abort()"
        os.abort()

    def stopService(self):
        self.log("Node.stopService")
        d = self._tub_ready_observerlist.when_fired()
        def _really_stopService(ignored):
            self.log("Node._really_stopService")
            return service.MultiService.stopService(self)
        d.addCallback(_really_stopService)
        return d

    def shutdown(self):
        """Shut down the node. Returns a Deferred that fires (with None) when
        it finally stops kicking."""
        self.log("Node.shutdown")
        return self.stopService()

    def setup_logging(self):
        # we replace the formatTime() method of the log observer that
        # twistd set up for us, with a method that uses our preferred
        # timestamp format.
        for o in twlog.theLogPublisher.observers:
            # o might be a FileLogObserver's .emit method
            if type(o) is type(self.setup_logging): # bound method
                ob = o.im_self
                if isinstance(ob, twlog.FileLogObserver):
                    newmeth = types.UnboundMethodType(formatTimeTahoeStyle, ob, ob.__class__)
                    ob.formatTime = newmeth
        # TODO: twisted >2.5.0 offers maxRotatedFiles=50

        lgfurl_file = os.path.join(self.basedir, "private", "logport.furl").encode(get_filesystem_encoding())
        self.tub.setOption("logport-furlfile", lgfurl_file)
        lgfurl = self.get_config("node", "log_gatherer.furl", "")
        if lgfurl:
            # this is in addition to the contents of log-gatherer-furlfile
            self.tub.setOption("log-gatherer-furl", lgfurl)
        self.tub.setOption("log-gatherer-furlfile",
                           os.path.join(self.basedir, "log_gatherer.furl"))
        self.tub.setOption("bridge-twisted-logs", True)
        incident_dir = os.path.join(self.basedir, "logs", "incidents")
        # this doesn't quite work yet: unit tests fail
        foolscap.logging.log.setLogDir(incident_dir)

    def log(self, *args, **kwargs):
        return log.msg(*args, **kwargs)

    def _setup_tub(self, local_addresses):
        # we can't get a dynamically-assigned portnum until our Tub is
        # running, which means after startService.
        l = self.tub.getListeners()[0]
        portnum = l.getPortnum()
        # record which port we're listening on, so we can grab the same one
        # next time
        open(self._portnumfile, "w").write("%d\n" % portnum)

        base_location = ",".join([ "%s:%d" % (addr, portnum)
                                   for addr in local_addresses ])
        location = self.get_config("node", "tub.location", base_location)
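        # For example (our illustration): with local_addresses
        # ["192.0.2.1", "127.0.0.1"] and portnum 8098, base_location is
        # "192.0.2.1:8098,127.0.0.1:8098"; a "tub.location" entry in
        # tahoe.cfg overrides the computed value.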
        self.log("Tub location set to %s" % location)
        self.tub.setLocation(location)

        return self.tub

    def when_tub_ready(self):
        return self._tub_ready_observerlist.when_fired()

    def add_service(self, s):
        s.setServiceParent(self)
        return s