import os, stat, time, weakref
from allmydata.interfaces import RIStorageServer
from allmydata import node

from zope.interface import implements
from twisted.internet import reactor
from twisted.application.internet import TimerService
from foolscap.api import Referenceable
from pycryptopp.publickey import rsa

import allmydata
from allmydata.storage.server import StorageServer
from allmydata import storage_client
from allmydata.immutable.upload import Uploader
from allmydata.immutable.download import Downloader
from allmydata.immutable.filenode import FileNode, LiteralFileNode
from allmydata.immutable.offloaded import Helper
from allmydata.control import ControlServer
from allmydata.introducer.client import IntroducerClient
from allmydata.util import hashutil, base32, pollmixin, cachedir, log
from allmydata.util.abbreviate import parse_abbreviated_size
from allmydata.util.time_format import parse_duration, parse_date
from allmydata.uri import LiteralFileURI, UnknownURI
from allmydata.dirnode import DirectoryNode
from allmydata.mutable.filenode import MutableFileNode
from allmydata.unknown import UnknownNode
from allmydata.stats import StatsProvider
from allmydata.history import History
from allmydata.interfaces import IURI, IDirectoryURI, IStatsProducer, \
     IReadonlyDirectoryURI, IFileURI, IMutableFileURI, RIStubClient, \
     UnhandledCapTypeError

KiB=1024
MiB=1024*KiB
GiB=1024*MiB
TiB=1024*GiB
PiB=1024*TiB

class StubClient(Referenceable):
    implements(RIStubClient)

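# Create a new random secret: CRYPTO_VAL_SIZE bytes from os.urandom, base32
# encoded and newline-terminated so the result can be written directly into a
# private config file (see get_or_create_private_config below).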
def _make_secret():
    return base32.b2a(os.urandom(hashutil.CRYPTO_VAL_SIZE)) + "\n"

class Client(node.Node, pollmixin.PollMixin):
    implements(IStatsProducer)

    PORTNUMFILE = "client.port"
    STOREDIR = 'storage'
    NODETYPE = "client"
    SUICIDE_PREVENTION_HOTLINE_FILE = "suicide_prevention_hotline"

    # This means that if a storage server treats me as though I were a
    # 1.0.0 storage client, it will work as they expect.
    OLDEST_SUPPORTED_VERSION = "1.0.0"

    # These are the default encoding parameters (needed, desired, total, and
    # max_segment_size). 'needed' ('k') is the number of shares required to
    # reconstruct a file. 'desired' ('happy') means that we will abort an
    # upload unless we can allocate space for at least this many. 'total'
    # ('n') is the total number of shares created by encoding. If everybody
    # has room then this is how many we will upload.
    DEFAULT_ENCODING_PARAMETERS = {"k": 3,
                                   "happy": 7,
                                   "n": 10,
                                   "max_segment_size": 128*KiB,
                                   }
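    # These defaults can be overridden per-node in tahoe.cfg; init_client()
    # below reads them from the [client] section. A sketch (values are
    # illustrative, not recommendations):
    #   [client]
    #   shares.needed = 3
    #   shares.happy = 7
    #   shares.total = 10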
    # set this to override the size of the RSA keys created for new mutable
    # files. The default of None means to let mutable.filenode choose its own
    # size, which means 2048 bits.
    DEFAULT_MUTABLE_KEYSIZE = None

    def __init__(self, basedir="."):
        node.Node.__init__(self, basedir)
        self.started_timestamp = time.time()
        self.logSource="Client"
        self.DEFAULT_ENCODING_PARAMETERS = self.DEFAULT_ENCODING_PARAMETERS.copy()
        self.init_introducer_client()
        self.init_stats_provider()
        self.init_lease_secret()
        self.init_storage()
        self.init_control()
        if self.get_config("helper", "enabled", False, boolean=True):
            self.init_helper()
        self.init_client()
        self._key_generator = None
        key_gen_furl = self.get_config("client", "key_generator.furl", None)
        if key_gen_furl:
            self.init_key_gen(key_gen_furl)
        # ControlServer and Helper are attached after Tub startup
        self.init_ftp_server()
        self.init_sftp_server()

        hotline_file = os.path.join(self.basedir,
                                    self.SUICIDE_PREVENTION_HOTLINE_FILE)
        if os.path.exists(hotline_file):
            age = time.time() - os.stat(hotline_file)[stat.ST_MTIME]
            self.log("hotline file noticed (%ds old), starting timer" % age)
            hotline = TimerService(1.0, self._check_hotline, hotline_file)
            hotline.setServiceParent(self)

        # this needs to happen last, so it can use getServiceNamed() to
        # acquire references to StorageServer and other web-statusable things
        webport = self.get_config("node", "web.port", None)
        if webport:
            self.init_web(webport) # strports string

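    # Migrate older per-file configuration into tahoe.cfg sections: earlier
    # nodes kept one-value files (e.g. BASEDIR/introducer.furl) and marker
    # files (e.g. BASEDIR/no_storage) in the base directory; copy those into
    # the corresponding config settings so the rest of the code only needs
    # get_config().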
    def read_old_config_files(self):
        node.Node.read_old_config_files(self)
        copy = self._copy_config_from_file
        copy("introducer.furl", "client", "introducer.furl")
        copy("helper.furl", "client", "helper.furl")
        copy("key_generator.furl", "client", "key_generator.furl")
        copy("stats_gatherer.furl", "client", "stats_gatherer.furl")
        if os.path.exists(os.path.join(self.basedir, "no_storage")):
            self.set_config("storage", "enabled", "false")
        if os.path.exists(os.path.join(self.basedir, "readonly_storage")):
            self.set_config("storage", "readonly", "true")
        if os.path.exists(os.path.join(self.basedir, "debug_discard_storage")):
            self.set_config("storage", "debug_discard", "true")
        if os.path.exists(os.path.join(self.basedir, "run_helper")):
            self.set_config("helper", "enabled", "true")

    def init_introducer_client(self):
        self.introducer_furl = self.get_config("client", "introducer.furl")
        ic = IntroducerClient(self.tub, self.introducer_furl,
                              self.nickname,
                              str(allmydata.__full_version__),
                              str(self.OLDEST_SUPPORTED_VERSION))
        self.introducer_client = ic
        # hold off on starting the IntroducerClient until our tub has been
        # started, so we'll have a useful address on our RemoteReference, so
        # that the introducer's status page will show us.
        d = self.when_tub_ready()
        def _start_introducer_client(res):
            ic.setServiceParent(self)
        d.addCallback(_start_introducer_client)
        d.addErrback(log.err, facility="tahoe.init",
                     level=log.BAD, umid="URyI5w")

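    # Stats gathering: if [client]stats_gatherer.furl is set, the
    # StatsProvider connects to that gatherer, which then polls us
    # periodically; each poll reports both the provider's internal counters
    # and the stats returned by registered producers (such as this Client).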
    def init_stats_provider(self):
        gatherer_furl = self.get_config("client", "stats_gatherer.furl", None)
        self.stats_provider = StatsProvider(self, gatherer_furl)
        self.add_service(self.stats_provider)
        self.stats_provider.register_producer(self)

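    # IStatsProducer interface: called by our StatsProvider whenever the
    # stats gatherer polls this node.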
    def get_stats(self):
        return { 'node.uptime': time.time() - self.started_timestamp }

    def init_lease_secret(self):
        secret_s = self.get_or_create_private_config("secret", _make_secret)
        self._lease_secret = base32.a2b(secret_s)

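    # The storage server is configured from the [storage] section of
    # tahoe.cfg. A sketch of the keys consulted below (values are
    # illustrative; see allmydata.util.abbreviate and
    # allmydata.util.time_format for the accepted size/duration/date forms):
    #   [storage]
    #   enabled = true
    #   readonly = false
    #   reserved_space = 1G
    #   expire.enabled = true
    #   expire.mode = age
    #   expire.override_lease_duration = 60 days
    #   expire.cutoff_date = 2009-01-01
    #   expire.immutable = true
    #   expire.mutable = true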
    def init_storage(self):
        # should we run a storage server (and publish it for others to use)?
        if not self.get_config("storage", "enabled", True, boolean=True):
            return
        readonly = self.get_config("storage", "readonly", False, boolean=True)

        storedir = os.path.join(self.basedir, self.STOREDIR)

        data = self.get_config("storage", "reserved_space", None)
        reserved = None
        try:
            reserved = parse_abbreviated_size(data)
        except ValueError:
            log.msg("[storage]reserved_space= contains unparseable value %s"
                    % data)
        if reserved is None:
            reserved = 0
        discard = self.get_config("storage", "debug_discard", False,
                                  boolean=True)

        expire = self.get_config("storage", "expire.enabled", False, boolean=True)
        if expire:
            mode = self.get_config("storage", "expire.mode") # require a mode
        else:
            mode = self.get_config("storage", "expire.mode", "age")

        o_l_d = self.get_config("storage", "expire.override_lease_duration", None)
        if o_l_d is not None:
            o_l_d = parse_duration(o_l_d)

        cutoff_date = None
        if mode == "cutoff-date":
            cutoff_date = self.get_config("storage", "expire.cutoff_date")
            cutoff_date = parse_date(cutoff_date)

        sharetypes = []
        if self.get_config("storage", "expire.immutable", True, boolean=True):
            sharetypes.append("immutable")
        if self.get_config("storage", "expire.mutable", True, boolean=True):
            sharetypes.append("mutable")
        expiration_sharetypes = tuple(sharetypes)

        ss = StorageServer(storedir, self.nodeid,
                           reserved_space=reserved,
                           discard_storage=discard,
                           readonly_storage=readonly,
                           stats_provider=self.stats_provider,
                           expiration_enabled=expire,
                           expiration_mode=mode,
                           expiration_override_lease_duration=o_l_d,
                           expiration_cutoff_date=cutoff_date,
                           expiration_sharetypes=expiration_sharetypes)
        self.add_service(ss)

        d = self.when_tub_ready()
        # we can't do registerReference until the Tub is ready
        def _publish(res):
            furl_file = os.path.join(self.basedir, "private", "storage.furl")
            furl = self.tub.registerReference(ss, furlFile=furl_file)
            ri_name = RIStorageServer.__remote_name__
            self.introducer_client.publish(furl, "storage", ri_name)
        d.addCallback(_publish)
        d.addErrback(log.err, facility="tahoe.init",
                     level=log.BAD, umid="aLGBKw")

    def init_client(self):
        helper_furl = self.get_config("client", "helper.furl", None)
        DEP = self.DEFAULT_ENCODING_PARAMETERS
        DEP["k"] = int(self.get_config("client", "shares.needed", DEP["k"]))
        DEP["n"] = int(self.get_config("client", "shares.total", DEP["n"]))
        DEP["happy"] = int(self.get_config("client", "shares.happy", DEP["happy"]))
        convergence_s = self.get_or_create_private_config('convergence', _make_secret)
        self.convergence = base32.a2b(convergence_s)
        self._node_cache = weakref.WeakValueDictionary() # uri -> node

        self.init_client_storage_broker()
        self.add_service(History(self.stats_provider))
        self.add_service(Uploader(helper_furl, self.stats_provider))
        download_cachedir = os.path.join(self.basedir,
                                         "private", "cache", "download")
        self.download_cache_dirman = cachedir.CacheDirectoryManager(download_cachedir)
        self.download_cache_dirman.setServiceParent(self)
        self.add_service(Downloader(self.stats_provider))
        self.init_stub_client()

    def init_client_storage_broker(self):
        # create a StorageFarmBroker object, for use by Uploader/Downloader
        # (and everybody else who wants to use storage servers)
        sb = storage_client.StorageFarmBroker(self.tub, permute_peers=True)
        self.storage_broker = sb

        # load static server specifications from tahoe.cfg, if any.
        # Not quite ready yet.
        #if self.config.has_section("client-server-selection"):
        #    server_params = {} # maps serverid to dict of parameters
        #    for (name, value) in self.config.items("client-server-selection"):
        #        pieces = name.split(".")
        #        if pieces[0] == "server":
        #            serverid = pieces[1]
        #            if serverid not in server_params:
        #                server_params[serverid] = {}
        #            server_params[serverid][pieces[2]] = value
        #    for serverid, params in server_params.items():
        #        server_type = params.pop("type")
        #        if server_type == "tahoe-foolscap":
        #            s = storage_client.NativeStorageClient(*params)
        #        else:
        #            msg = ("unrecognized server type '%s' in "
        #                   "tahoe.cfg [client-server-selection]server.%s.type"
        #                   % (server_type, serverid))
        #            raise storage_client.UnknownServerTypeError(msg)
        #        sb.add_server(s.serverid, s)

        # check to see if we're supposed to use the introducer too
        if self.get_config("client-server-selection", "use_introducer",
                           default=True, boolean=True):
            sb.use_introducer(self.introducer_client)

    def get_storage_broker(self):
        return self.storage_broker

    def init_stub_client(self):
        def _publish(res):
            # we publish an empty object so that the introducer can count how
            # many clients are connected and see what versions they're
            # running.
            sc = StubClient()
            furl = self.tub.registerReference(sc)
            ri_name = RIStubClient.__remote_name__
            self.introducer_client.publish(furl, "stub_client", ri_name)
        d = self.when_tub_ready()
        d.addCallback(_publish)
        d.addErrback(log.err, facility="tahoe.init",
                     level=log.BAD, umid="OEHq3g")

    def get_history(self):
        return self.getServiceNamed("history")

    def init_control(self):
        d = self.when_tub_ready()
        def _publish(res):
            c = ControlServer()
            c.setServiceParent(self)
            control_url = self.tub.registerReference(c)
            self.write_private_config("control.furl", control_url + "\n")
        d.addCallback(_publish)
        d.addErrback(log.err, facility="tahoe.init",
                     level=log.BAD, umid="d3tNXA")

    def init_helper(self):
        d = self.when_tub_ready()
        def _publish(res):
            h = Helper(os.path.join(self.basedir, "helper"), self.stats_provider)
            h.setServiceParent(self)
            # TODO: this is confusing. BASEDIR/private/helper.furl is created
            # by the helper. BASEDIR/helper.furl is consumed by the client
            # who wants to use the helper. I like having the filename be the
            # same, since that makes 'cp' work smoothly, but the difference
            # between config inputs and generated outputs is hard to see.
            helper_furlfile = os.path.join(self.basedir,
                                           "private", "helper.furl")
            self.tub.registerReference(h, furlFile=helper_furlfile)
        d.addCallback(_publish)
        d.addErrback(log.err, facility="tahoe.init",
                     level=log.BAD, umid="K0mW5w")

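    # Optional remote RSA key generator: if [client]key_generator.furl is
    # configured, connect to that service and use it from
    # _generate_pubprivkeys(); if the connection is lost we fall back to
    # local key generation.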
    def init_key_gen(self, key_gen_furl):
        d = self.when_tub_ready()
        def _subscribe(res):
            self.tub.connectTo(key_gen_furl, self._got_key_generator)
        d.addCallback(_subscribe)
        d.addErrback(log.err, facility="tahoe.init",
                     level=log.BAD, umid="z9DMzw")

    def _got_key_generator(self, key_generator):
        self._key_generator = key_generator
        key_generator.notifyOnDisconnect(self._lost_key_generator)

    def _lost_key_generator(self):
        self._key_generator = None

    def init_web(self, webport):
        self.log("init_web(webport=%s)", args=(webport,))

        from allmydata.webish import WebishServer
        nodeurl_path = os.path.join(self.basedir, "node.url")
        staticdir = self.get_config("node", "web.static", "public_html")
        staticdir = os.path.expanduser(staticdir)
        ws = WebishServer(self, webport, nodeurl_path, staticdir)
        self.add_service(ws)

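    # The FTP and SFTP frontends are both optional and configured from their
    # own tahoe.cfg sections; a sketch of the keys read below (values and
    # file paths are illustrative only):
    #   [ftpd]
    #   enabled = true
    #   port = 8021
    #   accounts.file = private/ftp.accounts
    #   [sftpd]
    #   enabled = true
    #   port = 8022
    #   host_pubkey_file = private/ssh_host_rsa_key.pub
    #   host_privkey_file = private/ssh_host_rsa_key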
    def init_ftp_server(self):
        if self.get_config("ftpd", "enabled", False, boolean=True):
            accountfile = self.get_config("ftpd", "accounts.file", None)
            accounturl = self.get_config("ftpd", "accounts.url", None)
            ftp_portstr = self.get_config("ftpd", "port", "8021")

            from allmydata.frontends import ftpd
            s = ftpd.FTPServer(self, accountfile, accounturl, ftp_portstr)
            s.setServiceParent(self)

    def init_sftp_server(self):
        if self.get_config("sftpd", "enabled", False, boolean=True):
            accountfile = self.get_config("sftpd", "accounts.file", None)
            accounturl = self.get_config("sftpd", "accounts.url", None)
            sftp_portstr = self.get_config("sftpd", "port", "8022")
            pubkey_file = self.get_config("sftpd", "host_pubkey_file")
            privkey_file = self.get_config("sftpd", "host_privkey_file")

            from allmydata.frontends import sftpd
            s = sftpd.SFTPServer(self, accountfile, accounturl,
                                 sftp_portstr, pubkey_file, privkey_file)
            s.setServiceParent(self)

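    # Watchdog for the "suicide prevention hotline" file: __init__ starts a
    # one-second TimerService pointing here when the file exists. If the
    # file goes missing or its mtime becomes more than 120 seconds old, the
    # node stops the reactor and shuts down.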
    def _check_hotline(self, hotline_file):
        if os.path.exists(hotline_file):
            mtime = os.stat(hotline_file)[stat.ST_MTIME]
            if mtime > time.time() - 120.0:
                return
            else:
                self.log("hotline file too old, shutting down")
        else:
            self.log("hotline file missing, shutting down")
        reactor.stop()

    def get_encoding_parameters(self):
        return self.DEFAULT_ENCODING_PARAMETERS

    def connected_to_introducer(self):
        if self.introducer_client:
            return self.introducer_client.connected_to_introducer()
        return False

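    # Lease secrets: both the renewal and cancel secrets are derived from the
    # node's private lease secret (see init_lease_secret) via hashutil, so
    # they remain stable across restarts.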
    def get_renewal_secret(self):
        return hashutil.my_renewal_secret_hash(self._lease_secret)

    def get_cancel_secret(self):
        return hashutil.my_cancel_secret_hash(self._lease_secret)

    def debug_wait_for_client_connections(self, num_clients):
        """Return a Deferred that fires (with None) when we have connections
        to the given number of peers. Useful for tests that set up a
        temporary test network and need to know when it is safe to proceed
        with an upload or download."""
        def _check():
            return len(self.storage_broker.get_all_servers()) >= num_clients
        d = self.poll(_check, 0.5)
        d.addCallback(lambda res: None)
        return d


    # these four methods are the primitives for creating filenodes and
    # dirnodes. The first takes a URI and produces a filenode or (new-style)
    # dirnode. The other three create brand-new filenodes/dirnodes.
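    # A rough usage sketch of these primitives (the cap string below is a
    # placeholder, not a real cap); upload(), defined further down, is the
    # fourth one and creates brand-new immutable files:
    #   n = client.create_node_from_uri("URI:DIR2:...")  # synchronous
    #   d = client.create_empty_dirnode()      # Deferred -> DirectoryNode
    #   d = client.create_mutable_file("data") # Deferred -> MutableFileNode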
    def create_node_from_uri(self, writecap, readcap=None):
        # this returns synchronously.
        u = writecap or readcap
        if not u:
            # maybe the writecap was hidden because we're in a readonly
            # directory, and the future cap format doesn't have a readcap, or
            # something.
            return UnknownNode(writecap, readcap)
        u = IURI(u)
        if isinstance(u, UnknownURI):
            return UnknownNode(writecap, readcap)
        u_s = u.to_string()
        if u_s not in self._node_cache:
            if IReadonlyDirectoryURI.providedBy(u):
                # read-only dirnodes
                node = DirectoryNode(self).init_from_uri(u)
            elif IDirectoryURI.providedBy(u):
                # dirnodes
                node = DirectoryNode(self).init_from_uri(u)
            elif IFileURI.providedBy(u):
                if isinstance(u, LiteralFileURI):
                    node = LiteralFileNode(u, self) # LIT
                else:
                    node = FileNode(u, self, self.download_cache_dirman) # CHK
            elif IMutableFileURI.providedBy(u):
                node = MutableFileNode(self).init_from_uri(u)
            else:
                raise UnhandledCapTypeError("cap is recognized, but has no Node")
            self._node_cache[u_s] = node # note: WeakValueDictionary
        return self._node_cache[u_s]

    def create_empty_dirnode(self):
        d = self.create_mutable_file()
        d.addCallback(DirectoryNode.create_with_mutablefile, self)
        return d

    def create_mutable_file(self, contents="", keysize=None):
        keysize = keysize or self.DEFAULT_MUTABLE_KEYSIZE
        n = MutableFileNode(self)
        d = n.create(contents, self._generate_pubprivkeys, keysize=keysize)
        d.addCallback(lambda res: n)
        return d

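    # RSA keypair source for new mutable files: use the remote key generator
    # when one is connected (see init_key_gen); otherwise generate the pair
    # locally and synchronously with pycryptopp.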
    def _generate_pubprivkeys(self, key_size):
        if self._key_generator:
            d = self._key_generator.callRemote('get_rsa_key_pair', key_size)
            def make_key_objs((verifying_key, signing_key)):
                v = rsa.create_verifying_key_from_string(verifying_key)
                s = rsa.create_signing_key_from_string(signing_key)
                return v, s
            d.addCallback(make_key_objs)
            return d
        else:
            # RSA key generation for a 2048 bit key takes between 0.8 and 3.2
            # secs
            signer = rsa.generate(key_size)
            verifier = signer.get_verifying_key()
            return verifier, signer

    def upload(self, uploadable):
        uploader = self.getServiceNamed("uploader")
        return uploader.upload(uploadable, history=self.get_history())

    def list_all_upload_statuses(self):
        return self.get_history().list_all_upload_statuses()

    def list_all_download_statuses(self):
        return self.get_history().list_all_download_statuses()

    def list_all_mapupdate_statuses(self):
        return self.get_history().list_all_mapupdate_statuses()

    def list_all_publish_statuses(self):
        return self.get_history().list_all_publish_statuses()

    def list_all_retrieve_statuses(self):
        return self.get_history().list_all_retrieve_statuses()

    def list_all_helper_statuses(self):
        try:
            helper = self.getServiceNamed("helper")
        except KeyError:
            return []
        return helper.get_all_upload_statuses()