import os, stat, time, re

from twisted.internet import reactor
from twisted.application.internet import TimerService

from foolscap.logging import log

import allmydata
from allmydata import node
from allmydata.interfaces import RIStorageServer
from allmydata.interfaces import IURI, INewDirectoryURI, \
     IReadonlyNewDirectoryURI, IFileURI, IMutableFileURI
from allmydata.storage import StorageServer
from allmydata.upload import Uploader
from allmydata.download import Downloader
from allmydata.checker import Checker
from allmydata.offloaded import Helper
from allmydata.control import ControlServer
from allmydata.introducer import IntroducerClient
from allmydata.util import hashutil, base32, testutil
from allmydata.filenode import FileNode
from allmydata.dirnode import NewDirectoryNode
from allmydata.mutable import MutableFileNode, MutableWatcher
from allmydata.stats import StatsProvider

KiB=1024
MiB=1024*KiB
GiB=1024*MiB
TiB=1024*GiB
PiB=1024*TiB

class Client(node.Node, testutil.PollMixin):
    PORTNUMFILE = "client.port"
    STOREDIR = 'storage'
    NODETYPE = "client"
    SUICIDE_PREVENTION_HOTLINE_FILE = "suicide_prevention_hotline"

    # we're pretty narrow-minded right now
    OLDEST_SUPPORTED_VERSION = allmydata.__version__

    # Default erasure-coding parameters. 'k' is the number of shares
    # required to reconstruct a file. 'happy' means that we will abort an
    # upload unless we can allocate space for at least this many shares.
    # 'n' is the total number of shares created by encoding; if everybody
    # has room then this is how many we will upload.
    DEFAULT_ENCODING_PARAMETERS = {"k": 3,
                                   "happy": 7,
                                   "n": 10,
                                   "max_segment_size": 128*KiB,
                                   }
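    # Illustrative arithmetic: with k=3 and n=10, each share is about 1/3
    # the size of the original file, so a 1MiB upload consumes roughly
    # 10/3 ~= 3.3MiB of aggregate storage across the grid.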

    def __init__(self, basedir="."):
        node.Node.__init__(self, basedir)
        self.logSource="Client"
        self.nickname = self.get_config("nickname")
        if self.nickname is None:
            self.nickname = "<unspecified>"
        self.init_introducer_client()
        self.init_stats_provider()
        self.init_lease_secret()
        self.init_storage()
        self.init_control()
        run_helper = self.get_config("run_helper")
        if run_helper:
            self.init_helper()
        helper_furl = self.get_config("helper.furl")
        self.add_service(Uploader(helper_furl))
        self.add_service(Downloader())
        self.add_service(Checker())
        self.add_service(MutableWatcher())
        # ControlServer and Helper are attached after Tub startup

        hotline_file = os.path.join(self.basedir,
                                    self.SUICIDE_PREVENTION_HOTLINE_FILE)
        if os.path.exists(hotline_file):
            age = time.time() - os.stat(hotline_file)[stat.ST_MTIME]
            self.log("hotline file noticed (%ds old), starting timer" % age)
            hotline = TimerService(1.0, self._check_hotline, hotline_file)
            hotline.setServiceParent(self)

        webport = self.get_config("webport")
        if webport:
            self.init_web(webport) # strports string

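    # The introducer is our rendezvous point: we announce the services we
    # offer (e.g. our storage server, if we run one) and subscribe to hear
    # about the storage servers offered by everybody else.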
    def init_introducer_client(self):
        self.introducer_furl = self.get_config("introducer.furl", required=True)
        ic = IntroducerClient(self.tub, self.introducer_furl,
                              self.nickname,
                              str(allmydata.__version__),
                              str(self.OLDEST_SUPPORTED_VERSION))
        self.introducer_client = ic
        ic.setServiceParent(self)
        # nodes that want to upload and download will need storage servers
        ic.subscribe_to("storage")

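    # Stats gathering: if a 'stats_gatherer.furl' config file is present,
    # we instantiate a StatsProvider and connect it to the remote
    # StatsGatherer. Code that wants to offer stats for monitoring can
    # either call stats_provider.count('stat.name', value) to increment a
    # counter, or register an object as a stats producer; the gatherer
    # polls all connected providers periodically to retrieve the data.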
    def init_stats_provider(self):
        gatherer_furl = self.get_config('stats_gatherer.furl')
        if gatherer_furl:
            self.stats_provider = StatsProvider(self, gatherer_furl)
            self.add_service(self.stats_provider)
        else:
            self.stats_provider = None

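    # The lease secret is the root value from which our lease renewal and
    # cancel secrets are derived (see get_renewal_secret and
    # get_cancel_secret below). It is created on first run and persisted
    # in BASEDIR/private/secret.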
    def init_lease_secret(self):
        def make_secret():
            return base32.b2a(os.urandom(hashutil.CRYPTO_VAL_SIZE)) + "\n"
        secret_s = self.get_or_create_private_config("secret", make_secret)
        self._lease_secret = base32.a2b(secret_s)

    def init_storage(self):
        # should we run a storage server (and publish it for others to use)?
        provide_storage = (self.get_config("no_storage") is None)
        if not provide_storage:
            return
        readonly_storage = (self.get_config("readonly_storage") is not None)

        storedir = os.path.join(self.basedir, self.STOREDIR)
        sizelimit = None

        data = self.get_config("sizelimit")
        if data:
            m = re.match(r"^(\d+)([kKmMgG]?[bB]?)$", data)
            if not m:
                log.msg("sizelimit contains unparseable value %s" % data)
            else:
                number, suffix = m.groups()
                suffix = suffix.upper()
                if suffix.endswith("B"):
                    suffix = suffix[:-1]
                multiplier = {"": 1,
                              "K": 1000,
                              "M": 1000 * 1000,
                              "G": 1000 * 1000 * 1000,
                              }[suffix]
                sizelimit = int(number) * multiplier
        discard_storage = self.get_config("debug_discard_storage") is not None
        ss = StorageServer(storedir, sizelimit,
                           discard_storage, readonly_storage,
                           self.stats_provider)
        self.add_service(ss)
        d = self.when_tub_ready()
        # we can't do registerReference until the Tub is ready
        def _publish(res):
            furl_file = os.path.join(self.basedir, "private", "storage.furl")
            furl = self.tub.registerReference(ss, furlFile=furl_file)
            ri_name = RIStorageServer.__remote_name__
            self.introducer_client.publish(furl, "storage", ri_name)
        d.addCallback(_publish)
        d.addErrback(log.err, facility="tahoe.init", level=log.BAD)

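    # The control port is for local administrative access: its FURL is
    # written to BASEDIR/private/control.furl rather than being published
    # to the introducer.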
    def init_control(self):
        d = self.when_tub_ready()
        def _publish(res):
            c = ControlServer()
            c.setServiceParent(self)
            control_url = self.tub.registerReference(c)
            self.write_private_config("control.furl", control_url + "\n")
        d.addCallback(_publish)
        d.addErrback(log.err, facility="tahoe.init", level=log.BAD)

    def init_helper(self):
        d = self.when_tub_ready()
        def _publish(res):
            h = Helper(os.path.join(self.basedir, "helper"))
            h.setServiceParent(self)
            # TODO: this is confusing. BASEDIR/private/helper.furl is created
            # by the helper. BASEDIR/helper.furl is consumed by the client
            # who wants to use the helper. I like having the filename be the
            # same, since that makes 'cp' work smoothly, but the difference
            # between config inputs and generated outputs is hard to see.
            helper_furlfile = os.path.join(self.basedir,
                                           "private", "helper.furl")
            self.tub.registerReference(h, furlFile=helper_furlfile)
        d.addCallback(_publish)
        d.addErrback(log.err, facility="tahoe.init", level=log.BAD)

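    # The web frontend is only started when a 'webport' config entry (a
    # twisted strports string) is present. WebishServer is imported inside
    # init_web, presumably so nodes without a webport never have to load
    # the web machinery.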
    def init_web(self, webport):
        self.log("init_web(webport=%s)", args=(webport,))

        from allmydata.webish import WebishServer
        nodeurl_path = os.path.join(self.basedir, "node.url")
        ws = WebishServer(webport, nodeurl_path)
        if self.get_config("webport_allow_localfile") is not None:
            ws.allow_local_access(True)
        self.add_service(ws)

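    # The 'suicide prevention hotline' is a dead-man's switch: as long as
    # something keeps touching the hotline file (at least once every 20
    # seconds), the node stays alive. Once the file goes stale or
    # disappears, we stop the reactor and shut down.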
    def _check_hotline(self, hotline_file):
        if os.path.exists(hotline_file):
            mtime = os.stat(hotline_file)[stat.ST_MTIME]
            if mtime > time.time() - 20.0:
                return
            else:
                self.log("hotline file too old, shutting down")
        else:
            self.log("hotline file missing, shutting down")
        reactor.stop()

    def get_all_peerids(self):
        return self.introducer_client.get_all_peerids()

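    # get_permuted_peers returns the connected peers for a given service,
    # permuted by the provided key, so that every node computes the same
    # per-file ordering when placing or locating shares.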
    def get_permuted_peers(self, service_name, key):
        """
        @return: list of (peerid, connection,)
        """
        assert isinstance(service_name, str)
        assert isinstance(key, str)
        return self.introducer_client.get_permuted_peers(service_name, key)

    def get_encoding_parameters(self):
        return self.DEFAULT_ENCODING_PARAMETERS

    def connected_to_introducer(self):
        if self.introducer_client:
            return self.introducer_client.connected_to_introducer()
        return False

    def get_renewal_secret(self):
        return hashutil.my_renewal_secret_hash(self._lease_secret)

    def get_cancel_secret(self):
        return hashutil.my_cancel_secret_hash(self._lease_secret)

    def debug_wait_for_client_connections(self, num_clients):
        """Return a Deferred that fires (with None) when we have connections
        to the given number of peers. Useful for tests that set up a
        temporary test network and need to know when it is safe to proceed
        with an upload or download."""
        def _check():
            current_clients = list(self.get_all_peerids())
            return len(current_clients) >= num_clients
        d = self.poll(_check, 0.5)
        d.addCallback(lambda res: None)
        return d

    # these four methods are the primitives for creating filenodes and
    # dirnodes. The first takes a URI and produces a filenode or (new-style)
    # dirnode. The other three create brand-new filenodes/dirnodes.

    def create_node_from_uri(self, u):
        # this returns synchronously.
        u = IURI(u)
        if IReadonlyNewDirectoryURI.providedBy(u):
            # new-style read-only dirnodes
            return NewDirectoryNode(self).init_from_uri(u)
        if INewDirectoryURI.providedBy(u):
            # new-style dirnodes
            return NewDirectoryNode(self).init_from_uri(u)
        if IFileURI.providedBy(u):
            # CHK
            return FileNode(u, self)
        assert IMutableFileURI.providedBy(u), u
        return MutableFileNode(self).init_from_uri(u)

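    # Publish/retrieve events on mutable files are forwarded to the
    # MutableWatcher service, which tracks them so that active and recent
    # operations can be listed (see list_*_publish/list_*_retrieve below).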
    def notify_publish(self, p):
        self.getServiceNamed("mutable-watcher").notify_publish(p)
    def notify_retrieve(self, r):
        self.getServiceNamed("mutable-watcher").notify_retrieve(r)

    def create_empty_dirnode(self):
        n = NewDirectoryNode(self)
        d = n.create()
        d.addCallback(lambda res: n)
        return d

    def create_mutable_file(self, contents=""):
        n = MutableFileNode(self)
        d = n.create(contents)
        d.addCallback(lambda res: n)
        return d

    def upload(self, uploadable):
        uploader = self.getServiceNamed("uploader")
        return uploader.upload(uploadable)

    def list_all_uploads(self):
        uploader = self.getServiceNamed("uploader")
        return uploader.list_all_uploads()
    def list_active_uploads(self):
        uploader = self.getServiceNamed("uploader")
        return uploader.list_active_uploads()
    def list_recent_uploads(self):
        uploader = self.getServiceNamed("uploader")
        return uploader.list_recent_uploads()

    def list_all_downloads(self):
        downloader = self.getServiceNamed("downloader")
        return downloader.list_all_downloads()
    def list_active_downloads(self):
        downloader = self.getServiceNamed("downloader")
        return downloader.list_active_downloads()
    def list_recent_downloads(self):
        downloader = self.getServiceNamed("downloader")
        return downloader.list_recent_downloads()

    def list_all_publish(self):
        watcher = self.getServiceNamed("mutable-watcher")
        return watcher.list_all_publish()
    def list_active_publish(self):
        watcher = self.getServiceNamed("mutable-watcher")
        return watcher.list_active_publish()
    def list_recent_publish(self):
        watcher = self.getServiceNamed("mutable-watcher")
        return watcher.list_recent_publish()

    def list_all_retrieve(self):
        watcher = self.getServiceNamed("mutable-watcher")
        return watcher.list_all_retrieve()
    def list_active_retrieve(self):
        watcher = self.getServiceNamed("mutable-watcher")
        return watcher.list_active_retrieve()
    def list_recent_retrieve(self):
        watcher = self.getServiceNamed("mutable-watcher")
        return watcher.list_recent_retrieve()