import os.path, stat
from cStringIO import StringIO

from zope.interface import implements
from twisted.internet import defer
from twisted.internet.interfaces import IPushProducer, IConsumer
from twisted.protocols import basic

from allmydata.interfaces import IFileNode, IFileURI, ICheckable
from allmydata.util import observer, log, base32
from allmydata.immutable.checker import SimpleCHKFileChecker, \
     SimpleCHKFileVerifier
from allmydata.immutable import download

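# _ImmutableFileNodeBase holds the behavior shared by both immutable node
# types defined below (FileNode for CHK-encoded files, LiteralFileNode for
# files whose data is embedded in the URI itself): URI-based identity and
# the immutable/read-only predicates.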
class _ImmutableFileNodeBase(object):
    implements(IFileNode, ICheckable)

    def __init__(self, uri, client):
        self.u = IFileURI(uri)
        self._client = client

    def get_readonly_uri(self):
        return self.get_uri()

    def is_mutable(self):
        return False

    def is_readonly(self):
        return True

    def __hash__(self):
        return self.u.__hash__()
    def __eq__(self, other):
        if IFileNode.providedBy(other):
            return self.u.__eq__(other.u)
        else:
            return False
    def __ne__(self, other):
        if IFileNode.providedBy(other):
            return not self.u.__eq__(other.u)
        else:
            return True
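
# Equality between immutable nodes follows the underlying URI: for
# hypothetical nodes a and b, a == b exactly when a.u and b.u compare equal,
# regardless of which client or cache file each node was built with.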

class PortionOfFile:
    # like a list slice (things[2:14]), but for a file on disk
    def __init__(self, fn, offset=0, size=None):
        self.f = open(fn, "rb")
        self.f.seek(offset)
        self.bytes_left = size

    def read(self, size=None):
        # bytes_to_read = min(size, self.bytes_left), but None > anything
        if size is None:
            bytes_to_read = self.bytes_left
        elif self.bytes_left is None:
            bytes_to_read = size
        else:
            bytes_to_read = min(size, self.bytes_left)
        data = self.f.read(bytes_to_read)
        if self.bytes_left is not None:
            self.bytes_left -= len(data)
        return data
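
# Illustrative use of PortionOfFile (hypothetical filename): the slice
# analogy means PortionOfFile("f.cache", offset=2, size=12).read() returns
# the same bytes as open("f.cache", "rb").read()[2:14].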

class FileNode(_ImmutableFileNodeBase):
    checker_class = SimpleCHKFileChecker
    verifier_class = SimpleCHKFileVerifier

    def __init__(self, uri, client, cachefile):
        _ImmutableFileNodeBase.__init__(self, uri, client)
        self.cachefile = cachefile
        # five states:
        #  1. new FileNode, no downloads ever performed
        #  2. new FileNode, leftover file (partial)
        #  3. new FileNode, leftover file (whole)
        #  4. download in progress, not yet complete
        #  5. download complete
        self.download_in_progress = False
        self.fully_cached_observer = observer.OneShotObserverList()

    def get_uri(self):
        return self.u.to_string()

    def get_size(self):
        return self.u.get_size()

    def get_verifier(self):
        return self.u.get_verifier()

    def get_storage_index(self):
        return self.u.storage_index
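
    # check() asks servers about the presence of this file's shares; with
    # verify=True it goes further and downloads each share to validate its
    # hashes, which is far more expensive.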
    def check(self, monitor, verify=False):
        # TODO: pass the Monitor to SimpleCHKFileChecker or
        # SimpleCHKFileVerifier, have it call monitor.raise_if_cancelled()
        # before sending each request.
        storage_index = self.u.storage_index
        k = self.u.needed_shares
        N = self.u.total_shares
        size = self.u.size
        ueb_hash = self.u.uri_extension_hash
        if verify:
            v = self.verifier_class(self._client,
                                    storage_index, k, N, size, ueb_hash)
        else:
            v = self.checker_class(self._client, storage_index, k, N)
        return v.start()

    def check_and_repair(self, monitor, verify=False):
        # this is a stub, to allow the deep-check tests to pass.
        #raise NotImplementedError("not implemented yet")
        from allmydata.checker_results import CheckAndRepairResults
        cr = CheckAndRepairResults(self.u.storage_index)
        d = self.check(monitor, verify)
        def _done(r):
            cr.pre_repair_results = cr.post_repair_results = r
            cr.repair_attempted = False
            return cr
        d.addCallback(_done)
        return d

    def read(self, consumer, offset=0, size=None):
        if size is None:
            size = self.get_size() - offset

        assert self.cachefile

        try:
            filesize = os.stat(self.cachefile)[stat.ST_SIZE]
        except OSError:
            filesize = 0
        if filesize >= offset+size:
            log.msg(format=("immutable filenode read [%(si)s]: " +
                            "satisfied from cache " +
                            "(read %(start)d+%(size)d, filesize %(filesize)d)"),
                    si=base32.b2a(self.u.storage_index),
                    start=offset, size=size, filesize=filesize,
                    umid="5p5ECA", level=log.OPERATIONAL)
            f = PortionOfFile(self.cachefile, offset, size)
            d = basic.FileSender().beginFileTransfer(f, consumer)
            d.addCallback(lambda lastSent: consumer)
            return d

        if offset == 0 and size == self.get_size():
            # don't use the cache, just do a normal streaming download
            log.msg(format=("immutable filenode read [%(si)s]: " +
                            "doing normal full download"),
                    si=base32.b2a(self.u.storage_index),
                    umid="VRSBwg", level=log.OPERATIONAL)
            return self.download(download.ConsumerAdapter(consumer))

        if not self.download_in_progress:
            log.msg(format=("immutable filenode read [%(si)s]: " +
                            "starting download"),
                    si=base32.b2a(self.u.storage_index),
                    umid="h26Heg", level=log.OPERATIONAL)
            self.start_download_to_cache()

        # The file is being downloaded, but the portion we want isn't yet
        # available, so we have to wait. First cut: wait for the whole thing
        # to download. The second cut will be to wait for a specific range
        # milestone, with a download target that counts bytes and compares
        # them against a milestone list.
        log.msg(format=("immutable filenode read [%(si)s]: " +
                        "waiting for download"),
                si=base32.b2a(self.u.storage_index),
                umid="l48V7Q", level=log.OPERATIONAL)
        d = self.when_fully_cached()
        d.addCallback(lambda ignored: self.read(consumer, offset, size))
        return d
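
    # Illustrative caller (hypothetical names): read() delivers the
    # requested byte range to any IConsumer, retrying via the cache once a
    # background download completes:
    #
    #   d = filenode.read(my_consumer, offset=1024, size=4096)
    #   d.addCallback(lambda consumer: log.msg("range delivered"))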

    def start_download_to_cache(self):
        assert not self.download_in_progress
        self.download_in_progress = True
        downloader = self._client.getServiceNamed("downloader")
        d = downloader.download_to_filename(self.get_uri(), self.cachefile)
        d.addBoth(self.fully_cached_observer.fire)

    def when_fully_cached(self):
        return self.fully_cached_observer.when_fired()

    def download(self, target):
        downloader = self._client.getServiceNamed("downloader")
        return downloader.download(self.get_uri(), target)

    def download_to_data(self):
        downloader = self._client.getServiceNamed("downloader")
        return downloader.download_to_data(self.get_uri())


class LiteralProducer:
    implements(IPushProducer)

    def resumeProducing(self):
        pass

    def stopProducing(self):
        pass
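
# LiteralProducer is deliberately a no-op: literal data is already in memory
# and is written to the target synchronously, so there is nothing to pause
# or resume. It exists only so that IConsumer targets see a producer
# registered before write() is called (see LiteralFileNode.download below).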


class LiteralFileNode(_ImmutableFileNodeBase):

    def __init__(self, uri, client):
        _ImmutableFileNodeBase.__init__(self, uri, client)

    def get_uri(self):
        return self.u.to_string()

    def get_size(self):
        return len(self.u.data)

    def get_verifier(self):
        return None

    def get_storage_index(self):
        return None
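
    # A literal file's data lives entirely inside its URI: there are no
    # remotely-stored shares to examine or repair, so checking always
    # succeeds trivially.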
    def check(self, monitor, verify=False):
        return defer.succeed(None)

    def check_and_repair(self, monitor, verify=False):
        return defer.succeed(None)

    def read(self, consumer, offset=0, size=None):
        if size is None:
            data = self.u.data[offset:]
        else:
            data = self.u.data[offset:offset+size]

        # We use twisted.protocols.basic.FileSender, which only does
        # non-streaming, i.e. PullProducer, where the receiver/consumer must
        # ask explicitly for each chunk of data. There are only two places in
        # the Twisted codebase that can't handle streaming=False, both of
        # which are in the upload path for an FTP/SFTP server
        # (protocols.ftp.FileConsumer and
        # vfs.adapters.ftp._FileToConsumerAdapter), neither of which is
        # likely to be used as the target for a Tahoe download.

        d = basic.FileSender().beginFileTransfer(StringIO(data), consumer)
        d.addCallback(lambda lastSent: consumer)
        return d

    def download(self, target):
        # note that this does not update the stats_provider
        data = self.u.data
        if IConsumer.providedBy(target):
            target.registerProducer(LiteralProducer(), True)
        target.open(len(data))
        target.write(data)
        if IConsumer.providedBy(target):
            target.unregisterProducer()
        target.close()
        return defer.maybeDeferred(target.finish)

    def download_to_data(self):
        data = self.u.data
        return defer.succeed(data)
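
# Illustrative usage (hypothetical client and URIs): both node types expose
# the same download interface, so callers need not distinguish CHK files
# from literal files:
#
#   node = FileNode(chk_uri, client, cachefile)   # shares on storage servers
#   node = LiteralFileNode(lit_uri, client)       # data embedded in the URI
#   d = node.download_to_data()
#   d.addCallback(lambda data: process(data))     # process() is hypothetical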
|