import os.path, stat

from cStringIO import StringIO
from zope.interface import implements
from twisted.internet import defer
from twisted.internet.interfaces import IPushProducer, IConsumer
from twisted.protocols import basic
from foolscap.eventual import eventually

from allmydata.interfaces import IFileNode, IFileURI, ICheckable, \
     IDownloadTarget
from allmydata.util import log, base32
from allmydata.uri import from_string as uri_from_string
from allmydata.immutable.checker import SimpleCHKFileChecker, \
     SimpleCHKFileVerifier
from allmydata.immutable import download


class _ImmutableFileNodeBase(object):
    implements(IFileNode, ICheckable)

    def __init__(self, uri, client):
        self.u = IFileURI(uri)
        self._client = client

    def get_readonly_uri(self):
        return self.get_uri()

    def is_mutable(self):
        return False

    def is_readonly(self):
        return True

    def __hash__(self):
        return self.u.__hash__()
    def __eq__(self, other):
        if IFileNode.providedBy(other):
            return self.u.__eq__(other.u)
        else:
            return False
    def __ne__(self, other):
        if IFileNode.providedBy(other):
            return not self.u.__eq__(other.u)
        else:
            return True

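# Note on equality: two immutable filenodes compare equal exactly when their
# underlying URIs compare equal. For example (names hypothetical, for
# illustration only):
#
#   FileNode(cap, client, cachefile_a) == FileNode(cap, client, cachefile_b)
#
# is True, regardless of the state of either node's cache.
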
class PortionOfFile:
    # like a list slice (things[2:14]), but for a file on disk
    def __init__(self, fn, offset=0, size=None):
        self.f = open(fn, "rb")
        self.f.seek(offset)
        self.bytes_left = size

    def read(self, size=None):
        # bytes_to_read = min(size, self.bytes_left), but None > anything
        if size is None:
            bytes_to_read = self.bytes_left
        elif self.bytes_left is None:
            bytes_to_read = size
        else:
            bytes_to_read = min(size, self.bytes_left)
        data = self.f.read(bytes_to_read)
        if self.bytes_left is not None:
            self.bytes_left -= len(data)
        return data

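# Example use of PortionOfFile (the path is hypothetical, for illustration
# only): read a 12-byte window starting at offset 2, in two calls:
#
#   part = PortionOfFile("/tmp/some-cachefile", offset=2, size=12)
#   first = part.read(8)   # at most 8 bytes, taken from offsets 2..9
#   rest = part.read()     # whatever remains of the 12-byte window
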
class DownloadCache:
    implements(IDownloadTarget)

    def __init__(self, node, cachefile):
        self._downloader = node._client.getServiceNamed("downloader")
        self._uri = node.get_uri()
        self._storage_index = node.get_storage_index()
        self.milestones = set() # of (offset,size,Deferred)
        self.cachefile = cachefile
        self.download_in_progress = False
        # five states:
        #  new FileNode, no downloads ever performed
        #  new FileNode, leftover file (partial)
        #  new FileNode, leftover file (whole)
        #  download in progress, not yet complete
        #  download complete

    def when_range_available(self, offset, size):
        assert isinstance(offset, (int,long))
        assert isinstance(size, (int,long))

        d = defer.Deferred()
        self.milestones.add( (offset,size,d) )
        self._check_milestones()
        if self.milestones and not self.download_in_progress:
            self.download_in_progress = True
            log.msg(format=("immutable filenode read [%(si)s]: " +
                            "starting download"),
                    si=base32.b2a(self._storage_index),
                    umid="h26Heg", level=log.OPERATIONAL)
            d2 = self._downloader.download(self._uri, self)
            d2.addBoth(self._download_done)
            d2.addErrback(self._download_failed)
            d2.addErrback(log.err, umid="cQaM9g")
        return d
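
    # The intended calling pattern (FileNode.read below does exactly this;
    # `cache` here names this object, for illustration):
    #
    #   d = cache.when_range_available(offset, size)
    #   d.addCallback(lambda ign: cache.read(consumer, offset, size))
    #
    # The Deferred fires, via _check_milestones(), once the cachefile holds
    # at least offset+size bytes; when_range_available() itself kicks off
    # the download if one is not already running.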

    def read(self, consumer, offset, size):
        assert offset+size <= self.get_filesize()
        f = PortionOfFile(self.cachefile.get_filename(), offset, size)
        d = basic.FileSender().beginFileTransfer(f, consumer)
        d.addCallback(lambda lastSent: consumer)
        return d

    def _download_done(self, res):
        # clear download_in_progress, so failed downloads can be re-tried
        self.download_in_progress = False
        return res

    def _download_failed(self, f):
        # tell anyone who's waiting that we failed
        for m in self.milestones:
            (offset,size,d) = m
            eventually(d.errback, f)
        self.milestones.clear()

    def _check_milestones(self):
        current_size = self.get_filesize()
        for m in list(self.milestones):
            (offset,size,d) = m
            if offset+size <= current_size:
                log.msg(format=("immutable filenode read [%(si)s] " +
                                "%(offset)d+%(size)d vs %(filesize)d: " +
                                "done"),
                        si=base32.b2a(self._storage_index),
                        offset=offset, size=size, filesize=current_size,
                        umid="nuedUg", level=log.NOISY)
                self.milestones.discard(m)
                eventually(d.callback, None)
            else:
                log.msg(format=("immutable filenode read [%(si)s] " +
                                "%(offset)d+%(size)d vs %(filesize)d: " +
                                "still waiting"),
                        si=base32.b2a(self._storage_index),
                        offset=offset, size=size, filesize=current_size,
                        umid="8PKOhg", level=log.NOISY)

    def get_filesize(self):
        try:
            filesize = os.stat(self.cachefile.get_filename())[stat.ST_SIZE]
        except OSError:
            filesize = 0
        return filesize
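
    # The methods below complete the IDownloadTarget interface: the
    # downloader calls open/write/close as the file's bytes arrive, and
    # write() and close() give _check_milestones() a chance to fire any
    # pending read whose byte range is now present in the cachefile.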
    def open(self, size):
        self.f = open(self.cachefile.get_filename(), "wb")
    def write(self, data):
        self.f.write(data)
        self._check_milestones()
    def close(self):
        self.f.close()
        self._check_milestones()

    def fail(self, why):
        pass
    def register_canceller(self, cb):
        pass
    def finish(self):
        return None


class FileNode(_ImmutableFileNodeBase):
    checker_class = SimpleCHKFileChecker
    verifier_class = SimpleCHKFileVerifier

    def __init__(self, uri, client, cachefile):
        _ImmutableFileNodeBase.__init__(self, uri, client)
        self.download_cache = DownloadCache(self, cachefile)

    def get_uri(self):
        return self.u.to_string()

    def get_size(self):
        return self.u.get_size()

    def get_verify_cap(self):
        return self.u.get_verify_cap()

    def get_storage_index(self):
        return self.u.storage_index

    def check(self, monitor, verify=False):
        # TODO: pass the Monitor to SimpleCHKFileChecker or
        # SimpleCHKFileVerifier, have it call monitor.raise_if_cancelled()
        # before sending each request.
        storage_index = self.u.storage_index
        assert IFileURI.providedBy(self.u), self.u
        k = self.u.needed_shares
        N = self.u.total_shares
        size = self.u.size
        ueb_hash = self.u.uri_extension_hash
        if verify:
            v = self.verifier_class(self._client,
                                    uri_from_string(self.get_uri()), storage_index,
                                    k, N, size, ueb_hash)
        else:
            v = self.checker_class(self._client,
                                   uri_from_string(self.get_uri()), storage_index,
                                   k, N)
        return v.start()

    def check_and_repair(self, monitor, verify=False):
        # this is a stub, to allow the deep-check tests to pass.
        #raise NotImplementedError("not implemented yet")
        from allmydata.checker_results import CheckAndRepairResults
        cr = CheckAndRepairResults(self.u.storage_index)
        d = self.check(monitor, verify)
        def _done(r):
            cr.pre_repair_results = cr.post_repair_results = r
            cr.repair_attempted = False
            return cr
        d.addCallback(_done)
        return d

    def read(self, consumer, offset=0, size=None):
        if size is None:
            size = self.get_size() - offset
        size = min(size, self.get_size() - offset)

        if offset == 0 and size == self.get_size():
            # don't use the cache, just do a normal streaming download
            log.msg(format=("immutable filenode read [%(si)s]: " +
                            "doing normal full download"),
                    si=base32.b2a(self.u.storage_index),
                    umid="VRSBwg", level=log.OPERATIONAL)
            return self.download(download.ConsumerAdapter(consumer))

        d = self.download_cache.when_range_available(offset, size)
        d.addCallback(lambda res:
                      self.download_cache.read(consumer, offset, size))
        return d
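
    # A sketch of driving read() by hand, assuming `node` is a FileNode.
    # MemoryConsumer is hypothetical (any IConsumer provider would do). Since
    # basic.FileSender registers itself as a non-streaming producer, a
    # consumer that is not a Twisted transport must pump resumeProducing():
    #
    #   class MemoryConsumer:
    #       implements(IConsumer)
    #       def __init__(self):
    #           self.chunks = []
    #           self.done = False
    #       def registerProducer(self, producer, streaming):
    #           if streaming:
    #               producer.resumeProducing() # push producer takes over
    #           else:
    #               while not self.done:
    #                   producer.resumeProducing() # one chunk per call
    #       def unregisterProducer(self):
    #           self.done = True
    #       def write(self, data):
    #           self.chunks.append(data)
    #
    #   d = node.read(MemoryConsumer(), offset=0, size=100)
    #   d.addCallback(lambda mc: "".join(mc.chunks)) # -> first 100 bytes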

    def download(self, target):
        downloader = self._client.getServiceNamed("downloader")
        return downloader.download(self.get_uri(), target)

    def download_to_data(self):
        downloader = self._client.getServiceNamed("downloader")
        return downloader.download_to_data(self.get_uri())


class LiteralProducer:
    implements(IPushProducer)

    def resumeProducing(self):
        pass
    def stopProducing(self):
        pass


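# LiteralFileNode handles LIT URIs, which embed the file's entire contents
# in the cap itself, so reads and downloads are satisfied directly from
# self.u.data without touching the network.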
class LiteralFileNode(_ImmutableFileNodeBase):

    def __init__(self, uri, client):
        _ImmutableFileNodeBase.__init__(self, uri, client)

    def get_uri(self):
        return self.u.to_string()

    def get_size(self):
        return len(self.u.data)

    def get_verify_cap(self):
        return None

    def get_storage_index(self):
        return None

    def check(self, monitor, verify=False):
        return defer.succeed(None)

    def check_and_repair(self, monitor, verify=False):
        return defer.succeed(None)

    def read(self, consumer, offset=0, size=None):
        if size is None:
            data = self.u.data[offset:]
        else:
            data = self.u.data[offset:offset+size]

        # We use twisted.protocols.basic.FileSender, which only does
        # non-streaming, i.e. PullProducer, where the receiver/consumer must
        # ask explicitly for each chunk of data. There are only two places in
        # the Twisted codebase that can't handle streaming=False, both of
        # which are in the upload path for an FTP/SFTP server
        # (protocols.ftp.FileConsumer and
        # vfs.adapters.ftp._FileToConsumerAdapter), neither of which is
        # likely to be used as the target for a Tahoe download.

        d = basic.FileSender().beginFileTransfer(StringIO(data), consumer)
        d.addCallback(lambda lastSent: consumer)
        return d

    def download(self, target):
        # note that this does not update the stats_provider
        data = self.u.data
        if IConsumer.providedBy(target):
            target.registerProducer(LiteralProducer(), True)
        target.open(len(data))
        target.write(data)
        if IConsumer.providedBy(target):
            target.unregisterProducer()
        target.close()
        return defer.maybeDeferred(target.finish)

    def download_to_data(self):
        data = self.u.data
        return defer.succeed(data)