import os, random, weakref, itertools, time
from zope.interface import implements
from twisted.internet import defer
from twisted.internet.interfaces import IPushProducer, IConsumer
from twisted.application import service
from foolscap.eventual import eventually

from allmydata.util import base32, mathutil, hashutil, log
from allmydata.util.assertutil import _assert
from allmydata import codec, hashtree, storage, uri
from allmydata.interfaces import IDownloadTarget, IDownloader, IFileURI, \
     IDownloadStatus, IDownloadResults
from allmydata.encode import NotEnoughSharesError
from pycryptopp.cipher.aes import AES

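# This module implements the download side of Tahoe immutable files: it
# locates shares on storage servers, validates them against the hash trees
# named in the URI, decodes each segment with the configured codec, decrypts
# the result with AES, and delivers the plaintext to an IDownloadTarget.
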
class HaveAllPeersError(Exception):
    # we use this to jump out of the loop
    pass

class BadURIExtensionHashValue(Exception):
    pass
class BadPlaintextHashValue(Exception):
    pass
class BadCrypttextHashValue(Exception):
    pass

class DownloadStopped(Exception):
    pass

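# DownloadResults accumulates the measurements a single download produces:
# which servers were used, any per-server problems, the map of which server
# held which share, a dictionary of timing breakdowns, and the file size.
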
class DownloadResults:
    implements(IDownloadResults)

    def __init__(self):
        self.servers_used = set()
        self.server_problems = {}
        self.servermap = {}
        self.timings = {}
        self.file_size = None

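# Output receives validated crypttext one segment at a time: it hashes the
# crypttext, decrypts it with AES, hashes the resulting plaintext, checks
# both against their segment hash trees when those are available, and then
# writes the plaintext to the Downloadable target.
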
class Output:
    def __init__(self, downloadable, key, total_length, log_parent,
                 download_status):
        self.downloadable = downloadable
        self._decryptor = AES(key)
        self._crypttext_hasher = hashutil.crypttext_hasher()
        self._plaintext_hasher = hashutil.plaintext_hasher()
        self.length = 0
        self.total_length = total_length
        self._segment_number = 0
        self._plaintext_hash_tree = None
        self._crypttext_hash_tree = None
        self._opened = False
        self._log_parent = log_parent
        self._status = download_status
        self._status.set_progress(0.0)

    def log(self, *args, **kwargs):
        if "parent" not in kwargs:
            kwargs["parent"] = self._log_parent
        if "facility" not in kwargs:
            kwargs["facility"] = "download.output"
        return log.msg(*args, **kwargs)

    def setup_hashtrees(self, plaintext_hashtree, crypttext_hashtree):
        self._plaintext_hash_tree = plaintext_hashtree
        self._crypttext_hash_tree = crypttext_hashtree

    def write_segment(self, crypttext):
        self.length += len(crypttext)
        self._status.set_progress( float(self.length) / self.total_length )

        # memory footprint: 'crypttext' is the only segment_size usage
        # outstanding. While we decrypt it into 'plaintext', we hit
        # 2*segment_size.
        self._crypttext_hasher.update(crypttext)
        if self._crypttext_hash_tree:
            ch = hashutil.crypttext_segment_hasher()
            ch.update(crypttext)
            crypttext_leaves = {self._segment_number: ch.digest()}
            self.log(format="crypttext leaf hash (%(bytes)sB) [%(segnum)d] is %(hash)s",
                     bytes=len(crypttext),
                     segnum=self._segment_number, hash=base32.b2a(ch.digest()),
                     level=log.NOISY)
            self._crypttext_hash_tree.set_hashes(leaves=crypttext_leaves)

        plaintext = self._decryptor.process(crypttext)
        del crypttext

        # now we're back down to 1*segment_size.

        self._plaintext_hasher.update(plaintext)
        if self._plaintext_hash_tree:
            ph = hashutil.plaintext_segment_hasher()
            ph.update(plaintext)
            plaintext_leaves = {self._segment_number: ph.digest()}
            self.log(format="plaintext leaf hash (%(bytes)sB) [%(segnum)d] is %(hash)s",
                     bytes=len(plaintext),
                     segnum=self._segment_number, hash=base32.b2a(ph.digest()),
                     level=log.NOISY)
            self._plaintext_hash_tree.set_hashes(leaves=plaintext_leaves)

        self._segment_number += 1
        # We're still at 1*segment_size. The Downloadable is responsible for
        # any memory usage beyond this.
        if not self._opened:
            self._opened = True
            self.downloadable.open(self.total_length)
        self.downloadable.write(plaintext)

    def fail(self, why):
        # this is really unusual, and deserves maximum forensics
        if why.check(DownloadStopped):
            # DownloadStopped just means the consumer aborted the download,
            # not so scary
            self.log("download stopped", level=log.UNUSUAL)
        else:
            self.log("download failed!", failure=why, level=log.SCARY)
        self.downloadable.fail(why)

    def close(self):
        self.crypttext_hash = self._crypttext_hasher.digest()
        self.plaintext_hash = self._plaintext_hasher.digest()
        self.log("download finished, closing IDownloadable", level=log.NOISY)
        self.downloadable.close()

    def finish(self):
        return self.downloadable.finish()

class ValidatedBucket:
    """I am a front-end for a remote storage bucket, responsible for
    retrieving and validating data from that bucket.

    My get_block() method is used by BlockDownloaders.
    """

    def __init__(self, sharenum, bucket,
                 share_hash_tree, roothash,
                 num_blocks):
        self.sharenum = sharenum
        self.bucket = bucket
        self._share_hash = None # None means not validated yet
        self.share_hash_tree = share_hash_tree
        self._roothash = roothash
        self.block_hash_tree = hashtree.IncompleteHashTree(num_blocks)
        self.started = False

    def get_block(self, blocknum):
        if not self.started:
            d = self.bucket.start()
            def _started(res):
                self.started = True
                return self.get_block(blocknum)
            d.addCallback(_started)
            return d

        # the first time we use this bucket, we need to fetch enough elements
        # of the share hash tree to validate it from our share hash up to the
        # hashroot.
        if not self._share_hash:
            d1 = self.bucket.get_share_hashes()
        else:
            d1 = defer.succeed([])

        # we might need to grab some elements of our block hash tree, to
        # validate the requested block up to the share hash
        needed = self.block_hash_tree.needed_hashes(blocknum)
        if needed:
            # TODO: get fewer hashes, use get_block_hashes(needed)
            d2 = self.bucket.get_block_hashes()
        else:
            d2 = defer.succeed([])

        d3 = self.bucket.get_block(blocknum)

        d = defer.gatherResults([d1, d2, d3])
        d.addCallback(self._got_data, blocknum)
        return d

    def _got_data(self, res, blocknum):
        sharehashes, blockhashes, blockdata = res
        blockhash = None # to make logging it safe

        try:
            if not self._share_hash:
                sh = dict(sharehashes)
                sh[0] = self._roothash # always use our own root, from the URI
                sht = self.share_hash_tree
                if sht.get_leaf_index(self.sharenum) not in sh:
                    raise hashtree.NotEnoughHashesError
                sht.set_hashes(sh)
                self._share_hash = sht.get_leaf(self.sharenum)

            blockhash = hashutil.block_hash(blockdata)
            #log.msg("checking block_hash(shareid=%d, blocknum=%d) len=%d "
            #        "%r .. %r: %s" %
            #        (self.sharenum, blocknum, len(blockdata),
            #         blockdata[:50], blockdata[-50:], base32.b2a(blockhash)))

            # we always validate the blockhash
            bh = dict(enumerate(blockhashes))
            # replace blockhash root with validated value
            bh[0] = self._share_hash
            self.block_hash_tree.set_hashes(bh, {blocknum: blockhash})

        except (hashtree.BadHashError, hashtree.NotEnoughHashesError):
            # log.WEIRD: indicates undetected disk/network error, or more
            # likely a programming error
            log.msg("hash failure in block=%d, shnum=%d on %s" %
                    (blocknum, self.sharenum, self.bucket))
            if self._share_hash:
                log.msg(""" failure occurred when checking the block_hash_tree.
                This suggests that either the block data was bad, or that the
                block hashes we received along with it were bad.""")
            else:
                log.msg(""" the failure probably occurred when checking the
                share_hash_tree, which suggests that the share hashes we
                received from the remote peer were bad.""")
            log.msg(" have self._share_hash: %s" % bool(self._share_hash))
            log.msg(" block length: %d" % len(blockdata))
            log.msg(" block hash: %s" % base32.b2a_or_none(blockhash))
            if len(blockdata) < 100:
                log.msg(" block data: %r" % (blockdata,))
            else:
                log.msg(" block data start/end: %r .. %r" %
                        (blockdata[:50], blockdata[-50:]))
            log.msg(" root hash: %s" % base32.b2a(self._roothash))
            log.msg(" share hash tree:\n" + self.share_hash_tree.dump())
            log.msg(" block hash tree:\n" + self.block_hash_tree.dump())
            lines = []
            for i,h in sorted(sharehashes):
                lines.append("%3d: %s" % (i, base32.b2a_or_none(h)))
            log.msg(" sharehashes:\n" + "\n".join(lines) + "\n")
            lines = []
            for i,h in enumerate(blockhashes):
                lines.append("%3d: %s" % (i, base32.b2a_or_none(h)))
            log.msg(" blockhashes:\n" + "\n".join(lines) + "\n")
            raise

        # If we made it here, the block is good. If the hash trees didn't
        # like what they saw, they would have raised a BadHashError, causing
        # our caller to see a Failure and thus ignore this block (as well as
        # dropping this bucket).
        return blockdata

class BlockDownloader:
    """I am responsible for downloading a single block (from a single bucket)
    for a single segment.

    I am a child of the SegmentDownloader.
    """

    def __init__(self, vbucket, blocknum, parent, results):
        self.vbucket = vbucket
        self.blocknum = blocknum
        self.parent = parent
        self.results = results
        self._log_number = self.parent.log("starting block %d" % blocknum)

    def log(self, msg, parent=None):
        if parent is None:
            parent = self._log_number
        return self.parent.log(msg, parent=parent)

    def start(self, segnum):
        lognum = self.log("get_block(segnum=%d)" % segnum)
        started = time.time()
        d = self.vbucket.get_block(segnum)
        d.addCallbacks(self._hold_block, self._got_block_error,
                       callbackArgs=(started, lognum,), errbackArgs=(lognum,))
        return d

    def _hold_block(self, data, started, lognum):
        if self.results:
            elapsed = time.time() - started
            peerid = self.vbucket.bucket.get_peerid()
            if peerid not in self.results.timings["fetch_per_server"]:
                self.results.timings["fetch_per_server"][peerid] = []
            self.results.timings["fetch_per_server"][peerid].append(elapsed)
        self.log("got block", parent=lognum)
        self.parent.hold_block(self.blocknum, data)

    def _got_block_error(self, f, lognum):
        self.log("BlockDownloader[%d] got error: %s" % (self.blocknum, f),
                 parent=lognum)
        if self.results:
            peerid = self.vbucket.bucket.get_peerid()
            self.results.server_problems[peerid] = str(f)
        self.parent.bucket_failed(self.vbucket)

class SegmentDownloader:
    """I am responsible for downloading all the blocks for a single segment
    of data.

    I am a child of the FileDownloader.
    """

    def __init__(self, parent, segmentnumber, needed_shares, results):
        self.parent = parent
        self.segmentnumber = segmentnumber
        self.needed_blocks = needed_shares
        self.blocks = {} # k: blocknum, v: data
        self.results = results
        self._log_number = self.parent.log("starting segment %d" %
                                           segmentnumber)

    def log(self, msg, parent=None):
        if parent is None:
            parent = self._log_number
        return self.parent.log(msg, parent=parent)

    def start(self):
        return self._download()

    def _download(self):
        d = self._try()
        def _done(res):
            if len(self.blocks) >= self.needed_blocks:
                # we only need self.needed_blocks blocks
                # we want to get the smallest blockids, because they are
                # more likely to be fast "primary blocks"
                blockids = sorted(self.blocks.keys())[:self.needed_blocks]
                blocks = []
                for blocknum in blockids:
                    blocks.append(self.blocks[blocknum])
                return (blocks, blockids)
            else:
                return self._download()
        d.addCallback(_done)
        return d

    def _try(self):
        # fill our set of active buckets, maybe raising NotEnoughSharesError
        active_buckets = self.parent._activate_enough_buckets()
        # Now we have enough buckets, in self.parent.active_buckets.

        # in test cases, bd.start might mutate active_buckets right away, so
        # we need to put off calling start() until we've iterated all the way
        # through it.
        downloaders = []
        for blocknum, vbucket in active_buckets.iteritems():
            bd = BlockDownloader(vbucket, blocknum, self, self.results)
            downloaders.append(bd)
            if self.results:
                self.results.servers_used.add(vbucket.bucket.get_peerid())
        l = [bd.start(self.segmentnumber) for bd in downloaders]
        return defer.DeferredList(l, fireOnOneErrback=True)

    def hold_block(self, blocknum, data):
        self.blocks[blocknum] = data

    def bucket_failed(self, vbucket):
        self.parent.bucket_failed(vbucket)

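# DownloadStatus tracks the externally-visible state of one download: the
# storage index, size, whether a helper is in use, a human-readable status
# string, progress, paused/stopped/active flags, the DownloadResults, and a
# unique counter. IDownloadStatus consumers read it through the getters.
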
class DownloadStatus:
    implements(IDownloadStatus)
    statusid_counter = itertools.count(0)

    def __init__(self):
        self.storage_index = None
        self.size = None
        self.helper = False
        self.status = "Not started"
        self.progress = 0.0
        self.paused = False
        self.stopped = False
        self.active = True
        self.results = None
        self.counter = self.statusid_counter.next()
        self.started = time.time()

    def get_started(self):
        return self.started
    def get_storage_index(self):
        return self.storage_index
    def get_size(self):
        return self.size
    def using_helper(self):
        return self.helper
    def get_status(self):
        status = self.status
        if self.paused:
            status += " (output paused)"
        if self.stopped:
            status += " (output stopped)"
        return status
    def get_progress(self):
        return self.progress
    def get_active(self):
        return self.active
    def get_results(self):
        return self.results
    def get_counter(self):
        return self.counter

    def set_storage_index(self, si):
        self.storage_index = si
    def set_size(self, size):
        self.size = size
    def set_helper(self, helper):
        self.helper = helper
    def set_status(self, status):
        self.status = status
    def set_paused(self, paused):
        self.paused = paused
    def set_stopped(self, stopped):
        self.stopped = stopped
    def set_progress(self, value):
        self.progress = value
    def set_active(self, value):
        self.active = value
    def set_results(self, value):
        self.results = value

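# FileDownloader drives the whole download of one CHK file: it asks the
# permuted list of storage servers for buckets, fetches and validates the
# URI extension block and the optional plaintext/crypttext hash trees, then
# downloads, decodes, and decrypts each segment in turn, pushing plaintext
# into the Output. It also acts as an IPushProducer so the consumer can
# pause or stop the flow of data.
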
class FileDownloader:
    implements(IPushProducer)
    check_crypttext_hash = True
    check_plaintext_hash = True
    _status = None

    def __init__(self, client, u, downloadable):
        self._client = client

        u = IFileURI(u)
        self._storage_index = u.storage_index
        self._uri_extension_hash = u.uri_extension_hash
        self._total_shares = u.total_shares
        self._size = u.size
        self._num_needed_shares = u.needed_shares

        self._si_s = storage.si_b2a(self._storage_index)
        self.init_logging()

        self._started = time.time()
        self._status = s = DownloadStatus()
        s.set_status("Starting")
        s.set_storage_index(self._storage_index)
        s.set_size(self._size)
        s.set_helper(False)
        s.set_active(True)

        self._results = DownloadResults()
        s.set_results(self._results)
        self._results.file_size = self._size
        self._results.timings["servers_peer_selection"] = {}
        self._results.timings["fetch_per_server"] = {}
        self._results.timings["cumulative_fetch"] = 0.0
        self._results.timings["cumulative_decode"] = 0.0
        self._results.timings["cumulative_decrypt"] = 0.0
        self._results.timings["paused"] = 0.0

        if IConsumer.providedBy(downloadable):
            downloadable.registerProducer(self, True)
        self._downloadable = downloadable
        self._output = Output(downloadable, u.key, self._size, self._log_number,
                              self._status)
        self._paused = False
        self._stopped = False

        self.active_buckets = {} # k: shnum, v: bucket
        self._share_buckets = [] # list of (sharenum, bucket) tuples
        self._share_vbuckets = {} # k: shnum, v: set of ValidatedBuckets
        self._uri_extension_sources = []

        self._uri_extension_data = None

        self._fetch_failures = {"uri_extension": 0,
                                "plaintext_hashroot": 0,
                                "plaintext_hashtree": 0,
                                "crypttext_hashroot": 0,
                                "crypttext_hashtree": 0,
                                }

    def init_logging(self):
        self._log_prefix = prefix = storage.si_b2a(self._storage_index)[:5]
        num = self._client.log(format="FileDownloader(%(si)s): starting",
                               si=storage.si_b2a(self._storage_index))
        self._log_number = num

    def log(self, *args, **kwargs):
        if "parent" not in kwargs:
            kwargs["parent"] = self._log_number
        if "facility" not in kwargs:
            kwargs["facility"] = "tahoe.download"
        return log.msg(*args, **kwargs)

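    # IPushProducer interface: the consumer (registered in __init__ when the
    # downloadable provides IConsumer) calls these to throttle or abort the
    # download. Pausing installs a Deferred that _check_for_pause waits on
    # before fetching or writing the next segment; stopProducing sets a flag
    # that makes _check_for_pause raise DownloadStopped.
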
    def pauseProducing(self):
        if self._paused:
            return
        self._paused = defer.Deferred()
        self._paused_at = time.time()
        if self._status:
            self._status.set_paused(True)

    def resumeProducing(self):
        if self._paused:
            p = self._paused
            self._paused = None
            eventually(p.callback, None)
            if self._status:
                self._status.set_paused(False)

    def stopProducing(self):
        self.log("Download.stopProducing")
        self._stopped = True
        paused_for = time.time() - self._paused_at
        self._results.timings['paused'] += paused_for
        if self._status:
            self._status.set_stopped(True)
            self._status.set_active(False)

    def start(self):
        self.log("starting download")

        # first step: who should we download from?
        d = defer.maybeDeferred(self._get_all_shareholders)
        d.addCallback(self._got_all_shareholders)
        # now get the uri_extension block from somebody and validate it
        d.addCallback(self._obtain_uri_extension)
        d.addCallback(self._got_uri_extension)
        d.addCallback(self._get_hashtrees)
        d.addCallback(self._create_validated_buckets)
        # once we know that, we can download blocks from everybody
        d.addCallback(self._download_all_segments)
        def _finished(res):
            if self._status:
                self._status.set_status("Finished")
                self._status.set_active(False)
                self._status.set_paused(False)
            if IConsumer.providedBy(self._downloadable):
                self._downloadable.unregisterProducer()
            return res
        d.addBoth(_finished)
        def _failed(why):
            if self._status:
                self._status.set_status("Failed")
                self._status.set_active(False)
            self._output.fail(why)
            return why
        d.addErrback(_failed)
        d.addCallback(self._done)
        return d

    def _get_all_shareholders(self):
        dl = []
        for (peerid,ss) in self._client.get_permuted_peers("storage",
                                                           self._storage_index):
            d = ss.callRemote("get_buckets", self._storage_index)
            d.addCallbacks(self._got_response, self._got_error,
                           callbackArgs=(peerid,))
            dl.append(d)
        self._responses_received = 0
        self._queries_sent = len(dl)
        if self._status:
            self._status.set_status("Locating Shares (%d/%d)" %
                                    (self._responses_received,
                                     self._queries_sent))
        return defer.DeferredList(dl)

    def _got_response(self, buckets, peerid):
        self._responses_received += 1
        if self._results:
            elapsed = time.time() - self._started
            self._results.timings["servers_peer_selection"][peerid] = elapsed
        if self._status:
            self._status.set_status("Locating Shares (%d/%d)" %
                                    (self._responses_received,
                                     self._queries_sent))
        for sharenum, bucket in buckets.iteritems():
            b = storage.ReadBucketProxy(bucket, peerid, self._si_s)
            self.add_share_bucket(sharenum, b)
            self._uri_extension_sources.append(b)
            if self._results:
                if peerid not in self._results.servermap:
                    self._results.servermap[peerid] = set()
                self._results.servermap[peerid].add(sharenum)

    def add_share_bucket(self, sharenum, bucket):
        # this is split out for the benefit of test_encode.py
        self._share_buckets.append( (sharenum, bucket) )

    def _got_error(self, f):
        self._client.log("Somebody failed. -- %s" % (f,))

    def bucket_failed(self, vbucket):
        shnum = vbucket.sharenum
        del self.active_buckets[shnum]
        s = self._share_vbuckets[shnum]
        # s is a set of ValidatedBucket instances
        s.remove(vbucket)
        # ... which might now be empty
        if not s:
            # there are no more buckets which can provide this share, so
            # remove the key. This may prompt us to use a different share.
            del self._share_vbuckets[shnum]

    def _got_all_shareholders(self, res):
        if self._results:
            now = time.time()
            self._results.timings["peer_selection"] = now - self._started

        if len(self._share_buckets) < self._num_needed_shares:
            raise NotEnoughSharesError

        #for s in self._share_vbuckets.values():
        #    for vb in s:
        #        assert isinstance(vb, ValidatedBucket), \
        #               "vb is %s but should be a ValidatedBucket" % (vb,)

    def _unpack_uri_extension_data(self, data):
        return uri.unpack_extension(data)

    def _obtain_uri_extension(self, ignored):
        # all shareholders are supposed to have a copy of uri_extension, and
        # all are supposed to be identical. We compute the hash of the data
        # that comes back, and compare it against the version in our URI. If
        # they don't match, ignore their data and try someone else.
        if self._status:
            self._status.set_status("Obtaining URI Extension")

        self._uri_extension_fetch_started = time.time()
        def _validate(proposal, bucket):
            h = hashutil.uri_extension_hash(proposal)
            if h != self._uri_extension_hash:
                self._fetch_failures["uri_extension"] += 1
                msg = ("The copy of uri_extension we received from "
                       "%s was bad: wanted %s, got %s" %
                       (bucket,
                        base32.b2a(self._uri_extension_hash),
                        base32.b2a(h)))
                self.log(msg, level=log.SCARY)
                raise BadURIExtensionHashValue(msg)
            return self._unpack_uri_extension_data(proposal)
        return self._obtain_validated_thing(None,
                                            self._uri_extension_sources,
                                            "uri_extension",
                                            "get_uri_extension", (), _validate)

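    # _obtain_validated_thing fetches one piece of metadata (named by
    # methname) from the first bucket in 'sources', runs it through
    # validatorfunc, and on any failure retries with the remaining sources.
    # It raises NotEnoughSharesError when no sources are left.
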
    def _obtain_validated_thing(self, ignored, sources, name, methname, args,
                                validatorfunc):
        if not sources:
            raise NotEnoughSharesError("started with zero peers while fetching "
                                       "%s" % name)
        bucket = sources[0]
        sources = sources[1:]
        #d = bucket.callRemote(methname, *args)
        d = bucket.startIfNecessary()
        d.addCallback(lambda res: getattr(bucket, methname)(*args))
        d.addCallback(validatorfunc, bucket)
        def _bad(f):
            self.log("%s from vbucket %s failed:" % (name, bucket),
                     failure=f, level=log.WEIRD)
            if not sources:
                raise NotEnoughSharesError("ran out of peers, last error was %s"
                                           % (f,))
            # try again with a different one
            return self._obtain_validated_thing(None, sources, name,
                                                methname, args, validatorfunc)
        d.addErrback(_bad)
        return d

    def _got_uri_extension(self, uri_extension_data):
        if self._results:
            elapsed = time.time() - self._uri_extension_fetch_started
            self._results.timings["uri_extension"] = elapsed

        d = self._uri_extension_data = uri_extension_data

        self._codec = codec.get_decoder_by_name(d['codec_name'])
        self._codec.set_serialized_params(d['codec_params'])
        self._tail_codec = codec.get_decoder_by_name(d['codec_name'])
        self._tail_codec.set_serialized_params(d['tail_codec_params'])

        crypttext_hash = d.get('crypttext_hash', None) # optional
        if crypttext_hash:
            assert isinstance(crypttext_hash, str)
            assert len(crypttext_hash) == 32
        self._crypttext_hash = crypttext_hash
        self._plaintext_hash = d.get('plaintext_hash', None) # optional

        self._roothash = d['share_root_hash']

        self._segment_size = segment_size = d['segment_size']
        self._total_segments = mathutil.div_ceil(self._size, segment_size)
        self._current_segnum = 0

        self._share_hashtree = hashtree.IncompleteHashTree(d['total_shares'])
        self._share_hashtree.set_hashes({0: self._roothash})

    def _get_hashtrees(self, res):
        self._get_hashtrees_started = time.time()
        if self._status:
            self._status.set_status("Retrieving Hash Trees")
        d = defer.maybeDeferred(self._get_plaintext_hashtrees)
        d.addCallback(self._get_crypttext_hashtrees)
        d.addCallback(self._setup_hashtrees)
        return d

    def _get_plaintext_hashtrees(self):
        # plaintext hashes are optional. If the root isn't in the UEB, then
        # the share will be holding an empty list. We don't even bother
        # fetching it.
        if "plaintext_root_hash" not in self._uri_extension_data:
            self._plaintext_hashtree = None
            return
        def _validate_plaintext_hashtree(proposal, bucket):
            if proposal[0] != self._uri_extension_data['plaintext_root_hash']:
                self._fetch_failures["plaintext_hashroot"] += 1
                msg = ("The copy of the plaintext_root_hash we received from"
                       " %s was bad" % bucket)
                raise BadPlaintextHashValue(msg)
            pt_hashtree = hashtree.IncompleteHashTree(self._total_segments)
            pt_hashes = dict(list(enumerate(proposal)))
            try:
                pt_hashtree.set_hashes(pt_hashes)
            except hashtree.BadHashError:
                # the hashes they gave us were not self-consistent, even
                # though the root matched what we saw in the uri_extension
                # block
                self._fetch_failures["plaintext_hashtree"] += 1
                raise
            self._plaintext_hashtree = pt_hashtree
        d = self._obtain_validated_thing(None,
                                         self._uri_extension_sources,
                                         "plaintext_hashes",
                                         "get_plaintext_hashes", (),
                                         _validate_plaintext_hashtree)
        return d

    def _get_crypttext_hashtrees(self, res):
        # crypttext hashes are optional too
        if "crypttext_root_hash" not in self._uri_extension_data:
            self._crypttext_hashtree = None
            return
        def _validate_crypttext_hashtree(proposal, bucket):
            if proposal[0] != self._uri_extension_data['crypttext_root_hash']:
                self._fetch_failures["crypttext_hashroot"] += 1
                msg = ("The copy of the crypttext_root_hash we received from"
                       " %s was bad" % bucket)
                raise BadCrypttextHashValue(msg)
            ct_hashtree = hashtree.IncompleteHashTree(self._total_segments)
            ct_hashes = dict(list(enumerate(proposal)))
            try:
                ct_hashtree.set_hashes(ct_hashes)
            except hashtree.BadHashError:
                self._fetch_failures["crypttext_hashtree"] += 1
                raise
            self._crypttext_hashtree = ct_hashtree
        d = self._obtain_validated_thing(None,
                                         self._uri_extension_sources,
                                         "crypttext_hashes",
                                         "get_crypttext_hashes", (),
                                         _validate_crypttext_hashtree)
        return d

    def _setup_hashtrees(self, res):
        self._output.setup_hashtrees(self._plaintext_hashtree,
                                     self._crypttext_hashtree)
        if self._results:
            elapsed = time.time() - self._get_hashtrees_started
            self._results.timings["hashtrees"] = elapsed

    def _create_validated_buckets(self, ignored=None):
        self._share_vbuckets = {}
        for sharenum, bucket in self._share_buckets:
            vbucket = ValidatedBucket(sharenum, bucket,
                                      self._share_hashtree,
                                      self._roothash,
                                      self._total_segments)
            s = self._share_vbuckets.setdefault(sharenum, set())
            s.add(vbucket)

    def _activate_enough_buckets(self):
        """either return a mapping from shnum to a ValidatedBucket that can
        provide data for that share, or raise NotEnoughSharesError"""

        while len(self.active_buckets) < self._num_needed_shares:
            # need some more
            handled_shnums = set(self.active_buckets.keys())
            available_shnums = set(self._share_vbuckets.keys())
            potential_shnums = list(available_shnums - handled_shnums)
            if not potential_shnums:
                raise NotEnoughSharesError
            # choose a random share
            shnum = random.choice(potential_shnums)
            # and a random bucket that will provide it
            validated_bucket = random.choice(list(self._share_vbuckets[shnum]))
            self.active_buckets[shnum] = validated_bucket
        return self.active_buckets

    def _download_all_segments(self, res):
        # the promise: upon entry to this function, self._share_vbuckets
        # contains enough buckets to complete the download, and some extra
        # ones to tolerate some buckets dropping out or having errors.
        # self._share_vbuckets is a dictionary that maps from shnum to a set
        # of ValidatedBuckets, which themselves are wrappers around
        # RIBucketReader references.
        self.active_buckets = {} # k: shnum, v: ValidatedBucket instance

        self._started_fetching = time.time()

        d = defer.succeed(None)
        for segnum in range(self._total_segments-1):
            d.addCallback(self._download_segment, segnum)
            # this pause, at the end of write, prevents pre-fetch from
            # happening until the consumer is ready for more data.
            d.addCallback(self._check_for_pause)
        d.addCallback(self._download_tail_segment, self._total_segments-1)
        return d

    def _check_for_pause(self, res):
        if self._paused:
            d = defer.Deferred()
            self._paused.addCallback(lambda ignored: d.callback(res))
            return d
        if self._stopped:
            raise DownloadStopped("our Consumer called stopProducing()")
        return res

    def _download_segment(self, res, segnum):
        if self._status:
            self._status.set_status("Downloading segment %d of %d" %
                                    (segnum+1, self._total_segments))
        self.log("downloading seg#%d of %d (%d%%)"
                 % (segnum, self._total_segments,
                    100.0 * segnum / self._total_segments))
        # memory footprint: when the SegmentDownloader finishes pulling down
        # all shares, we have 1*segment_size of usage.
        segmentdler = SegmentDownloader(self, segnum, self._num_needed_shares,
                                        self._results)
        started = time.time()
        d = segmentdler.start()
        def _finished_fetching(res):
            elapsed = time.time() - started
            self._results.timings["cumulative_fetch"] += elapsed
            return res
        if self._results:
            d.addCallback(_finished_fetching)
        # pause before using more memory
        d.addCallback(self._check_for_pause)
        # while the codec does its job, we hit 2*segment_size
        def _started_decode(res):
            self._started_decode = time.time()
            return res
        if self._results:
            d.addCallback(_started_decode)
        d.addCallback(lambda (shares, shareids):
                      self._codec.decode(shares, shareids))
        # once the codec is done, we drop back to 1*segment_size, because
        # 'shares' goes out of scope. The memory usage is all in the
        # plaintext now, spread out into a bunch of tiny buffers.
        def _finished_decode(res):
            elapsed = time.time() - self._started_decode
            self._results.timings["cumulative_decode"] += elapsed
            return res
        if self._results:
            d.addCallback(_finished_decode)

        # pause/check-for-stop just before writing, to honor stopProducing
        d.addCallback(self._check_for_pause)
        def _done(buffers):
            # we start by joining all these buffers together into a single
            # string. This makes Output.write easier, since it wants to hash
            # data one segment at a time anyways, and doesn't impact our
            # memory footprint since we're already peaking at 2*segment_size
            # inside the codec a moment ago.
            segment = "".join(buffers)
            del buffers
            # we're down to 1*segment_size right now, but write_segment()
            # will decrypt a copy of the segment internally, which will push
            # us up to 2*segment_size while it runs.
            started_decrypt = time.time()
            self._output.write_segment(segment)
            if self._results:
                elapsed = time.time() - started_decrypt
                self._results.timings["cumulative_decrypt"] += elapsed
        d.addCallback(_done)
        return d

    def _download_tail_segment(self, res, segnum):
        self.log("downloading seg#%d of %d (%d%%)"
                 % (segnum, self._total_segments,
                    100.0 * segnum / self._total_segments))
        segmentdler = SegmentDownloader(self, segnum, self._num_needed_shares,
                                        self._results)
        started = time.time()
        d = segmentdler.start()
        def _finished_fetching(res):
            elapsed = time.time() - started
            self._results.timings["cumulative_fetch"] += elapsed
            return res
        if self._results:
            d.addCallback(_finished_fetching)
        # pause before using more memory
        d.addCallback(self._check_for_pause)
        def _started_decode(res):
            self._started_decode = time.time()
            return res
        if self._results:
            d.addCallback(_started_decode)
        d.addCallback(lambda (shares, shareids):
                      self._tail_codec.decode(shares, shareids))
        def _finished_decode(res):
            elapsed = time.time() - self._started_decode
            self._results.timings["cumulative_decode"] += elapsed
            return res
        if self._results:
            d.addCallback(_finished_decode)
        # pause/check-for-stop just before writing, to honor stopProducing
        d.addCallback(self._check_for_pause)
        def _done(buffers):
            # trim off any padding added by the upload side
            segment = "".join(buffers)
            del buffers
            # we never send empty segments. If the data was an exact multiple
            # of the segment size, the last segment will be full.
            pad_size = mathutil.pad_size(self._size, self._segment_size)
            tail_size = self._segment_size - pad_size
            segment = segment[:tail_size]
            started_decrypt = time.time()
            self._output.write_segment(segment)
            if self._results:
                elapsed = time.time() - started_decrypt
                self._results.timings["cumulative_decrypt"] += elapsed
        d.addCallback(_done)
        return d

    def _done(self, res):
        self.log("download done")
        if self._results:
            now = time.time()
            self._results.timings["total"] = now - self._started
            self._results.timings["segments"] = now - self._started_fetching
        self._output.close()
        if self.check_crypttext_hash and self._crypttext_hash:
            _assert(self._crypttext_hash == self._output.crypttext_hash,
                    "bad crypttext_hash: computed=%s, expected=%s" %
                    (base32.b2a(self._output.crypttext_hash),
                     base32.b2a(self._crypttext_hash)))
        if self.check_plaintext_hash and self._plaintext_hash:
            _assert(self._plaintext_hash == self._output.plaintext_hash,
                    "bad plaintext_hash: computed=%s, expected=%s" %
                    (base32.b2a(self._output.plaintext_hash),
                     base32.b2a(self._plaintext_hash)))
        _assert(self._output.length == self._size,
                got=self._output.length, expected=self._size)
        return self._output.finish()

    def get_download_status(self):
        return self._status

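# LiteralDownloader handles LIT URIs, where the plaintext is embedded
# directly in the URI itself: no servers are contacted, the data is simply
# written straight to the target.
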
class LiteralDownloader:
    def __init__(self, client, u, downloadable):
        self._uri = IFileURI(u)
        assert isinstance(self._uri, uri.LiteralFileURI)
        self._downloadable = downloadable
        self._status = s = DownloadStatus()
        s.set_storage_index(None)
        s.set_helper(False)
        s.set_status("Done")
        s.set_active(False)
        s.set_progress(1.0)

    def start(self):
        data = self._uri.data
        self._status.set_size(len(data))
        self._downloadable.open(len(data))
        self._downloadable.write(data)
        self._downloadable.close()
        return defer.maybeDeferred(self._downloadable.finish)

    def get_download_status(self):
        return self._status

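# The following IDownloadTarget implementations deliver the downloaded
# plaintext to a named file (FileName), to an in-memory string (Data), or to
# a caller-supplied filehandle (FileHandle).
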
class FileName:
    implements(IDownloadTarget)
    def __init__(self, filename):
        self._filename = filename
        self.f = None
    def open(self, size):
        self.f = open(self._filename, "wb")
        return self.f
    def write(self, data):
        self.f.write(data)
    def close(self):
        if self.f:
            self.f.close()
    def fail(self, why):
        if self.f:
            self.f.close()
            os.unlink(self._filename)
    def register_canceller(self, cb):
        pass # we won't use it
    def finish(self):
        pass

class Data:
    implements(IDownloadTarget)
    def __init__(self):
        self._data = []
    def open(self, size):
        pass
    def write(self, data):
        self._data.append(data)
    def close(self):
        self.data = "".join(self._data)
        del self._data
    def fail(self, why):
        del self._data
    def register_canceller(self, cb):
        pass # we won't use it
    def finish(self):
        return self.data

class FileHandle:
    """Use me to download data to a pre-defined filehandle-like object. I
    will use the target's write() method. I will *not* close the filehandle:
    I leave that up to the originator of the filehandle. The download process
    will return the filehandle when it completes.
    """
    implements(IDownloadTarget)
    def __init__(self, filehandle):
        self._filehandle = filehandle
    def open(self, size):
        pass
    def write(self, data):
        self._filehandle.write(data)
    def close(self):
        # the originator of the filehandle reserves the right to close it
        pass
    def fail(self, why):
        pass
    def register_canceller(self, cb):
        pass
    def finish(self):
        return self._filehandle

class Downloader(service.MultiService):
    """I am a service that allows file downloading.
    """
    implements(IDownloader)
    name = "downloader"
    MAX_DOWNLOAD_STATUSES = 10

    def __init__(self, stats_provider=None):
        service.MultiService.__init__(self)
        self.stats_provider = stats_provider
        self._all_downloads = weakref.WeakKeyDictionary() # for debugging
        self._all_download_statuses = weakref.WeakKeyDictionary()
        self._recent_download_statuses = []

    def download(self, u, t):
        assert self.parent
        assert self.running
        u = IFileURI(u)
        t = IDownloadTarget(t)
        assert t.write
        assert t.close

        if self.stats_provider:
            self.stats_provider.count('downloader.files_downloaded', 1)
            self.stats_provider.count('downloader.bytes_downloaded', u.get_size())

        if isinstance(u, uri.LiteralFileURI):
            dl = LiteralDownloader(self.parent, u, t)
        elif isinstance(u, uri.CHKFileURI):
            dl = FileDownloader(self.parent, u, t)
        else:
            raise RuntimeError("I don't know how to download a %s" % u)
        self._add_download(dl)
        d = dl.start()
        return d

    # utility functions
    def download_to_data(self, uri):
        return self.download(uri, Data())
    def download_to_filename(self, uri, filename):
        return self.download(uri, FileName(filename))
    def download_to_filehandle(self, uri, filehandle):
        return self.download(uri, FileHandle(filehandle))

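    # Usage sketch (assuming this service has been attached to a client node
    # with setServiceParent, so that self.parent is the client):
    #
    #   downloader = client.getServiceNamed("downloader")
    #   d = downloader.download_to_data(uri)
    #   d.addCallback(lambda plaintext: ...)
    #
    # download_to_filename() and download_to_filehandle() work the same way
    # but deliver the plaintext to a file or an open filehandle instead.
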
    def _add_download(self, downloader):
        self._all_downloads[downloader] = None
        s = downloader.get_download_status()
        self._all_download_statuses[s] = None
        self._recent_download_statuses.append(s)
        while len(self._recent_download_statuses) > self.MAX_DOWNLOAD_STATUSES:
            self._recent_download_statuses.pop(0)

    def list_all_download_statuses(self):
        for ds in self._all_download_statuses:
            yield ds