mirror of https://github.com/tahoe-lafs/tahoe-lafs.git
add more information to NotEnoughSharesError, split out new exceptions for no-servers and no-source-of-ueb-hash
parent f15c0be5af
commit 67571eb033
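In outline (a paraphrase of the change, not text from the commit): NotEnoughSharesError grows a (msg, got, needed) constructor so callers can say how many shares they found versus how many they needed, and two narrower exceptions are split out of the cases that previously reused it, one for critical download data (such as the UEB hash) that could not be fetched, and one for an upload that was given no servers at all. A rough sketch of the resulting shapes; the authoritative definitions are in the allmydata.interfaces hunk further down:

# Sketch only: summarizes the interfaces.py hunk below.
class NotEnoughSharesError(Exception):
    def __init__(self, msg, got, needed):
        Exception.__init__(self, msg)
        self.got = got        # shares actually located or placed
        self.needed = needed  # shares required (k, or shares_of_happiness)
        self.servermap = None

class UnableToFetchCriticalDownloadDataError(Exception):
    """Data that should be identical in every share (e.g. the UEB hash) could not be fetched."""

class NoServersError(Exception):
    """The upload was handed zero storage servers to work with."""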
@@ -11,7 +11,8 @@ from allmydata.util.assertutil import _assert, precondition
 from allmydata.util.rrefutil import ServerFailure
 from allmydata import codec, hashtree, uri
 from allmydata.interfaces import IDownloadTarget, IDownloader, IFileURI, IVerifierURI, \
-     IDownloadStatus, IDownloadResults, IValidatedThingProxy, NotEnoughSharesError
+     IDownloadStatus, IDownloadResults, IValidatedThingProxy, NotEnoughSharesError, \
+     UnableToFetchCriticalDownloadDataError
 from allmydata.immutable import layout
 from allmydata.monitor import Monitor
 from pycryptopp.cipher.aes import AES
@@ -95,7 +96,7 @@ class ValidatedThingObtainer:
                  op=self._debugname, validatedthingproxy=str(validatedthingproxy),
                  failure=f, level=level, umid="JGXxBA")
         if not self._validatedthingproxies:
-            raise NotEnoughSharesError("ran out of peers, last error was %s" % (f,))
+            raise UnableToFetchCriticalDownloadDataError("ran out of peers, last error was %s" % (f,))
         # try again with a different one
         d = self._try_the_next_one()
         return d
@@ -801,7 +802,9 @@ class CiphertextDownloader(log.PrefixingLogMixin):
             self._results.timings["peer_selection"] = now - self._started
 
         if len(self._share_buckets) < self._verifycap.needed_shares:
-            raise NotEnoughSharesError(len(self._share_buckets), self._verifycap.needed_shares)
+            raise NotEnoughSharesError("Failed to get enough shareholders",
+                                       len(self._share_buckets),
+                                       self._verifycap.needed_shares)
 
         #for s in self._share_vbuckets.values():
         #  for vb in s:
@@ -886,7 +889,9 @@ class CiphertextDownloader(log.PrefixingLogMixin):
             available_shnums = set(self._share_vbuckets.keys())
             potential_shnums = list(available_shnums - handled_shnums)
             if len(potential_shnums) < (self._verifycap.needed_shares - len(self.active_buckets)):
-                raise NotEnoughSharesError
+                have = len(potential_shnums) + len(self.active_buckets)
+                raise NotEnoughSharesError("Unable to activate enough shares",
+                                           have, self._verifycap.needed_shares)
             # For the next share, choose a primary share if available, else a randomly chosen
             # secondary share.
             potential_shnums.sort()
@@ -488,7 +488,8 @@ class Encoder(object):
                      level=log.WEIRD, umid="TQGFRw")
         if len(self.landlords) < self.shares_of_happiness:
             msg = "lost too many shareholders during upload: %s" % why
-            raise NotEnoughSharesError(msg)
+            raise NotEnoughSharesError(msg, len(self.landlords),
+                                       self.shares_of_happiness)
         self.log("but we can still continue with %s shares, we'll be happy "
                  "with at least %s" % (len(self.landlords),
                                        self.shares_of_happiness),
@@ -18,7 +18,7 @@ from allmydata.util.assertutil import precondition
 from allmydata.util.rrefutil import get_versioned_remote_reference
 from allmydata.interfaces import IUploadable, IUploader, IUploadResults, \
      IEncryptedUploadable, RIEncryptedUploadable, IUploadStatus, \
-     NotEnoughSharesError, InsufficientVersionError
+     NotEnoughSharesError, InsufficientVersionError, NoServersError
 from allmydata.immutable import layout
 from pycryptopp.cipher.aes import AES
 
@@ -169,7 +169,7 @@ class Tahoe2PeerSelector:
 
         peers = client.get_permuted_peers("storage", storage_index)
         if not peers:
-            raise NotEnoughSharesError("client gave us zero peers")
+            raise NoServersError("client gave us zero peers")
 
         # this needed_hashes computation should mirror
         # Encoder.send_all_share_hash_trees. We use an IncompleteHashTree
@@ -195,7 +195,7 @@ class Tahoe2PeerSelector:
             peers = [peer for peer in peers
                      if _get_maxsize(peer) >= allocated_size]
             if not peers:
-                raise NotEnoughSharesError("no peers could accept an allocated_size of %d" % allocated_size)
+                raise NoServersError("no peers could accept an allocated_size of %d" % allocated_size)
 
         # decide upon the renewal/cancel secrets, to include them in the
         # allocat_buckets query.
@@ -298,7 +298,8 @@ class Tahoe2PeerSelector:
             if self.last_failure_msg:
                 msg += " (%s)" % (self.last_failure_msg,)
             log.msg(msg, level=log.UNUSUAL, parent=self._log_parent)
-            raise NotEnoughSharesError(msg)
+            raise NotEnoughSharesError(msg, placed_shares,
+                                       self.shares_of_happiness)
         else:
             # we placed enough to be happy, so we're done
             if self._status:
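With the upload-side split above, callers can now tell "no servers at all" apart from "placed too few shares". A hypothetical errback (not part of this commit; the handler name and messages are illustrative) might look like:

# Hypothetical caller-side handling of the split upload exceptions.
from allmydata.interfaces import NotEnoughSharesError, NoServersError

def _explain_upload_failure(f):
    # f is a twisted.python.failure.Failure from an upload Deferred
    if f.check(NoServersError):
        return "no storage servers were available; check your Introducer connection"
    if f.check(NotEnoughSharesError):
        e = f.value
        return "placed %d shares, wanted %d: %s" % (e.got, e.needed, e)
    return f  # unrecognized failure, let it propagate

# usage (illustrative): upload_deferred.addErrback(_explain_upload_failure)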
@@ -715,7 +715,19 @@ class IMutableFileNode(IFileNode, IMutableFilesystemNode):
         """
 
 class NotEnoughSharesError(Exception):
-    servermap = None
+    def __init__(self, msg, got, needed):
+        Exception.__init__(self, msg)
+        self.got = got
+        self.needed = needed
+        self.servermap = None
+
+class UnableToFetchCriticalDownloadDataError(Exception):
+    """I was unable to fetch some piece of critical data which is supposed to
+    be identically present in all shares."""
+
+class NoServersError(Exception):
+    """Upload wasn't given any servers to work with, usually indicating a
+    network or Introducer problem."""
 
 class ExistingChildError(Exception):
     """A directory node was asked to add or replace a child that already
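Since the exception now carries got and needed (per the class above), error reporting can state how close the operation came. A small hypothetical helper, using made-up counts:

# Hypothetical reporting helper; NotEnoughSharesError is the class defined above.
def describe_share_shortage(err):
    return "found %d shares, but %d are required" % (err.got, err.needed)

# e.g. describe_share_shortage(NotEnoughSharesError("Failed to get enough shareholders", 2, 3))
# would return "found 2 shares, but 3 are required"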
@@ -466,7 +466,8 @@ class Retrieve:
             self.log(format=format,
                      level=log.WEIRD, umid="ezTfjw", **args)
             err = NotEnoughSharesError("%s, last failure: %s" %
-                                       (format % args, self._last_failure))
+                                       (format % args, self._last_failure),
+                                       len(self.shares), k)
             if self._bad_shares:
                 self.log("We found some bad shares this pass. You should "
                          "update the servermap and try again to check "
@@ -99,7 +99,7 @@ class FakeCHKFileNode:
 
     def download(self, target):
         if self.my_uri.to_string() not in self.all_contents:
-            f = failure.Failure(NotEnoughSharesError())
+            f = failure.Failure(NotEnoughSharesError(None, 0, 3))
             target.fail(f)
             return defer.fail(f)
         data = self.all_contents[self.my_uri.to_string()]
@@ -109,14 +109,14 @@ class FakeCHKFileNode:
         return defer.maybeDeferred(target.finish)
     def download_to_data(self):
         if self.my_uri.to_string() not in self.all_contents:
-            return defer.fail(NotEnoughSharesError())
+            return defer.fail(NotEnoughSharesError(None, 0, 3))
         data = self.all_contents[self.my_uri.to_string()]
         return defer.succeed(data)
     def get_size(self):
         try:
             data = self.all_contents[self.my_uri.to_string()]
         except KeyError, le:
-            raise NotEnoughSharesError(le)
+            raise NotEnoughSharesError(le, 0, 3)
         return len(data)
     def read(self, consumer, offset=0, size=None):
         d = self.download_to_data()
@@ -269,7 +269,7 @@ class FakeMutableFileNode:
 
     def download(self, target):
         if self.storage_index not in self.all_contents:
-            f = failure.Failure(NotEnoughSharesError())
+            f = failure.Failure(NotEnoughSharesError(None, 0, 3))
             target.fail(f)
             return defer.fail(f)
         data = self.all_contents[self.storage_index]
@@ -279,7 +279,7 @@ class FakeMutableFileNode:
         return defer.maybeDeferred(target.finish)
     def download_to_data(self):
         if self.storage_index not in self.all_contents:
-            return defer.fail(NotEnoughSharesError())
+            return defer.fail(NotEnoughSharesError(None, 0, 3))
         data = self.all_contents[self.storage_index]
         return defer.succeed(data)
 