interfaces.py: promote immutable.encode.NotEnoughSharesError.. it isn't just for immutable files any more
commit 914655c52b
parent 4b48d94c52
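In practical terms (a minimal sketch based only on what the diff below shows; the check_shares helper is hypothetical and not part of this commit), NotEnoughSharesError now lives in allmydata.interfaces, so both immutable and mutable callers import and raise it from there instead of reaching into allmydata.immutable.encode:

    # before this commit:
    #   from allmydata.immutable.encode import NotEnoughSharesError
    # after this commit:
    from allmydata.interfaces import NotEnoughSharesError

    def check_shares(shares_found, shares_needed):
        # hypothetical helper showing the usage pattern the diff changes:
        # callers raise the promoted exception class directly
        if shares_found < shares_needed:
            raise NotEnoughSharesError("found %d shares, need %d"
                                       % (shares_found, shares_needed))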
@@ -11,9 +11,8 @@ from allmydata.util import base32, mathutil, hashutil, log, observer
 from allmydata.util.assertutil import _assert
 from allmydata import codec, hashtree, storage, uri
 from allmydata.interfaces import IDownloadTarget, IDownloader, IFileURI, \
-     IDownloadStatus, IDownloadResults
+     IDownloadStatus, IDownloadResults, NotEnoughSharesError
 from allmydata.immutable import layout
-from allmydata.immutable.encode import NotEnoughSharesError
 from pycryptopp.cipher.aes import AES
 
 class HaveAllPeersError(Exception):
@@ -10,7 +10,7 @@ from allmydata.util import mathutil, hashutil, base32, log
 from allmydata.util.assertutil import _assert, precondition
 from allmydata.codec import CRSEncoder
 from allmydata.interfaces import IEncoder, IStorageBucketWriter, \
-     IEncryptedUploadable, IUploadStatus
+     IEncryptedUploadable, IUploadStatus, NotEnoughSharesError
 
 """
 The goal of the encoder is to turn the original file into a series of
@@ -60,10 +60,6 @@ hash tree is put into the URI.
 
 """
 
-class NotEnoughSharesError(Exception):
-    servermap = None
-    pass
-
 class UploadAborted(Exception):
     pass
 
@@ -17,7 +17,7 @@ from allmydata.immutable import encode
 from allmydata.util import base32, idlib, mathutil
 from allmydata.util.assertutil import precondition
 from allmydata.interfaces import IUploadable, IUploader, IUploadResults, \
-     IEncryptedUploadable, RIEncryptedUploadable, IUploadStatus
+     IEncryptedUploadable, RIEncryptedUploadable, IUploadStatus, NotEnoughSharesError
 from allmydata.immutable import layout
 from pycryptopp.cipher.aes import AES
 
@@ -161,7 +161,7 @@ class Tahoe2PeerSelector:
 
         peers = client.get_permuted_peers("storage", storage_index)
         if not peers:
-            raise encode.NotEnoughSharesError("client gave us zero peers")
+            raise NotEnoughSharesError("client gave us zero peers")
 
         # figure out how much space to ask for
 
@@ -273,7 +273,7 @@ class Tahoe2PeerSelector:
             if self.last_failure_msg:
                 msg += " (%s)" % (self.last_failure_msg,)
             log.msg(msg, level=log.UNUSUAL, parent=self._log_parent)
-            raise encode.NotEnoughSharesError(msg)
+            raise NotEnoughSharesError(msg)
         else:
             # we placed enough to be happy, so we're done
             if self._status:
@@ -649,6 +649,9 @@ class IMutableFileNode(IFileNode, IMutableFilesystemNode):
         writer-visible data using this writekey.
         """
 
+class NotEnoughSharesError(Exception):
+    servermap = None
+
 class ExistingChildError(Exception):
     """A directory node was asked to add or replace a child that already
     exists, and overwrite= was set to False."""
@@ -6,12 +6,11 @@ from zope.interface import implements
 from twisted.internet import defer, reactor
 from foolscap.eventual import eventually
 from allmydata.interfaces import IMutableFileNode, IMutableFileURI, \
-     ICheckable, ICheckerResults
+     ICheckable, ICheckerResults, NotEnoughSharesError
 from allmydata.util import hashutil, log
 from allmydata.util.assertutil import precondition
 from allmydata.uri import WriteableSSKFileURI
 from allmydata.monitor import Monitor
-from allmydata.immutable.encode import NotEnoughSharesError
 from pycryptopp.publickey import rsa
 from pycryptopp.cipher.aes import AES
 
@@ -6,10 +6,9 @@ from twisted.internet import defer
 from twisted.python import failure
 from foolscap import DeadReferenceError
 from foolscap.eventual import eventually, fireEventually
-from allmydata.interfaces import IRetrieveStatus
+from allmydata.interfaces import IRetrieveStatus, NotEnoughSharesError
 from allmydata.util import hashutil, idlib, log
 from allmydata import hashtree, codec, storage
-from allmydata.immutable.encode import NotEnoughSharesError
 from pycryptopp.cipher.aes import AES
 from pycryptopp.publickey import rsa
 
@@ -10,8 +10,7 @@ from foolscap.eventual import flushEventualQueue, fireEventually
 from allmydata import uri, dirnode, client
 from allmydata.introducer.server import IntroducerNode
 from allmydata.interfaces import IURI, IMutableFileNode, IFileNode, \
-     FileTooLargeError, ICheckable
-from allmydata.immutable.encode import NotEnoughSharesError
+     FileTooLargeError, NotEnoughSharesError, ICheckable
 from allmydata.checker_results import CheckerResults, CheckAndRepairResults, \
      DeepCheckResults, DeepCheckAndRepairResults
 from allmydata.mutable.common import CorruptShareError
@@ -9,7 +9,7 @@ from allmydata import hashtree, uri
 from allmydata.immutable import encode, upload, download
 from allmydata.util import hashutil, testutil
 from allmydata.util.assertutil import _assert
-from allmydata.interfaces import IStorageBucketWriter, IStorageBucketReader
+from allmydata.interfaces import IStorageBucketWriter, IStorageBucketReader, NotEnoughSharesError
 
 class LostPeerError(Exception):
     pass
@@ -455,7 +455,7 @@ class Roundtrip(unittest.TestCase, testutil.ShouldFailMixin):
         d = self.send_and_recover((4,8,10), AVAILABLE_SHARES=2)
         def _done(res):
             self.failUnless(isinstance(res, Failure))
-            self.failUnless(res.check(download.NotEnoughSharesError))
+            self.failUnless(res.check(NotEnoughSharesError))
         d.addBoth(_done)
         return d
 
@@ -527,7 +527,7 @@ class Roundtrip(unittest.TestCase, testutil.ShouldFailMixin):
         d = self.send_and_recover((4,8,10), bucket_modes=modemap)
         def _done(res):
             self.failUnless(isinstance(res, Failure))
-            self.failUnless(res.check(download.NotEnoughSharesError))
+            self.failUnless(res.check(NotEnoughSharesError))
         d.addBoth(_done)
         return d
 
@@ -550,7 +550,7 @@ class Roundtrip(unittest.TestCase, testutil.ShouldFailMixin):
         d = self.send_and_recover((4,8,10), bucket_modes=modemap)
         def _done(res):
             self.failUnless(isinstance(res, Failure))
-            self.failUnless(res.check(download.NotEnoughSharesError))
+            self.failUnless(res.check(NotEnoughSharesError))
         d.addBoth(_done)
         return d
 
@@ -680,7 +680,7 @@ class Roundtrip(unittest.TestCase, testutil.ShouldFailMixin):
         d = self.send_and_recover((4,8,10), bucket_modes=modemap)
         def _done(res):
             self.failUnless(isinstance(res, Failure))
-            self.failUnless(res.check(download.NotEnoughSharesError))
+            self.failUnless(res.check(NotEnoughSharesError))
         d.addBoth(_done)
         return d
 
@@ -703,7 +703,7 @@ class Roundtrip(unittest.TestCase, testutil.ShouldFailMixin):
         d = self.send_and_recover((4,8,10), bucket_modes=modemap)
         def _done(res):
             self.failUnless(isinstance(res, Failure))
-            self.failUnless(res.check(download.NotEnoughSharesError))
+            self.failUnless(res.check(NotEnoughSharesError))
         d.addBoth(_done)
         return d
 
@@ -732,7 +732,7 @@ class Roundtrip(unittest.TestCase, testutil.ShouldFailMixin):
         d = self.send_and_recover((4,8,10), bucket_modes=modemap)
         def _done(res):
             self.failUnless(isinstance(res, Failure))
-            self.failUnless(res.check(encode.NotEnoughSharesError), res)
+            self.failUnless(res.check(NotEnoughSharesError), res)
         d.addBoth(_done)
         return d
 
@@ -743,7 +743,7 @@ class Roundtrip(unittest.TestCase, testutil.ShouldFailMixin):
         d = self.send_and_recover((4,8,10), bucket_modes=modemap)
         def _done(res):
             self.failUnless(isinstance(res, Failure))
-            self.failUnless(res.check(encode.NotEnoughSharesError))
+            self.failUnless(res.check(NotEnoughSharesError))
         d.addBoth(_done)
         return d
 
@@ -1,8 +1,8 @@
-from allmydata.immutable import encode, upload
+from allmydata.immutable import upload
 from allmydata.test.common import SystemTestMixin, ShareManglingMixin
 from allmydata.util import testutil
 from allmydata.monitor import Monitor
-from allmydata.interfaces import IURI
+from allmydata.interfaces import IURI, NotEnoughSharesError
 from twisted.internet import defer
 from twisted.trial import unittest
 import random, struct
@@ -273,7 +273,7 @@ class Test(ShareManglingMixin, unittest.TestCase):
                 self.fail() # should have gotten an errback instead
             return result
         def _after_download_errb(failure):
-            failure.trap(encode.NotEnoughSharesError)
+            failure.trap(NotEnoughSharesError)
             return None # success!
         d.addCallbacks(_after_download_callb, _after_download_errb)
         d.addCallback(_then_download)
@@ -6,13 +6,12 @@ from twisted.internet import defer, reactor
 from twisted.python import failure
 from allmydata import uri, storage
 from allmydata.immutable import download
-from allmydata.immutable.encode import NotEnoughSharesError
 from allmydata.util import base32, testutil, idlib
 from allmydata.util.idlib import shortnodeid_b2a
 from allmydata.util.hashutil import tagged_hash
 from allmydata.util.fileutil import make_dirs
 from allmydata.interfaces import IURI, IMutableFileURI, IUploadable, \
-     FileTooLargeError, IRepairResults
+     FileTooLargeError, NotEnoughSharesError, IRepairResults
 from allmydata.monitor import Monitor
 from allmydata.test.common import ShouldFailMixin
 from foolscap.eventual import eventually, fireEventually
@@ -15,7 +15,7 @@ from allmydata.util import log, base32
 from allmydata.scripts import runner
 from allmydata.interfaces import IDirectoryNode, IFileNode, IFileURI, \
      ICheckerResults, ICheckAndRepairResults, IDeepCheckResults, \
-     IDeepCheckAndRepairResults, NoSuchChildError
+     IDeepCheckAndRepairResults, NoSuchChildError, NotEnoughSharesError
 from allmydata.monitor import Monitor, OperationCancelledError
 from allmydata.mutable.common import NotMutableError
 from allmydata.mutable import layout as mutable_layout
@@ -194,7 +194,7 @@ class SystemTest(SystemTestMixin, unittest.TestCase):
             log.msg("finished downloading non-existend URI",
                     level=log.UNUSUAL, facility="tahoe.tests")
             self.failUnless(isinstance(res, Failure))
-            self.failUnless(res.check(download.NotEnoughSharesError),
+            self.failUnless(res.check(NotEnoughSharesError),
                             "expected NotEnoughSharesError, got %s" % res)
             # TODO: files that have zero peers should get a special kind
             # of NotEnoughSharesError, which can be used to suggest that
@@ -7,8 +7,8 @@ from twisted.internet import defer
 from cStringIO import StringIO
 
 from allmydata import uri
-from allmydata.immutable import upload, encode
-from allmydata.interfaces import IFileURI, FileTooLargeError
+from allmydata.immutable import upload
+from allmydata.interfaces import IFileURI, FileTooLargeError, NotEnoughSharesError
 from allmydata.util.assertutil import precondition
 from allmydata.util.deferredutil import DeferredListShouldSucceed
 from allmydata.util.testutil import ShouldFailMixin
@@ -347,7 +347,7 @@ class FullServer(unittest.TestCase):
         self.u.parent = self.node
 
     def _should_fail(self, f):
-        self.failUnless(isinstance(f, Failure) and f.check(encode.NotEnoughSharesError), f)
+        self.failUnless(isinstance(f, Failure) and f.check(NotEnoughSharesError), f)
 
     def test_data_large(self):
         data = DATA