from allmydata.test import common
from allmydata.monitor import Monitor
from allmydata import check_results
from allmydata.interfaces import IURI, NotEnoughSharesError
from allmydata.immutable import upload
from allmydata.util import hashutil, log
from twisted.internet import defer
from twisted.trial import unittest
import random, struct
import common_util as testutil
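
# These tests exercise download of immutable files when some of the shares
# are missing or corrupted.  They use common.ShareManglingMixin to delete or
# corrupt individual shares and then observe how the downloader behaves,
# counting backend read operations as a rough performance measure.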

class Test(common.ShareManglingMixin, unittest.TestCase):
    def test_test_code(self):
        # The following process of stashing the shares, running
        # replace_shares, and asserting that the new set of shares equals the
        # old is more to test this test code than to test the Tahoe code...
        d = defer.succeed(None)
        d.addCallback(self.find_shares)
        stash = [None]
        def _stash_it(res):
            stash[0] = res
            return res
        d.addCallback(_stash_it)

        # The following process of deleting 8 of the shares and asserting
        # that you can't download it is more to test this test code than to
        # test the Tahoe code...
        def _then_delete_8(unused=None):
            self.replace_shares(stash[0], storage_index=self.uri.storage_index)
            for i in range(8):
                self._delete_a_share()
        d.addCallback(_then_delete_8)

        def _then_download(unused=None):
            self.downloader = self.clients[1].getServiceNamed("downloader")
            d = self.downloader.download_to_data(self.uri)

            def _after_download_callb(result):
                self.fail() # should have gotten an errback instead
                return result
            def _after_download_errb(failure):
                failure.trap(NotEnoughSharesError)
                return None # success!
            d.addCallbacks(_after_download_callb, _after_download_errb)
            # Return the inner Deferred so the outer chain waits for the
            # download attempt to complete before the test finishes.
            return d
        d.addCallback(_then_download)

        return d
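
    # The download tests below count backend read operations (via
    # self._count_reads()) before and after a download.  The numeric limits
    # (27 reads here, 45 in the corrupted-share test) are presumably budgets
    # for what the current downloader needs, so these tests double as coarse
    # performance-regression detectors.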
    def test_download(self):
        """ Basic download.  (This functionality is more or less already
        tested by test code in other modules, but this module is also going
        to test some more specific things about immutable download.)
        """
        d = defer.succeed(None)
        before_download_reads = self._count_reads()
        def _after_download(unused=None):
            after_download_reads = self._count_reads()
            self.failIf(after_download_reads-before_download_reads > 27,
                        (after_download_reads, before_download_reads))
        d.addCallback(self._download_and_check_plaintext)
        d.addCallback(_after_download)
        return d
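
    # The next two tests assume the file was uploaded with the default
    # 3-of-10 encoding: any 3 of the 10 shares suffice to reconstruct the
    # plaintext, so 7 shares can be removed or damaged and download should
    # still succeed.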
    def test_download_from_only_3_remaining_shares(self):
        """ Test download after 7 random shares (of the 10) have been removed. """
        d = defer.succeed(None)
        def _then_delete_7(unused=None):
            for i in range(7):
                self._delete_a_share()
        before_download_reads = self._count_reads()
        d.addCallback(_then_delete_7)
        def _after_download(unused=None):
            after_download_reads = self._count_reads()
            self.failIf(after_download_reads-before_download_reads > 27,
                        (after_download_reads, before_download_reads))
        d.addCallback(self._download_and_check_plaintext)
        d.addCallback(_after_download)
        return d
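
    # In the next test, the corruptor (from allmydata.test.common) damages
    # the crypttext hash tree in 7 of the shares; the 3 untouched shares
    # still carry good crypttext hashes, so the download and plaintext check
    # are expected to succeed anyway.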
    def test_download_from_only_3_shares_with_good_crypttext_hash(self):
        """ Test download after 7 random shares (of the 10) have had their
        crypttext hash tree corrupted. """
        d = defer.succeed(None)
        def _then_corrupt_7(unused=None):
            shnums = range(10)
            random.shuffle(shnums)
            for i in shnums[:7]:
                self._corrupt_a_share(None, common._corrupt_offset_of_block_hashes_to_truncate_crypttext_hashes, i)
        before_download_reads = self._count_reads()
        d.addCallback(_then_corrupt_7)
        d.addCallback(self._download_and_check_plaintext)
        return d
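
    # The remaining tests check that the downloader fails fast: when too few
    # usable shares exist, it should errback with NotEnoughSharesError after
    # reading little or no share data, as measured by the read counters.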
    def test_download_abort_if_too_many_missing_shares(self):
        """ Test that download gives up quickly when it realizes there aren't
        enough shares out there. """
        d = defer.succeed(None)
        def _then_delete_8(unused=None):
            for i in range(8):
                self._delete_a_share()
        d.addCallback(_then_delete_8)

        before_download_reads = self._count_reads()
        def _attempt_to_download(unused=None):
            downloader = self.clients[1].getServiceNamed("downloader")
            d = downloader.download_to_data(self.uri)

            def _callb(res):
                self.fail("Should have gotten an error from attempt to download, not %r" % (res,))
            def _errb(f):
                self.failUnless(f.check(NotEnoughSharesError))
            d.addCallbacks(_callb, _errb)
            return d

        d.addCallback(_attempt_to_download)

        def _after_attempt(unused=None):
            after_download_reads = self._count_reads()
            # To pass this test, you are required to give up before actually
            # trying to read any share data.
            self.failIf(after_download_reads-before_download_reads > 0,
                        (after_download_reads, before_download_reads))
        d.addCallback(_after_attempt)
        return d
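
    # In the next test, the corruptor overwrites each share's version-number
    # field.  The downloader checks that field before fetching any block
    # data, so it can recognize that 8 of the 10 shares are unusable after
    # only a small, bounded number of reads.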
    def test_download_abort_if_too_many_corrupted_shares(self):
        """ Test that download gives up quickly when it realizes there aren't
        enough uncorrupted shares out there.  It should be able to tell
        because the corruption occurs in the sharedata version number, which
        it checks first. """
        d = defer.succeed(None)
        def _then_corrupt_8(unused=None):
            shnums = range(10)
            random.shuffle(shnums)
            for shnum in shnums[:8]:
                self._corrupt_a_share(None, common._corrupt_sharedata_version_number, shnum)
        d.addCallback(_then_corrupt_8)

        before_download_reads = self._count_reads()
        def _attempt_to_download(unused=None):
            downloader = self.clients[1].getServiceNamed("downloader")
            d = downloader.download_to_data(self.uri)

            def _callb(res):
                self.fail("Should have gotten an error from attempt to download, not %r" % (res,))
            def _errb(f):
                self.failUnless(f.check(NotEnoughSharesError))
            d.addCallbacks(_callb, _errb)
            return d

        d.addCallback(_attempt_to_download)

        def _after_attempt(unused=None):
            after_download_reads = self._count_reads()
            # To pass this test, you are required to give up before reading
            # all of the share data.  Actually, we could give up sooner than
            # 45 reads, but currently our download code does 45 reads.  This
            # test then serves as a "performance regression detector" -- if
            # you change download code so that it takes *more* reads, then
            # this test will fail.
            self.failIf(after_download_reads-before_download_reads > 45,
                        (after_download_reads, before_download_reads))
        d.addCallback(_after_attempt)
        return d


# XXX extend these tests to show bad behavior of various kinds from servers:
# raising exception from each remote_foo() method, for example

# XXX test disconnect DeadReferenceError from get_buckets and get_block_whatsit