2008-09-25 17:16:53 +00:00
from allmydata . test . common import SystemTestMixin , ShareManglingMixin
2008-10-22 08:38:18 +00:00
from allmydata . monitor import Monitor
2009-01-06 20:37:03 +00:00
from allmydata import check_results
2008-10-27 20:34:49 +00:00
from allmydata . interfaces import IURI , NotEnoughSharesError
2008-12-31 21:18:38 +00:00
from allmydata . immutable import upload
2009-01-08 06:40:12 +00:00
from allmydata . util import hashutil , log
2008-09-25 17:16:53 +00:00
from twisted . internet import defer
from twisted . trial import unittest
import random , struct
2008-10-29 04:28:31 +00:00
import common_util as testutil
2008-09-25 17:16:53 +00:00
2008-09-26 17:47:19 +00:00
TEST_DATA = " \x02 " * ( upload . Uploader . URI_LIT_SIZE_THRESHOLD + 1 )
2008-12-31 21:18:38 +00:00
def corrupt_field(data, offset, size, debug=False):
    """Return a copy of data with the size-byte field at offset corrupted.

    Half of the time a single bit inside the field is flipped; the other half
    of the time the entire field is replaced with random bytes.
    """
    if random.random() < 0.5:
        flipped = testutil.flip_one_bit(data, offset, size)
        if debug:
            log.msg("testing: corrupting offset %d, size %d flipping one bit orig: %r, newdata: %r" % (offset, size, data[offset:offset+size], flipped[offset:offset+size]))
        return flipped
    else:
        randomized = testutil.insecurerandstr(size)
        if debug:
            log.msg("testing: corrupting offset %d, size %d randomizing field, orig: %r, newval: %r" % (offset, size, data[offset:offset+size], randomized))
        return data[:offset] + randomized + data[offset+size:]
2008-10-14 23:09:20 +00:00
2009-01-06 01:28:18 +00:00
def _corrupt_nothing ( data ) :
""" Leave the data pristine. """
return data
2008-10-14 23:09:20 +00:00
def _corrupt_file_version_number(data):
    """Corrupt the 4-byte share-file (container) version number at offset 0:
    one bit flipped, or the whole field randomized."""
    return corrupt_field(data, 0x00, 4)
def _corrupt_size_of_file_data(data):
    """Corrupt the server-side field (offset 0x04) giving the size of the
    share data within the file: one bit flipped, or the field randomized.

    NOTE(review): the old docstring claimed this field "will be set to one
    smaller", but corrupt_field() flips one bit or randomizes the field --
    the docstring is corrected here; behavior is unchanged.
    """
    return corrupt_field(data, 0x04, 4)
def _corrupt_sharedata_version_number ( data ) :
""" Scramble the file data -- the share data version number will have one bit flipped or
2008-12-31 21:18:38 +00:00
else will be changed to a random value , but not 1 or 2. """
2008-10-14 23:09:20 +00:00
return corrupt_field ( data , 0x0c , 4 )
2008-12-31 21:18:38 +00:00
sharevernum = struct . unpack ( " >l " , data [ 0x0c : 0x0c + 4 ] ) [ 0 ]
assert sharevernum in ( 1 , 2 ) , " This test is designed to corrupt immutable shares of v1 or v2 in specific ways. "
newsharevernum = sharevernum
while newsharevernum in ( 1 , 2 ) :
newsharevernum = random . randrange ( 0 , 2 * * 32 )
newsharevernumbytes = struct . pack ( " >l " , newsharevernum )
return data [ : 0x0c ] + newsharevernumbytes + data [ 0x0c + 4 : ]
2009-01-06 01:28:18 +00:00
def _corrupt_sharedata_version_number_to_plausible_version ( data ) :
2008-12-31 21:18:38 +00:00
""" Scramble the file data -- the share data version number will
be changed to 2 if it is 1 or else to 1 if it is 2. """
sharevernum = struct . unpack ( " >l " , data [ 0x0c : 0x0c + 4 ] ) [ 0 ]
assert sharevernum in ( 1 , 2 ) , " This test is designed to corrupt immutable shares of v1 or v2 in specific ways. "
if sharevernum == 1 :
newsharevernum = 2
else :
newsharevernum = 1
newsharevernumbytes = struct . pack ( " >l " , newsharevernum )
return data [ : 0x0c ] + newsharevernumbytes + data [ 0x0c + 4 : ]
2008-10-14 23:09:20 +00:00
def _corrupt_segment_size(data):
    """Corrupt the segment-size field (4 bytes in v1 shares, 8 bytes in v2):
    one bit flipped, or the field randomized."""
    sharevernum = struct.unpack(">l", data[0x0c:0x0c+4])[0]
    assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
    if sharevernum == 1:
        width = 4
    else:
        width = 8
    return corrupt_field(data, 0x0c+0x04, width, debug=False)
2008-10-14 23:09:20 +00:00
def _corrupt_size_of_sharedata(data):
    """Corrupt the field giving the size of the data within the share
    (4 bytes at 0x0c+0x08 in v1, 8 bytes at 0x0c+0x0c in v2): one bit
    flipped, or the field randomized."""
    sharevernum = struct.unpack(">l", data[0x0c:0x0c+4])[0]
    assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
    if sharevernum == 1:
        fieldoffset, width = 0x0c+0x08, 4
    else:
        fieldoffset, width = 0x0c+0x0c, 8
    return corrupt_field(data, fieldoffset, width)
def _corrupt_offset_of_sharedata(data):
    """Corrupt the field giving the offset of the data within the share:
    one bit flipped, or the field randomized."""
    sharevernum = struct.unpack(">l", data[0x0c:0x0c+4])[0]
    assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
    if sharevernum == 1:
        fieldoffset, width = 0x0c+0x0c, 4
    else:
        fieldoffset, width = 0x0c+0x14, 8
    return corrupt_field(data, fieldoffset, width)
def _corrupt_offset_of_ciphertext_hash_tree(data):
    """Corrupt the field giving the offset of the ciphertext hash tree within
    the share: one bit flipped, or the field randomized."""
    sharevernum = struct.unpack(">l", data[0x0c:0x0c+4])[0]
    assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
    if sharevernum == 1:
        fieldoffset, width = 0x0c+0x14, 4
    else:
        fieldoffset, width = 0x0c+0x24, 8
    return corrupt_field(data, fieldoffset, width, debug=False)
2008-10-14 23:09:20 +00:00
def _corrupt_offset_of_block_hashes(data):
    """Corrupt the field giving the offset of the block hash tree within the
    share: one bit flipped, or the field randomized."""
    sharevernum = struct.unpack(">l", data[0x0c:0x0c+4])[0]
    assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
    if sharevernum == 1:
        fieldoffset, width = 0x0c+0x18, 4
    else:
        fieldoffset, width = 0x0c+0x2c, 8
    return corrupt_field(data, fieldoffset, width)
2009-01-08 06:40:12 +00:00
def _corrupt_offset_of_block_hashes_to_truncate_crypttext_hashes(data):
    """Shrink the block-hash-tree offset by a whole number of hash sizes,
    so the downloader fetches a truncated (incomplete) crypttext hash tree.

    Uses explicit floor division ("//"): with true division the randrange()
    bound would be a float, which randrange rejects. Behavior is identical
    under Python 2's classic integer division.
    """
    sharevernum = struct.unpack(">l", data[0x0c:0x0c+4])[0]
    assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
    if sharevernum == 1:
        fieldoffset, fmt, fieldsize = 0x0c+0x18, ">L", 4
    else:
        fieldoffset, fmt, fieldsize = 0x0c+0x2c, ">Q", 8
    curval = struct.unpack(fmt, data[fieldoffset:fieldoffset+fieldsize])[0]
    # Pick a new offset that is a strictly smaller multiple of the hash size,
    # so the crypttext hash tree region is cut short at a hash boundary.
    newval = random.randrange(0, max(1, (curval // hashutil.CRYPTO_VAL_SIZE) // 2)) * hashutil.CRYPTO_VAL_SIZE
    newvalstr = struct.pack(fmt, newval)
    return data[:fieldoffset] + newvalstr + data[fieldoffset+fieldsize:]
2008-10-14 23:09:20 +00:00
def _corrupt_offset_of_share_hashes(data):
    """Corrupt the field giving the offset of the share hash tree within the
    share: one bit flipped, or the field randomized."""
    sharevernum = struct.unpack(">l", data[0x0c:0x0c+4])[0]
    assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
    if sharevernum == 1:
        fieldoffset, width = 0x0c+0x1c, 4
    else:
        fieldoffset, width = 0x0c+0x34, 8
    return corrupt_field(data, fieldoffset, width)
def _corrupt_offset_of_uri_extension(data):
    """Corrupt the field giving the offset of the URI extension: one bit
    flipped, or the field randomized."""
    sharevernum = struct.unpack(">l", data[0x0c:0x0c+4])[0]
    assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
    if sharevernum == 1:
        fieldoffset, width = 0x0c+0x20, 4
    else:
        fieldoffset, width = 0x0c+0x3c, 8
    return corrupt_field(data, fieldoffset, width)
2009-01-03 18:44:27 +00:00
def _corrupt_offset_of_uri_extension_to_force_short_read ( data , debug = False ) :
""" Scramble the file data -- the field showing the offset of the uri extension will be set
to the size of the file minus 3. This means when the client tries to read the length field
from that location it will get a short read - - the result string will be only 3 bytes long ,
not the 4 or 8 bytes necessary to do a successful struct . unpack . """
sharevernum = struct . unpack ( " >l " , data [ 0x0c : 0x0c + 4 ] ) [ 0 ]
assert sharevernum in ( 1 , 2 ) , " This test is designed to corrupt immutable shares of v1 or v2 in specific ways. "
# The "-0x0c" in here is to skip the server-side header in the share file, which the client doesn't see when seeking and reading.
if sharevernum == 1 :
if debug :
log . msg ( " testing: corrupting offset %d , size %d , changing %d to %d (len(data) == %d ) " % ( 0x2c , 4 , struct . unpack ( " >L " , data [ 0x2c : 0x2c + 4 ] ) [ 0 ] , len ( data ) - 0x0c - 3 , len ( data ) ) )
return data [ : 0x2c ] + struct . pack ( " >L " , len ( data ) - 0x0c - 3 ) + data [ 0x2c + 4 : ]
else :
if debug :
log . msg ( " testing: corrupting offset %d , size %d , changing %d to %d (len(data) == %d ) " % ( 0x48 , 8 , struct . unpack ( " >Q " , data [ 0x48 : 0x48 + 8 ] ) [ 0 ] , len ( data ) - 0x0c - 3 , len ( data ) ) )
return data [ : 0x48 ] + struct . pack ( " >Q " , len ( data ) - 0x0c - 3 ) + data [ 0x48 + 8 : ]
2008-10-14 23:09:20 +00:00
def _corrupt_share_data(data):
    """Corrupt the share (block) data itself: one bit flipped, or bytes
    randomized, somewhere inside the sharedata field."""
    sharevernum = struct.unpack(">l", data[0x0c:0x0c+4])[0]
    assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
    if sharevernum == 1:
        sharedatasize = struct.unpack(">L", data[0x0c+0x08:0x0c+0x08+4])[0]
        return corrupt_field(data, 0x0c+0x24, sharedatasize)
    else:
        # BUGFIX: in v2 shares the data-size field is the 8-byte field at
        # 0x0c+0x0c (consistent with _corrupt_size_of_sharedata); the old
        # slice [0x0c+0x08:0x0c+0x0c+8] was 12 bytes long, which the ">Q"
        # format cannot unpack (struct.error).
        sharedatasize = struct.unpack(">Q", data[0x0c+0x0c:0x0c+0x0c+8])[0]
        return corrupt_field(data, 0x0c+0x44, sharedatasize)
def _corrupt_crypttext_hash_tree(data):
    """Corrupt the crypttext hash tree field: one bit flipped, or bytes
    randomized.

    BUGFIX: offsets stored inside the share are relative to the start of the
    client-visible data, which begins after the 0x0c-byte server-side header
    (the same convention noted in
    _corrupt_offset_of_uri_extension_to_force_short_read), so 0x0c must be
    added before indexing into the raw share file. The old code corrupted
    bytes 0x0c too early.
    """
    sharevernum = struct.unpack(">l", data[0x0c:0x0c+4])[0]
    assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
    if sharevernum == 1:
        crypttexthashtreeoffset = struct.unpack(">L", data[0x0c+0x14:0x0c+0x14+4])[0]
        blockhashesoffset = struct.unpack(">L", data[0x0c+0x18:0x0c+0x18+4])[0]
    else:
        crypttexthashtreeoffset = struct.unpack(">Q", data[0x0c+0x24:0x0c+0x24+8])[0]
        blockhashesoffset = struct.unpack(">Q", data[0x0c+0x2c:0x0c+0x2c+8])[0]
    return corrupt_field(data, 0x0c+crypttexthashtreeoffset, blockhashesoffset-crypttexthashtreeoffset)
def _corrupt_block_hashes(data):
    """Corrupt the block hash tree field: one bit flipped, or bytes
    randomized.

    BUGFIX: stored offsets are relative to the client-visible data, which
    begins after the 0x0c-byte server-side header, so 0x0c must be added
    before indexing into the raw share file.
    """
    sharevernum = struct.unpack(">l", data[0x0c:0x0c+4])[0]
    assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
    if sharevernum == 1:
        blockhashesoffset = struct.unpack(">L", data[0x0c+0x18:0x0c+0x18+4])[0]
        sharehashesoffset = struct.unpack(">L", data[0x0c+0x1c:0x0c+0x1c+4])[0]
    else:
        blockhashesoffset = struct.unpack(">Q", data[0x0c+0x2c:0x0c+0x2c+8])[0]
        sharehashesoffset = struct.unpack(">Q", data[0x0c+0x34:0x0c+0x34+8])[0]
    return corrupt_field(data, 0x0c+blockhashesoffset, sharehashesoffset-blockhashesoffset)
def _corrupt_share_hashes(data):
    """Corrupt the share hash chain field: one bit flipped, or bytes
    randomized.

    BUGFIX: stored offsets are relative to the client-visible data, which
    begins after the 0x0c-byte server-side header, so 0x0c must be added
    before indexing into the raw share file.
    """
    sharevernum = struct.unpack(">l", data[0x0c:0x0c+4])[0]
    assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
    if sharevernum == 1:
        sharehashesoffset = struct.unpack(">L", data[0x0c+0x1c:0x0c+0x1c+4])[0]
        uriextoffset = struct.unpack(">L", data[0x0c+0x20:0x0c+0x20+4])[0]
    else:
        sharehashesoffset = struct.unpack(">Q", data[0x0c+0x34:0x0c+0x34+8])[0]
        uriextoffset = struct.unpack(">Q", data[0x0c+0x3c:0x0c+0x3c+8])[0]
    return corrupt_field(data, 0x0c+sharehashesoffset, uriextoffset-sharehashesoffset)
def _corrupt_length_of_uri_extension(data):
    """Corrupt the URI-extension length field: one bit flipped, or the field
    randomized.

    BUGFIX: stored offsets are relative to the client-visible data, which
    begins after the 0x0c-byte server-side header, so 0x0c must be added
    before indexing into the raw share file.
    """
    sharevernum = struct.unpack(">l", data[0x0c:0x0c+4])[0]
    assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
    if sharevernum == 1:
        uriextoffset = struct.unpack(">L", data[0x0c+0x20:0x0c+0x20+4])[0]
        return corrupt_field(data, 0x0c+uriextoffset, 4)
    else:
        uriextoffset = struct.unpack(">Q", data[0x0c+0x3c:0x0c+0x3c+8])[0]
        return corrupt_field(data, 0x0c+uriextoffset, 8)
def _corrupt_uri_extension(data):
    """Corrupt the URI extension field: one bit flipped, or bytes randomized.

    BUGFIX: this function already read the length via data[0x0c+uriextoffset:...]
    but then corrupted at bare uriextoffset -- internally inconsistent. Stored
    offsets are relative to the client-visible data (after the 0x0c-byte
    server-side header), so 0x0c is now added in the corrupt_field call too.
    """
    sharevernum = struct.unpack(">l", data[0x0c:0x0c+4])[0]
    assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
    if sharevernum == 1:
        uriextoffset = struct.unpack(">L", data[0x0c+0x20:0x0c+0x20+4])[0]
        uriextlen = struct.unpack(">L", data[0x0c+uriextoffset:0x0c+uriextoffset+4])[0]
    else:
        uriextoffset = struct.unpack(">Q", data[0x0c+0x3c:0x0c+0x3c+8])[0]
        uriextlen = struct.unpack(">Q", data[0x0c+uriextoffset:0x0c+uriextoffset+8])[0]
    return corrupt_field(data, 0x0c+uriextoffset, uriextlen)
2008-09-25 17:16:53 +00:00
class Test ( ShareManglingMixin , unittest . TestCase ) :
    def setUp(self):
        """Bring up a grid, upload TEST_DATA, and stash the resulting
        filenode in self.filenode and its URI in self.uri. Returns a
        Deferred that fires when setup is complete."""
        # Set self.basedir to a temp dir which has the name of the current test method in its
        # name.
        self.basedir = self.mktemp()

        d = defer.maybeDeferred(SystemTestMixin.setUp, self)
        d.addCallback(lambda x: self.set_up_nodes())
        def _upload_a_file(ignored):
            client = self.clients[0]
            # We need multiple segments to test crypttext hash trees that are non-trivial
            # (i.e. they have more than just one hash in them).
            client.DEFAULT_ENCODING_PARAMETERS['max_segment_size'] = 12
            d2 = client.upload(upload.Data(TEST_DATA, convergence=""))
            def _after_upload(u):
                self.uri = IURI(u.uri)
                return self.clients[0].create_node_from_uri(self.uri)
            d2.addCallback(_after_upload)
            return d2
        d.addCallback(_upload_a_file)
        def _stash_it(filenode):
            self.filenode = filenode
        d.addCallback(_stash_it)
        return d
2008-09-26 17:47:19 +00:00
def _download_and_check_plaintext ( self , unused = None ) :
self . downloader = self . clients [ 1 ] . getServiceNamed ( " downloader " )
d = self . downloader . download_to_data ( self . uri )
def _after_download ( result ) :
self . failUnlessEqual ( result , TEST_DATA )
d . addCallback ( _after_download )
return d
def _delete_a_share ( self , unused = None , sharenum = None ) :
2008-09-25 18:24:36 +00:00
""" Delete one share. """
2008-09-25 17:16:53 +00:00
shares = self . find_shares ( )
ks = shares . keys ( )
2008-09-26 17:47:19 +00:00
if sharenum is not None :
2008-10-14 23:09:20 +00:00
k = [ key for key in shares . keys ( ) if key [ 1 ] == sharenum ] [ 0 ]
2008-09-26 17:47:19 +00:00
else :
k = random . choice ( ks )
2008-09-25 17:16:53 +00:00
del shares [ k ]
2008-09-26 22:23:53 +00:00
self . replace_shares ( shares , storage_index = self . uri . storage_index )
2008-09-25 17:16:53 +00:00
return unused
    def test_test_code(self):
        """Exercise the find_shares/replace_shares/_delete_a_share test
        helpers themselves."""
        # The following process of stashing the shares, running
        # replace_shares, and asserting that the new set of shares equals the
        # old is more to test this test code than to test the Tahoe code...
        d = defer.succeed(None)
        d.addCallback(self.find_shares)
        stash = [None]
        def _stash_it(res):
            stash[0] = res
            return res
        d.addCallback(_stash_it)
        d.addCallback(self.replace_shares, storage_index=self.uri.storage_index)
        def _compare(res):
            oldshares = stash[0]
            self.failUnless(isinstance(oldshares, dict), oldshares)
            self.failUnlessEqual(oldshares, res)
        d.addCallback(self.find_shares)
        d.addCallback(_compare)

        # Replacing with an empty dict should delete every share on disk.
        d.addCallback(lambda ignore: self.replace_shares({}, storage_index=self.uri.storage_index))
        d.addCallback(self.find_shares)
        d.addCallback(lambda x: self.failUnlessEqual(x, {}))

        # The following process of deleting 8 of the shares and asserting that you can't
        # download it is more to test this test code than to test the Tahoe code...
        def _then_delete_8(unused=None):
            # First restore the full share set, then delete 8 of the 10,
            # leaving only 2 -- fewer than the 3 needed to download.
            self.replace_shares(stash[0], storage_index=self.uri.storage_index)
            for i in range(8):
                self._delete_a_share()
        d.addCallback(_then_delete_8)
        def _then_download(unused=None):
            self.downloader = self.clients[1].getServiceNamed("downloader")
            # NOTE(review): this inner Deferred is not returned, so the outer
            # chain does not wait for the download attempt to finish --
            # confirm whether that is intentional.
            d = self.downloader.download_to_data(self.uri)
            def _after_download_callb(result):
                self.fail() # should have gotten an errback instead
                return result
            def _after_download_errb(failure):
                failure.trap(NotEnoughSharesError)
                return None # success!
            d.addCallbacks(_after_download_callb, _after_download_errb)
        d.addCallback(_then_download)
        # The following process of leaving 8 of the shares deleted and asserting that you can't
        # repair it is more to test this test code than to test the Tahoe code...
        #TODO def _then_repair(unused=None):
        #TODO     d2 = self.filenode.check_and_repair(Monitor(), verify=False)
        #TODO     def _after_repair(checkandrepairresults):
        #TODO         prerepairres = checkandrepairresults.get_pre_repair_results()
        #TODO         postrepairres = checkandrepairresults.get_post_repair_results()
        #TODO         self.failIf(prerepairres.is_healthy())
        #TODO         self.failIf(postrepairres.is_healthy())
        #TODO     d2.addCallback(_after_repair)
        #TODO     return d2
        #TODO d.addCallback(_then_repair)
        return d
def _count_reads ( self ) :
sum_of_read_counts = 0
for client in self . clients :
counters = client . stats_provider . get_stats ( ) [ ' counters ' ]
sum_of_read_counts + = counters . get ( ' storage_server.read ' , 0 )
return sum_of_read_counts
def _count_allocates ( self ) :
sum_of_allocate_counts = 0
for client in self . clients :
counters = client . stats_provider . get_stats ( ) [ ' counters ' ]
sum_of_allocate_counts + = counters . get ( ' storage_server.allocate ' , 0 )
return sum_of_allocate_counts
2009-01-02 23:54:59 +00:00
def _corrupt_a_share ( self , unused , corruptor_func , sharenum ) :
shares = self . find_shares ( )
ks = [ key for key in shares . keys ( ) if key [ 1 ] == sharenum ]
assert ks , ( shares . keys ( ) , sharenum )
k = ks [ 0 ]
shares [ k ] = corruptor_func ( shares [ k ] )
self . replace_shares ( shares , storage_index = self . uri . storage_index )
2009-01-06 01:28:18 +00:00
return corruptor_func
2009-01-02 23:54:59 +00:00
2009-01-03 18:44:27 +00:00
def _corrupt_all_shares ( self , unused , corruptor_func ) :
""" All shares on disk will be corrupted by corruptor_func. """
shares = self . find_shares ( )
for k in shares . keys ( ) :
self . _corrupt_a_share ( unused , corruptor_func , k [ 1 ] )
2009-01-06 01:28:18 +00:00
return corruptor_func
2009-01-03 18:44:27 +00:00
2008-10-14 23:09:20 +00:00
def _corrupt_a_random_share ( self , unused , corruptor_func ) :
""" Exactly one share on disk will be corrupted by corruptor_func. """
shares = self . find_shares ( )
ks = shares . keys ( )
k = random . choice ( ks )
2009-01-06 01:28:18 +00:00
self . _corrupt_a_share ( unused , corruptor_func , k [ 1 ] )
return corruptor_func
2008-10-14 23:09:20 +00:00
2009-01-02 23:54:59 +00:00
def test_download ( self ) :
""" Basic download. (This functionality is more or less already tested by test code in
other modules , but this module is also going to test some more specific things about
immutable download . )
"""
d = defer . succeed ( None )
before_download_reads = self . _count_reads ( )
def _after_download ( unused = None ) :
after_download_reads = self . _count_reads ( )
2009-01-08 06:40:12 +00:00
self . failIf ( after_download_reads - before_download_reads > 27 , ( after_download_reads , before_download_reads ) )
2009-01-02 23:54:59 +00:00
d . addCallback ( self . _download_and_check_plaintext )
d . addCallback ( _after_download )
return d
2008-10-14 23:09:20 +00:00
2009-01-02 23:54:59 +00:00
def test_download_from_only_3_remaining_shares ( self ) :
""" Test download after 7 random shares (of the 10) have been removed. """
d = defer . succeed ( None )
def _then_delete_7 ( unused = None ) :
for i in range ( 7 ) :
self . _delete_a_share ( )
before_download_reads = self . _count_reads ( )
d . addCallback ( _then_delete_7 )
def _after_download ( unused = None ) :
after_download_reads = self . _count_reads ( )
2009-01-08 06:40:12 +00:00
self . failIf ( after_download_reads - before_download_reads > 27 , ( after_download_reads , before_download_reads ) )
2009-01-02 23:54:59 +00:00
d . addCallback ( self . _download_and_check_plaintext )
d . addCallback ( _after_download )
return d
2009-01-08 06:40:12 +00:00
def test_download_from_only_3_shares_with_good_crypttext_hash ( self ) :
""" Test download after 7 random shares (of the 10) have had their crypttext hash tree corrupted. """
d = defer . succeed ( None )
def _then_corrupt_7 ( unused = None ) :
shnums = range ( 10 )
random . shuffle ( shnums )
for i in shnums [ : 7 ] :
self . _corrupt_a_share ( None , _corrupt_offset_of_block_hashes_to_truncate_crypttext_hashes , i )
before_download_reads = self . _count_reads ( )
d . addCallback ( _then_corrupt_7 )
d . addCallback ( self . _download_and_check_plaintext )
return d
2009-01-02 23:54:59 +00:00
def test_download_abort_if_too_many_missing_shares ( self ) :
""" Test that download gives up quickly when it realizes there aren ' t enough shares out
there . """
d = defer . succeed ( None )
def _then_delete_8 ( unused = None ) :
for i in range ( 8 ) :
self . _delete_a_share ( )
d . addCallback ( _then_delete_8 )
before_download_reads = self . _count_reads ( )
def _attempt_to_download ( unused = None ) :
downloader = self . clients [ 1 ] . getServiceNamed ( " downloader " )
d = downloader . download_to_data ( self . uri )
def _callb ( res ) :
self . fail ( " Should have gotten an error from attempt to download, not %r " % ( res , ) )
def _errb ( f ) :
self . failUnless ( f . check ( NotEnoughSharesError ) )
d . addCallbacks ( _callb , _errb )
return d
d . addCallback ( _attempt_to_download )
def _after_attempt ( unused = None ) :
after_download_reads = self . _count_reads ( )
# To pass this test, you are required to give up before actually trying to read any
# share data.
self . failIf ( after_download_reads - before_download_reads > 0 , ( after_download_reads , before_download_reads ) )
d . addCallback ( _after_attempt )
return d
def test_download_abort_if_too_many_corrupted_shares ( self ) :
""" Test that download gives up quickly when it realizes there aren ' t enough uncorrupted
shares out there . It should be able to tell because the corruption occurs in the
sharedata version number , which it checks first . """
d = defer . succeed ( None )
def _then_corrupt_8 ( unused = None ) :
shnums = range ( 10 )
random . shuffle ( shnums )
for shnum in shnums [ : 8 ] :
self . _corrupt_a_share ( None , _corrupt_sharedata_version_number , shnum )
d . addCallback ( _then_corrupt_8 )
before_download_reads = self . _count_reads ( )
def _attempt_to_download ( unused = None ) :
downloader = self . clients [ 1 ] . getServiceNamed ( " downloader " )
d = downloader . download_to_data ( self . uri )
def _callb ( res ) :
self . fail ( " Should have gotten an error from attempt to download, not %r " % ( res , ) )
def _errb ( f ) :
self . failUnless ( f . check ( NotEnoughSharesError ) )
d . addCallbacks ( _callb , _errb )
return d
d . addCallback ( _attempt_to_download )
def _after_attempt ( unused = None ) :
after_download_reads = self . _count_reads ( )
# To pass this test, you are required to give up before reading all of the share
2009-01-03 18:41:09 +00:00
# data. Actually, we could give up sooner than 45 reads, but currently our download
# code does 45 reads. This test then serves as a "performance regression detector"
2009-01-02 23:54:59 +00:00
# -- if you change download code so that it takes *more* reads, then this test will
# fail.
2009-01-03 18:41:09 +00:00
self . failIf ( after_download_reads - before_download_reads > 45 , ( after_download_reads , before_download_reads ) )
2009-01-02 23:54:59 +00:00
d . addCallback ( _after_attempt )
return d
2008-10-14 23:09:20 +00:00
2008-09-25 17:16:53 +00:00
def test_check_without_verify ( self ) :
2008-10-14 23:09:20 +00:00
""" Check says the file is healthy when none of the shares have been touched. It says
that the file is unhealthy when all of them have been removed . It doesn ' t use any reads.
"""
2008-09-25 17:16:53 +00:00
d = defer . succeed ( self . filenode )
def _check1 ( filenode ) :
before_check_reads = self . _count_reads ( )
2008-10-22 08:38:18 +00:00
d2 = filenode . check ( Monitor ( ) , verify = False )
2008-09-25 17:16:53 +00:00
def _after_check ( checkresults ) :
after_check_reads = self . _count_reads ( )
self . failIf ( after_check_reads - before_check_reads > 0 , after_check_reads - before_check_reads )
self . failUnless ( checkresults . is_healthy ( ) )
d2 . addCallback ( _after_check )
return d2
d . addCallback ( _check1 )
2008-09-26 22:23:53 +00:00
d . addCallback ( lambda ignore : self . replace_shares ( { } , storage_index = self . uri . storage_index ) )
2008-10-14 23:09:20 +00:00
def _check2 ( ignored ) :
2008-09-25 17:16:53 +00:00
before_check_reads = self . _count_reads ( )
2008-10-22 08:38:18 +00:00
d2 = self . filenode . check ( Monitor ( ) , verify = False )
2008-09-25 17:16:53 +00:00
def _after_check ( checkresults ) :
after_check_reads = self . _count_reads ( )
self . failIf ( after_check_reads - before_check_reads > 0 , after_check_reads - before_check_reads )
self . failIf ( checkresults . is_healthy ( ) )
d2 . addCallback ( _after_check )
return d2
2008-10-14 23:09:20 +00:00
d . addCallback ( _check2 )
2008-09-25 17:16:53 +00:00
return d
2009-01-06 01:28:18 +00:00
    def _help_test_verify(self, corruptor_funcs, judgement_func):
        """For each corruptor in corruptor_funcs: corrupt one randomly chosen
        share, run a verifying check, apply judgement_func to the check
        results, then restore the pristine shares before the next round.
        Returns a Deferred."""
        LEEWAY = 18 # We'll allow you to pass this test even if you trigger eighteen times as many disk reads and blocks sends as would be optimal.
        DELTA_READS = 10 * LEEWAY # N = 10
        d = defer.succeed(None)

        # Stash the pristine share set so each round can start from it.
        d.addCallback(self.find_shares)
        stash = [None]
        def _stash_it(res):
            stash[0] = res
            return res
        d.addCallback(_stash_it)
        def _put_it_all_back(ignored):
            self.replace_shares(stash[0], storage_index=self.uri.storage_index)
            return ignored

        def _verify_after_corruption(corruptor_func):
            before_check_reads = self._count_reads()
            d2 = self.filenode.check(Monitor(), verify=True)
            def _after_check(checkresults):
                after_check_reads = self._count_reads()
                self.failIf(after_check_reads - before_check_reads > DELTA_READS, (after_check_reads, before_check_reads))
                try:
                    return judgement_func(checkresults)
                except Exception, le:
                    # Tag the failure with the corruptor's name so the failing
                    # case can be identified in the test output.
                    le.args = tuple(le.args + ("corruptor_func: " + corruptor_func.__name__,))
                    raise
            d2.addCallback(_after_check)
            return d2
        for corruptor_func in corruptor_funcs:
            # _corrupt_a_random_share returns corruptor_func, which flows into
            # _verify_after_corruption as its argument.
            d.addCallback(self._corrupt_a_random_share, corruptor_func)
            d.addCallback(_verify_after_corruption)
            d.addCallback(_put_it_all_back)
        return d
2008-12-31 21:18:38 +00:00
2009-01-06 01:28:18 +00:00
def test_verify_no_problem ( self ) :
""" Verify says the file is healthy when none of the shares have been touched in a way
that matters . It doesn ' t use more than seven times as many reads as it needs. " " "
def judge ( checkresults ) :
self . failUnless ( checkresults . is_healthy ( ) , ( checkresults , checkresults . is_healthy ( ) , checkresults . get_data ( ) ) )
data = checkresults . get_data ( )
self . failUnless ( data [ ' count-shares-good ' ] == 10 , data )
self . failUnless ( len ( data [ ' sharemap ' ] ) == 10 , data )
self . failUnless ( data [ ' count-shares-needed ' ] == 3 , data )
self . failUnless ( data [ ' count-shares-expected ' ] == 10 , data )
self . failUnless ( data [ ' count-good-share-hosts ' ] == 5 , data )
self . failUnless ( len ( data [ ' servers-responding ' ] ) == 5 , data )
self . failUnless ( len ( data [ ' list-corrupt-shares ' ] ) == 0 , data )
return self . _help_test_verify ( [
_corrupt_nothing ,
_corrupt_size_of_file_data ,
_corrupt_size_of_sharedata ,
_corrupt_segment_size , ] , judge )
def test_verify_server_visible_corruption ( self ) :
""" Corruption which is detected by the server means that the server will send you back
a Failure in response to get_bucket instead of giving you the share data . Test that
verifier handles these answers correctly . It doesn ' t use more than seven times as many
reads as it needs . """
def judge ( checkresults ) :
self . failIf ( checkresults . is_healthy ( ) , ( checkresults , checkresults . is_healthy ( ) , checkresults . get_data ( ) ) )
data = checkresults . get_data ( )
# The server might fail to serve up its other share as well as the corrupted
# one, so count-shares-good could be 8 or 9.
self . failUnless ( data [ ' count-shares-good ' ] in ( 8 , 9 ) , data )
self . failUnless ( len ( data [ ' sharemap ' ] ) in ( 8 , 9 , ) , data )
self . failUnless ( data [ ' count-shares-needed ' ] == 3 , data )
self . failUnless ( data [ ' count-shares-expected ' ] == 10 , data )
# The server may have served up the non-corrupted share, or it may not have, so
# the checker could have detected either 4 or 5 good servers.
self . failUnless ( data [ ' count-good-share-hosts ' ] in ( 4 , 5 ) , data )
self . failUnless ( len ( data [ ' servers-responding ' ] ) in ( 4 , 5 ) , data )
# If the server served up the other share, then the checker should consider it good, else it should
# not.
self . failUnless ( ( data [ ' count-shares-good ' ] == 9 ) == ( data [ ' count-good-share-hosts ' ] == 5 ) , data )
self . failUnless ( len ( data [ ' list-corrupt-shares ' ] ) == 0 , data )
return self . _help_test_verify ( [
_corrupt_file_version_number ,
] , judge )
def test_verify_share_incompatibility ( self ) :
def judge ( checkresults ) :
self . failIf ( checkresults . is_healthy ( ) , ( checkresults , checkresults . is_healthy ( ) , checkresults . get_data ( ) ) )
data = checkresults . get_data ( )
self . failUnless ( data [ ' count-shares-good ' ] == 9 , data )
self . failUnless ( len ( data [ ' sharemap ' ] ) == 9 , data )
self . failUnless ( data [ ' count-shares-needed ' ] == 3 , data )
self . failUnless ( data [ ' count-shares-expected ' ] == 10 , data )
self . failUnless ( data [ ' count-good-share-hosts ' ] == 5 , data )
self . failUnless ( len ( data [ ' servers-responding ' ] ) == 5 , data )
self . failUnless ( len ( data [ ' list-corrupt-shares ' ] ) == 1 , data )
self . failUnless ( len ( data [ ' list-corrupt-shares ' ] ) == data [ ' count-corrupt-shares ' ] , data )
self . failUnless ( len ( data [ ' list-incompatible-shares ' ] ) == data [ ' count-incompatible-shares ' ] , data )
self . failUnless ( len ( data [ ' list-incompatible-shares ' ] ) == 0 , data )
return self . _help_test_verify ( [
2008-12-31 21:18:38 +00:00
_corrupt_sharedata_version_number ,
2009-01-06 01:28:18 +00:00
] , judge )
def test_verify_server_invisible_corruption ( self ) :
def judge ( checkresults ) :
self . failIf ( checkresults . is_healthy ( ) , ( checkresults , checkresults . is_healthy ( ) , checkresults . get_data ( ) ) )
data = checkresults . get_data ( )
self . failUnless ( data [ ' count-shares-good ' ] == 9 , data )
self . failUnless ( data [ ' count-shares-needed ' ] == 3 , data )
self . failUnless ( data [ ' count-shares-expected ' ] == 10 , data )
self . failUnless ( data [ ' count-good-share-hosts ' ] == 5 , data )
self . failUnless ( data [ ' count-corrupt-shares ' ] == 1 , ( data , ) )
self . failUnless ( len ( data [ ' list-corrupt-shares ' ] ) == 1 , data )
self . failUnless ( len ( data [ ' list-corrupt-shares ' ] ) == data [ ' count-corrupt-shares ' ] , data )
self . failUnless ( len ( data [ ' list-incompatible-shares ' ] ) == data [ ' count-incompatible-shares ' ] , data )
self . failUnless ( len ( data [ ' list-incompatible-shares ' ] ) == 0 , data )
self . failUnless ( len ( data [ ' servers-responding ' ] ) == 5 , data )
self . failUnless ( len ( data [ ' sharemap ' ] ) == 9 , data )
return self . _help_test_verify ( [
2008-10-14 23:09:20 +00:00
_corrupt_offset_of_sharedata ,
_corrupt_offset_of_uri_extension ,
2009-01-03 18:44:27 +00:00
_corrupt_offset_of_uri_extension_to_force_short_read ,
2008-10-14 23:09:20 +00:00
_corrupt_share_data ,
_corrupt_length_of_uri_extension ,
_corrupt_uri_extension ,
2009-01-06 01:28:18 +00:00
] , judge )
2009-01-08 06:40:12 +00:00
def test_verify_server_invisible_corruption_offset_of_block_hashtree_to_truncate_crypttext_hashtree_TODO ( self ) :
def judge ( checkresults ) :
self . failIf ( checkresults . is_healthy ( ) , ( checkresults , checkresults . is_healthy ( ) , checkresults . get_data ( ) ) )
data = checkresults . get_data ( )
self . failUnless ( data [ ' count-shares-good ' ] == 9 , data )
self . failUnless ( data [ ' count-shares-needed ' ] == 3 , data )
self . failUnless ( data [ ' count-shares-expected ' ] == 10 , data )
self . failUnless ( data [ ' count-good-share-hosts ' ] == 5 , data )
self . failUnless ( data [ ' count-corrupt-shares ' ] == 1 , ( data , ) )
self . failUnless ( len ( data [ ' list-corrupt-shares ' ] ) == 1 , data )
self . failUnless ( len ( data [ ' list-corrupt-shares ' ] ) == data [ ' count-corrupt-shares ' ] , data )
self . failUnless ( len ( data [ ' list-incompatible-shares ' ] ) == data [ ' count-incompatible-shares ' ] , data )
self . failUnless ( len ( data [ ' list-incompatible-shares ' ] ) == 0 , data )
self . failUnless ( len ( data [ ' servers-responding ' ] ) == 5 , data )
self . failUnless ( len ( data [ ' sharemap ' ] ) == 9 , data )
return self . _help_test_verify ( [
_corrupt_offset_of_block_hashes_to_truncate_crypttext_hashes ,
] , judge )
test_verify_server_invisible_corruption_offset_of_block_hashtree_to_truncate_crypttext_hashtree_TODO . todo = " Verifier doesn ' t yet properly detect this kind of corruption. "
2009-01-06 01:28:18 +00:00
def test_verify_server_invisible_corruption_offset_of_block_hashtree_TODO ( self ) :
def judge ( checkresults ) :
self . failIf ( checkresults . is_healthy ( ) , ( checkresults , checkresults . is_healthy ( ) , checkresults . get_data ( ) ) )
data = checkresults . get_data ( )
self . failUnless ( data [ ' count-shares-good ' ] == 9 , data )
self . failUnless ( data [ ' count-shares-needed ' ] == 3 , data )
self . failUnless ( data [ ' count-shares-expected ' ] == 10 , data )
self . failUnless ( data [ ' count-good-share-hosts ' ] == 5 , data )
self . failUnless ( data [ ' count-corrupt-shares ' ] == 1 , ( data , ) )
self . failUnless ( len ( data [ ' list-corrupt-shares ' ] ) == 1 , data )
self . failUnless ( len ( data [ ' list-corrupt-shares ' ] ) == data [ ' count-corrupt-shares ' ] , data )
self . failUnless ( len ( data [ ' list-incompatible-shares ' ] ) == data [ ' count-incompatible-shares ' ] , data )
self . failUnless ( len ( data [ ' list-incompatible-shares ' ] ) == 0 , data )
self . failUnless ( len ( data [ ' servers-responding ' ] ) == 5 , data )
self . failUnless ( len ( data [ ' sharemap ' ] ) == 9 , data )
return self . _help_test_verify ( [
_corrupt_offset_of_block_hashes ,
] , judge )
test_verify_server_invisible_corruption_offset_of_block_hashtree_TODO . todo = " Verifier doesn ' t yet properly detect this kind of corruption. "
def test_verify_server_invisible_corruption_sharedata_plausible_version ( self ) :
def judge ( checkresults ) :
self . failIf ( checkresults . is_healthy ( ) , ( checkresults , checkresults . is_healthy ( ) , checkresults . get_data ( ) ) )
data = checkresults . get_data ( )
self . failUnless ( data [ ' count-shares-good ' ] == 9 , data )
self . failUnless ( data [ ' count-shares-needed ' ] == 3 , data )
self . failUnless ( data [ ' count-shares-expected ' ] == 10 , data )
self . failUnless ( data [ ' count-good-share-hosts ' ] == 5 , data )
self . failUnless ( data [ ' count-corrupt-shares ' ] == 1 , ( data , ) )
self . failUnless ( len ( data [ ' list-corrupt-shares ' ] ) == 1 , data )
self . failUnless ( len ( data [ ' list-corrupt-shares ' ] ) == data [ ' count-corrupt-shares ' ] , data )
self . failUnless ( len ( data [ ' list-incompatible-shares ' ] ) == data [ ' count-incompatible-shares ' ] , data )
self . failUnless ( len ( data [ ' list-incompatible-shares ' ] ) == 0 , data )
self . failUnless ( len ( data [ ' servers-responding ' ] ) == 5 , data )
self . failUnless ( len ( data [ ' sharemap ' ] ) == 9 , data )
return self . _help_test_verify ( [
_corrupt_sharedata_version_number_to_plausible_version ,
] , judge )
def test_verify_server_invisible_corruption_offset_of_share_hashtree_TODO ( self ) :
def judge ( checkresults ) :
self . failIf ( checkresults . is_healthy ( ) , ( checkresults , checkresults . is_healthy ( ) , checkresults . get_data ( ) ) )
data = checkresults . get_data ( )
self . failUnless ( data [ ' count-shares-good ' ] == 9 , data )
self . failUnless ( data [ ' count-shares-needed ' ] == 3 , data )
self . failUnless ( data [ ' count-shares-expected ' ] == 10 , data )
self . failUnless ( data [ ' count-good-share-hosts ' ] == 5 , data )
self . failUnless ( data [ ' count-corrupt-shares ' ] == 1 , ( data , ) )
self . failUnless ( len ( data [ ' list-corrupt-shares ' ] ) == 1 , data )
self . failUnless ( len ( data [ ' list-corrupt-shares ' ] ) == data [ ' count-corrupt-shares ' ] , data )
self . failUnless ( len ( data [ ' list-incompatible-shares ' ] ) == data [ ' count-incompatible-shares ' ] , data )
self . failUnless ( len ( data [ ' list-incompatible-shares ' ] ) == 0 , data )
self . failUnless ( len ( data [ ' servers-responding ' ] ) == 5 , data )
self . failUnless ( len ( data [ ' sharemap ' ] ) == 9 , data )
return self . _help_test_verify ( [
_corrupt_offset_of_share_hashes ,
] , judge )
test_verify_server_invisible_corruption_offset_of_share_hashtree_TODO . todo = " Verifier doesn ' t yet properly detect this kind of corruption. "
def test_verify_server_invisible_corruption_offset_of_ciphertext_hashtree_TODO ( self ) :
def judge ( checkresults ) :
self . failIf ( checkresults . is_healthy ( ) , ( checkresults , checkresults . is_healthy ( ) , checkresults . get_data ( ) ) )
data = checkresults . get_data ( )
self . failUnless ( data [ ' count-shares-good ' ] == 9 , data )
self . failUnless ( data [ ' count-shares-needed ' ] == 3 , data )
self . failUnless ( data [ ' count-shares-expected ' ] == 10 , data )
self . failUnless ( data [ ' count-good-share-hosts ' ] == 5 , data )
self . failUnless ( data [ ' count-corrupt-shares ' ] == 1 , ( data , ) )
self . failUnless ( len ( data [ ' list-corrupt-shares ' ] ) == 1 , data )
self . failUnless ( len ( data [ ' list-corrupt-shares ' ] ) == data [ ' count-corrupt-shares ' ] , data )
self . failUnless ( len ( data [ ' list-incompatible-shares ' ] ) == data [ ' count-incompatible-shares ' ] , data )
self . failUnless ( len ( data [ ' list-incompatible-shares ' ] ) == 0 , data )
self . failUnless ( len ( data [ ' servers-responding ' ] ) == 5 , data )
self . failUnless ( len ( data [ ' sharemap ' ] ) == 9 , data )
return self . _help_test_verify ( [
_corrupt_offset_of_ciphertext_hash_tree ,
] , judge )
test_verify_server_invisible_corruption_offset_of_ciphertext_hashtree_TODO . todo = " Verifier doesn ' t yet properly detect this kind of corruption. "
def test_verify_server_invisible_corruption_cryptext_hash_tree_TODO ( self ) :
def judge ( checkresults ) :
self . failIf ( checkresults . is_healthy ( ) , ( checkresults , checkresults . is_healthy ( ) , checkresults . get_data ( ) ) )
data = checkresults . get_data ( )
self . failUnless ( data [ ' count-shares-good ' ] == 9 , data )
self . failUnless ( data [ ' count-shares-needed ' ] == 3 , data )
self . failUnless ( data [ ' count-shares-expected ' ] == 10 , data )
self . failUnless ( data [ ' count-good-share-hosts ' ] == 5 , data )
self . failUnless ( data [ ' count-corrupt-shares ' ] == 1 , ( data , ) )
self . failUnless ( len ( data [ ' list-corrupt-shares ' ] ) == 1 , data )
self . failUnless ( len ( data [ ' list-corrupt-shares ' ] ) == data [ ' count-corrupt-shares ' ] , data )
self . failUnless ( len ( data [ ' list-incompatible-shares ' ] ) == data [ ' count-incompatible-shares ' ] , data )
self . failUnless ( len ( data [ ' list-incompatible-shares ' ] ) == 0 , data )
self . failUnless ( len ( data [ ' servers-responding ' ] ) == 5 , data )
self . failUnless ( len ( data [ ' sharemap ' ] ) == 9 , data )
return self . _help_test_verify ( [
_corrupt_crypttext_hash_tree ,
] , judge )
test_verify_server_invisible_corruption_cryptext_hash_tree_TODO . todo = " Verifier doesn ' t yet properly detect this kind of corruption. "
def test_verify_server_invisible_corruption_block_hash_tree_TODO ( self ) :
def judge ( checkresults ) :
self . failIf ( checkresults . is_healthy ( ) , ( checkresults , checkresults . is_healthy ( ) , checkresults . get_data ( ) ) )
data = checkresults . get_data ( )
self . failUnless ( data [ ' count-shares-good ' ] == 9 , data )
self . failUnless ( data [ ' count-shares-needed ' ] == 3 , data )
self . failUnless ( data [ ' count-shares-expected ' ] == 10 , data )
self . failUnless ( data [ ' count-good-share-hosts ' ] == 5 , data )
self . failUnless ( data [ ' count-corrupt-shares ' ] == 1 , ( data , ) )
self . failUnless ( len ( data [ ' list-corrupt-shares ' ] ) == 1 , data )
self . failUnless ( len ( data [ ' list-corrupt-shares ' ] ) == data [ ' count-corrupt-shares ' ] , data )
self . failUnless ( len ( data [ ' list-incompatible-shares ' ] ) == data [ ' count-incompatible-shares ' ] , data )
self . failUnless ( len ( data [ ' list-incompatible-shares ' ] ) == 0 , data )
self . failUnless ( len ( data [ ' servers-responding ' ] ) == 5 , data )
self . failUnless ( len ( data [ ' sharemap ' ] ) == 9 , data )
return self . _help_test_verify ( [
_corrupt_block_hashes ,
] , judge )
test_verify_server_invisible_corruption_block_hash_tree_TODO . todo = " Verifier doesn ' t yet properly detect this kind of corruption. "
2008-09-25 17:16:53 +00:00
2009-01-06 20:04:49 +00:00
def test_verify_server_invisible_corruption_share_hash_tree_TODO ( self ) :
def judge ( checkresults ) :
self . failIf ( checkresults . is_healthy ( ) , ( checkresults , checkresults . is_healthy ( ) , checkresults . get_data ( ) ) )
data = checkresults . get_data ( )
self . failUnless ( data [ ' count-shares-good ' ] == 9 , data )
self . failUnless ( data [ ' count-shares-needed ' ] == 3 , data )
self . failUnless ( data [ ' count-shares-expected ' ] == 10 , data )
self . failUnless ( data [ ' count-good-share-hosts ' ] == 5 , data )
self . failUnless ( data [ ' count-corrupt-shares ' ] == 1 , ( data , ) )
self . failUnless ( len ( data [ ' list-corrupt-shares ' ] ) == 1 , data )
self . failUnless ( len ( data [ ' list-corrupt-shares ' ] ) == data [ ' count-corrupt-shares ' ] , data )
self . failUnless ( len ( data [ ' list-incompatible-shares ' ] ) == data [ ' count-incompatible-shares ' ] , data )
self . failUnless ( len ( data [ ' list-incompatible-shares ' ] ) == 0 , data )
self . failUnless ( len ( data [ ' servers-responding ' ] ) == 5 , data )
self . failUnless ( len ( data [ ' sharemap ' ] ) == 9 , data )
return self . _help_test_verify ( [
_corrupt_share_hashes ,
] , judge )
test_verify_server_invisible_corruption_share_hash_tree_TODO . todo = " Verifier doesn ' t yet properly detect this kind of corruption. "
2008-09-25 17:16:53 +00:00
    def test_repair(self):
        """Repair replaces a share that got deleted.

        Three phases, chained on one Deferred:
          1. delete share #2, repair, and prove repair worked by deleting
             seven more shares and downloading anyway;
          2. repair again to restore those seven shares;
          3. for each corruptor function, corrupt a random share and repair.

        Each repair is bounded in cost: at most DELTA_READS reads and (per
        missing share) DELTA_ALLOCATES allocates.
        """
        # N == 10. 7 is the "efficiency leeway" -- we'll allow you to pass this test even if
        # you trigger seven times as many disk reads and blocks sends as would be optimal.
        DELTA_READS = 10 * 7
        # We'll allow you to pass this test only if you repair the missing share using only a
        # single allocate.
        DELTA_ALLOCATES = 1

        d = defer.succeed(self.filenode)
        d.addCallback(self._delete_a_share, sharenum=2)

        def _repair_from_deletion_of_1(filenode):
            # Snapshot server-side counters so we can measure the repair cost.
            before_repair_reads = self._count_reads()
            before_repair_allocates = self._count_allocates()

            d2 = filenode.check_and_repair(Monitor(), verify=False)
            def _after_repair(checkandrepairresults):
                assert isinstance(checkandrepairresults, check_results.CheckAndRepairResults), checkandrepairresults
                prerepairres = checkandrepairresults.get_pre_repair_results()
                assert isinstance(prerepairres, check_results.CheckResults), prerepairres
                postrepairres = checkandrepairresults.get_post_repair_results()
                assert isinstance(postrepairres, check_results.CheckResults), postrepairres
                after_repair_reads = self._count_reads()
                after_repair_allocates = self._count_allocates()
                # print "delta was ", after_repair_reads - before_repair_reads, after_repair_allocates - before_repair_allocates
                # Repair must stay within budget and actually flip the file
                # from unhealthy to healthy.
                self.failIf(after_repair_reads - before_repair_reads > DELTA_READS)
                self.failIf(after_repair_allocates - before_repair_allocates > DELTA_ALLOCATES)
                self.failIf(prerepairres.is_healthy())
                self.failUnless(postrepairres.is_healthy())

                # Now we inspect the filesystem to make sure that it has 10 shares.
                shares = self.find_shares()
                self.failIf(len(shares) < 10)

                # Now we delete seven of the other shares, then try to download the file and
                # assert that it succeeds at downloading and has the right contents. This can't
                # work unless it has already repaired the previously-deleted share #2.
                for sharenum in range(3, 10):
                    self._delete_a_share(sharenum=sharenum)

                return self._download_and_check_plaintext()
            d2.addCallback(_after_repair)
            return d2
        d.addCallback(_repair_from_deletion_of_1)

        # Now we repair again to get all of those 7 back...
        # NOTE(review): the value this callback receives is the result of the
        # previous callback's _download_and_check_plaintext(), not obviously
        # the filenode itself -- confirm that the Deferred chain really
        # threads a filenode through here (and into _repair_from_corruption
        # below, which receives _corrupt_a_random_share's result).
        def _repair_from_deletion_of_7(filenode):
            before_repair_reads = self._count_reads()
            before_repair_allocates = self._count_allocates()

            d2 = filenode.check_and_repair(Monitor(), verify=False)
            def _after_repair(checkandrepairresults):
                prerepairres = checkandrepairresults.get_pre_repair_results()
                postrepairres = checkandrepairresults.get_post_repair_results()
                after_repair_reads = self._count_reads()
                after_repair_allocates = self._count_allocates()
                # print "delta was ", after_repair_reads - before_repair_reads, after_repair_allocates - before_repair_allocates
                self.failIf(after_repair_reads - before_repair_reads > DELTA_READS)
                # Seven shares were missing, so seven allocates are allowed.
                self.failIf(after_repair_allocates - before_repair_allocates > (DELTA_ALLOCATES * 7))
                self.failIf(prerepairres.is_healthy())
                self.failUnless(postrepairres.is_healthy())
                # Now we inspect the filesystem to make sure that it has 10 shares.
                shares = self.find_shares()
                self.failIf(len(shares) < 10)

                return self._download_and_check_plaintext()
            d2.addCallback(_after_repair)
            return d2
        d.addCallback(_repair_from_deletion_of_7)

        def _repair_from_corruption(filenode):
            # Same cost/health checks as above, but after corrupting (rather
            # than deleting) a single random share.
            before_repair_reads = self._count_reads()
            before_repair_allocates = self._count_allocates()

            d2 = filenode.check_and_repair(Monitor(), verify=False)
            def _after_repair(checkandrepairresults):
                prerepairres = checkandrepairresults.get_pre_repair_results()
                postrepairres = checkandrepairresults.get_post_repair_results()
                after_repair_reads = self._count_reads()
                after_repair_allocates = self._count_allocates()
                # print "delta was ", after_repair_reads - before_repair_reads, after_repair_allocates - before_repair_allocates
                self.failIf(after_repair_reads - before_repair_reads > DELTA_READS)
                self.failIf(after_repair_allocates - before_repair_allocates > DELTA_ALLOCATES)
                self.failIf(prerepairres.is_healthy())
                self.failUnless(postrepairres.is_healthy())

                return self._download_and_check_plaintext()
            d2.addCallback(_after_repair)
            return d2

        # Exercise repair once per kind of share corruption.
        for corruptor_func in (
            _corrupt_file_version_number,
            _corrupt_sharedata_version_number,
            _corrupt_sharedata_version_number_to_plausible_version,
            _corrupt_offset_of_sharedata,
            _corrupt_offset_of_ciphertext_hash_tree,
            _corrupt_offset_of_block_hashes,
            _corrupt_offset_of_share_hashes,
            _corrupt_offset_of_uri_extension,
            _corrupt_share_data,
            _corrupt_crypttext_hash_tree,
            _corrupt_block_hashes,
            _corrupt_share_hashes,
            _corrupt_length_of_uri_extension,
            _corrupt_uri_extension,
            ):
            # Now we corrupt a share...
            d.addCallback(self._corrupt_a_random_share, corruptor_func)
            # And repair...
            d.addCallback(_repair_from_corruption)

        return d
    test_repair.todo = "We haven't implemented a repairer yet."
# XXX extend these tests to show that the checker detects which specific share on which specific server is broken -- this is necessary so that the checker results can be passed to the repairer and the repairer can go ahead and upload fixes without first doing what is effectively a check (/verify) run
# XXX extend these tests to show bad behavior of various kinds from servers: raising an exception from each remote_foo() method, for example
# XXX test disconnect (DeadReferenceError) from get_buckets and get_block_whatsit
# XXX test corruption that truncates other hash trees than just the crypttext hash tree