# 2020-09-22 12:36:39 +00:00
"""
This module has been ported to Python 3.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from future . utils import PY2
if PY2 :
from future . builtins import filter , map , zip , ascii , chr , hex , input , next , oct , open , pow , round , super , bytes , dict , list , object , range , str , max , min # noqa: F401
# 2015-07-17 21:03:53 +00:00
# 2009-01-12 22:41:20 +00:00
import random
# 2008-09-25 17:16:53 +00:00
# 2011-08-01 17:44:44 +00:00
from twisted . trial import unittest
from twisted . internet import defer
# 2010-09-09 04:16:54 +00:00
from foolscap . api import eventually
# 2011-08-01 17:44:44 +00:00
from allmydata . test import common
from allmydata . test . no_network import GridTestMixin
from allmydata . test . common import TEST_DATA
from allmydata import uri
# 2010-09-09 04:16:54 +00:00
from allmydata . util import log
# 2011-08-01 17:44:44 +00:00
from allmydata . util . consumer import download_to_data
# 2010-09-09 04:16:54 +00:00
# 2011-08-01 17:44:44 +00:00
from allmydata . interfaces import NotEnoughSharesError
from allmydata . immutable . upload import Data
# 2010-09-09 04:16:54 +00:00
from allmydata . immutable . downloader import finder
# 2020-09-28 20:49:30 +00:00
from allmydata . immutable . literal import LiteralFileNode
# 2010-09-09 04:16:54 +00:00
# 2019-05-31 17:41:07 +00:00
from . no_network import (
NoNetworkServer ,
)
# 2015-07-17 21:03:53 +00:00
class MockShareHashTree(object):
    """Stub share-hash tree whose answer is always "nothing needed"."""

    def needed_hashes(self):
        # The real hash tree would report which internal nodes are still
        # missing; for these tests there are never any missing hashes.
        return False
# 2010-09-09 04:16:54 +00:00
class MockNode(object):
    """Minimal stand-in for a download node, used to exercise ShareFinder.

    Counts the shares it is handed and fires ``finished_d`` with True once
    three shares have arrived; errbacks instead when a checked invariant is
    violated (reneging on "no more shares", or a reported fetch failure).
    """
    def __init__(self, check_reneging, check_fetch_failed):
        # check_reneging: fail the test if a share arrives after
        #   no_more_shares() was called (ticket #1191 behavior).
        # check_fetch_failed: fail the test if fetch_failed() is called.
        self.got = 0
        self.finished_d = defer.Deferred()
        self.segment_size = 78
        self.guessed_segment_size = 78
        self._no_more_shares = False
        self.check_reneging = check_reneging
        self.check_fetch_failed = check_fetch_failed
        self._si_prefix = "aa"
        self.have_UEB = True
        self.share_hash_tree = MockShareHashTree()
        self.on_want_more_shares = None

    def when_finished(self):
        """Return the Deferred that fires when the test outcome is known."""
        return self.finished_d

    def get_num_segments(self):
        # (num_segments, authoritative)
        return (5, True)

    def _calculate_sizes(self, guessed_segment_size):
        return {'block_size': 4, 'num_segments': 5}

    def no_more_shares(self):
        self._no_more_shares = True

    def got_shares(self, shares):
        if self.check_reneging:
            if self._no_more_shares:
                # The finder promised it was done, then delivered anyway.
                self.finished_d.errback(unittest.FailTest("The node was told by the share finder that it is destined to remain hungry, then was given another share."))
                return
        self.got += len(shares)
        log.msg("yyy 3 %s.got_shares(%s) got: %s" % (self, shares, self.got))
        if self.got == 3:
            self.finished_d.callback(True)

    def get_desired_ciphertext_hashes(self, *args, **kwargs):
        return iter([])

    def fetch_failed(self, *args, **kwargs):
        if self.check_fetch_failed:
            if self.finished_d:
                self.finished_d.errback(unittest.FailTest("The node was told by the segment fetcher that the download failed."))
                self.finished_d = None

    def want_more_shares(self):
        if self.on_want_more_shares:
            self.on_want_more_shares()

    def process_blocks(self, *args, **kwargs):
        if self.finished_d:
            self.finished_d.callback(None)
class TestShareFinder(unittest.TestCase):
    """Unit test for immutable.downloader.finder.ShareFinder."""

    def test_no_reneging_on_no_more_shares_ever(self):
        # ticket #1191
        # Suppose that K=3 and you send two DYHB requests, the first
        # response offers two shares, and then the last offers one
        # share. If you tell your share consumer "no more shares,
        # ever", and then immediately tell them "oh, and here's
        # another share", then you lose.

        rcap = uri.CHKFileURI(b'a'*32, b'a'*32, 3, 99, 100)
        vcap = rcap.get_verify_cap()

        class MockBuckets(object):
            pass

        class MockServer(object):
            """Fake storage server that answers every remote call with its
            canned bucket dict, and immediately re-hungers the finder."""
            def __init__(self, buckets):
                self.version = {
                    b'http://allmydata.org/tahoe/protocols/storage/v1': {
                        b"tolerates-immutable-read-overrun": True
                    }
                }
                self.buckets = buckets
                self.d = defer.Deferred()
                self.s = None  # the ShareFinder; wired up by the test below
            def callRemote(self, methname, *args, **kwargs):
                d = defer.Deferred()

                # Even after the 3rd answer we're still hungry because
                # we're interested in finding a share on a 3rd server
                # so we don't have to download more than one share
                # from the first server. This is actually necessary to
                # trigger the bug.
                def _give_buckets_and_hunger_again():
                    d.callback(self.buckets)
                    self.s.hungry()
                eventually(_give_buckets_and_hunger_again)
                return d

        class MockStorageBroker(object):
            def __init__(self, servers):
                self.servers = servers
            def get_servers_for_psi(self, si):
                return self.servers

        class MockDownloadStatus(object):
            def add_dyhb_request(self, server, when):
                return MockDYHBEvent()

        class MockDYHBEvent(object):
            def finished(self, shnums, when):
                pass

        # server1 offers two shares, server2 none, server3 one: together
        # that satisfies K=3 but only after a "no more shares" temptation.
        mockserver1 = MockServer({1: MockBuckets(), 2: MockBuckets()})
        mockserver2 = MockServer({})
        mockserver3 = MockServer({3: MockBuckets()})
        servers = [NoNetworkServer(b"ms1", mockserver1),
                   NoNetworkServer(b"ms2", mockserver2),
                   NoNetworkServer(b"ms3", mockserver3), ]
        mockstoragebroker = MockStorageBroker(servers)
        mockdownloadstatus = MockDownloadStatus()
        mocknode = MockNode(check_reneging=True, check_fetch_failed=True)

        s = finder.ShareFinder(mockstoragebroker, vcap, mocknode,
                               mockdownloadstatus)

        mockserver1.s = s
        mockserver2.s = s
        mockserver3.s = s

        s.hungry()

        return mocknode.when_finished()
# 2011-08-01 17:44:44 +00:00
class Test(GridTestMixin, unittest.TestCase, common.ShouldFailMixin):
    """Immutable-download behavior against a no-network grid.

    Each test calls startup() to build a 5-server grid and upload
    TEST_DATA, then deletes or corrupts shares before downloading.
    """

    def startup(self, basedir):
        """Build the grid, upload TEST_DATA, stash self.uri/self.filenode.

        Returns a Deferred that fires with the file's URI.
        """
        self.basedir = basedir
        self.set_up_grid(num_clients=2, num_servers=5)
        c1 = self.g.clients[1]
        # We need multiple segments to test crypttext hash trees that are
        # non-trivial (i.e. they have more than just one hash in them).
        c1.encoding_params['max_segment_size'] = 12
        # Tests that need to test servers of happiness using this should
        # set their own value for happy -- the default (7) breaks stuff.
        c1.encoding_params['happy'] = 1
        d = c1.upload(Data(TEST_DATA, convergence=b""))
        def _after_upload(ur):
            self.uri = ur.get_uri()
            # download with a different client than the one that uploaded
            self.filenode = self.g.clients[0].create_node_from_uri(ur.get_uri())
            return self.uri
        d.addCallback(_after_upload)
        return d

    def _stash_shares(self, shares):
        # keep the copied share set so a test can restore it later
        self.shares = shares

    def _download_and_check_plaintext(self, ign=None):
        """Download the file, verify it equals TEST_DATA, and return (via
        Deferred) the number of storage-server reads the download used."""
        num_reads = self._count_reads()
        d = download_to_data(self.filenode)
        def _after_download(result):
            self.failUnlessEqual(result, TEST_DATA)
            return self._count_reads() - num_reads
        d.addCallback(_after_download)
        return d

    def _shuffled(self, num_shnums):
        # a random sample of num_shnums distinct share numbers from 0..9
        shnums = list(range(10))
        random.shuffle(shnums)
        return shnums[:num_shnums]

    def _count_reads(self):
        # total 'read' operations summed across every storage server
        return sum([s.stats_provider.get_stats()['counters'].get('storage_server.read', 0)
                    for s in self.g.servers_by_number.values()])

    def _count_allocates(self):
        # total 'allocate' operations summed across every storage server
        return sum([s.stats_provider.get_stats()['counters'].get('storage_server.allocate', 0)
                    for s in self.g.servers_by_number.values()])

    def _count_writes(self):
        # total 'write' operations summed across every storage server
        return sum([s.stats_provider.get_stats()['counters'].get('storage_server.write', 0)
                    for s in self.g.servers_by_number.values()])

    def test_test_code(self):
        # The following process of stashing the shares, running
        # replace_shares, and asserting that the new set of shares equals the
        # old is more to test this test code than to test the Tahoe code...
        d = self.startup("immutable/Test/code")
        d.addCallback(self.copy_shares)
        d.addCallback(self._stash_shares)
        d.addCallback(self._download_and_check_plaintext)

        # The following process of deleting 8 of the shares and asserting
        # that you can't download it is more to test this test code than to
        # test the Tahoe code...
        def _then_delete_8(ign):
            self.restore_all_shares(self.shares)
            self.delete_shares_numbered(self.uri, range(8))
        d.addCallback(_then_delete_8)
        d.addCallback(lambda ign:
                      self.shouldFail(NotEnoughSharesError, "download-2",
                                      "ran out of shares",
                                      download_to_data, self.filenode))
        return d

    def test_download(self):
        """Basic download. (This functionality is more or less already
        tested by test code in other modules, but this module is also going
        to test some more specific things about immutable download.)
        """
        d = self.startup("immutable/Test/download")
        d.addCallback(self._download_and_check_plaintext)
        def _after_download(ign):
            num_reads = self._count_reads()
            #print(num_reads)
            self.failIf(num_reads > 41, num_reads)
        d.addCallback(_after_download)
        return d

    def test_download_from_only_3_remaining_shares(self):
        """Test download after 7 random shares (of the 10) have been
        removed."""
        d = self.startup("immutable/Test/download_from_only_3_remaining_shares")
        d.addCallback(lambda ign:
                      self.delete_shares_numbered(self.uri, range(7)))
        d.addCallback(self._download_and_check_plaintext)
        def _after_download(num_reads):
            #print(num_reads)
            self.failIf(num_reads > 41, num_reads)
        d.addCallback(_after_download)
        return d

    def test_download_from_only_3_shares_with_good_crypttext_hash(self):
        """Test download after 7 random shares (of the 10) have had their
        crypttext hash tree corrupted."""
        d = self.startup("download_from_only_3_shares_with_good_crypttext_hash")
        def _corrupt_7(ign):
            c = common._corrupt_offset_of_block_hashes_to_truncate_crypttext_hashes
            self.corrupt_shares_numbered(self.uri, self._shuffled(7), c)
        d.addCallback(_corrupt_7)
        d.addCallback(self._download_and_check_plaintext)
        return d

    def test_download_abort_if_too_many_missing_shares(self):
        """Test that download gives up quickly when it realizes there aren't
        enough shares out there."""
        d = self.startup("download_abort_if_too_many_missing_shares")
        d.addCallback(lambda ign:
                      self.delete_shares_numbered(self.uri, range(8)))
        d.addCallback(lambda ign:
                      self.shouldFail(NotEnoughSharesError, "delete 8",
                                      "Last failure: None",
                                      download_to_data, self.filenode))
        # the new downloader pipelines a bunch of read requests in parallel,
        # so don't bother asserting anything about the number of reads
        return d

    def test_download_abort_if_too_many_corrupted_shares(self):
        """Test that download gives up quickly when it realizes there aren't
        enough uncorrupted shares out there. It should be able to tell
        because the corruption occurs in the sharedata version number, which
        it checks first."""
        d = self.startup("download_abort_if_too_many_corrupted_shares")
        def _corrupt_8(ign):
            c = common._corrupt_sharedata_version_number
            self.corrupt_shares_numbered(self.uri, self._shuffled(8), c)
        d.addCallback(_corrupt_8)
        def _try_download(ign):
            start_reads = self._count_reads()
            d2 = self.shouldFail(NotEnoughSharesError, "corrupt 8",
                                 "LayoutInvalid",
                                 download_to_data, self.filenode)
            def _check_numreads(ign):
                num_reads = self._count_reads() - start_reads
                #print(num_reads)

                # To pass this test, you are required to give up before
                # reading all of the share data. Actually, we could give up
                # sooner than 45 reads, but currently our download code does
                # 45 reads. This test then serves as a "performance
                # regression detector" -- if you change download code so that
                # it takes *more* reads, then this test will fail.
                self.failIf(num_reads > 45, num_reads)
            d2.addCallback(_check_numreads)
            return d2
        d.addCallback(_try_download)
        return d

    def test_download_to_data(self):
        d = self.startup("download_to_data")
        d.addCallback(lambda ign: self.filenode.download_to_data())
        d.addCallback(lambda data:
                      self.failUnlessEqual(data, common.TEST_DATA))
        return d

    def test_download_best_version(self):
        d = self.startup("download_best_version")
        d.addCallback(lambda ign: self.filenode.download_best_version())
        d.addCallback(lambda data:
                      self.failUnlessEqual(data, common.TEST_DATA))
        return d

    def test_get_best_readable_version(self):
        d = self.startup("get_best_readable_version")
        d.addCallback(lambda ign: self.filenode.get_best_readable_version())
        d.addCallback(lambda n2:
                      self.failUnlessEqual(n2, self.filenode))
        return d

    def test_get_size_of_best_version(self):
        d = self.startup("get_size_of_best_version")
        d.addCallback(lambda ign: self.filenode.get_size_of_best_version())
        d.addCallback(lambda size:
                      self.failUnlessEqual(size, len(common.TEST_DATA)))
        return d
# 2008-12-31 21:18:38 +00:00
# 2020-09-28 20:49:30 +00:00
class LiteralFileNodeTests(unittest.TestCase):
    """Tests for LiteralFileNode."""

    def test_equality(self):
        """LiteralFileNodes are equal iff they have the same URI."""
        uri1 = uri.LiteralFileURI(b"1")
        uri2 = uri.LiteralFileURI(b"2")
        lfn1 = LiteralFileNode(uri1)
        lfn1b = LiteralFileNode(uri1)
        lfn2 = LiteralFileNode(uri2)
        # same URI: equal, not unequal
        self.assertTrue(lfn1 == lfn1b)
        self.assertFalse(lfn1 != lfn1b)
        # different URI: unequal
        self.assertTrue(lfn1 != lfn2)
        self.assertFalse(lfn1 == lfn2)
        # comparison against an unrelated type is never equal
        self.assertTrue(lfn1 != 300)
        self.assertFalse(lfn1 == 300)
# 2010-08-04 07:27:10 +00:00
# XXX extend these tests to show bad behavior of various kinds from servers:
# raising exception from each remove_foo() method, for example
# 2008-12-31 21:18:38 +00:00
# XXX test disconnect DeadReferenceError from get_buckets and get_block_whatsit
# 2009-01-08 06:40:12 +00:00
# 2010-08-04 07:27:10 +00:00
# TODO: delete this whole file