# -*- coding: utf-8 -*-

import os, shutil
from cStringIO import StringIO
from twisted.trial import unittest
from twisted.python.failure import Failure
from twisted.internet import defer
from foolscap.api import fireEventually

import allmydata # for __full_version__
from allmydata import uri, monitor, client
from allmydata.immutable import upload, encode
from allmydata.interfaces import FileTooLargeError, UploadUnhappinessError
from allmydata.util import log
from allmydata.util.assertutil import precondition
from allmydata.util.deferredutil import DeferredListShouldSucceed
from allmydata.test.no_network import GridTestMixin
from allmydata.test.common_util import ShouldFailMixin
from allmydata.util.happinessutil import servers_of_happiness, \
                                         shares_by_server, merge_peers
from allmydata.storage_client import StorageFarmBroker
from allmydata.storage.server import storage_index_to_dir

MiB = 1024*1024

def extract_uri(results):
    return results.uri

# Some of these took longer than 480 seconds on Zandr's arm box, but this may
# have been due to an earlier test ERROR'ing out due to timeout, which seems
# to screw up subsequent tests.
timeout = 960

class Uploadable(unittest.TestCase):
    def shouldEqual(self, data, expected):
        self.failUnless(isinstance(data, list))
        for e in data:
            self.failUnless(isinstance(e, str))
        s = "".join(data)
        self.failUnlessEqual(s, expected)

    def test_filehandle_random_key(self):
        return self._test_filehandle(convergence=None)

    def test_filehandle_convergent_encryption(self):
        return self._test_filehandle(convergence="some convergence string")

    def _test_filehandle(self, convergence):
        s = StringIO("a"*41)
        u = upload.FileHandle(s, convergence=convergence)
        d = u.get_size()
        d.addCallback(self.failUnlessEqual, 41)
        d.addCallback(lambda res: u.read(1))
        d.addCallback(self.shouldEqual, "a")
        d.addCallback(lambda res: u.read(80))
        d.addCallback(self.shouldEqual, "a"*40)
        d.addCallback(lambda res: u.close()) # this doesn't close the filehandle
        d.addCallback(lambda res: s.close()) # that privilege is reserved for us
        return d

    def test_filename(self):
        basedir = "upload/Uploadable/test_filename"
        os.makedirs(basedir)
        fn = os.path.join(basedir, "file")
        f = open(fn, "w")
        f.write("a"*41)
        f.close()
        u = upload.FileName(fn, convergence=None)
        d = u.get_size()
        d.addCallback(self.failUnlessEqual, 41)
        d.addCallback(lambda res: u.read(1))
        d.addCallback(self.shouldEqual, "a")
        d.addCallback(lambda res: u.read(80))
        d.addCallback(self.shouldEqual, "a"*40)
        d.addCallback(lambda res: u.close())
        return d

    def test_data(self):
        s = "a"*41
        u = upload.Data(s, convergence=None)
        d = u.get_size()
        d.addCallback(self.failUnlessEqual, 41)
        d.addCallback(lambda res: u.read(1))
        d.addCallback(self.shouldEqual, "a")
        d.addCallback(lambda res: u.read(80))
        d.addCallback(self.shouldEqual, "a"*40)
        d.addCallback(lambda res: u.close())
        return d

class ServerError(Exception):
    pass
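
# Added commentary (not part of the original file): the helpers below fake
# out a Tahoe grid. SetDEPMixin overrides the fake client's default encoding
# parameters (k = shares needed, "happy" = servers-of-happiness threshold,
# n = total shares), and FakeStorageServer's mode string ("good", "full",
# "already got them", "small", "first-fail", "second-fail") selects how it
# responds to allocate_buckets(), with ServerError standing in for a remote
# failure.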

class SetDEPMixin:
    def set_encoding_parameters(self, k, happy, n, max_segsize=1*MiB):
        p = {"k": k,
             "happy": happy,
             "n": n,
             "max_segment_size": max_segsize,
             }
        self.node.DEFAULT_ENCODING_PARAMETERS = p

class FakeStorageServer:
    def __init__(self, mode):
        self.mode = mode
        self.allocated = []
        self.queries = 0
        self.version = { "http://allmydata.org/tahoe/protocols/storage/v1" :
                         { "maximum-immutable-share-size": 2**32 },
                         "application-version": str(allmydata.__full_version__),
                         }
        if mode == "small":
            self.version = { "http://allmydata.org/tahoe/protocols/storage/v1" :
                             { "maximum-immutable-share-size": 10 },
                             "application-version": str(allmydata.__full_version__),
                             }

    def callRemote(self, methname, *args, **kwargs):
        def _call():
            meth = getattr(self, methname)
            return meth(*args, **kwargs)
        d = fireEventually()
        d.addCallback(lambda res: _call())
        return d

    def allocate_buckets(self, storage_index, renew_secret, cancel_secret,
                         sharenums, share_size, canary):
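        # Added note: like the real storage-server API, this returns a tuple
        # of (alreadygot, bucketwriters): the set of share numbers this
        # server already holds, plus a dict mapping each newly allocated
        # share number to a writer. The mode string selects failure and
        # already-full behavior.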
        #print "FakeStorageServer.allocate_buckets(num=%d, size=%d)" % (len(sharenums), share_size)
        if self.mode == "first-fail":
            if self.queries == 0:
                raise ServerError
        if self.mode == "second-fail":
            if self.queries == 1:
                raise ServerError
        self.queries += 1
        if self.mode == "full":
            return (set(), {},)
        elif self.mode == "already got them":
            return (set(sharenums), {},)
        else:
            for shnum in sharenums:
                self.allocated.append( (storage_index, shnum) )
            return (set(),
                    dict([( shnum, FakeBucketWriter(share_size) )
                          for shnum in sharenums]),
                    )

class FakeBucketWriter:
    # a diagnostic version of storageserver.BucketWriter
    def __init__(self, size):
        self.data = StringIO()
        self.closed = False
        self._size = size

    def callRemote(self, methname, *args, **kwargs):
        def _call():
            meth = getattr(self, "remote_" + methname)
            return meth(*args, **kwargs)
        d = fireEventually()
        d.addCallback(lambda res: _call())
        return d

    def callRemoteOnly(self, methname, *args, **kwargs):
        d = self.callRemote(methname, *args, **kwargs)
        del d # callRemoteOnly ignores this
        return None

    def remote_write(self, offset, data):
        precondition(not self.closed)
        precondition(offset >= 0)
        precondition(offset+len(data) <= self._size,
                     "offset=%d + data=%d > size=%d" %
                     (offset, len(data), self._size))
        self.data.seek(offset)
        self.data.write(data)

    def remote_close(self):
        precondition(not self.closed)
        self.closed = True

    def remote_abort(self):
        pass

class FakeClient:
    DEFAULT_ENCODING_PARAMETERS = {"k": 25,
                                   "happy": 25,
                                   "n": 100,
                                   "max_segment_size": 1*MiB,
                                   }
    def __init__(self, mode="good", num_servers=50):
        self.num_servers = num_servers
        if type(mode) is str:
            mode = dict([i,mode] for i in range(num_servers))
        peers = [ ("%20d"%fakeid, FakeStorageServer(mode[fakeid]))
                  for fakeid in range(self.num_servers) ]
        self.storage_broker = StorageFarmBroker(None, permute_peers=True)
        for (serverid, server) in peers:
            self.storage_broker.test_add_server(serverid, server)
        self.last_peers = [p[1] for p in peers]

    def log(self, *args, **kwargs):
        pass
    def get_encoding_parameters(self):
        return self.DEFAULT_ENCODING_PARAMETERS
    def get_storage_broker(self):
        return self.storage_broker
    _secret_holder = client.SecretHolder("lease secret", "convergence secret")

class GotTooFarError(Exception):
    pass
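
# Added note: GiganticUploadable below pretends to be an enormous file of
# zero bytes without ever holding that much data in memory; it exists only
# to exercise the "file too large" checks, and GotTooFarError aborts the
# test if the uploader ever reads further into it than it should.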

class GiganticUploadable(upload.FileHandle):
    def __init__(self, size):
        self._size = size
        self._fp = 0

    def get_encryption_key(self):
        return defer.succeed("\x00" * 16)
    def get_size(self):
        return defer.succeed(self._size)
    def read(self, length):
        left = self._size - self._fp
        length = min(left, length)
        self._fp += length
        if self._fp > 1000000:
            # terminate the test early.
            raise GotTooFarError("we shouldn't be allowed to get this far")
        return defer.succeed(["\x00"*length])
    def close(self):
        pass

DATA = """
Once upon a time, there was a beautiful princess named Buttercup. She lived
in a magical land where every file was stored securely among millions of
machines, and nobody ever worried about their data being lost ever again.
The End.
"""
assert len(DATA) > upload.Uploader.URI_LIT_SIZE_THRESHOLD

SIZE_ZERO = 0
SIZE_SMALL = 16
SIZE_LARGE = len(DATA)
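
# Added note: uploads no larger than URI_LIT_SIZE_THRESHOLD are stored
# entirely inside a "literal" (LIT) URI instead of being erasure-coded, so
# SIZE_ZERO and SIZE_SMALL exercise the literal path (_check_small) while
# SIZE_LARGE exercises the CHK path (_check_large).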

def upload_data(uploader, data):
    u = upload.Data(data, convergence=None)
    return uploader.upload(u)
def upload_filename(uploader, filename):
    u = upload.FileName(filename, convergence=None)
    return uploader.upload(u)
def upload_filehandle(uploader, fh):
    u = upload.FileHandle(fh, convergence=None)
    return uploader.upload(u)
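
# Added note: convergence=None asks for a randomly generated encryption key
# (non-convergent encryption); passing a convergence string instead makes
# identical files encrypt identically, which is what the StorageIndex tests
# further below rely on.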

class GoodServer(unittest.TestCase, ShouldFailMixin, SetDEPMixin):
    def setUp(self):
        self.node = FakeClient(mode="good")
        self.u = upload.Uploader()
        self.u.running = True
        self.u.parent = self.node

    def _check_small(self, newuri, size):
        u = uri.from_string(newuri)
        self.failUnless(isinstance(u, uri.LiteralFileURI))
        self.failUnlessEqual(len(u.data), size)

    def _check_large(self, newuri, size):
        u = uri.from_string(newuri)
        self.failUnless(isinstance(u, uri.CHKFileURI))
        self.failUnless(isinstance(u.get_storage_index(), str))
        self.failUnlessEqual(len(u.get_storage_index()), 16)
        self.failUnless(isinstance(u.key, str))
        self.failUnlessEqual(len(u.key), 16)
        self.failUnlessEqual(u.size, size)

    def get_data(self, size):
        return DATA[:size]

    def test_too_large(self):
        # we've removed the 4GiB share size limit (see ticket #346 for
        # details), but still have an 8-byte field, so the limit is now
        # 2**64, so make sure we reject files larger than that.
        k = 3; happy = 7; n = 10
        self.set_encoding_parameters(k, happy, n)
        big = k*(2**64)
        data1 = GiganticUploadable(big)
        d = self.shouldFail(FileTooLargeError, "test_too_large-data1",
                            "This file is too large to be uploaded (data_size)",
                            self.u.upload, data1)
        data2 = GiganticUploadable(big-3)
        d.addCallback(lambda res:
                      self.shouldFail(FileTooLargeError,
                                      "test_too_large-data2",
                                      "This file is too large to be uploaded (offsets)",
                                      self.u.upload, data2))
        # I don't know where the actual limit is.. it depends upon how large
        # the hash trees wind up. It's somewhere close to k*4GiB-ln2(size).
        return d

    def test_data_zero(self):
        data = self.get_data(SIZE_ZERO)
        d = upload_data(self.u, data)
        d.addCallback(extract_uri)
        d.addCallback(self._check_small, SIZE_ZERO)
        return d

    def test_data_small(self):
        data = self.get_data(SIZE_SMALL)
        d = upload_data(self.u, data)
        d.addCallback(extract_uri)
        d.addCallback(self._check_small, SIZE_SMALL)
        return d

    def test_data_large(self):
        data = self.get_data(SIZE_LARGE)
        d = upload_data(self.u, data)
        d.addCallback(extract_uri)
        d.addCallback(self._check_large, SIZE_LARGE)
        return d

    def test_data_large_odd_segments(self):
        data = self.get_data(SIZE_LARGE)
        segsize = int(SIZE_LARGE / 2.5)
        # we want 3 segments, since that's not a power of two
        self.set_encoding_parameters(25, 25, 100, segsize)
        d = upload_data(self.u, data)
        d.addCallback(extract_uri)
        d.addCallback(self._check_large, SIZE_LARGE)
        return d

    def test_filehandle_zero(self):
        data = self.get_data(SIZE_ZERO)
        d = upload_filehandle(self.u, StringIO(data))
        d.addCallback(extract_uri)
        d.addCallback(self._check_small, SIZE_ZERO)
        return d

    def test_filehandle_small(self):
        data = self.get_data(SIZE_SMALL)
        d = upload_filehandle(self.u, StringIO(data))
        d.addCallback(extract_uri)
        d.addCallback(self._check_small, SIZE_SMALL)
        return d

    def test_filehandle_large(self):
        data = self.get_data(SIZE_LARGE)
        d = upload_filehandle(self.u, StringIO(data))
        d.addCallback(extract_uri)
        d.addCallback(self._check_large, SIZE_LARGE)
        return d

    def test_filename_zero(self):
        fn = "Uploader-test_filename_zero.data"
        f = open(fn, "wb")
        data = self.get_data(SIZE_ZERO)
        f.write(data)
        f.close()
        d = upload_filename(self.u, fn)
        d.addCallback(extract_uri)
        d.addCallback(self._check_small, SIZE_ZERO)
        return d

    def test_filename_small(self):
        fn = "Uploader-test_filename_small.data"
        f = open(fn, "wb")
        data = self.get_data(SIZE_SMALL)
        f.write(data)
        f.close()
        d = upload_filename(self.u, fn)
        d.addCallback(extract_uri)
        d.addCallback(self._check_small, SIZE_SMALL)
        return d

    def test_filename_large(self):
        fn = "Uploader-test_filename_large.data"
        f = open(fn, "wb")
        data = self.get_data(SIZE_LARGE)
        f.write(data)
        f.close()
        d = upload_filename(self.u, fn)
        d.addCallback(extract_uri)
        d.addCallback(self._check_large, SIZE_LARGE)
        return d

class ServerErrors(unittest.TestCase, ShouldFailMixin, SetDEPMixin):
    def make_node(self, mode, num_servers=10):
        self.node = FakeClient(mode, num_servers)
        self.u = upload.Uploader()
        self.u.running = True
        self.u.parent = self.node

    def _check_large(self, newuri, size):
        u = uri.from_string(newuri)
        self.failUnless(isinstance(u, uri.CHKFileURI))
        self.failUnless(isinstance(u.get_storage_index(), str))
        self.failUnlessEqual(len(u.get_storage_index()), 16)
        self.failUnless(isinstance(u.key, str))
        self.failUnlessEqual(len(u.key), 16)
        self.failUnlessEqual(u.size, size)

    def test_first_error(self):
        mode = dict([(0,"good")] + [(i,"first-fail") for i in range(1,10)])
        self.make_node(mode)
        self.set_encoding_parameters(k=25, happy=1, n=50)
        d = upload_data(self.u, DATA)
        d.addCallback(extract_uri)
        d.addCallback(self._check_large, SIZE_LARGE)
        return d

    def test_first_error_all(self):
        self.make_node("first-fail")
        d = self.shouldFail(UploadUnhappinessError, "first_error_all",
                            "peer selection failed",
                            upload_data, self.u, DATA)
        def _check((f,)):
            self.failUnlessIn("placed 0 shares out of 100 total", str(f.value))
            # there should also be a 'last failure was' message
            self.failUnlessIn("ServerError", str(f.value))
        d.addCallback(_check)
        return d

    def test_second_error(self):
        # we want to make sure we make it to a third pass. This means that
        # the first pass was insufficient to place all shares, and at least
        # one of second pass servers (other than the last one) accepted a
        # share (so we'll believe that a third pass will be useful). (if
        # everyone but the last server throws an error, then we'll send all
        # the remaining shares to the last server at the end of the second
        # pass, and if that succeeds, we won't make it to a third pass).
        #
        # we can achieve this 97.5% of the time by using 40 servers, having
        # 39 of them fail on the second request, leaving only one to succeed
        # on the second request. (we need to keep the number of servers low
        # enough to ensure a second pass with 100 shares).
        mode = dict([(0,"good")] + [(i,"second-fail") for i in range(1,40)])
        self.make_node(mode, 40)
        d = upload_data(self.u, DATA)
        d.addCallback(extract_uri)
        d.addCallback(self._check_large, SIZE_LARGE)
        return d

    def test_second_error_all(self):
        self.make_node("second-fail")
        d = self.shouldFail(UploadUnhappinessError, "second_error_all",
                            "peer selection failed",
                            upload_data, self.u, DATA)
        def _check((f,)):
            self.failUnlessIn("placed 10 shares out of 100 total", str(f.value))
            # there should also be a 'last failure was' message
            self.failUnlessIn("ServerError", str(f.value))
        d.addCallback(_check)
        return d

class FullServer(unittest.TestCase):
    def setUp(self):
        self.node = FakeClient(mode="full")
        self.u = upload.Uploader()
        self.u.running = True
        self.u.parent = self.node

    def _should_fail(self, f):
        self.failUnless(isinstance(f, Failure) and f.check(UploadUnhappinessError), f)

    def test_data_large(self):
        data = DATA
        d = upload_data(self.u, data)
        d.addBoth(self._should_fail)
        return d

class PeerSelection(unittest.TestCase):

    def make_client(self, num_servers=50):
        self.node = FakeClient(mode="good", num_servers=num_servers)
        self.u = upload.Uploader()
        self.u.running = True
        self.u.parent = self.node

    def get_data(self, size):
        return DATA[:size]

    def _check_large(self, newuri, size):
        u = uri.from_string(newuri)
        self.failUnless(isinstance(u, uri.CHKFileURI))
        self.failUnless(isinstance(u.get_storage_index(), str))
        self.failUnlessEqual(len(u.get_storage_index()), 16)
        self.failUnless(isinstance(u.key, str))
        self.failUnlessEqual(len(u.key), 16)
        self.failUnlessEqual(u.size, size)

    def set_encoding_parameters(self, k, happy, n, max_segsize=1*MiB):
        p = {"k": k,
             "happy": happy,
             "n": n,
             "max_segment_size": max_segsize,
             }
        self.node.DEFAULT_ENCODING_PARAMETERS = p

    def test_one_each(self):
        # if we have 50 shares, and there are 50 peers, and they all accept a
        # share, we should get exactly one share per peer
        self.make_client()
        data = self.get_data(SIZE_LARGE)
        self.set_encoding_parameters(25, 30, 50)
        d = upload_data(self.u, data)
        d.addCallback(extract_uri)
        d.addCallback(self._check_large, SIZE_LARGE)
        def _check(res):
            for p in self.node.last_peers:
                allocated = p.allocated
                self.failUnlessEqual(len(allocated), 1)
                self.failUnlessEqual(p.queries, 1)
        d.addCallback(_check)
        return d

    def test_two_each(self):
        # if we have 100 shares, and there are 50 peers, and they all accept
        # all shares, we should get exactly two shares per peer
        self.make_client()
        data = self.get_data(SIZE_LARGE)
        # if there are 50 peers, then happy needs to be <= 50
        self.set_encoding_parameters(50, 50, 100)
        d = upload_data(self.u, data)
        d.addCallback(extract_uri)
        d.addCallback(self._check_large, SIZE_LARGE)
        def _check(res):
            for p in self.node.last_peers:
                allocated = p.allocated
                self.failUnlessEqual(len(allocated), 2)
                self.failUnlessEqual(p.queries, 2)
        d.addCallback(_check)
        return d

    def test_one_each_plus_one_extra(self):
        # if we have 51 shares, and there are 50 peers, then one peer gets
        # two shares and the rest get just one
        self.make_client()
        data = self.get_data(SIZE_LARGE)
        self.set_encoding_parameters(24, 41, 51)
        d = upload_data(self.u, data)
        d.addCallback(extract_uri)
        d.addCallback(self._check_large, SIZE_LARGE)
        def _check(res):
            got_one = []
            got_two = []
            for p in self.node.last_peers:
                allocated = p.allocated
                self.failUnless(len(allocated) in (1,2), len(allocated))
                if len(allocated) == 1:
                    self.failUnlessEqual(p.queries, 1)
                    got_one.append(p)
                else:
                    self.failUnlessEqual(p.queries, 2)
                    got_two.append(p)
            self.failUnlessEqual(len(got_one), 49)
            self.failUnlessEqual(len(got_two), 1)
        d.addCallback(_check)
        return d

    def test_four_each(self):
        # if we have 200 shares, and there are 50 peers, then each peer gets
        # 4 shares. The design goal is to accomplish this with only two
        # queries per peer.
        self.make_client()
        data = self.get_data(SIZE_LARGE)
        # if there are 50 peers, then happy should be no more than 50 if
        # we want this to work.
        self.set_encoding_parameters(100, 50, 200)
        d = upload_data(self.u, data)
        d.addCallback(extract_uri)
        d.addCallback(self._check_large, SIZE_LARGE)
        def _check(res):
            for p in self.node.last_peers:
                allocated = p.allocated
                self.failUnlessEqual(len(allocated), 4)
                self.failUnlessEqual(p.queries, 2)
        d.addCallback(_check)
        return d

    def test_three_of_ten(self):
        # if we have 10 shares and 3 servers, I want to see 3+3+4 rather than
        # 4+4+2
        self.make_client(3)
        data = self.get_data(SIZE_LARGE)
        self.set_encoding_parameters(3, 3, 10)
        d = upload_data(self.u, data)
        d.addCallback(extract_uri)
        d.addCallback(self._check_large, SIZE_LARGE)
        def _check(res):
            counts = {}
            for p in self.node.last_peers:
                allocated = p.allocated
                counts[len(allocated)] = counts.get(len(allocated), 0) + 1
            histogram = [counts.get(i, 0) for i in range(5)]
            self.failUnlessEqual(histogram, [0,0,0,2,1])
        d.addCallback(_check)
        return d

    def test_some_big_some_small(self):
        # 10 shares, 20 servers, but half the servers don't support a
        # share-size large enough for our file
        mode = dict([(i,{0:"good",1:"small"}[i%2]) for i in range(20)])
        self.node = FakeClient(mode, num_servers=20)
        self.u = upload.Uploader()
        self.u.running = True
        self.u.parent = self.node

        data = self.get_data(SIZE_LARGE)
        self.set_encoding_parameters(3, 5, 10)
        d = upload_data(self.u, data)
        d.addCallback(extract_uri)
        d.addCallback(self._check_large, SIZE_LARGE)
        def _check(res):
            # we should have put one share each on the big peers, and zero
            # shares on the small peers
            total_allocated = 0
            for p in self.node.last_peers:
                if p.mode == "good":
                    self.failUnlessEqual(len(p.allocated), 1)
                elif p.mode == "small":
                    self.failUnlessEqual(len(p.allocated), 0)
                total_allocated += len(p.allocated)
            self.failUnlessEqual(total_allocated, 10)
        d.addCallback(_check)
        return d

class StorageIndex(unittest.TestCase):
    def test_params_must_matter(self):
        DATA = "I am some data"
        u = upload.Data(DATA, convergence="")
        eu = upload.EncryptAnUploadable(u)
        d1 = eu.get_storage_index()

        # CHK means the same data should encrypt the same way
        u = upload.Data(DATA, convergence="")
        eu = upload.EncryptAnUploadable(u)
        d1a = eu.get_storage_index()

        # but if we use a different convergence string it should be different
        u = upload.Data(DATA, convergence="wheee!")
        eu = upload.EncryptAnUploadable(u)
        d1salt1 = eu.get_storage_index()

        # and if we add yet a different convergence it should be different again
        u = upload.Data(DATA, convergence="NOT wheee!")
        eu = upload.EncryptAnUploadable(u)
        d1salt2 = eu.get_storage_index()

        # and if we use the first string again it should be the same as last time
        u = upload.Data(DATA, convergence="wheee!")
        eu = upload.EncryptAnUploadable(u)
        d1salt1a = eu.get_storage_index()

        # and if we change the encoding parameters, it should be different (from the same convergence string with different encoding parameters)
        u = upload.Data(DATA, convergence="")
        u.encoding_param_k = u.default_encoding_param_k + 1
        eu = upload.EncryptAnUploadable(u)
        d2 = eu.get_storage_index()

        # and if we use a random key, it should be different than the CHK
        u = upload.Data(DATA, convergence=None)
        eu = upload.EncryptAnUploadable(u)
        d3 = eu.get_storage_index()
        # and different from another instance
        u = upload.Data(DATA, convergence=None)
        eu = upload.EncryptAnUploadable(u)
        d4 = eu.get_storage_index()

        d = DeferredListShouldSucceed([d1,d1a,d1salt1,d1salt2,d1salt1a,d2,d3,d4])
        def _done(res):
            si1, si1a, si1salt1, si1salt2, si1salt1a, si2, si3, si4 = res
            self.failUnlessEqual(si1, si1a)
            self.failIfEqual(si1, si2)
            self.failIfEqual(si1, si3)
            self.failIfEqual(si1, si4)
            self.failIfEqual(si3, si4)
            self.failIfEqual(si1salt1, si1)
            self.failIfEqual(si1salt1, si1salt2)
            self.failIfEqual(si1salt2, si1)
            self.failUnlessEqual(si1salt1, si1salt1a)
        d.addCallback(_done)
        return d
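
# Added note (editorial, hedged): the storage index is derived from the
# per-file encryption key; with convergent encryption that key is a hash
# over the convergence secret, the encoding parameters, and the file's
# contents, which is why the test above expects it to change when any one
# of those changes (and to be unique when convergence=None selects a
# random key).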

# copied from python docs because itertools.combinations was added in
# python 2.6 and we support >= 2.4.
def combinations(iterable, r):
    # combinations('ABCD', 2) --> AB AC AD BC BD CD
    # combinations(range(4), 3) --> 012 013 023 123
    pool = tuple(iterable)
    n = len(pool)
    if r > n:
        return
    indices = range(r)
    yield tuple(pool[i] for i in indices)
    while True:
        for i in reversed(range(r)):
            if indices[i] != i + n - r:
                break
        else:
            return
        indices[i] += 1
        for j in range(i+1, r):
            indices[j] = indices[j-1] + 1
        yield tuple(pool[i] for i in indices)

def is_happy_enough(servertoshnums, h, k):
    """ I calculate whether servertoshnums achieves happiness level h. I do this with a naïve "brute force search" approach. (See src/allmydata/util/happinessutil.py for a better algorithm.) """
    if len(servertoshnums) < h:
        return False
    # print "servertoshnums: ", servertoshnums, h, k
    for happysetcombo in combinations(servertoshnums.iterkeys(), h):
        # print "happysetcombo: ", happysetcombo
        for subsetcombo in combinations(happysetcombo, k):
            shnums = reduce(set.union, [ servertoshnums[s] for s in subsetcombo ])
            # print "subsetcombo: ", subsetcombo, ", shnums: ", shnums
            if len(shnums) < k:
                # print "NOT HAPPY", shnums, k
                return False
    # print "HAPPY"
    return True
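
# Added example (not part of the original tests): with h=2, k=2,
#   is_happy_enough({0: set([0, 1]), 1: set([2])}, 2, 2)  -> True
#   is_happy_enough({0: set([0]),    1: set([0])}, 2, 2)  -> False
# because in the second layout no two servers together hold two distinct
# shares.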

class EncodingParameters(GridTestMixin, unittest.TestCase, SetDEPMixin,
    ShouldFailMixin):
    def find_all_shares(self, unused=None):
        """Locate shares on disk. Returns a dict that maps
        server to set of sharenums.
        """
        assert self.g, "I tried to find a grid at self.g, but failed"
        servertoshnums = {} # k: server, v: set(shnum)

        for i, c in self.g.servers_by_number.iteritems():
            for (dirp, dirns, fns) in os.walk(c.sharedir):
                for fn in fns:
                    try:
                        sharenum = int(fn)
                    except TypeError:
                        # Whoops, I guess that's not a share file then.
                        pass
                    else:
                        servertoshnums.setdefault(i, set()).add(sharenum)
        return servertoshnums

    def _do_upload_with_broken_servers(self, servers_to_break):
        """
        I act like a normal upload, but before I send the results of
        Tahoe2PeerSelector to the Encoder, I break the first servers_to_break
        PeerTrackers in the upload_servers part of the return result.
        """
        assert self.g, "I tried to find a grid at self.g, but failed"
        broker = self.g.clients[0].storage_broker
        sh = self.g.clients[0]._secret_holder
        data = upload.Data("data" * 10000, convergence="")
        data.encoding_param_k = 3
        data.encoding_param_happy = 4
        data.encoding_param_n = 10
        uploadable = upload.EncryptAnUploadable(data)
        encoder = encode.Encoder()
        encoder.set_encrypted_uploadable(uploadable)
        status = upload.UploadStatus()
        selector = upload.Tahoe2PeerSelector("dglev", "test", status)
        storage_index = encoder.get_param("storage_index")
        share_size = encoder.get_param("share_size")
        block_size = encoder.get_param("block_size")
        num_segments = encoder.get_param("num_segments")
        d = selector.get_shareholders(broker, sh, storage_index,
                                      share_size, block_size, num_segments,
                                      10, 3, 4)
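        # Added note: the trailing positional arguments here are assumed to
        # be (total shares, needed shares, servers-of-happiness), matching
        # the k=3 / happy=4 / n=10 parameters set on the uploadable above.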
        def _have_shareholders((upload_servers, already_peers)):
            assert servers_to_break <= len(upload_servers)
            for index in xrange(servers_to_break):
                server = list(upload_servers)[index]
                for share in server.buckets.keys():
                    server.buckets[share].abort()
            buckets = {}
            servermap = already_peers.copy()
            for peer in upload_servers:
                buckets.update(peer.buckets)
                for bucket in peer.buckets:
                    servermap.setdefault(bucket, set()).add(peer.peerid)
            encoder.set_shareholders(buckets, servermap)
            d = encoder.start()
            return d
        d.addCallback(_have_shareholders)
        return d

    def _has_happy_share_distribution(self):
        servertoshnums = self.find_all_shares()
        k = self.g.clients[0].DEFAULT_ENCODING_PARAMETERS['k']
        h = self.g.clients[0].DEFAULT_ENCODING_PARAMETERS['happy']
        return is_happy_enough(servertoshnums, h, k)

    def _add_server(self, server_number, readonly=False):
        assert self.g, "I tried to find a grid at self.g, but failed"
        ss = self.g.make_server(server_number, readonly)
        log.msg("just created a server, number: %s => %s" % (server_number, ss,))
        self.g.add_server(server_number, ss)

    def _add_server_with_share(self, server_number, share_number=None,
                               readonly=False):
        self._add_server(server_number, readonly)
        if share_number is not None:
            self._copy_share_to_server(share_number, server_number)

    def _copy_share_to_server(self, share_number, server_number):
        ss = self.g.servers_by_number[server_number]
        # Copy share i from the directory associated with the first
        # storage server to the directory associated with this one.
        assert self.g, "I tried to find a grid at self.g, but failed"
        assert self.shares, "I tried to find shares at self.shares, but failed"
        old_share_location = self.shares[share_number][2]
        new_share_location = os.path.join(ss.storedir, "shares")
        si = uri.from_string(self.uri).get_storage_index()
        new_share_location = os.path.join(new_share_location,
                                          storage_index_to_dir(si))
        if not os.path.exists(new_share_location):
            os.makedirs(new_share_location)
        new_share_location = os.path.join(new_share_location,
                                          str(share_number))
        if old_share_location != new_share_location:
            shutil.copy(old_share_location, new_share_location)
        shares = self.find_uri_shares(self.uri)
        # Make sure that the storage server has the share.
        self.failUnless((share_number, ss.my_nodeid, new_share_location)
                        in shares)

    def _setup_grid(self):
        """
        I set up a NoNetworkGrid with a single server and client.
        """
        self.set_up_grid(num_clients=1, num_servers=1)

    def _setup_and_upload(self, **kwargs):
        """
        I set up a NoNetworkGrid with a single server and client,
        upload a file to it, store its uri in self.uri, and store its
        sharedata in self.shares.
        """
        self._setup_grid()
        client = self.g.clients[0]
        client.DEFAULT_ENCODING_PARAMETERS['happy'] = 1
        if "n" in kwargs and "k" in kwargs:
            client.DEFAULT_ENCODING_PARAMETERS['k'] = kwargs['k']
            client.DEFAULT_ENCODING_PARAMETERS['n'] = kwargs['n']
        data = upload.Data("data" * 10000, convergence="")
        self.data = data
        d = client.upload(data)
        def _store_uri(ur):
            self.uri = ur.uri
        d.addCallback(_store_uri)
        d.addCallback(lambda ign:
            self.find_uri_shares(self.uri))
        def _store_shares(shares):
            self.shares = shares
        d.addCallback(_store_shares)
        return d

    def test_configure_parameters(self):
        self.basedir = self.mktemp()
        hooks = {0: self._set_up_nodes_extra_config}
        self.set_up_grid(client_config_hooks=hooks)
        c0 = self.g.clients[0]

        DATA = "data" * 100
        u = upload.Data(DATA, convergence="")
        d = c0.upload(u)
        d.addCallback(lambda ur: c0.create_node_from_uri(ur.uri))
        m = monitor.Monitor()
        d.addCallback(lambda fn: fn.check(m))
        def _check(cr):
            data = cr.get_data()
            self.failUnlessEqual(data["count-shares-needed"], 7)
            self.failUnlessEqual(data["count-shares-expected"], 12)
        d.addCallback(_check)
        return d

    def _setUp(self, ns):
        # Used by test_happy_semantics and test_preexisting_share_behavior
        # to set up the grid.
        self.node = FakeClient(mode="good", num_servers=ns)
        self.u = upload.Uploader()
        self.u.running = True
        self.u.parent = self.node

    def test_happy_semantics(self):
        self._setUp(2)
        DATA = upload.Data("kittens" * 10000, convergence="")
        # These parameters are unsatisfiable with only 2 servers.
        self.set_encoding_parameters(k=3, happy=5, n=10)
        d = self.shouldFail(UploadUnhappinessError, "test_happy_semantics",
                            "shares could be placed or found on only 2 "
                            "server(s). We were asked to place shares on "
                            "at least 5 server(s) such that any 3 of them "
                            "have enough shares to recover the file",
                            self.u.upload, DATA)
        # Let's reset the client to have 10 servers
        d.addCallback(lambda ign:
            self._setUp(10))
        # These parameters are satisfiable with 10 servers.
        d.addCallback(lambda ign:
            self.set_encoding_parameters(k=3, happy=5, n=10))
        d.addCallback(lambda ign:
            self.u.upload(DATA))
        # Let's reset the client to have 7 servers
        # (this is less than n, but more than h)
        d.addCallback(lambda ign:
            self._setUp(7))
        # These parameters are satisfiable with 7 servers.
        d.addCallback(lambda ign:
            self.set_encoding_parameters(k=3, happy=5, n=10))
        d.addCallback(lambda ign:
            self.u.upload(DATA))
        return d
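
    # Added note: "servers of happiness" (the happy parameter) is the
    # guarantee these tests exercise -- an upload only succeeds if shares
    # end up on at least `happy` distinct servers arranged so that any k of
    # those servers together hold enough shares to recover the file, as the
    # failure messages above spell out.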

    def test_aborted_shares(self):
        self.basedir = "upload/EncodingParameters/aborted_shares"
        self.set_up_grid(num_servers=4)
        c = self.g.clients[0]
        DATA = upload.Data(100 * "kittens", convergence="")
        # These parameters are unsatisfiable with only 4 servers, but should
        # work with 5, as long as the original 4 are not stuck in the open
        # BucketWriter state (open() but not closed).
        parms = {"k": 2, "happy": 5, "n": 5, "max_segment_size": 1 * MiB}
        c.DEFAULT_ENCODING_PARAMETERS = parms
        d = self.shouldFail(UploadUnhappinessError, "test_aborted_shares",
                            "shares could be placed on only 4 "
                            "server(s) such that any 2 of them have enough "
                            "shares to recover the file, but we were asked "
                            "to place shares on at least 5 such servers",
                            c.upload, DATA)
        # now add the 5th server
        d.addCallback(lambda ign: self._add_server(4, False))
        # and this time the upload ought to succeed
        d.addCallback(lambda ign: c.upload(DATA))
        d.addCallback(lambda ign:
            self.failUnless(self._has_happy_share_distribution()))
        return d

    def test_problem_layout_comment_52(self):
        def _basedir():
            self.basedir = self.mktemp()
        _basedir()
        # This scenario is at
        # http://allmydata.org/trac/tahoe/ticket/778#comment:52
        #
        # The scenario in comment:52 proposes that we have a layout
        # like:
        # server 0: shares 1 - 9
        # server 1: share 0, read-only
        # server 2: share 0, read-only
        # server 3: share 0, read-only
        # To get access to the shares, we will first upload to one
        # server, which will then have shares 0 - 9. We'll then
        # add three new servers, configure them to not accept any new
        # shares, then write share 0 directly into the serverdir of each,
        # and then remove share 0 from server 0 in the same way.
        # Then each of servers 1 - 3 will report that they have share 0,
        # and will not accept any new share, while server 0 will report that
        # it has shares 1 - 9 and will accept new shares.
        # We'll then set 'happy' = 4, and see that an upload fails
        # (as it should)
        d = self._setup_and_upload()
        d.addCallback(lambda ign:
            self._add_server_with_share(server_number=1, share_number=0,
                                        readonly=True))
        d.addCallback(lambda ign:
            self._add_server_with_share(server_number=2, share_number=0,
                                        readonly=True))
        d.addCallback(lambda ign:
            self._add_server_with_share(server_number=3, share_number=0,
                                        readonly=True))
        # Remove the first share from server 0.
        def _remove_share_0_from_server_0():
            share_location = self.shares[0][2]
            os.remove(share_location)
        d.addCallback(lambda ign:
            _remove_share_0_from_server_0())
        # Set happy = 4 in the client.
        def _prepare():
            client = self.g.clients[0]
            client.DEFAULT_ENCODING_PARAMETERS['happy'] = 4
            return client
        d.addCallback(lambda ign:
            _prepare())
        # Uploading data should fail
        d.addCallback(lambda client:
            self.shouldFail(UploadUnhappinessError,
                            "test_problem_layout_comment_52_test_1",
                            "shares could be placed or found on 4 server(s), "
                            "but they are not spread out evenly enough to "
                            "ensure that any 3 of these servers would have "
                            "enough shares to recover the file. "
                            "We were asked to place shares on at "
                            "least 4 servers such that any 3 of them have "
                            "enough shares to recover the file",
                            client.upload, upload.Data("data" * 10000,
                                                       convergence="")))

        # Do comment:52, but like this:
        # server 2: empty
        # server 3: share 0, read-only
        # server 1: share 0, read-only
        # server 0: shares 0-9
        d.addCallback(lambda ign:
            _basedir())
        d.addCallback(lambda ign:
            self._setup_and_upload())
        d.addCallback(lambda ign:
            self._add_server(server_number=2))
        d.addCallback(lambda ign:
            self._add_server_with_share(server_number=3, share_number=0,
                                        readonly=True))
        d.addCallback(lambda ign:
            self._add_server_with_share(server_number=1, share_number=0,
                                        readonly=True))
        def _prepare2():
            client = self.g.clients[0]
            client.DEFAULT_ENCODING_PARAMETERS['happy'] = 4
            return client
        d.addCallback(lambda ign:
            _prepare2())
        d.addCallback(lambda client:
            self.shouldFail(UploadUnhappinessError,
                            "test_problem_layout_comment_52_test_2",
                            "shares could be placed on only 3 server(s) such "
                            "that any 3 of them have enough shares to recover "
                            "the file, but we were asked to place shares on "
                            "at least 4 such servers.",
                            client.upload, upload.Data("data" * 10000,
                                                       convergence="")))
        return d
2009-11-16 20:23:34 +00:00

    def test_problem_layout_comment_53(self):
        # This scenario is at
        # http://allmydata.org/trac/tahoe/ticket/778#comment:53
        #
        # Set up the grid to have one server
        def _change_basedir(ign):
            self.basedir = self.mktemp()
        _change_basedir(None)
        # We start by uploading all of the shares to one server.
        # Next, we'll add three new servers to our NoNetworkGrid. We'll add
        # one share from our initial upload to each of these.
        # The counterintuitive ordering of the share numbers is to deal with
        # the permuting of these servers -- distributing the shares this
        # way ensures that the Tahoe2PeerSelector sees them in the order
        # described below.
        d = self._setup_and_upload()
        d.addCallback(lambda ign:
            self._add_server_with_share(server_number=1, share_number=2))
        d.addCallback(lambda ign:
            self._add_server_with_share(server_number=2, share_number=0))
        d.addCallback(lambda ign:
            self._add_server_with_share(server_number=3, share_number=1))
        # So, we now have the following layout:
        # server 0: shares 0 - 9
        # server 1: share 2
        # server 2: share 0
        # server 3: share 1
        # We change the 'happy' parameter in the client to 4.
        # The Tahoe2PeerSelector will see the peers permuted as:
        # 2, 3, 1, 0
        # Ideally, a reupload of our original data should work.
        def _reset_encoding_parameters(ign, happy=4):
            client = self.g.clients[0]
            client.DEFAULT_ENCODING_PARAMETERS['happy'] = happy
            return client
        d.addCallback(_reset_encoding_parameters)
        d.addCallback(lambda client:
            client.upload(upload.Data("data" * 10000, convergence="")))
        d.addCallback(lambda ign:
            self.failUnless(self._has_happy_share_distribution()))

        # This scenario is basically comment:53, but changed so that the
        # Tahoe2PeerSelector sees the server with all of the shares before
        # any of the other servers.
        # The layout is:
        # server 2: shares 0 - 9
        # server 3: share 0
        # server 1: share 1
        # server 4: share 2
        # The Tahoe2PeerSelector sees the peers permuted as:
        # 2, 3, 1, 4
        # Note that server 0 has been replaced by server 4; this makes it
        # easier to ensure that the last server seen by Tahoe2PeerSelector
        # has only one share.
        d.addCallback(_change_basedir)
        d.addCallback(lambda ign:
            self._setup_and_upload())
        d.addCallback(lambda ign:
            self._add_server_with_share(server_number=2, share_number=0))
        d.addCallback(lambda ign:
            self._add_server_with_share(server_number=3, share_number=1))
        d.addCallback(lambda ign:
            self._add_server_with_share(server_number=1, share_number=2))
        # Copy all of the other shares to server number 2
        def _copy_shares(ign):
            for i in xrange(0, 10):
                self._copy_share_to_server(i, 2)
        d.addCallback(_copy_shares)
        # Remove the first server, and add a placeholder with share 0
        d.addCallback(lambda ign:
            self.g.remove_server(self.g.servers_by_number[0].my_nodeid))
        d.addCallback(lambda ign:
            self._add_server_with_share(server_number=4, share_number=0))
        # Now try uploading.
        d.addCallback(_reset_encoding_parameters)
        d.addCallback(lambda client:
            client.upload(upload.Data("data" * 10000, convergence="")))
        d.addCallback(lambda ign:
            self.failUnless(self._has_happy_share_distribution()))

        # Try the same thing, but with empty servers after the first one.
        # We want to make sure that Tahoe2PeerSelector will redistribute
        # shares as necessary, not simply discover an existing layout.
        # The layout is:
        # server 2: shares 0 - 9
        # server 3: empty
        # server 1: empty
        # server 4: empty
        d.addCallback(_change_basedir)
        d.addCallback(lambda ign:
            self._setup_and_upload())
        d.addCallback(lambda ign:
            self._add_server(server_number=2))
        d.addCallback(lambda ign:
            self._add_server(server_number=3))
        d.addCallback(lambda ign:
            self._add_server(server_number=1))
        d.addCallback(lambda ign:
            self._add_server(server_number=4))
        d.addCallback(_copy_shares)
        d.addCallback(lambda ign:
            self.g.remove_server(self.g.servers_by_number[0].my_nodeid))
        d.addCallback(_reset_encoding_parameters)
        d.addCallback(lambda client:
            client.upload(upload.Data("data" * 10000, convergence="")))
        # Make sure that only as many shares as necessary to satisfy
        # servers of happiness were pushed.
        d.addCallback(lambda results:
            self.failUnlessEqual(results.pushed_shares, 3))
        d.addCallback(lambda ign:
            self.failUnless(self._has_happy_share_distribution()))
        return d
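
    # A brief note on the arithmetic in the last scenario above (a sketch,
    # not an extra assertion): server 2 already holds all ten shares and
    # servers 3, 1, and 4 start empty, so with happy=4 the selector only
    # needs to push one new share to each of the three empty servers to
    # reach a happiness of 4 -- hence the expected pushed_shares == 3.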

    def test_problem_layout_ticket_1124(self):
        self.basedir = self.mktemp()
        d = self._setup_and_upload(k=2, n=4)
        # server 0: shares 0, 1, 2, 3
        # server 1: shares 0, 3
        # server 2: share 1
        # server 3: share 2
        # With this layout, an upload should just be satisfied that the
        # current distribution is good enough, right?
        def _setup(ign):
            self._add_server_with_share(server_number=0, share_number=None)
            self._add_server_with_share(server_number=1, share_number=0)
            self._add_server_with_share(server_number=2, share_number=1)
            self._add_server_with_share(server_number=3, share_number=2)
            # Copy shares
            self._copy_share_to_server(3, 1)
            client = self.g.clients[0]
            client.DEFAULT_ENCODING_PARAMETERS['happy'] = 4
            return client
        d.addCallback(_setup)
        d.addCallback(lambda client:
            client.upload(upload.Data("data" * 10000, convergence="")))
        d.addCallback(lambda ign:
            self.failUnless(self._has_happy_share_distribution()))
        return d
    test_problem_layout_ticket_1124.todo = "Fix this after 1.7.1 release."

    def test_happiness_with_some_readonly_peers(self):
        # Try the following layout
        # server 2: shares 0-9
        # server 4: share 0, read-only
        # server 3: share 1, read-only
        # server 1: share 2, read-only
        self.basedir = self.mktemp()
        d = self._setup_and_upload()
        d.addCallback(lambda ign:
            self._add_server_with_share(server_number=2, share_number=0))
        d.addCallback(lambda ign:
            self._add_server_with_share(server_number=3, share_number=1,
                                        readonly=True))
        d.addCallback(lambda ign:
            self._add_server_with_share(server_number=1, share_number=2,
                                        readonly=True))
        # Copy all of the other shares to server number 2
        def _copy_shares(ign):
            for i in xrange(1, 10):
                self._copy_share_to_server(i, 2)
        d.addCallback(_copy_shares)
        # Remove server 0, and add another in its place
        d.addCallback(lambda ign:
            self.g.remove_server(self.g.servers_by_number[0].my_nodeid))
        d.addCallback(lambda ign:
            self._add_server_with_share(server_number=4, share_number=0,
                                        readonly=True))
        def _reset_encoding_parameters(ign, happy=4):
            client = self.g.clients[0]
            client.DEFAULT_ENCODING_PARAMETERS['happy'] = happy
            return client
        d.addCallback(_reset_encoding_parameters)
        d.addCallback(lambda client:
            client.upload(upload.Data("data" * 10000, convergence="")))
        d.addCallback(lambda ign:
            self.failUnless(self._has_happy_share_distribution()))
        return d

    def test_happiness_with_all_readonly_peers(self):
        # server 3: share 1, read-only
        # server 1: share 2, read-only
        # server 2: shares 0-9, read-only
        # server 4: share 0, read-only
        # The idea with this test is to make sure that the survey of
        # read-only peers doesn't undercount servers of happiness.
        self.basedir = self.mktemp()
        d = self._setup_and_upload()
        d.addCallback(lambda ign:
            self._add_server_with_share(server_number=4, share_number=0,
                                        readonly=True))
        d.addCallback(lambda ign:
            self._add_server_with_share(server_number=3, share_number=1,
                                        readonly=True))
        d.addCallback(lambda ign:
            self._add_server_with_share(server_number=1, share_number=2,
                                        readonly=True))
        d.addCallback(lambda ign:
            self._add_server_with_share(server_number=2, share_number=0,
                                        readonly=True))
        def _copy_shares(ign):
            for i in xrange(1, 10):
                self._copy_share_to_server(i, 2)
        d.addCallback(_copy_shares)
        d.addCallback(lambda ign:
            self.g.remove_server(self.g.servers_by_number[0].my_nodeid))
        def _reset_encoding_parameters(ign, happy=4):
            client = self.g.clients[0]
            client.DEFAULT_ENCODING_PARAMETERS['happy'] = happy
            return client
        d.addCallback(_reset_encoding_parameters)
        d.addCallback(lambda client:
            client.upload(upload.Data("data" * 10000, convergence="")))
        d.addCallback(lambda ign:
            self.failUnless(self._has_happy_share_distribution()))
        return d

    def test_dropped_servers_in_encoder(self):
        # The Encoder does its own "servers_of_happiness" check if it
        # happens to lose a bucket during an upload (it assumes that
        # the layout presented to it satisfies "servers_of_happiness"
        # until a failure occurs)
        #
        # This test simulates an upload where servers break after peer
        # selection, but before they are written to.
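        #
        # (A sketch of the arithmetic below, not an extra assertion: after
        # setup we have five live servers and happy=4, so the Encoder can
        # afford to lose one server -- four still satisfy happiness -- but
        # losing two leaves only three, which is why the second upload is
        # expected to raise UploadUnhappinessError.)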
        def _set_basedir(ign=None):
            self.basedir = self.mktemp()
        _set_basedir()
        d = self._setup_and_upload()
        # Add 5 servers
        def _do_server_setup(ign):
            self._add_server(server_number=1)
            self._add_server(server_number=2)
            self._add_server(server_number=3)
            self._add_server(server_number=4)
            self._add_server(server_number=5)
        d.addCallback(_do_server_setup)
        # remove the original server
        # (necessary to ensure that the Tahoe2PeerSelector will distribute
        # all the shares)
        def _remove_server(ign):
            server = self.g.servers_by_number[0]
            self.g.remove_server(server.my_nodeid)
        d.addCallback(_remove_server)
        # This should succeed; we still have 4 servers, and the
        # happiness of the upload is 4.
        d.addCallback(lambda ign:
            self._do_upload_with_broken_servers(1))
        # Now, do the same thing over again, but drop 2 servers instead
        # of 1. This should fail, because servers_of_happiness is 4 and
        # we can't satisfy that.
        d.addCallback(_set_basedir)
        d.addCallback(lambda ign:
            self._setup_and_upload())
        d.addCallback(_do_server_setup)
        d.addCallback(_remove_server)
        d.addCallback(lambda ign:
            self.shouldFail(UploadUnhappinessError,
                            "test_dropped_servers_in_encoder",
                            "shares could be placed on only 3 server(s) "
                            "such that any 3 of them have enough shares to "
                            "recover the file, but we were asked to place "
                            "shares on at least 4",
                            self._do_upload_with_broken_servers, 2))
        # Now do the same thing over again, but make some of the servers
        # readonly, break some of the ones that aren't, and make sure that
        # happiness accounting is preserved.
        d.addCallback(_set_basedir)
        d.addCallback(lambda ign:
            self._setup_and_upload())
        def _do_server_setup_2(ign):
            self._add_server(1)
            self._add_server(2)
            self._add_server(3)
            self._add_server_with_share(4, 7, readonly=True)
            self._add_server_with_share(5, 8, readonly=True)
        d.addCallback(_do_server_setup_2)
        d.addCallback(_remove_server)
        d.addCallback(lambda ign:
            self._do_upload_with_broken_servers(1))
        d.addCallback(_set_basedir)
        d.addCallback(lambda ign:
            self._setup_and_upload())
        d.addCallback(_do_server_setup_2)
        d.addCallback(_remove_server)
        d.addCallback(lambda ign:
            self.shouldFail(UploadUnhappinessError,
                            "test_dropped_servers_in_encoder",
                            "shares could be placed on only 3 server(s) "
                            "such that any 3 of them have enough shares to "
                            "recover the file, but we were asked to place "
                            "shares on at least 4",
                            self._do_upload_with_broken_servers, 2))
        return d

    def test_merge_peers(self):
        # merge_peers merges a set of upload_servers and a dict of
        # shareid -> set(peerid) mappings.
        shares = {
            1: set(["server1"]),
            2: set(["server2"]),
            3: set(["server3"]),
            4: set(["server4", "server5"]),
            5: set(["server1", "server2"]),
        }
        # if not provided with an upload_servers argument, it should just
        # return the first argument unchanged.
        self.failUnlessEqual(shares, merge_peers(shares, set([])))
        class FakePeerTracker:
            pass
        trackers = []
        for (i, server) in [(i, "server%d" % i) for i in xrange(5, 9)]:
            t = FakePeerTracker()
            t.peerid = server
            t.buckets = [i]
            trackers.append(t)
        expected = {
            1: set(["server1"]),
            2: set(["server2"]),
            3: set(["server3"]),
            4: set(["server4", "server5"]),
            5: set(["server1", "server2", "server5"]),
            6: set(["server6"]),
            7: set(["server7"]),
            8: set(["server8"]),
        }
        self.failUnlessEqual(expected, merge_peers(shares, set(trackers)))
        shares2 = {}
        expected = {
            5: set(["server5"]),
            6: set(["server6"]),
            7: set(["server7"]),
            8: set(["server8"]),
        }
        self.failUnlessEqual(expected, merge_peers(shares2, set(trackers)))
        shares3 = {}
        trackers = []
        expected = {}
        for (i, server) in [(i, "server%d" % i) for i in xrange(10)]:
            shares3[i] = set([server])
            t = FakePeerTracker()
            t.peerid = server
            t.buckets = [i]
            trackers.append(t)
            expected[i] = set([server])
        self.failUnlessEqual(expected, merge_peers(shares3, set(trackers)))
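
    # To summarize the behavior exercised above (a description of what the
    # expected dictionaries illustrate, not additional test code):
    # merge_peers() returns a shnum -> set(peerid) dict that keeps every
    # mapping from its first argument and, for each tracker in its second
    # argument, adds tracker.peerid under every shnum in tracker.buckets.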

    def test_servers_of_happiness_utility_function(self):
        # These tests are concerned with the servers_of_happiness()
        # utility function, and its underlying matching algorithm. Other
        # aspects of the servers_of_happiness behavior are tested
        # elsewhere. These tests exist to ensure that
        # servers_of_happiness doesn't under- or overcount the happiness
        # value for given inputs.

        # servers_of_happiness expects a dict of
        # shnum => set(peerids) as a preexisting shares argument.
        test1 = {
            1: set(["server1"]),
            2: set(["server2"]),
            3: set(["server3"]),
            4: set(["server4"]),
        }
        happy = servers_of_happiness(test1)
        self.failUnlessEqual(4, happy)
        test1[4] = set(["server1"])
        # We've added a duplicate server, so now servers_of_happiness
        # should be 3 instead of 4.
        happy = servers_of_happiness(test1)
        self.failUnlessEqual(3, happy)
        # The second argument of merge_peers should be a set of
        # objects with peerid and buckets as attributes. In actual use,
        # these will be PeerTracker instances, but for testing it is fine
        # to make a FakePeerTracker whose job is to hold those instance
        # variables to test that part.
        class FakePeerTracker:
            pass
        trackers = []
        for (i, server) in [(i, "server%d" % i) for i in xrange(5, 9)]:
            t = FakePeerTracker()
            t.peerid = server
            t.buckets = [i]
            trackers.append(t)
        # Recall that test1 is a server layout with servers_of_happiness
        # = 3. Since there isn't any overlap between the shnum ->
        # set([peerid]) correspondences in test1 and those in trackers,
        # the result here should be 7.
        test2 = merge_peers(test1, set(trackers))
        happy = servers_of_happiness(test2)
        self.failUnlessEqual(7, happy)
        # Now add an overlapping server to trackers. This is redundant,
        # so it should not cause the previously reported happiness value
        # to change.
        t = FakePeerTracker()
        t.peerid = "server1"
        t.buckets = [1]
        trackers.append(t)
        test2 = merge_peers(test1, set(trackers))
        happy = servers_of_happiness(test2)
        self.failUnlessEqual(7, happy)
        test = {}
        happy = servers_of_happiness(test)
        self.failUnlessEqual(0, happy)
        # Test a more substantial overlap between the trackers and the
        # existing assignments.
        test = {
            1: set(['server1']),
            2: set(['server2']),
            3: set(['server3']),
            4: set(['server4']),
        }
        trackers = []
        t = FakePeerTracker()
        t.peerid = 'server5'
        t.buckets = [4]
        trackers.append(t)
        t = FakePeerTracker()
        t.peerid = 'server6'
        t.buckets = [3, 5]
        trackers.append(t)
        # The value returned by servers_of_happiness is the size
        # of a maximum matching in the bipartite graph that
        # servers_of_happiness() makes between peerids and share
        # numbers. It should find something like this:
        # (server 1, share 1)
        # (server 2, share 2)
        # (server 3, share 3)
        # (server 5, share 4)
        # (server 6, share 5)
        #
        # and, since there are 5 edges in this matching, it should
        # return 5.
        test2 = merge_peers(test, set(trackers))
        happy = servers_of_happiness(test2)
        self.failUnlessEqual(5, happy)
        # Zooko's first puzzle:
        # (from http://allmydata.org/trac/tahoe-lafs/ticket/778#comment:156)
        #
        # server 1: shares 0, 1
        # server 2: shares 1, 2
        # server 3: share 2
        #
        # This should yield happiness of 3.
        test = {
            0: set(['server1']),
            1: set(['server1', 'server2']),
            2: set(['server2', 'server3']),
        }
        self.failUnlessEqual(3, servers_of_happiness(test))
        # Zooko's second puzzle:
        # (from http://allmydata.org/trac/tahoe-lafs/ticket/778#comment:158)
        #
        # server 1: shares 0, 1
        # server 2: share 1
        #
        # This should yield happiness of 2.
        test = {
            0: set(['server1']),
            1: set(['server1', 'server2']),
        }
        self.failUnlessEqual(2, servers_of_happiness(test))
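        # Note (a sketch, not an extra assertion): because the value is a
        # maximum matching rather than a simple count of servers or shares,
        # piling every share onto a single server never yields a happiness
        # above 1. For example, a layout like
        #     {0: set(['server1']), 1: set(['server1']), 2: set(['server1'])}
        # would give a happiness of 1 under the matching definition above.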

    def test_shares_by_server(self):
        test = dict([(i, set(["server%d" % i])) for i in xrange(1, 5)])
        sbs = shares_by_server(test)
        self.failUnlessEqual(set([1]), sbs["server1"])
        self.failUnlessEqual(set([2]), sbs["server2"])
        self.failUnlessEqual(set([3]), sbs["server3"])
        self.failUnlessEqual(set([4]), sbs["server4"])
        test1 = {
            1: set(["server1"]),
            2: set(["server1"]),
            3: set(["server1"]),
            4: set(["server2"]),
            5: set(["server2"]),
        }
        sbs = shares_by_server(test1)
        self.failUnlessEqual(set([1, 2, 3]), sbs["server1"])
        self.failUnlessEqual(set([4, 5]), sbs["server2"])
        # This should fail unless the peerid part of the mapping is a set
        test2 = {1: "server1"}
        self.shouldFail(AssertionError,
                        "test_shares_by_server",
                        "",
                        shares_by_server, test2)
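        # To summarize (a description, not an extra assertion):
        # shares_by_server() inverts a shnum -> set(peerid) dict into
        # peerid -> set(shnum), and it expects each value of its argument to
        # be a set -- which is why the bare-string mapping above is expected
        # to raise AssertionError.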

    def test_existing_share_detection(self):
        self.basedir = self.mktemp()
        d = self._setup_and_upload()
        # Our final setup should look like this:
        # server 1: shares 0 - 9, read-only
        # server 2: empty
        # server 3: empty
        # server 4: empty
        # The purpose of this test is to make sure that the peer selector
        # knows about the shares on server 1, even though it is read-only.
        # It used to simply filter these out, which would cause the test
        # to fail when servers_of_happiness = 4.
        d.addCallback(lambda ign:
            self._add_server_with_share(1, 0, True))
        d.addCallback(lambda ign:
            self._add_server(2))
        d.addCallback(lambda ign:
            self._add_server(3))
        d.addCallback(lambda ign:
            self._add_server(4))
        def _copy_shares(ign):
            for i in xrange(1, 10):
                self._copy_share_to_server(i, 1)
        d.addCallback(_copy_shares)
        d.addCallback(lambda ign:
            self.g.remove_server(self.g.servers_by_number[0].my_nodeid))
        def _prepare_client(ign):
            client = self.g.clients[0]
            client.DEFAULT_ENCODING_PARAMETERS['happy'] = 4
            return client
        d.addCallback(_prepare_client)
        d.addCallback(lambda client:
            client.upload(upload.Data("data" * 10000, convergence="")))
        d.addCallback(lambda ign:
            self.failUnless(self._has_happy_share_distribution()))
        return d

    def test_query_counting(self):
        # If peer selection fails, Tahoe2PeerSelector prints out a lot
        # of helpful diagnostic information, including query stats.
        # This test helps make sure that that information is accurate.
        self.basedir = self.mktemp()
        d = self._setup_and_upload()
        def _setup(ign):
            for i in xrange(1, 11):
                self._add_server(server_number=i)
            self.g.remove_server(self.g.servers_by_number[0].my_nodeid)
            c = self.g.clients[0]
            # We set happy to an unsatisfiable value so that we can check the
            # counting in the exception message. The same progress message
            # is also used when the upload is successful, but in that case it
            # only gets written to a log, so we can't see what it says.
            c.DEFAULT_ENCODING_PARAMETERS['happy'] = 45
            return c
        d.addCallback(_setup)
        d.addCallback(lambda c:
            self.shouldFail(UploadUnhappinessError, "test_query_counting",
                            "10 queries placed some shares",
                            c.upload, upload.Data("data" * 10000,
                                                  convergence="")))
        # Now try with some readonly servers. We want to make sure that
        # the readonly peer share discovery phase is counted correctly.
        def _reset(ign):
            self.basedir = self.mktemp()
            self.g = None
        d.addCallback(_reset)
        d.addCallback(lambda ign:
            self._setup_and_upload())
        def _then(ign):
            for i in xrange(1, 11):
                self._add_server(server_number=i)
            self._add_server(server_number=11, readonly=True)
            self._add_server(server_number=12, readonly=True)
            self.g.remove_server(self.g.servers_by_number[0].my_nodeid)
            c = self.g.clients[0]
            c.DEFAULT_ENCODING_PARAMETERS['happy'] = 45
            return c
        d.addCallback(_then)
        d.addCallback(lambda c:
            self.shouldFail(UploadUnhappinessError, "test_query_counting",
                            "2 placed none (of which 2 placed none due to "
                            "the server being full",
                            c.upload, upload.Data("data" * 10000,
                                                  convergence="")))
        # Now try the case where the upload process finds a bunch of the
        # shares that it wants to place on the first server, including
        # the one that it wanted to allocate there. Though no shares will
        # be allocated in this request, it should still be called
        # productive, since it caused some homeless shares to be
        # removed.
        d.addCallback(_reset)
        d.addCallback(lambda ign:
            self._setup_and_upload())
        def _next(ign):
            for i in xrange(1, 11):
                self._add_server(server_number=i)
            # Copy all of the shares to server 9, since that will be
            # the first one that the selector sees.
            for i in xrange(10):
                self._copy_share_to_server(i, 9)
            # Remove server 0, and its contents
            self.g.remove_server(self.g.servers_by_number[0].my_nodeid)
            # Make happiness unsatisfiable
            c = self.g.clients[0]
            c.DEFAULT_ENCODING_PARAMETERS['happy'] = 45
            return c
        d.addCallback(_next)
        d.addCallback(lambda c:
            self.shouldFail(UploadUnhappinessError, "test_query_counting",
                            "1 queries placed some shares",
                            c.upload, upload.Data("data" * 10000,
                                                  convergence="")))
        return d

    def test_upper_limit_on_readonly_queries(self):
        self.basedir = self.mktemp()
        d = self._setup_and_upload()
        def _then(ign):
            for i in xrange(1, 11):
                self._add_server(server_number=i, readonly=True)
            self.g.remove_server(self.g.servers_by_number[0].my_nodeid)
            c = self.g.clients[0]
            c.DEFAULT_ENCODING_PARAMETERS['k'] = 2
            c.DEFAULT_ENCODING_PARAMETERS['happy'] = 4
            c.DEFAULT_ENCODING_PARAMETERS['n'] = 4
            return c
        d.addCallback(_then)
        d.addCallback(lambda client:
            self.shouldFail(UploadUnhappinessError,
                            "test_upper_limit_on_readonly_queries",
                            "sent 8 queries to 8 peers",
                            client.upload,
                            upload.Data('data' * 10000, convergence="")))
        return d

    def test_exception_messages_during_peer_selection(self):
        # server 1: read-only, no shares
        # server 2: read-only, no shares
        # server 3: read-only, no shares
        # server 4: read-only, no shares
        # server 5: read-only, no shares
        # This will fail, but we want to make sure that the log messages
        # are informative about why it has failed.
        self.basedir = self.mktemp()
        d = self._setup_and_upload()
        d.addCallback(lambda ign:
            self._add_server(server_number=1, readonly=True))
        d.addCallback(lambda ign:
            self._add_server(server_number=2, readonly=True))
        d.addCallback(lambda ign:
            self._add_server(server_number=3, readonly=True))
        d.addCallback(lambda ign:
            self._add_server(server_number=4, readonly=True))
        d.addCallback(lambda ign:
            self._add_server(server_number=5, readonly=True))
        d.addCallback(lambda ign:
            self.g.remove_server(self.g.servers_by_number[0].my_nodeid))
        def _reset_encoding_parameters(ign, happy=4):
            client = self.g.clients[0]
            client.DEFAULT_ENCODING_PARAMETERS['happy'] = happy
            return client
        d.addCallback(_reset_encoding_parameters)
        d.addCallback(lambda client:
            self.shouldFail(UploadUnhappinessError, "test_selection_exceptions",
                            "placed 0 shares out of 10 "
                            "total (10 homeless), want to place shares on at "
                            "least 4 servers such that any 3 of them have "
                            "enough shares to recover the file, "
                            "sent 5 queries to 5 peers, 0 queries placed "
                            "some shares, 5 placed none "
                            "(of which 5 placed none due to the server being "
                            "full and 0 placed none due to an error)",
                            client.upload,
                            upload.Data("data" * 10000, convergence="")))
        # server 1: read-only, no shares
        # server 2: broken, no shares
        # server 3: read-only, no shares
        # server 4: read-only, no shares
        # server 5: read-only, no shares
        def _reset(ign):
            self.basedir = self.mktemp()
        d.addCallback(_reset)
        d.addCallback(lambda ign:
            self._setup_and_upload())
        d.addCallback(lambda ign:
            self._add_server(server_number=1, readonly=True))
        d.addCallback(lambda ign:
            self._add_server(server_number=2))
        def _break_server_2(ign):
            server = self.g.servers_by_number[2].my_nodeid
            # We have to break the server in servers_by_id,
            # because the one in servers_by_number isn't wrapped,
            # and doesn't look at its broken attribute when answering
            # queries.
            self.g.servers_by_id[server].broken = True
        d.addCallback(_break_server_2)
        d.addCallback(lambda ign:
            self._add_server(server_number=3, readonly=True))
        d.addCallback(lambda ign:
            self._add_server(server_number=4, readonly=True))
        d.addCallback(lambda ign:
            self._add_server(server_number=5, readonly=True))
        d.addCallback(lambda ign:
            self.g.remove_server(self.g.servers_by_number[0].my_nodeid))
        d.addCallback(_reset_encoding_parameters)
        d.addCallback(lambda client:
            self.shouldFail(UploadUnhappinessError, "test_selection_exceptions",
                            "placed 0 shares out of 10 "
                            "total (10 homeless), want to place shares on at "
                            "least 4 servers such that any 3 of them have "
                            "enough shares to recover the file, "
                            "sent 5 queries to 5 peers, 0 queries placed "
                            "some shares, 5 placed none "
                            "(of which 4 placed none due to the server being "
                            "full and 1 placed none due to an error)",
                            client.upload,
                            upload.Data("data" * 10000, convergence="")))
        # server 0, server 1 = empty, accepting shares
        # This should place all of the shares, but still fail with happy=4.
        # We want to make sure that the exception message is worded correctly.
        d.addCallback(_reset)
        d.addCallback(lambda ign:
            self._setup_grid())
        d.addCallback(lambda ign:
            self._add_server(server_number=1))
        d.addCallback(_reset_encoding_parameters)
        d.addCallback(lambda client:
            self.shouldFail(UploadUnhappinessError, "test_selection_exceptions",
                            "shares could be placed or found on only 2 "
                            "server(s). We were asked to place shares on at "
                            "least 4 server(s) such that any 3 of them have "
                            "enough shares to recover the file.",
                            client.upload, upload.Data("data" * 10000,
                                                       convergence="")))
        # servers 0 - 4 = empty, accepting shares
        # This too should place all the shares, and this too should fail,
        # but since the effective happiness is more than the k encoding
        # parameter, it should trigger a different error message than the one
        # above.
        d.addCallback(_reset)
        d.addCallback(lambda ign:
            self._setup_grid())
        d.addCallback(lambda ign:
            self._add_server(server_number=1))
        d.addCallback(lambda ign:
            self._add_server(server_number=2))
        d.addCallback(lambda ign:
            self._add_server(server_number=3))
        d.addCallback(lambda ign:
            self._add_server(server_number=4))
        d.addCallback(_reset_encoding_parameters, happy=7)
        d.addCallback(lambda client:
            self.shouldFail(UploadUnhappinessError, "test_selection_exceptions",
                            "shares could be placed on only 5 server(s) such "
                            "that any 3 of them have enough shares to recover "
                            "the file, but we were asked to place shares on "
                            "at least 7 such servers.",
                            client.upload, upload.Data("data" * 10000,
                                                       convergence="")))
        # server 0: shares 0 - 9
        # server 1: share 0, read-only
        # server 2: share 0, read-only
        # server 3: share 0, read-only
        # This should place all of the shares, but fail with happy=7.
        # Since the number of servers with shares is more than the number
        # necessary to reconstitute the file, this will trigger a different
        # error message than either of those above.
        d.addCallback(_reset)
        d.addCallback(lambda ign:
            self._setup_and_upload())
        d.addCallback(lambda ign:
            self._add_server_with_share(server_number=1, share_number=0,
                                        readonly=True))
        d.addCallback(lambda ign:
            self._add_server_with_share(server_number=2, share_number=0,
                                        readonly=True))
        d.addCallback(lambda ign:
            self._add_server_with_share(server_number=3, share_number=0,
                                        readonly=True))
        d.addCallback(_reset_encoding_parameters, happy=7)
        d.addCallback(lambda client:
            self.shouldFail(UploadUnhappinessError, "test_selection_exceptions",
                            "shares could be placed or found on 4 server(s), "
                            "but they are not spread out evenly enough to "
                            "ensure that any 3 of these servers would have "
                            "enough shares to recover the file. We were asked "
                            "to place shares on at least 7 servers such that "
                            "any 3 of them have enough shares to recover the "
                            "file",
                            client.upload, upload.Data("data" * 10000,
                                                       convergence="")))
        return d

    def test_problem_layout_comment_187(self):
        # #778 comment 187 broke an initial attempt at a share
        # redistribution algorithm. This test is here to demonstrate the
        # breakage, and to test that subsequent algorithms don't also
        # break in the same way.
        self.basedir = self.mktemp()
        d = self._setup_and_upload(k=2, n=3)
        # server 1: shares 0, 1, 2, readonly
        # server 2: share 0, readonly
        # server 3: share 0
        def _setup(ign):
            self._add_server_with_share(server_number=1, share_number=0,
                                        readonly=True)
            self._add_server_with_share(server_number=2, share_number=0,
                                        readonly=True)
            self._add_server_with_share(server_number=3, share_number=0)
            # Copy shares
            self._copy_share_to_server(1, 1)
            self._copy_share_to_server(2, 1)
            # Remove server 0
            self.g.remove_server(self.g.servers_by_number[0].my_nodeid)
            client = self.g.clients[0]
            client.DEFAULT_ENCODING_PARAMETERS['happy'] = 3
            return client
        d.addCallback(_setup)
        d.addCallback(lambda client:
            client.upload(upload.Data("data" * 10000, convergence="")))
        d.addCallback(lambda ign:
            self.failUnless(self._has_happy_share_distribution()))
        return d
    test_problem_layout_comment_187.todo = "this isn't fixed yet"

    def test_problem_layout_ticket_1118(self):
        # #1118 includes a report from a user who hit an assertion in
        # the upload code with this layout.
        self.basedir = self.mktemp()
        d = self._setup_and_upload(k=2, n=4)

        # server 0: no shares
        # server 1: shares 0, 3
        # server 3: share 1
        # server 2: share 2
        # The order in which they get queried is 0, 1, 3, 2
        def _setup(ign):
            self._add_server(server_number=0)
            self._add_server_with_share(server_number=1, share_number=0)
            self._add_server_with_share(server_number=2, share_number=2)
            self._add_server_with_share(server_number=3, share_number=1)
            # Copy shares
            self._copy_share_to_server(3, 1)
            storedir = self.get_serverdir(0)
            # remove the storedir, wiping out any existing shares
            shutil.rmtree(storedir)
            # create an empty storedir to replace the one we just removed
            os.mkdir(storedir)
            client = self.g.clients[0]
            client.DEFAULT_ENCODING_PARAMETERS['happy'] = 4
            return client
        d.addCallback(_setup)
        # Note: actually it should succeed! See
        # test_problem_layout_ticket_1128. But ticket 1118 is just to
        # make it realize that it has failed, so if it raises
        # UploadUnhappinessError then we'll give it the green light
        # for now.
        d.addCallback(lambda ignored:
            self.shouldFail(UploadUnhappinessError,
                            "test_problem_layout_ticket_1118",
                            "",
                            self.g.clients[0].upload,
                            upload.Data("data" * 10000, convergence="")))
        return d

    def test_problem_layout_ticket_1128(self):
        # Same layout as in test_problem_layout_ticket_1118 above; ticket
        # #1128 asks for the upload to actually succeed on it.
        self.basedir = self.mktemp()
        d = self._setup_and_upload(k=2, n=4)

        # server 0: no shares
        # server 1: shares 0, 3
        # server 3: share 1
        # server 2: share 2
        # The order in which they get queried is 0, 1, 3, 2
        def _setup(ign):
            self._add_server(server_number=0)
            self._add_server_with_share(server_number=1, share_number=0)
            self._add_server_with_share(server_number=2, share_number=2)
            self._add_server_with_share(server_number=3, share_number=1)
            # Copy shares
            self._copy_share_to_server(3, 1)
            storedir = self.get_serverdir(0)
            # remove the storedir, wiping out any existing shares
            shutil.rmtree(storedir)
            # create an empty storedir to replace the one we just removed
            os.mkdir(storedir)
            client = self.g.clients[0]
            client.DEFAULT_ENCODING_PARAMETERS['happy'] = 4
            return client
        d.addCallback(_setup)
        d.addCallback(lambda client:
            client.upload(upload.Data("data" * 10000, convergence="")))
        d.addCallback(lambda ign:
            self.failUnless(self._has_happy_share_distribution()))
        return d
    test_problem_layout_ticket_1128.todo = ("Invent a smarter uploader that "
                                            "uploads successfully in this case.")

    def test_upload_succeeds_with_some_homeless_shares(self):
        # If the upload is forced to stop trying to place shares before
        # it has placed (or otherwise accounted for) all of them, but it
        # has placed enough to satisfy the upload health criteria that
        # we're using, it should still succeed.
        self.basedir = self.mktemp()
        d = self._setup_and_upload()
        def _server_setup(ign):
            # Add four servers so that we have a layout like this:
            # server 1: share 0, read-only
            # server 2: share 1, read-only
            # server 3: share 2, read-only
            # server 4: share 3, read-only
            # If we set happy = 4, the upload will manage to satisfy
            # servers of happiness, but not place all of the shares; we
            # want to test that the upload is declared successful in
            # this case.
            self._add_server_with_share(server_number=1, share_number=0,
                                        readonly=True)
            self._add_server_with_share(server_number=2, share_number=1,
                                        readonly=True)
            self._add_server_with_share(server_number=3, share_number=2,
                                        readonly=True)
            self._add_server_with_share(server_number=4, share_number=3,
                                        readonly=True)
            # Remove server 0.
            self.g.remove_server(self.g.servers_by_number[0].my_nodeid)
            # Set the client appropriately
            c = self.g.clients[0]
            c.DEFAULT_ENCODING_PARAMETERS['happy'] = 4
            return c
        d.addCallback(_server_setup)
        d.addCallback(lambda client:
            client.upload(upload.Data("data" * 10000, convergence="")))
        d.addCallback(lambda ign:
            self.failUnless(self._has_happy_share_distribution()))
        return d

    def test_uploader_skips_over_servers_with_only_one_share(self):
        # We want to make sure that the redistribution logic ignores
        # servers with only one share, since placing these shares
        # elsewhere will at best keep happiness the same as it was, and
        # at worst hurt it.
        self.basedir = self.mktemp()
        d = self._setup_and_upload()
        def _server_setup(ign):
            # Add some servers so that the upload will need to
            # redistribute, but will first pass over a couple of servers
            # that don't have enough shares to redistribute before
            # finding one that does have shares to redistribute.
            self._add_server_with_share(server_number=1, share_number=0)
            self._add_server_with_share(server_number=2, share_number=2)
            self._add_server_with_share(server_number=3, share_number=1)
            self._add_server_with_share(server_number=8, share_number=4)
            self._add_server_with_share(server_number=5, share_number=5)
            self._add_server_with_share(server_number=10, share_number=7)
            for i in xrange(4):
                self._copy_share_to_server(i, 2)
            return self.g.clients[0]
        d.addCallback(_server_setup)
        d.addCallback(lambda client:
            client.upload(upload.Data("data" * 10000, convergence="")))
        d.addCallback(lambda ign:
            self.failUnless(self._has_happy_share_distribution()))
        return d

    def test_peer_selector_bucket_abort(self):
        # If peer selection for an upload fails due to an unhappy
        # layout, the peer selection process should abort the buckets it
        # allocates before failing, so that the space can be re-used.
        self.basedir = self.mktemp()
        self.set_up_grid(num_servers=5)

        # Try to upload a file with happy=7, which is unsatisfiable with
        # the current grid. This will fail, but should not take up any
        # space on the storage servers after it fails.
        client = self.g.clients[0]
        client.DEFAULT_ENCODING_PARAMETERS['happy'] = 7
        d = defer.succeed(None)
        d.addCallback(lambda ignored:
            self.shouldFail(UploadUnhappinessError,
                            "test_peer_selection_bucket_abort",
                            "",
                            client.upload, upload.Data("data" * 10000,
                                                       convergence="")))
        # wait for the abort messages to get there.
        def _turn_barrier(res):
            return fireEventually(res)
        d.addCallback(_turn_barrier)
        def _then(ignored):
            for server in self.g.servers_by_number.values():
                self.failUnlessEqual(server.allocated_size(), 0)
        d.addCallback(_then)
        return d

    def test_encoder_bucket_abort(self):
        # If enough servers die in the process of encoding and uploading
        # a file to make the layout unhappy, we should cancel the
        # newly-allocated buckets before dying.
        self.basedir = self.mktemp()
        self.set_up_grid(num_servers=4)
        client = self.g.clients[0]
        client.DEFAULT_ENCODING_PARAMETERS['happy'] = 7
        d = defer.succeed(None)
        d.addCallback(lambda ignored:
            self.shouldFail(UploadUnhappinessError,
                            "test_encoder_bucket_abort",
                            "",
                            self._do_upload_with_broken_servers, 1))
        def _turn_barrier(res):
            return fireEventually(res)
        d.addCallback(_turn_barrier)
        def _then(ignored):
            for server in self.g.servers_by_number.values():
                self.failUnlessEqual(server.allocated_size(), 0)
        d.addCallback(_then)
        return d

    def _set_up_nodes_extra_config(self, clientdir):
        cfgfn = os.path.join(clientdir, "tahoe.cfg")
        oldcfg = open(cfgfn, "r").read()
        f = open(cfgfn, "wt")
        f.write(oldcfg)
        f.write("\n")
        f.write("[client]\n")
        f.write("shares.needed = 7\n")
        f.write("shares.total = 12\n")
        f.write("\n")
        f.close()
        return None
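
    # After the rewrite above, the client's tahoe.cfg ends with an appended
    # stanza of the form:
    #   [client]
    #   shares.needed = 7
    #   shares.total = 12
    # presumably giving nodes set up through this hook a 7-of-12 encoding.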

    # TODO:
    #  upload with exactly 75 peers (shares_of_happiness)
    #  have a download fail
    #  cancel a download (need to implement more cancel stuff)