import time, os.path, stat, re, simplejson, struct

from twisted.trial import unittest

from twisted.internet import defer
from twisted.application import service
from foolscap import eventual
import itertools
from allmydata import interfaces
from allmydata.util import fileutil, hashutil, base32, pollmixin
from allmydata.storage.server import StorageServer
from allmydata.storage.mutable import MutableShareFile
from allmydata.storage.immutable import BucketWriter, BucketReader
from allmydata.storage.common import DataTooLargeError, storage_index_to_dir, \
     UnknownMutableContainerVersionError, UnknownImmutableContainerVersionError
from allmydata.storage.lease import LeaseInfo
from allmydata.storage.crawler import BucketCountingCrawler
from allmydata.storage.expirer import LeaseCheckingCrawler
from allmydata.immutable.layout import WriteBucketProxy, WriteBucketProxy_v2, \
     ReadBucketProxy
from allmydata.interfaces import BadWriteEnablerError
from allmydata.test.common import LoggingServiceParent
from allmydata.test.common_web import WebRenderingMixin
from allmydata.web.storage import StorageStatus, remove_prefix

class Marker:
    pass
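
# FakeCanary appears to stand in for the Foolscap "canary" object whose
# notifyOnDisconnect/dontNotifyOnDisconnect hooks the storage server uses to
# learn about client disconnects; test_disconnect below fires the recorded
# disconnectors by hand to simulate a dropped connection.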
class FakeCanary:
    def __init__(self, ignore_disconnectors=False):
        self.ignore = ignore_disconnectors
        self.disconnectors = {}
    def notifyOnDisconnect(self, f, *args, **kwargs):
        if self.ignore:
            return
        m = Marker()
        self.disconnectors[m] = (f, args, kwargs)
        return m
    def dontNotifyOnDisconnect(self, marker):
        if self.ignore:
            return
        del self.disconnectors[marker]

class FakeStatsProvider:
    def count(self, name, delta=1):
        pass
    def register_producer(self, producer):
        pass

class Bucket(unittest.TestCase):
    def make_workdir(self, name):
        basedir = os.path.join("storage", "Bucket", name)
        incoming = os.path.join(basedir, "tmp", "bucket")
        final = os.path.join(basedir, "bucket")
        fileutil.make_dirs(basedir)
        fileutil.make_dirs(os.path.join(basedir, "tmp"))
        return incoming, final

    def bucket_writer_closed(self, bw, consumed):
        pass
    def add_latency(self, category, latency):
        pass
    def count(self, name, delta=1):
        pass

    def make_lease(self):
        owner_num = 0
        renew_secret = os.urandom(32)
        cancel_secret = os.urandom(32)
        expiration_time = time.time() + 5000
        return LeaseInfo(owner_num, renew_secret, cancel_secret,
                         expiration_time, "\x00" * 20)

    def test_create(self):
        incoming, final = self.make_workdir("test_create")
        bw = BucketWriter(self, incoming, final, 200, self.make_lease(),
                          FakeCanary())
        bw.remote_write(0, "a"*25)
        bw.remote_write(25, "b"*25)
        bw.remote_write(50, "c"*25)
        bw.remote_write(75, "d"*7)
        bw.remote_close()

    def test_readwrite(self):
        incoming, final = self.make_workdir("test_readwrite")
        bw = BucketWriter(self, incoming, final, 200, self.make_lease(),
                          FakeCanary())
        bw.remote_write(0, "a"*25)
        bw.remote_write(25, "b"*25)
        bw.remote_write(50, "c"*7) # last block may be short
        bw.remote_close()

        # now read from it
        br = BucketReader(self, bw.finalhome)
        self.failUnlessEqual(br.remote_read(0, 25), "a"*25)
        self.failUnlessEqual(br.remote_read(25, 25), "b"*25)
        self.failUnlessEqual(br.remote_read(50, 7), "c"*7)
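
# RemoteBucket is a loopback stand-in for a remote reference: callRemote("foo",
# ...) just invokes remote_foo() on the local target via a Deferred, so the
# bucket-proxy classes below can be exercised without a real Foolscap
# connection.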
class RemoteBucket:

    def callRemote(self, methname, *args, **kwargs):
        def _call():
            meth = getattr(self.target, "remote_" + methname)
            return meth(*args, **kwargs)
        return defer.maybeDeferred(_call)

class BucketProxy(unittest.TestCase):
    def make_bucket(self, name, size):
        basedir = os.path.join("storage", "BucketProxy", name)
        incoming = os.path.join(basedir, "tmp", "bucket")
        final = os.path.join(basedir, "bucket")
        fileutil.make_dirs(basedir)
        fileutil.make_dirs(os.path.join(basedir, "tmp"))
        bw = BucketWriter(self, incoming, final, size, self.make_lease(),
                          FakeCanary())
        rb = RemoteBucket()
        rb.target = bw
        return bw, rb, final

    def make_lease(self):
        owner_num = 0
        renew_secret = os.urandom(32)
        cancel_secret = os.urandom(32)
        expiration_time = time.time() + 5000
        return LeaseInfo(owner_num, renew_secret, cancel_secret,
                         expiration_time, "\x00" * 20)

    def bucket_writer_closed(self, bw, consumed):
        pass
    def add_latency(self, category, latency):
        pass
    def count(self, name, delta=1):
        pass

    def test_create(self):
        bw, rb, sharefname = self.make_bucket("test_create", 500)
        bp = WriteBucketProxy(rb,
                              data_size=300,
                              block_size=10,
                              num_segments=5,
                              num_share_hashes=3,
                              uri_extension_size_max=500, nodeid=None)
        self.failUnless(interfaces.IStorageBucketWriter.providedBy(bp))
2008-10-10 02:11:39 +00:00
def _do_test_readwrite ( self , name , header_size , wbp_class , rbp_class ) :
2007-07-13 21:04:49 +00:00
# Let's pretend each share has 100 bytes of data, and that there are
2008-12-19 15:18:07 +00:00
# 4 segments (25 bytes each), and 8 shares total. So the two
# per-segment merkle trees (crypttext_hash_tree,
2007-07-13 21:04:49 +00:00
# block_hashes) will have 4 leaves and 7 nodes each. The per-share
# merkle tree (share_hashes) has 8 leaves and 15 nodes, and we need 3
# nodes. Furthermore, let's assume the uri_extension is 500 bytes
# long. That should make the whole share:
#
2007-09-04 16:00:24 +00:00
# 0x24 + 100 + 7*32 + 7*32 + 7*32 + 3*(2+32) + 4+500 = 1414 bytes long
2008-10-10 01:13:27 +00:00
# 0x44 + 100 + 7*32 + 7*32 + 7*32 + 3*(2+32) + 4+500 = 1446 bytes long
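        #
        # (Presumably the 0x24-byte header is the v1 share layout and the
        # 0x44-byte header is the v2 layout, whose wider 8-byte offset fields
        # allow shares larger than 2**32 bytes.)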
        sharesize = header_size + 100 + 7*32 + 7*32 + 7*32 + 3*(2+32) + 4+500

        crypttext_hashes = [hashutil.tagged_hash("crypt", "bar%d" % i)
                            for i in range(7)]
        block_hashes = [hashutil.tagged_hash("block", "bar%d" % i)
                        for i in range(7)]
        share_hashes = [(i, hashutil.tagged_hash("share", "bar%d" % i))
                        for i in (1, 9, 13)]
        uri_extension = "s" + "E"*498 + "e"

        bw, rb, sharefname = self.make_bucket(name, sharesize)
        bp = wbp_class(rb,
                       data_size=95,
                       block_size=25,
                       num_segments=4,
                       num_share_hashes=3,
                       uri_extension_size_max=len(uri_extension),
                       nodeid=None)

        d = bp.put_header()
        d.addCallback(lambda res: bp.put_block(0, "a"*25))
        d.addCallback(lambda res: bp.put_block(1, "b"*25))
        d.addCallback(lambda res: bp.put_block(2, "c"*25))
        d.addCallback(lambda res: bp.put_block(3, "d"*20))
        d.addCallback(lambda res: bp.put_crypttext_hashes(crypttext_hashes))
        d.addCallback(lambda res: bp.put_block_hashes(block_hashes))
        d.addCallback(lambda res: bp.put_share_hashes(share_hashes))
        d.addCallback(lambda res: bp.put_uri_extension(uri_extension))
        d.addCallback(lambda res: bp.close())

        # now read everything back
        def _start_reading(res):
            br = BucketReader(self, sharefname)
            rb = RemoteBucket()
            rb.target = br
            rbp = rbp_class(rb, peerid="abc", storage_index="")
            self.failUnless("to peer" in repr(rbp))
            self.failUnless(interfaces.IStorageBucketReader.providedBy(rbp))

            d1 = rbp.get_block_data(0, 25, 25)
            d1.addCallback(lambda res: self.failUnlessEqual(res, "a"*25))
            d1.addCallback(lambda res: rbp.get_block_data(1, 25, 25))
            d1.addCallback(lambda res: self.failUnlessEqual(res, "b"*25))
            d1.addCallback(lambda res: rbp.get_block_data(2, 25, 25))
            d1.addCallback(lambda res: self.failUnlessEqual(res, "c"*25))
            d1.addCallback(lambda res: rbp.get_block_data(3, 25, 20))
            d1.addCallback(lambda res: self.failUnlessEqual(res, "d"*20))

            d1.addCallback(lambda res: rbp.get_crypttext_hashes())
            d1.addCallback(lambda res:
                           self.failUnlessEqual(res, crypttext_hashes))
            d1.addCallback(lambda res: rbp.get_block_hashes(set(range(4))))
            d1.addCallback(lambda res: self.failUnlessEqual(res, block_hashes))
            d1.addCallback(lambda res: rbp.get_share_hashes())
            d1.addCallback(lambda res: self.failUnlessEqual(res, share_hashes))
            d1.addCallback(lambda res: rbp.get_uri_extension())
            d1.addCallback(lambda res:
                           self.failUnlessEqual(res, uri_extension))
            return d1
        d.addCallback(_start_reading)
        return d

    def test_readwrite_v1(self):
        return self._do_test_readwrite("test_readwrite_v1",
                                       0x24, WriteBucketProxy, ReadBucketProxy)

    def test_readwrite_v2(self):
        return self._do_test_readwrite("test_readwrite_v2",
                                       0x44, WriteBucketProxy_v2, ReadBucketProxy)
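
# FakeDiskStorageServer replaces stat_disk() with one that just reports a
# canned DISKAVAIL value, so test_reserved_space below can exercise the
# reserved_space accounting without doing real statvfs() measurements.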
class FakeDiskStorageServer(StorageServer):
    def stat_disk(self, d):
        return self.DISKAVAIL

class Server(unittest.TestCase):

    def setUp(self):
        self.sparent = LoggingServiceParent()
        self.sparent.startService()
        self._lease_secret = itertools.count()
    def tearDown(self):
        return self.sparent.stopService()

    def workdir(self, name):
        basedir = os.path.join("storage", "Server", name)
        return basedir

    def create(self, name, reserved_space=0, klass=StorageServer):
        workdir = self.workdir(name)
        ss = klass(workdir, "\x00" * 20, reserved_space=reserved_space,
                   stats_provider=FakeStatsProvider())
        ss.setServiceParent(self.sparent)
        return ss

    def test_create(self):
        ss = self.create("test_create")

    def allocate(self, ss, storage_index, sharenums, size, canary=None):
        renew_secret = hashutil.tagged_hash("blah", "%d" % self._lease_secret.next())
        cancel_secret = hashutil.tagged_hash("blah", "%d" % self._lease_secret.next())
        if not canary:
            canary = FakeCanary()
        return ss.remote_allocate_buckets(storage_index,
                                          renew_secret, cancel_secret,
                                          sharenums, size, canary)

    def test_large_share(self):
        ss = self.create("test_large_share")

        already, writers = self.allocate(ss, "allocate", [0], 2**32+2)
        self.failUnlessEqual(already, set())
        self.failUnlessEqual(set(writers.keys()), set([0]))
        shnum, bucket = writers.items()[0]
        # This test is going to hammer your filesystem if it doesn't make a sparse file for this. :-(
        bucket.remote_write(2**32, "ab")
        bucket.remote_close()

        readers = ss.remote_get_buckets("allocate")
        reader = readers[shnum]
        self.failUnlessEqual(reader.remote_read(2**32, 2), "ab")
    test_large_share.skip = "This test can spuriously fail if you have less than 4 GiB free on your filesystem, and if your filesystem doesn't support efficient sparse files then it is very expensive (Mac OS X is the only system I know of in the desktop/server area that doesn't support efficient sparse files)."

    def test_dont_overfill_dirs(self):
        """
        This test asserts that if you add a second share whose storage index
        shares lots of leading bits with an extant share (but isn't the exact
        same storage index), this won't add an entry to the share directory.
        """
        ss = self.create("test_dont_overfill_dirs")
        already, writers = self.allocate(ss, "storageindex", [0], 10)
        for i, wb in writers.items():
            wb.remote_write(0, "%10d" % i)
            wb.remote_close()
        storedir = os.path.join(self.workdir("test_dont_overfill_dirs"),
                                "shares")
        children_of_storedir = set(os.listdir(storedir))

        # Now store another one under another storageindex that has leading
        # chars the same as the first storageindex.
        already, writers = self.allocate(ss, "storageindey", [0], 10)
        for i, wb in writers.items():
            wb.remote_write(0, "%10d" % i)
            wb.remote_close()
        storedir = os.path.join(self.workdir("test_dont_overfill_dirs"),
                                "shares")
        new_children_of_storedir = set(os.listdir(storedir))
        self.failUnlessEqual(children_of_storedir, new_children_of_storedir)

    def test_remove_incoming(self):
        ss = self.create("test_remove_incoming")
        already, writers = self.allocate(ss, "vid", range(3), 10)
        for i, wb in writers.items():
            wb.remote_write(0, "%10d" % i)
            wb.remote_close()
        incoming_share_dir = wb.incominghome
        incoming_bucket_dir = os.path.dirname(incoming_share_dir)
        incoming_prefix_dir = os.path.dirname(incoming_bucket_dir)
        incoming_dir = os.path.dirname(incoming_prefix_dir)
        self.failIf(os.path.exists(incoming_bucket_dir))
        self.failIf(os.path.exists(incoming_prefix_dir))
        self.failUnless(os.path.exists(incoming_dir))

    def test_allocate(self):
        ss = self.create("test_allocate")

        self.failUnlessEqual(ss.remote_get_buckets("allocate"), {})

        canary = FakeCanary()
        already, writers = self.allocate(ss, "allocate", [0,1,2], 75)
        self.failUnlessEqual(already, set())
        self.failUnlessEqual(set(writers.keys()), set([0,1,2]))

        # while the buckets are open, they should not count as readable
        self.failUnlessEqual(ss.remote_get_buckets("allocate"), {})

        # close the buckets
        for i, wb in writers.items():
            wb.remote_write(0, "%25d" % i)
            wb.remote_close()
            # aborting a bucket that was already closed is a no-op
            wb.remote_abort()

        # now they should be readable
        b = ss.remote_get_buckets("allocate")
        self.failUnlessEqual(set(b.keys()), set([0,1,2]))
        self.failUnlessEqual(b[0].remote_read(0, 25), "%25d" % 0)
        b_str = str(b[0])
        self.failUnless("BucketReader" in b_str, b_str)
        self.failUnless("mfwgy33dmf2g 0" in b_str, b_str)

        # now if we ask about writing again, the server should offer those
        # three buckets as already present. It should offer them even if we
        # don't ask about those specific ones.
        already, writers = self.allocate(ss, "allocate", [2,3,4], 75)
        self.failUnlessEqual(already, set([0,1,2]))
        self.failUnlessEqual(set(writers.keys()), set([3,4]))

        # while those two buckets are open for writing, the server should
        # refuse to offer them to uploaders

        already2, writers2 = self.allocate(ss, "allocate", [2,3,4,5], 75)
        self.failUnlessEqual(already2, set([0,1,2]))
        self.failUnlessEqual(set(writers2.keys()), set([5]))

        # aborting the writes should remove the tempfiles
        for i, wb in writers2.items():
            wb.remote_abort()
        already2, writers2 = self.allocate(ss, "allocate", [2,3,4,5], 75)
        self.failUnlessEqual(already2, set([0,1,2]))
        self.failUnlessEqual(set(writers2.keys()), set([5]))

        for i, wb in writers2.items():
            wb.remote_abort()
        for i, wb in writers.items():
            wb.remote_abort()

    def test_bad_container_version(self):
        ss = self.create("test_bad_container_version")
        a, w = self.allocate(ss, "si1", [0], 10)
        w[0].remote_write(0, "\xff"*10)
        w[0].remote_close()

        fn = os.path.join(ss.sharedir, storage_index_to_dir("si1"), "0")
        f = open(fn, "rb+")
        f.seek(0)
        f.write(struct.pack(">L", 0)) # this is invalid: minimum used is v1
        f.close()

        b = ss.remote_get_buckets("allocate")

        e = self.failUnlessRaises(UnknownImmutableContainerVersionError,
                                  ss.remote_get_buckets, "si1")
        self.failUnless("had version 0 but we wanted 1" in str(e), e)

    def test_disconnect(self):
        # simulate a disconnection
        ss = self.create("test_disconnect")
        canary = FakeCanary()
        already, writers = self.allocate(ss, "disconnect", [0,1,2], 75, canary)
        self.failUnlessEqual(already, set())
        self.failUnlessEqual(set(writers.keys()), set([0,1,2]))
        for (f, args, kwargs) in canary.disconnectors.values():
            f(*args, **kwargs)
        del already
        del writers

        # that ought to delete the incoming shares
        already, writers = self.allocate(ss, "disconnect", [0,1,2], 75)
        self.failUnlessEqual(already, set())
        self.failUnlessEqual(set(writers.keys()), set([0,1,2]))

    def test_reserved_space(self):
        ss = self.create("test_reserved_space", reserved_space=10000,
                         klass=FakeDiskStorageServer)
        # the FakeDiskStorageServer doesn't do real statvfs() calls
        ss.DISKAVAIL = 15000
        # 15k available, 10k reserved, leaves 5k for shares

        # a newly created and filled share incurs this much overhead, beyond
        # the size we request.
        OVERHEAD = 3*4
        LEASE_SIZE = 4+32+32+4
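        # (Presumably OVERHEAD is the three 4-byte fields of the share-file
        # header, and LEASE_SIZE is owner number + renew secret + cancel
        # secret + expiration time, matching the LeaseInfo fields used above.)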

        canary = FakeCanary(True)
        already, writers = self.allocate(ss, "vid1", [0,1,2], 1000, canary)
        self.failUnlessEqual(len(writers), 3)
        # now the StorageServer should have 3000 bytes provisionally
        # allocated, allowing only 2000 more to be claimed
        self.failUnlessEqual(len(ss._active_writers), 3)

        # allocating 1001-byte shares only leaves room for one
        already2, writers2 = self.allocate(ss, "vid2", [0,1,2], 1001, canary)
        self.failUnlessEqual(len(writers2), 1)
        self.failUnlessEqual(len(ss._active_writers), 4)

        # we abandon the first set, so their provisional allocation should be
        # returned
        del already
        del writers
        self.failUnlessEqual(len(ss._active_writers), 1)
        # now we have a provisional allocation of 1001 bytes

        # and we close the second set, so their provisional allocation should
        # become real, long-term allocation, and grows to include the
        # overhead.
        for bw in writers2.values():
            bw.remote_write(0, "a"*25)
            bw.remote_close()
        del already2
        del writers2
        del bw
        self.failUnlessEqual(len(ss._active_writers), 0)

        allocated = 1001 + OVERHEAD + LEASE_SIZE

        # we have to manually decrease DISKAVAIL, since we're not doing real
        # disk measurements
        ss.DISKAVAIL -= allocated

        # now there should be ALLOCATED=1001+12+72=1085 bytes allocated, and
        # 5000-1085=3915 free, therefore we can fit 39 100byte shares
        already3, writers3 = self.allocate(ss, "vid3", range(100), 100, canary)
        self.failUnlessEqual(len(writers3), 39)
        self.failUnlessEqual(len(ss._active_writers), 39)
        del already3
        del writers3
        self.failUnlessEqual(len(ss._active_writers), 0)
        ss.disownServiceParent()
        del ss

    def test_seek(self):
        basedir = self.workdir("test_seek_behavior")
        fileutil.make_dirs(basedir)
        filename = os.path.join(basedir, "testfile")
        f = open(filename, "wb")
        f.write("start")
        f.close()
        # mode="w" allows seeking-to-create-holes, but truncates pre-existing
        # files. mode="a" preserves previous contents but does not allow
        # seeking-to-create-holes. mode="r+" allows both.
        f = open(filename, "rb+")
        f.seek(100)
        f.write("100")
        f.close()
        filelen = os.stat(filename)[stat.ST_SIZE]
        self.failUnlessEqual(filelen, 100+3)
        f2 = open(filename, "rb")
        self.failUnlessEqual(f2.read(5), "start")
def test_leases ( self ) :
ss = self . create ( " test_leases " )
canary = FakeCanary ( )
sharenums = range ( 5 )
size = 100
rs0 , cs0 = ( hashutil . tagged_hash ( " blah " , " %d " % self . _lease_secret . next ( ) ) ,
hashutil . tagged_hash ( " blah " , " %d " % self . _lease_secret . next ( ) ) )
already , writers = ss . remote_allocate_buckets ( " si0 " , rs0 , cs0 ,
sharenums , size , canary )
self . failUnlessEqual ( len ( already ) , 0 )
self . failUnlessEqual ( len ( writers ) , 5 )
for wb in writers . values ( ) :
wb . remote_close ( )
leases = list ( ss . get_leases ( " si0 " ) )
self . failUnlessEqual ( len ( leases ) , 1 )
self . failUnlessEqual ( set ( [ l . renew_secret for l in leases ] ) , set ( [ rs0 ] ) )
rs1 , cs1 = ( hashutil . tagged_hash ( " blah " , " %d " % self . _lease_secret . next ( ) ) ,
hashutil . tagged_hash ( " blah " , " %d " % self . _lease_secret . next ( ) ) )
already , writers = ss . remote_allocate_buckets ( " si1 " , rs1 , cs1 ,
sharenums , size , canary )
for wb in writers . values ( ) :
wb . remote_close ( )
# take out a second lease on si1
rs2 , cs2 = ( hashutil . tagged_hash ( " blah " , " %d " % self . _lease_secret . next ( ) ) ,
hashutil . tagged_hash ( " blah " , " %d " % self . _lease_secret . next ( ) ) )
already , writers = ss . remote_allocate_buckets ( " si1 " , rs2 , cs2 ,
sharenums , size , canary )
self . failUnlessEqual ( len ( already ) , 5 )
self . failUnlessEqual ( len ( writers ) , 0 )
leases = list ( ss . get_leases ( " si1 " ) )
self . failUnlessEqual ( len ( leases ) , 2 )
self . failUnlessEqual ( set ( [ l . renew_secret for l in leases ] ) , set ( [ rs1 , rs2 ] ) )
# and a third lease, using add-lease
rs2a , cs2a = ( hashutil . tagged_hash ( " blah " , " %d " % self . _lease_secret . next ( ) ) ,
hashutil . tagged_hash ( " blah " , " %d " % self . _lease_secret . next ( ) ) )
ss . remote_add_lease ( " si1 " , rs2a , cs2a )
leases = list ( ss . get_leases ( " si1 " ) )
self . failUnlessEqual ( len ( leases ) , 3 )
self . failUnlessEqual ( set ( [ l . renew_secret for l in leases ] ) , set ( [ rs1 , rs2 , rs2a ] ) )
# add-lease on a missing storage index is silently ignored
self . failUnlessEqual ( ss . remote_add_lease ( " si18 " , " " , " " ) , None )
# check that si0 is readable
readers = ss . remote_get_buckets ( " si0 " )
self . failUnlessEqual ( len ( readers ) , 5 )
# renew the first lease. Only the proper renew_secret should work
ss . remote_renew_lease ( " si0 " , rs0 )
self . failUnlessRaises ( IndexError , ss . remote_renew_lease , " si0 " , cs0 )
self . failUnlessRaises ( IndexError , ss . remote_renew_lease , " si0 " , rs1 )
# check that si0 is still readable
readers = ss . remote_get_buckets ( " si0 " )
self . failUnlessEqual ( len ( readers ) , 5 )
# now cancel it
self . failUnlessRaises ( IndexError , ss . remote_cancel_lease , " si0 " , rs0 )
self . failUnlessRaises ( IndexError , ss . remote_cancel_lease , " si0 " , cs1 )
ss . remote_cancel_lease ( " si0 " , cs0 )
# si0 should now be gone
readers = ss . remote_get_buckets ( " si0 " )
self . failUnlessEqual ( len ( readers ) , 0 )
# and the renew should no longer work
self . failUnlessRaises ( IndexError , ss . remote_renew_lease , " si0 " , rs0 )
# cancel the first lease on si1, leaving the second and third in place
ss . remote_cancel_lease ( " si1 " , cs1 )
readers = ss . remote_get_buckets ( " si1 " )
self . failUnlessEqual ( len ( readers ) , 5 )
# the corresponding renew should no longer work
self . failUnlessRaises ( IndexError , ss . remote_renew_lease , " si1 " , rs1 )
leases = list ( ss . get_leases ( " si1 " ) )
self . failUnlessEqual ( len ( leases ) , 2 )
self . failUnlessEqual ( set ( [ l . renew_secret for l in leases ] ) , set ( [ rs2 , rs2a ] ) )
ss . remote_renew_lease ( " si1 " , rs2 )
# cancelling the second and third should make it go away
ss . remote_cancel_lease ( " si1 " , cs2 )
ss . remote_cancel_lease ( " si1 " , cs2a )
readers = ss . remote_get_buckets ( " si1 " )
self . failUnlessEqual ( len ( readers ) , 0 )
self . failUnlessRaises ( IndexError , ss . remote_renew_lease , " si1 " , rs1 )
self . failUnlessRaises ( IndexError , ss . remote_renew_lease , " si1 " , rs2 )
self . failUnlessRaises ( IndexError , ss . remote_renew_lease , " si1 " , rs2a )
leases = list ( ss . get_leases ( " si1 " ) )
self . failUnlessEqual ( len ( leases ) , 0 )
# test overlapping uploads
rs3 , cs3 = ( hashutil . tagged_hash ( " blah " , " %d " % self . _lease_secret . next ( ) ) ,
hashutil . tagged_hash ( " blah " , " %d " % self . _lease_secret . next ( ) ) )
rs4 , cs4 = ( hashutil . tagged_hash ( " blah " , " %d " % self . _lease_secret . next ( ) ) ,
hashutil . tagged_hash ( " blah " , " %d " % self . _lease_secret . next ( ) ) )
already , writers = ss . remote_allocate_buckets ( " si3 " , rs3 , cs3 ,
sharenums , size , canary )
self . failUnlessEqual ( len ( already ) , 0 )
self . failUnlessEqual ( len ( writers ) , 5 )
already2 , writers2 = ss . remote_allocate_buckets ( " si3 " , rs4 , cs4 ,
sharenums , size , canary )
self . failUnlessEqual ( len ( already2 ) , 0 )
self . failUnlessEqual ( len ( writers2 ) , 0 )
for wb in writers . values ( ) :
wb . remote_close ( )
leases = list ( ss . get_leases ( " si3 " ) )
self . failUnlessEqual ( len ( leases ) , 1 )
already3 , writers3 = ss . remote_allocate_buckets ( " si3 " , rs4 , cs4 ,
sharenums , size , canary )
self . failUnlessEqual ( len ( already3 ) , 5 )
self . failUnlessEqual ( len ( writers3 ) , 0 )
leases = list ( ss . get_leases ( " si3 " ) )
self . failUnlessEqual ( len ( leases ) , 2 )
def test_readonly ( self ) :
workdir = self . workdir ( " test_readonly " )
ss = StorageServer ( workdir , " \x00 " * 20 , readonly_storage = True )
ss . setServiceParent ( self . sparent )
already , writers = self . allocate ( ss , " vid " , [ 0 , 1 , 2 ] , 75 )
self . failUnlessEqual ( already , set ( ) )
self . failUnlessEqual ( writers , { } )
stats = ss . get_stats ( )
self . failUnlessEqual ( stats [ " storage_server.accepting_immutable_shares " ] ,
False )
if " storage_server.disk_avail " in stats :
# windows does not have os.statvfs, so it doesn't give us disk
# stats. But if there are stats, readonly_storage means
# disk_avail=0
self . failUnlessEqual ( stats [ " storage_server.disk_avail " ] , 0 )
def test_discard ( self ) :
        # discard is really only used for other tests, but we test it anyway
workdir = self . workdir ( " test_discard " )
ss = StorageServer ( workdir , " \x00 " * 20 , discard_storage = True )
ss . setServiceParent ( self . sparent )
canary = FakeCanary ( )
already , writers = self . allocate ( ss , " vid " , [ 0 , 1 , 2 ] , 75 )
self . failUnlessEqual ( already , set ( ) )
self . failUnlessEqual ( set ( writers . keys ( ) ) , set ( [ 0 , 1 , 2 ] ) )
for i , wb in writers . items ( ) :
wb . remote_write ( 0 , " %25d " % i )
wb . remote_close ( )
        # since we discard the data, the shares should be present but sparse.
        # Because the data was never actually stored, the data we read back
        # will be all zeros.
b = ss . remote_get_buckets ( " vid " )
self . failUnlessEqual ( set ( b . keys ( ) ) , set ( [ 0 , 1 , 2 ] ) )
self . failUnlessEqual ( b [ 0 ] . remote_read ( 0 , 25 ) , " \x00 " * 25 )
def test_advise_corruption ( self ) :
workdir = self . workdir ( " test_advise_corruption " )
ss = StorageServer ( workdir , " \x00 " * 20 , discard_storage = True )
ss . setServiceParent ( self . sparent )
si0_s = base32 . b2a ( " si0 " )
ss . remote_advise_corrupt_share ( " immutable " , " si0 " , 0 ,
" This share smells funny. \n " )
reportdir = os . path . join ( workdir , " corruption-advisories " )
reports = os . listdir ( reportdir )
self . failUnlessEqual ( len ( reports ) , 1 )
report_si0 = reports [ 0 ]
self . failUnless ( si0_s in report_si0 , report_si0 )
f = open ( os . path . join ( reportdir , report_si0 ) , " r " )
report = f . read ( )
f . close ( )
self . failUnless ( " type: immutable " in report )
self . failUnless ( ( " storage_index: %s " % si0_s ) in report )
self . failUnless ( " share_number: 0 " in report )
self . failUnless ( " This share smells funny. " in report )
# test the RIBucketWriter version too
si1_s = base32 . b2a ( " si1 " )
already , writers = self . allocate ( ss , " si1 " , [ 1 ] , 75 )
self . failUnlessEqual ( already , set ( ) )
self . failUnlessEqual ( set ( writers . keys ( ) ) , set ( [ 1 ] ) )
writers [ 1 ] . remote_write ( 0 , " data " )
writers [ 1 ] . remote_close ( )
b = ss . remote_get_buckets ( " si1 " )
self . failUnlessEqual ( set ( b . keys ( ) ) , set ( [ 1 ] ) )
b [ 1 ] . remote_advise_corrupt_share ( " This share tastes like dust. \n " )
reports = os . listdir ( reportdir )
self . failUnlessEqual ( len ( reports ) , 2 )
report_si1 = [ r for r in reports if si1_s in r ] [ 0 ]
f = open ( os . path . join ( reportdir , report_si1 ) , " r " )
report = f . read ( )
f . close ( )
self . failUnless ( " type: immutable " in report )
self . failUnless ( ( " storage_index: %s " % si1_s ) in report )
self . failUnless ( " share_number: 1 " in report )
self . failUnless ( " This share tastes like dust. " in report )
class MutableServer ( unittest . TestCase ) :
def setUp ( self ) :
self . sparent = LoggingServiceParent ( )
self . _lease_secret = itertools . count ( )
def tearDown ( self ) :
return self . sparent . stopService ( )
def workdir ( self , name ) :
basedir = os . path . join ( " storage " , " MutableServer " , name )
return basedir
def create ( self , name ) :
workdir = self . workdir ( name )
ss = StorageServer ( workdir , " \x00 " * 20 )
ss . setServiceParent ( self . sparent )
return ss
def test_create ( self ) :
ss = self . create ( " test_create " )
def write_enabler ( self , we_tag ) :
return hashutil . tagged_hash ( " we_blah " , we_tag )
def renew_secret ( self , tag ) :
return hashutil . tagged_hash ( " renew_blah " , str ( tag ) )
def cancel_secret ( self , tag ) :
return hashutil . tagged_hash ( " cancel_blah " , str ( tag ) )
def allocate ( self , ss , storage_index , we_tag , lease_tag , sharenums , size ) :
write_enabler = self . write_enabler ( we_tag )
renew_secret = self . renew_secret ( lease_tag )
cancel_secret = self . cancel_secret ( lease_tag )
rstaraw = ss . remote_slot_testv_and_readv_and_writev
testandwritev = dict ( [ ( shnum , ( [ ] , [ ] , None ) )
for shnum in sharenums ] )
readv = [ ]
rc = rstaraw ( storage_index ,
( write_enabler , renew_secret , cancel_secret ) ,
testandwritev ,
readv )
( did_write , readv_data ) = rc
self . failUnless ( did_write )
self . failUnless ( isinstance ( readv_data , dict ) )
self . failUnlessEqual ( len ( readv_data ) , 0 )
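        # For reference, the slot API exercised throughout this class has
        # roughly this shape (a sketch inferred from the calls in this file,
        # not a normative spec):
        #
        #   rstaraw(storage_index,
        #           (write_enabler, renew_secret, cancel_secret),
        #           {shnum: (testv, writev, new_length)},
        #           readv)
        #   => (all_tests_passed, {shnum: [data, ...]})
        #
        # where testv is a list of (offset, length, operator, specimen)
        # tuples, writev is a list of (offset, data) tuples, readv is a list
        # of (offset, length) tuples, and new_length (if not None) resizes
        # the container; see test_container_size and test_remove below for
        # the new_length=0 "delete the share" case.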
def test_bad_magic ( self ) :
ss = self . create ( " test_bad_magic " )
self . allocate ( ss , " si1 " , " we1 " , self . _lease_secret . next ( ) , set ( [ 0 ] ) , 10 )
fn = os . path . join ( ss . sharedir , storage_index_to_dir ( " si1 " ) , " 0 " )
f = open ( fn , " rb+ " )
f . seek ( 0 )
f . write ( " BAD MAGIC " )
f . close ( )
read = ss . remote_slot_readv
e = self . failUnlessRaises ( UnknownMutableContainerVersionError ,
read , " si1 " , [ 0 ] , [ ( 0 , 10 ) ] )
self . failUnless ( " had magic " in str ( e ) , e )
self . failUnless ( " but we wanted " in str ( e ) , e )
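        # (the mutable container evidently begins with a magic/version string
        #  which the server checks before parsing anything else, so
        #  clobbering it yields UnknownMutableContainerVersionError rather
        #  than a garbled read)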
def test_container_size ( self ) :
ss = self . create ( " test_container_size " )
self . allocate ( ss , " si1 " , " we1 " , self . _lease_secret . next ( ) ,
set ( [ 0 , 1 , 2 ] ) , 100 )
read = ss . remote_slot_readv
rstaraw = ss . remote_slot_testv_and_readv_and_writev
secrets = ( self . write_enabler ( " we1 " ) ,
self . renew_secret ( " we1 " ) ,
self . cancel_secret ( " we1 " ) )
data = " " . join ( [ ( " %d " % i ) * 10 for i in range ( 10 ) ] )
answer = rstaraw ( " si1 " , secrets ,
{ 0 : ( [ ] , [ ( 0 , data ) ] , len ( data ) + 12 ) } ,
[ ] )
self . failUnlessEqual ( answer , ( True , { 0 : [ ] , 1 : [ ] , 2 : [ ] } ) )
# trying to make the container too large will raise an exception
TOOBIG = MutableShareFile . MAX_SIZE + 10
self . failUnlessRaises ( DataTooLargeError ,
rstaraw , " si1 " , secrets ,
{ 0 : ( [ ] , [ ( 0 , data ) ] , TOOBIG ) } ,
[ ] )
# it should be possible to make the container smaller, although at
# the moment this doesn't actually affect the share, unless the
# container size is dropped to zero, in which case the share is
# deleted.
answer = rstaraw ( " si1 " , secrets ,
{ 0 : ( [ ] , [ ( 0 , data ) ] , len ( data ) + 8 ) } ,
[ ] )
self . failUnlessEqual ( answer , ( True , { 0 : [ ] , 1 : [ ] , 2 : [ ] } ) )
answer = rstaraw ( " si1 " , secrets ,
{ 0 : ( [ ] , [ ( 0 , data ) ] , 0 ) } ,
[ ] )
self . failUnlessEqual ( answer , ( True , { 0 : [ ] , 1 : [ ] , 2 : [ ] } ) )
read_answer = read ( " si1 " , [ 0 ] , [ ( 0 , 10 ) ] )
self . failUnlessEqual ( read_answer , { } )
def test_allocate ( self ) :
ss = self . create ( " test_allocate " )
self . allocate ( ss , " si1 " , " we1 " , self . _lease_secret . next ( ) ,
set ( [ 0 , 1 , 2 ] ) , 100 )
read = ss . remote_slot_readv
self . failUnlessEqual ( read ( " si1 " , [ 0 ] , [ ( 0 , 10 ) ] ) ,
{ 0 : [ " " ] } )
self . failUnlessEqual ( read ( " si1 " , [ ] , [ ( 0 , 10 ) ] ) ,
{ 0 : [ " " ] , 1 : [ " " ] , 2 : [ " " ] } )
self . failUnlessEqual ( read ( " si1 " , [ 0 ] , [ ( 100 , 10 ) ] ) ,
{ 0 : [ " " ] } )
# try writing to one
secrets = ( self . write_enabler ( " we1 " ) ,
self . renew_secret ( " we1 " ) ,
self . cancel_secret ( " we1 " ) )
data = " " . join ( [ ( " %d " % i ) * 10 for i in range ( 10 ) ] )
write = ss . remote_slot_testv_and_readv_and_writev
answer = write ( " si1 " , secrets ,
{ 0 : ( [ ] , [ ( 0 , data ) ] , None ) } ,
[ ] )
self . failUnlessEqual ( answer , ( True , { 0 : [ ] , 1 : [ ] , 2 : [ ] } ) )
self . failUnlessEqual ( read ( " si1 " , [ 0 ] , [ ( 0 , 20 ) ] ) ,
{ 0 : [ " 00000000001111111111 " ] } )
self . failUnlessEqual ( read ( " si1 " , [ 0 ] , [ ( 95 , 10 ) ] ) ,
{ 0 : [ " 99999 " ] } )
#self.failUnlessEqual(s0.remote_get_length(), 100)
bad_secrets = ( " bad write enabler " , secrets [ 1 ] , secrets [ 2 ] )
f = self . failUnlessRaises ( BadWriteEnablerError ,
write , " si1 " , bad_secrets ,
{ } , [ ] )
self . failUnless ( " The write enabler was recorded by nodeid ' aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa ' . " in f , f )
# this testv should fail
answer = write ( " si1 " , secrets ,
{ 0 : ( [ ( 0 , 12 , " eq " , " 444444444444 " ) ,
( 20 , 5 , " eq " , " 22222 " ) ,
] ,
[ ( 0 , " x " * 100 ) ] ,
None ) ,
} ,
[ ( 0 , 12 ) , ( 20 , 5 ) ] ,
)
self . failUnlessEqual ( answer , ( False ,
{ 0 : [ " 000000000011 " , " 22222 " ] ,
1 : [ " " , " " ] ,
2 : [ " " , " " ] ,
} ) )
self . failUnlessEqual ( read ( " si1 " , [ 0 ] , [ ( 0 , 100 ) ] ) , { 0 : [ data ] } )
# as should this one
answer = write ( " si1 " , secrets ,
{ 0 : ( [ ( 10 , 5 , " lt " , " 11111 " ) ,
] ,
[ ( 0 , " x " * 100 ) ] ,
None ) ,
} ,
[ ( 10 , 5 ) ] ,
)
self . failUnlessEqual ( answer , ( False ,
{ 0 : [ " 11111 " ] ,
1 : [ " " ] ,
2 : [ " " ] } ,
) )
self . failUnlessEqual ( read ( " si1 " , [ 0 ] , [ ( 0 , 100 ) ] ) , { 0 : [ data ] } )
def test_operators ( self ) :
        # test the comparison operators; the data being compared against is
        # '11111' in all cases. Test both the failing and passing case for
        # each operator, and reset the data after each one.
ss = self . create ( " test_operators " )
secrets = ( self . write_enabler ( " we1 " ) ,
self . renew_secret ( " we1 " ) ,
self . cancel_secret ( " we1 " ) )
data = " " . join ( [ ( " %d " % i ) * 10 for i in range ( 10 ) ] )
write = ss . remote_slot_testv_and_readv_and_writev
read = ss . remote_slot_readv
def reset ( ) :
write ( " si1 " , secrets ,
{ 0 : ( [ ] , [ ( 0 , data ) ] , None ) } ,
[ ] )
reset ( )
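        # Judging by the assertions below, each test vector entry is
        # (offset, length, operator, specimen): the server reads `length`
        # bytes at `offset` from the existing share, and the test passes only
        # if `existing_data <operator> specimen` holds as a string
        # comparison. The writev is applied only when every test passes;
        # e.g. the first vector below compares the stored "11111" against
        # "11110" with "lt", which is false, so the share must stay
        # unmodified.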
# lt
answer = write ( " si1 " , secrets , { 0 : ( [ ( 10 , 5 , " lt " , " 11110 " ) ,
] ,
[ ( 0 , " x " * 100 ) ] ,
None ,
) } , [ ( 10 , 5 ) ] )
self . failUnlessEqual ( answer , ( False , { 0 : [ " 11111 " ] } ) )
self . failUnlessEqual ( read ( " si1 " , [ 0 ] , [ ( 0 , 100 ) ] ) , { 0 : [ data ] } )
self . failUnlessEqual ( read ( " si1 " , [ ] , [ ( 0 , 100 ) ] ) , { 0 : [ data ] } )
reset ( )
answer = write ( " si1 " , secrets , { 0 : ( [ ( 10 , 5 , " lt " , " 11111 " ) ,
] ,
[ ( 0 , " x " * 100 ) ] ,
None ,
) } , [ ( 10 , 5 ) ] )
self . failUnlessEqual ( answer , ( False , { 0 : [ " 11111 " ] } ) )
self . failUnlessEqual ( read ( " si1 " , [ 0 ] , [ ( 0 , 100 ) ] ) , { 0 : [ data ] } )
reset ( )
answer = write ( " si1 " , secrets , { 0 : ( [ ( 10 , 5 , " lt " , " 11112 " ) ,
] ,
[ ( 0 , " y " * 100 ) ] ,
None ,
) } , [ ( 10 , 5 ) ] )
self . failUnlessEqual ( answer , ( True , { 0 : [ " 11111 " ] } ) )
self . failUnlessEqual ( read ( " si1 " , [ 0 ] , [ ( 0 , 100 ) ] ) , { 0 : [ " y " * 100 ] } )
reset ( )
# le
answer = write ( " si1 " , secrets , { 0 : ( [ ( 10 , 5 , " le " , " 11110 " ) ,
] ,
[ ( 0 , " x " * 100 ) ] ,
None ,
) } , [ ( 10 , 5 ) ] )
self . failUnlessEqual ( answer , ( False , { 0 : [ " 11111 " ] } ) )
self . failUnlessEqual ( read ( " si1 " , [ 0 ] , [ ( 0 , 100 ) ] ) , { 0 : [ data ] } )
reset ( )
answer = write ( " si1 " , secrets , { 0 : ( [ ( 10 , 5 , " le " , " 11111 " ) ,
] ,
[ ( 0 , " y " * 100 ) ] ,
None ,
) } , [ ( 10 , 5 ) ] )
self . failUnlessEqual ( answer , ( True , { 0 : [ " 11111 " ] } ) )
self . failUnlessEqual ( read ( " si1 " , [ 0 ] , [ ( 0 , 100 ) ] ) , { 0 : [ " y " * 100 ] } )
reset ( )
answer = write ( " si1 " , secrets , { 0 : ( [ ( 10 , 5 , " le " , " 11112 " ) ,
] ,
[ ( 0 , " y " * 100 ) ] ,
None ,
) } , [ ( 10 , 5 ) ] )
self . failUnlessEqual ( answer , ( True , { 0 : [ " 11111 " ] } ) )
self . failUnlessEqual ( read ( " si1 " , [ 0 ] , [ ( 0 , 100 ) ] ) , { 0 : [ " y " * 100 ] } )
reset ( )
# eq
answer = write ( " si1 " , secrets , { 0 : ( [ ( 10 , 5 , " eq " , " 11112 " ) ,
] ,
[ ( 0 , " x " * 100 ) ] ,
None ,
) } , [ ( 10 , 5 ) ] )
self . failUnlessEqual ( answer , ( False , { 0 : [ " 11111 " ] } ) )
self . failUnlessEqual ( read ( " si1 " , [ 0 ] , [ ( 0 , 100 ) ] ) , { 0 : [ data ] } )
reset ( )
answer = write ( " si1 " , secrets , { 0 : ( [ ( 10 , 5 , " eq " , " 11111 " ) ,
] ,
[ ( 0 , " y " * 100 ) ] ,
None ,
) } , [ ( 10 , 5 ) ] )
self . failUnlessEqual ( answer , ( True , { 0 : [ " 11111 " ] } ) )
self . failUnlessEqual ( read ( " si1 " , [ 0 ] , [ ( 0 , 100 ) ] ) , { 0 : [ " y " * 100 ] } )
reset ( )
# ne
answer = write ( " si1 " , secrets , { 0 : ( [ ( 10 , 5 , " ne " , " 11111 " ) ,
] ,
[ ( 0 , " x " * 100 ) ] ,
None ,
) } , [ ( 10 , 5 ) ] )
self . failUnlessEqual ( answer , ( False , { 0 : [ " 11111 " ] } ) )
self . failUnlessEqual ( read ( " si1 " , [ 0 ] , [ ( 0 , 100 ) ] ) , { 0 : [ data ] } )
reset ( )
answer = write ( " si1 " , secrets , { 0 : ( [ ( 10 , 5 , " ne " , " 11112 " ) ,
] ,
[ ( 0 , " y " * 100 ) ] ,
None ,
) } , [ ( 10 , 5 ) ] )
self . failUnlessEqual ( answer , ( True , { 0 : [ " 11111 " ] } ) )
self . failUnlessEqual ( read ( " si1 " , [ 0 ] , [ ( 0 , 100 ) ] ) , { 0 : [ " y " * 100 ] } )
reset ( )
# ge
answer = write ( " si1 " , secrets , { 0 : ( [ ( 10 , 5 , " ge " , " 11110 " ) ,
] ,
[ ( 0 , " y " * 100 ) ] ,
None ,
) } , [ ( 10 , 5 ) ] )
self . failUnlessEqual ( answer , ( True , { 0 : [ " 11111 " ] } ) )
self . failUnlessEqual ( read ( " si1 " , [ 0 ] , [ ( 0 , 100 ) ] ) , { 0 : [ " y " * 100 ] } )
reset ( )
answer = write ( " si1 " , secrets , { 0 : ( [ ( 10 , 5 , " ge " , " 11111 " ) ,
] ,
[ ( 0 , " y " * 100 ) ] ,
None ,
) } , [ ( 10 , 5 ) ] )
self . failUnlessEqual ( answer , ( True , { 0 : [ " 11111 " ] } ) )
self . failUnlessEqual ( read ( " si1 " , [ 0 ] , [ ( 0 , 100 ) ] ) , { 0 : [ " y " * 100 ] } )
reset ( )
answer = write ( " si1 " , secrets , { 0 : ( [ ( 10 , 5 , " ge " , " 11112 " ) ,
] ,
[ ( 0 , " y " * 100 ) ] ,
None ,
) } , [ ( 10 , 5 ) ] )
self . failUnlessEqual ( answer , ( False , { 0 : [ " 11111 " ] } ) )
self . failUnlessEqual ( read ( " si1 " , [ 0 ] , [ ( 0 , 100 ) ] ) , { 0 : [ data ] } )
reset ( )
# gt
answer = write ( " si1 " , secrets , { 0 : ( [ ( 10 , 5 , " gt " , " 11110 " ) ,
] ,
[ ( 0 , " y " * 100 ) ] ,
None ,
) } , [ ( 10 , 5 ) ] )
self . failUnlessEqual ( answer , ( True , { 0 : [ " 11111 " ] } ) )
self . failUnlessEqual ( read ( " si1 " , [ 0 ] , [ ( 0 , 100 ) ] ) , { 0 : [ " y " * 100 ] } )
reset ( )
answer = write ( " si1 " , secrets , { 0 : ( [ ( 10 , 5 , " gt " , " 11111 " ) ,
] ,
[ ( 0 , " x " * 100 ) ] ,
None ,
) } , [ ( 10 , 5 ) ] )
self . failUnlessEqual ( answer , ( False , { 0 : [ " 11111 " ] } ) )
self . failUnlessEqual ( read ( " si1 " , [ 0 ] , [ ( 0 , 100 ) ] ) , { 0 : [ data ] } )
reset ( )
answer = write ( " si1 " , secrets , { 0 : ( [ ( 10 , 5 , " gt " , " 11112 " ) ,
] ,
[ ( 0 , " x " * 100 ) ] ,
None ,
) } , [ ( 10 , 5 ) ] )
self . failUnlessEqual ( answer , ( False , { 0 : [ " 11111 " ] } ) )
self . failUnlessEqual ( read ( " si1 " , [ 0 ] , [ ( 0 , 100 ) ] ) , { 0 : [ data ] } )
reset ( )
# finally, test some operators against empty shares
answer = write ( " si1 " , secrets , { 1 : ( [ ( 10 , 5 , " eq " , " 11112 " ) ,
] ,
[ ( 0 , " x " * 100 ) ] ,
None ,
) } , [ ( 10 , 5 ) ] )
self . failUnlessEqual ( answer , ( False , { 0 : [ " 11111 " ] } ) )
self . failUnlessEqual ( read ( " si1 " , [ 0 ] , [ ( 0 , 100 ) ] ) , { 0 : [ data ] } )
reset ( )
def test_readv ( self ) :
ss = self . create ( " test_readv " )
secrets = ( self . write_enabler ( " we1 " ) ,
self . renew_secret ( " we1 " ) ,
self . cancel_secret ( " we1 " ) )
data = " " . join ( [ ( " %d " % i ) * 10 for i in range ( 10 ) ] )
write = ss . remote_slot_testv_and_readv_and_writev
read = ss . remote_slot_readv
data = [ ( " %d " % i ) * 100 for i in range ( 3 ) ]
rc = write ( " si1 " , secrets ,
{ 0 : ( [ ] , [ ( 0 , data [ 0 ] ) ] , None ) ,
1 : ( [ ] , [ ( 0 , data [ 1 ] ) ] , None ) ,
2 : ( [ ] , [ ( 0 , data [ 2 ] ) ] , None ) ,
} , [ ] )
self . failUnlessEqual ( rc , ( True , { } ) )
answer = read ( " si1 " , [ ] , [ ( 0 , 10 ) ] )
self . failUnlessEqual ( answer , { 0 : [ " 0 " * 10 ] ,
1 : [ " 1 " * 10 ] ,
2 : [ " 2 " * 10 ] } )
def compare_leases_without_timestamps ( self , leases_a , leases_b ) :
self . failUnlessEqual ( len ( leases_a ) , len ( leases_b ) )
for i in range ( len ( leases_a ) ) :
a = leases_a [ i ]
b = leases_b [ i ]
self . failUnlessEqual ( a . owner_num , b . owner_num )
self . failUnlessEqual ( a . renew_secret , b . renew_secret )
self . failUnlessEqual ( a . cancel_secret , b . cancel_secret )
self . failUnlessEqual ( a . nodeid , b . nodeid )
def compare_leases ( self , leases_a , leases_b ) :
self . failUnlessEqual ( len ( leases_a ) , len ( leases_b ) )
for i in range ( len ( leases_a ) ) :
a = leases_a [ i ]
b = leases_b [ i ]
self . failUnlessEqual ( a . owner_num , b . owner_num )
self . failUnlessEqual ( a . renew_secret , b . renew_secret )
self . failUnlessEqual ( a . cancel_secret , b . cancel_secret )
self . failUnlessEqual ( a . nodeid , b . nodeid )
self . failUnlessEqual ( a . expiration_time , b . expiration_time )
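    # compare_leases_without_timestamps is used right after operations that
    # are expected to update lease expiration times (explicit renewals, or
    # writes that implicitly renew the lease), while compare_leases
    # additionally insists that expiration_time is unchanged.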
def test_leases ( self ) :
ss = self . create ( " test_leases " )
def secrets ( n ) :
return ( self . write_enabler ( " we1 " ) ,
self . renew_secret ( " we1- %d " % n ) ,
self . cancel_secret ( " we1- %d " % n ) )
data = " " . join ( [ ( " %d " % i ) * 10 for i in range ( 10 ) ] )
write = ss . remote_slot_testv_and_readv_and_writev
read = ss . remote_slot_readv
rc = write ( " si1 " , secrets ( 0 ) , { 0 : ( [ ] , [ ( 0 , data ) ] , None ) } , [ ] )
self . failUnlessEqual ( rc , ( True , { } ) )
# create a random non-numeric file in the bucket directory, to
# exercise the code that's supposed to ignore those.
bucket_dir = os . path . join ( self . workdir ( " test_leases " ) ,
" shares " , storage_index_to_dir ( " si1 " ) )
f = open ( os . path . join ( bucket_dir , " ignore_me.txt " ) , " w " )
f . write ( " you ought to be ignoring me \n " )
f . close ( )
s0 = MutableShareFile ( os . path . join ( bucket_dir , " 0 " ) )
self . failUnlessEqual ( len ( list ( s0 . get_leases ( ) ) ) , 1 )
# add-lease on a missing storage index is silently ignored
self . failUnlessEqual ( ss . remote_add_lease ( " si18 " , " " , " " ) , None )
        # re-allocate the slots and use the same secrets; that should update
# the lease
write ( " si1 " , secrets ( 0 ) , { 0 : ( [ ] , [ ( 0 , data ) ] , None ) } , [ ] )
self . failUnlessEqual ( len ( list ( s0 . get_leases ( ) ) ) , 1 )
# renew it directly
ss . remote_renew_lease ( " si1 " , secrets ( 0 ) [ 1 ] )
self . failUnlessEqual ( len ( list ( s0 . get_leases ( ) ) ) , 1 )
# now allocate them with a bunch of different secrets, to trigger the
# extended lease code. Use add_lease for one of them.
write ( " si1 " , secrets ( 1 ) , { 0 : ( [ ] , [ ( 0 , data ) ] , None ) } , [ ] )
self . failUnlessEqual ( len ( list ( s0 . get_leases ( ) ) ) , 2 )
secrets2 = secrets ( 2 )
ss . remote_add_lease ( " si1 " , secrets2 [ 1 ] , secrets2 [ 2 ] )
self . failUnlessEqual ( len ( list ( s0 . get_leases ( ) ) ) , 3 )
write ( " si1 " , secrets ( 3 ) , { 0 : ( [ ] , [ ( 0 , data ) ] , None ) } , [ ] )
write ( " si1 " , secrets ( 4 ) , { 0 : ( [ ] , [ ( 0 , data ) ] , None ) } , [ ] )
write ( " si1 " , secrets ( 5 ) , { 0 : ( [ ] , [ ( 0 , data ) ] , None ) } , [ ] )
self . failUnlessEqual ( len ( list ( s0 . get_leases ( ) ) ) , 6 )
# cancel one of them
ss . remote_cancel_lease ( " si1 " , secrets ( 5 ) [ 2 ] )
self . failUnlessEqual ( len ( list ( s0 . get_leases ( ) ) ) , 5 )
all_leases = list ( s0 . get_leases ( ) )
# and write enough data to expand the container, forcing the server
# to move the leases
write ( " si1 " , secrets ( 0 ) ,
{ 0 : ( [ ] , [ ( 0 , data ) ] , 200 ) , } ,
[ ] )
# read back the leases, make sure they're still intact.
self . compare_leases_without_timestamps ( all_leases , list ( s0 . get_leases ( ) ) )
ss . remote_renew_lease ( " si1 " , secrets ( 0 ) [ 1 ] )
ss . remote_renew_lease ( " si1 " , secrets ( 1 ) [ 1 ] )
ss . remote_renew_lease ( " si1 " , secrets ( 2 ) [ 1 ] )
ss . remote_renew_lease ( " si1 " , secrets ( 3 ) [ 1 ] )
ss . remote_renew_lease ( " si1 " , secrets ( 4 ) [ 1 ] )
self . compare_leases_without_timestamps ( all_leases , list ( s0 . get_leases ( ) ) )
# get a new copy of the leases, with the current timestamps. Reading
# data and failing to renew/cancel leases should leave the timestamps
# alone.
all_leases = list ( s0 . get_leases ( ) )
# renewing with a bogus token should prompt an error message
# examine the exception thus raised, make sure the old nodeid is
# present, to provide for share migration
e = self . failUnlessRaises ( IndexError ,
ss . remote_renew_lease , " si1 " ,
secrets ( 20 ) [ 1 ] )
e_s = str ( e )
self . failUnless ( " Unable to renew non-existent lease " in e_s )
self . failUnless ( " I have leases accepted by nodeids: " in e_s )
self . failUnless ( " nodeids: ' aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa ' . " in e_s )
# same for cancelling
self . failUnlessRaises ( IndexError ,
ss . remote_cancel_lease , " si1 " ,
secrets ( 20 ) [ 2 ] )
self . compare_leases ( all_leases , list ( s0 . get_leases ( ) ) )
# reading shares should not modify the timestamp
read ( " si1 " , [ ] , [ ( 0 , 200 ) ] )
self . compare_leases ( all_leases , list ( s0 . get_leases ( ) ) )
write ( " si1 " , secrets ( 0 ) ,
{ 0 : ( [ ] , [ ( 200 , " make me bigger " ) ] , None ) } , [ ] )
self . compare_leases_without_timestamps ( all_leases , list ( s0 . get_leases ( ) ) )
write ( " si1 " , secrets ( 0 ) ,
{ 0 : ( [ ] , [ ( 500 , " make me really bigger " ) ] , None ) } , [ ] )
self . compare_leases_without_timestamps ( all_leases , list ( s0 . get_leases ( ) ) )
# now cancel them all
ss . remote_cancel_lease ( " si1 " , secrets ( 0 ) [ 2 ] )
ss . remote_cancel_lease ( " si1 " , secrets ( 1 ) [ 2 ] )
ss . remote_cancel_lease ( " si1 " , secrets ( 2 ) [ 2 ] )
ss . remote_cancel_lease ( " si1 " , secrets ( 3 ) [ 2 ] )
# the slot should still be there
remaining_shares = read ( " si1 " , [ ] , [ ( 0 , 10 ) ] )
self . failUnlessEqual ( len ( remaining_shares ) , 1 )
self . failUnlessEqual ( len ( list ( s0 . get_leases ( ) ) ) , 1 )
# cancelling a non-existent lease should raise an IndexError
self . failUnlessRaises ( IndexError ,
ss . remote_cancel_lease , " si1 " , " nonsecret " )
# and the slot should still be there
remaining_shares = read ( " si1 " , [ ] , [ ( 0 , 10 ) ] )
self . failUnlessEqual ( len ( remaining_shares ) , 1 )
self . failUnlessEqual ( len ( list ( s0 . get_leases ( ) ) ) , 1 )
ss . remote_cancel_lease ( " si1 " , secrets ( 4 ) [ 2 ] )
# now the slot should be gone
no_shares = read ( " si1 " , [ ] , [ ( 0 , 10 ) ] )
self . failUnlessEqual ( no_shares , { } )
# cancelling a lease on a non-existent share should raise an IndexError
self . failUnlessRaises ( IndexError ,
ss . remote_cancel_lease , " si2 " , " nonsecret " )
def test_remove ( self ) :
ss = self . create ( " test_remove " )
self . allocate ( ss , " si1 " , " we1 " , self . _lease_secret . next ( ) ,
set ( [ 0 , 1 , 2 ] ) , 100 )
readv = ss . remote_slot_readv
writev = ss . remote_slot_testv_and_readv_and_writev
secrets = ( self . write_enabler ( " we1 " ) ,
self . renew_secret ( " we1 " ) ,
self . cancel_secret ( " we1 " ) )
# delete sh0 by setting its size to zero
answer = writev ( " si1 " , secrets ,
{ 0 : ( [ ] , [ ] , 0 ) } ,
[ ] )
# the answer should mention all the shares that existed before the
# write
self . failUnlessEqual ( answer , ( True , { 0 : [ ] , 1 : [ ] , 2 : [ ] } ) )
# but a new read should show only sh1 and sh2
self . failUnlessEqual ( readv ( " si1 " , [ ] , [ ( 0 , 10 ) ] ) ,
{ 1 : [ " " ] , 2 : [ " " ] } )
# delete sh1 by setting its size to zero
answer = writev ( " si1 " , secrets ,
{ 1 : ( [ ] , [ ] , 0 ) } ,
[ ] )
self . failUnlessEqual ( answer , ( True , { 1 : [ ] , 2 : [ ] } ) )
self . failUnlessEqual ( readv ( " si1 " , [ ] , [ ( 0 , 10 ) ] ) ,
{ 2 : [ " " ] } )
# delete sh2 by setting its size to zero
answer = writev ( " si1 " , secrets ,
{ 2 : ( [ ] , [ ] , 0 ) } ,
[ ] )
self . failUnlessEqual ( answer , ( True , { 2 : [ ] } ) )
self . failUnlessEqual ( readv ( " si1 " , [ ] , [ ( 0 , 10 ) ] ) ,
{ } )
# and the bucket directory should now be gone
si = base32 . b2a ( " si1 " )
# note: this is a detail of the storage server implementation, and
# may change in the future
prefix = si [ : 2 ]
prefixdir = os . path . join ( self . workdir ( " test_remove " ) , " shares " , prefix )
bucketdir = os . path . join ( prefixdir , si )
self . failUnless ( os . path . exists ( prefixdir ) )
self . failIf ( os . path . exists ( bucketdir ) )
class Stats ( unittest . TestCase ) :
def setUp ( self ) :
self . sparent = LoggingServiceParent ( )
self . _lease_secret = itertools . count ( )
def tearDown ( self ) :
return self . sparent . stopService ( )
def workdir ( self , name ) :
basedir = os . path . join ( " storage " , " Server " , name )
return basedir
def create ( self , name ) :
workdir = self . workdir ( name )
ss = StorageServer ( workdir , " \x00 " * 20 )
ss . setServiceParent ( self . sparent )
return ss
def test_latencies ( self ) :
ss = self . create ( " test_latencies " )
for i in range ( 10000 ) :
ss . add_latency ( " allocate " , 1.0 * i )
for i in range ( 1000 ) :
ss . add_latency ( " renew " , 1.0 * i )
for i in range ( 10 ) :
ss . add_latency ( " cancel " , 2.0 * i )
ss . add_latency ( " get " , 5.0 )
output = ss . get_latencies ( )
self . failUnlessEqual ( sorted ( output . keys ( ) ) ,
sorted ( [ " allocate " , " renew " , " cancel " , " get " ] ) )
self . failUnlessEqual ( len ( ss . latencies [ " allocate " ] ) , 1000 )
self . failUnless ( abs ( output [ " allocate " ] [ " mean " ] - 9500 ) < 1 )
self . failUnless ( abs ( output [ " allocate " ] [ " 01_0_percentile " ] - 9010 ) < 1 )
self . failUnless ( abs ( output [ " allocate " ] [ " 10_0_percentile " ] - 9100 ) < 1 )
self . failUnless ( abs ( output [ " allocate " ] [ " 50_0_percentile " ] - 9500 ) < 1 )
self . failUnless ( abs ( output [ " allocate " ] [ " 90_0_percentile " ] - 9900 ) < 1 )
self . failUnless ( abs ( output [ " allocate " ] [ " 95_0_percentile " ] - 9950 ) < 1 )
self . failUnless ( abs ( output [ " allocate " ] [ " 99_0_percentile " ] - 9990 ) < 1 )
self . failUnless ( abs ( output [ " allocate " ] [ " 99_9_percentile " ] - 9999 ) < 1 )
self . failUnlessEqual ( len ( ss . latencies [ " renew " ] ) , 1000 )
self . failUnless ( abs ( output [ " renew " ] [ " mean " ] - 500 ) < 1 )
self . failUnless ( abs ( output [ " renew " ] [ " 01_0_percentile " ] - 10 ) < 1 )
self . failUnless ( abs ( output [ " renew " ] [ " 10_0_percentile " ] - 100 ) < 1 )
self . failUnless ( abs ( output [ " renew " ] [ " 50_0_percentile " ] - 500 ) < 1 )
self . failUnless ( abs ( output [ " renew " ] [ " 90_0_percentile " ] - 900 ) < 1 )
self . failUnless ( abs ( output [ " renew " ] [ " 95_0_percentile " ] - 950 ) < 1 )
self . failUnless ( abs ( output [ " renew " ] [ " 99_0_percentile " ] - 990 ) < 1 )
self . failUnless ( abs ( output [ " renew " ] [ " 99_9_percentile " ] - 999 ) < 1 )
self . failUnlessEqual ( len ( ss . latencies [ " cancel " ] ) , 10 )
self . failUnless ( abs ( output [ " cancel " ] [ " mean " ] - 9 ) < 1 )
self . failUnless ( abs ( output [ " cancel " ] [ " 01_0_percentile " ] - 0 ) < 1 )
self . failUnless ( abs ( output [ " cancel " ] [ " 10_0_percentile " ] - 2 ) < 1 )
self . failUnless ( abs ( output [ " cancel " ] [ " 50_0_percentile " ] - 10 ) < 1 )
self . failUnless ( abs ( output [ " cancel " ] [ " 90_0_percentile " ] - 18 ) < 1 )
self . failUnless ( abs ( output [ " cancel " ] [ " 95_0_percentile " ] - 18 ) < 1 )
self . failUnless ( abs ( output [ " cancel " ] [ " 99_0_percentile " ] - 18 ) < 1 )
self . failUnless ( abs ( output [ " cancel " ] [ " 99_9_percentile " ] - 18 ) < 1 )
self . failUnlessEqual ( len ( ss . latencies [ " get " ] ) , 1 )
self . failUnless ( abs ( output [ " get " ] [ " mean " ] - 5 ) < 1 )
self . failUnless ( abs ( output [ " get " ] [ " 01_0_percentile " ] - 5 ) < 1 )
self . failUnless ( abs ( output [ " get " ] [ " 10_0_percentile " ] - 5 ) < 1 )
self . failUnless ( abs ( output [ " get " ] [ " 50_0_percentile " ] - 5 ) < 1 )
self . failUnless ( abs ( output [ " get " ] [ " 90_0_percentile " ] - 5 ) < 1 )
self . failUnless ( abs ( output [ " get " ] [ " 95_0_percentile " ] - 5 ) < 1 )
self . failUnless ( abs ( output [ " get " ] [ " 99_0_percentile " ] - 5 ) < 1 )
self . failUnless ( abs ( output [ " get " ] [ " 99_9_percentile " ] - 5 ) < 1 )
def remove_tags ( s ) :
s = re . sub ( r ' <[^>]*> ' , ' ' , s )
s = re . sub ( r ' \ s+ ' , ' ' , s )
return s
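# remove_tags() replaces each HTML tag with a space and collapses runs of
# whitespace, so e.g. remove_tags("<h1>Storage Server Status</h1>") comes out
# as roughly " Storage Server Status "; the tests below therefore match on
# substrings rather than exact markup.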
class MyBucketCountingCrawler ( BucketCountingCrawler ) :
def finished_prefix ( self , cycle , prefix ) :
BucketCountingCrawler . finished_prefix ( self , cycle , prefix )
if self . hook_ds :
d = self . hook_ds . pop ( 0 )
d . callback ( None )
class MyStorageServer ( StorageServer ) :
def add_bucket_counter ( self ) :
statefile = os . path . join ( self . storedir , " bucket_counter.state " )
self . bucket_counter = MyBucketCountingCrawler ( self , statefile )
self . bucket_counter . setServiceParent ( self )
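# These two helpers let a test watch the bucket counter mid-crawl: each time
# MyBucketCountingCrawler finishes a prefix directory it pops the next
# Deferred off self.hook_ds (installed by the test) and fires it, and
# MyStorageServer simply swaps that crawler in for the stock one.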
class BucketCounter ( unittest . TestCase , pollmixin . PollMixin ) :
def setUp ( self ) :
self . s = service . MultiService ( )
self . s . startService ( )
def tearDown ( self ) :
return self . s . stopService ( )
def test_bucket_counter ( self ) :
basedir = " storage/BucketCounter/bucket_counter "
fileutil . make_dirs ( basedir )
ss = StorageServer ( basedir , " \x00 " * 20 )
# to make sure we capture the bucket-counting-crawler in the middle
# of a cycle, we reach in and reduce its maximum slice time to 0. We
# also make it start sooner than usual.
ss . bucket_counter . slow_start = 0
orig_cpu_slice = ss . bucket_counter . cpu_slice
ss . bucket_counter . cpu_slice = 0
ss . setServiceParent ( self . s )
w = StorageStatus ( ss )
# this sample is before the crawler has started doing anything
html = w . renderSynchronously ( )
self . failUnless ( " <h1>Storage Server Status</h1> " in html , html )
s = remove_tags ( html )
self . failUnless ( " Accepting new shares: Yes " in s , s )
self . failUnless ( " Reserved space: - 0 B (0) " in s , s )
self . failUnless ( " Total buckets: Not computed yet " in s , s )
self . failUnless ( " Next crawl in " in s , s )
# give the bucket-counting-crawler one tick to get started. The
# cpu_slice=0 will force it to yield right after it processes the
# first prefix
d = eventual . fireEventually ( )
def _check ( ignored ) :
# are we really right after the first prefix?
state = ss . bucket_counter . get_state ( )
self . failUnlessEqual ( state [ " last-complete-prefix " ] ,
ss . bucket_counter . prefixes [ 0 ] )
ss . bucket_counter . cpu_slice = 100.0 # finish as fast as possible
html = w . renderSynchronously ( )
s = remove_tags ( html )
self . failUnless ( " Current crawl " in s , s )
self . failUnless ( " (next work in " in s , s )
d . addCallback ( _check )
# now give it enough time to complete a full cycle
def _watch ( ) :
return not ss . bucket_counter . get_progress ( ) [ " cycle-in-progress " ]
d . addCallback ( lambda ignored : self . poll ( _watch ) )
def _check2 ( ignored ) :
ss . bucket_counter . cpu_slice = orig_cpu_slice
html = w . renderSynchronously ( )
s = remove_tags ( html )
self . failUnless ( " Total buckets: 0 (the number of " in s , s )
self . failUnless ( " Next crawl in 59 minutes " in s , s )
d . addCallback ( _check2 )
return d
def test_bucket_counter_cleanup ( self ) :
basedir = " storage/BucketCounter/bucket_counter_cleanup "
fileutil . make_dirs ( basedir )
ss = StorageServer ( basedir , " \x00 " * 20 )
# to make sure we capture the bucket-counting-crawler in the middle
# of a cycle, we reach in and reduce its maximum slice time to 0.
ss . bucket_counter . slow_start = 0
orig_cpu_slice = ss . bucket_counter . cpu_slice
ss . bucket_counter . cpu_slice = 0
ss . setServiceParent ( self . s )
d = eventual . fireEventually ( )
def _after_first_prefix ( ignored ) :
ss . bucket_counter . cpu_slice = 100.0 # finish as fast as possible
# now sneak in and mess with its state, to make sure it cleans up
# properly at the end of the cycle
state = ss . bucket_counter . state
self . failUnlessEqual ( state [ " last-complete-prefix " ] ,
ss . bucket_counter . prefixes [ 0 ] )
state [ " bucket-counts " ] [ - 12 ] = { }
state [ " storage-index-samples " ] [ " bogusprefix! " ] = ( - 12 , [ ] )
ss . bucket_counter . save_state ( )
d . addCallback ( _after_first_prefix )
# now give it enough time to complete a cycle
def _watch ( ) :
return not ss . bucket_counter . get_progress ( ) [ " cycle-in-progress " ]
d . addCallback ( lambda ignored : self . poll ( _watch ) )
def _check2 ( ignored ) :
ss . bucket_counter . cpu_slice = orig_cpu_slice
s = ss . bucket_counter . get_state ( )
self . failIf ( - 12 in s [ " bucket-counts " ] , s [ " bucket-counts " ] . keys ( ) )
self . failIf ( " bogusprefix! " in s [ " storage-index-samples " ] ,
s [ " storage-index-samples " ] . keys ( ) )
d . addCallback ( _check2 )
return d
def test_bucket_counter_eta ( self ) :
basedir = " storage/BucketCounter/bucket_counter_eta "
fileutil . make_dirs ( basedir )
ss = MyStorageServer ( basedir , " \x00 " * 20 )
ss . bucket_counter . slow_start = 0
# these will be fired inside finished_prefix()
hooks = ss . bucket_counter . hook_ds = [ defer . Deferred ( ) for i in range ( 3 ) ]
w = StorageStatus ( ss )
d = defer . Deferred ( )
def _check_1 ( ignored ) :
# no ETA is available yet
html = w . renderSynchronously ( )
s = remove_tags ( html )
self . failUnlessIn ( " complete (next work " , s )
def _check_2 ( ignored ) :
# one prefix has finished, so an ETA based upon that elapsed time
# should be available.
html = w . renderSynchronously ( )
s = remove_tags ( html )
self . failUnlessIn ( " complete (ETA " , s )
def _check_3 ( ignored ) :
# two prefixes have finished
html = w . renderSynchronously ( )
s = remove_tags ( html )
self . failUnlessIn ( " complete (ETA " , s )
d . callback ( " done " )
hooks [ 0 ] . addCallback ( _check_1 ) . addErrback ( d . errback )
hooks [ 1 ] . addCallback ( _check_2 ) . addErrback ( d . errback )
hooks [ 2 ] . addCallback ( _check_3 ) . addErrback ( d . errback )
ss . setServiceParent ( self . s )
return d
class InstrumentedLeaseCheckingCrawler ( LeaseCheckingCrawler ) :
stop_after_first_bucket = False
def process_bucket ( self , * args , * * kwargs ) :
LeaseCheckingCrawler . process_bucket ( self , * args , * * kwargs )
if self . stop_after_first_bucket :
self . stop_after_first_bucket = False
self . cpu_slice = - 1.0
def yielding ( self , sleep_time ) :
if not self . stop_after_first_bucket :
self . cpu_slice = 500
class BrokenStatResults :
pass
class No_ST_BLOCKS_LeaseCheckingCrawler ( LeaseCheckingCrawler ) :
def stat ( self , fn ) :
s = os . stat ( fn )
bsr = BrokenStatResults ( )
for attrname in dir ( s ) :
if attrname . startswith ( " _ " ) :
continue
if attrname == " st_blocks " :
continue
setattr ( bsr , attrname , getattr ( s , attrname ) )
return bsr
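# No_ST_BLOCKS_LeaseCheckingCrawler.stat() copies every field of the real
# stat result except st_blocks, presumably to mimic platforms or filesystems
# whose stat results lack block counts, so the crawler's disk-space
# accounting has to fall back to the plain file size.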
class InstrumentedStorageServer ( StorageServer ) :
LeaseCheckerClass = InstrumentedLeaseCheckingCrawler
class No_ST_BLOCKS_StorageServer ( StorageServer ) :
LeaseCheckerClass = No_ST_BLOCKS_LeaseCheckingCrawler
class LeaseCrawler ( unittest . TestCase , pollmixin . PollMixin , WebRenderingMixin ) :
def setUp ( self ) :
self . s = service . MultiService ( )
self . s . startService ( )
def tearDown ( self ) :
return self . s . stopService ( )
def make_shares ( self , ss ) :
def make ( si ) :
return ( si , hashutil . tagged_hash ( " renew " , si ) ,
hashutil . tagged_hash ( " cancel " , si ) )
def make_mutable ( si ) :
return ( si , hashutil . tagged_hash ( " renew " , si ) ,
hashutil . tagged_hash ( " cancel " , si ) ,
hashutil . tagged_hash ( " write-enabler " , si ) )
def make_extra_lease ( si , num ) :
return ( hashutil . tagged_hash ( " renew- %d " % num , si ) ,
hashutil . tagged_hash ( " cancel- %d " % num , si ) )
immutable_si_0 , rs0 , cs0 = make ( " \x00 " * 16 )
immutable_si_1 , rs1 , cs1 = make ( " \x01 " * 16 )
rs1a , cs1a = make_extra_lease ( immutable_si_1 , 1 )
mutable_si_2 , rs2 , cs2 , we2 = make_mutable ( " \x02 " * 16 )
mutable_si_3 , rs3 , cs3 , we3 = make_mutable ( " \x03 " * 16 )
rs3a , cs3a = make_extra_lease ( mutable_si_3 , 1 )
sharenums = [ 0 ]
canary = FakeCanary ( )
# note: 'tahoe debug dump-share' will not handle this file, since the
# inner contents are not a valid CHK share
data = " \xff " * 1000
a , w = ss . remote_allocate_buckets ( immutable_si_0 , rs0 , cs0 , sharenums ,
1000 , canary )
w [ 0 ] . remote_write ( 0 , data )
w [ 0 ] . remote_close ( )
a , w = ss . remote_allocate_buckets ( immutable_si_1 , rs1 , cs1 , sharenums ,
1000 , canary )
w [ 0 ] . remote_write ( 0 , data )
w [ 0 ] . remote_close ( )
ss . remote_add_lease ( immutable_si_1 , rs1a , cs1a )
writev = ss . remote_slot_testv_and_readv_and_writev
writev ( mutable_si_2 , ( we2 , rs2 , cs2 ) ,
{ 0 : ( [ ] , [ ( 0 , data ) ] , len ( data ) ) } , [ ] )
writev ( mutable_si_3 , ( we3 , rs3 , cs3 ) ,
{ 0 : ( [ ] , [ ( 0 , data ) ] , len ( data ) ) } , [ ] )
ss . remote_add_lease ( mutable_si_3 , rs3a , cs3a )
self . sis = [ immutable_si_0 , immutable_si_1 , mutable_si_2 , mutable_si_3 ]
self . renew_secrets = [ rs0 , rs1 , rs1a , rs2 , rs3 , rs3a ]
self . cancel_secrets = [ cs0 , cs1 , cs1a , cs2 , cs3 , cs3a ]
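        # To summarize the fixture: four single-share buckets, two immutable
        # (si_0, si_1) and two mutable (si_2, si_3); si_1 and si_3 each get a
        # second lease via remote_add_lease, so the expected lease counts are
        # 1, 2, 1, 2 respectively.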
def test_basic ( self ) :
basedir = " storage/LeaseCrawler/basic "
fileutil . make_dirs ( basedir )
ss = InstrumentedStorageServer ( basedir , " \x00 " * 20 )
# make it start sooner than usual.
lc = ss . lease_checker
lc . slow_start = 0
lc . cpu_slice = 500
lc . stop_after_first_bucket = True
webstatus = StorageStatus ( ss )
# create a few shares, with some leases on them
self . make_shares ( ss )
[ immutable_si_0 , immutable_si_1 , mutable_si_2 , mutable_si_3 ] = self . sis
# add a non-sharefile to exercise another code path
fn = os . path . join ( ss . sharedir ,
storage_index_to_dir ( immutable_si_0 ) ,
" not-a-share " )
f = open ( fn , " wb " )
f . write ( " I am not a share. \n " )
f . close ( )
# this is before the crawl has started, so we're not in a cycle yet
initial_state = lc . get_state ( )
self . failIf ( lc . get_progress ( ) [ " cycle-in-progress " ] )
self . failIf ( " cycle-to-date " in initial_state )
self . failIf ( " estimated-remaining-cycle " in initial_state )
self . failIf ( " estimated-current-cycle " in initial_state )
self . failUnless ( " history " in initial_state )
self . failUnlessEqual ( initial_state [ " history " ] , { } )
ss . setServiceParent ( self . s )
d = eventual . fireEventually ( )
# now examine the state right after the first bucket has been
# processed.
def _after_first_bucket ( ignored ) :
initial_state = lc . get_state ( )
self . failUnless ( " cycle-to-date " in initial_state )
self . failUnless ( " estimated-remaining-cycle " in initial_state )
self . failUnless ( " estimated-current-cycle " in initial_state )
self . failUnless ( " history " in initial_state )
self . failUnlessEqual ( initial_state [ " history " ] , { } )
so_far = initial_state [ " cycle-to-date " ]
self . failUnlessEqual ( so_far [ " expiration-enabled " ] , False )
self . failUnless ( " configured-expiration-time " in so_far )
self . failUnless ( " lease-age-histogram " in so_far )
lah = so_far [ " lease-age-histogram " ]
self . failUnlessEqual ( type ( lah ) , list )
self . failUnlessEqual ( len ( lah ) , 1 )
self . failUnlessEqual ( lah , [ ( 0.0 , lc . age_limit / 10.0 , 1 ) ] )
self . failUnlessEqual ( so_far [ " leases-per-share-histogram " ] , { 1 : 1 } )
self . failUnlessEqual ( so_far [ " buckets-examined " ] , 1 )
self . failUnlessEqual ( so_far [ " shares-examined " ] , 1 )
sr1 = so_far [ " space-recovered " ]
self . failUnlessEqual ( sr1 [ " actual-numshares " ] , 0 )
self . failUnlessEqual ( sr1 [ " configured-leasetimer-diskbytes " ] , 0 )
self . failUnlessEqual ( sr1 [ " original-leasetimer-sharebytes " ] , 0 )
left = initial_state [ " estimated-remaining-cycle " ]
self . failUnless ( left [ " buckets-examined " ] > 0 ,
left [ " buckets-examined " ] )
self . failUnless ( left [ " shares-examined " ] > 0 ,
left [ " shares-examined " ] )
sr2 = left [ " space-recovered " ]
self . failIfEqual ( sr2 [ " actual-numshares " ] , None )
self . failIfEqual ( sr2 [ " configured-leasetimer-diskbytes " ] , None )
self . failIfEqual ( sr2 [ " original-leasetimer-sharebytes " ] , None )
d . addCallback ( _after_first_bucket )
d . addCallback ( lambda ign : self . render1 ( webstatus ) )
def _check_html_in_cycle ( html ) :
s = remove_tags ( html )
self . failUnlessIn ( " So far, this cycle has examined "
" 1 shares in 1 buckets "
" and has recovered: "
" 0 shares, 0 buckets, 0 B " , s )
self . failUnlessIn ( " If expiration were enabled, "
" we would have recovered: "
" 0 shares, 0 buckets, 0 B by now " , s )
self . failUnlessIn ( " and the remainder of this cycle "
" would probably recover: "
" 0 shares, 0 buckets, 0 B " , s )
self . failUnlessIn ( " and the whole cycle would probably recover: "
" 0 shares, 0 buckets, 0 B " , s )
self . failUnlessIn ( " if we were using each lease ' s default "
" 31-day lease lifetime " , s )
self . failUnlessIn ( " this cycle would be expected to recover: " , s )
d . addCallback ( _check_html_in_cycle )
# wait for the crawler to finish the first cycle. Nothing should have
# been removed.
def _wait ( ) :
return bool ( lc . get_state ( ) [ " last-cycle-finished " ] is not None )
d . addCallback ( lambda ign : self . poll ( _wait ) )
def _after_first_cycle ( ignored ) :
s = lc . get_state ( )
self . failIf ( " cycle-to-date " in s )
self . failIf ( " estimated-remaining-cycle " in s )
self . failIf ( " estimated-current-cycle " in s )
last = s [ " history " ] [ 0 ]
self . failUnless ( " cycle-start-finish-times " in last )
self . failUnlessEqual ( type ( last [ " cycle-start-finish-times " ] ) , tuple )
self . failUnlessEqual ( last [ " expiration-enabled " ] , False )
self . failUnless ( " configured-expiration-time " in last )
self . failUnless ( " lease-age-histogram " in last )
lah = last [ " lease-age-histogram " ]
self . failUnlessEqual ( type ( lah ) , list )
self . failUnlessEqual ( len ( lah ) , 1 )
self . failUnlessEqual ( lah , [ ( 0.0 , lc . age_limit / 10.0 , 6 ) ] )
self . failUnlessEqual ( last [ " leases-per-share-histogram " ] ,
{ 1 : 2 , 2 : 2 } )
self . failUnlessEqual ( last [ " buckets-examined " ] , 4 )
self . failUnlessEqual ( last [ " shares-examined " ] , 4 )
rec = last [ " space-recovered " ]
self . failUnlessEqual ( rec [ " actual-numbuckets " ] , 0 )
self . failUnlessEqual ( rec [ " original-leasetimer-numbuckets " ] , 0 )
self . failUnlessEqual ( rec [ " configured-leasetimer-numbuckets " ] , 0 )
self . failUnlessEqual ( rec [ " actual-numshares " ] , 0 )
self . failUnlessEqual ( rec [ " original-leasetimer-numshares " ] , 0 )
self . failUnlessEqual ( rec [ " configured-leasetimer-numshares " ] , 0 )
self . failUnlessEqual ( rec [ " actual-diskbytes " ] , 0 )
self . failUnlessEqual ( rec [ " original-leasetimer-diskbytes " ] , 0 )
self . failUnlessEqual ( rec [ " configured-leasetimer-diskbytes " ] , 0 )
self . failUnlessEqual ( rec [ " actual-sharebytes " ] , 0 )
self . failUnlessEqual ( rec [ " original-leasetimer-sharebytes " ] , 0 )
self . failUnlessEqual ( rec [ " configured-leasetimer-sharebytes " ] , 0 )
def _get_sharefile ( si ) :
return list ( ss . _iter_share_files ( si ) ) [ 0 ]
def count_leases ( si ) :
return len ( list ( _get_sharefile ( si ) . get_leases ( ) ) )
self . failUnlessEqual ( count_leases ( immutable_si_0 ) , 1 )
self . failUnlessEqual ( count_leases ( immutable_si_1 ) , 2 )
self . failUnlessEqual ( count_leases ( mutable_si_2 ) , 1 )
self . failUnlessEqual ( count_leases ( mutable_si_3 ) , 2 )
d . addCallback ( _after_first_cycle )
d . addCallback ( lambda ign : self . render1 ( webstatus ) )
def _check_html ( html ) :
s = remove_tags ( html )
self . failUnlessIn ( " recovered: 0 shares, 0 buckets, 0 B "
" but expiration was not enabled " , s )
d . addCallback ( _check_html )
return d
def backdate_lease ( self , sf , renew_secret , new_expire_time ) :
# ShareFile.renew_lease ignores attempts to back-date a lease (i.e.
# "renew" a lease with a new_expire_time that is older than what the
# current lease has), so we have to reach inside it.
for i , lease in enumerate ( sf . get_leases ( ) ) :
if lease . renew_secret == renew_secret :
lease . expiration_time = new_expire_time
f = open ( sf . home , ' rb+ ' )
sf . _write_lease_record ( f , i , lease )
f . close ( )
return
raise IndexError ( " unable to renew non-existent lease " )
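# backdate_lease() above has to reach inside the share file because, per its
# comment, ShareFile.renew_lease() ignores attempts to move an expiration
# time backwards. A minimal sketch of that forward-only rule; an assumption
# drawn from the comment, not the real ShareFile code:
def renew_lease_sketch(lease, new_expire_time):
    # only ever move the expiration forward; back-dating is silently ignored
    if new_expire_time > lease.expiration_time:
        lease.expiration_time = new_expire_time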
def test_expire ( self ) :
basedir = " storage/LeaseCrawler/expire "
fileutil . make_dirs ( basedir )
# setting expiration_time to 2000 means that any lease which is more
# than 2000s old will be expired.
ss = InstrumentedStorageServer ( basedir , " \x00 " * 20 ,
expire_leases = True ,
expiration_time = 2000 )
# make it start sooner than usual.
lc = ss . lease_checker
lc . slow_start = 0
lc . stop_after_first_bucket = True
webstatus = StorageStatus ( ss )
# create a few shares, with some leases on them
self . make_shares ( ss )
[ immutable_si_0 , immutable_si_1 , mutable_si_2 , mutable_si_3 ] = self . sis
def count_shares ( si ) :
return len ( list ( ss . _iter_share_files ( si ) ) )
def _get_sharefile ( si ) :
return list ( ss . _iter_share_files ( si ) ) [ 0 ]
def count_leases ( si ) :
return len ( list ( _get_sharefile ( si ) . get_leases ( ) ) )
self . failUnlessEqual ( count_shares ( immutable_si_0 ) , 1 )
self . failUnlessEqual ( count_leases ( immutable_si_0 ) , 1 )
self . failUnlessEqual ( count_shares ( immutable_si_1 ) , 1 )
self . failUnlessEqual ( count_leases ( immutable_si_1 ) , 2 )
self . failUnlessEqual ( count_shares ( mutable_si_2 ) , 1 )
self . failUnlessEqual ( count_leases ( mutable_si_2 ) , 1 )
self . failUnlessEqual ( count_shares ( mutable_si_3 ) , 1 )
self . failUnlessEqual ( count_leases ( mutable_si_3 ) , 2 )
# artificially crank back the expiration time on the first lease of
# each share, to make it look like it expired already (age=1000s).
# Some shares have an extra lease which is set to expire at the
# default time, 31 days from now (age=31 days). We then run the
# crawler, which expires the first lease, so some shares get deleted
# and others stay alive with one remaining lease. (A hedged sketch of
# this decision follows this test.)
now = time . time ( )
sf0 = _get_sharefile ( immutable_si_0 )
self . backdate_lease ( sf0 , self . renew_secrets [ 0 ] , now - 1000 )
sf0_size = os . stat ( sf0 . home ) . st_size
# immutable_si_1 gets an extra lease
sf1 = _get_sharefile ( immutable_si_1 )
self . backdate_lease ( sf1 , self . renew_secrets [ 1 ] , now - 1000 )
sf2 = _get_sharefile ( mutable_si_2 )
self . backdate_lease ( sf2 , self . renew_secrets [ 3 ] , now - 1000 )
sf2_size = os . stat ( sf2 . home ) . st_size
# mutable_si_3 gets an extra lease
sf3 = _get_sharefile ( mutable_si_3 )
self . backdate_lease ( sf3 , self . renew_secrets [ 4 ] , now - 1000 )
ss . setServiceParent ( self . s )
d = eventual . fireEventually ( )
# examine the state right after the first bucket has been processed
def _after_first_bucket ( ignored ) :
p = lc . get_progress ( )
self . failUnless ( p [ " cycle-in-progress " ] )
d . addCallback ( _after_first_bucket )
d . addCallback ( lambda ign : self . render1 ( webstatus ) )
def _check_html_in_cycle ( html ) :
s = remove_tags ( html )
# the first bucket encountered gets deleted, and its prefix
# happens to be about 1/6th of the way through the ring, so the
# predictor thinks we'll have 6 shares and that we'll delete them
# all. This part of the test depends upon the SIs landing right
# where they do now.
self . failUnlessIn ( " The remainder of this cycle is expected to "
" recover: 5 shares, 5 buckets " , s )
self . failUnlessIn ( " The whole cycle is expected to examine "
" 6 shares in 6 buckets and to recover: "
" 6 shares, 6 buckets " , s )
d . addCallback ( _check_html_in_cycle )
# wait for the crawler to finish the first cycle. Two shares should
# have been removed
def _wait ( ) :
return bool ( lc . get_state ( ) [ " last-cycle-finished " ] is not None )
d . addCallback ( lambda ign : self . poll ( _wait ) )
def _after_first_cycle ( ignored ) :
self . failUnlessEqual ( count_shares ( immutable_si_0 ) , 0 )
self . failUnlessEqual ( count_shares ( immutable_si_1 ) , 1 )
self . failUnlessEqual ( count_leases ( immutable_si_1 ) , 1 )
self . failUnlessEqual ( count_shares ( mutable_si_2 ) , 0 )
self . failUnlessEqual ( count_shares ( mutable_si_3 ) , 1 )
self . failUnlessEqual ( count_leases ( mutable_si_3 ) , 1 )
s = lc . get_state ( )
last = s [ " history " ] [ 0 ]
self . failUnlessEqual ( last [ " expiration-enabled " ] , True )
self . failUnlessEqual ( last [ " configured-expiration-time " ] , 2000 )
self . failUnlessEqual ( last [ " buckets-examined " ] , 4 )
self . failUnlessEqual ( last [ " shares-examined " ] , 4 )
self . failUnlessEqual ( last [ " leases-per-share-histogram " ] ,
{ 1 : 2 , 2 : 2 } )
rec = last [ " space-recovered " ]
self . failUnlessEqual ( rec [ " actual-numbuckets " ] , 2 )
self . failUnlessEqual ( rec [ " original-leasetimer-numbuckets " ] , 2 )
self . failUnlessEqual ( rec [ " configured-leasetimer-numbuckets " ] , 2 )
self . failUnlessEqual ( rec [ " actual-numshares " ] , 2 )
self . failUnlessEqual ( rec [ " original-leasetimer-numshares " ] , 2 )
self . failUnlessEqual ( rec [ " configured-leasetimer-numshares " ] , 2 )
size = sf0_size + sf2_size
self . failUnlessEqual ( rec [ " actual-sharebytes " ] , size )
self . failUnlessEqual ( rec [ " original-leasetimer-sharebytes " ] , size )
self . failUnlessEqual ( rec [ " configured-leasetimer-sharebytes " ] , size )
# different platforms have different notions of "blocks used by
# this file", so merely assert that it's a number
self . failUnless ( rec [ " actual-diskbytes " ] >= 0 ,
rec [ " actual-diskbytes " ] )
self . failUnless ( rec [ " original-leasetimer-diskbytes " ] >= 0 ,
rec [ " original-leasetimer-diskbytes " ] )
self . failUnless ( rec [ " configured-leasetimer-diskbytes " ] >= 0 ,
rec [ " configured-leasetimer-diskbytes " ] )
d . addCallback ( _after_first_cycle )
d . addCallback ( lambda ign : self . render1 ( webstatus ) )
def _check_html ( html ) :
s = remove_tags ( html )
self . failUnlessIn ( " Expiration Enabled: expired leases will be removed " , s )
self . failUnlessIn ( " recovered: 2 shares, 2 buckets, " , s )
d . addCallback ( _check_html )
return d
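# test_expire() above configures expiration_time=2000, i.e. leases older than
# 2000 seconds are treated as expired. A minimal sketch of the reclamation
# rule the assertions exercise; illustrative only, not the expirer's actual
# bookkeeping, and 'age' stands for however the crawler measures a lease's age:
def lease_is_expired_sketch(lease_age, configured_lifetime=2000):
    return lease_age > configured_lifetime

def share_is_reclaimable_sketch(lease_ages, configured_lifetime=2000):
    # a share is deleted only when every one of its leases has expired;
    # shares that keep a live (31-day default) lease survive the cycle
    return all(lease_is_expired_sketch(age, configured_lifetime)
               for age in lease_ages)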
def test_limited_history ( self ) :
basedir = " storage/LeaseCrawler/limited_history "
fileutil . make_dirs ( basedir )
ss = StorageServer ( basedir , " \x00 " * 20 )
# make it start sooner than usual.
lc = ss . lease_checker
lc . slow_start = 0
lc . cpu_slice = 500
# create a few shares, with some leases on them
self . make_shares ( ss )
ss . setServiceParent ( self . s )
def _wait_until_15_cycles_done ( ) :
last = lc . state [ " last-cycle-finished " ]
if last is not None and last >= 15 :
return True
if lc . timer :
lc . timer . reset ( 0 )
return False
d = self . poll ( _wait_until_15_cycles_done )
def _check ( ignored ) :
s = lc . get_state ( )
h = s [ " history " ]
self . failUnlessEqual ( len ( h ) , 10 )
self . failUnlessEqual ( max ( h . keys ( ) ) , 15 )
self . failUnlessEqual ( min ( h . keys ( ) ) , 6 )
d . addCallback ( _check )
return d
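# test_limited_history() expects the crawler to retain reports for only the
# ten most recent cycles, keyed by cycle number (6..15 after fifteen cycles).
# A minimal sketch of such a bounded history; an illustration of the
# behaviour being asserted, not the crawler's actual storage code:
def record_cycle_sketch(history, cycle_number, report, limit=10):
    history[cycle_number] = report
    while len(history) > limit:
        del history[min(history)]  # drop the oldest recorded cycle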
def test_unpredictable_future ( self ) :
basedir = " storage/LeaseCrawler/unpredictable_future "
fileutil . make_dirs ( basedir )
ss = StorageServer ( basedir , " \x00 " * 20 )
# make it start sooner than usual.
lc = ss . lease_checker
lc . slow_start = 0
lc . cpu_slice = - 1.0 # stop quickly
self . make_shares ( ss )
ss . setServiceParent ( self . s )
d = eventual . fireEventually ( )
def _check ( ignored ) :
# this should fire after the first bucket is complete, but before
# the first prefix is complete, so the progress-measurer won't
# think we've gotten far enough to raise our percent-complete
# above 0%, triggering the cannot-predict-the-future code in
# expirer.py . This will have to change if/when the
# progress-measurer gets smart enough to count buckets (we'll
# have to interrupt it even earlier, before it's finished the
# first bucket).
s = lc . get_state ( )
self . failUnless ( " cycle-to-date " in s )
self . failUnless ( " estimated-remaining-cycle " in s )
self . failUnless ( " estimated-current-cycle " in s )
left = s [ " estimated-remaining-cycle " ] [ " space-recovered " ]
self . failUnlessEqual ( left [ " actual-numbuckets " ] , None )
self . failUnlessEqual ( left [ " original-leasetimer-numbuckets " ] , None )
self . failUnlessEqual ( left [ " configured-leasetimer-numbuckets " ] , None )
self . failUnlessEqual ( left [ " actual-numshares " ] , None )
self . failUnlessEqual ( left [ " original-leasetimer-numshares " ] , None )
self . failUnlessEqual ( left [ " configured-leasetimer-numshares " ] , None )
self . failUnlessEqual ( left [ " actual-diskbytes " ] , None )
self . failUnlessEqual ( left [ " original-leasetimer-diskbytes " ] , None )
self . failUnlessEqual ( left [ " configured-leasetimer-diskbytes " ] , None )
self . failUnlessEqual ( left [ " actual-sharebytes " ] , None )
self . failUnlessEqual ( left [ " original-leasetimer-sharebytes " ] , None )
self . failUnlessEqual ( left [ " configured-leasetimer-sharebytes " ] , None )
full = s [ " estimated-current-cycle " ] [ " space-recovered " ]
self . failUnlessEqual ( full [ " actual-numbuckets " ] , None )
self . failUnlessEqual ( full [ " original-leasetimer-numbuckets " ] , None )
self . failUnlessEqual ( full [ " configured-leasetimer-numbuckets " ] , None )
self . failUnlessEqual ( full [ " actual-numshares " ] , None )
self . failUnlessEqual ( full [ " original-leasetimer-numshares " ] , None )
self . failUnlessEqual ( full [ " configured-leasetimer-numshares " ] , None )
self . failUnlessEqual ( full [ " actual-diskbytes " ] , None )
self . failUnlessEqual ( full [ " original-leasetimer-diskbytes " ] , None )
self . failUnlessEqual ( full [ " configured-leasetimer-diskbytes " ] , None )
self . failUnlessEqual ( full [ " actual-sharebytes " ] , None )
self . failUnlessEqual ( full [ " original-leasetimer-sharebytes " ] , None )
self . failUnlessEqual ( full [ " configured-leasetimer-sharebytes " ] , None )
d . addCallback ( _check )
return d
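# test_unpredictable_future() stops the crawler before the progress-measurer
# reports any progress, so every space-recovered estimate must come back as
# None. A minimal sketch of that guard; illustrative only, not the actual
# code in expirer.py:
def estimate_recovery_sketch(fraction_complete, recovered_so_far):
    if fraction_complete <= 0.0:
        return None  # cannot extrapolate from zero progress
    # otherwise scale what has been seen so far up to the rest of the cycle
    return recovered_so_far * (1.0 - fraction_complete) / fraction_complete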
def test_no_st_blocks ( self ) :
basedir = " storage/LeaseCrawler/no_st_blocks "
fileutil . make_dirs ( basedir )
ss = No_ST_BLOCKS_StorageServer ( basedir , " \x00 " * 20 ,
expiration_time = - 1000 )
# a negative expiration_time= means the "configured-leasetimer-"
# space-recovered counts will be non-zero, since all shares will have
# expired by then
# make it start sooner than usual.
lc = ss . lease_checker
lc . slow_start = 0
self . make_shares ( ss )
ss . setServiceParent ( self . s )
def _wait ( ) :
return bool ( lc . get_state ( ) [ " last-cycle-finished " ] is not None )
d = self . poll ( _wait )
def _check ( ignored ) :
s = lc . get_state ( )
last = s [ " history " ] [ 0 ]
rec = last [ " space-recovered " ]
self . failUnlessEqual ( rec [ " configured-leasetimer-numbuckets " ] , 4 )
self . failUnlessEqual ( rec [ " configured-leasetimer-numshares " ] , 4 )
self . failUnless ( rec [ " configured-leasetimer-sharebytes " ] > 0 ,
rec [ " configured-leasetimer-sharebytes " ] )
# without the .st_blocks field in os.stat() results, we should be
# reporting diskbytes==sharebytes
self . failUnlessEqual ( rec [ " configured-leasetimer-sharebytes " ] ,
rec [ " configured-leasetimer-diskbytes " ] )
d . addCallback ( _check )
return d
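# test_no_st_blocks() asserts that when os.stat() results lack .st_blocks,
# the crawler reports diskbytes equal to sharebytes. A minimal sketch of that
# fallback; an assumption drawn from the test, not the crawler's exact code:
def diskbytes_sketch(statinfo):
    blocks = getattr(statinfo, "st_blocks", None)
    if blocks is None:
        return statinfo.st_size  # no block count: fall back to the byte size
    return blocks * 512          # POSIX st_blocks counts 512-byte units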
class NoStatvfsServer ( StorageServer ) :
def do_statvfs ( self ) :
raise AttributeError
class WebStatus ( unittest . TestCase , pollmixin . PollMixin , WebRenderingMixin ) :
def setUp ( self ) :
self . s = service . MultiService ( )
self . s . startService ( )
def tearDown ( self ) :
return self . s . stopService ( )
def test_no_server ( self ) :
w = StorageStatus ( None )
html = w . renderSynchronously ( )
self . failUnless ( " <h1>No Storage Server Running</h1> " in html , html )
def test_status ( self ) :
basedir = " storage/WebStatus/status "
fileutil . make_dirs ( basedir )
ss = StorageServer ( basedir , " \x00 " * 20 )
ss . setServiceParent ( self . s )
w = StorageStatus ( ss )
d = self . render1 ( w )
def _check_html ( html ) :
self . failUnless ( " <h1>Storage Server Status</h1> " in html , html )
s = remove_tags ( html )
self . failUnless ( " Accepting new shares: Yes " in s , s )
self . failUnless ( " Reserved space: - 0 B (0) " in s , s )
d . addCallback ( _check_html )
d . addCallback ( lambda ign : self . render_json ( w ) )
def _check_json ( json ) :
data = simplejson . loads ( json )
s = data [ " stats " ]
self . failUnlessEqual ( s [ " storage_server.accepting_immutable_shares " ] , 1 )
self . failUnlessEqual ( s [ " storage_server.reserved_space " ] , 0 )
self . failUnless ( " bucket-counter " in data )
self . failUnless ( " lease-checker " in data )
d . addCallback ( _check_json )
return d
def render_json ( self , page ) :
d = self . render1 ( page , args = { " t " : [ " json " ] } )
return d
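# For reference, _check_json above expects the ?t=json rendering to be shaped
# roughly like this (values illustrative; the nested crawler states are
# omitted):
EXAMPLE_STATUS_JSON_SKETCH = {
    "stats": {
        "storage_server.accepting_immutable_shares": 1,
        "storage_server.reserved_space": 0,
    },
    "bucket-counter": {},  # bucket-counting crawler state
    "lease-checker": {},   # lease-expiration crawler state
}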
def test_status_no_statvfs ( self ) :
# windows has no os.statvfs . Make sure the code handles that even on
# unix.
basedir = " storage/WebStatus/status_no_statvfs "
fileutil . make_dirs ( basedir )
ss = NoStatvfsServer ( basedir , " \x00 " * 20 )
ss . setServiceParent ( self . s )
w = StorageStatus ( ss )
html = w . renderSynchronously ( )
self . failUnless ( " <h1>Storage Server Status</h1> " in html , html )
s = remove_tags ( html )
self . failUnless ( " Accepting new shares: Yes " in s , s )
self . failUnless ( " Total disk space: ? " in s , s )
def test_readonly ( self ) :
basedir = " storage/WebStatus/readonly "
fileutil . make_dirs ( basedir )
ss = StorageServer ( basedir , " \x00 " * 20 , readonly_storage = True )
ss . setServiceParent ( self . s )
w = StorageStatus ( ss )
html = w . renderSynchronously ( )
self . failUnless ( " <h1>Storage Server Status</h1> " in html , html )
s = remove_tags ( html )
self . failUnless ( " Accepting new shares: No " in s , s )
def test_reserved ( self ) :
basedir = " storage/WebStatus/reserved "
fileutil . make_dirs ( basedir )
ss = StorageServer ( basedir , " \x00 " * 20 , reserved_space = 10e6 )
ss . setServiceParent ( self . s )
w = StorageStatus ( ss )
html = w . renderSynchronously ( )
self . failUnless ( " <h1>Storage Server Status</h1> " in html , html )
s = remove_tags ( html )
self . failUnless ( " Reserved space: - 10.00 MB (10000000) " in s , s )
def test_huge_reserved ( self ) :
basedir = " storage/WebStatus/reserved "
fileutil . make_dirs ( basedir )
ss = StorageServer ( basedir , " \x00 " * 20 , reserved_space = 10e6 )
ss . setServiceParent ( self . s )
w = StorageStatus ( ss )
html = w . renderSynchronously ( )
self . failUnless ( " <h1>Storage Server Status</h1> " in html , html )
s = remove_tags ( html )
self . failUnless ( " Reserved space: - 10.00 MB (10000000) " in s , s )
def test_util ( self ) :
w = StorageStatus ( None )
self . failUnlessEqual ( w . render_space ( None , None ) , " ? " )
self . failUnlessEqual ( w . render_space ( None , 10e6 ) , " 10000000 " )
self . failUnlessEqual ( w . render_abbrev_space ( None , None ) , " ? " )
self . failUnlessEqual ( w . render_abbrev_space ( None , 10e6 ) , " 10.00 MB " )
self . failUnlessEqual ( remove_prefix ( " foo.bar " , " foo. " ) , " bar " )
self . failUnlessEqual ( remove_prefix ( " foo.bar " , " baz. " ) , None )
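# test_util() pins down remove_prefix()'s contract: strip the prefix when it
# is present, otherwise return None. A minimal equivalent; a sketch, not
# necessarily the implementation in allmydata.web.storage:
def remove_prefix_sketch(s, prefix):
    if not s.startswith(prefix):
        return None
    return s[len(prefix):]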