from __future__ import print_function

__all__ = [
    "SyncTestCase",
    "AsyncTestCase",
    "AsyncBrokenTestCase",
    "flush_logged_errors",
    "skip",
    "skipIf",
]

import os, random, struct
import tempfile
from tempfile import mktemp
from functools import partial
from unittest import case as _case
from socket import (
    AF_INET,
    SOCK_STREAM,
    SOMAXCONN,
    socket,
    error as socket_error,
)
from errno import (
    EADDRINUSE,
)

import treq

from zope.interface import implementer
from testtools import (
    TestCase,
    skip,
    skipIf,
)
from testtools.twistedsupport import (
    SynchronousDeferredRunTest,
    AsynchronousDeferredRunTest,
    AsynchronousDeferredRunTestForBrokenTwisted,
    flush_logged_errors,
)

from twisted.plugin import IPlugin
from twisted.internet import defer
from twisted.internet.defer import inlineCallbacks, returnValue
from twisted.internet.interfaces import IPullProducer
from twisted.python import failure
from twisted.python.filepath import FilePath
from twisted.application import service
from twisted.web.error import Error as WebError
from twisted.internet.interfaces import (
    IStreamServerEndpointStringParser,
    IReactorSocket,
)
from twisted.internet.endpoints import AdoptedStreamServerEndpoint

from allmydata import uri
from allmydata.interfaces import IMutableFileNode, IImmutableFileNode, \
     NotEnoughSharesError, ICheckable, \
     IMutableUploadable, SDMF_VERSION, \
     MDMF_VERSION
from allmydata.check_results import CheckResults, CheckAndRepairResults, \
     DeepCheckResults, DeepCheckAndRepairResults
from allmydata.storage_client import StubServer
from allmydata.mutable.layout import unpack_header
from allmydata.mutable.publish import MutableData
from allmydata.storage.mutable import MutableShareFile
from allmydata.util import hashutil, log, iputil
from allmydata.util.assertutil import precondition
from allmydata.util.consumer import download_to_data
import allmydata.test.common_util as testutil
from allmydata.immutable.upload import Uploader

from .eliotutil import (
    EliotLoggedRunTest,
)

TEST_RSA_KEY_SIZE = 522

@implementer(IPlugin, IStreamServerEndpointStringParser)
class AdoptedServerPort(object):
    """
    Parse an ``adopt-socket:<fd>`` endpoint description by adopting ``fd`` as
    a listening TCP port.
    """
    prefix = "adopt-socket"

    def parseStreamServer(self, reactor, fd):
        log.msg("Adopting {}".format(fd))
        # AdoptedStreamServerEndpoint wants to own the file descriptor.  It
        # will duplicate it and then close the one we pass in.  This means it
        # is really only possible to adopt a particular file descriptor once.
        #
        # This wouldn't matter except one of the tests wants to stop one of
        # the nodes and start it up again.  This results in exactly an
        # attempt to adopt a particular file descriptor twice.
        #
        # So we'll dup it ourselves.  AdoptedStreamServerEndpoint can do
        # whatever it wants to the result - the original will still be valid
        # and reusable.
        return AdoptedStreamServerEndpoint(reactor, os.dup(int(fd)), AF_INET)

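# For example, an endpoint description like ``adopt-socket:fd=63`` (63 being
# a hypothetical descriptor number) asks the reactor to adopt an
# already-listening socket; ``SameProcessStreamEndpointAssigner.assign``
# below builds exactly such strings.
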
def really_bind(s, addr):
    # Arbitrarily decide we'll try 100 times.  We don't want to try forever
    # in case this is a persistent problem.  Trying is cheap, though, so we
    # may as well try a lot.  Hopefully the OS isn't so bad at allocating a
    # port for us that it takes more than 2 iterations.
    for i in range(100):
        try:
            s.bind(addr)
        except socket_error as e:
            if e.errno == EADDRINUSE:
                continue
            raise
        else:
            return
    raise Exception("Many bind attempts failed with EADDRINUSE")

class SameProcessStreamEndpointAssigner(object):
    """
    A fixture which can assign streaming server endpoints for use *in this
    process only*.

    An effort is made to avoid address collisions for this port but the logic
    for doing so is platform-dependent (sorry, Windows).

    This is more reliable than trying to listen on a hard-coded non-zero port
    number.  It is at least as reliable as trying to listen on port number
    zero on Windows and more reliable than doing that on other platforms.
    """
    def setUp(self):
        self._cleanups = []

    def tearDown(self):
        for c in self._cleanups:
            c()

    def _patch_plugins(self):
        """
        Add the testing package ``plugins`` directory to the
        ``twisted.plugins`` aggregate package.  Arrange for it to be removed
        again when the fixture is torn down.
        """
        import twisted.plugins
        testplugins = FilePath(__file__).sibling("plugins")
        twisted.plugins.__path__.insert(0, testplugins.path)
        self._cleanups.append(lambda: twisted.plugins.__path__.remove(testplugins.path))

    def assign(self, reactor):
        """
        Make a new streaming server endpoint and return its string
        description.

        This is intended to help write config files that will then be read
        and used in this process.

        :param reactor: The reactor which will be used to listen with the
            resulting endpoint.  If it provides ``IReactorSocket`` then
            resulting reliability will be extremely high.  If it doesn't,
            resulting reliability will be pretty alright.

        :return: A two-tuple of (location hint, port endpoint description) as
            strings.
        """
        if IReactorSocket.providedBy(reactor):
            # On this platform, we can reliably pre-allocate a listening
            # port.  Once it is bound we know it will not fail later with
            # EADDRINUSE.
            s = socket(AF_INET, SOCK_STREAM)
            # We need to keep ``s`` alive as long as the file descriptor we
            # put in this string might still be used.  We could dup() the
            # descriptor instead but then we've only inverted the cleanup
            # problem: gone from don't-close-too-soon to
            # close-just-late-enough.  So we'll leave ``s`` alive and use it
            # as the cleanup mechanism.
            self._cleanups.append(s.close)
            s.setblocking(False)
            really_bind(s, ("127.0.0.1", 0))
            s.listen(SOMAXCONN)
            host, port = s.getsockname()
            location_hint = "tcp:%s:%d" % (host, port)
            port_endpoint = "adopt-socket:fd=%d" % (s.fileno(),)
            # Make sure `adopt-socket` is recognized.  We do this instead of
            # providing a dropin because we don't want to make this endpoint
            # available to random other applications.
            self._patch_plugins()
        else:
            # On other platforms, we blindly guess and hope we get lucky.
            portnum = iputil.allocate_tcp_port()
            location_hint = "tcp:127.0.0.1:%d" % (portnum,)
            port_endpoint = "tcp:%d:interface=127.0.0.1" % (portnum,)

        return location_hint, port_endpoint

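# A minimal usage sketch for the fixture above (names are illustrative, not
# taken from any particular test):
#
#     assigner = SameProcessStreamEndpointAssigner()
#     assigner.setUp()
#     try:
#         location_hint, port_endpoint = assigner.assign(reactor)
#         # write ``port_endpoint`` into a node's config; advertise
#         # ``location_hint`` to clients in this process
#     finally:
#         assigner.tearDown()
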
@implementer(IPullProducer)
class DummyProducer(object):
    def resumeProducing(self):
        pass

@implementer(IImmutableFileNode)
class FakeCHKFileNode(object):
    """I provide IImmutableFileNode, but all of my data is stored in a
    dictionary passed to my constructor."""

    def __init__(self, filecap, all_contents):
        precondition(isinstance(filecap, (uri.CHKFileURI, uri.LiteralFileURI)), filecap)
        self.all_contents = all_contents
        self.my_uri = filecap
        self.storage_index = self.my_uri.get_storage_index()

    def get_uri(self):
        return self.my_uri.to_string()
    def get_write_uri(self):
        return None
    def get_readonly_uri(self):
        return self.my_uri.to_string()
    def get_cap(self):
        return self.my_uri
    def get_verify_cap(self):
        return self.my_uri.get_verify_cap()
    def get_repair_cap(self):
        return self.my_uri.get_verify_cap()
    def get_storage_index(self):
        return self.storage_index

    def check(self, monitor, verify=False, add_lease=False):
        s = StubServer("\x00"*20)
        r = CheckResults(self.my_uri, self.storage_index,
                         healthy=True, recoverable=True,
                         count_happiness=10,
                         count_shares_needed=3,
                         count_shares_expected=10,
                         count_shares_good=10,
                         count_good_share_hosts=10,
                         count_recoverable_versions=1,
                         count_unrecoverable_versions=0,
                         servers_responding=[s],
                         sharemap={1: [s]},
                         count_wrong_shares=0,
                         list_corrupt_shares=[],
                         count_corrupt_shares=0,
                         list_incompatible_shares=[],
                         count_incompatible_shares=0,
                         summary="",
                         report=[],
                         share_problems=[],
                         servermap=None)
        return defer.succeed(r)

    def check_and_repair(self, monitor, verify=False, add_lease=False):
        d = self.check(verify)
        def _got(cr):
            r = CheckAndRepairResults(self.storage_index)
            r.pre_repair_results = r.post_repair_results = cr
            return r
        d.addCallback(_got)
        return d

    def is_mutable(self):
        return False
    def is_readonly(self):
        return True
    def is_unknown(self):
        return False
    def is_allowed_in_immutable_directory(self):
        return True
    def raise_error(self):
        pass

    def get_size(self):
        if isinstance(self.my_uri, uri.LiteralFileURI):
            return self.my_uri.get_size()
        try:
            data = self.all_contents[self.my_uri.to_string()]
        except KeyError as le:
            raise NotEnoughSharesError(le, 0, 3)
        return len(data)
    def get_current_size(self):
        return defer.succeed(self.get_size())

    def read(self, consumer, offset=0, size=None):
        # we don't bother to call registerProducer/unregisterProducer,
        # because it's a hassle to write a dummy Producer that does the right
        # thing (we have to make sure that DummyProducer.resumeProducing
        # writes the data into the consumer immediately, otherwise it will
        # loop forever).
        d = defer.succeed(None)
        d.addCallback(self._read, consumer, offset, size)
        return d

    def _read(self, ignored, consumer, offset, size):
        if isinstance(self.my_uri, uri.LiteralFileURI):
            data = self.my_uri.data
        else:
            if self.my_uri.to_string() not in self.all_contents:
                raise NotEnoughSharesError(None, 0, 3)
            data = self.all_contents[self.my_uri.to_string()]
        start = offset
        if size is not None:
            end = offset + size
        else:
            end = len(data)
        consumer.write(data[start:end])
        return consumer

    def get_best_readable_version(self):
        return defer.succeed(self)

    def download_to_data(self, progress=None):
        return download_to_data(self, progress=progress)

    download_best_version = download_to_data

    def get_size_of_best_version(self):
        return defer.succeed(self.get_size())

def make_chk_file_cap(size):
    return uri.CHKFileURI(key=os.urandom(16),
                          uri_extension_hash=os.urandom(32),
                          needed_shares=3,
                          total_shares=10,
                          size=size)

def make_chk_file_uri(size):
    return make_chk_file_cap(size).to_string()

def create_chk_filenode(contents, all_contents):
    filecap = make_chk_file_cap(len(contents))
    n = FakeCHKFileNode(filecap, all_contents)
    all_contents[filecap.to_string()] = contents
    return n

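# Sketch of intended use (the values are illustrative): tests share one
# ``all_contents`` dict so a node created here can be read back through the
# IImmutableFileNode interface:
#
#     all_contents = {}
#     n = create_chk_filenode("some data", all_contents)
#     d = n.download_best_version()  # should fire with "some data"
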
@implementer(IMutableFileNode, ICheckable)
class FakeMutableFileNode(object):
    """I provide IMutableFileNode, but all of my data is stored in a
    dictionary passed to my constructor."""

    MUTABLE_SIZELIMIT = 10000

    def __init__(self, storage_broker, secret_holder,
                 default_encoding_parameters, history, all_contents):
        self.all_contents = all_contents
        self.file_types = {} # storage index => MDMF_VERSION or SDMF_VERSION
        self.init_from_cap(make_mutable_file_cap())
        self._k = default_encoding_parameters['k']
        self._segsize = default_encoding_parameters['max_segment_size']

    def create(self, contents, key_generator=None, keysize=None,
               version=SDMF_VERSION):
        if version == MDMF_VERSION and \
            isinstance(self.my_uri, (uri.ReadonlySSKFileURI,
                                     uri.WriteableSSKFileURI)):
            self.init_from_cap(make_mdmf_mutable_file_cap())
        self.file_types[self.storage_index] = version
        initial_contents = self._get_initial_contents(contents)
        data = initial_contents.read(initial_contents.get_size())
        data = "".join(data)
        self.all_contents[self.storage_index] = data
        return defer.succeed(self)

    def _get_initial_contents(self, contents):
        if contents is None:
            return MutableData("")
        if IMutableUploadable.providedBy(contents):
            return contents
        assert callable(contents), "%s should be callable, not %s" % \
               (contents, type(contents))
        return contents(self)

    def init_from_cap(self, filecap):
        assert isinstance(filecap, (uri.WriteableSSKFileURI,
                                    uri.ReadonlySSKFileURI,
                                    uri.WriteableMDMFFileURI,
                                    uri.ReadonlyMDMFFileURI))
        self.my_uri = filecap
        self.storage_index = self.my_uri.get_storage_index()
        if isinstance(filecap, (uri.WriteableMDMFFileURI,
                                uri.ReadonlyMDMFFileURI)):
            self.file_types[self.storage_index] = MDMF_VERSION
        else:
            self.file_types[self.storage_index] = SDMF_VERSION
        return self

    def get_cap(self):
        return self.my_uri
    def get_readcap(self):
        return self.my_uri.get_readonly()
    def get_uri(self):
        return self.my_uri.to_string()
    def get_write_uri(self):
        if self.is_readonly():
            return None
        return self.my_uri.to_string()
    def get_readonly(self):
        return self.my_uri.get_readonly()
    def get_readonly_uri(self):
        return self.my_uri.get_readonly().to_string()
    def get_verify_cap(self):
        return self.my_uri.get_verify_cap()
    def get_repair_cap(self):
        if self.my_uri.is_readonly():
            return None
        return self.my_uri
    def is_readonly(self):
        return self.my_uri.is_readonly()
    def is_mutable(self):
        return self.my_uri.is_mutable()
    def is_unknown(self):
        return False
    def is_allowed_in_immutable_directory(self):
        return not self.my_uri.is_mutable()
    def raise_error(self):
        pass
    def get_writekey(self):
        return "\x00"*16
    def get_size(self):
        return len(self.all_contents[self.storage_index])
    def get_current_size(self):
        return self.get_size_of_best_version()
    def get_size_of_best_version(self):
        return defer.succeed(len(self.all_contents[self.storage_index]))

    def get_storage_index(self):
        return self.storage_index

    def get_servermap(self, mode):
        return defer.succeed(None)

    def get_version(self):
        assert self.storage_index in self.file_types
        return self.file_types[self.storage_index]

    def check(self, monitor, verify=False, add_lease=False):
        s = StubServer("\x00"*20)
        r = CheckResults(self.my_uri, self.storage_index,
                         healthy=True, recoverable=True,
                         count_happiness=10,
                         count_shares_needed=3,
                         count_shares_expected=10,
                         count_shares_good=10,
                         count_good_share_hosts=10,
                         count_recoverable_versions=1,
                         count_unrecoverable_versions=0,
                         servers_responding=[s],
                         sharemap={"seq1-abcd-sh0": [s]},
                         count_wrong_shares=0,
                         list_corrupt_shares=[],
                         count_corrupt_shares=0,
                         list_incompatible_shares=[],
                         count_incompatible_shares=0,
                         summary="",
                         report=[],
                         share_problems=[],
                         servermap=None)
        return defer.succeed(r)

    def check_and_repair(self, monitor, verify=False, add_lease=False):
        d = self.check(verify)
        def _got(cr):
            r = CheckAndRepairResults(self.storage_index)
            r.pre_repair_results = r.post_repair_results = cr
            return r
        d.addCallback(_got)
        return d

    def deep_check(self, verify=False, add_lease=False):
        d = self.check(verify)
        def _done(r):
            dr = DeepCheckResults(self.storage_index)
            dr.add_check(r, [])
            return dr
        d.addCallback(_done)
        return d

    def deep_check_and_repair(self, verify=False, add_lease=False):
        d = self.check_and_repair(verify)
        def _done(r):
            dr = DeepCheckAndRepairResults(self.storage_index)
            dr.add_check(r, [])
            return dr
        d.addCallback(_done)
        return d

    def download_best_version(self, progress=None):
        return defer.succeed(self._download_best_version(progress=progress))

    def _download_best_version(self, ignored=None, progress=None):
        if isinstance(self.my_uri, uri.LiteralFileURI):
            return self.my_uri.data
        if self.storage_index not in self.all_contents:
            raise NotEnoughSharesError(None, 0, 3)
        return self.all_contents[self.storage_index]

    def overwrite(self, new_contents):
        assert not self.is_readonly()
        new_data = new_contents.read(new_contents.get_size())
        new_data = "".join(new_data)
        self.all_contents[self.storage_index] = new_data
        return defer.succeed(None)

    def modify(self, modifier):
        # this does not implement FileTooLargeError, but the real one does
        return defer.maybeDeferred(self._modify, modifier)

    def _modify(self, modifier):
        assert not self.is_readonly()
        old_contents = self.all_contents[self.storage_index]
        new_data = modifier(old_contents, None, True)
        self.all_contents[self.storage_index] = new_data
        return None

    # As actually implemented, MutableFilenode and MutableFileVersion
    # are distinct. However, nothing in the webapi uses (yet) that
    # distinction -- it just uses the unified download interface
    # provided by get_best_readable_version and read. When we start
    # doing cooler things like LDMF, we will want to revise this code to
    # be less simplistic.
    def get_best_readable_version(self):
        return defer.succeed(self)

    def get_best_mutable_version(self):
        return defer.succeed(self)

    # Ditto for this, which is an implementation of IWriteable.
    # XXX: Declare that the same is implemented.
    def update(self, data, offset):
        assert not self.is_readonly()
        def modifier(old, servermap, first_time):
            new = old[:offset] + "".join(data.read(data.get_size()))
            new += old[len(new):]
            return new
        return self.modify(modifier)

    def read(self, consumer, offset=0, size=None):
        data = self._download_best_version()
        if size:
            data = data[offset:offset+size]
        consumer.write(data)
        return defer.succeed(consumer)

def make_mutable_file_cap():
    return uri.WriteableSSKFileURI(writekey=os.urandom(16),
                                   fingerprint=os.urandom(32))

def make_mdmf_mutable_file_cap():
    return uri.WriteableMDMFFileURI(writekey=os.urandom(16),
                                    fingerprint=os.urandom(32))

def make_mutable_file_uri(mdmf=False):
    if mdmf:
        cap = make_mdmf_mutable_file_cap()
    else:
        cap = make_mutable_file_cap()
    return cap.to_string()

def make_verifier_uri():
    return uri.SSKVerifierURI(storage_index=os.urandom(16),
                              fingerprint=os.urandom(32)).to_string()

def create_mutable_filenode(contents, mdmf=False, all_contents=None):
    # XXX: All of these arguments are kind of stupid.
    if mdmf:
        cap = make_mdmf_mutable_file_cap()
    else:
        cap = make_mutable_file_cap()

    encoding_params = {}
    encoding_params['k'] = 3
    encoding_params['max_segment_size'] = 128*1024

    filenode = FakeMutableFileNode(None, None, encoding_params, None,
                                   all_contents)
    filenode.init_from_cap(cap)
    if mdmf:
        filenode.create(MutableData(contents), version=MDMF_VERSION)
    else:
        filenode.create(MutableData(contents), version=SDMF_VERSION)
    return filenode

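# A similar sketch for the mutable fake (values illustrative); the returned
# node supports the unified download interface:
#
#     all_contents = {}
#     n = create_mutable_filenode("abc", mdmf=False, all_contents=all_contents)
#     d = n.download_best_version()  # should fire with "abc"
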
class LoggingServiceParent(service.MultiService):
    def log(self, *args, **kwargs):
        return log.msg(*args, **kwargs)


TEST_DATA = "\x02" * (Uploader.URI_LIT_SIZE_THRESHOLD + 1)

class ShouldFailMixin(object):
    def shouldFail(self, expected_failure, which, substring,
                   callable, *args, **kwargs):
        """Assert that a function call raises some exception. This is a
        Deferred-friendly version of TestCase.assertRaises().

        Suppose you want to verify the following function:

         def broken(a, b, c):
             if a < 0:
                 raise TypeError('a must not be negative')
             return defer.succeed(b+c)

        You can use:

            d = self.shouldFail(TypeError, 'test name',
                                'a must not be negative',
                                broken, -4, 5, c=12)

        in your test method. The 'test name' string will be included in the
        error message, if any, because Deferred chains frequently make it
        difficult to tell which assertion was tripped.

        The substring= argument, if not None, must appear in the 'repr'
        of the message wrapped by this Failure, or the test will fail.
        """

        assert substring is None or isinstance(substring, str)
        d = defer.maybeDeferred(callable, *args, **kwargs)
        def done(res):
            if isinstance(res, failure.Failure):
                res.trap(expected_failure)
                if substring:
                    message = repr(res.value.args[0])
                    self.failUnless(substring in message,
                                    "%s: substring '%s' not in '%s'"
                                    % (which, substring, message))
            else:
                self.fail("%s was supposed to raise %s, not get '%s'" %
                          (which, expected_failure, res))
        d.addBoth(done)
        return d

class WebErrorMixin(object):
    def explain_web_error(self, f):
        # an error on the server side causes the client-side getPage() to
        # return a failure(t.web.error.Error), and its str() doesn't show the
        # response body, which is where the useful information lives. Attach
        # this method as an errback handler, and it will reveal the hidden
        # message.
        f.trap(WebError)
        print("Web Error:", f.value, ":", f.value.response)
        return f

    def _shouldHTTPError(self, res, which, validator):
        if isinstance(res, failure.Failure):
            res.trap(WebError)
            return validator(res)
        else:
            self.fail("%s was supposed to Error, not get '%s'" % (which, res))

    def shouldHTTPError(self, which,
                        code=None, substring=None, response_substring=None,
                        callable=None, *args, **kwargs):
        # returns a Deferred with the response body
        assert substring is None or isinstance(substring, str)
        assert callable
        def _validate(f):
            if code is not None:
                self.failUnlessEqual(f.value.status, str(code), which)
            if substring:
                code_string = str(f)
                self.failUnless(substring in code_string,
                                "%s: substring '%s' not in '%s'"
                                % (which, substring, code_string))
            response_body = f.value.response
            if response_substring:
                self.failUnless(response_substring in response_body,
                                "%s: response substring '%s' not in '%s'"
                                % (which, response_substring, response_body))
            return response_body
        d = defer.maybeDeferred(callable, *args, **kwargs)
        d.addBoth(self._shouldHTTPError, which, _validate)
        return d

    @inlineCallbacks
    def assertHTTPError(self, url, code, response_substring,
                        method="get", persistent=False,
                        **args):
        response = yield treq.request(method, url, persistent=persistent,
                                      **args)
        body = yield response.content()
        self.assertEquals(response.code, code)
        if response_substring is not None:
            self.assertIn(response_substring, body)
        returnValue(body)

class ErrorMixin(WebErrorMixin):
    def explain_error(self, f):
        if f.check(defer.FirstError):
            print("First Error:", f.value.subFailure)
        return f

def corrupt_field(data, offset, size, debug=False):
    if random.random() < 0.5:
        newdata = testutil.flip_one_bit(data, offset, size)
        if debug:
            log.msg("testing: corrupting offset %d, size %d flipping one bit orig: %r, newdata: %r" % (offset, size, data[offset:offset+size], newdata[offset:offset+size]))
        return newdata
    else:
        newval = testutil.insecurerandstr(size)
        if debug:
            log.msg("testing: corrupting offset %d, size %d randomizing field, orig: %r, newval: %r" % (offset, size, data[offset:offset+size], newval))
        return data[:offset]+newval+data[offset+size:]

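# The _corrupt_* functions below assume an immutable share file layout that
# can be inferred from the offsets they poke at (a sketch, not authoritative
# storage-format documentation): the server-side container header occupies
# the first 0x0c bytes, so every within-share offset is shifted by 0x0c.
# The share's own version number (1 or 2) sits at 0x0c, followed by a block
# of fields (segment size, share data size, then the offsets of the share
# data, ciphertext hash tree, block hashes, share hashes, and URI
# extension), encoded as 4-byte ">L" values in v1 shares and 8-byte ">Q"
# values in v2 shares.
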
def _corrupt_nothing(data, debug=False):
    """Leave the data pristine."""
    return data

def _corrupt_file_version_number(data, debug=False):
    """Scramble the file data -- the share file version number will have one
    bit flipped or else will be changed to a random value."""
    return corrupt_field(data, 0x00, 4)

def _corrupt_size_of_file_data(data, debug=False):
    """Scramble the file data -- the field showing the size of the share
    data within the file will have one bit flipped or else be changed to a
    random value."""
    return corrupt_field(data, 0x04, 4)

def _corrupt_sharedata_version_number(data, debug=False):
    """Scramble the file data -- the share data version number will have one
    bit flipped or else will be changed to a random value, but not 1 or 2."""
    return corrupt_field(data, 0x0c, 4)
    # NOTE: everything below is unreachable because of the early return
    # above; it is kept as a record of the intended "random value, but not 1
    # or 2" behavior.
    sharevernum = struct.unpack(">L", data[0x0c:0x0c+4])[0]
    assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
    newsharevernum = sharevernum
    while newsharevernum in (1, 2):
        newsharevernum = random.randrange(0, 2**32)
    newsharevernumbytes = struct.pack(">L", newsharevernum)
    return data[:0x0c] + newsharevernumbytes + data[0x0c+4:]

def _corrupt_sharedata_version_number_to_plausible_version(data, debug=False):
    """Scramble the file data -- the share data version number will be
    changed to 2 if it is 1 or else to 1 if it is 2."""
    sharevernum = struct.unpack(">L", data[0x0c:0x0c+4])[0]
    assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
    if sharevernum == 1:
        newsharevernum = 2
    else:
        newsharevernum = 1
    newsharevernumbytes = struct.pack(">L", newsharevernum)
    return data[:0x0c] + newsharevernumbytes + data[0x0c+4:]

def _corrupt_segment_size(data, debug=False):
    """Scramble the file data -- the field showing the size of the segment
    will have one bit flipped or else be changed to a random value."""
    sharevernum = struct.unpack(">L", data[0x0c:0x0c+4])[0]
    assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
    if sharevernum == 1:
        return corrupt_field(data, 0x0c+0x04, 4, debug=False)
    else:
        return corrupt_field(data, 0x0c+0x04, 8, debug=False)

def _corrupt_size_of_sharedata(data, debug=False):
    """Scramble the file data -- the field showing the size of the data
    within the share data will have one bit flipped or else will be changed
    to a random value."""
    sharevernum = struct.unpack(">L", data[0x0c:0x0c+4])[0]
    assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
    if sharevernum == 1:
        return corrupt_field(data, 0x0c+0x08, 4)
    else:
        return corrupt_field(data, 0x0c+0x0c, 8)

def _corrupt_offset_of_sharedata(data, debug=False):
    """Scramble the file data -- the field showing the offset of the data
    within the share data will have one bit flipped or else be changed to a
    random value."""
    sharevernum = struct.unpack(">L", data[0x0c:0x0c+4])[0]
    assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
    if sharevernum == 1:
        return corrupt_field(data, 0x0c+0x0c, 4)
    else:
        return corrupt_field(data, 0x0c+0x14, 8)

def _corrupt_offset_of_ciphertext_hash_tree(data, debug=False):
    """Scramble the file data -- the field showing the offset of the
    ciphertext hash tree within the share data will have one bit flipped or
    else be changed to a random value.
    """
    sharevernum = struct.unpack(">L", data[0x0c:0x0c+4])[0]
    assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
    if sharevernum == 1:
        return corrupt_field(data, 0x0c+0x14, 4, debug=False)
    else:
        return corrupt_field(data, 0x0c+0x24, 8, debug=False)

def _corrupt_offset_of_block_hashes(data, debug=False):
    """Scramble the file data -- the field showing the offset of the block
    hash tree within the share data will have one bit flipped or else will
    be changed to a random value."""
    sharevernum = struct.unpack(">L", data[0x0c:0x0c+4])[0]
    assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
    if sharevernum == 1:
        return corrupt_field(data, 0x0c+0x18, 4)
    else:
        return corrupt_field(data, 0x0c+0x2c, 8)

def _corrupt_offset_of_block_hashes_to_truncate_crypttext_hashes(data, debug=False):
    """Scramble the file data -- the field showing the offset of the block
    hash tree within the share data will have a multiple of hash size
    subtracted from it, thus causing the downloader to download an
    incomplete crypttext hash tree."""
    sharevernum = struct.unpack(">L", data[0x0c:0x0c+4])[0]
    assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
    if sharevernum == 1:
        curval = struct.unpack(">L", data[0x0c+0x18:0x0c+0x18+4])[0]
        newval = random.randrange(0, max(1, (curval/hashutil.CRYPTO_VAL_SIZE)/2))*hashutil.CRYPTO_VAL_SIZE
        newvalstr = struct.pack(">L", newval)
        return data[:0x0c+0x18]+newvalstr+data[0x0c+0x18+4:]
    else:
        curval = struct.unpack(">Q", data[0x0c+0x2c:0x0c+0x2c+8])[0]
        newval = random.randrange(0, max(1, (curval/hashutil.CRYPTO_VAL_SIZE)/2))*hashutil.CRYPTO_VAL_SIZE
        newvalstr = struct.pack(">Q", newval)
        return data[:0x0c+0x2c]+newvalstr+data[0x0c+0x2c+8:]

def _corrupt_offset_of_share_hashes(data, debug=False):
    """Scramble the file data -- the field showing the offset of the share
    hash tree within the share data will have one bit flipped or else will
    be changed to a random value."""
    sharevernum = struct.unpack(">L", data[0x0c:0x0c+4])[0]
    assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
    if sharevernum == 1:
        return corrupt_field(data, 0x0c+0x1c, 4)
    else:
        return corrupt_field(data, 0x0c+0x34, 8)

def _corrupt_offset_of_uri_extension(data, debug=False):
    """Scramble the file data -- the field showing the offset of the uri
    extension will have one bit flipped or else will be changed to a random
    value."""
    sharevernum = struct.unpack(">L", data[0x0c:0x0c+4])[0]
    assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
    if sharevernum == 1:
        return corrupt_field(data, 0x0c+0x20, 4)
    else:
        return corrupt_field(data, 0x0c+0x3c, 8)

def _corrupt_offset_of_uri_extension_to_force_short_read(data, debug=False):
    """Scramble the file data -- the field showing the offset of the uri
    extension will be set to the size of the file minus 3.  This means when
    the client tries to read the length field from that location it will get
    a short read -- the result string will be only 3 bytes long, not the 4
    or 8 bytes necessary to do a successful struct.unpack."""
    sharevernum = struct.unpack(">L", data[0x0c:0x0c+4])[0]
    assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
    # The "-0x0c" in here is to skip the server-side header in the share
    # file, which the client doesn't see when seeking and reading.
    if sharevernum == 1:
        if debug:
            log.msg("testing: corrupting offset %d, size %d, changing %d to %d (len(data) == %d)" % (0x2c, 4, struct.unpack(">L", data[0x2c:0x2c+4])[0], len(data)-0x0c-3, len(data)))
        return data[:0x2c] + struct.pack(">L", len(data)-0x0c-3) + data[0x2c+4:]
    else:
        if debug:
            log.msg("testing: corrupting offset %d, size %d, changing %d to %d (len(data) == %d)" % (0x48, 8, struct.unpack(">Q", data[0x48:0x48+8])[0], len(data)-0x0c-3, len(data)))
        return data[:0x48] + struct.pack(">Q", len(data)-0x0c-3) + data[0x48+8:]

def _corrupt_mutable_share_data(data, debug=False):
    prefix = data[:32]
    assert prefix == MutableShareFile.MAGIC, "This function is designed to corrupt mutable shares of v1, and the magic number doesn't look right: %r vs %r" % (prefix, MutableShareFile.MAGIC)
    data_offset = MutableShareFile.DATA_OFFSET
    sharetype = data[data_offset:data_offset+1]
    assert sharetype == "\x00", "non-SDMF mutable shares not supported"
    (version, ig_seqnum, ig_roothash, ig_IV, ig_k, ig_N, ig_segsize,
     ig_datalen, offsets) = unpack_header(data[data_offset:])
    assert version == 0, "this function only handles v0 SDMF files"
    start = data_offset + offsets["share_data"]
    length = data_offset + offsets["enc_privkey"] - start
    return corrupt_field(data, start, length)

def _corrupt_share_data(data, debug=False):
    """Scramble the file data -- the field containing the share data itself
    will have one bit flipped or else will be changed to a random value."""
    sharevernum = struct.unpack(">L", data[0x0c:0x0c+4])[0]
    assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways, not v%d." % sharevernum
    if sharevernum == 1:
        sharedatasize = struct.unpack(">L", data[0x0c+0x08:0x0c+0x08+4])[0]
        return corrupt_field(data, 0x0c+0x24, sharedatasize)
    else:
        sharedatasize = struct.unpack(">Q", data[0x0c+0x0c:0x0c+0x0c+8])[0]
        return corrupt_field(data, 0x0c+0x44, sharedatasize)

def _corrupt_share_data_last_byte(data, debug=False):
    """Scramble the file data -- flip all bits of the last byte."""
    sharevernum = struct.unpack(">L", data[0x0c:0x0c+4])[0]
    assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways, not v%d." % sharevernum
    if sharevernum == 1:
        sharedatasize = struct.unpack(">L", data[0x0c+0x08:0x0c+0x08+4])[0]
        offset = 0x0c+0x24+sharedatasize-1
    else:
        sharedatasize = struct.unpack(">Q", data[0x0c+0x0c:0x0c+0x0c+8])[0]
        offset = 0x0c+0x44+sharedatasize-1

    newdata = data[:offset] + chr(ord(data[offset])^0xFF) + data[offset+1:]
    if debug:
        log.msg("testing: flipping all bits of byte at offset %d: %r, newdata: %r" % (offset, data[offset], newdata[offset]))
    return newdata

def _corrupt_crypttext_hash_tree(data, debug=False):
    """Scramble the file data -- the field containing the crypttext hash
    tree will have one bit flipped or else will be changed to a random
    value.
    """
    sharevernum = struct.unpack(">L", data[0x0c:0x0c+4])[0]
    assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
    if sharevernum == 1:
        crypttexthashtreeoffset = struct.unpack(">L", data[0x0c+0x14:0x0c+0x14+4])[0]
        blockhashesoffset = struct.unpack(">L", data[0x0c+0x18:0x0c+0x18+4])[0]
    else:
        crypttexthashtreeoffset = struct.unpack(">Q", data[0x0c+0x24:0x0c+0x24+8])[0]
        blockhashesoffset = struct.unpack(">Q", data[0x0c+0x2c:0x0c+0x2c+8])[0]

    return corrupt_field(data, 0x0c+crypttexthashtreeoffset, blockhashesoffset-crypttexthashtreeoffset, debug=debug)

def _corrupt_crypttext_hash_tree_byte_x221(data, debug=False):
    """Scramble the file data -- the byte at offset 0x221 will have its 7th
    (b1) bit flipped.
    """
    sharevernum = struct.unpack(">L", data[0x0c:0x0c+4])[0]
    assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
    if debug:
        log.msg("original data: %r" % (data,))
    return data[:0x0c+0x221] + chr(ord(data[0x0c+0x221])^0x02) + data[0x0c+0x221+1:]

def _corrupt_block_hashes(data, debug=False):
    """Scramble the file data -- the field containing the block hash tree
    will have one bit flipped or else will be changed to a random value.
    """
    sharevernum = struct.unpack(">L", data[0x0c:0x0c+4])[0]
    assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
    if sharevernum == 1:
        blockhashesoffset = struct.unpack(">L", data[0x0c+0x18:0x0c+0x18+4])[0]
        sharehashesoffset = struct.unpack(">L", data[0x0c+0x1c:0x0c+0x1c+4])[0]
    else:
        blockhashesoffset = struct.unpack(">Q", data[0x0c+0x2c:0x0c+0x2c+8])[0]
        sharehashesoffset = struct.unpack(">Q", data[0x0c+0x34:0x0c+0x34+8])[0]

    return corrupt_field(data, 0x0c+blockhashesoffset, sharehashesoffset-blockhashesoffset)

def _corrupt_share_hashes(data, debug=False):
    """Scramble the file data -- the field containing the share hash chain
    will have one bit flipped or else will be changed to a random value.
    """
    sharevernum = struct.unpack(">L", data[0x0c:0x0c+4])[0]
    assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
    if sharevernum == 1:
        sharehashesoffset = struct.unpack(">L", data[0x0c+0x1c:0x0c+0x1c+4])[0]
        uriextoffset = struct.unpack(">L", data[0x0c+0x20:0x0c+0x20+4])[0]
    else:
        sharehashesoffset = struct.unpack(">Q", data[0x0c+0x34:0x0c+0x34+8])[0]
        uriextoffset = struct.unpack(">Q", data[0x0c+0x3c:0x0c+0x3c+8])[0]

    return corrupt_field(data, 0x0c+sharehashesoffset, uriextoffset-sharehashesoffset)

def _corrupt_length_of_uri_extension(data, debug=False):
    """Scramble the file data -- the field showing the length of the uri
    extension will have one bit flipped or else will be changed to a random
    value."""
    sharevernum = struct.unpack(">L", data[0x0c:0x0c+4])[0]
    assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
    if sharevernum == 1:
        uriextoffset = struct.unpack(">L", data[0x0c+0x20:0x0c+0x20+4])[0]
        # The share-internal offset needs the same 0x0c server-side header
        # adjustment the v2 branch applies.
        return corrupt_field(data, 0x0c+uriextoffset, 4)
    else:
        uriextoffset = struct.unpack(">Q", data[0x0c+0x3c:0x0c+0x3c+8])[0]
        return corrupt_field(data, 0x0c+uriextoffset, 8)

def _corrupt_uri_extension(data, debug=False):
    """Scramble the file data -- the field containing the uri extension will
    have one bit flipped or else will be changed to a random value."""
    sharevernum = struct.unpack(">L", data[0x0c:0x0c+4])[0]
    assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
    if sharevernum == 1:
        uriextoffset = struct.unpack(">L", data[0x0c+0x20:0x0c+0x20+4])[0]
        uriextlen = struct.unpack(">L", data[0x0c+uriextoffset:0x0c+uriextoffset+4])[0]
    else:
        uriextoffset = struct.unpack(">Q", data[0x0c+0x3c:0x0c+0x3c+8])[0]
        uriextlen = struct.unpack(">Q", data[0x0c+uriextoffset:0x0c+uriextoffset+8])[0]

    return corrupt_field(data, 0x0c+uriextoffset, uriextlen)

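# Each _corrupt_* function above maps the raw bytes of a share file to a
# corrupted copy.  A sketch of how a test might apply one to a share file on
# disk (the helper name and path handling are illustrative, not an existing
# API):
#
#     def corrupt_share_file(path, corruptor):
#         with open(path, "rb") as f:
#             data = f.read()
#         with open(path, "wb") as f:
#             f.write(corruptor(data, debug=False))
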
class _TestCaseMixin(object):
    """
    A mixin for ``TestCase`` which collects helpful behaviors for subclasses.

    Those behaviors are:

    * All of the features of testtools TestCase.
    * Each test method will be run in a unique Eliot action context which
      identifies the test and collects all Eliot log messages emitted by
      that test (including setUp and tearDown messages).
    * trial-compatible mktemp method
    * unittest2-compatible assertRaises helper
    * Automatic cleanup of tempfile.tempdir mutation (pervasive through the
      Tahoe-LAFS test suite).
    """
    def setUp(self):
        # Restore the original temporary directory.  Node ``init_tempdir``
        # mangles it and many tests manage to get that method called.
        self.addCleanup(
            partial(setattr, tempfile, "tempdir", tempfile.tempdir),
        )
        return super(_TestCaseMixin, self).setUp()

    class _DummyCase(_case.TestCase):
        def dummy(self):
            pass
    _dummyCase = _DummyCase("dummy")

    def mktemp(self):
        return mktemp()

    def assertRaises(self, *a, **kw):
        return self._dummyCase.assertRaises(*a, **kw)

class SyncTestCase(_TestCaseMixin, TestCase):
    """
    A ``TestCase`` which can run tests that may return an already-fired
    ``Deferred``.
    """
    run_tests_with = EliotLoggedRunTest.make_factory(
        SynchronousDeferredRunTest,
    )

class AsyncTestCase(_TestCaseMixin, TestCase):
    """
    A ``TestCase`` which can run tests that may return a Deferred that will
    only fire if the global reactor is running.
    """
    run_tests_with = EliotLoggedRunTest.make_factory(
        AsynchronousDeferredRunTest.make_factory(timeout=60.0),
    )

class AsyncBrokenTestCase(_TestCaseMixin, TestCase):
    """
    A ``TestCase`` like ``AsyncTestCase`` but which spins the reactor a
    little longer than apparently necessary to clean out lingering
    unaccounted for event sources.

    Tests which require this behavior are broken and should be fixed so they
    pass with ``AsyncTestCase``.
    """
    run_tests_with = EliotLoggedRunTest.make_factory(
        AsynchronousDeferredRunTestForBrokenTwisted.make_factory(timeout=60.0),
    )