mirror of
https://github.com/tahoe-lafs/tahoe-lafs.git
synced 2025-02-18 17:00:24 +00:00
immutable: separate tests of immutable upload/download from tests of immutable checking/repair
This commit is contained in:
parent
b496eba072
commit
8dd3b3185f
@ -1,5 +1,5 @@
|
||||
|
||||
import os
|
||||
import os, random, struct
|
||||
from zope.interface import implements
|
||||
from twisted.internet import defer
|
||||
from twisted.internet.interfaces import IConsumer
|
||||
@ -15,11 +15,12 @@ from allmydata.check_results import CheckResults, CheckAndRepairResults, \
|
||||
DeepCheckResults, DeepCheckAndRepairResults
|
||||
from allmydata.mutable.common import CorruptShareError
|
||||
from allmydata.storage import storage_index_to_dir
|
||||
from allmydata.util import log, fileutil, pollmixin
|
||||
from allmydata.util import hashutil, log, fileutil, pollmixin
|
||||
from allmydata.util.assertutil import precondition
|
||||
from allmydata.stats import StatsGathererService
|
||||
from allmydata.key_generator import KeyGeneratorService
|
||||
import common_util as testutil
|
||||
from allmydata import immutable
|
||||
|
||||
|
||||
def flush_but_dont_ignore(res):
|
||||
@ -36,9 +37,9 @@ class FakeCHKFileNode:
|
||||
all_contents = {}
|
||||
bad_shares = {}
|
||||
|
||||
def __init__(self, u, client):
|
||||
def __init__(self, u, thisclient):
|
||||
precondition(IURI.providedBy(u), u)
|
||||
self.client = client
|
||||
self.client = thisclient
|
||||
self.my_uri = u
|
||||
self.storage_index = u.storage_index
|
||||
|
||||
@ -134,9 +135,9 @@ def make_chk_file_uri(size):
|
||||
total_shares=10,
|
||||
size=size)
|
||||
|
||||
def create_chk_filenode(client, contents):
|
||||
def create_chk_filenode(thisclient, contents):
|
||||
u = make_chk_file_uri(len(contents))
|
||||
n = FakeCHKFileNode(u, client)
|
||||
n = FakeCHKFileNode(u, thisclient)
|
||||
FakeCHKFileNode.all_contents[u.to_string()] = contents
|
||||
return n
|
||||
|
||||
@ -150,8 +151,8 @@ class FakeMutableFileNode:
|
||||
all_contents = {}
|
||||
bad_shares = {}
|
||||
|
||||
def __init__(self, client):
|
||||
self.client = client
|
||||
def __init__(self, thisclient):
|
||||
self.client = thisclient
|
||||
self.my_uri = make_mutable_file_uri()
|
||||
self.storage_index = self.my_uri.storage_index
|
||||
def create(self, initial_contents, key_generator=None):
|
||||
@ -406,12 +407,12 @@ class SystemTestMixin(pollmixin.PollMixin, testutil.StallMixin):
|
||||
def write(name, value):
|
||||
open(os.path.join(basedir, name), "w").write(value+"\n")
|
||||
if i == 0:
|
||||
# client[0] runs a webserver and a helper, no key_generator
|
||||
# clients[0] runs a webserver and a helper, no key_generator
|
||||
write("webport", "tcp:0:interface=127.0.0.1")
|
||||
write("run_helper", "yes")
|
||||
write("keepalive_timeout", "600")
|
||||
if i == 3:
|
||||
# client[3] runs a webserver and uses a helper, uses
|
||||
# clients[3] runs a webserver and uses a helper, uses
|
||||
# key_generator
|
||||
write("webport", "tcp:0:interface=127.0.0.1")
|
||||
write("disconnect_timeout", "1800")
|
||||
@ -426,7 +427,7 @@ class SystemTestMixin(pollmixin.PollMixin, testutil.StallMixin):
|
||||
# files before they are launched.
|
||||
self._set_up_nodes_extra_config()
|
||||
|
||||
# start client[0], wait for it's tub to be ready (at which point it
|
||||
# start clients[0], wait for it's tub to be ready (at which point it
|
||||
# will have registered the helper furl).
|
||||
c = self.add_service(client.Client(basedir=basedirs[0]))
|
||||
self.clients.append(c)
|
||||
@ -865,8 +866,36 @@ N8L+bvLd4BU9g6hRS8b59lQ6GNjryx2bUnCVtLcey4Jd
|
||||
# To disable the pre-computed tub certs, uncomment this line.
|
||||
#SYSTEM_TEST_CERTS = []
|
||||
|
||||
TEST_DATA="\x02"*(immutable.upload.Uploader.URI_LIT_SIZE_THRESHOLD+1)
|
||||
|
||||
class ShareManglingMixin(SystemTestMixin):
|
||||
|
||||
def setUp(self):
|
||||
# Set self.basedir to a temp dir which has the name of the current test method in its
|
||||
# name.
|
||||
self.basedir = self.mktemp()
|
||||
|
||||
d = defer.maybeDeferred(SystemTestMixin.setUp, self)
|
||||
d.addCallback(lambda x: self.set_up_nodes())
|
||||
|
||||
def _upload_a_file(ignored):
|
||||
cl0 = self.clients[0]
|
||||
# We need multiple segments to test crypttext hash trees that are non-trivial
|
||||
# (i.e. they have more than just one hash in them).
|
||||
cl0.DEFAULT_ENCODING_PARAMETERS['max_segment_size'] = 12
|
||||
d2 = cl0.upload(immutable.upload.Data(TEST_DATA, convergence=""))
|
||||
def _after_upload(u):
|
||||
self.uri = IURI(u.uri)
|
||||
return cl0.create_node_from_uri(self.uri)
|
||||
d2.addCallback(_after_upload)
|
||||
return d2
|
||||
d.addCallback(_upload_a_file)
|
||||
|
||||
def _stash_it(filenode):
|
||||
self.filenode = filenode
|
||||
d.addCallback(_stash_it)
|
||||
return d
|
||||
|
||||
def find_shares(self, unused=None):
|
||||
"""Locate shares on disk. Returns a dict that maps
|
||||
(clientnum,sharenum) to a string that contains the share container
|
||||
@ -913,6 +942,67 @@ class ShareManglingMixin(SystemTestMixin):
|
||||
wf = open(os.path.join(fullsharedirp, str(sharenum)), "w")
|
||||
wf.write(newdata)
|
||||
|
||||
def _delete_a_share(self, unused=None, sharenum=None):
|
||||
""" Delete one share. """
|
||||
|
||||
shares = self.find_shares()
|
||||
ks = shares.keys()
|
||||
if sharenum is not None:
|
||||
k = [ key for key in shares.keys() if key[1] == sharenum ][0]
|
||||
else:
|
||||
k = random.choice(ks)
|
||||
del shares[k]
|
||||
self.replace_shares(shares, storage_index=self.uri.storage_index)
|
||||
|
||||
return unused
|
||||
|
||||
def _corrupt_a_share(self, unused, corruptor_func, sharenum):
|
||||
shares = self.find_shares()
|
||||
ks = [ key for key in shares.keys() if key[1] == sharenum ]
|
||||
assert ks, (shares.keys(), sharenum)
|
||||
k = ks[0]
|
||||
shares[k] = corruptor_func(shares[k])
|
||||
self.replace_shares(shares, storage_index=self.uri.storage_index)
|
||||
return corruptor_func
|
||||
|
||||
def _corrupt_all_shares(self, unused, corruptor_func):
|
||||
""" All shares on disk will be corrupted by corruptor_func. """
|
||||
shares = self.find_shares()
|
||||
for k in shares.keys():
|
||||
self._corrupt_a_share(unused, corruptor_func, k[1])
|
||||
return corruptor_func
|
||||
|
||||
def _corrupt_a_random_share(self, unused, corruptor_func):
|
||||
""" Exactly one share on disk will be corrupted by corruptor_func. """
|
||||
shares = self.find_shares()
|
||||
ks = shares.keys()
|
||||
k = random.choice(ks)
|
||||
self._corrupt_a_share(unused, corruptor_func, k[1])
|
||||
return corruptor_func
|
||||
|
||||
def _count_reads(self):
|
||||
sum_of_read_counts = 0
|
||||
for thisclient in self.clients:
|
||||
counters = thisclient.stats_provider.get_stats()['counters']
|
||||
sum_of_read_counts += counters.get('storage_server.read', 0)
|
||||
return sum_of_read_counts
|
||||
|
||||
def _count_allocates(self):
|
||||
sum_of_allocate_counts = 0
|
||||
for thisclient in self.clients:
|
||||
counters = thisclient.stats_provider.get_stats()['counters']
|
||||
sum_of_allocate_counts += counters.get('storage_server.allocate', 0)
|
||||
return sum_of_allocate_counts
|
||||
|
||||
def _download_and_check_plaintext(self, unused=None):
|
||||
self.downloader = self.clients[1].getServiceNamed("downloader")
|
||||
d = self.downloader.download_to_data(self.uri)
|
||||
|
||||
def _after_download(result):
|
||||
self.failUnlessEqual(result, TEST_DATA)
|
||||
d.addCallback(_after_download)
|
||||
return d
|
||||
|
||||
class ShouldFailMixin:
|
||||
def shouldFail(self, expected_failure, which, substring,
|
||||
callable, *args, **kwargs):
|
||||
@ -990,3 +1080,244 @@ def download_to_data(n, offset=0, size=None):
|
||||
d = n.read(MemoryConsumer(), offset, size)
|
||||
d.addCallback(lambda mc: "".join(mc.chunks))
|
||||
return d
|
||||
|
||||
def corrupt_field(data, offset, size, debug=False):
|
||||
if random.random() < 0.5:
|
||||
newdata = testutil.flip_one_bit(data, offset, size)
|
||||
if debug:
|
||||
log.msg("testing: corrupting offset %d, size %d flipping one bit orig: %r, newdata: %r" % (offset, size, data[offset:offset+size], newdata[offset:offset+size]))
|
||||
return newdata
|
||||
else:
|
||||
newval = testutil.insecurerandstr(size)
|
||||
if debug:
|
||||
log.msg("testing: corrupting offset %d, size %d randomizing field, orig: %r, newval: %r" % (offset, size, data[offset:offset+size], newval))
|
||||
return data[:offset]+newval+data[offset+size:]
|
||||
|
||||
def _corrupt_nothing(data):
|
||||
""" Leave the data pristine. """
|
||||
return data
|
||||
|
||||
def _corrupt_file_version_number(data):
|
||||
""" Scramble the file data -- the share file version number have one bit flipped or else
|
||||
will be changed to a random value."""
|
||||
return corrupt_field(data, 0x00, 4)
|
||||
|
||||
def _corrupt_size_of_file_data(data):
|
||||
""" Scramble the file data -- the field showing the size of the share data within the file
|
||||
will be set to one smaller. """
|
||||
return corrupt_field(data, 0x04, 4)
|
||||
|
||||
def _corrupt_sharedata_version_number(data):
|
||||
""" Scramble the file data -- the share data version number will have one bit flipped or
|
||||
else will be changed to a random value, but not 1 or 2."""
|
||||
return corrupt_field(data, 0x0c, 4)
|
||||
sharevernum = struct.unpack(">l", data[0x0c:0x0c+4])[0]
|
||||
assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
|
||||
newsharevernum = sharevernum
|
||||
while newsharevernum in (1, 2):
|
||||
newsharevernum = random.randrange(0, 2**32)
|
||||
newsharevernumbytes = struct.pack(">l", newsharevernum)
|
||||
return data[:0x0c] + newsharevernumbytes + data[0x0c+4:]
|
||||
|
||||
def _corrupt_sharedata_version_number_to_plausible_version(data):
|
||||
""" Scramble the file data -- the share data version number will
|
||||
be changed to 2 if it is 1 or else to 1 if it is 2."""
|
||||
sharevernum = struct.unpack(">l", data[0x0c:0x0c+4])[0]
|
||||
assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
|
||||
if sharevernum == 1:
|
||||
newsharevernum = 2
|
||||
else:
|
||||
newsharevernum = 1
|
||||
newsharevernumbytes = struct.pack(">l", newsharevernum)
|
||||
return data[:0x0c] + newsharevernumbytes + data[0x0c+4:]
|
||||
|
||||
def _corrupt_segment_size(data):
|
||||
""" Scramble the file data -- the field showing the size of the segment will have one
|
||||
bit flipped or else be changed to a random value. """
|
||||
sharevernum = struct.unpack(">l", data[0x0c:0x0c+4])[0]
|
||||
assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
|
||||
if sharevernum == 1:
|
||||
return corrupt_field(data, 0x0c+0x04, 4, debug=False)
|
||||
else:
|
||||
return corrupt_field(data, 0x0c+0x04, 8, debug=False)
|
||||
|
||||
def _corrupt_size_of_sharedata(data):
|
||||
""" Scramble the file data -- the field showing the size of the data within the share
|
||||
data will have one bit flipped or else will be changed to a random value. """
|
||||
sharevernum = struct.unpack(">l", data[0x0c:0x0c+4])[0]
|
||||
assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
|
||||
if sharevernum == 1:
|
||||
return corrupt_field(data, 0x0c+0x08, 4)
|
||||
else:
|
||||
return corrupt_field(data, 0x0c+0x0c, 8)
|
||||
|
||||
def _corrupt_offset_of_sharedata(data):
|
||||
""" Scramble the file data -- the field showing the offset of the data within the share
|
||||
data will have one bit flipped or else be changed to a random value. """
|
||||
sharevernum = struct.unpack(">l", data[0x0c:0x0c+4])[0]
|
||||
assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
|
||||
if sharevernum == 1:
|
||||
return corrupt_field(data, 0x0c+0x0c, 4)
|
||||
else:
|
||||
return corrupt_field(data, 0x0c+0x14, 8)
|
||||
|
||||
def _corrupt_offset_of_ciphertext_hash_tree(data):
|
||||
""" Scramble the file data -- the field showing the offset of the ciphertext hash tree
|
||||
within the share data will have one bit flipped or else be changed to a random value.
|
||||
"""
|
||||
sharevernum = struct.unpack(">l", data[0x0c:0x0c+4])[0]
|
||||
assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
|
||||
if sharevernum == 1:
|
||||
return corrupt_field(data, 0x0c+0x14, 4, debug=False)
|
||||
else:
|
||||
return corrupt_field(data, 0x0c+0x24, 8, debug=False)
|
||||
|
||||
def _corrupt_offset_of_block_hashes(data):
|
||||
""" Scramble the file data -- the field showing the offset of the block hash tree within
|
||||
the share data will have one bit flipped or else will be changed to a random value. """
|
||||
sharevernum = struct.unpack(">l", data[0x0c:0x0c+4])[0]
|
||||
assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
|
||||
if sharevernum == 1:
|
||||
return corrupt_field(data, 0x0c+0x18, 4)
|
||||
else:
|
||||
return corrupt_field(data, 0x0c+0x2c, 8)
|
||||
|
||||
def _corrupt_offset_of_block_hashes_to_truncate_crypttext_hashes(data):
|
||||
""" Scramble the file data -- the field showing the offset of the block hash tree within the
|
||||
share data will have a multiple of hash size subtracted from it, thus causing the downloader
|
||||
to download an incomplete crypttext hash tree."""
|
||||
sharevernum = struct.unpack(">l", data[0x0c:0x0c+4])[0]
|
||||
assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
|
||||
if sharevernum == 1:
|
||||
curval = struct.unpack(">L", data[0x0c+0x18:0x0c+0x18+4])[0]
|
||||
newval = random.randrange(0, max(1, (curval/hashutil.CRYPTO_VAL_SIZE)/2))*hashutil.CRYPTO_VAL_SIZE
|
||||
newvalstr = struct.pack(">L", newval)
|
||||
return data[:0x0c+0x18]+newvalstr+data[0x0c+0x18+4:]
|
||||
else:
|
||||
curval = struct.unpack(">Q", data[0x0c+0x2c:0x0c+0x2c+8])[0]
|
||||
newval = random.randrange(0, max(1, (curval/hashutil.CRYPTO_VAL_SIZE)/2))*hashutil.CRYPTO_VAL_SIZE
|
||||
newvalstr = struct.pack(">Q", newval)
|
||||
return data[:0x0c+0x2c]+newvalstr+data[0x0c+0x2c+8:]
|
||||
|
||||
def _corrupt_offset_of_share_hashes(data):
|
||||
""" Scramble the file data -- the field showing the offset of the share hash tree within
|
||||
the share data will have one bit flipped or else will be changed to a random value. """
|
||||
sharevernum = struct.unpack(">l", data[0x0c:0x0c+4])[0]
|
||||
assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
|
||||
if sharevernum == 1:
|
||||
return corrupt_field(data, 0x0c+0x1c, 4)
|
||||
else:
|
||||
return corrupt_field(data, 0x0c+0x34, 8)
|
||||
|
||||
def _corrupt_offset_of_uri_extension(data):
|
||||
""" Scramble the file data -- the field showing the offset of the uri extension will
|
||||
have one bit flipped or else will be changed to a random value. """
|
||||
sharevernum = struct.unpack(">l", data[0x0c:0x0c+4])[0]
|
||||
assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
|
||||
if sharevernum == 1:
|
||||
return corrupt_field(data, 0x0c+0x20, 4)
|
||||
else:
|
||||
return corrupt_field(data, 0x0c+0x3c, 8)
|
||||
|
||||
def _corrupt_offset_of_uri_extension_to_force_short_read(data, debug=False):
|
||||
""" Scramble the file data -- the field showing the offset of the uri extension will be set
|
||||
to the size of the file minus 3. This means when the client tries to read the length field
|
||||
from that location it will get a short read -- the result string will be only 3 bytes long,
|
||||
not the 4 or 8 bytes necessary to do a successful struct.unpack."""
|
||||
sharevernum = struct.unpack(">l", data[0x0c:0x0c+4])[0]
|
||||
assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
|
||||
# The "-0x0c" in here is to skip the server-side header in the share file, which the client doesn't see when seeking and reading.
|
||||
if sharevernum == 1:
|
||||
if debug:
|
||||
log.msg("testing: corrupting offset %d, size %d, changing %d to %d (len(data) == %d)" % (0x2c, 4, struct.unpack(">L", data[0x2c:0x2c+4])[0], len(data)-0x0c-3, len(data)))
|
||||
return data[:0x2c] + struct.pack(">L", len(data)-0x0c-3) + data[0x2c+4:]
|
||||
else:
|
||||
if debug:
|
||||
log.msg("testing: corrupting offset %d, size %d, changing %d to %d (len(data) == %d)" % (0x48, 8, struct.unpack(">Q", data[0x48:0x48+8])[0], len(data)-0x0c-3, len(data)))
|
||||
return data[:0x48] + struct.pack(">Q", len(data)-0x0c-3) + data[0x48+8:]
|
||||
|
||||
def _corrupt_share_data(data):
|
||||
""" Scramble the file data -- the field containing the share data itself will have one
|
||||
bit flipped or else will be changed to a random value. """
|
||||
sharevernum = struct.unpack(">l", data[0x0c:0x0c+4])[0]
|
||||
assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
|
||||
if sharevernum == 1:
|
||||
sharedatasize = struct.unpack(">L", data[0x0c+0x08:0x0c+0x08+4])[0]
|
||||
|
||||
return corrupt_field(data, 0x0c+0x24, sharedatasize)
|
||||
else:
|
||||
sharedatasize = struct.unpack(">Q", data[0x0c+0x08:0x0c+0x0c+8])[0]
|
||||
|
||||
return corrupt_field(data, 0x0c+0x44, sharedatasize)
|
||||
|
||||
def _corrupt_crypttext_hash_tree(data):
|
||||
""" Scramble the file data -- the field containing the crypttext hash tree will have one
|
||||
bit flipped or else will be changed to a random value.
|
||||
"""
|
||||
sharevernum = struct.unpack(">l", data[0x0c:0x0c+4])[0]
|
||||
assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
|
||||
if sharevernum == 1:
|
||||
crypttexthashtreeoffset = struct.unpack(">L", data[0x0c+0x14:0x0c+0x14+4])[0]
|
||||
blockhashesoffset = struct.unpack(">L", data[0x0c+0x18:0x0c+0x18+4])[0]
|
||||
else:
|
||||
crypttexthashtreeoffset = struct.unpack(">Q", data[0x0c+0x24:0x0c+0x24+8])[0]
|
||||
blockhashesoffset = struct.unpack(">Q", data[0x0c+0x2c:0x0c+0x2c+8])[0]
|
||||
|
||||
return corrupt_field(data, crypttexthashtreeoffset, blockhashesoffset-crypttexthashtreeoffset)
|
||||
|
||||
def _corrupt_block_hashes(data):
|
||||
""" Scramble the file data -- the field containing the block hash tree will have one bit
|
||||
flipped or else will be changed to a random value.
|
||||
"""
|
||||
sharevernum = struct.unpack(">l", data[0x0c:0x0c+4])[0]
|
||||
assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
|
||||
if sharevernum == 1:
|
||||
blockhashesoffset = struct.unpack(">L", data[0x0c+0x18:0x0c+0x18+4])[0]
|
||||
sharehashesoffset = struct.unpack(">L", data[0x0c+0x1c:0x0c+0x1c+4])[0]
|
||||
else:
|
||||
blockhashesoffset = struct.unpack(">Q", data[0x0c+0x2c:0x0c+0x2c+8])[0]
|
||||
sharehashesoffset = struct.unpack(">Q", data[0x0c+0x34:0x0c+0x34+8])[0]
|
||||
|
||||
return corrupt_field(data, blockhashesoffset, sharehashesoffset-blockhashesoffset)
|
||||
|
||||
def _corrupt_share_hashes(data):
|
||||
""" Scramble the file data -- the field containing the share hash chain will have one
|
||||
bit flipped or else will be changed to a random value.
|
||||
"""
|
||||
sharevernum = struct.unpack(">l", data[0x0c:0x0c+4])[0]
|
||||
assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
|
||||
if sharevernum == 1:
|
||||
sharehashesoffset = struct.unpack(">L", data[0x0c+0x1c:0x0c+0x1c+4])[0]
|
||||
uriextoffset = struct.unpack(">L", data[0x0c+0x20:0x0c+0x20+4])[0]
|
||||
else:
|
||||
sharehashesoffset = struct.unpack(">Q", data[0x0c+0x34:0x0c+0x34+8])[0]
|
||||
uriextoffset = struct.unpack(">Q", data[0x0c+0x3c:0x0c+0x3c+8])[0]
|
||||
|
||||
return corrupt_field(data, sharehashesoffset, uriextoffset-sharehashesoffset)
|
||||
|
||||
def _corrupt_length_of_uri_extension(data):
|
||||
""" Scramble the file data -- the field showing the length of the uri extension will
|
||||
have one bit flipped or else will be changed to a random value. """
|
||||
sharevernum = struct.unpack(">l", data[0x0c:0x0c+4])[0]
|
||||
assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
|
||||
if sharevernum == 1:
|
||||
uriextoffset = struct.unpack(">L", data[0x0c+0x20:0x0c+0x20+4])[0]
|
||||
return corrupt_field(data, uriextoffset, 4)
|
||||
else:
|
||||
uriextoffset = struct.unpack(">Q", data[0x0c+0x3c:0x0c+0x3c+8])[0]
|
||||
return corrupt_field(data, uriextoffset, 8)
|
||||
|
||||
def _corrupt_uri_extension(data):
|
||||
""" Scramble the file data -- the field containing the uri extension will have one bit
|
||||
flipped or else will be changed to a random value. """
|
||||
sharevernum = struct.unpack(">l", data[0x0c:0x0c+4])[0]
|
||||
assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
|
||||
if sharevernum == 1:
|
||||
uriextoffset = struct.unpack(">L", data[0x0c+0x20:0x0c+0x20+4])[0]
|
||||
uriextlen = struct.unpack(">L", data[0x0c+uriextoffset:0x0c+uriextoffset+4])[0]
|
||||
else:
|
||||
uriextoffset = struct.unpack(">Q", data[0x0c+0x3c:0x0c+0x3c+8])[0]
|
||||
uriextlen = struct.unpack(">Q", data[0x0c+uriextoffset:0x0c+uriextoffset+8])[0]
|
||||
|
||||
return corrupt_field(data, uriextoffset, uriextlen)
|
||||
|
||||
|
@ -1,4 +1,4 @@
|
||||
from allmydata.test.common import SystemTestMixin, ShareManglingMixin
|
||||
from allmydata.test import common
|
||||
from allmydata.monitor import Monitor
|
||||
from allmydata import check_results
|
||||
from allmydata.interfaces import IURI, NotEnoughSharesError
|
||||
@ -9,298 +9,7 @@ from twisted.trial import unittest
|
||||
import random, struct
|
||||
import common_util as testutil
|
||||
|
||||
TEST_DATA="\x02"*(upload.Uploader.URI_LIT_SIZE_THRESHOLD+1)
|
||||
|
||||
def corrupt_field(data, offset, size, debug=False):
|
||||
if random.random() < 0.5:
|
||||
newdata = testutil.flip_one_bit(data, offset, size)
|
||||
if debug:
|
||||
log.msg("testing: corrupting offset %d, size %d flipping one bit orig: %r, newdata: %r" % (offset, size, data[offset:offset+size], newdata[offset:offset+size]))
|
||||
return newdata
|
||||
else:
|
||||
newval = testutil.insecurerandstr(size)
|
||||
if debug:
|
||||
log.msg("testing: corrupting offset %d, size %d randomizing field, orig: %r, newval: %r" % (offset, size, data[offset:offset+size], newval))
|
||||
return data[:offset]+newval+data[offset+size:]
|
||||
|
||||
def _corrupt_nothing(data):
|
||||
""" Leave the data pristine. """
|
||||
return data
|
||||
|
||||
def _corrupt_file_version_number(data):
|
||||
""" Scramble the file data -- the share file version number have one bit flipped or else
|
||||
will be changed to a random value."""
|
||||
return corrupt_field(data, 0x00, 4)
|
||||
|
||||
def _corrupt_size_of_file_data(data):
|
||||
""" Scramble the file data -- the field showing the size of the share data within the file
|
||||
will be set to one smaller. """
|
||||
return corrupt_field(data, 0x04, 4)
|
||||
|
||||
def _corrupt_sharedata_version_number(data):
|
||||
""" Scramble the file data -- the share data version number will have one bit flipped or
|
||||
else will be changed to a random value, but not 1 or 2."""
|
||||
return corrupt_field(data, 0x0c, 4)
|
||||
sharevernum = struct.unpack(">l", data[0x0c:0x0c+4])[0]
|
||||
assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
|
||||
newsharevernum = sharevernum
|
||||
while newsharevernum in (1, 2):
|
||||
newsharevernum = random.randrange(0, 2**32)
|
||||
newsharevernumbytes = struct.pack(">l", newsharevernum)
|
||||
return data[:0x0c] + newsharevernumbytes + data[0x0c+4:]
|
||||
|
||||
def _corrupt_sharedata_version_number_to_plausible_version(data):
|
||||
""" Scramble the file data -- the share data version number will
|
||||
be changed to 2 if it is 1 or else to 1 if it is 2."""
|
||||
sharevernum = struct.unpack(">l", data[0x0c:0x0c+4])[0]
|
||||
assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
|
||||
if sharevernum == 1:
|
||||
newsharevernum = 2
|
||||
else:
|
||||
newsharevernum = 1
|
||||
newsharevernumbytes = struct.pack(">l", newsharevernum)
|
||||
return data[:0x0c] + newsharevernumbytes + data[0x0c+4:]
|
||||
|
||||
def _corrupt_segment_size(data):
|
||||
""" Scramble the file data -- the field showing the size of the segment will have one
|
||||
bit flipped or else be changed to a random value. """
|
||||
sharevernum = struct.unpack(">l", data[0x0c:0x0c+4])[0]
|
||||
assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
|
||||
if sharevernum == 1:
|
||||
return corrupt_field(data, 0x0c+0x04, 4, debug=False)
|
||||
else:
|
||||
return corrupt_field(data, 0x0c+0x04, 8, debug=False)
|
||||
|
||||
def _corrupt_size_of_sharedata(data):
|
||||
""" Scramble the file data -- the field showing the size of the data within the share
|
||||
data will have one bit flipped or else will be changed to a random value. """
|
||||
sharevernum = struct.unpack(">l", data[0x0c:0x0c+4])[0]
|
||||
assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
|
||||
if sharevernum == 1:
|
||||
return corrupt_field(data, 0x0c+0x08, 4)
|
||||
else:
|
||||
return corrupt_field(data, 0x0c+0x0c, 8)
|
||||
|
||||
def _corrupt_offset_of_sharedata(data):
|
||||
""" Scramble the file data -- the field showing the offset of the data within the share
|
||||
data will have one bit flipped or else be changed to a random value. """
|
||||
sharevernum = struct.unpack(">l", data[0x0c:0x0c+4])[0]
|
||||
assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
|
||||
if sharevernum == 1:
|
||||
return corrupt_field(data, 0x0c+0x0c, 4)
|
||||
else:
|
||||
return corrupt_field(data, 0x0c+0x14, 8)
|
||||
|
||||
def _corrupt_offset_of_ciphertext_hash_tree(data):
|
||||
""" Scramble the file data -- the field showing the offset of the ciphertext hash tree
|
||||
within the share data will have one bit flipped or else be changed to a random value.
|
||||
"""
|
||||
sharevernum = struct.unpack(">l", data[0x0c:0x0c+4])[0]
|
||||
assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
|
||||
if sharevernum == 1:
|
||||
return corrupt_field(data, 0x0c+0x14, 4, debug=False)
|
||||
else:
|
||||
return corrupt_field(data, 0x0c+0x24, 8, debug=False)
|
||||
|
||||
def _corrupt_offset_of_block_hashes(data):
|
||||
""" Scramble the file data -- the field showing the offset of the block hash tree within
|
||||
the share data will have one bit flipped or else will be changed to a random value. """
|
||||
sharevernum = struct.unpack(">l", data[0x0c:0x0c+4])[0]
|
||||
assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
|
||||
if sharevernum == 1:
|
||||
return corrupt_field(data, 0x0c+0x18, 4)
|
||||
else:
|
||||
return corrupt_field(data, 0x0c+0x2c, 8)
|
||||
|
||||
def _corrupt_offset_of_block_hashes_to_truncate_crypttext_hashes(data):
|
||||
""" Scramble the file data -- the field showing the offset of the block hash tree within the
|
||||
share data will have a multiple of hash size subtracted from it, thus causing the downloader
|
||||
to download an incomplete crypttext hash tree."""
|
||||
sharevernum = struct.unpack(">l", data[0x0c:0x0c+4])[0]
|
||||
assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
|
||||
if sharevernum == 1:
|
||||
curval = struct.unpack(">L", data[0x0c+0x18:0x0c+0x18+4])[0]
|
||||
newval = random.randrange(0, max(1, (curval/hashutil.CRYPTO_VAL_SIZE)/2))*hashutil.CRYPTO_VAL_SIZE
|
||||
newvalstr = struct.pack(">L", newval)
|
||||
return data[:0x0c+0x18]+newvalstr+data[0x0c+0x18+4:]
|
||||
else:
|
||||
curval = struct.unpack(">Q", data[0x0c+0x2c:0x0c+0x2c+8])[0]
|
||||
newval = random.randrange(0, max(1, (curval/hashutil.CRYPTO_VAL_SIZE)/2))*hashutil.CRYPTO_VAL_SIZE
|
||||
newvalstr = struct.pack(">Q", newval)
|
||||
return data[:0x0c+0x2c]+newvalstr+data[0x0c+0x2c+8:]
|
||||
|
||||
def _corrupt_offset_of_share_hashes(data):
|
||||
""" Scramble the file data -- the field showing the offset of the share hash tree within
|
||||
the share data will have one bit flipped or else will be changed to a random value. """
|
||||
sharevernum = struct.unpack(">l", data[0x0c:0x0c+4])[0]
|
||||
assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
|
||||
if sharevernum == 1:
|
||||
return corrupt_field(data, 0x0c+0x1c, 4)
|
||||
else:
|
||||
return corrupt_field(data, 0x0c+0x34, 8)
|
||||
|
||||
def _corrupt_offset_of_uri_extension(data):
|
||||
""" Scramble the file data -- the field showing the offset of the uri extension will
|
||||
have one bit flipped or else will be changed to a random value. """
|
||||
sharevernum = struct.unpack(">l", data[0x0c:0x0c+4])[0]
|
||||
assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
|
||||
if sharevernum == 1:
|
||||
return corrupt_field(data, 0x0c+0x20, 4)
|
||||
else:
|
||||
return corrupt_field(data, 0x0c+0x3c, 8)
|
||||
|
||||
def _corrupt_offset_of_uri_extension_to_force_short_read(data, debug=False):
|
||||
""" Scramble the file data -- the field showing the offset of the uri extension will be set
|
||||
to the size of the file minus 3. This means when the client tries to read the length field
|
||||
from that location it will get a short read -- the result string will be only 3 bytes long,
|
||||
not the 4 or 8 bytes necessary to do a successful struct.unpack."""
|
||||
sharevernum = struct.unpack(">l", data[0x0c:0x0c+4])[0]
|
||||
assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
|
||||
# The "-0x0c" in here is to skip the server-side header in the share file, which the client doesn't see when seeking and reading.
|
||||
if sharevernum == 1:
|
||||
if debug:
|
||||
log.msg("testing: corrupting offset %d, size %d, changing %d to %d (len(data) == %d)" % (0x2c, 4, struct.unpack(">L", data[0x2c:0x2c+4])[0], len(data)-0x0c-3, len(data)))
|
||||
return data[:0x2c] + struct.pack(">L", len(data)-0x0c-3) + data[0x2c+4:]
|
||||
else:
|
||||
if debug:
|
||||
log.msg("testing: corrupting offset %d, size %d, changing %d to %d (len(data) == %d)" % (0x48, 8, struct.unpack(">Q", data[0x48:0x48+8])[0], len(data)-0x0c-3, len(data)))
|
||||
return data[:0x48] + struct.pack(">Q", len(data)-0x0c-3) + data[0x48+8:]
|
||||
|
||||
def _corrupt_share_data(data):
|
||||
""" Scramble the file data -- the field containing the share data itself will have one
|
||||
bit flipped or else will be changed to a random value. """
|
||||
sharevernum = struct.unpack(">l", data[0x0c:0x0c+4])[0]
|
||||
assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
|
||||
if sharevernum == 1:
|
||||
sharedatasize = struct.unpack(">L", data[0x0c+0x08:0x0c+0x08+4])[0]
|
||||
|
||||
return corrupt_field(data, 0x0c+0x24, sharedatasize)
|
||||
else:
|
||||
sharedatasize = struct.unpack(">Q", data[0x0c+0x08:0x0c+0x0c+8])[0]
|
||||
|
||||
return corrupt_field(data, 0x0c+0x44, sharedatasize)
|
||||
|
||||
def _corrupt_crypttext_hash_tree(data):
    """ Scramble the file data -- the field containing the crypttext hash tree will have one
    bit flipped or else will be changed to a random value.
    """
    vernum = struct.unpack(">l", data[0x0c:0x0c+4])[0]
    assert vernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
    # Offset-table entries are 4 bytes wide in v1 shares and 8 bytes wide in v2.
    if vernum == 1:
        start = struct.unpack(">L", data[0x0c+0x14:0x0c+0x14+4])[0]
        end = struct.unpack(">L", data[0x0c+0x18:0x0c+0x18+4])[0]
    else:
        start = struct.unpack(">Q", data[0x0c+0x24:0x0c+0x24+8])[0]
        end = struct.unpack(">Q", data[0x0c+0x2c:0x0c+0x2c+8])[0]
    # The crypttext hash tree spans from its own offset up to the block hashes' offset.
    return corrupt_field(data, start, end - start)
|
||||
|
||||
def _corrupt_block_hashes(data):
    """ Scramble the file data -- the field containing the block hash tree will have one bit
    flipped or else will be changed to a random value.
    """
    vernum = struct.unpack(">l", data[0x0c:0x0c+4])[0]
    assert vernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
    # Offset-table entries are 4 bytes wide in v1 shares and 8 bytes wide in v2.
    if vernum == 1:
        start = struct.unpack(">L", data[0x0c+0x18:0x0c+0x18+4])[0]
        end = struct.unpack(">L", data[0x0c+0x1c:0x0c+0x1c+4])[0]
    else:
        start = struct.unpack(">Q", data[0x0c+0x2c:0x0c+0x2c+8])[0]
        end = struct.unpack(">Q", data[0x0c+0x34:0x0c+0x34+8])[0]
    # The block hash tree spans from its own offset up to the share hashes' offset.
    return corrupt_field(data, start, end - start)
|
||||
|
||||
def _corrupt_share_hashes(data):
    """ Scramble the file data -- the field containing the share hash chain will have one
    bit flipped or else will be changed to a random value.
    """
    vernum = struct.unpack(">l", data[0x0c:0x0c+4])[0]
    assert vernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
    # Offset-table entries are 4 bytes wide in v1 shares and 8 bytes wide in v2.
    if vernum == 1:
        start = struct.unpack(">L", data[0x0c+0x1c:0x0c+0x1c+4])[0]
        end = struct.unpack(">L", data[0x0c+0x20:0x0c+0x20+4])[0]
    else:
        start = struct.unpack(">Q", data[0x0c+0x34:0x0c+0x34+8])[0]
        end = struct.unpack(">Q", data[0x0c+0x3c:0x0c+0x3c+8])[0]
    # The share hash chain spans from its own offset up to the URI extension's offset.
    return corrupt_field(data, start, end - start)
|
||||
|
||||
def _corrupt_length_of_uri_extension(data):
    """ Scramble the file data -- the field showing the length of the uri extension will
    have one bit flipped or else will be changed to a random value. """
    vernum = struct.unpack(">l", data[0x0c:0x0c+4])[0]
    assert vernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
    # The length field sits at the start of the URI extension block; it is 4
    # bytes wide in v1 shares and 8 bytes wide in v2 shares.
    if vernum == 1:
        off = struct.unpack(">L", data[0x0c+0x20:0x0c+0x20+4])[0]
        return corrupt_field(data, off, 4)
    off = struct.unpack(">Q", data[0x0c+0x3c:0x0c+0x3c+8])[0]
    return corrupt_field(data, off, 8)
|
||||
|
||||
def _corrupt_uri_extension(data):
    """ Scramble the file data -- the field containing the uri extension will have one bit
    flipped or else will be changed to a random value. """
    vernum = struct.unpack(">l", data[0x0c:0x0c+4])[0]
    assert vernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
    # Find the URI extension via the offset table, then read its own leading
    # length field to learn how many bytes to corrupt.
    if vernum == 1:
        off = struct.unpack(">L", data[0x0c+0x20:0x0c+0x20+4])[0]
        length = struct.unpack(">L", data[0x0c+off:0x0c+off+4])[0]
    else:
        off = struct.unpack(">Q", data[0x0c+0x3c:0x0c+0x3c+8])[0]
        length = struct.unpack(">Q", data[0x0c+off:0x0c+off+8])[0]

    return corrupt_field(data, off, length)
|
||||
|
||||
class Test(ShareManglingMixin, unittest.TestCase):
|
||||
def setUp(self):
|
||||
# Set self.basedir to a temp dir which has the name of the current test method in its
|
||||
# name.
|
||||
self.basedir = self.mktemp()
|
||||
|
||||
d = defer.maybeDeferred(SystemTestMixin.setUp, self)
|
||||
d.addCallback(lambda x: self.set_up_nodes())
|
||||
|
||||
def _upload_a_file(ignored):
|
||||
client = self.clients[0]
|
||||
# We need multiple segments to test crypttext hash trees that are non-trivial
|
||||
# (i.e. they have more than just one hash in them).
|
||||
client.DEFAULT_ENCODING_PARAMETERS['max_segment_size'] = 12
|
||||
d2 = client.upload(upload.Data(TEST_DATA, convergence=""))
|
||||
def _after_upload(u):
|
||||
self.uri = IURI(u.uri)
|
||||
return self.clients[0].create_node_from_uri(self.uri)
|
||||
d2.addCallback(_after_upload)
|
||||
return d2
|
||||
d.addCallback(_upload_a_file)
|
||||
|
||||
def _stash_it(filenode):
|
||||
self.filenode = filenode
|
||||
d.addCallback(_stash_it)
|
||||
return d
|
||||
|
||||
def _download_and_check_plaintext(self, unused=None):
|
||||
self.downloader = self.clients[1].getServiceNamed("downloader")
|
||||
d = self.downloader.download_to_data(self.uri)
|
||||
|
||||
def _after_download(result):
|
||||
self.failUnlessEqual(result, TEST_DATA)
|
||||
d.addCallback(_after_download)
|
||||
return d
|
||||
|
||||
def _delete_a_share(self, unused=None, sharenum=None):
|
||||
""" Delete one share. """
|
||||
|
||||
shares = self.find_shares()
|
||||
ks = shares.keys()
|
||||
if sharenum is not None:
|
||||
k = [ key for key in shares.keys() if key[1] == sharenum ][0]
|
||||
else:
|
||||
k = random.choice(ks)
|
||||
del shares[k]
|
||||
self.replace_shares(shares, storage_index=self.uri.storage_index)
|
||||
|
||||
return unused
|
||||
|
||||
class Test(common.ShareManglingMixin, unittest.TestCase):
|
||||
def test_test_code(self):
|
||||
# The following process of stashing the shares, running
|
||||
# replace_shares, and asserting that the new set of shares equals the
|
||||
@ -312,19 +21,6 @@ class Test(ShareManglingMixin, unittest.TestCase):
|
||||
stash[0] = res
|
||||
return res
|
||||
d.addCallback(_stash_it)
|
||||
d.addCallback(self.replace_shares, storage_index=self.uri.storage_index)
|
||||
|
||||
def _compare(res):
|
||||
oldshares = stash[0]
|
||||
self.failUnless(isinstance(oldshares, dict), oldshares)
|
||||
self.failUnlessEqual(oldshares, res)
|
||||
|
||||
d.addCallback(self.find_shares)
|
||||
d.addCallback(_compare)
|
||||
|
||||
d.addCallback(lambda ignore: self.replace_shares({}, storage_index=self.uri.storage_index))
|
||||
d.addCallback(self.find_shares)
|
||||
d.addCallback(lambda x: self.failUnlessEqual(x, {}))
|
||||
|
||||
# The following process of deleting 8 of the shares and asserting that you can't
|
||||
# download it is more to test this test code than to test the Tahoe code...
|
||||
@ -347,58 +43,8 @@ class Test(ShareManglingMixin, unittest.TestCase):
|
||||
d.addCallbacks(_after_download_callb, _after_download_errb)
|
||||
d.addCallback(_then_download)
|
||||
|
||||
# The following process of leaving 8 of the shares deleted and asserting that you can't
|
||||
# repair it is more to test this test code than to test the Tahoe code...
|
||||
#TODO def _then_repair(unused=None):
|
||||
#TODO d2 = self.filenode.check_and_repair(Monitor(), verify=False)
|
||||
#TODO def _after_repair(checkandrepairresults):
|
||||
#TODO prerepairres = checkandrepairresults.get_pre_repair_results()
|
||||
#TODO postrepairres = checkandrepairresults.get_post_repair_results()
|
||||
#TODO self.failIf(prerepairres.is_healthy())
|
||||
#TODO self.failIf(postrepairres.is_healthy())
|
||||
#TODO d2.addCallback(_after_repair)
|
||||
#TODO return d2
|
||||
#TODO d.addCallback(_then_repair)
|
||||
return d
|
||||
|
||||
def _count_reads(self):
|
||||
sum_of_read_counts = 0
|
||||
for client in self.clients:
|
||||
counters = client.stats_provider.get_stats()['counters']
|
||||
sum_of_read_counts += counters.get('storage_server.read', 0)
|
||||
return sum_of_read_counts
|
||||
|
||||
def _count_allocates(self):
|
||||
sum_of_allocate_counts = 0
|
||||
for client in self.clients:
|
||||
counters = client.stats_provider.get_stats()['counters']
|
||||
sum_of_allocate_counts += counters.get('storage_server.allocate', 0)
|
||||
return sum_of_allocate_counts
|
||||
|
||||
def _corrupt_a_share(self, unused, corruptor_func, sharenum):
|
||||
shares = self.find_shares()
|
||||
ks = [ key for key in shares.keys() if key[1] == sharenum ]
|
||||
assert ks, (shares.keys(), sharenum)
|
||||
k = ks[0]
|
||||
shares[k] = corruptor_func(shares[k])
|
||||
self.replace_shares(shares, storage_index=self.uri.storage_index)
|
||||
return corruptor_func
|
||||
|
||||
def _corrupt_all_shares(self, unused, corruptor_func):
|
||||
""" All shares on disk will be corrupted by corruptor_func. """
|
||||
shares = self.find_shares()
|
||||
for k in shares.keys():
|
||||
self._corrupt_a_share(unused, corruptor_func, k[1])
|
||||
return corruptor_func
|
||||
|
||||
def _corrupt_a_random_share(self, unused, corruptor_func):
|
||||
""" Exactly one share on disk will be corrupted by corruptor_func. """
|
||||
shares = self.find_shares()
|
||||
ks = shares.keys()
|
||||
k = random.choice(ks)
|
||||
self._corrupt_a_share(unused, corruptor_func, k[1])
|
||||
return corruptor_func
|
||||
|
||||
def test_download(self):
|
||||
""" Basic download. (This functionality is more or less already tested by test code in
|
||||
other modules, but this module is also going to test some more specific things about
|
||||
@ -435,7 +81,7 @@ class Test(ShareManglingMixin, unittest.TestCase):
|
||||
shnums = range(10)
|
||||
random.shuffle(shnums)
|
||||
for i in shnums[:7]:
|
||||
self._corrupt_a_share(None, _corrupt_offset_of_block_hashes_to_truncate_crypttext_hashes, i)
|
||||
self._corrupt_a_share(None, common._corrupt_offset_of_block_hashes_to_truncate_crypttext_hashes, i)
|
||||
before_download_reads = self._count_reads()
|
||||
d.addCallback(_then_corrupt_7)
|
||||
d.addCallback(self._download_and_check_plaintext)
|
||||
@ -481,7 +127,7 @@ class Test(ShareManglingMixin, unittest.TestCase):
|
||||
shnums = range(10)
|
||||
random.shuffle(shnums)
|
||||
for shnum in shnums[:8]:
|
||||
self._corrupt_a_share(None, _corrupt_sharedata_version_number, shnum)
|
||||
self._corrupt_a_share(None, common._corrupt_sharedata_version_number, shnum)
|
||||
d.addCallback(_then_corrupt_8)
|
||||
|
||||
before_download_reads = self._count_reads()
|
||||
@ -509,450 +155,8 @@ class Test(ShareManglingMixin, unittest.TestCase):
|
||||
d.addCallback(_after_attempt)
|
||||
return d
|
||||
|
||||
def test_check_without_verify(self):
|
||||
""" Check says the file is healthy when none of the shares have been touched. It says
|
||||
that the file is unhealthy when all of them have been removed. It doesn't use any reads.
|
||||
"""
|
||||
d = defer.succeed(self.filenode)
|
||||
def _check1(filenode):
|
||||
before_check_reads = self._count_reads()
|
||||
|
||||
d2 = filenode.check(Monitor(), verify=False)
|
||||
def _after_check(checkresults):
|
||||
after_check_reads = self._count_reads()
|
||||
self.failIf(after_check_reads - before_check_reads > 0, after_check_reads - before_check_reads)
|
||||
self.failUnless(checkresults.is_healthy())
|
||||
|
||||
d2.addCallback(_after_check)
|
||||
return d2
|
||||
d.addCallback(_check1)
|
||||
|
||||
d.addCallback(lambda ignore: self.replace_shares({}, storage_index=self.uri.storage_index))
|
||||
def _check2(ignored):
|
||||
before_check_reads = self._count_reads()
|
||||
d2 = self.filenode.check(Monitor(), verify=False)
|
||||
|
||||
def _after_check(checkresults):
|
||||
after_check_reads = self._count_reads()
|
||||
self.failIf(after_check_reads - before_check_reads > 0, after_check_reads - before_check_reads)
|
||||
self.failIf(checkresults.is_healthy())
|
||||
|
||||
d2.addCallback(_after_check)
|
||||
return d2
|
||||
d.addCallback(_check2)
|
||||
|
||||
return d
|
||||
|
||||
def _help_test_verify(self, corruptor_funcs, judgement_func):
|
||||
LEEWAY = 18 # We'll allow you to pass this test even if you trigger eighteen times as many disk reads and blocks sends as would be optimal.
|
||||
DELTA_READS = 10 * LEEWAY # N = 10
|
||||
d = defer.succeed(None)
|
||||
|
||||
d.addCallback(self.find_shares)
|
||||
stash = [None]
|
||||
def _stash_it(res):
|
||||
stash[0] = res
|
||||
return res
|
||||
d.addCallback(_stash_it)
|
||||
def _put_it_all_back(ignored):
|
||||
self.replace_shares(stash[0], storage_index=self.uri.storage_index)
|
||||
return ignored
|
||||
|
||||
def _verify_after_corruption(corruptor_func):
|
||||
before_check_reads = self._count_reads()
|
||||
d2 = self.filenode.check(Monitor(), verify=True)
|
||||
def _after_check(checkresults):
|
||||
after_check_reads = self._count_reads()
|
||||
self.failIf(after_check_reads - before_check_reads > DELTA_READS, (after_check_reads, before_check_reads))
|
||||
try:
|
||||
return judgement_func(checkresults)
|
||||
except Exception, le:
|
||||
le.args = tuple(le.args + ("corruptor_func: " + corruptor_func.__name__,))
|
||||
raise
|
||||
|
||||
d2.addCallback(_after_check)
|
||||
return d2
|
||||
|
||||
for corruptor_func in corruptor_funcs:
|
||||
d.addCallback(self._corrupt_a_random_share, corruptor_func)
|
||||
d.addCallback(_verify_after_corruption)
|
||||
d.addCallback(_put_it_all_back)
|
||||
|
||||
return d
|
||||
|
||||
def test_verify_no_problem(self):
|
||||
""" Verify says the file is healthy when none of the shares have been touched in a way
|
||||
that matters. It doesn't use more than seven times as many reads as it needs."""
|
||||
def judge(checkresults):
|
||||
self.failUnless(checkresults.is_healthy(), (checkresults, checkresults.is_healthy(), checkresults.get_data()))
|
||||
data = checkresults.get_data()
|
||||
self.failUnless(data['count-shares-good'] == 10, data)
|
||||
self.failUnless(len(data['sharemap']) == 10, data)
|
||||
self.failUnless(data['count-shares-needed'] == 3, data)
|
||||
self.failUnless(data['count-shares-expected'] == 10, data)
|
||||
self.failUnless(data['count-good-share-hosts'] == 5, data)
|
||||
self.failUnless(len(data['servers-responding']) == 5, data)
|
||||
self.failUnless(len(data['list-corrupt-shares']) == 0, data)
|
||||
return self._help_test_verify([
|
||||
_corrupt_nothing,
|
||||
_corrupt_size_of_file_data,
|
||||
_corrupt_size_of_sharedata,
|
||||
_corrupt_segment_size, ], judge)
|
||||
|
||||
def test_verify_server_visible_corruption(self):
|
||||
""" Corruption which is detected by the server means that the server will send you back
|
||||
a Failure in response to get_bucket instead of giving you the share data. Test that
|
||||
verifier handles these answers correctly. It doesn't use more than seven times as many
|
||||
reads as it needs."""
|
||||
def judge(checkresults):
|
||||
self.failIf(checkresults.is_healthy(), (checkresults, checkresults.is_healthy(), checkresults.get_data()))
|
||||
data = checkresults.get_data()
|
||||
# The server might fail to serve up its other share as well as the corrupted
|
||||
# one, so count-shares-good could be 8 or 9.
|
||||
self.failUnless(data['count-shares-good'] in (8, 9), data)
|
||||
self.failUnless(len(data['sharemap']) in (8, 9,), data)
|
||||
self.failUnless(data['count-shares-needed'] == 3, data)
|
||||
self.failUnless(data['count-shares-expected'] == 10, data)
|
||||
# The server may have served up the non-corrupted share, or it may not have, so
|
||||
# the checker could have detected either 4 or 5 good servers.
|
||||
self.failUnless(data['count-good-share-hosts'] in (4, 5), data)
|
||||
self.failUnless(len(data['servers-responding']) in (4, 5), data)
|
||||
# If the server served up the other share, then the checker should consider it good, else it should
|
||||
# not.
|
||||
self.failUnless((data['count-shares-good'] == 9) == (data['count-good-share-hosts'] == 5), data)
|
||||
self.failUnless(len(data['list-corrupt-shares']) == 0, data)
|
||||
return self._help_test_verify([
|
||||
_corrupt_file_version_number,
|
||||
], judge)
|
||||
|
||||
def test_verify_share_incompatibility(self):
|
||||
def judge(checkresults):
|
||||
self.failIf(checkresults.is_healthy(), (checkresults, checkresults.is_healthy(), checkresults.get_data()))
|
||||
data = checkresults.get_data()
|
||||
self.failUnless(data['count-shares-good'] == 9, data)
|
||||
self.failUnless(len(data['sharemap']) == 9, data)
|
||||
self.failUnless(data['count-shares-needed'] == 3, data)
|
||||
self.failUnless(data['count-shares-expected'] == 10, data)
|
||||
self.failUnless(data['count-good-share-hosts'] == 5, data)
|
||||
self.failUnless(len(data['servers-responding']) == 5, data)
|
||||
self.failUnless(len(data['list-corrupt-shares']) == 1, data)
|
||||
self.failUnless(len(data['list-corrupt-shares']) == data['count-corrupt-shares'], data)
|
||||
self.failUnless(len(data['list-incompatible-shares']) == data['count-incompatible-shares'], data)
|
||||
self.failUnless(len(data['list-incompatible-shares']) == 0, data)
|
||||
return self._help_test_verify([
|
||||
_corrupt_sharedata_version_number,
|
||||
], judge)
|
||||
|
||||
def test_verify_server_invisible_corruption(self):
|
||||
def judge(checkresults):
|
||||
self.failIf(checkresults.is_healthy(), (checkresults, checkresults.is_healthy(), checkresults.get_data()))
|
||||
data = checkresults.get_data()
|
||||
self.failUnless(data['count-shares-good'] == 9, data)
|
||||
self.failUnless(data['count-shares-needed'] == 3, data)
|
||||
self.failUnless(data['count-shares-expected'] == 10, data)
|
||||
self.failUnless(data['count-good-share-hosts'] == 5, data)
|
||||
self.failUnless(data['count-corrupt-shares'] == 1, (data,))
|
||||
self.failUnless(len(data['list-corrupt-shares']) == 1, data)
|
||||
self.failUnless(len(data['list-corrupt-shares']) == data['count-corrupt-shares'], data)
|
||||
self.failUnless(len(data['list-incompatible-shares']) == data['count-incompatible-shares'], data)
|
||||
self.failUnless(len(data['list-incompatible-shares']) == 0, data)
|
||||
self.failUnless(len(data['servers-responding']) == 5, data)
|
||||
self.failUnless(len(data['sharemap']) == 9, data)
|
||||
return self._help_test_verify([
|
||||
_corrupt_offset_of_sharedata,
|
||||
_corrupt_offset_of_uri_extension,
|
||||
_corrupt_offset_of_uri_extension_to_force_short_read,
|
||||
_corrupt_share_data,
|
||||
_corrupt_length_of_uri_extension,
|
||||
_corrupt_uri_extension,
|
||||
], judge)
|
||||
|
||||
def test_verify_server_invisible_corruption_offset_of_block_hashtree_to_truncate_crypttext_hashtree_TODO(self):
|
||||
def judge(checkresults):
|
||||
self.failIf(checkresults.is_healthy(), (checkresults, checkresults.is_healthy(), checkresults.get_data()))
|
||||
data = checkresults.get_data()
|
||||
self.failUnless(data['count-shares-good'] == 9, data)
|
||||
self.failUnless(data['count-shares-needed'] == 3, data)
|
||||
self.failUnless(data['count-shares-expected'] == 10, data)
|
||||
self.failUnless(data['count-good-share-hosts'] == 5, data)
|
||||
self.failUnless(data['count-corrupt-shares'] == 1, (data,))
|
||||
self.failUnless(len(data['list-corrupt-shares']) == 1, data)
|
||||
self.failUnless(len(data['list-corrupt-shares']) == data['count-corrupt-shares'], data)
|
||||
self.failUnless(len(data['list-incompatible-shares']) == data['count-incompatible-shares'], data)
|
||||
self.failUnless(len(data['list-incompatible-shares']) == 0, data)
|
||||
self.failUnless(len(data['servers-responding']) == 5, data)
|
||||
self.failUnless(len(data['sharemap']) == 9, data)
|
||||
return self._help_test_verify([
|
||||
_corrupt_offset_of_block_hashes_to_truncate_crypttext_hashes,
|
||||
], judge)
|
||||
test_verify_server_invisible_corruption_offset_of_block_hashtree_to_truncate_crypttext_hashtree_TODO.todo = "Verifier doesn't yet properly detect this kind of corruption."
|
||||
|
||||
def test_verify_server_invisible_corruption_offset_of_block_hashtree_TODO(self):
|
||||
def judge(checkresults):
|
||||
self.failIf(checkresults.is_healthy(), (checkresults, checkresults.is_healthy(), checkresults.get_data()))
|
||||
data = checkresults.get_data()
|
||||
self.failUnless(data['count-shares-good'] == 9, data)
|
||||
self.failUnless(data['count-shares-needed'] == 3, data)
|
||||
self.failUnless(data['count-shares-expected'] == 10, data)
|
||||
self.failUnless(data['count-good-share-hosts'] == 5, data)
|
||||
self.failUnless(data['count-corrupt-shares'] == 1, (data,))
|
||||
self.failUnless(len(data['list-corrupt-shares']) == 1, data)
|
||||
self.failUnless(len(data['list-corrupt-shares']) == data['count-corrupt-shares'], data)
|
||||
self.failUnless(len(data['list-incompatible-shares']) == data['count-incompatible-shares'], data)
|
||||
self.failUnless(len(data['list-incompatible-shares']) == 0, data)
|
||||
self.failUnless(len(data['servers-responding']) == 5, data)
|
||||
self.failUnless(len(data['sharemap']) == 9, data)
|
||||
return self._help_test_verify([
|
||||
_corrupt_offset_of_block_hashes,
|
||||
], judge)
|
||||
test_verify_server_invisible_corruption_offset_of_block_hashtree_TODO.todo = "Verifier doesn't yet properly detect this kind of corruption."
|
||||
|
||||
def test_verify_server_invisible_corruption_sharedata_plausible_version(self):
|
||||
def judge(checkresults):
|
||||
self.failIf(checkresults.is_healthy(), (checkresults, checkresults.is_healthy(), checkresults.get_data()))
|
||||
data = checkresults.get_data()
|
||||
self.failUnless(data['count-shares-good'] == 9, data)
|
||||
self.failUnless(data['count-shares-needed'] == 3, data)
|
||||
self.failUnless(data['count-shares-expected'] == 10, data)
|
||||
self.failUnless(data['count-good-share-hosts'] == 5, data)
|
||||
self.failUnless(data['count-corrupt-shares'] == 1, (data,))
|
||||
self.failUnless(len(data['list-corrupt-shares']) == 1, data)
|
||||
self.failUnless(len(data['list-corrupt-shares']) == data['count-corrupt-shares'], data)
|
||||
self.failUnless(len(data['list-incompatible-shares']) == data['count-incompatible-shares'], data)
|
||||
self.failUnless(len(data['list-incompatible-shares']) == 0, data)
|
||||
self.failUnless(len(data['servers-responding']) == 5, data)
|
||||
self.failUnless(len(data['sharemap']) == 9, data)
|
||||
return self._help_test_verify([
|
||||
_corrupt_sharedata_version_number_to_plausible_version,
|
||||
], judge)
|
||||
|
||||
def test_verify_server_invisible_corruption_offset_of_share_hashtree_TODO(self):
|
||||
def judge(checkresults):
|
||||
self.failIf(checkresults.is_healthy(), (checkresults, checkresults.is_healthy(), checkresults.get_data()))
|
||||
data = checkresults.get_data()
|
||||
self.failUnless(data['count-shares-good'] == 9, data)
|
||||
self.failUnless(data['count-shares-needed'] == 3, data)
|
||||
self.failUnless(data['count-shares-expected'] == 10, data)
|
||||
self.failUnless(data['count-good-share-hosts'] == 5, data)
|
||||
self.failUnless(data['count-corrupt-shares'] == 1, (data,))
|
||||
self.failUnless(len(data['list-corrupt-shares']) == 1, data)
|
||||
self.failUnless(len(data['list-corrupt-shares']) == data['count-corrupt-shares'], data)
|
||||
self.failUnless(len(data['list-incompatible-shares']) == data['count-incompatible-shares'], data)
|
||||
self.failUnless(len(data['list-incompatible-shares']) == 0, data)
|
||||
self.failUnless(len(data['servers-responding']) == 5, data)
|
||||
self.failUnless(len(data['sharemap']) == 9, data)
|
||||
return self._help_test_verify([
|
||||
_corrupt_offset_of_share_hashes,
|
||||
], judge)
|
||||
test_verify_server_invisible_corruption_offset_of_share_hashtree_TODO.todo = "Verifier doesn't yet properly detect this kind of corruption."
|
||||
|
||||
def test_verify_server_invisible_corruption_offset_of_ciphertext_hashtree_TODO(self):
|
||||
def judge(checkresults):
|
||||
self.failIf(checkresults.is_healthy(), (checkresults, checkresults.is_healthy(), checkresults.get_data()))
|
||||
data = checkresults.get_data()
|
||||
self.failUnless(data['count-shares-good'] == 9, data)
|
||||
self.failUnless(data['count-shares-needed'] == 3, data)
|
||||
self.failUnless(data['count-shares-expected'] == 10, data)
|
||||
self.failUnless(data['count-good-share-hosts'] == 5, data)
|
||||
self.failUnless(data['count-corrupt-shares'] == 1, (data,))
|
||||
self.failUnless(len(data['list-corrupt-shares']) == 1, data)
|
||||
self.failUnless(len(data['list-corrupt-shares']) == data['count-corrupt-shares'], data)
|
||||
self.failUnless(len(data['list-incompatible-shares']) == data['count-incompatible-shares'], data)
|
||||
self.failUnless(len(data['list-incompatible-shares']) == 0, data)
|
||||
self.failUnless(len(data['servers-responding']) == 5, data)
|
||||
self.failUnless(len(data['sharemap']) == 9, data)
|
||||
return self._help_test_verify([
|
||||
_corrupt_offset_of_ciphertext_hash_tree,
|
||||
], judge)
|
||||
test_verify_server_invisible_corruption_offset_of_ciphertext_hashtree_TODO.todo = "Verifier doesn't yet properly detect this kind of corruption."
|
||||
|
||||
def test_verify_server_invisible_corruption_cryptext_hash_tree_TODO(self):
|
||||
def judge(checkresults):
|
||||
self.failIf(checkresults.is_healthy(), (checkresults, checkresults.is_healthy(), checkresults.get_data()))
|
||||
data = checkresults.get_data()
|
||||
self.failUnless(data['count-shares-good'] == 9, data)
|
||||
self.failUnless(data['count-shares-needed'] == 3, data)
|
||||
self.failUnless(data['count-shares-expected'] == 10, data)
|
||||
self.failUnless(data['count-good-share-hosts'] == 5, data)
|
||||
self.failUnless(data['count-corrupt-shares'] == 1, (data,))
|
||||
self.failUnless(len(data['list-corrupt-shares']) == 1, data)
|
||||
self.failUnless(len(data['list-corrupt-shares']) == data['count-corrupt-shares'], data)
|
||||
self.failUnless(len(data['list-incompatible-shares']) == data['count-incompatible-shares'], data)
|
||||
self.failUnless(len(data['list-incompatible-shares']) == 0, data)
|
||||
self.failUnless(len(data['servers-responding']) == 5, data)
|
||||
self.failUnless(len(data['sharemap']) == 9, data)
|
||||
return self._help_test_verify([
|
||||
_corrupt_crypttext_hash_tree,
|
||||
], judge)
|
||||
test_verify_server_invisible_corruption_cryptext_hash_tree_TODO.todo = "Verifier doesn't yet properly detect this kind of corruption."
|
||||
|
||||
def test_verify_server_invisible_corruption_block_hash_tree_TODO(self):
|
||||
def judge(checkresults):
|
||||
self.failIf(checkresults.is_healthy(), (checkresults, checkresults.is_healthy(), checkresults.get_data()))
|
||||
data = checkresults.get_data()
|
||||
self.failUnless(data['count-shares-good'] == 9, data)
|
||||
self.failUnless(data['count-shares-needed'] == 3, data)
|
||||
self.failUnless(data['count-shares-expected'] == 10, data)
|
||||
self.failUnless(data['count-good-share-hosts'] == 5, data)
|
||||
self.failUnless(data['count-corrupt-shares'] == 1, (data,))
|
||||
self.failUnless(len(data['list-corrupt-shares']) == 1, data)
|
||||
self.failUnless(len(data['list-corrupt-shares']) == data['count-corrupt-shares'], data)
|
||||
self.failUnless(len(data['list-incompatible-shares']) == data['count-incompatible-shares'], data)
|
||||
self.failUnless(len(data['list-incompatible-shares']) == 0, data)
|
||||
self.failUnless(len(data['servers-responding']) == 5, data)
|
||||
self.failUnless(len(data['sharemap']) == 9, data)
|
||||
return self._help_test_verify([
|
||||
_corrupt_block_hashes,
|
||||
], judge)
|
||||
test_verify_server_invisible_corruption_block_hash_tree_TODO.todo = "Verifier doesn't yet properly detect this kind of corruption."
|
||||
|
||||
def test_verify_server_invisible_corruption_share_hash_tree_TODO(self):
|
||||
def judge(checkresults):
|
||||
self.failIf(checkresults.is_healthy(), (checkresults, checkresults.is_healthy(), checkresults.get_data()))
|
||||
data = checkresults.get_data()
|
||||
self.failUnless(data['count-shares-good'] == 9, data)
|
||||
self.failUnless(data['count-shares-needed'] == 3, data)
|
||||
self.failUnless(data['count-shares-expected'] == 10, data)
|
||||
self.failUnless(data['count-good-share-hosts'] == 5, data)
|
||||
self.failUnless(data['count-corrupt-shares'] == 1, (data,))
|
||||
self.failUnless(len(data['list-corrupt-shares']) == 1, data)
|
||||
self.failUnless(len(data['list-corrupt-shares']) == data['count-corrupt-shares'], data)
|
||||
self.failUnless(len(data['list-incompatible-shares']) == data['count-incompatible-shares'], data)
|
||||
self.failUnless(len(data['list-incompatible-shares']) == 0, data)
|
||||
self.failUnless(len(data['servers-responding']) == 5, data)
|
||||
self.failUnless(len(data['sharemap']) == 9, data)
|
||||
return self._help_test_verify([
|
||||
_corrupt_share_hashes,
|
||||
], judge)
|
||||
test_verify_server_invisible_corruption_share_hash_tree_TODO.todo = "Verifier doesn't yet properly detect this kind of corruption."
|
||||
|
||||
def test_repair(self):
|
||||
""" Repair replaces a share that got deleted. """
|
||||
# N == 10. 7 is the "efficiency leeway" -- we'll allow you to pass this test even if
|
||||
# you trigger seven times as many disk reads and blocks sends as would be optimal.
|
||||
DELTA_READS = 10 * 7
|
||||
# We'll allow you to pass this test only if you repair the missing share using only a
|
||||
# single allocate.
|
||||
DELTA_ALLOCATES = 1
|
||||
|
||||
d = defer.succeed(self.filenode)
|
||||
d.addCallback(self._delete_a_share, sharenum=2)
|
||||
|
||||
def _repair_from_deletion_of_1(filenode):
|
||||
before_repair_reads = self._count_reads()
|
||||
before_repair_allocates = self._count_allocates()
|
||||
|
||||
d2 = filenode.check_and_repair(Monitor(), verify=False)
|
||||
def _after_repair(checkandrepairresults):
|
||||
assert isinstance(checkandrepairresults, check_results.CheckAndRepairResults), checkandrepairresults
|
||||
prerepairres = checkandrepairresults.get_pre_repair_results()
|
||||
assert isinstance(prerepairres, check_results.CheckResults), prerepairres
|
||||
postrepairres = checkandrepairresults.get_post_repair_results()
|
||||
assert isinstance(postrepairres, check_results.CheckResults), postrepairres
|
||||
after_repair_reads = self._count_reads()
|
||||
after_repair_allocates = self._count_allocates()
|
||||
|
||||
# print "delta was ", after_repair_reads - before_repair_reads, after_repair_allocates - before_repair_allocates
|
||||
self.failIf(after_repair_reads - before_repair_reads > DELTA_READS)
|
||||
self.failIf(after_repair_allocates - before_repair_allocates > DELTA_ALLOCATES)
|
||||
self.failIf(prerepairres.is_healthy())
|
||||
self.failUnless(postrepairres.is_healthy())
|
||||
|
||||
# Now we inspect the filesystem to make sure that it has 10 shares.
|
||||
shares = self.find_shares()
|
||||
self.failIf(len(shares) < 10)
|
||||
|
||||
# Now we delete seven of the other shares, then try to download the file and
|
||||
# assert that it succeeds at downloading and has the right contents. This can't
|
||||
# work unless it has already repaired the previously-deleted share #2.
|
||||
for sharenum in range(3, 10):
|
||||
self._delete_a_share(sharenum=sharenum)
|
||||
|
||||
return self._download_and_check_plaintext()
|
||||
|
||||
d2.addCallback(_after_repair)
|
||||
return d2
|
||||
d.addCallback(_repair_from_deletion_of_1)
|
||||
|
||||
# Now we repair again to get all of those 7 back...
|
||||
def _repair_from_deletion_of_7(filenode):
    """Delete-7-shares repair stage: the previous stage deleted shares
    #3..#9, so the file is down to its last few shares.  Run
    check-and-repair and assert that the repairer restores full health
    without doing excessive server I/O.
    """
    # Snapshot the server-side operation counters so the deltas below
    # measure only what the repair itself cost.
    before_repair_reads = self._count_reads()
    before_repair_allocates = self._count_allocates()

    # verify=False: a plain check (not a full verify) should be enough
    # for the repairer to notice the missing shares.
    d2 = filenode.check_and_repair(Monitor(), verify=False)
    def _after_repair(checkandrepairresults):
        prerepairres = checkandrepairresults.get_pre_repair_results()
        postrepairres = checkandrepairresults.get_post_repair_results()
        after_repair_reads = self._count_reads()
        after_repair_allocates = self._count_allocates()

        # print "delta was ", after_repair_reads - before_repair_reads, after_repair_allocates - before_repair_allocates
        # Repair must not read more than the check-budget allows, and
        # must not allocate more than 7 shares' worth of space (only 7
        # shares were deleted).  DELTA_READS / DELTA_ALLOCATES are
        # defined elsewhere in this file — presumably per-share budgets;
        # TODO(review) confirm their units.
        self.failIf(after_repair_reads - before_repair_reads > DELTA_READS)
        self.failIf(after_repair_allocates - before_repair_allocates > (DELTA_ALLOCATES*7))
        # Before repair the file was damaged; after repair it is healthy.
        self.failIf(prerepairres.is_healthy())
        self.failUnless(postrepairres.is_healthy())

        # Now we inspect the filesystem to make sure that it has 10 shares.
        shares = self.find_shares()
        self.failIf(len(shares) < 10)

        # Re-download and verify the plaintext contents (helper defined
        # elsewhere in this test class).
        return self._download_and_check_plaintext()

    d2.addCallback(_after_repair)
    return d2
d.addCallback(_repair_from_deletion_of_7)
|
||||
|
||||
def _repair_from_corruption(filenode):
    """Corruption repair stage: a share has just been corrupted by one
    of the corruptor functions below; run check-and-repair and assert
    the repairer detects and fixes the damage cheaply.

    NOTE(review): this callback receives the previous callback's
    result as *filenode* — presumably self._corrupt_a_random_share
    returns the filenode; confirm against that helper.
    """
    # Snapshot server-side operation counters to bound the repair cost.
    before_repair_reads = self._count_reads()
    before_repair_allocates = self._count_allocates()

    d2 = filenode.check_and_repair(Monitor(), verify=False)
    def _after_repair(checkandrepairresults):
        prerepairres = checkandrepairresults.get_pre_repair_results()
        postrepairres = checkandrepairresults.get_post_repair_results()
        after_repair_reads = self._count_reads()
        after_repair_allocates = self._count_allocates()

        # print "delta was ", after_repair_reads - before_repair_reads, after_repair_allocates - before_repair_allocates
        # Only one share was corrupted, so the repair must stay within
        # a single-share read/allocate budget.
        self.failIf(after_repair_reads - before_repair_reads > DELTA_READS)
        self.failIf(after_repair_allocates - before_repair_allocates > DELTA_ALLOCATES)
        # Unhealthy before the repair, healthy afterwards.
        self.failIf(prerepairres.is_healthy())
        self.failUnless(postrepairres.is_healthy())

        return self._download_and_check_plaintext()

    d2.addCallback(_after_repair)
    return d2

# Exercise the repairer against every kind of single-share corruption
# we know how to inflict (corruptor helpers are defined elsewhere in
# this module; each one damages a different field of the share file).
for corruptor_func in (
    _corrupt_file_version_number,
    _corrupt_sharedata_version_number,
    _corrupt_sharedata_version_number_to_plausible_version,
    _corrupt_offset_of_sharedata,
    _corrupt_offset_of_ciphertext_hash_tree,
    _corrupt_offset_of_block_hashes,
    _corrupt_offset_of_share_hashes,
    _corrupt_offset_of_uri_extension,
    _corrupt_share_data,
    _corrupt_crypttext_hash_tree,
    _corrupt_block_hashes,
    _corrupt_share_hashes,
    _corrupt_length_of_uri_extension,
    _corrupt_uri_extension,
    ):
    # Now we corrupt a share...
    d.addCallback(self._corrupt_a_random_share, corruptor_func)
    # And repair...
    d.addCallback(_repair_from_corruption)

return d
|
||||
# twisted.trial "todo" marker: the whole test is expected to fail
# until the immutable-file repairer is actually implemented.
test_repair.todo = "We haven't implemented a repairer yet."
|
||||
|
||||
|
||||
# XXX extend these tests to show that the checker detects which specific share on which specific server is broken -- this is necessary so that the checker results can be passed to the repairer and the repairer can go ahead and upload fixes without first doing what is effectively a check (/verify) run
|
||||
|
||||
# XXX extend these tests to show bad behavior of various kinds from servers: raising exception from each remove_foo() method, for example
|
||||
|
||||
# XXX test disconnect DeadReferenceError from get_buckets and get_block_whatsit
|
||||
|
||||
# XXX test corruption that truncates other hash trees than just the crypttext hash tree
|
||||
|
Loading…
x
Reference in New Issue
Block a user