2007-03-29 21:01:28 +00:00
|
|
|
#! /usr/bin/env python
|
2006-12-14 03:32:35 +00:00
|
|
|
|
|
|
|
from twisted.trial import unittest
|
|
|
|
from twisted.internet import defer
|
2007-04-16 23:30:21 +00:00
|
|
|
from twisted.python.failure import Failure
|
2007-03-30 23:50:50 +00:00
|
|
|
from foolscap import eventual
|
2007-04-06 04:17:42 +00:00
|
|
|
from allmydata import encode, download
|
2007-03-30 20:20:01 +00:00
|
|
|
from allmydata.uri import pack_uri
|
2006-12-14 03:32:35 +00:00
|
|
|
from cStringIO import StringIO
|
|
|
|
|
2007-03-30 23:50:50 +00:00
|
|
|
class FakePeer:
    """In-memory stand-in for a remote peer.

    Owns a single FakeStorageServer and emulates Foolscap remote method
    invocation by dispatching to local methods of the same name.
    """

    def __init__(self, mode="good"):
        # 'mode' is forwarded to the storage server to control its
        # allocate_buckets behavior ("good", "full", "already got them").
        self.ss = FakeStorageServer(mode)

    def callRemote(self, methname, *args, **kwargs):
        # Emulate a remote call: resolve the method by name and run it via
        # maybeDeferred so the caller always receives a Deferred, whether
        # the method returns a value or raises.
        return defer.maybeDeferred(
            lambda: getattr(self, methname)(*args, **kwargs))

    def get_service(self, sname):
        # The only service this fake peer offers is its storage server.
        assert sname == "storageserver"
        return self.ss
|
|
|
|
|
|
|
|
class FakeStorageServer:
    """Local fake of a storage server.

    Remote calls are delayed by one reactor turn (eventual.fireEventually)
    to mimic network latency before dispatching to the local method.
    """

    def __init__(self, mode):
        # mode: "full" (no room), "already got them" (claims to hold all
        # requested shares), anything else = grant fresh buckets.
        self.mode = mode

    def callRemote(self, methname, *args, **kwargs):
        # Fire on a later turn, then dispatch by method name; the caller
        # gets a Deferred either way, like a real remote reference.
        d = eventual.fireEventually()
        d.addCallback(lambda ign: getattr(self, methname)(*args, **kwargs))
        return d

    def allocate_buckets(self, verifierid, sharenums, shareize, blocksize, canary):
        # Returns (already_have, allocated) like the real server.
        # NOTE(review): 'shareize' looks like a typo for 'sharesize'; the
        # argument is unused here, so behavior is unaffected — kept as-is
        # to preserve the call signature.
        if self.mode == "full":
            # No room: nothing held, nothing allocated.
            return (set(), {})
        if self.mode == "already got them":
            # Claim to already hold every requested share.
            return (set(sharenums), {})
        # Normal case: hand out a fresh writer bucket per share number.
        return (set(), dict([(shnum, FakeBucketWriter()) for shnum in sharenums]))
|
2006-12-14 03:32:35 +00:00
|
|
|
|
2007-03-30 23:50:50 +00:00
|
|
|
class FakeBucketWriter:
    # Used on both the write (upload) and read (download) sides of the
    # round-trip tests.  'mode' selects deliberate corruption on reads:
    #   "good"          - return data faithfully
    #   "bad block"     - flip a bit in every block returned
    #   "bad blockhash" - flip a bit in block_hashes[1]
    #   "bad sharehash" - flip a bit in the hash half of share_hashes[1]
    def __init__(self, mode="good"):
        self.mode = mode
        self.blocks = {}          # segment number -> block data
        self.block_hashes = None  # list of hashes, set exactly once
        self.share_hashes = None  # list of (sharenum, hash), set exactly once
        self.closed = False       # no writes allowed after close()

    def callRemote(self, methname, *args, **kwargs):
        # Emulate a remote reference: look the method up by name and wrap
        # the call in a Deferred via maybeDeferred.
        return defer.maybeDeferred(
            lambda: getattr(self, methname)(*args, **kwargs))

    def put_block(self, segmentnum, data):
        assert not self.closed
        # each segment may be written at most once
        assert segmentnum not in self.blocks
        self.blocks[segmentnum] = data

    def put_block_hashes(self, blockhashes):
        assert not self.closed
        assert self.block_hashes is None  # write-once
        self.block_hashes = blockhashes

    def put_share_hashes(self, sharehashes):
        assert not self.closed
        assert self.share_hashes is None  # write-once
        self.share_hashes = sharehashes

    def close(self):
        assert not self.closed
        self.closed = True

    def flip_bit(self, good):
        # Corrupt a string by toggling the low-order bit of its last byte.
        return good[:-1] + chr(ord(good[-1]) ^ 0x01)

    def get_block(self, blocknum):
        assert isinstance(blocknum, int)
        if self.mode != "bad block":
            return self.blocks[blocknum]
        return self.flip_bit(self.blocks[blocknum])

    def get_block_hashes(self):
        if self.mode != "bad blockhash":
            return self.block_hashes
        # Corrupt a copy so the stored list stays pristine.
        corrupted = self.block_hashes[:]
        corrupted[1] = self.flip_bit(corrupted[1])
        return corrupted

    def get_share_hashes(self):
        if self.mode != "bad sharehash":
            return self.share_hashes
        # Corrupt the hash half of entry 1, leaving the share number alone.
        corrupted = self.share_hashes[:]
        num, h = corrupted[1]
        corrupted[1] = (num, self.flip_bit(h))
        return corrupted
|
|
|
|
|
|
|
|
|
2007-03-30 23:50:50 +00:00
|
|
|
class Encode(unittest.TestCase):
    def test_send(self):
        """Encode a small file into 100 shares over 4 segments and verify
        that every fake bucket ends up closed and fully populated."""
        e = encode.Encoder()
        data = "happy happy joy joy" * 4
        e.setup(StringIO(data))
        NUM_SHARES = 100
        assert e.num_shares == NUM_SHARES # else we'll be completely confused
        # Shrink the segment size so the file spans several segments.
        e.segment_size = 25
        e.setup_codec() # need to rebuild the codec for that change
        NUM_SEGMENTS = 4
        assert (NUM_SEGMENTS-1)*e.segment_size < len(data) <= NUM_SEGMENTS*e.segment_size

        shareholders = {}
        all_shareholders = []
        for shnum in range(NUM_SHARES):
            bucket = FakeBucketWriter()
            shareholders[shnum] = bucket
            all_shareholders.append(bucket)
        e.set_shareholders(shareholders)
        d = e.start()

        def _check(roothash):
            # start() fires with the 32-byte share-hash-tree root.
            self.failUnless(isinstance(roothash, str))
            self.failUnlessEqual(len(roothash), 32)
            for i, bucket in enumerate(all_shareholders):
                self.failUnless(bucket.closed)
                self.failUnlessEqual(len(bucket.blocks), NUM_SEGMENTS)
                #self.failUnlessEqual(len(bucket.block_hashes), NUM_SEGMENTS)
                # that isn't true: each peer gets a full tree, so it's more
                # like 2n-1 but with rounding to a power of two
                for h in bucket.block_hashes:
                    self.failUnlessEqual(len(h), 32)
                #self.failUnlessEqual(len(bucket.share_hashes), NUM_SHARES)
                # that isn't true: each peer only gets the chain they need
                for (hashnum, h) in bucket.share_hashes:
                    self.failUnless(isinstance(hashnum, int))
                    self.failUnlessEqual(len(h), 32)
        d.addCallback(_check)
        return d
|
2007-03-30 20:20:01 +00:00
|
|
|
|
2007-03-30 23:50:50 +00:00
|
|
|
class Roundtrip(unittest.TestCase):
|
2007-04-16 23:30:21 +00:00
|
|
|
def send_and_recover(self, NUM_SHARES, NUM_SEGMENTS=4, bucket_modes={}):
|
2007-04-06 04:17:42 +00:00
|
|
|
e = encode.Encoder()
|
2007-03-30 20:20:01 +00:00
|
|
|
data = "happy happy joy joy" * 4
|
|
|
|
e.setup(StringIO(data))
|
2007-03-30 23:50:50 +00:00
|
|
|
|
2007-03-30 20:20:01 +00:00
|
|
|
assert e.num_shares == NUM_SHARES # else we'll be completely confused
|
|
|
|
e.segment_size = 25 # force use of multiple segments
|
|
|
|
e.setup_codec() # need to rebuild the codec for that change
|
2007-03-30 23:50:50 +00:00
|
|
|
|
2007-03-30 20:20:01 +00:00
|
|
|
assert (NUM_SEGMENTS-1)*e.segment_size < len(data) <= NUM_SEGMENTS*e.segment_size
|
|
|
|
shareholders = {}
|
|
|
|
all_shareholders = []
|
2007-03-30 23:50:50 +00:00
|
|
|
all_peers = []
|
2007-03-30 20:20:01 +00:00
|
|
|
for shnum in range(NUM_SHARES):
|
2007-04-16 23:30:21 +00:00
|
|
|
mode = bucket_modes.get(shnum, "good")
|
|
|
|
peer = FakeBucketWriter(mode)
|
2007-03-30 20:20:01 +00:00
|
|
|
shareholders[shnum] = peer
|
|
|
|
all_shareholders.append(peer)
|
|
|
|
e.set_shareholders(shareholders)
|
|
|
|
d = e.start()
|
|
|
|
def _uploaded(roothash):
|
|
|
|
URI = pack_uri(e._codec.get_encoder_type(),
|
|
|
|
e._codec.get_serialized_params(),
|
2007-03-30 23:50:50 +00:00
|
|
|
e._tail_codec.get_serialized_params(),
|
2007-03-30 20:20:01 +00:00
|
|
|
"V" * 20,
|
|
|
|
roothash,
|
|
|
|
e.required_shares,
|
|
|
|
e.num_shares,
|
|
|
|
e.file_size,
|
|
|
|
e.segment_size)
|
|
|
|
client = None
|
|
|
|
target = download.Data()
|
|
|
|
fd = download.FileDownloader(client, URI, target)
|
|
|
|
for shnum in range(NUM_SHARES):
|
2007-04-12 20:07:40 +00:00
|
|
|
bucket = all_shareholders[shnum]
|
|
|
|
fd.add_share_bucket(shnum, bucket)
|
2007-03-30 20:20:01 +00:00
|
|
|
fd._got_all_shareholders(None)
|
2007-03-30 23:50:50 +00:00
|
|
|
d2 = fd._download_all_segments(None)
|
2007-03-30 20:20:01 +00:00
|
|
|
d2.addCallback(fd._done)
|
|
|
|
return d2
|
|
|
|
d.addCallback(_uploaded)
|
|
|
|
def _downloaded(newdata):
|
|
|
|
self.failUnless(newdata == data)
|
|
|
|
d.addCallback(_downloaded)
|
|
|
|
|
|
|
|
return d
|
2007-03-30 23:50:50 +00:00
|
|
|
|
|
|
|
def test_one_share_per_peer(self):
|
2007-04-16 23:30:21 +00:00
|
|
|
return self.send_and_recover(100)
|
|
|
|
|
|
|
|
def test_bad_blocks(self):
|
|
|
|
# the first 74 servers have bad blocks, which will be caught by the
|
|
|
|
# blockhashes
|
|
|
|
modemap = dict([(i, "bad block")
|
|
|
|
for i in range(74)]
|
|
|
|
+ [(i, "good")
|
|
|
|
for i in range(74, 100)])
|
|
|
|
return self.send_and_recover(100, bucket_modes=modemap)
|
|
|
|
|
|
|
|
def test_bad_blocks_failure(self):
|
|
|
|
# the first 76 servers have bad blocks, which will be caught by the
|
|
|
|
# blockhashes, and the download will fail
|
|
|
|
modemap = dict([(i, "bad block")
|
|
|
|
for i in range(76)]
|
|
|
|
+ [(i, "good")
|
|
|
|
for i in range(76, 100)])
|
|
|
|
d = self.send_and_recover(100, bucket_modes=modemap)
|
|
|
|
def _done(res):
|
|
|
|
self.failUnless(isinstance(res, Failure))
|
|
|
|
self.failUnless(res.check(download.NotEnoughPeersError))
|
|
|
|
d.addBoth(_done)
|
|
|
|
return d
|
|
|
|
|
|
|
|
def test_bad_blockhashes(self):
|
|
|
|
# the first 74 servers have bad block hashes, so the blockhash tree
|
|
|
|
# will not validate
|
|
|
|
modemap = dict([(i, "bad blockhash")
|
|
|
|
for i in range(74)]
|
|
|
|
+ [(i, "good")
|
|
|
|
for i in range(74, 100)])
|
|
|
|
return self.send_and_recover(100, bucket_modes=modemap)
|
|
|
|
|
|
|
|
def test_bad_blockhashes_failure(self):
|
|
|
|
# the first 76 servers have bad block hashes, so the blockhash tree
|
|
|
|
# will not validate, and the download will fail
|
|
|
|
modemap = dict([(i, "bad blockhash")
|
|
|
|
for i in range(76)]
|
|
|
|
+ [(i, "good")
|
|
|
|
for i in range(76, 100)])
|
|
|
|
d = self.send_and_recover(100, bucket_modes=modemap)
|
|
|
|
def _done(res):
|
|
|
|
self.failUnless(isinstance(res, Failure))
|
|
|
|
self.failUnless(res.check(download.NotEnoughPeersError))
|
|
|
|
d.addBoth(_done)
|
|
|
|
return d
|
|
|
|
|
|
|
|
def test_bad_sharehashes(self):
|
|
|
|
# the first 74 servers have bad block hashes, so the sharehash tree
|
|
|
|
# will not validate
|
|
|
|
modemap = dict([(i, "bad sharehash")
|
|
|
|
for i in range(74)]
|
|
|
|
+ [(i, "good")
|
|
|
|
for i in range(74, 100)])
|
|
|
|
return self.send_and_recover(100, bucket_modes=modemap)
|
|
|
|
|
|
|
|
def test_bad_sharehashes_failure(self):
|
|
|
|
# the first 76 servers have bad block hashes, so the sharehash tree
|
|
|
|
# will not validate, and the download will fail
|
|
|
|
modemap = dict([(i, "bad sharehash")
|
|
|
|
for i in range(76)]
|
|
|
|
+ [(i, "good")
|
|
|
|
for i in range(76, 100)])
|
|
|
|
d = self.send_and_recover(100, bucket_modes=modemap)
|
|
|
|
def _done(res):
|
|
|
|
self.failUnless(isinstance(res, Failure))
|
|
|
|
self.failUnless(res.check(download.NotEnoughPeersError))
|
|
|
|
d.addBoth(_done)
|
|
|
|
return d
|
2007-03-30 23:50:50 +00:00
|
|
|
|