2007-03-29 21:01:28 +00:00
|
|
|
#! /usr/bin/env python
|
2006-12-14 03:32:35 +00:00
|
|
|
|
|
|
|
from twisted.trial import unittest
|
|
|
|
from twisted.internet import defer
|
2007-03-30 23:50:50 +00:00
|
|
|
from foolscap import eventual
|
2007-04-06 04:17:42 +00:00
|
|
|
from allmydata import encode, download
|
2007-03-30 20:20:01 +00:00
|
|
|
from allmydata.uri import pack_uri
|
2006-12-14 03:32:35 +00:00
|
|
|
from cStringIO import StringIO
|
|
|
|
|
2007-03-30 23:50:50 +00:00
|
|
|
class FakePeer:
    """Stand-in for a remote peer: holds a FakeStorageServer and answers
    'remote' invocations by dispatching to local methods of this object."""

    def __init__(self, mode="good"):
        # mode is forwarded to the storage server and controls how it
        # responds to allocate_buckets (see FakeStorageServer).
        self.ss = FakeStorageServer(mode)

    def callRemote(self, methname, *args, **kwargs):
        # Dispatch to the local method of the same name, wrapping the
        # result (or any exception) in a Deferred, like a real remote call.
        def _invoke():
            return getattr(self, methname)(*args, **kwargs)
        return defer.maybeDeferred(_invoke)

    def get_service(self, sname):
        # This fake only offers the storage-server service.
        assert sname == "storageserver"
        return self.ss
|
|
|
|
|
|
|
|
class FakeStorageServer:
    """In-memory fake of a storage server.

    `mode` controls allocate_buckets: "full" refuses all shares,
    "already got them" claims it already holds every requested share,
    and anything else ("good") accepts them all.
    """

    def __init__(self, mode):
        self.mode = mode

    def callRemote(self, methname, *args, **kwargs):
        # Like a real remote call: dispatch to the local method, but
        # only after a trip through the event loop (fireEventually).
        d = eventual.fireEventually()
        d.addCallback(lambda ignored:
                      getattr(self, methname)(*args, **kwargs))
        return d

    def allocate_buckets(self, verifierid, sharenums, shareize, blocksize, canary):
        # NOTE(review): 'shareize' looks like a typo for 'sharesize';
        # kept as-is because it is part of the keyword-callable signature.
        if self.mode == "full":
            # server is full: nothing accepted, nothing already present
            return (set(), {})
        if self.mode == "already got them":
            # pretend every requested share is already stored here
            return (set(sharenums), {})
        # "good" mode: accept every requested share, returning a
        # bucket writer for each
        return (set(),
                dict([(shnum, FakeBucketWriter()) for shnum in sharenums]))
|
2006-12-14 03:32:35 +00:00
|
|
|
|
2007-03-30 23:50:50 +00:00
|
|
|
class FakeBucketWriter:
    """In-memory bucket: accepts blocks and hash lists on the write side,
    then serves them back on the read side (for the downloader)."""

    def __init__(self):
        self.blocks = {}          # segmentnum -> block data
        self.block_hashes = None  # set once by put_block_hashes
        self.share_hashes = None  # set once by put_share_hashes
        self.closed = False

    def callRemote(self, methname, *args, **kwargs):
        # Simulate a remote invocation: run the local method inside
        # maybeDeferred so exceptions turn into errbacks.
        def _invoke():
            return getattr(self, methname)(*args, **kwargs)
        return defer.maybeDeferred(_invoke)

    def put_block(self, segmentnum, data):
        assert not self.closed
        # each segment may be written at most once
        assert segmentnum not in self.blocks
        self.blocks[segmentnum] = data

    def put_block_hashes(self, blockhashes):
        assert not self.closed
        # write-once
        assert self.block_hashes is None
        self.block_hashes = blockhashes

    def put_share_hashes(self, sharehashes):
        assert not self.closed
        # write-once
        assert self.share_hashes is None
        self.share_hashes = sharehashes

    def close(self):
        # closing twice is a caller bug
        assert not self.closed
        self.closed = True

    def get_block(self, blocknum):
        assert isinstance(blocknum, int)
        return self.blocks[blocknum]

    def get_block_hashes(self):
        return self.block_hashes

    def get_share_hashes(self):
        return self.share_hashes
|
|
|
|
|
|
|
|
|
2007-03-30 23:50:50 +00:00
|
|
|
class Encode(unittest.TestCase):
    def test_send(self):
        # Encode a small file into 100 shares over 4 segments, then
        # check that every shareholder was closed and received a full
        # set of blocks plus plausibly-sized hash chains.
        NUM_SHARES = 100
        NUM_SEGMENTS = 4
        e = encode.Encoder()
        data = "happy happy joy joy" * 4
        e.setup(StringIO(data))
        assert e.num_shares == NUM_SHARES # else we'll be completely confused
        e.segment_size = 25 # force use of multiple segments
        e.setup_codec() # need to rebuild the codec for that change
        assert (NUM_SEGMENTS-1)*e.segment_size < len(data) <= NUM_SEGMENTS*e.segment_size

        shareholders = {}
        all_shareholders = []
        for shnum in range(NUM_SHARES):
            writer = FakeBucketWriter()
            shareholders[shnum] = writer
            all_shareholders.append(writer)
        e.set_shareholders(shareholders)
        d = e.start()

        def _check(roothash):
            self.failUnless(isinstance(roothash, str))
            self.failUnlessEqual(len(roothash), 32)
            for peer in all_shareholders:
                self.failUnless(peer.closed)
                self.failUnlessEqual(len(peer.blocks), NUM_SEGMENTS)
                # we don't check len(peer.block_hashes) against
                # NUM_SEGMENTS: each peer gets a full tree, so it's more
                # like 2n-1 but with rounding to a power of two
                for h in peer.block_hashes:
                    self.failUnlessEqual(len(h), 32)
                # likewise len(peer.share_hashes) != NUM_SHARES: each
                # peer only gets the chain they need
                for (hashnum, h) in peer.share_hashes:
                    self.failUnless(isinstance(hashnum, int))
                    self.failUnlessEqual(len(h), 32)
        d.addCallback(_check)

        return d
|
2007-03-30 20:20:01 +00:00
|
|
|
|
2007-03-30 23:50:50 +00:00
|
|
|
class Roundtrip(unittest.TestCase):
    def send_and_recover(self, NUM_SHARES, NUM_PEERS, NUM_SEGMENTS=4):
        """Encode a small file into NUM_SHARES shares spread round-robin
        across NUM_PEERS fake peers, download it back, and verify the
        recovered plaintext matches."""
        e = encode.Encoder()
        data = "happy happy joy joy" * 4
        e.setup(StringIO(data))

        assert e.num_shares == NUM_SHARES # else we'll be completely confused
        e.segment_size = 25 # force use of multiple segments
        e.setup_codec() # need to rebuild the codec for that change

        assert (NUM_SEGMENTS-1)*e.segment_size < len(data) <= NUM_SEGMENTS*e.segment_size

        all_peers = [FakeBucketWriter() for i in range(NUM_PEERS)]
        # assign shares to peers round-robin
        shareholders = {}
        all_shareholders = []
        for shnum in range(NUM_SHARES):
            writer = all_peers[shnum % NUM_PEERS]
            shareholders[shnum] = writer
            all_shareholders.append(writer)
        e.set_shareholders(shareholders)
        d = e.start()

        def _uploaded(roothash):
            # build a URI for the freshly-uploaded file
            URI = pack_uri(e._codec.get_encoder_type(),
                           e._codec.get_serialized_params(),
                           e._tail_codec.get_serialized_params(),
                           "V" * 20,
                           roothash,
                           e.required_shares,
                           e.num_shares,
                           e.file_size,
                           e.segment_size)
            client = None
            target = download.Data()
            fd = download.FileDownloader(client, URI, target)
            # hand the downloader our fake buckets directly, bypassing
            # peer discovery
            fd._share_buckets = {}
            for shnum in range(NUM_SHARES):
                fd._share_buckets[shnum] = set([all_shareholders[shnum]])
            fd._got_all_shareholders(None)
            d2 = fd._download_all_segments(None)
            d2.addCallback(fd._done)
            return d2
        d.addCallback(_uploaded)

        def _downloaded(newdata):
            self.failUnless(newdata == data)
        d.addCallback(_downloaded)

        return d

    def test_one_share_per_peer(self):
        # 100 shares spread over 100 peers: exactly one share each
        return self.send_and_recover(100, 100)
|
|
|
|
|