import os
from twisted.trial import unittest
from twisted.python.failure import Failure
from twisted.python import log
from cStringIO import StringIO

from allmydata import upload, encode, uri
from allmydata.interfaces import IFileURI
from allmydata.util.assertutil import precondition
from allmydata.util.deferredutil import DeferredListShouldSucceed
from foolscap import eventual

MiB = 1024*1024
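
# upload() fires its Deferred with a results object whose .uri attribute
# holds the URI of the newly-uploaded file. These tests only care about
# that URI, so this helper extracts it for use as a callback.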
def extract_uri(results):
    return results.uri
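
# Exercise the IUploadable front-ends directly: Data, FileHandle, and
# FileName all present the same get_size/read/close interface to the
# uploader.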
class Uploadable(unittest.TestCase):
    def shouldEqual(self, data, expected):
        self.failUnless(isinstance(data, list))
        for e in data:
            self.failUnless(isinstance(e, str))
        s = "".join(data)
        self.failUnlessEqual(s, expected)

    def test_filehandle_random_key(self):
        return self._test_filehandle(True)

    def test_filehandle_content_hash_key(self):
        return self._test_filehandle(False)

    def _test_filehandle(self, randomkey):
        s = StringIO("a"*41)
        u = upload.FileHandle(s, randomkey)
        d = u.get_size()
        d.addCallback(self.failUnlessEqual, 41)
        d.addCallback(lambda res: u.read(1))
        d.addCallback(self.shouldEqual, "a")
        d.addCallback(lambda res: u.read(80))
        d.addCallback(self.shouldEqual, "a"*40)
        d.addCallback(lambda res: u.close()) # this doesn't close the filehandle
        d.addCallback(lambda res: s.close()) # that privilege is reserved for us
        return d

    def test_filename(self):
        basedir = "upload/Uploadable/test_filename"
        os.makedirs(basedir)
        fn = os.path.join(basedir, "file")
        f = open(fn, "w")
        f.write("a"*41)
        f.close()
        u = upload.FileName(fn)
        d = u.get_size()
        d.addCallback(self.failUnlessEqual, 41)
        d.addCallback(lambda res: u.read(1))
        d.addCallback(self.shouldEqual, "a")
        d.addCallback(lambda res: u.read(80))
        d.addCallback(self.shouldEqual, "a"*40)
        d.addCallback(lambda res: u.close())
        return d

    def test_data(self):
        s = "a"*41
        u = upload.Data(s)
        d = u.get_size()
        d.addCallback(self.failUnlessEqual, 41)
        d.addCallback(lambda res: u.read(1))
        d.addCallback(self.shouldEqual, "a")
        d.addCallback(lambda res: u.read(80))
        d.addCallback(self.shouldEqual, "a"*40)
        d.addCallback(lambda res: u.close())
        return d
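
# FakeStorageServer stands in for a remote storage server. Its mode
# controls allocate_buckets(): "full" rejects every share, "already got
# them" claims it is already holding every requested share, and any
# other mode accepts everything. callRemote() routes through foolscap's
# eventual-send queue, so calls remain asynchronous like real remote
# invocations.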
class FakeStorageServer:
    def __init__(self, mode):
        self.mode = mode
        self.allocated = []
        self.queries = 0

    def callRemote(self, methname, *args, **kwargs):
        def _call():
            meth = getattr(self, methname)
            return meth(*args, **kwargs)
        d = eventual.fireEventually()
        d.addCallback(lambda res: _call())
        return d

    def allocate_buckets(self, storage_index, renew_secret, cancel_secret,
                         sharenums, share_size, canary):
        #print "FakeStorageServer.allocate_buckets(num=%d, size=%d)" % (len(sharenums), share_size)
        self.queries += 1
        if self.mode == "full":
            return (set(), {},)
        elif self.mode == "already got them":
            return (set(sharenums), {},)
        else:
            for shnum in sharenums:
                self.allocated.append( (storage_index, shnum) )
            return (set(),
                    dict([( shnum, FakeBucketWriter(share_size) )
                          for shnum in sharenums]),
                    )

class FakeBucketWriter:
    # a diagnostic version of storageserver.BucketWriter
    def __init__(self, size):
        self.data = StringIO()
        self.closed = False
        self._size = size

    def callRemote(self, methname, *args, **kwargs):
        def _call():
            meth = getattr(self, "remote_" + methname)
            return meth(*args, **kwargs)
        d = eventual.fireEventually()
        d.addCallback(lambda res: _call())
        return d

    def remote_write(self, offset, data):
        precondition(not self.closed)
        precondition(offset >= 0)
        precondition(offset+len(data) <= self._size,
                     "offset=%d + data=%d > size=%d" %
                     (offset, len(data), self._size))
        self.data.seek(offset)
        self.data.write(data)

    def remote_close(self):
        precondition(not self.closed)
        self.closed = True

    def remote_abort(self):
        log.err("uh oh, I was asked to abort")
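
# FakeClient supplies just enough of the client interface for
# upload.Uploader to run: a permuted list of FakeStorageServers,
# encoding parameters (k shares needed out of n total, with "happy" as
# the shares-of-happiness threshold), and empty lease secrets.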
class FakeClient:
    DEFAULT_ENCODING_PARAMETERS = {"k": 25,
                                   "happy": 75,
                                   "n": 100,
                                   "max_segment_size": 1*MiB,
                                   }
    def __init__(self, mode="good", num_servers=50):
        self.mode = mode
        self.num_servers = num_servers

    def log(self, *args, **kwargs):
        pass

    def get_permuted_peers(self, storage_index, include_myself):
        peers = [ ("%20d" % fakeid, FakeStorageServer(self.mode),)
                  for fakeid in range(self.num_servers) ]
        self.last_peers = [p[1] for p in peers]
        return peers

    def get_encoding_parameters(self):
        return self.DEFAULT_ENCODING_PARAMETERS

    def get_renewal_secret(self):
        return ""

    def get_cancel_secret(self):
        return ""
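
# Data longer than upload.Uploader.URI_LIT_SIZE_THRESHOLD is uploaded
# as a CHK file; anything at or below the threshold is packed directly
# into a LiteralFileURI. DATA is deliberately above the threshold, and
# SIZE_SMALL and SIZE_ZERO are below it.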
DATA = """
Once upon a time, there was a beautiful princess named Buttercup. She lived
in a magical land where every file was stored securely among millions of
machines, and nobody ever worried about their data being lost ever again.
The End.
"""
assert len(DATA) > upload.Uploader.URI_LIT_SIZE_THRESHOLD

SIZE_ZERO = 0
SIZE_SMALL = 16
SIZE_LARGE = len(DATA)
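
# Convenience wrappers: build the appropriate IUploadable and hand it
# to the uploader, returning the upload's Deferred.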
def upload_data(uploader, data):
    u = upload.Data(data)
    return uploader.upload(u)

def upload_filename(uploader, filename):
    u = upload.FileName(filename)
    return uploader.upload(u)

def upload_filehandle(uploader, fh):
    u = upload.FileHandle(fh)
    return uploader.upload(u)
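
# GoodServer: every fake server accepts every share, so uploads of
# zero-length, small, and large data (from a string, a filehandle, or a
# named file) should all succeed and produce the expected kind of URI.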
class GoodServer(unittest.TestCase):
    def setUp(self):
        self.node = FakeClient(mode="good")
        self.u = upload.Uploader()
        self.u.running = True
        self.u.parent = self.node

    def set_encoding_parameters(self, k, happy, n, max_segsize=1*MiB):
        p = {"k": k,
             "happy": happy,
             "n": n,
             "max_segment_size": max_segsize,
             }
        self.node.DEFAULT_ENCODING_PARAMETERS = p

    def _check_small(self, newuri, size):
        u = IFileURI(newuri)
        self.failUnless(isinstance(u, uri.LiteralFileURI))
        self.failUnlessEqual(len(u.data), size)

    def _check_large(self, newuri, size):
        u = IFileURI(newuri)
        self.failUnless(isinstance(u, uri.CHKFileURI))
        self.failUnless(isinstance(u.storage_index, str))
        self.failUnlessEqual(len(u.storage_index), 16)
        self.failUnless(isinstance(u.key, str))
        self.failUnlessEqual(len(u.key), 16)
        self.failUnlessEqual(u.size, size)

    def get_data(self, size):
        return DATA[:size]

    def test_data_zero(self):
        data = self.get_data(SIZE_ZERO)
        d = upload_data(self.u, data)
        d.addCallback(extract_uri)
        d.addCallback(self._check_small, SIZE_ZERO)
        return d

    def test_data_small(self):
        data = self.get_data(SIZE_SMALL)
        d = upload_data(self.u, data)
        d.addCallback(extract_uri)
        d.addCallback(self._check_small, SIZE_SMALL)
        return d

    def test_data_large(self):
        data = self.get_data(SIZE_LARGE)
        d = upload_data(self.u, data)
        d.addCallback(extract_uri)
        d.addCallback(self._check_large, SIZE_LARGE)
        return d

    def test_data_large_odd_segments(self):
        data = self.get_data(SIZE_LARGE)
        segsize = int(SIZE_LARGE / 2.5)
        # we want 3 segments, since that's not a power of two
        self.set_encoding_parameters(25, 75, 100, segsize)
        d = upload_data(self.u, data)
        d.addCallback(extract_uri)
        d.addCallback(self._check_large, SIZE_LARGE)
        return d

    def test_filehandle_zero(self):
        data = self.get_data(SIZE_ZERO)
        d = upload_filehandle(self.u, StringIO(data))
        d.addCallback(extract_uri)
        d.addCallback(self._check_small, SIZE_ZERO)
        return d

    def test_filehandle_small(self):
        data = self.get_data(SIZE_SMALL)
        d = upload_filehandle(self.u, StringIO(data))
        d.addCallback(extract_uri)
        d.addCallback(self._check_small, SIZE_SMALL)
        return d

    def test_filehandle_large(self):
        data = self.get_data(SIZE_LARGE)
        d = upload_filehandle(self.u, StringIO(data))
        d.addCallback(extract_uri)
        d.addCallback(self._check_large, SIZE_LARGE)
        return d

    def test_filename_zero(self):
        fn = "Uploader-test_filename_zero.data"
        f = open(fn, "wb")
        data = self.get_data(SIZE_ZERO)
        f.write(data)
        f.close()
        d = upload_filename(self.u, fn)
        d.addCallback(extract_uri)
        d.addCallback(self._check_small, SIZE_ZERO)
        return d

    def test_filename_small(self):
        fn = "Uploader-test_filename_small.data"
        f = open(fn, "wb")
        data = self.get_data(SIZE_SMALL)
        f.write(data)
        f.close()
        d = upload_filename(self.u, fn)
        d.addCallback(extract_uri)
        d.addCallback(self._check_small, SIZE_SMALL)
        return d

    def test_filename_large(self):
        fn = "Uploader-test_filename_large.data"
        f = open(fn, "wb")
        data = self.get_data(SIZE_LARGE)
        f.write(data)
        f.close()
        d = upload_filename(self.u, fn)
        d.addCallback(extract_uri)
        d.addCallback(self._check_large, SIZE_LARGE)
        return d
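
# FullServer: every fake server is full and refuses all shares, so a
# large upload must fail with encode.NotEnoughPeersError.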
class FullServer(unittest.TestCase):
    def setUp(self):
        self.node = FakeClient(mode="full")
        self.u = upload.Uploader()
        self.u.running = True
        self.u.parent = self.node

    def _should_fail(self, f):
        self.failUnless(isinstance(f, Failure) and f.check(encode.NotEnoughPeersError), f)

    def test_data_large(self):
        data = DATA
        d = upload_data(self.u, data)
        d.addBoth(self._should_fail)
        return d
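
# PeerSelection checks how shares get distributed across servers: the
# uploader should spread them as evenly as possible, and should need at
# most two allocate_buckets() queries per server to place them.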
class PeerSelection(unittest.TestCase):

    def make_client(self, num_servers=50):
        self.node = FakeClient(mode="good", num_servers=num_servers)
        self.u = upload.Uploader()
        self.u.running = True
        self.u.parent = self.node

    def get_data(self, size):
        return DATA[:size]

    def _check_large(self, newuri, size):
        u = IFileURI(newuri)
        self.failUnless(isinstance(u, uri.CHKFileURI))
        self.failUnless(isinstance(u.storage_index, str))
        self.failUnlessEqual(len(u.storage_index), 16)
        self.failUnless(isinstance(u.key, str))
        self.failUnlessEqual(len(u.key), 16)
        self.failUnlessEqual(u.size, size)

    def set_encoding_parameters(self, k, happy, n, max_segsize=1*MiB):
        p = {"k": k,
             "happy": happy,
             "n": n,
             "max_segment_size": max_segsize,
             }
        self.node.DEFAULT_ENCODING_PARAMETERS = p

    def test_one_each(self):
        # if we have 50 shares, and there are 50 peers, and they all accept a
        # share, we should get exactly one share per peer
        self.make_client()
        data = self.get_data(SIZE_LARGE)
        self.set_encoding_parameters(25, 30, 50)
        d = upload_data(self.u, data)
        d.addCallback(extract_uri)
        d.addCallback(self._check_large, SIZE_LARGE)
        def _check(res):
            for p in self.node.last_peers:
                allocated = p.allocated
                self.failUnlessEqual(len(allocated), 1)
                self.failUnlessEqual(p.queries, 1)
        d.addCallback(_check)
        return d

    def test_two_each(self):
        # if we have 100 shares, and there are 50 peers, and they all accept
        # all shares, we should get exactly two shares per peer
        self.make_client()
        data = self.get_data(SIZE_LARGE)
        self.set_encoding_parameters(50, 75, 100)
        d = upload_data(self.u, data)
        d.addCallback(extract_uri)
        d.addCallback(self._check_large, SIZE_LARGE)
        def _check(res):
            for p in self.node.last_peers:
                allocated = p.allocated
                self.failUnlessEqual(len(allocated), 2)
                self.failUnlessEqual(p.queries, 2)
        d.addCallback(_check)
        return d

    def test_one_each_plus_one_extra(self):
        # if we have 51 shares, and there are 50 peers, then one peer gets
        # two shares and the rest get just one
        self.make_client()
        data = self.get_data(SIZE_LARGE)
        self.set_encoding_parameters(24, 41, 51)
        d = upload_data(self.u, data)
        d.addCallback(extract_uri)
        d.addCallback(self._check_large, SIZE_LARGE)
        def _check(res):
            got_one = []
            got_two = []
            for p in self.node.last_peers:
                allocated = p.allocated
                self.failUnless(len(allocated) in (1,2), len(allocated))
                if len(allocated) == 1:
                    self.failUnlessEqual(p.queries, 1)
                    got_one.append(p)
                else:
                    self.failUnlessEqual(p.queries, 2)
                    got_two.append(p)
            self.failUnlessEqual(len(got_one), 49)
            self.failUnlessEqual(len(got_two), 1)
        d.addCallback(_check)
        return d

    def test_four_each(self):
        # if we have 200 shares, and there are 50 peers, then each peer gets
        # 4 shares. The design goal is to accomplish this with only two
        # queries per peer.
        self.make_client()
        data = self.get_data(SIZE_LARGE)
        self.set_encoding_parameters(100, 150, 200)
        d = upload_data(self.u, data)
        d.addCallback(extract_uri)
        d.addCallback(self._check_large, SIZE_LARGE)
        def _check(res):
            for p in self.node.last_peers:
                allocated = p.allocated
                self.failUnlessEqual(len(allocated), 4)
                self.failUnlessEqual(p.queries, 2)
        d.addCallback(_check)
        return d

    def test_three_of_ten(self):
        # if we have 10 shares and 3 servers, I want to see 3+3+4 rather than
        # 4+4+2
        self.make_client(3)
        data = self.get_data(SIZE_LARGE)
        self.set_encoding_parameters(3, 5, 10)
        d = upload_data(self.u, data)
        d.addCallback(extract_uri)
        d.addCallback(self._check_large, SIZE_LARGE)
        def _check(res):
            counts = {}
            for p in self.node.last_peers:
                allocated = p.allocated
                counts[len(allocated)] = counts.get(len(allocated), 0) + 1
            histogram = [counts.get(i, 0) for i in range(5)]
            self.failUnlessEqual(histogram, [0,0,0,2,1])
        d.addCallback(_check)
        return d
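
# The storage index is derived from the file's encryption key. With
# content-hash keys, identical data (and identical encoding parameters)
# must map to the same storage index, while different parameters or a
# random key must produce a different one.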
class StorageIndex(unittest.TestCase):
    def test_params_must_matter(self):
        DATA = "I am some data"
        u = upload.Data(DATA)
        eu = upload.EncryptAnUploadable(u)
        d1 = eu.get_storage_index()

        # CHK means the same data should encrypt the same way
        u = upload.Data(DATA)
        eu = upload.EncryptAnUploadable(u)
        d1a = eu.get_storage_index()

        # but if we change the encoding parameters, it should be different
        u = upload.Data(DATA)
        u.encoding_param_k = u.default_encoding_param_k + 1
        eu = upload.EncryptAnUploadable(u)
        d2 = eu.get_storage_index()

        # and if we use a random key, it should be different than the CHK
        u = upload.Data(DATA, contenthashkey=False)
        eu = upload.EncryptAnUploadable(u)
        d3 = eu.get_storage_index()
        # and different from another instance
        u = upload.Data(DATA, contenthashkey=False)
        eu = upload.EncryptAnUploadable(u)
        d4 = eu.get_storage_index()

        d = DeferredListShouldSucceed([d1,d1a,d2,d3,d4])
        def _done(res):
            si1, si1a, si2, si3, si4 = res
            self.failUnlessEqual(si1, si1a)
            self.failIfEqual(si1, si2)
            self.failIfEqual(si1, si3)
            self.failIfEqual(si1, si4)
            self.failIfEqual(si3, si4)
        d.addCallback(_done)
        return d

# TODO:
# upload with exactly 75 peers (shares_of_happiness)
# have a download fail
# cancel a download (need to implement more cancel stuff)