2006-12-01 09:54:28 +00:00
|
|
|
|
2007-07-20 05:53:29 +00:00
|
|
|
import os
|
2006-12-01 09:54:28 +00:00
|
|
|
from twisted.trial import unittest
|
2007-03-30 21:54:33 +00:00
|
|
|
from twisted.python.failure import Failure
|
2007-07-13 22:09:01 +00:00
|
|
|
from twisted.internet import defer
|
2006-12-02 02:17:26 +00:00
|
|
|
from cStringIO import StringIO
|
2006-12-01 09:54:28 +00:00
|
|
|
|
2007-07-21 22:40:36 +00:00
|
|
|
from allmydata import upload, encode, uri
|
|
|
|
from allmydata.interfaces import IFileURI
|
2007-07-13 22:09:01 +00:00
|
|
|
from allmydata.util.assertutil import precondition
|
|
|
|
from foolscap import eventual
|
2006-12-01 09:54:28 +00:00
|
|
|
|
2007-07-20 05:53:29 +00:00
|
|
|
class Uploadable(unittest.TestCase):
    """Tests for the IUploadable wrappers (FileHandle, FileName, Data).

    Each wrapper is fed 41 bytes of "a" and must report its size, then
    serve the data back through read() in the requested chunk sizes.
    """

    def shouldEqual(self, data, expected):
        """Assert that 'data' is a list of strings whose concatenation
        equals 'expected' (read() returns a list of strings)."""
        self.failUnless(isinstance(data, list))
        for e in data:
            self.failUnless(isinstance(e, str))
        s = "".join(data)
        self.failUnlessEqual(s, expected)

    def _check_uploadable(self, u):
        """Common checks for a 41-byte uploadable containing "a"*41:
        get_size() reports 41, read(1) yields one byte, read(80) yields
        the remaining 40, then close() succeeds. Returns the Deferred."""
        d = u.get_size()
        d.addCallback(self.failUnlessEqual, 41)
        d.addCallback(lambda res: u.read(1))
        d.addCallback(self.shouldEqual, "a")
        d.addCallback(lambda res: u.read(80))
        d.addCallback(self.shouldEqual, "a"*40)
        d.addCallback(lambda res: u.close())
        return d

    def test_filehandle(self):
        s = StringIO("a"*41)
        u = upload.FileHandle(s)
        d = self._check_uploadable(u)
        # u.close() doesn't close the filehandle: that privilege is
        # reserved for whoever opened it, i.e. us.
        d.addCallback(lambda res: s.close())
        return d

    def test_filename(self):
        basedir = "upload/Uploadable/test_filename"
        os.makedirs(basedir)
        fn = os.path.join(basedir, "file")
        f = open(fn, "w")
        f.write("a"*41)
        f.close()
        u = upload.FileName(fn)
        return self._check_uploadable(u)

    def test_data(self):
        u = upload.Data("a"*41)
        return self._check_uploadable(u)
|
|
|
|
|
2007-07-13 22:09:01 +00:00
|
|
|
class FakePeer:
    """Stand-in for a remote peer.

    Wraps a FakeStorageServer and answers callRemote() by dispatching
    to the local method of the same name, always returning a Deferred.
    """
    def __init__(self, mode="good"):
        self.ss = FakeStorageServer(mode)

    def callRemote(self, methname, *args, **kwargs):
        # maybeDeferred ensures callers get a Deferred whether the
        # local method returns a value or raises synchronously.
        def _invoke():
            meth = getattr(self, methname)
            return meth(*args, **kwargs)
        return defer.maybeDeferred(_invoke)

    def get_service(self, sname):
        # the only service our fake knows about is the storage server
        assert sname == "storageserver"
        return self.ss
|
|
|
|
|
|
|
|
class FakeStorageServer:
    """Stand-in for a storage server.

    'mode' selects how allocate_buckets() answers: "full" refuses every
    share, "already got them" claims every share is already present,
    and anything else accepts all requested shares.
    """
    def __init__(self, mode):
        self.mode = mode

    def callRemote(self, methname, *args, **kwargs):
        # fire on a later turn of the event loop, like a real remote call
        d = eventual.fireEventually()
        def _invoke(res):
            meth = getattr(self, methname)
            return meth(*args, **kwargs)
        d.addCallback(_invoke)
        return d

    def allocate_buckets(self, storage_index, renew_secret, cancel_secret,
                         sharenums, share_size, canary):
        #print "FakeStorageServer.allocate_buckets(num=%d, size=%d)" % (len(sharenums), share_size)
        if self.mode == "full":
            # no room at all: we accept nothing, we hold nothing
            return (set(), {},)
        if self.mode == "already got them":
            # pretend every requested share is already stored here
            return (set(sharenums), {},)
        # default ("good"): accept everything, one writer per share
        writers = {}
        for shnum in sharenums:
            writers[shnum] = FakeBucketWriter(share_size)
        return (set(), writers,)
|
|
|
|
|
|
|
|
class FakeBucketWriter:
    # a diagnostic version of storageserver.BucketWriter: accumulates
    # writes into an in-memory StringIO and checks the caller's protocol
    # (no writes past the declared size, no writes after close).
    def __init__(self, size):
        self.data = StringIO()
        self.closed = False
        self._size = size

    def callRemote(self, methname, *args, **kwargs):
        # route through the event loop, then dispatch to remote_<name>
        d = eventual.fireEventually()
        def _invoke(res):
            meth = getattr(self, "remote_" + methname)
            return meth(*args, **kwargs)
        d.addCallback(_invoke)
        return d

    def remote_write(self, offset, data):
        # enforce the write contract before touching the buffer
        precondition(not self.closed)
        precondition(offset >= 0)
        precondition(offset+len(data) <= self._size,
                     "offset=%d + data=%d > size=%d" %
                     (offset, len(data), self._size))
        self.data.seek(offset)
        self.data.write(data)

    def remote_close(self):
        precondition(not self.closed)
        self.closed = True
|
2007-03-30 21:54:33 +00:00
|
|
|
|
2006-12-01 09:54:28 +00:00
|
|
|
class FakeClient:
    """Stub client node: supplies 50 FakePeers (all in the same mode)
    and leaves the encoding knobs at their defaults (None)."""
    def __init__(self, mode="good"):
        self.mode = mode

    def get_permuted_peers(self, storage_index, include_myself):
        # 50 peers, each identified by its index zero-padded to 20 chars
        peers = []
        for fakeid in range(50):
            peerid = "%20d" % fakeid
            peers.append((peerid, peerid, FakePeer(self.mode),))
        return peers

    def get_push_to_ourselves(self):
        return None

    def get_encoding_parameters(self):
        return None
|
2007-01-16 04:22:22 +00:00
|
|
|
|
2007-07-12 20:22:36 +00:00
|
|
|
# Shared test payload. It must be longer than the uploader's
# URI_LIT_SIZE_THRESHOLD so that uploading all of it produces a real
# CHK file rather than being inlined into a literal (LIT) URI.
DATA = """
Once upon a time, there was a beautiful princess named Buttercup. She lived
in a magical land where every file was stored securely among millions of
machines, and nobody ever worried about their data being lost ever again.
The End.
"""
assert len(DATA) > upload.Uploader.URI_LIT_SIZE_THRESHOLD

SIZE_ZERO = 0    # empty file: checked against _check_small (LIT URI)
SIZE_SMALL = 16  # small file: also checked against _check_small (LIT URI)
SIZE_LARGE = len(DATA)  # whole payload: checked against _check_large (CHK URI)
|
|
|
|
|
2007-03-30 21:54:33 +00:00
|
|
|
class GoodServer(unittest.TestCase):
    """Upload tests against peers that accept everything ("good" mode).

    Zero-byte and 16-byte uploads must come back as literal (LIT) URIs;
    uploads of the full DATA payload must come back as CHK URIs. Each
    of the three input styles (data, filehandle, filename) is exercised
    at each size.
    """

    def setUp(self):
        self.node = FakeClient(mode="good")
        self.u = upload.Uploader()
        self.u.running = True
        self.u.parent = self.node

    def _check_small(self, newuri, size):
        """Assert 'newuri' is a LiteralFileURI holding 'size' bytes."""
        u = IFileURI(newuri)
        self.failUnless(isinstance(u, uri.LiteralFileURI))
        self.failUnlessEqual(len(u.data), size)

    def _check_large(self, newuri, size):
        """Assert 'newuri' is a CHKFileURI with a 16-byte storage index,
        a 16-byte key, and the expected file size."""
        u = IFileURI(newuri)
        self.failUnless(isinstance(u, uri.CHKFileURI))
        self.failUnless(isinstance(u.storage_index, str))
        self.failUnlessEqual(len(u.storage_index), 16)
        self.failUnless(isinstance(u.key, str))
        self.failUnlessEqual(len(u.key), 16)
        self.failUnlessEqual(u.size, size)

    def get_data(self, size):
        """Return the first 'size' bytes of the shared DATA payload."""
        return DATA[:size]

    def _write_datafile(self, fn, size):
        """Write the first 'size' bytes of DATA to local file 'fn'."""
        f = open(fn, "wb")
        f.write(self.get_data(size))
        f.close()

    def test_data_zero(self):
        d = self.u.upload_data(self.get_data(SIZE_ZERO))
        d.addCallback(self._check_small, SIZE_ZERO)
        return d

    def test_data_small(self):
        d = self.u.upload_data(self.get_data(SIZE_SMALL))
        d.addCallback(self._check_small, SIZE_SMALL)
        return d

    def test_data_large(self):
        d = self.u.upload_data(self.get_data(SIZE_LARGE))
        d.addCallback(self._check_large, SIZE_LARGE)
        return d

    def test_data_large_odd_segments(self):
        data = self.get_data(SIZE_LARGE)
        segsize = int(SIZE_LARGE / 2.5)
        # we want 3 segments, since that's not a power of two
        d = self.u.upload_data(data, {"max_segment_size": segsize})
        d.addCallback(self._check_large, SIZE_LARGE)
        return d

    def test_filehandle_zero(self):
        d = self.u.upload_filehandle(StringIO(self.get_data(SIZE_ZERO)))
        d.addCallback(self._check_small, SIZE_ZERO)
        return d

    def test_filehandle_small(self):
        d = self.u.upload_filehandle(StringIO(self.get_data(SIZE_SMALL)))
        d.addCallback(self._check_small, SIZE_SMALL)
        return d

    def test_filehandle_large(self):
        d = self.u.upload_filehandle(StringIO(self.get_data(SIZE_LARGE)))
        d.addCallback(self._check_large, SIZE_LARGE)
        return d

    def test_filename_zero(self):
        fn = "Uploader-test_filename_zero.data"
        self._write_datafile(fn, SIZE_ZERO)
        d = self.u.upload_filename(fn)
        d.addCallback(self._check_small, SIZE_ZERO)
        return d

    def test_filename_small(self):
        fn = "Uploader-test_filename_small.data"
        self._write_datafile(fn, SIZE_SMALL)
        d = self.u.upload_filename(fn)
        d.addCallback(self._check_small, SIZE_SMALL)
        return d

    def test_filename_large(self):
        fn = "Uploader-test_filename_large.data"
        self._write_datafile(fn, SIZE_LARGE)
        d = self.u.upload_filename(fn)
        d.addCallback(self._check_large, SIZE_LARGE)
        return d
|
2007-03-30 21:54:33 +00:00
|
|
|
|
|
|
|
class FullServer(unittest.TestCase):
    """Upload tests against peers that are all full ("full" mode):
    uploading a large file must fail with NotEnoughPeersError."""

    def setUp(self):
        self.node = FakeClient(mode="full")
        self.u = upload.Uploader()
        self.u.running = True
        self.u.parent = self.node

    def _should_fail(self, f):
        # the Deferred must have errbacked with NotEnoughPeersError
        self.failUnless(isinstance(f, Failure) and f.check(encode.NotEnoughPeersError))

    def test_data_large(self):
        d = self.u.upload_data(DATA)
        # addBoth: a successful upload also lands here and fails the
        # isinstance(f, Failure) check
        d.addBoth(self._should_fail)
        return d
|
|
|
|
|
2007-04-24 00:30:40 +00:00
|
|
|
|
|
|
|
# TODO:
|
|
|
|
# upload with exactly 75 peers (shares_of_happiness)
|
|
|
|
# have a download fail
|
|
|
|
# cancel a download (need to implement more cancel stuff)
|