2006-12-01 09:54:28 +00:00
|
|
|
|
2007-07-20 05:53:29 +00:00
|
|
|
import os
|
2008-10-29 04:28:31 +00:00
|
|
|
from cStringIO import StringIO
|
2006-12-01 09:54:28 +00:00
|
|
|
from twisted.trial import unittest
|
2007-03-30 21:54:33 +00:00
|
|
|
from twisted.python.failure import Failure
|
2008-01-24 01:07:34 +00:00
|
|
|
from twisted.python import log
|
2008-06-02 23:57:01 +00:00
|
|
|
from twisted.internet import defer
|
2008-10-29 04:28:31 +00:00
|
|
|
from foolscap import eventual
|
2006-12-01 09:54:28 +00:00
|
|
|
|
versioning: include an "appname" in the application version string in the versioning protocol, and make that appname be controlled by setup.py
It is currently hardcoded in setup.py to be 'allmydata-tahoe'. Ticket #556 is to make it configurable by a runtime command-line argument to setup.py: "--appname=foo", but I suddenly wondered if we really wanted that and at the same time realized that we don't need that for tahoe-1.3.0 release, so this patch just hardcodes it in setup.py.
setup.py inspects a file named 'src/allmydata/_appname.py' and assert that it contains the string "__appname__ = 'allmydata-tahoe'", and creates it if it isn't already present. src/allmydata/__init__.py import _appname and reads __appname__ from it. The rest of the Python code imports allmydata and inspects "allmydata.__appname__", although actually every use it uses "allmydata.__full_version__" instead, where "allmydata.__full_version__" is created in src/allmydata/__init__.py to be:
__full_version__ = __appname + '-' + str(__version__).
All the code that emits an "application version string" when describing what version of a protocol it supports (introducer server, storage server, upload helper), or when describing itself in general (introducer client), uses allmydata.__full_version__.
This fixes ticket #556 at least well enough for tahoe-1.3.0 release.
2009-02-12 00:18:16 +00:00
|
|
|
import allmydata # for __full_version__
|
2008-11-18 07:29:44 +00:00
|
|
|
from allmydata import uri, monitor
|
2008-10-27 20:34:49 +00:00
|
|
|
from allmydata.immutable import upload
|
|
|
|
from allmydata.interfaces import IFileURI, FileTooLargeError, NotEnoughSharesError
|
2007-07-13 22:09:01 +00:00
|
|
|
from allmydata.util.assertutil import precondition
|
2008-02-07 03:03:35 +00:00
|
|
|
from allmydata.util.deferredutil import DeferredListShouldSucceed
|
2009-02-17 00:44:57 +00:00
|
|
|
from no_network import GridTestMixin
|
2008-10-29 04:28:31 +00:00
|
|
|
from common_util import ShouldFailMixin
|
2006-12-01 09:54:28 +00:00
|
|
|
|
2008-01-16 10:03:35 +00:00
|
|
|
MiB = 1024*1024
|
|
|
|
|
2008-02-06 04:01:38 +00:00
|
|
|
def extract_uri(results):
    """Return the .uri attribute of an upload-results object."""
    return results.uri
|
|
|
|
|
2007-07-20 05:53:29 +00:00
|
|
|
class Uploadable(unittest.TestCase):
    """Exercise the IUploadable wrappers: FileHandle, FileName, and Data."""

    def shouldEqual(self, data, expected):
        # read() is specified to return a list of strings
        self.failUnless(isinstance(data, list))
        for chunk in data:
            self.failUnless(isinstance(chunk, str))
        self.failUnlessEqual("".join(data), expected)

    def _check_uploadable(self, u):
        # common pattern for a 41-byte uploadable: check the size, read one
        # byte, then ask for 80 and get only the remaining 40, then close
        d = u.get_size()
        d.addCallback(self.failUnlessEqual, 41)
        d.addCallback(lambda res: u.read(1))
        d.addCallback(self.shouldEqual, "a")
        d.addCallback(lambda res: u.read(80))
        d.addCallback(self.shouldEqual, "a"*40)
        d.addCallback(lambda res: u.close())
        return d

    def test_filehandle_random_key(self):
        return self._test_filehandle(convergence=None)

    def test_filehandle_convergent_encryption(self):
        return self._test_filehandle(convergence="some convergence string")

    def _test_filehandle(self, convergence):
        s = StringIO("a"*41)
        u = upload.FileHandle(s, convergence=convergence)
        d = self._check_uploadable(u) # u.close() doesn't close the filehandle
        d.addCallback(lambda res: s.close()) # that privilege is reserved for us
        return d

    def test_filename(self):
        basedir = "upload/Uploadable/test_filename"
        os.makedirs(basedir)
        fn = os.path.join(basedir, "file")
        f = open(fn, "w")
        f.write("a"*41)
        f.close()
        return self._check_uploadable(upload.FileName(fn, convergence=None))

    def test_data(self):
        return self._check_uploadable(upload.Data("a"*41, convergence=None))
|
|
|
|
|
2007-07-13 22:09:01 +00:00
|
|
|
class FakeStorageServer:
    """In-memory stand-in for a remote storage server.

    'mode' controls allocate_buckets(): "full" rejects every share,
    "already got them" claims to hold every share already, and "small"
    advertises a 10-byte maximum share size; any other mode accepts all
    requested shares.
    """
    def __init__(self, mode):
        self.mode = mode
        self.allocated = [] # (storage_index, shnum) pairs we accepted
        self.queries = 0    # number of allocate_buckets() calls
        if mode == "small":
            max_share_size = 10
        else:
            max_share_size = 2**32
        self.version = { "http://allmydata.org/tahoe/protocols/storage/v1" :
                         { "maximum-immutable-share-size": max_share_size },
                         "application-version": str(allmydata.__full_version__),
                         }

    def callRemote(self, methname, *args, **kwargs):
        # dispatch to the local method, but through the event loop, to
        # imitate a real remote call
        d = eventual.fireEventually()
        d.addCallback(lambda res: getattr(self, methname)(*args, **kwargs))
        return d

    def allocate_buckets(self, storage_index, renew_secret, cancel_secret,
                         sharenums, share_size, canary):
        self.queries += 1
        if self.mode == "full":
            # no room: accept nothing
            return (set(), {},)
        if self.mode == "already got them":
            # pretend we already hold every requested share
            return (set(sharenums), {},)
        self.allocated.extend([(storage_index, shnum)
                               for shnum in sharenums])
        writers = dict([(shnum, FakeBucketWriter(share_size))
                        for shnum in sharenums])
        return (set(), writers)
|
|
|
|
|
|
|
|
class FakeBucketWriter:
    # a diagnostic version of storageserver.BucketWriter
    def __init__(self, size):
        self.data = StringIO()
        self.closed = False
        self._size = size

    def callRemote(self, methname, *args, **kwargs):
        # route through the event loop, then dispatch to remote_<methname>
        d = eventual.fireEventually()
        d.addCallback(lambda res:
                      getattr(self, "remote_" + methname)(*args, **kwargs))
        return d

    def remote_write(self, offset, data):
        precondition(not self.closed)
        precondition(offset >= 0)
        # writes must stay inside the share size we were allocated with
        precondition(offset+len(data) <= self._size,
                     "offset=%d + data=%d > size=%d" %
                     (offset, len(data), self._size))
        self.data.seek(offset)
        self.data.write(data)

    def remote_close(self):
        precondition(not self.closed)
        self.closed = True

    def remote_abort(self):
        # aborts are unexpected in these tests, so make some noise
        log.err("uh oh, I was asked to abort")
|
|
|
|
|
2006-12-01 09:54:28 +00:00
|
|
|
class FakeClient:
    """Stands in for the client node: hands out fake peers and encoding
    parameters to the Uploader."""
    DEFAULT_ENCODING_PARAMETERS = {"k":25,
                                   "happy": 75,
                                   "n": 100,
                                   "max_segment_size": 1*MiB,
                                   }
    def __init__(self, mode="good", num_servers=50):
        self.mode = mode
        self.num_servers = num_servers
        if mode == "some_big_some_small":
            # alternate between "good" (huge share limit) and "small"
            # (10-byte share limit) servers
            self.peers = []
            for fakeid in range(num_servers):
                servermode = "good" if fakeid % 2 else "small"
                self.peers.append( ("%20d" % fakeid,
                                    FakeStorageServer(servermode)) )
        else:
            self.peers = [ ("%20d" % fakeid, FakeStorageServer(self.mode),)
                           for fakeid in range(self.num_servers) ]

    def log(self, *args, **kwargs):
        pass

    def get_permuted_peers(self, storage_index, include_myself):
        # remember which servers we handed out, so tests can inspect them
        self.last_peers = [server for (peerid, server) in self.peers]
        return self.peers

    def get_encoding_parameters(self):
        return self.DEFAULT_ENCODING_PARAMETERS

    def get_renewal_secret(self):
        return ""

    def get_cancel_secret(self):
        return ""
|
|
|
|
|
2008-06-02 23:57:01 +00:00
|
|
|
class GiganticUploadable(upload.FileHandle):
    """An uploadable that claims an arbitrarily huge size and hands back
    zero bytes, used to exercise the too-large rejection path."""
    def __init__(self, size):
        self._size = size
        self._fp = 0 # current read position

    def get_encryption_key(self):
        return defer.succeed("\x00" * 16)

    def get_size(self):
        return defer.succeed(self._size)

    def read(self, length):
        remaining = self._size - self._fp
        length = min(remaining, length)
        self._fp += length
        if self._fp > 1000000:
            # terminate the test early.
            raise RuntimeError("we shouldn't be allowed to get this far")
        return defer.succeed(["\x00" * length])

    def close(self):
        pass
|
|
|
|
|
2007-07-12 20:22:36 +00:00
|
|
|
DATA = """
Once upon a time, there was a beautiful princess named Buttercup. She lived
in a magical land where every file was stored securely among millions of
machines, and nobody ever worried about their data being lost ever again.
The End.
"""

# DATA must be long enough that it will not fit inline in a LIT URI,
# so that "large" tests actually exercise the CHK upload path.
assert len(DATA) > upload.Uploader.URI_LIT_SIZE_THRESHOLD

# canonical test sizes: empty, LIT-sized, and CHK-sized
SIZE_ZERO = 0
SIZE_SMALL = 16
SIZE_LARGE = len(DATA)
|
|
|
|
|
2008-01-31 02:03:19 +00:00
|
|
|
def upload_data(uploader, data):
    """Upload an in-memory string with a random encryption key."""
    return uploader.upload(upload.Data(data, convergence=None))
|
|
|
|
def upload_filename(uploader, filename):
    """Upload a local file by name with a random encryption key."""
    return uploader.upload(upload.FileName(filename, convergence=None))
|
|
|
|
def upload_filehandle(uploader, fh):
    """Upload from an open filehandle with a random encryption key."""
    return uploader.upload(upload.FileHandle(fh, convergence=None))
|
|
|
|
|
2008-06-02 23:57:01 +00:00
|
|
|
class GoodServer(unittest.TestCase, ShouldFailMixin):
    """Uploads against fake servers that accept every share."""

    def setUp(self):
        self.node = FakeClient(mode="good")
        self.u = upload.Uploader()
        self.u.running = True
        self.u.parent = self.node

    def set_encoding_parameters(self, k, happy, n, max_segsize=1*MiB):
        self.node.DEFAULT_ENCODING_PARAMETERS = {"k": k,
                                                 "happy": happy,
                                                 "n": n,
                                                 "max_segment_size": max_segsize,
                                                 }

    def _check_small(self, newuri, size):
        # small files should come back as inline LIT URIs
        u = IFileURI(newuri)
        self.failUnless(isinstance(u, uri.LiteralFileURI))
        self.failUnlessEqual(len(u.data), size)

    def _check_large(self, newuri, size):
        # larger files should come back as CHK URIs with the expected
        # 16-byte storage index and key
        u = IFileURI(newuri)
        self.failUnless(isinstance(u, uri.CHKFileURI))
        self.failUnless(isinstance(u.storage_index, str))
        self.failUnlessEqual(len(u.storage_index), 16)
        self.failUnless(isinstance(u.key, str))
        self.failUnlessEqual(len(u.key), 16)
        self.failUnlessEqual(u.size, size)

    def get_data(self, size):
        return DATA[:size]

    def _check_uri(self, d, checker, size):
        # common tail for every upload test: pull the URI out of the
        # results and run the appropriate checker on it
        d.addCallback(extract_uri)
        d.addCallback(checker, size)
        return d

    def test_too_large(self):
        # we've removed the 4GiB share size limit (see ticket #346 for
        # details), but still have an 8-byte field, so the limit is now
        # 2**64, so make sure we reject files larger than that.
        k = 3; happy = 7; n = 10
        self.set_encoding_parameters(k, happy, n)
        big = k*(2**64)
        data1 = GiganticUploadable(big)
        d = self.shouldFail(FileTooLargeError, "test_too_large-data1",
                            "This file is too large to be uploaded (data_size)",
                            self.u.upload, data1)
        data2 = GiganticUploadable(big-3)
        d.addCallback(lambda res:
                      self.shouldFail(FileTooLargeError,
                                      "test_too_large-data2",
                                      "This file is too large to be uploaded (offsets)",
                                      self.u.upload, data2))
        # I don't know where the actual limit is.. it depends upon how large
        # the hash trees wind up. It's somewhere close to k*4GiB-ln2(size).
        return d

    def test_data_zero(self):
        d = upload_data(self.u, self.get_data(SIZE_ZERO))
        return self._check_uri(d, self._check_small, SIZE_ZERO)

    def test_data_small(self):
        d = upload_data(self.u, self.get_data(SIZE_SMALL))
        return self._check_uri(d, self._check_small, SIZE_SMALL)

    def test_data_large(self):
        d = upload_data(self.u, self.get_data(SIZE_LARGE))
        return self._check_uri(d, self._check_large, SIZE_LARGE)

    def test_data_large_odd_segments(self):
        data = self.get_data(SIZE_LARGE)
        segsize = int(SIZE_LARGE / 2.5)
        # we want 3 segments, since that's not a power of two
        self.set_encoding_parameters(25, 75, 100, segsize)
        d = upload_data(self.u, data)
        return self._check_uri(d, self._check_large, SIZE_LARGE)

    def test_filehandle_zero(self):
        d = upload_filehandle(self.u, StringIO(self.get_data(SIZE_ZERO)))
        return self._check_uri(d, self._check_small, SIZE_ZERO)

    def test_filehandle_small(self):
        d = upload_filehandle(self.u, StringIO(self.get_data(SIZE_SMALL)))
        return self._check_uri(d, self._check_small, SIZE_SMALL)

    def test_filehandle_large(self):
        d = upload_filehandle(self.u, StringIO(self.get_data(SIZE_LARGE)))
        return self._check_uri(d, self._check_large, SIZE_LARGE)

    def _write_datafile(self, fn, size):
        # create a local file holding the first 'size' bytes of DATA
        f = open(fn, "wb")
        f.write(self.get_data(size))
        f.close()

    def test_filename_zero(self):
        fn = "Uploader-test_filename_zero.data"
        self._write_datafile(fn, SIZE_ZERO)
        d = upload_filename(self.u, fn)
        return self._check_uri(d, self._check_small, SIZE_ZERO)

    def test_filename_small(self):
        fn = "Uploader-test_filename_small.data"
        self._write_datafile(fn, SIZE_SMALL)
        d = upload_filename(self.u, fn)
        return self._check_uri(d, self._check_small, SIZE_SMALL)

    def test_filename_large(self):
        fn = "Uploader-test_filename_large.data"
        self._write_datafile(fn, SIZE_LARGE)
        d = upload_filename(self.u, fn)
        return self._check_uri(d, self._check_large, SIZE_LARGE)
|
2007-03-30 21:54:33 +00:00
|
|
|
|
|
|
|
class FullServer(unittest.TestCase):
    """Uploads must fail cleanly when every server refuses to take shares."""

    def setUp(self):
        self.node = FakeClient(mode="full")
        self.u = upload.Uploader()
        self.u.running = True
        self.u.parent = self.node

    def _should_fail(self, f):
        # we expect a Failure wrapping NotEnoughSharesError, not a success
        self.failUnless(isinstance(f, Failure) and f.check(NotEnoughSharesError), f)

    def test_data_large(self):
        d = upload_data(self.u, DATA)
        d.addBoth(self._should_fail)
        return d
|
|
|
|
|
2007-09-16 08:25:03 +00:00
|
|
|
class PeerSelection(unittest.TestCase):
    """Check how shares are distributed across the available peers."""

    def make_client(self, num_servers=50):
        self.node = FakeClient(mode="good", num_servers=num_servers)
        self.u = upload.Uploader()
        self.u.running = True
        self.u.parent = self.node

    def get_data(self, size):
        return DATA[:size]

    def _check_large(self, newuri, size):
        u = IFileURI(newuri)
        self.failUnless(isinstance(u, uri.CHKFileURI))
        self.failUnless(isinstance(u.storage_index, str))
        self.failUnlessEqual(len(u.storage_index), 16)
        self.failUnless(isinstance(u.key, str))
        self.failUnlessEqual(len(u.key), 16)
        self.failUnlessEqual(u.size, size)

    def set_encoding_parameters(self, k, happy, n, max_segsize=1*MiB):
        self.node.DEFAULT_ENCODING_PARAMETERS = {"k": k,
                                                 "happy": happy,
                                                 "n": n,
                                                 "max_segment_size": max_segsize,
                                                 }

    def _do_upload(self, k, happy, n):
        # upload SIZE_LARGE bytes of DATA with the given encoding
        # parameters, and verify the resulting URI is a sane CHK URI
        self.set_encoding_parameters(k, happy, n)
        d = upload_data(self.u, self.get_data(SIZE_LARGE))
        d.addCallback(extract_uri)
        d.addCallback(self._check_large, SIZE_LARGE)
        return d

    def test_one_each(self):
        # if we have 50 shares, and there are 50 peers, and they all accept a
        # share, we should get exactly one share per peer
        self.make_client()
        d = self._do_upload(25, 30, 50)
        def _check(res):
            for p in self.node.last_peers:
                self.failUnlessEqual(len(p.allocated), 1)
                self.failUnlessEqual(p.queries, 1)
        d.addCallback(_check)
        return d

    def test_two_each(self):
        # if we have 100 shares, and there are 50 peers, and they all accept
        # all shares, we should get exactly two shares per peer
        self.make_client()
        d = self._do_upload(50, 75, 100)
        def _check(res):
            for p in self.node.last_peers:
                self.failUnlessEqual(len(p.allocated), 2)
                self.failUnlessEqual(p.queries, 2)
        d.addCallback(_check)
        return d

    def test_one_each_plus_one_extra(self):
        # if we have 51 shares, and there are 50 peers, then one peer gets
        # two shares and the rest get just one
        self.make_client()
        d = self._do_upload(24, 41, 51)
        def _check(res):
            got_one = []
            got_two = []
            for p in self.node.last_peers:
                allocated = p.allocated
                self.failUnless(len(allocated) in (1,2), len(allocated))
                if len(allocated) == 1:
                    self.failUnlessEqual(p.queries, 1)
                    got_one.append(p)
                else:
                    self.failUnlessEqual(p.queries, 2)
                    got_two.append(p)
            self.failUnlessEqual(len(got_one), 49)
            self.failUnlessEqual(len(got_two), 1)
        d.addCallback(_check)
        return d

    def test_four_each(self):
        # if we have 200 shares, and there are 50 peers, then each peer gets
        # 4 shares. The design goal is to accomplish this with only two
        # queries per peer.
        self.make_client()
        d = self._do_upload(100, 150, 200)
        def _check(res):
            for p in self.node.last_peers:
                self.failUnlessEqual(len(p.allocated), 4)
                self.failUnlessEqual(p.queries, 2)
        d.addCallback(_check)
        return d

    def test_three_of_ten(self):
        # if we have 10 shares and 3 servers, I want to see 3+3+4 rather than
        # 4+4+2
        self.make_client(3)
        d = self._do_upload(3, 5, 10)
        def _check(res):
            counts = {}
            for p in self.node.last_peers:
                n_allocated = len(p.allocated)
                counts[n_allocated] = counts.get(n_allocated, 0) + 1
            histogram = [counts.get(i, 0) for i in range(5)]
            self.failUnlessEqual(histogram, [0,0,0,2,1])
        d.addCallback(_check)
        return d

    def test_some_big_some_small(self):
        # 10 shares, 20 servers, but half the servers don't support a
        # share-size large enough for our file
        self.node = FakeClient(mode="some_big_some_small", num_servers=20)
        self.u = upload.Uploader()
        self.u.running = True
        self.u.parent = self.node

        d = self._do_upload(3, 5, 10)
        def _check(res):
            # we should have put one share each on the big peers, and zero
            # shares on the small peers
            total_allocated = 0
            for p in self.node.last_peers:
                if p.mode == "good":
                    self.failUnlessEqual(len(p.allocated), 1)
                elif p.mode == "small":
                    self.failUnlessEqual(len(p.allocated), 0)
                total_allocated += len(p.allocated)
            self.failUnlessEqual(total_allocated, 10)
        d.addCallback(_check)
        return d
|
|
|
|
|
|
|
|
|
2008-02-07 03:03:35 +00:00
|
|
|
class StorageIndex(unittest.TestCase):
    def test_params_must_matter(self):
        """The storage index must depend upon the data, the convergence
        string, and the encoding parameters; random keys must be unique."""
        DATA = "I am some data"

        def make_si(convergence, bump_k=False):
            # build an uploadable for DATA and return a Deferred that
            # fires with its storage index
            u = upload.Data(DATA, convergence=convergence)
            if bump_k:
                u.encoding_param_k = u.default_encoding_param_k + 1
            return upload.EncryptAnUploadable(u).get_storage_index()

        d1 = make_si("")
        # CHK means the same data should encrypt the same way
        d1a = make_si("")
        # but if we use a different convergence string it should be different
        d1salt1 = make_si("wheee!")
        # and if we add yet a different convergence it should be different again
        d1salt2 = make_si("NOT wheee!")
        # and if we use the first string again it should be the same as last time
        d1salt1a = make_si("wheee!")
        # and if we change the encoding parameters, it should be different
        # (from the same convergence string with different encoding parameters)
        d2 = make_si("", bump_k=True)
        # and if we use a random key, it should be different than the CHK
        d3 = make_si(None)
        # and different from another instance
        d4 = make_si(None)

        d = DeferredListShouldSucceed([d1,d1a,d1salt1,d1salt2,d1salt1a,d2,d3,d4])
        def _done(res):
            si1, si1a, si1salt1, si1salt2, si1salt1a, si2, si3, si4 = res
            self.failUnlessEqual(si1, si1a)
            self.failIfEqual(si1, si2)
            self.failIfEqual(si1, si3)
            self.failIfEqual(si1, si4)
            self.failIfEqual(si3, si4)
            self.failIfEqual(si1salt1, si1)
            self.failIfEqual(si1salt1, si1salt2)
            self.failIfEqual(si1salt2, si1)
            self.failUnlessEqual(si1salt1, si1salt1a)
        d.addCallback(_done)
        return d
|
|
|
|
|
2009-02-17 00:44:57 +00:00
|
|
|
class EncodingParameters(GridTestMixin, unittest.TestCase):
    """Verify that shares.needed/shares.total from tahoe.cfg are honored."""

    def test_configure_parameters(self):
        self.basedir = self.mktemp()
        # rewrite client 0's config before its node starts
        hooks = {0: self._set_up_nodes_extra_config}
        self.set_up_grid(client_config_hooks=hooks)
        c0 = self.g.clients[0]

        DATA = "data" * 100
        u = upload.Data(DATA, convergence="")
        d = c0.upload(u)
        d.addCallback(lambda ur: c0.create_node_from_uri(ur.uri))
        m = monitor.Monitor()
        d.addCallback(lambda fn: fn.check(m))
        def _check(cr):
            # the checker results should reflect the configured 7-of-12
            data = cr.get_data()
            self.failUnlessEqual(data["count-shares-needed"], 7)
            self.failUnlessEqual(data["count-shares-expected"], 12)
        d.addCallback(_check)
        return d

    def _set_up_nodes_extra_config(self, clientdir):
        # append a [client] section that overrides the encoding parameters
        cfgfn = os.path.join(clientdir, "tahoe.cfg")
        oldcfg = open(cfgfn, "r").read()
        f = open(cfgfn, "wt")
        f.writelines([oldcfg,
                      "\n",
                      "[client]\n",
                      "shares.needed = 7\n",
                      "shares.total = 12\n",
                      "\n"])
        f.close()
        return None
|
2007-04-24 00:30:40 +00:00
|
|
|
|
|
|
|
# TODO:
|
|
|
|
# upload with exactly 75 peers (shares_of_happiness)
|
|
|
|
# have a download fail
|
|
|
|
# cancel a download (need to implement more cancel stuff)
|