import os

from twisted.internet import defer
from twisted.trial import unittest
from twisted.application import service

from foolscap.api import Tub, fireEventually, flushEventualQueue

from allmydata.storage.server import si_b2a
from allmydata.storage_client import StorageFarmBroker
from allmydata.immutable import offloaded, upload
from allmydata import uri, client
from allmydata.util import hashutil, fileutil, mathutil

from pycryptopp.cipher.aes import AES

MiB = 1024*1024

DATA = "I need help\n" * 1000

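# A CHKUploadHelper that skips the real encode-and-push step: it reads the
# encoding parameters, then fabricates an UploadResults as if the shares had
# been uploaded.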
class CHKUploadHelper_fake(offloaded.CHKUploadHelper):
    def start_encrypted(self, eu):
        d = eu.get_size()
        def _got_size(size):
            d2 = eu.get_all_encoding_parameters()
            def _got_parms(parms):
                # just pretend we did the upload
                needed_shares, happy, total_shares, segsize = parms
                ueb_data = {"needed_shares": needed_shares,
                            "total_shares": total_shares,
                            "segment_size": segsize,
                            "size": size,
                            }
                ueb_hash = "fake"
                v = uri.CHKFileVerifierURI(self._storage_index, "x"*32,
                                           needed_shares, total_shares, size)
                _UR = upload.UploadResults
                ur = _UR(file_size=size,
                         ciphertext_fetched=0,
                         preexisting_shares=0,
                         pushed_shares=total_shares,
                         sharemap={},
                         servermap={},
                         timings={},
                         uri_extension_data=ueb_data,
                         uri_extension_hash=ueb_hash,
                         verifycapstr=v.to_string())
                self._upload_status.set_results(ur)
                return ur
            d2.addCallback(_got_parms)
            return d2
        d.addCallback(_got_size)
        return d

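# A Helper that builds CHKUploadHelper_fake instead of the real upload
# helper, so no share data is actually pushed to storage servers.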
class Helper_fake_upload(offloaded.Helper):
    def _make_chk_upload_helper(self, storage_index, lp):
        si_s = si_b2a(storage_index)
        incoming_file = os.path.join(self._chk_incoming, si_s)
        encoding_file = os.path.join(self._chk_encoding, si_s)
        uh = CHKUploadHelper_fake(storage_index, self,
                                  self._storage_broker,
                                  self._secret_holder,
                                  incoming_file, encoding_file,
                                  lp)
        return uh

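# A Helper whose CHK check pretends the file is already present in the grid,
# so the client should not need to send any ciphertext.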
class Helper_already_uploaded(Helper_fake_upload):
    def _check_chk(self, storage_index, lp):
        res = upload.HelperUploadResults()
        res.uri_extension_hash = hashutil.uri_extension_hash("")

        # we're pretending that the file they're trying to upload was already
        # present in the grid. We return some information about the file, so
        # the client can decide if they like the way it looks. The parameters
        # used here are chosen to match the defaults.
        PARAMS = FakeClient.DEFAULT_ENCODING_PARAMETERS
        ueb_data = {"needed_shares": PARAMS["k"],
                    "total_shares": PARAMS["n"],
                    "segment_size": min(PARAMS["max_segment_size"], len(DATA)),
                    "size": len(DATA),
                    }
        res.uri_extension_data = ueb_data
        return defer.succeed(res)

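# A minimal stand-in for the client node: a bare MultiService that supplies
# default encoding parameters (the test Tub is attached to it in setUp).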
class FakeClient(service.MultiService):
    DEFAULT_ENCODING_PARAMETERS = {"k": 25,
                                   "happy": 75,
                                   "n": 100,
                                   "max_segment_size": 1*MiB,
                                   }

    def get_encoding_parameters(self):
        return self.DEFAULT_ENCODING_PARAMETERS

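# Flush foolscap's eventual-send queue, then pass the original result (or
# failure) through unchanged, so this can be hung off addBoth() in tearDown.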
def flush_but_dont_ignore(res):
    d = flushEventualQueue()
    def _done(ignored):
        return res
    d.addCallback(_done)
    return d

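# Return a Deferred that fires after several trips through the eventual-send
# queue, giving the freshly-started Uploader time to locate the helper before
# the tests assert on u._helper.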
def wait_a_few_turns(ignored=None):
    d = fireEventually()
    d.addCallback(fireEventually)
    d.addCallback(fireEventually)
    d.addCallback(fireEventually)
    d.addCallback(fireEventually)
    d.addCallback(fireEventually)
    return d

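# Wrap an in-memory string in upload.Data and push it through the given
# Uploader with the given convergence secret.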
def upload_data(uploader, data, convergence):
    u = upload.Data(data, convergence=convergence)
    return uploader.upload(u)

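# Exercise upload-via-helper ("assisted upload"): an Uploader is pointed at a
# (fake) Helper registered on the local Tub and uploads DATA through it.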
class AssistedUpload(unittest.TestCase):
    timeout = 240 # It takes longer than 120 seconds on Francois's arm box.
    def setUp(self):
        self.s = FakeClient()
        self.storage_broker = StorageFarmBroker(None, True)
        self.secret_holder = client.SecretHolder("lease secret", "convergence")
        self.s.startService()

        self.tub = t = Tub()
        t.setOption("expose-remote-exception-types", False)
        t.setServiceParent(self.s)
        self.s.tub = t
        # we never actually use this for network traffic, so it can use a
        # bogus host/port
        t.setLocation("bogus:1234")

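    # Create a helper (Helper_fake_upload by default) rooted in basedir and
    # register it on our Tub; tests reach it through self.helper_furl.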
    def setUpHelper(self, basedir, helper_class=Helper_fake_upload):
        fileutil.make_dirs(basedir)
        self.helper = h = helper_class(basedir,
                                       self.storage_broker,
                                       self.secret_holder,
                                       None, None)
        self.helper_furl = self.tub.registerReference(h)

    def tearDown(self):
        d = self.s.stopService()
        d.addCallback(fireEventually)
        d.addBoth(flush_but_dont_ignore)
        return d

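    # Upload DATA through the fake helper and check that the helper's working
    # directories (CHK_encoding/ and CHK_incoming/) are left empty afterwards.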
    def test_one(self):
        self.basedir = "helper/AssistedUpload/test_one"
        self.setUpHelper(self.basedir)
        u = upload.Uploader(self.helper_furl)
        u.setServiceParent(self.s)

        d = wait_a_few_turns()

        def _ready(res):
            assert u._helper

            return upload_data(u, DATA, convergence="some convergence string")
        d.addCallback(_ready)
        def _uploaded(results):
            the_uri = results.get_uri()
            assert "CHK" in the_uri
        d.addCallback(_uploaded)

        def _check_empty(res):
            files = os.listdir(os.path.join(self.basedir, "CHK_encoding"))
            self.failUnlessEqual(files, [])
            files = os.listdir(os.path.join(self.basedir, "CHK_incoming"))
            self.failUnlessEqual(files, [])
        d.addCallback(_check_empty)

        return d

    def test_previous_upload_failed(self):
        self.basedir = "helper/AssistedUpload/test_previous_upload_failed"
        self.setUpHelper(self.basedir)

        # we want to make sure that an upload which fails (leaving the
        # ciphertext in the CHK_encoding/ directory) does not prevent a later
        # attempt to upload that file from working. We simulate this by
        # populating the directory manually. The hardest part is guessing the
        # storage index.

        k = FakeClient.DEFAULT_ENCODING_PARAMETERS["k"]
        n = FakeClient.DEFAULT_ENCODING_PARAMETERS["n"]
        max_segsize = FakeClient.DEFAULT_ENCODING_PARAMETERS["max_segment_size"]
        segsize = min(max_segsize, len(DATA))
        # this must be a multiple of 'required_shares'==k
        segsize = mathutil.next_multiple(segsize, k)

        key = hashutil.convergence_hash(k, n, segsize, DATA, "test convergence string")
        assert len(key) == 16
        encryptor = AES(key)
        SI = hashutil.storage_index_hash(key)
        SI_s = si_b2a(SI)
        encfile = os.path.join(self.basedir, "CHK_encoding", SI_s)
        f = open(encfile, "wb")
        f.write(encryptor.process(DATA))
        f.close()

        u = upload.Uploader(self.helper_furl)
        u.setServiceParent(self.s)

        d = wait_a_few_turns()

        def _ready(res):
            assert u._helper
            return upload_data(u, DATA, convergence="test convergence string")
        d.addCallback(_ready)
        def _uploaded(results):
            the_uri = results.get_uri()
            assert "CHK" in the_uri
        d.addCallback(_uploaded)

        def _check_empty(res):
            files = os.listdir(os.path.join(self.basedir, "CHK_encoding"))
            self.failUnlessEqual(files, [])
            files = os.listdir(os.path.join(self.basedir, "CHK_incoming"))
            self.failUnlessEqual(files, [])
        d.addCallback(_check_empty)

        return d

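    # Helper_already_uploaded claims the file is already in the grid; the
    # upload should still return a CHK URI and leave the helper's working
    # directories empty.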
    def test_already_uploaded(self):
        self.basedir = "helper/AssistedUpload/test_already_uploaded"
        self.setUpHelper(self.basedir, helper_class=Helper_already_uploaded)
        u = upload.Uploader(self.helper_furl)
        u.setServiceParent(self.s)

        d = wait_a_few_turns()

        def _ready(res):
            assert u._helper

            return upload_data(u, DATA, convergence="some convergence string")
        d.addCallback(_ready)
        def _uploaded(results):
            the_uri = results.get_uri()
            assert "CHK" in the_uri
        d.addCallback(_uploaded)

        def _check_empty(res):
            files = os.listdir(os.path.join(self.basedir, "CHK_encoding"))
            self.failUnlessEqual(files, [])
            files = os.listdir(os.path.join(self.basedir, "CHK_incoming"))
            self.failUnlessEqual(files, [])
        d.addCallback(_check_empty)

        return d