mirror of
https://github.com/tahoe-lafs/tahoe-lafs.git
synced 2025-06-01 15:20:55 +00:00
use added secret to protect convergent encryption
Now upload or encode methods take a required argument named "convergence" which can be either None, indicating no convergent encryption at all, or a string, which is the "added secret" to be mixed in to the content hash key. If you want traditional convergent encryption behavior, set the added secret to be the empty string. This patch also renames "content hash key" to "convergent encryption" in argument names and variable names. (A different and larger renaming is needed in order to clarify that Tahoe supports immutable files which are not encrypted content-hash-key a.k.a. convergent encryption.) This patch also changes a few unit tests to use non-convergent encryption, because it doesn't matter for what they are testing and non-convergent encryption is slightly faster.
This commit is contained in:
parent
7996131a0a
commit
fc3bd0c987
@ -34,6 +34,9 @@ PiB=1024*TiB
|
|||||||
class StubClient(Referenceable):
|
class StubClient(Referenceable):
|
||||||
implements(RIStubClient)
|
implements(RIStubClient)
|
||||||
|
|
||||||
|
def _make_secret():
|
||||||
|
return base32.b2a(os.urandom(hashutil.CRYPTO_VAL_SIZE)) + "\n"
|
||||||
|
|
||||||
class Client(node.Node, testutil.PollMixin):
|
class Client(node.Node, testutil.PollMixin):
|
||||||
PORTNUMFILE = "client.port"
|
PORTNUMFILE = "client.port"
|
||||||
STOREDIR = 'storage'
|
STOREDIR = 'storage'
|
||||||
@ -103,9 +106,7 @@ class Client(node.Node, testutil.PollMixin):
|
|||||||
self.stats_provider = None
|
self.stats_provider = None
|
||||||
|
|
||||||
def init_lease_secret(self):
|
def init_lease_secret(self):
|
||||||
def make_secret():
|
secret_s = self.get_or_create_private_config("secret", _make_secret)
|
||||||
return base32.b2a(os.urandom(hashutil.CRYPTO_VAL_SIZE)) + "\n"
|
|
||||||
secret_s = self.get_or_create_private_config("secret", make_secret)
|
|
||||||
self._lease_secret = base32.a2b(secret_s)
|
self._lease_secret = base32.a2b(secret_s)
|
||||||
|
|
||||||
def init_storage(self):
|
def init_storage(self):
|
||||||
@ -151,6 +152,8 @@ class Client(node.Node, testutil.PollMixin):
|
|||||||
|
|
||||||
def init_client(self):
|
def init_client(self):
|
||||||
helper_furl = self.get_config("helper.furl")
|
helper_furl = self.get_config("helper.furl")
|
||||||
|
convergence_s = self.get_or_create_private_config('convergence', _make_secret)
|
||||||
|
self.convergence = base32.a2b(convergence_s)
|
||||||
self.add_service(Uploader(helper_furl))
|
self.add_service(Uploader(helper_furl))
|
||||||
self.add_service(Downloader())
|
self.add_service(Downloader())
|
||||||
self.add_service(Checker())
|
self.add_service(Checker())
|
||||||
|
@ -42,9 +42,9 @@ class ControlServer(Referenceable, service.Service, testutil.PollMixin):
|
|||||||
def remote_wait_for_client_connections(self, num_clients):
|
def remote_wait_for_client_connections(self, num_clients):
|
||||||
return self.parent.debug_wait_for_client_connections(num_clients)
|
return self.parent.debug_wait_for_client_connections(num_clients)
|
||||||
|
|
||||||
def remote_upload_from_file_to_uri(self, filename):
|
def remote_upload_from_file_to_uri(self, filename, convergence):
|
||||||
uploader = self.parent.getServiceNamed("uploader")
|
uploader = self.parent.getServiceNamed("uploader")
|
||||||
u = upload.FileName(filename)
|
u = upload.FileName(filename, convergence=convergence)
|
||||||
d = uploader.upload(u)
|
d = uploader.upload(u)
|
||||||
d.addCallback(lambda results: results.uri)
|
d.addCallback(lambda results: results.uri)
|
||||||
return d
|
return d
|
||||||
@ -161,7 +161,7 @@ class SpeedTest:
|
|||||||
d1 = self._n.overwrite(data)
|
d1 = self._n.overwrite(data)
|
||||||
d1.addCallback(lambda res: self._n.get_uri())
|
d1.addCallback(lambda res: self._n.get_uri())
|
||||||
else:
|
else:
|
||||||
up = upload.FileName(fn)
|
up = upload.FileName(fn, convergence=None)
|
||||||
d1 = self.parent.upload(up)
|
d1 = self.parent.upload(up)
|
||||||
d1.addCallback(lambda results: results.uri)
|
d1.addCallback(lambda results: results.uri)
|
||||||
d1.addCallback(_record_uri, i)
|
d1.addCallback(_record_uri, i)
|
||||||
|
@ -1578,11 +1578,14 @@ class RIControlClient(RemoteInterface):
|
|||||||
storage servers.
|
storage servers.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
def upload_from_file_to_uri(filename=str):
|
def upload_from_file_to_uri(filename=str, convergence=ChoiceOf(None, StringConstraint(2**20))):
|
||||||
"""Upload a file to the grid. This accepts a filename (which must be
|
"""Upload a file to the grid. This accepts a filename (which must be
|
||||||
absolute) that points to a file on the node's local disk. The node
|
absolute) that points to a file on the node's local disk. The node will
|
||||||
will read the contents of this file, upload it to the grid, then
|
read the contents of this file, upload it to the grid, then return the
|
||||||
return the URI at which it was uploaded.
|
URI at which it was uploaded. If convergence is None then a random
|
||||||
|
encryption key will be used, else the plaintext will be hashed, then
|
||||||
|
that hash will be mixed together with the "convergence" string to form
|
||||||
|
the encryption key.
|
||||||
"""
|
"""
|
||||||
return URI
|
return URI
|
||||||
|
|
||||||
|
@ -367,7 +367,7 @@ this file are ignored.
|
|||||||
if self.mode in ("upload", "upload-self"):
|
if self.mode in ("upload", "upload-self"):
|
||||||
files[name] = self.create_data(name, size)
|
files[name] = self.create_data(name, size)
|
||||||
d = self.control_rref.callRemote("upload_from_file_to_uri",
|
d = self.control_rref.callRemote("upload_from_file_to_uri",
|
||||||
files[name])
|
files[name], convergence=None)
|
||||||
def _done(uri):
|
def _done(uri):
|
||||||
os.remove(files[name])
|
os.remove(files[name])
|
||||||
del files[name]
|
del files[name]
|
||||||
|
@ -120,7 +120,7 @@ class Dirnode(unittest.TestCase, testutil.ShouldFailMixin):
|
|||||||
def test_readonly(self):
|
def test_readonly(self):
|
||||||
fileuri = make_chk_file_uri(1234)
|
fileuri = make_chk_file_uri(1234)
|
||||||
filenode = self.client.create_node_from_uri(fileuri)
|
filenode = self.client.create_node_from_uri(fileuri)
|
||||||
uploadable = upload.Data("some data")
|
uploadable = upload.Data("some data", convergence="some convergence string")
|
||||||
|
|
||||||
d = self.client.create_empty_dirnode()
|
d = self.client.create_empty_dirnode()
|
||||||
def _created(rw_dn):
|
def _created(rw_dn):
|
||||||
@ -338,7 +338,7 @@ class Dirnode(unittest.TestCase, testutil.ShouldFailMixin):
|
|||||||
# hundrededths of a second.
|
# hundrededths of a second.
|
||||||
d.addCallback(self.stall, 0.1)
|
d.addCallback(self.stall, 0.1)
|
||||||
d.addCallback(lambda res: n.add_file(u"timestamps",
|
d.addCallback(lambda res: n.add_file(u"timestamps",
|
||||||
upload.Data("stamp me")))
|
upload.Data("stamp me", convergence="some convergence string")))
|
||||||
d.addCallback(self.stall, 0.1)
|
d.addCallback(self.stall, 0.1)
|
||||||
def _stop(res):
|
def _stop(res):
|
||||||
self._stop_timestamp = time.time()
|
self._stop_timestamp = time.time()
|
||||||
@ -393,7 +393,7 @@ class Dirnode(unittest.TestCase, testutil.ShouldFailMixin):
|
|||||||
self.failUnlessEqual(sorted(children.keys()),
|
self.failUnlessEqual(sorted(children.keys()),
|
||||||
sorted([u"child"])))
|
sorted([u"child"])))
|
||||||
|
|
||||||
uploadable = upload.Data("some data")
|
uploadable = upload.Data("some data", convergence="some convergence string")
|
||||||
d.addCallback(lambda res: n.add_file(u"newfile", uploadable))
|
d.addCallback(lambda res: n.add_file(u"newfile", uploadable))
|
||||||
d.addCallback(lambda newnode:
|
d.addCallback(lambda newnode:
|
||||||
self.failUnless(IFileNode.providedBy(newnode)))
|
self.failUnless(IFileNode.providedBy(newnode)))
|
||||||
@ -406,7 +406,7 @@ class Dirnode(unittest.TestCase, testutil.ShouldFailMixin):
|
|||||||
self.failUnlessEqual(sorted(metadata.keys()),
|
self.failUnlessEqual(sorted(metadata.keys()),
|
||||||
["ctime", "mtime"]))
|
["ctime", "mtime"]))
|
||||||
|
|
||||||
uploadable = upload.Data("some data")
|
uploadable = upload.Data("some data", convergence="some convergence string")
|
||||||
d.addCallback(lambda res: n.add_file(u"newfile-metadata",
|
d.addCallback(lambda res: n.add_file(u"newfile-metadata",
|
||||||
uploadable,
|
uploadable,
|
||||||
{"key": "value"}))
|
{"key": "value"}))
|
||||||
|
@ -168,7 +168,7 @@ class Encode(unittest.TestCase):
|
|||||||
data = make_data(datalen)
|
data = make_data(datalen)
|
||||||
# force use of multiple segments
|
# force use of multiple segments
|
||||||
e = encode.Encoder()
|
e = encode.Encoder()
|
||||||
u = upload.Data(data)
|
u = upload.Data(data, convergence="some convergence string")
|
||||||
u.max_segment_size = max_segment_size
|
u.max_segment_size = max_segment_size
|
||||||
u.encoding_param_k = 25
|
u.encoding_param_k = 25
|
||||||
u.encoding_param_happy = 75
|
u.encoding_param_happy = 75
|
||||||
@ -303,7 +303,7 @@ class Roundtrip(unittest.TestCase):
|
|||||||
if AVAILABLE_SHARES is None:
|
if AVAILABLE_SHARES is None:
|
||||||
AVAILABLE_SHARES = NUM_SHARES
|
AVAILABLE_SHARES = NUM_SHARES
|
||||||
e = encode.Encoder()
|
e = encode.Encoder()
|
||||||
u = upload.Data(data)
|
u = upload.Data(data, convergence="some convergence string")
|
||||||
# force use of multiple segments by using a low max_segment_size
|
# force use of multiple segments by using a low max_segment_size
|
||||||
u.max_segment_size = max_segment_size
|
u.max_segment_size = max_segment_size
|
||||||
u.encoding_param_k = k
|
u.encoding_param_k = k
|
||||||
|
@ -72,8 +72,8 @@ def flush_but_dont_ignore(res):
|
|||||||
d.addCallback(_done)
|
d.addCallback(_done)
|
||||||
return d
|
return d
|
||||||
|
|
||||||
def upload_data(uploader, data):
|
def upload_data(uploader, data, convergence):
|
||||||
u = upload.Data(data)
|
u = upload.Data(data, convergence=convergence)
|
||||||
return uploader.upload(u)
|
return uploader.upload(u)
|
||||||
|
|
||||||
class AssistedUpload(unittest.TestCase):
|
class AssistedUpload(unittest.TestCase):
|
||||||
@ -116,7 +116,7 @@ class AssistedUpload(unittest.TestCase):
|
|||||||
def _ready(res):
|
def _ready(res):
|
||||||
assert u._helper
|
assert u._helper
|
||||||
|
|
||||||
return upload_data(u, DATA)
|
return upload_data(u, DATA, convergence="some convergence string")
|
||||||
d.addCallback(_ready)
|
d.addCallback(_ready)
|
||||||
def _uploaded(results):
|
def _uploaded(results):
|
||||||
uri = results.uri
|
uri = results.uri
|
||||||
@ -149,7 +149,7 @@ class AssistedUpload(unittest.TestCase):
|
|||||||
# this must be a multiple of 'required_shares'==k
|
# this must be a multiple of 'required_shares'==k
|
||||||
segsize = mathutil.next_multiple(segsize, k)
|
segsize = mathutil.next_multiple(segsize, k)
|
||||||
|
|
||||||
key = hashutil.content_hash_key_hash(k, n, segsize, DATA)
|
key = hashutil.convergence_hash(k, n, segsize, DATA, "test convergence string")
|
||||||
assert len(key) == 16
|
assert len(key) == 16
|
||||||
encryptor = AES(key)
|
encryptor = AES(key)
|
||||||
SI = hashutil.storage_index_hash(key)
|
SI = hashutil.storage_index_hash(key)
|
||||||
@ -169,7 +169,7 @@ class AssistedUpload(unittest.TestCase):
|
|||||||
|
|
||||||
def _ready(res):
|
def _ready(res):
|
||||||
assert u._helper
|
assert u._helper
|
||||||
return upload_data(u, DATA)
|
return upload_data(u, DATA, convergence="test convergence string")
|
||||||
d.addCallback(_ready)
|
d.addCallback(_ready)
|
||||||
def _uploaded(results):
|
def _uploaded(results):
|
||||||
uri = results.uri
|
uri = results.uri
|
||||||
@ -200,7 +200,7 @@ class AssistedUpload(unittest.TestCase):
|
|||||||
def _ready(res):
|
def _ready(res):
|
||||||
assert u._helper
|
assert u._helper
|
||||||
|
|
||||||
return upload_data(u, DATA)
|
return upload_data(u, DATA, convergence="some convergence string")
|
||||||
d.addCallback(_ready)
|
d.addCallback(_ready)
|
||||||
def _uploaded(results):
|
def _uploaded(results):
|
||||||
uri = results.uri
|
uri = results.uri
|
||||||
|
@ -256,15 +256,15 @@ class SystemTest(testutil.SignalMixin, testutil.PollMixin, unittest.TestCase):
|
|||||||
|
|
||||||
def test_upload_and_download_random_key(self):
|
def test_upload_and_download_random_key(self):
|
||||||
self.basedir = "system/SystemTest/test_upload_and_download_random_key"
|
self.basedir = "system/SystemTest/test_upload_and_download_random_key"
|
||||||
return self._test_upload_and_download(False)
|
return self._test_upload_and_download(convergence=None)
|
||||||
test_upload_and_download_random_key.timeout = 4800
|
test_upload_and_download_random_key.timeout = 4800
|
||||||
|
|
||||||
def test_upload_and_download_content_hash_key(self):
|
def test_upload_and_download_convergent(self):
|
||||||
self.basedir = "system/SystemTest/test_upload_and_download_CHK"
|
self.basedir = "system/SystemTest/test_upload_and_download_convergent"
|
||||||
return self._test_upload_and_download(True)
|
return self._test_upload_and_download(convergence="some convergence string")
|
||||||
test_upload_and_download_content_hash_key.timeout = 4800
|
test_upload_and_download_convergent.timeout = 4800
|
||||||
|
|
||||||
def _test_upload_and_download(self, contenthashkey):
|
def _test_upload_and_download(self, convergence):
|
||||||
# we use 4000 bytes of data, which will result in about 400k written
|
# we use 4000 bytes of data, which will result in about 400k written
|
||||||
# to disk among all our simulated nodes
|
# to disk among all our simulated nodes
|
||||||
DATA = "Some data to upload\n" * 200
|
DATA = "Some data to upload\n" * 200
|
||||||
@ -287,7 +287,7 @@ class SystemTest(testutil.SignalMixin, testutil.PollMixin, unittest.TestCase):
|
|||||||
# tail segment is not the same length as the others. This actualy
|
# tail segment is not the same length as the others. This actualy
|
||||||
# gets rounded up to 1025 to be a multiple of the number of
|
# gets rounded up to 1025 to be a multiple of the number of
|
||||||
# required shares (since we use 25 out of 100 FEC).
|
# required shares (since we use 25 out of 100 FEC).
|
||||||
up = upload.Data(DATA, contenthashkey=contenthashkey)
|
up = upload.Data(DATA, convergence=convergence)
|
||||||
up.max_segment_size = 1024
|
up.max_segment_size = 1024
|
||||||
d1 = u.upload(up)
|
d1 = u.upload(up)
|
||||||
return d1
|
return d1
|
||||||
@ -301,12 +301,12 @@ class SystemTest(testutil.SignalMixin, testutil.PollMixin, unittest.TestCase):
|
|||||||
d.addCallback(_upload_done)
|
d.addCallback(_upload_done)
|
||||||
|
|
||||||
def _upload_again(res):
|
def _upload_again(res):
|
||||||
# Upload again. If contenthashkey then this ought to be
|
# Upload again. If using convergent encryption then this ought to be
|
||||||
# short-circuited, however with the way we currently generate URIs
|
# short-circuited, however with the way we currently generate URIs
|
||||||
# (i.e. because they include the roothash), we have to do all of the
|
# (i.e. because they include the roothash), we have to do all of the
|
||||||
# encoding work, and only get to save on the upload part.
|
# encoding work, and only get to save on the upload part.
|
||||||
log.msg("UPLOADING AGAIN")
|
log.msg("UPLOADING AGAIN")
|
||||||
up = upload.Data(DATA, contenthashkey=contenthashkey)
|
up = upload.Data(DATA, convergence=convergence)
|
||||||
up.max_segment_size = 1024
|
up.max_segment_size = 1024
|
||||||
d1 = self.uploader.upload(up)
|
d1 = self.uploader.upload(up)
|
||||||
d.addCallback(_upload_again)
|
d.addCallback(_upload_again)
|
||||||
@ -372,7 +372,7 @@ class SystemTest(testutil.SignalMixin, testutil.PollMixin, unittest.TestCase):
|
|||||||
|
|
||||||
HELPER_DATA = "Data that needs help to upload" * 1000
|
HELPER_DATA = "Data that needs help to upload" * 1000
|
||||||
def _upload_with_helper(res):
|
def _upload_with_helper(res):
|
||||||
u = upload.Data(HELPER_DATA, contenthashkey=contenthashkey)
|
u = upload.Data(HELPER_DATA, convergence=convergence)
|
||||||
d = self.extra_node.upload(u)
|
d = self.extra_node.upload(u)
|
||||||
def _uploaded(results):
|
def _uploaded(results):
|
||||||
uri = results.uri
|
uri = results.uri
|
||||||
@ -385,7 +385,7 @@ class SystemTest(testutil.SignalMixin, testutil.PollMixin, unittest.TestCase):
|
|||||||
d.addCallback(_upload_with_helper)
|
d.addCallback(_upload_with_helper)
|
||||||
|
|
||||||
def _upload_duplicate_with_helper(res):
|
def _upload_duplicate_with_helper(res):
|
||||||
u = upload.Data(HELPER_DATA, contenthashkey=contenthashkey)
|
u = upload.Data(HELPER_DATA, convergence=convergence)
|
||||||
u.debug_stash_RemoteEncryptedUploadable = True
|
u.debug_stash_RemoteEncryptedUploadable = True
|
||||||
d = self.extra_node.upload(u)
|
d = self.extra_node.upload(u)
|
||||||
def _uploaded(results):
|
def _uploaded(results):
|
||||||
@ -398,13 +398,13 @@ class SystemTest(testutil.SignalMixin, testutil.PollMixin, unittest.TestCase):
|
|||||||
"uploadable started uploading, should have been avoided")
|
"uploadable started uploading, should have been avoided")
|
||||||
d.addCallback(_check)
|
d.addCallback(_check)
|
||||||
return d
|
return d
|
||||||
if contenthashkey:
|
if convergence is not None:
|
||||||
d.addCallback(_upload_duplicate_with_helper)
|
d.addCallback(_upload_duplicate_with_helper)
|
||||||
|
|
||||||
def _upload_resumable(res):
|
def _upload_resumable(res):
|
||||||
DATA = "Data that needs help to upload and gets interrupted" * 1000
|
DATA = "Data that needs help to upload and gets interrupted" * 1000
|
||||||
u1 = CountingDataUploadable(DATA, contenthashkey=contenthashkey)
|
u1 = CountingDataUploadable(DATA, convergence=convergence)
|
||||||
u2 = CountingDataUploadable(DATA, contenthashkey=contenthashkey)
|
u2 = CountingDataUploadable(DATA, convergence=convergence)
|
||||||
|
|
||||||
# we interrupt the connection after about 5kB by shutting down
|
# we interrupt the connection after about 5kB by shutting down
|
||||||
# the helper, then restartingit.
|
# the helper, then restartingit.
|
||||||
@ -490,7 +490,7 @@ class SystemTest(testutil.SignalMixin, testutil.PollMixin, unittest.TestCase):
|
|||||||
# to store the key locally and re-use it on the next upload of
|
# to store the key locally and re-use it on the next upload of
|
||||||
# this file, which isn't a bad thing to do, but we currently
|
# this file, which isn't a bad thing to do, but we currently
|
||||||
# don't do it.)
|
# don't do it.)
|
||||||
if contenthashkey:
|
if convergence is not None:
|
||||||
# Make sure we did not have to read the whole file the
|
# Make sure we did not have to read the whole file the
|
||||||
# second time around .
|
# second time around .
|
||||||
self.failUnless(bytes_sent < len(DATA),
|
self.failUnless(bytes_sent < len(DATA),
|
||||||
@ -510,9 +510,9 @@ class SystemTest(testutil.SignalMixin, testutil.PollMixin, unittest.TestCase):
|
|||||||
|
|
||||||
def _check(newdata):
|
def _check(newdata):
|
||||||
self.failUnlessEqual(newdata, DATA)
|
self.failUnlessEqual(newdata, DATA)
|
||||||
# If using a content hash key, then also check that the helper
|
# If using convergent encryption, then also check that the
|
||||||
# has removed the temp file from its directories.
|
# helper has removed the temp file from its directories.
|
||||||
if contenthashkey:
|
if convergence is not None:
|
||||||
basedir = os.path.join(self.getdir("client0"), "helper")
|
basedir = os.path.join(self.getdir("client0"), "helper")
|
||||||
files = os.listdir(os.path.join(basedir, "CHK_encoding"))
|
files = os.listdir(os.path.join(basedir, "CHK_encoding"))
|
||||||
self.failUnlessEqual(files, [])
|
self.failUnlessEqual(files, [])
|
||||||
@ -890,7 +890,7 @@ class SystemTest(testutil.SignalMixin, testutil.PollMixin, unittest.TestCase):
|
|||||||
return d
|
return d
|
||||||
|
|
||||||
def _do_publish1(self, res):
|
def _do_publish1(self, res):
|
||||||
ut = upload.Data(self.data)
|
ut = upload.Data(self.data, convergence=None)
|
||||||
c0 = self.clients[0]
|
c0 = self.clients[0]
|
||||||
d = c0.create_empty_dirnode()
|
d = c0.create_empty_dirnode()
|
||||||
def _made_root(new_dirnode):
|
def _made_root(new_dirnode):
|
||||||
@ -910,7 +910,7 @@ class SystemTest(testutil.SignalMixin, testutil.PollMixin, unittest.TestCase):
|
|||||||
return d
|
return d
|
||||||
|
|
||||||
def _do_publish2(self, res):
|
def _do_publish2(self, res):
|
||||||
ut = upload.Data(self.data)
|
ut = upload.Data(self.data, convergence=None)
|
||||||
d = self._subdir1_node.create_empty_directory(u"subdir2")
|
d = self._subdir1_node.create_empty_directory(u"subdir2")
|
||||||
d.addCallback(lambda subdir2: subdir2.add_file(u"mydata992", ut))
|
d.addCallback(lambda subdir2: subdir2.add_file(u"mydata992", ut))
|
||||||
return d
|
return d
|
||||||
@ -927,7 +927,7 @@ class SystemTest(testutil.SignalMixin, testutil.PollMixin, unittest.TestCase):
|
|||||||
|
|
||||||
def _do_publish_private(self, res):
|
def _do_publish_private(self, res):
|
||||||
self.smalldata = "sssh, very secret stuff"
|
self.smalldata = "sssh, very secret stuff"
|
||||||
ut = upload.Data(self.smalldata)
|
ut = upload.Data(self.smalldata, convergence=None)
|
||||||
d = self.clients[0].create_empty_dirnode()
|
d = self.clients[0].create_empty_dirnode()
|
||||||
d.addCallback(self.log, "GOT private directory")
|
d.addCallback(self.log, "GOT private directory")
|
||||||
def _got_new_dir(privnode):
|
def _got_new_dir(privnode):
|
||||||
@ -1009,7 +1009,7 @@ class SystemTest(testutil.SignalMixin, testutil.PollMixin, unittest.TestCase):
|
|||||||
d1.addCallback(lambda res: self.shouldFail2(NotMutableError, "mkdir(nope)", None, dirnode.create_empty_directory, u"nope"))
|
d1.addCallback(lambda res: self.shouldFail2(NotMutableError, "mkdir(nope)", None, dirnode.create_empty_directory, u"nope"))
|
||||||
|
|
||||||
d1.addCallback(self.log, "doing add_file(ro)")
|
d1.addCallback(self.log, "doing add_file(ro)")
|
||||||
ut = upload.Data("I will disappear, unrecorded and unobserved. The tragedy of my demise is made more poignant by its silence, but this beauty is not for you to ever know.")
|
ut = upload.Data("I will disappear, unrecorded and unobserved. The tragedy of my demise is made more poignant by its silence, but this beauty is not for you to ever know.", convergence="99i-p1x4-xd4-18yc-ywt-87uu-msu-zo -- completely and totally unguessable string (unless you read this)")
|
||||||
d1.addCallback(lambda res: self.shouldFail2(NotMutableError, "add_file(nope)", None, dirnode.add_file, u"hope", ut))
|
d1.addCallback(lambda res: self.shouldFail2(NotMutableError, "add_file(nope)", None, dirnode.add_file, u"hope", ut))
|
||||||
|
|
||||||
d1.addCallback(self.log, "doing get(ro)")
|
d1.addCallback(self.log, "doing get(ro)")
|
||||||
@ -1345,7 +1345,7 @@ class SystemTest(testutil.SignalMixin, testutil.PollMixin, unittest.TestCase):
|
|||||||
d.addCallback(self._test_control2, control_furl_file)
|
d.addCallback(self._test_control2, control_furl_file)
|
||||||
return d
|
return d
|
||||||
def _test_control2(self, rref, filename):
|
def _test_control2(self, rref, filename):
|
||||||
d = rref.callRemote("upload_from_file_to_uri", filename)
|
d = rref.callRemote("upload_from_file_to_uri", filename, convergence=None)
|
||||||
downfile = os.path.join(self.basedir, "control.downfile")
|
downfile = os.path.join(self.basedir, "control.downfile")
|
||||||
d.addCallback(lambda uri:
|
d.addCallback(lambda uri:
|
||||||
rref.callRemote("download_from_uri_to_file",
|
rref.callRemote("download_from_uri_to_file",
|
||||||
|
@ -25,14 +25,14 @@ class Uploadable(unittest.TestCase):
|
|||||||
self.failUnlessEqual(s, expected)
|
self.failUnlessEqual(s, expected)
|
||||||
|
|
||||||
def test_filehandle_random_key(self):
|
def test_filehandle_random_key(self):
|
||||||
return self._test_filehandle(True)
|
return self._test_filehandle(convergence=None)
|
||||||
|
|
||||||
def test_filehandle_content_hash_key(self):
|
def test_filehandle_convergent_encryption(self):
|
||||||
return self._test_filehandle(False)
|
return self._test_filehandle(convergence="some convergence string")
|
||||||
|
|
||||||
def _test_filehandle(self, randomkey):
|
def _test_filehandle(self, convergence):
|
||||||
s = StringIO("a"*41)
|
s = StringIO("a"*41)
|
||||||
u = upload.FileHandle(s, randomkey)
|
u = upload.FileHandle(s, convergence=convergence)
|
||||||
d = u.get_size()
|
d = u.get_size()
|
||||||
d.addCallback(self.failUnlessEqual, 41)
|
d.addCallback(self.failUnlessEqual, 41)
|
||||||
d.addCallback(lambda res: u.read(1))
|
d.addCallback(lambda res: u.read(1))
|
||||||
@ -50,7 +50,7 @@ class Uploadable(unittest.TestCase):
|
|||||||
f = open(fn, "w")
|
f = open(fn, "w")
|
||||||
f.write("a"*41)
|
f.write("a"*41)
|
||||||
f.close()
|
f.close()
|
||||||
u = upload.FileName(fn)
|
u = upload.FileName(fn, convergence=None)
|
||||||
d = u.get_size()
|
d = u.get_size()
|
||||||
d.addCallback(self.failUnlessEqual, 41)
|
d.addCallback(self.failUnlessEqual, 41)
|
||||||
d.addCallback(lambda res: u.read(1))
|
d.addCallback(lambda res: u.read(1))
|
||||||
@ -62,7 +62,7 @@ class Uploadable(unittest.TestCase):
|
|||||||
|
|
||||||
def test_data(self):
|
def test_data(self):
|
||||||
s = "a"*41
|
s = "a"*41
|
||||||
u = upload.Data(s)
|
u = upload.Data(s, convergence=None)
|
||||||
d = u.get_size()
|
d = u.get_size()
|
||||||
d.addCallback(self.failUnlessEqual, 41)
|
d.addCallback(self.failUnlessEqual, 41)
|
||||||
d.addCallback(lambda res: u.read(1))
|
d.addCallback(lambda res: u.read(1))
|
||||||
@ -169,13 +169,13 @@ SIZE_SMALL = 16
|
|||||||
SIZE_LARGE = len(DATA)
|
SIZE_LARGE = len(DATA)
|
||||||
|
|
||||||
def upload_data(uploader, data):
|
def upload_data(uploader, data):
|
||||||
u = upload.Data(data)
|
u = upload.Data(data, convergence=None)
|
||||||
return uploader.upload(u)
|
return uploader.upload(u)
|
||||||
def upload_filename(uploader, filename):
|
def upload_filename(uploader, filename):
|
||||||
u = upload.FileName(filename)
|
u = upload.FileName(filename, convergence=None)
|
||||||
return uploader.upload(u)
|
return uploader.upload(u)
|
||||||
def upload_filehandle(uploader, fh):
|
def upload_filehandle(uploader, fh):
|
||||||
u = upload.FileHandle(fh)
|
u = upload.FileHandle(fh, convergence=None)
|
||||||
return uploader.upload(u)
|
return uploader.upload(u)
|
||||||
|
|
||||||
class GoodServer(unittest.TestCase):
|
class GoodServer(unittest.TestCase):
|
||||||
@ -444,38 +444,57 @@ class PeerSelection(unittest.TestCase):
|
|||||||
class StorageIndex(unittest.TestCase):
|
class StorageIndex(unittest.TestCase):
|
||||||
def test_params_must_matter(self):
|
def test_params_must_matter(self):
|
||||||
DATA = "I am some data"
|
DATA = "I am some data"
|
||||||
u = upload.Data(DATA)
|
u = upload.Data(DATA, convergence="")
|
||||||
eu = upload.EncryptAnUploadable(u)
|
eu = upload.EncryptAnUploadable(u)
|
||||||
d1 = eu.get_storage_index()
|
d1 = eu.get_storage_index()
|
||||||
|
|
||||||
# CHK means the same data should encrypt the same way
|
# CHK means the same data should encrypt the same way
|
||||||
u = upload.Data(DATA)
|
u = upload.Data(DATA, convergence="")
|
||||||
eu = upload.EncryptAnUploadable(u)
|
eu = upload.EncryptAnUploadable(u)
|
||||||
d1a = eu.get_storage_index()
|
d1a = eu.get_storage_index()
|
||||||
|
|
||||||
# but if we change the encoding parameters, it should be different
|
# but if we use a different convergence string it should be different
|
||||||
u = upload.Data(DATA)
|
u = upload.Data(DATA, convergence="wheee!")
|
||||||
|
eu = upload.EncryptAnUploadable(u)
|
||||||
|
d1salt1 = eu.get_storage_index()
|
||||||
|
|
||||||
|
# and if we add yet a different convergence it should be different again
|
||||||
|
u = upload.Data(DATA, convergence="NOT wheee!")
|
||||||
|
eu = upload.EncryptAnUploadable(u)
|
||||||
|
d1salt2 = eu.get_storage_index()
|
||||||
|
|
||||||
|
# and if we use the first string again it should be the same as last time
|
||||||
|
u = upload.Data(DATA, convergence="wheee!")
|
||||||
|
eu = upload.EncryptAnUploadable(u)
|
||||||
|
d1salt1a = eu.get_storage_index()
|
||||||
|
|
||||||
|
# and if we change the encoding parameters, it should be different (from the same convergence string with different encoding parameters)
|
||||||
|
u = upload.Data(DATA, convergence="")
|
||||||
u.encoding_param_k = u.default_encoding_param_k + 1
|
u.encoding_param_k = u.default_encoding_param_k + 1
|
||||||
eu = upload.EncryptAnUploadable(u)
|
eu = upload.EncryptAnUploadable(u)
|
||||||
d2 = eu.get_storage_index()
|
d2 = eu.get_storage_index()
|
||||||
|
|
||||||
# and if we use a random key, it should be different than the CHK
|
# and if we use a random key, it should be different than the CHK
|
||||||
u = upload.Data(DATA, contenthashkey=False)
|
u = upload.Data(DATA, convergence=None)
|
||||||
eu = upload.EncryptAnUploadable(u)
|
eu = upload.EncryptAnUploadable(u)
|
||||||
d3 = eu.get_storage_index()
|
d3 = eu.get_storage_index()
|
||||||
# and different from another instance
|
# and different from another instance
|
||||||
u = upload.Data(DATA, contenthashkey=False)
|
u = upload.Data(DATA, convergence=None)
|
||||||
eu = upload.EncryptAnUploadable(u)
|
eu = upload.EncryptAnUploadable(u)
|
||||||
d4 = eu.get_storage_index()
|
d4 = eu.get_storage_index()
|
||||||
|
|
||||||
d = DeferredListShouldSucceed([d1,d1a,d2,d3,d4])
|
d = DeferredListShouldSucceed([d1,d1a,d1salt1,d1salt2,d1salt1a,d2,d3,d4])
|
||||||
def _done(res):
|
def _done(res):
|
||||||
si1, si1a, si2, si3, si4 = res
|
si1, si1a, si1salt1, si1salt2, si1salt1a, si2, si3, si4 = res
|
||||||
self.failUnlessEqual(si1, si1a)
|
self.failUnlessEqual(si1, si1a)
|
||||||
self.failIfEqual(si1, si2)
|
self.failIfEqual(si1, si2)
|
||||||
self.failIfEqual(si1, si3)
|
self.failIfEqual(si1, si3)
|
||||||
self.failIfEqual(si1, si4)
|
self.failIfEqual(si1, si4)
|
||||||
self.failIfEqual(si3, si4)
|
self.failIfEqual(si3, si4)
|
||||||
|
self.failIfEqual(si1salt1, si1)
|
||||||
|
self.failIfEqual(si1salt1, si1salt2)
|
||||||
|
self.failIfEqual(si1salt2, si1)
|
||||||
|
self.failUnlessEqual(si1salt1, si1salt1a)
|
||||||
d.addCallback(_done)
|
d.addCallback(_done)
|
||||||
return d
|
return d
|
||||||
|
|
||||||
|
@ -408,8 +408,8 @@ class HashUtilTests(unittest.TestCase):
|
|||||||
self.failUnlessEqual(h1, h2)
|
self.failUnlessEqual(h1, h2)
|
||||||
|
|
||||||
def test_chk(self):
|
def test_chk(self):
|
||||||
h1 = hashutil.content_hash_key_hash(3, 10, 1000, "data")
|
h1 = hashutil.convergence_hash(3, 10, 1000, "data", "secret")
|
||||||
h2 = hashutil.content_hash_key_hasher(3, 10, 1000)
|
h2 = hashutil.convergence_hasher(3, 10, 1000, "secret")
|
||||||
h2.update("data")
|
h2.update("data")
|
||||||
h2 = h2.digest()
|
h2 = h2.digest()
|
||||||
self.failUnlessEqual(h1, h2)
|
self.failUnlessEqual(h1, h2)
|
||||||
|
@ -35,6 +35,7 @@ class FakeClient(service.MultiService):
|
|||||||
introducer_client = FakeIntroducerClient()
|
introducer_client = FakeIntroducerClient()
|
||||||
_all_upload_status = [upload.UploadStatus()]
|
_all_upload_status = [upload.UploadStatus()]
|
||||||
_all_download_status = [download.DownloadStatus()]
|
_all_download_status = [download.DownloadStatus()]
|
||||||
|
convergence = "some random string"
|
||||||
|
|
||||||
def connected_to_introducer(self):
|
def connected_to_introducer(self):
|
||||||
return False
|
return False
|
||||||
|
@ -11,7 +11,7 @@ from foolscap.logging import log
|
|||||||
from allmydata.util.hashutil import file_renewal_secret_hash, \
|
from allmydata.util.hashutil import file_renewal_secret_hash, \
|
||||||
file_cancel_secret_hash, bucket_renewal_secret_hash, \
|
file_cancel_secret_hash, bucket_renewal_secret_hash, \
|
||||||
bucket_cancel_secret_hash, plaintext_hasher, \
|
bucket_cancel_secret_hash, plaintext_hasher, \
|
||||||
storage_index_hash, plaintext_segment_hasher, content_hash_key_hasher
|
storage_index_hash, plaintext_segment_hasher, convergence_hasher
|
||||||
from allmydata import encode, storage, hashtree, uri
|
from allmydata import encode, storage, hashtree, uri
|
||||||
from allmydata.util import base32, idlib, mathutil
|
from allmydata.util import base32, idlib, mathutil
|
||||||
from allmydata.util.assertutil import precondition
|
from allmydata.util.assertutil import precondition
|
||||||
@ -1084,13 +1084,20 @@ class BaseUploadable:
|
|||||||
class FileHandle(BaseUploadable):
|
class FileHandle(BaseUploadable):
|
||||||
implements(IUploadable)
|
implements(IUploadable)
|
||||||
|
|
||||||
def __init__(self, filehandle, contenthashkey=True):
|
def __init__(self, filehandle, convergence):
|
||||||
|
"""
|
||||||
|
Upload the data from the filehandle. If convergence is None then a
|
||||||
|
random encryption key will be used, else the plaintext will be hashed,
|
||||||
|
then the hash will be hashed together with the string in the
|
||||||
|
"convergence" argument to form the encryption key."
|
||||||
|
"""
|
||||||
|
assert convergence is None or isinstance(convergence, str), (convergence, type(convergence))
|
||||||
self._filehandle = filehandle
|
self._filehandle = filehandle
|
||||||
self._key = None
|
self._key = None
|
||||||
self._contenthashkey = contenthashkey
|
self.convergence = convergence
|
||||||
self._size = None
|
self._size = None
|
||||||
|
|
||||||
def _get_encryption_key_content_hash(self):
|
def _get_encryption_key_convergent(self):
|
||||||
if self._key is not None:
|
if self._key is not None:
|
||||||
return defer.succeed(self._key)
|
return defer.succeed(self._key)
|
||||||
|
|
||||||
@ -1100,7 +1107,7 @@ class FileHandle(BaseUploadable):
|
|||||||
def _got(params):
|
def _got(params):
|
||||||
k, happy, n, segsize = params
|
k, happy, n, segsize = params
|
||||||
f = self._filehandle
|
f = self._filehandle
|
||||||
enckey_hasher = content_hash_key_hasher(k, n, segsize)
|
enckey_hasher = convergence_hasher(k, n, segsize, self.convergence)
|
||||||
f.seek(0)
|
f.seek(0)
|
||||||
BLOCKSIZE = 64*1024
|
BLOCKSIZE = 64*1024
|
||||||
bytes_read = 0
|
bytes_read = 0
|
||||||
@ -1131,8 +1138,8 @@ class FileHandle(BaseUploadable):
|
|||||||
return defer.succeed(self._key)
|
return defer.succeed(self._key)
|
||||||
|
|
||||||
def get_encryption_key(self):
|
def get_encryption_key(self):
|
||||||
if self._contenthashkey:
|
if self.convergence is not None:
|
||||||
return self._get_encryption_key_content_hash()
|
return self._get_encryption_key_convergent()
|
||||||
else:
|
else:
|
||||||
return self._get_encryption_key_random()
|
return self._get_encryption_key_random()
|
||||||
|
|
||||||
@ -1153,15 +1160,29 @@ class FileHandle(BaseUploadable):
|
|||||||
pass
|
pass
|
||||||
|
|
||||||
class FileName(FileHandle):
|
class FileName(FileHandle):
|
||||||
def __init__(self, filename, contenthashkey=True):
|
def __init__(self, filename, convergence):
|
||||||
FileHandle.__init__(self, open(filename, "rb"), contenthashkey=contenthashkey)
|
"""
|
||||||
|
Upload the data from the filename. If convergence is None then a
|
||||||
|
random encryption key will be used, else the plaintext will be hashed,
|
||||||
|
then the hash will be hashed together with the string in the
|
||||||
|
"convergence" argument to form the encryption key."
|
||||||
|
"""
|
||||||
|
assert convergence is None or isinstance(convergence, str), (convergence, type(convergence))
|
||||||
|
FileHandle.__init__(self, open(filename, "rb"), convergence=convergence)
|
||||||
def close(self):
|
def close(self):
|
||||||
FileHandle.close(self)
|
FileHandle.close(self)
|
||||||
self._filehandle.close()
|
self._filehandle.close()
|
||||||
|
|
||||||
class Data(FileHandle):
|
class Data(FileHandle):
|
||||||
def __init__(self, data, contenthashkey=True):
|
def __init__(self, data, convergence):
|
||||||
FileHandle.__init__(self, StringIO(data), contenthashkey=contenthashkey)
|
"""
|
||||||
|
Upload the data from the data argument. If convergence is None then a
|
||||||
|
random encryption key will be used, else the plaintext will be hashed,
|
||||||
|
then the hash will be hashed together with the string in the
|
||||||
|
"convergence" argument to form the encryption key."
|
||||||
|
"""
|
||||||
|
assert convergence is None or isinstance(convergence, str), (convergence, type(convergence))
|
||||||
|
FileHandle.__init__(self, StringIO(data), convergence=convergence)
|
||||||
|
|
||||||
class Uploader(service.MultiService):
|
class Uploader(service.MultiService):
|
||||||
"""I am a service that allows file uploading. I am a service-child of the
|
"""I am a service that allows file uploading. I am a service-child of the
|
||||||
|
@ -68,7 +68,7 @@ PLAINTEXT_TAG = "allmydata_plaintext_v1"
|
|||||||
CIPHERTEXT_TAG = "allmydata_crypttext_v1"
|
CIPHERTEXT_TAG = "allmydata_crypttext_v1"
|
||||||
CIPHERTEXT_SEGMENT_TAG = "allmydata_crypttext_segment_v1"
|
CIPHERTEXT_SEGMENT_TAG = "allmydata_crypttext_segment_v1"
|
||||||
PLAINTEXT_SEGMENT_TAG = "allmydata_plaintext_segment_v1"
|
PLAINTEXT_SEGMENT_TAG = "allmydata_plaintext_segment_v1"
|
||||||
CONTENT_HASH_KEY_TAG = "allmydata_immutable_content_to_key_v1+"
|
CONVERGENT_ENCRYPTION_TAG = "allmydata_immutable_content_to_key_with_added_secret_v1+"
|
||||||
|
|
||||||
CLIENT_RENEWAL_TAG = "allmydata_client_renewal_secret_v1"
|
CLIENT_RENEWAL_TAG = "allmydata_client_renewal_secret_v1"
|
||||||
CLIENT_CANCEL_TAG = "allmydata_client_cancel_secret_v1"
|
CLIENT_CANCEL_TAG = "allmydata_client_cancel_secret_v1"
|
||||||
@ -91,9 +91,9 @@ DIRNODE_CHILD_WRITECAP_TAG = "allmydata_mutable_writekey_and_salt_to_dirnode_chi
|
|||||||
|
|
||||||
def storage_index_hash(key):
|
def storage_index_hash(key):
|
||||||
# storage index is truncated to 128 bits (16 bytes). We're only hashing a
|
# storage index is truncated to 128 bits (16 bytes). We're only hashing a
|
||||||
# 16-byte value to get it, so there's no point in using a larger value.
|
# 16-byte value to get it, so there's no point in using a larger value. We
|
||||||
# We use this same tagged hash to go from encryption key to storage index
|
# use this same tagged hash to go from encryption key to storage index for
|
||||||
# for random-keyed immutable files and content-hash-keyed immutabie
|
# random-keyed immutable files and convergent-encryption immutabie
|
||||||
# files. Mutable files use ssk_storage_index_hash().
|
# files. Mutable files use ssk_storage_index_hash().
|
||||||
return tagged_hash(STORAGE_INDEX_TAG, key, 16)
|
return tagged_hash(STORAGE_INDEX_TAG, key, 16)
|
||||||
|
|
||||||
@ -129,15 +129,14 @@ def plaintext_segment_hasher():
|
|||||||
|
|
||||||
KEYLEN = 16
|
KEYLEN = 16
|
||||||
|
|
||||||
def content_hash_key_hash(k, n, segsize, data):
|
def convergence_hash(k, n, segsize, data, convergence):
|
||||||
# This is defined to return a 16-byte AES key.
|
h = convergence_hasher(k, n, segsize, convergence)
|
||||||
|
h.update(data)
|
||||||
|
return h.digest()
|
||||||
|
def convergence_hasher(k, n, segsize, convergence):
|
||||||
|
assert isinstance(convergence, str)
|
||||||
param_tag = netstring("%d,%d,%d" % (k, n, segsize))
|
param_tag = netstring("%d,%d,%d" % (k, n, segsize))
|
||||||
tag = CONTENT_HASH_KEY_TAG + param_tag
|
tag = CONVERGENT_ENCRYPTION_TAG + netstring(convergence) + param_tag
|
||||||
h = tagged_hash(tag, data, KEYLEN)
|
|
||||||
return h
|
|
||||||
def content_hash_key_hasher(k, n, segsize):
|
|
||||||
param_tag = netstring("%d,%d,%d" % (k, n, segsize))
|
|
||||||
tag = CONTENT_HASH_KEY_TAG + param_tag
|
|
||||||
return tagged_hasher(tag, KEYLEN)
|
return tagged_hasher(tag, KEYLEN)
|
||||||
|
|
||||||
def random_key():
|
def random_key():
|
||||||
|
@ -14,8 +14,10 @@ class UnlinkedPUTCHKUploader(rend.Page):
|
|||||||
# "PUT /uri", to create an unlinked file. This is like PUT but
|
# "PUT /uri", to create an unlinked file. This is like PUT but
|
||||||
# without the associated set_uri.
|
# without the associated set_uri.
|
||||||
|
|
||||||
uploadable = FileHandle(req.content)
|
client = IClient(ctx)
|
||||||
d = IClient(ctx).upload(uploadable)
|
|
||||||
|
uploadable = FileHandle(req.content, client.convergence)
|
||||||
|
d = client.upload(uploadable)
|
||||||
d.addCallback(lambda results: results.uri)
|
d.addCallback(lambda results: results.uri)
|
||||||
# that fires with the URI of the new file
|
# that fires with the URI of the new file
|
||||||
return d
|
return d
|
||||||
@ -52,7 +54,7 @@ class UnlinkedPOSTCHKUploader(status.UploadResultsRendererMixin, rend.Page):
|
|||||||
assert req.method == "POST"
|
assert req.method == "POST"
|
||||||
self._done = observer.OneShotObserverList()
|
self._done = observer.OneShotObserverList()
|
||||||
fileobj = req.fields["file"].file
|
fileobj = req.fields["file"].file
|
||||||
uploadable = FileHandle(fileobj)
|
uploadable = FileHandle(fileobj, client.convergence)
|
||||||
d = client.upload(uploadable)
|
d = client.upload(uploadable)
|
||||||
d.addBoth(self._done.fire)
|
d.addBoth(self._done.fire)
|
||||||
|
|
||||||
|
@ -867,7 +867,7 @@ class POSTHandler(rend.Page):
|
|||||||
return d2
|
return d2
|
||||||
d.addCallback(_checked)
|
d.addCallback(_checked)
|
||||||
else:
|
else:
|
||||||
uploadable = FileHandle(contents.file)
|
uploadable = FileHandle(contents.file, convergence=client.convergence)
|
||||||
d = self._check_replacement(name)
|
d = self._check_replacement(name)
|
||||||
d.addCallback(lambda res: self._node.add_file(name, uploadable))
|
d.addCallback(lambda res: self._node.add_file(name, uploadable))
|
||||||
def _done(newnode):
|
def _done(newnode):
|
||||||
@ -1047,6 +1047,7 @@ class PUTHandler(rend.Page):
|
|||||||
self._replace = replace
|
self._replace = replace
|
||||||
|
|
||||||
def renderHTTP(self, ctx):
|
def renderHTTP(self, ctx):
|
||||||
|
client = IClient(ctx)
|
||||||
req = inevow.IRequest(ctx)
|
req = inevow.IRequest(ctx)
|
||||||
t = self._t
|
t = self._t
|
||||||
localfile = self._localfile
|
localfile = self._localfile
|
||||||
@ -1063,18 +1064,18 @@ class PUTHandler(rend.Page):
|
|||||||
d.addCallback(self._check_replacement, name, self._replace)
|
d.addCallback(self._check_replacement, name, self._replace)
|
||||||
if t == "upload":
|
if t == "upload":
|
||||||
if localfile:
|
if localfile:
|
||||||
d.addCallback(self._upload_localfile, localfile, name)
|
d.addCallback(self._upload_localfile, localfile, name, convergence=client.convergence)
|
||||||
else:
|
else:
|
||||||
# localdir
|
# localdir
|
||||||
# take the last step
|
# take the last step
|
||||||
d.addCallback(self._get_or_create_directories, self._path[-1:])
|
d.addCallback(self._get_or_create_directories, self._path[-1:])
|
||||||
d.addCallback(self._upload_localdir, localdir)
|
d.addCallback(self._upload_localdir, localdir, convergence=client.convergence)
|
||||||
elif t == "uri":
|
elif t == "uri":
|
||||||
d.addCallback(self._attach_uri, req.content, name)
|
d.addCallback(self._attach_uri, req.content, name)
|
||||||
elif t == "mkdir":
|
elif t == "mkdir":
|
||||||
d.addCallback(self._mkdir, name)
|
d.addCallback(self._mkdir, name)
|
||||||
else:
|
else:
|
||||||
d.addCallback(self._upload_file, req.content, name)
|
d.addCallback(self._upload_file, req.content, name, convergence=client.convergence)
|
||||||
|
|
||||||
def _transform_error(f):
|
def _transform_error(f):
|
||||||
errors = {BlockingFileError: http.BAD_REQUEST,
|
errors = {BlockingFileError: http.BAD_REQUEST,
|
||||||
@ -1126,8 +1127,8 @@ class PUTHandler(rend.Page):
|
|||||||
d.addCallback(_done)
|
d.addCallback(_done)
|
||||||
return d
|
return d
|
||||||
|
|
||||||
def _upload_file(self, node, contents, name):
|
def _upload_file(self, node, contents, name, convergence):
|
||||||
uploadable = FileHandle(contents)
|
uploadable = FileHandle(contents, convergence=convergence)
|
||||||
d = node.add_file(name, uploadable)
|
d = node.add_file(name, uploadable)
|
||||||
def _done(filenode):
|
def _done(filenode):
|
||||||
log.msg("webish upload complete",
|
log.msg("webish upload complete",
|
||||||
@ -1136,8 +1137,8 @@ class PUTHandler(rend.Page):
|
|||||||
d.addCallback(_done)
|
d.addCallback(_done)
|
||||||
return d
|
return d
|
||||||
|
|
||||||
def _upload_localfile(self, node, localfile, name):
|
def _upload_localfile(self, node, localfile, name, convergence):
|
||||||
uploadable = FileName(localfile)
|
uploadable = FileName(localfile, convergence=convergence)
|
||||||
d = node.add_file(name, uploadable)
|
d = node.add_file(name, uploadable)
|
||||||
d.addCallback(lambda filenode: filenode.get_uri())
|
d.addCallback(lambda filenode: filenode.get_uri())
|
||||||
return d
|
return d
|
||||||
@ -1150,7 +1151,7 @@ class PUTHandler(rend.Page):
|
|||||||
d.addCallback(_done)
|
d.addCallback(_done)
|
||||||
return d
|
return d
|
||||||
|
|
||||||
def _upload_localdir(self, node, localdir):
|
def _upload_localdir(self, node, localdir, convergence):
|
||||||
# build up a list of files to upload. TODO: for now, these files and
|
# build up a list of files to upload. TODO: for now, these files and
|
||||||
# directories must have UTF-8 encoded filenames: anything else will
|
# directories must have UTF-8 encoded filenames: anything else will
|
||||||
# cause the upload to break.
|
# cause the upload to break.
|
||||||
@ -1179,7 +1180,7 @@ class PUTHandler(rend.Page):
|
|||||||
if dir:
|
if dir:
|
||||||
d.addCallback(self._makedir, node, dir)
|
d.addCallback(self._makedir, node, dir)
|
||||||
for f in all_files:
|
for f in all_files:
|
||||||
d.addCallback(self._upload_one_file, node, localdir, f)
|
d.addCallback(self._upload_one_file, node, localdir, f, convergence=convergence)
|
||||||
return d
|
return d
|
||||||
|
|
||||||
def _makedir(self, res, node, dir):
|
def _makedir(self, res, node, dir):
|
||||||
@ -1191,12 +1192,12 @@ class PUTHandler(rend.Page):
|
|||||||
d.addCallback(lambda parent: parent.create_empty_directory(dir[-1]))
|
d.addCallback(lambda parent: parent.create_empty_directory(dir[-1]))
|
||||||
return d
|
return d
|
||||||
|
|
||||||
def _upload_one_file(self, res, node, localdir, f):
|
def _upload_one_file(self, res, node, localdir, f, convergence):
|
||||||
# get the parent. We can be sure this exists because we already
|
# get the parent. We can be sure this exists because we already
|
||||||
# went through and created all the directories we require.
|
# went through and created all the directories we require.
|
||||||
localfile = os.path.join(localdir, *f)
|
localfile = os.path.join(localdir, *f)
|
||||||
d = node.get_child_at_path(f[:-1])
|
d = node.get_child_at_path(f[:-1])
|
||||||
d.addCallback(self._upload_localfile, localfile, f[-1])
|
d.addCallback(self._upload_localfile, localfile, f[-1], convergence=convergence)
|
||||||
return d
|
return d
|
||||||
|
|
||||||
|
|
||||||
|
Loading…
x
Reference in New Issue
Block a user