import time
from zope.interface import implements
from twisted.trial import unittest
from allmydata import uri, dirnode, upload
from allmydata.interfaces import IURI, IClient, IMutableFileNode, \
     INewDirectoryURI, IReadonlyNewDirectoryURI, IFileNode
from allmydata.util import hashutil, testutil
from allmydata.test.common import make_chk_file_uri, make_mutable_file_uri, \
     NonGridDirectoryNode, create_chk_filenode
from twisted.internet import defer, reactor

# to test dirnode.py, we want to construct a tree of real DirectoryNodes that
# contain pointers to fake files. We start with a fake MutableFileNode that
# stores all of its data in a static table.

FakeDirectoryNode = NonGridDirectoryNode

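# A Marker stands in for a leaf file node: it remembers the URI it was given
# and derives a deterministic verifier cap from it, which is all the
# directory tests below need for listing children and building manifests.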
class Marker:
    implements(IFileNode, IMutableFileNode) # sure, why not
    def __init__(self, nodeuri):
        if not isinstance(nodeuri, str):
            nodeuri = nodeuri.to_string()
        self.nodeuri = nodeuri
        si = hashutil.tagged_hash("tag1", nodeuri)[:16]
        fp = hashutil.tagged_hash("tag2", nodeuri)
        self.verifieruri = uri.SSKVerifierURI(storage_index=si,
                                              fingerprint=fp).to_string()
    def get_uri(self):
        return self.nodeuri
    def get_readonly_uri(self):
        return self.nodeuri
    def get_verifier(self):
        return self.verifieruri

# dirnode requires three methods from the client: upload(),
# create_node_from_uri(), and create_empty_dirnode(). Of these, upload() is
# only used by the convenience composite method add_file().

class FakeClient:
    implements(IClient)

    def upload(self, uploadable):
        d = uploadable.get_size()
        d.addCallback(lambda size: uploadable.read(size))
        def _got_data(datav):
            data = "".join(datav)
            n = create_chk_filenode(self, data)
            results = upload.UploadResults()
            results.uri = n.get_uri()
            return results
        d.addCallback(_got_data)
        return d

    def create_node_from_uri(self, u):
        u = IURI(u)
        if (INewDirectoryURI.providedBy(u)
            or IReadonlyNewDirectoryURI.providedBy(u)):
            return FakeDirectoryNode(self).init_from_uri(u)
        return Marker(u.to_string())

    def create_empty_dirnode(self):
        n = FakeDirectoryNode(self)
        d = n.create()
        d.addCallback(lambda res: n)
        return d
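
# A minimal sketch of how the tests below drive these client methods: create
# an empty directory, attach one child by URI, then list the children. The
# helper name _example_roundtrip and the child name it uses are illustrative
# only; they are not part of the original test suite.
def _example_roundtrip(client):
    d = client.create_empty_dirnode()
    def _attach(dn):
        d2 = dn.set_uri("example-child", make_mutable_file_uri())
        d2.addCallback(lambda res: dn.list())
        return d2
    d.addCallback(_attach)
    return d # fires with a dict keyed by child name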

class Dirnode(unittest.TestCase, testutil.ShouldFailMixin):
    def setUp(self):
        self.client = FakeClient()

    def test_basic(self):
        d = self.client.create_empty_dirnode()
        def _done(res):
            self.failUnless(isinstance(res, FakeDirectoryNode))
            rep = str(res)
            self.failUnless("RW" in rep)
        d.addCallback(_done)
        return d

    def test_corrupt(self):
        d = self.client.create_empty_dirnode()
        def _created(dn):
            u = make_mutable_file_uri()
            d = dn.set_uri("child", u, {})
            d.addCallback(lambda res: dn.list())
            def _check1(children):
                self.failUnless("child" in children)
            d.addCallback(_check1)
            d.addCallback(lambda res:
                          self.shouldFail(KeyError, "get bogus", None,
                                          dn.get, "bogus"))
            def _corrupt(res):
                filenode = dn._node
                si = IURI(filenode.get_uri()).storage_index
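                # the fake mutable filenode (the "static table" mentioned in
                # the module comment) keeps its serialized contents in the
                # shared all_contents dict, keyed by storage index, so we can
                # reach in and corrupt the stored string directly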
                old_contents = filenode.all_contents[si]
                # we happen to know that the writecap is encrypted near the
                # end of the string. Flip one of its bits and make sure we
                # detect the corruption.
                new_contents = testutil.flip_bit(old_contents, -10)
                # TODO: also test flipping bits in the other portions
                filenode.all_contents[si] = new_contents
            d.addCallback(_corrupt)
            def _check2(res):
                self.shouldFail(hashutil.IntegrityCheckError, "corrupt",
                                "HMAC does not match, crypttext is corrupted",
                                dn.list)
            d.addCallback(_check2)
            return d
        d.addCallback(_created)
        return d

    def test_check(self):
        d = self.client.create_empty_dirnode()
        d.addCallback(lambda dn: dn.check())
        def _done(res):
            pass
        d.addCallback(_done)
        return d

    def test_readonly(self):
        fileuri = make_chk_file_uri(1234)
        filenode = self.client.create_node_from_uri(fileuri)
        uploadable = upload.Data("some data")

        d = self.client.create_empty_dirnode()
        def _created(rw_dn):
            d2 = rw_dn.set_uri("child", fileuri)
            d2.addCallback(lambda res: rw_dn)
            return d2
        d.addCallback(_created)

        def _ready(rw_dn):
            ro_uri = rw_dn.get_readonly_uri()
            ro_dn = self.client.create_node_from_uri(ro_uri)
            self.failUnless(ro_dn.is_readonly())
            self.failUnless(ro_dn.is_mutable())

            self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
                            ro_dn.set_uri, "newchild", fileuri)
            self.shouldFail(dirnode.NotMutableError, "set_node ro", None,
                            ro_dn.set_node, "newchild", filenode)
            self.shouldFail(dirnode.NotMutableError, "add_file ro", None,
                            ro_dn.add_file, "newchild", uploadable)
            self.shouldFail(dirnode.NotMutableError, "delete ro", None,
                            ro_dn.delete, "child")
            self.shouldFail(dirnode.NotMutableError, "create_empty_directory ro",
                            None,
                            ro_dn.create_empty_directory, "newchild")
            self.shouldFail(dirnode.NotMutableError, "move_child_to ro", None,
                            ro_dn.move_child_to, "child", rw_dn)
            self.shouldFail(dirnode.NotMutableError, "move_child_to into ro", None,
                            rw_dn.move_child_to, "child", ro_dn)
            return ro_dn.list()
        d.addCallback(_ready)
        def _listed(children):
            self.failUnless("child" in children)
        d.addCallback(_listed)
        return d

    def failUnlessGreaterThan(self, a, b):
        self.failUnless(a > b, "%s should be > %s" % (a, b))

    def failUnlessGreaterOrEqualThan(self, a, b):
        self.failUnless(a >= b, "%s should be >= %s" % (a, b))
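
    # stall() returns a Deferred that fires with 'res' after 'delay' seconds;
    # the timestamp checks in test_create() use it (via
    # d.addCallback(self.stall, 2.0)) to leave a measurable gap between
    # operations despite low-resolution timestamps.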
    def stall(self, res, delay=1.0):
        d = defer.Deferred()
        reactor.callLater(delay, d.callback, res)
        return d

    def test_create(self):
        self.expected_manifest = []

        d = self.client.create_empty_dirnode()
        def _then(n):
            self.failUnless(n.is_mutable())
            u = n.get_uri()
            self.failUnless(u)
            self.failUnless(u.startswith("URI:DIR2:"), u)
            u_ro = n.get_readonly_uri()
            self.failUnless(u_ro.startswith("URI:DIR2-RO:"), u_ro)
            u_v = n.get_verifier()
            self.failUnless(u_v.startswith("URI:DIR2-Verifier:"), u_v)
            self.expected_manifest.append(u_v)

            d = n.list()
            d.addCallback(lambda res: self.failUnlessEqual(res, {}))
            d.addCallback(lambda res: n.has_child("missing"))
            d.addCallback(lambda res: self.failIf(res))
            fake_file_uri = make_mutable_file_uri()
            m = Marker(fake_file_uri)
            ffu_v = m.get_verifier()
            assert isinstance(ffu_v, str)
            self.expected_manifest.append(ffu_v)
            d.addCallback(lambda res: n.set_uri("child", fake_file_uri))

            d.addCallback(lambda res: n.create_empty_directory("subdir"))
            def _created(subdir):
                self.failUnless(isinstance(subdir, FakeDirectoryNode))
                self.subdir = subdir
                new_v = subdir.get_verifier()
                assert isinstance(new_v, str)
                self.expected_manifest.append(new_v)
            d.addCallback(_created)

            d.addCallback(lambda res: n.list())
            d.addCallback(lambda children:
                          self.failUnlessEqual(sorted(children.keys()),
                                               sorted(["child", "subdir"])))
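
            # build_manifest() should report one verifier cap per reachable
            # node: the directory itself, the "child" Marker, and the new
            # subdirectory, matching what we accumulated in expected_manifest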
            d.addCallback(lambda res: n.build_manifest())
            def _check_manifest(manifest):
                self.failUnlessEqual(sorted(manifest),
                                     sorted(self.expected_manifest))
            d.addCallback(_check_manifest)

            def _add_subsubdir(res):
                return self.subdir.create_empty_directory("subsubdir")
            d.addCallback(_add_subsubdir)
            d.addCallback(lambda res: n.get_child_at_path("subdir/subsubdir"))
            d.addCallback(lambda subsubdir:
                          self.failUnless(isinstance(subsubdir,
                                                     FakeDirectoryNode)))
            d.addCallback(lambda res: n.get_child_at_path(""))
            d.addCallback(lambda res: self.failUnlessEqual(res.get_uri(),
                                                           n.get_uri()))

            d.addCallback(lambda res: n.get_metadata_for("child"))
            d.addCallback(lambda metadata:
                          self.failUnlessEqual(sorted(metadata.keys()),
                                               ["ctime", "mtime"]))

            # set_uri + metadata
            # it should be possible to add a child without any metadata
            d.addCallback(lambda res: n.set_uri("c2", fake_file_uri, {}))
            d.addCallback(lambda res: n.get_metadata_for("c2"))
            d.addCallback(lambda metadata: self.failUnlessEqual(metadata, {}))

            # if we don't set any defaults, the child should get timestamps
            d.addCallback(lambda res: n.set_uri("c3", fake_file_uri))
            d.addCallback(lambda res: n.get_metadata_for("c3"))
            d.addCallback(lambda metadata:
                          self.failUnlessEqual(sorted(metadata.keys()),
                                               ["ctime", "mtime"]))

            # or we can add specific metadata at set_uri() time, which
            # overrides the timestamps
            d.addCallback(lambda res: n.set_uri("c4", fake_file_uri,
                                                {"key": "value"}))
            d.addCallback(lambda res: n.get_metadata_for("c4"))
            d.addCallback(lambda metadata:
                          self.failUnlessEqual(metadata, {"key": "value"}))

            d.addCallback(lambda res: n.delete("c2"))
            d.addCallback(lambda res: n.delete("c3"))
            d.addCallback(lambda res: n.delete("c4"))

            # set_node + metadata
            # it should be possible to add a child without any metadata
            d.addCallback(lambda res: n.set_node("d2", n, {}))
            d.addCallback(lambda res: n.get_metadata_for("d2"))
            d.addCallback(lambda metadata: self.failUnlessEqual(metadata, {}))

            # if we don't set any defaults, the child should get timestamps
            d.addCallback(lambda res: n.set_node("d3", n))
            d.addCallback(lambda res: n.get_metadata_for("d3"))
            d.addCallback(lambda metadata:
                          self.failUnlessEqual(sorted(metadata.keys()),
                                               ["ctime", "mtime"]))

            # or we can add specific metadata at set_node() time, which
            # overrides the timestamps
            d.addCallback(lambda res: n.set_node("d4", n,
                                                 {"key": "value"}))
            d.addCallback(lambda res: n.get_metadata_for("d4"))
            d.addCallback(lambda metadata:
                          self.failUnlessEqual(metadata, {"key": "value"}))

            d.addCallback(lambda res: n.delete("d2"))
            d.addCallback(lambda res: n.delete("d3"))
            d.addCallback(lambda res: n.delete("d4"))

            # metadata through set_uris()
            d.addCallback(lambda res: n.set_uris([ ("e1", fake_file_uri),
                                                   ("e2", fake_file_uri, {}),
                                                   ("e3", fake_file_uri,
                                                    {"key": "value"}),
                                                   ]))
            d.addCallback(lambda res: n.get_metadata_for("e1"))
            d.addCallback(lambda metadata:
                          self.failUnlessEqual(sorted(metadata.keys()),
                                               ["ctime", "mtime"]))
            d.addCallback(lambda res: n.get_metadata_for("e2"))
            d.addCallback(lambda metadata: self.failUnlessEqual(metadata, {}))
            d.addCallback(lambda res: n.get_metadata_for("e3"))
            d.addCallback(lambda metadata:
                          self.failUnlessEqual(metadata, {"key": "value"}))

            d.addCallback(lambda res: n.delete("e1"))
            d.addCallback(lambda res: n.delete("e2"))
            d.addCallback(lambda res: n.delete("e3"))

            # metadata through set_nodes()
            d.addCallback(lambda res: n.set_nodes([ ("f1", n),
                                                    ("f2", n, {}),
                                                    ("f3", n,
                                                     {"key": "value"}),
                                                    ]))
            d.addCallback(lambda res: n.get_metadata_for("f1"))
            d.addCallback(lambda metadata:
                          self.failUnlessEqual(sorted(metadata.keys()),
                                               ["ctime", "mtime"]))
            d.addCallback(lambda res: n.get_metadata_for("f2"))
            d.addCallback(lambda metadata: self.failUnlessEqual(metadata, {}))
            d.addCallback(lambda res: n.get_metadata_for("f3"))
            d.addCallback(lambda metadata:
                          self.failUnlessEqual(metadata, {"key": "value"}))

            d.addCallback(lambda res: n.delete("f1"))
            d.addCallback(lambda res: n.delete("f2"))
            d.addCallback(lambda res: n.delete("f3"))

            d.addCallback(lambda res:
                          n.set_metadata_for("child",
                                             {"tags": ["web2.0-compatible"]}))
            d.addCallback(lambda n1: n1.get_metadata_for("child"))
            d.addCallback(lambda metadata:
                          self.failUnlessEqual(metadata,
                                               {"tags": ["web2.0-compatible"]}))

            def _start(res):
                self._start_timestamp = time.time()
            d.addCallback(_start)
            d.addCallback(lambda res: n.add_file("timestamps",
                                                 upload.Data("stamp me")))
            def _stop(res):
                self._stop_timestamp = time.time()
            d.addCallback(_stop)

            d.addCallback(lambda res: n.get_metadata_for("timestamps"))
            def _check_timestamp1(metadata):
                self.failUnless("ctime" in metadata)
                self.failUnless("mtime" in metadata)
                self.failUnlessGreaterOrEqualThan(metadata["ctime"],
                                                  self._start_timestamp)
                self.failUnlessGreaterOrEqualThan(self._stop_timestamp,
                                                  metadata["ctime"])
                self.failUnlessGreaterOrEqualThan(metadata["mtime"],
                                                  self._start_timestamp)
                self.failUnlessGreaterOrEqualThan(self._stop_timestamp,
                                                  metadata["mtime"])
                # Our current timestamp rules say that replacing an existing
                # child should preserve the 'ctime' but update the 'mtime'
                self._old_ctime = metadata["ctime"]
                self._old_mtime = metadata["mtime"]
            d.addCallback(_check_timestamp1)
            d.addCallback(self.stall, 2.0) # accommodate low-res timestamps
            d.addCallback(lambda res: n.set_node("timestamps", n))
            d.addCallback(lambda res: n.get_metadata_for("timestamps"))
            def _check_timestamp2(metadata):
                self.failUnlessEqual(metadata["ctime"], self._old_ctime,
                                     "%s != %s" % (metadata["ctime"],
                                                   self._old_ctime))
                self.failUnlessGreaterThan(metadata["mtime"], self._old_mtime)
                return n.delete("timestamps")
            d.addCallback(_check_timestamp2)

            # also make sure we can add/update timestamps on a
            # previously-existing child that didn't have any, since there are
            # a lot of 0.7.0-generated edges out there
            d.addCallback(lambda res: n.set_node("no_timestamps", n, {}))
            d.addCallback(lambda res: n.set_node("no_timestamps", n))
            d.addCallback(lambda res: n.get_metadata_for("no_timestamps"))
            d.addCallback(lambda metadata:
                          self.failUnlessEqual(sorted(metadata.keys()),
                                               ["ctime", "mtime"]))
            d.addCallback(lambda res: n.delete("no_timestamps"))

            d.addCallback(lambda res: n.delete("subdir"))
            d.addCallback(lambda old_child:
                          self.failUnlessEqual(old_child.get_uri(),
                                               self.subdir.get_uri()))

            d.addCallback(lambda res: n.list())
            d.addCallback(lambda children:
                          self.failUnlessEqual(sorted(children.keys()),
                                               sorted(["child"])))

            uploadable = upload.Data("some data")
            d.addCallback(lambda res: n.add_file("newfile", uploadable))
            d.addCallback(lambda newnode:
                          self.failUnless(IFileNode.providedBy(newnode)))
            d.addCallback(lambda res: n.list())
            d.addCallback(lambda children:
                          self.failUnlessEqual(sorted(children.keys()),
                                               sorted(["child", "newfile"])))
            d.addCallback(lambda res: n.get_metadata_for("newfile"))
            d.addCallback(lambda metadata:
                          self.failUnlessEqual(sorted(metadata.keys()),
                                               ["ctime", "mtime"]))

            uploadable = upload.Data("some data")
            d.addCallback(lambda res: n.add_file("newfile-metadata",
                                                 uploadable,
                                                 {"key": "value"}))
            d.addCallback(lambda newnode:
                          self.failUnless(IFileNode.providedBy(newnode)))
            d.addCallback(lambda res: n.get_metadata_for("newfile-metadata"))
            d.addCallback(lambda metadata:
                          self.failUnlessEqual(metadata, {"key": "value"}))
            d.addCallback(lambda res: n.delete("newfile-metadata"))

            d.addCallback(lambda res: n.create_empty_directory("subdir2"))
            def _created2(subdir2):
                self.subdir2 = subdir2
            d.addCallback(_created2)

            d.addCallback(lambda res:
                          n.move_child_to("child", self.subdir2))
            d.addCallback(lambda res: n.list())
            d.addCallback(lambda children:
                          self.failUnlessEqual(sorted(children.keys()),
                                               sorted(["newfile", "subdir2"])))
            d.addCallback(lambda res: self.subdir2.list())
            d.addCallback(lambda children:
                          self.failUnlessEqual(sorted(children.keys()),
                                               sorted(["child"])))

            return d

        d.addCallback(_then)

        return d

netstring = hashutil.netstring
split_netstring = dirnode.split_netstring
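
# a netstring is a payload prefixed with its decimal length and terminated by
# a comma, e.g. netstring("hello") == "5:hello,". split_netstring(s, n)
# parses n such strings back out of s.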
class Netstring(unittest.TestCase):
    def test_split(self):
        a = netstring("hello") + netstring("world")
        self.failUnlessEqual(split_netstring(a, 2), ("hello", "world"))
        self.failUnlessEqual(split_netstring(a, 2, False), ("hello", "world"))
        self.failUnlessEqual(split_netstring(a, 2, True),
                             ("hello", "world", ""))
        self.failUnlessRaises(ValueError, split_netstring, a, 3)
        self.failUnlessRaises(ValueError, split_netstring, a+" extra", 2)
        self.failUnlessRaises(ValueError, split_netstring, a+" extra", 2, False)

    def test_extra(self):
        a = netstring("hello")
        self.failUnlessEqual(split_netstring(a, 1, True), ("hello", ""))
        b = netstring("hello") + "extra stuff"
        self.failUnlessEqual(split_netstring(b, 1, True),
                             ("hello", "extra stuff"))

    def test_nested(self):
        a = netstring("hello") + netstring("world") + "extra stuff"
        b = netstring("a") + netstring("is") + netstring(a) + netstring(".")
        top = split_netstring(b, 4)
        self.failUnlessEqual(len(top), 4)
        self.failUnlessEqual(top[0], "a")
        self.failUnlessEqual(top[1], "is")
        self.failUnlessEqual(top[2], a)
        self.failUnlessEqual(top[3], ".")
        self.failUnlessRaises(ValueError, split_netstring, a, 2)
        self.failUnlessRaises(ValueError, split_netstring, a, 2, False)
        bottom = split_netstring(a, 2, True)
        self.failUnlessEqual(bottom, ("hello", "world", "extra stuff"))