import time
from zope.interface import implements
from twisted.trial import unittest
from twisted.internet import defer
from allmydata import uri, dirnode
from allmydata.immutable import upload
from allmydata.interfaces import IURI, IClient, IMutableFileNode, \
     INewDirectoryURI, IReadonlyNewDirectoryURI, IFileNode, \
     ExistingChildError, NoSuchChildError, \
     IDeepCheckResults, IDeepCheckAndRepairResults
from allmydata.mutable.filenode import MutableFileNode
from allmydata.mutable.common import UncoordinatedWriteError
from allmydata.util import hashutil, base32
from allmydata.monitor import Monitor
from allmydata.test.common import make_chk_file_uri, make_mutable_file_uri, \
     FakeDirectoryNode, create_chk_filenode, ErrorMixin, SystemTestMixin
from allmydata.check_results import CheckerResults, CheckAndRepairResults
import common_util as testutil

# to test dirnode.py, we want to construct a tree of real DirectoryNodes that
# contain pointers to fake files. We start with a fake MutableFileNode that
# stores all of its data in a static table.

class Marker:
    implements(IFileNode, IMutableFileNode) # sure, why not
    def __init__(self, nodeuri):
        if not isinstance(nodeuri, str):
            nodeuri = nodeuri.to_string()
        self.nodeuri = nodeuri
        si = hashutil.tagged_hash("tag1", nodeuri)[:16]
        self.storage_index = si
        fp = hashutil.tagged_hash("tag2", nodeuri)
        self.verifieruri = uri.SSKVerifierURI(storage_index=si, fingerprint=fp)
    def get_uri(self):
        return self.nodeuri
    def get_readonly_uri(self):
        return self.nodeuri
    def get_verify_cap(self):
        return self.verifieruri
    def get_storage_index(self):
        return self.storage_index

    def check(self, monitor, verify=False):
        r = CheckerResults(uri.from_string(self.nodeuri), None)
        r.set_healthy(True)
        r.set_recoverable(True)
        return defer.succeed(r)

    def check_and_repair(self, monitor, verify=False):
        # pass the monitor through to check(), with verify in its own slot
        d = self.check(monitor, verify)
        def _got(cr):
            r = CheckAndRepairResults(None)
            r.pre_repair_results = r.post_repair_results = cr
            return r
        d.addCallback(_got)
        return d

# dirnode requires three methods from the client: upload(),
# create_node_from_uri(), and create_empty_dirnode(). Of these, upload() is
# only used by the convenience composite method add_file().

class FakeClient:
    implements(IClient)

    def upload(self, uploadable):
        d = uploadable.get_size()
        d.addCallback(lambda size: uploadable.read(size))
        def _got_data(datav):
            data = "".join(datav)
            n = create_chk_filenode(self, data)
            results = upload.UploadResults()
            results.uri = n.get_uri()
            return results
        d.addCallback(_got_data)
        return d

    def create_node_from_uri(self, u):
        u = IURI(u)
        if (INewDirectoryURI.providedBy(u)
            or IReadonlyNewDirectoryURI.providedBy(u)):
            return FakeDirectoryNode(self).init_from_uri(u)
        return Marker(u.to_string())

    def create_empty_dirnode(self):
        n = FakeDirectoryNode(self)
        d = n.create()
        d.addCallback(lambda res: n)
        return d

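# The Dirnode tests below exercise the dirnode API -- set_uri/set_node/
# set_children/set_nodes, add_file, list, get, delete, move_child_to,
# metadata handling, and the deep-check/deep-stats operations -- entirely
# against FakeDirectoryNode and the FakeClient above, so no real grid or
# storage servers are involved.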
class Dirnode(unittest.TestCase,
              testutil.ShouldFailMixin, testutil.StallMixin, ErrorMixin):
    def setUp(self):
        self.client = FakeClient()

    def test_basic(self):
        d = self.client.create_empty_dirnode()
        def _done(res):
            self.failUnless(isinstance(res, FakeDirectoryNode))
            rep = str(res)
            self.failUnless("RW" in rep)
        d.addCallback(_done)
        return d

    def test_corrupt(self):
        d = self.client.create_empty_dirnode()
        def _created(dn):
            u = make_mutable_file_uri()
            d = dn.set_uri(u"child", u.to_string(), {})
            d.addCallback(lambda res: dn.list())
            def _check1(children):
                self.failUnless(u"child" in children)
            d.addCallback(_check1)
            d.addCallback(lambda res:
                          self.shouldFail(NoSuchChildError, "get bogus", None,
                                          dn.get, u"bogus"))
            def _corrupt(res):
                filenode = dn._node
                si = IURI(filenode.get_uri()).storage_index
                old_contents = filenode.all_contents[si]
                # We happen to know that the writecap MAC is near the end of
                # the string. Flip one of its bits and make sure we ignore
                # the corruption.
                new_contents = testutil.flip_bit(old_contents, -10)
                # TODO: also test flipping bits in the other portions
                filenode.all_contents[si] = new_contents
            d.addCallback(_corrupt)
            def _check2(res):
                d = dn.list()
                def _c3(res):
                    self.failUnless(res.has_key('child'))
                d.addCallback(_c3)
                # chain the inner Deferred so a failure in _c3 is not lost
                return d
            d.addCallback(_check2)
            return d
        d.addCallback(_created)
        return d

    def test_check(self):
        d = self.client.create_empty_dirnode()
        d.addCallback(lambda dn: dn.check(Monitor()))
        def _done(res):
            self.failUnless(res.is_healthy())
        d.addCallback(_done)
        return d

    def _test_deepcheck_create(self):
        # create a small tree with a loop, and some non-directories
        #  root/
        #  root/subdir/
        #  root/subdir/file1
        #  root/subdir/link -> root
        d = self.client.create_empty_dirnode()
        def _created_root(rootnode):
            self._rootnode = rootnode
            return rootnode.create_empty_directory(u"subdir")
        d.addCallback(_created_root)
        def _created_subdir(subdir):
            self._subdir = subdir
            d = subdir.add_file(u"file1", upload.Data("data", None))
            d.addCallback(lambda res: subdir.set_node(u"link", self._rootnode))
            return d
        d.addCallback(_created_subdir)
        def _done(res):
            return self._rootnode
        d.addCallback(_done)
        return d

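    # The "link" edge above points back at the root, so the tree contains
    # only three distinct objects (root, subdir, and file1); that is why the
    # deep-check counters below expect count-objects-checked == 3.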
    def test_deepcheck(self):
        d = self._test_deepcheck_create()
        d.addCallback(lambda rootnode: rootnode.start_deep_check().when_done())
        def _check_results(r):
            self.failUnless(IDeepCheckResults.providedBy(r))
            c = r.get_counters()
            self.failUnlessEqual(c,
                                 {"count-objects-checked": 3,
                                  "count-objects-healthy": 3,
                                  "count-objects-unhealthy": 0,
                                  "count-objects-unrecoverable": 0,
                                  "count-corrupt-shares": 0,
                                  })
            self.failIf(r.get_corrupt_shares())
            self.failUnlessEqual(len(r.get_all_results()), 3)
        d.addCallback(_check_results)
        return d

    def test_deepcheck_and_repair(self):
        d = self._test_deepcheck_create()
        d.addCallback(lambda rootnode:
                      rootnode.start_deep_check_and_repair().when_done())
        def _check_results(r):
            self.failUnless(IDeepCheckAndRepairResults.providedBy(r))
            c = r.get_counters()
            self.failUnlessEqual(c,
                                 {"count-objects-checked": 3,
                                  "count-objects-healthy-pre-repair": 3,
                                  "count-objects-unhealthy-pre-repair": 0,
                                  "count-objects-unrecoverable-pre-repair": 0,
                                  "count-corrupt-shares-pre-repair": 0,
                                  "count-objects-healthy-post-repair": 3,
                                  "count-objects-unhealthy-post-repair": 0,
                                  "count-objects-unrecoverable-post-repair": 0,
                                  "count-corrupt-shares-post-repair": 0,
                                  "count-repairs-attempted": 0,
                                  "count-repairs-successful": 0,
                                  "count-repairs-unsuccessful": 0,
                                  })
            self.failIf(r.get_corrupt_shares())
            self.failIf(r.get_remaining_corrupt_shares())
            self.failUnlessEqual(len(r.get_all_results()), 3)
        d.addCallback(_check_results)
        return d

    def _mark_file_bad(self, rootnode):
        si = IURI(rootnode.get_uri())._filenode_uri.storage_index
        rootnode._node.bad_shares[si] = "unhealthy"
        return rootnode

    def test_deepcheck_problems(self):
        d = self._test_deepcheck_create()
        d.addCallback(lambda rootnode: self._mark_file_bad(rootnode))
        d.addCallback(lambda rootnode: rootnode.start_deep_check().when_done())
        def _check_results(r):
            c = r.get_counters()
            self.failUnlessEqual(c,
                                 {"count-objects-checked": 3,
                                  "count-objects-healthy": 2,
                                  "count-objects-unhealthy": 1,
                                  "count-objects-unrecoverable": 0,
                                  "count-corrupt-shares": 0,
                                  })
            #self.failUnlessEqual(len(r.get_problems()), 1) # TODO
        d.addCallback(_check_results)
        return d

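    # test_readonly derives a read-only dirnode from a writeable one and
    # checks that every mutating method raises dirnode.NotMutableError.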
    def test_readonly(self):
        fileuri = make_chk_file_uri(1234)
        filenode = self.client.create_node_from_uri(fileuri)
        uploadable = upload.Data("some data", convergence="some convergence string")

        d = self.client.create_empty_dirnode()
        def _created(rw_dn):
            d2 = rw_dn.set_uri(u"child", fileuri.to_string())
            d2.addCallback(lambda res: rw_dn)
            return d2
        d.addCallback(_created)

        def _ready(rw_dn):
            ro_uri = rw_dn.get_readonly_uri()
            ro_dn = self.client.create_node_from_uri(ro_uri)
            self.failUnless(ro_dn.is_readonly())
            self.failUnless(ro_dn.is_mutable())

            self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
                            ro_dn.set_uri, u"newchild", fileuri.to_string())
            self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
                            ro_dn.set_node, u"newchild", filenode)
            self.shouldFail(dirnode.NotMutableError, "set_nodes ro", None,
                            ro_dn.set_nodes, [ (u"newchild", filenode) ])
            self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
                            ro_dn.add_file, u"newchild", uploadable)
            self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
                            ro_dn.delete, u"child")
            self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
                            ro_dn.create_empty_directory, u"newchild")
            self.shouldFail(dirnode.NotMutableError, "set_metadata_for ro", None,
                            ro_dn.set_metadata_for, u"child", {})
            self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
                            ro_dn.move_child_to, u"child", rw_dn)
            self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
                            rw_dn.move_child_to, u"child", ro_dn)
            return ro_dn.list()
        d.addCallback(_ready)
        def _listed(children):
            self.failUnless(u"child" in children)
        d.addCallback(_listed)
        return d

    def failUnlessGreaterThan(self, a, b):
        self.failUnless(a > b, "%r should be > %r" % (a, b))

    def failUnlessGreaterOrEqualThan(self, a, b):
        self.failUnless(a >= b, "%r should be >= %r" % (a, b))

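    # test_create walks a single directory through most of the dirnode API:
    # cap formats, child addition via set_uri/set_node/set_children/set_nodes,
    # metadata defaults and overrides, timestamps, deep-stats, the manifest,
    # delete, add_file, and move_child_to.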
    def test_create(self):
        self.expected_manifest = []
        self.expected_verifycaps = set()
        self.expected_storage_indexes = set()

        d = self.client.create_empty_dirnode()
        def _then(n):
            # /
            self.failUnless(n.is_mutable())
            u = n.get_uri()
            self.failUnless(u)
            self.failUnless(u.startswith("URI:DIR2:"), u)
            u_ro = n.get_readonly_uri()
            self.failUnless(u_ro.startswith("URI:DIR2-RO:"), u_ro)
            u_v = n.get_verify_cap().to_string()
            self.failUnless(u_v.startswith("URI:DIR2-Verifier:"), u_v)
            self.expected_manifest.append( ((), u) )
            self.expected_verifycaps.add(u_v)
            si = n.get_storage_index()
            self.expected_storage_indexes.add(base32.b2a(si))
            expected_si = n._uri._filenode_uri.storage_index
            self.failUnlessEqual(si, expected_si)

            d = n.list()
            d.addCallback(lambda res: self.failUnlessEqual(res, {}))
            d.addCallback(lambda res: n.has_child(u"missing"))
            d.addCallback(lambda res: self.failIf(res))
            fake_file_uri = make_mutable_file_uri()
            other_file_uri = make_mutable_file_uri()
            m = Marker(fake_file_uri)
            ffu_v = m.get_verify_cap().to_string()
            self.expected_manifest.append( ((u"child",), m.get_uri()) )
            self.expected_verifycaps.add(ffu_v)
            self.expected_storage_indexes.add(base32.b2a(m.get_storage_index()))
            d.addCallback(lambda res: n.set_uri(u"child",
                                                fake_file_uri.to_string()))
            d.addCallback(lambda res:
                          self.shouldFail(ExistingChildError, "set_uri-no",
                                          "child 'child' already exists",
                                          n.set_uri, u"child",
                                          other_file_uri.to_string(),
                                          overwrite=False))
            # /
            # /child = mutable

            d.addCallback(lambda res: n.create_empty_directory(u"subdir"))

            # /
            # /child = mutable
            # /subdir = directory
            def _created(subdir):
                self.failUnless(isinstance(subdir, FakeDirectoryNode))
                self.subdir = subdir
                new_v = subdir.get_verify_cap().to_string()
                assert isinstance(new_v, str)
                self.expected_manifest.append( ((u"subdir",), subdir.get_uri()) )
                self.expected_verifycaps.add(new_v)
                si = subdir.get_storage_index()
                self.expected_storage_indexes.add(base32.b2a(si))
            d.addCallback(_created)

            d.addCallback(lambda res:
                          self.shouldFail(ExistingChildError, "mkdir-no",
                                          "child 'subdir' already exists",
                                          n.create_empty_directory, u"subdir",
                                          overwrite=False))

            d.addCallback(lambda res: n.list())
            d.addCallback(lambda children:
                          self.failUnlessEqual(sorted(children.keys()),
                                               sorted([u"child", u"subdir"])))

            d.addCallback(lambda res: n.start_deep_stats().when_done())
            def _check_deepstats(stats):
                self.failUnless(isinstance(stats, dict))
                expected = {"count-immutable-files": 0,
                            "count-mutable-files": 1,
                            "count-literal-files": 0,
                            "count-files": 1,
                            "count-directories": 2,
                            "size-immutable-files": 0,
                            "size-literal-files": 0,
                            #"size-directories": 616, # varies
                            #"largest-directory": 616,
                            "largest-directory-children": 2,
                            "largest-immutable-file": 0,
                            }
                for k,v in expected.iteritems():
                    self.failUnlessEqual(stats[k], v,
                                         "stats[%s] was %s, not %s" %
                                         (k, stats[k], v))
                self.failUnless(stats["size-directories"] > 500,
                                stats["size-directories"])
                self.failUnless(stats["largest-directory"] > 500,
                                stats["largest-directory"])
                self.failUnlessEqual(stats["size-files-histogram"], [])
            d.addCallback(_check_deepstats)

            d.addCallback(lambda res: n.build_manifest().when_done())
            def _check_manifest(res):
                manifest = res["manifest"]
                self.failUnlessEqual(sorted(manifest),
                                     sorted(self.expected_manifest))
                stats = res["stats"]
                _check_deepstats(stats)
                self.failUnlessEqual(self.expected_verifycaps,
                                     res["verifycaps"])
                self.failUnlessEqual(self.expected_storage_indexes,
                                     res["storage-index"])
            d.addCallback(_check_manifest)

            def _add_subsubdir(res):
                return self.subdir.create_empty_directory(u"subsubdir")
            d.addCallback(_add_subsubdir)
            # /
            # /child = mutable
            # /subdir = directory
            # /subdir/subsubdir = directory
            d.addCallback(lambda res: n.get_child_at_path(u"subdir/subsubdir"))
            d.addCallback(lambda subsubdir:
                          self.failUnless(isinstance(subsubdir,
                                                     FakeDirectoryNode)))
            d.addCallback(lambda res: n.get_child_at_path(u""))
            d.addCallback(lambda res: self.failUnlessEqual(res.get_uri(),
                                                           n.get_uri()))

            d.addCallback(lambda res: n.get_metadata_for(u"child"))
            d.addCallback(lambda metadata:
                          self.failUnlessEqual(sorted(metadata.keys()),
                                               ["ctime", "mtime"]))

            d.addCallback(lambda res:
                          self.shouldFail(NoSuchChildError, "gcamap-no",
                                          "nope",
                                          n.get_child_and_metadata_at_path,
                                          u"subdir/nope"))
            d.addCallback(lambda res:
                          n.get_child_and_metadata_at_path(u""))
            def _check_child_and_metadata1(res):
                child, metadata = res
                self.failUnless(isinstance(child, FakeDirectoryNode))
                # edge-metadata needs at least one path segment
                self.failUnlessEqual(sorted(metadata.keys()), [])
            d.addCallback(_check_child_and_metadata1)
            d.addCallback(lambda res:
                          n.get_child_and_metadata_at_path(u"child"))

            def _check_child_and_metadata2(res):
                child, metadata = res
                self.failUnlessEqual(child.get_uri(),
                                     fake_file_uri.to_string())
                self.failUnlessEqual(sorted(metadata.keys()),
                                     ["ctime", "mtime"])
            d.addCallback(_check_child_and_metadata2)

            d.addCallback(lambda res:
                          n.get_child_and_metadata_at_path(u"subdir/subsubdir"))
            def _check_child_and_metadata3(res):
                child, metadata = res
                self.failUnless(isinstance(child, FakeDirectoryNode))
                self.failUnlessEqual(sorted(metadata.keys()),
                                     ["ctime", "mtime"])
            d.addCallback(_check_child_and_metadata3)

            # set_uri + metadata
            # it should be possible to add a child without any metadata
            d.addCallback(lambda res: n.set_uri(u"c2",
                                                fake_file_uri.to_string(), {}))
            d.addCallback(lambda res: n.get_metadata_for(u"c2"))
            d.addCallback(lambda metadata: self.failUnlessEqual(metadata, {}))

            # if we don't set any defaults, the child should get timestamps
            d.addCallback(lambda res: n.set_uri(u"c3",
                                                fake_file_uri.to_string()))
            d.addCallback(lambda res: n.get_metadata_for(u"c3"))
            d.addCallback(lambda metadata:
                          self.failUnlessEqual(sorted(metadata.keys()),
                                               ["ctime", "mtime"]))

            # or we can add specific metadata at set_uri() time, which
            # overrides the timestamps
            d.addCallback(lambda res: n.set_uri(u"c4",
                                                fake_file_uri.to_string(),
                                                {"key": "value"}))
            d.addCallback(lambda res: n.get_metadata_for(u"c4"))
            d.addCallback(lambda metadata:
                          self.failUnlessEqual(metadata, {"key": "value"}))

            d.addCallback(lambda res: n.delete(u"c2"))
            d.addCallback(lambda res: n.delete(u"c3"))
            d.addCallback(lambda res: n.delete(u"c4"))

            # set_node + metadata
            # it should be possible to add a child without any metadata
            d.addCallback(lambda res: n.set_node(u"d2", n, {}))
            d.addCallback(lambda res: self.client.create_empty_dirnode())
            d.addCallback(lambda n2:
                          self.shouldFail(ExistingChildError, "set_node-no",
                                          "child 'd2' already exists",
                                          n.set_node, u"d2", n2,
                                          overwrite=False))
            d.addCallback(lambda res: n.get_metadata_for(u"d2"))
            d.addCallback(lambda metadata: self.failUnlessEqual(metadata, {}))

            # if we don't set any defaults, the child should get timestamps
            d.addCallback(lambda res: n.set_node(u"d3", n))
            d.addCallback(lambda res: n.get_metadata_for(u"d3"))
            d.addCallback(lambda metadata:
                          self.failUnlessEqual(sorted(metadata.keys()),
                                               ["ctime", "mtime"]))

            # or we can add specific metadata at set_node() time, which
            # overrides the timestamps
            d.addCallback(lambda res: n.set_node(u"d4", n,
                                                 {"key": "value"}))
            d.addCallback(lambda res: n.get_metadata_for(u"d4"))
            d.addCallback(lambda metadata:
                          self.failUnlessEqual(metadata, {"key": "value"}))

            d.addCallback(lambda res: n.delete(u"d2"))
            d.addCallback(lambda res: n.delete(u"d3"))
            d.addCallback(lambda res: n.delete(u"d4"))

            # metadata through set_children()
            d.addCallback(lambda res:
                          n.set_children([ (u"e1", fake_file_uri.to_string()),
                                           (u"e2", fake_file_uri.to_string(), {}),
                                           (u"e3", fake_file_uri.to_string(),
                                            {"key": "value"}),
                                           ]))
            d.addCallback(lambda res:
                          self.shouldFail(ExistingChildError, "set_children-no",
                                          "child 'e1' already exists",
                                          n.set_children,
                                          [ (u"e1", other_file_uri),
                                            (u"new", other_file_uri), ],
                                          overwrite=False))
            # and 'new' should not have been created
            d.addCallback(lambda res: n.list())
            d.addCallback(lambda children: self.failIf(u"new" in children))
            d.addCallback(lambda res: n.get_metadata_for(u"e1"))
            d.addCallback(lambda metadata:
                          self.failUnlessEqual(sorted(metadata.keys()),
                                               ["ctime", "mtime"]))
            d.addCallback(lambda res: n.get_metadata_for(u"e2"))
            d.addCallback(lambda metadata: self.failUnlessEqual(metadata, {}))
            d.addCallback(lambda res: n.get_metadata_for(u"e3"))
            d.addCallback(lambda metadata:
                          self.failUnlessEqual(metadata, {"key": "value"}))

            d.addCallback(lambda res: n.delete(u"e1"))
            d.addCallback(lambda res: n.delete(u"e2"))
            d.addCallback(lambda res: n.delete(u"e3"))

            # metadata through set_nodes()
            d.addCallback(lambda res: n.set_nodes([ (u"f1", n),
                                                    (u"f2", n, {}),
                                                    (u"f3", n,
                                                     {"key": "value"}),
                                                    ]))
            d.addCallback(lambda res:
                          self.shouldFail(ExistingChildError, "set_nodes-no",
                                          "child 'f1' already exists",
                                          n.set_nodes,
                                          [ (u"f1", n),
                                            (u"new", n), ],
                                          overwrite=False))
            # and 'new' should not have been created
            d.addCallback(lambda res: n.list())
            d.addCallback(lambda children: self.failIf(u"new" in children))
            d.addCallback(lambda res: n.get_metadata_for(u"f1"))
            d.addCallback(lambda metadata:
                          self.failUnlessEqual(sorted(metadata.keys()),
                                               ["ctime", "mtime"]))
            d.addCallback(lambda res: n.get_metadata_for(u"f2"))
            d.addCallback(lambda metadata: self.failUnlessEqual(metadata, {}))
            d.addCallback(lambda res: n.get_metadata_for(u"f3"))
            d.addCallback(lambda metadata:
                          self.failUnlessEqual(metadata, {"key": "value"}))

            d.addCallback(lambda res: n.delete(u"f1"))
            d.addCallback(lambda res: n.delete(u"f2"))
            d.addCallback(lambda res: n.delete(u"f3"))

            d.addCallback(lambda res:
                          n.set_metadata_for(u"child",
                                             {"tags": ["web2.0-compatible"]}))
            d.addCallback(lambda n1: n1.get_metadata_for(u"child"))
            d.addCallback(lambda metadata:
                          self.failUnlessEqual(metadata,
                                               {"tags": ["web2.0-compatible"]}))

            def _start(res):
                self._start_timestamp = time.time()
            d.addCallback(_start)
            # simplejson-1.7.1 (as shipped on Ubuntu 'gutsy') rounds all
            # floats to hundredths (it uses str(num) instead of repr(num)).
            # simplejson-1.7.3 does not have this bug. To prevent this bug
            # from causing the test to fail, stall for more than a few
            # hundredths of a second.
            d.addCallback(self.stall, 0.1)
            d.addCallback(lambda res:
                          n.add_file(u"timestamps",
                                     upload.Data("stamp me",
                                                 convergence="some convergence string")))
            d.addCallback(self.stall, 0.1)
            def _stop(res):
                self._stop_timestamp = time.time()
            d.addCallback(_stop)

            d.addCallback(lambda res: n.get_metadata_for(u"timestamps"))
            def _check_timestamp1(metadata):
                self.failUnless("ctime" in metadata)
                self.failUnless("mtime" in metadata)
                self.failUnlessGreaterOrEqualThan(metadata["ctime"],
                                                  self._start_timestamp)
                self.failUnlessGreaterOrEqualThan(self._stop_timestamp,
                                                  metadata["ctime"])
                self.failUnlessGreaterOrEqualThan(metadata["mtime"],
                                                  self._start_timestamp)
                self.failUnlessGreaterOrEqualThan(self._stop_timestamp,
                                                  metadata["mtime"])
                # Our current timestamp rules say that replacing an existing
                # child should preserve the 'ctime' but update the mtime
                self._old_ctime = metadata["ctime"]
                self._old_mtime = metadata["mtime"]
            d.addCallback(_check_timestamp1)
            d.addCallback(self.stall, 2.0) # accommodate low-res timestamps
            d.addCallback(lambda res: n.set_node(u"timestamps", n))
            d.addCallback(lambda res: n.get_metadata_for(u"timestamps"))
            def _check_timestamp2(metadata):
                self.failUnlessEqual(metadata["ctime"], self._old_ctime,
                                     "%s != %s" % (metadata["ctime"],
                                                   self._old_ctime))
                self.failUnlessGreaterThan(metadata["mtime"], self._old_mtime)
                return n.delete(u"timestamps")
            d.addCallback(_check_timestamp2)

            # also make sure we can add/update timestamps on a
            # previously-existing child that didn't have any, since there are
            # a lot of 0.7.0-generated edges around out there
            d.addCallback(lambda res: n.set_node(u"no_timestamps", n, {}))
            d.addCallback(lambda res: n.set_node(u"no_timestamps", n))
            d.addCallback(lambda res: n.get_metadata_for(u"no_timestamps"))
            d.addCallback(lambda metadata:
                          self.failUnlessEqual(sorted(metadata.keys()),
                                               ["ctime", "mtime"]))
            d.addCallback(lambda res: n.delete(u"no_timestamps"))

            d.addCallback(lambda res: n.delete(u"subdir"))
            d.addCallback(lambda old_child:
                          self.failUnlessEqual(old_child.get_uri(),
                                               self.subdir.get_uri()))

            d.addCallback(lambda res: n.list())
            d.addCallback(lambda children:
                          self.failUnlessEqual(sorted(children.keys()),
                                               sorted([u"child"])))

            uploadable = upload.Data("some data",
                                     convergence="some convergence string")
            d.addCallback(lambda res: n.add_file(u"newfile", uploadable))
            d.addCallback(lambda newnode:
                          self.failUnless(IFileNode.providedBy(newnode)))
            other_uploadable = upload.Data("some data", convergence="stuff")
            d.addCallback(lambda res:
                          self.shouldFail(ExistingChildError, "add_file-no",
                                          "child 'newfile' already exists",
                                          n.add_file, u"newfile",
                                          other_uploadable,
                                          overwrite=False))
            d.addCallback(lambda res: n.list())
            d.addCallback(lambda children:
                          self.failUnlessEqual(sorted(children.keys()),
                                               sorted([u"child", u"newfile"])))
            d.addCallback(lambda res: n.get_metadata_for(u"newfile"))
            d.addCallback(lambda metadata:
                          self.failUnlessEqual(sorted(metadata.keys()),
                                               ["ctime", "mtime"]))

            d.addCallback(lambda res: n.add_file(u"newfile-metadata",
                                                 uploadable,
                                                 {"key": "value"}))
            d.addCallback(lambda newnode:
                          self.failUnless(IFileNode.providedBy(newnode)))
            d.addCallback(lambda res: n.get_metadata_for(u"newfile-metadata"))
            d.addCallback(lambda metadata:
                          self.failUnlessEqual(metadata, {"key": "value"}))
            d.addCallback(lambda res: n.delete(u"newfile-metadata"))

            d.addCallback(lambda res: n.create_empty_directory(u"subdir2"))
            def _created2(subdir2):
                self.subdir2 = subdir2
                # put something in the way, to make sure it gets overwritten
                return subdir2.add_file(u"child",
                                        upload.Data("overwrite me", "converge"))
            d.addCallback(_created2)

            d.addCallback(lambda res:
                          n.move_child_to(u"child", self.subdir2))
            d.addCallback(lambda res: n.list())
            d.addCallback(lambda children:
                          self.failUnlessEqual(sorted(children.keys()),
                                               sorted([u"newfile", u"subdir2"])))
            d.addCallback(lambda res: self.subdir2.list())
            d.addCallback(lambda children:
                          self.failUnlessEqual(sorted(children.keys()),
                                               sorted([u"child"])))
            d.addCallback(lambda res: self.subdir2.get(u"child"))
            d.addCallback(lambda child:
                          self.failUnlessEqual(child.get_uri(),
                                               fake_file_uri.to_string()))

            # move it back, using new_child_name=
            d.addCallback(lambda res:
                          self.subdir2.move_child_to(u"child", n, u"newchild"))
            d.addCallback(lambda res: n.list())
            d.addCallback(lambda children:
                          self.failUnlessEqual(sorted(children.keys()),
                                               sorted([u"newchild", u"newfile",
                                                       u"subdir2"])))
            d.addCallback(lambda res: self.subdir2.list())
            d.addCallback(lambda children:
                          self.failUnlessEqual(sorted(children.keys()), []))

            # now make sure that we honor overwrite=False
            d.addCallback(lambda res:
                          self.subdir2.set_uri(u"newchild",
                                               other_file_uri.to_string()))

            d.addCallback(lambda res:
                          self.shouldFail(ExistingChildError, "move_child_to-no",
                                          "child 'newchild' already exists",
                                          n.move_child_to, u"newchild",
                                          self.subdir2,
                                          overwrite=False))
            d.addCallback(lambda res: self.subdir2.get(u"newchild"))
            d.addCallback(lambda child:
                          self.failUnlessEqual(child.get_uri(),
                                               other_file_uri.to_string()))

            return d

        d.addCallback(_then)

        d.addErrback(self.explain_error)
        return d

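# DeepStats buckets file sizes into half-decade histogram bins: each bin's
# upper edge is roughly sqrt(10) times the previous one (1-3, 4-10, 11-31,
# 32-100, 101-316, 317-1000, ...), which is what the expected
# (low, high, count) tuples below encode.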
class DeepStats(unittest.TestCase):
    def test_stats(self):
        ds = dirnode.DeepStats(None)
        ds.add("count-files")
        ds.add("size-immutable-files", 123)
        ds.histogram("size-files-histogram", 123)
        ds.max("largest-directory", 444)

        s = ds.get_results()
        self.failUnlessEqual(s["count-files"], 1)
        self.failUnlessEqual(s["size-immutable-files"], 123)
        self.failUnlessEqual(s["largest-directory"], 444)
        self.failUnlessEqual(s["count-literal-files"], 0)

        ds.add("count-files")
        ds.add("size-immutable-files", 321)
        ds.histogram("size-files-histogram", 321)
        ds.max("largest-directory", 2)

        s = ds.get_results()
        self.failUnlessEqual(s["count-files"], 2)
        self.failUnlessEqual(s["size-immutable-files"], 444)
        self.failUnlessEqual(s["largest-directory"], 444)
        self.failUnlessEqual(s["count-literal-files"], 0)
        self.failUnlessEqual(s["size-files-histogram"],
                             [ (101, 316, 1), (317, 1000, 1) ])

        ds = dirnode.DeepStats(None)
        for i in range(1, 1100):
            ds.histogram("size-files-histogram", i)
        ds.histogram("size-files-histogram", 4*1000*1000*1000*1000) # 4TB
        s = ds.get_results()
        self.failUnlessEqual(s["size-files-histogram"],
                             [ (1, 3, 3),
                               (4, 10, 7),
                               (11, 31, 21),
                               (32, 100, 69),
                               (101, 316, 216),
                               (317, 1000, 684),
                               (1001, 3162, 99),
                               (3162277660169L, 10000000000000L, 1),
                               ])

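# UCWEingMutableFileNode is a MutableFileNode whose next upload can be made
# to raise UncoordinatedWriteError exactly once, on demand; the Deleter test
# below uses it to exercise dirnode.delete's retry path (ticket #550).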
class UCWEingMutableFileNode(MutableFileNode):
    please_ucwe_after_next_upload = False

    def _upload(self, new_contents, servermap):
        d = MutableFileNode._upload(self, new_contents, servermap)
        def _ucwe(res):
            if self.please_ucwe_after_next_upload:
                self.please_ucwe_after_next_upload = False
                raise UncoordinatedWriteError()
            return res
        d.addCallback(_ucwe)
        return d

class UCWEingNewDirectoryNode(dirnode.NewDirectoryNode):
    filenode_class = UCWEingMutableFileNode


class Deleter(SystemTestMixin, unittest.TestCase):
    def test_retry(self):
        # ticket #550, a dirnode.delete which experiences an
        # UncoordinatedWriteError will fail with an incorrect "you're
        # deleting something which isn't there" NoSuchChildError exception.

        # to trigger this, we start by creating a directory with a single
        # file in it. Then we create a special dirnode that uses a modified
        # MutableFileNode which will raise UncoordinatedWriteError once on
        # demand. We then call dirnode.delete, which ought to retry and
        # succeed.

        self.basedir = self.mktemp()
        d = self.set_up_nodes()
        d.addCallback(lambda ignored: self.clients[0].create_empty_dirnode())
        small = upload.Data("Small enough for a LIT", None)
        def _created_dir(dn):
            self.root = dn
            self.root_uri = dn.get_uri()
            return dn.add_file(u"file", small)
        d.addCallback(_created_dir)
        def _do_delete(ignored):
            n = UCWEingNewDirectoryNode(self.clients[0]).init_from_uri(self.root_uri)
            assert n._node.please_ucwe_after_next_upload == False
            n._node.please_ucwe_after_next_upload = True
            # This should succeed, not raise an exception
            return n.delete(u"file")
        d.addCallback(_do_delete)

        return d