2007-12-04 21:32:04 +00:00
|
|
|
|
2008-02-09 01:43:47 +00:00
|
|
|
import time
|
2007-12-04 21:32:04 +00:00
|
|
|
from zope.interface import implements
|
|
|
|
from twisted.trial import unittest
|
2008-07-17 21:37:04 +00:00
|
|
|
from twisted.internet import defer
|
2008-07-16 20:14:39 +00:00
|
|
|
from allmydata import uri, dirnode
|
2009-07-03 01:07:49 +00:00
|
|
|
from allmydata.client import Client
|
2008-09-07 19:44:56 +00:00
|
|
|
from allmydata.immutable import upload
|
2007-12-04 21:32:04 +00:00
|
|
|
from allmydata.interfaces import IURI, IClient, IMutableFileNode, \
|
2009-07-17 01:01:03 +00:00
|
|
|
IDirectoryURI, IReadonlyDirectoryURI, IFileNode, \
|
2008-10-27 20:15:25 +00:00
|
|
|
ExistingChildError, NoSuchChildError, \
|
2009-07-03 01:07:49 +00:00
|
|
|
IDeepCheckResults, IDeepCheckAndRepairResults, CannotPackUnknownNodeError
|
2008-12-07 15:45:19 +00:00
|
|
|
from allmydata.mutable.filenode import MutableFileNode
|
2008-12-06 05:08:37 +00:00
|
|
|
from allmydata.mutable.common import UncoordinatedWriteError
|
2008-11-24 21:40:46 +00:00
|
|
|
from allmydata.util import hashutil, base32
|
2008-10-22 08:38:18 +00:00
|
|
|
from allmydata.monitor import Monitor
|
2007-12-05 06:01:37 +00:00
|
|
|
from allmydata.test.common import make_chk_file_uri, make_mutable_file_uri, \
|
2009-02-17 00:23:48 +00:00
|
|
|
FakeDirectoryNode, create_chk_filenode, ErrorMixin
|
|
|
|
from allmydata.test.no_network import GridTestMixin
|
2009-01-10 01:00:52 +00:00
|
|
|
from allmydata.check_results import CheckResults, CheckAndRepairResults
|
2009-07-03 01:07:49 +00:00
|
|
|
from allmydata.unknown import UnknownNode
|
2009-07-04 03:43:28 +00:00
|
|
|
from base64 import b32decode
|
2008-10-29 04:28:31 +00:00
|
|
|
import common_util as testutil
|
2007-12-04 21:32:04 +00:00
|
|
|
|
|
|
|
# to test dirnode.py, we want to construct a tree of real DirectoryNodes that
|
|
|
|
# contain pointers to fake files. We start with a fake MutableFileNode that
|
|
|
|
# stores all of its data in a static table.
|
|
|
|
|
|
|
|
class Marker:
    """Stand-in filenode that remembers only its URI.

    Implements just enough of IFileNode/IMutableFileNode for the dirnode
    tests: URI accessors, a deterministic storage index, a verifier cap,
    and check methods that always report a healthy, recoverable file.
    """
    implements(IFileNode, IMutableFileNode) # sure, why not
    def __init__(self, nodeuri):
        if not isinstance(nodeuri, str):
            nodeuri = nodeuri.to_string()
        self.nodeuri = nodeuri
        # derive a deterministic storage index and fingerprint from the URI
        si = hashutil.tagged_hash("tag1", nodeuri)[:16]
        self.storage_index = si
        fp = hashutil.tagged_hash("tag2", nodeuri)
        self.verifieruri = uri.SSKVerifierURI(storage_index=si, fingerprint=fp)
    def get_uri(self):
        return self.nodeuri
    def get_readonly_uri(self):
        return self.nodeuri
    def get_verify_cap(self):
        return self.verifieruri
    def get_storage_index(self):
        return self.storage_index

    def check(self, monitor, verify=False, add_lease=False):
        """Always report this (fake) file as healthy and recoverable."""
        r = CheckResults(uri.from_string(self.nodeuri), None)
        r.set_healthy(True)
        r.set_recoverable(True)
        return defer.succeed(r)

    def check_and_repair(self, monitor, verify=False, add_lease=False):
        # BUGFIX: this used to call self.check(verify), which passed the
        # verify flag in the 'monitor' position and silently dropped the
        # remaining arguments. Forward all three arguments properly.
        d = self.check(monitor, verify, add_lease)
        def _got(cr):
            r = CheckAndRepairResults(None)
            # no repair ever happens, so pre- and post-repair results match
            r.pre_repair_results = r.post_repair_results = cr
            return r
        d.addCallback(_got)
        return d
|
|
|
|
|
2007-12-04 21:32:04 +00:00
|
|
|
# dirnode requires three methods from the client: upload(),
|
|
|
|
# create_node_from_uri(), and create_empty_dirnode(). Of these, upload() is
|
|
|
|
# only used by the convenience composite method add_file().
|
|
|
|
|
|
|
|
class FakeClient:
    """Minimal IClient test double.

    Provides the three client methods that dirnode relies on: upload(),
    create_node_from_uri(), and create_empty_dirnode(). upload() is only
    used by the convenience composite method add_file().
    """
    implements(IClient)

    def upload(self, uploadable):
        # read the whole uploadable, stash the bytes in a fake CHK
        # filenode, and hand back an UploadResults carrying its URI
        d = uploadable.get_size()
        d.addCallback(lambda length: uploadable.read(length))
        def _build_results(segments):
            contents = "".join(segments)
            filenode = create_chk_filenode(self, contents)
            ur = upload.UploadResults()
            ur.uri = filenode.get_uri()
            return ur
        d.addCallback(_build_results)
        return d

    def create_node_from_uri(self, u, readcap=None):
        # fall back to the readcap when no writecap was supplied
        if not u:
            u = readcap
        cap = IURI(u)
        is_dircap = (IDirectoryURI.providedBy(cap) or
                     IReadonlyDirectoryURI.providedBy(cap))
        if is_dircap:
            return FakeDirectoryNode(self).init_from_uri(cap)
        return Marker(cap.to_string())

    def create_empty_dirnode(self):
        node = FakeDirectoryNode(self)
        d = node.create()
        d.addCallback(lambda ignored: node)
        return d
|
|
|
|
|
2008-11-19 23:02:12 +00:00
|
|
|
class Dirnode(unittest.TestCase,
|
|
|
|
testutil.ShouldFailMixin, testutil.StallMixin, ErrorMixin):
|
2009-06-05 03:14:44 +00:00
|
|
|
timeout = 240 # It takes longer than 120 seconds on Francois's arm box.
|
2007-12-04 21:32:04 +00:00
|
|
|
def setUp(self):
    """Create the FakeClient and the canned packed-directory fixture used by the tests."""
    self.client = FakeClient()
    # This is a base32-encoded representation of the directory tree
    # root/file1
    # root/file2
    # root/file3
    # as represented after being fed to _pack_contents.
    # We have it here so we can decode it, feed it to
    # _unpack_contents, and verify that _unpack_contents
    # works correctly.

    # NOTE: this literal must stay byte-identical — it is the packed,
    # base32-encoded fixture that the unpack tests decode.
    self.known_tree = "GM4TOORVHJTGS3DFGEWDSNJ2KVJESOSDJBFTU33MPB2GS3LZNVYG6N3GGI3WU5TIORTXC3DOMJ2G4NB2MVWXUZDONBVTE5LNGRZWK2LYN55GY23XGNYXQMTOMZUWU5TENN4DG23ZG5UTO2L2NQ2DO6LFMRWDMZJWGRQTUMZ2GEYDUMJQFQYTIMZ22XZKZORX5XS7CAQCSK3URR6QOHISHRCMGER5LRFSZRNAS5ZSALCS6TWFQAE754IVOIKJVK73WZPP3VUUEDTX3WHTBBZ5YX3CEKHCPG3ZWQLYA4QM6LDRCF7TJQYWLIZHKGN5ROA3AUZPXESBNLQQ6JTC2DBJU2D47IZJTLR3PKZ4RVF57XLPWY7FX7SZV3T6IJ3ORFW37FXUPGOE3ROPFNUX5DCGMAQJ3PGGULBRGM3TU6ZCMN2GS3LFEI5CAMJSGQ3DMNRTHA4TOLRUGI3TKNRWGEWCAITUMFUG6ZJCHIQHWITMNFXGW3LPORUW2ZJCHIQDCMRUGY3DMMZYHE3S4NBSG42TMNRRFQQCE3DJNZVWG4TUNFWWKIR2EAYTENBWGY3DGOBZG4XDIMRXGU3DMML5FQQCE3LUNFWWKIR2EAYTENBWGY3DGOBZG4XDIMRXGU3DMML5FQWDGOJRHI2TUZTJNRSTELBZGQ5FKUSJHJBUQSZ2MFYGKZ3SOBSWQ43IO52WO23CNAZWU3DUGVSWSNTIOE5DK33POVTW4ZLNMNWDK6DHPA2GS2THNF2W25DEN5VGY2LQNFRGG5DKNNRHO5TZPFTWI6LNMRYGQ2LCGJTHM4J2GM5DCMB2GQWDCNBSHKVVQBGRYMACKJ27CVQ6O6B4QPR72RFVTGOZUI76XUSWAX73JRV5PYRHMIFYZIA25MXDPGUGML6M2NMRSG4YD4W4K37ZDYSXHMJ3IUVT4F64YTQQVBJFFFOUC7J7LAB2VFCL5UKKGMR2D3F4EPOYC7UYWQZNR5KXHBSNXLCNBX2SNF22DCXJIHSMEKWEWOG5XCJEVVZ7UW5IB6I64XXQSJ34B5CAYZGZIIMR6LBRGMZTU6ZCMN2GS3LFEI5CAMJSGQ3DMNRTHA4TOLRUGMYDEMJYFQQCE5DBNBXWKIR2EB5SE3DJNZVW233UNFWWKIR2EAYTENBWGY3DGOBZG4XDIMZQGIYTQLBAEJWGS3TLMNZHI2LNMURDUIBRGI2DMNRWGM4DSNZOGQZTAMRRHB6SYIBCNV2GS3LFEI5CAMJSGQ3DMNRTHA4TOLRUGMYDEMJYPUWCYMZZGU5DKOTGNFWGKMZMHE2DUVKSJE5EGSCLHJRW25DDPBYTO2DXPB3GM6DBNYZTI6LJMV3DM2LWNB4TU4LWMNSWW3LKORXWK5DEMN3TI23NNE3WEM3SORRGY5THPA3TKNBUMNZG453BOF2GSZLXMVWWI3DJOFZW623RHIZTUMJQHI2SYMJUGI5BOSHWDPG3WKPAVXCF3XMKA7QVIWPRMWJHDTQHD27AHDCPJWDQENQ5H5ZZILTXQNIXXCIW4LKQABU2GCFRG5FHQN7CHD7HF4EKNRZFIV2ZYQIBM7IQU7F4RGB3XCX3FREPBKQ7UCICHVWPCYFGA6OLH3J45LXQ6GWWICJ3PGWJNLZ7PCRNLAPNYUGU6BENS7OXMBEOOFRIZV3PF2FFWZ5WHDPKXERYP7GNHKRMGEZTOOT3EJRXI2LNMURDUIBRGI2DMNRWGM4DSNZOGQZTGNRSGY4SYIBCORQWQ33FEI5CA6ZCNRUW423NN52GS3LFEI5CAMJSGQ3DMNRTHA4TOLRUGMZTMMRWHEWCAITMNFXGWY3SORUW2ZJCHIQDCMRUGY3DMMZYHE3S4NBTGM3DENRZPUWCAITNORUW2ZJCHIQDCMRUGY3DMMZYHE3S4NBTGM3DENRZPUWCY==="
|
2007-12-04 21:32:04 +00:00
|
|
|
|
|
|
|
def test_basic(self):
    """A freshly created dirnode is a FakeDirectoryNode whose repr marks it read-write."""
    d = self.client.create_empty_dirnode()
    def _check(node):
        self.failUnless(isinstance(node, FakeDirectoryNode))
        self.failUnless("RW" in str(node))
    d.addCallback(_check)
    return d
|
|
|
|
|
|
|
|
def test_check(self):
    """check() on a brand-new dirnode reports a healthy node."""
    d = self.client.create_empty_dirnode()
    d.addCallback(lambda node: node.check(Monitor()))
    d.addCallback(lambda results: self.failUnless(results.is_healthy()))
    return d
|
|
|
|
|
2008-07-17 01:20:57 +00:00
|
|
|
def _test_deepcheck_create(self):
    """Build a small tree for the deep-check tests and fire the rootnode.

    The tree contains a loop back to the root and some non-directories:
      root/
      root/subdir/
      root/subdir/file1
      root/subdir/link -> root
      root/rodir
    """
    d = self.client.create_empty_dirnode()
    def _got_root(root):
        self._rootnode = root
        return root.create_empty_directory(u"subdir")
    d.addCallback(_got_root)
    def _populate(subdir):
        self._subdir = subdir
        d2 = subdir.add_file(u"file1", upload.Data("data", None))
        # create the loop: subdir/link points back at the root
        d2.addCallback(lambda ign: subdir.set_node(u"link", self._rootnode))
        # add a read-only directory child under the root
        d2.addCallback(lambda ign: self.client.create_empty_dirnode())
        d2.addCallback(lambda newdir:
                       self._rootnode.set_uri(u"rodir",
                                              newdir.get_readonly_uri()))
        return d2
    d.addCallback(_populate)
    d.addCallback(lambda ign: self._rootnode)
    return d
|
|
|
|
|
|
|
|
def test_deepcheck(self):
    """A deep-check of the test tree finds exactly four healthy objects."""
    d = self._test_deepcheck_create()
    d.addCallback(lambda root: root.start_deep_check().when_done())
    def _verify(results):
        self.failUnless(IDeepCheckResults.providedBy(results))
        expected = {"count-objects-checked": 4,
                    "count-objects-healthy": 4,
                    "count-objects-unhealthy": 0,
                    "count-objects-unrecoverable": 0,
                    "count-corrupt-shares": 0,
                    }
        self.failUnlessEqual(results.get_counters(), expected)
        self.failIf(results.get_corrupt_shares())
        self.failUnlessEqual(len(results.get_all_results()), 4)
    d.addCallback(_verify)
    return d
|
|
|
|
|
|
|
|
def test_deepcheck_and_repair(self):
    """Deep-check-and-repair finds four healthy objects and attempts no repairs."""
    d = self._test_deepcheck_create()
    d.addCallback(lambda root:
                  root.start_deep_check_and_repair().when_done())
    def _verify(results):
        self.failUnless(IDeepCheckAndRepairResults.providedBy(results))
        # everything is healthy, so pre- and post-repair counts match
        # and no repairs are ever attempted
        expected = {"count-objects-checked": 4,
                    "count-objects-healthy-pre-repair": 4,
                    "count-objects-unhealthy-pre-repair": 0,
                    "count-objects-unrecoverable-pre-repair": 0,
                    "count-corrupt-shares-pre-repair": 0,
                    "count-objects-healthy-post-repair": 4,
                    "count-objects-unhealthy-post-repair": 0,
                    "count-objects-unrecoverable-post-repair": 0,
                    "count-corrupt-shares-post-repair": 0,
                    "count-repairs-attempted": 0,
                    "count-repairs-successful": 0,
                    "count-repairs-unsuccessful": 0,
                    }
        self.failUnlessEqual(results.get_counters(), expected)
        self.failIf(results.get_corrupt_shares())
        self.failIf(results.get_remaining_corrupt_shares())
        self.failUnlessEqual(len(results.get_all_results()), 4)
    d.addCallback(_verify)
    return d
|
|
|
|
|
2008-08-12 04:03:26 +00:00
|
|
|
def _mark_file_bad(self, rootnode):
    """Flag the rootnode's own storage index as unhealthy in the fake backend,
    then return the rootnode for callback chaining."""
    storage_index = IURI(rootnode.get_uri())._filenode_uri.storage_index
    rootnode._node.bad_shares[storage_index] = "unhealthy"
    return rootnode
|
|
|
|
|
|
|
|
def test_deepcheck_problems(self):
    """With the root marked bad, deep-check counts one unhealthy object."""
    d = self._test_deepcheck_create()
    d.addCallback(lambda rootnode: self._mark_file_bad(rootnode))
    d.addCallback(lambda rootnode: rootnode.start_deep_check().when_done())
    def _verify(results):
        expected = {"count-objects-checked": 4,
                    "count-objects-healthy": 3,
                    "count-objects-unhealthy": 1,
                    "count-objects-unrecoverable": 0,
                    "count-corrupt-shares": 0,
                    }
        self.failUnlessEqual(results.get_counters(), expected)
        #self.failUnlessEqual(len(results.get_problems()), 1) # TODO
    d.addCallback(_verify)
    return d
|
|
|
|
|
2007-12-04 21:32:04 +00:00
|
|
|
def test_readonly(self):
    """Every mutating dirnode method must raise NotMutableError on a
    read-only dirnode, while list() still works.

    BUGFIX: the shouldFail() 'which' labels were copy-pasted as
    "set_uri ro" for most calls; each label now names the operation
    actually being exercised so failure messages point at the right call.
    """
    fileuri = make_chk_file_uri(1234)
    filenode = self.client.create_node_from_uri(fileuri)
    uploadable = upload.Data("some data", convergence="some convergence string")

    d = self.client.create_empty_dirnode()
    def _created(rw_dn):
        # give the writable directory one child to operate on
        d2 = rw_dn.set_uri(u"child", fileuri.to_string())
        d2.addCallback(lambda res: rw_dn)
        return d2
    d.addCallback(_created)

    def _ready(rw_dn):
        ro_uri = rw_dn.get_readonly_uri()
        ro_dn = self.client.create_node_from_uri(ro_uri)
        self.failUnless(ro_dn.is_readonly())
        # read-only dirnodes are still "mutable" in the cap sense
        self.failUnless(ro_dn.is_mutable())

        self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
                        ro_dn.set_uri, u"newchild", fileuri.to_string())
        self.shouldFail(dirnode.NotMutableError, "set_node ro", None,
                        ro_dn.set_node, u"newchild", filenode)
        self.shouldFail(dirnode.NotMutableError, "set_nodes ro", None,
                        ro_dn.set_nodes, [ (u"newchild", filenode) ])
        self.shouldFail(dirnode.NotMutableError, "add_file ro", None,
                        ro_dn.add_file, u"newchild", uploadable)
        self.shouldFail(dirnode.NotMutableError, "delete ro", None,
                        ro_dn.delete, u"child")
        self.shouldFail(dirnode.NotMutableError, "mkdir ro", None,
                        ro_dn.create_empty_directory, u"newchild")
        self.shouldFail(dirnode.NotMutableError, "set_metadata_for ro", None,
                        ro_dn.set_metadata_for, u"child", {})
        self.shouldFail(dirnode.NotMutableError, "move_child_to ro", None,
                        ro_dn.move_child_to, u"child", rw_dn)
        # moving INTO a read-only directory must fail too
        self.shouldFail(dirnode.NotMutableError, "move_child_to into ro", None,
                        rw_dn.move_child_to, u"child", ro_dn)
        return ro_dn.list()
    d.addCallback(_ready)
    def _listed(children):
        # reading through the read-only cap still sees the child
        self.failUnless(u"child" in children)
    d.addCallback(_listed)
    return d
|
|
|
|
|
2008-02-11 22:12:55 +00:00
|
|
|
def failUnlessGreaterThan(self, a, b):
    """Assert that a > b, with a readable failure message."""
    message = "%r should be > %r" % (a, b)
    self.failUnless(a > b, message)
|
2008-02-11 22:12:55 +00:00
|
|
|
|
2008-02-11 21:13:07 +00:00
|
|
|
def failUnlessGreaterOrEqualThan(self, a, b):
    """Assert that a >= b, with a readable failure message."""
    message = "%r should be >= %r" % (a, b)
    self.failUnless(a >= b, message)
|
2008-02-11 21:13:07 +00:00
|
|
|
|
2007-12-04 21:32:04 +00:00
|
|
|
def test_create(self):
|
|
|
|
self.expected_manifest = []
|
2008-11-24 21:40:46 +00:00
|
|
|
self.expected_verifycaps = set()
|
|
|
|
self.expected_storage_indexes = set()
|
2007-12-04 21:32:04 +00:00
|
|
|
|
2008-01-14 21:55:59 +00:00
|
|
|
d = self.client.create_empty_dirnode()
|
2007-12-04 21:32:04 +00:00
|
|
|
def _then(n):
|
2008-05-08 23:19:42 +00:00
|
|
|
# /
|
2007-12-04 21:32:04 +00:00
|
|
|
self.failUnless(n.is_mutable())
|
|
|
|
u = n.get_uri()
|
|
|
|
self.failUnless(u)
|
|
|
|
self.failUnless(u.startswith("URI:DIR2:"), u)
|
|
|
|
u_ro = n.get_readonly_uri()
|
|
|
|
self.failUnless(u_ro.startswith("URI:DIR2-RO:"), u_ro)
|
2008-12-08 19:44:11 +00:00
|
|
|
u_v = n.get_verify_cap().to_string()
|
2007-12-04 21:32:04 +00:00
|
|
|
self.failUnless(u_v.startswith("URI:DIR2-Verifier:"), u_v)
|
2009-01-23 04:44:49 +00:00
|
|
|
u_r = n.get_repair_cap().to_string()
|
|
|
|
self.failUnlessEqual(u_r, u)
|
2008-10-07 04:36:18 +00:00
|
|
|
self.expected_manifest.append( ((), u) )
|
2008-11-24 21:40:46 +00:00
|
|
|
self.expected_verifycaps.add(u_v)
|
|
|
|
si = n.get_storage_index()
|
|
|
|
self.expected_storage_indexes.add(base32.b2a(si))
|
2008-08-12 23:14:07 +00:00
|
|
|
expected_si = n._uri._filenode_uri.storage_index
|
2008-11-24 21:40:46 +00:00
|
|
|
self.failUnlessEqual(si, expected_si)
|
2007-12-04 21:32:04 +00:00
|
|
|
|
|
|
|
d = n.list()
|
|
|
|
d.addCallback(lambda res: self.failUnlessEqual(res, {}))
|
2008-02-14 22:45:56 +00:00
|
|
|
d.addCallback(lambda res: n.has_child(u"missing"))
|
2007-12-04 21:32:04 +00:00
|
|
|
d.addCallback(lambda res: self.failIf(res))
|
|
|
|
fake_file_uri = make_mutable_file_uri()
|
2008-05-16 23:09:47 +00:00
|
|
|
other_file_uri = make_mutable_file_uri()
|
2007-12-04 21:32:04 +00:00
|
|
|
m = Marker(fake_file_uri)
|
2008-12-08 19:44:11 +00:00
|
|
|
ffu_v = m.get_verify_cap().to_string()
|
2008-10-07 04:36:18 +00:00
|
|
|
self.expected_manifest.append( ((u"child",) , m.get_uri()) )
|
2008-11-24 21:40:46 +00:00
|
|
|
self.expected_verifycaps.add(ffu_v)
|
|
|
|
self.expected_storage_indexes.add(base32.b2a(m.get_storage_index()))
|
2008-12-19 15:39:24 +00:00
|
|
|
d.addCallback(lambda res: n.set_uri(u"child", fake_file_uri.to_string()))
|
2008-05-16 23:09:47 +00:00
|
|
|
d.addCallback(lambda res:
|
|
|
|
self.shouldFail(ExistingChildError, "set_uri-no",
|
|
|
|
"child 'child' already exists",
|
2008-12-19 15:39:24 +00:00
|
|
|
n.set_uri, u"child", other_file_uri.to_string(),
|
2008-05-16 23:09:47 +00:00
|
|
|
overwrite=False))
|
2008-05-08 23:19:42 +00:00
|
|
|
# /
|
|
|
|
# /child = mutable
|
2007-12-04 21:32:04 +00:00
|
|
|
|
2008-02-14 22:45:56 +00:00
|
|
|
d.addCallback(lambda res: n.create_empty_directory(u"subdir"))
|
2008-05-16 23:09:47 +00:00
|
|
|
|
2008-05-08 23:19:42 +00:00
|
|
|
# /
|
|
|
|
# /child = mutable
|
|
|
|
# /subdir = directory
|
2007-12-04 21:32:04 +00:00
|
|
|
def _created(subdir):
|
2007-12-12 01:10:29 +00:00
|
|
|
self.failUnless(isinstance(subdir, FakeDirectoryNode))
|
2007-12-04 21:32:04 +00:00
|
|
|
self.subdir = subdir
|
2008-12-08 19:44:11 +00:00
|
|
|
new_v = subdir.get_verify_cap().to_string()
|
2007-12-04 21:32:04 +00:00
|
|
|
assert isinstance(new_v, str)
|
2008-10-07 04:36:18 +00:00
|
|
|
self.expected_manifest.append( ((u"subdir",), subdir.get_uri()) )
|
2008-11-24 21:40:46 +00:00
|
|
|
self.expected_verifycaps.add(new_v)
|
|
|
|
si = subdir.get_storage_index()
|
|
|
|
self.expected_storage_indexes.add(base32.b2a(si))
|
2007-12-04 21:32:04 +00:00
|
|
|
d.addCallback(_created)
|
|
|
|
|
2008-05-16 23:09:47 +00:00
|
|
|
d.addCallback(lambda res:
|
|
|
|
self.shouldFail(ExistingChildError, "mkdir-no",
|
|
|
|
"child 'subdir' already exists",
|
|
|
|
n.create_empty_directory, u"subdir",
|
|
|
|
overwrite=False))
|
|
|
|
|
2007-12-04 21:32:04 +00:00
|
|
|
d.addCallback(lambda res: n.list())
|
|
|
|
d.addCallback(lambda children:
|
|
|
|
self.failUnlessEqual(sorted(children.keys()),
|
2008-02-14 22:45:56 +00:00
|
|
|
sorted([u"child", u"subdir"])))
|
2007-12-04 21:32:04 +00:00
|
|
|
|
2008-10-22 00:03:07 +00:00
|
|
|
d.addCallback(lambda res: n.start_deep_stats().when_done())
|
2008-05-08 20:21:14 +00:00
|
|
|
def _check_deepstats(stats):
|
|
|
|
self.failUnless(isinstance(stats, dict))
|
|
|
|
expected = {"count-immutable-files": 0,
|
|
|
|
"count-mutable-files": 1,
|
|
|
|
"count-literal-files": 0,
|
|
|
|
"count-files": 1,
|
|
|
|
"count-directories": 2,
|
|
|
|
"size-immutable-files": 0,
|
|
|
|
"size-literal-files": 0,
|
|
|
|
#"size-directories": 616, # varies
|
|
|
|
#"largest-directory": 616,
|
|
|
|
"largest-directory-children": 2,
|
|
|
|
"largest-immutable-file": 0,
|
|
|
|
}
|
|
|
|
for k,v in expected.iteritems():
|
|
|
|
self.failUnlessEqual(stats[k], v,
|
|
|
|
"stats[%s] was %s, not %s" %
|
|
|
|
(k, stats[k], v))
|
2008-05-08 20:42:42 +00:00
|
|
|
self.failUnless(stats["size-directories"] > 500,
|
2008-05-08 20:33:07 +00:00
|
|
|
stats["size-directories"])
|
2008-05-08 20:42:42 +00:00
|
|
|
self.failUnless(stats["largest-directory"] > 500,
|
2008-05-08 20:33:07 +00:00
|
|
|
stats["largest-directory"])
|
2008-05-08 23:19:42 +00:00
|
|
|
self.failUnlessEqual(stats["size-files-histogram"], [])
|
2008-05-08 20:21:14 +00:00
|
|
|
d.addCallback(_check_deepstats)
|
|
|
|
|
2008-11-19 22:03:47 +00:00
|
|
|
d.addCallback(lambda res: n.build_manifest().when_done())
|
|
|
|
def _check_manifest(res):
|
|
|
|
manifest = res["manifest"]
|
|
|
|
self.failUnlessEqual(sorted(manifest),
|
|
|
|
sorted(self.expected_manifest))
|
|
|
|
stats = res["stats"]
|
|
|
|
_check_deepstats(stats)
|
2008-11-24 21:40:46 +00:00
|
|
|
self.failUnlessEqual(self.expected_verifycaps,
|
|
|
|
res["verifycaps"])
|
|
|
|
self.failUnlessEqual(self.expected_storage_indexes,
|
|
|
|
res["storage-index"])
|
2008-11-19 22:03:47 +00:00
|
|
|
d.addCallback(_check_manifest)
|
|
|
|
|
2007-12-04 21:32:04 +00:00
|
|
|
def _add_subsubdir(res):
|
2008-02-14 22:45:56 +00:00
|
|
|
return self.subdir.create_empty_directory(u"subsubdir")
|
2007-12-04 21:32:04 +00:00
|
|
|
d.addCallback(_add_subsubdir)
|
2008-10-03 00:52:03 +00:00
|
|
|
# /
|
|
|
|
# /child = mutable
|
|
|
|
# /subdir = directory
|
|
|
|
# /subdir/subsubdir = directory
|
2008-02-14 22:45:56 +00:00
|
|
|
d.addCallback(lambda res: n.get_child_at_path(u"subdir/subsubdir"))
|
2007-12-04 21:32:04 +00:00
|
|
|
d.addCallback(lambda subsubdir:
|
|
|
|
self.failUnless(isinstance(subsubdir,
|
2007-12-12 01:10:29 +00:00
|
|
|
FakeDirectoryNode)))
|
2008-02-14 22:45:56 +00:00
|
|
|
d.addCallback(lambda res: n.get_child_at_path(u""))
|
2007-12-04 21:32:04 +00:00
|
|
|
d.addCallback(lambda res: self.failUnlessEqual(res.get_uri(),
|
|
|
|
n.get_uri()))
|
|
|
|
|
2008-02-14 22:45:56 +00:00
|
|
|
d.addCallback(lambda res: n.get_metadata_for(u"child"))
|
2008-02-11 21:53:28 +00:00
|
|
|
d.addCallback(lambda metadata:
|
2009-04-11 22:52:05 +00:00
|
|
|
self.failUnlessEqual(set(metadata.keys()),
|
|
|
|
set(["tahoe", "ctime", "mtime"])))
|
2008-02-11 21:53:28 +00:00
|
|
|
|
2008-10-03 00:52:03 +00:00
|
|
|
d.addCallback(lambda res:
|
2008-10-27 20:15:25 +00:00
|
|
|
self.shouldFail(NoSuchChildError, "gcamap-no",
|
|
|
|
"nope",
|
2008-10-03 01:08:45 +00:00
|
|
|
n.get_child_and_metadata_at_path,
|
2008-10-03 00:52:03 +00:00
|
|
|
u"subdir/nope"))
|
|
|
|
d.addCallback(lambda res:
|
2008-10-03 01:08:45 +00:00
|
|
|
n.get_child_and_metadata_at_path(u""))
|
2008-10-03 00:52:03 +00:00
|
|
|
def _check_child_and_metadata1(res):
|
|
|
|
child, metadata = res
|
|
|
|
self.failUnless(isinstance(child, FakeDirectoryNode))
|
|
|
|
# edge-metadata needs at least one path segment
|
|
|
|
self.failUnlessEqual(sorted(metadata.keys()), [])
|
|
|
|
d.addCallback(_check_child_and_metadata1)
|
|
|
|
d.addCallback(lambda res:
|
2008-10-03 01:08:45 +00:00
|
|
|
n.get_child_and_metadata_at_path(u"child"))
|
2008-10-03 00:52:03 +00:00
|
|
|
|
|
|
|
def _check_child_and_metadata2(res):
|
|
|
|
child, metadata = res
|
|
|
|
self.failUnlessEqual(child.get_uri(),
|
|
|
|
fake_file_uri.to_string())
|
2009-04-11 22:52:05 +00:00
|
|
|
self.failUnlessEqual(set(metadata.keys()),
|
|
|
|
set(["tahoe", "ctime", "mtime"]))
|
2008-10-03 00:52:03 +00:00
|
|
|
d.addCallback(_check_child_and_metadata2)
|
|
|
|
|
|
|
|
d.addCallback(lambda res:
|
2008-10-03 01:08:45 +00:00
|
|
|
n.get_child_and_metadata_at_path(u"subdir/subsubdir"))
|
2008-10-03 00:52:03 +00:00
|
|
|
def _check_child_and_metadata3(res):
|
|
|
|
child, metadata = res
|
|
|
|
self.failUnless(isinstance(child, FakeDirectoryNode))
|
2009-04-11 22:52:05 +00:00
|
|
|
self.failUnlessEqual(set(metadata.keys()),
|
|
|
|
set(["tahoe", "ctime", "mtime"]))
|
2008-10-03 00:52:03 +00:00
|
|
|
d.addCallback(_check_child_and_metadata3)
|
|
|
|
|
2008-02-11 21:53:28 +00:00
|
|
|
# set_uri + metadata
|
|
|
|
# it should be possible to add a child without any metadata
|
2008-12-19 15:39:24 +00:00
|
|
|
d.addCallback(lambda res: n.set_uri(u"c2", fake_file_uri.to_string(), {}))
|
2008-02-14 22:45:56 +00:00
|
|
|
d.addCallback(lambda res: n.get_metadata_for(u"c2"))
|
2009-04-11 22:52:05 +00:00
|
|
|
d.addCallback(lambda metadata: self.failUnlessEqual(metadata.keys(), ['tahoe']))
|
|
|
|
|
|
|
|
# You can't override the link timestamps.
|
|
|
|
d.addCallback(lambda res: n.set_uri(u"c2", fake_file_uri.to_string(), { 'tahoe': {'linkcrtime': "bogus"}}))
|
|
|
|
d.addCallback(lambda res: n.get_metadata_for(u"c2"))
|
|
|
|
def _has_good_linkcrtime(metadata):
|
|
|
|
self.failUnless(metadata.has_key('tahoe'))
|
|
|
|
self.failUnless(metadata['tahoe'].has_key('linkcrtime'))
|
|
|
|
self.failIfEqual(metadata['tahoe']['linkcrtime'], 'bogus')
|
|
|
|
d.addCallback(_has_good_linkcrtime)
|
2007-12-04 21:32:04 +00:00
|
|
|
|
2008-02-11 21:53:28 +00:00
|
|
|
# if we don't set any defaults, the child should get timestamps
|
2008-12-19 15:39:24 +00:00
|
|
|
d.addCallback(lambda res: n.set_uri(u"c3", fake_file_uri.to_string()))
|
2008-02-14 22:45:56 +00:00
|
|
|
d.addCallback(lambda res: n.get_metadata_for(u"c3"))
|
2008-02-11 21:53:28 +00:00
|
|
|
d.addCallback(lambda metadata:
|
2009-04-11 22:52:05 +00:00
|
|
|
self.failUnlessEqual(set(metadata.keys()),
|
|
|
|
set(["tahoe", "ctime", "mtime"])))
|
2008-02-11 21:53:28 +00:00
|
|
|
|
|
|
|
# or we can add specific metadata at set_uri() time, which
|
|
|
|
# overrides the timestamps
|
2008-12-19 15:39:24 +00:00
|
|
|
d.addCallback(lambda res: n.set_uri(u"c4", fake_file_uri.to_string(),
|
2008-02-11 21:53:28 +00:00
|
|
|
{"key": "value"}))
|
2008-02-14 22:45:56 +00:00
|
|
|
d.addCallback(lambda res: n.get_metadata_for(u"c4"))
|
2008-02-11 21:53:28 +00:00
|
|
|
d.addCallback(lambda metadata:
|
2009-04-12 02:17:42 +00:00
|
|
|
self.failUnless((set(metadata.keys()) == set(["key", "tahoe"])) and
|
2009-04-11 22:52:05 +00:00
|
|
|
(metadata['key'] == "value"), metadata))
|
2008-02-11 21:53:28 +00:00
|
|
|
|
2008-02-14 22:45:56 +00:00
|
|
|
d.addCallback(lambda res: n.delete(u"c2"))
|
|
|
|
d.addCallback(lambda res: n.delete(u"c3"))
|
|
|
|
d.addCallback(lambda res: n.delete(u"c4"))
|
2008-02-11 21:53:28 +00:00
|
|
|
|
|
|
|
# set_node + metadata
|
|
|
|
# it should be possible to add a child without any metadata
|
2008-02-14 22:45:56 +00:00
|
|
|
d.addCallback(lambda res: n.set_node(u"d2", n, {}))
|
2008-05-16 23:09:47 +00:00
|
|
|
d.addCallback(lambda res: self.client.create_empty_dirnode())
|
|
|
|
d.addCallback(lambda n2:
|
|
|
|
self.shouldFail(ExistingChildError, "set_node-no",
|
|
|
|
"child 'd2' already exists",
|
|
|
|
n.set_node, u"d2", n2,
|
|
|
|
overwrite=False))
|
2008-02-14 22:45:56 +00:00
|
|
|
d.addCallback(lambda res: n.get_metadata_for(u"d2"))
|
2009-04-11 22:52:05 +00:00
|
|
|
d.addCallback(lambda metadata: self.failUnlessEqual(metadata.keys(), ['tahoe']))
|
2008-02-11 21:53:28 +00:00
|
|
|
|
|
|
|
# if we don't set any defaults, the child should get timestamps
|
2008-02-14 22:45:56 +00:00
|
|
|
d.addCallback(lambda res: n.set_node(u"d3", n))
|
|
|
|
d.addCallback(lambda res: n.get_metadata_for(u"d3"))
|
2008-02-11 21:53:28 +00:00
|
|
|
d.addCallback(lambda metadata:
|
2009-04-11 22:52:05 +00:00
|
|
|
self.failUnlessEqual(set(metadata.keys()),
|
|
|
|
set(["tahoe", "ctime", "mtime"])))
|
2008-02-11 21:53:28 +00:00
|
|
|
|
|
|
|
# or we can add specific metadata at set_node() time, which
|
|
|
|
# overrides the timestamps
|
2008-02-14 22:45:56 +00:00
|
|
|
d.addCallback(lambda res: n.set_node(u"d4", n,
|
2008-02-11 21:53:28 +00:00
|
|
|
{"key": "value"}))
|
2008-02-14 22:45:56 +00:00
|
|
|
d.addCallback(lambda res: n.get_metadata_for(u"d4"))
|
2008-02-11 21:53:28 +00:00
|
|
|
d.addCallback(lambda metadata:
|
2009-04-12 02:17:42 +00:00
|
|
|
self.failUnless((set(metadata.keys()) == set(["key", "tahoe"])) and
|
2009-04-11 22:52:05 +00:00
|
|
|
(metadata['key'] == "value"), metadata))
|
2008-02-11 21:53:28 +00:00
|
|
|
|
2008-02-14 22:45:56 +00:00
|
|
|
d.addCallback(lambda res: n.delete(u"d2"))
|
|
|
|
d.addCallback(lambda res: n.delete(u"d3"))
|
|
|
|
d.addCallback(lambda res: n.delete(u"d4"))
|
2008-02-11 21:53:28 +00:00
|
|
|
|
2008-03-01 01:40:27 +00:00
|
|
|
# metadata through set_children()
|
2008-12-19 15:39:24 +00:00
|
|
|
d.addCallback(lambda res: n.set_children([ (u"e1", fake_file_uri.to_string()),
|
|
|
|
(u"e2", fake_file_uri.to_string(), {}),
|
|
|
|
(u"e3", fake_file_uri.to_string(),
|
2008-02-11 21:53:28 +00:00
|
|
|
{"key": "value"}),
|
|
|
|
]))
|
2008-05-16 23:09:47 +00:00
|
|
|
d.addCallback(lambda res:
|
|
|
|
self.shouldFail(ExistingChildError, "set_children-no",
|
|
|
|
"child 'e1' already exists",
|
|
|
|
n.set_children,
|
|
|
|
[ (u"e1", other_file_uri),
|
|
|
|
(u"new", other_file_uri), ],
|
|
|
|
overwrite=False))
|
|
|
|
# and 'new' should not have been created
|
|
|
|
d.addCallback(lambda res: n.list())
|
|
|
|
d.addCallback(lambda children: self.failIf(u"new" in children))
|
2008-02-14 22:45:56 +00:00
|
|
|
d.addCallback(lambda res: n.get_metadata_for(u"e1"))
|
2008-02-11 21:53:28 +00:00
|
|
|
d.addCallback(lambda metadata:
|
2009-04-11 22:52:05 +00:00
|
|
|
self.failUnlessEqual(set(metadata.keys()),
|
|
|
|
set(["tahoe", "ctime", "mtime"])))
|
2008-02-14 22:45:56 +00:00
|
|
|
d.addCallback(lambda res: n.get_metadata_for(u"e2"))
|
2009-04-12 02:17:42 +00:00
|
|
|
d.addCallback(lambda metadata:
|
2009-04-11 22:52:05 +00:00
|
|
|
self.failUnlessEqual(set(metadata.keys()), set(['tahoe'])))
|
2008-02-14 22:45:56 +00:00
|
|
|
d.addCallback(lambda res: n.get_metadata_for(u"e3"))
|
2008-02-11 21:53:28 +00:00
|
|
|
d.addCallback(lambda metadata:
|
2009-04-12 02:17:42 +00:00
|
|
|
self.failUnless((set(metadata.keys()) == set(["key", "tahoe"]))
|
2009-04-11 22:52:05 +00:00
|
|
|
and (metadata['key'] == "value"), metadata))
|
2008-02-11 21:53:28 +00:00
|
|
|
|
2008-02-14 22:45:56 +00:00
|
|
|
d.addCallback(lambda res: n.delete(u"e1"))
|
|
|
|
d.addCallback(lambda res: n.delete(u"e2"))
|
|
|
|
d.addCallback(lambda res: n.delete(u"e3"))
|
2008-02-11 21:53:28 +00:00
|
|
|
|
|
|
|
# metadata through set_nodes()
|
2008-02-14 22:45:56 +00:00
|
|
|
d.addCallback(lambda res: n.set_nodes([ (u"f1", n),
|
|
|
|
(u"f2", n, {}),
|
|
|
|
(u"f3", n,
|
2008-02-11 21:53:28 +00:00
|
|
|
{"key": "value"}),
|
|
|
|
]))
|
2008-05-16 23:09:47 +00:00
|
|
|
d.addCallback(lambda res:
|
|
|
|
self.shouldFail(ExistingChildError, "set_nodes-no",
|
|
|
|
"child 'f1' already exists",
|
|
|
|
n.set_nodes,
|
|
|
|
[ (u"f1", n),
|
|
|
|
(u"new", n), ],
|
|
|
|
overwrite=False))
|
|
|
|
# and 'new' should not have been created
|
|
|
|
d.addCallback(lambda res: n.list())
|
|
|
|
d.addCallback(lambda children: self.failIf(u"new" in children))
|
2008-02-14 22:45:56 +00:00
|
|
|
d.addCallback(lambda res: n.get_metadata_for(u"f1"))
|
2008-02-11 21:53:28 +00:00
|
|
|
d.addCallback(lambda metadata:
|
2009-04-11 22:52:05 +00:00
|
|
|
self.failUnlessEqual(set(metadata.keys()),
|
|
|
|
set(["tahoe", "ctime", "mtime"])))
|
2008-02-14 22:45:56 +00:00
|
|
|
d.addCallback(lambda res: n.get_metadata_for(u"f2"))
|
2009-04-11 22:52:05 +00:00
|
|
|
d.addCallback(
|
|
|
|
lambda metadata: self.failUnlessEqual(set(metadata.keys()), set(['tahoe'])))
|
2008-02-14 22:45:56 +00:00
|
|
|
d.addCallback(lambda res: n.get_metadata_for(u"f3"))
|
2008-02-11 21:53:28 +00:00
|
|
|
d.addCallback(lambda metadata:
|
2009-04-12 02:17:42 +00:00
|
|
|
self.failUnless((set(metadata.keys()) == set(["key", "tahoe"])) and
|
2009-04-11 22:52:05 +00:00
|
|
|
(metadata['key'] == "value"), metadata))
|
2008-02-11 21:53:28 +00:00
|
|
|
|
2008-02-14 22:45:56 +00:00
|
|
|
d.addCallback(lambda res: n.delete(u"f1"))
|
|
|
|
d.addCallback(lambda res: n.delete(u"f2"))
|
|
|
|
d.addCallback(lambda res: n.delete(u"f3"))
|
2008-02-11 21:53:28 +00:00
|
|
|
|
|
|
|
|
2008-02-09 01:43:47 +00:00
|
|
|
d.addCallback(lambda res:
|
2008-02-14 22:45:56 +00:00
|
|
|
n.set_metadata_for(u"child",
|
2008-02-11 21:53:28 +00:00
|
|
|
{"tags": ["web2.0-compatible"]}))
|
2008-02-14 22:45:56 +00:00
|
|
|
d.addCallback(lambda n1: n1.get_metadata_for(u"child"))
|
2008-02-09 01:43:47 +00:00
|
|
|
d.addCallback(lambda metadata:
|
|
|
|
self.failUnlessEqual(metadata,
|
2008-02-11 21:53:28 +00:00
|
|
|
{"tags": ["web2.0-compatible"]}))
|
2008-02-09 01:43:47 +00:00
|
|
|
|
|
|
|
def _start(res):
|
|
|
|
self._start_timestamp = time.time()
|
|
|
|
d.addCallback(_start)
|
2008-02-12 00:35:17 +00:00
|
|
|
# simplejson-1.7.1 (as shipped on Ubuntu 'gutsy') rounds all
|
|
|
|
# floats to hundredeths (it uses str(num) instead of repr(num)).
|
|
|
|
# simplejson-1.7.3 does not have this bug. To prevent this bug
|
|
|
|
# from causing the test to fail, stall for more than a few
|
|
|
|
# hundrededths of a second.
|
|
|
|
d.addCallback(self.stall, 0.1)
|
2008-02-14 22:45:56 +00:00
|
|
|
d.addCallback(lambda res: n.add_file(u"timestamps",
|
2008-03-24 16:46:06 +00:00
|
|
|
upload.Data("stamp me", convergence="some convergence string")))
|
2008-02-12 00:35:17 +00:00
|
|
|
d.addCallback(self.stall, 0.1)
|
2008-02-09 01:43:47 +00:00
|
|
|
def _stop(res):
|
|
|
|
self._stop_timestamp = time.time()
|
|
|
|
d.addCallback(_stop)
|
|
|
|
|
2008-02-14 22:45:56 +00:00
|
|
|
d.addCallback(lambda res: n.get_metadata_for(u"timestamps"))
|
2008-02-11 22:12:55 +00:00
|
|
|
def _check_timestamp1(metadata):
|
2008-02-09 01:43:47 +00:00
|
|
|
self.failUnless("ctime" in metadata)
|
|
|
|
self.failUnless("mtime" in metadata)
|
2008-02-11 21:13:07 +00:00
|
|
|
self.failUnlessGreaterOrEqualThan(metadata["ctime"],
|
|
|
|
self._start_timestamp)
|
|
|
|
self.failUnlessGreaterOrEqualThan(self._stop_timestamp,
|
|
|
|
metadata["ctime"])
|
|
|
|
self.failUnlessGreaterOrEqualThan(metadata["mtime"],
|
|
|
|
self._start_timestamp)
|
|
|
|
self.failUnlessGreaterOrEqualThan(self._stop_timestamp,
|
|
|
|
metadata["mtime"])
|
2008-02-11 22:12:55 +00:00
|
|
|
# Our current timestamp rules say that replacing an existing
|
|
|
|
# child should preserve the 'ctime' but update the mtime
|
|
|
|
self._old_ctime = metadata["ctime"]
|
|
|
|
self._old_mtime = metadata["mtime"]
|
|
|
|
d.addCallback(_check_timestamp1)
|
|
|
|
d.addCallback(self.stall, 2.0) # accomodate low-res timestamps
|
2008-02-14 22:45:56 +00:00
|
|
|
d.addCallback(lambda res: n.set_node(u"timestamps", n))
|
|
|
|
d.addCallback(lambda res: n.get_metadata_for(u"timestamps"))
|
2008-02-11 22:12:55 +00:00
|
|
|
def _check_timestamp2(metadata):
|
|
|
|
self.failUnlessEqual(metadata["ctime"], self._old_ctime,
|
|
|
|
"%s != %s" % (metadata["ctime"],
|
|
|
|
self._old_ctime))
|
|
|
|
self.failUnlessGreaterThan(metadata["mtime"], self._old_mtime)
|
2008-02-14 22:45:56 +00:00
|
|
|
return n.delete(u"timestamps")
|
2008-02-11 22:12:55 +00:00
|
|
|
d.addCallback(_check_timestamp2)
|
|
|
|
|
|
|
|
# also make sure we can add/update timestamps on a
|
|
|
|
# previously-existing child that didn't have any, since there are
|
|
|
|
# a lot of 0.7.0-generated edges around out there
|
2008-02-14 22:45:56 +00:00
|
|
|
d.addCallback(lambda res: n.set_node(u"no_timestamps", n, {}))
|
|
|
|
d.addCallback(lambda res: n.set_node(u"no_timestamps", n))
|
|
|
|
d.addCallback(lambda res: n.get_metadata_for(u"no_timestamps"))
|
2008-02-11 22:12:55 +00:00
|
|
|
d.addCallback(lambda metadata:
|
2009-04-11 22:52:05 +00:00
|
|
|
self.failUnlessEqual(set(metadata.keys()),
|
|
|
|
set(["tahoe", "ctime", "mtime"])))
|
2008-02-14 22:45:56 +00:00
|
|
|
d.addCallback(lambda res: n.delete(u"no_timestamps"))
|
2008-02-09 01:43:47 +00:00
|
|
|
|
2008-02-14 22:45:56 +00:00
|
|
|
d.addCallback(lambda res: n.delete(u"subdir"))
|
2007-12-04 21:32:04 +00:00
|
|
|
d.addCallback(lambda old_child:
|
|
|
|
self.failUnlessEqual(old_child.get_uri(),
|
|
|
|
self.subdir.get_uri()))
|
|
|
|
|
|
|
|
d.addCallback(lambda res: n.list())
|
|
|
|
d.addCallback(lambda children:
|
|
|
|
self.failUnlessEqual(sorted(children.keys()),
|
2008-02-14 22:45:56 +00:00
|
|
|
sorted([u"child"])))
|
2007-12-04 21:32:04 +00:00
|
|
|
|
2008-03-24 16:46:06 +00:00
|
|
|
uploadable = upload.Data("some data", convergence="some convergence string")
|
2008-02-14 22:45:56 +00:00
|
|
|
d.addCallback(lambda res: n.add_file(u"newfile", uploadable))
|
2007-12-04 21:32:04 +00:00
|
|
|
d.addCallback(lambda newnode:
|
|
|
|
self.failUnless(IFileNode.providedBy(newnode)))
|
2008-05-16 23:09:47 +00:00
|
|
|
other_uploadable = upload.Data("some data", convergence="stuff")
|
|
|
|
d.addCallback(lambda res:
|
|
|
|
self.shouldFail(ExistingChildError, "add_file-no",
|
|
|
|
"child 'newfile' already exists",
|
|
|
|
n.add_file, u"newfile",
|
|
|
|
other_uploadable,
|
|
|
|
overwrite=False))
|
2007-12-04 21:32:04 +00:00
|
|
|
d.addCallback(lambda res: n.list())
|
|
|
|
d.addCallback(lambda children:
|
|
|
|
self.failUnlessEqual(sorted(children.keys()),
|
2008-02-14 22:45:56 +00:00
|
|
|
sorted([u"child", u"newfile"])))
|
|
|
|
d.addCallback(lambda res: n.get_metadata_for(u"newfile"))
|
2008-02-11 21:53:28 +00:00
|
|
|
d.addCallback(lambda metadata:
|
2009-04-11 22:52:05 +00:00
|
|
|
self.failUnlessEqual(set(metadata.keys()),
|
|
|
|
set(["tahoe", "ctime", "mtime"])))
|
2008-02-11 21:53:28 +00:00
|
|
|
|
2008-02-14 22:45:56 +00:00
|
|
|
d.addCallback(lambda res: n.add_file(u"newfile-metadata",
|
2008-02-11 21:53:28 +00:00
|
|
|
uploadable,
|
|
|
|
{"key": "value"}))
|
|
|
|
d.addCallback(lambda newnode:
|
|
|
|
self.failUnless(IFileNode.providedBy(newnode)))
|
2008-02-14 22:45:56 +00:00
|
|
|
d.addCallback(lambda res: n.get_metadata_for(u"newfile-metadata"))
|
2008-02-11 21:53:28 +00:00
|
|
|
d.addCallback(lambda metadata:
|
2009-04-12 02:17:42 +00:00
|
|
|
self.failUnless((set(metadata.keys()) == set(["key", "tahoe"])) and
|
2009-04-11 22:52:05 +00:00
|
|
|
(metadata['key'] == "value"), metadata))
|
2008-02-14 22:45:56 +00:00
|
|
|
d.addCallback(lambda res: n.delete(u"newfile-metadata"))
|
2007-12-04 21:32:04 +00:00
|
|
|
|
2008-02-14 22:45:56 +00:00
|
|
|
d.addCallback(lambda res: n.create_empty_directory(u"subdir2"))
|
2007-12-04 21:32:04 +00:00
|
|
|
def _created2(subdir2):
|
|
|
|
self.subdir2 = subdir2
|
2008-05-16 23:09:47 +00:00
|
|
|
# put something in the way, to make sure it gets overwritten
|
|
|
|
return subdir2.add_file(u"child", upload.Data("overwrite me",
|
|
|
|
"converge"))
|
2007-12-04 21:32:04 +00:00
|
|
|
d.addCallback(_created2)
|
|
|
|
|
|
|
|
d.addCallback(lambda res:
|
2008-02-14 22:45:56 +00:00
|
|
|
n.move_child_to(u"child", self.subdir2))
|
2007-12-04 21:32:04 +00:00
|
|
|
d.addCallback(lambda res: n.list())
|
|
|
|
d.addCallback(lambda children:
|
|
|
|
self.failUnlessEqual(sorted(children.keys()),
|
2008-02-14 22:45:56 +00:00
|
|
|
sorted([u"newfile", u"subdir2"])))
|
2007-12-04 21:32:04 +00:00
|
|
|
d.addCallback(lambda res: self.subdir2.list())
|
|
|
|
d.addCallback(lambda children:
|
|
|
|
self.failUnlessEqual(sorted(children.keys()),
|
2008-02-14 22:45:56 +00:00
|
|
|
sorted([u"child"])))
|
2008-05-16 23:09:47 +00:00
|
|
|
d.addCallback(lambda res: self.subdir2.get(u"child"))
|
|
|
|
d.addCallback(lambda child:
|
|
|
|
self.failUnlessEqual(child.get_uri(),
|
|
|
|
fake_file_uri.to_string()))
|
|
|
|
|
|
|
|
# move it back, using new_child_name=
|
|
|
|
d.addCallback(lambda res:
|
|
|
|
self.subdir2.move_child_to(u"child", n, u"newchild"))
|
|
|
|
d.addCallback(lambda res: n.list())
|
|
|
|
d.addCallback(lambda children:
|
|
|
|
self.failUnlessEqual(sorted(children.keys()),
|
|
|
|
sorted([u"newchild", u"newfile",
|
|
|
|
u"subdir2"])))
|
|
|
|
d.addCallback(lambda res: self.subdir2.list())
|
|
|
|
d.addCallback(lambda children:
|
|
|
|
self.failUnlessEqual(sorted(children.keys()), []))
|
|
|
|
|
|
|
|
# now make sure that we honor overwrite=False
|
|
|
|
d.addCallback(lambda res:
|
2008-12-19 15:39:24 +00:00
|
|
|
self.subdir2.set_uri(u"newchild", other_file_uri.to_string()))
|
2008-05-16 23:09:47 +00:00
|
|
|
|
|
|
|
d.addCallback(lambda res:
|
|
|
|
self.shouldFail(ExistingChildError, "move_child_to-no",
|
|
|
|
"child 'newchild' already exists",
|
|
|
|
n.move_child_to, u"newchild",
|
|
|
|
self.subdir2,
|
|
|
|
overwrite=False))
|
|
|
|
d.addCallback(lambda res: self.subdir2.get(u"newchild"))
|
|
|
|
d.addCallback(lambda child:
|
|
|
|
self.failUnlessEqual(child.get_uri(),
|
|
|
|
other_file_uri.to_string()))
|
2007-12-04 21:32:04 +00:00
|
|
|
|
|
|
|
return d
|
|
|
|
|
|
|
|
d.addCallback(_then)
|
|
|
|
|
2008-11-19 23:02:12 +00:00
|
|
|
d.addErrback(self.explain_error)
|
2007-12-04 21:32:04 +00:00
|
|
|
return d
|
|
|
|
|
2009-07-04 03:43:28 +00:00
|
|
|
def test_unpack_and_pack_behavior(self):
|
|
|
|
known_tree = b32decode(self.known_tree)
|
|
|
|
d = self.client.create_empty_dirnode()
|
|
|
|
|
|
|
|
def _check_tree(node):
|
|
|
|
def check_children(children):
|
|
|
|
# Are all the expected child nodes there?
|
|
|
|
self.failUnless(children.has_key(u'file1'))
|
|
|
|
self.failUnless(children.has_key(u'file2'))
|
|
|
|
self.failUnless(children.has_key(u'file3'))
|
|
|
|
|
|
|
|
# Are the metadata for child 3 right?
|
|
|
|
file3_rocap = "URI:CHK:cmtcxq7hwxvfxan34yiev6ivhy:qvcekmjtoetdcw4kmi7b3rtblvgx7544crnwaqtiewemdliqsokq:3:10:5"
|
|
|
|
file3_rwcap = "URI:CHK:cmtcxq7hwxvfxan34yiev6ivhy:qvcekmjtoetdcw4kmi7b3rtblvgx7544crnwaqtiewemdliqsokq:3:10:5"
|
|
|
|
file3_metadata = {'ctime': 1246663897.4336269, 'tahoe': {'linkmotime': 1246663897.4336269, 'linkcrtime': 1246663897.4336269}, 'mtime': 1246663897.4336269}
|
|
|
|
self.failUnlessEqual(file3_metadata, children[u'file3'][1])
|
|
|
|
self.failUnlessEqual(file3_rocap,
|
|
|
|
children[u'file3'][0].get_readonly_uri())
|
|
|
|
self.failUnlessEqual(file3_rwcap,
|
|
|
|
children[u'file3'][0].get_uri())
|
|
|
|
|
|
|
|
# Are the metadata for child 2 right?
|
|
|
|
file2_rocap = "URI:CHK:apegrpehshwugkbh3jlt5ei6hq:5oougnemcl5xgx4ijgiumtdojlipibctjkbwvyygdymdphib2fvq:3:10:4"
|
|
|
|
file2_rwcap = "URI:CHK:apegrpehshwugkbh3jlt5ei6hq:5oougnemcl5xgx4ijgiumtdojlipibctjkbwvyygdymdphib2fvq:3:10:4"
|
|
|
|
file2_metadata = {'ctime': 1246663897.430218, 'tahoe': {'linkmotime': 1246663897.430218, 'linkcrtime': 1246663897.430218}, 'mtime': 1246663897.430218}
|
|
|
|
self.failUnlessEqual(file2_metadata, children[u'file2'][1])
|
|
|
|
self.failUnlessEqual(file2_rocap,
|
|
|
|
children[u'file2'][0].get_readonly_uri())
|
|
|
|
self.failUnlessEqual(file2_rwcap,
|
|
|
|
children[u'file2'][0].get_uri())
|
|
|
|
|
|
|
|
# Are the metadata for child 1 right?
|
|
|
|
file1_rocap = "URI:CHK:olxtimympo7f27jvhtgqlnbtn4:emzdnhk2um4seixozlkw3qx2nfijvdkx3ky7i7izl47yedl6e64a:3:10:10"
|
|
|
|
file1_rwcap = "URI:CHK:olxtimympo7f27jvhtgqlnbtn4:emzdnhk2um4seixozlkw3qx2nfijvdkx3ky7i7izl47yedl6e64a:3:10:10"
|
|
|
|
file1_metadata = {'ctime': 1246663897.4275661, 'tahoe': {'linkmotime': 1246663897.4275661, 'linkcrtime': 1246663897.4275661}, 'mtime': 1246663897.4275661}
|
|
|
|
self.failUnlessEqual(file1_metadata, children[u'file1'][1])
|
|
|
|
self.failUnlessEqual(file1_rocap,
|
|
|
|
children[u'file1'][0].get_readonly_uri())
|
|
|
|
self.failUnlessEqual(file1_rwcap,
|
|
|
|
children[u'file1'][0].get_uri())
|
|
|
|
|
|
|
|
children = node._unpack_contents(known_tree)
|
|
|
|
|
|
|
|
check_children(children)
|
|
|
|
|
|
|
|
packed_children = node._pack_contents(children)
|
|
|
|
|
|
|
|
children = node._unpack_contents(packed_children)
|
|
|
|
|
|
|
|
check_children(children)
|
|
|
|
|
|
|
|
d.addCallback(_check_tree)
|
|
|
|
return d
|
|
|
|
|
|
|
|
def test_caching_dict(self):
|
|
|
|
d = dirnode.CachingDict()
|
|
|
|
d.set_both_items("test", "test2", ("test3", "test4"))
|
|
|
|
cached, value = d.get_both_items("test")
|
|
|
|
|
|
|
|
self.failUnlessEqual(cached, "test2")
|
|
|
|
self.failUnlessEqual(value, ("test3", "test4"))
|
|
|
|
|
|
|
|
d['test'] = ("test3", "test2")
|
|
|
|
|
|
|
|
cached, value = d.get_both_items("test")
|
|
|
|
|
|
|
|
self.failUnlessEqual(cached, None)
|
|
|
|
self.failUnlessEqual(value, ("test3", "test2"))
|
|
|
|
|
2009-07-03 01:07:49 +00:00
|
|
|
class FakeMutableFile:
    """In-memory stand-in for a mutable file node.

    Contents live in a plain string attribute; each instance derives a
    distinct SSK write-cap from a class-wide counter so URIs never
    collide between instances.
    """
    counter = 0  # bumped once per instance to keep write-caps unique

    def __init__(self, initial_contents=""):
        self.data = initial_contents
        my_serial = FakeMutableFile.counter
        FakeMutableFile.counter += 1
        writekey = hashutil.ssk_writekey_hash(str(my_serial))
        fingerprint = hashutil.ssk_pubkey_fingerprint_hash(str(my_serial))
        self.uri = uri.WriteableSSKFileURI(writekey, fingerprint)

    def get_uri(self):
        return self.uri.to_string()

    def get_writekey(self):
        # fixed dummy key; tests only check that one is present
        return "writekey"

    def is_readonly(self):
        return False

    def is_mutable(self):
        return True

    def download_best_version(self):
        # already-fired Deferred: no grid round-trip in the fake
        return defer.succeed(self.data)

    def modify(self, modifier):
        # invoke the modifier synchronously with (current contents, None,
        # True) and store whatever it returns
        self.data = modifier(self.data, None, True)
        return defer.succeed(None)
|
|
|
|
|
|
|
|
class FakeClient2(Client):
    # Client stub whose create_mutable_file() hands back a FakeMutableFile
    # wrapped in an already-fired Deferred, so dirnode tests need no grid.
    def __init__(self):
        # deliberately skip Client.__init__ (no basedir, no network setup)
        pass
    def create_mutable_file(self, initial_contents=""):
        # returns Deferred[FakeMutableFile] holding initial_contents
        return defer.succeed(FakeMutableFile(initial_contents))
|
|
|
|
|
|
|
|
class Dirnode2(unittest.TestCase, testutil.ShouldFailMixin):
    """Tests for dirnode handling of unrecognized (from-the-future) caps."""
    def setUp(self):
        self.client = FakeClient2()

    def test_from_future(self):
        # create a dirnode that contains unknown URI types, and make sure we
        # tolerate them properly. Since dirnodes aren't allowed to add
        # unknown node types, we have to be tricky.
        d = self.client.create_empty_dirnode()
        future_writecap = "x-tahoe-crazy://I_am_from_the_future."
        future_readcap = "x-tahoe-crazy-readonly://I_am_from_the_future."
        future_node = UnknownNode(future_writecap, future_readcap)
        def _then(n):
            self._node = n
            return n.set_node(u"future", future_node)
        d.addCallback(_then)

        # we should be prohibited from adding an unknown URI to a directory,
        # since we don't know how to diminish the cap to a readcap (for the
        # dirnode's rocap slot), and we don't want to accidentally grant
        # write access to a holder of the dirnode's readcap.
        d.addCallback(lambda ign:
             self.shouldFail(CannotPackUnknownNodeError,
                             "copy unknown",
                             "cannot pack unknown node as child add",
                             self._node.set_uri, u"add", future_writecap))
        d.addCallback(lambda ign: self._node.list())
        def _check(children):
            self.failUnlessEqual(len(children), 1)
            (fn, metadata) = children[u"future"]
            self.failUnless(isinstance(fn, UnknownNode), fn)
            self.failUnlessEqual(fn.get_uri(), future_writecap)
            self.failUnlessEqual(fn.get_readonly_uri(), future_readcap)
            # but we *should* be allowed to copy this node, because the
            # UnknownNode contains all the information that was in the
            # original directory (readcap and writecap), so we're preserving
            # everything.
            return self._node.set_node(u"copy", fn)
        d.addCallback(_check)
        d.addCallback(lambda ign: self._node.list())
        def _check2(children):
            self.failUnlessEqual(len(children), 2)
            (fn, metadata) = children[u"copy"]
            self.failUnless(isinstance(fn, UnknownNode), fn)
            self.failUnlessEqual(fn.get_uri(), future_writecap)
            self.failUnlessEqual(fn.get_readonly_uri(), future_readcap)
        # BUG FIX: _check2 was defined but never attached to the Deferred
        # chain, so its assertions about the copied node never ran.
        d.addCallback(_check2)
        return d
|
|
|
|
|
2008-05-08 23:19:42 +00:00
|
|
|
class DeepStats(unittest.TestCase):
|
2009-06-05 03:14:44 +00:00
|
|
|
timeout = 240 # It takes longer than 120 seconds on Francois's arm box.
|
2008-05-08 23:19:42 +00:00
|
|
|
def test_stats(self):
|
2008-10-22 00:03:07 +00:00
|
|
|
ds = dirnode.DeepStats(None)
|
2008-05-08 23:19:42 +00:00
|
|
|
ds.add("count-files")
|
|
|
|
ds.add("size-immutable-files", 123)
|
|
|
|
ds.histogram("size-files-histogram", 123)
|
|
|
|
ds.max("largest-directory", 444)
|
|
|
|
|
|
|
|
s = ds.get_results()
|
|
|
|
self.failUnlessEqual(s["count-files"], 1)
|
|
|
|
self.failUnlessEqual(s["size-immutable-files"], 123)
|
|
|
|
self.failUnlessEqual(s["largest-directory"], 444)
|
|
|
|
self.failUnlessEqual(s["count-literal-files"], 0)
|
|
|
|
|
|
|
|
ds.add("count-files")
|
|
|
|
ds.add("size-immutable-files", 321)
|
|
|
|
ds.histogram("size-files-histogram", 321)
|
|
|
|
ds.max("largest-directory", 2)
|
|
|
|
|
|
|
|
s = ds.get_results()
|
|
|
|
self.failUnlessEqual(s["count-files"], 2)
|
|
|
|
self.failUnlessEqual(s["size-immutable-files"], 444)
|
|
|
|
self.failUnlessEqual(s["largest-directory"], 444)
|
|
|
|
self.failUnlessEqual(s["count-literal-files"], 0)
|
|
|
|
self.failUnlessEqual(s["size-files-histogram"],
|
|
|
|
[ (101, 316, 1), (317, 1000, 1) ])
|
|
|
|
|
2008-10-22 00:03:07 +00:00
|
|
|
ds = dirnode.DeepStats(None)
|
2008-05-08 23:19:42 +00:00
|
|
|
for i in range(1, 1100):
|
|
|
|
ds.histogram("size-files-histogram", i)
|
|
|
|
ds.histogram("size-files-histogram", 4*1000*1000*1000*1000) # 4TB
|
|
|
|
s = ds.get_results()
|
|
|
|
self.failUnlessEqual(s["size-files-histogram"],
|
|
|
|
[ (1, 3, 3),
|
|
|
|
(4, 10, 7),
|
|
|
|
(11, 31, 21),
|
|
|
|
(32, 100, 69),
|
|
|
|
(101, 316, 216),
|
|
|
|
(317, 1000, 684),
|
|
|
|
(1001, 3162, 99),
|
|
|
|
(3162277660169L, 10000000000000L, 1),
|
|
|
|
])
|
|
|
|
|
2008-12-06 05:08:37 +00:00
|
|
|
class UCWEingMutableFileNode(MutableFileNode):
    """MutableFileNode that can be armed (via the class/instance flag) to
    raise UncoordinatedWriteError exactly once, just after the next
    otherwise-successful upload completes."""
    please_ucwe_after_next_upload = False

    def _upload(self, new_contents, servermap):
        d = MutableFileNode._upload(self, new_contents, servermap)
        def _maybe_ucwe(res):
            if not self.please_ucwe_after_next_upload:
                return res
            # disarm before raising, so only one UCWE is injected
            self.please_ucwe_after_next_upload = False
            raise UncoordinatedWriteError()
        d.addCallback(_maybe_ucwe)
        return d
|
2009-07-17 01:01:03 +00:00
|
|
|
class UCWEingDirectoryNode(dirnode.DirectoryNode):
    # DirectoryNode backed by the UCWE-injecting mutable-file class above,
    # so tests can provoke an UncoordinatedWriteError on demand.
    filenode_class = UCWEingMutableFileNode
|
|
|
|
|
|
|
|
|
2009-02-17 00:23:48 +00:00
|
|
|
class Deleter(GridTestMixin, unittest.TestCase):
    timeout = 3600 # It takes longer than 433 seconds on Zandr's ARM box.

    def test_retry(self):
        """Regression test for ticket #550: a dirnode.delete which
        experiences an UncoordinatedWriteError must retry and succeed,
        not fail with a bogus "you're deleting something which isn't
        there" NoSuchChildError."""
        # Strategy: build a directory holding a single file, then re-open
        # it through a special dirnode whose modified MutableFileNode
        # raises UncoordinatedWriteError once on demand, and call delete.
        self.basedir = self.mktemp()
        self.set_up_grid()
        c0 = self.g.clients[0]
        small = upload.Data("Small enough for a LIT", None)

        d = c0.create_empty_dirnode()
        def _created_dir(dn):
            self.root = dn
            self.root_uri = dn.get_uri()
            return dn.add_file(u"file", small)
        d.addCallback(_created_dir)

        def _do_delete(ignored):
            n = UCWEingDirectoryNode(c0).init_from_uri(self.root_uri)
            assert not n._node.please_ucwe_after_next_upload
            n._node.please_ucwe_after_next_upload = True
            # This should succeed, not raise an exception
            return n.delete(u"file")
        d.addCallback(_do_delete)

        return d
|
|
|
|
|