import os
from twisted.trial import unittest
from twisted.application import service
from twisted.python import log

import allmydata
from allmydata import client
from allmydata.storage_client import StorageFarmBroker
from allmydata.util import base32, fileutil
from allmydata.interfaces import IFilesystemNode, IFileNode, \
     IImmutableFileNode, IMutableFileNode, IDirectoryNode
from foolscap.api import flushEventualQueue
import allmydata.test.common_util as testutil

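# BASECONFIG is the minimal tahoe.cfg content shared by the tests below: a
# [client] section whose introducer.furl is left empty.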
BASECONFIG = ("[client]\n"
              "introducer.furl = \n"
              )


class Basic(testutil.ReallyEqualMixin, unittest.TestCase):
    def test_loadable(self):
        basedir = "test_client.Basic.test_loadable"
        os.mkdir(basedir)
        open(os.path.join(basedir, "introducer.furl"), "w").write("")
        client.Client(basedir)

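    # The next two tests start a client from a basedir that uses the old
    # marker-file style of storage configuration (no_storage,
    # readonly_storage, debug_discard_storage) instead of tahoe.cfg.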
    def test_loadable_old_config_bits(self):
        basedir = "test_client.Basic.test_loadable_old_config_bits"
        os.mkdir(basedir)
        open(os.path.join(basedir, "introducer.furl"), "w").write("")
        open(os.path.join(basedir, "no_storage"), "w").write("")
        open(os.path.join(basedir, "readonly_storage"), "w").write("")
        open(os.path.join(basedir, "debug_discard_storage"), "w").write("")
        c = client.Client(basedir)
        try:
            c.getServiceNamed("storage")
            self.fail("that was supposed to fail")
        except KeyError:
            pass

    def test_loadable_old_storage_config_bits(self):
        basedir = "test_client.Basic.test_loadable_old_storage_config_bits"
        os.mkdir(basedir)
        open(os.path.join(basedir, "introducer.furl"), "w").write("")
        open(os.path.join(basedir, "readonly_storage"), "w").write("")
        open(os.path.join(basedir, "debug_discard_storage"), "w").write("")
        c = client.Client(basedir)
        s = c.getServiceNamed("storage")
        self.failUnless(s.no_storage)
        self.failUnless(s.readonly_storage)

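    # The client should create private/secret on first startup, and
    # get_renewal_secret() / get_cancel_secret() should return usable
    # (base32-encodable) lease secrets.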
    def test_secrets(self):
        basedir = "test_client.Basic.test_secrets"
        os.mkdir(basedir)
        open(os.path.join(basedir, "introducer.furl"), "w").write("")
        c = client.Client(basedir)
        secret_fname = os.path.join(basedir, "private", "secret")
        self.failUnless(os.path.exists(secret_fname), secret_fname)
        renew_secret = c.get_renewal_secret()
        self.failUnless(base32.b2a(renew_secret))
        cancel_secret = c.get_cancel_secret()
        self.failUnless(base32.b2a(cancel_secret))

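    # The reserved_space tests below pin down how [storage]reserved_space is
    # parsed: a bare number is bytes, and the suffixes are decimal
    # (power-of-ten) multipliers, so "10K" -> 10*1000, "5mB" -> 5*1000*1000,
    # "78Gb" -> 78*1000*1000*1000. A value that cannot be parsed ("bogus")
    # results in a reserved_space of 0.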
    def test_reserved_1(self):
        basedir = "client.Basic.test_reserved_1"
        os.mkdir(basedir)
        f = open(os.path.join(basedir, "tahoe.cfg"), "w")
        f.write(BASECONFIG)
        f.write("[storage]\n")
        f.write("enabled = true\n")
        f.write("reserved_space = 1000\n")
        f.close()
        c = client.Client(basedir)
        self.failUnlessEqual(c.getServiceNamed("storage").reserved_space, 1000)

    def test_reserved_2(self):
        basedir = "client.Basic.test_reserved_2"
        os.mkdir(basedir)
        f = open(os.path.join(basedir, "tahoe.cfg"), "w")
        f.write(BASECONFIG)
        f.write("[storage]\n")
        f.write("enabled = true\n")
        f.write("reserved_space = 10K\n")
        f.close()
        c = client.Client(basedir)
        self.failUnlessEqual(c.getServiceNamed("storage").reserved_space, 10*1000)

    def test_reserved_3(self):
        basedir = "client.Basic.test_reserved_3"
        os.mkdir(basedir)
        f = open(os.path.join(basedir, "tahoe.cfg"), "w")
        f.write(BASECONFIG)
        f.write("[storage]\n")
        f.write("enabled = true\n")
        f.write("reserved_space = 5mB\n")
        f.close()
        c = client.Client(basedir)
        self.failUnlessEqual(c.getServiceNamed("storage").reserved_space,
                             5*1000*1000)

    def test_reserved_4(self):
        basedir = "client.Basic.test_reserved_4"
        os.mkdir(basedir)
        f = open(os.path.join(basedir, "tahoe.cfg"), "w")
        f.write(BASECONFIG)
        f.write("[storage]\n")
        f.write("enabled = true\n")
        f.write("reserved_space = 78Gb\n")
        f.close()
        c = client.Client(basedir)
        self.failUnlessEqual(c.getServiceNamed("storage").reserved_space,
                             78*1000*1000*1000)

    def test_reserved_bad(self):
        basedir = "client.Basic.test_reserved_bad"
        os.mkdir(basedir)
        f = open(os.path.join(basedir, "tahoe.cfg"), "w")
        f.write(BASECONFIG)
        f.write("[storage]\n")
        f.write("enabled = true\n")
        f.write("reserved_space = bogus\n")
        f.close()
        c = client.Client(basedir)
        self.failUnlessEqual(c.getServiceNamed("storage").reserved_space, 0)

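    # _permute returns the serverids in the order the StorageFarmBroker's
    # get_servers_for_psi() permutes them for a given peer-selection index;
    # test_permute pins that ordering down for five fake servers.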
    def _permute(self, sb, key):
        return [ s.get_serverid() for s in sb.get_servers_for_psi(key) ]

    def test_permute(self):
        sb = StorageFarmBroker(None, True)
        for k in ["%d" % i for i in range(5)]:
            sb.test_add_rref(k, "rref")

        self.failUnlessReallyEqual(self._permute(sb, "one"), ['3','1','0','4','2'])
        self.failUnlessReallyEqual(self._permute(sb, "two"), ['0','4','2','1','3'])
        sb.servers.clear()
        self.failUnlessReallyEqual(self._permute(sb, "one"), [])

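    # The storage server's remote_get_version() response should report the
    # full application version (appname plus version string), and the node's
    # stats should include a numeric uptime.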
    def test_versions(self):
        basedir = "test_client.Basic.test_versions"
        os.mkdir(basedir)
        open(os.path.join(basedir, "introducer.furl"), "w").write("")
        c = client.Client(basedir)
        ss = c.getServiceNamed("storage")
        verdict = ss.remote_get_version()
        self.failUnlessReallyEqual(verdict["application-version"],
                                   str(allmydata.__full_version__))
        self.failIfEqual(str(allmydata.__version__), "unknown")
        self.failUnless("." in str(allmydata.__full_version__),
                        "non-numeric version in '%s'" % allmydata.__version__)
        all_versions = allmydata.get_package_versions_string()
        self.failUnless(allmydata.__appname__ in all_versions)
        log.msg("tahoe versions: %s" % all_versions)
        # also test stats
        stats = c.get_stats()
        self.failUnless("node.uptime" in stats)
        self.failUnless(isinstance(stats["node.uptime"], float))


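# flush_but_dont_ignore() drains foolscap's eventual-send queue during test
# teardown while passing the original result (or failure) through, so the Run
# tests below can stop their MultiService cleanly between cases.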
def flush_but_dont_ignore(res):
    d = flushEventualQueue()
    def _done(ignored):
        return res
    d.addCallback(_done)
    return d


class Run(unittest.TestCase, testutil.StallMixin):

    def setUp(self):
        self.sparent = service.MultiService()
        self.sparent.startService()
    def tearDown(self):
        d = self.sparent.stopService()
        d.addBoth(flush_but_dont_ignore)
        return d

    def test_loadable(self):
        basedir = "test_client.Run.test_loadable"
        os.mkdir(basedir)
        dummy = "pb://wl74cyahejagspqgy4x5ukrvfnevlknt@127.0.0.1:58889/bogus"
        open(os.path.join(basedir, "introducer.furl"), "w").write(dummy)
        open(os.path.join(basedir, "suicide_prevention_hotline"), "w")
        client.Client(basedir)

    def test_reloadable(self):
        basedir = "test_client.Run.test_reloadable"
        os.mkdir(basedir)
        dummy = "pb://wl74cyahejagspqgy4x5ukrvfnevlknt@127.0.0.1:58889/bogus"
        open(os.path.join(basedir, "introducer.furl"), "w").write(dummy)
        c1 = client.Client(basedir)
        c1.setServiceParent(self.sparent)

        # delay to let the service start up completely. I'm not entirely sure
        # this is necessary.
        d = self.stall(delay=2.0)
        d.addCallback(lambda res: c1.disownServiceParent())
        # the cygwin buildslave seems to need more time to let the old
        # service completely shut down. When delay=0.1, I saw this test fail,
        # probably due to the logport trying to reclaim the old socket
        # number. This suggests that either we're dropping a Deferred
        # somewhere in the shutdown sequence, or that cygwin is just cranky.
        d.addCallback(self.stall, delay=2.0)
        def _restart(res):
            # TODO: pause for slightly over one second, to let
            # Client._check_hotline poll the file once. That will exercise
            # another few lines. Then add another test in which we don't
            # update the file at all, and watch to see the node shutdown. (to
            # do this, use a modified node which overrides Node.shutdown(),
            # also change _check_hotline to use it instead of a raw
            # reactor.stop, also instrument the shutdown event in an
            # attribute that we can check)
            c2 = client.Client(basedir)
            c2.setServiceParent(self.sparent)
            return c2.disownServiceParent()
        d.addCallback(_restart)
        return d


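# NodeMaker.test_maker feeds each kind of cap to create_node_from_uri() and
# checks which interfaces the resulting node provides, plus its mutability
# and read-only-ness: CHK (immutable file), LIT, SSK and SSK-RO (mutable
# file), DIR2 and DIR2-RO (directory), and unknown "lafs://" caps from the
# future.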
class NodeMaker(testutil.ReallyEqualMixin, unittest.TestCase):
    def test_maker(self):
        basedir = "client/NodeMaker/maker"
        fileutil.make_dirs(basedir)
        f = open(os.path.join(basedir, "tahoe.cfg"), "w")
        f.write(BASECONFIG)
        f.close()
        c = client.Client(basedir)

        n = c.create_node_from_uri("URI:CHK:6nmrpsubgbe57udnexlkiwzmlu:bjt7j6hshrlmadjyr7otq3dc24end5meo5xcr5xe5r663po6itmq:3:10:7277")
        self.failUnless(IFilesystemNode.providedBy(n))
        self.failUnless(IFileNode.providedBy(n))
        self.failUnless(IImmutableFileNode.providedBy(n))
        self.failIf(IMutableFileNode.providedBy(n))
        self.failIf(IDirectoryNode.providedBy(n))
        self.failUnless(n.is_readonly())
        self.failIf(n.is_mutable())

        n = c.create_node_from_uri("URI:LIT:n5xgk")
        self.failUnless(IFilesystemNode.providedBy(n))
        self.failUnless(IFileNode.providedBy(n))
        self.failUnless(IImmutableFileNode.providedBy(n))
        self.failIf(IMutableFileNode.providedBy(n))
        self.failIf(IDirectoryNode.providedBy(n))
        self.failUnless(n.is_readonly())
        self.failIf(n.is_mutable())

        n = c.create_node_from_uri("URI:SSK:n6x24zd3seu725yluj75q5boaa:mm6yoqjhl6ueh7iereldqxue4nene4wl7rqfjfybqrehdqmqskvq")
        self.failUnless(IFilesystemNode.providedBy(n))
        self.failUnless(IFileNode.providedBy(n))
        self.failIf(IImmutableFileNode.providedBy(n))
        self.failUnless(IMutableFileNode.providedBy(n))
        self.failIf(IDirectoryNode.providedBy(n))
        self.failIf(n.is_readonly())
        self.failUnless(n.is_mutable())

        n = c.create_node_from_uri("URI:SSK-RO:b7sr5qsifnicca7cbk3rhrhbvq:mm6yoqjhl6ueh7iereldqxue4nene4wl7rqfjfybqrehdqmqskvq")
        self.failUnless(IFilesystemNode.providedBy(n))
        self.failUnless(IFileNode.providedBy(n))
        self.failIf(IImmutableFileNode.providedBy(n))
        self.failUnless(IMutableFileNode.providedBy(n))
        self.failIf(IDirectoryNode.providedBy(n))
        self.failUnless(n.is_readonly())
        self.failUnless(n.is_mutable())

        n = c.create_node_from_uri("URI:DIR2:n6x24zd3seu725yluj75q5boaa:mm6yoqjhl6ueh7iereldqxue4nene4wl7rqfjfybqrehdqmqskvq")
        self.failUnless(IFilesystemNode.providedBy(n))
        self.failIf(IFileNode.providedBy(n))
        self.failIf(IImmutableFileNode.providedBy(n))
        self.failIf(IMutableFileNode.providedBy(n))
        self.failUnless(IDirectoryNode.providedBy(n))
        self.failIf(n.is_readonly())
        self.failUnless(n.is_mutable())

        n = c.create_node_from_uri("URI:DIR2-RO:b7sr5qsifnicca7cbk3rhrhbvq:mm6yoqjhl6ueh7iereldqxue4nene4wl7rqfjfybqrehdqmqskvq")
        self.failUnless(IFilesystemNode.providedBy(n))
        self.failIf(IFileNode.providedBy(n))
        self.failIf(IImmutableFileNode.providedBy(n))
        self.failIf(IMutableFileNode.providedBy(n))
        self.failUnless(IDirectoryNode.providedBy(n))
        self.failUnless(n.is_readonly())
        self.failUnless(n.is_mutable())

        unknown_rw = "lafs://from_the_future"
        unknown_ro = "lafs://readonly_from_the_future"
        n = c.create_node_from_uri(unknown_rw, unknown_ro)
        self.failUnless(IFilesystemNode.providedBy(n))
        self.failIf(IFileNode.providedBy(n))
        self.failIf(IImmutableFileNode.providedBy(n))
        self.failIf(IMutableFileNode.providedBy(n))
        self.failIf(IDirectoryNode.providedBy(n))
        self.failUnless(n.is_unknown())
        self.failUnlessReallyEqual(n.get_uri(), unknown_rw)
        self.failUnlessReallyEqual(n.get_write_uri(), unknown_rw)
        self.failUnlessReallyEqual(n.get_readonly_uri(), "ro." + unknown_ro)

        # Note: it isn't that we *intend* to deploy non-ASCII caps in
        # the future, it is that we want to make sure older Tahoe-LAFS
        # versions wouldn't choke on them if we were to do so. See
        # #1051 and wiki:NewCapDesign for details.
        unknown_rw = u"lafs://from_the_future_rw_\u263A".encode('utf-8')
        unknown_ro = u"lafs://readonly_from_the_future_ro_\u263A".encode('utf-8')
        n = c.create_node_from_uri(unknown_rw, unknown_ro)
        self.failUnless(IFilesystemNode.providedBy(n))
        self.failIf(IFileNode.providedBy(n))
        self.failIf(IImmutableFileNode.providedBy(n))
        self.failIf(IMutableFileNode.providedBy(n))
        self.failIf(IDirectoryNode.providedBy(n))
        self.failUnless(n.is_unknown())
        self.failUnlessReallyEqual(n.get_uri(), unknown_rw)
        self.failUnlessReallyEqual(n.get_write_uri(), unknown_rw)
        self.failUnlessReallyEqual(n.get_readonly_uri(), "ro." + unknown_ro)