import os, sys
import mock
import twisted
from yaml import (
    safe_dump,
)
from twisted.trial import unittest
from twisted.application import service
from twisted.internet import defer
from twisted.python.filepath import (
    FilePath,
)
from testtools.matchers import (
    Equals,
    AfterPreprocessing,
)
from testtools.twistedsupport import (
    succeeded,
)

import allmydata
import allmydata.frontends.magic_folder
import allmydata.util.log

from allmydata.node import OldConfigError, OldConfigOptionError, UnescapedHashError, _Config, read_config, create_node_dir
from allmydata.node import config_from_string
from allmydata.frontends.auth import NeedRootcapLookupScheme
from allmydata import client
from allmydata.storage_client import StorageFarmBroker
from allmydata.util import base32, fileutil, encodingutil
from allmydata.util.fileutil import abspath_expanduser_unicode
from allmydata.interfaces import IFilesystemNode, IFileNode, \
     IImmutableFileNode, IMutableFileNode, IDirectoryNode
from foolscap.api import flushEventualQueue
import allmydata.test.common_util as testutil
from allmydata.test.common import (
    SyncTestCase,
)


BASECONFIG = ("[client]\n"
              "introducer.furl = \n"
              )

BASECONFIG_I = ("[client]\n"
                "introducer.furl = %s\n"
                )
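
# BASECONFIG is the smallest tahoe.cfg body a client node accepts: a
# [client] section with an empty introducer.furl. BASECONFIG_I leaves a %s
# slot so individual tests can substitute a specific introducer FURL.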


class Basic(testutil.ReallyEqualMixin, testutil.NonASCIIPathMixin, unittest.TestCase):
    def test_loadable(self):
        basedir = "test_client.Basic.test_loadable"
        os.mkdir(basedir)
        fileutil.write(os.path.join(basedir, "tahoe.cfg"), \
                       BASECONFIG)
        return client.create_client(basedir)

    @defer.inlineCallbacks
    def test_unreadable_introducers(self):
        """
        The Deferred from create_client fails when
        private/introducers.yaml exists but is unreadable
        """
        basedir = "test_client.Basic.test_unreadable_introducers"
        os.mkdir(basedir, 0o700)
        os.mkdir(os.path.join(basedir, 'private'), 0o700)
        intro_fname = os.path.join(basedir, 'private', 'introducers.yaml')
        with open(intro_fname, 'w') as f:
            f.write("---\n")
        os.chmod(intro_fname, 0o000)
        # restore the mode so the test directory can be cleaned up
        self.addCleanup(lambda: os.chmod(intro_fname, 0o700))

        with self.assertRaises(EnvironmentError):
            yield client.create_client(basedir)

    @defer.inlineCallbacks
    def test_comment(self):
        """
        An unescaped comment character (#) in a furl results in an
        UnescapedHashError Failure.
        """
        should_fail = [r"test#test", r"#testtest", r"test\\#test"]
        should_not_fail = [r"test\#test", r"test\\\#test", r"testtest"]

        basedir = "test_client.Basic.test_comment"
        os.mkdir(basedir)

        def write_config(s):
            config = ("[client]\n"
                      "introducer.furl = %s\n" % s)
            fileutil.write(os.path.join(basedir, "tahoe.cfg"), config)

        for s in should_fail:
            self.failUnless(_Config._contains_unescaped_hash(s))
            write_config(s)
            with self.assertRaises(UnescapedHashError) as ctx:
                yield client.create_client(basedir)
            self.assertIn("[client]introducer.furl", str(ctx.exception))

        for s in should_not_fail:
            self.failIf(_Config._contains_unescaped_hash(s))
            write_config(s)
            yield client.create_client(basedir)
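
    # For illustration only: a minimal sketch (an assumption, not
    # allmydata's actual implementation) of the escaping rule the
    # should_fail/should_not_fail lists above encode -- a '#' is unescaped
    # unless it is preceded by an odd number of backslashes.
    @staticmethod
    def _contains_unescaped_hash_sketch(s):
        backslashes = 0
        for ch in s:
            if ch == '\\':
                backslashes += 1
                continue
            if ch == '#' and backslashes % 2 == 0:
                return True  # even (possibly zero) backslashes: unescaped
            backslashes = 0
        return False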

    def test_unreadable_config(self):
        if sys.platform == "win32":
            # if somebody knows a clever way to do this (cause
            # EnvironmentError when reading a file that really exists) on
            # windows, please fix this
            raise unittest.SkipTest("can't make unreadable files on windows")
        basedir = "test_client.Basic.test_unreadable_config"
        os.mkdir(basedir)
        fn = os.path.join(basedir, "tahoe.cfg")
        fileutil.write(fn, BASECONFIG)
        old_mode = os.stat(fn).st_mode
        os.chmod(fn, 0)
        try:
            e = self.assertRaises(
                EnvironmentError,
                read_config,
                basedir,
                "client.port",
                _valid_config_sections=client._valid_config_sections,
            )
            self.assertIn("Permission denied", str(e))
        finally:
            # don't leave undeletable junk lying around
            os.chmod(fn, old_mode)

    def test_error_on_old_config_files(self):
        basedir = "test_client.Basic.test_error_on_old_config_files"
        os.mkdir(basedir)
        fileutil.write(os.path.join(basedir, "tahoe.cfg"),
                       BASECONFIG +
                       "[storage]\n" +
                       "enabled = false\n" +
                       "reserved_space = bogus\n")
        fileutil.write(os.path.join(basedir, "introducer.furl"), "")
        fileutil.write(os.path.join(basedir, "no_storage"), "")
        fileutil.write(os.path.join(basedir, "readonly_storage"), "")
        fileutil.write(os.path.join(basedir, "debug_discard_storage"), "")

        logged_messages = []
        self.patch(twisted.python.log, 'msg', logged_messages.append)

        e = self.failUnlessRaises(
            OldConfigError,
            read_config,
            basedir,
            "client.port",
            _valid_config_sections=client._valid_config_sections,
        )
        abs_basedir = fileutil.abspath_expanduser_unicode(unicode(basedir)).encode(sys.getfilesystemencoding())
        self.failUnlessIn(os.path.join(abs_basedir, "introducer.furl"), e.args[0])
        self.failUnlessIn(os.path.join(abs_basedir, "no_storage"), e.args[0])
        self.failUnlessIn(os.path.join(abs_basedir, "readonly_storage"), e.args[0])
        self.failUnlessIn(os.path.join(abs_basedir, "debug_discard_storage"), e.args[0])

        for oldfile in ['introducer.furl', 'no_storage', 'readonly_storage',
                        'debug_discard_storage']:
            logged = [ m for m in logged_messages if
                       ("Found pre-Tahoe-LAFS-v1.3 configuration file" in str(m) and oldfile in str(m)) ]
            self.failUnless(logged, (oldfile, logged_messages))

        for oldfile in [
            'nickname', 'webport', 'keepalive_timeout', 'log_gatherer.furl',
            'disconnect_timeout', 'advertised_ip_addresses', 'helper.furl',
            'key_generator.furl', 'stats_gatherer.furl', 'sizelimit',
            'run_helper']:
            logged = [ m for m in logged_messages if
                       ("Found pre-Tahoe-LAFS-v1.3 configuration file" in str(m) and oldfile in str(m)) ]
            self.failIf(logged, (oldfile, logged_messages))

    @defer.inlineCallbacks
    def test_secrets(self):
        """
        A new client has renewal + cancel secrets
        """
        basedir = "test_client.Basic.test_secrets"
        os.mkdir(basedir)
        fileutil.write(os.path.join(basedir, "tahoe.cfg"), \
                       BASECONFIG)
        c = yield client.create_client(basedir)
        secret_fname = os.path.join(basedir, "private", "secret")
        self.failUnless(os.path.exists(secret_fname), secret_fname)
        renew_secret = c.get_renewal_secret()
        self.failUnless(base32.b2a(renew_secret))
        cancel_secret = c.get_cancel_secret()
        self.failUnless(base32.b2a(cancel_secret))
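    # (both lease secrets are derived from the lease secret stored in
    # private/secret, checked above, so a fresh node can offer them to
    # storage servers immediately)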

    @defer.inlineCallbacks
    def test_nodekey_yes_storage(self):
        """
        We have a nodeid if we're providing storage
        """
        basedir = "test_client.Basic.test_nodekey_yes_storage"
        os.mkdir(basedir)
        fileutil.write(os.path.join(basedir, "tahoe.cfg"),
                       BASECONFIG)
        c = yield client.create_client(basedir)
        self.failUnless(c.get_long_nodeid().startswith("v0-"))

    @defer.inlineCallbacks
    def test_nodekey_no_storage(self):
        """
        We have a nodeid if we're not providing storage
        """
        basedir = "test_client.Basic.test_nodekey_no_storage"
        os.mkdir(basedir)
        fileutil.write(os.path.join(basedir, "tahoe.cfg"),
                       BASECONFIG + "[storage]\n" + "enabled = false\n")
        c = yield client.create_client(basedir)
        self.failUnless(c.get_long_nodeid().startswith("v0-"))

    @defer.inlineCallbacks
    def test_reserved_1(self):
        """
        reserved_space option is propagated
        """
        basedir = "client.Basic.test_reserved_1"
        os.mkdir(basedir)
        fileutil.write(os.path.join(basedir, "tahoe.cfg"), \
                       BASECONFIG + \
                       "[storage]\n" + \
                       "enabled = true\n" + \
                       "reserved_space = 1000\n")
        c = yield client.create_client(basedir)
        self.failUnlessEqual(c.getServiceNamed("storage").reserved_space, 1000)

    @defer.inlineCallbacks
    def test_reserved_2(self):
        """
        reserved_space option understands 'K' to mean kilobytes
        """
        basedir = "client.Basic.test_reserved_2"
        os.mkdir(basedir)
        fileutil.write(os.path.join(basedir, "tahoe.cfg"), \
                       BASECONFIG + \
                       "[storage]\n" + \
                       "enabled = true\n" + \
                       "reserved_space = 10K\n")
        c = yield client.create_client(basedir)
        self.failUnlessEqual(c.getServiceNamed("storage").reserved_space, 10*1000)

    @defer.inlineCallbacks
    def test_reserved_3(self):
        """
        reserved_space option understands 'mB' to mean megabytes
        """
        basedir = "client.Basic.test_reserved_3"
        os.mkdir(basedir)
        fileutil.write(os.path.join(basedir, "tahoe.cfg"), \
                       BASECONFIG + \
                       "[storage]\n" + \
                       "enabled = true\n" + \
                       "reserved_space = 5mB\n")
        c = yield client.create_client(basedir)
        self.failUnlessEqual(c.getServiceNamed("storage").reserved_space,
                             5*1000*1000)

    @defer.inlineCallbacks
    def test_reserved_4(self):
        """
        reserved_space option understands 'Gb' to mean gigabytes
        """
        basedir = "client.Basic.test_reserved_4"
        os.mkdir(basedir)
        fileutil.write(os.path.join(basedir, "tahoe.cfg"), \
                       BASECONFIG + \
                       "[storage]\n" + \
                       "enabled = true\n" + \
                       "reserved_space = 78Gb\n")
        c = yield client.create_client(basedir)
        self.failUnlessEqual(c.getServiceNamed("storage").reserved_space,
                             78*1000*1000*1000)

    @defer.inlineCallbacks
    def test_reserved_bad(self):
        """
        reserved_space option produces errors on non-numbers
        """
        basedir = "client.Basic.test_reserved_bad"
        os.mkdir(basedir)
        fileutil.write(os.path.join(basedir, "tahoe.cfg"), \
                       BASECONFIG + \
                       "[storage]\n" + \
                       "enabled = true\n" + \
                       "reserved_space = bogus\n")
        with self.assertRaises(ValueError):
            yield client.create_client(basedir)
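
    # For illustration only: a hypothetical sketch of the decimal suffix
    # handling the reserved_space tests above rely on (K = 10**3,
    # mB/MB = 10**6, Gb/GB = 10**9; unparseable input raises ValueError).
    # This is an assumption about the behavior, not allmydata's parser.
    @staticmethod
    def _parse_reserved_space_sketch(text):
        suffixes = [("kb", 10**3), ("mb", 10**6), ("gb", 10**9),
                    ("k", 10**3), ("m", 10**6), ("g", 10**9), ("b", 1)]
        t = text.strip().lower()
        for suffix, multiplier in suffixes:
            if t.endswith(suffix):
                return int(float(t[:-len(suffix)]) * multiplier)
        return int(t)  # bare integer; "bogus" raises ValueError here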

    @defer.inlineCallbacks
    def test_web_apiauthtoken(self):
        """
        Client loads the proper API auth token from disk
        """
        basedir = u"client.Basic.test_web_apiauthtoken"
        create_node_dir(basedir, "testing")

        c = yield client.create_client(basedir)
        # this must come after we create the client, as it will create
        # a new, random authtoken itself
        with open(os.path.join(basedir, "private", "api_auth_token"), "w") as f:
            f.write("deadbeef")

        token = c.get_auth_token()
        self.assertEqual("deadbeef", token)
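    # The overwrite above takes effect because get_auth_token() re-reads
    # private/api_auth_token when called, rather than caching the random
    # value generated at client-creation time.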

    @defer.inlineCallbacks
    def test_web_staticdir(self):
        """
        a relative web.static dir is expanded properly
        """
        basedir = u"client.Basic.test_web_staticdir"
        os.mkdir(basedir)
        fileutil.write(os.path.join(basedir, "tahoe.cfg"),
                       BASECONFIG +
                       "[node]\n" +
                       "web.port = tcp:0:interface=127.0.0.1\n" +
                       "web.static = relative\n")
        c = yield client.create_client(basedir)
        w = c.getServiceNamed("webish")
        abs_basedir = fileutil.abspath_expanduser_unicode(basedir)
        expected = fileutil.abspath_expanduser_unicode(u"relative", abs_basedir)
        self.failUnlessReallyEqual(w.staticdir, expected)

    # TODO: also test config options for SFTP.

    @defer.inlineCallbacks
    def test_ftp_create(self):
        """
        configuration for sftpd results in it being started
        """
        basedir = u"client.Basic.test_ftp_create"
        create_node_dir(basedir, "testing")
        with open(os.path.join(basedir, "tahoe.cfg"), "w") as f:
            f.write(
                '[sftpd]\n'
                'enabled = true\n'
                'accounts.file = foo\n'
                'host_pubkey_file = pubkey\n'
                'host_privkey_file = privkey\n'
            )
        with mock.patch('allmydata.frontends.sftpd.SFTPServer') as p:
            yield client.create_client(basedir)
        self.assertTrue(p.called)

    @defer.inlineCallbacks
    def test_ftp_auth_keyfile(self):
        """
        ftpd accounts.file is parsed properly
        """
        basedir = u"client.Basic.test_ftp_auth_keyfile"
        os.mkdir(basedir)
        fileutil.write(os.path.join(basedir, "tahoe.cfg"),
                       (BASECONFIG +
                        "[ftpd]\n"
                        "enabled = true\n"
                        "port = tcp:0:interface=127.0.0.1\n"
                        "accounts.file = private/accounts\n"))
        os.mkdir(os.path.join(basedir, "private"))
        fileutil.write(os.path.join(basedir, "private", "accounts"), "\n")
        c = yield client.create_client(basedir) # just make sure it can be instantiated
        del c

    @defer.inlineCallbacks
    def test_ftp_auth_url(self):
        """
        ftpd accounts.url is parsed properly
        """
        basedir = u"client.Basic.test_ftp_auth_url"
        os.mkdir(basedir)
        fileutil.write(os.path.join(basedir, "tahoe.cfg"),
                       (BASECONFIG +
                        "[ftpd]\n"
                        "enabled = true\n"
                        "port = tcp:0:interface=127.0.0.1\n"
                        "accounts.url = http://0.0.0.0/\n"))
        c = yield client.create_client(basedir) # just make sure it can be instantiated
        del c

    @defer.inlineCallbacks
    def test_ftp_auth_no_accountfile_or_url(self):
        """
        ftpd requires some way to look up accounts
        """
        basedir = u"client.Basic.test_ftp_auth_no_accountfile_or_url"
        os.mkdir(basedir)
        fileutil.write(os.path.join(basedir, "tahoe.cfg"),
                       (BASECONFIG +
                        "[ftpd]\n"
                        "enabled = true\n"
                        "port = tcp:0:interface=127.0.0.1\n"))
        with self.assertRaises(NeedRootcapLookupScheme):
            yield client.create_client(basedir)

    @defer.inlineCallbacks
    def _storage_dir_test(self, basedir, storage_path, expected_path):
        """
        generic helper for the storage_dir tests that follow
        """
        os.mkdir(basedir)
        cfg_path = os.path.join(basedir, "tahoe.cfg")
        fileutil.write(
            cfg_path,
            BASECONFIG +
            "[storage]\n"
            "enabled = true\n",
        )
        if storage_path is not None:
            fileutil.write(
                cfg_path,
                "storage_dir = %s\n" % (storage_path,),
                mode="ab",
            )
        c = yield client.create_client(basedir)
        self.assertEqual(
            c.getServiceNamed("storage").storedir,
            expected_path,
        )

    def test_default_storage_dir(self):
        """
        If no value is given for ``storage_dir`` in the ``storage`` section of
        ``tahoe.cfg`` then the ``storage`` directory beneath the node
        directory is used.
        """
        basedir = u"client.Basic.test_default_storage_dir"
        config_path = None
        expected_path = os.path.join(
            abspath_expanduser_unicode(basedir),
            u"storage",
        )
        return self._storage_dir_test(
            basedir,
            config_path,
            expected_path,
        )

    def test_relative_storage_dir(self):
        """
        A storage node can be directed to use a particular directory for share
        file storage by setting ``storage_dir`` in the ``storage`` section of
        ``tahoe.cfg``. If the path is relative, it is interpreted relative to
        the node's basedir.
        """
        basedir = u"client.Basic.test_relative_storage_dir"
        config_path = b"myowndir"
        expected_path = os.path.join(
            abspath_expanduser_unicode(basedir),
            u"myowndir",
        )
        return self._storage_dir_test(
            basedir,
            config_path,
            expected_path,
        )

    def test_absolute_storage_dir(self):
        """
        If the ``storage_dir`` item in the ``storage`` section of the
        configuration gives an absolute path then exactly that path is used.
        """
        basedir = u"client.Basic.test_absolute_storage_dir"
        # create_client is going to try to make the storage directory so we
        # don't want a literal absolute path like /myowndir which we won't
        # have write permission to. So construct an absolute path that we
        # should be able to write to.
        base = u"\N{SNOWMAN}"
        if encodingutil.filesystem_encoding != "utf-8":
            base = u"melted_snowman"
        expected_path = abspath_expanduser_unicode(
            u"client.Basic.test_absolute_storage_dir_myowndir/" + base
        )
        config_path = expected_path.encode("utf-8")
        return self._storage_dir_test(
            basedir,
            config_path,
            expected_path,
        )

    def _permute(self, sb, key):
        return [ s.get_longname() for s in sb.get_servers_for_psi(key) ]
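    # get_servers_for_psi() returns servers in "permuted ring" order: each
    # server's position is derived from its permutation seed (the
    # permutation-seed-base32 announcement value) combined with the
    # peer-selection index, so different keys (like "one" and "two" in the
    # tests below) see different orderings.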

    def test_permute(self):
        sb = StorageFarmBroker(True, None)
        for k in ["%d" % i for i in range(5)]:
            ann = {"anonymous-storage-FURL": "pb://abcde@nowhere/fake",
                   "permutation-seed-base32": base32.b2a(k) }
            sb.test_add_rref(k, "rref", ann)

        self.failUnlessReallyEqual(self._permute(sb, "one"), ['3','1','0','4','2'])
        self.failUnlessReallyEqual(self._permute(sb, "two"), ['0','4','2','1','3'])
        sb.servers.clear()
        self.failUnlessReallyEqual(self._permute(sb, "one"), [])

    def test_permute_with_preferred(self):
        sb = StorageFarmBroker(True, None, preferred_peers=['1','4'])
        for k in ["%d" % i for i in range(5)]:
            ann = {"anonymous-storage-FURL": "pb://abcde@nowhere/fake",
                   "permutation-seed-base32": base32.b2a(k) }
            sb.test_add_rref(k, "rref", ann)

        self.failUnlessReallyEqual(self._permute(sb, "one"), ['1','4','3','0','2'])
        self.failUnlessReallyEqual(self._permute(sb, "two"), ['4','1','0','2','3'])
        sb.servers.clear()
        self.failUnlessReallyEqual(self._permute(sb, "one"), [])
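    # In the preferred-peers variant above, servers '1' and '4' are moved to
    # the front of each permutation while the remaining servers keep their
    # relative permuted order.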

    @defer.inlineCallbacks
    def test_versions(self):
        """
        A client knows the versions of software it has
        """
        basedir = "test_client.Basic.test_versions"
        os.mkdir(basedir)
        fileutil.write(os.path.join(basedir, "tahoe.cfg"), \
                       BASECONFIG + \
                       "[storage]\n" + \
                       "enabled = true\n")
        c = yield client.create_client(basedir)
        ss = c.getServiceNamed("storage")
        verdict = ss.remote_get_version()
        self.failUnlessReallyEqual(verdict["application-version"],
                                   str(allmydata.__full_version__))
        self.failIfEqual(str(allmydata.__version__), "unknown")
        self.failUnless("." in str(allmydata.__full_version__),
                        "non-numeric version in '%s'" % allmydata.__version__)
        all_versions = allmydata.get_package_versions_string()
        self.failUnless(allmydata.__appname__ in all_versions)
        # also test stats
        stats = c.get_stats()
        self.failUnless("node.uptime" in stats)
        self.failUnless(isinstance(stats["node.uptime"], float))

    @defer.inlineCallbacks
    def test_helper_furl(self):
        """
        various helper.furl arguments are parsed correctly
        """
        basedir = "test_client.Basic.test_helper_furl"
        os.mkdir(basedir)

        @defer.inlineCallbacks
        def _check(config, expected_furl):
            fileutil.write(os.path.join(basedir, "tahoe.cfg"),
                           BASECONFIG + config)
            c = yield client.create_client(basedir)
            uploader = c.getServiceNamed("uploader")
            furl, connected = uploader.get_helper_info()
            self.failUnlessEqual(furl, expected_furl)

        yield _check("", None)
        yield _check("helper.furl =\n", None)
        yield _check("helper.furl = \n", None)
        yield _check("helper.furl = None", None)
        yield _check("helper.furl = pb://blah\n", "pb://blah")

    @defer.inlineCallbacks
    def test_create_magic_folder_service(self):
        """
        providing magic-folder options actually creates a MagicFolder service
        """
        boom = False
        class Boom(Exception):
            pass

        class MockMagicFolder(allmydata.frontends.magic_folder.MagicFolder):
            name = 'magic-folder'

            def __init__(self, client, upload_dircap, collective_dircap, local_path_u, dbfile, umask, name,
                         inotify=None, uploader_delay=1.0, clock=None, downloader_delay=3):
                if boom:
                    raise Boom()

                service.MultiService.__init__(self)
                self.client = client
                self._umask = umask
                self.upload_dircap = upload_dircap
                self.collective_dircap = collective_dircap
                self.local_dir = local_path_u
                self.dbfile = dbfile
                self.inotify = inotify

            def startService(self):
                self.running = True

            def stopService(self):
                self.running = False

            def ready(self):
                pass

        self.patch(allmydata.frontends.magic_folder, 'MagicFolder', MockMagicFolder)

        upload_dircap = "URI:DIR2:blah"
        local_dir_u = self.unicode_or_fallback(u"loc\u0101l_dir", u"local_dir")
        local_dir_utf8 = local_dir_u.encode('utf-8')
        config = (BASECONFIG +
                  "[storage]\n" +
                  "enabled = false\n" +
                  "[magic_folder]\n" +
                  "enabled = true\n")

        basedir1 = "test_client.Basic.test_create_magic_folder_service1"
        os.mkdir(basedir1)
        os.mkdir(local_dir_u)

        # local.directory is configured, but the magic-folder dircap files
        # have not been written yet, so client creation fails
        fileutil.write(os.path.join(basedir1, "tahoe.cfg"),
                       config + "local.directory = " + local_dir_utf8 + "\n")
        with self.assertRaises(IOError):
            yield client.create_client(basedir1)

        # local.directory entry missing .. but that won't be an error
        # now, it'll just assume there are no magic folders
        # .. hrm...should we make that an error (if enabled=true but
        # there's no yaml AND no local.directory?)
        fileutil.write(os.path.join(basedir1, "tahoe.cfg"), config)
        fileutil.write(os.path.join(basedir1, "private", "magic_folder_dircap"), "URI:DIR2:blah")
        fileutil.write(os.path.join(basedir1, "private", "collective_dircap"), "URI:DIR2:meow")

        fileutil.write(os.path.join(basedir1, "tahoe.cfg"),
                       config.replace("[magic_folder]\n", "[drop_upload]\n"))

        with self.assertRaises(OldConfigOptionError):
            yield client.create_client(basedir1)

        fileutil.write(os.path.join(basedir1, "tahoe.cfg"),
                       config + "local.directory = " + local_dir_utf8 + "\n")
        c1 = yield client.create_client(basedir1)
        magicfolder = c1.getServiceNamed('magic-folder')
        self.failUnless(isinstance(magicfolder, MockMagicFolder), magicfolder)
        self.failUnlessReallyEqual(magicfolder.client, c1)
        self.failUnlessReallyEqual(magicfolder.upload_dircap, upload_dircap)
        self.failUnlessReallyEqual(os.path.basename(magicfolder.local_dir), local_dir_u)
        self.failUnless(magicfolder.inotify is None, magicfolder.inotify)
        # It doesn't start until the client starts.
        self.assertFalse(magicfolder.running)

        # See above.
        boom = True

        basedir2 = "test_client.Basic.test_create_magic_folder_service2"
        os.mkdir(basedir2)
        os.mkdir(os.path.join(basedir2, "private"))
        fileutil.write(os.path.join(basedir2, "tahoe.cfg"),
                       BASECONFIG +
                       "[magic_folder]\n" +
                       "enabled = true\n" +
                       "local.directory = " + local_dir_utf8 + "\n")
        fileutil.write(os.path.join(basedir2, "private", "magic_folder_dircap"), "URI:DIR2:blah")
        fileutil.write(os.path.join(basedir2, "private", "collective_dircap"), "URI:DIR2:meow")
        with self.assertRaises(Boom):
            yield client.create_client(basedir2)
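

# flush_but_dont_ignore() (below) chains a flush of foolscap's eventual-send
# queue into a Deferred while passing the original result through untouched;
# Run.tearDown uses it so queued events finish delivering before trial's
# cleanup checks run.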
def flush_but_dont_ignore(res):
    d = flushEventualQueue()
    def _done(ignored):
        return res
    d.addCallback(_done)
    return d


class IntroducerClients(unittest.TestCase):

    def test_invalid_introducer_furl(self):
        """
        An introducer.furl of 'None' is invalid and causes
        create_introducer_clients to fail.
        """
        cfg = (
            "[client]\n"
            "introducer.furl = None\n"
        )
        config = config_from_string("basedir", "client.port", cfg)

        with self.assertRaises(ValueError) as ctx:
            client.create_introducer_clients(config, main_tub=None)
        self.assertIn(
            "invalid 'introducer.furl = None'",
            str(ctx.exception)
        )


class StorageClients(SyncTestCase):
    """
    Tests for storage-related behavior of ``_Client``.
    """
    def test_static_servers(self):
        """
        Storage servers defined in ``private/servers.yaml`` are loaded into the
        storage broker.
        """
        serverid = u"v0-aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
        announcement = {
            u"nickname": 'some-storage-server',
            u"anonymous-storage-FURL": u"pb://xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx@tcp:storage.example:100/swissnum",
        }
        basedir = FilePath(self.mktemp())
        private = basedir.child(u"private")
        private.makedirs()
        servers = private.child(u"servers.yaml")
        servers.setContent(safe_dump({
            u"storage": {
                serverid: {
                    u"ann": announcement,
                },
            },
        }))
        def get_known_server_details(a_client):
            return list(
                (s.get_serverid(), s.get_announcement())
                for s
                in a_client.storage_broker.get_known_servers()
            )
        self.assertThat(
            client.create_client(basedir.asTextMode().path),
            succeeded(
                AfterPreprocessing(
                    get_known_server_details,
                    Equals([(serverid, announcement)]),
                ),
            ),
        )
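

# For reference, the servers.yaml written by test_static_servers above
# serializes to roughly this shape (values abbreviated):
#
#   storage:
#     v0-aaaa...:
#       ann:
#         anonymous-storage-FURL: pb://xxxx...@tcp:storage.example:100/swissnum
#         nickname: some-storage-server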


class Run(unittest.TestCase, testutil.StallMixin):

    def setUp(self):
        self.sparent = service.MultiService()
        self.sparent.startService()
    def tearDown(self):
        d = self.sparent.stopService()
        d.addBoth(flush_but_dont_ignore)
        return d

    @defer.inlineCallbacks
    def test_loadable(self):
        basedir = "test_client.Run.test_loadable"
        os.mkdir(basedir)
        dummy = "pb://wl74cyahejagspqgy4x5ukrvfnevlknt@127.0.0.1:58889/bogus"
        fileutil.write(os.path.join(basedir, "tahoe.cfg"), BASECONFIG_I % dummy)
        fileutil.write(os.path.join(basedir, client._Client.EXIT_TRIGGER_FILE), "")
        yield client.create_client(basedir)
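    # The EXIT_TRIGGER_FILE written above opts the node into exit-trigger
    # polling: Client._check_exit_trigger watches the file and shuts the
    # node down if it stops being refreshed (see the TODO in
    # test_reloadable below).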

    @defer.inlineCallbacks
    def test_reloadable(self):
        basedir = "test_client.Run.test_reloadable"
        os.mkdir(basedir)
        dummy = "pb://wl74cyahejagspqgy4x5ukrvfnevlknt@127.0.0.1:58889/bogus"
        fileutil.write(os.path.join(basedir, "tahoe.cfg"), BASECONFIG_I % dummy)
        c1 = yield client.create_client(basedir)
        c1.setServiceParent(self.sparent)

        # delay to let the service start up completely. I'm not entirely sure
        # this is necessary.
        yield self.stall(delay=2.0)
        yield c1.disownServiceParent()
        # the cygwin buildslave seems to need more time to let the old
        # service completely shut down. When delay=0.1, I saw this test fail,
        # probably due to the logport trying to reclaim the old socket
        # number. This suggests that either we're dropping a Deferred
        # somewhere in the shutdown sequence, or that cygwin is just cranky.
        yield self.stall(delay=2.0)

        # TODO: pause for slightly over one second, to let
        # Client._check_exit_trigger poll the file once. That will exercise
        # another few lines. Then add another test in which we don't
        # update the file at all, and watch to see the node shutdown.
        # (To do this, use a modified node which overrides Node.shutdown(),
        # also change _check_exit_trigger to use it instead of a raw
        # reactor.stop, also instrument the shutdown event in an
        # attribute that we can check.)
        c2 = yield client.create_client(basedir)
        c2.setServiceParent(self.sparent)
        yield c2.disownServiceParent()


class NodeMaker(testutil.ReallyEqualMixin, unittest.TestCase):

    @defer.inlineCallbacks
    def test_maker(self):
        basedir = "client/NodeMaker/maker"
        fileutil.make_dirs(basedir)
        fileutil.write(os.path.join(basedir, "tahoe.cfg"), BASECONFIG)
        c = yield client.create_client(basedir)

        n = c.create_node_from_uri("URI:CHK:6nmrpsubgbe57udnexlkiwzmlu:bjt7j6hshrlmadjyr7otq3dc24end5meo5xcr5xe5r663po6itmq:3:10:7277")
        self.failUnless(IFilesystemNode.providedBy(n))
        self.failUnless(IFileNode.providedBy(n))
        self.failUnless(IImmutableFileNode.providedBy(n))
        self.failIf(IMutableFileNode.providedBy(n))
        self.failIf(IDirectoryNode.providedBy(n))
        self.failUnless(n.is_readonly())
        self.failIf(n.is_mutable())

        # Testing #1679. There was a bug that would occur when downloader was
        # downloading the same readcap more than once concurrently, so the
        # filenode object was cached, and there was a failure from one of the
        # servers in one of the download attempts. No subsequent download
        # attempt would attempt to use that server again, which would lead to
        # the file being undownloadable until the gateway was restarted. The
        # current fix for this (hopefully to be superseded by a better fix
        # eventually) is to prevent re-use of filenodes, so the NodeMaker is
        # hereby required *not* to cache and re-use filenodes for CHKs.
        other_n = c.create_node_from_uri("URI:CHK:6nmrpsubgbe57udnexlkiwzmlu:bjt7j6hshrlmadjyr7otq3dc24end5meo5xcr5xe5r663po6itmq:3:10:7277")
        self.failIf(n is other_n, (n, other_n))

        n = c.create_node_from_uri("URI:LIT:n5xgk")
        self.failUnless(IFilesystemNode.providedBy(n))
        self.failUnless(IFileNode.providedBy(n))
        self.failUnless(IImmutableFileNode.providedBy(n))
        self.failIf(IMutableFileNode.providedBy(n))
        self.failIf(IDirectoryNode.providedBy(n))
        self.failUnless(n.is_readonly())
        self.failIf(n.is_mutable())

        n = c.create_node_from_uri("URI:SSK:n6x24zd3seu725yluj75q5boaa:mm6yoqjhl6ueh7iereldqxue4nene4wl7rqfjfybqrehdqmqskvq")
        self.failUnless(IFilesystemNode.providedBy(n))
        self.failUnless(IFileNode.providedBy(n))
        self.failIf(IImmutableFileNode.providedBy(n))
        self.failUnless(IMutableFileNode.providedBy(n))
        self.failIf(IDirectoryNode.providedBy(n))
        self.failIf(n.is_readonly())
        self.failUnless(n.is_mutable())

        n = c.create_node_from_uri("URI:SSK-RO:b7sr5qsifnicca7cbk3rhrhbvq:mm6yoqjhl6ueh7iereldqxue4nene4wl7rqfjfybqrehdqmqskvq")
        self.failUnless(IFilesystemNode.providedBy(n))
        self.failUnless(IFileNode.providedBy(n))
        self.failIf(IImmutableFileNode.providedBy(n))
        self.failUnless(IMutableFileNode.providedBy(n))
        self.failIf(IDirectoryNode.providedBy(n))
        self.failUnless(n.is_readonly())
        self.failUnless(n.is_mutable())

        n = c.create_node_from_uri("URI:DIR2:n6x24zd3seu725yluj75q5boaa:mm6yoqjhl6ueh7iereldqxue4nene4wl7rqfjfybqrehdqmqskvq")
        self.failUnless(IFilesystemNode.providedBy(n))
        self.failIf(IFileNode.providedBy(n))
        self.failIf(IImmutableFileNode.providedBy(n))
        self.failIf(IMutableFileNode.providedBy(n))
        self.failUnless(IDirectoryNode.providedBy(n))
        self.failIf(n.is_readonly())
        self.failUnless(n.is_mutable())

        n = c.create_node_from_uri("URI:DIR2-RO:b7sr5qsifnicca7cbk3rhrhbvq:mm6yoqjhl6ueh7iereldqxue4nene4wl7rqfjfybqrehdqmqskvq")
        self.failUnless(IFilesystemNode.providedBy(n))
        self.failIf(IFileNode.providedBy(n))
        self.failIf(IImmutableFileNode.providedBy(n))
        self.failIf(IMutableFileNode.providedBy(n))
        self.failUnless(IDirectoryNode.providedBy(n))
        self.failUnless(n.is_readonly())
        self.failUnless(n.is_mutable())

        unknown_rw = "lafs://from_the_future"
        unknown_ro = "lafs://readonly_from_the_future"
        n = c.create_node_from_uri(unknown_rw, unknown_ro)
        self.failUnless(IFilesystemNode.providedBy(n))
        self.failIf(IFileNode.providedBy(n))
        self.failIf(IImmutableFileNode.providedBy(n))
        self.failIf(IMutableFileNode.providedBy(n))
        self.failIf(IDirectoryNode.providedBy(n))
        self.failUnless(n.is_unknown())
        self.failUnlessReallyEqual(n.get_uri(), unknown_rw)
        self.failUnlessReallyEqual(n.get_write_uri(), unknown_rw)
        self.failUnlessReallyEqual(n.get_readonly_uri(), "ro." + unknown_ro)

        # Note: it isn't that we *intend* to deploy non-ASCII caps in
        # the future, it is that we want to make sure older Tahoe-LAFS
        # versions wouldn't choke on them if we were to do so. See
        # #1051 and wiki:NewCapDesign for details.
        unknown_rw = u"lafs://from_the_future_rw_\u263A".encode('utf-8')
        unknown_ro = u"lafs://readonly_from_the_future_ro_\u263A".encode('utf-8')
        n = c.create_node_from_uri(unknown_rw, unknown_ro)
        self.failUnless(IFilesystemNode.providedBy(n))
        self.failIf(IFileNode.providedBy(n))
        self.failIf(IImmutableFileNode.providedBy(n))
        self.failIf(IMutableFileNode.providedBy(n))
        self.failIf(IDirectoryNode.providedBy(n))
        self.failUnless(n.is_unknown())
        self.failUnlessReallyEqual(n.get_uri(), unknown_rw)
        self.failUnlessReallyEqual(n.get_write_uri(), unknown_rw)
        self.failUnlessReallyEqual(n.get_readonly_uri(), "ro." + unknown_ro)