Move check_magicfolder_smoke.py to proper integration tests

This introduces a py.test-based integration suite (currently containing
just the magic-folder end-to-end tests) and adds a tox environment
("integration") to run them.

The test setup is:

 - a "flogtool gather" instance
 - an Introducer
 - five Storage nodes
 - Alice and Bob client nodes
 - Alice and Bob have paired magic-folders
meejah 2016-08-22 17:36:56 -06:00
parent 0670144d2c
commit 72f17afa76
10 changed files with 767 additions and 432 deletions

integration/README (new file)
@@ -0,0 +1,15 @@
Install:
pip install -e .[test]
Run:
py.test -s -v integration/
If you want to keep the created temp-dir around:
py.test --keep-tempdir -v integration/
The fixtures also set up a "flogtool gather" process and dump all the
logs from all the running processes (introducer, 5 storage nodes,
alice, bob) to a tempfile.
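
For orientation, a test in this suite simply asks for the session-scoped
fixtures by name. A minimal sketch (modelled on test_smoke.py below; the
file name "example_file" is made up for illustration):

from os.path import join
import util

def test_alice_writes_bob_receives_sketch(magic_folder):
    # 'magic_folder' comes from conftest.py and yields the two local
    # magic-folder directories once Alice and Bob are paired
    alice_dir, bob_dir = magic_folder
    with open(join(alice_dir, "example_file"), "w") as f:
        f.write("alice wrote this")
    # poll until Bob's copy shows up with the same contents
    util.await_file_contents(join(bob_dir, "example_file"), "alice wrote this")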

integration/conftest.py (new file)
@@ -0,0 +1,500 @@
from __future__ import print_function
import sys
import time
import shutil
from sys import stdout as _stdout
from os import mkdir, listdir, unlink
from os.path import join, abspath, curdir, exists
from tempfile import mkdtemp, mktemp
from StringIO import StringIO
from shutilwhich import which
from twisted.internet.defer import Deferred, DeferredList
from twisted.internet.task import deferLater
from twisted.internet.protocol import ProcessProtocol
from twisted.internet.error import ProcessExitedAlready, ProcessDone
import pytest
pytest_plugins = 'pytest_twisted'
# pytest customization hooks
def pytest_addoption(parser):
parser.addoption(
"--keep-tempdir", action="store_true", dest="keep",
help="Keep the tmpdir with the client directories (introducer, etc)",
)
# I've mostly defined these fixtures from "easiest" to "most
# complicated", and the dependencies basically go "down the
# page". They're all session-scoped which has the "pro" that we only
# set up the grid once, but the "con" that each test has to be a
# little careful they're not stepping on toes etc :/
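# For orientation, the rough dependency chain (derived from the fixture
# signatures below) is:
#   reactor, temp_dir, tahoe_binary, flog_binary
#     -> flog_gatherer -> introducer -> introducer_furl
#     -> storage_nodes -> alice, bob -> alice_invite -> magic_folder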
@pytest.fixture(scope='session')
def reactor():
# this is a fixture in case we might want to try different
# reactors for some reason.
from twisted.internet import reactor as _reactor
return _reactor
@pytest.fixture(scope='session')
def temp_dir(request):
"""
    Invoke like 'py.test --keep-tempdir ...' to avoid deleting the temp-dir
"""
tmp = mkdtemp(prefix="tahoe")
if request.config.getoption('keep', True):
print("Will retain tempdir '{}'".format(tmp))
# I'm leaving this in and always calling it so that the tempdir
# path is (also) printed out near the end of the run
def cleanup():
if request.config.getoption('keep', True):
print("Keeping tempdir '{}'".format(tmp))
else:
try:
shutil.rmtree(tmp, ignore_errors=True)
except Exception as e:
print("Failed to remove tmpdir: {}".format(e))
request.addfinalizer(cleanup)
return tmp
@pytest.fixture(scope='session')
def tahoe_binary():
"""
    Finds the 'tahoe' binary and returns its complete path
"""
return which('tahoe')
@pytest.fixture(scope='session')
def flog_binary():
return which('flogtool')
class _ProcessExitedProtocol(ProcessProtocol):
"""
Internal helper that .callback()s on self.done when the process
exits (for any reason).
"""
def __init__(self):
self.done = Deferred()
def processEnded(self, reason):
self.done.callback(None)
class _CollectOutputProtocol(ProcessProtocol):
"""
    Internal helper. Collects all output (stdout + stderr) into
    self.output; self.done fires with the collected output when the
    process exits cleanly, and errbacks if it exits with an error.
"""
def __init__(self):
self.done = Deferred()
self.output = StringIO()
def processEnded(self, reason):
if not self.done.called:
self.done.callback(self.output.getvalue())
def processExited(self, reason):
if not isinstance(reason.value, ProcessDone):
self.done.errback(reason)
def outReceived(self, data):
self.output.write(data)
def errReceived(self, data):
print("ERR", data)
self.output.write(data)
class _DumpOutputProtocol(ProcessProtocol):
"""
Internal helper.
"""
def __init__(self, f):
self.done = Deferred()
self._out = f if f is not None else sys.stdout
def processEnded(self, reason):
if not self.done.called:
self.done.callback(None)
def processExited(self, reason):
if not isinstance(reason.value, ProcessDone):
self.done.errback(reason)
def outReceived(self, data):
self._out.write(data)
def errReceived(self, data):
self._out.write(data)
class _MagicTextProtocol(ProcessProtocol):
"""
    Internal helper. Monitors all stdout looking for a magic string;
    fires self.magic_seen when the string appears, and fires self.exited
    when the process ends.
"""
def __init__(self, magic_text):
self.magic_seen = Deferred()
self.exited = Deferred()
self._magic_text = magic_text
self._output = StringIO()
def processEnded(self, reason):
self.exited.callback(None)
def outReceived(self, data):
sys.stdout.write(data)
self._output.write(data)
if not self.magic_seen.called and self._magic_text in self._output.getvalue():
print("Saw '{}' in the logs".format(self._magic_text))
self.magic_seen.callback(None)
def errReceived(self, data):
sys.stdout.write(data)
@pytest.fixture(scope='session')
def flog_gatherer(reactor, temp_dir, flog_binary, request):
out_protocol = _CollectOutputProtocol()
gather_dir = join(temp_dir, 'flog_gather')
process = reactor.spawnProcess(
out_protocol,
flog_binary,
(
'flogtool', 'create-gatherer',
'--location', 'tcp:localhost:3117',
'--port', '3117',
gather_dir,
)
)
pytest.blockon(out_protocol.done)
twistd_protocol = _MagicTextProtocol("Gatherer waiting at")
twistd_process = reactor.spawnProcess(
twistd_protocol,
which('twistd'),
(
'twistd', '--nodaemon', '--python',
join(gather_dir, 'gatherer.tac'),
),
path=gather_dir,
)
pytest.blockon(twistd_protocol.magic_seen)
def cleanup():
try:
twistd_process.signalProcess('TERM')
pytest.blockon(twistd_protocol.exited)
except ProcessExitedAlready:
pass
flog_file = mktemp('.flog_dump')
flog_protocol = _DumpOutputProtocol(open(flog_file, 'w'))
flog_dir = join(temp_dir, 'flog_gather')
flogs = [x for x in listdir(flog_dir) if x.endswith('.flog')]
print("Dumping {} flogtool logfiles to '{}'".format(len(flogs), flog_file))
reactor.spawnProcess(
flog_protocol,
flog_binary,
(
'flogtool', 'dump', join(temp_dir, 'flog_gather', flogs[0])
),
)
pytest.blockon(flog_protocol.done)
request.addfinalizer(cleanup)
with open(join(gather_dir, 'log_gatherer.furl'), 'r') as f:
furl = f.read().strip()
return furl
@pytest.fixture(scope='session')
def introducer(reactor, temp_dir, tahoe_binary, flog_gatherer, request):
config = '''
[node]
nickname = introducer0
web.port = 4560
log_gatherer.furl = {log_furl}
'''.format(log_furl=flog_gatherer)
intro_dir = join(temp_dir, 'introducer')
print("making introducer", intro_dir)
if not exists(intro_dir):
mkdir(intro_dir)
done_proto = _ProcessExitedProtocol()
reactor.spawnProcess(
done_proto,
tahoe_binary,
('tahoe', 'create-introducer', intro_dir),
)
pytest.blockon(done_proto.done)
# over-write the config file with our stuff
with open(join(intro_dir, 'tahoe.cfg'), 'w') as f:
f.write(config)
# on windows, "tahoe start" means: run forever in the foreground,
# but on linux it means daemonize. "tahoe run" is consistent
# between platforms.
protocol = _MagicTextProtocol('introducer running')
process = reactor.spawnProcess(
protocol,
tahoe_binary,
('tahoe', 'run', intro_dir),
)
def cleanup():
try:
process.signalProcess('TERM')
pytest.blockon(protocol.exited)
except ProcessExitedAlready:
pass
request.addfinalizer(cleanup)
pytest.blockon(protocol.magic_seen)
return process
@pytest.fixture(scope='session')
def introducer_furl(introducer, temp_dir):
furl_fname = join(temp_dir, 'introducer', 'private', 'introducer.furl')
while not exists(furl_fname):
print("Don't see {} yet".format(furl_fname))
time.sleep(.1)
furl = open(furl_fname, 'r').read()
return furl
def _run_node(reactor, tahoe_binary, node_dir, request, magic_text):
if magic_text is None:
magic_text = "client running"
protocol = _MagicTextProtocol(magic_text)
# on windows, "tahoe start" means: run forever in the foreground,
# but on linux it means daemonize. "tahoe run" is consistent
# between platforms.
process = reactor.spawnProcess(
protocol,
tahoe_binary,
('tahoe', 'run', node_dir),
)
def cleanup():
try:
process.signalProcess('TERM')
pytest.blockon(protocol.exited)
except ProcessExitedAlready:
pass
request.addfinalizer(cleanup)
# we return the 'process' ITransport instance
# XXX abusing the Deferred; should use .when_magic_seen() or something?
protocol.magic_seen.addCallback(lambda _: process)
return protocol.magic_seen
def _create_node(reactor, request, temp_dir, tahoe_binary, introducer_furl, flog_gatherer, name, web_port, storage=True, magic_text=None):
"""
    Helper to create a single node and run it; returns a Deferred that
    fires with the ITransport instance that spawnProcess returned once
    the node is up.
"""
node_dir = join(temp_dir, name)
if web_port is None:
web_port = ''
if not exists(node_dir):
print("creating", node_dir)
mkdir(node_dir)
done_proto = _ProcessExitedProtocol()
args = [
'tahoe',
'create-node',
'--nickname', name,
'--introducer', introducer_furl,
]
if not storage:
args.append('--no-storage')
args.append(node_dir)
reactor.spawnProcess(
done_proto,
tahoe_binary,
args,
)
pytest.blockon(done_proto.done)
with open(join(node_dir, 'tahoe.cfg'), 'w') as f:
f.write('''
[node]
nickname = %(name)s
web.port = %(web_port)s
web.static = public_html
log_gatherer.furl = %(log_furl)s
[client]
# Which services should this client connect to?
introducer.furl = %(furl)s
shares.needed = 2
shares.happy = 3
shares.total = 4
''' % {
'name': name,
'furl': introducer_furl,
'web_port': web_port,
'log_furl': flog_gatherer,
})
return _run_node(reactor, tahoe_binary, node_dir, request, magic_text)
@pytest.fixture(scope='session')
def storage_nodes(reactor, temp_dir, tahoe_binary, introducer, introducer_furl, flog_gatherer, request):
nodes = []
# start all 5 nodes in parallel
for x in range(5):
name = 'node{}'.format(x)
# tub_port = 9900 + x
nodes.append(
pytest.blockon(
_create_node(
reactor, request, temp_dir, tahoe_binary, introducer_furl, flog_gatherer, name,
web_port=None, storage=True,
)
)
)
#nodes = pytest.blockon(DeferredList(nodes))
return nodes
@pytest.fixture(scope='session')
def alice(reactor, temp_dir, tahoe_binary, introducer_furl, flog_gatherer, storage_nodes, request):
try:
mkdir(join(temp_dir, 'magic-alice'))
except OSError:
pass
process = pytest.blockon(
_create_node(
reactor, request, temp_dir, tahoe_binary, introducer_furl, flog_gatherer, "alice",
web_port="tcp:9980:interface=localhost",
storage=False,
)
)
return process
@pytest.fixture(scope='session')
def bob(reactor, temp_dir, tahoe_binary, introducer_furl, flog_gatherer, storage_nodes, request):
try:
mkdir(join(temp_dir, 'magic-bob'))
except OSError:
pass
process = pytest.blockon(
_create_node(
reactor, request, temp_dir, tahoe_binary, introducer_furl, flog_gatherer, "bob",
web_port="tcp:9981:interface=localhost",
storage=False,
)
)
return process
@pytest.fixture(scope='session')
def alice_invite(reactor, alice, tahoe_binary, temp_dir, request):
node_dir = join(temp_dir, 'alice')
# FIXME XXX by the time we see "client running" in the logs, the
# storage servers aren't "really" ready to roll yet (uploads
# fairly consistently fail if we don't hack in this pause...)
    time.sleep(5)
proto = _CollectOutputProtocol()
transport = reactor.spawnProcess(
proto,
tahoe_binary,
[
'tahoe', 'magic-folder', 'create',
'--basedir', node_dir, 'magik:', 'alice',
join(temp_dir, 'magic-alice'),
]
)
pytest.blockon(proto.done)
proto = _CollectOutputProtocol()
transport = reactor.spawnProcess(
proto,
tahoe_binary,
[
'tahoe', 'magic-folder', 'invite',
'--basedir', node_dir, 'magik:', 'bob',
]
)
pytest.blockon(proto.done)
invite = proto.output.getvalue()
print("invite from alice", invite)
# before magic-folder works, we have to stop and restart (this is
# crappy for the tests -- can we fix it in magic-folder?)
proto = _CollectOutputProtocol()
transport = reactor.spawnProcess(
proto,
tahoe_binary,
[
'tahoe', 'stop', node_dir
]
)
pytest.blockon(proto.done)
magic_text = 'Completed initial Magic Folder scan successfully'
pytest.blockon(_run_node(reactor, tahoe_binary, node_dir, request, magic_text))
return invite
@pytest.fixture(scope='session')
def magic_folder(reactor, alice_invite, alice, bob, tahoe_binary, temp_dir, request):
print("pairing magic-folder")
bob_dir = join(temp_dir, 'bob')
proto = _CollectOutputProtocol()
transport = reactor.spawnProcess(
proto,
tahoe_binary,
[
'tahoe', 'magic-folder', 'join',
'--basedir', bob_dir,
alice_invite,
join(temp_dir, 'magic-bob'),
]
)
pytest.blockon(proto.done)
# before magic-folder works, we have to stop and restart (this is
# crappy for the tests -- can we fix it in magic-folder?)
proto = _CollectOutputProtocol()
transport = reactor.spawnProcess(
proto,
tahoe_binary,
[
'tahoe', 'stop', bob_dir
]
)
pytest.blockon(proto.done)
magic_text = 'Completed initial Magic Folder scan successfully'
pytest.blockon(_run_node(reactor, tahoe_binary, bob_dir, request, magic_text))
return (join(temp_dir, 'magic-alice'), join(temp_dir, 'magic-bob'))
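
All of the client fixtures follow the same shape, so extending the grid is
mostly mechanical. As a sketch only (the name "carol" and port 9982 are
assumptions for illustration, not part of this commit), a third client
fixture would look like:

@pytest.fixture(scope='session')
def carol(reactor, temp_dir, tahoe_binary, introducer_furl, flog_gatherer, storage_nodes, request):
    # hypothetical third client node, mirroring the alice/bob fixtures above
    return pytest.blockon(
        _create_node(
            reactor, request, temp_dir, tahoe_binary, introducer_furl, flog_gatherer, "carol",
            web_port="tcp:9982:interface=localhost",
            storage=False,
        )
    )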


@@ -0,0 +1,22 @@
# These dummy tests run first, so they instantiate the pre-requisites
# (e.g. the introducer) and therefore print "something" on the console
# as we go (a "." or the test-name in "-v"/verbose mode).
# You can safely skip any of these tests; it'll just make the first
# "real" test appear to take longer as the fixtures get built.
def test_create_flogger(flog_gatherer):
print("Created flog_gatherer")
def test_create_introducer(introducer):
print("Created introducer")
def test_create_storage(storage_nodes):
print("Created {} storage nodes".format(len(storage_nodes)))
def test_create_alice_bob_magicfolder(magic_folder):
print("Alice and Bob have paired magic-folders")

integration/test_smoke.py (new file)
@@ -0,0 +1,187 @@
import time
import shutil
from os import mkdir, unlink, listdir
from os.path import join, exists
import util
# tests converted from check_magicfolder_smoke.py
# see "conftest.py" for the fixtures (e.g. "magic_folder")
def test_alice_writes_bob_receives(magic_folder):
alice_dir, bob_dir = magic_folder
with open(join(alice_dir, "first_file"), "w") as f:
f.write("alice wrote this")
util.await_file_contents(join(bob_dir, "first_file"), "alice wrote this")
return
def test_bob_writes_alice_receives(magic_folder):
alice_dir, bob_dir = magic_folder
with open(join(bob_dir, "second_file"), "w") as f:
f.write("bob wrote this")
util.await_file_contents(join(alice_dir, "second_file"), "bob wrote this")
return
def test_alice_deletes(magic_folder):
# alice writes a file, waits for bob to get it and then deletes it.
alice_dir, bob_dir = magic_folder
with open(join(alice_dir, "delfile"), "w") as f:
f.write("alice wrote this")
util.await_file_contents(join(bob_dir, "delfile"), "alice wrote this")
    # bob has the file; now alice deletes it
unlink(join(alice_dir, "delfile"))
# bob should remove his copy, but preserve a backup
util.await_file_vanishes(join(bob_dir, "delfile"))
util.await_file_contents(join(bob_dir, "delfile.backup"), "alice wrote this")
return
def test_alice_creates_bob_edits(magic_folder):
alice_dir, bob_dir = magic_folder
# alice writes a file
with open(join(alice_dir, "editfile"), "w") as f:
f.write("alice wrote this")
util.await_file_contents(join(bob_dir, "editfile"), "alice wrote this")
# now bob edits it
with open(join(bob_dir, "editfile"), "w") as f:
f.write("bob says foo")
util.await_file_contents(join(alice_dir, "editfile"), "bob says foo")
def test_bob_creates_sub_directory(magic_folder):
alice_dir, bob_dir = magic_folder
# bob makes a sub-dir, with a file in it
mkdir(join(bob_dir, "subdir"))
with open(join(bob_dir, "subdir", "a_file"), "w") as f:
f.write("bob wuz here")
# alice gets it
util.await_file_contents(join(alice_dir, "subdir", "a_file"), "bob wuz here")
# now bob deletes it again
shutil.rmtree(join(bob_dir, "subdir"))
# alice should delete it as well
util.await_file_vanishes(join(alice_dir, "subdir", "a_file"))
    # I *think* it's by design that the subdir won't disappear,
    # because an "a_file.backup" file should appear in it...
util.await_file_contents(join(alice_dir, "subdir", "a_file.backup"), "bob wuz here")
def test_bob_creates_alice_deletes_bob_restores(magic_folder):
alice_dir, bob_dir = magic_folder
# bob creates a file
with open(join(bob_dir, "boom"), "w") as f:
f.write("bob wrote this")
util.await_file_contents(
join(alice_dir, "boom"),
"bob wrote this"
)
    # alice deletes it (so bob should as well)
unlink(join(alice_dir, "boom"))
util.await_file_vanishes(join(bob_dir, "boom"))
    # bob restores it, with new contents
with open(join(bob_dir, "boom"), "w") as f:
f.write("bob wrote this again, because reasons")
# XXX double-check this behavior is correct!
# alice sees bob's update, but marks it as a conflict (because
# .. she previously deleted it? does that really make sense)
util.await_file_contents(
join(alice_dir, "boom.conflict"),
"bob wrote this again, because reasons",
)
# fix the conflict
shutil.move(join(alice_dir, "boom.conflict"), join(alice_dir, "boom"))
def test_bob_creates_alice_deletes_alice_restores(magic_folder):
alice_dir, bob_dir = magic_folder
# bob creates a file
with open(join(bob_dir, "boom2"), "w") as f:
f.write("bob wrote this")
util.await_file_contents(
join(alice_dir, "boom2"),
"bob wrote this"
)
    # alice deletes it (so bob should as well)
unlink(join(alice_dir, "boom2"))
util.await_file_vanishes(join(bob_dir, "boom2"))
    # alice restores it, with new contents
with open(join(alice_dir, "boom2"), "w") as f:
f.write("alice re-wrote this again, because reasons")
def test_bob_conflicts_with_alice_fresh(magic_folder):
# both alice and bob make a file at "the same time".
alice_dir, bob_dir = magic_folder
# really, we fudge this a little: in reality, either alice or bob
# "wins" by uploading to the DMD first. So we make sure bob wins
# this one by giving him a massive head start
with open(join(bob_dir, 'alpha'), 'w') as f:
f.write("this is bob's alpha\n")
time.sleep(0.2)
with open(join(alice_dir, 'alpha'), 'w') as f:
f.write("this is alice's alpha\n")
# since bob uploaded first, alice should see a backup
util.await_file_contents(join(alice_dir, 'alpha'), "this is bob's alpha\n")
util.await_file_contents(join(alice_dir, 'alpha.backup'), "this is alice's alpha\n")
util.await_file_contents(join(bob_dir, 'alpha'), "this is alice's alpha\n")
util.await_file_contents(join(bob_dir, 'alpha.backup'), "this is bob's alpha\n")
def test_bob_conflicts_with_alice_preexisting(magic_folder):
# both alice and bob edit a file at "the same time" (similar to
# above, but the file already exists before the edits)
alice_dir, bob_dir = magic_folder
# have bob create the file
with open(join(bob_dir, 'beta'), 'w') as f:
f.write("original beta (from bob)\n")
util.await_file_contents(join(alice_dir, 'beta'), "original beta (from bob)\n")
# both alice and bob now have a "beta" file, at version 0
# really, we fudge this a little: in reality, either alice or bob
# "wins" by uploading to the DMD first. So we make sure bob wins
# this one by giving him a massive head start
with open(join(bob_dir, 'beta'), 'w') as f:
f.write("this is bob's beta\n")
time.sleep(0.2)
with open(join(alice_dir, 'beta'), 'w') as f:
f.write("this is alice's beta\n")
# since bob uploaded first, alice should see a backup
util.await_file_contents(join(bob_dir, 'beta'), "this is bob's beta\n")
util.await_file_contents(join(alice_dir, 'beta'), "this is bob's beta\n")
util.await_file_contents(join(alice_dir, 'beta.backup'), "this is alice's beta\n")

integration/util.py (new file)
@@ -0,0 +1,32 @@
import time
from os.path import exists
def await_file_contents(path, contents, timeout=10):
start_time = time.time()
while time.time() - start_time < timeout:
print(" waiting for '{}'".format(path))
if exists(path):
with open(path, 'r') as f:
current = f.read()
if current == contents:
return True
print(" file contents still mismatched")
print(" wanted: {}".format(contents.replace('\n', ' ')))
print(" got: {}".format(current.replace('\n', ' ')))
time.sleep(1)
if exists(path):
raise Exception("Contents of '{}' mismatched after {}s".format(path, timeout))
raise Exception("Didn't find '{}' after {}s".format(path, timeout))
def await_file_vanishes(path, timeout=10):
start_time = time.time()
while time.time() - start_time < timeout:
print(" waiting for '{}' to vanish".format(path))
if not exists(path):
return
time.sleep(1)
raise Exception("'{}' still exists after {}s".format(path, timeout))


@@ -265,6 +265,8 @@ setup(name="tahoe-lafs", # also set in __init__.py
"txtorcon", # in case pip's resolver doesn't work
"foolscap[i2p]",
"txi2p", # in case pip's resolver doesn't work
"pytest",
"pytest-twisted",
],
"tor": [
"foolscap[tor]",


@@ -89,6 +89,9 @@ install_requires = [
# <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2474>.
"pyOpenSSL >= 0.14",
"PyYAML >= 3.11",
    # backport of shutil.which, which is in the Python 3.3+ stdlib
"shutilwhich >= 1.1.0",
]
# Includes some indirect dependencies, but does not include allmydata.


@@ -477,6 +477,7 @@ class Node(service.MultiService):
service.MultiService.startService(self)
self.log("%s running" % self.NODETYPE)
twlog.msg("%s running" % self.NODETYPE)
def stopService(self):
self.log("Node.stopService")


@@ -1,432 +0,0 @@
#!/usr/bin/env python
# this is a smoke-test using "./bin/tahoe" to:
#
# 1. create an introducer
# 2. create 5 storage nodes
# 3. create 2 client nodes (alice, bob)
# 4. Alice creates a magic-folder ("magik:")
# 5. Alice invites Bob
# 6. Bob joins
#
# After that, some basic tests are performed; see the "if True:"
# blocks to turn some on or off. Could benefit from some cleanups
# etc. but this seems useful out of the gate for quick testing.
#
# TO RUN:
# from top-level of your checkout (we use "./bin/tahoe"):
# python src/allmydata/test/check_magicfolder_smoke.py
#
# This will create "./smoke_magicfolder" (which is disposable) and
# contains all the Tahoe basedirs for the introducer, storage nodes,
# clients, and the clients' magic-folders. NOTE that if these
# directories already exist they will NOT be re-created. So kill the
# grid and then "rm -rf smoke_magicfolder" if you want to re-run the
# tests cleanly.
#
# Run the script with a single arg, "kill" to run "tahoe stop" on all
# the nodes.
#
# This will have "tahoe start" -ed all the nodes, so you can continue
# to play around after the script exits.
from __future__ import print_function
import sys
import time
import shutil
import subprocess
from os.path import join, abspath, curdir, exists
from os import mkdir, listdir, unlink
is_windows = (sys.platform == 'win32')
tahoe_base = abspath(curdir)
data_base = join(tahoe_base, 'smoke_magicfolder')
if is_windows:
tahoe_bin = 'tahoe.exe'
else:
tahoe_bin = 'tahoe'
python = sys.executable
if not exists(data_base):
print("Creating", data_base)
mkdir(data_base)
if 'kill' in sys.argv:
print("Killing the grid")
for d in listdir(data_base):
print("killing", d)
subprocess.call(
[
tahoe_bin, 'stop', join(data_base, d),
]
)
sys.exit(0)
if not exists(join(data_base, 'introducer')):
subprocess.check_call(
[
tahoe_bin, 'create-introducer', join(data_base, 'introducer'),
]
)
with open(join(data_base, 'introducer', 'tahoe.cfg'), 'w') as f:
f.write('''
[node]
nickname = introducer0
web.port = 4560
''')
if not is_windows:
subprocess.check_call(
[
tahoe_bin, 'start', join(data_base, 'introducer'),
]
)
else:
time.sleep(5)
intro = subprocess.Popen(
[
tahoe_bin, 'start', join(data_base, 'introducer'),
]
)
furl_fname = join(data_base, 'introducer', 'private', 'introducer.furl')
while not exists(furl_fname):
time.sleep(1)
furl = open(furl_fname, 'r').read()
print("FURL", furl)
nodes = []
for x in range(5):
data_dir = join(data_base, 'node%d' % x)
if not exists(data_dir):
subprocess.check_call(
[
tahoe_bin, 'create-node',
'--nickname', 'node%d' % (x,),
'--introducer', furl,
data_dir,
]
)
with open(join(data_dir, 'tahoe.cfg'), 'w') as f:
f.write('''
[node]
nickname = node%(node_id)s
web.port =
web.static = public_html
# tub.location = localhost:%(tub_port)d
[client]
# Which services should this client connect to?
introducer.furl = %(furl)s
shares.needed = 2
shares.happy = 3
shares.total = 4
''' % {'node_id':x, 'furl':furl, 'tub_port':(9900 + x)})
if not is_windows:
subprocess.check_call(
[
tahoe_bin, 'start', data_dir,
]
)
else:
time.sleep(5)
node = subprocess.Popen(
[
tahoe_bin, 'start', data_dir,
]
)
nodes.append(node)
# alice and bob clients
do_invites = False
node_id = 0
clients = []
for name in ['alice', 'bob']:
data_dir = join(data_base, name)
magic_dir = join(data_base, '%s-magic' % (name,))
try:
mkdir(magic_dir)
except Exception:
pass
if not exists(data_dir):
do_invites = True
subprocess.check_call(
[
tahoe_bin, 'create-node',
'--no-storage',
'--nickname', name,
'--introducer', furl,
data_dir,
]
)
with open(join(data_dir, 'tahoe.cfg'), 'w') as f:
f.write('''
[node]
nickname = %(name)s
web.port = tcp:998%(node_id)d:interface=localhost
web.static = public_html
[client]
# Which services should this client connect to?
introducer.furl = %(furl)s
shares.needed = 2
shares.happy = 3
shares.total = 4
''' % {'name':name, 'node_id':node_id, 'furl':furl})
if not is_windows:
subprocess.check_call(
[
tahoe_bin, 'start', data_dir,
]
)
else:
time.sleep(5)
x = subprocess.Popen(
[
tahoe_bin, 'start', data_dir,
]
)
clients.append(x)
node_id += 1
# okay, now we have alice + bob (alice, bob)
# now we have alice create a magic-folder, and invite bob to it
time.sleep(5)
if do_invites:
data_dir = join(data_base, 'alice')
# alice creates her folder, invites bob
print("Alice creates a magic-folder")
subprocess.check_call(
[
tahoe_bin, 'magic-folder', 'create', '--basedir', data_dir, 'magik:', 'alice',
join(data_base, 'alice-magic'),
]
)
print("Alice invites Bob")
invite = subprocess.check_output(
[
tahoe_bin, 'magic-folder', 'invite', '--basedir', data_dir, 'magik:', 'bob',
]
)
print(" invite:", invite)
# now we let "bob"/bob join
print("Bob joins Alice's magic folder")
data_dir = join(data_base, 'bob')
subprocess.check_call(
[
tahoe_bin, 'magic-folder', 'join', '--basedir', data_dir, invite,
join(data_base, 'bob-magic'),
]
)
print("Bob has joined.")
print("Restarting alice + bob clients")
if not is_windows:
subprocess.check_call(
[
tahoe_bin, 'restart', '--basedir', join(data_base, 'alice'),
]
)
subprocess.check_call(
[
tahoe_bin, 'restart', '--basedir', join(data_base, 'bob'),
]
)
else:
for x in clients:
x.terminate()
clients = []
a = subprocess.Popen(
[
tahoe_bin, 'start', '--basedir', join(data_base, 'alice'),
]
)
b = subprocess.Popen(
[
tahoe_bin, 'start', '--basedir', join(data_base, 'bob'),
]
)
clients.append(a)
clients.append(b)
if True:
for name in ['alice', 'bob']:
try:
with open(join(data_base, name, 'private', 'magic_folder_dircap'), 'r') as f:
print("dircap %s: %s" % (name, f.read().strip()))
except Exception:
print("can't find/open %s" % (name,))
# give storage nodes a chance to connect properly? I'm not entirely
# sure what's up here, but I get "UnrecoverableFileError" on the
# first_file upload from Alice "very often" otherwise
print("waiting 3 seconds")
time.sleep(3)
if True:
# alice writes a file; bob should get it
alice_foo = join(data_base, 'alice-magic', 'first_file')
bob_foo = join(data_base, 'bob-magic', 'first_file')
with open(alice_foo, 'w') as f:
f.write("line one\n")
print("Waiting for:", bob_foo)
while True:
if exists(bob_foo):
print(" found", bob_foo)
with open(bob_foo, 'r') as f:
if f.read() == "line one\n":
break
print(" file contents still mismatched")
time.sleep(1)
if True:
# bob writes a file; alice should get it
alice_bar = join(data_base, 'alice-magic', 'second_file')
bob_bar = join(data_base, 'bob-magic', 'second_file')
with open(bob_bar, 'w') as f:
f.write("line one\n")
print("Waiting for:", alice_bar)
while True:
if exists(bob_bar):
print(" found", bob_bar)
with open(bob_bar, 'r') as f:
if f.read() == "line one\n":
break
print(" file contents still mismatched")
time.sleep(1)
if True:
# alice deletes 'first_file'
alice_foo = join(data_base, 'alice-magic', 'first_file')
bob_foo = join(data_base, 'bob-magic', 'first_file')
unlink(alice_foo)
print("Waiting for '%s' to disappear" % (bob_foo,))
while True:
if not exists(bob_foo):
print(" disappeared", bob_foo)
break
time.sleep(1)
bob_tmp = bob_foo + '.backup'
print("Waiting for '%s' to appear" % (bob_tmp,))
while True:
if exists(bob_tmp):
print(" appeared", bob_tmp)
break
time.sleep(1)
if True:
# bob writes new content to 'second_file'; alice should get it
# get it.
alice_foo = join(data_base, 'alice-magic', 'second_file')
bob_foo = join(data_base, 'bob-magic', 'second_file')
gold_content = "line one\nsecond line\n"
with open(bob_foo, 'w') as f:
f.write(gold_content)
print("Waiting for:", alice_foo)
while True:
if exists(alice_foo):
print(" found", alice_foo)
with open(alice_foo, 'r') as f:
content = f.read()
if content == gold_content:
break
print(" file contents still mismatched:\n")
print(content)
time.sleep(1)
if True:
# bob creates a sub-directory and adds a file to it
alice_dir = join(data_base, 'alice-magic', 'subdir')
bob_dir = join(data_base, 'alice-magic', 'subdir')
gold_content = 'a file in a subdirectory\n'
mkdir(bob_dir)
with open(join(bob_dir, 'subfile'), 'w') as f:
f.write(gold_content)
print("Waiting for Bob's subdir '%s' to appear" % (bob_dir,))
while True:
if exists(bob_dir):
print(" found subdir")
if exists(join(bob_dir, 'subfile')):
print(" found file")
with open(join(bob_dir, 'subfile'), 'r') as f:
if f.read() == gold_content:
print(" contents match")
break
time.sleep(0.1)
if True:
# bob deletes the whole subdir
alice_dir = join(data_base, 'alice-magic', 'subdir')
bob_dir = join(data_base, 'alice-magic', 'subdir')
shutil.rmtree(bob_dir)
print("Waiting for Alice's subdir '%s' to disappear" % (alice_dir,))
while True:
if not exists(alice_dir):
print(" it's gone")
break
time.sleep(0.1)
# XXX restore the file not working (but, unit-tests work; what's wrong with them?)
# NOTE: only not-works if it's alice restoring the file!
if True:
# restore 'first_file' but with different contents
print("re-writing 'first_file'")
assert not exists(join(data_base, 'bob-magic', 'first_file'))
assert not exists(join(data_base, 'alice-magic', 'first_file'))
alice_foo = join(data_base, 'alice-magic', 'first_file')
bob_foo = join(data_base, 'bob-magic', 'first_file')
if True:
# if we don't swap around, it works fine
alice_foo, bob_foo = bob_foo, alice_foo
gold_content = "see it again for the first time\n"
with open(bob_foo, 'w') as f:
f.write(gold_content)
print("Waiting for:", alice_foo)
while True:
if exists(alice_foo):
print(" found", alice_foo)
with open(alice_foo, 'r') as f:
content = f.read()
if content == gold_content:
break
print(" file contents still mismatched: %d bytes:\n" % (len(content),))
print(content)
else:
print(" %r not there yet" % (alice_foo,))
time.sleep(1)
if True:
# bob leaves
print('bob leaves')
data_dir = join(data_base, 'bob')
subprocess.check_call(
[
tahoe_bin, 'magic-folder', 'leave', '--basedir', data_dir,
]
)
# XXX test .backup (delete a file)
# port david's clock.advance stuff
# fix clock.advance()
# subdirectory
# file deletes
# conflicts


@@ -16,6 +16,11 @@ commands =
tahoe --version
trial --rterrors {posargs:allmydata}
[testenv:integration]
commands =
echo 'run with "py.test --keep-tempdir -s -v integration/" to debug failures'
py.test -v integration/
[testenv:coverage]
# coverage (with --branch) takes about 65% longer to run
passenv = USERPROFILE HOMEDRIVE HOMEPATH
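
With the new tox environment in place, the suite can be run either via tox
or directly with py.test (the latter, with --keep-tempdir, is the handier
way to debug a failure, as the README above notes):

tox -e integration
py.test --keep-tempdir -s -v integration/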