Mirror of https://github.com/tahoe-lafs/tahoe-lafs.git (synced 2025-04-04 17:39:19 +00:00)
Get rid of "tahoe start", "tahoe daemonize", "tahoe stop", "tahoe restart"
commit 4d28b0ec27
parent a34fca8e7a
@@ -9,8 +9,7 @@ from twisted.internet import defer, task, threads
 from allmydata.scripts.common import get_default_nodedir
 from allmydata.scripts import debug, create_node, cli, \
-    admin, tahoe_daemonize, tahoe_start, \
-    tahoe_stop, tahoe_restart, tahoe_run, tahoe_invite
+    admin, tahoe_run, tahoe_invite
 from allmydata.util.encodingutil import quote_output, quote_local_unicode_path, get_io_encoding
 from allmydata.util.eliotutil import (
     opt_eliot_destination,
@@ -37,19 +36,11 @@ if _default_nodedir:
 
 # XXX all this 'dispatch' stuff needs to be unified + fixed up
 _control_node_dispatch = {
-    "daemonize": tahoe_daemonize.daemonize,
-    "start": tahoe_start.start,
     "run": tahoe_run.run,
-    "stop": tahoe_stop.stop,
-    "restart": tahoe_restart.restart,
 }
 
 process_control_commands = [
     ["run", None, tahoe_run.RunOptions, "run a node without daemonizing"],
-    ["daemonize", None, tahoe_daemonize.DaemonizeOptions, "(deprecated) run a node in the background"],
-    ["start", None, tahoe_start.StartOptions, "(deprecated) start a node in the background and confirm it started"],
-    ["stop", None, tahoe_stop.StopOptions, "(deprecated) stop a node"],
-    ["restart", None, tahoe_restart.RestartOptions, "(deprecated) restart a node"],
 ]
 
 
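The table above is what drives subcommand dispatch in runner.py; after this commit only the "run" entry is left, so the removed names simply stop parsing. A minimal sketch of the table-driven dispatch pattern, purely for illustration (this is not the actual runner.dispatch implementation, and the names CONTROL_NODE_DISPATCH, dispatch and run below are made up):

    # Illustrative sketch only: table-driven subcommand dispatch in the style
    # of _control_node_dispatch.  Not the real allmydata.scripts.runner code.

    def run(options):
        # stand-in for tahoe_run.run(config)
        print("running the node in the foreground")
        return 0

    CONTROL_NODE_DISPATCH = {
        "run": run,   # the only process-control command left after this commit
    }

    def dispatch(subcommand, options=None):
        try:
            handler = CONTROL_NODE_DISPATCH[subcommand]
        except KeyError:
            # "daemonize", "start", "stop" and "restart" now fall through here
            raise SystemExit("unknown command: %s" % (subcommand,))
        return handler(options)

    if __name__ == "__main__":
        dispatch("run")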
@@ -1,16 +0,0 @@
-from .run_common import (
-    RunOptions as _RunOptions,
-    run,
-)
-
-__all__ = [
-    "DaemonizeOptions",
-    "daemonize",
-]
-
-class DaemonizeOptions(_RunOptions):
-    subcommand_name = "daemonize"
-
-def daemonize(config):
-    print("'tahoe daemonize' is deprecated; see 'tahoe run'")
-    return run(config)
@@ -1,21 +0,0 @@
-from __future__ import print_function
-
-from .tahoe_start import StartOptions, start
-from .tahoe_stop import stop, COULD_NOT_STOP
-
-
-class RestartOptions(StartOptions):
-    subcommand_name = "restart"
-
-
-def restart(config):
-    print("'tahoe restart' is deprecated; see 'tahoe run'")
-    stderr = config.stderr
-    rc = stop(config)
-    if rc == COULD_NOT_STOP:
-        print("ignoring couldn't-stop", file=stderr)
-        rc = 0
-    if rc:
-        print("not restarting", file=stderr)
-        return rc
-    return start(config)
@@ -1,152 +0,0 @@
-from __future__ import print_function
-
-import os
-import io
-import sys
-import time
-import subprocess
-from os.path import join, exists
-
-from allmydata.scripts.common import BasedirOptions
-from allmydata.scripts.default_nodedir import _default_nodedir
-from allmydata.util.encodingutil import quote_local_unicode_path
-
-from .run_common import MyTwistdConfig, identify_node_type
-
-
-class StartOptions(BasedirOptions):
-    subcommand_name = "start"
-    optParameters = [
-        ("basedir", "C", None,
-         "Specify which Tahoe base directory should be used."
-         " This has the same effect as the global --node-directory option."
-         " [default: %s]" % quote_local_unicode_path(_default_nodedir)),
-    ]
-
-    def parseArgs(self, basedir=None, *twistd_args):
-        # This can't handle e.g. 'tahoe start --nodaemon', since '--nodaemon'
-        # looks like an option to the tahoe subcommand, not to twistd. So you
-        # can either use 'tahoe start' or 'tahoe start NODEDIR
-        # --TWISTD-OPTIONS'. Note that 'tahoe --node-directory=NODEDIR start
-        # --TWISTD-OPTIONS' also isn't allowed, unfortunately.
-
-        BasedirOptions.parseArgs(self, basedir)
-        self.twistd_args = twistd_args
-
-    def getSynopsis(self):
-        return ("Usage: %s [global-options] %s [options]"
-                " [NODEDIR [twistd-options]]"
-                % (self.command_name, self.subcommand_name))
-
-    def getUsage(self, width=None):
-        t = BasedirOptions.getUsage(self, width) + "\n"
-        twistd_options = str(MyTwistdConfig()).partition("\n")[2].partition("\n\n")[0]
-        t += twistd_options.replace("Options:", "twistd-options:", 1)
-        t += """
-
-Note that if any twistd-options are used, NODEDIR must be specified explicitly
-(not by default or using -C/--basedir or -d/--node-directory), and followed by
-the twistd-options.
-"""
-        return t
-
-
-def start(config):
-    """
-    Start a tahoe node (daemonize it and confirm startup)
-
-    We run 'tahoe daemonize' with all the options given to 'tahoe
-    start' and then watch the log files for the correct text to appear
-    (e.g. "introducer started"). If that doesn't happen within a few
-    seconds, an error is printed along with all collected logs.
-    """
-    print("'tahoe start' is deprecated; see 'tahoe run'")
-    out = config.stdout
-    err = config.stderr
-    basedir = config['basedir']
-    quoted_basedir = quote_local_unicode_path(basedir)
-    print("STARTING", quoted_basedir, file=out)
-    if not os.path.isdir(basedir):
-        print("%s does not look like a directory at all" % quoted_basedir, file=err)
-        return 1
-    nodetype = identify_node_type(basedir)
-    if not nodetype:
-        print("%s is not a recognizable node directory" % quoted_basedir, file=err)
-        return 1
-
-    # "tahoe start" attempts to monitor the logs for successful
-    # startup -- but we can't always do that.
-
-    can_monitor_logs = False
-    if (nodetype in (u"client", u"introducer")
-        and "--nodaemon" not in config.twistd_args
-        and "--syslog" not in config.twistd_args
-        and "--logfile" not in config.twistd_args):
-        can_monitor_logs = True
-
-    if "--help" in config.twistd_args:
-        return 0
-
-    if not can_monitor_logs:
-        print("Custom logging options; can't monitor logs for proper startup messages", file=out)
-        return 1
-
-    # before we spawn tahoe, we check if "the log file" exists or not,
-    # and if so remember how big it is -- essentially, we're doing
-    # "tail -f" to see what "this" incarnation of "tahoe daemonize"
-    # spews forth.
-    starting_offset = 0
-    log_fname = join(basedir, 'logs', 'twistd.log')
-    if exists(log_fname):
-        with open(log_fname, 'r') as f:
-            f.seek(0, 2)
-            starting_offset = f.tell()
-
-    # spawn tahoe. Note that since this daemonizes, it should return
-    # "pretty fast" and with a zero return-code, or else something
-    # Very Bad has happened.
-    try:
-        args = [sys.executable] if not getattr(sys, 'frozen', False) else []
-        for i, arg in enumerate(sys.argv):
-            if arg in ['start', 'restart']:
-                args.append('daemonize')
-            else:
-                args.append(arg)
-        subprocess.check_call(args)
-    except subprocess.CalledProcessError as e:
-        return e.returncode
-
-    # now, we have to determine if tahoe has actually started up
-    # successfully or not. so, we start sucking up log files and
-    # looking for "the magic string", which depends on the node type.
-
-    magic_string = u'{} running'.format(nodetype)
-    with io.open(log_fname, 'r') as f:
-        f.seek(starting_offset)
-
-        collected = u''
-        overall_start = time.time()
-        while time.time() - overall_start < 60:
-            this_start = time.time()
-            while time.time() - this_start < 5:
-                collected += f.read()
-                if magic_string in collected:
-                    if not config.parent['quiet']:
-                        print("Node has started successfully", file=out)
-                    return 0
-                if 'Traceback ' in collected:
-                    print("Error starting node; see '{}' for more:\n\n{}".format(
-                        log_fname,
-                        collected,
-                    ), file=err)
-                    return 1
-                time.sleep(0.1)
-            print("Still waiting up to {}s for node startup".format(
-                60 - int(time.time() - overall_start)
-            ), file=out)
-
-        print("Something has gone wrong starting the node.", file=out)
-        print("Logs are available in '{}'".format(log_fname), file=out)
-        print("Collected for this run:", file=out)
-        print(collected, file=out)
-        return 1
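The heart of the removed start() is a generic technique: record the log file's current size, spawn the daemon, then tail the file from that offset until a success marker (or a traceback) shows up. A self-contained sketch of that technique follows; the helper name wait_for_log_marker and its parameters are hypothetical, and this is not the removed function itself:

    # Sketch of the "tail the log from a saved offset until a marker appears"
    # technique used by the removed start().  Names here are illustrative.
    import io
    import time
    from os.path import exists

    def wait_for_log_marker(log_fname, marker, starting_offset=0, timeout=60.0, poll=0.1):
        """Return (True, text) once `marker` appears after `starting_offset`,
        or (False, text) on a traceback or after `timeout` seconds."""
        collected = u''
        deadline = time.time() + timeout
        if not exists(log_fname):
            return False, collected
        with io.open(log_fname, 'r') as f:
            f.seek(starting_offset)
            while time.time() < deadline:
                collected += f.read()
                if marker in collected:
                    return True, collected
                if 'Traceback ' in collected:
                    return False, collected
                time.sleep(poll)
        return False, collected

    # The removed code derived the marker from the node type, e.g.
    #   ok, logs = wait_for_log_marker("logs/twistd.log", u"client running")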
@@ -1,85 +0,0 @@
-from __future__ import print_function
-
-import os
-import time
-import signal
-
-from allmydata.scripts.common import BasedirOptions
-from allmydata.util.encodingutil import quote_local_unicode_path
-from .run_common import get_pidfile, get_pid_from_pidfile
-
-COULD_NOT_STOP = 2
-
-
-class StopOptions(BasedirOptions):
-    def parseArgs(self, basedir=None):
-        BasedirOptions.parseArgs(self, basedir)
-
-    def getSynopsis(self):
-        return ("Usage: %s [global-options] stop [options] [NODEDIR]"
-                % (self.command_name,))
-
-
-def stop(config):
-    print("'tahoe stop' is deprecated; see 'tahoe run'")
-    out = config.stdout
-    err = config.stderr
-    basedir = config['basedir']
-    quoted_basedir = quote_local_unicode_path(basedir)
-    print("STOPPING", quoted_basedir, file=out)
-    pidfile = get_pidfile(basedir)
-    pid = get_pid_from_pidfile(pidfile)
-    if pid is None:
-        print("%s does not look like a running node directory (no twistd.pid)" % quoted_basedir, file=err)
-        # we define rc=2 to mean "nothing is running, but it wasn't me who
-        # stopped it"
-        return COULD_NOT_STOP
-    elif pid == -1:
-        print("%s contains an invalid PID file" % basedir, file=err)
-        # we define rc=2 to mean "nothing is running, but it wasn't me who
-        # stopped it"
-        return COULD_NOT_STOP
-
-    # kill it hard (SIGKILL), delete the twistd.pid file, then wait for the
-    # process itself to go away. If it hasn't gone away after 20 seconds, warn
-    # the user but keep waiting until they give up.
-    try:
-        os.kill(pid, signal.SIGKILL)
-    except OSError as oserr:
-        if oserr.errno == 3:
-            print(oserr.strerror)
-            # the process didn't exist, so wipe the pid file
-            os.remove(pidfile)
-            return COULD_NOT_STOP
-        else:
-            raise
-    try:
-        os.remove(pidfile)
-    except EnvironmentError:
-        pass
-    start = time.time()
-    time.sleep(0.1)
-    wait = 40
-    first_time = True
-    while True:
-        # poll once per second until we see the process is no longer running
-        try:
-            os.kill(pid, 0)
-        except OSError:
-            print("process %d is dead" % pid, file=out)
-            return
-        wait -= 1
-        if wait < 0:
-            if first_time:
-                print("It looks like pid %d is still running "
-                      "after %d seconds" % (pid,
-                                            (time.time() - start)), file=err)
-                print("I will keep watching it until you interrupt me.", file=err)
-                wait = 10
-                first_time = False
-            else:
-                print("pid %d still running after %d seconds" % \
-                    (pid, (time.time() - start)), file=err)
-                wait = 10
-        time.sleep(1)
-    # control never reaches here: no timeout
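The removed stop() leans on two POSIX conventions: twistd's pidfile, and os.kill(pid, 0) as a liveness probe (signal 0 delivers nothing but still performs the existence and permission checks). A small sketch of those two pieces, under the same assumptions the removed code made; the function names below are illustrative, not tahoe APIs:

    # Sketch of pidfile reading and the kill(pid, 0) liveness probe used by
    # the removed stop().  Function names are illustrative.
    import os
    import errno
    from os.path import join

    def read_twistd_pid(basedir):
        # Return the PID recorded in <basedir>/twistd.pid, or None if it is
        # missing or unparseable.
        try:
            with open(join(basedir, "twistd.pid"), "r") as f:
                return int(f.read().strip())
        except (EnvironmentError, ValueError):
            return None

    def process_is_running(pid):
        try:
            os.kill(pid, 0)              # signal 0: existence check only
        except OSError as e:
            if e.errno == errno.ESRCH:   # errno 3, as in the removed code
                return False
            if e.errno == errno.EPERM:   # it exists but belongs to another user
                return True
            raise
        return True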
@@ -1,202 +0,0 @@
-import os
-from io import (
-    BytesIO,
-)
-from os.path import dirname, join
-from mock import patch, Mock
-from six.moves import StringIO
-from sys import getfilesystemencoding
-from twisted.trial import unittest
-from allmydata.scripts import runner
-from allmydata.scripts.run_common import (
-    identify_node_type,
-    DaemonizeTahoeNodePlugin,
-    MyTwistdConfig,
-)
-from allmydata.scripts.tahoe_daemonize import (
-    DaemonizeOptions,
-)
-
-
-class Util(unittest.TestCase):
-    def setUp(self):
-        self.twistd_options = MyTwistdConfig()
-        self.twistd_options.parseOptions(["DaemonizeTahoeNode"])
-        self.options = self.twistd_options.subOptions
-
-    def test_node_type_nothing(self):
-        tmpdir = self.mktemp()
-        base = dirname(tmpdir).decode(getfilesystemencoding())
-
-        t = identify_node_type(base)
-
-        self.assertIs(None, t)
-
-    def test_node_type_introducer(self):
-        tmpdir = self.mktemp()
-        base = dirname(tmpdir).decode(getfilesystemencoding())
-        with open(join(dirname(tmpdir), 'introducer.tac'), 'w') as f:
-            f.write("test placeholder")
-
-        t = identify_node_type(base)
-
-        self.assertEqual(u"introducer", t)
-
-    def test_daemonize(self):
-        tmpdir = self.mktemp()
-        plug = DaemonizeTahoeNodePlugin('client', tmpdir)
-
-        with patch('twisted.internet.reactor') as r:
-            def call(fn, *args, **kw):
-                fn()
-            r.stop = lambda: None
-            r.callWhenRunning = call
-            service = plug.makeService(self.options)
-            service.parent = Mock()
-            service.startService()
-
-        self.assertTrue(service is not None)
-
-    def test_daemonize_no_keygen(self):
-        tmpdir = self.mktemp()
-        stderr = BytesIO()
-        plug = DaemonizeTahoeNodePlugin('key-generator', tmpdir)
-
-        with patch('twisted.internet.reactor') as r:
-            def call(fn, *args, **kw):
-                d = fn()
-                d.addErrback(lambda _: None)  # ignore the error we'll trigger
-            r.callWhenRunning = call
-            service = plug.makeService(self.options)
-            service.stderr = stderr
-            service.parent = Mock()
-            # we'll raise ValueError because there's no key-generator
-            # .. BUT we do this in an async function called via
-            # "callWhenRunning" .. hence using a hook
-            d = service.set_hook('running')
-            service.startService()
-            def done(f):
-                self.assertIn(
-                    "key-generator support removed",
-                    stderr.getvalue(),
-                )
-                return None
-            d.addBoth(done)
-            return d
-
-    def test_daemonize_unknown_nodetype(self):
-        tmpdir = self.mktemp()
-        plug = DaemonizeTahoeNodePlugin('an-unknown-service', tmpdir)
-
-        with patch('twisted.internet.reactor') as r:
-            def call(fn, *args, **kw):
-                fn()
-            r.stop = lambda: None
-            r.callWhenRunning = call
-            service = plug.makeService(self.options)
-            service.parent = Mock()
-            with self.assertRaises(ValueError) as ctx:
-                service.startService()
-            self.assertIn(
-                "unknown nodetype",
-                str(ctx.exception)
-            )
-
-    def test_daemonize_options(self):
-        parent = runner.Options()
-        opts = DaemonizeOptions()
-        opts.parent = parent
-        opts.parseArgs()
-
-        # just gratuitous coverage, ensureing we don't blow up on
-        # these methods.
-        opts.getSynopsis()
-        opts.getUsage()
-
-
-class RunDaemonizeTests(unittest.TestCase):
-
-    def setUp(self):
-        # no test should change our working directory
-        self._working = os.path.abspath('.')
-        d = super(RunDaemonizeTests, self).setUp()
-        self._reactor = patch('twisted.internet.reactor')
-        self._reactor.stop = lambda: None
-        self._twistd = patch('allmydata.scripts.run_common.twistd')
-        self.node_dir = self.mktemp()
-        os.mkdir(self.node_dir)
-        for cm in [self._reactor, self._twistd]:
-            cm.__enter__()
-        return d
-
-    def tearDown(self):
-        d = super(RunDaemonizeTests, self).tearDown()
-        for cm in [self._reactor, self._twistd]:
-            cm.__exit__(None, None, None)
-        # Note: if you raise an exception (e.g. via self.assertEqual
-        # or raise RuntimeError) it is apparently just ignored and the
-        # test passes anyway...
-        if self._working != os.path.abspath('.'):
-            print("WARNING: a test just changed the working dir; putting it back")
-            os.chdir(self._working)
-        return d
-
-    def _placeholder_nodetype(self, nodetype):
-        fname = join(self.node_dir, '{}.tac'.format(nodetype))
-        with open(fname, 'w') as f:
-            f.write("test placeholder")
-
-    def test_daemonize_defaults(self):
-        self._placeholder_nodetype('introducer')
-
-        config = runner.parse_or_exit_with_explanation([
-            # have to do this so the tests don't much around in
-            # ~/.tahoe (the default)
-            '--node-directory', self.node_dir,
-            'daemonize',
-        ])
-        i, o, e = StringIO(), StringIO(), StringIO()
-        with patch('allmydata.scripts.runner.sys') as s:
-            exit_code = [None]
-            def _exit(code):
-                exit_code[0] = code
-            s.exit = _exit
-            runner.dispatch(config, i, o, e)
-
-        self.assertEqual(0, exit_code[0])
-
-    def test_daemonize_wrong_nodetype(self):
-        self._placeholder_nodetype('invalid')
-
-        config = runner.parse_or_exit_with_explanation([
-            # have to do this so the tests don't much around in
-            # ~/.tahoe (the default)
-            '--node-directory', self.node_dir,
-            'daemonize',
-        ])
-        i, o, e = StringIO(), StringIO(), StringIO()
-        with patch('allmydata.scripts.runner.sys') as s:
-            exit_code = [None]
-            def _exit(code):
-                exit_code[0] = code
-            s.exit = _exit
-            runner.dispatch(config, i, o, e)
-
-        self.assertEqual(0, exit_code[0])
-
-    def test_daemonize_run(self):
-        self._placeholder_nodetype('client')
-
-        config = runner.parse_or_exit_with_explanation([
-            # have to do this so the tests don't much around in
-            # ~/.tahoe (the default)
-            '--node-directory', self.node_dir,
-            'daemonize',
-        ])
-        with patch('allmydata.scripts.runner.sys') as s:
-            exit_code = [None]
-            def _exit(code):
-                exit_code[0] = code
-            s.exit = _exit
-            from allmydata.scripts.tahoe_daemonize import daemonize
-            daemonize(config)
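These removed tests all rely on the same trick: patch twisted.internet.reactor with a mock whose callWhenRunning invokes its callable immediately and whose stop is a no-op, so a Twisted service can be started synchronously inside a unit test. A condensed sketch of that pattern, with the caveat that it only affects code which looks the reactor up at call time; the make_service callable is hypothetical:

    # Sketch of the reactor-patching pattern used throughout the removed tests.
    from mock import patch, Mock
    from twisted.internet import reactor as _real_reactor  # ensure the attribute exists before patching

    def start_service_synchronously(make_service):
        """Build and start a service under a fake reactor; returns the service."""
        with patch('twisted.internet.reactor') as fake_reactor:
            fake_reactor.stop = lambda: None
            fake_reactor.callWhenRunning = lambda fn, *a, **kw: fn(*a, **kw)
            service = make_service()
            service.parent = Mock()   # pretend it is attached to a parent service
            service.startService()
            return service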
@@ -1,273 +0,0 @@
-import os
-import shutil
-import subprocess
-from os.path import join
-from mock import patch
-from six.moves import StringIO
-from functools import partial
-
-from twisted.trial import unittest
-from allmydata.scripts import runner
-
-
-#@patch('twisted.internet.reactor')
-@patch('allmydata.scripts.tahoe_start.subprocess')
-class RunStartTests(unittest.TestCase):
-
-    def setUp(self):
-        d = super(RunStartTests, self).setUp()
-        self.node_dir = self.mktemp()
-        os.mkdir(self.node_dir)
-        return d
-
-    def _placeholder_nodetype(self, nodetype):
-        fname = join(self.node_dir, '{}.tac'.format(nodetype))
-        with open(fname, 'w') as f:
-            f.write("test placeholder")
-
-    def _pid_file(self, pid):
-        fname = join(self.node_dir, 'twistd.pid')
-        with open(fname, 'w') as f:
-            f.write(u"{}\n".format(pid))
-
-    def _logs(self, logs):
-        os.mkdir(join(self.node_dir, 'logs'))
-        fname = join(self.node_dir, 'logs', 'twistd.log')
-        with open(fname, 'w') as f:
-            f.write(logs)
-
-    def test_start_defaults(self, _subprocess):
-        self._placeholder_nodetype('client')
-        self._pid_file(1234)
-        self._logs('one log\ntwo log\nred log\nblue log\n')
-
-        config = runner.parse_or_exit_with_explanation([
-            # have to do this so the tests don't muck around in
-            # ~/.tahoe (the default)
-            '--node-directory', self.node_dir,
-            'start',
-        ])
-        i, o, e = StringIO(), StringIO(), StringIO()
-        try:
-            with patch('allmydata.scripts.tahoe_start.os'):
-                with patch('allmydata.scripts.runner.sys') as s:
-                    exit_code = [None]
-                    def _exit(code):
-                        exit_code[0] = code
-                    s.exit = _exit
-
-                    def launch(*args, **kw):
-                        with open(join(self.node_dir, 'logs', 'twistd.log'), 'a') as f:
-                            f.write('client running\n')  # "the magic"
-                    _subprocess.check_call = launch
-                    runner.dispatch(config, i, o, e)
-        except Exception:
-            pass
-
-        self.assertEqual([0], exit_code)
-        self.assertTrue('Node has started' in o.getvalue())
-
-    def test_start_fails(self, _subprocess):
-        self._placeholder_nodetype('client')
-        self._logs('existing log line\n')
-
-        config = runner.parse_or_exit_with_explanation([
-            # have to do this so the tests don't muck around in
-            # ~/.tahoe (the default)
-            '--node-directory', self.node_dir,
-            'start',
-        ])
-
-        i, o, e = StringIO(), StringIO(), StringIO()
-        with patch('allmydata.scripts.tahoe_start.time') as t:
-            with patch('allmydata.scripts.runner.sys') as s:
-                exit_code = [None]
-                def _exit(code):
-                    exit_code[0] = code
-                s.exit = _exit
-
-                thetime = [0]
-                def _time():
-                    thetime[0] += 0.1
-                    return thetime[0]
-                t.time = _time
-
-                def launch(*args, **kw):
-                    with open(join(self.node_dir, 'logs', 'twistd.log'), 'a') as f:
-                        f.write('a new log line\n')
-                _subprocess.check_call = launch
-
-                runner.dispatch(config, i, o, e)
-
-        # should print out the collected logs and an error-code
-        self.assertTrue("a new log line" in o.getvalue())
-        self.assertEqual([1], exit_code)
-
-    def test_start_subprocess_fails(self, _subprocess):
-        self._placeholder_nodetype('client')
-        self._logs('existing log line\n')
-
-        config = runner.parse_or_exit_with_explanation([
-            # have to do this so the tests don't muck around in
-            # ~/.tahoe (the default)
-            '--node-directory', self.node_dir,
-            'start',
-        ])
-
-        i, o, e = StringIO(), StringIO(), StringIO()
-        with patch('allmydata.scripts.tahoe_start.time'):
-            with patch('allmydata.scripts.runner.sys') as s:
-                # undo patch for the exception-class
-                _subprocess.CalledProcessError = subprocess.CalledProcessError
-                exit_code = [None]
-                def _exit(code):
-                    exit_code[0] = code
-                s.exit = _exit
-
-                def launch(*args, **kw):
-                    raise subprocess.CalledProcessError(42, "tahoe")
-                _subprocess.check_call = launch
-
-                runner.dispatch(config, i, o, e)
-
-        # should get our "odd" error-code
-        self.assertEqual([42], exit_code)
-
-    def test_start_help(self, _subprocess):
-        self._placeholder_nodetype('client')
-
-        std = StringIO()
-        with patch('sys.stdout') as stdo:
-            stdo.write = std.write
-            try:
-                runner.parse_or_exit_with_explanation([
-                    # have to do this so the tests don't muck around in
-                    # ~/.tahoe (the default)
-                    '--node-directory', self.node_dir,
-                    'start',
-                    '--help',
-                ], stdout=std)
-                self.fail("Should get exit")
-            except SystemExit as e:
-                print(e)
-
-        self.assertIn(
-            "Usage:",
-            std.getvalue()
-        )
-
-    def test_start_unknown_node_type(self, _subprocess):
-        self._placeholder_nodetype('bogus')
-
-        config = runner.parse_or_exit_with_explanation([
-            # have to do this so the tests don't muck around in
-            # ~/.tahoe (the default)
-            '--node-directory', self.node_dir,
-            'start',
-        ])
-
-        i, o, e = StringIO(), StringIO(), StringIO()
-        with patch('allmydata.scripts.runner.sys') as s:
-            exit_code = [None]
-            def _exit(code):
-                exit_code[0] = code
-            s.exit = _exit
-
-            runner.dispatch(config, i, o, e)
-
-        # should print out the collected logs and an error-code
-        self.assertIn(
-            "is not a recognizable node directory",
-            e.getvalue()
-        )
-        self.assertEqual([1], exit_code)
-
-    def test_start_nodedir_not_dir(self, _subprocess):
-        shutil.rmtree(self.node_dir)
-        assert not os.path.isdir(self.node_dir)
-
-        config = runner.parse_or_exit_with_explanation([
-            # have to do this so the tests don't muck around in
-            # ~/.tahoe (the default)
-            '--node-directory', self.node_dir,
-            'start',
-        ])
-
-        i, o, e = StringIO(), StringIO(), StringIO()
-        with patch('allmydata.scripts.runner.sys') as s:
-            exit_code = [None]
-            def _exit(code):
-                exit_code[0] = code
-            s.exit = _exit
-
-            runner.dispatch(config, i, o, e)
-
-        # should print out the collected logs and an error-code
-        self.assertIn(
-            "does not look like a directory at all",
-            e.getvalue()
-        )
-        self.assertEqual([1], exit_code)
-
-
-class RunTests(unittest.TestCase):
-    """
-    Tests confirming end-user behavior of CLI commands
-    """
-
-    def setUp(self):
-        d = super(RunTests, self).setUp()
-        self.addCleanup(partial(os.chdir, os.getcwd()))
-        self.node_dir = self.mktemp()
-        os.mkdir(self.node_dir)
-        return d
-
-    @patch('twisted.internet.reactor')
-    def test_run_invalid_config(self, reactor):
-        """
-        Configuration that's invalid should be obvious to the user
-        """
-
-        def cwr(fn, *args, **kw):
-            fn()
-
-        def stop(*args, **kw):
-            stopped.append(None)
-        stopped = []
-        reactor.callWhenRunning = cwr
-        reactor.stop = stop
-
-        with open(os.path.join(self.node_dir, "client.tac"), "w") as f:
-            f.write('test')
-
-        with open(os.path.join(self.node_dir, "tahoe.cfg"), "w") as f:
-            f.write(
-                "[invalid section]\n"
-                "foo = bar\n"
-            )
-
-        config = runner.parse_or_exit_with_explanation([
-            # have to do this so the tests don't muck around in
-            # ~/.tahoe (the default)
-            '--node-directory', self.node_dir,
-            'run',
-        ])
-
-        i, o, e = StringIO(), StringIO(), StringIO()
-        d = runner.dispatch(config, i, o, e)
-
-        self.assertFailure(d, SystemExit)
-
-        output = e.getvalue()
-        # should print out the collected logs and an error-code
-        self.assertIn(
-            "invalid section",
-            output,
-        )
-        self.assertIn(
-            "Configuration error:",
-            output,
-        )
-        # ensure reactor.stop was actually called
-        self.assertEqual([None], stopped)
-        return d
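RunTests above hands the Deferred from runner.dispatch back to trial and asserts on it with assertFailure, which is the idiomatic way to say "this Deferred must errback with that exception". A tiny, self-contained sketch of that assertion style, independent of tahoe's own code:

    # Sketch of the assertFailure idiom used by the removed RunTests.
    from twisted.trial import unittest
    from twisted.internet import defer
    from twisted.python.failure import Failure

    class AssertFailureExample(unittest.TestCase):
        def test_deferred_fails_with_system_exit(self):
            # A Deferred that has already errbacked with SystemExit, standing
            # in for the Deferred returned by runner.dispatch(...) on bad config.
            d = defer.fail(Failure(SystemExit(1)))
            # assertFailure returns a Deferred the test can hand back to trial.
            return self.assertFailure(d, SystemExit)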