Re-factor grid fixtures to be mostly helpers
This lets us use them to create our own tiny grid to test the grid-manager certificates (instead of messing with "the" grid)
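For illustration, here is a minimal sketch (not part of this commit) of how a test can now build its own tiny grid from these helpers; the test name, port number, and encoding parameters below are made up, while the helper signatures come from the new integration/grid.py in this diff:

import pytest_twisted

import grid


@pytest_twisted.inlineCallbacks
def test_with_tiny_grid(reactor, request, temp_dir, flog_gatherer):
    # a fresh introducer just for this test (instead of the shared session grid)
    introducer = yield grid.create_introducer(reactor, request, temp_dir, flog_gatherer)
    # one storage server announced to that introducer
    storage0 = yield grid.create_storage_server(
        reactor, request, temp_dir, introducer, flog_gatherer,
        name="tiny_storage0",
        web_port="tcp:9990:interface=localhost",
        needed=1, happy=1, total=1,
    )
    # ... exercise the tiny grid via introducer.furl and storage0.process.node_dir ...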
This commit is contained in: parent c029698435, commit 0540df8ab1
@@ -36,6 +36,7 @@ from util import (
     await_client_ready,
     TahoeProcess,
 )
+import grid


 # pytest customization hooks
@@ -106,60 +107,10 @@ def flog_binary():
 @pytest.fixture(scope='session')
 @log_call(action_type=u"integration:flog_gatherer", include_args=[])
 def flog_gatherer(reactor, temp_dir, flog_binary, request):
-    out_protocol = _CollectOutputProtocol()
-    gather_dir = join(temp_dir, 'flog_gather')
-    reactor.spawnProcess(
-        out_protocol,
-        flog_binary,
-        (
-            'flogtool', 'create-gatherer',
-            '--location', 'tcp:localhost:3117',
-            '--port', '3117',
-            gather_dir,
-        )
-    )
-    pytest_twisted.blockon(out_protocol.done)
-
-    twistd_protocol = _MagicTextProtocol("Gatherer waiting at")
-    twistd_process = reactor.spawnProcess(
-        twistd_protocol,
-        which('twistd')[0],
-        (
-            'twistd', '--nodaemon', '--python',
-            join(gather_dir, 'gatherer.tac'),
-        ),
-        path=gather_dir,
-    )
-    pytest_twisted.blockon(twistd_protocol.magic_seen)
-
-    def cleanup():
-        _cleanup_tahoe_process(twistd_process, twistd_protocol.exited)
-
-        flog_file = mktemp('.flog_dump')
-        flog_protocol = _DumpOutputProtocol(open(flog_file, 'w'))
-        flog_dir = join(temp_dir, 'flog_gather')
-        flogs = [x for x in listdir(flog_dir) if x.endswith('.flog')]
-
-        print("Dumping {} flogtool logfiles to '{}'".format(len(flogs), flog_file))
-        reactor.spawnProcess(
-            flog_protocol,
-            flog_binary,
-            (
-                'flogtool', 'dump', join(temp_dir, 'flog_gather', flogs[0])
-            ),
-        )
-        print("Waiting for flogtool to complete")
-        try:
-            pytest_twisted.blockon(flog_protocol.done)
-        except ProcessTerminated as e:
-            print("flogtool exited unexpectedly: {}".format(str(e)))
-        print("Flogtool completed")
-
-    request.addfinalizer(cleanup)
-
-    with open(join(gather_dir, 'log_gatherer.furl'), 'r') as f:
-        furl = f.read().strip()
-    return furl
+    fg = pytest_twisted.blockon(
+        grid.create_flog_gatherer(reactor, request, temp_dir, flog_binary)
+    )
+    return fg


 @pytest.fixture(scope='session')
@@ -169,64 +120,14 @@ def flog_gatherer(reactor, temp_dir, flog_binary, request):
     include_result=False,
 )
 def introducer(reactor, temp_dir, flog_gatherer, request):
-    config = '''
-[node]
-nickname = introducer0
-web.port = 4560
-log_gatherer.furl = {log_furl}
-'''.format(log_furl=flog_gatherer)
-
-    intro_dir = join(temp_dir, 'introducer')
-    print("making introducer", intro_dir)
-
-    if not exists(intro_dir):
-        mkdir(intro_dir)
-        done_proto = _ProcessExitedProtocol()
-        _tahoe_runner_optional_coverage(
-            done_proto,
-            reactor,
-            request,
-            (
-                'create-introducer',
-                '--listen=tcp',
-                '--hostname=localhost',
-                intro_dir,
-            ),
-        )
-        pytest_twisted.blockon(done_proto.done)
-
-        # over-write the config file with our stuff
-        with open(join(intro_dir, 'tahoe.cfg'), 'w') as f:
-            f.write(config)
-
-    # on windows, "tahoe start" means: run forever in the foreground,
-    # but on linux it means daemonize. "tahoe run" is consistent
-    # between platforms.
-    protocol = _MagicTextProtocol('introducer running')
-    transport = _tahoe_runner_optional_coverage(
-        protocol,
-        reactor,
-        request,
-        (
-            'run',
-            intro_dir,
-        ),
-    )
-    request.addfinalizer(partial(_cleanup_tahoe_process, transport, protocol.exited))
-
-    pytest_twisted.blockon(protocol.magic_seen)
-    return TahoeProcess(transport, intro_dir)
+    intro = pytest_twisted.blockon(grid.create_introducer(reactor, request, temp_dir, flog_gatherer))
+    return intro


 @pytest.fixture(scope='session')
 @log_call(action_type=u"integration:introducer:furl", include_args=["temp_dir"])
 def introducer_furl(introducer, temp_dir):
-    furl_fname = join(temp_dir, 'introducer', 'private', 'introducer.furl')
-    while not exists(furl_fname):
-        print("Don't see {} yet".format(furl_fname))
-        sleep(.1)
-    furl = open(furl_fname, 'r').read()
-    return furl
+    return introducer.furl


 @pytest.fixture(scope='session')
@@ -313,12 +214,10 @@ def storage_nodes(reactor, temp_dir, introducer, introducer_furl, flog_gatherer,
     # start all 5 nodes in parallel
     for x in range(5):
         name = 'node{}'.format(x)
-        web_port= 9990 + x
+        web_port = 'tcp:{}:interface=localhost'.format(9990 + x)
         nodes_d.append(
-            _create_node(
-                reactor, request, temp_dir, introducer_furl, flog_gatherer, name,
-                web_port="tcp:{}:interface=localhost".format(web_port),
-                storage=True,
+            grid.create_storage_server(
+                reactor, request, temp_dir, introducer, flog_gatherer, name, web_port,
             )
         )
     nodes_status = pytest_twisted.blockon(DeferredList(nodes_d))
integration/grid.py (new file, 262 lines)
@@ -0,0 +1,262 @@
from os import mkdir, listdir, environ
from os.path import join, exists
from tempfile import mkdtemp, mktemp

from twisted.python.procutils import which
from twisted.internet.defer import (
    inlineCallbacks,
    returnValue,
)
from twisted.internet.task import (
    deferLater,
)
from twisted.internet.interfaces import (
    IProcessTransport,
    IProcessProtocol,
    IProtocol,
)
# ProcessTerminated is referenced in the flog-dump cleanup below; adding the
# import here so the except clause resolves.
from twisted.internet.error import ProcessTerminated

from util import (
    _CollectOutputProtocol,
    _MagicTextProtocol,
    _DumpOutputProtocol,
    _ProcessExitedProtocol,
    _create_node,
    _run_node,
    _cleanup_tahoe_process,
    _tahoe_runner_optional_coverage,
    await_client_ready,
    TahoeProcess,
)

import attr
import pytest_twisted

@attr.s
class FlogGatherer(object):
    """
    Flog Gatherer process.
    """

    process = attr.ib(
        validator=attr.validators.provides(IProcessTransport)
    )
    protocol = attr.ib(
        validator=attr.validators.provides(IProcessProtocol)
    )
    furl = attr.ib()


@inlineCallbacks
def create_flog_gatherer(reactor, request, temp_dir, flog_binary):
    out_protocol = _CollectOutputProtocol()
    gather_dir = join(temp_dir, 'flog_gather')
    reactor.spawnProcess(
        out_protocol,
        flog_binary,
        (
            'flogtool', 'create-gatherer',
            '--location', 'tcp:localhost:3117',
            '--port', '3117',
            gather_dir,
        )
    )
    yield out_protocol.done

    twistd_protocol = _MagicTextProtocol("Gatherer waiting at")
    twistd_process = reactor.spawnProcess(
        twistd_protocol,
        which('twistd')[0],
        (
            'twistd', '--nodaemon', '--python',
            join(gather_dir, 'gatherer.tac'),
        ),
        path=gather_dir,
    )
    yield twistd_protocol.magic_seen

    def cleanup():
        _cleanup_tahoe_process(twistd_process, twistd_protocol.exited)

        flog_file = mktemp('.flog_dump')
        flog_protocol = _DumpOutputProtocol(open(flog_file, 'w'))
        flog_dir = join(temp_dir, 'flog_gather')
        flogs = [x for x in listdir(flog_dir) if x.endswith('.flog')]

        print("Dumping {} flogtool logfiles to '{}'".format(len(flogs), flog_file))
        reactor.spawnProcess(
            flog_protocol,
            flog_binary,
            (
                'flogtool', 'dump', join(temp_dir, 'flog_gather', flogs[0])
            ),
        )
        print("Waiting for flogtool to complete")
        try:
            pytest_twisted.blockon(flog_protocol.done)
        except ProcessTerminated as e:
            print("flogtool exited unexpectedly: {}".format(str(e)))
        print("Flogtool completed")

    request.addfinalizer(cleanup)

    with open(join(gather_dir, 'log_gatherer.furl'), 'r') as f:
        furl = f.read().strip()
    returnValue(
        FlogGatherer(
            protocol=twistd_protocol,
            process=twistd_process,
            furl=furl,
        )
    )

@attr.s
class StorageServer(object):
    """
    Represents a Tahoe Storage Server
    """

    process = attr.ib(
        validator=attr.validators.instance_of(TahoeProcess)
    )
    protocol = attr.ib(
        validator=attr.validators.provides(IProcessProtocol)
    )

    # XXX needs a restart() probably .. or at least a stop() and
    # start()


@inlineCallbacks
def create_storage_server(reactor, request, temp_dir, introducer, flog_gatherer, name, web_port,
                          needed=2, happy=3, total=4):
    """
    Create a new storage server
    """
    from util import _create_node
    node_process = yield _create_node(
        reactor, request, temp_dir, introducer.furl, flog_gatherer,
        name, web_port, storage=True, needed=needed, happy=happy, total=total,
    )
    storage = StorageServer(
        process=node_process,
        protocol=node_process.transport._protocol,
    )
    returnValue(storage)

@attr.s
|
||||||
|
class Introducer(object):
|
||||||
|
"""
|
||||||
|
Reprsents a running introducer
|
||||||
|
"""
|
||||||
|
|
||||||
|
process = attr.ib(
|
||||||
|
validator=attr.validators.instance_of(TahoeProcess)
|
||||||
|
)
|
||||||
|
protocol = attr.ib(
|
||||||
|
validator=attr.validators.provides(IProcessProtocol)
|
||||||
|
)
|
||||||
|
furl = attr.ib()
|
||||||
|
|
||||||
|
|
||||||
|
_introducer_num = 0
|
||||||
|
|
||||||
|
|
||||||
|
@inlineCallbacks
|
||||||
|
def create_introducer(reactor, request, temp_dir, flog_gatherer):
|
||||||
|
"""
|
||||||
|
Run a new Introducer and return an Introducer instance.
|
||||||
|
"""
|
||||||
|
global _introducer_num
|
||||||
|
config = (
|
||||||
|
'[node]\n'
|
||||||
|
'nickname = introducer{num}\n'
|
||||||
|
'web.port = {port}\n'
|
||||||
|
'log_gatherer.furl = {log_furl}\n'
|
||||||
|
).format(
|
||||||
|
num=_introducer_num,
|
||||||
|
log_furl=flog_gatherer.furl,
|
||||||
|
port=4560 + _introducer_num,
|
||||||
|
)
|
||||||
|
_introducer_num += 1
|
||||||
|
|
||||||
|
intro_dir = join(temp_dir, 'introducer{}'.format(_introducer_num))
|
||||||
|
print("making introducer", intro_dir, _introducer_num)
|
||||||
|
|
||||||
|
if not exists(intro_dir):
|
||||||
|
mkdir(intro_dir)
|
||||||
|
done_proto = _ProcessExitedProtocol()
|
||||||
|
_tahoe_runner_optional_coverage(
|
||||||
|
done_proto,
|
||||||
|
reactor,
|
||||||
|
request,
|
||||||
|
(
|
||||||
|
'create-introducer',
|
||||||
|
'--listen=tcp',
|
||||||
|
'--hostname=localhost',
|
||||||
|
intro_dir,
|
||||||
|
),
|
||||||
|
)
|
||||||
|
yield done_proto.done
|
||||||
|
|
||||||
|
# over-write the config file with our stuff
|
||||||
|
with open(join(intro_dir, 'tahoe.cfg'), 'w') as f:
|
||||||
|
f.write(config)
|
||||||
|
|
||||||
|
# on windows, "tahoe start" means: run forever in the foreground,
|
||||||
|
# but on linux it means daemonize. "tahoe run" is consistent
|
||||||
|
# between platforms.
|
||||||
|
protocol = _MagicTextProtocol('introducer running')
|
||||||
|
transport = _tahoe_runner_optional_coverage(
|
||||||
|
protocol,
|
||||||
|
reactor,
|
||||||
|
request,
|
||||||
|
(
|
||||||
|
'run',
|
||||||
|
intro_dir,
|
||||||
|
),
|
||||||
|
)
|
||||||
|
|
||||||
|
def clean():
|
||||||
|
return _cleanup_tahoe_process(transport, protocol.exited)
|
||||||
|
request.addfinalizer(clean)
|
||||||
|
|
||||||
|
yield protocol.magic_seen
|
||||||
|
|
||||||
|
furl_fname = join(intro_dir, 'private', 'introducer.furl')
|
||||||
|
while not exists(furl_fname):
|
||||||
|
print("Don't see {} yet".format(furl_fname))
|
||||||
|
yield deferLater(reactor, .1, lambda: None)
|
||||||
|
furl = open(furl_fname, 'r').read()
|
||||||
|
|
||||||
|
returnValue(
|
||||||
|
Introducer(
|
||||||
|
process=TahoeProcess(transport, intro_dir),
|
||||||
|
protocol=protocol,
|
||||||
|
furl=furl,
|
||||||
|
)
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
@attr.s
class Grid(object):
    """
    Represents an entire Tahoe Grid setup

    A Grid includes an Introducer, Flog Gatherer and some number of
    Storage Servers.
    """

    introducer = attr.ib(default=None)
    flog_gatherer = attr.ib(default=None)
    storage_servers = attr.ib(factory=list)

    @storage_servers.validator
    def check(self, attribute, value):
        for server in value:
            if not isinstance(server, StorageServer):
                raise ValueError(
                    "storage_servers must be StorageServer"
                )
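As an aside (not part of the diff), the Grid container above could be assembled from the helpers roughly like the hypothetical factory below; make_tiny_grid, its port, and node name are illustrative only, while the helper and attribute names come from the file shown above:

from twisted.internet.defer import inlineCallbacks, returnValue

import grid


@inlineCallbacks
def make_tiny_grid(reactor, request, temp_dir, flog_binary):
    # create the three pieces a Grid bundles together
    flog = yield grid.create_flog_gatherer(reactor, request, temp_dir, flog_binary)
    intro = yield grid.create_introducer(reactor, request, temp_dir, flog)
    storage = yield grid.create_storage_server(
        reactor, request, temp_dir, intro, flog,
        name="node0", web_port="tcp:9991:interface=localhost",
    )
    # the storage_servers validator above accepts only StorageServer instances
    returnValue(grid.Grid(
        introducer=intro,
        flog_gatherer=flog,
        storage_servers=[storage],
    ))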
@@ -104,86 +104,102 @@ def test_remove_last_client(reactor, request):


 @pytest_twisted.inlineCallbacks
-def test_reject_storage_server(reactor, request, storage_nodes, temp_dir, introducer_furl, flog_gatherer):
+def test_reject_storage_server(reactor, request, temp_dir, flog_gatherer):
     """
-    A client with happiness=3 fails to upload to a Grid when it is
-    using Grid Manager and there are only two storage-servers with
-    valid certificates.
+    A client with happiness=2 fails to upload to a Grid when it is
+    using Grid Manager and there is only 1 storage server with a valid
+    certificate.
     """
+    import grid
+    introducer = yield grid.create_introducer(reactor, request, temp_dir, flog_gatherer)
+    storage0 = yield grid.create_storage_server(
+        reactor, request, temp_dir, introducer, flog_gatherer,
+        name="gm_storage0",
+        web_port="tcp:9995:interface=localhost",
+        needed=2,
+        happy=2,
+        total=2,
+    )
+    storage1 = yield grid.create_storage_server(
+        reactor, request, temp_dir, introducer, flog_gatherer,
+        name="gm_storage1",
+        web_port="tcp:9996:interface=localhost",
+        needed=2,
+        happy=2,
+        total=2,
+    )

     gm_config = yield util.run_tahoe(
         reactor, request, "grid-manager", "--config", "-", "create",
     )
     gm_privkey_bytes = json.loads(gm_config)['private_key'].encode('ascii')
     gm_privkey, gm_pubkey = ed25519.signing_keypair_from_string(gm_privkey_bytes)

-    # create certificates for first 2 storage-servers
-    for idx, storage in enumerate(storage_nodes[:2]):
-        pubkey_fname = join(storage._node_dir, "node.pubkey")
-        with open(pubkey_fname, 'r') as f:
-            pubkey_str = f.read().strip()
-
-        gm_config = yield util.run_tahoe(
-            reactor, request, "grid-manager", "--config", "-", "add",
-            "storage{}".format(idx), pubkey_str,
-            stdin=gm_config,
-        )
-    assert sorted(json.loads(gm_config)['storage_servers'].keys()) == ['storage0', 'storage1']
+    # create certificate for the first storage-server
+    pubkey_fname = join(storage0.process.node_dir, "node.pubkey")
+    with open(pubkey_fname, 'r') as f:
+        pubkey_str = f.read().strip()
+
+    gm_config = yield util.run_tahoe(
+        reactor, request, "grid-manager", "--config", "-", "add",
+        "storage0", pubkey_str,
+        stdin=gm_config,
+    )
+    assert json.loads(gm_config)['storage_servers'].keys() == ['storage0']

-    # XXX FIXME need to shut-down and nuke carol when we're done this
-    # test (i.d. request.addfinalizer)
-    carol = yield util._create_node(
-        reactor, request, temp_dir, introducer_furl, flog_gatherer, "carol",
-        web_port="tcp:9982:interface=localhost",
+    # XXX FIXME want a grid.create_client() or similar
+    diana = yield util._create_node(
+        reactor, request, temp_dir, introducer.furl, flog_gatherer, "diana",
+        web_port="tcp:9984:interface=localhost",
         storage=False,
     )

-    print("inserting certificates")
-    # insert their certificates
-    for idx, storage in enumerate(storage_nodes[:2]):
-        cert = yield util.run_tahoe(
-            reactor, request, "grid-manager", "--config", "-", "sign",
-            "storage{}".format(idx),
-            stdin=gm_config,
-        )
-        with open(join(storage._node_dir, "gridmanager.cert"), "w") as f:
-            f.write(cert)
-        config = configutil.get_config(join(storage._node_dir, "tahoe.cfg"))
-        config.set("storage", "grid_management", "True")
-        config.add_section("grid_manager_certificates")
-        config.set("grid_manager_certificates", "default", "gridmanager.cert")
-        with open(join(storage._node_dir, "tahoe.cfg"), "w") as f:
-            config.write(f)
-
-        # re-start this storage server
-        storage.transport.signalProcess('TERM')
-        yield storage.transport._protocol.exited
-        storage_nodes[idx] = yield util._run_node(
-            reactor, storage._node_dir, request, None,
-        )
-
-    # now only two storage-servers have certificates .. configure
-    # carol to have the grid-manager certificate
-
-    config = configutil.get_config(join(carol._node_dir, "tahoe.cfg"))
+    print("inserting certificate")
+    cert = yield util.run_tahoe(
+        reactor, request, "grid-manager", "--config", "-", "sign", "storage0",
+        stdin=gm_config,
+    )
+    with open(join(storage0.process.node_dir, "gridmanager.cert"), "w") as f:
+        f.write(cert)
+    config = configutil.get_config(join(storage0.process.node_dir, "tahoe.cfg"))
+    config.set("storage", "grid_management", "True")
+    config.add_section("grid_manager_certificates")
+    config.set("grid_manager_certificates", "default", "gridmanager.cert")
+    with open(join(storage0.process.node_dir, "tahoe.cfg"), "w") as f:
+        config.write(f)
+
+    # re-start this storage server
+    storage0.process.transport.signalProcess('TERM')
+    yield storage0.protocol.exited
+    yield util._run_node(
+        reactor, storage0.process.node_dir, request, None, cleanup=False,
+    )
+    yield util.await_client_ready(diana, servers=2)
+
+    # now only one storage-server has the certificate .. configure
+    # diana to have the grid-manager certificate
+
+    config = configutil.get_config(join(diana.node_dir, "tahoe.cfg"))
     config.add_section("grid_managers")
     config.set("grid_managers", "test", ed25519.string_from_verifying_key(gm_pubkey))
-    with open(join(carol._node_dir, "tahoe.cfg"), "w") as f:
+    with open(join(diana.node_dir, "tahoe.cfg"), "w") as f:
         config.write(f)
-    carol.transport.signalProcess('TERM')
-    yield carol.transport._protocol.exited
+    diana.transport.signalProcess('TERM')
+    yield diana.transport._protocol.exited

-    carol = yield util._run_node(
-        reactor, carol._node_dir, request, None,
+    diana = yield util._run_node(
+        reactor, diana._node_dir, request, None, cleanup=False,
     )
-    yield util.await_client_ready(carol, servers=5)
+    yield util.await_client_ready(diana, servers=2)

     # try to put something into the grid, which should fail (because
-    # carol has happy=3 but should only find storage0, storage1 to be
-    # acceptable to upload to)
+    # diana has happy=2 but should only find storage0 to be acceptable
+    # to upload to)

     try:
         yield util.run_tahoe(
-            reactor, request, "--node-directory", carol._node_dir,
+            reactor, request, "--node-directory", diana._node_dir,
             "put", "-",
             stdin="some content\n" * 200,
         )
@@ -96,7 +96,7 @@ def test_helper_status(storage_nodes):
     successfully GET the /helper_status page
     """

-    url = util.node_url(storage_nodes[0].node_dir, "helper_status")
+    url = util.node_url(storage_nodes[0].process.node_dir, "helper_status")
     resp = requests.get(url)
     assert resp.status_code >= 200 and resp.status_code < 300
     dom = BeautifulSoup(resp.content, "html5lib")
@@ -418,7 +418,7 @@ def test_storage_info(storage_nodes):
     storage0 = storage_nodes[0]

     requests.get(
-        util.node_url(storage0.node_dir, u"storage"),
+        util.node_url(storage0.process.node_dir, u"storage"),
     )


@@ -429,7 +429,7 @@ def test_storage_info_json(storage_nodes):
     storage0 = storage_nodes[0]

     resp = requests.get(
-        util.node_url(storage0.node_dir, u"storage"),
+        util.node_url(storage0.process.node_dir, u"storage"),
         params={u"t": u"json"},
     )
     data = json.loads(resp.content)
@@ -441,12 +441,12 @@ def test_introducer_info(introducer):
     retrieve and confirm /introducer URI for the introducer
     """
     resp = requests.get(
-        util.node_url(introducer.node_dir, u""),
+        util.node_url(introducer.process.node_dir, u""),
     )
     assert "Introducer" in resp.content

     resp = requests.get(
-        util.node_url(introducer.node_dir, u""),
+        util.node_url(introducer.process.node_dir, u""),
         params={u"t": u"json"},
     )
     data = json.loads(resp.content)
@@ -144,7 +144,7 @@ def _cleanup_tahoe_process(tahoe_transport, exited):
     try:
         print("signaling {} with TERM".format(tahoe_transport.pid))
         tahoe_transport.signalProcess('TERM')
-        print("signaled, blocking on exit")
+        print("signaled, blocking on exit {}".format(exited))
         pytest_twisted.blockon(exited)
         print("exited, goodbye")
     except ProcessExitedAlready:
@@ -210,7 +210,7 @@ class TahoeProcess(object):
         return "<TahoeProcess in '{}'>".format(self._node_dir)


-def _run_node(reactor, node_dir, request, magic_text):
+def _run_node(reactor, node_dir, request, magic_text, cleanup=True):
     """
     Run a tahoe process from its node_dir.

@@ -236,6 +236,7 @@ def _run_node(reactor, node_dir, request, magic_text):
     )
     transport.exited = protocol.exited

-    request.addfinalizer(partial(_cleanup_tahoe_process, transport, protocol.exited))
+    if cleanup:
+        request.addfinalizer(partial(_cleanup_tahoe_process, transport, protocol.exited))

     # XXX abusing the Deferred; should use .when_magic_seen() pattern
@@ -291,7 +292,7 @@ def _create_node(reactor, request, temp_dir, introducer_furl, flog_gatherer, nam
     def created(_):
         config_path = join(node_dir, 'tahoe.cfg')
         config = get_config(config_path)
-        set_config(config, 'node', 'log_gatherer.furl', flog_gatherer)
+        set_config(config, 'node', 'log_gatherer.furl', flog_gatherer.furl)
         write_config(config_path, config)
     created_d.addCallback(created)
