2008-02-05 20:05:13 +00:00
|
|
|
from base64 import b32decode
|
2007-03-27 23:12:11 +00:00
|
|
|
|
2007-12-03 21:52:42 +00:00
|
|
|
import os
|
|
|
|
|
2007-03-27 23:12:11 +00:00
|
|
|
from twisted.trial import unittest
|
2008-02-05 20:05:13 +00:00
|
|
|
from twisted.internet import defer
|
2007-03-27 23:12:11 +00:00
|
|
|
from twisted.python import log
|
|
|
|
|
|
|
|
from foolscap import Tub, Referenceable
|
2007-12-03 21:52:42 +00:00
|
|
|
from foolscap.eventual import fireEventually, flushEventualQueue
|
2007-03-27 23:12:11 +00:00
|
|
|
from twisted.application import service
|
2007-12-03 21:52:42 +00:00
|
|
|
from allmydata.introducer import IntroducerClient, IntroducerService, IntroducerNode
|
2008-04-22 19:54:16 +00:00
|
|
|
from allmydata.util import testutil, idlib
|
2007-03-27 23:12:11 +00:00
|
|
|
|
2007-12-12 01:10:29 +00:00
|
|
|
class FakeNode(Referenceable):
    """Minimal remotely-referenceable object published by the test clients."""
|
|
|
|
|
|
|
|
class LoggingMultiService(service.MultiService):
    """A MultiService whose log() method forwards to twisted.python.log."""

    def log(self, msg, **kwargs):
        """Record *msg* (plus any keyword metadata) in the twisted log."""
        log.msg(msg, **kwargs)
|
2007-03-27 23:12:11 +00:00
|
|
|
|
2007-12-03 21:52:42 +00:00
|
|
|
class TestIntroducerNode(testutil.SignalMixin, unittest.TestCase):
    """Tests for constructing and running a standalone IntroducerNode."""

    def test_loadable(self):
        """An IntroducerNode can be created, started, and stopped cleanly."""
        basedir = "introducer.IntroducerNode.test_loadable"
        os.mkdir(basedir)
        node = IntroducerNode(basedir)
        # start the node, wait for its tub, then shut it down, all on the
        # eventual-send queue so startup ordering matches production
        d = fireEventually(None)
        for step in (lambda res: node.startService(),
                     lambda res: node.when_tub_ready(),
                     lambda res: node.stopService()):
            d.addCallback(step)
        # drain foolscap's eventual queue before the test is declared done
        d.addCallback(flushEventualQueue)
        return d
|
|
|
|
|
2007-05-30 00:39:39 +00:00
|
|
|
class TestIntroducer(unittest.TestCase, testutil.PollMixin):
    """System-level tests of IntroducerClient talking to IntroducerService.

    Builds a real foolscap introducer plus several client tubs on localhost
    and polls until the mesh of connections reaches the expected state.
    """

    def setUp(self):
        # every Tub/service in these tests is parented here, so stopping
        # self.parent in tearDown shuts everything down
        self.parent = LoggingMultiService()
        self.parent.startService()

    def tearDown(self):
        log.msg("TestIntroducer.tearDown")
        d = defer.succeed(None)
        d.addCallback(lambda res: self.parent.stopService())
        # drain foolscap's eventual-send queue so the reactor is left clean
        d.addCallback(flushEventualQueue)
        return d

    def test_create(self):
        """IntroducerClient can be instantiated without a live Tub."""
        ic = IntroducerClient(None, "introducer.furl", "my_nickname",
                              "my_version", "oldest_version")

    def test_listen(self):
        """IntroducerService can be attached to a running parent service."""
        i = IntroducerService()
        i.setServiceParent(self.parent)

    def test_system(self):
        """Full mesh test: publish, subscribe, disconnect, reconnect,
        and finally lose the introducer without losing peer connections."""
        # central tub hosts the introducer itself
        self.central_tub = tub = Tub()
        #tub.setOption("logLocalFailures", True)
        #tub.setOption("logRemoteFailures", True)
        tub.setServiceParent(self.parent)
        l = tub.listenOn("tcp:0")
        portnum = l.getPortnum()
        tub.setLocation("localhost:%d" % portnum)

        i = IntroducerService()
        i.setServiceParent(self.parent)
        introducer_furl = tub.registerReference(i)
        NUMCLIENTS = 5
        # we have 5 clients who publish themselves, and an extra one which
        # does not. When the connections are fully established, all six nodes
        # should have 5 connections each.

        clients = []
        tubs = {}  # maps IntroducerClient -> its Tub
        # NOTE(review): the loop index below reuses the name 'i', shadowing
        # the IntroducerService bound above. The service is not needed again
        # in this scope, but a distinct name would be clearer.
        for i in range(NUMCLIENTS+1):
            tub = Tub()
            #tub.setOption("logLocalFailures", True)
            #tub.setOption("logRemoteFailures", True)
            tub.setServiceParent(self.parent)
            l = tub.listenOn("tcp:0")
            portnum = l.getPortnum()
            tub.setLocation("localhost:%d" % portnum)

            n = FakeNode()
            log.msg("creating client %d: %s" % (i, tub.getShortTubID()))
            c = IntroducerClient(tub, introducer_furl,
                                 "nickname-%d" % i, "version", "oldest")
            if i < NUMCLIENTS:
                node_furl = tub.registerReference(n)
                c.publish(node_furl, "storage", "ri_name")
            # the last one does not publish anything

            c.subscribe_to("storage")

            c.setServiceParent(self.parent)
            clients.append(c)
            tubs[c] = tub

        def _wait_for_all_connections():
            # true once every client (including the non-publisher) sees all
            # NUMCLIENTS published peers
            for c in clients:
                if len(c.get_all_connections()) < NUMCLIENTS:
                    return False
            return True
        d = self.poll(_wait_for_all_connections)

        def _check1(res):
            # steady state: everyone connected to the introducer and to all
            # NUMCLIENTS storage publishers
            log.msg("doing _check1")
            for c in clients:
                self.failUnless(c.connected_to_introducer())
                self.failUnlessEqual(len(c.get_all_connections()), NUMCLIENTS)
                self.failUnlessEqual(len(c.get_all_peerids()), NUMCLIENTS)
                self.failUnlessEqual(len(c.get_all_connections_for("storage")),
                                     NUMCLIENTS)
        d.addCallback(_check1)

        origin_c = clients[0]
        def _disconnect_somebody_else(res):
            # now disconnect somebody's connection to someone else
            current_counter = origin_c.counter
            # tubID is base32; decode to get the raw nodeid of the victim
            victim_nodeid = b32decode(tubs[clients[1]].tubID.upper())
            log.msg(" disconnecting %s->%s" %
                    (tubs[origin_c].tubID,
                     idlib.shortnodeid_b2a(victim_nodeid)))
            origin_c.debug_disconnect_from_peerid(victim_nodeid)
            log.msg(" did disconnect")

            # then wait until something changes, which ought to be them
            # noticing the loss
            def _compare():
                return current_counter != origin_c.counter
            return self.poll(_compare)

        d.addCallback(_disconnect_somebody_else)

        # and wait for them to reconnect
        d.addCallback(lambda res: self.poll(_wait_for_all_connections))
        def _check2(res):
            # after the reconnect, the full mesh should be restored
            log.msg("doing _check2")
            for c in clients:
                self.failUnlessEqual(len(c.get_all_connections()), NUMCLIENTS)
        d.addCallback(_check2)

        def _disconnect_yourself(res):
            # now disconnect somebody's connection to themselves.
            current_counter = origin_c.counter
            victim_nodeid = b32decode(tubs[clients[0]].tubID.upper())
            log.msg(" disconnecting %s->%s" %
                    (tubs[origin_c].tubID,
                     idlib.shortnodeid_b2a(victim_nodeid)))
            origin_c.debug_disconnect_from_peerid(victim_nodeid)
            log.msg(" did disconnect from self")

            # wait for the client to notice the loss (counter bump)
            def _compare():
                return current_counter != origin_c.counter
            return self.poll(_compare)
        d.addCallback(_disconnect_yourself)

        # again wait for the full mesh to re-form
        d.addCallback(lambda res: self.poll(_wait_for_all_connections))
        def _check3(res):
            log.msg("doing _check3")
            for c in clients:
                self.failUnlessEqual(len(c.get_all_connections_for("storage")),
                                     NUMCLIENTS)
        d.addCallback(_check3)

        def _shutdown_introducer(res):
            # now shut down the introducer. We do this by shutting down the
            # tub it's using. Nobody's connections (to each other) should go
            # down. All clients should notice the loss, and no other errors
            # should occur.
            log.msg("shutting down the introducer")
            return self.central_tub.disownServiceParent()
        d.addCallback(_shutdown_introducer)

        def _wait_for_introducer_loss():
            # true once every client has noticed the introducer is gone
            for c in clients:
                if c.connected_to_introducer():
                    return False
            return True
        d.addCallback(lambda res: self.poll(_wait_for_introducer_loss))

        def _check4(res):
            # peer-to-peer connections must survive the introducer's death
            log.msg("doing _check4")
            for c in clients:
                self.failUnlessEqual(len(c.get_all_connections_for("storage")),
                                     NUMCLIENTS)
                self.failIf(c.connected_to_introducer())
        d.addCallback(_check4)

        return d
|
|
|
|
|