from base64 import b32encode
import os, sys, time, re, simplejson
from cStringIO import StringIO
from twisted.trial import unittest
from twisted.internet import defer, reactor
from twisted.internet import threads # CLI tests use deferToThread
from twisted.internet.error import ConnectionDone, ConnectionLost
from twisted.application import service
import allmydata
from allmydata import client, uri, download, upload, storage, mutable, offloaded
from allmydata.introducer import IntroducerNode
from allmydata.util import deferredutil, fileutil, idlib, mathutil, testutil
from allmydata.util import log
from allmydata.scripts import runner
from allmydata.interfaces import IDirectoryNode, IFileNode, IFileURI
from allmydata.mutable import NotMutableError
from allmydata.stats import PickleStatsGatherer
from allmydata.key_generator import KeyGeneratorService
from foolscap.eventual import flushEventualQueue, fireEventually
from foolscap import DeadReferenceError, Tub
from twisted.python.failure import Failure
from twisted.web.client import getPage
from twisted.web.error import Error
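
# These are end-to-end "system" tests: each test case builds a small grid in a
# single process (an introducer, a stats gatherer, a key generator, and several
# client nodes), then exercises upload/download, mutable files, the virtual
# drive, the webish frontend, and the CLI against it.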

def flush_but_dont_ignore(res):
    d = flushEventualQueue()
    def _done(ignored):
        return res
    d.addCallback(_done)
    return d

LARGE_DATA = """
This is some data to publish to the virtual drive, which needs to be large
enough to not fit inside a LIT uri.
"""

class CountingDataUploadable(upload.Data):
    bytes_read = 0
    interrupt_after = None
    interrupt_after_d = None

    def read(self, length):
        self.bytes_read += length
        if self.interrupt_after is not None:
            if self.bytes_read > self.interrupt_after:
                self.interrupt_after = None
                self.interrupt_after_d.callback(self)
        return upload.Data.read(self, length)
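
# SystemTest drives a full multi-client grid. The individual test methods share
# the helpers defined below (set_up_nodes, bounce_client, add_extra_node, etc.)
# and keep their working files under self.basedir.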
class SystemTest(testutil.SignalMixin, testutil.PollMixin, unittest.TestCase):

    def setUp(self):
        self.sparent = service.MultiService()
        self.sparent.startService()

    def tearDown(self):
        log.msg("shutting down SystemTest services")
        d = self.sparent.stopService()
        d.addBoth(flush_but_dont_ignore)
        return d

    def getdir(self, subdir):
        return os.path.join(self.basedir, subdir)

    def add_service(self, s):
        s.setServiceParent(self.sparent)
        return s
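
    # set_up_nodes() builds the grid used by every test: it creates and starts
    # the introducer (with a web port), then chains callbacks to start the
    # stats gatherer, the key generator, and NUMCLIENTS client nodes.
    # client[0] runs a webserver and the upload helper; client[3] runs a
    # webserver, uses the helper, and is pointed at the key generator.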
    def set_up_nodes(self, NUMCLIENTS=5, createprivdir=False):
        self.numclients = NUMCLIENTS
        self.createprivdir = createprivdir
        iv_dir = self.getdir("introducer")
        if not os.path.isdir(iv_dir):
            fileutil.make_dirs(iv_dir)
            f = open(os.path.join(iv_dir, "webport"), "w")
            f.write("tcp:0:interface=127.0.0.1\n")
            f.close()
        iv = IntroducerNode(basedir=iv_dir)
        self.introducer = self.add_service(iv)
        d = self.introducer.when_tub_ready()
        d.addCallback(self._get_introducer_web)
        d.addCallback(self._set_up_stats_gatherer)
        d.addCallback(self._set_up_key_generator)
        d.addCallback(self._set_up_nodes_2)
        d.addCallback(self._grab_stats)
        return d

    def _get_introducer_web(self, res):
        f = open(os.path.join(self.getdir("introducer"), "node.url"), "r")
        self.introweb_url = f.read().strip()
        f.close()

    def _set_up_stats_gatherer(self, res):
        statsdir = self.getdir("stats_gatherer")
        fileutil.make_dirs(statsdir)
        t = Tub()
        self.add_service(t)
        l = t.listenOn("tcp:0")
        p = l.getPortnum()
        t.setLocation("localhost:%d" % p)

        self.stats_gatherer = PickleStatsGatherer(t, statsdir, False)
        self.add_service(self.stats_gatherer)
        self.stats_gatherer_furl = self.stats_gatherer.get_furl()

    def _set_up_key_generator(self, res):
        kgsdir = self.getdir("key_generator")
        fileutil.make_dirs(kgsdir)

        self.key_generator_svc = KeyGeneratorService(kgsdir, display_furl=False)
        self.key_generator_svc.key_generator.pool_size = 4
        self.key_generator_svc.key_generator.pool_refresh_delay = 60
        self.add_service(self.key_generator_svc)

        d = fireEventually()
        def check_for_furl():
            return os.path.exists(os.path.join(kgsdir, 'key_generator.furl'))
        d.addCallback(lambda junk: self.poll(check_for_furl, timeout=30))
        def get_furl(junk):
            kgf = os.path.join(kgsdir, 'key_generator.furl')
            self.key_generator_furl = file(kgf, 'rb').read().strip()
        d.addCallback(get_furl)
        return d

    def _set_up_nodes_2(self, res):
        q = self.introducer
        self.introducer_furl = q.introducer_url
        self.clients = []
        basedirs = []
        for i in range(self.numclients):
            basedir = self.getdir("client%d" % i)
            basedirs.append(basedir)
            fileutil.make_dirs(basedir)
            if i == 0:
                # client[0] runs a webserver and a helper, no key_generator
                open(os.path.join(basedir, "webport"), "w").write("tcp:0:interface=127.0.0.1")
                open(os.path.join(basedir, "run_helper"), "w").write("yes\n")
            if i == 3:
                # client[3] runs a webserver and uses a helper, uses key_generator
                open(os.path.join(basedir, "webport"), "w").write("tcp:0:interface=127.0.0.1")
                kgf = "%s\n" % (self.key_generator_furl,)
                open(os.path.join(basedir, "key_generator.furl"), "w").write(kgf)
            if self.createprivdir:
                fileutil.make_dirs(os.path.join(basedir, "private"))
                open(os.path.join(basedir, "private", "root_dir.cap"), "w")
            open(os.path.join(basedir, "introducer.furl"), "w").write(self.introducer_furl)
            open(os.path.join(basedir, "stats_gatherer.furl"), "w").write(self.stats_gatherer_furl)

        # start client[0], wait for its tub to be ready (at which point it
        # will have registered the helper furl).
        c = self.add_service(client.Client(basedir=basedirs[0]))
        self.clients.append(c)
        d = c.when_tub_ready()
        def _ready(res):
            f = open(os.path.join(basedirs[0], "private", "helper.furl"), "r")
            helper_furl = f.read()
            f.close()
            self.helper_furl = helper_furl
            f = open(os.path.join(basedirs[3], "helper.furl"), "w")
            f.write(helper_furl)
            f.close()

            # this starts the rest of the clients
            for i in range(1, self.numclients):
                c = self.add_service(client.Client(basedir=basedirs[i]))
                self.clients.append(c)
            log.msg("STARTING")
            return self.wait_for_connections()
        d.addCallback(_ready)
        def _connected(res):
            log.msg("CONNECTED")
            # now find out where the web port was
            l = self.clients[0].getServiceNamed("webish").listener
            port = l._port.getHost().port
            self.webish_url = "http://localhost:%d/" % port
            # and the helper-using webport
            l = self.clients[3].getServiceNamed("webish").listener
            port = l._port.getHost().port
            self.helper_webish_url = "http://localhost:%d/" % port
        d.addCallback(_connected)
        return d

    def _grab_stats(self, res):
        d = self.stats_gatherer.poll()
        return d
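
    # bounce_client() stops a client node and replaces it with a freshly
    # constructed one using the same basedir, which is how these tests
    # simulate a node restart (e.g. to interrupt an in-progress helper upload).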
    def bounce_client(self, num):
        c = self.clients[num]
        d = c.disownServiceParent()
        # I think windows requires a moment to let the connection really stop
        # and the port number made available for re-use. TODO: examine the
        # behavior, see if this is really the problem, see if we can do
        # better than blindly waiting for a second.
        d.addCallback(self.stall, 1.0)
        def _stopped(res):
            new_c = client.Client(basedir=self.getdir("client%d" % num))
            self.clients[num] = new_c
            self.add_service(new_c)
            return new_c.when_tub_ready()
        d.addCallback(_stopped)
        d.addCallback(lambda res: self.wait_for_connections())
        def _maybe_get_webport(res):
            if num == 0:
                # now find out where the web port was
                l = self.clients[0].getServiceNamed("webish").listener
                port = l._port.getHost().port
                self.webish_url = "http://localhost:%d/" % port
        d.addCallback(_maybe_get_webport)
        return d

    def add_extra_node(self, client_num, helper_furl=None,
                       add_to_sparent=False):
        # usually this node is *not* parented to our self.sparent, so we can
        # shut it down separately from the rest, to exercise the
        # connection-lost code
        basedir = self.getdir("client%d" % client_num)
        if not os.path.isdir(basedir):
            fileutil.make_dirs(basedir)
        open(os.path.join(basedir, "introducer.furl"), "w").write(self.introducer_furl)
        if helper_furl:
            f = open(os.path.join(basedir, "helper.furl"), "w")
            f.write(helper_furl+"\n")
            f.close()

        c = client.Client(basedir=basedir)
        self.clients.append(c)
        self.numclients += 1
        if add_to_sparent:
            c.setServiceParent(self.sparent)
        else:
            c.startService()
        d = self.wait_for_connections()
        d.addCallback(lambda res: c)
        return d

    def _check_connections(self):
        for c in self.clients:
            ic = c.introducer_client
            if not ic.connected_to_introducer():
                return False
            if len(ic.get_all_peerids()) != self.numclients:
                return False
        return True

    def wait_for_connections(self, ignored=None):
        # TODO: replace this with something that takes a list of peerids and
        # fires when they've all been heard from, instead of using a count
        # and a threshold
        return self.poll(self._check_connections, timeout=200)

    def test_connections(self):
        self.basedir = "system/SystemTest/test_connections"
        d = self.set_up_nodes()
        self.extra_node = None
        d.addCallback(lambda res: self.add_extra_node(self.numclients))
        def _check(extra_node):
            self.extra_node = extra_node
            for c in self.clients:
                all_peerids = list(c.get_all_peerids())
                self.failUnlessEqual(len(all_peerids), self.numclients+1)
                permuted_peers = list(c.get_permuted_peers("storage", "a"))
                self.failUnlessEqual(len(permuted_peers), self.numclients+1)

        d.addCallback(_check)
        def _shutdown_extra_node(res):
            if self.extra_node:
                return self.extra_node.stopService()
            return res
        d.addBoth(_shutdown_extra_node)
        return d
    test_connections.timeout = 300
    # test_connections is subsumed by test_upload_and_download, and takes
    # quite a while to run on a slow machine (because of all the TLS
    # connections that must be established). If we ever rework the introducer
    # code to such an extent that we're not sure if it works anymore, we can
    # reinstate this test until it does.
    del test_connections

    def test_upload_and_download_random_key(self):
        self.basedir = "system/SystemTest/test_upload_and_download_random_key"
        return self._test_upload_and_download(convergence=None)
    test_upload_and_download_random_key.timeout = 4800

    def test_upload_and_download_convergent(self):
        self.basedir = "system/SystemTest/test_upload_and_download_convergent"
        return self._test_upload_and_download(convergence="some convergence string")
    test_upload_and_download_convergent.timeout = 4800

    def _test_upload_and_download(self, convergence):
        # we use 4000 bytes of data, which will result in about 400k written
        # to disk among all our simulated nodes
        DATA = "Some data to upload\n" * 200
        d = self.set_up_nodes()
        def _check_connections(res):
            for c in self.clients:
                all_peerids = list(c.get_all_peerids())
                self.failUnlessEqual(len(all_peerids), self.numclients)
                permuted_peers = list(c.get_permuted_peers("storage", "a"))
                self.failUnlessEqual(len(permuted_peers), self.numclients)
        d.addCallback(_check_connections)

        def _do_upload(res):
            log.msg("UPLOADING")
            u = self.clients[0].getServiceNamed("uploader")
            self.uploader = u
            # we crank the max segsize down to 1024b for the duration of this
            # test, so we can exercise multiple segments. It is important
            # that this is not a multiple of the segment size, so that the
            # tail segment is not the same length as the others. This actually
            # gets rounded up to 1025 to be a multiple of the number of
            # required shares (since we use 25 out of 100 FEC).
            up = upload.Data(DATA, convergence=convergence)
            up.max_segment_size = 1024
            d1 = u.upload(up)
            return d1
        d.addCallback(_do_upload)
        def _upload_done(results):
            uri = results.uri
            log.msg("upload finished: uri is %s" % (uri,))
            self.uri = uri
            dl = self.clients[1].getServiceNamed("downloader")
            self.downloader = dl
        d.addCallback(_upload_done)

        def _upload_again(res):
            # Upload again. If using convergent encryption then this ought to be
            # short-circuited, however with the way we currently generate URIs
            # (i.e. because they include the roothash), we have to do all of the
            # encoding work, and only get to save on the upload part.
            log.msg("UPLOADING AGAIN")
            up = upload.Data(DATA, convergence=convergence)
            up.max_segment_size = 1024
            d1 = self.uploader.upload(up)
        d.addCallback(_upload_again)

        def _download_to_data(res):
            log.msg("DOWNLOADING")
            return self.downloader.download_to_data(self.uri)
        d.addCallback(_download_to_data)
        def _download_to_data_done(data):
            log.msg("download finished")
            self.failUnlessEqual(data, DATA)
        d.addCallback(_download_to_data_done)

        target_filename = os.path.join(self.basedir, "download.target")
        def _download_to_filename(res):
            return self.downloader.download_to_filename(self.uri,
                                                        target_filename)
        d.addCallback(_download_to_filename)
        def _download_to_filename_done(res):
            newdata = open(target_filename, "rb").read()
            self.failUnlessEqual(newdata, DATA)
        d.addCallback(_download_to_filename_done)

        target_filename2 = os.path.join(self.basedir, "download.target2")
        def _download_to_filehandle(res):
            fh = open(target_filename2, "wb")
            return self.downloader.download_to_filehandle(self.uri, fh)
        d.addCallback(_download_to_filehandle)
        def _download_to_filehandle_done(fh):
            fh.close()
            newdata = open(target_filename2, "rb").read()
            self.failUnlessEqual(newdata, DATA)
        d.addCallback(_download_to_filehandle_done)

        def _download_nonexistent_uri(res):
            baduri = self.mangle_uri(self.uri)
            log.msg("about to download non-existent URI", level=log.UNUSUAL,
                    facility="tahoe.tests")
            d1 = self.downloader.download_to_data(baduri)
            def _baduri_should_fail(res):
                log.msg("finished downloading non-existent URI",
                        level=log.UNUSUAL, facility="tahoe.tests")
                self.failUnless(isinstance(res, Failure))
                self.failUnless(res.check(download.NotEnoughPeersError),
                                "expected NotEnoughPeersError, got %s" % res)
                # TODO: files that have zero peers should get a special kind
                # of NotEnoughPeersError, which can be used to suggest that
                # the URI might be wrong or that they've never uploaded the
                # file in the first place.
            d1.addBoth(_baduri_should_fail)
            return d1
        d.addCallback(_download_nonexistent_uri)

        # add a new node, which doesn't accept shares, and only uses the
        # helper for upload.
        d.addCallback(lambda res: self.add_extra_node(self.numclients,
                                                      self.helper_furl,
                                                      add_to_sparent=True))
        def _added(extra_node):
            self.extra_node = extra_node
            extra_node.getServiceNamed("storage").sizelimit = 0
        d.addCallback(_added)

        HELPER_DATA = "Data that needs help to upload" * 1000
        def _upload_with_helper(res):
            u = upload.Data(HELPER_DATA, convergence=convergence)
            d = self.extra_node.upload(u)
            def _uploaded(results):
                uri = results.uri
                return self.downloader.download_to_data(uri)
            d.addCallback(_uploaded)
            def _check(newdata):
                self.failUnlessEqual(newdata, HELPER_DATA)
            d.addCallback(_check)
            return d
        d.addCallback(_upload_with_helper)

        def _upload_duplicate_with_helper(res):
            u = upload.Data(HELPER_DATA, convergence=convergence)
            u.debug_stash_RemoteEncryptedUploadable = True
            d = self.extra_node.upload(u)
            def _uploaded(results):
                uri = results.uri
                return self.downloader.download_to_data(uri)
            d.addCallback(_uploaded)
            def _check(newdata):
                self.failUnlessEqual(newdata, HELPER_DATA)
                self.failIf(hasattr(u, "debug_RemoteEncryptedUploadable"),
                            "uploadable started uploading, should have been avoided")
            d.addCallback(_check)
            return d
        if convergence is not None:
            d.addCallback(_upload_duplicate_with_helper)
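
        # The resumable-upload scenario: interrupt a helper upload partway
        # through (by bouncing client[0], which runs the helper), make sure the
        # partially-written shares are cleaned up, then upload the same data
        # again and check how much ciphertext the helper had to re-fetch.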
        def _upload_resumable(res):
            DATA = "Data that needs help to upload and gets interrupted" * 1000
            u1 = CountingDataUploadable(DATA, convergence=convergence)
            u2 = CountingDataUploadable(DATA, convergence=convergence)

            # we interrupt the connection after about 5kB by shutting down
            # the helper, then restarting it.
            u1.interrupt_after = 5000
            u1.interrupt_after_d = defer.Deferred()
            u1.interrupt_after_d.addCallback(lambda res:
                                             self.bounce_client(0))

            # sneak into the helper and reduce its chunk size, so that our
            # debug_interrupt will sever the connection on about the fifth
            # chunk fetched. This makes sure that we've started to write the
            # new shares before we abandon them, which exercises the
            # abort/delete-partial-share code. TODO: find a cleaner way to do
            # this. I know that this will affect later uses of the helper in
            # this same test run, but I'm not currently worried about it.
            offloaded.CHKCiphertextFetcher.CHUNK_SIZE = 1000

            d = self.extra_node.upload(u1)

            def _should_not_finish(res):
                self.fail("interrupted upload should have failed, not finished"
                          " with result %s" % (res,))
            def _interrupted(f):
                f.trap(ConnectionLost, ConnectionDone, DeadReferenceError)

                # make sure we actually interrupted it before finishing the
                # file
                self.failUnless(u1.bytes_read < len(DATA),
                                "read %d out of %d total" % (u1.bytes_read,
                                                             len(DATA)))

                log.msg("waiting for reconnect", level=log.NOISY,
                        facility="tahoe.test.test_system")
                # now, we need to give the nodes a chance to notice that this
                # connection has gone away. When this happens, the storage
                # servers will be told to abort their uploads, removing the
                # partial shares. Unfortunately this involves TCP messages
                # going through the loopback interface, and we can't easily
                # predict how long that will take. If it were all local, we
                # could use fireEventually() to stall. Since we don't have
                # the right introduction hooks, the best we can do is use a
                # fixed delay. TODO: this is fragile.
                u1.interrupt_after_d.addCallback(self.stall, 2.0)
                return u1.interrupt_after_d
            d.addCallbacks(_should_not_finish, _interrupted)

            def _disconnected(res):
                # check to make sure the storage servers aren't still hanging
                # on to the partial share: their incoming/ directories should
                # now be empty.
                log.msg("disconnected", level=log.NOISY,
                        facility="tahoe.test.test_system")
                for i in range(self.numclients):
                    incdir = os.path.join(self.getdir("client%d" % i),
                                          "storage", "shares", "incoming")
                    self.failIf(os.path.exists(incdir) and os.listdir(incdir))
            d.addCallback(_disconnected)

            # then we need to give the reconnector a chance to
            # reestablish the connection to the helper.
            d.addCallback(lambda res:
                          log.msg("wait_for_connections", level=log.NOISY,
                                  facility="tahoe.test.test_system"))
            d.addCallback(lambda res: self.wait_for_connections())

            d.addCallback(lambda res:
                          log.msg("uploading again", level=log.NOISY,
                                  facility="tahoe.test.test_system"))
            d.addCallback(lambda res: self.extra_node.upload(u2))

            def _uploaded(results):
                uri = results.uri
                log.msg("Second upload complete", level=log.NOISY,
                        facility="tahoe.test.test_system")

                # this is really bytes received rather than sent, but it's
                # convenient and basically measures the same thing
                bytes_sent = results.ciphertext_fetched

                # We currently don't support resumption of upload if the data is
                # encrypted with a random key. (Because that would require us
                # to store the key locally and re-use it on the next upload of
                # this file, which isn't a bad thing to do, but we currently
                # don't do it.)
                if convergence is not None:
                    # Make sure we did not have to read the whole file the
                    # second time around.
                    self.failUnless(bytes_sent < len(DATA),
                                    "resumption didn't save us any work:"
                                    " read %d bytes out of %d total" %
                                    (bytes_sent, len(DATA)))
                else:
                    # Make sure we did have to read the whole file the second
                    # time around -- because the one that we partially uploaded
                    # earlier was encrypted with a different random key.
                    self.failIf(bytes_sent < len(DATA),
                                "resumption saved us some work even though we were using random keys:"
                                " read %d bytes out of %d total" %
                                (bytes_sent, len(DATA)))
                return self.downloader.download_to_data(uri)
            d.addCallback(_uploaded)

            def _check(newdata):
                self.failUnlessEqual(newdata, DATA)
                # If using convergent encryption, then also check that the
                # helper has removed the temp file from its directories.
                if convergence is not None:
                    basedir = os.path.join(self.getdir("client0"), "helper")
                    files = os.listdir(os.path.join(basedir, "CHK_encoding"))
                    self.failUnlessEqual(files, [])
                    files = os.listdir(os.path.join(basedir, "CHK_incoming"))
                    self.failUnlessEqual(files, [])
            d.addCallback(_check)
            return d
        d.addCallback(_upload_resumable)

        return d
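
    # Share files live at NODEDIR/storage/shares/$START/$SINDEX/$SHNUM (as the
    # comment inside notes). _find_shares() walks every node directory under
    # basedir and returns one (client_num, storage_index, filename, shnum)
    # tuple per share file found.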
    def _find_shares(self, basedir):
        shares = []
        for (dirpath, dirnames, filenames) in os.walk(basedir):
            if "storage" not in dirpath:
                continue
            if not filenames:
                continue
            pieces = dirpath.split(os.sep)
            if pieces[-4] == "storage" and pieces[-3] == "shares":
                # we're sitting in .../storage/shares/$START/$SINDEX , and there
                # are sharefiles here
                assert pieces[-5].startswith("client")
                client_num = int(pieces[-5][-1])
                storage_index_s = pieces[-1]
                storage_index = storage.si_a2b(storage_index_s)
                for sharename in filenames:
                    shnum = int(sharename)
                    filename = os.path.join(dirpath, sharename)
                    data = (client_num, storage_index, filename, shnum)
                    shares.append(data)
        if not shares:
            self.fail("unable to find any share files in %s" % basedir)
        return shares
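
    # _corrupt_mutable_share() reads a whole SDMF share, unpacks it into its
    # fields (seqnum, root hash, IV, encoding parameters, signature, hash
    # chains, share data, encrypted private key), flips a bit in (or bumps)
    # the field named by 'which', and writes the re-packed share back in place.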
    def _corrupt_mutable_share(self, filename, which):
        msf = storage.MutableShareFile(filename)
        datav = msf.readv([ (0, 1000000) ])
        final_share = datav[0]
        assert len(final_share) < 1000000 # ought to be truncated
        pieces = mutable.unpack_share(final_share)
        (seqnum, root_hash, IV, k, N, segsize, datalen,
         verification_key, signature, share_hash_chain, block_hash_tree,
         share_data, enc_privkey) = pieces

        if which == "seqnum":
            seqnum = seqnum + 15
        elif which == "R":
            root_hash = self.flip_bit(root_hash)
        elif which == "IV":
            IV = self.flip_bit(IV)
        elif which == "segsize":
            segsize = segsize + 15
        elif which == "pubkey":
            verification_key = self.flip_bit(verification_key)
        elif which == "signature":
            signature = self.flip_bit(signature)
        elif which == "share_hash_chain":
            nodenum = share_hash_chain.keys()[0]
            share_hash_chain[nodenum] = self.flip_bit(share_hash_chain[nodenum])
        elif which == "block_hash_tree":
            block_hash_tree[-1] = self.flip_bit(block_hash_tree[-1])
        elif which == "share_data":
            share_data = self.flip_bit(share_data)
        elif which == "encprivkey":
            enc_privkey = self.flip_bit(enc_privkey)

        prefix = mutable.pack_prefix(seqnum, root_hash, IV, k, N,
                                     segsize, datalen)
        final_share = mutable.pack_share(prefix,
                                         verification_key,
                                         signature,
                                         share_hash_chain,
                                         block_hash_tree,
                                         share_data,
                                         enc_privkey)
        msf.writev( [(0, final_share)], None)
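
    # test_mutable covers the SDMF mutable-file path end to end: create a
    # mutable file, inspect one of its shares with the 'dump-share' CLI tool,
    # read it back through several clients, update and overwrite it, corrupt
    # most of the shares and confirm retrieval still works, and finally check
    # that the key-generator pool shrinks as clients consume keys.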
    def test_mutable(self):
        self.basedir = "system/SystemTest/test_mutable"
        DATA = "initial contents go here." # 25 bytes % 3 != 0
        NEWDATA = "new contents yay"
        NEWERDATA = "this is getting old"

        d = self.set_up_nodes()

        def _create_mutable(res):
            c = self.clients[0]
            log.msg("starting create_mutable_file")
            d1 = c.create_mutable_file(DATA)
            def _done(res):
                log.msg("DONE: %s" % (res,))
                self._mutable_node_1 = res
                uri = res.get_uri()
            d1.addCallback(_done)
            return d1
        d.addCallback(_create_mutable)

        def _test_debug(res):
            # find a share. It is important to run this while there is only
            # one slot in the grid.
            shares = self._find_shares(self.basedir)
            (client_num, storage_index, filename, shnum) = shares[0]
            log.msg("test_system.SystemTest.test_mutable._test_debug using %s"
                    % filename)
            log.msg(" for clients[%d]" % client_num)

            out,err = StringIO(), StringIO()
            rc = runner.runner(["dump-share",
                                filename],
                               stdout=out, stderr=err)
            output = out.getvalue()
            self.failUnlessEqual(rc, 0)
            try:
                self.failUnless("Mutable slot found:\n" in output)
                self.failUnless("share_type: SDMF\n" in output)
                peerid = idlib.nodeid_b2a(self.clients[client_num].nodeid)
                self.failUnless(" WE for nodeid: %s\n" % peerid in output)
                self.failUnless(" num_extra_leases: 0\n" in output)
                # the pubkey size can vary by a byte, so the container might
                # be a bit larger on some runs.
                m = re.search(r'^ container_size: (\d+)$', output, re.M)
                self.failUnless(m)
                container_size = int(m.group(1))
                self.failUnless(2037 <= container_size <= 2049, container_size)
                m = re.search(r'^ data_length: (\d+)$', output, re.M)
                self.failUnless(m)
                data_length = int(m.group(1))
                self.failUnless(2037 <= data_length <= 2049, data_length)
                self.failUnless(" secrets are for nodeid: %s\n" % peerid
                                in output)
                self.failUnless(" SDMF contents:\n" in output)
                self.failUnless(" seqnum: 1\n" in output)
                self.failUnless(" required_shares: 3\n" in output)
                self.failUnless(" total_shares: 10\n" in output)
                self.failUnless(" segsize: 27\n" in output, (output, filename))
                self.failUnless(" datalen: 25\n" in output)
                # the exact share_hash_chain nodes depends upon the sharenum,
                # and is more of a hassle to compute than I want to deal with
                # now
                self.failUnless(" share_hash_chain: " in output)
                self.failUnless(" block_hash_tree: 1 nodes\n" in output)
            except unittest.FailTest:
                print
                print "dump-share output was:"
                print output
                raise
        d.addCallback(_test_debug)

        # test retrieval

        # first, let's see if we can use the existing node to retrieve the
        # contents. This allows it to use the cached pubkey and maybe the
        # latest-known sharemap.

        d.addCallback(lambda res: self._mutable_node_1.download_to_data())
        def _check_download_1(res):
            self.failUnlessEqual(res, DATA)
            # now we see if we can retrieve the data from a new node,
            # constructed using the URI of the original one. We do this test
            # on the same client that uploaded the data.
            uri = self._mutable_node_1.get_uri()
            log.msg("starting retrieve1")
            newnode = self.clients[0].create_node_from_uri(uri)
            return newnode.download_to_data()
        d.addCallback(_check_download_1)

        def _check_download_2(res):
            self.failUnlessEqual(res, DATA)
            # same thing, but with a different client
            uri = self._mutable_node_1.get_uri()
            newnode = self.clients[1].create_node_from_uri(uri)
            log.msg("starting retrieve2")
            d1 = newnode.download_to_data()
            d1.addCallback(lambda res: (res, newnode))
            return d1
        d.addCallback(_check_download_2)

        def _check_download_3((res, newnode)):
            self.failUnlessEqual(res, DATA)
            # replace the data
            log.msg("starting replace1")
            d1 = newnode.update(NEWDATA)
            d1.addCallback(lambda res: newnode.download_to_data())
            return d1
        d.addCallback(_check_download_3)

        def _check_download_4(res):
            self.failUnlessEqual(res, NEWDATA)
            # now create an even newer node and replace the data on it. This
            # new node has never been used for download before.
            uri = self._mutable_node_1.get_uri()
            newnode1 = self.clients[2].create_node_from_uri(uri)
            newnode2 = self.clients[3].create_node_from_uri(uri)
            self._newnode3 = self.clients[3].create_node_from_uri(uri)
            log.msg("starting replace2")
            d1 = newnode1.overwrite(NEWERDATA)
            d1.addCallback(lambda res: newnode2.download_to_data())
            return d1
        d.addCallback(_check_download_4)

        def _check_download_5(res):
            log.msg("finished replace2")
            self.failUnlessEqual(res, NEWERDATA)
        d.addCallback(_check_download_5)

        def _corrupt_shares(res):
            # run around and flip bits in all but k of the shares, to test
            # the hash checks
            shares = self._find_shares(self.basedir)
            ## sort by share number
            #shares.sort( lambda a,b: cmp(a[3], b[3]) )
            where = dict([ (shnum, filename)
                           for (client_num, storage_index, filename, shnum)
                           in shares ])
            assert len(where) == 10 # this test is designed for 3-of-10
            for shnum, filename in where.items():
                # shares 7,8,9 are left alone. read will check
                # (share_hash_chain, block_hash_tree, share_data). New
                # seqnum+R pairs will trigger a check of (seqnum, R, IV,
                # segsize, signature).
                if shnum == 0:
                    # read: this will trigger "pubkey doesn't match
                    # fingerprint".
                    self._corrupt_mutable_share(filename, "pubkey")
                    self._corrupt_mutable_share(filename, "encprivkey")
                elif shnum == 1:
                    # triggers "signature is invalid"
                    self._corrupt_mutable_share(filename, "seqnum")
                elif shnum == 2:
                    # triggers "signature is invalid"
                    self._corrupt_mutable_share(filename, "R")
                elif shnum == 3:
                    # triggers "signature is invalid"
                    self._corrupt_mutable_share(filename, "segsize")
                elif shnum == 4:
                    self._corrupt_mutable_share(filename, "share_hash_chain")
                elif shnum == 5:
                    self._corrupt_mutable_share(filename, "block_hash_tree")
                elif shnum == 6:
                    self._corrupt_mutable_share(filename, "share_data")
                # other things to corrupt: IV, signature
                # 7,8,9 are left alone

            # note that initial_query_count=5 means that we'll hit the
            # first 5 servers in effectively random order (based upon
            # response time), so we won't necessarily ever get a "pubkey
            # doesn't match fingerprint" error (if we hit shnum>=1 before
            # shnum=0, we pull the pubkey from there). To get repeatable
            # specific failures, we need to set initial_query_count=1,
            # but of course that will change the sequencing behavior of
            # the retrieval process. TODO: find a reasonable way to make
            # this a parameter, probably when we expand this test to test
            # for one failure mode at a time.

            # when we retrieve this, we should get three signature
            # failures (where we've mangled seqnum, R, and segsize). The
            # pubkey mangling
        d.addCallback(_corrupt_shares)

        d.addCallback(lambda res: self._newnode3.download_to_data())
        d.addCallback(_check_download_5)

        def _check_empty_file(res):
            # make sure we can create empty files, this usually screws up the
            # segsize math
            d1 = self.clients[2].create_mutable_file("")
            d1.addCallback(lambda newnode: newnode.download_to_data())
            d1.addCallback(lambda res: self.failUnlessEqual("", res))
            return d1
        d.addCallback(_check_empty_file)

        d.addCallback(lambda res: self.clients[0].create_empty_dirnode())
        def _created_dirnode(dnode):
            log.msg("_created_dirnode(%s)" % (dnode,))
            d1 = dnode.list()
            d1.addCallback(lambda children: self.failUnlessEqual(children, {}))
            d1.addCallback(lambda res: dnode.has_child(u"edgar"))
            d1.addCallback(lambda answer: self.failUnlessEqual(answer, False))
            d1.addCallback(lambda res: dnode.set_node(u"see recursive", dnode))
            d1.addCallback(lambda res: dnode.has_child(u"see recursive"))
            d1.addCallback(lambda answer: self.failUnlessEqual(answer, True))
            d1.addCallback(lambda res: dnode.build_manifest())
            d1.addCallback(lambda manifest:
                           self.failUnlessEqual(len(manifest), 1))
            return d1
        d.addCallback(_created_dirnode)

        def wait_for_c3_kg_conn():
            return self.clients[3]._key_generator is not None
        d.addCallback(lambda junk: self.poll(wait_for_c3_kg_conn))

        def check_kg_poolsize(junk, size_delta):
            self.failUnlessEqual(len(self.key_generator_svc.key_generator.keypool),
                                 self.key_generator_svc.key_generator.pool_size + size_delta)

        d.addCallback(check_kg_poolsize, 0)
        d.addCallback(lambda junk: self.clients[3].create_mutable_file('hello, world'))
        d.addCallback(check_kg_poolsize, -1)
        d.addCallback(lambda junk: self.clients[3].create_empty_dirnode())
        d.addCallback(check_kg_poolsize, -2)
        # use_helper induces use of clients[3], which is the using-key_gen client
        d.addCallback(lambda junk: self.POST("uri", use_helper=True, t="mkdir", name='george'))
        d.addCallback(check_kg_poolsize, -3)

        return d
    # The default 120 second timeout went off when running it under valgrind
    # on my old Windows laptop, so I'm bumping up the timeout.
    test_mutable.timeout = 240

    def flip_bit(self, good):
        return good[:-1] + chr(ord(good[-1]) ^ 0x01)

    def mangle_uri(self, gooduri):
        # change the key, which changes the storage index, which means we'll
        # be asking about the wrong file, so nobody will have any shares
        u = IFileURI(gooduri)
        u2 = uri.CHKFileURI(key=self.flip_bit(u.key),
                            uri_extension_hash=u.uri_extension_hash,
                            needed_shares=u.needed_shares,
                            total_shares=u.total_shares,
                            size=u.size)
        return u2.to_string()

    # TODO: add a test which mangles the uri_extension_hash instead, and
    # should fail due to not being able to get a valid uri_extension block.
    # Also a test which sneakily mangles the uri_extension block to change
    # some of the validation data, so it will fail in the post-download phase
    # when the file's crypttext integrity check fails. Do the same thing for
    # the key, which should cause the download to fail the post-download
    # plaintext_hash check.

    def test_vdrive(self):
        self.basedir = "system/SystemTest/test_vdrive"
        self.data = LARGE_DATA
        d = self.set_up_nodes(createprivdir=True)
        d.addCallback(self._test_introweb)
        d.addCallback(self.log, "starting publish")
        d.addCallback(self._do_publish1)
        d.addCallback(self._test_runner)
        d.addCallback(self._do_publish2)
        # at this point, we have the following filesystem (where "R" denotes
        # self._root_directory_uri):
        # R
        # R/subdir1
        # R/subdir1/mydata567
        # R/subdir1/subdir2/
        # R/subdir1/subdir2/mydata992

        d.addCallback(lambda res: self.bounce_client(0))
        d.addCallback(self.log, "bounced client0")

        d.addCallback(self._check_publish1)
        d.addCallback(self.log, "did _check_publish1")
        d.addCallback(self._check_publish2)
        d.addCallback(self.log, "did _check_publish2")
        d.addCallback(self._do_publish_private)
        d.addCallback(self.log, "did _do_publish_private")
        # now we also have (where "P" denotes a new dir):
        #  P/personal/sekrit data
        #  P/s2-rw -> /subdir1/subdir2/
        #  P/s2-ro -> /subdir1/subdir2/ (read-only)
        d.addCallback(self._check_publish_private)
        d.addCallback(self.log, "did _check_publish_private")
        d.addCallback(self._test_web)
        d.addCallback(self._test_control)
        d.addCallback(self._test_cli)
        # P now has four top-level children:
        # P/personal/sekrit data
        # P/s2-ro/
        # P/s2-rw/
        # P/test_put/ (empty)
        d.addCallback(self._test_checker)
        d.addCallback(self._test_verifier)
        d.addCallback(self._grab_stats)
        return d
    test_vdrive.timeout = 1100

    def _test_introweb(self, res):
        d = getPage(self.introweb_url, method="GET", followRedirect=True)
        def _check(res):
            try:
                self.failUnless("allmydata: %s" % str(allmydata.__version__)
                                in res)
                self.failUnless("Announcement Summary: storage: 5, stub_client: 5" in res)
                self.failUnless("Subscription Summary: storage: 5" in res)
            except unittest.FailTest:
                print
                print "GET %s output was:" % self.introweb_url
                print res
                raise
        d.addCallback(_check)
        d.addCallback(lambda res:
                      getPage(self.introweb_url + "?t=json",
                              method="GET", followRedirect=True))
        def _check_json(res):
            data = simplejson.loads(res)
            try:
                self.failUnlessEqual(data["subscription_summary"],
                                     {"storage": 5})
                self.failUnlessEqual(data["announcement_summary"],
                                     {"storage": 5, "stub_client": 5})
            except unittest.FailTest:
                print
                print "GET %s?t=json output was:" % self.introweb_url
                print res
                raise
        d.addCallback(_check_json)
        return d

    def _do_publish1(self, res):
        ut = upload.Data(self.data, convergence=None)
        c0 = self.clients[0]
        d = c0.create_empty_dirnode()
        def _made_root(new_dirnode):
            self._root_directory_uri = new_dirnode.get_uri()
            return c0.create_node_from_uri(self._root_directory_uri)
        d.addCallback(_made_root)
        d.addCallback(lambda root: root.create_empty_directory(u"subdir1"))
        def _made_subdir1(subdir1_node):
            self._subdir1_node = subdir1_node
            d1 = subdir1_node.add_file(u"mydata567", ut)
            d1.addCallback(self.log, "publish finished")
            def _stash_uri(filenode):
                self.uri = filenode.get_uri()
            d1.addCallback(_stash_uri)
            return d1
        d.addCallback(_made_subdir1)
        return d

    def _do_publish2(self, res):
        ut = upload.Data(self.data, convergence=None)
        d = self._subdir1_node.create_empty_directory(u"subdir2")
        d.addCallback(lambda subdir2: subdir2.add_file(u"mydata992", ut))
        return d

    def log(self, res, msg, **kwargs):
        # print "MSG: %s RES: %s" % (msg, res)
        log.msg(msg, **kwargs)
        return res

    def stall(self, res, delay=1.0):
        d = defer.Deferred()
        reactor.callLater(delay, d.callback, res)
        return d

    def _do_publish_private(self, res):
        self.smalldata = "sssh, very secret stuff"
        ut = upload.Data(self.smalldata, convergence=None)
        d = self.clients[0].create_empty_dirnode()
        d.addCallback(self.log, "GOT private directory")
        def _got_new_dir(privnode):
            rootnode = self.clients[0].create_node_from_uri(self._root_directory_uri)
            d1 = privnode.create_empty_directory(u"personal")
            d1.addCallback(self.log, "made P/personal")
            d1.addCallback(lambda node: node.add_file(u"sekrit data", ut))
            d1.addCallback(self.log, "made P/personal/sekrit data")
            d1.addCallback(lambda res: rootnode.get_child_at_path([u"subdir1", u"subdir2"]))
            def _got_s2(s2node):
                d2 = privnode.set_uri(u"s2-rw", s2node.get_uri())
                d2.addCallback(lambda node: privnode.set_uri(u"s2-ro", s2node.get_readonly_uri()))
                return d2
            d1.addCallback(_got_s2)
            d1.addCallback(lambda res: privnode)
            return d1
        d.addCallback(_got_new_dir)
        return d

    def _check_publish1(self, res):
        # this one uses the iterative API
        c1 = self.clients[1]
        d = defer.succeed(c1.create_node_from_uri(self._root_directory_uri))
        d.addCallback(self.log, "check_publish1 got /")
        d.addCallback(lambda root: root.get(u"subdir1"))
        d.addCallback(lambda subdir1: subdir1.get(u"mydata567"))
        d.addCallback(lambda filenode: filenode.download_to_data())
        d.addCallback(self.log, "get finished")
        def _get_done(data):
            self.failUnlessEqual(data, self.data)
        d.addCallback(_get_done)
        return d

    def _check_publish2(self, res):
        # this one uses the path-based API
        rootnode = self.clients[1].create_node_from_uri(self._root_directory_uri)
        d = rootnode.get_child_at_path(u"subdir1")
        d.addCallback(lambda dirnode:
                      self.failUnless(IDirectoryNode.providedBy(dirnode)))
        d.addCallback(lambda res: rootnode.get_child_at_path(u"subdir1/mydata567"))
        d.addCallback(lambda filenode: filenode.download_to_data())
        d.addCallback(lambda data: self.failUnlessEqual(data, self.data))

        d.addCallback(lambda res: rootnode.get_child_at_path(u"subdir1/mydata567"))
        def _got_filenode(filenode):
            fnode = self.clients[1].create_node_from_uri(filenode.get_uri())
            assert fnode == filenode
        d.addCallback(_got_filenode)
        return d

    def _check_publish_private(self, resnode):
        # this one uses the path-based API
        self._private_node = resnode

        d = self._private_node.get_child_at_path(u"personal")
        def _got_personal(personal):
            self._personal_node = personal
            return personal
        d.addCallback(_got_personal)

        d.addCallback(lambda dirnode:
                      self.failUnless(IDirectoryNode.providedBy(dirnode), dirnode))
        def get_path(path):
            return self._private_node.get_child_at_path(path)

        d.addCallback(lambda res: get_path(u"personal/sekrit data"))
        d.addCallback(lambda filenode: filenode.download_to_data())
        d.addCallback(lambda data: self.failUnlessEqual(data, self.smalldata))
        d.addCallback(lambda res: get_path(u"s2-rw"))
        d.addCallback(lambda dirnode: self.failUnless(dirnode.is_mutable()))
        d.addCallback(lambda res: get_path(u"s2-ro"))
        def _got_s2ro(dirnode):
            self.failUnless(dirnode.is_mutable(), dirnode)
            self.failUnless(dirnode.is_readonly(), dirnode)
            d1 = defer.succeed(None)
            d1.addCallback(lambda res: dirnode.list())
            d1.addCallback(self.log, "dirnode.list")

            d1.addCallback(lambda res: self.shouldFail2(NotMutableError, "mkdir(nope)", None, dirnode.create_empty_directory, u"nope"))

            d1.addCallback(self.log, "doing add_file(ro)")
            ut = upload.Data("I will disappear, unrecorded and unobserved. The tragedy of my demise is made more poignant by its silence, but this beauty is not for you to ever know.", convergence="99i-p1x4-xd4-18yc-ywt-87uu-msu-zo -- completely and totally unguessable string (unless you read this)")
            d1.addCallback(lambda res: self.shouldFail2(NotMutableError, "add_file(nope)", None, dirnode.add_file, u"hope", ut))

            d1.addCallback(self.log, "doing get(ro)")
            d1.addCallback(lambda res: dirnode.get(u"mydata992"))
            d1.addCallback(lambda filenode:
                           self.failUnless(IFileNode.providedBy(filenode)))

            d1.addCallback(self.log, "doing delete(ro)")
            d1.addCallback(lambda res: self.shouldFail2(NotMutableError, "delete(nope)", None, dirnode.delete, u"mydata992"))

            d1.addCallback(lambda res: self.shouldFail2(NotMutableError, "set_uri(nope)", None, dirnode.set_uri, u"hopeless", self.uri))

            d1.addCallback(lambda res: self.shouldFail2(KeyError, "get(missing)", "'missing'", dirnode.get, u"missing"))

            personal = self._personal_node
            d1.addCallback(lambda res: self.shouldFail2(NotMutableError, "mv from readonly", None, dirnode.move_child_to, u"mydata992", personal, u"nope"))

            d1.addCallback(self.log, "doing move_child_to(ro)2")
            d1.addCallback(lambda res: self.shouldFail2(NotMutableError, "mv to readonly", None, personal.move_child_to, u"sekrit data", dirnode, u"nope"))

            d1.addCallback(self.log, "finished with _got_s2ro")
            return d1
        d.addCallback(_got_s2ro)
        def _got_home(dummy):
            home = self._private_node
            personal = self._personal_node
            d1 = defer.succeed(None)
            d1.addCallback(self.log, "mv 'P/personal/sekrit data' to P/sekrit")
            d1.addCallback(lambda res:
                           personal.move_child_to(u"sekrit data", home, u"sekrit"))

            d1.addCallback(self.log, "mv P/sekrit 'P/sekrit data'")
            d1.addCallback(lambda res:
                           home.move_child_to(u"sekrit", home, u"sekrit data"))

            d1.addCallback(self.log, "mv 'P/sekrit data' P/personal/")
            d1.addCallback(lambda res:
                           home.move_child_to(u"sekrit data", personal))

            d1.addCallback(lambda res: home.build_manifest())
            d1.addCallback(self.log, "manifest")
            #  four items:
            # P/personal/
            # P/personal/sekrit data
            # P/s2-rw (same as P/s2-ro)
            # P/s2-rw/mydata992 (same as P/s2-rw/mydata992)
            d1.addCallback(lambda manifest:
                           self.failUnlessEqual(len(manifest), 4))
            return d1
        d.addCallback(_got_home)
        return d

    def shouldFail(self, res, expected_failure, which, substring=None):
        if isinstance(res, Failure):
            res.trap(expected_failure)
            if substring:
                self.failUnless(substring in str(res),
                                "substring '%s' not in '%s'"
                                % (substring, str(res)))
        else:
            self.fail("%s was supposed to raise %s, not get '%s'" %
                      (which, expected_failure, res))

    def shouldFail2(self, expected_failure, which, substring, callable, *args, **kwargs):
        assert substring is None or isinstance(substring, str)
        d = defer.maybeDeferred(callable, *args, **kwargs)
        def done(res):
            if isinstance(res, Failure):
                res.trap(expected_failure)
                if substring:
                    self.failUnless(substring in str(res),
                                    "substring '%s' not in '%s'"
                                    % (substring, str(res)))
            else:
                self.fail("%s was supposed to raise %s, not get '%s'" %
                          (which, expected_failure, res))
        d.addBoth(done)
        return d

    def PUT(self, urlpath, data):
        url = self.webish_url + urlpath
        return getPage(url, method="PUT", postdata=data)

    def GET(self, urlpath, followRedirect=False):
        url = self.webish_url + urlpath
        return getPage(url, method="GET", followRedirect=followRedirect)
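
    # POST() hand-builds a multipart/form-data body: a "_charset" field first,
    # then one part per keyword argument (a (filename, contents) tuple becomes
    # a file-upload part), all joined with the "boogabooga" boundary and sent
    # with twisted.web.client.getPage. use_helper=True targets client[3]'s
    # webserver instead of client[0]'s.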
    def POST(self, urlpath, followRedirect=False, use_helper=False, **fields):
        if use_helper:
            url = self.helper_webish_url + urlpath
        else:
            url = self.webish_url + urlpath
        sepbase = "boogabooga"
        sep = "--" + sepbase
        form = []
        form.append(sep)
        form.append('Content-Disposition: form-data; name="_charset"')
        form.append('')
        form.append('UTF-8')
        form.append(sep)
        for name, value in fields.iteritems():
            if isinstance(value, tuple):
                filename, value = value
                form.append('Content-Disposition: form-data; name="%s"; '
                            'filename="%s"' % (name, filename.encode("utf-8")))
            else:
                form.append('Content-Disposition: form-data; name="%s"' % name)
            form.append('')
            form.append(str(value))
            form.append(sep)
        form[-1] += "--"
        body = "\r\n".join(form) + "\r\n"
        headers = {"content-type": "multipart/form-data; boundary=%s" % sepbase,
                   }
        return getPage(url, method="POST", postdata=body,
                       headers=headers, followRedirect=followRedirect)

    def _test_web(self, res):
        base = self.webish_url
        public = "uri/" + self._root_directory_uri
        d = getPage(base)
        def _got_welcome(page):
            expected = "Connected Storage Servers: <span>%d</span>" % (self.numclients)
            self.failUnless(expected in page,
                            "I didn't see the right 'connected storage servers'"
                            " message in: %s" % page
                            )
            expected = "My nodeid: <span>%s</span>" % (b32encode(self.clients[0].nodeid).lower(),)
            self.failUnless(expected in page,
                            "I didn't see the right 'My nodeid' message "
                            "in: %s" % page)
            self.failUnless("Helper: 0 active uploads" in page)
        d.addCallback(_got_welcome)
        d.addCallback(self.log, "done with _got_welcome")

        # get the welcome page from the node that uses the helper too
        d.addCallback(lambda res: getPage(self.helper_webish_url))
        def _got_welcome_helper(page):
            self.failUnless("Connected to helper?: <span>yes</span>" in page,
                            page)
            self.failUnless("Not running helper" in page)
        d.addCallback(_got_welcome_helper)

        d.addCallback(lambda res: getPage(base + public))
        d.addCallback(lambda res: getPage(base + public + "/subdir1"))
        def _got_subdir1(page):
            # there ought to be an href for our file
            self.failUnless(("<td>%d</td>" % len(self.data)) in page)
            self.failUnless(">mydata567</a>" in page)
        d.addCallback(_got_subdir1)
        d.addCallback(self.log, "done with _got_subdir1")
        d.addCallback(lambda res:
                      getPage(base + public + "/subdir1/mydata567"))
        def _got_data(page):
            self.failUnlessEqual(page, self.data)
        d.addCallback(_got_data)

        # download from a URI embedded in a URL
        d.addCallback(self.log, "_get_from_uri")
        def _get_from_uri(res):
            return getPage(base + "uri/%s?filename=%s"
                           % (self.uri, "mydata567"))
        d.addCallback(_get_from_uri)
        def _got_from_uri(page):
            self.failUnlessEqual(page, self.data)
        d.addCallback(_got_from_uri)

        # download from a URI embedded in a URL, second form
        d.addCallback(self.log, "_get_from_uri2")
        def _get_from_uri2(res):
            return getPage(base + "uri?uri=%s" % (self.uri,))
        d.addCallback(_get_from_uri2)
        d.addCallback(_got_from_uri)

        # download from a bogus URI, make sure we get a reasonable error
        d.addCallback(self.log, "_get_from_bogus_uri", level=log.UNUSUAL)
        def _get_from_bogus_uri(res):
            d1 = getPage(base + "uri/%s?filename=%s"
                         % (self.mangle_uri(self.uri), "mydata567"))
            d1.addBoth(self.shouldFail, Error, "downloading bogus URI",
                       "410")
            return d1
        d.addCallback(_get_from_bogus_uri)
        d.addCallback(self.log, "_got_from_bogus_uri", level=log.UNUSUAL)

        # upload a file with PUT
        d.addCallback(self.log, "about to try PUT")
        d.addCallback(lambda res: self.PUT(public + "/subdir3/new.txt",
                                           "new.txt contents"))
        d.addCallback(lambda res: self.GET(public + "/subdir3/new.txt"))
        d.addCallback(self.failUnlessEqual, "new.txt contents")
        # and again with something large enough to use multiple segments,
        # and hopefully trigger pauseProducing too
        d.addCallback(lambda res: self.PUT(public + "/subdir3/big.txt",
                                           "big" * 500000)) # 1.5MB
        d.addCallback(lambda res: self.GET(public + "/subdir3/big.txt"))
        d.addCallback(lambda res: self.failUnlessEqual(len(res), 1500000))

        # can we replace files in place?
        d.addCallback(lambda res: self.PUT(public + "/subdir3/new.txt",
                                           "NEWER contents"))
        d.addCallback(lambda res: self.GET(public + "/subdir3/new.txt"))
        d.addCallback(self.failUnlessEqual, "NEWER contents")

        # test unlinked POST
        d.addCallback(lambda res: self.POST("uri", t="upload",
                                            file=("new.txt", "data" * 10000)))
        # and again using the helper, which exercises different upload-status
        # display code
        d.addCallback(lambda res: self.POST("uri", use_helper=True, t="upload",
                                            file=("foo.txt", "data2" * 10000)))

        # check that the status page exists
        d.addCallback(lambda res: self.GET("status", followRedirect=True))
        def _got_status(res):
            # find an interesting upload and download to look at. LIT files
            # are not interesting.
            for dl in self.clients[0].list_recent_downloads():
                if dl.get_size() > 200:
                    self._down_status = dl.get_counter()
            for ul in self.clients[0].list_recent_uploads():
                if ul.get_size() > 200:
                    self._up_status = ul.get_counter()
            #rs = self.clients[0].list_recent_retrieve()[0]
            #self._retrieve_status = rs.get_counter()
            #ps = self.clients[0].list_recent_publish()[0]
            #self._publish_status = ps.get_counter()

            # and that there are some upload- and download- status pages
            return self.GET("status/up-%d" % self._up_status)
        d.addCallback(_got_status)
        def _got_up(res):
            return self.GET("status/down-%d" % self._down_status)
        d.addCallback(_got_up)
        def _got_down(res):
            return self.GET("status/publish-%d" % self._publish_status)
        #d.addCallback(_got_down)
        def _got_publish(res):
            return self.GET("status/retrieve-%d" % self._retrieve_status)
        # the retrieve/publish status counters above are commented out, so
        # enabling this callback would fail with an AttributeError:
        #d.addCallback(_got_publish)

        # TODO: mangle the second segment of a file, to test errors that
        # occur after we've already sent some good data, which uses a
        # different error path.

        # TODO: download a URI with a form
        # TODO: create a directory by using a form
        # TODO: upload by using a form on the directory page
        # url = base + "somedir/subdir1/freeform_post!!upload"
        # TODO: delete a file by using a button on the directory page

        return d

    def _test_runner(self, res):
        # exercise some of the diagnostic tools in runner.py

        # find a share
        for (dirpath, dirnames, filenames) in os.walk(self.basedir):
            if "storage" not in dirpath:
                continue
            if not filenames:
                continue
            pieces = dirpath.split(os.sep)
            if pieces[-4] == "storage" and pieces[-3] == "shares":
                # we're sitting in .../storage/shares/$START/$SINDEX , and there
                # are sharefiles here
                filename = os.path.join(dirpath, filenames[0])
                # peek at the magic to see if it is a chk share
                magic = open(filename, "rb").read(4)
                if magic == '\x00\x00\x00\x01':
                    break
        else:
            self.fail("unable to find any uri_extension files in %s"
                      % self.basedir)
        log.msg("test_system.SystemTest._test_runner using %s" % filename)

        out,err = StringIO(), StringIO()
        rc = runner.runner(["dump-share",
                            filename],
                           stdout=out, stderr=err)
        output = out.getvalue()
        self.failUnlessEqual(rc, 0)

        # we only upload a single file, so we can assert some things about
        # its size and shares.
        self.failUnless(("share filename: %s" % filename) in output)
        self.failUnless("size: %d\n" % len(self.data) in output)
        self.failUnless("num_segments: 1\n" in output)
        # segment_size is always a multiple of needed_shares
        self.failUnless("segment_size: %d\n" % mathutil.next_multiple(len(self.data), 3) in output)
        self.failUnless("total_shares: 10\n" in output)
        # keys which are supposed to be present
        for key in ("size", "num_segments", "segment_size",
                    "needed_shares", "total_shares",
                    "codec_name", "codec_params", "tail_codec_params",
                    #"plaintext_hash", "plaintext_root_hash",
                    "crypttext_hash", "crypttext_root_hash",
                    "share_root_hash", "UEB_hash"):
            self.failUnless("%s: " % key in output, key)

        # now use its storage index to find the other shares using the
        # 'find-shares' tool
        sharedir, shnum = os.path.split(filename)
        storagedir, storage_index_s = os.path.split(sharedir)
        out,err = StringIO(), StringIO()
        nodedirs = [self.getdir("client%d" % i) for i in range(self.numclients)]
        cmd = ["find-shares", storage_index_s] + nodedirs
        rc = runner.runner(cmd, stdout=out, stderr=err)
        self.failUnlessEqual(rc, 0)
        out.seek(0)
        sharefiles = [sfn.strip() for sfn in out.readlines()]
        self.failUnlessEqual(len(sharefiles), 10)

        # also exercise the 'catalog-shares' tool
        out,err = StringIO(), StringIO()
        nodedirs = [self.getdir("client%d" % i) for i in range(self.numclients)]
        cmd = ["catalog-shares"] + nodedirs
        rc = runner.runner(cmd, stdout=out, stderr=err)
        self.failUnlessEqual(rc, 0)
        out.seek(0)
        descriptions = [sfn.strip() for sfn in out.readlines()]
        self.failUnlessEqual(len(descriptions), 30)
        matching = [line
                    for line in descriptions
                    if line.startswith("CHK %s " % storage_index_s)]
        self.failUnlessEqual(len(matching), 10)

    def _test_control(self, res):
        # exercise the remote-control-the-client foolscap interfaces in
        # allmydata.control (mostly used for performance tests)
        c0 = self.clients[0]
        control_furl_file = os.path.join(c0.basedir, "private", "control.furl")
        control_furl = open(control_furl_file, "r").read().strip()
        # it doesn't really matter which Tub we use to connect to the client,
        # so let's just use our IntroducerNode's
        d = self.introducer.tub.getReference(control_furl)
        d.addCallback(self._test_control2, control_furl_file)
        return d

    def _test_control2(self, rref, filename):
        d = rref.callRemote("upload_from_file_to_uri", filename, convergence=None)
        downfile = os.path.join(self.basedir, "control.downfile")
        d.addCallback(lambda uri:
                      rref.callRemote("download_from_uri_to_file",
                                      uri, downfile))
        def _check(res):
            self.failUnlessEqual(res, downfile)
            data = open(downfile, "r").read()
            expected_data = open(filename, "r").read()
            self.failUnlessEqual(data, expected_data)
        d.addCallback(_check)
        d.addCallback(lambda res: rref.callRemote("speed_test", 1, 200, False))
        if sys.platform == "linux2":
            d.addCallback(lambda res: rref.callRemote("get_memory_usage"))
        d.addCallback(lambda res: rref.callRemote("measure_peer_response_time"))
        return d

    def _test_cli(self, res):
        # run various CLI commands (in a thread, since they use blocking
        # network calls)

        private_uri = self._private_node.get_uri()
        some_uri = self._root_directory_uri
        client0_basedir = self.getdir("client0")

        nodeargs = [
            "--node-directory", client0_basedir,
            "--dir-cap", private_uri,
            ]
        public_nodeargs = [
            "--node-url", self.webish_url,
            "--dir-cap", some_uri,
            ]
        TESTDATA = "I will not write the same thing over and over.\n" * 100

        d = defer.succeed(None)

        def _ls_root(res):
            argv = ["ls"] + nodeargs
            return self._run_cli(argv)
        d.addCallback(_ls_root)
        def _check_ls_root((out,err)):
            self.failUnless("personal" in out)
            self.failUnless("s2-ro" in out)
            self.failUnless("s2-rw" in out)
            self.failUnlessEqual(err, "")
        d.addCallback(_check_ls_root)

        def _ls_subdir(res):
            argv = ["ls"] + nodeargs + ["personal"]
            return self._run_cli(argv)
        d.addCallback(_ls_subdir)
        def _check_ls_subdir((out,err)):
            self.failUnless("sekrit data" in out)
            self.failUnlessEqual(err, "")
        d.addCallback(_check_ls_subdir)

        def _ls_public_subdir(res):
            argv = ["ls"] + public_nodeargs + ["subdir1"]
            return self._run_cli(argv)
        d.addCallback(_ls_public_subdir)
        def _check_ls_public_subdir((out,err)):
            self.failUnless("subdir2" in out)
            self.failUnless("mydata567" in out)
            self.failUnlessEqual(err, "")
        d.addCallback(_check_ls_public_subdir)

        def _ls_file(res):
            argv = ["ls"] + public_nodeargs + ["subdir1/mydata567"]
            return self._run_cli(argv)
        d.addCallback(_ls_file)
        def _check_ls_file((out,err)):
            self.failUnlessEqual(out.strip(), "112 subdir1/mydata567")
            self.failUnlessEqual(err, "")
        d.addCallback(_check_ls_file)
|
|
|
|
# tahoe_ls doesn't currently handle the error correctly: it tries to
|
|
# JSON-parse a traceback.
|
|
## def _ls_missing(res):
|
|
## argv = ["ls"] + nodeargs + ["bogus"]
|
|
## return self._run_cli(argv)
|
|
## d.addCallback(_ls_missing)
|
|
## def _check_ls_missing((out,err)):
|
|
## print "OUT", out
|
|
## print "ERR", err
|
|
## self.failUnlessEqual(err, "")
|
|
## d.addCallback(_check_ls_missing)
|
|
|
|
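        # the rest of this method is a put/get/mv/rm round trip against the
        # private directory, checking both the status text printed by the CLI
        # and (via direct node access) the resulting directory contents.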
        def _put(res):
            tdir = self.getdir("cli_put")
            fileutil.make_dirs(tdir)
            fn = os.path.join(tdir, "upload_me")
            f = open(fn, "wb")
            f.write(TESTDATA)
            f.close()
            argv = ["put"] + nodeargs + [fn, "test_put/upload.txt"]
            return self._run_cli(argv)
        d.addCallback(_put)
        def _check_put((out,err)):
            self.failUnless("200 OK" in out)
            self.failUnlessEqual(err, "")
            d = self._private_node.get_child_at_path(u"test_put/upload.txt")
            d.addCallback(lambda filenode: filenode.download_to_data())
            def _check_put2(res):
                self.failUnlessEqual(res, TESTDATA)
            d.addCallback(_check_put2)
            return d
        d.addCallback(_check_put)

        def _get_to_stdout(res):
            argv = ["get"] + nodeargs + ["test_put/upload.txt"]
            return self._run_cli(argv)
        d.addCallback(_get_to_stdout)
        def _check_get_to_stdout((out,err)):
            self.failUnlessEqual(out, TESTDATA)
            self.failUnlessEqual(err, "")
        d.addCallback(_check_get_to_stdout)

        get_to_file_target = self.basedir + "/get.downfile"
        def _get_to_file(res):
            argv = ["get"] + nodeargs + ["test_put/upload.txt",
                                         get_to_file_target]
            return self._run_cli(argv)
        d.addCallback(_get_to_file)
        def _check_get_to_file((out,err)):
            data = open(get_to_file_target, "rb").read()
            self.failUnlessEqual(data, TESTDATA)
            self.failUnlessEqual(out, "")
            self.failUnlessEqual(err, "test_put/upload.txt retrieved and written to system/SystemTest/test_vdrive/get.downfile\n")
        d.addCallback(_check_get_to_file)


        def _mv(res):
            argv = ["mv"] + nodeargs + ["test_put/upload.txt",
                                        "test_put/moved.txt"]
            return self._run_cli(argv)
        d.addCallback(_mv)
        def _check_mv((out,err)):
            self.failUnless("OK" in out)
            self.failUnlessEqual(err, "")
d = self.shouldFail2(KeyError, "test_cli._check_rm", "'upload.txt'", self._private_node.get_child_at_path, u"test_put/upload.txt")
|
|
|
|
            d.addCallback(lambda res:
                          self._private_node.get_child_at_path(u"test_put/moved.txt"))
            d.addCallback(lambda filenode: filenode.download_to_data())
            def _check_mv2(res):
                self.failUnlessEqual(res, TESTDATA)
            d.addCallback(_check_mv2)
            return d
        d.addCallback(_check_mv)

        def _rm(res):
            argv = ["rm"] + nodeargs + ["test_put/moved.txt"]
            return self._run_cli(argv)
        d.addCallback(_rm)
        def _check_rm((out,err)):
            self.failUnless("200 OK" in out)
            self.failUnlessEqual(err, "")
            d = self.shouldFail2(KeyError, "test_cli._check_rm", "'moved.txt'", self._private_node.get_child_at_path, u"test_put/moved.txt")
            return d
        d.addCallback(_check_rm)

        return d

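    # helper: run one CLI command in a worker thread (the CLI makes blocking
    # HTTP calls to the webapi) and return the captured (stdout, stderr) pair.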
    def _run_cli(self, argv):
        stdout, stderr = StringIO(), StringIO()
        d = threads.deferToThread(runner.runner, argv, run_by_human=False,
                                  stdout=stdout, stderr=stderr)
        def _done(res):
            return stdout.getvalue(), stderr.getvalue()
        d.addCallback(_done)
        return d

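    # checker tests: build a manifest (the set of storage indexes reachable
    # from the private directory) and ask client 1's "checker" service to
    # check each one.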
    def _test_checker(self, res):
        d = self._private_node.build_manifest()
        d.addCallback(self._test_checker_2)
        return d

    def _test_checker_2(self, manifest):
        checker1 = self.clients[1].getServiceNamed("checker")
        self.failUnlessEqual(checker1.checker_results_for(None), [])
        self.failUnlessEqual(checker1.checker_results_for(list(manifest)[0]),
                             [])
        dl = []
        starting_time = time.time()
        for si in manifest:
            dl.append(checker1.check(si))
        d = deferredutil.DeferredListShouldSucceed(dl)

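        # each check() result is either a bare True (for LIT files, which are
        # stored entirely in their URI and have no shares to check) or a
        # (needed, total, found, sharemap) tuple for a CHK file: 3-of-10
        # encoding, all ten shares found, and the shares spread across every
        # client in the grid.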
        def _check_checker_results(res):
            for i in res:
                if type(i) is bool:
                    self.failUnless(i is True)
                else:
                    (needed, total, found, sharemap) = i
                    self.failUnlessEqual(needed, 3)
                    self.failUnlessEqual(total, 10)
                    self.failUnlessEqual(found, total)
                    self.failUnlessEqual(len(sharemap.keys()), 10)
                    peers = set()
                    for shpeers in sharemap.values():
                        peers.update(shpeers)
                    self.failUnlessEqual(len(peers), self.numclients)
        d.addCallback(_check_checker_results)

        def _check_stored_results(res):
            finish_time = time.time()
            all_results = []
            for si in manifest:
                results = checker1.checker_results_for(si)
                if not results:
                    # TODO: implement checker for mutable files and implement tests of that checker
                    continue
                self.failUnlessEqual(len(results), 1)
                when, those_results = results[0]
                self.failUnless(isinstance(when, (int, float)))
                self.failUnless(starting_time <= when <= finish_time)
                all_results.append(those_results)
            _check_checker_results(all_results)
        d.addCallback(_check_stored_results)

        d.addCallback(self._test_checker_3)
        return d

    def _test_checker_3(self, res):
        # check one file, through FileNode.check()
        d = self._private_node.get_child_at_path(u"personal/sekrit data")
        d.addCallback(lambda n: n.check())
        def _checked(results):
            # 'sekrit data' is small, and fits in a LiteralFileNode, so
            # checking it is trivial and always returns True
            self.failUnlessEqual(results, True)
        d.addCallback(_checked)

        c0 = self.clients[1]
        n = c0.create_node_from_uri(self._root_directory_uri)
        d.addCallback(lambda res: n.get_child_at_path(u"subdir1/mydata567"))
        d.addCallback(lambda n: n.check())
        def _checked2(results):
            # mydata567 is large and lives in a CHK
            (needed, total, found, sharemap) = results
            self.failUnlessEqual(needed, 3)
            self.failUnlessEqual(total, 10)
            self.failUnlessEqual(found, 10)
            self.failUnlessEqual(len(sharemap), 10)
            for shnum in range(10):
                self.failUnlessEqual(len(sharemap[shnum]), 1)
        d.addCallback(_checked2)
        return d


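    # The verifier is a stronger form of the checker: verify() should succeed
    # (return True) for every storage index in the manifest, presumably by
    # examining the shares themselves rather than just confirming that they
    # exist; verify(None) is expected to succeed trivially as well.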
    def _test_verifier(self, res):
        checker1 = self.clients[1].getServiceNamed("checker")
        d = self._private_node.build_manifest()
        def _check_all(manifest):
            dl = []
            for si in manifest:
                dl.append(checker1.verify(si))
            return deferredutil.DeferredListShouldSucceed(dl)
        d.addCallback(_check_all)
        def _done(res):
            for i in res:
                self.failUnless(i is True)
        d.addCallback(_done)
        d.addCallback(lambda res: checker1.verify(None))
        d.addCallback(self.failUnlessEqual, True)
        return d