mirror of
https://github.com/tahoe-lafs/tahoe-lafs.git
synced 2025-01-18 18:56:28 +00:00
Merge branch '20-improve-tests'
This reduces the 'tox -e coverage' test time (on travis) by 40%, from 1711s to 1014s. refs ticket:20
This commit is contained in:
commit
594dd26285
2
setup.py
2
setup.py
@ -260,6 +260,8 @@ setup(name=APPNAME,
|
||||
'allmydata.scripts',
|
||||
'allmydata.storage',
|
||||
'allmydata.test',
|
||||
'allmydata.test.mutable',
|
||||
'allmydata.test.cli',
|
||||
'allmydata.util',
|
||||
'allmydata.web',
|
||||
'allmydata.windows',
|
||||
|
0
src/allmydata/test/cli/__init__.py
Normal file
0
src/allmydata/test/cli/__init__.py
Normal file
@ -12,8 +12,8 @@ from allmydata.util.fileutil import abspath_expanduser_unicode
|
||||
from allmydata.util.encodingutil import get_io_encoding, unicode_to_argv
|
||||
from allmydata.util.namespace import Namespace
|
||||
from allmydata.scripts import cli, backupdb
|
||||
from .common_util import StallMixin
|
||||
from .no_network import GridTestMixin
|
||||
from ..common_util import StallMixin
|
||||
from ..no_network import GridTestMixin
|
||||
from .test_cli import CLITestMixin, parse_options
|
||||
|
||||
timeout = 480 # deep_check takes 360s on Zandr's linksys box, others take > 240s
|
||||
@ -38,7 +38,7 @@ class Backup(GridTestMixin, CLITestMixin, StallMixin, unittest.TestCase):
|
||||
|
||||
def test_backup(self):
|
||||
self.basedir = "cli/Backup/backup"
|
||||
self.set_up_grid()
|
||||
self.set_up_grid(oneshare=True)
|
||||
|
||||
# is the backupdb available? If so, we test that a second backup does
|
||||
# not create new directories.
|
||||
@ -356,7 +356,7 @@ class Backup(GridTestMixin, CLITestMixin, StallMixin, unittest.TestCase):
|
||||
raise unittest.SkipTest("Symlinks are not supported by Python on this platform.")
|
||||
|
||||
self.basedir = os.path.dirname(self.mktemp())
|
||||
self.set_up_grid()
|
||||
self.set_up_grid(oneshare=True)
|
||||
|
||||
source = os.path.join(self.basedir, "home")
|
||||
self.writeto("foo.txt", "foo")
|
||||
@ -387,7 +387,7 @@ class Backup(GridTestMixin, CLITestMixin, StallMixin, unittest.TestCase):
|
||||
|
||||
def test_ignore_unreadable_file(self):
|
||||
self.basedir = os.path.dirname(self.mktemp())
|
||||
self.set_up_grid()
|
||||
self.set_up_grid(oneshare=True)
|
||||
|
||||
source = os.path.join(self.basedir, "home")
|
||||
self.writeto("foo.txt", "foo")
|
||||
@ -421,7 +421,7 @@ class Backup(GridTestMixin, CLITestMixin, StallMixin, unittest.TestCase):
|
||||
|
||||
def test_ignore_unreadable_directory(self):
|
||||
self.basedir = os.path.dirname(self.mktemp())
|
||||
self.set_up_grid()
|
||||
self.set_up_grid(oneshare=True)
|
||||
|
||||
source = os.path.join(self.basedir, "home")
|
||||
os.mkdir(source)
|
||||
@ -457,7 +457,7 @@ class Backup(GridTestMixin, CLITestMixin, StallMixin, unittest.TestCase):
|
||||
# 'tahoe backup' should output a sensible error message when invoked
|
||||
# without an alias instead of a stack trace.
|
||||
self.basedir = os.path.dirname(self.mktemp())
|
||||
self.set_up_grid()
|
||||
self.set_up_grid(oneshare=True)
|
||||
source = os.path.join(self.basedir, "file1")
|
||||
d = self.do_cli('backup', source, source)
|
||||
def _check((rc, out, err)):
|
||||
@ -471,7 +471,7 @@ class Backup(GridTestMixin, CLITestMixin, StallMixin, unittest.TestCase):
|
||||
# 'tahoe backup' should output a sensible error message when invoked
|
||||
# with a nonexistent alias.
|
||||
self.basedir = os.path.dirname(self.mktemp())
|
||||
self.set_up_grid()
|
||||
self.set_up_grid(oneshare=True)
|
||||
source = os.path.join(self.basedir, "file1")
|
||||
d = self.do_cli("backup", source, "nonexistent:" + source)
|
||||
def _check((rc, out, err)):
|
@ -9,7 +9,7 @@ from allmydata.util.encodingutil import quote_output, to_str
|
||||
from allmydata.mutable.publish import MutableData
|
||||
from allmydata.immutable import upload
|
||||
from allmydata.scripts import debug
|
||||
from .no_network import GridTestMixin
|
||||
from ..no_network import GridTestMixin
|
||||
from .test_cli import CLITestMixin
|
||||
|
||||
timeout = 480 # deep_check takes 360s on Zandr's linksys box, others take > 240s
|
||||
@ -358,7 +358,7 @@ class Check(GridTestMixin, CLITestMixin, unittest.TestCase):
|
||||
# 'tahoe check' should output a sensible error message if it needs to
|
||||
# find the default alias and can't
|
||||
self.basedir = "cli/Check/check_without_alias"
|
||||
self.set_up_grid()
|
||||
self.set_up_grid(oneshare=True)
|
||||
d = self.do_cli("check")
|
||||
def _check((rc, out, err)):
|
||||
self.failUnlessReallyEqual(rc, 1)
|
||||
@ -373,7 +373,7 @@ class Check(GridTestMixin, CLITestMixin, unittest.TestCase):
|
||||
# 'tahoe check' should output a sensible error message if it needs to
|
||||
# find an alias and can't.
|
||||
self.basedir = "cli/Check/check_with_nonexistent_alias"
|
||||
self.set_up_grid()
|
||||
self.set_up_grid(oneshare=True)
|
||||
d = self.do_cli("check", "nonexistent:")
|
||||
def _check((rc, out, err)):
|
||||
self.failUnlessReallyEqual(rc, 1)
|
||||
@ -385,7 +385,7 @@ class Check(GridTestMixin, CLITestMixin, unittest.TestCase):
|
||||
|
||||
def test_check_with_multiple_aliases(self):
|
||||
self.basedir = "cli/Check/check_with_multiple_aliases"
|
||||
self.set_up_grid()
|
||||
self.set_up_grid(oneshare=True)
|
||||
self.uriList = []
|
||||
c0 = self.g.clients[0]
|
||||
d = c0.create_dirnode()
|
@ -29,8 +29,8 @@ from allmydata.scripts.common import DEFAULT_ALIAS, get_aliases, get_alias, \
|
||||
DefaultAliasMarker
|
||||
|
||||
from allmydata.scripts import cli, debug, runner
|
||||
from allmydata.test.common_util import ReallyEqualMixin
|
||||
from allmydata.test.no_network import GridTestMixin
|
||||
from ..common_util import ReallyEqualMixin
|
||||
from ..no_network import GridTestMixin
|
||||
from twisted.internet import threads # CLI tests use deferToThread
|
||||
from twisted.python import usage
|
||||
|
||||
@ -714,7 +714,7 @@ class Ln(GridTestMixin, CLITestMixin, unittest.TestCase):
|
||||
# exist, 'tahoe ln' should output a useful error message and not
|
||||
# a stack trace
|
||||
self.basedir = "cli/Ln/ln_without_alias"
|
||||
self.set_up_grid()
|
||||
self.set_up_grid(oneshare=True)
|
||||
d = self.do_cli("ln", "from", "to")
|
||||
def _check((rc, out, err)):
|
||||
self.failUnlessReallyEqual(rc, 1)
|
||||
@ -734,7 +734,7 @@ class Ln(GridTestMixin, CLITestMixin, unittest.TestCase):
|
||||
# If invoked with aliases that don't exist, 'tahoe ln' should
|
||||
# output a useful error message and not a stack trace.
|
||||
self.basedir = "cli/Ln/ln_with_nonexistent_alias"
|
||||
self.set_up_grid()
|
||||
self.set_up_grid(oneshare=True)
|
||||
d = self.do_cli("ln", "havasu:from", "havasu:to")
|
||||
def _check((rc, out, err)):
|
||||
self.failUnlessReallyEqual(rc, 1)
|
||||
@ -845,7 +845,7 @@ class Errors(GridTestMixin, CLITestMixin, unittest.TestCase):
|
||||
# When the http connection breaks (such as when node.url is overwritten
|
||||
# by a confused user), a user friendly error message should be printed.
|
||||
self.basedir = "cli/Errors/test_broken_socket"
|
||||
self.set_up_grid()
|
||||
self.set_up_grid(oneshare=True)
|
||||
|
||||
# Simulate a connection error
|
||||
def _socket_error(*args, **kwargs):
|
||||
@ -867,7 +867,7 @@ class Get(GridTestMixin, CLITestMixin, unittest.TestCase):
|
||||
# without an explicit alias and when the default 'tahoe' alias
|
||||
# hasn't been created yet.
|
||||
self.basedir = "cli/Get/get_without_alias"
|
||||
self.set_up_grid()
|
||||
self.set_up_grid(oneshare=True)
|
||||
d = self.do_cli('get', 'file')
|
||||
def _check((rc, out, err)):
|
||||
self.failUnlessReallyEqual(rc, 1)
|
||||
@ -880,7 +880,7 @@ class Get(GridTestMixin, CLITestMixin, unittest.TestCase):
|
||||
# 'tahoe get' should output a useful error message when invoked with
|
||||
# an explicit alias that doesn't exist.
|
||||
self.basedir = "cli/Get/get_with_nonexistent_alias"
|
||||
self.set_up_grid()
|
||||
self.set_up_grid(oneshare=True)
|
||||
d = self.do_cli("get", "nonexistent:file")
|
||||
def _check((rc, out, err)):
|
||||
self.failUnlessReallyEqual(rc, 1)
|
||||
@ -897,7 +897,7 @@ class Manifest(GridTestMixin, CLITestMixin, unittest.TestCase):
|
||||
# without an explicit alias when the default 'tahoe' alias is
|
||||
# missing.
|
||||
self.basedir = "cli/Manifest/manifest_without_alias"
|
||||
self.set_up_grid()
|
||||
self.set_up_grid(oneshare=True)
|
||||
d = self.do_cli("manifest")
|
||||
def _check((rc, out, err)):
|
||||
self.failUnlessReallyEqual(rc, 1)
|
||||
@ -910,7 +910,7 @@ class Manifest(GridTestMixin, CLITestMixin, unittest.TestCase):
|
||||
# 'tahoe manifest' should output a useful error message when invoked
|
||||
# with an explicit alias that doesn't exist.
|
||||
self.basedir = "cli/Manifest/manifest_with_nonexistent_alias"
|
||||
self.set_up_grid()
|
||||
self.set_up_grid(oneshare=True)
|
||||
d = self.do_cli("manifest", "nonexistent:")
|
||||
def _check((rc, out, err)):
|
||||
self.failUnlessReallyEqual(rc, 1)
|
||||
@ -924,7 +924,7 @@ class Manifest(GridTestMixin, CLITestMixin, unittest.TestCase):
|
||||
class Mkdir(GridTestMixin, CLITestMixin, unittest.TestCase):
|
||||
def test_mkdir(self):
|
||||
self.basedir = os.path.dirname(self.mktemp())
|
||||
self.set_up_grid()
|
||||
self.set_up_grid(oneshare=True)
|
||||
|
||||
d = self.do_cli("create-alias", "tahoe")
|
||||
d.addCallback(lambda res: self.do_cli("mkdir", "test"))
|
||||
@ -938,7 +938,7 @@ class Mkdir(GridTestMixin, CLITestMixin, unittest.TestCase):
|
||||
|
||||
def test_mkdir_mutable_type(self):
|
||||
self.basedir = os.path.dirname(self.mktemp())
|
||||
self.set_up_grid()
|
||||
self.set_up_grid(oneshare=True)
|
||||
d = self.do_cli("create-alias", "tahoe")
|
||||
def _check((rc, out, err), st):
|
||||
self.failUnlessReallyEqual(rc, 0)
|
||||
@ -967,7 +967,7 @@ class Mkdir(GridTestMixin, CLITestMixin, unittest.TestCase):
|
||||
|
||||
def test_mkdir_mutable_type_unlinked(self):
|
||||
self.basedir = os.path.dirname(self.mktemp())
|
||||
self.set_up_grid()
|
||||
self.set_up_grid(oneshare=True)
|
||||
d = self.do_cli("mkdir", "--format=SDMF")
|
||||
def _check((rc, out, err), st):
|
||||
self.failUnlessReallyEqual(rc, 0)
|
||||
@ -1003,7 +1003,7 @@ class Mkdir(GridTestMixin, CLITestMixin, unittest.TestCase):
|
||||
|
||||
def test_mkdir_unicode(self):
|
||||
self.basedir = os.path.dirname(self.mktemp())
|
||||
self.set_up_grid()
|
||||
self.set_up_grid(oneshare=True)
|
||||
|
||||
try:
|
||||
motorhead_arg = u"tahoe:Mot\u00F6rhead".encode(get_io_encoding())
|
||||
@ -1024,7 +1024,7 @@ class Mkdir(GridTestMixin, CLITestMixin, unittest.TestCase):
|
||||
# when invoked with an alias that doesn't exist, 'tahoe mkdir' should
|
||||
# output a sensible error message rather than a stack trace.
|
||||
self.basedir = "cli/Mkdir/mkdir_with_nonexistent_alias"
|
||||
self.set_up_grid()
|
||||
self.set_up_grid(oneshare=True)
|
||||
d = self.do_cli("mkdir", "havasu:")
|
||||
def _check((rc, out, err)):
|
||||
self.failUnlessReallyEqual(rc, 1)
|
||||
@ -1047,7 +1047,7 @@ class Unlink(GridTestMixin, CLITestMixin, unittest.TestCase):
|
||||
# 'tahoe unlink' should behave sensibly when invoked without an explicit
|
||||
# alias before the default 'tahoe' alias has been created.
|
||||
self.basedir = "cli/Unlink/%s_without_alias" % (self.command,)
|
||||
self.set_up_grid()
|
||||
self.set_up_grid(oneshare=True)
|
||||
d = self.do_cli(self.command, "afile")
|
||||
def _check((rc, out, err)):
|
||||
self.failUnlessReallyEqual(rc, 1)
|
||||
@ -1063,7 +1063,7 @@ class Unlink(GridTestMixin, CLITestMixin, unittest.TestCase):
|
||||
# 'tahoe unlink' should behave sensibly when invoked with an explicit
|
||||
# alias that doesn't exist.
|
||||
self.basedir = "cli/Unlink/%s_with_nonexistent_alias" % (self.command,)
|
||||
self.set_up_grid()
|
||||
self.set_up_grid(oneshare=True)
|
||||
d = self.do_cli(self.command, "nonexistent:afile")
|
||||
def _check((rc, out, err)):
|
||||
self.failUnlessReallyEqual(rc, 1)
|
||||
@ -1079,7 +1079,7 @@ class Unlink(GridTestMixin, CLITestMixin, unittest.TestCase):
|
||||
def test_unlink_without_path(self):
|
||||
# 'tahoe unlink' should give a sensible error message when invoked without a path.
|
||||
self.basedir = "cli/Unlink/%s_without_path" % (self.command,)
|
||||
self.set_up_grid()
|
||||
self.set_up_grid(oneshare=True)
|
||||
self._create_test_file()
|
||||
d = self.do_cli("create-alias", "tahoe")
|
||||
d.addCallback(lambda ign: self.do_cli("put", self.datafile, "tahoe:test"))
|
||||
@ -1106,7 +1106,7 @@ class Rm(Unlink):
|
||||
class Stats(GridTestMixin, CLITestMixin, unittest.TestCase):
|
||||
def test_empty_directory(self):
|
||||
self.basedir = "cli/Stats/empty_directory"
|
||||
self.set_up_grid()
|
||||
self.set_up_grid(oneshare=True)
|
||||
c0 = self.g.clients[0]
|
||||
self.fileurls = {}
|
||||
d = c0.create_dirnode()
|
||||
@ -1136,7 +1136,7 @@ class Stats(GridTestMixin, CLITestMixin, unittest.TestCase):
|
||||
# alias is created, 'tahoe stats' should output an informative error
|
||||
# message, not a stack trace.
|
||||
self.basedir = "cli/Stats/stats_without_alias"
|
||||
self.set_up_grid()
|
||||
self.set_up_grid(oneshare=True)
|
||||
d = self.do_cli("stats")
|
||||
def _check((rc, out, err)):
|
||||
self.failUnlessReallyEqual(rc, 1)
|
||||
@ -1149,7 +1149,7 @@ class Stats(GridTestMixin, CLITestMixin, unittest.TestCase):
|
||||
# when invoked with an explicit alias that doesn't exist,
|
||||
# 'tahoe stats' should output a useful error message.
|
||||
self.basedir = "cli/Stats/stats_with_nonexistent_alias"
|
||||
self.set_up_grid()
|
||||
self.set_up_grid(oneshare=True)
|
||||
d = self.do_cli("stats", "havasu:")
|
||||
def _check((rc, out, err)):
|
||||
self.failUnlessReallyEqual(rc, 1)
|
||||
@ -1165,7 +1165,7 @@ class Webopen(GridTestMixin, CLITestMixin, unittest.TestCase):
|
||||
# should output an informative error message instead of a stack
|
||||
# trace.
|
||||
self.basedir = "cli/Webopen/webopen_with_nonexistent_alias"
|
||||
self.set_up_grid()
|
||||
self.set_up_grid(oneshare=True)
|
||||
d = self.do_cli("webopen", "fake:")
|
||||
def _check((rc, out, err)):
|
||||
self.failUnlessReallyEqual(rc, 1)
|
||||
@ -1189,7 +1189,7 @@ class Webopen(GridTestMixin, CLITestMixin, unittest.TestCase):
|
||||
webbrowser.open = call_webbrowser_open
|
||||
|
||||
self.basedir = "cli/Webopen/webopen"
|
||||
self.set_up_grid()
|
||||
self.set_up_grid(oneshare=True)
|
||||
d = self.do_cli("create-alias", "alias:")
|
||||
def _check_alias((rc, out, err)):
|
||||
self.failUnlessReallyEqual(rc, 0, repr((rc, out, err)))
|
@ -8,7 +8,7 @@ from allmydata.util import fileutil
|
||||
from allmydata.util.encodingutil import (quote_output, get_io_encoding,
|
||||
unicode_to_output, to_str)
|
||||
from allmydata.util.assertutil import _assert
|
||||
from .no_network import GridTestMixin
|
||||
from ..no_network import GridTestMixin
|
||||
from .test_cli import CLITestMixin
|
||||
|
||||
timeout = 480 # deep_check takes 360s on Zandr's linksys box, others take > 240s
|
||||
@ -32,7 +32,7 @@ class Cp(GridTestMixin, CLITestMixin, unittest.TestCase):
|
||||
|
||||
self.skip_if_cannot_represent_filename(fn1)
|
||||
|
||||
self.set_up_grid()
|
||||
self.set_up_grid(oneshare=True)
|
||||
|
||||
DATA1 = "unicode file content"
|
||||
fileutil.write(fn1, DATA1)
|
||||
@ -76,7 +76,7 @@ class Cp(GridTestMixin, CLITestMixin, unittest.TestCase):
|
||||
|
||||
# cp -r on a directory containing a dangling symlink shouldn't assert
|
||||
self.basedir = "cli/Cp/dangling_symlink_vs_recursion"
|
||||
self.set_up_grid()
|
||||
self.set_up_grid(oneshare=True)
|
||||
dn = os.path.join(self.basedir, "dir")
|
||||
os.mkdir(dn)
|
||||
fn = os.path.join(dn, "Fakebandica")
|
||||
@ -90,7 +90,7 @@ class Cp(GridTestMixin, CLITestMixin, unittest.TestCase):
|
||||
|
||||
def test_copy_using_filecap(self):
|
||||
self.basedir = "cli/Cp/test_copy_using_filecap"
|
||||
self.set_up_grid()
|
||||
self.set_up_grid(oneshare=True)
|
||||
outdir = os.path.join(self.basedir, "outdir")
|
||||
os.mkdir(outdir)
|
||||
fn1 = os.path.join(self.basedir, "Metallica")
|
||||
@ -172,7 +172,7 @@ class Cp(GridTestMixin, CLITestMixin, unittest.TestCase):
|
||||
# when invoked with an alias or aliases that don't exist, 'tahoe cp'
|
||||
# should output a sensible error message rather than a stack trace.
|
||||
self.basedir = "cli/Cp/cp_with_nonexistent_alias"
|
||||
self.set_up_grid()
|
||||
self.set_up_grid(oneshare=True)
|
||||
d = self.do_cli("cp", "fake:file1", "fake:file2")
|
||||
def _check((rc, out, err)):
|
||||
self.failUnlessReallyEqual(rc, 1)
|
||||
@ -200,7 +200,7 @@ class Cp(GridTestMixin, CLITestMixin, unittest.TestCase):
|
||||
|
||||
self.skip_if_cannot_represent_filename(fn1)
|
||||
|
||||
self.set_up_grid()
|
||||
self.set_up_grid(oneshare=True)
|
||||
|
||||
d = self.do_cli("create-alias", "tahoe")
|
||||
d.addCallback(lambda res: self.do_cli("mkdir", "tahoe:test/" + artonwall_arg))
|
||||
@ -224,7 +224,7 @@ class Cp(GridTestMixin, CLITestMixin, unittest.TestCase):
|
||||
|
||||
def test_cp_replaces_mutable_file_contents(self):
|
||||
self.basedir = "cli/Cp/cp_replaces_mutable_file_contents"
|
||||
self.set_up_grid()
|
||||
self.set_up_grid(oneshare=True)
|
||||
|
||||
# Write a test file, which we'll copy to the grid.
|
||||
test_txt_path = os.path.join(self.basedir, "test.txt")
|
||||
@ -447,7 +447,7 @@ class Cp(GridTestMixin, CLITestMixin, unittest.TestCase):
|
||||
# tahoe cp should print an error when asked to overwrite a
|
||||
# mutable file that it can't overwrite.
|
||||
self.basedir = "cli/Cp/overwrite_readonly_mutable_file"
|
||||
self.set_up_grid()
|
||||
self.set_up_grid(oneshare=True)
|
||||
|
||||
# This is our initial file. We'll link its readcap into the
|
||||
# tahoe: alias.
|
||||
@ -574,7 +574,7 @@ class Cp(GridTestMixin, CLITestMixin, unittest.TestCase):
|
||||
|
||||
def test_cp_verbose(self):
|
||||
self.basedir = "cli/Cp/cp_verbose"
|
||||
self.set_up_grid()
|
||||
self.set_up_grid(oneshare=True)
|
||||
|
||||
# Write two test files, which we'll copy to the grid.
|
||||
test1_path = os.path.join(self.basedir, "test1")
|
||||
@ -606,7 +606,7 @@ starting copy, 2 files, 1 directories
|
||||
# https://tahoe-lafs.org/trac/tahoe-lafs/ticket/712
|
||||
|
||||
self.basedir = "cli/Cp/cp_copies_dir"
|
||||
self.set_up_grid()
|
||||
self.set_up_grid(oneshare=True)
|
||||
subdir = os.path.join(self.basedir, "foo")
|
||||
os.mkdir(subdir)
|
||||
test1_path = os.path.join(subdir, "test1")
|
||||
@ -640,7 +640,7 @@ starting copy, 2 files, 1 directories
|
||||
# a local directory without a specified file name.
|
||||
# https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2027
|
||||
self.basedir = "cli/Cp/cp_verbose"
|
||||
self.set_up_grid()
|
||||
self.set_up_grid(oneshare=True)
|
||||
|
||||
# Write a test file, which we'll copy to the grid.
|
||||
test1_path = os.path.join(self.basedir, "test1")
|
||||
@ -1008,7 +1008,7 @@ class CopyOut(GridTestMixin, CLITestMixin, unittest.TestCase):
|
||||
def test_cp_out(self):
|
||||
# test copying all sorts of things out of a tahoe filesystem
|
||||
self.basedir = "cli_cp/CopyOut/cp_out"
|
||||
self.set_up_grid(num_servers=1)
|
||||
self.set_up_grid(num_servers=1, oneshare=True)
|
||||
|
||||
d = self.do_setup()
|
||||
d.addCallback(lambda ign: self.do_tests())
|
@ -4,7 +4,7 @@ import urllib
|
||||
from allmydata.util import fileutil
|
||||
from allmydata.scripts.common import get_aliases
|
||||
from allmydata.scripts import cli, runner
|
||||
from allmydata.test.no_network import GridTestMixin
|
||||
from ..no_network import GridTestMixin
|
||||
from allmydata.util.encodingutil import quote_output, get_io_encoding
|
||||
from .test_cli import CLITestMixin
|
||||
|
||||
@ -24,7 +24,7 @@ class CreateAlias(GridTestMixin, CLITestMixin, unittest.TestCase):
|
||||
|
||||
def test_create(self):
|
||||
self.basedir = "cli/CreateAlias/create"
|
||||
self.set_up_grid()
|
||||
self.set_up_grid(oneshare=True)
|
||||
aliasfile = os.path.join(self.get_clientdir(), "private", "aliases")
|
||||
|
||||
d = self.do_cli("create-alias", "tahoe")
|
||||
@ -148,7 +148,7 @@ class CreateAlias(GridTestMixin, CLITestMixin, unittest.TestCase):
|
||||
|
||||
def test_create_unicode(self):
|
||||
self.basedir = "cli/CreateAlias/create_unicode"
|
||||
self.set_up_grid()
|
||||
self.set_up_grid(oneshare=True)
|
||||
|
||||
try:
|
||||
etudes_arg = u"\u00E9tudes".encode(get_io_encoding())
|
@ -4,7 +4,7 @@ from twisted.internet import defer
|
||||
from allmydata.immutable import upload
|
||||
from allmydata.interfaces import MDMF_VERSION, SDMF_VERSION
|
||||
from allmydata.mutable.publish import MutableData
|
||||
from allmydata.test.no_network import GridTestMixin
|
||||
from ..no_network import GridTestMixin
|
||||
from allmydata.util.encodingutil import quote_output, get_io_encoding
|
||||
from .test_cli import CLITestMixin
|
||||
|
||||
@ -165,7 +165,7 @@ class List(GridTestMixin, CLITestMixin, unittest.TestCase):
|
||||
# doing just 'tahoe ls' without specifying an alias or first
|
||||
# doing 'tahoe create-alias tahoe' should fail gracefully.
|
||||
self.basedir = "cli/List/list_without_alias"
|
||||
self.set_up_grid()
|
||||
self.set_up_grid(oneshare=True)
|
||||
d = self.do_cli("ls")
|
||||
def _check((rc, out, err)):
|
||||
self.failUnlessReallyEqual(rc, 1)
|
||||
@ -178,7 +178,7 @@ class List(GridTestMixin, CLITestMixin, unittest.TestCase):
|
||||
# doing 'tahoe ls' while specifying an alias that doesn't already
|
||||
# exist should fail with an informative error message
|
||||
self.basedir = "cli/List/list_with_nonexistent_alias"
|
||||
self.set_up_grid()
|
||||
self.set_up_grid(oneshare=True)
|
||||
d = self.do_cli("ls", "nonexistent:")
|
||||
def _check((rc, out, err)):
|
||||
self.failUnlessReallyEqual(rc, 1)
|
||||
@ -245,7 +245,7 @@ class List(GridTestMixin, CLITestMixin, unittest.TestCase):
|
||||
def test_list_mdmf(self):
|
||||
# 'tahoe ls' should include MDMF files.
|
||||
self.basedir = "cli/List/list_mdmf"
|
||||
self.set_up_grid()
|
||||
self.set_up_grid(oneshare=True)
|
||||
d = self._create_directory_structure()
|
||||
d.addCallback(lambda ignored:
|
||||
self.do_cli("ls", self._dircap))
|
||||
@ -262,7 +262,7 @@ class List(GridTestMixin, CLITestMixin, unittest.TestCase):
|
||||
# 'tahoe ls' should include MDMF caps when invoked with MDMF
|
||||
# caps.
|
||||
self.basedir = "cli/List/list_mdmf_json"
|
||||
self.set_up_grid()
|
||||
self.set_up_grid(oneshare=True)
|
||||
d = self._create_directory_structure()
|
||||
d.addCallback(lambda ignored:
|
||||
self.do_cli("ls", "--json", self._dircap))
|
@ -9,7 +9,7 @@ from twisted.python import usage
|
||||
from allmydata.util.assertutil import precondition
|
||||
from allmydata.util import fileutil
|
||||
from allmydata.scripts.common import get_aliases
|
||||
from allmydata.test.no_network import GridTestMixin
|
||||
from ..no_network import GridTestMixin
|
||||
from .test_cli import CLITestMixin
|
||||
from allmydata.scripts import magic_folder_cli
|
||||
from allmydata.util.fileutil import abspath_expanduser_unicode
|
||||
@ -135,7 +135,7 @@ class MagicFolderCLITestMixin(CLITestMixin, GridTestMixin):
|
||||
return magicfolder
|
||||
|
||||
def setup_alice_and_bob(self, alice_clock=reactor, bob_clock=reactor):
|
||||
self.set_up_grid(num_clients=2)
|
||||
self.set_up_grid(num_clients=2, oneshare=True)
|
||||
|
||||
self.alice_magicfolder = None
|
||||
self.bob_magicfolder = None
|
||||
@ -188,7 +188,7 @@ class MagicFolderCLITestMixin(CLITestMixin, GridTestMixin):
|
||||
class CreateMagicFolder(MagicFolderCLITestMixin, unittest.TestCase):
|
||||
def test_create_and_then_invite_join(self):
|
||||
self.basedir = "cli/MagicFolder/create-and-then-invite-join"
|
||||
self.set_up_grid()
|
||||
self.set_up_grid(oneshare=True)
|
||||
local_dir = os.path.join(self.basedir, "magic")
|
||||
abs_local_dir_u = abspath_expanduser_unicode(unicode(local_dir), long_path=False)
|
||||
|
||||
@ -207,7 +207,7 @@ class CreateMagicFolder(MagicFolderCLITestMixin, unittest.TestCase):
|
||||
|
||||
def test_create_error(self):
|
||||
self.basedir = "cli/MagicFolder/create-error"
|
||||
self.set_up_grid()
|
||||
self.set_up_grid(oneshare=True)
|
||||
|
||||
d = self.do_cli("magic-folder", "create", "m a g i c:", client_num=0)
|
||||
def _done((rc, stdout, stderr)):
|
||||
@ -218,7 +218,7 @@ class CreateMagicFolder(MagicFolderCLITestMixin, unittest.TestCase):
|
||||
|
||||
def test_create_invite_join(self):
|
||||
self.basedir = "cli/MagicFolder/create-invite-join"
|
||||
self.set_up_grid()
|
||||
self.set_up_grid(oneshare=True)
|
||||
local_dir = os.path.join(self.basedir, "magic")
|
||||
abs_local_dir_u = abspath_expanduser_unicode(unicode(local_dir), long_path=False)
|
||||
|
||||
@ -262,7 +262,7 @@ class CreateMagicFolder(MagicFolderCLITestMixin, unittest.TestCase):
|
||||
def test_join_twice_failure(self):
|
||||
self.basedir = "cli/MagicFolder/create-join-twice-failure"
|
||||
os.makedirs(self.basedir)
|
||||
self.set_up_grid()
|
||||
self.set_up_grid(oneshare=True)
|
||||
local_dir = os.path.join(self.basedir, "magic")
|
||||
abs_local_dir_u = abspath_expanduser_unicode(unicode(local_dir), long_path=False)
|
||||
|
||||
@ -292,7 +292,7 @@ class CreateMagicFolder(MagicFolderCLITestMixin, unittest.TestCase):
|
||||
def test_join_leave_join(self):
|
||||
self.basedir = "cli/MagicFolder/create-join-leave-join"
|
||||
os.makedirs(self.basedir)
|
||||
self.set_up_grid()
|
||||
self.set_up_grid(oneshare=True)
|
||||
local_dir = os.path.join(self.basedir, "magic")
|
||||
abs_local_dir_u = abspath_expanduser_unicode(unicode(local_dir), long_path=False)
|
||||
|
||||
@ -323,7 +323,7 @@ class CreateMagicFolder(MagicFolderCLITestMixin, unittest.TestCase):
|
||||
def test_join_failures(self):
|
||||
self.basedir = "cli/MagicFolder/create-join-failures"
|
||||
os.makedirs(self.basedir)
|
||||
self.set_up_grid()
|
||||
self.set_up_grid(oneshare=True)
|
||||
local_dir = os.path.join(self.basedir, "magic")
|
||||
abs_local_dir_u = abspath_expanduser_unicode(unicode(local_dir), long_path=False)
|
||||
|
@ -1,7 +1,7 @@
|
||||
import os.path
|
||||
from twisted.trial import unittest
|
||||
from allmydata.util import fileutil
|
||||
from allmydata.test.no_network import GridTestMixin
|
||||
from ..no_network import GridTestMixin
|
||||
from allmydata.scripts import tahoe_mv
|
||||
from .test_cli import CLITestMixin
|
||||
|
||||
@ -10,7 +10,7 @@ timeout = 480 # deep_check takes 360s on Zandr's linksys box, others take > 240s
|
||||
class Mv(GridTestMixin, CLITestMixin, unittest.TestCase):
|
||||
def test_mv_behavior(self):
|
||||
self.basedir = "cli/Mv/mv_behavior"
|
||||
self.set_up_grid()
|
||||
self.set_up_grid(oneshare=True)
|
||||
fn1 = os.path.join(self.basedir, "file1")
|
||||
DATA1 = "Nuclear launch codes"
|
||||
fileutil.write(fn1, DATA1)
|
||||
@ -104,7 +104,7 @@ class Mv(GridTestMixin, CLITestMixin, unittest.TestCase):
|
||||
|
||||
def test_mv_error_if_DELETE_fails(self):
|
||||
self.basedir = "cli/Mv/mv_error_if_DELETE_fails"
|
||||
self.set_up_grid()
|
||||
self.set_up_grid(oneshare=True)
|
||||
fn1 = os.path.join(self.basedir, "file1")
|
||||
DATA1 = "Nuclear launch codes"
|
||||
fileutil.write(fn1, DATA1)
|
||||
@ -147,7 +147,7 @@ class Mv(GridTestMixin, CLITestMixin, unittest.TestCase):
|
||||
# creating the default 'tahoe' alias should fail with a useful
|
||||
# error message.
|
||||
self.basedir = "cli/Mv/mv_without_alias"
|
||||
self.set_up_grid()
|
||||
self.set_up_grid(oneshare=True)
|
||||
d = self.do_cli("mv", "afile", "anotherfile")
|
||||
def _check((rc, out, err)):
|
||||
self.failUnlessReallyEqual(rc, 1)
|
||||
@ -173,7 +173,7 @@ class Mv(GridTestMixin, CLITestMixin, unittest.TestCase):
|
||||
# doing 'tahoe mv' with an alias that doesn't exist should fail
|
||||
# with an informative error message.
|
||||
self.basedir = "cli/Mv/mv_with_nonexistent_alias"
|
||||
self.set_up_grid()
|
||||
self.set_up_grid(oneshare=True)
|
||||
d = self.do_cli("mv", "fake:afile", "fake:anotherfile")
|
||||
def _check((rc, out, err)):
|
||||
self.failUnlessReallyEqual(rc, 1)
|
@ -5,7 +5,7 @@ from twisted.python import usage
|
||||
from allmydata.util import fileutil
|
||||
from allmydata.scripts.common import get_aliases
|
||||
from allmydata.scripts import cli
|
||||
from allmydata.test.no_network import GridTestMixin
|
||||
from ..no_network import GridTestMixin
|
||||
from allmydata.util.encodingutil import get_io_encoding, unicode_to_argv
|
||||
from allmydata.util.fileutil import abspath_expanduser_unicode
|
||||
from .test_cli import CLITestMixin
|
||||
@ -19,7 +19,7 @@ class Put(GridTestMixin, CLITestMixin, unittest.TestCase):
|
||||
# tahoe get `echo DATA | tahoe put -`
|
||||
self.basedir = "cli/Put/unlinked_immutable_stdin"
|
||||
DATA = "data" * 100
|
||||
self.set_up_grid()
|
||||
self.set_up_grid(oneshare=True)
|
||||
d = self.do_cli("put", stdin=DATA)
|
||||
def _uploaded(res):
|
||||
(rc, out, err) = res
|
||||
@ -45,7 +45,7 @@ class Put(GridTestMixin, CLITestMixin, unittest.TestCase):
|
||||
# tahoe put /tmp/file.txt
|
||||
# tahoe put ~/file.txt
|
||||
self.basedir = "cli/Put/unlinked_immutable_from_file"
|
||||
self.set_up_grid()
|
||||
self.set_up_grid(oneshare=True)
|
||||
|
||||
rel_fn = os.path.join(self.basedir, "DATAFILE")
|
||||
abs_fn = unicode_to_argv(abspath_expanduser_unicode(unicode(rel_fn)))
|
||||
@ -75,7 +75,7 @@ class Put(GridTestMixin, CLITestMixin, unittest.TestCase):
|
||||
# tahoe put file.txt DIRCAP:./uploaded.txt
|
||||
# tahoe put file.txt DIRCAP:./subdir/uploaded.txt
|
||||
self.basedir = "cli/Put/immutable_from_file"
|
||||
self.set_up_grid()
|
||||
self.set_up_grid(oneshare=True)
|
||||
|
||||
rel_fn = os.path.join(self.basedir, "DATAFILE")
|
||||
# we make the file small enough to fit in a LIT file, for speed
|
||||
@ -154,7 +154,7 @@ class Put(GridTestMixin, CLITestMixin, unittest.TestCase):
|
||||
# tahoe get FILECAP, compare against DATA2
|
||||
# tahoe put file.txt FILECAP
|
||||
self.basedir = "cli/Put/mutable_unlinked"
|
||||
self.set_up_grid()
|
||||
self.set_up_grid(oneshare=True)
|
||||
|
||||
DATA = "data" * 100
|
||||
DATA2 = "two" * 100
|
||||
@ -200,7 +200,7 @@ class Put(GridTestMixin, CLITestMixin, unittest.TestCase):
|
||||
# tahoe get uploaded.txt, compare against DATA2
|
||||
|
||||
self.basedir = "cli/Put/mutable"
|
||||
self.set_up_grid()
|
||||
self.set_up_grid(oneshare=True)
|
||||
|
||||
DATA1 = "data" * 100
|
||||
fn1 = os.path.join(self.basedir, "DATA1")
|
||||
@ -259,7 +259,7 @@ class Put(GridTestMixin, CLITestMixin, unittest.TestCase):
|
||||
|
||||
def test_format(self):
|
||||
self.basedir = "cli/Put/format"
|
||||
self.set_up_grid()
|
||||
self.set_up_grid(oneshare=True)
|
||||
data = "data" * 40000 # 160kB total, two segments
|
||||
fn1 = os.path.join(self.basedir, "data")
|
||||
fileutil.write(fn1, data)
|
||||
@ -326,7 +326,7 @@ class Put(GridTestMixin, CLITestMixin, unittest.TestCase):
|
||||
|
||||
def test_put_to_mdmf_cap(self):
|
||||
self.basedir = "cli/Put/put_to_mdmf_cap"
|
||||
self.set_up_grid()
|
||||
self.set_up_grid(oneshare=True)
|
||||
data = "data" * 100000
|
||||
fn1 = os.path.join(self.basedir, "data")
|
||||
fileutil.write(fn1, data)
|
||||
@ -372,7 +372,7 @@ class Put(GridTestMixin, CLITestMixin, unittest.TestCase):
|
||||
|
||||
def test_put_to_sdmf_cap(self):
|
||||
self.basedir = "cli/Put/put_to_sdmf_cap"
|
||||
self.set_up_grid()
|
||||
self.set_up_grid(oneshare=True)
|
||||
data = "data" * 100000
|
||||
fn1 = os.path.join(self.basedir, "data")
|
||||
fileutil.write(fn1, data)
|
||||
@ -410,7 +410,7 @@ class Put(GridTestMixin, CLITestMixin, unittest.TestCase):
|
||||
# when invoked with an alias that doesn't exist, 'tahoe put'
|
||||
# should output a useful error message, not a stack trace
|
||||
self.basedir = "cli/Put/put_with_nonexistent_alias"
|
||||
self.set_up_grid()
|
||||
self.set_up_grid(oneshare=True)
|
||||
d = self.do_cli("put", "somefile", "fake:afile")
|
||||
def _check((rc, out, err)):
|
||||
self.failUnlessReallyEqual(rc, 1)
|
||||
@ -430,7 +430,7 @@ class Put(GridTestMixin, CLITestMixin, unittest.TestCase):
|
||||
self.skip_if_cannot_represent_filename(u"\u00E0 trier.txt")
|
||||
|
||||
self.basedir = "cli/Put/immutable_from_file_unicode"
|
||||
self.set_up_grid()
|
||||
self.set_up_grid(oneshare=True)
|
||||
|
||||
rel_fn = os.path.join(unicode(self.basedir), u"\u00E0 trier.txt")
|
||||
# we make the file small enough to fit in a LIT file, for speed
|
@ -5,9 +5,7 @@ from twisted.internet.interfaces import IPullProducer
|
||||
from twisted.python import failure
|
||||
from twisted.application import service
|
||||
from twisted.web.error import Error as WebError
|
||||
from foolscap.api import flushEventualQueue, fireEventually
|
||||
from allmydata import uri, client
|
||||
from allmydata.introducer.server import IntroducerNode
|
||||
from allmydata import uri
|
||||
from allmydata.interfaces import IMutableFileNode, IImmutableFileNode,\
|
||||
NotEnoughSharesError, ICheckable, \
|
||||
IMutableUploadable, SDMF_VERSION, \
|
||||
@ -18,22 +16,14 @@ from allmydata.storage_client import StubServer
|
||||
from allmydata.mutable.layout import unpack_header
|
||||
from allmydata.mutable.publish import MutableData
|
||||
from allmydata.storage.mutable import MutableShareFile
|
||||
from allmydata.util import hashutil, log, fileutil, pollmixin, iputil
|
||||
from allmydata.util import hashutil, log
|
||||
from allmydata.util.assertutil import precondition
|
||||
from allmydata.util.consumer import download_to_data
|
||||
from allmydata.stats import StatsGathererService
|
||||
import allmydata.test.common_util as testutil
|
||||
from allmydata import immutable
|
||||
from allmydata.immutable.upload import Uploader
|
||||
|
||||
TEST_RSA_KEY_SIZE = 522
|
||||
|
||||
def flush_but_dont_ignore(res):
|
||||
d = flushEventualQueue()
|
||||
def _done(ignored):
|
||||
return res
|
||||
d.addCallback(_done)
|
||||
return d
|
||||
|
||||
class DummyProducer:
|
||||
implements(IPullProducer)
|
||||
def resumeProducing(self):
|
||||
@ -433,573 +423,8 @@ class LoggingServiceParent(service.MultiService):
|
||||
def log(self, *args, **kwargs):
|
||||
return log.msg(*args, **kwargs)
|
||||
|
||||
class SystemTestMixin(pollmixin.PollMixin, testutil.StallMixin):
|
||||
|
||||
# SystemTestMixin tests tend to be a lot of work, and we have a few
|
||||
# buildslaves that are pretty slow, and every once in a while these tests
|
||||
# run up against the default 120 second timeout. So increase the default
|
||||
# timeout. Individual test cases can override this, of course.
|
||||
timeout = 300
|
||||
|
||||
def setUp(self):
|
||||
self.sparent = service.MultiService()
|
||||
self.sparent.startService()
|
||||
|
||||
self.stats_gatherer = None
|
||||
self.stats_gatherer_furl = None
|
||||
|
||||
def tearDown(self):
|
||||
log.msg("shutting down SystemTest services")
|
||||
d = self.sparent.stopService()
|
||||
d.addBoth(flush_but_dont_ignore)
|
||||
return d
|
||||
|
||||
def getdir(self, subdir):
|
||||
return os.path.join(self.basedir, subdir)
|
||||
|
||||
def add_service(self, s):
|
||||
s.setServiceParent(self.sparent)
|
||||
return s
|
||||
|
||||
def set_up_nodes(self, NUMCLIENTS=5, use_stats_gatherer=False):
|
||||
self.numclients = NUMCLIENTS
|
||||
iv_dir = self.getdir("introducer")
|
||||
if not os.path.isdir(iv_dir):
|
||||
fileutil.make_dirs(iv_dir)
|
||||
fileutil.write(os.path.join(iv_dir, 'tahoe.cfg'),
|
||||
"[node]\n" +
|
||||
u"nickname = introducer \u263A\n".encode('utf-8') +
|
||||
"web.port = tcp:0:interface=127.0.0.1\n")
|
||||
if SYSTEM_TEST_CERTS:
|
||||
os.mkdir(os.path.join(iv_dir, "private"))
|
||||
f = open(os.path.join(iv_dir, "private", "node.pem"), "w")
|
||||
f.write(SYSTEM_TEST_CERTS[0])
|
||||
f.close()
|
||||
iv = IntroducerNode(basedir=iv_dir)
|
||||
self.introducer = self.add_service(iv)
|
||||
self._get_introducer_web()
|
||||
d = defer.succeed(None)
|
||||
if use_stats_gatherer:
|
||||
d.addCallback(self._set_up_stats_gatherer)
|
||||
d.addCallback(self._set_up_nodes_2)
|
||||
if use_stats_gatherer:
|
||||
d.addCallback(self._grab_stats)
|
||||
return d
|
||||
|
||||
def _get_introducer_web(self):
|
||||
f = open(os.path.join(self.getdir("introducer"), "node.url"), "r")
|
||||
self.introweb_url = f.read().strip()
|
||||
f.close()
|
||||
|
||||
def _set_up_stats_gatherer(self, res):
|
||||
statsdir = self.getdir("stats_gatherer")
|
||||
fileutil.make_dirs(statsdir)
|
||||
portnum = iputil.allocate_tcp_port()
|
||||
location = "tcp:127.0.0.1:%d" % portnum
|
||||
fileutil.write(os.path.join(statsdir, "location"), location)
|
||||
port = "tcp:%d:interface=127.0.0.1" % portnum
|
||||
fileutil.write(os.path.join(statsdir, "port"), port)
|
||||
self.stats_gatherer_svc = StatsGathererService(statsdir)
|
||||
self.stats_gatherer = self.stats_gatherer_svc.stats_gatherer
|
||||
self.add_service(self.stats_gatherer_svc)
|
||||
|
||||
d = fireEventually()
|
||||
sgf = os.path.join(statsdir, 'stats_gatherer.furl')
|
||||
def check_for_furl():
|
||||
return os.path.exists(sgf)
|
||||
d.addCallback(lambda junk: self.poll(check_for_furl, timeout=30))
|
||||
def get_furl(junk):
|
||||
self.stats_gatherer_furl = file(sgf, 'rb').read().strip()
|
||||
d.addCallback(get_furl)
|
||||
return d
|
||||
|
||||
def _set_up_nodes_2(self, res):
|
||||
q = self.introducer
|
||||
self.introducer_furl = q.introducer_url
|
||||
self.clients = []
|
||||
basedirs = []
|
||||
for i in range(self.numclients):
|
||||
basedir = self.getdir("client%d" % i)
|
||||
basedirs.append(basedir)
|
||||
fileutil.make_dirs(os.path.join(basedir, "private"))
|
||||
if len(SYSTEM_TEST_CERTS) > (i+1):
|
||||
f = open(os.path.join(basedir, "private", "node.pem"), "w")
|
||||
f.write(SYSTEM_TEST_CERTS[i+1])
|
||||
f.close()
|
||||
|
||||
config = "[client]\n"
|
||||
config += "introducer.furl = %s\n" % self.introducer_furl
|
||||
if self.stats_gatherer_furl:
|
||||
config += "stats_gatherer.furl = %s\n" % self.stats_gatherer_furl
|
||||
|
||||
nodeconfig = "[node]\n"
|
||||
nodeconfig += (u"nickname = client %d \u263A\n" % (i,)).encode('utf-8')
|
||||
tub_port = iputil.allocate_tcp_port()
|
||||
# Don't let it use AUTO: there's no need for tests to use
|
||||
# anything other than 127.0.0.1
|
||||
nodeconfig += "tub.port = tcp:%d\n" % tub_port
|
||||
nodeconfig += "tub.location = tcp:127.0.0.1:%d\n" % tub_port
|
||||
|
||||
if i == 0:
|
||||
# clients[0] runs a webserver and a helper
|
||||
config += nodeconfig
|
||||
config += "web.port = tcp:0:interface=127.0.0.1\n"
|
||||
config += "timeout.keepalive = 600\n"
|
||||
config += "[helper]\n"
|
||||
config += "enabled = True\n"
|
||||
elif i == 3:
|
||||
# clients[3] runs a webserver and uses a helper
|
||||
config += nodeconfig
|
||||
config += "web.port = tcp:0:interface=127.0.0.1\n"
|
||||
config += "timeout.disconnect = 1800\n"
|
||||
else:
|
||||
config += nodeconfig
|
||||
|
||||
fileutil.write(os.path.join(basedir, 'tahoe.cfg'), config)
|
||||
|
||||
# give subclasses a chance to append lines to the node's tahoe.cfg
|
||||
# files before they are launched.
|
||||
self._set_up_nodes_extra_config()
|
||||
|
||||
# start clients[0], wait for it's tub to be ready (at which point it
|
||||
# will have registered the helper furl).
|
||||
c = self.add_service(client.Client(basedir=basedirs[0]))
|
||||
self.clients.append(c)
|
||||
c.set_default_mutable_keysize(TEST_RSA_KEY_SIZE)
|
||||
|
||||
f = open(os.path.join(basedirs[0],"private","helper.furl"), "r")
|
||||
helper_furl = f.read()
|
||||
f.close()
|
||||
self.helper_furl = helper_furl
|
||||
if self.numclients >= 4:
|
||||
f = open(os.path.join(basedirs[3], 'tahoe.cfg'), 'ab+')
|
||||
f.write(
|
||||
"[client]\n"
|
||||
"helper.furl = %s\n" % helper_furl)
|
||||
f.close()
|
||||
|
||||
# this starts the rest of the clients
|
||||
for i in range(1, self.numclients):
|
||||
c = self.add_service(client.Client(basedir=basedirs[i]))
|
||||
self.clients.append(c)
|
||||
c.set_default_mutable_keysize(TEST_RSA_KEY_SIZE)
|
||||
log.msg("STARTING")
|
||||
d = self.wait_for_connections()
|
||||
def _connected(res):
|
||||
log.msg("CONNECTED")
|
||||
# now find out where the web port was
|
||||
self.webish_url = self.clients[0].getServiceNamed("webish").getURL()
|
||||
if self.numclients >=4:
|
||||
# and the helper-using webport
|
||||
self.helper_webish_url = self.clients[3].getServiceNamed("webish").getURL()
|
||||
d.addCallback(_connected)
|
||||
return d
|
||||
|
||||
def _set_up_nodes_extra_config(self):
|
||||
# for overriding by subclasses
|
||||
pass
|
||||
|
||||
def _grab_stats(self, res):
|
||||
d = self.stats_gatherer.poll()
|
||||
return d
|
||||
|
||||
def bounce_client(self, num):
|
||||
c = self.clients[num]
|
||||
d = c.disownServiceParent()
|
||||
# I think windows requires a moment to let the connection really stop
|
||||
# and the port number made available for re-use. TODO: examine the
|
||||
# behavior, see if this is really the problem, see if we can do
|
||||
# better than blindly waiting for a second.
|
||||
d.addCallback(self.stall, 1.0)
|
||||
def _stopped(res):
|
||||
new_c = client.Client(basedir=self.getdir("client%d" % num))
|
||||
self.clients[num] = new_c
|
||||
new_c.set_default_mutable_keysize(TEST_RSA_KEY_SIZE)
|
||||
self.add_service(new_c)
|
||||
d.addCallback(_stopped)
|
||||
d.addCallback(lambda res: self.wait_for_connections())
|
||||
def _maybe_get_webport(res):
|
||||
if num == 0:
|
||||
# now find out where the web port was
|
||||
self.webish_url = self.clients[0].getServiceNamed("webish").getURL()
|
||||
d.addCallback(_maybe_get_webport)
|
||||
return d
|
||||
|
||||
def add_extra_node(self, client_num, helper_furl=None,
|
||||
add_to_sparent=False):
|
||||
# usually this node is *not* parented to our self.sparent, so we can
|
||||
# shut it down separately from the rest, to exercise the
|
||||
# connection-lost code
|
||||
basedir = self.getdir("client%d" % client_num)
|
||||
if not os.path.isdir(basedir):
|
||||
fileutil.make_dirs(basedir)
|
||||
config = "[client]\n"
|
||||
config += "introducer.furl = %s\n" % self.introducer_furl
|
||||
if helper_furl:
|
||||
config += "helper.furl = %s\n" % helper_furl
|
||||
fileutil.write(os.path.join(basedir, 'tahoe.cfg'), config)
|
||||
|
||||
c = client.Client(basedir=basedir)
|
||||
self.clients.append(c)
|
||||
c.set_default_mutable_keysize(TEST_RSA_KEY_SIZE)
|
||||
self.numclients += 1
|
||||
if add_to_sparent:
|
||||
c.setServiceParent(self.sparent)
|
||||
else:
|
||||
c.startService()
|
||||
d = self.wait_for_connections()
|
||||
d.addCallback(lambda res: c)
|
||||
return d
|
||||
|
||||
def _check_connections(self):
|
||||
for c in self.clients:
|
||||
if not c.connected_to_introducer():
|
||||
return False
|
||||
sb = c.get_storage_broker()
|
||||
if len(sb.get_connected_servers()) != self.numclients:
|
||||
return False
|
||||
up = c.getServiceNamed("uploader")
|
||||
if up._helper_furl and not up._helper:
|
||||
return False
|
||||
return True
|
||||
|
||||
def wait_for_connections(self, ignored=None):
|
||||
return self.poll(self._check_connections, timeout=200)
|
||||
|
||||
|
||||
# our system test uses the same Tub certificates each time, to avoid the
|
||||
# overhead of key generation
|
||||
SYSTEM_TEST_CERTS = [
|
||||
"""-----BEGIN CERTIFICATE-----
|
||||
MIIBnjCCAQcCAgCEMA0GCSqGSIb3DQEBBAUAMBcxFTATBgNVBAMUDG5ld3BiX3Ro
|
||||
aW5neTAeFw0wODA3MjUyMjQyMDVaFw0wOTA3MjUyMjQyMDVaMBcxFTATBgNVBAMU
|
||||
DG5ld3BiX3RoaW5neTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAxHCWajrR
|
||||
2h/iurw8k93m8WUdE3xypJiiAITw7GkKlKbCLD+dEce2MXwVVYca0n/MZZsj89Cu
|
||||
Ko0lLjksMseoSDoj98iEmVpaY5mc2ntpQ+FXdoEmPP234XRWEg2HQ+EaK6+WkGQg
|
||||
DDXQvFJCVCQk/n1MdAwZZ6vqf2ITzSuD44kCAwEAATANBgkqhkiG9w0BAQQFAAOB
|
||||
gQBn6qPKGdFjWJy7sOOTUFfm/THhHQqAh1pBDLkjR+OtzuobCoP8n8J1LNG3Yxds
|
||||
Jj7NWQL7X5TfOlfoi7e9jK0ujGgWh3yYU6PnHzJLkDiDT3LCSywQuGXCjh0tOStS
|
||||
2gaCmmAK2cfxSStKzNcewl2Zs8wHMygq8TLFoZ6ozN1+xQ==
|
||||
-----END CERTIFICATE-----
|
||||
-----BEGIN RSA PRIVATE KEY-----
|
||||
MIICXQIBAAKBgQDEcJZqOtHaH+K6vDyT3ebxZR0TfHKkmKIAhPDsaQqUpsIsP50R
|
||||
x7YxfBVVhxrSf8xlmyPz0K4qjSUuOSwyx6hIOiP3yISZWlpjmZzae2lD4Vd2gSY8
|
||||
/bfhdFYSDYdD4Rorr5aQZCAMNdC8UkJUJCT+fUx0DBlnq+p/YhPNK4PjiQIDAQAB
|
||||
AoGAZyDMdrymiyMOPwavrtlicvyohSBid3MCKc+hRBvpSB0790r2RO1aAySndp1V
|
||||
QYmCXx1RhKDbrs8m49t0Dryu5T+sQrFl0E3usAP3vvXWeh4jwJ9GyiRWy4xOEuEQ
|
||||
3ewjbEItHqA/bRJF0TNtbOmZTDC7v9FRPf2bTAyFfTZep5kCQQD33q1RA8WUYtmQ
|
||||
IArgHqt69i421lpXlOgqotFHwTx4FiGgVzDQCDuXU6txB9EeKRM340poissav/n6
|
||||
bkLZ7/VDAkEAyuIPkeI59sE5NnmW+N47NbCfdM1Smy1YxZpv942EmP9Veub5N0dw
|
||||
iK5bLAgEguUIjpTsh3BRmsE9Xd+ItmnRQwJBAMZhbg19G1EbnE0BmDKv2UbcaThy
|
||||
bnPSNc6J6T2opqDl9ZvCrMqTDD6dNIWOYAvni/4a556sFsoeBBAu10peBskCQE6S
|
||||
cB86cuJagLLVMh/dySaI6ahNoFFSpY+ZuQUxfInYUR2Q+DFtbGqyw8JwtHaRBthZ
|
||||
WqU1XZVGg2KooISsxIsCQQD1PS7//xHLumBb0jnpL7n6W8gmiTyzblT+0otaCisP
|
||||
fN6rTlwV1o8VsOUAz0rmKO5RArCbkmb01WtMgPCDBYkk
|
||||
-----END RSA PRIVATE KEY-----
|
||||
""", # 0
|
||||
"""-----BEGIN CERTIFICATE-----
|
||||
MIIBnjCCAQcCAgCEMA0GCSqGSIb3DQEBBAUAMBcxFTATBgNVBAMUDG5ld3BiX3Ro
|
||||
aW5neTAeFw0wODA3MjUyMjQyMDVaFw0wOTA3MjUyMjQyMDVaMBcxFTATBgNVBAMU
|
||||
DG5ld3BiX3RoaW5neTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAs9CALdmW
|
||||
kJ6r0KPSLdGCA8rzQKxWayrMckT22ZtbRv3aw6VA96dWclpY+T2maV0LrAzmMSL8
|
||||
n61ydJHM33iYDOyWbwHWN45XCjY/e20PL54XUl/DmbBHEhQVQLIfCldcRcnWEfoO
|
||||
iOhDJfWpDO1dmP/aOYLdkZCZvBtPAfyUqRcCAwEAATANBgkqhkiG9w0BAQQFAAOB
|
||||
gQAN9eaCREkzzk4yPIaWYkWHg3Igs1vnOR/iDw3OjyxO/xJFP2lkA2WtrwL2RTRq
|
||||
dxA8gwdPyrWgdiZElwZH8mzTJ4OdUXLSMclLOg9kvH6gtSvhLztfEDwDP1wRhikh
|
||||
OeWWu2GIC+uqFCI1ftoGgU+aIa6yrHswf66rrQvBSSvJPQ==
|
||||
-----END CERTIFICATE-----
|
||||
-----BEGIN RSA PRIVATE KEY-----
|
||||
MIICXQIBAAKBgQCz0IAt2ZaQnqvQo9It0YIDyvNArFZrKsxyRPbZm1tG/drDpUD3
|
||||
p1ZyWlj5PaZpXQusDOYxIvyfrXJ0kczfeJgM7JZvAdY3jlcKNj97bQ8vnhdSX8OZ
|
||||
sEcSFBVAsh8KV1xFydYR+g6I6EMl9akM7V2Y/9o5gt2RkJm8G08B/JSpFwIDAQAB
|
||||
AoGBAIUy5zCPpSP+FeJY6CG+t6Pdm/IFd4KtUoM3KPCrT6M3+uzApm6Ny9Crsor2
|
||||
qyYTocjSSVaOxzn1fvpw4qWLrH1veUf8ozMs8Z0VuPHD1GYUGjOXaBPXb5o1fQL9
|
||||
h7pS5/HrDDPN6wwDNTsxRf/fP58CnfwQUhwdoxcx8TnVmDQxAkEA6N3jBXt/Lh0z
|
||||
UbXHhv3QBOcqLZA2I4tY7wQzvUvKvVmCJoW1tfhBdYQWeQv0jzjL5PzrrNY8hC4l
|
||||
8+sFM3h5TwJBAMWtbFIEZfRSG1JhHK3evYHDTZnr/j+CdoWuhzP5RkjkIKsiLEH7
|
||||
2ZhA7CdFQLZF14oXy+g1uVCzzfB2WELtUbkCQQDKrb1XWzrBlzbAipfkXWs9qTmj
|
||||
uJ32Z+V6+0xRGPOXxJ0sDDqw7CeFMfchWg98zLFiV+SEZV78qPHtkAPR3ayvAkB+
|
||||
hUMhM4N13t9x2IoclsXAOhp++9bdG0l0woHyuAdOPATUw6iECwf4NQVxFRgYEZek
|
||||
4Ro3Y7taddrHn1dabr6xAkAic47OoLOROYLpljmJJO0eRe3Z5IFe+0D2LfhAW3LQ
|
||||
JU+oGq5pCjfnoaDElRRZn0+GmunnWeQEYKoflTi/lI9d
|
||||
-----END RSA PRIVATE KEY-----
|
||||
""", # 1
|
||||
"""-----BEGIN CERTIFICATE-----
|
||||
MIIBnjCCAQcCAgCEMA0GCSqGSIb3DQEBBAUAMBcxFTATBgNVBAMUDG5ld3BiX3Ro
|
||||
aW5neTAeFw0wODA3MjUyMjQyMDZaFw0wOTA3MjUyMjQyMDZaMBcxFTATBgNVBAMU
|
||||
DG5ld3BiX3RoaW5neTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAsxG7LTrz
|
||||
DF+9wegOR/BRJhjSumPUbYQnNAUKtPraFsGjAJILP44AHdnHt1MONLgTeX1ynapo
|
||||
q6O/q5cdKtBB7uEh7FpkLCCwpZt/m0y79cynn8AmWoQVgl8oS0567UmPeJnTzFPv
|
||||
dmT5dlaQALeX5YGceAsEvhmAsdOMttaor38CAwEAATANBgkqhkiG9w0BAQQFAAOB
|
||||
gQA345rxotfvh2kfgrmRzAyGewVBV4r23Go30GSZir8X2GoH3qKNwO4SekAohuSw
|
||||
AiXzLUbwIdSRSqaLFxSC7Duqc9eIeFDAWjeEmpfFLBNiw3K8SLA00QrHCUXnECTD
|
||||
b/Kk6OGuvPOiuuONVjEuEcRdCH3/Li30D0AhJaMynjhQJQ==
|
||||
-----END CERTIFICATE-----
|
||||
-----BEGIN RSA PRIVATE KEY-----
|
||||
MIICXQIBAAKBgQCzEbstOvMMX73B6A5H8FEmGNK6Y9RthCc0BQq0+toWwaMAkgs/
|
||||
jgAd2ce3Uw40uBN5fXKdqmiro7+rlx0q0EHu4SHsWmQsILClm3+bTLv1zKefwCZa
|
||||
hBWCXyhLTnrtSY94mdPMU+92ZPl2VpAAt5flgZx4CwS+GYCx04y21qivfwIDAQAB
|
||||
AoGBAIlhFg/aRPL+VM9539LzHN60dp8GzceDdqwjHhbAySZiQlLCuJx2rcI4/U65
|
||||
CpIJku9G/fLV9N2RkA/trDPXeGyqCTJfnNzyZcvvMscRMFqSGyc21Y0a+GS8bIxt
|
||||
1R2B18epSVMsWSWWMypeEgsfv29LV7oSWG8UKaqQ9+0h63DhAkEA4i2L/rori/Fb
|
||||
wpIBfA+xbXL/GmWR7xPW+3nG3LdLQpVzxz4rIsmtO9hIXzvYpcufQbwgVACyMmRf
|
||||
TMABeSDM7wJBAMquEdTaVXjGfH0EJ7z95Ys2rYTiCXjBfyEOi6RXXReqV9SXNKlN
|
||||
aKsO22zYecpkAjY1EdUdXWP/mNVEybjpZnECQQCcuh0JPS5RwcTo9c2rjyBOjGIz
|
||||
g3B1b5UIG2FurmCrWe6pgO3ZJFEzZ/L2cvz0Hj5UCa2JKBZTDvRutZoPumfnAkAb
|
||||
nSW+y1Rz1Q8m9Ub4v9rjYbq4bRd/RVWtyk6KQIDldYbr5wH8wxgsniSVKtVFFuUa
|
||||
P5bDY3HS6wMGo42cTOhxAkAcdweQSQ3j7mfc5vh71HeAC1v/VAKGehGOUdeEIQNl
|
||||
Sb2WuzpZkbfsrVzW6MdlgY6eE7ufRswhDPLWPC8MP0d1
|
||||
-----END RSA PRIVATE KEY-----
|
||||
""", # 2
|
||||
"""-----BEGIN CERTIFICATE-----
|
||||
MIIBnjCCAQcCAgCEMA0GCSqGSIb3DQEBBAUAMBcxFTATBgNVBAMUDG5ld3BiX3Ro
|
||||
aW5neTAeFw0wODA3MjUyMjQyMDZaFw0wOTA3MjUyMjQyMDZaMBcxFTATBgNVBAMU
|
||||
DG5ld3BiX3RoaW5neTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAxnH+pbOS
|
||||
qlJlsHpKUQtV0oN1Mv+ESG+yUDxStFFGjkJv/UIRzpxqFqY/6nJ3D03kZsDdcXyi
|
||||
CfV9hPYQaVNMn6z+puPmIagfBQ0aOyuI+nUhCttZIYD9071BjW5bCMX5NZWL/CZm
|
||||
E0HdAZ77H6UrRckJ7VR8wAFpihBxD5WliZcCAwEAATANBgkqhkiG9w0BAQQFAAOB
|
||||
gQAwXqY1Sjvp9JSTHKklu7s0T6YmH/BKSXrHpS2xO69svK+ze5/+5td3jPn4Qe50
|
||||
xwRNZSFmSLuJLfCO32QJSJTB7Vs5D3dNTZ2i8umsaodm97t8hit7L75nXRGHKH//
|
||||
xDVWAFB9sSgCQyPMRkL4wB4YSfRhoSKVwMvaz+XRZDUU0A==
|
||||
-----END CERTIFICATE-----
|
||||
-----BEGIN RSA PRIVATE KEY-----
|
||||
MIICXAIBAAKBgQDGcf6ls5KqUmWwekpRC1XSg3Uy/4RIb7JQPFK0UUaOQm/9QhHO
|
||||
nGoWpj/qcncPTeRmwN1xfKIJ9X2E9hBpU0yfrP6m4+YhqB8FDRo7K4j6dSEK21kh
|
||||
gP3TvUGNblsIxfk1lYv8JmYTQd0BnvsfpStFyQntVHzAAWmKEHEPlaWJlwIDAQAB
|
||||
AoGAdHNMlXwtItm7ZrY8ihZ2xFP0IHsk60TwhHkBp2LSXoTKJvnwbSgIcUYZ18BX
|
||||
8Zkp4MpoqEIU7HcssyuaMdR572huV2w0D/2gYJQLQ5JapaR3hMox3YG4wjXasN1U
|
||||
1iZt7JkhKlOy+ElL5T9mKTE1jDsX2RAv4WALzMpYFo7vs4ECQQDxqrPaqRQ5uYS/
|
||||
ejmIk05nM3Q1zmoLtMDrfRqrjBhaf/W3hqGihiqN2kL3PIIYcxSRWiyNlYXjElsR
|
||||
2sllBTe3AkEA0jcMHVThwKt1+Ce5VcE7N6hFfbsgISTjfJ+Q3K2NkvJkmtE8ZRX5
|
||||
XprssnPN8owkfF5yuKbcSZL3uvaaSGN9IQJAfTVnN9wwOXQwHhDSbDt9/KRBCnum
|
||||
n+gHqDrKLaVJHOJ9SZf8eLswoww5c+UqtkYxmtlwie61Tp+9BXQosilQ4wJBAIZ1
|
||||
XVNZmriBM4jR59L5MOZtxF0ilu98R+HLsn3kqLyIPF9mXCoQPxwLHkEan213xFKk
|
||||
mt6PJDIPRlOZLqAEuuECQFQMCrn0VUwPg8E40pxMwgMETvVflPs/oZK1Iu+b7+WY
|
||||
vBptAyhMu31fHQFnJpiUOyHqSZnOZyEn1Qu2lszNvUg=
|
||||
-----END RSA PRIVATE KEY-----
|
||||
""", # 3
|
||||
"""-----BEGIN CERTIFICATE-----
|
||||
MIIBnjCCAQcCAgCEMA0GCSqGSIb3DQEBBAUAMBcxFTATBgNVBAMUDG5ld3BiX3Ro
|
||||
aW5neTAeFw0wODA3MjUyMjQyMDZaFw0wOTA3MjUyMjQyMDZaMBcxFTATBgNVBAMU
|
||||
DG5ld3BiX3RoaW5neTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAnjiOwipn
|
||||
jigDuNMfNG/tBJhPwYUHhSbQdvrTubhsxw1oOq5XpNqUwRtC8hktOKM3hghyqExP
|
||||
62EOi0aJBkRhtwtPSLBCINptArZLfkog/nTIqVv4eLEzJ19nTi/llHHWKcgA6XTI
|
||||
sU/snUhGlySA3RpETvXqIJTauQRZz0kToSUCAwEAATANBgkqhkiG9w0BAQQFAAOB
|
||||
gQCQ+u/CsX5WC5m0cLrpyIS6qZa62lrB3mj9H1aIQhisT5kRsMz3FJ1aOaS8zPRz
|
||||
w0jhyRmamCcSsWf5WK539iOtsXbKMdAyjNtkQO3g+fnsLgmznAjjst24jfr+XU59
|
||||
0amiy1U6TY93gtEBZHtiLldPdUMsTuFbBlqbcMBQ50x9rA==
|
||||
-----END CERTIFICATE-----
|
||||
-----BEGIN RSA PRIVATE KEY-----
|
||||
MIICXAIBAAKBgQCeOI7CKmeOKAO40x80b+0EmE/BhQeFJtB2+tO5uGzHDWg6rlek
|
||||
2pTBG0LyGS04ozeGCHKoTE/rYQ6LRokGRGG3C09IsEIg2m0Ctkt+SiD+dMipW/h4
|
||||
sTMnX2dOL+WUcdYpyADpdMixT+ydSEaXJIDdGkRO9eoglNq5BFnPSROhJQIDAQAB
|
||||
AoGAAPrst3s3xQOucjismtCOsVaYN+SxFTwWUoZfRWlFEz6cBLELzfOktEWM9p79
|
||||
TrqEH4px22UNobGqO2amdql5yXwEFVhYQkRB8uDA8uVaqpL8NLWTGPRXxZ2DSU+n
|
||||
7/FLf/TWT3ti/ZtXaPVRj6E2/Mq9AVEVOjUYzkNjM02OxcECQQDKEqmPbdZq2URU
|
||||
7RbUxkq5aTp8nzAgbpUsgBGQ9PDAymhj60BDEP0q28Ssa7tU70pRnQ3AZs9txgmL
|
||||
kK2g97FNAkEAyHH9cIb6qXOAJPIr/xamFGr5uuYw9TJPz/hfVkVimW/aZnBB+e6Q
|
||||
oALJBDKJWeYPzdNbouJYg8MeU0qWdZ5DOQJADUk+1sxc/bd9U6wnBSRog1pU2x7I
|
||||
VkmPC1b8ULCaJ8LnLDKqjf5O9wNuIfwPXB1DoKwX3F+mIcyUkhWYJO5EPQJAUj5D
|
||||
KMqZSrGzYHVlC/M1Daee88rDR7fu+3wDUhiCDkbQq7tftrbl7GF4LRq3NIWq8l7I
|
||||
eJq6isWiSbaO6Y+YMQJBAJFBpVhlY5Px2BX5+Hsfq6dSP3sVVc0eHkdsoZFFxq37
|
||||
fksL/q2vlPczvBihgcxt+UzW/UrNkelOuX3i57PDvFs=
|
||||
-----END RSA PRIVATE KEY-----
|
||||
""", # 4
|
||||
"""-----BEGIN CERTIFICATE-----
|
||||
MIIBnjCCAQcCAgCEMA0GCSqGSIb3DQEBBAUAMBcxFTATBgNVBAMUDG5ld3BiX3Ro
|
||||
aW5neTAeFw0wODA3MjUyMjQyMDZaFw0wOTA3MjUyMjQyMDZaMBcxFTATBgNVBAMU
|
||||
DG5ld3BiX3RoaW5neTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAsCQuudDF
|
||||
zgmY5tDpT0TkUo8fpJ5JcvgCkLFpSDD8REpXhLFkHWhTmTj3CAxfv4lA3sQzHZxe
|
||||
4S9YCb5c/VTbFEdgwc/wlxMmJiz2jYghdmWPBb8pBEk31YihIhC+u4kex6gJBH5y
|
||||
ixiZ3PPRRMaOBBo+ZfM50XIyWbFOOM/7FwcCAwEAATANBgkqhkiG9w0BAQQFAAOB
|
||||
gQB4cFURaiiUx6n8eS4j4Vxrii5PtsaNEI4acANFSYknGd0xTP4vnmoivNmo5fWE
|
||||
Q4hYtGezNu4a9MnNhcQmI20KzXmvhLJtkwWCgGOVJtMem8hDWXSALV1Ih8hmVkGS
|
||||
CI1elfr9eyguunGp9eMMQfKhWH52WHFA0NYa0Kpv5BY33A==
|
||||
-----END CERTIFICATE-----
|
||||
-----BEGIN RSA PRIVATE KEY-----
|
||||
MIICWwIBAAKBgQCwJC650MXOCZjm0OlPRORSjx+knkly+AKQsWlIMPxESleEsWQd
|
||||
aFOZOPcIDF+/iUDexDMdnF7hL1gJvlz9VNsUR2DBz/CXEyYmLPaNiCF2ZY8FvykE
|
||||
STfViKEiEL67iR7HqAkEfnKLGJnc89FExo4EGj5l8znRcjJZsU44z/sXBwIDAQAB
|
||||
AoGABA7xXKqoxBSIh1js5zypHhXaHsre2l1Igdj0mgs25MPpvE7yBZNvyan8Vx0h
|
||||
36Hj8r4Gh3og3YNfvem67sNTwNwONY0ep+Xho/3vG0jFATGduSXdcT04DusgZNqg
|
||||
UJqW75cqxrD6o/nya5wUoN9NL5pcd5AgVMdOYvJGbrwQuaECQQDiCs/5dsUkUkeC
|
||||
Tlur1wh0wJpW4Y2ctO3ncRdnAoAA9y8dELHXMqwKE4HtlyzHY7Bxds/BDh373EVK
|
||||
rsdl+v9JAkEAx3xTmsOQvWa1tf/O30sdItVpGogKDvYqkLCNthUzPaL85BWB03E2
|
||||
xunHcVVlqAOE5tFuw0/UEyEkOaGlNTJTzwJAPIVel9FoCUiKYuYt/z1swy3KZRaw
|
||||
/tMmm4AZHvh5Y0jLcYHFy/OCQpRkhkOitqQHWunPyEXKW2PnnY5cTv68GQJAHG7H
|
||||
B88KCUTjb25nkQIGxBlA4swzCtDhXkAb4rEA3a8mdmfuWjHPyeg2ShwO4jSmM7P0
|
||||
Iph1NMjLff9hKcTjlwJARpItOFkYEdtSODC7FMm7KRKQnNB27gFAizsOYWD4D2b7
|
||||
w1FTEZ/kSA9wSNhyNGt7dgUo6zFhm2u973HBCUb3dg==
|
||||
-----END RSA PRIVATE KEY-----
|
||||
""", # 5
|
||||
"""-----BEGIN CERTIFICATE-----
|
||||
MIIBnjCCAQcCAgCEMA0GCSqGSIb3DQEBBAUAMBcxFTATBgNVBAMUDG5ld3BiX3Ro
|
||||
aW5neTAeFw0wODA3MjUyMjQ3NThaFw0wOTA3MjUyMjQ3NThaMBcxFTATBgNVBAMU
|
||||
DG5ld3BiX3RoaW5neTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAvhTRj1dA
|
||||
NOfse/UBeTfMekZKxZHsNPr+qBYaveWAHDded/BMyMgaMV2n6HQdiDaRjJkzjHCF
|
||||
3xBtpIJeEGUqfrF0ob8BIZXy3qk68eX/0CVUbgmjSBN44ahlo63NshyXmZtEAkRV
|
||||
VE/+cRKw3N2wtuTed5xwfNcL6dg4KTOEYEkCAwEAATANBgkqhkiG9w0BAQQFAAOB
|
||||
gQCN+CLuVwLeWjSdVbdizYyrOVckqtwiIHG9BbGMlcIdm0qpvD7V7/sN2csk5LaT
|
||||
BNiHi1t5628/4UHqqodYmFw8ri8ItFwB+MmTJi11CX6dIP9OUhS0qO8Z/BKtot7H
|
||||
j04oNwl+WqZZfHIYwTIEL0HBn60nOvCQPDtnWG2BhpUxMA==
|
||||
-----END CERTIFICATE-----
|
||||
-----BEGIN RSA PRIVATE KEY-----
|
||||
MIICXQIBAAKBgQC+FNGPV0A05+x79QF5N8x6RkrFkew0+v6oFhq95YAcN1538EzI
|
||||
yBoxXafodB2INpGMmTOMcIXfEG2kgl4QZSp+sXShvwEhlfLeqTrx5f/QJVRuCaNI
|
||||
E3jhqGWjrc2yHJeZm0QCRFVUT/5xErDc3bC25N53nHB81wvp2DgpM4RgSQIDAQAB
|
||||
AoGALl2BqIdN4Bnac3oV++2CcSkIQB0SEvJOf820hDGhCEDxSCxTbn5w9S21MVxx
|
||||
f7Jf2n3cNxuTbA/jzscGDtW+gXCs+WAbAr5aOqHLUPGEobhKQrQT2hrxQHyv3UFp
|
||||
0tIl9eXFknOyVAaUJ3athK5tyjSiCZQQHLGzeLaDSKVAPqECQQD1GK7DkTcLaSvw
|
||||
hoTJ3dBK3JoKT2HHLitfEE0QV58mkqFMjofpe+nyeKWvEb/oB4WBp/cfTvtf7DJK
|
||||
zl1OSf11AkEAxomWmJeub0xpqksCmnVI1Jt1mvmcE4xpIcXq8sxzLHRc2QOv0kTw
|
||||
IcFl4QcN6EQBmE+8kl7Tx8SPAVKfJMoZBQJAGsUFYYrczjxAdlba7glyFJsfn/yn
|
||||
m0+poQpwwFYxpc7iGzB+G7xTAw62WfbAVSFtLYog7aR8xC9SFuWPP1vJeQJBAILo
|
||||
xBj3ovgWTXIRJbVM8mnl28UFI0msgsHXK9VOw/6i93nMuYkPFbtcN14KdbwZ42dX
|
||||
5EIrLr+BNr4riW4LqDUCQQCbsEEpTmj3upKUOONPt+6CH/OOMjazUzYHZ/3ORHGp
|
||||
Q3Wt+I4IrR/OsiACSIQAhS4kBfk/LGggnj56DrWt+oBl
|
||||
-----END RSA PRIVATE KEY-----
|
||||
""", #6
|
||||
"""-----BEGIN CERTIFICATE-----
|
||||
MIIBnjCCAQcCAgCEMA0GCSqGSIb3DQEBBAUAMBcxFTATBgNVBAMUDG5ld3BiX3Ro
|
||||
aW5neTAeFw0wODA3MjUyMjQ3NThaFw0wOTA3MjUyMjQ3NThaMBcxFTATBgNVBAMU
|
||||
DG5ld3BiX3RoaW5neTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAtKhx6sEA
|
||||
jn6HWc6T2klwlPn0quyHtATIw8V3ezP46v6g2rRS7dTywo4GTP4vX58l+sC9z9Je
|
||||
qhQ1rWSwMK4FmnDMZCu7AVO7oMIXpXdSz7l0bgCnNjvbpkA2pOfbB1Z8oj8iebff
|
||||
J33ID5DdkmCzqYVtKpII1o/5z7Jo292JYy8CAwEAATANBgkqhkiG9w0BAQQFAAOB
|
||||
gQA0PYMA07wo9kEH4fv9TCfo+zz42Px6lUxrQBPxBvDiGYhk2kME/wX0IcoZPKTV
|
||||
WyBGmDAYWvFaHWbrbbTOfzlLWfYrDD913hCi9cO8iF8oBqRjIlkKcxAoe7vVg5Az
|
||||
ydVcrY+zqULJovWwyNmH1QNIQfMat0rj7fylwjiS1y/YsA==
|
||||
-----END CERTIFICATE-----
|
||||
-----BEGIN RSA PRIVATE KEY-----
|
||||
MIICXAIBAAKBgQC0qHHqwQCOfodZzpPaSXCU+fSq7Ie0BMjDxXd7M/jq/qDatFLt
|
||||
1PLCjgZM/i9fnyX6wL3P0l6qFDWtZLAwrgWacMxkK7sBU7ugwheld1LPuXRuAKc2
|
||||
O9umQDak59sHVnyiPyJ5t98nfcgPkN2SYLOphW0qkgjWj/nPsmjb3YljLwIDAQAB
|
||||
AoGAU4CYRv22mCZ7wVLunDLdyr5ODMMPZnHfqj2XoGbBYz0WdIBs5GlNXAfxeZzz
|
||||
oKsbDvAPzANcphh5RxAHMDj/dT8rZOez+eJrs1GEV+crl1T9p83iUkAuOJFtgUgf
|
||||
TtQBL9vHaj7DfvCEXcBPmN/teDFmAAOyUNbtuhTkRa3PbuECQQDwaqZ45Kr0natH
|
||||
V312dqlf9ms8I6e873pAu+RvA3BAWczk65eGcRjEBxVpTvNEcYKFrV8O5ZYtolrr
|
||||
VJl97AfdAkEAwF4w4KJ32fLPVoPnrYlgLw86NejMpAkixblm8cn51avPQmwbtahb
|
||||
BZUuca22IpgDpjeEk5SpEMixKe/UjzxMewJBALy4q2cY8U3F+u6sshLtAPYQZIs3
|
||||
3fNE9W2dUKsIQvRwyZMlkLN7UhqHCPq6e+HNTM0MlCMIfAPkf4Rdy4N6ZY0CQCKE
|
||||
BAMaQ6TwgzFDw5sIjiCDe+9WUPmRxhJyHL1/fvtOs4Z4fVRP290ZklbFU2vLmMQH
|
||||
LBuKzfb7+4XJyXrV1+cCQBqfPFQQZLr5UgccABYQ2jnWVbJPISJ5h2b0cwXt+pz/
|
||||
8ODEYLjqWr9K8dtbgwdpzwbkaGhQYpyvsguMvNPMohs=
|
||||
-----END RSA PRIVATE KEY-----
|
||||
""", #7
|
||||
"""-----BEGIN CERTIFICATE-----
|
||||
MIIBnjCCAQcCAgCEMA0GCSqGSIb3DQEBBAUAMBcxFTATBgNVBAMUDG5ld3BiX3Ro
|
||||
aW5neTAeFw0wODA3MjUyMjQ3NThaFw0wOTA3MjUyMjQ3NThaMBcxFTATBgNVBAMU
|
||||
DG5ld3BiX3RoaW5neTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAnBfNHycn
|
||||
5RnYzDN4EWTk2q1BBxA6ZYtlG1WPkj5iKeaYKzUk58zBL7mNOA0ucq+yTwh9C4IC
|
||||
EutWPaKBSKY5XI+Rdebh+Efq+urtOLgfJHlfcCraEx7hYN+tqqMVgEgnO/MqIsn1
|
||||
I1Fvnp89mSYbQ9tmvhSH4Hm+nbeK6iL2tIsCAwEAATANBgkqhkiG9w0BAQQFAAOB
|
||||
gQBt9zxfsKWoyyV764rRb6XThuTDMNSDaVofqePEWjudAbDu6tp0pHcrL0XpIrnT
|
||||
3iPgD47pdlwQNbGJ7xXwZu2QTOq+Lv62E6PCL8FljDVoYqR3WwJFFUigNvBT2Zzu
|
||||
Pxx7KUfOlm/M4XUSMu31sNJ0kQniBwpkW43YmHVNFb/R7g==
|
||||
-----END CERTIFICATE-----
|
||||
-----BEGIN RSA PRIVATE KEY-----
|
||||
MIICXQIBAAKBgQCcF80fJyflGdjMM3gRZOTarUEHEDpli2UbVY+SPmIp5pgrNSTn
|
||||
zMEvuY04DS5yr7JPCH0LggIS61Y9ooFIpjlcj5F15uH4R+r66u04uB8keV9wKtoT
|
||||
HuFg362qoxWASCc78yoiyfUjUW+enz2ZJhtD22a+FIfgeb6dt4rqIva0iwIDAQAB
|
||||
AoGBAIHstcnWd7iUeQYPWUNxLaRvTY8pjNH04yWLZEOgNWkXDVX5mExw++RTmB4t
|
||||
qpm/cLWkJSEtB7jjthb7ao0j/t2ljqfr6kAbClDv3zByAEDhOu8xB/5ne6Ioo+k2
|
||||
dygC+GcVcobhv8qRU+z0fpeXSP8yS1bQQHOaa17bSGsncvHRAkEAzwsn8jBTOqaW
|
||||
6Iymvr7Aql++LiwEBrqMMRVyBZlkux4hiKa2P7XXEL6/mOPR0aI2LuCqE2COrO7R
|
||||
0wAFZ54bjwJBAMEAe6cs0zI3p3STHwA3LoSZB81lzLhGUnYBvOq1yoDSlJCOYpld
|
||||
YM1y3eC0vwiOnEu3GG1bhkW+h6Kx0I/qyUUCQBiH9NqwORxI4rZ4+8S76y4EnA7y
|
||||
biOx9KxYIyNgslutTUHYpt1TmUDFqQPfclvJQWw6eExFc4Iv5bJ/XSSSyicCQGyY
|
||||
5PrwEfYTsrm5fpwUcKxTnzxHp6WYjBWybKZ0m/lYhBfCxmAdVrbDh21Exqj99Zv0
|
||||
7l26PhdIWfGFtCEGrzECQQCtPyXa3ostSceR7zEKxyn9QBCNXKARfNNTBja6+VRE
|
||||
qDC6jLqzu/SoOYaqa13QzCsttO2iZk8Ygfy3Yz0n37GE
|
||||
-----END RSA PRIVATE KEY-----
|
||||
""", #8
|
||||
"""-----BEGIN CERTIFICATE-----
|
||||
MIIBnjCCAQcCAgCEMA0GCSqGSIb3DQEBBAUAMBcxFTATBgNVBAMUDG5ld3BiX3Ro
|
||||
aW5neTAeFw0wODA3MjUyMjQ3NThaFw0wOTA3MjUyMjQ3NThaMBcxFTATBgNVBAMU
|
||||
DG5ld3BiX3RoaW5neTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA4mnLf+x0
|
||||
CWKDKP5PLZ87t2ReSDE/J5QoI5VhE0bXaahdhPrQTC2wvOpT+N9nzEpI9ASh/ejV
|
||||
kYGlc03nNKRL7zyVM1UyGduEwsRssFMqfyJhI1p+VmxDMWNplex7mIAheAdskPj3
|
||||
pwi2CP4VIMjOj368AXvXItPzeCfAhYhEVaMCAwEAATANBgkqhkiG9w0BAQQFAAOB
|
||||
gQAEzmwq5JFI5Z0dX20m9rq7NKgwRyAH3h5aE8bdjO8nEc69qscfDRx79Lws3kK8
|
||||
A0LG0DhxKB8cTNu3u+jy81tjcC4pLNQ5IKap9ksmP7RtIHfTA55G8M3fPl2ZgDYQ
|
||||
ZzsWAZvTNXd/eme0SgOzD10rfntA6ZIgJTWHx3E0RkdwKw==
|
||||
-----END CERTIFICATE-----
|
||||
-----BEGIN RSA PRIVATE KEY-----
|
||||
MIICXQIBAAKBgQDiact/7HQJYoMo/k8tnzu3ZF5IMT8nlCgjlWETRtdpqF2E+tBM
|
||||
LbC86lP432fMSkj0BKH96NWRgaVzTec0pEvvPJUzVTIZ24TCxGywUyp/ImEjWn5W
|
||||
bEMxY2mV7HuYgCF4B2yQ+PenCLYI/hUgyM6PfrwBe9ci0/N4J8CFiERVowIDAQAB
|
||||
AoGAQYTl+8XcKl8Un4dAOG6M5FwqIHAH25c3Klzu85obehrbvUCriG/sZi7VT/6u
|
||||
VeLlS6APlJ+NNgczbrOLhaNJyYzjICSt8BI96PldFUzCEkVlgE+29pO7RNoZmDYB
|
||||
dSGyIDrWdVYfdzpir6kC0KDcrpA16Sc+/bK6Q8ALLRpC7QECQQD7F7fhIQ03CKSk
|
||||
lS4mgDuBQrB/52jXgBumtjp71ANNeaWR6+06KDPTLysM+olsh97Q7YOGORbrBnBg
|
||||
Y2HPnOgjAkEA5taZaMfdFa8V1SPcX7mgCLykYIujqss0AmauZN/24oLdNE8HtTBF
|
||||
OLaxE6PnQ0JWfx9KGIy3E0V3aFk5FWb0gQJBAO4KFEaXgOG1jfCBhNj3JHJseMso
|
||||
5Nm4F366r0MJQYBHXNGzqphB2K/Svat2MKX1QSUspk2u/a0d05dtYCLki6UCQHWS
|
||||
sChyQ+UbfF9HGKOZBC3vBzo1ZXNEdIUUj5bJjBHq3YgbCK38nAU66A482TmkvDGb
|
||||
Wj4OzeB+7Ua0yyJfggECQQDVlAa8HqdAcrbEwI/YfPydFsavBJ0KtcIGK2owQ+dk
|
||||
dhlDnpXDud/AtX4Ft2LaquQ15fteRrYjjwI9SFGytjtp
|
||||
-----END RSA PRIVATE KEY-----
|
||||
""", #9
|
||||
"""-----BEGIN CERTIFICATE-----
|
||||
MIIBnjCCAQcCAgCEMA0GCSqGSIb3DQEBBAUAMBcxFTATBgNVBAMUDG5ld3BiX3Ro
|
||||
aW5neTAeFw0wODA3MjUyMjQ3NThaFw0wOTA3MjUyMjQ3NThaMBcxFTATBgNVBAMU
|
||||
DG5ld3BiX3RoaW5neTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAueLfowPT
|
||||
kXXtHeU2FZSz2mJhHmjqeyI1oMoyyggonccx65vMxaRfljnz2dOjVVYpCOn/LrdP
|
||||
wVxHO8KNDsmQeWPRjnnBa2dFqqOnp/8gEJFJBW7K/gI9se6o+xe9QIWBq6d/fKVR
|
||||
BURJe5TycLogzZuxQn1xHHILa3XleYuHAbMCAwEAATANBgkqhkiG9w0BAQQFAAOB
|
||||
gQBEC1lfC3XK0galQC96B7faLpnQmhn5lX2FUUoFIQQtBTetoE+gTqnLSOIZcOK4
|
||||
pkT3YvxUvgOV0LOLClryo2IknMMGWRSAcXtVUBBLRHVTSSuVUyyLr5kdRU7B4E+l
|
||||
OU0j8Md/dzlkm//K1bzLyUaPq204ofH8su2IEX4b3IGmAQ==
|
||||
-----END CERTIFICATE-----
|
||||
-----BEGIN RSA PRIVATE KEY-----
|
||||
MIICWwIBAAKBgQC54t+jA9ORde0d5TYVlLPaYmEeaOp7IjWgyjLKCCidxzHrm8zF
|
||||
pF+WOfPZ06NVVikI6f8ut0/BXEc7wo0OyZB5Y9GOecFrZ0Wqo6en/yAQkUkFbsr+
|
||||
Aj2x7qj7F71AhYGrp398pVEFREl7lPJwuiDNm7FCfXEccgtrdeV5i4cBswIDAQAB
|
||||
AoGAO4PnJHNaLs16AMNdgKVevEIZZDolMQ1v7C4w+ryH/JRFaHE2q+UH8bpWV9zK
|
||||
A82VT9RTrqpkb71S1VBiB2UDyz263XdAI/N2HcIVMmfKb72oV4gCI1KOv4DfFwZv
|
||||
tVVcIdVEDBOZ2TgqK4opGOgWMDqgIAl2z3PbsIoNylZHEJECQQDtQeJFhEJGH4Qz
|
||||
BGpdND0j2nnnJyhOFHJqikJNdul3uBwmxTK8FPEUUH/rtpyUan3VMOyDx3kX4OQg
|
||||
GDNSb32rAkEAyJIZIJ0EMRHVedyWsfqR0zTGKRQ+qsc3sCfyUhFksWms9jsSS0DT
|
||||
tVeTdC3F6EIAdpKOGhSyfBTU4jxwbFc0GQJADI4L9znEeAl66Wg2aLA2/Aq3oK/F
|
||||
xjv2wgSG9apxOFCZzMNqp+FD0Jth6YtEReZMuldYbLDFi6nu6HPfY2Fa+QJAdpm1
|
||||
lAxk6yMxiZK/5VRWoH6HYske2Vtd+aNVbePtF992ME/z3F3kEkpL3hom+dT1cyfs
|
||||
MU3l0Ot8ip7Ul6vlGQJAegNzpcfl2GFSdWQMxQ+nN3woKnPqpR1M3jgnqvo7L4Xe
|
||||
JW3vRxvfdrUuzdlvZ/Pbsu/vOd+cuIa4h0yD5q3N+g==
|
||||
-----END RSA PRIVATE KEY-----
|
||||
""", #10
|
||||
"""-----BEGIN CERTIFICATE-----
|
||||
MIIBnjCCAQcCAgCEMA0GCSqGSIb3DQEBBAUAMBcxFTATBgNVBAMUDG5ld3BiX3Ro
|
||||
aW5neTAeFw0wODA3MjUyMjQ3NThaFw0wOTA3MjUyMjQ3NThaMBcxFTATBgNVBAMU
|
||||
DG5ld3BiX3RoaW5neTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAruBhwk+J
|
||||
XdlwfKXXN8K+43JyEYCV7Fp7ZiES4t4AEJuQuBqJVMxpzeZzu2t/vVb59ThaxxtY
|
||||
NGD3Xy6Og5dTv//ztWng8P7HwwvfbrUICU6zo6JAhg7kfaNa116krCYOkC/cdJWt
|
||||
o5W+zsDmI1jUVGH0D73h29atc1gn6wLpAsMCAwEAATANBgkqhkiG9w0BAQQFAAOB
|
||||
gQAEJ/ITGJ9lK/rk0yHcenW8SHsaSTlZMuJ4yEiIgrJ2t71Rd6mtCC/ljx9USvvK
|
||||
bF500whTiZlnWgKi02boBEKa44z/DytF6pljeNPefBQSqZyUByGEb/8Mn58Idyls
|
||||
q4/d9iKXMPvbpQdcesOzgOffFZevLQSWyPRaIdYBOOiYUA==
|
||||
-----END CERTIFICATE-----
|
||||
-----BEGIN RSA PRIVATE KEY-----
|
||||
MIICXQIBAAKBgQCu4GHCT4ld2XB8pdc3wr7jcnIRgJXsWntmIRLi3gAQm5C4GolU
|
||||
zGnN5nO7a3+9Vvn1OFrHG1g0YPdfLo6Dl1O///O1aeDw/sfDC99utQgJTrOjokCG
|
||||
DuR9o1rXXqSsJg6QL9x0la2jlb7OwOYjWNRUYfQPveHb1q1zWCfrAukCwwIDAQAB
|
||||
AoGAcZAXC/dYrlBpIxkTRQu7qLqGZuVI9t7fabgqqpceFargdR4Odrn0L5jrKRer
|
||||
MYrM8bjyAoC4a/NYUUBLnhrkcCQWO9q5fSQuFKFVWHY53SM63Qdqk8Y9Fmy/h/4c
|
||||
UtwZ5BWkUWItvnTMgb9bFcvSiIhEcNQauypnMpgNknopu7kCQQDlSQT10LkX2IGT
|
||||
bTUhPcManx92gucaKsPONKq2mP+1sIciThevRTZWZsxyIuoBBY43NcKKi8NlZCtj
|
||||
hhSbtzYdAkEAw0B93CXfso8g2QIMj/HJJz/wNTLtg+rriXp6jh5HWe6lKWRVrce+
|
||||
1w8Qz6OI/ZP6xuQ9HNeZxJ/W6rZPW6BGXwJAHcTuRPA1p/fvUvHh7Q/0zfcNAbkb
|
||||
QlV9GL/TzmNtB+0EjpqvDo2g8XTlZIhN85YCEf8D5DMjSn3H+GMHN/SArQJBAJlW
|
||||
MIGPjNoh5V4Hae4xqBOW9wIQeM880rUo5s5toQNTk4mqLk9Hquwh/MXUXGUora08
|
||||
2XGpMC1midXSTwhaGmkCQQCdivptFEYl33PrVbxY9nzHynpp4Mi89vQF0cjCmaYY
|
||||
N8L+bvLd4BU9g6hRS8b59lQ6GNjryx2bUnCVtLcey4Jd
|
||||
-----END RSA PRIVATE KEY-----
|
||||
""", #11
|
||||
]
|
||||
|
||||
# To disable the pre-computed tub certs, uncomment this line.
|
||||
#SYSTEM_TEST_CERTS = []
|
||||
|
||||
# A payload just over the LIT-cap threshold, so tests that upload it
# exercise the real (non-LIT) upload path.
# (Merge residue left two conflicting assignments here; keep the one
# using the directly-imported Uploader, matching the file's imports.)
TEST_DATA = "\x02" * (Uploader.URI_LIT_SIZE_THRESHOLD + 1)
|
||||
|
||||
class ShouldFailMixin:
|
||||
def shouldFail(self, expected_failure, which, substring,
|
||||
|
@ -8,6 +8,10 @@ from allmydata.util import fileutil, log
|
||||
from allmydata.util.encodingutil import unicode_platform, get_filesystem_encoding
|
||||
|
||||
|
||||
class DevNullDictionary(dict):
    """A dict that silently discards every assignment.

    Handy for disabling a cache in tests: lookups behave like an empty
    dict, and writes are no-ops.
    """
    def __setitem__(self, key, value):
        # Drop the item on the floor.
        return
|
||||
|
||||
def insecurerandstr(n):
    """Return a string of n random characters in [chr(0), chr(255)].

    Uses the non-cryptographic `random` module: fine for test fixtures,
    never for anything security-sensitive.
    """
    return ''.join(chr(randrange(256)) for _ in range(n))
|
||||
|
||||
|
0
src/allmydata/test/mutable/__init__.py
Normal file
0
src/allmydata/test/mutable/__init__.py
Normal file
256
src/allmydata/test/mutable/test_checker.py
Normal file
256
src/allmydata/test/mutable/test_checker.py
Normal file
@ -0,0 +1,256 @@
|
||||
from twisted.trial import unittest
|
||||
from foolscap.api import flushEventualQueue
|
||||
from allmydata.monitor import Monitor
|
||||
from allmydata.mutable.common import CorruptShareError
|
||||
from .util import PublishMixin, corrupt, CheckerMixin
|
||||
|
||||
class Checker(unittest.TestCase, CheckerMixin, PublishMixin):
    """Exercise the mutable-file checker/verifier against healthy and
    deliberately-corrupted SDMF and MDMF files.

    PublishMixin supplies publish_one/publish_mdmf/publish_sdmf (which
    set self._fn and self._storage); CheckerMixin supplies the
    check_good / check_bad / check_expected_failure assertion helpers.
    """
    def setUp(self):
        # Publish a default (SDMF) file; the mixin sets self._fn and
        # self._storage.
        return self.publish_one()


    def test_check_good(self):
        d = self._fn.check(Monitor())
        d.addCallback(self.check_good, "test_check_good")
        return d

    def test_check_mdmf_good(self):
        d = self.publish_mdmf()
        d.addCallback(lambda ignored:
                      self._fn.check(Monitor()))
        d.addCallback(self.check_good, "test_check_mdmf_good")
        return d

    def test_check_no_shares(self):
        # Remove every share from every server: the file is unhealthy.
        for shares in self._storage._peers.values():
            shares.clear()
        d = self._fn.check(Monitor())
        d.addCallback(self.check_bad, "test_check_no_shares")
        return d

    def test_check_mdmf_no_shares(self):
        d = self.publish_mdmf()
        def _then(ignored):
            # fix: this local holds a dict of shares; name it accordingly
            for shares in self._storage._peers.values():
                shares.clear()
        d.addCallback(_then)
        d.addCallback(lambda ignored:
                      self._fn.check(Monitor()))
        d.addCallback(self.check_bad, "test_check_mdmf_no_shares")
        return d

    def test_check_not_enough_shares(self):
        # Leave only share 0 on each server, below the needed k shares.
        for shares in self._storage._peers.values():
            for shnum in shares.keys():
                if shnum > 0:
                    del shares[shnum]
        d = self._fn.check(Monitor())
        d.addCallback(self.check_bad, "test_check_not_enough_shares")
        return d

    def test_check_mdmf_not_enough_shares(self):
        d = self.publish_mdmf()
        def _then(ignored):
            for shares in self._storage._peers.values():
                for shnum in shares.keys():
                    if shnum > 0:
                        del shares[shnum]
        d.addCallback(_then)
        d.addCallback(lambda ignored:
                      self._fn.check(Monitor()))
        # fix: label was misspelled "test_check_mdmf_not_enougH_shares"
        d.addCallback(self.check_bad, "test_check_mdmf_not_enough_shares")
        return d


    def test_check_all_bad_sig(self):
        d = corrupt(None, self._storage, 1) # bad sig
        d.addCallback(lambda ignored:
                      self._fn.check(Monitor()))
        d.addCallback(self.check_bad, "test_check_all_bad_sig")
        return d

    def test_check_mdmf_all_bad_sig(self):
        d = self.publish_mdmf()
        d.addCallback(lambda ignored:
                      corrupt(None, self._storage, 1))
        d.addCallback(lambda ignored:
                      self._fn.check(Monitor()))
        d.addCallback(self.check_bad, "test_check_mdmf_all_bad_sig")
        return d

    def test_verify_mdmf_all_bad_sharedata(self):
        d = self.publish_mdmf()
        # On 8 of the shares, corrupt the beginning of the share data.
        # The signature check during the servermap update won't catch this.
        d.addCallback(lambda ignored:
                      corrupt(None, self._storage, "share_data", range(8)))
        # On 2 of the shares, corrupt the end of the share data.
        # The signature check during the servermap update won't catch
        # this either, and the retrieval process will have to process
        # all of the segments before it notices.
        d.addCallback(lambda ignored:
            # the block hash tree comes right after the share data, so if we
            # corrupt a little before the block hash tree, we'll corrupt in the
            # last block of each share.
            corrupt(None, self._storage, "block_hash_tree", [8, 9], -5))
        d.addCallback(lambda ignored:
                      self._fn.check(Monitor(), verify=True))
        # The verifier should flag the file as unhealthy, and should
        # list all 10 shares as bad.
        d.addCallback(self.check_bad, "test_verify_mdmf_all_bad_sharedata")
        def _check_num_bad(r):
            self.failIf(r.is_recoverable())
            smap = r.get_servermap()
            self.failUnlessEqual(len(smap.get_bad_shares()), 10)
        d.addCallback(_check_num_bad)
        return d

    def test_check_all_bad_blocks(self):
        d = corrupt(None, self._storage, "share_data", [9]) # bad blocks
        # the Checker won't notice this.. it doesn't look at actual data
        d.addCallback(lambda ignored:
                      self._fn.check(Monitor()))
        d.addCallback(self.check_good, "test_check_all_bad_blocks")
        return d


    def test_check_mdmf_all_bad_blocks(self):
        d = self.publish_mdmf()
        d.addCallback(lambda ignored:
                      corrupt(None, self._storage, "share_data"))
        d.addCallback(lambda ignored:
                      self._fn.check(Monitor()))
        d.addCallback(self.check_good, "test_check_mdmf_all_bad_blocks")
        return d

    def test_verify_good(self):
        d = self._fn.check(Monitor(), verify=True)
        d.addCallback(self.check_good, "test_verify_good")
        return d

    def test_verify_all_bad_sig(self):
        d = corrupt(None, self._storage, 1) # bad sig
        d.addCallback(lambda ignored:
                      self._fn.check(Monitor(), verify=True))
        d.addCallback(self.check_bad, "test_verify_all_bad_sig")
        return d

    def test_verify_one_bad_sig(self):
        d = corrupt(None, self._storage, 1, [9]) # bad sig
        d.addCallback(lambda ignored:
                      self._fn.check(Monitor(), verify=True))
        d.addCallback(self.check_bad, "test_verify_one_bad_sig")
        return d

    def test_verify_one_bad_block(self):
        d = corrupt(None, self._storage, "share_data", [9]) # bad blocks
        # the Verifier *will* notice this, since it examines every byte
        d.addCallback(lambda ignored:
                      self._fn.check(Monitor(), verify=True))
        d.addCallback(self.check_bad, "test_verify_one_bad_block")
        d.addCallback(self.check_expected_failure,
                      CorruptShareError, "block hash tree failure",
                      "test_verify_one_bad_block")
        return d

    def test_verify_one_bad_sharehash(self):
        d = corrupt(None, self._storage, "share_hash_chain", [9], 5)
        d.addCallback(lambda ignored:
                      self._fn.check(Monitor(), verify=True))
        d.addCallback(self.check_bad, "test_verify_one_bad_sharehash")
        d.addCallback(self.check_expected_failure,
                      CorruptShareError, "corrupt hashes",
                      "test_verify_one_bad_sharehash")
        return d

    def test_verify_one_bad_encprivkey(self):
        d = corrupt(None, self._storage, "enc_privkey", [9]) # bad privkey
        d.addCallback(lambda ignored:
                      self._fn.check(Monitor(), verify=True))
        d.addCallback(self.check_bad, "test_verify_one_bad_encprivkey")
        d.addCallback(self.check_expected_failure,
                      CorruptShareError, "invalid privkey",
                      "test_verify_one_bad_encprivkey")
        return d

    def test_verify_one_bad_encprivkey_uncheckable(self):
        d = corrupt(None, self._storage, "enc_privkey", [9]) # bad privkey
        readonly_fn = self._fn.get_readonly()
        # a read-only node has no way to validate the privkey
        d.addCallback(lambda ignored:
                      readonly_fn.check(Monitor(), verify=True))
        d.addCallback(self.check_good,
                      "test_verify_one_bad_encprivkey_uncheckable")
        return d


    def test_verify_mdmf_good(self):
        d = self.publish_mdmf()
        d.addCallback(lambda ignored:
                      self._fn.check(Monitor(), verify=True))
        d.addCallback(self.check_good, "test_verify_mdmf_good")
        return d


    def test_verify_mdmf_one_bad_block(self):
        d = self.publish_mdmf()
        d.addCallback(lambda ignored:
                      corrupt(None, self._storage, "share_data", [1]))
        d.addCallback(lambda ignored:
                      self._fn.check(Monitor(), verify=True))
        # We should find one bad block here
        d.addCallback(self.check_bad, "test_verify_mdmf_one_bad_block")
        d.addCallback(self.check_expected_failure,
                      CorruptShareError, "block hash tree failure",
                      "test_verify_mdmf_one_bad_block")
        return d


    def test_verify_mdmf_bad_encprivkey(self):
        d = self.publish_mdmf()
        d.addCallback(lambda ignored:
                      corrupt(None, self._storage, "enc_privkey", [0]))
        d.addCallback(lambda ignored:
                      self._fn.check(Monitor(), verify=True))
        d.addCallback(self.check_bad, "test_verify_mdmf_bad_encprivkey")
        d.addCallback(self.check_expected_failure,
                      CorruptShareError, "privkey",
                      "test_verify_mdmf_bad_encprivkey")
        return d


    def test_verify_mdmf_bad_sig(self):
        d = self.publish_mdmf()
        d.addCallback(lambda ignored:
                      corrupt(None, self._storage, 1, [1]))
        d.addCallback(lambda ignored:
                      self._fn.check(Monitor(), verify=True))
        d.addCallback(self.check_bad, "test_verify_mdmf_bad_sig")
        return d


    def test_verify_mdmf_bad_encprivkey_uncheckable(self):
        d = self.publish_mdmf()
        d.addCallback(lambda ignored:
                      corrupt(None, self._storage, "enc_privkey", [1]))
        d.addCallback(lambda ignored:
                      self._fn.get_readonly())
        d.addCallback(lambda fn:
                      fn.check(Monitor(), verify=True))
        d.addCallback(self.check_good,
                      "test_verify_mdmf_bad_encprivkey_uncheckable")
        return d

    def test_verify_sdmf_empty(self):
        d = self.publish_sdmf("")
        d.addCallback(lambda ignored: self._fn.check(Monitor(), verify=True))
        d.addCallback(self.check_good, "test_verify_sdmf")
        d.addCallback(flushEventualQueue)
        return d

    def test_verify_mdmf_empty(self):
        d = self.publish_mdmf("")
        d.addCallback(lambda ignored: self._fn.check(Monitor(), verify=True))
        d.addCallback(self.check_good, "test_verify_mdmf")
        d.addCallback(flushEventualQueue)
        return d
|
41
src/allmydata/test/mutable/test_datahandle.py
Normal file
41
src/allmydata/test/mutable/test_datahandle.py
Normal file
@ -0,0 +1,41 @@
|
||||
from twisted.trial import unittest
|
||||
from allmydata.mutable.publish import MutableData
|
||||
|
||||
class DataHandle(unittest.TestCase):
    """Tests for MutableData, the in-memory uploadable wrapper."""
    def setUp(self):
        self.test_data = "Test Data" * 50000
        self.uploadable = MutableData(self.test_data)


    def test_datahandle_read(self):
        # Read the handle in fixed-size chunks; each chunk must match the
        # corresponding slice of the source string.
        chunk_size = 10
        offset = 0
        while offset < len(self.test_data):
            chunk = "".join(self.uploadable.read(chunk_size))
            self.failUnlessEqual(chunk,
                                 self.test_data[offset:offset + chunk_size])
            offset += chunk_size


    def test_datahandle_get_size(self):
        # get_size() reports the full length of the wrapped data.
        self.failUnlessEqual(self.uploadable.get_size(),
                             len(self.test_data))


    def test_datahandle_get_size_out_of_order(self):
        # Calling get_size() must not disturb the read cursor.
        chunk_size = 100
        first = "".join(self.uploadable.read(chunk_size))
        self.failUnlessEqual(first, self.test_data[:chunk_size])

        # Interleave a size query...
        self.failUnlessEqual(self.uploadable.get_size(),
                             len(self.test_data))

        # ...then confirm the next read resumes exactly where we left off.
        second = "".join(self.uploadable.read(chunk_size))
        self.failUnlessEqual(second,
                             self.test_data[chunk_size:chunk_size * 2])
|
24
src/allmydata/test/mutable/test_different_encoding.py
Normal file
24
src/allmydata/test/mutable/test_different_encoding.py
Normal file
@ -0,0 +1,24 @@
|
||||
from twisted.trial import unittest
|
||||
from .util import FakeStorage, make_nodemaker
|
||||
|
||||
class DifferentEncoding(unittest.TestCase):
    """Modifying a mutable file with a client whose encoding parameters
    differ from the ones the file was created with."""
    def setUp(self):
        self._storage = s = FakeStorage()
        self.nodemaker = make_nodemaker(s)

    def test_filenode(self):
        # Create a file with 3-of-20 encoding, then modify it through a
        # client configured for 3-of-10. Ticket #1510 tracks a failure
        # here.
        self.nodemaker.default_encoding_parameters["n"] = 20
        d = self.nodemaker.create_mutable_file("old contents")
        def _recreate(n):
            filecap = n.get_cap().to_string()
            del n # we want a new object, not the cached one
            self.nodemaker.default_encoding_parameters["n"] = 10
            return self.nodemaker.create_from_cap(filecap)
        d.addCallback(_recreate)
        def modifier(old_contents, servermap, first_time):
            return "new contents"
        d.addCallback(lambda n2: n2.modify(modifier))
        return d
|
9
src/allmydata/test/mutable/test_exceptions.py
Normal file
9
src/allmydata/test/mutable/test_exceptions.py
Normal file
@ -0,0 +1,9 @@
|
||||
from twisted.trial import unittest
|
||||
from allmydata.mutable.common import NeedMoreDataError, UncoordinatedWriteError
|
||||
|
||||
class Exceptions(unittest.TestCase):
    """repr() of the mutable-file exceptions should name the class."""
    def test_repr(self):
        for exc in [NeedMoreDataError(100, 50, 100),
                    UncoordinatedWriteError()]:
            r = repr(exc)
            self.failUnless(type(exc).__name__ in r, r)
|
74
src/allmydata/test/mutable/test_filehandle.py
Normal file
74
src/allmydata/test/mutable/test_filehandle.py
Normal file
@ -0,0 +1,74 @@
|
||||
import os
|
||||
from cStringIO import StringIO
|
||||
from twisted.trial import unittest
|
||||
from allmydata.mutable.publish import MutableFileHandle
|
||||
|
||||
class FileHandle(unittest.TestCase):
    """Tests for MutableFileHandle, the file-like uploadable wrapper."""
    def setUp(self):
        self.test_data = "Test Data" * 50000
        self.sio = StringIO(self.test_data)
        self.uploadable = MutableFileHandle(self.sio)


    def test_filehandle_read(self):
        self.basedir = "mutable/FileHandle/test_filehandle_read"
        # Read in fixed-size chunks; each chunk must match the
        # corresponding slice of the source string.
        chunk_size = 10
        offset = 0
        while offset < len(self.test_data):
            chunk = "".join(self.uploadable.read(chunk_size))
            self.failUnlessEqual(chunk,
                                 self.test_data[offset:offset + chunk_size])
            offset += chunk_size


    def test_filehandle_get_size(self):
        self.basedir = "mutable/FileHandle/test_filehandle_get_size"
        # get_size() reports the full length of the wrapped data.
        self.failUnlessEqual(self.uploadable.get_size(),
                             len(self.test_data))


    def test_filehandle_get_size_out_of_order(self):
        # Calling get_size() must not disturb the read cursor.
        chunk_size = 100
        first = "".join(self.uploadable.read(chunk_size))
        self.failUnlessEqual(first, self.test_data[:chunk_size])

        # Interleave a size query...
        self.failUnlessEqual(self.uploadable.get_size(),
                             len(self.test_data))

        # ...then confirm the next read resumes exactly where we left off.
        second = "".join(self.uploadable.read(chunk_size))
        self.failUnlessEqual(second,
                             self.test_data[chunk_size:chunk_size * 2])


    def test_filehandle_file(self):
        # MutableFileHandle must work on a real file object as well as a
        # StringIO, since in some cases it will be handed one.
        self.basedir = self.mktemp()
        # necessary? What am I doing wrong here?
        os.mkdir(self.basedir)
        f_path = os.path.join(self.basedir, "test_file")
        f = open(f_path, "w")
        f.write(self.test_data)
        f.close()
        f = open(f_path, "r")

        uploadable = MutableFileHandle(f)

        read_back = "".join(uploadable.read(len(self.test_data)))
        self.failUnlessEqual(read_back, self.test_data)
        self.failUnlessEqual(uploadable.get_size(), len(self.test_data))


    def test_close(self):
        # close() must propagate to the underlying handle.
        self.uploadable.close()
        self.failUnless(self.sio.closed)
|
660
src/allmydata/test/mutable/test_filenode.py
Normal file
660
src/allmydata/test/mutable/test_filenode.py
Normal file
@ -0,0 +1,660 @@
|
||||
from cStringIO import StringIO
|
||||
from twisted.internet import defer, reactor
|
||||
from twisted.trial import unittest
|
||||
from allmydata import uri, client
|
||||
from allmydata.util.consumer import MemoryConsumer
|
||||
from allmydata.interfaces import SDMF_VERSION, MDMF_VERSION, DownloadStopped
|
||||
from allmydata.mutable.filenode import MutableFileNode, BackoffAgent
|
||||
from allmydata.mutable.common import MODE_ANYTHING, MODE_WRITE, MODE_READ, UncoordinatedWriteError
|
||||
|
||||
from allmydata.mutable.publish import MutableData
|
||||
from ..test_download import PausingConsumer, PausingAndStoppingConsumer, \
|
||||
StoppingConsumer, ImmediatelyStoppingConsumer
|
||||
from .. import common_util as testutil
|
||||
from .util import FakeStorage, make_nodemaker
|
||||
|
||||
class Filenode(unittest.TestCase, testutil.ShouldFailMixin):
|
||||
# this used to be in Publish, but we removed the limit. Some of
|
||||
# these tests test whether the new code correctly allows files
|
||||
# larger than the limit.
|
||||
OLD_MAX_SEGMENT_SIZE = 3500000
|
||||
def setUp(self):
|
||||
self._storage = s = FakeStorage()
|
||||
self.nodemaker = make_nodemaker(s)
|
||||
|
||||
def test_create(self):
|
||||
d = self.nodemaker.create_mutable_file()
|
||||
def _created(n):
|
||||
self.failUnless(isinstance(n, MutableFileNode))
|
||||
self.failUnlessEqual(n.get_storage_index(), n._storage_index)
|
||||
sb = self.nodemaker.storage_broker
|
||||
peer0 = sorted(sb.get_all_serverids())[0]
|
||||
shnums = self._storage._peers[peer0].keys()
|
||||
self.failUnlessEqual(len(shnums), 1)
|
||||
d.addCallback(_created)
|
||||
return d
|
||||
|
||||
|
||||
def test_create_mdmf(self):
|
||||
d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
|
||||
def _created(n):
|
||||
self.failUnless(isinstance(n, MutableFileNode))
|
||||
self.failUnlessEqual(n.get_storage_index(), n._storage_index)
|
||||
sb = self.nodemaker.storage_broker
|
||||
peer0 = sorted(sb.get_all_serverids())[0]
|
||||
shnums = self._storage._peers[peer0].keys()
|
||||
self.failUnlessEqual(len(shnums), 1)
|
||||
d.addCallback(_created)
|
||||
return d
|
||||
|
||||
def test_single_share(self):
|
||||
# Make sure that we tolerate publishing a single share.
|
||||
self.nodemaker.default_encoding_parameters['k'] = 1
|
||||
self.nodemaker.default_encoding_parameters['happy'] = 1
|
||||
self.nodemaker.default_encoding_parameters['n'] = 1
|
||||
d = defer.succeed(None)
|
||||
for v in (SDMF_VERSION, MDMF_VERSION):
|
||||
d.addCallback(lambda ignored, v=v:
|
||||
self.nodemaker.create_mutable_file(version=v))
|
||||
def _created(n):
|
||||
self.failUnless(isinstance(n, MutableFileNode))
|
||||
self._node = n
|
||||
return n
|
||||
d.addCallback(_created)
|
||||
d.addCallback(lambda n:
|
||||
n.overwrite(MutableData("Contents" * 50000)))
|
||||
d.addCallback(lambda ignored:
|
||||
self._node.download_best_version())
|
||||
d.addCallback(lambda contents:
|
||||
self.failUnlessEqual(contents, "Contents" * 50000))
|
||||
return d
|
||||
|
||||
def test_max_shares(self):
|
||||
self.nodemaker.default_encoding_parameters['n'] = 255
|
||||
d = self.nodemaker.create_mutable_file(version=SDMF_VERSION)
|
||||
def _created(n):
|
||||
self.failUnless(isinstance(n, MutableFileNode))
|
||||
self.failUnlessEqual(n.get_storage_index(), n._storage_index)
|
||||
sb = self.nodemaker.storage_broker
|
||||
num_shares = sum([len(self._storage._peers[x].keys()) for x \
|
||||
in sb.get_all_serverids()])
|
||||
self.failUnlessEqual(num_shares, 255)
|
||||
self._node = n
|
||||
return n
|
||||
d.addCallback(_created)
|
||||
# Now we upload some contents
|
||||
d.addCallback(lambda n:
|
||||
n.overwrite(MutableData("contents" * 50000)))
|
||||
# ...then download contents
|
||||
d.addCallback(lambda ignored:
|
||||
self._node.download_best_version())
|
||||
# ...and check to make sure everything went okay.
|
||||
d.addCallback(lambda contents:
|
||||
self.failUnlessEqual("contents" * 50000, contents))
|
||||
return d
|
||||
|
||||
def test_max_shares_mdmf(self):
|
||||
# Test how files behave when there are 255 shares.
|
||||
self.nodemaker.default_encoding_parameters['n'] = 255
|
||||
d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
|
||||
def _created(n):
|
||||
self.failUnless(isinstance(n, MutableFileNode))
|
||||
self.failUnlessEqual(n.get_storage_index(), n._storage_index)
|
||||
sb = self.nodemaker.storage_broker
|
||||
num_shares = sum([len(self._storage._peers[x].keys()) for x \
|
||||
in sb.get_all_serverids()])
|
||||
self.failUnlessEqual(num_shares, 255)
|
||||
self._node = n
|
||||
return n
|
||||
d.addCallback(_created)
|
||||
d.addCallback(lambda n:
|
||||
n.overwrite(MutableData("contents" * 50000)))
|
||||
d.addCallback(lambda ignored:
|
||||
self._node.download_best_version())
|
||||
d.addCallback(lambda contents:
|
||||
self.failUnlessEqual(contents, "contents" * 50000))
|
||||
return d
|
||||
|
||||
def test_mdmf_filenode_cap(self):
|
||||
# Test that an MDMF filenode, once created, returns an MDMF URI.
|
||||
d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
|
||||
def _created(n):
|
||||
self.failUnless(isinstance(n, MutableFileNode))
|
||||
cap = n.get_cap()
|
||||
self.failUnless(isinstance(cap, uri.WriteableMDMFFileURI))
|
||||
rcap = n.get_readcap()
|
||||
self.failUnless(isinstance(rcap, uri.ReadonlyMDMFFileURI))
|
||||
vcap = n.get_verify_cap()
|
||||
self.failUnless(isinstance(vcap, uri.MDMFVerifierURI))
|
||||
d.addCallback(_created)
|
||||
return d
|
||||
|
||||
|
||||
def test_create_from_mdmf_writecap(self):
|
||||
# Test that the nodemaker is capable of creating an MDMF
|
||||
# filenode given an MDMF cap.
|
||||
d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
|
||||
def _created(n):
|
||||
self.failUnless(isinstance(n, MutableFileNode))
|
||||
s = n.get_uri()
|
||||
self.failUnless(s.startswith("URI:MDMF"))
|
||||
n2 = self.nodemaker.create_from_cap(s)
|
||||
self.failUnless(isinstance(n2, MutableFileNode))
|
||||
self.failUnlessEqual(n.get_storage_index(), n2.get_storage_index())
|
||||
self.failUnlessEqual(n.get_uri(), n2.get_uri())
|
||||
d.addCallback(_created)
|
||||
return d
|
||||
|
||||
|
||||
def test_create_from_mdmf_readcap(self):
|
||||
d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
|
||||
def _created(n):
|
||||
self.failUnless(isinstance(n, MutableFileNode))
|
||||
s = n.get_readonly_uri()
|
||||
n2 = self.nodemaker.create_from_cap(s)
|
||||
self.failUnless(isinstance(n2, MutableFileNode))
|
||||
|
||||
# Check that it's a readonly node
|
||||
self.failUnless(n2.is_readonly())
|
||||
d.addCallback(_created)
|
||||
return d
|
||||
|
||||
|
||||
def test_internal_version_from_cap(self):
|
||||
# MutableFileNodes and MutableFileVersions have an internal
|
||||
# switch that tells them whether they're dealing with an SDMF or
|
||||
# MDMF mutable file when they start doing stuff. We want to make
|
||||
# sure that this is set appropriately given an MDMF cap.
|
||||
d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
|
||||
def _created(n):
|
||||
self.uri = n.get_uri()
|
||||
self.failUnlessEqual(n._protocol_version, MDMF_VERSION)
|
||||
|
||||
n2 = self.nodemaker.create_from_cap(self.uri)
|
||||
self.failUnlessEqual(n2._protocol_version, MDMF_VERSION)
|
||||
d.addCallback(_created)
|
||||
return d
|
||||
|
||||
|
||||
def test_serialize(self):
|
||||
n = MutableFileNode(None, None, {"k": 3, "n": 10}, None)
|
||||
calls = []
|
||||
def _callback(*args, **kwargs):
|
||||
self.failUnlessEqual(args, (4,) )
|
||||
self.failUnlessEqual(kwargs, {"foo": 5})
|
||||
calls.append(1)
|
||||
return 6
|
||||
d = n._do_serialized(_callback, 4, foo=5)
|
||||
def _check_callback(res):
|
||||
self.failUnlessEqual(res, 6)
|
||||
self.failUnlessEqual(calls, [1])
|
||||
d.addCallback(_check_callback)
|
||||
|
||||
def _errback():
|
||||
raise ValueError("heya")
|
||||
d.addCallback(lambda res:
|
||||
self.shouldFail(ValueError, "_check_errback", "heya",
|
||||
n._do_serialized, _errback))
|
||||
return d
|
||||
|
||||
def test_upload_and_download(self):
|
||||
d = self.nodemaker.create_mutable_file()
|
||||
def _created(n):
|
||||
d = defer.succeed(None)
|
||||
d.addCallback(lambda res: n.get_servermap(MODE_READ))
|
||||
d.addCallback(lambda smap: smap.dump(StringIO()))
|
||||
d.addCallback(lambda sio:
|
||||
self.failUnless("3-of-10" in sio.getvalue()))
|
||||
d.addCallback(lambda res: n.overwrite(MutableData("contents 1")))
|
||||
d.addCallback(lambda res: self.failUnlessIdentical(res, None))
|
||||
d.addCallback(lambda res: n.download_best_version())
|
||||
d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
|
||||
d.addCallback(lambda res: n.get_size_of_best_version())
|
||||
d.addCallback(lambda size:
|
||||
self.failUnlessEqual(size, len("contents 1")))
|
||||
d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
|
||||
d.addCallback(lambda res: n.download_best_version())
|
||||
d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
|
||||
d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
|
||||
d.addCallback(lambda smap: n.upload(MutableData("contents 3"), smap))
|
||||
d.addCallback(lambda res: n.download_best_version())
|
||||
d.addCallback(lambda res: self.failUnlessEqual(res, "contents 3"))
|
||||
d.addCallback(lambda res: n.get_servermap(MODE_ANYTHING))
|
||||
d.addCallback(lambda smap:
|
||||
n.download_version(smap,
|
||||
smap.best_recoverable_version()))
|
||||
d.addCallback(lambda res: self.failUnlessEqual(res, "contents 3"))
|
||||
# test a file that is large enough to overcome the
|
||||
# mapupdate-to-retrieve data caching (i.e. make the shares larger
|
||||
# than the default readsize, which is 2000 bytes). A 15kB file
|
||||
# will have 5kB shares.
|
||||
d.addCallback(lambda res: n.overwrite(MutableData("large size file" * 1000)))
|
||||
d.addCallback(lambda res: n.download_best_version())
|
||||
d.addCallback(lambda res:
|
||||
self.failUnlessEqual(res, "large size file" * 1000))
|
||||
return d
|
||||
d.addCallback(_created)
|
||||
return d
|
||||
|
||||
|
||||
def test_upload_and_download_mdmf(self):
|
||||
d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
|
||||
def _created(n):
|
||||
d = defer.succeed(None)
|
||||
d.addCallback(lambda ignored:
|
||||
n.get_servermap(MODE_READ))
|
||||
def _then(servermap):
|
||||
dumped = servermap.dump(StringIO())
|
||||
self.failUnlessIn("3-of-10", dumped.getvalue())
|
||||
d.addCallback(_then)
|
||||
# Now overwrite the contents with some new contents. We want
|
||||
# to make them big enough to force the file to be uploaded
|
||||
# in more than one segment.
|
||||
big_contents = "contents1" * 100000 # about 900 KiB
|
||||
big_contents_uploadable = MutableData(big_contents)
|
||||
d.addCallback(lambda ignored:
|
||||
n.overwrite(big_contents_uploadable))
|
||||
d.addCallback(lambda ignored:
|
||||
n.download_best_version())
|
||||
d.addCallback(lambda data:
|
||||
self.failUnlessEqual(data, big_contents))
|
||||
# Overwrite the contents again with some new contents. As
|
||||
# before, they need to be big enough to force multiple
|
||||
# segments, so that we make the downloader deal with
|
||||
# multiple segments.
|
||||
bigger_contents = "contents2" * 1000000 # about 9MiB
|
||||
bigger_contents_uploadable = MutableData(bigger_contents)
|
||||
d.addCallback(lambda ignored:
|
||||
n.overwrite(bigger_contents_uploadable))
|
||||
d.addCallback(lambda ignored:
|
||||
n.download_best_version())
|
||||
d.addCallback(lambda data:
|
||||
self.failUnlessEqual(data, bigger_contents))
|
||||
return d
|
||||
d.addCallback(_created)
|
||||
return d
|
||||
|
||||
|
||||
def test_retrieve_producer_mdmf(self):
|
||||
# We should make sure that the retriever is able to pause and stop
|
||||
# correctly.
|
||||
data = "contents1" * 100000
|
||||
d = self.nodemaker.create_mutable_file(MutableData(data),
|
||||
version=MDMF_VERSION)
|
||||
d.addCallback(lambda node: node.get_best_mutable_version())
|
||||
d.addCallback(self._test_retrieve_producer, "MDMF", data)
|
||||
return d
|
||||
|
||||
# note: SDMF has only one big segment, so we can't use the usual
|
||||
# after-the-first-write() trick to pause or stop the download.
|
||||
# Disabled until we find a better approach.
|
||||
def OFF_test_retrieve_producer_sdmf(self):
|
||||
data = "contents1" * 100000
|
||||
d = self.nodemaker.create_mutable_file(MutableData(data),
|
||||
version=SDMF_VERSION)
|
||||
d.addCallback(lambda node: node.get_best_mutable_version())
|
||||
d.addCallback(self._test_retrieve_producer, "SDMF", data)
|
||||
return d
|
||||
|
||||
def _test_retrieve_producer(self, version, kind, data):
|
||||
# Now we'll retrieve it into a pausing consumer.
|
||||
c = PausingConsumer()
|
||||
d = version.read(c)
|
||||
d.addCallback(lambda ign: self.failUnlessEqual(c.size, len(data)))
|
||||
|
||||
c2 = PausingAndStoppingConsumer()
|
||||
d.addCallback(lambda ign:
|
||||
self.shouldFail(DownloadStopped, kind+"_pause_stop",
|
||||
"our Consumer called stopProducing()",
|
||||
version.read, c2))
|
||||
|
||||
c3 = StoppingConsumer()
|
||||
d.addCallback(lambda ign:
|
||||
self.shouldFail(DownloadStopped, kind+"_stop",
|
||||
"our Consumer called stopProducing()",
|
||||
version.read, c3))
|
||||
|
||||
c4 = ImmediatelyStoppingConsumer()
|
||||
d.addCallback(lambda ign:
|
||||
self.shouldFail(DownloadStopped, kind+"_stop_imm",
|
||||
"our Consumer called stopProducing()",
|
||||
version.read, c4))
|
||||
|
||||
def _then(ign):
|
||||
c5 = MemoryConsumer()
|
||||
d1 = version.read(c5)
|
||||
c5.producer.stopProducing()
|
||||
return self.shouldFail(DownloadStopped, kind+"_stop_imm2",
|
||||
"our Consumer called stopProducing()",
|
||||
lambda: d1)
|
||||
d.addCallback(_then)
|
||||
return d
|
||||
|
||||
def test_download_from_mdmf_cap(self):
|
||||
# We should be able to download an MDMF file given its cap
|
||||
d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
|
||||
def _created(node):
|
||||
self.uri = node.get_uri()
|
||||
# also confirm that the cap has no extension fields
|
||||
pieces = self.uri.split(":")
|
||||
self.failUnlessEqual(len(pieces), 4)
|
||||
|
||||
return node.overwrite(MutableData("contents1" * 100000))
|
||||
def _then(ignored):
|
||||
node = self.nodemaker.create_from_cap(self.uri)
|
||||
return node.download_best_version()
|
||||
def _downloaded(data):
|
||||
self.failUnlessEqual(data, "contents1" * 100000)
|
||||
d.addCallback(_created)
|
||||
d.addCallback(_then)
|
||||
d.addCallback(_downloaded)
|
||||
return d
|
||||
|
||||
|
||||
def test_mdmf_write_count(self):
|
||||
# Publishing an MDMF file should only cause one write for each
|
||||
# share that is to be published. Otherwise, we introduce
|
||||
# undesirable semantics that are a regression from SDMF
|
||||
upload = MutableData("MDMF" * 100000) # about 400 KiB
|
||||
d = self.nodemaker.create_mutable_file(upload,
|
||||
version=MDMF_VERSION)
|
||||
def _check_server_write_counts(ignored):
|
||||
sb = self.nodemaker.storage_broker
|
||||
for server in sb.servers.itervalues():
|
||||
self.failUnlessEqual(server.get_rref().queries, 1)
|
||||
d.addCallback(_check_server_write_counts)
|
||||
return d
|
||||
|
||||
|
||||
def test_create_with_initial_contents(self):
|
||||
upload1 = MutableData("contents 1")
|
||||
d = self.nodemaker.create_mutable_file(upload1)
|
||||
def _created(n):
|
||||
d = n.download_best_version()
|
||||
d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
|
||||
upload2 = MutableData("contents 2")
|
||||
d.addCallback(lambda res: n.overwrite(upload2))
|
||||
d.addCallback(lambda res: n.download_best_version())
|
||||
d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
|
||||
return d
|
||||
d.addCallback(_created)
|
||||
return d
|
||||
|
||||
|
||||
def test_create_mdmf_with_initial_contents(self):
|
||||
initial_contents = "foobarbaz" * 131072 # 900KiB
|
||||
initial_contents_uploadable = MutableData(initial_contents)
|
||||
d = self.nodemaker.create_mutable_file(initial_contents_uploadable,
|
||||
version=MDMF_VERSION)
|
||||
def _created(n):
|
||||
d = n.download_best_version()
|
||||
d.addCallback(lambda data:
|
||||
self.failUnlessEqual(data, initial_contents))
|
||||
uploadable2 = MutableData(initial_contents + "foobarbaz")
|
||||
d.addCallback(lambda ignored:
|
||||
n.overwrite(uploadable2))
|
||||
d.addCallback(lambda ignored:
|
||||
n.download_best_version())
|
||||
d.addCallback(lambda data:
|
||||
self.failUnlessEqual(data, initial_contents +
|
||||
"foobarbaz"))
|
||||
return d
|
||||
d.addCallback(_created)
|
||||
return d
|
||||
|
||||
def test_create_with_initial_contents_function(self):
|
||||
data = "initial contents"
|
||||
def _make_contents(n):
|
||||
self.failUnless(isinstance(n, MutableFileNode))
|
||||
key = n.get_writekey()
|
||||
self.failUnless(isinstance(key, str), key)
|
||||
self.failUnlessEqual(len(key), 16) # AES key size
|
||||
return MutableData(data)
|
||||
d = self.nodemaker.create_mutable_file(_make_contents)
|
||||
def _created(n):
|
||||
return n.download_best_version()
|
||||
d.addCallback(_created)
|
||||
d.addCallback(lambda data2: self.failUnlessEqual(data2, data))
|
||||
return d
|
||||
|
||||
|
||||
def test_create_mdmf_with_initial_contents_function(self):
|
||||
data = "initial contents" * 100000
|
||||
def _make_contents(n):
|
||||
self.failUnless(isinstance(n, MutableFileNode))
|
||||
key = n.get_writekey()
|
||||
self.failUnless(isinstance(key, str), key)
|
||||
self.failUnlessEqual(len(key), 16)
|
||||
return MutableData(data)
|
||||
d = self.nodemaker.create_mutable_file(_make_contents,
|
||||
version=MDMF_VERSION)
|
||||
d.addCallback(lambda n:
|
||||
n.download_best_version())
|
||||
d.addCallback(lambda data2:
|
||||
self.failUnlessEqual(data2, data))
|
||||
return d
|
||||
|
||||
|
||||
def test_create_with_too_large_contents(self):
|
||||
BIG = "a" * (self.OLD_MAX_SEGMENT_SIZE + 1)
|
||||
BIG_uploadable = MutableData(BIG)
|
||||
d = self.nodemaker.create_mutable_file(BIG_uploadable)
|
||||
def _created(n):
|
||||
other_BIG_uploadable = MutableData(BIG)
|
||||
d = n.overwrite(other_BIG_uploadable)
|
||||
return d
|
||||
d.addCallback(_created)
|
||||
return d
|
||||
|
||||
def failUnlessCurrentSeqnumIs(self, n, expected_seqnum, which):
|
||||
d = n.get_servermap(MODE_READ)
|
||||
d.addCallback(lambda servermap: servermap.best_recoverable_version())
|
||||
d.addCallback(lambda verinfo:
|
||||
self.failUnlessEqual(verinfo[0], expected_seqnum, which))
|
||||
return d
|
||||
|
||||
    def test_modify(self):
        """Exercise MutableFileNode.modify() with a range of modifier
        callbacks: one that appends, ones that return the old contents or
        None (no publish expected), one that raises, ones that simulate a
        single UncoordinatedWriteError, and one that returns over-limit
        contents. Sequence numbers are checked after each step."""
        def _modifier(old_contents, servermap, first_time):
            new_contents = old_contents + "line2"
            return new_contents
        def _non_modifier(old_contents, servermap, first_time):
            # returns old contents unchanged: no new version should be written
            return old_contents
        def _none_modifier(old_contents, servermap, first_time):
            # returning None also means "leave the file alone"
            return None
        def _error_modifier(old_contents, servermap, first_time):
            raise ValueError("oops")
        def _toobig_modifier(old_contents, servermap, first_time):
            # one byte past the old SDMF segment-size limit
            new_content = "b" * (self.OLD_MAX_SEGMENT_SIZE + 1)
            return new_content
        calls = []
        def _ucw_error_modifier(old_contents, servermap, first_time):
            # simulate an UncoordinatedWriteError once
            calls.append(1)
            if len(calls) <= 1:
                raise UncoordinatedWriteError("simulated")
            new_contents = old_contents + "line3"
            return new_contents
        def _ucw_error_non_modifier(old_contents, servermap, first_time):
            # simulate an UncoordinatedWriteError once, and don't actually
            # modify the contents on subsequent invocations
            calls.append(1)
            if len(calls) <= 1:
                raise UncoordinatedWriteError("simulated")
            return old_contents

        initial_contents = "line1"
        d = self.nodemaker.create_mutable_file(MutableData(initial_contents))
        def _created(n):
            # a real modification bumps the seqnum to 2
            d = n.modify(_modifier)
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "m"))

            # returning the unchanged contents must not publish: seqnum stays 2
            d.addCallback(lambda res: n.modify(_non_modifier))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "non"))

            # returning None must not publish either
            d.addCallback(lambda res: n.modify(_none_modifier))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "none"))

            # a modifier that raises propagates the exception and leaves the
            # file untouched
            d.addCallback(lambda res:
                          self.shouldFail(ValueError, "error_modifier", None,
                                          n.modify, _error_modifier))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "err"))


            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "big"))

            # the simulated UCWE makes modify() retry: two modifier calls,
            # one publish, seqnum 3
            d.addCallback(lambda res: n.modify(_ucw_error_modifier))
            d.addCallback(lambda res: self.failUnlessEqual(len(calls), 2))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res,
                                                           "line1line2line3"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 3, "ucw"))

            def _reset_ucw_error_modifier(res):
                calls[:] = []
                return res
            d.addCallback(_reset_ucw_error_modifier)

            # in practice, this n.modify call should publish twice: the first
            # one gets a UCWE, the second does not. But our test jig (in
            # which the modifier raises the UCWE) skips over the first one,
            # so in this test there will be only one publish, and the seqnum
            # will only be one larger than the previous test, not two (i.e. 4
            # instead of 5).
            d.addCallback(lambda res: n.modify(_ucw_error_non_modifier))
            d.addCallback(lambda res: self.failUnlessEqual(len(calls), 2))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res,
                                                           "line1line2line3"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 4, "ucw"))
            # over-limit contents are now accepted (no size cap for MDMF-era
            # code), so this modify is expected to simply succeed
            d.addCallback(lambda res: n.modify(_toobig_modifier))
            return d
        d.addCallback(_created)
        return d
|
||||
|
||||
|
||||
    def test_modify_backoffer(self):
        """Exercise the backoffer argument of modify(): a stopper that
        re-raises immediately (no retry), a pauser that delays then retries,
        and a BackoffAgent that gives up after its retry budget."""
        def _modifier(old_contents, servermap, first_time):
            return old_contents + "line2"
        calls = []
        def _ucw_error_modifier(old_contents, servermap, first_time):
            # simulate an UncoordinatedWriteError once
            calls.append(1)
            if len(calls) <= 1:
                raise UncoordinatedWriteError("simulated")
            return old_contents + "line3"
        def _always_ucw_error_modifier(old_contents, servermap, first_time):
            # never succeeds: used to exhaust the give-up-er's retries
            raise UncoordinatedWriteError("simulated")
        def _backoff_stopper(node, f):
            # returning the failure aborts the retry loop immediately
            return f
        def _backoff_pauser(node, f):
            # wait half a second, then let modify() retry
            d = defer.Deferred()
            reactor.callLater(0.5, d.callback, None)
            return d

        # the give-up-er will hit its maximum retry count quickly
        giveuper = BackoffAgent()
        giveuper._delay = 0.1
        giveuper.factor = 1

        d = self.nodemaker.create_mutable_file(MutableData("line1"))
        def _created(n):
            d = n.modify(_modifier)
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "m"))

            # the stopper propagates the simulated UCWE; no publish happens
            d.addCallback(lambda res:
                          self.shouldFail(UncoordinatedWriteError,
                                          "_backoff_stopper", None,
                                          n.modify, _ucw_error_modifier,
                                          _backoff_stopper))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "stop"))

            def _reset_ucw_error_modifier(res):
                calls[:] = []
                return res
            d.addCallback(_reset_ucw_error_modifier)
            # the pauser retries after the delay, so the second modifier
            # invocation succeeds and publishes
            d.addCallback(lambda res: n.modify(_ucw_error_modifier,
                                               _backoff_pauser))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res,
                                                           "line1line2line3"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 3, "pause"))

            # the give-up-er exhausts its retries against a modifier that
            # always fails, so the UCWE surfaces and nothing is written
            d.addCallback(lambda res:
                          self.shouldFail(UncoordinatedWriteError,
                                          "giveuper", None,
                                          n.modify, _always_ucw_error_modifier,
                                          giveuper.delay))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res,
                                                           "line1line2line3"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 3, "giveup"))

            return d
        d.addCallback(_created)
        return d
|
||||
|
||||
    def test_upload_and_download_full_size_keys(self):
        """Round-trip overwrite/upload/download on a mutable file created
        with full-size (non-test) RSA keys from client.KeyGenerator."""
        self.nodemaker.key_generator = client.KeyGenerator()
        d = self.nodemaker.create_mutable_file()
        def _created(n):
            d = defer.succeed(None)
            d.addCallback(lambda res: n.get_servermap(MODE_READ))
            # the servermap dump should report the default 3-of-10 encoding
            d.addCallback(lambda smap: smap.dump(StringIO()))
            d.addCallback(lambda sio:
                          self.failUnless("3-of-10" in sio.getvalue()))
            # overwrite() is a mutator and should return None
            d.addCallback(lambda res: n.overwrite(MutableData("contents 1")))
            d.addCallback(lambda res: self.failUnlessIdentical(res, None))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
            d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
            # upload() with an explicit MODE_WRITE servermap also works
            d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
            d.addCallback(lambda smap: n.upload(MutableData("contents 3"), smap))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 3"))
            # download_version() of the best recoverable version agrees
            d.addCallback(lambda res: n.get_servermap(MODE_ANYTHING))
            d.addCallback(lambda smap:
                          n.download_version(smap,
                                             smap.best_recoverable_version()))
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 3"))
            return d
        d.addCallback(_created)
        return d
|
||||
|
||||
|
||||
def test_size_after_servermap_update(self):
|
||||
# a mutable file node should have something to say about how big
|
||||
# it is after a servermap update is performed, since this tells
|
||||
# us how large the best version of that mutable file is.
|
||||
d = self.nodemaker.create_mutable_file()
|
||||
def _created(n):
|
||||
self.n = n
|
||||
return n.get_servermap(MODE_READ)
|
||||
d.addCallback(_created)
|
||||
d.addCallback(lambda ignored:
|
||||
self.failUnlessEqual(self.n.get_size(), 0))
|
||||
d.addCallback(lambda ignored:
|
||||
self.n.overwrite(MutableData("foobarbaz")))
|
||||
d.addCallback(lambda ignored:
|
||||
self.failUnlessEqual(self.n.get_size(), 9))
|
||||
d.addCallback(lambda ignored:
|
||||
self.nodemaker.create_mutable_file(MutableData("foobarbaz")))
|
||||
d.addCallback(_created)
|
||||
d.addCallback(lambda ignored:
|
||||
self.failUnlessEqual(self.n.get_size(), 9))
|
||||
return d
|
54 lines added — new file: src/allmydata/test/mutable/test_interoperability.py
@@ -0,0 +1,54 @@
|
||||
import os, base64
|
||||
from twisted.trial import unittest
|
||||
from allmydata import uri
|
||||
from allmydata.storage.common import storage_index_to_dir
|
||||
from allmydata.util import fileutil
|
||||
from .. import common_util as testutil
|
||||
from ..no_network import GridTestMixin
|
||||
|
||||
class Interoperability(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin):
|
||||
sdmf_old_shares = {}
|
||||
sdmf_old_shares[0] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAhgcAb/adrQFrhlrRNoRpvjDuxmFebA4F0qCyqWssm61AAQ/EX4eC/1+hGOQ/h4EiKUkqxdsfzdcPlDvd11SGWZ0VHsUclZChTzuBAU2zLTXm+cG8IFhO50ly6Ey/DB44NtMKVaVzO0nU8DE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZ
DI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
|
||||
sdmf_old_shares[1] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAhgcAb/adrQFrhlrRNoRpvjDuxmFebA4F0qCyqWssm61AAP7FHJWQoU87gQFNsy015vnBvCBYTudJcuhMvwweODbTD8Rfh4L/X6EY5D+HgSIpSSrF2x/N1w+UO93XVIZZnRUeePDXEwhqYDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZ
DI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
|
||||
sdmf_old_shares[2] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAd8jdiCodW233N1acXhZGnulDKR3hiNsMdEIsijRPemewASoSCFpVj4utEE+eVFM146xfgC6DX39GaQ2zT3YKsWX3GiLwKtGffwqV7IlZIcBEVqMfTXSTZsY+dZm1MxxCZH0Zd33VY0yggDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZ
DI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
|
||||
sdmf_old_shares[3] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAd8jdiCodW233N1acXhZGnulDKR3hiNsMdEIsijRPemewARoi8CrRn38KleyJWSHARFajH010k2bGPnWZtTMcQmR9GhIIWlWPi60QT55UUzXjrF+ALoNff0ZpDbNPdgqxZfcSNSplrHqtsDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZ
DI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
|
||||
sdmf_old_shares[4] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAoIM8M4XulprmLd4gGMobS2Bv9CmwB5LpK/ySHE1QWjdwAUMA7/aVz7Mb1em0eks+biC8ZuVUhuAEkTVOAF4YulIjE8JlfW0dS1XKk62u0586QxiN38NTsluUDx8EAPTL66yRsfb1f3rRIDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZ
DI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
|
||||
sdmf_old_shares[5] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAoIM8M4XulprmLd4gGMobS2Bv9CmwB5LpK/ySHE1QWjdwATPCZX1tHUtVypOtrtOfOkMYjd/DU7JblA8fBAD0y+uskwDv9pXPsxvV6bR6Sz5uILxm5VSG4ASRNU4AXhi6UiMUKZHBmcmEgDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZ
DI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
|
||||
sdmf_old_shares[6] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAlyHZU7RfTJjbHu1gjabWZsTu+7nAeRVG6/ZSd4iMQ1ZgAWDSFSPvKzcFzRcuRlVgKUf0HBce1MCF8SwpUbPPEyfVJty4xLZ7DvNU/Eh/R6BarsVAagVXdp+GtEu0+fok7nilT4LchmHo8DE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZ
DI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
|
||||
sdmf_old_shares[7] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAlyHZU7RfTJjbHu1gjabWZsTu+7nAeRVG6/ZSd4iMQ1ZgAVbcuMS2ew7zVPxIf0egWq7FQGoFV3afhrRLtPn6JO54oNIVI+8rNwXNFy5GVWApR/QcFx7UwIXxLClRs88TJ9UtLnNF4/mM0DE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZ
DI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
|
||||
sdmf_old_shares[8] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgABUSzNKiMx0E91q51/WH6ASL0fDEOLef9oxuyBX5F5cpoABojmWkDX3k3FKfgNHIeptE3lxB8HHzxDfSD250psyfNCAAwGsKbMxbmI2NpdTozZ3SICrySwgGkatA1gsDOJmOnTzgAYmqKY7A9vQChuYa17fYSyKerIb3682jxiIneQvCMWCK5WcuI4PMeIsUAj8yxdxHvV+a9vtSCEsDVvymrrooDKX1GK98t37yoDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZ
DI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
|
||||
sdmf_old_shares[9] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgABUSzNKiMx0E91q51/WH6ASL0fDEOLef9oxuyBX5F5cpoABojmWkDX3k3FKfgNHIeptE3lxB8HHzxDfSD250psyfNCAAwGsKbMxbmI2NpdTozZ3SICrySwgGkatA1gsDOJmOnTzgAXVnLiODzHiLFAI/MsXcR71fmvb7UghLA1b8pq66KAyl+aopjsD29AKG5hrXt9hLIp6shvfrzaPGIid5C8IxYIrjgBj1YohGgDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZ
DI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
|
||||
sdmf_old_cap = "URI:SSK:gmjgofw6gan57gwpsow6gtrz3e:5adm6fayxmu3e4lkmfvt6lkkfix34ai2wop2ioqr4bgvvhiol3kq"
|
||||
sdmf_old_contents = "This is a test file.\n"
|
||||
def copy_sdmf_shares(self):
    """Place the canned old-format SDMF shares directly into the grid's
    storage servers, short-circuiting the normal upload process."""
    server_numbers = self.g.servers_by_number.keys()
    assert len(server_numbers) == 10

    # The storage index (derived from the old cap) names the directory
    # where each server keeps its shares.
    si = uri.from_string(self.sdmf_old_cap).get_storage_index()

    # Pair each canned share number with a distinct server and write the
    # decoded share bytes straight into that server's storage directory.
    for shnum, servernum in zip(self.sdmf_old_shares.keys(), server_numbers):
        payload = base64.b64decode(self.sdmf_old_shares[shnum])
        share_dir = os.path.join(self.get_serverdir(servernum),
                                 "shares", storage_index_to_dir(si))
        fileutil.make_dirs(share_dir)
        fileutil.write(os.path.join(share_dir, "%d" % shnum), payload)

    # ...and verify that all ten shares are now discoverable.
    assert len(self.find_uri_shares(self.sdmf_old_cap)) == 10
|
||||
|
||||
def test_new_downloader_can_read_old_shares(self):
    """A current client must be able to download a mutable file whose
    shares were written by an old version of the code."""
    self.basedir = "mutable/Interoperability/new_downloader_can_read_old_shares"
    self.set_up_grid()
    self.copy_sdmf_shares()
    # Build a filenode from the legacy cap and fetch its best version;
    # the result must match the known plaintext.
    node = self.g.clients[0].nodemaker.create_from_cap(self.sdmf_old_cap)
    d = node.download_best_version()
    d.addCallback(self.failUnlessEqual, self.sdmf_old_contents)
    return d
|
152
src/allmydata/test/mutable/test_multiple_encodings.py
Normal file
152
src/allmydata/test/mutable/test_multiple_encodings.py
Normal file
@ -0,0 +1,152 @@
|
||||
from twisted.trial import unittest
|
||||
from allmydata.interfaces import SDMF_VERSION
|
||||
from allmydata.monitor import Monitor
|
||||
from foolscap.logging import log
|
||||
from allmydata.mutable.common import MODE_READ
|
||||
from allmydata.mutable.publish import Publish, MutableData
|
||||
from allmydata.mutable.servermap import ServerMap, ServermapUpdater
|
||||
from ..common_util import DevNullDictionary
|
||||
from .util import FakeStorage, make_nodemaker
|
||||
|
||||
class MultipleEncodings(unittest.TestCase):
    """Exercise download of a mutable file whose shares were produced
    under several different encoding parameters (3-of-10, 4-of-9,
    4-of-7), to make sure retrieval survives a mix of encodings."""

    def setUp(self):
        # Publish an initial version into a fake in-memory 20-peer grid;
        # the created filenode is stashed as self._fn for the tests.
        self.CONTENTS = "New contents go here"
        self.uploadable = MutableData(self.CONTENTS)
        self._storage = FakeStorage()
        self._nodemaker = make_nodemaker(self._storage, num_peers=20)
        self._storage_broker = self._nodemaker.storage_broker
        d = self._nodemaker.create_mutable_file(self.uploadable)
        def _created(node):
            self._fn = node
        d.addCallback(_created)
        return d

    def _encode(self, k, n, data, version=SDMF_VERSION):
        # encode 'data' into a peerid->shares dict.

        fn = self._fn
        # disable the nodecache, since for these tests we explicitly need
        # multiple nodes pointing at the same file
        self._nodemaker._node_cache = DevNullDictionary()
        fn2 = self._nodemaker.create_from_cap(fn.get_uri())
        # then we copy over other fields that are normally fetched from the
        # existing shares
        fn2._pubkey = fn._pubkey
        fn2._privkey = fn._privkey
        fn2._encprivkey = fn._encprivkey
        # and set the encoding parameters to something completely different
        fn2._required_shares = k
        fn2._total_shares = n

        s = self._storage
        s._peers = {} # clear existing storage
        p2 = Publish(fn2, self._storage_broker, None)
        uploadable = MutableData(data)
        d = p2.publish(uploadable)
        def _published(res):
            # capture (and clear) the shares that the publish produced,
            # so the caller gets an isolated peerid->shares snapshot
            shares = s._peers
            s._peers = {}
            return shares
        d.addCallback(_published)
        return d

    def make_servermap(self, mode=MODE_READ, oldmap=None):
        # Build (or refresh) a ServerMap for self._fn in the given mode.
        if oldmap is None:
            oldmap = ServerMap()
        smu = ServermapUpdater(self._fn, self._storage_broker, Monitor(),
                               oldmap, mode)
        d = smu.update()
        return d

    def test_multiple_encodings(self):
        # we encode the same file in two different ways (3-of-10 and 4-of-9),
        # then mix up the shares, to make sure that download survives seeing
        # a variety of encodings. This is actually kind of tricky to set up.

        contents1 = "Contents for encoding 1 (3-of-10) go here"*1000
        contents2 = "Contents for encoding 2 (4-of-9) go here"*1000
        contents3 = "Contents for encoding 3 (4-of-7) go here"*1000

        # we make a retrieval object that doesn't know what encoding
        # parameters to use
        fn3 = self._nodemaker.create_from_cap(self._fn.get_uri())

        # now we upload a file through fn1, and grab its shares
        d = self._encode(3, 10, contents1)
        def _encoded_1(shares):
            self._shares1 = shares
        d.addCallback(_encoded_1)
        d.addCallback(lambda res: self._encode(4, 9, contents2))
        def _encoded_2(shares):
            self._shares2 = shares
        d.addCallback(_encoded_2)
        d.addCallback(lambda res: self._encode(4, 7, contents3))
        def _encoded_3(shares):
            self._shares3 = shares
        d.addCallback(_encoded_3)

        def _merge(res):
            log.msg("merging sharelists")
            # we merge the shares from the two sets, leaving each shnum in
            # its original location, but using a share from set1 or set2
            # according to the following sequence:
            #
            #  4-of-9  a  s2
            #  4-of-9  b  s2
            #  4-of-7  c  s3
            #  4-of-9  d  s2
            #  3-of-9  e  s1
            #  3-of-9  f  s1
            #  3-of-9  g  s1
            #  4-of-9  h  s2
            #
            # so that neither form can be recovered until fetch [f], at which
            # point version-s1 (the 3-of-10 form) should be recoverable. If
            # the implementation latches on to the first version it sees,
            # then s2 will be recoverable at fetch [g].

            # Later, when we implement code that handles multiple versions,
            # we can use this framework to assert that all recoverable
            # versions are retrieved, and test that 'epsilon' does its job

            places = [2, 2, 3, 2, 1, 1, 1, 2]

            sharemap = {}
            sb = self._storage_broker

            for peerid in sorted(sb.get_all_serverids()):
                for shnum in self._shares1.get(peerid, {}):
                    if shnum < len(places):
                        which = places[shnum]
                    else:
                        which = "x"
                    self._storage._peers[peerid] = peers = {}
                    in_1 = shnum in self._shares1[peerid]
                    in_2 = shnum in self._shares2.get(peerid, {})
                    in_3 = shnum in self._shares3.get(peerid, {})
                    if which == 1:
                        if in_1:
                            peers[shnum] = self._shares1[peerid][shnum]
                            sharemap[shnum] = peerid
                    elif which == 2:
                        if in_2:
                            peers[shnum] = self._shares2[peerid][shnum]
                            sharemap[shnum] = peerid
                    elif which == 3:
                        if in_3:
                            peers[shnum] = self._shares3[peerid][shnum]
                            sharemap[shnum] = peerid

            # we don't bother placing any other shares
            # now sort the sequence so that share 0 is returned first
            new_sequence = [sharemap[shnum]
                            for shnum in sorted(sharemap.keys())]
            self._storage._sequence = new_sequence
            log.msg("merge done")
        d.addCallback(_merge)
        d.addCallback(lambda res: fn3.download_best_version())
        def _retrieved(new_contents):
            # the current specified behavior is "first version recoverable"
            self.failUnlessEqual(new_contents, contents1)
        d.addCallback(_retrieved)
        return d
|
85
src/allmydata/test/mutable/test_multiple_versions.py
Normal file
85
src/allmydata/test/mutable/test_multiple_versions.py
Normal file
@ -0,0 +1,85 @@
|
||||
from twisted.trial import unittest
|
||||
from allmydata.monitor import Monitor
|
||||
from allmydata.mutable.common import MODE_CHECK, MODE_READ
|
||||
from .util import PublishMixin, CheckerMixin
|
||||
|
||||
|
||||
class MultipleVersions(unittest.TestCase, PublishMixin, CheckerMixin):
    """Tests for grids that contain a mix of versions of the same mutable
    file: download_best_version should pick the newest recoverable one,
    and modify() should replace all versions."""

    def setUp(self):
        # publish_multiple (from PublishMixin) uploads several versions
        # and provides self._fn, self.CONTENTS, and _set_versions().
        return self.publish_multiple()

    def test_multiple_versions(self):
        # if we see a mix of versions in the grid, download_best_version
        # should get the latest one
        self._set_versions(dict([(i,2) for i in (0,2,4,6,8)]))
        d = self._fn.download_best_version()
        d.addCallback(lambda res: self.failUnlessEqual(res, self.CONTENTS[4]))
        # and the checker should report problems
        d.addCallback(lambda res: self._fn.check(Monitor()))
        d.addCallback(self.check_bad, "test_multiple_versions")

        # but if everything is at version 2, that's what we should download
        d.addCallback(lambda res:
                      self._set_versions(dict([(i,2) for i in range(10)])))
        d.addCallback(lambda res: self._fn.download_best_version())
        d.addCallback(lambda res: self.failUnlessEqual(res, self.CONTENTS[2]))
        # if exactly one share is at version 3, we should still get v2
        d.addCallback(lambda res:
                      self._set_versions({0:3}))
        d.addCallback(lambda res: self._fn.download_best_version())
        d.addCallback(lambda res: self.failUnlessEqual(res, self.CONTENTS[2]))
        # but the servermap should see the unrecoverable version. This
        # depends upon the single newer share being queried early.
        d.addCallback(lambda res: self._fn.get_servermap(MODE_READ))
        def _check_smap(smap):
            self.failUnlessEqual(len(smap.unrecoverable_versions()), 1)
            newer = smap.unrecoverable_newer_versions()
            self.failUnlessEqual(len(newer), 1)
            verinfo, health = newer.items()[0]
            self.failUnlessEqual(verinfo[0], 4)
            self.failUnlessEqual(health, (1,3))
            self.failIf(smap.needs_merge())
        d.addCallback(_check_smap)
        # if we have a mix of two parallel versions (s4a and s4b), we could
        # recover either
        d.addCallback(lambda res:
                      self._set_versions({0:3,2:3,4:3,6:3,8:3,
                                          1:4,3:4,5:4,7:4,9:4}))
        d.addCallback(lambda res: self._fn.get_servermap(MODE_READ))
        def _check_smap_mixed(smap):
            self.failUnlessEqual(len(smap.unrecoverable_versions()), 0)
            newer = smap.unrecoverable_newer_versions()
            self.failUnlessEqual(len(newer), 0)
            self.failUnless(smap.needs_merge())
        d.addCallback(_check_smap_mixed)
        d.addCallback(lambda res: self._fn.download_best_version())
        d.addCallback(lambda res: self.failUnless(res == self.CONTENTS[3] or
                                                  res == self.CONTENTS[4]))
        return d

    def test_replace(self):
        # if we see a mix of versions in the grid, we should be able to
        # replace them all with a newer version

        # if exactly one share is at version 3, we should download (and
        # replace) v2, and the result should be v4. Note that the index we
        # give to _set_versions is different than the sequence number.
        target = dict([(i,2) for i in range(10)]) # seqnum3
        target[0] = 3 # seqnum4
        self._set_versions(target)

        def _modify(oldversion, servermap, first_time):
            return oldversion + " modified"
        d = self._fn.modify(_modify)
        d.addCallback(lambda res: self._fn.download_best_version())
        expected = self.CONTENTS[2] + " modified"
        d.addCallback(lambda res: self.failUnlessEqual(res, expected))
        # and the servermap should indicate that the outlier was replaced too
        d.addCallback(lambda res: self._fn.get_servermap(MODE_CHECK))
        def _check_smap(smap):
            self.failUnlessEqual(smap.highest_seqnum(), 5)
            self.failUnlessEqual(len(smap.unrecoverable_versions()), 0)
            self.failUnlessEqual(len(smap.recoverable_versions()), 1)
        d.addCallback(_check_smap)
        return d
|
558
src/allmydata/test/mutable/test_problems.py
Normal file
558
src/allmydata/test/mutable/test_problems.py
Normal file
@ -0,0 +1,558 @@
|
||||
import os, base64
|
||||
from twisted.trial import unittest
|
||||
from twisted.internet import defer
|
||||
from foolscap.logging import log
|
||||
from allmydata import uri
|
||||
from allmydata.interfaces import NotEnoughSharesError, SDMF_VERSION, MDMF_VERSION
|
||||
from allmydata.util import fileutil
|
||||
from allmydata.util.hashutil import ssk_writekey_hash, ssk_pubkey_fingerprint_hash
|
||||
from allmydata.mutable.common import \
|
||||
MODE_CHECK, MODE_WRITE, MODE_READ, \
|
||||
UncoordinatedWriteError, \
|
||||
NotEnoughServersError
|
||||
from allmydata.mutable.publish import MutableData
|
||||
from allmydata.storage.common import storage_index_to_dir
|
||||
from ..common import TEST_RSA_KEY_SIZE
|
||||
from ..no_network import GridTestMixin
|
||||
from .. import common_util as testutil
|
||||
from ..common_util import DevNullDictionary
|
||||
|
||||
class SameKeyGenerator:
    """Stub key generator that always returns one fixed
    (pubkey, privkey) pair, ignoring the requested key size."""

    def __init__(self, pubkey, privkey):
        self.pubkey = pubkey
        self.privkey = privkey

    def generate(self, keysize=None):
        # keysize is accepted for interface compatibility but unused.
        keypair = (self.pubkey, self.privkey)
        return defer.succeed(keypair)
|
||||
|
||||
class FirstServerGetsKilled:
    """Hook object whose notify() breaks the first wrapper it sees.

    Only the very first call has a side effect (it sets wrapper.broken
    and latches self.done); every call passes retval through unchanged.
    """
    # class-level default; the first notify() sets an instance attribute
    done = False

    def notify(self, retval, wrapper, methname):
        if self.done:
            return retval
        # First observed call: mark this wrapper broken, then become a no-op.
        wrapper.broken = True
        self.done = True
        return retval
|
||||
|
||||
class FirstServerGetsDeleted:
    """Hook object: after its first call, one wrapper's later writev
    results are replaced as if the share had been deleted.

    The first notify() records which wrapper it saw and passes the real
    result through. Subsequent calls on that same wrapper must be
    'slot_testv_and_readv_and_writev' and get (True, {}) instead.
    """

    def __init__(self):
        self.done = False      # latched True after the first call
        self.silenced = None   # the wrapper whose later answers are faked

    def notify(self, retval, wrapper, methname):
        if not self.done:
            # this query will work, but later queries should think the share
            # has been deleted
            self.done = True
            self.silenced = wrapper
            return retval
        if wrapper == self.silenced:
            assert methname == "slot_testv_and_readv_and_writev"
            return (True, {})
        return retval
|
||||
|
||||
class Problems(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin):
|
||||
def do_publish_surprise(self, version):
    """Publishing with a stale servermap (taken before another write)
    must fail with UncoordinatedWriteError, for the given file version
    (SDMF or MDMF)."""
    self.basedir = "mutable/Problems/test_publish_surprise_%s" % version
    self.set_up_grid()
    nm = self.g.clients[0].nodemaker
    d = nm.create_mutable_file(MutableData("contents 1"),
                               version=version)
    def _created(n):
        d = defer.succeed(None)
        d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
        def _got_smap1(smap):
            # stash the old state of the file
            self.old_map = smap
        d.addCallback(_got_smap1)
        # then modify the file, leaving the old map untouched
        d.addCallback(lambda res: log.msg("starting winning write"))
        d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
        # now attempt to modify the file with the old servermap. This
        # will look just like an uncoordinated write, in which every
        # single share got updated between our mapupdate and our publish
        d.addCallback(lambda res: log.msg("starting doomed write"))
        d.addCallback(lambda res:
                      self.shouldFail(UncoordinatedWriteError,
                                      "test_publish_surprise", None,
                                      n.upload,
                                      MutableData("contents 2a"), self.old_map))
        return d
    d.addCallback(_created)
    return d
|
||||
|
||||
def test_publish_surprise_sdmf(self):
    """Stale-servermap publish must fail for an SDMF file."""
    return self.do_publish_surprise(SDMF_VERSION)
|
||||
|
||||
def test_publish_surprise_mdmf(self):
    """Stale-servermap publish must fail for an MDMF file."""
    return self.do_publish_surprise(MDMF_VERSION)
|
||||
|
||||
def test_retrieve_surprise(self):
    """Downloading a version via a stale servermap (taken before the file
    was overwritten) must fail with NotEnoughSharesError."""
    self.basedir = "mutable/Problems/test_retrieve_surprise"
    self.set_up_grid()
    nm = self.g.clients[0].nodemaker
    d = nm.create_mutable_file(MutableData("contents 1"*4000))
    def _created(n):
        d = defer.succeed(None)
        d.addCallback(lambda res: n.get_servermap(MODE_READ))
        def _got_smap1(smap):
            # stash the old state of the file
            self.old_map = smap
        d.addCallback(_got_smap1)
        # then modify the file, leaving the old map untouched
        d.addCallback(lambda res: log.msg("starting winning write"))
        d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
        # now attempt to retrieve the old version with the old servermap.
        # This will look like someone has changed the file since we
        # updated the servermap.
        d.addCallback(lambda res: log.msg("starting doomed read"))
        d.addCallback(lambda res:
                      self.shouldFail(NotEnoughSharesError,
                                      "test_retrieve_surprise",
                                      "ran out of servers: have 0 of 1",
                                      n.download_version,
                                      self.old_map,
                                      self.old_map.best_recoverable_version(),
                                      ))
        return d
    d.addCallback(_created)
    return d
|
||||
|
||||
|
||||
def test_unexpected_shares(self):
    # upload the file, take a servermap, shut down one of the servers,
    # upload it again (causing shares to appear on a new server), then
    # upload using the old servermap. The last upload should fail with an
    # UncoordinatedWriteError, because of the shares that didn't appear
    # in the servermap.
    self.basedir = "mutable/Problems/test_unexpected_shares"
    self.set_up_grid()
    nm = self.g.clients[0].nodemaker
    d = nm.create_mutable_file(MutableData("contents 1"))
    def _created(n):
        d = defer.succeed(None)
        d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
        def _got_smap1(smap):
            # stash the old state of the file
            self.old_map = smap
            # now shut down one of the servers
            peer0 = list(smap.make_sharemap()[0])[0].get_serverid()
            self.g.remove_server(peer0)
            # then modify the file, leaving the old map untouched
            log.msg("starting winning write")
            return n.overwrite(MutableData("contents 2"))
        d.addCallback(_got_smap1)
        # now attempt to modify the file with the old servermap. This
        # will look just like an uncoordinated write, in which every
        # single share got updated between our mapupdate and our publish
        d.addCallback(lambda res: log.msg("starting doomed write"))
        d.addCallback(lambda res:
                      self.shouldFail(UncoordinatedWriteError,
                                      "test_surprise", None,
                                      n.upload,
                                      MutableData("contents 2a"), self.old_map))
        return d
    d.addCallback(_created)
    return d
|
||||
|
||||
def test_multiply_placed_shares(self):
    """After shares end up duplicated across servers, a full overwrite
    must update every copy, leaving exactly one recoverable version."""
    self.basedir = "mutable/Problems/test_multiply_placed_shares"
    self.set_up_grid()
    nm = self.g.clients[0].nodemaker
    d = nm.create_mutable_file(MutableData("contents 1"))
    # remove one of the servers and reupload the file.
    def _created(n):
        self._node = n

        servers = self.g.get_all_serverids()
        # drop the last server (keep it so we can re-add it later), and
        # replace it with a fresh empty one, forcing shares to migrate
        self.ss = self.g.remove_server(servers[len(servers)-1])

        new_server = self.g.make_server(len(servers)-1)
        self.g.add_server(len(servers)-1, new_server)

        return self._node.download_best_version()
    d.addCallback(_created)
    d.addCallback(lambda data: MutableData(data))
    d.addCallback(lambda data: self._node.overwrite(data))

    # restore the server we removed earlier, then download+upload
    # the file again
    def _overwritten(ign):
        self.g.add_server(len(self.g.servers_by_number), self.ss)
        return self._node.download_best_version()
    d.addCallback(_overwritten)
    d.addCallback(lambda data: MutableData(data))
    d.addCallback(lambda data: self._node.overwrite(data))
    d.addCallback(lambda ignored:
                  self._node.get_servermap(MODE_CHECK))
    def _overwritten_again(smap):
        # Make sure that all shares were updated by making sure that
        # there aren't any other versions in the sharemap.
        self.failUnlessEqual(len(smap.recoverable_versions()), 1)
        self.failUnlessEqual(len(smap.unrecoverable_versions()), 0)
    d.addCallback(_overwritten_again)
    return d
|
||||
|
||||
def test_bad_server(self):
    """Publishing and updating must tolerate broken servers.

    Break one server, then create the file: the initial publish should
    complete with an alternate server. Breaking a second server should
    not prevent an update from succeeding either.
    """
    self.basedir = "mutable/Problems/test_bad_server"
    self.set_up_grid()
    nm = self.g.clients[0].nodemaker

    # to make sure that one of the initial peers is broken, we have to
    # get creative. We create an RSA key and compute its storage-index.
    # Then we make a KeyGenerator that always returns that one key, and
    # use it to create the mutable file. This will get easier when we can
    # use #467 static-server-selection to disable permutation and force
    # the choice of server for share[0].

    d = nm.key_generator.generate(TEST_RSA_KEY_SIZE)
    # NOTE: Python-2-only tuple-parameter unpacking below
    def _got_key( (pubkey, privkey) ):
        nm.key_generator = SameKeyGenerator(pubkey, privkey)
        pubkey_s = pubkey.serialize()
        privkey_s = privkey.serialize()
        # derive the writecap (and thus storage index) that
        # create_mutable_file will end up using
        u = uri.WriteableSSKFileURI(ssk_writekey_hash(privkey_s),
                                    ssk_pubkey_fingerprint_hash(pubkey_s))
        self._storage_index = u.get_storage_index()
    d.addCallback(_got_key)
    def _break_peer0(res):
        # break the first server in permuted order for this SI, and
        # remember the second one so we can break it later
        si = self._storage_index
        servers = nm.storage_broker.get_servers_for_psi(si)
        self.g.break_server(servers[0].get_serverid())
        self.server1 = servers[1]
    d.addCallback(_break_peer0)
    # now "create" the file, using the pre-established key, and let the
    # initial publish finally happen
    d.addCallback(lambda res: nm.create_mutable_file(MutableData("contents 1")))
    # that ought to work
    def _got_node(n):
        d = n.download_best_version()
        d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
        # now break the second peer
        def _break_peer1(res):
            self.g.break_server(self.server1.get_serverid())
        d.addCallback(_break_peer1)
        d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
        # that ought to work too
        d.addCallback(lambda res: n.download_best_version())
        d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
        def _explain_error(f):
            # debugging aid: dump the failure before re-raising it
            print f
            if f.check(NotEnoughServersError):
                print "first_error:", f.value.first_error
            return f
        d.addErrback(_explain_error)
        return d
    d.addCallback(_got_node)
    return d
|
||||
|
||||
def test_bad_server_overlap(self):
    """Publishing must be able to re-use a server it already gave a share.

    like test_bad_server, but with no extra unused servers to fall back
    upon. This means that we must re-use a server which we've already
    used. If we don't remember the fact that we sent them one share
    already, we'll mistakenly think we're experiencing an
    UncoordinatedWriteError.
    """

    # Break one server, then create the file: the initial publish should
    # complete with an alternate server. Breaking a second server should
    # not prevent an update from succeeding either.
    self.basedir = "mutable/Problems/test_bad_server_overlap"
    self.set_up_grid()
    nm = self.g.clients[0].nodemaker
    sb = nm.storage_broker

    peerids = [s.get_serverid() for s in sb.get_connected_servers()]
    self.g.break_server(peerids[0])

    d = nm.create_mutable_file(MutableData("contents 1"))
    def _created(n):
        d = n.download_best_version()
        d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
        # now break one of the remaining servers
        def _break_second_server(res):
            self.g.break_server(peerids[1])
        d.addCallback(_break_second_server)
        d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
        # that ought to work too
        d.addCallback(lambda res: n.download_best_version())
        d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
        return d
    d.addCallback(_created)
    return d
|
||||
|
||||
def test_publish_all_servers_bad(self):
    """Creating a mutable file must fail when every server is broken."""
    self.basedir = "mutable/Problems/test_publish_all_servers_bad"
    self.set_up_grid()
    nodemaker = self.g.clients[0].nodemaker
    # mark every connected server as broken before the initial publish
    for server in nodemaker.storage_broker.get_connected_servers():
        server.get_rref().broken = True
    # the publish should then run out of usable servers
    return self.shouldFail(NotEnoughServersError,
                           "test_publish_all_servers_bad",
                           "ran out of good servers",
                           nodemaker.create_mutable_file,
                           MutableData("contents"))
|
||||
|
||||
def test_publish_no_servers(self):
    """Creating a mutable file must fail on a grid with zero servers."""
    self.basedir = "mutable/Problems/test_publish_no_servers"
    self.set_up_grid(num_servers=0)
    nodemaker = self.g.clients[0].nodemaker
    # with no servers at all, the publish cannot place any shares
    return self.shouldFail(NotEnoughServersError,
                           "test_publish_no_servers",
                           "Ran out of non-bad servers",
                           nodemaker.create_mutable_file,
                           MutableData("contents"))
|
||||
|
||||
|
||||
def test_privkey_query_error(self):
    """A failing privkey query during MODE_WRITE mapupdate must be handled.

    when a servermap is updated with MODE_WRITE, it tries to get the
    privkey. Something might go wrong during this query attempt.
    Exercise the code in _privkey_query_failed which tries to handle
    such an error.
    """
    self.basedir = "mutable/Problems/test_privkey_query_error"
    self.set_up_grid(num_servers=20)
    nm = self.g.clients[0].nodemaker
    nm._node_cache = DevNullDictionary() # disable the nodecache

    # we need some contents that are large enough to push the privkey out
    # of the early part of the file
    LARGE = "These are Larger contents" * 2000 # about 50KB
    LARGE_uploadable = MutableData(LARGE)
    d = nm.create_mutable_file(LARGE_uploadable)
    def _created(n):
        self.uri = n.get_uri()
        # self.n2 is a fresh node that does not yet know the privkey
        self.n2 = nm.create_from_cap(self.uri)

        # When a mapupdate is performed on a node that doesn't yet know
        # the privkey, a short read is sent to a batch of servers, to get
        # the verinfo and (hopefully, if the file is short enough) the
        # encprivkey. Our file is too large to let this first read
        # contain the encprivkey. Each non-encprivkey-bearing response
        # that arrives (until the node gets the encprivkey) will trigger
        # a second read to specifically read the encprivkey.
        #
        # So, to exercise this case:
        #  1. notice which server gets a read() call first
        #  2. tell that server to start throwing errors
        killer = FirstServerGetsKilled()
        for s in nm.storage_broker.get_connected_servers():
            s.get_rref().post_call_notifier = killer.notify
    d.addCallback(_created)

    # now we update a servermap from a new node (which doesn't have the
    # privkey yet, forcing it to use a separate privkey query). Note that
    # the map-update will succeed, since we'll just get a copy from one
    # of the other shares.
    d.addCallback(lambda res: self.n2.get_servermap(MODE_WRITE))

    return d
|
||||
|
||||
def test_privkey_query_missing(self):
    """like test_privkey_query_error, but the shares are deleted by the
    second query, instead of raising an exception."""
    self.basedir = "mutable/Problems/test_privkey_query_missing"
    self.set_up_grid(num_servers=20)
    nm = self.g.clients[0].nodemaker
    # contents must be large enough to push the privkey past the initial
    # short read (see test_privkey_query_error)
    LARGE = "These are Larger contents" * 2000 # about 50KiB
    LARGE_uploadable = MutableData(LARGE)
    nm._node_cache = DevNullDictionary() # disable the nodecache

    d = nm.create_mutable_file(LARGE_uploadable)
    def _created(n):
        self.uri = n.get_uri()
        self.n2 = nm.create_from_cap(self.uri)
        # delete the first server's shares after its first response,
        # so the follow-up privkey read finds nothing
        deleter = FirstServerGetsDeleted()
        for s in nm.storage_broker.get_connected_servers():
            s.get_rref().post_call_notifier = deleter.notify
    d.addCallback(_created)
    # the mapupdate should still succeed via the other servers
    d.addCallback(lambda res: self.n2.get_servermap(MODE_WRITE))
    return d
|
||||
|
||||
|
||||
def test_block_and_hash_query_error(self):
    """A server dying mid-retrieve must not break the download.

    This tests for what happens when a query to a remote server
    fails in either the hash validation step or the block getting
    step (because of batching, this is the same actual query).
    We need to have the storage server persist up until the point
    that its prefix is validated, then suddenly die. This
    exercises some exception handling code in Retrieve.
    """
    self.basedir = "mutable/Problems/test_block_and_hash_query_error"
    self.set_up_grid(num_servers=20)
    nm = self.g.clients[0].nodemaker
    CONTENTS = "contents" * 2000
    CONTENTS_uploadable = MutableData(CONTENTS)
    d = nm.create_mutable_file(CONTENTS_uploadable)
    def _created(node):
        self._node = node
    d.addCallback(_created)
    d.addCallback(lambda ignored:
        self._node.get_servermap(MODE_READ))
    def _then(servermap):
        # we have our servermap. Now we set up the servers like the
        # tests above -- the first one that gets a read call should
        # start throwing errors, but only after returning its prefix
        # for validation. Since we'll download without fetching the
        # private key, the next query to the remote server will be
        # for either a block and salt or for hashes, either of which
        # will exercise the error handling code.
        killer = FirstServerGetsKilled()
        for s in nm.storage_broker.get_connected_servers():
            s.get_rref().post_call_notifier = killer.notify
        ver = servermap.best_recoverable_version()
        assert ver
        return self._node.download_version(servermap, ver)
    d.addCallback(_then)
    # despite the dying server, the full contents must come back intact
    d.addCallback(lambda data:
        self.failUnlessEqual(data, CONTENTS))
    return d
|
||||
|
||||
def test_1654(self):
    """Regression test for ticket #1654: corrupted-share detection.

    test that the Retrieve object unconditionally verifies the block
    hash tree root for mutable shares. The failure mode is that
    carefully crafted shares can cause undetected corruption (the
    retrieve appears to finish successfully, but the result is
    corrupted). When fixed, these shares always cause a
    CorruptShareError, which results in NotEnoughSharesError in this
    2-of-2 file.
    """
    self.basedir = "mutable/Problems/test_1654"
    self.set_up_grid(num_servers=2)
    cap = uri.from_string(TEST_1654_CAP)
    si = cap.get_storage_index()

    # plant the two pre-built (malicious) shares directly into the
    # servers' storage directories
    for share, shnum in [(TEST_1654_SH0, 0), (TEST_1654_SH1, 1)]:
        sharedata = base64.b64decode(share)
        storedir = self.get_serverdir(shnum)
        storage_path = os.path.join(storedir, "shares",
                                    storage_index_to_dir(si))
        fileutil.make_dirs(storage_path)
        fileutil.write(os.path.join(storage_path, "%d" % shnum),
                       sharedata)

    nm = self.g.clients[0].nodemaker
    n = nm.create_from_cap(TEST_1654_CAP)
    # to exercise the problem correctly, we must ensure that sh0 is
    # processed first, and sh1 second. NoNetworkGrid has facilities to
    # stall the first request from a single server, but it's not
    # currently easy to extend that to stall the second request (mutable
    # retrievals will see two: first the mapupdate, then the fetch).
    # However, repeated executions of this run without the #1654 fix
    # suggests that we're failing reliably even without explicit stalls,
    # probably because the servers are queried in a fixed order. So I'm
    # ok with relying upon that.
    d = self.shouldFail(NotEnoughSharesError, "test #1654 share corruption",
                        "ran out of servers",
                        n.download_best_version)
    return d
|
||||
|
||||
|
||||
TEST_1654_CAP = "URI:SSK:6jthysgozssjnagqlcxjq7recm:yxawei54fmf2ijkrvs2shs6iey4kpdp6joi7brj2vrva6sp5nf3a"
|
||||
|
||||
TEST_1654_SH0 = """\
|
||||
VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA46m9s5j6lnzsOHytBTs2JOo
|
||||
AkWe8058hyrDa8igfBSqZMKO3aDOrFuRVt0ySYZ6oihFqPJRAAAAAAAAB8YAAAAA
|
||||
AAAJmgAAAAFPNgDkK8brSCzKz6n8HFqzbnAlALvnaB0Qpa1Bjo9jiZdmeMyneHR+
|
||||
UoJcDb1Ls+lVLeUqP2JitBEXdCzcF/X2YMDlmKb2zmPqWfOw4fK0FOzYk6gCRZ7z
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABCDwr
|
||||
uIlhFlv21pDqyMeA9X1wHp98a1CKY4qfC7gn5exyODAcnhZKHCV18XBerbZLAgIA
|
||||
AAAAAAAAJgAAAAAAAAAmAAABjwAAAo8AAALTAAAC8wAAAAAAAAMGAAAAAAAAB8Yw
|
||||
ggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQCXKMor062nfxHVutMbqNcj
|
||||
vVC92wXTcQulenNWEX+0huK54igTAG60p0lZ6FpBJ9A+dlStT386bn5I6qe50ky5
|
||||
CFodQSsQX+1yByMFlzqPDo4rclk/6oVySLypxnt/iBs3FPZ4zruhYXcITc6zaYYU
|
||||
Xqaw/C86g6M06MWQKsGev7PS3tH7q+dtovWzDgU13Q8PG2whGvGNfxPOmEX4j0wL
|
||||
FCBavpFnLpo3bJrj27V33HXxpPz3NP+fkaG0pKH03ANd/yYHfGf74dC+eD5dvWBM
|
||||
DU6fZQN4k/T+cth+qzjS52FPPTY9IHXIb4y+1HryVvxcx6JDifKoOzpFc3SDbBAP
|
||||
AgERKDjOFxVClH81DF/QkqpP0glOh6uTsFNx8Nes02q0d7iip2WqfG9m2+LmiWy8
|
||||
Pg7RlQQy2M45gert1EDsH4OI69uxteviZP1Mo0wD6HjmWUbGIQRmsT3DmYEZCCMA
|
||||
/KjhNmlov2+OhVxIaHwE7aN840IfkGdJ/JssB6Z/Ym3+ou4+jAYKhifPQGrpBVjd
|
||||
73oH6w9StnoGYIrEEQw8LFc4jnAFYciKlPuo6E6E3zDseE7gwkcOpCtVVksZu6Ii
|
||||
GQgIV8vjFbNz9M//RMXOBTwKFDiG08IAPh7fv2uKzFis0TFrR7sQcMQ/kZZCLPPi
|
||||
ECIX95NRoFRlxK/1kZ1+FuuDQgABz9+5yd/pjkVybmvc7Jr70bOVpxvRoI2ZEgh/
|
||||
+QdxfcwAAm5iDnzPtsVdcbuNkKprfI8N4n+QmUOSMbAJ7M8r1cp4z9+5yd/pjkVy
|
||||
bmvc7Jr70bOVpxvRoI2ZEgh/+QdxfcxGzRV0shAW86irr5bDQOyyknYk0p2xw2Wn
|
||||
z6QccyXyobXPOFLO3ZBPnKaE58aaN7x3srQZYUKafet5ZMDX8fsQf2mbxnaeG5NF
|
||||
eO6wG++WBUo9leddnzKBnRcMGRAtJEjwfKMVPE8SmuTlL6kRc7n8wvY2ygClWlRm
|
||||
d7o95tZfoO+mexB/DLEpWLtlAiqh8yJ8cWaC5rYz4ZC2+z7QkeKXCHWAN3i4C++u
|
||||
dfZoD7qWnyAldYTydADwL885dVY7WN6NX9YtQrG3JGrp3wZvFrX5x9Jv7hls0A6l
|
||||
2xI4NlcSSrgWIjzrGdwQEjIUDyfc7DWroEpJEfIaSnjkeTT0D8WV5NqzWH8UwWoF
|
||||
wjwDltaQ3Y8O/wJPGBqBAJEob+p6QxvP5T2W1jnOvbgsMZLNDuY6FF1XcuR7yvNF
|
||||
sXKP6aXMV8BKSlrehFlpBMTu4HvJ1rZlKuxgR1A9njiaKD2U0NitCKMIpIXQxT6L
|
||||
eZn9M8Ky68m0Zjdw/WCsKz22GTljSM5Nfme32BrW+4G+R55ECwZ1oh08nrnWjXmw
|
||||
PlSHj2lwpnsuOG2fwJkyMnIIoIUII31VLATeLERD9HfMK8/+uZqJ2PftT2fhHL/u
|
||||
CDCIdEWSUBBHpA7p8BbgiZKCpYzf+pbS2/EJGL8gQAvSH1atGv/o0BiAd10MzTXC
|
||||
Xn5xDB1Yh+FtYPYloBGAwmxKieDMnsjy6wp5ovdmOc2y6KBr27DzgEGchLyOxHV4
|
||||
Q7u0Hkm7Om33ir1TUgK6bdPFL8rGNDOZq/SR4yn4qSsQTPD6Y/HQSK5GzkU4dGLw
|
||||
tU6GNpu142QE36NfWkoUWHKf1YgIYrlAGJWlj93et54ZGUZGVN7pAspZ+mvoMnDU
|
||||
Jh46nrQsEJiQz8AqgREck4Fi4S7Rmjh/AhXmzFWFca3YD0BmuYU6fxGTRPZ70eys
|
||||
LV5qPTmTGpX+bpvufAp0vznkiOdqTn1flnxdslM2AukiD6OwkX1dBH8AvzObhbz0
|
||||
ABhx3c+cAhAnYhJmsYaAwbpWpp8CM5opmsRgwgaz8f8lxiRfXbrWD8vdd4dm2B9J
|
||||
jaiGCR8/UXHFBGZhCgLB2S+BNXKynIeP+POGQtMIIERUtwOIKt1KfZ9jZwf/ulJK
|
||||
fv/VmBPmGu+CHvFIlHAzlxwJeUz8wSltUeeHjADZ9Wag5ESN3R6hsmJL+KL4av5v
|
||||
DFobNPiNWbc+4H+3wg1R0oK/uTQb8u1S7uWIGVmi5fJ4rVVZ/VKKtHGVwm/8OGKF
|
||||
tcrJFJcJADFVkgpsqN8UINsMJLxfJRoBgABEWih5DTRwNXK76Ma2LjDBrEvxhw8M
|
||||
7SLKhi5vH7/Cs7jfLZFgh2T6flDV4VM/EA7CYEHgEb8MFmioFGOmhUpqifkA3SdX
|
||||
jGi2KuZZ5+O+sHFWXsUjiFPEzUJF+syPEzH1aF5R+F8pkhifeYh0KP6OHd6Sgn8s
|
||||
TStXB+q0MndBXw5ADp/Jac1DVaSWruVAdjemQ+si1olk8xH+uTMXU7PgV9WkpIiy
|
||||
4BhnFU9IbCr/m7806c13xfeelaffP2pr7EDdgwz5K89VWCa3k9OSDnMtj2CQXlC7
|
||||
bQHi/oRGA1aHSn84SIt+HpAfRoVdr4N90bYWmYQNqfKoyWCbEr+dge/GSD1nddAJ
|
||||
72mXGlqyLyWYuAAAAAA="""
|
||||
|
||||
TEST_1654_SH1 = """\
|
||||
VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA45R4Y4kuV458rSTGDVTqdzz
|
||||
9Fig3NQ3LermyD+0XLeqbC7KNgvv6cNzMZ9psQQ3FseYsIR1AAAAAAAAB8YAAAAA
|
||||
AAAJmgAAAAFPNgDkd/Y9Z+cuKctZk9gjwF8thT+fkmNCsulILsJw5StGHAA1f7uL
|
||||
MG73c5WBcesHB2epwazfbD3/0UZTlxXWXotywVHhjiS5XjnytJMYNVOp3PP0WKDc
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABCDwr
|
||||
uIlhFlv21pDqyMeA9X1wHp98a1CKY4qfC7gn5exyODAcnhZKHCV18XBerbZLAgIA
|
||||
AAAAAAAAJgAAAAAAAAAmAAABjwAAAo8AAALTAAAC8wAAAAAAAAMGAAAAAAAAB8Yw
|
||||
ggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQCXKMor062nfxHVutMbqNcj
|
||||
vVC92wXTcQulenNWEX+0huK54igTAG60p0lZ6FpBJ9A+dlStT386bn5I6qe50ky5
|
||||
CFodQSsQX+1yByMFlzqPDo4rclk/6oVySLypxnt/iBs3FPZ4zruhYXcITc6zaYYU
|
||||
Xqaw/C86g6M06MWQKsGev7PS3tH7q+dtovWzDgU13Q8PG2whGvGNfxPOmEX4j0wL
|
||||
FCBavpFnLpo3bJrj27V33HXxpPz3NP+fkaG0pKH03ANd/yYHfGf74dC+eD5dvWBM
|
||||
DU6fZQN4k/T+cth+qzjS52FPPTY9IHXIb4y+1HryVvxcx6JDifKoOzpFc3SDbBAP
|
||||
AgERKDjOFxVClH81DF/QkqpP0glOh6uTsFNx8Nes02q0d7iip2WqfG9m2+LmiWy8
|
||||
Pg7RlQQy2M45gert1EDsH4OI69uxteviZP1Mo0wD6HjmWUbGIQRmsT3DmYEZCCMA
|
||||
/KjhNmlov2+OhVxIaHwE7aN840IfkGdJ/JssB6Z/Ym3+ou4+jAYKhifPQGrpBVjd
|
||||
73oH6w9StnoGYIrEEQw8LFc4jnAFYciKlPuo6E6E3zDseE7gwkcOpCtVVksZu6Ii
|
||||
GQgIV8vjFbNz9M//RMXOBTwKFDiG08IAPh7fv2uKzFis0TFrR7sQcMQ/kZZCLPPi
|
||||
ECIX95NRoFRlxK/1kZ1+FuuDQgABz9+5yd/pjkVybmvc7Jr70bOVpxvRoI2ZEgh/
|
||||
+QdxfcwAAm5iDnzPtsVdcbuNkKprfI8N4n+QmUOSMbAJ7M8r1cp40cTBnAw+rMKC
|
||||
98P4pURrotx116Kd0i3XmMZu81ew57H3Zb73r+syQCXZNOP0xhMDclIt0p2xw2Wn
|
||||
z6QccyXyobXPOFLO3ZBPnKaE58aaN7x3srQZYUKafet5ZMDX8fsQf2mbxnaeG5NF
|
||||
eO6wG++WBUo9leddnzKBnRcMGRAtJEjwfKMVPE8SmuTlL6kRc7n8wvY2ygClWlRm
|
||||
d7o95tZfoO+mexB/DLEpWLtlAiqh8yJ8cWaC5rYz4ZC2+z7QkeKXCHWAN3i4C++u
|
||||
dfZoD7qWnyAldYTydADwL885dVY7WN6NX9YtQrG3JGrp3wZvFrX5x9Jv7hls0A6l
|
||||
2xI4NlcSSrgWIjzrGdwQEjIUDyfc7DWroEpJEfIaSnjkeTT0D8WV5NqzWH8UwWoF
|
||||
wjwDltaQ3Y8O/wJPGBqBAJEob+p6QxvP5T2W1jnOvbgsMZLNDuY6FF1XcuR7yvNF
|
||||
sXKP6aXMV8BKSlrehFlpBMTu4HvJ1rZlKuxgR1A9njiaKD2U0NitCKMIpIXQxT6L
|
||||
eZn9M8Ky68m0Zjdw/WCsKz22GTljSM5Nfme32BrW+4G+R55ECwZ1oh08nrnWjXmw
|
||||
PlSHj2lwpnsuOG2fwJkyMnIIoIUII31VLATeLERD9HfMK8/+uZqJ2PftT2fhHL/u
|
||||
CDCIdEWSUBBHpA7p8BbgiZKCpYzf+pbS2/EJGL8gQAvSH1atGv/o0BiAd10MzTXC
|
||||
Xn5xDB1Yh+FtYPYloBGAwmxKieDMnsjy6wp5ovdmOc2y6KBr27DzgEGchLyOxHV4
|
||||
Q7u0Hkm7Om33ir1TUgK6bdPFL8rGNDOZq/SR4yn4qSsQTPD6Y/HQSK5GzkU4dGLw
|
||||
tU6GNpu142QE36NfWkoUWHKf1YgIYrlAGJWlj93et54ZGUZGVN7pAspZ+mvoMnDU
|
||||
Jh46nrQsEJiQz8AqgREck4Fi4S7Rmjh/AhXmzFWFca3YD0BmuYU6fxGTRPZ70eys
|
||||
LV5qPTmTGpX+bpvufAp0vznkiOdqTn1flnxdslM2AukiD6OwkX1dBH8AvzObhbz0
|
||||
ABhx3c+cAhAnYhJmsYaAwbpWpp8CM5opmsRgwgaz8f8lxiRfXbrWD8vdd4dm2B9J
|
||||
jaiGCR8/UXHFBGZhCgLB2S+BNXKynIeP+POGQtMIIERUtwOIKt1KfZ9jZwf/ulJK
|
||||
fv/VmBPmGu+CHvFIlHAzlxwJeUz8wSltUeeHjADZ9Wag5ESN3R6hsmJL+KL4av5v
|
||||
DFobNPiNWbc+4H+3wg1R0oK/uTQb8u1S7uWIGVmi5fJ4rVVZ/VKKtHGVwm/8OGKF
|
||||
tcrJFJcJADFVkgpsqN8UINsMJLxfJRoBgABEWih5DTRwNXK76Ma2LjDBrEvxhw8M
|
||||
7SLKhi5vH7/Cs7jfLZFgh2T6flDV4VM/EA7CYEHgEb8MFmioFGOmhUpqifkA3SdX
|
||||
jGi2KuZZ5+O+sHFWXsUjiFPEzUJF+syPEzH1aF5R+F8pkhifeYh0KP6OHd6Sgn8s
|
||||
TStXB+q0MndBXw5ADp/Jac1DVaSWruVAdjemQ+si1olk8xH+uTMXU7PgV9WkpIiy
|
||||
4BhnFU9IbCr/m7806c13xfeelaffP2pr7EDdgwz5K89VWCa3k9OSDnMtj2CQXlC7
|
||||
bQHi/oRGA1aHSn84SIt+HpAfRoVdr4N90bYWmYQNqfKoyWCbEr+dge/GSD1nddAJ
|
||||
72mXGlqyLyWYuAAAAAA="""
|
274
src/allmydata/test/mutable/test_repair.py
Normal file
274
src/allmydata/test/mutable/test_repair.py
Normal file
@ -0,0 +1,274 @@
|
||||
from twisted.trial import unittest
|
||||
from allmydata.interfaces import IRepairResults, ICheckAndRepairResults
|
||||
from allmydata.monitor import Monitor
|
||||
from allmydata.mutable.common import MODE_CHECK
|
||||
from allmydata.mutable.layout import unpack_header
|
||||
from allmydata.mutable.repairer import MustForceRepairError
|
||||
from ..common import ShouldFailMixin
|
||||
from .util import PublishMixin
|
||||
|
||||
class Repair(unittest.TestCase, PublishMixin, ShouldFailMixin):
    """Tests for mutable-file repair.

    PublishMixin supplies the publish_* helpers plus the fake storage
    (self._storage) and the published filenode (self._fn); ShouldFailMixin
    supplies shouldFail. Tests that snapshot share state must initialize
    self.old_shares themselves before calling copy_shares.
    """

    def get_shares(self, s):
        """Return a dict mapping (peerid, shnum) -> share data for every
        share held by fake-storage object 's'."""
        all_shares = {} # maps (peerid, shnum) to share data
        for peerid in s._peers:
            shares = s._peers[peerid]
            for shnum in shares:
                data = shares[shnum]
                all_shares[ (peerid, shnum) ] = data
        return all_shares

    def copy_shares(self, ignored=None):
        # Append a snapshot of the current share set to self.old_shares so
        # later steps can compare before/after states. 'ignored' lets this
        # be used directly as a Deferred callback.
        self.old_shares.append(self.get_shares(self._storage))

    def test_repair_nop(self):
        """Repairing an already-healthy file succeeds; shares stay in the
        same places but are republished at seqnum+1."""
        self.old_shares = []
        d = self.publish_one()
        d.addCallback(self.copy_shares)
        d.addCallback(lambda res: self._fn.check(Monitor()))
        d.addCallback(lambda check_results: self._fn.repair(check_results))
        def _check_results(rres):
            self.failUnless(IRepairResults.providedBy(rres))
            self.failUnless(rres.get_successful())
            # TODO: examine results

            self.copy_shares()

            initial_shares = self.old_shares[0]
            new_shares = self.old_shares[1]
            # TODO: this really shouldn't change anything. When we implement
            # a "minimal-bandwidth" repairer", change this test to assert:
            #self.failUnlessEqual(new_shares, initial_shares)

            # all shares should be in the same place as before
            self.failUnlessEqual(set(initial_shares.keys()),
                                 set(new_shares.keys()))
            # but they should all be at a newer seqnum. The IV will be
            # different, so the roothash will be too.
            for key in initial_shares:
                (version0,
                 seqnum0,
                 root_hash0,
                 IV0,
                 k0, N0, segsize0, datalen0,
                 o0) = unpack_header(initial_shares[key])
                (version1,
                 seqnum1,
                 root_hash1,
                 IV1,
                 k1, N1, segsize1, datalen1,
                 o1) = unpack_header(new_shares[key])
                self.failUnlessEqual(version0, version1)
                self.failUnlessEqual(seqnum0+1, seqnum1)
                self.failUnlessEqual(k0, k1)
                self.failUnlessEqual(N0, N1)
                self.failUnlessEqual(segsize0, segsize1)
                self.failUnlessEqual(datalen0, datalen1)
        d.addCallback(_check_results)
        return d

    def failIfSharesChanged(self, ignored=None):
        # Compare the two most recent snapshots taken by copy_shares and
        # assert that no share changed between them.
        old_shares = self.old_shares[-2]
        current_shares = self.old_shares[-1]
        self.failUnlessEqual(old_shares, current_shares)


    def _test_whether_repairable(self, publisher, nshares, expected_result):
        """Publish a file, delete all shares with shnum >= nshares, then
        check and repair, asserting that both recoverability and repair
        success equal expected_result."""
        d = publisher()
        def _delete_some_shares(ign):
            shares = self._storage._peers
            for peerid in shares:
                for shnum in list(shares[peerid]):
                    if shnum >= nshares:
                        del shares[peerid][shnum]
        d.addCallback(_delete_some_shares)
        d.addCallback(lambda ign: self._fn.check(Monitor()))
        def _check(cr):
            self.failIf(cr.is_healthy())
            self.failUnlessEqual(cr.is_recoverable(), expected_result)
            return cr
        d.addCallback(_check)
        d.addCallback(lambda check_results: self._fn.repair(check_results))
        d.addCallback(lambda crr: self.failUnlessEqual(crr.get_successful(), expected_result))
        return d

    def test_unrepairable_0shares(self):
        # with zero shares left, the file is unrecoverable and unrepairable
        return self._test_whether_repairable(self.publish_one, 0, False)

    def test_mdmf_unrepairable_0shares(self):
        return self._test_whether_repairable(self.publish_mdmf, 0, False)

    def test_unrepairable_1share(self):
        # one share is below k, so still unrepairable
        return self._test_whether_repairable(self.publish_one, 1, False)

    def test_mdmf_unrepairable_1share(self):
        return self._test_whether_repairable(self.publish_mdmf, 1, False)

    def test_repairable_5shares(self):
        # five shares are enough to recover, so repair should succeed
        return self._test_whether_repairable(self.publish_one, 5, True)

    def test_mdmf_repairable_5shares(self):
        return self._test_whether_repairable(self.publish_mdmf, 5, True)

    def _test_whether_checkandrepairable(self, publisher, nshares, expected_result):
        """
        Like the _test_whether_repairable tests, but invoking check_and_repair
        instead of invoking check and then invoking repair.
        """
        d = publisher()
        def _delete_some_shares(ign):
            shares = self._storage._peers
            for peerid in shares:
                for shnum in list(shares[peerid]):
                    if shnum >= nshares:
                        del shares[peerid][shnum]
        d.addCallback(_delete_some_shares)
        d.addCallback(lambda ign: self._fn.check_and_repair(Monitor()))
        d.addCallback(lambda crr: self.failUnlessEqual(crr.get_repair_successful(), expected_result))
        return d

    def test_unrepairable_0shares_checkandrepair(self):
        return self._test_whether_checkandrepairable(self.publish_one, 0, False)

    def test_mdmf_unrepairable_0shares_checkandrepair(self):
        return self._test_whether_checkandrepairable(self.publish_mdmf, 0, False)

    def test_unrepairable_1share_checkandrepair(self):
        return self._test_whether_checkandrepairable(self.publish_one, 1, False)

    def test_mdmf_unrepairable_1share_checkandrepair(self):
        return self._test_whether_checkandrepairable(self.publish_mdmf, 1, False)

    def test_repairable_5shares_checkandrepair(self):
        return self._test_whether_checkandrepairable(self.publish_one, 5, True)

    def test_mdmf_repairable_5shares_checkandrepair(self):
        return self._test_whether_checkandrepairable(self.publish_mdmf, 5, True)


    def test_merge(self):
        """Repairing a file with two recoverable versions at the same
        seqnum requires force=True, and produces a single winner."""
        self.old_shares = []
        d = self.publish_multiple()
        # repair will refuse to merge multiple highest seqnums unless you
        # pass force=True
        d.addCallback(lambda res:
                      self._set_versions({0:3,2:3,4:3,6:3,8:3,
                                          1:4,3:4,5:4,7:4,9:4}))
        d.addCallback(self.copy_shares)
        d.addCallback(lambda res: self._fn.check(Monitor()))
        def _try_repair(check_results):
            # without force=True, repair must refuse and leave the shares
            # untouched
            ex = "There were multiple recoverable versions with identical seqnums, so force=True must be passed to the repair() operation"
            d2 = self.shouldFail(MustForceRepairError, "test_merge", ex,
                                 self._fn.repair, check_results)
            d2.addCallback(self.copy_shares)
            d2.addCallback(self.failIfSharesChanged)
            d2.addCallback(lambda res: check_results)
            return d2
        d.addCallback(_try_repair)
        d.addCallback(lambda check_results:
                      self._fn.repair(check_results, force=True))
        # this should give us 10 shares of the highest roothash
        def _check_repair_results(rres):
            self.failUnless(rres.get_successful())
            pass # TODO
        d.addCallback(_check_repair_results)
        d.addCallback(lambda res: self._fn.get_servermap(MODE_CHECK))
        def _check_smap(smap):
            self.failUnlessEqual(len(smap.recoverable_versions()), 1)
            self.failIf(smap.unrecoverable_versions())
            # now, which should have won?
            roothash_s4a = self.get_roothash_for(3)
            roothash_s4b = self.get_roothash_for(4)
            if roothash_s4b > roothash_s4a:
                expected_contents = self.CONTENTS[4]
            else:
                expected_contents = self.CONTENTS[3]
            new_versionid = smap.best_recoverable_version()
            self.failUnlessEqual(new_versionid[0], 5) # seqnum 5
            d2 = self._fn.download_version(smap, new_versionid)
            d2.addCallback(self.failUnlessEqual, expected_contents)
            return d2
        d.addCallback(_check_smap)
        return d

    def test_non_merge(self):
        """A repair that doesn't need to merge (distinct seqnums) should
        proceed without force=True."""
        self.old_shares = []
        d = self.publish_multiple()
        # repair should not refuse a repair that doesn't need to merge. In
        # this case, we combine v2 with v3. The repair should ignore v2 and
        # copy v3 into a new v5.
        d.addCallback(lambda res:
                      self._set_versions({0:2,2:2,4:2,6:2,8:2,
                                          1:3,3:3,5:3,7:3,9:3}))
        d.addCallback(lambda res: self._fn.check(Monitor()))
        d.addCallback(lambda check_results: self._fn.repair(check_results))
        # this should give us 10 shares of v3
        def _check_repair_results(rres):
            self.failUnless(rres.get_successful())
            pass # TODO
        d.addCallback(_check_repair_results)
        d.addCallback(lambda res: self._fn.get_servermap(MODE_CHECK))
        def _check_smap(smap):
            self.failUnlessEqual(len(smap.recoverable_versions()), 1)
            self.failIf(smap.unrecoverable_versions())
            # now, which should have won?
            expected_contents = self.CONTENTS[3]
            new_versionid = smap.best_recoverable_version()
            self.failUnlessEqual(new_versionid[0], 5) # seqnum 5
            d2 = self._fn.download_version(smap, new_versionid)
            d2.addCallback(self.failUnlessEqual, expected_contents)
            return d2
        d.addCallback(_check_smap)
        return d

    def get_roothash_for(self, index):
        # return the roothash for the first share we see in the saved set
        # NOTE(review): reads self._copied_shares, which appears to be
        # populated by PublishMixin/_set_versions rather than by
        # copy_shares above — confirm against the mixin.
        shares = self._copied_shares[index]
        for peerid in shares:
            for shnum in shares[peerid]:
                share = shares[peerid][shnum]
                (version, seqnum, root_hash, IV, k, N, segsize, datalen, o) = \
                          unpack_header(share)
                return root_hash

    def test_check_and_repair_readcap(self):
        # we can't currently repair from a mutable readcap: #625
        self.old_shares = []
        d = self.publish_one()
        d.addCallback(self.copy_shares)
        def _get_readcap(res):
            self._fn3 = self._fn.get_readonly()
            # also delete some shares
            for peerid,shares in self._storage._peers.items():
                shares.pop(0, None)
        d.addCallback(_get_readcap)
        d.addCallback(lambda res: self._fn3.check_and_repair(Monitor()))
        def _check_results(crr):
            self.failUnless(ICheckAndRepairResults.providedBy(crr))
            # we should detect the unhealthy, but skip over mutable-readcap
            # repairs until #625 is fixed
            self.failIf(crr.get_pre_repair_results().is_healthy())
            self.failIf(crr.get_repair_attempted())
            self.failIf(crr.get_post_repair_results().is_healthy())
        d.addCallback(_check_results)
        return d

    def test_repair_empty(self):
        # bug 1689: delete one share of an empty mutable file, then repair.
        # In the buggy version, the check that precedes the retrieve+publish
        # cycle uses MODE_READ, instead of MODE_REPAIR, and fails to get the
        # privkey that repair needs.
        # NOTE(review): uses self._fn2, presumably set by publish_sdmf in
        # PublishMixin — confirm against the mixin.
        d = self.publish_sdmf("")
        def _delete_one_share(ign):
            shares = self._storage._peers
            for peerid in shares:
                for shnum in list(shares[peerid]):
                    if shnum == 0:
                        del shares[peerid][shnum]
        d.addCallback(_delete_one_share)
        d.addCallback(lambda ign: self._fn2.check(Monitor()))
        d.addCallback(lambda check_results: self._fn2.repair(check_results))
        def _check(crr):
            self.failUnlessEqual(crr.get_successful(), True)
        d.addCallback(_check)
        return d
|
410
src/allmydata/test/mutable/test_roundtrip.py
Normal file
410
src/allmydata/test/mutable/test_roundtrip.py
Normal file
@ -0,0 +1,410 @@
|
||||
from cStringIO import StringIO
|
||||
|
||||
from twisted.trial import unittest
|
||||
from twisted.internet import defer
|
||||
|
||||
from allmydata.util import base32, consumer
|
||||
from allmydata.interfaces import NotEnoughSharesError
|
||||
from allmydata.monitor import Monitor
|
||||
from allmydata.mutable.common import MODE_READ, UnrecoverableFileError
|
||||
from allmydata.mutable.servermap import ServerMap, ServermapUpdater
|
||||
from allmydata.mutable.retrieve import Retrieve
|
||||
from .util import PublishMixin, make_storagebroker, corrupt
|
||||
from .. import common_util as testutil
|
||||
|
||||
class Roundtrip(unittest.TestCase, testutil.ShouldFailMixin, PublishMixin):
|
||||
def setUp(self):
|
||||
return self.publish_one()
|
||||
|
||||
def make_servermap(self, mode=MODE_READ, oldmap=None, sb=None):
|
||||
if oldmap is None:
|
||||
oldmap = ServerMap()
|
||||
if sb is None:
|
||||
sb = self._storage_broker
|
||||
smu = ServermapUpdater(self._fn, sb, Monitor(), oldmap, mode)
|
||||
d = smu.update()
|
||||
return d
|
||||
|
||||
def abbrev_verinfo(self, verinfo):
|
||||
if verinfo is None:
|
||||
return None
|
||||
(seqnum, root_hash, IV, segsize, datalength, k, N, prefix,
|
||||
offsets_tuple) = verinfo
|
||||
return "%d-%s" % (seqnum, base32.b2a(root_hash)[:4])
|
||||
|
||||
def abbrev_verinfo_dict(self, verinfo_d):
|
||||
output = {}
|
||||
for verinfo,value in verinfo_d.items():
|
||||
(seqnum, root_hash, IV, segsize, datalength, k, N, prefix,
|
||||
offsets_tuple) = verinfo
|
||||
output["%d-%s" % (seqnum, base32.b2a(root_hash)[:4])] = value
|
||||
return output
|
||||
|
||||
def dump_servermap(self, servermap):
|
||||
print "SERVERMAP", servermap
|
||||
print "RECOVERABLE", [self.abbrev_verinfo(v)
|
||||
for v in servermap.recoverable_versions()]
|
||||
print "BEST", self.abbrev_verinfo(servermap.best_recoverable_version())
|
||||
print "available", self.abbrev_verinfo_dict(servermap.shares_available())
|
||||
|
||||
def do_download(self, servermap, version=None):
|
||||
if version is None:
|
||||
version = servermap.best_recoverable_version()
|
||||
r = Retrieve(self._fn, self._storage_broker, servermap, version)
|
||||
c = consumer.MemoryConsumer()
|
||||
d = r.download(consumer=c)
|
||||
d.addCallback(lambda mc: "".join(mc.chunks))
|
||||
return d
|
||||
|
||||
|
||||
def test_basic(self):
|
||||
d = self.make_servermap()
|
||||
def _do_retrieve(servermap):
|
||||
self._smap = servermap
|
||||
#self.dump_servermap(servermap)
|
||||
self.failUnlessEqual(len(servermap.recoverable_versions()), 1)
|
||||
return self.do_download(servermap)
|
||||
d.addCallback(_do_retrieve)
|
||||
def _retrieved(new_contents):
|
||||
self.failUnlessEqual(new_contents, self.CONTENTS)
|
||||
d.addCallback(_retrieved)
|
||||
# we should be able to re-use the same servermap, both with and
|
||||
# without updating it.
|
||||
d.addCallback(lambda res: self.do_download(self._smap))
|
||||
d.addCallback(_retrieved)
|
||||
d.addCallback(lambda res: self.make_servermap(oldmap=self._smap))
|
||||
d.addCallback(lambda res: self.do_download(self._smap))
|
||||
d.addCallback(_retrieved)
|
||||
# clobbering the pubkey should make the servermap updater re-fetch it
|
||||
def _clobber_pubkey(res):
|
||||
self._fn._pubkey = None
|
||||
d.addCallback(_clobber_pubkey)
|
||||
d.addCallback(lambda res: self.make_servermap(oldmap=self._smap))
|
||||
d.addCallback(lambda res: self.do_download(self._smap))
|
||||
d.addCallback(_retrieved)
|
||||
return d
|
||||
|
||||
def test_all_shares_vanished(self):
|
||||
d = self.make_servermap()
|
||||
def _remove_shares(servermap):
|
||||
for shares in self._storage._peers.values():
|
||||
shares.clear()
|
||||
d1 = self.shouldFail(NotEnoughSharesError,
|
||||
"test_all_shares_vanished",
|
||||
"ran out of servers",
|
||||
self.do_download, servermap)
|
||||
return d1
|
||||
d.addCallback(_remove_shares)
|
||||
return d
|
||||
|
||||
def test_all_but_two_shares_vanished_updated_servermap(self):
|
||||
# tests error reporting for ticket #1742
|
||||
d = self.make_servermap()
|
||||
def _remove_shares(servermap):
|
||||
self._version = servermap.best_recoverable_version()
|
||||
for shares in self._storage._peers.values()[2:]:
|
||||
shares.clear()
|
||||
return self.make_servermap(servermap)
|
||||
d.addCallback(_remove_shares)
|
||||
def _check(updated_servermap):
|
||||
d1 = self.shouldFail(NotEnoughSharesError,
|
||||
"test_all_but_two_shares_vanished_updated_servermap",
|
||||
"ran out of servers",
|
||||
self.do_download, updated_servermap, version=self._version)
|
||||
return d1
|
||||
d.addCallback(_check)
|
||||
return d
|
||||
|
||||
def test_no_servers(self):
|
||||
sb2 = make_storagebroker(num_peers=0)
|
||||
# if there are no servers, then a MODE_READ servermap should come
|
||||
# back empty
|
||||
d = self.make_servermap(sb=sb2)
|
||||
def _check_servermap(servermap):
|
||||
self.failUnlessEqual(servermap.best_recoverable_version(), None)
|
||||
self.failIf(servermap.recoverable_versions())
|
||||
self.failIf(servermap.unrecoverable_versions())
|
||||
self.failIf(servermap.all_servers())
|
||||
d.addCallback(_check_servermap)
|
||||
return d
|
||||
|
||||
def test_no_servers_download(self):
|
||||
sb2 = make_storagebroker(num_peers=0)
|
||||
self._fn._storage_broker = sb2
|
||||
d = self.shouldFail(UnrecoverableFileError,
|
||||
"test_no_servers_download",
|
||||
"no recoverable versions",
|
||||
self._fn.download_best_version)
|
||||
def _restore(res):
|
||||
# a failed download that occurs while we aren't connected to
|
||||
# anybody should not prevent a subsequent download from working.
|
||||
# This isn't quite the webapi-driven test that #463 wants, but it
|
||||
# should be close enough.
|
||||
self._fn._storage_broker = self._storage_broker
|
||||
return self._fn.download_best_version()
|
||||
def _retrieved(new_contents):
|
||||
self.failUnlessEqual(new_contents, self.CONTENTS)
|
||||
d.addCallback(_restore)
|
||||
d.addCallback(_retrieved)
|
||||
return d
|
||||
|
||||
|
||||
def _test_corrupt_all(self, offset, substring,
|
||||
should_succeed=False,
|
||||
corrupt_early=True,
|
||||
failure_checker=None,
|
||||
fetch_privkey=False):
|
||||
d = defer.succeed(None)
|
||||
if corrupt_early:
|
||||
d.addCallback(corrupt, self._storage, offset)
|
||||
d.addCallback(lambda res: self.make_servermap())
|
||||
if not corrupt_early:
|
||||
d.addCallback(corrupt, self._storage, offset)
|
||||
def _do_retrieve(servermap):
|
||||
ver = servermap.best_recoverable_version()
|
||||
if ver is None and not should_succeed:
|
||||
# no recoverable versions == not succeeding. The problem
|
||||
# should be noted in the servermap's list of problems.
|
||||
if substring:
|
||||
allproblems = [str(f) for f in servermap.get_problems()]
|
||||
self.failUnlessIn(substring, "".join(allproblems))
|
||||
return servermap
|
||||
if should_succeed:
|
||||
d1 = self._fn.download_version(servermap, ver,
|
||||
fetch_privkey)
|
||||
d1.addCallback(lambda new_contents:
|
||||
self.failUnlessEqual(new_contents, self.CONTENTS))
|
||||
else:
|
||||
d1 = self.shouldFail(NotEnoughSharesError,
|
||||
"_corrupt_all(offset=%s)" % (offset,),
|
||||
substring,
|
||||
self._fn.download_version, servermap,
|
||||
ver,
|
||||
fetch_privkey)
|
||||
if failure_checker:
|
||||
d1.addCallback(failure_checker)
|
||||
d1.addCallback(lambda res: servermap)
|
||||
return d1
|
||||
d.addCallback(_do_retrieve)
|
||||
return d
|
||||
|
||||
def test_corrupt_all_verbyte(self):
|
||||
# when the version byte is not 0 or 1, we hit an UnknownVersionError
|
||||
# error in unpack_share().
|
||||
d = self._test_corrupt_all(0, "UnknownVersionError")
|
||||
def _check_servermap(servermap):
|
||||
# and the dump should mention the problems
|
||||
s = StringIO()
|
||||
dump = servermap.dump(s).getvalue()
|
||||
self.failUnless("30 PROBLEMS" in dump, dump)
|
||||
d.addCallback(_check_servermap)
|
||||
return d
|
||||
|
||||
def test_corrupt_all_seqnum(self):
|
||||
# a corrupt sequence number will trigger a bad signature
|
||||
return self._test_corrupt_all(1, "signature is invalid")
|
||||
|
||||
def test_corrupt_all_R(self):
|
||||
# a corrupt root hash will trigger a bad signature
|
||||
return self._test_corrupt_all(9, "signature is invalid")
|
||||
|
||||
def test_corrupt_all_IV(self):
|
||||
# a corrupt salt/IV will trigger a bad signature
|
||||
return self._test_corrupt_all(41, "signature is invalid")
|
||||
|
||||
def test_corrupt_all_k(self):
|
||||
# a corrupt 'k' will trigger a bad signature
|
||||
return self._test_corrupt_all(57, "signature is invalid")
|
||||
|
||||
def test_corrupt_all_N(self):
|
||||
# a corrupt 'N' will trigger a bad signature
|
||||
return self._test_corrupt_all(58, "signature is invalid")
|
||||
|
||||
def test_corrupt_all_segsize(self):
|
||||
# a corrupt segsize will trigger a bad signature
|
||||
return self._test_corrupt_all(59, "signature is invalid")
|
||||
|
||||
def test_corrupt_all_datalen(self):
|
||||
# a corrupt data length will trigger a bad signature
|
||||
return self._test_corrupt_all(67, "signature is invalid")
|
||||
|
||||
def test_corrupt_all_pubkey(self):
|
||||
# a corrupt pubkey won't match the URI's fingerprint. We need to
|
||||
# remove the pubkey from the filenode, or else it won't bother trying
|
||||
# to update it.
|
||||
self._fn._pubkey = None
|
||||
return self._test_corrupt_all("pubkey",
|
||||
"pubkey doesn't match fingerprint")
|
||||
|
||||
def test_corrupt_all_sig(self):
|
||||
# a corrupt signature is a bad one
|
||||
# the signature runs from about [543:799], depending upon the length
|
||||
# of the pubkey
|
||||
return self._test_corrupt_all("signature", "signature is invalid")
|
||||
|
||||
def test_corrupt_all_share_hash_chain_number(self):
|
||||
# a corrupt share hash chain entry will show up as a bad hash. If we
|
||||
# mangle the first byte, that will look like a bad hash number,
|
||||
# causing an IndexError
|
||||
return self._test_corrupt_all("share_hash_chain", "corrupt hashes")
|
||||
|
||||
def test_corrupt_all_share_hash_chain_hash(self):
|
||||
# a corrupt share hash chain entry will show up as a bad hash. If we
|
||||
# mangle a few bytes in, that will look like a bad hash.
|
||||
return self._test_corrupt_all(("share_hash_chain",4), "corrupt hashes")
|
||||
|
||||
def test_corrupt_all_block_hash_tree(self):
|
||||
return self._test_corrupt_all("block_hash_tree",
|
||||
"block hash tree failure")
|
||||
|
||||
def test_corrupt_all_block(self):
|
||||
return self._test_corrupt_all("share_data", "block hash tree failure")
|
||||
|
||||
def test_corrupt_all_encprivkey(self):
|
||||
# a corrupted privkey won't even be noticed by the reader, only by a
|
||||
# writer.
|
||||
return self._test_corrupt_all("enc_privkey", None, should_succeed=True)
|
||||
|
||||
|
||||
def test_corrupt_all_encprivkey_late(self):
|
||||
# this should work for the same reason as above, but we corrupt
|
||||
# after the servermap update to exercise the error handling
|
||||
# code.
|
||||
# We need to remove the privkey from the node, or the retrieve
|
||||
# process won't know to update it.
|
||||
self._fn._privkey = None
|
||||
return self._test_corrupt_all("enc_privkey",
|
||||
None, # this shouldn't fail
|
||||
should_succeed=True,
|
||||
corrupt_early=False,
|
||||
fetch_privkey=True)
|
||||
|
||||
|
||||
# disabled until retrieve tests checkstring on each blockfetch. I didn't
|
||||
# just use a .todo because the failing-but-ignored test emits about 30kB
|
||||
# of noise.
|
||||
def OFF_test_corrupt_all_seqnum_late(self):
|
||||
# corrupting the seqnum between mapupdate and retrieve should result
|
||||
# in NotEnoughSharesError, since each share will look invalid
|
||||
def _check(res):
|
||||
f = res[0]
|
||||
self.failUnless(f.check(NotEnoughSharesError))
|
||||
self.failUnless("uncoordinated write" in str(f))
|
||||
return self._test_corrupt_all(1, "ran out of servers",
|
||||
corrupt_early=False,
|
||||
failure_checker=_check)
|
||||
|
||||
|
||||
def test_corrupt_all_block_late(self):
|
||||
def _check(res):
|
||||
f = res[0]
|
||||
self.failUnless(f.check(NotEnoughSharesError))
|
||||
return self._test_corrupt_all("share_data", "block hash tree failure",
|
||||
corrupt_early=False,
|
||||
failure_checker=_check)
|
||||
|
||||
|
||||
def test_basic_pubkey_at_end(self):
|
||||
# we corrupt the pubkey in all but the last 'k' shares, allowing the
|
||||
# download to succeed but forcing a bunch of retries first. Note that
|
||||
# this is rather pessimistic: our Retrieve process will throw away
|
||||
# the whole share if the pubkey is bad, even though the rest of the
|
||||
# share might be good.
|
||||
|
||||
self._fn._pubkey = None
|
||||
k = self._fn.get_required_shares()
|
||||
N = self._fn.get_total_shares()
|
||||
d = defer.succeed(None)
|
||||
d.addCallback(corrupt, self._storage, "pubkey",
|
||||
shnums_to_corrupt=range(0, N-k))
|
||||
d.addCallback(lambda res: self.make_servermap())
|
||||
def _do_retrieve(servermap):
|
||||
self.failUnless(servermap.get_problems())
|
||||
self.failUnless("pubkey doesn't match fingerprint"
|
||||
in str(servermap.get_problems()[0]))
|
||||
ver = servermap.best_recoverable_version()
|
||||
r = Retrieve(self._fn, self._storage_broker, servermap, ver)
|
||||
c = consumer.MemoryConsumer()
|
||||
return r.download(c)
|
||||
d.addCallback(_do_retrieve)
|
||||
d.addCallback(lambda mc: "".join(mc.chunks))
|
||||
d.addCallback(lambda new_contents:
|
||||
self.failUnlessEqual(new_contents, self.CONTENTS))
|
||||
return d
|
||||
|
||||
|
||||
def _test_corrupt_some(self, offset, mdmf=False):
|
||||
if mdmf:
|
||||
d = self.publish_mdmf()
|
||||
else:
|
||||
d = defer.succeed(None)
|
||||
d.addCallback(lambda ignored:
|
||||
corrupt(None, self._storage, offset, range(5)))
|
||||
d.addCallback(lambda ignored:
|
||||
self.make_servermap())
|
||||
def _do_retrieve(servermap):
|
||||
ver = servermap.best_recoverable_version()
|
||||
self.failUnless(ver)
|
||||
return self._fn.download_best_version()
|
||||
d.addCallback(_do_retrieve)
|
||||
d.addCallback(lambda new_contents:
|
||||
self.failUnlessEqual(new_contents, self.CONTENTS))
|
||||
return d
|
||||
|
||||
|
||||
def test_corrupt_some(self):
|
||||
# corrupt the data of first five shares (so the servermap thinks
|
||||
# they're good but retrieve marks them as bad), so that the
|
||||
# MODE_READ set of 6 will be insufficient, forcing node.download to
|
||||
# retry with more servers.
|
||||
return self._test_corrupt_some("share_data")
|
||||
|
||||
|
||||
def test_download_fails(self):
|
||||
d = corrupt(None, self._storage, "signature")
|
||||
d.addCallback(lambda ignored:
|
||||
self.shouldFail(UnrecoverableFileError, "test_download_anyway",
|
||||
"no recoverable versions",
|
||||
self._fn.download_best_version))
|
||||
return d
|
||||
|
||||
|
||||
|
||||
def test_corrupt_mdmf_block_hash_tree(self):
|
||||
d = self.publish_mdmf()
|
||||
d.addCallback(lambda ignored:
|
||||
self._test_corrupt_all(("block_hash_tree", 12 * 32),
|
||||
"block hash tree failure",
|
||||
corrupt_early=True,
|
||||
should_succeed=False))
|
||||
return d
|
||||
|
||||
|
||||
def test_corrupt_mdmf_block_hash_tree_late(self):
|
||||
# Note - there is no SDMF counterpart to this test, as the SDMF
|
||||
# files are guaranteed to have exactly one block, and therefore
|
||||
# the block hash tree fits within the initial read (#1240).
|
||||
d = self.publish_mdmf()
|
||||
d.addCallback(lambda ignored:
|
||||
self._test_corrupt_all(("block_hash_tree", 12 * 32),
|
||||
"block hash tree failure",
|
||||
corrupt_early=False,
|
||||
should_succeed=False))
|
||||
return d
|
||||
|
||||
|
||||
def test_corrupt_mdmf_share_data(self):
|
||||
d = self.publish_mdmf()
|
||||
d.addCallback(lambda ignored:
|
||||
# TODO: Find out what the block size is and corrupt a
|
||||
# specific block, rather than just guessing.
|
||||
self._test_corrupt_all(("share_data", 12 * 40),
|
||||
"block hash tree failure",
|
||||
corrupt_early=True,
|
||||
should_succeed=False))
|
||||
return d
|
||||
|
||||
|
||||
def test_corrupt_some_mdmf(self):
|
||||
return self._test_corrupt_some(("share_data", 12 * 40),
|
||||
mdmf=True)
|
235
src/allmydata/test/mutable/test_servermap.py
Normal file
235
src/allmydata/test/mutable/test_servermap.py
Normal file
@ -0,0 +1,235 @@
|
||||
from twisted.trial import unittest
|
||||
from twisted.internet import defer
|
||||
from allmydata.monitor import Monitor
|
||||
from allmydata.mutable.common import \
|
||||
MODE_CHECK, MODE_ANYTHING, MODE_WRITE, MODE_READ
|
||||
from allmydata.mutable.publish import MutableData
|
||||
from allmydata.mutable.servermap import ServerMap, ServermapUpdater
|
||||
from .util import PublishMixin
|
||||
|
||||
class Servermap(unittest.TestCase, PublishMixin):
    """Tests for ServermapUpdater's share-discovery behavior in the
    various MODE_* query modes."""

    def setUp(self):
        # publish_one() prepares self._fn/_fn2, self._storage,
        # self._storage_broker and self._nodemaker for every test
        return self.publish_one()

    def make_servermap(self, mode=MODE_CHECK, fn=None, sb=None,
                       update_range=None):
        """Build a fresh ServerMap for *fn* (default self._fn) in *mode*."""
        node = self._fn if fn is None else fn
        broker = self._storage_broker if sb is None else sb
        updater = ServermapUpdater(node, broker, Monitor(),
                                   ServerMap(), mode,
                                   update_range=update_range)
        return updater.update()

    def update_servermap(self, oldmap, mode=MODE_CHECK):
        """Re-run the updater over an existing map."""
        updater = ServermapUpdater(self._fn, self._storage_broker, Monitor(),
                                   oldmap, mode)
        return updater.update()

    def failUnlessOneRecoverable(self, sm, num_shares):
        """Assert *sm* shows exactly one recoverable version with
        *num_shares* shares (k=3, N=10), then return *sm* for chaining."""
        self.failUnlessEqual(len(sm.recoverable_versions()), 1)
        self.failUnlessEqual(len(sm.unrecoverable_versions()), 0)
        best = sm.best_recoverable_version()
        self.failIfEqual(best, None)
        self.failUnlessEqual(sm.recoverable_versions(), set([best]))
        self.failUnlessEqual(len(sm.shares_available()), 1)
        self.failUnlessEqual(sm.shares_available()[best], (num_shares, 3, 10))
        shnum, servers = sm.make_sharemap().items()[0]
        server = list(servers)[0]
        self.failUnlessEqual(sm.version_on_server(server, shnum), best)
        self.failUnlessEqual(sm.version_on_server(server, 666), None)
        return sm

    def test_basic(self):
        d = defer.succeed(None)
        ms = self.make_servermap
        us = self.update_servermap

        d.addCallback(lambda ign: ms(mode=MODE_CHECK))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
        d.addCallback(lambda ign: ms(mode=MODE_WRITE))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
        d.addCallback(lambda ign: ms(mode=MODE_READ))
        # this mode stops at k+epsilon, and epsilon=k, so 6 shares
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 6))
        d.addCallback(lambda ign: ms(mode=MODE_ANYTHING))
        # this mode stops at 'k' shares
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 3))

        # and can we re-use the same servermap? Note that these are sorted in
        # increasing order of number of servers queried, since once a server
        # gets into the servermap, we'll always ask it for an update.
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 3))
        d.addCallback(lambda sm: us(sm, mode=MODE_READ))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 6))
        d.addCallback(lambda sm: us(sm, mode=MODE_WRITE))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
        d.addCallback(lambda sm: us(sm, mode=MODE_CHECK))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
        d.addCallback(lambda sm: us(sm, mode=MODE_ANYTHING))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))

        return d

    def test_fetch_privkey(self):
        d = defer.succeed(None)
        # use the sibling filenode (which hasn't been used yet), and make
        # sure it can fetch the privkey. The file is small, so the privkey
        # will be fetched on the first (query) pass.
        d.addCallback(lambda ign: self.make_servermap(MODE_WRITE, self._fn2))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))

        # create a new file, which is large enough to knock the privkey out
        # of the early part of the file
        LARGE = "These are Larger contents" * 200 # about 5KB
        LARGE_uploadable = MutableData(LARGE)
        d.addCallback(lambda ign:
                      self._nodemaker.create_mutable_file(LARGE_uploadable))
        def _created(large_fn):
            large_fn2 = self._nodemaker.create_from_cap(large_fn.get_uri())
            return self.make_servermap(MODE_WRITE, large_fn2)
        d.addCallback(_created)
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
        return d


    def test_mark_bad(self):
        d = defer.succeed(None)
        ms = self.make_servermap

        d.addCallback(lambda ign: ms(mode=MODE_READ))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 6))
        def _mark_corrupt(sm):
            best = sm.best_recoverable_version()
            versionmap = sm.make_versionmap()
            shares = list(versionmap[best])
            self.failUnlessEqual(len(shares), 6)
            self._corrupted = set()
            # mark the first 5 shares as corrupt, then update the servermap.
            # The map should not contain the marked shares any more, and
            # new shares should be found to replace the missing ones.
            for (shnum, server, timestamp) in shares:
                if shnum < 5:
                    self._corrupted.add( (server, shnum) )
                    sm.mark_bad_share(server, shnum, "")
            return self.update_servermap(sm, MODE_WRITE)
        d.addCallback(_mark_corrupt)
        def _check_map(sm):
            # this should find all 5 shares that weren't marked bad
            best = sm.best_recoverable_version()
            versionmap = sm.make_versionmap()
            shares = list(versionmap[best])
            for (server, shnum) in self._corrupted:
                server_shares = sm.debug_shares_on_server(server)
                self.failIf(shnum in server_shares,
                            "%d was in %s" % (shnum, server_shares))
            self.failUnlessEqual(len(shares), 5)
        d.addCallback(_check_map)
        return d

    def failUnlessNoneRecoverable(self, sm):
        """Assert *sm* reports no versions of any kind."""
        self.failUnlessEqual(len(sm.recoverable_versions()), 0)
        self.failUnlessEqual(len(sm.unrecoverable_versions()), 0)
        best = sm.best_recoverable_version()
        self.failUnlessEqual(best, None)
        self.failUnlessEqual(len(sm.shares_available()), 0)

    def test_no_shares(self):
        self._storage._peers = {} # delete all shares
        ms = self.make_servermap
        d = defer.succeed(None)
        # every mode must come back empty
        d.addCallback(lambda ign: ms(mode=MODE_CHECK))
        d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm))

        d.addCallback(lambda ign: ms(mode=MODE_ANYTHING))
        d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm))

        d.addCallback(lambda ign: ms(mode=MODE_WRITE))
        d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm))

        d.addCallback(lambda ign: ms(mode=MODE_READ))
        d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm))

        return d

    def failUnlessNotQuiteEnough(self, sm):
        """Assert *sm* shows one unrecoverable version holding only two of
        the three required shares, then return *sm*."""
        self.failUnlessEqual(len(sm.recoverable_versions()), 0)
        self.failUnlessEqual(len(sm.unrecoverable_versions()), 1)
        best = sm.best_recoverable_version()
        self.failUnlessEqual(best, None)
        self.failUnlessEqual(len(sm.shares_available()), 1)
        self.failUnlessEqual(sm.shares_available().values()[0], (2,3,10) )
        return sm

    def test_not_quite_enough_shares(self):
        s = self._storage
        ms = self.make_servermap
        # empty servers until only two still hold a share
        remaining = len(s._peers)
        for peerid in s._peers:
            s._peers[peerid] = {}
            remaining -= 1
            if remaining == 2:
                break
        # now there ought to be only two shares left
        assert len([peerid for peerid in s._peers if s._peers[peerid]]) == 2

        d = defer.succeed(None)

        d.addCallback(lambda ign: ms(mode=MODE_CHECK))
        d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm))
        d.addCallback(lambda sm:
                      self.failUnlessEqual(len(sm.make_sharemap()), 2))
        d.addCallback(lambda ign: ms(mode=MODE_ANYTHING))
        d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm))
        d.addCallback(lambda ign: ms(mode=MODE_WRITE))
        d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm))
        d.addCallback(lambda ign: ms(mode=MODE_READ))
        d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm))

        return d


    def test_servermapupdater_finds_mdmf_files(self):
        # publish an MDMF file, then make sure that the ServermapUpdater
        # reports it as having one recoverable version.
        d = defer.succeed(None)
        d.addCallback(lambda ign: self.publish_mdmf())
        d.addCallback(lambda ign: self.make_servermap(mode=MODE_CHECK))
        # Calling make_servermap also updates the servermap in the mode
        # that we specify, so we just need to see what it says.
        def _check_servermap(sm):
            self.failUnlessEqual(len(sm.recoverable_versions()), 1)
        d.addCallback(_check_servermap)
        return d


    def test_fetch_update(self):
        d = defer.succeed(None)
        d.addCallback(lambda ign: self.publish_mdmf())
        d.addCallback(lambda ign:
                      self.make_servermap(mode=MODE_WRITE,
                                          update_range=(1, 2)))
        def _check_update_data(sm):
            # 10 shares
            self.failUnlessEqual(len(sm.update_data), 10)
            # one version
            for data in sm.update_data.itervalues():
                self.failUnlessEqual(len(data), 1)
        d.addCallback(_check_update_data)
        return d


    def test_servermapupdater_finds_sdmf_files(self):
        d = defer.succeed(None)
        d.addCallback(lambda ign: self.publish_sdmf())
        d.addCallback(lambda ign: self.make_servermap(mode=MODE_CHECK))
        d.addCallback(lambda servermap:
                      self.failUnlessEqual(len(servermap.recoverable_versions()), 1))
        return d
|
237
src/allmydata/test/mutable/test_update.py
Normal file
237
src/allmydata/test/mutable/test_update.py
Normal file
@ -0,0 +1,237 @@
|
||||
import re
|
||||
from twisted.trial import unittest
|
||||
from twisted.internet import defer
|
||||
from allmydata.interfaces import MDMF_VERSION
|
||||
from allmydata.mutable.filenode import MutableFileNode
|
||||
from allmydata.mutable.publish import MutableData, DEFAULT_MAX_SEGMENT_SIZE
|
||||
from ..no_network import GridTestMixin
|
||||
from .. import common_util as testutil
|
||||
|
||||
# We should really force a smaller segsize for the duration of the tests, to
# let them run faster, but many of these tests depend upon a specific segment
# size. Factor out that expectation here, to start the process of cleaning
# this up.
SEGSIZE = 128*1024
|
||||
|
||||
class Update(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin):
|
||||
timeout = 400 # these tests are too big, 120s is not enough on slow
|
||||
# platforms
|
||||
def setUp(self):
|
||||
GridTestMixin.setUp(self)
|
||||
self.basedir = self.mktemp()
|
||||
self.set_up_grid(num_servers=13)
|
||||
self.c = self.g.clients[0]
|
||||
self.nm = self.c.nodemaker
|
||||
# self.data should be at least three segments long.
|
||||
td = "testdata "
|
||||
self.data = td*(int(3*SEGSIZE/len(td))+10) # currently about 400kB
|
||||
assert len(self.data) > 3*SEGSIZE
|
||||
self.small_data = "test data" * 10 # 90 B; SDMF
|
||||
|
||||
|
||||
def do_upload_sdmf(self):
|
||||
d = self.nm.create_mutable_file(MutableData(self.small_data))
|
||||
def _then(n):
|
||||
assert isinstance(n, MutableFileNode)
|
||||
self.sdmf_node = n
|
||||
d.addCallback(_then)
|
||||
return d
|
||||
|
||||
def do_upload_mdmf(self):
|
||||
d = self.nm.create_mutable_file(MutableData(self.data),
|
||||
version=MDMF_VERSION)
|
||||
def _then(n):
|
||||
assert isinstance(n, MutableFileNode)
|
||||
self.mdmf_node = n
|
||||
d.addCallback(_then)
|
||||
return d
|
||||
|
||||
def _test_replace(self, offset, new_data):
|
||||
expected = self.data[:offset]+new_data+self.data[offset+len(new_data):]
|
||||
d0 = self.do_upload_mdmf()
|
||||
def _run(ign):
|
||||
d = defer.succeed(None)
|
||||
d.addCallback(lambda ign: self.mdmf_node.get_best_mutable_version())
|
||||
d.addCallback(lambda mv: mv.update(MutableData(new_data), offset))
|
||||
d.addCallback(lambda ign: self.mdmf_node.download_best_version())
|
||||
def _check(results):
|
||||
if results != expected:
|
||||
print
|
||||
print "got: %s ... %s" % (results[:20], results[-20:])
|
||||
print "exp: %s ... %s" % (expected[:20], expected[-20:])
|
||||
self.fail("results != expected")
|
||||
d.addCallback(_check)
|
||||
return d
|
||||
d0.addCallback(_run)
|
||||
return d0
|
||||
|
||||
def test_append(self):
|
||||
# We should be able to append data to a mutable file and get
|
||||
# what we expect.
|
||||
return self._test_replace(len(self.data), "appended")
|
||||
|
||||
def test_replace_middle(self):
|
||||
# We should be able to replace data in the middle of a mutable
|
||||
# file and get what we expect back.
|
||||
return self._test_replace(100, "replaced")
|
||||
|
||||
def test_replace_beginning(self):
|
||||
# We should be able to replace data at the beginning of the file
|
||||
# without truncating the file
|
||||
return self._test_replace(0, "beginning")
|
||||
|
||||
def test_replace_segstart1(self):
|
||||
return self._test_replace(128*1024+1, "NNNN")
|
||||
|
||||
def test_replace_zero_length_beginning(self):
|
||||
return self._test_replace(0, "")
|
||||
|
||||
def test_replace_zero_length_middle(self):
|
||||
return self._test_replace(50, "")
|
||||
|
||||
def test_replace_zero_length_segstart1(self):
|
||||
return self._test_replace(128*1024+1, "")
|
||||
|
||||
def test_replace_and_extend(self):
|
||||
# We should be able to replace data in the middle of a mutable
|
||||
# file and extend that mutable file and get what we expect.
|
||||
return self._test_replace(100, "modified " * 100000)
|
||||
|
||||
|
||||
def _check_differences(self, got, expected):
|
||||
# displaying arbitrary file corruption is tricky for a
|
||||
# 1MB file of repeating data,, so look for likely places
|
||||
# with problems and display them separately
|
||||
gotmods = [mo.span() for mo in re.finditer('([A-Z]+)', got)]
|
||||
expmods = [mo.span() for mo in re.finditer('([A-Z]+)', expected)]
|
||||
gotspans = ["%d:%d=%s" % (start,end,got[start:end])
|
||||
for (start,end) in gotmods]
|
||||
expspans = ["%d:%d=%s" % (start,end,expected[start:end])
|
||||
for (start,end) in expmods]
|
||||
#print "expecting: %s" % expspans
|
||||
|
||||
if got != expected:
|
||||
print "differences:"
|
||||
for segnum in range(len(expected)//SEGSIZE):
|
||||
start = segnum * SEGSIZE
|
||||
end = (segnum+1) * SEGSIZE
|
||||
got_ends = "%s .. %s" % (got[start:start+20], got[end-20:end])
|
||||
exp_ends = "%s .. %s" % (expected[start:start+20], expected[end-20:end])
|
||||
if got_ends != exp_ends:
|
||||
print "expected[%d]: %s" % (start, exp_ends)
|
||||
print "got [%d]: %s" % (start, got_ends)
|
||||
if expspans != gotspans:
|
||||
print "expected: %s" % expspans
|
||||
print "got : %s" % gotspans
|
||||
open("EXPECTED","wb").write(expected)
|
||||
open("GOT","wb").write(got)
|
||||
print "wrote data to EXPECTED and GOT"
|
||||
self.fail("didn't get expected data")
|
||||
|
||||
|
||||
def test_replace_locations(self):
|
||||
# exercise fencepost conditions
|
||||
suspects = range(SEGSIZE-3, SEGSIZE+1)+range(2*SEGSIZE-3, 2*SEGSIZE+1)
|
||||
letters = iter("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
|
||||
d0 = self.do_upload_mdmf()
|
||||
def _run(ign):
|
||||
expected = self.data
|
||||
d = defer.succeed(None)
|
||||
for offset in suspects:
|
||||
new_data = letters.next()*2 # "AA", then "BB", etc
|
||||
expected = expected[:offset]+new_data+expected[offset+2:]
|
||||
d.addCallback(lambda ign:
|
||||
self.mdmf_node.get_best_mutable_version())
|
||||
def _modify(mv, offset=offset, new_data=new_data):
|
||||
# close over 'offset','new_data'
|
||||
md = MutableData(new_data)
|
||||
return mv.update(md, offset)
|
||||
d.addCallback(_modify)
|
||||
d.addCallback(lambda ignored:
|
||||
self.mdmf_node.download_best_version())
|
||||
d.addCallback(self._check_differences, expected)
|
||||
return d
|
||||
d0.addCallback(_run)
|
||||
return d0
|
||||
|
||||
|
||||
def test_append_power_of_two(self):
|
||||
# If we attempt to extend a mutable file so that its segment
|
||||
# count crosses a power-of-two boundary, the update operation
|
||||
# should know how to reencode the file.
|
||||
|
||||
# Note that the data populating self.mdmf_node is about 900 KiB
|
||||
# long -- this is 7 segments in the default segment size. So we
|
||||
# need to add 2 segments worth of data to push it over a
|
||||
# power-of-two boundary.
|
||||
segment = "a" * DEFAULT_MAX_SEGMENT_SIZE
|
||||
new_data = self.data + (segment * 2)
|
||||
d0 = self.do_upload_mdmf()
|
||||
def _run(ign):
|
||||
d = defer.succeed(None)
|
||||
d.addCallback(lambda ign: self.mdmf_node.get_best_mutable_version())
|
||||
d.addCallback(lambda mv: mv.update(MutableData(segment * 2),
|
||||
len(self.data)))
|
||||
d.addCallback(lambda ign: self.mdmf_node.download_best_version())
|
||||
d.addCallback(lambda results:
|
||||
self.failUnlessEqual(results, new_data))
|
||||
return d
|
||||
d0.addCallback(_run)
|
||||
return d0
|
||||
|
||||
def test_update_sdmf(self):
|
||||
# Running update on a single-segment file should still work.
|
||||
new_data = self.small_data + "appended"
|
||||
d0 = self.do_upload_sdmf()
|
||||
def _run(ign):
|
||||
d = defer.succeed(None)
|
||||
d.addCallback(lambda ign: self.sdmf_node.get_best_mutable_version())
|
||||
d.addCallback(lambda mv: mv.update(MutableData("appended"),
|
||||
len(self.small_data)))
|
||||
d.addCallback(lambda ign: self.sdmf_node.download_best_version())
|
||||
d.addCallback(lambda results:
|
||||
self.failUnlessEqual(results, new_data))
|
||||
return d
|
||||
d0.addCallback(_run)
|
||||
return d0
|
||||
|
||||
def test_replace_in_last_segment(self):
|
||||
# The wrapper should know how to handle the tail segment
|
||||
# appropriately.
|
||||
replace_offset = len(self.data) - 100
|
||||
new_data = self.data[:replace_offset] + "replaced"
|
||||
rest_offset = replace_offset + len("replaced")
|
||||
new_data += self.data[rest_offset:]
|
||||
d0 = self.do_upload_mdmf()
|
||||
def _run(ign):
|
||||
d = defer.succeed(None)
|
||||
d.addCallback(lambda ign: self.mdmf_node.get_best_mutable_version())
|
||||
d.addCallback(lambda mv: mv.update(MutableData("replaced"),
|
||||
replace_offset))
|
||||
d.addCallback(lambda ign: self.mdmf_node.download_best_version())
|
||||
d.addCallback(lambda results:
|
||||
self.failUnlessEqual(results, new_data))
|
||||
return d
|
||||
d0.addCallback(_run)
|
||||
return d0
|
||||
|
||||
def test_multiple_segment_replace(self):
|
||||
replace_offset = 2 * DEFAULT_MAX_SEGMENT_SIZE
|
||||
new_data = self.data[:replace_offset]
|
||||
new_segment = "a" * DEFAULT_MAX_SEGMENT_SIZE
|
||||
new_data += 2 * new_segment
|
||||
new_data += "replaced"
|
||||
rest_offset = len(new_data)
|
||||
new_data += self.data[rest_offset:]
|
||||
d0 = self.do_upload_mdmf()
|
||||
def _run(ign):
|
||||
d = defer.succeed(None)
|
||||
d.addCallback(lambda ign: self.mdmf_node.get_best_mutable_version())
|
||||
d.addCallback(lambda mv: mv.update(MutableData((2 * new_segment) + "replaced"),
|
||||
replace_offset))
|
||||
d.addCallback(lambda ignored: self.mdmf_node.download_best_version())
|
||||
d.addCallback(lambda results:
|
||||
self.failUnlessEqual(results, new_data))
|
||||
return d
|
||||
d0.addCallback(_run)
|
||||
return d0
|
462
src/allmydata/test/mutable/test_version.py
Normal file
462
src/allmydata/test/mutable/test_version.py
Normal file
@ -0,0 +1,462 @@
|
||||
import os
|
||||
from cStringIO import StringIO
|
||||
from twisted.internet import defer
|
||||
from twisted.trial import unittest
|
||||
from allmydata import uri
|
||||
from allmydata.interfaces import SDMF_VERSION, MDMF_VERSION
|
||||
from allmydata.util import base32, consumer, mathutil
|
||||
from allmydata.util.fileutil import abspath_expanduser_unicode
|
||||
from allmydata.util.deferredutil import gatherResults
|
||||
from allmydata.mutable.filenode import MutableFileNode
|
||||
from allmydata.mutable.common import MODE_WRITE, MODE_READ, UnrecoverableFileError
|
||||
from allmydata.mutable.publish import MutableData
|
||||
from allmydata.scripts import debug
|
||||
from ..no_network import GridTestMixin
|
||||
from .util import PublishMixin
|
||||
from .. import common_util as testutil
|
||||
|
||||
class Version(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin, \
|
||||
PublishMixin):
|
||||
def setUp(self):
|
||||
GridTestMixin.setUp(self)
|
||||
self.basedir = self.mktemp()
|
||||
self.set_up_grid()
|
||||
self.c = self.g.clients[0]
|
||||
self.nm = self.c.nodemaker
|
||||
self.data = "test data" * 100000 # about 900 KiB; MDMF
|
||||
self.small_data = "test data" * 10 # 90 B; SDMF
|
||||
|
||||
|
||||
def do_upload_mdmf(self, data=None):
|
||||
if data is None:
|
||||
data = self.data
|
||||
d = self.nm.create_mutable_file(MutableData(data),
|
||||
version=MDMF_VERSION)
|
||||
def _then(n):
|
||||
assert isinstance(n, MutableFileNode)
|
||||
assert n._protocol_version == MDMF_VERSION
|
||||
self.mdmf_node = n
|
||||
return n
|
||||
d.addCallback(_then)
|
||||
return d
|
||||
|
||||
def do_upload_sdmf(self, data=None):
|
||||
if data is None:
|
||||
data = self.small_data
|
||||
d = self.nm.create_mutable_file(MutableData(data))
|
||||
def _then(n):
|
||||
assert isinstance(n, MutableFileNode)
|
||||
assert n._protocol_version == SDMF_VERSION
|
||||
self.sdmf_node = n
|
||||
return n
|
||||
d.addCallback(_then)
|
||||
return d
|
||||
|
||||
def do_upload_empty_sdmf(self):
|
||||
d = self.nm.create_mutable_file(MutableData(""))
|
||||
def _then(n):
|
||||
assert isinstance(n, MutableFileNode)
|
||||
self.sdmf_zero_length_node = n
|
||||
assert n._protocol_version == SDMF_VERSION
|
||||
return n
|
||||
d.addCallback(_then)
|
||||
return d
|
||||
|
||||
def do_upload(self):
|
||||
d = self.do_upload_mdmf()
|
||||
d.addCallback(lambda ign: self.do_upload_sdmf())
|
||||
return d
|
||||
|
||||
def test_debug(self):
|
||||
d = self.do_upload_mdmf()
|
||||
def _debug(n):
|
||||
fso = debug.FindSharesOptions()
|
||||
storage_index = base32.b2a(n.get_storage_index())
|
||||
fso.si_s = storage_index
|
||||
fso.nodedirs = [os.path.dirname(abspath_expanduser_unicode(unicode(storedir)))
|
||||
for (i,ss,storedir)
|
||||
in self.iterate_servers()]
|
||||
fso.stdout = StringIO()
|
||||
fso.stderr = StringIO()
|
||||
debug.find_shares(fso)
|
||||
sharefiles = fso.stdout.getvalue().splitlines()
|
||||
expected = self.nm.default_encoding_parameters["n"]
|
||||
self.failUnlessEqual(len(sharefiles), expected)
|
||||
|
||||
do = debug.DumpOptions()
|
||||
do["filename"] = sharefiles[0]
|
||||
do.stdout = StringIO()
|
||||
debug.dump_share(do)
|
||||
output = do.stdout.getvalue()
|
||||
lines = set(output.splitlines())
|
||||
self.failUnless("Mutable slot found:" in lines, output)
|
||||
self.failUnless(" share_type: MDMF" in lines, output)
|
||||
self.failUnless(" num_extra_leases: 0" in lines, output)
|
||||
self.failUnless(" MDMF contents:" in lines, output)
|
||||
self.failUnless(" seqnum: 1" in lines, output)
|
||||
self.failUnless(" required_shares: 3" in lines, output)
|
||||
self.failUnless(" total_shares: 10" in lines, output)
|
||||
self.failUnless(" segsize: 131073" in lines, output)
|
||||
self.failUnless(" datalen: %d" % len(self.data) in lines, output)
|
||||
vcap = n.get_verify_cap().to_string()
|
||||
self.failUnless(" verify-cap: %s" % vcap in lines, output)
|
||||
|
||||
cso = debug.CatalogSharesOptions()
|
||||
cso.nodedirs = fso.nodedirs
|
||||
cso.stdout = StringIO()
|
||||
cso.stderr = StringIO()
|
||||
debug.catalog_shares(cso)
|
||||
shares = cso.stdout.getvalue().splitlines()
|
||||
oneshare = shares[0] # all shares should be MDMF
|
||||
self.failIf(oneshare.startswith("UNKNOWN"), oneshare)
|
||||
self.failUnless(oneshare.startswith("MDMF"), oneshare)
|
||||
fields = oneshare.split()
|
||||
self.failUnlessEqual(fields[0], "MDMF")
|
||||
self.failUnlessEqual(fields[1], storage_index)
|
||||
self.failUnlessEqual(fields[2], "3/10")
|
||||
self.failUnlessEqual(fields[3], "%d" % len(self.data))
|
||||
self.failUnless(fields[4].startswith("#1:"), fields[3])
|
||||
# the rest of fields[4] is the roothash, which depends upon
|
||||
# encryption salts and is not constant. fields[5] is the
|
||||
# remaining time on the longest lease, which is timing dependent.
|
||||
# The rest of the line is the quoted pathname to the share.
|
||||
d.addCallback(_debug)
|
||||
return d
|
||||
|
||||
def test_get_sequence_number(self):
|
||||
d = self.do_upload()
|
||||
d.addCallback(lambda ign: self.mdmf_node.get_best_readable_version())
|
||||
d.addCallback(lambda bv:
|
||||
self.failUnlessEqual(bv.get_sequence_number(), 1))
|
||||
d.addCallback(lambda ignored:
|
||||
self.sdmf_node.get_best_readable_version())
|
||||
d.addCallback(lambda bv:
|
||||
self.failUnlessEqual(bv.get_sequence_number(), 1))
|
||||
# Now update. The sequence number in both cases should be 1 in
|
||||
# both cases.
|
||||
def _do_update(ignored):
|
||||
new_data = MutableData("foo bar baz" * 100000)
|
||||
new_small_data = MutableData("foo bar baz" * 10)
|
||||
d1 = self.mdmf_node.overwrite(new_data)
|
||||
d2 = self.sdmf_node.overwrite(new_small_data)
|
||||
dl = gatherResults([d1, d2])
|
||||
return dl
|
||||
d.addCallback(_do_update)
|
||||
d.addCallback(lambda ignored:
|
||||
self.mdmf_node.get_best_readable_version())
|
||||
d.addCallback(lambda bv:
|
||||
self.failUnlessEqual(bv.get_sequence_number(), 2))
|
||||
d.addCallback(lambda ignored:
|
||||
self.sdmf_node.get_best_readable_version())
|
||||
d.addCallback(lambda bv:
|
||||
self.failUnlessEqual(bv.get_sequence_number(), 2))
|
||||
return d
|
||||
|
||||
|
||||
def test_cap_after_upload(self):
|
||||
# If we create a new mutable file and upload things to it, and
|
||||
# it's an MDMF file, we should get an MDMF cap back from that
|
||||
# file and should be able to use that.
|
||||
# That's essentially what MDMF node is, so just check that.
|
||||
d = self.do_upload_mdmf()
|
||||
def _then(ign):
|
||||
mdmf_uri = self.mdmf_node.get_uri()
|
||||
cap = uri.from_string(mdmf_uri)
|
||||
self.failUnless(isinstance(cap, uri.WriteableMDMFFileURI))
|
||||
readonly_mdmf_uri = self.mdmf_node.get_readonly_uri()
|
||||
cap = uri.from_string(readonly_mdmf_uri)
|
||||
self.failUnless(isinstance(cap, uri.ReadonlyMDMFFileURI))
|
||||
d.addCallback(_then)
|
||||
return d
|
||||
|
||||
def test_mutable_version(self):
|
||||
# assert that getting parameters from the IMutableVersion object
|
||||
# gives us the same data as getting them from the filenode itself
|
||||
d = self.do_upload()
|
||||
d.addCallback(lambda ign: self.mdmf_node.get_best_mutable_version())
|
||||
def _check_mdmf(bv):
|
||||
n = self.mdmf_node
|
||||
self.failUnlessEqual(bv.get_writekey(), n.get_writekey())
|
||||
self.failUnlessEqual(bv.get_storage_index(), n.get_storage_index())
|
||||
self.failIf(bv.is_readonly())
|
||||
d.addCallback(_check_mdmf)
|
||||
d.addCallback(lambda ign: self.sdmf_node.get_best_mutable_version())
|
||||
def _check_sdmf(bv):
|
||||
n = self.sdmf_node
|
||||
self.failUnlessEqual(bv.get_writekey(), n.get_writekey())
|
||||
self.failUnlessEqual(bv.get_storage_index(), n.get_storage_index())
|
||||
self.failIf(bv.is_readonly())
|
||||
d.addCallback(_check_sdmf)
|
||||
return d
|
||||
|
||||
|
||||
def test_get_readonly_version(self):
|
||||
d = self.do_upload()
|
||||
d.addCallback(lambda ign: self.mdmf_node.get_best_readable_version())
|
||||
d.addCallback(lambda bv: self.failUnless(bv.is_readonly()))
|
||||
|
||||
# Attempting to get a mutable version of a mutable file from a
|
||||
# filenode initialized with a readcap should return a readonly
|
||||
# version of that same node.
|
||||
d.addCallback(lambda ign: self.mdmf_node.get_readonly())
|
||||
d.addCallback(lambda ro: ro.get_best_mutable_version())
|
||||
d.addCallback(lambda v: self.failUnless(v.is_readonly()))
|
||||
|
||||
d.addCallback(lambda ign: self.sdmf_node.get_best_readable_version())
|
||||
d.addCallback(lambda bv: self.failUnless(bv.is_readonly()))
|
||||
|
||||
d.addCallback(lambda ign: self.sdmf_node.get_readonly())
|
||||
d.addCallback(lambda ro: ro.get_best_mutable_version())
|
||||
d.addCallback(lambda v: self.failUnless(v.is_readonly()))
|
||||
return d
|
||||
|
||||
|
||||
def test_toplevel_overwrite(self):
|
||||
new_data = MutableData("foo bar baz" * 100000)
|
||||
new_small_data = MutableData("foo bar baz" * 10)
|
||||
d = self.do_upload()
|
||||
d.addCallback(lambda ign: self.mdmf_node.overwrite(new_data))
|
||||
d.addCallback(lambda ignored:
|
||||
self.mdmf_node.download_best_version())
|
||||
d.addCallback(lambda data:
|
||||
self.failUnlessEqual(data, "foo bar baz" * 100000))
|
||||
d.addCallback(lambda ignored:
|
||||
self.sdmf_node.overwrite(new_small_data))
|
||||
d.addCallback(lambda ignored:
|
||||
self.sdmf_node.download_best_version())
|
||||
d.addCallback(lambda data:
|
||||
self.failUnlessEqual(data, "foo bar baz" * 10))
|
||||
return d
|
||||
|
||||
|
||||
def test_toplevel_modify(self):
|
||||
d = self.do_upload()
|
||||
def modifier(old_contents, servermap, first_time):
|
||||
return old_contents + "modified"
|
||||
d.addCallback(lambda ign: self.mdmf_node.modify(modifier))
|
||||
d.addCallback(lambda ignored:
|
||||
self.mdmf_node.download_best_version())
|
||||
d.addCallback(lambda data:
|
||||
self.failUnlessIn("modified", data))
|
||||
d.addCallback(lambda ignored:
|
||||
self.sdmf_node.modify(modifier))
|
||||
d.addCallback(lambda ignored:
|
||||
self.sdmf_node.download_best_version())
|
||||
d.addCallback(lambda data:
|
||||
self.failUnlessIn("modified", data))
|
||||
return d
|
||||
|
||||
|
||||
def test_version_modify(self):
|
||||
# TODO: When we can publish multiple versions, alter this test
|
||||
# to modify a version other than the best usable version, then
|
||||
# test to see that the best recoverable version is that.
|
||||
d = self.do_upload()
|
||||
def modifier(old_contents, servermap, first_time):
|
||||
return old_contents + "modified"
|
||||
d.addCallback(lambda ign: self.mdmf_node.modify(modifier))
|
||||
d.addCallback(lambda ignored:
|
||||
self.mdmf_node.download_best_version())
|
||||
d.addCallback(lambda data:
|
||||
self.failUnlessIn("modified", data))
|
||||
d.addCallback(lambda ignored:
|
||||
self.sdmf_node.modify(modifier))
|
||||
d.addCallback(lambda ignored:
|
||||
self.sdmf_node.download_best_version())
|
||||
d.addCallback(lambda data:
|
||||
self.failUnlessIn("modified", data))
|
||||
return d
|
||||
|
||||
|
||||
def test_download_version(self):
|
||||
d = self.publish_multiple()
|
||||
# We want to have two recoverable versions on the grid.
|
||||
d.addCallback(lambda res:
|
||||
self._set_versions({0:0,2:0,4:0,6:0,8:0,
|
||||
1:1,3:1,5:1,7:1,9:1}))
|
||||
# Now try to download each version. We should get the plaintext
|
||||
# associated with that version.
|
||||
d.addCallback(lambda ignored:
|
||||
self._fn.get_servermap(mode=MODE_READ))
|
||||
def _got_servermap(smap):
|
||||
versions = smap.recoverable_versions()
|
||||
assert len(versions) == 2
|
||||
|
||||
self.servermap = smap
|
||||
self.version1, self.version2 = versions
|
||||
assert self.version1 != self.version2
|
||||
|
||||
self.version1_seqnum = self.version1[0]
|
||||
self.version2_seqnum = self.version2[0]
|
||||
self.version1_index = self.version1_seqnum - 1
|
||||
self.version2_index = self.version2_seqnum - 1
|
||||
|
||||
d.addCallback(_got_servermap)
|
||||
d.addCallback(lambda ignored:
|
||||
self._fn.download_version(self.servermap, self.version1))
|
||||
d.addCallback(lambda results:
|
||||
self.failUnlessEqual(self.CONTENTS[self.version1_index],
|
||||
results))
|
||||
d.addCallback(lambda ignored:
|
||||
self._fn.download_version(self.servermap, self.version2))
|
||||
d.addCallback(lambda results:
|
||||
self.failUnlessEqual(self.CONTENTS[self.version2_index],
|
||||
results))
|
||||
return d
|
||||
|
||||
|
||||
def test_download_nonexistent_version(self):
|
||||
d = self.do_upload_mdmf()
|
||||
d.addCallback(lambda ign: self.mdmf_node.get_servermap(mode=MODE_WRITE))
|
||||
def _set_servermap(servermap):
|
||||
self.servermap = servermap
|
||||
d.addCallback(_set_servermap)
|
||||
d.addCallback(lambda ignored:
|
||||
self.shouldFail(UnrecoverableFileError, "nonexistent version",
|
||||
None,
|
||||
self.mdmf_node.download_version, self.servermap,
|
||||
"not a version"))
|
||||
return d
|
||||
|
||||
|
||||
def _test_partial_read(self, node, expected, modes, step):
|
||||
d = node.get_best_readable_version()
|
||||
for (name, offset, length) in modes:
|
||||
d.addCallback(self._do_partial_read, name, expected, offset, length)
|
||||
# then read the whole thing, but only a few bytes at a time, and see
|
||||
# that the results are what we expect.
|
||||
def _read_data(version):
|
||||
c = consumer.MemoryConsumer()
|
||||
d2 = defer.succeed(None)
|
||||
for i in xrange(0, len(expected), step):
|
||||
d2.addCallback(lambda ignored, i=i: version.read(c, i, step))
|
||||
d2.addCallback(lambda ignored:
|
||||
self.failUnlessEqual(expected, "".join(c.chunks)))
|
||||
return d2
|
||||
d.addCallback(_read_data)
|
||||
return d
|
||||
|
||||
def _do_partial_read(self, version, name, expected, offset, length):
|
||||
c = consumer.MemoryConsumer()
|
||||
d = version.read(c, offset, length)
|
||||
if length is None:
|
||||
expected_range = expected[offset:]
|
||||
else:
|
||||
expected_range = expected[offset:offset+length]
|
||||
d.addCallback(lambda ignored: "".join(c.chunks))
|
||||
def _check(results):
|
||||
if results != expected_range:
|
||||
print "read([%d]+%s) got %d bytes, not %d" % \
|
||||
(offset, length, len(results), len(expected_range))
|
||||
print "got: %s ... %s" % (results[:20], results[-20:])
|
||||
print "exp: %s ... %s" % (expected_range[:20], expected_range[-20:])
|
||||
self.fail("results[%s] != expected_range" % name)
|
||||
return version # daisy-chained to next call
|
||||
d.addCallback(_check)
|
||||
return d
|
||||
|
||||
def test_partial_read_mdmf_0(self):
|
||||
data = ""
|
||||
d = self.do_upload_mdmf(data=data)
|
||||
modes = [("all1", 0,0),
|
||||
("all2", 0,None),
|
||||
]
|
||||
d.addCallback(self._test_partial_read, data, modes, 1)
|
||||
return d
|
||||
|
||||
def test_partial_read_mdmf_large(self):
|
||||
segment_boundary = mathutil.next_multiple(128 * 1024, 3)
|
||||
modes = [("start_on_segment_boundary", segment_boundary, 50),
|
||||
("ending_one_byte_after_segment_boundary", segment_boundary-50, 51),
|
||||
("zero_length_at_start", 0, 0),
|
||||
("zero_length_in_middle", 50, 0),
|
||||
("zero_length_at_segment_boundary", segment_boundary, 0),
|
||||
("complete_file1", 0, len(self.data)),
|
||||
("complete_file2", 0, None),
|
||||
]
|
||||
d = self.do_upload_mdmf()
|
||||
d.addCallback(self._test_partial_read, self.data, modes, 10000)
|
||||
return d
|
||||
|
||||
def test_partial_read_sdmf_0(self):
|
||||
data = ""
|
||||
modes = [("all1", 0,0),
|
||||
("all2", 0,None),
|
||||
]
|
||||
d = self.do_upload_sdmf(data=data)
|
||||
d.addCallback(self._test_partial_read, data, modes, 1)
|
||||
return d
|
||||
|
||||
def test_partial_read_sdmf_2(self):
|
||||
data = "hi"
|
||||
modes = [("one_byte", 0, 1),
|
||||
("last_byte", 1, 1),
|
||||
("last_byte2", 1, None),
|
||||
("complete_file", 0, 2),
|
||||
("complete_file2", 0, None),
|
||||
]
|
||||
d = self.do_upload_sdmf(data=data)
|
||||
d.addCallback(self._test_partial_read, data, modes, 1)
|
||||
return d
|
||||
|
||||
def test_partial_read_sdmf_90(self):
|
||||
modes = [("start_at_middle", 50, 40),
|
||||
("start_at_middle2", 50, None),
|
||||
("zero_length_at_start", 0, 0),
|
||||
("zero_length_in_middle", 50, 0),
|
||||
("zero_length_at_end", 90, 0),
|
||||
("complete_file1", 0, None),
|
||||
("complete_file2", 0, 90),
|
||||
]
|
||||
d = self.do_upload_sdmf()
|
||||
d.addCallback(self._test_partial_read, self.small_data, modes, 10)
|
||||
return d
|
||||
|
||||
def test_partial_read_sdmf_100(self):
|
||||
data = "test data "*10
|
||||
modes = [("start_at_middle", 50, 50),
|
||||
("start_at_middle2", 50, None),
|
||||
("zero_length_at_start", 0, 0),
|
||||
("zero_length_in_middle", 50, 0),
|
||||
("complete_file1", 0, 100),
|
||||
("complete_file2", 0, None),
|
||||
]
|
||||
d = self.do_upload_sdmf(data=data)
|
||||
d.addCallback(self._test_partial_read, data, modes, 10)
|
||||
return d
|
||||
|
||||
|
||||
def _test_read_and_download(self, node, expected):
|
||||
d = node.get_best_readable_version()
|
||||
def _read_data(version):
|
||||
c = consumer.MemoryConsumer()
|
||||
c2 = consumer.MemoryConsumer()
|
||||
d2 = defer.succeed(None)
|
||||
d2.addCallback(lambda ignored: version.read(c))
|
||||
d2.addCallback(lambda ignored:
|
||||
self.failUnlessEqual(expected, "".join(c.chunks)))
|
||||
|
||||
d2.addCallback(lambda ignored: version.read(c2, offset=0,
|
||||
size=len(expected)))
|
||||
d2.addCallback(lambda ignored:
|
||||
self.failUnlessEqual(expected, "".join(c2.chunks)))
|
||||
return d2
|
||||
d.addCallback(_read_data)
|
||||
d.addCallback(lambda ignored: node.download_best_version())
|
||||
d.addCallback(lambda data: self.failUnlessEqual(expected, data))
|
||||
return d
|
||||
|
||||
def test_read_and_download_mdmf(self):
|
||||
d = self.do_upload_mdmf()
|
||||
d.addCallback(self._test_read_and_download, self.data)
|
||||
return d
|
||||
|
||||
def test_read_and_download_sdmf(self):
|
||||
d = self.do_upload_sdmf()
|
||||
d.addCallback(self._test_read_and_download, self.small_data)
|
||||
return d
|
||||
|
||||
def test_read_and_download_sdmf_zero_length(self):
|
||||
d = self.do_upload_empty_sdmf()
|
||||
d.addCallback(self._test_read_and_download, "")
|
||||
return d
|
352
src/allmydata/test/mutable/util.py
Normal file
352
src/allmydata/test/mutable/util.py
Normal file
@ -0,0 +1,352 @@
|
||||
from cStringIO import StringIO
|
||||
from twisted.internet import defer, reactor
|
||||
from foolscap.api import eventually, fireEventually
|
||||
from allmydata import client
|
||||
from allmydata.nodemaker import NodeMaker
|
||||
from allmydata.interfaces import SDMF_VERSION, MDMF_VERSION
|
||||
from allmydata.util import base32
|
||||
from allmydata.util.hashutil import tagged_hash
|
||||
from allmydata.storage_client import StorageFarmBroker
|
||||
from allmydata.mutable.layout import MDMFSlotReadProxy
|
||||
from allmydata.mutable.publish import MutableData
|
||||
from ..common import TEST_RSA_KEY_SIZE
|
||||
|
||||
def eventuaaaaaly(res=None):
|
||||
d = fireEventually(res)
|
||||
d.addCallback(fireEventually)
|
||||
d.addCallback(fireEventually)
|
||||
return d
|
||||
|
||||
# this "FakeStorage" exists to put the share data in RAM and avoid using real
|
||||
# network connections, both to speed up the tests and to reduce the amount of
|
||||
# non-mutable.py code being exercised.
|
||||
|
||||
class FakeStorage:
|
||||
# this class replaces the collection of storage servers, allowing the
|
||||
# tests to examine and manipulate the published shares. It also lets us
|
||||
# control the order in which read queries are answered, to exercise more
|
||||
# of the error-handling code in Retrieve .
|
||||
#
|
||||
# Note that we ignore the storage index: this FakeStorage instance can
|
||||
# only be used for a single storage index.
|
||||
|
||||
|
||||
def __init__(self):
|
||||
self._peers = {}
|
||||
# _sequence is used to cause the responses to occur in a specific
|
||||
# order. If it is in use, then we will defer queries instead of
|
||||
# answering them right away, accumulating the Deferreds in a dict. We
|
||||
# don't know exactly how many queries we'll get, so exactly one
|
||||
# second after the first query arrives, we will release them all (in
|
||||
# order).
|
||||
self._sequence = None
|
||||
self._pending = {}
|
||||
self._pending_timer = None
|
||||
|
||||
def read(self, peerid, storage_index):
|
||||
shares = self._peers.get(peerid, {})
|
||||
if self._sequence is None:
|
||||
return eventuaaaaaly(shares)
|
||||
d = defer.Deferred()
|
||||
if not self._pending:
|
||||
self._pending_timer = reactor.callLater(1.0, self._fire_readers)
|
||||
if peerid not in self._pending:
|
||||
self._pending[peerid] = []
|
||||
self._pending[peerid].append( (d, shares) )
|
||||
return d
|
||||
|
||||
def _fire_readers(self):
|
||||
self._pending_timer = None
|
||||
pending = self._pending
|
||||
self._pending = {}
|
||||
for peerid in self._sequence:
|
||||
if peerid in pending:
|
||||
for (d, shares) in pending.pop(peerid):
|
||||
eventually(d.callback, shares)
|
||||
for peerid in pending:
|
||||
for (d, shares) in pending[peerid]:
|
||||
eventually(d.callback, shares)
|
||||
|
||||
def write(self, peerid, storage_index, shnum, offset, data):
|
||||
if peerid not in self._peers:
|
||||
self._peers[peerid] = {}
|
||||
shares = self._peers[peerid]
|
||||
f = StringIO()
|
||||
f.write(shares.get(shnum, ""))
|
||||
f.seek(offset)
|
||||
f.write(data)
|
||||
shares[shnum] = f.getvalue()
|
||||
|
||||
|
||||
class FakeStorageServer:
|
||||
def __init__(self, peerid, storage):
|
||||
self.peerid = peerid
|
||||
self.storage = storage
|
||||
self.queries = 0
|
||||
def callRemote(self, methname, *args, **kwargs):
|
||||
self.queries += 1
|
||||
def _call():
|
||||
meth = getattr(self, methname)
|
||||
return meth(*args, **kwargs)
|
||||
d = fireEventually()
|
||||
d.addCallback(lambda res: _call())
|
||||
return d
|
||||
|
||||
def callRemoteOnly(self, methname, *args, **kwargs):
|
||||
self.queries += 1
|
||||
d = self.callRemote(methname, *args, **kwargs)
|
||||
d.addBoth(lambda ignore: None)
|
||||
pass
|
||||
|
||||
def advise_corrupt_share(self, share_type, storage_index, shnum, reason):
|
||||
pass
|
||||
|
||||
def slot_readv(self, storage_index, shnums, readv):
|
||||
d = self.storage.read(self.peerid, storage_index)
|
||||
def _read(shares):
|
||||
response = {}
|
||||
for shnum in shares:
|
||||
if shnums and shnum not in shnums:
|
||||
continue
|
||||
vector = response[shnum] = []
|
||||
for (offset, length) in readv:
|
||||
assert isinstance(offset, (int, long)), offset
|
||||
assert isinstance(length, (int, long)), length
|
||||
vector.append(shares[shnum][offset:offset+length])
|
||||
return response
|
||||
d.addCallback(_read)
|
||||
return d
|
||||
|
||||
def slot_testv_and_readv_and_writev(self, storage_index, secrets,
|
||||
tw_vectors, read_vector):
|
||||
# always-pass: parrot the test vectors back to them.
|
||||
readv = {}
|
||||
for shnum, (testv, writev, new_length) in tw_vectors.items():
|
||||
for (offset, length, op, specimen) in testv:
|
||||
assert op in ("le", "eq", "ge")
|
||||
# TODO: this isn't right, the read is controlled by read_vector,
|
||||
# not by testv
|
||||
readv[shnum] = [ specimen
|
||||
for (offset, length, op, specimen)
|
||||
in testv ]
|
||||
for (offset, data) in writev:
|
||||
self.storage.write(self.peerid, storage_index, shnum,
|
||||
offset, data)
|
||||
answer = (True, readv)
|
||||
return fireEventually(answer)
|
||||
|
||||
|
||||
def flip_bit(original, byte_offset):
|
||||
return (original[:byte_offset] +
|
||||
chr(ord(original[byte_offset]) ^ 0x01) +
|
||||
original[byte_offset+1:])
|
||||
|
||||
def add_two(original, byte_offset):
|
||||
# It isn't enough to simply flip the bit for the version number,
|
||||
# because 1 is a valid version number. So we add two instead.
|
||||
return (original[:byte_offset] +
|
||||
chr(ord(original[byte_offset]) ^ 0x02) +
|
||||
original[byte_offset+1:])
|
||||
|
||||
def corrupt(res, s, offset, shnums_to_corrupt=None, offset_offset=0):
|
||||
# if shnums_to_corrupt is None, corrupt all shares. Otherwise it is a
|
||||
# list of shnums to corrupt.
|
||||
ds = []
|
||||
for peerid in s._peers:
|
||||
shares = s._peers[peerid]
|
||||
for shnum in shares:
|
||||
if (shnums_to_corrupt is not None
|
||||
and shnum not in shnums_to_corrupt):
|
||||
continue
|
||||
data = shares[shnum]
|
||||
# We're feeding the reader all of the share data, so it
|
||||
# won't need to use the rref that we didn't provide, nor the
|
||||
# storage index that we didn't provide. We do this because
|
||||
# the reader will work for both MDMF and SDMF.
|
||||
reader = MDMFSlotReadProxy(None, None, shnum, data)
|
||||
# We need to get the offsets for the next part.
|
||||
d = reader.get_verinfo()
|
||||
def _do_corruption(verinfo, data, shnum, shares):
|
||||
(seqnum,
|
||||
root_hash,
|
||||
IV,
|
||||
segsize,
|
||||
datalen,
|
||||
k, n, prefix, o) = verinfo
|
||||
if isinstance(offset, tuple):
|
||||
offset1, offset2 = offset
|
||||
else:
|
||||
offset1 = offset
|
||||
offset2 = 0
|
||||
if offset1 == "pubkey" and IV:
|
||||
real_offset = 107
|
||||
elif offset1 in o:
|
||||
real_offset = o[offset1]
|
||||
else:
|
||||
real_offset = offset1
|
||||
real_offset = int(real_offset) + offset2 + offset_offset
|
||||
assert isinstance(real_offset, int), offset
|
||||
if offset1 == 0: # verbyte
|
||||
f = add_two
|
||||
else:
|
||||
f = flip_bit
|
||||
shares[shnum] = f(data, real_offset)
|
||||
d.addCallback(_do_corruption, data, shnum, shares)
|
||||
ds.append(d)
|
||||
dl = defer.DeferredList(ds)
|
||||
dl.addCallback(lambda ignored: res)
|
||||
return dl
|
||||
|
||||
def make_storagebroker(s=None, num_peers=10):
|
||||
if not s:
|
||||
s = FakeStorage()
|
||||
peerids = [tagged_hash("peerid", "%d" % i)[:20]
|
||||
for i in range(num_peers)]
|
||||
storage_broker = StorageFarmBroker(True)
|
||||
for peerid in peerids:
|
||||
fss = FakeStorageServer(peerid, s)
|
||||
ann = {"anonymous-storage-FURL": "pb://%s@nowhere/fake" % base32.b2a(peerid),
|
||||
"permutation-seed-base32": base32.b2a(peerid) }
|
||||
storage_broker.test_add_rref(peerid, fss, ann)
|
||||
return storage_broker
|
||||
|
||||
def make_nodemaker(s=None, num_peers=10, keysize=TEST_RSA_KEY_SIZE):
|
||||
storage_broker = make_storagebroker(s, num_peers)
|
||||
sh = client.SecretHolder("lease secret", "convergence secret")
|
||||
keygen = client.KeyGenerator()
|
||||
if keysize:
|
||||
keygen.set_default_keysize(keysize)
|
||||
nodemaker = NodeMaker(storage_broker, sh, None,
|
||||
None, None,
|
||||
{"k": 3, "n": 10}, SDMF_VERSION, keygen)
|
||||
return nodemaker
|
||||
|
||||
class PublishMixin:
|
||||
def publish_one(self):
|
||||
# publish a file and create shares, which can then be manipulated
|
||||
# later.
|
||||
self.CONTENTS = "New contents go here" * 1000
|
||||
self.uploadable = MutableData(self.CONTENTS)
|
||||
self._storage = FakeStorage()
|
||||
self._nodemaker = make_nodemaker(self._storage)
|
||||
self._storage_broker = self._nodemaker.storage_broker
|
||||
d = self._nodemaker.create_mutable_file(self.uploadable)
|
||||
def _created(node):
|
||||
self._fn = node
|
||||
self._fn2 = self._nodemaker.create_from_cap(node.get_uri())
|
||||
d.addCallback(_created)
|
||||
return d
|
||||
|
||||
def publish_mdmf(self, data=None):
|
||||
# like publish_one, except that the result is guaranteed to be
|
||||
# an MDMF file.
|
||||
# self.CONTENTS should have more than one segment.
|
||||
if data is None:
|
||||
data = "This is an MDMF file" * 100000
|
||||
self.CONTENTS = data
|
||||
self.uploadable = MutableData(self.CONTENTS)
|
||||
self._storage = FakeStorage()
|
||||
self._nodemaker = make_nodemaker(self._storage)
|
||||
self._storage_broker = self._nodemaker.storage_broker
|
||||
d = self._nodemaker.create_mutable_file(self.uploadable, version=MDMF_VERSION)
|
||||
def _created(node):
|
||||
self._fn = node
|
||||
self._fn2 = self._nodemaker.create_from_cap(node.get_uri())
|
||||
d.addCallback(_created)
|
||||
return d
|
||||
|
||||
|
||||
def publish_sdmf(self, data=None):
|
||||
# like publish_one, except that the result is guaranteed to be
|
||||
# an SDMF file
|
||||
if data is None:
|
||||
data = "This is an SDMF file" * 1000
|
||||
self.CONTENTS = data
|
||||
self.uploadable = MutableData(self.CONTENTS)
|
||||
self._storage = FakeStorage()
|
||||
self._nodemaker = make_nodemaker(self._storage)
|
||||
self._storage_broker = self._nodemaker.storage_broker
|
||||
d = self._nodemaker.create_mutable_file(self.uploadable, version=SDMF_VERSION)
|
||||
def _created(node):
|
||||
self._fn = node
|
||||
self._fn2 = self._nodemaker.create_from_cap(node.get_uri())
|
||||
d.addCallback(_created)
|
||||
return d
|
||||
|
||||
|
||||
def publish_multiple(self, version=0):
|
||||
self.CONTENTS = ["Contents 0",
|
||||
"Contents 1",
|
||||
"Contents 2",
|
||||
"Contents 3a",
|
||||
"Contents 3b"]
|
||||
self.uploadables = [MutableData(d) for d in self.CONTENTS]
|
||||
self._copied_shares = {}
|
||||
self._storage = FakeStorage()
|
||||
self._nodemaker = make_nodemaker(self._storage)
|
||||
d = self._nodemaker.create_mutable_file(self.uploadables[0], version=version) # seqnum=1
|
||||
def _created(node):
|
||||
self._fn = node
|
||||
# now create multiple versions of the same file, and accumulate
|
||||
# their shares, so we can mix and match them later.
|
||||
d = defer.succeed(None)
|
||||
d.addCallback(self._copy_shares, 0)
|
||||
d.addCallback(lambda res: node.overwrite(self.uploadables[1])) #s2
|
||||
d.addCallback(self._copy_shares, 1)
|
||||
d.addCallback(lambda res: node.overwrite(self.uploadables[2])) #s3
|
||||
d.addCallback(self._copy_shares, 2)
|
||||
d.addCallback(lambda res: node.overwrite(self.uploadables[3])) #s4a
|
||||
d.addCallback(self._copy_shares, 3)
|
||||
# now we replace all the shares with version s3, and upload a new
|
||||
# version to get s4b.
|
||||
rollback = dict([(i,2) for i in range(10)])
|
||||
d.addCallback(lambda res: self._set_versions(rollback))
|
||||
d.addCallback(lambda res: node.overwrite(self.uploadables[4])) #s4b
|
||||
d.addCallback(self._copy_shares, 4)
|
||||
# we leave the storage in state 4
|
||||
return d
|
||||
d.addCallback(_created)
|
||||
return d
|
||||
|
||||
|
||||
def _copy_shares(self, ignored, index):
|
||||
shares = self._storage._peers
|
||||
# we need a deep copy
|
||||
new_shares = {}
|
||||
for peerid in shares:
|
||||
new_shares[peerid] = {}
|
||||
for shnum in shares[peerid]:
|
||||
new_shares[peerid][shnum] = shares[peerid][shnum]
|
||||
self._copied_shares[index] = new_shares
|
||||
|
||||
def _set_versions(self, versionmap):
|
||||
# versionmap maps shnums to which version (0,1,2,3,4) we want the
|
||||
# share to be at. Any shnum which is left out of the map will stay at
|
||||
# its current version.
|
||||
shares = self._storage._peers
|
||||
oldshares = self._copied_shares
|
||||
for peerid in shares:
|
||||
for shnum in shares[peerid]:
|
||||
if shnum in versionmap:
|
||||
index = versionmap[shnum]
|
||||
shares[peerid][shnum] = oldshares[index][peerid][shnum]
|
||||
|
||||
class CheckerMixin:
|
||||
def check_good(self, r, where):
|
||||
self.failUnless(r.is_healthy(), where)
|
||||
return r
|
||||
|
||||
def check_bad(self, r, where):
|
||||
self.failIf(r.is_healthy(), where)
|
||||
return r
|
||||
|
||||
def check_expected_failure(self, r, expected_exception, substring, where):
|
||||
for (peerid, storage_index, shnum, f) in r.get_share_problems():
|
||||
if f.check(expected_exception):
|
||||
self.failUnless(substring in str(f),
|
||||
"%s: substring '%s' not in '%s'" %
|
||||
(where, substring, str(f)))
|
||||
return
|
||||
self.fail("%s: didn't see expected exception %s in problems %s" %
|
||||
(where, expected_exception, r.get_share_problems()))
|
||||
|
@ -30,7 +30,7 @@ from allmydata.util import fileutil, idlib, hashutil
|
||||
from allmydata.util.hashutil import sha1
|
||||
from allmydata.test.common_web import HTTPClientGETFactory
|
||||
from allmydata.interfaces import IStorageBroker, IServer
|
||||
from allmydata.test.common import TEST_RSA_KEY_SIZE
|
||||
from .common import TEST_RSA_KEY_SIZE
|
||||
|
||||
|
||||
class IntentionalError(Exception):
|
||||
@ -184,6 +184,10 @@ class NoNetworkClient(Client):
|
||||
pass
|
||||
def init_introducer_client(self):
|
||||
pass
|
||||
def create_control_tub(self):
|
||||
pass
|
||||
def create_log_tub(self):
|
||||
pass
|
||||
def setup_logging(self):
|
||||
pass
|
||||
def startService(self):
|
||||
@ -360,13 +364,18 @@ class GridTestMixin:
|
||||
return self.s.stopService()
|
||||
|
||||
def set_up_grid(self, num_clients=1, num_servers=10,
|
||||
client_config_hooks={}):
|
||||
client_config_hooks={}, oneshare=False):
|
||||
# self.basedir must be set
|
||||
self.g = NoNetworkGrid(self.basedir,
|
||||
num_clients=num_clients,
|
||||
num_servers=num_servers,
|
||||
client_config_hooks=client_config_hooks)
|
||||
self.g.setServiceParent(self.s)
|
||||
if oneshare:
|
||||
c = self.get_client(0)
|
||||
c.encoding_params["k"] = 1
|
||||
c.encoding_params["happy"] = 1
|
||||
c.encoding_params["n"] = 1
|
||||
self._record_webports_and_baseurls()
|
||||
|
||||
def _record_webports_and_baseurls(self):
|
||||
|
@ -4,14 +4,14 @@ from twisted.trial import unittest
|
||||
|
||||
from allmydata.util import configutil
|
||||
from allmydata.test.no_network import GridTestMixin
|
||||
from .test_cli import CLITestMixin
|
||||
from .cli.test_cli import CLITestMixin
|
||||
|
||||
|
||||
class ConfigUtilTests(CLITestMixin, GridTestMixin, unittest.TestCase):
|
||||
|
||||
def test_config_utils(self):
|
||||
self.basedir = "cli/ConfigUtilTests/test-config-utils"
|
||||
self.set_up_grid()
|
||||
self.set_up_grid(oneshare=True)
|
||||
tahoe_cfg = os.path.join(self.get_clientdir(i=0), "tahoe.cfg")
|
||||
|
||||
# test that at least one option was read correctly
|
||||
|
@ -732,22 +732,22 @@ class Dirnode(GridTestMixin, unittest.TestCase,
|
||||
|
||||
def test_basic(self):
|
||||
self.basedir = "dirnode/Dirnode/test_basic"
|
||||
self.set_up_grid()
|
||||
self.set_up_grid(oneshare=True)
|
||||
return self._do_basic_test()
|
||||
|
||||
def test_basic_mdmf(self):
|
||||
self.basedir = "dirnode/Dirnode/test_basic_mdmf"
|
||||
self.set_up_grid()
|
||||
self.set_up_grid(oneshare=True)
|
||||
return self._do_basic_test(mdmf=True)
|
||||
|
||||
def test_initial_children(self):
|
||||
self.basedir = "dirnode/Dirnode/test_initial_children"
|
||||
self.set_up_grid()
|
||||
self.set_up_grid(oneshare=True)
|
||||
return self._do_initial_children_test()
|
||||
|
||||
def test_immutable(self):
|
||||
self.basedir = "dirnode/Dirnode/test_immutable"
|
||||
self.set_up_grid()
|
||||
self.set_up_grid(oneshare=True)
|
||||
c = self.g.clients[0]
|
||||
nm = c.nodemaker
|
||||
|
||||
@ -941,7 +941,7 @@ class Dirnode(GridTestMixin, unittest.TestCase,
|
||||
|
||||
def test_directory_representation(self):
|
||||
self.basedir = "dirnode/Dirnode/test_directory_representation"
|
||||
self.set_up_grid()
|
||||
self.set_up_grid(oneshare=True)
|
||||
c = self.g.clients[0]
|
||||
nm = c.nodemaker
|
||||
|
||||
@ -1038,7 +1038,7 @@ class Dirnode(GridTestMixin, unittest.TestCase,
|
||||
|
||||
def test_check(self):
|
||||
self.basedir = "dirnode/Dirnode/test_check"
|
||||
self.set_up_grid()
|
||||
self.set_up_grid(oneshare=True)
|
||||
c = self.g.clients[0]
|
||||
d = c.create_dirnode()
|
||||
d.addCallback(lambda dn: dn.check(Monitor()))
|
||||
@ -1079,7 +1079,7 @@ class Dirnode(GridTestMixin, unittest.TestCase,
|
||||
|
||||
def test_deepcheck(self):
|
||||
self.basedir = "dirnode/Dirnode/test_deepcheck"
|
||||
self.set_up_grid()
|
||||
self.set_up_grid(oneshare=True)
|
||||
d = self._test_deepcheck_create()
|
||||
d.addCallback(lambda rootnode: rootnode.start_deep_check().when_done())
|
||||
def _check_results(r):
|
||||
@ -1099,7 +1099,7 @@ class Dirnode(GridTestMixin, unittest.TestCase,
|
||||
|
||||
def test_deepcheck_cachemisses(self):
|
||||
self.basedir = "dirnode/Dirnode/test_mdmf_cachemisses"
|
||||
self.set_up_grid()
|
||||
self.set_up_grid(oneshare=True)
|
||||
d = self._test_deepcheck_create()
|
||||
# Clear the counters and set the rootnode
|
||||
d.addCallback(lambda rootnode:
|
||||
@ -1116,7 +1116,7 @@ class Dirnode(GridTestMixin, unittest.TestCase,
|
||||
|
||||
def test_deepcheck_mdmf(self):
|
||||
self.basedir = "dirnode/Dirnode/test_deepcheck_mdmf"
|
||||
self.set_up_grid()
|
||||
self.set_up_grid(oneshare=True)
|
||||
d = self._test_deepcheck_create(MDMF_VERSION)
|
||||
d.addCallback(lambda rootnode: rootnode.start_deep_check().when_done())
|
||||
def _check_results(r):
|
||||
@ -1136,7 +1136,7 @@ class Dirnode(GridTestMixin, unittest.TestCase,
|
||||
|
||||
def test_deepcheck_and_repair(self):
|
||||
self.basedir = "dirnode/Dirnode/test_deepcheck_and_repair"
|
||||
self.set_up_grid()
|
||||
self.set_up_grid(oneshare=True)
|
||||
d = self._test_deepcheck_create()
|
||||
d.addCallback(lambda rootnode:
|
||||
rootnode.start_deep_check_and_repair().when_done())
|
||||
@ -1165,7 +1165,7 @@ class Dirnode(GridTestMixin, unittest.TestCase,
|
||||
|
||||
def test_deepcheck_and_repair_mdmf(self):
|
||||
self.basedir = "dirnode/Dirnode/test_deepcheck_and_repair_mdmf"
|
||||
self.set_up_grid()
|
||||
self.set_up_grid(oneshare=True)
|
||||
d = self._test_deepcheck_create(version=MDMF_VERSION)
|
||||
d.addCallback(lambda rootnode:
|
||||
rootnode.start_deep_check_and_repair().when_done())
|
||||
@ -1286,12 +1286,12 @@ class Dirnode(GridTestMixin, unittest.TestCase,
|
||||
|
||||
def test_readonly(self):
|
||||
self.basedir = "dirnode/Dirnode/test_readonly"
|
||||
self.set_up_grid()
|
||||
self.set_up_grid(oneshare=True)
|
||||
return self._do_readonly_test()
|
||||
|
||||
def test_readonly_mdmf(self):
|
||||
self.basedir = "dirnode/Dirnode/test_readonly_mdmf"
|
||||
self.set_up_grid()
|
||||
self.set_up_grid(oneshare=True)
|
||||
return self._do_readonly_test(version=MDMF_VERSION)
|
||||
|
||||
def failUnlessGreaterThan(self, a, b):
|
||||
@ -1302,7 +1302,7 @@ class Dirnode(GridTestMixin, unittest.TestCase,
|
||||
|
||||
def test_create(self):
|
||||
self.basedir = "dirnode/Dirnode/test_create"
|
||||
self.set_up_grid()
|
||||
self.set_up_grid(oneshare=True)
|
||||
return self._do_create_test()
|
||||
|
||||
def test_update_metadata(self):
|
||||
@ -1355,22 +1355,22 @@ class Dirnode(GridTestMixin, unittest.TestCase,
|
||||
|
||||
def test_create_subdirectory(self):
|
||||
self.basedir = "dirnode/Dirnode/test_create_subdirectory"
|
||||
self.set_up_grid()
|
||||
self.set_up_grid(oneshare=True)
|
||||
return self._do_create_subdirectory_test()
|
||||
|
||||
def test_create_subdirectory_mdmf(self):
|
||||
self.basedir = "dirnode/Dirnode/test_create_subdirectory_mdmf"
|
||||
self.set_up_grid()
|
||||
self.set_up_grid(oneshare=True)
|
||||
return self._do_create_subdirectory_test(version=MDMF_VERSION)
|
||||
|
||||
def test_create_mdmf(self):
|
||||
self.basedir = "dirnode/Dirnode/test_mdmf"
|
||||
self.set_up_grid()
|
||||
self.set_up_grid(oneshare=True)
|
||||
return self._do_create_test(mdmf=True)
|
||||
|
||||
def test_mdmf_initial_children(self):
|
||||
self.basedir = "dirnode/Dirnode/test_mdmf"
|
||||
self.set_up_grid()
|
||||
self.set_up_grid(oneshare=True)
|
||||
return self._do_initial_children_test(mdmf=True)
|
||||
|
||||
class MinimalFakeMutableFile:
|
||||
@ -1828,7 +1828,7 @@ class Deleter(GridTestMixin, testutil.ReallyEqualMixin, unittest.TestCase):
|
||||
# succeed.
|
||||
|
||||
self.basedir = self.mktemp()
|
||||
self.set_up_grid()
|
||||
self.set_up_grid(oneshare=True)
|
||||
c0 = self.g.clients[0]
|
||||
d = c0.create_dirnode()
|
||||
small = upload.Data("Small enough for a LIT", None)
|
||||
@ -1862,7 +1862,7 @@ class Adder(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin):
|
||||
# of dn.add_file, and use a special NodeMaker that creates fake
|
||||
# mutable files.
|
||||
self.basedir = "dirnode/Adder/test_overwrite"
|
||||
self.set_up_grid()
|
||||
self.set_up_grid(oneshare=True)
|
||||
c = self.g.clients[0]
|
||||
fileuri = make_chk_file_uri(1234)
|
||||
filenode = c.nodemaker.create_from_cap(fileuri)
|
||||
|
@ -18,7 +18,8 @@ class Handler(GridTestMixin, ReallyEqualMixin, unittest.TestCase):
|
||||
|
||||
def _set_up(self, basedir, num_clients=1, num_servers=10):
|
||||
self.basedir = "ftp/" + basedir
|
||||
self.set_up_grid(num_clients=num_clients, num_servers=num_servers)
|
||||
self.set_up_grid(num_clients=num_clients, num_servers=num_servers,
|
||||
oneshare=True)
|
||||
|
||||
self.client = self.g.clients[0]
|
||||
self.username = "alice"
|
||||
|
@ -18,7 +18,7 @@ from allmydata.util.consumer import download_to_data
|
||||
from allmydata.test.no_network import GridTestMixin
|
||||
from allmydata.test.common_util import ReallyEqualMixin, NonASCIIPathMixin
|
||||
from allmydata.test.common import ShouldFailMixin
|
||||
from .test_cli_magic_folder import MagicFolderCLITestMixin
|
||||
from .cli.test_magic_folder import MagicFolderCLITestMixin
|
||||
|
||||
from allmydata.frontends import magic_folder
|
||||
from allmydata.frontends.magic_folder import MagicFolder, WriteFileMixin
|
||||
@ -229,7 +229,7 @@ class MagicFolderAliceBobTestMixin(MagicFolderCLITestMixin, ShouldFailMixin, Rea
|
||||
temp = self.mktemp()
|
||||
self.basedir = abspath_expanduser_unicode(temp.decode(get_filesystem_encoding()))
|
||||
# set_up_grid depends on self.basedir existing
|
||||
self.set_up_grid(num_clients=2)
|
||||
self.set_up_grid(num_clients=2, oneshare=True)
|
||||
|
||||
self.alice_clock = task.Clock()
|
||||
self.bob_clock = task.Clock()
|
||||
@ -959,7 +959,7 @@ class SingleMagicFolderTestMixin(MagicFolderCLITestMixin, ShouldFailMixin, Reall
|
||||
temp = self.mktemp()
|
||||
self.basedir = abspath_expanduser_unicode(temp.decode(get_filesystem_encoding()))
|
||||
self.magicfolder = None
|
||||
self.set_up_grid()
|
||||
self.set_up_grid(oneshare=True)
|
||||
self.local_dir = os.path.join(self.basedir, u"local_dir")
|
||||
self.mkdir_nonascii(self.local_dir)
|
||||
|
||||
@ -1283,7 +1283,7 @@ class MockTest(SingleMagicFolderTestMixin, unittest.TestCase):
|
||||
return task.deferLater(reactor, 0.1, lambda: None)
|
||||
|
||||
def test_errors(self):
|
||||
self.set_up_grid()
|
||||
self.set_up_grid(oneshare=True)
|
||||
|
||||
errors_dir = abspath_expanduser_unicode(u"errors_dir", base=self.basedir)
|
||||
os.mkdir(errors_dir)
|
||||
|
File diff suppressed because it is too large
Load Diff
@ -62,7 +62,8 @@ class Handler(GridTestMixin, ShouldFailMixin, ReallyEqualMixin, unittest.TestCas
|
||||
|
||||
def _set_up(self, basedir, num_clients=1, num_servers=10):
|
||||
self.basedir = "sftp/" + basedir
|
||||
self.set_up_grid(num_clients=num_clients, num_servers=num_servers)
|
||||
self.set_up_grid(num_clients=num_clients, num_servers=num_servers,
|
||||
oneshare=True)
|
||||
|
||||
self.client = self.g.clients[0]
|
||||
self.username = "alice"
|
||||
|
@ -5,20 +5,23 @@ from cStringIO import StringIO
|
||||
from twisted.trial import unittest
|
||||
from twisted.internet import defer
|
||||
from twisted.internet import threads # CLI tests use deferToThread
|
||||
from twisted.application import service
|
||||
|
||||
import allmydata
|
||||
from allmydata import uri
|
||||
from allmydata import client, uri
|
||||
from allmydata.introducer.server import IntroducerNode
|
||||
from allmydata.storage.mutable import MutableShareFile
|
||||
from allmydata.storage.server import si_a2b
|
||||
from allmydata.immutable import offloaded, upload
|
||||
from allmydata.immutable.literal import LiteralFileNode
|
||||
from allmydata.immutable.filenode import ImmutableFileNode
|
||||
from allmydata.util import idlib, mathutil
|
||||
from allmydata.util import idlib, mathutil, pollmixin, fileutil, iputil
|
||||
from allmydata.util import log, base32
|
||||
from allmydata.util.encodingutil import quote_output, unicode_to_argv
|
||||
from allmydata.util.fileutil import abspath_expanduser_unicode
|
||||
from allmydata.util.consumer import MemoryConsumer, download_to_data
|
||||
from allmydata.scripts import runner
|
||||
from allmydata.stats import StatsGathererService
|
||||
from allmydata.interfaces import IDirectoryNode, IFileNode, \
|
||||
NoSuchChildError, NoSharesError
|
||||
from allmydata.monitor import Monitor
|
||||
@ -26,21 +29,594 @@ from allmydata.mutable.common import NotWriteableError
|
||||
from allmydata.mutable import layout as mutable_layout
|
||||
from allmydata.mutable.publish import MutableData
|
||||
|
||||
from foolscap.api import DeadReferenceError, fireEventually
|
||||
from foolscap.api import DeadReferenceError, fireEventually, flushEventualQueue
|
||||
from twisted.python.failure import Failure
|
||||
from twisted.web.client import getPage
|
||||
from twisted.web.error import Error
|
||||
|
||||
from allmydata.test.common import SystemTestMixin
|
||||
from .common import TEST_RSA_KEY_SIZE
|
||||
|
||||
# TODO: move this to common or common_util
|
||||
from allmydata.test.test_runner import RunBinTahoeMixin
|
||||
from . import common_util as testutil
|
||||
|
||||
LARGE_DATA = """
|
||||
This is some data to publish to the remote grid.., which needs to be large
|
||||
enough to not fit inside a LIT uri.
|
||||
"""
|
||||
|
||||
# our system test uses the same Tub certificates each time, to avoid the
|
||||
# overhead of key generation
|
||||
SYSTEM_TEST_CERTS = [
|
||||
"""-----BEGIN CERTIFICATE-----
|
||||
MIIBnjCCAQcCAgCEMA0GCSqGSIb3DQEBBAUAMBcxFTATBgNVBAMUDG5ld3BiX3Ro
|
||||
aW5neTAeFw0wODA3MjUyMjQyMDVaFw0wOTA3MjUyMjQyMDVaMBcxFTATBgNVBAMU
|
||||
DG5ld3BiX3RoaW5neTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAxHCWajrR
|
||||
2h/iurw8k93m8WUdE3xypJiiAITw7GkKlKbCLD+dEce2MXwVVYca0n/MZZsj89Cu
|
||||
Ko0lLjksMseoSDoj98iEmVpaY5mc2ntpQ+FXdoEmPP234XRWEg2HQ+EaK6+WkGQg
|
||||
DDXQvFJCVCQk/n1MdAwZZ6vqf2ITzSuD44kCAwEAATANBgkqhkiG9w0BAQQFAAOB
|
||||
gQBn6qPKGdFjWJy7sOOTUFfm/THhHQqAh1pBDLkjR+OtzuobCoP8n8J1LNG3Yxds
|
||||
Jj7NWQL7X5TfOlfoi7e9jK0ujGgWh3yYU6PnHzJLkDiDT3LCSywQuGXCjh0tOStS
|
||||
2gaCmmAK2cfxSStKzNcewl2Zs8wHMygq8TLFoZ6ozN1+xQ==
|
||||
-----END CERTIFICATE-----
|
||||
-----BEGIN RSA PRIVATE KEY-----
|
||||
MIICXQIBAAKBgQDEcJZqOtHaH+K6vDyT3ebxZR0TfHKkmKIAhPDsaQqUpsIsP50R
|
||||
x7YxfBVVhxrSf8xlmyPz0K4qjSUuOSwyx6hIOiP3yISZWlpjmZzae2lD4Vd2gSY8
|
||||
/bfhdFYSDYdD4Rorr5aQZCAMNdC8UkJUJCT+fUx0DBlnq+p/YhPNK4PjiQIDAQAB
|
||||
AoGAZyDMdrymiyMOPwavrtlicvyohSBid3MCKc+hRBvpSB0790r2RO1aAySndp1V
|
||||
QYmCXx1RhKDbrs8m49t0Dryu5T+sQrFl0E3usAP3vvXWeh4jwJ9GyiRWy4xOEuEQ
|
||||
3ewjbEItHqA/bRJF0TNtbOmZTDC7v9FRPf2bTAyFfTZep5kCQQD33q1RA8WUYtmQ
|
||||
IArgHqt69i421lpXlOgqotFHwTx4FiGgVzDQCDuXU6txB9EeKRM340poissav/n6
|
||||
bkLZ7/VDAkEAyuIPkeI59sE5NnmW+N47NbCfdM1Smy1YxZpv942EmP9Veub5N0dw
|
||||
iK5bLAgEguUIjpTsh3BRmsE9Xd+ItmnRQwJBAMZhbg19G1EbnE0BmDKv2UbcaThy
|
||||
bnPSNc6J6T2opqDl9ZvCrMqTDD6dNIWOYAvni/4a556sFsoeBBAu10peBskCQE6S
|
||||
cB86cuJagLLVMh/dySaI6ahNoFFSpY+ZuQUxfInYUR2Q+DFtbGqyw8JwtHaRBthZ
|
||||
WqU1XZVGg2KooISsxIsCQQD1PS7//xHLumBb0jnpL7n6W8gmiTyzblT+0otaCisP
|
||||
fN6rTlwV1o8VsOUAz0rmKO5RArCbkmb01WtMgPCDBYkk
|
||||
-----END RSA PRIVATE KEY-----
|
||||
""", # 0
|
||||
"""-----BEGIN CERTIFICATE-----
|
||||
MIIBnjCCAQcCAgCEMA0GCSqGSIb3DQEBBAUAMBcxFTATBgNVBAMUDG5ld3BiX3Ro
|
||||
aW5neTAeFw0wODA3MjUyMjQyMDVaFw0wOTA3MjUyMjQyMDVaMBcxFTATBgNVBAMU
|
||||
DG5ld3BiX3RoaW5neTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAs9CALdmW
|
||||
kJ6r0KPSLdGCA8rzQKxWayrMckT22ZtbRv3aw6VA96dWclpY+T2maV0LrAzmMSL8
|
||||
n61ydJHM33iYDOyWbwHWN45XCjY/e20PL54XUl/DmbBHEhQVQLIfCldcRcnWEfoO
|
||||
iOhDJfWpDO1dmP/aOYLdkZCZvBtPAfyUqRcCAwEAATANBgkqhkiG9w0BAQQFAAOB
|
||||
gQAN9eaCREkzzk4yPIaWYkWHg3Igs1vnOR/iDw3OjyxO/xJFP2lkA2WtrwL2RTRq
|
||||
dxA8gwdPyrWgdiZElwZH8mzTJ4OdUXLSMclLOg9kvH6gtSvhLztfEDwDP1wRhikh
|
||||
OeWWu2GIC+uqFCI1ftoGgU+aIa6yrHswf66rrQvBSSvJPQ==
|
||||
-----END CERTIFICATE-----
|
||||
-----BEGIN RSA PRIVATE KEY-----
|
||||
MIICXQIBAAKBgQCz0IAt2ZaQnqvQo9It0YIDyvNArFZrKsxyRPbZm1tG/drDpUD3
|
||||
p1ZyWlj5PaZpXQusDOYxIvyfrXJ0kczfeJgM7JZvAdY3jlcKNj97bQ8vnhdSX8OZ
|
||||
sEcSFBVAsh8KV1xFydYR+g6I6EMl9akM7V2Y/9o5gt2RkJm8G08B/JSpFwIDAQAB
|
||||
AoGBAIUy5zCPpSP+FeJY6CG+t6Pdm/IFd4KtUoM3KPCrT6M3+uzApm6Ny9Crsor2
|
||||
qyYTocjSSVaOxzn1fvpw4qWLrH1veUf8ozMs8Z0VuPHD1GYUGjOXaBPXb5o1fQL9
|
||||
h7pS5/HrDDPN6wwDNTsxRf/fP58CnfwQUhwdoxcx8TnVmDQxAkEA6N3jBXt/Lh0z
|
||||
UbXHhv3QBOcqLZA2I4tY7wQzvUvKvVmCJoW1tfhBdYQWeQv0jzjL5PzrrNY8hC4l
|
||||
8+sFM3h5TwJBAMWtbFIEZfRSG1JhHK3evYHDTZnr/j+CdoWuhzP5RkjkIKsiLEH7
|
||||
2ZhA7CdFQLZF14oXy+g1uVCzzfB2WELtUbkCQQDKrb1XWzrBlzbAipfkXWs9qTmj
|
||||
uJ32Z+V6+0xRGPOXxJ0sDDqw7CeFMfchWg98zLFiV+SEZV78qPHtkAPR3ayvAkB+
|
||||
hUMhM4N13t9x2IoclsXAOhp++9bdG0l0woHyuAdOPATUw6iECwf4NQVxFRgYEZek
|
||||
4Ro3Y7taddrHn1dabr6xAkAic47OoLOROYLpljmJJO0eRe3Z5IFe+0D2LfhAW3LQ
|
||||
JU+oGq5pCjfnoaDElRRZn0+GmunnWeQEYKoflTi/lI9d
|
||||
-----END RSA PRIVATE KEY-----
|
||||
""", # 1
|
||||
"""-----BEGIN CERTIFICATE-----
|
||||
MIIBnjCCAQcCAgCEMA0GCSqGSIb3DQEBBAUAMBcxFTATBgNVBAMUDG5ld3BiX3Ro
|
||||
aW5neTAeFw0wODA3MjUyMjQyMDZaFw0wOTA3MjUyMjQyMDZaMBcxFTATBgNVBAMU
|
||||
DG5ld3BiX3RoaW5neTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAsxG7LTrz
|
||||
DF+9wegOR/BRJhjSumPUbYQnNAUKtPraFsGjAJILP44AHdnHt1MONLgTeX1ynapo
|
||||
q6O/q5cdKtBB7uEh7FpkLCCwpZt/m0y79cynn8AmWoQVgl8oS0567UmPeJnTzFPv
|
||||
dmT5dlaQALeX5YGceAsEvhmAsdOMttaor38CAwEAATANBgkqhkiG9w0BAQQFAAOB
|
||||
gQA345rxotfvh2kfgrmRzAyGewVBV4r23Go30GSZir8X2GoH3qKNwO4SekAohuSw
|
||||
AiXzLUbwIdSRSqaLFxSC7Duqc9eIeFDAWjeEmpfFLBNiw3K8SLA00QrHCUXnECTD
|
||||
b/Kk6OGuvPOiuuONVjEuEcRdCH3/Li30D0AhJaMynjhQJQ==
|
||||
-----END CERTIFICATE-----
|
||||
-----BEGIN RSA PRIVATE KEY-----
|
||||
MIICXQIBAAKBgQCzEbstOvMMX73B6A5H8FEmGNK6Y9RthCc0BQq0+toWwaMAkgs/
|
||||
jgAd2ce3Uw40uBN5fXKdqmiro7+rlx0q0EHu4SHsWmQsILClm3+bTLv1zKefwCZa
|
||||
hBWCXyhLTnrtSY94mdPMU+92ZPl2VpAAt5flgZx4CwS+GYCx04y21qivfwIDAQAB
|
||||
AoGBAIlhFg/aRPL+VM9539LzHN60dp8GzceDdqwjHhbAySZiQlLCuJx2rcI4/U65
|
||||
CpIJku9G/fLV9N2RkA/trDPXeGyqCTJfnNzyZcvvMscRMFqSGyc21Y0a+GS8bIxt
|
||||
1R2B18epSVMsWSWWMypeEgsfv29LV7oSWG8UKaqQ9+0h63DhAkEA4i2L/rori/Fb
|
||||
wpIBfA+xbXL/GmWR7xPW+3nG3LdLQpVzxz4rIsmtO9hIXzvYpcufQbwgVACyMmRf
|
||||
TMABeSDM7wJBAMquEdTaVXjGfH0EJ7z95Ys2rYTiCXjBfyEOi6RXXReqV9SXNKlN
|
||||
aKsO22zYecpkAjY1EdUdXWP/mNVEybjpZnECQQCcuh0JPS5RwcTo9c2rjyBOjGIz
|
||||
g3B1b5UIG2FurmCrWe6pgO3ZJFEzZ/L2cvz0Hj5UCa2JKBZTDvRutZoPumfnAkAb
|
||||
nSW+y1Rz1Q8m9Ub4v9rjYbq4bRd/RVWtyk6KQIDldYbr5wH8wxgsniSVKtVFFuUa
|
||||
P5bDY3HS6wMGo42cTOhxAkAcdweQSQ3j7mfc5vh71HeAC1v/VAKGehGOUdeEIQNl
|
||||
Sb2WuzpZkbfsrVzW6MdlgY6eE7ufRswhDPLWPC8MP0d1
|
||||
-----END RSA PRIVATE KEY-----
|
||||
""", # 2
|
||||
"""-----BEGIN CERTIFICATE-----
|
||||
MIIBnjCCAQcCAgCEMA0GCSqGSIb3DQEBBAUAMBcxFTATBgNVBAMUDG5ld3BiX3Ro
|
||||
aW5neTAeFw0wODA3MjUyMjQyMDZaFw0wOTA3MjUyMjQyMDZaMBcxFTATBgNVBAMU
|
||||
DG5ld3BiX3RoaW5neTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAxnH+pbOS
|
||||
qlJlsHpKUQtV0oN1Mv+ESG+yUDxStFFGjkJv/UIRzpxqFqY/6nJ3D03kZsDdcXyi
|
||||
CfV9hPYQaVNMn6z+puPmIagfBQ0aOyuI+nUhCttZIYD9071BjW5bCMX5NZWL/CZm
|
||||
E0HdAZ77H6UrRckJ7VR8wAFpihBxD5WliZcCAwEAATANBgkqhkiG9w0BAQQFAAOB
|
||||
gQAwXqY1Sjvp9JSTHKklu7s0T6YmH/BKSXrHpS2xO69svK+ze5/+5td3jPn4Qe50
|
||||
xwRNZSFmSLuJLfCO32QJSJTB7Vs5D3dNTZ2i8umsaodm97t8hit7L75nXRGHKH//
|
||||
xDVWAFB9sSgCQyPMRkL4wB4YSfRhoSKVwMvaz+XRZDUU0A==
|
||||
-----END CERTIFICATE-----
|
||||
-----BEGIN RSA PRIVATE KEY-----
|
||||
MIICXAIBAAKBgQDGcf6ls5KqUmWwekpRC1XSg3Uy/4RIb7JQPFK0UUaOQm/9QhHO
|
||||
nGoWpj/qcncPTeRmwN1xfKIJ9X2E9hBpU0yfrP6m4+YhqB8FDRo7K4j6dSEK21kh
|
||||
gP3TvUGNblsIxfk1lYv8JmYTQd0BnvsfpStFyQntVHzAAWmKEHEPlaWJlwIDAQAB
|
||||
AoGAdHNMlXwtItm7ZrY8ihZ2xFP0IHsk60TwhHkBp2LSXoTKJvnwbSgIcUYZ18BX
|
||||
8Zkp4MpoqEIU7HcssyuaMdR572huV2w0D/2gYJQLQ5JapaR3hMox3YG4wjXasN1U
|
||||
1iZt7JkhKlOy+ElL5T9mKTE1jDsX2RAv4WALzMpYFo7vs4ECQQDxqrPaqRQ5uYS/
|
||||
ejmIk05nM3Q1zmoLtMDrfRqrjBhaf/W3hqGihiqN2kL3PIIYcxSRWiyNlYXjElsR
|
||||
2sllBTe3AkEA0jcMHVThwKt1+Ce5VcE7N6hFfbsgISTjfJ+Q3K2NkvJkmtE8ZRX5
|
||||
XprssnPN8owkfF5yuKbcSZL3uvaaSGN9IQJAfTVnN9wwOXQwHhDSbDt9/KRBCnum
|
||||
n+gHqDrKLaVJHOJ9SZf8eLswoww5c+UqtkYxmtlwie61Tp+9BXQosilQ4wJBAIZ1
|
||||
XVNZmriBM4jR59L5MOZtxF0ilu98R+HLsn3kqLyIPF9mXCoQPxwLHkEan213xFKk
|
||||
mt6PJDIPRlOZLqAEuuECQFQMCrn0VUwPg8E40pxMwgMETvVflPs/oZK1Iu+b7+WY
|
||||
vBptAyhMu31fHQFnJpiUOyHqSZnOZyEn1Qu2lszNvUg=
|
||||
-----END RSA PRIVATE KEY-----
|
||||
""", # 3
|
||||
"""-----BEGIN CERTIFICATE-----
|
||||
MIIBnjCCAQcCAgCEMA0GCSqGSIb3DQEBBAUAMBcxFTATBgNVBAMUDG5ld3BiX3Ro
|
||||
aW5neTAeFw0wODA3MjUyMjQyMDZaFw0wOTA3MjUyMjQyMDZaMBcxFTATBgNVBAMU
|
||||
DG5ld3BiX3RoaW5neTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAnjiOwipn
|
||||
jigDuNMfNG/tBJhPwYUHhSbQdvrTubhsxw1oOq5XpNqUwRtC8hktOKM3hghyqExP
|
||||
62EOi0aJBkRhtwtPSLBCINptArZLfkog/nTIqVv4eLEzJ19nTi/llHHWKcgA6XTI
|
||||
sU/snUhGlySA3RpETvXqIJTauQRZz0kToSUCAwEAATANBgkqhkiG9w0BAQQFAAOB
|
||||
gQCQ+u/CsX5WC5m0cLrpyIS6qZa62lrB3mj9H1aIQhisT5kRsMz3FJ1aOaS8zPRz
|
||||
w0jhyRmamCcSsWf5WK539iOtsXbKMdAyjNtkQO3g+fnsLgmznAjjst24jfr+XU59
|
||||
0amiy1U6TY93gtEBZHtiLldPdUMsTuFbBlqbcMBQ50x9rA==
|
||||
-----END CERTIFICATE-----
|
||||
-----BEGIN RSA PRIVATE KEY-----
|
||||
MIICXAIBAAKBgQCeOI7CKmeOKAO40x80b+0EmE/BhQeFJtB2+tO5uGzHDWg6rlek
|
||||
2pTBG0LyGS04ozeGCHKoTE/rYQ6LRokGRGG3C09IsEIg2m0Ctkt+SiD+dMipW/h4
|
||||
sTMnX2dOL+WUcdYpyADpdMixT+ydSEaXJIDdGkRO9eoglNq5BFnPSROhJQIDAQAB
|
||||
AoGAAPrst3s3xQOucjismtCOsVaYN+SxFTwWUoZfRWlFEz6cBLELzfOktEWM9p79
|
||||
TrqEH4px22UNobGqO2amdql5yXwEFVhYQkRB8uDA8uVaqpL8NLWTGPRXxZ2DSU+n
|
||||
7/FLf/TWT3ti/ZtXaPVRj6E2/Mq9AVEVOjUYzkNjM02OxcECQQDKEqmPbdZq2URU
|
||||
7RbUxkq5aTp8nzAgbpUsgBGQ9PDAymhj60BDEP0q28Ssa7tU70pRnQ3AZs9txgmL
|
||||
kK2g97FNAkEAyHH9cIb6qXOAJPIr/xamFGr5uuYw9TJPz/hfVkVimW/aZnBB+e6Q
|
||||
oALJBDKJWeYPzdNbouJYg8MeU0qWdZ5DOQJADUk+1sxc/bd9U6wnBSRog1pU2x7I
|
||||
VkmPC1b8ULCaJ8LnLDKqjf5O9wNuIfwPXB1DoKwX3F+mIcyUkhWYJO5EPQJAUj5D
|
||||
KMqZSrGzYHVlC/M1Daee88rDR7fu+3wDUhiCDkbQq7tftrbl7GF4LRq3NIWq8l7I
|
||||
eJq6isWiSbaO6Y+YMQJBAJFBpVhlY5Px2BX5+Hsfq6dSP3sVVc0eHkdsoZFFxq37
|
||||
fksL/q2vlPczvBihgcxt+UzW/UrNkelOuX3i57PDvFs=
|
||||
-----END RSA PRIVATE KEY-----
|
||||
""", # 4
|
||||
"""-----BEGIN CERTIFICATE-----
|
||||
MIIBnjCCAQcCAgCEMA0GCSqGSIb3DQEBBAUAMBcxFTATBgNVBAMUDG5ld3BiX3Ro
|
||||
aW5neTAeFw0wODA3MjUyMjQyMDZaFw0wOTA3MjUyMjQyMDZaMBcxFTATBgNVBAMU
|
||||
DG5ld3BiX3RoaW5neTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAsCQuudDF
|
||||
zgmY5tDpT0TkUo8fpJ5JcvgCkLFpSDD8REpXhLFkHWhTmTj3CAxfv4lA3sQzHZxe
|
||||
4S9YCb5c/VTbFEdgwc/wlxMmJiz2jYghdmWPBb8pBEk31YihIhC+u4kex6gJBH5y
|
||||
ixiZ3PPRRMaOBBo+ZfM50XIyWbFOOM/7FwcCAwEAATANBgkqhkiG9w0BAQQFAAOB
|
||||
gQB4cFURaiiUx6n8eS4j4Vxrii5PtsaNEI4acANFSYknGd0xTP4vnmoivNmo5fWE
|
||||
Q4hYtGezNu4a9MnNhcQmI20KzXmvhLJtkwWCgGOVJtMem8hDWXSALV1Ih8hmVkGS
|
||||
CI1elfr9eyguunGp9eMMQfKhWH52WHFA0NYa0Kpv5BY33A==
|
||||
-----END CERTIFICATE-----
|
||||
-----BEGIN RSA PRIVATE KEY-----
|
||||
MIICWwIBAAKBgQCwJC650MXOCZjm0OlPRORSjx+knkly+AKQsWlIMPxESleEsWQd
|
||||
aFOZOPcIDF+/iUDexDMdnF7hL1gJvlz9VNsUR2DBz/CXEyYmLPaNiCF2ZY8FvykE
|
||||
STfViKEiEL67iR7HqAkEfnKLGJnc89FExo4EGj5l8znRcjJZsU44z/sXBwIDAQAB
|
||||
AoGABA7xXKqoxBSIh1js5zypHhXaHsre2l1Igdj0mgs25MPpvE7yBZNvyan8Vx0h
|
||||
36Hj8r4Gh3og3YNfvem67sNTwNwONY0ep+Xho/3vG0jFATGduSXdcT04DusgZNqg
|
||||
UJqW75cqxrD6o/nya5wUoN9NL5pcd5AgVMdOYvJGbrwQuaECQQDiCs/5dsUkUkeC
|
||||
Tlur1wh0wJpW4Y2ctO3ncRdnAoAA9y8dELHXMqwKE4HtlyzHY7Bxds/BDh373EVK
|
||||
rsdl+v9JAkEAx3xTmsOQvWa1tf/O30sdItVpGogKDvYqkLCNthUzPaL85BWB03E2
|
||||
xunHcVVlqAOE5tFuw0/UEyEkOaGlNTJTzwJAPIVel9FoCUiKYuYt/z1swy3KZRaw
|
||||
/tMmm4AZHvh5Y0jLcYHFy/OCQpRkhkOitqQHWunPyEXKW2PnnY5cTv68GQJAHG7H
|
||||
B88KCUTjb25nkQIGxBlA4swzCtDhXkAb4rEA3a8mdmfuWjHPyeg2ShwO4jSmM7P0
|
||||
Iph1NMjLff9hKcTjlwJARpItOFkYEdtSODC7FMm7KRKQnNB27gFAizsOYWD4D2b7
|
||||
w1FTEZ/kSA9wSNhyNGt7dgUo6zFhm2u973HBCUb3dg==
|
||||
-----END RSA PRIVATE KEY-----
|
||||
""", # 5
|
||||
"""-----BEGIN CERTIFICATE-----
|
||||
MIIBnjCCAQcCAgCEMA0GCSqGSIb3DQEBBAUAMBcxFTATBgNVBAMUDG5ld3BiX3Ro
|
||||
aW5neTAeFw0wODA3MjUyMjQ3NThaFw0wOTA3MjUyMjQ3NThaMBcxFTATBgNVBAMU
|
||||
DG5ld3BiX3RoaW5neTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAvhTRj1dA
|
||||
NOfse/UBeTfMekZKxZHsNPr+qBYaveWAHDded/BMyMgaMV2n6HQdiDaRjJkzjHCF
|
||||
3xBtpIJeEGUqfrF0ob8BIZXy3qk68eX/0CVUbgmjSBN44ahlo63NshyXmZtEAkRV
|
||||
VE/+cRKw3N2wtuTed5xwfNcL6dg4KTOEYEkCAwEAATANBgkqhkiG9w0BAQQFAAOB
|
||||
gQCN+CLuVwLeWjSdVbdizYyrOVckqtwiIHG9BbGMlcIdm0qpvD7V7/sN2csk5LaT
|
||||
BNiHi1t5628/4UHqqodYmFw8ri8ItFwB+MmTJi11CX6dIP9OUhS0qO8Z/BKtot7H
|
||||
j04oNwl+WqZZfHIYwTIEL0HBn60nOvCQPDtnWG2BhpUxMA==
|
||||
-----END CERTIFICATE-----
|
||||
-----BEGIN RSA PRIVATE KEY-----
|
||||
MIICXQIBAAKBgQC+FNGPV0A05+x79QF5N8x6RkrFkew0+v6oFhq95YAcN1538EzI
|
||||
yBoxXafodB2INpGMmTOMcIXfEG2kgl4QZSp+sXShvwEhlfLeqTrx5f/QJVRuCaNI
|
||||
E3jhqGWjrc2yHJeZm0QCRFVUT/5xErDc3bC25N53nHB81wvp2DgpM4RgSQIDAQAB
|
||||
AoGALl2BqIdN4Bnac3oV++2CcSkIQB0SEvJOf820hDGhCEDxSCxTbn5w9S21MVxx
|
||||
f7Jf2n3cNxuTbA/jzscGDtW+gXCs+WAbAr5aOqHLUPGEobhKQrQT2hrxQHyv3UFp
|
||||
0tIl9eXFknOyVAaUJ3athK5tyjSiCZQQHLGzeLaDSKVAPqECQQD1GK7DkTcLaSvw
|
||||
hoTJ3dBK3JoKT2HHLitfEE0QV58mkqFMjofpe+nyeKWvEb/oB4WBp/cfTvtf7DJK
|
||||
zl1OSf11AkEAxomWmJeub0xpqksCmnVI1Jt1mvmcE4xpIcXq8sxzLHRc2QOv0kTw
|
||||
IcFl4QcN6EQBmE+8kl7Tx8SPAVKfJMoZBQJAGsUFYYrczjxAdlba7glyFJsfn/yn
|
||||
m0+poQpwwFYxpc7iGzB+G7xTAw62WfbAVSFtLYog7aR8xC9SFuWPP1vJeQJBAILo
|
||||
xBj3ovgWTXIRJbVM8mnl28UFI0msgsHXK9VOw/6i93nMuYkPFbtcN14KdbwZ42dX
|
||||
5EIrLr+BNr4riW4LqDUCQQCbsEEpTmj3upKUOONPt+6CH/OOMjazUzYHZ/3ORHGp
|
||||
Q3Wt+I4IrR/OsiACSIQAhS4kBfk/LGggnj56DrWt+oBl
|
||||
-----END RSA PRIVATE KEY-----
|
||||
""", #6
|
||||
"""-----BEGIN CERTIFICATE-----
|
||||
MIIBnjCCAQcCAgCEMA0GCSqGSIb3DQEBBAUAMBcxFTATBgNVBAMUDG5ld3BiX3Ro
|
||||
aW5neTAeFw0wODA3MjUyMjQ3NThaFw0wOTA3MjUyMjQ3NThaMBcxFTATBgNVBAMU
|
||||
DG5ld3BiX3RoaW5neTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAtKhx6sEA
|
||||
jn6HWc6T2klwlPn0quyHtATIw8V3ezP46v6g2rRS7dTywo4GTP4vX58l+sC9z9Je
|
||||
qhQ1rWSwMK4FmnDMZCu7AVO7oMIXpXdSz7l0bgCnNjvbpkA2pOfbB1Z8oj8iebff
|
||||
J33ID5DdkmCzqYVtKpII1o/5z7Jo292JYy8CAwEAATANBgkqhkiG9w0BAQQFAAOB
|
||||
gQA0PYMA07wo9kEH4fv9TCfo+zz42Px6lUxrQBPxBvDiGYhk2kME/wX0IcoZPKTV
|
||||
WyBGmDAYWvFaHWbrbbTOfzlLWfYrDD913hCi9cO8iF8oBqRjIlkKcxAoe7vVg5Az
|
||||
ydVcrY+zqULJovWwyNmH1QNIQfMat0rj7fylwjiS1y/YsA==
|
||||
-----END CERTIFICATE-----
|
||||
-----BEGIN RSA PRIVATE KEY-----
|
||||
MIICXAIBAAKBgQC0qHHqwQCOfodZzpPaSXCU+fSq7Ie0BMjDxXd7M/jq/qDatFLt
|
||||
1PLCjgZM/i9fnyX6wL3P0l6qFDWtZLAwrgWacMxkK7sBU7ugwheld1LPuXRuAKc2
|
||||
O9umQDak59sHVnyiPyJ5t98nfcgPkN2SYLOphW0qkgjWj/nPsmjb3YljLwIDAQAB
|
||||
AoGAU4CYRv22mCZ7wVLunDLdyr5ODMMPZnHfqj2XoGbBYz0WdIBs5GlNXAfxeZzz
|
||||
oKsbDvAPzANcphh5RxAHMDj/dT8rZOez+eJrs1GEV+crl1T9p83iUkAuOJFtgUgf
|
||||
TtQBL9vHaj7DfvCEXcBPmN/teDFmAAOyUNbtuhTkRa3PbuECQQDwaqZ45Kr0natH
|
||||
V312dqlf9ms8I6e873pAu+RvA3BAWczk65eGcRjEBxVpTvNEcYKFrV8O5ZYtolrr
|
||||
VJl97AfdAkEAwF4w4KJ32fLPVoPnrYlgLw86NejMpAkixblm8cn51avPQmwbtahb
|
||||
BZUuca22IpgDpjeEk5SpEMixKe/UjzxMewJBALy4q2cY8U3F+u6sshLtAPYQZIs3
|
||||
3fNE9W2dUKsIQvRwyZMlkLN7UhqHCPq6e+HNTM0MlCMIfAPkf4Rdy4N6ZY0CQCKE
|
||||
BAMaQ6TwgzFDw5sIjiCDe+9WUPmRxhJyHL1/fvtOs4Z4fVRP290ZklbFU2vLmMQH
|
||||
LBuKzfb7+4XJyXrV1+cCQBqfPFQQZLr5UgccABYQ2jnWVbJPISJ5h2b0cwXt+pz/
|
||||
8ODEYLjqWr9K8dtbgwdpzwbkaGhQYpyvsguMvNPMohs=
|
||||
-----END RSA PRIVATE KEY-----
|
||||
""", #7
|
||||
"""-----BEGIN CERTIFICATE-----
|
||||
MIIBnjCCAQcCAgCEMA0GCSqGSIb3DQEBBAUAMBcxFTATBgNVBAMUDG5ld3BiX3Ro
|
||||
aW5neTAeFw0wODA3MjUyMjQ3NThaFw0wOTA3MjUyMjQ3NThaMBcxFTATBgNVBAMU
|
||||
DG5ld3BiX3RoaW5neTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAnBfNHycn
|
||||
5RnYzDN4EWTk2q1BBxA6ZYtlG1WPkj5iKeaYKzUk58zBL7mNOA0ucq+yTwh9C4IC
|
||||
EutWPaKBSKY5XI+Rdebh+Efq+urtOLgfJHlfcCraEx7hYN+tqqMVgEgnO/MqIsn1
|
||||
I1Fvnp89mSYbQ9tmvhSH4Hm+nbeK6iL2tIsCAwEAATANBgkqhkiG9w0BAQQFAAOB
|
||||
gQBt9zxfsKWoyyV764rRb6XThuTDMNSDaVofqePEWjudAbDu6tp0pHcrL0XpIrnT
|
||||
3iPgD47pdlwQNbGJ7xXwZu2QTOq+Lv62E6PCL8FljDVoYqR3WwJFFUigNvBT2Zzu
|
||||
Pxx7KUfOlm/M4XUSMu31sNJ0kQniBwpkW43YmHVNFb/R7g==
|
||||
-----END CERTIFICATE-----
|
||||
-----BEGIN RSA PRIVATE KEY-----
|
||||
MIICXQIBAAKBgQCcF80fJyflGdjMM3gRZOTarUEHEDpli2UbVY+SPmIp5pgrNSTn
|
||||
zMEvuY04DS5yr7JPCH0LggIS61Y9ooFIpjlcj5F15uH4R+r66u04uB8keV9wKtoT
|
||||
HuFg362qoxWASCc78yoiyfUjUW+enz2ZJhtD22a+FIfgeb6dt4rqIva0iwIDAQAB
|
||||
AoGBAIHstcnWd7iUeQYPWUNxLaRvTY8pjNH04yWLZEOgNWkXDVX5mExw++RTmB4t
|
||||
qpm/cLWkJSEtB7jjthb7ao0j/t2ljqfr6kAbClDv3zByAEDhOu8xB/5ne6Ioo+k2
|
||||
dygC+GcVcobhv8qRU+z0fpeXSP8yS1bQQHOaa17bSGsncvHRAkEAzwsn8jBTOqaW
|
||||
6Iymvr7Aql++LiwEBrqMMRVyBZlkux4hiKa2P7XXEL6/mOPR0aI2LuCqE2COrO7R
|
||||
0wAFZ54bjwJBAMEAe6cs0zI3p3STHwA3LoSZB81lzLhGUnYBvOq1yoDSlJCOYpld
|
||||
YM1y3eC0vwiOnEu3GG1bhkW+h6Kx0I/qyUUCQBiH9NqwORxI4rZ4+8S76y4EnA7y
|
||||
biOx9KxYIyNgslutTUHYpt1TmUDFqQPfclvJQWw6eExFc4Iv5bJ/XSSSyicCQGyY
|
||||
5PrwEfYTsrm5fpwUcKxTnzxHp6WYjBWybKZ0m/lYhBfCxmAdVrbDh21Exqj99Zv0
|
||||
7l26PhdIWfGFtCEGrzECQQCtPyXa3ostSceR7zEKxyn9QBCNXKARfNNTBja6+VRE
|
||||
qDC6jLqzu/SoOYaqa13QzCsttO2iZk8Ygfy3Yz0n37GE
|
||||
-----END RSA PRIVATE KEY-----
|
||||
""", #8
|
||||
"""-----BEGIN CERTIFICATE-----
|
||||
MIIBnjCCAQcCAgCEMA0GCSqGSIb3DQEBBAUAMBcxFTATBgNVBAMUDG5ld3BiX3Ro
|
||||
aW5neTAeFw0wODA3MjUyMjQ3NThaFw0wOTA3MjUyMjQ3NThaMBcxFTATBgNVBAMU
|
||||
DG5ld3BiX3RoaW5neTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA4mnLf+x0
|
||||
CWKDKP5PLZ87t2ReSDE/J5QoI5VhE0bXaahdhPrQTC2wvOpT+N9nzEpI9ASh/ejV
|
||||
kYGlc03nNKRL7zyVM1UyGduEwsRssFMqfyJhI1p+VmxDMWNplex7mIAheAdskPj3
|
||||
pwi2CP4VIMjOj368AXvXItPzeCfAhYhEVaMCAwEAATANBgkqhkiG9w0BAQQFAAOB
|
||||
gQAEzmwq5JFI5Z0dX20m9rq7NKgwRyAH3h5aE8bdjO8nEc69qscfDRx79Lws3kK8
|
||||
A0LG0DhxKB8cTNu3u+jy81tjcC4pLNQ5IKap9ksmP7RtIHfTA55G8M3fPl2ZgDYQ
|
||||
ZzsWAZvTNXd/eme0SgOzD10rfntA6ZIgJTWHx3E0RkdwKw==
|
||||
-----END CERTIFICATE-----
|
||||
-----BEGIN RSA PRIVATE KEY-----
|
||||
MIICXQIBAAKBgQDiact/7HQJYoMo/k8tnzu3ZF5IMT8nlCgjlWETRtdpqF2E+tBM
|
||||
LbC86lP432fMSkj0BKH96NWRgaVzTec0pEvvPJUzVTIZ24TCxGywUyp/ImEjWn5W
|
||||
bEMxY2mV7HuYgCF4B2yQ+PenCLYI/hUgyM6PfrwBe9ci0/N4J8CFiERVowIDAQAB
|
||||
AoGAQYTl+8XcKl8Un4dAOG6M5FwqIHAH25c3Klzu85obehrbvUCriG/sZi7VT/6u
|
||||
VeLlS6APlJ+NNgczbrOLhaNJyYzjICSt8BI96PldFUzCEkVlgE+29pO7RNoZmDYB
|
||||
dSGyIDrWdVYfdzpir6kC0KDcrpA16Sc+/bK6Q8ALLRpC7QECQQD7F7fhIQ03CKSk
|
||||
lS4mgDuBQrB/52jXgBumtjp71ANNeaWR6+06KDPTLysM+olsh97Q7YOGORbrBnBg
|
||||
Y2HPnOgjAkEA5taZaMfdFa8V1SPcX7mgCLykYIujqss0AmauZN/24oLdNE8HtTBF
|
||||
OLaxE6PnQ0JWfx9KGIy3E0V3aFk5FWb0gQJBAO4KFEaXgOG1jfCBhNj3JHJseMso
|
||||
5Nm4F366r0MJQYBHXNGzqphB2K/Svat2MKX1QSUspk2u/a0d05dtYCLki6UCQHWS
|
||||
sChyQ+UbfF9HGKOZBC3vBzo1ZXNEdIUUj5bJjBHq3YgbCK38nAU66A482TmkvDGb
|
||||
Wj4OzeB+7Ua0yyJfggECQQDVlAa8HqdAcrbEwI/YfPydFsavBJ0KtcIGK2owQ+dk
|
||||
dhlDnpXDud/AtX4Ft2LaquQ15fteRrYjjwI9SFGytjtp
|
||||
-----END RSA PRIVATE KEY-----
|
||||
""", #9
|
||||
"""-----BEGIN CERTIFICATE-----
|
||||
MIIBnjCCAQcCAgCEMA0GCSqGSIb3DQEBBAUAMBcxFTATBgNVBAMUDG5ld3BiX3Ro
|
||||
aW5neTAeFw0wODA3MjUyMjQ3NThaFw0wOTA3MjUyMjQ3NThaMBcxFTATBgNVBAMU
|
||||
DG5ld3BiX3RoaW5neTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAueLfowPT
|
||||
kXXtHeU2FZSz2mJhHmjqeyI1oMoyyggonccx65vMxaRfljnz2dOjVVYpCOn/LrdP
|
||||
wVxHO8KNDsmQeWPRjnnBa2dFqqOnp/8gEJFJBW7K/gI9se6o+xe9QIWBq6d/fKVR
|
||||
BURJe5TycLogzZuxQn1xHHILa3XleYuHAbMCAwEAATANBgkqhkiG9w0BAQQFAAOB
|
||||
gQBEC1lfC3XK0galQC96B7faLpnQmhn5lX2FUUoFIQQtBTetoE+gTqnLSOIZcOK4
|
||||
pkT3YvxUvgOV0LOLClryo2IknMMGWRSAcXtVUBBLRHVTSSuVUyyLr5kdRU7B4E+l
|
||||
OU0j8Md/dzlkm//K1bzLyUaPq204ofH8su2IEX4b3IGmAQ==
|
||||
-----END CERTIFICATE-----
|
||||
-----BEGIN RSA PRIVATE KEY-----
|
||||
MIICWwIBAAKBgQC54t+jA9ORde0d5TYVlLPaYmEeaOp7IjWgyjLKCCidxzHrm8zF
|
||||
pF+WOfPZ06NVVikI6f8ut0/BXEc7wo0OyZB5Y9GOecFrZ0Wqo6en/yAQkUkFbsr+
|
||||
Aj2x7qj7F71AhYGrp398pVEFREl7lPJwuiDNm7FCfXEccgtrdeV5i4cBswIDAQAB
|
||||
AoGAO4PnJHNaLs16AMNdgKVevEIZZDolMQ1v7C4w+ryH/JRFaHE2q+UH8bpWV9zK
|
||||
A82VT9RTrqpkb71S1VBiB2UDyz263XdAI/N2HcIVMmfKb72oV4gCI1KOv4DfFwZv
|
||||
tVVcIdVEDBOZ2TgqK4opGOgWMDqgIAl2z3PbsIoNylZHEJECQQDtQeJFhEJGH4Qz
|
||||
BGpdND0j2nnnJyhOFHJqikJNdul3uBwmxTK8FPEUUH/rtpyUan3VMOyDx3kX4OQg
|
||||
GDNSb32rAkEAyJIZIJ0EMRHVedyWsfqR0zTGKRQ+qsc3sCfyUhFksWms9jsSS0DT
|
||||
tVeTdC3F6EIAdpKOGhSyfBTU4jxwbFc0GQJADI4L9znEeAl66Wg2aLA2/Aq3oK/F
|
||||
xjv2wgSG9apxOFCZzMNqp+FD0Jth6YtEReZMuldYbLDFi6nu6HPfY2Fa+QJAdpm1
|
||||
lAxk6yMxiZK/5VRWoH6HYske2Vtd+aNVbePtF992ME/z3F3kEkpL3hom+dT1cyfs
|
||||
MU3l0Ot8ip7Ul6vlGQJAegNzpcfl2GFSdWQMxQ+nN3woKnPqpR1M3jgnqvo7L4Xe
|
||||
JW3vRxvfdrUuzdlvZ/Pbsu/vOd+cuIa4h0yD5q3N+g==
|
||||
-----END RSA PRIVATE KEY-----
|
||||
""", #10
|
||||
"""-----BEGIN CERTIFICATE-----
|
||||
MIIBnjCCAQcCAgCEMA0GCSqGSIb3DQEBBAUAMBcxFTATBgNVBAMUDG5ld3BiX3Ro
|
||||
aW5neTAeFw0wODA3MjUyMjQ3NThaFw0wOTA3MjUyMjQ3NThaMBcxFTATBgNVBAMU
|
||||
DG5ld3BiX3RoaW5neTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAruBhwk+J
|
||||
XdlwfKXXN8K+43JyEYCV7Fp7ZiES4t4AEJuQuBqJVMxpzeZzu2t/vVb59ThaxxtY
|
||||
NGD3Xy6Og5dTv//ztWng8P7HwwvfbrUICU6zo6JAhg7kfaNa116krCYOkC/cdJWt
|
||||
o5W+zsDmI1jUVGH0D73h29atc1gn6wLpAsMCAwEAATANBgkqhkiG9w0BAQQFAAOB
|
||||
gQAEJ/ITGJ9lK/rk0yHcenW8SHsaSTlZMuJ4yEiIgrJ2t71Rd6mtCC/ljx9USvvK
|
||||
bF500whTiZlnWgKi02boBEKa44z/DytF6pljeNPefBQSqZyUByGEb/8Mn58Idyls
|
||||
q4/d9iKXMPvbpQdcesOzgOffFZevLQSWyPRaIdYBOOiYUA==
|
||||
-----END CERTIFICATE-----
|
||||
-----BEGIN RSA PRIVATE KEY-----
|
||||
MIICXQIBAAKBgQCu4GHCT4ld2XB8pdc3wr7jcnIRgJXsWntmIRLi3gAQm5C4GolU
|
||||
zGnN5nO7a3+9Vvn1OFrHG1g0YPdfLo6Dl1O///O1aeDw/sfDC99utQgJTrOjokCG
|
||||
DuR9o1rXXqSsJg6QL9x0la2jlb7OwOYjWNRUYfQPveHb1q1zWCfrAukCwwIDAQAB
|
||||
AoGAcZAXC/dYrlBpIxkTRQu7qLqGZuVI9t7fabgqqpceFargdR4Odrn0L5jrKRer
|
||||
MYrM8bjyAoC4a/NYUUBLnhrkcCQWO9q5fSQuFKFVWHY53SM63Qdqk8Y9Fmy/h/4c
|
||||
UtwZ5BWkUWItvnTMgb9bFcvSiIhEcNQauypnMpgNknopu7kCQQDlSQT10LkX2IGT
|
||||
bTUhPcManx92gucaKsPONKq2mP+1sIciThevRTZWZsxyIuoBBY43NcKKi8NlZCtj
|
||||
hhSbtzYdAkEAw0B93CXfso8g2QIMj/HJJz/wNTLtg+rriXp6jh5HWe6lKWRVrce+
|
||||
1w8Qz6OI/ZP6xuQ9HNeZxJ/W6rZPW6BGXwJAHcTuRPA1p/fvUvHh7Q/0zfcNAbkb
|
||||
QlV9GL/TzmNtB+0EjpqvDo2g8XTlZIhN85YCEf8D5DMjSn3H+GMHN/SArQJBAJlW
|
||||
MIGPjNoh5V4Hae4xqBOW9wIQeM880rUo5s5toQNTk4mqLk9Hquwh/MXUXGUora08
|
||||
2XGpMC1midXSTwhaGmkCQQCdivptFEYl33PrVbxY9nzHynpp4Mi89vQF0cjCmaYY
|
||||
N8L+bvLd4BU9g6hRS8b59lQ6GNjryx2bUnCVtLcey4Jd
|
||||
-----END RSA PRIVATE KEY-----
|
||||
""", #11
|
||||
]
|
||||
|
||||
# To disable the pre-computed tub certs, uncomment this line.
|
||||
#SYSTEM_TEST_CERTS = []
|
||||
|
||||
def flush_but_dont_ignore(res):
|
||||
d = flushEventualQueue()
|
||||
def _done(ignored):
|
||||
return res
|
||||
d.addCallback(_done)
|
||||
return d
|
||||
|
||||
class SystemTestMixin(pollmixin.PollMixin, testutil.StallMixin):
|
||||
|
||||
# SystemTestMixin tests tend to be a lot of work, and we have a few
|
||||
# buildslaves that are pretty slow, and every once in a while these tests
|
||||
# run up against the default 120 second timeout. So increase the default
|
||||
# timeout. Individual test cases can override this, of course.
|
||||
timeout = 300
|
||||
|
||||
def setUp(self):
|
||||
self.sparent = service.MultiService()
|
||||
self.sparent.startService()
|
||||
|
||||
self.stats_gatherer = None
|
||||
self.stats_gatherer_furl = None
|
||||
|
||||
def tearDown(self):
|
||||
log.msg("shutting down SystemTest services")
|
||||
d = self.sparent.stopService()
|
||||
d.addBoth(flush_but_dont_ignore)
|
||||
return d
|
||||
|
||||
def getdir(self, subdir):
|
||||
return os.path.join(self.basedir, subdir)
|
||||
|
||||
def add_service(self, s):
|
||||
s.setServiceParent(self.sparent)
|
||||
return s
|
||||
|
||||
def set_up_nodes(self, NUMCLIENTS=5, use_stats_gatherer=False):
|
||||
self.numclients = NUMCLIENTS
|
||||
iv_dir = self.getdir("introducer")
|
||||
if not os.path.isdir(iv_dir):
|
||||
fileutil.make_dirs(iv_dir)
|
||||
fileutil.write(os.path.join(iv_dir, 'tahoe.cfg'),
|
||||
"[node]\n" +
|
||||
u"nickname = introducer \u263A\n".encode('utf-8') +
|
||||
"web.port = tcp:0:interface=127.0.0.1\n")
|
||||
if SYSTEM_TEST_CERTS:
|
||||
os.mkdir(os.path.join(iv_dir, "private"))
|
||||
f = open(os.path.join(iv_dir, "private", "node.pem"), "w")
|
||||
f.write(SYSTEM_TEST_CERTS[0])
|
||||
f.close()
|
||||
iv = IntroducerNode(basedir=iv_dir)
|
||||
self.introducer = self.add_service(iv)
|
||||
self._get_introducer_web()
|
||||
d = defer.succeed(None)
|
||||
if use_stats_gatherer:
|
||||
d.addCallback(self._set_up_stats_gatherer)
|
||||
d.addCallback(self._set_up_nodes_2)
|
||||
if use_stats_gatherer:
|
||||
d.addCallback(self._grab_stats)
|
||||
return d
|
||||
|
||||
def _get_introducer_web(self):
|
||||
f = open(os.path.join(self.getdir("introducer"), "node.url"), "r")
|
||||
self.introweb_url = f.read().strip()
|
||||
f.close()
|
||||
|
||||
def _set_up_stats_gatherer(self, res):
|
||||
statsdir = self.getdir("stats_gatherer")
|
||||
fileutil.make_dirs(statsdir)
|
||||
portnum = iputil.allocate_tcp_port()
|
||||
location = "tcp:127.0.0.1:%d" % portnum
|
||||
fileutil.write(os.path.join(statsdir, "location"), location)
|
||||
port = "tcp:%d:interface=127.0.0.1" % portnum
|
||||
fileutil.write(os.path.join(statsdir, "port"), port)
|
||||
self.stats_gatherer_svc = StatsGathererService(statsdir)
|
||||
self.stats_gatherer = self.stats_gatherer_svc.stats_gatherer
|
||||
self.add_service(self.stats_gatherer_svc)
|
||||
|
||||
d = fireEventually()
|
||||
sgf = os.path.join(statsdir, 'stats_gatherer.furl')
|
||||
def check_for_furl():
|
||||
return os.path.exists(sgf)
|
||||
d.addCallback(lambda junk: self.poll(check_for_furl, timeout=30))
|
||||
def get_furl(junk):
|
||||
self.stats_gatherer_furl = file(sgf, 'rb').read().strip()
|
||||
d.addCallback(get_furl)
|
||||
return d
|
||||
|
||||
def _set_up_nodes_2(self, res):
|
||||
q = self.introducer
|
||||
self.introducer_furl = q.introducer_url
|
||||
self.clients = []
|
||||
basedirs = []
|
||||
for i in range(self.numclients):
|
||||
basedir = self.getdir("client%d" % i)
|
||||
basedirs.append(basedir)
|
||||
fileutil.make_dirs(os.path.join(basedir, "private"))
|
||||
if len(SYSTEM_TEST_CERTS) > (i+1):
|
||||
f = open(os.path.join(basedir, "private", "node.pem"), "w")
|
||||
f.write(SYSTEM_TEST_CERTS[i+1])
|
||||
f.close()
|
||||
|
||||
config = "[client]\n"
|
||||
config += "introducer.furl = %s\n" % self.introducer_furl
|
||||
if self.stats_gatherer_furl:
|
||||
config += "stats_gatherer.furl = %s\n" % self.stats_gatherer_furl
|
||||
|
||||
nodeconfig = "[node]\n"
|
||||
nodeconfig += (u"nickname = client %d \u263A\n" % (i,)).encode('utf-8')
|
||||
tub_port = iputil.allocate_tcp_port()
|
||||
# Don't let it use AUTO: there's no need for tests to use
|
||||
# anything other than 127.0.0.1
|
||||
nodeconfig += "tub.port = tcp:%d\n" % tub_port
|
||||
nodeconfig += "tub.location = tcp:127.0.0.1:%d\n" % tub_port
|
||||
|
||||
if i == 0:
|
||||
# clients[0] runs a webserver and a helper
|
||||
config += nodeconfig
|
||||
config += "web.port = tcp:0:interface=127.0.0.1\n"
|
||||
config += "timeout.keepalive = 600\n"
|
||||
config += "[helper]\n"
|
||||
config += "enabled = True\n"
|
||||
elif i == 3:
|
||||
# clients[3] runs a webserver and uses a helper
|
||||
config += nodeconfig
|
||||
config += "web.port = tcp:0:interface=127.0.0.1\n"
|
||||
config += "timeout.disconnect = 1800\n"
|
||||
else:
|
||||
config += nodeconfig
|
||||
|
||||
fileutil.write(os.path.join(basedir, 'tahoe.cfg'), config)
|
||||
|
||||
# give subclasses a chance to append lines to the node's tahoe.cfg
|
||||
# files before they are launched.
|
||||
self._set_up_nodes_extra_config()
|
||||
|
||||
# start clients[0], wait for it's tub to be ready (at which point it
|
||||
# will have registered the helper furl).
|
||||
c = self.add_service(client.Client(basedir=basedirs[0]))
|
||||
self.clients.append(c)
|
||||
c.set_default_mutable_keysize(TEST_RSA_KEY_SIZE)
|
||||
|
||||
f = open(os.path.join(basedirs[0],"private","helper.furl"), "r")
|
||||
helper_furl = f.read()
|
||||
f.close()
|
||||
self.helper_furl = helper_furl
|
||||
if self.numclients >= 4:
|
||||
f = open(os.path.join(basedirs[3], 'tahoe.cfg'), 'ab+')
|
||||
f.write(
|
||||
"[client]\n"
|
||||
"helper.furl = %s\n" % helper_furl)
|
||||
f.close()
|
||||
|
||||
# this starts the rest of the clients
|
||||
for i in range(1, self.numclients):
|
||||
c = self.add_service(client.Client(basedir=basedirs[i]))
|
||||
self.clients.append(c)
|
||||
c.set_default_mutable_keysize(TEST_RSA_KEY_SIZE)
|
||||
log.msg("STARTING")
|
||||
d = self.wait_for_connections()
|
||||
def _connected(res):
|
||||
log.msg("CONNECTED")
|
||||
# now find out where the web port was
|
||||
self.webish_url = self.clients[0].getServiceNamed("webish").getURL()
|
||||
if self.numclients >=4:
|
||||
# and the helper-using webport
|
||||
self.helper_webish_url = self.clients[3].getServiceNamed("webish").getURL()
|
||||
d.addCallback(_connected)
|
||||
return d
|
||||
|
||||
def _set_up_nodes_extra_config(self):
|
||||
# for overriding by subclasses
|
||||
pass
|
||||
|
||||
def _grab_stats(self, res):
|
||||
d = self.stats_gatherer.poll()
|
||||
return d
|
||||
|
||||
def bounce_client(self, num):
|
||||
c = self.clients[num]
|
||||
d = c.disownServiceParent()
|
||||
# I think windows requires a moment to let the connection really stop
|
||||
# and the port number made available for re-use. TODO: examine the
|
||||
# behavior, see if this is really the problem, see if we can do
|
||||
# better than blindly waiting for a second.
|
||||
d.addCallback(self.stall, 1.0)
|
||||
def _stopped(res):
|
||||
new_c = client.Client(basedir=self.getdir("client%d" % num))
|
||||
self.clients[num] = new_c
|
||||
new_c.set_default_mutable_keysize(TEST_RSA_KEY_SIZE)
|
||||
self.add_service(new_c)
|
||||
d.addCallback(_stopped)
|
||||
d.addCallback(lambda res: self.wait_for_connections())
|
||||
def _maybe_get_webport(res):
|
||||
if num == 0:
|
||||
# now find out where the web port was
|
||||
self.webish_url = self.clients[0].getServiceNamed("webish").getURL()
|
||||
d.addCallback(_maybe_get_webport)
|
||||
return d
|
||||
|
||||
def add_extra_node(self, client_num, helper_furl=None,
|
||||
add_to_sparent=False):
|
||||
# usually this node is *not* parented to our self.sparent, so we can
|
||||
# shut it down separately from the rest, to exercise the
|
||||
# connection-lost code
|
||||
basedir = self.getdir("client%d" % client_num)
|
||||
if not os.path.isdir(basedir):
|
||||
fileutil.make_dirs(basedir)
|
||||
config = "[client]\n"
|
||||
config += "introducer.furl = %s\n" % self.introducer_furl
|
||||
if helper_furl:
|
||||
config += "helper.furl = %s\n" % helper_furl
|
||||
fileutil.write(os.path.join(basedir, 'tahoe.cfg'), config)
|
||||
|
||||
c = client.Client(basedir=basedir)
|
||||
self.clients.append(c)
|
||||
c.set_default_mutable_keysize(TEST_RSA_KEY_SIZE)
|
||||
self.numclients += 1
|
||||
if add_to_sparent:
|
||||
c.setServiceParent(self.sparent)
|
||||
else:
|
||||
c.startService()
|
||||
d = self.wait_for_connections()
|
||||
d.addCallback(lambda res: c)
|
||||
return d
|
||||
|
||||
def _check_connections(self):
|
||||
for c in self.clients:
|
||||
if not c.connected_to_introducer():
|
||||
return False
|
||||
sb = c.get_storage_broker()
|
||||
if len(sb.get_connected_servers()) != self.numclients:
|
||||
return False
|
||||
up = c.getServiceNamed("uploader")
|
||||
if up._helper_furl and not up._helper:
|
||||
return False
|
||||
return True
|
||||
|
||||
def wait_for_connections(self, ignored=None):
|
||||
return self.poll(self._check_connections, timeout=200)
|
||||
|
||||
class CountingDataUploadable(upload.Data):
|
||||
bytes_read = 0
|
||||
interrupt_after = None
|
||||
|
0
src/allmydata/test/web/__init__.py
Normal file
0
src/allmydata/test/web/__init__.py
Normal file
6
src/allmydata/test/web/common.py
Normal file
6
src/allmydata/test/web/common.py
Normal file
@ -0,0 +1,6 @@
|
||||
|
||||
unknown_rwcap = u"lafs://from_the_future_rw_\u263A".encode('utf-8')
|
||||
unknown_rocap = u"ro.lafs://readonly_from_the_future_ro_\u263A".encode('utf-8')
|
||||
unknown_immcap = u"imm.lafs://immutable_from_the_future_imm_\u263A".encode('utf-8')
|
||||
|
||||
FAVICON_MARKUP = '<link href="/icon.png" rel="shortcut icon" />'
|
1375
src/allmydata/test/web/test_grid.py
Normal file
1375
src/allmydata/test/web/test_grid.py
Normal file
File diff suppressed because it is too large
Load Diff
59
src/allmydata/test/web/test_introducer.py
Normal file
59
src/allmydata/test/web/test_introducer.py
Normal file
@ -0,0 +1,59 @@
|
||||
import os.path
|
||||
from twisted.trial import unittest
|
||||
from foolscap.api import fireEventually, flushEventualQueue
|
||||
from allmydata.util import fileutil
|
||||
from twisted.internet import defer, reactor
|
||||
from allmydata.introducer import IntroducerNode
|
||||
from .common import FAVICON_MARKUP
|
||||
from ..common_web import HTTPClientGETFactory
|
||||
|
||||
class IntroducerWeb(unittest.TestCase):
|
||||
def setUp(self):
|
||||
self.node = None
|
||||
|
||||
def tearDown(self):
|
||||
d = defer.succeed(None)
|
||||
if self.node:
|
||||
d.addCallback(lambda ign: self.node.stopService())
|
||||
d.addCallback(flushEventualQueue)
|
||||
return d
|
||||
|
||||
def test_welcome(self):
|
||||
basedir = "web.IntroducerWeb.test_welcome"
|
||||
os.mkdir(basedir)
|
||||
cfg = "\n".join(["[node]",
|
||||
"tub.location = 127.0.0.1:1",
|
||||
"web.port = tcp:0",
|
||||
]) + "\n"
|
||||
fileutil.write(os.path.join(basedir, "tahoe.cfg"), cfg)
|
||||
self.node = IntroducerNode(basedir)
|
||||
self.ws = self.node.getServiceNamed("webish")
|
||||
|
||||
d = fireEventually(None)
|
||||
d.addCallback(lambda ign: self.node.startService())
|
||||
|
||||
d.addCallback(lambda ign: self.GET("/"))
|
||||
def _check(res):
|
||||
self.failUnlessIn('Welcome to the Tahoe-LAFS Introducer', res)
|
||||
self.failUnlessIn(FAVICON_MARKUP, res)
|
||||
self.failUnlessIn('Page rendered at', res)
|
||||
self.failUnlessIn('Tahoe-LAFS code imported from:', res)
|
||||
d.addCallback(_check)
|
||||
return d
|
||||
|
||||
def GET(self, urlpath, followRedirect=False, return_response=False,
|
||||
**kwargs):
|
||||
# if return_response=True, this fires with (data, statuscode,
|
||||
# respheaders) instead of just data.
|
||||
assert not isinstance(urlpath, unicode)
|
||||
url = self.ws.getURL().rstrip('/') + urlpath
|
||||
factory = HTTPClientGETFactory(url, method="GET",
|
||||
followRedirect=followRedirect, **kwargs)
|
||||
reactor.connectTCP("localhost", self.ws.getPortnum(), factory)
|
||||
d = factory.deferred
|
||||
def _got_data(data):
|
||||
return (data, factory.status, factory.response_headers)
|
||||
if return_response:
|
||||
d.addCallback(_got_data)
|
||||
return factory.deferred
|
||||
|
105
src/allmydata/test/web/test_token.py
Normal file
105
src/allmydata/test/web/test_token.py
Normal file
@ -0,0 +1,105 @@
|
||||
from zope.interface import implementer
|
||||
from twisted.trial import unittest
|
||||
from twisted.web import server
|
||||
from nevow.inevow import IRequest
|
||||
from allmydata.web import common
|
||||
|
||||
# XXX FIXME when we introduce "mock" as a dependency, these can
|
||||
# probably just be Mock instances
|
||||
@implementer(IRequest)
|
||||
class FakeRequest(object):
|
||||
def __init__(self):
|
||||
self.method = "POST"
|
||||
self.fields = dict()
|
||||
self.args = dict()
|
||||
|
||||
|
||||
class FakeField(object):
|
||||
def __init__(self, *values):
|
||||
if len(values) == 1:
|
||||
self.value = values[0]
|
||||
else:
|
||||
self.value = list(values)
|
||||
|
||||
|
||||
class FakeClientWithToken(object):
|
||||
token = 'a' * 32
|
||||
|
||||
def get_auth_token(self):
|
||||
return self.token
|
||||
|
||||
|
||||
class TestTokenOnlyApi(unittest.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
self.client = FakeClientWithToken()
|
||||
self.page = common.TokenOnlyWebApi(self.client)
|
||||
|
||||
def test_not_post(self):
|
||||
req = FakeRequest()
|
||||
req.method = "GET"
|
||||
|
||||
self.assertRaises(
|
||||
server.UnsupportedMethod,
|
||||
self.page.render, req,
|
||||
)
|
||||
|
||||
def test_missing_token(self):
|
||||
req = FakeRequest()
|
||||
|
||||
exc = self.assertRaises(
|
||||
common.WebError,
|
||||
self.page.render, req,
|
||||
)
|
||||
self.assertEquals(exc.text, "Missing token")
|
||||
self.assertEquals(exc.code, 401)
|
||||
|
||||
def test_token_in_get_args(self):
|
||||
req = FakeRequest()
|
||||
req.args['token'] = 'z' * 32
|
||||
|
||||
exc = self.assertRaises(
|
||||
common.WebError,
|
||||
self.page.render, req,
|
||||
)
|
||||
self.assertEquals(exc.text, "Do not pass 'token' as URL argument")
|
||||
self.assertEquals(exc.code, 400)
|
||||
|
||||
def test_invalid_token(self):
|
||||
wrong_token = 'b' * 32
|
||||
req = FakeRequest()
|
||||
req.fields['token'] = FakeField(wrong_token)
|
||||
|
||||
exc = self.assertRaises(
|
||||
common.WebError,
|
||||
self.page.render, req,
|
||||
)
|
||||
self.assertEquals(exc.text, "Invalid token")
|
||||
self.assertEquals(exc.code, 401)
|
||||
|
||||
def test_valid_token_no_t_arg(self):
|
||||
req = FakeRequest()
|
||||
req.fields['token'] = FakeField(self.client.token)
|
||||
|
||||
with self.assertRaises(common.WebError) as exc:
|
||||
self.page.render(req)
|
||||
self.assertEquals(exc.exception.text, "Must provide 't=' argument")
|
||||
self.assertEquals(exc.exception.code, 400)
|
||||
|
||||
def test_valid_token_invalid_t_arg(self):
|
||||
req = FakeRequest()
|
||||
req.fields['token'] = FakeField(self.client.token)
|
||||
req.args['t'] = 'not at all json'
|
||||
|
||||
with self.assertRaises(common.WebError) as exc:
|
||||
self.page.render(req)
|
||||
self.assertTrue("invalid type" in exc.exception.text)
|
||||
self.assertEquals(exc.exception.code, 400)
|
||||
|
||||
def test_valid(self):
|
||||
req = FakeRequest()
|
||||
req.fields['token'] = FakeField(self.client.token)
|
||||
req.args['t'] = ['json']
|
||||
|
||||
result = self.page.render(req)
|
||||
self.assertTrue(result == NotImplemented)
|
65
src/allmydata/test/web/test_util.py
Normal file
65
src/allmydata/test/web/test_util.py
Normal file
@ -0,0 +1,65 @@
|
||||
from twisted.trial import unittest
|
||||
from allmydata.web import status, common
|
||||
from ..common import ShouldFailMixin
|
||||
from .. import common_util as testutil
|
||||
|
||||
class Util(ShouldFailMixin, testutil.ReallyEqualMixin, unittest.TestCase):
|
||||
def test_load_file(self):
|
||||
# This will raise an exception unless a well-formed XML file is found under that name.
|
||||
common.getxmlfile('directory.xhtml').load()
|
||||
|
||||
def test_parse_replace_arg(self):
|
||||
self.failUnlessReallyEqual(common.parse_replace_arg("true"), True)
|
||||
self.failUnlessReallyEqual(common.parse_replace_arg("false"), False)
|
||||
self.failUnlessReallyEqual(common.parse_replace_arg("only-files"),
|
||||
"only-files")
|
||||
self.failUnlessRaises(common.WebError, common.parse_replace_arg, "only_fles")
|
||||
|
||||
def test_abbreviate_time(self):
|
||||
self.failUnlessReallyEqual(common.abbreviate_time(None), "")
|
||||
self.failUnlessReallyEqual(common.abbreviate_time(1.234), "1.23s")
|
||||
self.failUnlessReallyEqual(common.abbreviate_time(0.123), "123ms")
|
||||
self.failUnlessReallyEqual(common.abbreviate_time(0.00123), "1.2ms")
|
||||
self.failUnlessReallyEqual(common.abbreviate_time(0.000123), "123us")
|
||||
self.failUnlessReallyEqual(common.abbreviate_time(-123000), "-123000000000us")
|
||||
|
||||
def test_compute_rate(self):
|
||||
self.failUnlessReallyEqual(common.compute_rate(None, None), None)
|
||||
self.failUnlessReallyEqual(common.compute_rate(None, 1), None)
|
||||
self.failUnlessReallyEqual(common.compute_rate(250000, None), None)
|
||||
self.failUnlessReallyEqual(common.compute_rate(250000, 0), None)
|
||||
self.failUnlessReallyEqual(common.compute_rate(250000, 10), 25000.0)
|
||||
self.failUnlessReallyEqual(common.compute_rate(0, 10), 0.0)
|
||||
self.shouldFail(AssertionError, "test_compute_rate", "",
|
||||
common.compute_rate, -100, 10)
|
||||
self.shouldFail(AssertionError, "test_compute_rate", "",
|
||||
common.compute_rate, 100, -10)
|
||||
|
||||
# Sanity check
|
||||
rate = common.compute_rate(10*1000*1000, 1)
|
||||
self.failUnlessReallyEqual(common.abbreviate_rate(rate), "10.00MBps")
|
||||
|
||||
def test_abbreviate_rate(self):
|
||||
self.failUnlessReallyEqual(common.abbreviate_rate(None), "")
|
||||
self.failUnlessReallyEqual(common.abbreviate_rate(1234000), "1.23MBps")
|
||||
self.failUnlessReallyEqual(common.abbreviate_rate(12340), "12.3kBps")
|
||||
self.failUnlessReallyEqual(common.abbreviate_rate(123), "123Bps")
|
||||
|
||||
def test_abbreviate_size(self):
|
||||
self.failUnlessReallyEqual(common.abbreviate_size(None), "")
|
||||
self.failUnlessReallyEqual(common.abbreviate_size(1.23*1000*1000*1000), "1.23GB")
|
||||
self.failUnlessReallyEqual(common.abbreviate_size(1.23*1000*1000), "1.23MB")
|
||||
self.failUnlessReallyEqual(common.abbreviate_size(1230), "1.2kB")
|
||||
self.failUnlessReallyEqual(common.abbreviate_size(123), "123B")
|
||||
|
||||
def test_plural(self):
|
||||
def convert(s):
|
||||
return "%d second%s" % (s, status.plural(s))
|
||||
self.failUnlessReallyEqual(convert(0), "0 seconds")
|
||||
self.failUnlessReallyEqual(convert(1), "1 second")
|
||||
self.failUnlessReallyEqual(convert(2), "2 seconds")
|
||||
def convert2(s):
|
||||
return "has share%s: %s" % (status.plural(s), ",".join(s))
|
||||
self.failUnlessReallyEqual(convert2([]), "has shares: ")
|
||||
self.failUnlessReallyEqual(convert2(["1"]), "has share: 1")
|
||||
self.failUnlessReallyEqual(convert2(["1","2"]), "has shares: 1,2")
|
File diff suppressed because it is too large
Load Diff
Loading…
Reference in New Issue
Block a user