Merge remote-tracking branch 'origin/master' into 3377.configutil-connection_status-python-3
Commit: 46b498f99c
@@ -88,9 +88,5 @@ if [ -n "${ARTIFACTS}" ]; then
     # Create a junitxml results area.
     mkdir -p "$(dirname "${JUNITXML}")"
-    # Always succeed even if subunit2junitxml fails. subunit2junitxml signals
-    # failure if the stream it is processing contains test failures. This is
-    # not what we care about. If we cared about it, the test command above
-    # would have signalled failure already and we wouldn't be here.
-    "${BOOTSTRAP_VENV}"/bin/subunit2junitxml < "${SUBUNIT2}" > "${JUNITXML}" || true
+    "${BOOTSTRAP_VENV}"/bin/subunit2junitxml < "${SUBUNIT2}" > "${JUNITXML}" || "${alternative}"
 fi
 
newsfragments/3373.minor (new empty file)
newsfragments/3380.minor (new empty file)
newsfragments/3383.minor (new empty file)
@@ -1,5 +1,7 @@
 
+from past.builtins import long
+
 from zope.interface import Interface, Attribute
 from twisted.plugin import (
     IPlugin,
@@ -1,6 +1,9 @@
 
 import os, time, struct
-import cPickle as pickle
+try:
+    import cPickle as pickle
+except ImportError:
+    import pickle
 from twisted.internet import reactor
 from twisted.application import service
 from allmydata.storage.common import si_b2a
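A minimal standalone sketch (not project code) of how this compatibility import behaves: Python 2 prefers the C-accelerated cPickle, Python 3 only ships pickle, and either module round-trips the same data through the same API.

try:
    import cPickle as pickle  # Python 2: C-accelerated pickler
except ImportError:
    import pickle             # Python 3: cPickle is gone

data = {"shares": [1, 2, 3]}
assert pickle.loads(pickle.dumps(data)) == data  # same API either way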
@@ -12,14 +12,14 @@ from __future__ import print_function
 from future.utils import PY2
 if PY2:
     from builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min  # noqa: F401
 
+from past.builtins import unicode
+
 import os
 import time
 import signal
 
-from twisted.internet import reactor
+from twisted.internet import defer, reactor
+from twisted.python import failure
 from twisted.trial import unittest
 
 from ..util.assertutil import precondition
@@ -73,6 +73,29 @@ class SignalMixin(object):
         return super(SignalMixin, self).tearDown()
 
 
+class ShouldFailMixin(object):
+
+    def shouldFail(self, expected_failure, which, substring,
+                   callable, *args, **kwargs):
+        assert substring is None or isinstance(substring, (bytes, unicode))
+        d = defer.maybeDeferred(callable, *args, **kwargs)
+        def done(res):
+            if isinstance(res, failure.Failure):
+                res.trap(expected_failure)
+                if substring:
+                    self.failUnless(substring in str(res),
+                                    "%s: substring '%s' not in '%s'"
+                                    % (which, substring, str(res)))
+                # return the Failure for further analysis, but in a form that
+                # doesn't make the Deferred chain think that we failed.
+                return [res]
+            else:
+                self.fail("%s was supposed to raise %s, not get '%s'" %
+                          (which, expected_failure, res))
+        d.addBoth(done)
+        return d
+
+
 class ReallyEqualMixin(object):
     def failUnlessReallyEqual(self, a, b, msg=None):
         self.assertEqual(a, b, msg)
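A hedged usage sketch of the mixin added above (the test class and the divide function are invented for illustration): because shouldFail wraps the call in defer.maybeDeferred, it works for both synchronous raises and failing Deferreds.

from twisted.trial import unittest
from allmydata.test.common_py3 import ShouldFailMixin

def divide(a, b):
    return a / b  # raises ZeroDivisionError when b == 0

class DivisionTests(unittest.TestCase, ShouldFailMixin):
    def test_divide_by_zero(self):
        # Passes only if divide(1, 0) fails with ZeroDivisionError and
        # the failure's text contains the substring "division".
        return self.shouldFail(ZeroDivisionError, "test_divide_by_zero",
                               "division", divide, 1, 0)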
@@ -88,3 +111,4 @@ def skip_if_cannot_represent_filename(u):
         u.encode(enc)
     except UnicodeEncodeError:
         raise unittest.SkipTest("A non-ASCII filename could not be encoded on this platform.")
+
@@ -5,7 +5,6 @@ from random import randrange
 from six.moves import StringIO
 
 from twisted.internet import reactor, defer
-from twisted.python import failure
 from twisted.trial import unittest
 
 from ..util.assertutil import precondition
@@ -16,7 +15,7 @@ if PY2:  # XXX this is a hack that makes some tests pass on Python3, remove
 from ..scripts import runner
 # Imported for backwards compatibility:
 from .common_py3 import (
-    SignalMixin, skip_if_cannot_represent_filename, ReallyEqualMixin,
+    SignalMixin, skip_if_cannot_represent_filename, ReallyEqualMixin, ShouldFailMixin
 )
@@ -85,28 +84,6 @@ class StallMixin(object):
         reactor.callLater(delay, d.callback, res)
         return d
 
-class ShouldFailMixin(object):
-
-    def shouldFail(self, expected_failure, which, substring,
-                   callable, *args, **kwargs):
-        assert substring is None or isinstance(substring, str)
-        d = defer.maybeDeferred(callable, *args, **kwargs)
-        def done(res):
-            if isinstance(res, failure.Failure):
-                res.trap(expected_failure)
-                if substring:
-                    self.failUnless(substring in str(res),
-                                    "%s: substring '%s' not in '%s'"
-                                    % (which, substring, str(res)))
-                # return the Failure for further analysis, but in a form that
-                # doesn't make the Deferred chain think that we failed.
-                return [res]
-            else:
-                self.fail("%s was supposed to raise %s, not get '%s'" %
-                          (which, expected_failure, res))
-        d.addBoth(done)
-        return d
-
-
 class TestMixin(SignalMixin):
     def setUp(self):
@@ -1,4 +1,10 @@
 # -*- coding: utf-8 -*-
+"""
+Tests for allmydata.immutable.happiness_upload and
+allmydata.util.happinessutil.
+
+Ported to Python 3.
+"""
 
 from __future__ import absolute_import
 from __future__ import division
@@ -13,12 +19,17 @@ if PY2:
 from twisted.trial import unittest
 from hypothesis import given
 from hypothesis.strategies import text, sets
 
 from allmydata.immutable import happiness_upload
+from allmydata.util.happinessutil import servers_of_happiness, \
+     shares_by_server, merge_servers
+from allmydata.test.common_py3 import ShouldFailMixin
 
 
-class HappinessUtils(unittest.TestCase):
+class HappinessUploadUtils(unittest.TestCase):
     """
-    test-cases for utility functions augmenting_path_for and residual_network
+    test-cases for happiness_upload utility functions augmenting_path_for and
+    residual_network.
     """
 
     def test_residual_0(self):
@@ -279,3 +290,192 @@ class PlacementTests(unittest.TestCase):
         # peers; if we have fewer shares than peers happiness is capped at
         # # of peers.
         assert happiness == min(len(peers), len(shares))
+
+
+class FakeServerTracker(object):
+    def __init__(self, serverid, buckets):
+        self._serverid = serverid
+        self.buckets = buckets
+    def get_serverid(self):
+        return self._serverid
+
+
+class HappinessUtilTests(unittest.TestCase, ShouldFailMixin):
+    """Tests for happinessutil.py."""
+
+    def test_merge_servers(self):
+        # merge_servers merges a list of upload_servers and a dict of
+        # shareid -> serverid mappings.
+        shares = {
+            1 : set(["server1"]),
+            2 : set(["server2"]),
+            3 : set(["server3"]),
+            4 : set(["server4", "server5"]),
+            5 : set(["server1", "server2"]),
+        }
+        # if not provided with an upload_servers argument, it should just
+        # return the first argument unchanged.
+        self.failUnlessEqual(shares, merge_servers(shares, set([])))
+        trackers = []
+        for (i, server) in [(i, "server%d" % i) for i in range(5, 9)]:
+            t = FakeServerTracker(server, [i])
+            trackers.append(t)
+        expected = {
+            1 : set(["server1"]),
+            2 : set(["server2"]),
+            3 : set(["server3"]),
+            4 : set(["server4", "server5"]),
+            5 : set(["server1", "server2", "server5"]),
+            6 : set(["server6"]),
+            7 : set(["server7"]),
+            8 : set(["server8"]),
+        }
+        self.failUnlessEqual(expected, merge_servers(shares, set(trackers)))
+        shares2 = {}
+        expected = {
+            5 : set(["server5"]),
+            6 : set(["server6"]),
+            7 : set(["server7"]),
+            8 : set(["server8"]),
+        }
+        self.failUnlessEqual(expected, merge_servers(shares2, set(trackers)))
+        shares3 = {}
+        trackers = []
+        expected = {}
+        for (i, server) in [(i, "server%d" % i) for i in range(10)]:
+            shares3[i] = set([server])
+            t = FakeServerTracker(server, [i])
+            trackers.append(t)
+            expected[i] = set([server])
+        self.failUnlessEqual(expected, merge_servers(shares3, set(trackers)))
+
+    def test_servers_of_happiness_utility_function(self):
+        # These tests are concerned with the servers_of_happiness()
+        # utility function, and its underlying matching algorithm. Other
+        # aspects of the servers_of_happiness behavior are tested
+        # elsewhere. These tests exist to ensure that
+        # servers_of_happiness doesn't under or overcount the happiness
+        # value for given inputs.
+
+        # servers_of_happiness expects a dict of
+        # shnum => set(serverids) as a preexisting shares argument.
+        test1 = {
+            1 : set(["server1"]),
+            2 : set(["server2"]),
+            3 : set(["server3"]),
+            4 : set(["server4"])
+        }
+        happy = servers_of_happiness(test1)
+        self.failUnlessEqual(4, happy)
+        test1[4] = set(["server1"])
+        # We've added a duplicate server, so now servers_of_happiness
+        # should be 3 instead of 4.
+        happy = servers_of_happiness(test1)
+        self.failUnlessEqual(3, happy)
+        # The second argument of merge_servers should be a set of objects with
+        # serverid and buckets as attributes. In actual use, these will be
+        # ServerTracker instances, but for testing it is fine to make a
+        # FakeServerTracker whose job is to hold those instance variables to
+        # test that part.
+        trackers = []
+        for (i, server) in [(i, "server%d" % i) for i in range(5, 9)]:
+            t = FakeServerTracker(server, [i])
+            trackers.append(t)
+        # Recall that test1 is a server layout with servers_of_happiness
+        # = 3. Since there isn't any overlap between the shnum ->
+        # set([serverid]) correspondences in test1 and those in trackers,
+        # the result here should be 7.
+        test2 = merge_servers(test1, set(trackers))
+        happy = servers_of_happiness(test2)
+        self.failUnlessEqual(7, happy)
+        # Now add an overlapping server to trackers. This is redundant,
+        # so it should not cause the previously reported happiness value
+        # to change.
+        t = FakeServerTracker("server1", [1])
+        trackers.append(t)
+        test2 = merge_servers(test1, set(trackers))
+        happy = servers_of_happiness(test2)
+        self.failUnlessEqual(7, happy)
+        test = {}
+        happy = servers_of_happiness(test)
+        self.failUnlessEqual(0, happy)
+        # Test a more substantial overlap between the trackers and the
+        # existing assignments.
+        test = {
+            1 : set(['server1']),
+            2 : set(['server2']),
+            3 : set(['server3']),
+            4 : set(['server4']),
+        }
+        trackers = []
+        t = FakeServerTracker('server5', [4])
+        trackers.append(t)
+        t = FakeServerTracker('server6', [3, 5])
+        trackers.append(t)
+        # The value returned by servers_of_happiness is the size
+        # of a maximum matching in the bipartite graph that
+        # servers_of_happiness() makes between serverids and share
+        # numbers. It should find something like this:
+        # (server 1, share 1)
+        # (server 2, share 2)
+        # (server 3, share 3)
+        # (server 5, share 4)
+        # (server 6, share 5)
+        #
+        # and, since there are 5 edges in this matching, it should
+        # return 5.
+        test2 = merge_servers(test, set(trackers))
+        happy = servers_of_happiness(test2)
+        self.failUnlessEqual(5, happy)
+        # Zooko's first puzzle:
+        # (from http://allmydata.org/trac/tahoe-lafs/ticket/778#comment:156)
+        #
+        # server 1: shares 0, 1
+        # server 2: shares 1, 2
+        # server 3: share 2
+        #
+        # This should yield happiness of 3.
+        test = {
+            0 : set(['server1']),
+            1 : set(['server1', 'server2']),
+            2 : set(['server2', 'server3']),
+        }
+        self.failUnlessEqual(3, servers_of_happiness(test))
+        # Zooko's second puzzle:
+        # (from http://allmydata.org/trac/tahoe-lafs/ticket/778#comment:158)
+        #
+        # server 1: shares 0, 1
+        # server 2: share 1
+        #
+        # This should yield happiness of 2.
+        test = {
+            0 : set(['server1']),
+            1 : set(['server1', 'server2']),
+        }
+        self.failUnlessEqual(2, servers_of_happiness(test))
+
+    def test_shares_by_server(self):
+        test = dict([(i, set(["server%d" % i])) for i in range(1, 5)])
+        sbs = shares_by_server(test)
+        self.failUnlessEqual(set([1]), sbs["server1"])
+        self.failUnlessEqual(set([2]), sbs["server2"])
+        self.failUnlessEqual(set([3]), sbs["server3"])
+        self.failUnlessEqual(set([4]), sbs["server4"])
+        test1 = {
+            1 : set(["server1"]),
+            2 : set(["server1"]),
+            3 : set(["server1"]),
+            4 : set(["server2"]),
+            5 : set(["server2"])
+        }
+        sbs = shares_by_server(test1)
+        self.failUnlessEqual(set([1, 2, 3]), sbs["server1"])
+        self.failUnlessEqual(set([4, 5]), sbs["server2"])
+        # This should fail unless the serverid part of the mapping is a set
+        test2 = {1: "server1"}
+        self.shouldFail(AssertionError,
+                        "test_shares_by_server",
+                        "",
+                        shares_by_server, test2)
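The comments in the tests above describe servers_of_happiness as the size of a maximum matching in the bipartite graph between servers and shares. Here is a self-contained sketch of that idea (an augmenting-path matcher, independent of the project's flow-network implementation), checked against Zooko's first puzzle from the tests:

# matching_size is an illustrative stand-in for servers_of_happiness,
# not the project's code: it computes a maximum bipartite matching by
# repeatedly trying to augment along alternating paths (Kuhn's algorithm).
def matching_size(sharemap):
    match = {}  # server -> share currently matched to it

    def try_assign(share, servers, seen):
        for server in servers:
            if server in seen:
                continue
            seen.add(server)
            # Take a free server, or recursively re-seat its current share.
            if server not in match or try_assign(
                    match[server], sharemap[match[server]], seen):
                match[server] = share
                return True
        return False

    return sum(1 for share in sharemap
               if try_assign(share, sharemap[share], set()))

# Zooko's first puzzle: should yield happiness of 3.
puzzle = {
    0: {"server1"},
    1: {"server1", "server2"},
    2: {"server2", "server3"},
}
assert matching_size(puzzle) == 3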
src/allmydata/test/test_storage_web.py (new file, 1309 lines; diff suppressed because it is too large)
@@ -15,17 +15,15 @@ from allmydata.util import log, base32
 from allmydata.util.assertutil import precondition
 from allmydata.util.deferredutil import DeferredListShouldSucceed
 from allmydata.test.no_network import GridTestMixin
-from allmydata.test.common_util import ShouldFailMixin
-from allmydata.util.happinessutil import servers_of_happiness, \
-     shares_by_server, merge_servers
+from allmydata.test.common_py3 import ShouldFailMixin
 from allmydata.storage_client import StorageFarmBroker
 from allmydata.storage.server import storage_index_to_dir
 from allmydata.client import _Client
 
 from .common import (
     EMPTY_CLIENT_CONFIG,
 )
 
 
 MiB = 1024*1024
 
 def extract_uri(results):
@@ -864,12 +862,6 @@ def is_happy_enough(servertoshnums, h, k):
             return False
     return True
 
-class FakeServerTracker(object):
-    def __init__(self, serverid, buckets):
-        self._serverid = serverid
-        self.buckets = buckets
-    def get_serverid(self):
-        return self._serverid
 
 class EncodingParameters(GridTestMixin, unittest.TestCase, SetDEPMixin,
                          ShouldFailMixin):
@@ -1499,185 +1491,6 @@ class EncodingParameters(GridTestMixin, unittest.TestCase, SetDEPMixin,
                                     self._do_upload_with_broken_servers, 2))
         return d
 
-
-    def test_merge_servers(self):
-        # merge_servers merges a list of upload_servers and a dict of
-        # shareid -> serverid mappings.
-        shares = {
-            1 : set(["server1"]),
-            2 : set(["server2"]),
-            3 : set(["server3"]),
-            4 : set(["server4", "server5"]),
-            5 : set(["server1", "server2"]),
-        }
-        # if not provided with an upload_servers argument, it should just
-        # return the first argument unchanged.
-        self.failUnlessEqual(shares, merge_servers(shares, set([])))
-        trackers = []
-        for (i, server) in [(i, "server%d" % i) for i in xrange(5, 9)]:
-            t = FakeServerTracker(server, [i])
-            trackers.append(t)
-        expected = {
-            1 : set(["server1"]),
-            2 : set(["server2"]),
-            3 : set(["server3"]),
-            4 : set(["server4", "server5"]),
-            5 : set(["server1", "server2", "server5"]),
-            6 : set(["server6"]),
-            7 : set(["server7"]),
-            8 : set(["server8"]),
-        }
-        self.failUnlessEqual(expected, merge_servers(shares, set(trackers)))
-        shares2 = {}
-        expected = {
-            5 : set(["server5"]),
-            6 : set(["server6"]),
-            7 : set(["server7"]),
-            8 : set(["server8"]),
-        }
-        self.failUnlessEqual(expected, merge_servers(shares2, set(trackers)))
-        shares3 = {}
-        trackers = []
-        expected = {}
-        for (i, server) in [(i, "server%d" % i) for i in xrange(10)]:
-            shares3[i] = set([server])
-            t = FakeServerTracker(server, [i])
-            trackers.append(t)
-            expected[i] = set([server])
-        self.failUnlessEqual(expected, merge_servers(shares3, set(trackers)))
-
-
-    def test_servers_of_happiness_utility_function(self):
-        # These tests are concerned with the servers_of_happiness()
-        # utility function, and its underlying matching algorithm. Other
-        # aspects of the servers_of_happiness behavior are tested
-        # elsewhere. These tests exist to ensure that
-        # servers_of_happiness doesn't under or overcount the happiness
-        # value for given inputs.
-
-        # servers_of_happiness expects a dict of
-        # shnum => set(serverids) as a preexisting shares argument.
-        test1 = {
-            1 : set(["server1"]),
-            2 : set(["server2"]),
-            3 : set(["server3"]),
-            4 : set(["server4"])
-        }
-        happy = servers_of_happiness(test1)
-        self.failUnlessEqual(4, happy)
-        test1[4] = set(["server1"])
-        # We've added a duplicate server, so now servers_of_happiness
-        # should be 3 instead of 4.
-        happy = servers_of_happiness(test1)
-        self.failUnlessEqual(3, happy)
-        # The second argument of merge_servers should be a set of objects with
-        # serverid and buckets as attributes. In actual use, these will be
-        # ServerTracker instances, but for testing it is fine to make a
-        # FakeServerTracker whose job is to hold those instance variables to
-        # test that part.
-        trackers = []
-        for (i, server) in [(i, "server%d" % i) for i in xrange(5, 9)]:
-            t = FakeServerTracker(server, [i])
-            trackers.append(t)
-        # Recall that test1 is a server layout with servers_of_happiness
-        # = 3. Since there isn't any overlap between the shnum ->
-        # set([serverid]) correspondences in test1 and those in trackers,
-        # the result here should be 7.
-        test2 = merge_servers(test1, set(trackers))
-        happy = servers_of_happiness(test2)
-        self.failUnlessEqual(7, happy)
-        # Now add an overlapping server to trackers. This is redundant,
-        # so it should not cause the previously reported happiness value
-        # to change.
-        t = FakeServerTracker("server1", [1])
-        trackers.append(t)
-        test2 = merge_servers(test1, set(trackers))
-        happy = servers_of_happiness(test2)
-        self.failUnlessEqual(7, happy)
-        test = {}
-        happy = servers_of_happiness(test)
-        self.failUnlessEqual(0, happy)
-        # Test a more substantial overlap between the trackers and the
-        # existing assignments.
-        test = {
-            1 : set(['server1']),
-            2 : set(['server2']),
-            3 : set(['server3']),
-            4 : set(['server4']),
-        }
-        trackers = []
-        t = FakeServerTracker('server5', [4])
-        trackers.append(t)
-        t = FakeServerTracker('server6', [3, 5])
-        trackers.append(t)
-        # The value returned by servers_of_happiness is the size
-        # of a maximum matching in the bipartite graph that
-        # servers_of_happiness() makes between serverids and share
-        # numbers. It should find something like this:
-        # (server 1, share 1)
-        # (server 2, share 2)
-        # (server 3, share 3)
-        # (server 5, share 4)
-        # (server 6, share 5)
-        #
-        # and, since there are 5 edges in this matching, it should
-        # return 5.
-        test2 = merge_servers(test, set(trackers))
-        happy = servers_of_happiness(test2)
-        self.failUnlessEqual(5, happy)
-        # Zooko's first puzzle:
-        # (from http://allmydata.org/trac/tahoe-lafs/ticket/778#comment:156)
-        #
-        # server 1: shares 0, 1
-        # server 2: shares 1, 2
-        # server 3: share 2
-        #
-        # This should yield happiness of 3.
-        test = {
-            0 : set(['server1']),
-            1 : set(['server1', 'server2']),
-            2 : set(['server2', 'server3']),
-        }
-        self.failUnlessEqual(3, servers_of_happiness(test))
-        # Zooko's second puzzle:
-        # (from http://allmydata.org/trac/tahoe-lafs/ticket/778#comment:158)
-        #
-        # server 1: shares 0, 1
-        # server 2: share 1
-        #
-        # This should yield happiness of 2.
-        test = {
-            0 : set(['server1']),
-            1 : set(['server1', 'server2']),
-        }
-        self.failUnlessEqual(2, servers_of_happiness(test))
-
-
-    def test_shares_by_server(self):
-        test = dict([(i, set(["server%d" % i])) for i in xrange(1, 5)])
-        sbs = shares_by_server(test)
-        self.failUnlessEqual(set([1]), sbs["server1"])
-        self.failUnlessEqual(set([2]), sbs["server2"])
-        self.failUnlessEqual(set([3]), sbs["server3"])
-        self.failUnlessEqual(set([4]), sbs["server4"])
-        test1 = {
-            1 : set(["server1"]),
-            2 : set(["server1"]),
-            3 : set(["server1"]),
-            4 : set(["server2"]),
-            5 : set(["server2"])
-        }
-        sbs = shares_by_server(test1)
-        self.failUnlessEqual(set([1, 2, 3]), sbs["server1"])
-        self.failUnlessEqual(set([4, 5]), sbs["server2"])
-        # This should fail unless the serverid part of the mapping is a set
-        test2 = {1: "server1"}
-        self.shouldFail(AssertionError,
-                        "test_shares_by_server",
-                        "",
-                        shares_by_server, test2)
-
-
     def test_existing_share_detection(self):
         self.basedir = self.mktemp()
         d = self._setup_and_upload()
@@ -45,6 +45,7 @@ PORTED_MODULES = [
     "allmydata.util.dictutil",
     "allmydata.util.encodingutil",
     "allmydata.util.gcutil",
+    "allmydata.util.happinessutil",
    "allmydata.util.hashutil",
     "allmydata.util.humanreadable",
     "allmydata.util.iputil",
@@ -1,7 +1,18 @@
 """
 I contain utilities useful for calculating servers_of_happiness, and for
-reporting it in messages
+reporting it in messages.
+
+Ported to Python 3.
 """
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+from future.utils import PY2
+if PY2:
+    # We omit dict, just in case newdict breaks things.
+    from builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, list, object, range, str, max, min  # noqa: F401
 
 from copy import deepcopy
 from allmydata.immutable.happiness_upload import residual_network
@@ -51,7 +62,7 @@ def shares_by_server(servermap):
     dictionary of sets of shares, indexed by peerids.
     """
     ret = {}
-    for shareid, peers in servermap.iteritems():
+    for shareid, peers in servermap.items():
         assert isinstance(peers, set)
         for peerid in peers:
             ret.setdefault(peerid, set()).add(shareid)
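A quick standalone check of the iteritems() -> items() change: items() exists on both Python 2 (returning a list) and Python 3 (returning a view), and plain iteration, as in shares_by_server, behaves identically on either.

servermap = {1: set(["server1"]), 2: set(["server1", "server2"])}
ret = {}
for shareid, peers in servermap.items():  # works on Python 2 and 3
    for peerid in peers:
        ret.setdefault(peerid, set()).add(shareid)
assert ret["server1"] == set([1, 2])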
@@ -146,7 +157,7 @@ def servers_of_happiness(sharemap):
     # The implementation here is an adaptation of an algorithm described in
     # "Introduction to Algorithms", Cormen et al, 2nd ed., pp 658-662.
     dim = len(graph)
-    flow_function = [[0 for sh in xrange(dim)] for s in xrange(dim)]
+    flow_function = [[0 for sh in range(dim)] for s in range(dim)]
     residual_graph, residual_function = residual_network(graph, flow_function)
     while augmenting_path_for(residual_graph):
         path = augmenting_path_for(residual_graph)
@@ -169,7 +180,7 @@ def servers_of_happiness(sharemap):
     # our graph, so we can stop after summing flow across those. The
     # value of a flow computed in this way is the size of a maximum
     # matching on the bipartite graph described above.
-    return sum([flow_function[0][v] for v in xrange(1, num_servers+1)])
+    return sum([flow_function[0][v] for v in range(1, num_servers+1)])
 
 def _flow_network_for(servermap):
     """
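The xrange -> range swap is safe here because Python 3's range is lazy like Python 2's xrange, and the surrounding list comprehension materializes the values anyway. A tiny standalone check with invented flow values:

flow_function = [[0, 2, 3], [0, 0, 0], [0, 0, 0]]  # made-up example flows
num_servers = 2
assert sum([flow_function[0][v] for v in range(1, num_servers+1)]) == 5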
@@ -198,14 +209,14 @@ def _flow_network_for(servermap):
     graph = [] # index -> [index], an adjacency list
     # Add an entry at the top (index 0) that has an edge to every server
     # in servermap
-    graph.append(servermap.keys())
+    graph.append(list(servermap.keys()))
     # For each server, add an entry that has an edge to every share that it
     # contains (or will contain).
     for k in servermap:
         graph.append(servermap[k])
     # For each share, add an entry that has an edge to the sink.
     sink_num = num_servers + num_shares + 1
-    for i in xrange(num_shares):
+    for i in range(num_shares):
         graph.append([sink_num])
     # Add an empty entry for the sink, which has no outbound edges.
     graph.append([])
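The list() wrapper matters because on Python 3 dict.keys() returns a live view, not a list: a view tracks later mutations of the dict, while the adjacency list built here needs an independent, plain list. A standalone illustration:

servermap = {1: [3], 2: [3]}
graph = [list(servermap.keys())]  # independent list: [[1, 2]]
servermap[9] = [4]                # later mutation of the dict...
assert graph[0] == [1, 2]         # ...does not leak into the graph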
@@ -231,8 +242,8 @@ def _reindex(servermap, base_index):
     # Number the shares
     for k in ret:
         for shnum in ret[k]:
-            if not shares.has_key(shnum):
+            if shnum not in shares:
                 shares[shnum] = num
                 num += 1
-        ret[k] = map(lambda x: shares[x], ret[k])
+        ret[k] = [shares[x] for x in ret[k]]
     return (ret, len(shares))
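Both fixes in this last hunk keep the Python 2 semantics on Python 3 (standalone sketch): dict.has_key() was removed in favour of the in operator, and map() now returns a lazy iterator, so the list comprehension preserves the old list-valued result.

shares = {7: 0}
assert 7 in shares                      # replaces shares.has_key(7)
assert [shares[x] for x in [7]] == [0]  # replaces map(lambda x: shares[x], ...)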