Take a `get_rref` call out of the test suite

Jean-Paul Calderone 2019-05-31 15:54:44 -04:00
parent 72cf590320
commit e745dbfb66
2 changed files with 109 additions and 16 deletions


@@ -11,7 +11,11 @@ from allmydata.mutable.publish import MutableData
 from ..test_download import PausingConsumer, PausingAndStoppingConsumer, \
      StoppingConsumer, ImmediatelyStoppingConsumer
 from .. import common_util as testutil
-from .util import FakeStorage, make_nodemaker
+from .util import (
+    FakeStorage,
+    make_nodemaker_with_peers,
+    make_peer,
+)
 
 class Filenode(unittest.TestCase, testutil.ShouldFailMixin):
     # this used to be in Publish, but we removed the limit. Some of
@@ -19,8 +23,15 @@ class Filenode(unittest.TestCase, testutil.ShouldFailMixin):
     # larger than the limit.
     OLD_MAX_SEGMENT_SIZE = 3500000
     def setUp(self):
-        self._storage = s = FakeStorage()
-        self.nodemaker = make_nodemaker(s)
+        self._storage = FakeStorage()
+        self._peers = list(
+            make_peer(self._storage, n)
+            for n
+            # 10 is the default for N. We're trying to make enough servers
+            # here so that each only gets one share.
+            in range(10)
+        )
+        self.nodemaker = make_nodemaker_with_peers(self._peers)
 
     def test_create(self):
         d = self.nodemaker.create_mutable_file()
@@ -352,16 +363,20 @@ class Filenode(unittest.TestCase, testutil.ShouldFailMixin):
     def test_mdmf_write_count(self):
-        # Publishing an MDMF file should only cause one write for each
-        # share that is to be published. Otherwise, we introduce
-        # undesirable semantics that are a regression from SDMF
+        """
+        Publishing an MDMF file causes exactly one write for each share that is to
+        be published. Otherwise, we introduce undesirable semantics that are a
+        regression from SDMF.
+        """
         upload = MutableData("MDMF" * 100000) # about 400 KiB
         d = self.nodemaker.create_mutable_file(upload,
                                                version=MDMF_VERSION)
         def _check_server_write_counts(ignored):
-            sb = self.nodemaker.storage_broker
-            for server in sb.servers.itervalues():
-                self.failUnlessEqual(server.get_rref().queries, 1)
+            for peer in self._peers:
+                # There were enough servers for each to only get a single
+                # share.
+                self.assertEqual(peer.storage_server.queries, 1)
         d.addCallback(_check_server_write_counts)
         return d

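The first file's change, distilled: instead of reaching into the storage broker and calling get_rref() on each server connection, the test keeps the list of Peer objects it built in setUp and asserts against each peer's fake storage server directly. The following is a condensed sketch of that pattern, not code from the commit: the test class name is hypothetical, it assumes it sits in the same test package as the module above so the relative import resolves, and it assumes MDMF_VERSION is importable from allmydata.interfaces.

# Sketch of the new pattern, not part of the commit: build one fake server per
# share, publish once, then assert on each FakeStorageServer's query counter.
from twisted.trial import unittest

from allmydata.interfaces import MDMF_VERSION  # assumed import path
from allmydata.mutable.publish import MutableData

from .util import FakeStorage, make_peer, make_nodemaker_with_peers


class WriteCountSketch(unittest.TestCase):
    def setUp(self):
        self._storage = FakeStorage()
        # Ten peers to match the default N of 10, so each server should
        # receive exactly one share (and therefore exactly one write).
        self._peers = [make_peer(self._storage, n) for n in range(10)]
        self.nodemaker = make_nodemaker_with_peers(self._peers)

    def test_one_write_per_share(self):
        upload = MutableData("MDMF" * 100000)  # about 400 KiB
        d = self.nodemaker.create_mutable_file(upload, version=MDMF_VERSION)
        def _check(ignored):
            for peer in self._peers:
                # The fake server counts queries itself; no get_rref() needed.
                self.assertEqual(peer.storage_server.queries, 1)
        d.addCallback(_check)
        return d

The second changed file, shown next, adds the Peer record and the helper functions this pattern relies on.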

@@ -1,4 +1,5 @@
 from six.moves import cStringIO as StringIO
+import attr
 from twisted.internet import defer, reactor
 from foolscap.api import eventually, fireEventually
 from allmydata import client
@@ -199,21 +200,98 @@ def corrupt(res, s, offset, shnums_to_corrupt=None, offset_offset=0):
     dl.addCallback(lambda ignored: res)
     return dl
 
+
+@attr.s
+class Peer(object):
+    peerid = attr.ib()
+    storage_server = attr.ib()
+    announcement = attr.ib()
+
+
+def make_peer(s, i):
+    """
+    Create a "peer" suitable for use with ``make_storagebroker_with_peers`` or
+    ``make_nodemaker_with_peers``.
+
+    :param IServer s: The server with which to associate the peers.
+
+    :param int i: A unique identifier for this peer within the whole group of
+        peers to be used.  For example, a sequence number.  This is used to
+        generate a unique peer id.
+
+    :rtype: ``Peer``
+    """
+    peerid = tagged_hash("peerid", "%d" % i)[:20]
+    fss = FakeStorageServer(peerid, s)
+    ann = {"anonymous-storage-FURL": "pb://%s@nowhere/fake" % base32.b2a(peerid),
+           "permutation-seed-base32": base32.b2a(peerid) }
+    return Peer(peerid=peerid, storage_server=fss, announcement=ann)
+
+
 def make_storagebroker(s=None, num_peers=10):
+    """
+    Make a ``StorageFarmBroker`` connected to some number of fake storage
+    servers.
+
+    :param IServer s: The server with which to associate the fake storage
+        servers.
+
+    :param int num_peers: The number of fake storage servers to associate with
+        the broker.
+    """
     if not s:
         s = FakeStorage()
-    peerids = [tagged_hash("peerid", "%d" % i)[:20]
-               for i in range(num_peers)]
+    peers = []
+    for peer_num in range(num_peers):
+        peers.append(make_peer(s, peer_num))
+    return make_storagebroker_with_peers(peers)
+
+
+def make_storagebroker_with_peers(peers):
+    """
+    Make a ``StorageFarmBroker`` connected to the given storage servers.
+
+    :param list peers: The storage servers to associate with the storage
+        broker.
+    """
     storage_broker = StorageFarmBroker(True, None)
-    for peerid in peerids:
-        fss = FakeStorageServer(peerid, s)
-        ann = {"anonymous-storage-FURL": "pb://%s@nowhere/fake" % base32.b2a(peerid),
-               "permutation-seed-base32": base32.b2a(peerid) }
-        storage_broker.test_add_rref(peerid, fss, ann)
+    for peer in peers:
+        storage_broker.test_add_rref(
+            peer.peerid,
+            peer.storage_server,
+            peer.announcement,
+        )
     return storage_broker
+
+
 def make_nodemaker(s=None, num_peers=10, keysize=TEST_RSA_KEY_SIZE):
+    """
+    Make a ``NodeMaker`` connected to some number of fake storage servers.
+
+    :param IServer s: The server with which to associate the fake storage
+        servers.
+
+    :param int num_peers: The number of fake storage servers to associate with
+        the node maker.
+    """
     storage_broker = make_storagebroker(s, num_peers)
+    return make_nodemaker_with_storage_broker(storage_broker, keysize)
+
+
+def make_nodemaker_with_peers(peers, keysize=TEST_RSA_KEY_SIZE):
+    """
+    Make a ``NodeMaker`` connected to the given storage servers.
+
+    :param list peers: The storage servers to associate with the node maker.
+    """
+    storage_broker = make_storagebroker_with_peers(peers)
+    return make_nodemaker_with_storage_broker(storage_broker, keysize)
+
+
+def make_nodemaker_with_storage_broker(storage_broker, keysize):
+    """
+    Make a ``NodeMaker`` using the given storage broker.
+
+    :param StorageFarmBroker peers: The storage broker to use.
+    """
     sh = client.SecretHolder("lease secret", "convergence secret")
     keygen = client.KeyGenerator()
     if keysize:
@@ -223,6 +301,7 @@ def make_nodemaker(s=None, num_peers=10, keysize=TEST_RSA_KEY_SIZE):
                           {"k": 3, "n": 10}, SDMF_VERSION, keygen)
     return nodemaker
 
+
 class PublishMixin(object):
     def publish_one(self):
         # publish a file and create shares, which can then be manipulated
@@ -351,4 +430,3 @@ class CheckerMixin(object):
             return
         self.fail("%s: didn't see expected exception %s in problems %s" %
                   (where, expected_exception, r.get_share_problems()))
-
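For orientation, here is how the helpers added in the second file compose. This is a sketch against the new API, not code from the commit; it assumes it runs inside the same test package so the relative import resolves, and the peer count of 5 is arbitrary.

# Sketch: one FakeStorage backend shared by several Peer records, each holding
# a peerid, a FakeStorageServer, and the announcement dict the broker expects.
from .util import (
    FakeStorage,
    make_peer,
    make_storagebroker_with_peers,
    make_nodemaker_with_peers,
)

storage = FakeStorage()
peers = [make_peer(storage, i) for i in range(5)]

# A test that only needs the broker can stop at this layer ...
broker = make_storagebroker_with_peers(peers)

# ... while a test that publishes files builds a NodeMaker and keeps `peers`
# around, so it can later assert on each peer.storage_server directly rather
# than fishing connections back out of the broker with get_rref().
nodemaker = make_nodemaker_with_peers(peers)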