Mirror of https://github.com/tahoe-lafs/tahoe-lafs.git
trivial: rename and add in-line doc to clarify "used_peers" => "upload_servers"
commit 9b4f412e49 (parent 56fb5322a6)
@@ -173,9 +173,9 @@ class Tahoe2PeerSelector:
                            num_segments, total_shares, needed_shares,
                            servers_of_happiness):
         """
-        @return: (used_peers, already_peers), where used_peers is a set of
+        @return: (upload_servers, already_peers), where upload_servers is a set of
                  PeerTracker instances that have agreed to hold some shares
-                 for us (the shnum is stashed inside the PeerTracker),
+                 for us (the shareids are stashed inside the PeerTracker),
                  and already_peers is a dict mapping shnum to a set of peers
                  which claim to already have the share.
         """
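For readers following the rename: callers receive this pair through a Deferred and unpack it in a callback, as the test hunk further down does. A minimal consumer sketch, not part of the commit; only the shape of the result pair is taken from the docstring above, and all other names are made up:

# Hypothetical consumer of Tahoe2PeerSelector.get_shareholders (illustration only).
def _got_shareholders((upload_servers, already_peers)):
    # upload_servers: set of PeerTracker instances that agreed to hold shares;
    # each tracker stashes the shareids it accepted in tracker.buckets.
    accepting_peerids = set([tracker.peerid for tracker in upload_servers])
    # already_peers: dict mapping shnum -> set of peerids that already hold it.
    preexisting = len(already_peers)
    return (accepting_peerids, preexisting)

# usage (sketch): d = selector.get_shareholders(...); d.addCallback(_got_shareholders)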
@@ -908,27 +908,27 @@ class CHKUploader:
         d.addCallback(_done)
         return d

-    def set_shareholders(self, (used_peers, already_peers), encoder):
+    def set_shareholders(self, (upload_servers, already_peers), encoder):
         """
-        @param used_peers: a sequence of PeerTracker objects
+        @param upload_servers: a sequence of PeerTracker objects that have agreed to hold some shares for us (the shareids are stashed inside the PeerTracker)
         @param already_peers: a dict mapping sharenum to a set of peerids
                               that claim to already have this share
         """
-        self.log("_send_shares, used_peers is %s" % (used_peers,))
+        self.log("_send_shares, upload_servers is %s" % (upload_servers,))
         # record already-present shares in self._results
         self._results.preexisting_shares = len(already_peers)

         self._peer_trackers = {} # k: shnum, v: instance of PeerTracker
-        for peer in used_peers:
+        for peer in upload_servers:
             assert isinstance(peer, PeerTracker)
         buckets = {}
         servermap = already_peers.copy()
-        for peer in used_peers:
+        for peer in upload_servers:
             buckets.update(peer.buckets)
             for shnum in peer.buckets:
                 self._peer_trackers[shnum] = peer
                 servermap.setdefault(shnum, set()).add(peer.peerid)
-        assert len(buckets) == sum([len(peer.buckets) for peer in used_peers]), "%s (%s) != %s (%s)" % (len(buckets), buckets, sum([len(peer.buckets) for peer in used_peers]), [(p.buckets, p.peerid) for p in used_peers])
+        assert len(buckets) == sum([len(peer.buckets) for peer in upload_servers]), "%s (%s) != %s (%s)" % (len(buckets), buckets, sum([len(peer.buckets) for peer in upload_servers]), [(p.buckets, p.peerid) for p in upload_servers])
         encoder.set_shareholders(buckets, servermap)

     def _encrypted_done(self, verifycap):
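The body above merges each tracker's buckets into one dict and records which peer holds which shnum; the assert then checks that no shnum was accepted by two upload servers. A standalone restatement of that merge with concrete values, not the commit's code; the stand-in tracker class and the example peerids are hypothetical:

# Hypothetical stand-in for a PeerTracker: a peerid plus a shnum -> bucket mapping.
class FakeTracker:
    def __init__(self, peerid, buckets):
        self.peerid = peerid
        self.buckets = buckets

upload_servers = [FakeTracker("peerA", {0: "wA0", 1: "wA1"}),
                  FakeTracker("peerB", {2: "wB2"})]
already_peers = {3: set(["peerC"])}   # shnum -> peers that already hold it

buckets = {}
servermap = already_peers.copy()
for peer in upload_servers:
    buckets.update(peer.buckets)
    for shnum in peer.buckets:
        servermap.setdefault(shnum, set()).add(peer.peerid)

# Each shnum comes from exactly one upload server, so the two counts agree.
assert len(buckets) == sum([len(peer.buckets) for peer in upload_servers])
assert servermap == {0: set(["peerA"]), 1: set(["peerA"]),
                     2: set(["peerB"]), 3: set(["peerC"])}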
@@ -750,7 +750,7 @@ class EncodingParameters(GridTestMixin, unittest.TestCase, SetDEPMixin,
         """
         I act like a normal upload, but before I send the results of
         Tahoe2PeerSelector to the Encoder, I break the first servers_to_break
-        PeerTrackers in the used_peers part of the return result.
+        PeerTrackers in the upload_servers part of the return result.
         """
         assert self.g, "I tried to find a grid at self.g, but failed"
         broker = self.g.clients[0].storage_broker
@@ -771,15 +771,15 @@ class EncodingParameters(GridTestMixin, unittest.TestCase, SetDEPMixin,
         d = selector.get_shareholders(broker, sh, storage_index,
                                       share_size, block_size, num_segments,
                                       10, 3, 4)
-        def _have_shareholders((used_peers, already_peers)):
-            assert servers_to_break <= len(used_peers)
+        def _have_shareholders((upload_servers, already_peers)):
+            assert servers_to_break <= len(upload_servers)
             for index in xrange(servers_to_break):
-                server = list(used_peers)[index]
+                server = list(upload_servers)[index]
                 for share in server.buckets.keys():
                     server.buckets[share].abort()
             buckets = {}
             servermap = already_peers.copy()
-            for peer in used_peers:
+            for peer in upload_servers:
                 buckets.update(peer.buckets)
                 for bucket in peer.buckets:
                     servermap.setdefault(bucket, set()).add(peer.peerid)
@@ -1342,7 +1342,7 @@ class EncodingParameters(GridTestMixin, unittest.TestCase, SetDEPMixin,


     def test_merge_peers(self):
-        # merge_peers merges a list of used_peers and a dict of
+        # merge_peers merges a list of upload_servers and a dict of
         # shareid -> peerid mappings.
         shares = {
                     1 : set(["server1"]),
@@ -1351,7 +1351,7 @@ class EncodingParameters(GridTestMixin, unittest.TestCase, SetDEPMixin,
                     4 : set(["server4", "server5"]),
                     5 : set(["server1", "server2"]),
                  }
-        # if not provided with a used_peers argument, it should just
+        # if not provided with an upload_servers argument, it should just
         # return the first argument unchanged.
         self.failUnlessEqual(shares, merge_peers(shares, set([])))

 class FakePeerTracker:
@@ -54,7 +54,7 @@ def shares_by_server(servermap):
         ret.setdefault(peerid, set()).add(shareid)
     return ret

-def merge_peers(servermap, used_peers=None):
+def merge_peers(servermap, upload_servers=None):
     """
     I accept a dict of shareid -> set(peerid) mappings, and optionally a
     set of PeerTrackers. If no set of PeerTrackers is provided, I return
@@ -66,13 +66,13 @@ def merge_peers(servermap, used_peers=None):
     # context where it is okay to do that, make a copy of servermap and
     # work with it.
     servermap = deepcopy(servermap)
-    if not used_peers:
+    if not upload_servers:
         return servermap

     assert(isinstance(servermap, dict))
-    assert(isinstance(used_peers, set))
+    assert(isinstance(upload_servers, set))

-    for peer in used_peers:
+    for peer in upload_servers:
         for shnum in peer.buckets:
             servermap.setdefault(shnum, set()).add(peer.peerid)
     return servermap
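As a quick illustration of the renamed parameter, here is a hypothetical call to merge_peers with a minimal stand-in tracker, mirroring the FakePeerTracker idea in the tests. The stand-in class, the example values, and the import path are assumptions, not part of the commit:

# Module path assumed; merge_peers is shown above alongside shares_by_server.
from allmydata.util.happinessutil import merge_peers

# Minimal stand-in with the two attributes merge_peers reads: buckets and peerid.
class StubTracker:
    def __init__(self, peerid, buckets):
        self.peerid = peerid
        self.buckets = buckets   # shnum -> bucket (values unused by merge_peers)

servermap = {1: set(["server1"]), 2: set(["server2"])}
upload_servers = set([StubTracker("server3", {3: None, 4: None})])

merged = merge_peers(servermap, upload_servers)
assert merged == {1: set(["server1"]), 2: set(["server2"]),
                  3: set(["server3"]), 4: set(["server3"])}
# With no upload_servers, the servermap comes back unchanged, as the test checks.
assert merge_peers(servermap, set([])) == servermap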