introducer: remove remaining bits of 'push-to-myself' flags. The uploading/downloading node is no longer special.
commit d98fde952c
parent 80b72d919a
@@ -146,9 +146,7 @@ class Client(node.Node, testutil.PollMixin):

     def init_options(self):
-        self.push_to_ourselves = None
-        if self.get_config("push_to_ourselves") is not None:
-            self.push_to_ourselves = True
+        pass

     def init_web(self, webport):
         self.log("init_web(webport=%s)", args=(webport,))
@@ -213,9 +211,6 @@ class Client(node.Node, testutil.PollMixin):
         assert isinstance(key, str)
         return self.introducer_client.get_permuted_peers(service_name, key)

-    def get_push_to_ourselves(self):
-        return self.push_to_ourselves
-
     def get_encoding_parameters(self):
         return self.DEFAULT_ENCODING_PARAMETERS
         p = self.introducer_client.encoding_parameters # a tuple
@@ -314,38 +314,18 @@ class IntroducerClient(service.Service, Referenceable):

     def get_permuted_peers(self, service_name, key):
         """Return an ordered list of (peerid, rref) tuples."""
-        # TODO: flags like add-myself-at-beginning and remove-myself? maybe
-        # not.

         results = []
         for (c_peerid, c_service_name, rref) in self._connections:
             assert isinstance(c_peerid, str)
             if c_service_name != service_name:
                 continue
-            #if not include_myself and peerid == self.nodeid:
-            #    self.log("get_permuted_peers: removing myself from the list")
-            #    continue
             permuted = sha.new(key + c_peerid).digest()
             results.append((permuted, c_peerid, rref))

         results.sort(lambda a,b: cmp(a[0], b[0]))
         return [ (r[1], r[2]) for r in results ]

-    def _TODO__add_ourselves(self, partial_peerlist, peerlist):
-        # moved here from mutable.Publish
-        my_peerid = self._node._client.nodeid
-        for (permutedid, peerid, conn) in partial_peerlist:
-            if peerid == my_peerid:
-                # we're already in there
-                return partial_peerlist
-        for (permutedid, peerid, conn) in peerlist:
-            if peerid == self._node._client.nodeid:
-                # found it
-                partial_peerlist.append( (permutedid, peerid, conn) )
-                return partial_peerlist
-        self.log("we aren't in our own peerlist??", level=log.WEIRD)
-        return partial_peerlist



     def remote_set_encoding_parameters(self, parameters):
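For reference, the surviving get_permuted_peers() boils down to: hash the storage key together with each connected peer's id, sort by the digest, and return that ordering with no special slot for the local node. A minimal, self-contained sketch of the same ordering rule (modern Python; hashlib.sha1 stands in for the old sha module, and the peer ids and the permuted_order helper are invented for illustration):

import hashlib

def permuted_order(key, connections, service_name="storage"):
    # Order (peerid, rref) pairs by SHA1(key + peerid); no peer is special.
    results = []
    for (peerid, svc, rref) in connections:
        if svc != service_name:
            continue
        permuted = hashlib.sha1(key + peerid).digest()
        results.append((permuted, peerid, rref))
    results.sort()  # sorts on the permuted digest, like the cmp-based sort above
    return [(peerid, rref) for (_, peerid, rref) in results]

if __name__ == "__main__":
    connections = [(b"peer-one", "storage", "rref1"),
                   (b"peer-two", "storage", "rref2"),
                   (b"peer-two", "introducer", "rref3")]
    print(permuted_order(b"some-storage-index", connections))

The point of this commit is visible here: nothing in the ordering ever consults the local node's id.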
@@ -303,7 +303,6 @@ class Retrieve:
         n = self._node
         full_peerlist = n._client.get_permuted_peers("storage",
                                                      self._storage_index)
-        # TODO: include_myself=True

         # _peerlist is a list of (peerid,conn) tuples for peers that are
         # worth talking too. This starts with the first numqueries in the
@@ -503,7 +502,6 @@ class Retrieve:
         # we might be able to get some more peers from the list
         peers = self._node._client.get_permuted_peers("storage",
                                                       self._storage_index)
-        # TODO: include_myself=True
         self._peerlist = [p for p in islice(peers, search_distance)]
         self._peerlist_limit = search_distance
         self.log("added peers, peerlist=%d, peerlist_limit=%d"
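The Retrieve hunks above keep only the first search_distance entries of the permuted list and widen that window when more peers are needed. A small sketch of the windowing pattern (illustrative names, not the actual Retrieve code):

from itertools import islice

def widen_peerlist(permuted_peers, current_limit, search_distance):
    # Return a larger prefix of the permuted peerlist, mirroring the
    # "added peers, peerlist=%d, peerlist_limit=%d" step above.
    if search_distance <= current_limit:
        return None, current_limit          # nothing new to pull in
    peerlist = [p for p in islice(permuted_peers, search_distance)]
    return peerlist, search_distance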
@@ -778,20 +776,22 @@ class Publish:

         storage_index = self._storage_index

-        # we need to include ourselves in the list for two reasons. The most
-        # important is so that any shares which already exist on our own
-        # server get updated. The second is to ensure that we leave a share
-        # on our own server, so we're more likely to have the signing key
-        # around later. This way, even if all the servers die and the
-        # directory contents are unrecoverable, at least we can still push
-        # out a new copy with brand-new contents. TODO: it would be nice if
-        # the share we use for ourselves didn't count against the N total..
-        # maybe use N+1 if we find ourselves in the permuted list?
+        # In 0.7.0, we went through extra work to make sure that we include
+        # ourselves in the peerlist, mainly to match Retrieve (which did the
+        # same thing. With the post-0.7.0 Introducer refactoring, we got rid
+        # of the include-myself flags, and standardized on the
+        # uploading/downloading node not being special.
+
+        # One nice feature of the old approach was that by putting a share on
+        # the local storage server, we're more likely to be able to retrieve
+        # a copy of the encrypted private key (even if all the old servers
+        # have gone away), so we can regenerate new shares even if we can't
+        # retrieve the old contents. This need will eventually go away when
+        # we switch to DSA-based mutable files (which store the private key
+        # in the URI).

         peerlist = self._node._client.get_permuted_peers("storage",
                                                          storage_index)
-        # make sure our local server is in the list
-        # TODO: include_myself_at_beginning=True

         current_share_peers = DictOfSets()
         reachable_peers = {}
@@ -818,11 +818,13 @@ class Publish:
                             total_shares, reachable_peers,
                             current_share_peers)
         # TODO: add an errback to, probably to ignore that peer
+
         # TODO: if we can't get a privkey from these servers, consider
-        # looking farther afield. Make sure we include ourselves in the
-        # initial list, because of the 0.7.0 behavior that causes us to
-        # create our initial directory before we've connected to anyone
-        # but ourselves.
+        # looking farther afield. Be aware of the old 0.7.0 behavior that
+        # causes us to create our initial directory before we've connected to
+        # anyone but ourselves.. those old directories may not be
+        # retrieveable if our own server is no longer in the early part of
+        # the permuted peerlist.
         return d

     def _do_query(self, ss, peerid, storage_index):
@@ -242,17 +242,15 @@ this file are ignored.
             pass
         else:
             # don't accept any shares
-            f = open(os.path.join(clientdir, "sizelimit"), "w")
-            f.write("0\n")
+            f = open(os.path.join(clientdir, "readonly_storage"), "w")
+            f.write("true\n")
             f.close()
             ## also, if we do receive any shares, throw them away
             #f = open(os.path.join(clientdir, "debug_no_storage"), "w")
             #f.write("no_storage\n")
             #f.close()
         if self.mode == "upload-self":
-            f = open(os.path.join(clientdir, "push_to_ourselves"), "w")
-            f.write("push_to_ourselves\n")
-            f.close()
+            pass
         self.keepalive_file = os.path.join(clientdir,
                                            "suicide_prevention_hotline")
         # now start updating the mtime.
@@ -39,8 +39,6 @@ class FakeClient(service.MultiService):
         }
     def log(self, *args, **kwargs):
         return log.msg(*args, **kwargs)
-    def get_push_to_ourselves(self):
-        return True
     def get_encoding_parameters(self):
         return self.DEFAULT_ENCODING_PARAMETERS
     def get_permuted_peers(self, service_name, storage_index):
@@ -144,8 +144,6 @@ class FakeClient:
                   for fakeid in range(self.num_servers) ]
         self.last_peers = [p[1] for p in peers]
         return peers
-    def get_push_to_ourselves(self):
-        return None
     def get_encoding_parameters(self):
         return self.DEFAULT_ENCODING_PARAMETERS

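After the two FakeClient hunks above, the client surface that the upload and mutable-file code needs from a test double is just encoding parameters plus the permuted peerlist; get_push_to_ourselves() is gone entirely. A rough, self-contained sketch of that trimmed interface (modern Python; the class name, peer ids, and parameter values are invented and do not reflect the real test doubles):

import hashlib

class MinimalFakeClient:
    DEFAULT_ENCODING_PARAMETERS = {"k": 3, "happy": 7, "n": 10}  # invented values

    def __init__(self, num_servers=10):
        # (peerid, connection) pairs; connections are stubbed out here
        self.peers = [(b"peerid-%d" % i, None) for i in range(num_servers)]

    def get_encoding_parameters(self):
        return self.DEFAULT_ENCODING_PARAMETERS

    def get_permuted_peers(self, service_name, storage_index):
        # same ordering rule as IntroducerClient.get_permuted_peers: sort by
        # SHA1(storage_index + peerid), with no special place for "ourselves"
        return sorted(self.peers,
                      key=lambda pc: hashlib.sha1(storage_index + pc[0]).digest())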
@@ -54,7 +54,6 @@ class PeerTracker:
         self._storageserver = storage_server # to an RIStorageServer
         self.buckets = {} # k: shareid, v: IRemoteBucketWriter
         self.sharesize = sharesize
-        #print "PeerTracker", peerid, sharesize
         as = storage.allocated_size(sharesize,
                                     num_segments,
                                     num_share_hashes,
@@ -75,7 +74,6 @@ class PeerTracker:
                    idlib.b2a(self.storage_index)[:6]))

     def query(self, sharenums):
-        #print " query", self.peerid, len(sharenums)
         d = self._storageserver.callRemote("allocate_buckets",
                                            self.storage_index,
                                            self.renew_secret,
@@ -115,8 +113,7 @@ class Tahoe2PeerSelector:

     def get_shareholders(self, client,
                          storage_index, share_size, block_size,
-                         num_segments, total_shares, shares_of_happiness,
-                         push_to_ourselves):
+                         num_segments, total_shares, shares_of_happiness):
         """
         @return: a set of PeerTracker instances that have agreed to hold some
         shares for us
@@ -134,7 +131,6 @@ class Tahoe2PeerSelector:
         self.preexisting_shares = {} # sharenum -> PeerTracker holding the share

         peers = client.get_permuted_peers("storage", storage_index)
-        # TODO: push_to_ourselves
         if not peers:
             raise encode.NotEnoughPeersError("client gave us zero peers")

@@ -608,11 +604,10 @@ class CHKUploader:
         block_size = encoder.get_param("block_size")
         num_segments = encoder.get_param("num_segments")
         k,desired,n = encoder.get_param("share_counts")
-        push_to_ourselves = self._client.get_push_to_ourselves()

-        gs = peer_selector.get_shareholders
-        d = gs(self._client, storage_index, share_size, block_size,
-               num_segments, n, desired, push_to_ourselves)
+        d = peer_selector.get_shareholders(self._client, storage_index,
+                                           share_size, block_size,
+                                           num_segments, n, desired)
         return d

     def set_shareholders(self, used_peers, encoder):