mirror of https://github.com/tahoe-lafs/tahoe-lafs.git
Added tests for count-good-share-hosts under check and repair conditions.
Patched the incorrect computation in immutable/filenode.py.
Fixed missing import statements.

Signed-off-by: Andrew Miller <amiller@dappervision.com>
parent 8f630a76fe
commit 4b7f34d179
@@ -122,7 +122,7 @@ class CiphertextFileNode:
 servers_responding.union(ur.sharemap.iterkeys())
 prr.data['servers-responding'] = list(servers_responding)
 prr.data['count-shares-good'] = len(sm)
-prr.data['count-good-share-hosts'] = len(sm)
+prr.data['count-good-share-hosts'] = len(reduce(set.union, sm.itervalues()))
 is_healthy = bool(len(sm) >= verifycap.total_shares)
 is_recoverable = bool(len(sm) >= verifycap.needed_shares)
 prr.set_healthy(is_healthy)
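For context (not part of the commit), here is a minimal sketch, in the same Python 2 idiom as the file, of why the old line undercounted: len(sm) is the number of distinct share numbers in the post-repair sharemap, while the fixed line unions the per-share server sets and counts distinct servers. The sharemap literal below is a hypothetical example; in the real code sm is a DictOfSets built from the repair results.

    # Sketch only: `sm` stands in for the post-repair sharemap,
    # mapping share number -> set of server ids.
    sm = {
        0: set(["A"]),
        1: set(["A"]),
        2: set(["A"]),
        3: set(["A", "B", "C", "D", "E"]),
    }

    # Old computation: len(sm) counts distinct shares, not hosts.
    old_count = len(sm)                                   # 4

    # Fixed computation: union the server sets, count distinct hosts.
    new_count = len(reduce(set.union, sm.itervalues()))   # 5

    print old_count, new_count   # prints: 4 5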
@@ -1,10 +1,13 @@
import simplejson
import os.path, shutil
from twisted.trial import unittest
from twisted.internet import defer
from allmydata import check_results, uri
from allmydata import uri as tahoe_uri
from allmydata.web import check_results as web_check_results
from allmydata.storage_client import StorageFarmBroker, NativeStorageServer
from allmydata.storage.server import storage_index_to_dir
from allmydata.monitor import Monitor
from allmydata.test.no_network import GridTestMixin
from allmydata.immutable.upload import Data
@@ -275,6 +278,107 @@ class WebResultsRendering(unittest.TestCase, WebRenderingMixin):
        d.addCallback(_got_lit_results)
        return d

class BalancingAct(GridTestMixin, unittest.TestCase):
    # test for #1115 regarding the 'count-good-share-hosts' metric

    def add_server(self, server_number, readonly=False):
        assert self.g, "I tried to find a grid at self.g, but failed"
        ss = self.g.make_server(server_number, readonly)
        #log.msg("just created a server, number: %s => %s" % (server_number, ss,))
        self.g.add_server(server_number, ss)

    def add_server_with_share(self, server_number, uri, share_number=None,
                              readonly=False):
        self.add_server(server_number, readonly)
        if share_number is not None:
            self.copy_share_to_server(uri, share_number, server_number)

    def copy_share_to_server(self, uri, share_number, server_number):
        ss = self.g.servers_by_number[server_number]
        # Copy share i from the directory associated with the first
        # storage server to the directory associated with this one.
        assert self.g, "I tried to find a grid at self.g, but failed"
        assert self.shares, "I tried to find shares at self.shares, but failed"
        old_share_location = self.shares[share_number][2]
        new_share_location = os.path.join(ss.storedir, "shares")
        si = tahoe_uri.from_string(self.uri).get_storage_index()
        new_share_location = os.path.join(new_share_location,
                                          storage_index_to_dir(si))
        if not os.path.exists(new_share_location):
            os.makedirs(new_share_location)
        new_share_location = os.path.join(new_share_location,
                                          str(share_number))
        if old_share_location != new_share_location:
            shutil.copy(old_share_location, new_share_location)
        shares = self.find_uri_shares(uri)
        # Make sure that the storage server has the share.
        self.failUnless((share_number, ss.my_nodeid, new_share_location)
                        in shares)

    def _pretty_shares_chart(self, uri):
        # Servers are labeled A-Z, shares are labeled 0-9
        letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
        assert len(self.g.servers_by_number) < len(letters), \
            "This little printing function is only meant for < 26 servers"
        shares_chart = {}
        names = dict(zip([ss.my_nodeid for _,ss in
                          self.g.servers_by_number.iteritems()], letters))
        for shnum, serverid, _ in self.find_uri_shares(uri):
            shares_chart.setdefault(shnum, []).append(names[serverid])
        return shares_chart

    def test_1115(self):
        self.basedir = "checker/BalancingAct/1115"
        self.set_up_grid(num_servers=1)
        c0 = self.g.clients[0]
        c0.DEFAULT_ENCODING_PARAMETERS['happy'] = 1
        c0.DEFAULT_ENCODING_PARAMETERS['n'] = 4
        c0.DEFAULT_ENCODING_PARAMETERS['k'] = 3

        DATA = "data" * 100
        d = c0.upload(Data(DATA, convergence=""))
        def _stash_immutable(ur):
            self.imm = c0.create_node_from_uri(ur.uri)
            self.uri = self.imm.get_uri()
        d.addCallback(_stash_immutable)
        d.addCallback(lambda ign:
            self.find_uri_shares(self.uri))
        def _store_shares(shares):
            self.shares = shares
        d.addCallback(_store_shares)

        def add_three(_, i):
            # Add a new server with just share 3
            self.add_server_with_share(i, self.uri, 3)
            #print self._pretty_shares_chart(self.uri)
        for i in range(1,5):
            d.addCallback(add_three, i)

        def _check_and_repair(_):
            return self.imm.check_and_repair(Monitor())
        def _check_counts(crr):
            p_crr = crr.get_post_repair_results().data
            #print self._pretty_shares_chart(self.uri)
            self.failUnless(p_crr['count-shares-good'] == 4)
            self.failUnless(p_crr['count-good-share-hosts'] == 5)

        """
        Initial sharemap:
            0:[A] 1:[A] 2:[A] 3:[A,B,C,D,E]
        4 good shares, but 5 good hosts
        After deleting all instances of share #3 and repairing:
            0:[A,B], 1:[A,C], 2:[A,D], 3:[E]
        Still 4 good shares and 5 good hosts
        """
        d.addCallback(_check_and_repair)
        d.addCallback(_check_counts)
        d.addCallback(lambda _: self.delete_shares_numbered(self.uri, [3]))
        d.addCallback(_check_and_repair)
        d.addCallback(_check_counts)

        return d

class AddLease(GridTestMixin, unittest.TestCase):
    # test for #875, in which failures in the add-lease call cause
    # false-negatives in the checker