mirror of https://github.com/tahoe-lafs/tahoe-lafs.git
storage.expirer: exercise the last missing line of webstatus code
@@ -1634,6 +1634,7 @@ class LeaseCrawler(unittest.TestCase, pollmixin.PollMixin, WebRenderingMixin):
             self.failUnlessEqual(so_far["leases-per-share-histogram"], {1: 1})
             self.failUnlessEqual(so_far["buckets-examined"], 1)
             self.failUnlessEqual(so_far["shares-examined"], 1)
+            self.failUnlessEqual(so_far["corrupt-shares"], [])
             sr1 = so_far["space-recovered"]
             self.failUnlessEqual(sr1["actual-numshares"], 0)
             self.failUnlessEqual(sr1["configured-leasetimer-diskbytes"], 0)
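For orientation, the assertions in this hunk walk a nested state dictionary kept by the lease crawler. A rough sketch of its shape, showing only the keys exercised above with made-up values (the real expirer tracks more fields):

    so_far = {
        "leases-per-share-histogram": {1: 1},
        "buckets-examined": 1,
        "shares-examined": 1,
        "corrupt-shares": [],          # filled with (SI_base32, shnum) pairs
        "space-recovered": {
            "actual-numshares": 0,
            "configured-leasetimer-diskbytes": 0,
        },
    }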
@@ -1999,13 +2000,14 @@ class LeaseCrawler(unittest.TestCase, pollmixin.PollMixin, WebRenderingMixin):
         d.addCallback(_check)
         return d

-    def test_bad_share(self):
-        basedir = "storage/LeaseCrawler/bad_share"
+    def test_share_corruption(self):
+        basedir = "storage/LeaseCrawler/share_corruption"
         fileutil.make_dirs(basedir)
-        ss = StorageServer(basedir, "\x00" * 20)
+        ss = InstrumentedStorageServer(basedir, "\x00" * 20)
         w = StorageStatus(ss)
         # make it start sooner than usual.
         lc = ss.lease_checker
+        lc.stop_after_first_bucket = True
         lc.slow_start = 0
         lc.cpu_slice = 500

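InstrumentedStorageServer and the stop_after_first_bucket flag used here are defined elsewhere in test_storage.py, not in this hunk. A minimal sketch of the kind of test double the flag implies, assuming a crawler subclass whose bucket-processing hook exhausts its CPU slice after the first bucket; the class names and hook details below are illustrative assumptions, not necessarily the project's actual code:

    from allmydata.storage.expirer import LeaseCheckingCrawler
    from allmydata.storage.server import StorageServer

    class FirstBucketStoppingCrawler(LeaseCheckingCrawler):   # hypothetical name
        stop_after_first_bucket = False
        def process_bucket(self, *args, **kwargs):
            # process one bucket normally, then exhaust the time slice so the
            # crawler yields and the test can inspect "cycle-to-date" state
            LeaseCheckingCrawler.process_bucket(self, *args, **kwargs)
            if self.stop_after_first_bucket:
                self.stop_after_first_bucket = False
                self.cpu_slice = -1.0

    class InstrumentedStorageServer(StorageServer):            # hypothetical
        LeaseCheckerClass = FirstBucketStoppingCrawler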
@@ -2014,43 +2016,76 @@ class LeaseCrawler(unittest.TestCase, pollmixin.PollMixin, WebRenderingMixin):

         # now corrupt one, and make sure the lease-checker keeps going
         [immutable_si_0, immutable_si_1, mutable_si_2, mutable_si_3] = self.sis
-        first_mutable = min(mutable_si_2, mutable_si_3)
-        fn = os.path.join(ss.sharedir, storage_index_to_dir(first_mutable), "0")
+        first = min(self.sis)
+        first_b32 = base32.b2a(first)
+        fn = os.path.join(ss.sharedir, storage_index_to_dir(first), "0")
         f = open(fn, "rb+")
         f.seek(0)
         f.write("BAD MAGIC")
         f.close()
-        # get_share_file() doesn't see the correct mutable magic, so it
+        # if get_share_file() doesn't see the correct mutable magic, it
         # assumes the file is an immutable share, and then
-        # immutable.ShareFile sees a bad version. So this actually triggers
+        # immutable.ShareFile sees a bad version. So regardless of which kind
+        # of share we corrupted, this will trigger an
         # UnknownImmutableContainerVersionError.

         ss.setServiceParent(self.s)

+        d = eventual.fireEventually()
+
+        # now examine the state right after the first bucket has been
+        # processed.
+        def _after_first_bucket(ignored):
+            so_far = lc.get_state()["cycle-to-date"]
+            self.failUnlessEqual(so_far["buckets-examined"], 1)
+            self.failUnlessEqual(so_far["shares-examined"], 0)
+            self.failUnlessEqual(so_far["corrupt-shares"], [(first_b32, 0)])
+        d.addCallback(_after_first_bucket)
+
+        d.addCallback(lambda ign: self.render_json(w))
+        def _check_json(json):
+            data = simplejson.loads(json)
+            # grr. json turns all dict keys into strings.
+            so_far = data["lease-checker"]["cycle-to-date"]
+            corrupt_shares = so_far["corrupt-shares"]
+            # it also turns all tuples into lists
+            self.failUnlessEqual(corrupt_shares, [[first_b32, 0]])
+        d.addCallback(_check_json)
+        d.addCallback(lambda ign: self.render1(w))
+        def _check_html(html):
+            s = remove_tags(html)
+            self.failUnlessIn("Corrupt shares: SI %s shnum 0" % first_b32, s)
+        d.addCallback(_check_html)
+
         def _wait():
             return bool(lc.get_state()["last-cycle-finished"] is not None)
-        d = self.poll(_wait)
+        d.addCallback(lambda ign: self.poll(_wait))

         def _after_first_cycle(ignored):
             s = lc.get_state()
             last = s["history"][0]
             self.failUnlessEqual(last["buckets-examined"], 4)
             self.failUnlessEqual(last["shares-examined"], 3)
-            self.failUnlessEqual(last["corrupt-shares"],
-                                 [(base32.b2a(first_mutable), 0)])
-            self.flushLoggedErrors(UnknownMutableContainerVersionError,
-                                   UnknownImmutableContainerVersionError)
+            self.failUnlessEqual(last["corrupt-shares"], [(first_b32, 0)])
         d.addCallback(_after_first_cycle)
         d.addCallback(lambda ign: self.render_json(w))
-        def _check_json(json):
+        def _check_json_history(json):
             data = simplejson.loads(json)
-            # grr. json turns all dict keys into strings.
             last = data["lease-checker"]["history"]["0"]
             corrupt_shares = last["corrupt-shares"]
-            # it also turns all tuples into lists
-            self.failUnlessEqual(corrupt_shares,
-                                 [[base32.b2a(first_mutable), 0]])
-        d.addCallback(_check_json)
+            self.failUnlessEqual(corrupt_shares, [[first_b32, 0]])
+        d.addCallback(_check_json_history)
+        d.addCallback(lambda ign: self.render1(w))
+        def _check_html_history(html):
+            s = remove_tags(html)
+            self.failUnlessIn("Corrupt shares: SI %s shnum 0" % first_b32, s)
+        d.addCallback(_check_html_history)
+
+        def _cleanup(res):
+            self.flushLoggedErrors(UnknownMutableContainerVersionError,
+                                   UnknownImmutableContainerVersionError)
+            return res
+        d.addBoth(_cleanup)
         return d

     def render_json(self, page):
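The "grr" comments in the JSON checks refer to a plain round-trip effect: serializing the crawler state with simplejson converts integer dict keys (the history cycle numbers) to strings and tuples (the corrupt-share entries) to lists, which is why the test compares against a "0" key and [[first_b32, 0]] rather than the in-memory form. A standalone sketch of that effect, with made-up values:

    import simplejson

    # in-memory crawler state: integer dict key, tuple entries
    state = {"history": {0: {"corrupt-shares": [("si_b32", 0)]}}}
    round_tripped = simplejson.loads(simplejson.dumps(state))

    # integer keys come back as strings, tuples come back as lists
    assert round_tripped["history"]["0"]["corrupt-shares"] == [["si_b32", 0]]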
@@ -153,7 +153,6 @@ class StorageStatus(rend.Page):
         p = lc.get_progress()
         if not p["cycle-in-progress"]:
             return ""
-        pieces = []
         s = lc.get_state()
         so_far = s["cycle-to-date"]
         sr = so_far["space-recovered"]
@@ -163,6 +162,7 @@ class StorageStatus(rend.Page):
         ecr = ec["space-recovered"]

         p = T.ul()
+        pieces = []
         def add(*pieces):
             p[T.li[pieces]]

@@ -194,6 +194,12 @@ class StorageStatus(rend.Page):
                 % abbreviate_time(so_far["configured-expiration-time"]),
                 self.format_recovered(ecr, "original-leasetimer"))

+        if so_far["corrupt-shares"]:
+            add("Corrupt shares:",
+                T.ul[ [T.li[ ["SI %s shnum %d" % corrupt_share
+                              for corrupt_share in so_far["corrupt-shares"] ]
+                              ]]])
+
         return ctx.tag["Current cycle:", p]

     def render_lease_last_cycle_results(self, ctx, data):
@@ -202,22 +208,30 @@ class StorageStatus(rend.Page):
         if not h:
             return ""
         last = h[max(h.keys())]
-        pieces = []
+
         start, end = last["cycle-start-finish-times"]
-        ctx.tag["Last complete cycle "
-                "(which took %s and finished %s ago)"
-                " recovered: "
-                % (abbreviate_time(end-start),
+        ctx.tag["Last complete cycle (which took %s and finished %s ago)"
+                " recovered: " % (abbreviate_time(end-start),
                                   abbreviate_time(time.time() - end)),
-                self.format_recovered(last["space-recovered"],
-                                      "actual")]
+                self.format_recovered(last["space-recovered"], "actual")
+                ]

+        p = T.ul()
+        pieces = []
+        def add(*pieces):
+            p[T.li[pieces]]
+
         if not last["expiration-enabled"]:
             rec = self.format_recovered(last["space-recovered"],
                                         "configured-leasetimer")
-            pieces.append(T.li["but expiration was not enabled. If it "
-                               "had been, it would have recovered: ",
-                               rec])
-        if pieces:
-            ctx.tag[T.ul[pieces]]
-        return ctx.tag
+            add("but expiration was not enabled. If it had been, "
+                "it would have recovered: ", rec)
+
+        if last["corrupt-shares"]:
+            add("Corrupt shares:",
+                T.ul[ [T.li[ ["SI %s shnum %d" % corrupt_share
+                              for corrupt_share in last["corrupt-shares"] ]
+                              ]]])
+
+        return ctx.tag[p]
+
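Both render methods format each corrupt-share entry with "SI %s shnum %d" % corrupt_share, relying on the entry being a (base32 storage index, share number) tuple that fills both conversion specifiers at once, then wrap the results in a nevow T.ul/T.li list. A plain-Python sketch of the strings that end up in the status page, with hypothetical values:

    corrupt_shares = [("aaabbbccc", 0), ("dddeeefff", 2)]   # (SI_base32, shnum)
    lines = ["SI %s shnum %d" % corrupt_share
             for corrupt_share in corrupt_shares]
    # -> ['SI aaabbbccc shnum 0', 'SI dddeeefff shnum 2']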