Merge 'origin/master' into 3288.status-download-nevow-to-twisted-web
commit 4a47e8311f
newsfragments/3247.minor (new file, empty)

newsfragments/3287.minor (new file, empty)
newsfragments/3312.bugfix (new file):
Make directory page links work.
newsfragments/3315.minor (new file):
Replace nevow with twisted.web in web.operations.ReloadMixin
newsfragments/3317.feature (new file):
allmydata.testing.web, a new module, now offers a supported Python API for testing Tahoe-LAFS web API clients.
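A minimal sketch of what the new API enables (the host name is arbitrary --
the fake treq client never touches the network -- and the helper name comes
from src/allmydata/testing/web.py later in this diff):

    from twisted.internet.defer import inlineCallbacks
    from allmydata.testing.web import create_tahoe_treq_client

    @inlineCallbacks
    def upload_and_download(payload):
        client = create_tahoe_treq_client()  # in-memory fake of the WebUI
        resp = yield client.put("http://example.com/uri", payload)
        cap = yield resp.content()           # body is the capability string
        resp = yield client.get("http://example.com/uri?uri=" + cap)
        data = yield resp.content()
        assert data == payload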
@@ -1269,5 +1269,5 @@ class TrialTestCase(_TrialTestCase):

         if six.PY2:
             if isinstance(msg, six.text_type):
-                return super(self, TrialTestCase).fail(msg.encode("utf8"))
-        return super(self, TrialTestCase).fail(msg)
+                return super(TrialTestCase, self).fail(msg.encode("utf8"))
+        return super(TrialTestCase, self).fail(msg)
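The fix above corrects an argument-order bug in super(): the class comes
first, the instance second. A minimal illustration (not from the commit):

    class Base(object):
        def fail(self, msg):
            return msg

    class Child(Base):
        def fail(self, msg):
            # correct: super(Child, self); the reversed form
            # super(self, Child) raises TypeError when called
            return super(Child, self).fail(msg)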
src/allmydata/test/test_storage.py:
@@ -12,6 +12,17 @@ from twisted.trial import unittest

 from twisted.internet import defer
 from twisted.application import service
+from twisted.web.template import flattenString
+
+# We need to use `nevow.inevow.IRequest` for now for compatibility
+# with the code in web/common.py.  Once nevow bits are gone from
+# web/common.py, we can use `twisted.web.iweb.IRequest` here.
+from nevow.inevow import IRequest
+
+from twisted.web.server import Request
+from twisted.web.test.requesthelper import DummyChannel
+from zope.interface import implementer
+
 from foolscap.api import fireEventually
 import itertools
 from allmydata import interfaces
@@ -36,9 +47,12 @@ from allmydata.mutable.layout import MDMFSlotWriteProxy, MDMFSlotReadProxy, \
     SHARE_HASH_CHAIN_SIZE
 from allmydata.interfaces import BadWriteEnablerError
 from allmydata.test.common import LoggingServiceParent, ShouldFailMixin
-from allmydata.test.common_web import WebRenderingMixin
 from allmydata.test.no_network import NoNetworkServer
-from allmydata.web.storage import StorageStatus, remove_prefix
+from allmydata.web.storage import (
+    StorageStatus,
+    StorageStatusElement,
+    remove_prefix
+)
 from allmydata.storage_client import (
     _StorageServer,
 )
@@ -2972,6 +2986,39 @@ def remove_tags(s):
     s = re.sub(r'\s+', ' ', s)
     return s

+def renderSynchronously(ss):
+    """
+    Return fully rendered HTML document.
+
+    :param _StorageStatus ss: a StorageStatus instance.
+    """
+    return unittest.TestCase().successResultOf(renderDeferred(ss))
+
+def renderDeferred(ss):
+    """
+    Return a `Deferred` HTML renderer.
+
+    :param _StorageStatus ss: a StorageStatus instance.
+    """
+    elem = StorageStatusElement(ss._storage, ss._nickname)
+    return flattenString(None, elem)
+
+def renderJSON(resource):
+    """Render JSON from the given resource."""
+
+    @implementer(IRequest)
+    class JSONRequest(Request):
+        """
+        A Request with t=json argument added to it.  This is useful to
+        invoke a Resource.render_JSON() method.
+        """
+        def __init__(self):
+            Request.__init__(self, DummyChannel())
+            self.args = {"t": ["json"]}
+            self.fields = {}
+
+    return resource.render(JSONRequest())
+
 class MyBucketCountingCrawler(BucketCountingCrawler):
     def finished_prefix(self, cycle, prefix):
         BucketCountingCrawler.finished_prefix(self, cycle, prefix)
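Together these three helpers replace the WebRenderingMixin machinery used
throughout this file; the hunks that follow apply them mechanically in this
pattern (sketch):

    w = StorageStatus(ss)          # the resource under test
    html = renderSynchronously(w)  # complete HTML document, rendered now
    d = renderDeferred(w)          # Deferred that fires with the same HTML
    raw = renderJSON(w)            # what the resource serves for ?t=json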
@@ -3008,7 +3055,7 @@ class BucketCounter(unittest.TestCase, pollmixin.PollMixin):
         w = StorageStatus(ss)

         # this sample is before the crawler has started doing anything
-        html = w.renderSynchronously()
+        html = renderSynchronously(w)
         self.failUnlessIn("<h1>Storage Server Status</h1>", html)
         s = remove_tags(html)
         self.failUnlessIn("Accepting new shares: Yes", s)
@@ -3031,7 +3078,7 @@ class BucketCounter(unittest.TestCase, pollmixin.PollMixin):
         self.failUnlessEqual(state["last-complete-prefix"],
                              ss.bucket_counter.prefixes[0])
         ss.bucket_counter.cpu_slice = 100.0 # finish as fast as possible
-        html = w.renderSynchronously()
+        html = renderSynchronously(w)
         s = remove_tags(html)
         self.failUnlessIn(" Current crawl ", s)
         self.failUnlessIn(" (next work in ", s)
@@ -3043,7 +3090,7 @@ class BucketCounter(unittest.TestCase, pollmixin.PollMixin):
         d.addCallback(lambda ignored: self.poll(_watch))
         def _check2(ignored):
             ss.bucket_counter.cpu_slice = orig_cpu_slice
-            html = w.renderSynchronously()
+            html = renderSynchronously(w)
             s = remove_tags(html)
             self.failUnlessIn("Total buckets: 0 (the number of", s)
             self.failUnless("Next crawl in 59 minutes" in s or "Next crawl in 60 minutes" in s, s)
@@ -3105,20 +3152,20 @@ class BucketCounter(unittest.TestCase, pollmixin.PollMixin):

         def _check_1(ignored):
             # no ETA is available yet
-            html = w.renderSynchronously()
+            html = renderSynchronously(w)
             s = remove_tags(html)
             self.failUnlessIn("complete (next work", s)

         def _check_2(ignored):
             # one prefix has finished, so an ETA based upon that elapsed time
             # should be available.
-            html = w.renderSynchronously()
+            html = renderSynchronously(w)
             s = remove_tags(html)
             self.failUnlessIn("complete (ETA ", s)

         def _check_3(ignored):
             # two prefixes have finished
-            html = w.renderSynchronously()
+            html = renderSynchronously(w)
             s = remove_tags(html)
             self.failUnlessIn("complete (ETA ", s)
             d.callback("done")
@@ -3161,7 +3208,7 @@ class InstrumentedStorageServer(StorageServer):
 class No_ST_BLOCKS_StorageServer(StorageServer):
     LeaseCheckerClass = No_ST_BLOCKS_LeaseCheckingCrawler

-class LeaseCrawler(unittest.TestCase, pollmixin.PollMixin, WebRenderingMixin):
+class LeaseCrawler(unittest.TestCase, pollmixin.PollMixin):

     def setUp(self):
         self.s = service.MultiService()
@@ -3291,7 +3338,7 @@ class LeaseCrawler(unittest.TestCase, pollmixin.PollMixin, WebRenderingMixin):
             self.failIfEqual(sr2["configured-diskbytes"], None)
             self.failIfEqual(sr2["original-sharebytes"], None)
         d.addCallback(_after_first_bucket)
-        d.addCallback(lambda ign: self.render1(webstatus))
+        d.addCallback(lambda ign: renderDeferred(webstatus))
         def _check_html_in_cycle(html):
             s = remove_tags(html)
             self.failUnlessIn("So far, this cycle has examined "
@@ -3366,7 +3413,7 @@ class LeaseCrawler(unittest.TestCase, pollmixin.PollMixin, WebRenderingMixin):
             self.failUnlessEqual(count_leases(mutable_si_2), 1)
             self.failUnlessEqual(count_leases(mutable_si_3), 2)
         d.addCallback(_after_first_cycle)
-        d.addCallback(lambda ign: self.render1(webstatus))
+        d.addCallback(lambda ign: renderDeferred(webstatus))
         def _check_html(html):
             s = remove_tags(html)
             self.failUnlessIn("recovered: 0 shares, 0 buckets "
@@ -3375,7 +3422,7 @@ class LeaseCrawler(unittest.TestCase, pollmixin.PollMixin, WebRenderingMixin):
                               "(2 mutable / 2 immutable),", s)
             self.failUnlessIn("but expiration was not enabled", s)
         d.addCallback(_check_html)
-        d.addCallback(lambda ign: self.render_json(webstatus))
+        d.addCallback(lambda ign: renderJSON(webstatus))
         def _check_json(raw):
             data = json.loads(raw)
             self.failUnlessIn("lease-checker", data)
@@ -3466,7 +3513,7 @@ class LeaseCrawler(unittest.TestCase, pollmixin.PollMixin, WebRenderingMixin):
             d2.addCallback(_after_first_bucket)
             return d2
         d.addCallback(_after_first_bucket)
-        d.addCallback(lambda ign: self.render1(webstatus))
+        d.addCallback(lambda ign: renderDeferred(webstatus))
         def _check_html_in_cycle(html):
             s = remove_tags(html)
             # the first bucket encountered gets deleted, and its prefix
@@ -3525,7 +3572,7 @@ class LeaseCrawler(unittest.TestCase, pollmixin.PollMixin, WebRenderingMixin):
             self.failUnless(rec["configured-diskbytes"] >= 0,
                             rec["configured-diskbytes"])
         d.addCallback(_after_first_cycle)
-        d.addCallback(lambda ign: self.render1(webstatus))
+        d.addCallback(lambda ign: renderDeferred(webstatus))
         def _check_html(html):
             s = remove_tags(html)
             self.failUnlessIn("Expiration Enabled: expired leases will be removed", s)
@@ -3610,7 +3657,7 @@ class LeaseCrawler(unittest.TestCase, pollmixin.PollMixin, WebRenderingMixin):
             d2.addCallback(_after_first_bucket)
             return d2
         d.addCallback(_after_first_bucket)
-        d.addCallback(lambda ign: self.render1(webstatus))
+        d.addCallback(lambda ign: renderDeferred(webstatus))
         def _check_html_in_cycle(html):
             s = remove_tags(html)
             # the first bucket encountered gets deleted, and its prefix
@@ -3671,7 +3718,7 @@ class LeaseCrawler(unittest.TestCase, pollmixin.PollMixin, WebRenderingMixin):
             self.failUnless(rec["configured-diskbytes"] >= 0,
                             rec["configured-diskbytes"])
         d.addCallback(_after_first_cycle)
-        d.addCallback(lambda ign: self.render1(webstatus))
+        d.addCallback(lambda ign: renderDeferred(webstatus))
         def _check_html(html):
             s = remove_tags(html)
             self.failUnlessIn("Expiration Enabled:"
@@ -3733,7 +3780,7 @@ class LeaseCrawler(unittest.TestCase, pollmixin.PollMixin, WebRenderingMixin):
             self.failUnlessEqual(count_shares(mutable_si_3), 1)
             self.failUnlessEqual(count_leases(mutable_si_3), 2)
         d.addCallback(_after_first_cycle)
-        d.addCallback(lambda ign: self.render1(webstatus))
+        d.addCallback(lambda ign: renderDeferred(webstatus))
         def _check_html(html):
             s = remove_tags(html)
             self.failUnlessIn("The following sharetypes will be expired: immutable.", s)
@@ -3790,7 +3837,7 @@ class LeaseCrawler(unittest.TestCase, pollmixin.PollMixin, WebRenderingMixin):
             self.failUnlessEqual(count_shares(mutable_si_2), 0)
             self.failUnlessEqual(count_shares(mutable_si_3), 0)
         d.addCallback(_after_first_cycle)
-        d.addCallback(lambda ign: self.render1(webstatus))
+        d.addCallback(lambda ign: renderDeferred(webstatus))
         def _check_html(html):
             s = remove_tags(html)
             self.failUnlessIn("The following sharetypes will be expired: mutable.", s)
@@ -4012,7 +4059,7 @@ class LeaseCrawler(unittest.TestCase, pollmixin.PollMixin, WebRenderingMixin):
             self.failUnlessEqual(so_far["corrupt-shares"], [(first_b32, 0)])
         d.addCallback(_after_first_bucket)

-        d.addCallback(lambda ign: self.render_json(w))
+        d.addCallback(lambda ign: renderJSON(w))
         def _check_json(raw):
             data = json.loads(raw)
             # grr. json turns all dict keys into strings.
@@ -4021,7 +4068,7 @@ class LeaseCrawler(unittest.TestCase, pollmixin.PollMixin, WebRenderingMixin):
             # it also turns all tuples into lists
             self.failUnlessEqual(corrupt_shares, [[first_b32, 0]])
         d.addCallback(_check_json)
-        d.addCallback(lambda ign: self.render1(w))
+        d.addCallback(lambda ign: renderDeferred(w))
         def _check_html(html):
             s = remove_tags(html)
             self.failUnlessIn("Corrupt shares: SI %s shnum 0" % first_b32, s)
@@ -4039,14 +4086,14 @@ class LeaseCrawler(unittest.TestCase, pollmixin.PollMixin, WebRenderingMixin):
             self.failUnlessEqual(rec["examined-shares"], 3)
             self.failUnlessEqual(last["corrupt-shares"], [(first_b32, 0)])
         d.addCallback(_after_first_cycle)
-        d.addCallback(lambda ign: self.render_json(w))
+        d.addCallback(lambda ign: renderJSON(w))
         def _check_json_history(raw):
             data = json.loads(raw)
             last = data["lease-checker"]["history"]["0"]
             corrupt_shares = last["corrupt-shares"]
             self.failUnlessEqual(corrupt_shares, [[first_b32, 0]])
         d.addCallback(_check_json_history)
-        d.addCallback(lambda ign: self.render1(w))
+        d.addCallback(lambda ign: renderDeferred(w))
         def _check_html_history(html):
             s = remove_tags(html)
             self.failUnlessIn("Corrupt shares: SI %s shnum 0" % first_b32, s)
@@ -4059,11 +4106,8 @@ class LeaseCrawler(unittest.TestCase, pollmixin.PollMixin, WebRenderingMixin):
         d.addBoth(_cleanup)
         return d

-    def render_json(self, page):
-        d = self.render1(page, args={"t": ["json"]})
-        return d
-
-class WebStatus(unittest.TestCase, pollmixin.PollMixin, WebRenderingMixin):
+class WebStatus(unittest.TestCase, pollmixin.PollMixin):

     def setUp(self):
         self.s = service.MultiService()
@@ -4073,7 +4117,7 @@ class WebStatus(unittest.TestCase, pollmixin.PollMixin, WebRenderingMixin):

     def test_no_server(self):
         w = StorageStatus(None)
-        html = w.renderSynchronously()
+        html = renderSynchronously(w)
         self.failUnlessIn("<h1>No Storage Server Running</h1>", html)

     def test_status(self):
@@ -4083,7 +4127,7 @@ class WebStatus(unittest.TestCase, pollmixin.PollMixin, WebRenderingMixin):
         ss = StorageServer(basedir, nodeid)
         ss.setServiceParent(self.s)
         w = StorageStatus(ss, "nickname")
-        d = self.render1(w)
+        d = renderDeferred(w)
         def _check_html(html):
             self.failUnlessIn("<h1>Storage Server Status</h1>", html)
             s = remove_tags(html)
@@ -4092,7 +4136,7 @@ class WebStatus(unittest.TestCase, pollmixin.PollMixin, WebRenderingMixin):
             self.failUnlessIn("Accepting new shares: Yes", s)
             self.failUnlessIn("Reserved space: - 0 B (0)", s)
         d.addCallback(_check_html)
-        d.addCallback(lambda ign: self.render_json(w))
+        d.addCallback(lambda ign: renderJSON(w))
         def _check_json(raw):
             data = json.loads(raw)
             s = data["stats"]
@@ -4103,9 +4147,6 @@ class WebStatus(unittest.TestCase, pollmixin.PollMixin, WebRenderingMixin):
         d.addCallback(_check_json)
         return d

-    def render_json(self, page):
-        d = self.render1(page, args={"t": ["json"]})
-        return d

     def test_status_no_disk_stats(self):
         def call_get_disk_stats(whichdir, reserved_space=0):
@@ -4119,7 +4160,7 @@ class WebStatus(unittest.TestCase, pollmixin.PollMixin, WebRenderingMixin):
         ss = StorageServer(basedir, "\x00" * 20)
         ss.setServiceParent(self.s)
         w = StorageStatus(ss)
-        html = w.renderSynchronously()
+        html = renderSynchronously(w)
         self.failUnlessIn("<h1>Storage Server Status</h1>", html)
         s = remove_tags(html)
         self.failUnlessIn("Accepting new shares: Yes", s)
@@ -4139,7 +4180,7 @@ class WebStatus(unittest.TestCase, pollmixin.PollMixin, WebRenderingMixin):
         ss = StorageServer(basedir, "\x00" * 20)
         ss.setServiceParent(self.s)
         w = StorageStatus(ss)
-        html = w.renderSynchronously()
+        html = renderSynchronously(w)
         self.failUnlessIn("<h1>Storage Server Status</h1>", html)
         s = remove_tags(html)
         self.failUnlessIn("Accepting new shares: No", s)
@@ -4175,7 +4216,7 @@ class WebStatus(unittest.TestCase, pollmixin.PollMixin, WebRenderingMixin):

         ss.setServiceParent(self.s)
         w = StorageStatus(ss)
-        html = w.renderSynchronously()
+        html = renderSynchronously(w)

         self.failUnlessIn("<h1>Storage Server Status</h1>", html)
         s = remove_tags(html)
@@ -4193,7 +4234,7 @@ class WebStatus(unittest.TestCase, pollmixin.PollMixin, WebRenderingMixin):
         ss = StorageServer(basedir, "\x00" * 20, readonly_storage=True)
         ss.setServiceParent(self.s)
         w = StorageStatus(ss)
-        html = w.renderSynchronously()
+        html = renderSynchronously(w)
         self.failUnlessIn("<h1>Storage Server Status</h1>", html)
         s = remove_tags(html)
         self.failUnlessIn("Accepting new shares: No", s)
@@ -4204,7 +4245,7 @@ class WebStatus(unittest.TestCase, pollmixin.PollMixin, WebRenderingMixin):
         ss = StorageServer(basedir, "\x00" * 20, reserved_space=10e6)
         ss.setServiceParent(self.s)
         w = StorageStatus(ss)
-        html = w.renderSynchronously()
+        html = renderSynchronously(w)
         self.failUnlessIn("<h1>Storage Server Status</h1>", html)
         s = remove_tags(html)
         self.failUnlessIn("Reserved space: - 10.00 MB (10000000)", s)
@@ -4215,16 +4256,16 @@ class WebStatus(unittest.TestCase, pollmixin.PollMixin, WebRenderingMixin):
         ss = StorageServer(basedir, "\x00" * 20, reserved_space=10e6)
         ss.setServiceParent(self.s)
         w = StorageStatus(ss)
-        html = w.renderSynchronously()
+        html = renderSynchronously(w)
         self.failUnlessIn("<h1>Storage Server Status</h1>", html)
         s = remove_tags(html)
         self.failUnlessIn("Reserved space: - 10.00 MB (10000000)", s)

     def test_util(self):
-        w = StorageStatus(None)
-        self.failUnlessEqual(w.render_space(None, None), "?")
-        self.failUnlessEqual(w.render_space(None, 10e6), "10000000")
-        self.failUnlessEqual(w.render_abbrev_space(None, None), "?")
-        self.failUnlessEqual(w.render_abbrev_space(None, 10e6), "10.00 MB")
+        w = StorageStatusElement(None, None)
+        self.failUnlessEqual(w.render_space(None), "?")
+        self.failUnlessEqual(w.render_space(10e6), "10000000")
+        self.failUnlessEqual(w.render_abbrev_space(None), "?")
+        self.failUnlessEqual(w.render_abbrev_space(10e6), "10.00 MB")
         self.failUnlessEqual(remove_prefix("foo.bar", "foo."), "bar")
         self.failUnlessEqual(remove_prefix("foo.bar", "baz."), None)
src/allmydata/test/test_testing.py (new file, 170 lines):

# -*- coding: utf-8 -*-
# Tahoe-LAFS -- secure, distributed storage grid
#
# Copyright © 2020 The Tahoe-LAFS Software Foundation
#
# This file is part of Tahoe-LAFS.
#
# See the docs/about.rst file for licensing information.

"""
Tests for the allmydata.testing helpers
"""

from twisted.internet.defer import (
    inlineCallbacks,
)

from allmydata.uri import (
    from_string,
    CHKFileURI,
)
from allmydata.testing.web import (
    create_tahoe_treq_client,
    capability_generator,
)

from hyperlink import (
    DecodedURL,
)

from hypothesis import (
    given,
)
from hypothesis.strategies import (
    binary,
)

from testtools import (
    TestCase,
)
from testtools.matchers import (
    Always,
    Equals,
    IsInstance,
    MatchesStructure,
    AfterPreprocessing,
)
from testtools.twistedsupport import (
    succeeded,
)


class FakeWebTest(TestCase):
    """
    Test the WebUI verified-fakes infrastructure
    """

    # Note: do NOT use setUp() because Hypothesis doesn't work
    # properly with it.  You must instead do all fixture-type work
    # yourself in each test.

    @given(
        content=binary(),
    )
    def test_create_and_download(self, content):
        """
        Upload some content (via 'PUT /uri') and then download it (via
        'GET /uri?uri=...')
        """
        http_client = create_tahoe_treq_client()

        @inlineCallbacks
        def do_test():
            resp = yield http_client.put("http://example.com/uri", content)
            self.assertThat(resp.code, Equals(201))

            cap_raw = yield resp.content()
            cap = from_string(cap_raw)
            self.assertThat(cap, IsInstance(CHKFileURI))

            resp = yield http_client.get(
                "http://example.com/uri?uri={}".format(cap.to_string())
            )
            self.assertThat(resp.code, Equals(200))

            round_trip_content = yield resp.content()
            self.assertEqual(content, round_trip_content)

            # using the form "/uri/<cap>" is also valid

            resp = yield http_client.get(
                "http://example.com/uri/{}".format(cap.to_string())
            )
            self.assertEqual(resp.code, 200)

            round_trip_content = yield resp.content()
            self.assertEqual(content, round_trip_content)
        self.assertThat(
            do_test(),
            succeeded(Always()),
        )

    @given(
        content=binary(),
    )
    def test_duplicate_upload(self, content):
        """
        Upload the same content (via 'PUT /uri') twice
        """

        http_client = create_tahoe_treq_client()

        @inlineCallbacks
        def do_test():
            resp = yield http_client.put("http://example.com/uri", content)
            self.assertEqual(resp.code, 201)

            cap_raw = yield resp.content()
            self.assertThat(
                cap_raw,
                AfterPreprocessing(
                    from_string,
                    IsInstance(CHKFileURI)
                )
            )

            resp = yield http_client.put("http://example.com/uri", content)
            self.assertThat(resp.code, Equals(200))
        self.assertThat(
            do_test(),
            succeeded(Always()),
        )

    def test_download_missing(self):
        """
        Error if we download a capability that doesn't exist
        """

        http_client = create_tahoe_treq_client()
        cap_gen = capability_generator("URI:CHK:")

        uri = DecodedURL.from_text(u"http://example.com/uri?uri={}".format(next(cap_gen)))
        resp = http_client.get(uri.to_uri().to_text())

        self.assertThat(
            resp,
            succeeded(
                MatchesStructure(
                    code=Equals(500)
                )
            )
        )

    def test_download_no_arg(self):
        """
        Error if we GET from "/uri" with no ?uri= query-arg
        """

        http_client = create_tahoe_treq_client()

        uri = DecodedURL.from_text(u"http://example.com/uri/")
        resp = http_client.get(uri.to_uri().to_text())

        self.assertThat(
            resp,
            succeeded(
                MatchesStructure(
                    code=Equals(400)
                )
            )
        )
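The "do NOT use setUp()" comment above is load-bearing: Hypothesis invokes
the decorated test many times on a single TestCase instance, so per-example
state must be built inside the test body. A schematic of the pattern both
@given tests follow (hypothetical class and method names):

    from hypothesis import given
    from hypothesis.strategies import binary
    from testtools import TestCase
    from allmydata.testing.web import create_tahoe_treq_client

    class PatternSketch(TestCase):
        @given(content=binary())
        def test_roundtrip(self, content):
            # a fresh fake grid per generated example, never shared
            # through setUp()
            client = create_tahoe_treq_client()
            # ... drive `client` with `content`, as FakeWebTest does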
src/allmydata/test/test_web.py:
@@ -26,6 +26,16 @@ class Util(ShouldFailMixin, testutil.ReallyEqualMixin, unittest.TestCase):
         self.failUnlessReallyEqual(common.abbreviate_time(0.25), "250ms")
         self.failUnlessReallyEqual(common.abbreviate_time(0.0021), "2.1ms")

+        self.failUnlessReallyEqual(common.abbreviate_time(None), "")
+        self.failUnlessReallyEqual(common.abbreviate_time(2.5), "2.50s")
+        self.failUnlessReallyEqual(common.abbreviate_time(0.25), "250ms")
+        self.failUnlessReallyEqual(common.abbreviate_time(0.0021), "2.1ms")
+        self.failUnlessReallyEqual(common.abbreviate_time(0.000123), "123us")
+        self.failUnlessReallyEqual(common.abbreviate_rate(None), "")
+        self.failUnlessReallyEqual(common.abbreviate_rate(2500000), "2.50MBps")
+        self.failUnlessReallyEqual(common.abbreviate_rate(30100), "30.1kBps")
+        self.failUnlessReallyEqual(common.abbreviate_rate(123), "123Bps")
+
     def test_compute_rate(self):
         self.failUnlessReallyEqual(common.compute_rate(None, None), None)
         self.failUnlessReallyEqual(common.compute_rate(None, 1), None)
@@ -954,8 +954,9 @@ class Web(WebMixin, WebErrorMixin, testutil.StallMixin, testutil.ReallyEqualMixi
     def test_storage(self):
         d = self.GET("/storage")
         def _check(res):
-            self.failUnlessIn('Storage Server Status', res)
-            self.failUnlessIn(FAVICON_MARKUP, res)
+            soup = BeautifulSoup(res, 'html5lib')
+            assert_soup_has_text(self, soup, 'Storage Server Status')
+            assert_soup_has_favicon(self, soup)
             res_u = res.decode('utf-8')
             self.failUnlessIn(u'<li>Server Nickname: <span class="nickname mine">fake_nickname \u263A</span></li>', res_u)
         d.addCallback(_check)
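The replaced substring assertions were brittle against markup changes; the
soup-based helpers inspect the parsed DOM instead. Roughly (only the
BeautifulSoup part is shown; assert_soup_has_text and assert_soup_has_favicon
are existing helpers in the tahoe test tree):

    from bs4 import BeautifulSoup

    html = b"<html><body><h1>Storage Server Status</h1></body></html>"
    soup = BeautifulSoup(html, "html5lib")
    assert soup.find(text="Storage Server Status") is not None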
@@ -1046,6 +1047,17 @@ class Web(WebMixin, WebErrorMixin, testutil.StallMixin, testutil.ReallyEqualMixi
         self.failUnlessReallyEqual(urrm.render_rate(None, 30100), "30.1kBps")
         self.failUnlessReallyEqual(urrm.render_rate(None, 123), "123Bps")

+        drrm = status.DownloadResultsRendererMixin()
+        self.failUnlessReallyEqual(drrm.render_time(None, None), "")
+        self.failUnlessReallyEqual(drrm.render_time(None, 2.5), "2.50s")
+        self.failUnlessReallyEqual(drrm.render_time(None, 0.25), "250ms")
+        self.failUnlessReallyEqual(drrm.render_time(None, 0.0021), "2.1ms")
+        self.failUnlessReallyEqual(drrm.render_time(None, 0.000123), "123us")
+        self.failUnlessReallyEqual(drrm.render_rate(None, None), "")
+        self.failUnlessReallyEqual(drrm.render_rate(None, 2500000), "2.50MBps")
+        self.failUnlessReallyEqual(drrm.render_rate(None, 30100), "30.1kBps")
+        self.failUnlessReallyEqual(drrm.render_rate(None, 123), "123Bps")
+
     def test_GET_FILEURL(self):
         d = self.GET(self.public_url + "/foo/bar.txt")
         d.addCallback(self.failUnlessIsBarDotTxt)
src/allmydata/testing/__init__.py (new file, empty)
src/allmydata/testing/web.py (new file, 289 lines):

# -*- coding: utf-8 -*-
# Tahoe-LAFS -- secure, distributed storage grid
#
# Copyright © 2020 The Tahoe-LAFS Software Foundation
#
# This file is part of Tahoe-LAFS.
#
# See the docs/about.rst file for licensing information.

"""
Test-helpers for clients that use the WebUI.
"""

import hashlib

import attr

from hyperlink import DecodedURL

from twisted.web.resource import (
    Resource,
)
from twisted.web.iweb import (
    IBodyProducer,
)
from twisted.web import (
    http,
)

from twisted.internet.defer import (
    succeed,
)

from treq.client import (
    HTTPClient,
    FileBodyProducer,
)
from treq.testing import (
    RequestTraversalAgent,
)
from zope.interface import implementer

import allmydata.uri
from allmydata.util import (
    base32,
)


__all__ = (
    "create_fake_tahoe_root",
    "create_tahoe_treq_client",
)


class _FakeTahoeRoot(Resource, object):
    """
    An in-memory 'fake' of a Tahoe WebUI root.  Currently it only
    implements (some of) the `/uri` resource.
    """

    def __init__(self, uri=None):
        """
        :param uri: a Resource to handle the `/uri` tree.
        """
        Resource.__init__(self)  # this is an old-style class :(
        self._uri = uri
        self.putChild(b"uri", self._uri)

    def add_data(self, kind, data):
        fresh, cap = self._uri.add_data(kind, data)
        return cap


KNOWN_CAPABILITIES = [
    getattr(allmydata.uri, t).BASE_STRING
    for t in dir(allmydata.uri)
    if hasattr(getattr(allmydata.uri, t), 'BASE_STRING')
]


def capability_generator(kind):
    """
    Deterministically generates a stream of valid capabilities of the
    given kind.  The N, K and size values aren't related to anything
    real.

    :param str kind: the kind of capability, like `URI:CHK`

    :returns: a generator that yields new capabilities of a particular
        kind.
    """
    if kind not in KNOWN_CAPABILITIES:
        raise ValueError(
            "Unknown capability kind '{}' (valid are {})".format(
                kind,
                ", ".join(KNOWN_CAPABILITIES),
            )
        )
    # what we do here is to start with empty hashers for the key and
    # ueb_hash and repeatedly feed() them a zero byte on each
    # iteration .. so the same sequence of capabilities will always be
    # produced.  We could add a seed= argument if we wanted to produce
    # different sequences.
    number = 0
    key_hasher = hashlib.new("sha256")
    ueb_hasher = hashlib.new("sha256")  # ueb means "URI Extension Block"

    # capabilities are "prefix:<128-bits-base32>:<256-bits-base32>:N:K:size"
    while True:
        number += 1
        key_hasher.update("\x00")
        ueb_hasher.update("\x00")

        key = base32.b2a(key_hasher.digest()[:16])  # key is 16 bytes
        ueb_hash = base32.b2a(ueb_hasher.digest())  # ueb hash is 32 bytes

        cap = u"{kind}{key}:{ueb_hash}:{n}:{k}:{size}".format(
            kind=kind,
            key=key,
            ueb_hash=ueb_hash,
            n=1,
            k=1,
            size=number * 1000,
        )
        yield cap.encode("ascii")


@attr.s
class _FakeTahoeUriHandler(Resource, object):
    """
    An in-memory fake of (some of) the `/uri` endpoint of a Tahoe
    WebUI
    """

    isLeaf = True

    data = attr.ib(default=attr.Factory(dict))
    capability_generators = attr.ib(default=attr.Factory(dict))

    def _generate_capability(self, kind):
        """
        :param str kind: any valid capability-string type

        :returns: the next capability-string for the given kind
        """
        if kind not in self.capability_generators:
            self.capability_generators[kind] = capability_generator(kind)
        capability = next(self.capability_generators[kind])
        return capability

    def add_data(self, kind, data):
        """
        adds some data to our grid

        :returns: a two-tuple: a bool (True if the data is freshly added) and a capability-string
        """
        if not isinstance(data, bytes):
            raise TypeError("'data' must be bytes")

        for k in self.data:
            if self.data[k] == data:
                return (False, k)

        cap = self._generate_capability(kind)
        # it should be impossible for this to already be in our data,
        # but check anyway to be sure
        if cap in self.data:
            raise Exception("Internal error; key already exists somehow")
        self.data[cap] = data
        return (True, cap)

    def render_PUT(self, request):
        data = request.content.read()
        fresh, cap = self.add_data("URI:CHK:", data)
        if fresh:
            request.setResponseCode(http.CREATED)  # real code does this for brand-new files
        else:
            request.setResponseCode(http.OK)  # replaced/modified files
        return cap

    def render_POST(self, request):
        t = request.args[u"t"][0]
        data = request.content.read()

        type_to_kind = {
            "mkdir-immutable": "URI:DIR2-CHK:"
        }
        kind = type_to_kind[t]
        fresh, cap = self.add_data(kind, data)
        return cap

    def render_GET(self, request):
        uri = DecodedURL.from_text(request.uri.decode('utf8'))
        capability = None
        for arg, value in uri.query:
            if arg == u"uri":
                capability = value
        # it's legal to use the form "/uri/<capability>"
        if capability is None and request.postpath and request.postpath[0]:
            capability = request.postpath[0]

        # if we don't yet have a capability, that's an error
        if capability is None:
            request.setResponseCode(http.BAD_REQUEST)
            return b"GET /uri requires uri="

        # the user gave us a capability; if our Grid doesn't have any
        # data for it, that's an error.
        if capability not in self.data:
            request.setResponseCode(http.BAD_REQUEST)
            return u"No data for '{}'".format(capability).decode("ascii")

        return self.data[capability]


def create_fake_tahoe_root():
    """
    If you wish to pre-populate data into the fake Tahoe grid, retain
    a reference to this root by creating it yourself and passing it to
    `create_tahoe_treq_client`.  For example::

        root = create_fake_tahoe_root()
        cap_string = root.add_data(...)
        client = create_tahoe_treq_client(root)

    :returns: an IResource instance that will handle certain Tahoe URI
        endpoints similar to a real Tahoe server.
    """
    root = _FakeTahoeRoot(
        uri=_FakeTahoeUriHandler(),
    )
    return root


@implementer(IBodyProducer)
class _SynchronousProducer(object):
    """
    A partial implementation of an :obj:`IBodyProducer` which produces
    its entire payload immediately.  There is no way to access an
    instance of this object from :obj:`RequestTraversalAgent` or
    :obj:`StubTreq`, or even a :obj:`Resource` passed to
    :obj:`StubTreq`.

    This does not implement the :func:`IBodyProducer.stopProducing`
    method, because that is very difficult to trigger.  (The request
    from `RequestTraversalAgent` would have to be canceled while it is
    still in the transmitting state), and the intent is to use
    `RequestTraversalAgent` to make synchronous requests.
    """

    def __init__(self, body):
        """
        Create a synchronous producer with some bytes.
        """
        if isinstance(body, FileBodyProducer):
            body = body._inputFile.read()

        if not isinstance(body, bytes):
            raise ValueError(
                "'body' must be bytes not '{}'".format(type(body))
            )
        self.body = body
        self.length = len(body)

    def startProducing(self, consumer):
        """
        Immediately produce all data.
        """
        consumer.write(self.body)
        return succeed(None)


def create_tahoe_treq_client(root=None):
    """
    :param root: an instance created via `create_fake_tahoe_root`.  The
        caller might want a copy of this to call `.add_data` for
        example.

    :returns: an instance of treq.client.HTTPClient wired up to
        in-memory fakes of the Tahoe WebUI.  Only a subset of the real
        WebUI is available.
    """
    if root is None:
        root = create_fake_tahoe_root()

    client = HTTPClient(
        agent=RequestTraversalAgent(root),
        data_to_body_producer=_SynchronousProducer,
    )
    return client
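Pre-populating the fake grid, as the create_fake_tahoe_root docstring
suggests, looks like this (sketch; the kind string must be one of
KNOWN_CAPABILITIES):

    from allmydata.testing.web import (
        create_fake_tahoe_root,
        create_tahoe_treq_client,
    )

    root = create_fake_tahoe_root()
    cap = root.add_data("URI:CHK:", b"pre-existing file contents")
    client = create_tahoe_treq_client(root)
    # client.get("http://example.com/uri?uri=" + cap) now serves the bytes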
src/allmydata/web/directory.py:
@@ -102,12 +102,21 @@ class DirectoryNodeHandler(ReplaceMeMixin, Resource, object):
         # trying to replicate what I have observed as Nevow behavior
         # for these nodes, which is that a URI like
         # "/uri/URI%3ADIR2%3Aj...vq/" (that is, with a trailing slash
-        # or no further children) renders "this" page
+        # or no further children) renders "this" page.  We also need
+        # to reject "/uri/URI:DIR2:..//", so we look at postpath.
         name = name.decode('utf8')
-        if not name:
-            raise EmptyPathnameComponentError(
-                u"The webapi does not allow empty pathname components",
-            )
+        if not name and req.postpath != ['']:
+            return self
+
+        # Rejecting URIs that contain empty path pieces (for example:
+        # "/uri/URI:DIR2:../foo//new.txt" or "/uri/URI:DIR2:..//") was
+        # the old nevow behavior and it is encoded in the test suite;
+        # we will follow suit.
+        for segment in req.prepath:
+            if not segment:
+                raise EmptyPathnameComponentError(
+                    u"The webapi does not allow empty pathname components",
+                )

         d = self.node.get(name)
         d.addBoth(self._got_child, req, name)
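The net effect of the hunk, expressed as Twisted's traversal state at the
moment getChild(name, req) runs (hypothetical capability; values inferred
from how twisted.web pops path segments from postpath onto prepath):

    # GET /uri/URI:DIR2:abc/   -> name == '', req.postpath == []
    #                             postpath != [''] holds, so return self:
    #                             the directory renders its own page
    # GET /uri/URI:DIR2:abc//  -> name == '', req.postpath == ['']
    #                             falls through; '' is now in req.prepath,
    #                             so EmptyPathnameComponentError is raised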
src/allmydata/web/directory.xhtml:
@@ -31,7 +31,7 @@
       <div class="well sidebar-nav">
         <ul class="nav nav-list">
           <li class="toolbar-item" t:render="welcome" />
-          <li class="toolbar-item"><a href=".">Refresh</a></li>
+          <li class="toolbar-item"><a href="">Refresh</a></li>
           <li class="toolbar-item"><a href="?t=info">More info on this directory</a></li>
           <li class="toolbar-item" t:render="show_readonly" />
         </ul>
src/allmydata/web/operations.py:
@@ -1,7 +1,11 @@

 import time
-from nevow import rend, url, tags as T
+from nevow import rend, url
 from nevow.inevow import IRequest
+from twisted.web.template import (
+    renderer,
+    tags as T,
+)
 from twisted.python.failure import Failure
 from twisted.internet import reactor, defer
 from twisted.web.http import NOT_FOUND
@@ -122,31 +126,31 @@ class OphandleTable(rend.Page, service.Service):
 class ReloadMixin(object):
     REFRESH_TIME = 1*MINUTE

-    def render_refresh(self, ctx, data):
+    @renderer
+    def refresh(self, req, tag):
         if self.monitor.is_finished():
             return ""
         # dreid suggests ctx.tag(**dict([("http-equiv", "refresh")]))
         # but I can't tell if he's joking or not
-        ctx.tag.attributes["http-equiv"] = "refresh"
-        ctx.tag.attributes["content"] = str(self.REFRESH_TIME)
-        return ctx.tag
+        tag.attributes["http-equiv"] = "refresh"
+        tag.attributes["content"] = str(self.REFRESH_TIME)
+        return tag

-    def render_reload(self, ctx, data):
+    @renderer
+    def reload(self, req, tag):
         if self.monitor.is_finished():
             return ""
-        req = IRequest(ctx)
         # url.gethere would break a proxy, so the correct thing to do is
         # req.path[-1] + queryargs
         ophandle = req.prepath[-1]
         reload_target = ophandle + "?output=html"
         cancel_target = ophandle + "?t=cancel"
-        cancel_button = T.form(action=cancel_target, method="POST",
-                               enctype="multipart/form-data")[
-            T.input(type="submit", value="Cancel"),
-            ]
+        cancel_button = T.form(T.input(type="submit", value="Cancel"),
+                               action=cancel_target,
+                               method="POST",
+                               enctype="multipart/form-data",)

-        return [T.h2["Operation still running: ",
-                     T.a(href=reload_target)["Reload"],
-                     ],
-                cancel_button,
-                ]
+        return (T.h2("Operation still running: ",
+                     T.a("Reload", href=reload_target),
+                     ),
+                cancel_button,)
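This hunk shows the migration pattern repeated across the branch: a nevow
render_* method receiving a context becomes a twisted.web.template
@renderer receiving the request and the tag directly. A self-contained
sketch of the new style (illustrative, not code from the commit):

    from twisted.web.template import Element, XMLString, renderer

    class RefreshSketch(Element):
        loader = XMLString(
            '<meta xmlns:t="http://twistedmatrix.com/ns/'
            'twisted.web.template/0.1" t:render="refresh" />')

        @renderer
        def refresh(self, request, tag):
            # same idea as ReloadMixin.refresh above: decorate the tag
            # the template handed us, then return it
            tag.attributes["http-equiv"] = "refresh"
            tag.attributes["content"] = "60"
            return tag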
src/allmydata/web/status.py:
@@ -35,187 +35,235 @@ class RateAndTimeMixin(object):
     def render_rate(self, ctx, data):
         return abbreviate_rate(data)

-class UploadResultsRendererMixin(RateAndTimeMixin):
+
+class UploadResultsRendererMixin(Element):
     # this requires a method named 'upload_results'

-    def render_pushed_shares(self, ctx, data):
+    @renderer
+    def pushed_shares(self, req, tag):
         d = self.upload_results()
-        d.addCallback(lambda res: res.get_pushed_shares())
+        d.addCallback(lambda res: str(res.get_pushed_shares()))
         return d

-    def render_preexisting_shares(self, ctx, data):
+    @renderer
+    def preexisting_shares(self, req, tag):
         d = self.upload_results()
-        d.addCallback(lambda res: res.get_preexisting_shares())
+        d.addCallback(lambda res: str(res.get_preexisting_shares()))
         return d

-    def render_sharemap(self, ctx, data):
+    @renderer
+    def sharemap(self, req, tag):
         d = self.upload_results()
         d.addCallback(lambda res: res.get_sharemap())
         def _render(sharemap):
             if sharemap is None:
                 return "None"
-            l = T.ul()
+            ul = tags.ul()
             for shnum, servers in sorted(sharemap.items()):
                 server_names = ', '.join([s.get_name() for s in servers])
-                l[T.li["%d -> placed on [%s]" % (shnum, server_names)]]
-            return l
+                ul(tags.li("%d -> placed on [%s]" % (shnum, server_names)))
+            return ul
         d.addCallback(_render)
         return d

-    def render_servermap(self, ctx, data):
+    @renderer
+    def servermap(self, req, tag):
         d = self.upload_results()
         d.addCallback(lambda res: res.get_servermap())
         def _render(servermap):
             if servermap is None:
                 return "None"
-            l = T.ul()
+            ul = tags.ul()
             for server, shnums in sorted(servermap.items()):
                 shares_s = ",".join(["#%d" % shnum for shnum in shnums])
-                l[T.li["[%s] got share%s: %s" % (server.get_name(),
-                                                 plural(shnums), shares_s)]]
-            return l
+                ul(tags.li("[%s] got share%s: %s" % (server.get_name(),
+                                                     plural(shnums), shares_s)))
+            return ul
         d.addCallback(_render)
         return d

-    def data_file_size(self, ctx, data):
+    @renderer
+    def file_size(self, req, tag):
         d = self.upload_results()
-        d.addCallback(lambda res: res.get_file_size())
+        d.addCallback(lambda res: str(res.get_file_size()))
         return d

     def _get_time(self, name):
         d = self.upload_results()
-        d.addCallback(lambda res: res.get_timings().get(name))
+        d.addCallback(lambda res: abbreviate_time(res.get_timings().get(name)))
         return d

-    def data_time_total(self, ctx, data):
-        return self._get_time("total")
+    @renderer
+    def time_total(self, req, tag):
+        return tag(self._get_time("total"))

-    def data_time_storage_index(self, ctx, data):
-        return self._get_time("storage_index")
+    @renderer
+    def time_storage_index(self, req, tag):
+        return tag(self._get_time("storage_index"))

-    def data_time_contacting_helper(self, ctx, data):
-        return self._get_time("contacting_helper")
+    @renderer
+    def time_contacting_helper(self, req, tag):
+        return tag(self._get_time("contacting_helper"))

-    def data_time_cumulative_fetch(self, ctx, data):
-        return self._get_time("cumulative_fetch")
+    @renderer
+    def time_cumulative_fetch(self, req, tag):
+        return tag(self._get_time("cumulative_fetch"))

-    def data_time_helper_total(self, ctx, data):
-        return self._get_time("helper_total")
+    @renderer
+    def time_helper_total(self, req, tag):
+        return tag(self._get_time("helper_total"))

-    def data_time_peer_selection(self, ctx, data):
-        return self._get_time("peer_selection")
+    @renderer
+    def time_peer_selection(self, req, tag):
+        return tag(self._get_time("peer_selection"))

-    def data_time_total_encode_and_push(self, ctx, data):
-        return self._get_time("total_encode_and_push")
+    @renderer
+    def time_total_encode_and_push(self, req, tag):
+        return tag(self._get_time("total_encode_and_push"))

-    def data_time_cumulative_encoding(self, ctx, data):
-        return self._get_time("cumulative_encoding")
+    @renderer
+    def time_cumulative_encoding(self, req, tag):
+        return tag(self._get_time("cumulative_encoding"))

-    def data_time_cumulative_sending(self, ctx, data):
-        return self._get_time("cumulative_sending")
+    @renderer
+    def time_cumulative_sending(self, req, tag):
+        return tag(self._get_time("cumulative_sending"))

-    def data_time_hashes_and_close(self, ctx, data):
-        return self._get_time("hashes_and_close")
+    @renderer
+    def time_hashes_and_close(self, req, tag):
+        return tag(self._get_time("hashes_and_close"))

     def _get_rate(self, name):
         d = self.upload_results()
         def _convert(r):
             file_size = r.get_file_size()
             duration = r.get_timings().get(name)
-            return compute_rate(file_size, duration)
+            return abbreviate_rate(compute_rate(file_size, duration))
         d.addCallback(_convert)
         return d

-    def data_rate_total(self, ctx, data):
-        return self._get_rate("total")
+    @renderer
+    def rate_total(self, req, tag):
+        return tag(self._get_rate("total"))

-    def data_rate_storage_index(self, ctx, data):
-        return self._get_rate("storage_index")
+    @renderer
+    def rate_storage_index(self, req, tag):
+        return tag(self._get_rate("storage_index"))

-    def data_rate_encode(self, ctx, data):
-        return self._get_rate("cumulative_encoding")
+    @renderer
+    def rate_encode(self, req, tag):
+        return tag(self._get_rate("cumulative_encoding"))

-    def data_rate_push(self, ctx, data):
+    @renderer
+    def rate_push(self, req, tag):
         return self._get_rate("cumulative_sending")

-    def data_rate_encode_and_push(self, ctx, data):
+    @renderer
+    def rate_encode_and_push(self, req, tag):
         d = self.upload_results()
         def _convert(r):
             file_size = r.get_file_size()
             time1 = r.get_timings().get("cumulative_encoding")
             time2 = r.get_timings().get("cumulative_sending")
             if (time1 is None or time2 is None):
-                return None
+                return abbreviate_rate(None)
             else:
-                return compute_rate(file_size, time1+time2)
+                return abbreviate_rate(compute_rate(file_size, time1+time2))
         d.addCallback(_convert)
         return d

-    def data_rate_ciphertext_fetch(self, ctx, data):
+    @renderer
+    def rate_ciphertext_fetch(self, req, tag):
         d = self.upload_results()
         def _convert(r):
             fetch_size = r.get_ciphertext_fetched()
             duration = r.get_timings().get("cumulative_fetch")
-            return compute_rate(fetch_size, duration)
+            return abbreviate_rate(compute_rate(fetch_size, duration))
         d.addCallback(_convert)
         return d

-class UploadStatusPage(UploadResultsRendererMixin, rend.Page):
-    docFactory = getxmlfile("upload-status.xhtml")
-
-    def __init__(self, data):
-        rend.Page.__init__(self, data)
-        self.upload_status = data
+
+class UploadStatusPage(Resource, object):
+    """Renders /status/up-%d."""
+
+    def __init__(self, upload_status):
+        """
+        :param IUploadStatus upload_status: stats provider.
+        """
+        super(UploadStatusPage, self).__init__()
+        self._upload_status = upload_status
+
+    def render_GET(self, req):
+        elem = UploadStatusElement(self._upload_status)
+        return renderElement(req, elem)
+
+
+class UploadStatusElement(UploadResultsRendererMixin):
+
+    loader = XMLFile(FilePath(__file__).sibling("upload-status.xhtml"))
+
+    def __init__(self, upload_status):
+        super(UploadStatusElement, self).__init__()
+        self._upload_status = upload_status

     def upload_results(self):
-        return defer.maybeDeferred(self.upload_status.get_results)
+        return defer.maybeDeferred(self._upload_status.get_results)

-    def render_results(self, ctx, data):
+    @renderer
+    def results(self, req, tag):
         d = self.upload_results()
         def _got_results(results):
             if results:
-                return ctx.tag
+                return tag
             return ""
         d.addCallback(_got_results)
         return d

-    def render_started(self, ctx, data):
-        started_s = render_time(data.get_started())
-        return started_s
+    @renderer
+    def started(self, req, tag):
+        started_s = render_time(self._upload_status.get_started())
+        return tag(started_s)

-    def render_si(self, ctx, data):
-        si_s = base32.b2a_or_none(data.get_storage_index())
+    @renderer
+    def si(self, req, tag):
+        si_s = base32.b2a_or_none(self._upload_status.get_storage_index())
         if si_s is None:
             si_s = "(None)"
-        return si_s
+        return tag(str(si_s))

-    def render_helper(self, ctx, data):
-        return {True: "Yes",
-                False: "No"}[data.using_helper()]
+    @renderer
+    def helper(self, req, tag):
+        return tag({True: "Yes",
+                    False: "No"}[self._upload_status.using_helper()])

-    def render_total_size(self, ctx, data):
-        size = data.get_size()
+    @renderer
+    def total_size(self, req, tag):
+        size = self._upload_status.get_size()
         if size is None:
             return "(unknown)"
-        return size
+        return tag(str(size))

-    def render_progress_hash(self, ctx, data):
-        progress = data.get_progress()[0]
+    @renderer
+    def progress_hash(self, req, tag):
+        progress = self._upload_status.get_progress()[0]
         # TODO: make an ascii-art bar
-        return "%.1f%%" % (100.0 * progress)
+        return tag("%.1f%%" % (100.0 * progress))

-    def render_progress_ciphertext(self, ctx, data):
-        progress = data.get_progress()[1]
+    @renderer
+    def progress_ciphertext(self, req, tag):
+        progress = self._upload_status.get_progress()[1]
         # TODO: make an ascii-art bar
         return "%.1f%%" % (100.0 * progress)

-    def render_progress_encode_push(self, ctx, data):
-        progress = data.get_progress()[2]
+    @renderer
+    def progress_encode_push(self, req, tag):
+        progress = self._upload_status.get_progress()[2]
         # TODO: make an ascii-art bar
-        return "%.1f%%" % (100.0 * progress)
+        return tag("%.1f%%" % (100.0 * progress))

-    def render_status(self, ctx, data):
-        return data.get_status()
+    @renderer
+    def status(self, req, tag):
+        return tag(self._upload_status.get_status())


 def _find_overlap(events, start_key, end_key):
src/allmydata/web/storage.py:
@@ -1,10 +1,16 @@

 import time, json
-from nevow import rend, tags as T
+from twisted.python.filepath import FilePath
+from twisted.web.template import (
+    Element,
+    XMLFile,
+    tags as T,
+    renderer,
+    renderElement
+)
 from allmydata.web.common import (
-    getxmlfile,
     abbreviate_time,
-    MultiFormatPage,
+    MultiFormatResource
 )
 from allmydata.util.abbreviate import abbreviate_space
 from allmydata.util import time_format, idlib
@@ -16,91 +22,108 @@ def remove_prefix(s, prefix):
     return s[len(prefix):]


-class StorageStatus(MultiFormatPage):
-    docFactory = getxmlfile("storage_status.xhtml")
-    # the default 'data' argument is the StorageServer instance
+class StorageStatusElement(Element):
+    """Class to render a storage status page."""
+
+    loader = XMLFile(FilePath(__file__).sibling("storage_status.xhtml"))

     def __init__(self, storage, nickname=""):
-        rend.Page.__init__(self, storage)
-        self.storage = storage
-        self.nickname = nickname
+        """
+        :param _StorageServer storage: data about storage.
+        :param string nickname: friendly name for storage.
+        """
+        super(StorageStatusElement, self).__init__()
+        self._storage = storage
+        self._nickname = nickname

-    def render_JSON(self, req):
-        req.setHeader("content-type", "text/plain")
-        d = {"stats": self.storage.get_stats(),
-             "bucket-counter": self.storage.bucket_counter.get_state(),
-             "lease-checker": self.storage.lease_checker.get_state(),
-             "lease-checker-progress": self.storage.lease_checker.get_progress(),
-             }
-        return json.dumps(d, indent=1) + "\n"
+    @renderer
+    def nickname(self, req, tag):
+        return tag(self._nickname)

-    def data_nickname(self, ctx, storage):
-        return self.nickname
-    def data_nodeid(self, ctx, storage):
-        return idlib.nodeid_b2a(self.storage.my_nodeid)
+    @renderer
+    def nodeid(self, req, tag):
+        return tag(idlib.nodeid_b2a(self._storage.my_nodeid))

-    def render_storage_running(self, ctx, storage):
-        if storage:
-            return ctx.tag
-        else:
-            return T.h1["No Storage Server Running"]
+    def _get_storage_stat(self, key):
+        """Get storage server statistics.
+
+        Storage Server keeps a dict that contains various usage and
+        latency statistics.  The dict looks like this:
+
+          {
+            'storage_server.accepting_immutable_shares': 1,
+            'storage_server.allocated': 0,
+            'storage_server.disk_avail': 106539192320,
+            'storage_server.disk_free_for_nonroot': 106539192320,
+            'storage_server.disk_free_for_root': 154415284224,
+            'storage_server.disk_total': 941088460800,
+            'storage_server.disk_used': 786673176576,
+            'storage_server.latencies.add-lease.01_0_percentile': None,
+            'storage_server.latencies.add-lease.10_0_percentile': None,
+            ...
+          }
+
+        ``StorageServer.get_stats()`` returns the above dict.  Storage
+        status page uses a subset of the items in the dict, concerning
+        disk usage.
+
+        :param str key: storage server statistic we want to know.
+        """
+        return self._storage.get_stats().get(key)

-    def render_bool(self, ctx, data):
-        return {True: "Yes", False: "No"}[bool(data)]
-
-    def render_abbrev_space(self, ctx, size):
+    def render_abbrev_space(self, size):
         if size is None:
-            return "?"
+            return u"?"
         return abbreviate_space(size)

-    def render_space(self, ctx, size):
+    def render_space(self, size):
         if size is None:
-            return "?"
-        return "%d" % size
+            return u"?"
+        return u"%d" % size

-    def data_stats(self, ctx, data):
-        # FYI: 'data' appears to be self, rather than the StorageServer
-        # object in self.original that gets passed to render_* methods.  I
-        # still don't understand Nevow.
-
-        # Nevow has nevow.accessors.DictionaryContainer: Any data= directive
-        # that appears in a context in which the current data is a dictionary
-        # will be looked up as keys in that dictionary.  So if data_stats()
-        # returns a dictionary, then we can use something like this:
-        #
-        #  <ul n:data="stats">
-        #   <li>disk_total: <span n:render="abbrev" n:data="disk_total" /></li>
-        #  </ul>
-
-        # to use get_stats()["storage_server.disk_total"].  However,
-        # DictionaryContainer does a raw d[] instead of d.get(), so any
-        # missing keys will cause an error, even if the renderer can tolerate
-        # None values.  To overcome this, we either need a dict-like object
-        # that always returns None for unknown keys, or we must pre-populate
-        # our dict with those missing keys, or we should get rid of data_
-        # methods that return dicts (or find some way to override Nevow's
-        # handling of dictionaries).
-
-        d = dict([ (remove_prefix(k, "storage_server."), v)
-                   for k,v in self.storage.get_stats().items() ])
-        d.setdefault("disk_total", None)
-        d.setdefault("disk_used", None)
-        d.setdefault("disk_free_for_root", None)
-        d.setdefault("disk_free_for_nonroot", None)
-        d.setdefault("reserved_space", None)
-        d.setdefault("disk_avail", None)
-        return d
+    @renderer
+    def storage_stats(self, req, tag):
+        # Render storage status table that appears near the top of the page.
+        total = self._get_storage_stat("storage_server.disk_total")
+        used = self._get_storage_stat("storage_server.disk_used")
+        free_root = self._get_storage_stat("storage_server.disk_free_for_root")
+        free_nonroot = self._get_storage_stat("storage_server.disk_free_for_nonroot")
+        reserved = self._get_storage_stat("storage_server.reserved_space")
+        available = self._get_storage_stat("storage_server.disk_avail")
+
+        tag.fillSlots(
+            disk_total = self.render_space(total),
+            disk_total_abbrev = self.render_abbrev_space(total),
+            disk_used = self.render_space(used),
+            disk_used_abbrev = self.render_abbrev_space(used),
+            disk_free_for_root = self.render_space(free_root),
+            disk_free_for_root_abbrev = self.render_abbrev_space(free_root),
+            disk_free_for_nonroot = self.render_space(free_nonroot),
+            disk_free_for_nonroot_abbrev = self.render_abbrev_space(free_nonroot),
+            reserved_space = self.render_space(reserved),
+            reserved_space_abbrev = self.render_abbrev_space(reserved),
+            disk_avail = self.render_space(available),
+            disk_avail_abbrev = self.render_abbrev_space(available)
+        )
+        return tag
+
+    @renderer
+    def accepting_immutable_shares(self, req, tag):
+        accepting = self._get_storage_stat("storage_server.accepting_immutable_shares")
+        return tag({True: "Yes", False: "No"}[bool(accepting)])

-    def data_last_complete_bucket_count(self, ctx, data):
-        s = self.storage.bucket_counter.get_state()
+    @renderer
+    def last_complete_bucket_count(self, req, tag):
+        s = self._storage.bucket_counter.get_state()
         count = s.get("last-complete-bucket-count")
         if count is None:
-            return "Not computed yet"
-        return count
+            return tag("Not computed yet")
+        return tag(str(count))

-    def render_count_crawler_status(self, ctx, storage):
-        p = self.storage.bucket_counter.get_progress()
-        return ctx.tag[self.format_crawler_progress(p)]
+    @renderer
+    def count_crawler_status(self, req, tag):
+        p = self._storage.bucket_counter.get_progress()
+        return tag(self.format_crawler_progress(p))

     def format_crawler_progress(self, p):
         cycletime = p["estimated-time-per-cycle"]
@ -127,56 +150,52 @@ class StorageStatus(MultiFormatPage):
|
||||
return ["Next crawl in %s" % abbreviate_time(soon),
|
||||
cycletime_s]
|
||||
|
||||
def render_lease_expiration_enabled(self, ctx, data):
|
||||
lc = self.storage.lease_checker
|
||||
if lc.expiration_enabled:
|
||||
return ctx.tag["Enabled: expired leases will be removed"]
|
||||
else:
|
||||
return ctx.tag["Disabled: scan-only mode, no leases will be removed"]
|
||||
@renderer
|
||||
def storage_running(self, req, tag):
|
||||
if self._storage:
|
||||
return tag
|
||||
return T.h1("No Storage Server Running")
|
||||
|
||||
def render_lease_expiration_mode(self, ctx, data):
|
||||
lc = self.storage.lease_checker
|
||||
@renderer
|
||||
def lease_expiration_enabled(self, req, tag):
|
||||
lc = self._storage.lease_checker
|
||||
if lc.expiration_enabled:
|
||||
return tag("Enabled: expired leases will be removed")
|
||||
else:
|
||||
return tag("Disabled: scan-only mode, no leases will be removed")
|
||||
|
||||
@renderer
|
||||
def lease_expiration_mode(self, req, tag):
|
||||
lc = self._storage.lease_checker
|
||||
if lc.mode == "age":
|
||||
if lc.override_lease_duration is None:
|
||||
ctx.tag["Leases will expire naturally, probably 31 days after "
|
||||
"creation or renewal."]
|
||||
tag("Leases will expire naturally, probably 31 days after "
|
||||
"creation or renewal.")
|
||||
else:
|
||||
ctx.tag["Leases created or last renewed more than %s ago "
|
||||
"will be considered expired."
|
||||
% abbreviate_time(lc.override_lease_duration)]
|
||||
tag("Leases created or last renewed more than %s ago "
|
||||
"will be considered expired."
|
||||
% abbreviate_time(lc.override_lease_duration))
|
||||
else:
|
||||
assert lc.mode == "cutoff-date"
|
||||
localizedutcdate = time.strftime("%d-%b-%Y", time.gmtime(lc.cutoff_date))
|
||||
isoutcdate = time_format.iso_utc_date(lc.cutoff_date)
|
||||
ctx.tag["Leases created or last renewed before %s (%s) UTC "
|
||||
"will be considered expired." % (isoutcdate, localizedutcdate, )]
|
||||
tag("Leases created or last renewed before %s (%s) UTC "
|
||||
"will be considered expired."
|
||||
% (isoutcdate, localizedutcdate, ))
|
||||
if len(lc.mode) > 2:
|
||||
ctx.tag[" The following sharetypes will be expired: ",
|
||||
" ".join(sorted(lc.sharetypes_to_expire)), "."]
|
||||
return ctx.tag
|
||||
tag(" The following sharetypes will be expired: ",
|
||||
" ".join(sorted(lc.sharetypes_to_expire)), ".")
|
||||
return tag
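The mechanical rule in this hunk is that Nevow's ctx.tag[children] becomes tag(children): a twisted.web.template Tag is callable, and calling it appends children and returns the same tag, so renderers keep their shape across the conversion. A small sketch (names illustrative):

from twisted.web.template import tags

# Nevow:        ctx.tag["Expiration ", T.b["enabled"]]
# twisted.web:  tag("Expiration ", tags.b("enabled"))
item = tags.li()
item("Expiration ", tags.b("enabled"))  # appends children, returns item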
    def format_recovered(self, sr, a):
        def maybe(d):
            if d is None:
                return "?"
            return "%d" % d
        return "%s shares, %s buckets (%s mutable / %s immutable), %s (%s / %s)" % \
               (maybe(sr["%s-shares" % a]),
                maybe(sr["%s-buckets" % a]),
                maybe(sr["%s-buckets-mutable" % a]),
                maybe(sr["%s-buckets-immutable" % a]),
                abbreviate_space(sr["%s-diskbytes" % a]),
                abbreviate_space(sr["%s-diskbytes-mutable" % a]),
                abbreviate_space(sr["%s-diskbytes-immutable" % a]),
                )

    def render_lease_current_cycle_progress(self, ctx, data):
        lc = self.storage.lease_checker
    @renderer
    def lease_current_cycle_progress(self, req, tag):
        lc = self._storage.lease_checker
        p = lc.get_progress()
        return ctx.tag[self.format_crawler_progress(p)]
        return tag(self.format_crawler_progress(p))

    def render_lease_current_cycle_results(self, ctx, data):
        lc = self.storage.lease_checker
    @renderer
    def lease_current_cycle_results(self, req, tag):
        lc = self._storage.lease_checker
        p = lc.get_progress()
        if not p["cycle-in-progress"]:
            return ""

@ -190,7 +209,7 @@ class StorageStatus(MultiFormatPage):

        p = T.ul()
        def add(*pieces):
            p[T.li[pieces]]
            p(T.li(pieces))

        def maybe(d):
            if d is None:

@ -226,29 +245,29 @@ class StorageStatus(MultiFormatPage):

        if so_far["corrupt-shares"]:
            add("Corrupt shares:",
                T.ul[ [T.li[ ["SI %s shnum %d" % corrupt_share
                T.ul( (T.li( ["SI %s shnum %d" % corrupt_share
                              for corrupt_share in so_far["corrupt-shares"] ]
                      ]]])
                      ))))
        return tag("Current cycle:", p)

        return ctx.tag["Current cycle:", p]

    def render_lease_last_cycle_results(self, ctx, data):
        lc = self.storage.lease_checker
    @renderer
    def lease_last_cycle_results(self, req, tag):
        lc = self._storage.lease_checker
        h = lc.get_state()["history"]
        if not h:
            return ""
        last = h[max(h.keys())]

        start, end = last["cycle-start-finish-times"]
        ctx.tag["Last complete cycle (which took %s and finished %s ago)"
                " recovered: " % (abbreviate_time(end-start),
                                  abbreviate_time(time.time() - end)),
                self.format_recovered(last["space-recovered"], "actual")
                ]
        tag("Last complete cycle (which took %s and finished %s ago)"
            " recovered: " % (abbreviate_time(end-start),
                              abbreviate_time(time.time() - end)),
            self.format_recovered(last["space-recovered"], "actual"))

        p = T.ul()

        def add(*pieces):
            p[T.li[pieces]]
            p(T.li(pieces))

        saw = self.format_recovered(last["space-recovered"], "examined")
        add("and saw a total of ", saw)

@ -260,8 +279,42 @@ class StorageStatus(MultiFormatPage):

        if last["corrupt-shares"]:
            add("Corrupt shares:",
                T.ul[ [T.li[ ["SI %s shnum %d" % corrupt_share
                T.ul( (T.li( ["SI %s shnum %d" % corrupt_share
                              for corrupt_share in last["corrupt-shares"] ]
                      ]]])
                      ))))

        return ctx.tag[p]
        return tag(p)

    @staticmethod
    def format_recovered(sr, a):
        def maybe(d):
            if d is None:
                return "?"
            return "%d" % d
        return "%s shares, %s buckets (%s mutable / %s immutable), %s (%s / %s)" % \
               (maybe(sr["%s-shares" % a]),
                maybe(sr["%s-buckets" % a]),
                maybe(sr["%s-buckets-mutable" % a]),
                maybe(sr["%s-buckets-immutable" % a]),
                abbreviate_space(sr["%s-diskbytes" % a]),
                abbreviate_space(sr["%s-diskbytes-mutable" % a]),
                abbreviate_space(sr["%s-diskbytes-immutable" % a]),
                )
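For reference, format_recovered() consumes one of the lease checker's "space-recovered" dicts. A hypothetical call, with invented sample values:

# Hypothetical "space-recovered" dict; real ones come from
# lease_checker.get_progress() / get_state() and may hold None
# values, which maybe() renders as "?".
sr = {
    "actual-shares": 4,
    "actual-buckets": 2,
    "actual-buckets-mutable": 1,
    "actual-buckets-immutable": 1,
    "actual-diskbytes": 65536,
    "actual-diskbytes-mutable": 4096,
    "actual-diskbytes-immutable": 61440,
}
StorageStatusElement.format_recovered(sr, "actual")
# -> "4 shares, 2 buckets (1 mutable / 1 immutable), ... (... / ...)"
#    where the "..." parts are abbreviate_space() renderings of the byte counts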
class StorageStatus(MultiFormatResource):
    def __init__(self, storage, nickname=""):
        super(StorageStatus, self).__init__()
        self._storage = storage
        self._nickname = nickname

    def render_HTML(self, req):
        return renderElement(req, StorageStatusElement(self._storage, self._nickname))

    def render_JSON(self, req):
        req.setHeader("content-type", "text/plain")
        d = {"stats": self._storage.get_stats(),
             "bucket-counter": self._storage.bucket_counter.get_state(),
             "lease-checker": self._storage.lease_checker.get_state(),
             "lease-checker-progress": self._storage.lease_checker.get_progress(),
             }
        return json.dumps(d, indent=1) + "\n"
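MultiFormatResource dispatches on the request's t= argument, so one URL serves both render_HTML and render_JSON. Assuming a node with the default web port 3456 (the URL here is illustrative), a client could fetch the JSON form like this:

import json
import urllib2  # Python 2, matching this codebase

# /storage is where the node hangs this resource; ?t=json selects render_JSON.
body = urllib2.urlopen("http://127.0.0.1:3456/storage?t=json").read()
stats = json.loads(body)
print(stats["lease-checker"]["history"])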
@ -1,4 +1,4 @@
<html xmlns:n="http://nevow.com/ns/nevow/0.1">
<html xmlns:t="http://twistedmatrix.com/ns/twisted.web.template/0.1">
  <head>
    <title>Tahoe-LAFS - Storage Server Status</title>
    <link href="/tahoe.css" rel="stylesheet" type="text/css"/>
@ -7,19 +7,19 @@
  </head>
  <body>

<div n:render="storage_running">
<div t:render="storage_running">

<h1>Storage Server Status</h1>

<table n:data="stats">
<table class="storage_status" t:render="storage_stats">
  <tr><td>Total disk space:</td>
      <td><span n:render="abbrev_space" n:data="disk_total" /></td>
      <td>(<span n:render="space" n:data="disk_total" />)</td>
      <td><t:slot name="disk_total_abbrev" /></td>
      <td>(<t:slot name="disk_total" />)</td>
      <td />
  </tr>
  <tr><td>Disk space used:</td>
      <td>- <span n:render="abbrev_space" n:data="disk_used" /></td>
      <td>(<span n:render="space" n:data="disk_used" />)</td>
      <td>- <t:slot name="disk_used_abbrev" /></td>
      <td>(<t:slot name="disk_used" />)</td>
      <td />
  </tr>
  <tr><td />
@ -28,18 +28,18 @@
      <td />
  </tr>
  <tr><td>Disk space free (root):</td>
      <td><span n:render="abbrev_space" n:data="disk_free_for_root"/></td>
      <td>(<span n:render="space" n:data="disk_free_for_root"/>)</td>
      <td><t:slot name="disk_free_for_root_abbrev"/></td>
      <td>(<t:slot name="disk_free_for_root"/>)</td>
      <td>[see 1]</td>
  </tr>
  <tr><td>Disk space free (non-root):</td>
      <td><span n:render="abbrev_space" n:data="disk_free_for_nonroot" /></td>
      <td>(<span n:render="space" n:data="disk_free_for_nonroot" />)</td>
      <td><t:slot name="disk_free_for_nonroot_abbrev" /></td>
      <td>(<t:slot name="disk_free_for_nonroot" />)</td>
      <td>[see 2]</td>
  </tr>
  <tr><td>Reserved space:</td>
      <td>- <span n:render="abbrev_space" n:data="reserved_space" /></td>
      <td>(<span n:render="space" n:data="reserved_space" />)</td>
      <td>- <t:slot name="reserved_space_abbrev" /></td>
      <td>(<t:slot name="reserved_space" />)</td>
      <td />
  </tr>
  <tr><td />
@ -48,23 +48,23 @@
      <td />
  </tr>
  <tr><td>Space Available to Tahoe:</td>
      <td><span n:render="abbrev_space" n:data="disk_avail" /></td>
      <td>(<span n:render="space" n:data="disk_avail" />)</td>
      <td><t:slot name="disk_avail_abbrev" /></td>
      <td>(<t:slot name="disk_avail" />)</td>
      <td />
  </tr>
</table>

<ul>
  <li>Server Nickname: <span class="nickname mine" n:render="data" n:data="nickname" /></li>
  <li>Server Nodeid: <span class="nodeid mine data-chars" n:render="string" n:data="nodeid" /></li>
  <li n:data="stats">Accepting new shares:
      <span n:render="bool" n:data="accepting_immutable_shares" /></li>
  <li>Server Nickname: <span class="nickname mine"><t:transparent t:render="nickname" /></span></li>
  <li>Server Nodeid: <span class="nodeid mine data-chars"> <t:transparent t:render="nodeid" /></span></li>
  <li>Accepting new shares:
      <span t:render="accepting_immutable_shares" /></li>
  <li>Total buckets:
      <span n:render="string" n:data="last_complete_bucket_count" />
      <span t:render="last_complete_bucket_count" />
      (the number of files and directories for which this server is holding
      a share)
      <ul>
        <li n:render="count_crawler_status" />
        <li><span t:render="count_crawler_status" /></li>
      </ul>
  </li>
</ul>
@ -72,11 +72,11 @@
<h2>Lease Expiration Crawler</h2>

<ul>
  <li>Expiration <span n:render="lease_expiration_enabled" /></li>
  <li n:render="lease_expiration_mode" />
  <li n:render="lease_current_cycle_progress" />
  <li n:render="lease_current_cycle_results" />
  <li n:render="lease_last_cycle_results" />
  <li>Expiration <span t:render="lease_expiration_enabled" /></li>
  <li t:render="lease_expiration_mode" />
  <li t:render="lease_current_cycle_progress" />
  <li t:render="lease_current_cycle_results" />
  <li t:render="lease_last_cycle_results" />
</ul>

<hr />

@ -2,11 +2,25 @@
import urllib
from twisted.web import http
from twisted.internet import defer
from nevow import rend, url, tags as T
from twisted.python.filepath import FilePath
from twisted.web.resource import Resource
from twisted.web.template import (
    XMLFile,
    renderer,
    renderElement,
    tags,
)
from nevow import url
from allmydata.immutable.upload import FileHandle
from allmydata.mutable.publish import MutableFileHandle
from allmydata.web.common import getxmlfile, get_arg, boolean_of_arg, \
     convert_children_json, WebError, get_format, get_mutable_type
from allmydata.web.common import (
    get_arg,
    boolean_of_arg,
    convert_children_json,
    WebError,
    get_format,
    get_mutable_type,
)
from allmydata.web import status

def PUTUnlinkedCHK(req, client):
@ -59,34 +73,53 @@ def POSTUnlinkedCHK(req, client):
    return d


class UploadResultsPage(status.UploadResultsRendererMixin, rend.Page):
class UploadResultsPage(Resource, object):
    """'POST /uri', to create an unlinked file."""
    docFactory = getxmlfile("upload-results.xhtml")

    def __init__(self, upload_results):
        rend.Page.__init__(self)
        self.results = upload_results
        """
        :param IUploadResults upload_results: stats provider.
        """
        super(UploadResultsPage, self).__init__()
        self._upload_results = upload_results

    def render_POST(self, req):
        elem = UploadResultsElement(self._upload_results)
        return renderElement(req, elem)
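This shows the Resource-plus-Element split used throughout the conversion: the Resource subclass handles the HTTP method and hands markup off to an Element via renderElement(), which writes the flattened document to the request and returns NOT_DONE_YET. A minimal sketch with illustrative names:

from twisted.web.resource import Resource
from twisted.web.template import Element, XMLString, renderElement

class GreetingElement(Element):
    # Inline template for brevity; the pages above use XMLFile(...) loaders.
    loader = XMLString(
        '<p xmlns:t="http://twistedmatrix.com/ns/twisted.web.template/0.1">'
        'hello</p>')

class GreetingPage(Resource, object):
    def render_GET(self, req):
        # renderElement() flattens the Element into req and returns
        # NOT_DONE_YET, which is why render_POST above returns it directly.
        return renderElement(req, GreetingElement())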

class UploadResultsElement(status.UploadResultsRendererMixin):

    loader = XMLFile(FilePath(__file__).sibling("upload-results.xhtml"))

    def __init__(self, upload_results):
        super(UploadResultsElement, self).__init__()
        self._upload_results = upload_results

    def upload_results(self):
        return defer.succeed(self.results)
        return defer.succeed(self._upload_results)

    def data_done(self, ctx, data):
    @renderer
    def done(self, req, tag):
        d = self.upload_results()
        d.addCallback(lambda res: "done!")
        return d

    def data_uri(self, ctx, data):
    @renderer
    def uri(self, req, tag):
        d = self.upload_results()
        d.addCallback(lambda res: res.get_uri())
        return d

    def render_download_link(self, ctx, data):
    @renderer
    def download_link(self, req, tag):
        d = self.upload_results()
        d.addCallback(lambda res:
                      T.a(href="/uri/" + urllib.quote(res.get_uri()))
                      ["/uri/" + res.get_uri()])
                      tags.a("/uri/" + res.get_uri(),
                             href="/uri/" + urllib.quote(res.get_uri())))
        return d
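These renderers can keep returning d because twisted.web.template's flattener accepts a Deferred anywhere renderable content is expected and simply waits for it before flattening the result. A minimal sketch, with illustrative names:

from twisted.internet import defer
from twisted.web.template import Element, XMLString, renderer

class DeferredGreeting(Element):
    loader = XMLString(
        '<p xmlns:t="http://twistedmatrix.com/ns/twisted.web.template/0.1">'
        '<t:transparent t:render="greeting" /></p>')

    @renderer
    def greeting(self, request, tag):
        # A renderer may return a Deferred; the flattener waits for it and
        # then flattens whatever it fires with (here, the tag with a child).
        d = defer.succeed("hello")
        d.addCallback(lambda text: tag(text))
        return d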

def POSTUnlinkedSSK(req, client, version):
    # "POST /uri", to create an unlinked file.
    # SDMF: files are small, and we can only upload data

@ -1,4 +1,4 @@
<html xmlns:n="http://nevow.com/ns/nevow/0.1">
<html xmlns:t="http://twistedmatrix.com/ns/twisted.web.template/0.1">
  <head>
    <title>Tahoe-LAFS - File Uploaded</title>
    <link href="/tahoe.css" rel="stylesheet" type="text/css"/>
@ -7,37 +7,37 @@
  </head>
  <body>

<h1>Uploading File... <span n:render="string" n:data="done" /></h1>
<h1>Uploading File... <t:transparent t:render="done" /></h1>

<h2>Upload Results:</h2>
<ul>
  <li>URI: <tt><span n:render="string" n:data="uri" /></tt></li>
  <li>Download link: <span n:render="download_link" /></li>
  <li>Sharemap: <span n:render="sharemap" /></li>
  <li>Servermap: <span n:render="servermap" /></li>
  <li>URI: <tt><span><t:transparent t:render="uri" /></span></tt></li>
  <li>Download link: <t:transparent t:render="download_link" /></li>
  <li>Sharemap: <t:transparent t:render="sharemap" /></li>
  <li>Servermap: <t:transparent t:render="servermap" /></li>
  <li>Timings:</li>
  <ul>
    <li>File Size: <span n:render="string" n:data="file_size" /> bytes</li>
    <li>Total: <span n:render="time" n:data="time_total" />
        (<span n:render="rate" n:data="rate_total" />)</li>
    <li>File Size: <t:transparent t:render="file_size" /> bytes</li>
    <li>Total: <t:transparent t:render="time_total" />
        (<t:transparent t:render="rate_total" />)</li>
    <ul>
      <li>Storage Index: <span n:render="time" n:data="time_storage_index" />
          (<span n:render="rate" n:data="rate_storage_index" />)</li>
      <li>[Contacting Helper]: <span n:render="time" n:data="time_contacting_helper" /></li>
      <li>[Upload Ciphertext To Helper]: <span n:render="time" n:data="time_cumulative_fetch" />
          (<span n:render="rate" n:data="rate_ciphertext_fetch" />)</li>
      <li>Storage Index: <t:transparent t:render="time_storage_index" />
          (<t:transparent t:render="rate_storage_index" />)</li>
      <li>[Contacting Helper]: <t:transparent t:render="time_contacting_helper" /></li>
      <li>[Upload Ciphertext To Helper]: <t:transparent t:render="time_cumulative_fetch" />
          (<t:transparent t:render="rate_ciphertext_fetch" />)</li>

      <li>Peer Selection: <span n:render="time" n:data="time_peer_selection" /></li>
      <li>Encode And Push: <span n:render="time" n:data="time_total_encode_and_push" />
          (<span n:render="rate" n:data="rate_encode_and_push" />)</li>
      <li>Peer Selection: <t:transparent t:render="time_peer_selection" /></li>
      <li>Encode And Push: <t:transparent t:render="time_total_encode_and_push" />
          (<t:transparent t:render="rate_encode_and_push" />)</li>
      <ul>
        <li>Cumulative Encoding: <span n:render="time" n:data="time_cumulative_encoding" />
            (<span n:render="rate" n:data="rate_encode" />)</li>
        <li>Cumulative Pushing: <span n:render="time" n:data="time_cumulative_sending" />
            (<span n:render="rate" n:data="rate_push" />)</li>
        <li>Send Hashes And Close: <span n:render="time" n:data="time_hashes_and_close" /></li>
        <li>Cumulative Encoding: <t:transparent t:render="time_cumulative_encoding" />
            (<t:transparent t:render="rate_encode" />)</li>
        <li>Cumulative Pushing: <t:transparent t:render="time_cumulative_sending" />
            (<t:transparent t:render="rate_push" />)</li>
        <li>Send Hashes And Close: <t:transparent t:render="time_hashes_and_close" /></li>
      </ul>
      <li>[Helper Total]: <span n:render="time" n:data="time_helper_total" /></li>
      <li>[Helper Total]: <t:transparent t:render="time_helper_total" /></li>
    </ul>
  </ul>
</ul>

@ -1,4 +1,4 @@
<html xmlns:n="http://nevow.com/ns/nevow/0.1">
<html xmlns:t="http://twistedmatrix.com/ns/twisted.web.template/0.1">
  <head>
    <title>Tahoe-LAFS - File Upload Status</title>
    <link href="/tahoe.css" rel="stylesheet" type="text/css"/>
@ -10,46 +10,46 @@
<h1>File Upload Status</h1>

<ul>
  <li>Started: <span n:render="started"/></li>
  <li>Storage Index: <span n:render="si"/></li>
  <li>Helper?: <span n:render="helper"/></li>
  <li>Total Size: <span n:render="total_size"/></li>
  <li>Progress (Hash): <span n:render="progress_hash"/></li>
  <li>Progress (Ciphertext): <span n:render="progress_ciphertext"/></li>
  <li>Progress (Encode+Push): <span n:render="progress_encode_push"/></li>
  <li>Status: <span n:render="status"/></li>
  <li>Started: <t:transparent t:render="started"/></li>
  <li>Storage Index: <t:transparent t:render="si"/></li>
  <li>Helper?: <t:transparent t:render="helper"/></li>
  <li>Total Size: <t:transparent t:render="total_size"/></li>
  <li>Progress (Hash): <t:transparent t:render="progress_hash"/></li>
  <li>Progress (Ciphertext): <t:transparent t:render="progress_ciphertext"/></li>
  <li>Progress (Encode+Push): <t:transparent t:render="progress_encode_push"/></li>
  <li>Status: <t:transparent t:render="status"/></li>
</ul>

<div n:render="results">
<div t:render="results">
  <h2>Upload Results</h2>
  <ul>
    <li>Shares Pushed: <span n:render="pushed_shares" /></li>
    <li>Shares Already Present: <span n:render="preexisting_shares" /></li>
    <li>Sharemap: <span n:render="sharemap" /></li>
    <li>Servermap: <span n:render="servermap" /></li>
    <li>Shares Pushed: <t:transparent t:render="pushed_shares" /></li>
    <li>Shares Already Present: <t:transparent t:render="preexisting_shares" /></li>
    <li>Sharemap: <t:transparent t:render="sharemap" /></li>
    <li>Servermap: <t:transparent t:render="servermap" /></li>
    <li>Timings:</li>
    <ul>
      <li>File Size: <span n:render="string" n:data="file_size" /> bytes</li>
      <li>Total: <span n:render="time" n:data="time_total" />
          (<span n:render="rate" n:data="rate_total" />)</li>
      <li>File Size: <t:transparent t:render="file_size" /> bytes</li>
      <li>Total: <t:transparent t:render="time_total" />
          (<t:transparent t:render="rate_total" />)</li>
      <ul>
        <li>Storage Index: <span n:render="time" n:data="time_storage_index" />
            (<span n:render="rate" n:data="rate_storage_index" />)</li>
        <li>[Contacting Helper]: <span n:render="time" n:data="time_contacting_helper" /></li>
        <li>[Upload Ciphertext To Helper]: <span n:render="time" n:data="time_cumulative_fetch" />
            (<span n:render="rate" n:data="rate_ciphertext_fetch" />)</li>
        <li>Storage Index: <t:transparent t:render="time_storage_index" />
            (<t:transparent t:render="rate_storage_index" />)</li>
        <li>[Contacting Helper]: <t:transparent t:render="time_contacting_helper" /></li>
        <li>[Upload Ciphertext To Helper]: <t:transparent t:render="time_cumulative_fetch" />
            (<t:transparent t:render="rate_ciphertext_fetch" />)</li>

        <li>Peer Selection: <span n:render="time" n:data="time_peer_selection" /></li>
        <li>Encode And Push: <span n:render="time" n:data="time_total_encode_and_push" />
            (<span n:render="rate" n:data="rate_encode_and_push" />)</li>
        <li>Peer Selection: <t:transparent t:render="time_peer_selection" /></li>
        <li>Encode And Push: <t:transparent t:render="time_total_encode_and_push" />
            (<t:transparent t:render="rate_encode_and_push" />)</li>
        <ul>
          <li>Cumulative Encoding: <span n:render="time" n:data="time_cumulative_encoding" />
              (<span n:render="rate" n:data="rate_encode" />)</li>
          <li>Cumulative Pushing: <span n:render="time" n:data="time_cumulative_sending" />
              (<span n:render="rate" n:data="rate_push" />)</li>
          <li>Send Hashes And Close: <span n:render="time" n:data="time_hashes_and_close" /></li>
          <li>Cumulative Encoding: <t:transparent t:render="time_cumulative_encoding" />
              (<t:transparent t:render="rate_encode" />)</li>
          <li>Cumulative Pushing: <t:transparent t:render="time_cumulative_sending" />
              (<t:transparent t:render="rate_push" />)</li>
          <li>Send Hashes And Close: <t:transparent t:render="time_hashes_and_close" /></li>
        </ul>
        <li>[Helper Total]: <span n:render="time" n:data="time_helper_total" /></li>
        <li>[Helper Total]: <t:transparent t:render="time_helper_total" /></li>
      </ul>
    </ul>
  </ul>