Merge pull request #639 from meejah/ticket3227-remove-child-attributes-nevow

Ticket 3227: remove child_ attributes/method use (nevow port)
meejah 2019-08-29 18:04:40 +00:00 committed by GitHub
commit 5f14297526
13 changed files with 771 additions and 190 deletions
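
The change this PR makes throughout the web tree is a move from nevow's attribute-based child lookup to twisted.web's explicit registration. As a rough, hypothetical sketch (none of these classes exist in Tahoe-LAFS; they are only here to contrast the two styles the diff below converts between):

from twisted.web import resource


class FruitPage(resource.Resource):
    # stand-in resource, purely for illustration
    isLeaf = True

    def render_GET(self, request):
        return b"fruit\n"


# Old style (nevow.rend.Page): a child_<name> attribute, or a childFactory()
# method, exposes a child resource at /<name>:
#
#     class OldRoot(rend.Page):
#         child_fruit = FruitPage()
#
#         def childFactory(self, ctx, name):
#             if name == "fruit":
#                 return FruitPage()


# New style (twisted.web.resource.Resource): children are registered with
# putChild(), or constructed per-request by overriding getChild():
class NewRoot(resource.Resource):
    def __init__(self):
        resource.Resource.__init__(self)
        self.putChild("fruit", FruitPage())        # static registration

    def getChild(self, path, request):
        if path == "dynamic-fruit":                # built on every request
            return FruitPage()
        return resource.Resource.getChild(self, path, request)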

View File

@@ -34,6 +34,7 @@ from util import (
     _cleanup_tahoe_process,
     _tahoe_runner_optional_coverage,
     await_client_ready,
+    TahoeProcess,
 )
@@ -202,7 +203,7 @@ log_gatherer.furl = {log_furl}
     # but on linux it means daemonize. "tahoe run" is consistent
     # between platforms.
     protocol = _MagicTextProtocol('introducer running')
-    process = _tahoe_runner_optional_coverage(
+    transport = _tahoe_runner_optional_coverage(
         protocol,
         reactor,
         request,
@@ -211,10 +212,10 @@ log_gatherer.furl = {log_furl}
             intro_dir,
         ),
     )
-    request.addfinalizer(partial(_cleanup_tahoe_process, process, protocol.exited))
+    request.addfinalizer(partial(_cleanup_tahoe_process, transport, protocol.exited))

     pytest_twisted.blockon(protocol.magic_seen)
-    return process
+    return TahoeProcess(transport, intro_dir)


 @pytest.fixture(scope='session')
@@ -312,11 +313,12 @@ def storage_nodes(reactor, temp_dir, introducer, introducer_furl, flog_gatherer,
     # start all 5 nodes in parallel
     for x in range(5):
         name = 'node{}'.format(x)
-        # tub_port = 9900 + x
+        web_port= 9990 + x
         nodes_d.append(
             _create_node(
                 reactor, request, temp_dir, introducer_furl, flog_gatherer, name,
-                web_port=None, storage=True,
+                web_port="tcp:{}:interface=localhost".format(web_port),
+                storage=True,
             )
         )
     nodes_status = pytest_twisted.blockon(DeferredList(nodes_d))

integration/test_web.py (new file)
View File

@ -0,0 +1,521 @@
"""
These tests were originally written to achieve some level of
coverage for the WebAPI functionality during Python3 porting (there
aren't many tests of the Web API period).
Most of the tests have cursory asserts and encode 'what the WebAPI did
at the time of testing' -- not necessarily a cohesive idea of what the
WebAPI *should* do in every situation. It's not clear the latter
exists anywhere, however.
"""
import sys
import time
import shutil
import json
import urllib2
from os import mkdir, unlink, utime
from os.path import join, exists, getmtime
import allmydata.uri
import util
import requests
import pytest_twisted
import html5lib
from bs4 import BeautifulSoup
def test_index(alice):
"""
we can download the index file
"""
util.web_get(alice, u"")
def test_index_json(alice):
"""
we can download the index file as json
"""
data = util.web_get(alice, u"", params={u"t": u"json"})
# it should be valid json
json.loads(data)
def test_upload_download(alice):
"""
upload a file, then download it via readcap
"""
FILE_CONTENTS = u"some contents"
readcap = util.web_post(
alice, u"uri",
data={
u"t": u"upload",
u"format": u"mdmf",
},
files={
u"file": FILE_CONTENTS,
},
)
readcap = readcap.strip()
data = util.web_get(
alice, u"uri",
params={
u"uri": readcap,
u"filename": u"boom",
}
)
assert data == FILE_CONTENTS
def test_put(alice):
"""
use PUT to create a file
"""
FILE_CONTENTS = b"added via PUT" * 20
resp = requests.put(
util.node_url(alice.node_dir, u"uri"),
data=FILE_CONTENTS,
)
cap = allmydata.uri.from_string(resp.text.strip().encode('ascii'))
cfg = alice.get_config()
assert isinstance(cap, allmydata.uri.CHKFileURI)
assert cap.size == len(FILE_CONTENTS)
assert cap.total_shares == int(cfg.get_config("client", "shares.total"))
assert cap.needed_shares == int(cfg.get_config("client", "shares.needed"))
def test_helper_status(storage_nodes):
"""
successfully GET the /helper_status page
"""
url = util.node_url(storage_nodes[0].node_dir, "helper_status")
resp = requests.get(url)
assert resp.status_code >= 200 and resp.status_code < 300
dom = BeautifulSoup(resp.content, "html5lib")
assert unicode(dom.h1.string) == u"Helper Status"
def test_deep_stats(alice):
"""
create a directory, do deep-stats on it and prove the /operations/
URIs work
"""
resp = requests.post(
util.node_url(alice.node_dir, "uri"),
params={
"format": "sdmf",
"t": "mkdir",
"redirect_to_result": "true",
},
)
assert resp.status_code >= 200 and resp.status_code < 300
# when creating a directory, we'll be re-directed to a URL
# containing our writecap..
uri = urllib2.unquote(resp.url)
assert 'URI:DIR2:' in uri
dircap = uri[uri.find("URI:DIR2:"):].rstrip('/')
dircap_uri = util.node_url(alice.node_dir, "uri/{}".format(urllib2.quote(dircap)))
# POST a file into this directory
FILE_CONTENTS = u"a file in a directory"
resp = requests.post(
dircap_uri,
data={
u"t": u"upload",
u"when_done": u".",
},
files={
u"file": FILE_CONTENTS,
},
)
# confirm the file is in the directory
resp = requests.get(
dircap_uri,
params={
u"t": u"json",
},
)
d = json.loads(resp.content)
k, data = d
assert k == u"dirnode"
assert len(data['children']) == 1
k, child = data['children'].values()[0]
assert k == u"filenode"
assert child['size'] == len(FILE_CONTENTS)
# perform deep-stats on it...
resp = requests.post(
dircap_uri,
data={
u"t": u"start-deep-stats",
u"ophandle": u"something_random",
},
)
assert resp.status_code >= 200 and resp.status_code < 300
# confirm we get information from the op .. after it's done
tries = 10
while tries > 0:
tries -= 1
resp = requests.get(
util.node_url(alice.node_dir, u"operations/something_random"),
)
d = json.loads(resp.content)
if d['size-literal-files'] == len(FILE_CONTENTS):
print("stats completed successfully")
break
else:
print("{} != {}; waiting".format(d['size-literal-files'], len(FILE_CONTENTS)))
time.sleep(.5)
def test_status(alice):
"""
confirm we get something sensible from /status and the various sub-types
"""
# upload a file
# (because of the nature of the integration-tests, we can only
# assert things about "our" file because we don't know what other
# operations may have happened in the grid before our test runs).
FILE_CONTENTS = u"all the Important Data of alice\n" * 1200
resp = requests.put(
util.node_url(alice.node_dir, u"uri"),
data=FILE_CONTENTS,
)
cap = resp.text.strip()
print("Uploaded data, cap={}".format(cap))
resp = requests.get(
util.node_url(alice.node_dir, u"uri/{}".format(urllib2.quote(cap))),
)
print("Downloaded {} bytes of data".format(len(resp.content)))
assert resp.content == FILE_CONTENTS
resp = requests.get(
util.node_url(alice.node_dir, "status"),
)
dom = html5lib.parse(resp.content)
hrefs = [
a.get('href')
for a in dom.iter(u'{http://www.w3.org/1999/xhtml}a')
]
found_upload = False
found_download = False
for href in hrefs:
if href.startswith(u"/") or not href:
continue
resp = requests.get(
util.node_url(alice.node_dir, u"status/{}".format(href)),
)
if href.startswith(u'up'):
assert "File Upload Status" in resp.content
if "Total Size: {}".format(len(FILE_CONTENTS)) in resp.content:
found_upload = True
elif href.startswith(u'down'):
assert "File Download Status" in resp.content
if "Total Size: {}".format(len(FILE_CONTENTS)) in resp.content:
found_download = True
# download the specialized event information
resp = requests.get(
util.node_url(alice.node_dir, u"status/{}/event_json".format(href)),
)
js = json.loads(resp.content)
# there's usually just one "read" operation, but this can handle many ..
total_bytes = sum([st['bytes_returned'] for st in js['read']], 0)
assert total_bytes == len(FILE_CONTENTS)
assert found_upload, "Failed to find the file we uploaded in the status-page"
assert found_download, "Failed to find the file we downloaded in the status-page"
def test_directory_deep_check(alice):
"""
use deep-check and confirm the result pages work
"""
# create a directory
resp = requests.post(
util.node_url(alice.node_dir, u"uri"),
params={
u"t": u"mkdir",
u"redirect_to_result": u"true",
}
)
# get json information about our directory
dircap_url = resp.url
resp = requests.get(
dircap_url,
params={u"t": u"json"},
)
dir_meta = json.loads(resp.content)
# upload a file of pangrams into the directory
FILE_CONTENTS = u"Sphinx of black quartz, judge my vow.\n" * (2048*10)
resp = requests.post(
dircap_url,
params={
u"t": u"upload",
u"upload-chk": u"upload-chk",
},
files={
u"file": FILE_CONTENTS,
}
)
cap0 = resp.content
print("Uploaded data0, cap={}".format(cap0))
# a different pangram
FILE_CONTENTS = u"The five boxing wizards jump quickly.\n" * (2048*10)
resp = requests.post(
dircap_url,
params={
u"t": u"upload",
u"upload-chk": u"upload-chk",
},
files={
u"file": FILE_CONTENTS,
}
)
cap1 = resp.content
print("Uploaded data1, cap={}".format(cap1))
resp = requests.get(
util.node_url(alice.node_dir, u"uri/{}".format(urllib2.quote(cap0))),
params={u"t": u"info"},
)
def check_repair_data(checkdata):
assert checkdata["healthy"] is True
assert checkdata["count-happiness"] == 4
assert checkdata["count-good-share-hosts"] == 4
assert checkdata["count-shares-good"] == 4
assert checkdata["count-corrupt-shares"] == 0
assert checkdata["list-corrupt-shares"] == []
# do a "check" (once for HTML, then with JSON for easier asserts)
resp = requests.post(
dircap_url,
params={
u"t": u"check",
u"return_to": u".",
u"verify": u"true",
}
)
resp = requests.post(
dircap_url,
params={
u"t": u"check",
u"return_to": u".",
u"verify": u"true",
u"output": u"JSON",
}
)
check_repair_data(json.loads(resp.content)["results"])
# "check and repair"
resp = requests.post(
dircap_url,
params={
u"t": u"check",
u"return_to": u".",
u"verify": u"true",
u"repair": u"true",
}
)
resp = requests.post(
dircap_url,
params={
u"t": u"check",
u"return_to": u".",
u"verify": u"true",
u"repair": u"true",
u"output": u"JSON",
}
)
check_repair_data(json.loads(resp.content)["post-repair-results"]["results"])
# start a "deep check and repair"
resp = requests.post(
dircap_url,
params={
u"t": u"start-deep-check",
u"return_to": u".",
u"verify": u"on",
u"repair": u"on",
u"output": u"JSON",
u"ophandle": u"deadbeef",
}
)
deepcheck_uri = resp.url
data = json.loads(resp.content)
tries = 10
while not data['finished'] and tries > 0:
tries -= 1
time.sleep(0.5)
print("deep-check not finished, reloading")
resp = requests.get(deepcheck_uri, params={u"output": "JSON"})
data = json.loads(resp.content)
print("deep-check finished")
assert data[u"stats"][u"count-immutable-files"] == 1
assert data[u"stats"][u"count-literal-files"] == 0
assert data[u"stats"][u"largest-immutable-file"] == 778240
assert data[u"count-objects-checked"] == 2
# also get the HTML version
resp = requests.post(
dircap_url,
params={
u"t": u"start-deep-check",
u"return_to": u".",
u"verify": u"on",
u"repair": u"on",
u"ophandle": u"definitely_random",
}
)
deepcheck_uri = resp.url
# if the operation isn't done, there's an <H2> tag with the
# reload link; otherwise there's only an <H1> tag. Wait up to 5
# seconds for this to respond properly.
for _ in range(5):
resp = requests.get(deepcheck_uri)
dom = BeautifulSoup(resp.content, "html5lib")
if dom.h1 and u'Results' in unicode(dom.h1.string):
break
if dom.h2 and dom.h2.a and u"Reload" in unicode(dom.h2.a.string):
dom = None
time.sleep(1)
assert dom is not None, "Operation never completed"
def test_storage_info(storage_nodes):
"""
retrieve and confirm /storage URI for one storage node
"""
storage0 = storage_nodes[0]
requests.get(
util.node_url(storage0.node_dir, u"storage"),
)
def test_storage_info_json(storage_nodes):
"""
retrieve and confirm /storage?t=json URI for one storage node
"""
storage0 = storage_nodes[0]
resp = requests.get(
util.node_url(storage0.node_dir, u"storage"),
params={u"t": u"json"},
)
data = json.loads(resp.content)
assert data[u"stats"][u"storage_server.reserved_space"] == 1000000000
def test_introducer_info(introducer):
"""
retrieve and confirm /introducer URI for the introducer
"""
resp = requests.get(
util.node_url(introducer.node_dir, u""),
)
assert "Introducer" in resp.content
resp = requests.get(
util.node_url(introducer.node_dir, u""),
params={u"t": u"json"},
)
data = json.loads(resp.content)
assert "announcement_summary" in data
assert "subscription_summary" in data
def test_mkdir_with_children(alice):
"""
create a directory using ?t=mkdir-with-children
"""
# create a file to put in our directory
FILE_CONTENTS = u"some file contents\n" * 500
resp = requests.put(
util.node_url(alice.node_dir, u"uri"),
data=FILE_CONTENTS,
)
filecap = resp.content.strip()
# create a (sub) directory to put in our directory
resp = requests.post(
util.node_url(alice.node_dir, u"uri"),
params={
u"t": u"mkdir",
}
)
# (we need both the read-write and read-only URIs I guess)
dircap = resp.content
dircap_obj = allmydata.uri.from_string(dircap)
dircap_ro = dircap_obj.get_readonly().to_string()
# create json information about our directory
meta = {
"a_file": [
"filenode", {
"ro_uri": filecap,
"metadata": {
"ctime": 1202777696.7564139,
"mtime": 1202777696.7564139,
"tahoe": {
"linkcrtime": 1202777696.7564139,
"linkmotime": 1202777696.7564139
}
}
}
],
"some_subdir": [
"dirnode", {
"rw_uri": dircap,
"ro_uri": dircap_ro,
"metadata": {
"ctime": 1202778102.7589991,
"mtime": 1202778111.2160511,
"tahoe": {
"linkcrtime": 1202777696.7564139,
"linkmotime": 1202777696.7564139
}
}
}
]
}
# create a new directory with one file and one sub-dir (all-at-once)
resp = util.web_post(
alice, u"uri",
params={u"t": "mkdir-with-children"},
data=json.dumps(meta),
)
assert resp.startswith("URI:DIR2")
cap = allmydata.uri.from_string(resp)
assert isinstance(cap, allmydata.uri.DirectoryURI)

View File

@@ -17,6 +17,7 @@ from allmydata.util.configutil import (
     set_config,
     write_config,
 )
+from allmydata import client

 import pytest_twisted
@@ -164,6 +165,15 @@ class TahoeProcess(object):
     def node_dir(self):
         return self._node_dir

+    def get_config(self):
+        return client.read_config(
+            self._node_dir,
+            u"portnum",
+        )
+
+    def __str__(self):
+        return "<TahoeProcess in '{}'>".format(self._node_dir)
+

 def _run_node(reactor, node_dir, request, magic_text):
     """
@@ -407,35 +417,39 @@ def _check_status(response):
     )


-def web_get(node_dir, uri_fragment, **kwargs):
+def web_get(tahoe, uri_fragment, **kwargs):
     """
-    Make a GET request to the webport of `node_dir`. This will look
-    like: `http://localhost:<webport>/<uri_fragment>`. All `kwargs`
-    are passed on to `requests.get`
+    Make a GET request to the webport of `tahoe` (a `TahoeProcess`,
+    usually from a fixture, e.g. `alice`). This will look like:
+    `http://localhost:<webport>/<uri_fragment>`. All `kwargs` are
+    passed on to `requests.get`
     """
-    url = node_url(node_dir, uri_fragment)
+    url = node_url(tahoe.node_dir, uri_fragment)
     resp = requests.get(url, **kwargs)
     _check_status(resp)
     return resp.content


-def web_post(node_dir, uri_fragment, **kwargs):
+def web_post(tahoe, uri_fragment, **kwargs):
     """
-    Make a POST request to the webport of `node_dir`. This will look
-    like: `http://localhost:<webport>/<uri_fragment>`. All `kwargs`
-    are passed on to `requests.post`
+    Make a POST request to the webport of `tahoe` (a `TahoeProcess`,
+    usually from a fixture, e.g. `alice`). This will look like:
+    `http://localhost:<webport>/<uri_fragment>`. All `kwargs` are
+    passed on to `requests.post`
     """
-    url = node_url(node_dir, uri_fragment)
+    url = node_url(tahoe.node_dir, uri_fragment)
     resp = requests.post(url, **kwargs)
     _check_status(resp)
     return resp.content


-def await_client_ready(process, timeout=10, liveness=60*2):
+def await_client_ready(tahoe, timeout=10, liveness=60*2):
     """
-    Uses the status API to wait for a client-type node to be
-    'ready'. A client is deemed ready if:
-      - it answers http://<node_url>/statistics/?t=json/
+    Uses the status API to wait for a client-type node (in `tahoe`, a
+    `TahoeProcess` instance usually from a fixture, e.g. `alice`) to be
+    'ready'. A client is deemed ready if:
+
+      - it answers `http://<node_url>/statistics/?t=json/`
       - there is at least one storage-server connected
       - every storage-server has a "last_received_data" and it is
         within the last `liveness` seconds
@@ -446,7 +460,7 @@ def await_client_ready(process, timeout=10, liveness=60*2):
     start = time.time()
     while (time.time() - start) < float(timeout):
         try:
-            data = web_get(process.node_dir, u"", params={u"t": u"json"})
+            data = web_get(tahoe, u"", params={u"t": u"json"})
             js = json.loads(data)
         except Exception as e:
             print("waiting because '{}'".format(e))
@@ -481,7 +495,7 @@ def await_client_ready(process, timeout=10, liveness=60*2):
     raise RuntimeError(
         "Waited {} seconds for {} to be 'ready' but it never was".format(
             timeout,
-            process.node_dir,
+            tahoe,
         )
     )

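To show how the reworked helpers are meant to be called, here is a small, hypothetical usage sketch; the `alice` fixture and the config keys are assumptions based on the tests above, not part of this diff:

import json
import util

def print_welcome_summary(alice):
    # web_get() now takes the TahoeProcess itself rather than a node_dir
    body = util.web_get(alice, u"", params={u"t": u"json"})
    welcome = json.loads(body)
    print(sorted(welcome.keys()))

    # the process can also hand back its own parsed configuration
    cfg = alice.get_config()
    print(cfg.get_config("node", "nickname"))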
newsfragments/3227.minor (new empty file)
View File

View File

@@ -496,6 +496,12 @@ class _Client(node.Node, pollmixin.PollMixin):
     def get_long_tubid(self):
         return idlib.nodeid_b2a(self.nodeid)

+    def get_web_service(self):
+        """
+        :return: a reference to our web server
+        """
+        return self.getServiceNamed("webish")
+
     def _init_permutation_seed(self, ss):
         seed = self.config.get_config_from_file("permutation-seed")
         if not seed:

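A sketch of how in-process code can now reach the ophandle tracker through this accessor instead of touching Root.child_operations directly; the helper name and the is_finished() call are assumptions for illustration only:

def operation_is_finished(client, ophandle):
    # client: a running _Client; ophandle: the handle string used in the POST
    webish = client.get_web_service()          # the "webish" service added above
    operations = webish.get_operations()       # OphandleTable (see webish.py below)
    monitor = operations.handles[ophandle][0]  # (monitor, renderer, when_added)
    return monitor.is_finished()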
View File

@@ -4521,7 +4521,7 @@ class Web(WebMixin, WebErrorMixin, testutil.StallMixin, testutil.ReallyEqualMixi
         res = yield self.GET("/operations/128?t=status&output=JSON")
         data = json.loads(res)
         self.failUnless("finished" in data, res)
-        monitor = self.ws.root.child_operations.handles["128"][0]
+        monitor = self.ws.getServiceNamed("operations").handles["128"][0]
         res = yield self.POST("/operations/128?t=cancel&output=JSON")
         data = json.loads(res)

View File

@@ -5,7 +5,6 @@ import json
 from twisted.web import http, server, resource, template
 from twisted.python import log
 from twisted.python.failure import Failure
-from zope.interface import Interface
 from nevow import loaders, appserver
 from nevow.rend import Page
 from nevow.inevow import IRequest
@@ -39,9 +38,6 @@ def get_filenode_metadata(filenode):
         metadata['size'] = size
     return metadata

-class IOpHandleTable(Interface):
-    pass
-
 def getxmlfile(name):
     return loaders.xmlfile(resource_filename('allmydata.web', '%s' % name))

View File

@@ -22,7 +22,7 @@ from allmydata.blacklist import ProhibitedNode
 from allmydata.monitor import Monitor, OperationCancelledError
 from allmydata import dirnode
 from allmydata.web.common import text_plain, WebError, \
-     IOpHandleTable, NeedOperationHandleError, \
+     NeedOperationHandleError, \
      boolean_of_arg, get_arg, get_root, parse_replace_arg, \
      should_create_intermediate_directories, \
      getxmlfile, RenderMixin, humanize_failure, convert_children_json, \
@@ -62,6 +62,7 @@ class DirectoryNodeHandler(RenderMixin, rend.Page, ReplaceMeMixin):
         self.node = node
         self.parentnode = parentnode
         self.name = name
+        self._operations = client.get_web_service().get_operations()

     def childFactory(self, ctx, name):
         name = name.decode("utf-8")
@@ -442,9 +443,8 @@ class DirectoryNodeHandler(RenderMixin, rend.Page, ReplaceMeMixin):
         return d

     def _start_operation(self, monitor, renderer, ctx):
-        table = IOpHandleTable(ctx)
-        table.add_monitor(ctx, monitor, renderer)
-        return table.redirect_to(ctx)
+        self._operations.add_monitor(ctx, monitor, renderer)
+        return self._operations.redirect_to(ctx)

     def _POST_start_deep_check(self, ctx):
         # check this directory and everything reachable from it

View File

@@ -16,7 +16,6 @@
     <li>Total Size: <span n:render="total_size"/></li>
     <li>Progress: <span n:render="progress"/></li>
     <li>Status: <span n:render="status"/></li>
-    <li><span n:render="timeline_link"/></li>
   </ul>

   <div n:render="events"></div>

View File

@@ -1,6 +1,5 @@
 import time
-from zope.interface import implementer
 from nevow import rend, url, tags as T
 from nevow.inevow import IRequest
 from twisted.python.failure import Failure
@@ -9,7 +8,7 @@ from twisted.web.http import NOT_FOUND
 from twisted.web.html import escape
 from twisted.application import service

-from allmydata.web.common import IOpHandleTable, WebError, \
+from allmydata.web.common import WebError, \
      get_root, get_arg, boolean_of_arg

 MINUTE = 60
@@ -18,8 +17,8 @@ DAY = 24*HOUR

 (MONITOR, RENDERER, WHEN_ADDED) = range(3)

-@implementer(IOpHandleTable)
 class OphandleTable(rend.Page, service.Service):
+    name = "operations"

     UNCOLLECTED_HANDLE_LIFETIME = 4*DAY
     COLLECTED_HANDLE_LIFETIME = 1*DAY

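The new class-level name = "operations" is what makes the table reachable via Twisted's service-name lookup once it is attached to a parent service (see webish.py below, and the updated unit test above). A minimal sketch, assuming the constructor accepts a clock argument as the other changed files suggest:

from twisted.application import service

parent = service.MultiService()
table = OphandleTable(clock=None)   # clock argument assumed, per root.py/webish.py
table.setServiceParent(parent)
assert parent.getServiceNamed("operations") is table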
View File

@@ -10,7 +10,7 @@ import allmydata # to display import path
 from allmydata.version_checks import get_package_versions_string
 from allmydata.util import log
 from allmydata.interfaces import IFileNode
-from allmydata.web import filenode, directory, unlinked, status, operations
+from allmydata.web import filenode, directory, unlinked, status
 from allmydata.web import storage, magic_folder
 from allmydata.web.common import (
     abbreviate_size,
@@ -154,50 +154,59 @@ class Root(MultiFormatPage):
     def __init__(self, client, clock=None, now_fn=None):
         rend.Page.__init__(self, client)
         self.client = client
-        # If set, clock is a twisted.internet.task.Clock that the tests
-        # use to test ophandle expiration.
-        self.child_operations = operations.OphandleTable(clock)
         self.now_fn = now_fn
         try:
             s = client.getServiceNamed("storage")
         except KeyError:
             s = None
-        self.child_storage = storage.StorageStatus(s, self.client.nickname)
-        self.child_uri = URIHandler(client)
-        self.child_cap = URIHandler(client)
+        self.putChild("storage", storage.StorageStatus(s, self.client.nickname))
+        self.putChild("uri", URIHandler(client))
+        self.putChild("cap", URIHandler(client))

         # handler for "/magic_folder" URIs
-        self.child_magic_folder = magic_folder.MagicFolderWebApi(client)
+        self.putChild("magic_folder", magic_folder.MagicFolderWebApi(client))

         # Handler for everything beneath "/private", an area of the resource
         # hierarchy which is only accessible with the private per-node API
         # auth token.
-        self.child_private = create_private_tree(client.get_auth_token)
-        self.child_file = FileHandler(client)
-        self.child_named = FileHandler(client)
-        self.child_status = status.Status(client.get_history())
-        self.child_statistics = status.Statistics(client.stats_provider)
+        self.putChild("private", create_private_tree(client.get_auth_token))
+        self.putChild("file", FileHandler(client))
+        self.putChild("named", FileHandler(client))
+        self.putChild("status", status.Status(client.get_history()))
+        self.putChild("statistics", status.Statistics(client.stats_provider))
         static_dir = resource_filename("allmydata.web", "static")
         for filen in os.listdir(static_dir):
             self.putChild(filen, nevow_File(os.path.join(static_dir, filen)))

-    def child_helper_status(self, ctx):
-        # the Helper isn't attached until after the Tub starts, so this child
-        # needs to be created on each request
-        return status.HelperStatus(self.client.helper)
+        self.putChild("report_incident", IncidentReporter())

-    child_report_incident = IncidentReporter()
-    #child_server # let's reserve this for storage-server-over-HTTP
+    # until we get rid of nevow.Page in favour of twisted.web.resource
+    # we can't use getChild() -- but we CAN use childFactory or
+    # override locateChild
+    def childFactory(self, ctx, name):
+        request = IRequest(ctx)
+        return self.getChild(name, request)

+    def getChild(self, path, request):
+        if path == "helper_status":
+            # the Helper isn't attached until after the Tub starts, so this
+            # child needs to be created on each request
+            return status.HelperStatus(self.client.helper)

     # FIXME: This code is duplicated in root.py and introweb.py.
     def data_rendered_at(self, ctx, data):
         return render_time(time.time())

     def data_version(self, ctx, data):
         return get_package_versions_string()

     def data_import_path(self, ctx, data):
         return str(allmydata)

     def render_my_nodeid(self, ctx, data):
         tubid_s = "TubID: "+self.client.get_long_tubid()
         return T.td(title=tubid_s)[self.client.get_long_nodeid()]

View File

@@ -2,7 +2,8 @@
 import pprint, itertools, hashlib
 import json
 from twisted.internet import defer
-from nevow import rend, inevow, tags as T
+from twisted.web.resource import Resource
+from nevow import rend, tags as T
 from allmydata.util import base32, idlib
 from allmydata.web.common import (
     getxmlfile,
@@ -331,12 +332,161 @@ class DownloadResultsRendererMixin(RateAndTimeMixin):
         d.addCallback(_render)
         return d
def _find_overlap(events, start_key, end_key):
"""
given a list of event dicts, return a new list in which each event
has an extra "row" key (an int, starting at 0), and if appropriate
a "serverid" key (ascii-encoded server id), replacing the "server"
key. This is a hint to our JS frontend about how to overlap the
parts of the graph it is drawing.
we must always make a copy, since we're going to be adding keys
and don't want to change the original objects. If we're
stringifying serverids, we'll also be changing the serverid keys.
"""
new_events = []
rows = []
for ev in events:
ev = ev.copy()
if ev.has_key('server'):
ev["serverid"] = ev["server"].get_longname()
del ev["server"]
# find an empty slot in the rows
free_slot = None
for row,finished in enumerate(rows):
if finished is not None:
if ev[start_key] > finished:
free_slot = row
break
if free_slot is None:
free_slot = len(rows)
rows.append(ev[end_key])
else:
rows[free_slot] = ev[end_key]
ev["row"] = free_slot
new_events.append(ev)
return new_events
def _find_overlap_requests(events):
"""
We compute a three-element 'row tuple' for each event: (serverid,
shnum, row). All elements are ints. The first is a mapping from
serverid to group number, the second is a mapping from shnum to
subgroup number. The third is a row within the subgroup.
We also return a list of lists of rowcounts, so renderers can decide
how much vertical space to give to each row.
"""
serverid_to_group = {}
groupnum_to_rows = {} # maps groupnum to a table of rows. Each table
# is a list with an element for each row number
# (int starting from 0) that contains a
# finish_time, indicating that the row is empty
# beyond that time. If finish_time is None, it
# indicates a response that has not yet
# completed, so the row cannot be reused.
new_events = []
for ev in events:
# DownloadStatus promises to give us events in temporal order
ev = ev.copy()
ev["serverid"] = ev["server"].get_longname()
del ev["server"]
if ev["serverid"] not in serverid_to_group:
groupnum = len(serverid_to_group)
serverid_to_group[ev["serverid"]] = groupnum
groupnum = serverid_to_group[ev["serverid"]]
if groupnum not in groupnum_to_rows:
groupnum_to_rows[groupnum] = []
rows = groupnum_to_rows[groupnum]
# find an empty slot in the rows
free_slot = None
for row,finished in enumerate(rows):
if finished is not None:
if ev["start_time"] > finished:
free_slot = row
break
if free_slot is None:
free_slot = len(rows)
rows.append(ev["finish_time"])
else:
rows[free_slot] = ev["finish_time"]
ev["row"] = (groupnum, free_slot)
new_events.append(ev)
del groupnum
# maybe also return serverid_to_group, groupnum_to_rows, and some
# indication of the highest finish_time
#
# actually, return the highest rownum for each groupnum
highest_rownums = [len(groupnum_to_rows[groupnum])
for groupnum in range(len(serverid_to_group))]
return new_events, highest_rownums
def _color(server):
h = hashlib.sha256(server.get_serverid()).digest()
def m(c):
return min(ord(c) / 2 + 0x80, 0xff)
return "#%02x%02x%02x" % (m(h[0]), m(h[1]), m(h[2]))
class _EventJson(Resource, object):
def __init__(self, download_status):
self._download_status = download_status
def render(self, request):
request.setHeader("content-type", "text/plain")
data = { } # this will be returned to the GET
ds = self._download_status
data["misc"] = _find_overlap(
ds.misc_events,
"start_time", "finish_time",
)
data["read"] = _find_overlap(
ds.read_events,
"start_time", "finish_time",
)
data["segment"] = _find_overlap(
ds.segment_events,
"start_time", "finish_time",
)
# TODO: overlap on DYHB isn't very useful, and usually gets in the
# way. So don't do it.
data["dyhb"] = _find_overlap(
ds.dyhb_requests,
"start_time", "finish_time",
)
data["block"],data["block_rownums"] =_find_overlap_requests(ds.block_requests)
server_info = {} # maps longname to {num,color,short}
server_shortnames = {} # maps servernum to shortname
for d_ev in ds.dyhb_requests:
s = d_ev["server"]
longname = s.get_longname()
if longname not in server_info:
num = len(server_info)
server_info[longname] = {"num": num,
"color": _color(s),
"short": s.get_name() }
server_shortnames[str(num)] = s.get_name()
data["server_info"] = server_info
data["num_serverids"] = len(server_info)
# we'd prefer the keys of serverids[] to be ints, but this is JSON,
# so they get converted to strings. Stupid javascript.
data["serverids"] = server_shortnames
data["bounds"] = {"min": ds.first_timestamp, "max": ds.last_timestamp}
return json.dumps(data, indent=1) + "\n"
 class DownloadStatusPage(DownloadResultsRendererMixin, rend.Page):
     docFactory = getxmlfile("download-status.xhtml")

     def __init__(self, data):
         rend.Page.__init__(self, data)
         self.download_status = data
+        self.putChild("event_json", _EventJson(self.download_status))

     def download_results(self):
         return defer.maybeDeferred(self.download_status.get_results)
@@ -353,130 +503,6 @@ class DownloadStatusPage(DownloadResultsRendererMixin, rend.Page):
             return ""
         return "+%.6fs" % t
def _find_overlap(self, events, start_key, end_key):
# given a list of event dicts, return a new list in which each event
# has an extra "row" key (an int, starting at 0), and if appropriate
# a "serverid" key (ascii-encoded server id), replacing the "server"
# key. This is a hint to our JS frontend about how to overlap the
# parts of the graph it is drawing.
# we must always make a copy, since we're going to be adding keys
# and don't want to change the original objects. If we're
# stringifying serverids, we'll also be changing the serverid keys.
new_events = []
rows = []
for ev in events:
ev = ev.copy()
if ev.has_key('server'):
ev["serverid"] = ev["server"].get_longname()
del ev["server"]
# find an empty slot in the rows
free_slot = None
for row,finished in enumerate(rows):
if finished is not None:
if ev[start_key] > finished:
free_slot = row
break
if free_slot is None:
free_slot = len(rows)
rows.append(ev[end_key])
else:
rows[free_slot] = ev[end_key]
ev["row"] = free_slot
new_events.append(ev)
return new_events
def _find_overlap_requests(self, events):
"""We compute a three-element 'row tuple' for each event: (serverid,
shnum, row). All elements are ints. The first is a mapping from
serverid to group number, the second is a mapping from shnum to
subgroup number. The third is a row within the subgroup.
We also return a list of lists of rowcounts, so renderers can decide
how much vertical space to give to each row.
"""
serverid_to_group = {}
groupnum_to_rows = {} # maps groupnum to a table of rows. Each table
# is a list with an element for each row number
# (int starting from 0) that contains a
# finish_time, indicating that the row is empty
# beyond that time. If finish_time is None, it
# indicate a response that has not yet
# completed, so the row cannot be reused.
new_events = []
for ev in events:
# DownloadStatus promises to give us events in temporal order
ev = ev.copy()
ev["serverid"] = ev["server"].get_longname()
del ev["server"]
if ev["serverid"] not in serverid_to_group:
groupnum = len(serverid_to_group)
serverid_to_group[ev["serverid"]] = groupnum
groupnum = serverid_to_group[ev["serverid"]]
if groupnum not in groupnum_to_rows:
groupnum_to_rows[groupnum] = []
rows = groupnum_to_rows[groupnum]
# find an empty slot in the rows
free_slot = None
for row,finished in enumerate(rows):
if finished is not None:
if ev["start_time"] > finished:
free_slot = row
break
if free_slot is None:
free_slot = len(rows)
rows.append(ev["finish_time"])
else:
rows[free_slot] = ev["finish_time"]
ev["row"] = (groupnum, free_slot)
new_events.append(ev)
del groupnum
# maybe also return serverid_to_group, groupnum_to_rows, and some
# indication of the highest finish_time
#
# actually, return the highest rownum for each groupnum
highest_rownums = [len(groupnum_to_rows[groupnum])
for groupnum in range(len(serverid_to_group))]
return new_events, highest_rownums
def child_event_json(self, ctx):
inevow.IRequest(ctx).setHeader("content-type", "text/plain")
data = { } # this will be returned to the GET
ds = self.download_status
data["misc"] = self._find_overlap(ds.misc_events,
"start_time", "finish_time")
data["read"] = self._find_overlap(ds.read_events,
"start_time", "finish_time")
data["segment"] = self._find_overlap(ds.segment_events,
"start_time", "finish_time")
# TODO: overlap on DYHB isn't very useful, and usually gets in the
# way. So don't do it.
data["dyhb"] = self._find_overlap(ds.dyhb_requests,
"start_time", "finish_time")
data["block"],data["block_rownums"] = self._find_overlap_requests(ds.block_requests)
server_info = {} # maps longname to {num,color,short}
server_shortnames = {} # maps servernum to shortname
for d_ev in ds.dyhb_requests:
s = d_ev["server"]
longname = s.get_longname()
if longname not in server_info:
num = len(server_info)
server_info[longname] = {"num": num,
"color": self.color(s),
"short": s.get_name() }
server_shortnames[str(num)] = s.get_name()
data["server_info"] = server_info
data["num_serverids"] = len(server_info)
# we'd prefer the keys of serverids[] to be ints, but this is JSON,
# so they get converted to strings. Stupid javascript.
data["serverids"] = server_shortnames
data["bounds"] = {"min": ds.first_timestamp, "max": ds.last_timestamp}
return json.dumps(data, indent=1) + "\n"
     def render_timeline_link(self, ctx, data):
         from nevow import url
         return T.a(href=url.URL.fromContext(ctx).child("timeline"))["timeline"]
@@ -507,7 +533,7 @@ class DownloadStatusPage(DownloadResultsRendererMixin, rend.Page):
                 rtt = received - sent
             if not shnums:
                 shnums = ["-"]
-            t[T.tr(style="background: %s" % self.color(server))[
+            t[T.tr(style="background: %s" % _color(server))[
               [T.td[server.get_name()], T.td[srt(sent)], T.td[srt(received)],
                T.td[",".join([str(shnum) for shnum in shnums])],
                T.td[self.render_time(None, rtt)],
@@ -587,7 +613,7 @@ class DownloadStatusPage(DownloadResultsRendererMixin, rend.Page):
                 rtt = None
                 if r_ev["finish_time"] is not None:
                     rtt = r_ev["finish_time"] - r_ev["start_time"]
-                color = self.color(server)
+                color = _color(server)
                 t[T.tr(style="background: %s" % color)[
                     T.td[server.get_name()], T.td[r_ev["shnum"]],
                     T.td["[%d:+%d]" % (r_ev["start"], r_ev["length"])],
@@ -601,12 +627,6 @@ class DownloadStatusPage(DownloadResultsRendererMixin, rend.Page):
             return l

-    def color(self, server):
-        h = hashlib.sha256(server.get_serverid()).digest()
-        def m(c):
-            return min(ord(c) / 2 + 0x80, 0xff)
-        return "#%02x%02x%02x" % (m(h[0]), m(h[1]), m(h[2]))
-
     def render_results(self, ctx, data):
         d = self.download_results()
         def _got_results(results):
@@ -1069,7 +1089,12 @@ class Status(MultiFormatPage):
     def childFactory(self, ctx, name):
         h = self.history
-        stype,count_s = name.split("-")
+        try:
+            stype, count_s = name.split("-")
+        except ValueError:
+            raise RuntimeError(
+                "no - in '{}'".format(name)
+            )
         count = int(count_s)
         if stype == "up":
             for s in itertools.chain(h.list_all_upload_statuses(),

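To make the row-packing behaviour of the new module-level _find_overlap concrete, here is a small worked example with made-up event dicts (not taken from the test suite):

from allmydata.web.status import _find_overlap

events = [
    {"start_time": 0.0, "finish_time": 2.0},   # first event -> row 0
    {"start_time": 1.0, "finish_time": 3.0},   # overlaps row 0 -> row 1
    {"start_time": 2.5, "finish_time": 4.0},   # row 0 is free again (2.5 > 2.0) -> row 0
]
for ev in _find_overlap(events, "start_time", "finish_time"):
    print(ev["row"], ev["start_time"], ev["finish_time"])
# prints rows 0, 1, 0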
View File

@@ -10,7 +10,8 @@ from nevow import appserver, inevow
 from allmydata.util import log, fileutil
 from allmydata.web import introweb, root
-from allmydata.web.common import IOpHandleTable, MyExceptionHandler
+from allmydata.web.common import MyExceptionHandler
+from allmydata.web.operations import OphandleTable

 # we must override twisted.web.http.Request.requestReceived with a version
 # that doesn't use cgi.parse_multipart() . Since we actually use Nevow, we
@@ -164,9 +165,12 @@ class WebishServer(service.MultiService):
         self.root = root.Root(client, clock, now_fn)
         self.buildServer(webport, nodeurl_path, staticdir)
-        if self.root.child_operations:
-            self.site.remember(self.root.child_operations, IOpHandleTable)
-            self.root.child_operations.setServiceParent(self)
+
+        # If set, clock is a twisted.internet.task.Clock that the tests
+        # use to test ophandle expiration.
+        self._operations = OphandleTable(clock)
+        self._operations.setServiceParent(self)
+        self.root.putChild("operations", self._operations)

     def buildServer(self, webport, nodeurl_path, staticdir):
         self.webport = webport
@@ -237,6 +241,12 @@ class WebishServer(service.MultiService):
             # who knows, probably some weirdo future version of Twisted
             self._started.errback(AssertionError("couldn't find out the scheme or port for the web-API server"))

+    def get_operations(self):
+        """
+        :return: a reference to our "active operations" tracker
+        """
+        return self._operations
+

 class IntroducerWebishServer(WebishServer):
     def __init__(self, introducer, webport, nodeurl_path=None, staticdir=None):