Merge remote-tracking branch 'origin/master' into integration/storage-economics
commit 8c1f536ba4
.circleci/config.yml
@@ -41,6 +41,10 @@ workflows:
       # integration tests.
       - "debian-9"
 
+      # Generate the underlying data for a visualization to aid with Python 3
+      # porting.
+      - "build-porting-depgraph"
+
   images:
     # Build the Docker images used by the ci jobs. This makes the ci jobs
     # faster and takes various spurious failures out of the critical path.
@@ -292,6 +296,33 @@ jobs:
       - store_artifacts: *STORE_OTHER_ARTIFACTS
       - run: *SUBMIT_COVERAGE
 
+  # Generate up-to-date data for the dependency graph visualizer.
+  build-porting-depgraph:
+    # Get a system in which we can easily install Tahoe-LAFS and all its
+    # dependencies. The dependency graph analyzer works by executing the code.
+    # It's Python, what do you expect?
+    <<: *DEBIAN
+
+    steps:
+      - "checkout"
+
+      - add_ssh_keys:
+          fingerprints:
+            # Jean-Paul Calderone <exarkun@twistedmatrix.com> (CircleCI depgraph key)
+            # This lets us push to tahoe-lafs/tahoe-depgraph in the next step.
+            - "86:38:18:a7:c0:97:42:43:18:46:55:d6:21:b0:5f:d4"
+
+      - run:
+          name: "Setup Python Environment"
+          command: |
+            /tmp/venv/bin/pip install -e /tmp/project
+
+      - run:
+          name: "Generate dependency graph data"
+          command: |
+            . /tmp/venv/bin/activate
+            ./misc/python3/depgraph.sh
+
   build-image: &BUILD_IMAGE
     # This is a template for a job to build a Docker image that has as much of
     # the setup as we can manage already done and baked in. This cuts down on
.travis.yml
@@ -25,7 +25,7 @@ script:
   - |
     set -eo pipefail
     if [ "${T}" = "py35" ]; then
-      python3 -m compileall -f .
+      python3 -m compileall -f -x tahoe-depgraph.py .
    else
       tox -e ${T}
     fi
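A note on the new `-x` argument: `compileall -x` takes a regex of file paths to skip, and the exclusion is needed because misc/python3/tahoe-depgraph.py (added later in this commit) uses Python 2-only tuple-parameter syntax that can never byte-compile under Python 3.5. A minimal sketch of the same check via the stdlib API, assuming it is run from the project root:

    # Equivalent of `python3 -m compileall -f -x tahoe-depgraph.py .`
    import re
    import compileall

    # force=True mirrors -f; rx mirrors -x (matching paths are skipped).
    ok = compileall.compile_dir(".", force=True, rx=re.compile(r"tahoe-depgraph\.py"))
    raise SystemExit(0 if ok else 1)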
integration/conftest.py
@@ -34,6 +34,7 @@ from util import (
     _cleanup_tahoe_process,
     _tahoe_runner_optional_coverage,
     await_client_ready,
+    TahoeProcess,
 )
@@ -202,7 +203,7 @@ log_gatherer.furl = {log_furl}
     # but on linux it means daemonize. "tahoe run" is consistent
     # between platforms.
     protocol = _MagicTextProtocol('introducer running')
-    process = _tahoe_runner_optional_coverage(
+    transport = _tahoe_runner_optional_coverage(
         protocol,
         reactor,
         request,
@@ -211,10 +212,10 @@ log_gatherer.furl = {log_furl}
             intro_dir,
         ),
     )
-    request.addfinalizer(partial(_cleanup_tahoe_process, process, protocol.exited))
+    request.addfinalizer(partial(_cleanup_tahoe_process, transport, protocol.exited))
 
     pytest_twisted.blockon(protocol.magic_seen)
-    return process
+    return TahoeProcess(transport, intro_dir)
 
 
 @pytest.fixture(scope='session')
@@ -312,11 +313,12 @@ def storage_nodes(reactor, temp_dir, introducer, introducer_furl, flog_gatherer,
     # start all 5 nodes in parallel
     for x in range(5):
         name = 'node{}'.format(x)
-        # tub_port = 9900 + x
+        web_port = 9990 + x
         nodes_d.append(
             _create_node(
                 reactor, request, temp_dir, introducer_furl, flog_gatherer, name,
-                web_port=None, storage=True,
+                web_port="tcp:{}:interface=localhost".format(web_port),
+                storage=True,
             )
         )
     nodes_status = pytest_twisted.blockon(DeferredList(nodes_d))
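For readers unfamiliar with `DeferredList`: it fires with a list of `(success, result)` two-tuples in the same order as the input deferreds, so `nodes_status` above holds one entry per storage node. A hedged sketch of unpacking such a result (the helper name is illustrative, not from the source):

    # Sketch: turning a DeferredList result into plain values, failing fast.
    def unpack_deferredlist_result(nodes_status):
        nodes = []
        for success, result in nodes_status:
            if not success:
                result.raiseException()  # `result` is a Failure here
            nodes.append(result)
        return nodes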
integration/test_web.py (new file, 521 lines)
@@ -0,0 +1,521 @@
"""
These tests were originally written to achieve some level of
coverage for the WebAPI functionality during Python3 porting (there
aren't many tests of the Web API period).

Most of the tests have cursory asserts and encode 'what the WebAPI did
at the time of testing' -- not necessarily a cohesive idea of what the
WebAPI *should* do in every situation. It's not clear the latter
exists anywhere, however.
"""

import sys
import time
import shutil
import json
import urllib2
from os import mkdir, unlink, utime
from os.path import join, exists, getmtime

import allmydata.uri

import util

import requests
import pytest_twisted
import html5lib
from bs4 import BeautifulSoup


def test_index(alice):
    """
    we can download the index file
    """
    util.web_get(alice, u"")


def test_index_json(alice):
    """
    we can download the index file as json
    """
    data = util.web_get(alice, u"", params={u"t": u"json"})
    # it should be valid json
    json.loads(data)


def test_upload_download(alice):
    """
    upload a file, then download it via readcap
    """

    FILE_CONTENTS = u"some contents"

    readcap = util.web_post(
        alice, u"uri",
        data={
            u"t": u"upload",
            u"format": u"mdmf",
        },
        files={
            u"file": FILE_CONTENTS,
        },
    )
    readcap = readcap.strip()

    data = util.web_get(
        alice, u"uri",
        params={
            u"uri": readcap,
            u"filename": u"boom",
        }
    )
    assert data == FILE_CONTENTS


def test_put(alice):
    """
    use PUT to create a file
    """

    FILE_CONTENTS = b"added via PUT" * 20

    resp = requests.put(
        util.node_url(alice.node_dir, u"uri"),
        data=FILE_CONTENTS,
    )
    cap = allmydata.uri.from_string(resp.text.strip().encode('ascii'))
    cfg = alice.get_config()
    assert isinstance(cap, allmydata.uri.CHKFileURI)
    assert cap.size == len(FILE_CONTENTS)
    assert cap.total_shares == int(cfg.get_config("client", "shares.total"))
    assert cap.needed_shares == int(cfg.get_config("client", "shares.needed"))


def test_helper_status(storage_nodes):
    """
    successfully GET the /helper_status page
    """

    url = util.node_url(storage_nodes[0].node_dir, "helper_status")
    resp = requests.get(url)
    assert resp.status_code >= 200 and resp.status_code < 300
    dom = BeautifulSoup(resp.content, "html5lib")
    assert unicode(dom.h1.string) == u"Helper Status"


def test_deep_stats(alice):
    """
    create a directory, do deep-stats on it and prove the /operations/
    URIs work
    """
    resp = requests.post(
        util.node_url(alice.node_dir, "uri"),
        params={
            "format": "sdmf",
            "t": "mkdir",
            "redirect_to_result": "true",
        },
    )
    assert resp.status_code >= 200 and resp.status_code < 300

    # when creating a directory, we'll be re-directed to a URL
    # containing our writecap..
    uri = urllib2.unquote(resp.url)
    assert 'URI:DIR2:' in uri
    dircap = uri[uri.find("URI:DIR2:"):].rstrip('/')
    dircap_uri = util.node_url(alice.node_dir, "uri/{}".format(urllib2.quote(dircap)))

    # POST a file into this directory
    FILE_CONTENTS = u"a file in a directory"

    resp = requests.post(
        dircap_uri,
        data={
            u"t": u"upload",
            u"when_done": u".",
        },
        files={
            u"file": FILE_CONTENTS,
        },
    )

    # confirm the file is in the directory
    resp = requests.get(
        dircap_uri,
        params={
            u"t": u"json",
        },
    )
    d = json.loads(resp.content)
    k, data = d
    assert k == u"dirnode"
    assert len(data['children']) == 1
    k, child = data['children'].values()[0]
    assert k == u"filenode"
    assert child['size'] == len(FILE_CONTENTS)

    # perform deep-stats on it...
    resp = requests.post(
        dircap_uri,
        data={
            u"t": u"start-deep-stats",
            u"ophandle": u"something_random",
        },
    )
    assert resp.status_code >= 200 and resp.status_code < 300

    # confirm we get information from the op .. after its done
    tries = 10
    while tries > 0:
        tries -= 1
        resp = requests.get(
            util.node_url(alice.node_dir, u"operations/something_random"),
        )
        d = json.loads(resp.content)
        if d['size-literal-files'] == len(FILE_CONTENTS):
            print("stats completed successfully")
            break
        else:
            print("{} != {}; waiting".format(d['size-literal-files'], len(FILE_CONTENTS)))
        time.sleep(.5)


def test_status(alice):
    """
    confirm we get something sensible from /status and the various sub-types
    """

    # upload a file
    # (because of the nature of the integration-tests, we can only
    # assert things about "our" file because we don't know what other
    # operations may have happened in the grid before our test runs).

    FILE_CONTENTS = u"all the Important Data of alice\n" * 1200

    resp = requests.put(
        util.node_url(alice.node_dir, u"uri"),
        data=FILE_CONTENTS,
    )
    cap = resp.text.strip()

    print("Uploaded data, cap={}".format(cap))
    resp = requests.get(
        util.node_url(alice.node_dir, u"uri/{}".format(urllib2.quote(cap))),
    )

    print("Downloaded {} bytes of data".format(len(resp.content)))
    assert resp.content == FILE_CONTENTS

    resp = requests.get(
        util.node_url(alice.node_dir, "status"),
    )
    dom = html5lib.parse(resp.content)

    hrefs = [
        a.get('href')
        for a in dom.iter(u'{http://www.w3.org/1999/xhtml}a')
    ]

    found_upload = False
    found_download = False
    for href in hrefs:
        if href.startswith(u"/") or not href:
            continue
        resp = requests.get(
            util.node_url(alice.node_dir, u"status/{}".format(href)),
        )
        if href.startswith(u'up'):
            assert "File Upload Status" in resp.content
            if "Total Size: {}".format(len(FILE_CONTENTS)) in resp.content:
                found_upload = True
        elif href.startswith(u'down'):
            assert "File Download Status" in resp.content
            if "Total Size: {}".format(len(FILE_CONTENTS)) in resp.content:
                found_download = True

                # download the specialized event information
                resp = requests.get(
                    util.node_url(alice.node_dir, u"status/{}/event_json".format(href)),
                )
                js = json.loads(resp.content)
                # there's usually just one "read" operation, but this can handle many ..
                total_bytes = sum([st['bytes_returned'] for st in js['read']], 0)
                assert total_bytes == len(FILE_CONTENTS)


    assert found_upload, "Failed to find the file we uploaded in the status-page"
    assert found_download, "Failed to find the file we downloaded in the status-page"


def test_directory_deep_check(alice):
    """
    use deep-check and confirm the result pages work
    """

    # create a directory
    resp = requests.post(
        util.node_url(alice.node_dir, u"uri"),
        params={
            u"t": u"mkdir",
            u"redirect_to_result": u"true",
        }
    )

    # get json information about our directory
    dircap_url = resp.url
    resp = requests.get(
        dircap_url,
        params={u"t": u"json"},
    )
    dir_meta = json.loads(resp.content)

    # upload a file of pangrams into the directory
    FILE_CONTENTS = u"Sphinx of black quartz, judge my vow.\n" * (2048*10)

    resp = requests.post(
        dircap_url,
        params={
            u"t": u"upload",
            u"upload-chk": u"upload-chk",
        },
        files={
            u"file": FILE_CONTENTS,
        }
    )
    cap0 = resp.content
    print("Uploaded data0, cap={}".format(cap0))

    # a different pangram
    FILE_CONTENTS = u"The five boxing wizards jump quickly.\n" * (2048*10)

    resp = requests.post(
        dircap_url,
        params={
            u"t": u"upload",
            u"upload-chk": u"upload-chk",
        },
        files={
            u"file": FILE_CONTENTS,
        }
    )
    cap1 = resp.content
    print("Uploaded data1, cap={}".format(cap1))

    resp = requests.get(
        util.node_url(alice.node_dir, u"uri/{}".format(urllib2.quote(cap0))),
        params={u"t": u"info"},
    )

    def check_repair_data(checkdata):
        assert checkdata["healthy"] is True
        assert checkdata["count-happiness"] == 4
        assert checkdata["count-good-share-hosts"] == 4
        assert checkdata["count-shares-good"] == 4
        assert checkdata["count-corrupt-shares"] == 0
        assert checkdata["list-corrupt-shares"] == []

    # do a "check" (once for HTML, then with JSON for easier asserts)
    resp = requests.post(
        dircap_url,
        params={
            u"t": u"check",
            u"return_to": u".",
            u"verify": u"true",
        }
    )
    resp = requests.post(
        dircap_url,
        params={
            u"t": u"check",
            u"return_to": u".",
            u"verify": u"true",
            u"output": u"JSON",
        }
    )
    check_repair_data(json.loads(resp.content)["results"])

    # "check and repair"
    resp = requests.post(
        dircap_url,
        params={
            u"t": u"check",
            u"return_to": u".",
            u"verify": u"true",
            u"repair": u"true",
        }
    )
    resp = requests.post(
        dircap_url,
        params={
            u"t": u"check",
            u"return_to": u".",
            u"verify": u"true",
            u"repair": u"true",
            u"output": u"JSON",
        }
    )
    check_repair_data(json.loads(resp.content)["post-repair-results"]["results"])

    # start a "deep check and repair"
    resp = requests.post(
        dircap_url,
        params={
            u"t": u"start-deep-check",
            u"return_to": u".",
            u"verify": u"on",
            u"repair": u"on",
            u"output": u"JSON",
            u"ophandle": u"deadbeef",
        }
    )
    deepcheck_uri = resp.url

    data = json.loads(resp.content)
    tries = 10
    while not data['finished'] and tries > 0:
        tries -= 1
        time.sleep(0.5)
        print("deep-check not finished, reloading")
        resp = requests.get(deepcheck_uri, params={u"output": "JSON"})
        data = json.loads(resp.content)
    print("deep-check finished")
    assert data[u"stats"][u"count-immutable-files"] == 1
    assert data[u"stats"][u"count-literal-files"] == 0
    assert data[u"stats"][u"largest-immutable-file"] == 778240
    assert data[u"count-objects-checked"] == 2

    # also get the HTML version
    resp = requests.post(
        dircap_url,
        params={
            u"t": u"start-deep-check",
            u"return_to": u".",
            u"verify": u"on",
            u"repair": u"on",
            u"ophandle": u"definitely_random",
        }
    )
    deepcheck_uri = resp.url

    # if the operations isn't done, there's an <H2> tag with the
    # reload link; otherwise there's only an <H1> tag..wait up to 5
    # seconds for this to respond properly.
    for _ in range(5):
        resp = requests.get(deepcheck_uri)
        dom = BeautifulSoup(resp.content, "html5lib")
        if dom.h1 and u'Results' in unicode(dom.h1.string):
            break
        if dom.h2 and dom.h2.a and u"Reload" in unicode(dom.h2.a.string):
            dom = None
            time.sleep(1)
    assert dom is not None, "Operation never completed"


def test_storage_info(storage_nodes):
    """
    retrieve and confirm /storage URI for one storage node
    """
    storage0 = storage_nodes[0]

    requests.get(
        util.node_url(storage0.node_dir, u"storage"),
    )


def test_storage_info_json(storage_nodes):
    """
    retrieve and confirm /storage?t=json URI for one storage node
    """
    storage0 = storage_nodes[0]

    resp = requests.get(
        util.node_url(storage0.node_dir, u"storage"),
        params={u"t": u"json"},
    )
    data = json.loads(resp.content)
    assert data[u"stats"][u"storage_server.reserved_space"] == 1000000000


def test_introducer_info(introducer):
    """
    retrieve and confirm /introducer URI for the introducer
    """
    resp = requests.get(
        util.node_url(introducer.node_dir, u""),
    )
    assert "Introducer" in resp.content

    resp = requests.get(
        util.node_url(introducer.node_dir, u""),
        params={u"t": u"json"},
    )
    data = json.loads(resp.content)
    assert "announcement_summary" in data
    assert "subscription_summary" in data


def test_mkdir_with_children(alice):
    """
    create a directory using ?t=mkdir-with-children
    """

    # create a file to put in our directory
    FILE_CONTENTS = u"some file contents\n" * 500
    resp = requests.put(
        util.node_url(alice.node_dir, u"uri"),
        data=FILE_CONTENTS,
    )
    filecap = resp.content.strip()

    # create a (sub) directory to put in our directory
    resp = requests.post(
        util.node_url(alice.node_dir, u"uri"),
        params={
            u"t": u"mkdir",
        }
    )
    # (we need both the read-write and read-only URIs I guess)
    dircap = resp.content
    dircap_obj = allmydata.uri.from_string(dircap)
    dircap_ro = dircap_obj.get_readonly().to_string()

    # create json information about our directory
    meta = {
        "a_file": [
            "filenode", {
                "ro_uri": filecap,
                "metadata": {
                    "ctime": 1202777696.7564139,
                    "mtime": 1202777696.7564139,
                    "tahoe": {
                        "linkcrtime": 1202777696.7564139,
                        "linkmotime": 1202777696.7564139
                    }
                }
            }
        ],
        "some_subdir": [
            "dirnode", {
                "rw_uri": dircap,
                "ro_uri": dircap_ro,
                "metadata": {
                    "ctime": 1202778102.7589991,
                    "mtime": 1202778111.2160511,
                    "tahoe": {
                        "linkcrtime": 1202777696.7564139,
                        "linkmotime": 1202777696.7564139
                    }
                }
            }
        ]
    }

    # create a new directory with one file and one sub-dir (all-at-once)
    resp = util.web_post(
        alice, u"uri",
        params={u"t": "mkdir-with-children"},
        data=json.dumps(meta),
    )
    assert resp.startswith("URI:DIR2")
    cap = allmydata.uri.from_string(resp)
    assert isinstance(cap, allmydata.uri.DirectoryURI)
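Almost every request in this new test file follows the same WebAPI convention: a node's web port serves HTML by default, and a `t` query argument selects an alternate operation or representation (`t=json`, `t=upload`, `t=check`, ...). A standalone sketch of that convention (the URL and dircap below are illustrative placeholders, not values from the source):

    # Sketch: the `t=` WebAPI convention exercised throughout test_web.py.
    import json
    import requests

    base = "http://localhost:9980"                       # illustrative web port
    resp = requests.get(base + "/uri/URI%3ADIR2%3A...",  # elided dircap
                        params={"t": "json"})
    kind, info = json.loads(resp.content)                # e.g. ["dirnode", {...}]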
integration/util.py
@@ -17,6 +17,7 @@ from allmydata.util.configutil import (
     set_config,
     write_config,
 )
+from allmydata import client
 
 import pytest_twisted
 
@@ -164,6 +165,15 @@ class TahoeProcess(object):
     def node_dir(self):
         return self._node_dir
 
+    def get_config(self):
+        return client.read_config(
+            self._node_dir,
+            u"portnum",
+        )
+
     def __str__(self):
         return "<TahoeProcess in '{}'>".format(self._node_dir)
 
 
 def _run_node(reactor, node_dir, request, magic_text):
     """
@@ -407,35 +417,39 @@ def _check_status(response):
     )
 
 
-def web_get(node_dir, uri_fragment, **kwargs):
+def web_get(tahoe, uri_fragment, **kwargs):
     """
-    Make a GET request to the webport of `node_dir`. This will look
-    like: `http://localhost:<webport>/<uri_fragment>`. All `kwargs`
-    are passed on to `requests.get`
+    Make a GET request to the webport of `tahoe` (a `TahoeProcess`,
+    usually from a fixture, e.g. `alice`). This will look like:
+    `http://localhost:<webport>/<uri_fragment>`. All `kwargs` are
+    passed on to `requests.get`
     """
-    url = node_url(node_dir, uri_fragment)
+    url = node_url(tahoe.node_dir, uri_fragment)
     resp = requests.get(url, **kwargs)
     _check_status(resp)
     return resp.content
 
 
-def web_post(node_dir, uri_fragment, **kwargs):
+def web_post(tahoe, uri_fragment, **kwargs):
     """
-    Make a POST request to the webport of `node_dir`. This will look
-    like: `http://localhost:<webport>/<uri_fragment>`. All `kwargs`
-    are passed on to `requests.post`
+    Make a POST request to the webport of `tahoe` (a `TahoeProcess`,
+    usually from a fixture, e.g. `alice`). This will look like:
+    `http://localhost:<webport>/<uri_fragment>`. All `kwargs` are
+    passed on to `requests.post`
     """
-    url = node_url(node_dir, uri_fragment)
+    url = node_url(tahoe.node_dir, uri_fragment)
    resp = requests.post(url, **kwargs)
     _check_status(resp)
     return resp.content
 
 
-def await_client_ready(process, timeout=10, liveness=60*2):
+def await_client_ready(tahoe, timeout=10, liveness=60*2):
     """
-    Uses the status API to wait for a client-type node to be
+    Uses the status API to wait for a client-type node (in `tahoe`, a
+    `TahoeProcess` instance usually from a fixture e.g. `alice`) to be
     'ready'. A client is deemed ready if:
-      - it answers http://<node_url>/statistics/?t=json/
+
+      - it answers `http://<node_url>/statistics/?t=json/`
+
       - there is at least one storage-server connected
       - every storage-server has a "last_received_data" and it is
        within the last `liveness` seconds
@@ -446,7 +460,7 @@ def await_client_ready(process, timeout=10, liveness=60*2):
     start = time.time()
     while (time.time() - start) < float(timeout):
         try:
-            data = web_get(process.node_dir, u"", params={u"t": u"json"})
+            data = web_get(tahoe, u"", params={u"t": u"json"})
             js = json.loads(data)
         except Exception as e:
             print("waiting because '{}'".format(e))
@@ -481,7 +495,7 @@ def await_client_ready(process, timeout=10, liveness=60*2):
     raise RuntimeError(
         "Waited {} seconds for {} to be 'ready' but it never was".format(
             timeout,
-            process.node_dir,
+            tahoe,
         )
     )
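With these changes a test passes the fixture object itself to the helpers rather than its `node_dir`. A short usage sketch under that assumption (`alice` stands in for the usual pytest fixture):

    # Sketch: calling the updated helpers with a TahoeProcess fixture.
    import json
    import util

    def test_node_answers_json(alice):
        util.await_client_ready(alice, timeout=10)
        data = util.web_get(alice, u"", params={u"t": u"json"})
        js = json.loads(data)   # the welcome page, as JSON
        assert js               # cursory, in the spirit of these tests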
misc/python3/depgraph.sh (new executable file, 34 lines)
@@ -0,0 +1,34 @@
#!/usr/bin/env bash

set -x
set -eo pipefail

TAHOE="${PWD}"
git clone -b gh-pages git@github.com:tahoe-lafs/tahoe-depgraph.git
cd tahoe-depgraph

# Generate the maybe-changed data.
python "${TAHOE}"/misc/python3/tahoe-depgraph.py "${TAHOE}"

if git diff-index --quiet HEAD; then
    echo "Declining to commit without any changes."
    exit 0
fi

git config user.name 'Build Automation'
git config user.email 'tahoe-dev@tahoe-lafs.org'

git add tahoe-deps.json tahoe-ported.json
git commit -m "\
Built from ${CIRCLE_REPOSITORY_URL}@${CIRCLE_SHA1}

tahoe-depgraph was $(git rev-parse HEAD)
"

if [ "${CIRCLE_BRANCH}" != "master" ]; then
    echo "Declining to update dependency graph for non-master build."
    exit 0
fi

# Publish it on GitHub.
git push -q origin gh-pages
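The `git diff-index --quiet HEAD` guard works because that command exits 0 when the working tree matches HEAD and non-zero when the freshly generated JSON differs. The same check from Python, as a sketch:

    # Sketch: the "anything to commit?" test depgraph.sh performs.
    import subprocess

    def tree_is_clean():
        return subprocess.call(["git", "diff-index", "--quiet", "HEAD"]) == 0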
misc/python3/ported-modules.txt (new file, 1 line)
@@ -0,0 +1 @@
allmydata.util.namespace
misc/python3/tahoe-depgraph.py (new file, 123 lines)
@@ -0,0 +1,123 @@
# Copyright 2004, 2009 Toby Dickenson
# Copyright 2014-2015 Aaron Gallagher
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject
# to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

import collections
import functools
import json
import os
import modulefinder
import sys
import tempfile

from twisted.python import reflect


class mymf(modulefinder.ModuleFinder):
    def __init__(self, *args, **kwargs):
        self._depgraph = collections.defaultdict(set)
        self._types = {}
        self._last_caller = None
        modulefinder.ModuleFinder.__init__(self, *args, **kwargs)

    def import_hook(self, name, caller=None, fromlist=None, level=None):
        old_last_caller = self._last_caller
        try:
            self._last_caller = caller
            return modulefinder.ModuleFinder.import_hook(
                self, name, caller, fromlist)
        finally:
            self._last_caller = old_last_caller

    def import_module(self, partnam, fqname, parent):
        if partnam.endswith('_py3'):
            return None
        r = modulefinder.ModuleFinder.import_module(
            self, partnam, fqname, parent)
        last_caller = self._last_caller
        if r is not None and 'allmydata' in r.__name__:
            if last_caller is None or last_caller.__name__ == '__main__':
                self._depgraph[fqname]
            else:
                self._depgraph[last_caller.__name__].add(fqname)
        return r

    def load_module(self, fqname, fp, pathname, (suffix, mode, type)):
        r = modulefinder.ModuleFinder.load_module(
            self, fqname, fp, pathname, (suffix, mode, type))
        if r is not None:
            self._types[r.__name__] = type
        return r

    def as_json(self):
        return {
            'depgraph': {
                name: dict.fromkeys(deps, 1)
                for name, deps in self._depgraph.iteritems()},
            'types': self._types,
        }


json_dump = functools.partial(
    json.dump, indent=4, separators=(',', ': '), sort_keys=True)


def main(target):
    mf = mymf(sys.path[:], 0, [])

    moduleNames = []
    for path, dirnames, filenames in os.walk(os.path.join(target, 'src', 'allmydata')):
        if 'test' in dirnames:
            dirnames.remove('test')
        for filename in filenames:
            if not filename.endswith('.py'):
                continue
            if filename in ('setup.py',):
                continue
            if '-' in filename:
                # a script like update-documentation.py
                continue
            if filename != '__init__.py':
                filepath = os.path.join(path, filename)
            else:
                filepath = path
            moduleNames.append(reflect.filenameToModuleName(filepath))

    with tempfile.NamedTemporaryFile() as tmpfile:
        for moduleName in moduleNames:
            tmpfile.write('import %s\n' % moduleName)
        tmpfile.flush()
        mf.run_script(tmpfile.name)

    with open('tahoe-deps.json', 'wb') as outfile:
        json_dump(mf.as_json(), outfile)
        outfile.write('\n')

    ported_modules_path = os.path.join(target, "misc", "python3", "ported-modules.txt")
    with open(ported_modules_path) as ported_modules:
        port_status = dict.fromkeys((line.strip() for line in ported_modules), "ported")
    with open('tahoe-ported.json', 'wb') as outfile:
        json_dump(port_status, outfile)
        outfile.write('\n')


if __name__ == '__main__':
    main(*sys.argv[1:])
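From `as_json` above, `tahoe-deps.json` is a two-key object: `depgraph` maps each importing module to a dict of the allmydata modules it imports (values are always 1), and `types` maps module names to modulefinder type codes. A sketch of consuming it (module names illustrative):

    # Sketch: reading the data depgraph.sh publishes.
    import json

    with open("tahoe-deps.json") as f:
        data = json.load(f)

    for importer, imported in sorted(data["depgraph"].items()):
        print("%s -> %d allmydata module(s)" % (importer, len(imported)))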
newsfragments/1893.minor (new empty file)

newsfragments/2942.feature (new file, 1 line)
@@ -0,0 +1 @@
Add a "tox -e draftnews" which runs towncrier in draft mode

newsfragments/3152.other (new file, 1 line)
@@ -0,0 +1 @@
Progress towards Python 3 compatibility is now visible at <https://tahoe-lafs.github.io/tahoe-depgraph/>.

newsfragments/3227.minor (new empty file)
newsfragments/3245.minor (new empty file)
newsfragments/3246.minor (new empty file)
newsfragments/3256.minor (new empty file)
setup.py
@@ -358,6 +358,8 @@ setup(name="tahoe-lafs", # also set in __init__.py
             "towncrier",
             "testtools",
             "fixtures",
+            "beautifulsoup4",
+            "html5lib",
         ] + tor_requires + i2p_requires,
     "tor": tor_requires,
     "i2p": i2p_requires,
src/allmydata/client.py
@@ -759,6 +759,12 @@ class _Client(node.Node, pollmixin.PollMixin):
     def get_long_tubid(self):
         return idlib.nodeid_b2a(self.nodeid)
 
+    def get_web_service(self):
+        """
+        :return: a reference to our web server
+        """
+        return self.getServiceNamed("webish")
+
     def _init_permutation_seed(self, ss):
         seed = self.config.get_config_from_file("permutation-seed")
         if not seed:
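`getServiceNamed` is the standard `twisted.application.service` lookup: any child service registered with a name on a `MultiService` parent can be fetched back by that name, which is what makes the `"webish"` lookup above work. A minimal self-contained sketch:

    # Sketch: the twisted named-service mechanism behind get_web_service().
    from twisted.application import service

    parent = service.MultiService()
    web = service.Service()
    web.setName("webish")
    web.setServiceParent(parent)
    assert parent.getServiceNamed("webish") is web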
src/allmydata/storage/server.py
@@ -1,4 +1,5 @@
 import os, re, weakref, struct, time
+import six
 
 from foolscap.api import Referenceable
 from twisted.application import service
@@ -391,8 +392,9 @@ class StorageServer(service.MultiService, Referenceable):
         bucket. Each lease is returned as a LeaseInfo instance.
 
         This method is not for client use.
 
-        """
+        :note: Only for immutable shares.
+        """
         # since all shares get the same lease data, we just grab the leases
         # from the first share
         try:
@@ -402,20 +404,39 @@ class StorageServer(service.MultiService, Referenceable):
         except StopIteration:
             return iter([])
 
-    def remote_slot_testv_and_readv_and_writev(self, storage_index,
-                                               secrets,
-                                               test_and_write_vectors,
-                                               read_vector):
-        start = time.time()
-        self.count("writev")
-        si_s = si_b2a(storage_index)
-        log.msg("storage: slot_writev %s" % si_s)
-        si_dir = storage_index_to_dir(storage_index)
-        (write_enabler, renew_secret, cancel_secret) = secrets
-        # shares exist if there is a file for them
-        bucketdir = os.path.join(self.sharedir, si_dir)
+    def get_slot_leases(self, storage_index):
+        """
+        This method is not for client use.
+
+        :note: Only for mutable shares.
+
+        :return: An iterable of the leases attached to this slot.
+        """
+        for _, share_filename in self._get_bucket_shares(storage_index):
+            share = MutableShareFile(share_filename)
+            return share.get_leases()
+        return []
+
+    def _collect_mutable_shares_for_storage_index(self, bucketdir, write_enabler, si_s):
+        """
+        Gather up existing mutable shares for the given storage index.
+
+        :param bytes bucketdir: The filesystem path containing shares for the
+            given storage index.
+
+        :param bytes write_enabler: The write enabler secret for the shares.
+
+        :param bytes si_s: The storage index in encoded (base32) form.
+
+        :raise BadWriteEnablerError: If the write enabler is not correct for
+            any of the collected shares.
+
+        :return dict[int, MutableShareFile]: The collected shares in a mapping
+            from integer share numbers to ``MutableShareFile`` instances.
+        """
+        shares = {}
         if os.path.isdir(bucketdir):
             # shares exist if there is a file for them
             for sharenum_s in os.listdir(bucketdir):
                 try:
                     sharenum = int(sharenum_s)
@@ -425,67 +446,197 @@ class StorageServer(service.MultiService, Referenceable):
                     msf = MutableShareFile(filename, self)
                     msf.check_write_enabler(write_enabler, si_s)
                     shares[sharenum] = msf
         # write_enabler is good for all existing shares.
+        return shares
 
-        # Now evaluate test vectors.
-        testv_is_good = True
+    def _evaluate_test_vectors(self, test_and_write_vectors, shares):
+        """
+        Execute test vectors against share data.
+
+        :param test_and_write_vectors: See
+            ``allmydata.interfaces.TestAndWriteVectorsForShares``.
+
+        :param dict[int, MutableShareFile] shares: The shares against which to
+            execute the vectors.
+
+        :return bool: ``True`` if and only if all of the test vectors succeed
+            against the given shares.
+        """
         for sharenum in test_and_write_vectors:
             (testv, datav, new_length) = test_and_write_vectors[sharenum]
             if sharenum in shares:
                 if not shares[sharenum].check_testv(testv):
                     self.log("testv failed: [%d]: %r" % (sharenum, testv))
-                    testv_is_good = False
-                    break
+                    return False
             else:
                 # compare the vectors against an empty share, in which all
                 # reads return empty strings.
                 if not EmptyShare().check_testv(testv):
                     self.log("testv failed (empty): [%d] %r" % (sharenum,
                                                                 testv))
-                    testv_is_good = False
-                    break
+                    return False
+        return True
 
-        # now gather the read vectors, before we do any writes
+    def _evaluate_read_vectors(self, read_vector, shares):
+        """
+        Execute read vectors against share data.
+
+        :param read_vector: See ``allmydata.interfaces.ReadVector``.
+
+        :param dict[int, MutableShareFile] shares: The shares against which to
+            execute the vector.
+
+        :return dict[int, bytes]: The data read from the shares.
+        """
         read_data = {}
         for sharenum, share in shares.items():
             read_data[sharenum] = share.readv(read_vector)
+        return read_data
+
+    def _evaluate_write_vectors(self, bucketdir, secrets, test_and_write_vectors, shares):
+        """
+        Execute write vectors against share data.
+
+        :param bytes bucketdir: The parent directory holding the shares. This
+            is removed if the last share is removed from it. If shares are
+            created, they are created in it.
+
+        :param secrets: A tuple of ``WriteEnablerSecret``,
+            ``LeaseRenewSecret``, and ``LeaseCancelSecret``. These secrets
+            are used to initialize new shares.
+
+        :param test_and_write_vectors: See
+            ``allmydata.interfaces.TestAndWriteVectorsForShares``.
+
+        :param dict[int, MutableShareFile]: The shares against which to
+            execute the vectors.
+
+        :return dict[int, MutableShareFile]: The shares which still exist
+            after applying the vectors.
+        """
+        remaining_shares = {}
+
+        for sharenum in test_and_write_vectors:
+            (testv, datav, new_length) = test_and_write_vectors[sharenum]
+            if new_length == 0:
+                if sharenum in shares:
+                    shares[sharenum].unlink()
+            else:
+                if sharenum not in shares:
+                    # allocate a new share
+                    allocated_size = 2000 # arbitrary, really
+                    share = self._allocate_slot_share(bucketdir, secrets,
+                                                      sharenum,
+                                                      allocated_size,
+                                                      owner_num=0)
+                    shares[sharenum] = share
+                shares[sharenum].writev(datav, new_length)
+                remaining_shares[sharenum] = shares[sharenum]
+
+            if new_length == 0:
+                # delete bucket directories that exist but are empty. They
+                # might not exist if a client showed up and asked us to
+                # truncate a share we weren't even holding.
+                if os.path.exists(bucketdir) and [] == os.listdir(bucketdir):
+                    os.rmdir(bucketdir)
+        return remaining_shares
+
+    def _make_lease_info(self, renew_secret, cancel_secret):
+        """
+        :return LeaseInfo: Information for a new lease for a share.
+        """
+        ownerid = 1 # TODO
+        expire_time = time.time() + 31*24*60*60   # one month
+        lease_info = LeaseInfo(ownerid,
+                               renew_secret, cancel_secret,
+                               expire_time, self.my_nodeid)
+        return lease_info
+
+    def _add_or_renew_leases(self, shares, lease_info):
+        """
+        Put the given lease onto the given shares.
+
+        :param dict[int, MutableShareFile] shares: The shares to put the lease
+            onto.
+
+        :param LeaseInfo lease_info: The lease to put on the shares.
+        """
+        for share in six.viewvalues(shares):
+            share.add_or_renew_lease(lease_info)
+
+    def slot_testv_and_readv_and_writev(
+            self,
+            storage_index,
+            secrets,
+            test_and_write_vectors,
+            read_vector,
+            renew_leases,
+    ):
+        """
+        Read data from shares and conditionally write some data to them.
+
+        :param bool renew_leases: If and only if this is ``True`` and the test
+            vectors pass then shares in this slot will also have an updated
+            lease applied to them.
+
+        See ``allmydata.interfaces.RIStorageServer`` for details about other
+        parameters and return value.
+        """
+        start = time.time()
+        self.count("writev")
+        si_s = si_b2a(storage_index)
+        log.msg("storage: slot_writev %s" % si_s)
+        si_dir = storage_index_to_dir(storage_index)
+        (write_enabler, renew_secret, cancel_secret) = secrets
+        bucketdir = os.path.join(self.sharedir, si_dir)
+
+        # If collection succeeds we know the write_enabler is good for all
+        # existing shares.
+        shares = self._collect_mutable_shares_for_storage_index(
+            bucketdir,
+            write_enabler,
+            si_s,
+        )
+
+        # Now evaluate test vectors.
+        testv_is_good = self._evaluate_test_vectors(
+            test_and_write_vectors,
+            shares,
+        )
+
+        # now gather the read vectors, before we do any writes
+        read_data = self._evaluate_read_vectors(
+            read_vector,
+            shares,
+        )
+
         if testv_is_good:
             # now apply the write vectors
-            for sharenum in test_and_write_vectors:
-                (testv, datav, new_length) = test_and_write_vectors[sharenum]
-                if new_length == 0:
-                    if sharenum in shares:
-                        shares[sharenum].unlink()
-                else:
-                    if sharenum not in shares:
-                        # allocate a new share
-                        allocated_size = 2000 # arbitrary, really
-                        share = self._allocate_slot_share(bucketdir, secrets,
-                                                          sharenum,
-                                                          allocated_size,
-                                                          owner_num=0)
-                        shares[sharenum] = share
-                    shares[sharenum].writev(datav, new_length)
-                    # and update the lease
-                    shares[sharenum].add_or_renew_lease(lease_info)
-
-                if new_length == 0:
-                    # delete empty bucket directories
-                    if not os.listdir(bucketdir):
-                        os.rmdir(bucketdir)
+            remaining_shares = self._evaluate_write_vectors(
+                bucketdir,
+                secrets,
+                test_and_write_vectors,
+                shares,
+            )
+            if renew_leases:
+                lease_info = self._make_lease_info(renew_secret, cancel_secret)
+                self._add_or_renew_leases(remaining_shares, lease_info)
 
         # all done
         self.add_latency("writev", time.time() - start)
         return (testv_is_good, read_data)
 
+    def remote_slot_testv_and_readv_and_writev(self, storage_index,
+                                               secrets,
+                                               test_and_write_vectors,
+                                               read_vector):
+        return self.slot_testv_and_readv_and_writev(
+            storage_index,
+            secrets,
+            test_and_write_vectors,
+            read_vector,
+            renew_leases=True,
+        )
+
     def _allocate_slot_share(self, bucketdir, secrets, sharenum,
                              allocated_size, owner_num=0):
         (write_enabler, renew_secret, cancel_secret) = secrets
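The vector arguments threaded through the methods above have a small, fixed shape (see `allmydata.interfaces`). A hedged sketch with illustrative values — the secrets and data here are placeholders, and `"eq"` is the commonly used test operator:

    # Sketch: argument shapes for slot_testv_and_readv_and_writev.
    secrets = (b"WE" * 16, b"RS" * 16, b"CS" * 16)   # write-enabler, renew, cancel

    test_and_write_vectors = {
        0: (                          # share number -> (testv, datav, new_length)
            [(0, 4, "eq", b"abcd")],  # testv: (offset, length, operator, specimen)
            [(0, b"new data")],       # datav: (offset, data) write vectors
            None,                     # new_length: None leaves the share size alone
        ),
    }
    read_vector = [(0, 100)]          # (offset, length) reads against every share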
src/allmydata/test/test_introducer.py
@@ -11,6 +11,7 @@ from testtools.matchers import (
 from twisted.internet import defer, address
 from twisted.python import log
 from twisted.python.filepath import FilePath
+from twisted.web.template import flattenString
 
 from foolscap.api import Tub, Referenceable, fireEventually, flushEventualQueue
 from twisted.application import service
@@ -592,7 +593,12 @@ class SystemTest(SystemTestMixin, AsyncTestCase):
             # now check the web status, make sure it renders without error
             ir = introweb.IntroducerRoot(self.parent)
             self.parent.nodeid = "NODEID"
-            text = ir.renderSynchronously().decode("utf-8")
+            log.msg("_check1 done")
+            return flattenString(None, ir._create_element())
+        d.addCallback(_check1)
+
+        def _check2(flattened_bytes):
+            text = flattened_bytes.decode("utf-8")
             self.assertIn(NICKNAME % "0", text) # a v2 client
             self.assertIn(NICKNAME % "1", text) # another v2 client
             for i in range(NUM_STORAGE):
@@ -601,8 +607,8 @@ class SystemTest(SystemTestMixin, AsyncTestCase):
                 # make sure there isn't a double-base32ed string too
                 self.assertNotIn(idlib.nodeid_b2a(printable_serverids[i]), text,
                                  (i,printable_serverids[i],text))
-            log.msg("_check1 done")
-        d.addCallback(_check1)
+            log.msg("_check2 done")
+        d.addCallback(_check2)
 
         # force an introducer reconnect, by shutting down the Tub it's using
         # and starting a new Tub (with the old introducer). Everybody should
src/allmydata/test/test_storage.py
@@ -1369,6 +1369,81 @@ class MutableServer(unittest.TestCase):
         self.failUnless(os.path.exists(prefixdir), prefixdir)
         self.failIf(os.path.exists(bucketdir), bucketdir)
 
+    def test_writev_without_renew_lease(self):
+        """
+        The helper method ``slot_testv_and_readv_and_writev`` does not renew
+        leases if ``False`` is passed for the ``renew_leases`` parameter.
+        """
+        ss = self.create("test_writev_without_renew_lease")
+
+        storage_index = "si2"
+        secrets = (
+            self.write_enabler(storage_index),
+            self.renew_secret(storage_index),
+            self.cancel_secret(storage_index),
+        )
+
+        sharenum = 3
+        datav = [(0, b"Hello, world")]
+
+        ss.slot_testv_and_readv_and_writev(
+            storage_index=storage_index,
+            secrets=secrets,
+            test_and_write_vectors={
+                sharenum: ([], datav, None),
+            },
+            read_vector=[],
+            renew_leases=False,
+        )
+        leases = list(ss.get_slot_leases(storage_index))
+        self.assertEqual([], leases)
+
+    def test_get_slot_leases_empty_slot(self):
+        """
+        When ``get_slot_leases`` is called for a slot for which the server has no
+        shares, it returns an empty iterable.
+        """
+        ss = self.create(b"test_get_slot_leases_empty_slot")
+        self.assertEqual(
+            list(ss.get_slot_leases(b"si1")),
+            [],
+        )
+
+    def test_remove_non_present(self):
+        """
+        A write vector which would remove a share completely is applied as a no-op
+        by a server which does not have the share.
+        """
+        ss = self.create("test_remove_non_present")
+
+        storage_index = "si1"
+        secrets = (
+            self.write_enabler(storage_index),
+            self.renew_secret(storage_index),
+            self.cancel_secret(storage_index),
+        )
+
+        sharenum = 3
+        testv = []
+        datav = []
+        new_length = 0
+        read_vector = []
+
+        # We don't even need to create any shares to exercise this
+        # functionality. Just go straight to sending a truncate-to-zero
+        # write.
+        testv_is_good, read_data = ss.remote_slot_testv_and_readv_and_writev(
+            storage_index=storage_index,
+            secrets=secrets,
+            test_and_write_vectors={
+                sharenum: (testv, datav, new_length),
+            },
+            read_vector=read_vector,
+        )
+
+        self.assertTrue(testv_is_good)
+        self.assertEqual({}, read_data)
+
 
 class MDMFProxies(unittest.TestCase, ShouldFailMixin):
     def setUp(self):
src/allmydata/test/web/common.py
@@ -1,6 +1,29 @@
 
+import re
+
 unknown_rwcap = u"lafs://from_the_future_rw_\u263A".encode('utf-8')
 unknown_rocap = u"ro.lafs://readonly_from_the_future_ro_\u263A".encode('utf-8')
 unknown_immcap = u"imm.lafs://immutable_from_the_future_imm_\u263A".encode('utf-8')
 
 FAVICON_MARKUP = '<link href="/icon.png" rel="shortcut icon" />'
+
+
+def assert_soup_has_favicon(testcase, soup):
+    """
+    Using a ``TestCase`` object ``testcase``, assert that the passed in
+    ``BeautifulSoup`` object ``soup`` contains the tahoe favicon link.
+    """
+    links = soup.find_all(u'link', rel=u'shortcut icon')
+    testcase.assert_(
+        any(t[u'href'] == u'/icon.png' for t in links), soup)
+
+
+def assert_soup_has_text(testcase, soup, text):
+    """
+    Using a ``TestCase`` object ``testcase``, assert that the passed in
+    ``BeautifulSoup`` object ``soup`` contains the passed in ``text`` anywhere
+    as a text node.
+    """
+    testcase.assert_(
+        soup.find_all(string=re.compile(re.escape(text))),
+        soup)
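Usage sketch for the two new helpers (the markup is illustrative; inside a `TestCase` the calls take `self` first):

    # Sketch: exercising the soup helpers outside the introducer tests.
    from bs4 import BeautifulSoup

    html = ('<html><head><link href="/icon.png" rel="shortcut icon" /></head>'
            '<body>Welcome to the Tahoe-LAFS Introducer</body></html>')
    soup = BeautifulSoup(html, "html5lib")
    # assert_soup_has_favicon(self, soup)
    # assert_soup_has_text(self, soup, u'Welcome to the Tahoe-LAFS Introducer')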
src/allmydata/test/web/test_introducer.py
@@ -1,3 +1,4 @@
+from bs4 import BeautifulSoup
 from os.path import join
 from twisted.trial import unittest
 from twisted.internet import reactor
@@ -6,13 +7,15 @@ from twisted.internet import defer
 from allmydata.introducer import create_introducer
 from allmydata import node
 from .common import (
-    FAVICON_MARKUP,
+    assert_soup_has_favicon,
+    assert_soup_has_text,
 )
 from ..common import (
     SameProcessStreamEndpointAssigner,
 )
 from ..common_web import do_http
 
 
 class IntroducerWeb(unittest.TestCase):
     def setUp(self):
         self.node = None
@@ -47,7 +50,8 @@ class IntroducerWeb(unittest.TestCase):
 
         url = "http://localhost:%d/" % self.ws.getPortnum()
         res = yield do_http("get", url)
-        self.failUnlessIn('Welcome to the Tahoe-LAFS Introducer', res)
-        self.failUnlessIn(FAVICON_MARKUP, res)
-        self.failUnlessIn('Page rendered at', res)
-        self.failUnlessIn('Tahoe-LAFS code imported from:', res)
+        soup = BeautifulSoup(res, 'html5lib')
+        assert_soup_has_text(self, soup, u'Welcome to the Tahoe-LAFS Introducer')
+        assert_soup_has_favicon(self, soup)
+        assert_soup_has_text(self, soup, u'Page rendered at')
+        assert_soup_has_text(self, soup, u'Tahoe-LAFS code imported from:')
src/allmydata/test/test_web.py
@@ -4531,7 +4531,7 @@ class Web(WebMixin, WebErrorMixin, testutil.StallMixin, testutil.ReallyEqualMixi
         res = yield self.GET("/operations/128?t=status&output=JSON")
         data = json.loads(res)
         self.failUnless("finished" in data, res)
-        monitor = self.ws.root.child_operations.handles["128"][0]
+        monitor = self.ws.getServiceNamed("operations").handles["128"][0]
 
         res = yield self.POST("/operations/128?t=cancel&output=JSON")
         data = json.loads(res)
src/allmydata/web/common.py
@@ -2,10 +2,9 @@
 import time
 import json
 
-from twisted.web import http, server, resource
+from twisted.web import http, server, resource, template
 from twisted.python import log
 from twisted.python.failure import Failure
-from zope.interface import Interface
 from nevow import loaders, appserver
 from nevow.rend import Page
 from nevow.inevow import IRequest
@@ -39,9 +38,6 @@ def get_filenode_metadata(filenode):
         metadata['size'] = size
     return metadata
 
-class IOpHandleTable(Interface):
-    pass
-
 def getxmlfile(name):
     return loaders.xmlfile(resource_filename('allmydata.web', '%s' % name))
@@ -460,6 +456,102 @@ class MultiFormatPage(Page):
         return lambda ctx: renderer(IRequest(ctx))
 
 
+class MultiFormatResource(resource.Resource, object):
+    """
+    ``MultiFormatResource`` is a ``resource.Resource`` that can be rendered in
+    a number of different formats.
+
+    Rendered format is controlled by a query argument (given by
+    ``self.formatArgument``). Different resources may support different
+    formats but ``json`` is a pretty common one. ``html`` is the default
+    format if nothing else is given as the ``formatDefault``.
+    """
+    formatArgument = "t"
+    formatDefault = None
+
+    def render(self, req):
+        """
+        Dispatch to a renderer for a particular format, as selected by a query
+        argument.
+
+        A renderer for the format given by the query argument matching
+        ``formatArgument`` will be selected and invoked. render_HTML will be
+        used as a default if no format is selected (either by query arguments
+        or by ``formatDefault``).
+
+        :return: The result of the selected renderer.
+        """
+        t = get_arg(req, self.formatArgument, self.formatDefault)
+        renderer = self._get_renderer(t)
+        return renderer(req)
+
+    def _get_renderer(self, fmt):
+        """
+        Get the renderer for the indicated format.
+
+        :param str fmt: The format. If a method with a prefix of ``render_``
+            and a suffix of this format (upper-cased) is found, it will be
+            used.
+
+        :return: A callable which takes a twisted.web Request and renders a
+            response.
+        """
+        renderer = None
+
+        if fmt is not None:
+            try:
+                renderer = getattr(self, "render_{}".format(fmt.upper()))
+            except AttributeError:
+                raise WebError(
+                    "Unknown {} value: {!r}".format(self.formatArgument, fmt),
+                )
+
+        if renderer is None:
+            renderer = self.render_HTML
+
+        return renderer
+
+
+class SlotsSequenceElement(template.Element):
+    """
+    ``SlotsSequenceElement`` is a minimal port of nevow's sequence renderer for
+    twisted.web.template.
+
+    Tags passed in to be templated will have two renderers available: ``item``
+    and ``tag``.
+    """
+
+    def __init__(self, tag, seq):
+        self.loader = template.TagLoader(tag)
+        self.seq = seq
+
+    @template.renderer
+    def item(self, request, tag):
+        """
+        A template renderer for each sequence item.
+
+        ``tag`` will be cloned for each item in the sequence provided, and its
+        slots filled from the sequence item. Each item must be dict-like enough
+        for ``tag.fillSlots(**item)``. Each cloned tag will be siblings with no
+        separator between them.
+        """
+        for item in self.seq:
+            yield tag.clone(deep=False).fillSlots(**item)
+
+    @template.renderer
+    def empty(self, request, tag):
+        """
+        A template renderer for empty sequences.
+
+        This renderer will either return ``tag`` unmodified if the provided
+        sequence has no items, or return the empty string if there are any
+        items.
+        """
+        if len(self.seq) > 0:
+            return u''
+        else:
+            return tag
+
+
 class TokenOnlyWebApi(resource.Resource, object):
     """
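A minimal sketch of a `MultiFormatResource` subclass (a hypothetical resource, not one from this commit): `GET /thing` falls back to `render_HTML`, `GET /thing?t=json` dispatches to `render_JSON`, and an unknown `t=` value raises `WebError`:

    # Sketch: how a resource opts into the t= dispatch defined above.
    class ThingResource(MultiFormatResource):

        def render_HTML(self, req):
            return "<html><body>thing</body></html>"

        def render_JSON(self, req):
            req.setHeader("content-type", "application/json")
            return '{"thing": true}'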
src/allmydata/web/directory.py
@@ -22,7 +22,7 @@ from allmydata.blacklist import ProhibitedNode
 from allmydata.monitor import Monitor, OperationCancelledError
 from allmydata import dirnode
 from allmydata.web.common import text_plain, WebError, \
-     IOpHandleTable, NeedOperationHandleError, \
+     NeedOperationHandleError, \
      boolean_of_arg, get_arg, get_root, parse_replace_arg, \
      should_create_intermediate_directories, \
      getxmlfile, RenderMixin, humanize_failure, convert_children_json, \
@@ -62,6 +62,7 @@ class DirectoryNodeHandler(RenderMixin, rend.Page, ReplaceMeMixin):
         self.node = node
         self.parentnode = parentnode
         self.name = name
+        self._operations = client.get_web_service().get_operations()
 
     def childFactory(self, ctx, name):
         name = name.decode("utf-8")
@@ -442,9 +443,8 @@ class DirectoryNodeHandler(RenderMixin, rend.Page, ReplaceMeMixin):
         return d
 
     def _start_operation(self, monitor, renderer, ctx):
-        table = IOpHandleTable(ctx)
-        table.add_monitor(ctx, monitor, renderer)
-        return table.redirect_to(ctx)
+        self._operations.add_monitor(ctx, monitor, renderer)
+        return self._operations.redirect_to(ctx)
 
     def _POST_start_deep_check(self, ctx):
         # check this directory and everything reachable from it
src/allmydata/web/download-status.xhtml
@@ -16,7 +16,6 @@
   <li>Total Size: <span n:render="total_size"/></li>
   <li>Progress: <span n:render="progress"/></li>
   <li>Status: <span n:render="status"/></li>
-  <li><span n:render="timeline_link"/></li>
 </ul>
 
 <div n:render="events"></div>
src/allmydata/web/info.py
@@ -1,17 +1,47 @@
 
 import os, urllib
 
-from nevow import rend, tags as T
-from nevow.inevow import IRequest
+from twisted.python.filepath import FilePath
+from twisted.web.template import tags as T, Element, renderElement, XMLFile, renderer
 
 from allmydata.util import base32
 from allmydata.interfaces import IDirectoryNode, IFileNode, MDMF_VERSION
-from allmydata.web.common import getxmlfile
+from allmydata.web.common import MultiFormatResource
 from allmydata.mutable.common import UnrecoverableFileError # TODO: move
 
-class MoreInfo(rend.Page):
-    addSlash = False
-    docFactory = getxmlfile("info.xhtml")
+
+class MoreInfo(MultiFormatResource):
+    """
+    A ``Resource`` for describing more information about a node.
+
+    :param node Node: The node to describe.
+    """
+
+    def __init__(self, node):
+        super(MoreInfo, self).__init__()
+        self.node = node
+
+    def render_HTML(self, req):
+        """
+        Render an HTML template describing this node.
+        """
+        return renderElement(req, MoreInfoElement(self.node))
+
+    render_INFO = render_HTML
+
+
+class MoreInfoElement(Element):
+    """
+    An ``Element`` HTML template which can be flattened to describe this node.
+
+    :param Node node: The node to describe.
+    """
+
+    loader = XMLFile(FilePath(__file__).sibling("info.xhtml"))
+
+    def __init__(self, node):
+        super(MoreInfoElement, self).__init__()
+        self.original = node
+
     def abbrev(self, storage_index_or_none):
         if storage_index_or_none:
@@ -38,27 +68,32 @@ class MoreInfo(rend.Page):
             return "immutable LIT file"
         return "unknown"
 
-    def render_title(self, ctx, data):
+    @renderer
+    def title(self, req, tag):
         node = self.original
         si = node.get_storage_index()
         t = "More Info for %s" % self.get_type()
         if si:
             t += " (SI=%s)" % self.abbrev(si)
-        return ctx.tag[t]
+        return tag(t)
 
-    def render_header(self, ctx, data):
-        return self.render_title(ctx, data)
+    @renderer
+    def header(self, req, tag):
+        return self.title(req, tag)
 
-    def render_type(self, ctx, data):
-        return ctx.tag[self.get_type()]
+    @renderer
+    def type(self, req, tag):
+        return tag(self.get_type())
 
-    def render_si(self, ctx, data):
+    @renderer
+    def si(self, req, tag):
         si = self.original.get_storage_index()
         if not si:
             return "None"
-        return ctx.tag[base32.b2a(si)]
+        return tag(base32.b2a(si))
 
-    def render_size(self, ctx, data):
+    @renderer
+    def size(self, req, tag):
         node = self.original
         d = node.get_current_size()
         def _no_size(size):
@@ -70,67 +105,73 @@ class MoreInfo(rend.Page):
             f.trap(UnrecoverableFileError)
             return "?"
         d.addErrback(_handle_unrecoverable)
-        d.addCallback(lambda size: ctx.tag[size])
+        d.addCallback(lambda size: tag(str(size)))
         return d
 
-    def render_directory_writecap(self, ctx, data):
+    @renderer
+    def directory_writecap(self, req, tag):
         node = self.original
         if not IDirectoryNode.providedBy(node):
             return ""
         if node.is_readonly():
             return ""
-        return ctx.tag[node.get_uri()]
+        return tag(node.get_uri())
 
-    def render_directory_readcap(self, ctx, data):
+    @renderer
+    def directory_readcap(self, req, tag):
         node = self.original
         if not IDirectoryNode.providedBy(node):
             return ""
-        return ctx.tag[node.get_readonly_uri()]
+        return tag(node.get_readonly_uri())
 
-    def render_directory_verifycap(self, ctx, data):
+    @renderer
+    def directory_verifycap(self, req, tag):
         node = self.original
         if not IDirectoryNode.providedBy(node):
             return ""
         verifier = node.get_verify_cap()
         if verifier:
-            return ctx.tag[node.get_verify_cap().to_string()]
+            return tag(node.get_verify_cap().to_string())
         return ""
 
-    def render_file_writecap(self, ctx, data):
+    @renderer
+    def file_writecap(self, req, tag):
         node = self.original
         if IDirectoryNode.providedBy(node):
             node = node._node
         write_uri = node.get_write_uri()
         if not write_uri:
             return ""
-        return ctx.tag[write_uri]
+        return tag(write_uri)
 
-    def render_file_readcap(self, ctx, data):
+    @renderer
+    def file_readcap(self, req, tag):
         node = self.original
         if IDirectoryNode.providedBy(node):
             node = node._node
         read_uri = node.get_readonly_uri()
         if not read_uri:
             return ""
-        return ctx.tag[read_uri]
+        return tag(read_uri)
 
-    def render_file_verifycap(self, ctx, data):
+    @renderer
+    def file_verifycap(self, req, tag):
         node = self.original
         if IDirectoryNode.providedBy(node):
             node = node._node
         verifier = node.get_verify_cap()
         if verifier:
-            return ctx.tag[node.get_verify_cap().to_string()]
+            return tag(node.get_verify_cap().to_string())
         return ""
 
-    def get_root(self, ctx):
-        req = IRequest(ctx)
+    def get_root(self, req):
         # the addSlash=True gives us one extra (empty) segment
         depth = len(req.prepath) + len(req.postpath) - 1
         link = "/".join([".."] * depth)
         return link
 
-    def render_raw_link(self, ctx, data):
+    @renderer
+    def raw_link(self, req, tag):
         node = self.original
         if IDirectoryNode.providedBy(node):
             node = node._node
@ -138,147 +179,156 @@ class MoreInfo(rend.Page):
|
||||
pass
|
||||
else:
|
||||
return ""
|
||||
root = self.get_root(ctx)
|
||||
root = self.get_root(req)
|
||||
quoted_uri = urllib.quote(node.get_uri())
|
||||
text_plain_url = "%s/file/%s/@@named=/raw.txt" % (root, quoted_uri)
|
||||
return T.li["Raw data as ", T.a(href=text_plain_url)["text/plain"]]
|
||||
return T.li("Raw data as ", T.a("text/plain", href=text_plain_url))
|
||||
|
||||
def render_is_checkable(self, ctx, data):
|
||||
@renderer
|
||||
def is_checkable(self, req, tag):
|
||||
node = self.original
|
||||
si = node.get_storage_index()
|
||||
if si:
|
||||
return ctx.tag
|
||||
return tag
|
||||
# don't show checker button for LIT files
|
||||
return ""
|
||||
|
||||
def render_check_form(self, ctx, data):
|
||||
@renderer
|
||||
def check_form(self, req, tag):
|
||||
node = self.original
|
||||
quoted_uri = urllib.quote(node.get_uri())
|
||||
target = self.get_root(ctx) + "/uri/" + quoted_uri
|
||||
target = self.get_root(req) + "/uri/" + quoted_uri
|
||||
if IDirectoryNode.providedBy(node):
|
||||
target += "/"
|
||||
check = T.form(action=target, method="post",
|
||||
enctype="multipart/form-data")[
|
||||
T.fieldset[
|
||||
enctype="multipart/form-data")(
|
||||
T.fieldset(
|
||||
T.input(type="hidden", name="t", value="check"),
|
||||
T.input(type="hidden", name="return_to", value="."),
|
||||
T.legend(class_="freeform-form-label")["Check on this object"],
|
||||
T.div[
|
||||
T.legend("Check on this object", class_="freeform-form-label"),
|
||||
T.div(
|
||||
"Verify every bit? (EXPENSIVE):",
|
||||
T.input(type="checkbox", name="verify"),
|
||||
],
|
||||
T.div["Repair any problems?: ",
|
||||
T.input(type="checkbox", name="repair")],
|
||||
T.div["Add/renew lease on all shares?: ",
|
||||
T.input(type="checkbox", name="add-lease")],
|
||||
T.div["Emit results in JSON format?: ",
|
||||
T.input(type="checkbox", name="output", value="JSON")],
|
||||
),
|
||||
T.div("Repair any problems?: ",
|
||||
T.input(type="checkbox", name="repair")),
|
||||
T.div("Add/renew lease on all shares?: ",
|
||||
T.input(type="checkbox", name="add-lease")),
|
||||
T.div("Emit results in JSON format?: ",
|
||||
T.input(type="checkbox", name="output", value="JSON")),
|
||||
|
||||
T.input(type="submit", value="Check"),
|
||||
|
||||
]]
|
||||
return ctx.tag[check]
|
||||
))
|
||||
return tag(check)
|
||||
|
||||
def render_is_mutable_file(self, ctx, data):
|
||||
@renderer
|
||||
def is_mutable_file(self, req, tag):
|
||||
node = self.original
|
||||
if IDirectoryNode.providedBy(node):
|
||||
return ""
|
||||
if (IFileNode.providedBy(node)
|
||||
and node.is_mutable() and not node.is_readonly()):
|
||||
return ctx.tag
|
||||
return tag
|
||||
return ""
|
||||
|
||||
def render_overwrite_form(self, ctx, data):
|
||||
@renderer
|
||||
def overwrite_form(self, req, tag):
|
||||
node = self.original
|
||||
root = self.get_root(ctx)
|
||||
root = self.get_root(req)
|
||||
action = "%s/uri/%s" % (root, urllib.quote(node.get_uri()))
|
||||
done_url = "%s/uri/%s?t=info" % (root, urllib.quote(node.get_uri()))
|
||||
overwrite = T.form(action=action, method="post",
|
||||
enctype="multipart/form-data")[
|
||||
T.fieldset[
|
||||
enctype="multipart/form-data")(
|
||||
T.fieldset(
|
||||
T.input(type="hidden", name="t", value="upload"),
|
||||
T.input(type='hidden', name='when_done', value=done_url),
|
||||
T.legend(class_="freeform-form-label")["Overwrite"],
|
||||
T.legend("Overwrite", class_="freeform-form-label"),
|
||||
"Upload new contents: ",
|
||||
T.input(type="file", name="file"),
|
||||
" ",
|
||||
T.input(type="submit", value="Replace Contents")
|
||||
]]
|
||||
return ctx.tag[overwrite]
|
||||
))
|
||||
return tag(overwrite)
|
||||
|
||||
def render_is_directory(self, ctx, data):
|
||||
@renderer
|
||||
def is_directory(self, req, tag):
|
||||
node = self.original
|
||||
if IDirectoryNode.providedBy(node):
|
||||
return ctx.tag
|
||||
return tag
|
||||
return ""
|
||||
|
||||
def render_deep_check_form(self, ctx, data):
|
||||
@renderer
|
||||
def deep_check_form(self, req, tag):
|
||||
ophandle = base32.b2a(os.urandom(16))
|
||||
deep_check = T.form(action=".", method="post",
|
||||
enctype="multipart/form-data")[
|
||||
T.fieldset[
|
||||
enctype="multipart/form-data")(
|
||||
T.fieldset(
|
||||
T.input(type="hidden", name="t", value="start-deep-check"),
|
||||
T.input(type="hidden", name="return_to", value="."),
|
||||
T.legend(class_="freeform-form-label")["Run a deep-check operation (EXPENSIVE)"],
|
||||
T.div[
|
||||
T.legend("Run a deep-check operation (EXPENSIVE)", class_="freeform-form-label"),
|
||||
T.div(
|
||||
"Verify every bit? (EVEN MORE EXPENSIVE):",
|
||||
T.input(type="checkbox", name="verify"),
|
||||
],
|
||||
T.div["Repair any problems?: ",
|
||||
T.input(type="checkbox", name="repair")],
|
||||
T.div["Add/renew lease on all shares?: ",
|
||||
T.input(type="checkbox", name="add-lease")],
|
||||
T.div["Emit results in JSON format?: ",
|
||||
T.input(type="checkbox", name="output", value="JSON")],
|
||||
),
|
||||
T.div("Repair any problems?: ",
|
||||
T.input(type="checkbox", name="repair")),
|
||||
T.div("Add/renew lease on all shares?: ",
|
||||
T.input(type="checkbox", name="add-lease")),
|
||||
T.div("Emit results in JSON format?: ",
|
||||
T.input(type="checkbox", name="output", value="JSON")),
|
||||
|
||||
T.input(type="hidden", name="ophandle", value=ophandle),
|
||||
T.input(type="submit", value="Deep-Check"),
|
||||
|
||||
]]
|
||||
return ctx.tag[deep_check]
|
||||
))
|
||||
return tag(deep_check)
|
||||
|
||||
def render_deep_size_form(self, ctx, data):
|
||||
@renderer
|
||||
def deep_size_form(self, req, tag):
|
||||
ophandle = base32.b2a(os.urandom(16))
|
||||
deep_size = T.form(action=".", method="post",
|
||||
enctype="multipart/form-data")[
|
||||
T.fieldset[
|
||||
enctype="multipart/form-data")(
|
||||
T.fieldset(
|
||||
T.input(type="hidden", name="t", value="start-deep-size"),
|
||||
T.legend(class_="freeform-form-label")["Run a deep-size operation (EXPENSIVE)"],
|
||||
T.legend("Run a deep-size operation (EXPENSIVE)", class_="freeform-form-label"),
|
||||
T.input(type="hidden", name="ophandle", value=ophandle),
|
||||
T.input(type="submit", value="Deep-Size"),
|
||||
]]
|
||||
return ctx.tag[deep_size]
|
||||
))
|
||||
return tag(deep_size)
|
||||
|
||||
def render_deep_stats_form(self, ctx, data):
|
||||
@renderer
|
||||
def deep_stats_form(self, req, tag):
|
||||
ophandle = base32.b2a(os.urandom(16))
|
||||
deep_stats = T.form(action=".", method="post",
|
||||
enctype="multipart/form-data")[
|
||||
T.fieldset[
|
||||
enctype="multipart/form-data")(
|
||||
T.fieldset(
|
||||
T.input(type="hidden", name="t", value="start-deep-stats"),
|
||||
T.legend(class_="freeform-form-label")["Run a deep-stats operation (EXPENSIVE)"],
|
||||
T.legend("Run a deep-stats operation (EXPENSIVE)", class_="freeform-form-label"),
|
||||
T.input(type="hidden", name="ophandle", value=ophandle),
|
||||
T.input(type="submit", value="Deep-Stats"),
|
||||
]]
|
||||
return ctx.tag[deep_stats]
|
||||
))
|
||||
return tag(deep_stats)
|
||||
|
||||
def render_manifest_form(self, ctx, data):
|
||||
@renderer
|
||||
def manifest_form(self, req, tag):
|
||||
ophandle = base32.b2a(os.urandom(16))
|
||||
manifest = T.form(action=".", method="post",
|
||||
enctype="multipart/form-data")[
|
||||
T.fieldset[
|
||||
enctype="multipart/form-data")(
|
||||
T.fieldset(
|
||||
T.input(type="hidden", name="t", value="start-manifest"),
|
||||
T.legend(class_="freeform-form-label")["Run a manifest operation (EXPENSIVE)"],
|
||||
T.div["Output Format: ",
|
||||
T.legend("Run a manifest operation (EXPENSIVE)", class_="freeform-form-label"),
|
||||
T.div("Output Format: ",
|
||||
T.select(name="output")
|
||||
[ T.option(value="html", selected="true")["HTML"],
|
||||
T.option(value="text")["text"],
|
||||
T.option(value="json")["JSON"],
|
||||
],
|
||||
],
|
||||
( T.option("HTML", value="html", selected="true"),
|
||||
T.option("text", value="text"),
|
||||
T.option("JSON", value="json"),
|
||||
),
|
||||
),
|
||||
T.input(type="hidden", name="ophandle", value=ophandle),
|
||||
T.input(type="submit", value="Manifest"),
|
||||
]]
|
||||
return ctx.tag[manifest]
|
||||
))
|
||||
return tag(manifest)
|
||||
|
||||
|
||||
# TODO: edge metadata
|
||||
|
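The renderers above all follow one mechanical translation: a Nevow
"def render_foo(self, ctx, data)" that returns ctx.tag[children] becomes a
twisted.web.template method decorated with @renderer, taking (request, tag)
and returning tag(children), while stan-style T.tag[...] child syntax becomes
call syntax T.tag(...). A minimal self-contained sketch of the new style (a
toy GreetingElement, not Tahoe code):

from twisted.web.template import Element, XMLString, flattenString, renderer

class GreetingElement(Element):
    """Toy Element showing the twisted.web.template renderer style."""
    loader = XMLString(
        '<div xmlns:t="http://twistedmatrix.com/ns/twisted.web.template/0.1">'
        '<span t:render="greeting" /></div>')

    @renderer
    def greeting(self, request, tag):
        # new style: call the placeholder tag with its children
        return tag("hello")

result = []
flattenString(None, GreetingElement()).addCallback(result.append)
# result[0] == b'<div><span>hello</span></div>'

Returning tag(...) fills in the placeholder the template provided, so the
XHTML file keeps control of the markup and Python supplies only the dynamic
children.
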
@ -1,67 +1,67 @@
<html xmlns:n="http://nevow.com/ns/nevow/0.1"><head>
  <title n:render="title"></title>
<html xmlns:t="http://twistedmatrix.com/ns/twisted.web.template/0.1"><head>
  <title t:render="title" />
  <link href="/tahoe.css" rel="stylesheet" type="text/css"/>
  <link href="/icon.png" rel="shortcut icon" />
  <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />

</head><body>

<h1 n:render="header"></h1>
<h1 t:render="header"></h1>

<ul>
  <li>Object Type: <span n:render="type" /></li>
  <li>Storage Index: <tt n:render="si" /></li>
  <li>Object Size: <span n:render="size" /></li>
  <li>Object Type: <span t:render="type" /></li>
  <li>Storage Index: <tt t:render="si" /></li>
  <li>Object Size: <span t:render="size" /></li>
  <li>Access Caps (Tahoe-URIs):
    <table class="table-headings-left">
      <span n:render="is_directory">
      <span t:render="is_directory">
        <tr>
          <th>Directory writecap</th>
          <td><tt class="data-chars" n:render="directory_writecap" /></td>
          <td><tt class="data-chars" t:render="directory_writecap" /></td>
        </tr>
        <tr>
          <th>Directory readcap</th>
          <td><tt class="data-chars" n:render="directory_readcap" /></td>
          <td><tt class="data-chars" t:render="directory_readcap" /></td>
        </tr>
        <tr>
          <th>Directory verifycap</th>
          <td><tt class="data-chars" n:render="directory_verifycap" /></td>
          <td><tt class="data-chars" t:render="directory_verifycap" /></td>
        </tr>
      </span>

      <tr>
        <th>File writecap</th>
        <td><tt class="data-chars" n:render="file_writecap" /></td>
        <td><tt class="data-chars" t:render="file_writecap" /></td>
      </tr>
      <tr>
        <th>File readcap</th>
        <td><tt class="data-chars" n:render="file_readcap" /></td>
        <td><tt class="data-chars" t:render="file_readcap" /></td>
      </tr>
      <tr>
        <th>File verifycap</th>
        <td><tt class="data-chars" n:render="file_verifycap" /></td>
        <td><tt class="data-chars" t:render="file_verifycap" /></td>
      </tr>
    </table></li>
  <li><a href="?t=json">JSON</a></li>
  <li n:render="raw_link" />
  <li t:render="raw_link" />
</ul>

<div n:render="is_checkable">
<div t:render="is_checkable">
  <h2>Checker Operations</h2>
  <div n:render="check_form" />
  <div t:render="check_form" />
</div>

<div n:render="is_mutable_file">
<div t:render="is_mutable_file">
  <h2>Mutable File Operations</h2>
  <div n:render="overwrite_form" />
  <div t:render="overwrite_form" />
</div>

<div n:render="is_directory">
<div t:render="is_directory">
  <h2>Directory Operations</h2>
  <div n:render="deep_check_form" />
  <div n:render="deep_size_form" />
  <div n:render="deep_stats_form" />
  <div n:render="manifest_form" />
  <div t:render="deep_check_form" />
  <div t:render="deep_size_form" />
  <div t:render="deep_stats_form" />
  <div t:render="manifest_form" />
</div>

</body></html>

@ -1,4 +1,4 @@
<html xmlns:n="http://nevow.com/ns/nevow/0.1"><head>
<html xmlns:t="http://twistedmatrix.com/ns/twisted.web.template/0.1"><head>
  <title>Tahoe-LAFS - Introducer Status</title>
  <link href="/tahoe.css" rel="stylesheet" type="text/css"/>
  <link href="/icon.png" rel="shortcut icon" />
@ -10,23 +10,23 @@
<div class="section" id="this-client">
  <h2>This Introducer</h2>

  <table class="node-info table-headings-left">
    <tr><th>My nodeid:</th> <td class="nodeid mine data-chars" n:render="string" n:data="my_nodeid" /></tr>
    <tr><th>My versions:</th> <td n:render="string" n:data="version" /></tr>
    <tr><th>Tahoe-LAFS code imported from:</th> <td n:render="data" n:data="import_path" /></tr>
  <table class="node-info table-headings-left" t:render="node_data">
    <tr><th>My nodeid:</th> <td class="nodeid mine data-chars"><t:slot name="my_nodeid" /></td></tr>
    <tr><th>My versions:</th> <td><t:slot name="version" /></td></tr>
    <tr><th>Tahoe-LAFS code imported from:</th> <td><t:slot name="import_path" /></td></tr>
  </table>
</div>

<div>Announcement Summary: <span n:render="announcement_summary" /></div>
<div>Subscription Summary: <span n:render="client_summary" /></div>
<div>Announcement Summary: <span t:render="announcement_summary" /></div>
<div>Subscription Summary: <span t:render="client_summary" /></div>

<br />


<div class="section">
  <h2>Service Announcements</h2>
  <table class="services table-headings-top" n:render="sequence" n:data="services">
    <tr n:pattern="header">
  <table class="services table-headings-top" t:render="services">
    <tr>
      <th class="nickname-and-peerid">
        <div class="service-nickname">Nickname</div>
        <div class="nodeid data-chars">ServerID</div></th>
@ -34,23 +34,23 @@
      <th>Version</th>
      <th>Service Name</th>
    </tr>
    <tr n:pattern="item" n:render="service_row">
    <tr t:render="item">
      <td class="nickname-and-peerid">
        <div class="nickname"><n:slot name="nickname"/></div>
        <div class="nodeid data-chars"><n:slot name="serverid"/></div></td>
      <td class="service-announced"><n:attr name="title"><n:slot name="connection-hints"/></n:attr><n:slot name="announced"/></td>
      <td class="service-version"><n:slot name="version"/></td>
      <td class="service-service-name"><n:slot name="service_name"/></td>
        <div class="nickname"><t:slot name="nickname"/></div>
        <div class="nodeid data-chars"><t:slot name="serverid"/></div></td>
      <td class="service-announced"><t:attr name="title"><t:slot name="connection-hints"/></t:attr><t:slot name="announced"/></td>
      <td class="service-version"><t:slot name="version"/></td>
      <td class="service-service-name"><t:slot name="service_name"/></td>
    </tr>
    <tr n:pattern="empty"><td>no peers!</td></tr>
    <tr t:render="empty"><td>no peers!</td></tr>
  </table>
</div>


<div>
  <h2>Subscribed Clients</h2>
  <table class="services table-headings-top" n:render="sequence" n:data="subscribers">
    <tr n:pattern="header">
  <table class="services table-headings-top" t:render="subscribers">
    <tr>
      <th class="nickname-and-peerid">
        <div class="service-nickname">Nickname</div>
        <div class="nodeid data-chars">Tub ID</div></th>
@ -59,20 +59,20 @@
      <th>Version</th>
      <th>Subscribed To</th>
    </tr>
    <tr n:pattern="item" n:render="subscriber_row">
    <tr t:render="item">
      <td class="nickname-and-peerid">
        <div class="nickname"><n:slot name="nickname"/></div>
        <div class="nodeid data-chars"><n:slot name="tubid"/></div></td>
      <td><n:slot name="connected"/></td>
      <td class="service-since"><n:slot name="since"/></td>
      <td class="service-version"><n:slot name="version"/></td>
      <td class="service-service-name"><n:slot name="service_name"/></td>
        <div class="nickname"><t:slot name="nickname"/></div>
        <div class="nodeid data-chars"><t:slot name="tubid"/></div></td>
      <td><t:slot name="connected"/></td>
      <td class="service-since"><t:slot name="since"/></td>
      <td class="service-version"><t:slot name="version"/></td>
      <td class="service-service-name"><t:slot name="service_name"/></td>
    </tr>
    <tr n:pattern="empty"><td>no subscribers!</td></tr>
    <tr t:render="empty"><td>no subscribers!</td></tr>
  </table>
</div>

<p class="minutia">Page rendered at <span n:render="data" n:data="rendered_at" /></p>
<p class="minutia" t:render="node_data">Page rendered at <span><t:slot name="rendered_at" /></span></p>

</body>
</html>

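This template shows the slot half of the same migration: where Nevow paired
n:data/n:render="string" attributes with per-datum data_* methods, the new
markup puts a single t:render="node_data" on the enclosing tag and marks each
dynamic cell with a named t:slot, which one renderer fills in a single
fillSlots() call. A toy sketch of that shape (hypothetical element and
values, not the Tahoe code):

from twisted.web.template import Element, XMLString, flattenString, renderer

class NodeInfoElement(Element):
    """Toy Element whose single renderer fills several named slots."""
    loader = XMLString(
        '<div xmlns:t="http://twistedmatrix.com/ns/twisted.web.template/0.1">'
        '<table t:render="node_data">'
        '<tr><th>My nodeid:</th><td><t:slot name="my_nodeid" /></td></tr>'
        '<tr><th>My versions:</th><td><t:slot name="version" /></td></tr>'
        '</table></div>')

    @renderer
    def node_data(self, request, tag):
        # one fillSlots() call replaces several per-slot data_* methods
        return tag.fillSlots(my_nodeid="abc123", version="1.0")

result = []
flattenString(None, NodeInfoElement()).addCallback(result.append)
# result[0] holds the flattened table with both slots substituted
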
@ -1,35 +1,56 @@

import time, os
from nevow import rend
from nevow.static import File as nevow_File
from nevow.util import resource_filename
from pkg_resources import resource_filename
from twisted.web.template import Element, XMLFile, renderElement, renderer
from twisted.python.filepath import FilePath
from twisted.web import static
import allmydata
import json
from allmydata.version_checks import get_package_versions_string
from allmydata.util import idlib
from allmydata.web.common import (
    getxmlfile,
    render_time,
    MultiFormatPage,
    MultiFormatResource,
    SlotsSequenceElement,
)


class IntroducerRoot(MultiFormatPage):
class IntroducerRoot(MultiFormatResource):
    """
    A ``Resource`` intended as the root resource for introducers.

    addSlash = True
    docFactory = getxmlfile("introducer.xhtml")

    child_operations = None
    :param _IntroducerNode introducer_node: The introducer node to template
        information about.
    """

    def __init__(self, introducer_node):
        super(IntroducerRoot, self).__init__()
        self.introducer_node = introducer_node
        self.introducer_service = introducer_node.getServiceNamed("introducer")
        rend.Page.__init__(self, introducer_node)
        # necessary as a root Resource
        self.putChild("", self)
        static_dir = resource_filename("allmydata.web", "static")
        for filen in os.listdir(static_dir):
            self.putChild(filen, nevow_File(os.path.join(static_dir, filen)))
            self.putChild(filen, static.File(os.path.join(static_dir, filen)))

    def _create_element(self):
        """
        Create a ``IntroducerRootElement`` which can be flattened into an HTML
        response.
        """
        return IntroducerRootElement(
            self.introducer_node, self.introducer_service)

    def render_HTML(self, req):
        """
        Render an HTML template describing this introducer node.
        """
        return renderElement(req, self._create_element())

    def render_JSON(self, req):
        """
        Render JSON describing this introducer node.
        """
        res = {}

        counts = {}
@ -37,7 +58,7 @@ class IntroducerRoot(MultiFormatPage):
            if s.service_name not in counts:
                counts[s.service_name] = 0
            counts[s.service_name] += 1
        res["subscription_summary"] = counts
        res[u"subscription_summary"] = counts

        announcement_summary = {}
        for ad in self.introducer_service.get_announcements():
@ -45,21 +66,40 @@ class IntroducerRoot(MultiFormatPage):
            if service_name not in announcement_summary:
                announcement_summary[service_name] = 0
            announcement_summary[service_name] += 1
        res["announcement_summary"] = announcement_summary
        res[u"announcement_summary"] = announcement_summary

        return json.dumps(res, indent=1) + "\n"
        return json.dumps(res, indent=1) + b"\n"

    # FIXME: This code is duplicated in root.py and introweb.py.
    def data_rendered_at(self, ctx, data):
        return render_time(time.time())
    def data_version(self, ctx, data):
        return get_package_versions_string()
    def data_import_path(self, ctx, data):
        return str(allmydata).replace("/", "/ ") # XXX kludge for wrapping
    def data_my_nodeid(self, ctx, data):
        return idlib.nodeid_b2a(self.introducer_node.nodeid)

    def render_announcement_summary(self, ctx, data):
class IntroducerRootElement(Element):
    """
    An ``Element`` HTML template which can be flattened to describe this
    introducer node.

    :param _IntroducerNode introducer_node: The introducer node to describe.
    :param IntroducerService introducer_service: The introducer service created
        by the node.
    """

    loader = XMLFile(FilePath(__file__).sibling("introducer.xhtml"))

    def __init__(self, introducer_node, introducer_service):
        super(IntroducerRootElement, self).__init__()
        self.introducer_node = introducer_node
        self.introducer_service = introducer_service
        self.node_data_dict = {
            "my_nodeid": idlib.nodeid_b2a(self.introducer_node.nodeid),
            "version": get_package_versions_string(),
            "import_path": str(allmydata).replace("/", "/ "), # XXX kludge for wrapping
            "rendered_at": render_time(time.time()),
        }

    @renderer
    def node_data(self, req, tag):
        return tag.fillSlots(**self.node_data_dict)

    @renderer
    def announcement_summary(self, req, tag):
        services = {}
        for ad in self.introducer_service.get_announcements():
            if ad.service_name not in services:
@ -67,44 +107,43 @@ class IntroducerRoot(MultiFormatPage):
            services[ad.service_name] += 1
        service_names = services.keys()
        service_names.sort()
        return ", ".join(["%s: %d" % (service_name, services[service_name])
                          for service_name in service_names])
        return u", ".join(u"{}: {}".format(service_name, services[service_name])
                          for service_name in service_names)

    def render_client_summary(self, ctx, data):
    @renderer
    def client_summary(self, req, tag):
        counts = {}
        for s in self.introducer_service.get_subscribers():
            if s.service_name not in counts:
                counts[s.service_name] = 0
            counts[s.service_name] += 1
        return ", ".join([ "%s: %d" % (name, counts[name])
                           for name in sorted(counts.keys()) ] )
        return u", ".join(u"{}: {}".format(name, counts[name])
                          for name in sorted(counts.keys()))

    def data_services(self, ctx, data):
    @renderer
    def services(self, req, tag):
        services = self.introducer_service.get_announcements()
        services.sort(key=lambda ad: (ad.service_name, ad.nickname))
        return services
        services = [{
            "serverid": ad.serverid,
            "nickname": ad.nickname,
            "connection-hints":
                u"connection hints: " + u" ".join(ad.connection_hints),
            "connected": u"?",
            "announced": render_time(ad.when),
            "version": ad.version,
            "service_name": ad.service_name,
        } for ad in services]
        return SlotsSequenceElement(tag, services)

    def render_service_row(self, ctx, ad):
        ctx.fillSlots("serverid", ad.serverid)
        ctx.fillSlots("nickname", ad.nickname)
        ctx.fillSlots("connection-hints",
                      "connection hints: " + " ".join(ad.connection_hints))
        ctx.fillSlots("connected", "?")
        when_s = render_time(ad.when)
        ctx.fillSlots("announced", when_s)
        ctx.fillSlots("version", ad.version)
        ctx.fillSlots("service_name", ad.service_name)
        return ctx.tag

    def data_subscribers(self, ctx, data):
        return self.introducer_service.get_subscribers()

    def render_subscriber_row(self, ctx, s):
        ctx.fillSlots("nickname", s.nickname)
        ctx.fillSlots("tubid", s.tubid)
        ctx.fillSlots("connected", s.remote_address)
        since_s = render_time(s.when)
        ctx.fillSlots("since", since_s)
        ctx.fillSlots("version", s.version)
        ctx.fillSlots("service_name", s.service_name)
        return ctx.tag
    @renderer
    def subscribers(self, req, tag):
        subscribers = [{
            "nickname": s.nickname,
            "tubid": s.tubid,
            "connected": s.remote_address,
            "since": render_time(s.when),
            "version": s.version,
            "service_name": s.service_name,
        } for s in self.introducer_service.get_subscribers()]
        return SlotsSequenceElement(tag, subscribers)

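The services and subscribers renderers above no longer fill table rows
imperatively; each hands a list of dicts to SlotsSequenceElement, which
stamps out the template's t:render="item" row once per dict and shows the
t:render="empty" row when the list is empty. A rough sketch of how such a
helper can be built -- illustrative only, and not the actual
allmydata.web.common implementation:

from twisted.web.template import Element, TagLoader, renderer

class ToySlotsSequence(Element):
    """Clone the 'item' sub-tag once per dict, filling its named slots."""

    def __init__(self, tag, items):
        super(ToySlotsSequence, self).__init__(loader=TagLoader(tag))
        self._items = items

    @renderer
    def item(self, request, tag):
        # one cloned row per item; dict keys must match the t:slot names
        return [tag.clone().fillSlots(**item) for item in self._items]

    @renderer
    def empty(self, request, tag):
        return tag if not self._items else u""

A renderer can then end with return ToySlotsSequence(tag, list_of_dicts),
just as the real renderers above end with SlotsSequenceElement(tag, ...).
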
@ -1,6 +1,5 @@

import time
from zope.interface import implementer
from nevow import rend, url, tags as T
from nevow.inevow import IRequest
from twisted.python.failure import Failure
@ -9,7 +8,7 @@ from twisted.web.http import NOT_FOUND
from twisted.web.html import escape
from twisted.application import service

from allmydata.web.common import IOpHandleTable, WebError, \
from allmydata.web.common import WebError, \
     get_root, get_arg, boolean_of_arg

MINUTE = 60
@ -18,8 +17,8 @@ DAY = 24*HOUR

(MONITOR, RENDERER, WHEN_ADDED) = range(3)

@implementer(IOpHandleTable)
class OphandleTable(rend.Page, service.Service):
    name = "operations"

    UNCOLLECTED_HANDLE_LIFETIME = 4*DAY
    COLLECTED_HANDLE_LIFETIME = 1*DAY

@ -10,7 +10,7 @@ import allmydata # to display import path
from allmydata.version_checks import get_package_versions_string
from allmydata.util import log
from allmydata.interfaces import IFileNode
from allmydata.web import filenode, directory, unlinked, status, operations
from allmydata.web import filenode, directory, unlinked, status
from allmydata.web import storage, magic_folder
from allmydata.web.common import (
    abbreviate_size,
@ -154,50 +154,59 @@ class Root(MultiFormatPage):
    def __init__(self, client, clock=None, now_fn=None):
        rend.Page.__init__(self, client)
        self.client = client
        # If set, clock is a twisted.internet.task.Clock that the tests
        # use to test ophandle expiration.
        self.child_operations = operations.OphandleTable(clock)
        self.now_fn = now_fn
        try:
            s = client.getServiceNamed("storage")
        except KeyError:
            s = None
        self.child_storage = storage.StorageStatus(s, self.client.nickname)

        self.child_uri = URIHandler(client)
        self.child_cap = URIHandler(client)
        self.putChild("storage", storage.StorageStatus(s, self.client.nickname))

        self.putChild("uri", URIHandler(client))
        self.putChild("cap", URIHandler(client))

        # handler for "/magic_folder" URIs
        self.child_magic_folder = magic_folder.MagicFolderWebApi(client)
        self.putChild("magic_folder", magic_folder.MagicFolderWebApi(client))

        # Handler for everything beneath "/private", an area of the resource
        # hierarchy which is only accessible with the private per-node API
        # auth token.
        self.child_private = create_private_tree(client.get_auth_token)
        self.putChild("private", create_private_tree(client.get_auth_token))

        self.child_file = FileHandler(client)
        self.child_named = FileHandler(client)
        self.child_status = status.Status(client.get_history())
        self.child_statistics = status.Statistics(client.stats_provider)
        self.putChild("file", FileHandler(client))
        self.putChild("named", FileHandler(client))
        self.putChild("status", status.Status(client.get_history()))
        self.putChild("statistics", status.Statistics(client.stats_provider))
        static_dir = resource_filename("allmydata.web", "static")
        for filen in os.listdir(static_dir):
            self.putChild(filen, nevow_File(os.path.join(static_dir, filen)))

    def child_helper_status(self, ctx):
        # the Helper isn't attached until after the Tub starts, so this child
        # needs to be created on each request
        return status.HelperStatus(self.client.helper)
        self.putChild("report_incident", IncidentReporter())

    child_report_incident = IncidentReporter()
    #child_server # let's reserve this for storage-server-over-HTTP
    # until we get rid of nevow.Page in favour of twisted.web.resource
    # we can't use getChild() -- but we CAN use childFactory or
    # override locateChild
    def childFactory(self, ctx, name):
        request = IRequest(ctx)
        return self.getChild(name, request)


    def getChild(self, path, request):
        if path == "helper_status":
            # the Helper isn't attached until after the Tub starts, so this child
            # needs to be created on each request
            return status.HelperStatus(self.client.helper)

    # FIXME: This code is duplicated in root.py and introweb.py.
    def data_rendered_at(self, ctx, data):
        return render_time(time.time())

    def data_version(self, ctx, data):
        return get_package_versions_string()

    def data_import_path(self, ctx, data):
        return str(allmydata)

    def render_my_nodeid(self, ctx, data):
        tubid_s = "TubID: "+self.client.get_long_tubid()
        return T.td(title=tubid_s)[self.client.get_long_nodeid()]

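This Root.__init__ rewrite is the same migration at the resource-tree level:
Nevow located children through magic child_<name> attributes and
childFactory(), while twisted.web.resource.Resource wants static children
registered with putChild() and dynamic ones built in an overridden
getChild(). That is why /helper_status stays per-request: the Helper only
exists after the Tub starts, so it cannot be registered up front. A toy
sketch of the two kinds of children (hypothetical resources, not Tahoe's):

from twisted.web.resource import Resource

class StaticChild(Resource):
    isLeaf = True
    def render_GET(self, request):
        return b"registered once at startup"

class PerRequestChild(Resource):
    isLeaf = True
    def render_GET(self, request):
        return b"built fresh for this request"

class ToyRoot(Resource):
    def __init__(self):
        Resource.__init__(self)
        # replaces the old Nevow idiom `child_static = StaticChild()`
        self.putChild(b"static", StaticChild())

    def getChild(self, path, request):
        # replaces childFactory(); runs only for names not given to putChild()
        if path == b"dynamic":
            return PerRequestChild()
        return Resource.getChild(self, path, request)
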
@ -2,7 +2,8 @@
import pprint, itertools, hashlib
import json
from twisted.internet import defer
from nevow import rend, inevow, tags as T
from twisted.web.resource import Resource
from nevow import rend, tags as T
from allmydata.util import base32, idlib
from allmydata.web.common import (
    getxmlfile,
@ -331,12 +332,161 @@ class DownloadResultsRendererMixin(RateAndTimeMixin):
        d.addCallback(_render)
        return d

def _find_overlap(events, start_key, end_key):
    """
    given a list of event dicts, return a new list in which each event
    has an extra "row" key (an int, starting at 0), and if appropriate
    a "serverid" key (ascii-encoded server id), replacing the "server"
    key. This is a hint to our JS frontend about how to overlap the
    parts of the graph it is drawing.

    we must always make a copy, since we're going to be adding keys
    and don't want to change the original objects. If we're
    stringifying serverids, we'll also be changing the serverid keys.
    """
    new_events = []
    rows = []
    for ev in events:
        ev = ev.copy()
        if ev.has_key('server'):
            ev["serverid"] = ev["server"].get_longname()
            del ev["server"]
        # find an empty slot in the rows
        free_slot = None
        for row,finished in enumerate(rows):
            if finished is not None:
                if ev[start_key] > finished:
                    free_slot = row
                    break
        if free_slot is None:
            free_slot = len(rows)
            rows.append(ev[end_key])
        else:
            rows[free_slot] = ev[end_key]
        ev["row"] = free_slot
        new_events.append(ev)
    return new_events

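_find_overlap is a greedy first-fit interval packing: events arrive in start
order, each is placed on the lowest row whose previous occupant has already
finished, and a new row is opened only when every existing row is still
busy. A small worked example (hypothetical event dicts, under this file's
Python 2 assumptions):

sample = [
    {"start_time": 0.0, "finish_time": 2.0},  # nothing placed yet -> row 0
    {"start_time": 1.0, "finish_time": 3.0},  # row 0 busy until 2.0 -> row 1
    {"start_time": 2.5, "finish_time": 4.0},  # row 0 freed at 2.0 -> row 0 again
]
packed = _find_overlap(sample, "start_time", "finish_time")
assert [ev["row"] for ev in packed] == [0, 1, 0]
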
def _find_overlap_requests(events):
    """
    We compute a three-element 'row tuple' for each event: (serverid,
    shnum, row). All elements are ints. The first is a mapping from
    serverid to group number, the second is a mapping from shnum to
    subgroup number. The third is a row within the subgroup.

    We also return a list of lists of rowcounts, so renderers can decide
    how much vertical space to give to each row.
    """

    serverid_to_group = {}
    groupnum_to_rows = {} # maps groupnum to a table of rows. Each table
                          # is a list with an element for each row number
                          # (int starting from 0) that contains a
                          # finish_time, indicating that the row is empty
                          # beyond that time. If finish_time is None, it
                          # indicates a response that has not yet
                          # completed, so the row cannot be reused.
    new_events = []
    for ev in events:
        # DownloadStatus promises to give us events in temporal order
        ev = ev.copy()
        ev["serverid"] = ev["server"].get_longname()
        del ev["server"]
        if ev["serverid"] not in serverid_to_group:
            groupnum = len(serverid_to_group)
            serverid_to_group[ev["serverid"]] = groupnum
        groupnum = serverid_to_group[ev["serverid"]]
        if groupnum not in groupnum_to_rows:
            groupnum_to_rows[groupnum] = []
        rows = groupnum_to_rows[groupnum]
        # find an empty slot in the rows
        free_slot = None
        for row,finished in enumerate(rows):
            if finished is not None:
                if ev["start_time"] > finished:
                    free_slot = row
                    break
        if free_slot is None:
            free_slot = len(rows)
            rows.append(ev["finish_time"])
        else:
            rows[free_slot] = ev["finish_time"]
        ev["row"] = (groupnum, free_slot)
        new_events.append(ev)
    del groupnum
    # maybe also return serverid_to_group, groupnum_to_rows, and some
    # indication of the highest finish_time
    #
    # actually, return the highest rownum for each groupnum
    highest_rownums = [len(groupnum_to_rows[groupnum])
                       for groupnum in range(len(serverid_to_group))]
    return new_events, highest_rownums


def _color(server):
    h = hashlib.sha256(server.get_serverid()).digest()
    def m(c):
        return min(ord(c) / 2 + 0x80, 0xff)
    return "#%02x%02x%02x" % (m(h[0]), m(h[1]), m(h[2]))

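_color (like the DownloadStatusPage.color method it replaces below) derives
a stable per-server row colour from the first three bytes of the SHA-256
digest of the server id: halving each byte and adding 0x80 pins every
channel into the 0x80-0xff range, so row backgrounds stay light enough for
black text. The same arithmetic restated stand-alone (Python 2 style to
match the file; "pastel" is a hypothetical name):

import hashlib

def pastel(serverid_bytes):
    h = hashlib.sha256(serverid_bytes).digest()
    def m(c):
        return min(ord(c) / 2 + 0x80, 0xff)  # Python 2: indexing bytes gives a str
    return "#%02x%02x%02x" % (m(h[0]), m(h[1]), m(h[2]))

# every channel is >= 0x80, so the result is always a light "#rrggbb" colour
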
class _EventJson(Resource, object):

    def __init__(self, download_status):
        self._download_status = download_status

    def render(self, request):
        request.setHeader("content-type", "text/plain")
        data = { } # this will be returned to the GET
        ds = self._download_status

        data["misc"] = _find_overlap(
            ds.misc_events,
            "start_time", "finish_time",
        )
        data["read"] = _find_overlap(
            ds.read_events,
            "start_time", "finish_time",
        )
        data["segment"] = _find_overlap(
            ds.segment_events,
            "start_time", "finish_time",
        )
        # TODO: overlap on DYHB isn't very useful, and usually gets in the
        # way. So don't do it.
        data["dyhb"] = _find_overlap(
            ds.dyhb_requests,
            "start_time", "finish_time",
        )
        data["block"], data["block_rownums"] = _find_overlap_requests(ds.block_requests)

        server_info = {} # maps longname to {num,color,short}
        server_shortnames = {} # maps servernum to shortname
        for d_ev in ds.dyhb_requests:
            s = d_ev["server"]
            longname = s.get_longname()
            if longname not in server_info:
                num = len(server_info)
                server_info[longname] = {"num": num,
                                         "color": _color(s),
                                         "short": s.get_name() }
                server_shortnames[str(num)] = s.get_name()

        data["server_info"] = server_info
        data["num_serverids"] = len(server_info)
        # we'd prefer the keys of serverids[] to be ints, but this is JSON,
        # so they get converted to strings. Stupid javascript.
        data["serverids"] = server_shortnames
        data["bounds"] = {"min": ds.first_timestamp, "max": ds.last_timestamp}
        return json.dumps(data, indent=1) + "\n"


class DownloadStatusPage(DownloadResultsRendererMixin, rend.Page):
    docFactory = getxmlfile("download-status.xhtml")

    def __init__(self, data):
        rend.Page.__init__(self, data)
        self.download_status = data
        self.putChild("event_json", _EventJson(self.download_status))

    def download_results(self):
        return defer.maybeDeferred(self.download_status.get_results)
@ -353,130 +503,6 @@ class DownloadStatusPage(DownloadResultsRendererMixin, rend.Page):
            return ""
        return "+%.6fs" % t

    def _find_overlap(self, events, start_key, end_key):
        # given a list of event dicts, return a new list in which each event
        # has an extra "row" key (an int, starting at 0), and if appropriate
        # a "serverid" key (ascii-encoded server id), replacing the "server"
        # key. This is a hint to our JS frontend about how to overlap the
        # parts of the graph it is drawing.

        # we must always make a copy, since we're going to be adding keys
        # and don't want to change the original objects. If we're
        # stringifying serverids, we'll also be changing the serverid keys.
        new_events = []
        rows = []
        for ev in events:
            ev = ev.copy()
            if ev.has_key('server'):
                ev["serverid"] = ev["server"].get_longname()
                del ev["server"]
            # find an empty slot in the rows
            free_slot = None
            for row,finished in enumerate(rows):
                if finished is not None:
                    if ev[start_key] > finished:
                        free_slot = row
                        break
            if free_slot is None:
                free_slot = len(rows)
                rows.append(ev[end_key])
            else:
                rows[free_slot] = ev[end_key]
            ev["row"] = free_slot
            new_events.append(ev)
        return new_events

    def _find_overlap_requests(self, events):
        """We compute a three-element 'row tuple' for each event: (serverid,
        shnum, row). All elements are ints. The first is a mapping from
        serverid to group number, the second is a mapping from shnum to
        subgroup number. The third is a row within the subgroup.

        We also return a list of lists of rowcounts, so renderers can decide
        how much vertical space to give to each row.
        """

        serverid_to_group = {}
        groupnum_to_rows = {} # maps groupnum to a table of rows. Each table
                              # is a list with an element for each row number
                              # (int starting from 0) that contains a
                              # finish_time, indicating that the row is empty
                              # beyond that time. If finish_time is None, it
                              # indicates a response that has not yet
                              # completed, so the row cannot be reused.
        new_events = []
        for ev in events:
            # DownloadStatus promises to give us events in temporal order
            ev = ev.copy()
            ev["serverid"] = ev["server"].get_longname()
            del ev["server"]
            if ev["serverid"] not in serverid_to_group:
                groupnum = len(serverid_to_group)
                serverid_to_group[ev["serverid"]] = groupnum
            groupnum = serverid_to_group[ev["serverid"]]
            if groupnum not in groupnum_to_rows:
                groupnum_to_rows[groupnum] = []
            rows = groupnum_to_rows[groupnum]
            # find an empty slot in the rows
            free_slot = None
            for row,finished in enumerate(rows):
                if finished is not None:
                    if ev["start_time"] > finished:
                        free_slot = row
                        break
            if free_slot is None:
                free_slot = len(rows)
                rows.append(ev["finish_time"])
            else:
                rows[free_slot] = ev["finish_time"]
            ev["row"] = (groupnum, free_slot)
            new_events.append(ev)
        del groupnum
        # maybe also return serverid_to_group, groupnum_to_rows, and some
        # indication of the highest finish_time
        #
        # actually, return the highest rownum for each groupnum
        highest_rownums = [len(groupnum_to_rows[groupnum])
                           for groupnum in range(len(serverid_to_group))]
        return new_events, highest_rownums

    def child_event_json(self, ctx):
        inevow.IRequest(ctx).setHeader("content-type", "text/plain")
        data = { } # this will be returned to the GET
        ds = self.download_status

        data["misc"] = self._find_overlap(ds.misc_events,
                                          "start_time", "finish_time")
        data["read"] = self._find_overlap(ds.read_events,
                                          "start_time", "finish_time")
        data["segment"] = self._find_overlap(ds.segment_events,
                                             "start_time", "finish_time")
        # TODO: overlap on DYHB isn't very useful, and usually gets in the
        # way. So don't do it.
        data["dyhb"] = self._find_overlap(ds.dyhb_requests,
                                          "start_time", "finish_time")
        data["block"],data["block_rownums"] = self._find_overlap_requests(ds.block_requests)

        server_info = {} # maps longname to {num,color,short}
        server_shortnames = {} # maps servernum to shortname
        for d_ev in ds.dyhb_requests:
            s = d_ev["server"]
            longname = s.get_longname()
            if longname not in server_info:
                num = len(server_info)
                server_info[longname] = {"num": num,
                                         "color": self.color(s),
                                         "short": s.get_name() }
                server_shortnames[str(num)] = s.get_name()

        data["server_info"] = server_info
        data["num_serverids"] = len(server_info)
        # we'd prefer the keys of serverids[] to be ints, but this is JSON,
        # so they get converted to strings. Stupid javascript.
        data["serverids"] = server_shortnames
        data["bounds"] = {"min": ds.first_timestamp, "max": ds.last_timestamp}
        return json.dumps(data, indent=1) + "\n"

    def render_timeline_link(self, ctx, data):
        from nevow import url
        return T.a(href=url.URL.fromContext(ctx).child("timeline"))["timeline"]
@ -507,7 +533,7 @@ class DownloadStatusPage(DownloadResultsRendererMixin, rend.Page):
            rtt = received - sent
            if not shnums:
                shnums = ["-"]
            t[T.tr(style="background: %s" % self.color(server))[
            t[T.tr(style="background: %s" % _color(server))[
                [T.td[server.get_name()], T.td[srt(sent)], T.td[srt(received)],
                 T.td[",".join([str(shnum) for shnum in shnums])],
                 T.td[self.render_time(None, rtt)],
@ -587,7 +613,7 @@ class DownloadStatusPage(DownloadResultsRendererMixin, rend.Page):
            rtt = None
            if r_ev["finish_time"] is not None:
                rtt = r_ev["finish_time"] - r_ev["start_time"]
            color = self.color(server)
            color = _color(server)
            t[T.tr(style="background: %s" % color)[
                T.td[server.get_name()], T.td[r_ev["shnum"]],
                T.td["[%d:+%d]" % (r_ev["start"], r_ev["length"])],
@ -601,12 +627,6 @@ class DownloadStatusPage(DownloadResultsRendererMixin, rend.Page):

        return l

    def color(self, server):
        h = hashlib.sha256(server.get_serverid()).digest()
        def m(c):
            return min(ord(c) / 2 + 0x80, 0xff)
        return "#%02x%02x%02x" % (m(h[0]), m(h[1]), m(h[2]))

    def render_results(self, ctx, data):
        d = self.download_results()
        def _got_results(results):
@ -1069,7 +1089,12 @@ class Status(MultiFormatPage):

    def childFactory(self, ctx, name):
        h = self.history
        stype,count_s = name.split("-")
        try:
            stype, count_s = name.split("-")
        except ValueError:
            raise RuntimeError(
                "no - in '{}'".format(name)
            )
        count = int(count_s)
        if stype == "up":
            for s in itertools.chain(h.list_all_upload_statuses(),

@ -10,7 +10,8 @@ from nevow import appserver, inevow
from allmydata.util import log, fileutil

from allmydata.web import introweb, root
from allmydata.web.common import IOpHandleTable, MyExceptionHandler
from allmydata.web.common import MyExceptionHandler
from allmydata.web.operations import OphandleTable

from .web.storage_plugins import (
    StoragePlugins,
@ -168,9 +169,12 @@ class WebishServer(service.MultiService):

        self.root = root.Root(client, clock, now_fn)
        self.buildServer(webport, nodeurl_path, staticdir)
        if self.root.child_operations:
            self.site.remember(self.root.child_operations, IOpHandleTable)
            self.root.child_operations.setServiceParent(self)

        # If set, clock is a twisted.internet.task.Clock that the tests
        # use to test ophandle expiration.
        self._operations = OphandleTable(clock)
        self._operations.setServiceParent(self)
        self.root.putChild("operations", self._operations)

        self.root.putChild(b"storage-plugins", StoragePlugins(client))

@ -243,6 +247,12 @@ class WebishServer(service.MultiService):
            # who knows, probably some weirdo future version of Twisted
            self._started.errback(AssertionError("couldn't find out the scheme or port for the web-API server"))

    def get_operations(self):
        """
        :return: a reference to our "active operations" tracker
        """
        return self._operations


class IntroducerWebishServer(WebishServer):
    def __init__(self, introducer, webport, nodeurl_path=None, staticdir=None):

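With this change the webish server owns the ophandle table outright: the
table is built once in __init__, attached as a child service so its
expiration timers start and stop with the server, mounted at /operations,
and reached through get_operations() instead of the old IOpHandleTable
adapter remembered on the Nevow site. A minimal sketch of that ownership
pattern with stand-in classes (not the real Tahoe ones):

from twisted.application import service

class FakeOphandleTable(service.Service):
    """Stand-in for allmydata.web.operations.OphandleTable."""
    name = "operations"

class FakeWebishServer(service.MultiService):
    def __init__(self):
        service.MultiService.__init__(self)
        self._operations = FakeOphandleTable()
        # child service: started and stopped together with the server
        self._operations.setServiceParent(self)

    def get_operations(self):
        """Explicit accessor replacing the adapter lookup."""
        return self._operations

server = FakeWebishServer()
assert server.get_operations().parent is server
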
@ -1,6 +1,6 @@
[tool.towncrier]
package = "allmydata"
package_dir = "src"
package = "allmydata"
filename = "NEWS.rst"
directory = "newsfragments"
start_string = ".. towncrier start line"

tox.ini
@ -98,6 +98,32 @@ commands =
    # file. See pyproject.toml for legal <change type> values.
    python -m towncrier.check

[testenv:draftnews]
passenv = TAHOE_LAFS_* PIP_* SUBUNITREPORTER_* USERPROFILE HOMEDRIVE HOMEPATH
# see comment in [testenv] about "certifi"
whitelist_externals = mv
deps =
    certifi
    towncrier >= 19.2
commands =
    # With pip >= 10 the existence of pyproject.toml (which we are
    # required to have to configure towncrier) triggers a "build
    # isolation" mode which prevents anything from working. Avoid
    # triggering that pip behavior by keeping the towncrier configuration
    # somewhere else and only bringing it in when it's actually needed
    # (after pip is done).
    #
    # Some discussion is available at
    # https://github.com/pypa/pip/issues/5696
    mv towncrier.pyproject.toml pyproject.toml

    # towncrier 19.2 + works with python2.7
    python -m towncrier --draft

    # put it back
    mv pyproject.toml towncrier.pyproject.toml


[testenv:deprecations]
setenv =
    PYTHONWARNINGS=default::DeprecationWarning