import time, os

from twisted.internet import address
from twisted.web import http
from nevow import rend, url, tags as T
from nevow.inevow import IRequest
from nevow.static import File as nevow_File # TODO: merge with static.File?
from nevow.util import resource_filename

import allmydata # to display import path
from allmydata import get_package_versions_string
from allmydata.util import log
from allmydata.interfaces import IFileNode
from allmydata.web import filenode, directory, unlinked, status, operations
from allmydata.web import storage, magic_folder
from allmydata.web.common import abbreviate_size, getxmlfile, WebError, \
     get_arg, RenderMixin, get_format, get_mutable_type, render_time_delta, render_time, render_time_attr


class URIHandler(RenderMixin, rend.Page):
    # I live at /uri . There are several operations defined on /uri itself,
    # mostly involved with creation of unlinked files and directories.
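    #
    # Request shapes handled here (illustrative summary, not exhaustive):
    #   GET  /uri?uri=<cap>   -> redirect to /uri/<cap>
    #   PUT  /uri             -> upload an unlinked file (CHK by default)
    #   PUT  /uri?t=mkdir     -> create an unlinked directory
    #   POST /uri?t=upload    -> upload an unlinked file from a form
    #   POST /uri?t=mkdir     -> create an unlinked directory from a form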

    def __init__(self, client):
        rend.Page.__init__(self, client)
        self.client = client

    def render_GET(self, ctx):
        req = IRequest(ctx)
        uri = get_arg(req, "uri", None)
        if uri is None:
            raise WebError("GET /uri requires uri=")
        there = url.URL.fromContext(ctx)
        there = there.clear("uri")
        # I thought about escaping the childcap that we attach to the URL
        # here, but it seems that nevow does that for us.
        there = there.child(uri)
        return there

    def render_PUT(self, ctx):
        req = IRequest(ctx)
        # either "PUT /uri" to create an unlinked file, or
        # "PUT /uri?t=mkdir" to create an unlinked directory
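        # Hedged usage sketch (assumes the default web port 3456; the
        # mutable-format choice comes from the "format" argument that
        # get_format() reads):
        #   curl -T local.txt http://127.0.0.1:3456/uri
        #   curl -T local.txt "http://127.0.0.1:3456/uri?format=sdmf"
        #   curl -X PUT "http://127.0.0.1:3456/uri?t=mkdir"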
        t = get_arg(req, "t", "").strip()
        if t == "":
            file_format = get_format(req, "CHK")
            mutable_type = get_mutable_type(file_format)
            if mutable_type is not None:
                return unlinked.PUTUnlinkedSSK(req, self.client, mutable_type)
            else:
                return unlinked.PUTUnlinkedCHK(req, self.client)
        if t == "mkdir":
            return unlinked.PUTUnlinkedCreateDirectory(req, self.client)
        errmsg = ("/uri accepts only PUT, PUT?t=mkdir, POST?t=upload, "
                  "and POST?t=mkdir")
        raise WebError(errmsg, http.BAD_REQUEST)

    def render_POST(self, ctx):
        # "POST /uri?t=upload&file=newfile" to upload an unlinked file, or
        # "POST /uri?t=mkdir" to create a new directory
        req = IRequest(ctx)
        t = get_arg(req, "t", "").strip()
        if t in ("", "upload"):
            file_format = get_format(req)
            mutable_type = get_mutable_type(file_format)
            if mutable_type is not None:
                return unlinked.POSTUnlinkedSSK(req, self.client, mutable_type)
            else:
                return unlinked.POSTUnlinkedCHK(req, self.client)
        if t == "mkdir":
            return unlinked.POSTUnlinkedCreateDirectory(req, self.client)
        elif t == "mkdir-with-children":
            return unlinked.POSTUnlinkedCreateDirectoryWithChildren(req,
                                                                    self.client)
        elif t == "mkdir-immutable":
            return unlinked.POSTUnlinkedCreateImmutableDirectory(req,
                                                                 self.client)
        errmsg = ("/uri accepts only PUT, PUT?t=mkdir, POST?t=upload, "
                  "and POST?t=mkdir")
        raise WebError(errmsg, http.BAD_REQUEST)

    def childFactory(self, ctx, name):
        # 'name' is expected to be a URI
        try:
            node = self.client.create_node_from_uri(name)
            return directory.make_handler_for(node, self.client)
        except (TypeError, AssertionError):
            raise WebError("'%s' is not a valid file- or directory- cap"
                           % name)


class FileHandler(rend.Page):
    # I handle /file/$FILECAP[/IGNORED] , which provides a URL from which a
    # file can be downloaded correctly by tools like "wget".
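    #
    # For example (illustrative): GET /file/<filecap>/report.pdf serves the
    # file's bytes; the trailing "report.pdf" segment is ignored by the
    # server but gives wget and browsers a sensible default filename.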

    def __init__(self, client):
        rend.Page.__init__(self, client)
        self.client = client

    def childFactory(self, ctx, name):
        req = IRequest(ctx)
        if req.method not in ("GET", "HEAD"):
            raise WebError("/file can only be used with GET or HEAD")
        # 'name' must be a file URI
        try:
            node = self.client.create_node_from_uri(name)
        except (TypeError, AssertionError):
            # I think this can no longer be reached
            raise WebError("'%s' is not a valid file- or directory- cap"
                           % name)
        if not IFileNode.providedBy(node):
            raise WebError("'%s' is not a file-cap" % name)
        return filenode.FileNodeDownloadHandler(self.client, node)

    def renderHTTP(self, ctx):
        raise WebError("/file must be followed by a file-cap and a name",
                       http.NOT_FOUND)


class IncidentReporter(RenderMixin, rend.Page):
    def render_POST(self, ctx):
        req = IRequest(ctx)
        log.msg(format="User reports incident through web page: %(details)s",
                details=get_arg(req, "details", ""),
                level=log.WEIRD, umid="LkD9Pw")
        req.setHeader("content-type", "text/plain")
        return "An incident report has been saved to logs/incidents/ in the node directory."


SPACE = u"\u00A0"*2
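# SPACE is two non-breaking spaces; it is used below to pad labels and
# buttons in the welcome-page forms.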


class Root(rend.Page):

    addSlash = True
    docFactory = getxmlfile("welcome.xhtml")

    _connectedalts = {
        "not-configured": "Not Configured",
        "yes": "Connected",
        "no": "Disconnected",
        }
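    # _connectedalts maps the short "yes"/"no"/"not-configured" status values
    # used by the data_* methods below to the human-readable text shown in
    # the corresponding "*_alt" template slots.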

    def __init__(self, client, clock=None, now_fn=None):
        rend.Page.__init__(self, client)
        self.client = client
        # If set, clock is a twisted.internet.task.Clock that the tests
        # use to test ophandle expiration.
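        # A minimal test sketch (hypothetical): construct
        #     Root(client, clock=twisted.internet.task.Clock())
        # and call clock.advance(seconds) so OphandleTable expires operation
        # handles deterministically instead of waiting on the real reactor.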
        self.child_operations = operations.OphandleTable(clock)
        self.now_fn = now_fn
        try:
            s = client.getServiceNamed("storage")
        except KeyError:
            s = None
        self.child_storage = storage.StorageStatus(s, self.client.nickname)

        self.child_uri = URIHandler(client)
        self.child_cap = URIHandler(client)

        # handler for "/magic_folder" URIs
        self.child_magic_folder = magic_folder.MagicFolderWebApi(client)

        self.child_file = FileHandler(client)
        self.child_named = FileHandler(client)
        self.child_status = status.Status(client.get_history())
        self.child_statistics = status.Statistics(client.stats_provider)
        static_dir = resource_filename("allmydata.web", "static")
        for filen in os.listdir(static_dir):
            self.putChild(filen, nevow_File(os.path.join(static_dir, filen)))

    def child_helper_status(self, ctx):
        # the Helper isn't attached until after the Tub starts, so this child
        # needs to be created on each request
        return status.HelperStatus(self.client.helper)

    child_report_incident = IncidentReporter()
    #child_server # let's reserve this for storage-server-over-HTTP

    # FIXME: This code is duplicated in root.py and introweb.py.
    def data_rendered_at(self, ctx, data):
        return render_time(time.time())

    def data_version(self, ctx, data):
        return get_package_versions_string()

    def data_import_path(self, ctx, data):
        return str(allmydata)

    def render_my_nodeid(self, ctx, data):
        tubid_s = "TubID: "+self.client.get_long_tubid()
        return T.td(title=tubid_s)[self.client.get_long_nodeid()]

    def data_my_nickname(self, ctx, data):
        return self.client.nickname

    def render_magic_folder(self, ctx, data):
        if self.client._magic_folder is None:
            return T.p()

        (ok, messages) = self.client._magic_folder.get_public_status()

        if ok:
            ctx.fillSlots("magic_folder_status", "yes")
            ctx.fillSlots("magic_folder_status_alt", "working")
        else:
            ctx.fillSlots("magic_folder_status", "no")
            ctx.fillSlots("magic_folder_status_alt", "not working")

        status = T.ul()
        for msg in messages:
            status[T.li[str(msg)]]

        return ctx.tag[status]

    def render_services(self, ctx, data):
        ul = T.ul()
        try:
            ss = self.client.getServiceNamed("storage")
            stats = ss.get_stats()
            if stats["storage_server.accepting_immutable_shares"]:
                msg = "accepting new shares"
            else:
                msg = "not accepting new shares (read-only)"
            available = stats.get("storage_server.disk_avail")
            if available is not None:
                msg += ", %s available" % abbreviate_size(available)
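                # (abbreviate_size() is the web.common helper that turns the
                # raw byte count into a short human-readable string; the
                # exact formatting is up to that helper)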
            ul[T.li[T.a(href="storage")["Storage Server"], ": ", msg]]
        except KeyError:
            ul[T.li["Not running storage server"]]

        if self.client.helper:
            stats = self.client.helper.get_stats()
            active_uploads = stats["chk_upload_helper.active_uploads"]
            ul[T.li["Helper: %d active uploads" % (active_uploads,)]]
        else:
            ul[T.li["Not running helper"]]

        return ctx.tag[ul]

    def data_introducer_description(self, ctx, data):
        connected_count = self.data_connected_introducers(ctx, data)
        if connected_count == 0:
            return "No introducers connected"
        elif connected_count == 1:
            return "1 introducer connected"
        else:
            return "%s introducers connected" % (connected_count,)

    def data_total_introducers(self, ctx, data):
        return len(self.client.introducer_furls)

    def data_connected_introducers(self, ctx, data):
        return self.client.introducer_connection_statuses().count(True)

    def data_connected_to_introducer(self, ctx, data):
        if self.client.connected_to_introducer():
            return "yes"
        return "no"

    def data_connected_to_at_least_one_introducer(self, ctx, data):
        if True in self.client.introducer_connection_statuses():
            return "yes"
        return "no"

    def data_connected_to_at_least_one_introducer_alt(self, ctx, data):
        return self._connectedalts[self.data_connected_to_at_least_one_introducer(ctx, data)]

    # In case we configure multiple introducers
    def data_introducers(self, ctx, data):
        connection_statuses = self.client.introducer_connection_statuses()
        s = []
        furls = self.client.introducer_furls
        for furl in furls:
            if connection_statuses:
                display_furl = furl
                # trim off the secret swissnum
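                # (illustrative: "pb://tubid@tcp:host:port/abc123" would be
                # shown as "pb://tubid@tcp:host:port/[censored]"; only the
                # well-known swissnum "introducer" is left visible)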
                (prefix, _, swissnum) = furl.rpartition("/")
                if swissnum != "introducer":
                    display_furl = "%s/[censored]" % (prefix,)
                i = furls.index(furl)
                ic = self.client.introducer_clients[i]
                s.append((display_furl, bool(connection_statuses[i]), ic))
        s.sort()
        return s

    def render_introducers_row(self, ctx, s):
        (furl, connected, ic) = s
        service_connection_status = "yes" if connected else "no"

        since = ic.get_since()
        service_connection_status_rel_time = render_time_delta(since, self.now_fn())
        service_connection_status_abs_time = render_time_attr(since)

        last_received_data_time = ic.get_last_received_data_time()
        last_received_data_rel_time = render_time_delta(last_received_data_time, self.now_fn())
        last_received_data_abs_time = render_time_attr(last_received_data_time)

        ctx.fillSlots("introducer_furl", "%s" % (furl,))
        ctx.fillSlots("service_connection_status", "%s" % (service_connection_status,))
        ctx.fillSlots("service_connection_status_alt",
                      self._connectedalts[service_connection_status])
        ctx.fillSlots("service_connection_status_abs_time", service_connection_status_abs_time)
        ctx.fillSlots("service_connection_status_rel_time", service_connection_status_rel_time)
        ctx.fillSlots("last_received_data_abs_time", last_received_data_abs_time)
        ctx.fillSlots("last_received_data_rel_time", last_received_data_rel_time)
        return ctx.tag

    def data_helper_furl_prefix(self, ctx, data):
        try:
            uploader = self.client.getServiceNamed("uploader")
        except KeyError:
            return None
        furl, connected = uploader.get_helper_info()
        if not furl:
            return None
        # trim off the secret swissnum
        (prefix, _, swissnum) = furl.rpartition("/")
        return "%s/[censored]" % (prefix,)

    def data_helper_description(self, ctx, data):
        if self.data_connected_to_helper(ctx, data) == "no":
            return "Helper not connected"
        return "Helper"

    def data_connected_to_helper(self, ctx, data):
        try:
            uploader = self.client.getServiceNamed("uploader")
        except KeyError:
            return "no" # we don't even have an Uploader
        furl, connected = uploader.get_helper_info()

        if furl is None:
            return "not-configured"
        if connected:
            return "yes"
        return "no"

    def data_connected_to_helper_alt(self, ctx, data):
        return self._connectedalts[self.data_connected_to_helper(ctx, data)]

    def data_known_storage_servers(self, ctx, data):
        sb = self.client.get_storage_broker()
        return len(sb.get_all_serverids())

    def data_connected_storage_servers(self, ctx, data):
        sb = self.client.get_storage_broker()
        return len(sb.get_connected_servers())

    def data_services(self, ctx, data):
        sb = self.client.get_storage_broker()
        return sorted(sb.get_known_servers(), key=lambda s: s.get_serverid())

    def render_service_row(self, ctx, server):
        server_id = server.get_serverid()
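        # (per this code's history, get_serverid() returns a public-key-based
        # id rather than a tubid, so it can be compared against
        # self.client.get_long_nodeid() below)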

        ctx.fillSlots("peerid", server.get_longname())
        ctx.fillSlots("nickname", server.get_nickname())
        rhost = server.get_remote_host()
        if server.is_connected():
            if server_id == self.client.get_long_nodeid():
                rhost_s = "(loopback)"
            elif isinstance(rhost, address.IPv4Address):
                rhost_s = "%s:%d" % (rhost.host, rhost.port)
            else:
                rhost_s = str(rhost)
            addr = rhost_s
            service_connection_status = "yes"
            last_connect_time = server.get_last_connect_time()
            service_connection_status_rel_time = render_time_delta(last_connect_time, self.now_fn())
            service_connection_status_abs_time = render_time_attr(last_connect_time)
        else:
            addr = "N/A"
            service_connection_status = "no"
            last_loss_time = server.get_last_loss_time()
            service_connection_status_rel_time = render_time_delta(last_loss_time, self.now_fn())
            service_connection_status_abs_time = render_time_attr(last_loss_time)

        last_received_data_time = server.get_last_received_data_time()
        last_received_data_rel_time = render_time_delta(last_received_data_time, self.now_fn())
        last_received_data_abs_time = render_time_attr(last_received_data_time)

        announcement = server.get_announcement()
        version = announcement.get("my-version", "")
        available_space = server.get_available_space()
        if available_space is None:
            available_space = "N/A"
        else:
            available_space = abbreviate_size(available_space)
        ctx.fillSlots("address", addr)
        ctx.fillSlots("service_connection_status", service_connection_status)
        ctx.fillSlots("service_connection_status_alt",
                      self._connectedalts[service_connection_status])
        ctx.fillSlots("service_connection_status_abs_time", service_connection_status_abs_time)
        ctx.fillSlots("service_connection_status_rel_time", service_connection_status_rel_time)
        ctx.fillSlots("last_received_data_abs_time", last_received_data_abs_time)
        ctx.fillSlots("last_received_data_rel_time", last_received_data_rel_time)
        ctx.fillSlots("version", version)
        ctx.fillSlots("available_space", available_space)

        return ctx.tag

    def render_download_form(self, ctx, data):
        # this is a form where users can download files by URI
        form = T.form(action="uri", method="get",
                      enctype="multipart/form-data")[
            T.fieldset[
                T.legend(class_="freeform-form-label")["Download a file"],
                T.div["Tahoe-URI to download:"+SPACE,
                      T.input(type="text", name="uri")],
                T.div["Filename to download as:"+SPACE,
                      T.input(type="text", name="filename")],
                T.input(type="submit", value="Download!"),
                ]]
        return T.div[form]

    def render_view_form(self, ctx, data):
        # this is a form where users can download files by URI, or jump to a
        # named directory
        form = T.form(action="uri", method="get",
                      enctype="multipart/form-data")[
            T.fieldset[
                T.legend(class_="freeform-form-label")["View a file or directory"],
                "Tahoe-URI to view:"+SPACE,
                T.input(type="text", name="uri"), SPACE*2,
                T.input(type="submit", value="View!"),
                ]]
        return T.div[form]

    def render_upload_form(self, ctx, data):
        # This is a form where users can upload unlinked files.
        # Users can choose immutable, SDMF, or MDMF from a radio button.
        upload_chk  = T.input(type='radio', name='format',
                              value='chk', id='upload-chk',
                              checked='checked')
        upload_sdmf = T.input(type='radio', name='format',
                              value='sdmf', id='upload-sdmf')
        upload_mdmf = T.input(type='radio', name='format',
                              value='mdmf', id='upload-mdmf')
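        # The selected value ("chk", "sdmf", or "mdmf") is submitted as the
        # "format" form field, which render_POST on /uri reads via
        # get_format().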

        form = T.form(action="uri", method="post",
                      enctype="multipart/form-data")[
            T.fieldset[
                T.legend(class_="freeform-form-label")["Upload a file"],
                T.div["Choose a file:"+SPACE,
                      T.input(type="file", name="file", class_="freeform-input-file")],
                T.input(type="hidden", name="t", value="upload"),
                T.div[upload_chk,  T.label(for_="upload-chk") [" Immutable"], SPACE,
                      upload_sdmf, T.label(for_="upload-sdmf")[" SDMF"], SPACE,
                      upload_mdmf, T.label(for_="upload-mdmf")[" MDMF (experimental)"], SPACE*2,
                      T.input(type="submit", value="Upload!")],
                ]]
        return T.div[form]

    def render_mkdir_form(self, ctx, data):
        # This is a form where users can create new directories.
        # Users can choose SDMF or MDMF from a radio button.
        mkdir_sdmf = T.input(type='radio', name='format',
                             value='sdmf', id='mkdir-sdmf',
                             checked='checked')
        mkdir_mdmf = T.input(type='radio', name='format',
                             value='mdmf', id='mkdir-mdmf')

        form = T.form(action="uri", method="post",
                      enctype="multipart/form-data")[
            T.fieldset[
                T.legend(class_="freeform-form-label")["Create a directory"],
                mkdir_sdmf, T.label(for_='mkdir-sdmf')[" SDMF"], SPACE,
                mkdir_mdmf, T.label(for_='mkdir-mdmf')[" MDMF (experimental)"], SPACE*2,
                T.input(type="hidden", name="t", value="mkdir"),
                T.input(type="hidden", name="redirect_to_result", value="true"),
                T.input(type="submit", value="Create a directory"),
                ]]
        return T.div[form]

    def render_incident_button(self, ctx, data):
        # this button triggers a foolscap-logging "incident"
        form = T.form(action="report_incident", method="post",
                      enctype="multipart/form-data")[
            T.fieldset[
                T.input(type="hidden", name="t", value="report-incident"),
                "What went wrong?"+SPACE,
                T.input(type="text", name="details"), SPACE,
                T.input(type="submit", value=u"Save \u00BB"),
                ]]
        return T.div[form]