import time, pprint, itertools
import simplejson
from twisted.internet import defer
from nevow import rend, inevow, tags as T
from allmydata.util import base32, idlib
from allmydata.web.common import getxmlfile, get_arg, \
     abbreviate_time, abbreviate_rate, abbreviate_size, plural, compute_rate
from allmydata.interfaces import IUploadStatus, IDownloadStatus, \
     IPublishStatus, IRetrieveStatus, IServermapUpdaterStatus
2008-03-05 21:59:56 +00:00
|
|
|
|
|
|
|
class RateAndTimeMixin:
|
|
|
|
|
|
|
|
def render_time(self, ctx, data):
|
2008-03-07 04:16:38 +00:00
|
|
|
return abbreviate_time(data)
|
2008-03-05 21:59:56 +00:00
|
|
|
|
|
|
|
def render_rate(self, ctx, data):
|
2008-03-07 04:16:38 +00:00
|
|
|
return abbreviate_rate(data)
|
2008-03-05 21:59:56 +00:00
|
|
|
|
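
# The page classes below are Nevow renderers: rend.Page looks up methods
# named render_* and data_* by name from the corresponding .xhtml template
# (loaded with getxmlfile), so they take the usual (ctx, data) signature and
# may return either plain values or Deferreds.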

class UploadResultsRendererMixin(RateAndTimeMixin):
    # this requires a method named 'upload_results'

    def render_pushed_shares(self, ctx, data):
        d = self.upload_results()
        d.addCallback(lambda res: res.pushed_shares)
        return d

    def render_preexisting_shares(self, ctx, data):
        d = self.upload_results()
        d.addCallback(lambda res: res.preexisting_shares)
        return d

    def render_sharemap(self, ctx, data):
        d = self.upload_results()
        d.addCallback(lambda res: res.sharemap)
        def _render(sharemap):
            if sharemap is None:
                return "None"
            l = T.ul()
            for shnum, peerids in sorted(sharemap.items()):
                peerids = ', '.join([idlib.shortnodeid_b2a(i) for i in peerids])
                l[T.li["%d -> placed on [%s]" % (shnum, peerids)]]
            return l
        d.addCallback(_render)
        return d

    def render_servermap(self, ctx, data):
        d = self.upload_results()
        d.addCallback(lambda res: res.servermap)
        def _render(servermap):
            if servermap is None:
                return "None"
            l = T.ul()
            for peerid in sorted(servermap.keys()):
                peerid_s = idlib.shortnodeid_b2a(peerid)
                shares_s = ",".join(["#%d" % shnum
                                     for shnum in servermap[peerid]])
                l[T.li["[%s] got share%s: %s" % (peerid_s,
                                                 plural(servermap[peerid]),
                                                 shares_s)]]
            return l
        d.addCallback(_render)
        return d

    def data_file_size(self, ctx, data):
        d = self.upload_results()
        d.addCallback(lambda res: res.file_size)
        return d

    def _get_time(self, name):
        d = self.upload_results()
        d.addCallback(lambda res: res.timings.get(name))
        return d

    def data_time_total(self, ctx, data):
        return self._get_time("total")

    def data_time_storage_index(self, ctx, data):
        return self._get_time("storage_index")

    def data_time_contacting_helper(self, ctx, data):
        return self._get_time("contacting_helper")

    def data_time_existence_check(self, ctx, data):
        return self._get_time("existence_check")

    def data_time_cumulative_fetch(self, ctx, data):
        return self._get_time("cumulative_fetch")

    def data_time_helper_total(self, ctx, data):
        return self._get_time("helper_total")

    def data_time_peer_selection(self, ctx, data):
        return self._get_time("peer_selection")

    def data_time_total_encode_and_push(self, ctx, data):
        return self._get_time("total_encode_and_push")

    def data_time_cumulative_encoding(self, ctx, data):
        return self._get_time("cumulative_encoding")

    def data_time_cumulative_sending(self, ctx, data):
        return self._get_time("cumulative_sending")

    def data_time_hashes_and_close(self, ctx, data):
        return self._get_time("hashes_and_close")

    def _get_rate(self, name):
        d = self.upload_results()
        def _convert(r):
            file_size = r.file_size
            time = r.timings.get(name)
            return compute_rate(file_size, time)
        d.addCallback(_convert)
        return d

    def data_rate_total(self, ctx, data):
        return self._get_rate("total")

    def data_rate_storage_index(self, ctx, data):
        return self._get_rate("storage_index")

    def data_rate_encode(self, ctx, data):
        return self._get_rate("cumulative_encoding")

    def data_rate_push(self, ctx, data):
        return self._get_rate("cumulative_sending")

    def data_rate_encode_and_push(self, ctx, data):
        d = self.upload_results()
        def _convert(r):
            file_size = r.file_size
            time1 = r.timings.get("cumulative_encoding")
            time2 = r.timings.get("cumulative_sending")
            if (time1 is None or time2 is None):
                return None
            else:
                return compute_rate(file_size, time1+time2)
        d.addCallback(_convert)
        return d

    def data_rate_ciphertext_fetch(self, ctx, data):
        d = self.upload_results()
        def _convert(r):
            fetch_size = r.ciphertext_fetched
            time = r.timings.get("cumulative_fetch")
            return compute_rate(fetch_size, time)
        d.addCallback(_convert)
        return d


class UploadStatusPage(UploadResultsRendererMixin, rend.Page):
    docFactory = getxmlfile("upload-status.xhtml")

    def __init__(self, data):
        rend.Page.__init__(self, data)
        self.upload_status = data

    def upload_results(self):
        return defer.maybeDeferred(self.upload_status.get_results)

    def render_results(self, ctx, data):
        d = self.upload_results()
        def _got_results(results):
            if results:
                return ctx.tag
            return ""
        d.addCallback(_got_results)
        return d

    def render_started(self, ctx, data):
        TIME_FORMAT = "%H:%M:%S %d-%b-%Y"
        started_s = time.strftime(TIME_FORMAT,
                                  time.localtime(data.get_started()))
        return started_s

    def render_si(self, ctx, data):
        si_s = base32.b2a_or_none(data.get_storage_index())
        if si_s is None:
            si_s = "(None)"
        return si_s

    def render_helper(self, ctx, data):
        return {True: "Yes",
                False: "No"}[data.using_helper()]

    def render_total_size(self, ctx, data):
        size = data.get_size()
        if size is None:
            return "(unknown)"
        return size

    def render_progress_hash(self, ctx, data):
        progress = data.get_progress()[0]
        # TODO: make an ascii-art bar
        return "%.1f%%" % (100.0 * progress)

    def render_progress_ciphertext(self, ctx, data):
        progress = data.get_progress()[1]
        # TODO: make an ascii-art bar
        return "%.1f%%" % (100.0 * progress)

    def render_progress_encode_push(self, ctx, data):
        progress = data.get_progress()[2]
        # TODO: make an ascii-art bar
        return "%.1f%%" % (100.0 * progress)

    def render_status(self, ctx, data):
        return data.get_status()


class DownloadResultsRendererMixin(RateAndTimeMixin):
    # this requires a method named 'download_results'

    def render_servermap(self, ctx, data):
        d = self.download_results()
        d.addCallback(lambda res: res.servermap)
        def _render(servermap):
            if servermap is None:
                return "None"
            l = T.ul()
            for peerid in sorted(servermap.keys()):
                peerid_s = idlib.shortnodeid_b2a(peerid)
                shares_s = ",".join(["#%d" % shnum
                                     for shnum in servermap[peerid]])
                l[T.li["[%s] has share%s: %s" % (peerid_s,
                                                 plural(servermap[peerid]),
                                                 shares_s)]]
            return l
        d.addCallback(_render)
        return d

    def render_servers_used(self, ctx, data):
        d = self.download_results()
        d.addCallback(lambda res: res.servers_used)
        def _got(servers_used):
            if not servers_used:
                return ""
            peerids_s = ", ".join(["[%s]" % idlib.shortnodeid_b2a(peerid)
                                   for peerid in servers_used])
            return T.li["Servers Used: ", peerids_s]
        d.addCallback(_got)
        return d

    def render_problems(self, ctx, data):
        d = self.download_results()
        d.addCallback(lambda res: res.server_problems)
        def _got(server_problems):
            if not server_problems:
                return ""
            l = T.ul()
            for peerid in sorted(server_problems.keys()):
                peerid_s = idlib.shortnodeid_b2a(peerid)
                l[T.li["[%s]: %s" % (peerid_s, server_problems[peerid])]]
            return T.li["Server Problems:", l]
        d.addCallback(_got)
        return d

    def data_file_size(self, ctx, data):
        d = self.download_results()
        d.addCallback(lambda res: res.file_size)
        return d

    def _get_time(self, name):
        d = self.download_results()
        d.addCallback(lambda res: res.timings.get(name))
        return d

    def data_time_total(self, ctx, data):
        return self._get_time("total")

    def data_time_peer_selection(self, ctx, data):
        return self._get_time("peer_selection")

    def data_time_uri_extension(self, ctx, data):
        return self._get_time("uri_extension")

    def data_time_hashtrees(self, ctx, data):
        return self._get_time("hashtrees")

    def data_time_segments(self, ctx, data):
        return self._get_time("segments")

    def data_time_cumulative_fetch(self, ctx, data):
        return self._get_time("cumulative_fetch")

    def data_time_cumulative_decode(self, ctx, data):
        return self._get_time("cumulative_decode")

    def data_time_cumulative_decrypt(self, ctx, data):
        return self._get_time("cumulative_decrypt")

    def data_time_paused(self, ctx, data):
        return self._get_time("paused")

    def _get_rate(self, name):
        d = self.download_results()
        def _convert(r):
            file_size = r.file_size
            time = r.timings.get(name)
            return compute_rate(file_size, time)
        d.addCallback(_convert)
        return d

    def data_rate_total(self, ctx, data):
        return self._get_rate("total")

    def data_rate_segments(self, ctx, data):
        return self._get_rate("segments")

    def data_rate_fetch(self, ctx, data):
        return self._get_rate("cumulative_fetch")

    def data_rate_decode(self, ctx, data):
        return self._get_rate("cumulative_decode")

    def data_rate_decrypt(self, ctx, data):
        return self._get_rate("cumulative_decrypt")

    def render_server_timings(self, ctx, data):
        d = self.download_results()
        d.addCallback(lambda res: res.timings.get("fetch_per_server"))
        def _render(per_server):
            if per_server is None:
                return ""
            l = T.ul()
            for peerid in sorted(per_server.keys()):
                peerid_s = idlib.shortnodeid_b2a(peerid)
                times_s = ", ".join([self.render_time(None, t)
                                     for t in per_server[peerid]])
                l[T.li["[%s]: %s" % (peerid_s, times_s)]]
            return T.li["Per-Server Segment Fetch Response Times: ", l]
        d.addCallback(_render)
        return d


class DownloadStatusPage(DownloadResultsRendererMixin, rend.Page):
    docFactory = getxmlfile("download-status.xhtml")

    def __init__(self, data):
        rend.Page.__init__(self, data)
        self.download_status = data

    def child_timeline(self, ctx):
        return DownloadStatusTimelinePage(self.download_status)

    def download_results(self):
        return defer.maybeDeferred(self.download_status.get_results)

    def relative_time(self, t):
        if t is None:
            return t
        if self.download_status.first_timestamp is not None:
            return t - self.download_status.first_timestamp
        return t

    def short_relative_time(self, t):
        t = self.relative_time(t)
        if t is None:
            return ""
        return "+%.6fs" % t

    def _find_overlap(self, events, start_key, end_key):
        # given a list of event dicts, return a new list in which each event
        # has an extra "row" key (an int, starting at 0), and if appropriate
        # a "serverid" key (ascii-encoded server id), replacing the "server"
        # key. This is a hint to our JS frontend about how to overlap the
        # parts of the graph it is drawing.

        # we must always make a copy, since we're going to be adding keys
        # and don't want to change the original objects. If we're
        # stringifying serverids, we'll also be changing the serverid keys.
        new_events = []
        rows = []
        for ev in events:
            ev = ev.copy()
            if ev.has_key('server'):
                ev["serverid"] = base32.b2a(ev["server"].get_serverid())
                del ev["server"]
            # find an empty slot in the rows
            free_slot = None
            for row,finished in enumerate(rows):
                if finished is not None:
                    if ev[start_key] > finished:
                        free_slot = row
                        break
            if free_slot is None:
                free_slot = len(rows)
                rows.append(ev[end_key])
            else:
                rows[free_slot] = ev[end_key]
            ev["row"] = free_slot
            new_events.append(ev)
        return new_events
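
    # Illustrative example (values invented for this comment): events that
    # overlap in time get different rows, and a later event can reuse a row
    # once its previous occupant has finished.
    #
    #   events = [{"start_time": 0.0, "finish_time": 2.0},
    #             {"start_time": 1.0, "finish_time": 3.0},
    #             {"start_time": 2.5, "finish_time": 4.0}]
    #   _find_overlap(events, "start_time", "finish_time")
    #   # -> "row" keys 0, 1, 0: the third event re-uses row 0 because the
    #   #    first event finished (2.0) before it started (2.5)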

    def _find_overlap_requests(self, events):
        """We compute a three-element 'row tuple' for each event: (serverid,
        shnum, row). All elements are ints. The first is a mapping from
        serverid to group number, the second is a mapping from shnum to
        subgroup number. The third is a row within the subgroup.

        We also return a list of lists of rowcounts, so renderers can decide
        how much vertical space to give to each row.
        """

        serverid_to_group = {}
        groupnum_to_rows = {} # maps groupnum to a table of rows. Each table
                              # is a list with an element for each row number
                              # (int starting from 0) that contains a
                              # finish_time, indicating that the row is empty
                              # beyond that time. If finish_time is None, it
                              # indicates a response that has not yet
                              # completed, so the row cannot be reused.
        new_events = []
        for ev in events:
            # DownloadStatus promises to give us events in temporal order
            ev = ev.copy()
            ev["serverid"] = base32.b2a(ev["server"].get_serverid())
            del ev["server"]
            if ev["serverid"] not in serverid_to_group:
                groupnum = len(serverid_to_group)
                serverid_to_group[ev["serverid"]] = groupnum
            groupnum = serverid_to_group[ev["serverid"]]
            if groupnum not in groupnum_to_rows:
                groupnum_to_rows[groupnum] = []
            rows = groupnum_to_rows[groupnum]
            # find an empty slot in the rows
            free_slot = None
            for row,finished in enumerate(rows):
                if finished is not None:
                    if ev["start_time"] > finished:
                        free_slot = row
                        break
            if free_slot is None:
                free_slot = len(rows)
                rows.append(ev["finish_time"])
            else:
                rows[free_slot] = ev["finish_time"]
            ev["row"] = (groupnum, free_slot)
            new_events.append(ev)
        # maybe also return serverid_to_group, groupnum_to_rows, and some
        # indication of the highest finish_time
        #
        # actually, return the highest rownum for each groupnum
        highest_rownums = [len(groupnum_to_rows[groupnum])
                           for groupnum in range(len(serverid_to_group))]
        return new_events, highest_rownums
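
    # Illustrative example (invented data): block requests from two servers,
    # where serverA's second request starts before its first one finishes,
    # would come out roughly as:
    #
    #   ev_A1["row"] == (0, 0)     # serverA -> group 0, first row
    #   ev_A2["row"] == (0, 1)     # overlaps A1, so it gets a second row
    #   ev_B1["row"] == (1, 0)     # serverB -> its own group
    #   highest_rownums == [2, 1]  # rows used per group, for vertical sizing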

    def child_event_json(self, ctx):
        inevow.IRequest(ctx).setHeader("content-type", "text/plain")
        data = { } # this will be returned to the GET
        ds = self.download_status

        data["read"] = self._find_overlap(ds.read_events,
                                          "start_time", "finish_time")
        data["segment"] = self._find_overlap(ds.segment_events,
                                             "start_time", "finish_time")
        data["dyhb"] = self._find_overlap(ds.dyhb_requests,
                                          "start_time", "finish_time")
        data["block"],data["block_rownums"] = self._find_overlap_requests(ds.block_requests)

        servernums = {}
        serverid_strings = {}
        for d_ev in data["dyhb"]:
            if d_ev["serverid"] not in servernums:
                servernum = len(servernums)
                servernums[d_ev["serverid"]] = servernum
                #title= "%s: %s" % ( ",".join([str(shnum) for shnum in shnums]))
                serverid_strings[servernum] = d_ev["serverid"][:4]
        data["server_info"] = dict([(serverid, {"num": servernums[serverid],
                                                "color": self.color(base32.a2b(serverid)),
                                                "short": serverid_strings[servernums[serverid]],
                                                })
                                    for serverid in servernums.keys()])
        data["num_serverids"] = len(serverid_strings)
        # we'd prefer the keys of serverids[] to be ints, but this is JSON,
        # so they get converted to strings. Stupid javascript.
        data["serverids"] = serverid_strings
        data["bounds"] = {"min": ds.first_timestamp, "max": ds.last_timestamp}
        return simplejson.dumps(data, indent=1) + "\n"
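
    # Rough shape of the JSON produced above (field values are illustrative,
    # not taken from a real download):
    #
    #   { "read":    [ {"start_time": ..., "finish_time": ..., "row": 0}, ... ],
    #     "segment": [ ... ],
    #     "dyhb":    [ {"serverid": "...", "row": 0, ...}, ... ],
    #     "block":   [ {"serverid": "...", "row": [0, 0], ...}, ... ],
    #     "block_rownums": [ ... ],
    #     "server_info": { "<serverid>": {"num": 0, "color": "#c0ffee",
    #                                     "short": "xxxx"} },
    #     "num_serverids": 2,
    #     "serverids": {"0": "xxxx", "1": "yyyy"},
    #     "bounds": {"min": ..., "max": ...} }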

    def render_timeline_link(self, ctx, data):
        from nevow import url
        return T.a(href=url.URL.fromContext(ctx).child("timeline"))["timeline"]

    def _rate_and_time(self, bytes, seconds):
        time_s = self.render_time(None, seconds)
        if seconds != 0:
            rate = self.render_rate(None, 1.0 * bytes / seconds)
            return T.span(title=rate)[time_s]
        return T.span[time_s]

    def render_events(self, ctx, data):
        if not self.download_status.storage_index:
            return
        srt = self.short_relative_time
        l = T.div()

        t = T.table(align="left", class_="status-download-events")
        t[T.tr[T.th["serverid"], T.th["sent"], T.th["received"],
               T.th["shnums"], T.th["RTT"]]]
        for d_ev in self.download_status.dyhb_requests:
            server = d_ev["server"]
            sent = d_ev["start_time"]
            shnums = d_ev["response_shnums"]
            received = d_ev["finish_time"]
            rtt = None
            if received is not None:
                rtt = received - sent
            if not shnums:
                shnums = ["-"]
            t[T.tr(style="background: %s" % self.color(server.get_serverid()))[
                [T.td[server.get_name()], T.td[srt(sent)], T.td[srt(received)],
                 T.td[",".join([str(shnum) for shnum in shnums])],
                 T.td[self.render_time(None, rtt)],
                 ]]]

        l[T.h2["DYHB Requests:"], t]
        l[T.br(clear="all")]

        t = T.table(align="left",class_="status-download-events")
        t[T.tr[T.th["range"], T.th["start"], T.th["finish"], T.th["got"],
               T.th["time"], T.th["decrypttime"], T.th["pausedtime"],
               T.th["speed"]]]
        for r_ev in self.download_status.read_events:
            start = r_ev["start"]
            length = r_ev["length"]
            bytes = r_ev["bytes_returned"]
            decrypt_time = ""
            if bytes:
                decrypt_time = self._rate_and_time(bytes, r_ev["decrypt_time"])
            speed, rtt = "",""
            if r_ev["finish_time"] is not None:
                rtt = r_ev["finish_time"] - r_ev["start_time"] - r_ev["paused_time"]
                speed = self.render_rate(None, compute_rate(bytes, rtt))
                rtt = self.render_time(None, rtt)
            paused = self.render_time(None, r_ev["paused_time"])

            t[T.tr[T.td["[%d:+%d]" % (start, length)],
                   T.td[srt(r_ev["start_time"])], T.td[srt(r_ev["finish_time"])],
                   T.td[bytes], T.td[rtt],
                   T.td[decrypt_time], T.td[paused],
                   T.td[speed],
                   ]]

        l[T.h2["Read Events:"], t]
        l[T.br(clear="all")]

        t = T.table(align="left",class_="status-download-events")
        t[T.tr[T.th["segnum"], T.th["start"], T.th["active"], T.th["finish"],
               T.th["range"],
               T.th["decodetime"], T.th["segtime"], T.th["speed"]]]
        for s_ev in self.download_status.segment_events:
            range_s = "-"
            segtime_s = "-"
            speed = "-"
            decode_time = "-"
            if s_ev["finish_time"] is not None:
                if s_ev["success"]:
                    segtime = s_ev["finish_time"] - s_ev["active_time"]
                    segtime_s = self.render_time(None, segtime)
                    seglen = s_ev["segment_length"]
                    range_s = "[%d:+%d]" % (s_ev["segment_start"], seglen)
                    speed = self.render_rate(None, compute_rate(seglen, segtime))
                    decode_time = self._rate_and_time(seglen, s_ev["decode_time"])
                else:
                    # error
                    range_s = "error"
            else:
                # not finished yet
                pass

            t[T.tr[T.td["seg%d" % s_ev["segment_number"]],
                   T.td[srt(s_ev["start_time"])],
                   T.td[srt(s_ev["active_time"])],
                   T.td[srt(s_ev["finish_time"])],
                   T.td[range_s],
                   T.td[decode_time],
                   T.td[segtime_s], T.td[speed]]]

        l[T.h2["Segment Events:"], t]
        l[T.br(clear="all")]

        t = T.table(align="left",class_="status-download-events")
        t[T.tr[T.th["serverid"], T.th["shnum"], T.th["range"],
               T.th["txtime"], T.th["rxtime"],
               T.th["received"], T.th["RTT"]]]
        for r_ev in self.download_status.block_requests:
            server = r_ev["server"]
            rtt = None
            if r_ev["finish_time"] is not None:
                rtt = r_ev["finish_time"] - r_ev["start_time"]
            color = self.color(server.get_serverid())
            t[T.tr(style="background: %s" % color)[
                T.td[server.get_name()], T.td[r_ev["shnum"]],
                T.td["[%d:+%d]" % (r_ev["start"], r_ev["length"])],
                T.td[srt(r_ev["start_time"])], T.td[srt(r_ev["finish_time"])],
                T.td[r_ev["response_length"] or ""],
                T.td[self.render_time(None, rtt)],
                ]]

        l[T.h2["Requests:"], t]
        l[T.br(clear="all")]

        return l

    def color(self, peerid):
        def m(c):
            return min(ord(c) / 2 + 0x80, 0xff)
        return "#%02x%02x%02x" % (m(peerid[0]), m(peerid[1]), m(peerid[2]))
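
    # For example (illustrative): a peerid whose first three bytes are
    # 0x00 0x7f 0xff maps to "#80bfff" -- each channel is ord(byte)/2 + 0x80,
    # capped at 0xff, so every server gets a light, roughly-unique background.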

    def render_results(self, ctx, data):
        d = self.download_results()
        def _got_results(results):
            if results:
                return ctx.tag
            return ""
        d.addCallback(_got_results)
        return d

    def render_started(self, ctx, data):
        TIME_FORMAT = "%H:%M:%S %d-%b-%Y"
        started_s = time.strftime(TIME_FORMAT,
                                  time.localtime(data.get_started()))
        return started_s + " (%s)" % data.get_started()

    def render_si(self, ctx, data):
        si_s = base32.b2a_or_none(data.get_storage_index())
        if si_s is None:
            si_s = "(None)"
        return si_s

    def render_helper(self, ctx, data):
        return {True: "Yes",
                False: "No"}[data.using_helper()]

    def render_total_size(self, ctx, data):
        size = data.get_size()
        if size is None:
            return "(unknown)"
        return size

    def render_progress(self, ctx, data):
        progress = data.get_progress()
        # TODO: make an ascii-art bar
        return "%.1f%%" % (100.0 * progress)

    def render_status(self, ctx, data):
        return data.get_status()


class DownloadStatusTimelinePage(rend.Page):
    docFactory = getxmlfile("download-status-timeline.xhtml")

    def render_started(self, ctx, data):
        TIME_FORMAT = "%H:%M:%S %d-%b-%Y"
        started_s = time.strftime(TIME_FORMAT,
                                  time.localtime(data.get_started()))
        return started_s + " (%s)" % data.get_started()

    def render_si(self, ctx, data):
        si_s = base32.b2a_or_none(data.get_storage_index())
        if si_s is None:
            si_s = "(None)"
        return si_s

    def render_helper(self, ctx, data):
        return {True: "Yes",
                False: "No"}[data.using_helper()]

    def render_total_size(self, ctx, data):
        size = data.get_size()
        if size is None:
            return "(unknown)"
        return size

    def render_progress(self, ctx, data):
        progress = data.get_progress()
        # TODO: make an ascii-art bar
        return "%.1f%%" % (100.0 * progress)

    def render_status(self, ctx, data):
        return data.get_status()


class RetrieveStatusPage(rend.Page, RateAndTimeMixin):
    docFactory = getxmlfile("retrieve-status.xhtml")

    def __init__(self, data):
        rend.Page.__init__(self, data)
        self.retrieve_status = data

    def render_started(self, ctx, data):
        TIME_FORMAT = "%H:%M:%S %d-%b-%Y"
        started_s = time.strftime(TIME_FORMAT,
                                  time.localtime(data.get_started()))
        return started_s

    def render_si(self, ctx, data):
        si_s = base32.b2a_or_none(data.get_storage_index())
        if si_s is None:
            si_s = "(None)"
        return si_s

    def render_helper(self, ctx, data):
        return {True: "Yes",
                False: "No"}[data.using_helper()]

    def render_current_size(self, ctx, data):
        size = data.get_size()
        if size is None:
            size = "(unknown)"
        return size

    def render_progress(self, ctx, data):
        progress = data.get_progress()
        # TODO: make an ascii-art bar
        return "%.1f%%" % (100.0 * progress)

    def render_status(self, ctx, data):
        return data.get_status()

    def render_encoding(self, ctx, data):
        k, n = data.get_encoding()
        return ctx.tag["Encoding: %s of %s" % (k, n)]

    def render_problems(self, ctx, data):
        problems = data.problems
        if not problems:
            return ""
        l = T.ul()
        for peerid in sorted(problems.keys()):
            peerid_s = idlib.shortnodeid_b2a(peerid)
            l[T.li["[%s]: %s" % (peerid_s, problems[peerid])]]
        return ctx.tag["Server Problems:", l]

    def _get_rate(self, data, name):
        file_size = self.retrieve_status.get_size()
        time = self.retrieve_status.timings.get(name)
        return compute_rate(file_size, time)

    def data_time_total(self, ctx, data):
        return self.retrieve_status.timings.get("total")
    def data_rate_total(self, ctx, data):
        return self._get_rate(data, "total")

    def data_time_fetch(self, ctx, data):
        return self.retrieve_status.timings.get("fetch")
    def data_rate_fetch(self, ctx, data):
        return self._get_rate(data, "fetch")

    def data_time_decode(self, ctx, data):
        return self.retrieve_status.timings.get("decode")
    def data_rate_decode(self, ctx, data):
        return self._get_rate(data, "decode")

    def data_time_decrypt(self, ctx, data):
        return self.retrieve_status.timings.get("decrypt")
    def data_rate_decrypt(self, ctx, data):
        return self._get_rate(data, "decrypt")

    def render_server_timings(self, ctx, data):
        per_server = self.retrieve_status.timings.get("fetch_per_server")
        if not per_server:
            return ""
        l = T.ul()
        for peerid in sorted(per_server.keys()):
            peerid_s = idlib.shortnodeid_b2a(peerid)
            times_s = ", ".join([self.render_time(None, t)
                                 for t in per_server[peerid]])
            l[T.li["[%s]: %s" % (peerid_s, times_s)]]
        return T.li["Per-Server Fetch Response Times: ", l]


class PublishStatusPage(rend.Page, RateAndTimeMixin):
    docFactory = getxmlfile("publish-status.xhtml")

    def __init__(self, data):
        rend.Page.__init__(self, data)
        self.publish_status = data

    def render_started(self, ctx, data):
        TIME_FORMAT = "%H:%M:%S %d-%b-%Y"
        started_s = time.strftime(TIME_FORMAT,
                                  time.localtime(data.get_started()))
        return started_s

    def render_si(self, ctx, data):
        si_s = base32.b2a_or_none(data.get_storage_index())
        if si_s is None:
            si_s = "(None)"
        return si_s

    def render_helper(self, ctx, data):
        return {True: "Yes",
                False: "No"}[data.using_helper()]

    def render_current_size(self, ctx, data):
        size = data.get_size()
        if size is None:
            size = "(unknown)"
        return size

    def render_progress(self, ctx, data):
        progress = data.get_progress()
        # TODO: make an ascii-art bar
        return "%.1f%%" % (100.0 * progress)

    def render_status(self, ctx, data):
        return data.get_status()

    def render_encoding(self, ctx, data):
        k, n = data.get_encoding()
        return ctx.tag["Encoding: %s of %s" % (k, n)]

    def render_sharemap(self, ctx, data):
        servermap = data.get_servermap()
        if servermap is None:
            return ctx.tag["None"]
        l = T.ul()
        sharemap = servermap.make_sharemap()
        for shnum in sorted(sharemap.keys()):
            l[T.li["%d -> Placed on " % shnum,
                   ", ".join(["[%s]" % idlib.shortnodeid_b2a(peerid)
                              for peerid in sharemap[shnum]])]]
        return ctx.tag["Sharemap:", l]

    def render_problems(self, ctx, data):
        problems = data.problems
        if not problems:
            return ""
        l = T.ul()
        for peerid in sorted(problems.keys()):
            peerid_s = idlib.shortnodeid_b2a(peerid)
            l[T.li["[%s]: %s" % (peerid_s, problems[peerid])]]
        return ctx.tag["Server Problems:", l]

    def _get_rate(self, data, name):
        file_size = self.publish_status.get_size()
        time = self.publish_status.timings.get(name)
        return compute_rate(file_size, time)

    def data_time_total(self, ctx, data):
        return self.publish_status.timings.get("total")
    def data_rate_total(self, ctx, data):
        return self._get_rate(data, "total")

    def data_time_setup(self, ctx, data):
        return self.publish_status.timings.get("setup")

    def data_time_encrypt(self, ctx, data):
        return self.publish_status.timings.get("encrypt")
    def data_rate_encrypt(self, ctx, data):
        return self._get_rate(data, "encrypt")

    def data_time_encode(self, ctx, data):
        return self.publish_status.timings.get("encode")
    def data_rate_encode(self, ctx, data):
        return self._get_rate(data, "encode")

    def data_time_pack(self, ctx, data):
        return self.publish_status.timings.get("pack")
    def data_rate_pack(self, ctx, data):
        return self._get_rate(data, "pack")
    def data_time_sign(self, ctx, data):
        return self.publish_status.timings.get("sign")

    def data_time_push(self, ctx, data):
        return self.publish_status.timings.get("push")
    def data_rate_push(self, ctx, data):
        return self._get_rate(data, "push")

    def render_server_timings(self, ctx, data):
        per_server = self.publish_status.timings.get("send_per_server")
        if not per_server:
            return ""
        l = T.ul()
        for peerid in sorted(per_server.keys()):
            peerid_s = idlib.shortnodeid_b2a(peerid)
            times_s = ", ".join([self.render_time(None, t)
                                 for t in per_server[peerid]])
            l[T.li["[%s]: %s" % (peerid_s, times_s)]]
        return T.li["Per-Server Response Times: ", l]


class MapupdateStatusPage(rend.Page, RateAndTimeMixin):
    docFactory = getxmlfile("map-update-status.xhtml")

    def __init__(self, data):
        rend.Page.__init__(self, data)
        self.update_status = data

    def render_started(self, ctx, data):
        TIME_FORMAT = "%H:%M:%S %d-%b-%Y"
        started_s = time.strftime(TIME_FORMAT,
                                  time.localtime(data.get_started()))
        return started_s

    def render_finished(self, ctx, data):
        when = data.get_finished()
        if not when:
            return "not yet"
        TIME_FORMAT = "%H:%M:%S %d-%b-%Y"
        finished_s = time.strftime(TIME_FORMAT,
                                   time.localtime(data.get_finished()))
        return finished_s

    def render_si(self, ctx, data):
        si_s = base32.b2a_or_none(data.get_storage_index())
        if si_s is None:
            si_s = "(None)"
        return si_s

    def render_helper(self, ctx, data):
        return {True: "Yes",
                False: "No"}[data.using_helper()]

    def render_progress(self, ctx, data):
        progress = data.get_progress()
        # TODO: make an ascii-art bar
        return "%.1f%%" % (100.0 * progress)

    def render_status(self, ctx, data):
        return data.get_status()

    def render_problems(self, ctx, data):
        problems = data.problems
        if not problems:
            return ""
        l = T.ul()
        for peerid in sorted(problems.keys()):
            peerid_s = idlib.shortnodeid_b2a(peerid)
            l[T.li["[%s]: %s" % (peerid_s, problems[peerid])]]
        return ctx.tag["Server Problems:", l]

    def render_privkey_from(self, ctx, data):
        peerid = data.get_privkey_from()
        if peerid:
            return ctx.tag["Got privkey from: [%s]"
                           % idlib.shortnodeid_b2a(peerid)]
        else:
            return ""

    def data_time_total(self, ctx, data):
        return self.update_status.timings.get("total")

    def data_time_initial_queries(self, ctx, data):
        return self.update_status.timings.get("initial_queries")

    def data_time_cumulative_verify(self, ctx, data):
        return self.update_status.timings.get("cumulative_verify")

    def render_server_timings(self, ctx, data):
        per_server = self.update_status.timings.get("per_server")
        if not per_server:
            return ""
        l = T.ul()
        for peerid in sorted(per_server.keys()):
            peerid_s = idlib.shortnodeid_b2a(peerid)
            times = []
            for op,started,t in per_server[peerid]:
                #times.append("%s/%.4fs/%s/%s" % (op,
                #               started,
                #               self.render_time(None, started - self.update_status.get_started()),
                #               self.render_time(None,t)))
                if op == "query":
                    times.append( self.render_time(None, t) )
                elif op == "late":
                    times.append( "late(" + self.render_time(None, t) + ")" )
                else:
                    times.append( "privkey(" + self.render_time(None, t) + ")" )
            times_s = ", ".join(times)
            l[T.li["[%s]: %s" % (peerid_s, times_s)]]
        return T.li["Per-Server Response Times: ", l]

    def render_timing_chart(self, ctx, data):
        imageurl = self._timing_chart()
        return ctx.tag[imageurl]

    def _timing_chart(self):
        started = self.update_status.get_started()
        total = self.update_status.timings.get("total")
        per_server = self.update_status.timings.get("per_server")
        base = "http://chart.apis.google.com/chart?"
        pieces = ["cht=bhs"]
        pieces.append("chco=ffffff,4d89f9,c6d9fd") # colors
        data0 = []
        data1 = []
        data2 = []
        nb_nodes = 0
        graph_bottom_margin = 21
        graph_top_margin = 5
        peerids_s = []
        top_abs = started
        # we sort the queries by the time at which we sent the first request
        sorttable = [ (times[0][1], peerid)
                      for peerid, times in per_server.items() ]
        sorttable.sort()
        peerids = [t[1] for t in sorttable]

        for peerid in peerids:
            nb_nodes += 1
            times = per_server[peerid]
            peerid_s = idlib.shortnodeid_b2a(peerid)
            peerids_s.append(peerid_s)
            # for servermap updates, there are either one or two queries per
            # peer. The second (if present) is to get the privkey.
            op,q_started,q_elapsed = times[0]
            data0.append("%.3f" % (q_started-started))
            data1.append("%.3f" % q_elapsed)
            top_abs = max(top_abs, q_started+q_elapsed)
            if len(times) > 1:
                op,p_started,p_elapsed = times[1]
                data2.append("%.3f" % p_elapsed)
                top_abs = max(top_abs, p_started+p_elapsed)
            else:
                data2.append("0.0")
        finished = self.update_status.get_finished()
        if finished:
            top_abs = max(top_abs, finished)
        top_rel = top_abs - started
        chs = "chs=400x%d" % ( (nb_nodes*28) + graph_top_margin + graph_bottom_margin )
        chd = "chd=t:" + "|".join([",".join(data0),
                                   ",".join(data1),
                                   ",".join(data2)])
        pieces.append(chd)
        pieces.append(chs)
        chds = "chds=0,%0.3f" % top_rel
        pieces.append(chds)
        pieces.append("chxt=x,y")
        pieces.append("chxr=0,0.0,%0.3f" % top_rel)
        pieces.append("chxl=1:|" + "|".join(reversed(peerids_s)))
        # use up to 10 grid lines, at decimal multiples.
        # mathutil.next_power_of_k doesn't handle numbers smaller than one,
        # unfortunately.
        #pieces.append("chg="

        if total is not None:
            finished_f = 1.0 * total / top_rel
            pieces.append("chm=r,FF0000,0,%0.3f,%0.3f" % (finished_f,
                                                          finished_f+0.01))
        url = base + "&".join(pieces)
        return T.img(src=url,border="1",align="right", float="right")
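
    # The assembled chart URL looks roughly like this (values illustrative):
    #   http://chart.apis.google.com/chart?cht=bhs&chco=ffffff,4d89f9,c6d9fd
    #     &chd=t:0.000,0.012|0.085,0.103|0.000,0.091&chs=400x82&chds=0,0.205
    #     &chxt=x,y&chxr=0,0.0,0.205&chxl=1:|xgru5adv|fob7vqgd
    # i.e. a horizontal stacked bar per server: an invisible offset segment
    # (time until the query was sent), the query time, and, if present, the
    # privkey-fetch time, with a thin red marker at the overall finish time.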


class Status(rend.Page):
    docFactory = getxmlfile("status.xhtml")
    addSlash = True

    def __init__(self, history):
        rend.Page.__init__(self, history)
        self.history = history

    def renderHTTP(self, ctx):
        req = inevow.IRequest(ctx)
        t = get_arg(req, "t")
        if t == "json":
            return self.json(req)
        return rend.Page.renderHTTP(self, ctx)

    def json(self, req):
        req.setHeader("content-type", "text/plain")
        data = {}
        data["active"] = active = []
        for s in self._get_active_operations():
            si_s = base32.b2a_or_none(s.get_storage_index())
            size = s.get_size()
            status = s.get_status()
            if IUploadStatus.providedBy(s):
                h,c,e = s.get_progress()
                active.append({"type": "upload",
                               "storage-index-string": si_s,
                               "total-size": size,
                               "status": status,
                               "progress-hash": h,
                               "progress-ciphertext": c,
                               "progress-encode-push": e,
                               })
            elif IDownloadStatus.providedBy(s):
                active.append({"type": "download",
                               "storage-index-string": si_s,
                               "total-size": size,
                               "status": status,
                               "progress": s.get_progress(),
                               })

        return simplejson.dumps(data, indent=1) + "\n"

    def _get_all_statuses(self):
        h = self.history
        return itertools.chain(h.list_all_upload_statuses(),
                               h.list_all_download_statuses(),
                               h.list_all_mapupdate_statuses(),
                               h.list_all_publish_statuses(),
                               h.list_all_retrieve_statuses(),
                               h.list_all_helper_statuses(),
                               )

    def data_active_operations(self, ctx, data):
        return self._get_active_operations()

    def _get_active_operations(self):
        active = [s
                  for s in self._get_all_statuses()
                  if s.get_active()]
        return active

    def data_recent_operations(self, ctx, data):
        return self._get_recent_operations()

    def _get_recent_operations(self):
        recent = [s
                  for s in self._get_all_statuses()
                  if not s.get_active()]
        recent.sort(lambda a,b: cmp(a.get_started(), b.get_started()))
        recent.reverse()
        return recent

    def render_row(self, ctx, data):
        s = data

        TIME_FORMAT = "%H:%M:%S %d-%b-%Y"
        started_s = time.strftime(TIME_FORMAT,
                                  time.localtime(s.get_started()))
        ctx.fillSlots("started", started_s)

        si_s = base32.b2a_or_none(s.get_storage_index())
        if si_s is None:
            si_s = "(None)"
        ctx.fillSlots("si", si_s)
        ctx.fillSlots("helper", {True: "Yes",
                                 False: "No"}[s.using_helper()])

        size = s.get_size()
        if size is None:
            size = "(unknown)"
        elif isinstance(size, (int, long, float)):
            size = abbreviate_size(size)
        ctx.fillSlots("total_size", size)

        progress = data.get_progress()
        if IUploadStatus.providedBy(data):
            link = "up-%d" % data.get_counter()
            ctx.fillSlots("type", "upload")
            # TODO: make an ascii-art bar
            (chk, ciphertext, encandpush) = progress
            progress_s = ("hash: %.1f%%, ciphertext: %.1f%%, encode: %.1f%%" %
                          ( (100.0 * chk),
                            (100.0 * ciphertext),
                            (100.0 * encandpush) ))
            ctx.fillSlots("progress", progress_s)
        elif IDownloadStatus.providedBy(data):
            link = "down-%d" % data.get_counter()
            ctx.fillSlots("type", "download")
            ctx.fillSlots("progress", "%.1f%%" % (100.0 * progress))
        elif IPublishStatus.providedBy(data):
            link = "publish-%d" % data.get_counter()
            ctx.fillSlots("type", "publish")
            ctx.fillSlots("progress", "%.1f%%" % (100.0 * progress))
        elif IRetrieveStatus.providedBy(data):
            ctx.fillSlots("type", "retrieve")
            link = "retrieve-%d" % data.get_counter()
            ctx.fillSlots("progress", "%.1f%%" % (100.0 * progress))
        else:
            assert IServermapUpdaterStatus.providedBy(data)
            ctx.fillSlots("type", "mapupdate %s" % data.get_mode())
            link = "mapupdate-%d" % data.get_counter()
            ctx.fillSlots("progress", "%.1f%%" % (100.0 * progress))
        ctx.fillSlots("status", T.a(href=link)[s.get_status()])
        return ctx.tag

    def childFactory(self, ctx, name):
        h = self.history
        stype,count_s = name.split("-")
        count = int(count_s)
        if stype == "up":
            for s in itertools.chain(h.list_all_upload_statuses(),
                                     h.list_all_helper_statuses()):
                # immutable-upload helpers use the same status object as a
                # regular immutable-upload
                if s.get_counter() == count:
                    return UploadStatusPage(s)
        if stype == "down":
            for s in h.list_all_download_statuses():
                if s.get_counter() == count:
                    return DownloadStatusPage(s)
        if stype == "mapupdate":
            for s in h.list_all_mapupdate_statuses():
                if s.get_counter() == count:
                    return MapupdateStatusPage(s)
        if stype == "publish":
            for s in h.list_all_publish_statuses():
                if s.get_counter() == count:
                    return PublishStatusPage(s)
        if stype == "retrieve":
            for s in h.list_all_retrieve_statuses():
                if s.get_counter() == count:
                    return RetrieveStatusPage(s)

class HelperStatus(rend.Page):
    docFactory = getxmlfile("helper.xhtml")

    def __init__(self, helper):
        rend.Page.__init__(self, helper)
        self.helper = helper

    def renderHTTP(self, ctx):
        req = inevow.IRequest(ctx)
        t = get_arg(req, "t")
        if t == "json":
            return self.render_JSON(req)
        return rend.Page.renderHTTP(self, ctx)

    def data_helper_stats(self, ctx, data):
        return self.helper.get_stats()

    def render_JSON(self, req):
        req.setHeader("content-type", "text/plain")
        if self.helper:
            stats = self.helper.get_stats()
            return simplejson.dumps(stats, indent=1) + "\n"
        return simplejson.dumps({}) + "\n"

    def render_active_uploads(self, ctx, data):
        return data["chk_upload_helper.active_uploads"]

    def render_incoming(self, ctx, data):
        return "%d bytes in %d files" % (data["chk_upload_helper.incoming_size"],
                                         data["chk_upload_helper.incoming_count"])

    def render_encoding(self, ctx, data):
        return "%d bytes in %d files" % (data["chk_upload_helper.encoding_size"],
                                         data["chk_upload_helper.encoding_count"])

    def render_upload_requests(self, ctx, data):
        return str(data["chk_upload_helper.upload_requests"])

    def render_upload_already_present(self, ctx, data):
        return str(data["chk_upload_helper.upload_already_present"])

    def render_upload_need_upload(self, ctx, data):
        return str(data["chk_upload_helper.upload_need_upload"])

    def render_upload_bytes_fetched(self, ctx, data):
        return str(data["chk_upload_helper.fetched_bytes"])

    def render_upload_bytes_encoded(self, ctx, data):
        return str(data["chk_upload_helper.encoded_bytes"])

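# Usage note (illustrative, not part of the original module): when the request
# carries t=json, renderHTTP() above skips the HTML template and returns the
# helper's stats dict via render_JSON(). A minimal client-side sketch, assuming
# a local node webport of 3456 and a /helper_status URL (both assumptions):
#
#     import urllib, simplejson
#     url = "http://127.0.0.1:3456/helper_status?t=json"
#     stats = simplejson.loads(urllib.urlopen(url).read())
#     print stats.get("chk_upload_helper.active_uploads", 0), "active uploads"
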
class Statistics(rend.Page):
    docFactory = getxmlfile("statistics.xhtml")

    def __init__(self, provider):
        rend.Page.__init__(self, provider)
        self.provider = provider

    def renderHTTP(self, ctx):
        req = inevow.IRequest(ctx)
        t = get_arg(req, "t")
        if t == "json":
            stats = self.provider.get_stats()
            req.setHeader("content-type", "text/plain")
            return simplejson.dumps(stats, indent=1) + "\n"
        return rend.Page.renderHTTP(self, ctx)

    def data_get_stats(self, ctx, data):
        return self.provider.get_stats()

    def render_load_average(self, ctx, data):
        return str(data["stats"].get("load_monitor.avg_load"))

    def render_peak_load(self, ctx, data):
        return str(data["stats"].get("load_monitor.max_load"))

    def render_uploads(self, ctx, data):
        files = data["counters"].get("uploader.files_uploaded", 0)
        bytes = data["counters"].get("uploader.bytes_uploaded", 0)
        return ("%s files / %s bytes (%s)" %
                (files, bytes, abbreviate_size(bytes)))

    def render_downloads(self, ctx, data):
        files = data["counters"].get("downloader.files_downloaded", 0)
        bytes = data["counters"].get("downloader.bytes_downloaded", 0)
        return ("%s files / %s bytes (%s)" %
                (files, bytes, abbreviate_size(bytes)))

    def render_publishes(self, ctx, data):
        files = data["counters"].get("mutable.files_published", 0)
        bytes = data["counters"].get("mutable.bytes_published", 0)
        return "%s files / %s bytes (%s)" % (files, bytes,
                                             abbreviate_size(bytes))

    def render_retrieves(self, ctx, data):
        files = data["counters"].get("mutable.files_retrieved", 0)
        bytes = data["counters"].get("mutable.bytes_retrieved", 0)
        return "%s files / %s bytes (%s)" % (files, bytes,
                                             abbreviate_size(bytes))

    def render_raw(self, ctx, data):
        raw = pprint.pformat(data)
        return ctx.tag[raw]
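# Usage note (illustrative, not part of the original module): Statistics serves
# the same dict returned by provider.get_stats() as JSON when t=json is given;
# the renderers above read its "counters" and "stats" sub-dicts. A minimal
# sketch, assuming a local node webport of 3456 and a /statistics URL (both
# assumptions):
#
#     import urllib, simplejson
#     url = "http://127.0.0.1:3456/statistics?t=json"
#     stats = simplejson.loads(urllib.urlopen(url).read())
#     counters = stats.get("counters", {})
#     print counters.get("uploader.files_uploaded", 0), "files uploaded"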