Fix mutable status (mapupdate/retrieve/publish) to use serverids, not tubids

This still leaves immutable-publish results incorrectly using tubids instead
of serverids. That will need some more work, since it might change the Helper
interface.
Brian Warner 2012-03-17 16:52:32 -07:00
parent 890449801e
commit a56e639346
5 changed files with 32 additions and 39 deletions
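
The pattern the commit moves to can be summarized in a short, self-contained sketch (the FakeServer and TimingStatus classes below are hypothetical stand-ins, not Tahoe code): per-server timing dicts are keyed by the server object itself, and a printable name is only derived at display time via get_name(). This works as long as there is a single long-lived object per server, so default identity hashing is enough for dict keys.

# Sketch only: FakeServer stands in for an IServer-like object.
class FakeServer:
    def __init__(self, serverid):
        self._serverid = serverid
    def get_serverid(self):
        return self._serverid
    def get_name(self):
        # assumption: a short printable form of the server id
        return self._serverid[:8]

class TimingStatus:
    def __init__(self):
        self.timings = {"send_per_server": {}}
    def add_per_server_time(self, server, elapsed):
        # key by the server object; no get_serverid() conversion needed
        self.timings["send_per_server"].setdefault(server, []).append(elapsed)

status = TimingStatus()
s = FakeServer("v0-abcdefghijk")
status.add_per_server_time(s, 0.12)
status.add_per_server_time(s, 0.34)
for server in sorted(status.timings["send_per_server"], key=lambda x: x.get_name()):
    print("[%s]: %s" % (server.get_name(), status.timings["send_per_server"][server]))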


@@ -1968,7 +1968,7 @@ class IDownloadResults(Interface):
       cumulative_decode : just time spent in zfec
       cumulative_decrypt : just time spent in decryption
       total : total download time, start to finish
-      fetch_per_server : dict of peerid to list of per-segment fetch times
+      fetch_per_server : dict of server to list of per-segment fetch times
     """


@@ -51,10 +51,9 @@ class PublishStatus:
         self.started = time.time()
     def add_per_server_time(self, server, elapsed):
-        serverid = server.get_serverid()
-        if serverid not in self.timings["send_per_server"]:
-            self.timings["send_per_server"][serverid] = []
-        self.timings["send_per_server"][serverid].append(elapsed)
+        if server not in self.timings["send_per_server"]:
+            self.timings["send_per_server"][server] = []
+        self.timings["send_per_server"][server].append(elapsed)
     def accumulate_encode_time(self, elapsed):
         self.timings["encode"] += elapsed
     def accumulate_encrypt_time(self, elapsed):


@@ -62,10 +62,9 @@ class RetrieveStatus:
         return self._problems
     def add_fetch_timing(self, server, elapsed):
-        serverid = server.get_serverid()
-        if serverid not in self.timings["fetch_per_server"]:
-            self.timings["fetch_per_server"][serverid] = []
-        self.timings["fetch_per_server"][serverid].append(elapsed)
+        if server not in self.timings["fetch_per_server"]:
+            self.timings["fetch_per_server"][server] = []
+        self.timings["fetch_per_server"][server].append(elapsed)
     def accumulate_decode_time(self, elapsed):
         self.timings["decode"] += elapsed
     def accumulate_decrypt_time(self, elapsed):


@@ -35,11 +35,10 @@ class UpdateStatus:
         self.finished = None
     def add_per_server_time(self, server, op, sent, elapsed):
-        serverid = server.get_serverid()
         assert op in ("query", "late", "privkey")
-        if serverid not in self.timings["per_server"]:
-            self.timings["per_server"][serverid] = []
-        self.timings["per_server"][serverid].append((op,sent,elapsed))
+        if server not in self.timings["per_server"]:
+            self.timings["per_server"][server] = []
+        self.timings["per_server"][server].append((op,sent,elapsed))
     def get_started(self):
         return self.started
@@ -71,7 +70,7 @@ class UpdateStatus:
     def set_mode(self, mode):
         self.mode = mode
     def set_privkey_from(self, server):
-        self.privkey_from = server.get_serverid()
+        self.privkey_from = server
     def set_status(self, status):
         self.status = status
     def set_progress(self, value):


@@ -761,11 +761,10 @@ class RetrieveStatusPage(rend.Page, RateAndTimeMixin):
         if not per_server:
             return ""
         l = T.ul()
-        for peerid in sorted(per_server.keys()):
-            peerid_s = idlib.shortnodeid_b2a(peerid)
+        for server in sorted(per_server.keys(), key=lambda s: s.get_name()):
             times_s = ", ".join([self.render_time(None, t)
-                                 for t in per_server[peerid]])
-            l[T.li["[%s]: %s" % (peerid_s, times_s)]]
+                                 for t in per_server[server]])
+            l[T.li["[%s]: %s" % (server.get_name(), times_s)]]
         return T.li["Per-Server Fetch Response Times: ", l]
@@ -874,11 +873,10 @@ class PublishStatusPage(rend.Page, RateAndTimeMixin):
         if not per_server:
             return ""
         l = T.ul()
-        for peerid in sorted(per_server.keys()):
-            peerid_s = idlib.shortnodeid_b2a(peerid)
+        for server in sorted(per_server.keys(), key=lambda s: s.get_name()):
             times_s = ", ".join([self.render_time(None, t)
-                                 for t in per_server[peerid]])
-            l[T.li["[%s]: %s" % (peerid_s, times_s)]]
+                                 for t in per_server[server]])
+            l[T.li["[%s]: %s" % (server.get_name(), times_s)]]
         return T.li["Per-Server Response Times: ", l]
 class MapupdateStatusPage(rend.Page, RateAndTimeMixin):
@@ -932,10 +930,9 @@ class MapupdateStatusPage(rend.Page, RateAndTimeMixin):
         return ctx.tag["Server Problems:", l]
     def render_privkey_from(self, ctx, data):
-        peerid = data.get_privkey_from()
-        if peerid:
-            return ctx.tag["Got privkey from: [%s]"
-                           % idlib.shortnodeid_b2a(peerid)]
+        server = data.get_privkey_from()
+        if server:
+            return ctx.tag["Got privkey from: [%s]" % server.get_name()]
         else:
             return ""
@@ -953,10 +950,9 @@ class MapupdateStatusPage(rend.Page, RateAndTimeMixin):
         if not per_server:
             return ""
         l = T.ul()
-        for peerid in sorted(per_server.keys()):
-            peerid_s = idlib.shortnodeid_b2a(peerid)
+        for server in sorted(per_server.keys(), key=lambda s: s.get_name()):
             times = []
-            for op,started,t in per_server[peerid]:
+            for op,started,t in per_server[server]:
                 #times.append("%s/%.4fs/%s/%s" % (op,
                 #                                 started,
                 #                                 self.render_time(None, started - self.update_status.get_started()),
@@ -968,7 +964,7 @@ class MapupdateStatusPage(rend.Page, RateAndTimeMixin):
                 else:
                     times.append( "privkey(" + self.render_time(None, t) + ")" )
             times_s = ", ".join(times)
-            l[T.li["[%s]: %s" % (peerid_s, times_s)]]
+            l[T.li["[%s]: %s" % (server.get_name(), times_s)]]
         return T.li["Per-Server Response Times: ", l]
     def render_timing_chart(self, ctx, data):
@@ -989,19 +985,19 @@ class MapupdateStatusPage(rend.Page, RateAndTimeMixin):
         nb_nodes = 0
         graph_botom_margin= 21
         graph_top_margin = 5
-        peerids_s = []
+        server_names = []
         top_abs = started
         # we sort the queries by the time at which we sent the first request
-        sorttable = [ (times[0][1], peerid)
-                      for peerid, times in per_server.items() ]
+        sorttable = [ (times[0][1], server)
+                      for server, times in per_server.items() ]
         sorttable.sort()
-        peerids = [t[1] for t in sorttable]
+        servers = [t[1] for t in sorttable]
 
-        for peerid in peerids:
+        for server in servers:
             nb_nodes += 1
-            times = per_server[peerid]
-            peerid_s = idlib.shortnodeid_b2a(peerid)
-            peerids_s.append(peerid_s)
+            times = per_server[server]
+            name = server.get_name()
+            server_names.append(name)
             # for servermap updates, there are either one or two queries per
             # peer. The second (if present) is to get the privkey.
             op,q_started,q_elapsed = times[0]
@@ -1028,7 +1024,7 @@ class MapupdateStatusPage(rend.Page, RateAndTimeMixin):
         pieces.append(chds)
         pieces.append("chxt=x,y")
         pieces.append("chxr=0,0.0,%0.3f" % top_rel)
-        pieces.append("chxl=1:|" + "|".join(reversed(peerids_s)))
+        pieces.append("chxl=1:|" + "|".join(reversed(server_names)))
         # use up to 10 grid lines, at decimal multiples.
         # mathutil.next_power_of_k doesn't handle numbers smaller than one,
         # unfortunately.
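
The query ordering used in the timing-chart hunk (sort servers by the time their first request was sent) can be illustrated with a small sketch. The _Srv class and the sample data are hypothetical; per_server is assumed to have the shape UpdateStatus.add_per_server_time() builds above, server -> list of (op, sent, elapsed) tuples.

# Hypothetical servers and query records, shaped like UpdateStatus timings.
class _Srv:
    def __init__(self, name):
        self._name = name
    def get_name(self):
        return self._name

a, b = _Srv("srv-a"), _Srv("srv-b")
per_server = {a: [("query", 10.0, 0.2)],
              b: [("query", 9.5, 0.3), ("privkey", 9.9, 0.1)]}

# order servers by the send time of their first query, as the chart code does
servers = sorted(per_server, key=lambda s: per_server[s][0][1])
server_names = [s.get_name() for s in servers]   # ["srv-b", "srv-a"]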