Mirror of https://github.com/tahoe-lafs/tahoe-lafs.git, synced 2024-12-24 07:06:41 +00:00

Merge remote-tracking branch 'origin/master' into ticket3006-websocket-streaming-logs

Commit 848950a0c0
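Every hunk below applies the same two-part modernization to one of the
repository's helper scripts (plus a small CircleCI workflow change merged in
at the same time): add "from __future__ import print_function" near the top
of the file, and rewrite each Python 2 print statement as a print() call, so
the same source parses under both Python 2.7 and Python 3. A minimal sketch
of the idiom, using illustrative values only:

    from __future__ import print_function
    import sys

    print("plain line")                    # py2 statement form: print "plain line"
    print("no newline yet", end=' ')       # py2 trailing-comma form: print "no newline yet",
    print("to a stream", file=sys.stderr)  # py2 redirect form: print >>sys.stderr, "..."
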
@@ -29,6 +29,8 @@ workflows:
       - "lint"
       - "deprecations"
       - "c-locale"
+      # Any locale other than C or UTF-8.
+      - "another-locale"
 
       - "integration":
           requires:
@@ -196,6 +198,15 @@ jobs:
       LANG: "C"
 
+
+  another-locale:
+    <<: *DEBIAN
+
+    environment:
+      <<: *UTF_8_ENVIRONMENT
+      # aka "Latin 1"
+      LANG: "en_US.ISO-8859-1"
+
 
   deprecations:
     <<: *DEBIAN
 
@@ -1,5 +1,7 @@
 # -*- python -*-
 
+from __future__ import print_function
+
 """Monitor a Tahoe grid, by playing sounds in response to remote events.
 
 To install:
@@ -47,20 +49,20 @@ class Listener:
 
         # messages emitted by the Introducer: client join/leave
         if message.startswith("introducer: subscription[storage] request"):
-            print "new client"
+            print("new client")
            self.sound("voice/hooray.aiff")
         if message.startswith("introducer: unsubscribing"):
-            print "unsubscribe"
+            print("unsubscribe")
            self.sound("electro/zaptrill-fade.aiff")
 
         # messages from the helper
         if message == "file already found in grid":
-            print "already found"
+            print("already found")
            self.sound("mech/ziplash-high.aiff")
         #if message == "upload done":
         if format == "plaintext_hash=%(plaintext_hash)s, SI=%(SI)s, size=%(size)d":
            size = m.get("size")
-           print "upload done, size", size
+           print("upload done, size", size)
            self.sound("mech/ziplash-low.aiff")
         if "fetching " in message:
            # helper grabbing ciphertext from client
@@ -90,31 +92,31 @@ class Listener:
            pass
         elif format == "excessive reactor delay (%ss)":
            self.sound("animal/frog-cheep.aiff")
-           print "excessive delay %s: %s" % (m['args'][0], furl)
+           print("excessive delay %s: %s" % (m['args'][0], furl))
         elif format == "excessive reactor delay (%(delay)ss)":
            self.sound("animal/frog-cheep.aiff")
-           print "excessive delay %s: %s" % (m['delay'], furl)
+           print("excessive delay %s: %s" % (m['delay'], furl))
         elif facility == "foolscap.negotiation":
            if (message == "got offer for an existing connection"
                or "master told us to use a new connection" in message):
-               print "foolscap: got offer for an existing connection", message, furl
+               print("foolscap: got offer for an existing connection", message, furl)
            else:
                #print "foolscap:", message
                pass
         elif m['level'] > 30: # SCARY or BAD
            #self.sound("mech/alarm-bell.aiff")
            self.sound("environ/thunder-tense.aiff")
-           print m, furl
+           print(m, furl)
         elif m['level'] == 30: # WEIRD
            self.sound("mech/glass-breaking.aiff")
-           print m, furl
+           print(m, furl)
         elif m['level'] > 20: # UNUSUAL or INFREQUENT or CURIOUS
            self.sound("mech/telephone-ring-old.aiff")
-           print m, furl
+           print(m, furl)
 
 class BoodleSender(protocol.Protocol):
     def connectionMade(self):
-        print "connected to boodler"
+        print("connected to boodler")
         self.factory.listener.boodler = self.transport
 
 class Bridge(Referenceable):
@@ -150,7 +152,7 @@ class Monitor(service.MultiService):
         reactor.connectTCP("localhost", 31863, cf)
 
     def _got_logpublisher(self, publisher, fn, i, target):
-        print "connected to %s:%d, %s" % (fn, i, target)
+        print("connected to %s:%d, %s" % (fn, i, target))
         b = Bridge(target, self.listener)
         publisher.callRemote("subscribe_to_all", b)
 
@@ -2,13 +2,15 @@
 
 # This helper script is used with the 'test-desert-island' Makefile target.
 
+from __future__ import print_function
+
 import sys
 
 good = True
 build_out = sys.argv[1]
 mode = sys.argv[2]
 
-print
+print()
 
 for line in open(build_out, "r"):
     if mode == "no-downloads":
@@ -29,13 +31,13 @@ for line in open(build_out, "r"):
         # currently don't enforce that stronger requirement.
         if (line.startswith("Downloading http:") or
             line.startswith("Downloading https:")):
-            print line,
+            print(line, end=' ')
            good = False
 if good:
     if mode == "no-downloads":
-        print "Good: build did not try to download any files"
+        print("Good: build did not try to download any files")
     sys.exit(0)
 else:
     if mode == "no-downloads":
-        print "Failed: build tried to download files"
+        print("Failed: build tried to download files")
     sys.exit(1)
 
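One conversion in this file is not purely mechanical. Python 2's
"print line," form defers the separating space through the interpreter's
softspace flag, and suppresses it entirely when the printed string already
ends in "\n"; print(line, end=' ') always writes the space. For "Downloading
..." lines that end in a newline the visible report is effectively the same,
but the two outputs are not byte-identical. A small illustration (the sample
string is hypothetical):

    from __future__ import print_function

    line = "Downloading http://example.invalid/pkg-1.0.tar.gz\n"
    print(line, end=' ')   # emits line plus a trailing space;
                           # py2's `print line,` would emit line alone here
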
@@ -2,6 +2,8 @@
 # This script generates a table of dependencies in HTML format on stdout.
 # It expects to be run in the tahoe-lafs-dep-eggs directory.
 
+from __future__ import print_function
+
 import re, os, sys
 import pkg_resources
 
@@ -83,27 +85,27 @@ greybgstyle = '; background-color: #E0E0E0'
 nobgstyle = ''
 unsupportedstyle = '; color: #C00000'
 
-print '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">'
+print('<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">')
-print '<html>'
+print('<html>')
-print '<head>'
+print('<head>')
-print ' <meta http-equiv="Content-Type" content="text/html;charset=us-ascii">'
+print(' <meta http-equiv="Content-Type" content="text/html;charset=us-ascii">')
-print ' <title>Software packages that Tahoe-LAFS depends on</title>'
+print(' <title>Software packages that Tahoe-LAFS depends on</title>')
-print '</head>'
+print('</head>')
-print '<body>'
+print('<body>')
-print '<h2>What is this?</h2>'
+print('<h2>What is this?</h2>')
-print '<p>See <a href="https://tahoe-lafs.org/trac/tahoe-lafs/browser/docs/quickstart.rst">quickstart.rst</a>, <a href="https://tahoe-lafs.org/trac/tahoe-lafs/wiki/Installation">wiki:Installation</a>, and <a href="https://tahoe-lafs.org/trac/tahoe-lafs/wiki/CompileError">wiki:CompileError</a>.'
+print('<p>See <a href="https://tahoe-lafs.org/trac/tahoe-lafs/browser/docs/quickstart.rst">quickstart.rst</a>, <a href="https://tahoe-lafs.org/trac/tahoe-lafs/wiki/Installation">wiki:Installation</a>, and <a href="https://tahoe-lafs.org/trac/tahoe-lafs/wiki/CompileError">wiki:CompileError</a>.')
-print '<h2>Software packages that Tahoe-LAFS depends on</h2>'
+print('<h2>Software packages that Tahoe-LAFS depends on</h2>')
-print
+print()
 for pyver in reversed(sorted(python_versions)):
     greybackground = False
     if pyver:
-        print '<p>Packages for Python %s that have compiled C/C++ code:</p>' % (pyver,)
+        print('<p>Packages for Python %s that have compiled C/C++ code:</p>' % (pyver,))
-        print '<table border="1">'
+        print('<table border="1">')
-        print ' <tr>'
+        print(' <tr>')
-        print ' <th style="background-color: #FFFFD0" width="%d%%"> Platform </th>' % (width,)
+        print(' <th style="background-color: #FFFFD0" width="%d%%"> Platform </th>' % (width,))
         for pkg in sorted(platform_dependent_pkgs):
-            print ' <th style="background-color: #FFE8FF;" width="%d%%"> %s </th>' % (width, pkg)
+            print(' <th style="background-color: #FFE8FF;" width="%d%%"> %s </th>' % (width, pkg))
-        print ' </tr>'
+        print(' </tr>')
 
     first = True
     for platform in sorted(matrix[pyver]):
@@ -122,38 +124,38 @@ for pyver in reversed(sorted(python_versions)):
         style2 = first and 'border-top: 2px solid #000000' or ''
         style2 += bgstyle
         annotated_platform = platform.replace('-', '‑') + (unsupported_python and ' (unsupported)' or '')
-        print ' <tr>'
+        print(' <tr>')
-        print ' <td style="%s"> %s </td>' % (style1, annotated_platform)
+        print(' <td style="%s"> %s </td>' % (style1, annotated_platform))
         for pkg in sorted(platform_dependent_pkgs):
            if pkg == 'pywin32' and not platform.startswith('windows'):
-               print ' <td style="border: 0; text-align: center; %s"> n/a </td>' % (style2,)
+               print(' <td style="border: 0; text-align: center; %s"> n/a </td>' % (style2,))
            else:
-               print ' <td style="%s"> %s</td>' % (style2, file_list(row_files, pkg))
+               print(' <td style="%s"> %s</td>' % (style2, file_list(row_files, pkg)))
-        print ' </tr>'
+        print(' </tr>')
         first = False
 
-    print '</table>'
+    print('</table>')
-    print
+    print()
 
-print '<p>Packages that are platform-independent or source-only:</p>'
+print('<p>Packages that are platform-independent or source-only:</p>')
-print '<table border="1">'
+print('<table border="1">')
-print ' <tr>'
+print(' <tr>')
-print ' <th style="background-color:#FFFFD0;"> Package </th>'
+print(' <th style="background-color:#FFFFD0;"> Package </th>')
-print ' <th style="background-color:#FFE8FF;"> All Python versions </th>'
+print(' <th style="background-color:#FFE8FF;"> All Python versions </th>')
-print ' </tr>'
+print(' </tr>')
 
 style1 = 'border-top: 2px solid #000000; background-color:#FFFFF0;'
 style2 = 'border-top: 2px solid #000000;'
 m = matrix['']['']
 for pkg in sorted(platform_independent_pkgs):
-    print ' <tr>'
+    print(' <tr>')
-    print ' <th style="%s"> %s </th>' % (style1, pkg)
+    print(' <th style="%s"> %s </th>' % (style1, pkg))
-    print ' <td style="%s"> %s</td>' % (style2, file_list(m, pkg))
+    print(' <td style="%s"> %s</td>' % (style2, file_list(m, pkg)))
-    print ' </tr>'
+    print(' </tr>')
 
-print '</table>'
+print('</table>')
 
 # The document does validate, but not when it is included at the bottom of a directory listing.
 #print '<hr>'
 #print '<a href="http://validator.w3.org/check?uri=referer" target="_blank"><img border="0" src="http://www.w3.org/Icons/valid-html401-blue" alt="Valid HTML 4.01 Transitional" height="31" width="88"></a>'
-print '</body></html>'
+print('</body></html>')
 
@@ -1,3 +1,5 @@
+from __future__ import print_function
+
 import sys, os, io
 from twisted.internet import reactor, protocol, task, defer
 from twisted.python.procutils import which
@@ -51,7 +53,7 @@ def run_command(main):
     pw = os.environ.get("PYTHONWARNINGS")
     DDW = "default::DeprecationWarning"
     if pw != DDW:
-        print "note: $PYTHONWARNINGS is '%s', not the expected %s" % (pw, DDW)
+        print("note: $PYTHONWARNINGS is '%s', not the expected %s" % (pw, DDW))
         sys.stdout.flush()
 
     pp = RunPP()
@@ -84,11 +86,11 @@ def run_command(main):
     if warnings:
         if config["warnings"]:
            with open(config["warnings"], "wb") as f:
-               print >>f, "".join(warnings)
+               print("".join(warnings), file=f)
-        print "ERROR: %d deprecation warnings found" % len(warnings)
+        print("ERROR: %d deprecation warnings found" % len(warnings))
         sys.exit(1)
 
-    print "no deprecation warnings"
+    print("no deprecation warnings")
     if signal:
         sys.exit(signal)
     sys.exit(rc)
 
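The one non-mechanical rewrite in this file is the stream-redirect form:
Python 2's "print >>f, ..." becomes "print(..., file=f)". Note that the
unchanged context keeps open(config["warnings"], "wb"): harmless on
Python 2, but on Python 3 print() raises TypeError when handed a
binary-mode file, so that call only runs as-is under Python 2. A sketch of
the fully portable spelling (file name and contents are hypothetical):

    from __future__ import print_function

    warnings = ["foo.py:12: DeprecationWarning: ...\n"]  # illustrative
    with open("warnings.out", "w") as f:   # text mode, not "wb"
        print("".join(warnings), file=f)   # py2 spelling: print >>f, "".join(warnings)
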
@@ -1,5 +1,7 @@
 #! /usr/bin/env python
 
+from __future__ import print_function
+
 import locale, os, platform, subprocess, sys, traceback
 
 
@@ -13,27 +15,27 @@ def print_platform():
     try:
         import platform
         out = platform.platform()
-        print "platform:", foldlines(out)
+        print("platform:", foldlines(out))
-        print "machine: ", platform.machine()
+        print("machine: ", platform.machine())
         if hasattr(platform, 'linux_distribution'):
-            print "linux_distribution:", repr(platform.linux_distribution())
+            print("linux_distribution:", repr(platform.linux_distribution()))
     except EnvironmentError:
         sys.stderr.write("\nGot exception using 'platform'. Exception follows\n")
         traceback.print_exc(file=sys.stderr)
         sys.stderr.flush()
 
 def print_python_ver():
-    print "python:", foldlines(sys.version)
+    print("python:", foldlines(sys.version))
-    print 'maxunicode: ' + str(sys.maxunicode)
+    print('maxunicode: ' + str(sys.maxunicode))
 
 def print_python_encoding_settings():
-    print 'filesystem.encoding: ' + str(sys.getfilesystemencoding())
+    print('filesystem.encoding: ' + str(sys.getfilesystemencoding()))
-    print 'locale.getpreferredencoding: ' + str(locale.getpreferredencoding())
+    print('locale.getpreferredencoding: ' + str(locale.getpreferredencoding()))
     try:
-        print 'locale.defaultlocale: ' + str(locale.getdefaultlocale())
+        print('locale.defaultlocale: ' + str(locale.getdefaultlocale()))
     except ValueError, e:
-        print 'got exception from locale.getdefaultlocale(): ', e
+        print('got exception from locale.getdefaultlocale(): ', e)
-    print 'locale.locale: ' + str(locale.getlocale())
+    print('locale.locale: ' + str(locale.getlocale()))
 
 def print_stdout(cmdlist, label=None, numlines=None):
     try:
@@ -41,10 +43,10 @@ def print_stdout(cmdlist, label=None, numlines=None):
            label = cmdlist[0]
         res = subprocess.Popen(cmdlist, stdin=open(os.devnull),
                                stdout=subprocess.PIPE).communicate()[0]
-        print label + ': ' + foldlines(res, numlines)
+        print(label + ': ' + foldlines(res, numlines))
     except EnvironmentError, e:
         if isinstance(e, OSError) and e.errno == 2:
-            print label + ': no such file or directory'
+            print(label + ': no such file or directory')
            return
         sys.stderr.write("\nGot exception invoking '%s'. Exception follows.\n" % (cmdlist[0],))
         traceback.print_exc(file=sys.stderr)
@@ -52,12 +54,12 @@ def print_stdout(cmdlist, label=None, numlines=None):
 
 def print_as_ver():
     if os.path.exists('a.out'):
-        print "WARNING: a file named a.out exists, and getting the version of the 'as' assembler writes to that filename, so I'm not attempting to get the version of 'as'."
+        print("WARNING: a file named a.out exists, and getting the version of the 'as' assembler writes to that filename, so I'm not attempting to get the version of 'as'.")
         return
     try:
         res = subprocess.Popen(['as', '-version'], stdin=open(os.devnull),
                                stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
-        print 'as: ' + foldlines(res[0]+' '+res[1])
+        print('as: ' + foldlines(res[0]+' '+res[1]))
         if os.path.exists('a.out'):
            os.remove('a.out')
     except EnvironmentError:
@@ -69,49 +71,49 @@ def print_setuptools_ver():
     try:
         import pkg_resources
         out = str(pkg_resources.require("setuptools"))
-        print "setuptools:", foldlines(out)
+        print("setuptools:", foldlines(out))
     except (ImportError, EnvironmentError):
         sys.stderr.write("\nGot exception using 'pkg_resources' to get the version of setuptools. Exception follows\n")
         traceback.print_exc(file=sys.stderr)
         sys.stderr.flush()
     except pkg_resources.DistributionNotFound:
-        print 'setuptools: DistributionNotFound'
+        print('setuptools: DistributionNotFound')
 
 def print_py_pkg_ver(pkgname, modulename=None):
     if modulename is None:
         modulename = pkgname
-    print
+    print()
     try:
         import pkg_resources
         out = str(pkg_resources.require(pkgname))
-        print pkgname + ': ' + foldlines(out)
+        print(pkgname + ': ' + foldlines(out))
     except (ImportError, EnvironmentError):
         sys.stderr.write("\nGot exception using 'pkg_resources' to get the version of %s. Exception follows.\n" % (pkgname,))
         traceback.print_exc(file=sys.stderr)
         sys.stderr.flush()
     except pkg_resources.DistributionNotFound:
-        print pkgname + ': DistributionNotFound'
+        print(pkgname + ': DistributionNotFound')
     try:
         __import__(modulename)
     except ImportError:
         pass
     else:
         modobj = sys.modules.get(modulename)
-        print pkgname + ' module: ' + str(modobj)
+        print(pkgname + ' module: ' + str(modobj))
         try:
-            print pkgname + ' __version__: ' + str(modobj.__version__)
+            print(pkgname + ' __version__: ' + str(modobj.__version__))
         except AttributeError:
            pass
 
 print_platform()
-print
+print()
 print_python_ver()
 print_stdout(['virtualenv', '--version'])
 print_stdout(['tox', '--version'])
-print
+print()
 print_stdout(['locale'])
 print_python_encoding_settings()
-print
+print()
 print_stdout(['buildbot', '--version'])
 print_stdout(['buildslave', '--version'])
 if 'windows' in platform.system().lower():
 
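These hunks change only the print syntax; the Python 2-only exception
clauses kept as context ("except ValueError, e:", "except EnvironmentError,
e:") still prevent the file from being parsed by Python 3 as merged. The
spelling that parses under both Python 2.6+ and Python 3 would be
"except ... as ...", e.g.:

    from __future__ import print_function
    import locale

    try:
        print('locale.defaultlocale: ' + str(locale.getdefaultlocale()))
    except ValueError as e:   # py2-and-py3 spelling of: except ValueError, e:
        print('got exception from locale.getdefaultlocale(): ', e)
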
@@ -2,6 +2,8 @@
 
 # ./check-debugging.py src
 
+from __future__ import print_function
+
 import sys, re, os
 
 ok = True
@@ -15,8 +17,8 @@ for starting_point in sys.argv[1:]:
         lineno = lineno+1
         mo = re.search(r"\.setDebugging\(True\)", line)
         if mo:
-            print "Do not use defer.setDebugging(True) in production"
+            print("Do not use defer.setDebugging(True) in production")
-            print "First used here: %s:%d" % (fn, lineno)
+            print("First used here: %s:%d" % (fn, lineno))
            sys.exit(1)
-print "No cases of defer.setDebugging(True) were found, good!"
+print("No cases of defer.setDebugging(True) were found, good!")
 sys.exit(0)
 
@@ -4,6 +4,8 @@
 #
 # bin/tahoe @misc/coding_tools/check-interfaces.py
 
+from __future__ import print_function
+
 import os, sys, re, platform
 
 import zope.interface as zi
@@ -45,9 +47,9 @@ def strictly_implements(*interfaces):
         try:
            verifyClass(interface, cls)
         except Exception, e:
-           print >>_err, ("%s.%s does not correctly implement %s.%s:\n%s"
+           print("%s.%s does not correctly implement %s.%s:\n%s"
                  % (cls.__module__, cls.__name__,
-                    interface.__module__, interface.__name__, e))
+                    interface.__module__, interface.__name__, e), file=_err)
         else:
            _other_modules_with_violations.add(cls.__module__)
     return cls
@@ -62,7 +64,7 @@ def check():
 
     if len(sys.argv) >= 2:
         if sys.argv[1] == '--help' or len(sys.argv) > 2:
-            print >>_err, "Usage: check-miscaptures.py [SOURCEDIR]"
+            print("Usage: check-miscaptures.py [SOURCEDIR]", file=_err)
            return
         srcdir = sys.argv[1]
     else:
@@ -79,8 +81,8 @@ def check():
         for fn in filenames:
            (basename, ext) = os.path.splitext(fn)
            if ext in ('.pyc', '.pyo') and not os.path.exists(os.path.join(dirpath, basename+'.py')):
-               print >>_err, ("Warning: no .py source file for %r.\n"
+               print("Warning: no .py source file for %r.\n"
-                     % (os.path.join(dirpath, fn),))
+                     % (os.path.join(dirpath, fn),), file=_err)
 
            if ext == '.py' and not excluded_file_basenames.match(basename):
                relpath = os.path.join(dirpath[len(srcdir)+1:], basename)
@@ -89,16 +91,16 @@ def check():
                    __import__(module)
                except ImportError, e:
                    if not is_windows and (' _win' in str(e) or 'win32' in str(e)):
-                       print >>_err, ("Warning: %r imports a Windows-specific module, so we cannot check it (%s).\n"
+                       print("Warning: %r imports a Windows-specific module, so we cannot check it (%s).\n"
-                             % (module, str(e)))
+                             % (module, str(e)), file=_err)
                    else:
                        import traceback
                        traceback.print_exc(file=_err)
-                   print >>_err
+                   print(file=_err)
 
     others = list(_other_modules_with_violations)
     others.sort()
-    print >>_err, "There were also interface violations in:\n", ", ".join(others), "\n"
+    print("There were also interface violations in:\n", ", ".join(others), "\n", file=_err)
 
 
 # Forked from
 
@@ -1,5 +1,7 @@
 #! /usr/bin/python
 
+from __future__ import print_function
+
 import os, sys, compiler
 from compiler.ast import Node, For, While, ListComp, AssName, Name, Lambda, Function
 
@@ -133,7 +135,7 @@ def make_result(funcnode, var_name, var_lineno):
 
 def report(out, path, results):
     for r in results:
-        print >>out, path + (":%r %s captures %r assigned at line %d" % r)
+        print(path + (":%r %s captures %r assigned at line %d" % r), file=out)
 
 def check(sources, out):
     class Counts:
@@ -146,7 +148,7 @@ def check(sources, out):
     def _process(path):
         results = check_file(path)
         if isinstance(results, SyntaxError):
-            print >>out, path + (" NOT ANALYSED due to syntax error: %s" % results)
+            print(path + (" NOT ANALYSED due to syntax error: %s" % results), file=out)
            counts.error_files += 1
         else:
            report(out, path, results)
@@ -156,7 +158,7 @@ def check(sources, out):
            counts.suspect_files += 1
 
     for source in sources:
-        print >>out, "Checking %s..." % (source,)
+        print("Checking %s..." % (source,), file=out)
         if os.path.isfile(source):
            _process(source)
         else:
@@ -166,11 +168,11 @@ def check(sources, out):
                if ext == '.py':
                    _process(os.path.join(dirpath, fn))
 
-    print >>out, ("%d suspiciously captured variables in %d out of %d file(s)."
+    print("%d suspiciously captured variables in %d out of %d file(s)."
-          % (counts.n, counts.suspect_files, counts.processed_files))
+          % (counts.n, counts.suspect_files, counts.processed_files), file=out)
     if counts.error_files > 0:
-        print >>out, ("%d file(s) not processed due to syntax errors."
+        print("%d file(s) not processed due to syntax errors."
-              % (counts.error_files,))
+              % (counts.error_files,), file=out)
     return counts.n
 
 
@@ -2,6 +2,8 @@
 
 # ./check-umids.py src
 
+from __future__ import print_function
+
 import sys, re, os
 
 ok = True
@@ -20,13 +22,13 @@ for starting_point in sys.argv[1:]:
            umid = mo.group(1)
            if umid in umids:
                oldfn, oldlineno = umids[umid]
-               print "%s:%d: duplicate umid '%s'" % (fn, lineno, umid)
+               print("%s:%d: duplicate umid '%s'" % (fn, lineno, umid))
-               print "%s:%d: first used here" % (oldfn, oldlineno)
+               print("%s:%d: first used here" % (oldfn, oldlineno))
                ok = False
            umids[umid] = (fn,lineno)
 
 if ok:
-    print "all umids are unique"
+    print("all umids are unique")
 else:
-    print "some umids were duplicates"
+    print("some umids were duplicates")
     sys.exit(1)
 
@@ -1,5 +1,7 @@
 #!/usr/bin/env python
 
+from __future__ import print_function
+
 import os, sys
 
 from twisted.python import usage
@@ -22,7 +24,7 @@ def check(fn):
         line = line[:-1]
         if line.rstrip() != line:
            # the %s:%d:%d: lets emacs' compile-mode jump to those locations
-           print "%s:%d:%d: trailing whitespace" % (fn, i+1, len(line)+1)
+           print("%s:%d:%d: trailing whitespace" % (fn, i+1, len(line)+1))
            found[0] = True
     f.close()
 
@@ -21,7 +21,7 @@
 # Install 'click' first. I run this with py2, but py3 might work too, if the
 # wheels can be built with py3.
 
-from __future__ import print_function, unicode_literals
+from __future__ import unicode_literals, print_function
 import os, sys, subprocess, json, tempfile, zipfile, io, re, itertools
 import email.parser
 from pprint import pprint
 
|
@ -1,5 +1,7 @@
|
|||||||
#!/usr/bin/env python
|
#!/usr/bin/env python
|
||||||
|
|
||||||
|
from __future__ import print_function
|
||||||
|
|
||||||
"""
|
"""
|
||||||
Given a list of nodeids and a 'convergence' file, create a bunch of files
|
Given a list of nodeids and a 'convergence' file, create a bunch of files
|
||||||
that will (when encoded at k=1,N=1) be uploaded to specific nodeids.
|
that will (when encoded at k=1,N=1) be uploaded to specific nodeids.
|
||||||
@ -86,8 +88,8 @@ for line in open(opts["nodeids"], "r").readlines():
|
|||||||
nodes[nodeid] = nickname
|
nodes[nodeid] = nickname
|
||||||
|
|
||||||
if opts["k"] != 3 or opts["N"] != 10:
|
if opts["k"] != 3 or opts["N"] != 10:
|
||||||
print "note: using non-default k/N requires patching the Tahoe code"
|
print("note: using non-default k/N requires patching the Tahoe code")
|
||||||
print "src/allmydata/client.py line 55, DEFAULT_ENCODING_PARAMETERS"
|
print("src/allmydata/client.py line 55, DEFAULT_ENCODING_PARAMETERS")
|
||||||
|
|
||||||
convergence_file = os.path.expanduser(opts["convergence"])
|
convergence_file = os.path.expanduser(opts["convergence"])
|
||||||
convergence_s = open(convergence_file, "rb").read().strip()
|
convergence_s = open(convergence_file, "rb").read().strip()
|
||||||
@ -109,7 +111,7 @@ def find_share_for_target(target):
|
|||||||
while True:
|
while True:
|
||||||
attempts += 1
|
attempts += 1
|
||||||
suffix = base32.b2a(os.urandom(10))
|
suffix = base32.b2a(os.urandom(10))
|
||||||
if verbose: print " trying", suffix,
|
if verbose: print(" trying", suffix, end=' ')
|
||||||
data = prefix + suffix + "\n"
|
data = prefix + suffix + "\n"
|
||||||
assert len(data) > 55 # no LIT files
|
assert len(data) > 55 # no LIT files
|
||||||
# now, what storage index will this get?
|
# now, what storage index will this get?
|
||||||
@ -117,11 +119,11 @@ def find_share_for_target(target):
|
|||||||
eu = upload.EncryptAnUploadable(u)
|
eu = upload.EncryptAnUploadable(u)
|
||||||
d = eu.get_storage_index() # this happens to run synchronously
|
d = eu.get_storage_index() # this happens to run synchronously
|
||||||
def _got_si(si, data=data):
|
def _got_si(si, data=data):
|
||||||
if verbose: print "SI", base32.b2a(si),
|
if verbose: print("SI", base32.b2a(si), end=' ')
|
||||||
peerlist = get_permuted_peers(si)
|
peerlist = get_permuted_peers(si)
|
||||||
if peerlist[0] == target:
|
if peerlist[0] == target:
|
||||||
# great!
|
# great!
|
||||||
if verbose: print " yay!"
|
if verbose: print(" yay!")
|
||||||
fn = base32.b2a(target)
|
fn = base32.b2a(target)
|
||||||
if nodes[target]:
|
if nodes[target]:
|
||||||
nickname = nodes[target].replace("/", "_")
|
nickname = nodes[target].replace("/", "_")
|
||||||
@ -131,7 +133,7 @@ def find_share_for_target(target):
|
|||||||
open(fn, "w").write(data)
|
open(fn, "w").write(data)
|
||||||
return True
|
return True
|
||||||
# nope, must try again
|
# nope, must try again
|
||||||
if verbose: print " boo"
|
if verbose: print(" boo")
|
||||||
return False
|
return False
|
||||||
d.addCallback(_got_si)
|
d.addCallback(_got_si)
|
||||||
# get sneaky and look inside the Deferred for the synchronous result
|
# get sneaky and look inside the Deferred for the synchronous result
|
||||||
@ -142,10 +144,10 @@ os.mkdir("canaries")
|
|||||||
attempts = []
|
attempts = []
|
||||||
for target in nodes:
|
for target in nodes:
|
||||||
target_s = base32.b2a(target)
|
target_s = base32.b2a(target)
|
||||||
print "working on", target_s
|
print("working on", target_s)
|
||||||
attempts.append(find_share_for_target(target))
|
attempts.append(find_share_for_target(target))
|
||||||
print "done"
|
print("done")
|
||||||
print "%d attempts total, avg %d per target, max %d" % \
|
print("%d attempts total, avg %d per target, max %d" % \
|
||||||
(sum(attempts), 1.0* sum(attempts) / len(nodes), max(attempts))
|
(sum(attempts), 1.0* sum(attempts) / len(nodes), max(attempts)))
|
||||||
|
|
||||||
|
|
||||||
|
@@ -1,5 +1,7 @@
 #!/usr/bin/env python
 
+from __future__ import print_function
+
 """Create a short probably-unique string for use as a umid= argument in a
 Foolscap log() call, to make it easier to locate the source code that
 generated the message. The main text of the log message is frequently
@@ -51,5 +53,5 @@ count = 1
 if len(sys.argv) > 1:
     count = int(sys.argv[1])
 for i in range(count):
-    print make_id()
+    print(make_id())
 
@@ -1,13 +1,15 @@
 #!/usr/bin/env python
 
+from __future__ import print_function
+
 from foolscap import Tub, eventual
 from twisted.internet import reactor
 import sys
 import pprint
 
 def oops(f):
-    print "ERROR"
+    print("ERROR")
-    print f
+    print(f)
 
 def fetch(furl):
     t = Tub()
 
@@ -1,5 +1,7 @@
 # -*- python -*-
 
+from __future__ import print_function
+
 from twisted.internet import reactor
 import sys
 
@@ -31,7 +33,7 @@ class CPUWatcherSubscriber(service.MultiService, Referenceable):
         tub.connectTo(furl, self.connected)
 
     def connected(self, rref):
-        print "subscribing"
+        print("subscribing")
         d = rref.callRemote("get_averages")
         d.addCallback(self.remote_averages)
         d.addErrback(log.err)
 
@@ -1,5 +1,7 @@
 # -*- python -*-
 
+from __future__ import print_function
+
 """
 # run this tool on a linux box in its own directory, with a file named
 # 'pids.txt' describing which processes to watch. It will follow CPU usage of
@@ -20,7 +22,6 @@
 # built-in graphs on web interface
 
 
-
 import pickle, os.path, time, pprint
 from twisted.application import internet, service, strports
 from twisted.web import server, resource, http
@@ -210,7 +211,7 @@ class CPUWatcher(service.MultiService, resource.Resource, Referenceable):
            row.append(self._average_N(pid, avg))
            current.append(tuple(row))
         self.current = current
-        print current
+        print(current)
         for ob in self.observers:
            eventual.eventually(self.notify, ob)
 
@@ -2,6 +2,8 @@
 
 # feed this the results of 'tahoe catalog-shares' for all servers
 
+from __future__ import print_function
+
 import sys
 
 chk_encodings = {}
@@ -45,23 +47,23 @@ sdmf_multiple_versions = [(si,lines)
 sdmf_multiple_versions.sort()
 
 if chk_multiple_encodings:
-    print
+    print()
-    print "CHK multiple encodings:"
+    print("CHK multiple encodings:")
     for (si,lines) in chk_multiple_encodings:
-        print " " + si
+        print(" " + si)
         for line in sorted(lines):
-            print " " + line
+            print(" " + line)
 if sdmf_multiple_encodings:
-    print
+    print()
-    print "SDMF multiple encodings:"
+    print("SDMF multiple encodings:")
     for (si,lines) in sdmf_multiple_encodings:
-        print " " + si
+        print(" " + si)
         for line in sorted(lines):
-            print " " + line
+            print(" " + line)
 if sdmf_multiple_versions:
-    print
+    print()
-    print "SDMF multiple versions:"
+    print("SDMF multiple versions:")
     for (si,lines) in sdmf_multiple_versions:
-        print " " + si
+        print(" " + si)
         for line in sorted(lines):
-            print " " + line
+            print(" " + line)
 
@@ -1,5 +1,7 @@
 #! /usr/bin/env python
 
+from __future__ import print_function
+
 from foolscap import Tub
 from foolscap.eventual import eventually
 import sys
@@ -10,7 +12,7 @@ def go():
     d = t.getReference(sys.argv[1])
     d.addCallback(lambda rref: rref.callRemote("get_memory_usage"))
     def _got(res):
-        print res
+        print(res)
         reactor.stop()
     d.addCallback(_got)
 
@@ -1,5 +1,7 @@
 #!/usr/bin/env python
 
+from __future__ import print_function
+
 import os, sys, re
 import urllib
 import json
@@ -24,6 +26,6 @@ for (name, avg1, avg5, avg15) in current:
 
 if len(sys.argv) > 1:
     if sys.argv[1] == "config":
-        print configinfo.rstrip()
+        print(configinfo.rstrip())
         sys.exit(0)
-print data.rstrip()
+print(data.rstrip())
 
@@ -5,18 +5,20 @@
 # is left on all disks across the grid. The plugin should be configured with
 # env_url= pointing at the diskwatcher.tac webport.
 
+from __future__ import print_function
+
 import os, sys, urllib, json
 
 if len(sys.argv) > 1 and sys.argv[1] == "config":
-    print """\
+    print("""\
 graph_title Tahoe Remaining Disk Space
 graph_vlabel bytes remaining
 graph_category tahoe
 graph_info This graph shows the total amount of disk space left available in the grid
 disk_left.label disk left
-disk_left.draw LINE1"""
+disk_left.draw LINE1""")
     sys.exit(0)
 
 url = os.environ["url"]
 data = json.load(urllib.urlopen(url))["available"]
-print "disk_left.value", data
+print("disk_left.value", data)
 
@@ -6,10 +6,12 @@
 # used. The plugin should be configured with env_url= pointing at the
 # diskwatcher.tac webport.
 
+from __future__ import print_function
+
 import os, sys, urllib, json
 
 if len(sys.argv) > 1 and sys.argv[1] == "config":
-    print """\
+    print("""\
 graph_title Tahoe Total Disk Space
 graph_vlabel bytes
 graph_category tahoe
@@ -17,10 +19,10 @@ graph_info This graph shows the total amount of disk space present in the grid,
 disk_total.label disk total
 disk_total.draw LINE2
 disk_used.label disk used
-disk_used.draw LINE1"""
+disk_used.draw LINE1""")
     sys.exit(0)
 
 url = os.environ["url"]
 data = json.load(urllib.urlopen(url))
-print "disk_total.value", data["total"]
+print("disk_total.value", data["total"])
-print "disk_used.value", data["used"]
+print("disk_used.value", data["used"])
 
@@ -5,10 +5,12 @@
 # is being used per unit time. The plugin should be configured with env_url=
 # pointing at the diskwatcher.tac webport.
 
+from __future__ import print_function
+
 import os, sys, urllib, json
 
 if len(sys.argv) > 1 and sys.argv[1] == "config":
-    print """\
+    print("""\
 graph_title Tahoe Disk Usage Measurement
 graph_vlabel bytes per second
 graph_category tahoe
@@ -21,7 +23,7 @@ rate_1day.draw LINE1
 rate_2wk.label (two week sample)
 rate_2wk.draw LINE2
 rate_4wk.label (four week sample)
-rate_4wk.draw LINE2"""
+rate_4wk.draw LINE2""")
     sys.exit(0)
 
 url = os.environ["url"]
@@ -31,10 +33,10 @@ data = dict([(name, growth)
              for (name, timespan, growth, timeleft) in timespans])
 # growth is in bytes per second
 if "1hr" in data:
-    print "rate_1hr.value", data["1hr"]
+    print("rate_1hr.value", data["1hr"])
 if "1day" in data:
-    print "rate_1day.value", data["1day"]
+    print("rate_1day.value", data["1day"])
 if "2wk" in data:
-    print "rate_2wk.value", data["2wk"]
+    print("rate_2wk.value", data["2wk"])
 if "4wk" in data:
-    print "rate_4wk.value", data["4wk"]
+    print("rate_4wk.value", data["4wk"])
 
@@ -5,18 +5,20 @@
 # used on all disks across the grid. The plugin should be configured with
 # env_url= pointing at the diskwatcher.tac webport.
 
+from __future__ import print_function
+
 import os, sys, urllib, json
 
 if len(sys.argv) > 1 and sys.argv[1] == "config":
-    print """\
+    print("""\
 graph_title Tahoe Total Disk Space Used
 graph_vlabel bytes used
 graph_category tahoe
 graph_info This graph shows the total amount of disk space used across the grid
 disk_used.label disk used
-disk_used.draw LINE1"""
+disk_used.draw LINE1""")
     sys.exit(0)
 
 url = os.environ["url"]
 data = json.load(urllib.urlopen(url))["used"]
-print "disk_used.value", data
+print("disk_used.value", data)
 
@@ -5,10 +5,12 @@
 # left before the grid fills up. The plugin should be configured with
 # env_url= pointing at the diskwatcher.tac webport.
 
+from __future__ import print_function
+
 import os, sys, urllib, json
 
 if len(sys.argv) > 1 and sys.argv[1] == "config":
-    print """\
+    print("""\
 graph_title Tahoe Remaining Time Predictor
 graph_vlabel days remaining
 graph_category tahoe
@@ -20,7 +22,7 @@ days_1day.draw LINE1
 days_2wk.label days left (two week sample)
 days_2wk.draw LINE2
 days_4wk.label days left (four week sample)
-days_4wk.draw LINE2"""
+days_4wk.draw LINE2""")
     sys.exit(0)
 
 url = os.environ["url"]
@@ -32,10 +34,10 @@ data = dict([(name, timeleft)
 # timeleft is in seconds
 DAY = 24*60*60
 if "1hr" in data:
-    print "days_1hr.value", data["1hr"]/DAY
+    print("days_1hr.value", data["1hr"]/DAY)
 if "1day" in data:
-    print "days_1day.value", data["1day"]/DAY
+    print("days_1day.value", data["1day"]/DAY)
 if "2wk" in data:
-    print "days_2wk.value", data["2wk"]/DAY
+    print("days_2wk.value", data["2wk"]/DAY)
 if "4wk" in data:
-    print "days_4wk.value", data["4wk"]/DAY
+    print("days_4wk.value", data["4wk"]/DAY)
 
@@ -1,15 +1,17 @@
 #!/usr/bin/env python
 
+from __future__ import print_function
+
 import sys, os.path
 
 if len(sys.argv) > 1 and sys.argv[1] == "config":
-    print """\
+    print("""\
 graph_title Tahoe File Estimate
 graph_vlabel files
 graph_category tahoe
 graph_info This graph shows the estimated number of files and directories present in the grid
 files.label files
-files.draw LINE2"""
+files.draw LINE2""")
     sys.exit(0)
 
 # Edit this to point at some subset of storage directories.
@@ -46,4 +48,4 @@ correction = 1+no_chance
 #print "correction", correction
 
 files = unique_strings * (32*32/len(sections)) * correction
-print "files.value %d" % int(files)
+print("files.value %d" % int(files))
 
@@ -18,6 +18,8 @@
 # env.basedir_NODE3 /path/to/node3
 #
 
+from __future__ import print_function
+
 import os, sys
 
 nodedirs = []
@@ -41,7 +43,7 @@ for nodename, basedir in nodedirs:
 
 if len(sys.argv) > 1:
     if sys.argv[1] == "config":
-        print configinfo.rstrip()
+        print(configinfo.rstrip())
         sys.exit(0)
 
 for nodename, basedir in nodedirs:
@@ -52,5 +54,5 @@ for nodename, basedir in nodedirs:
         if dirpath == root and "incoming" in dirnames:
            dirnames.remove("incoming")
         shares += len(filenames)
-    print "%s.value %d" % (nodename, shares)
+    print("%s.value %d" % (nodename, shares))
 
@@ -1,5 +1,7 @@
 #!/usr/bin/env python
 
+from __future__ import print_function
+
 import os, sys
 import urllib
 import json
@@ -15,11 +17,11 @@ fetched.draw LINE2
 
 if len(sys.argv) > 1:
     if sys.argv[1] == "config":
-        print configinfo.rstrip()
+        print(configinfo.rstrip())
         sys.exit(0)
 
 url = os.environ["url"]
 
 data = json.loads(urllib.urlopen(url).read())
-print "fetched.value %d" % data["chk_upload_helper.active_uploads"]
+print("fetched.value %d" % data["chk_upload_helper.active_uploads"])
 
@@ -1,5 +1,7 @@
 #!/usr/bin/env python
 
+from __future__ import print_function
+
 import os, sys
 import urllib
 import json
@@ -17,10 +19,10 @@ fetched.min 0
 
 if len(sys.argv) > 1:
     if sys.argv[1] == "config":
-        print configinfo.rstrip()
+        print(configinfo.rstrip())
         sys.exit(0)
 
 url = os.environ["url"]
 
 data = json.loads(urllib.urlopen(url).read())
-print "fetched.value %d" % data["chk_upload_helper.fetched_bytes"]
+print("fetched.value %d" % data["chk_upload_helper.fetched_bytes"])
 
@@ -1,5 +1,7 @@
 #!/usr/bin/env python
 
+from __future__ import print_function
+
 import os, sys
 import urllib
 import json
@@ -19,13 +21,13 @@ storage_client.draw LINE2
 
 if len(sys.argv) > 1:
     if sys.argv[1] == "config":
-        print configinfo.rstrip()
+        print(configinfo.rstrip())
         sys.exit(0)
 
 url = os.environ["url"]
 
 data = json.loads(urllib.urlopen(url).read())
-print "storage_server.value %d" % data["announcement_summary"]["storage"]
+print("storage_server.value %d" % data["announcement_summary"]["storage"])
-print "storage_hosts.value %d" % data["announcement_distinct_hosts"]["storage"]
+print("storage_hosts.value %d" % data["announcement_distinct_hosts"]["storage"])
-print "storage_client.value %d" % data["subscription_summary"]["storage"]
+print("storage_client.value %d" % data["subscription_summary"]["storage"])
 
@@ -4,6 +4,8 @@
 # by 'allmydata start', then extracts the amount of memory they consume (both
 # VmSize and VmRSS) from /proc
 
+from __future__ import print_function
+
 import os, sys, re
 
 # for testing
@@ -47,7 +49,7 @@ graph_info This graph shows the memory used by specific processes
         if f == "VmData":
             configinfo += "%s_%s.graph no\n" % (nodename, f)
 
-    print configinfo
+    print(configinfo)
     sys.exit(0)
 
 nodestats = {}
@@ -67,4 +69,4 @@ for node,stats in nodestats.items():
     for f,value in stats.items():
         # TODO: not sure if /proc/%d/status means 1000 or 1024 when it says
         # 'kB'
-        print "%s_%s.value %d" % (node, f, 1024*value)
+        print("%s_%s.value %d" % (node, f, 1024*value))
@@ -27,10 +27,12 @@
 # This plugin should be configured with env_diskwatcher_url= pointing at the
 # diskwatcher.tac webport, and env_deepsize_url= pointing at the PHP script.
 
+from __future__ import print_function
+
 import os, sys, urllib, json
 
 if len(sys.argv) > 1 and sys.argv[1] == "config":
-    print """\
+    print("""\
 graph_title Tahoe Overhead Calculator
 graph_vlabel Percentage
 graph_category tahoe
@@ -40,7 +42,7 @@ overhead.draw LINE2
 inactive.label inactive account usage
 inactive.draw LINE1
 effective_expansion.label Effective Expansion Factor
-effective_expansion.graph no"""
+effective_expansion.graph no""")
     sys.exit(0)
 
 diskwatcher_url = os.environ["diskwatcher_url"]
@@ -54,12 +56,12 @@ ideal = expansion * deepsize["all"]
 overhead = (total - ideal) / ideal
 if overhead > 0:
     # until all the storage-servers come online, this number will be nonsense
-    print "overhead.value %f" % (100.0 * overhead)
+    print("overhead.value %f" % (100.0 * overhead))
 
 # same for this one
 effective_expansion = total / deepsize["all"]
-print "effective_expansion.value %f" % effective_expansion
+print("effective_expansion.value %f" % effective_expansion)
 
 # this value remains valid, though
 inactive_savings = (deepsize["all"] - deepsize["active"]) / deepsize["active"]
-print "inactive.value %f" % (100.0 * inactive_savings)
+print("inactive.value %f" % (100.0 * inactive_savings))
@@ -1,5 +1,7 @@
 #!/usr/bin/env python
 
+from __future__ import print_function
+
 import os, sys
 import urllib
 
@@ -14,10 +16,10 @@ space.draw LINE2
 
 if len(sys.argv) > 1:
     if sys.argv[1] == "config":
-        print configinfo.rstrip()
+        print(configinfo.rstrip())
         sys.exit(0)
 
 url = os.environ["url"]
 
 data = int(urllib.urlopen(url).read().strip())
-print "space.value %d" % data
+print("space.value %d" % data)
@@ -42,6 +42,8 @@
 # of course, these URLs must match the webports you have configured into the
 # storage nodes.
 
+from __future__ import print_function
+
 import os, sys
 import urllib
 import json
@@ -78,7 +80,7 @@ for nodename, url in node_urls:
 
 if len(sys.argv) > 1:
     if sys.argv[1] == "config":
-        print configinfo.rstrip()
+        print(configinfo.rstrip())
         sys.exit(0)
 
 for nodename, url in node_urls:
@@ -89,5 +91,5 @@ for nodename, url in node_urls:
     p_key = percentile + "_percentile"
     key = "storage_server.latencies.%s.%s" % (operation, p_key)
     value = data["stats"][key]
-    print "%s.value %s" % (nodename, value)
+    print("%s.value %s" % (nodename, value))
 
@@ -32,6 +32,8 @@
 # of course, these URLs must match the webports you have configured into the
 # storage nodes.
 
+from __future__ import print_function
+
 import os, sys
 import urllib
 import json
@@ -64,12 +66,12 @@ for nodename, url in node_urls:
 
 if len(sys.argv) > 1:
     if sys.argv[1] == "config":
-        print configinfo.rstrip()
+        print(configinfo.rstrip())
         sys.exit(0)
 
 for nodename, url in node_urls:
     data = json.loads(urllib.urlopen(url).read())
     key = "storage_server.%s" % operation
     value = data["counters"][key]
-    print "%s.value %s" % (nodename, value)
+    print("%s.value %s" % (nodename, value))
 
@@ -5,6 +5,8 @@
 # then extrapolate to guess how many weeks/months/years of storage space we
 # have left, and output it to another munin graph
 
+from __future__ import print_function
+
 import sys, os, time
 import rrdtool
 
@@ -82,7 +84,7 @@ def write_to_file(samples):
     os.rename(WEBFILE + ".tmp", WEBFILE)
 
 if len(sys.argv) > 1 and sys.argv[1] == "config":
-    print """\
+    print("""\
 graph_title Tahoe Remaining Space Predictor
 graph_vlabel days remaining
 graph_category tahoe
@@ -90,17 +92,17 @@ graph_info This graph shows the estimated number of days left until storage spac
 days_2wk.label days left (2wk sample)
 days_2wk.draw LINE2
 days_4wk.label days left (4wk sample)
-days_4wk.draw LINE2"""
+days_4wk.draw LINE2""")
     sys.exit(0)
 
 #rsync_rrd()
 samples = {}
 remaining_4wk = predict_future("4wk")
 if remaining_4wk is not None:
-    print "days_4wk.value", remaining_4wk
+    print("days_4wk.value", remaining_4wk)
     samples["remaining_4wk"] = remaining_4wk
 remaining_2wk = predict_future("2wk")
 if remaining_2wk is not None:
-    print "days_2wk.value", remaining_2wk
+    print("days_2wk.value", remaining_2wk)
    samples["remaining_2wk"] = remaining_2wk
 write_to_file(samples)
@@ -1,5 +1,7 @@
 #!/usr/bin/env python
 
+from __future__ import print_function
+
 import os
 import json
 import re
@@ -460,11 +462,11 @@ def main(argv):
                 value = nodestats['stats'][category].get(statid)
                 if value is not None:
                     args = { 'name': name, 'value': value }
-                    print plugin_conf[output_section] % args
+                    print(plugin_conf[output_section] % args)
 
     if len(argv) > 1:
         if sys.argv[1] == 'config':
-            print plugin_conf['configheader']
+            print(plugin_conf['configheader'])
             output_nodes('graph_config', False)
             sys.exit(0)
 
@@ -18,6 +18,8 @@
 # Allmydata-tahoe must be installed on the system where this plugin is used,
 # since it imports a utility module from allmydata.utils .
 
+from __future__ import print_function
+
 import os, sys
 import commands
 
@@ -44,7 +46,7 @@ for nodename, basedir in nodedirs:
 
 if len(sys.argv) > 1:
     if sys.argv[1] == "config":
-        print configinfo.rstrip()
+        print(configinfo.rstrip())
         sys.exit(0)
 
 for nodename, basedir in nodedirs:
@@ -54,5 +56,5 @@ for nodename, basedir in nodedirs:
         sys.exit(rc)
     bytes, extra = out.split()
     usage = int(bytes)
-    print "%s.value %d" % (nodename, usage)
+    print("%s.value %d" % (nodename, usage))
 
@@ -1,5 +1,7 @@
 # -*- python -*-
 
+from __future__ import print_function
+
 """
 Run this tool with twistd in its own directory, with a file named 'urls.txt'
 describing which nodes to query. Make sure to copy diskwatcher.py into the
@@ -82,7 +84,7 @@ class DiskWatcher(service.MultiService, resource.Resource):
         ts.setServiceParent(self)
 
     def _upgrade_complete(self, ignored):
-        print "Axiom store upgrade complete"
+        print("Axiom store upgrade complete")
 
     def startService(self):
         service.MultiService.startService(self)
@@ -155,8 +157,8 @@ class DiskWatcher(service.MultiService, resource.Resource):
         total = data[u"stats"][u"storage_server.disk_total"]
         used = data[u"stats"][u"storage_server.disk_used"]
         avail = data[u"stats"][u"storage_server.disk_avail"]
-        print "%s : total=%s, used=%s, avail=%s" % (url,
+        print("%s : total=%s, used=%s, avail=%s" % (url,
-                                                    total, used, avail)
+                                                    total, used, avail))
         Sample(store=self.store,
                url=unicode(url), when=when, total=total, used=used, avail=avail)
 
@@ -168,7 +170,7 @@ class DiskWatcher(service.MultiService, resource.Resource):
         pairs.sort()
         for (timespan,name) in pairs:
             growth = self.growth(timespan)
-            print name, total_avail_space, growth
+            print(name, total_avail_space, growth)
             if growth is not None:
                 timeleft = None
                 if growth > 0:
@@ -286,7 +288,7 @@ class DiskWatcher(service.MultiService, resource.Resource):
             old = old[0]
             duration = latest.when.asPOSIXTimestamp() - old.when.asPOSIXTimestamp()
             if not duration:
-                print "only one sample from", url
+                print("only one sample from", url)
                 continue
 
             rate = float(latest.used - old.used) / duration
@@ -1,3 +1,5 @@
+from __future__ import print_function
+
 """
 To use this, get a trace file such as this one:
 
@@ -68,17 +70,17 @@ class B(object):
             elif INIT_S in inline:
                 pass
             else:
-                print "Warning, didn't recognize this line: %r" % (inline,)
+                print("Warning, didn't recognize this line: %r" % (inline,))
             count += 1
             inline = self.inf.readline()
 
         # print self.stats
 
 benchutil.print_bench_footer(UNITS_PER_SECOND=1000000)
-print "(microseconds)"
+print("(microseconds)")
 
 for N in [600, 6000, 60000]:
     b = B(open(sys.argv[1], 'rU'))
-    print "%7d" % N,
+    print("%7d" % N, end=' ')
     benchutil.rep_bench(b.run, N, b.init, UNITS_PER_SECOND=1000000)
 
@@ -1,5 +1,7 @@
 #!/usr/bin/env python
 
+from __future__ import print_function
+
 """
 This tool estimates how much space would be consumed by a filetree into which
 a native directory was copied.
@@ -96,10 +98,10 @@ def scan(root):
         for mode in MODES:
             total[mode] += slotsize(mode, len(files), len(dirs)) + stringsize
 
-    print "%d directories" % num_dirs
+    print("%d directories" % num_dirs)
-    print "%d files" % num_files
+    print("%d files" % num_files)
     for mode in sorted(total.keys()):
-        print "%s: %d bytes" % (mode, total[mode])
+        print("%s: %d bytes" % (mode, total[mode]))
 
 
 if __name__ == '__main__':
@@ -1,5 +1,7 @@
 #!python
 
+from __future__ import print_function
+
 # range of hash output lengths
 range_L_hash = [128]
 
@@ -212,10 +214,10 @@ def calculate(K, K1, K2, q_max, L_hash, trees):
 
 def search():
     for L_hash in range_L_hash:
-        print >>stderr, "collecting... \r",
+        print("collecting... \r", end=' ', file=stderr)
         collect()
 
-        print >>stderr, "precomputing... \r",
+        print("precomputing... \r", end=' ', file=stderr)
 
         """
        # d/dq (lg(q+1) + L_hash/q) = 1/(ln(2)*(q+1)) - L_hash/q^2
@@ -277,9 +279,9 @@ def search():
            for K1 in xrange(max(2, K-fuzz), min(K_max, K+fuzz)+1):
                candidates += calculate(K, K1, K2, q_max, L_hash, trees)
            progress += 1
-           print >>stderr, "searching: %3d %% \r" % (100.0 * progress / complete,),
+           print("searching: %3d %% \r" % (100.0 * progress / complete,), end=' ', file=stderr)
 
-        print >>stderr, "filtering... \r",
+        print("filtering... \r", end=' ', file=stderr)
         step = 2.0
         bins = {}
         limit = floor_div(limit_cost, step)
@@ -306,33 +308,33 @@ def search():
             "%(c_ver)7d +/-%(c_ver_pm)5d (%(Mcycles_ver)5.2f +/-%(Mcycles_ver_pm)5.2f) "
             ) % candidate
 
-        print >>stderr, " \r",
+        print(" \r", end=' ', file=stderr)
         if len(best) > 0:
-            print " B K K1 K2 q T L_hash lg_N sig_bytes c_sign (Mcycles) c_ver ( Mcycles )"
+            print(" B K K1 K2 q T L_hash lg_N sig_bytes c_sign (Mcycles) c_ver ( Mcycles )")
-            print "---- ---- ---- ------ ---- ---- ------ ------ --------- ------------------ --------------------------------"
+            print("---- ---- ---- ------ ---- ---- ------ ------ --------- ------------------ --------------------------------")
 
             best.sort(key=lambda c: (c['sig_bytes'], c['cost']))
             last_sign = None
            last_ver = None
            for c in best:
                if last_sign is None or c['c_sign'] < last_sign or c['c_ver'] < last_ver:
-                   print format_candidate(c)
+                   print(format_candidate(c))
                    last_sign = c['c_sign']
                    last_ver = c['c_ver']
 
-            print
+            print()
         else:
-            print "No candidates found for L_hash = %d or higher." % (L_hash)
+            print("No candidates found for L_hash = %d or higher." % (L_hash))
            return
 
        del bins
        del best
 
-        print "Maximum signature size: %d bytes" % (limit_bytes,)
+        print("Maximum signature size: %d bytes" % (limit_bytes,))
-        print "Maximum (signing + %d*verification) cost: %.1f Mcycles" % (weight_ver, limit_cost)
+        print("Maximum (signing + %d*verification) cost: %.1f Mcycles" % (weight_ver, limit_cost))
-        print "Hash parameters: %d-bit blocks with %d-bit padding and %d-bit labels, %.2f cycles per byte" \
+        print("Hash parameters: %d-bit blocks with %d-bit padding and %d-bit labels, %.2f cycles per byte" \
-              % (L_block, L_pad, L_label, cycles_per_byte)
+              % (L_block, L_pad, L_label, cycles_per_byte))
-        print "PRF output size: %d bits" % (L_prf,)
+        print("PRF output size: %d bits" % (L_prf,))
-        print "Security level given by L_hash is maintained for up to 2^%d signatures.\n" % (lg_M,)
+        print("Security level given by L_hash is maintained for up to 2^%d signatures.\n" % (lg_M,))
 
 search()
 
@@ -3,6 +3,9 @@
 # used to discuss ticket #302: "stop permuting peerlist?"
 
 # import time
+
+from __future__ import print_function
+
 import math
 from hashlib import md5 # sha1, sha256
 myhash = md5
@@ -47,7 +50,7 @@ def make_up_a_file_size(seed):
 
 sizes = [make_up_a_file_size(str(i)) for i in range(10000)]
 avg_filesize = sum(sizes)/len(sizes)
-print "average file size:", abbreviate_space(avg_filesize)
+print("average file size:", abbreviate_space(avg_filesize))
 
 SERVER_CAPACITY = 10**12
 
@@ -94,11 +97,11 @@ class Ring:
             prev_s = self.servers[(i-1)%len(self.servers)]
             diff = "%032x" % (int(s.nodeid,16) - int(prev_s.nodeid,16))
             s.prev_diff = diff
-            print s, s.prev_diff
+            print(s, s.prev_diff)
 
-        print "sorted by delta"
+        print("sorted by delta")
         for s in sorted(self.servers, key=lambda s:s.prev_diff):
-            print s, s.prev_diff
+            print(s, s.prev_diff)
 
     def servers_for_si(self, si):
         if self.permute:
@@ -121,7 +124,7 @@ class Ring:
         return "".join(bits)
 
     def dump_usage(self, numfiles, avg_space_per_file):
-        print "uploaded", numfiles
+        print("uploaded", numfiles)
         # avg_space_per_file measures expected grid-wide ciphertext per file
         used = list(reversed(sorted([s.used for s in self.servers])))
         # used is actual per-server ciphertext
@@ -137,19 +140,19 @@ class Ring:
         std_deviation = math.sqrt(variance)
         sd_of_total = std_deviation / avg_usage_per_file
 
-        print "min/max/(exp) usage-pf-ps %s/%s/(%s):" % (
+        print("min/max/(exp) usage-pf-ps %s/%s/(%s):" % (
             abbreviate_space(usedpf[-1]),
             abbreviate_space(usedpf[0]),
-            abbreviate_space(avg_usage_per_file) ),
+            abbreviate_space(avg_usage_per_file) ), end=' ')
-        print "spread-pf: %s (%.2f%%)" % (
+        print("spread-pf: %s (%.2f%%)" % (
-            abbreviate_space(spreadpf), 100.0*spreadpf/avg_usage_per_file),
+            abbreviate_space(spreadpf), 100.0*spreadpf/avg_usage_per_file), end=' ')
         #print "average_usage:", abbreviate_space(average_usagepf)
-        print "stddev: %s (%.2f%%)" % (abbreviate_space(std_deviation),
+        print("stddev: %s (%.2f%%)" % (abbreviate_space(std_deviation),
-                                       100.0*sd_of_total)
+                                       100.0*sd_of_total))
         if self.SHOW_MINMAX:
             s2 = sorted(self.servers, key=lambda s: s.used)
-            print "least:", s2[0].nodeid
+            print("least:", s2[0].nodeid)
-            print "most:", s2[-1].nodeid
+            print("most:", s2[-1].nodeid)
 
 
 class Options(usage.Options):
@@ -196,7 +199,7 @@ def do_run(ring, opts):
                 server_was_full = True
                 remaining_servers.discard(s)
                 if not remaining_servers:
-                    print "-- GRID IS FULL"
+                    print("-- GRID IS FULL")
                     ring.dump_usage(filenum, avg_space_per_file)
                     return filenum
             index += 1
@@ -207,11 +210,11 @@ def do_run(ring, opts):
 
         if server_was_full and all_servers_have_room:
             all_servers_have_room = False
-            print "-- FIRST SERVER FULL"
+            print("-- FIRST SERVER FULL")
             ring.dump_usage(filenum, avg_space_per_file)
         if file_was_wrapped and no_files_have_wrapped:
             no_files_have_wrapped = False
-            print "-- FIRST FILE WRAPPED"
+            print("-- FIRST FILE WRAPPED")
             ring.dump_usage(filenum, avg_space_per_file)
 
 
@@ -219,11 +222,11 @@ def do_ring(opts):
     total_capacity = opts["servers"]*SERVER_CAPACITY
     avg_space_per_file = avg_filesize * opts["N"] / opts["k"]
     avg_files = total_capacity / avg_space_per_file
-    print "expected number of uploads:", avg_files
+    print("expected number of uploads:", avg_files)
     if opts["permute"]:
-        print " PERMUTED"
+        print(" PERMUTED")
     else:
-        print " LINEAR"
+        print(" LINEAR")
     seed = opts["seed"]
 
     ring = Ring(opts["servers"], seed, opts["permute"])
@@ -2,6 +2,8 @@
 
 # WARNING. There is a bug in this script so that it does not simulate the actual Tahoe Two server selection algorithm that it was intended to simulate. See http://allmydata.org/trac/tahoe-lafs/ticket/302 (stop permuting peerlist, use SI as offset into ring instead?)
 
+from __future__ import print_function
+
 import random
 
 SERVER_CAPACITY = 10**12
@@ -83,7 +85,7 @@ def test(permutedpeerlist, iters):
     filledat = []
     for test in range(iters):
         (servers, doubled_up_shares) = go(permutedpeerlist)
-        print "doubled_up_shares: ", doubled_up_shares
+        print("doubled_up_shares: ", doubled_up_shares)
         for server in servers:
             fidx = server.full_at_tick
             filledat.extend([0]*(fidx-len(filledat)+1))
@@ -147,8 +149,8 @@ if __name__ == "__main__":
         if arg.startswith("--iters="):
             iters = int(arg[8:])
     if "--permute" in sys.argv:
-        print "doing permuted peerlist, iterations: %d" % iters
+        print("doing permuted peerlist, iterations: %d" % iters)
         test(True, iters)
     else:
-        print "doing simple ring, iterations: %d" % iters
+        print("doing simple ring, iterations: %d" % iters)
         test(False, iters)
@@ -1,5 +1,7 @@
 #! /usr/bin/env python
 
+from __future__ import print_function
+
 import hashlib
 import os, random
 
@@ -202,7 +204,7 @@ class Simulator:
         size = random.randrange(1000)
         n = random.choice(self.all_nodes)
         if self.verbose:
-            print "add_file(size=%d, from node %s)" % (size, n)
+            print("add_file(size=%d, from node %s)" % (size, n))
         fileid = randomid()
         able = n.publish_file(fileid, size)
         if able:
@@ -223,7 +225,7 @@ class Simulator:
             if n.delete_file():
                 self.deleted_files += 1
                 return
-        print "no files to delete"
+        print("no files to delete")
 
     def _add_event(self, etype):
         rate = getattr(self, "RATE_" + etype)
@@ -256,14 +258,14 @@ class Simulator:
         # self.print_stats(current_time, etype)
 
     def print_stats_header(self):
-        print "time: added failed lost avg_tried"
+        print("time: added failed lost avg_tried")
 
     def print_stats(self, time, etype):
         if not self.published_files:
             avg_tried = "NONE"
         else:
             avg_tried = sum(self.published_files) / len(self.published_files)
-        print time, etype, self.added_data, self.failed_files, self.lost_data_bytes, avg_tried, len(self.introducer.living_files), self.introducer.utilization
+        print(time, etype, self.added_data, self.failed_files, self.lost_data_bytes, avg_tried, len(self.introducer.living_files), self.introducer.utilization)
 
 s = None
 
@@ -278,7 +280,7 @@ def main():
     # s.print_stats_header()
     for i in range(1000):
         s.do_event()
-    print "%d files added, %d files deleted" % (s.added_files, s.deleted_files)
+    print("%d files added, %d files deleted" % (s.added_files, s.deleted_files))
     return s
 
 if __name__ == '__main__':
@@ -1,5 +1,7 @@
 #! /usr/bin/env python
 
+from __future__ import print_function
+
 import random, math, re
 from twisted.python import usage
 
@@ -126,7 +128,7 @@ class Sizes:
                   "share_storage_overhead", "share_transmission_overhead",
                   "storage_overhead", "storage_overhead_percentage",
                   "bytes_until_some_data"):
-            print k, getattr(self, k)
+            print(k, getattr(self, k))
 
 def fmt(num, trim=False):
     if num < KiB:
@@ -160,11 +162,11 @@ def text():
     mode = opts["mode"]
     arity = opts["arity"]
     # 0123456789012345678901234567890123456789012345678901234567890123456
-    print "mode=%s" % mode, " arity=%d" % arity
+    print("mode=%s" % mode, " arity=%d" % arity)
-    print " storage storage"
+    print(" storage storage")
-    print "Size sharesize overhead overhead k d alacrity"
+    print("Size sharesize overhead overhead k d alacrity")
-    print " (bytes) (%)"
+    print(" (bytes) (%)")
-    print "------- ------- -------- -------- ---- -- --------"
+    print("------- ------- -------- -------- ---- -- --------")
     #sizes = [2 ** i for i in range(7, 41)]
     #radix = math.sqrt(10); expstep = 2
     radix = 2; expstep = 2
@@ -181,7 +183,7 @@ def text():
         out += " %4d" % int(s.block_arity)
         out += " %2d" % int(s.block_tree_depth)
         out += " %8s" % fmt(s.bytes_until_some_data)
-        print out
+        print(out)
 
 
 def graph():
@@ -1,5 +1,7 @@
 #!/usr/bin/env python
 
+from __future__ import print_function
+
 import sys, math
 from allmydata import uri, storage
 from allmydata.immutable import upload
@@ -64,18 +66,18 @@ def calc(filesize, params=(3,7,10), segsize=DEFAULT_MAX_SEGMENT_SIZE):
 def main():
     filesize = int(sys.argv[1])
     urisize, sharesize, sharespace = calc(filesize)
-    print "urisize:", urisize
+    print("urisize:", urisize)
-    print "sharesize: %10d" % sharesize
+    print("sharesize: %10d" % sharesize)
-    print "sharespace: %10d" % sharespace
+    print("sharespace: %10d" % sharespace)
-    print "desired expansion: %1.1f" % (1.0 * 10 / 3)
+    print("desired expansion: %1.1f" % (1.0 * 10 / 3))
-    print "effective expansion: %1.1f" % (1.0 * sharespace / filesize)
+    print("effective expansion: %1.1f" % (1.0 * sharespace / filesize))
 
 def chart():
     filesize = 2
     while filesize < 2**20:
         urisize, sharesize, sharespace = calc(int(filesize))
         expansion = 1.0 * sharespace / int(filesize)
-        print "%d,%d,%d,%1.2f" % (int(filesize), urisize, sharespace, expansion)
+        print("%d,%d,%d,%1.2f" % (int(filesize), urisize, sharespace, expansion))
         filesize = filesize * 2**0.5
 
 if __name__ == '__main__':
newsfragments/3002.other (new file, 1 line)
@@ -0,0 +1 @@
+Converted all uses of the print statement to the print function in the ./misc/ directory.

newsfragments/3005.minor (new empty file)
newsfragments/3007.minor (new empty file)
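
Every hunk above follows the same few mechanical rewrite patterns. As a hedged summary (the values below are invented placeholders, not lines from the patch), the Python 2 print statements map onto these Python 3 compatible calls once `from __future__ import print_function` is in effect:

    from __future__ import print_function
    import sys

    # plain statement -> function call
    # Py2: print "files.value %d" % n
    print("files.value %d" % 42)

    # trailing comma (stay on the same line, emit a space) -> end=' '
    # Py2: print "%7d" % n,
    print("%7d" % 600, end=' ')

    # stream redirection -> file= keyword
    # Py2: print >>sys.stderr, "searching...",
    print("searching...", end=' ', file=sys.stderr)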
@@ -100,10 +100,10 @@ install_requires = [
 
     # Eliot is contemplating dropping Python 2 support. Stick to a version we
     # know works on Python 2.7. Because we don't have support for `==`
-    # constraints, pin 1.6.x this way. I feel pretty safe betting that we
+    # constraints, pin 1.7.x this way. I feel pretty safe betting that we
-    # won't end up stuck on Eliot 1.6.100 with a critical fix only present in
+    # won't end up stuck on Eliot 1.7.100 with a critical fix only present in
-    # 1.6.101. And if we do, I know how to deal with that situation.
+    # 1.7.101. And if we do, I know how to deal with that situation.
-    "eliot >= 1.6.0, <= 1.6.100",
+    "eliot >= 1.7.0, <= 1.7.100",
 
     # A great way to define types of values.
     "attrs >= 18.2.0",
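
The comment spells out why the pin is a range rather than a wildcard: `>= 1.7.0, <= 1.7.100` stands in for `eliot == 1.7.*`. A minimal sketch of how such a specifier evaluates, using the `packaging` library (the probed versions are illustrative):

    from packaging.specifiers import SpecifierSet

    pin = SpecifierSet(">=1.7.0,<=1.7.100")
    print("1.7.1" in pin)    # True: any plausible 1.7.x release satisfies the pin
    print("1.6.0" in pin)    # False: older minor releases are rejected
    print("1.8.0" in pin)    # False: newer minor releases are rejected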
@@ -9,7 +9,6 @@ from __future__ import (
     division,
 )
 
-from pprint import pformat
 from sys import stdout
 import logging
 
@@ -27,7 +26,6 @@ from testtools.matchers import (
     AfterPreprocessing,
 )
 from testtools.twistedsupport import (
-    has_no_result,
     succeeded,
     failed,
 )
@@ -35,7 +33,6 @@ from testtools.twistedsupport import (
 from eliot import (
     Message,
     FileDestination,
-    start_action,
 )
 from eliot.twisted import DeferredContext
 from eliot.testing import (
@@ -44,15 +41,12 @@ from eliot.testing import (
 )
 
 from twisted.internet.defer import (
-    Deferred,
     succeed,
 )
 from twisted.internet.task import deferLater
 from twisted.internet import reactor
 
 from ..util.eliotutil import (
-    eliot_friendly_generator_function,
-    inline_callbacks,
     log_call_deferred,
     _parse_destination_description,
     _EliotLogging,
@@ -82,349 +76,6 @@ class EliotLoggedTestTests(AsyncTestCase):
 
 
 
-def assert_logged_messages_contain_fields(testcase, logged_messages, expected_fields):
-    testcase.assertEqual(len(logged_messages), len(expected_fields))
-    actual_fields = list(
-        {key: msg.message[key] for key in expected if key in msg.message}
-        for (msg, expected)
-        in zip(logged_messages, expected_fields)
-    )
-    testcase.assertEqual(actual_fields, expected_fields)
-
-
-def assert_logged_action_contains_messages(testcase, logger, expected_action, expected_fields):
-    action = assertHasAction(
-        testcase,
-        logger,
-        expected_action,
-        True,
-    )
-    assert_logged_messages_contain_fields(
-        testcase,
-        action.children,
-        expected_fields,
-    )
-
-def assert_expected_action_tree(testcase, logger, expected_action_type, expected_type_tree):
-    logged_action = assertHasAction(
-        testcase,
-        logger,
-        expected_action_type,
-        True,
-    )
-    type_tree = logged_action.type_tree()
-    testcase.assertEqual(
-        {expected_action_type: expected_type_tree},
-        type_tree,
-        "Logger had messages:\n{}".format(pformat(logger.messages, indent=4)),
-    )
-
-def assert_generator_logs_action_tree(testcase, generator_function, logger, expected_action_type, expected_type_tree):
-    list(eliot_friendly_generator_function(generator_function)())
-    assert_expected_action_tree(
-        testcase,
-        logger,
-        expected_action_type,
-        expected_type_tree,
-    )
-
-
-class EliotFriendlyGeneratorFunctionTests(SyncTestCase):
-    # Get our custom assertion failure messages *and* the standard ones.
-    longMessage = True
-
-    def test_yield_none(self):
-        @eliot_friendly_generator_function
-        def g():
-            Message.log(message_type=u"hello")
-            yield
-            Message.log(message_type=u"goodbye")
-
-        with start_action(action_type=u"the-action"):
-            list(g())
-
-        assert_expected_action_tree(
-            self,
-            self.eliot_logger,
-            u"the-action",
-            [u"hello", u"yielded", u"goodbye"],
-        )
-
-    def test_yield_value(self):
-        expected = object()
-
-        @eliot_friendly_generator_function
-        def g():
-            Message.log(message_type=u"hello")
-            yield expected
-            Message.log(message_type=u"goodbye")
-
-        with start_action(action_type=u"the-action"):
-            self.assertEqual([expected], list(g()))
-
-        assert_expected_action_tree(
-            self,
-            self.eliot_logger,
-            u"the-action",
-            [u"hello", u"yielded", u"goodbye"],
-        )
-
-    def test_yield_inside_another_action(self):
-        @eliot_friendly_generator_function
-        def g():
-            Message.log(message_type=u"a")
-            with start_action(action_type=u"confounding-factor"):
-                Message.log(message_type=u"b")
-                yield None
-                Message.log(message_type=u"c")
-            Message.log(message_type=u"d")
-
-        with start_action(action_type=u"the-action"):
-            list(g())
-
-        assert_expected_action_tree(
-            self,
-            self.eliot_logger,
-            u"the-action",
-            [u"a",
-             {u"confounding-factor": [u"b", u"yielded", u"c"]},
-             u"d",
-            ],
-        )
-
-    def test_yield_inside_nested_actions(self):
-        @eliot_friendly_generator_function
-        def g():
-            Message.log(message_type=u"a")
-            with start_action(action_type=u"confounding-factor"):
-                Message.log(message_type=u"b")
-                yield None
-                with start_action(action_type=u"double-confounding-factor"):
-                    yield None
-                    Message.log(message_type=u"c")
-                Message.log(message_type=u"d")
-            Message.log(message_type=u"e")
-
-        with start_action(action_type=u"the-action"):
-            list(g())
-
-        assert_expected_action_tree(
-            self,
-            self.eliot_logger,
-            u"the-action", [
-                u"a",
-                {u"confounding-factor": [
-                    u"b",
-                    u"yielded",
-                    {u"double-confounding-factor": [
-                        u"yielded",
-                        u"c",
-                    ]},
-                    u"d",
-                ]},
-                u"e",
-            ],
-        )
-
-    def test_generator_and_non_generator(self):
-        @eliot_friendly_generator_function
-        def g():
-            Message.log(message_type=u"a")
-            yield
-            with start_action(action_type=u"action-a"):
-                Message.log(message_type=u"b")
-                yield
-                Message.log(message_type=u"c")
-
-            Message.log(message_type=u"d")
-            yield
-
-        with start_action(action_type=u"the-action"):
-            generator = g()
-            next(generator)
-            Message.log(message_type=u"0")
-            next(generator)
-            Message.log(message_type=u"1")
-            next(generator)
-            Message.log(message_type=u"2")
-            self.assertRaises(StopIteration, lambda: next(generator))
-
-        assert_expected_action_tree(
-            self,
-            self.eliot_logger,
-            u"the-action", [
-                u"a",
-                u"yielded",
-                u"0",
-                {
-                    u"action-a": [
-                        u"b",
-                        u"yielded",
-                        u"c",
-                    ],
-                },
-                u"1",
-                u"d",
-                u"yielded",
-                u"2",
-            ],
-        )
-
-    def test_concurrent_generators(self):
-        @eliot_friendly_generator_function
-        def g(which):
-            Message.log(message_type=u"{}-a".format(which))
-            with start_action(action_type=which):
-                Message.log(message_type=u"{}-b".format(which))
-                yield
-                Message.log(message_type=u"{}-c".format(which))
-            Message.log(message_type=u"{}-d".format(which))
-
-        gens = [g(u"1"), g(u"2")]
-        with start_action(action_type=u"the-action"):
-            while gens:
-                for g in gens[:]:
-                    try:
-                        next(g)
-                    except StopIteration:
-                        gens.remove(g)
-
-        assert_expected_action_tree(
-            self,
-            self.eliot_logger,
-            u"the-action", [
-                u"1-a",
-                {u"1": [
-                    u"1-b",
-                    u"yielded",
-                    u"1-c",
-                ]},
-                u"2-a",
-                {u"2": [
-                    u"2-b",
-                    u"yielded",
-                    u"2-c",
-                ]},
-                u"1-d",
-                u"2-d",
-            ],
-        )
-
-    def test_close_generator(self):
-        @eliot_friendly_generator_function
-        def g():
-            Message.log(message_type=u"a")
-            try:
-                yield
-                Message.log(message_type=u"b")
-            finally:
-                Message.log(message_type=u"c")
-
-
-        with start_action(action_type=u"the-action"):
-            gen = g()
-            next(gen)
-            gen.close()
-
-        assert_expected_action_tree(
-            self,
-            self.eliot_logger,
-            u"the-action", [
-                u"a",
-                u"yielded",
-                u"c",
-            ],
-        )
-
-    def test_nested_generators(self):
-        @eliot_friendly_generator_function
-        def g(recurse):
-            with start_action(action_type=u"a-recurse={}".format(recurse)):
-                Message.log(message_type=u"m-recurse={}".format(recurse))
-                if recurse:
-                    set(g(False))
-                else:
-                    yield
-
-        with start_action(action_type=u"the-action"):
-            set(g(True))
-
-        assert_expected_action_tree(
-            self,
-            self.eliot_logger,
-            u"the-action", [{
-                u"a-recurse=True": [
-                    u"m-recurse=True", {
-                        u"a-recurse=False": [
-                            u"m-recurse=False",
-                            u"yielded",
-                        ],
-                    },
-                ],
-            }],
-        )
-
-
-class InlineCallbacksTests(SyncTestCase):
-    # Get our custom assertion failure messages *and* the standard ones.
-    longMessage = True
-
-    def _a_b_test(self, logger, g):
-        with start_action(action_type=u"the-action"):
-            self.assertThat(g(), succeeded(Is(None)))
-        assert_expected_action_tree(
-            self,
-            logger,
-            u"the-action", [
-                u"a",
-                u"yielded",
-                u"b",
-            ],
-        )
-
-    def test_yield_none(self):
-        @inline_callbacks
-        def g():
-            Message.log(message_type=u"a")
-            yield
-            Message.log(message_type=u"b")
-
-        self._a_b_test(self.eliot_logger, g)
-
-    def test_yield_fired_deferred(self):
-        @inline_callbacks
-        def g():
-            Message.log(message_type=u"a")
-            yield succeed(None)
-            Message.log(message_type=u"b")
-
-        self._a_b_test(self.eliot_logger, g)
-
-    def test_yield_unfired_deferred(self):
-        waiting = Deferred()
-
-        @inline_callbacks
-        def g():
-            Message.log(message_type=u"a")
-            yield waiting
-            Message.log(message_type=u"b")
-
-        with start_action(action_type=u"the-action"):
-            d = g()
-            self.assertThat(waiting, has_no_result())
-            waiting.callback(None)
-            self.assertThat(d, succeeded(Is(None)))
-        assert_expected_action_tree(
-            self,
-            self.eliot_logger,
-            u"the-action", [
-                u"a",
-                u"yielded",
-                u"b",
-            ],
-        )
-
-
 class ParseDestinationDescriptionTests(SyncTestCase):
     """
     Tests for ``_parse_destination_description``.
@@ -10,8 +10,6 @@ from __future__ import (
 )
 
 __all__ = [
-    "use_generator_context",
-    "eliot_friendly_generator_function",
     "inline_callbacks",
     "eliot_logging_service",
     "opt_eliot_destination",
@@ -30,12 +28,9 @@ __all__ = [
 ]
 
 from sys import (
-    exc_info,
     stdout,
 )
 from functools import wraps
-from contextlib import contextmanager
-from weakref import WeakKeyDictionary
 from logging import (
     INFO,
     Handler,
@@ -67,7 +62,10 @@ from eliot import (
 from eliot._validation import (
     ValidationError,
 )
-from eliot.twisted import DeferredContext
+from eliot.twisted import (
+    DeferredContext,
+    inline_callbacks,
+)
 
 from twisted.python.usage import (
     UsageError,
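
This import change is the pivot of the whole cleanup: the hand-rolled generator-context machinery deleted further down is replaced by the `inline_callbacks` that Eliot 1.7 ships in `eliot.twisted` (hence the version pin above). A minimal usage sketch, with invented action and message types:

    from eliot import Message, start_action
    from eliot.twisted import inline_callbacks
    from twisted.internet.defer import succeed

    @inline_callbacks
    def fetch():
        # Both messages land inside the caller's Eliot action, even though
        # the reactor may interleave other work at the yield point.
        Message.log(message_type=u"fetch:start")
        yield succeed(None)
        Message.log(message_type=u"fetch:done")

    with start_action(action_type=u"demo"):
        fetch()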
@@ -84,7 +82,6 @@ from twisted.logger import (
     globalLogPublisher,
 )
 from twisted.internet.defer import (
-    inlineCallbacks,
     maybeDeferred,
 )
 from twisted.application.service import Service
@@ -97,123 +94,6 @@ from .fake_inotify import (
     humanReadableMask,
 )
 
-class _GeneratorContext(object):
-    def __init__(self, execution_context):
-        self._execution_context = execution_context
-        self._contexts = WeakKeyDictionary()
-        self._current_generator = None
-
-    def init_stack(self, generator):
-        stack = list(self._execution_context._get_stack())
-        self._contexts[generator] = stack
-
-    def get_stack(self):
-        if self._current_generator is None:
-            # If there is no currently active generator then we have no
-            # special stack to supply.  Let the execution context figure out a
-            # different answer on its own.
-            return None
-        # Otherwise, give back the action context stack we've been tracking
-        # for the currently active generator.  It must have been previously
-        # initialized (it's too late to do it now)!
-        return self._contexts[self._current_generator]
-
-    @contextmanager
-    def context(self, generator):
-        previous_generator = self._current_generator
-        try:
-            self._current_generator = generator
-            yield
-        finally:
-            self._current_generator = previous_generator
-
-
-from eliot._action import _context
-_the_generator_context = _GeneratorContext(_context)
-
-
-def use_generator_context():
-    _context.get_sub_context = _the_generator_context.get_stack
-use_generator_context()
-
-
-def eliot_friendly_generator_function(original):
-    """
-    Decorate a generator function so that the Eliot action context is
-    preserved across ``yield`` expressions.
-    """
-    @wraps(original)
-    def wrapper(*a, **kw):
-        # Keep track of whether the next value to deliver to the generator is
-        # a non-exception or an exception.
-        ok = True
-
-        # Keep track of the next value to deliver to the generator.
-        value_in = None
-
-        # Create the generator with a call to the generator function.  This
-        # happens with whatever Eliot action context happens to be active,
-        # which is fine and correct and also irrelevant because no code in the
-        # generator function can run until we call send or throw on it.
-        gen = original(*a, **kw)
-
-        # Initialize the per-generator Eliot action context stack to the
-        # current action stack.  This might be the main stack or, if another
-        # decorated generator is running, it might be the stack for that
-        # generator.  Not our business.
-        _the_generator_context.init_stack(gen)
-        while True:
-            try:
-                # Whichever way we invoke the generator, we will do it
-                # with the Eliot action context stack we've saved for it.
-                # Then the context manager will re-save it and restore the
-                # "outside" stack for us.
-                with _the_generator_context.context(gen):
-                    if ok:
-                        value_out = gen.send(value_in)
-                    else:
-                        value_out = gen.throw(*value_in)
-                    # We have obtained a value from the generator.  In
-                    # giving it to us, it has given up control.  Note this
-                    # fact here.  Importantly, this is within the
-                    # generator's action context so that we get a good
-                    # indication of where the yield occurred.
-                    #
-                    # This might be too noisy, consider dropping it or
-                    # making it optional.
-                    Message.log(message_type=u"yielded")
-            except StopIteration:
-                # When the generator raises this, it is signaling
-                # completion.  Leave the loop.
-                break
-            else:
-                try:
-                    # Pass the generator's result along to whoever is
-                    # driving.  Capture the result as the next value to
-                    # send inward.
-                    value_in = yield value_out
-                except:
-                    # Or capture the exception if that's the flavor of the
-                    # next value.
-                    ok = False
-                    value_in = exc_info()
-                else:
-                    ok = True
-
-    return wrapper
-
-
-def inline_callbacks(original):
-    """
-    Decorate a function like ``inlineCallbacks`` would but in a more
-    Eliot-friendly way.  Use it just like ``inlineCallbacks`` but where you
-    want Eliot action contexts to Do The Right Thing inside the decorated
-    function.
-    """
-    return inlineCallbacks(
-        eliot_friendly_generator_function(original)
-    )
-
 def validateInstanceOf(t):
     """
     Return an Eliot validator that requires values to be instances of ``t``.
@@ -91,7 +91,7 @@ class INotify(object):
         self.callbacks = callbacks
 
     def event(self, filepath, mask):
-        with start_action(action_type=u"fake-inotify:event", path=filepath.path, mask=mask):
+        with start_action(action_type=u"fake-inotify:event", path=filepath.asTextMode().path, mask=mask):
             for cb in self.callbacks:
                 cb(None, filepath, mask)
 
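
The only functional change in that hunk is `asTextMode()`: it returns an equivalent `FilePath` whose `.path` attribute is text rather than bytes, so the value serializes cleanly into Eliot's JSON log events. A quick illustration (the path is made up):

    from twisted.python.filepath import FilePath

    fp = FilePath(b"/tmp/example")
    print(type(fp.path))                # bytes-mode path
    print(type(fp.asTextMode().path))   # text-mode path, safe to JSON-encode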