diff --git a/.circleci/config.yml b/.circleci/config.yml
index 9ff12d9d6..e06163d01 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -29,6 +29,8 @@ workflows:
- "lint"
- "deprecations"
- "c-locale"
+ # Any locale other than C or UTF-8.
+ - "another-locale"
- "integration":
requires:
@@ -196,6 +198,15 @@ jobs:
LANG: "C"
+ another-locale:
+ <<: *DEBIAN
+
+ environment:
+ <<: *UTF_8_ENVIRONMENT
+ # aka "Latin 1"
+ LANG: "en_US.ISO-8859-1"
+
+
deprecations:
<<: *DEBIAN
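
For context, a sketch (not part of the patch) of what the new job exercises: with LANG set to a Latin-1 locale, Python's preferred encoding is no longer UTF-8, which flushes out code that implicitly assumes UTF-8 when converting between bytes and text:

    import locale
    # Under LANG=en_US.ISO-8859-1 this reports "ISO-8859-1" instead of "UTF-8".
    print(locale.getpreferredencoding())
    # The same text encodes differently: '\xe9' in Latin-1, '\xc3\xa9' in UTF-8.
    print(repr(u'\xe9'.encode(locale.getpreferredencoding())))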
diff --git a/misc/awesome_weird_stuff/boodlegrid.tac b/misc/awesome_weird_stuff/boodlegrid.tac
index d92c03f62..f13427ceb 100644
--- a/misc/awesome_weird_stuff/boodlegrid.tac
+++ b/misc/awesome_weird_stuff/boodlegrid.tac
@@ -1,5 +1,7 @@
# -*- python -*-
+from __future__ import print_function
+
"""Monitor a Tahoe grid, by playing sounds in response to remote events.
To install:
@@ -47,20 +49,20 @@ class Listener:
# messages emitted by the Introducer: client join/leave
if message.startswith("introducer: subscription[storage] request"):
- print "new client"
+ print("new client")
self.sound("voice/hooray.aiff")
if message.startswith("introducer: unsubscribing"):
- print "unsubscribe"
+ print("unsubscribe")
self.sound("electro/zaptrill-fade.aiff")
# messages from the helper
if message == "file already found in grid":
- print "already found"
+ print("already found")
self.sound("mech/ziplash-high.aiff")
#if message == "upload done":
if format == "plaintext_hash=%(plaintext_hash)s, SI=%(SI)s, size=%(size)d":
size = m.get("size")
- print "upload done, size", size
+ print("upload done, size", size)
self.sound("mech/ziplash-low.aiff")
if "fetching " in message:
# helper grabbing ciphertext from client
@@ -90,31 +92,31 @@ class Listener:
pass
elif format == "excessive reactor delay (%ss)":
self.sound("animal/frog-cheep.aiff")
- print "excessive delay %s: %s" % (m['args'][0], furl)
+ print("excessive delay %s: %s" % (m['args'][0], furl))
elif format == "excessive reactor delay (%(delay)ss)":
self.sound("animal/frog-cheep.aiff")
- print "excessive delay %s: %s" % (m['delay'], furl)
+ print("excessive delay %s: %s" % (m['delay'], furl))
elif facility == "foolscap.negotiation":
if (message == "got offer for an existing connection"
or "master told us to use a new connection" in message):
- print "foolscap: got offer for an existing connection", message, furl
+ print("foolscap: got offer for an existing connection", message, furl)
else:
#print "foolscap:", message
pass
elif m['level'] > 30: # SCARY or BAD
#self.sound("mech/alarm-bell.aiff")
self.sound("environ/thunder-tense.aiff")
- print m, furl
+ print(m, furl)
elif m['level'] == 30: # WEIRD
self.sound("mech/glass-breaking.aiff")
- print m, furl
+ print(m, furl)
elif m['level'] > 20: # UNUSUAL or INFREQUENT or CURIOUS
self.sound("mech/telephone-ring-old.aiff")
- print m, furl
+ print(m, furl)
class BoodleSender(protocol.Protocol):
def connectionMade(self):
- print "connected to boodler"
+ print("connected to boodler")
self.factory.listener.boodler = self.transport
class Bridge(Referenceable):
@@ -150,7 +152,7 @@ class Monitor(service.MultiService):
reactor.connectTCP("localhost", 31863, cf)
def _got_logpublisher(self, publisher, fn, i, target):
- print "connected to %s:%d, %s" % (fn, i, target)
+ print("connected to %s:%d, %s" % (fn, i, target))
b = Bridge(target, self.listener)
publisher.callRemote("subscribe_to_all", b)
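
Every file below gets the same mechanical treatment, so the mapping is worth stating once (a reference summary, not code from the patch):

    from __future__ import print_function
    import sys

    print("hello")                        # was: print "hello"
    print("no newline", end=' ')          # was: print "no newline",   (trailing comma)
    print("to stderr", file=sys.stderr)   # was: print >>sys.stderr, "to stderr"
    print()                               # was: bare `print` (emit a blank line)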
diff --git a/misc/build_helpers/check-build.py b/misc/build_helpers/check-build.py
index 37381cd42..994ed650a 100644
--- a/misc/build_helpers/check-build.py
+++ b/misc/build_helpers/check-build.py
@@ -2,13 +2,15 @@
# This helper script is used with the 'test-desert-island' Makefile target.
+from __future__ import print_function
+
import sys
good = True
build_out = sys.argv[1]
mode = sys.argv[2]
-print
+print()
for line in open(build_out, "r"):
if mode == "no-downloads":
@@ -29,13 +31,13 @@ for line in open(build_out, "r"):
# currently don't enforce that stronger requirement.
if (line.startswith("Downloading http:") or
line.startswith("Downloading https:")):
- print line,
+ print(line, end=' ')
good = False
if good:
if mode == "no-downloads":
- print "Good: build did not try to download any files"
+ print("Good: build did not try to download any files")
sys.exit(0)
else:
if mode == "no-downloads":
- print "Failed: build tried to download files"
+ print("Failed: build tried to download files")
sys.exit(1)
diff --git a/misc/build_helpers/gen-package-table.py b/misc/build_helpers/gen-package-table.py
index cd6bdc3c4..a1190820d 100644
--- a/misc/build_helpers/gen-package-table.py
+++ b/misc/build_helpers/gen-package-table.py
@@ -2,6 +2,8 @@
# This script generates a table of dependencies in HTML format on stdout.
# It expects to be run in the tahoe-lafs-dep-eggs directory.
+from __future__ import print_function
+
import re, os, sys
import pkg_resources
@@ -83,27 +85,27 @@ greybgstyle = '; background-color: #E0E0E0'
nobgstyle = ''
unsupportedstyle = '; color: #C00000'
-print '<html>'
-print '<head>'
-print '<title>'
-print ' Software packages that Tahoe-LAFS depends on'
-print '</title>'
-print '</head>'
-print '<body>'
-print '<h2>What is this?</h2>'
-print 'See quickstart.rst, wiki:Installation, and wiki:CompileError.'
-print '<h2>Software packages that Tahoe-LAFS depends on</h2>'
-print
+print('<html>')
+print('<head>')
+print('<title>')
+print(' Software packages that Tahoe-LAFS depends on')
+print('</title>')
+print('</head>')
+print('<body>')
+print('<h2>What is this?</h2>')
+print('See quickstart.rst, wiki:Installation, and wiki:CompileError.')
+print('<h2>Software packages that Tahoe-LAFS depends on</h2>')
+print()
for pyver in reversed(sorted(python_versions)):
greybackground = False
if pyver:
- print '<p>Packages for Python %s that have compiled C/C++ code:</p>' % (pyver,)
- print '<table>'
- print '  <tr>'
- print '    <th width="%d%%">Platform</th>' % (width,)
+ print('<p>Packages for Python %s that have compiled C/C++ code:</p>' % (pyver,))
+ print('<table>')
+ print('  <tr>')
+ print('    <th width="%d%%">Platform</th>' % (width,))
for pkg in sorted(platform_dependent_pkgs):
- print '    <th width="%d%%">%s</th>' % (width, pkg)
- print '  </tr>'
+ print('    <th width="%d%%">%s</th>' % (width, pkg))
+ print('  </tr>')
first = True
for platform in sorted(matrix[pyver]):
@@ -122,38 +124,38 @@ for pyver in reversed(sorted(python_versions)):
style2 = first and 'border-top: 2px solid #000000' or ''
style2 += bgstyle
annotated_platform = platform.replace('-', '&#x2011;') + (unsupported_python and ' (unsupported)' or '')
- print '  <tr>'
- print '    <td style="%s">%s</td>' % (style1, annotated_platform)
+ print('  <tr>')
+ print('    <td style="%s">%s</td>' % (style1, annotated_platform))
for pkg in sorted(platform_dependent_pkgs):
if pkg == 'pywin32' and not platform.startswith('windows'):
- print '      <td style="%s">n/a</td>' % (style2,)
+ print('      <td style="%s">n/a</td>' % (style2,))
else:
- print '      <td style="%s">%s</td>' % (style2, file_list(row_files, pkg))
- print '  </tr>'
+ print('      <td style="%s">%s</td>' % (style2, file_list(row_files, pkg)))
+ print('  </tr>')
first = False

- print '</table>'
- print
+ print('</table>')
+ print()
-print '<p>Packages that are platform-independent or source-only:</p>'
-print '<table>'
-print '  <tr>'
-print '    <th>Package</th>'
-print '    <th>All Python versions</th>'
-print '  </tr>'
+print('<p>Packages that are platform-independent or source-only:</p>')
+print('<table>')
+print('  <tr>')
+print('    <th>Package</th>')
+print('    <th>All Python versions</th>')
+print('  </tr>')
style1 = 'border-top: 2px solid #000000; background-color:#FFFFF0;'
style2 = 'border-top: 2px solid #000000;'
m = matrix['']['']
for pkg in sorted(platform_independent_pkgs):
- print '  <tr>'
- print '    <td style="%s">%s</td>' % (style1, pkg)
- print '    <td style="%s">%s</td>' % (style2, file_list(m, pkg))
- print '  </tr>'
+ print('  <tr>')
+ print('    <td style="%s">%s</td>' % (style1, pkg))
+ print('    <td style="%s">%s</td>' % (style2, file_list(m, pkg)))
+ print('  </tr>')

-print '</table>'
+print('</table>')
# The document does validate, but not when it is included at the bottom of a directory listing.
#print '<hr>'
#print '<a href="http://validator.w3.org/check?uri=referer">Valid HTML</a>'
-print '</body></html>'
+print('</body></html>')
diff --git a/misc/build_helpers/run-deprecations.py b/misc/build_helpers/run-deprecations.py
index 49b698615..6c76bcd69 100644
--- a/misc/build_helpers/run-deprecations.py
+++ b/misc/build_helpers/run-deprecations.py
@@ -1,3 +1,5 @@
+from __future__ import print_function
+
import sys, os, io
from twisted.internet import reactor, protocol, task, defer
from twisted.python.procutils import which
@@ -51,7 +53,7 @@ def run_command(main):
pw = os.environ.get("PYTHONWARNINGS")
DDW = "default::DeprecationWarning"
if pw != DDW:
- print "note: $PYTHONWARNINGS is '%s', not the expected %s" % (pw, DDW)
+ print("note: $PYTHONWARNINGS is '%s', not the expected %s" % (pw, DDW))
sys.stdout.flush()
pp = RunPP()
@@ -84,11 +86,11 @@ def run_command(main):
if warnings:
if config["warnings"]:
with open(config["warnings"], "wb") as f:
- print >>f, "".join(warnings)
- print "ERROR: %d deprecation warnings found" % len(warnings)
+ print("".join(warnings), file=f)
+ print("ERROR: %d deprecation warnings found" % len(warnings))
sys.exit(1)
- print "no deprecation warnings"
+ print("no deprecation warnings")
if signal:
sys.exit(signal)
sys.exit(rc)
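
As an aside, the $PYTHONWARNINGS value the script insists on is shorthand for a standard warnings filter, roughly (illustration, not patch code):

    import warnings
    # PYTHONWARNINGS=default::DeprecationWarning installs this filter at startup:
    # print each distinct DeprecationWarning once per location, on stderr.
    warnings.filterwarnings("default", category=DeprecationWarning)
    warnings.warn("old API", DeprecationWarning)  # now visible for the script to count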
diff --git a/misc/build_helpers/show-tool-versions.py b/misc/build_helpers/show-tool-versions.py
index 2aa1aba19..7710298a7 100644
--- a/misc/build_helpers/show-tool-versions.py
+++ b/misc/build_helpers/show-tool-versions.py
@@ -1,5 +1,7 @@
#! /usr/bin/env python
+from __future__ import print_function
+
import locale, os, platform, subprocess, sys, traceback
@@ -13,27 +15,27 @@ def print_platform():
try:
import platform
out = platform.platform()
- print "platform:", foldlines(out)
- print "machine: ", platform.machine()
+ print("platform:", foldlines(out))
+ print("machine: ", platform.machine())
if hasattr(platform, 'linux_distribution'):
- print "linux_distribution:", repr(platform.linux_distribution())
+ print("linux_distribution:", repr(platform.linux_distribution()))
except EnvironmentError:
sys.stderr.write("\nGot exception using 'platform'. Exception follows\n")
traceback.print_exc(file=sys.stderr)
sys.stderr.flush()
def print_python_ver():
- print "python:", foldlines(sys.version)
- print 'maxunicode: ' + str(sys.maxunicode)
+ print("python:", foldlines(sys.version))
+ print('maxunicode: ' + str(sys.maxunicode))
def print_python_encoding_settings():
- print 'filesystem.encoding: ' + str(sys.getfilesystemencoding())
- print 'locale.getpreferredencoding: ' + str(locale.getpreferredencoding())
+ print('filesystem.encoding: ' + str(sys.getfilesystemencoding()))
+ print('locale.getpreferredencoding: ' + str(locale.getpreferredencoding()))
try:
- print 'locale.defaultlocale: ' + str(locale.getdefaultlocale())
+ print('locale.defaultlocale: ' + str(locale.getdefaultlocale()))
except ValueError, e:
- print 'got exception from locale.getdefaultlocale(): ', e
- print 'locale.locale: ' + str(locale.getlocale())
+ print('got exception from locale.getdefaultlocale(): ', e)
+ print('locale.locale: ' + str(locale.getlocale()))
def print_stdout(cmdlist, label=None, numlines=None):
try:
@@ -41,10 +43,10 @@ def print_stdout(cmdlist, label=None, numlines=None):
label = cmdlist[0]
res = subprocess.Popen(cmdlist, stdin=open(os.devnull),
stdout=subprocess.PIPE).communicate()[0]
- print label + ': ' + foldlines(res, numlines)
+ print(label + ': ' + foldlines(res, numlines))
except EnvironmentError, e:
if isinstance(e, OSError) and e.errno == 2:
- print label + ': no such file or directory'
+ print(label + ': no such file or directory')
return
sys.stderr.write("\nGot exception invoking '%s'. Exception follows.\n" % (cmdlist[0],))
traceback.print_exc(file=sys.stderr)
@@ -52,12 +54,12 @@ def print_stdout(cmdlist, label=None, numlines=None):
def print_as_ver():
if os.path.exists('a.out'):
- print "WARNING: a file named a.out exists, and getting the version of the 'as' assembler writes to that filename, so I'm not attempting to get the version of 'as'."
+ print("WARNING: a file named a.out exists, and getting the version of the 'as' assembler writes to that filename, so I'm not attempting to get the version of 'as'.")
return
try:
res = subprocess.Popen(['as', '-version'], stdin=open(os.devnull),
stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
- print 'as: ' + foldlines(res[0]+' '+res[1])
+ print('as: ' + foldlines(res[0]+' '+res[1]))
if os.path.exists('a.out'):
os.remove('a.out')
except EnvironmentError:
@@ -69,49 +71,49 @@ def print_setuptools_ver():
try:
import pkg_resources
out = str(pkg_resources.require("setuptools"))
- print "setuptools:", foldlines(out)
+ print("setuptools:", foldlines(out))
except (ImportError, EnvironmentError):
sys.stderr.write("\nGot exception using 'pkg_resources' to get the version of setuptools. Exception follows\n")
traceback.print_exc(file=sys.stderr)
sys.stderr.flush()
except pkg_resources.DistributionNotFound:
- print 'setuptools: DistributionNotFound'
+ print('setuptools: DistributionNotFound')
def print_py_pkg_ver(pkgname, modulename=None):
if modulename is None:
modulename = pkgname
- print
+ print()
try:
import pkg_resources
out = str(pkg_resources.require(pkgname))
- print pkgname + ': ' + foldlines(out)
+ print(pkgname + ': ' + foldlines(out))
except (ImportError, EnvironmentError):
sys.stderr.write("\nGot exception using 'pkg_resources' to get the version of %s. Exception follows.\n" % (pkgname,))
traceback.print_exc(file=sys.stderr)
sys.stderr.flush()
except pkg_resources.DistributionNotFound:
- print pkgname + ': DistributionNotFound'
+ print(pkgname + ': DistributionNotFound')
try:
__import__(modulename)
except ImportError:
pass
else:
modobj = sys.modules.get(modulename)
- print pkgname + ' module: ' + str(modobj)
+ print(pkgname + ' module: ' + str(modobj))
try:
- print pkgname + ' __version__: ' + str(modobj.__version__)
+ print(pkgname + ' __version__: ' + str(modobj.__version__))
except AttributeError:
pass
print_platform()
-print
+print()
print_python_ver()
print_stdout(['virtualenv', '--version'])
print_stdout(['tox', '--version'])
-print
+print()
print_stdout(['locale'])
print_python_encoding_settings()
-print
+print()
print_stdout(['buildbot', '--version'])
print_stdout(['buildslave', '--version'])
if 'windows' in platform.system().lower():
diff --git a/misc/coding_tools/check-debugging.py b/misc/coding_tools/check-debugging.py
index 8dae80181..17eeb30b7 100755
--- a/misc/coding_tools/check-debugging.py
+++ b/misc/coding_tools/check-debugging.py
@@ -2,6 +2,8 @@
# ./check-debugging.py src
+from __future__ import print_function
+
import sys, re, os
ok = True
@@ -15,8 +17,8 @@ for starting_point in sys.argv[1:]:
lineno = lineno+1
mo = re.search(r"\.setDebugging\(True\)", line)
if mo:
- print "Do not use defer.setDebugging(True) in production"
- print "First used here: %s:%d" % (fn, lineno)
+ print("Do not use defer.setDebugging(True) in production")
+ print("First used here: %s:%d" % (fn, lineno))
sys.exit(1)
-print "No cases of defer.setDebugging(True) were found, good!"
+print("No cases of defer.setDebugging(True) were found, good!")
sys.exit(0)
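
For reference, the pattern this checker rejects (illustration, not patch code): Twisted's Deferred debug mode records a traceback at every Deferred's creation and first use, which is far too expensive to leave enabled in production:

    from twisted.internet import defer

    defer.setDebugging(True)     # the call check-debugging.py greps for
    assert defer.getDebugging()
    defer.setDebugging(False)    # the default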
diff --git a/misc/coding_tools/check-interfaces.py b/misc/coding_tools/check-interfaces.py
index 6400bfe7d..5d6482d0a 100644
--- a/misc/coding_tools/check-interfaces.py
+++ b/misc/coding_tools/check-interfaces.py
@@ -4,6 +4,8 @@
#
# bin/tahoe @misc/coding_tools/check-interfaces.py
+from __future__ import print_function
+
import os, sys, re, platform
import zope.interface as zi
@@ -45,9 +47,9 @@ def strictly_implements(*interfaces):
try:
verifyClass(interface, cls)
except Exception, e:
- print >>_err, ("%s.%s does not correctly implement %s.%s:\n%s"
+ print("%s.%s does not correctly implement %s.%s:\n%s"
% (cls.__module__, cls.__name__,
- interface.__module__, interface.__name__, e))
+ interface.__module__, interface.__name__, e), file=_err)
else:
_other_modules_with_violations.add(cls.__module__)
return cls
@@ -62,7 +64,7 @@ def check():
if len(sys.argv) >= 2:
if sys.argv[1] == '--help' or len(sys.argv) > 2:
- print >>_err, "Usage: check-miscaptures.py [SOURCEDIR]"
+ print("Usage: check-miscaptures.py [SOURCEDIR]", file=_err)
return
srcdir = sys.argv[1]
else:
@@ -79,8 +81,8 @@ def check():
for fn in filenames:
(basename, ext) = os.path.splitext(fn)
if ext in ('.pyc', '.pyo') and not os.path.exists(os.path.join(dirpath, basename+'.py')):
- print >>_err, ("Warning: no .py source file for %r.\n"
- % (os.path.join(dirpath, fn),))
+ print("Warning: no .py source file for %r.\n"
+ % (os.path.join(dirpath, fn),), file=_err)
if ext == '.py' and not excluded_file_basenames.match(basename):
relpath = os.path.join(dirpath[len(srcdir)+1:], basename)
@@ -89,16 +91,16 @@ def check():
__import__(module)
except ImportError, e:
if not is_windows and (' _win' in str(e) or 'win32' in str(e)):
- print >>_err, ("Warning: %r imports a Windows-specific module, so we cannot check it (%s).\n"
- % (module, str(e)))
+ print("Warning: %r imports a Windows-specific module, so we cannot check it (%s).\n"
+ % (module, str(e)), file=_err)
else:
import traceback
traceback.print_exc(file=_err)
- print >>_err
+ print(file=_err)
others = list(_other_modules_with_violations)
others.sort()
- print >>_err, "There were also interface violations in:\n", ", ".join(others), "\n"
+ print("There were also interface violations in:\n", ", ".join(others), "\n", file=_err)
# Forked from
diff --git a/misc/coding_tools/check-miscaptures.py b/misc/coding_tools/check-miscaptures.py
index 5a21190f7..a6424015b 100644
--- a/misc/coding_tools/check-miscaptures.py
+++ b/misc/coding_tools/check-miscaptures.py
@@ -1,5 +1,7 @@
#! /usr/bin/python
+from __future__ import print_function
+
import os, sys, compiler
from compiler.ast import Node, For, While, ListComp, AssName, Name, Lambda, Function
@@ -133,7 +135,7 @@ def make_result(funcnode, var_name, var_lineno):
def report(out, path, results):
for r in results:
- print >>out, path + (":%r %s captures %r assigned at line %d" % r)
+ print(path + (":%r %s captures %r assigned at line %d" % r), file=out)
def check(sources, out):
class Counts:
@@ -146,7 +148,7 @@ def check(sources, out):
def _process(path):
results = check_file(path)
if isinstance(results, SyntaxError):
- print >>out, path + (" NOT ANALYSED due to syntax error: %s" % results)
+ print(path + (" NOT ANALYSED due to syntax error: %s" % results), file=out)
counts.error_files += 1
else:
report(out, path, results)
@@ -156,7 +158,7 @@ def check(sources, out):
counts.suspect_files += 1
for source in sources:
- print >>out, "Checking %s..." % (source,)
+ print("Checking %s..." % (source,), file=out)
if os.path.isfile(source):
_process(source)
else:
@@ -166,11 +168,11 @@ def check(sources, out):
if ext == '.py':
_process(os.path.join(dirpath, fn))
- print >>out, ("%d suspiciously captured variables in %d out of %d file(s)."
- % (counts.n, counts.suspect_files, counts.processed_files))
+ print("%d suspiciously captured variables in %d out of %d file(s)."
+ % (counts.n, counts.suspect_files, counts.processed_files), file=out)
if counts.error_files > 0:
- print >>out, ("%d file(s) not processed due to syntax errors."
- % (counts.error_files,))
+ print("%d file(s) not processed due to syntax errors."
+ % (counts.error_files,), file=out)
return counts.n
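
The bug class check-miscaptures.py hunts for, in miniature (illustration, not patch code):

    # Each lambda closes over the variable i, not its value at definition
    # time, so every element of fns returns 2 -- the capture the checker reports.
    fns = [lambda: i for i in range(3)]
    print([f() for f in fns])      # [2, 2, 2]

    # The usual fix: bind the current value as a default argument.
    fixed = [lambda i=i: i for i in range(3)]
    print([f() for f in fixed])    # [0, 1, 2]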
diff --git a/misc/coding_tools/check-umids.py b/misc/coding_tools/check-umids.py
index 0b6038f0f..c06b795fe 100644
--- a/misc/coding_tools/check-umids.py
+++ b/misc/coding_tools/check-umids.py
@@ -2,6 +2,8 @@
# ./check-umids.py src
+from __future__ import print_function
+
import sys, re, os
ok = True
@@ -20,13 +22,13 @@ for starting_point in sys.argv[1:]:
umid = mo.group(1)
if umid in umids:
oldfn, oldlineno = umids[umid]
- print "%s:%d: duplicate umid '%s'" % (fn, lineno, umid)
- print "%s:%d: first used here" % (oldfn, oldlineno)
+ print("%s:%d: duplicate umid '%s'" % (fn, lineno, umid))
+ print("%s:%d: first used here" % (oldfn, oldlineno))
ok = False
umids[umid] = (fn,lineno)
if ok:
- print "all umids are unique"
+ print("all umids are unique")
else:
- print "some umids were duplicates"
+ print("some umids were duplicates")
sys.exit(1)
diff --git a/misc/coding_tools/find-trailing-spaces.py b/misc/coding_tools/find-trailing-spaces.py
index ad2cc5835..19e7e3c28 100644
--- a/misc/coding_tools/find-trailing-spaces.py
+++ b/misc/coding_tools/find-trailing-spaces.py
@@ -1,5 +1,7 @@
#!/usr/bin/env python
+from __future__ import print_function
+
import os, sys
from twisted.python import usage
@@ -22,7 +24,7 @@ def check(fn):
line = line[:-1]
if line.rstrip() != line:
# the %s:%d:%d: lets emacs' compile-mode jump to those locations
- print "%s:%d:%d: trailing whitespace" % (fn, i+1, len(line)+1)
+ print("%s:%d:%d: trailing whitespace" % (fn, i+1, len(line)+1))
found[0] = True
f.close()
diff --git a/misc/coding_tools/graph-deps.py b/misc/coding_tools/graph-deps.py
index 525a4131a..0e4d7aedb 100755
--- a/misc/coding_tools/graph-deps.py
+++ b/misc/coding_tools/graph-deps.py
@@ -21,7 +21,7 @@
# Install 'click' first. I run this with py2, but py3 might work too, if the
# wheels can be built with py3.
-from __future__ import print_function, unicode_literals
+from __future__ import unicode_literals, print_function
import os, sys, subprocess, json, tempfile, zipfile, io, re, itertools
import email.parser
from pprint import pprint
diff --git a/misc/coding_tools/make-canary-files.py b/misc/coding_tools/make-canary-files.py
index 4ba06cd9c..fa813f047 100644
--- a/misc/coding_tools/make-canary-files.py
+++ b/misc/coding_tools/make-canary-files.py
@@ -1,5 +1,7 @@
#!/usr/bin/env python
+from __future__ import print_function
+
"""
Given a list of nodeids and a 'convergence' file, create a bunch of files
that will (when encoded at k=1,N=1) be uploaded to specific nodeids.
@@ -86,8 +88,8 @@ for line in open(opts["nodeids"], "r").readlines():
nodes[nodeid] = nickname
if opts["k"] != 3 or opts["N"] != 10:
- print "note: using non-default k/N requires patching the Tahoe code"
- print "src/allmydata/client.py line 55, DEFAULT_ENCODING_PARAMETERS"
+ print("note: using non-default k/N requires patching the Tahoe code")
+ print("src/allmydata/client.py line 55, DEFAULT_ENCODING_PARAMETERS")
convergence_file = os.path.expanduser(opts["convergence"])
convergence_s = open(convergence_file, "rb").read().strip()
@@ -109,7 +111,7 @@ def find_share_for_target(target):
while True:
attempts += 1
suffix = base32.b2a(os.urandom(10))
- if verbose: print " trying", suffix,
+ if verbose: print(" trying", suffix, end=' ')
data = prefix + suffix + "\n"
assert len(data) > 55 # no LIT files
# now, what storage index will this get?
@@ -117,11 +119,11 @@ def find_share_for_target(target):
eu = upload.EncryptAnUploadable(u)
d = eu.get_storage_index() # this happens to run synchronously
def _got_si(si, data=data):
- if verbose: print "SI", base32.b2a(si),
+ if verbose: print("SI", base32.b2a(si), end=' ')
peerlist = get_permuted_peers(si)
if peerlist[0] == target:
# great!
- if verbose: print " yay!"
+ if verbose: print(" yay!")
fn = base32.b2a(target)
if nodes[target]:
nickname = nodes[target].replace("/", "_")
@@ -131,7 +133,7 @@ def find_share_for_target(target):
open(fn, "w").write(data)
return True
# nope, must try again
- if verbose: print " boo"
+ if verbose: print(" boo")
return False
d.addCallback(_got_si)
# get sneaky and look inside the Deferred for the synchronous result
@@ -142,10 +144,10 @@ os.mkdir("canaries")
attempts = []
for target in nodes:
target_s = base32.b2a(target)
- print "working on", target_s
+ print("working on", target_s)
attempts.append(find_share_for_target(target))
-print "done"
-print "%d attempts total, avg %d per target, max %d" % \
- (sum(attempts), 1.0* sum(attempts) / len(nodes), max(attempts))
+print("done")
+print("%d attempts total, avg %d per target, max %d" % \
+ (sum(attempts), 1.0* sum(attempts) / len(nodes), max(attempts)))
diff --git a/misc/coding_tools/make_umid b/misc/coding_tools/make_umid
index 60aab23a0..6b1759681 100644
--- a/misc/coding_tools/make_umid
+++ b/misc/coding_tools/make_umid
@@ -1,5 +1,7 @@
#!/usr/bin/env python
+from __future__ import print_function
+
"""Create a short probably-unique string for use as a umid= argument in a
Foolscap log() call, to make it easier to locate the source code that
generated the message. The main text of the log message is frequently
@@ -51,5 +53,5 @@ count = 1
if len(sys.argv) > 1:
count = int(sys.argv[1])
for i in range(count):
- print make_id()
+ print(make_id())
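
Hypothetical usage of the generated id (not from the patch), in the kind of Foolscap log call the docstring describes:

    from foolscap.logging import log
    log.msg("cannot connect to server", umid="E4rHNA")  # id produced by make_umid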
diff --git a/misc/operations_helpers/cpu-watcher-poll.py b/misc/operations_helpers/cpu-watcher-poll.py
index 68ac4b46a..320dd8ad7 100644
--- a/misc/operations_helpers/cpu-watcher-poll.py
+++ b/misc/operations_helpers/cpu-watcher-poll.py
@@ -1,13 +1,15 @@
#!/usr/bin/env python
+from __future__ import print_function
+
from foolscap import Tub, eventual
from twisted.internet import reactor
import sys
import pprint
def oops(f):
- print "ERROR"
- print f
+ print("ERROR")
+ print(f)
def fetch(furl):
t = Tub()
diff --git a/misc/operations_helpers/cpu-watcher-subscribe.py b/misc/operations_helpers/cpu-watcher-subscribe.py
index 4c560e2c6..36a69cac7 100644
--- a/misc/operations_helpers/cpu-watcher-subscribe.py
+++ b/misc/operations_helpers/cpu-watcher-subscribe.py
@@ -1,5 +1,7 @@
# -*- python -*-
+from __future__ import print_function
+
from twisted.internet import reactor
import sys
@@ -31,7 +33,7 @@ class CPUWatcherSubscriber(service.MultiService, Referenceable):
tub.connectTo(furl, self.connected)
def connected(self, rref):
- print "subscribing"
+ print("subscribing")
d = rref.callRemote("get_averages")
d.addCallback(self.remote_averages)
d.addErrback(log.err)
diff --git a/misc/operations_helpers/cpu-watcher.tac b/misc/operations_helpers/cpu-watcher.tac
index bb67cb402..795b9c444 100644
--- a/misc/operations_helpers/cpu-watcher.tac
+++ b/misc/operations_helpers/cpu-watcher.tac
@@ -1,5 +1,7 @@
# -*- python -*-
+from __future__ import print_function
+
"""
# run this tool on a linux box in its own directory, with a file named
# 'pids.txt' describing which processes to watch. It will follow CPU usage of
@@ -20,7 +22,6 @@
# built-in graphs on web interface
-
import pickle, os.path, time, pprint
from twisted.application import internet, service, strports
from twisted.web import server, resource, http
@@ -210,7 +211,7 @@ class CPUWatcher(service.MultiService, resource.Resource, Referenceable):
row.append(self._average_N(pid, avg))
current.append(tuple(row))
self.current = current
- print current
+ print(current)
for ob in self.observers:
eventual.eventually(self.notify, ob)
diff --git a/misc/operations_helpers/find-share-anomalies.py b/misc/operations_helpers/find-share-anomalies.py
index 8dca51136..d689a8c99 100644
--- a/misc/operations_helpers/find-share-anomalies.py
+++ b/misc/operations_helpers/find-share-anomalies.py
@@ -2,6 +2,8 @@
# feed this the results of 'tahoe catalog-shares' for all servers
+from __future__ import print_function
+
import sys
chk_encodings = {}
@@ -45,23 +47,23 @@ sdmf_multiple_versions = [(si,lines)
sdmf_multiple_versions.sort()
if chk_multiple_encodings:
- print
- print "CHK multiple encodings:"
+ print()
+ print("CHK multiple encodings:")
for (si,lines) in chk_multiple_encodings:
- print " " + si
+ print(" " + si)
for line in sorted(lines):
- print " " + line
+ print(" " + line)
if sdmf_multiple_encodings:
- print
- print "SDMF multiple encodings:"
+ print()
+ print("SDMF multiple encodings:")
for (si,lines) in sdmf_multiple_encodings:
- print " " + si
+ print(" " + si)
for line in sorted(lines):
- print " " + line
+ print(" " + line)
if sdmf_multiple_versions:
- print
- print "SDMF multiple versions:"
+ print()
+ print("SDMF multiple versions:")
for (si,lines) in sdmf_multiple_versions:
- print " " + si
+ print(" " + si)
for line in sorted(lines):
- print " " + line
+ print(" " + line)
diff --git a/misc/operations_helpers/getmem.py b/misc/operations_helpers/getmem.py
index 8ddc3ed7e..b3c6285fe 100644
--- a/misc/operations_helpers/getmem.py
+++ b/misc/operations_helpers/getmem.py
@@ -1,5 +1,7 @@
#! /usr/bin/env python
+from __future__ import print_function
+
from foolscap import Tub
from foolscap.eventual import eventually
import sys
@@ -10,7 +12,7 @@ def go():
d = t.getReference(sys.argv[1])
d.addCallback(lambda rref: rref.callRemote("get_memory_usage"))
def _got(res):
- print res
+ print(res)
reactor.stop()
d.addCallback(_got)
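
The next several files are munin plugins, which all share the same two-mode protocol; that is why each one converts the same pair of prints. Run with a "config" argument, a plugin emits graph metadata; run bare, it emits current values. A minimal sketch with made-up names:

    import sys

    if len(sys.argv) > 1 and sys.argv[1] == "config":
        print("graph_title Example Tahoe Graph")   # munin asks for metadata first
        print("graph_category tahoe")
        print("example.label example metric")
        sys.exit(0)
    print("example.value 42")                      # normal run: emit the current sample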
diff --git a/misc/operations_helpers/munin/tahoe_cpu_watcher b/misc/operations_helpers/munin/tahoe_cpu_watcher
index 0cba5fcb8..8f2876792 100644
--- a/misc/operations_helpers/munin/tahoe_cpu_watcher
+++ b/misc/operations_helpers/munin/tahoe_cpu_watcher
@@ -1,5 +1,7 @@
#!/usr/bin/env python
+from __future__ import print_function
+
import os, sys, re
import urllib
import json
@@ -24,6 +26,6 @@ for (name, avg1, avg5, avg15) in current:
if len(sys.argv) > 1:
if sys.argv[1] == "config":
- print configinfo.rstrip()
+ print(configinfo.rstrip())
sys.exit(0)
-print data.rstrip()
+print(data.rstrip())
diff --git a/misc/operations_helpers/munin/tahoe_diskleft b/misc/operations_helpers/munin/tahoe_diskleft
index 0f80be223..d5ce04b1a 100644
--- a/misc/operations_helpers/munin/tahoe_diskleft
+++ b/misc/operations_helpers/munin/tahoe_diskleft
@@ -5,18 +5,20 @@
# is left on all disks across the grid. The plugin should be configured with
# env_url= pointing at the diskwatcher.tac webport.
+from __future__ import print_function
+
import os, sys, urllib, json
if len(sys.argv) > 1 and sys.argv[1] == "config":
- print """\
+ print("""\
graph_title Tahoe Remaining Disk Space
graph_vlabel bytes remaining
graph_category tahoe
graph_info This graph shows the total amount of disk space left available in the grid
disk_left.label disk left
-disk_left.draw LINE1"""
+disk_left.draw LINE1""")
sys.exit(0)
url = os.environ["url"]
data = json.load(urllib.urlopen(url))["available"]
-print "disk_left.value", data
+print("disk_left.value", data)
diff --git a/misc/operations_helpers/munin/tahoe_disktotal b/misc/operations_helpers/munin/tahoe_disktotal
index 2d67f1272..b6d1a99e6 100644
--- a/misc/operations_helpers/munin/tahoe_disktotal
+++ b/misc/operations_helpers/munin/tahoe_disktotal
@@ -6,10 +6,12 @@
# used. The plugin should be configured with env_url= pointing at the
# diskwatcher.tac webport.
+from __future__ import print_function
+
import os, sys, urllib, json
if len(sys.argv) > 1 and sys.argv[1] == "config":
- print """\
+ print("""\
graph_title Tahoe Total Disk Space
graph_vlabel bytes
graph_category tahoe
@@ -17,10 +19,10 @@ graph_info This graph shows the total amount of disk space present in the grid,
disk_total.label disk total
disk_total.draw LINE2
disk_used.label disk used
-disk_used.draw LINE1"""
+disk_used.draw LINE1""")
sys.exit(0)
url = os.environ["url"]
data = json.load(urllib.urlopen(url))
-print "disk_total.value", data["total"]
-print "disk_used.value", data["used"]
+print("disk_total.value", data["total"])
+print("disk_used.value", data["used"])
diff --git a/misc/operations_helpers/munin/tahoe_diskusage b/misc/operations_helpers/munin/tahoe_diskusage
index a5098dcac..cc37af3df 100644
--- a/misc/operations_helpers/munin/tahoe_diskusage
+++ b/misc/operations_helpers/munin/tahoe_diskusage
@@ -5,10 +5,12 @@
# is being used per unit time. The plugin should be configured with env_url=
# pointing at the diskwatcher.tac webport.
+from __future__ import print_function
+
import os, sys, urllib, json
if len(sys.argv) > 1 and sys.argv[1] == "config":
- print """\
+ print("""\
graph_title Tahoe Disk Usage Measurement
graph_vlabel bytes per second
graph_category tahoe
@@ -21,7 +23,7 @@ rate_1day.draw LINE1
rate_2wk.label (two week sample)
rate_2wk.draw LINE2
rate_4wk.label (four week sample)
-rate_4wk.draw LINE2"""
+rate_4wk.draw LINE2""")
sys.exit(0)
url = os.environ["url"]
@@ -31,10 +33,10 @@ data = dict([(name, growth)
for (name, timespan, growth, timeleft) in timespans])
# growth is in bytes per second
if "1hr" in data:
- print "rate_1hr.value", data["1hr"]
+ print("rate_1hr.value", data["1hr"])
if "1day" in data:
- print "rate_1day.value", data["1day"]
+ print("rate_1day.value", data["1day"])
if "2wk" in data:
- print "rate_2wk.value", data["2wk"]
+ print("rate_2wk.value", data["2wk"])
if "4wk" in data:
- print "rate_4wk.value", data["4wk"]
+ print("rate_4wk.value", data["4wk"])
diff --git a/misc/operations_helpers/munin/tahoe_diskused b/misc/operations_helpers/munin/tahoe_diskused
index 2dbed1019..26303af86 100644
--- a/misc/operations_helpers/munin/tahoe_diskused
+++ b/misc/operations_helpers/munin/tahoe_diskused
@@ -5,18 +5,20 @@
# used on all disks across the grid. The plugin should be configured with
# env_url= pointing at the diskwatcher.tac webport.
+from __future__ import print_function
+
import os, sys, urllib, json
if len(sys.argv) > 1 and sys.argv[1] == "config":
- print """\
+ print("""\
graph_title Tahoe Total Disk Space Used
graph_vlabel bytes used
graph_category tahoe
graph_info This graph shows the total amount of disk space used across the grid
disk_used.label disk used
-disk_used.draw LINE1"""
+disk_used.draw LINE1""")
sys.exit(0)
url = os.environ["url"]
data = json.load(urllib.urlopen(url))["used"]
-print "disk_used.value", data
+print("disk_used.value", data)
diff --git a/misc/operations_helpers/munin/tahoe_doomsday b/misc/operations_helpers/munin/tahoe_doomsday
index 35ed7d7e9..5a87489c2 100644
--- a/misc/operations_helpers/munin/tahoe_doomsday
+++ b/misc/operations_helpers/munin/tahoe_doomsday
@@ -5,10 +5,12 @@
# left before the grid fills up. The plugin should be configured with
# env_url= pointing at the diskwatcher.tac webport.
+from __future__ import print_function
+
import os, sys, urllib, json
if len(sys.argv) > 1 and sys.argv[1] == "config":
- print """\
+ print("""\
graph_title Tahoe Remaining Time Predictor
graph_vlabel days remaining
graph_category tahoe
@@ -20,7 +22,7 @@ days_1day.draw LINE1
days_2wk.label days left (two week sample)
days_2wk.draw LINE2
days_4wk.label days left (four week sample)
-days_4wk.draw LINE2"""
+days_4wk.draw LINE2""")
sys.exit(0)
url = os.environ["url"]
@@ -32,10 +34,10 @@ data = dict([(name, timeleft)
# timeleft is in seconds
DAY = 24*60*60
if "1hr" in data:
- print "days_1hr.value", data["1hr"]/DAY
+ print("days_1hr.value", data["1hr"]/DAY)
if "1day" in data:
- print "days_1day.value", data["1day"]/DAY
+ print("days_1day.value", data["1day"]/DAY)
if "2wk" in data:
- print "days_2wk.value", data["2wk"]/DAY
+ print("days_2wk.value", data["2wk"]/DAY)
if "4wk" in data:
- print "days_4wk.value", data["4wk"]/DAY
+ print("days_4wk.value", data["4wk"]/DAY)
diff --git a/misc/operations_helpers/munin/tahoe_estimate_files b/misc/operations_helpers/munin/tahoe_estimate_files
index 249565e4d..1dda5affb 100644
--- a/misc/operations_helpers/munin/tahoe_estimate_files
+++ b/misc/operations_helpers/munin/tahoe_estimate_files
@@ -1,15 +1,17 @@
#!/usr/bin/env python
+from __future__ import print_function
+
import sys, os.path
if len(sys.argv) > 1 and sys.argv[1] == "config":
- print """\
+ print("""\
graph_title Tahoe File Estimate
graph_vlabel files
graph_category tahoe
graph_info This graph shows the estimated number of files and directories present in the grid
files.label files
-files.draw LINE2"""
+files.draw LINE2""")
sys.exit(0)
# Edit this to point at some subset of storage directories.
@@ -46,4 +48,4 @@ correction = 1+no_chance
#print "correction", correction
files = unique_strings * (32*32/len(sections)) * correction
-print "files.value %d" % int(files)
+print("files.value %d" % int(files))
diff --git a/misc/operations_helpers/munin/tahoe_files b/misc/operations_helpers/munin/tahoe_files
index b63590946..ec3ee5073 100644
--- a/misc/operations_helpers/munin/tahoe_files
+++ b/misc/operations_helpers/munin/tahoe_files
@@ -18,6 +18,8 @@
# env.basedir_NODE3 /path/to/node3
#
+from __future__ import print_function
+
import os, sys
nodedirs = []
@@ -41,7 +43,7 @@ for nodename, basedir in nodedirs:
if len(sys.argv) > 1:
if sys.argv[1] == "config":
- print configinfo.rstrip()
+ print(configinfo.rstrip())
sys.exit(0)
for nodename, basedir in nodedirs:
@@ -52,5 +54,5 @@ for nodename, basedir in nodedirs:
if dirpath == root and "incoming" in dirnames:
dirnames.remove("incoming")
shares += len(filenames)
- print "%s.value %d" % (nodename, shares)
+ print("%s.value %d" % (nodename, shares))
diff --git a/misc/operations_helpers/munin/tahoe_helperstats_active b/misc/operations_helpers/munin/tahoe_helperstats_active
index ae395bec9..ba1032acb 100644
--- a/misc/operations_helpers/munin/tahoe_helperstats_active
+++ b/misc/operations_helpers/munin/tahoe_helperstats_active
@@ -1,5 +1,7 @@
#!/usr/bin/env python
+from __future__ import print_function
+
import os, sys
import urllib
import json
@@ -15,11 +17,11 @@ fetched.draw LINE2
if len(sys.argv) > 1:
if sys.argv[1] == "config":
- print configinfo.rstrip()
+ print(configinfo.rstrip())
sys.exit(0)
url = os.environ["url"]
data = json.loads(urllib.urlopen(url).read())
-print "fetched.value %d" % data["chk_upload_helper.active_uploads"]
+print("fetched.value %d" % data["chk_upload_helper.active_uploads"])
diff --git a/misc/operations_helpers/munin/tahoe_helperstats_fetched b/misc/operations_helpers/munin/tahoe_helperstats_fetched
index 2ca7a39bc..5f53bb82c 100644
--- a/misc/operations_helpers/munin/tahoe_helperstats_fetched
+++ b/misc/operations_helpers/munin/tahoe_helperstats_fetched
@@ -1,5 +1,7 @@
#!/usr/bin/env python
+from __future__ import print_function
+
import os, sys
import urllib
import json
@@ -17,10 +19,10 @@ fetched.min 0
if len(sys.argv) > 1:
if sys.argv[1] == "config":
- print configinfo.rstrip()
+ print(configinfo.rstrip())
sys.exit(0)
url = os.environ["url"]
data = json.loads(urllib.urlopen(url).read())
-print "fetched.value %d" % data["chk_upload_helper.fetched_bytes"]
+print("fetched.value %d" % data["chk_upload_helper.fetched_bytes"])
diff --git a/misc/operations_helpers/munin/tahoe_introstats b/misc/operations_helpers/munin/tahoe_introstats
index 8c664c9ef..0373c70e2 100644
--- a/misc/operations_helpers/munin/tahoe_introstats
+++ b/misc/operations_helpers/munin/tahoe_introstats
@@ -1,5 +1,7 @@
#!/usr/bin/env python
+from __future__ import print_function
+
import os, sys
import urllib
import json
@@ -19,13 +21,13 @@ storage_client.draw LINE2
if len(sys.argv) > 1:
if sys.argv[1] == "config":
- print configinfo.rstrip()
+ print(configinfo.rstrip())
sys.exit(0)
url = os.environ["url"]
data = json.loads(urllib.urlopen(url).read())
-print "storage_server.value %d" % data["announcement_summary"]["storage"]
-print "storage_hosts.value %d" % data["announcement_distinct_hosts"]["storage"]
-print "storage_client.value %d" % data["subscription_summary"]["storage"]
+print("storage_server.value %d" % data["announcement_summary"]["storage"])
+print("storage_hosts.value %d" % data["announcement_distinct_hosts"]["storage"])
+print("storage_client.value %d" % data["subscription_summary"]["storage"])
diff --git a/misc/operations_helpers/munin/tahoe_nodememory b/misc/operations_helpers/munin/tahoe_nodememory
index 1ecf53fc9..061a50dc2 100644
--- a/misc/operations_helpers/munin/tahoe_nodememory
+++ b/misc/operations_helpers/munin/tahoe_nodememory
@@ -4,6 +4,8 @@
# by 'allmydata start', then extracts the amount of memory they consume (both
# VmSize and VmRSS) from /proc
+from __future__ import print_function
+
import os, sys, re
# for testing
@@ -47,7 +49,7 @@ graph_info This graph shows the memory used by specific processes
if f == "VmData":
configinfo += "%s_%s.graph no\n" % (nodename, f)
- print configinfo
+ print(configinfo)
sys.exit(0)
nodestats = {}
@@ -67,4 +69,4 @@ for node,stats in nodestats.items():
for f,value in stats.items():
# TODO: not sure if /proc/%d/status means 1000 or 1024 when it says
# 'kB'
- print "%s_%s.value %d" % (node, f, 1024*value)
+ print("%s_%s.value %d" % (node, f, 1024*value))
diff --git a/misc/operations_helpers/munin/tahoe_overhead b/misc/operations_helpers/munin/tahoe_overhead
index 28353450f..40640d189 100644
--- a/misc/operations_helpers/munin/tahoe_overhead
+++ b/misc/operations_helpers/munin/tahoe_overhead
@@ -27,10 +27,12 @@
# This plugin should be configured with env_diskwatcher_url= pointing at the
# diskwatcher.tac webport, and env_deepsize_url= pointing at the PHP script.
+from __future__ import print_function
+
import os, sys, urllib, json
if len(sys.argv) > 1 and sys.argv[1] == "config":
- print """\
+ print("""\
graph_title Tahoe Overhead Calculator
graph_vlabel Percentage
graph_category tahoe
@@ -40,7 +42,7 @@ overhead.draw LINE2
inactive.label inactive account usage
inactive.draw LINE1
effective_expansion.label Effective Expansion Factor
-effective_expansion.graph no"""
+effective_expansion.graph no""")
sys.exit(0)
diskwatcher_url = os.environ["diskwatcher_url"]
@@ -54,12 +56,12 @@ ideal = expansion * deepsize["all"]
overhead = (total - ideal) / ideal
if overhead > 0:
# until all the storage-servers come online, this number will be nonsense
- print "overhead.value %f" % (100.0 * overhead)
+ print("overhead.value %f" % (100.0 * overhead))
# same for this one
effective_expansion = total / deepsize["all"]
- print "effective_expansion.value %f" % effective_expansion
+ print("effective_expansion.value %f" % effective_expansion)
# this value remains valid, though
inactive_savings = (deepsize["all"] - deepsize["active"]) / deepsize["active"]
-print "inactive.value %f" % (100.0 * inactive_savings)
+print("inactive.value %f" % (100.0 * inactive_savings))
diff --git a/misc/operations_helpers/munin/tahoe_rootdir_space b/misc/operations_helpers/munin/tahoe_rootdir_space
index 3062193ba..1f5709206 100644
--- a/misc/operations_helpers/munin/tahoe_rootdir_space
+++ b/misc/operations_helpers/munin/tahoe_rootdir_space
@@ -1,5 +1,7 @@
#!/usr/bin/env python
+from __future__ import print_function
+
import os, sys
import urllib
@@ -14,10 +16,10 @@ space.draw LINE2
if len(sys.argv) > 1:
if sys.argv[1] == "config":
- print configinfo.rstrip()
+ print(configinfo.rstrip())
sys.exit(0)
url = os.environ["url"]
data = int(urllib.urlopen(url).read().strip())
-print "space.value %d" % data
+print("space.value %d" % data)
diff --git a/misc/operations_helpers/munin/tahoe_server_latency_ b/misc/operations_helpers/munin/tahoe_server_latency_
index 51d954bce..c8930804c 100644
--- a/misc/operations_helpers/munin/tahoe_server_latency_
+++ b/misc/operations_helpers/munin/tahoe_server_latency_
@@ -42,6 +42,8 @@
# of course, these URLs must match the webports you have configured into the
# storage nodes.
+from __future__ import print_function
+
import os, sys
import urllib
import json
@@ -78,7 +80,7 @@ for nodename, url in node_urls:
if len(sys.argv) > 1:
if sys.argv[1] == "config":
- print configinfo.rstrip()
+ print(configinfo.rstrip())
sys.exit(0)
for nodename, url in node_urls:
@@ -89,5 +91,5 @@ for nodename, url in node_urls:
p_key = percentile + "_percentile"
key = "storage_server.latencies.%s.%s" % (operation, p_key)
value = data["stats"][key]
- print "%s.value %s" % (nodename, value)
+ print("%s.value %s" % (nodename, value))
diff --git a/misc/operations_helpers/munin/tahoe_server_operations_ b/misc/operations_helpers/munin/tahoe_server_operations_
index fa3952700..6156a7f48 100644
--- a/misc/operations_helpers/munin/tahoe_server_operations_
+++ b/misc/operations_helpers/munin/tahoe_server_operations_
@@ -32,6 +32,8 @@
# of course, these URLs must match the webports you have configured into the
# storage nodes.
+from __future__ import print_function
+
import os, sys
import urllib
import json
@@ -64,12 +66,12 @@ for nodename, url in node_urls:
if len(sys.argv) > 1:
if sys.argv[1] == "config":
- print configinfo.rstrip()
+ print(configinfo.rstrip())
sys.exit(0)
for nodename, url in node_urls:
data = json.loads(urllib.urlopen(url).read())
key = "storage_server.%s" % operation
value = data["counters"][key]
- print "%s.value %s" % (nodename, value)
+ print("%s.value %s" % (nodename, value))
diff --git a/misc/operations_helpers/munin/tahoe_spacetime b/misc/operations_helpers/munin/tahoe_spacetime
index 563bde7fe..12b5121bf 100644
--- a/misc/operations_helpers/munin/tahoe_spacetime
+++ b/misc/operations_helpers/munin/tahoe_spacetime
@@ -5,6 +5,8 @@
# then extrapolate to guess how many weeks/months/years of storage space we
# have left, and output it to another munin graph
+from __future__ import print_function
+
import sys, os, time
import rrdtool
@@ -82,7 +84,7 @@ def write_to_file(samples):
os.rename(WEBFILE + ".tmp", WEBFILE)
if len(sys.argv) > 1 and sys.argv[1] == "config":
- print """\
+ print("""\
graph_title Tahoe Remaining Space Predictor
graph_vlabel days remaining
graph_category tahoe
@@ -90,17 +92,17 @@ graph_info This graph shows the estimated number of days left until storage spac
days_2wk.label days left (2wk sample)
days_2wk.draw LINE2
days_4wk.label days left (4wk sample)
-days_4wk.draw LINE2"""
+days_4wk.draw LINE2""")
sys.exit(0)
#rsync_rrd()
samples = {}
remaining_4wk = predict_future("4wk")
if remaining_4wk is not None:
- print "days_4wk.value", remaining_4wk
+ print("days_4wk.value", remaining_4wk)
samples["remaining_4wk"] = remaining_4wk
remaining_2wk = predict_future("2wk")
if remaining_2wk is not None:
- print "days_2wk.value", remaining_2wk
+ print("days_2wk.value", remaining_2wk)
samples["remaining_2wk"] = remaining_2wk
write_to_file(samples)
diff --git a/misc/operations_helpers/munin/tahoe_stats b/misc/operations_helpers/munin/tahoe_stats
index 1e04dec0e..03bf116f5 100644
--- a/misc/operations_helpers/munin/tahoe_stats
+++ b/misc/operations_helpers/munin/tahoe_stats
@@ -1,5 +1,7 @@
#!/usr/bin/env python
+from __future__ import print_function
+
import os
import json
import re
@@ -460,11 +462,11 @@ def main(argv):
value = nodestats['stats'][category].get(statid)
if value is not None:
args = { 'name': name, 'value': value }
- print plugin_conf[output_section] % args
+ print(plugin_conf[output_section] % args)
if len(argv) > 1:
if sys.argv[1] == 'config':
- print plugin_conf['configheader']
+ print(plugin_conf['configheader'])
output_nodes('graph_config', False)
sys.exit(0)
diff --git a/misc/operations_helpers/munin/tahoe_storagespace b/misc/operations_helpers/munin/tahoe_storagespace
index f6edc3140..73443b428 100644
--- a/misc/operations_helpers/munin/tahoe_storagespace
+++ b/misc/operations_helpers/munin/tahoe_storagespace
@@ -18,6 +18,8 @@
# Allmydata-tahoe must be installed on the system where this plugin is used,
# since it imports a utility module from allmydata.utils .
+from __future__ import print_function
+
import os, sys
import commands
@@ -44,7 +46,7 @@ for nodename, basedir in nodedirs:
if len(sys.argv) > 1:
if sys.argv[1] == "config":
- print configinfo.rstrip()
+ print(configinfo.rstrip())
sys.exit(0)
for nodename, basedir in nodedirs:
@@ -54,5 +56,5 @@ for nodename, basedir in nodedirs:
sys.exit(rc)
bytes, extra = out.split()
usage = int(bytes)
- print "%s.value %d" % (nodename, usage)
+ print("%s.value %d" % (nodename, usage))
diff --git a/misc/operations_helpers/spacetime/diskwatcher.tac b/misc/operations_helpers/spacetime/diskwatcher.tac
index 15143b825..0a43a468e 100644
--- a/misc/operations_helpers/spacetime/diskwatcher.tac
+++ b/misc/operations_helpers/spacetime/diskwatcher.tac
@@ -1,5 +1,7 @@
# -*- python -*-
+from __future__ import print_function
+
"""
Run this tool with twistd in its own directory, with a file named 'urls.txt'
describing which nodes to query. Make sure to copy diskwatcher.py into the
@@ -82,7 +84,7 @@ class DiskWatcher(service.MultiService, resource.Resource):
ts.setServiceParent(self)
def _upgrade_complete(self, ignored):
- print "Axiom store upgrade complete"
+ print("Axiom store upgrade complete")
def startService(self):
service.MultiService.startService(self)
@@ -155,8 +157,8 @@ class DiskWatcher(service.MultiService, resource.Resource):
total = data[u"stats"][u"storage_server.disk_total"]
used = data[u"stats"][u"storage_server.disk_used"]
avail = data[u"stats"][u"storage_server.disk_avail"]
- print "%s : total=%s, used=%s, avail=%s" % (url,
- total, used, avail)
+ print("%s : total=%s, used=%s, avail=%s" % (url,
+ total, used, avail))
Sample(store=self.store,
url=unicode(url), when=when, total=total, used=used, avail=avail)
@@ -168,7 +170,7 @@ class DiskWatcher(service.MultiService, resource.Resource):
pairs.sort()
for (timespan,name) in pairs:
growth = self.growth(timespan)
- print name, total_avail_space, growth
+ print(name, total_avail_space, growth)
if growth is not None:
timeleft = None
if growth > 0:
@@ -286,7 +288,7 @@ class DiskWatcher(service.MultiService, resource.Resource):
old = old[0]
duration = latest.when.asPOSIXTimestamp() - old.when.asPOSIXTimestamp()
if not duration:
- print "only one sample from", url
+ print("only one sample from", url)
continue
rate = float(latest.used - old.used) / duration
diff --git a/misc/simulators/bench_spans.py b/misc/simulators/bench_spans.py
index e1e6e7ebd..d6dc12d7a 100644
--- a/misc/simulators/bench_spans.py
+++ b/misc/simulators/bench_spans.py
@@ -1,3 +1,5 @@
+from __future__ import print_function
+
"""
To use this, get a trace file such as this one:
@@ -68,17 +70,17 @@ class B(object):
elif INIT_S in inline:
pass
else:
- print "Warning, didn't recognize this line: %r" % (inline,)
+ print("Warning, didn't recognize this line: %r" % (inline,))
count += 1
inline = self.inf.readline()
# print self.stats
benchutil.print_bench_footer(UNITS_PER_SECOND=1000000)
-print "(microseconds)"
+print("(microseconds)")
for N in [600, 6000, 60000]:
b = B(open(sys.argv[1], 'rU'))
- print "%7d" % N,
+ print("%7d" % N, end=' ')
benchutil.rep_bench(b.run, N, b.init, UNITS_PER_SECOND=1000000)
diff --git a/misc/simulators/count_dirs.py b/misc/simulators/count_dirs.py
index 78412d33b..6b52ba96a 100644
--- a/misc/simulators/count_dirs.py
+++ b/misc/simulators/count_dirs.py
@@ -1,5 +1,7 @@
#!/usr/bin/env python
+from __future__ import print_function
+
"""
This tool estimates how much space would be consumed by a filetree into which
a native directory was copied.
@@ -96,10 +98,10 @@ def scan(root):
for mode in MODES:
total[mode] += slotsize(mode, len(files), len(dirs)) + stringsize
- print "%d directories" % num_dirs
- print "%d files" % num_files
+ print("%d directories" % num_dirs)
+ print("%d files" % num_files)
for mode in sorted(total.keys()):
- print "%s: %d bytes" % (mode, total[mode])
+ print("%s: %d bytes" % (mode, total[mode]))
if __name__ == '__main__':
diff --git a/misc/simulators/hashbasedsig.py b/misc/simulators/hashbasedsig.py
index ea220ddfc..5c135adf6 100644
--- a/misc/simulators/hashbasedsig.py
+++ b/misc/simulators/hashbasedsig.py
@@ -1,5 +1,7 @@
#!python
+from __future__ import print_function
+
# range of hash output lengths
range_L_hash = [128]
@@ -212,10 +214,10 @@ def calculate(K, K1, K2, q_max, L_hash, trees):
def search():
for L_hash in range_L_hash:
- print >>stderr, "collecting... \r",
+ print("collecting... \r", end=' ', file=stderr)
collect()
- print >>stderr, "precomputing... \r",
+ print("precomputing... \r", end=' ', file=stderr)
"""
# d/dq (lg(q+1) + L_hash/q) = 1/(ln(2)*(q+1)) - L_hash/q^2
@@ -277,9 +279,9 @@ def search():
for K1 in xrange(max(2, K-fuzz), min(K_max, K+fuzz)+1):
candidates += calculate(K, K1, K2, q_max, L_hash, trees)
progress += 1
- print >>stderr, "searching: %3d %% \r" % (100.0 * progress / complete,),
+ print("searching: %3d %% \r" % (100.0 * progress / complete,), end=' ', file=stderr)
- print >>stderr, "filtering... \r",
+ print("filtering... \r", end=' ', file=stderr)
step = 2.0
bins = {}
limit = floor_div(limit_cost, step)
@@ -306,33 +308,33 @@ def search():
"%(c_ver)7d +/-%(c_ver_pm)5d (%(Mcycles_ver)5.2f +/-%(Mcycles_ver_pm)5.2f) "
) % candidate
- print >>stderr, " \r",
+ print(" \r", end=' ', file=stderr)
if len(best) > 0:
- print " B K K1 K2 q T L_hash lg_N sig_bytes c_sign (Mcycles) c_ver ( Mcycles )"
- print "---- ---- ---- ------ ---- ---- ------ ------ --------- ------------------ --------------------------------"
+ print(" B K K1 K2 q T L_hash lg_N sig_bytes c_sign (Mcycles) c_ver ( Mcycles )")
+ print("---- ---- ---- ------ ---- ---- ------ ------ --------- ------------------ --------------------------------")
best.sort(key=lambda c: (c['sig_bytes'], c['cost']))
last_sign = None
last_ver = None
for c in best:
if last_sign is None or c['c_sign'] < last_sign or c['c_ver'] < last_ver:
- print format_candidate(c)
+ print(format_candidate(c))
last_sign = c['c_sign']
last_ver = c['c_ver']
- print
+ print()
else:
- print "No candidates found for L_hash = %d or higher." % (L_hash)
+ print("No candidates found for L_hash = %d or higher." % (L_hash))
return
del bins
del best
-print "Maximum signature size: %d bytes" % (limit_bytes,)
-print "Maximum (signing + %d*verification) cost: %.1f Mcycles" % (weight_ver, limit_cost)
-print "Hash parameters: %d-bit blocks with %d-bit padding and %d-bit labels, %.2f cycles per byte" \
- % (L_block, L_pad, L_label, cycles_per_byte)
-print "PRF output size: %d bits" % (L_prf,)
-print "Security level given by L_hash is maintained for up to 2^%d signatures.\n" % (lg_M,)
+print("Maximum signature size: %d bytes" % (limit_bytes,))
+print("Maximum (signing + %d*verification) cost: %.1f Mcycles" % (weight_ver, limit_cost))
+print("Hash parameters: %d-bit blocks with %d-bit padding and %d-bit labels, %.2f cycles per byte" \
+ % (L_block, L_pad, L_label, cycles_per_byte))
+print("PRF output size: %d bits" % (L_prf,))
+print("Security level given by L_hash is maintained for up to 2^%d signatures.\n" % (lg_M,))
search()
diff --git a/misc/simulators/ringsim.py b/misc/simulators/ringsim.py
index df272e216..83ed1302f 100644
--- a/misc/simulators/ringsim.py
+++ b/misc/simulators/ringsim.py
@@ -3,6 +3,9 @@
# used to discuss ticket #302: "stop permuting peerlist?"
# import time
+
+from __future__ import print_function
+
import math
from hashlib import md5 # sha1, sha256
myhash = md5
@@ -47,7 +50,7 @@ def make_up_a_file_size(seed):
sizes = [make_up_a_file_size(str(i)) for i in range(10000)]
avg_filesize = sum(sizes)/len(sizes)
-print "average file size:", abbreviate_space(avg_filesize)
+print("average file size:", abbreviate_space(avg_filesize))
SERVER_CAPACITY = 10**12
@@ -94,11 +97,11 @@ class Ring:
prev_s = self.servers[(i-1)%len(self.servers)]
diff = "%032x" % (int(s.nodeid,16) - int(prev_s.nodeid,16))
s.prev_diff = diff
- print s, s.prev_diff
+ print(s, s.prev_diff)
- print "sorted by delta"
+ print("sorted by delta")
for s in sorted(self.servers, key=lambda s:s.prev_diff):
- print s, s.prev_diff
+ print(s, s.prev_diff)
def servers_for_si(self, si):
if self.permute:
@@ -121,7 +124,7 @@ class Ring:
return "".join(bits)
def dump_usage(self, numfiles, avg_space_per_file):
- print "uploaded", numfiles
+ print("uploaded", numfiles)
# avg_space_per_file measures expected grid-wide ciphertext per file
used = list(reversed(sorted([s.used for s in self.servers])))
# used is actual per-server ciphertext
@@ -137,19 +140,19 @@ class Ring:
std_deviation = math.sqrt(variance)
sd_of_total = std_deviation / avg_usage_per_file
- print "min/max/(exp) usage-pf-ps %s/%s/(%s):" % (
+ print("min/max/(exp) usage-pf-ps %s/%s/(%s):" % (
abbreviate_space(usedpf[-1]),
abbreviate_space(usedpf[0]),
- abbreviate_space(avg_usage_per_file) ),
- print "spread-pf: %s (%.2f%%)" % (
- abbreviate_space(spreadpf), 100.0*spreadpf/avg_usage_per_file),
+ abbreviate_space(avg_usage_per_file) ), end=' ')
+ print("spread-pf: %s (%.2f%%)" % (
+ abbreviate_space(spreadpf), 100.0*spreadpf/avg_usage_per_file), end=' ')
#print "average_usage:", abbreviate_space(average_usagepf)
- print "stddev: %s (%.2f%%)" % (abbreviate_space(std_deviation),
- 100.0*sd_of_total)
+ print("stddev: %s (%.2f%%)" % (abbreviate_space(std_deviation),
+ 100.0*sd_of_total))
if self.SHOW_MINMAX:
s2 = sorted(self.servers, key=lambda s: s.used)
- print "least:", s2[0].nodeid
- print "most:", s2[-1].nodeid
+ print("least:", s2[0].nodeid)
+ print("most:", s2[-1].nodeid)
class Options(usage.Options):
@@ -196,7 +199,7 @@ def do_run(ring, opts):
server_was_full = True
remaining_servers.discard(s)
if not remaining_servers:
- print "-- GRID IS FULL"
+ print("-- GRID IS FULL")
ring.dump_usage(filenum, avg_space_per_file)
return filenum
index += 1
@@ -207,11 +210,11 @@ def do_run(ring, opts):
if server_was_full and all_servers_have_room:
all_servers_have_room = False
- print "-- FIRST SERVER FULL"
+ print("-- FIRST SERVER FULL")
ring.dump_usage(filenum, avg_space_per_file)
if file_was_wrapped and no_files_have_wrapped:
no_files_have_wrapped = False
- print "-- FIRST FILE WRAPPED"
+ print("-- FIRST FILE WRAPPED")
ring.dump_usage(filenum, avg_space_per_file)
@@ -219,11 +222,11 @@ def do_ring(opts):
total_capacity = opts["servers"]*SERVER_CAPACITY
avg_space_per_file = avg_filesize * opts["N"] / opts["k"]
avg_files = total_capacity / avg_space_per_file
- print "expected number of uploads:", avg_files
+ print("expected number of uploads:", avg_files)
if opts["permute"]:
- print " PERMUTED"
+ print(" PERMUTED")
else:
- print " LINEAR"
+ print(" LINEAR")
seed = opts["seed"]
ring = Ring(opts["servers"], seed, opts["permute"])
diff --git a/misc/simulators/simulate_load.py b/misc/simulators/simulate_load.py
index 50ff45b9c..5821ef7de 100644
--- a/misc/simulators/simulate_load.py
+++ b/misc/simulators/simulate_load.py
@@ -2,6 +2,8 @@
# WARNING. There is a bug in this script so that it does not simulate the actual Tahoe Two server selection algorithm that it was intended to simulate. See http://allmydata.org/trac/tahoe-lafs/ticket/302 (stop permuting peerlist, use SI as offset into ring instead?)
+from __future__ import print_function
+
import random
SERVER_CAPACITY = 10**12
@@ -83,7 +85,7 @@ def test(permutedpeerlist, iters):
filledat = []
for test in range(iters):
(servers, doubled_up_shares) = go(permutedpeerlist)
- print "doubled_up_shares: ", doubled_up_shares
+ print("doubled_up_shares: ", doubled_up_shares)
for server in servers:
fidx = server.full_at_tick
filledat.extend([0]*(fidx-len(filledat)+1))
@@ -147,8 +149,8 @@ if __name__ == "__main__":
if arg.startswith("--iters="):
iters = int(arg[8:])
if "--permute" in sys.argv:
- print "doing permuted peerlist, iterations: %d" % iters
+ print("doing permuted peerlist, iterations: %d" % iters)
test(True, iters)
else:
- print "doing simple ring, iterations: %d" % iters
+ print("doing simple ring, iterations: %d" % iters)
test(False, iters)
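The --iters= handling above is hand-rolled flag parsing: it strips the prefix
with a fixed-length slice and converts the remainder. A self-contained sketch
of the pattern (the default of 100 is an assumption for illustration):

    from __future__ import print_function
    import sys

    iters = 100  # hypothetical default
    for arg in sys.argv[1:]:
        if arg.startswith("--iters="):
            # len("--iters=") == 8, matching the arg[8:] slice in the script
            iters = int(arg[len("--iters="):])
    print("iterations: %d" % iters)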
diff --git a/misc/simulators/simulator.py b/misc/simulators/simulator.py
index a01c5c8c8..d0ef281db 100644
--- a/misc/simulators/simulator.py
+++ b/misc/simulators/simulator.py
@@ -1,5 +1,7 @@
#! /usr/bin/env python
+from __future__ import print_function
+
import hashlib
import os, random
@@ -202,7 +204,7 @@ class Simulator:
size = random.randrange(1000)
n = random.choice(self.all_nodes)
if self.verbose:
- print "add_file(size=%d, from node %s)" % (size, n)
+ print("add_file(size=%d, from node %s)" % (size, n))
fileid = randomid()
able = n.publish_file(fileid, size)
if able:
@@ -223,7 +225,7 @@ class Simulator:
if n.delete_file():
self.deleted_files += 1
return
- print "no files to delete"
+ print("no files to delete")
def _add_event(self, etype):
rate = getattr(self, "RATE_" + etype)
@@ -256,14 +258,14 @@ class Simulator:
# self.print_stats(current_time, etype)
def print_stats_header(self):
- print "time: added failed lost avg_tried"
+ print("time: added failed lost avg_tried")
def print_stats(self, time, etype):
if not self.published_files:
avg_tried = "NONE"
else:
avg_tried = sum(self.published_files) / len(self.published_files)
- print time, etype, self.added_data, self.failed_files, self.lost_data_bytes, avg_tried, len(self.introducer.living_files), self.introducer.utilization
+ print(time, etype, self.added_data, self.failed_files, self.lost_data_bytes, avg_tried, len(self.introducer.living_files), self.introducer.utilization)
s = None
@@ -278,7 +280,7 @@ def main():
# s.print_stats_header()
for i in range(1000):
s.do_event()
- print "%d files added, %d files deleted" % (s.added_files, s.deleted_files)
+ print("%d files added, %d files deleted" % (s.added_files, s.deleted_files))
return s
if __name__ == '__main__':
diff --git a/misc/simulators/sizes.py b/misc/simulators/sizes.py
index 119d5b42b..ca189e4e6 100644
--- a/misc/simulators/sizes.py
+++ b/misc/simulators/sizes.py
@@ -1,5 +1,7 @@
#! /usr/bin/env python
+from __future__ import print_function
+
import random, math, re
from twisted.python import usage
@@ -126,7 +128,7 @@ class Sizes:
"share_storage_overhead", "share_transmission_overhead",
"storage_overhead", "storage_overhead_percentage",
"bytes_until_some_data"):
- print k, getattr(self, k)
+ print(k, getattr(self, k))
def fmt(num, trim=False):
if num < KiB:
@@ -160,11 +162,11 @@ def text():
mode = opts["mode"]
arity = opts["arity"]
# 0123456789012345678901234567890123456789012345678901234567890123456
- print "mode=%s" % mode, " arity=%d" % arity
- print " storage storage"
- print "Size sharesize overhead overhead k d alacrity"
- print " (bytes) (%)"
- print "------- ------- -------- -------- ---- -- --------"
+ print("mode=%s" % mode, " arity=%d" % arity)
+ print(" storage storage")
+ print("Size sharesize overhead overhead k d alacrity")
+ print(" (bytes) (%)")
+ print("------- ------- -------- -------- ---- -- --------")
#sizes = [2 ** i for i in range(7, 41)]
#radix = math.sqrt(10); expstep = 2
radix = 2; expstep = 2
@@ -181,7 +183,7 @@ def text():
out += " %4d" % int(s.block_arity)
out += " %2d" % int(s.block_tree_depth)
out += " %8s" % fmt(s.bytes_until_some_data)
- print out
+ print(out)
def graph():
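One subtlety in the header conversion above: a Python 2 print statement with
comma-separated items emitted a single space between them, and the print
function's default sep=' ' reproduces that, so the fixed-width column ruler
still lines up. A quick check with assumed values, not ones from the script:

    from __future__ import print_function

    mode, arity = "SDMF", 2
    # Both the old statement and the new call print: mode=SDMF  arity=2
    # (one space from sep, one leading space in the second argument)
    print("mode=%s" % mode, " arity=%d" % arity)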
diff --git a/misc/simulators/storage-overhead.py b/misc/simulators/storage-overhead.py
index a294b8d07..547bc0adf 100644
--- a/misc/simulators/storage-overhead.py
+++ b/misc/simulators/storage-overhead.py
@@ -1,5 +1,7 @@
#!/usr/bin/env python
+from __future__ import print_function
+
import sys, math
from allmydata import uri, storage
from allmydata.immutable import upload
@@ -64,18 +66,18 @@ def calc(filesize, params=(3,7,10), segsize=DEFAULT_MAX_SEGMENT_SIZE):
def main():
filesize = int(sys.argv[1])
urisize, sharesize, sharespace = calc(filesize)
- print "urisize:", urisize
- print "sharesize: %10d" % sharesize
- print "sharespace: %10d" % sharespace
- print "desired expansion: %1.1f" % (1.0 * 10 / 3)
- print "effective expansion: %1.1f" % (1.0 * sharespace / filesize)
+ print("urisize:", urisize)
+ print("sharesize: %10d" % sharesize)
+ print("sharespace: %10d" % sharespace)
+ print("desired expansion: %1.1f" % (1.0 * 10 / 3))
+ print("effective expansion: %1.1f" % (1.0 * sharespace / filesize))
def chart():
filesize = 2
while filesize < 2**20:
urisize, sharesize, sharespace = calc(int(filesize))
expansion = 1.0 * sharespace / int(filesize)
- print "%d,%d,%d,%1.2f" % (int(filesize), urisize, sharespace, expansion)
+ print("%d,%d,%d,%1.2f" % (int(filesize), urisize, sharespace, expansion))
filesize = filesize * 2**0.5
if __name__ == '__main__':
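The "desired expansion" printed above is the N/k ratio of a k-of-N erasure
code: each file is stored as N shares, any k of which suffice to recover it.
A worked sketch with the (3, 7, 10) defaults and made-up sizes:

    from __future__ import print_function

    k, N = 3, 10                             # k and N from the default params
    desired = 1.0 * N / k                    # ~3.3 stored bytes per plaintext byte
    filesize, sharespace = 1000000, 3400000  # hypothetical calc() outputs
    effective = 1.0 * sharespace / filesize  # exceeds desired due to per-share overhead
    print("desired expansion: %1.1f" % desired)
    print("effective expansion: %1.1f" % effective)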
diff --git a/newsfragments/3002.other b/newsfragments/3002.other
new file mode 100644
index 000000000..a10cad243
--- /dev/null
+++ b/newsfragments/3002.other
@@ -0,0 +1 @@
+Converted all uses of the print statement to the print function in the ./misc/ directory.
diff --git a/newsfragments/3005.minor b/newsfragments/3005.minor
new file mode 100644
index 000000000..e69de29bb
diff --git a/newsfragments/3007.minor b/newsfragments/3007.minor
new file mode 100644
index 000000000..e69de29bb
diff --git a/src/allmydata/_auto_deps.py b/src/allmydata/_auto_deps.py
index 78fa672fe..38b98e59c 100644
--- a/src/allmydata/_auto_deps.py
+++ b/src/allmydata/_auto_deps.py
@@ -100,10 +100,10 @@ install_requires = [
# Eliot is contemplating dropping Python 2 support. Stick to a version we
# know works on Python 2.7. Because we don't have support for `==`
- # constraints, pin 1.6.x this way. I feel pretty safe betting that we
- # won't end up stuck on Eliot 1.6.100 with a critical fix only present in
- # 1.6.101. And if we do, I know how to deal with that situation.
- "eliot >= 1.6.0, <= 1.6.100",
+ # constraints, pin 1.7.x this way. I feel pretty safe betting that we
+ # won't end up stuck on Eliot 1.7.100 with a critical fix only present in
+ # 1.7.101. And if we do, I know how to deal with that situation.
+ "eliot >= 1.7.0, <= 1.7.100",
# A great way to define types of values.
"attrs >= 18.2.0",
diff --git a/src/allmydata/test/test_eliotutil.py b/src/allmydata/test/test_eliotutil.py
index 199403038..b382b7289 100644
--- a/src/allmydata/test/test_eliotutil.py
+++ b/src/allmydata/test/test_eliotutil.py
@@ -9,7 +9,6 @@ from __future__ import (
division,
)
-from pprint import pformat
from sys import stdout
import logging
@@ -27,7 +26,6 @@ from testtools.matchers import (
AfterPreprocessing,
)
from testtools.twistedsupport import (
- has_no_result,
succeeded,
failed,
)
@@ -35,7 +33,6 @@ from testtools.twistedsupport import (
from eliot import (
Message,
FileDestination,
- start_action,
)
from eliot.twisted import DeferredContext
from eliot.testing import (
@@ -44,15 +41,12 @@ from eliot.testing import (
)
from twisted.internet.defer import (
- Deferred,
succeed,
)
from twisted.internet.task import deferLater
from twisted.internet import reactor
from ..util.eliotutil import (
- eliot_friendly_generator_function,
- inline_callbacks,
log_call_deferred,
_parse_destination_description,
_EliotLogging,
@@ -82,350 +76,7 @@ class EliotLoggedTestTests(AsyncTestCase):
-def assert_logged_messages_contain_fields(testcase, logged_messages, expected_fields):
- testcase.assertEqual(len(logged_messages), len(expected_fields))
- actual_fields = list(
- {key: msg.message[key] for key in expected if key in msg.message}
- for (msg, expected)
- in zip(logged_messages, expected_fields)
- )
- testcase.assertEqual(actual_fields, expected_fields)
-
-
-def assert_logged_action_contains_messages(testcase, logger, expected_action, expected_fields):
- action = assertHasAction(
- testcase,
- logger,
- expected_action,
- True,
- )
- assert_logged_messages_contain_fields(
- testcase,
- action.children,
- expected_fields,
- )
-
-def assert_expected_action_tree(testcase, logger, expected_action_type, expected_type_tree):
- logged_action = assertHasAction(
- testcase,
- logger,
- expected_action_type,
- True,
- )
- type_tree = logged_action.type_tree()
- testcase.assertEqual(
- {expected_action_type: expected_type_tree},
- type_tree,
- "Logger had messages:\n{}".format(pformat(logger.messages, indent=4)),
- )
-
-def assert_generator_logs_action_tree(testcase, generator_function, logger, expected_action_type, expected_type_tree):
- list(eliot_friendly_generator_function(generator_function)())
- assert_expected_action_tree(
- testcase,
- logger,
- expected_action_type,
- expected_type_tree,
- )
-
-
-class EliotFriendlyGeneratorFunctionTests(SyncTestCase):
- # Get our custom assertion failure messages *and* the standard ones.
- longMessage = True
-
- def test_yield_none(self):
- @eliot_friendly_generator_function
- def g():
- Message.log(message_type=u"hello")
- yield
- Message.log(message_type=u"goodbye")
-
- with start_action(action_type=u"the-action"):
- list(g())
-
- assert_expected_action_tree(
- self,
- self.eliot_logger,
- u"the-action",
- [u"hello", u"yielded", u"goodbye"],
- )
-
- def test_yield_value(self):
- expected = object()
-
- @eliot_friendly_generator_function
- def g():
- Message.log(message_type=u"hello")
- yield expected
- Message.log(message_type=u"goodbye")
-
- with start_action(action_type=u"the-action"):
- self.assertEqual([expected], list(g()))
-
- assert_expected_action_tree(
- self,
- self.eliot_logger,
- u"the-action",
- [u"hello", u"yielded", u"goodbye"],
- )
-
- def test_yield_inside_another_action(self):
- @eliot_friendly_generator_function
- def g():
- Message.log(message_type=u"a")
- with start_action(action_type=u"confounding-factor"):
- Message.log(message_type=u"b")
- yield None
- Message.log(message_type=u"c")
- Message.log(message_type=u"d")
-
- with start_action(action_type=u"the-action"):
- list(g())
-
- assert_expected_action_tree(
- self,
- self.eliot_logger,
- u"the-action",
- [u"a",
- {u"confounding-factor": [u"b", u"yielded", u"c"]},
- u"d",
- ],
- )
-
- def test_yield_inside_nested_actions(self):
- @eliot_friendly_generator_function
- def g():
- Message.log(message_type=u"a")
- with start_action(action_type=u"confounding-factor"):
- Message.log(message_type=u"b")
- yield None
- with start_action(action_type=u"double-confounding-factor"):
- yield None
- Message.log(message_type=u"c")
- Message.log(message_type=u"d")
- Message.log(message_type=u"e")
-
- with start_action(action_type=u"the-action"):
- list(g())
-
- assert_expected_action_tree(
- self,
- self.eliot_logger,
- u"the-action", [
- u"a",
- {u"confounding-factor": [
- u"b",
- u"yielded",
- {u"double-confounding-factor": [
- u"yielded",
- u"c",
- ]},
- u"d",
- ]},
- u"e",
- ],
- )
-
- def test_generator_and_non_generator(self):
- @eliot_friendly_generator_function
- def g():
- Message.log(message_type=u"a")
- yield
- with start_action(action_type=u"action-a"):
- Message.log(message_type=u"b")
- yield
- Message.log(message_type=u"c")
-
- Message.log(message_type=u"d")
- yield
-
- with start_action(action_type=u"the-action"):
- generator = g()
- next(generator)
- Message.log(message_type=u"0")
- next(generator)
- Message.log(message_type=u"1")
- next(generator)
- Message.log(message_type=u"2")
- self.assertRaises(StopIteration, lambda: next(generator))
-
- assert_expected_action_tree(
- self,
- self.eliot_logger,
- u"the-action", [
- u"a",
- u"yielded",
- u"0",
- {
- u"action-a": [
- u"b",
- u"yielded",
- u"c",
- ],
- },
- u"1",
- u"d",
- u"yielded",
- u"2",
- ],
- )
-
- def test_concurrent_generators(self):
- @eliot_friendly_generator_function
- def g(which):
- Message.log(message_type=u"{}-a".format(which))
- with start_action(action_type=which):
- Message.log(message_type=u"{}-b".format(which))
- yield
- Message.log(message_type=u"{}-c".format(which))
- Message.log(message_type=u"{}-d".format(which))
-
- gens = [g(u"1"), g(u"2")]
- with start_action(action_type=u"the-action"):
- while gens:
- for g in gens[:]:
- try:
- next(g)
- except StopIteration:
- gens.remove(g)
-
- assert_expected_action_tree(
- self,
- self.eliot_logger,
- u"the-action", [
- u"1-a",
- {u"1": [
- u"1-b",
- u"yielded",
- u"1-c",
- ]},
- u"2-a",
- {u"2": [
- u"2-b",
- u"yielded",
- u"2-c",
- ]},
- u"1-d",
- u"2-d",
- ],
- )
-
- def test_close_generator(self):
- @eliot_friendly_generator_function
- def g():
- Message.log(message_type=u"a")
- try:
- yield
- Message.log(message_type=u"b")
- finally:
- Message.log(message_type=u"c")
-
-
- with start_action(action_type=u"the-action"):
- gen = g()
- next(gen)
- gen.close()
-
- assert_expected_action_tree(
- self,
- self.eliot_logger,
- u"the-action", [
- u"a",
- u"yielded",
- u"c",
- ],
- )
-
- def test_nested_generators(self):
- @eliot_friendly_generator_function
- def g(recurse):
- with start_action(action_type=u"a-recurse={}".format(recurse)):
- Message.log(message_type=u"m-recurse={}".format(recurse))
- if recurse:
- set(g(False))
- else:
- yield
-
- with start_action(action_type=u"the-action"):
- set(g(True))
-
- assert_expected_action_tree(
- self,
- self.eliot_logger,
- u"the-action", [{
- u"a-recurse=True": [
- u"m-recurse=True", {
- u"a-recurse=False": [
- u"m-recurse=False",
- u"yielded",
- ],
- },
- ],
- }],
- )
-
-
-class InlineCallbacksTests(SyncTestCase):
- # Get our custom assertion failure messages *and* the standard ones.
- longMessage = True
-
- def _a_b_test(self, logger, g):
- with start_action(action_type=u"the-action"):
- self.assertThat(g(), succeeded(Is(None)))
- assert_expected_action_tree(
- self,
- logger,
- u"the-action", [
- u"a",
- u"yielded",
- u"b",
- ],
- )
-
- def test_yield_none(self):
- @inline_callbacks
- def g():
- Message.log(message_type=u"a")
- yield
- Message.log(message_type=u"b")
-
- self._a_b_test(self.eliot_logger, g)
-
- def test_yield_fired_deferred(self):
- @inline_callbacks
- def g():
- Message.log(message_type=u"a")
- yield succeed(None)
- Message.log(message_type=u"b")
-
- self._a_b_test(self.eliot_logger, g)
-
- def test_yield_unfired_deferred(self):
- waiting = Deferred()
-
- @inline_callbacks
- def g():
- Message.log(message_type=u"a")
- yield waiting
- Message.log(message_type=u"b")
-
- with start_action(action_type=u"the-action"):
- d = g()
- self.assertThat(waiting, has_no_result())
- waiting.callback(None)
- self.assertThat(d, succeeded(Is(None)))
- assert_expected_action_tree(
- self,
- self.eliot_logger,
- u"the-action", [
- u"a",
- u"yielded",
- u"b",
- ],
- )
-
-
-class ParseDestinationDescriptionTests(SyncTestCase):
+class ParseDestinationDescriptionTests(SyncTestCase):
"""
Tests for ``_parse_destination_description``.
"""
diff --git a/src/allmydata/util/eliotutil.py b/src/allmydata/util/eliotutil.py
index 535f4913d..7b36a27d1 100644
--- a/src/allmydata/util/eliotutil.py
+++ b/src/allmydata/util/eliotutil.py
@@ -10,8 +10,6 @@ from __future__ import (
)
__all__ = [
- "use_generator_context",
- "eliot_friendly_generator_function",
"inline_callbacks",
"eliot_logging_service",
"opt_eliot_destination",
@@ -30,12 +28,9 @@ __all__ = [
]
from sys import (
- exc_info,
stdout,
)
from functools import wraps
-from contextlib import contextmanager
-from weakref import WeakKeyDictionary
from logging import (
INFO,
Handler,
@@ -67,7 +62,10 @@ from eliot import (
from eliot._validation import (
ValidationError,
)
-from eliot.twisted import DeferredContext
+from eliot.twisted import (
+ DeferredContext,
+ inline_callbacks,
+)
from twisted.python.usage import (
UsageError,
@@ -84,7 +82,6 @@ from twisted.logger import (
globalLogPublisher,
)
from twisted.internet.defer import (
- inlineCallbacks,
maybeDeferred,
)
from twisted.application.service import Service
@@ -97,123 +94,6 @@ from .fake_inotify import (
humanReadableMask,
)
-class _GeneratorContext(object):
- def __init__(self, execution_context):
- self._execution_context = execution_context
- self._contexts = WeakKeyDictionary()
- self._current_generator = None
-
- def init_stack(self, generator):
- stack = list(self._execution_context._get_stack())
- self._contexts[generator] = stack
-
- def get_stack(self):
- if self._current_generator is None:
- # If there is no currently active generator then we have no
- # special stack to supply. Let the execution context figure out a
- # different answer on its own.
- return None
- # Otherwise, give back the action context stack we've been tracking
- # for the currently active generator. It must have been previously
- # initialized (it's too late to do it now)!
- return self._contexts[self._current_generator]
-
- @contextmanager
- def context(self, generator):
- previous_generator = self._current_generator
- try:
- self._current_generator = generator
- yield
- finally:
- self._current_generator = previous_generator
-
-
-from eliot._action import _context
-_the_generator_context = _GeneratorContext(_context)
-
-
-def use_generator_context():
- _context.get_sub_context = _the_generator_context.get_stack
-use_generator_context()
-
-
-def eliot_friendly_generator_function(original):
- """
- Decorate a generator function so that the Eliot action context is
- preserved across ``yield`` expressions.
- """
- @wraps(original)
- def wrapper(*a, **kw):
- # Keep track of whether the next value to deliver to the generator is
- # a non-exception or an exception.
- ok = True
-
- # Keep track of the next value to deliver to the generator.
- value_in = None
-
- # Create the generator with a call to the generator function. This
- # happens with whatever Eliot action context happens to be active,
- # which is fine and correct and also irrelevant because no code in the
- # generator function can run until we call send or throw on it.
- gen = original(*a, **kw)
-
- # Initialize the per-generator Eliot action context stack to the
- # current action stack. This might be the main stack or, if another
- # decorated generator is running, it might be the stack for that
- # generator. Not our business.
- _the_generator_context.init_stack(gen)
- while True:
- try:
- # Whichever way we invoke the generator, we will do it
- # with the Eliot action context stack we've saved for it.
- # Then the context manager will re-save it and restore the
- # "outside" stack for us.
- with _the_generator_context.context(gen):
- if ok:
- value_out = gen.send(value_in)
- else:
- value_out = gen.throw(*value_in)
- # We have obtained a value from the generator. In
- # giving it to us, it has given up control. Note this
- # fact here. Importantly, this is within the
- # generator's action context so that we get a good
- # indication of where the yield occurred.
- #
- # This might be too noisy, consider dropping it or
- # making it optional.
- Message.log(message_type=u"yielded")
- except StopIteration:
- # When the generator raises this, it is signaling
- # completion. Leave the loop.
- break
- else:
- try:
- # Pass the generator's result along to whoever is
- # driving. Capture the result as the next value to
- # send inward.
- value_in = yield value_out
- except:
- # Or capture the exception if that's the flavor of the
- # next value.
- ok = False
- value_in = exc_info()
- else:
- ok = True
-
- return wrapper
-
-
-def inline_callbacks(original):
- """
- Decorate a function like ``inlineCallbacks`` would but in a more
- Eliot-friendly way. Use it just like ``inlineCallbacks`` but where you
- want Eliot action contexts to Do The Right Thing inside the decorated
- function.
- """
- return inlineCallbacks(
- eliot_friendly_generator_function(original)
- )
-
def validateInstanceOf(t):
"""
Return an Eliot validator that requires values to be instances of ``t``.
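The generator-context machinery and inline_callbacks deleted above now ship
with Eliot itself, which is why the pin moved to 1.7.x earlier in this change.
A minimal sketch of the upstream replacement in use, assuming Eliot >= 1.7:

    from eliot import start_action
    from eliot.twisted import inline_callbacks
    from twisted.internet.defer import succeed

    @inline_callbacks
    def do_work():
        with start_action(action_type=u"do-work"):
            # The Eliot action context survives the yield -- the behavior
            # the vendored implementation existed to provide.
            value = yield succeed(42)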
diff --git a/src/allmydata/util/fake_inotify.py b/src/allmydata/util/fake_inotify.py
index 45d360105..284711c52 100644
--- a/src/allmydata/util/fake_inotify.py
+++ b/src/allmydata/util/fake_inotify.py
@@ -91,7 +91,7 @@ class INotify(object):
self.callbacks = callbacks
def event(self, filepath, mask):
- with start_action(action_type=u"fake-inotify:event", path=filepath.path, mask=mask):
+ with start_action(action_type=u"fake-inotify:event", path=filepath.asTextMode().path, mask=mask):
for cb in self.callbacks:
cb(None, filepath, mask)
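For context on the asTextMode() change: a FilePath constructed from bytes
exposes a bytes .path, which does not serialize cleanly as an Eliot log
field, while asTextMode() yields a text-mode copy. A quick illustration:

    from twisted.python.filepath import FilePath

    fp = FilePath(b"/tmp/example")
    print(type(fp.path))               # bytes on a bytes-mode FilePath
    print(type(fp.asTextMode().path))  # str -- safe to log as a JSON field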