Python 3 fixes.

Itamar Turner-Trauring 2020-09-16 11:13:23 -04:00
parent 596c4cec8a
commit 7d8320b843
10 changed files with 56 additions and 39 deletions

View File

@@ -147,7 +147,7 @@ def _make_secret():
     Returns a base32-encoded random secret of hashutil.CRYPTO_VAL_SIZE
     bytes.
     """
-    return base32.b2a(os.urandom(hashutil.CRYPTO_VAL_SIZE)) + "\n"
+    return base32.b2a(os.urandom(hashutil.CRYPTO_VAL_SIZE)) + b"\n"

 class SecretHolder(object):
@@ -739,12 +739,12 @@ class _Client(node.Node, pollmixin.PollMixin):
         # existing key
         def _make_key():
             private_key, _ = ed25519.create_signing_keypair()
-            return ed25519.string_from_signing_key(private_key) + "\n"
+            return ed25519.string_from_signing_key(private_key) + b"\n"

         private_key_str = self.config.get_or_create_private_config("node.privkey", _make_key)
         private_key, public_key = ed25519.signing_keypair_from_string(private_key_str)
         public_key_str = ed25519.string_from_verifying_key(public_key)
-        self.config.write_config_file("node.pubkey", public_key_str + "\n", "w")
+        self.config.write_config_file("node.pubkey", public_key_str + b"\n", "wb")
         self._node_private_key = private_key
         self._node_public_key = public_key
@@ -971,7 +971,7 @@ class _Client(node.Node, pollmixin.PollMixin):
         """
         self.config.write_private_config(
             'api_auth_token',
-            urlsafe_b64encode(os.urandom(32)) + '\n',
+            urlsafe_b64encode(os.urandom(32)) + b'\n',
         )

     def get_storage_broker(self):
@@ -1021,7 +1021,7 @@ class _Client(node.Node, pollmixin.PollMixin):
         c = ControlServer()
         c.setServiceParent(self)
         control_url = self.control_tub.registerReference(c)
-        self.config.write_private_config("control.furl", control_url + "\n")
+        self.config.write_private_config("control.furl", control_url + b"\n")

     def init_helper(self):
         self.helper = Helper(self.config.get_config_path("helper"),
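All four hunks fix the same Python 3 problem: the values being written (the base32 secret, the ed25519 key strings, the base64 API token, the FURL) are byte strings, so the appended newline must also be a bytes literal and the file must be opened in binary mode. A minimal standalone sketch of the failure mode, using only the standard library rather than the Tahoe-LAFS helpers:

    import os
    from base64 import urlsafe_b64encode

    token = urlsafe_b64encode(os.urandom(32))   # bytes on Python 3
    # token + '\n' raises TypeError: can't concat str to bytes
    line = token + b'\n'
    with open("api_auth_token", "wb") as f:     # binary mode for a bytes payload
        f.write(line)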

View File

@@ -106,7 +106,7 @@ class ShareFinder(object):
         server = None
         try:
             if self._servers:
-                server = self._servers.next()
+                server = next(self._servers)
         except StopIteration:
             self._servers = None
@@ -175,7 +175,7 @@ class ShareFinder(object):
                 shnums=shnums_s, name=server.get_name(),
                 level=log.NOISY, parent=lp, umid="0fcEZw")
         shares = []
-        for shnum, bucket in buckets.iteritems():
+        for shnum, bucket in buckets.items():
             s = self._create_share(shnum, bucket, server, dyhb_rtt)
             shares.append(s)
         self._deliver_shares(shares)
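Both changes are mechanical Python 2-to-3 conversions: iterator objects no longer expose a .next() method (use the next() builtin), and dicts no longer have iteritems() (items() returns a view). A small illustrative sketch with made-up values:

    servers = iter(["serverA", "serverB"])
    server = next(servers)                   # Python 2 spelled this servers.next()

    buckets = {7: "bucket-7", 9: "bucket-9"}
    for shnum, bucket in buckets.items():    # iteritems() is gone on Python 3
        print(shnum, bucket)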

View File

@@ -361,6 +361,9 @@ class DownloadNode(object):
             "num_segments": num_segments,
             "block_size": block_size,
             "tail_block_size": tail_block_size,
+        block_size = segment_size // k
+        tail_block_size = tail_segment_padded // k
             }
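The `//` in the added lines matters because Python 3's `/` is true division and always yields a float, even when the division is exact; block sizes have to stay integers. A tiny illustration with hypothetical sizes:

    segment_size, k = 131072, 4
    assert isinstance(segment_size / k, float)   # true division: 32768.0 even though exact
    assert segment_size // k == 32768            # floor division keeps it an int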

View File

@@ -89,7 +89,7 @@ class DownloadStatus(object):
     def __init__(self, storage_index, size):
         self.storage_index = storage_index
         self.size = size
-        self.counter = self.statusid_counter.next()
+        self.counter = next(self.statusid_counter)
         self.helper = False
         self.first_timestamp = None
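statusid_counter is a shared class-level iterator (an itertools.count-style counter), so it too must be advanced with the next() builtin. A minimal sketch of the same pattern, not the actual DownloadStatus class:

    import itertools

    class Status(object):
        statusid_counter = itertools.count(0)            # shared across instances

        def __init__(self):
            self.counter = next(self.statusid_counter)   # .next() no longer exists

    assert Status().counter == 0 and Status().counter == 1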

View File

@@ -205,7 +205,7 @@ class Encoder(object):
             assert IStorageBucketWriter.providedBy(landlords[k])
         self.landlords = landlords.copy()
         assert isinstance(servermap, dict)
-        for v in servermap.itervalues():
+        for v in servermap.values():
             assert isinstance(v, set)
         self.servermap = servermap.copy()
@@ -410,7 +410,7 @@ class Encoder(object):
         assert isinstance(data, (list,tuple))
         if self._aborted:
             raise UploadAborted()
-        data = "".join(data)
+        data = b"".join(data)
         precondition(len(data) <= read_size, len(data), read_size)
         if not allow_short:
             precondition(len(data) == read_size, len(data), read_size)
@@ -418,7 +418,7 @@ class Encoder(object):
         self._crypttext_hasher.update(data)
         if allow_short and len(data) < read_size:
             # padding
-            data += "\x00" * (read_size - len(data))
+            data += b"\x00" * (read_size - len(data))
         encrypted_pieces = [data[i:i+input_chunk_size]
                             for i in range(0, len(data), input_chunk_size)]
         return encrypted_pieces
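Ciphertext is handled as bytes, so both the join and the zero-padding need bytes literals; mixing str and bytes raises TypeError on Python 3. A self-contained sketch of the padding step, with hypothetical sizes rather than the Encoder API:

    pieces = [b"abc", b"def"]
    data = b"".join(pieces)                      # "".join(pieces) would raise TypeError
    read_size = 8
    data += b"\x00" * (read_size - len(data))    # pad with zero *bytes*
    assert data == b"abcdef\x00\x00"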

View File

@@ -27,7 +27,7 @@ from allmydata.interfaces import IUploadable, IUploader, IUploadResults, \
     DEFAULT_MAX_SEGMENT_SIZE, IProgress, IPeerSelector
 from allmydata.immutable import layout
-from six.moves import cStringIO as StringIO
+from io import BytesIO
 from .happiness_upload import share_placement, calculate_happiness

 from ..util.eliotutil import (
@@ -226,7 +226,7 @@ EXTENSION_SIZE = 1000
 # this.
 def pretty_print_shnum_to_servers(s):
-    return ', '.join([ "sh%s: %s" % (k, '+'.join([idlib.shortnodeid_b2a(x) for x in v])) for k, v in s.iteritems() ])
+    return ', '.join([ "sh%s: %s" % (k, '+'.join([idlib.shortnodeid_b2a(x) for x in v])) for k, v in s.items() ])

 class ServerTracker(object):
     def __init__(self, server,
@@ -283,7 +283,7 @@ class ServerTracker(object):
         #log.msg("%s._got_reply(%s)" % (self, (alreadygot, buckets)))
         (alreadygot, buckets) = alreadygot_and_buckets
         b = {}
-        for sharenum, rref in buckets.iteritems():
+        for sharenum, rref in buckets.items():
             bp = self.wbp_class(rref, self._server, self.sharesize,
                                 self.blocksize,
                                 self.num_segments,
@@ -352,7 +352,7 @@ class PeerSelector(object):
     def get_sharemap_of_preexisting_shares(self):
         preexisting = dictutil.DictOfSets()
-        for server, shares in self.existing_shares.iteritems():
+        for server, shares in self.existing_shares.items():
             for share in shares:
                 preexisting.add(share, server)
         return preexisting
@@ -700,7 +700,7 @@ class Tahoe2ServerSelector(log.PrefixingLogMixin):
               % (self, self._get_progress_message(),
                  pretty_print_shnum_to_servers(merged),
                  [', '.join([str_shareloc(k,v)
-                             for k,v in st.buckets.iteritems()])
+                             for k,v in st.buckets.items()])
                   for st in self.use_trackers],
                  pretty_print_shnum_to_servers(self.preexisting_shares))
         self.log(msg, level=log.OPERATIONAL)
@@ -951,7 +951,7 @@ class EncryptAnUploadable(object):
         self._encryptor = aes.create_encryptor(key)

         storage_index = storage_index_hash(key)
-        assert isinstance(storage_index, str)
+        assert isinstance(storage_index, bytes)
         # There's no point to having the SI be longer than the key, so we
         # specify that it is truncated to the same 128 bits as the AES key.
         assert len(storage_index) == 16  # SHA-256 truncated to 128b
@@ -1120,7 +1120,7 @@ class UploadStatus(object):
         self.progress = [0.0, 0.0, 0.0]
         self.active = True
         self.results = None
-        self.counter = self.statusid_counter.next()
+        self.counter = next(self.statusid_counter)
         self.started = time.time()

     def get_started(self):
@@ -1281,7 +1281,7 @@ class CHKUploader(object):
         """
         msgtempl = "set_shareholders; upload_trackers is %s, already_serverids is %s"
         values = ([', '.join([str_shareloc(k,v)
-                              for k,v in st.buckets.iteritems()])
+                              for k,v in st.buckets.items()])
                   for st in upload_trackers], already_serverids)
         self.log(msgtempl % values, level=log.OPERATIONAL)
         # record already-present shares in self._results
@@ -1697,7 +1697,7 @@ class FileHandle(BaseUploadable):
         then the hash will be hashed together with the string in the
         "convergence" argument to form the encryption key.
         """
-        assert convergence is None or isinstance(convergence, str), (convergence, type(convergence))
+        assert convergence is None or isinstance(convergence, bytes), (convergence, type(convergence))
         self._filehandle = filehandle
         self._key = None
         self.convergence = convergence
@@ -1787,8 +1787,8 @@ class Data(FileHandle):
         then the hash will be hashed together with the string in the
         "convergence" argument to form the encryption key.
         """
-        assert convergence is None or isinstance(convergence, str), (convergence, type(convergence))
-        FileHandle.__init__(self, StringIO(data), convergence=convergence)
+        assert convergence is None or isinstance(convergence, bytes), (convergence, type(convergence))
+        FileHandle.__init__(self, BytesIO(data), convergence=convergence)

 @implementer(IUploader)
 class Uploader(service.MultiService, log.PrefixingLogMixin):
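Upload payloads are byte strings, so the in-memory file object becomes io.BytesIO instead of Python 2's cStringIO, and the convergence-secret checks move from str to bytes. A quick standalone illustration, not the actual Uploadable interface:

    from io import BytesIO

    DATA = b"file contents" * 10
    f = BytesIO(DATA)               # io.StringIO(DATA) would raise TypeError on Python 3
    assert f.read(4) == b"file"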

View File

@@ -362,7 +362,7 @@ class _Config(object):
             if default is _None:
                 raise MissingConfigEntry("The required configuration file %s is missing."
                                          % (quote_output(privname),))
-            if isinstance(default, basestring):
+            if isinstance(default, (bytes, unicode)):
                 value = default
             else:
                 value = default()
@@ -375,7 +375,7 @@ class _Config(object):
         return it.
         """
         privname = os.path.join(self._basedir, "private", name)
-        with open(privname, "w") as f:
+        with open(privname, "wb") as f:
             f.write(value)

     def get_private_config(self, name, default=_None):
@@ -759,7 +759,9 @@ class Node(service.MultiService):
         """
         Initialize/create a directory for temporary files.
         """
-        tempdir_config = self.config.get_config("node", "tempdir", "tmp").decode('utf-8')
+        tempdir_config = self.config.get_config("node", "tempdir", "tmp")
+        if isinstance(tempdir_config, bytes):
+            tempdir_config = tempdir_config.decode('utf-8')
         tempdir = self.config.get_config_path(tempdir_config)
         if not os.path.exists(tempdir):
             fileutil.make_dirs(tempdir)
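Python 3 has no basestring, so the type check is spelled as (bytes, unicode) with unicode supplied by past.builtins, and config values are decoded only when they actually arrive as bytes. A sketch of that defensive-decode idiom (to_text is a hypothetical helper, not part of the codebase):

    def to_text(value):
        # accept either bytes or text; Python 3 removed basestring
        if isinstance(value, bytes):
            return value.decode("utf-8")
        return value

    assert to_text(b"tmp") == to_text(u"tmp") == u"tmp"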

View File

@@ -50,8 +50,8 @@ class NodeMaker(object):
     def create_from_cap(self, writecap, readcap=None, deep_immutable=False, name=u"<unknown name>"):
         # this returns synchronously. It starts with a "cap string".
-        assert isinstance(writecap, (str, type(None))), type(writecap)
-        assert isinstance(readcap, (str, type(None))), type(readcap)
+        assert isinstance(writecap, (bytes, type(None))), type(writecap)
+        assert isinstance(readcap, (bytes, type(None))), type(readcap)

         bigcap = writecap or readcap
         if not bigcap:
@@ -63,9 +63,9 @@ class NodeMaker(object):
         # The name doesn't matter for caching since it's only used in the error
         # attribute of an UnknownNode, and we don't cache those.
         if deep_immutable:
-            memokey = "I" + bigcap
+            memokey = b"I" + bigcap
         else:
-            memokey = "M" + bigcap
+            memokey = b"M" + bigcap
         if memokey in self._node_cache:
             node = self._node_cache[memokey]
         else:
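Capability strings were plain str on Python 2 (which meant bytes); on Python 3 they are explicitly bytes, so both the assertions and the one-character memo-key prefix follow suit. A toy illustration (the cap value is made up):

    deep_immutable = True
    bigcap = b"URI:CHK:aaaa:bbbb:3:10:1024"                  # hypothetical cap, as bytes
    memokey = (b"I" if deep_immutable else b"M") + bigcap    # "I" + bigcap would raise TypeError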

View File

@@ -1,3 +1,7 @@
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals

 # This contains a test harness that creates a full Tahoe grid in a single
 # process (actually in a single MultiService) which does not use the network.
@@ -13,6 +17,11 @@
 # Tubs, so it is not useful for tests that involve a Helper or the
 # control.furl .

+from future.utils import PY2
+if PY2:
+    from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min  # noqa: F401
+from past.builtins import unicode
 import os
 from zope.interface import implementer
 from twisted.application import service
@@ -256,6 +265,8 @@ class _NoNetworkClient(_Client):
     def init_stub_client(self):
         pass
     #._servers will be set by the NoNetworkGrid which creates us
+    def init_web(self, *args, **kwargs):
+        pass

 class SimpleStats(object):
     def __init__(self):
@@ -308,6 +319,7 @@ class NoNetworkGrid(service.MultiService):
             d.addCallback(lambda c: self.clients.append(c))

             def _bad(f):
+                print(f)
                 self._setup_errors.append(f)
             d.addErrback(_bad)
@@ -323,7 +335,7 @@ class NoNetworkGrid(service.MultiService):
     @defer.inlineCallbacks
     def make_client(self, i, write_config=True):
-        clientid = hashutil.tagged_hash("clientid", str(i))[:20]
+        clientid = hashutil.tagged_hash(b"clientid", b"%d" % i)[:20]
         clientdir = os.path.join(self.basedir, "clients",
                                  idlib.shortnodeid_b2a(clientid))
         fileutil.make_dirs(clientdir)
@@ -358,7 +370,7 @@ class NoNetworkGrid(service.MultiService):
         defer.returnValue(c)

     def make_server(self, i, readonly=False):
-        serverid = hashutil.tagged_hash("serverid", str(i))[:20]
+        serverid = hashutil.tagged_hash(b"serverid", b"%d" % i)[:20]
         serverdir = os.path.join(self.basedir, "servers",
                                  idlib.shortnodeid_b2a(serverid), "storage")
         fileutil.make_dirs(serverdir)
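hashutil.tagged_hash() hashes byte strings, and str(i) would now produce text; bytes %-formatting (supported since Python 3.5) keeps the seed in bytes. Illustrative values only:

    i = 3
    seed = b"%d" % i                    # b'3'; str(i) would be text, which hashlib refuses
    assert b"clientid" + b"-" + seed == b"clientid-3"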
@@ -381,18 +393,18 @@ class NoNetworkGrid(service.MultiService):
         self.rebuild_serverlist()

     def get_all_serverids(self):
-        return self.proxies_by_id.keys()
+        return list(self.proxies_by_id.keys())

     def rebuild_serverlist(self):
         self._check_clients()
-        self.all_servers = frozenset(self.proxies_by_id.values())
+        self.all_servers = frozenset(list(self.proxies_by_id.values()))
         for c in self.clients:
             c._servers = self.all_servers

     def remove_server(self, serverid):
         # it's enough to remove the server from c._servers (we don't actually
         # have to detach and stopService it)
-        for i,ss in self.servers_by_number.items():
+        for i,ss in list(self.servers_by_number.items()):
             if ss.my_nodeid == serverid:
                 del self.servers_by_number[i]
                 break
@@ -422,7 +434,7 @@ class NoNetworkGrid(service.MultiService):
     def nuke_from_orbit(self):
         """ Empty all share directories in this grid. It's the only way to be sure ;-) """
-        for server in self.servers_by_number.values():
+        for server in list(self.servers_by_number.values()):
             for prefixdir in os.listdir(server.sharedir):
                 if prefixdir != 'incoming':
                     fileutil.rm_dir(os.path.join(server.sharedir, prefixdir))
@@ -506,7 +518,7 @@ class GridTestMixin(object):
         si = tahoe_uri.from_string(uri).get_storage_index()
         prefixdir = storage_index_to_dir(si)
         shares = []
-        for i,ss in self.g.servers_by_number.items():
+        for i,ss in list(self.g.servers_by_number.items()):
             serverid = ss.my_nodeid
             basedir = os.path.join(ss.sharedir, prefixdir)
             if not os.path.exists(basedir):
@@ -527,7 +539,7 @@ class GridTestMixin(object):
         return shares

     def restore_all_shares(self, shares):
-        for sharefile, data in shares.items():
+        for sharefile, data in list(shares.items()):
             with open(sharefile, "wb") as f:
                 f.write(data)
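With the future.builtins dict (and Python 3's native dict), keys()/values()/items() return views; callers that expect a real list, or that delete entries while iterating, need an explicit list(...) copy. A short sketch of both reasons for the wrappers:

    servers_by_number = {0: "ss0", 1: "ss1"}
    # deleting while iterating a live view raises RuntimeError on Python 3
    for i, ss in list(servers_by_number.items()):
        if ss == "ss0":
            del servers_by_number[i]
    serverids = list(servers_by_number.keys())    # a real list, not a view object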

View File

@@ -41,8 +41,8 @@ class Harness(unittest.TestCase):
         g.setServiceParent(self.s)

         c0 = g.clients[0]
-        DATA = "Data to upload" * 100
-        data = Data(DATA, "")
+        DATA = b"Data to upload" * 100
+        data = Data(DATA, b"")
         d = c0.upload(data)
         def _uploaded(res):
             n = c0.create_node_from_uri(res.get_uri())