https://github.com/tahoe-lafs/tahoe-lafs.git
Python 3 fixes.
parent 596c4cec8a, commit 7d8320b843
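Most of the changes below make byte-string handling explicit, since Python 3 no longer allows mixing text and bytes. A standalone sketch of the failure mode being fixed (not code from this repository):

    import base64, os

    token = base64.urlsafe_b64encode(os.urandom(32))  # bytes on Python 3
    # Python 2 tolerated: token + '\n'
    # Python 3 raises:    TypeError: can't concat str to bytes
    line = token + b'\n'  # a bytes literal works on both versions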
@@ -147,7 +147,7 @@ def _make_secret():
     Returns a base32-encoded random secret of hashutil.CRYPTO_VAL_SIZE
     bytes.
     """
-    return base32.b2a(os.urandom(hashutil.CRYPTO_VAL_SIZE)) + "\n"
+    return base32.b2a(os.urandom(hashutil.CRYPTO_VAL_SIZE)) + b"\n"
 
 
 class SecretHolder(object):
@@ -739,12 +739,12 @@ class _Client(node.Node, pollmixin.PollMixin):
         # existing key
         def _make_key():
             private_key, _ = ed25519.create_signing_keypair()
-            return ed25519.string_from_signing_key(private_key) + "\n"
+            return ed25519.string_from_signing_key(private_key) + b"\n"
 
         private_key_str = self.config.get_or_create_private_config("node.privkey", _make_key)
         private_key, public_key = ed25519.signing_keypair_from_string(private_key_str)
         public_key_str = ed25519.string_from_verifying_key(public_key)
-        self.config.write_config_file("node.pubkey", public_key_str + "\n", "w")
+        self.config.write_config_file("node.pubkey", public_key_str + b"\n", "wb")
         self._node_private_key = private_key
         self._node_public_key = public_key
 
@@ -971,7 +971,7 @@ class _Client(node.Node, pollmixin.PollMixin):
         """
         self.config.write_private_config(
             'api_auth_token',
-            urlsafe_b64encode(os.urandom(32)) + '\n',
+            urlsafe_b64encode(os.urandom(32)) + b'\n',
         )
 
     def get_storage_broker(self):
@@ -1021,7 +1021,7 @@ class _Client(node.Node, pollmixin.PollMixin):
         c = ControlServer()
         c.setServiceParent(self)
         control_url = self.control_tub.registerReference(c)
-        self.config.write_private_config("control.furl", control_url + "\n")
+        self.config.write_private_config("control.furl", control_url + b"\n")
 
     def init_helper(self):
         self.helper = Helper(self.config.get_config_path("helper"),
@@ -106,7 +106,7 @@ class ShareFinder(object):
         server = None
         try:
             if self._servers:
-                server = self._servers.next()
+                server = next(self._servers)
         except StopIteration:
             self._servers = None
 
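Note: iterators lost their .next() method in Python 3; the builtin next(), available since Python 2.6, works on both versions. A minimal sketch:

    servers = iter(["srv1", "srv2"])
    first = next(servers)         # replaces servers.next(), which is Python 2 only
    maybe = next(servers, None)   # optional default avoids StopIteration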
@@ -175,7 +175,7 @@ class ShareFinder(object):
                    shnums=shnums_s, name=server.get_name(),
                    level=log.NOISY, parent=lp, umid="0fcEZw")
         shares = []
-        for shnum, bucket in buckets.iteritems():
+        for shnum, bucket in buckets.items():
             s = self._create_share(shnum, bucket, server, dyhb_rtt)
             shares.append(s)
         self._deliver_shares(shares)
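Note: dict.iteritems() is gone in Python 3. .items() exists on both versions (a list on 2, a view on 3), which is equivalent for read-only loops like this one. Sketch:

    buckets = {0: "bucket-a", 1: "bucket-b"}
    for shnum, bucket in buckets.items():   # portable; iteritems() is Python 2 only
        print(shnum, bucket)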
@@ -361,6 +361,9 @@ class DownloadNode(object):
                  "num_segments": num_segments,
                  "block_size": block_size,
                  "tail_block_size": tail_block_size,
                  }
+        block_size = segment_size // k
+        tail_block_size = tail_segment_padded // k
+
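Note: under Python 3 (or from __future__ import division) the / operator always yields a float, so block sizes computed with it would silently become floats; // keeps integer floor division. Sketch with made-up sizes:

    segment_size, k = 131072, 3
    true_div = segment_size / k     # 43690.666... on Python 3 (float)
    floor_div = segment_size // k   # 43690 on Python 2 and 3 (int)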
@@ -89,7 +89,7 @@ class DownloadStatus(object):
     def __init__(self, storage_index, size):
         self.storage_index = storage_index
         self.size = size
-        self.counter = self.statusid_counter.next()
+        self.counter = next(self.statusid_counter)
         self.helper = False
 
         self.first_timestamp = None
@@ -205,7 +205,7 @@ class Encoder(object):
             assert IStorageBucketWriter.providedBy(landlords[k])
         self.landlords = landlords.copy()
         assert isinstance(servermap, dict)
-        for v in servermap.itervalues():
+        for v in servermap.values():
             assert isinstance(v, set)
         self.servermap = servermap.copy()
 
@@ -410,7 +410,7 @@ class Encoder(object):
         assert isinstance(data, (list,tuple))
         if self._aborted:
             raise UploadAborted()
-        data = "".join(data)
+        data = b"".join(data)
         precondition(len(data) <= read_size, len(data), read_size)
         if not allow_short:
             precondition(len(data) == read_size, len(data), read_size)
@@ -418,7 +418,7 @@ class Encoder(object):
         self._crypttext_hasher.update(data)
         if allow_short and len(data) < read_size:
             # padding
-            data += "\x00" * (read_size - len(data))
+            data += b"\x00" * (read_size - len(data))
         encrypted_pieces = [data[i:i+input_chunk_size]
                             for i in range(0, len(data), input_chunk_size)]
         return encrypted_pieces
|
@ -27,7 +27,7 @@ from allmydata.interfaces import IUploadable, IUploader, IUploadResults, \
|
||||
DEFAULT_MAX_SEGMENT_SIZE, IProgress, IPeerSelector
|
||||
from allmydata.immutable import layout
|
||||
|
||||
from six.moves import cStringIO as StringIO
|
||||
from io import BytesIO
|
||||
from .happiness_upload import share_placement, calculate_happiness
|
||||
|
||||
from ..util.eliotutil import (
|
||||
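Note: the cStringIO module no longer exists on Python 3, and the Data uploadable now carries bytes, so io.BytesIO is the matching in-memory file object on both versions. Sketch:

    from io import BytesIO

    f = BytesIO(b"Data to upload" * 100)
    assert f.read(4) == b"Data"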
@@ -226,7 +226,7 @@ EXTENSION_SIZE = 1000
 # this.
 
 def pretty_print_shnum_to_servers(s):
-    return ', '.join([ "sh%s: %s" % (k, '+'.join([idlib.shortnodeid_b2a(x) for x in v])) for k, v in s.iteritems() ])
+    return ', '.join([ "sh%s: %s" % (k, '+'.join([idlib.shortnodeid_b2a(x) for x in v])) for k, v in s.items() ])
 
 class ServerTracker(object):
     def __init__(self, server,
@@ -283,7 +283,7 @@ class ServerTracker(object):
         #log.msg("%s._got_reply(%s)" % (self, (alreadygot, buckets)))
         (alreadygot, buckets) = alreadygot_and_buckets
         b = {}
-        for sharenum, rref in buckets.iteritems():
+        for sharenum, rref in buckets.items():
             bp = self.wbp_class(rref, self._server, self.sharesize,
                                 self.blocksize,
                                 self.num_segments,
@@ -352,7 +352,7 @@ class PeerSelector(object):
 
     def get_sharemap_of_preexisting_shares(self):
         preexisting = dictutil.DictOfSets()
-        for server, shares in self.existing_shares.iteritems():
+        for server, shares in self.existing_shares.items():
             for share in shares:
                 preexisting.add(share, server)
         return preexisting
@@ -700,7 +700,7 @@ class Tahoe2ServerSelector(log.PrefixingLogMixin):
               % (self, self._get_progress_message(),
                  pretty_print_shnum_to_servers(merged),
                  [', '.join([str_shareloc(k,v)
-                             for k,v in st.buckets.iteritems()])
+                             for k,v in st.buckets.items()])
                   for st in self.use_trackers],
                  pretty_print_shnum_to_servers(self.preexisting_shares))
         self.log(msg, level=log.OPERATIONAL)
@@ -951,7 +951,7 @@ class EncryptAnUploadable(object):
             self._encryptor = aes.create_encryptor(key)
 
             storage_index = storage_index_hash(key)
-            assert isinstance(storage_index, str)
+            assert isinstance(storage_index, bytes)
             # There's no point to having the SI be longer than the key, so we
             # specify that it is truncated to the same 128 bits as the AES key.
             assert len(storage_index) == 16  # SHA-256 truncated to 128b
@@ -1120,7 +1120,7 @@ class UploadStatus(object):
         self.progress = [0.0, 0.0, 0.0]
         self.active = True
         self.results = None
-        self.counter = self.statusid_counter.next()
+        self.counter = next(self.statusid_counter)
         self.started = time.time()
 
     def get_started(self):
@@ -1281,7 +1281,7 @@ class CHKUploader(object):
         """
         msgtempl = "set_shareholders; upload_trackers is %s, already_serverids is %s"
         values = ([', '.join([str_shareloc(k,v)
-                              for k,v in st.buckets.iteritems()])
+                              for k,v in st.buckets.items()])
                    for st in upload_trackers], already_serverids)
         self.log(msgtempl % values, level=log.OPERATIONAL)
         # record already-present shares in self._results
@@ -1697,7 +1697,7 @@ class FileHandle(BaseUploadable):
         then the hash will be hashed together with the string in the
         "convergence" argument to form the encryption key.
         """
-        assert convergence is None or isinstance(convergence, str), (convergence, type(convergence))
+        assert convergence is None or isinstance(convergence, bytes), (convergence, type(convergence))
         self._filehandle = filehandle
         self._key = None
         self.convergence = convergence
@@ -1787,8 +1787,8 @@ class Data(FileHandle):
         then the hash will be hashed together with the string in the
         "convergence" argument to form the encryption key.
         """
-        assert convergence is None or isinstance(convergence, str), (convergence, type(convergence))
-        FileHandle.__init__(self, StringIO(data), convergence=convergence)
+        assert convergence is None or isinstance(convergence, bytes), (convergence, type(convergence))
+        FileHandle.__init__(self, BytesIO(data), convergence=convergence)
 
 @implementer(IUploader)
 class Uploader(service.MultiService, log.PrefixingLogMixin):
@@ -362,7 +362,7 @@ class _Config(object):
         if default is _None:
             raise MissingConfigEntry("The required configuration file %s is missing."
                                      % (quote_output(privname),))
-        if isinstance(default, basestring):
+        if isinstance(default, (bytes, unicode)):
             value = default
         else:
             value = default()
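Note: basestring exists only on Python 2, where it is the common base of str and unicode; testing against a (bytes, unicode) tuple expresses the same check in a form that also works once unicode is aliased to str on Python 3 (as the no_network.py hunk below does via past.builtins). A portable sketch:

    try:
        text_type = unicode   # Python 2
    except NameError:
        text_type = str       # Python 3

    def is_stringlike(value):
        # equivalent to Python 2's isinstance(value, basestring), plus bytes on 3
        return isinstance(value, (bytes, text_type))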
@@ -375,7 +375,7 @@ class _Config(object):
         return it.
         """
         privname = os.path.join(self._basedir, "private", name)
-        with open(privname, "w") as f:
+        with open(privname, "wb") as f:
             f.write(value)
 
     def get_private_config(self, name, default=_None):
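Note: on Python 3, a file opened in text mode ("w") only accepts str; writing the bytes values used throughout this commit requires binary mode ("wb"). Sketch (throwaway path, not a real config location):

    import os, tempfile

    path = os.path.join(tempfile.mkdtemp(), "node.privkey")
    with open(path, "wb") as f:   # "w" would raise TypeError for bytes on Python 3
        f.write(b"secret\n")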
@@ -759,7 +759,9 @@ class Node(service.MultiService):
         """
         Initialize/create a directory for temporary files.
         """
-        tempdir_config = self.config.get_config("node", "tempdir", "tmp").decode('utf-8')
+        tempdir_config = self.config.get_config("node", "tempdir", "tmp")
+        if isinstance(tempdir_config, bytes):
+            tempdir_config = tempdir_config.decode('utf-8')
         tempdir = self.config.get_config_path(tempdir_config)
         if not os.path.exists(tempdir):
             fileutil.make_dirs(tempdir)
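Note: depending on the code path, a config value may arrive as bytes (Python 2) or text (Python 3); decoding only when necessary keeps a single code path. A standalone sketch of the pattern (the helper name is illustrative):

    def as_text(value):
        # accept bytes or text and always return text
        if isinstance(value, bytes):
            value = value.decode("utf-8")
        return value

    assert as_text(b"tmp") == as_text(u"tmp") == u"tmp"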
@@ -50,8 +50,8 @@ class NodeMaker(object):
 
     def create_from_cap(self, writecap, readcap=None, deep_immutable=False, name=u"<unknown name>"):
         # this returns synchronously. It starts with a "cap string".
-        assert isinstance(writecap, (str, type(None))), type(writecap)
-        assert isinstance(readcap, (str, type(None))), type(readcap)
+        assert isinstance(writecap, (bytes, type(None))), type(writecap)
+        assert isinstance(readcap, (bytes, type(None))), type(readcap)
 
         bigcap = writecap or readcap
         if not bigcap:
@@ -63,9 +63,9 @@ class NodeMaker(object):
         # The name doesn't matter for caching since it's only used in the error
         # attribute of an UnknownNode, and we don't cache those.
         if deep_immutable:
-            memokey = "I" + bigcap
+            memokey = b"I" + bigcap
         else:
-            memokey = "M" + bigcap
+            memokey = b"M" + bigcap
         if memokey in self._node_cache:
             node = self._node_cache[memokey]
         else:
@@ -1,3 +1,7 @@
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
 
 # This contains a test harness that creates a full Tahoe grid in a single
 # process (actually in a single MultiService) which does not use the network.
@@ -13,6 +17,11 @@
 # Tubs, so it is not useful for tests that involve a Helper or the
 # control.furl .
 
+from future.utils import PY2
+if PY2:
+    from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min  # noqa: F401
+    from past.builtins import unicode
+
 import os
 from zope.interface import implementer
 from twisted.application import service
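Note: this is the standard python-future idiom: on Python 2 the guarded import replaces the builtins with backports that behave like their Python 3 counterparts, while on Python 3 the block is skipped entirely. Shown in isolation (requires the future package on Python 2 only):

    from future.utils import PY2
    if PY2:
        # Python 3-style builtins on Python 2, e.g. indexing bytes yields ints
        from future.builtins import bytes, dict, list, object, range, str  # noqa: F401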
@@ -256,6 +265,8 @@ class _NoNetworkClient(_Client):
     def init_stub_client(self):
         pass
     #._servers will be set by the NoNetworkGrid which creates us
+    def init_web(self, *args, **kwargs):
+        pass
 
 class SimpleStats(object):
     def __init__(self):
@@ -308,6 +319,7 @@ class NoNetworkGrid(service.MultiService):
             d.addCallback(lambda c: self.clients.append(c))
 
             def _bad(f):
+                print(f)
                 self._setup_errors.append(f)
             d.addErrback(_bad)
 
@@ -323,7 +335,7 @@ class NoNetworkGrid(service.MultiService):
 
     @defer.inlineCallbacks
     def make_client(self, i, write_config=True):
-        clientid = hashutil.tagged_hash("clientid", str(i))[:20]
+        clientid = hashutil.tagged_hash(b"clientid", b"%d" % i)[:20]
         clientdir = os.path.join(self.basedir, "clients",
                                  idlib.shortnodeid_b2a(clientid))
         fileutil.make_dirs(clientdir)
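Note: str(i) produces text on Python 3, which cannot be mixed with the bytes tag that tagged_hash expects; b"%d" % i builds bytes directly (bytes interpolation works on Python 2 and on Python 3.5+). Sketch:

    i = 7
    assert b"%d" % i == b"7"
    # str(i) would yield text u"7", unusable in a bytes-only hash on Python 3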
@@ -358,7 +370,7 @@ class NoNetworkGrid(service.MultiService):
         defer.returnValue(c)
 
     def make_server(self, i, readonly=False):
-        serverid = hashutil.tagged_hash("serverid", str(i))[:20]
+        serverid = hashutil.tagged_hash(b"serverid", b"%d" % i)[:20]
         serverdir = os.path.join(self.basedir, "servers",
                                  idlib.shortnodeid_b2a(serverid), "storage")
         fileutil.make_dirs(serverdir)
@@ -381,18 +393,18 @@ class NoNetworkGrid(service.MultiService):
         self.rebuild_serverlist()
 
     def get_all_serverids(self):
-        return self.proxies_by_id.keys()
+        return list(self.proxies_by_id.keys())
 
     def rebuild_serverlist(self):
         self._check_clients()
-        self.all_servers = frozenset(self.proxies_by_id.values())
+        self.all_servers = frozenset(list(self.proxies_by_id.values()))
         for c in self.clients:
             c._servers = self.all_servers
 
     def remove_server(self, serverid):
         # it's enough to remove the server from c._servers (we don't actually
         # have to detach and stopService it)
-        for i,ss in self.servers_by_number.items():
+        for i,ss in list(self.servers_by_number.items()):
             if ss.my_nodeid == serverid:
                 del self.servers_by_number[i]
                 break
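Note: on Python 3, dict.keys()/.values()/.items() return live views, so wrapping them in list() matters where the dict is mutated during iteration (as remove_server does) or where callers expect a real list. Sketch:

    servers = {1: "a", 2: "b", 3: "c"}
    for i, name in list(servers.items()):   # snapshot; iterating a live view
        if name == "b":                     # while deleting raises RuntimeError
            del servers[i]
    assert 2 not in servers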
@@ -422,7 +434,7 @@ class NoNetworkGrid(service.MultiService):
 
     def nuke_from_orbit(self):
         """ Empty all share directories in this grid. It's the only way to be sure ;-) """
-        for server in self.servers_by_number.values():
+        for server in list(self.servers_by_number.values()):
             for prefixdir in os.listdir(server.sharedir):
                 if prefixdir != 'incoming':
                     fileutil.rm_dir(os.path.join(server.sharedir, prefixdir))
@@ -506,7 +518,7 @@ class GridTestMixin(object):
         si = tahoe_uri.from_string(uri).get_storage_index()
         prefixdir = storage_index_to_dir(si)
         shares = []
-        for i,ss in self.g.servers_by_number.items():
+        for i,ss in list(self.g.servers_by_number.items()):
             serverid = ss.my_nodeid
             basedir = os.path.join(ss.sharedir, prefixdir)
             if not os.path.exists(basedir):
@@ -527,7 +539,7 @@ class GridTestMixin(object):
         return shares
 
     def restore_all_shares(self, shares):
-        for sharefile, data in shares.items():
+        for sharefile, data in list(shares.items()):
             with open(sharefile, "wb") as f:
                 f.write(data)
 
@@ -41,8 +41,8 @@ class Harness(unittest.TestCase):
         g.setServiceParent(self.s)
 
         c0 = g.clients[0]
-        DATA = "Data to upload" * 100
-        data = Data(DATA, "")
+        DATA = b"Data to upload" * 100
+        data = Data(DATA, b"")
         d = c0.upload(data)
         def _uploaded(res):
             n = c0.create_node_from_uri(res.get_uri())