Mirror of https://github.com/tahoe-lafs/tahoe-lafs.git (synced 2024-12-20 05:28:04 +00:00)
Commit c34dc78875: Merge 'origin/master' into 3423.return-errorpage-from-filenode
newsfragments/3415.minor (normal file, 0 lines)
newsfragments/3416.minor (normal file, 0 lines)
@@ -147,7 +147,7 @@ def _make_secret():
 Returns a base32-encoded random secret of hashutil.CRYPTO_VAL_SIZE
 bytes.
 """
- return base32.b2a(os.urandom(hashutil.CRYPTO_VAL_SIZE)) + "\n"
+ return base32.b2a(os.urandom(hashutil.CRYPTO_VAL_SIZE)) + b"\n"

 class SecretHolder(object):

@@ -739,12 +739,12 @@ class _Client(node.Node, pollmixin.PollMixin):
 # existing key
 def _make_key():
 private_key, _ = ed25519.create_signing_keypair()
- return ed25519.string_from_signing_key(private_key) + "\n"
+ return ed25519.string_from_signing_key(private_key) + b"\n"

 private_key_str = self.config.get_or_create_private_config("node.privkey", _make_key)
 private_key, public_key = ed25519.signing_keypair_from_string(private_key_str)
 public_key_str = ed25519.string_from_verifying_key(public_key)
- self.config.write_config_file("node.pubkey", public_key_str + "\n", "w")
+ self.config.write_config_file("node.pubkey", public_key_str + b"\n", "wb")
 self._node_private_key = private_key
 self._node_public_key = public_key

@@ -971,7 +971,7 @@ class _Client(node.Node, pollmixin.PollMixin):
 """
 self.config.write_private_config(
 'api_auth_token',
- urlsafe_b64encode(os.urandom(32)) + '\n',
+ urlsafe_b64encode(os.urandom(32)) + b'\n',
 )

 def get_storage_broker(self):

@@ -1021,7 +1021,7 @@ class _Client(node.Node, pollmixin.PollMixin):
 c = ControlServer()
 c.setServiceParent(self)
 control_url = self.control_tub.registerReference(c)
- self.config.write_private_config("control.furl", control_url + "\n")
+ self.config.write_private_config("control.furl", control_url + b"\n")

 def init_helper(self):
 self.helper = Helper(self.config.get_config_path("helper"),
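A note on the pattern in the hunks above: on Python 3, bytes and str no longer concatenate, and these secrets and keys are handled as bytes, so the trailing newline (and the file mode) moves to the bytes side. A minimal sketch of the behavior being worked around (generic Python, not Tahoe code; the value and filename are made up):

    secret = b"ua3j2lmxp"                 # e.g. what a base32 encoder returns: bytes
    # secret + "\n"                       # Python 3: TypeError: can't concat str to bytes
    line = secret + b"\n"                 # works on both Python 2 and Python 3
    with open("secret_file", "wb") as f:  # binary mode so bytes can be written
        f.write(line)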
@@ -117,7 +117,7 @@ class ValidatedExtendedURIProxy(object):


 # Next: things that are optional and not redundant: crypttext_hash
- if d.has_key('crypttext_hash'):
+ if 'crypttext_hash' in d:
 self.crypttext_hash = d['crypttext_hash']
 if len(self.crypttext_hash) != CRYPTO_VAL_SIZE:
 raise BadURIExtension('crypttext_hash is required to be hashutil.CRYPTO_VAL_SIZE bytes, not %s bytes' % (len(self.crypttext_hash),))

@@ -126,11 +126,11 @@ class ValidatedExtendedURIProxy(object):
 # Next: things that are optional, redundant, and required to be
 # consistent: codec_name, codec_params, tail_codec_params,
 # num_segments, size, needed_shares, total_shares
- if d.has_key('codec_name'):
- if d['codec_name'] != "crs":
+ if 'codec_name' in d:
+ if d['codec_name'] != b"crs":
 raise UnsupportedErasureCodec(d['codec_name'])

- if d.has_key('codec_params'):
+ if 'codec_params' in d:
 ucpss, ucpns, ucpts = codec.parse_params(d['codec_params'])
 if ucpss != self.segment_size:
 raise BadURIExtension("inconsistent erasure code params: "

@@ -145,7 +145,7 @@ class ValidatedExtendedURIProxy(object):
 "self._verifycap.total_shares: %s" %
 (ucpts, self._verifycap.total_shares))

- if d.has_key('tail_codec_params'):
+ if 'tail_codec_params' in d:
 utcpss, utcpns, utcpts = codec.parse_params(d['tail_codec_params'])
 if utcpss != self.tail_segment_size:
 raise BadURIExtension("inconsistent erasure code params: utcpss: %s != "

@@ -162,7 +162,7 @@ class ValidatedExtendedURIProxy(object):
 "self._verifycap.total_shares: %s" % (utcpts,
 self._verifycap.total_shares))

- if d.has_key('num_segments'):
+ if 'num_segments' in d:
 if d['num_segments'] != self.num_segments:
 raise BadURIExtension("inconsistent num_segments: size: %s, "
 "segment_size: %s, computed_num_segments: %s, "

@@ -170,18 +170,18 @@ class ValidatedExtendedURIProxy(object):
 self.segment_size,
 self.num_segments, d['num_segments']))

- if d.has_key('size'):
+ if 'size' in d:
 if d['size'] != self._verifycap.size:
 raise BadURIExtension("inconsistent size: URI size: %s, UEB size: %s" %
 (self._verifycap.size, d['size']))

- if d.has_key('needed_shares'):
+ if 'needed_shares' in d:
 if d['needed_shares'] != self._verifycap.needed_shares:
 raise BadURIExtension("inconsistent needed shares: URI needed shares: %s, UEB "
 "needed shares: %s" % (self._verifycap.total_shares,
 d['needed_shares']))

- if d.has_key('total_shares'):
+ if 'total_shares' in d:
 if d['total_shares'] != self._verifycap.total_shares:
 raise BadURIExtension("inconsistent total shares: URI total shares: %s, UEB "
 "total shares: %s" % (self._verifycap.total_shares,

@@ -428,7 +428,7 @@ class ValidatedReadBucketProxy(log.PrefixingLogMixin):
 lines.append("%3d: %s" % (i, base32.b2a_or_none(h)))
 self.log(" sharehashes:\n" + "\n".join(lines) + "\n")
 lines = []
- for i,h in blockhashes.items():
+ for i,h in list(blockhashes.items()):
 lines.append("%3d: %s" % (i, base32.b2a_or_none(h)))
 log.msg(" blockhashes:\n" + "\n".join(lines) + "\n")
 raise BadOrMissingHash(le)

@@ -695,7 +695,7 @@ class Checker(log.PrefixingLogMixin):
 bucketdict, success = result

 shareverds = []
- for (sharenum, bucket) in bucketdict.items():
+ for (sharenum, bucket) in list(bucketdict.items()):
 d = self._download_and_verify(s, sharenum, bucket)
 shareverds.append(d)
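The has_key() removals above follow from dict.has_key() being dropped in Python 3; the in operator is the spelling that works on both versions. A minimal sketch (not Tahoe code; the key and value are made up):

    d = {'crypttext_hash': b'\x00' * 32}
    # d.has_key('crypttext_hash')    # Python 2 only; AttributeError on Python 3
    'crypttext_hash' in d            # True on both Python 2 and Python 3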
@@ -106,7 +106,7 @@ class ShareFinder(object):
 server = None
 try:
 if self._servers:
- server = self._servers.next()
+ server = next(self._servers)
 except StopIteration:
 self._servers = None

@@ -175,7 +175,7 @@ class ShareFinder(object):
 shnums=shnums_s, name=server.get_name(),
 level=log.NOISY, parent=lp, umid="0fcEZw")
 shares = []
- for shnum, bucket in buckets.iteritems():
+ for shnum, bucket in buckets.items():
 s = self._create_share(shnum, bucket, server, dyhb_rtt)
 shares.append(s)
 self._deliver_shares(shares)
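The same porting idea drives the iterator and dict changes above: the .next() method and dict.iteritems() are gone in Python 3, while the next() builtin and dict.items() exist on both. A minimal sketch (not Tahoe code; the values are made up):

    servers = iter(["server-a", "server-b"])
    # servers.next()                 # Python 2 only
    next(servers)                    # 'server-a' on both versions

    buckets = {0: "bucket0", 1: "bucket1"}
    for shnum, bucket in buckets.items():   # .iteritems() no longer exists
        pass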
@@ -353,14 +353,14 @@ class DownloadNode(object):

 # each segment is turned into N blocks. All but the last are of size
 # block_size, and the last is of size tail_block_size
- block_size = segment_size / k
- tail_block_size = tail_segment_padded / k
+ block_size = segment_size // k
+ tail_block_size = tail_segment_padded // k

 return { "tail_segment_size": tail_segment_size,
 "tail_segment_padded": tail_segment_padded,
 "num_segments": num_segments,
 "block_size": block_size,
- "tail_block_size": tail_block_size,
+ "tail_block_size": tail_block_size
 }

@@ -455,7 +455,7 @@ class DownloadNode(object):

 shares = []
 shareids = []
- for (shareid, share) in blocks.iteritems():
+ for (shareid, share) in blocks.items():
 assert len(share) == block_size
 shareids.append(shareid)
 shares.append(share)

@@ -465,7 +465,7 @@ class DownloadNode(object):
 del shares
 def _process(buffers):
 decodetime = now() - start
- segment = "".join(buffers)
+ segment = b"".join(buffers)
 assert len(segment) == decoded_size
 del buffers
 if tail:
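The / to // changes above matter because Python 3's / always returns a float, which would turn block sizes into non-integers; // keeps the old truncating integer division on both versions. A quick illustration with made-up numbers (not Tahoe code):

    segment_size, k = 131072, 3
    segment_size / k     # Python 3: 43690.666..., a float
    segment_size // k    # 43690 on Python 2 and 3, usable as a byte count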
@@ -85,8 +85,8 @@ class Share(object):

 self._requested_blocks = [] # (segnum, set(observer2..))
 v = server.get_version()
- ver = v["http://allmydata.org/tahoe/protocols/storage/v1"]
- self._overrun_ok = ver["tolerates-immutable-read-overrun"]
+ ver = v[b"http://allmydata.org/tahoe/protocols/storage/v1"]
+ self._overrun_ok = ver[b"tolerates-immutable-read-overrun"]
 # If _overrun_ok and we guess the offsets correctly, we can get
 # everything in one RTT. If _overrun_ok and we guess wrong, we might
 # need two RTT (but we could get lucky and do it in one). If overrun
@@ -89,7 +89,7 @@ class DownloadStatus(object):
 def __init__(self, storage_index, size):
 self.storage_index = storage_index
 self.size = size
- self.counter = self.statusid_counter.next()
+ self.counter = next(self.statusid_counter)
 self.helper = False

 self.first_timestamp = None
@@ -205,7 +205,7 @@ class Encoder(object):
 assert IStorageBucketWriter.providedBy(landlords[k])
 self.landlords = landlords.copy()
 assert isinstance(servermap, dict)
- for v in servermap.itervalues():
+ for v in servermap.values():
 assert isinstance(v, set)
 self.servermap = servermap.copy()

@@ -410,7 +410,7 @@ class Encoder(object):
 assert isinstance(data, (list,tuple))
 if self._aborted:
 raise UploadAborted()
- data = "".join(data)
+ data = b"".join(data)
 precondition(len(data) <= read_size, len(data), read_size)
 if not allow_short:
 precondition(len(data) == read_size, len(data), read_size)

@@ -418,7 +418,7 @@ class Encoder(object):
 self._crypttext_hasher.update(data)
 if allow_short and len(data) < read_size:
 # padding
- data += "\x00" * (read_size - len(data))
+ data += b"\x00" * (read_size - len(data))
 encrypted_pieces = [data[i:i+input_chunk_size]
 for i in range(0, len(data), input_chunk_size)]
 return encrypted_pieces
@@ -1,4 +1,5 @@
- from six.moves import cStringIO as StringIO
+ from io import BytesIO

 from zope.interface import implementer
 from twisted.internet import defer
 from twisted.internet.interfaces import IPushProducer

@@ -104,7 +105,7 @@ class LiteralFileNode(_ImmutableFileNodeBase):
 # vfs.adapters.ftp._FileToConsumerAdapter), neither of which is
 # likely to be used as the target for a Tahoe download.

- d = basic.FileSender().beginFileTransfer(StringIO(data), consumer)
+ d = basic.FileSender().beginFileTransfer(BytesIO(data), consumer)
 d.addCallback(lambda lastSent: consumer)
 return d
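StringIO is replaced by BytesIO above because the data being streamed is bytes, and Python 3's StringIO only accepts text. A minimal sketch (not Tahoe code; the payload is made up):

    from io import BytesIO
    data = b"literal file contents"
    f = BytesIO(data)      # io.BytesIO accepts bytes on both Python 2 and 3
    f.read(7)              # b'literal'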
@@ -1,4 +1,4 @@
- from past.builtins import long
+ from past.builtins import long, unicode

 import os, time, weakref, itertools
 from zope.interface import implementer

@@ -27,7 +27,7 @@ from allmydata.interfaces import IUploadable, IUploader, IUploadResults, \
 DEFAULT_MAX_SEGMENT_SIZE, IProgress, IPeerSelector
 from allmydata.immutable import layout

- from six.moves import cStringIO as StringIO
+ from io import BytesIO
 from .happiness_upload import share_placement, calculate_happiness

 from ..util.eliotutil import (

@@ -226,7 +226,7 @@ EXTENSION_SIZE = 1000
 # this.

 def pretty_print_shnum_to_servers(s):
- return ', '.join([ "sh%s: %s" % (k, '+'.join([idlib.shortnodeid_b2a(x) for x in v])) for k, v in s.iteritems() ])
+ return ', '.join([ "sh%s: %s" % (k, '+'.join([idlib.shortnodeid_b2a(x) for x in v])) for k, v in s.items() ])

 class ServerTracker(object):
 def __init__(self, server,

@@ -283,7 +283,7 @@ class ServerTracker(object):
 #log.msg("%s._got_reply(%s)" % (self, (alreadygot, buckets)))
 (alreadygot, buckets) = alreadygot_and_buckets
 b = {}
- for sharenum, rref in buckets.iteritems():
+ for sharenum, rref in buckets.items():
 bp = self.wbp_class(rref, self._server, self.sharesize,
 self.blocksize,
 self.num_segments,

@@ -352,7 +352,7 @@ class PeerSelector(object):

 def get_sharemap_of_preexisting_shares(self):
 preexisting = dictutil.DictOfSets()
- for server, shares in self.existing_shares.iteritems():
+ for server, shares in self.existing_shares.items():
 for share in shares:
 preexisting.add(share, server)
 return preexisting

@@ -419,8 +419,8 @@ class Tahoe2ServerSelector(log.PrefixingLogMixin):
 # 12GiB). See #439 for details.
 def _get_maxsize(server):
 v0 = server.get_version()
- v1 = v0["http://allmydata.org/tahoe/protocols/storage/v1"]
- return v1["maximum-immutable-share-size"]
+ v1 = v0[b"http://allmydata.org/tahoe/protocols/storage/v1"]
+ return v1[b"maximum-immutable-share-size"]

 for server in candidate_servers:
 self.peer_selector.add_peer(server.get_serverid())

@@ -700,7 +700,7 @@ class Tahoe2ServerSelector(log.PrefixingLogMixin):
 % (self, self._get_progress_message(),
 pretty_print_shnum_to_servers(merged),
 [', '.join([str_shareloc(k,v)
- for k,v in st.buckets.iteritems()])
+ for k,v in st.buckets.items()])
 for st in self.use_trackers],
 pretty_print_shnum_to_servers(self.preexisting_shares))
 self.log(msg, level=log.OPERATIONAL)

@@ -951,7 +951,7 @@ class EncryptAnUploadable(object):
 self._encryptor = aes.create_encryptor(key)

 storage_index = storage_index_hash(key)
- assert isinstance(storage_index, str)
+ assert isinstance(storage_index, bytes)
 # There's no point to having the SI be longer than the key, so we
 # specify that it is truncated to the same 128 bits as the AES key.
 assert len(storage_index) == 16 # SHA-256 truncated to 128b

@@ -1120,7 +1120,7 @@ class UploadStatus(object):
 self.progress = [0.0, 0.0, 0.0]
 self.active = True
 self.results = None
- self.counter = self.statusid_counter.next()
+ self.counter = next(self.statusid_counter)
 self.started = time.time()

 def get_started(self):

@@ -1281,7 +1281,7 @@ class CHKUploader(object):
 """
 msgtempl = "set_shareholders; upload_trackers is %s, already_serverids is %s"
 values = ([', '.join([str_shareloc(k,v)
- for k,v in st.buckets.iteritems()])
+ for k,v in st.buckets.items()])
 for st in upload_trackers], already_serverids)
 self.log(msgtempl % values, level=log.OPERATIONAL)
 # record already-present shares in self._results

@@ -1377,7 +1377,7 @@ class LiteralUploader(object):
 self._progress.set_progress_total(size)
 return read_this_many_bytes(uploadable, size)
 d.addCallback(_got_size)
- d.addCallback(lambda data: uri.LiteralFileURI("".join(data)))
+ d.addCallback(lambda data: uri.LiteralFileURI(b"".join(data)))
 d.addCallback(lambda u: u.to_string())
 d.addCallback(self._build_results)
 return d

@@ -1500,7 +1500,7 @@ class AssistedUploader(object):

 Returns a Deferred that will fire with the UploadResults instance.
 """
- precondition(isinstance(storage_index, str), storage_index)
+ precondition(isinstance(storage_index, bytes), storage_index)
 self._started = time.time()
 eu = IEncryptedUploadable(encrypted_uploadable)
 eu.set_upload_status(self._upload_status)

@@ -1653,7 +1653,7 @@ class BaseUploadable(object):
 def set_default_encoding_parameters(self, default_params):
 assert isinstance(default_params, dict)
 for k,v in default_params.items():
- precondition(isinstance(k, str), k, v)
+ precondition(isinstance(k, (bytes, unicode)), k, v)
 precondition(isinstance(v, int), k, v)
 if "k" in default_params:
 self.default_encoding_param_k = default_params["k"]

@@ -1697,7 +1697,7 @@ class FileHandle(BaseUploadable):
 then the hash will be hashed together with the string in the
 "convergence" argument to form the encryption key.
 """
- assert convergence is None or isinstance(convergence, str), (convergence, type(convergence))
+ assert convergence is None or isinstance(convergence, bytes), (convergence, type(convergence))
 self._filehandle = filehandle
 self._key = None
 self.convergence = convergence

@@ -1773,7 +1773,7 @@ class FileName(FileHandle):
 then the hash will be hashed together with the string in the
 "convergence" argument to form the encryption key.
 """
- assert convergence is None or isinstance(convergence, str), (convergence, type(convergence))
+ assert convergence is None or isinstance(convergence, bytes), (convergence, type(convergence))
 FileHandle.__init__(self, open(filename, "rb"), convergence=convergence)
 def close(self):
 FileHandle.close(self)

@@ -1787,8 +1787,8 @@ class Data(FileHandle):
 then the hash will be hashed together with the string in the
 "convergence" argument to form the encryption key.
 """
- assert convergence is None or isinstance(convergence, str), (convergence, type(convergence))
- FileHandle.__init__(self, StringIO(data), convergence=convergence)
+ assert convergence is None or isinstance(convergence, bytes), (convergence, type(convergence))
+ FileHandle.__init__(self, BytesIO(data), convergence=convergence)

 @implementer(IUploader)
 class Uploader(service.MultiService, log.PrefixingLogMixin):
@@ -362,7 +362,7 @@ class _Config(object):
 if default is _None:
 raise MissingConfigEntry("The required configuration file %s is missing."
 % (quote_output(privname),))
- if isinstance(default, basestring):
+ if isinstance(default, (bytes, unicode)):
 value = default
 else:
 value = default()

@@ -375,7 +375,7 @@ class _Config(object):
 return it.
 """
 privname = os.path.join(self._basedir, "private", name)
- with open(privname, "w") as f:
+ with open(privname, "wb") as f:
 f.write(value)

 def get_private_config(self, name, default=_None):

@@ -759,7 +759,9 @@ class Node(service.MultiService):
 """
 Initialize/create a directory for temporary files.
 """
- tempdir_config = self.config.get_config("node", "tempdir", "tmp").decode('utf-8')
+ tempdir_config = self.config.get_config("node", "tempdir", "tmp")
+ if isinstance(tempdir_config, bytes):
+ tempdir_config = tempdir_config.decode('utf-8')
 tempdir = self.config.get_config_path(tempdir_config)
 if not os.path.exists(tempdir):
 fileutil.make_dirs(tempdir)
@@ -50,8 +50,8 @@ class NodeMaker(object):

 def create_from_cap(self, writecap, readcap=None, deep_immutable=False, name=u"<unknown name>"):
 # this returns synchronously. It starts with a "cap string".
- assert isinstance(writecap, (str, type(None))), type(writecap)
- assert isinstance(readcap, (str, type(None))), type(readcap)
+ assert isinstance(writecap, (bytes, type(None))), type(writecap)
+ assert isinstance(readcap, (bytes, type(None))), type(readcap)

 bigcap = writecap or readcap
 if not bigcap:

@@ -63,9 +63,9 @@ class NodeMaker(object):
 # The name doesn't matter for caching since it's only used in the error
 # attribute of an UnknownNode, and we don't cache those.
 if deep_immutable:
- memokey = "I" + bigcap
+ memokey = b"I" + bigcap
 else:
- memokey = "M" + bigcap
+ memokey = b"M" + bigcap
 if memokey in self._node_cache:
 node = self._node_cache[memokey]
 else:
@@ -1,4 +1,18 @@
- from future.utils import bytes_to_native_str
+ """
+ Ported to Python 3.
+ """
+ from __future__ import division
+ from __future__ import absolute_import
+ from __future__ import print_function
+ from __future__ import unicode_literals
+
+ from future.utils import bytes_to_native_str, PY2
+ if PY2:
+ # Omit open() to get native behavior where open("w") always accepts native
+ # strings. Omit bytes so we don't leak future's custom bytes.
+ from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, pow, round, super, dict, list, object, range, str, max, min # noqa: F401

 import os, re, struct, time
 import weakref
 import six

@@ -228,16 +242,18 @@ class StorageServer(service.MultiService, Referenceable):
 # We're on a platform that has no API to get disk stats.
 remaining_space = 2**64

- version = { "http://allmydata.org/tahoe/protocols/storage/v1" :
- { "maximum-immutable-share-size": remaining_space,
- "maximum-mutable-share-size": MAX_MUTABLE_SHARE_SIZE,
- "available-space": remaining_space,
- "tolerates-immutable-read-overrun": True,
- "delete-mutable-shares-with-zero-length-writev": True,
- "fills-holes-with-zero-bytes": True,
- "prevents-read-past-end-of-share-data": True,
+ # Unicode strings might be nicer, but for now sticking to bytes since
+ # this is what the wire protocol has always been.
+ version = { b"http://allmydata.org/tahoe/protocols/storage/v1" :
+ { b"maximum-immutable-share-size": remaining_space,
+ b"maximum-mutable-share-size": MAX_MUTABLE_SHARE_SIZE,
+ b"available-space": remaining_space,
+ b"tolerates-immutable-read-overrun": True,
+ b"delete-mutable-shares-with-zero-length-writev": True,
+ b"fills-holes-with-zero-bytes": True,
+ b"prevents-read-past-end-of-share-data": True,
 },
- "application-version": str(allmydata.__full_version__),
+ b"application-version": allmydata.__full_version__.encode("utf-8"),
 }
 return version

@@ -671,7 +687,7 @@ class StorageServer(service.MultiService, Referenceable):
 filename = os.path.join(bucketdir, sharenum_s)
 msf = MutableShareFile(filename, self)
 datavs[sharenum] = msf.readv(readv)
- log.msg("returning shares %s" % (datavs.keys(),),
+ log.msg("returning shares %s" % (list(datavs.keys()),),
 facility="tahoe.storage", level=log.NOISY, parent=lp)
 self.add_latency("readv", time.time() - start)
 return datavs
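The b"..." keys in the version dictionary above are significant: on Python 3 a bytes key and a str key are different dictionary keys, so callers (including the storage tests further down) have to look the values up with bytes as well. A minimal sketch (not Tahoe code; the size value is made up):

    v = {b"maximum-immutable-share-size": 2**40}
    v.get("maximum-immutable-share-size")    # None on Python 3: "k" != b"k"
    v.get(b"maximum-immutable-share-size")   # 1099511627776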
@@ -123,7 +123,7 @@ class ShouldFailMixin(object):
 class ReallyEqualMixin(object):
 def failUnlessReallyEqual(self, a, b, msg=None):
 self.assertEqual(a, b, msg)
- self.assertEqual(type(a), type(b), "a :: %r, b :: %r, %r" % (a, b, msg))
+ self.assertEqual(type(a), type(b), "a :: %r (%s), b :: %r (%s), %r" % (a, type(a), b, type(b), msg))


 def skip_if_cannot_represent_filename(u):
@@ -1,3 +1,10 @@
+ """
+ Ported to Python 3.
+ """
+ from __future__ import absolute_import
+ from __future__ import division
+ from __future__ import print_function
+ from __future__ import unicode_literals

 # This contains a test harness that creates a full Tahoe grid in a single
 # process (actually in a single MultiService) which does not use the network.

@@ -13,6 +20,11 @@
 # Tubs, so it is not useful for tests that involve a Helper or the
 # control.furl .

+ from future.utils import PY2, PY3
+ if PY2:
+ from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401
+ from past.builtins import unicode
+
 import os
 from zope.interface import implementer
 from twisted.application import service

@@ -257,6 +269,11 @@ class _NoNetworkClient(_Client):
 pass
 #._servers will be set by the NoNetworkGrid which creates us

+ if PY3:
+ def init_web(self, *args, **kwargs):
+ print("Web service is temporarily disabled until nevow is gone.")
+
 class SimpleStats(object):
 def __init__(self):
 self.counters = {}

@@ -323,7 +340,7 @@ class NoNetworkGrid(service.MultiService):

 @defer.inlineCallbacks
 def make_client(self, i, write_config=True):
- clientid = hashutil.tagged_hash("clientid", str(i))[:20]
+ clientid = hashutil.tagged_hash(b"clientid", b"%d" % i)[:20]
 clientdir = os.path.join(self.basedir, "clients",
 idlib.shortnodeid_b2a(clientid))
 fileutil.make_dirs(clientdir)

@@ -358,7 +375,7 @@ class NoNetworkGrid(service.MultiService):
 defer.returnValue(c)

 def make_server(self, i, readonly=False):
- serverid = hashutil.tagged_hash("serverid", str(i))[:20]
+ serverid = hashutil.tagged_hash(b"serverid", b"%d" % i)[:20]
 serverdir = os.path.join(self.basedir, "servers",
 idlib.shortnodeid_b2a(serverid), "storage")
 fileutil.make_dirs(serverdir)

@@ -381,18 +398,18 @@ class NoNetworkGrid(service.MultiService):
 self.rebuild_serverlist()

 def get_all_serverids(self):
- return self.proxies_by_id.keys()
+ return list(self.proxies_by_id.keys())

 def rebuild_serverlist(self):
 self._check_clients()
- self.all_servers = frozenset(self.proxies_by_id.values())
+ self.all_servers = frozenset(list(self.proxies_by_id.values()))
 for c in self.clients:
 c._servers = self.all_servers

 def remove_server(self, serverid):
 # it's enough to remove the server from c._servers (we don't actually
 # have to detach and stopService it)
- for i,ss in self.servers_by_number.items():
+ for i,ss in list(self.servers_by_number.items()):
 if ss.my_nodeid == serverid:
 del self.servers_by_number[i]
 break

@@ -422,7 +439,7 @@ class NoNetworkGrid(service.MultiService):

 def nuke_from_orbit(self):
 """ Empty all share directories in this grid. It's the only way to be sure ;-) """
- for server in self.servers_by_number.values():
+ for server in list(self.servers_by_number.values()):
 for prefixdir in os.listdir(server.sharedir):
 if prefixdir != 'incoming':
 fileutil.rm_dir(os.path.join(server.sharedir, prefixdir))

@@ -462,10 +479,12 @@ class GridTestMixin(object):

 def _record_webports_and_baseurls(self):
 self.g._check_clients()
- self.client_webports = [c.getServiceNamed("webish").getPortnum()
- for c in self.g.clients]
- self.client_baseurls = [c.getServiceNamed("webish").getURL()
- for c in self.g.clients]
+ if PY2:
+ # Temporarily disabled on Python 3 until Nevow is gone:
+ self.client_webports = [c.getServiceNamed("webish").getPortnum()
+ for c in self.g.clients]
+ self.client_baseurls = [c.getServiceNamed("webish").getURL()
+ for c in self.g.clients]

 def get_client_config(self, i=0):
 self.g._check_clients()

@@ -506,7 +525,7 @@ class GridTestMixin(object):
 si = tahoe_uri.from_string(uri).get_storage_index()
 prefixdir = storage_index_to_dir(si)
 shares = []
- for i,ss in self.g.servers_by_number.items():
+ for i,ss in list(self.g.servers_by_number.items()):
 serverid = ss.my_nodeid
 basedir = os.path.join(ss.sharedir, prefixdir)
 if not os.path.exists(basedir):

@@ -527,7 +546,7 @@ class GridTestMixin(object):
 return shares

 def restore_all_shares(self, shares):
- for sharefile, data in shares.items():
+ for sharefile, data in list(shares.items()):
 with open(sharefile, "wb") as f:
 f.write(data)
@@ -1,3 +1,4 @@
+ from future.utils import native_str

 import os, json, urllib
 from twisted.trial import unittest

@@ -945,7 +946,7 @@ class DeepCheckWebBad(DeepCheckBase, unittest.TestCase):
 def _corrupt_some_shares(self, node):
 for (shnum, serverid, sharefile) in self.find_uri_shares(node.get_uri()):
 if shnum in (0,1):
- yield run_cli("debug", "corrupt-share", sharefile)
+ yield run_cli("debug", "corrupt-share", native_str(sharefile))

 def _delete_most_shares(self, node):
 self.delete_shares_numbered(node.get_uri(), range(1,10))
@@ -1,3 +1,16 @@
+ """
+ Ported to Python 3.
+ """
+ from __future__ import division
+ from __future__ import absolute_import
+ from __future__ import print_function
+ from __future__ import unicode_literals
+
+ from future.utils import PY2
+ if PY2:
+ from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401
+ from past.builtins import chr as byteschr, long

 from zope.interface import implementer
 from twisted.trial import unittest
 from twisted.internet import defer

@@ -15,7 +28,7 @@ class LostPeerError(Exception):
 pass

 def flip_bit(good): # flips the last bit
- return good[:-1] + chr(ord(good[-1]) ^ 0x01)
+ return good[:-1] + byteschr(ord(good[-1]) ^ 0x01)

 @implementer(IStorageBucketWriter, IStorageBucketReader)
 class FakeBucketReaderWriterProxy(object):

@@ -158,7 +171,7 @@ class FakeBucketReaderWriterProxy(object):


 def make_data(length):
- data = "happy happy joy joy" * 100
+ data = b"happy happy joy joy" * 100
 assert length <= len(data)
 return data[:length]

@@ -173,32 +186,32 @@ class ValidatedExtendedURIProxy(unittest.TestCase):
 if _TMP % K != 0:
 _TMP += (K - (_TMP % K))
 TAIL_SEGSIZE = _TMP
- _TMP = SIZE / SEGSIZE
+ _TMP = SIZE // SEGSIZE
 if SIZE % SEGSIZE != 0:
 _TMP += 1
 NUM_SEGMENTS = _TMP
 mindict = { 'segment_size': SEGSIZE,
- 'crypttext_root_hash': '0'*hashutil.CRYPTO_VAL_SIZE,
- 'share_root_hash': '1'*hashutil.CRYPTO_VAL_SIZE }
- optional_consistent = { 'crypttext_hash': '2'*hashutil.CRYPTO_VAL_SIZE,
- 'codec_name': "crs",
- 'codec_params': "%d-%d-%d" % (SEGSIZE, K, M),
- 'tail_codec_params': "%d-%d-%d" % (TAIL_SEGSIZE, K, M),
+ 'crypttext_root_hash': b'0'*hashutil.CRYPTO_VAL_SIZE,
+ 'share_root_hash': b'1'*hashutil.CRYPTO_VAL_SIZE }
+ optional_consistent = { 'crypttext_hash': b'2'*hashutil.CRYPTO_VAL_SIZE,
+ 'codec_name': b"crs",
+ 'codec_params': b"%d-%d-%d" % (SEGSIZE, K, M),
+ 'tail_codec_params': b"%d-%d-%d" % (TAIL_SEGSIZE, K, M),
 'num_segments': NUM_SEGMENTS,
 'size': SIZE,
 'needed_shares': K,
 'total_shares': M,
- 'plaintext_hash': "anything",
- 'plaintext_root_hash': "anything", }
+ 'plaintext_hash': b"anything",
+ 'plaintext_root_hash': b"anything", }
 # optional_inconsistent = { 'crypttext_hash': ('2'*(hashutil.CRYPTO_VAL_SIZE-1), "", 77),
 optional_inconsistent = { 'crypttext_hash': (77,),
- 'codec_name': ("digital fountain", ""),
- 'codec_params': ("%d-%d-%d" % (SEGSIZE, K-1, M),
- "%d-%d-%d" % (SEGSIZE-1, K, M),
- "%d-%d-%d" % (SEGSIZE, K, M-1)),
- 'tail_codec_params': ("%d-%d-%d" % (TAIL_SEGSIZE, K-1, M),
- "%d-%d-%d" % (TAIL_SEGSIZE-1, K, M),
- "%d-%d-%d" % (TAIL_SEGSIZE, K, M-1)),
+ 'codec_name': (b"digital fountain", b""),
+ 'codec_params': (b"%d-%d-%d" % (SEGSIZE, K-1, M),
+ b"%d-%d-%d" % (SEGSIZE-1, K, M),
+ b"%d-%d-%d" % (SEGSIZE, K, M-1)),
+ 'tail_codec_params': (b"%d-%d-%d" % (TAIL_SEGSIZE, K-1, M),
+ b"%d-%d-%d" % (TAIL_SEGSIZE-1, K, M),
+ b"%d-%d-%d" % (TAIL_SEGSIZE, K, M-1)),
 'num_segments': (NUM_SEGMENTS-1,),
 'size': (SIZE-1,),
 'needed_shares': (K-1,),

@@ -209,7 +222,7 @@ class ValidatedExtendedURIProxy(unittest.TestCase):
 uebhash = hashutil.uri_extension_hash(uebstring)
 fb = FakeBucketReaderWriterProxy()
 fb.put_uri_extension(uebstring)
- verifycap = uri.CHKFileVerifierURI(storage_index='x'*16, uri_extension_hash=uebhash, needed_shares=self.K, total_shares=self.M, size=self.SIZE)
+ verifycap = uri.CHKFileVerifierURI(storage_index=b'x'*16, uri_extension_hash=uebhash, needed_shares=self.K, total_shares=self.M, size=self.SIZE)
 vup = checker.ValidatedExtendedURIProxy(fb, verifycap)
 return vup.start()

@@ -232,7 +245,7 @@ class ValidatedExtendedURIProxy(unittest.TestCase):

 def test_reject_insufficient(self):
 dl = []
- for k in self.mindict.iterkeys():
+ for k in self.mindict.keys():
 insuffdict = self.mindict.copy()
 del insuffdict[k]
 d = self._test_reject(insuffdict)

@@ -241,7 +254,7 @@ class ValidatedExtendedURIProxy(unittest.TestCase):

 def test_accept_optional(self):
 dl = []
- for k in self.optional_consistent.iterkeys():
+ for k in self.optional_consistent.keys():
 mydict = self.mindict.copy()
 mydict[k] = self.optional_consistent[k]
 d = self._test_accept(mydict)

@@ -250,7 +263,7 @@ class ValidatedExtendedURIProxy(unittest.TestCase):

 def test_reject_optional(self):
 dl = []
- for k in self.optional_inconsistent.iterkeys():
+ for k in self.optional_inconsistent.keys():
 for v in self.optional_inconsistent[k]:
 mydict = self.mindict.copy()
 mydict[k] = v

@@ -264,7 +277,7 @@ class Encode(unittest.TestCase):
 data = make_data(datalen)
 # force use of multiple segments
 e = encode.Encoder()
- u = upload.Data(data, convergence="some convergence string")
+ u = upload.Data(data, convergence=b"some convergence string")
 u.set_default_encoding_parameters({'max_segment_size': max_segment_size,
 'k': 25, 'happy': 75, 'n': 100})
 eu = upload.EncryptAnUploadable(u)

@@ -294,7 +307,7 @@ class Encode(unittest.TestCase):

 def _check(res):
 verifycap = res
- self.failUnless(isinstance(verifycap.uri_extension_hash, str))
+ self.failUnless(isinstance(verifycap.uri_extension_hash, bytes))
 self.failUnlessEqual(len(verifycap.uri_extension_hash), 32)
 for i,peer in enumerate(all_shareholders):
 self.failUnless(peer.closed)

@@ -398,7 +411,7 @@ class Roundtrip(GridTestMixin, unittest.TestCase):
 self.basedir = self.mktemp()
 self.set_up_grid()
 self.c0 = self.g.clients[0]
- DATA = "p"*size
+ DATA = b"p"*size
 d = self.upload(DATA)
 d.addCallback(lambda n: download_to_data(n))
 def _downloaded(newdata):
@@ -1,5 +1,16 @@
- # Test the NoNetworkGrid test harness
+ """
+ Test the NoNetworkGrid test harness.
+
+ Ported to Python 3.
+ """
+ from __future__ import absolute_import
+ from __future__ import division
+ from __future__ import print_function
+ from __future__ import unicode_literals
+
+ from future.utils import PY2
+ if PY2:
+ from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401

 from twisted.trial import unittest
 from twisted.application import service

@@ -41,8 +52,8 @@ class Harness(unittest.TestCase):
 g.setServiceParent(self.s)

 c0 = g.clients[0]
- DATA = "Data to upload" * 100
- data = Data(DATA, "")
+ DATA = b"Data to upload" * 100
+ data = Data(DATA, b"")
 d = c0.upload(data)
 def _uploaded(res):
 n = c0.create_node_from_uri(res.get_uri())
@@ -366,21 +366,21 @@ class Server(unittest.TestCase):
 def test_declares_fixed_1528(self):
 ss = self.create("test_declares_fixed_1528")
 ver = ss.remote_get_version()
- sv1 = ver['http://allmydata.org/tahoe/protocols/storage/v1']
- self.failUnless(sv1.get('prevents-read-past-end-of-share-data'), sv1)
+ sv1 = ver[b'http://allmydata.org/tahoe/protocols/storage/v1']
+ self.failUnless(sv1.get(b'prevents-read-past-end-of-share-data'), sv1)

 def test_declares_maximum_share_sizes(self):
 ss = self.create("test_declares_maximum_share_sizes")
 ver = ss.remote_get_version()
- sv1 = ver['http://allmydata.org/tahoe/protocols/storage/v1']
- self.failUnlessIn('maximum-immutable-share-size', sv1)
- self.failUnlessIn('maximum-mutable-share-size', sv1)
+ sv1 = ver[b'http://allmydata.org/tahoe/protocols/storage/v1']
+ self.failUnlessIn(b'maximum-immutable-share-size', sv1)
+ self.failUnlessIn(b'maximum-mutable-share-size', sv1)

 def test_declares_available_space(self):
 ss = self.create("test_declares_available_space")
 ver = ss.remote_get_version()
- sv1 = ver['http://allmydata.org/tahoe/protocols/storage/v1']
- self.failUnlessIn('available-space', sv1)
+ sv1 = ver[b'http://allmydata.org/tahoe/protocols/storage/v1']
+ self.failUnlessIn(b'available-space', sv1)

 def allocate(self, ss, storage_index, sharenums, size, canary=None):
 renew_secret = hashutil.tagged_hash(b"blah", b"%d" % next(self._lease_secret))

@@ -740,6 +740,12 @@ class Server(unittest.TestCase):
 leases = list(ss.get_leases(b"si3"))
 self.failUnlessEqual(len(leases), 2)

+ def test_have_shares(self):
+ """By default the StorageServer has no shares."""
+ workdir = self.workdir("test_have_shares")
+ ss = StorageServer(workdir, b"\x00" * 20, readonly_storage=True)
+ self.assertFalse(ss.have_shares())
+
 def test_readonly(self):
 workdir = self.workdir("test_readonly")
 ss = StorageServer(workdir, b"\x00" * 20, readonly_storage=True)

@@ -974,8 +980,8 @@ class MutableServer(unittest.TestCase):
 # Also see if the server explicitly declares that it supports this
 # feature.
 ver = ss.remote_get_version()
- storage_v1_ver = ver["http://allmydata.org/tahoe/protocols/storage/v1"]
- self.failUnless(storage_v1_ver.get("fills-holes-with-zero-bytes"))
+ storage_v1_ver = ver[b"http://allmydata.org/tahoe/protocols/storage/v1"]
+ self.failUnless(storage_v1_ver.get(b"fills-holes-with-zero-bytes"))

 # If the size is dropped to zero the share is deleted.
 answer = rstaraw(b"si1", secrets,
@@ -210,17 +210,17 @@ class Extension(testutil.ReallyEqualMixin, unittest.TestCase):
 }
 ext = uri.pack_extension(data)
 d = uri.unpack_extension(ext)
- self.failUnlessReallyEqual(d[b"stuff"], b"value")
- self.failUnlessReallyEqual(d[b"size"], 12)
- self.failUnlessReallyEqual(d[b"big_hash"], hashutil.tagged_hash(b"foo", b"bar"))
+ self.failUnlessReallyEqual(d["stuff"], b"value")
+ self.failUnlessReallyEqual(d["size"], 12)
+ self.failUnlessReallyEqual(d["big_hash"], hashutil.tagged_hash(b"foo", b"bar"))

 readable = uri.unpack_extension_readable(ext)
- self.failUnlessReallyEqual(readable[b"needed_shares"], 3)
- self.failUnlessReallyEqual(readable[b"stuff"], b"value")
- self.failUnlessReallyEqual(readable[b"size"], 12)
- self.failUnlessReallyEqual(readable[b"big_hash"],
+ self.failUnlessReallyEqual(readable["needed_shares"], 3)
+ self.failUnlessReallyEqual(readable["stuff"], b"value")
+ self.failUnlessReallyEqual(readable["size"], 12)
+ self.failUnlessReallyEqual(readable["big_hash"],
 base32.b2a(hashutil.tagged_hash(b"foo", b"bar")))
- self.failUnlessReallyEqual(readable[b"UEB_hash"],
+ self.failUnlessReallyEqual(readable["UEB_hash"],
 base32.b2a(hashutil.uri_extension_hash(ext)))

 class Unknown(testutil.ReallyEqualMixin, unittest.TestCase):
@@ -59,7 +59,11 @@ from .common import (
 unknown_immcap,
 )

- from allmydata.interfaces import IMutableFileNode, SDMF_VERSION, MDMF_VERSION
+ from allmydata.interfaces import (
+ IMutableFileNode, SDMF_VERSION, MDMF_VERSION,
+ FileTooLargeError,
+ MustBeReadonlyError,
+ )
 from allmydata.mutable import servermap, publish, retrieve
 from .. import common_util as testutil
 from ..common_py3 import TimezoneMixin

@@ -67,6 +71,10 @@ from ..common_web import (
 do_http,
 Error,
 )
+ from ...web.common import (
+ humanize_exception,
+ )
+
 from allmydata.client import _Client, SecretHolder

 # create a fake uploader/downloader, and a couple of fake dirnodes, then

@@ -4790,3 +4798,33 @@ class Web(WebMixin, WebErrorMixin, testutil.StallMixin, testutil.ReallyEqualMixi
 # doesn't reveal anything. This addresses #1720.
 d.addCallback(lambda e: self.assertEquals(str(e), "404 Not Found"))
 return d
+
+
+ class HumanizeExceptionTests(TrialTestCase):
+ """
+ Tests for ``humanize_exception``.
+ """
+ def test_mustbereadonly(self):
+ """
+ ``humanize_exception`` describes ``MustBeReadonlyError``.
+ """
+ text, code = humanize_exception(
+ MustBeReadonlyError(
+ "URI:DIR2 directory writecap used in a read-only context",
+ "<unknown name>",
+ ),
+ )
+ self.assertIn("MustBeReadonlyError", text)
+ self.assertEqual(code, http.BAD_REQUEST)
+
+ def test_filetoolarge(self):
+ """
+ ``humanize_exception`` describes ``FileTooLargeError``.
+ """
+ text, code = humanize_exception(
+ FileTooLargeError(
+ "This file is too large to be uploaded (data_size).",
+ ),
+ )
+ self.assertIn("FileTooLargeError", text)
+ self.assertEqual(code, http.REQUEST_ENTITY_TOO_LARGE)
@@ -13,8 +13,10 @@ from __future__ import unicode_literals

 from future.utils import PY2
 if PY2:
- # Don't import bytes, to prevent leaks.
- from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, dict, list, object, range, str, max, min # noqa: F401
+ # Don't import bytes or str, to prevent future's newbytes leaking and
+ # breaking code that only expects normal bytes.
+ from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, dict, list, object, range, max, min # noqa: F401
+ str = unicode

 from past.builtins import unicode, long

@@ -928,11 +930,11 @@ def unpack_extension(data):
 assert data[length:length+1] == b','
 data = data[length+1:]

- d[key] = value
+ d[str(key, "utf-8")] = value

 # convert certain things to numbers
- for intkey in (b'size', b'segment_size', b'num_segments',
- b'needed_shares', b'total_shares'):
+ for intkey in ('size', 'segment_size', 'num_segments',
+ 'needed_shares', 'total_shares'):
 if intkey in d:
 d[intkey] = int(d[intkey])
 return d

@@ -940,9 +942,9 @@ def unpack_extension(data):

 def unpack_extension_readable(data):
 unpacked = unpack_extension(data)
- unpacked[b"UEB_hash"] = hashutil.uri_extension_hash(data)
+ unpacked["UEB_hash"] = hashutil.uri_extension_hash(data)
 for k in sorted(unpacked.keys()):
- if b'hash' in k:
+ if 'hash' in k:
 unpacked[k] = base32.b2a(unpacked[k])
 return unpacked
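The unpack_extension() change above decodes each wire-format key to a native string, which is why the test assertions earlier revert from d[b"size"] back to d["size"]. A minimal sketch of the conversion (not Tahoe code; the key is made up):

    key = b"needed_shares"
    str(key, "utf-8")      # 'needed_shares' on Python 3: bytes decoded to a str key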
@@ -41,8 +41,10 @@ PORTED_MODULES = [
 "allmydata.storage.immutable",
 "allmydata.storage.lease",
 "allmydata.storage.mutable",
+ "allmydata.storage.server",
 "allmydata.storage.shares",
 "allmydata.test.common_py3",
+ "allmydata.test.no_network",
 "allmydata.uri",
 "allmydata.util._python3",
 "allmydata.util.abbreviate",

@@ -83,6 +85,7 @@ PORTED_TEST_MODULES = [
 "allmydata.test.test_crypto",
 "allmydata.test.test_deferredutil",
 "allmydata.test.test_dictutil",
+ "allmydata.test.test_encode",
 "allmydata.test.test_encodingutil",
 "allmydata.test.test_happiness",
 "allmydata.test.test_hashtree",

@@ -92,6 +95,7 @@ PORTED_TEST_MODULES = [
 "allmydata.test.test_log",
 "allmydata.test.test_monitor",
 "allmydata.test.test_netstring",
+ "allmydata.test.test_no_network",
 "allmydata.test.test_observer",
 "allmydata.test.test_pipeline",
 "allmydata.test.test_python3",
@@ -36,5 +36,5 @@ def download_to_data(n, offset=0, size=None, progress=None):
 :param progress: None or an IProgress implementer
 """
 d = n.read(MemoryConsumer(progress=progress), offset, size)
- d.addCallback(lambda mc: "".join(mc.chunks))
+ d.addCallback(lambda mc: b"".join(mc.chunks))
 return d
@@ -10,7 +10,10 @@ from __future__ import unicode_literals

 from future.utils import PY2
 if PY2:
- from builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401
+ # Don't import bytes to prevent leaking future's bytes.
+ from builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, dict, list, object, range, str, max, min, bytes as future_bytes # noqa: F401
+ else:
+ future_bytes = bytes

 from past.builtins import chr as byteschr

@@ -213,7 +216,7 @@ def bucket_cancel_secret_hash(file_cancel_secret, peerid):


 def _xor(a, b):
- return b"".join([byteschr(c ^ b) for c in a])
+ return b"".join([byteschr(c ^ b) for c in future_bytes(a)])


 def hmac(tag, data):
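The future_bytes() wrapper above exists because iterating a bytes object yields ints on Python 3 but one-character strings on Python 2; wrapping the argument in future's bytes type gives the int behavior on both, and byteschr turns each int back into a single byte. A rough illustration of the Python 3 behavior (not Tahoe code; the values are made up):

    from past.builtins import chr as byteschr
    a, key = b"\x0f\xf0", 0x55
    [c for c in a]                                # Python 3: [15, 240] (ints)
    b"".join([byteschr(c ^ key) for c in a])      # b'Z\xa5'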